Merge ath-next from git://git.kernel.org/pub/scm/linux/kernel/git/kvalo/ath.git
author Kalle Valo <kvalo@codeaurora.org>
Thu, 20 Apr 2017 08:16:06 +0000 (11:16 +0300)
committer Kalle Valo <kvalo@codeaurora.org>
Thu, 20 Apr 2017 08:16:06 +0000 (11:16 +0300)
ath.git patches for 4.12. Major changes:

ath9k

* add support for Dell Wireless 1601 PCI device

* add debugfs file to manually override noise floor
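
That override is exposed as a plain debugfs file. As a minimal sketch of the
usual kernel pattern (a standalone module with illustrative names, not the
actual ath9k patch; the driver's real file sits under its own debugfs
directory, e.g. /sys/kernel/debug/ieee80211/phy*/ath9k/), such a knob is
wired up with debugfs_create_file() and a small write handler:

    /*
     * Minimal sketch, standalone-module form: one write-only debugfs
     * file that overrides an integer.  Names are illustrative, not
     * taken from the ath9k patch.
     */
    #include <linux/module.h>
    #include <linux/debugfs.h>
    #include <linux/fs.h>
    #include <linux/kernel.h>

    static struct dentry *demo_dir;
    static int nf_override;    /* 0 = keep the calibrated value */

    static ssize_t nf_write(struct file *file, const char __user *ubuf,
                            size_t count, loff_t *ppos)
    {
        int val, ret;

        ret = kstrtoint_from_user(ubuf, count, 0, &val);
        if (ret)
            return ret;
        /* a real driver would clamp val to the chip's valid NF range */
        nf_override = val;
        return count;
    }

    static const struct file_operations nf_fops = {
        .owner = THIS_MODULE,
        .open  = simple_open,
        .write = nf_write,
    };

    static int __init nf_demo_init(void)
    {
        demo_dir = debugfs_create_dir("nf_demo", NULL);
        debugfs_create_file("nf_override", 0200, demo_dir, NULL, &nf_fops);
        return 0;
    }

    static void __exit nf_demo_exit(void)
    {
        debugfs_remove_recursive(demo_dir);
    }

    module_init(nf_demo_init);
    module_exit(nf_demo_exit);
    MODULE_LICENSE("GPL");

Writing a value into such a file then forces the noise floor, with 0
conventionally meaning "use the calibrated value"; treat the exact path and
semantics as assumptions to check against the patch itself.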

ath10k

* bump up FW API to 6 for a new QCA6174 firmware branch
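
ath10k firmware images are versioned by firmware API level and shipped as
firmware-N.bin under /lib/firmware/ath10k/<board>/<hw>/; the driver probes
for the newest API it supports and falls back to older images. A minimal
sketch of that descending probe, where fetch_fw() is a made-up wrapper
around the real request_firmware() call:

    /*
     * Sketch of the descending firmware probe.  fetch_fw() is a
     * made-up helper; request_firmware() is the real kernel API.
     */
    #include <linux/firmware.h>
    #include <linux/kernel.h>

    #define FW_API_MAX 6    /* raised to 6 by this series for QCA6174 */
    #define FW_API_MIN 2

    static const struct firmware *fetch_fw(struct device *dev)
    {
        const struct firmware *fw;
        char name[32];
        int api;

        for (api = FW_API_MAX; api >= FW_API_MIN; api--) {
            snprintf(name, sizeof(name), "firmware-%d.bin", api);
            if (!request_firmware(&fw, name, dev))
                return fw;    /* newest image present wins */
        }
        return NULL;
    }

Raising the maximum to 6 means a QCA6174 with the new firmware branch loads
firmware-6.bin when present, while setups with older images keep working
through the fallback.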

wil6210

* support 8 kB RX buffers
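
Bigger RX buffers let the hardware place frames larger than the default
buffer size into a single buffer. This lands as an opt-in switch; the sketch
below shows the general module-parameter shape, but the parameter name
(borrowed from wil6210's rx_large_buf) and the sizes should be treated as
assumptions rather than the driver's exact interface:

    /*
     * Sketch of an opt-in large-buffer switch as a module parameter.
     * The name echoes wil6210's rx_large_buf but is an assumption
     * here, as are the sizes; check the driver for the real knob.
     */
    #include <linux/module.h>

    static bool rx_large_buf;
    module_param(rx_large_buf, bool, 0444);
    MODULE_PARM_DESC(rx_large_buf, "allocate 8KB RX buffers, default no");

    /* pick the RX buffer length once at ring-allocation time */
    static inline unsigned int rx_buf_len(void)
    {
        return rx_large_buf ? 8192 : 2048;
    }

    MODULE_LICENSE("GPL");

With that shape, loading the module with the parameter set (for example
modprobe wil6210 rx_large_buf=1, assuming that name) trades extra memory per
ring entry for the ability to receive large frames.
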

2248 files changed:
Documentation/ABI/testing/sysfs-class-net-qmi
Documentation/admin-guide/kernel-parameters.txt
Documentation/arm64/silicon-errata.txt
Documentation/cgroup-v2.txt
Documentation/dev-tools/kcov.rst
Documentation/devicetree/bindings/arm/marvell/cp110-system-controller0.txt
Documentation/devicetree/bindings/display/exynos/exynos_dsim.txt
Documentation/devicetree/bindings/display/exynos/samsung-fimd.txt
Documentation/devicetree/bindings/mmc/rockchip-dw-mshc.txt
Documentation/devicetree/bindings/net/brcm,bcmgenet.txt
Documentation/devicetree/bindings/net/brcm,unimac-mdio.txt
Documentation/devicetree/bindings/net/can/holt_hi311x.txt [new file with mode: 0644]
Documentation/devicetree/bindings/net/can/ti_hecc.txt [new file with mode: 0644]
Documentation/devicetree/bindings/net/dsa/mt7530.txt [new file with mode: 0644]
Documentation/devicetree/bindings/net/marvell-pp2.txt
Documentation/devicetree/bindings/net/stmmac.txt
Documentation/devicetree/bindings/phy/brcm,nsp-usb3-phy.txt [deleted file]
Documentation/devicetree/bindings/powerpc/4xx/emac.txt
Documentation/devicetree/bindings/regulator/ti-abb-regulator.txt
Documentation/devicetree/bindings/rng/omap_rng.txt
Documentation/devicetree/bindings/usb/usb251xb.txt
Documentation/extcon/intel-int3496.txt
Documentation/gcc-plugins.txt
Documentation/networking/i40e.txt
Documentation/networking/ip-sysctl.txt
Documentation/networking/ipvs-sysctl.txt
Documentation/networking/mpls-sysctl.txt
Documentation/trace/kprobetrace.txt
Documentation/trace/uprobetracer.txt
Documentation/virtual/kvm/api.txt
Documentation/vm/userfaultfd.txt
MAINTAINERS
Makefile
arch/alpha/include/uapi/asm/socket.h
arch/arc/boot/dts/skeleton.dtsi
arch/arc/boot/dts/skeleton_hs.dtsi
arch/arc/boot/dts/skeleton_hs_idu.dtsi
arch/arc/boot/dts/vdk_axs10x_mb.dtsi
arch/arc/include/asm/hugepage.h
arch/arc/include/asm/kprobes.h
arch/arc/include/asm/pgtable.h
arch/arc/kernel/entry-arcv2.S
arch/arc/kernel/setup.c
arch/arc/mm/cache.c
arch/arm/boot/dts/am335x-pcm-953.dtsi
arch/arm/boot/dts/am57xx-idk-common.dtsi
arch/arm/boot/dts/aspeed-g4.dtsi
arch/arm/boot/dts/aspeed-g5.dtsi
arch/arm/boot/dts/bcm5301x.dtsi
arch/arm/boot/dts/bcm953012k.dts
arch/arm/boot/dts/bcm958522er.dts
arch/arm/boot/dts/bcm958525er.dts
arch/arm/boot/dts/bcm958525xmc.dts
arch/arm/boot/dts/bcm958622hr.dts
arch/arm/boot/dts/bcm958623hr.dts
arch/arm/boot/dts/bcm958625hr.dts
arch/arm/boot/dts/bcm988312hr.dts
arch/arm/boot/dts/imx6sx-udoo-neo.dtsi
arch/arm/boot/dts/sama5d2.dtsi
arch/arm/boot/dts/ste-dbx5x0.dtsi
arch/arm/boot/dts/ste-href.dtsi
arch/arm/boot/dts/ste-snowball.dts
arch/arm/boot/dts/sun7i-a20-lamobo-r1.dts
arch/arm/boot/dts/sun8i-a23-a33.dtsi
arch/arm/boot/dts/sun8i-a33.dtsi
arch/arm/boot/dts/sun8i-reference-design-tablet.dtsi
arch/arm/configs/omap2plus_defconfig
arch/arm/include/asm/kvm_arm.h
arch/arm/include/asm/kvm_host.h
arch/arm/include/asm/pgtable.h
arch/arm/kvm/arm.c
arch/arm/kvm/handle_exit.c
arch/arm/mach-at91/pm.c
arch/arm/mach-omap2/Makefile
arch/arm/mach-omap2/gpmc-nand.c [deleted file]
arch/arm/mach-omap2/gpmc-onenand.c
arch/arm/mach-omap2/omap-headsmp.S
arch/arm/mach-omap2/omap_hwmod_3xxx_data.c
arch/arm/tools/syscall.tbl
arch/arm/xen/mm.c
arch/arm64/Kconfig
arch/arm64/boot/dts/broadcom/ns2.dtsi
arch/arm64/include/asm/cpufeature.h
arch/arm64/include/asm/current.h
arch/arm64/include/asm/kvm_host.h
arch/arm64/include/asm/pgtable-types.h
arch/arm64/include/asm/unistd.h
arch/arm64/include/asm/unistd32.h
arch/arm64/kernel/cpuidle.c
arch/arm64/kernel/kaslr.c
arch/arm64/kernel/probes/kprobes.c
arch/arm64/kernel/smp.c
arch/arm64/kernel/vdso/.gitignore
arch/arm64/kvm/handle_exit.c
arch/arm64/kvm/hyp/tlb.c
arch/arm64/mm/kasan_init.c
arch/avr32/include/asm/pgtable-2level.h
arch/avr32/include/uapi/asm/socket.h
arch/avr32/oprofile/backtrace.c
arch/c6x/kernel/ptrace.c
arch/cris/arch-v32/drivers/cryptocop.c
arch/cris/include/asm/pgtable.h
arch/frv/include/asm/pgtable.h
arch/frv/include/uapi/asm/socket.h
arch/h8300/include/asm/pgtable.h
arch/h8300/kernel/ptrace.c
arch/h8300/kernel/ptrace_h.c
arch/hexagon/include/asm/pgtable.h
arch/ia64/include/asm/pgtable.h
arch/ia64/include/uapi/asm/socket.h
arch/m32r/include/uapi/asm/socket.h
arch/m68k/configs/amiga_defconfig
arch/m68k/configs/apollo_defconfig
arch/m68k/configs/atari_defconfig
arch/m68k/configs/bvme6000_defconfig
arch/m68k/configs/hp300_defconfig
arch/m68k/configs/mac_defconfig
arch/m68k/configs/multi_defconfig
arch/m68k/configs/mvme147_defconfig
arch/m68k/configs/mvme16x_defconfig
arch/m68k/configs/q40_defconfig
arch/m68k/configs/sun3_defconfig
arch/m68k/configs/sun3x_defconfig
arch/m68k/include/asm/bitops.h
arch/m68k/include/asm/unistd.h
arch/m68k/include/uapi/asm/unistd.h
arch/m68k/kernel/syscalltable.S
arch/metag/include/asm/pgtable.h
arch/metag/kernel/ptrace.c
arch/microblaze/include/asm/page.h
arch/mips/cavium-octeon/cpu.c
arch/mips/cavium-octeon/crypto/octeon-crypto.c
arch/mips/cavium-octeon/smp.c
arch/mips/include/asm/fpu.h
arch/mips/include/asm/pgtable-32.h
arch/mips/include/asm/pgtable-64.h
arch/mips/include/uapi/asm/socket.h
arch/mips/kernel/ptrace.c
arch/mips/kernel/smp-bmips.c
arch/mips/kernel/smp-mt.c
arch/mips/loongson64/loongson-3/cop2-ex.c
arch/mips/netlogic/common/smp.c
arch/mips/netlogic/xlp/cop2-ex.c
arch/mips/sgi-ip22/ip28-berr.c
arch/mips/sgi-ip27/ip27-berr.c
arch/mips/sgi-ip27/ip27-smp.c
arch/mips/sgi-ip32/ip32-berr.c
arch/mips/sgi-ip32/ip32-reset.c
arch/mn10300/include/asm/page.h
arch/mn10300/include/uapi/asm/socket.h
arch/nios2/include/asm/pgtable.h
arch/nios2/kernel/prom.c
arch/nios2/kernel/setup.c
arch/openrisc/include/asm/cmpxchg.h
arch/openrisc/include/asm/pgtable.h
arch/openrisc/include/asm/uaccess.h
arch/openrisc/kernel/or32_ksyms.c
arch/openrisc/kernel/process.c
arch/parisc/include/asm/cacheflush.h
arch/parisc/include/asm/uaccess.h
arch/parisc/include/uapi/asm/socket.h
arch/parisc/include/uapi/asm/unistd.h
arch/parisc/kernel/cache.c
arch/parisc/kernel/module.c
arch/parisc/kernel/parisc_ksyms.c
arch/parisc/kernel/perf.c
arch/parisc/kernel/process.c
arch/parisc/kernel/syscall_table.S
arch/parisc/lib/Makefile
arch/parisc/lib/fixup.S [deleted file]
arch/parisc/lib/lusercopy.S
arch/parisc/lib/memcpy.c
arch/parisc/mm/fault.c
arch/powerpc/Kconfig
arch/powerpc/Makefile
arch/powerpc/boot/zImage.lds.S
arch/powerpc/crypto/crc32c-vpmsum_glue.c
arch/powerpc/include/asm/bitops.h
arch/powerpc/include/asm/book3s/32/pgtable.h
arch/powerpc/include/asm/book3s/64/pgtable.h
arch/powerpc/include/asm/checksum.h
arch/powerpc/include/asm/cpuidle.h
arch/powerpc/include/asm/elf.h
arch/powerpc/include/asm/mce.h
arch/powerpc/include/asm/nohash/32/pgtable.h
arch/powerpc/include/asm/nohash/64/pgtable-4k.h
arch/powerpc/include/asm/nohash/64/pgtable-64k.h
arch/powerpc/include/asm/nohash/pgtable.h
arch/powerpc/include/asm/ppc-opcode.h
arch/powerpc/include/asm/prom.h
arch/powerpc/include/asm/systbl.h
arch/powerpc/include/asm/unistd.h
arch/powerpc/include/uapi/asm/socket.h
arch/powerpc/include/uapi/asm/unistd.h
arch/powerpc/kernel/cputable.c
arch/powerpc/kernel/idle_book3s.S
arch/powerpc/kernel/mce.c
arch/powerpc/kernel/mce_power.c
arch/powerpc/kernel/prom_init.c
arch/powerpc/kernel/setup_64.c
arch/powerpc/kvm/book3s_64_mmu_hv.c
arch/powerpc/kvm/book3s_hv_rm_mmu.c
arch/powerpc/lib/Makefile
arch/powerpc/lib/sstep.c
arch/powerpc/lib/test_emulate_step.c [new file with mode: 0644]
arch/powerpc/mm/init_64.c
arch/powerpc/mm/pgtable-radix.c
arch/powerpc/perf/core-book3s.c
arch/powerpc/perf/isa207-common.c
arch/powerpc/perf/isa207-common.h
arch/powerpc/platforms/powernv/opal-wrappers.S
arch/powerpc/platforms/powernv/opal.c
arch/powerpc/platforms/powernv/pci-ioda.c
arch/powerpc/platforms/pseries/lpar.c
arch/powerpc/purgatory/trampoline.S
arch/powerpc/sysdev/axonram.c
arch/powerpc/sysdev/xics/icp-opal.c
arch/powerpc/sysdev/xics/xics-common.c
arch/s390/boot/compressed/misc.c
arch/s390/configs/default_defconfig
arch/s390/configs/gcov_defconfig
arch/s390/configs/performance_defconfig
arch/s390/crypto/paes_s390.c
arch/s390/defconfig
arch/s390/include/asm/cputime.h
arch/s390/include/asm/pgtable.h
arch/s390/include/asm/sections.h
arch/s390/include/asm/timex.h
arch/s390/include/asm/uaccess.h
arch/s390/include/uapi/asm/socket.h
arch/s390/include/uapi/asm/unistd.h
arch/s390/kernel/compat_wrapper.c
arch/s390/kernel/entry.S
arch/s390/kernel/ipl.c
arch/s390/kernel/process.c
arch/s390/kernel/smp.c
arch/s390/kernel/syscalls.S
arch/s390/kernel/vmlinux.lds.S
arch/s390/kernel/vtime.c
arch/s390/mm/pgtable.c
arch/score/include/asm/pgtable.h
arch/score/kernel/traps.c
arch/score/mm/extable.c
arch/sh/boards/mach-cayman/setup.c
arch/sh/include/asm/pgtable-2level.h
arch/sh/include/asm/pgtable-3level.h
arch/sparc/include/asm/pgtable_64.h
arch/sparc/include/uapi/asm/socket.h
arch/sparc/kernel/ptrace_64.c
arch/tile/include/asm/pgtable_32.h
arch/tile/include/asm/pgtable_64.h
arch/um/include/asm/pgtable-2level.h
arch/um/include/asm/pgtable-3level.h
arch/unicore32/include/asm/pgtable.h
arch/x86/Makefile
arch/x86/Makefile_32.cpu
arch/x86/boot/compressed/error.c
arch/x86/configs/x86_64_defconfig
arch/x86/events/amd/core.c
arch/x86/events/core.c
arch/x86/events/intel/cstate.c
arch/x86/events/intel/rapl.c
arch/x86/events/intel/uncore.h
arch/x86/hyperv/hv_init.c
arch/x86/include/asm/cpufeatures.h
arch/x86/include/asm/kvm_page_track.h
arch/x86/include/asm/pgtable-3level.h
arch/x86/include/asm/pgtable.h
arch/x86/include/asm/pgtable_types.h
arch/x86/include/asm/pkeys.h
arch/x86/include/asm/purgatory.h [new file with mode: 0644]
arch/x86/include/asm/timer.h
arch/x86/include/asm/tlbflush.h
arch/x86/include/asm/uv/uv_hub.h
arch/x86/include/uapi/asm/bootparam.h
arch/x86/kernel/acpi/boot.c
arch/x86/kernel/apic/apic.c
arch/x86/kernel/apic/x2apic_uv_x.c
arch/x86/kernel/cpu/amd.c
arch/x86/kernel/cpu/centaur.c
arch/x86/kernel/cpu/common.c
arch/x86/kernel/cpu/cyrix.c
arch/x86/kernel/cpu/intel.c
arch/x86/kernel/cpu/intel_rdt_rdtgroup.c
arch/x86/kernel/cpu/mcheck/mce.c
arch/x86/kernel/cpu/mcheck/mce_amd.c
arch/x86/kernel/cpu/transmeta.c
arch/x86/kernel/cpu/vmware.c
arch/x86/kernel/ftrace.c
arch/x86/kernel/head64.c
arch/x86/kernel/hpet.c
arch/x86/kernel/kdebugfs.c
arch/x86/kernel/kprobes/common.h
arch/x86/kernel/kprobes/core.c
arch/x86/kernel/kprobes/opt.c
arch/x86/kernel/machine_kexec_64.c
arch/x86/kernel/nmi.c
arch/x86/kernel/reboot.c
arch/x86/kernel/tsc.c
arch/x86/kernel/unwind_frame.c
arch/x86/kvm/i8259.c
arch/x86/kvm/ioapic.c
arch/x86/kvm/page_track.c
arch/x86/kvm/svm.c
arch/x86/kvm/vmx.c
arch/x86/kvm/x86.c
arch/x86/lib/memcpy_64.S
arch/x86/mm/gup.c
arch/x86/mm/kasan_init_64.c
arch/x86/mm/kaslr.c
arch/x86/mm/mpx.c
arch/x86/pci/common.c
arch/x86/pci/xen.c
arch/x86/platform/intel-mid/device_libs/Makefile
arch/x86/platform/intel-mid/device_libs/platform_mrfld_power_btn.c [new file with mode: 0644]
arch/x86/platform/intel-mid/device_libs/platform_mrfld_wdt.c
arch/x86/platform/intel-mid/mfld.c
arch/x86/platform/uv/tlb_uv.c
arch/x86/purgatory/Makefile
arch/x86/purgatory/purgatory.c
arch/x86/purgatory/setup-x86_64.S
arch/x86/purgatory/sha256.h
arch/xtensa/include/asm/page.h
arch/xtensa/include/asm/pgtable.h
arch/xtensa/include/uapi/asm/socket.h
arch/xtensa/include/uapi/asm/unistd.h
arch/xtensa/kernel/traps.c
block/bio.c
block/blk-core.c
block/blk-mq-sysfs.c
block/blk-mq-tag.c
block/blk-mq.c
block/blk-mq.h
block/blk-stat.c
block/genhd.c
block/sed-opal.c
crypto/af_alg.c
crypto/algif_hash.c
crypto/lrw.c
crypto/xts.c
drivers/acpi/Makefile
drivers/acpi/acpi_platform.c
drivers/acpi/acpi_processor.c
drivers/acpi/apei/ghes.c
drivers/acpi/bus.c
drivers/acpi/internal.h
drivers/acpi/ioapic.c
drivers/acpi/pci_root.c
drivers/acpi/processor_core.c
drivers/acpi/spcr.c
drivers/ata/ahci_qoriq.c
drivers/ata/libata-sff.c
drivers/ata/libata-transport.c
drivers/atm/ambassador.c
drivers/auxdisplay/img-ascii-lcd.c
drivers/base/core.c
drivers/bcma/driver_gpio.c
drivers/bcma/main.c
drivers/block/nbd.c
drivers/block/paride/pcd.c
drivers/block/paride/pd.c
drivers/block/paride/pf.c
drivers/block/paride/pg.c
drivers/block/paride/pt.c
drivers/block/rbd.c
drivers/block/zram/zram_drv.c
drivers/bluetooth/Kconfig
drivers/bluetooth/btqcomsmd.c
drivers/char/hw_random/amd-rng.c
drivers/char/hw_random/geode-rng.c
drivers/char/hw_random/omap-rng.c
drivers/char/nwbutton.c
drivers/char/ppdev.c
drivers/char/random.c
drivers/clk/clk.c
drivers/clk/rockchip/clk-rk3036.c
drivers/clk/sunxi-ng/Kconfig
drivers/clk/sunxi-ng/ccu-sun50i-a64.c
drivers/clk/sunxi-ng/ccu-sun6i-a31.c
drivers/clk/sunxi-ng/ccu_mp.c
drivers/clk/sunxi-ng/ccu_nkmp.c
drivers/clocksource/clkevt-probe.c
drivers/clocksource/tcb_clksrc.c
drivers/cpufreq/cpufreq.c
drivers/cpufreq/intel_pstate.c
drivers/cpuidle/cpuidle-powernv.c
drivers/cpuidle/sysfs.c
drivers/crypto/ccp/ccp-dev-v5.c
drivers/crypto/ccp/ccp-dev.c
drivers/crypto/ccp/ccp-dev.h
drivers/crypto/ccp/ccp-dmaengine.c
drivers/crypto/s5p-sss.c
drivers/crypto/ux500/cryp/cryp.c
drivers/dax/dax.c
drivers/dma/bcm2835-dma.c
drivers/dma/dmaengine.c
drivers/edac/Kconfig
drivers/edac/Makefile
drivers/edac/i5000_edac.c
drivers/edac/i5400_edac.c
drivers/edac/pnd2_edac.c [new file with mode: 0644]
drivers/edac/pnd2_edac.h [new file with mode: 0644]
drivers/edac/xgene_edac.c
drivers/extcon/Kconfig
drivers/extcon/extcon-intel-int3496.c
drivers/firmware/efi/arm-runtime.c
drivers/firmware/efi/efi.c
drivers/firmware/efi/esrt.c
drivers/firmware/efi/libstub/secureboot.c
drivers/gpio/gpio-altera-a10sr.c
drivers/gpio/gpio-altera.c
drivers/gpio/gpio-mcp23s08.c
drivers/gpio/gpio-mockup.c
drivers/gpio/gpio-xgene.c
drivers/gpio/gpiolib-acpi.c
drivers/gpu/drm/amd/acp/Makefile
drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
drivers/gpu/drm/amd/amdgpu/si_dpm.c
drivers/gpu/drm/amd/amdgpu/vi.c
drivers/gpu/drm/amd/powerplay/hwmgr/smu7_clockpowergating.c
drivers/gpu/drm/arm/malidp_crtc.c
drivers/gpu/drm/arm/malidp_hw.c
drivers/gpu/drm/arm/malidp_planes.c
drivers/gpu/drm/arm/malidp_regs.h
drivers/gpu/drm/drm_edid.c
drivers/gpu/drm/drm_fb_helper.c
drivers/gpu/drm/etnaviv/etnaviv_gpu.c
drivers/gpu/drm/exynos/exynos5433_drm_decon.c
drivers/gpu/drm/exynos/exynos7_drm_decon.c
drivers/gpu/drm/exynos/exynos_drm_crtc.c
drivers/gpu/drm/exynos/exynos_drm_crtc.h
drivers/gpu/drm/exynos/exynos_drm_dsi.c
drivers/gpu/drm/exynos/exynos_drm_fimc.c
drivers/gpu/drm/exynos/exynos_drm_fimd.c
drivers/gpu/drm/exynos/exynos_drm_gem.c
drivers/gpu/drm/exynos/exynos_drm_gsc.c
drivers/gpu/drm/exynos/exynos_drm_ipp.c
drivers/gpu/drm/exynos/exynos_drm_rotator.c
drivers/gpu/drm/exynos/exynos_drm_vidi.c
drivers/gpu/drm/exynos/exynos_mixer.c
drivers/gpu/drm/i915/gvt/aperture_gm.c
drivers/gpu/drm/i915/gvt/cfg_space.c
drivers/gpu/drm/i915/gvt/cmd_parser.c
drivers/gpu/drm/i915/gvt/debug.h
drivers/gpu/drm/i915/gvt/display.c
drivers/gpu/drm/i915/gvt/display.h
drivers/gpu/drm/i915/gvt/edid.c
drivers/gpu/drm/i915/gvt/execlist.c
drivers/gpu/drm/i915/gvt/firmware.c
drivers/gpu/drm/i915/gvt/gtt.c
drivers/gpu/drm/i915/gvt/gvt.h
drivers/gpu/drm/i915/gvt/handlers.c
drivers/gpu/drm/i915/gvt/kvmgt.c
drivers/gpu/drm/i915/gvt/mmio.c
drivers/gpu/drm/i915/gvt/mmio.h
drivers/gpu/drm/i915/gvt/opregion.c
drivers/gpu/drm/i915/gvt/render.c
drivers/gpu/drm/i915/gvt/sched_policy.c
drivers/gpu/drm/i915/gvt/scheduler.c
drivers/gpu/drm/i915/gvt/vgpu.c
drivers/gpu/drm/i915/i915_drv.c
drivers/gpu/drm/i915/i915_drv.h
drivers/gpu/drm/i915/i915_gem.c
drivers/gpu/drm/i915/i915_gem_context.c
drivers/gpu/drm/i915/i915_gem_context.h
drivers/gpu/drm/i915/i915_gem_evict.c
drivers/gpu/drm/i915/i915_gem_execbuffer.c
drivers/gpu/drm/i915/i915_gem_object.h
drivers/gpu/drm/i915/i915_gem_shrinker.c
drivers/gpu/drm/i915/i915_irq.c
drivers/gpu/drm/i915/i915_vma.c
drivers/gpu/drm/i915/intel_csr.c
drivers/gpu/drm/i915/intel_display.c
drivers/gpu/drm/i915/intel_engine_cs.c
drivers/gpu/drm/i915/intel_fbdev.c
drivers/gpu/drm/i915/intel_gvt.c
drivers/gpu/drm/i915/intel_hdmi.c
drivers/gpu/drm/i915/intel_hotplug.c
drivers/gpu/drm/i915/intel_lrc.c
drivers/gpu/drm/i915/intel_pm.c
drivers/gpu/drm/i915/intel_ringbuffer.c
drivers/gpu/drm/i915/intel_ringbuffer.h
drivers/gpu/drm/i915/intel_sprite.c
drivers/gpu/drm/i915/intel_uncore.c
drivers/gpu/drm/msm/adreno/a5xx_gpu.c
drivers/gpu/drm/msm/adreno/adreno_gpu.c
drivers/gpu/drm/msm/dsi/dsi_manager.c
drivers/gpu/drm/msm/hdmi/hdmi_audio.c
drivers/gpu/drm/msm/mdp/mdp5/mdp5_pipe.h
drivers/gpu/drm/msm/msm_gem.c
drivers/gpu/drm/msm/msm_gpu.c
drivers/gpu/drm/mxsfb/mxsfb_crtc.c
drivers/gpu/drm/mxsfb/mxsfb_drv.c
drivers/gpu/drm/mxsfb/mxsfb_out.c
drivers/gpu/drm/mxsfb/mxsfb_regs.h
drivers/gpu/drm/omapdrm/omap_gem_dmabuf.c
drivers/gpu/drm/radeon/radeon_ttm.c
drivers/gpu/drm/radeon/si_dpm.c
drivers/gpu/drm/rcar-du/rcar_du_vsp.c
drivers/gpu/drm/tilcdc/tilcdc_crtc.c
drivers/gpu/drm/ttm/ttm_object.c
drivers/gpu/drm/vc4/vc4_crtc.c
drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
drivers/hid/Kconfig
drivers/hid/hid-chicony.c
drivers/hid/hid-core.c
drivers/hid/hid-corsair.c
drivers/hid/hid-ids.h
drivers/hid/hid-sony.c
drivers/hid/hid-xinmo.c
drivers/hid/usbhid/hid-quirks.c
drivers/hid/wacom_sys.c
drivers/hid/wacom_wac.c
drivers/hv/channel.c
drivers/hv/channel_mgmt.c
drivers/hv/hv_fcopy.c
drivers/hv/hv_kvp.c
drivers/hv/hv_snapshot.c
drivers/hv/hv_util.c
drivers/hv/hv_utils_transport.c
drivers/hv/hv_utils_transport.h
drivers/hv/vmbus_drv.c
drivers/hwmon/asus_atk0110.c
drivers/hwmon/it87.c
drivers/hwmon/max31790.c
drivers/hwtracing/intel_th/core.c
drivers/hwtracing/intel_th/pci.c
drivers/i2c/busses/i2c-brcmstb.c
drivers/i2c/busses/i2c-designware-core.h
drivers/i2c/busses/i2c-designware-platdrv.c
drivers/i2c/busses/i2c-exynos5.c
drivers/i2c/busses/i2c-meson.c
drivers/i2c/busses/i2c-mt65xx.c
drivers/i2c/busses/i2c-riic.c
drivers/i2c/i2c-mux.c
drivers/i2c/muxes/i2c-mux-pca954x.c
drivers/iio/adc/ti_am335x_adc.c
drivers/iio/common/hid-sensors/hid-sensor-trigger.c
drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_buffer.c
drivers/iio/magnetometer/ak8974.c
drivers/infiniband/core/cq.c
drivers/infiniband/core/device.c
drivers/infiniband/hw/i40iw/i40iw_utils.c
drivers/infiniband/hw/nes/nes.h
drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
drivers/infiniband/hw/qedr/main.c
drivers/infiniband/hw/qedr/qedr.h
drivers/infiniband/hw/qedr/qedr_cm.c
drivers/infiniband/hw/qedr/qedr_hsi.h [deleted file]
drivers/infiniband/hw/qedr/verbs.c
drivers/infiniband/hw/qib/qib_iba7322.c
drivers/infiniband/hw/vmw_pvrdma/pvrdma.h
drivers/infiniband/hw/vmw_pvrdma/pvrdma_dev_api.h
drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c
drivers/infiniband/hw/vmw_pvrdma/pvrdma_qp.c
drivers/infiniband/sw/rdmavt/mmap.c
drivers/infiniband/sw/rxe/Kconfig
drivers/infiniband/sw/rxe/rxe_mmap.c
drivers/infiniband/sw/rxe/rxe_req.c
drivers/infiniband/sw/rxe/rxe_resp.c
drivers/infiniband/ulp/iser/iscsi_iser.h
drivers/infiniband/ulp/iser/iser_verbs.c
drivers/input/joystick/iforce/iforce-usb.c
drivers/input/misc/cm109.c
drivers/input/misc/ims-pcu.c
drivers/input/misc/yealink.c
drivers/input/mouse/alps.c
drivers/input/mouse/alps.h
drivers/input/mouse/elan_i2c_core.c
drivers/input/rmi4/rmi_f30.c
drivers/input/serio/i8042-x86ia64io.h
drivers/input/tablet/hanwang.c
drivers/input/tablet/kbtab.c
drivers/input/touchscreen/sur40.c
drivers/iommu/amd_iommu.c
drivers/iommu/arm-smmu-v3.c
drivers/iommu/arm-smmu.c
drivers/iommu/exynos-iommu.c
drivers/iommu/intel-iommu.c
drivers/iommu/io-pgtable-arm-v7s.c
drivers/iommu/io-pgtable-arm.c
drivers/iommu/iommu.c
drivers/irqchip/Kconfig
drivers/irqchip/irq-crossbar.c
drivers/irqchip/irq-gic-v3-its.c
drivers/irqchip/irq-mips-gic.c
drivers/isdn/capi/kcapi.c
drivers/isdn/divert/isdn_divert.c
drivers/isdn/gigaset/bas-gigaset.c
drivers/isdn/hardware/eicon/divasi.c
drivers/isdn/hardware/mISDN/Kconfig
drivers/isdn/hardware/mISDN/hfc_multi_8xx.h
drivers/isdn/hardware/mISDN/hfcmulti.c
drivers/isdn/hardware/mISDN/hfcpci.c
drivers/isdn/hardware/mISDN/mISDNipac.c
drivers/isdn/hardware/mISDN/mISDNisar.c
drivers/isdn/hardware/mISDN/w6692.c
drivers/isdn/hisax/amd7930_fn.c
drivers/isdn/hisax/arcofi.c
drivers/isdn/hisax/diva.c
drivers/isdn/hisax/elsa.c
drivers/isdn/hisax/fsm.c
drivers/isdn/hisax/hfc4s8s_l1.c
drivers/isdn/hisax/hfc_2bds0.c
drivers/isdn/hisax/hfc_pci.c
drivers/isdn/hisax/hfc_sx.c
drivers/isdn/hisax/hfc_usb.c
drivers/isdn/hisax/hfcscard.c
drivers/isdn/hisax/icc.c
drivers/isdn/hisax/ipacx.c
drivers/isdn/hisax/isac.c
drivers/isdn/hisax/isar.c
drivers/isdn/hisax/isdnl3.c
drivers/isdn/hisax/st5481_b.c
drivers/isdn/hisax/teleint.c
drivers/isdn/hisax/w6692.c
drivers/isdn/i4l/isdn_ppp.c
drivers/isdn/i4l/isdn_tty.c
drivers/isdn/mISDN/dsp_core.c
drivers/isdn/mISDN/fsm.c
drivers/isdn/mISDN/l1oip_core.c
drivers/macintosh/macio_asic.c
drivers/md/bcache/util.h
drivers/md/dm.c
drivers/md/md-cluster.c
drivers/md/md.c
drivers/md/md.h
drivers/md/raid1.c
drivers/md/raid10.c
drivers/md/raid5.c
drivers/media/dvb-frontends/drx39xyj/drx_driver.h
drivers/media/platform/coda/imx-vdoa.c
drivers/media/platform/exynos-gsc/gsc-core.c
drivers/media/platform/sti/bdisp/bdisp-v4l2.c
drivers/media/platform/vsp1/vsp1_drm.c
drivers/media/rc/lirc_dev.c
drivers/media/rc/nuvoton-cir.c
drivers/media/rc/rc-main.c
drivers/media/rc/serial_ir.c
drivers/media/usb/dvb-usb/dvb-usb-firmware.c
drivers/media/usb/dvb-usb/dw2102.c
drivers/memory/omap-gpmc.c
drivers/misc/cxl/pci.c
drivers/misc/mei/bus-fixup.c
drivers/misc/mei/init.c
drivers/misc/sgi-gru/grufault.c
drivers/misc/vmw_vmci/vmci_guest.c
drivers/mmc/core/block.c
drivers/mmc/core/mmc.c
drivers/mmc/host/mtk-sd.c
drivers/mmc/host/sdhci-of-arasan.c
drivers/mmc/host/sdhci-of-at91.c
drivers/mmc/host/sdhci-pci-core.c
drivers/mmc/host/sdhci.c
drivers/mmc/host/ushc.c
drivers/mtd/spi-nor/spi-nor.c
drivers/net/Makefile
drivers/net/bonding/bond_3ad.c
drivers/net/bonding/bond_alb.c
drivers/net/bonding/bond_main.c
drivers/net/bonding/bond_procfs.c
drivers/net/can/spi/Kconfig
drivers/net/can/spi/Makefile
drivers/net/can/spi/hi311x.c [new file with mode: 0644]
drivers/net/can/ti_hecc.c
drivers/net/cris/eth_v10.c
drivers/net/dsa/Kconfig
drivers/net/dsa/Makefile
drivers/net/dsa/bcm_sf2_cfp.c
drivers/net/dsa/dsa_loop.c [new file with mode: 0644]
drivers/net/dsa/dsa_loop.h [new file with mode: 0644]
drivers/net/dsa/dsa_loop_bdinfo.c [new file with mode: 0644]
drivers/net/dsa/mt7530.c [new file with mode: 0644]
drivers/net/dsa/mt7530.h [new file with mode: 0644]
drivers/net/dsa/mv88e6xxx/Makefile
drivers/net/dsa/mv88e6xxx/chip.c
drivers/net/dsa/mv88e6xxx/global1.c
drivers/net/dsa/mv88e6xxx/global1.h
drivers/net/dsa/mv88e6xxx/global1_atu.c [new file with mode: 0644]
drivers/net/dsa/mv88e6xxx/global2.c
drivers/net/dsa/mv88e6xxx/global2.h
drivers/net/dsa/mv88e6xxx/mv88e6xxx.h
drivers/net/dsa/mv88e6xxx/port.c
drivers/net/dsa/mv88e6xxx/port.h
drivers/net/dummy.c
drivers/net/ethernet/3com/typhoon.c
drivers/net/ethernet/Kconfig
drivers/net/ethernet/Makefile
drivers/net/ethernet/adi/bfin_mac.h
drivers/net/ethernet/aeroflex/greth.c
drivers/net/ethernet/amazon/ena/ena_netdev.c
drivers/net/ethernet/amazon/ena/ena_netdev.h
drivers/net/ethernet/amd/nmclan_cs.c
drivers/net/ethernet/amd/xgbe/xgbe-common.h
drivers/net/ethernet/amd/xgbe/xgbe-dev.c
drivers/net/ethernet/amd/xgbe/xgbe-drv.c
drivers/net/ethernet/amd/xgbe/xgbe-i2c.c
drivers/net/ethernet/amd/xgbe/xgbe-mdio.c
drivers/net/ethernet/apm/Kconfig
drivers/net/ethernet/apm/Makefile
drivers/net/ethernet/apm/xgene-v2/Kconfig [new file with mode: 0644]
drivers/net/ethernet/apm/xgene-v2/Makefile [new file with mode: 0644]
drivers/net/ethernet/apm/xgene-v2/enet.c [new file with mode: 0644]
drivers/net/ethernet/apm/xgene-v2/enet.h [new file with mode: 0644]
drivers/net/ethernet/apm/xgene-v2/ethtool.c [new file with mode: 0644]
drivers/net/ethernet/apm/xgene-v2/mac.c [new file with mode: 0644]
drivers/net/ethernet/apm/xgene-v2/mac.h [new file with mode: 0644]
drivers/net/ethernet/apm/xgene-v2/main.c [new file with mode: 0644]
drivers/net/ethernet/apm/xgene-v2/main.h [new file with mode: 0644]
drivers/net/ethernet/apm/xgene-v2/mdio.c [new file with mode: 0644]
drivers/net/ethernet/apm/xgene-v2/ring.c [new file with mode: 0644]
drivers/net/ethernet/apm/xgene-v2/ring.h [new file with mode: 0644]
drivers/net/ethernet/apm/xgene/xgene_enet_hw.c
drivers/net/ethernet/apm/xgene/xgene_enet_hw.h
drivers/net/ethernet/apm/xgene/xgene_enet_main.c
drivers/net/ethernet/apm/xgene/xgene_enet_main.h
drivers/net/ethernet/apm/xgene/xgene_enet_xgmac.c
drivers/net/ethernet/apm/xgene/xgene_enet_xgmac.h
drivers/net/ethernet/aquantia/atlantic/aq_main.c
drivers/net/ethernet/aquantia/atlantic/aq_nic.c
drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c
drivers/net/ethernet/aquantia/atlantic/aq_ring.c
drivers/net/ethernet/aquantia/atlantic/aq_ring.h
drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c
drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0_internal.h
drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c
drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0_internal.h
drivers/net/ethernet/atheros/alx/alx.h
drivers/net/ethernet/atheros/alx/main.c
drivers/net/ethernet/broadcom/Kconfig
drivers/net/ethernet/broadcom/bcmsysport.c
drivers/net/ethernet/broadcom/bcmsysport.h
drivers/net/ethernet/broadcom/bgmac-bcma.c
drivers/net/ethernet/broadcom/bgmac.c
drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h
drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c
drivers/net/ethernet/broadcom/bnxt/bnxt.c
drivers/net/ethernet/broadcom/bnxt/bnxt.h
drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c
drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.h
drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h
drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.h
drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c
drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.h
drivers/net/ethernet/broadcom/genet/bcmgenet.c
drivers/net/ethernet/broadcom/genet/bcmgenet.h
drivers/net/ethernet/broadcom/genet/bcmgenet_wol.c
drivers/net/ethernet/broadcom/genet/bcmmii.c
drivers/net/ethernet/broadcom/tg3.c
drivers/net/ethernet/brocade/bna/bfa_ioc.c
drivers/net/ethernet/brocade/bna/bnad_debugfs.c
drivers/net/ethernet/cadence/macb.c
drivers/net/ethernet/cadence/macb.h
drivers/net/ethernet/cavium/liquidio/cn23xx_pf_device.h
drivers/net/ethernet/cavium/liquidio/lio_core.c
drivers/net/ethernet/cavium/liquidio/lio_ethtool.c
drivers/net/ethernet/cavium/liquidio/lio_main.c
drivers/net/ethernet/cavium/liquidio/lio_vf_main.c
drivers/net/ethernet/cavium/liquidio/liquidio_common.h
drivers/net/ethernet/cavium/liquidio/octeon_config.h
drivers/net/ethernet/cavium/liquidio/octeon_device.c
drivers/net/ethernet/cavium/liquidio/octeon_device.h
drivers/net/ethernet/cavium/liquidio/octeon_droq.c
drivers/net/ethernet/cavium/liquidio/octeon_droq.h
drivers/net/ethernet/cavium/liquidio/octeon_iq.h
drivers/net/ethernet/cavium/liquidio/octeon_main.h
drivers/net/ethernet/cavium/liquidio/octeon_network.h
drivers/net/ethernet/cavium/liquidio/octeon_nic.c
drivers/net/ethernet/cavium/liquidio/octeon_nic.h
drivers/net/ethernet/cavium/liquidio/request_manager.c
drivers/net/ethernet/cavium/liquidio/response_manager.c
drivers/net/ethernet/cavium/liquidio/response_manager.h
drivers/net/ethernet/cavium/thunder/nic.h
drivers/net/ethernet/cavium/thunder/nic_main.c
drivers/net/ethernet/cavium/thunder/nicvf_main.c
drivers/net/ethernet/cavium/thunder/nicvf_queues.c
drivers/net/ethernet/cavium/thunder/nicvf_queues.h
drivers/net/ethernet/cavium/thunder/thunder_bgx.c
drivers/net/ethernet/cavium/thunder/thunder_bgx.h
drivers/net/ethernet/chelsio/cxgb/common.h
drivers/net/ethernet/chelsio/cxgb/cxgb2.c
drivers/net/ethernet/chelsio/cxgb3/adapter.h
drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
drivers/net/ethernet/chelsio/cxgb4/t4_values.h
drivers/net/ethernet/dec/tulip/de2104x.c
drivers/net/ethernet/dlink/dl2k.c
drivers/net/ethernet/dlink/dl2k.h
drivers/net/ethernet/emulex/benet/be_cmds.c
drivers/net/ethernet/ethoc.c
drivers/net/ethernet/ezchip/nps_enet.c
drivers/net/ethernet/faraday/ftgmac100.c
drivers/net/ethernet/faraday/ftgmac100.h
drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
drivers/net/ethernet/freescale/dpaa/dpaa_eth.h
drivers/net/ethernet/freescale/fec_main.c
drivers/net/ethernet/freescale/fman/fman.c
drivers/net/ethernet/freescale/fman/fman.h
drivers/net/ethernet/freescale/fman/fman_dtsec.c
drivers/net/ethernet/freescale/fman/fman_memac.c
drivers/net/ethernet/freescale/fman/fman_memac.h
drivers/net/ethernet/freescale/fman/fman_port.c
drivers/net/ethernet/freescale/fs_enet/mac-fec.c
drivers/net/ethernet/freescale/fs_enet/mac-scc.c
drivers/net/ethernet/hisilicon/hns/hnae.c
drivers/net/ethernet/hisilicon/hns/hnae.h
drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c
drivers/net/ethernet/hisilicon/hns/hns_dsaf_gmac.c
drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c
drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.h
drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c
drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.h
drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c
drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.c
drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c
drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.h
drivers/net/ethernet/hisilicon/hns/hns_dsaf_reg.h
drivers/net/ethernet/hisilicon/hns/hns_dsaf_xgmac.c
drivers/net/ethernet/hisilicon/hns/hns_enet.c
drivers/net/ethernet/hisilicon/hns/hns_enet.h
drivers/net/ethernet/hisilicon/hns/hns_ethtool.c
drivers/net/ethernet/hisilicon/hns_mdio.c
drivers/net/ethernet/ibm/emac/Makefile
drivers/net/ethernet/ibm/emac/core.c
drivers/net/ethernet/ibm/emac/core.h
drivers/net/ethernet/ibm/emac/debug.c [deleted file]
drivers/net/ethernet/ibm/emac/debug.h
drivers/net/ethernet/ibm/emac/mal.c
drivers/net/ethernet/ibm/ibmveth.h
drivers/net/ethernet/ibm/ibmvnic.c
drivers/net/ethernet/ibm/ibmvnic.h
drivers/net/ethernet/intel/Kconfig
drivers/net/ethernet/intel/e1000/e1000_ethtool.c
drivers/net/ethernet/intel/e1000e/ethtool.c
drivers/net/ethernet/intel/e1000e/netdev.c
drivers/net/ethernet/intel/fm10k/fm10k.h
drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c
drivers/net/ethernet/intel/fm10k/fm10k_main.c
drivers/net/ethernet/intel/fm10k/fm10k_netdev.c
drivers/net/ethernet/intel/fm10k/fm10k_pci.c
drivers/net/ethernet/intel/i40e/Makefile
drivers/net/ethernet/intel/i40e/i40e.h
drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h
drivers/net/ethernet/intel/i40e/i40e_client.c
drivers/net/ethernet/intel/i40e/i40e_client.h
drivers/net/ethernet/intel/i40e/i40e_common.c
drivers/net/ethernet/intel/i40e/i40e_debugfs.c
drivers/net/ethernet/intel/i40e/i40e_ethtool.c
drivers/net/ethernet/intel/i40e/i40e_main.c
drivers/net/ethernet/intel/i40e/i40e_nvm.c
drivers/net/ethernet/intel/i40e/i40e_osdep.h
drivers/net/ethernet/intel/i40e/i40e_prototype.h
drivers/net/ethernet/intel/i40e/i40e_txrx.c
drivers/net/ethernet/intel/i40e/i40e_txrx.h
drivers/net/ethernet/intel/i40e/i40e_type.h
drivers/net/ethernet/intel/i40e/i40e_virtchnl.h
drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h
drivers/net/ethernet/intel/i40evf/Makefile
drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h
drivers/net/ethernet/intel/i40evf/i40e_common.c
drivers/net/ethernet/intel/i40evf/i40e_txrx.c
drivers/net/ethernet/intel/i40evf/i40e_txrx.h
drivers/net/ethernet/intel/i40evf/i40e_virtchnl.h
drivers/net/ethernet/intel/i40evf/i40evf.h
drivers/net/ethernet/intel/i40evf/i40evf_client.c [new file with mode: 0644]
drivers/net/ethernet/intel/i40evf/i40evf_client.h [new file with mode: 0644]
drivers/net/ethernet/intel/i40evf/i40evf_ethtool.c
drivers/net/ethernet/intel/i40evf/i40evf_main.c
drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c
drivers/net/ethernet/intel/igb/igb.h
drivers/net/ethernet/intel/igb/igb_ethtool.c
drivers/net/ethernet/intel/igb/igb_main.c
drivers/net/ethernet/intel/igb/igb_ptp.c
drivers/net/ethernet/intel/igbvf/ethtool.c
drivers/net/ethernet/intel/ixgb/ixgb_ethtool.c
drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
drivers/net/ethernet/marvell/Kconfig
drivers/net/ethernet/marvell/mvneta.c
drivers/net/ethernet/marvell/mvpp2.c
drivers/net/ethernet/mediatek/mtk_eth_soc.c
drivers/net/ethernet/mediatek/mtk_eth_soc.h
drivers/net/ethernet/mellanox/mlx4/cmd.c
drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
drivers/net/ethernet/mellanox/mlx4/en_netdev.c
drivers/net/ethernet/mellanox/mlx4/en_port.c
drivers/net/ethernet/mellanox/mlx4/en_rx.c
drivers/net/ethernet/mellanox/mlx4/en_selftest.c
drivers/net/ethernet/mellanox/mlx4/en_tx.c
drivers/net/ethernet/mellanox/mlx4/main.c
drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
drivers/net/ethernet/mellanox/mlx4/mlx4_stats.h
drivers/net/ethernet/mellanox/mlx5/core/Kconfig
drivers/net/ethernet/mellanox/mlx5/core/cmd.c
drivers/net/ethernet/mellanox/mlx5/core/en.h
drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c
drivers/net/ethernet/mellanox/mlx5/core/en_clock.c
drivers/net/ethernet/mellanox/mlx5/core/en_common.c
drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c
drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
drivers/net/ethernet/mellanox/mlx5/core/en_fs.c
drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
drivers/net/ethernet/mellanox/mlx5/core/en_rx_am.c
drivers/net/ethernet/mellanox/mlx5/core/en_selftest.c
drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c
drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c
drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
drivers/net/ethernet/mellanox/mlx5/core/fs_core.h
drivers/net/ethernet/mellanox/mlx5/core/lag.c
drivers/net/ethernet/mellanox/mlx5/core/main.c
drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
drivers/net/ethernet/mellanox/mlxsw/Makefile
drivers/net/ethernet/mellanox/mlxsw/cmd.h
drivers/net/ethernet/mellanox/mlxsw/core.c
drivers/net/ethernet/mellanox/mlxsw/core.h
drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c
drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.h
drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.h
drivers/net/ethernet/mellanox/mlxsw/pci.c
drivers/net/ethernet/mellanox/mlxsw/port.h
drivers/net/ethernet/mellanox/mlxsw/reg.h
drivers/net/ethernet/mellanox/mlxsw/resources.h
drivers/net/ethernet/mellanox/mlxsw/spectrum.c
drivers/net/ethernet/mellanox/mlxsw/spectrum.h
drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c
drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_keys.h
drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c
drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c
drivers/net/ethernet/mellanox/mlxsw/spectrum_cnt.c [new file with mode: 0644]
drivers/net/ethernet/mellanox/mlxsw/spectrum_cnt.h [new file with mode: 0644]
drivers/net/ethernet/mellanox/mlxsw/spectrum_dpipe.c [new file with mode: 0644]
drivers/net/ethernet/mellanox/mlxsw/spectrum_dpipe.h [new file with mode: 0644]
drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c
drivers/net/ethernet/mellanox/mlxsw/spectrum_kvdl.c
drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
drivers/net/ethernet/mellanox/mlxsw/spectrum_router.h [new file with mode: 0644]
drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
drivers/net/ethernet/mellanox/mlxsw/switchx2.c
drivers/net/ethernet/micrel/ks8851.c
drivers/net/ethernet/moxa/moxart_ether.c
drivers/net/ethernet/moxa/moxart_ether.h
drivers/net/ethernet/netronome/nfp/Makefile
drivers/net/ethernet/netronome/nfp/nfp_main.c
drivers/net/ethernet/netronome/nfp/nfp_main.h
drivers/net/ethernet/netronome/nfp/nfp_net.h
drivers/net/ethernet/netronome/nfp/nfp_net_common.c
drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.h
drivers/net/ethernet/netronome/nfp/nfp_net_debugfs.c
drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c
drivers/net/ethernet/netronome/nfp/nfp_net_main.c
drivers/net/ethernet/netronome/nfp/nfp_net_offload.c
drivers/net/ethernet/netronome/nfp/nfp_netvf_main.c
drivers/net/ethernet/netronome/nfp/nfpcore/nfp.h
drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000_pcie.c
drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cppcore.c
drivers/net/ethernet/netronome/nfp/nfpcore/nfp_mutex.c [new file with mode: 0644]
drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.c
drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.h [new file with mode: 0644]
drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp_eth.c
drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp_eth.h [deleted file]
drivers/net/ethernet/netronome/nfp/nfpcore/nfp_resource.c
drivers/net/ethernet/nuvoton/w90p910_ether.c
drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c
drivers/net/ethernet/qlogic/qed/qed.h
drivers/net/ethernet/qlogic/qed/qed_cxt.c
drivers/net/ethernet/qlogic/qed/qed_cxt.h
drivers/net/ethernet/qlogic/qed/qed_dcbx.c
drivers/net/ethernet/qlogic/qed/qed_dcbx.h
drivers/net/ethernet/qlogic/qed/qed_debug.c
drivers/net/ethernet/qlogic/qed/qed_dev.c
drivers/net/ethernet/qlogic/qed/qed_dev_api.h
drivers/net/ethernet/qlogic/qed/qed_fcoe.c
drivers/net/ethernet/qlogic/qed/qed_hsi.h
drivers/net/ethernet/qlogic/qed/qed_hw.c
drivers/net/ethernet/qlogic/qed/qed_hw.h
drivers/net/ethernet/qlogic/qed/qed_init_fw_funcs.c
drivers/net/ethernet/qlogic/qed/qed_init_ops.c
drivers/net/ethernet/qlogic/qed/qed_int.c
drivers/net/ethernet/qlogic/qed/qed_iscsi.c
drivers/net/ethernet/qlogic/qed/qed_iscsi.h
drivers/net/ethernet/qlogic/qed/qed_l2.c
drivers/net/ethernet/qlogic/qed/qed_ll2.c
drivers/net/ethernet/qlogic/qed/qed_main.c
drivers/net/ethernet/qlogic/qed/qed_mcp.c
drivers/net/ethernet/qlogic/qed/qed_mcp.h
drivers/net/ethernet/qlogic/qed/qed_ooo.c
drivers/net/ethernet/qlogic/qed/qed_ooo.h
drivers/net/ethernet/qlogic/qed/qed_ptp.c
drivers/net/ethernet/qlogic/qed/qed_reg_addr.h
drivers/net/ethernet/qlogic/qed/qed_roce.c
drivers/net/ethernet/qlogic/qed/qed_roce.h
drivers/net/ethernet/qlogic/qed/qed_spq.c
drivers/net/ethernet/qlogic/qed/qed_sriov.c
drivers/net/ethernet/qlogic/qed/qed_sriov.h
drivers/net/ethernet/qlogic/qed/qed_vf.c
drivers/net/ethernet/qlogic/qed/qed_vf.h
drivers/net/ethernet/qlogic/qede/qede.h
drivers/net/ethernet/qlogic/qede/qede_ethtool.c
drivers/net/ethernet/qlogic/qede/qede_filter.c
drivers/net/ethernet/qlogic/qede/qede_fp.c
drivers/net/ethernet/qlogic/qede/qede_main.c
drivers/net/ethernet/qlogic/qlge/qlge.h
drivers/net/ethernet/qlogic/qlge/qlge_main.c
drivers/net/ethernet/qualcomm/emac/emac-sgmii.c
drivers/net/ethernet/realtek/r8169.c
drivers/net/ethernet/rocker/rocker_main.c
drivers/net/ethernet/rocker/rocker_ofdpa.c
drivers/net/ethernet/sfc/ef10.c
drivers/net/ethernet/sfc/efx.c
drivers/net/ethernet/sfc/falcon/tx.c
drivers/net/ethernet/sfc/tx.c
drivers/net/ethernet/smsc/smc911x.c
drivers/net/ethernet/smsc/smc91x.c
drivers/net/ethernet/stmicro/stmmac/chain_mode.c
drivers/net/ethernet/stmicro/stmmac/common.h
drivers/net/ethernet/stmicro/stmmac/dwmac-dwc-qos-eth.c
drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c
drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c
drivers/net/ethernet/stmicro/stmmac/dwmac1000_dma.c
drivers/net/ethernet/stmicro/stmmac/dwmac100_core.c
drivers/net/ethernet/stmicro/stmmac/dwmac4.h
drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c
drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.c
drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.h
drivers/net/ethernet/stmicro/stmmac/dwmac4_lib.c
drivers/net/ethernet/stmicro/stmmac/dwmac_dma.h
drivers/net/ethernet/stmicro/stmmac/dwmac_lib.c
drivers/net/ethernet/stmicro/stmmac/ring_mode.c
drivers/net/ethernet/stmicro/stmmac/stmmac.h
drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c
drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
drivers/net/ethernet/sun/cassini.c
drivers/net/ethernet/sun/ldmvsw.c
drivers/net/ethernet/sun/niu.c
drivers/net/ethernet/sun/sunbmac.c
drivers/net/ethernet/sun/sunbmac.h
drivers/net/ethernet/sun/sungem.c
drivers/net/ethernet/sun/sunhme.c
drivers/net/ethernet/sun/sunhme.h
drivers/net/ethernet/sun/sunvnet.c
drivers/net/ethernet/sun/sunvnet_common.c
drivers/net/ethernet/sun/sunvnet_common.h
drivers/net/ethernet/synopsys/Kconfig [new file with mode: 0644]
drivers/net/ethernet/synopsys/Makefile [new file with mode: 0644]
drivers/net/ethernet/synopsys/dwc-xlgmac-common.c [new file with mode: 0644]
drivers/net/ethernet/synopsys/dwc-xlgmac-desc.c [new file with mode: 0644]
drivers/net/ethernet/synopsys/dwc-xlgmac-hw.c [new file with mode: 0644]
drivers/net/ethernet/synopsys/dwc-xlgmac-net.c [new file with mode: 0644]
drivers/net/ethernet/synopsys/dwc-xlgmac-pci.c [new file with mode: 0644]
drivers/net/ethernet/synopsys/dwc-xlgmac-reg.h [new file with mode: 0644]
drivers/net/ethernet/synopsys/dwc-xlgmac.h [new file with mode: 0644]
drivers/net/ethernet/tehuti/tehuti.c
drivers/net/ethernet/ti/Kconfig
drivers/net/ethernet/ti/Makefile
drivers/net/ethernet/ti/cpsw.c
drivers/net/ethernet/ti/netcp_core.c
drivers/net/ethernet/toshiba/ps3_gelic_net.c
drivers/net/ethernet/toshiba/spider_net_ethtool.c
drivers/net/ethernet/tundra/tsi108_eth.c
drivers/net/ethernet/via/via-rhine.c
drivers/net/ethernet/via/via-velocity.c
drivers/net/ethernet/wiznet/w5100.c
drivers/net/fjes/fjes_ethtool.c
drivers/net/fjes/fjes_main.c
drivers/net/gtp.c
drivers/net/hyperv/hyperv_net.h
drivers/net/hyperv/netvsc.c
drivers/net/hyperv/netvsc_drv.c
drivers/net/hyperv/rndis_filter.c
drivers/net/ieee802154/mrf24j40.c
drivers/net/irda/vlsi_ir.c
drivers/net/loopback.c
drivers/net/ntb_netdev.c
drivers/net/phy/Kconfig
drivers/net/phy/Makefile
drivers/net/phy/bcm-phy-lib.c
drivers/net/phy/bcm7xxx.c
drivers/net/phy/dp83867.c
drivers/net/phy/intel-xway.c
drivers/net/phy/marvell.c
drivers/net/phy/mdio-bcm-unimac.c
drivers/net/phy/mdio-boardinfo.c
drivers/net/phy/mdio-boardinfo.h
drivers/net/phy/mdio-xgene.c
drivers/net/phy/mdio_bus.c
drivers/net/phy/micrel.c
drivers/net/phy/microchip.c
drivers/net/phy/phy-core.c [new file with mode: 0644]
drivers/net/phy/phy.c
drivers/net/phy/phy_device.c
drivers/net/phy/smsc.c
drivers/net/phy/spi_ks8995.c
drivers/net/team/team.c
drivers/net/tun.c
drivers/net/usb/asix_devices.c
drivers/net/usb/ax88172a.c
drivers/net/usb/ax88179_178a.c
drivers/net/usb/catc.c
drivers/net/usb/cdc_ether.c
drivers/net/usb/cdc_mbim.c
drivers/net/usb/cdc_ncm.c
drivers/net/usb/dm9601.c
drivers/net/usb/int51x1.c
drivers/net/usb/kaweth.c
drivers/net/usb/lan78xx.c
drivers/net/usb/mcs7830.c
drivers/net/usb/pegasus.c
drivers/net/usb/pegasus.h
drivers/net/usb/qmi_wwan.c
drivers/net/usb/r8152.c
drivers/net/usb/rndis_host.c
drivers/net/usb/rtl8150.c
drivers/net/usb/sierra_net.c
drivers/net/usb/smsc75xx.c
drivers/net/usb/smsc95xx.c
drivers/net/usb/sr9700.c
drivers/net/usb/sr9800.c
drivers/net/usb/usbnet.c
drivers/net/veth.c
drivers/net/virtio_net.c
drivers/net/vmxnet3/vmxnet3_ethtool.c
drivers/net/vrf.c
drivers/net/vxlan.c
drivers/net/wan/fsl_ucc_hdlc.c
drivers/net/wimax/i2400m/usb.c
drivers/net/wireless/ath/ath10k/hw.c
drivers/net/wireless/ath/wcn36xx/Kconfig
drivers/net/wireless/ath/wcn36xx/main.c
drivers/net/wireless/ath/wcn36xx/smd.c
drivers/net/wireless/ath/wcn36xx/smd.h
drivers/net/wireless/ath/wcn36xx/wcn36xx.h
drivers/net/wireless/atmel/atmel.c
drivers/net/wireless/broadcom/brcm80211/Kconfig
drivers/net/wireless/broadcom/brcm80211/brcmfmac/Makefile
drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcdc.c
drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcdc.h
drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c
drivers/net/wireless/broadcom/brcm80211/brcmfmac/bus.h
drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
drivers/net/wireless/broadcom/brcm80211/brcmfmac/common.c
drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c
drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.h
drivers/net/wireless/broadcom/brcm80211/brcmfmac/debug.c
drivers/net/wireless/broadcom/brcm80211/brcmfmac/debug.h
drivers/net/wireless/broadcom/brcm80211/brcmfmac/fweh.c
drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c
drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.h
drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.c
drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c
drivers/net/wireless/broadcom/brcm80211/brcmfmac/pno.c
drivers/net/wireless/broadcom/brcm80211/brcmfmac/proto.h
drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c
drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c
drivers/net/wireless/intel/ipw2x00/ipw2200.c
drivers/net/wireless/intel/iwlwifi/Makefile
drivers/net/wireless/intel/iwlwifi/iwl-7000.c
drivers/net/wireless/intel/iwlwifi/iwl-8000.c
drivers/net/wireless/intel/iwlwifi/iwl-9000.c
drivers/net/wireless/intel/iwlwifi/iwl-a000.c
drivers/net/wireless/intel/iwlwifi/iwl-config.h
drivers/net/wireless/intel/iwlwifi/iwl-context-info.h [new file with mode: 0644]
drivers/net/wireless/intel/iwlwifi/iwl-csr.h
drivers/net/wireless/intel/iwlwifi/iwl-drv.c
drivers/net/wireless/intel/iwlwifi/iwl-fh.h
drivers/net/wireless/intel/iwlwifi/iwl-fw-file.h
drivers/net/wireless/intel/iwlwifi/iwl-io.c
drivers/net/wireless/intel/iwlwifi/iwl-notif-wait.c
drivers/net/wireless/intel/iwlwifi/iwl-notif-wait.h
drivers/net/wireless/intel/iwlwifi/iwl-prph.h
drivers/net/wireless/intel/iwlwifi/iwl-trans.c
drivers/net/wireless/intel/iwlwifi/iwl-trans.h
drivers/net/wireless/intel/iwlwifi/mvm/binding.c
drivers/net/wireless/intel/iwlwifi/mvm/coex.c
drivers/net/wireless/intel/iwlwifi/mvm/d3.c
drivers/net/wireless/intel/iwlwifi/mvm/debugfs-vif.c
drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c
drivers/net/wireless/intel/iwlwifi/mvm/fw-api-mac.h
drivers/net/wireless/intel/iwlwifi/mvm/fw-api-power.h
drivers/net/wireless/intel/iwlwifi/mvm/fw-api-scan.h
drivers/net/wireless/intel/iwlwifi/mvm/fw-api-sta.h
drivers/net/wireless/intel/iwlwifi/mvm/fw-api-tx.h
drivers/net/wireless/intel/iwlwifi/mvm/fw-api.h
drivers/net/wireless/intel/iwlwifi/mvm/fw-dbg.c
drivers/net/wireless/intel/iwlwifi/mvm/fw.c
drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c
drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
drivers/net/wireless/intel/iwlwifi/mvm/ops.c
drivers/net/wireless/intel/iwlwifi/mvm/phy-ctxt.c
drivers/net/wireless/intel/iwlwifi/mvm/rx.c
drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c
drivers/net/wireless/intel/iwlwifi/mvm/scan.c
drivers/net/wireless/intel/iwlwifi/mvm/sf.c
drivers/net/wireless/intel/iwlwifi/mvm/sta.c
drivers/net/wireless/intel/iwlwifi/mvm/sta.h
drivers/net/wireless/intel/iwlwifi/mvm/tdls.c
drivers/net/wireless/intel/iwlwifi/mvm/tof.c
drivers/net/wireless/intel/iwlwifi/mvm/tt.c
drivers/net/wireless/intel/iwlwifi/mvm/tx.c
drivers/net/wireless/intel/iwlwifi/mvm/utils.c
drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info.c [new file with mode: 0644]
drivers/net/wireless/intel/iwlwifi/pcie/drv.c
drivers/net/wireless/intel/iwlwifi/pcie/internal.h
drivers/net/wireless/intel/iwlwifi/pcie/rx.c
drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c [new file with mode: 0644]
drivers/net/wireless/intel/iwlwifi/pcie/trans.c
drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c [new file with mode: 0644]
drivers/net/wireless/intel/iwlwifi/pcie/tx.c
drivers/net/wireless/marvell/mwifiex/11h.c
drivers/net/wireless/marvell/mwifiex/cfg80211.c
drivers/net/wireless/marvell/mwifiex/cmdevt.c
drivers/net/wireless/marvell/mwifiex/fw.h
drivers/net/wireless/marvell/mwifiex/ie.c
drivers/net/wireless/marvell/mwifiex/ioctl.h
drivers/net/wireless/marvell/mwifiex/main.c
drivers/net/wireless/marvell/mwifiex/main.h
drivers/net/wireless/marvell/mwifiex/pcie.c
drivers/net/wireless/marvell/mwifiex/pcie.h
drivers/net/wireless/marvell/mwifiex/scan.c
drivers/net/wireless/marvell/mwifiex/sdio.c
drivers/net/wireless/marvell/mwifiex/sta_cmd.c
drivers/net/wireless/marvell/mwifiex/sta_cmdresp.c
drivers/net/wireless/marvell/mwifiex/sta_event.c
drivers/net/wireless/marvell/mwifiex/tdls.c
drivers/net/wireless/marvell/mwifiex/uap_event.c
drivers/net/wireless/marvell/mwifiex/usb.c
drivers/net/wireless/marvell/mwifiex/usb.h
drivers/net/wireless/marvell/mwifiex/util.c
drivers/net/wireless/marvell/mwifiex/util.h
drivers/net/wireless/ralink/rt2x00/Kconfig
drivers/net/wireless/ralink/rt2x00/rt2800.h
drivers/net/wireless/ralink/rt2x00/rt2800lib.c
drivers/net/wireless/ralink/rt2x00/rt2800lib.h
drivers/net/wireless/ralink/rt2x00/rt2800mmio.c
drivers/net/wireless/ralink/rt2x00/rt2800usb.c
drivers/net/wireless/ralink/rt2x00/rt2x00.h
drivers/net/wireless/ralink/rt2x00/rt2x00dev.c
drivers/net/wireless/ralink/rt2x00/rt2x00queue.c
drivers/net/wireless/ralink/rt2x00/rt2x00queue.h
drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c
drivers/net/wireless/realtek/rtlwifi/base.c
drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8192e2ant.c
drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8192e2ant.h
drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8723b1ant.c
drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8723b2ant.c
drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8723b2ant.h
drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8821a1ant.c
drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8821a1ant.h
drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8821a2ant.c
drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8821a2ant.h
drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtcoutsrc.c
drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtcoutsrc.h
drivers/net/wireless/realtek/rtlwifi/regd.c
drivers/net/wireless/realtek/rtlwifi/rtl8192ee/fw.c
drivers/net/wireless/realtek/rtlwifi/rtl8192ee/fw.h
drivers/net/wireless/realtek/rtlwifi/rtl8192ee/hw.c
drivers/net/wireless/realtek/rtlwifi/rtl8723be/fw.c
drivers/net/wireless/realtek/rtlwifi/rtl8723be/fw.h
drivers/net/wireless/realtek/rtlwifi/rtl8723be/sw.c
drivers/net/wireless/realtek/rtlwifi/rtl8821ae/fw.c
drivers/net/wireless/realtek/rtlwifi/rtl8821ae/fw.h
drivers/net/wireless/realtek/rtlwifi/rtl8821ae/hw.c
drivers/net/wireless/realtek/rtlwifi/rtl8821ae/phy.c
drivers/net/wireless/realtek/rtlwifi/rtl8821ae/sw.c
drivers/net/wireless/realtek/rtlwifi/rtl8821ae/table.c
drivers/net/wireless/realtek/rtlwifi/rtl8821ae/table.h
drivers/net/wireless/realtek/rtlwifi/wifi.h
drivers/net/wireless/rndis_wlan.c
drivers/net/wireless/st/cw1200/cw1200_sdio.c
drivers/net/wireless/ti/wlcore/debugfs.c
drivers/net/wireless/zydas/zd1211rw/zd_usb.c
drivers/net/xen-netback/interface.c
drivers/net/xen-netback/netback.c
drivers/net/xen-netback/xenbus.c
drivers/nvme/host/rdma.c
drivers/nvme/target/core.c
drivers/nvme/target/loop.c
drivers/nvme/target/nvmet.h
drivers/nvme/target/rdma.c
drivers/parport/share.c
drivers/pci/dwc/pci-exynos.c
drivers/pci/host/pci-thunder-pem.c
drivers/pci/host/pcie-iproc-bcma.c
drivers/pci/host/pcie-iproc-platform.c
drivers/pci/host/pcie-iproc.h
drivers/pci/msi.c
drivers/pci/pcie/aspm.c
drivers/pci/quirks.c
drivers/phy/Kconfig
drivers/phy/Makefile
drivers/phy/phy-bcm-nsp-usb3.c [deleted file]
drivers/phy/phy-exynos-pcie.c
drivers/pinctrl/meson/pinctrl-meson-gxbb.c
drivers/pinctrl/pinctrl-st.c
drivers/pinctrl/qcom/pinctrl-ipq4019.c
drivers/pinctrl/qcom/pinctrl-msm.c
drivers/pinctrl/samsung/pinctrl-samsung.c
drivers/pinctrl/ti/Kconfig
drivers/pinctrl/uniphier/pinctrl-uniphier-ld11.c
drivers/platform/x86/asus-nb-wmi.c
drivers/platform/x86/asus-wmi.c
drivers/platform/x86/asus-wmi.h
drivers/platform/x86/fujitsu-laptop.c
drivers/ptp/ptp_kvm.c
drivers/rapidio/devices/tsi721.c
drivers/rapidio/devices/tsi721.h
drivers/remoteproc/Kconfig
drivers/rpmsg/Kconfig
drivers/s390/crypto/pkey_api.c
drivers/s390/net/ctcm_fsms.c
drivers/s390/net/ctcm_main.c
drivers/s390/net/netiucv.c
drivers/s390/net/qeth_core.h
drivers/s390/net/qeth_core_main.c
drivers/s390/net/qeth_core_mpc.h
drivers/s390/net/qeth_l2_main.c
drivers/s390/net/qeth_l2_sys.c
drivers/s390/net/qeth_l3_main.c
drivers/s390/net/qeth_l3_sys.c
drivers/scsi/Kconfig
drivers/scsi/aacraid/commsup.c
drivers/scsi/aacraid/src.c
drivers/scsi/aic7xxx/aic79xx_core.c
drivers/scsi/device_handler/scsi_dh_alua.c
drivers/scsi/hpsa.c
drivers/scsi/hpsa.h
drivers/scsi/hpsa_cmd.h
drivers/scsi/libiscsi.c
drivers/scsi/libsas/sas_ata.c
drivers/scsi/lpfc/lpfc.h
drivers/scsi/lpfc/lpfc_attr.c
drivers/scsi/lpfc/lpfc_crtn.h
drivers/scsi/lpfc/lpfc_ct.c
drivers/scsi/lpfc/lpfc_debugfs.c
drivers/scsi/lpfc/lpfc_debugfs.h
drivers/scsi/lpfc/lpfc_els.c
drivers/scsi/lpfc/lpfc_hbadisc.c
drivers/scsi/lpfc/lpfc_hw4.h
drivers/scsi/lpfc/lpfc_init.c
drivers/scsi/lpfc/lpfc_mem.c
drivers/scsi/lpfc/lpfc_nvme.c
drivers/scsi/lpfc/lpfc_nvme.h
drivers/scsi/lpfc/lpfc_nvmet.c
drivers/scsi/lpfc/lpfc_scsi.c
drivers/scsi/lpfc/lpfc_sli.c
drivers/scsi/lpfc/lpfc_sli4.h
drivers/scsi/lpfc/lpfc_version.h
drivers/scsi/megaraid/megaraid_sas.h
drivers/scsi/megaraid/megaraid_sas_base.c
drivers/scsi/megaraid/megaraid_sas_fusion.c
drivers/scsi/mpt3sas/mpt3sas_base.h
drivers/scsi/mpt3sas/mpt3sas_scsih.c
drivers/scsi/qedf/Makefile
drivers/scsi/qedf/drv_fcoe_fw_funcs.c [new file with mode: 0644]
drivers/scsi/qedf/drv_fcoe_fw_funcs.h [new file with mode: 0644]
drivers/scsi/qedf/drv_scsi_fw_funcs.c [new file with mode: 0644]
drivers/scsi/qedf/drv_scsi_fw_funcs.h [new file with mode: 0644]
drivers/scsi/qedf/qedf.h
drivers/scsi/qedf/qedf_dbg.h
drivers/scsi/qedf/qedf_els.c
drivers/scsi/qedf/qedf_fip.c
drivers/scsi/qedf/qedf_io.c
drivers/scsi/qedf/qedf_main.c
drivers/scsi/qedi/Makefile
drivers/scsi/qedi/qedi_debugfs.c
drivers/scsi/qedi/qedi_fw.c
drivers/scsi/qedi/qedi_fw_api.c [new file with mode: 0644]
drivers/scsi/qedi/qedi_fw_iscsi.h [new file with mode: 0644]
drivers/scsi/qedi/qedi_fw_scsi.h [new file with mode: 0644]
drivers/scsi/qedi/qedi_gbl.h
drivers/scsi/qedi/qedi_iscsi.c
drivers/scsi/qedi/qedi_iscsi.h
drivers/scsi/qedi/qedi_main.c
drivers/scsi/qedi/qedi_version.h
drivers/scsi/qla2xxx/Kconfig
drivers/scsi/qla2xxx/qla_attr.c
drivers/scsi/qla2xxx/qla_dbg.c
drivers/scsi/qla2xxx/qla_dbg.h
drivers/scsi/qla2xxx/qla_def.h
drivers/scsi/qla2xxx/qla_dfs.c
drivers/scsi/qla2xxx/qla_gbl.h
drivers/scsi/qla2xxx/qla_init.c
drivers/scsi/qla2xxx/qla_iocb.c
drivers/scsi/qla2xxx/qla_isr.c
drivers/scsi/qla2xxx/qla_mbx.c
drivers/scsi/qla2xxx/qla_mid.c
drivers/scsi/qla2xxx/qla_os.c
drivers/scsi/qla2xxx/qla_target.c
drivers/scsi/qla2xxx/qla_target.h
drivers/scsi/qla2xxx/qla_version.h
drivers/scsi/qla2xxx/tcm_qla2xxx.c
drivers/scsi/scsi_lib.c
drivers/scsi/scsi_priv.h
drivers/scsi/sd.c
drivers/scsi/sg.c
drivers/scsi/storvsc_drv.c
drivers/scsi/ufs/ufs.h
drivers/scsi/ufs/ufshcd-pltfrm.c
drivers/scsi/ufs/ufshcd.c
drivers/scsi/ufs/ufshcd.h
drivers/scsi/vmw_pvscsi.c
drivers/soc/qcom/Kconfig
drivers/soc/qcom/Makefile
drivers/soc/qcom/smd-rpm.c
drivers/soc/qcom/smd.c [deleted file]
drivers/soc/qcom/wcnss_ctrl.c
drivers/staging/lustre/lnet/lnet/lib-socket.c
drivers/staging/octeon/ethernet-rx.c
drivers/staging/vc04_services/Kconfig
drivers/target/target_core_alua.c
drivers/target/target_core_configfs.c
drivers/target/target_core_pscsi.c
drivers/target/target_core_sbc.c
drivers/target/target_core_tpg.c
drivers/target/target_core_transport.c
drivers/target/target_core_user.c
drivers/thermal/cpu_cooling.c
drivers/thermal/devfreq_cooling.c
drivers/tty/n_hdlc.c
drivers/tty/serial/8250/8250_dw.c
drivers/tty/serial/8250/Kconfig
drivers/tty/serial/amba-pl011.c
drivers/tty/serial/atmel_serial.c
drivers/tty/serial/mxs-auart.c
drivers/tty/serial/samsung.c
drivers/tty/serial/st-asc.c
drivers/tty/sysrq.c
drivers/tty/tty_ldisc.c
drivers/tty/vt/keyboard.c
drivers/usb/class/usbtmc.c
drivers/usb/core/config.c
drivers/usb/core/hcd.c
drivers/usb/core/hub.c
drivers/usb/core/quirks.c
drivers/usb/dwc3/dwc3-omap.c
drivers/usb/dwc3/gadget.c
drivers/usb/dwc3/gadget.h
drivers/usb/gadget/configfs.c
drivers/usb/gadget/function/f_acm.c
drivers/usb/gadget/function/f_fs.c
drivers/usb/gadget/function/f_hid.c
drivers/usb/gadget/function/f_ncm.c
drivers/usb/gadget/function/f_uvc.c
drivers/usb/gadget/legacy/inode.c
drivers/usb/gadget/udc/atmel_usba_udc.c
drivers/usb/gadget/udc/dummy_hcd.c
drivers/usb/gadget/udc/net2280.c
drivers/usb/gadget/udc/pch_udc.c
drivers/usb/gadget/udc/pxa27x_udc.c
drivers/usb/host/ohci-at91.c
drivers/usb/host/xhci-dbg.c
drivers/usb/host/xhci-mtk.c
drivers/usb/host/xhci-plat.c
drivers/usb/host/xhci-ring.c
drivers/usb/host/xhci-tegra.c
drivers/usb/host/xhci.c
drivers/usb/misc/idmouse.c
drivers/usb/misc/iowarrior.c
drivers/usb/misc/lvstest.c
drivers/usb/misc/usb251xb.c
drivers/usb/misc/uss720.c
drivers/usb/musb/musb_core.c
drivers/usb/musb/musb_cppi41.c
drivers/usb/musb/musb_dsps.c
drivers/usb/phy/phy-isp1301.c
drivers/usb/serial/digi_acceleport.c
drivers/usb/serial/io_ti.c
drivers/usb/serial/omninet.c
drivers/usb/serial/option.c
drivers/usb/serial/qcserial.c
drivers/usb/serial/safe_serial.c
drivers/usb/storage/unusual_devs.h
drivers/usb/wusbcore/wa-hc.c
drivers/uwb/hwa-rc.c
drivers/uwb/i1480/dfu/usb.c
drivers/vfio/vfio.c
drivers/vfio/vfio_iommu_type1.c
drivers/vhost/vsock.c
drivers/virtio/virtio_balloon.c
drivers/virtio/virtio_pci_common.c
drivers/xen/gntdev.c
drivers/xen/swiotlb-xen.c
drivers/xen/xen-acpi-processor.c
drivers/xen/xenbus/xenbus_dev_frontend.c
fs/afs/callback.c
fs/afs/cmservice.c
fs/afs/file.c
fs/afs/fsclient.c
fs/afs/inode.c
fs/afs/internal.h
fs/afs/misc.c
fs/afs/mntpt.c
fs/afs/rxrpc.c
fs/afs/security.c
fs/afs/server.c
fs/afs/vlocation.c
fs/afs/write.c
fs/btrfs/ctree.h
fs/btrfs/disk-io.c
fs/btrfs/extent_io.c
fs/btrfs/inode.c
fs/btrfs/qgroup.c
fs/btrfs/send.c
fs/cifs/cifsfs.c
fs/cifs/connect.c
fs/cifs/smb2pdu.c
fs/crypto/crypto.c
fs/crypto/fname.c
fs/crypto/fscrypt_private.h
fs/crypto/keyinfo.c
fs/crypto/policy.c
fs/dlm/lowcomms.c
fs/eventpoll.c
fs/ext4/inline.c
fs/ext4/inode.c
fs/ext4/move_extent.c
fs/ext4/super.c
fs/ext4/xattr.c
fs/f2fs/debug.c
fs/f2fs/dir.c
fs/f2fs/f2fs.h
fs/f2fs/node.c
fs/f2fs/segment.c
fs/fat/inode.c
fs/fs-writeback.c
fs/gfs2/incore.h
fs/hugetlbfs/inode.c
fs/iomap.c
fs/jbd2/journal.c
fs/jbd2/revoke.c
fs/kernfs/file.c
fs/nfs/callback.c
fs/nfs/client.c
fs/nfs/dir.c
fs/nfs/filelayout/filelayout.c
fs/nfs/filelayout/filelayout.h
fs/nfs/filelayout/filelayoutdev.c
fs/nfs/flexfilelayout/flexfilelayout.h
fs/nfs/flexfilelayout/flexfilelayoutdev.c
fs/nfs/internal.h
fs/nfs/nfs4client.c
fs/nfs/nfs4proc.c
fs/nfs/nfs4xdr.c
fs/nfs/pnfs.h
fs/nfs/pnfs_nfs.c
fs/nfs/write.c
fs/nfsd/nfsctl.c
fs/nfsd/nfsproc.c
fs/nfsd/nfssvc.c
fs/ocfs2/cluster/tcp.c
fs/overlayfs/util.c
fs/select.c
fs/timerfd.c
fs/userfaultfd.c
fs/xfs/kmem.c
fs/xfs/kmem.h
fs/xfs/libxfs/xfs_bmap.c
fs/xfs/libxfs/xfs_bmap_btree.c
fs/xfs/libxfs/xfs_dir2_priv.h
fs/xfs/libxfs/xfs_dir2_sf.c
fs/xfs/libxfs/xfs_inode_fork.c
fs/xfs/libxfs/xfs_inode_fork.h
fs/xfs/xfs_aops.c
fs/xfs/xfs_dir2_readdir.c
fs/xfs/xfs_icache.c
fs/xfs/xfs_inode.c
fs/xfs/xfs_iomap.c
fs/xfs/xfs_itable.c
fs/xfs/xfs_mount.c
fs/xfs/xfs_reflink.c
fs/xfs/xfs_reflink.h
fs/xfs/xfs_super.c
include/asm-generic/4level-fixup.h
include/asm-generic/5level-fixup.h [new file with mode: 0644]
include/asm-generic/pgtable-nop4d-hack.h [new file with mode: 0644]
include/asm-generic/pgtable-nop4d.h [new file with mode: 0644]
include/asm-generic/pgtable-nopud.h
include/asm-generic/pgtable.h
include/asm-generic/sections.h
include/asm-generic/tlb.h
include/asm-generic/vmlinux.lds.h
include/crypto/if_alg.h
include/drm/ttm/ttm_object.h
include/dt-bindings/sound/cs42l42.h
include/linux/acpi.h
include/linux/blkdev.h
include/linux/bpf.h
include/linux/bpf_types.h [new file with mode: 0644]
include/linux/bpf_verifier.h
include/linux/brcmphy.h
include/linux/can/core.h
include/linux/can/platform/ti_hecc.h [deleted file]
include/linux/ccp.h
include/linux/ceph/libceph.h
include/linux/ceph/osd_client.h
include/linux/clockchips.h
include/linux/dccp.h
include/linux/device.h
include/linux/errqueue.h
include/linux/etherdevice.h
include/linux/ethtool.h
include/linux/filter.h
include/linux/fs.h
include/linux/fscrypt_common.h
include/linux/genhd.h
include/linux/gpio/consumer.h
include/linux/hugetlb.h
include/linux/hwmon.h
include/linux/hyperv.h
include/linux/iio/sw_device.h
include/linux/inetdevice.h
include/linux/iommu.h
include/linux/ipv6.h
include/linux/irqchip/arm-gic-v3.h
include/linux/irqdomain.h
include/linux/jump_label.h
include/linux/kasan.h
include/linux/kvm_host.h
include/linux/list_nulls.h
include/linux/memcontrol.h
include/linux/mfd/cros_ec.h
include/linux/mlx4/device.h
include/linux/mlx5/driver.h
include/linux/mlx5/fs.h
include/linux/mlx5/mlx5_ifc.h
include/linux/mm.h
include/linux/net.h
include/linux/netdevice.h
include/linux/of_mdio.h
include/linux/omap-gpmc.h
include/linux/pci.h
include/linux/phy.h
include/linux/purgatory.h [new file with mode: 0644]
include/linux/qed/common_hsi.h
include/linux/qed/eth_common.h
include/linux/qed/fcoe_common.h
include/linux/qed/iscsi_common.h
include/linux/qed/qed_if.h
include/linux/qed/qed_iscsi_if.h
include/linux/qed/rdma_common.h
include/linux/qed/roce_common.h
include/linux/qed/storage_common.h
include/linux/qed/tcp_common.h
include/linux/random.h
include/linux/rculist_nulls.h
include/linux/regulator/machine.h
include/linux/reset.h
include/linux/rhashtable.h
include/linux/rpmsg/qcom_smd.h
include/linux/sched/clock.h
include/linux/skbuff.h
include/linux/soc/qcom/smd.h [deleted file]
include/linux/soc/qcom/wcnss_ctrl.h
include/linux/sock_diag.h
include/linux/stmmac.h
include/linux/udp.h
include/linux/usb/quirks.h
include/linux/usb/usbnet.h
include/linux/user_namespace.h
include/linux/userfaultfd_k.h
include/linux/virtio_vsock.h
include/linux/vm_event_item.h
include/linux/wait.h
include/media/vsp1.h
include/net/addrconf.h
include/net/af_rxrpc.h
include/net/af_vsock.h
include/net/bonding.h
include/net/busy_poll.h
include/net/devlink.h
include/net/dsa.h
include/net/fib_rules.h
include/net/flow.h
include/net/flowcache.h
include/net/inet_common.h
include/net/inet_connection_sock.h
include/net/ip_fib.h
include/net/ip_vs.h
include/net/irda/timer.h
include/net/mpls_iptunnel.h
include/net/ndisc.h
include/net/neighbour.h
include/net/net_namespace.h
include/net/netfilter/nf_conntrack.h
include/net/netfilter/nf_conntrack_expect.h
include/net/netfilter/nf_conntrack_timeout.h
include/net/netfilter/nf_tables.h
include/net/netfilter/nf_tables_ipv6.h
include/net/netfilter/nft_fib.h
include/net/netns/can.h [new file with mode: 0644]
include/net/netns/ipv4.h
include/net/netns/mpls.h
include/net/pkt_sched.h
include/net/protocol.h
include/net/route.h
include/net/sch_generic.h
include/net/sctp/sctp.h
include/net/sctp/sm.h
include/net/sctp/structs.h
include/net/sctp/ulpevent.h
include/net/secure_seq.h
include/net/sock.h
include/net/tc_act/tc_pedit.h
include/net/tc_act/tc_vlan.h
include/net/tcp.h
include/net/udp.h
include/net/xfrm.h
include/rdma/ib_verbs.h
include/scsi/libiscsi.h
include/scsi/scsi_device.h
include/target/target_core_backend.h
include/target/target_core_base.h
include/trace/events/rxrpc.h
include/trace/events/syscalls.h
include/uapi/asm-generic/socket.h
include/uapi/asm-generic/unistd.h
include/uapi/drm/omap_drm.h
include/uapi/linux/bpf.h
include/uapi/linux/btrfs.h
include/uapi/linux/devlink.h
include/uapi/linux/ethtool.h
include/uapi/linux/gtp.h
include/uapi/linux/if_link.h
include/uapi/linux/ipv6.h
include/uapi/linux/mpls_iptunnel.h
include/uapi/linux/netfilter/nf_tables.h
include/uapi/linux/netlink.h
include/uapi/linux/netlink_diag.h
include/uapi/linux/openvswitch.h
include/uapi/linux/packet_diag.h
include/uapi/linux/pkt_sched.h
include/uapi/linux/rtnetlink.h
include/uapi/linux/sctp.h
include/uapi/linux/snmp.h
include/uapi/linux/sysctl.h
include/uapi/linux/userfaultfd.h
include/uapi/rdma/mlx5-abi.h
include/video/exynos5433_decon.h
include/xen/swiotlb-xen.h
init/main.c
kernel/audit.c
kernel/audit.h
kernel/auditsc.c
kernel/bpf/Makefile
kernel/bpf/arraymap.c
kernel/bpf/cgroup.c
kernel/bpf/hashtab.c
kernel/bpf/lpm_trie.c
kernel/bpf/map_in_map.c [new file with mode: 0644]
kernel/bpf/map_in_map.h [new file with mode: 0644]
kernel/bpf/stackmap.c
kernel/bpf/syscall.c
kernel/bpf/verifier.c
kernel/cgroup/cgroup-v1.c
kernel/cgroup/cgroup.c
kernel/cgroup/pids.c
kernel/cpu.c
kernel/events/core.c
kernel/exit.c
kernel/futex.c
kernel/kexec_file.c
kernel/kexec_internal.h
kernel/locking/lockdep.c
kernel/locking/rwsem-spinlock.c
kernel/locking/test-ww_mutex.c
kernel/memremap.c
kernel/padata.c
kernel/sched/clock.c
kernel/sched/core.c
kernel/sched/cpufreq_schedutil.c
kernel/sched/deadline.c
kernel/sched/fair.c
kernel/sched/features.h
kernel/sched/loadavg.c
kernel/sched/wait.c
kernel/time/jiffies.c
kernel/trace/Kconfig
kernel/trace/Makefile
kernel/trace/bpf_trace.c
kernel/trace/ftrace.c
kernel/trace/trace.c
kernel/trace/trace_probe.h
kernel/trace/trace_stack.c
kernel/ucount.c
kernel/workqueue.c
lib/ioremap.c
lib/radix-tree.c
lib/refcount.c
lib/syscall.c
lib/test_kasan.c
mm/backing-dev.c
mm/gup.c
mm/huge_memory.c
mm/hugetlb.c
mm/kasan/kasan.h
mm/kasan/kasan_init.c
mm/kasan/quarantine.c
mm/kasan/report.c
mm/kmemleak.c
mm/madvise.c
mm/memblock.c
mm/memcontrol.c
mm/memory.c
mm/memory_hotplug.c
mm/migrate.c
mm/mlock.c
mm/mprotect.c
mm/mremap.c
mm/page_alloc.c
mm/page_vma_mapped.c
mm/pagewalk.c
mm/percpu-vm.c
mm/percpu.c
mm/pgtable-generic.c
mm/rmap.c
mm/sparse-vmemmap.c
mm/swap_slots.c
mm/swapfile.c
mm/userfaultfd.c
mm/vmalloc.c
mm/vmstat.c
mm/workingset.c
mm/z3fold.c
net/8021q/vlan_dev.c
net/Makefile
net/atm/clip.c
net/atm/common.c
net/atm/svc.c
net/ax25/af_ax25.c
net/batman-adv/bat_iv_ogm.c
net/batman-adv/bat_v.c
net/batman-adv/bridge_loop_avoidance.c
net/batman-adv/bridge_loop_avoidance.h
net/batman-adv/distributed-arp-table.c
net/batman-adv/fragmentation.c
net/batman-adv/gateway_common.c
net/batman-adv/log.h
net/batman-adv/main.c
net/batman-adv/main.h
net/batman-adv/multicast.c
net/batman-adv/routing.c
net/batman-adv/send.c
net/batman-adv/send.h
net/batman-adv/soft-interface.c
net/batman-adv/tp_meter.c
net/batman-adv/translation-table.c
net/batman-adv/types.h
net/bluetooth/l2cap_sock.c
net/bluetooth/rfcomm/sock.c
net/bluetooth/sco.c
net/bpf/Makefile [new file with mode: 0644]
net/bpf/test_run.c [new file with mode: 0644]
net/bridge/br_fdb.c
net/bridge/br_if.c
net/bridge/br_input.c
net/bridge/br_netfilter_hooks.c
net/bridge/br_private.h
net/bridge/netfilter/ebt_log.c
net/bridge/netfilter/nft_reject_bridge.c
net/can/af_can.c
net/can/af_can.h
net/can/bcm.c
net/can/gw.c
net/can/proc.c
net/can/raw.c
net/ceph/ceph_common.c
net/ceph/messenger.c
net/ceph/osd_client.c
net/ceph/osdmap.c
net/core/datagram.c
net/core/dev.c
net/core/devlink.c
net/core/drop_monitor.c
net/core/ethtool.c
net/core/fib_rules.c
net/core/filter.c
net/core/flow.c
net/core/flow_dissector.c
net/core/lwtunnel.c
net/core/neighbour.c
net/core/net-sysfs.c
net/core/netclassid_cgroup.c
net/core/netprio_cgroup.c
net/core/rtnetlink.c
net/core/secure_seq.c
net/core/skbuff.c
net/core/sock.c
net/core/sock_diag.c
net/core/sock_reuseport.c
net/core/sysctl_net_core.c
net/core/utils.c
net/dccp/ccids/ccid2.c
net/dccp/ipv4.c
net/dccp/ipv6.c
net/dccp/minisocks.c
net/decnet/af_decnet.c
net/dsa/Kconfig
net/dsa/Makefile
net/dsa/dsa.c
net/dsa/dsa2.c
net/dsa/dsa_priv.h
net/dsa/slave.c
net/dsa/switch.c
net/dsa/tag_brcm.c
net/dsa/tag_dsa.c
net/dsa/tag_edsa.c
net/dsa/tag_mtk.c [new file with mode: 0644]
net/dsa/tag_qca.c
net/dsa/tag_trailer.c
net/ipv4/Makefile
net/ipv4/af_inet.c
net/ipv4/arp.c
net/ipv4/devinet.c
net/ipv4/fib_frontend.c
net/ipv4/fib_notifier.c [new file with mode: 0644]
net/ipv4/fib_rules.c
net/ipv4/fib_semantics.c
net/ipv4/fib_trie.c
net/ipv4/icmp.c
net/ipv4/inet_connection_sock.c
net/ipv4/ip_fragment.c
net/ipv4/ip_input.c
net/ipv4/ip_output.c
net/ipv4/ipconfig.c
net/ipv4/ipmr.c
net/ipv4/netfilter/arp_tables.c
net/ipv4/netfilter/ipt_CLUSTERIP.c
net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c
net/ipv4/netfilter/nf_nat_l3proto_ipv4.c
net/ipv4/netfilter/nf_nat_snmp_basic.c
net/ipv4/netfilter/nf_reject_ipv4.c
net/ipv4/netfilter/nft_fib_ipv4.c
net/ipv4/netfilter/nft_masq_ipv4.c
net/ipv4/netfilter/nft_redir_ipv4.c
net/ipv4/ping.c
net/ipv4/proc.c
net/ipv4/protocol.c
net/ipv4/route.c
net/ipv4/sysctl_net_ipv4.c
net/ipv4/tcp.c
net/ipv4/tcp_input.c
net/ipv4/tcp_ipv4.c
net/ipv4/tcp_metrics.c
net/ipv4/tcp_minisocks.c
net/ipv4/tcp_output.c
net/ipv4/tcp_recovery.c
net/ipv4/tcp_timer.c
net/ipv4/tcp_westwood.c
net/ipv6/Kconfig
net/ipv6/addrconf.c
net/ipv6/af_inet6.c
net/ipv6/ip6_fib.c
net/ipv6/ip6_input.c
net/ipv6/ip6_offload.c
net/ipv6/ip6_output.c
net/ipv6/ip6_vti.c
net/ipv6/ip6mr.c
net/ipv6/mcast.c
net/ipv6/ndisc.c
net/ipv6/netfilter/nft_fib_ipv6.c
net/ipv6/netfilter/nft_masq_ipv6.c
net/ipv6/netfilter/nft_redir_ipv6.c
net/ipv6/protocol.c
net/ipv6/route.c
net/ipv6/seg6_iptunnel.c
net/ipv6/tcp_ipv6.c
net/ipv6/udp.c
net/irda/af_irda.c
net/iucv/af_iucv.c
net/kcm/kcmsock.c
net/key/af_key.c
net/l2tp/l2tp_core.c
net/l2tp/l2tp_core.h
net/l2tp/l2tp_debugfs.c
net/l2tp/l2tp_eth.c
net/l2tp/l2tp_ip.c
net/l2tp/l2tp_ip6.c
net/l2tp/l2tp_netlink.c
net/l2tp/l2tp_ppp.c
net/llc/af_llc.c
net/mac80211/iface.c
net/mac802154/ieee802154_i.h
net/mpls/af_mpls.c
net/mpls/internal.h
net/mpls/mpls_iptunnel.c
net/netfilter/ipvs/ip_vs_conn.c
net/netfilter/ipvs/ip_vs_core.c
net/netfilter/ipvs/ip_vs_ctl.c
net/netfilter/ipvs/ip_vs_lblc.c
net/netfilter/ipvs/ip_vs_lblcr.c
net/netfilter/ipvs/ip_vs_nq.c
net/netfilter/ipvs/ip_vs_proto_sctp.c
net/netfilter/ipvs/ip_vs_proto_tcp.c
net/netfilter/ipvs/ip_vs_rr.c
net/netfilter/ipvs/ip_vs_sed.c
net/netfilter/ipvs/ip_vs_wlc.c
net/netfilter/ipvs/ip_vs_wrr.c
net/netfilter/nf_conntrack_core.c
net/netfilter/nf_conntrack_ecache.c
net/netfilter/nf_conntrack_expect.c
net/netfilter/nf_conntrack_extend.c
net/netfilter/nf_conntrack_netlink.c
net/netfilter/nf_nat_core.c
net/netfilter/nf_nat_proto_sctp.c
net/netfilter/nf_tables_api.c
net/netfilter/nfnetlink_acct.c
net/netfilter/nfnetlink_cthelper.c
net/netfilter/nfnetlink_cttimeout.c
net/netfilter/nfnetlink_log.c
net/netfilter/nfnetlink_queue.c
net/netfilter/nft_compat.c
net/netfilter/nft_counter.c
net/netfilter/nft_ct.c
net/netfilter/nft_dynset.c
net/netfilter/nft_exthdr.c
net/netfilter/nft_fib.c
net/netfilter/nft_hash.c
net/netfilter/nft_limit.c
net/netfilter/nft_lookup.c
net/netfilter/nft_masq.c
net/netfilter/nft_meta.c
net/netfilter/nft_nat.c
net/netfilter/nft_objref.c
net/netfilter/nft_quota.c
net/netfilter/nft_redir.c
net/netfilter/nft_reject.c
net/netfilter/nft_reject_inet.c
net/netfilter/nft_set_bitmap.c
net/netfilter/nft_set_rbtree.c
net/netfilter/xt_limit.c
net/netlink/af_netlink.c
net/netlink/af_netlink.h
net/netlink/diag.c
net/netlink/genetlink.c
net/netrom/af_netrom.c
net/nfc/llcp_sock.c
net/openvswitch/actions.c
net/openvswitch/conntrack.c
net/openvswitch/datapath.h
net/openvswitch/flow.c
net/openvswitch/flow_netlink.c
net/packet/af_packet.c
net/phonet/pep.c
net/phonet/socket.c
net/qrtr/Kconfig
net/qrtr/smd.c
net/rds/connection.c
net/rds/ib_cm.c
net/rds/ib_fmr.c
net/rds/ib_mr.h
net/rds/rds.h
net/rds/tcp.c
net/rds/tcp.h
net/rds/tcp_listen.c
net/rds/threads.c
net/rose/af_rose.c
net/rxrpc/ar-internal.h
net/rxrpc/call_accept.c
net/rxrpc/call_event.c
net/rxrpc/call_object.c
net/rxrpc/conn_client.c
net/rxrpc/conn_event.c
net/rxrpc/input.c
net/rxrpc/insecure.c
net/rxrpc/peer_event.c
net/rxrpc/recvmsg.c
net/rxrpc/rxkad.c
net/rxrpc/sendmsg.c
net/sched/act_connmark.c
net/sched/act_csum.c
net/sched/act_ife.c
net/sched/act_skbmod.c
net/sched/cls_flow.c
net/sched/sch_api.c
net/sched/sch_cbq.c
net/sched/sch_choke.c
net/sched/sch_drr.c
net/sched/sch_dsmark.c
net/sched/sch_fq_codel.c
net/sched/sch_generic.c
net/sched/sch_hfsc.c
net/sched/sch_htb.c
net/sched/sch_mq.c
net/sched/sch_mqprio.c
net/sched/sch_multiq.c
net/sched/sch_netem.c
net/sched/sch_prio.c
net/sched/sch_qfq.c
net/sched/sch_red.c
net/sched/sch_sfb.c
net/sched/sch_sfq.c
net/sched/sch_tbf.c
net/sctp/associola.c
net/sctp/chunk.c
net/sctp/input.c
net/sctp/ipv6.c
net/sctp/output.c
net/sctp/outqueue.c
net/sctp/proc.c
net/sctp/protocol.c
net/sctp/sm_make_chunk.c
net/sctp/sm_statefuns.c
net/sctp/socket.c
net/sctp/stream.c
net/sctp/sysctl.c
net/sctp/transport.c
net/sctp/ulpevent.c
net/smc/af_smc.c
net/smc/smc.h
net/smc/smc_cdc.c
net/smc/smc_close.c
net/smc/smc_close.h
net/smc/smc_core.c
net/smc/smc_ib.c
net/smc/smc_ib.h
net/smc/smc_pnet.c
net/smc/smc_pnet.h
net/smc/smc_rx.c
net/smc/smc_tx.c
net/smc/smc_wr.c
net/socket.c
net/sunrpc/svcsock.c
net/sunrpc/xprtrdma/svc_rdma_transport.c
net/sunrpc/xprtrdma/verbs.c
net/tipc/name_table.c
net/tipc/socket.c
net/tipc/subscr.c
net/tipc/subscr.h
net/unix/af_unix.c
net/unix/garbage.c
net/vmw_vsock/af_vsock.c
net/vmw_vsock/virtio_transport.c
net/vmw_vsock/virtio_transport_common.c
net/vmw_vsock/vmci_transport.c
net/wireless/nl80211.c
net/wireless/sysfs.c
net/x25/af_x25.c
net/xfrm/xfrm_hash.h
net/xfrm/xfrm_policy.c
net/xfrm/xfrm_user.c
samples/bpf/Makefile
samples/bpf/bpf_helpers.h
samples/bpf/bpf_load.c
samples/bpf/cookie_uid_helper_example.c [new file with mode: 0644]
samples/bpf/libbpf.h
samples/bpf/map_perf_test_kern.c
samples/bpf/map_perf_test_user.c
samples/bpf/run_cookie_uid_helper_example.sh [new file with mode: 0755]
samples/bpf/test_map_in_map_kern.c [new file with mode: 0644]
samples/bpf/test_map_in_map_user.c [new file with mode: 0644]
scripts/Kbuild.include
scripts/Makefile.lib
scripts/gcc-plugins/sancov_plugin.c
scripts/kconfig/gconf.c
scripts/module-common.lds
scripts/spelling.txt
security/selinux/nlmsgtab.c
sound/core/seq/seq_clientmgr.c
sound/core/seq/seq_fifo.c
sound/core/seq/seq_memory.c
sound/core/seq/seq_memory.h
sound/pci/ctxfi/cthw20k1.c
sound/pci/hda/patch_conexant.c
sound/pci/hda/patch_realtek.c
sound/soc/amd/acp-pcm-dma.c
sound/soc/atmel/atmel-classd.c
sound/soc/codecs/hdac_hdmi.c
sound/soc/codecs/rt5665.c
sound/soc/codecs/rt5665.h
sound/soc/codecs/wm_adsp.c
sound/soc/generic/simple-card-utils.c
sound/soc/intel/skylake/skl-topology.c
sound/soc/mediatek/Kconfig
sound/soc/sh/rcar/cmd.c
sound/soc/sh/rcar/dma.c
sound/soc/sh/rcar/ssiu.c
sound/soc/soc-core.c
sound/soc/sti/uniperif_reader.c
sound/soc/sunxi/sun8i-codec.c
sound/x86/Kconfig
tools/include/linux/filter.h
tools/include/uapi/linux/bpf.h
tools/include/uapi/linux/bpf_perf_event.h [new file with mode: 0644]
tools/lguest/lguest.c
tools/lib/bpf/Makefile
tools/lib/bpf/bpf.c
tools/lib/bpf/bpf.h
tools/lib/bpf/libbpf.c
tools/lib/bpf/libbpf.h
tools/lib/traceevent/Makefile
tools/lib/traceevent/event-parse.h
tools/objtool/builtin-check.c
tools/objtool/elf.c
tools/objtool/elf.h
tools/perf/util/intel-pt-decoder/intel-pt-insn-decoder.c
tools/perf/util/symbol.c
tools/testing/ktest/ktest.pl
tools/testing/radix-tree/Makefile
tools/testing/radix-tree/benchmark.c
tools/testing/radix-tree/idr-test.c
tools/testing/radix-tree/main.c
tools/testing/radix-tree/tag_check.c
tools/testing/radix-tree/test.h
tools/testing/selftests/bpf/Makefile
tools/testing/selftests/bpf/test_iptunnel_common.h [new file with mode: 0644]
tools/testing/selftests/bpf/test_l4lb.c [new file with mode: 0644]
tools/testing/selftests/bpf/test_maps.c
tools/testing/selftests/bpf/test_pkt_access.c [new file with mode: 0644]
tools/testing/selftests/bpf/test_progs.c [new file with mode: 0644]
tools/testing/selftests/bpf/test_verifier.c
tools/testing/selftests/bpf/test_xdp.c [new file with mode: 0644]
tools/testing/selftests/net/Makefile
tools/testing/selftests/net/netdevice.sh [new file with mode: 0755]
tools/testing/selftests/powerpc/harness.c
tools/testing/selftests/powerpc/include/vsx_asm.h
tools/testing/selftests/vm/Makefile
tools/testing/selftests/x86/fsgsbase.c
tools/testing/selftests/x86/ldt_gdt.c
tools/testing/selftests/x86/ptrace_syscall.c
tools/testing/selftests/x86/single_step_syscall.c
virt/kvm/arm/vgic/vgic-its.c
virt/kvm/arm/vgic/vgic-mmio.c
virt/kvm/arm/vgic/vgic-v3.c
virt/kvm/eventfd.c
virt/kvm/kvm_main.c

index fa5a00bb114372bfbc67008e8d0ebe8d1566ed6b..7122d6264c49d6c02c2c0074e145f19b93fc63dd 100644 (file)
@@ -21,3 +21,30 @@ Description:
                is responsible for coordination of driver and firmware
                link framing mode, changing this setting to 'Y' if the
                firmware is configured for 'raw-ip' mode.
+
+What:          /sys/class/net/<iface>/qmi/add_mux
+Date:          March 2017
+KernelVersion: 4.11
+Contact:       Bjørn Mork <bjorn@mork.no>
+Description:
+               Unsigned integer.
+
+               Write a number ranging from 1 to 127 to add a qmap mux
+               based network device, supported by recent Qualcomm based
+               modems.
+
+               The network device will be called qmimux.
+
+               Userspace is in charge of managing the qmux network device
+               activation and data stream setup on the modem side by
+               using the proper QMI protocol requests.
+
+What:          /sys/class/net/<iface>/qmi/del_mux
+Date:          March 2017
+KernelVersion: 4.11
+Contact:       Bjørn Mork <bjorn@mork.no>
+Description:
+               Unsigned integer.
+
+               Write a number ranging from 1 to 127 to delete a previously
+               created qmap mux based network device.
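
As a minimal usage sketch, assuming a QMI network interface named wwan0 and
a free mux ID of 5 (both placeholders, not part of the ABI text above),
creating and removing a qmap mux device could look like:

	# create a qmimux device with mux ID 5 on top of wwan0
	echo 5 > /sys/class/net/wwan0/qmi/add_mux
	# tear the same mux device down again
	echo 5 > /sys/class/net/wwan0/qmi/del_mux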
index 986e44387dad493e268ab93253120df73abc3045..facc20a3f96280472396ad3f7d2e8f2dba62fecc 100644 (file)
        cpuidle.off=1   [CPU_IDLE]
                        disable the cpuidle sub-system
 
+       cpufreq.off=1   [CPU_FREQ]
+                       disable the cpufreq sub-system
+
        cpu_init_udelay=N
                        [X86] Delay for N microsec between assert and de-assert
                        of APIC INIT to start processors.  This delay occurs
                        functions that can be changed at run time by the
                        set_graph_notrace file in the debugfs tracing directory.
 
+       ftrace_graph_max_depth=<uint>
+                       [FTRACE] Used with the function graph tracer. This is
+                       the max depth it will trace into a function. This value
+                       can be changed at run time by the max_graph_depth file
+                       in the tracefs tracing directory. default: 0 (no limit)
+
        gamecon.map[2|3]=
                        [HW,JOY] Multisystem joystick and NES/SNES/PSX pad
                        support via parallel port (up to 5 devices per port)
                        kernel and module base offset ASLR (Address Space
                        Layout Randomization).
 
+       kasan_multi_shot
+                       [KNL] Force KASAN (Kernel Address Sanitizer) to print
+                       a report on every invalid memory access. Without this
+                       parameter, KASAN prints a report only for the first
+                       invalid access.
+
        keepinitrd      [HW,ARM]
 
        kernelcore=     [KNL,X86,IA-64,PPC]
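
As a quick illustration, the three parameters added above are ordinary boot
parameters; assuming a graph depth limit of 10 is wanted (an arbitrary
example value), they could be combined on a single kernel command line:

	cpufreq.off=1 ftrace_graph_max_depth=10 kasan_multi_shot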
index a71b8095dbd8df44603f18e7435b490d7b5c56c9..2f66683500b8e44e0ceb44bc877acccae39b35d7 100644 (file)
@@ -68,3 +68,4 @@ stable kernels.
 |                |                 |                 |                             |
 | Qualcomm Tech. | Falkor v1       | E1003           | QCOM_FALKOR_ERRATUM_1003    |
 | Qualcomm Tech. | Falkor v1       | E1009           | QCOM_FALKOR_ERRATUM_1009    |
+| Qualcomm Tech. | QDF2400 ITS     | E0065           | QCOM_QDF2400_ERRATUM_0065   |
index 3b8449f8ac7e80a0ebeaf6dfe8c64b15503f3954..49d7c997fa1ee7f759b5ba319bb57be464f0bd47 100644 (file)
@@ -1142,16 +1142,17 @@ used by the kernel.
 
   pids.max
 
- A read-write single value file which exists on non-root cgroups.  The
- default is "max".
+       A read-write single value file which exists on non-root
      cgroups.  The default is "max".
 
- Hard limit of number of processes.
      Hard limit of number of processes.
 
   pids.current
 
- A read-only single value file which exists on all cgroups.
      A read-only single value file which exists on all cgroups.
 
- The number of processes currently in the cgroup and its descendants.
+       The number of processes currently in the cgroup and its
+       descendants.
 
 Organisational operations are not blocked by cgroup policies, so it is
 possible to have pids.current > pids.max.  This can be done by either
index 2c41b713841fd497a95b57054ed9d6998625b71e..44886c91e112d4d21a41e0c4d1a96f37a584aa68 100644 (file)
@@ -10,7 +10,7 @@ Note that kcov does not aim to collect as much coverage as possible. It aims
 to collect more or less stable coverage that is a function of syscall inputs.
 To achieve this goal it does not collect coverage in soft/hard interrupts
 and instrumentation of some inherently non-deterministic parts of the kernel is
-disbled (e.g. scheduler, locking).
+disabled (e.g. scheduler, locking).
 
 Usage
 -----
index 30c546900b6021d24d64e1033714d362f3152a0e..07dbb358182ccd255baf2ed1220d5df9044f9dd7 100644 (file)
@@ -45,7 +45,7 @@ The following clocks are available:
    - 1 15      SATA
    - 1 16      SATA USB
    - 1 17      Main
-   - 1 18      SD/MMC
+   - 1 18      SD/MMC/GOP
    - 1 21      Slow IO (SPI, NOR, BootROM, I2C, UART)
    - 1 22      USB3H0
    - 1 23      USB3H1
@@ -65,7 +65,7 @@ Required properties:
        "cpm-audio", "cpm-communit", "cpm-nand", "cpm-ppv2", "cpm-sdio",
        "cpm-mg-domain", "cpm-mg-core", "cpm-xor1", "cpm-xor0", "cpm-gop-dp", "none",
        "cpm-pcie_x10", "cpm-pcie_x11", "cpm-pcie_x4", "cpm-pcie-xor", "cpm-sata",
-       "cpm-sata-usb", "cpm-main", "cpm-sd-mmc", "none", "none", "cpm-slow-io",
+       "cpm-sata-usb", "cpm-main", "cpm-sd-mmc-gop", "none", "none", "cpm-slow-io",
        "cpm-usb3h0", "cpm-usb3h1", "cpm-usb3dev", "cpm-eip150", "cpm-eip197";
 
 Example:
@@ -78,6 +78,6 @@ Example:
                gate-clock-output-names = "cpm-audio", "cpm-communit", "cpm-nand", "cpm-ppv2", "cpm-sdio",
                        "cpm-mg-domain", "cpm-mg-core", "cpm-xor1", "cpm-xor0", "cpm-gop-dp", "none",
                        "cpm-pcie_x10", "cpm-pcie_x11", "cpm-pcie_x4", "cpm-pcie-xor", "cpm-sata",
-                       "cpm-sata-usb", "cpm-main", "cpm-sd-mmc", "none", "none", "cpm-slow-io",
+                       "cpm-sata-usb", "cpm-main", "cpm-sd-mmc-gop", "none", "none", "cpm-slow-io",
                        "cpm-usb3h0", "cpm-usb3h1", "cpm-usb3dev", "cpm-eip150", "cpm-eip197";
        };
index a78265993665a65bae524bb19606a7c16f248770..ca5204b3bc218bfe8e04a4fd5ca70482b1ab8c6b 100644 (file)
@@ -4,7 +4,6 @@ Required properties:
   - compatible: value should be one of the following
                "samsung,exynos3250-mipi-dsi" /* for Exynos3250/3472 SoCs */
                "samsung,exynos4210-mipi-dsi" /* for Exynos4 SoCs */
-               "samsung,exynos4415-mipi-dsi" /* for Exynos4415 SoC */
                "samsung,exynos5410-mipi-dsi" /* for Exynos5410/5420/5440 SoCs */
                "samsung,exynos5422-mipi-dsi" /* for Exynos5422/5800 SoCs */
                "samsung,exynos5433-mipi-dsi" /* for Exynos5433 SoCs */
index 18645e0228b054e1ec0a77658c78d30f3c7eed8e..5837402c3adeae526a4e6efa914fd43c64c10443 100644 (file)
@@ -11,7 +11,6 @@ Required properties:
                "samsung,s5pv210-fimd"; /* for S5PV210 SoC */
                "samsung,exynos3250-fimd"; /* for Exynos3250/3472 SoCs */
                "samsung,exynos4210-fimd"; /* for Exynos4 SoCs */
-               "samsung,exynos4415-fimd"; /* for Exynos4415 SoC */
                "samsung,exynos5250-fimd"; /* for Exynos5250 SoCs */
                "samsung,exynos5420-fimd"; /* for Exynos5420/5422/5800 SoCs */
 
index ea9c1c9607f61239d2d0211d0cf5a607da406fb0..520d61dad6dd7ff4f65e5e8cddbf33a63b81b009 100644 (file)
@@ -13,7 +13,7 @@ Required Properties:
        - "rockchip,rk2928-dw-mshc": for Rockchip RK2928 and following,
                                                        before RK3288
        - "rockchip,rk3288-dw-mshc": for Rockchip RK3288
-       - "rockchip,rk1108-dw-mshc", "rockchip,rk3288-dw-mshc": for Rockchip RK1108
+       - "rockchip,rv1108-dw-mshc", "rockchip,rk3288-dw-mshc": for Rockchip RV1108
        - "rockchip,rk3036-dw-mshc", "rockchip,rk3288-dw-mshc": for Rockchip RK3036
        - "rockchip,rk3368-dw-mshc", "rockchip,rk3288-dw-mshc": for Rockchip RK3368
        - "rockchip,rk3399-dw-mshc", "rockchip,rk3288-dw-mshc": for Rockchip RK3399
index 10587bdadbbe5a8e8fe703e23c194420e3701f9f..26c77d985fafe06092467c5a2ea8a0e176d0d460 100644 (file)
@@ -2,11 +2,14 @@
 
 Required properties:
 - compatible: should contain one of "brcm,genet-v1", "brcm,genet-v2",
-  "brcm,genet-v3", "brcm,genet-v4".
+  "brcm,genet-v3", "brcm,genet-v4", "brcm,genet-v5".
 - reg: address and length of the register set for the device
-- interrupts: must be two cells, the first cell is the general purpose
-  interrupt line, while the second cell is the interrupt for the ring
-  RX and TX queues operating in ring mode
+- interrupts and/or interrupts-extended: must be two cells, the first cell
+  is the general purpose interrupt line, while the second cell is the
+  interrupt for the ring RX and TX queues operating in ring mode.  An
+  optional third interrupt cell for Wake-on-LAN can be specified.
+  See Documentation/devicetree/bindings/interrupt-controller/interrupts.txt
+  for information on the property specifics.
 - phy-mode: see ethernet.txt file in the same directory
 - #address-cells: should be 1
 - #size-cells: should be 1
@@ -29,15 +32,15 @@ Optional properties:
 
 Required child nodes:
 
-- mdio bus node: this node should always be present regarless of the PHY
+- mdio bus node: this node should always be present regardless of the PHY
   configuration of the GENET instance
 
 MDIO bus node required properties:
 
 - compatible: should contain one of "brcm,genet-mdio-v1", "brcm,genet-mdio-v2"
-  "brcm,genet-mdio-v3", "brcm,genet-mdio-v4", the version has to match the
-  parent node compatible property (e.g: brcm,genet-v4 pairs with
-  brcm,genet-mdio-v4)
+  "brcm,genet-mdio-v3", "brcm,genet-mdio-v4", "brcm,genet-mdio-v5", the version
+  has to match the parent node compatible property (e.g: brcm,genet-v4 pairs
+  with brcm,genet-mdio-v4)
 - reg: address and length relative to the parent node base register address
 - #address-cells: address cell for MDIO bus addressing, should be 1
 - #size-cells: size of the cells for MDIO bus addressing, should be 0
index ab0bb4247d14950959044cc138e71e7c736f85b9..4648948f7c3b8f26391292b06aa01743100e8796 100644 (file)
@@ -2,8 +2,9 @@
 
 Required properties:
- compatible: should be one of "brcm,genet-mdio-v1", "brcm,genet-mdio-v2",
-  "brcm,genet-mdio-v3", "brcm,genet-mdio-v4" or "brcm,unimac-mdio"
-- reg: address and length of the regsiter set for the device, first one is the
+  "brcm,genet-mdio-v3", "brcm,genet-mdio-v4", "brcm,genet-mdio-v5" or
+  "brcm,unimac-mdio"
+- reg: address and length of the register set for the device, first one is the
   base register, and the second one is optional and for indirect accesses to
   larger than 16-bits MDIO transactions
 - reg-names: name(s) of the register must be "mdio" and optional "mdio_indir_rw"
diff --git a/Documentation/devicetree/bindings/net/can/holt_hi311x.txt b/Documentation/devicetree/bindings/net/can/holt_hi311x.txt
new file mode 100644 (file)
index 0000000..23aa94e
--- /dev/null
@@ -0,0 +1,24 @@
+* Holt HI-311X stand-alone CAN controller device tree bindings
+
+Required properties:
+ - compatible: Should be one of the following:
+   - "holt,hi3110" for HI-3110
+ - reg: SPI chip select.
+ - clocks: The clock feeding the CAN controller.
+ - interrupt-parent: The parent interrupt controller.
+ - interrupts: Should contain IRQ line for the CAN controller.
+
+Optional properties:
+ - vdd-supply: Regulator that powers the CAN controller.
+ - xceiver-supply: Regulator that powers the CAN transceiver.
+
+Example:
+       can0: can@1 {
+               compatible = "holt,hi3110";
+               reg = <1>;
+               clocks = <&clk32m>;
+               interrupt-parent = <&gpio4>;
+               interrupts = <13 IRQ_TYPE_EDGE_RISING>;
+               vdd-supply = <&reg5v0>;
+               xceiver-supply = <&reg5v0>;
+       };
diff --git a/Documentation/devicetree/bindings/net/can/ti_hecc.txt b/Documentation/devicetree/bindings/net/can/ti_hecc.txt
new file mode 100644 (file)
index 0000000..e0f0a7c
--- /dev/null
@@ -0,0 +1,32 @@
+Texas Instruments High End CAN Controller (HECC)
+================================================
+
+This file describes the contents of the device node
+for the HECC interface.
+
+Required properties:
+- compatible: "ti,am3517-hecc"
+- reg: addresses and lengths of the register spaces for 'hecc', 'hecc-ram'
+       and 'mbx'
+- reg-names: "hecc", "hecc-ram", "mbx"
+- interrupts: interrupt mapping for the HECC interrupt sources
+- clocks: clock phandles (see clock bindings for details)
+
+Optional properties:
+- ti,use-hecc1int: if provided configures HECC to produce all interrupts
+                  on HECC1INT interrupt line. By default HECC0INT interrupt
+                  line will be used.
+- xceiver-supply: regulator that powers the CAN transceiver
+
+Example:
+
+For am3517evm board:
+       hecc: can@5c050000 {
+               compatible = "ti,am3517-hecc";
+               reg = <0x5c050000 0x80>,
+                     <0x5c053000 0x180>,
+                     <0x5c052000 0x200>;
+               reg-names = "hecc", "hecc-ram", "mbx";
+               interrupts = <24>;
+               clocks = <&hecc_ck>;
+       };
diff --git a/Documentation/devicetree/bindings/net/dsa/mt7530.txt b/Documentation/devicetree/bindings/net/dsa/mt7530.txt
new file mode 100644 (file)
index 0000000..a9bc27b
--- /dev/null
@@ -0,0 +1,92 @@
+Mediatek MT7530 Ethernet switch
+================================
+
+Required properties:
+
+- compatible: Must be "mediatek,mt7530".
+- #address-cells: Must be 1.
+- #size-cells: Must be 0.
+- mediatek,mcm: Boolean; if defined, indicates that the MT7530 is the part
+       integrated in the multi-chip module of the MT7623A, rather than the
+       standalone chip used with the MT7623N reference board.
+- core-supply: Phandle to the regulator node necessary for the core power.
+- io-supply: Phandle to the regulator node necessary for the I/O power.
+       See Documentation/devicetree/bindings/regulator/mt6323-regulator.txt
+       for details for the regulator setup on these boards.
+
+If the property mediatek,mcm isn't defined, the following property is required:
+
+- reset-gpios: Should be a gpio specifier for a reset line.
+
+Otherwise, the following properties are required:
+
+- resets : Phandle pointing to the system reset controller with
+       line index for the ethsys.
+- reset-names : Should be set to "mcm".
+
+Required properties for the child nodes within ports container:
+
+- reg: Port address; must be 6 for the CPU port and from 0 to 5 for
+       user ports.
+- phy-mode: String, must be either "trgmii" or "rgmii" for the port labeled
+        "cpu".
+
+See Documentation/devicetree/bindings/dsa/dsa.txt for a list of additional
+required and optional properties, and for how the integrated switch subnodes
+must be specified.
+
+Example:
+
+       &mdio0 {
+               switch@0 {
+                       compatible = "mediatek,mt7530";
+                       #address-cells = <1>;
+                       #size-cells = <0>;
+                       reg = <0>;
+
+                       core-supply = <&mt6323_vpa_reg>;
+                       io-supply = <&mt6323_vemc3v3_reg>;
+                       reset-gpios = <&pio 33 0>;
+
+                       ports {
+                               #address-cells = <1>;
+                               #size-cells = <0>;
+                               reg = <0>;
+                               port@0 {
+                                       reg = <0>;
+                                       label = "lan0";
+                               };
+
+                               port@1 {
+                                       reg = <1>;
+                                       label = "lan1";
+                               };
+
+                               port@2 {
+                                       reg = <2>;
+                                       label = "lan2";
+                               };
+
+                               port@3 {
+                                       reg = <3>;
+                                       label = "lan3";
+                               };
+
+                               port@4 {
+                                       reg = <4>;
+                                       label = "wan";
+                               };
+
+                               port@6 {
+                                       reg = <6>;
+                                       label = "cpu";
+                                       ethernet = <&gmac0>;
+                                       phy-mode = "trgmii";
+                                       fixed-link {
+                                               speed = <1000>;
+                                               full-duplex;
+                                       };
+                               };
+                       };
+               };
+       };
index 4754364df4c66adfc53e2546e31e0d124070dca1..6b4956beff8c42c3214906c83d94072fca8e9083 100644 (file)
@@ -1,17 +1,28 @@
-* Marvell Armada 375 Ethernet Controller (PPv2)
+* Marvell Armada 375 Ethernet Controller (PPv2.1)
+  Marvell Armada 7K/8K Ethernet Controller (PPv2.2)
 
 Required properties:
 
-- compatible: should be "marvell,armada-375-pp2"
+- compatible: should be one of:
+    "marvell,armada-375-pp2"
+    "marvell,armada-7k-pp22"
 - reg: addresses and length of the register sets for the device.
-  Must contain the following register sets:
+  For "marvell,armada-375-pp2", must contain the following register
+  sets:
        - common controller registers
        - LMS registers
-  In addition, at least one port register set is required.
-- clocks: a pointer to the reference clocks for this device, consequently:
-       - main controller clock
-       - GOP clock
-- clock-names: names of used clocks, must be "pp_clk" and "gop_clk".
+       - one register area per Ethernet port
+  For "marvell,armada-7k-pp22", must contain the following register
+  sets:
+       - packet processor registers
+       - networking interfaces registers
+
+- clocks: pointers to the reference clocks for this device, consequently:
+       - main controller clock (for both armada-375-pp2 and armada-7k-pp22)
+       - GOP clock (for both armada-375-pp2 and armada-7k-pp22)
+       - MG clock (only for armada-7k-pp22)
+- clock-names: names of used clocks, must be "pp_clk", "gop_clk" and
+  "mg_clk" (the latter only for armada-7k-pp22).
 
 The ethernet ports are represented by subnodes. At least one port is
 required.
@@ -19,8 +30,10 @@ required.
 Required properties (port):
 
 - interrupts: interrupt for the port
-- port-id: should be '0' or '1' for ethernet ports, and '2' for the
-           loopback port
+- port-id: ID of the port from the MAC point of view
+- gop-port-id: only for marvell,armada-7k-pp22, ID of the port from the
+  GOP (Group Of Ports) point of view. This ID is used to index the
+  per-port registers in the second register area.
 - phy-mode: See ethernet.txt file in the same directory
 
 Optional properties (port):
@@ -29,7 +42,7 @@ Optional properties (port):
 - phy: a phandle to a phy node defining the PHY address (as the reg
   property, a single integer).
 
-Example:
+Example for marvell,armada-375-pp2:
 
 ethernet@f0000 {
        compatible = "marvell,armada-375-pp2";
@@ -57,3 +70,30 @@ ethernet@f0000 {
                phy-mode = "gmii";
        };
 };
+
+Example for marvell,armada-7k-pp22:
+
+cpm_ethernet: ethernet@0 {
+       compatible = "marvell,armada-7k-pp22";
+       reg = <0x0 0x100000>, <0x129000 0xb000>;
+       clocks = <&cpm_syscon0 1 3>, <&cpm_syscon0 1 9>, <&cpm_syscon0 1 5>;
+       clock-names = "pp_clk", "gop_clk", "mg_clk";
+
+       eth0: eth0 {
+               interrupts = <GIC_SPI 37 IRQ_TYPE_LEVEL_HIGH>;
+               port-id = <0>;
+               gop-port-id = <0>;
+       };
+
+       eth1: eth1 {
+               interrupts = <GIC_SPI 38 IRQ_TYPE_LEVEL_HIGH>;
+               port-id = <1>;
+               gop-port-id = <2>;
+       };
+
+       eth2: eth2 {
+               interrupts = <GIC_SPI 39 IRQ_TYPE_LEVEL_HIGH>;
+               port-id = <2>;
+               gop-port-id = <3>;
+       };
+};
index d3bfc2b30fb5ecc07493b4c5510d5cdc32b3ff3a..f652b0c384ced4f1a2ed9cc6eeb1f7abb99669c7 100644 (file)
@@ -28,9 +28,9 @@ Optional properties:
   clocks may be specified in derived bindings.
 - clock-names: One name for each entry in the clocks property, the
   first one should be "stmmaceth" and the second one should be "pclk".
-- clk_ptp_ref: this is the PTP reference clock; in case of the PTP is
-  available this clock is used for programming the Timestamp Addend Register.
-  If not passed then the system clock will be used and this is fine on some
+- ptp_ref: this is the PTP reference clock; if PTP support is available,
+  this clock is used for programming the Timestamp Addend Register. If not
+  passed, the system clock will be used instead, which is fine on some
   platforms.
 - tx-fifo-depth: See ethernet.txt file in the same directory
 - rx-fifo-depth: See ethernet.txt file in the same directory
@@ -72,7 +72,45 @@ Optional properties:
        - snps,mb: mixed-burst
        - snps,rb: rebuild INCRx Burst
 - mdio: with compatible = "snps,dwmac-mdio", create and register mdio bus.
-
+- Multiple RX Queues parameters: below is the list of all the parameters used
+                                to configure the multiple RX queues:
+       - snps,rx-queues-to-use: number of RX queues to be used in the driver
+       - Choose one of these RX scheduling algorithms:
+               - snps,rx-sched-sp: Strict priority
+               - snps,rx-sched-wsp: Weighted Strict priority
+       - For each RX queue
+               - Choose one of these modes:
+                       - snps,dcb-algorithm: Queue to be enabled as DCB
+                       - snps,avb-algorithm: Queue to be enabled as AVB
+               - snps,map-to-dma-channel: Channel to map
+               - Specify specific packet routing:
+                       - snps,route-avcp: AV Untagged Control packets
+                       - snps,route-ptp: PTP Packets
+                       - snps,route-dcbcp: DCB Control Packets
+                       - snps,route-up: Untagged Packets
+                       - snps,route-multi-broad: Multicast & Broadcast Packets
+               - snps,priority: RX queue priority (Range: 0x0 to 0xF)
+- Multiple TX Queues parameters: below is the list of all the parameters used
+                                to configure the multiple TX queues:
+       - snps,tx-queues-to-use: number of TX queues to be used in the driver
+       - Choose one of these TX scheduling algorithms:
+               - snps,tx-sched-wrr: Weighted Round Robin
+               - snps,tx-sched-wfq: Weighted Fair Queuing
+               - snps,tx-sched-dwrr: Deficit Weighted Round Robin
+               - snps,tx-sched-sp: Strict priority
+       - For each TX queue
+               - snps,weight: TX queue weight (if using a DCB weight algorithm)
+               - Choose one of these modes:
+                       - snps,dcb-algorithm: TX queue will be working in DCB
+                       - snps,avb-algorithm: TX queue will be working in AVB
+                         [Attention] Queue 0 is reserved for legacy traffic
+                         and so no AVB is available in this queue.
+               - Configure Credit Base Shaper (if AVB Mode selected):
+                       - snps,send_slope: credit-based shaper send slope value
+                       - snps,idle_slope: credit-based shaper idle slope value
+                       - snps,high_credit: maximum credit (hiCredit)
+                       - snps,low_credit: minimum credit (loCredit)
+               - snps,priority: TX queue priority (Range: 0x0 to 0xF)
 Examples:
 
        stmmac_axi_setup: stmmac-axi-config {
@@ -81,6 +119,35 @@ Examples:
                snps,blen = <256 128 64 32 0 0 0>;
        };
 
+       mtl_rx_setup: rx-queues-config {
+               snps,rx-queues-to-use = <1>;
+               snps,rx-sched-sp;
+               queue0 {
+                       snps,dcb-algorithm;
+                       snps,map-to-dma-channel = <0x0>;
+                       snps,priority = <0x0>;
+               };
+       };
+
+       mtl_tx_setup: tx-queues-config {
+               snps,tx-queues-to-use = <2>;
+               snps,tx-sched-wrr;
+               queue0 {
+                       snps,weight = <0x10>;
+                       snps,dcb-algorithm;
+                       snps,priority = <0x0>;
+               };
+
+               queue1 {
+                       snps,avb-algorithm;
+                       snps,send_slope = <0x1000>;
+                       snps,idle_slope = <0x1000>;
+                       snps,high_credit = <0x3E800>;
+                       snps,low_credit = <0xFFC18000>;
+                       snps,priority = <0x1>;
+               };
+       };
+
        gmac0: ethernet@e0800000 {
                compatible = "st,spear600-gmac";
                reg = <0xe0800000 0x8000>;
@@ -104,4 +171,6 @@ Examples:
                        phy1: ethernet-phy@0 {
                        };
                };
+               snps,mtl-rx-config = <&mtl_rx_setup>;
+               snps,mtl-tx-config = <&mtl_tx_setup>;
        };
diff --git a/Documentation/devicetree/bindings/phy/brcm,nsp-usb3-phy.txt b/Documentation/devicetree/bindings/phy/brcm,nsp-usb3-phy.txt
deleted file mode 100644 (file)
index e68ae5d..0000000
+++ /dev/null
@@ -1,39 +0,0 @@
-Broadcom USB3 phy binding for northstar plus SoC
-The USB3 phy is internal to the SoC and is accessed using mdio interface.
-
-Required mdio bus properties:
-- reg: Should be 0x0 for SoC internal USB3 phy
-- #address-cells: must be 1
-- #size-cells: must be 0
-
-Required USB3 PHY properties:
-- compatible: should be "brcm,nsp-usb3-phy"
-- reg: USB3 Phy address on SoC internal MDIO bus and it should be 0x10.
-- usb3-ctrl-syscon: handler of syscon node defining physical address
-  of usb3 control register.
-- #phy-cells: must be 0
-
-Required usb3 control properties:
-- compatible: should be "brcm,nsp-usb3-ctrl"
-- reg: offset and length of the control registers
-
-Example:
-
-       mdio@0 {
-               reg = <0x0>;
-               #address-cells = <1>;
-               #size-cells = <0>;
-
-               usb3_phy: usb-phy@10 {
-                       compatible = "brcm,nsp-usb3-phy";
-                       reg = <0x10>;
-                       usb3-ctrl-syscon = <&usb3_ctrl>;
-                       #phy-cells = <0>;
-                       status = "disabled";
-               };
-       };
-
-       usb3_ctrl: syscon@104408 {
-               compatible = "brcm,nsp-usb3-ctrl", "syscon";
-               reg = <0x104408 0x3fc>;
-       };
index 712baf6c3e246fa9ea93c6bab4cb8f294b894d86..44b842b6ca154d9c13c3d0da6a784fe54195fcf9 100644 (file)
@@ -71,6 +71,9 @@
                          For Axon it can be absent, though my current driver
                          doesn't handle phy-address yet so for now, keep
                          0x00ffffff in it.
+    - phy-handle       : Used to describe configurations where an external PHY
+                         is used. Please refer to:
+                         Documentation/devicetree/bindings/net/ethernet.txt
     - rx-fifo-size-gige : 1 cell, Rx fifo size in bytes for 1000 Mb/sec
                          operations (if absent the value is the same as
                          rx-fifo-size).  For Axon, either absent or 2048.
                          offload, phandle of the TAH device node.
     - tah-channel       : 1 cell, optional. If appropriate, channel used on the
                          TAH engine.
+    - fixed-link       : Fixed-link subnode describing a link to a non-MDIO
+                         managed entity. See
+                         Documentation/devicetree/bindings/net/fixed-link.txt
+                         for details.
+    - mdio subnode     : When the EMAC has a PHY connected to its local
+                         mdio, which is supported by the kernel's network
+                         PHY library in drivers/net/phy, there must be a
+                         device tree subnode with the following required
+                         properties:
+                               - #address-cells: Must be <1>.
+                               - #size-cells: Must be <0>.
 
-    Example:
+                         For PHY definitions: Please refer to
+                         Documentation/devicetree/bindings/net/phy.txt and
+                         Documentation/devicetree/bindings/net/ethernet.txt
+
+    Examples:
 
        EMAC0: ethernet@40000800 {
                device_type = "network";
                zmii-channel = <0>;
        };
 
+       EMAC1: ethernet@ef600c00 {
+               device_type = "network";
+               compatible = "ibm,emac-apm821xx", "ibm,emac4sync";
+               interrupt-parent = <&EMAC1>;
+               interrupts = <0 1>;
+               #interrupt-cells = <1>;
+               #address-cells = <0>;
+               #size-cells = <0>;
+               interrupt-map = <0 &UIC2 0x10 IRQ_TYPE_LEVEL_HIGH /* Status */
+                                1 &UIC2 0x14 IRQ_TYPE_LEVEL_HIGH /* Wake */>;
+               reg = <0xef600c00 0x000000c4>;
+               local-mac-address = [000000000000]; /* Filled in by U-Boot */
+               mal-device = <&MAL0>;
+               mal-tx-channel = <0>;
+               mal-rx-channel = <0>;
+               cell-index = <0>;
+               max-frame-size = <9000>;
+               rx-fifo-size = <16384>;
+               tx-fifo-size = <2048>;
+               fifo-entry-size = <10>;
+               phy-mode = "rgmii";
+               phy-handle = <&phy0>;
+               phy-map = <0x00000000>;
+               rgmii-device = <&RGMII0>;
+               rgmii-channel = <0>;
+               tah-device = <&TAH0>;
+               tah-channel = <0>;
+               has-inverted-stacr-oc;
+               has-new-stacr-staopc;
+
+               mdio {
+                       #address-cells = <1>;
+                       #size-cells = <0>;
+
+                       phy0: ethernet-phy@0 {
+                               compatible = "ethernet-phy-ieee802.3-c22";
+                               reg = <0>;
+                       };
+               };
+       };
+
+
       ii) McMAL node
 
     Required properties:
     - revision           : as provided by the RGMII new version register if
                           available.
                           For Axon: 0x0000012a
-
index c3f6546ebac777421b467b0008f7f78f06e8e5c4..6a23ad9ac53a4cabc85a6bc592a873f38c7c144b 100644 (file)
@@ -45,7 +45,7 @@ Required Properties:
 Optional Properties:
 - reg-names: In addition to the required properties, the following are optional
   - "efuse-address"    - Contains efuse base address used to pick up ABB info.
-  - "ldo-address"      - Contains address of ABB LDO overide register address.
+  - "ldo-address"      - Contains address of ABB LDO override register.
        "efuse-address" is required for this.
 - ti,ldovbb-vset-mask  - Required if ldo-address is set, mask for LDO override
        register to provide override vset value.
index 471477299ece16c931322e96fbb3c1535b1e1a43..9cf7876ab43444f604a501075ccbf6088e50f880 100644 (file)
@@ -12,7 +12,8 @@ Required properties:
 - reg : Offset and length of the register set for the module
 - interrupts : the interrupt number for the RNG module.
                Used for "ti,omap4-rng" and "inside-secure,safexcel-eip76"
-- clocks: the trng clock source
+- clocks: the trng clock source. Only mandatory for the
+  "inside-secure,safexcel-eip76" compatible.
 
 Example:
 /* AM335x */
index 0c065f77658f138e8bc535e85c7d2a99f5268b59..3957d4edaa745fc068c4ab8ba796f9c8d8190a1f 100644 (file)
@@ -7,18 +7,18 @@ Required properties :
  - compatible : Should be "microchip,usb251xb" or one of the specific types:
        "microchip,usb2512b", "microchip,usb2512bi", "microchip,usb2513b",
        "microchip,usb2513bi", "microchip,usb2514b", "microchip,usb2514bi"
- - hub-reset-gpios : Should specify the gpio for hub reset
+ - reset-gpios : Should specify the gpio for hub reset
+ - reg : I2C address on the selected bus (default is <0x2C>)
 
 Optional properties :
- - reg : I2C address on the selected bus (default is <0x2C>)
  - skip-config : Skip Hub configuration, but only send the USB-Attach command
- - vendor-id : USB Vendor ID of the hub (16 bit, default is 0x0424)
- - product-id : USB Product ID of the hub (16 bit, default depends on type)
- - device-id : USB Device ID of the hub (16 bit, default is 0x0bb3)
- - language-id : USB Language ID (16 bit, default is 0x0000)
- - manufacturer : USB Manufacturer string (max 31 characters long)
- - product : USB Product string (max 31 characters long)
- - serial : USB Serial string (max 31 characters long)
+ - vendor-id : Set USB Vendor ID of the hub (16 bit, default is 0x0424)
+ - product-id : Set USB Product ID of the hub (16 bit, default depends on type)
+ - device-id : Set USB Device ID of the hub (16 bit, default is 0x0bb3)
+ - language-id : Set USB Language ID (16 bit, default is 0x0000)
+ - manufacturer : Set USB Manufacturer string (max 31 characters long)
+ - product : Set USB Product string (max 31 characters long)
+ - serial : Set USB Serial string (max 31 characters long)
  - {bus,self}-powered : selects between self- and bus-powered operation (default
        is self-powered)
  - disable-hi-speed : disable USB Hi-Speed support
@@ -31,8 +31,10 @@ Optional properties :
        (default is individual)
  - dynamic-power-switching : enable auto-switching from self- to bus-powered
        operation if the local power source is removed or unavailable
- - oc-delay-{100us,4ms,8ms,16ms} : set over current timer delay (default is 8ms)
- - compound-device : indicated the hub is part of a compound device
+ - oc-delay-us : Delay time (in microseconds) for filtering the over-current
+       sense inputs. Valid values are 100, 4000, 8000 (default) and 16000. If
+       an invalid value is given, the default is used instead.
+ - compound-device : indicate the hub is part of a compound device
  - port-mapping-mode : enable port mapping mode
  - string-support : enable string descriptor support (required for manufacturer,
        product and serial string configuration)
@@ -40,34 +42,15 @@ Optional properties :
        device connected.
  - sp-disabled-ports : Specifies the ports which will be self-power disabled
  - bp-disabled-ports : Specifies the ports which will be bus-power disabled
- - max-sp-power : Specifies the maximum current the hub consumes from an
-       upstream port when operating as self-powered hub including the power
-       consumption of a permanently attached peripheral if the hub is
-       configured as a compound device. The value is given in mA in a 0 - 500
-       range (default is 2).
- - max-bp-power : Specifies the maximum current the hub consumes from an
-       upstream port when operating as bus-powered hub including the power
-       consumption of a permanently attached peripheral if the hub is
-       configured as a compound device. The value is given in mA in a 0 - 500
-       range (default is 100).
- - max-sp-current : Specifies the maximum current the hub consumes from an
-       upstream port when operating as self-powered hub EXCLUDING the power
-       consumption of a permanently attached peripheral if the hub is
-       configured as a compound device. The value is given in mA in a 0 - 500
-       range (default is 2).
- - max-bp-current : Specifies the maximum current the hub consumes from an
-       upstream port when operating as bus-powered hub EXCLUDING the power
-       consumption of a permanently attached peripheral if the hub is
-       configured as a compound device. The value is given in mA in a 0 - 500
-       range (default is 100).
- - power-on-time : Specifies the time it takes from the time the host initiates
-       the power-on sequence to a port until the port has adequate power. The
-       value is given in ms in a 0 - 510 range (default is 100ms).
+ - power-on-time-ms : Specifies the time it takes from the time the host
+       initiates the power-on sequence to a port until the port has adequate
+       power. The value is given in ms in a 0 - 510 range (default is 100ms).
 
 Examples:
        usb2512b@2c {
                compatible = "microchip,usb2512b";
-               hub-reset-gpios = <&gpio1 4 GPIO_ACTIVE_LOW>;
+               reg = <0x2c>;
+               reset-gpios = <&gpio1 4 GPIO_ACTIVE_LOW>;
        };
 
        usb2514b@2c {
index af0b366c25b73332a77e58b9b7247c4c387fc0d6..8155dbc7fad36a253137c230a7f074fecd0a7969 100644 (file)
@@ -20,3 +20,8 @@ Index 1: The output gpio for enabling Vbus output from the device to the otg
 Index 2: The output gpio for muxing of the data pins between the USB host and
          the USB peripheral controller, write 1 to mux to the peripheral
          controller
+
+There is a mapping between indices and GPIO connection IDs as follows:
+       id      index 0
+       vbus    index 1
+       mux     index 2
index 891c694644348241925dc3e585d632222d8b0948..433eaefb4aa171ac62f3162866763a0526d7eb83 100644 (file)
@@ -18,8 +18,8 @@ because gcc versions 4.5 and 4.6 are compiled by a C compiler,
 gcc-4.7 can be compiled by a C or a C++ compiler,
 and versions 4.8+ can only be compiled by a C++ compiler.
 
-Currently the GCC plugin infrastructure supports only the x86, arm and arm64
-architectures.
+Currently the GCC plugin infrastructure supports only the x86, arm, arm64 and
+powerpc architectures.
 
 This infrastructure was ported from grsecurity [6] and PaX [7].
 
index a251bf4fe9c9281baf497f17bff8630b4eb21705..57e616ed10b0b54fc8ca2ad1ea405f742db2ab1e 100644 (file)
@@ -63,6 +63,78 @@ Additional Configurations
   The latest release of ethtool can be found from
   https://www.kernel.org/pub/software/network/ethtool
 
+
+  Flow Director n-tuple traffic filters (FDir)
+  ---------------------------------------------
+  The driver utilizes the ethtool interface for configuring ntuple filters,
+  via "ethtool -N <device> <filter>".
+
+  The sctp4, ip4, udp4, and tcp4 flow types are supported with the standard
+  fields including src-ip, dst-ip, src-port and dst-port. The driver only
+  supports fully enabling or fully masking the fields, so use of the mask
+  fields for partial matches is not supported.
+
+  Additionally, the driver supports using the action to specify filters for a
+  Virtual Function. The action is specified as a 64-bit value, where the
+  lower 32 bits represent the queue number and the next 8 bits identify the
+  VF. Note that 0 is the PF, so the VF identifier is offset by 1. For
+  example:
+
+    ... action 0x800000002 ...
+
+  would direct traffic for Virtual Function 7 (8 minus 1) to queue 2 of that
+  VF.
+
+  The driver also supports using the user-defined field to specify 2 bytes of
+  arbitrary data to match within the packet payload in addition to the regular
+  fields. The data is specified in the lower 32 bits of the user-def field in
+  the following way:
+
+  +----------------------------+---------------------------+
+  | 31    28    24    20    16 | 15    12     8     4     0|
+  +----------------------------+---------------------------+
+  | offset into packet payload |  2 bytes of flexible data |
+  +----------------------------+---------------------------+
+
+  As an example,
+
+    ... user-def 0x4FFFF ...
+
+  means to match the value 0xFFFF 4 bytes into the packet payload. Note that
+  the offset is based on the beginning of the payload, and not the beginning
+  of the packet. Thus
+
+    flow-type tcp4 ... user-def 0x8BEAF ...
+
+  would match TCP/IPv4 packets which have the value 0xBEAF 8 bytes into the
+  TCP/IPv4 payload.
+
+  For ICMP, the hardware parses the ICMP header as 4 bytes of header and 4
+  bytes of payload, so if you want to match an ICMP frame's payload you may
+  to add 4 to the offset in order to match the data.
+
+  Furthermore, the offset can only be up to a value of 64, as the hardware
+  will only read up to 64 bytes of data from the payload. It must also be
+  even, as the flexible data is 2 bytes long and must be aligned to byte 0
+  of the packet payload.
+
+  When programming filters, the hardware is limited to using a single input
+  set for each flow type. This means that it is an error to program two
+  different filters with the same type that don't match on the same fields.
+  Thus the second of the following two commands will fail:
+
+    ethtool -N <device> flow-type tcp4 src-ip 192.168.0.7 action 5
+    ethtool -N <device> flow-type tcp4 dst-ip 192.168.15.18 action 1
+
+  This is because the first filter will be accepted and will reprogram the
+  set for TCPv4 filters, but the second filter will be unable to reprogram the
+  input set until all the conflicting TCPv4 filters are first removed.
+
+  Note that the user-defined flexible offset is also considered part of the
+  input set and cannot be programmed separately for multiple filters of the
+  same type. However, the flexible data is not part of the input set and
+  multiple filters may use the same offset but match against different data.
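+
+  Putting these pieces together, a sketch of a filter (device name, port,
+  queue and data values are illustrative) that directs TCP/IPv4 traffic to
+  port 80, carrying 0xFFFF four bytes into the payload, to queue 2:
+
+    ethtool -N eth0 flow-type tcp4 dst-port 80 user-def 0x4FFFF action 2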
+
   Data Center Bridging (DCB)
   --------------------------
   DCB configuration is not currently supported.
index fc73eeb7b3b8b119083a03a42e0046a564ea2f0e..b1c6500e7a8df4d7377b291e9afc09363e66cd17 100644 (file)
@@ -73,6 +73,14 @@ fib_multipath_use_neigh - BOOLEAN
        0 - disabled
        1 - enabled
 
+fib_multipath_hash_policy - INTEGER
+       Controls which hash policy to use for multipath routes. Only valid
+       for kernels built with CONFIG_IP_ROUTE_MULTIPATH enabled.
+       Default: 0 (Layer 3)
+       Possible values:
+       0 - Layer 3
+       1 - Layer 4
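+
+       For example, to switch multipath routing to layer-4 hashing (a
+       usage sketch):
+
+               sysctl -w net.ipv4.fib_multipath_hash_policy=1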
+
 route/max_size - INTEGER
        Maximum number of routes allowed in the kernel.  Increase
        this when using large numbers of interfaces and/or routes.
@@ -640,11 +648,6 @@ tcp_tso_win_divisor - INTEGER
        building larger TSO frames.
        Default: 3
 
-tcp_tw_recycle - BOOLEAN
-       Enable fast recycling TIME-WAIT sockets. Default value is 0.
-       It should not be changed without advice/request of technical
-       experts.
-
 tcp_tw_reuse - BOOLEAN
        Allow to reuse TIME-WAIT sockets for new connections when it is
        safe from protocol viewpoint. Default value is 0.
@@ -853,12 +856,21 @@ ip_dynaddr - BOOLEAN
 ip_early_demux - BOOLEAN
        Optimize input packet processing down to one demux for
        certain kinds of local sockets.  Currently we only do this
-       for established TCP sockets.
+       for established TCP and connected UDP sockets.
 
        It may add an additional cost for pure routing workloads that
        reduces overall throughput, in such case you should disable it.
        Default: 1
 
+tcp_early_demux - BOOLEAN
+       Enable early demux for established TCP sockets.
+       Default: 1
+
+udp_early_demux - BOOLEAN
+       Enable early demux for connected UDP sockets. Disable this if
+       your system handles mostly unconnected UDP traffic, where the
+       early demux lookup is wasted work.
+       Default: 1
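+
+       For example, to disable it (a usage sketch):
+
+               sysctl -w net.ipv4.udp_early_demux=0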
+
 icmp_echo_ignore_all - BOOLEAN
        If set non-zero, then the kernel will ignore all ICMP ECHO
        requests sent to it.
@@ -1006,7 +1018,8 @@ accept_redirects - BOOLEAN
                FALSE (router)
 
 forwarding - BOOLEAN
-       Enable IP forwarding on this interface.
+       Enable IP forwarding on this interface.  This controls whether packets
+       received _on_ this interface can be forwarded.
 
 mc_forwarding - BOOLEAN
        Do multicast routing. The kernel needs to be compiled with CONFIG_MROUTE
@@ -1457,11 +1470,20 @@ accept_ra_pinfo - BOOLEAN
        Functional default: enabled if accept_ra is enabled.
                            disabled if accept_ra is disabled.
 
+accept_ra_rt_info_min_plen - INTEGER
+       Minimum prefix length of Route Information in RA.
+
+       Route Information w/ prefix smaller than this variable shall
+       be ignored.
+
+       Functional default: 0 if accept_ra_rtr_pref is enabled.
+                           -1 if accept_ra_rtr_pref is disabled.
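+
+       A usage sketch (interface name and prefix length illustrative):
+
+               sysctl -w net.ipv6.conf.eth0.accept_ra_rt_info_min_plen=64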
+
 accept_ra_rt_info_max_plen - INTEGER
        Maximum prefix length of Route Information in RA.
 
-       Route Information w/ prefix larger than or equal to this
-       variable shall be ignored.
+       Route Information w/ prefix larger than this variable shall
+       be ignored.
 
        Functional default: 0 if accept_ra_rtr_pref is enabled.
                            -1 if accept_ra_rtr_pref is disabled.
index e6b1c025fdd89362a40e7aa676859a3a83646e7a..056898685d408e463f1a191f36e2384e9974ab6d 100644 (file)
@@ -175,6 +175,14 @@ nat_icmp_send - BOOLEAN
         for VS/NAT when the load balancer receives packets from real
         servers but the connection entries don't exist.
 
+pmtu_disc - BOOLEAN
+       0 - disabled
+       not 0 - enabled (default)
+
+       By default, reject with FRAG_NEEDED all DF packets that exceed
+       the PMTU, irrespective of the forwarding method. For the TUN
+       method the flag can be disabled to fragment such packets.
+
 secure_tcp - INTEGER
         0  - disabled (default)
 
@@ -185,15 +193,59 @@ secure_tcp - INTEGER
         The value definition is the same as that of drop_entry and
         drop_packet.
 
-sync_threshold - INTEGER
-        default 3
+sync_threshold - vector of 2 INTEGERs: sync_threshold, sync_period
+       default 3 50
+
+       It sets the synchronization threshold, which is the minimum number
+       of incoming packets that a connection needs to receive before
+       the connection will be synchronized. A connection is synchronized
+       every time the number of its incoming packets modulo sync_period
+       equals the threshold. The range of the threshold is from 0 to
+       sync_period.
+
+       When sync_period and sync_refresh_period are 0, a sync message is
+       sent only for state changes, or only once when the packet count
+       matches sync_threshold.
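+
+       A usage sketch (values illustrative):
+
+               echo "3 50" > /proc/sys/net/ipv4/vs/sync_threshold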
+
+sync_refresh_period - UNSIGNED INTEGER
+       default 0
+
+       The difference, in seconds, in the reported connection timer that
+       triggers a new sync message. It can be used to avoid sync messages
+       for the specified period (or half of the connection timeout if it
+       is lower) if the connection state has not changed since the last
+       sync.
+
+       This is useful for normal connections with high traffic to reduce
+       sync rate. Additionally, retry sync_retries times with period of
+       sync_refresh_period/8.
+
+sync_retries - INTEGER
+       default 0
+
+       Defines the number of sync retries, sent with a period of
+       sync_refresh_period/8. Useful to protect against loss of sync
+       messages. The range of sync_retries is from 0 to 3.
+
+sync_qlen_max - UNSIGNED LONG
+
+       Hard limit on the number of queued sync messages that are not yet
+       sent. It defaults to 1/32 of the memory pages but actually
+       represents a number of messages. It protects against allocating
+       large amounts of memory when the sending rate is lower than the
+       queuing rate.
+
+sync_sock_size - INTEGER
+       default 0
+
+       Configuration of SNDBUF (master) or RCVBUF (slave) socket limit.
+       Default value is 0 (preserve system defaults).
+
+sync_ports - INTEGER
+       default 1
 
-        It sets synchronization threshold, which is the minimum number
-        of incoming packets that a connection needs to receive before
-        the connection will be synchronized. A connection will be
-        synchronized, every time the number of its incoming packets
-        modulus 50 equals the threshold. The range of the threshold is
-        from 0 to 49.
+       The number of threads that master and backup servers can use for
+       sync traffic. Every thread will use a single UDP port: thread 0
+       will use the default port 8848, while the last thread will use
+       port 8848+sync_ports-1.
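+
+       For example, to use 4 sync threads on ports 8848-8851 (a usage
+       sketch):
+
+               echo 4 > /proc/sys/net/ipv4/vs/sync_ports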
 
 snat_reroute - BOOLEAN
        0 - disabled
index 15d8d16934fd13727bb35e9c5078484127bbfa30..2f24a1912a48c73d360416d9889dd47c25bbbe28 100644 (file)
@@ -19,6 +19,25 @@ platform_labels - INTEGER
        Possible values: 0 - 1048575
        Default: 0
 
+ip_ttl_propagate - BOOL
+       Control whether TTL is propagated from the IPv4/IPv6 header to
+       the MPLS header on imposing labels and propagated from the
+       MPLS header to the IPv4/IPv6 header on popping the last label.
+
+       If disabled, the MPLS transport network will appear as a
+       single hop to transit traffic.
+
+       0 - disabled / RFC 3443 [Short] Pipe Model
+       1 - enabled / RFC 3443 Uniform Model (default)
+
+default_ttl - BOOL
+       Default TTL value to use for MPLS packets where it cannot be
+       propagated from an IP header, either because one isn't present
+       or ip_ttl_propagate has been disabled.
+
+       Possible values: 1 - 255
+       Default: 255
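+
+       A usage sketch selecting the Pipe Model with a TTL of 64 for
+       packets whose TTL cannot be propagated:
+
+               sysctl -w net.mpls.ip_ttl_propagate=0
+               sysctl -w net.mpls.default_ttl=64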
+
 conf/<interface>/input - BOOL
        Control whether packets can be input on this interface.
 
index e4991fb1eedcd4efcd258a7b1290fd087e5f9923..41ef9d8efe9517f602e59b21bfe1448196cb3450 100644 (file)
@@ -12,7 +12,7 @@ kprobes can probe (this means, all functions body except for __kprobes
 functions). Unlike the Tracepoint based event, this can be added and removed
 dynamically, on the fly.
 
-To enable this feature, build your kernel with CONFIG_KPROBE_EVENT=y.
+To enable this feature, build your kernel with CONFIG_KPROBE_EVENTS=y.
 
 Similar to the events tracer, this doesn't need to be activated via
 current_tracer. Instead of that, add probe points via
index fa7b680ee8a005acf245ae60aa3404b8e9aaafb7..bf526a7c5559a87829fc05ea5f592d9609ed5f3b 100644 (file)
@@ -7,7 +7,7 @@
 Overview
 --------
 Uprobe based trace events are similar to kprobe based trace events.
-To enable this feature, build your kernel with CONFIG_UPROBE_EVENT=y.
+To enable this feature, build your kernel with CONFIG_UPROBE_EVENTS=y.
 
 Similar to the kprobe-event tracer, this doesn't need to be activated via
 current_tracer. Instead of that, add probe points via
index 069450938b795df4e6f5e16f39b864e8011fb844..fd106899afd1b2cf53d88d0fd2aacc85625ebe57 100644 (file)
@@ -951,6 +951,10 @@ This ioctl allows the user to create or modify a guest physical memory
 slot.  When changing an existing slot, it may be moved in the guest
 physical memory space, or its flags may be modified.  It may not be
 resized.  Slots may not overlap in guest physical address space.
+Bits 0-15 of "slot" specifies the slot id and this value should be
+less than the maximum number of user memory slots supported per VM.
+The maximum allowed slots can be queried using KVM_CAP_NR_MEMSLOTS,
+if this capability is supported by the architecture.
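+
+A userspace sketch of the query (illustrative; assumes kvm_fd is an open
+/dev/kvm file descriptor):
+
+    int max_slots = ioctl(kvm_fd, KVM_CHECK_EXTENSION, KVM_CAP_NR_MEMSLOTS);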
 
 If KVM_CAP_MULTI_ADDRESS_SPACE is available, bits 16-31 of "slot"
 specifies the address space which is being modified.  They must be
@@ -3373,6 +3377,69 @@ struct kvm_ppc_resize_hpt {
        __u32 pad;
 };
 
+4.104 KVM_X86_GET_MCE_CAP_SUPPORTED
+
+Capability: KVM_CAP_MCE
+Architectures: x86
+Type: system ioctl
+Parameters: u64 mce_cap (out)
+Returns: 0 on success, -1 on error
+
+Returns supported MCE capabilities. The u64 mce_cap parameter
+has the same format as the MSR_IA32_MCG_CAP register. Supported
+capabilities will have the corresponding bits set.
+
+4.105 KVM_X86_SETUP_MCE
+
+Capability: KVM_CAP_MCE
+Architectures: x86
+Type: vcpu ioctl
+Parameters: u64 mcg_cap (in)
+Returns: 0 on success,
+         -EFAULT if u64 mcg_cap cannot be read,
+         -EINVAL if the requested number of banks is invalid,
+         -EINVAL if requested MCE capability is not supported.
+
+Initializes MCE support for use. The u64 mcg_cap parameter
+has the same format as the MSR_IA32_MCG_CAP register and
+specifies which capabilities should be enabled. The maximum
+supported number of error-reporting banks can be retrieved when
+checking for KVM_CAP_MCE. The supported capabilities can be
+retrieved with KVM_X86_GET_MCE_CAP_SUPPORTED.
+
+4.106 KVM_X86_SET_MCE
+
+Capability: KVM_CAP_MCE
+Architectures: x86
+Type: vcpu ioctl
+Parameters: struct kvm_x86_mce (in)
+Returns: 0 on success,
+         -EFAULT if struct kvm_x86_mce cannot be read,
+         -EINVAL if the bank number is invalid,
+         -EINVAL if VAL bit is not set in status field.
+
+Inject a machine check error (MCE) into the guest. The input
+parameter is:
+
+struct kvm_x86_mce {
+       __u64 status;
+       __u64 addr;
+       __u64 misc;
+       __u64 mcg_status;
+       __u8 bank;
+       __u8 pad1[7];
+       __u64 pad2[3];
+};
+
+If the MCE being reported is an uncorrected error, KVM will
+inject it as an MCE exception into the guest. If the guest
+MCG_STATUS register reports that an MCE is in progress, KVM
+causes a KVM_EXIT_SHUTDOWN vmexit.
+
+Otherwise, if the MCE is a corrected error, KVM will just
+store it in the corresponding bank (provided this bank is
+not holding a previously reported uncorrected error).
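+
+A minimal userspace sketch of injecting an error (illustrative; assumes
+vcpu_fd is a vcpu file descriptor on which KVM_X86_SETUP_MCE has already
+been called, and that bit 63 of the status field is the VAL bit, as in
+the MSR_IA32_MCi_STATUS layout):
+
+    struct kvm_x86_mce mce = {
+            .status = 1ULL << 63,  /* VAL: mark this entry valid */
+            .bank = 0,             /* must be below the configured bank count */
+    };
+    if (ioctl(vcpu_fd, KVM_X86_SET_MCE, &mce) < 0)
+            perror("KVM_X86_SET_MCE");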
+
 5. The kvm_run structure
 ------------------------
 
index 0e5543a920e5b2595f4d194462ba722ce25bd80d..bb2f945f87ab6a2e59f2e7503b7500c08c5427d9 100644 (file)
@@ -172,10 +172,6 @@ the same read(2) protocol as for the page fault notifications. The
 manager has to explicitly enable these events by setting appropriate
 bits in uffdio_api.features passed to UFFDIO_API ioctl:
 
-UFFD_FEATURE_EVENT_EXIT - enable notification about exit() of the
-non-cooperative process. When the monitored process exits, the uffd
-manager will get UFFD_EVENT_EXIT.
-
 UFFD_FEATURE_EVENT_FORK - enable userfaultfd hooks for fork(). When
 this feature is enabled, the userfaultfd context of the parent process
 is duplicated into the newly created process. The manager receives
index c265a5fe48481f548629079cb529137e0a377f31..2efb7df5f879c300c83f5a996d5d55ee6723e8ad 100644 (file)
@@ -896,12 +896,19 @@ F:        arch/arm64/boot/dts/apm/
 APPLIED MICRO (APM) X-GENE SOC ETHERNET DRIVER
 M:     Iyappan Subramanian <isubramanian@apm.com>
 M:     Keyur Chudgar <kchudgar@apm.com>
+M:     Quan Nguyen <qnguyen@apm.com>
 S:     Supported
 F:     drivers/net/ethernet/apm/xgene/
 F:     drivers/net/phy/mdio-xgene.c
 F:     Documentation/devicetree/bindings/net/apm-xgene-enet.txt
 F:     Documentation/devicetree/bindings/net/apm-xgene-mdio.txt
 
+APPLIED MICRO (APM) X-GENE SOC ETHERNET (V2) DRIVER
+M:     Iyappan Subramanian <isubramanian@apm.com>
+M:     Keyur Chudgar <kchudgar@apm.com>
+S:     Supported
+F:     drivers/net/ethernet/apm/xgene-v2/
+
 APPLIED MICRO (APM) X-GENE SOC PMU
 M:     Tai Nguyen <ttnguyen@apm.com>
 S:     Supported
@@ -3216,7 +3223,6 @@ F:        drivers/platform/chrome/
 
 CISCO VIC ETHERNET NIC DRIVER
 M:     Christian Benvenuti <benve@cisco.com>
-M:     Sujith Sankar <ssujith@cisco.com>
 M:     Govindarajulu Varadarajan <_govind@gmx.com>
 M:     Neel Patel <neepatel@cisco.com>
 S:     Supported
@@ -4776,6 +4782,12 @@ L:       linux-edac@vger.kernel.org
 S:     Maintained
 F:     drivers/edac/mpc85xx_edac.[ch]
 
+EDAC-PND2
+M:     Tony Luck <tony.luck@intel.com>
+L:     linux-edac@vger.kernel.org
+S:     Maintained
+F:     drivers/edac/pnd2_edac.[ch]
+
 EDAC-PASEMI
 M:     Egor Martovetsky <egor@pasemi.com>
 L:     linux-edac@vger.kernel.org
@@ -4923,6 +4935,7 @@ F:        include/linux/netfilter_bridge/
 F:     net/bridge/
 
 ETHERNET PHY LIBRARY
+M:     Andrew Lunn <andrew@lunn.ch>
 M:     Florian Fainelli <f.fainelli@gmail.com>
 L:     netdev@vger.kernel.org
 S:     Maintained
@@ -7084,9 +7097,9 @@ S:        Maintained
 F:     fs/autofs4/
 
 KERNEL BUILD + files below scripts/ (unless maintained elsewhere)
+M:     Masahiro Yamada <yamada.masahiro@socionext.com>
 M:     Michal Marek <mmarek@suse.com>
-T:     git git://git.kernel.org/pub/scm/linux/kernel/git/mmarek/kbuild.git for-next
-T:     git git://git.kernel.org/pub/scm/linux/kernel/git/mmarek/kbuild.git rc-fixes
+T:     git git://git.kernel.org/pub/scm/linux/kernel/git/masahiroy/linux-kbuild.git
 L:     linux-kbuild@vger.kernel.org
 S:     Maintained
 F:     Documentation/kbuild/
@@ -7774,13 +7787,6 @@ F:       include/net/mac80211.h
 F:     net/mac80211/
 F:     drivers/net/wireless/mac80211_hwsim.[ch]
 
-MACVLAN DRIVER
-M:     Patrick McHardy <kaber@trash.net>
-L:     netdev@vger.kernel.org
-S:     Maintained
-F:     drivers/net/macvlan.c
-F:     include/linux/if_macvlan.h
-
 MAILBOX API
 M:     Jassi Brar <jassisinghbrar@gmail.com>
 L:     linux-kernel@vger.kernel.org
@@ -7851,8 +7857,10 @@ S:       Maintained
 F:     drivers/net/ethernet/marvell/mvneta.*
 
 MARVELL MWIFIEX WIRELESS DRIVER
-M:     Amitkumar Karwar <akarwar@marvell.com>
+M:     Amitkumar Karwar <amitkarwar@gmail.com>
 M:     Nishant Sarmukadam <nishants@marvell.com>
+M:     Ganapathi Bhat <gbhat@marvell.com>
+M:     Xinming Hu <huxm@marvell.com>
 L:     linux-wireless@vger.kernel.org
 S:     Maintained
 F:     drivers/net/wireless/marvell/mwifiex/
@@ -8307,7 +8315,6 @@ M:        Richard Leitner <richard.leitner@skidata.com>
 L:     linux-usb@vger.kernel.org
 S:     Maintained
 F:     drivers/usb/misc/usb251xb.c
-F:     include/linux/platform_data/usb251xb.h
 F:     Documentation/devicetree/bindings/usb/usb251xb.txt
 
 MICROSOFT SURFACE PRO 3 BUTTON DRIVER
@@ -10815,6 +10822,7 @@ F:      drivers/s390/block/dasd*
 F:     block/partitions/ibm.c
 
 S390 NETWORK DRIVERS
+M:     Julian Wiedmann <jwi@linux.vnet.ibm.com>
 M:     Ursula Braun <ubraun@linux.vnet.ibm.com>
 L:     linux-s390@vger.kernel.org
 W:     http://www.ibm.com/developerworks/linux/linux390/
@@ -10845,6 +10853,7 @@ S:      Supported
 F:     drivers/s390/scsi/zfcp_*
 
 S390 IUCV NETWORK LAYER
+M:     Julian Wiedmann <jwi@linux.vnet.ibm.com>
 M:     Ursula Braun <ubraun@linux.vnet.ibm.com>
 L:     linux-s390@vger.kernel.org
 W:     http://www.ibm.com/developerworks/linux/linux390/
@@ -11062,6 +11071,12 @@ F:     include/linux/dma/dw.h
 F:     include/linux/platform_data/dma-dw.h
 F:     drivers/dma/dw/
 
+SYNOPSYS DESIGNWARE ENTERPRISE ETHERNET DRIVER
+M:     Jie Deng <jiedeng@synopsys.com>
+L:     netdev@vger.kernel.org
+S:     Supported
+F:     drivers/net/ethernet/synopsys/
+
 SYNOPSYS DESIGNWARE I2C DRIVER
 M:     Jarkko Nikula <jarkko.nikula@linux.intel.com>
 R:     Andy Shevchenko <andriy.shevchenko@linux.intel.com>
@@ -13384,14 +13399,6 @@ W:     https://linuxtv.org
 S:     Maintained
 F:     drivers/media/platform/vivid/*
 
-VLAN (802.1Q)
-M:     Patrick McHardy <kaber@trash.net>
-L:     netdev@vger.kernel.org
-S:     Maintained
-F:     drivers/net/macvlan.c
-F:     include/linux/if_*vlan.h
-F:     net/8021q/
-
 VLYNQ BUS
 M:     Florian Fainelli <f.fainelli@gmail.com>
 L:     openwrt-devel@lists.openwrt.org (subscribers-only)
index 4cb6b0a1152b5f57f783f0afa64207e956112c42..7acbcb324bae68497a34b8aea39e8ea5df26ded1 100644 (file)
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
 VERSION = 4
-PATCHLEVEL = 10
+PATCHLEVEL = 11
 SUBLEVEL = 0
-EXTRAVERSION =
+EXTRAVERSION = -rc5
 NAME = Fearless Coyote
 
 # *DOCUMENTATION*
@@ -372,7 +372,7 @@ LDFLAGS_MODULE  =
 CFLAGS_KERNEL  =
 AFLAGS_KERNEL  =
 LDFLAGS_vmlinux =
-CFLAGS_GCOV    = -fprofile-arcs -ftest-coverage -fno-tree-loop-im -Wno-maybe-uninitialized
+CFLAGS_GCOV    := -fprofile-arcs -ftest-coverage -fno-tree-loop-im $(call cc-disable-warning,maybe-uninitialized,)
 CFLAGS_KCOV    := $(call cc-option,-fsanitize-coverage=trace-pc,)
 
 
@@ -653,6 +653,12 @@ KBUILD_CFLAGS += $(call cc-ifversion, -lt, 0409, \
 # Tell gcc to never replace conditional load with a non-conditional one
 KBUILD_CFLAGS  += $(call cc-option,--param=allow-store-data-races=0)
 
+# check for 'asm goto'
+ifeq ($(shell $(CONFIG_SHELL) $(srctree)/scripts/gcc-goto.sh $(CC) $(KBUILD_CFLAGS)), y)
+       KBUILD_CFLAGS += -DCC_HAVE_ASM_GOTO
+       KBUILD_AFLAGS += -DCC_HAVE_ASM_GOTO
+endif
+
 include scripts/Makefile.gcc-plugins
 
 ifdef CONFIG_READABLE_ASM
@@ -798,12 +804,6 @@ KBUILD_CFLAGS   += $(call cc-option,-Werror=incompatible-pointer-types)
 # use the deterministic mode of AR if available
 KBUILD_ARFLAGS := $(call ar-option,D)
 
-# check for 'asm goto'
-ifeq ($(shell $(CONFIG_SHELL) $(srctree)/scripts/gcc-goto.sh $(CC) $(KBUILD_CFLAGS)), y)
-       KBUILD_CFLAGS += -DCC_HAVE_ASM_GOTO
-       KBUILD_AFLAGS += -DCC_HAVE_ASM_GOTO
-endif
-
 include scripts/Makefile.kasan
 include scripts/Makefile.extrawarn
 include scripts/Makefile.ubsan
index afc901b7a6f6e68c819aec1ab9199806f24fc1c1..148d7a32754e343397e84bb077afee8b82ecf3c6 100644 (file)
 
 #define SCM_TIMESTAMPING_OPT_STATS     54
 
+#define SO_MEMINFO             55
+
+#define SO_INCOMING_NAPI_ID    56
+
+#define SO_COOKIE              57
+
 #endif /* _UAPI_ASM_SOCKET_H */
index 65808fe0a290be15ded0903ad347540d0f35a52e..2891cb266cf0b5f855e4d89bcfa8aef194bd483f 100644 (file)
@@ -26,6 +26,7 @@
                        device_type = "cpu";
                        compatible = "snps,arc770d";
                        reg = <0>;
+                       clocks = <&core_clk>;
                };
        };
 
index 2dfe8037dfbb34ac680597619ecef27319bed1a2..5e944d3e5b74f61387b1f285e6837f96f5f66127 100644 (file)
@@ -21,6 +21,7 @@
                        device_type = "cpu";
                        compatible = "snps,archs38";
                        reg = <0>;
+                       clocks = <&core_clk>;
                };
        };
 
index 4c11079f3565a3decc7f6401a92950df16565411..54b277d7dea0e4594245aa50df0a3276a85a9916 100644 (file)
 
                cpu@0 {
                        device_type = "cpu";
-                       compatible = "snps,archs38xN";
+                       compatible = "snps,archs38";
                        reg = <0>;
+                       clocks = <&core_clk>;
+               };
+               cpu@1 {
+                       device_type = "cpu";
+                       compatible = "snps,archs38";
+                       reg = <1>;
+                       clocks = <&core_clk>;
+               };
+               cpu@2 {
+                       device_type = "cpu";
+                       compatible = "snps,archs38";
+                       reg = <2>;
+                       clocks = <&core_clk>;
+               };
+               cpu@3 {
+                       device_type = "cpu";
+                       compatible = "snps,archs38";
+                       reg = <3>;
+                       clocks = <&core_clk>;
                };
        };
 
index f0df59b23e21e473c6f2987593c1f7ccd9cc5958..459fc656b759aee977c560dcbbc996c6deed0cd3 100644 (file)
                        interrupts = <7>;
                        bus-width = <4>;
                };
+       };
 
-               /* Embedded Vision subsystem UIO mappings; only relevant for EV VDK */
-               uio_ev: uio@0xD0000000 {
-                       compatible = "generic-uio";
-                       reg = <0xD0000000 0x2000 0xD1000000 0x2000 0x90000000 0x10000000 0xC0000000 0x10000000>;
-                       reg-names = "ev_gsa", "ev_ctrl", "ev_shared_mem", "ev_code_mem";
-                       interrupts = <23>;
-               };
+       /*
+        * Embedded Vision subsystem UIO mappings; only relevant for EV VDK
+        *
+        * This node is intentionally put outside of MB above because
+        * it maps areas outside of MB's 0xEz-0xFz.
+        */
+       uio_ev: uio@0xD0000000 {
+               compatible = "generic-uio";
+               reg = <0xD0000000 0x2000 0xD1000000 0x2000 0x90000000 0x10000000 0xC0000000 0x10000000>;
+               reg-names = "ev_gsa", "ev_ctrl", "ev_shared_mem", "ev_code_mem";
+               interrupt-parent = <&mb_intc>;
+               interrupts = <23>;
        };
 };
index 317ff773e1ca5f4de6e7ab03dc1c9f184426a2d2..b18fcb6069082220b00790fbe6f7008d9d7aa570 100644 (file)
@@ -11,6 +11,7 @@
 #define _ASM_ARC_HUGEPAGE_H
 
 #include <linux/types.h>
+#define __ARCH_USE_5LEVEL_HACK
 #include <asm-generic/pgtable-nopmd.h>
 
 static inline pte_t pmd_pte(pmd_t pmd)
index 00bdbe167615ec2d97c7bccec66595217741cd83..2e52d18e6bc7ee3661d055c2ae6d98806478bb50 100644 (file)
@@ -54,9 +54,7 @@ int kprobe_fault_handler(struct pt_regs *regs, unsigned long cause);
 void kretprobe_trampoline(void);
 void trap_is_kprobe(unsigned long address, struct pt_regs *regs);
 #else
-static void trap_is_kprobe(unsigned long address, struct pt_regs *regs)
-{
-}
+#define trap_is_kprobe(address, regs)
 #endif /* CONFIG_KPROBES */
 
 #endif /* _ARC_KPROBES_H */
index e94ca72b974e7c7b31c2d631cb773ab3ad707b8d..ee22d40afef43b37dec7d93f0f1ee87060607f93 100644 (file)
@@ -37,6 +37,7 @@
 
 #include <asm/page.h>
 #include <asm/mmu.h>
+#define __ARCH_USE_5LEVEL_HACK
 #include <asm-generic/pgtable-nopmd.h>
 #include <linux/const.h>
 
index 2585632eaa6891d511252e1aee7cce4114725533..cc558a25b8fa690d1c72afed97f80161e4167db6 100644 (file)
@@ -100,15 +100,21 @@ END(handle_interrupt)
 ;################### Non TLB Exception Handling #############################
 
 ENTRY(EV_SWI)
-       flag 1
+       ; TODO: implement this
+       EXCEPTION_PROLOGUE
+       b   ret_from_exception
 END(EV_SWI)
 
 ENTRY(EV_DivZero)
-       flag 1
+       ; TODO: implement this
+       EXCEPTION_PROLOGUE
+       b   ret_from_exception
 END(EV_DivZero)
 
 ENTRY(EV_DCError)
-       flag 1
+       ; TODO: implement this
+       EXCEPTION_PROLOGUE
+       b   ret_from_exception
 END(EV_DCError)
 
 ; ---------------------------------------------
index 3093fa898a236a1b6ae18757bbaf3f954c49dc26..fa62404ba58f77ab9fc24489b35c15132d03d720 100644 (file)
@@ -10,6 +10,7 @@
 #include <linux/fs.h>
 #include <linux/delay.h>
 #include <linux/root_dev.h>
+#include <linux/clk.h>
 #include <linux/clk-provider.h>
 #include <linux/clocksource.h>
 #include <linux/console.h>
@@ -488,8 +489,9 @@ static int show_cpuinfo(struct seq_file *m, void *v)
 {
        char *str;
        int cpu_id = ptr_to_cpu(v);
-       struct device_node *core_clk = of_find_node_by_name(NULL, "core_clk");
-       u32 freq = 0;
+       struct device *cpu_dev = get_cpu_device(cpu_id);
+       struct clk *cpu_clk;
+       unsigned long freq = 0;
 
        if (!cpu_online(cpu_id)) {
                seq_printf(m, "processor [%d]\t: Offline\n", cpu_id);
@@ -502,9 +504,15 @@ static int show_cpuinfo(struct seq_file *m, void *v)
 
        seq_printf(m, arc_cpu_mumbojumbo(cpu_id, str, PAGE_SIZE));
 
-       of_property_read_u32(core_clk, "clock-frequency", &freq);
+       cpu_clk = clk_get(cpu_dev, NULL);
+       if (IS_ERR(cpu_clk)) {
+               seq_printf(m, "CPU speed \t: Cannot get clock for processor [%d]\n",
+                          cpu_id);
+       } else {
+               freq = clk_get_rate(cpu_clk);
+       }
        if (freq)
-               seq_printf(m, "CPU speed\t: %u.%02u Mhz\n",
+               seq_printf(m, "CPU speed\t: %lu.%02lu Mhz\n",
                           freq / 1000000, (freq / 10000) % 100);
 
        seq_printf(m, "Bogo MIPS\t: %lu.%02lu\n",
index d408fa21a07c9937a0e2956a6e12a7895ffef684..928562967f3cd02fee4a5de34e687d442d64a4e0 100644 (file)
@@ -633,6 +633,9 @@ noinline static void slc_entire_op(const int op)
 
        write_aux_reg(ARC_REG_SLC_INVALIDATE, 1);
 
+       /* Make sure "busy" bit reports correct status, see STAR 9001165532 */
+       read_aux_reg(r);
+
        /* Important to wait for flush to complete */
        while (read_aux_reg(r) & SLC_CTRL_BUSY);
 }
index 02981eae96b99413f28ddb76cdcc90c810c5ae37..1ec8e0d801912fbb080b7f3ff880d017f60bf320 100644 (file)
                        label = "home";
                        linux,code = <KEY_HOME>;
                        gpios = <&gpio3 7 GPIO_ACTIVE_HIGH>;
-                       gpio-key,wakeup;
+                       wakeup-source;
                };
 
                button@1 {
                        label = "menu";
                        linux,code = <KEY_MENU>;
                        gpios = <&gpio3 8 GPIO_ACTIVE_HIGH>;
-                       gpio-key,wakeup;
+                       wakeup-source;
                };
 
        };
index 0d341c545b010fb810b890166eae3aa7967313ea..e5ac1d81d15c9e482ab06b41830e5230a0187e69 100644 (file)
                        /* ID & VBUS GPIOs provided in board dts */
                };
        };
+
+       tpic2810: tpic2810@60 {
+               compatible = "ti,tpic2810";
+               reg = <0x60>;
+               gpio-controller;
+               #gpio-cells = <2>;
+       };
 };
 
 &mcspi3 {
                spi-max-frequency = <1000000>;
                spi-cpol;
        };
-
-       tpic2810: tpic2810@60 {
-               compatible = "ti,tpic2810";
-               reg = <0x60>;
-               gpio-controller;
-               #gpio-cells = <2>;
-       };
 };
 
 &uart3 {
index 0b4932cc02a8d8bb66165e40c11113647b92633f..c79c937b0a8aab92f2227deb8bf2d756542b57e8 100644 (file)
                };
 
                mac0: ethernet@1e660000 {
-                       compatible = "faraday,ftgmac100";
+                       compatible = "aspeed,ast2400-mac", "faraday,ftgmac100";
                        reg = <0x1e660000 0x180>;
                        interrupts = <2>;
-                       no-hw-checksum;
                        status = "disabled";
                };
 
                mac1: ethernet@1e680000 {
-                       compatible = "faraday,ftgmac100";
+                       compatible = "aspeed,ast2400-mac", "faraday,ftgmac100";
                        reg = <0x1e680000 0x180>;
                        interrupts = <3>;
-                       no-hw-checksum;
                        status = "disabled";
                };
 
index b664fe380936390a6f1ecdee49f157763e858513..b6596633036cd3a66c29e0c4c28663980fa7c200 100644 (file)
                };
 
                mac0: ethernet@1e660000 {
-                       compatible = "faraday,ftgmac100";
+                       compatible = "aspeed,ast2500-mac", "faraday,ftgmac100";
                        reg = <0x1e660000 0x180>;
                        interrupts = <2>;
-                       no-hw-checksum;
                        status = "disabled";
                };
 
                mac1: ethernet@1e680000 {
-                       compatible = "faraday,ftgmac100";
+                       compatible = "aspeed,ast2500-mac", "faraday,ftgmac100";
                        reg = <0x1e680000 0x180>;
                        interrupts = <3>;
-                       no-hw-checksum;
                        status = "disabled";
                };
 
index 4fbb089cf5ad3c1f96a15f504ee433ff06b724df..00de62dc0042f1445851d1cb84c2cbea3e28d25f 100644 (file)
                timer@20200 {
                        compatible = "arm,cortex-a9-global-timer";
                        reg = <0x20200 0x100>;
-                       interrupts = <GIC_PPI 11 IRQ_TYPE_LEVEL_HIGH>;
+                       interrupts = <GIC_PPI 11 IRQ_TYPE_EDGE_RISING>;
                        clocks = <&periph_clk>;
                };
 
                local-timer@20600 {
                        compatible = "arm,cortex-a9-twd-timer";
                        reg = <0x20600 0x100>;
-                       interrupts = <GIC_PPI 13 IRQ_TYPE_LEVEL_HIGH>;
+                       interrupts = <GIC_PPI 13 IRQ_TYPE_EDGE_RISING>;
                        clocks = <&periph_clk>;
                };
 
index bfd923096a8c1f5487b4bed2302ee8c784c395c7..ae31a5826e918ec58fc1948df0f9998664b7064f 100644 (file)
        };
 
        memory {
-               reg = <0x00000000 0x10000000>;
+               reg = <0x80000000 0x10000000>;
        };
 };
 
 &uart0 {
-       clock-frequency = <62499840>;
+       status = "okay";
 };
 
 &uart1 {
-       clock-frequency = <62499840>;
        status = "okay";
 };
index 3f04a40eb90cc904afb27fb71603270a8d8e1bea..df05e7f568af3e36bb2aa703d39fdbec37a6cece 100644 (file)
@@ -55,6 +55,7 @@
        gpio-restart {
                compatible = "gpio-restart";
                gpios = <&gpioa 15 GPIO_ACTIVE_LOW>;
+               open-source;
                priority = <200>;
        };
 };
index 9fd542200d3d52229e1a0651330a5ef48b2ee8a9..4a3ab19c62819fb8c57d9ccf406100b64b15c598 100644 (file)
@@ -55,6 +55,7 @@
        gpio-restart {
                compatible = "gpio-restart";
                gpios = <&gpioa 15 GPIO_ACTIVE_LOW>;
+               open-source;
                priority = <200>;
        };
 };
index 41e7fd350fcd1bbf6c36008c5f1e1c179679ffe0..81f78435d8c76cca38cb777d86e139c7d3751151 100644 (file)
@@ -55,6 +55,7 @@
        gpio-restart {
                compatible = "gpio-restart";
                gpios = <&gpioa 31 GPIO_ACTIVE_LOW>;
+               open-source;
                priority = <200>;
        };
 };
index 477c4860db52236fa12a70aef2235c302f4a2a45..c88b8fefcb2f13e3c9bd98321948c8988d8e428d 100644 (file)
@@ -55,6 +55,7 @@
        gpio-restart {
                compatible = "gpio-restart";
                gpios = <&gpioa 15 GPIO_ACTIVE_LOW>;
+               open-source;
                priority = <200>;
        };
 };
index c0a499d5ba447d5503d6655541461dddb9f35317..d503fa0dde310ff7597aeb7d3bfc757bcca32291 100644 (file)
@@ -55,6 +55,7 @@
        gpio-restart {
                compatible = "gpio-restart";
                gpios = <&gpioa 15 GPIO_ACTIVE_LOW>;
+               open-source;
                priority = <200>;
        };
 };
index f7eb5854a224486adbe98a33da6a4ae59b17a90a..cc0363b843c1a0ae777efa40a6f2e25b34cf1f4c 100644 (file)
@@ -55,6 +55,7 @@
        gpio-restart {
                compatible = "gpio-restart";
                gpios = <&gpioa 15 GPIO_ACTIVE_LOW>;
+               open-source;
                priority = <200>;
        };
 };
index 16666324fda8b5b901af538423ad302d13b9f98d..74e15a3cd9f8efb6a65238054824ba7e22df0bc2 100644 (file)
@@ -55,6 +55,7 @@
        gpio-restart {
                compatible = "gpio-restart";
                gpios = <&gpioa 15 GPIO_ACTIVE_LOW>;
+               open-source;
                priority = <200>;
        };
 };
index 49f466fe0b1dc2eecdaca3e6638dd80a12029bcd..dcfc9759143375decd12bea87ad5e7547837f3bd 100644 (file)
        };
 };
 
-&cpu0 {
-       arm-supply = <&sw1a_reg>;
-       soc-supply = <&sw1c_reg>;
-};
-
 &fec1 {
        pinctrl-names = "default";
        pinctrl-0 = <&pinctrl_enet1>;
index 22332be7214032fd3ba710def036168f3f9c2b3c..528b4e9c6d3d30928d363ffc6cf1d8d2275a8ce7 100644 (file)
                };
 
                usb1: ohci@00400000 {
-                       compatible = "atmel,sama5d2-ohci", "usb-ohci";
+                       compatible = "atmel,at91rm9200-ohci", "usb-ohci";
                        reg = <0x00400000 0x100000>;
                        interrupts = <41 IRQ_TYPE_LEVEL_HIGH 2>;
                        clocks = <&uhphs_clk>, <&uhphs_clk>, <&uhpck>;
index 82d8c477129359952b0ae499a2fdefca9393bdf4..162e1eb5373d3475fa785639223dd80750d5ca42 100644 (file)
@@ -14,6 +14,7 @@
 #include <dt-bindings/mfd/dbx500-prcmu.h>
 #include <dt-bindings/arm/ux500_pm_domains.h>
 #include <dt-bindings/gpio/gpio.h>
+#include <dt-bindings/clock/ste-ab8500.h>
 #include "skeleton.dtsi"
 
 / {
                                interrupt-controller;
                                #interrupt-cells = <2>;
 
+                               ab8500_clock: clock-controller {
+                                       compatible = "stericsson,ab8500-clk";
+                                       #clock-cells = <1>;
+                               };
+
                                ab8500_gpio: ab8500-gpio {
                                        compatible = "stericsson,ab8500-gpio";
                                        gpio-controller;
 
                                ab8500-pwm {
                                        compatible = "stericsson,ab8500-pwm";
+                                       clocks = <&ab8500_clock AB8500_SYSCLK_INT>;
+                                       clock-names = "intclk";
                                };
 
                                ab8500-debugfs {
                                        V-AMIC2-supply = <&ab8500_ldo_anamic2_reg>;
                                        V-DMIC-supply = <&ab8500_ldo_dmic_reg>;
 
+                                       clocks = <&ab8500_clock AB8500_SYSCLK_AUDIO>;
+                                       clock-names = "audioclk";
+
                                        stericsson,earpeice-cmv = <950>; /* Units in mV. */
                                };
 
                        status = "disabled";
                };
 
+               sound {
+                       compatible = "stericsson,snd-soc-mop500";
+                       stericsson,cpu-dai = <&msp1 &msp3>;
+                       stericsson,audio-codec = <&codec>;
+                       clocks = <&prcmu_clk PRCMU_SYSCLK>, <&ab8500_clock AB8500_SYSCLK_ULP>, <&ab8500_clock AB8500_SYSCLK_INT>;
+                       clock-names = "sysclk", "ulpclk", "intclk";
+               };
+
                msp0: msp@80123000 {
                        compatible = "stericsson,ux500-msp-i2s";
                        reg = <0x80123000 0x1000>;
index f37f9e10713cc878ce9ee0393aa4b0c6ad239adf..9e359e4f342e76ebd1fbfab57d1f8f427ece2cd7 100644 (file)
                        status = "okay";
                };
 
-               sound {
-                       compatible = "stericsson,snd-soc-mop500";
-
-                       stericsson,cpu-dai = <&msp1 &msp3>;
-                       stericsson,audio-codec = <&codec>;
-                       clocks = <&prcmu_clk PRCMU_SYSCLK>;
-                       clock-names = "sysclk";
-               };
-
                msp0: msp@80123000 {
                        pinctrl-names = "default";
                        pinctrl-0 = <&msp0_default_mode>;
index dd5514def6042470aabea1ee29853d6c713c2691..ade1d0d4e5f45c595f079c251c8b93dbe95742df 100644 (file)
                                     "", "", "", "", "", "", "", "";
                };
 
-               sound {
-                       compatible = "stericsson,snd-soc-mop500";
-
-                       stericsson,cpu-dai = <&msp1 &msp3>;
-                       stericsson,audio-codec = <&codec>;
-                       clocks = <&prcmu_clk PRCMU_SYSCLK>;
-                       clock-names = "sysclk";
-               };
-
                msp0: msp@80123000 {
                        pinctrl-names = "default";
                        pinctrl-0 = <&msp0_default_mode>;
index 72ec0d5ae052cda33bc05a0086cd7e34a684184c..bbf1c8cbaac6aa19a6acd946a0230220e4e11417 100644 (file)
                                        reg = <8>;
                                        label = "cpu";
                                        ethernet = <&gmac>;
-                                       phy-mode = "rgmii";
+                                       phy-mode = "rgmii-txid";
                                        fixed-link {
                                                speed = <1000>;
                                                full-duplex;
index a952cc0703cc172b3a50ab334b1eec6b44a93df8..8a3ed21cb7bcfcf4785784bcb66d10aafd2081e7 100644 (file)
                        resets = <&ccu RST_BUS_GPU>;
 
                        assigned-clocks = <&ccu CLK_GPU>;
-                       assigned-clock-rates = <408000000>;
+                       assigned-clock-rates = <384000000>;
                };
 
                gic: interrupt-controller@01c81000 {
index 18c174fef84f512c18145e795276af099d8de07b..0467fb365bfca714b5ce9021974470002139039f 100644 (file)
                simple-audio-card,mclk-fs = <512>;
                simple-audio-card,aux-devs = <&codec_analog>;
                simple-audio-card,routing =
-                       "Left DAC", "Digital Left DAC",
-                       "Right DAC", "Digital Right DAC";
+                       "Left DAC", "AIF1 Slot 0 Left",
+                       "Right DAC", "AIF1 Slot 0 Right";
                status = "disabled";
 
                simple-audio-card,cpu {
index 7097c18ff487d4851ca5d76d73e018b75aed7cb2..d6bd15898db6d6cc880fe0abb4999712450dc50e 100644 (file)
@@ -50,8 +50,6 @@
 
        backlight: backlight {
                compatible = "pwm-backlight";
-               pinctrl-names = "default";
-               pinctrl-0 = <&bl_en_pin>;
                pwms = <&pwm 0 50000 PWM_POLARITY_INVERTED>;
                brightness-levels = <0 10 20 30 40 50 60 70 80 90 100>;
                default-brightness-level = <8>;
 };
 
 &pio {
-       bl_en_pin: bl_en_pin@0 {
-               pins = "PH6";
-               function = "gpio_in";
-       };
-
        mmc0_cd_pin: mmc0_cd_pin@0 {
                pins = "PB4";
                function = "gpio_in";
index f2462a6bdba6e7fcfc8c04815dd763bdcd5b3069..decd388d613d7e1d65c5d194f77fb23061ed37e0 100644 (file)
@@ -188,6 +188,7 @@ CONFIG_WL12XX=m
 CONFIG_WL18XX=m
 CONFIG_WLCORE_SPI=m
 CONFIG_WLCORE_SDIO=m
+CONFIG_INPUT_MOUSEDEV=m
 CONFIG_INPUT_JOYDEV=m
 CONFIG_INPUT_EVDEV=m
 CONFIG_KEYBOARD_ATKBD=m
index e22089fb44dc86b7ed2fdb175bc6ec7b47ee4001..a3f0b3d500895b349004921b5f1b9435a45a0f5b 100644 (file)
 #define HSR_EC_IABT_HYP        (0x21)
 #define HSR_EC_DABT    (0x24)
 #define HSR_EC_DABT_HYP        (0x25)
+#define HSR_EC_MAX     (0x3f)
 
 #define HSR_WFI_IS_WFE         (_AC(1, UL) << 0)
 
index cc495d799c67643c58e136249197a06736299339..31ee468ce667dee8a219f775f1106714879088c2 100644 (file)
@@ -30,7 +30,6 @@
 #define __KVM_HAVE_ARCH_INTC_INITIALIZED
 
 #define KVM_USER_MEM_SLOTS 32
-#define KVM_PRIVATE_MEM_SLOTS 4
 #define KVM_COALESCED_MMIO_PAGE_OFFSET 1
 #define KVM_HAVE_ONE_REG
 #define KVM_HALT_POLL_NS_DEFAULT 500000
index a8d656d9aec715f5ddcea1295c54923b82c49be9..1c462381c225eea31346ec4f19145e3fd449caab 100644 (file)
@@ -20,6 +20,7 @@
 
 #else
 
+#define __ARCH_USE_5LEVEL_HACK
 #include <asm-generic/pgtable-nopud.h>
 #include <asm/memory.h>
 #include <asm/pgtable-hwdef.h>
index c9a2103faeb9acf82f0c26164085506f14015822..96dba7cd8be7b4b6f29d9896e2d4515c477ca963 100644 (file)
@@ -221,6 +221,9 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
        case KVM_CAP_MAX_VCPUS:
                r = KVM_MAX_VCPUS;
                break;
+       case KVM_CAP_NR_MEMSLOTS:
+               r = KVM_USER_MEM_SLOTS;
+               break;
        case KVM_CAP_MSI_DEVID:
                if (!kvm)
                        r = -EINVAL;
index 4e40d1955e35341b7756efe72f2da6bf2360b224..96af65a30d78b1e09182d8e41f8b8e3ff4aae81e 100644 (file)
@@ -79,7 +79,19 @@ static int kvm_handle_wfx(struct kvm_vcpu *vcpu, struct kvm_run *run)
        return 1;
 }
 
+static int kvm_handle_unknown_ec(struct kvm_vcpu *vcpu, struct kvm_run *run)
+{
+       u32 hsr = kvm_vcpu_get_hsr(vcpu);
+
+       kvm_pr_unimpl("Unknown exception class: hsr: %#08x\n",
+                     hsr);
+
+       kvm_inject_undefined(vcpu);
+       return 1;
+}
+
 static exit_handle_fn arm_exit_handlers[] = {
+       [0 ... HSR_EC_MAX]      = kvm_handle_unknown_ec,
        [HSR_EC_WFI]            = kvm_handle_wfx,
        [HSR_EC_CP15_32]        = kvm_handle_cp15_32,
        [HSR_EC_CP15_64]        = kvm_handle_cp15_64,
@@ -98,13 +110,6 @@ static exit_handle_fn kvm_get_exit_handler(struct kvm_vcpu *vcpu)
 {
        u8 hsr_ec = kvm_vcpu_trap_get_class(vcpu);
 
-       if (hsr_ec >= ARRAY_SIZE(arm_exit_handlers) ||
-           !arm_exit_handlers[hsr_ec]) {
-               kvm_err("Unknown exception class: hsr: %#08x\n",
-                       (unsigned int)kvm_vcpu_get_hsr(vcpu));
-               BUG();
-       }
-
        return arm_exit_handlers[hsr_ec];
 }
 
index 3d89b7905bd903687b481fccab7249382c2428c6..a277981f414d8dd9433569c529068f206daab81f 100644 (file)
@@ -289,6 +289,22 @@ static void at91_ddr_standby(void)
                at91_ramc_write(1, AT91_DDRSDRC_LPR, saved_lpr1);
 }
 
+static void sama5d3_ddr_standby(void)
+{
+       u32 lpr0;
+       u32 saved_lpr0;
+
+       saved_lpr0 = at91_ramc_read(0, AT91_DDRSDRC_LPR);
+       lpr0 = saved_lpr0 & ~AT91_DDRSDRC_LPCB;
+       lpr0 |= AT91_DDRSDRC_LPCB_POWER_DOWN;
+
+       at91_ramc_write(0, AT91_DDRSDRC_LPR, lpr0);
+
+       cpu_do_idle();
+
+       at91_ramc_write(0, AT91_DDRSDRC_LPR, saved_lpr0);
+}
+
 /* We manage both DDRAM/SDRAM controllers, we need more than one value to
  * remember.
  */
@@ -323,7 +339,7 @@ static const struct of_device_id const ramc_ids[] __initconst = {
        { .compatible = "atmel,at91rm9200-sdramc", .data = at91rm9200_standby },
        { .compatible = "atmel,at91sam9260-sdramc", .data = at91sam9_sdram_standby },
        { .compatible = "atmel,at91sam9g45-ddramc", .data = at91_ddr_standby },
-       { .compatible = "atmel,sama5d3-ddramc", .data = at91_ddr_standby },
+       { .compatible = "atmel,sama5d3-ddramc", .data = sama5d3_ddr_standby },
        { /*sentinel*/ }
 };
 
index 093458b62c8dadbcc3c7cc1c3b66d84e59af3d8d..c89757abb0ae4bc82adf923a5947f2c0fbefb42c 100644 (file)
@@ -241,6 +241,3 @@ obj-$(CONFIG_MACH_OMAP2_TUSB6010)   += usb-tusb6010.o
 
 onenand-$(CONFIG_MTD_ONENAND_OMAP2)    := gpmc-onenand.o
 obj-y                                  += $(onenand-m) $(onenand-y)
-
-nand-$(CONFIG_MTD_NAND_OMAP2)          := gpmc-nand.o
-obj-y                                  += $(nand-m) $(nand-y)
diff --git a/arch/arm/mach-omap2/gpmc-nand.c b/arch/arm/mach-omap2/gpmc-nand.c
deleted file mode 100644 (file)
index f6ac027..0000000
+++ /dev/null
@@ -1,154 +0,0 @@
-/*
- * gpmc-nand.c
- *
- * Copyright (C) 2009 Texas Instruments
- * Vimal Singh <vimalsingh@ti.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#include <linux/kernel.h>
-#include <linux/platform_device.h>
-#include <linux/io.h>
-#include <linux/omap-gpmc.h>
-#include <linux/mtd/nand.h>
-#include <linux/platform_data/mtd-nand-omap2.h>
-
-#include <asm/mach/flash.h>
-
-#include "soc.h"
-
-/* minimum size for IO mapping */
-#define        NAND_IO_SIZE    4
-
-static bool gpmc_hwecc_bch_capable(enum omap_ecc ecc_opt)
-{
-       /* platforms which support all ECC schemes */
-       if (soc_is_am33xx() || soc_is_am43xx() || cpu_is_omap44xx() ||
-                soc_is_omap54xx() || soc_is_dra7xx())
-               return 1;
-
-       if (ecc_opt == OMAP_ECC_BCH4_CODE_HW_DETECTION_SW ||
-                ecc_opt == OMAP_ECC_BCH8_CODE_HW_DETECTION_SW) {
-               if (cpu_is_omap24xx())
-                       return 0;
-               else if (cpu_is_omap3630() && (GET_OMAP_REVISION() == 0))
-                       return 0;
-               else
-                       return 1;
-       }
-
-       /* OMAP3xxx do not have ELM engine, so cannot support ECC schemes
-        * which require H/W based ECC error detection */
-       if ((cpu_is_omap34xx() || cpu_is_omap3630()) &&
-           ((ecc_opt == OMAP_ECC_BCH4_CODE_HW) ||
-                (ecc_opt == OMAP_ECC_BCH8_CODE_HW)))
-               return 0;
-
-       /* legacy platforms support only HAM1 (1-bit Hamming) ECC scheme */
-       if (ecc_opt == OMAP_ECC_HAM1_CODE_HW ||
-           ecc_opt == OMAP_ECC_HAM1_CODE_SW)
-               return 1;
-       else
-               return 0;
-}
-
-/* This function will go away once the device-tree convertion is complete */
-static void gpmc_set_legacy(struct omap_nand_platform_data *gpmc_nand_data,
-                           struct gpmc_settings *s)
-{
-       /* Enable RD PIN Monitoring Reg */
-       if (gpmc_nand_data->dev_ready) {
-               s->wait_on_read = true;
-               s->wait_on_write = true;
-       }
-
-       if (gpmc_nand_data->devsize == NAND_BUSWIDTH_16)
-               s->device_width = GPMC_DEVWIDTH_16BIT;
-       else
-               s->device_width = GPMC_DEVWIDTH_8BIT;
-}
-
-int gpmc_nand_init(struct omap_nand_platform_data *gpmc_nand_data,
-                  struct gpmc_timings *gpmc_t)
-{
-       int err = 0;
-       struct gpmc_settings s;
-       struct platform_device *pdev;
-       struct resource gpmc_nand_res[] = {
-               { .flags = IORESOURCE_MEM, },
-               { .flags = IORESOURCE_IRQ, },
-               { .flags = IORESOURCE_IRQ, },
-       };
-
-       BUG_ON(gpmc_nand_data->cs >= GPMC_CS_NUM);
-
-       err = gpmc_cs_request(gpmc_nand_data->cs, NAND_IO_SIZE,
-                             (unsigned long *)&gpmc_nand_res[0].start);
-       if (err < 0) {
-               pr_err("omap2-gpmc: Cannot request GPMC CS %d, error %d\n",
-                      gpmc_nand_data->cs, err);
-               return err;
-       }
-       gpmc_nand_res[0].end = gpmc_nand_res[0].start + NAND_IO_SIZE - 1;
-       gpmc_nand_res[1].start = gpmc_get_client_irq(GPMC_IRQ_FIFOEVENTENABLE);
-       gpmc_nand_res[2].start = gpmc_get_client_irq(GPMC_IRQ_COUNT_EVENT);
-
-       memset(&s, 0, sizeof(struct gpmc_settings));
-       gpmc_set_legacy(gpmc_nand_data, &s);
-
-       s.device_nand = true;
-
-       if (gpmc_t) {
-               err = gpmc_cs_set_timings(gpmc_nand_data->cs, gpmc_t, &s);
-               if (err < 0) {
-                       pr_err("omap2-gpmc: Unable to set gpmc timings: %d\n",
-                              err);
-                       return err;
-               }
-       }
-
-       err = gpmc_cs_program_settings(gpmc_nand_data->cs, &s);
-       if (err < 0)
-               goto out_free_cs;
-
-       err = gpmc_configure(GPMC_CONFIG_WP, 0);
-       if (err < 0)
-               goto out_free_cs;
-
-       if (!gpmc_hwecc_bch_capable(gpmc_nand_data->ecc_opt)) {
-               pr_err("omap2-nand: Unsupported NAND ECC scheme selected\n");
-               err = -EINVAL;
-               goto out_free_cs;
-       }
-
-
-       pdev = platform_device_alloc("omap2-nand", gpmc_nand_data->cs);
-       if (pdev) {
-               err = platform_device_add_resources(pdev, gpmc_nand_res,
-                                                   ARRAY_SIZE(gpmc_nand_res));
-               if (!err)
-                       pdev->dev.platform_data = gpmc_nand_data;
-       } else {
-               err = -ENOMEM;
-       }
-       if (err)
-               goto out_free_pdev;
-
-       err = platform_device_add(pdev);
-       if (err) {
-               dev_err(&pdev->dev, "Unable to register NAND device\n");
-               goto out_free_pdev;
-       }
-
-       return 0;
-
-out_free_pdev:
-       platform_device_put(pdev);
-out_free_cs:
-       gpmc_cs_free(gpmc_nand_data->cs);
-
-       return err;
-}
index 8633c703546a65c2e5b0071ffca0d5a12b664884..2944af82055847935462da4035a73b513c5795a6 100644 (file)
@@ -367,7 +367,7 @@ static int gpmc_onenand_setup(void __iomem *onenand_base, int *freq_ptr)
        return ret;
 }
 
-void gpmc_onenand_init(struct omap_onenand_platform_data *_onenand_data)
+int gpmc_onenand_init(struct omap_onenand_platform_data *_onenand_data)
 {
        int err;
        struct device *dev = &gpmc_onenand_device.dev;
@@ -393,15 +393,17 @@ void gpmc_onenand_init(struct omap_onenand_platform_data *_onenand_data)
        if (err < 0) {
                dev_err(dev, "Cannot request GPMC CS %d, error %d\n",
                        gpmc_onenand_data->cs, err);
-               return;
+               return err;
        }
 
        gpmc_onenand_resource.end = gpmc_onenand_resource.start +
                                                        ONENAND_IO_SIZE - 1;
 
-       if (platform_device_register(&gpmc_onenand_device) < 0) {
+       err = platform_device_register(&gpmc_onenand_device);
+       if (err) {
                dev_err(dev, "Unable to register OneNAND device\n");
                gpmc_cs_free(gpmc_onenand_data->cs);
-               return;
        }
+
+       return err;
 }
index fe36ce2734d47a81f8dd5cbfdf72e9f6717df903..4c6f14cf92a82e3dbf8d7ceeb985686feb79e924 100644 (file)
@@ -17,6 +17,7 @@
 
 #include <linux/linkage.h>
 #include <linux/init.h>
+#include <asm/assembler.h>
 
 #include "omap44xx.h"
 
@@ -66,7 +67,7 @@ wait_2:       ldr     r2, =AUX_CORE_BOOT0_PA  @ read from AuxCoreBoot0
        cmp     r0, r4
        bne     wait_2
        ldr     r12, =API_HYP_ENTRY
-       adr     r0, hyp_boot
+       badr    r0, hyp_boot
        smc     #0
 hyp_boot:
        b       omap_secondary_startup
index 56f917ec8621e8d5d4a9410975ec454aa802414a..1435fee39a89ba18239291859c97f0f333ca877f 100644 (file)
@@ -2112,11 +2112,20 @@ static struct omap_hwmod_ocp_if omap3_l4_core__i2c3 = {
 };
 
 /* L4 CORE -> SR1 interface */
+static struct omap_hwmod_addr_space omap3_sr1_addr_space[] = {
+       {
+               .pa_start       = OMAP34XX_SR1_BASE,
+               .pa_end         = OMAP34XX_SR1_BASE + SZ_1K - 1,
+               .flags          = ADDR_TYPE_RT,
+       },
+       { },
+};
 
 static struct omap_hwmod_ocp_if omap34xx_l4_core__sr1 = {
        .master         = &omap3xxx_l4_core_hwmod,
        .slave          = &omap34xx_sr1_hwmod,
        .clk            = "sr_l4_ick",
+       .addr           = omap3_sr1_addr_space,
        .user           = OCP_USER_MPU,
 };
 
@@ -2124,15 +2133,25 @@ static struct omap_hwmod_ocp_if omap36xx_l4_core__sr1 = {
        .master         = &omap3xxx_l4_core_hwmod,
        .slave          = &omap36xx_sr1_hwmod,
        .clk            = "sr_l4_ick",
+       .addr           = omap3_sr1_addr_space,
        .user           = OCP_USER_MPU,
 };
 
 /* L4 CORE -> SR1 interface */
+static struct omap_hwmod_addr_space omap3_sr2_addr_space[] = {
+       {
+               .pa_start       = OMAP34XX_SR2_BASE,
+               .pa_end         = OMAP34XX_SR2_BASE + SZ_1K - 1,
+               .flags          = ADDR_TYPE_RT,
+       },
+       { },
+};
 
 static struct omap_hwmod_ocp_if omap34xx_l4_core__sr2 = {
        .master         = &omap3xxx_l4_core_hwmod,
        .slave          = &omap34xx_sr2_hwmod,
        .clk            = "sr_l4_ick",
+       .addr           = omap3_sr2_addr_space,
        .user           = OCP_USER_MPU,
 };
 
@@ -2140,6 +2159,7 @@ static struct omap_hwmod_ocp_if omap36xx_l4_core__sr2 = {
        .master         = &omap3xxx_l4_core_hwmod,
        .slave          = &omap36xx_sr2_hwmod,
        .clk            = "sr_l4_ick",
+       .addr           = omap3_sr2_addr_space,
        .user           = OCP_USER_MPU,
 };
 
@@ -3111,16 +3131,20 @@ static struct omap_hwmod_ocp_if *omap3xxx_dss_hwmod_ocp_ifs[] __initdata = {
  * Return: 0 if device named @dev_name is not likely to be accessible,
  * or 1 if it is likely to be accessible.
  */
-static int __init omap3xxx_hwmod_is_hs_ip_block_usable(struct device_node *bus,
-                                                      const char *dev_name)
+static bool __init omap3xxx_hwmod_is_hs_ip_block_usable(struct device_node *bus,
+                                                       const char *dev_name)
 {
+       struct device_node *node;
+       bool available;
+
        if (!bus)
-               return (omap_type() == OMAP2_DEVICE_TYPE_GP) ? 1 : 0;
+               return omap_type() == OMAP2_DEVICE_TYPE_GP;
 
-       if (of_device_is_available(of_find_node_by_name(bus, dev_name)))
-               return 1;
+       node = of_get_child_by_name(bus, dev_name);
+       available = of_device_is_available(node);
+       of_node_put(node);
 
-       return 0;
+       return available;
 }
 
 int __init omap3xxx_hwmod_init(void)
@@ -3189,15 +3213,20 @@ int __init omap3xxx_hwmod_init(void)
 
        if (h_sham && omap3xxx_hwmod_is_hs_ip_block_usable(bus, "sham")) {
                r = omap_hwmod_register_links(h_sham);
-               if (r < 0)
+               if (r < 0) {
+                       of_node_put(bus);
                        return r;
+               }
        }
 
        if (h_aes && omap3xxx_hwmod_is_hs_ip_block_usable(bus, "aes")) {
                r = omap_hwmod_register_links(h_aes);
-               if (r < 0)
+               if (r < 0) {
+                       of_node_put(bus);
                        return r;
+               }
        }
+       of_node_put(bus);
 
        /*
         * Register hwmod links specific to certain ES levels of a
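
Note: the two hunks above fix a device-tree node leak. The global of_find_node_by_name() lookup is replaced by a child-scoped of_get_child_by_name(), the returned node reference is dropped with of_node_put(), and the callers now put the bus node on every exit path. A sketch of the balanced get/put pattern (needs linux/of.h; "sham" is a placeholder child name):

	/* of_device_is_available(NULL) and of_node_put(NULL) are both safe,
	 * so the lookup result can be handled without an explicit NULL test. */
	static bool __init child_available(struct device_node *bus)
	{
		struct device_node *node = of_get_child_by_name(bus, "sham");
		bool available = of_device_is_available(node);

		of_node_put(node);
		return available;
	}
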
index 3c2cb5d5adfa4f17bab53005b7722ffe8add022e..0bb0e9c6376c4aab7bb1ad43c2bd4fce87cef943 100644
 394    common  pkey_mprotect           sys_pkey_mprotect
 395    common  pkey_alloc              sys_pkey_alloc
 396    common  pkey_free               sys_pkey_free
+397    common  statx                   sys_statx
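
Note: this wires up the new statx(2) syscall as number 397 on 32-bit ARM (the arm64 compat table later in this diff does the same). A hedged userspace sketch of exercising it through syscall(2) — at the time of this merge most libcs ship no wrapper, and the fallback define assumes the 32-bit ARM numbering above:

	#include <stdio.h>
	#include <unistd.h>
	#include <sys/syscall.h>
	#include <fcntl.h>
	#include <linux/stat.h>		/* struct statx, STATX_* masks */

	#ifndef __NR_statx
	#define __NR_statx 397		/* 32-bit ARM; other arches differ */
	#endif

	int main(void)
	{
		struct statx stx;

		/* Query only the basic stats for the current directory. */
		if (syscall(__NR_statx, AT_FDCWD, ".", 0, STATX_BASIC_STATS, &stx))
			return 1;
		printf("size=%llu\n", (unsigned long long)stx.stx_size);
		return 0;
	}
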
index ce18c91b50a1cbac3fb6d38af60c63af9b031185..f0325d96b97aed734f86deb3a9f5b3266b5a16ed 100644
@@ -198,6 +198,8 @@ static const struct dma_map_ops xen_swiotlb_dma_ops = {
        .unmap_page = xen_swiotlb_unmap_page,
        .dma_supported = xen_swiotlb_dma_supported,
        .set_dma_mask = xen_swiotlb_set_dma_mask,
+       .mmap = xen_swiotlb_dma_mmap,
+       .get_sgtable = xen_swiotlb_get_sgtable,
 };
 
 int __init xen_mm_init(void)
index a39029b5414eb25f23f3409f74a4d84713a02c4f..3741859765cfe050d2c4a174d613ff90e1074be0 100644
@@ -508,6 +508,16 @@ config QCOM_FALKOR_ERRATUM_1009
 
          If unsure, say Y.
 
+config QCOM_QDF2400_ERRATUM_0065
+       bool "QDF2400 E0065: Incorrect GITS_TYPER.ITT_Entry_size"
+       default y
+       help
+         On Qualcomm Datacenter Technologies QDF2400 SoC, ITS hardware reports
+         ITE size incorrectly. The GITS_TYPER.ITT_Entry_size field should have
+         been indicated as 16Bytes (0xf), not 8Bytes (0x7).
+
+         If unsure, say Y.
+
 endmenu
 
 
@@ -1063,6 +1073,10 @@ config SYSVIPC_COMPAT
        def_bool y
        depends on COMPAT && SYSVIPC
 
+config KEYS_COMPAT
+       def_bool y
+       depends on COMPAT && KEYS
+
 endmenu
 
 menu "Power management options"
index 9f9e203c09c5ad362ae00d5038d2e52c91043f39..bcb03fc3266552e22ce855ac81677584d0937e63 100644
        pcie0: pcie@20020000 {
                compatible = "brcm,iproc-pcie";
                reg = <0 0x20020000 0 0x1000>;
+               dma-coherent;
 
                #interrupt-cells = <1>;
                interrupt-map-mask = <0 0 0 0>;
        pcie4: pcie@50020000 {
                compatible = "brcm,iproc-pcie";
                reg = <0 0x50020000 0 0x1000>;
+               dma-coherent;
 
                #interrupt-cells = <1>;
                interrupt-map-mask = <0 0 0 0>;
        pcie8: pcie@60c00000 {
                compatible = "brcm,iproc-pcie-paxc";
                reg = <0 0x60c00000 0 0x1000>;
+               dma-coherent;
                linux,pci-domain = <8>;
 
                bus-range = <0x0 0x1>;
                              <0x61030000 0x100>;
                        reg-names = "amac_base", "idm_base", "nicpm_base";
                        interrupts = <GIC_SPI 341 IRQ_TYPE_LEVEL_HIGH>;
+                       dma-coherent;
                        phy-handle = <&gphy0>;
                        phy-mode = "rgmii";
                        status = "disabled";
                        reg = <0x612c0000 0x445>;  /* PDC FS0 regs */
                        interrupts = <GIC_SPI 187 IRQ_TYPE_LEVEL_HIGH>;
                        #mbox-cells = <1>;
+                       dma-coherent;
                        brcm,rx-status-len = <32>;
                        brcm,use-bcm-hdr;
                };
                        reg = <0x612e0000 0x445>;  /* PDC FS1 regs */
                        interrupts = <GIC_SPI 189 IRQ_TYPE_LEVEL_HIGH>;
                        #mbox-cells = <1>;
+                       dma-coherent;
                        brcm,rx-status-len = <32>;
                        brcm,use-bcm-hdr;
                };
                        reg = <0x61300000 0x445>;  /* PDC FS2 regs */
                        interrupts = <GIC_SPI 191 IRQ_TYPE_LEVEL_HIGH>;
                        #mbox-cells = <1>;
+                       dma-coherent;
                        brcm,rx-status-len = <32>;
                        brcm,use-bcm-hdr;
                };
                        reg = <0x61320000 0x445>;  /* PDC FS3 regs */
                        interrupts = <GIC_SPI 193 IRQ_TYPE_LEVEL_HIGH>;
                        #mbox-cells = <1>;
+                       dma-coherent;
                        brcm,rx-status-len = <32>;
                        brcm,use-bcm-hdr;
                };
                sata: ahci@663f2000 {
                        compatible = "brcm,iproc-ahci", "generic-ahci";
                        reg = <0x663f2000 0x1000>;
+                       dma-coherent;
                        reg-names = "ahci";
                        interrupts = <GIC_SPI 438 IRQ_TYPE_LEVEL_HIGH>;
                        #address-cells = <1>;
                        compatible = "brcm,sdhci-iproc-cygnus";
                        reg = <0x66420000 0x100>;
                        interrupts = <GIC_SPI 421 IRQ_TYPE_LEVEL_HIGH>;
+                       dma-coherent;
                        bus-width = <8>;
                        clocks = <&genpll_sw BCM_NS2_GENPLL_SW_SDIO_CLK>;
                        status = "disabled";
                        compatible = "brcm,sdhci-iproc-cygnus";
                        reg = <0x66430000 0x100>;
                        interrupts = <GIC_SPI 422 IRQ_TYPE_LEVEL_HIGH>;
+                       dma-coherent;
                        bus-width = <8>;
                        clocks = <&genpll_sw BCM_NS2_GENPLL_SW_SDIO_CLK>;
                        status = "disabled";
index 05310ad8c5abec54a445cb2dfcd3df5fefcefe3a..f31c48d0cd6873f399a6d8f5f861e98fa3f66e10 100644
@@ -251,7 +251,7 @@ static inline bool system_supports_fpsimd(void)
 static inline bool system_uses_ttbr0_pan(void)
 {
        return IS_ENABLED(CONFIG_ARM64_SW_TTBR0_PAN) &&
-               !cpus_have_cap(ARM64_HAS_PAN);
+               !cpus_have_const_cap(ARM64_HAS_PAN);
 }
 
 #endif /* __ASSEMBLY__ */
index 86c404171305abd290a6a85d7a5edd69c55ecd02..f6580d4afb0e0c4e242e5171ecda679d927286d2 100644
@@ -3,8 +3,6 @@
 
 #include <linux/compiler.h>
 
-#include <asm/sysreg.h>
-
 #ifndef __ASSEMBLY__
 
 struct task_struct;
index f21fd38943708f1f6b69f1431fd07538ff26a993..e7705e7bb07b133de4da9b2809a152f94ceb0b4b 100644
@@ -30,8 +30,7 @@
 
 #define __KVM_HAVE_ARCH_INTC_INITIALIZED
 
-#define KVM_USER_MEM_SLOTS 32
-#define KVM_PRIVATE_MEM_SLOTS 4
+#define KVM_USER_MEM_SLOTS 512
 #define KVM_COALESCED_MMIO_PAGE_OFFSET 1
 #define KVM_HALT_POLL_NS_DEFAULT 500000
 
index 69b2fd41503ca3764fed84f9d404b2e32cbfc939..345a072b5856d41477fab1f450eda1e213d201d3 100644
@@ -55,9 +55,13 @@ typedef struct { pteval_t pgprot; } pgprot_t;
 #define __pgprot(x)    ((pgprot_t) { (x) } )
 
 #if CONFIG_PGTABLE_LEVELS == 2
+#define __ARCH_USE_5LEVEL_HACK
 #include <asm-generic/pgtable-nopmd.h>
 #elif CONFIG_PGTABLE_LEVELS == 3
+#define __ARCH_USE_5LEVEL_HACK
 #include <asm-generic/pgtable-nopud.h>
+#elif CONFIG_PGTABLE_LEVELS == 4
+#include <asm-generic/5level-fixup.h>
 #endif
 
 #endif /* __ASM_PGTABLE_TYPES_H */
index e78ac26324bd809dcd5fa2f7f45465daba8c158d..bdbeb06dc11ede112de28b09c7608b3d5b0ce23b 100644
@@ -44,7 +44,7 @@
 #define __ARM_NR_compat_cacheflush     (__ARM_NR_COMPAT_BASE+2)
 #define __ARM_NR_compat_set_tls                (__ARM_NR_COMPAT_BASE+5)
 
-#define __NR_compat_syscalls           394
+#define __NR_compat_syscalls           398
 #endif
 
 #define __ARCH_WANT_SYS_CLONE
index b7e8ef16ff0dc62b94a042e4a0b70c9884e4cb16..c66b51aab1958816e6d137e9ae9dc0dc0378cf7f 100644
@@ -809,6 +809,14 @@ __SYSCALL(__NR_copy_file_range, sys_copy_file_range)
 __SYSCALL(__NR_preadv2, compat_sys_preadv2)
 #define __NR_pwritev2 393
 __SYSCALL(__NR_pwritev2, compat_sys_pwritev2)
+#define __NR_pkey_mprotect 394
+__SYSCALL(__NR_pkey_mprotect, sys_pkey_mprotect)
+#define __NR_pkey_alloc 395
+__SYSCALL(__NR_pkey_alloc, sys_pkey_alloc)
+#define __NR_pkey_free 396
+__SYSCALL(__NR_pkey_free, sys_pkey_free)
+#define __NR_statx 397
+__SYSCALL(__NR_statx, sys_statx)
 
 /*
  * Please add new compat syscalls above this comment and update
index 75a0f8acef669ce5560f627f516dae54168a898d..fd691087dc9ad58ff0ff007f5ea7191a3f879380 100644
@@ -30,7 +30,7 @@ int arm_cpuidle_init(unsigned int cpu)
 }
 
 /**
- * cpu_suspend() - function to enter a low-power idle state
+ * arm_cpuidle_suspend() - function to enter a low-power idle state
  * @arg: argument to pass to CPU suspend operations
  *
  * Return: 0 on success, -EOPNOTSUPP if CPU suspend hook not initialized, CPU
index 769f24ef628c1e9ffd167e0b0e634151cf36de3d..d7e90d97f5c405f2c348eaaff331df818b27906a 100644
@@ -131,11 +131,15 @@ u64 __init kaslr_early_init(u64 dt_phys, u64 modulo_offset)
        /*
         * The kernel Image should not extend across a 1GB/32MB/512MB alignment
         * boundary (for 4KB/16KB/64KB granule kernels, respectively). If this
-        * happens, increase the KASLR offset by the size of the kernel image.
+        * happens, increase the KASLR offset by the size of the kernel image
+        * rounded up by SWAPPER_BLOCK_SIZE.
         */
        if ((((u64)_text + offset + modulo_offset) >> SWAPPER_TABLE_SHIFT) !=
-           (((u64)_end + offset + modulo_offset) >> SWAPPER_TABLE_SHIFT))
-               offset = (offset + (u64)(_end - _text)) & mask;
+           (((u64)_end + offset + modulo_offset) >> SWAPPER_TABLE_SHIFT)) {
+               u64 kimg_sz = _end - _text;
+               offset = (offset + round_up(kimg_sz, SWAPPER_BLOCK_SIZE))
+                               & mask;
+       }
 
        if (IS_ENABLED(CONFIG_KASAN))
                /*
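
Note: the updated comment and code round the kernel image size up to SWAPPER_BLOCK_SIZE before advancing the offset, so the relocated image still starts on a mapping-block boundary. A worked example of the rounding (the block size is configuration-dependent; 2 MiB corresponds to a 4 KB-granule kernel):

	#include <linux/kernel.h>	/* round_up() */
	#include <linux/sizes.h>	/* SZ_2M */

	/* For power-of-two b, round_up(x, b) is ((x + b - 1) & ~(b - 1)):
	 * round_up(13 << 20, SZ_2M) == 14 << 20, so a 13 MiB image
	 * advances the KASLR offset by a full 14 MiB, not 13 MiB. */
	static inline u64 kaslr_step(u64 kimg_sz)
	{
		return round_up(kimg_sz, SZ_2M);
	}
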
index 2a07aae5b8a26431edcdfd2534a856474fc00b44..c5c45942fb6e6693c5f8c195bb6596e2fa9f6ff2 100644
@@ -372,12 +372,6 @@ int __kprobes kprobe_fault_handler(struct pt_regs *regs, unsigned int fsr)
        return 0;
 }
 
-int __kprobes kprobe_exceptions_notify(struct notifier_block *self,
-                                      unsigned long val, void *data)
-{
-       return NOTIFY_DONE;
-}
-
 static void __kprobes kprobe_handler(struct pt_regs *regs)
 {
        struct kprobe *p, *cur_kprobe;
index ef1caae02110eef59c4abb5dd5cbb8051d9cc269..9b1036570586f95379f035b8606220144cdc7837 100644
@@ -944,7 +944,7 @@ static bool have_cpu_die(void)
 #ifdef CONFIG_HOTPLUG_CPU
        int any_cpu = raw_smp_processor_id();
 
-       if (cpu_ops[any_cpu]->cpu_die)
+       if (cpu_ops[any_cpu] && cpu_ops[any_cpu]->cpu_die)
                return true;
 #endif
        return false;
index b8cc94e9698b69f17f7d127d2663a21f64e5e2eb..f8b69d84238eb4f7743c23b425e1a4900cb9cd7f 100644
@@ -1,2 +1 @@
 vdso.lds
-vdso-offsets.h
index 1bfe30dfbfe77ffa2395528e008c058bd93b648d..fa1b18e364fc9d73cec1c0fdb6626285c1d7adc2 100644
@@ -135,7 +135,19 @@ static int kvm_handle_guest_debug(struct kvm_vcpu *vcpu, struct kvm_run *run)
        return ret;
 }
 
+static int kvm_handle_unknown_ec(struct kvm_vcpu *vcpu, struct kvm_run *run)
+{
+       u32 hsr = kvm_vcpu_get_hsr(vcpu);
+
+       kvm_pr_unimpl("Unknown exception class: hsr: %#08x -- %s\n",
+                     hsr, esr_get_class_string(hsr));
+
+       kvm_inject_undefined(vcpu);
+       return 1;
+}
+
 static exit_handle_fn arm_exit_handlers[] = {
+       [0 ... ESR_ELx_EC_MAX]  = kvm_handle_unknown_ec,
        [ESR_ELx_EC_WFx]        = kvm_handle_wfx,
        [ESR_ELx_EC_CP15_32]    = kvm_handle_cp15_32,
        [ESR_ELx_EC_CP15_64]    = kvm_handle_cp15_64,
@@ -162,13 +174,6 @@ static exit_handle_fn kvm_get_exit_handler(struct kvm_vcpu *vcpu)
        u32 hsr = kvm_vcpu_get_hsr(vcpu);
        u8 hsr_ec = ESR_ELx_EC(hsr);
 
-       if (hsr_ec >= ARRAY_SIZE(arm_exit_handlers) ||
-           !arm_exit_handlers[hsr_ec]) {
-               kvm_err("Unknown exception class: hsr: %#08x -- %s\n",
-                       hsr, esr_get_class_string(hsr));
-               BUG();
-       }
-
        return arm_exit_handlers[hsr_ec];
 }
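
Note: the rewritten table relies on a GNU C designated range initializer: every exception class first gets the kvm_handle_unknown_ec fallback, and the explicit entries that follow override it, so the lookup above can no longer hit a NULL slot or BUG(). A standalone sketch of the idiom (names are illustrative):

	/* Range designators fill slots 0..7 with a default; later
	 * designators win, so only listed slots differ from it. */
	typedef int (*handler_fn)(void);

	static int handle_unknown(void) { return -1; }
	static int handle_two(void)     { return 2; }

	static handler_fn handlers[8] = {
		[0 ... 7] = handle_unknown,
		[2]       = handle_two,
	};
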
 
index e8e7ba2bc11f93abde92c6b91782ae776bdbcb73..9e1d2b75eecd606df6a6ccf632247ebc02149c67 100644
 #include <asm/kvm_hyp.h>
 #include <asm/tlbflush.h>
 
+static void __hyp_text __tlb_switch_to_guest_vhe(struct kvm *kvm)
+{
+       u64 val;
+
+       /*
+        * With VHE enabled, we have HCR_EL2.{E2H,TGE} = {1,1}, and
+        * most TLB operations target EL2/EL0. In order to affect the
+        * guest TLBs (EL1/EL0), we need to change one of these two
+        * bits. Changing E2H is impossible (goodbye TTBR1_EL2), so
+        * let's flip TGE before executing the TLB operation.
+        */
+       write_sysreg(kvm->arch.vttbr, vttbr_el2);
+       val = read_sysreg(hcr_el2);
+       val &= ~HCR_TGE;
+       write_sysreg(val, hcr_el2);
+       isb();
+}
+
+static void __hyp_text __tlb_switch_to_guest_nvhe(struct kvm *kvm)
+{
+       write_sysreg(kvm->arch.vttbr, vttbr_el2);
+       isb();
+}
+
+static hyp_alternate_select(__tlb_switch_to_guest,
+                           __tlb_switch_to_guest_nvhe,
+                           __tlb_switch_to_guest_vhe,
+                           ARM64_HAS_VIRT_HOST_EXTN);
+
+static void __hyp_text __tlb_switch_to_host_vhe(struct kvm *kvm)
+{
+       /*
+        * We're done with the TLB operation, let's restore the host's
+        * view of HCR_EL2.
+        */
+       write_sysreg(0, vttbr_el2);
+       write_sysreg(HCR_HOST_VHE_FLAGS, hcr_el2);
+}
+
+static void __hyp_text __tlb_switch_to_host_nvhe(struct kvm *kvm)
+{
+       write_sysreg(0, vttbr_el2);
+}
+
+static hyp_alternate_select(__tlb_switch_to_host,
+                           __tlb_switch_to_host_nvhe,
+                           __tlb_switch_to_host_vhe,
+                           ARM64_HAS_VIRT_HOST_EXTN);
+
 void __hyp_text __kvm_tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa)
 {
        dsb(ishst);
 
        /* Switch to requested VMID */
        kvm = kern_hyp_va(kvm);
-       write_sysreg(kvm->arch.vttbr, vttbr_el2);
-       isb();
+       __tlb_switch_to_guest()(kvm);
 
        /*
         * We could do so much better if we had the VA as well.
@@ -46,7 +94,7 @@ void __hyp_text __kvm_tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa)
        dsb(ish);
        isb();
 
-       write_sysreg(0, vttbr_el2);
+       __tlb_switch_to_host()(kvm);
 }
 
 void __hyp_text __kvm_tlb_flush_vmid(struct kvm *kvm)
@@ -55,14 +103,13 @@ void __hyp_text __kvm_tlb_flush_vmid(struct kvm *kvm)
 
        /* Switch to requested VMID */
        kvm = kern_hyp_va(kvm);
-       write_sysreg(kvm->arch.vttbr, vttbr_el2);
-       isb();
+       __tlb_switch_to_guest()(kvm);
 
        __tlbi(vmalls12e1is);
        dsb(ish);
        isb();
 
-       write_sysreg(0, vttbr_el2);
+       __tlb_switch_to_host()(kvm);
 }
 
 void __hyp_text __kvm_tlb_flush_local_vmid(struct kvm_vcpu *vcpu)
@@ -70,14 +117,13 @@ void __hyp_text __kvm_tlb_flush_local_vmid(struct kvm_vcpu *vcpu)
        struct kvm *kvm = kern_hyp_va(kern_hyp_va(vcpu)->kvm);
 
        /* Switch to requested VMID */
-       write_sysreg(kvm->arch.vttbr, vttbr_el2);
-       isb();
+       __tlb_switch_to_guest()(kvm);
 
        __tlbi(vmalle1);
        dsb(nsh);
        isb();
 
-       write_sysreg(0, vttbr_el2);
+       __tlb_switch_to_host()(kvm);
 }
 
 void __hyp_text __kvm_flush_vm_context(void)
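
Note: hyp_alternate_select() picks between the VHE and non-VHE variants once, according to the ARM64_HAS_VIRT_HOST_EXTN capability, and __tlb_switch_to_guest()(kvm) at each call site above invokes whichever variant was selected. The real macro patches the call sites at boot via the alternatives framework; the function-pointer model below is only a simplified sketch of the same select-once behaviour:

	typedef void (*tlb_switch_fn)(struct kvm *kvm);

	static tlb_switch_fn tlb_switch_to_guest;

	/* Sketch: choose the variant once from an early init hook; the
	 * kernel patches code instead of keeping a pointer like this. */
	static void __init pick_tlb_switch(void)
	{
		tlb_switch_to_guest =
			cpus_have_const_cap(ARM64_HAS_VIRT_HOST_EXTN) ?
				__tlb_switch_to_guest_vhe :
				__tlb_switch_to_guest_nvhe;
	}
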
index 55d1e9205543689a6883d983dc82cb8b9eb2be6a..687a358a37337af9cf7a0d50c27b0176cfbd2012 100644
@@ -162,7 +162,7 @@ void __init kasan_init(void)
        clear_pgds(KASAN_SHADOW_START, KASAN_SHADOW_END);
 
        vmemmap_populate(kimg_shadow_start, kimg_shadow_end,
-                        pfn_to_nid(virt_to_pfn(_text)));
+                        pfn_to_nid(virt_to_pfn(lm_alias(_text))));
 
        /*
         * vmemmap_populate() has populated the shadow region that covers the
index 425dd567b5b955424ef4f995ddf49decdf4637fd..d5b1c63993ec29620b9306e734cbd3e3ee66bd01 100644
@@ -8,6 +8,7 @@
 #ifndef __ASM_AVR32_PGTABLE_2LEVEL_H
 #define __ASM_AVR32_PGTABLE_2LEVEL_H
 
+#define __ARCH_USE_5LEVEL_HACK
 #include <asm-generic/pgtable-nopmd.h>
 
 /*
index 5a650426f35703e82db1149a4049b8575b21d85f..2434d08ad8d6be3f3e0151bd081a4eed4c4978bd 100644
 
 #define SCM_TIMESTAMPING_OPT_STATS     54
 
+#define SO_MEMINFO             55
+
+#define SO_INCOMING_NAPI_ID    56
+
+#define SO_COOKIE              57
+
 #endif /* _UAPI__ASM_AVR32_SOCKET_H */
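
Note: these per-arch headers mirror new generic socket options; SO_COOKIE in particular exposes the kernel's per-socket cookie through getsockopt(). A hedged userspace sketch — the fallback define reuses the value from the header above and is only needed where libc headers predate the option:

	#include <stdio.h>
	#include <stdint.h>
	#include <sys/socket.h>

	#ifndef SO_COOKIE
	#define SO_COOKIE 57
	#endif

	int main(void)
	{
		uint64_t cookie;
		socklen_t len = sizeof(cookie);
		int fd = socket(AF_INET, SOCK_DGRAM, 0);

		if (fd < 0 || getsockopt(fd, SOL_SOCKET, SO_COOKIE, &cookie, &len))
			return 1;
		printf("socket cookie: %llu\n", (unsigned long long)cookie);
		return 0;
	}
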
index 75d9ad6f99cf56e8071eff21157ed3da0dfef4b7..29cf2f191bfd289902c7f29b55ac896c79697944 100644
@@ -14,7 +14,7 @@
  */
 
 #include <linux/oprofile.h>
-#include <linux/sched.h>
+#include <linux/ptrace.h>
 #include <linux/uaccess.h>
 
 /* The first two words of each frame on the stack look like this if we have
index a27e1f02ce182d0e6805057bc9390e67df5e751c..8801dc98fd442a85cd09113ce6794040ac2e22dd 100644
@@ -70,46 +70,6 @@ static int gpr_get(struct task_struct *target,
                                   0, sizeof(*regs));
 }
 
-static int gpr_set(struct task_struct *target,
-                  const struct user_regset *regset,
-                  unsigned int pos, unsigned int count,
-                  const void *kbuf, const void __user *ubuf)
-{
-       int ret;
-       struct pt_regs *regs = task_pt_regs(target);
-
-       /* Don't copyin TSR or CSR */
-       ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
-                                &regs,
-                                0, PT_TSR * sizeof(long));
-       if (ret)
-               return ret;
-
-       ret = user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf,
-                                       PT_TSR * sizeof(long),
-                                       (PT_TSR + 1) * sizeof(long));
-       if (ret)
-               return ret;
-
-       ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
-                                &regs,
-                                (PT_TSR + 1) * sizeof(long),
-                                PT_CSR * sizeof(long));
-       if (ret)
-               return ret;
-
-       ret = user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf,
-                                       PT_CSR * sizeof(long),
-                                       (PT_CSR + 1) * sizeof(long));
-       if (ret)
-               return ret;
-
-       ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
-                                &regs,
-                                (PT_CSR + 1) * sizeof(long), -1);
-       return ret;
-}
-
 enum c6x_regset {
        REGSET_GPR,
 };
@@ -121,7 +81,6 @@ static const struct user_regset c6x_regsets[] = {
                .size = sizeof(u32),
                .align = sizeof(u32),
                .get = gpr_get,
-               .set = gpr_set
        },
 };
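
Note: dropping .set makes the c6x GPR regset read-only. The removed gpr_set() passed &regs — the address of the local struct pt_regs pointer — to user_regset_copyin(), so it overwrote that pointer on the stack rather than the tracee's registers. If a setter were kept, a corrected sketch (hypothetical, and ignoring the original's TSR/CSR carve-outs) would copy into the registers themselves:

	static int gpr_set_sketch(struct task_struct *target,
				  const struct user_regset *regset,
				  unsigned int pos, unsigned int count,
				  const void *kbuf, const void __user *ubuf)
	{
		struct pt_regs *regs = task_pt_regs(target);

		/* Destination is regs, not &regs: fill the saved registers. */
		return user_regset_copyin(&pos, &count, &kbuf, &ubuf,
					  regs, 0, sizeof(*regs));
	}
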
 
index ae6903d7fdbe08c25a7fa23439d228b345d1a874..14970f11bbf2b60cc1d9e7ce26adffa1bf15f63e 100644
@@ -2086,7 +2086,7 @@ static void cryptocop_job_queue_close(void)
                dma_in_cfg.en = regk_dma_no;
                REG_WR(dma, IN_DMA_INST, rw_cfg, dma_in_cfg);
 
-               /* Disble the cryptocop. */
+               /* Disable the cryptocop. */
                rw_cfg = REG_RD(strcop, regi_strcop, rw_cfg);
                rw_cfg.en = 0;
                REG_WR(strcop, regi_strcop, rw_cfg, rw_cfg);
index 2a3210ba4c720485c4ac29de9c9fa3b69b136726..fa3a73004cc570b564e1c36a66c644f1d86f0c94 100644
@@ -6,6 +6,7 @@
 #define _CRIS_PGTABLE_H
 
 #include <asm/page.h>
+#define __ARCH_USE_5LEVEL_HACK
 #include <asm-generic/pgtable-nopmd.h>
 
 #ifndef __ASSEMBLY__
index a0513d463a1fa86d39e4af642f68e6f279b69f3a..ab6e7e961b545c30805d5dccae26ffa3782db4f2 100644
@@ -16,6 +16,7 @@
 #ifndef _ASM_PGTABLE_H
 #define _ASM_PGTABLE_H
 
+#include <asm-generic/5level-fixup.h>
 #include <asm/mem-layout.h>
 #include <asm/setup.h>
 #include <asm/processor.h>
index 81e03530ed39ee7e3b25b7442361f64aa883c179..1ccf45657472a5240ddd815c90de42397effc64d 100644
 
 #define SCM_TIMESTAMPING_OPT_STATS     54
 
+#define SO_MEMINFO             55
+
+#define SO_INCOMING_NAPI_ID    56
+
+#define SO_COOKIE              57
+
 #endif /* _ASM_SOCKET_H */
 
index 8341db67821dd16ebefd70d2eb0e7b361b76a6ec..7d265d28ba5eecd2a6770c2dd161253b1a9a89b6 100644
@@ -1,5 +1,6 @@
 #ifndef _H8300_PGTABLE_H
 #define _H8300_PGTABLE_H
+#define __ARCH_USE_5LEVEL_HACK
 #include <asm-generic/pgtable-nopud.h>
 #include <asm-generic/pgtable.h>
 #define pgtable_cache_init()   do { } while (0)
index 92075544a19ac03fae02b3efc28a986b0748ec1c..0dc1c8f622bc3fda818d5703896e99be485762ea 100644
@@ -95,7 +95,8 @@ static int regs_get(struct task_struct *target,
        long *reg = (long *)&regs;
 
        /* build user regs in buffer */
-       for (r = 0; r < ARRAY_SIZE(register_offset); r++)
+       BUILD_BUG_ON(sizeof(regs) % sizeof(long) != 0);
+       for (r = 0; r < sizeof(regs) / sizeof(long); r++)
                *reg++ = h8300_get_reg(target, r);
 
        return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
@@ -113,7 +114,8 @@ static int regs_set(struct task_struct *target,
        long *reg;
 
        /* build user regs in buffer */
-       for (reg = (long *)&regs, r = 0; r < ARRAY_SIZE(register_offset); r++)
+       BUILD_BUG_ON(sizeof(regs) % sizeof(long) != 0);
+       for (reg = (long *)&regs, r = 0; r < sizeof(regs) / sizeof(long); r++)
                *reg++ = h8300_get_reg(target, r);
 
        ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
@@ -122,7 +124,7 @@ static int regs_set(struct task_struct *target,
                return ret;
 
        /* write back to pt_regs */
-       for (reg = (long *)&regs, r = 0; r < ARRAY_SIZE(register_offset); r++)
+       for (reg = (long *)&regs, r = 0; r < sizeof(regs) / sizeof(long); r++)
                h8300_put_reg(target, r, *reg++);
        return 0;
 }
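
Note: these h8300 loops previously iterated over ARRAY_SIZE(register_offset) — a table whose length need not match — while walking the register struct as an array of longs. The bound is now derived from the struct itself, and BUILD_BUG_ON() turns the divisibility assumption into a compile-time check. A minimal sketch of the idiom, with an illustrative struct:

	/* BUILD_BUG_ON() fails the build if the struct cannot be walked
	 * as a whole number of longs; the loop bound then comes from the
	 * struct being copied, not from a separate table. */
	struct demo_regs {
		long er[8];
		long pc;
	};

	static void copy_regs_as_longs(long *dst, const struct demo_regs *src)
	{
		unsigned int i;

		BUILD_BUG_ON(sizeof(*src) % sizeof(long) != 0);
		for (i = 0; i < sizeof(*src) / sizeof(long); i++)
			dst[i] = ((const long *)src)[i];
	}
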
index fe3b5673babaa49428a90c1c56d64abec3d0c871..f5ff3b794c8512ce430812fd07b5c85752294d79 100644
@@ -9,7 +9,7 @@
  */
 
 #include <linux/linkage.h>
-#include <linux/sched.h>
+#include <linux/sched/signal.h>
 #include <asm/ptrace.h>
 
 #define BREAKINST 0x5730 /* trapa #3 */
index 49eab8136ec307d3dbcb40fd98676ccc9f2b44a8..24a9177fb897b6f72fab8ff4277e1af76814de2f 100644
@@ -26,6 +26,7 @@
  */
 #include <linux/swap.h>
 #include <asm/page.h>
+#define __ARCH_USE_5LEVEL_HACK
 #include <asm-generic/pgtable-nopmd.h>
 
 /* A handy thing to have if one has the RAM. Declared in head.S */
index 384794e665fc4a733b420d7ff73c38d6ab5bff8f..6cc22c8d8923e9c294f8736190b3e55e38e7698a 100644
@@ -587,8 +587,10 @@ extern struct page *zero_page_memmap_ptr;
 
 
 #if CONFIG_PGTABLE_LEVELS == 3
+#define __ARCH_USE_5LEVEL_HACK
 #include <asm-generic/pgtable-nopud.h>
 #endif
+#include <asm-generic/5level-fixup.h>
 #include <asm-generic/pgtable.h>
 
 #endif /* _ASM_IA64_PGTABLE_H */
index 57feb0c1f7d707dd51ce20ffba0a418f5b5687ff..2c3f4b48042ae34319a2359b69ffd8fe57cedc21 100644
 
 #define SCM_TIMESTAMPING_OPT_STATS     54
 
+#define SO_MEMINFO             55
+
+#define SO_INCOMING_NAPI_ID    56
+
+#define SO_COOKIE              57
+
 #endif /* _ASM_IA64_SOCKET_H */
index 5853f8e92c20cda02450346d839b3f0b466359ee..ae6548d29a1819b4cc75826b881697a753e63a36 100644
 
 #define SCM_TIMESTAMPING_OPT_STATS     54
 
+#define SO_MEMINFO             55
+
+#define SO_INCOMING_NAPI_ID    56
+
+#define SO_COOKIE              57
+
 #endif /* _ASM_M32R_SOCKET_H */
index 048bf076f7df66a35fd4d11addd015e9ec285fc9..531cb9eb3319f4251bb6f4cf603b2a4bf90b7e80 100644
@@ -25,6 +25,7 @@ CONFIG_SUN_PARTITION=y
 # CONFIG_EFI_PARTITION is not set
 CONFIG_SYSV68_PARTITION=y
 CONFIG_IOSCHED_DEADLINE=m
+CONFIG_MQ_IOSCHED_DEADLINE=m
 CONFIG_KEXEC=y
 CONFIG_BOOTINFO_PROC=y
 CONFIG_M68020=y
@@ -60,6 +61,7 @@ CONFIG_NET_IPVTI=m
 CONFIG_NET_FOU_IP_TUNNELS=y
 CONFIG_INET_AH=m
 CONFIG_INET_ESP=m
+CONFIG_INET_ESP_OFFLOAD=m
 CONFIG_INET_IPCOMP=m
 CONFIG_INET_XFRM_MODE_TRANSPORT=m
 CONFIG_INET_XFRM_MODE_TUNNEL=m
@@ -71,6 +73,7 @@ CONFIG_IPV6=m
 CONFIG_IPV6_ROUTER_PREF=y
 CONFIG_INET6_AH=m
 CONFIG_INET6_ESP=m
+CONFIG_INET6_ESP_OFFLOAD=m
 CONFIG_INET6_IPCOMP=m
 CONFIG_IPV6_ILA=m
 CONFIG_IPV6_VTI=m
@@ -101,6 +104,7 @@ CONFIG_NFT_NUMGEN=m
 CONFIG_NFT_CT=m
 CONFIG_NFT_SET_RBTREE=m
 CONFIG_NFT_SET_HASH=m
+CONFIG_NFT_SET_BITMAP=m
 CONFIG_NFT_COUNTER=m
 CONFIG_NFT_LOG=m
 CONFIG_NFT_LIMIT=m
@@ -298,6 +302,8 @@ CONFIG_MPLS_IPTUNNEL=m
 CONFIG_NET_L3_MASTER_DEV=y
 CONFIG_AF_KCM=m
 # CONFIG_WIRELESS is not set
+CONFIG_PSAMPLE=m
+CONFIG_NET_IFE=m
 CONFIG_NET_DEVLINK=m
 # CONFIG_UEVENT_HELPER is not set
 CONFIG_DEVTMPFS=y
@@ -371,6 +377,7 @@ CONFIG_NET_TEAM_MODE_LOADBALANCE=m
 CONFIG_MACVLAN=m
 CONFIG_MACVTAP=m
 CONFIG_IPVLAN=m
+CONFIG_IPVTAP=m
 CONFIG_VXLAN=m
 CONFIG_GENEVE=m
 CONFIG_GTP=m
@@ -383,6 +390,7 @@ CONFIG_VETH=m
 # CONFIG_NET_VENDOR_AMAZON is not set
 CONFIG_A2065=y
 CONFIG_ARIADNE=y
+# CONFIG_NET_VENDOR_AQUANTIA is not set
 # CONFIG_NET_VENDOR_ARC is not set
 # CONFIG_NET_CADENCE is not set
 # CONFIG_NET_VENDOR_BROADCOM is not set
@@ -404,7 +412,6 @@ CONFIG_ZORRO8390=y
 # CONFIG_NET_VENDOR_SOLARFLARE is not set
 # CONFIG_NET_VENDOR_SMSC is not set
 # CONFIG_NET_VENDOR_STMICRO is not set
-# CONFIG_NET_VENDOR_SYNOPSYS is not set
 # CONFIG_NET_VENDOR_VIA is not set
 # CONFIG_NET_VENDOR_WIZNET is not set
 CONFIG_PPP=m
@@ -564,6 +571,8 @@ CONFIG_NLS_MAC_TURKISH=m
 CONFIG_DLM=m
 # CONFIG_SECTION_MISMATCH_WARN_ONLY is not set
 CONFIG_MAGIC_SYSRQ=y
+CONFIG_WW_MUTEX_SELFTEST=m
+CONFIG_ATOMIC64_SELFTEST=m
 CONFIG_ASYNC_RAID6_TEST=m
 CONFIG_TEST_HEXDUMP=m
 CONFIG_TEST_STRING_HELPERS=m
@@ -594,6 +603,7 @@ CONFIG_CRYPTO_CHACHA20POLY1305=m
 CONFIG_CRYPTO_LRW=m
 CONFIG_CRYPTO_PCBC=m
 CONFIG_CRYPTO_KEYWRAP=m
+CONFIG_CRYPTO_CMAC=m
 CONFIG_CRYPTO_XCBC=m
 CONFIG_CRYPTO_VMAC=m
 CONFIG_CRYPTO_MICHAEL_MIC=m
@@ -605,6 +615,7 @@ CONFIG_CRYPTO_SHA512=m
 CONFIG_CRYPTO_SHA3=m
 CONFIG_CRYPTO_TGR192=m
 CONFIG_CRYPTO_WP512=m
+CONFIG_CRYPTO_AES_TI=m
 CONFIG_CRYPTO_ANUBIS=m
 CONFIG_CRYPTO_BLOWFISH=m
 CONFIG_CRYPTO_CAMELLIA=m
@@ -629,4 +640,5 @@ CONFIG_CRYPTO_USER_API_SKCIPHER=m
 CONFIG_CRYPTO_USER_API_RNG=m
 CONFIG_CRYPTO_USER_API_AEAD=m
 # CONFIG_CRYPTO_HW is not set
+CONFIG_CRC32_SELFTEST=m
 CONFIG_XZ_DEC_TEST=m
index d4de24963f5f7434e5fab612629c23149fc0389c..ca91d39555da2dad5eb6413dd1892d1a070fe172 100644
@@ -26,6 +26,7 @@ CONFIG_SUN_PARTITION=y
 # CONFIG_EFI_PARTITION is not set
 CONFIG_SYSV68_PARTITION=y
 CONFIG_IOSCHED_DEADLINE=m
+CONFIG_MQ_IOSCHED_DEADLINE=m
 CONFIG_KEXEC=y
 CONFIG_BOOTINFO_PROC=y
 CONFIG_M68020=y
@@ -58,6 +59,7 @@ CONFIG_NET_IPVTI=m
 CONFIG_NET_FOU_IP_TUNNELS=y
 CONFIG_INET_AH=m
 CONFIG_INET_ESP=m
+CONFIG_INET_ESP_OFFLOAD=m
 CONFIG_INET_IPCOMP=m
 CONFIG_INET_XFRM_MODE_TRANSPORT=m
 CONFIG_INET_XFRM_MODE_TUNNEL=m
@@ -69,6 +71,7 @@ CONFIG_IPV6=m
 CONFIG_IPV6_ROUTER_PREF=y
 CONFIG_INET6_AH=m
 CONFIG_INET6_ESP=m
+CONFIG_INET6_ESP_OFFLOAD=m
 CONFIG_INET6_IPCOMP=m
 CONFIG_IPV6_ILA=m
 CONFIG_IPV6_VTI=m
@@ -99,6 +102,7 @@ CONFIG_NFT_NUMGEN=m
 CONFIG_NFT_CT=m
 CONFIG_NFT_SET_RBTREE=m
 CONFIG_NFT_SET_HASH=m
+CONFIG_NFT_SET_BITMAP=m
 CONFIG_NFT_COUNTER=m
 CONFIG_NFT_LOG=m
 CONFIG_NFT_LIMIT=m
@@ -296,6 +300,8 @@ CONFIG_MPLS_IPTUNNEL=m
 CONFIG_NET_L3_MASTER_DEV=y
 CONFIG_AF_KCM=m
 # CONFIG_WIRELESS is not set
+CONFIG_PSAMPLE=m
+CONFIG_NET_IFE=m
 CONFIG_NET_DEVLINK=m
 # CONFIG_UEVENT_HELPER is not set
 CONFIG_DEVTMPFS=y
@@ -353,6 +359,7 @@ CONFIG_NET_TEAM_MODE_LOADBALANCE=m
 CONFIG_MACVLAN=m
 CONFIG_MACVTAP=m
 CONFIG_IPVLAN=m
+CONFIG_IPVTAP=m
 CONFIG_VXLAN=m
 CONFIG_GENEVE=m
 CONFIG_GTP=m
@@ -362,6 +369,7 @@ CONFIG_NETCONSOLE_DYNAMIC=y
 CONFIG_VETH=m
 # CONFIG_NET_VENDOR_ALACRITECH is not set
 # CONFIG_NET_VENDOR_AMAZON is not set
+# CONFIG_NET_VENDOR_AQUANTIA is not set
 # CONFIG_NET_VENDOR_ARC is not set
 # CONFIG_NET_CADENCE is not set
 # CONFIG_NET_VENDOR_BROADCOM is not set
@@ -378,7 +386,6 @@ CONFIG_VETH=m
 # CONFIG_NET_VENDOR_SEEQ is not set
 # CONFIG_NET_VENDOR_SOLARFLARE is not set
 # CONFIG_NET_VENDOR_STMICRO is not set
-# CONFIG_NET_VENDOR_SYNOPSYS is not set
 # CONFIG_NET_VENDOR_VIA is not set
 # CONFIG_NET_VENDOR_WIZNET is not set
 CONFIG_PPP=m
@@ -523,6 +530,8 @@ CONFIG_NLS_MAC_TURKISH=m
 CONFIG_DLM=m
 # CONFIG_SECTION_MISMATCH_WARN_ONLY is not set
 CONFIG_MAGIC_SYSRQ=y
+CONFIG_WW_MUTEX_SELFTEST=m
+CONFIG_ATOMIC64_SELFTEST=m
 CONFIG_ASYNC_RAID6_TEST=m
 CONFIG_TEST_HEXDUMP=m
 CONFIG_TEST_STRING_HELPERS=m
@@ -553,6 +562,7 @@ CONFIG_CRYPTO_CHACHA20POLY1305=m
 CONFIG_CRYPTO_LRW=m
 CONFIG_CRYPTO_PCBC=m
 CONFIG_CRYPTO_KEYWRAP=m
+CONFIG_CRYPTO_CMAC=m
 CONFIG_CRYPTO_XCBC=m
 CONFIG_CRYPTO_VMAC=m
 CONFIG_CRYPTO_MICHAEL_MIC=m
@@ -564,6 +574,7 @@ CONFIG_CRYPTO_SHA512=m
 CONFIG_CRYPTO_SHA3=m
 CONFIG_CRYPTO_TGR192=m
 CONFIG_CRYPTO_WP512=m
+CONFIG_CRYPTO_AES_TI=m
 CONFIG_CRYPTO_ANUBIS=m
 CONFIG_CRYPTO_BLOWFISH=m
 CONFIG_CRYPTO_CAMELLIA=m
@@ -588,4 +599,5 @@ CONFIG_CRYPTO_USER_API_SKCIPHER=m
 CONFIG_CRYPTO_USER_API_RNG=m
 CONFIG_CRYPTO_USER_API_AEAD=m
 # CONFIG_CRYPTO_HW is not set
+CONFIG_CRC32_SELFTEST=m
 CONFIG_XZ_DEC_TEST=m
index fc0fd3f871f3348233c720465d2707fd97c8d94f..23a3d8a691e2239478299856316fc9bac1261ccd 100644
@@ -25,6 +25,7 @@ CONFIG_SUN_PARTITION=y
 # CONFIG_EFI_PARTITION is not set
 CONFIG_SYSV68_PARTITION=y
 CONFIG_IOSCHED_DEADLINE=m
+CONFIG_MQ_IOSCHED_DEADLINE=m
 CONFIG_KEXEC=y
 CONFIG_BOOTINFO_PROC=y
 CONFIG_M68020=y
@@ -58,6 +59,7 @@ CONFIG_NET_IPVTI=m
 CONFIG_NET_FOU_IP_TUNNELS=y
 CONFIG_INET_AH=m
 CONFIG_INET_ESP=m
+CONFIG_INET_ESP_OFFLOAD=m
 CONFIG_INET_IPCOMP=m
 CONFIG_INET_XFRM_MODE_TRANSPORT=m
 CONFIG_INET_XFRM_MODE_TUNNEL=m
@@ -69,6 +71,7 @@ CONFIG_IPV6=m
 CONFIG_IPV6_ROUTER_PREF=y
 CONFIG_INET6_AH=m
 CONFIG_INET6_ESP=m
+CONFIG_INET6_ESP_OFFLOAD=m
 CONFIG_INET6_IPCOMP=m
 CONFIG_IPV6_ILA=m
 CONFIG_IPV6_VTI=m
@@ -99,6 +102,7 @@ CONFIG_NFT_NUMGEN=m
 CONFIG_NFT_CT=m
 CONFIG_NFT_SET_RBTREE=m
 CONFIG_NFT_SET_HASH=m
+CONFIG_NFT_SET_BITMAP=m
 CONFIG_NFT_COUNTER=m
 CONFIG_NFT_LOG=m
 CONFIG_NFT_LIMIT=m
@@ -296,6 +300,8 @@ CONFIG_MPLS_IPTUNNEL=m
 CONFIG_NET_L3_MASTER_DEV=y
 CONFIG_AF_KCM=m
 # CONFIG_WIRELESS is not set
+CONFIG_PSAMPLE=m
+CONFIG_NET_IFE=m
 CONFIG_NET_DEVLINK=m
 # CONFIG_UEVENT_HELPER is not set
 CONFIG_DEVTMPFS=y
@@ -362,6 +368,7 @@ CONFIG_NET_TEAM_MODE_LOADBALANCE=m
 CONFIG_MACVLAN=m
 CONFIG_MACVTAP=m
 CONFIG_IPVLAN=m
+CONFIG_IPVTAP=m
 CONFIG_VXLAN=m
 CONFIG_GENEVE=m
 CONFIG_GTP=m
@@ -372,6 +379,7 @@ CONFIG_VETH=m
 # CONFIG_NET_VENDOR_ALACRITECH is not set
 # CONFIG_NET_VENDOR_AMAZON is not set
 CONFIG_ATARILANCE=y
+# CONFIG_NET_VENDOR_AQUANTIA is not set
 # CONFIG_NET_VENDOR_ARC is not set
 # CONFIG_NET_CADENCE is not set
 # CONFIG_NET_VENDOR_BROADCOM is not set
@@ -389,7 +397,6 @@ CONFIG_NE2000=y
 # CONFIG_NET_VENDOR_SOLARFLARE is not set
 CONFIG_SMC91X=y
 # CONFIG_NET_VENDOR_STMICRO is not set
-# CONFIG_NET_VENDOR_SYNOPSYS is not set
 # CONFIG_NET_VENDOR_VIA is not set
 # CONFIG_NET_VENDOR_WIZNET is not set
 CONFIG_PPP=m
@@ -544,6 +551,8 @@ CONFIG_NLS_MAC_TURKISH=m
 CONFIG_DLM=m
 # CONFIG_SECTION_MISMATCH_WARN_ONLY is not set
 CONFIG_MAGIC_SYSRQ=y
+CONFIG_WW_MUTEX_SELFTEST=m
+CONFIG_ATOMIC64_SELFTEST=m
 CONFIG_ASYNC_RAID6_TEST=m
 CONFIG_TEST_HEXDUMP=m
 CONFIG_TEST_STRING_HELPERS=m
@@ -574,6 +583,7 @@ CONFIG_CRYPTO_CHACHA20POLY1305=m
 CONFIG_CRYPTO_LRW=m
 CONFIG_CRYPTO_PCBC=m
 CONFIG_CRYPTO_KEYWRAP=m
+CONFIG_CRYPTO_CMAC=m
 CONFIG_CRYPTO_XCBC=m
 CONFIG_CRYPTO_VMAC=m
 CONFIG_CRYPTO_MICHAEL_MIC=m
@@ -585,6 +595,7 @@ CONFIG_CRYPTO_SHA512=m
 CONFIG_CRYPTO_SHA3=m
 CONFIG_CRYPTO_TGR192=m
 CONFIG_CRYPTO_WP512=m
+CONFIG_CRYPTO_AES_TI=m
 CONFIG_CRYPTO_ANUBIS=m
 CONFIG_CRYPTO_BLOWFISH=m
 CONFIG_CRYPTO_CAMELLIA=m
@@ -609,4 +620,5 @@ CONFIG_CRYPTO_USER_API_SKCIPHER=m
 CONFIG_CRYPTO_USER_API_RNG=m
 CONFIG_CRYPTO_USER_API_AEAD=m
 # CONFIG_CRYPTO_HW is not set
+CONFIG_CRC32_SELFTEST=m
 CONFIG_XZ_DEC_TEST=m
index 52e984a0aa696a503f458f2dd853913a1f32a52b..95deb95140fe9273ef2f70f670182103e5f03399 100644
@@ -25,6 +25,7 @@ CONFIG_UNIXWARE_DISKLABEL=y
 CONFIG_SUN_PARTITION=y
 # CONFIG_EFI_PARTITION is not set
 CONFIG_IOSCHED_DEADLINE=m
+CONFIG_MQ_IOSCHED_DEADLINE=m
 CONFIG_KEXEC=y
 CONFIG_BOOTINFO_PROC=y
 CONFIG_M68040=y
@@ -56,6 +57,7 @@ CONFIG_NET_IPVTI=m
 CONFIG_NET_FOU_IP_TUNNELS=y
 CONFIG_INET_AH=m
 CONFIG_INET_ESP=m
+CONFIG_INET_ESP_OFFLOAD=m
 CONFIG_INET_IPCOMP=m
 CONFIG_INET_XFRM_MODE_TRANSPORT=m
 CONFIG_INET_XFRM_MODE_TUNNEL=m
@@ -67,6 +69,7 @@ CONFIG_IPV6=m
 CONFIG_IPV6_ROUTER_PREF=y
 CONFIG_INET6_AH=m
 CONFIG_INET6_ESP=m
+CONFIG_INET6_ESP_OFFLOAD=m
 CONFIG_INET6_IPCOMP=m
 CONFIG_IPV6_ILA=m
 CONFIG_IPV6_VTI=m
@@ -97,6 +100,7 @@ CONFIG_NFT_NUMGEN=m
 CONFIG_NFT_CT=m
 CONFIG_NFT_SET_RBTREE=m
 CONFIG_NFT_SET_HASH=m
+CONFIG_NFT_SET_BITMAP=m
 CONFIG_NFT_COUNTER=m
 CONFIG_NFT_LOG=m
 CONFIG_NFT_LIMIT=m
@@ -294,6 +298,8 @@ CONFIG_MPLS_IPTUNNEL=m
 CONFIG_NET_L3_MASTER_DEV=y
 CONFIG_AF_KCM=m
 # CONFIG_WIRELESS is not set
+CONFIG_PSAMPLE=m
+CONFIG_NET_IFE=m
 CONFIG_NET_DEVLINK=m
 # CONFIG_UEVENT_HELPER is not set
 CONFIG_DEVTMPFS=y
@@ -352,6 +358,7 @@ CONFIG_NET_TEAM_MODE_LOADBALANCE=m
 CONFIG_MACVLAN=m
 CONFIG_MACVTAP=m
 CONFIG_IPVLAN=m
+CONFIG_IPVTAP=m
 CONFIG_VXLAN=m
 CONFIG_GENEVE=m
 CONFIG_GTP=m
@@ -361,6 +368,7 @@ CONFIG_NETCONSOLE_DYNAMIC=y
 CONFIG_VETH=m
 # CONFIG_NET_VENDOR_ALACRITECH is not set
 # CONFIG_NET_VENDOR_AMAZON is not set
+# CONFIG_NET_VENDOR_AQUANTIA is not set
 # CONFIG_NET_VENDOR_ARC is not set
 # CONFIG_NET_CADENCE is not set
 # CONFIG_NET_VENDOR_BROADCOM is not set
@@ -377,7 +385,6 @@ CONFIG_BVME6000_NET=y
 # CONFIG_NET_VENDOR_SEEQ is not set
 # CONFIG_NET_VENDOR_SOLARFLARE is not set
 # CONFIG_NET_VENDOR_STMICRO is not set
-# CONFIG_NET_VENDOR_SYNOPSYS is not set
 # CONFIG_NET_VENDOR_VIA is not set
 # CONFIG_NET_VENDOR_WIZNET is not set
 CONFIG_PPP=m
@@ -515,6 +522,8 @@ CONFIG_NLS_MAC_TURKISH=m
 CONFIG_DLM=m
 # CONFIG_SECTION_MISMATCH_WARN_ONLY is not set
 CONFIG_MAGIC_SYSRQ=y
+CONFIG_WW_MUTEX_SELFTEST=m
+CONFIG_ATOMIC64_SELFTEST=m
 CONFIG_ASYNC_RAID6_TEST=m
 CONFIG_TEST_HEXDUMP=m
 CONFIG_TEST_STRING_HELPERS=m
@@ -545,6 +554,7 @@ CONFIG_CRYPTO_CHACHA20POLY1305=m
 CONFIG_CRYPTO_LRW=m
 CONFIG_CRYPTO_PCBC=m
 CONFIG_CRYPTO_KEYWRAP=m
+CONFIG_CRYPTO_CMAC=m
 CONFIG_CRYPTO_XCBC=m
 CONFIG_CRYPTO_VMAC=m
 CONFIG_CRYPTO_MICHAEL_MIC=m
@@ -556,6 +566,7 @@ CONFIG_CRYPTO_SHA512=m
 CONFIG_CRYPTO_SHA3=m
 CONFIG_CRYPTO_TGR192=m
 CONFIG_CRYPTO_WP512=m
+CONFIG_CRYPTO_AES_TI=m
 CONFIG_CRYPTO_ANUBIS=m
 CONFIG_CRYPTO_BLOWFISH=m
 CONFIG_CRYPTO_CAMELLIA=m
@@ -580,4 +591,5 @@ CONFIG_CRYPTO_USER_API_SKCIPHER=m
 CONFIG_CRYPTO_USER_API_RNG=m
 CONFIG_CRYPTO_USER_API_AEAD=m
 # CONFIG_CRYPTO_HW is not set
+CONFIG_CRC32_SELFTEST=m
 CONFIG_XZ_DEC_TEST=m
index aaeed4422cc97525600537135669e9da3f865b3e..afae6958db2d777591527d1d59dc3bd686f64772 100644
@@ -26,6 +26,7 @@ CONFIG_SUN_PARTITION=y
 # CONFIG_EFI_PARTITION is not set
 CONFIG_SYSV68_PARTITION=y
 CONFIG_IOSCHED_DEADLINE=m
+CONFIG_MQ_IOSCHED_DEADLINE=m
 CONFIG_KEXEC=y
 CONFIG_BOOTINFO_PROC=y
 CONFIG_M68020=y
@@ -58,6 +59,7 @@ CONFIG_NET_IPVTI=m
 CONFIG_NET_FOU_IP_TUNNELS=y
 CONFIG_INET_AH=m
 CONFIG_INET_ESP=m
+CONFIG_INET_ESP_OFFLOAD=m
 CONFIG_INET_IPCOMP=m
 CONFIG_INET_XFRM_MODE_TRANSPORT=m
 CONFIG_INET_XFRM_MODE_TUNNEL=m
@@ -69,6 +71,7 @@ CONFIG_IPV6=m
 CONFIG_IPV6_ROUTER_PREF=y
 CONFIG_INET6_AH=m
 CONFIG_INET6_ESP=m
+CONFIG_INET6_ESP_OFFLOAD=m
 CONFIG_INET6_IPCOMP=m
 CONFIG_IPV6_ILA=m
 CONFIG_IPV6_VTI=m
@@ -99,6 +102,7 @@ CONFIG_NFT_NUMGEN=m
 CONFIG_NFT_CT=m
 CONFIG_NFT_SET_RBTREE=m
 CONFIG_NFT_SET_HASH=m
+CONFIG_NFT_SET_BITMAP=m
 CONFIG_NFT_COUNTER=m
 CONFIG_NFT_LOG=m
 CONFIG_NFT_LIMIT=m
@@ -296,6 +300,8 @@ CONFIG_MPLS_IPTUNNEL=m
 CONFIG_NET_L3_MASTER_DEV=y
 CONFIG_AF_KCM=m
 # CONFIG_WIRELESS is not set
+CONFIG_PSAMPLE=m
+CONFIG_NET_IFE=m
 CONFIG_NET_DEVLINK=m
 # CONFIG_UEVENT_HELPER is not set
 CONFIG_DEVTMPFS=y
@@ -353,6 +359,7 @@ CONFIG_NET_TEAM_MODE_LOADBALANCE=m
 CONFIG_MACVLAN=m
 CONFIG_MACVTAP=m
 CONFIG_IPVLAN=m
+CONFIG_IPVTAP=m
 CONFIG_VXLAN=m
 CONFIG_GENEVE=m
 CONFIG_GTP=m
@@ -363,6 +370,7 @@ CONFIG_VETH=m
 # CONFIG_NET_VENDOR_ALACRITECH is not set
 # CONFIG_NET_VENDOR_AMAZON is not set
 CONFIG_HPLANCE=y
+# CONFIG_NET_VENDOR_AQUANTIA is not set
 # CONFIG_NET_VENDOR_ARC is not set
 # CONFIG_NET_CADENCE is not set
 # CONFIG_NET_VENDOR_BROADCOM is not set
@@ -379,7 +387,6 @@ CONFIG_HPLANCE=y
 # CONFIG_NET_VENDOR_SEEQ is not set
 # CONFIG_NET_VENDOR_SOLARFLARE is not set
 # CONFIG_NET_VENDOR_STMICRO is not set
-# CONFIG_NET_VENDOR_SYNOPSYS is not set
 # CONFIG_NET_VENDOR_VIA is not set
 # CONFIG_NET_VENDOR_WIZNET is not set
 CONFIG_PPP=m
@@ -525,6 +532,8 @@ CONFIG_NLS_MAC_TURKISH=m
 CONFIG_DLM=m
 # CONFIG_SECTION_MISMATCH_WARN_ONLY is not set
 CONFIG_MAGIC_SYSRQ=y
+CONFIG_WW_MUTEX_SELFTEST=m
+CONFIG_ATOMIC64_SELFTEST=m
 CONFIG_ASYNC_RAID6_TEST=m
 CONFIG_TEST_HEXDUMP=m
 CONFIG_TEST_STRING_HELPERS=m
@@ -555,6 +564,7 @@ CONFIG_CRYPTO_CHACHA20POLY1305=m
 CONFIG_CRYPTO_LRW=m
 CONFIG_CRYPTO_PCBC=m
 CONFIG_CRYPTO_KEYWRAP=m
+CONFIG_CRYPTO_CMAC=m
 CONFIG_CRYPTO_XCBC=m
 CONFIG_CRYPTO_VMAC=m
 CONFIG_CRYPTO_MICHAEL_MIC=m
@@ -566,6 +576,7 @@ CONFIG_CRYPTO_SHA512=m
 CONFIG_CRYPTO_SHA3=m
 CONFIG_CRYPTO_TGR192=m
 CONFIG_CRYPTO_WP512=m
+CONFIG_CRYPTO_AES_TI=m
 CONFIG_CRYPTO_ANUBIS=m
 CONFIG_CRYPTO_BLOWFISH=m
 CONFIG_CRYPTO_CAMELLIA=m
@@ -590,4 +601,5 @@ CONFIG_CRYPTO_USER_API_SKCIPHER=m
 CONFIG_CRYPTO_USER_API_RNG=m
 CONFIG_CRYPTO_USER_API_AEAD=m
 # CONFIG_CRYPTO_HW is not set
+CONFIG_CRC32_SELFTEST=m
 CONFIG_XZ_DEC_TEST=m
index 3bbc9b2f0dac0fb890183d11d369655fdfd68201..b010734729a79e42b599c26919542faf4fbce31d 100644
@@ -25,6 +25,7 @@ CONFIG_SUN_PARTITION=y
 # CONFIG_EFI_PARTITION is not set
 CONFIG_SYSV68_PARTITION=y
 CONFIG_IOSCHED_DEADLINE=m
+CONFIG_MQ_IOSCHED_DEADLINE=m
 CONFIG_KEXEC=y
 CONFIG_BOOTINFO_PROC=y
 CONFIG_M68020=y
@@ -57,6 +58,7 @@ CONFIG_NET_IPVTI=m
 CONFIG_NET_FOU_IP_TUNNELS=y
 CONFIG_INET_AH=m
 CONFIG_INET_ESP=m
+CONFIG_INET_ESP_OFFLOAD=m
 CONFIG_INET_IPCOMP=m
 CONFIG_INET_XFRM_MODE_TRANSPORT=m
 CONFIG_INET_XFRM_MODE_TUNNEL=m
@@ -68,6 +70,7 @@ CONFIG_IPV6=m
 CONFIG_IPV6_ROUTER_PREF=y
 CONFIG_INET6_AH=m
 CONFIG_INET6_ESP=m
+CONFIG_INET6_ESP_OFFLOAD=m
 CONFIG_INET6_IPCOMP=m
 CONFIG_IPV6_ILA=m
 CONFIG_IPV6_VTI=m
@@ -98,6 +101,7 @@ CONFIG_NFT_NUMGEN=m
 CONFIG_NFT_CT=m
 CONFIG_NFT_SET_RBTREE=m
 CONFIG_NFT_SET_HASH=m
+CONFIG_NFT_SET_BITMAP=m
 CONFIG_NFT_COUNTER=m
 CONFIG_NFT_LOG=m
 CONFIG_NFT_LIMIT=m
@@ -298,6 +302,8 @@ CONFIG_MPLS_IPTUNNEL=m
 CONFIG_NET_L3_MASTER_DEV=y
 CONFIG_AF_KCM=m
 # CONFIG_WIRELESS is not set
+CONFIG_PSAMPLE=m
+CONFIG_NET_IFE=m
 CONFIG_NET_DEVLINK=m
 # CONFIG_UEVENT_HELPER is not set
 CONFIG_DEVTMPFS=y
@@ -369,6 +375,7 @@ CONFIG_NET_TEAM_MODE_LOADBALANCE=m
 CONFIG_MACVLAN=m
 CONFIG_MACVTAP=m
 CONFIG_IPVLAN=m
+CONFIG_IPVTAP=m
 CONFIG_VXLAN=m
 CONFIG_GENEVE=m
 CONFIG_GTP=m
@@ -379,6 +386,7 @@ CONFIG_VETH=m
 # CONFIG_NET_VENDOR_ALACRITECH is not set
 # CONFIG_NET_VENDOR_AMAZON is not set
 CONFIG_MACMACE=y
+# CONFIG_NET_VENDOR_AQUANTIA is not set
 # CONFIG_NET_VENDOR_ARC is not set
 # CONFIG_NET_CADENCE is not set
 # CONFIG_NET_VENDOR_BROADCOM is not set
@@ -398,7 +406,6 @@ CONFIG_MAC8390=y
 # CONFIG_NET_VENDOR_SOLARFLARE is not set
 # CONFIG_NET_VENDOR_SMSC is not set
 # CONFIG_NET_VENDOR_STMICRO is not set
-# CONFIG_NET_VENDOR_SYNOPSYS is not set
 # CONFIG_NET_VENDOR_VIA is not set
 # CONFIG_NET_VENDOR_WIZNET is not set
 CONFIG_PPP=m
@@ -547,6 +554,8 @@ CONFIG_NLS_MAC_TURKISH=m
 CONFIG_DLM=m
 # CONFIG_SECTION_MISMATCH_WARN_ONLY is not set
 CONFIG_MAGIC_SYSRQ=y
+CONFIG_WW_MUTEX_SELFTEST=m
+CONFIG_ATOMIC64_SELFTEST=m
 CONFIG_ASYNC_RAID6_TEST=m
 CONFIG_TEST_HEXDUMP=m
 CONFIG_TEST_STRING_HELPERS=m
@@ -577,6 +586,7 @@ CONFIG_CRYPTO_CHACHA20POLY1305=m
 CONFIG_CRYPTO_LRW=m
 CONFIG_CRYPTO_PCBC=m
 CONFIG_CRYPTO_KEYWRAP=m
+CONFIG_CRYPTO_CMAC=m
 CONFIG_CRYPTO_XCBC=m
 CONFIG_CRYPTO_VMAC=m
 CONFIG_CRYPTO_MICHAEL_MIC=m
@@ -588,6 +598,7 @@ CONFIG_CRYPTO_SHA512=m
 CONFIG_CRYPTO_SHA3=m
 CONFIG_CRYPTO_TGR192=m
 CONFIG_CRYPTO_WP512=m
+CONFIG_CRYPTO_AES_TI=m
 CONFIG_CRYPTO_ANUBIS=m
 CONFIG_CRYPTO_BLOWFISH=m
 CONFIG_CRYPTO_CAMELLIA=m
@@ -612,4 +623,5 @@ CONFIG_CRYPTO_USER_API_SKCIPHER=m
 CONFIG_CRYPTO_USER_API_RNG=m
 CONFIG_CRYPTO_USER_API_AEAD=m
 # CONFIG_CRYPTO_HW is not set
+CONFIG_CRC32_SELFTEST=m
 CONFIG_XZ_DEC_TEST=m
index 8f2c0decb2f8edd8030ffc1df2eddad2bd70a939..0e414549b235b0a04e9ed3feaa3786542c862bb3 100644
@@ -21,6 +21,7 @@ CONFIG_SOLARIS_X86_PARTITION=y
 CONFIG_UNIXWARE_DISKLABEL=y
 # CONFIG_EFI_PARTITION is not set
 CONFIG_IOSCHED_DEADLINE=m
+CONFIG_MQ_IOSCHED_DEADLINE=m
 CONFIG_KEXEC=y
 CONFIG_BOOTINFO_PROC=y
 CONFIG_M68020=y
@@ -67,6 +68,7 @@ CONFIG_NET_IPVTI=m
 CONFIG_NET_FOU_IP_TUNNELS=y
 CONFIG_INET_AH=m
 CONFIG_INET_ESP=m
+CONFIG_INET_ESP_OFFLOAD=m
 CONFIG_INET_IPCOMP=m
 CONFIG_INET_XFRM_MODE_TRANSPORT=m
 CONFIG_INET_XFRM_MODE_TUNNEL=m
@@ -78,6 +80,7 @@ CONFIG_IPV6=m
 CONFIG_IPV6_ROUTER_PREF=y
 CONFIG_INET6_AH=m
 CONFIG_INET6_ESP=m
+CONFIG_INET6_ESP_OFFLOAD=m
 CONFIG_INET6_IPCOMP=m
 CONFIG_IPV6_ILA=m
 CONFIG_IPV6_VTI=m
@@ -108,6 +111,7 @@ CONFIG_NFT_NUMGEN=m
 CONFIG_NFT_CT=m
 CONFIG_NFT_SET_RBTREE=m
 CONFIG_NFT_SET_HASH=m
+CONFIG_NFT_SET_BITMAP=m
 CONFIG_NFT_COUNTER=m
 CONFIG_NFT_LOG=m
 CONFIG_NFT_LIMIT=m
@@ -308,6 +312,8 @@ CONFIG_MPLS_IPTUNNEL=m
 CONFIG_NET_L3_MASTER_DEV=y
 CONFIG_AF_KCM=m
 # CONFIG_WIRELESS is not set
+CONFIG_PSAMPLE=m
+CONFIG_NET_IFE=m
 CONFIG_NET_DEVLINK=m
 # CONFIG_UEVENT_HELPER is not set
 CONFIG_DEVTMPFS=y
@@ -402,6 +408,7 @@ CONFIG_NET_TEAM_MODE_LOADBALANCE=m
 CONFIG_MACVLAN=m
 CONFIG_MACVTAP=m
 CONFIG_IPVLAN=m
+CONFIG_IPVTAP=m
 CONFIG_VXLAN=m
 CONFIG_GENEVE=m
 CONFIG_GTP=m
@@ -419,6 +426,7 @@ CONFIG_HPLANCE=y
 CONFIG_MVME147_NET=y
 CONFIG_SUN3LANCE=y
 CONFIG_MACMACE=y
+# CONFIG_NET_VENDOR_AQUANTIA is not set
 # CONFIG_NET_VENDOR_ARC is not set
 # CONFIG_NET_CADENCE is not set
 # CONFIG_NET_VENDOR_BROADCOM is not set
@@ -444,7 +452,6 @@ CONFIG_ZORRO8390=y
 # CONFIG_NET_VENDOR_SOLARFLARE is not set
 CONFIG_SMC91X=y
 # CONFIG_NET_VENDOR_STMICRO is not set
-# CONFIG_NET_VENDOR_SYNOPSYS is not set
 # CONFIG_NET_VENDOR_VIA is not set
 # CONFIG_NET_VENDOR_WIZNET is not set
 CONFIG_PLIP=m
@@ -627,6 +634,8 @@ CONFIG_NLS_MAC_TURKISH=m
 CONFIG_DLM=m
 # CONFIG_SECTION_MISMATCH_WARN_ONLY is not set
 CONFIG_MAGIC_SYSRQ=y
+CONFIG_WW_MUTEX_SELFTEST=m
+CONFIG_ATOMIC64_SELFTEST=m
 CONFIG_ASYNC_RAID6_TEST=m
 CONFIG_TEST_HEXDUMP=m
 CONFIG_TEST_STRING_HELPERS=m
@@ -657,6 +666,7 @@ CONFIG_CRYPTO_CHACHA20POLY1305=m
 CONFIG_CRYPTO_LRW=m
 CONFIG_CRYPTO_PCBC=m
 CONFIG_CRYPTO_KEYWRAP=m
+CONFIG_CRYPTO_CMAC=m
 CONFIG_CRYPTO_XCBC=m
 CONFIG_CRYPTO_VMAC=m
 CONFIG_CRYPTO_MICHAEL_MIC=m
@@ -668,6 +678,7 @@ CONFIG_CRYPTO_SHA512=m
 CONFIG_CRYPTO_SHA3=m
 CONFIG_CRYPTO_TGR192=m
 CONFIG_CRYPTO_WP512=m
+CONFIG_CRYPTO_AES_TI=m
 CONFIG_CRYPTO_ANUBIS=m
 CONFIG_CRYPTO_BLOWFISH=m
 CONFIG_CRYPTO_CAMELLIA=m
@@ -692,4 +703,5 @@ CONFIG_CRYPTO_USER_API_SKCIPHER=m
 CONFIG_CRYPTO_USER_API_RNG=m
 CONFIG_CRYPTO_USER_API_AEAD=m
 # CONFIG_CRYPTO_HW is not set
+CONFIG_CRC32_SELFTEST=m
 CONFIG_XZ_DEC_TEST=m
index c743dd22e96f935f553a12648991546616a47291..b2e687a0ec3d477d2f8fbb50a7387e51a60fb5bc 100644
@@ -25,6 +25,7 @@ CONFIG_UNIXWARE_DISKLABEL=y
 CONFIG_SUN_PARTITION=y
 # CONFIG_EFI_PARTITION is not set
 CONFIG_IOSCHED_DEADLINE=m
+CONFIG_MQ_IOSCHED_DEADLINE=m
 CONFIG_KEXEC=y
 CONFIG_BOOTINFO_PROC=y
 CONFIG_M68030=y
@@ -55,6 +56,7 @@ CONFIG_NET_IPVTI=m
 CONFIG_NET_FOU_IP_TUNNELS=y
 CONFIG_INET_AH=m
 CONFIG_INET_ESP=m
+CONFIG_INET_ESP_OFFLOAD=m
 CONFIG_INET_IPCOMP=m
 CONFIG_INET_XFRM_MODE_TRANSPORT=m
 CONFIG_INET_XFRM_MODE_TUNNEL=m
@@ -66,6 +68,7 @@ CONFIG_IPV6=m
 CONFIG_IPV6_ROUTER_PREF=y
 CONFIG_INET6_AH=m
 CONFIG_INET6_ESP=m
+CONFIG_INET6_ESP_OFFLOAD=m
 CONFIG_INET6_IPCOMP=m
 CONFIG_IPV6_ILA=m
 CONFIG_IPV6_VTI=m
@@ -96,6 +99,7 @@ CONFIG_NFT_NUMGEN=m
 CONFIG_NFT_CT=m
 CONFIG_NFT_SET_RBTREE=m
 CONFIG_NFT_SET_HASH=m
+CONFIG_NFT_SET_BITMAP=m
 CONFIG_NFT_COUNTER=m
 CONFIG_NFT_LOG=m
 CONFIG_NFT_LIMIT=m
@@ -293,6 +297,8 @@ CONFIG_MPLS_IPTUNNEL=m
 CONFIG_NET_L3_MASTER_DEV=y
 CONFIG_AF_KCM=m
 # CONFIG_WIRELESS is not set
+CONFIG_PSAMPLE=m
+CONFIG_NET_IFE=m
 CONFIG_NET_DEVLINK=m
 # CONFIG_UEVENT_HELPER is not set
 CONFIG_DEVTMPFS=y
@@ -351,6 +357,7 @@ CONFIG_NET_TEAM_MODE_LOADBALANCE=m
 CONFIG_MACVLAN=m
 CONFIG_MACVTAP=m
 CONFIG_IPVLAN=m
+CONFIG_IPVTAP=m
 CONFIG_VXLAN=m
 CONFIG_GENEVE=m
 CONFIG_GTP=m
@@ -361,6 +368,7 @@ CONFIG_VETH=m
 # CONFIG_NET_VENDOR_ALACRITECH is not set
 # CONFIG_NET_VENDOR_AMAZON is not set
 CONFIG_MVME147_NET=y
+# CONFIG_NET_VENDOR_AQUANTIA is not set
 # CONFIG_NET_VENDOR_ARC is not set
 # CONFIG_NET_CADENCE is not set
 # CONFIG_NET_VENDOR_BROADCOM is not set
@@ -377,7 +385,6 @@ CONFIG_MVME147_NET=y
 # CONFIG_NET_VENDOR_SEEQ is not set
 # CONFIG_NET_VENDOR_SOLARFLARE is not set
 # CONFIG_NET_VENDOR_STMICRO is not set
-# CONFIG_NET_VENDOR_SYNOPSYS is not set
 # CONFIG_NET_VENDOR_VIA is not set
 # CONFIG_NET_VENDOR_WIZNET is not set
 CONFIG_PPP=m
@@ -515,6 +522,8 @@ CONFIG_NLS_MAC_TURKISH=m
 CONFIG_DLM=m
 # CONFIG_SECTION_MISMATCH_WARN_ONLY is not set
 CONFIG_MAGIC_SYSRQ=y
+CONFIG_WW_MUTEX_SELFTEST=m
+CONFIG_ATOMIC64_SELFTEST=m
 CONFIG_ASYNC_RAID6_TEST=m
 CONFIG_TEST_HEXDUMP=m
 CONFIG_TEST_STRING_HELPERS=m
@@ -545,6 +554,7 @@ CONFIG_CRYPTO_CHACHA20POLY1305=m
 CONFIG_CRYPTO_LRW=m
 CONFIG_CRYPTO_PCBC=m
 CONFIG_CRYPTO_KEYWRAP=m
+CONFIG_CRYPTO_CMAC=m
 CONFIG_CRYPTO_XCBC=m
 CONFIG_CRYPTO_VMAC=m
 CONFIG_CRYPTO_MICHAEL_MIC=m
@@ -556,6 +566,7 @@ CONFIG_CRYPTO_SHA512=m
 CONFIG_CRYPTO_SHA3=m
 CONFIG_CRYPTO_TGR192=m
 CONFIG_CRYPTO_WP512=m
+CONFIG_CRYPTO_AES_TI=m
 CONFIG_CRYPTO_ANUBIS=m
 CONFIG_CRYPTO_BLOWFISH=m
 CONFIG_CRYPTO_CAMELLIA=m
@@ -580,4 +591,5 @@ CONFIG_CRYPTO_USER_API_SKCIPHER=m
 CONFIG_CRYPTO_USER_API_RNG=m
 CONFIG_CRYPTO_USER_API_AEAD=m
 # CONFIG_CRYPTO_HW is not set
+CONFIG_CRC32_SELFTEST=m
 CONFIG_XZ_DEC_TEST=m
index 2ccaca858f0533d79d7f4c6a52bf070f333eb8ce..cbd8ee24d1bc4e2f7c4f611df7cdd95c8b147e3b 100644
@@ -25,6 +25,7 @@ CONFIG_UNIXWARE_DISKLABEL=y
 CONFIG_SUN_PARTITION=y
 # CONFIG_EFI_PARTITION is not set
 CONFIG_IOSCHED_DEADLINE=m
+CONFIG_MQ_IOSCHED_DEADLINE=m
 CONFIG_KEXEC=y
 CONFIG_BOOTINFO_PROC=y
 CONFIG_M68040=y
@@ -56,6 +57,7 @@ CONFIG_NET_IPVTI=m
 CONFIG_NET_FOU_IP_TUNNELS=y
 CONFIG_INET_AH=m
 CONFIG_INET_ESP=m
+CONFIG_INET_ESP_OFFLOAD=m
 CONFIG_INET_IPCOMP=m
 CONFIG_INET_XFRM_MODE_TRANSPORT=m
 CONFIG_INET_XFRM_MODE_TUNNEL=m
@@ -67,6 +69,7 @@ CONFIG_IPV6=m
 CONFIG_IPV6_ROUTER_PREF=y
 CONFIG_INET6_AH=m
 CONFIG_INET6_ESP=m
+CONFIG_INET6_ESP_OFFLOAD=m
 CONFIG_INET6_IPCOMP=m
 CONFIG_IPV6_ILA=m
 CONFIG_IPV6_VTI=m
@@ -97,6 +100,7 @@ CONFIG_NFT_NUMGEN=m
 CONFIG_NFT_CT=m
 CONFIG_NFT_SET_RBTREE=m
 CONFIG_NFT_SET_HASH=m
+CONFIG_NFT_SET_BITMAP=m
 CONFIG_NFT_COUNTER=m
 CONFIG_NFT_LOG=m
 CONFIG_NFT_LIMIT=m
@@ -294,6 +298,8 @@ CONFIG_MPLS_IPTUNNEL=m
 CONFIG_NET_L3_MASTER_DEV=y
 CONFIG_AF_KCM=m
 # CONFIG_WIRELESS is not set
+CONFIG_PSAMPLE=m
+CONFIG_NET_IFE=m
 CONFIG_NET_DEVLINK=m
 # CONFIG_UEVENT_HELPER is not set
 CONFIG_DEVTMPFS=y
@@ -352,6 +358,7 @@ CONFIG_NET_TEAM_MODE_LOADBALANCE=m
 CONFIG_MACVLAN=m
 CONFIG_MACVTAP=m
 CONFIG_IPVLAN=m
+CONFIG_IPVTAP=m
 CONFIG_VXLAN=m
 CONFIG_GENEVE=m
 CONFIG_GTP=m
@@ -361,6 +368,7 @@ CONFIG_NETCONSOLE_DYNAMIC=y
 CONFIG_VETH=m
 # CONFIG_NET_VENDOR_ALACRITECH is not set
 # CONFIG_NET_VENDOR_AMAZON is not set
+# CONFIG_NET_VENDOR_AQUANTIA is not set
 # CONFIG_NET_VENDOR_ARC is not set
 # CONFIG_NET_CADENCE is not set
 # CONFIG_NET_VENDOR_BROADCOM is not set
@@ -377,7 +385,6 @@ CONFIG_MVME16x_NET=y
 # CONFIG_NET_VENDOR_SEEQ is not set
 # CONFIG_NET_VENDOR_SOLARFLARE is not set
 # CONFIG_NET_VENDOR_STMICRO is not set
-# CONFIG_NET_VENDOR_SYNOPSYS is not set
 # CONFIG_NET_VENDOR_VIA is not set
 # CONFIG_NET_VENDOR_WIZNET is not set
 CONFIG_PPP=m
@@ -515,6 +522,8 @@ CONFIG_NLS_MAC_TURKISH=m
 CONFIG_DLM=m
 # CONFIG_SECTION_MISMATCH_WARN_ONLY is not set
 CONFIG_MAGIC_SYSRQ=y
+CONFIG_WW_MUTEX_SELFTEST=m
+CONFIG_ATOMIC64_SELFTEST=m
 CONFIG_ASYNC_RAID6_TEST=m
 CONFIG_TEST_HEXDUMP=m
 CONFIG_TEST_STRING_HELPERS=m
@@ -545,6 +554,7 @@ CONFIG_CRYPTO_CHACHA20POLY1305=m
 CONFIG_CRYPTO_LRW=m
 CONFIG_CRYPTO_PCBC=m
 CONFIG_CRYPTO_KEYWRAP=m
+CONFIG_CRYPTO_CMAC=m
 CONFIG_CRYPTO_XCBC=m
 CONFIG_CRYPTO_VMAC=m
 CONFIG_CRYPTO_MICHAEL_MIC=m
@@ -556,6 +566,7 @@ CONFIG_CRYPTO_SHA512=m
 CONFIG_CRYPTO_SHA3=m
 CONFIG_CRYPTO_TGR192=m
 CONFIG_CRYPTO_WP512=m
+CONFIG_CRYPTO_AES_TI=m
 CONFIG_CRYPTO_ANUBIS=m
 CONFIG_CRYPTO_BLOWFISH=m
 CONFIG_CRYPTO_CAMELLIA=m
@@ -580,4 +591,5 @@ CONFIG_CRYPTO_USER_API_SKCIPHER=m
 CONFIG_CRYPTO_USER_API_RNG=m
 CONFIG_CRYPTO_USER_API_AEAD=m
 # CONFIG_CRYPTO_HW is not set
+CONFIG_CRC32_SELFTEST=m
 CONFIG_XZ_DEC_TEST=m
index 5599f3fd5fcd44eab2e52ac460b73376be5cc052..1e82cc9443399a2cd67febf8b7f3fda0682e1990 100644
@@ -26,6 +26,7 @@ CONFIG_SUN_PARTITION=y
 # CONFIG_EFI_PARTITION is not set
 CONFIG_SYSV68_PARTITION=y
 CONFIG_IOSCHED_DEADLINE=m
+CONFIG_MQ_IOSCHED_DEADLINE=m
 CONFIG_KEXEC=y
 CONFIG_BOOTINFO_PROC=y
 CONFIG_M68040=y
@@ -56,6 +57,7 @@ CONFIG_NET_IPVTI=m
 CONFIG_NET_FOU_IP_TUNNELS=y
 CONFIG_INET_AH=m
 CONFIG_INET_ESP=m
+CONFIG_INET_ESP_OFFLOAD=m
 CONFIG_INET_IPCOMP=m
 CONFIG_INET_XFRM_MODE_TRANSPORT=m
 CONFIG_INET_XFRM_MODE_TUNNEL=m
@@ -67,6 +69,7 @@ CONFIG_IPV6=m
 CONFIG_IPV6_ROUTER_PREF=y
 CONFIG_INET6_AH=m
 CONFIG_INET6_ESP=m
+CONFIG_INET6_ESP_OFFLOAD=m
 CONFIG_INET6_IPCOMP=m
 CONFIG_IPV6_ILA=m
 CONFIG_IPV6_VTI=m
@@ -97,6 +100,7 @@ CONFIG_NFT_NUMGEN=m
 CONFIG_NFT_CT=m
 CONFIG_NFT_SET_RBTREE=m
 CONFIG_NFT_SET_HASH=m
+CONFIG_NFT_SET_BITMAP=m
 CONFIG_NFT_COUNTER=m
 CONFIG_NFT_LOG=m
 CONFIG_NFT_LIMIT=m
@@ -294,6 +298,8 @@ CONFIG_MPLS_IPTUNNEL=m
 CONFIG_NET_L3_MASTER_DEV=y
 CONFIG_AF_KCM=m
 # CONFIG_WIRELESS is not set
+CONFIG_PSAMPLE=m
+CONFIG_NET_IFE=m
 CONFIG_NET_DEVLINK=m
 # CONFIG_UEVENT_HELPER is not set
 CONFIG_DEVTMPFS=y
@@ -358,6 +364,7 @@ CONFIG_NET_TEAM_MODE_LOADBALANCE=m
 CONFIG_MACVLAN=m
 CONFIG_MACVTAP=m
 CONFIG_IPVLAN=m
+CONFIG_IPVTAP=m
 CONFIG_VXLAN=m
 CONFIG_GENEVE=m
 CONFIG_GTP=m
@@ -369,6 +376,7 @@ CONFIG_VETH=m
 # CONFIG_NET_VENDOR_ALACRITECH is not set
 # CONFIG_NET_VENDOR_AMAZON is not set
 # CONFIG_NET_VENDOR_AMD is not set
+# CONFIG_NET_VENDOR_AQUANTIA is not set
 # CONFIG_NET_VENDOR_ARC is not set
 # CONFIG_NET_CADENCE is not set
 # CONFIG_NET_VENDOR_BROADCOM is not set
@@ -388,7 +396,6 @@ CONFIG_NE2000=y
 # CONFIG_NET_VENDOR_SOLARFLARE is not set
 # CONFIG_NET_VENDOR_SMSC is not set
 # CONFIG_NET_VENDOR_STMICRO is not set
-# CONFIG_NET_VENDOR_SYNOPSYS is not set
 # CONFIG_NET_VENDOR_VIA is not set
 # CONFIG_NET_VENDOR_WIZNET is not set
 CONFIG_PLIP=m
@@ -538,6 +545,8 @@ CONFIG_NLS_MAC_TURKISH=m
 CONFIG_DLM=m
 # CONFIG_SECTION_MISMATCH_WARN_ONLY is not set
 CONFIG_MAGIC_SYSRQ=y
+CONFIG_WW_MUTEX_SELFTEST=m
+CONFIG_ATOMIC64_SELFTEST=m
 CONFIG_ASYNC_RAID6_TEST=m
 CONFIG_TEST_HEXDUMP=m
 CONFIG_TEST_STRING_HELPERS=m
@@ -568,6 +577,7 @@ CONFIG_CRYPTO_CHACHA20POLY1305=m
 CONFIG_CRYPTO_LRW=m
 CONFIG_CRYPTO_PCBC=m
 CONFIG_CRYPTO_KEYWRAP=m
+CONFIG_CRYPTO_CMAC=m
 CONFIG_CRYPTO_XCBC=m
 CONFIG_CRYPTO_VMAC=m
 CONFIG_CRYPTO_MICHAEL_MIC=m
@@ -579,6 +589,7 @@ CONFIG_CRYPTO_SHA512=m
 CONFIG_CRYPTO_SHA3=m
 CONFIG_CRYPTO_TGR192=m
 CONFIG_CRYPTO_WP512=m
+CONFIG_CRYPTO_AES_TI=m
 CONFIG_CRYPTO_ANUBIS=m
 CONFIG_CRYPTO_BLOWFISH=m
 CONFIG_CRYPTO_CAMELLIA=m
@@ -603,4 +614,5 @@ CONFIG_CRYPTO_USER_API_SKCIPHER=m
 CONFIG_CRYPTO_USER_API_RNG=m
 CONFIG_CRYPTO_USER_API_AEAD=m
 # CONFIG_CRYPTO_HW is not set
+CONFIG_CRC32_SELFTEST=m
 CONFIG_XZ_DEC_TEST=m
index 313bf0a562ad33496735210197879fcf86739ea2..f9e77f57a9725035d9f75a30ae4c0941c229d5d7 100644
@@ -25,6 +25,7 @@ CONFIG_UNIXWARE_DISKLABEL=y
 # CONFIG_EFI_PARTITION is not set
 CONFIG_SYSV68_PARTITION=y
 CONFIG_IOSCHED_DEADLINE=m
+CONFIG_MQ_IOSCHED_DEADLINE=m
 CONFIG_KEXEC=y
 CONFIG_BOOTINFO_PROC=y
 CONFIG_SUN3=y
@@ -53,6 +54,7 @@ CONFIG_NET_IPVTI=m
 CONFIG_NET_FOU_IP_TUNNELS=y
 CONFIG_INET_AH=m
 CONFIG_INET_ESP=m
+CONFIG_INET_ESP_OFFLOAD=m
 CONFIG_INET_IPCOMP=m
 CONFIG_INET_XFRM_MODE_TRANSPORT=m
 CONFIG_INET_XFRM_MODE_TUNNEL=m
@@ -64,6 +66,7 @@ CONFIG_IPV6=m
 CONFIG_IPV6_ROUTER_PREF=y
 CONFIG_INET6_AH=m
 CONFIG_INET6_ESP=m
+CONFIG_INET6_ESP_OFFLOAD=m
 CONFIG_INET6_IPCOMP=m
 CONFIG_IPV6_ILA=m
 CONFIG_IPV6_VTI=m
@@ -94,6 +97,7 @@ CONFIG_NFT_NUMGEN=m
 CONFIG_NFT_CT=m
 CONFIG_NFT_SET_RBTREE=m
 CONFIG_NFT_SET_HASH=m
+CONFIG_NFT_SET_BITMAP=m
 CONFIG_NFT_COUNTER=m
 CONFIG_NFT_LOG=m
 CONFIG_NFT_LIMIT=m
@@ -291,6 +295,8 @@ CONFIG_MPLS_IPTUNNEL=m
 CONFIG_NET_L3_MASTER_DEV=y
 CONFIG_AF_KCM=m
 # CONFIG_WIRELESS is not set
+CONFIG_PSAMPLE=m
+CONFIG_NET_IFE=m
 CONFIG_NET_DEVLINK=m
 # CONFIG_UEVENT_HELPER is not set
 CONFIG_DEVTMPFS=y
@@ -349,6 +355,7 @@ CONFIG_NET_TEAM_MODE_LOADBALANCE=m
 CONFIG_MACVLAN=m
 CONFIG_MACVTAP=m
 CONFIG_IPVLAN=m
+CONFIG_IPVTAP=m
 CONFIG_VXLAN=m
 CONFIG_GENEVE=m
 CONFIG_GTP=m
@@ -359,6 +366,7 @@ CONFIG_VETH=m
 # CONFIG_NET_VENDOR_ALACRITECH is not set
 # CONFIG_NET_VENDOR_AMAZON is not set
 CONFIG_SUN3LANCE=y
+# CONFIG_NET_VENDOR_AQUANTIA is not set
 # CONFIG_NET_VENDOR_ARC is not set
 # CONFIG_NET_CADENCE is not set
 # CONFIG_NET_VENDOR_EZCHIP is not set
@@ -375,7 +383,6 @@ CONFIG_SUN3_82586=y
 # CONFIG_NET_VENDOR_SOLARFLARE is not set
 # CONFIG_NET_VENDOR_STMICRO is not set
 # CONFIG_NET_VENDOR_SUN is not set
-# CONFIG_NET_VENDOR_SYNOPSYS is not set
 # CONFIG_NET_VENDOR_VIA is not set
 # CONFIG_NET_VENDOR_WIZNET is not set
 CONFIG_PPP=m
@@ -517,6 +524,8 @@ CONFIG_NLS_MAC_TURKISH=m
 CONFIG_DLM=m
 # CONFIG_SECTION_MISMATCH_WARN_ONLY is not set
 CONFIG_MAGIC_SYSRQ=y
+CONFIG_WW_MUTEX_SELFTEST=m
+CONFIG_ATOMIC64_SELFTEST=m
 CONFIG_ASYNC_RAID6_TEST=m
 CONFIG_TEST_HEXDUMP=m
 CONFIG_TEST_STRING_HELPERS=m
@@ -546,6 +555,7 @@ CONFIG_CRYPTO_CHACHA20POLY1305=m
 CONFIG_CRYPTO_LRW=m
 CONFIG_CRYPTO_PCBC=m
 CONFIG_CRYPTO_KEYWRAP=m
+CONFIG_CRYPTO_CMAC=m
 CONFIG_CRYPTO_XCBC=m
 CONFIG_CRYPTO_VMAC=m
 CONFIG_CRYPTO_MICHAEL_MIC=m
@@ -557,6 +567,7 @@ CONFIG_CRYPTO_SHA512=m
 CONFIG_CRYPTO_SHA3=m
 CONFIG_CRYPTO_TGR192=m
 CONFIG_CRYPTO_WP512=m
+CONFIG_CRYPTO_AES_TI=m
 CONFIG_CRYPTO_ANUBIS=m
 CONFIG_CRYPTO_BLOWFISH=m
 CONFIG_CRYPTO_CAMELLIA=m
@@ -581,4 +592,5 @@ CONFIG_CRYPTO_USER_API_SKCIPHER=m
 CONFIG_CRYPTO_USER_API_RNG=m
 CONFIG_CRYPTO_USER_API_AEAD=m
 # CONFIG_CRYPTO_HW is not set
+CONFIG_CRC32_SELFTEST=m
 CONFIG_XZ_DEC_TEST=m
index 38b61365f769273f829980fa9cec2d9cc585e1f5..3c394fcfb36836beba293194b23b91c4ca31f2aa 100644
@@ -25,6 +25,7 @@ CONFIG_UNIXWARE_DISKLABEL=y
 # CONFIG_EFI_PARTITION is not set
 CONFIG_SYSV68_PARTITION=y
 CONFIG_IOSCHED_DEADLINE=m
+CONFIG_MQ_IOSCHED_DEADLINE=m
 CONFIG_KEXEC=y
 CONFIG_BOOTINFO_PROC=y
 CONFIG_SUN3X=y
@@ -53,6 +54,7 @@ CONFIG_NET_IPVTI=m
 CONFIG_NET_FOU_IP_TUNNELS=y
 CONFIG_INET_AH=m
 CONFIG_INET_ESP=m
+CONFIG_INET_ESP_OFFLOAD=m
 CONFIG_INET_IPCOMP=m
 CONFIG_INET_XFRM_MODE_TRANSPORT=m
 CONFIG_INET_XFRM_MODE_TUNNEL=m
@@ -64,6 +66,7 @@ CONFIG_IPV6=m
 CONFIG_IPV6_ROUTER_PREF=y
 CONFIG_INET6_AH=m
 CONFIG_INET6_ESP=m
+CONFIG_INET6_ESP_OFFLOAD=m
 CONFIG_INET6_IPCOMP=m
 CONFIG_IPV6_ILA=m
 CONFIG_IPV6_VTI=m
@@ -94,6 +97,7 @@ CONFIG_NFT_NUMGEN=m
 CONFIG_NFT_CT=m
 CONFIG_NFT_SET_RBTREE=m
 CONFIG_NFT_SET_HASH=m
+CONFIG_NFT_SET_BITMAP=m
 CONFIG_NFT_COUNTER=m
 CONFIG_NFT_LOG=m
 CONFIG_NFT_LIMIT=m
@@ -291,6 +295,8 @@ CONFIG_MPLS_IPTUNNEL=m
 CONFIG_NET_L3_MASTER_DEV=y
 CONFIG_AF_KCM=m
 # CONFIG_WIRELESS is not set
+CONFIG_PSAMPLE=m
+CONFIG_NET_IFE=m
 CONFIG_NET_DEVLINK=m
 # CONFIG_UEVENT_HELPER is not set
 CONFIG_DEVTMPFS=y
@@ -349,6 +355,7 @@ CONFIG_NET_TEAM_MODE_LOADBALANCE=m
 CONFIG_MACVLAN=m
 CONFIG_MACVTAP=m
 CONFIG_IPVLAN=m
+CONFIG_IPVTAP=m
 CONFIG_VXLAN=m
 CONFIG_GENEVE=m
 CONFIG_GTP=m
@@ -359,6 +366,7 @@ CONFIG_VETH=m
 # CONFIG_NET_VENDOR_ALACRITECH is not set
 # CONFIG_NET_VENDOR_AMAZON is not set
 CONFIG_SUN3LANCE=y
+# CONFIG_NET_VENDOR_AQUANTIA is not set
 # CONFIG_NET_VENDOR_ARC is not set
 # CONFIG_NET_CADENCE is not set
 # CONFIG_NET_VENDOR_BROADCOM is not set
@@ -375,7 +383,6 @@ CONFIG_SUN3LANCE=y
 # CONFIG_NET_VENDOR_SEEQ is not set
 # CONFIG_NET_VENDOR_SOLARFLARE is not set
 # CONFIG_NET_VENDOR_STMICRO is not set
-# CONFIG_NET_VENDOR_SYNOPSYS is not set
 # CONFIG_NET_VENDOR_VIA is not set
 # CONFIG_NET_VENDOR_WIZNET is not set
 CONFIG_PPP=m
@@ -517,6 +524,8 @@ CONFIG_NLS_MAC_TURKISH=m
 CONFIG_DLM=m
 # CONFIG_SECTION_MISMATCH_WARN_ONLY is not set
 CONFIG_MAGIC_SYSRQ=y
+CONFIG_WW_MUTEX_SELFTEST=m
+CONFIG_ATOMIC64_SELFTEST=m
 CONFIG_ASYNC_RAID6_TEST=m
 CONFIG_TEST_HEXDUMP=m
 CONFIG_TEST_STRING_HELPERS=m
@@ -547,6 +556,7 @@ CONFIG_CRYPTO_CHACHA20POLY1305=m
 CONFIG_CRYPTO_LRW=m
 CONFIG_CRYPTO_PCBC=m
 CONFIG_CRYPTO_KEYWRAP=m
+CONFIG_CRYPTO_CMAC=m
 CONFIG_CRYPTO_XCBC=m
 CONFIG_CRYPTO_VMAC=m
 CONFIG_CRYPTO_MICHAEL_MIC=m
@@ -558,6 +568,7 @@ CONFIG_CRYPTO_SHA512=m
 CONFIG_CRYPTO_SHA3=m
 CONFIG_CRYPTO_TGR192=m
 CONFIG_CRYPTO_WP512=m
+CONFIG_CRYPTO_AES_TI=m
 CONFIG_CRYPTO_ANUBIS=m
 CONFIG_CRYPTO_BLOWFISH=m
 CONFIG_CRYPTO_CAMELLIA=m
@@ -582,4 +593,5 @@ CONFIG_CRYPTO_USER_API_SKCIPHER=m
 CONFIG_CRYPTO_USER_API_RNG=m
 CONFIG_CRYPTO_USER_API_AEAD=m
 # CONFIG_CRYPTO_HW is not set
+CONFIG_CRC32_SELFTEST=m
 CONFIG_XZ_DEC_TEST=m
index b4a9b0d5928dfb33c0c6dd5ef507305f35214fe7..dda58cfe8c22a3ec65ba074c4c0baab3c957340b 100644 (file)
@@ -148,7 +148,7 @@ static inline void bfchg_mem_change_bit(int nr, volatile unsigned long *vaddr)
 #define __change_bit(nr, vaddr)        change_bit(nr, vaddr)
 
 
-static inline int test_bit(int nr, const unsigned long *vaddr)
+static inline int test_bit(int nr, const volatile unsigned long *vaddr)
 {
        return (vaddr[nr >> 5] & (1UL << (nr & 31))) != 0;
 }
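
The hunk above adds the volatile qualifier to test_bit()'s word pointer. For callers polling a bitmap that is updated asynchronously (by an interrupt handler, say), volatile forces the compiler to reload the containing word on every test instead of caching it. A minimal userspace sketch of the same signature, purely for illustration (the 32-bit word indexing mirrors the m68k version above):

    #include <stdio.h>

    /* Same shape as the m68k test_bit() above; volatile forces a fresh
     * load of the containing word on each call. */
    static inline int test_bit_demo(int nr, const volatile unsigned long *vaddr)
    {
            return (vaddr[nr >> 5] & (1UL << (nr & 31))) != 0;
    }

    int main(void)
    {
            volatile unsigned long bitmap[2] = { 0 };

            bitmap[0] |= 1UL << 3;
            printf("bit 3: %d, bit 4: %d\n",
                   test_bit_demo(3, bitmap), test_bit_demo(4, bitmap));
            return 0;
    }
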
index a857d82ec5094abc30e353f25370365194a01194..aab1edd0d4bade511f2ea92230d3bd533a58ad34 100644 (file)
@@ -4,7 +4,7 @@
 #include <uapi/asm/unistd.h>
 
 
-#define NR_syscalls            379
+#define NR_syscalls            380
 
 #define __ARCH_WANT_OLD_READDIR
 #define __ARCH_WANT_OLD_STAT
index 9fe674bf911fd2a4e61d7119f9b91ffbdddf44f5..25589f5b8669631d5cf2441d7fb405a97c5561f5 100644 (file)
 #define __NR_copy_file_range   376
 #define __NR_preadv2           377
 #define __NR_pwritev2          378
+#define __NR_statx             379
 
 #endif /* _UAPI_ASM_M68K_UNISTD_H_ */
index d6fd6d9ced2474ab477b0becefbd5f695d256e96..8c9fcfafe0dd90ba5f67f860e7eafb351cb156ef 100644 (file)
@@ -399,3 +399,4 @@ ENTRY(sys_call_table)
        .long sys_copy_file_range
        .long sys_preadv2
        .long sys_pwritev2
+       .long sys_statx
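
The three m68k hunks above wire up the new statx system call: NR_syscalls grows from 379 to 380, the uapi header assigns __NR_statx = 379, and sys_statx is appended to the syscall table. Once a kernel with these changes is running, the call can be probed from userspace; a hedged example, assuming headers new enough to define __NR_statx and struct statx:

    #include <fcntl.h>              /* AT_FDCWD */
    #include <linux/stat.h>         /* struct statx, STATX_BASIC_STATS */
    #include <stdio.h>
    #include <sys/syscall.h>
    #include <unistd.h>

    int main(void)
    {
            struct statx stx;

            /* statx(dirfd, path, flags, mask, buf) via the raw syscall */
            if (syscall(__NR_statx, AT_FDCWD, "/", 0,
                        STATX_BASIC_STATS, &stx) == 0)
                    printf("root inode: %llu\n",
                           (unsigned long long)stx.stx_ino);
            else
                    perror("statx");
            return 0;
    }
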
index ffa3a3a2ecadda8bed7cf5e7b1508cd98c43abf8..0c151e5af079288aeebb8deb55994ec0106f5507 100644 (file)
@@ -6,6 +6,7 @@
 #define _METAG_PGTABLE_H
 
 #include <asm/pgtable-bits.h>
+#define __ARCH_USE_5LEVEL_HACK
 #include <asm-generic/pgtable-nopmd.h>
 
 /* Invalid regions on Meta: 0x00000000-0x001FFFFF and 0xFFFF0000-0xFFFFFFFF */
index 5fd16ee5280c1aee8584d6fbc90d5bd74b4c1665..e615603a4b0ae95cb0731ce5e653a518763b7c92 100644 (file)
  * user_regset definitions.
  */
 
+static unsigned long user_txstatus(const struct pt_regs *regs)
+{
+       unsigned long data = (unsigned long)regs->ctx.Flags;
+
+       if (regs->ctx.SaveMask & TBICTX_CBUF_BIT)
+               data |= USER_GP_REGS_STATUS_CATCH_BIT;
+
+       return data;
+}
+
 int metag_gp_regs_copyout(const struct pt_regs *regs,
                          unsigned int pos, unsigned int count,
                          void *kbuf, void __user *ubuf)
@@ -64,9 +74,7 @@ int metag_gp_regs_copyout(const struct pt_regs *regs,
        if (ret)
                goto out;
        /* TXSTATUS */
-       data = (unsigned long)regs->ctx.Flags;
-       if (regs->ctx.SaveMask & TBICTX_CBUF_BIT)
-               data |= USER_GP_REGS_STATUS_CATCH_BIT;
+       data = user_txstatus(regs);
        ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
                                  &data, 4*25, 4*26);
        if (ret)
@@ -121,6 +129,7 @@ int metag_gp_regs_copyin(struct pt_regs *regs,
        if (ret)
                goto out;
        /* TXSTATUS */
+       data = user_txstatus(regs);
        ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
                                 &data, 4*25, 4*26);
        if (ret)
@@ -246,6 +255,8 @@ int metag_rp_state_copyin(struct pt_regs *regs,
        unsigned long long *ptr;
        int ret, i;
 
+       if (count < 4*13)
+               return -EINVAL;
        /* Read the entire pipeline before making any changes */
        ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
                                 &rp, 0, 4*13);
@@ -305,7 +316,7 @@ static int metag_tls_set(struct task_struct *target,
                        const void *kbuf, const void __user *ubuf)
 {
        int ret;
-       void __user *tls;
+       void __user *tls = target->thread.tls_ptr;
 
        ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &tls, 0, -1);
        if (ret)
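
The metag ptrace hunks above share one theme: regset writes must stay well defined when userspace supplies fewer bytes than the regset holds. The TXSTATUS temporary is now seeded from the new user_txstatus() helper before the copy-in, writes to the read pipeline are rejected outright when shorter than the full 4*13-byte image, and the TLS pointer starts from target->thread.tls_ptr. A hedged sketch of the seed-before-copy-in pattern, with invented names and plain memcpy in place of the regset helper:

    #include <stddef.h>
    #include <string.h>

    static unsigned long live_txstatus;             /* stand-in for regs->ctx state */

    static void copyin_txstatus(const void *ubuf, size_t count)
    {
            unsigned long data = live_txstatus;     /* seed from current state */

            /* A short buffer leaves the tail of 'data' at its old,
             * defined value instead of stack garbage. */
            memcpy(&data, ubuf, count < sizeof(data) ? count : sizeof(data));
            live_txstatus = data;
    }
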
index fd850879854dff3f79a73effd5fc782e9bd203d8..d506bb0893f94e67288fdb8a4b700749873e921b 100644 (file)
@@ -95,7 +95,8 @@ typedef struct { unsigned long pgd; } pgd_t;
 #   else /* CONFIG_MMU */
 typedef struct { unsigned long ste[64]; }      pmd_t;
 typedef struct { pmd_t         pue[1]; }       pud_t;
-typedef struct { pud_t         pge[1]; }       pgd_t;
+typedef struct { pud_t         p4e[1]; }       p4d_t;
+typedef struct { p4d_t         pge[1]; }       pgd_t;
 #   endif /* CONFIG_MMU */
 
 # define pte_val(x)    ((x).pte)
index a5b427909b5cac04d28c4da1b099342ee72df4ce..036d56cc459168a0087ce8eba3e69aae9f9197b6 100644 (file)
@@ -10,7 +10,9 @@
 #include <linux/irqflags.h>
 #include <linux/notifier.h>
 #include <linux/prefetch.h>
+#include <linux/ptrace.h>
 #include <linux/sched.h>
+#include <linux/sched/task_stack.h>
 
 #include <asm/cop2.h>
 #include <asm/current.h>
index 4d22365844af30b9bdd24cfd521b6a8e0a7f7cff..cfb4a146cf1786a73a28a781f26b363757164968 100644 (file)
@@ -9,6 +9,7 @@
 #include <asm/cop2.h>
 #include <linux/export.h>
 #include <linux/interrupt.h>
+#include <linux/sched/task_stack.h>
 
 #include "octeon-crypto.h"
 
index 4b94b7fbafa3602374477c99fd76acde79945565..3de786545ded10ac64f5fa77d2025fa3a9955fd9 100644 (file)
@@ -12,6 +12,7 @@
 #include <linux/kernel_stat.h>
 #include <linux/sched.h>
 #include <linux/sched/hotplug.h>
+#include <linux/sched/task_stack.h>
 #include <linux/init.h>
 #include <linux/export.h>
 
index 321752bcbab6ec5fc6fa1d6a658b82bb1a58e7ed..f94455f964ec00b1e1b17120349b1543ed9dec97 100644 (file)
@@ -12,6 +12,7 @@
 
 #include <linux/sched.h>
 #include <linux/sched/task_stack.h>
+#include <linux/ptrace.h>
 #include <linux/thread_info.h>
 #include <linux/bitops.h>
 
index d21f3da7bdb619402a438b923fda454b7525d204..6f94bed571c4416b917a52fe364172243a3a9fe6 100644 (file)
@@ -16,6 +16,7 @@
 #include <asm/cachectl.h>
 #include <asm/fixmap.h>
 
+#define __ARCH_USE_5LEVEL_HACK
 #include <asm-generic/pgtable-nopmd.h>
 
 extern int temp_tlb_entry;
index 514cbc0a6a6760fd84d77c3760aa9ce2ac682fd5..130a2a6c153156bd311e6646bbfd3b92fc6f7228 100644 (file)
@@ -17,6 +17,7 @@
 #include <asm/cachectl.h>
 #include <asm/fixmap.h>
 
+#define __ARCH_USE_5LEVEL_HACK
 #if defined(CONFIG_PAGE_SIZE_64KB) && !defined(CONFIG_MIPS_VA_BITS_48)
 #include <asm-generic/pgtable-nopmd.h>
 #else
index 566ecdcb5b4bcb2cd4d5888a1ce787b8fcbd0b97..3418ec9c1c5016df7c5acd27cdce5e3507ef4175 100644 (file)
 
 #define SCM_TIMESTAMPING_OPT_STATS     54
 
+#define SO_MEMINFO             55
+
+#define SO_INCOMING_NAPI_ID    56
+
+#define SO_COOKIE              57
+
 #endif /* _UAPI_ASM_SOCKET_H */
index 339601267265394d19e92deef167167375600e82..6931fe722a0b54dcfa5078c8686160018c1e6c64 100644 (file)
@@ -456,7 +456,8 @@ static int fpr_set(struct task_struct *target,
                                          &target->thread.fpu,
                                          0, sizeof(elf_fpregset_t));
 
-       for (i = 0; i < NUM_FPU_REGS; i++) {
+       BUILD_BUG_ON(sizeof(fpr_val) != sizeof(elf_fpreg_t));
+       for (i = 0; i < NUM_FPU_REGS && count >= sizeof(elf_fpreg_t); i++) {
                err = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
                                         &fpr_val, i * sizeof(elf_fpreg_t),
                                         (i + 1) * sizeof(elf_fpreg_t));
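
The fpr_set() fix above adds `count >= sizeof(elf_fpreg_t)` to the loop condition, so a short user buffer ends the register walk instead of pushing user_regset_copyin() past the supplied data, and the BUILD_BUG_ON pins the temporary's size to exactly one FP register. A simplified sketch of the guarded loop (hypothetical names, plain memcpy standing in for the regset helper):

    #include <stddef.h>
    #include <string.h>

    typedef unsigned long long fpreg_demo_t;

    static size_t copyin_fprs(fpreg_demo_t *dst, size_t nregs,
                              const unsigned char *src, size_t count)
    {
            size_t i;

            /* Stop once less than one whole register remains in the
             * source buffer, whatever nregs says. */
            for (i = 0; i < nregs && count >= sizeof(*dst); i++) {
                    memcpy(&dst[i], src + i * sizeof(*dst), sizeof(*dst));
                    count -= sizeof(*dst);
            }
            return i;       /* registers actually updated */
    }
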
index 3daa2cae50b0b976942772c7c10656d8f4ffd3ab..1b070a76fcdd4c2e5f62a84bfff8e40ad0a8721b 100644 (file)
@@ -11,6 +11,7 @@
 #include <linux/init.h>
 #include <linux/sched.h>
 #include <linux/sched/hotplug.h>
+#include <linux/sched/task_stack.h>
 #include <linux/mm.h>
 #include <linux/delay.h>
 #include <linux/smp.h>
index e077ea3e11fb36ee2d5f85f7e8415c97eeead1d9..e398cbc3d7767d50287d4c1e267b4e3e990efa4e 100644 (file)
@@ -23,6 +23,7 @@
 #include <linux/interrupt.h>
 #include <linux/irqchip/mips-gic.h>
 #include <linux/compiler.h>
+#include <linux/sched/task_stack.h>
 #include <linux/smp.h>
 
 #include <linux/atomic.h>
index ea13764d0a035ccc5ec6cf443f021aa1f4f58103..621d6af5f6eb8ecc9e602ee085ceb39412678cc3 100644 (file)
@@ -13,6 +13,7 @@
 #include <linux/init.h>
 #include <linux/sched.h>
 #include <linux/notifier.h>
+#include <linux/ptrace.h>
 
 #include <asm/fpu.h>
 #include <asm/cop2.h>
index 10d86d54880ab8541eecf01f8d1f0dd2b3d6ee18..bddf1ef553a4f695d320d4f88eb7f28b079cec94 100644 (file)
@@ -35,6 +35,7 @@
 #include <linux/kernel.h>
 #include <linux/delay.h>
 #include <linux/init.h>
+#include <linux/sched/task_stack.h>
 #include <linux/smp.h>
 #include <linux/irq.h>
 
index 52bc5de420052ca4e604cd39a8ff4bfcbbba2fa1..21e439b3db707f4119229954e1eced768dd5162d 100644 (file)
@@ -9,11 +9,14 @@
  * Copyright (C) 2009 Wind River Systems,
  *   written by Ralf Baechle <ralf@linux-mips.org>
  */
+#include <linux/capability.h>
 #include <linux/init.h>
 #include <linux/irqflags.h>
 #include <linux/notifier.h>
 #include <linux/prefetch.h>
+#include <linux/ptrace.h>
 #include <linux/sched.h>
+#include <linux/sched/task_stack.h>
 
 #include <asm/cop2.h>
 #include <asm/current.h>
index 1f2a5bc4779e6a356a43f179608b98d605a8c343..75460e1e106b2c08799354139b869481d0b23abb 100644 (file)
@@ -9,6 +9,7 @@
 #include <linux/kernel.h>
 #include <linux/sched.h>
 #include <linux/sched/debug.h>
+#include <linux/sched/signal.h>
 #include <linux/seq_file.h>
 
 #include <asm/addrspace.h>
index d12879eb2b1fa9f0b58cfc03fbd51d8ea2394134..83efe03d5c600f695d073279c1d594a7ca60b471 100644 (file)
@@ -12,7 +12,9 @@
 #include <linux/signal.h>      /* for SIGBUS */
 #include <linux/sched.h>       /* schow_regs(), force_sig() */
 #include <linux/sched/debug.h>
+#include <linux/sched/signal.h>
 
+#include <asm/ptrace.h>
 #include <asm/sn/addrs.h>
 #include <asm/sn/arch.h>
 #include <asm/sn/sn0/hub.h>
index f5ed45e8f442565ce0f7435585cff684ece5d764..4cd47d23d81a76105b5a5fc21a768cc50d478879 100644 (file)
@@ -8,10 +8,13 @@
  */
 #include <linux/init.h>
 #include <linux/sched.h>
+#include <linux/sched/task_stack.h>
 #include <linux/topology.h>
 #include <linux/nodemask.h>
+
 #include <asm/page.h>
 #include <asm/processor.h>
+#include <asm/ptrace.h>
 #include <asm/sn/arch.h>
 #include <asm/sn/gda.h>
 #include <asm/sn/intr.h>
index 57d8c7486fe6b75b2f903fbd2673d869c9d63001..c1f12a9cf305f4196bd23cbe01b7dc73dc852b26 100644 (file)
@@ -11,6 +11,7 @@
 #include <linux/kernel.h>
 #include <linux/sched.h>
 #include <linux/sched/debug.h>
+#include <linux/sched/signal.h>
 #include <asm/traps.h>
 #include <linux/uaccess.h>
 #include <asm/addrspace.h>
index 8bd415c8729f974a8e1e376d5a2b3e08c84e6053..b3b442def42383794289c6c07ea9a44ee5c4a873 100644 (file)
@@ -13,6 +13,7 @@
 #include <linux/kernel.h>
 #include <linux/module.h>
 #include <linux/sched.h>
+#include <linux/sched/signal.h>
 #include <linux/notifier.h>
 #include <linux/delay.h>
 #include <linux/rtc/ds1685.h>
index 3810a6f740fdf67ffa6622fcf4706c1c841aa09a..dfe730a5ede04a0f3f22a877dd8cd4f42baa2652 100644 (file)
@@ -57,6 +57,7 @@ typedef struct page *pgtable_t;
 #define __pgd(x)       ((pgd_t) { (x) })
 #define __pgprot(x)    ((pgprot_t) { (x) })
 
+#define __ARCH_USE_5LEVEL_HACK
 #include <asm-generic/pgtable-nopmd.h>
 
 #endif /* !__ASSEMBLY__ */
index 0e12527c4b0e6de154efaa91fe197eb995a1535c..4526e92301a6768ec714c5d95d501cdfafaf5cfa 100644 (file)
 
 #define SCM_TIMESTAMPING_OPT_STATS     54
 
+#define SO_MEMINFO             55
+
+#define SO_INCOMING_NAPI_ID    56
+
+#define SO_COOKIE              57
+
 #endif /* _ASM_SOCKET_H */
index 298393c3cb426ffa7889589a637f342cae5b90e0..db4f7d179220782ab05e46ab46b02ffa09d4a998 100644 (file)
@@ -22,6 +22,7 @@
 #include <asm/tlbflush.h>
 
 #include <asm/pgtable-bits.h>
+#define __ARCH_USE_5LEVEL_HACK
 #include <asm-generic/pgtable-nopmd.h>
 
 #define FIRST_USER_ADDRESS     0UL
index 367c5426157ba14dfe8799664c3f11dd6eb9c8a2..3901b80d442021e17e96e29f33cea2b1c28ec345 100644 (file)
@@ -48,6 +48,13 @@ void * __init early_init_dt_alloc_memory_arch(u64 size, u64 align)
        return alloc_bootmem_align(size, align);
 }
 
+int __init early_init_dt_reserve_memory_arch(phys_addr_t base, phys_addr_t size,
+                                            bool nomap)
+{
+       reserve_bootmem(base, size, BOOTMEM_DEFAULT);
+       return 0;
+}
+
 void __init early_init_devtree(void *params)
 {
        __be32 *dtb = (u32 *)__dtb_start;
index 6e57ffa5db2769babe8c285f1e88e16fe13ed998..6044d9be28b4493323d362162e7e5ec6c56e1f33 100644 (file)
@@ -201,6 +201,9 @@ void __init setup_arch(char **cmdline_p)
        }
 #endif /* CONFIG_BLK_DEV_INITRD */
 
+       early_init_fdt_reserve_self();
+       early_init_fdt_scan_reserved_mem();
+
        unflatten_and_copy_device_tree();
 
        setup_cpuinfo();
index 5fcb9ac72693850f50060a4822445a09d81b8a80..f0a5d8b844d6b85b16eb6c170f8af86f73ad8440 100644 (file)
@@ -77,7 +77,11 @@ static inline unsigned long __xchg(unsigned long val, volatile void *ptr,
        return val;
 }
 
-#define xchg(ptr, with) \
-       ((typeof(*(ptr)))__xchg((unsigned long)(with), (ptr), sizeof(*(ptr))))
+#define xchg(ptr, with)                                                \
+       ({                                                              \
+               (__typeof__(*(ptr))) __xchg((unsigned long)(with),      \
+                                           (ptr),                      \
+                                           sizeof(*(ptr)));            \
+       })
 
 #endif /* __ASM_OPENRISC_CMPXCHG_H */
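
The rewrite above turns xchg() into a GNU statement expression: the cast and the __xchg() call expand as a single expression of the pointee's type, so the macro can sit on the right-hand side of an assignment while remaining readable across several lines. A self-contained demo of the same macro shape, with the GCC/Clang __atomic_exchange_n builtin standing in for the arch's __xchg helper:

    #include <stdio.h>

    #define xchg_demo(ptr, with)                                          \
            ({                                                            \
                    (__typeof__(*(ptr)))__atomic_exchange_n(              \
                            (ptr), (with), __ATOMIC_SEQ_CST);             \
            })

    int main(void)
    {
            int v = 1;
            int old = xchg_demo(&v, 2);     /* usable as an expression */

            printf("old=%d new=%d\n", old, v);
            return 0;
    }
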
index 3567aa7be55504d7b838b2e13b3d62d7929096c1..ff97374ca0693d526582b8c34e7f9f421f43ed48 100644 (file)
@@ -25,6 +25,7 @@
 #ifndef __ASM_OPENRISC_PGTABLE_H
 #define __ASM_OPENRISC_PGTABLE_H
 
+#define __ARCH_USE_5LEVEL_HACK
 #include <asm-generic/pgtable-nopmd.h>
 
 #ifndef __ASSEMBLY__
index 140faa16685a2325f3a1b6cbf9cbb9c8e68fc913..1311e6b139916692bb5f81fbfd188a48b844d977 100644 (file)
@@ -211,7 +211,7 @@ do {                                                                        \
        case 1: __get_user_asm(x, ptr, retval, "l.lbz"); break;         \
        case 2: __get_user_asm(x, ptr, retval, "l.lhz"); break;         \
        case 4: __get_user_asm(x, ptr, retval, "l.lwz"); break;         \
-       case 8: __get_user_asm2(x, ptr, retval);                        \
+       case 8: __get_user_asm2(x, ptr, retval); break;                 \
        default: (x) = __get_user_bad();                                \
        }                                                               \
 } while (0)
index 5c4695d13542fc003054995b728ac468e18bd94c..ee3e604959e15c514bc91eb65118d8d04ea20b59 100644 (file)
@@ -30,6 +30,7 @@
 #include <asm/hardirq.h>
 #include <asm/delay.h>
 #include <asm/pgalloc.h>
+#include <asm/pgtable.h>
 
 #define DECLARE_EXPORT(name) extern void name(void); EXPORT_SYMBOL(name)
 
@@ -42,6 +43,9 @@ DECLARE_EXPORT(__muldi3);
 DECLARE_EXPORT(__ashrdi3);
 DECLARE_EXPORT(__ashldi3);
 DECLARE_EXPORT(__lshrdi3);
+DECLARE_EXPORT(__ucmpdi2);
 
+EXPORT_SYMBOL(empty_zero_page);
 EXPORT_SYMBOL(__copy_tofrom_user);
+EXPORT_SYMBOL(__clear_user);
 EXPORT_SYMBOL(memset);
index 828a29110459e8cb9f1e85b1b5033f30ef0348dd..f8da545854f979c33a7b3116d26d822caa46c494 100644 (file)
@@ -90,6 +90,7 @@ void arch_cpu_idle(void)
 }
 
 void (*pm_power_off) (void) = machine_power_off;
+EXPORT_SYMBOL(pm_power_off);
 
 /*
  * When a process does an "exec", machine state like FPU and debug
index 19c9c3c5f267eac813edf6c5fc6f358301d2a639..c7e15cc5c6683b423d028b1557fc0dc9b7dd5a16 100644 (file)
@@ -43,28 +43,9 @@ static inline void flush_kernel_dcache_page(struct page *page)
 
 #define flush_kernel_dcache_range(start,size) \
        flush_kernel_dcache_range_asm((start), (start)+(size));
-/* vmap range flushes and invalidates.  Architecturally, we don't need
- * the invalidate, because the CPU should refuse to speculate once an
- * area has been flushed, so invalidate is left empty */
-static inline void flush_kernel_vmap_range(void *vaddr, int size)
-{
-       unsigned long start = (unsigned long)vaddr;
-
-       flush_kernel_dcache_range_asm(start, start + size);
-}
-static inline void invalidate_kernel_vmap_range(void *vaddr, int size)
-{
-       unsigned long start = (unsigned long)vaddr;
-       void *cursor = vaddr;
 
-       for ( ; cursor < vaddr + size; cursor += PAGE_SIZE) {
-               struct page *page = vmalloc_to_page(cursor);
-
-               if (test_and_clear_bit(PG_dcache_dirty, &page->flags))
-                       flush_kernel_dcache_page(page);
-       }
-       flush_kernel_dcache_range_asm(start, start + size);
-}
+void flush_kernel_vmap_range(void *vaddr, int size);
+void invalidate_kernel_vmap_range(void *vaddr, int size);
 
 #define flush_cache_vmap(start, end)           flush_cache_all()
 #define flush_cache_vunmap(start, end)         flush_cache_all()
index fb4382c28259b3ff2f873014fce7e42f1373dac8..8442727f28d2732eecb583d46bdcc88258a590dd 100644 (file)
@@ -32,7 +32,8 @@
  * that put_user is the same as __put_user, etc.
  */
 
-#define access_ok(type, uaddr, size) (1)
+#define access_ok(type, uaddr, size)   \
+       ( (uaddr) == (uaddr) )
 
 #define put_user __put_user
 #define get_user __get_user
@@ -63,6 +64,15 @@ struct exception_table_entry {
        ".word (" #fault_addr " - .), (" #except_addr " - .)\n\t" \
        ".previous\n"
 
+/*
+ * ASM_EXCEPTIONTABLE_ENTRY_EFAULT() creates a special exception table entry
+ * (with lowest bit set) for which the fault handler in fixup_exception() will
+ * load -EFAULT into %r8 for a read or write fault, and zeroes the target
+ * register in case of a read fault in get_user().
+ */
+#define ASM_EXCEPTIONTABLE_ENTRY_EFAULT( fault_addr, except_addr )\
+       ASM_EXCEPTIONTABLE_ENTRY( fault_addr, except_addr + 1)
+
 /*
  * The page fault handler stores, in a per-cpu area, the following information
  * if a fixup routine is available.
@@ -90,7 +100,7 @@ struct exception_data {
 #define __get_user(x, ptr)                               \
 ({                                                       \
        register long __gu_err __asm__ ("r8") = 0;       \
-       register long __gu_val __asm__ ("r9") = 0;       \
+       register long __gu_val;                          \
                                                         \
        load_sr2();                                      \
        switch (sizeof(*(ptr))) {                        \
@@ -106,22 +116,23 @@ struct exception_data {
 })
 
 #define __get_user_asm(ldx, ptr)                        \
-       __asm__("\n1:\t" ldx "\t0(%%sr2,%2),%0\n\t"     \
-               ASM_EXCEPTIONTABLE_ENTRY(1b, fixup_get_user_skip_1)\
+       __asm__("1: " ldx " 0(%%sr2,%2),%0\n"           \
+               "9:\n"                                  \
+               ASM_EXCEPTIONTABLE_ENTRY_EFAULT(1b, 9b) \
                : "=r"(__gu_val), "=r"(__gu_err)        \
-               : "r"(ptr), "1"(__gu_err)               \
-               : "r1");
+               : "r"(ptr), "1"(__gu_err));
 
 #if !defined(CONFIG_64BIT)
 
 #define __get_user_asm64(ptr)                          \
-       __asm__("\n1:\tldw 0(%%sr2,%2),%0"              \
-               "\n2:\tldw 4(%%sr2,%2),%R0\n\t"         \
-               ASM_EXCEPTIONTABLE_ENTRY(1b, fixup_get_user_skip_2)\
-               ASM_EXCEPTIONTABLE_ENTRY(2b, fixup_get_user_skip_1)\
+       __asm__("   copy %%r0,%R0\n"                    \
+               "1: ldw 0(%%sr2,%2),%0\n"               \
+               "2: ldw 4(%%sr2,%2),%R0\n"              \
+               "9:\n"                                  \
+               ASM_EXCEPTIONTABLE_ENTRY_EFAULT(1b, 9b) \
+               ASM_EXCEPTIONTABLE_ENTRY_EFAULT(2b, 9b) \
                : "=r"(__gu_val), "=r"(__gu_err)        \
-               : "r"(ptr), "1"(__gu_err)               \
-               : "r1");
+               : "r"(ptr), "1"(__gu_err));
 
 #endif /* !defined(CONFIG_64BIT) */
 
@@ -147,32 +158,31 @@ struct exception_data {
  * The "__put_user/kernel_asm()" macros tell gcc they read from memory
  * instead of writing. This is because they do not write to any memory
  * gcc knows about, so there are no aliasing issues. These macros must
- * also be aware that "fixup_put_user_skip_[12]" are executed in the
- * context of the fault, and any registers used there must be listed
- * as clobbers. In this case only "r1" is used by the current routines.
- * r8/r9 are already listed as err/val.
+ * also be aware that fixups are executed in the context of the fault,
+ * and any registers used there must be listed as clobbers.
+ * r8 is already listed as err.
  */
 
 #define __put_user_asm(stx, x, ptr)                         \
        __asm__ __volatile__ (                              \
-               "\n1:\t" stx "\t%2,0(%%sr2,%1)\n\t"         \
-               ASM_EXCEPTIONTABLE_ENTRY(1b, fixup_put_user_skip_1)\
+               "1: " stx " %2,0(%%sr2,%1)\n"               \
+               "9:\n"                                      \
+               ASM_EXCEPTIONTABLE_ENTRY_EFAULT(1b, 9b)     \
                : "=r"(__pu_err)                            \
-               : "r"(ptr), "r"(x), "0"(__pu_err)           \
-               : "r1")
+               : "r"(ptr), "r"(x), "0"(__pu_err))
 
 
 #if !defined(CONFIG_64BIT)
 
 #define __put_user_asm64(__val, ptr) do {                  \
        __asm__ __volatile__ (                              \
-               "\n1:\tstw %2,0(%%sr2,%1)"                  \
-               "\n2:\tstw %R2,4(%%sr2,%1)\n\t"             \
-               ASM_EXCEPTIONTABLE_ENTRY(1b, fixup_put_user_skip_2)\
-               ASM_EXCEPTIONTABLE_ENTRY(2b, fixup_put_user_skip_1)\
+               "1: stw %2,0(%%sr2,%1)\n"                   \
+               "2: stw %R2,4(%%sr2,%1)\n"                  \
+               "9:\n"                                      \
+               ASM_EXCEPTIONTABLE_ENTRY_EFAULT(1b, 9b)     \
+               ASM_EXCEPTIONTABLE_ENTRY_EFAULT(2b, 9b)     \
                : "=r"(__pu_err)                            \
-               : "r"(ptr), "r"(__val), "0"(__pu_err) \
-               : "r1");                                    \
+               : "r"(ptr), "r"(__val), "0"(__pu_err));     \
 } while (0)
 
 #endif /* !defined(CONFIG_64BIT) */
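
The uaccess rework above replaces the per-size fixup stubs with tagged exception-table entries: ASM_EXCEPTIONTABLE_ENTRY_EFAULT() sets the lowest bit of the fixup address, which tells the parisc fixup_exception() to load -EFAULT into %r8 (and, for get_user() reads, zero the target register) before resuming. A hedged model of the tagging idea with an invented entry layout; the real decoding lives in the arch fault code:

    /* Low bit of the fixup address doubles as a "report -EFAULT" flag. */
    struct fixup_demo {
            unsigned long fault_ip;
            unsigned long fixup;
    };

    static unsigned long resume_address(const struct fixup_demo *e, long *err)
    {
            unsigned long addr = e->fixup;

            if (addr & 1) {         /* EFAULT-style entry */
                    *err = -14;     /* -EFAULT */
                    addr &= ~1UL;   /* strip the tag to get the real target */
            }
            return addr;
    }
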
index 7a109b73ddf7e814f9c29ffa1ec9b99977ead5d1..514701840bd93441c54107d57f10411b64576f13 100644 (file)
 
 #define SCM_TIMESTAMPING_OPT_STATS     0x402F
 
+#define SO_MEMINFO             0x4030
+
+#define SO_INCOMING_NAPI_ID    0x4031
+
+#define SO_COOKIE              0x4032
+
 #endif /* _UAPI_ASM_SOCKET_H */
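
As in the other per-arch socket.h hunks in this series, the three new option names are mirrored with arch-specific numbering (55-57 generically, 0x4030-0x4032 on parisc, which keeps its own value space). A hedged userspace example of querying the new SO_MEMINFO option on a kernel that supports it; the constant is spelled out for older headers, and index 0 is assumed to be the receive-allocation counter:

    #include <stdio.h>
    #include <sys/socket.h>

    #ifndef SO_MEMINFO
    #define SO_MEMINFO 55           /* generic value; parisc uses 0x4030 */
    #endif

    int main(void)
    {
            unsigned int meminfo[16];       /* room for the SK_MEMINFO_* array */
            socklen_t len = sizeof(meminfo);
            int fd = socket(AF_INET, SOCK_DGRAM, 0);

            if (getsockopt(fd, SOL_SOCKET, SO_MEMINFO, meminfo, &len) == 0)
                    printf("rmem_alloc=%u\n", meminfo[0]);
            else
                    perror("getsockopt(SO_MEMINFO)");
            return 0;
    }
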
index 6b0741e7a7ed3ee4060d619a8999b50dab12dac3..667c99421003e4dd07c6d204bef7db08fa905933 100644 (file)
 #define __NR_copy_file_range   (__NR_Linux + 346)
 #define __NR_preadv2           (__NR_Linux + 347)
 #define __NR_pwritev2          (__NR_Linux + 348)
+#define __NR_statx             (__NR_Linux + 349)
 
-#define __NR_Linux_syscalls    (__NR_pwritev2 + 1)
+#define __NR_Linux_syscalls    (__NR_statx + 1)
 
 
 #define __IGNORE_select                /* newselect */
index 0dc72d5de861539e5c16ff2ecd49f205e37775e6..c32a0909521665b5f08c22ef37fa8d8f9c654012 100644 (file)
@@ -616,3 +616,25 @@ flush_cache_page(struct vm_area_struct *vma, unsigned long vmaddr, unsigned long
                __flush_cache_page(vma, vmaddr, PFN_PHYS(pfn));
        }
 }
+
+void flush_kernel_vmap_range(void *vaddr, int size)
+{
+       unsigned long start = (unsigned long)vaddr;
+
+       if ((unsigned long)size > parisc_cache_flush_threshold)
+               flush_data_cache();
+       else
+               flush_kernel_dcache_range_asm(start, start + size);
+}
+EXPORT_SYMBOL(flush_kernel_vmap_range);
+
+void invalidate_kernel_vmap_range(void *vaddr, int size)
+{
+       unsigned long start = (unsigned long)vaddr;
+
+       if ((unsigned long)size > parisc_cache_flush_threshold)
+               flush_data_cache();
+       else
+               flush_kernel_dcache_range_asm(start, start + size);
+}
+EXPORT_SYMBOL(invalidate_kernel_vmap_range);
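
Both out-of-line routines above replace the inline versions deleted from cacheflush.h earlier in this diff, and both apply the same heuristic: a whole-cache flush costs roughly the cache size while a ranged flush costs the range, so beyond parisc_cache_flush_threshold the full flush is cheaper (the unsigned cast also keeps a negative int size from slipping past the comparison). An illustrative stand-alone version of the heuristic, names hypothetical:

    #include <stdio.h>

    #define FLUSH_THRESHOLD (2UL << 20)     /* stand-in for the runtime threshold */

    static void flush_all(void)             { puts("flush whole cache"); }
    static void flush_span(unsigned long s, unsigned long e)
    {
            printf("flush %#lx-%#lx\n", s, e);
    }

    static void flush_vmap_demo(unsigned long start, unsigned long size)
    {
            if (size > FLUSH_THRESHOLD)
                    flush_all();            /* cheaper than walking a huge range */
            else
                    flush_span(start, start + size);
    }

    int main(void)
    {
            flush_vmap_demo(0x1000, 4096);          /* small: ranged flush */
            flush_vmap_demo(0x1000, 16UL << 20);    /* large: full flush */
            return 0;
    }
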
index a0ecdb4abcc878b3805d7a2d0f845272b1fc372d..c66c943d93224f342cb71c97bd6a690bf8fb225b 100644 (file)
@@ -620,6 +620,10 @@ int apply_relocate_add(Elf_Shdr *sechdrs,
                         */
                        *loc = fsel(val, addend); 
                        break;
+               case R_PARISC_SECREL32:
+                       /* 32-bit section relative address. */
+                       *loc = fsel(val, addend);
+                       break;
                case R_PARISC_DPREL21L:
                        /* left 21 bit of relative address */
                        val = lrsel(val - dp, addend);
@@ -807,6 +811,10 @@ int apply_relocate_add(Elf_Shdr *sechdrs,
                         */
                        *loc = fsel(val, addend); 
                        break;
+               case R_PARISC_SECREL32:
+                       /* 32-bit section relative address. */
+                       *loc = fsel(val, addend);
+                       break;
                case R_PARISC_FPTR64:
                        /* 64-bit function address */
                        if(in_local(me, (void *)(val + addend))) {
index 7484b3d11e0dbf83e58de4b64b08079b4a346bd7..c6d6272a934f03823b655cf07b38e7bbc01ca12e 100644 (file)
@@ -47,16 +47,6 @@ EXPORT_SYMBOL(__cmpxchg_u64);
 EXPORT_SYMBOL(lclear_user);
 EXPORT_SYMBOL(lstrnlen_user);
 
-/* Global fixups - defined as int to avoid creation of function pointers */
-extern int fixup_get_user_skip_1;
-extern int fixup_get_user_skip_2;
-extern int fixup_put_user_skip_1;
-extern int fixup_put_user_skip_2;
-EXPORT_SYMBOL(fixup_get_user_skip_1);
-EXPORT_SYMBOL(fixup_get_user_skip_2);
-EXPORT_SYMBOL(fixup_put_user_skip_1);
-EXPORT_SYMBOL(fixup_put_user_skip_2);
-
 #ifndef CONFIG_64BIT
 /* Needed so insmod can set dp value */
 extern int $global$;
index e282a5131d77e10f62d274d4be426b076f655017..6017a5af2e6e2c8feb45de54adfb36865b65d3ee 100644 (file)
@@ -39,7 +39,7 @@
  *  the PDC INTRIGUE calls.  This is done to eliminate bugs introduced
  *  in various PDC revisions.  The code is much more maintainable
  *  and reliable this way vs having to debug on every version of PDC
- *  on every box. 
+ *  on every box.
  */
 
 #include <linux/capability.h>
@@ -195,8 +195,8 @@ static int perf_config(uint32_t *image_ptr);
 static int perf_release(struct inode *inode, struct file *file);
 static int perf_open(struct inode *inode, struct file *file);
 static ssize_t perf_read(struct file *file, char __user *buf, size_t cnt, loff_t *ppos);
-static ssize_t perf_write(struct file *file, const char __user *buf, size_t count, 
-       loff_t *ppos);
+static ssize_t perf_write(struct file *file, const char __user *buf,
+       size_t count, loff_t *ppos);
 static long perf_ioctl(struct file *file, unsigned int cmd, unsigned long arg);
 static void perf_start_counters(void);
 static int perf_stop_counters(uint32_t *raddr);
@@ -222,7 +222,7 @@ extern void perf_intrigue_disable_perf_counters (void);
 /*
  * configure:
  *
- * Configure the cpu with a given data image.  First turn off the counters, 
+ * Configure the cpu with a given data image.  First turn off the counters,
  * then download the image, then turn the counters back on.
  */
 static int perf_config(uint32_t *image_ptr)
@@ -234,7 +234,7 @@ static int perf_config(uint32_t *image_ptr)
        error = perf_stop_counters(raddr);
        if (error != 0) {
                printk("perf_config: perf_stop_counters = %ld\n", error);
-               return -EINVAL; 
+               return -EINVAL;
        }
 
 printk("Preparing to write image\n");
@@ -242,7 +242,7 @@ printk("Preparing to write image\n");
        error = perf_write_image((uint64_t *)image_ptr);
        if (error != 0) {
                printk("perf_config: DOWNLOAD = %ld\n", error);
-               return -EINVAL; 
+               return -EINVAL;
        }
 
 printk("Preparing to start counters\n");
@@ -254,7 +254,7 @@ printk("Preparing to start counters\n");
 }
 
 /*
- * Open the device and initialize all of its memory.  The device is only 
+ * Open the device and initialize all of its memory.  The device is only
  * opened once, but can be "queried" by multiple processes that know its
  * file descriptor.
  */
@@ -298,19 +298,19 @@ static ssize_t perf_read(struct file *file, char __user *buf, size_t cnt, loff_t
  * called on the processor that the download should happen
  * on.
  */
-static ssize_t perf_write(struct file *file, const char __user *buf, size_t count, 
-       loff_t *ppos)
+static ssize_t perf_write(struct file *file, const char __user *buf,
+       size_t count, loff_t *ppos)
 {
        size_t image_size;
        uint32_t image_type;
        uint32_t interface_type;
        uint32_t test;
 
-       if (perf_processor_interface == ONYX_INTF) 
+       if (perf_processor_interface == ONYX_INTF)
                image_size = PCXU_IMAGE_SIZE;
-       else if (perf_processor_interface == CUDA_INTF) 
+       else if (perf_processor_interface == CUDA_INTF)
                image_size = PCXW_IMAGE_SIZE;
-       else 
+       else
                return -EFAULT;
 
        if (!capable(CAP_SYS_ADMIN))
@@ -330,22 +330,22 @@ static ssize_t perf_write(struct file *file, const char __user *buf, size_t coun
 
        /* First check the machine type is correct for
           the requested image */
-        if (((perf_processor_interface == CUDA_INTF) &&
-                      (interface_type != CUDA_INTF)) ||
-           ((perf_processor_interface == ONYX_INTF) &&
-                      (interface_type != ONYX_INTF))) 
+       if (((perf_processor_interface == CUDA_INTF) &&
+                       (interface_type != CUDA_INTF)) ||
+               ((perf_processor_interface == ONYX_INTF) &&
+                       (interface_type != ONYX_INTF)))
                return -EINVAL;
 
        /* Next check to make sure the requested image
           is valid */
-       if (((interface_type == CUDA_INTF) && 
+       if (((interface_type == CUDA_INTF) &&
                       (test >= MAX_CUDA_IMAGES)) ||
-           ((interface_type == ONYX_INTF) && 
-                      (test >= MAX_ONYX_IMAGES))) 
+           ((interface_type == ONYX_INTF) &&
+                      (test >= MAX_ONYX_IMAGES)))
                return -EINVAL;
 
        /* Copy the image into the processor */
-       if (interface_type == CUDA_INTF) 
+       if (interface_type == CUDA_INTF)
                return perf_config(cuda_images[test]);
        else
                return perf_config(onyx_images[test]);
@@ -359,7 +359,7 @@ static ssize_t perf_write(struct file *file, const char __user *buf, size_t coun
 static void perf_patch_images(void)
 {
 #if 0 /* FIXME!! */
-/* 
+/*
  * NOTE:  this routine is VERY specific to the current TLB image.
  * If the image is changed, this routine might also need to be changed.
  */
@@ -367,9 +367,9 @@ static void perf_patch_images(void)
        extern void $i_dtlb_miss_2_0();
        extern void PA2_0_iva();
 
-       /* 
+       /*
         * We can only use the lower 32-bits, the upper 32-bits should be 0
-        * anyway given this is in the kernel 
+        * anyway given this is in the kernel
         */
        uint32_t itlb_addr  = (uint32_t)&($i_itlb_miss_2_0);
        uint32_t dtlb_addr  = (uint32_t)&($i_dtlb_miss_2_0);
@@ -377,21 +377,21 @@ static void perf_patch_images(void)
 
        if (perf_processor_interface == ONYX_INTF) {
                /* clear last 2 bytes */
-               onyx_images[TLBMISS][15] &= 0xffffff00;  
+               onyx_images[TLBMISS][15] &= 0xffffff00;
                /* set 2 bytes */
                onyx_images[TLBMISS][15] |= (0x000000ff&((dtlb_addr) >> 24));
                onyx_images[TLBMISS][16] = (dtlb_addr << 8)&0xffffff00;
                onyx_images[TLBMISS][17] = itlb_addr;
 
                /* clear last 2 bytes */
-               onyx_images[TLBHANDMISS][15] &= 0xffffff00;  
+               onyx_images[TLBHANDMISS][15] &= 0xffffff00;
                /* set 2 bytes */
                onyx_images[TLBHANDMISS][15] |= (0x000000ff&((dtlb_addr) >> 24));
                onyx_images[TLBHANDMISS][16] = (dtlb_addr << 8)&0xffffff00;
                onyx_images[TLBHANDMISS][17] = itlb_addr;
 
                /* clear last 2 bytes */
-               onyx_images[BIG_CPI][15] &= 0xffffff00;  
+               onyx_images[BIG_CPI][15] &= 0xffffff00;
                /* set 2 bytes */
                onyx_images[BIG_CPI][15] |= (0x000000ff&((dtlb_addr) >> 24));
                onyx_images[BIG_CPI][16] = (dtlb_addr << 8)&0xffffff00;
@@ -404,24 +404,24 @@ static void perf_patch_images(void)
 
        } else if (perf_processor_interface == CUDA_INTF) {
                /* Cuda interface */
-               cuda_images[TLBMISS][16] =  
+               cuda_images[TLBMISS][16] =
                        (cuda_images[TLBMISS][16]&0xffff0000) |
                        ((dtlb_addr >> 8)&0x0000ffff);
-               cuda_images[TLBMISS][17] = 
+               cuda_images[TLBMISS][17] =
                        ((dtlb_addr << 24)&0xff000000) | ((itlb_addr >> 16)&0x000000ff);
                cuda_images[TLBMISS][18] = (itlb_addr << 16)&0xffff0000;
 
-               cuda_images[TLBHANDMISS][16] = 
+               cuda_images[TLBHANDMISS][16] =
                        (cuda_images[TLBHANDMISS][16]&0xffff0000) |
                        ((dtlb_addr >> 8)&0x0000ffff);
-               cuda_images[TLBHANDMISS][17] = 
+               cuda_images[TLBHANDMISS][17] =
                        ((dtlb_addr << 24)&0xff000000) | ((itlb_addr >> 16)&0x000000ff);
                cuda_images[TLBHANDMISS][18] = (itlb_addr << 16)&0xffff0000;
 
-               cuda_images[BIG_CPI][16] = 
+               cuda_images[BIG_CPI][16] =
                        (cuda_images[BIG_CPI][16]&0xffff0000) |
                        ((dtlb_addr >> 8)&0x0000ffff);
-               cuda_images[BIG_CPI][17] = 
+               cuda_images[BIG_CPI][17] =
                        ((dtlb_addr << 24)&0xff000000) | ((itlb_addr >> 16)&0x000000ff);
                cuda_images[BIG_CPI][18] = (itlb_addr << 16)&0xffff0000;
        } else {
@@ -433,7 +433,7 @@ static void perf_patch_images(void)
 
 /*
  * ioctl routine
- * All routines effect the processor that they are executed on.  Thus you 
+ * All routines affect the processor that they are executed on.  Thus you
  * must be running on the processor that you wish to change.
  */
 
@@ -459,7 +459,7 @@ static long perf_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
                        }
 
                        /* copy out the Counters */
-                       if (copy_to_user((void __user *)arg, raddr, 
+                       if (copy_to_user((void __user *)arg, raddr,
                                        sizeof (raddr)) != 0) {
                                error =  -EFAULT;
                                break;
@@ -487,7 +487,7 @@ static const struct file_operations perf_fops = {
        .open = perf_open,
        .release = perf_release
 };
-       
+
 static struct miscdevice perf_dev = {
        MISC_DYNAMIC_MINOR,
        PA_PERF_DEV,
@@ -595,7 +595,7 @@ static int perf_stop_counters(uint32_t *raddr)
                /* OR sticky2 (bit 1496) to counter2 bit 32 */
                tmp64 |= (userbuf[23] >> 8) & 0x0000000080000000;
                raddr[2] = (uint32_t)tmp64;
-               
+
                /* Counter3 is bits 1497 to 1528 */
                tmp64 =  (userbuf[23] >> 7) & 0x00000000ffffffff;
                /* OR sticky3 (bit 1529) to counter3 bit 32 */
@@ -617,7 +617,7 @@ static int perf_stop_counters(uint32_t *raddr)
                userbuf[22] = 0;
                userbuf[23] = 0;
 
-               /* 
+               /*
                 * Write back the zeroed bytes + the image given
                 * the read was destructive.
                 */
@@ -625,13 +625,13 @@ static int perf_stop_counters(uint32_t *raddr)
        } else {
 
                /*
-                * Read RDR-15 which contains the counters and sticky bits 
+                * Read RDR-15 which contains the counters and sticky bits
                 */
                if (!perf_rdr_read_ubuf(15, userbuf)) {
                        return -13;
                }
 
-               /* 
+               /*
                 * Clear out the counters
                 */
                perf_rdr_clear(15);
@@ -644,7 +644,7 @@ static int perf_stop_counters(uint32_t *raddr)
                raddr[2] = (uint32_t)((userbuf[1] >> 32) & 0x00000000ffffffffUL);
                raddr[3] = (uint32_t)(userbuf[1] & 0x00000000ffffffffUL);
        }
+
        return 0;
 }
 
@@ -682,7 +682,7 @@ static int perf_rdr_read_ubuf(uint32_t      rdr_num, uint64_t *buffer)
        i = tentry->num_words;
        while (i--) {
                buffer[i] = 0;
-       }       
+       }
 
        /* Check for bits an even number of 64 */
        if ((xbits = width & 0x03f) != 0) {
@@ -808,18 +808,22 @@ static int perf_write_image(uint64_t *memaddr)
        }
 
        runway = ioremap_nocache(cpu_device->hpa.start, 4096);
+       if (!runway) {
+               pr_err("perf_write_image: ioremap failed!\n");
+               return -ENOMEM;
+       }
 
        /* Merge intrigue bits into Runway STATUS 0 */
        tmp64 = __raw_readq(runway + RUNWAY_STATUS) & 0xffecfffffffffffful;
-       __raw_writeq(tmp64 | (*memaddr++ & 0x0013000000000000ul), 
+       __raw_writeq(tmp64 | (*memaddr++ & 0x0013000000000000ul),
                     runway + RUNWAY_STATUS);
-       
+
        /* Write RUNWAY DEBUG registers */
        for (i = 0; i < 8; i++) {
                __raw_writeq(*memaddr++, runway + RUNWAY_DEBUG);
        }
 
-       return 0; 
+       return 0;
 }
 
 /*
@@ -843,7 +847,7 @@ printk("perf_rdr_write\n");
                        perf_rdr_shift_out_U(rdr_num, buffer[i]);
                } else {
                        perf_rdr_shift_out_W(rdr_num, buffer[i]);
-               }       
+               }
        }
 printk("perf_rdr_write done\n");
 }
index 06f7ca7fe70b616b4d68353ae10dd5d409bbbcab..4516a5b53f38ef651c038e4231effa00fd6db19d 100644 (file)
@@ -142,6 +142,10 @@ void machine_power_off(void)
 
        printk(KERN_EMERG "System shut down completed.\n"
               "Please power this system off now.");
+
+       /* prevent soft lockup/stalled CPU messages for endless loop. */
+       rcu_sysrq_start();
+       for (;;);
 }
 
 void (*pm_power_off)(void) = machine_power_off;
index 3cfef1de8061af183820e98ca97467d674a8c463..44aeaa9c039fc421421a5b1b7524495e0d225eba 100644 (file)
        ENTRY_SAME(copy_file_range)
        ENTRY_COMP(preadv2)
        ENTRY_COMP(pwritev2)
+       ENTRY_SAME(statx)
 
 
 .ifne (. - 90b) - (__NR_Linux_syscalls * (91b - 90b))
index 8fa92b8d839abb98efb59bb4ac7689a40aeaf602..f2dac4d73b1b309cb2fbc28b744f948be8a35d11 100644 (file)
@@ -2,7 +2,7 @@
 # Makefile for parisc-specific library files
 #
 
-lib-y  := lusercopy.o bitops.o checksum.o io.o memset.o fixup.o memcpy.o \
+lib-y  := lusercopy.o bitops.o checksum.o io.o memset.o memcpy.o \
           ucmpdi2.o delay.o
 
 obj-y  := iomap.o
diff --git a/arch/parisc/lib/fixup.S b/arch/parisc/lib/fixup.S
deleted file mode 100644 (file)
index a5b72f2..0000000
+++ /dev/null
@@ -1,98 +0,0 @@
-/*
- * Linux/PA-RISC Project (http://www.parisc-linux.org/)
- *
- *  Copyright (C) 2004  Randolph Chung <tausq@debian.org>
- *
- *    This program is free software; you can redistribute it and/or modify
- *    it under the terms of the GNU General Public License as published by
- *    the Free Software Foundation; either version 2, or (at your option)
- *    any later version.
- *
- *    This program is distributed in the hope that it will be useful,
- *    but WITHOUT ANY WARRANTY; without even the implied warranty of
- *    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- *    GNU General Public License for more details.
- *
- *    You should have received a copy of the GNU General Public License
- *    along with this program; if not, write to the Free Software
- *    Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- * 
- * Fixup routines for kernel exception handling.
- */
-#include <asm/asm-offsets.h>
-#include <asm/assembly.h>
-#include <asm/errno.h>
-#include <linux/linkage.h>
-
-#ifdef CONFIG_SMP
-       .macro  get_fault_ip t1 t2
-       loadgp
-       addil LT%__per_cpu_offset,%r27
-       LDREG RT%__per_cpu_offset(%r1),\t1
-       /* t2 = smp_processor_id() */
-       mfctl 30,\t2
-       ldw TI_CPU(\t2),\t2
-#ifdef CONFIG_64BIT
-       extrd,u \t2,63,32,\t2
-#endif
-       /* t2 = &__per_cpu_offset[smp_processor_id()]; */
-       LDREGX \t2(\t1),\t2 
-       addil LT%exception_data,%r27
-       LDREG RT%exception_data(%r1),\t1
-       /* t1 = this_cpu_ptr(&exception_data) */
-       add,l \t1,\t2,\t1
-       /* %r27 = t1->fault_gp - restore gp */
-       LDREG EXCDATA_GP(\t1), %r27
-       /* t1 = t1->fault_ip */
-       LDREG EXCDATA_IP(\t1), \t1
-       .endm
-#else
-       .macro  get_fault_ip t1 t2
-       loadgp
-       /* t1 = this_cpu_ptr(&exception_data) */
-       addil LT%exception_data,%r27
-       LDREG RT%exception_data(%r1),\t2
-       /* %r27 = t2->fault_gp - restore gp */
-       LDREG EXCDATA_GP(\t2), %r27
-       /* t1 = t2->fault_ip */
-       LDREG EXCDATA_IP(\t2), \t1
-       .endm
-#endif
-
-       .level LEVEL
-
-       .text
-       .section .fixup, "ax"
-
-       /* get_user() fixups, store -EFAULT in r8, and 0 in r9 */
-ENTRY_CFI(fixup_get_user_skip_1)
-       get_fault_ip %r1,%r8
-       ldo 4(%r1), %r1
-       ldi -EFAULT, %r8
-       bv %r0(%r1)
-       copy %r0, %r9
-ENDPROC_CFI(fixup_get_user_skip_1)
-
-ENTRY_CFI(fixup_get_user_skip_2)
-       get_fault_ip %r1,%r8
-       ldo 8(%r1), %r1
-       ldi -EFAULT, %r8
-       bv %r0(%r1)
-       copy %r0, %r9
-ENDPROC_CFI(fixup_get_user_skip_2)
-
-       /* put_user() fixups, store -EFAULT in r8 */
-ENTRY_CFI(fixup_put_user_skip_1)
-       get_fault_ip %r1,%r8
-       ldo 4(%r1), %r1
-       bv %r0(%r1)
-       ldi -EFAULT, %r8
-ENDPROC_CFI(fixup_put_user_skip_1)
-
-ENTRY_CFI(fixup_put_user_skip_2)
-       get_fault_ip %r1,%r8
-       ldo 8(%r1), %r1
-       bv %r0(%r1)
-       ldi -EFAULT, %r8
-ENDPROC_CFI(fixup_put_user_skip_2)
-
index 56845de6b5dfc9ba21aec0b4840ffec6fb41fac5..f01188c044ee83e41ba52162544464781056f262 100644 (file)
@@ -5,6 +5,8 @@
  *    Copyright (C) 2000 Richard Hirst <rhirst with parisc-linux.org>
  *    Copyright (C) 2001 Matthieu Delahaye <delahaym at esiee.fr>
  *    Copyright (C) 2003 Randolph Chung <tausq with parisc-linux.org>
+ *    Copyright (C) 2017 Helge Deller <deller@gmx.de>
+ *    Copyright (C) 2017 John David Anglin <dave.anglin@bell.net>
  *
  *
  *    This program is free software; you can redistribute it and/or modify
@@ -132,4 +134,320 @@ ENDPROC_CFI(lstrnlen_user)
 
        .procend
 
+
+
+/*
+ * unsigned long pa_memcpy(void *dstp, const void *srcp, unsigned long len)
+ *
+ * Inputs:
+ * - sr1 already contains space of source region
+ * - sr2 already contains space of destination region
+ *
+ * Returns:
+ * - number of bytes that could not be copied.
+ *   On success, this will be zero.
+ *
+ * This code is based on a C-implementation of a copy routine written by
+ * Randolph Chung, which in turn was derived from the glibc.
+ *
+ * Several strategies are tried to get the best performance for various
+ * conditions. In the optimal case, we copy in loops of 32 or 16 bytes
+ * at a time using general registers.  Unaligned copies are handled either by
+ * aligning the destination and then using shift-and-write method, or in a few
+ * cases by falling back to a byte-at-a-time copy.
+ *
+ * Testing with various alignments and buffer sizes shows that this code is
+ * often >10x faster than a simple byte-at-a-time copy, even for strangely
+ * aligned operands. It is interesting to note that the glibc version of memcpy
+ * (written in C) is actually quite fast already. This routine is able to beat
+ * it by 30-40% for aligned copies because of the loop unrolling, but in some
+ * cases the glibc version is still slightly faster. This lends more
+ * credibility that gcc can generate very good code as long as we are careful.
+ *
+ * Possible optimizations:
+ * - add cache prefetching
+ * - try not to use the post-increment address modifiers; they may create
+ *   additional interlocks. Assumption is that those were only efficient on old
+ *   machines (pre PA8000 processors)
+ */
+
+       dst = arg0
+       src = arg1
+       len = arg2
+       end = arg3
+       t1  = r19
+       t2  = r20
+       t3  = r21
+       t4  = r22
+       srcspc = sr1
+       dstspc = sr2
+
+       t0 = r1
+       a1 = t1
+       a2 = t2
+       a3 = t3
+       a0 = t4
+
+       save_src = ret0
+       save_dst = ret1
+       save_len = r31
+
+ENTRY_CFI(pa_memcpy)
+       .proc
+       .callinfo NO_CALLS
+       .entry
+
+       /* Last destination address */
+       add     dst,len,end
+
+       /* short copy with less than 16 bytes? */
+       cmpib,>>=,n 15,len,.Lbyte_loop
+
+       /* same alignment? */
+       xor     src,dst,t0
+       extru   t0,31,2,t1
+       cmpib,<>,n  0,t1,.Lunaligned_copy
+
+#ifdef CONFIG_64BIT
+       /* only do 64-bit copies if we can get aligned. */
+       extru   t0,31,3,t1
+       cmpib,<>,n  0,t1,.Lalign_loop32
+
+       /* loop until we are 64-bit aligned */
+.Lalign_loop64:
+       extru   dst,31,3,t1
+       cmpib,=,n       0,t1,.Lcopy_loop_16
+20:    ldb,ma  1(srcspc,src),t1
+21:    stb,ma  t1,1(dstspc,dst)
+       b       .Lalign_loop64
+       ldo     -1(len),len
+
+       ASM_EXCEPTIONTABLE_ENTRY(20b,.Lcopy_done)
+       ASM_EXCEPTIONTABLE_ENTRY(21b,.Lcopy_done)
+
+       ldi     31,t0
+.Lcopy_loop_16:
+       cmpb,COND(>>=),n t0,len,.Lword_loop
+
+10:    ldd     0(srcspc,src),t1
+11:    ldd     8(srcspc,src),t2
+       ldo     16(src),src
+12:    std,ma  t1,8(dstspc,dst)
+13:    std,ma  t2,8(dstspc,dst)
+14:    ldd     0(srcspc,src),t1
+15:    ldd     8(srcspc,src),t2
+       ldo     16(src),src
+16:    std,ma  t1,8(dstspc,dst)
+17:    std,ma  t2,8(dstspc,dst)
+
+       ASM_EXCEPTIONTABLE_ENTRY(10b,.Lcopy_done)
+       ASM_EXCEPTIONTABLE_ENTRY(11b,.Lcopy16_fault)
+       ASM_EXCEPTIONTABLE_ENTRY(12b,.Lcopy_done)
+       ASM_EXCEPTIONTABLE_ENTRY(13b,.Lcopy_done)
+       ASM_EXCEPTIONTABLE_ENTRY(14b,.Lcopy_done)
+       ASM_EXCEPTIONTABLE_ENTRY(15b,.Lcopy16_fault)
+       ASM_EXCEPTIONTABLE_ENTRY(16b,.Lcopy_done)
+       ASM_EXCEPTIONTABLE_ENTRY(17b,.Lcopy_done)
+
+       b       .Lcopy_loop_16
+       ldo     -32(len),len
+
+.Lword_loop:
+       cmpib,COND(>>=),n 3,len,.Lbyte_loop
+20:    ldw,ma  4(srcspc,src),t1
+21:    stw,ma  t1,4(dstspc,dst)
+       b       .Lword_loop
+       ldo     -4(len),len
+
+       ASM_EXCEPTIONTABLE_ENTRY(20b,.Lcopy_done)
+       ASM_EXCEPTIONTABLE_ENTRY(21b,.Lcopy_done)
+
+#endif /* CONFIG_64BIT */
+
+       /* loop until we are 32-bit aligned */
+.Lalign_loop32:
+       extru   dst,31,2,t1
+       cmpib,=,n       0,t1,.Lcopy_loop_4
+20:    ldb,ma  1(srcspc,src),t1
+21:    stb,ma  t1,1(dstspc,dst)
+       b       .Lalign_loop32
+       ldo     -1(len),len
+
+       ASM_EXCEPTIONTABLE_ENTRY(20b,.Lcopy_done)
+       ASM_EXCEPTIONTABLE_ENTRY(21b,.Lcopy_done)
+
+
+.Lcopy_loop_4:
+       cmpib,COND(>>=),n 15,len,.Lbyte_loop
+
+10:    ldw     0(srcspc,src),t1
+11:    ldw     4(srcspc,src),t2
+12:    stw,ma  t1,4(dstspc,dst)
+13:    stw,ma  t2,4(dstspc,dst)
+14:    ldw     8(srcspc,src),t1
+15:    ldw     12(srcspc,src),t2
+       ldo     16(src),src
+16:    stw,ma  t1,4(dstspc,dst)
+17:    stw,ma  t2,4(dstspc,dst)
+
+       ASM_EXCEPTIONTABLE_ENTRY(10b,.Lcopy_done)
+       ASM_EXCEPTIONTABLE_ENTRY(11b,.Lcopy8_fault)
+       ASM_EXCEPTIONTABLE_ENTRY(12b,.Lcopy_done)
+       ASM_EXCEPTIONTABLE_ENTRY(13b,.Lcopy_done)
+       ASM_EXCEPTIONTABLE_ENTRY(14b,.Lcopy_done)
+       ASM_EXCEPTIONTABLE_ENTRY(15b,.Lcopy8_fault)
+       ASM_EXCEPTIONTABLE_ENTRY(16b,.Lcopy_done)
+       ASM_EXCEPTIONTABLE_ENTRY(17b,.Lcopy_done)
+
+       b       .Lcopy_loop_4
+       ldo     -16(len),len
+
+.Lbyte_loop:
+       cmpclr,COND(<>) len,%r0,%r0
+       b,n     .Lcopy_done
+20:    ldb     0(srcspc,src),t1
+       ldo     1(src),src
+21:    stb,ma  t1,1(dstspc,dst)
+       b       .Lbyte_loop
+       ldo     -1(len),len
+
+       ASM_EXCEPTIONTABLE_ENTRY(20b,.Lcopy_done)
+       ASM_EXCEPTIONTABLE_ENTRY(21b,.Lcopy_done)
+
+.Lcopy_done:
+       bv      %r0(%r2)
+       sub     end,dst,ret0
+
+
+       /* src and dst are not aligned the same way. */
+       /* need to go the hard way */
+.Lunaligned_copy:
+       /* align until dst is 32bit-word-aligned */
+       extru   dst,31,2,t1
+       cmpib,COND(=),n 0,t1,.Lcopy_dstaligned
+20:    ldb     0(srcspc,src),t1
+       ldo     1(src),src
+21:    stb,ma  t1,1(dstspc,dst)
+       b       .Lunaligned_copy
+       ldo     -1(len),len
+
+       ASM_EXCEPTIONTABLE_ENTRY(20b,.Lcopy_done)
+       ASM_EXCEPTIONTABLE_ENTRY(21b,.Lcopy_done)
+
+.Lcopy_dstaligned:
+
+       /* store src, dst and len in safe place */
+       copy    src,save_src
+       copy    dst,save_dst
+       copy    len,save_len
+
+       /* len now needs to give the number of words to copy */
+       SHRREG  len,2,len
+
+       /*
+        * Copy from a not-aligned src to an aligned dst using shifts.
+        * Handles 4 words per loop.
+        */
+
+       depw,z src,28,2,t0
+       subi 32,t0,t0
+       mtsar t0
+       extru len,31,2,t0
+       cmpib,= 2,t0,.Lcase2
+       /* Make src aligned by rounding it down.  */
+       depi 0,31,2,src
+
+       cmpiclr,<> 3,t0,%r0
+       b,n .Lcase3
+       cmpiclr,<> 1,t0,%r0
+       b,n .Lcase1
+.Lcase0:
+       cmpb,= %r0,len,.Lcda_finish
+       nop
+
+1:     ldw,ma 4(srcspc,src), a3
+       ASM_EXCEPTIONTABLE_ENTRY(1b,.Lcda_rdfault)
+1:     ldw,ma 4(srcspc,src), a0
+       ASM_EXCEPTIONTABLE_ENTRY(1b,.Lcda_rdfault)
+       b,n .Ldo3
+.Lcase1:
+1:     ldw,ma 4(srcspc,src), a2
+       ASM_EXCEPTIONTABLE_ENTRY(1b,.Lcda_rdfault)
+1:     ldw,ma 4(srcspc,src), a3
+       ASM_EXCEPTIONTABLE_ENTRY(1b,.Lcda_rdfault)
+       ldo -1(len),len
+       cmpb,=,n %r0,len,.Ldo0
+.Ldo4:
+1:     ldw,ma 4(srcspc,src), a0
+       ASM_EXCEPTIONTABLE_ENTRY(1b,.Lcda_rdfault)
+       shrpw a2, a3, %sar, t0
+1:     stw,ma t0, 4(dstspc,dst)
+       ASM_EXCEPTIONTABLE_ENTRY(1b,.Lcopy_done)
+.Ldo3:
+1:     ldw,ma 4(srcspc,src), a1
+       ASM_EXCEPTIONTABLE_ENTRY(1b,.Lcda_rdfault)
+       shrpw a3, a0, %sar, t0
+1:     stw,ma t0, 4(dstspc,dst)
+       ASM_EXCEPTIONTABLE_ENTRY(1b,.Lcopy_done)
+.Ldo2:
+1:     ldw,ma 4(srcspc,src), a2
+       ASM_EXCEPTIONTABLE_ENTRY(1b,.Lcda_rdfault)
+       shrpw a0, a1, %sar, t0
+1:     stw,ma t0, 4(dstspc,dst)
+       ASM_EXCEPTIONTABLE_ENTRY(1b,.Lcopy_done)
+.Ldo1:
+1:     ldw,ma 4(srcspc,src), a3
+       ASM_EXCEPTIONTABLE_ENTRY(1b,.Lcda_rdfault)
+       shrpw a1, a2, %sar, t0
+1:     stw,ma t0, 4(dstspc,dst)
+       ASM_EXCEPTIONTABLE_ENTRY(1b,.Lcopy_done)
+       ldo -4(len),len
+       cmpb,<> %r0,len,.Ldo4
+       nop
+.Ldo0:
+       shrpw a2, a3, %sar, t0
+1:     stw,ma t0, 4(dstspc,dst)
+       ASM_EXCEPTIONTABLE_ENTRY(1b,.Lcopy_done)
+
+.Lcda_rdfault:
+.Lcda_finish:
+       /* calculate new src, dst and len and jump to byte-copy loop */
+       sub     dst,save_dst,t0
+       add     save_src,t0,src
+       b       .Lbyte_loop
+       sub     save_len,t0,len
+
+.Lcase3:
+1:     ldw,ma 4(srcspc,src), a0
+       ASM_EXCEPTIONTABLE_ENTRY(1b,.Lcda_rdfault)
+1:     ldw,ma 4(srcspc,src), a1
+       ASM_EXCEPTIONTABLE_ENTRY(1b,.Lcda_rdfault)
+       b .Ldo2
+       ldo 1(len),len
+.Lcase2:
+1:     ldw,ma 4(srcspc,src), a1
+       ASM_EXCEPTIONTABLE_ENTRY(1b,.Lcda_rdfault)
+1:     ldw,ma 4(srcspc,src), a2
+       ASM_EXCEPTIONTABLE_ENTRY(1b,.Lcda_rdfault)
+       b .Ldo1
+       ldo 2(len),len
+
+
+       /* fault exception fixup handlers: */
+#ifdef CONFIG_64BIT
+.Lcopy16_fault:
+10:    b       .Lcopy_done
+       std,ma  t1,8(dstspc,dst)
+       ASM_EXCEPTIONTABLE_ENTRY(10b,.Lcopy_done)
+#endif
+
+.Lcopy8_fault:
+10:    b       .Lcopy_done
+       stw,ma  t1,4(dstspc,dst)
+       ASM_EXCEPTIONTABLE_ENTRY(10b,.Lcopy_done)
+
+       .exit
+ENDPROC_CFI(pa_memcpy)
+       .procend
+
        .end
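
For the misaligned case, the routine above rounds the source down to a word boundary, preloads %sar via the depw/subi/mtsar sequence, and then splices each pair of aligned words with shrpw so every store stays word-aligned. A portable-C sketch of that shift-and-merge step (big-endian byte order assumed, as on PA-RISC; like the assembly, it may read one aligned word past the copied bytes; purely illustrative, not the kernel routine):

    #include <stdint.h>
    #include <stddef.h>
    #include <string.h>

    static void copy_shift_merge(uint8_t *dst, const uint8_t *src, size_t words)
    {
            unsigned int shift = 8 * ((uintptr_t)src % sizeof(uint32_t));
            const uint32_t *s = (const uint32_t *)((uintptr_t)src & ~(uintptr_t)3);
            uint32_t lo = *s++;

            while (words--) {
                    uint32_t hi = *s++;
                    /* Rough equivalent of shrpw lo,hi,%sar,w on a
                     * big-endian host */
                    uint32_t w = shift ? (lo << shift) | (hi >> (32 - shift))
                                       : lo;

                    memcpy(dst, &w, sizeof(w));     /* aligned 4-byte store */
                    dst += sizeof(w);
                    lo = hi;
            }
    }
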
index f82ff10ed974117a59f8713eba4281350feb44aa..b3d47ec1d80a241db8781518026d5a95e26ddb24 100644 (file)
@@ -2,7 +2,7 @@
  *    Optimized memory copy routines.
  *
  *    Copyright (C) 2004 Randolph Chung <tausq@debian.org>
- *    Copyright (C) 2013 Helge Deller <deller@gmx.de>
+ *    Copyright (C) 2013-2017 Helge Deller <deller@gmx.de>
  *
  *    This program is free software; you can redistribute it and/or modify
  *    it under the terms of the GNU General Public License as published by
  *    Portions derived from the GNU C Library
  *    Copyright (C) 1991, 1997, 2003 Free Software Foundation, Inc.
  *
- * Several strategies are tried to try to get the best performance for various
- * conditions. In the optimal case, we copy 64-bytes in an unrolled loop using 
- * fp regs. This is followed by loops that copy 32- or 16-bytes at a time using
- * general registers.  Unaligned copies are handled either by aligning the 
- * destination and then using shift-and-write method, or in a few cases by 
- * falling back to a byte-at-a-time copy.
- *
- * I chose to implement this in C because it is easier to maintain and debug,
- * and in my experiments it appears that the C code generated by gcc (3.3/3.4
- * at the time of writing) is fairly optimal. Unfortunately some of the 
- * semantics of the copy routine (exception handling) is difficult to express
- * in C, so we have to play some tricks to get it to work.
- *
- * All the loads and stores are done via explicit asm() code in order to use
- * the right space registers. 
- * 
- * Testing with various alignments and buffer sizes shows that this code is 
- * often >10x faster than a simple byte-at-a-time copy, even for strangely
- * aligned operands. It is interesting to note that the glibc version
- * of memcpy (written in C) is actually quite fast already. This routine is 
- * able to beat it by 30-40% for aligned copies because of the loop unrolling, 
- * but in some cases the glibc version is still slightly faster. This lends 
- * more credibility that gcc can generate very good code as long as we are 
- * careful.
- *
- * TODO:
- * - cache prefetching needs more experimentation to get optimal settings
- * - try not to use the post-increment address modifiers; they create additional
- *   interlocks
- * - replace byte-copy loops with stybs sequences
  */
 
-#ifdef __KERNEL__
 #include <linux/module.h>
 #include <linux/compiler.h>
 #include <linux/uaccess.h>
-#define s_space "%%sr1"
-#define d_space "%%sr2"
-#else
-#include "memcpy.h"
-#define s_space "%%sr0"
-#define d_space "%%sr0"
-#define pa_memcpy new2_copy
-#endif
 
 DECLARE_PER_CPU(struct exception_data, exception_data);
 
-#define preserve_branch(label) do {                                    \
-       volatile int dummy = 0;                                         \
-       /* The following branch is never taken, it's just here to  */   \
-       /* prevent gcc from optimizing away our exception code. */      \
-       if (unlikely(dummy != dummy))                                   \
-               goto label;                                             \
-} while (0)
-
 #define get_user_space() (segment_eq(get_fs(), KERNEL_DS) ? 0 : mfsp(3))
 #define get_kernel_space() (0)
 
-#define MERGE(w0, sh_1, w1, sh_2)  ({                                  \
-       unsigned int _r;                                                \
-       asm volatile (                                                  \
-       "mtsar %3\n"                                                    \
-       "shrpw %1, %2, %%sar, %0\n"                                     \
-       : "=r"(_r)                                                      \
-       : "r"(w0), "r"(w1), "r"(sh_2)                                   \
-       );                                                              \
-       _r;                                                             \
-})
-#define THRESHOLD      16
-
-#ifdef DEBUG_MEMCPY
-#define DPRINTF(fmt, args...) do { printk(KERN_DEBUG "%s:%d:%s ", __FILE__, __LINE__, __func__ ); printk(KERN_DEBUG fmt, ##args ); } while (0)
-#else
-#define DPRINTF(fmt, args...)
-#endif
-
-#define def_load_ai_insn(_insn,_sz,_tt,_s,_a,_t,_e)    \
-       __asm__ __volatile__ (                          \
-       "1:\t" #_insn ",ma " #_sz "(" _s ",%1), %0\n\t" \
-       ASM_EXCEPTIONTABLE_ENTRY(1b,_e)                 \
-       : _tt(_t), "+r"(_a)                             \
-       :                                               \
-       : "r8")
-
-#define def_store_ai_insn(_insn,_sz,_tt,_s,_a,_t,_e)   \
-       __asm__ __volatile__ (                          \
-       "1:\t" #_insn ",ma %1, " #_sz "(" _s ",%0)\n\t" \
-       ASM_EXCEPTIONTABLE_ENTRY(1b,_e)                 \
-       : "+r"(_a)                                      \
-       : _tt(_t)                                       \
-       : "r8")
-
-#define ldbma(_s, _a, _t, _e) def_load_ai_insn(ldbs,1,"=r",_s,_a,_t,_e)
-#define stbma(_s, _t, _a, _e) def_store_ai_insn(stbs,1,"r",_s,_a,_t,_e)
-#define ldwma(_s, _a, _t, _e) def_load_ai_insn(ldw,4,"=r",_s,_a,_t,_e)
-#define stwma(_s, _t, _a, _e) def_store_ai_insn(stw,4,"r",_s,_a,_t,_e)
-#define flddma(_s, _a, _t, _e) def_load_ai_insn(fldd,8,"=f",_s,_a,_t,_e)
-#define fstdma(_s, _t, _a, _e) def_store_ai_insn(fstd,8,"f",_s,_a,_t,_e)
-
-#define def_load_insn(_insn,_tt,_s,_o,_a,_t,_e)        \
-       __asm__ __volatile__ (                          \
-       "1:\t" #_insn " " #_o "(" _s ",%1), %0\n\t"     \
-       ASM_EXCEPTIONTABLE_ENTRY(1b,_e)                 \
-       : _tt(_t)                                       \
-       : "r"(_a)                                       \
-       : "r8")
-
-#define def_store_insn(_insn,_tt,_s,_t,_o,_a,_e)       \
-       __asm__ __volatile__ (                          \
-       "1:\t" #_insn " %0, " #_o "(" _s ",%1)\n\t"     \
-       ASM_EXCEPTIONTABLE_ENTRY(1b,_e)                 \
-       :                                               \
-       : _tt(_t), "r"(_a)                              \
-       : "r8")
-
-#define ldw(_s,_o,_a,_t,_e)    def_load_insn(ldw,"=r",_s,_o,_a,_t,_e)
-#define stw(_s,_t,_o,_a,_e)    def_store_insn(stw,"r",_s,_t,_o,_a,_e)
-
-#ifdef  CONFIG_PREFETCH
-static inline void prefetch_src(const void *addr)
-{
-       __asm__("ldw 0(" s_space ",%0), %%r0" : : "r" (addr));
-}
-
-static inline void prefetch_dst(const void *addr)
-{
-       __asm__("ldd 0(" d_space ",%0), %%r0" : : "r" (addr));
-}
-#else
-#define prefetch_src(addr) do { } while(0)
-#define prefetch_dst(addr) do { } while(0)
-#endif
-
-#define PA_MEMCPY_OK           0
-#define PA_MEMCPY_LOAD_ERROR   1
-#define PA_MEMCPY_STORE_ERROR  2
-
-/* Copy from a not-aligned src to an aligned dst, using shifts. Handles 4 words
- * per loop.  This code is derived from glibc. 
- */
-static noinline unsigned long copy_dstaligned(unsigned long dst,
-                                       unsigned long src, unsigned long len)
-{
-       /* gcc complains that a2 and a3 may be uninitialized, but actually
-        * they cannot be.  Initialize a2/a3 to shut gcc up.
-        */
-       register unsigned int a0, a1, a2 = 0, a3 = 0;
-       int sh_1, sh_2;
-
-       /* prefetch_src((const void *)src); */
-
-       /* Calculate how to shift a word read at the memory operation
-          aligned srcp to make it aligned for copy.  */
-       sh_1 = 8 * (src % sizeof(unsigned int));
-       sh_2 = 8 * sizeof(unsigned int) - sh_1;
-
-       /* Make src aligned by rounding it down.  */
-       src &= -sizeof(unsigned int);
-
-       switch (len % 4)
-       {
-               case 2:
-                       /* a1 = ((unsigned int *) src)[0];
-                          a2 = ((unsigned int *) src)[1]; */
-                       ldw(s_space, 0, src, a1, cda_ldw_exc);
-                       ldw(s_space, 4, src, a2, cda_ldw_exc);
-                       src -= 1 * sizeof(unsigned int);
-                       dst -= 3 * sizeof(unsigned int);
-                       len += 2;
-                       goto do1;
-               case 3:
-                       /* a0 = ((unsigned int *) src)[0];
-                          a1 = ((unsigned int *) src)[1]; */
-                       ldw(s_space, 0, src, a0, cda_ldw_exc);
-                       ldw(s_space, 4, src, a1, cda_ldw_exc);
-                       src -= 0 * sizeof(unsigned int);
-                       dst -= 2 * sizeof(unsigned int);
-                       len += 1;
-                       goto do2;
-               case 0:
-                       if (len == 0)
-                               return PA_MEMCPY_OK;
-                       /* a3 = ((unsigned int *) src)[0];
-                          a0 = ((unsigned int *) src)[1]; */
-                       ldw(s_space, 0, src, a3, cda_ldw_exc);
-                       ldw(s_space, 4, src, a0, cda_ldw_exc);
-                       src -=-1 * sizeof(unsigned int);
-                       dst -= 1 * sizeof(unsigned int);
-                       len += 0;
-                       goto do3;
-               case 1:
-                       /* a2 = ((unsigned int *) src)[0];
-                          a3 = ((unsigned int *) src)[1]; */
-                       ldw(s_space, 0, src, a2, cda_ldw_exc);
-                       ldw(s_space, 4, src, a3, cda_ldw_exc);
-                       src -=-2 * sizeof(unsigned int);
-                       dst -= 0 * sizeof(unsigned int);
-                       len -= 1;
-                       if (len == 0)
-                               goto do0;
-                       goto do4;                       /* No-op.  */
-       }
-
-       do
-       {
-               /* prefetch_src((const void *)(src + 4 * sizeof(unsigned int))); */
-do4:
-               /* a0 = ((unsigned int *) src)[0]; */
-               ldw(s_space, 0, src, a0, cda_ldw_exc);
-               /* ((unsigned int *) dst)[0] = MERGE (a2, sh_1, a3, sh_2); */
-               stw(d_space, MERGE (a2, sh_1, a3, sh_2), 0, dst, cda_stw_exc);
-do3:
-               /* a1 = ((unsigned int *) src)[1]; */
-               ldw(s_space, 4, src, a1, cda_ldw_exc);
-               /* ((unsigned int *) dst)[1] = MERGE (a3, sh_1, a0, sh_2); */
-               stw(d_space, MERGE (a3, sh_1, a0, sh_2), 4, dst, cda_stw_exc);
-do2:
-               /* a2 = ((unsigned int *) src)[2]; */
-               ldw(s_space, 8, src, a2, cda_ldw_exc);
-               /* ((unsigned int *) dst)[2] = MERGE (a0, sh_1, a1, sh_2); */
-               stw(d_space, MERGE (a0, sh_1, a1, sh_2), 8, dst, cda_stw_exc);
-do1:
-               /* a3 = ((unsigned int *) src)[3]; */
-               ldw(s_space, 12, src, a3, cda_ldw_exc);
-               /* ((unsigned int *) dst)[3] = MERGE (a1, sh_1, a2, sh_2); */
-               stw(d_space, MERGE (a1, sh_1, a2, sh_2), 12, dst, cda_stw_exc);
-
-               src += 4 * sizeof(unsigned int);
-               dst += 4 * sizeof(unsigned int);
-               len -= 4;
-       }
-       while (len != 0);
-
-do0:
-       /* ((unsigned int *) dst)[0] = MERGE (a2, sh_1, a3, sh_2); */
-       stw(d_space, MERGE (a2, sh_1, a3, sh_2), 0, dst, cda_stw_exc);
-
-       preserve_branch(handle_load_error);
-       preserve_branch(handle_store_error);
-
-       return PA_MEMCPY_OK;
-
-handle_load_error:
-       __asm__ __volatile__ ("cda_ldw_exc:\n");
-       return PA_MEMCPY_LOAD_ERROR;
-
-handle_store_error:
-       __asm__ __volatile__ ("cda_stw_exc:\n");
-       return PA_MEMCPY_STORE_ERROR;
-}
-
-
-/* Returns PA_MEMCPY_OK, PA_MEMCPY_LOAD_ERROR or PA_MEMCPY_STORE_ERROR.
- * In case of an access fault the faulty address can be read from the per_cpu
- * exception data struct. */
-static noinline unsigned long pa_memcpy_internal(void *dstp, const void *srcp,
-                                       unsigned long len)
-{
-       register unsigned long src, dst, t1, t2, t3;
-       register unsigned char *pcs, *pcd;
-       register unsigned int *pws, *pwd;
-       register double *pds, *pdd;
-       unsigned long ret;
-
-       src = (unsigned long)srcp;
-       dst = (unsigned long)dstp;
-       pcs = (unsigned char *)srcp;
-       pcd = (unsigned char *)dstp;
-
-       /* prefetch_src((const void *)srcp); */
-
-       if (len < THRESHOLD)
-               goto byte_copy;
-
-       /* Check alignment */
-       t1 = (src ^ dst);
-       if (unlikely(t1 & (sizeof(double)-1)))
-               goto unaligned_copy;
-
-       /* src and dst have same alignment. */
-
-       /* Copy bytes till we are double-aligned. */
-       t2 = src & (sizeof(double) - 1);
-       if (unlikely(t2 != 0)) {
-               t2 = sizeof(double) - t2;
-               while (t2 && len) {
-                       /* *pcd++ = *pcs++; */
-                       ldbma(s_space, pcs, t3, pmc_load_exc);
-                       len--;
-                       stbma(d_space, t3, pcd, pmc_store_exc);
-                       t2--;
-               }
-       }
-
-       pds = (double *)pcs;
-       pdd = (double *)pcd;
-
-#if 0
-       /* Copy 8 doubles at a time */
-       while (len >= 8*sizeof(double)) {
-               register double r1, r2, r3, r4, r5, r6, r7, r8;
-               /* prefetch_src((char *)pds + L1_CACHE_BYTES); */
-               flddma(s_space, pds, r1, pmc_load_exc);
-               flddma(s_space, pds, r2, pmc_load_exc);
-               flddma(s_space, pds, r3, pmc_load_exc);
-               flddma(s_space, pds, r4, pmc_load_exc);
-               fstdma(d_space, r1, pdd, pmc_store_exc);
-               fstdma(d_space, r2, pdd, pmc_store_exc);
-               fstdma(d_space, r3, pdd, pmc_store_exc);
-               fstdma(d_space, r4, pdd, pmc_store_exc);
-
-#if 0
-               if (L1_CACHE_BYTES <= 32)
-                       prefetch_src((char *)pds + L1_CACHE_BYTES);
-#endif
-               flddma(s_space, pds, r5, pmc_load_exc);
-               flddma(s_space, pds, r6, pmc_load_exc);
-               flddma(s_space, pds, r7, pmc_load_exc);
-               flddma(s_space, pds, r8, pmc_load_exc);
-               fstdma(d_space, r5, pdd, pmc_store_exc);
-               fstdma(d_space, r6, pdd, pmc_store_exc);
-               fstdma(d_space, r7, pdd, pmc_store_exc);
-               fstdma(d_space, r8, pdd, pmc_store_exc);
-               len -= 8*sizeof(double);
-       }
-#endif
-
-       pws = (unsigned int *)pds;
-       pwd = (unsigned int *)pdd;
-
-word_copy:
-       while (len >= 8*sizeof(unsigned int)) {
-               register unsigned int r1,r2,r3,r4,r5,r6,r7,r8;
-               /* prefetch_src((char *)pws + L1_CACHE_BYTES); */
-               ldwma(s_space, pws, r1, pmc_load_exc);
-               ldwma(s_space, pws, r2, pmc_load_exc);
-               ldwma(s_space, pws, r3, pmc_load_exc);
-               ldwma(s_space, pws, r4, pmc_load_exc);
-               stwma(d_space, r1, pwd, pmc_store_exc);
-               stwma(d_space, r2, pwd, pmc_store_exc);
-               stwma(d_space, r3, pwd, pmc_store_exc);
-               stwma(d_space, r4, pwd, pmc_store_exc);
-
-               ldwma(s_space, pws, r5, pmc_load_exc);
-               ldwma(s_space, pws, r6, pmc_load_exc);
-               ldwma(s_space, pws, r7, pmc_load_exc);
-               ldwma(s_space, pws, r8, pmc_load_exc);
-               stwma(d_space, r5, pwd, pmc_store_exc);
-               stwma(d_space, r6, pwd, pmc_store_exc);
-               stwma(d_space, r7, pwd, pmc_store_exc);
-               stwma(d_space, r8, pwd, pmc_store_exc);
-               len -= 8*sizeof(unsigned int);
-       }
-
-       while (len >= 4*sizeof(unsigned int)) {
-               register unsigned int r1,r2,r3,r4;
-               ldwma(s_space, pws, r1, pmc_load_exc);
-               ldwma(s_space, pws, r2, pmc_load_exc);
-               ldwma(s_space, pws, r3, pmc_load_exc);
-               ldwma(s_space, pws, r4, pmc_load_exc);
-               stwma(d_space, r1, pwd, pmc_store_exc);
-               stwma(d_space, r2, pwd, pmc_store_exc);
-               stwma(d_space, r3, pwd, pmc_store_exc);
-               stwma(d_space, r4, pwd, pmc_store_exc);
-               len -= 4*sizeof(unsigned int);
-       }
-
-       pcs = (unsigned char *)pws;
-       pcd = (unsigned char *)pwd;
-
-byte_copy:
-       while (len) {
-               /* *pcd++ = *pcs++; */
-               ldbma(s_space, pcs, t3, pmc_load_exc);
-               stbma(d_space, t3, pcd, pmc_store_exc);
-               len--;
-       }
-
-       return PA_MEMCPY_OK;
-
-unaligned_copy:
-       /* possibly we are aligned on a word, but not on a double... */
-       if (likely((t1 & (sizeof(unsigned int)-1)) == 0)) {
-               t2 = src & (sizeof(unsigned int) - 1);
-
-               if (unlikely(t2 != 0)) {
-                       t2 = sizeof(unsigned int) - t2;
-                       while (t2) {
-                               /* *pcd++ = *pcs++; */
-                               ldbma(s_space, pcs, t3, pmc_load_exc);
-                               stbma(d_space, t3, pcd, pmc_store_exc);
-                               len--;
-                               t2--;
-                       }
-               }
-
-               pws = (unsigned int *)pcs;
-               pwd = (unsigned int *)pcd;
-               goto word_copy;
-       }
-
-       /* Align the destination.  */
-       if (unlikely((dst & (sizeof(unsigned int) - 1)) != 0)) {
-               t2 = sizeof(unsigned int) - (dst & (sizeof(unsigned int) - 1));
-               while (t2) {
-                       /* *pcd++ = *pcs++; */
-                       ldbma(s_space, pcs, t3, pmc_load_exc);
-                       stbma(d_space, t3, pcd, pmc_store_exc);
-                       len--;
-                       t2--;
-               }
-               dst = (unsigned long)pcd;
-               src = (unsigned long)pcs;
-       }
-
-       ret = copy_dstaligned(dst, src, len / sizeof(unsigned int));
-       if (ret)
-               return ret;
-
-       pcs += (len & -sizeof(unsigned int));
-       pcd += (len & -sizeof(unsigned int));
-       len %= sizeof(unsigned int);
-
-       preserve_branch(handle_load_error);
-       preserve_branch(handle_store_error);
-
-       goto byte_copy;
-
-handle_load_error:
-       __asm__ __volatile__ ("pmc_load_exc:\n");
-       return PA_MEMCPY_LOAD_ERROR;
-
-handle_store_error:
-       __asm__ __volatile__ ("pmc_store_exc:\n");
-       return PA_MEMCPY_STORE_ERROR;
-}
-
-
 /* Returns 0 for success, otherwise, returns number of bytes not transferred. */
-static unsigned long pa_memcpy(void *dstp, const void *srcp, unsigned long len)
-{
-       unsigned long ret, fault_addr, reference;
-       struct exception_data *d;
-
-       ret = pa_memcpy_internal(dstp, srcp, len);
-       if (likely(ret == PA_MEMCPY_OK))
-               return 0;
-
-       /* if a load or store fault occured we can get the faulty addr */
-       d = this_cpu_ptr(&exception_data);
-       fault_addr = d->fault_addr;
-
-       /* error in load or store? */
-       if (ret == PA_MEMCPY_LOAD_ERROR)
-               reference = (unsigned long) srcp;
-       else
-               reference = (unsigned long) dstp;
+extern unsigned long pa_memcpy(void *dst, const void *src,
+                               unsigned long len);
 
-       DPRINTF("pa_memcpy: fault type = %lu, len=%lu fault_addr=%lu ref=%lu\n",
-               ret, len, fault_addr, reference);
-
-       if (fault_addr >= reference)
-               return len - (fault_addr - reference);
-       else
-               return len;
-}
-
-#ifdef __KERNEL__
 unsigned long __copy_to_user(void __user *dst, const void *src,
                             unsigned long len)
 {
@@ -537,5 +84,3 @@ long probe_kernel_read(void *dst, const void *src, size_t size)
 
        return __probe_kernel_read(dst, src, size);
 }
-
-#endif
index deab89a8915a108a3fd98bef581605b59c094ab8..32ec22146141e56f9436bfbdfaccf0256820a552 100644 (file)
@@ -150,6 +150,23 @@ int fixup_exception(struct pt_regs *regs)
                d->fault_space = regs->isr;
                d->fault_addr = regs->ior;
 
+               /*
+                * Fix up get_user() and put_user().
+                * ASM_EXCEPTIONTABLE_ENTRY_EFAULT() sets the least-significant
+                * bit in the relative address of the fixup routine to indicate
+                * that %r8 should be loaded with -EFAULT to report a userspace
+                * access error.
+                */
+               if (fix->fixup & 1) {
+                       regs->gr[8] = -EFAULT;
+
+                       /* zero target register for get_user() */
+                       if (parisc_acctyp(0, regs->iir) == VM_READ) {
+                               int treg = regs->iir & 0x1f;
+                               regs->gr[treg] = 0;
+                       }
+               }
+
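The tag-bit encoding the comment above describes can be shown with a self-contained sketch. The names and field widths here are hypothetical; the real struct exception_table_entry and the ASM_EXCEPTIONTABLE_ENTRY_EFAULT() macro live in the parisc headers.

	#include <stdint.h>

	/* Illustrative layout: fixup is an offset relative to its own
	 * address, and its low bit doubles as the "load -EFAULT into %r8"
	 * flag described above. */
	struct ex_entry {
		int32_t insn;	/* offset of the faulting instruction */
		int32_t fixup;	/* offset of the fixup routine, LSB = flag */
	};

	uintptr_t fixup_address(const struct ex_entry *fix, int *want_efault)
	{
		*want_efault = fix->fixup & 1;
		/* resolve the relative offset, then strip the low bits, just
		 * as the code below does with regs->iaoq[0] &= ~3 */
		return ((uintptr_t)&fix->fixup + fix->fixup) & ~(uintptr_t)3;
	}
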
                regs->iaoq[0] = (unsigned long)&fix->fixup + fix->fixup;
                regs->iaoq[0] &= ~3;
                /*
index 494091762bd7f3a43afe15945943343c4cb23fe0..97a8bc8a095ce4199ad2e4e0c88c2933fc3b6987 100644 (file)
@@ -80,93 +80,99 @@ config ARCH_HAS_DMA_SET_COHERENT_MASK
 config PPC
        bool
        default y
-       select BUILDTIME_EXTABLE_SORT
+       #
+       # Please keep this list sorted alphabetically.
+       #
+       select ARCH_HAS_DEVMEM_IS_ALLOWED
+       select ARCH_HAS_DMA_SET_COHERENT_MASK
+       select ARCH_HAS_ELF_RANDOMIZE
+       select ARCH_HAS_GCOV_PROFILE_ALL
+       select ARCH_HAS_SCALED_CPUTIME          if VIRT_CPU_ACCOUNTING_NATIVE
+       select ARCH_HAS_SG_CHAIN
+       select ARCH_HAS_TICK_BROADCAST          if GENERIC_CLOCKEVENTS_BROADCAST
+       select ARCH_HAS_UBSAN_SANITIZE_ALL
+       select ARCH_HAVE_NMI_SAFE_CMPXCHG
        select ARCH_MIGHT_HAVE_PC_PARPORT
        select ARCH_MIGHT_HAVE_PC_SERIO
+       select ARCH_SUPPORTS_ATOMIC_RMW
+       select ARCH_SUPPORTS_DEFERRED_STRUCT_PAGE_INIT
+       select ARCH_USE_BUILTIN_BSWAP
+       select ARCH_USE_CMPXCHG_LOCKREF         if PPC64
+       select ARCH_WANT_IPC_PARSE_VERSION
        select BINFMT_ELF
-       select ARCH_HAS_ELF_RANDOMIZE
-       select OF
-       select OF_EARLY_FLATTREE
-       select OF_RESERVED_MEM
-       select HAVE_FTRACE_MCOUNT_RECORD
+       select BUILDTIME_EXTABLE_SORT
+       select CLONE_BACKWARDS
+       select DCACHE_WORD_ACCESS               if PPC64 && CPU_LITTLE_ENDIAN
+       select EDAC_ATOMIC_SCRUB
+       select EDAC_SUPPORT
+       select GENERIC_ATOMIC64                 if PPC32
+       select GENERIC_CLOCKEVENTS
+       select GENERIC_CLOCKEVENTS_BROADCAST    if SMP
+       select GENERIC_CMOS_UPDATE
+       select GENERIC_CPU_AUTOPROBE
+       select GENERIC_IRQ_SHOW
+       select GENERIC_IRQ_SHOW_LEVEL
+       select GENERIC_SMP_IDLE_THREAD
+       select GENERIC_STRNCPY_FROM_USER
+       select GENERIC_STRNLEN_USER
+       select GENERIC_TIME_VSYSCALL_OLD
+       select HAVE_ARCH_AUDITSYSCALL
+       select HAVE_ARCH_HARDENED_USERCOPY
+       select HAVE_ARCH_JUMP_LABEL
+       select HAVE_ARCH_KGDB
+       select HAVE_ARCH_SECCOMP_FILTER
+       select HAVE_ARCH_TRACEHOOK
+       select HAVE_CBPF_JIT                    if !PPC64
+       select HAVE_CONTEXT_TRACKING            if PPC64
+       select HAVE_DEBUG_KMEMLEAK
+       select HAVE_DEBUG_STACKOVERFLOW
+       select HAVE_DMA_API_DEBUG
        select HAVE_DYNAMIC_FTRACE
-       select HAVE_DYNAMIC_FTRACE_WITH_REGS if MPROFILE_KERNEL
-       select HAVE_FUNCTION_TRACER
+       select HAVE_DYNAMIC_FTRACE_WITH_REGS    if MPROFILE_KERNEL
+       select HAVE_EBPF_JIT                    if PPC64
+       select HAVE_EFFICIENT_UNALIGNED_ACCESS  if !(CPU_LITTLE_ENDIAN && POWER7_CPU)
+       select HAVE_FTRACE_MCOUNT_RECORD
        select HAVE_FUNCTION_GRAPH_TRACER
+       select HAVE_FUNCTION_TRACER
        select HAVE_GCC_PLUGINS
-       select SYSCTL_EXCEPTION_TRACE
-       select VIRT_TO_BUS if !PPC64
+       select HAVE_GENERIC_RCU_GUP
+       select HAVE_HW_BREAKPOINT               if PERF_EVENTS && (PPC_BOOK3S || PPC_8xx)
        select HAVE_IDE
        select HAVE_IOREMAP_PROT
-       select HAVE_EFFICIENT_UNALIGNED_ACCESS if !(CPU_LITTLE_ENDIAN && POWER7_CPU)
+       select HAVE_IRQ_EXIT_ON_IRQ_STACK
+       select HAVE_KERNEL_GZIP
        select HAVE_KPROBES
-       select HAVE_OPTPROBES if PPC64
-       select HAVE_ARCH_KGDB
        select HAVE_KRETPROBES
-       select HAVE_ARCH_TRACEHOOK
+       select HAVE_LIVEPATCH                   if HAVE_DYNAMIC_FTRACE_WITH_REGS
        select HAVE_MEMBLOCK
        select HAVE_MEMBLOCK_NODE_MAP
-       select HAVE_DMA_API_DEBUG
+       select HAVE_MOD_ARCH_SPECIFIC
+       select HAVE_NMI                         if PERF_EVENTS
        select HAVE_OPROFILE
-       select HAVE_DEBUG_KMEMLEAK
-       select ARCH_HAS_SG_CHAIN
-       select GENERIC_ATOMIC64 if PPC32
+       select HAVE_OPTPROBES                   if PPC64
        select HAVE_PERF_EVENTS
+       select HAVE_PERF_EVENTS_NMI             if PPC64
        select HAVE_PERF_REGS
        select HAVE_PERF_USER_STACK_DUMP
+       select HAVE_RCU_TABLE_FREE              if SMP
        select HAVE_REGS_AND_STACK_ACCESS_API
-       select HAVE_HW_BREAKPOINT if PERF_EVENTS && (PPC_BOOK3S || PPC_8xx)
-       select ARCH_WANT_IPC_PARSE_VERSION
-       select SPARSE_IRQ
+       select HAVE_SYSCALL_TRACEPOINTS
+       select HAVE_VIRT_CPU_ACCOUNTING
        select IRQ_DOMAIN
-       select GENERIC_IRQ_SHOW
-       select GENERIC_IRQ_SHOW_LEVEL
        select IRQ_FORCED_THREADING
-       select HAVE_RCU_TABLE_FREE if SMP
-       select HAVE_SYSCALL_TRACEPOINTS
-       select HAVE_CBPF_JIT if !PPC64
-       select HAVE_EBPF_JIT if PPC64
-       select HAVE_ARCH_JUMP_LABEL
-       select ARCH_HAVE_NMI_SAFE_CMPXCHG
-       select ARCH_HAS_GCOV_PROFILE_ALL
-       select GENERIC_SMP_IDLE_THREAD
-       select GENERIC_CMOS_UPDATE
-       select GENERIC_TIME_VSYSCALL_OLD
-       select GENERIC_CLOCKEVENTS
-       select GENERIC_CLOCKEVENTS_BROADCAST if SMP
-       select ARCH_HAS_TICK_BROADCAST if GENERIC_CLOCKEVENTS_BROADCAST
-       select GENERIC_STRNCPY_FROM_USER
-       select GENERIC_STRNLEN_USER
-       select HAVE_MOD_ARCH_SPECIFIC
        select MODULES_USE_ELF_RELA
-       select CLONE_BACKWARDS
-       select ARCH_USE_BUILTIN_BSWAP
-       select OLD_SIGSUSPEND
-       select OLD_SIGACTION if PPC32
-       select HAVE_DEBUG_STACKOVERFLOW
-       select HAVE_IRQ_EXIT_ON_IRQ_STACK
-       select ARCH_USE_CMPXCHG_LOCKREF if PPC64
-       select HAVE_ARCH_AUDITSYSCALL
-       select ARCH_SUPPORTS_ATOMIC_RMW
-       select DCACHE_WORD_ACCESS if PPC64 && CPU_LITTLE_ENDIAN
        select NO_BOOTMEM
-       select HAVE_GENERIC_RCU_GUP
-       select HAVE_PERF_EVENTS_NMI if PPC64
-       select HAVE_NMI if PERF_EVENTS
-       select EDAC_SUPPORT
-       select EDAC_ATOMIC_SCRUB
-       select ARCH_HAS_DMA_SET_COHERENT_MASK
-       select ARCH_HAS_DEVMEM_IS_ALLOWED
-       select HAVE_ARCH_SECCOMP_FILTER
-       select ARCH_HAS_UBSAN_SANITIZE_ALL
-       select ARCH_SUPPORTS_DEFERRED_STRUCT_PAGE_INIT
-       select HAVE_LIVEPATCH if HAVE_DYNAMIC_FTRACE_WITH_REGS
-       select GENERIC_CPU_AUTOPROBE
-       select HAVE_VIRT_CPU_ACCOUNTING
-       select ARCH_HAS_SCALED_CPUTIME if VIRT_CPU_ACCOUNTING_NATIVE
-       select HAVE_ARCH_HARDENED_USERCOPY
-       select HAVE_KERNEL_GZIP
-       select HAVE_CONTEXT_TRACKING if PPC64
+       select OF
+       select OF_EARLY_FLATTREE
+       select OF_RESERVED_MEM
+       select OLD_SIGACTION                    if PPC32
+       select OLD_SIGSUSPEND
+       select SPARSE_IRQ
+       select SYSCTL_EXCEPTION_TRACE
+       select VIRT_TO_BUS                      if !PPC64
+       #
+       # Please keep this list sorted alphabetically.
+       #
 
 config GENERIC_CSUM
        def_bool n
index 31286fa7873c1df915814b9bc62156b140006cd2..19b0d1a819593081bc22164b9da26188c613bca8 100644 (file)
@@ -72,8 +72,15 @@ GNUTARGET    := powerpc
 MULTIPLEWORD   := -mmultiple
 endif
 
-cflags-$(CONFIG_CPU_BIG_ENDIAN)                += $(call cc-option,-mbig-endian)
+ifdef CONFIG_PPC64
+cflags-$(CONFIG_CPU_BIG_ENDIAN)                += $(call cc-option,-mabi=elfv1)
+cflags-$(CONFIG_CPU_BIG_ENDIAN)                += $(call cc-option,-mcall-aixdesc)
+aflags-$(CONFIG_CPU_BIG_ENDIAN)                += $(call cc-option,-mabi=elfv1)
+aflags-$(CONFIG_CPU_LITTLE_ENDIAN)     += -mabi=elfv2
+endif
+
 cflags-$(CONFIG_CPU_LITTLE_ENDIAN)     += -mlittle-endian
+cflags-$(CONFIG_CPU_BIG_ENDIAN)                += $(call cc-option,-mbig-endian)
 ifneq ($(cc-name),clang)
   cflags-$(CONFIG_CPU_LITTLE_ENDIAN)   += -mno-strict-align
 endif
@@ -113,7 +120,9 @@ ifeq ($(CONFIG_CPU_LITTLE_ENDIAN),y)
 CFLAGS-$(CONFIG_PPC64) += $(call cc-option,-mabi=elfv2,$(call cc-option,-mcall-aixdesc))
 AFLAGS-$(CONFIG_PPC64) += $(call cc-option,-mabi=elfv2)
 else
+CFLAGS-$(CONFIG_PPC64) += $(call cc-option,-mabi=elfv1)
 CFLAGS-$(CONFIG_PPC64) += $(call cc-option,-mcall-aixdesc)
+AFLAGS-$(CONFIG_PPC64) += $(call cc-option,-mabi=elfv1)
 endif
 CFLAGS-$(CONFIG_PPC64) += $(call cc-option,-mcmodel=medium,$(call cc-option,-mminimal-toc))
 CFLAGS-$(CONFIG_PPC64) += $(call cc-option,-mno-pointers-to-nested-functions)
index 861e72109df2da0b54c98a94584b8b4ff853026b..f080abfc2f83fbd1e7d63846904a3a21ad820cee 100644 (file)
@@ -68,6 +68,7 @@ SECTIONS
   }
 
 #ifdef CONFIG_PPC64_BOOT_WRAPPER
+  . = ALIGN(256);
   .got :
   {
     __toc_start = .;
index 9fa046d56ebadd6ad25e62b5a29a853b123cd30a..411994551afc138b0b733fdda2b6398140f7127c 100644 (file)
@@ -52,7 +52,7 @@ static int crc32c_vpmsum_cra_init(struct crypto_tfm *tfm)
 {
        u32 *key = crypto_tfm_ctx(tfm);
 
-       *key = 0;
+       *key = ~0;
 
        return 0;
 }
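
Seeding with all-ones matters because CRC32C is defined with an initial value of 0xFFFFFFFF and a final inversion; with a zero seed, leading zero bytes leave the register unchanged and go undetected. A plain bitwise reference, handy for cross-checking the vpmsum-accelerated path (illustrative sketch, not kernel code):

	#include <assert.h>
	#include <stddef.h>
	#include <stdint.h>

	/* Reflected CRC32C (Castagnoli), polynomial 0x1EDC6F41, reflected
	 * form 0x82F63B78: all-ones seed, invert on output. */
	uint32_t crc32c_ref(const unsigned char *p, size_t len)
	{
		uint32_t crc = 0xFFFFFFFFu;

		while (len--) {
			crc ^= *p++;
			for (int i = 0; i < 8; i++)
				crc = (crc & 1) ? (crc >> 1) ^ 0x82F63B78u
						: (crc >> 1);
		}
		return ~crc;
	}

	int main(void)
	{
		/* the standard check value for the input "123456789" */
		assert(crc32c_ref((const unsigned char *)"123456789", 9) == 0xE3069283u);
		return 0;
	}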
index 73eb794d6163811c45729984f0d9fb06bbde31a9..bc5fdfd227886aa2cb359656faf2eef5eb6574b7 100644 (file)
 #define PPC_BIT(bit)           (1UL << PPC_BITLSHIFT(bit))
 #define PPC_BITMASK(bs, be)    ((PPC_BIT(bs) - PPC_BIT(be)) | PPC_BIT(bs))
 
+/* Put a PPC bit into a "normal" bit position */
+#define PPC_BITEXTRACT(bits, ppc_bit, dst_bit)                 \
+       ((((bits) >> PPC_BITLSHIFT(ppc_bit)) & 1) << (dst_bit))
+
 #include <asm/barrier.h>
 
 /* Macro for generating the ***_bits() functions */
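
IBM documentation numbers register bits from the most-significant end (bit 0 is the MSB), which is what PPC_BITEXTRACT() above translates into a conventional LSB-0 position; P9_SRR1_MC_IFETCH(), added later in this patch, packs four such bits into a small code. A standalone 64-bit sketch of the same arithmetic (the kernel's PPC_BITLSHIFT() is really defined in terms of BITS_PER_LONG):

	#include <assert.h>
	#include <stdint.h>

	#define BITLSHIFT(be)	(63 - (be))	/* MSB-0 bit -> shift count */
	#define BIT_PPC(n)	(1ULL << BITLSHIFT(n))
	#define BITEXTRACT(bits, ppc_bit, dst_bit) \
		((((bits) >> BITLSHIFT(ppc_bit)) & 1ULL) << (dst_bit))

	int main(void)
	{
		uint64_t srr1 = BIT_PPC(43) | BIT_PPC(36);	/* sample SRR1 */

		/* pack documented bits 45, 44, 43, 36 into bits 0..3 */
		unsigned code = BITEXTRACT(srr1, 45, 0) |
				BITEXTRACT(srr1, 44, 1) |
				BITEXTRACT(srr1, 43, 2) |
				BITEXTRACT(srr1, 36, 3);

		assert(code == 0xc);	/* bits 2 and 3 set */
		return 0;
	}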
index 012223638815569bb424e58021f3f9a7196ff0f0..26ed228d4dc6b7dd089fc84142dbc8af6adb31f2 100644 (file)
@@ -1,6 +1,7 @@
 #ifndef _ASM_POWERPC_BOOK3S_32_PGTABLE_H
 #define _ASM_POWERPC_BOOK3S_32_PGTABLE_H
 
+#define __ARCH_USE_5LEVEL_HACK
 #include <asm-generic/pgtable-nopmd.h>
 
 #include <asm/book3s/32/hash.h>
index 1eeeb72c70158aa07775444e2fe160e4ef15223b..8f4d41936e5a90986c679876f66bfd15339de102 100644 (file)
@@ -1,9 +1,12 @@
 #ifndef _ASM_POWERPC_BOOK3S_64_PGTABLE_H_
 #define _ASM_POWERPC_BOOK3S_64_PGTABLE_H_
 
+#include <asm-generic/5level-fixup.h>
+
 #ifndef __ASSEMBLY__
 #include <linux/mmdebug.h>
 #endif
+
 /*
  * Common bits between hash and Radix page table
  */
@@ -347,23 +350,58 @@ static inline int __ptep_test_and_clear_young(struct mm_struct *mm,
        __r;                                                    \
 })
 
+static inline int __pte_write(pte_t pte)
+{
+       return !!(pte_raw(pte) & cpu_to_be64(_PAGE_WRITE));
+}
+
+#ifdef CONFIG_NUMA_BALANCING
+#define pte_savedwrite pte_savedwrite
+static inline bool pte_savedwrite(pte_t pte)
+{
+       /*
+        * Saved write ptes are prot none ptes that don't have the
+        * privileged bit set. We mark prot none as one which has the
+        * present and privileged bits set and RWX cleared. To mark
+        * protnone which used to have _PAGE_WRITE set we clear
+        * the privileged bit.
+        */
+       return !(pte_raw(pte) & cpu_to_be64(_PAGE_RWX | _PAGE_PRIVILEGED));
+}
+#else
+#define pte_savedwrite pte_savedwrite
+static inline bool pte_savedwrite(pte_t pte)
+{
+       return false;
+}
+#endif
+
+static inline int pte_write(pte_t pte)
+{
+       return __pte_write(pte) || pte_savedwrite(pte);
+}
+
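The encoding that pte_savedwrite() tests can be summarized with a standalone sketch using illustrative bit values (not the kernel's _PAGE_* definitions): prot-none normally means RWX clear plus the privileged bit set, and a prot-none pte that used to be writable additionally clears the privileged bit.

	#include <assert.h>
	#include <stdbool.h>
	#include <stdint.h>

	#define PRESENT		(1ULL << 0)
	#define PRIVILEGED	(1ULL << 1)
	#define READ		(1ULL << 2)
	#define WRITE		(1ULL << 3)
	#define EXEC		(1ULL << 4)
	#define RWX		(READ | WRITE | EXEC)

	/* make a pte prot-none, remembering writability in the
	 * deliberately-cleared privileged bit */
	uint64_t mk_protnone(uint64_t pte)
	{
		bool was_writable = pte & WRITE;

		pte = (pte & ~RWX) | PRIVILEGED;
		if (was_writable)
			pte &= ~PRIVILEGED;
		return pte;
	}

	bool savedwrite(uint64_t pte)
	{
		/* neither access bits nor the privileged bit set */
		return !(pte & (RWX | PRIVILEGED));
	}

	int main(void)
	{
		assert(savedwrite(mk_protnone(PRESENT | RWX)));   /* was writable */
		assert(!savedwrite(mk_protnone(PRESENT | READ))); /* was read-only */
		return 0;
	}
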
 #define __HAVE_ARCH_PTEP_SET_WRPROTECT
 static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr,
                                      pte_t *ptep)
 {
-       if ((pte_raw(*ptep) & cpu_to_be64(_PAGE_WRITE)) == 0)
-               return;
-
-       pte_update(mm, addr, ptep, _PAGE_WRITE, 0, 0);
+       if (__pte_write(*ptep))
+               pte_update(mm, addr, ptep, _PAGE_WRITE, 0, 0);
+       else if (unlikely(pte_savedwrite(*ptep)))
+               pte_update(mm, addr, ptep, 0, _PAGE_PRIVILEGED, 0);
 }
 
 static inline void huge_ptep_set_wrprotect(struct mm_struct *mm,
                                           unsigned long addr, pte_t *ptep)
 {
-       if ((pte_raw(*ptep) & cpu_to_be64(_PAGE_WRITE)) == 0)
-               return;
-
-       pte_update(mm, addr, ptep, _PAGE_WRITE, 0, 1);
+       /*
+        * We should not find protnone for hugetlb, but this completes the
+        * interface.
+        */
+       if (__pte_write(*ptep))
+               pte_update(mm, addr, ptep, _PAGE_WRITE, 0, 1);
+       else if (unlikely(pte_savedwrite(*ptep)))
+               pte_update(mm, addr, ptep, 0, _PAGE_PRIVILEGED, 1);
 }
 
 #define __HAVE_ARCH_PTEP_GET_AND_CLEAR
@@ -397,11 +435,6 @@ static inline void pte_clear(struct mm_struct *mm, unsigned long addr,
        pte_update(mm, addr, ptep, ~0UL, 0, 0);
 }
 
-static inline int pte_write(pte_t pte)
-{
-       return !!(pte_raw(pte) & cpu_to_be64(_PAGE_WRITE));
-}
-
 static inline int pte_dirty(pte_t pte)
 {
        return !!(pte_raw(pte) & cpu_to_be64(_PAGE_DIRTY));
@@ -465,19 +498,12 @@ static inline pte_t pte_clear_savedwrite(pte_t pte)
        VM_BUG_ON(!pte_protnone(pte));
        return __pte(pte_val(pte) | _PAGE_PRIVILEGED);
 }
-
-#define pte_savedwrite pte_savedwrite
-static inline bool pte_savedwrite(pte_t pte)
+#else
+#define pte_clear_savedwrite pte_clear_savedwrite
+static inline pte_t pte_clear_savedwrite(pte_t pte)
 {
-       /*
-        * Saved write ptes are prot none ptes that doesn't have
-        * privileged bit sit. We mark prot none as one which has
-        * present and pviliged bit set and RWX cleared. To mark
-        * protnone which used to have _PAGE_WRITE set we clear
-        * the privileged bit.
-        */
-       VM_BUG_ON(!pte_protnone(pte));
-       return !(pte_raw(pte) & cpu_to_be64(_PAGE_RWX | _PAGE_PRIVILEGED));
+       VM_WARN_ON(1);
+       return __pte(pte_val(pte) & ~_PAGE_WRITE);
 }
 #endif /* CONFIG_NUMA_BALANCING */
 
@@ -506,6 +532,8 @@ static inline unsigned long pte_pfn(pte_t pte)
 /* Generic modifiers for PTE bits */
 static inline pte_t pte_wrprotect(pte_t pte)
 {
+       if (unlikely(pte_savedwrite(pte)))
+               return pte_clear_savedwrite(pte);
        return __pte(pte_val(pte) & ~_PAGE_WRITE);
 }
 
@@ -926,6 +954,7 @@ static inline int pmd_protnone(pmd_t pmd)
 
 #define __HAVE_ARCH_PMD_WRITE
 #define pmd_write(pmd)         pte_write(pmd_pte(pmd))
+#define __pmd_write(pmd)       __pte_write(pmd_pte(pmd))
 #define pmd_savedwrite(pmd)    pte_savedwrite(pmd_pte(pmd))
 
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
@@ -982,11 +1011,10 @@ static inline int __pmdp_test_and_clear_young(struct mm_struct *mm,
 static inline void pmdp_set_wrprotect(struct mm_struct *mm, unsigned long addr,
                                      pmd_t *pmdp)
 {
-
-       if ((pmd_raw(*pmdp) & cpu_to_be64(_PAGE_WRITE)) == 0)
-               return;
-
-       pmd_hugepage_update(mm, addr, pmdp, _PAGE_WRITE, 0);
+       if (__pmd_write((*pmdp)))
+               pmd_hugepage_update(mm, addr, pmdp, _PAGE_WRITE, 0);
+       else if (unlikely(pmd_savedwrite(*pmdp)))
+               pmd_hugepage_update(mm, addr, pmdp, 0, _PAGE_PRIVILEGED);
 }
 
 static inline int pmd_trans_huge(pmd_t pmd)
index 4e63787dc3becfd6c9b832f50a8c2e1367bd4187..842124b199b5859f6d0f61cb6b7c09ed08854d96 100644 (file)
@@ -112,7 +112,7 @@ static inline __wsum csum_add(__wsum csum, __wsum addend)
 
 #ifdef __powerpc64__
        res += (__force u64)addend;
-       return (__force __wsum)((u32)res + (res >> 32));
+       return (__force __wsum) from64to32(res);
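from64to32() is used here because the deleted expression, (u32)res + (res >> 32), can itself carry out of 32 bits and silently drop that carry. A sketch of the fold the helper is assumed to perform:

	#include <assert.h>
	#include <stdint.h>

	/* fold a 64-bit accumulator to 32 bits, one's-complement style:
	 * add the two halves, then absorb the carry that add may produce */
	static inline uint32_t fold64(uint64_t x)
	{
		x = (x & 0xFFFFFFFFULL) + (x >> 32);
		x = (x & 0xFFFFFFFFULL) + (x >> 32);
		return (uint32_t)x;
	}

	int main(void)
	{
		/* the truncating add would return 0 here, losing the carry */
		assert(fold64(0xFFFFFFFF00000001ULL) == 1);
		return 0;
	}
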
 #else
        asm("addc %0,%0,%1;"
            "addze %0,%0;"
index fd321eb423cb44fef259cf47547a05ee1cc1f6a8..155731557c9bc08673881520c13d6db825fd91b0 100644 (file)
@@ -70,8 +70,8 @@ static inline void report_invalid_psscr_val(u64 psscr_val, int err)
        std     r0,0(r1);                                       \
        ptesync;                                                \
        ld      r0,0(r1);                                       \
-1:     cmpd    cr0,r0,r0;                                      \
-       bne     1b;                                             \
+236:   cmpd    cr0,r0,r0;                                      \
+       bne     236b;                                           \
        IDLE_INST;                                              \
 
 #define        IDLE_STATE_ENTER_SEQ_NORET(IDLE_INST)                   \
index 93b9b84568e8175e4010b6544bb490685730408f..09bde6e34f5d524bd7b172f42b25484b1cca3f9d 100644 (file)
@@ -144,8 +144,8 @@ extern int arch_setup_additional_pages(struct linux_binprm *bprm,
 #define ARCH_DLINFO_CACHE_GEOMETRY                                     \
        NEW_AUX_ENT(AT_L1I_CACHESIZE, ppc64_caches.l1i.size);           \
        NEW_AUX_ENT(AT_L1I_CACHEGEOMETRY, get_cache_geometry(l1i));     \
-       NEW_AUX_ENT(AT_L1D_CACHESIZE, ppc64_caches.l1i.size);           \
-       NEW_AUX_ENT(AT_L1D_CACHEGEOMETRY, get_cache_geometry(l1i));     \
+       NEW_AUX_ENT(AT_L1D_CACHESIZE, ppc64_caches.l1d.size);           \
+       NEW_AUX_ENT(AT_L1D_CACHEGEOMETRY, get_cache_geometry(l1d));     \
        NEW_AUX_ENT(AT_L2_CACHESIZE, ppc64_caches.l2.size);             \
        NEW_AUX_ENT(AT_L2_CACHEGEOMETRY, get_cache_geometry(l2));       \
        NEW_AUX_ENT(AT_L3_CACHESIZE, ppc64_caches.l3.size);             \
index f97d8cb6bdf64fd8e147035d659b71016c33ecd0..ed62efe01e49ed1a2e37c6bbd6efbf41fed3e925 100644 (file)
 
 #define P8_DSISR_MC_SLB_ERRORS         (P7_DSISR_MC_SLB_ERRORS | \
                                         P8_DSISR_MC_ERAT_MULTIHIT_SEC)
+
+/*
+ * Machine Check bits on power9
+ */
+#define P9_SRR1_MC_LOADSTORE(srr1)     (((srr1) >> PPC_BITLSHIFT(42)) & 1)
+
+#define P9_SRR1_MC_IFETCH(srr1)        (       \
+       PPC_BITEXTRACT(srr1, 45, 0) |   \
+       PPC_BITEXTRACT(srr1, 44, 1) |   \
+       PPC_BITEXTRACT(srr1, 43, 2) |   \
+       PPC_BITEXTRACT(srr1, 36, 3) )
+
+/* 0 is reserved */
+#define P9_SRR1_MC_IFETCH_UE                           1
+#define P9_SRR1_MC_IFETCH_SLB_PARITY                   2
+#define P9_SRR1_MC_IFETCH_SLB_MULTIHIT                 3
+#define P9_SRR1_MC_IFETCH_ERAT_MULTIHIT                        4
+#define P9_SRR1_MC_IFETCH_TLB_MULTIHIT                 5
+#define P9_SRR1_MC_IFETCH_UE_TLB_RELOAD                        6
+/* 7 is reserved */
+#define P9_SRR1_MC_IFETCH_LINK_TIMEOUT                 8
+#define P9_SRR1_MC_IFETCH_LINK_TABLEWALK_TIMEOUT       9
+/* 10 ? */
+#define P9_SRR1_MC_IFETCH_RA                   11
+#define P9_SRR1_MC_IFETCH_RA_TABLEWALK         12
+#define P9_SRR1_MC_IFETCH_RA_ASYNC_STORE               13
+#define P9_SRR1_MC_IFETCH_LINK_ASYNC_STORE_TIMEOUT     14
+#define P9_SRR1_MC_IFETCH_RA_TABLEWALK_FOREIGN 15
+
+/* DSISR bits for machine check (On Power9) */
+#define P9_DSISR_MC_UE                                 (PPC_BIT(48))
+#define P9_DSISR_MC_UE_TABLEWALK                       (PPC_BIT(49))
+#define P9_DSISR_MC_LINK_LOAD_TIMEOUT                  (PPC_BIT(50))
+#define P9_DSISR_MC_LINK_TABLEWALK_TIMEOUT             (PPC_BIT(51))
+#define P9_DSISR_MC_ERAT_MULTIHIT                      (PPC_BIT(52))
+#define P9_DSISR_MC_TLB_MULTIHIT_MFTLB                 (PPC_BIT(53))
+#define P9_DSISR_MC_USER_TLBIE                         (PPC_BIT(54))
+#define P9_DSISR_MC_SLB_PARITY_MFSLB                   (PPC_BIT(55))
+#define P9_DSISR_MC_SLB_MULTIHIT_MFSLB                 (PPC_BIT(56))
+#define P9_DSISR_MC_RA_LOAD                            (PPC_BIT(57))
+#define P9_DSISR_MC_RA_TABLEWALK                       (PPC_BIT(58))
+#define P9_DSISR_MC_RA_TABLEWALK_FOREIGN               (PPC_BIT(59))
+#define P9_DSISR_MC_RA_FOREIGN                         (PPC_BIT(60))
+
+/* SLB error bits */
+#define P9_DSISR_MC_SLB_ERRORS         (P9_DSISR_MC_ERAT_MULTIHIT | \
+                                        P9_DSISR_MC_SLB_PARITY_MFSLB | \
+                                        P9_DSISR_MC_SLB_MULTIHIT_MFSLB)
+
 enum MCE_Version {
        MCE_V1 = 1,
 };
@@ -93,6 +142,9 @@ enum MCE_ErrorType {
        MCE_ERROR_TYPE_SLB = 2,
        MCE_ERROR_TYPE_ERAT = 3,
        MCE_ERROR_TYPE_TLB = 4,
+       MCE_ERROR_TYPE_USER = 5,
+       MCE_ERROR_TYPE_RA = 6,
+       MCE_ERROR_TYPE_LINK = 7,
 };
 
 enum MCE_UeErrorType {
@@ -121,6 +173,32 @@ enum MCE_TlbErrorType {
        MCE_TLB_ERROR_MULTIHIT = 2,
 };
 
+enum MCE_UserErrorType {
+       MCE_USER_ERROR_INDETERMINATE = 0,
+       MCE_USER_ERROR_TLBIE = 1,
+};
+
+enum MCE_RaErrorType {
+       MCE_RA_ERROR_INDETERMINATE = 0,
+       MCE_RA_ERROR_IFETCH = 1,
+       MCE_RA_ERROR_PAGE_TABLE_WALK_IFETCH = 2,
+       MCE_RA_ERROR_PAGE_TABLE_WALK_IFETCH_FOREIGN = 3,
+       MCE_RA_ERROR_LOAD = 4,
+       MCE_RA_ERROR_STORE = 5,
+       MCE_RA_ERROR_PAGE_TABLE_WALK_LOAD_STORE = 6,
+       MCE_RA_ERROR_PAGE_TABLE_WALK_LOAD_STORE_FOREIGN = 7,
+       MCE_RA_ERROR_LOAD_STORE_FOREIGN = 8,
+};
+
+enum MCE_LinkErrorType {
+       MCE_LINK_ERROR_INDETERMINATE = 0,
+       MCE_LINK_ERROR_IFETCH_TIMEOUT = 1,
+       MCE_LINK_ERROR_PAGE_TABLE_WALK_IFETCH_TIMEOUT = 2,
+       MCE_LINK_ERROR_LOAD_TIMEOUT = 3,
+       MCE_LINK_ERROR_STORE_TIMEOUT = 4,
+       MCE_LINK_ERROR_PAGE_TABLE_WALK_LOAD_STORE_TIMEOUT = 5,
+};
+
 struct machine_check_event {
        enum MCE_Version        version:8;      /* 0x00 */
        uint8_t                 in_use;         /* 0x01 */
@@ -166,6 +244,30 @@ struct machine_check_event {
                        uint64_t        effective_address;
                        uint8_t         reserved_2[16];
                } tlb_error;
+
+               struct {
+                       enum MCE_UserErrorType user_error_type:8;
+                       uint8_t         effective_address_provided;
+                       uint8_t         reserved_1[6];
+                       uint64_t        effective_address;
+                       uint8_t         reserved_2[16];
+               } user_error;
+
+               struct {
+                       enum MCE_RaErrorType ra_error_type:8;
+                       uint8_t         effective_address_provided;
+                       uint8_t         reserved_1[6];
+                       uint64_t        effective_address;
+                       uint8_t         reserved_2[16];
+               } ra_error;
+
+               struct {
+                       enum MCE_LinkErrorType link_error_type:8;
+                       uint8_t         effective_address_provided;
+                       uint8_t         reserved_1[6];
+                       uint64_t        effective_address;
+                       uint8_t         reserved_2[16];
+               } link_error;
        } u;
 };
 
@@ -176,8 +278,12 @@ struct mce_error_info {
                enum MCE_SlbErrorType slb_error_type:8;
                enum MCE_EratErrorType erat_error_type:8;
                enum MCE_TlbErrorType tlb_error_type:8;
+               enum MCE_UserErrorType user_error_type:8;
+               enum MCE_RaErrorType ra_error_type:8;
+               enum MCE_LinkErrorType link_error_type:8;
        } u;
-       uint8_t         reserved[2];
+       enum MCE_Severity       severity:8;
+       enum MCE_Initiator      initiator:8;
 };
 
 #define MAX_MC_EVT     100
index ba9921bf202e0c7f2d8579dfc6f31f25ad7cebd7..5134ade2e850162c70d288c1b293b38a7aae6a1c 100644 (file)
@@ -1,6 +1,7 @@
 #ifndef _ASM_POWERPC_NOHASH_32_PGTABLE_H
 #define _ASM_POWERPC_NOHASH_32_PGTABLE_H
 
+#define __ARCH_USE_5LEVEL_HACK
 #include <asm-generic/pgtable-nopmd.h>
 
 #ifndef __ASSEMBLY__
index d0db98793dd83d0ddf5e8d60be2688e697e74491..9f4de0a1035efb3e6d615a86f6cab1e29362d339 100644 (file)
@@ -1,5 +1,8 @@
 #ifndef _ASM_POWERPC_NOHASH_64_PGTABLE_4K_H
 #define _ASM_POWERPC_NOHASH_64_PGTABLE_4K_H
+
+#include <asm-generic/5level-fixup.h>
+
 /*
  * Entries per page directory level.  The PTE level must use a 64b record
  * for each page table entry.  The PMD and PGD level use a 32b record for
index 55b28ef3409af5494a521b8a948966947555a84d..1facb584dd2962faf8ff334b9ca90e2840ee6d1a 100644 (file)
@@ -1,6 +1,7 @@
 #ifndef _ASM_POWERPC_NOHASH_64_PGTABLE_64K_H
 #define _ASM_POWERPC_NOHASH_64_PGTABLE_64K_H
 
+#define __ARCH_USE_5LEVEL_HACK
 #include <asm-generic/pgtable-nopud.h>
 
 
index 0cd8a3852763292eabe905b33960f888e875c978..e5805ad78e127ba456ba305abf09d30fdc38c5f4 100644 (file)
@@ -230,7 +230,7 @@ static inline int hugepd_ok(hugepd_t hpd)
        return ((hpd_val(hpd) & 0x4) != 0);
 #else
        /* We clear the top bit to indicate hugepd */
-       return ((hpd_val(hpd) & PD_HUGE) ==  0);
+       return (hpd_val(hpd) && (hpd_val(hpd) & PD_HUGE) == 0);
 #endif
 }
 
index d99bd442aacbe5747f605cd356de6aa3ae58e53a..e7d6d86563eeda924598b1079a491d0e5945c566 100644 (file)
 #define PPC_INST_BRANCH_COND           0x40800000
 #define PPC_INST_LBZCIX                        0x7c0006aa
 #define PPC_INST_STBCIX                        0x7c0007aa
+#define PPC_INST_LWZX                  0x7c00002e
+#define PPC_INST_LFSX                  0x7c00042e
+#define PPC_INST_STFSX                 0x7c00052e
+#define PPC_INST_LFDX                  0x7c0004ae
+#define PPC_INST_STFDX                 0x7c0005ae
+#define PPC_INST_LVX                   0x7c0000ce
+#define PPC_INST_STVX                  0x7c0001ce
 
 /* macros to insert fields into opcodes */
 #define ___PPC_RA(a)   (((a) & 0x1f) << 16)
index 4a90634e83223c25bdf839f54ca9b3f91cb76330..35c00d7a0cf81a4773d0fad7dc2747923cd2c643 100644 (file)
@@ -160,12 +160,18 @@ struct of_drconf_cell {
 #define OV5_PFO_HW_ENCR                0x1120  /* PFO Encryption Accelerator */
 #define OV5_SUB_PROCESSORS     0x1501  /* 1,2,or 4 Sub-Processors supported */
 #define OV5_XIVE_EXPLOIT       0x1701  /* XIVE exploitation supported */
-#define OV5_MMU_RADIX_300      0x1880  /* ISA v3.00 radix MMU supported */
-#define OV5_MMU_HASH_300       0x1840  /* ISA v3.00 hash MMU supported */
-#define OV5_MMU_SEGM_RADIX     0x1820  /* radix mode (no segmentation) */
-#define OV5_MMU_PROC_TBL       0x1810  /* hcall selects SLB or proc table */
-#define OV5_MMU_SLB            0x1800  /* always use SLB */
-#define OV5_MMU_GTSE           0x1808  /* Guest translation shootdown */
+/* MMU Base Architecture */
+#define OV5_MMU_SUPPORT                0x18C0  /* MMU Mode Support Mask */
+#define OV5_MMU_HASH           0x1800  /* Hash MMU Only */
+#define OV5_MMU_RADIX          0x1840  /* Radix MMU Only */
+#define OV5_MMU_EITHER         0x1880  /* Hash or Radix Supported */
+#define OV5_MMU_DYNAMIC                0x18C0  /* Hash or Radix Can Switch Later */
+#define OV5_NMMU               0x1820  /* Nest MMU Available */
+/* Hash Table Extensions */
+#define OV5_HASH_SEG_TBL       0x1980  /* In Memory Segment Tables Available */
+#define OV5_HASH_GTSE          0x1940  /* Guest Translation Shoot Down Avail */
+/* Radix Table Extensions */
+#define OV5_RADIX_GTSE         0x1A40  /* Guest Translation Shoot Down Avail */
 
 /* Option Vector 6: IBM PAPR hints */
 #define OV6_LINUX              0x02    /* Linux is our OS */
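
Each OV5 constant packs its option-vector byte index into the high byte and the feature bit mask into the low byte; prom_init.c splits them with helpers along these lines (a sketch; the helper names are assumed from the prom_init conventions):

	#include <assert.h>

	#define OV5_INDX(x)	((x) >> 8)	/* which option-vector byte */
	#define OV5_FEAT(x)	((x) & 0xff)	/* bit mask within that byte */

	int main(void)
	{
		/* OV5_MMU_RADIX == 0x1840: bit mask 0x40 in byte 0x18, and
		 * OV5_MMU_SUPPORT == 0x18C0 covers both MMU mode bits */
		assert(OV5_INDX(0x1840) == 0x18);
		assert(OV5_FEAT(0x1840) == 0x40);
		assert((OV5_FEAT(0x18C0) & OV5_FEAT(0x1840)) == 0x40);
		return 0;
	}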
index 4b369d83fe9ce1ea72b3f2a93590fb132d534512..1c9470881c4abe249fd943294c99e94f1916893b 100644 (file)
@@ -387,3 +387,4 @@ SYSCALL(copy_file_range)
 COMPAT_SYS_SPU(preadv2)
 COMPAT_SYS_SPU(pwritev2)
 SYSCALL(kexec_file_load)
+SYSCALL(statx)
index eb1acee91a2034c30d4277fe040cd797279f13b4..9ba11dbcaca98f88c53ee46c3bd009b22f13df01 100644 (file)
@@ -12,7 +12,7 @@
 #include <uapi/asm/unistd.h>
 
 
-#define NR_syscalls            383
+#define NR_syscalls            384
 
 #define __NR__exit __NR_exit
 
index 44583a52f882540986928cc48a63971251226a0f..58e2ec0310fc9508b51c39ec6ab00edb7e51c258 100644 (file)
 
 #define SCM_TIMESTAMPING_OPT_STATS     54
 
+#define SO_MEMINFO             55
+
+#define SO_INCOMING_NAPI_ID    56
+
+#define SO_COOKIE              57
+
 #endif /* _ASM_POWERPC_SOCKET_H */
index 2f26335a3c42a8141d29156f07105ca82761a98c..b85f1422885746d918131216fb45fd76bb99338a 100644 (file)
 #define __NR_preadv2           380
 #define __NR_pwritev2          381
 #define __NR_kexec_file_load   382
+#define __NR_statx             383
 
 #endif /* _UAPI_ASM_POWERPC_UNISTD_H_ */
index bb7a1890aeb7fb8e95cf8ca0c7aa53765e12eb45..e79b9daa873c1874485021676426ea47196a5a68 100644 (file)
@@ -77,6 +77,7 @@ extern void __flush_tlb_power8(unsigned int action);
 extern void __flush_tlb_power9(unsigned int action);
 extern long __machine_check_early_realmode_p7(struct pt_regs *regs);
 extern long __machine_check_early_realmode_p8(struct pt_regs *regs);
+extern long __machine_check_early_realmode_p9(struct pt_regs *regs);
 #endif /* CONFIG_PPC64 */
 #if defined(CONFIG_E500)
 extern void __setup_cpu_e5500(unsigned long offset, struct cpu_spec* spec);
@@ -540,6 +541,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
                .cpu_setup              = __setup_cpu_power9,
                .cpu_restore            = __restore_cpu_power9,
                .flush_tlb              = __flush_tlb_power9,
+               .machine_check_early    = __machine_check_early_realmode_p9,
                .platform               = "power9",
        },
        {       /* Power9 */
@@ -559,6 +561,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
                .cpu_setup              = __setup_cpu_power9,
                .cpu_restore            = __restore_cpu_power9,
                .flush_tlb              = __flush_tlb_power9,
+               .machine_check_early    = __machine_check_early_realmode_p9,
                .platform               = "power9",
        },
        {       /* Cell Broadband Engine */
index 5f61cc0349c063f1abfd736c13104559cab9d2b0..6fd08219248db7485a6d5c8227dee83664d29b38 100644 (file)
@@ -276,19 +276,21 @@ power_enter_stop:
  */
        andis.   r4,r3,PSSCR_EC_ESL_MASK_SHIFTED
        clrldi   r3,r3,60 /* r3 = Bits[60:63] = Requested Level (RL) */
-       bne      1f
+       bne      .Lhandle_esl_ec_set
        IDLE_STATE_ENTER_SEQ(PPC_STOP)
        li      r3,0  /* Since we didn't lose state, return 0 */
        b       pnv_wakeup_noloss
+
+.Lhandle_esl_ec_set:
 /*
  * Check if the requested state is a deep idle state.
  */
-1:     LOAD_REG_ADDRBASE(r5,pnv_first_deep_stop_state)
+       LOAD_REG_ADDRBASE(r5,pnv_first_deep_stop_state)
        ld      r4,ADDROFF(pnv_first_deep_stop_state)(r5)
        cmpd    r3,r4
-       bge     2f
+       bge     .Lhandle_deep_stop
        IDLE_STATE_ENTER_SEQ_NORET(PPC_STOP)
-2:
+.Lhandle_deep_stop:
 /*
  * Entering deep idle state.
  * Clear thread bit in PACA_CORE_IDLE_STATE, save SPRs to
@@ -447,9 +449,23 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
 _GLOBAL(pnv_wakeup_tb_loss)
        ld      r1,PACAR1(r13)
        /*
-        * Before entering any idle state, the NVGPRs are saved in the stack
-        * and they are restored before switching to the process context. Hence
-        * until they are restored, they are free to be used.
+        * Before entering any idle state, the NVGPRs are saved on the stack.
+        * If there was a state loss, or PACA_NAPSTATELOST was set, then the
+        * NVGPRs are restored. If we are here, it is likely that state was
+        * lost, but not guaranteed -- neither the ISA207 nor the ISA300 test
+        * to reach here is the same as the test used to restore the NVGPRs
+        * (the PACA_THREAD_IDLE_STATE test for ISA207, the PSSCR test for
+        * ISA300, and the SRR1 test for restoring NVGPRs).
+        *
+        * We are about to clobber NVGPRs now, so set NAPSTATELOST to
+        * guarantee they will always be restored. This might be tightened
+        * with careful reading of specs (particularly for ISA300) but this
+        * is already a slow wakeup path and it's simpler to be safe.
+        */
+       li      r0,1
+       stb     r0,PACA_NAPSTATELOST(r13)
+
+       /*
         *
         * Save SRR1 and LR in NVGPRs as they might be clobbered in
         * opal_call() (called in CHECK_HMI_INTERRUPT). SRR1 is required
index c6923ff451311bfade14e7f68888f85bb69f7176..a1475e6aef3a519c70824d4dd432748097a7965e 100644 (file)
@@ -58,6 +58,15 @@ static void mce_set_error_info(struct machine_check_event *mce,
        case MCE_ERROR_TYPE_TLB:
                mce->u.tlb_error.tlb_error_type = mce_err->u.tlb_error_type;
                break;
+       case MCE_ERROR_TYPE_USER:
+               mce->u.user_error.user_error_type = mce_err->u.user_error_type;
+               break;
+       case MCE_ERROR_TYPE_RA:
+               mce->u.ra_error.ra_error_type = mce_err->u.ra_error_type;
+               break;
+       case MCE_ERROR_TYPE_LINK:
+               mce->u.link_error.link_error_type = mce_err->u.link_error_type;
+               break;
        case MCE_ERROR_TYPE_UNKNOWN:
        default:
                break;
@@ -90,13 +99,14 @@ void save_mce_event(struct pt_regs *regs, long handled,
        mce->gpr3 = regs->gpr[3];
        mce->in_use = 1;
 
-       mce->initiator = MCE_INITIATOR_CPU;
        /* Mark it recovered if we have handled it and MSR(RI=1). */
        if (handled && (regs->msr & MSR_RI))
                mce->disposition = MCE_DISPOSITION_RECOVERED;
        else
                mce->disposition = MCE_DISPOSITION_NOT_RECOVERED;
-       mce->severity = MCE_SEV_ERROR_SYNC;
+
+       mce->initiator = mce_err->initiator;
+       mce->severity = mce_err->severity;
 
        /*
         * Populate the mce error_type and type-specific error_type.
@@ -115,6 +125,15 @@ void save_mce_event(struct pt_regs *regs, long handled,
        } else if (mce->error_type == MCE_ERROR_TYPE_ERAT) {
                mce->u.erat_error.effective_address_provided = true;
                mce->u.erat_error.effective_address = addr;
+       } else if (mce->error_type == MCE_ERROR_TYPE_USER) {
+               mce->u.user_error.effective_address_provided = true;
+               mce->u.user_error.effective_address = addr;
+       } else if (mce->error_type == MCE_ERROR_TYPE_RA) {
+               mce->u.ra_error.effective_address_provided = true;
+               mce->u.ra_error.effective_address = addr;
+       } else if (mce->error_type == MCE_ERROR_TYPE_LINK) {
+               mce->u.link_error.effective_address_provided = true;
+               mce->u.link_error.effective_address = addr;
        } else if (mce->error_type == MCE_ERROR_TYPE_UE) {
                mce->u.ue_error.effective_address_provided = true;
                mce->u.ue_error.effective_address = addr;
@@ -239,6 +258,29 @@ void machine_check_print_event_info(struct machine_check_event *evt)
                "Parity",
                "Multihit",
        };
+       static const char *mc_user_types[] = {
+               "Indeterminate",
+               "tlbie(l) invalid",
+       };
+       static const char *mc_ra_types[] = {
+               "Indeterminate",
+               "Instruction fetch (bad)",
+               "Page table walk ifetch (bad)",
+               "Page table walk ifetch (foreign)",
+               "Load (bad)",
+               "Store (bad)",
+               "Page table walk Load/Store (bad)",
+               "Page table walk Load/Store (foreign)",
+               "Load/Store (foreign)",
+       };
+       static const char *mc_link_types[] = {
+               "Indeterminate",
+               "Instruction fetch (timeout)",
+               "Page table walk ifetch (timeout)",
+               "Load (timeout)",
+               "Store (timeout)",
+               "Page table walk Load/Store (timeout)",
+       };
 
        /* Print things out */
        if (evt->version != MCE_V1) {
@@ -315,6 +357,36 @@ void machine_check_print_event_info(struct machine_check_event *evt)
                        printk("%s    Effective address: %016llx\n",
                               level, evt->u.tlb_error.effective_address);
                break;
+       case MCE_ERROR_TYPE_USER:
+               subtype = evt->u.user_error.user_error_type <
+                       ARRAY_SIZE(mc_user_types) ?
+                       mc_user_types[evt->u.user_error.user_error_type]
+                       : "Unknown";
+               printk("%s  Error type: User [%s]\n", level, subtype);
+               if (evt->u.user_error.effective_address_provided)
+                       printk("%s    Effective address: %016llx\n",
+                              level, evt->u.user_error.effective_address);
+               break;
+       case MCE_ERROR_TYPE_RA:
+               subtype = evt->u.ra_error.ra_error_type <
+                       ARRAY_SIZE(mc_ra_types) ?
+                       mc_ra_types[evt->u.ra_error.ra_error_type]
+                       : "Unknown";
+               printk("%s  Error type: Real address [%s]\n", level, subtype);
+               if (evt->u.ra_error.effective_address_provided)
+                       printk("%s    Effective address: %016llx\n",
+                              level, evt->u.ra_error.effective_address);
+               break;
+       case MCE_ERROR_TYPE_LINK:
+               subtype = evt->u.link_error.link_error_type <
+                       ARRAY_SIZE(mc_link_types) ?
+                       mc_link_types[evt->u.link_error.link_error_type]
+                       : "Unknown";
+               printk("%s  Error type: Link [%s]\n", level, subtype);
+               if (evt->u.link_error.effective_address_provided)
+                       printk("%s    Effective address: %016llx\n",
+                              level, evt->u.link_error.effective_address);
+               break;
        default:
        case MCE_ERROR_TYPE_UNKNOWN:
                printk("%s  Error type: Unknown\n", level);
@@ -341,6 +413,18 @@ uint64_t get_mce_fault_addr(struct machine_check_event *evt)
                if (evt->u.tlb_error.effective_address_provided)
                        return evt->u.tlb_error.effective_address;
                break;
+       case MCE_ERROR_TYPE_USER:
+               if (evt->u.user_error.effective_address_provided)
+                       return evt->u.user_error.effective_address;
+               break;
+       case MCE_ERROR_TYPE_RA:
+               if (evt->u.ra_error.effective_address_provided)
+                       return evt->u.ra_error.effective_address;
+               break;
+       case MCE_ERROR_TYPE_LINK:
+               if (evt->u.link_error.effective_address_provided)
+                       return evt->u.link_error.effective_address;
+               break;
        default:
        case MCE_ERROR_TYPE_UNKNOWN:
                break;
index 7353991c4ecee6d8a6ecacdce4ab96815ffdfb12..763d6f58caa8ca140c8afb1260555b0ea1c1d2a0 100644 (file)
@@ -116,6 +116,51 @@ static void flush_and_reload_slb(void)
 }
 #endif
 
+static void flush_erat(void)
+{
+       asm volatile(PPC_INVALIDATE_ERAT : : :"memory");
+}
+
+#define MCE_FLUSH_SLB 1
+#define MCE_FLUSH_TLB 2
+#define MCE_FLUSH_ERAT 3
+
+static int mce_flush(int what)
+{
+#ifdef CONFIG_PPC_STD_MMU_64
+       if (what == MCE_FLUSH_SLB) {
+               flush_and_reload_slb();
+               return 1;
+       }
+#endif
+       if (what == MCE_FLUSH_ERAT) {
+               flush_erat();
+               return 1;
+       }
+       if (what == MCE_FLUSH_TLB) {
+               if (cur_cpu_spec && cur_cpu_spec->flush_tlb) {
+                       cur_cpu_spec->flush_tlb(TLB_INVAL_SCOPE_GLOBAL);
+                       return 1;
+               }
+       }
+
+       return 0;
+}
+
+static int mce_handle_flush_derrors(uint64_t dsisr, uint64_t slb, uint64_t tlb, uint64_t erat)
+{
+       if ((dsisr & slb) && mce_flush(MCE_FLUSH_SLB))
+               dsisr &= ~slb;
+       if ((dsisr & erat) && mce_flush(MCE_FLUSH_ERAT))
+               dsisr &= ~erat;
+       if ((dsisr & tlb) && mce_flush(MCE_FLUSH_TLB))
+               dsisr &= ~tlb;
+       /* Any other errors we don't understand? */
+       if (dsisr)
+               return 0;
+       return 1;
+}
+
 static long mce_handle_derror(uint64_t dsisr, uint64_t slb_error_bits)
 {
        long handled = 1;
@@ -281,6 +326,9 @@ long __machine_check_early_realmode_p7(struct pt_regs *regs)
        long handled = 1;
        struct mce_error_info mce_error_info = { 0 };
 
+       mce_error_info.severity = MCE_SEV_ERROR_SYNC;
+       mce_error_info.initiator = MCE_INITIATOR_CPU;
+
        srr1 = regs->msr;
        nip = regs->nip;
 
@@ -352,6 +400,9 @@ long __machine_check_early_realmode_p8(struct pt_regs *regs)
        long handled = 1;
        struct mce_error_info mce_error_info = { 0 };
 
+       mce_error_info.severity = MCE_SEV_ERROR_SYNC;
+       mce_error_info.initiator = MCE_INITIATOR_CPU;
+
        srr1 = regs->msr;
        nip = regs->nip;
 
@@ -372,3 +423,189 @@ long __machine_check_early_realmode_p8(struct pt_regs *regs)
        save_mce_event(regs, handled, &mce_error_info, nip, addr);
        return handled;
 }
+
+static int mce_handle_derror_p9(struct pt_regs *regs)
+{
+       uint64_t dsisr = regs->dsisr;
+
+       return mce_handle_flush_derrors(dsisr,
+                       P9_DSISR_MC_SLB_PARITY_MFSLB |
+                       P9_DSISR_MC_SLB_MULTIHIT_MFSLB,
+
+                       P9_DSISR_MC_TLB_MULTIHIT_MFTLB,
+
+                       P9_DSISR_MC_ERAT_MULTIHIT);
+}
+
+static int mce_handle_ierror_p9(struct pt_regs *regs)
+{
+       uint64_t srr1 = regs->msr;
+
+       switch (P9_SRR1_MC_IFETCH(srr1)) {
+       case P9_SRR1_MC_IFETCH_SLB_PARITY:
+       case P9_SRR1_MC_IFETCH_SLB_MULTIHIT:
+               return mce_flush(MCE_FLUSH_SLB);
+       case P9_SRR1_MC_IFETCH_TLB_MULTIHIT:
+               return mce_flush(MCE_FLUSH_TLB);
+       case P9_SRR1_MC_IFETCH_ERAT_MULTIHIT:
+               return mce_flush(MCE_FLUSH_ERAT);
+       default:
+               return 0;
+       }
+}
+
+static void mce_get_derror_p9(struct pt_regs *regs,
+               struct mce_error_info *mce_err, uint64_t *addr)
+{
+       uint64_t dsisr = regs->dsisr;
+
+       mce_err->severity = MCE_SEV_ERROR_SYNC;
+       mce_err->initiator = MCE_INITIATOR_CPU;
+
+       if (dsisr & P9_DSISR_MC_USER_TLBIE)
+               *addr = regs->nip;
+       else
+               *addr = regs->dar;
+
+       if (dsisr & P9_DSISR_MC_UE) {
+               mce_err->error_type = MCE_ERROR_TYPE_UE;
+               mce_err->u.ue_error_type = MCE_UE_ERROR_LOAD_STORE;
+       } else if (dsisr & P9_DSISR_MC_UE_TABLEWALK) {
+               mce_err->error_type = MCE_ERROR_TYPE_UE;
+               mce_err->u.ue_error_type = MCE_UE_ERROR_PAGE_TABLE_WALK_LOAD_STORE;
+       } else if (dsisr & P9_DSISR_MC_LINK_LOAD_TIMEOUT) {
+               mce_err->error_type = MCE_ERROR_TYPE_LINK;
+               mce_err->u.link_error_type = MCE_LINK_ERROR_LOAD_TIMEOUT;
+       } else if (dsisr & P9_DSISR_MC_LINK_TABLEWALK_TIMEOUT) {
+               mce_err->error_type = MCE_ERROR_TYPE_LINK;
+               mce_err->u.link_error_type = MCE_LINK_ERROR_PAGE_TABLE_WALK_LOAD_STORE_TIMEOUT;
+       } else if (dsisr & P9_DSISR_MC_ERAT_MULTIHIT) {
+               mce_err->error_type = MCE_ERROR_TYPE_ERAT;
+               mce_err->u.erat_error_type = MCE_ERAT_ERROR_MULTIHIT;
+       } else if (dsisr & P9_DSISR_MC_TLB_MULTIHIT_MFTLB) {
+               mce_err->error_type = MCE_ERROR_TYPE_TLB;
+               mce_err->u.tlb_error_type = MCE_TLB_ERROR_MULTIHIT;
+       } else if (dsisr & P9_DSISR_MC_USER_TLBIE) {
+               mce_err->error_type = MCE_ERROR_TYPE_USER;
+               mce_err->u.user_error_type = MCE_USER_ERROR_TLBIE;
+       } else if (dsisr & P9_DSISR_MC_SLB_PARITY_MFSLB) {
+               mce_err->error_type = MCE_ERROR_TYPE_SLB;
+               mce_err->u.slb_error_type = MCE_SLB_ERROR_PARITY;
+       } else if (dsisr & P9_DSISR_MC_SLB_MULTIHIT_MFSLB) {
+               mce_err->error_type = MCE_ERROR_TYPE_SLB;
+               mce_err->u.slb_error_type = MCE_SLB_ERROR_MULTIHIT;
+       } else if (dsisr & P9_DSISR_MC_RA_LOAD) {
+               mce_err->error_type = MCE_ERROR_TYPE_RA;
+               mce_err->u.ra_error_type = MCE_RA_ERROR_LOAD;
+       } else if (dsisr & P9_DSISR_MC_RA_TABLEWALK) {
+               mce_err->error_type = MCE_ERROR_TYPE_RA;
+               mce_err->u.ra_error_type = MCE_RA_ERROR_PAGE_TABLE_WALK_LOAD_STORE;
+       } else if (dsisr & P9_DSISR_MC_RA_TABLEWALK_FOREIGN) {
+               mce_err->error_type = MCE_ERROR_TYPE_RA;
+               mce_err->u.ra_error_type = MCE_RA_ERROR_PAGE_TABLE_WALK_LOAD_STORE_FOREIGN;
+       } else if (dsisr & P9_DSISR_MC_RA_FOREIGN) {
+               mce_err->error_type = MCE_ERROR_TYPE_RA;
+               mce_err->u.ra_error_type = MCE_RA_ERROR_LOAD_STORE_FOREIGN;
+       }
+}
+
+static void mce_get_ierror_p9(struct pt_regs *regs,
+               struct mce_error_info *mce_err, uint64_t *addr)
+{
+       uint64_t srr1 = regs->msr;
+
+       switch (P9_SRR1_MC_IFETCH(srr1)) {
+       case P9_SRR1_MC_IFETCH_RA_ASYNC_STORE:
+       case P9_SRR1_MC_IFETCH_LINK_ASYNC_STORE_TIMEOUT:
+               mce_err->severity = MCE_SEV_FATAL;
+               break;
+       default:
+               mce_err->severity = MCE_SEV_ERROR_SYNC;
+               break;
+       }
+
+       mce_err->initiator = MCE_INITIATOR_CPU;
+
+       *addr = regs->nip;
+
+       switch (P9_SRR1_MC_IFETCH(srr1)) {
+       case P9_SRR1_MC_IFETCH_UE:
+               mce_err->error_type = MCE_ERROR_TYPE_UE;
+               mce_err->u.ue_error_type = MCE_UE_ERROR_IFETCH;
+               break;
+       case P9_SRR1_MC_IFETCH_SLB_PARITY:
+               mce_err->error_type = MCE_ERROR_TYPE_SLB;
+               mce_err->u.slb_error_type = MCE_SLB_ERROR_PARITY;
+               break;
+       case P9_SRR1_MC_IFETCH_SLB_MULTIHIT:
+               mce_err->error_type = MCE_ERROR_TYPE_SLB;
+               mce_err->u.slb_error_type = MCE_SLB_ERROR_MULTIHIT;
+               break;
+       case P9_SRR1_MC_IFETCH_ERAT_MULTIHIT:
+               mce_err->error_type = MCE_ERROR_TYPE_ERAT;
+               mce_err->u.erat_error_type = MCE_ERAT_ERROR_MULTIHIT;
+               break;
+       case P9_SRR1_MC_IFETCH_TLB_MULTIHIT:
+               mce_err->error_type = MCE_ERROR_TYPE_TLB;
+               mce_err->u.tlb_error_type = MCE_TLB_ERROR_MULTIHIT;
+               break;
+       case P9_SRR1_MC_IFETCH_UE_TLB_RELOAD:
+               mce_err->error_type = MCE_ERROR_TYPE_UE;
+               mce_err->u.ue_error_type = MCE_UE_ERROR_PAGE_TABLE_WALK_IFETCH;
+               break;
+       case P9_SRR1_MC_IFETCH_LINK_TIMEOUT:
+               mce_err->error_type = MCE_ERROR_TYPE_LINK;
+               mce_err->u.link_error_type = MCE_LINK_ERROR_IFETCH_TIMEOUT;
+               break;
+       case P9_SRR1_MC_IFETCH_LINK_TABLEWALK_TIMEOUT:
+               mce_err->error_type = MCE_ERROR_TYPE_LINK;
+               mce_err->u.link_error_type = MCE_LINK_ERROR_PAGE_TABLE_WALK_IFETCH_TIMEOUT;
+               break;
+       case P9_SRR1_MC_IFETCH_RA:
+               mce_err->error_type = MCE_ERROR_TYPE_RA;
+               mce_err->u.ra_error_type = MCE_RA_ERROR_IFETCH;
+               break;
+       case P9_SRR1_MC_IFETCH_RA_TABLEWALK:
+               mce_err->error_type = MCE_ERROR_TYPE_RA;
+               mce_err->u.ra_error_type = MCE_RA_ERROR_PAGE_TABLE_WALK_IFETCH;
+               break;
+       case P9_SRR1_MC_IFETCH_RA_ASYNC_STORE:
+               mce_err->error_type = MCE_ERROR_TYPE_RA;
+               mce_err->u.ra_error_type = MCE_RA_ERROR_STORE;
+               break;
+       case P9_SRR1_MC_IFETCH_LINK_ASYNC_STORE_TIMEOUT:
+               mce_err->error_type = MCE_ERROR_TYPE_LINK;
+               mce_err->u.link_error_type = MCE_LINK_ERROR_STORE_TIMEOUT;
+               break;
+       case P9_SRR1_MC_IFETCH_RA_TABLEWALK_FOREIGN:
+               mce_err->error_type = MCE_ERROR_TYPE_RA;
+               mce_err->u.ra_error_type = MCE_RA_ERROR_PAGE_TABLE_WALK_IFETCH_FOREIGN;
+               break;
+       default:
+               break;
+       }
+}
+
+long __machine_check_early_realmode_p9(struct pt_regs *regs)
+{
+       uint64_t nip, addr;
+       long handled;
+       struct mce_error_info mce_error_info = { 0 };
+
+       nip = regs->nip;
+
+       if (P9_SRR1_MC_LOADSTORE(regs->msr)) {
+               handled = mce_handle_derror_p9(regs);
+               mce_get_derror_p9(regs, &mce_error_info, &addr);
+       } else {
+               handled = mce_handle_ierror_p9(regs);
+               mce_get_ierror_p9(regs, &mce_error_info, &addr);
+       }
+
+       /* Handle UE error. */
+       if (mce_error_info.error_type == MCE_ERROR_TYPE_UE)
+               handled = mce_handle_ue_error(regs);
+
+       save_mce_event(regs, handled, &mce_error_info, nip, addr);
+       return handled;
+}
index a3944540fe0d56b0245f65ffcc0c0719f3c57a54..1c1b44ec7642a531e116fa0d04b6269dd38e93b5 100644 (file)
@@ -168,6 +168,14 @@ static unsigned long __initdata prom_tce_alloc_start;
 static unsigned long __initdata prom_tce_alloc_end;
 #endif
 
+static bool __initdata prom_radix_disable;
+
+struct platform_support {
+       bool hash_mmu;
+       bool radix_mmu;
+       bool radix_gtse;
+};
+
 /* Platform codes are now obsolete in the kernel. They are only used within
  * this file and will ultimately go away too. Feel free to change them if
  * you need to; they are not shared with anything outside this file anymore
@@ -626,6 +634,12 @@ static void __init early_cmdline_parse(void)
                prom_memory_limit = ALIGN(prom_memory_limit, 0x1000000);
 #endif
        }
+
+       opt = strstr(prom_cmd_line, "disable_radix");
+       if (opt) {
+               prom_debug("Radix disabled from cmdline\n");
+               prom_radix_disable = true;
+       }
 }
 
 #if defined(CONFIG_PPC_PSERIES) || defined(CONFIG_PPC_POWERNV)
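
Editor's note: the disable_radix scan added to early_cmdline_parse() above is a plain strstr() over the firmware command line, so the token matches anywhere in the string rather than only as a whole word. A minimal user-space sketch of the same check (ours; the kernel applies it to prom_cmd_line):

#include <stdio.h>
#include <string.h>

int main(void)
{
	const char *cmdline = "root=/dev/sda2 disable_radix quiet"; /* made up */

	if (strstr(cmdline, "disable_radix"))
		printf("Radix disabled from cmdline\n");
	return 0;
}
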
@@ -695,6 +709,8 @@ struct option_vector5 {
        u8 byte22;
        u8 intarch;
        u8 mmu;
+       u8 hash_ext;
+       u8 radix_ext;
 } __packed;
 
 struct option_vector6 {
@@ -850,8 +866,9 @@ struct ibm_arch_vec __cacheline_aligned ibm_architecture_vec = {
                .reserved3 = 0,
                .subprocessors = 1,
                .intarch = 0,
-               .mmu = OV5_FEAT(OV5_MMU_RADIX_300) | OV5_FEAT(OV5_MMU_HASH_300) |
-                       OV5_FEAT(OV5_MMU_PROC_TBL) | OV5_FEAT(OV5_MMU_GTSE),
+               .mmu = 0,
+               .hash_ext = 0,
+               .radix_ext = 0,
        },
 
        /* option vector 6: IBM PAPR hints */
@@ -990,6 +1007,92 @@ static int __init prom_count_smt_threads(void)
 
 }
 
+static void __init prom_parse_mmu_model(u8 val,
+                                       struct platform_support *support)
+{
+       switch (val) {
+       case OV5_FEAT(OV5_MMU_DYNAMIC):
+       case OV5_FEAT(OV5_MMU_EITHER): /* Either Available */
+               prom_debug("MMU - either supported\n");
+               support->radix_mmu = !prom_radix_disable;
+               support->hash_mmu = true;
+               break;
+       case OV5_FEAT(OV5_MMU_RADIX): /* Only Radix */
+               prom_debug("MMU - radix only\n");
+               if (prom_radix_disable) {
+                       /*
+                        * If we __have__ to do radix, we're better off ignoring
+                        * the command line rather than not booting.
+                        */
+                       prom_printf("WARNING: Ignoring cmdline option disable_radix\n");
+               }
+               support->radix_mmu = true;
+               break;
+       case OV5_FEAT(OV5_MMU_HASH):
+               prom_debug("MMU - hash only\n");
+               support->hash_mmu = true;
+               break;
+       default:
+               prom_debug("Unknown mmu support option: 0x%x\n", val);
+               break;
+       }
+}
+
+static void __init prom_parse_platform_support(u8 index, u8 val,
+                                              struct platform_support *support)
+{
+       switch (index) {
+       case OV5_INDX(OV5_MMU_SUPPORT): /* MMU Model */
+               prom_parse_mmu_model(val & OV5_FEAT(OV5_MMU_SUPPORT), support);
+               break;
+       case OV5_INDX(OV5_RADIX_GTSE): /* Radix Extensions */
+               if (val & OV5_FEAT(OV5_RADIX_GTSE)) {
+                       prom_debug("Radix - GTSE supported\n");
+                       support->radix_gtse = true;
+               }
+               break;
+       }
+}
+
+static void __init prom_check_platform_support(void)
+{
+       struct platform_support supported = {
+               .hash_mmu = false,
+               .radix_mmu = false,
+               .radix_gtse = false
+       };
+       int prop_len = prom_getproplen(prom.chosen,
+                                      "ibm,arch-vec-5-platform-support");
+       if (prop_len > 1) {
+               int i;
+               u8 vec[prop_len];
+               prom_debug("Found ibm,arch-vec-5-platform-support, len: %d\n",
+                          prop_len);
+               prom_getprop(prom.chosen, "ibm,arch-vec-5-platform-support",
+                            &vec, sizeof(vec));
+               for (i = 0; i < prop_len; i += 2) {
+                       prom_debug("%d: index = 0x%x val = 0x%x\n",
+                                  i / 2, vec[i], vec[i + 1]);
+                       prom_parse_platform_support(vec[i], vec[i + 1],
+                                                   &supported);
+               }
+       }
+
+       if (supported.radix_mmu && supported.radix_gtse) {
+               /* Radix preferred - but we require GTSE for now */
+               prom_debug("Asking for radix with GTSE\n");
+               ibm_architecture_vec.vec5.mmu = OV5_FEAT(OV5_MMU_RADIX);
+               ibm_architecture_vec.vec5.radix_ext = OV5_FEAT(OV5_RADIX_GTSE);
+       } else if (supported.hash_mmu) {
+               /* Default to hash mmu (if we can) */
+               prom_debug("Asking for hash\n");
+               ibm_architecture_vec.vec5.mmu = OV5_FEAT(OV5_MMU_HASH);
+       } else {
+               /* We're probably on a legacy hypervisor */
+               prom_debug("Assuming legacy hash support\n");
+       }
+}
 
 static void __init prom_send_capabilities(void)
 {
@@ -997,6 +1100,9 @@ static void __init prom_send_capabilities(void)
        prom_arg_t ret;
        u32 cores;
 
+       /* Check ibm,arch-vec-5-platform-support and fixup vec5 if required */
+       prom_check_platform_support();
+
        root = call_prom("open", 1, 1, ADDR("/"));
        if (root != 0) {
                /* We need to tell the FW about the number of cores we support.
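
Editor's note: prom_check_platform_support() above walks "ibm,arch-vec-5-platform-support" as a flat array of (index, value) byte pairs, feeding each pair to prom_parse_platform_support(). A stand-alone sketch of that walk (ours; the property bytes are invented and the OV5_* decoding is omitted):

#include <stdio.h>

int main(void)
{
	unsigned char vec[] = { 0x18, 0xc0, 0x1a, 0x40 };	/* hypothetical pairs */
	int i;

	for (i = 0; i + 1 < (int)sizeof(vec); i += 2)
		printf("%d: index = 0x%x val = 0x%x\n", i / 2, vec[i], vec[i + 1]);
	return 0;
}
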
@@ -2993,6 +3099,11 @@ unsigned long __init prom_init(unsigned long r3, unsigned long r4,
         */
        prom_check_initrd(r3, r4);
 
+       /*
+        * Do early parsing of command line
+        */
+       early_cmdline_parse();
+
 #if defined(CONFIG_PPC_PSERIES) || defined(CONFIG_PPC_POWERNV)
        /*
         * On pSeries, inform the firmware about our capabilities
@@ -3008,11 +3119,6 @@ unsigned long __init prom_init(unsigned long r3, unsigned long r4,
        if (of_platform != PLATFORM_POWERMAC)
                copy_and_flush(0, kbase, 0x100, 0);
 
-       /*
-        * Do early parsing of command line
-        */
-       early_cmdline_parse();
-
        /*
         * Initialize memory management within prom_init
         */
index adf2084f214b2bd01d5aa3ef2a613e66b7b66a05..9cfaa8b69b5f32eb64d7adcd8504ccdb6cd87b32 100644 (file)
@@ -408,7 +408,10 @@ static void init_cache_info(struct ppc_cache_info *info, u32 size, u32 lsize,
        info->line_size = lsize;
        info->block_size = bsize;
        info->log_block_size = __ilog2(bsize);
-       info->blocks_per_page = PAGE_SIZE / bsize;
+       if (bsize)
+               info->blocks_per_page = PAGE_SIZE / bsize;
+       else
+               info->blocks_per_page = 0;
 
        if (sets == 0)
                info->assoc = 0xffff;
index f3158fb16de34b69acdb593ce39de7d88437e094..8c68145ba1bd35f4e86f0a3da729ee5387a7c781 100644 (file)
@@ -601,7 +601,7 @@ int kvmppc_book3s_hv_page_fault(struct kvm_run *run, struct kvm_vcpu *vcpu,
                                                         hva, NULL, NULL);
                        if (ptep) {
                                pte = kvmppc_read_update_linux_pte(ptep, 1);
-                               if (pte_write(pte))
+                               if (__pte_write(pte))
                                        write_ok = 1;
                        }
                        local_irq_restore(flags);
index 6fca970373ee90eee718912c48d34a3ebab3ff37..ce6f2121fffe46857bf4b250c06ad3916ac24aeb 100644 (file)
@@ -256,7 +256,7 @@ long kvmppc_do_h_enter(struct kvm *kvm, unsigned long flags,
                }
                pte = kvmppc_read_update_linux_pte(ptep, writing);
                if (pte_present(pte) && !pte_protnone(pte)) {
-                       if (writing && !pte_write(pte))
+                       if (writing && !__pte_write(pte))
                                /* make the actual HPTE be read-only */
                                ptel = hpte_make_readonly(ptel);
                        is_ci = pte_ci(pte);
index 0e649d72fe8d0d3a95f5b1c5216e4bf6f2a4bdd2..2b5e09020cfe379abfb3c56dd497215b07eb8931 100644 (file)
@@ -20,6 +20,7 @@ obj64-y       += copypage_64.o copyuser_64.o usercopy_64.o mem_64.o hweight_64.o \
 
 obj64-$(CONFIG_SMP)    += locks.o
 obj64-$(CONFIG_ALTIVEC)        += vmx-helper.o
+obj64-$(CONFIG_KPROBES_SANITY_TEST) += test_emulate_step.o
 
 obj-y                  += checksum_$(BITS).o checksum_wrappers.o
 
index 846dba2c6360002b5343dc17d5b0744fb5525e85..9c542ec70c5bc8b77810e34f9ab8c96e8cd51522 100644 (file)
@@ -1799,8 +1799,6 @@ int __kprobes emulate_step(struct pt_regs *regs, unsigned int instr)
                goto instr_done;
 
        case LARX:
-               if (regs->msr & MSR_LE)
-                       return 0;
                if (op.ea & (size - 1))
                        break;          /* can't handle misaligned */
                if (!address_ok(regs, op.ea, size))
@@ -1823,8 +1821,6 @@ int __kprobes emulate_step(struct pt_regs *regs, unsigned int instr)
                goto ldst_done;
 
        case STCX:
-               if (regs->msr & MSR_LE)
-                       return 0;
                if (op.ea & (size - 1))
                        break;          /* can't handle misaligned */
                if (!address_ok(regs, op.ea, size))
@@ -1849,8 +1845,6 @@ int __kprobes emulate_step(struct pt_regs *regs, unsigned int instr)
                goto ldst_done;
 
        case LOAD:
-               if (regs->msr & MSR_LE)
-                       return 0;
                err = read_mem(&regs->gpr[op.reg], op.ea, size, regs);
                if (!err) {
                        if (op.type & SIGNEXT)
@@ -1862,8 +1856,6 @@ int __kprobes emulate_step(struct pt_regs *regs, unsigned int instr)
 
 #ifdef CONFIG_PPC_FPU
        case LOAD_FP:
-               if (regs->msr & MSR_LE)
-                       return 0;
                if (size == 4)
                        err = do_fp_load(op.reg, do_lfs, op.ea, size, regs);
                else
@@ -1872,15 +1864,11 @@ int __kprobes emulate_step(struct pt_regs *regs, unsigned int instr)
 #endif
 #ifdef CONFIG_ALTIVEC
        case LOAD_VMX:
-               if (regs->msr & MSR_LE)
-                       return 0;
                err = do_vec_load(op.reg, do_lvx, op.ea & ~0xfUL, regs);
                goto ldst_done;
 #endif
 #ifdef CONFIG_VSX
        case LOAD_VSX:
-               if (regs->msr & MSR_LE)
-                       return 0;
                err = do_vsx_load(op.reg, do_lxvd2x, op.ea, regs);
                goto ldst_done;
 #endif
@@ -1903,8 +1891,6 @@ int __kprobes emulate_step(struct pt_regs *regs, unsigned int instr)
                goto instr_done;
 
        case STORE:
-               if (regs->msr & MSR_LE)
-                       return 0;
                if ((op.type & UPDATE) && size == sizeof(long) &&
                    op.reg == 1 && op.update_reg == 1 &&
                    !(regs->msr & MSR_PR) &&
@@ -1917,8 +1903,6 @@ int __kprobes emulate_step(struct pt_regs *regs, unsigned int instr)
 
 #ifdef CONFIG_PPC_FPU
        case STORE_FP:
-               if (regs->msr & MSR_LE)
-                       return 0;
                if (size == 4)
                        err = do_fp_store(op.reg, do_stfs, op.ea, size, regs);
                else
@@ -1927,15 +1911,11 @@ int __kprobes emulate_step(struct pt_regs *regs, unsigned int instr)
 #endif
 #ifdef CONFIG_ALTIVEC
        case STORE_VMX:
-               if (regs->msr & MSR_LE)
-                       return 0;
                err = do_vec_store(op.reg, do_stvx, op.ea & ~0xfUL, regs);
                goto ldst_done;
 #endif
 #ifdef CONFIG_VSX
        case STORE_VSX:
-               if (regs->msr & MSR_LE)
-                       return 0;
                err = do_vsx_store(op.reg, do_stxvd2x, op.ea, regs);
                goto ldst_done;
 #endif
diff --git a/arch/powerpc/lib/test_emulate_step.c b/arch/powerpc/lib/test_emulate_step.c
new file mode 100644 (file)
index 0000000..2534c14
--- /dev/null
@@ -0,0 +1,434 @@
+/*
+ * Simple sanity test for emulate_step load/store instructions.
+ *
+ * Copyright IBM Corp. 2016
+ *
+ * This program is free software;  you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#define pr_fmt(fmt) "emulate_step_test: " fmt
+
+#include <linux/ptrace.h>
+#include <asm/sstep.h>
+#include <asm/ppc-opcode.h>
+
+#define IMM_L(i)               ((uintptr_t)(i) & 0xffff)
+
+/*
+ * Defined with TEST_ prefix so it does not conflict with other
+ * definitions.
+ */
+#define TEST_LD(r, base, i)    (PPC_INST_LD | ___PPC_RT(r) |           \
+                                       ___PPC_RA(base) | IMM_L(i))
+#define TEST_LWZ(r, base, i)   (PPC_INST_LWZ | ___PPC_RT(r) |          \
+                                       ___PPC_RA(base) | IMM_L(i))
+#define TEST_LWZX(t, a, b)     (PPC_INST_LWZX | ___PPC_RT(t) |         \
+                                       ___PPC_RA(a) | ___PPC_RB(b))
+#define TEST_STD(r, base, i)   (PPC_INST_STD | ___PPC_RS(r) |          \
+                                       ___PPC_RA(base) | ((i) & 0xfffc))
+#define TEST_LDARX(t, a, b, eh)        (PPC_INST_LDARX | ___PPC_RT(t) |        \
+                                       ___PPC_RA(a) | ___PPC_RB(b) |   \
+                                       __PPC_EH(eh))
+#define TEST_STDCX(s, a, b)    (PPC_INST_STDCX | ___PPC_RS(s) |        \
+                                       ___PPC_RA(a) | ___PPC_RB(b))
+#define TEST_LFSX(t, a, b)     (PPC_INST_LFSX | ___PPC_RT(t) |         \
+                                       ___PPC_RA(a) | ___PPC_RB(b))
+#define TEST_STFSX(s, a, b)    (PPC_INST_STFSX | ___PPC_RS(s) |        \
+                                       ___PPC_RA(a) | ___PPC_RB(b))
+#define TEST_LFDX(t, a, b)     (PPC_INST_LFDX | ___PPC_RT(t) |         \
+                                       ___PPC_RA(a) | ___PPC_RB(b))
+#define TEST_STFDX(s, a, b)    (PPC_INST_STFDX | ___PPC_RS(s) |        \
+                                       ___PPC_RA(a) | ___PPC_RB(b))
+#define TEST_LVX(t, a, b)      (PPC_INST_LVX | ___PPC_RT(t) |          \
+                                       ___PPC_RA(a) | ___PPC_RB(b))
+#define TEST_STVX(s, a, b)     (PPC_INST_STVX | ___PPC_RS(s) |         \
+                                       ___PPC_RA(a) | ___PPC_RB(b))
+#define TEST_LXVD2X(s, a, b)   (PPC_INST_LXVD2X | VSX_XX1((s), R##a, R##b))
+#define TEST_STXVD2X(s, a, b)  (PPC_INST_STXVD2X | VSX_XX1((s), R##a, R##b))
+
+
+static void __init init_pt_regs(struct pt_regs *regs)
+{
+       static unsigned long msr;
+       static bool msr_cached;
+
+       memset(regs, 0, sizeof(struct pt_regs));
+
+       if (likely(msr_cached)) {
+               regs->msr = msr;
+               return;
+       }
+
+       asm volatile("mfmsr %0" : "=r"(regs->msr));
+
+       regs->msr |= MSR_FP;
+       regs->msr |= MSR_VEC;
+       regs->msr |= MSR_VSX;
+
+       msr = regs->msr;
+       msr_cached = true;
+}
+
+static void __init show_result(char *ins, char *result)
+{
+       pr_info("%-14s : %s\n", ins, result);
+}
+
+static void __init test_ld(void)
+{
+       struct pt_regs regs;
+       unsigned long a = 0x23;
+       int stepped = -1;
+
+       init_pt_regs(&regs);
+       regs.gpr[3] = (unsigned long) &a;
+
+       /* ld r5, 0(r3) */
+       stepped = emulate_step(&regs, TEST_LD(5, 3, 0));
+
+       if (stepped == 1 && regs.gpr[5] == a)
+               show_result("ld", "PASS");
+       else
+               show_result("ld", "FAIL");
+}
+
+static void __init test_lwz(void)
+{
+       struct pt_regs regs;
+       unsigned int a = 0x4545;
+       int stepped = -1;
+
+       init_pt_regs(&regs);
+       regs.gpr[3] = (unsigned long) &a;
+
+       /* lwz r5, 0(r3) */
+       stepped = emulate_step(&regs, TEST_LWZ(5, 3, 0));
+
+       if (stepped == 1 && regs.gpr[5] == a)
+               show_result("lwz", "PASS");
+       else
+               show_result("lwz", "FAIL");
+}
+
+static void __init test_lwzx(void)
+{
+       struct pt_regs regs;
+       unsigned int a[3] = {0x0, 0x0, 0x1234};
+       int stepped = -1;
+
+       init_pt_regs(&regs);
+       regs.gpr[3] = (unsigned long) a;
+       regs.gpr[4] = 8;
+       regs.gpr[5] = 0x8765;
+
+       /* lwzx r5, r3, r4 */
+       stepped = emulate_step(&regs, TEST_LWZX(5, 3, 4));
+       if (stepped == 1 && regs.gpr[5] == a[2])
+               show_result("lwzx", "PASS");
+       else
+               show_result("lwzx", "FAIL");
+}
+
+static void __init test_std(void)
+{
+       struct pt_regs regs;
+       unsigned long a = 0x1234;
+       int stepped = -1;
+
+       init_pt_regs(&regs);
+       regs.gpr[3] = (unsigned long) &a;
+       regs.gpr[5] = 0x5678;
+
+       /* std r5, 0(r3) */
+       stepped = emulate_step(&regs, TEST_STD(5, 3, 0));
+       if (stepped == 1 && regs.gpr[5] == a)
+               show_result("std", "PASS");
+       else
+               show_result("std", "FAIL");
+}
+
+static void __init test_ldarx_stdcx(void)
+{
+       struct pt_regs regs;
+       unsigned long a = 0x1234;
+       int stepped = -1;
+       unsigned long cr0_eq = 0x1 << 29; /* eq bit of CR0 */
+
+       init_pt_regs(&regs);
+       asm volatile("mfcr %0" : "=r"(regs.ccr));
+
+
+       /*** ldarx ***/
+
+       regs.gpr[3] = (unsigned long) &a;
+       regs.gpr[4] = 0;
+       regs.gpr[5] = 0x5678;
+
+       /* ldarx r5, r3, r4, 0 */
+       stepped = emulate_step(&regs, TEST_LDARX(5, 3, 4, 0));
+
+       /*
+        * Don't touch 'a' here. Touching 'a' can cause a load/store
+        * of 'a', which would make the subsequent stdcx. fail.
+        * Instead, use a hardcoded value for comparison.
+        */
+       if (stepped <= 0 || regs.gpr[5] != 0x1234) {
+               show_result("ldarx / stdcx.", "FAIL (ldarx)");
+               return;
+       }
+
+
+       /*** stdcx. ***/
+
+       regs.gpr[5] = 0x9ABC;
+
+       /* stdcx. r5, r3, r4 */
+       stepped = emulate_step(&regs, TEST_STDCX(5, 3, 4));
+
+       /*
+        * Two possible scenarios indicate successful emulation
+        * of stdcx.:
+        *  1. The reservation is active and the store is performed. In
+        *     this case the cr0.eq bit will be set to 1.
+        *  2. The reservation is not active and the store is not
+        *     performed. In this case the cr0.eq bit will be set to 0.
+        */
+       if (stepped == 1 && ((regs.gpr[5] == a && (regs.ccr & cr0_eq))
+                       || (regs.gpr[5] != a && !(regs.ccr & cr0_eq))))
+               show_result("ldarx / stdcx.", "PASS");
+       else
+               show_result("ldarx / stdcx.", "FAIL (stdcx.)");
+}
+
+#ifdef CONFIG_PPC_FPU
+static void __init test_lfsx_stfsx(void)
+{
+       struct pt_regs regs;
+       union {
+               float a;
+               int b;
+       } c;
+       int cached_b;
+       int stepped = -1;
+
+       init_pt_regs(&regs);
+
+
+       /*** lfsx ***/
+
+       c.a = 123.45;
+       cached_b = c.b;
+
+       regs.gpr[3] = (unsigned long) &c.a;
+       regs.gpr[4] = 0;
+
+       /* lfsx frt10, r3, r4 */
+       stepped = emulate_step(&regs, TEST_LFSX(10, 3, 4));
+
+       if (stepped == 1)
+               show_result("lfsx", "PASS");
+       else
+               show_result("lfsx", "FAIL");
+
+
+       /*** stfsx ***/
+
+       c.a = 678.91;
+
+       /* stfsx frs10, r3, r4 */
+       stepped = emulate_step(&regs, TEST_STFSX(10, 3, 4));
+
+       if (stepped == 1 && c.b == cached_b)
+               show_result("stfsx", "PASS");
+       else
+               show_result("stfsx", "FAIL");
+}
+
+static void __init test_lfdx_stfdx(void)
+{
+       struct pt_regs regs;
+       union {
+               double a;
+               long b;
+       } c;
+       long cached_b;
+       int stepped = -1;
+
+       init_pt_regs(&regs);
+
+
+       /*** lfdx ***/
+
+       c.a = 123456.78;
+       cached_b = c.b;
+
+       regs.gpr[3] = (unsigned long) &c.a;
+       regs.gpr[4] = 0;
+
+       /* lfdx frt10, r3, r4 */
+       stepped = emulate_step(&regs, TEST_LFDX(10, 3, 4));
+
+       if (stepped == 1)
+               show_result("lfdx", "PASS");
+       else
+               show_result("lfdx", "FAIL");
+
+
+       /*** stfdx ***/
+
+       c.a = 987654.32;
+
+       /* stfdx frs10, r3, r4 */
+       stepped = emulate_step(&regs, TEST_STFDX(10, 3, 4));
+
+       if (stepped == 1 && c.b == cached_b)
+               show_result("stfdx", "PASS");
+       else
+               show_result("stfdx", "FAIL");
+}
+#else
+static void __init test_lfsx_stfsx(void)
+{
+       show_result("lfsx", "SKIP (CONFIG_PPC_FPU is not set)");
+       show_result("stfsx", "SKIP (CONFIG_PPC_FPU is not set)");
+}
+
+static void __init test_lfdx_stfdx(void)
+{
+       show_result("lfdx", "SKIP (CONFIG_PPC_FPU is not set)");
+       show_result("stfdx", "SKIP (CONFIG_PPC_FPU is not set)");
+}
+#endif /* CONFIG_PPC_FPU */
+
+#ifdef CONFIG_ALTIVEC
+static void __init test_lvx_stvx(void)
+{
+       struct pt_regs regs;
+       union {
+               vector128 a;
+               u32 b[4];
+       } c;
+       u32 cached_b[4];
+       int stepped = -1;
+
+       init_pt_regs(&regs);
+
+
+       /*** lvx ***/
+
+       cached_b[0] = c.b[0] = 923745;
+       cached_b[1] = c.b[1] = 2139478;
+       cached_b[2] = c.b[2] = 9012;
+       cached_b[3] = c.b[3] = 982134;
+
+       regs.gpr[3] = (unsigned long) &c.a;
+       regs.gpr[4] = 0;
+
+       /* lvx vrt10, r3, r4 */
+       stepped = emulate_step(&regs, TEST_LVX(10, 3, 4));
+
+       if (stepped == 1)
+               show_result("lvx", "PASS");
+       else
+               show_result("lvx", "FAIL");
+
+
+       /*** stvx ***/
+
+       c.b[0] = 4987513;
+       c.b[1] = 84313948;
+       c.b[2] = 71;
+       c.b[3] = 498532;
+
+       /* stvx vrs10, r3, r4 */
+       stepped = emulate_step(&regs, TEST_STVX(10, 3, 4));
+
+       if (stepped == 1 && cached_b[0] == c.b[0] && cached_b[1] == c.b[1] &&
+           cached_b[2] == c.b[2] && cached_b[3] == c.b[3])
+               show_result("stvx", "PASS");
+       else
+               show_result("stvx", "FAIL");
+}
+#else
+static void __init test_lvx_stvx(void)
+{
+       show_result("lvx", "SKIP (CONFIG_ALTIVEC is not set)");
+       show_result("stvx", "SKIP (CONFIG_ALTIVEC is not set)");
+}
+#endif /* CONFIG_ALTIVEC */
+
+#ifdef CONFIG_VSX
+static void __init test_lxvd2x_stxvd2x(void)
+{
+       struct pt_regs regs;
+       union {
+               vector128 a;
+               u32 b[4];
+       } c;
+       u32 cached_b[4];
+       int stepped = -1;
+
+       init_pt_regs(&regs);
+
+
+       /*** lxvd2x ***/
+
+       cached_b[0] = c.b[0] = 18233;
+       cached_b[1] = c.b[1] = 34863571;
+       cached_b[2] = c.b[2] = 834;
+       cached_b[3] = c.b[3] = 6138911;
+
+       regs.gpr[3] = (unsigned long) &c.a;
+       regs.gpr[4] = 0;
+
+       /* lxvd2x vsr39, r3, r4 */
+       stepped = emulate_step(&regs, TEST_LXVD2X(39, 3, 4));
+
+       if (stepped == 1)
+               show_result("lxvd2x", "PASS");
+       else
+               show_result("lxvd2x", "FAIL");
+
+
+       /*** stxvd2x ***/
+
+       c.b[0] = 21379463;
+       c.b[1] = 87;
+       c.b[2] = 374234;
+       c.b[3] = 4;
+
+       /* stxvd2x vsr39, r3, r4 */
+       stepped = emulate_step(&regs, TEST_STXVD2X(39, 3, 4));
+
+       if (stepped == 1 && cached_b[0] == c.b[0] && cached_b[1] == c.b[1] &&
+           cached_b[2] == c.b[2] && cached_b[3] == c.b[3])
+               show_result("stxvd2x", "PASS");
+       else
+               show_result("stxvd2x", "FAIL");
+}
+#else
+static void __init test_lxvd2x_stxvd2x(void)
+{
+       show_result("lxvd2x", "SKIP (CONFIG_VSX is not set)");
+       show_result("stxvd2x", "SKIP (CONFIG_VSX is not set)");
+}
+#endif /* CONFIG_VSX */
+
+static int __init test_emulate_step(void)
+{
+       test_ld();
+       test_lwz();
+       test_lwzx();
+       test_std();
+       test_ldarx_stdcx();
+       test_lfsx_stfsx();
+       test_lfdx_stfdx();
+       test_lvx_stvx();
+       test_lxvd2x_stxvd2x();
+
+       return 0;
+}
+late_initcall(test_emulate_step);
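
Editor's note on exercising the self-test above (our observation, not part of the patch): the object is built only when CONFIG_KPROBES_SANITY_TEST=y, per the arch/powerpc/lib/Makefile hunk earlier in this series, and late_initcall() runs it once at boot. Given the pr_fmt prefix and the show_result() format defined above, each test should log one line of the form:

emulate_step_test: ld             : PASS
emulate_step_test: ldarx / stdcx. : PASS
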
index 6aa3b76aa0d66b0b0d53b7e30685ab26745e2603..c22f207aa6564ba93df40ecc03713de04cb20f99 100644 (file)
@@ -356,25 +356,48 @@ static void early_check_vec5(void)
        unsigned long root, chosen;
        int size;
        const u8 *vec5;
+       u8 mmu_supported;
 
        root = of_get_flat_dt_root();
        chosen = of_get_flat_dt_subnode_by_name(root, "chosen");
-       if (chosen == -FDT_ERR_NOTFOUND)
+       if (chosen == -FDT_ERR_NOTFOUND) {
+               cur_cpu_spec->mmu_features &= ~MMU_FTR_TYPE_RADIX;
                return;
+       }
        vec5 = of_get_flat_dt_prop(chosen, "ibm,architecture-vec-5", &size);
-       if (!vec5)
+       if (!vec5) {
+               cur_cpu_spec->mmu_features &= ~MMU_FTR_TYPE_RADIX;
                return;
-       if (size <= OV5_INDX(OV5_MMU_RADIX_300) ||
-           !(vec5[OV5_INDX(OV5_MMU_RADIX_300)] & OV5_FEAT(OV5_MMU_RADIX_300)))
-               /* Hypervisor doesn't support radix */
+       }
+       if (size <= OV5_INDX(OV5_MMU_SUPPORT)) {
                cur_cpu_spec->mmu_features &= ~MMU_FTR_TYPE_RADIX;
+               return;
+       }
+
+       /* Check for supported configuration */
+       mmu_supported = vec5[OV5_INDX(OV5_MMU_SUPPORT)] &
+                       OV5_FEAT(OV5_MMU_SUPPORT);
+       if (mmu_supported == OV5_FEAT(OV5_MMU_RADIX)) {
+               /* Hypervisor only supports radix - check enabled && GTSE */
+               if (!early_radix_enabled()) {
+                       pr_warn("WARNING: Ignoring cmdline option disable_radix\n");
+               }
+               if (!(vec5[OV5_INDX(OV5_RADIX_GTSE)] &
+                                               OV5_FEAT(OV5_RADIX_GTSE))) {
+                       pr_warn("WARNING: Hypervisor doesn't support RADIX with GTSE\n");
+               }
+               /* Do radix anyway - the hypervisor said we had to */
+               cur_cpu_spec->mmu_features |= MMU_FTR_TYPE_RADIX;
+       } else if (mmu_supported == OV5_FEAT(OV5_MMU_HASH)) {
+               /* Hypervisor only supports hash - disable radix */
+               cur_cpu_spec->mmu_features &= ~MMU_FTR_TYPE_RADIX;
+       }
 }
 
 void __init mmu_early_init_devtree(void)
 {
        /* Disable radix mode based on kernel command line. */
-       /* We don't yet have the machinery to do radix as a guest. */
-       if (disable_radix || !(mfmsr() & MSR_HV))
+       if (disable_radix)
                cur_cpu_spec->mmu_features &= ~MMU_FTR_TYPE_RADIX;
 
        /*
@@ -383,7 +406,7 @@ void __init mmu_early_init_devtree(void)
         * even though the ibm,architecture-vec-5 property created by
         * skiboot doesn't have the necessary bits set.
         */
-       if (early_radix_enabled() && !(mfmsr() & MSR_HV))
+       if (!(mfmsr() & MSR_HV))
                early_check_vec5();
 
        if (early_radix_enabled())
index 2a590a98e65215a8fceb413dd0c0b693a1adc9aa..c28165d8970b64de6bef88fa14a7945a6a75abc5 100644 (file)
@@ -186,6 +186,10 @@ static void __init radix_init_pgtable(void)
         */
        register_process_table(__pa(process_tb), 0, PRTB_SIZE_SHIFT - 12);
        pr_info("Process table %p and radix root for kernel: %p\n", process_tb, init_mm.pgd);
+       asm volatile("ptesync" : : : "memory");
+       asm volatile(PPC_TLBIE_5(%0,%1,2,1,1) : :
+                    "r" (TLBIEL_INVAL_SET_LPID), "r" (0));
+       asm volatile("eieio; tlbsync; ptesync" : : : "memory");
 }
 
 static void __init radix_init_partition_table(void)
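
Editor's note: the three statements appended to radix_init_pgtable() above form the usual ISA 3.0 invalidation bracket around a tlbie. A commented reading (ours; the RIC/PRS/R decoding assumes the PPC_TLBIE_5(rb, rs, ric, prs, r) operand order from ppc-opcode.h):

	asm volatile("ptesync" : : : "memory");		/* order prior table stores */
	asm volatile(PPC_TLBIE_5(%0,%1,2,1,1) : :	/* RIC=2 all, PRS=1, R=1 radix */
		     "r" (TLBIEL_INVAL_SET_LPID), "r" (0));
	asm volatile("eieio; tlbsync; ptesync" : : : "memory");	/* completion */
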
index 595dd718ea8718b010fed1ca5c08f5f121f674c0..2ff13249f87a61759f015d7fff93bd014dba6347 100644 (file)
@@ -188,6 +188,8 @@ static inline void perf_get_data_addr(struct pt_regs *regs, u64 *addrp)
                        sdsync = POWER7P_MMCRA_SDAR_VALID;
                else if (ppmu->flags & PPMU_ALT_SIPR)
                        sdsync = POWER6_MMCRA_SDSYNC;
+               else if (ppmu->flags & PPMU_NO_SIAR)
+                       sdsync = MMCRA_SAMPLE_ENABLE;
                else
                        sdsync = MMCRA_SDSYNC;
 
index e79fb5fb817dbe21cd19f633d89ca3bbbf51ad0c..cd951fd231c4040ba653f32cf485eebb22d1d805 100644 (file)
@@ -65,12 +65,41 @@ static bool is_event_valid(u64 event)
        return !(event & ~valid_mask);
 }
 
-static u64 mmcra_sdar_mode(u64 event)
+static inline bool is_event_marked(u64 event)
 {
-       if (cpu_has_feature(CPU_FTR_ARCH_300) && !cpu_has_feature(CPU_FTR_POWER9_DD1))
-               return p9_SDAR_MODE(event) << MMCRA_SDAR_MODE_SHIFT;
+       if (event & EVENT_IS_MARKED)
+               return true;
+
+       return false;
+}
 
-       return MMCRA_SDAR_MODE_TLB;
+static void mmcra_sdar_mode(u64 event, unsigned long *mmcra)
+{
+       /*
+        * MMCRA[SDAR_MODE] specifies how the SDAR should be updated in
+        * continuous sampling mode.
+        *
+        * In case of Power8:
+        * MMCRA[SDAR_MODE] will be programmed as "0b01" for continuous
+        * sampling mode and will be unchanged when setting MMCRA[63]
+        * (Marked events).
+        *
+        * In case of Power9:
+        * Marked events, or a group that already has a marked event:
+        *      MMCRA[SDAR_MODE] will be set to 0b00 ('No Updates').
+        * Non-marked events (for DD1):
+        *      MMCRA[SDAR_MODE] will be set to 0b01.
+        * For the rest:
+        *      MMCRA[SDAR_MODE] will be set from the event code.
+        */
+       if (cpu_has_feature(CPU_FTR_ARCH_300)) {
+               if (is_event_marked(event) || (*mmcra & MMCRA_SAMPLE_ENABLE))
+                       *mmcra &= MMCRA_SDAR_MODE_NO_UPDATES;
+               else if (!cpu_has_feature(CPU_FTR_POWER9_DD1))
+                       *mmcra |=  p9_SDAR_MODE(event) << MMCRA_SDAR_MODE_SHIFT;
+               else if (cpu_has_feature(CPU_FTR_POWER9_DD1))
+                       *mmcra |= MMCRA_SDAR_MODE_TLB;
+       } else {
+               *mmcra |= MMCRA_SDAR_MODE_TLB;
+       }
 }
 
 static u64 thresh_cmp_val(u64 value)
@@ -180,7 +209,7 @@ int isa207_get_constraint(u64 event, unsigned long *maskp, unsigned long *valp)
                value |= CNST_L1_QUAL_VAL(cache);
        }
 
-       if (event & EVENT_IS_MARKED) {
+       if (is_event_marked(event)) {
                mask  |= CNST_SAMPLE_MASK;
                value |= CNST_SAMPLE_VAL(event >> EVENT_SAMPLE_SHIFT);
        }
@@ -276,7 +305,7 @@ int isa207_compute_mmcr(u64 event[], int n_ev,
                }
 
                /* In continuous sampling mode, update SDAR on TLB miss */
-               mmcra |= mmcra_sdar_mode(event[i]);
+               mmcra_sdar_mode(event[i], &mmcra);
 
                if (event[i] & EVENT_IS_L1) {
                        cache = event[i] >> EVENT_CACHE_SEL_SHIFT;
@@ -285,7 +314,7 @@ int isa207_compute_mmcr(u64 event[], int n_ev,
                        mmcr1 |= (cache & 1) << MMCR1_DC_QUAL_SHIFT;
                }
 
-               if (event[i] & EVENT_IS_MARKED) {
+               if (is_event_marked(event[i])) {
                        mmcra |= MMCRA_SAMPLE_ENABLE;
 
                        val = (event[i] >> EVENT_SAMPLE_SHIFT) & EVENT_SAMPLE_MASK;
index cf9bd89901595cc38b793bc916a2096d873054eb..899210f14ee432ea4b63cc7de6f7ea1a6da7a404 100644 (file)
 #define MMCRA_THR_CMP_SHIFT            32
 #define MMCRA_SDAR_MODE_SHIFT          42
 #define MMCRA_SDAR_MODE_TLB            (1ull << MMCRA_SDAR_MODE_SHIFT)
+#define MMCRA_SDAR_MODE_NO_UPDATES     ~(0x3ull << MMCRA_SDAR_MODE_SHIFT)
 #define MMCRA_IFM_SHIFT                        30
 
 /* MMCR1 Threshold Compare bit constant for power9 */
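
Editor's note: MMCRA_SDAR_MODE_NO_UPDATES above is defined as the inverted field mask, so it is applied with &= (clearing the two-bit field to 0b00) while the other mode values are applied with |=. A quick stand-alone demonstration (ours, with the shift value copied from the define):

#include <stdio.h>

#define SDAR_MODE_SHIFT       42
#define SDAR_MODE_TLB         (1ull << SDAR_MODE_SHIFT)
#define SDAR_MODE_NO_UPDATES  ~(0x3ull << SDAR_MODE_SHIFT)

int main(void)
{
	unsigned long long mmcra = SDAR_MODE_TLB;	/* field = 0b01 */

	mmcra &= SDAR_MODE_NO_UPDATES;			/* field = 0b00 */
	printf("sdar_mode = %llu\n", (mmcra >> SDAR_MODE_SHIFT) & 0x3);
	return 0;
}
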
index 6693f75e93d1629b51cd3104f24b84528a00969f..da8a0f7a035c1026b3403542c7ec2d97efa50376 100644 (file)
@@ -39,8 +39,8 @@ opal_tracepoint_refcount:
 BEGIN_FTR_SECTION;                                             \
        b       1f;                                             \
 END_FTR_SECTION(0, 1);                                         \
-       ld      r12,opal_tracepoint_refcount@toc(r2);           \
-       cmpdi   r12,0;                                          \
+       ld      r11,opal_tracepoint_refcount@toc(r2);           \
+       cmpdi   r11,0;                                          \
        bne-    LABEL;                                          \
 1:
 
index 86d9fde93c175f86dac6f40de0d68aff2455b0c6..e0f856bfbfe8f3c6ecfa70e737b1b2496725d563 100644 (file)
@@ -395,7 +395,6 @@ static int opal_recover_mce(struct pt_regs *regs,
                                        struct machine_check_event *evt)
 {
        int recovered = 0;
-       uint64_t ea = get_mce_fault_addr(evt);
 
        if (!(regs->msr & MSR_RI)) {
                /* If MSR_RI isn't set, we cannot recover */
@@ -404,26 +403,18 @@ static int opal_recover_mce(struct pt_regs *regs,
        } else if (evt->disposition == MCE_DISPOSITION_RECOVERED) {
                /* Platform corrected itself */
                recovered = 1;
-       } else if (ea && !is_kernel_addr(ea)) {
+       } else if (evt->severity == MCE_SEV_FATAL) {
+               /* Fatal machine check */
+               pr_err("Machine check interrupt is fatal\n");
+               recovered = 0;
+       } else if ((evt->severity == MCE_SEV_ERROR_SYNC) &&
+                       (user_mode(regs) && !is_global_init(current))) {
                /*
-                * Faulting address is not in kernel text. We should be fine.
-                * We need to find which process uses this address.
                 * For now, kill the task if we have received an exception
                 * while in userspace.
                 *
                 * TODO: Queue up this address for hwpoisoning later.
                 */
-               if (user_mode(regs) && !is_global_init(current)) {
-                       _exception(SIGBUS, regs, BUS_MCEERR_AR, regs->nip);
-                       recovered = 1;
-               } else
-                       recovered = 0;
-       } else if (user_mode(regs) && !is_global_init(current) &&
-               evt->severity == MCE_SEV_ERROR_SYNC) {
-               /*
-                * If we have received a synchronous error when in userspace
-                * kill the task.
-                */
                _exception(SIGBUS, regs, BUS_MCEERR_AR, regs->nip);
                recovered = 1;
        }
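
Editor's note: for reference, the rewritten opal_recover_mce() now evaluates in this order (our condensed reading of the code above):

/*
 *   !(regs->msr & MSR_RI)                     -> unrecoverable
 *   disposition == MCE_DISPOSITION_RECOVERED  -> recovered
 *   severity == MCE_SEV_FATAL                 -> unrecoverable
 *   severity == MCE_SEV_ERROR_SYNC and the
 *   task is userspace, not global init        -> SIGBUS the task, recovered
 *   anything else                             -> unrecoverable
 */
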
index 6901a06da2f90bddf70386667eaea5c4fe3505a7..e36738291c320575523422e139d4642e04142bd5 100644 (file)
@@ -1775,17 +1775,20 @@ static u64 pnv_pci_ioda_dma_get_required_mask(struct pci_dev *pdev)
 }
 
 static void pnv_ioda_setup_bus_dma(struct pnv_ioda_pe *pe,
-                                  struct pci_bus *bus)
+                                  struct pci_bus *bus,
+                                  bool add_to_group)
 {
        struct pci_dev *dev;
 
        list_for_each_entry(dev, &bus->devices, bus_list) {
                set_iommu_table_base(&dev->dev, pe->table_group.tables[0]);
                set_dma_offset(&dev->dev, pe->tce_bypass_base);
-               iommu_add_device(&dev->dev);
+               if (add_to_group)
+                       iommu_add_device(&dev->dev);
 
                if ((pe->flags & PNV_IODA_PE_BUS_ALL) && dev->subordinate)
-                       pnv_ioda_setup_bus_dma(pe, dev->subordinate);
+                       pnv_ioda_setup_bus_dma(pe, dev->subordinate,
+                                       add_to_group);
        }
 }
 
@@ -2191,7 +2194,7 @@ found:
                set_iommu_table_base(&pe->pdev->dev, tbl);
                iommu_add_device(&pe->pdev->dev);
        } else if (pe->flags & (PNV_IODA_PE_BUS | PNV_IODA_PE_BUS_ALL))
-               pnv_ioda_setup_bus_dma(pe, pe->pbus);
+               pnv_ioda_setup_bus_dma(pe, pe->pbus, true);
 
        return;
  fail:
@@ -2426,6 +2429,8 @@ static void pnv_ioda2_take_ownership(struct iommu_table_group *table_group)
 
        pnv_pci_ioda2_set_bypass(pe, false);
        pnv_pci_ioda2_unset_window(&pe->table_group, 0);
+       if (pe->pbus)
+               pnv_ioda_setup_bus_dma(pe, pe->pbus, false);
        pnv_ioda2_table_free(tbl);
 }
 
@@ -2435,6 +2440,8 @@ static void pnv_ioda2_release_ownership(struct iommu_table_group *table_group)
                                                table_group);
 
        pnv_pci_ioda2_setup_default_config(pe);
+       if (pe->pbus)
+               pnv_ioda_setup_bus_dma(pe, pe->pbus, false);
 }
 
 static struct iommu_table_group_ops pnv_pci_ioda2_ops = {
@@ -2624,6 +2631,9 @@ static long pnv_pci_ioda2_table_alloc_pages(int nid, __u64 bus_offset,
        level_shift = entries_shift + 3;
        level_shift = max_t(unsigned, level_shift, PAGE_SHIFT);
 
+       if ((level_shift - 3) * levels + page_shift >= 60)
+               return -EINVAL;
+
        /* Allocate TCE table */
        addr = pnv_pci_ioda2_table_do_alloc_pages(nid, level_shift,
                        levels, tce_table_size, &offset, &total_allocated);
@@ -2728,7 +2738,7 @@ static void pnv_pci_ioda2_setup_dma_pe(struct pnv_phb *phb,
        if (pe->flags & PNV_IODA_PE_DEV)
                iommu_add_device(&pe->pdev->dev);
        else if (pe->flags & (PNV_IODA_PE_BUS | PNV_IODA_PE_BUS_ALL))
-               pnv_ioda_setup_bus_dma(pe, pe->pbus);
+               pnv_ioda_setup_bus_dma(pe, pe->pbus, true);
 }
 
 #ifdef CONFIG_PCI_MSI
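
Editor's note: the new guard in pnv_pci_ioda2_table_alloc_pages() rejects multi-level TCE tables whose total reach hits 2^60 bytes. Each TCE entry is 8 bytes, so a level holds 2^(level_shift - 3) entries and the window spans (level_shift - 3) * levels + page_shift bits. A worked check (ours, with illustrative numbers):

#include <stdio.h>

int main(void)
{
	unsigned level_shift = 21, levels = 3, page_shift = 16;
	unsigned bits = (level_shift - 3) * levels + page_shift; /* 70 here */

	printf("window bits = %u -> %s\n", bits, bits >= 60 ? "-EINVAL" : "ok");
	return 0;
}
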
index 251060cf171364f1dd4dd08452189133dd713265..8b1fe895daa3f076bf57b6b9fe3283a6ba686fad 100644 (file)
@@ -751,7 +751,9 @@ void __init hpte_init_pseries(void)
        mmu_hash_ops.flush_hash_range    = pSeries_lpar_flush_hash_range;
        mmu_hash_ops.hpte_clear_all      = pseries_hpte_clear_all;
        mmu_hash_ops.hugepage_invalidate = pSeries_lpar_hugepage_invalidate;
-       mmu_hash_ops.resize_hpt          = pseries_lpar_resize_hpt;
+
+       if (firmware_has_feature(FW_FEATURE_HPT_RESIZE))
+               mmu_hash_ops.resize_hpt = pseries_lpar_resize_hpt;
 }
 
 void radix_init_pseries(void)
index f9760ccf40323674cecb3f4f14cc98aa8e615cfe..3696ea6c4826b9740398113d207b1679318db2f8 100644 (file)
@@ -116,13 +116,13 @@ dt_offset:
 
        .data
        .balign 8
-.globl sha256_digest
-sha256_digest:
+.globl purgatory_sha256_digest
+purgatory_sha256_digest:
        .skip   32
-       .size sha256_digest, . - sha256_digest
+       .size purgatory_sha256_digest, . - purgatory_sha256_digest
 
        .balign 8
-.globl sha_regions
-sha_regions:
+.globl purgatory_sha_regions
+purgatory_sha_regions:
        .skip   8 * 2 * 16
-       .size sha_regions, . - sha_regions
+       .size purgatory_sha_regions, . - purgatory_sha_regions
index ada29eaed6e280c08f6d3ee5671c58da9eb06e38..f523ac88315070873eede1c978312569d48953a7 100644 (file)
@@ -274,7 +274,9 @@ failed:
                        if (bank->disk->major > 0)
                                unregister_blkdev(bank->disk->major,
                                                bank->disk->disk_name);
-                       del_gendisk(bank->disk);
+                       if (bank->disk->flags & GENHD_FL_UP)
+                               del_gendisk(bank->disk);
+                       put_disk(bank->disk);
                }
                device->dev.platform_data = NULL;
                if (bank->io_addr != 0)
@@ -299,6 +301,7 @@ axon_ram_remove(struct platform_device *device)
        device_remove_file(&device->dev, &dev_attr_ecc);
        free_irq(bank->irq_id, device);
        del_gendisk(bank->disk);
+       put_disk(bank->disk);
        iounmap((void __iomem *) bank->io_addr);
        kfree(bank);
 
index f9670eabfcfa70ca338aa0c5f2e10217803c7162..b53f80f0b4d822b8ecc77271ee7ece8b734bee5a 100644 (file)
@@ -91,6 +91,16 @@ static unsigned int icp_opal_get_irq(void)
 
 static void icp_opal_set_cpu_priority(unsigned char cppr)
 {
+       /*
+        * Here be dragons. The caller has asked to allow only IPIs and not
+        * external interrupts. But OPAL XIVE doesn't support that, so instead
+        * of allowing no interrupts we allow all of them. That's still not
+        * right, but currently the only caller that does this is
+        * xics_migrate_irqs_away(), and it works in that case.
+        */
+       if (cppr >= DEFAULT_PRIORITY)
+               cppr = LOWEST_PRIORITY;
+
        xics_set_base_cppr(cppr);
        opal_int_set_cppr(cppr);
        iosync();
index 69d858e51ac76f121741337031465cb1fd1ccebb..23efe4e42172210ee7784a9e8db07d6e2f087f7b 100644 (file)
@@ -20,6 +20,7 @@
 #include <linux/of.h>
 #include <linux/slab.h>
 #include <linux/spinlock.h>
+#include <linux/delay.h>
 
 #include <asm/prom.h>
 #include <asm/io.h>
@@ -198,9 +199,6 @@ void xics_migrate_irqs_away(void)
        /* Remove ourselves from the global interrupt queue */
        xics_set_cpu_giq(xics_default_distrib_server, 0);
 
-       /* Allow IPIs again... */
-       icp_ops->set_priority(DEFAULT_PRIORITY);
-
        for_each_irq_desc(virq, desc) {
                struct irq_chip *chip;
                long server;
@@ -255,6 +253,19 @@ void xics_migrate_irqs_away(void)
 unlock:
                raw_spin_unlock_irqrestore(&desc->lock, flags);
        }
+
+       /* Allow "sufficient" time to drop any in-flight IRQs */
+       mdelay(5);
+
+       /*
+        * Allow IPIs again. This is done at the very end, after migrating all
+        * interrupts; the expectation is that beyond this point we'll only be
+        * woken up by an IPI, but we leave externals masked just to be safe.
+        * If we're using icp-opal this may actually allow all interrupts
+        * anyway, but that should be OK.
+        */
+       icp_ops->set_priority(DEFAULT_PRIORITY);
+
 }
 #endif /* CONFIG_HOTPLUG_CPU */
 
index fa95041fa9f6844a2ab13ac4dee90967a6a0f84b..33ca29333e1808ae4dc0f9e8875902ae2bb307f5 100644 (file)
@@ -141,31 +141,34 @@ static void check_ipl_parmblock(void *start, unsigned long size)
 
 unsigned long decompress_kernel(void)
 {
-       unsigned long output_addr;
-       unsigned char *output;
+       void *output, *kernel_end;
 
-       output_addr = ((unsigned long) &_end + HEAP_SIZE + 4095UL) & -4096UL;
-       check_ipl_parmblock((void *) 0, output_addr + SZ__bss_start);
-       memset(&_bss, 0, &_ebss - &_bss);
-       free_mem_ptr = (unsigned long)&_end;
-       free_mem_end_ptr = free_mem_ptr + HEAP_SIZE;
-       output = (unsigned char *) output_addr;
+       output = (void *) ALIGN((unsigned long) &_end + HEAP_SIZE, PAGE_SIZE);
+       kernel_end = output + SZ__bss_start;
+       check_ipl_parmblock((void *) 0, (unsigned long) kernel_end);
 
 #ifdef CONFIG_BLK_DEV_INITRD
        /*
         * Move the initrd right behind the end of the decompressed
-        * kernel image.
+        * kernel image. This also prevents the initrd from being corrupted
+        * by the bss clearing, since kernel_end is always located beyond
+        * the current bss section.
         */
-       if (INITRD_START && INITRD_SIZE &&
-           INITRD_START < (unsigned long) output + SZ__bss_start) {
-               check_ipl_parmblock(output + SZ__bss_start,
-                                   INITRD_START + INITRD_SIZE);
-               memmove(output + SZ__bss_start,
-                       (void *) INITRD_START, INITRD_SIZE);
-               INITRD_START = (unsigned long) output + SZ__bss_start;
+       if (INITRD_START && INITRD_SIZE && kernel_end > (void *) INITRD_START) {
+               check_ipl_parmblock(kernel_end, INITRD_SIZE);
+               memmove(kernel_end, (void *) INITRD_START, INITRD_SIZE);
+               INITRD_START = (unsigned long) kernel_end;
        }
 #endif
 
+       /*
+        * Clear bss section. free_mem_ptr and free_mem_end_ptr need to be
+        * initialized afterwards since they reside in bss.
+        */
+       memset(&_bss, 0, &_ebss - &_bss);
+       free_mem_ptr = (unsigned long) &_end;
+       free_mem_end_ptr = free_mem_ptr + HEAP_SIZE;
+
        puts("Uncompressing Linux... ");
        __decompress(input_data, input_len, NULL, NULL, output, 0, NULL, error);
        puts("Ok, booting the kernel.\n");
index 143b1e00b818493f4cb683c251e1d90ef6a5aa9e..4b176fe83da4c6abeeaec2144635c87337cb3388 100644 (file)
@@ -609,7 +609,7 @@ CONFIG_SCHED_TRACER=y
 CONFIG_FTRACE_SYSCALLS=y
 CONFIG_STACK_TRACER=y
 CONFIG_BLK_DEV_IO_TRACE=y
-CONFIG_UPROBE_EVENT=y
+CONFIG_UPROBE_EVENTS=y
 CONFIG_FUNCTION_PROFILER=y
 CONFIG_HIST_TRIGGERS=y
 CONFIG_TRACE_ENUM_MAP_FILE=y
index f05d2d6e10872a417cfb67a9624d7d74f56e5cc6..0de46cc397f6fe7a89f7e26c569287a41dc8a7fb 100644 (file)
@@ -560,7 +560,7 @@ CONFIG_SCHED_TRACER=y
 CONFIG_FTRACE_SYSCALLS=y
 CONFIG_STACK_TRACER=y
 CONFIG_BLK_DEV_IO_TRACE=y
-CONFIG_UPROBE_EVENT=y
+CONFIG_UPROBE_EVENTS=y
 CONFIG_FUNCTION_PROFILER=y
 CONFIG_HIST_TRIGGERS=y
 CONFIG_TRACE_ENUM_MAP_FILE=y
index 2358bf33c5efcf2790643f0b8bbd2a8c80a2fc8f..e167557b434c201e421c5bda3d263849859d7609 100644 (file)
@@ -558,7 +558,7 @@ CONFIG_SCHED_TRACER=y
 CONFIG_FTRACE_SYSCALLS=y
 CONFIG_STACK_TRACER=y
 CONFIG_BLK_DEV_IO_TRACE=y
-CONFIG_UPROBE_EVENT=y
+CONFIG_UPROBE_EVENTS=y
 CONFIG_FUNCTION_PROFILER=y
 CONFIG_HIST_TRIGGERS=y
 CONFIG_TRACE_ENUM_MAP_FILE=y
index d69ea495c4d748748618b27d7414671529e1f41a..716b17238599f63107b27b6860c030b63dd757ba 100644 (file)
@@ -474,8 +474,11 @@ static int ctr_paes_crypt(struct blkcipher_desc *desc, unsigned long modifier,
                        ret = blkcipher_walk_done(desc, walk, nbytes - n);
                }
                if (k < n) {
-                       if (__ctr_paes_set_key(ctx) != 0)
+                       if (__ctr_paes_set_key(ctx) != 0) {
+                               if (locked)
+                                       spin_unlock(&ctrblk_lock);
                                return blkcipher_walk_done(desc, walk, -EIO);
+                       }
                }
        }
        if (locked)
index 68bfd09f1b02ec23dad7ba4931db828f1286d890..97189dbaf34b2a36dade0738a64eb65bae38aafc 100644 (file)
@@ -179,7 +179,7 @@ CONFIG_FTRACE_SYSCALLS=y
 CONFIG_TRACER_SNAPSHOT_PER_CPU_SWAP=y
 CONFIG_STACK_TRACER=y
 CONFIG_BLK_DEV_IO_TRACE=y
-CONFIG_UPROBE_EVENT=y
+CONFIG_UPROBE_EVENTS=y
 CONFIG_FUNCTION_PROFILER=y
 CONFIG_TRACE_ENUM_MAP_FILE=y
 CONFIG_KPROBES_SANITY_TEST=y
index d1c407ddf7032de5a43d08aa48438abda7ab1e91..9072bf63a846148c008da47a5ed3a73313b382a3 100644 (file)
@@ -8,31 +8,27 @@
 #define _S390_CPUTIME_H
 
 #include <linux/types.h>
-#include <asm/div64.h>
+#include <asm/timex.h>
 
 #define CPUTIME_PER_USEC 4096ULL
 #define CPUTIME_PER_SEC (CPUTIME_PER_USEC * USEC_PER_SEC)
 
 /* We want to use full resolution of the CPU timer: 2**-12 micro-seconds. */
 
-typedef unsigned long long __nocast cputime_t;
-typedef unsigned long long __nocast cputime64_t;
-
 #define cmpxchg_cputime(ptr, old, new) cmpxchg64(ptr, old, new)
 
-static inline unsigned long __div(unsigned long long n, unsigned long base)
-{
-       return n / base;
-}
-
 /*
- * Convert cputime to microseconds and back.
+ * Convert cputime to microseconds.
  */
-static inline unsigned int cputime_to_usecs(const cputime_t cputime)
+static inline u64 cputime_to_usecs(const u64 cputime)
 {
-       return (__force unsigned long long) cputime >> 12;
+       return cputime >> 12;
 }
 
+/*
+ * Convert cputime to nanoseconds.
+ */
+#define cputime_to_nsecs(cputime) tod_to_ns(cputime)
 
 u64 arch_cpu_idle_time(int cpu);
 
index 7ed1972b1920eb45e8544f5b495db75bafa20636..93e37b12e88237766821369e19827e5e2d844a1b 100644 (file)
@@ -24,6 +24,7 @@
  * the S390 page table tree.
  */
 #ifndef __ASSEMBLY__
+#include <asm-generic/5level-fixup.h>
 #include <linux/sched.h>
 #include <linux/mm_types.h>
 #include <linux/page-flags.h>
index 5ce29fe100baaa0ee0dcaa987901691fb76e4044..fbd9116eb17bf2c73d80952af2108eecdf4db1ed 100644 (file)
@@ -4,6 +4,5 @@
 #include <asm-generic/sections.h>
 
 extern char _eshared[], _ehead[];
-extern char __start_ro_after_init[], __end_ro_after_init[];
 
 #endif
index 354344dcc19898bb647722db24f49733b28793c6..118535123f346d9b32bfb140d45884870a99fd2f 100644 (file)
@@ -206,20 +206,16 @@ static inline unsigned long long get_tod_clock_monotonic(void)
  *    ns = (todval * 125) >> 9;
  *
  * In order to avoid an overflow with the multiplication we can rewrite this.
- * With a split todval == 2^32 * th + tl (th upper 32 bits, tl lower 32 bits)
+ * With a split todval == 2^9 * th + tl (th upper 55 bits, tl lower 9 bits)
  * we end up with
  *
- *    ns = ((2^32 * th + tl) * 125 ) >> 9;
- * -> ns = (2^23 * th * 125) + ((tl * 125) >> 9);
+ *    ns = ((2^9 * th + tl) * 125 ) >> 9;
+ * -> ns = (th * 125) + ((tl * 125) >> 9);
  *
  */
 static inline unsigned long long tod_to_ns(unsigned long long todval)
 {
-       unsigned long long ns;
-
-       ns = ((todval >> 32) << 23) * 125;
-       ns += ((todval & 0xffffffff) * 125) >> 9;
-       return ns;
+       return ((todval >> 9) * 125) + (((todval & 0x1ff) * 125) >> 9);
 }
 
 #endif
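The split in the new tod_to_ns() is exact rather than approximate: once multiplied out, the th term is a multiple of 2^9, so the right shift distributes over the sum without dropping carry bits. A self-contained userspace check (plain C; the test value is chosen so the reference form todval * 125 still fits in 64 bits):

    #include <assert.h>
    #include <stdint.h>

    static uint64_t tod_to_ns_new(uint64_t tod)
    {
            return ((tod >> 9) * 125) + (((tod & 0x1ff) * 125) >> 9);
    }

    int main(void)
    {
            uint64_t tod = 0x123456789abcdefULL;

            /* reference form; valid here because tod * 125 < 2^64 */
            assert(tod_to_ns_new(tod) == (tod * 125) >> 9);
            return 0;
    }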
index 136932ff42502027820a94702a924d65b3049622..3ea1554d04b3776e90fa1c311ff227a9a201925c 100644 (file)
@@ -147,7 +147,7 @@ unsigned long __must_check __copy_to_user(void __user *to, const void *from,
                "       jg      2b\n"                           \
                ".popsection\n"                                 \
                EX_TABLE(0b,3b) EX_TABLE(1b,3b)                 \
-               : "=d" (__rc), "=Q" (*(to))                     \
+               : "=d" (__rc), "+Q" (*(to))                     \
                : "d" (size), "Q" (*(from)),                    \
                  "d" (__reg0), "K" (-EFAULT)                   \
                : "cc");                                        \
index b24a64cbfeb10a91274a59117f2e76ea3c583e00..e8e5ecf673fdd864cf9a50a6e7c99512ec446c40 100644 (file)
 
 #define SCM_TIMESTAMPING_OPT_STATS     54
 
+#define        SO_MEMINFO              55
+
+#define SO_INCOMING_NAPI_ID    56
+
+#define SO_COOKIE              57
+
 #endif /* _ASM_SOCKET_H */
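The new options are plain getsockopt() reads; SO_COOKIE, for instance, returns the socket's unique 64-bit cookie. A minimal userspace sketch (assumes a libc/uapi header set that already carries the SO_COOKIE define):

    #include <stdint.h>
    #include <stdio.h>
    #include <sys/socket.h>

    int print_cookie(int fd)
    {
            uint64_t cookie;
            socklen_t len = sizeof(cookie);

            if (getsockopt(fd, SOL_SOCKET, SO_COOKIE, &cookie, &len) < 0)
                    return -1;
            printf("socket cookie: %llu\n", (unsigned long long)cookie);
            return 0;
    }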
index 4384bc797a54f9d77dd593123f0cfc567124f792..152de9b796e149ed3745f41351a5cc5e637bb55e 100644 (file)
 #define __NR_copy_file_range   375
 #define __NR_preadv2           376
 #define __NR_pwritev2          377
-#define NR_syscalls 378
+/* Number 378 is reserved for guarded storage */
+#define __NR_statx             379
+#define NR_syscalls 380
 
 /* 
  * There are some system calls that are not present on 64 bit, some
index ae2cda5eee5a99b35b73e5b7868edd44cba1c6d2..e89cc2e71db1693c4c03f6e6ccc37ba9297b4012 100644 (file)
@@ -178,3 +178,4 @@ COMPAT_SYSCALL_WRAP3(getpeername, int, fd, struct sockaddr __user *, usockaddr,
 COMPAT_SYSCALL_WRAP6(sendto, int, fd, void __user *, buff, size_t, len, unsigned int, flags, struct sockaddr __user *, addr, int, addr_len);
 COMPAT_SYSCALL_WRAP3(mlock2, unsigned long, start, size_t, len, int, flags);
 COMPAT_SYSCALL_WRAP6(copy_file_range, int, fd_in, loff_t __user *, off_in, int, fd_out, loff_t __user *, off_out, size_t, len, unsigned int, flags);
+COMPAT_SYSCALL_WRAP5(statx, int, dfd, const char __user *, path, unsigned, flags, unsigned, mask, struct statx __user *, buffer);
index dff2152350a7ebaaf3df6c8b000eb36b03afd19e..6a7d737d514c4c0064ddd8ef1ca80b824ae60c0c 100644 (file)
@@ -490,7 +490,7 @@ ENTRY(pgm_check_handler)
        jnz     .Lpgm_svcper            # -> single stepped svc
 1:     CHECK_STACK STACK_SIZE,__LC_SAVE_AREA_SYNC
        aghi    %r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE)
-       j       3f
+       j       4f
 2:     UPDATE_VTIME %r14,%r15,__LC_SYNC_ENTER_TIMER
        lg      %r15,__LC_KERNEL_STACK
        lgr     %r14,%r12
@@ -499,8 +499,8 @@ ENTRY(pgm_check_handler)
        tm      __LC_PGM_ILC+2,0x02     # check for transaction abort
        jz      3f
        mvc     __THREAD_trap_tdb(256,%r14),0(%r13)
-3:     la      %r11,STACK_FRAME_OVERHEAD(%r15)
-       stg     %r10,__THREAD_last_break(%r14)
+3:     stg     %r10,__THREAD_last_break(%r14)
+4:     la      %r11,STACK_FRAME_OVERHEAD(%r15)
        stmg    %r0,%r7,__PT_R0(%r11)
        mvc     __PT_R8(64,%r11),__LC_SAVE_AREA_SYNC
        stmg    %r8,%r9,__PT_PSW(%r11)
@@ -509,14 +509,14 @@ ENTRY(pgm_check_handler)
        xc      __PT_FLAGS(8,%r11),__PT_FLAGS(%r11)
        stg     %r10,__PT_ARGS(%r11)
        tm      __LC_PGM_ILC+3,0x80     # check for per exception
-       jz      4f
+       jz      5f
        tmhh    %r8,0x0001              # kernel per event ?
        jz      .Lpgm_kprobe
        oi      __PT_FLAGS+7(%r11),_PIF_PER_TRAP
        mvc     __THREAD_per_address(8,%r14),__LC_PER_ADDRESS
        mvc     __THREAD_per_cause(2,%r14),__LC_PER_CODE
        mvc     __THREAD_per_paid(1,%r14),__LC_PER_ACCESS_ID
-4:     REENABLE_IRQS
+5:     REENABLE_IRQS
        xc      __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
        larl    %r1,pgm_check_table
        llgh    %r10,__PT_INT_CODE+2(%r11)
index b67dafb7b7cfc58221d786ee9f97b2adc5a61217..e545ffe5155ab0179327cfe4f9f66e677c604041 100644 (file)
@@ -564,6 +564,8 @@ static struct kset *ipl_kset;
 
 static void __ipl_run(void *unused)
 {
+       if (MACHINE_IS_LPAR && ipl_info.type == IPL_TYPE_CCW)
+               diag308(DIAG308_LOAD_NORMAL_DUMP, NULL);
        diag308(DIAG308_LOAD_CLEAR, NULL);
        if (MACHINE_IS_VM)
                __cpcmd("IPL", NULL, 0, NULL);
index 20cd339e11aefc9e190e7c98c5671b94dac46d37..f29e41c5e2ecf6d28018463cf89a2db677dffccc 100644 (file)
@@ -124,7 +124,10 @@ int copy_thread_tls(unsigned long clone_flags, unsigned long new_stackp,
        clear_tsk_thread_flag(p, TIF_SINGLE_STEP);
        /* Initialize per thread user and system timer values */
        p->thread.user_timer = 0;
+       p->thread.guest_timer = 0;
        p->thread.system_timer = 0;
+       p->thread.hardirq_timer = 0;
+       p->thread.softirq_timer = 0;
 
        frame->sf.back_chain = 0;
        /* new return point is ret_from_fork */
index 47a973b5b4f184adfa3855828d042bd73d33e61c..5dab859b0d543be205eaa5a176728e87f5e3bfc6 100644 (file)
@@ -909,13 +909,11 @@ void __init smp_prepare_boot_cpu(void)
 {
        struct pcpu *pcpu = pcpu_devices;
 
+       WARN_ON(!cpu_present(0) || !cpu_online(0));
        pcpu->state = CPU_STATE_CONFIGURED;
-       pcpu->address = stap();
        pcpu->lowcore = (struct lowcore *)(unsigned long) store_prefix();
        S390_lowcore.percpu_offset = __per_cpu_offset[0];
        smp_cpu_set_polarization(0, POLARIZATION_UNKNOWN);
-       set_cpu_present(0, true);
-       set_cpu_online(0, true);
 }
 
 void __init smp_cpus_done(unsigned int max_cpus)
@@ -924,6 +922,7 @@ void __init smp_cpus_done(unsigned int max_cpus)
 
 void __init smp_setup_processor_id(void)
 {
+       pcpu_devices[0].address = stap();
        S390_lowcore.cpu_nr = 0;
        S390_lowcore.spinlock_lockval = arch_spin_lockval(0);
 }
index 9b59e6212d8fd22cadbc35f9e3546f7aa47e540c..2659b5cfeddba4cd294e71e356d1149cca68314f 100644 (file)
@@ -386,3 +386,5 @@ SYSCALL(sys_mlock2,compat_sys_mlock2)
 SYSCALL(sys_copy_file_range,compat_sys_copy_file_range) /* 375 */
 SYSCALL(sys_preadv2,compat_sys_preadv2)
 SYSCALL(sys_pwritev2,compat_sys_pwritev2)
+NI_SYSCALL
+SYSCALL(sys_statx,compat_sys_statx)
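With the table entry, the compat wrapper, and __NR_statx == 379 wired up above, the new syscall can be exercised even before libc grows a wrapper; a hedged userspace sketch using raw syscall(2) (the fallback number is the s390 one from this table, other architectures assign their own):

    #include <fcntl.h>
    #include <stdio.h>
    #include <sys/syscall.h>
    #include <unistd.h>
    #include <linux/stat.h>         /* struct statx, STATX_SIZE */

    #ifndef __NR_statx
    #define __NR_statx 379          /* s390 number from the table above */
    #endif

    int main(void)
    {
            struct statx stx;

            if (syscall(__NR_statx, AT_FDCWD, "/etc/hostname", 0,
                        STATX_SIZE, &stx) == 0)
                    printf("size: %llu\n", (unsigned long long)stx.stx_size);
            return 0;
    }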
index 5ccf953962518294e2fc0d1b6e6b633e589f223b..72307f108c40387fd718e9ca1e07ee5cb6ef9cb8 100644 (file)
@@ -63,11 +63,9 @@ SECTIONS
 
        . = ALIGN(PAGE_SIZE);
        __start_ro_after_init = .;
-       __start_data_ro_after_init = .;
        .data..ro_after_init : {
                 *(.data..ro_after_init)
        }
-       __end_data_ro_after_init = .;
        EXCEPTION_TABLE(16)
        . = ALIGN(PAGE_SIZE);
        __end_ro_after_init = .;
index c14fc902991272be4d761b5f6fe506a64e12ace8..072d84ba42a3725ae1b1bff009bc5e241a264717 100644 (file)
@@ -111,7 +111,7 @@ static inline u64 scale_vtime(u64 vtime)
 }
 
 static void account_system_index_scaled(struct task_struct *p,
-                                       cputime_t cputime, cputime_t scaled,
+                                       u64 cputime, u64 scaled,
                                        enum cpu_usage_stat index)
 {
        p->stimescaled += cputime_to_nsecs(scaled);
index b48dc5f1900b5122f62f98669d3f1ffd97955d99..463e5ef02304bb99c352c8468c7cf0ce57f0ba4e 100644 (file)
@@ -608,12 +608,29 @@ void ptep_zap_key(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
 bool test_and_clear_guest_dirty(struct mm_struct *mm, unsigned long addr)
 {
        spinlock_t *ptl;
+       pgd_t *pgd;
+       pud_t *pud;
+       pmd_t *pmd;
        pgste_t pgste;
        pte_t *ptep;
        pte_t pte;
        bool dirty;
 
-       ptep = get_locked_pte(mm, addr, &ptl);
+       pgd = pgd_offset(mm, addr);
+       pud = pud_alloc(mm, pgd, addr);
+       if (!pud)
+               return false;
+       pmd = pmd_alloc(mm, pud, addr);
+       if (!pmd)
+               return false;
+       /* We can't run guests backed by huge pages, but userspace can
+        * still set them up and then try to migrate them without any
+        * migration support.
+        */
+       if (pmd_large(*pmd))
+               return true;
+
+       ptep = pte_alloc_map_lock(mm, pmd, addr, &ptl);
        if (unlikely(!ptep))
                return false;
 
index 0553e5cd5985a0a634864a3402a889491a52da91..46ff8fd678a75cd1cf28111961ffec22375be6ac 100644 (file)
@@ -2,6 +2,7 @@
 #define _ASM_SCORE_PGTABLE_H
 
 #include <linux/const.h>
+#define __ARCH_USE_5LEVEL_HACK
 #include <asm-generic/pgtable-nopmd.h>
 
 #include <asm/fixmap.h>
index e359ec67586982d3a47dd3bf0eb38418471836c2..12daf45369b44274a1ba299ecbf8be37311dfe55 100644 (file)
@@ -24,6 +24,7 @@
  */
 
 #include <linux/extable.h>
+#include <linux/ptrace.h>
 #include <linux/sched/mm.h>
 #include <linux/sched/signal.h>
 #include <linux/sched/debug.h>
index ec871355fc2d60498cee6b245c44476f0dc59604..6736a3ad6286093dd1c5ef957a2a60cf82801d97 100644 (file)
@@ -24,6 +24,8 @@
  */
 
 #include <linux/extable.h>
+#include <linux/ptrace.h>
+#include <asm/extable.h>
 
 int fixup_exception(struct pt_regs *regs)
 {
index 340fd40b381dc348b2bf8e86e48a2f8ee52efdaa..9c292c27e0d7114768a7bf8379df7be3fc157257 100644 (file)
@@ -128,7 +128,6 @@ static int __init smsc_superio_setup(void)
        SMSC_SUPERIO_WRITE_INDEXED(1, SMSC_PRIMARY_INT_INDEX);
        SMSC_SUPERIO_WRITE_INDEXED(12, SMSC_SECONDARY_INT_INDEX);
 
-#ifdef CONFIG_IDE
        /*
         * Only IDE1 exists on the Cayman
         */
@@ -158,7 +157,6 @@ static int __init smsc_superio_setup(void)
        SMSC_SUPERIO_WRITE_INDEXED(0x01, 0xc5); /* GP45 = IDE1_IRQ */
        SMSC_SUPERIO_WRITE_INDEXED(0x00, 0xc6); /* GP46 = nIOROP */
        SMSC_SUPERIO_WRITE_INDEXED(0x00, 0xc7); /* GP47 = nIOWOP */
-#endif
 
        /* Exit the configuration state */
        outb(SMSC_EXIT_CONFIG_KEY, SMSC_CONFIG_PORT_ADDR);
index 19bd89db17e71749b1e7bb07355e2152e4c92408..f75cf438725766d2b7340f38ce74f1ea63f0690e 100644 (file)
@@ -1,6 +1,7 @@
 #ifndef __ASM_SH_PGTABLE_2LEVEL_H
 #define __ASM_SH_PGTABLE_2LEVEL_H
 
+#define __ARCH_USE_5LEVEL_HACK
 #include <asm-generic/pgtable-nopmd.h>
 
 /*
index 249a985d96482e54bb604daaf129552ba8835257..9b1e776eca31bec7ea936633528011e48b6374ba 100644 (file)
@@ -1,6 +1,7 @@
 #ifndef __ASM_SH_PGTABLE_3LEVEL_H
 #define __ASM_SH_PGTABLE_3LEVEL_H
 
+#define __ARCH_USE_5LEVEL_HACK
 #include <asm-generic/pgtable-nopud.h>
 
 /*
index 56e49c8f770d6b620eb5811101d8548eb80d5850..8a598528ec1f0455508c1389982e832460c12b81 100644 (file)
@@ -12,6 +12,7 @@
  * the SpitFire page tables.
  */
 
+#include <asm-generic/5level-fixup.h>
 #include <linux/compiler.h>
 #include <linux/const.h>
 #include <asm/types.h>
index a25dc32f5d6a163c1b0e7b7ae775f43898ba4d58..3f4ad19d9ec70c7ab080483fa936219ef9be05f6 100644 (file)
 
 #define SCM_TIMESTAMPING_OPT_STATS     0x0038
 
+#define SO_MEMINFO             0x0039
+
+#define SO_INCOMING_NAPI_ID    0x003a
+
+#define SO_COOKIE              0x003b
+
 /* Security levels - as per NRL IPv6 - don't actually do anything */
 #define SO_SECURITY_AUTHENTICATION             0x5001
 #define SO_SECURITY_ENCRYPTION_TRANSPORT       0x5002
index df9e731a76f51b923098501ac11ae3b0303c923a..fc5124ccdb53c7abc43e381ba098c453892a7640 100644 (file)
@@ -351,7 +351,7 @@ static int genregs64_set(struct task_struct *target,
        }
 
        if (!ret) {
-               unsigned long y;
+               unsigned long y = regs->y;
 
                ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
                                         &y,
index d26a42279036837b760ea4b93593b45fe4394f83..5f8c615cb5e9bda9a3c1ef6028e553d5e54c3615 100644 (file)
@@ -74,6 +74,7 @@ extern unsigned long VMALLOC_RESERVE /* = CONFIG_VMALLOC_RESERVE */;
 #define MAXMEM         (_VMALLOC_START - PAGE_OFFSET)
 
 /* We have no pmd or pud since we are strictly a two-level page table */
+#define __ARCH_USE_5LEVEL_HACK
 #include <asm-generic/pgtable-nopmd.h>
 
 static inline int pud_huge_page(pud_t pud)     { return 0; }
index e96cec52f6d8aa86c0f9a89fccf4d1081db98f56..96fe58b451188a3f3a31d560036ff3031823f05b 100644 (file)
@@ -59,6 +59,7 @@
 #ifndef __ASSEMBLY__
 
 /* We have no pud since we are a three-level page table. */
+#define __ARCH_USE_5LEVEL_HACK
 #include <asm-generic/pgtable-nopud.h>
 
 /*
index cfbe597524698c9234effb606aadd9ff74299085..179c0ea87a0c3b48e93821d2d1158259c0454b1b 100644 (file)
@@ -8,6 +8,7 @@
 #ifndef __UM_PGTABLE_2LEVEL_H
 #define __UM_PGTABLE_2LEVEL_H
 
+#define __ARCH_USE_5LEVEL_HACK
 #include <asm-generic/pgtable-nopmd.h>
 
 /* PGDIR_SHIFT determines what a third-level page table entry can map */
index bae8523a162fd3b80067260ddfad400bdf480e5b..c4d876dfb9acd14bc11ff6b4230bbff5bbe070fe 100644 (file)
@@ -7,6 +7,7 @@
 #ifndef __UM_PGTABLE_3LEVEL_H
 #define __UM_PGTABLE_3LEVEL_H
 
+#define __ARCH_USE_5LEVEL_HACK
 #include <asm-generic/pgtable-nopud.h>
 
 /* PGDIR_SHIFT determines what a third-level page table entry can map */
index 818d0f5598e3247666de004a1ff004abc5fd23cf..a4f2bef37e70697f215e916118775da8dbc4aad6 100644 (file)
@@ -12,6 +12,7 @@
 #ifndef __UNICORE_PGTABLE_H__
 #define __UNICORE_PGTABLE_H__
 
+#define __ARCH_USE_5LEVEL_HACK
 #include <asm-generic/pgtable-nopmd.h>
 #include <asm/cpu-single.h>
 
index 2d449337a36051183c8468f469a8816e6c1e9e7c..a94a4d10f2dfa426d3746cfc9e528d8c91b7e824 100644 (file)
@@ -120,10 +120,6 @@ else
         # -funit-at-a-time shrinks the kernel .text considerably
         # unfortunately it makes reading oopses harder.
         KBUILD_CFLAGS += $(call cc-option,-funit-at-a-time)
-
-        # this works around some issues with generating unwind tables in older gccs
-        # newer gccs do it by default
-        KBUILD_CFLAGS += $(call cc-option,-maccumulate-outgoing-args)
 endif
 
 ifdef CONFIG_X86_X32
@@ -147,6 +143,37 @@ ifeq ($(CONFIG_KMEMCHECK),y)
        KBUILD_CFLAGS += $(call cc-option,-fno-builtin-memcpy)
 endif
 
+#
+# If the function graph tracer is used with mcount instead of fentry,
+# '-maccumulate-outgoing-args' is needed to prevent a GCC bug
+# (https://gcc.gnu.org/bugzilla/show_bug.cgi?id=42109)
+#
+ifdef CONFIG_FUNCTION_GRAPH_TRACER
+  ifndef CONFIG_HAVE_FENTRY
+       ACCUMULATE_OUTGOING_ARGS := 1
+  else
+    ifeq ($(call cc-option-yn, -mfentry), n)
+       ACCUMULATE_OUTGOING_ARGS := 1
+    endif
+  endif
+endif
+
+#
+# Jump labels need '-maccumulate-outgoing-args' for gcc < 4.5.2 to prevent a
+# GCC bug (https://gcc.gnu.org/bugzilla/show_bug.cgi?id=46226).  There's no way
+# to test for this bug at compile-time because the test case needs to execute,
+# which is a no-go for cross compilers.  So check the GCC version instead.
+#
+ifdef CONFIG_JUMP_LABEL
+  ifneq ($(ACCUMULATE_OUTGOING_ARGS), 1)
+       ACCUMULATE_OUTGOING_ARGS = $(call cc-if-fullversion, -lt, 040502, 1)
+  endif
+endif
+
+ifeq ($(ACCUMULATE_OUTGOING_ARGS), 1)
+       KBUILD_CFLAGS += -maccumulate-outgoing-args
+endif
+
 # Stackpointer is addressed different for 32 bit and 64 bit x86
 sp-$(CONFIG_X86_32) := esp
 sp-$(CONFIG_X86_64) := rsp
index 6647ed49c66c9789b7e0d37286653021a6fc7d4c..a45eb15b7cf290a176d287c1255b9f9412495831 100644 (file)
@@ -45,24 +45,6 @@ cflags-$(CONFIG_MGEODE_LX)   += $(call cc-option,-march=geode,-march=pentium-mmx)
 # cpu entries
 cflags-$(CONFIG_X86_GENERIC)   += $(call tune,generic,$(call tune,i686))
 
-# Work around the pentium-mmx code generator madness of gcc4.4.x which
-# does stack alignment by generating horrible code _before_ the mcount
-# prologue (push %ebp, mov %esp, %ebp) which breaks the function graph
-# tracer assumptions. For i686, generic, core2 this is set by the
-# compiler anyway
-ifeq ($(CONFIG_FUNCTION_GRAPH_TRACER), y)
-ADD_ACCUMULATE_OUTGOING_ARGS := y
-endif
-
-# Work around to a bug with asm goto with first implementations of it
-# in gcc causing gcc to mess up the push and pop of the stack in some
-# uses of asm goto.
-ifeq ($(CONFIG_JUMP_LABEL), y)
-ADD_ACCUMULATE_OUTGOING_ARGS := y
-endif
-
-cflags-$(ADD_ACCUMULATE_OUTGOING_ARGS) += $(call cc-option,-maccumulate-outgoing-args)
-
 # Bug fix for binutils: this option is required in order to keep
 # binutils from generating NOPL instructions against our will.
 ifneq ($(CONFIG_X86_P6_NOP),y)
index 6248740b68b5a0c71bddf6eb29e4c6898902d471..31922023de49281d44f7fe893099935a594f5456 100644 (file)
@@ -4,6 +4,7 @@
  * memcpy() and memmove() are defined for the compressed boot environment.
  */
 #include "misc.h"
+#include "error.h"
 
 void warn(char *m)
 {
index 7ef4a099defcda7f2d4e70fb7b3edec77361f2ec..6205d3b81e6d117b4116c2fa38ea40cea4a46fb8 100644 (file)
@@ -176,6 +176,7 @@ CONFIG_E1000E=y
 CONFIG_SKY2=y
 CONFIG_FORCEDETH=y
 CONFIG_8139TOO=y
+CONFIG_R8169=y
 CONFIG_FDDI=y
 CONFIG_INPUT_POLLDEV=y
 # CONFIG_INPUT_MOUSEDEV_PSAUX is not set
index afb222b63caeb0217ef34d9b2b193b6b59bd190d..c84584bb940280b56f3b7d6d5365803ec4364505 100644 (file)
@@ -604,7 +604,7 @@ amd_get_event_constraints_f15h(struct cpu_hw_events *cpuc, int idx,
                        return &amd_f15_PMC20;
                }
        case AMD_EVENT_NB:
-               /* moved to perf_event_amd_uncore.c */
+               /* moved to uncore.c */
                return &emptyconstraint;
        default:
                return &emptyconstraint;
index 349d4d17aa7fbd3a6268be3bd6e7bea909e76ccf..580b60f5ac83cea46a75a11185c8ef0a8c2da516 100644 (file)
@@ -2101,8 +2101,8 @@ static int x86_pmu_event_init(struct perf_event *event)
 
 static void refresh_pce(void *ignored)
 {
-       if (current->mm)
-               load_mm_cr4(current->mm);
+       if (current->active_mm)
+               load_mm_cr4(current->active_mm);
 }
 
 static void x86_pmu_event_mapped(struct perf_event *event)
@@ -2110,6 +2110,18 @@ static void x86_pmu_event_mapped(struct perf_event *event)
        if (!(event->hw.flags & PERF_X86_EVENT_RDPMC_ALLOWED))
                return;
 
+       /*
+        * This function relies on not being called concurrently in two
+        * tasks in the same mm.  Otherwise one task could observe
+        * perf_rdpmc_allowed > 1 and return all the way back to
+        * userspace with CR4.PCE clear while another task is still
+        * doing on_each_cpu_mask() to propagate CR4.PCE.
+        *
+        * For now, this can't happen because all callers hold mmap_sem
+        * for write.  If this changes, we'll need a different solution.
+        */
+       lockdep_assert_held_exclusive(&current->mm->mmap_sem);
+
        if (atomic_inc_return(&current->mm->context.perf_rdpmc_allowed) == 1)
                on_each_cpu_mask(mm_cpumask(current->mm), refresh_pce, NULL, 1);
 }
@@ -2244,6 +2256,7 @@ void arch_perf_update_userpage(struct perf_event *event,
                               struct perf_event_mmap_page *userpg, u64 now)
 {
        struct cyc2ns_data *data;
+       u64 offset;
 
        userpg->cap_user_time = 0;
        userpg->cap_user_time_zero = 0;
@@ -2251,11 +2264,13 @@ void arch_perf_update_userpage(struct perf_event *event,
                !!(event->hw.flags & PERF_X86_EVENT_RDPMC_ALLOWED);
        userpg->pmc_width = x86_pmu.cntval_bits;
 
-       if (!sched_clock_stable())
+       if (!using_native_sched_clock() || !sched_clock_stable())
                return;
 
        data = cyc2ns_read_begin();
 
+       offset = data->cyc2ns_offset + __sched_clock_offset;
+
        /*
         * Internal timekeeping for enabled/running/stopped times
         * is always in the local_clock domain.
@@ -2263,7 +2278,7 @@ void arch_perf_update_userpage(struct perf_event *event,
        userpg->cap_user_time = 1;
        userpg->time_mult = data->cyc2ns_mul;
        userpg->time_shift = data->cyc2ns_shift;
-       userpg->time_offset = data->cyc2ns_offset - now;
+       userpg->time_offset = offset - now;
 
        /*
         * cap_user_time_zero doesn't make sense when we're using a different
@@ -2271,7 +2286,7 @@ void arch_perf_update_userpage(struct perf_event *event,
         */
        if (!event->attr.use_clockid) {
                userpg->cap_user_time_zero = 1;
-               userpg->time_zero = data->cyc2ns_offset;
+               userpg->time_zero = offset;
        }
 
        cyc2ns_read_end(data);
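Userspace consumes time_mult/time_shift and the offsets through the perf mmap ABI; the conversion documented in include/uapi/linux/perf_event.h, restated as a sketch against the fields filled in above:

    #include <stdint.h>
    #include <linux/perf_event.h>

    static uint64_t cyc_to_time(const struct perf_event_mmap_page *pc,
                                uint64_t cyc)
    {
            uint64_t quot = cyc >> pc->time_shift;
            uint64_t rem  = cyc & (((uint64_t)1 << pc->time_shift) - 1);

            /* split the multiply so cyc * time_mult cannot overflow */
            return pc->time_offset + quot * pc->time_mult +
                   ((rem * pc->time_mult) >> pc->time_shift);
    }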
index aff4b5b69d4021aeb0ad4356833ca3c2380a7960..238ae3248ba5594265f14ef37ee6fde5c320675a 100644 (file)
@@ -1,5 +1,5 @@
 /*
- * perf_event_intel_cstate.c: support cstate residency counters
+ * Support cstate residency counters
  *
  * Copyright (C) 2015, Intel Corp.
  * Author: Kan Liang (kan.liang@intel.com)
index 22054ca49026511f6cbe2ee9ce30ca0f4281c78c..9d05c7e67f6073e3441c164d1bdc6db390507ef0 100644 (file)
@@ -1,5 +1,5 @@
 /*
- * perf_event_intel_rapl.c: support Intel RAPL energy consumption counters
+ * Support Intel RAPL energy consumption counters
  * Copyright (C) 2013 Google, Inc., Stephane Eranian
  *
  * Intel RAPL interface is specified in the IA-32 Manual Vol3b
index ad986c1e29bccd7d5303d94bcbf2caa9223fcf74..df5989f27b1b6508404af774c55953f5c9b082c4 100644 (file)
@@ -360,7 +360,7 @@ extern struct list_head pci2phy_map_head;
 extern struct pci_extra_dev *uncore_extra_pci_dev;
 extern struct event_constraint uncore_constraint_empty;
 
-/* perf_event_intel_uncore_snb.c */
+/* uncore_snb.c */
 int snb_uncore_pci_init(void);
 int ivb_uncore_pci_init(void);
 int hsw_uncore_pci_init(void);
@@ -371,7 +371,7 @@ void nhm_uncore_cpu_init(void);
 void skl_uncore_cpu_init(void);
 int snb_pci2phy_map_init(int devid);
 
-/* perf_event_intel_uncore_snbep.c */
+/* uncore_snbep.c */
 int snbep_uncore_pci_init(void);
 void snbep_uncore_cpu_init(void);
 int ivbep_uncore_pci_init(void);
@@ -385,5 +385,5 @@ void knl_uncore_cpu_init(void);
 int skx_uncore_pci_init(void);
 void skx_uncore_cpu_init(void);
 
-/* perf_event_intel_uncore_nhmex.c */
+/* uncore_nhmex.c */
 void nhmex_uncore_cpu_init(void);
index db64baf0e500b4d2172bb2b5980a4e222fbdc84e..8bef70e7f3cc6d242e7841bfa2404d182e5235cd 100644 (file)
@@ -158,13 +158,13 @@ void hyperv_init(void)
                clocksource_register_hz(&hyperv_cs_tsc, NSEC_PER_SEC/100);
                return;
        }
+register_msr_cs:
 #endif
        /*
         * For 32 bit guests just use the MSR based mechanism for reading
         * the partition counter.
         */
 
-register_msr_cs:
        hyperv_cs = &hyperv_cs_msr;
        if (ms_hyperv.features & HV_X64_MSR_TIME_REF_COUNT_AVAILABLE)
                clocksource_register_hz(&hyperv_cs_msr, NSEC_PER_SEC/100);
index 4e7772387c6e92efc365d271f08ec3fef6208c9d..b04bb6dfed7f8464c1425df50c0fa9d1481dcee2 100644 (file)
 #define X86_FEATURE_PKU                (16*32+ 3) /* Protection Keys for Userspace */
 #define X86_FEATURE_OSPKE      (16*32+ 4) /* OS Protection Keys Enable */
 #define X86_FEATURE_AVX512_VPOPCNTDQ (16*32+14) /* POPCNT for vectors of DW/QW */
-#define X86_FEATURE_RDPID      (16*32+ 22) /* RDPID instruction */
+#define X86_FEATURE_LA57       (16*32+16) /* 5-level page tables */
+#define X86_FEATURE_RDPID      (16*32+22) /* RDPID instruction */
 
 /* AMD-defined CPU features, CPUID level 0x80000007 (ebx), word 17 */
 #define X86_FEATURE_OVERFLOW_RECOV (17*32+0) /* MCA overflow recovery support */
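A feature number is simply its CPUID leaf word times 32 plus the bit position, which is what the spacing fix above makes readable: X86_FEATURE_LA57 is bit 16 of word 16, i.e. 16*32 + 16 = 528. As a sketch (the helper macro is illustrative, not part of the header):

    #define FEATURE_NR(word, bit)   ((word) * 32 + (bit))
    /* FEATURE_NR(16, 16) == 528 == X86_FEATURE_LA57 above */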
index d74747b031ecd2e20dcf437944195a37e7c6bb3b..c4eda791f877b6c67808546ce072da07b9bb8002 100644 (file)
@@ -46,6 +46,7 @@ struct kvm_page_track_notifier_node {
 };
 
 void kvm_page_track_init(struct kvm *kvm);
+void kvm_page_track_cleanup(struct kvm *kvm);
 
 void kvm_page_track_free_memslot(struct kvm_memory_slot *free,
                                 struct kvm_memory_slot *dont);
index 72277b1028a5f54551962555fa56bfd5aebab15c..50d35e3185f553b92ce1eeba2700f13e33e49258 100644 (file)
@@ -121,12 +121,9 @@ static inline void native_pmd_clear(pmd_t *pmd)
        *(tmp + 1) = 0;
 }
 
-#if !defined(CONFIG_SMP) || (defined(CONFIG_HIGHMEM64G) && \
-               defined(CONFIG_PARAVIRT))
 static inline void native_pud_clear(pud_t *pudp)
 {
 }
-#endif
 
 static inline void pud_clear(pud_t *pudp)
 {
index 1cfb36b8c024ab07b8334121fc56ac79f2a35371..585ee0d42d18fc162601ff0d8a53827f0d011f5e 100644 (file)
@@ -62,7 +62,7 @@ extern struct mm_struct *pgd_page_get_mm(struct page *page);
 # define set_pud(pudp, pud)            native_set_pud(pudp, pud)
 #endif
 
-#ifndef __PAGETABLE_PMD_FOLDED
+#ifndef __PAGETABLE_PUD_FOLDED
 #define pud_clear(pud)                 native_pud_clear(pud)
 #endif
 
index 8b4de22d64299e8997e8b12270e5c23112f85597..62484333673d98c251d52d1eccc10e762b38478b 100644 (file)
@@ -273,6 +273,8 @@ static inline pgdval_t pgd_flags(pgd_t pgd)
 }
 
 #if CONFIG_PGTABLE_LEVELS > 3
+#include <asm-generic/5level-fixup.h>
+
 typedef struct { pudval_t pud; } pud_t;
 
 static inline pud_t native_make_pud(pmdval_t val)
@@ -285,6 +287,7 @@ static inline pudval_t native_pud_val(pud_t pud)
        return pud.pud;
 }
 #else
+#define __ARCH_USE_5LEVEL_HACK
 #include <asm-generic/pgtable-nopud.h>
 
 static inline pudval_t native_pud_val(pud_t pud)
@@ -306,6 +309,7 @@ static inline pmdval_t native_pmd_val(pmd_t pmd)
        return pmd.pmd;
 }
 #else
+#define __ARCH_USE_5LEVEL_HACK
 #include <asm-generic/pgtable-nopmd.h>
 
 static inline pmdval_t native_pmd_val(pmd_t pmd)
index 34684adb6899ad132e44e159be621d92370b99c0..b3b09b98896d528d2ef7d425a10784e4ebe86106 100644 (file)
@@ -46,6 +46,15 @@ extern int __arch_set_user_pkey_access(struct task_struct *tsk, int pkey,
 static inline
 bool mm_pkey_is_allocated(struct mm_struct *mm, int pkey)
 {
+       /*
+        * "Allocated" pkeys are those that have been returned
+        * from pkey_alloc().  pkey 0 is special, and never
+        * returned from pkey_alloc().
+        */
+       if (pkey <= 0)
+               return false;
+       if (pkey >= arch_max_pkey())
+               return false;
        return mm_pkey_allocation_map(mm) & (1U << pkey);
 }
 
@@ -82,12 +91,6 @@ int mm_pkey_alloc(struct mm_struct *mm)
 static inline
 int mm_pkey_free(struct mm_struct *mm, int pkey)
 {
-       /*
-        * pkey 0 is special, always allocated and can never
-        * be freed.
-        */
-       if (!pkey)
-               return -EINVAL;
        if (!mm_pkey_is_allocated(mm, pkey))
                return -EINVAL;
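Folding the range checks into mm_pkey_is_allocated() keeps mm_pkey_free() correct for pkey 0 and out-of-range keys alike, since both now fail the is_allocated test. A userspace view of the resulting behaviour (glibc >= 2.27 pkey wrappers assumed):

    #define _GNU_SOURCE
    #include <sys/mman.h>           /* pkey_alloc(), pkey_free() */

    void demo(void)
    {
            int pkey = pkey_alloc(0, PKEY_DISABLE_WRITE);

            if (pkey > 0)
                    pkey_free(pkey);        /* ok: came from pkey_alloc() */
            pkey_free(0);                   /* now fails with EINVAL: pkey 0
                                               is never "allocated" */
    }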
 
diff --git a/arch/x86/include/asm/purgatory.h b/arch/x86/include/asm/purgatory.h
new file mode 100644 (file)
index 0000000..d7da272
--- /dev/null
@@ -0,0 +1,20 @@
+#ifndef _ASM_X86_PURGATORY_H
+#define _ASM_X86_PURGATORY_H
+
+#ifndef __ASSEMBLY__
+#include <linux/purgatory.h>
+
+extern void purgatory(void);
+/*
+ * These forward declarations serve two purposes:
+ *
+ * 1) Make sparse happy when checking arch/purgatory
+ * 2) Document that these are required to be global so the symbol
+ *    lookup in kexec works
+ */
+extern unsigned long purgatory_backup_dest;
+extern unsigned long purgatory_backup_src;
+extern unsigned long purgatory_backup_sz;
+#endif /* __ASSEMBLY__ */
+
+#endif /* _ASM_PURGATORY_H */
index a04eabd43d06621dfadb38d01869f2f22adce783..27e9f9d769b892ef27fa3cf13cb95a7c9563b559 100644 (file)
@@ -12,6 +12,8 @@ extern int recalibrate_cpu_khz(void);
 
 extern int no_timer_check;
 
+extern bool using_native_sched_clock(void);
+
 /*
  * We use the full linear equation: f(x) = a + b*x, in order to allow
  * a continuous function in the face of dynamic freq changes.
index 6fa85944af83d8ddbbad3a344a31a7920e64e6d0..fc5abff9b7fd63d6b3a01a18061be8b3f752d109 100644 (file)
@@ -188,7 +188,7 @@ static inline void __native_flush_tlb_single(unsigned long addr)
 
 static inline void __flush_tlb_all(void)
 {
-       if (static_cpu_has(X86_FEATURE_PGE))
+       if (boot_cpu_has(X86_FEATURE_PGE))
                __flush_tlb_global();
        else
                __flush_tlb();
index 72e8300b1e8a6a96eef10a918abb5b3b020014aa..9cffb44a3cf5dfedb122c7b31c2f690177e68604 100644 (file)
@@ -485,15 +485,17 @@ static inline unsigned long uv_soc_phys_ram_to_gpa(unsigned long paddr)
 
        if (paddr < uv_hub_info->lowmem_remap_top)
                paddr |= uv_hub_info->lowmem_remap_base;
-       paddr |= uv_hub_info->gnode_upper;
-       if (m_val)
+
+       if (m_val) {
+               paddr |= uv_hub_info->gnode_upper;
                paddr = ((paddr << uv_hub_info->m_shift)
                                                >> uv_hub_info->m_shift) |
                        ((paddr >> uv_hub_info->m_val)
                                                << uv_hub_info->n_lshift);
-       else
+       } else {
                paddr |= uv_soc_phys_ram_to_nasid(paddr)
                                                << uv_hub_info->gpa_shift;
+       }
        return paddr;
 }
 
index 5138dacf8bb8360511f7b3514f8ab0ac1a1e88f8..07244ea16765a6bc77e1107233c65fc7286f0e8b 100644 (file)
@@ -58,7 +58,7 @@ struct setup_header {
        __u32   header;
        __u16   version;
        __u32   realmode_swtch;
-       __u16   start_sys;
+       __u16   start_sys_seg;
        __u16   kernel_version;
        __u8    type_of_loader;
        __u8    loadflags;
index ae32838cac5fd2251e1ffa0bbb8b8c629e399a84..b2879cc23db470ec8cc2cbeacdea4ff2b94ec1e3 100644 (file)
@@ -179,10 +179,15 @@ static int acpi_register_lapic(int id, u32 acpiid, u8 enabled)
                return -EINVAL;
        }
 
+       if (!enabled) {
+               ++disabled_cpus;
+               return -EINVAL;
+       }
+
        if (boot_cpu_physical_apicid != -1U)
                ver = boot_cpu_apic_version;
 
-       cpu = __generic_processor_info(id, ver, enabled);
+       cpu = generic_processor_info(id, ver);
        if (cpu >= 0)
                early_per_cpu(x86_cpu_to_acpiid, cpu) = acpiid;
 
@@ -710,7 +715,7 @@ static void __init acpi_set_irq_model_ioapic(void)
 #ifdef CONFIG_ACPI_HOTPLUG_CPU
 #include <acpi/processor.h>
 
-int acpi_map_cpu2node(acpi_handle handle, int cpu, int physid)
+static int acpi_map_cpu2node(acpi_handle handle, int cpu, int physid)
 {
 #ifdef CONFIG_ACPI_NUMA
        int nid;
index 4261b3282ad99dd87799683e33b2945bcfb20746..8ccb7ef512e05dd9edaa6a3d7a852f70639a54d2 100644 (file)
@@ -1610,24 +1610,15 @@ static inline void try_to_enable_x2apic(int remap_mode) { }
 static inline void __x2apic_enable(void) { }
 #endif /* !CONFIG_X86_X2APIC */
 
-static int __init try_to_enable_IR(void)
-{
-#ifdef CONFIG_X86_IO_APIC
-       if (!x2apic_enabled() && skip_ioapic_setup) {
-               pr_info("Not enabling interrupt remapping due to skipped IO-APIC setup\n");
-               return -1;
-       }
-#endif
-       return irq_remapping_enable();
-}
-
 void __init enable_IR_x2apic(void)
 {
        unsigned long flags;
        int ret, ir_stat;
 
-       if (skip_ioapic_setup)
+       if (skip_ioapic_setup) {
+               pr_info("Not enabling interrupt remapping due to skipped IO-APIC setup\n");
                return;
+       }
 
        ir_stat = irq_remapping_prepare();
        if (ir_stat < 0 && !x2apic_supported())
@@ -1645,7 +1636,7 @@ void __init enable_IR_x2apic(void)
 
        /* If irq_remapping_prepare() succeeded, try to enable it */
        if (ir_stat >= 0)
-               ir_stat = try_to_enable_IR();
+               ir_stat = irq_remapping_enable();
        /* ir_stat contains the remap mode or an error code */
        try_to_enable_x2apic(ir_stat);
 
@@ -2062,17 +2053,17 @@ static int allocate_logical_cpuid(int apicid)
 
        /* Allocate a new cpuid. */
        if (nr_logical_cpuids >= nr_cpu_ids) {
-               WARN_ONCE(1, "Only %d processors supported."
+               WARN_ONCE(1, "APIC: NR_CPUS/possible_cpus limit of %i reached. "
                             "Processor %d/0x%x and the rest are ignored.\n",
-                            nr_cpu_ids - 1, nr_logical_cpuids, apicid);
-               return -1;
+                            nr_cpu_ids, nr_logical_cpuids, apicid);
+               return -EINVAL;
        }
 
        cpuid_to_apicid[nr_logical_cpuids] = apicid;
        return nr_logical_cpuids++;
 }
 
-int __generic_processor_info(int apicid, int version, bool enabled)
+int generic_processor_info(int apicid, int version)
 {
        int cpu, max = nr_cpu_ids;
        bool boot_cpu_detected = physid_isset(boot_cpu_physical_apicid,
@@ -2130,11 +2121,9 @@ int __generic_processor_info(int apicid, int version, bool enabled)
        if (num_processors >= nr_cpu_ids) {
                int thiscpu = max + disabled_cpus;
 
-               if (enabled) {
-                       pr_warning("APIC: NR_CPUS/possible_cpus limit of %i "
-                                  "reached. Processor %d/0x%x ignored.\n",
-                                  max, thiscpu, apicid);
-               }
+               pr_warning("APIC: NR_CPUS/possible_cpus limit of %i "
+                          "reached. Processor %d/0x%x ignored.\n",
+                          max, thiscpu, apicid);
 
                disabled_cpus++;
                return -EINVAL;
@@ -2186,23 +2175,13 @@ int __generic_processor_info(int apicid, int version, bool enabled)
                apic->x86_32_early_logical_apicid(cpu);
 #endif
        set_cpu_possible(cpu, true);
-
-       if (enabled) {
-               num_processors++;
-               physid_set(apicid, phys_cpu_present_map);
-               set_cpu_present(cpu, true);
-       } else {
-               disabled_cpus++;
-       }
+       physid_set(apicid, phys_cpu_present_map);
+       set_cpu_present(cpu, true);
+       num_processors++;
 
        return cpu;
 }
 
-int generic_processor_info(int apicid, int version)
-{
-       return __generic_processor_info(apicid, version, true);
-}
-
 int hard_smp_processor_id(void)
 {
        return read_apic_id();
index e9f8f8cdd57085db85dee8247b67150250308a42..86f20cc0a65e2240b2eb5a05fa77af3e866b40d6 100644 (file)
@@ -1105,7 +1105,8 @@ void __init uv_init_hub_info(struct uv_hub_info_s *hi)
        node_id.v               = uv_read_local_mmr(UVH_NODE_ID);
        uv_cpuid.gnode_shift    = max_t(unsigned int, uv_cpuid.gnode_shift, mn.n_val);
        hi->gnode_extra         = (node_id.s.node_id & ~((1 << uv_cpuid.gnode_shift) - 1)) >> 1;
-       hi->gnode_upper         = (unsigned long)hi->gnode_extra << mn.m_val;
+       if (mn.m_val)
+               hi->gnode_upper = (u64)hi->gnode_extra << mn.m_val;
 
        if (uv_gp_table) {
                hi->global_mmr_base     = uv_gp_table->mmr_base;
index 35a5d5dca2fae5fb68d522658824440f3b736d8b..c36140d788fe215aadb3a8f27a8de040f2c44c06 100644 (file)
@@ -556,10 +556,6 @@ static void early_init_amd(struct cpuinfo_x86 *c)
        if (c->x86_power & (1 << 8)) {
                set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC);
                set_cpu_cap(c, X86_FEATURE_NONSTOP_TSC);
-               if (check_tsc_unstable())
-                       clear_sched_clock_stable();
-       } else {
-               clear_sched_clock_stable();
        }
 
        /* Bit 12 of 8000_0007 edx is accumulated power mechanism. */
index adc0ebd8bed0e17be1716f3fb7c3eab51b7fa0c4..43955ee6715b1876b89ebd615b8eb171bbde1dd0 100644 (file)
@@ -105,8 +105,6 @@ static void early_init_centaur(struct cpuinfo_x86 *c)
 #ifdef CONFIG_X86_64
        set_cpu_cap(c, X86_FEATURE_SYSENTER32);
 #endif
-
-       clear_sched_clock_stable();
 }
 
 static void init_centaur(struct cpuinfo_x86 *c)
index b11b38c3b0bde194b9139ebfa0d3bc251b96f803..58094a1f9e9d301e11d2c93a1ecc126e1715002e 100644 (file)
@@ -88,7 +88,6 @@ static void default_init(struct cpuinfo_x86 *c)
                        strcpy(c->x86_model_id, "386");
        }
 #endif
-       clear_sched_clock_stable();
 }
 
 static const struct cpu_dev default_cpu = {
@@ -1077,8 +1076,6 @@ static void identify_cpu(struct cpuinfo_x86 *c)
         */
        if (this_cpu->c_init)
                this_cpu->c_init(c);
-       else
-               clear_sched_clock_stable();
 
        /* Disable the PN if appropriate */
        squash_the_stupid_serial_number(c);
index 0a3bc19de0177e93f81ae24c58264e7205406fd6..a70fd61095f8a73baa5eb7c486afd6ff19cd4fd1 100644 (file)
@@ -185,7 +185,6 @@ static void early_init_cyrix(struct cpuinfo_x86 *c)
                set_cpu_cap(c, X86_FEATURE_CYRIX_ARR);
                break;
        }
-       clear_sched_clock_stable();
 }
 
 static void init_cyrix(struct cpuinfo_x86 *c)
index fe0a615a051b19a99f9388a7b245b326cee86e11..063197771b8d7ba08f2eafe474cacb0efe9e79d3 100644 (file)
@@ -162,10 +162,6 @@ static void early_init_intel(struct cpuinfo_x86 *c)
        if (c->x86_power & (1 << 8)) {
                set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC);
                set_cpu_cap(c, X86_FEATURE_NONSTOP_TSC);
-               if (check_tsc_unstable())
-                       clear_sched_clock_stable();
-       } else {
-               clear_sched_clock_stable();
        }
 
        /* Penwell and Cloverview have the TSC which doesn't sleep on S3 */
index 0bbe0f3a039f6412f55863195cbb3a36b84ceb22..9ac2a5cdd9c206e83f171847ac04d5bf4f2a3152 100644 (file)
@@ -28,7 +28,6 @@
 #include <linux/sched/signal.h>
 #include <linux/sched/task.h>
 #include <linux/slab.h>
-#include <linux/cpu.h>
 #include <linux/task_work.h>
 
 #include <uapi/linux/magic.h>
@@ -728,7 +727,7 @@ void rdtgroup_kn_unlock(struct kernfs_node *kn)
        if (atomic_dec_and_test(&rdtgrp->waitcount) &&
            (rdtgrp->flags & RDT_DELETED)) {
                kernfs_unbreak_active_protection(kn);
-               kernfs_put(kn);
+               kernfs_put(rdtgrp->kn);
                kfree(rdtgrp);
        } else {
                kernfs_unbreak_active_protection(kn);
index 8e9725c607ea6acb7a91deed9b72b2c9a873803e..5accfbdee3f06fac48eaf4423d66d7bee1e3a0a3 100644 (file)
@@ -54,6 +54,8 @@
 
 static DEFINE_MUTEX(mce_chrdev_read_mutex);
 
+static int mce_chrdev_open_count;      /* #times opened */
+
 #define mce_log_get_idx_check(p) \
 ({ \
        RCU_LOCKDEP_WARN(!rcu_read_lock_sched_held() && \
@@ -598,6 +600,10 @@ static int mce_default_notifier(struct notifier_block *nb, unsigned long val,
        if (atomic_read(&num_notifiers) > 2)
                return NOTIFY_DONE;
 
+       /* Don't print when mcelog is running */
+       if (mce_chrdev_open_count > 0)
+               return NOTIFY_DONE;
+
        __print_mce(m);
 
        return NOTIFY_DONE;
@@ -1828,7 +1834,6 @@ void mcheck_cpu_clear(struct cpuinfo_x86 *c)
  */
 
 static DEFINE_SPINLOCK(mce_chrdev_state_lock);
-static int mce_chrdev_open_count;      /* #times opened */
 static int mce_chrdev_open_exclu;      /* already open exclusive? */
 
 static int mce_chrdev_open(struct inode *inode, struct file *file)
index 524cc5780a779630d3203d834b0a508097340c67..6e4a047e4b684b0feeeeba851b850c9baebb552f 100644 (file)
@@ -60,7 +60,7 @@ static const char * const th_names[] = {
        "load_store",
        "insn_fetch",
        "combined_unit",
-       "",
+       "decode_unit",
        "northbridge",
        "execution_unit",
 };
index 8457b49786686f74a737429ec47d04c2f92bab35..d77d07ab310b4317d33e44de10896297bb2a6654 100644 (file)
@@ -16,8 +16,6 @@ static void early_init_transmeta(struct cpuinfo_x86 *c)
                if (xlvl >= 0x80860001)
                        c->x86_capability[CPUID_8086_0001_EDX] = cpuid_edx(0x80860001);
        }
-
-       clear_sched_clock_stable();
 }
 
 static void init_transmeta(struct cpuinfo_x86 *c)
index 891f4dad7b2c49c81518e15ecee61bc8d4694ff2..22403a28caf52226163ae96779c3f2a79fc3431d 100644 (file)
@@ -30,7 +30,6 @@
 #include <asm/hypervisor.h>
 #include <asm/timer.h>
 #include <asm/apic.h>
-#include <asm/timer.h>
 
 #undef pr_fmt
 #define pr_fmt(fmt)    "vmware: " fmt
index 8639bb2ae05868ab65d88e44683f44c8651121f3..cbd73eb4217026f1ef39e913cceb04d5c1af4e3a 100644 (file)
 #include <asm/ftrace.h>
 #include <asm/nops.h>
 
+#if defined(CONFIG_FUNCTION_GRAPH_TRACER) && \
+       !defined(CC_USING_FENTRY) && \
+       !defined(CONFIG_CC_OPTIMIZE_FOR_PERFORMANCE)
+# error The following combination is not supported: ((compiler missing -mfentry) || (CONFIG_X86_32 and !CONFIG_DYNAMIC_FTRACE)) && CONFIG_FUNCTION_GRAPH_TRACER && CONFIG_CC_OPTIMIZE_FOR_SIZE
+#endif
+
 #ifdef CONFIG_DYNAMIC_FTRACE
 
 int ftrace_arch_code_modify_prepare(void)
@@ -535,7 +541,7 @@ static void run_sync(void)
 {
        int enable_irqs = irqs_disabled();
 
-       /* We may be called with interrupts disbled (on bootup). */
+       /* We may be called with interrupts disabled (on bootup). */
        if (enable_irqs)
                local_irq_enable();
        on_each_cpu(do_sync_core, NULL, 1);
index 54a2372f5dbb1eb0598788e944ad28708b638671..b5785c197e534796d5e477b6cd86a502d229db7c 100644 (file)
@@ -4,6 +4,7 @@
  *  Copyright (C) 2000 Andrea Arcangeli <andrea@suse.de> SuSE
  */
 
+#define DISABLE_BRANCH_PROFILING
 #include <linux/init.h>
 #include <linux/linkage.h>
 #include <linux/types.h>
index dc6ba5bda9fc83630c773a80c4adea6871db0a59..89ff7af2de508ba0c34c2ce24c227dea5bf6d973 100644 (file)
@@ -354,7 +354,7 @@ static int hpet_resume(struct clock_event_device *evt, int timer)
 
                irq_domain_deactivate_irq(irq_get_irq_data(hdev->irq));
                irq_domain_activate_irq(irq_get_irq_data(hdev->irq));
-               disable_irq(hdev->irq);
+               disable_hardirq(hdev->irq);
                irq_set_affinity(hdev->irq, cpumask_of(hdev->cpu));
                enable_irq(hdev->irq);
        }
index bdb83e431d8976086e3c17328f01a9bdac2c1c8b..38b64587b31be5611a763df6dafe8434db2a66b5 100644 (file)
@@ -167,7 +167,7 @@ static int __init boot_params_kdebugfs_init(void)
        struct dentry *dbp, *version, *data;
        int error = -ENOMEM;
 
-       dbp = debugfs_create_dir("boot_params", NULL);
+       dbp = debugfs_create_dir("boot_params", arch_debugfs_dir);
        if (!dbp)
                return -ENOMEM;
 
index c6ee63f927ab721dd542b016bcfb22d65a55f114..d688826e5736a18c9f9343ebe278ec2b04bff66d 100644 (file)
@@ -67,7 +67,7 @@
 #endif
 
 /* Ensure if the instruction can be boostable */
-extern int can_boost(kprobe_opcode_t *instruction);
+extern int can_boost(kprobe_opcode_t *instruction, void *addr);
 /* Recover instruction if given address is probed */
 extern unsigned long recover_probed_instruction(kprobe_opcode_t *buf,
                                         unsigned long addr);
index 6384eb754a58302a18406c3a9587034bbff3c2c3..993fa4fe4f68694a3fa75406b2e762cfadbbf745 100644 (file)
@@ -167,12 +167,12 @@ NOKPROBE_SYMBOL(skip_prefixes);
  * Returns non-zero if opcode is boostable.
  * RIP relative instructions are adjusted at copying time in 64 bits mode
  */
-int can_boost(kprobe_opcode_t *opcodes)
+int can_boost(kprobe_opcode_t *opcodes, void *addr)
 {
        kprobe_opcode_t opcode;
        kprobe_opcode_t *orig_opcodes = opcodes;
 
-       if (search_exception_tables((unsigned long)opcodes))
+       if (search_exception_tables((unsigned long)addr))
                return 0;       /* Page fault may occur on this address. */
 
 retry:
@@ -417,7 +417,7 @@ static int arch_copy_kprobe(struct kprobe *p)
         * __copy_instruction can modify the displacement of the instruction,
         * but it doesn't affect boostable check.
         */
-       if (can_boost(p->ainsn.insn))
+       if (can_boost(p->ainsn.insn, p->addr))
                p->ainsn.boostable = 0;
        else
                p->ainsn.boostable = -1;
index 3d1bee9d6a728fd50645d0179986cd1dda6629d0..3e7c6e5a08ffde197c192ab57f3bb38aac590969 100644 (file)
@@ -178,7 +178,7 @@ static int copy_optimized_instructions(u8 *dest, u8 *src)
 
        while (len < RELATIVEJUMP_SIZE) {
                ret = __copy_instruction(dest + len, src + len);
-               if (!ret || !can_boost(dest + len))
+               if (!ret || !can_boost(dest + len, src + len))
                        return -EINVAL;
                len += ret;
        }
index 307b1f4543de4bc96c6759c5f81a7faf5c9f443c..857cdbd028675716afad71c0b48974399889e622 100644 (file)
@@ -194,19 +194,22 @@ static int arch_update_purgatory(struct kimage *image)
 
        /* Setup copying of backup region */
        if (image->type == KEXEC_TYPE_CRASH) {
-               ret = kexec_purgatory_get_set_symbol(image, "backup_dest",
+               ret = kexec_purgatory_get_set_symbol(image,
+                               "purgatory_backup_dest",
                                &image->arch.backup_load_addr,
                                sizeof(image->arch.backup_load_addr), 0);
                if (ret)
                        return ret;
 
-               ret = kexec_purgatory_get_set_symbol(image, "backup_src",
+               ret = kexec_purgatory_get_set_symbol(image,
+                               "purgatory_backup_src",
                                &image->arch.backup_src_start,
                                sizeof(image->arch.backup_src_start), 0);
                if (ret)
                        return ret;
 
-               ret = kexec_purgatory_get_set_symbol(image, "backup_sz",
+               ret = kexec_purgatory_get_set_symbol(image,
+                               "purgatory_backup_sz",
                                &image->arch.backup_src_sz,
                                sizeof(image->arch.backup_src_sz), 0);
                if (ret)
index f088ea4c66e72e5787e6c2052b09bc95291cf131..a723ae9440ab2585303457dac977e53961f3cffd 100644 (file)
@@ -166,11 +166,9 @@ int __register_nmi_handler(unsigned int type, struct nmiaction *action)
        spin_lock_irqsave(&desc->lock, flags);
 
        /*
-        * most handlers of type NMI_UNKNOWN never return because
-        * they just assume the NMI is theirs.  Just a sanity check
-        * to manage expectations
+        * Indicate if there are multiple registrations on the
+        * internal NMI handler call chains (SERR and IO_CHECK).
         */
-       WARN_ON_ONCE(type == NMI_UNKNOWN && !list_empty(&desc->head));
        WARN_ON_ONCE(type == NMI_SERR && !list_empty(&desc->head));
        WARN_ON_ONCE(type == NMI_IO_CHECK && !list_empty(&desc->head));
 
index e244c19a2451aa7d4dfa8bf34b1834e48f68ec54..067f9813fd2cf7c15d5a1d297b537eedf6ca7959 100644 (file)
@@ -223,6 +223,22 @@ static struct dmi_system_id __initdata reboot_dmi_table[] = {
                        DMI_MATCH(DMI_BOARD_NAME, "P4S800"),
                },
        },
+       {       /* Handle problems with rebooting on ASUS EeeBook X205TA */
+               .callback = set_acpi_reboot,
+               .ident = "ASUS EeeBook X205TA",
+               .matches = {
+                       DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
+                       DMI_MATCH(DMI_PRODUCT_NAME, "X205TA"),
+               },
+       },
+       {       /* Handle problems with rebooting on ASUS EeeBook X205TAW */
+               .callback = set_acpi_reboot,
+               .ident = "ASUS EeeBook X205TAW",
+               .matches = {
+                       DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
+                       DMI_MATCH(DMI_PRODUCT_NAME, "X205TAW"),
+               },
+       },
 
        /* Certec */
        {       /* Handle problems with rebooting on Certec BPC600 */
index 46bcda4cb1c2f84762bc409bf4c89b1528ba1c52..714dfba6a1e713fb6b5f4268c318f913bb266628 100644 (file)
@@ -327,9 +327,16 @@ unsigned long long sched_clock(void)
 {
        return paravirt_sched_clock();
 }
+
+bool using_native_sched_clock(void)
+{
+       return pv_time_ops.sched_clock == native_sched_clock;
+}
 #else
 unsigned long long
 sched_clock(void) __attribute__((alias("native_sched_clock")));
+
+bool using_native_sched_clock(void) { return true; }
 #endif
 
 int check_tsc_unstable(void)
@@ -1112,8 +1119,10 @@ static void tsc_cs_mark_unstable(struct clocksource *cs)
 {
        if (tsc_unstable)
                return;
+
        tsc_unstable = 1;
-       clear_sched_clock_stable();
+       if (using_native_sched_clock())
+               clear_sched_clock_stable();
        disable_sched_clock_irqtime();
        pr_info("Marking TSC unstable due to clocksource watchdog\n");
 }
@@ -1135,18 +1144,20 @@ static struct clocksource clocksource_tsc = {
 
 void mark_tsc_unstable(char *reason)
 {
-       if (!tsc_unstable) {
-               tsc_unstable = 1;
+       if (tsc_unstable)
+               return;
+
+       tsc_unstable = 1;
+       if (using_native_sched_clock())
                clear_sched_clock_stable();
-               disable_sched_clock_irqtime();
-               pr_info("Marking TSC unstable due to %s\n", reason);
-               /* Change only the rating, when not registered */
-               if (clocksource_tsc.mult)
-                       clocksource_mark_unstable(&clocksource_tsc);
-               else {
-                       clocksource_tsc.flags |= CLOCK_SOURCE_UNSTABLE;
-                       clocksource_tsc.rating = 0;
-               }
+       disable_sched_clock_irqtime();
+       pr_info("Marking TSC unstable due to %s\n", reason);
+       /* Change only the rating, when not registered */
+       if (clocksource_tsc.mult) {
+               clocksource_mark_unstable(&clocksource_tsc);
+       } else {
+               clocksource_tsc.flags |= CLOCK_SOURCE_UNSTABLE;
+               clocksource_tsc.rating = 0;
        }
 }
 
@@ -1322,6 +1333,8 @@ static int __init init_tsc_clocksource(void)
         * the refined calibration and directly register it as a clocksource.
         */
        if (boot_cpu_has(X86_FEATURE_TSC_KNOWN_FREQ)) {
+               if (boot_cpu_has(X86_FEATURE_ART))
+                       art_related_clocksource = &clocksource_tsc;
                clocksource_register_khz(&clocksource_tsc, tsc_khz);
                return 0;
        }
index 478d15dbaee41b251c8bb28b59183e2b6c733326..08339262b666e56f2623406a10c42f3184c83e29 100644 (file)
@@ -82,19 +82,43 @@ static size_t regs_size(struct pt_regs *regs)
        return sizeof(*regs);
 }
 
+#ifdef CONFIG_X86_32
+#define GCC_REALIGN_WORDS 3
+#else
+#define GCC_REALIGN_WORDS 1
+#endif
+
 static bool is_last_task_frame(struct unwind_state *state)
 {
-       unsigned long bp = (unsigned long)state->bp;
-       unsigned long regs = (unsigned long)task_pt_regs(state->task);
+       unsigned long *last_bp = (unsigned long *)task_pt_regs(state->task) - 2;
+       unsigned long *aligned_bp = last_bp - GCC_REALIGN_WORDS;
 
        /*
         * We have to check for the last task frame at two different locations
         * because gcc can occasionally decide to realign the stack pointer and
-        * change the offset of the stack frame by a word in the prologue of a
-        * function called by head/entry code.
+        * change the offset of the stack frame in the prologue of a function
+        * called by head/entry code.  Examples:
+        *
+        * <start_secondary>:
+        *      push   %edi
+        *      lea    0x8(%esp),%edi
+        *      and    $0xfffffff8,%esp
+        *      pushl  -0x4(%edi)
+        *      push   %ebp
+        *      mov    %esp,%ebp
+        *
+        * <x86_64_start_kernel>:
+        *      lea    0x8(%rsp),%r10
+        *      and    $0xfffffffffffffff0,%rsp
+        *      pushq  -0x8(%r10)
+        *      push   %rbp
+        *      mov    %rsp,%rbp
+        *
+        * Note that after aligning the stack, it pushes a duplicate copy of
+        * the return address before pushing the frame pointer.
         */
-       return bp == regs - FRAME_HEADER_SIZE ||
-              bp == regs - FRAME_HEADER_SIZE - sizeof(long);
+       return (state->bp == last_bp ||
+               (state->bp == aligned_bp && *(aligned_bp+1) == *(last_bp+1)));
 }
 
 /*
index 73ea24d4f119c8dce2a0d3e5bfc24ef3d7562d3a..047b17a26269610b9cc083899cafaa6ca236eb5b 100644 (file)
@@ -657,6 +657,9 @@ void kvm_pic_destroy(struct kvm *kvm)
 {
        struct kvm_pic *vpic = kvm->arch.vpic;
 
+       if (!vpic)
+               return;
+
        kvm_io_bus_unregister_dev(vpic->kvm, KVM_PIO_BUS, &vpic->dev_master);
        kvm_io_bus_unregister_dev(vpic->kvm, KVM_PIO_BUS, &vpic->dev_slave);
        kvm_io_bus_unregister_dev(vpic->kvm, KVM_PIO_BUS, &vpic->dev_eclr);
index 6e219e5c07d27c5dc41786953b1114b1e475e346..289270a6aecbb478ea14cc786c72fcfdf5058350 100644 (file)
@@ -635,6 +635,9 @@ void kvm_ioapic_destroy(struct kvm *kvm)
 {
        struct kvm_ioapic *ioapic = kvm->arch.vioapic;
 
+       if (!ioapic)
+               return;
+
        cancel_delayed_work_sync(&ioapic->eoi_inject);
        kvm_io_bus_unregister_dev(kvm, KVM_MMIO_BUS, &ioapic->dev);
        kvm->arch.vioapic = NULL;
index 37942e419c32e599a4ba05d3b75a77680f0065d9..60168cdd05463e2e18c993e20dfdeed7986808ce 100644 (file)
@@ -160,6 +160,14 @@ bool kvm_page_track_is_active(struct kvm_vcpu *vcpu, gfn_t gfn,
        return !!ACCESS_ONCE(slot->arch.gfn_track[mode][index]);
 }
 
+void kvm_page_track_cleanup(struct kvm *kvm)
+{
+       struct kvm_page_track_notifier_head *head;
+
+       head = &kvm->arch.track_notifier_head;
+       cleanup_srcu_struct(&head->track_srcu);
+}
+
 void kvm_page_track_init(struct kvm *kvm)
 {
        struct kvm_page_track_notifier_head *head;
index d1efe2c62b3f8d0db7392970cdfd8e018dd3ac06..5fba70646c327941d231ab6dfa459e8a8536a115 100644 (file)
@@ -1379,6 +1379,9 @@ static void avic_vm_destroy(struct kvm *kvm)
        unsigned long flags;
        struct kvm_arch *vm_data = &kvm->arch;
 
+       if (!avic)
+               return;
+
        avic_free_vm_id(vm_data->avic_vm_id);
 
        if (vm_data->avic_logical_id_table_page)
index 283aa8601833509b9cf792b919dd3f243a78f389..2ee00dbbbd5188ccd51324085d51b0de679666bb 100644 (file)
@@ -1239,6 +1239,11 @@ static inline bool cpu_has_vmx_invvpid_global(void)
        return vmx_capability.vpid & VMX_VPID_EXTENT_GLOBAL_CONTEXT_BIT;
 }
 
+static inline bool cpu_has_vmx_invvpid(void)
+{
+       return vmx_capability.vpid & VMX_VPID_INVVPID_BIT;
+}
+
 static inline bool cpu_has_vmx_ept(void)
 {
        return vmcs_config.cpu_based_2nd_exec_ctrl &
@@ -2753,7 +2758,6 @@ static void nested_vmx_setup_ctls_msrs(struct vcpu_vmx *vmx)
                SECONDARY_EXEC_RDTSCP |
                SECONDARY_EXEC_DESC |
                SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE |
-               SECONDARY_EXEC_ENABLE_VPID |
                SECONDARY_EXEC_APIC_REGISTER_VIRT |
                SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY |
                SECONDARY_EXEC_WBINVD_EXITING |
@@ -2781,10 +2785,12 @@ static void nested_vmx_setup_ctls_msrs(struct vcpu_vmx *vmx)
         * though it is treated as global context.  The alternative is
         * not failing the single-context invvpid, and it is worse.
         */
-       if (enable_vpid)
+       if (enable_vpid) {
+               vmx->nested.nested_vmx_secondary_ctls_high |=
+                       SECONDARY_EXEC_ENABLE_VPID;
                vmx->nested.nested_vmx_vpid_caps = VMX_VPID_INVVPID_BIT |
                        VMX_VPID_EXTENT_SUPPORTED_MASK;
-       else
+       } else
                vmx->nested.nested_vmx_vpid_caps = 0;
 
        if (enable_unrestricted_guest)
@@ -4024,6 +4030,12 @@ static void vmx_flush_tlb(struct kvm_vcpu *vcpu)
        __vmx_flush_tlb(vcpu, to_vmx(vcpu)->vpid);
 }
 
+static void vmx_flush_tlb_ept_only(struct kvm_vcpu *vcpu)
+{
+       if (enable_ept)
+               vmx_flush_tlb(vcpu);
+}
+
 static void vmx_decache_cr0_guest_bits(struct kvm_vcpu *vcpu)
 {
        ulong cr0_guest_owned_bits = vcpu->arch.cr0_guest_owned_bits;
@@ -6517,8 +6529,10 @@ static __init int hardware_setup(void)
        if (boot_cpu_has(X86_FEATURE_NX))
                kvm_enable_efer_bits(EFER_NX);
 
-       if (!cpu_has_vmx_vpid())
+       if (!cpu_has_vmx_vpid() || !cpu_has_vmx_invvpid() ||
+               !(cpu_has_vmx_invvpid_single() || cpu_has_vmx_invvpid_global()))
                enable_vpid = 0;
+
        if (!cpu_has_vmx_shadow_vmcs())
                enable_shadow_vmcs = 0;
        if (enable_shadow_vmcs)
@@ -7258,9 +7272,8 @@ static int handle_vmoff(struct kvm_vcpu *vcpu)
 static int handle_vmclear(struct kvm_vcpu *vcpu)
 {
        struct vcpu_vmx *vmx = to_vmx(vcpu);
+       u32 zero = 0;
        gpa_t vmptr;
-       struct vmcs12 *vmcs12;
-       struct page *page;
 
        if (!nested_vmx_check_permission(vcpu))
                return 1;
@@ -7271,22 +7284,9 @@ static int handle_vmclear(struct kvm_vcpu *vcpu)
        if (vmptr == vmx->nested.current_vmptr)
                nested_release_vmcs12(vmx);
 
-       page = nested_get_page(vcpu, vmptr);
-       if (page == NULL) {
-               /*
-                * For accurate processor emulation, VMCLEAR beyond available
-                * physical memory should do nothing at all. However, it is
-                * possible that a nested vmx bug, not a guest hypervisor bug,
-                * resulted in this case, so let's shut down before doing any
-                * more damage:
-                */
-               kvm_make_request(KVM_REQ_TRIPLE_FAULT, vcpu);
-               return 1;
-       }
-       vmcs12 = kmap(page);
-       vmcs12->launch_state = 0;
-       kunmap(page);
-       nested_release_page(page);
+       kvm_vcpu_write_guest(vcpu,
+                       vmptr + offsetof(struct vmcs12, launch_state),
+                       &zero, sizeof(zero));
 
        nested_free_vmcs02(vmx, vmptr);
 
@@ -8515,7 +8515,8 @@ static int vmx_handle_exit(struct kvm_vcpu *vcpu)
            && kvm_vmx_exit_handlers[exit_reason])
                return kvm_vmx_exit_handlers[exit_reason](vcpu);
        else {
-               WARN_ONCE(1, "vmx: unexpected exit reason 0x%x\n", exit_reason);
+               vcpu_unimpl(vcpu, "vmx: unexpected exit reason 0x%x\n",
+                               exit_reason);
                kvm_queue_exception(vcpu, UD_VECTOR);
                return 1;
        }
@@ -8561,6 +8562,7 @@ static void vmx_set_virtual_x2apic_mode(struct kvm_vcpu *vcpu, bool set)
        } else {
                sec_exec_control &= ~SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE;
                sec_exec_control |= SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES;
+               vmx_flush_tlb_ept_only(vcpu);
        }
        vmcs_write32(SECONDARY_VM_EXEC_CONTROL, sec_exec_control);
 
@@ -8586,8 +8588,10 @@ static void vmx_set_apic_access_page_addr(struct kvm_vcpu *vcpu, hpa_t hpa)
         */
        if (!is_guest_mode(vcpu) ||
            !nested_cpu_has2(get_vmcs12(&vmx->vcpu),
-                            SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES))
+                            SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES)) {
                vmcs_write64(APIC_ACCESS_ADDR, hpa);
+               vmx_flush_tlb_ept_only(vcpu);
+       }
 }
 
 static void vmx_hwapic_isr_update(struct kvm_vcpu *vcpu, int max_isr)
@@ -9694,10 +9698,8 @@ static inline bool nested_vmx_merge_msr_bitmap(struct kvm_vcpu *vcpu,
                return false;
 
        page = nested_get_page(vcpu, vmcs12->msr_bitmap);
-       if (!page) {
-               WARN_ON(1);
+       if (!page)
                return false;
-       }
        msr_bitmap_l1 = (unsigned long *)kmap(page);
 
        memset(msr_bitmap_l0, 0xff, PAGE_SIZE);
@@ -9990,7 +9992,6 @@ static int prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12,
 {
        struct vcpu_vmx *vmx = to_vmx(vcpu);
        u32 exec_control;
-       bool nested_ept_enabled = false;
 
        vmcs_write16(GUEST_ES_SELECTOR, vmcs12->guest_es_selector);
        vmcs_write16(GUEST_CS_SELECTOR, vmcs12->guest_cs_selector);
@@ -10137,8 +10138,6 @@ static int prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12,
                                vmcs12->guest_intr_status);
                }
 
-               nested_ept_enabled = (exec_control & SECONDARY_EXEC_ENABLE_EPT) != 0;
-
                /*
                 * Write an illegal value to APIC_ACCESS_ADDR. Later,
                 * nested_get_vmcs12_pages will either fix it up or
@@ -10271,6 +10270,9 @@ static int prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12,
        if (nested_cpu_has_ept(vmcs12)) {
                kvm_mmu_unload(vcpu);
                nested_ept_init_mmu_context(vcpu);
+       } else if (nested_cpu_has2(vmcs12,
+                                  SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES)) {
+               vmx_flush_tlb_ept_only(vcpu);
        }
 
        /*
@@ -10298,12 +10300,10 @@ static int prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12,
        vmx_set_efer(vcpu, vcpu->arch.efer);
 
        /* Shadow page tables on either EPT or shadow page tables. */
-       if (nested_vmx_load_cr3(vcpu, vmcs12->guest_cr3, nested_ept_enabled,
+       if (nested_vmx_load_cr3(vcpu, vmcs12->guest_cr3, nested_cpu_has_ept(vmcs12),
                                entry_failure_code))
                return 1;
 
-       kvm_mmu_reset_context(vcpu);
-
        if (!enable_ept)
                vcpu->arch.walk_mmu->inject_page_fault = vmx_inject_page_fault_nested;
 
@@ -11072,6 +11072,10 @@ static void nested_vmx_vmexit(struct kvm_vcpu *vcpu, u32 exit_reason,
                vmx->nested.change_vmcs01_virtual_x2apic_mode = false;
                vmx_set_virtual_x2apic_mode(vcpu,
                                vcpu->arch.apic_base & X2APIC_ENABLE);
+       } else if (!nested_cpu_has_ept(vmcs12) &&
+                  nested_cpu_has2(vmcs12,
+                                  SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES)) {
+               vmx_flush_tlb_ept_only(vcpu);
        }
 
        /* This is needed for same reason as it was needed in prepare_vmcs02 */
@@ -11121,8 +11125,10 @@ static void nested_vmx_vmexit(struct kvm_vcpu *vcpu, u32 exit_reason,
  */
 static void vmx_leave_nested(struct kvm_vcpu *vcpu)
 {
-       if (is_guest_mode(vcpu))
+       if (is_guest_mode(vcpu)) {
+               to_vmx(vcpu)->nested.nested_run_pending = 0;
                nested_vmx_vmexit(vcpu, -1, 0, 0);
+       }
        free_nested(to_vmx(vcpu));
 }
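
hardware_setup() now keeps enable_vpid set only when the CPU advertises INVVPID itself plus at least one usable flush scope. A minimal user-space sketch of that gating (the bit values are assumed to mirror arch/x86/include/asm/vmx.h):

#include <stdbool.h>
#include <stdio.h>

/* Assumed to match the kernel's vmx.h definitions. */
#define VMX_VPID_INVVPID_BIT                (1u << 0)
#define VMX_VPID_EXTENT_SINGLE_CONTEXT_BIT  (1u << 9)
#define VMX_VPID_EXTENT_GLOBAL_CONTEXT_BIT  (1u << 10)

static bool vpid_usable(unsigned int vpid_caps)
{
        /* VPID stays enabled only if INVVPID exists and at least one
         * of the single-context or global-context flush types does. */
        if (!(vpid_caps & VMX_VPID_INVVPID_BIT))
                return false;
        return (vpid_caps & (VMX_VPID_EXTENT_SINGLE_CONTEXT_BIT |
                             VMX_VPID_EXTENT_GLOBAL_CONTEXT_BIT)) != 0;
}

int main(void)
{
        /* INVVPID with only a global-context scope: usable. */
        printf("%d\n", vpid_usable(VMX_VPID_INVVPID_BIT |
                                   VMX_VPID_EXTENT_GLOBAL_CONTEXT_BIT));
        /* No INVVPID at all: VPID must be disabled. */
        printf("%d\n", vpid_usable(0));
        return 0;
}
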
 
index 1faf620a6fdc206705a03357d3a8ec5814b2c790..ccbd45ecd41a3fa5c850cf924cccbb2723aecc92 100644 (file)
@@ -8153,11 +8153,12 @@ void kvm_arch_destroy_vm(struct kvm *kvm)
        if (kvm_x86_ops->vm_destroy)
                kvm_x86_ops->vm_destroy(kvm);
        kvm_iommu_unmap_guest(kvm);
-       kfree(kvm->arch.vpic);
-       kfree(kvm->arch.vioapic);
+       kvm_pic_destroy(kvm);
+       kvm_ioapic_destroy(kvm);
        kvm_free_vcpus(kvm);
        kvfree(rcu_dereference_check(kvm->arch.apic_map, 1));
        kvm_mmu_uninit_vm(kvm);
+       kvm_page_track_cleanup(kvm);
 }
 
 void kvm_arch_free_memslot(struct kvm *kvm, struct kvm_memory_slot *free,
@@ -8566,11 +8567,11 @@ void kvm_arch_async_page_present(struct kvm_vcpu *vcpu,
 {
        struct x86_exception fault;
 
-       trace_kvm_async_pf_ready(work->arch.token, work->gva);
        if (work->wakeup_all)
                work->arch.token = ~0; /* broadcast wakeup */
        else
                kvm_del_async_pf_gfn(vcpu, work->arch.gfn);
+       trace_kvm_async_pf_ready(work->arch.token, work->gva);
 
        if ((vcpu->arch.apf.msr_val & KVM_ASYNC_PF_ENABLED) &&
            !apf_put_user(vcpu, KVM_PV_REASON_PAGE_READY)) {
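
Moving the tracepoint below the token rewrite means the trace records the token the guest will actually observe, ~0 for a broadcast wakeup rather than the stale per-work value. A minimal model of why the order matters (names are stand-ins, not kernel API):

#include <stdio.h>

struct work { unsigned int token; int wakeup_all; };

static void trace_ready(unsigned int token)     /* tracepoint stand-in */
{
        printf("async_pf ready, token=%#x\n", token);
}

static void page_ready(struct work *w)
{
        /* Rewrite the token first, as the hunk above now does, so the
         * trace reports the value that is actually delivered. */
        if (w->wakeup_all)
                w->token = ~0u;
        trace_ready(w->token);
}

int main(void)
{
        struct work w = { .token = 0x42, .wakeup_all = 1 };

        page_ready(&w);         /* logs ~0, the broadcast token */
        return 0;
}
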
index 779782f5832476582becc24e5a0f0f5b10ea0b53..9a53a06e5a3efcb62f9563a6161fd98bbc22d617 100644 (file)
@@ -290,7 +290,7 @@ EXPORT_SYMBOL_GPL(memcpy_mcsafe_unrolled)
        _ASM_EXTABLE_FAULT(.L_copy_leading_bytes, .L_memcpy_mcsafe_fail)
        _ASM_EXTABLE_FAULT(.L_cache_w0, .L_memcpy_mcsafe_fail)
        _ASM_EXTABLE_FAULT(.L_cache_w1, .L_memcpy_mcsafe_fail)
-       _ASM_EXTABLE_FAULT(.L_cache_w3, .L_memcpy_mcsafe_fail)
+       _ASM_EXTABLE_FAULT(.L_cache_w2, .L_memcpy_mcsafe_fail)
        _ASM_EXTABLE_FAULT(.L_cache_w3, .L_memcpy_mcsafe_fail)
        _ASM_EXTABLE_FAULT(.L_cache_w4, .L_memcpy_mcsafe_fail)
        _ASM_EXTABLE_FAULT(.L_cache_w5, .L_memcpy_mcsafe_fail)
index 99c7805a96937c17fffa7b92eb72a8b8c776ccbb..1f3b6ef105cda5732146fa6121c35f75ada9c0f5 100644 (file)
@@ -106,32 +106,35 @@ static noinline int gup_pte_range(pmd_t pmd, unsigned long addr,
                unsigned long end, int write, struct page **pages, int *nr)
 {
        struct dev_pagemap *pgmap = NULL;
-       int nr_start = *nr;
-       pte_t *ptep;
+       int nr_start = *nr, ret = 0;
+       pte_t *ptep, *ptem;
 
-       ptep = pte_offset_map(&pmd, addr);
+       /*
+        * Keep the original mapped PTE value (ptem) around since we
+        * might increment ptep off the end of the page when finishing
+        * our loop iteration.
+        */
+       ptem = ptep = pte_offset_map(&pmd, addr);
        do {
                pte_t pte = gup_get_pte(ptep);
                struct page *page;
 
                /* Similar to the PMD case, NUMA hinting must take slow path */
-               if (pte_protnone(pte)) {
-                       pte_unmap(ptep);
-                       return 0;
-               }
+               if (pte_protnone(pte))
+                       break;
+
+               if (!pte_allows_gup(pte_val(pte), write))
+                       break;
 
                if (pte_devmap(pte)) {
                        pgmap = get_dev_pagemap(pte_pfn(pte), pgmap);
                        if (unlikely(!pgmap)) {
                                undo_dev_pagemap(nr, nr_start, pages);
-                               pte_unmap(ptep);
-                               return 0;
+                               break;
                        }
-               } else if (!pte_allows_gup(pte_val(pte), write) ||
-                          pte_special(pte)) {
-                       pte_unmap(ptep);
-                       return 0;
-               }
+               } else if (pte_special(pte))
+                       break;
+
                VM_BUG_ON(!pfn_valid(pte_pfn(pte)));
                page = pte_page(pte);
                get_page(page);
@@ -141,9 +144,11 @@ static noinline int gup_pte_range(pmd_t pmd, unsigned long addr,
                (*nr)++;
 
        } while (ptep++, addr += PAGE_SIZE, addr != end);
-       pte_unmap(ptep - 1);
+       if (addr == end)
+               ret = 1;
+       pte_unmap(ptem);
 
-       return 1;
+       return ret;
 }
 
 static inline void get_head_page_multiple(struct page *page, int nr)
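
The restructured walk funnels every early exit through one break/cleanup path and unmaps the originally mapped PTE (ptem); ptep - 1 is no longer right once the loop can stop anywhere. A small user-space sketch of the saved-base pattern (names are illustrative):

#include <stdio.h>

/* Walk a range with a moving cursor, but clean up using the saved
 * starting pointer, the way gup_pte_range() now unmaps ptem. */
static int scan(const int *base, int n)
{
        const int *cur = base;
        const int *mapped = base;       /* remember what was "mapped" */
        int ok = 0;

        for (; cur < base + n; cur++) {
                if (*cur < 0)
                        break;          /* bail out mid-walk */
        }
        if (cur == base + n)
                ok = 1;                 /* completed the whole range */

        /* single exit: release the saved base, not the moved cursor */
        printf("unmap %p after visiting %td entries\n",
               (void *)mapped, cur - base);
        return ok;
}

int main(void)
{
        const int ptes[4] = { 1, 2, -1, 4 };

        printf("walked whole range: %d\n", scan(ptes, 4));
        return 0;
}
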
index 8d63d7a104c3c445805dcf24a59fff2756a17b01..4c90cfdc128b832c6065cdb8830f89d16bff63dd 100644 (file)
@@ -1,3 +1,4 @@
+#define DISABLE_BRANCH_PROFILING
 #define pr_fmt(fmt) "kasan: " fmt
 #include <linux/bootmem.h>
 #include <linux/kasan.h>
index 887e57182716828b7f4f4946fe7145d106ec5bea..aed206475aa7c04892443646efa6e88a9e5f4d24 100644 (file)
@@ -48,7 +48,7 @@ static const unsigned long vaddr_start = __PAGE_OFFSET_BASE;
 #if defined(CONFIG_X86_ESPFIX64)
 static const unsigned long vaddr_end = ESPFIX_BASE_ADDR;
 #elif defined(CONFIG_EFI)
-static const unsigned long vaddr_end = EFI_VA_START;
+static const unsigned long vaddr_end = EFI_VA_END;
 #else
 static const unsigned long vaddr_end = __START_KERNEL_map;
 #endif
@@ -105,7 +105,7 @@ void __init kernel_randomize_memory(void)
         */
        BUILD_BUG_ON(vaddr_start >= vaddr_end);
        BUILD_BUG_ON(IS_ENABLED(CONFIG_X86_ESPFIX64) &&
-                    vaddr_end >= EFI_VA_START);
+                    vaddr_end >= EFI_VA_END);
        BUILD_BUG_ON((IS_ENABLED(CONFIG_X86_ESPFIX64) ||
                      IS_ENABLED(CONFIG_EFI)) &&
                     vaddr_end >= __START_KERNEL_map);
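
EFI_VA_START sits numerically above EFI_VA_END because the EFI mapping region grows downward, so bounding randomization at EFI_VA_START left the whole region inside the randomization window. A quick sanity sketch; the constants are assumed to mirror arch/x86/include/asm/efi.h and are illustrative here:

#include <assert.h>
#include <stdio.h>

#define EFI_VA_START   (-4UL  * (1UL << 30))   /* top of the region */
#define EFI_VA_END     (-68UL * (1UL << 30))   /* bottom of the region */

int main(void)
{
        /* The randomized window must stop at the *bottom* of the EFI
         * region; stopping at EFI_VA_START would let randomized
         * mappings land inside (EFI_VA_END, EFI_VA_START]. */
        assert(EFI_VA_END < EFI_VA_START);
        printf("randomization may use addresses below %#lx\n", EFI_VA_END);
        return 0;
}
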
index 5126dfd52b182dd66471a49a0464eb2411fbc7cd..cd44ae727df7f48ceba7fad00591c48cec151896 100644 (file)
@@ -590,7 +590,7 @@ static unsigned long mpx_bd_entry_to_bt_addr(struct mm_struct *mm,
  * we might run off the end of the bounds table if we are on
  * a 64-bit kernel and try to get 8 bytes.
  */
-int get_user_bd_entry(struct mm_struct *mm, unsigned long *bd_entry_ret,
+static int get_user_bd_entry(struct mm_struct *mm, unsigned long *bd_entry_ret,
                long __user *bd_entry_ptr)
 {
        u32 bd_entry_32;
index 0cb52ae0a8f07521ee1cdf6a1075a4221f920884..190e718694b1720df737afdd9688ded962be6014 100644 (file)
@@ -735,6 +735,15 @@ void pcibios_disable_device (struct pci_dev *dev)
                pcibios_disable_irq(dev);
 }
 
+#ifdef CONFIG_ACPI_HOTPLUG_IOAPIC
+void pcibios_release_device(struct pci_dev *dev)
+{
+       if (atomic_dec_return(&dev->enable_cnt) >= 0)
+               pcibios_disable_device(dev);
+
+}
+#endif
+
 int pci_ext_cfg_avail(void)
 {
        if (raw_pci_ext_ops)
index e1fb269c87af7b39f1445e01734e76f431982f58..292ab0364a89af9aa6bc93a2ad79a88d00fbad9d 100644 (file)
@@ -234,23 +234,14 @@ static int xen_hvm_setup_msi_irqs(struct pci_dev *dev, int nvec, int type)
                return 1;
 
        for_each_pci_msi_entry(msidesc, dev) {
-               __pci_read_msi_msg(msidesc, &msg);
-               pirq = MSI_ADDR_EXT_DEST_ID(msg.address_hi) |
-                       ((msg.address_lo >> MSI_ADDR_DEST_ID_SHIFT) & 0xff);
-               if (msg.data != XEN_PIRQ_MSI_DATA ||
-                   xen_irq_from_pirq(pirq) < 0) {
-                       pirq = xen_allocate_pirq_msi(dev, msidesc);
-                       if (pirq < 0) {
-                               irq = -ENODEV;
-                               goto error;
-                       }
-                       xen_msi_compose_msg(dev, pirq, &msg);
-                       __pci_write_msi_msg(msidesc, &msg);
-                       dev_dbg(&dev->dev, "xen: msi bound to pirq=%d\n", pirq);
-               } else {
-                       dev_dbg(&dev->dev,
-                               "xen: msi already bound to pirq=%d\n", pirq);
+               pirq = xen_allocate_pirq_msi(dev, msidesc);
+               if (pirq < 0) {
+                       irq = -ENODEV;
+                       goto error;
                }
+               xen_msi_compose_msg(dev, pirq, &msg);
+               __pci_write_msi_msg(msidesc, &msg);
+               dev_dbg(&dev->dev, "xen: msi bound to pirq=%d\n", pirq);
                irq = xen_bind_pirq_msi_to_irq(dev, msidesc, pirq,
                                               (type == PCI_CAP_ID_MSI) ? nvec : 1,
                                               (type == PCI_CAP_ID_MSIX) ?
index a7dbec4dce2758261c6e1680b7ed825e5e44a9d1..3dbde04febdccab382bc47ccba53b422ac7c72ea 100644 (file)
@@ -26,5 +26,6 @@ obj-$(subst m,y,$(CONFIG_GPIO_PCA953X)) += platform_pcal9555a.o
 obj-$(subst m,y,$(CONFIG_GPIO_PCA953X)) += platform_tca6416.o
 # MISC Devices
 obj-$(subst m,y,$(CONFIG_KEYBOARD_GPIO)) += platform_gpio_keys.o
+obj-$(subst m,y,$(CONFIG_INTEL_MID_POWER_BUTTON)) += platform_mrfld_power_btn.o
 obj-$(subst m,y,$(CONFIG_RTC_DRV_CMOS)) += platform_mrfld_rtc.o
 obj-$(subst m,y,$(CONFIG_INTEL_MID_WATCHDOG)) += platform_mrfld_wdt.o
diff --git a/arch/x86/platform/intel-mid/device_libs/platform_mrfld_power_btn.c b/arch/x86/platform/intel-mid/device_libs/platform_mrfld_power_btn.c
new file mode 100644 (file)
index 0000000..a6c3705
--- /dev/null
@@ -0,0 +1,82 @@
+/*
+ * Intel Merrifield power button support
+ *
+ * (C) Copyright 2017 Intel Corporation
+ *
+ * Author: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; version 2
+ * of the License.
+ */
+
+#include <linux/init.h>
+#include <linux/ioport.h>
+#include <linux/platform_device.h>
+#include <linux/sfi.h>
+
+#include <asm/intel-mid.h>
+#include <asm/intel_scu_ipc.h>
+
+static struct resource mrfld_power_btn_resources[] = {
+       {
+               .flags          = IORESOURCE_IRQ,
+       },
+};
+
+static struct platform_device mrfld_power_btn_dev = {
+       .name           = "msic_power_btn",
+       .id             = PLATFORM_DEVID_NONE,
+       .num_resources  = ARRAY_SIZE(mrfld_power_btn_resources),
+       .resource       = mrfld_power_btn_resources,
+};
+
+static int mrfld_power_btn_scu_status_change(struct notifier_block *nb,
+                                            unsigned long code, void *data)
+{
+       if (code == SCU_DOWN) {
+               platform_device_unregister(&mrfld_power_btn_dev);
+               return 0;
+       }
+
+       return platform_device_register(&mrfld_power_btn_dev);
+}
+
+static struct notifier_block mrfld_power_btn_scu_notifier = {
+       .notifier_call  = mrfld_power_btn_scu_status_change,
+};
+
+static int __init register_mrfld_power_btn(void)
+{
+       if (intel_mid_identify_cpu() != INTEL_MID_CPU_CHIP_TANGIER)
+               return -ENODEV;
+
+       /*
+        * We need to be sure that the SCU IPC is ready before
+        * PMIC power button device can be registered:
+        */
+       intel_scu_notifier_add(&mrfld_power_btn_scu_notifier);
+
+       return 0;
+}
+arch_initcall(register_mrfld_power_btn);
+
+static void __init *mrfld_power_btn_platform_data(void *info)
+{
+       struct resource *res = mrfld_power_btn_resources;
+       struct sfi_device_table_entry *pentry = info;
+
+       res->start = res->end = pentry->irq;
+       return NULL;
+}
+
+static const struct devs_id mrfld_power_btn_dev_id __initconst = {
+       .name                   = "bcove_power_btn",
+       .type                   = SFI_DEV_TYPE_IPC,
+       .delay                  = 1,
+       .msic                   = 1,
+       .get_platform_data      = &mrfld_power_btn_platform_data,
+};
+
+sfi_device(mrfld_power_btn_dev_id);
index 86edd1e941eb07bc46187024ae332409c6924073..9e304e2ea4f55c456e7f0037a8963f6586ad2b19 100644 (file)
@@ -19,7 +19,7 @@
 #include <asm/intel_scu_ipc.h>
 #include <asm/io_apic.h>
 
-#define TANGIER_EXT_TIMER0_MSI 15
+#define TANGIER_EXT_TIMER0_MSI 12
 
 static struct platform_device wdt_dev = {
        .name = "intel_mid_wdt",
index e793fe509971f49fb2cfa6a12f8b365a937ae206..e42978d4deafeb184ea8595eb0cf3ef54ceb62bc 100644 (file)
 
 #include "intel_mid_weak_decls.h"
 
-static void penwell_arch_setup(void);
-/* penwell arch ops */
-static struct intel_mid_ops penwell_ops = {
-       .arch_setup = penwell_arch_setup,
-};
-
-static void mfld_power_off(void)
-{
-}
-
 static unsigned long __init mfld_calibrate_tsc(void)
 {
        unsigned long fast_calibrate;
@@ -63,9 +53,12 @@ static unsigned long __init mfld_calibrate_tsc(void)
 static void __init penwell_arch_setup(void)
 {
        x86_platform.calibrate_tsc = mfld_calibrate_tsc;
-       pm_power_off = mfld_power_off;
 }
 
+static struct intel_mid_ops penwell_ops = {
+       .arch_setup = penwell_arch_setup,
+};
+
 void *get_penwell_ops(void)
 {
        return &penwell_ops;
index 766d4d3529a1d946e36d4186e4c48da5d90b75cc..f25982cdff9006960d9e354d132ff01df717ad9b 100644 (file)
@@ -1847,7 +1847,6 @@ static void pq_init(int node, int pnode)
 
        ops.write_payload_first(pnode, first);
        ops.write_payload_last(pnode, last);
-       ops.write_g_sw_ack(pnode, 0xffffUL);
 
        /* in effect, all msg_type's are set to MSG_NOOP */
        memset(pqp, 0, sizeof(struct bau_pq_entry) * DEST_Q_SIZE);
index 555b9fa0ad43cbd4148b2fb268692d4b2de167c4..7dbdb780264df9258d98829f8a11496cd58bb7ab 100644 (file)
@@ -8,6 +8,7 @@ PURGATORY_OBJS = $(addprefix $(obj)/,$(purgatory-y))
 LDFLAGS_purgatory.ro := -e purgatory_start -r --no-undefined -nostdlib -z nodefaultlib
 targets += purgatory.ro
 
+KASAN_SANITIZE := n
 KCOV_INSTRUMENT := n
 
 # Default KBUILD_CFLAGS can have -pg option set when FTRACE is enabled. That
index 25e068ba338214826413265b26ccd9179e10b7f4..470edad96bb9560a218affd4c0922888f9200dba 100644 (file)
  * Version 2.  See the file COPYING for more details.
  */
 
+#include <linux/bug.h>
+#include <asm/purgatory.h>
+
 #include "sha256.h"
 #include "../boot/string.h"
 
-struct sha_region {
-       unsigned long start;
-       unsigned long len;
-};
-
-unsigned long backup_dest = 0;
-unsigned long backup_src = 0;
-unsigned long backup_sz = 0;
+unsigned long purgatory_backup_dest __section(.kexec-purgatory);
+unsigned long purgatory_backup_src __section(.kexec-purgatory);
+unsigned long purgatory_backup_sz __section(.kexec-purgatory);
 
-u8 sha256_digest[SHA256_DIGEST_SIZE] = { 0 };
+u8 purgatory_sha256_digest[SHA256_DIGEST_SIZE] __section(.kexec-purgatory);
 
-struct sha_region sha_regions[16] = {};
+struct kexec_sha_region purgatory_sha_regions[KEXEC_SEGMENT_MAX] __section(.kexec-purgatory);
 
 /*
  * On x86, second kernel requires first 640K of memory to boot. Copy
@@ -33,26 +31,28 @@ struct sha_region sha_regions[16] = {};
  */
 static int copy_backup_region(void)
 {
-       if (backup_dest)
-               memcpy((void *)backup_dest, (void *)backup_src, backup_sz);
-
+       if (purgatory_backup_dest) {
+               memcpy((void *)purgatory_backup_dest,
+                      (void *)purgatory_backup_src, purgatory_backup_sz);
+       }
        return 0;
 }
 
-int verify_sha256_digest(void)
+static int verify_sha256_digest(void)
 {
-       struct sha_region *ptr, *end;
+       struct kexec_sha_region *ptr, *end;
        u8 digest[SHA256_DIGEST_SIZE];
        struct sha256_state sctx;
 
        sha256_init(&sctx);
-       end = &sha_regions[sizeof(sha_regions)/sizeof(sha_regions[0])];
-       for (ptr = sha_regions; ptr < end; ptr++)
+       end = purgatory_sha_regions + ARRAY_SIZE(purgatory_sha_regions);
+
+       for (ptr = purgatory_sha_regions; ptr < end; ptr++)
                sha256_update(&sctx, (uint8_t *)(ptr->start), ptr->len);
 
        sha256_final(&sctx, digest);
 
-       if (memcmp(digest, sha256_digest, sizeof(digest)))
+       if (memcmp(digest, purgatory_sha256_digest, sizeof(digest)))
                return 1;
 
        return 0;
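
The rewritten loop derives its end pointer from ARRAY_SIZE() rather than the hand-expanded sizeof division it replaces. The idiom in isolation (the region contents are made up):

#include <stdio.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

struct sha_region { unsigned long start, len; };

int main(void)
{
        struct sha_region regions[4] = { { 0x1000, 16 }, { 0x2000, 32 } };
        const struct sha_region *ptr;
        const struct sha_region *end = regions + ARRAY_SIZE(regions);
        unsigned long total = 0;

        /* Same shape as verify_sha256_digest(): walk every slot and
         * feed the populated ones to the hash (summed here instead). */
        for (ptr = regions; ptr < end; ptr++)
                total += ptr->len;

        printf("hashed %lu bytes\n", total);
        return 0;
}
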
index fe3c91ba1bd0c6fd0fe0c880510364dbda0fa5c5..dfae9b9e60b5ba01e62d92d442bd1276eee45ae7 100644 (file)
@@ -9,6 +9,7 @@
  * This source code is licensed under the GNU General Public License,
  * Version 2.  See the file COPYING for more details.
  */
+#include <asm/purgatory.h>
 
        .text
        .globl purgatory_start
index bd15a4127735e5f6ed9560b8dfb2503126f56e47..2867d9825a57e5f1f734bfb4a5777bc31810b090 100644 (file)
@@ -10,7 +10,6 @@
 #ifndef SHA256_H
 #define SHA256_H
 
-
 #include <linux/types.h>
 #include <crypto/sha.h>
 
index 976b1d70edbc0a2d77016409fec93ec597df9cad..4ddbfd57a7c824c7d05aaa3d6d8824a849d03628 100644 (file)
@@ -164,8 +164,21 @@ void copy_user_highpage(struct page *to, struct page *from,
 
 #define ARCH_PFN_OFFSET                (PHYS_OFFSET >> PAGE_SHIFT)
 
+#ifdef CONFIG_MMU
+static inline unsigned long ___pa(unsigned long va)
+{
+       unsigned long off = va - PAGE_OFFSET;
+
+       if (off >= XCHAL_KSEG_SIZE)
+               off -= XCHAL_KSEG_SIZE;
+
+       return off + PHYS_OFFSET;
+}
+#define __pa(x)        ___pa((unsigned long)(x))
+#else
 #define __pa(x)        \
        ((unsigned long) (x) - PAGE_OFFSET + PHYS_OFFSET)
+#endif
 #define __va(x)        \
        ((void *)((unsigned long) (x) - PHYS_OFFSET + PAGE_OFFSET))
 #define pfn_valid(pfn) \
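
The MMU variant of __pa() folds the uncached KSEG alias back onto the cached window before converting, since both virtual ranges name the same physical pages. A standalone sketch with assumed Xtensa constants (real values come from the core configuration):

#include <stdio.h>

#define PAGE_OFFSET     0xd0000000UL    /* cached KSEG */
#define XCHAL_KSEG_SIZE 0x08000000UL    /* 128 MiB; uncached alias above */
#define PHYS_OFFSET     0x00000000UL

static unsigned long pa(unsigned long va)
{
        unsigned long off = va - PAGE_OFFSET;

        /* Fold the uncached alias back onto the cached window before
         * applying the physical offset, as the new ___pa() does. */
        if (off >= XCHAL_KSEG_SIZE)
                off -= XCHAL_KSEG_SIZE;

        return off + PHYS_OFFSET;
}

int main(void)
{
        unsigned long cached = PAGE_OFFSET + 0x1234;
        unsigned long uncached = cached + XCHAL_KSEG_SIZE;

        /* Both views of the page resolve to one physical address. */
        printf("%#lx %#lx\n", pa(cached), pa(uncached));
        return 0;
}
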
index 8aa0e0d9cbb21f0c3703192a828dbbeaf4d475ca..30dd5b2e4ad5af403bdf794e57a58b1c3beef2e7 100644 (file)
@@ -11,6 +11,7 @@
 #ifndef _XTENSA_PGTABLE_H
 #define _XTENSA_PGTABLE_H
 
+#define __ARCH_USE_5LEVEL_HACK
 #include <asm-generic/pgtable-nopmd.h>
 #include <asm/page.h>
 #include <asm/kmem_layout.h>
index 9fdbe1fe0473802caaf04782f9a5c05ca813f013..1eb6d2fe70d3483aa191a3f7cf4d3ad3dfe497a9 100644 (file)
 
 #define SCM_TIMESTAMPING_OPT_STATS     54
 
+#define SO_MEMINFO             55
+
+#define SO_INCOMING_NAPI_ID    56
+
+#define SO_COOKIE              57
+
 #endif /* _XTENSA_SOCKET_H */
index cd400af4a6b25597756cda04826278fea75ecf33..6be7eb27fd29d68b7c3a13643f628b7f2ae40c57 100644 (file)
@@ -774,7 +774,10 @@ __SYSCALL(349, sys_pkey_alloc, 2)
 #define __NR_pkey_free                         350
 __SYSCALL(350, sys_pkey_free, 1)
 
-#define __NR_syscall_count                     351
+#define __NR_statx                             351
+__SYSCALL(351, sys_statx, 5)
+
+#define __NR_syscall_count                     352
 
 /*
  * sysxtensa syscall handler
index c82c43bff2968cd3bab83688bc8a362671792150..bae697a06a984536bc51ce21cb3e402d5cfbd065 100644 (file)
@@ -483,10 +483,8 @@ void show_regs(struct pt_regs * regs)
 
 static int show_trace_cb(struct stackframe *frame, void *data)
 {
-       if (kernel_text_address(frame->pc)) {
-               pr_cont(" [<%08lx>]", frame->pc);
-               print_symbol(" %s\n", frame->pc);
-       }
+       if (kernel_text_address(frame->pc))
+               pr_cont(" [<%08lx>] %pB\n", frame->pc, (void *)frame->pc);
        return 0;
 }
 
index 5eec5e08417f6ff1989e3e2a07b31c62901953d5..e75878f8b14af8f852d814717c3900759b0ed6fc 100644 (file)
@@ -376,10 +376,14 @@ static void punt_bios_to_rescuer(struct bio_set *bs)
        bio_list_init(&punt);
        bio_list_init(&nopunt);
 
-       while ((bio = bio_list_pop(current->bio_list)))
+       while ((bio = bio_list_pop(&current->bio_list[0])))
                bio_list_add(bio->bi_pool == bs ? &punt : &nopunt, bio);
+       current->bio_list[0] = nopunt;
 
-       *current->bio_list = nopunt;
+       bio_list_init(&nopunt);
+       while ((bio = bio_list_pop(&current->bio_list[1])))
+               bio_list_add(bio->bi_pool == bs ? &punt : &nopunt, bio);
+       current->bio_list[1] = nopunt;
 
        spin_lock(&bs->rescue_lock);
        bio_list_merge(&bs->rescue_list, &punt);
@@ -466,7 +470,9 @@ struct bio *bio_alloc_bioset(gfp_t gfp_mask, int nr_iovecs, struct bio_set *bs)
                 * we retry with the original gfp_flags.
                 */
 
-               if (current->bio_list && !bio_list_empty(current->bio_list))
+               if (current->bio_list &&
+                   (!bio_list_empty(&current->bio_list[0]) ||
+                    !bio_list_empty(&current->bio_list[1])))
                        gfp_mask &= ~__GFP_DIRECT_RECLAIM;
 
                p = mempool_alloc(bs->bio_pool, gfp_mask);
index 1086dac8724c995b85bf3c843a4aae080ab938a1..d772c221cc178bf3ecfe448f3367121ec1d077de 100644 (file)
@@ -578,8 +578,6 @@ void blk_cleanup_queue(struct request_queue *q)
                q->queue_lock = &q->__queue_lock;
        spin_unlock_irq(lock);
 
-       put_disk_devt(q->disk_devt);
-
        /* @q is and will stay empty, shutdown and put */
        blk_put_queue(q);
 }
@@ -1975,7 +1973,14 @@ end_io:
  */
 blk_qc_t generic_make_request(struct bio *bio)
 {
-       struct bio_list bio_list_on_stack;
+       /*
+        * bio_list_on_stack[0] contains bios submitted by the current
+        * make_request_fn.
+        * bio_list_on_stack[1] contains bios that were submitted before
+        * the current make_request_fn, but that haven't been processed
+        * yet.
+        */
+       struct bio_list bio_list_on_stack[2];
        blk_qc_t ret = BLK_QC_T_NONE;
 
        if (!generic_make_request_checks(bio))
@@ -1992,7 +1997,7 @@ blk_qc_t generic_make_request(struct bio *bio)
         * should be added at the tail
         */
        if (current->bio_list) {
-               bio_list_add(current->bio_list, bio);
+               bio_list_add(&current->bio_list[0], bio);
                goto out;
        }
 
@@ -2011,23 +2016,39 @@ blk_qc_t generic_make_request(struct bio *bio)
         * bio_list, and call into ->make_request() again.
         */
        BUG_ON(bio->bi_next);
-       bio_list_init(&bio_list_on_stack);
-       current->bio_list = &bio_list_on_stack;
+       bio_list_init(&bio_list_on_stack[0]);
+       current->bio_list = bio_list_on_stack;
        do {
                struct request_queue *q = bdev_get_queue(bio->bi_bdev);
 
                if (likely(blk_queue_enter(q, false) == 0)) {
+                       struct bio_list lower, same;
+
+                       /* Create a fresh bio_list for all subordinate requests */
+                       bio_list_on_stack[1] = bio_list_on_stack[0];
+                       bio_list_init(&bio_list_on_stack[0]);
                        ret = q->make_request_fn(q, bio);
 
                        blk_queue_exit(q);
 
-                       bio = bio_list_pop(current->bio_list);
+                       /* sort new bios into those for a lower level
+                        * and those for the same level
+                        */
+                       bio_list_init(&lower);
+                       bio_list_init(&same);
+                       while ((bio = bio_list_pop(&bio_list_on_stack[0])) != NULL)
+                               if (q == bdev_get_queue(bio->bi_bdev))
+                                       bio_list_add(&same, bio);
+                               else
+                                       bio_list_add(&lower, bio);
+                       /* now assemble so we handle the lowest level first */
+                       bio_list_merge(&bio_list_on_stack[0], &lower);
+                       bio_list_merge(&bio_list_on_stack[0], &same);
+                       bio_list_merge(&bio_list_on_stack[0], &bio_list_on_stack[1]);
                } else {
-                       struct bio *bio_next = bio_list_pop(current->bio_list);
-
                        bio_io_error(bio);
-                       bio = bio_next;
                }
+               bio = bio_list_pop(&bio_list_on_stack[0]);
        } while (bio);
        current->bio_list = NULL; /* deactivate */
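
The two-slot scheme above is what flattens recursive bio submission into iteration with a defined order: bios a make_request_fn emits are split into lower-level and same-level groups, lower levels drain first, and previously pending bios wait behind both. A compressed user-space model of that merge order (queues reduced to name prefixes, lists to fixed arrays):

#include <stdio.h>
#include <string.h>

/* Toy FIFO standing in for struct bio_list; capacity is arbitrary. */
struct list { const char *items[8]; int n; };

static void add(struct list *l, const char *s)
{
        l->items[l->n++] = s;
}

static void merge(struct list *dst, const struct list *src)
{
        for (int i = 0; i < src->n; i++)
                add(dst, src->items[i]);
}

int main(void)
{
        /* Bios emitted by the make_request_fn that just ran ... */
        struct list fresh = { { "lower-A", "same-B", "lower-C" }, 3 };
        /* ... and bios queued before this iteration (slot 1 above). */
        struct list pending = { { "old-D" }, 1 };
        struct list lower = { 0 }, same = { 0 }, out = { 0 };

        for (int i = 0; i < fresh.n; i++) {
                if (!strncmp(fresh.items[i], "lower", 5))
                        add(&lower, fresh.items[i]);
                else
                        add(&same, fresh.items[i]);
        }
        /* Lowest level first, then this level, then the backlog. */
        merge(&out, &lower);
        merge(&out, &same);
        merge(&out, &pending);

        for (int i = 0; i < out.n; i++)
                printf("%s\n", out.items[i]);
        return 0;
}

Running it prints lower-A, lower-C, same-B, old-D, the depth-first order the comment in the hunk describes.
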
 
index 295e69670c39343d058cbf7f67fc076f7249e94e..d745ab81033afa8510f6b2d8dbbbdc187693901a 100644 (file)
@@ -17,6 +17,15 @@ static void blk_mq_sysfs_release(struct kobject *kobj)
 {
 }
 
+static void blk_mq_hw_sysfs_release(struct kobject *kobj)
+{
+       struct blk_mq_hw_ctx *hctx = container_of(kobj, struct blk_mq_hw_ctx,
+                                                 kobj);
+       free_cpumask_var(hctx->cpumask);
+       kfree(hctx->ctxs);
+       kfree(hctx);
+}
+
 struct blk_mq_ctx_sysfs_entry {
        struct attribute attr;
        ssize_t (*show)(struct blk_mq_ctx *, char *);
@@ -200,7 +209,7 @@ static struct kobj_type blk_mq_ctx_ktype = {
 static struct kobj_type blk_mq_hw_ktype = {
        .sysfs_ops      = &blk_mq_hw_sysfs_ops,
        .default_attrs  = default_hw_ctx_attrs,
-       .release        = blk_mq_sysfs_release,
+       .release        = blk_mq_hw_sysfs_release,
 };
 
 static void blk_mq_unregister_hctx(struct blk_mq_hw_ctx *hctx)
@@ -242,24 +251,15 @@ static int blk_mq_register_hctx(struct blk_mq_hw_ctx *hctx)
 static void __blk_mq_unregister_dev(struct device *dev, struct request_queue *q)
 {
        struct blk_mq_hw_ctx *hctx;
-       struct blk_mq_ctx *ctx;
-       int i, j;
+       int i;
 
-       queue_for_each_hw_ctx(q, hctx, i) {
+       queue_for_each_hw_ctx(q, hctx, i)
                blk_mq_unregister_hctx(hctx);
 
-               hctx_for_each_ctx(hctx, ctx, j)
-                       kobject_put(&ctx->kobj);
-
-               kobject_put(&hctx->kobj);
-       }
-
        blk_mq_debugfs_unregister_hctxs(q);
 
        kobject_uevent(&q->mq_kobj, KOBJ_REMOVE);
        kobject_del(&q->mq_kobj);
-       kobject_put(&q->mq_kobj);
-
        kobject_put(&dev->kobj);
 
        q->mq_sysfs_init_done = false;
@@ -277,7 +277,19 @@ void blk_mq_hctx_kobj_init(struct blk_mq_hw_ctx *hctx)
        kobject_init(&hctx->kobj, &blk_mq_hw_ktype);
 }
 
-static void blk_mq_sysfs_init(struct request_queue *q)
+void blk_mq_sysfs_deinit(struct request_queue *q)
+{
+       struct blk_mq_ctx *ctx;
+       int cpu;
+
+       for_each_possible_cpu(cpu) {
+               ctx = per_cpu_ptr(q->queue_ctx, cpu);
+               kobject_put(&ctx->kobj);
+       }
+       kobject_put(&q->mq_kobj);
+}
+
+void blk_mq_sysfs_init(struct request_queue *q)
 {
        struct blk_mq_ctx *ctx;
        int cpu;
@@ -297,8 +309,6 @@ int blk_mq_register_dev(struct device *dev, struct request_queue *q)
 
        blk_mq_disable_hotplug();
 
-       blk_mq_sysfs_init(q);
-
        ret = kobject_add(&q->mq_kobj, kobject_get(&dev->kobj), "%s", "mq");
        if (ret < 0)
                goto out;
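
These hunks move the freeing of hctx->cpumask, hctx->ctxs and the hctx itself into the kobject release callback, so whoever drops the last reference frees the object and a sysfs reader can no longer outlive it. A minimal model of that release-on-last-put pattern (refcounting stripped to a bare integer, no locking):

#include <stdio.h>
#include <stdlib.h>

struct obj {
        int refs;
        void (*release)(struct obj *);
};

static void obj_put(struct obj *o)
{
        if (--o->refs == 0)
                o->release(o);
}

struct hctx {
        struct obj kobj;        /* first member: cast works below */
        int *ctxs;
};

static void hctx_release(struct obj *o)
{
        struct hctx *h = (struct hctx *)o;      /* container_of() stand-in */

        free(h->ctxs);
        free(h);
        puts("hctx freed in release()");
}

int main(void)
{
        struct hctx *h = malloc(sizeof(*h));

        h->kobj = (struct obj){ .refs = 2, .release = hctx_release };
        h->ctxs = malloc(4 * sizeof(int));

        obj_put(&h->kobj);      /* unregister: object must stay alive */
        obj_put(&h->kobj);      /* last user: now it is safe to free */
        return 0;
}
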
index e48bc2c72615de016f013a2e98ea72cd49713a04..9d97bfc4d4657b586d1a9b4d077a8e673300d79a 100644 (file)
@@ -295,6 +295,9 @@ int blk_mq_reinit_tagset(struct blk_mq_tag_set *set)
        for (i = 0; i < set->nr_hw_queues; i++) {
                struct blk_mq_tags *tags = set->tags[i];
 
+               if (!tags)
+                       continue;
+
                for (j = 0; j < tags->nr_tags; j++) {
                        if (!tags->static_rqs[j])
                                continue;
index b2fd175e84d79af071b28768e74de06cb673407b..6b6e7bc041dbf3c4699ca9bfa8e36c58a8f119d0 100644 (file)
@@ -697,17 +697,8 @@ static void blk_mq_check_expired(struct blk_mq_hw_ctx *hctx,
 {
        struct blk_mq_timeout_data *data = priv;
 
-       if (!test_bit(REQ_ATOM_STARTED, &rq->atomic_flags)) {
-               /*
-                * If a request wasn't started before the queue was
-                * marked dying, kill it here or it'll go unnoticed.
-                */
-               if (unlikely(blk_queue_dying(rq->q))) {
-                       rq->errors = -EIO;
-                       blk_mq_end_request(rq, rq->errors);
-               }
+       if (!test_bit(REQ_ATOM_STARTED, &rq->atomic_flags))
                return;
-       }
 
        if (time_after_eq(jiffies, rq->deadline)) {
                if (!blk_mark_rq_complete(rq))
@@ -978,7 +969,7 @@ bool blk_mq_dispatch_rq_list(struct blk_mq_hw_ctx *hctx, struct list_head *list)
        struct request *rq;
        LIST_HEAD(driver_list);
        struct list_head *dptr;
-       int queued, ret = BLK_MQ_RQ_QUEUE_OK;
+       int errors, queued, ret = BLK_MQ_RQ_QUEUE_OK;
 
        /*
         * Start off with dptr being NULL, so we start the first request
@@ -989,7 +980,7 @@ bool blk_mq_dispatch_rq_list(struct blk_mq_hw_ctx *hctx, struct list_head *list)
        /*
         * Now process all the entries, sending them to the driver.
         */
-       queued = 0;
+       errors = queued = 0;
        while (!list_empty(list)) {
                struct blk_mq_queue_data bd;
 
@@ -1046,6 +1037,7 @@ bool blk_mq_dispatch_rq_list(struct blk_mq_hw_ctx *hctx, struct list_head *list)
                default:
                        pr_err("blk-mq: bad return on queue: %d\n", ret);
                case BLK_MQ_RQ_QUEUE_ERROR:
+                       errors++;
                        rq->errors = -EIO;
                        blk_mq_end_request(rq, rq->errors);
                        break;
@@ -1097,7 +1089,7 @@ bool blk_mq_dispatch_rq_list(struct blk_mq_hw_ctx *hctx, struct list_head *list)
                        blk_mq_run_hw_queue(hctx, true);
        }
 
-       return queued != 0;
+       return (queued + errors) != 0;
 }
 
 static void __blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx)
@@ -1434,7 +1426,8 @@ static blk_qc_t request_to_qc_t(struct blk_mq_hw_ctx *hctx, struct request *rq)
        return blk_tag_to_qc_t(rq->internal_tag, hctx->queue_num, true);
 }
 
-static void blk_mq_try_issue_directly(struct request *rq, blk_qc_t *cookie)
+static void blk_mq_try_issue_directly(struct request *rq, blk_qc_t *cookie,
+                                     bool may_sleep)
 {
        struct request_queue *q = rq->q;
        struct blk_mq_queue_data bd = {
@@ -1475,7 +1468,7 @@ static void blk_mq_try_issue_directly(struct request *rq, blk_qc_t *cookie)
        }
 
 insert:
-       blk_mq_sched_insert_request(rq, false, true, true, false);
+       blk_mq_sched_insert_request(rq, false, true, false, may_sleep);
 }
 
 /*
@@ -1569,11 +1562,11 @@ static blk_qc_t blk_mq_make_request(struct request_queue *q, struct bio *bio)
 
                if (!(data.hctx->flags & BLK_MQ_F_BLOCKING)) {
                        rcu_read_lock();
-                       blk_mq_try_issue_directly(old_rq, &cookie);
+                       blk_mq_try_issue_directly(old_rq, &cookie, false);
                        rcu_read_unlock();
                } else {
                        srcu_idx = srcu_read_lock(&data.hctx->queue_rq_srcu);
-                       blk_mq_try_issue_directly(old_rq, &cookie);
+                       blk_mq_try_issue_directly(old_rq, &cookie, true);
                        srcu_read_unlock(&data.hctx->queue_rq_srcu, srcu_idx);
                }
                goto done;
@@ -1955,16 +1948,6 @@ static void blk_mq_exit_hw_queues(struct request_queue *q,
        }
 }
 
-static void blk_mq_free_hw_queues(struct request_queue *q,
-               struct blk_mq_tag_set *set)
-{
-       struct blk_mq_hw_ctx *hctx;
-       unsigned int i;
-
-       queue_for_each_hw_ctx(q, hctx, i)
-               free_cpumask_var(hctx->cpumask);
-}
-
 static int blk_mq_init_hctx(struct request_queue *q,
                struct blk_mq_tag_set *set,
                struct blk_mq_hw_ctx *hctx, unsigned hctx_idx)
@@ -2045,7 +2028,6 @@ static void blk_mq_init_cpu_queues(struct request_queue *q,
                struct blk_mq_ctx *__ctx = per_cpu_ptr(q->queue_ctx, i);
                struct blk_mq_hw_ctx *hctx;
 
-               memset(__ctx, 0, sizeof(*__ctx));
                __ctx->cpu = i;
                spin_lock_init(&__ctx->lock);
                INIT_LIST_HEAD(&__ctx->rq_list);
@@ -2257,15 +2239,19 @@ void blk_mq_release(struct request_queue *q)
        queue_for_each_hw_ctx(q, hctx, i) {
                if (!hctx)
                        continue;
-               kfree(hctx->ctxs);
-               kfree(hctx);
+               kobject_put(&hctx->kobj);
        }
 
        q->mq_map = NULL;
 
        kfree(q->queue_hw_ctx);
 
-       /* ctx kobj stays in queue_ctx */
+       /*
+        * release .mq_kobj and sw queue's kobject now because
+        * both share lifetime with request queue.
+        */
+       blk_mq_sysfs_deinit(q);
+
        free_percpu(q->queue_ctx);
 }
 
@@ -2330,10 +2316,7 @@ static void blk_mq_realloc_hw_ctxs(struct blk_mq_tag_set *set,
                        if (hctx->tags)
                                blk_mq_free_map_and_requests(set, j);
                        blk_mq_exit_hctx(q, set, hctx, j);
-                       free_cpumask_var(hctx->cpumask);
                        kobject_put(&hctx->kobj);
-                       kfree(hctx->ctxs);
-                       kfree(hctx);
                        hctxs[j] = NULL;
 
                }
@@ -2352,6 +2335,9 @@ struct request_queue *blk_mq_init_allocated_queue(struct blk_mq_tag_set *set,
        if (!q->queue_ctx)
                goto err_exit;
 
+       /* init q->mq_kobj and sw queues' kobjects */
+       blk_mq_sysfs_init(q);
+
        q->queue_hw_ctx = kzalloc_node(nr_cpu_ids * sizeof(*(q->queue_hw_ctx)),
                                                GFP_KERNEL, set->numa_node);
        if (!q->queue_hw_ctx)
@@ -2442,7 +2428,6 @@ void blk_mq_free_queue(struct request_queue *q)
        blk_mq_del_queue_tag_set(q);
 
        blk_mq_exit_hw_queues(q, set, set->nr_hw_queues);
-       blk_mq_free_hw_queues(q, set);
 }
 
 /* Basically redo blk_mq_init_queue with queue frozen */
index 088ced003c13d7282712b423ade0521c16aeebdc..b79f9a7d8cf62010dd9a91d3b271e5d2474cb836 100644 (file)
@@ -77,6 +77,8 @@ static inline struct blk_mq_hw_ctx *blk_mq_map_queue(struct request_queue *q,
 /*
  * sysfs helpers
  */
+extern void blk_mq_sysfs_init(struct request_queue *q);
+extern void blk_mq_sysfs_deinit(struct request_queue *q);
 extern int blk_mq_sysfs_register(struct request_queue *q);
 extern void blk_mq_sysfs_unregister(struct request_queue *q);
 extern void blk_mq_hctx_kobj_init(struct blk_mq_hw_ctx *hctx);
index 9b43efb8933fb9a0f352922029b86903e5b7c8e7..186fcb981e9b1d9696e3e000b0fde7b86e1a9663 100644 (file)
@@ -30,11 +30,11 @@ static void blk_stat_flush_batch(struct blk_rq_stat *stat)
 
 static void blk_stat_sum(struct blk_rq_stat *dst, struct blk_rq_stat *src)
 {
+       blk_stat_flush_batch(src);
+
        if (!src->nr_samples)
                return;
 
-       blk_stat_flush_batch(src);
-
        dst->min = min(dst->min, src->min);
        dst->max = max(dst->max, src->max);
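
Flushing the batch before the nr_samples test is the point of this hunk: batched samples are only folded into nr_samples by the flush, so the old order could mistake a batched-but-unflushed source for an empty one and drop its samples. A reduced model:

#include <stdio.h>

struct stat { int batch[4]; int nr_batch; int sum; int nr_samples; };

static void flush_batch(struct stat *s)
{
        for (int i = 0; i < s->nr_batch; i++)
                s->sum += s->batch[i];
        s->nr_samples += s->nr_batch;
        s->nr_batch = 0;
}

static void stat_sum(struct stat *dst, struct stat *src)
{
        flush_batch(src);               /* must happen first */
        if (!src->nr_samples)
                return;                 /* now a reliable emptiness test */
        dst->sum += src->sum;
        dst->nr_samples += src->nr_samples;
}

int main(void)
{
        struct stat dst = { 0 };
        struct stat src = { { 5, 7 }, 2, 0, 0 };

        /* With the check first, src would look empty and be dropped. */
        stat_sum(&dst, &src);
        printf("%d samples, sum %d\n", dst.nr_samples, dst.sum);
        return 0;
}
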
 
index b26a5ea115d00b51f20c8e59f09a420b2dbadc9f..a9c516a8b37dbceca9f46a74b7ccb7b4df35639c 100644 (file)
@@ -572,20 +572,6 @@ exit:
        disk_part_iter_exit(&piter);
 }
 
-void put_disk_devt(struct disk_devt *disk_devt)
-{
-       if (disk_devt && atomic_dec_and_test(&disk_devt->count))
-               disk_devt->release(disk_devt);
-}
-EXPORT_SYMBOL(put_disk_devt);
-
-void get_disk_devt(struct disk_devt *disk_devt)
-{
-       if (disk_devt)
-               atomic_inc(&disk_devt->count);
-}
-EXPORT_SYMBOL(get_disk_devt);
-
 /**
  * device_add_disk - add partitioning information to kernel list
  * @parent: parent device for the disk
@@ -626,13 +612,6 @@ void device_add_disk(struct device *parent, struct gendisk *disk)
 
        disk_alloc_events(disk);
 
-       /*
-        * Take a reference on the devt and assign it to queue since it
-        * must not be reallocated while the bdi is registered
-        */
-       disk->queue->disk_devt = disk->disk_devt;
-       get_disk_devt(disk->disk_devt);
-
        /* Register BDI before referencing it from bdev */
        bdi = disk->queue->backing_dev_info;
        bdi_register_owner(bdi, disk_to_dev(disk));
@@ -681,12 +660,16 @@ void del_gendisk(struct gendisk *disk)
        disk->flags &= ~GENHD_FL_UP;
 
        sysfs_remove_link(&disk_to_dev(disk)->kobj, "bdi");
-       /*
-        * Unregister bdi before releasing device numbers (as they can get
-        * reused and we'd get clashes in sysfs).
-        */
-       bdi_unregister(disk->queue->backing_dev_info);
-       blk_unregister_queue(disk);
+       if (disk->queue) {
+               /*
+                * Unregister bdi before releasing device numbers (as they can
+                * get reused and we'd get clashes in sysfs).
+                */
+               bdi_unregister(disk->queue->backing_dev_info);
+               blk_unregister_queue(disk);
+       } else {
+               WARN_ON(1);
+       }
        blk_unregister_region(disk_devt(disk), disk->minors);
 
        part_stat_set_all(&disk->part0, 0);
index 1e18dca360fc501033762d4c505c2e32c4674ee6..14035f826b5e350dbec1710d60aca560f2c1066b 100644 (file)
@@ -1023,7 +1023,6 @@ static int finalize_and_send(struct opal_dev *dev, cont_fn cont)
 
 static int gen_key(struct opal_dev *dev, void *data)
 {
-       const u8 *method;
        u8 uid[OPAL_UID_LENGTH];
        int err = 0;
 
@@ -1031,7 +1030,6 @@ static int gen_key(struct opal_dev *dev, void *data)
        set_comid(dev, dev->comid);
 
        memcpy(uid, dev->prev_data, min(sizeof(uid), dev->prev_d_len));
-       method = opalmethod[OPAL_GENKEY];
        kfree(dev->prev_data);
        dev->prev_data = NULL;
 
@@ -1669,7 +1667,6 @@ static int add_user_to_lr(struct opal_dev *dev, void *data)
 static int lock_unlock_locking_range(struct opal_dev *dev, void *data)
 {
        u8 lr_buffer[OPAL_UID_LENGTH];
-       const u8 *method;
        struct opal_lock_unlock *lkul = data;
        u8 read_locked = 1, write_locked = 1;
        int err = 0;
@@ -1677,7 +1674,6 @@ static int lock_unlock_locking_range(struct opal_dev *dev, void *data)
        clear_opal_cmd(dev);
        set_comid(dev, dev->comid);
 
-       method = opalmethod[OPAL_SET];
        if (build_locking_range(lr_buffer, sizeof(lr_buffer),
                                lkul->session.opal_key.lr) < 0)
                return -ERANGE;
@@ -1733,14 +1729,12 @@ static int lock_unlock_locking_range_sum(struct opal_dev *dev, void *data)
 {
        u8 lr_buffer[OPAL_UID_LENGTH];
        u8 read_locked = 1, write_locked = 1;
-       const u8 *method;
        struct opal_lock_unlock *lkul = data;
        int ret;
 
        clear_opal_cmd(dev);
        set_comid(dev, dev->comid);
 
-       method = opalmethod[OPAL_SET];
        if (build_locking_range(lr_buffer, sizeof(lr_buffer),
                                lkul->session.opal_key.lr) < 0)
                return -ERANGE;
@@ -2133,7 +2127,7 @@ static int opal_add_user_to_lr(struct opal_dev *dev,
                pr_err("Locking state was not RO or RW\n");
                return -EINVAL;
        }
-       if (lk_unlk->session.who < OPAL_USER1 &&
+       if (lk_unlk->session.who < OPAL_USER1 ||
            lk_unlk->session.who > OPAL_USER9) {
                pr_err("Authority was not within the range of users: %d\n",
                       lk_unlk->session.who);
@@ -2316,7 +2310,7 @@ static int opal_activate_user(struct opal_dev *dev,
        int ret;
 
        /* We can't activate Admin1 it's active as manufactured */
-       if (opal_session->who < OPAL_USER1 &&
+       if (opal_session->who < OPAL_USER1 ||
            opal_session->who > OPAL_USER9) {
                pr_err("Who was not a valid user: %d\n", opal_session->who);
                return -EINVAL;
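
Both hunks above fix the same slip: who < OPAL_USER1 && who > OPAL_USER9 can never be true, so any out-of-range authority passed the check; the OR form is the real out-of-range test. Reduced to its essence (enum values are illustrative):

#include <stdbool.h>
#include <stdio.h>

enum { OPAL_USER1 = 1, OPAL_USER9 = 9 };

static bool who_invalid_buggy(int who)
{
        return who < OPAL_USER1 && who > OPAL_USER9;    /* always false */
}

static bool who_invalid_fixed(int who)
{
        return who < OPAL_USER1 || who > OPAL_USER9;
}

int main(void)
{
        /* 42 is out of range, but the && version accepts it. */
        printf("buggy rejects 42: %d\n", who_invalid_buggy(42));
        printf("fixed rejects 42: %d\n", who_invalid_fixed(42));
        return 0;
}
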
index f5e18c2a48527bb3f5bbdc5202b37577689710b3..690deca17c35287c00171466f7b06e53262b0601 100644 (file)
@@ -266,7 +266,7 @@ unlock:
        return err;
 }
 
-int af_alg_accept(struct sock *sk, struct socket *newsock)
+int af_alg_accept(struct sock *sk, struct socket *newsock, bool kern)
 {
        struct alg_sock *ask = alg_sk(sk);
        const struct af_alg_type *type;
@@ -281,7 +281,7 @@ int af_alg_accept(struct sock *sk, struct socket *newsock)
        if (!type)
                goto unlock;
 
-       sk2 = sk_alloc(sock_net(sk), PF_ALG, GFP_KERNEL, &alg_proto, 0);
+       sk2 = sk_alloc(sock_net(sk), PF_ALG, GFP_KERNEL, &alg_proto, kern);
        err = -ENOMEM;
        if (!sk2)
                goto unlock;
@@ -323,9 +323,10 @@ unlock:
 }
 EXPORT_SYMBOL_GPL(af_alg_accept);
 
-static int alg_accept(struct socket *sock, struct socket *newsock, int flags)
+static int alg_accept(struct socket *sock, struct socket *newsock, int flags,
+                     bool kern)
 {
-       return af_alg_accept(sock->sk, newsock);
+       return af_alg_accept(sock->sk, newsock, kern);
 }
 
 static const struct proto_ops alg_proto_ops = {
index 54fc90e8339ce83edab95908adb16ddc1e9d7d32..5e92bd275ef38e8dfe47d9ca50a0e5ced51b2d0e 100644 (file)
@@ -239,7 +239,8 @@ unlock:
        return err ?: len;
 }
 
-static int hash_accept(struct socket *sock, struct socket *newsock, int flags)
+static int hash_accept(struct socket *sock, struct socket *newsock, int flags,
+                      bool kern)
 {
        struct sock *sk = sock->sk;
        struct alg_sock *ask = alg_sk(sk);
@@ -260,7 +261,7 @@ static int hash_accept(struct socket *sock, struct socket *newsock, int flags)
        if (err)
                return err;
 
-       err = af_alg_accept(ask->parent, newsock);
+       err = af_alg_accept(ask->parent, newsock, kern);
        if (err)
                return err;
 
@@ -378,7 +379,7 @@ static int hash_recvmsg_nokey(struct socket *sock, struct msghdr *msg,
 }
 
 static int hash_accept_nokey(struct socket *sock, struct socket *newsock,
-                            int flags)
+                            int flags, bool kern)
 {
        int err;
 
@@ -386,7 +387,7 @@ static int hash_accept_nokey(struct socket *sock, struct socket *newsock,
        if (err)
                return err;
 
-       return hash_accept(sock, newsock, flags);
+       return hash_accept(sock, newsock, flags, kern);
 }
 
 static struct proto_ops algif_hash_ops_nokey = {
index ecd8474018e3bde6d16d44c66da6844b6c78ad26..3ea095adafd9af0067f695c4c70e2af702f70923 100644 (file)
@@ -286,8 +286,11 @@ static int init_crypt(struct skcipher_request *req, crypto_completion_t done)
 
        subreq->cryptlen = LRW_BUFFER_SIZE;
        if (req->cryptlen > LRW_BUFFER_SIZE) {
-               subreq->cryptlen = min(req->cryptlen, (unsigned)PAGE_SIZE);
-               rctx->ext = kmalloc(subreq->cryptlen, gfp);
+               unsigned int n = min(req->cryptlen, (unsigned int)PAGE_SIZE);
+
+               rctx->ext = kmalloc(n, gfp);
+               if (rctx->ext)
+                       subreq->cryptlen = n;
        }
 
        rctx->src = req->src;
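
The reworked allocation (here and in the xts.c hunk below) commits to the larger cryptlen only after kmalloc() succeeds, so an allocation failure falls back to the small on-stack buffer instead of leaving cryptlen pointing at a NULL ext buffer. The same defensive shape in isolation (sizes and names are illustrative):

#include <stdio.h>
#include <stdlib.h>

#define SMALL_BUF 128
#define PAGE_SIZE 4096

struct rctx { void *ext; size_t len; };

static void setup(struct rctx *r, size_t want)
{
        r->ext = NULL;
        r->len = SMALL_BUF;
        if (want > SMALL_BUF) {
                size_t n = want < PAGE_SIZE ? want : PAGE_SIZE;

                r->ext = malloc(n);
                /* Commit to the larger length only if the buffer exists. */
                if (r->ext)
                        r->len = n;
        }
}

int main(void)
{
        struct rctx r;

        setup(&r, 1024);
        printf("using %zu-byte chunks (%s buffer)\n",
               r.len, r.ext ? "heap" : "stack");
        free(r.ext);
        return 0;
}
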
index baeb34dd8582ebd11473ebb8e2ed5eadd7d2f71b..c976bfac29da526844f6a5578fea1455945c26f3 100644 (file)
@@ -230,8 +230,11 @@ static int init_crypt(struct skcipher_request *req, crypto_completion_t done)
 
        subreq->cryptlen = XTS_BUFFER_SIZE;
        if (req->cryptlen > XTS_BUFFER_SIZE) {
-               subreq->cryptlen = min(req->cryptlen, (unsigned)PAGE_SIZE);
-               rctx->ext = kmalloc(subreq->cryptlen, gfp);
+               unsigned int n = min(req->cryptlen, (unsigned int)PAGE_SIZE);
+
+               rctx->ext = kmalloc(n, gfp);
+               if (rctx->ext)
+                       subreq->cryptlen = n;
        }
 
        rctx->src = req->src;
index a391bbc48105ae6cf3504775c60b00b4dcd5705e..d94f92f88ca1c9afb7e04b4f96901f9518a4a2c1 100644 (file)
@@ -2,7 +2,6 @@
 # Makefile for the Linux ACPI interpreter
 #
 
-ccflags-y                      := -Os
 ccflags-$(CONFIG_ACPI_DEBUG)   += -DACPI_DEBUG_OUTPUT
 
 #
index b4c1a6a51da482a953051959279fc9d39cd29d49..03250e1f11039b99e6a25e1650b9d67c3cee41c9 100644 (file)
 ACPI_MODULE_NAME("platform");
 
 static const struct acpi_device_id forbidden_id_list[] = {
-       {"PNP0000", 0}, /* PIC */
-       {"PNP0100", 0}, /* Timer */
-       {"PNP0200", 0}, /* AT DMA Controller */
+       {"PNP0000",  0},        /* PIC */
+       {"PNP0100",  0},        /* Timer */
+       {"PNP0200",  0},        /* AT DMA Controller */
+       {"ACPI0009", 0},        /* IOxAPIC */
+       {"ACPI000A", 0},        /* IOAPIC */
        {"", 0},
 };
 
index 4467a8089ab890695ccf7072220d9c43d1f29c2d..0143135b3abe3749d8a3bab492eb67b2e63a5d01 100644 (file)
@@ -182,11 +182,6 @@ int __weak arch_register_cpu(int cpu)
 
 void __weak arch_unregister_cpu(int cpu) {}
 
-int __weak acpi_map_cpu2node(acpi_handle handle, int cpu, int physid)
-{
-       return -ENODEV;
-}
-
 static int acpi_processor_hotadd_init(struct acpi_processor *pr)
 {
        unsigned long long sta;
@@ -285,6 +280,13 @@ static int acpi_processor_get_info(struct acpi_device *device)
                pr->acpi_id = value;
        }
 
+       if (acpi_duplicate_processor_id(pr->acpi_id)) {
+               dev_err(&device->dev,
+                       "Failed to get unique processor _UID (0x%x)\n",
+                       pr->acpi_id);
+               return -ENODEV;
+       }
+
        pr->phys_id = acpi_get_phys_id(pr->handle, device_declaration,
                                        pr->acpi_id);
        if (invalid_phys_cpuid(pr->phys_id))
@@ -585,7 +587,7 @@ static struct acpi_scan_handler processor_container_handler = {
 static int nr_unique_ids __initdata;
 
 /* The number of the duplicate processor IDs */
-static int nr_duplicate_ids __initdata;
+static int nr_duplicate_ids;
 
 /* Used to store the unique processor IDs */
 static int unique_processor_ids[] __initdata = {
@@ -593,7 +595,7 @@ static int unique_processor_ids[] __initdata = {
 };
 
 /* Used to store the duplicate processor IDs */
-static int duplicate_processor_ids[] __initdata = {
+static int duplicate_processor_ids[] = {
        [0 ... NR_CPUS - 1] = -1,
 };
 
@@ -638,28 +640,53 @@ static acpi_status __init acpi_processor_ids_walk(acpi_handle handle,
                                                  void **rv)
 {
        acpi_status status;
+       acpi_object_type acpi_type;
+       unsigned long long uid;
        union acpi_object object = { 0 };
        struct acpi_buffer buffer = { sizeof(union acpi_object), &object };
 
-       status = acpi_evaluate_object(handle, NULL, NULL, &buffer);
+       status = acpi_get_type(handle, &acpi_type);
        if (ACPI_FAILURE(status))
-               acpi_handle_info(handle, "Not get the processor object\n");
-       else
-               processor_validated_ids_update(object.processor.proc_id);
+               return false;
+
+       switch (acpi_type) {
+       case ACPI_TYPE_PROCESSOR:
+               status = acpi_evaluate_object(handle, NULL, NULL, &buffer);
+               if (ACPI_FAILURE(status))
+                       goto err;
+               uid = object.processor.proc_id;
+               break;
+
+       case ACPI_TYPE_DEVICE:
+               status = acpi_evaluate_integer(handle, "_UID", NULL, &uid);
+               if (ACPI_FAILURE(status))
+                       goto err;
+               break;
+       default:
+               goto err;
+       }
+
+       processor_validated_ids_update(uid);
+       return true;
+
+err:
+       acpi_handle_info(handle, "Invalid processor object\n");
+       return false;
 
-       return AE_OK;
 }
 
-static void __init acpi_processor_check_duplicates(void)
+void __init acpi_processor_check_duplicates(void)
 {
-       /* Search all processor nodes in ACPI namespace */
+       /* check the correctness for all processors in ACPI namespace */
        acpi_walk_namespace(ACPI_TYPE_PROCESSOR, ACPI_ROOT_OBJECT,
                                                ACPI_UINT32_MAX,
                                                acpi_processor_ids_walk,
                                                NULL, NULL, NULL);
+       acpi_get_devices(ACPI_PROCESSOR_DEVICE_HID, acpi_processor_ids_walk,
+                                               NULL, NULL);
 }
 
-bool __init acpi_processor_validate_proc_id(int proc_id)
+bool acpi_duplicate_processor_id(int proc_id)
 {
        int i;
 
index b192b42a835105a68038ce13d568fad7154497c4..79b3c9c5a3bc9497ea7e9ef5e26fced8b55617b2 100644 (file)
@@ -1073,6 +1073,7 @@ static int ghes_remove(struct platform_device *ghes_dev)
                if (list_empty(&ghes_sci))
                        unregister_acpi_hed_notifier(&ghes_notifier_sci);
                mutex_unlock(&ghes_list_mutex);
+               synchronize_rcu();
                break;
        case ACPI_HEST_NOTIFY_NMI:
                ghes_nmi_remove(ghes);
index 80cb5eb75b633db8aa278b5e709cfddd697f9a7e..34fbe027e73a26f195f981d2fbd373608f724415 100644 (file)
@@ -1249,7 +1249,6 @@ static int __init acpi_init(void)
        acpi_wakeup_device_init();
        acpi_debugger_init();
        acpi_setup_sb_notify_handler();
-       acpi_set_processor_mapping();
        return 0;
 }
 
index 219b90bc092297c753639f84972939710de25298..f15900132912a4349ecc5b6efe6a2d2e8ff6530f 100644 (file)
@@ -41,8 +41,10 @@ void acpi_gpe_apply_masked_gpes(void);
 void acpi_container_init(void);
 void acpi_memory_hotplug_init(void);
 #ifdef CONFIG_ACPI_HOTPLUG_IOAPIC
+void pci_ioapic_remove(struct acpi_pci_root *root);
 int acpi_ioapic_remove(struct acpi_pci_root *root);
 #else
+static inline void pci_ioapic_remove(struct acpi_pci_root *root) { return; }
 static inline int acpi_ioapic_remove(struct acpi_pci_root *root) { return 0; }
 #endif
 #ifdef CONFIG_ACPI_DOCK
index 6d7ce6e12aaa6662b360c391f8c3b5e84a84ddc7..7e4fbf9a53a3ccd19488b2eb17de2d1be7dfe4de 100644 (file)
@@ -45,6 +45,12 @@ static acpi_status setup_res(struct acpi_resource *acpi_res, void *data)
        struct resource *res = data;
        struct resource_win win;
 
+       /*
+        * We might assign this to 'res' later, make sure all pointers are
+        * cleared before the resource is added to the global list
+        */
+       memset(&win, 0, sizeof(win));
+
        res->flags = 0;
        if (acpi_dev_filter_resource_type(acpi_res, IORESOURCE_MEM))
                return AE_OK;
@@ -206,24 +212,34 @@ int acpi_ioapic_add(acpi_handle root_handle)
        return ACPI_SUCCESS(status) && ACPI_SUCCESS(retval) ? 0 : -ENODEV;
 }
 
-int acpi_ioapic_remove(struct acpi_pci_root *root)
+void pci_ioapic_remove(struct acpi_pci_root *root)
 {
-       int retval = 0;
        struct acpi_pci_ioapic *ioapic, *tmp;
 
        mutex_lock(&ioapic_list_lock);
        list_for_each_entry_safe(ioapic, tmp, &ioapic_list, list) {
                if (root->device->handle != ioapic->root_handle)
                        continue;
-
-               if (acpi_unregister_ioapic(ioapic->handle, ioapic->gsi_base))
-                       retval = -EBUSY;
-
                if (ioapic->pdev) {
                        pci_release_region(ioapic->pdev, 0);
                        pci_disable_device(ioapic->pdev);
                        pci_dev_put(ioapic->pdev);
                }
+       }
+       mutex_unlock(&ioapic_list_lock);
+}
+
+int acpi_ioapic_remove(struct acpi_pci_root *root)
+{
+       int retval = 0;
+       struct acpi_pci_ioapic *ioapic, *tmp;
+
+       mutex_lock(&ioapic_list_lock);
+       list_for_each_entry_safe(ioapic, tmp, &ioapic_list, list) {
+               if (root->device->handle != ioapic->root_handle)
+                       continue;
+               if (acpi_unregister_ioapic(ioapic->handle, ioapic->gsi_base))
+                       retval = -EBUSY;
                if (ioapic->res.flags && ioapic->res.parent)
                        release_resource(&ioapic->res);
                list_del(&ioapic->list);
index bf601d4df8cfcbb6e579b00cbff75efcb24d8071..919be0aa2578760d466031f866a0e8772adf7f97 100644 (file)
@@ -648,12 +648,12 @@ static void acpi_pci_root_remove(struct acpi_device *device)
 
        pci_stop_root_bus(root->bus);
 
-       WARN_ON(acpi_ioapic_remove(root));
-
+       pci_ioapic_remove(root);
        device_set_run_wake(root->bus->bridge, false);
        pci_acpi_remove_bus_pm_notifier(device);
 
        pci_remove_root_bus(root->bus);
+       WARN_ON(acpi_ioapic_remove(root));
 
        dmar_device_remove(device->handle);
 
index 611a5585a9024a728c71e60ada951b3a73936708..b933061b6b607c467e20317412c63c78728396fc 100644 (file)
@@ -32,12 +32,12 @@ static struct acpi_table_madt *get_madt_table(void)
 }
 
 static int map_lapic_id(struct acpi_subtable_header *entry,
-                u32 acpi_id, phys_cpuid_t *apic_id, bool ignore_disabled)
+                u32 acpi_id, phys_cpuid_t *apic_id)
 {
        struct acpi_madt_local_apic *lapic =
                container_of(entry, struct acpi_madt_local_apic, header);
 
-       if (ignore_disabled && !(lapic->lapic_flags & ACPI_MADT_ENABLED))
+       if (!(lapic->lapic_flags & ACPI_MADT_ENABLED))
                return -ENODEV;
 
        if (lapic->processor_id != acpi_id)
@@ -48,13 +48,12 @@ static int map_lapic_id(struct acpi_subtable_header *entry,
 }
 
 static int map_x2apic_id(struct acpi_subtable_header *entry,
-               int device_declaration, u32 acpi_id, phys_cpuid_t *apic_id,
-               bool ignore_disabled)
+               int device_declaration, u32 acpi_id, phys_cpuid_t *apic_id)
 {
        struct acpi_madt_local_x2apic *apic =
                container_of(entry, struct acpi_madt_local_x2apic, header);
 
-       if (ignore_disabled && !(apic->lapic_flags & ACPI_MADT_ENABLED))
+       if (!(apic->lapic_flags & ACPI_MADT_ENABLED))
                return -ENODEV;
 
        if (device_declaration && (apic->uid == acpi_id)) {
@@ -66,13 +65,12 @@ static int map_x2apic_id(struct acpi_subtable_header *entry,
 }
 
 static int map_lsapic_id(struct acpi_subtable_header *entry,
-               int device_declaration, u32 acpi_id, phys_cpuid_t *apic_id,
-               bool ignore_disabled)
+               int device_declaration, u32 acpi_id, phys_cpuid_t *apic_id)
 {
        struct acpi_madt_local_sapic *lsapic =
                container_of(entry, struct acpi_madt_local_sapic, header);
 
-       if (ignore_disabled && !(lsapic->lapic_flags & ACPI_MADT_ENABLED))
+       if (!(lsapic->lapic_flags & ACPI_MADT_ENABLED))
                return -ENODEV;
 
        if (device_declaration) {
@@ -89,13 +87,12 @@ static int map_lsapic_id(struct acpi_subtable_header *entry,
  * Retrieve the ARM CPU physical identifier (MPIDR)
  */
 static int map_gicc_mpidr(struct acpi_subtable_header *entry,
-               int device_declaration, u32 acpi_id, phys_cpuid_t *mpidr,
-               bool ignore_disabled)
+               int device_declaration, u32 acpi_id, phys_cpuid_t *mpidr)
 {
        struct acpi_madt_generic_interrupt *gicc =
            container_of(entry, struct acpi_madt_generic_interrupt, header);
 
-       if (ignore_disabled && !(gicc->flags & ACPI_MADT_ENABLED))
+       if (!(gicc->flags & ACPI_MADT_ENABLED))
                return -ENODEV;
 
        /* device_declaration means Device object in DSDT, in the
@@ -112,7 +109,7 @@ static int map_gicc_mpidr(struct acpi_subtable_header *entry,
 }
 
 static phys_cpuid_t map_madt_entry(struct acpi_table_madt *madt,
-                                  int type, u32 acpi_id, bool ignore_disabled)
+                                  int type, u32 acpi_id)
 {
        unsigned long madt_end, entry;
        phys_cpuid_t phys_id = PHYS_CPUID_INVALID;      /* CPU hardware ID */
@@ -130,20 +127,16 @@ static phys_cpuid_t map_madt_entry(struct acpi_table_madt *madt,
                struct acpi_subtable_header *header =
                        (struct acpi_subtable_header *)entry;
                if (header->type == ACPI_MADT_TYPE_LOCAL_APIC) {
-                       if (!map_lapic_id(header, acpi_id, &phys_id,
-                                         ignore_disabled))
+                       if (!map_lapic_id(header, acpi_id, &phys_id))
                                break;
                } else if (header->type == ACPI_MADT_TYPE_LOCAL_X2APIC) {
-                       if (!map_x2apic_id(header, type, acpi_id, &phys_id,
-                                          ignore_disabled))
+                       if (!map_x2apic_id(header, type, acpi_id, &phys_id))
                                break;
                } else if (header->type == ACPI_MADT_TYPE_LOCAL_SAPIC) {
-                       if (!map_lsapic_id(header, type, acpi_id, &phys_id,
-                                          ignore_disabled))
+                       if (!map_lsapic_id(header, type, acpi_id, &phys_id))
                                break;
                } else if (header->type == ACPI_MADT_TYPE_GENERIC_INTERRUPT) {
-                       if (!map_gicc_mpidr(header, type, acpi_id, &phys_id,
-                                           ignore_disabled))
+                       if (!map_gicc_mpidr(header, type, acpi_id, &phys_id))
                                break;
                }
                entry += header->length;
@@ -161,15 +154,14 @@ phys_cpuid_t __init acpi_map_madt_entry(u32 acpi_id)
        if (!madt)
                return PHYS_CPUID_INVALID;
 
-       rv = map_madt_entry(madt, 1, acpi_id, true);
+       rv = map_madt_entry(madt, 1, acpi_id);
 
        acpi_put_table((struct acpi_table_header *)madt);
 
        return rv;
 }
 
-static phys_cpuid_t map_mat_entry(acpi_handle handle, int type, u32 acpi_id,
-                                 bool ignore_disabled)
+static phys_cpuid_t map_mat_entry(acpi_handle handle, int type, u32 acpi_id)
 {
        struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
        union acpi_object *obj;
@@ -190,38 +182,30 @@ static phys_cpuid_t map_mat_entry(acpi_handle handle, int type, u32 acpi_id,
 
        header = (struct acpi_subtable_header *)obj->buffer.pointer;
        if (header->type == ACPI_MADT_TYPE_LOCAL_APIC)
-               map_lapic_id(header, acpi_id, &phys_id, ignore_disabled);
+               map_lapic_id(header, acpi_id, &phys_id);
        else if (header->type == ACPI_MADT_TYPE_LOCAL_SAPIC)
-               map_lsapic_id(header, type, acpi_id, &phys_id, ignore_disabled);
+               map_lsapic_id(header, type, acpi_id, &phys_id);
        else if (header->type == ACPI_MADT_TYPE_LOCAL_X2APIC)
-               map_x2apic_id(header, type, acpi_id, &phys_id, ignore_disabled);
+               map_x2apic_id(header, type, acpi_id, &phys_id);
        else if (header->type == ACPI_MADT_TYPE_GENERIC_INTERRUPT)
-               map_gicc_mpidr(header, type, acpi_id, &phys_id,
-                              ignore_disabled);
+               map_gicc_mpidr(header, type, acpi_id, &phys_id);
 
 exit:
        kfree(buffer.pointer);
        return phys_id;
 }
 
-static phys_cpuid_t __acpi_get_phys_id(acpi_handle handle, int type,
-                                      u32 acpi_id, bool ignore_disabled)
+phys_cpuid_t acpi_get_phys_id(acpi_handle handle, int type, u32 acpi_id)
 {
        phys_cpuid_t phys_id;
 
-       phys_id = map_mat_entry(handle, type, acpi_id, ignore_disabled);
+       phys_id = map_mat_entry(handle, type, acpi_id);
        if (invalid_phys_cpuid(phys_id))
-               phys_id = map_madt_entry(get_madt_table(), type, acpi_id,
-                                          ignore_disabled);
+               phys_id = map_madt_entry(get_madt_table(), type, acpi_id);
 
        return phys_id;
 }
 
-phys_cpuid_t acpi_get_phys_id(acpi_handle handle, int type, u32 acpi_id)
-{
-       return __acpi_get_phys_id(handle, type, acpi_id, true);
-}
-
 int acpi_map_cpuid(phys_cpuid_t phys_id, u32 acpi_id)
 {
 #ifdef CONFIG_SMP
@@ -278,79 +262,6 @@ int acpi_get_cpuid(acpi_handle handle, int type, u32 acpi_id)
 }
 EXPORT_SYMBOL_GPL(acpi_get_cpuid);
 
-#ifdef CONFIG_ACPI_HOTPLUG_CPU
-static bool __init
-map_processor(acpi_handle handle, phys_cpuid_t *phys_id, int *cpuid)
-{
-       int type, id;
-       u32 acpi_id;
-       acpi_status status;
-       acpi_object_type acpi_type;
-       unsigned long long tmp;
-       union acpi_object object = { 0 };
-       struct acpi_buffer buffer = { sizeof(union acpi_object), &object };
-
-       status = acpi_get_type(handle, &acpi_type);
-       if (ACPI_FAILURE(status))
-               return false;
-
-       switch (acpi_type) {
-       case ACPI_TYPE_PROCESSOR:
-               status = acpi_evaluate_object(handle, NULL, NULL, &buffer);
-               if (ACPI_FAILURE(status))
-                       return false;
-               acpi_id = object.processor.proc_id;
-
-               /* validate the acpi_id */
-               if(acpi_processor_validate_proc_id(acpi_id))
-                       return false;
-               break;
-       case ACPI_TYPE_DEVICE:
-               status = acpi_evaluate_integer(handle, "_UID", NULL, &tmp);
-               if (ACPI_FAILURE(status))
-                       return false;
-               acpi_id = tmp;
-               break;
-       default:
-               return false;
-       }
-
-       type = (acpi_type == ACPI_TYPE_DEVICE) ? 1 : 0;
-
-       *phys_id = __acpi_get_phys_id(handle, type, acpi_id, false);
-       id = acpi_map_cpuid(*phys_id, acpi_id);
-
-       if (id < 0)
-               return false;
-       *cpuid = id;
-       return true;
-}
-
-static acpi_status __init
-set_processor_node_mapping(acpi_handle handle, u32 lvl, void *context,
-                          void **rv)
-{
-       phys_cpuid_t phys_id;
-       int cpu_id;
-
-       if (!map_processor(handle, &phys_id, &cpu_id))
-               return AE_ERROR;
-
-       acpi_map_cpu2node(handle, cpu_id, phys_id);
-       return AE_OK;
-}
-
-void __init acpi_set_processor_mapping(void)
-{
-       /* Set persistent cpu <-> node mapping for all processors. */
-       acpi_walk_namespace(ACPI_TYPE_PROCESSOR, ACPI_ROOT_OBJECT,
-                           ACPI_UINT32_MAX, set_processor_node_mapping,
-                           NULL, NULL, NULL);
-}
-#else
-void __init acpi_set_processor_mapping(void) {}
-#endif /* CONFIG_ACPI_HOTPLUG_CPU */
-
 #ifdef CONFIG_ACPI_HOTPLUG_IOAPIC
 static int get_ioapic_id(struct acpi_subtable_header *entry, u32 gsi_base,
                         u64 *phys_addr, int *ioapic_id)
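Every map_*_id() helper above is driven by the same MADT walk, which steps through variable-length subtables until the table end. A hedged sketch of that walk, simplified from map_madt_entry() in the hunks above (assumes a valid struct acpi_table_madt *madt):

    unsigned long entry = (unsigned long)madt + sizeof(struct acpi_table_madt);
    unsigned long madt_end = (unsigned long)madt + madt->header.length;

    while (entry + sizeof(struct acpi_subtable_header) < madt_end) {
            struct acpi_subtable_header *header =
                    (struct acpi_subtable_header *)entry;

            /* dispatch on header->type, e.g. ACPI_MADT_TYPE_LOCAL_APIC */
            entry += header->length;
    }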
index 01c94669a2b0ad91976daf9f3c7ef3338b48e965..3afa8c1fa12702c251d5d3654e2026ba0ebfcd62 100644 (file)
@@ -30,7 +30,7 @@ static bool qdf2400_erratum_44_present(struct acpi_table_header *h)
                return true;
 
        if (!memcmp(h->oem_table_id, "QDF2400 ", ACPI_OEM_TABLE_ID_SIZE) &&
-                       h->oem_revision == 0)
+                       h->oem_revision == 1)
                return true;
 
        return false;
index 85d833289f28f85de9aa98efe52a05a921cdc3bd..4c96f3ac4976d9bbc306010ca4fd76bb7d33015e 100644 (file)
@@ -177,7 +177,8 @@ static int ahci_qoriq_phy_init(struct ahci_host_priv *hpriv)
        case AHCI_LS1043A:
                if (!qpriv->ecc_addr)
                        return -EINVAL;
-               writel(ECC_DIS_ARMV8_CH2, qpriv->ecc_addr);
+               writel(readl(qpriv->ecc_addr) | ECC_DIS_ARMV8_CH2,
+                               qpriv->ecc_addr);
                writel(AHCI_PORT_PHY_1_CFG, reg_base + PORT_PHY1);
                writel(AHCI_PORT_TRANS_CFG, reg_base + PORT_TRANS);
                if (qpriv->is_dmacoherent)
@@ -194,7 +195,8 @@ static int ahci_qoriq_phy_init(struct ahci_host_priv *hpriv)
        case AHCI_LS1046A:
                if (!qpriv->ecc_addr)
                        return -EINVAL;
-               writel(ECC_DIS_ARMV8_CH2, qpriv->ecc_addr);
+               writel(readl(qpriv->ecc_addr) | ECC_DIS_ARMV8_CH2,
+                               qpriv->ecc_addr);
                writel(AHCI_PORT_PHY_1_CFG, reg_base + PORT_PHY1);
                writel(AHCI_PORT_TRANS_CFG, reg_base + PORT_TRANS);
                if (qpriv->is_dmacoherent)
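Both ahci_qoriq hunks swap a blind register write for a read-modify-write, so bits other than the ECC-disable flag survive. A hedged illustration of the pattern:

    static void ecc_disable(void __iomem *ecc_addr, u32 dis_bit)
    {
            u32 val = readl(ecc_addr);       /* keep the current settings */

            writel(val | dis_bit, ecc_addr); /* set only the disable bit  */
    }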
index 2bd92dca3e6204027f6c1b5fb07ba519cbe039fa..274d6d7193d7caa9b57f111962aa6e245ebc8f7c 100644 (file)
@@ -1482,7 +1482,6 @@ unsigned int ata_sff_qc_issue(struct ata_queued_cmd *qc)
                break;
 
        default:
-               WARN_ON_ONCE(1);
                return AC_ERR_SYSTEM;
        }
 
index 46698232e6bff069293200a5da842e196a4a0730..19e6e539a061b93a6ac81975a69b3cdba7b1d699 100644 (file)
@@ -224,7 +224,6 @@ static DECLARE_TRANSPORT_CLASS(ata_port_class,
 
 static void ata_tport_release(struct device *dev)
 {
-       put_device(dev->parent);
 }
 
 /**
@@ -284,7 +283,7 @@ int ata_tport_add(struct device *parent,
        device_initialize(dev);
        dev->type = &ata_port_type;
 
-       dev->parent = get_device(parent);
+       dev->parent = parent;
        dev->release = ata_tport_release;
        dev_set_name(dev, "ata%d", ap->print_id);
        transport_setup_device(dev);
@@ -348,7 +347,6 @@ static DECLARE_TRANSPORT_CLASS(ata_link_class,
 
 static void ata_tlink_release(struct device *dev)
 {
-       put_device(dev->parent);
 }
 
 /**
@@ -410,7 +408,7 @@ int ata_tlink_add(struct ata_link *link)
        int error;
 
        device_initialize(dev);
-       dev->parent = get_device(&ap->tdev);
+       dev->parent = &ap->tdev;
        dev->release = ata_tlink_release;
        if (ata_is_host_link(link))
                dev_set_name(dev, "link%d", ap->print_id);
@@ -589,7 +587,6 @@ static DECLARE_TRANSPORT_CLASS(ata_dev_class,
 
 static void ata_tdev_release(struct device *dev)
 {
-       put_device(dev->parent);
 }
 
 /**
@@ -662,7 +659,7 @@ static int ata_tdev_add(struct ata_device *ata_dev)
        int error;
 
        device_initialize(dev);
-       dev->parent = get_device(&link->tdev);
+       dev->parent = &link->tdev;
        dev->release = ata_tdev_release;
        if (ata_is_host_link(link))
                dev_set_name(dev, "dev%d.%d", ap->print_id,ata_dev->devno);
index 4a610795b585fd41765676529eaaccc0685cda93..906705e5f7763c5ac91f1f00a140f49c54ee7c2a 100644 (file)
@@ -2267,9 +2267,8 @@ static int amb_probe(struct pci_dev *pci_dev,
        dev->atm_dev->ci_range.vpi_bits = NUM_VPI_BITS;
        dev->atm_dev->ci_range.vci_bits = NUM_VCI_BITS;
 
-       init_timer(&dev->housekeeping);
-       dev->housekeeping.function = do_housekeeping;
-       dev->housekeeping.data = (unsigned long) dev;
+       setup_timer(&dev->housekeeping, do_housekeeping,
+                   (unsigned long)dev);
        mod_timer(&dev->housekeeping, jiffies);
 
        // enable host interrupts
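setup_timer() is shorthand for the three assignments it replaces; the conversion above changes no behaviour. Side by side, with the names from the hunk:

    /* before */
    init_timer(&dev->housekeeping);
    dev->housekeeping.function = do_housekeeping;
    dev->housekeeping.data = (unsigned long)dev;

    /* after: one call, same effect */
    setup_timer(&dev->housekeeping, do_housekeeping, (unsigned long)dev);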
index bf43b5d2aafcafb2b0bc32652cacac6750c5bff9..83f1439e57fd8cb5d40e0a9d435a8a7fe62939c4 100644 (file)
@@ -218,6 +218,7 @@ static const struct of_device_id img_ascii_lcd_matches[] = {
        { .compatible = "img,boston-lcd", .data = &boston_config },
        { .compatible = "mti,malta-lcd", .data = &malta_config },
        { .compatible = "mti,sead3-lcd", .data = &sead3_config },
+       { /* sentinel */ }
 };
 
 /**
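The added sentinel matters because the OF match code walks an of_device_id table until it finds an all-zero entry; without one it reads past the end of the array. The required shape, with a hypothetical compatible string:

    static const struct of_device_id example_matches[] = {
            { .compatible = "vendor,example-lcd" },
            { /* sentinel */ }
    };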
index 684bda4d14a187b41ff453bf33ad8df4774c977f..6bb60fb6a30b7b9b4fd42e2872261317b38c22b5 100644 (file)
@@ -639,11 +639,6 @@ int lock_device_hotplug_sysfs(void)
        return restart_syscall();
 }
 
-void assert_held_device_hotplug(void)
-{
-       lockdep_assert_held(&device_hotplug_lock);
-}
-
 #ifdef CONFIG_BLOCK
 static inline int device_is_not_partition(struct device *dev)
 {
index 771a2a253440f736711bd996044d78b6ce3b723a..7bde8d7a2816b5cb42e7d3104c26a960ad32f8b9 100644 (file)
@@ -185,8 +185,7 @@ int bcma_gpio_init(struct bcma_drv_cc *cc)
        chip->owner             = THIS_MODULE;
        chip->parent            = bcma_bus_get_host_dev(bus);
 #if IS_BUILTIN(CONFIG_OF)
-       if (cc->core->bus->hosttype == BCMA_HOSTTYPE_SOC)
-               chip->of_node   = cc->core->dev.of_node;
+       chip->of_node           = cc->core->dev.of_node;
 #endif
        switch (bus->chipinfo.id) {
        case BCMA_CHIP_ID_BCM4707:
index 12da68ec48baa751d78899e89ac7da2e60f197f7..e6986c7608f1e836eb8c61bfc3b756ee6d150e80 100644 (file)
@@ -201,9 +201,6 @@ static void bcma_of_fill_device(struct device *parent,
 {
        struct device_node *node;
 
-       if (!IS_ENABLED(CONFIG_OF_IRQ))
-               return;
-
        node = bcma_of_find_child_device(parent, core);
        if (node)
                core->dev.of_node = node;
@@ -242,19 +239,18 @@ void bcma_prepare_core(struct bcma_bus *bus, struct bcma_device *core)
        core->dev.release = bcma_release_core_dev;
        core->dev.bus = &bcma_bus_type;
        dev_set_name(&core->dev, "bcma%d:%d", bus->num, core->core_index);
+       core->dev.parent = bcma_bus_get_host_dev(bus);
+       if (core->dev.parent)
+               bcma_of_fill_device(core->dev.parent, core);
 
        switch (bus->hosttype) {
        case BCMA_HOSTTYPE_PCI:
-               core->dev.parent = &bus->host_pci->dev;
                core->dma_dev = &bus->host_pci->dev;
                core->irq = bus->host_pci->irq;
                break;
        case BCMA_HOSTTYPE_SOC:
                if (IS_ENABLED(CONFIG_OF) && bus->host_pdev) {
                        core->dma_dev = &bus->host_pdev->dev;
-                       core->dev.parent = &bus->host_pdev->dev;
-                       if (core->dev.parent)
-                               bcma_of_fill_device(core->dev.parent, core);
                } else {
                        core->dev.dma_mask = &core->dev.coherent_dma_mask;
                        core->dma_dev = &core->dev;
index 7e4287bc19e52991a82cf218906fe7384112921b..d8a23561b4cb4b720065ccf1bd97714a07395c73 100644 (file)
@@ -47,6 +47,8 @@ static DEFINE_MUTEX(nbd_index_mutex);
 struct nbd_sock {
        struct socket *sock;
        struct mutex tx_lock;
+       struct request *pending;
+       int sent;
 };
 
 #define NBD_TIMEDOUT                   0
@@ -124,7 +126,8 @@ static const char *nbdcmd_to_ascii(int cmd)
 
 static int nbd_size_clear(struct nbd_device *nbd, struct block_device *bdev)
 {
-       bd_set_size(bdev, 0);
+       if (bdev->bd_openers <= 1)
+               bd_set_size(bdev, 0);
        set_capacity(nbd->disk, 0);
        kobject_uevent(&nbd_to_dev(nbd)->kobj, KOBJ_CHANGE);
 
@@ -190,7 +193,7 @@ static enum blk_eh_timer_return nbd_xmit_timeout(struct request *req,
 
        dev_err(nbd_to_dev(nbd), "Connection timed out, shutting down connection\n");
        set_bit(NBD_TIMEDOUT, &nbd->runtime_flags);
-       req->errors++;
+       req->errors = -EIO;
 
        mutex_lock(&nbd->config_lock);
        sock_shutdown(nbd);
@@ -202,7 +205,7 @@ static enum blk_eh_timer_return nbd_xmit_timeout(struct request *req,
  *  Send or receive packet.
  */
 static int sock_xmit(struct nbd_device *nbd, int index, int send,
-                    struct iov_iter *iter, int msg_flags)
+                    struct iov_iter *iter, int msg_flags, int *sent)
 {
        struct socket *sock = nbd->socks[index]->sock;
        int result;
@@ -237,6 +240,8 @@ static int sock_xmit(struct nbd_device *nbd, int index, int send,
                                result = -EPIPE; /* short read */
                        break;
                }
+               if (sent)
+                       *sent += result;
        } while (msg_data_left(&msg));
 
        tsk_restore_flags(current, pflags, PF_MEMALLOC);
@@ -248,6 +253,7 @@ static int sock_xmit(struct nbd_device *nbd, int index, int send,
 static int nbd_send_cmd(struct nbd_device *nbd, struct nbd_cmd *cmd, int index)
 {
        struct request *req = blk_mq_rq_from_pdu(cmd);
+       struct nbd_sock *nsock = nbd->socks[index];
        int result;
        struct nbd_request request = {.magic = htonl(NBD_REQUEST_MAGIC)};
        struct kvec iov = {.iov_base = &request, .iov_len = sizeof(request)};
@@ -256,6 +262,7 @@ static int nbd_send_cmd(struct nbd_device *nbd, struct nbd_cmd *cmd, int index)
        struct bio *bio;
        u32 type;
        u32 tag = blk_mq_unique_tag(req);
+       int sent = nsock->sent, skip = 0;
 
        iov_iter_kvec(&from, WRITE | ITER_KVEC, &iov, 1, sizeof(request));
 
@@ -283,6 +290,17 @@ static int nbd_send_cmd(struct nbd_device *nbd, struct nbd_cmd *cmd, int index)
                return -EIO;
        }
 
+       /* We did a partial send previously, and we at least sent the whole
+        * request struct, so just go and send the rest of the pages in the
+        * request.
+        */
+       if (sent) {
+               if (sent >= sizeof(request)) {
+                       skip = sent - sizeof(request);
+                       goto send_pages;
+               }
+               iov_iter_advance(&from, sent);
+       }
        request.type = htonl(type);
        if (type != NBD_CMD_FLUSH) {
                request.from = cpu_to_be64((u64)blk_rq_pos(req) << 9);
@@ -294,15 +312,27 @@ static int nbd_send_cmd(struct nbd_device *nbd, struct nbd_cmd *cmd, int index)
                cmd, nbdcmd_to_ascii(type),
                (unsigned long long)blk_rq_pos(req) << 9, blk_rq_bytes(req));
        result = sock_xmit(nbd, index, 1, &from,
-                       (type == NBD_CMD_WRITE) ? MSG_MORE : 0);
+                       (type == NBD_CMD_WRITE) ? MSG_MORE : 0, &sent);
        if (result <= 0) {
+               if (result == -ERESTARTSYS) {
+                       /* If we haven't sent anything we can just return BUSY;
+                        * however, if we have sent something we need to make
+                        * sure we only allow this req to be sent until we are
+                        * completely done.
+                        */
+                       if (sent) {
+                               nsock->pending = req;
+                               nsock->sent = sent;
+                       }
+                       return BLK_MQ_RQ_QUEUE_BUSY;
+               }
                dev_err_ratelimited(disk_to_dev(nbd->disk),
                        "Send control failed (result %d)\n", result);
                return -EIO;
        }
-
+send_pages:
        if (type != NBD_CMD_WRITE)
-               return 0;
+               goto out;
 
        bio = req->bio;
        while (bio) {
@@ -318,8 +348,25 @@ static int nbd_send_cmd(struct nbd_device *nbd, struct nbd_cmd *cmd, int index)
                                cmd, bvec.bv_len);
                        iov_iter_bvec(&from, ITER_BVEC | WRITE,
                                      &bvec, 1, bvec.bv_len);
-                       result = sock_xmit(nbd, index, 1, &from, flags);
+                       if (skip) {
+                               if (skip >= iov_iter_count(&from)) {
+                                       skip -= iov_iter_count(&from);
+                                       continue;
+                               }
+                               iov_iter_advance(&from, skip);
+                               skip = 0;
+                       }
+                       result = sock_xmit(nbd, index, 1, &from, flags, &sent);
                        if (result <= 0) {
+                               if (result == -ERESTARTSYS) {
+                                       /* We've already sent the header, so we
+                                        * have no choice but to set pending and
+                                        * return BUSY.
+                                        */
+                                       nsock->pending = req;
+                                       nsock->sent = sent;
+                                       return BLK_MQ_RQ_QUEUE_BUSY;
+                               }
                                dev_err(disk_to_dev(nbd->disk),
                                        "Send data failed (result %d)\n",
                                        result);
@@ -336,6 +383,9 @@ static int nbd_send_cmd(struct nbd_device *nbd, struct nbd_cmd *cmd, int index)
                }
                bio = next;
        }
+out:
+       nsock->pending = NULL;
+       nsock->sent = 0;
        return 0;
 }
 
@@ -353,7 +403,7 @@ static struct nbd_cmd *nbd_read_stat(struct nbd_device *nbd, int index)
 
        reply.magic = 0;
        iov_iter_kvec(&to, READ | ITER_KVEC, &iov, 1, sizeof(reply));
-       result = sock_xmit(nbd, index, 0, &to, MSG_WAITALL);
+       result = sock_xmit(nbd, index, 0, &to, MSG_WAITALL, NULL);
        if (result <= 0) {
                if (!test_bit(NBD_DISCONNECTED, &nbd->runtime_flags) &&
                    !test_bit(NBD_DISCONNECT_REQUESTED, &nbd->runtime_flags))
@@ -383,7 +433,7 @@ static struct nbd_cmd *nbd_read_stat(struct nbd_device *nbd, int index)
        if (ntohl(reply.error)) {
                dev_err(disk_to_dev(nbd->disk), "Other side returned error (%d)\n",
                        ntohl(reply.error));
-               req->errors++;
+               req->errors = -EIO;
                return cmd;
        }
 
@@ -395,11 +445,11 @@ static struct nbd_cmd *nbd_read_stat(struct nbd_device *nbd, int index)
                rq_for_each_segment(bvec, req, iter) {
                        iov_iter_bvec(&to, ITER_BVEC | READ,
                                      &bvec, 1, bvec.bv_len);
-                       result = sock_xmit(nbd, index, 0, &to, MSG_WAITALL);
+                       result = sock_xmit(nbd, index, 0, &to, MSG_WAITALL, NULL);
                        if (result <= 0) {
                                dev_err(disk_to_dev(nbd->disk), "Receive data failed (result %d)\n",
                                        result);
-                               req->errors++;
+                               req->errors = -EIO;
                                return cmd;
                        }
                        dev_dbg(nbd_to_dev(nbd), "request %p: got %d bytes data\n",
@@ -469,7 +519,7 @@ static void nbd_clear_req(struct request *req, void *data, bool reserved)
        if (!blk_mq_request_started(req))
                return;
        cmd = blk_mq_rq_to_pdu(req);
-       req->errors++;
+       req->errors = -EIO;
        nbd_end_request(cmd);
 }
 
@@ -482,22 +532,23 @@ static void nbd_clear_que(struct nbd_device *nbd)
 }
 
 
-static void nbd_handle_cmd(struct nbd_cmd *cmd, int index)
+static int nbd_handle_cmd(struct nbd_cmd *cmd, int index)
 {
        struct request *req = blk_mq_rq_from_pdu(cmd);
        struct nbd_device *nbd = cmd->nbd;
        struct nbd_sock *nsock;
+       int ret;
 
        if (index >= nbd->num_connections) {
                dev_err_ratelimited(disk_to_dev(nbd->disk),
                                    "Attempted send on invalid socket\n");
-               goto error_out;
+               return -EINVAL;
        }
 
        if (test_bit(NBD_DISCONNECTED, &nbd->runtime_flags)) {
                dev_err_ratelimited(disk_to_dev(nbd->disk),
                                    "Attempted send on closed socket\n");
-               goto error_out;
+               return -EINVAL;
        }
 
        req->errors = 0;
@@ -508,29 +559,30 @@ static void nbd_handle_cmd(struct nbd_cmd *cmd, int index)
                mutex_unlock(&nsock->tx_lock);
                dev_err_ratelimited(disk_to_dev(nbd->disk),
                                    "Attempted send on closed socket\n");
-               goto error_out;
+               return -EINVAL;
        }
 
-       if (nbd_send_cmd(nbd, cmd, index) != 0) {
-               dev_err_ratelimited(disk_to_dev(nbd->disk),
-                                   "Request send failed\n");
-               req->errors++;
-               nbd_end_request(cmd);
+       /* Handle the case where we have a pending, partially transmitted
+        * request that _has_ to be serviced first.  We need to call requeue
+        * here so that it gets put _after_ the request that is already on the
+        * dispatch list.
+        */
+       if (unlikely(nsock->pending && nsock->pending != req)) {
+               blk_mq_requeue_request(req, true);
+               ret = 0;
+               goto out;
        }
-
+       ret = nbd_send_cmd(nbd, cmd, index);
+out:
        mutex_unlock(&nsock->tx_lock);
-
-       return;
-
-error_out:
-       req->errors++;
-       nbd_end_request(cmd);
+       return ret;
 }
 
 static int nbd_queue_rq(struct blk_mq_hw_ctx *hctx,
                        const struct blk_mq_queue_data *bd)
 {
        struct nbd_cmd *cmd = blk_mq_rq_to_pdu(bd->rq);
+       int ret;
 
        /*
         * Since we look at the bio's to send the request over the network we
@@ -543,10 +595,20 @@ static int nbd_queue_rq(struct blk_mq_hw_ctx *hctx,
         */
        init_completion(&cmd->send_complete);
        blk_mq_start_request(bd->rq);
-       nbd_handle_cmd(cmd, hctx->queue_num);
+
+       /* We can be called directly from the user space process, which means we
+        * could have signals pending, so our sendmsg can fail.  In
+        * this case we need to return that we are busy; otherwise error out as
+        * appropriate.
+        */
+       ret = nbd_handle_cmd(cmd, hctx->queue_num);
+       if (ret < 0)
+               ret = BLK_MQ_RQ_QUEUE_ERROR;
+       if (!ret)
+               ret = BLK_MQ_RQ_QUEUE_OK;
        complete(&cmd->send_complete);
 
-       return BLK_MQ_RQ_QUEUE_OK;
+       return ret;
 }
 
 static int nbd_add_socket(struct nbd_device *nbd, struct block_device *bdev,
@@ -581,6 +643,8 @@ static int nbd_add_socket(struct nbd_device *nbd, struct block_device *bdev,
 
        mutex_init(&nsock->tx_lock);
        nsock->sock = sock;
+       nsock->pending = NULL;
+       nsock->sent = 0;
        socks[nbd->num_connections++] = nsock;
 
        if (max_part)
@@ -602,6 +666,8 @@ static void nbd_reset(struct nbd_device *nbd)
 
 static void nbd_bdev_reset(struct block_device *bdev)
 {
+       if (bdev->bd_openers > 1)
+               return;
        set_device_ro(bdev, false);
        bdev->bd_inode->i_size = 0;
        if (max_part > 0) {
@@ -634,7 +700,7 @@ static void send_disconnects(struct nbd_device *nbd)
 
        for (i = 0; i < nbd->num_connections; i++) {
                iov_iter_kvec(&from, WRITE | ITER_KVEC, &iov, 1, sizeof(request));
-               ret = sock_xmit(nbd, i, 1, &from, 0);
+               ret = sock_xmit(nbd, i, 1, &from, 0, NULL);
                if (ret <= 0)
                        dev_err(disk_to_dev(nbd->disk),
                                "Send disconnect failed %d\n", ret);
@@ -665,7 +731,8 @@ static int nbd_clear_sock(struct nbd_device *nbd, struct block_device *bdev)
 {
        sock_shutdown(nbd);
        nbd_clear_que(nbd);
-       kill_bdev(bdev);
+
+       __invalidate_device(bdev, true);
        nbd_bdev_reset(bdev);
        /*
         * We want to give the run thread a chance to wait for everybody
@@ -781,7 +848,10 @@ static int __nbd_ioctl(struct block_device *bdev, struct nbd_device *nbd,
                nbd_size_set(nbd, bdev, nbd->blksize, arg);
                return 0;
        case NBD_SET_TIMEOUT:
-               nbd->tag_set.timeout = arg * HZ;
+               if (arg) {
+                       nbd->tag_set.timeout = arg * HZ;
+                       blk_queue_rq_timeout(nbd->disk->queue, arg * HZ);
+               }
                return 0;
 
        case NBD_SET_FLAGS:
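The nbd hunks above all serve one idea: a send interrupted by a signal (-ERESTARTSYS) must not error out the request; instead the socket records which request is in flight and how many bytes already went out, and the retry resumes from that offset. The core bookkeeping, condensed from the hunks (names as above; not the full function):

    result = sock_xmit(nbd, index, 1, &from, flags, &sent);
    if (result == -ERESTARTSYS && sent) {
            nsock->pending = req;  /* only this req may finish on the socket */
            nsock->sent = sent;    /* resume offset for the requeued retry   */
            return BLK_MQ_RQ_QUEUE_BUSY;
    }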
index 10aed84244f51854305ff7e0c59277731ab826f2..939641d6e2625e80babab415331c1ac187d88251 100644 (file)
@@ -50,7 +50,7 @@
                         the slower the port i/o.  In some cases, setting
                         this to zero will speed up the device. (default -1)
                         
-            major       You may use this parameter to overide the
+            major       You may use this parameter to override the
                         default major number (46) that this driver
                         will use.  Be sure to change the device
                         name as well.
index 644ba0888bd41bb5e54f4ab58345b6af9519e0c2..9cfd2e06a64917a99a3f70da9753cbc8bd27401c 100644 (file)
@@ -61,7 +61,7 @@
                         first drive found.
                        
 
-            major       You may use this parameter to overide the
+            major       You may use this parameter to override the
                         default major number (45) that this driver
                         will use.  Be sure to change the device
                         name as well.
index ed93e8badf5684d513ef78a8c03f74ccc4531ecd..14c5d32f5d8bc067532ba6ea95070d5c2a76db73 100644 (file)
@@ -59,7 +59,7 @@
                         the slower the port i/o.  In some cases, setting
                         this to zero will speed up the device. (default -1)
 
-           major       You may use this parameter to overide the
+           major       You may use this parameter to override the
                        default major number (47) that this driver
                        will use.  Be sure to change the device
                        name as well.
index 5db955fe3a949018e353ebaa1b98a4a1f17b86ef..3b5882bfb7364e33ab3f7b8355219ee2c977c4fa 100644 (file)
@@ -84,7 +84,7 @@
                        the slower the port i/o.  In some cases, setting
                        this to zero will speed up the device. (default -1)
 
-           major       You may use this parameter to overide the
+           major       You may use this parameter to override the
                        default major number (97) that this driver
                        will use.  Be sure to change the device
                        name as well.
index 61fc6824299ac13c762e84dde6cae8baf8411e37..e815312a00add6b96651f2a956dc84d14d90adc7 100644 (file)
@@ -61,7 +61,7 @@
                         the slower the port i/o.  In some cases, setting
                         this to zero will speed up the device. (default -1)
 
-           major       You may use this parameter to overide the
+           major       You may use this parameter to override the
                        default major number (96) that this driver
                        will use.  Be sure to change the device
                        name as well.
index 4d680772379828423d8605b1cae8c5da271ec5b8..517838b659646d3e02bd0a2ba9e5de2bdf8539f8 100644 (file)
@@ -120,10 +120,11 @@ static int atomic_dec_return_safe(atomic_t *v)
 
 /* Feature bits */
 
-#define RBD_FEATURE_LAYERING   (1<<0)
-#define RBD_FEATURE_STRIPINGV2 (1<<1)
-#define RBD_FEATURE_EXCLUSIVE_LOCK (1<<2)
-#define RBD_FEATURE_DATA_POOL (1<<7)
+#define RBD_FEATURE_LAYERING           (1ULL<<0)
+#define RBD_FEATURE_STRIPINGV2         (1ULL<<1)
+#define RBD_FEATURE_EXCLUSIVE_LOCK     (1ULL<<2)
+#define RBD_FEATURE_DATA_POOL          (1ULL<<7)
+
 #define RBD_FEATURES_ALL       (RBD_FEATURE_LAYERING |         \
                                 RBD_FEATURE_STRIPINGV2 |       \
                                 RBD_FEATURE_EXCLUSIVE_LOCK |   \
@@ -499,16 +500,23 @@ static bool rbd_is_lock_owner(struct rbd_device *rbd_dev)
        return is_lock_owner;
 }
 
+static ssize_t rbd_supported_features_show(struct bus_type *bus, char *buf)
+{
+       return sprintf(buf, "0x%llx\n", RBD_FEATURES_SUPPORTED);
+}
+
 static BUS_ATTR(add, S_IWUSR, NULL, rbd_add);
 static BUS_ATTR(remove, S_IWUSR, NULL, rbd_remove);
 static BUS_ATTR(add_single_major, S_IWUSR, NULL, rbd_add_single_major);
 static BUS_ATTR(remove_single_major, S_IWUSR, NULL, rbd_remove_single_major);
+static BUS_ATTR(supported_features, S_IRUGO, rbd_supported_features_show, NULL);
 
 static struct attribute *rbd_bus_attrs[] = {
        &bus_attr_add.attr,
        &bus_attr_remove.attr,
        &bus_attr_add_single_major.attr,
        &bus_attr_remove_single_major.attr,
+       &bus_attr_supported_features.attr,
        NULL,
 };
 
index e27d89a36c34170d1c894b60f43ab3903a5fbf70..dceb5edd1e5455f4c1b101e8ad3ce4dba46ac22f 100644 (file)
@@ -1189,6 +1189,8 @@ static int zram_add(void)
        blk_queue_io_min(zram->disk->queue, PAGE_SIZE);
        blk_queue_io_opt(zram->disk->queue, PAGE_SIZE);
        zram->disk->queue->limits.discard_granularity = PAGE_SIZE;
+       zram->disk->queue->limits.max_sectors = SECTORS_PER_PAGE;
+       zram->disk->queue->limits.chunk_sectors = 0;
        blk_queue_max_discard_sectors(zram->disk->queue, UINT_MAX);
        /*
         * zram_bio_discard() will clear all logical blocks if logical block
index c2c14a12713b56038c8c21deae2212550d24422b..a6a9dd4d0eeffd66ffa446de2fc726270049b857 100644 (file)
@@ -344,7 +344,8 @@ config BT_WILINK
 
 config BT_QCOMSMD
        tristate "Qualcomm SMD based HCI support"
-       depends on (QCOM_SMD && QCOM_WCNSS_CTRL) || COMPILE_TEST
+       depends on RPMSG || (COMPILE_TEST && RPMSG=n)
+       depends on QCOM_WCNSS_CTRL || (COMPILE_TEST && QCOM_WCNSS_CTRL=n)
        select BT_QCA
        help
          Qualcomm SMD based HCI driver.
index 8d4868af9bbd88fff13eb8525dec5310654f54de..ef730c173d4b875063726ccd398b7cded2e6f660 100644 (file)
@@ -14,7 +14,7 @@
 
 #include <linux/module.h>
 #include <linux/slab.h>
-#include <linux/soc/qcom/smd.h>
+#include <linux/rpmsg.h>
 #include <linux/soc/qcom/wcnss_ctrl.h>
 #include <linux/platform_device.h>
 
@@ -26,8 +26,8 @@
 struct btqcomsmd {
        struct hci_dev *hdev;
 
-       struct qcom_smd_channel *acl_channel;
-       struct qcom_smd_channel *cmd_channel;
+       struct rpmsg_endpoint *acl_channel;
+       struct rpmsg_endpoint *cmd_channel;
 };
 
 static int btqcomsmd_recv(struct hci_dev *hdev, unsigned int type,
@@ -48,19 +48,19 @@ static int btqcomsmd_recv(struct hci_dev *hdev, unsigned int type,
        return hci_recv_frame(hdev, skb);
 }
 
-static int btqcomsmd_acl_callback(struct qcom_smd_channel *channel,
-                                 const void *data, size_t count)
+static int btqcomsmd_acl_callback(struct rpmsg_device *rpdev, void *data,
+                                 int count, void *priv, u32 addr)
 {
-       struct btqcomsmd *btq = qcom_smd_get_drvdata(channel);
+       struct btqcomsmd *btq = priv;
 
        btq->hdev->stat.byte_rx += count;
        return btqcomsmd_recv(btq->hdev, HCI_ACLDATA_PKT, data, count);
 }
 
-static int btqcomsmd_cmd_callback(struct qcom_smd_channel *channel,
-                                 const void *data, size_t count)
+static int btqcomsmd_cmd_callback(struct rpmsg_device *rpdev, void *data,
+                                 int count, void *priv, u32 addr)
 {
-       struct btqcomsmd *btq = qcom_smd_get_drvdata(channel);
+       struct btqcomsmd *btq = priv;
 
        return btqcomsmd_recv(btq->hdev, HCI_EVENT_PKT, data, count);
 }
@@ -72,12 +72,12 @@ static int btqcomsmd_send(struct hci_dev *hdev, struct sk_buff *skb)
 
        switch (hci_skb_pkt_type(skb)) {
        case HCI_ACLDATA_PKT:
-               ret = qcom_smd_send(btq->acl_channel, skb->data, skb->len);
+               ret = rpmsg_send(btq->acl_channel, skb->data, skb->len);
                hdev->stat.acl_tx++;
                hdev->stat.byte_tx += skb->len;
                break;
        case HCI_COMMAND_PKT:
-               ret = qcom_smd_send(btq->cmd_channel, skb->data, skb->len);
+               ret = rpmsg_send(btq->cmd_channel, skb->data, skb->len);
                hdev->stat.cmd_tx++;
                break;
        default:
@@ -114,18 +114,15 @@ static int btqcomsmd_probe(struct platform_device *pdev)
        wcnss = dev_get_drvdata(pdev->dev.parent);
 
        btq->acl_channel = qcom_wcnss_open_channel(wcnss, "APPS_RIVA_BT_ACL",
-                                                  btqcomsmd_acl_callback);
+                                                  btqcomsmd_acl_callback, btq);
        if (IS_ERR(btq->acl_channel))
                return PTR_ERR(btq->acl_channel);
 
        btq->cmd_channel = qcom_wcnss_open_channel(wcnss, "APPS_RIVA_BT_CMD",
-                                                  btqcomsmd_cmd_callback);
+                                                  btqcomsmd_cmd_callback, btq);
        if (IS_ERR(btq->cmd_channel))
                return PTR_ERR(btq->cmd_channel);
 
-       qcom_smd_set_drvdata(btq->acl_channel, btq);
-       qcom_smd_set_drvdata(btq->cmd_channel, btq);
-
        hdev = hci_alloc_dev();
        if (!hdev)
                return -ENOMEM;
@@ -158,6 +155,9 @@ static int btqcomsmd_remove(struct platform_device *pdev)
        hci_unregister_dev(btq->hdev);
        hci_free_dev(btq->hdev);
 
+       rpmsg_destroy_ept(btq->cmd_channel);
+       rpmsg_destroy_ept(btq->acl_channel);
+
        return 0;
 }
 
index 4a99ac756f0815a890665f1433b1038a98069be9..9959c762da2f8ec1f5cb0fde48021598f3deecfd 100644 (file)
@@ -55,6 +55,7 @@ MODULE_DEVICE_TABLE(pci, pci_tbl);
 struct amd768_priv {
        void __iomem *iobase;
        struct pci_dev *pcidev;
+       u32 pmbase;
 };
 
 static int amd_rng_read(struct hwrng *rng, void *buf, size_t max, bool wait)
@@ -148,33 +149,58 @@ found:
        if (pmbase == 0)
                return -EIO;
 
-       priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
+       priv = kzalloc(sizeof(*priv), GFP_KERNEL);
        if (!priv)
                return -ENOMEM;
 
-       if (!devm_request_region(&pdev->dev, pmbase + PMBASE_OFFSET,
-                               PMBASE_SIZE, DRV_NAME)) {
+       if (!request_region(pmbase + PMBASE_OFFSET, PMBASE_SIZE, DRV_NAME)) {
                dev_err(&pdev->dev, DRV_NAME " region 0x%x already in use!\n",
                        pmbase + 0xF0);
-               return -EBUSY;
+               err = -EBUSY;
+               goto out;
        }
 
-       priv->iobase = devm_ioport_map(&pdev->dev, pmbase + PMBASE_OFFSET,
-                       PMBASE_SIZE);
+       priv->iobase = ioport_map(pmbase + PMBASE_OFFSET, PMBASE_SIZE);
        if (!priv->iobase) {
                pr_err(DRV_NAME "Cannot map ioport\n");
-               return -ENOMEM;
+               err = -EINVAL;
+               goto err_iomap;
        }
 
        amd_rng.priv = (unsigned long)priv;
+       priv->pmbase = pmbase;
        priv->pcidev = pdev;
 
        pr_info(DRV_NAME " detected\n");
-       return devm_hwrng_register(&pdev->dev, &amd_rng);
+       err = hwrng_register(&amd_rng);
+       if (err) {
+               pr_err(DRV_NAME " registering failed (%d)\n", err);
+               goto err_hwrng;
+       }
+       return 0;
+
+err_hwrng:
+       ioport_unmap(priv->iobase);
+err_iomap:
+       release_region(pmbase + PMBASE_OFFSET, PMBASE_SIZE);
+out:
+       kfree(priv);
+       return err;
 }
 
 static void __exit mod_exit(void)
 {
+       struct amd768_priv *priv;
+
+       priv = (struct amd768_priv *)amd_rng.priv;
+
+       hwrng_unregister(&amd_rng);
+
+       ioport_unmap(priv->iobase);
+
+       release_region(priv->pmbase + PMBASE_OFFSET, PMBASE_SIZE);
+
+       kfree(priv);
 }
 
 module_init(mod_init);
index e7a2459420291b6b931b285fe2b9be22cdd3e897..e1d421a36a138d6a5e30b16bf62208846b5372ab 100644 (file)
@@ -31,6 +31,9 @@
 #include <linux/module.h>
 #include <linux/pci.h>
 
+
+#define PFX    KBUILD_MODNAME ": "
+
 #define GEODE_RNG_DATA_REG   0x50
 #define GEODE_RNG_STATUS_REG 0x54
 
@@ -82,6 +85,7 @@ static struct hwrng geode_rng = {
 
 static int __init mod_init(void)
 {
+       int err = -ENODEV;
        struct pci_dev *pdev = NULL;
        const struct pci_device_id *ent;
        void __iomem *mem;
@@ -89,27 +93,43 @@ static int __init mod_init(void)
 
        for_each_pci_dev(pdev) {
                ent = pci_match_id(pci_tbl, pdev);
-               if (ent) {
-                       rng_base = pci_resource_start(pdev, 0);
-                       if (rng_base == 0)
-                               return -ENODEV;
-
-                       mem = devm_ioremap(&pdev->dev, rng_base, 0x58);
-                       if (!mem)
-                               return -ENOMEM;
-                       geode_rng.priv = (unsigned long)mem;
-
-                       pr_info("AMD Geode RNG detected\n");
-                       return devm_hwrng_register(&pdev->dev, &geode_rng);
-               }
+               if (ent)
+                       goto found;
        }
-
        /* Device not found. */
-       return -ENODEV;
+       goto out;
+
+found:
+       rng_base = pci_resource_start(pdev, 0);
+       if (rng_base == 0)
+               goto out;
+       err = -ENOMEM;
+       mem = ioremap(rng_base, 0x58);
+       if (!mem)
+               goto out;
+       geode_rng.priv = (unsigned long)mem;
+
+       pr_info("AMD Geode RNG detected\n");
+       err = hwrng_register(&geode_rng);
+       if (err) {
+               pr_err(PFX "RNG registering failed (%d)\n",
+                      err);
+               goto err_unmap;
+       }
+out:
+       return err;
+
+err_unmap:
+       iounmap(mem);
+       goto out;
 }
 
 static void __exit mod_exit(void)
 {
+       void __iomem *mem = (void __iomem *)geode_rng.priv;
+
+       hwrng_unregister(&geode_rng);
+       iounmap(mem);
 }
 
 module_init(mod_init);
index 3ad86fdf954e96a71b16f436f2b22bd02e6cbd8f..b1ad12552b566a6892a7de411e9cd0c65cf25933 100644 (file)
@@ -397,9 +397,8 @@ static int of_get_omap_rng_device_details(struct omap_rng_dev *priv,
                                irq, err);
                        return err;
                }
-               omap_rng_write(priv, RNG_INTMASK_REG, RNG_SHUTDOWN_OFLO_MASK);
 
-               priv->clk = of_clk_get(pdev->dev.of_node, 0);
+               priv->clk = devm_clk_get(&pdev->dev, NULL);
                if (IS_ERR(priv->clk) && PTR_ERR(priv->clk) == -EPROBE_DEFER)
                        return -EPROBE_DEFER;
                if (!IS_ERR(priv->clk)) {
@@ -408,6 +407,19 @@ static int of_get_omap_rng_device_details(struct omap_rng_dev *priv,
                                dev_err(&pdev->dev, "unable to enable the clk, "
                                                    "err = %d\n", err);
                }
+
+               /*
+                * On OMAP4, enabling the shutdown_oflo interrupt is
+                * done in the interrupt mask register. There is no
+        * such register on EIP76, where the interrupt is enabled by
+        * the same bit in the control register.
+                */
+               if (priv->pdata->regs[RNG_INTMASK_REG])
+                       omap_rng_write(priv, RNG_INTMASK_REG,
+                                      RNG_SHUTDOWN_OFLO_MASK);
+               else
+                       omap_rng_write(priv, RNG_CONTROL_REG,
+                                      RNG_SHUTDOWN_OFLO_MASK);
        }
        return 0;
 }
index a5b1eb276c0bf97c9959d72171cf1738ac0503b6..e6d0d271c58c83073e3acb71c788867d4edfc846 100644 (file)
@@ -6,7 +6,7 @@
 
 #include <linux/module.h>
 #include <linux/kernel.h>
-#include <linux/sched.h>
+#include <linux/sched/signal.h>
 #include <linux/interrupt.h>
 #include <linux/time.h>
 #include <linux/timer.h>
index 2a558c706581b21864d45ac701213beb301e8899..3e73bcdf9e658d378963bee83bfeda2e76659c40 100644 (file)
@@ -84,11 +84,14 @@ struct pp_struct {
        struct ieee1284_info state;
        struct ieee1284_info saved_state;
        long default_inactivity;
+       int index;
 };
 
 /* should we use PARDEVICE_MAX here? */
 static struct device *devices[PARPORT_MAX];
 
+static DEFINE_IDA(ida_index);
+
 /* pp_struct.flags bitfields */
 #define PP_CLAIMED    (1<<0)
 #define PP_EXCL       (1<<1)
@@ -290,7 +293,7 @@ static int register_device(int minor, struct pp_struct *pp)
        struct pardevice *pdev = NULL;
        char *name;
        struct pardev_cb ppdev_cb;
-       int rc = 0;
+       int rc = 0, index;
 
        name = kasprintf(GFP_KERNEL, CHRDEV "%x", minor);
        if (name == NULL)
@@ -303,20 +306,23 @@ static int register_device(int minor, struct pp_struct *pp)
                goto err;
        }
 
+       index = ida_simple_get(&ida_index, 0, 0, GFP_KERNEL);
        memset(&ppdev_cb, 0, sizeof(ppdev_cb));
        ppdev_cb.irq_func = pp_irq;
        ppdev_cb.flags = (pp->flags & PP_EXCL) ? PARPORT_FLAG_EXCL : 0;
        ppdev_cb.private = pp;
-       pdev = parport_register_dev_model(port, name, &ppdev_cb, minor);
+       pdev = parport_register_dev_model(port, name, &ppdev_cb, index);
        parport_put_port(port);
 
        if (!pdev) {
                pr_warn("%s: failed to register device!\n", name);
                rc = -ENXIO;
+               ida_simple_remove(&ida_index, index);
                goto err;
        }
 
        pp->pdev = pdev;
+       pp->index = index;
        dev_dbg(&pdev->dev, "registered pardevice\n");
 err:
        kfree(name);
@@ -755,6 +761,7 @@ static int pp_release(struct inode *inode, struct file *file)
 
        if (pp->pdev) {
                parport_unregister_device(pp->pdev);
+               ida_simple_remove(&ida_index, pp->index);
                pp->pdev = NULL;
                pr_debug(CHRDEV "%x: unregistered pardevice\n", minor);
        }
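ida_simple_get()/ida_simple_remove() hand out the smallest free integer in a range, which is what lets ppdev assign stable, reusable device indices above. The pattern in isolation (hypothetical IDA, with the error check a caller should make):

    static DEFINE_IDA(example_ida);

    int id = ida_simple_get(&example_ida, 0, 0, GFP_KERNEL); /* end 0 = no cap */
    if (id < 0)
            return id;                  /* allocation failed */
    /* ... use id as a unique index ... */
    ida_simple_remove(&example_ida, id);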
index 1ef26403bcc83f6a0c26f20d67b74e7fe6331635..0ab0249189072befe3cee1b8696052727f360540 100644 (file)
@@ -312,13 +312,6 @@ static int random_read_wakeup_bits = 64;
  */
 static int random_write_wakeup_bits = 28 * OUTPUT_POOL_WORDS;
 
-/*
- * The minimum number of seconds between urandom pool reseeding.  We
- * do this to limit the amount of entropy that can be drained from the
- * input pool even if there are heavy demands on /dev/urandom.
- */
-static int random_min_urandom_seed = 60;
-
 /*
  * Originally, we used a primitive polynomial of degree .poolwords
  * over GF(2).  The taps for various sizes are defined below.  They
@@ -409,7 +402,6 @@ static struct poolinfo {
  */
 static DECLARE_WAIT_QUEUE_HEAD(random_read_wait);
 static DECLARE_WAIT_QUEUE_HEAD(random_write_wait);
-static DECLARE_WAIT_QUEUE_HEAD(urandom_init_wait);
 static struct fasync_struct *fasync;
 
 static DEFINE_SPINLOCK(random_ready_list_lock);
@@ -467,7 +459,6 @@ struct entropy_store {
        int entropy_count;
        int entropy_total;
        unsigned int initialized:1;
-       unsigned int limit:1;
        unsigned int last_data_init:1;
        __u8 last_data[EXTRACT_SIZE];
 };
@@ -485,7 +476,6 @@ static __u32 blocking_pool_data[OUTPUT_POOL_WORDS] __latent_entropy;
 static struct entropy_store input_pool = {
        .poolinfo = &poolinfo_table[0],
        .name = "input",
-       .limit = 1,
        .lock = __SPIN_LOCK_UNLOCKED(input_pool.lock),
        .pool = input_pool_data
 };
@@ -493,7 +483,6 @@ static struct entropy_store input_pool = {
 static struct entropy_store blocking_pool = {
        .poolinfo = &poolinfo_table[1],
        .name = "blocking",
-       .limit = 1,
        .pull = &input_pool,
        .lock = __SPIN_LOCK_UNLOCKED(blocking_pool.lock),
        .pool = blocking_pool_data,
@@ -855,13 +844,6 @@ static void crng_reseed(struct crng_state *crng, struct entropy_store *r)
        spin_unlock_irqrestore(&primary_crng.lock, flags);
 }
 
-static inline void maybe_reseed_primary_crng(void)
-{
-       if (crng_init > 2 &&
-           time_after(jiffies, primary_crng.init_time + CRNG_RESEED_INTERVAL))
-               crng_reseed(&primary_crng, &input_pool);
-}
-
 static inline void crng_wait_ready(void)
 {
        wait_event_interruptible(crng_init_wait, crng_ready());
@@ -1220,15 +1202,6 @@ static void xfer_secondary_pool(struct entropy_store *r, size_t nbytes)
            r->entropy_count > r->poolinfo->poolfracbits)
                return;
 
-       if (r->limit == 0 && random_min_urandom_seed) {
-               unsigned long now = jiffies;
-
-               if (time_before(now,
-                               r->last_pulled + random_min_urandom_seed * HZ))
-                       return;
-               r->last_pulled = now;
-       }
-
        _xfer_secondary_pool(r, nbytes);
 }
 
@@ -1236,8 +1209,6 @@ static void _xfer_secondary_pool(struct entropy_store *r, size_t nbytes)
 {
        __u32   tmp[OUTPUT_POOL_WORDS];
 
-       /* For /dev/random's pool, always leave two wakeups' worth */
-       int rsvd_bytes = r->limit ? 0 : random_read_wakeup_bits / 4;
        int bytes = nbytes;
 
        /* pull at least as much as a wakeup */
@@ -1248,7 +1219,7 @@ static void _xfer_secondary_pool(struct entropy_store *r, size_t nbytes)
        trace_xfer_secondary_pool(r->name, bytes * 8, nbytes * 8,
                                  ENTROPY_BITS(r), ENTROPY_BITS(r->pull));
        bytes = extract_entropy(r->pull, tmp, bytes,
-                               random_read_wakeup_bits / 8, rsvd_bytes);
+                               random_read_wakeup_bits / 8, 0);
        mix_pool_bytes(r, tmp, bytes);
        credit_entropy_bits(r, bytes*8);
 }
@@ -1276,7 +1247,7 @@ static void push_to_pool(struct work_struct *work)
 static size_t account(struct entropy_store *r, size_t nbytes, int min,
                      int reserved)
 {
-       int entropy_count, orig;
+       int entropy_count, orig, have_bytes;
        size_t ibytes, nfrac;
 
        BUG_ON(r->entropy_count > r->poolinfo->poolfracbits);
@@ -1285,14 +1256,12 @@ static size_t account(struct entropy_store *r, size_t nbytes, int min,
 retry:
        entropy_count = orig = ACCESS_ONCE(r->entropy_count);
        ibytes = nbytes;
-       /* If limited, never pull more than available */
-       if (r->limit) {
-               int have_bytes = entropy_count >> (ENTROPY_SHIFT + 3);
+       /* never pull more than available */
+       have_bytes = entropy_count >> (ENTROPY_SHIFT + 3);
 
-               if ((have_bytes -= reserved) < 0)
-                       have_bytes = 0;
-               ibytes = min_t(size_t, ibytes, have_bytes);
-       }
+       if ((have_bytes -= reserved) < 0)
+               have_bytes = 0;
+       ibytes = min_t(size_t, ibytes, have_bytes);
        if (ibytes < min)
                ibytes = 0;
 
@@ -1912,6 +1881,7 @@ SYSCALL_DEFINE3(getrandom, char __user *, buf, size_t, count,
 static int min_read_thresh = 8, min_write_thresh;
 static int max_read_thresh = OUTPUT_POOL_WORDS * 32;
 static int max_write_thresh = INPUT_POOL_WORDS * 32;
+static int random_min_urandom_seed = 60;
 static char sysctl_bootid[16];
 
 /*
@@ -2042,63 +2012,64 @@ struct ctl_table random_table[] = {
 };
 #endif         /* CONFIG_SYSCTL */
 
-static u32 random_int_secret[MD5_MESSAGE_BYTES / 4] ____cacheline_aligned;
-
-int random_int_secret_init(void)
-{
-       get_random_bytes(random_int_secret, sizeof(random_int_secret));
-       return 0;
-}
-
-static DEFINE_PER_CPU(__u32 [MD5_DIGEST_WORDS], get_random_int_hash)
-               __aligned(sizeof(unsigned long));
+struct batched_entropy {
+       union {
+               u64 entropy_u64[CHACHA20_BLOCK_SIZE / sizeof(u64)];
+               u32 entropy_u32[CHACHA20_BLOCK_SIZE / sizeof(u32)];
+       };
+       unsigned int position;
+};
 
 /*
- * Get a random word for internal kernel use only. Similar to urandom but
- * with the goal of minimal entropy pool depletion. As a result, the random
- * value is not cryptographically secure but for several uses the cost of
- * depleting entropy is too high
+ * Get a random word for internal kernel use only. The quality of the random
+ * number is either as good as RDRAND or as good as /dev/urandom, with the
+ * goal of being quite fast and not depleting entropy.
  */
-unsigned int get_random_int(void)
+static DEFINE_PER_CPU(struct batched_entropy, batched_entropy_u64);
+u64 get_random_u64(void)
 {
-       __u32 *hash;
-       unsigned int ret;
+       u64 ret;
+       struct batched_entropy *batch;
 
-       if (arch_get_random_int(&ret))
+#if BITS_PER_LONG == 64
+       if (arch_get_random_long((unsigned long *)&ret))
                return ret;
+#else
+       if (arch_get_random_long((unsigned long *)&ret) &&
+           arch_get_random_long((unsigned long *)&ret + 1))
+           return ret;
+#endif
 
-       hash = get_cpu_var(get_random_int_hash);
-
-       hash[0] += current->pid + jiffies + random_get_entropy();
-       md5_transform(hash, random_int_secret);
-       ret = hash[0];
-       put_cpu_var(get_random_int_hash);
-
+       batch = &get_cpu_var(batched_entropy_u64);
+       if (batch->position % ARRAY_SIZE(batch->entropy_u64) == 0) {
+               extract_crng((u8 *)batch->entropy_u64);
+               batch->position = 0;
+       }
+       ret = batch->entropy_u64[batch->position++];
+       put_cpu_var(batched_entropy_u64);
        return ret;
 }
-EXPORT_SYMBOL(get_random_int);
+EXPORT_SYMBOL(get_random_u64);
 
-/*
- * Same as get_random_int(), but returns unsigned long.
- */
-unsigned long get_random_long(void)
+static DEFINE_PER_CPU(struct batched_entropy, batched_entropy_u32);
+u32 get_random_u32(void)
 {
-       __u32 *hash;
-       unsigned long ret;
+       u32 ret;
+       struct batched_entropy *batch;
 
-       if (arch_get_random_long(&ret))
+       if (arch_get_random_int(&ret))
                return ret;
 
-       hash = get_cpu_var(get_random_int_hash);
-
-       hash[0] += current->pid + jiffies + random_get_entropy();
-       md5_transform(hash, random_int_secret);
-       ret = *(unsigned long *)hash;
-       put_cpu_var(get_random_int_hash);
-
+       batch = &get_cpu_var(batched_entropy_u32);
+       if (batch->position % ARRAY_SIZE(batch->entropy_u32) == 0) {
+               extract_crng((u8 *)batch->entropy_u32);
+               batch->position = 0;
+       }
+       ret = batch->entropy_u32[batch->position++];
+       put_cpu_var(batched_entropy_u32);
        return ret;
 }
-EXPORT_SYMBOL(get_random_long);
+EXPORT_SYMBOL(get_random_u32);
 
 /**
  * randomize_page - Generate a random, page aligned address
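The batch sizing implied above falls straight out of the union: CHACHA20_BLOCK_SIZE is 64 bytes, so one extract_crng() refill yields eight u64 values or sixteen u32 values, and the CRNG only runs once per that many calls on a given CPU:

    u64 entropy_u64[CHACHA20_BLOCK_SIZE / sizeof(u64)]; /* 64/8 = 8 slots  */
    u32 entropy_u32[CHACHA20_BLOCK_SIZE / sizeof(u32)]; /* 64/4 = 16 slots */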
index 0fb39fe217d17ae1bb681912764b2506007f5f42..67201f67a14af7b07aec557308a7fb39d1432157 100644 (file)
@@ -2502,7 +2502,7 @@ struct clk *__clk_create_clk(struct clk_hw *hw, const char *dev_id,
 
        clk->core = hw->core;
        clk->dev_id = dev_id;
-       clk->con_id = con_id;
+       clk->con_id = kstrdup_const(con_id, GFP_KERNEL);
        clk->max_rate = ULONG_MAX;
 
        clk_prepare_lock();
@@ -2518,6 +2518,7 @@ void __clk_free_clk(struct clk *clk)
        hlist_del(&clk->clks_node);
        clk_prepare_unlock();
 
+       kfree_const(clk->con_id);
        kfree(clk);
 }
 
index 924f560dcf80e8a5681fba4c670524b2b20b01ee..00d4150e33c37434c056ba27867db908d6117bdf 100644 (file)
@@ -127,7 +127,7 @@ PNAME(mux_ddrphy_p)         = { "dpll_ddr", "gpll_ddr" };
 PNAME(mux_pll_src_3plls_p)     = { "apll", "dpll", "gpll" };
 PNAME(mux_timer_p)             = { "xin24m", "pclk_peri_src" };
 
-PNAME(mux_pll_src_apll_dpll_gpll_usb480m_p)    = { "apll", "dpll", "gpll" "usb480m" };
+PNAME(mux_pll_src_apll_dpll_gpll_usb480m_p)    = { "apll", "dpll", "gpll", "usb480m" };
 
 PNAME(mux_mmc_src_p)   = { "apll", "dpll", "gpll", "xin24m" };
 PNAME(mux_i2s_pre_p)   = { "i2s_src", "i2s_frac", "ext_i2s", "xin12m" };
@@ -450,6 +450,13 @@ static void __init rk3036_clk_init(struct device_node *np)
                return;
        }
 
+       /*
+        * Make uart_pll_clk a child of the gpll, as all other sources are
+        * not that usable / stable.
+        */
+       writel_relaxed(HIWORD_UPDATE(0x2, 0x3, 10),
+                      reg_base + RK2928_CLKSEL_CON(13));
+
        ctx = rockchip_clk_init(np, reg_base, CLK_NR_CLKS);
        if (IS_ERR(ctx)) {
                pr_err("%s: rockchip clk init failed\n", __func__);
index 695bbf9ef428f98f5348d100a6af2525f04afb59..72109d2cf41b29da83dd88dbdb0e820364c893dd 100644 (file)
@@ -80,6 +80,7 @@ config SUN6I_A31_CCU
        select SUNXI_CCU_DIV
        select SUNXI_CCU_NK
        select SUNXI_CCU_NKM
+       select SUNXI_CCU_NKMP
        select SUNXI_CCU_NM
        select SUNXI_CCU_MP
        select SUNXI_CCU_PHASE
index e3c084cc6da55e77f24bf058038bda710fd792d0..f54114c607df76edeb77c70e0f7c97656e67634c 100644 (file)
@@ -566,7 +566,7 @@ static SUNXI_CCU_M_WITH_GATE(gpu_clk, "gpu", "pll-gpu",
                             0x1a0, 0, 3, BIT(31), CLK_SET_RATE_PARENT);
 
 /* Fixed Factor clocks */
-static CLK_FIXED_FACTOR(osc12M_clk, "osc12M", "osc24M", 1, 2, 0);
+static CLK_FIXED_FACTOR(osc12M_clk, "osc12M", "osc24M", 2, 1, 0);
 
 /* We hardcode the divider to 4 for now */
 static CLK_FIXED_FACTOR(pll_audio_clk, "pll-audio",
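
The CLK_FIXED_FACTOR() arguments here are (div, mult) in that order, so the corrected line gives osc12M = 24 MHz * 1 / 2 = 12 MHz, whereas the old (1, 2) ordering produced 24 MHz * 2 / 1 = 48 MHz.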
index 4c9a920ff4ab7c351d59333d131d039f5e36f40a..89e68d29bf456ab3d682f7d2ba7d35ad4a21bb58 100644 (file)
@@ -608,7 +608,7 @@ static SUNXI_CCU_M_WITH_MUX_GATE(hdmi_clk, "hdmi", lcd_ch1_parents,
                                 0x150, 0, 4, 24, 2, BIT(31),
                                 CLK_SET_RATE_PARENT);
 
-static SUNXI_CCU_GATE(hdmi_ddc_clk, "hdmi-ddc", "osc24M", 0x150, BIT(31), 0);
+static SUNXI_CCU_GATE(hdmi_ddc_clk, "hdmi-ddc", "osc24M", 0x150, BIT(30), 0);
 
 static SUNXI_CCU_GATE(ps_clk, "ps", "lcd1-ch1", 0x140, BIT(31), 0);
 
index 22c2ca7a2a221c1f25456e6e2548d381fbc23adf..b583f186a804df669e974f811e6bef91c4aaa877 100644 (file)
@@ -85,6 +85,10 @@ static unsigned long ccu_mp_recalc_rate(struct clk_hw *hw,
        unsigned int m, p;
        u32 reg;
 
+       /* Adjust parent_rate according to pre-dividers */
+       ccu_mux_helper_adjust_parent_for_prediv(&cmp->common, &cmp->mux,
+                                               -1, &parent_rate);
+
        reg = readl(cmp->common.base + cmp->common.reg);
 
        m = reg >> cmp->m.shift;
@@ -117,6 +121,10 @@ static int ccu_mp_set_rate(struct clk_hw *hw, unsigned long rate,
        unsigned int m, p;
        u32 reg;
 
+       /* Adjust parent_rate according to pre-dividers */
+       ccu_mux_helper_adjust_parent_for_prediv(&cmp->common, &cmp->mux,
+                                               -1, &parent_rate);
+
        max_m = cmp->m.max ?: 1 << cmp->m.width;
        max_p = cmp->p.max ?: 1 << ((1 << cmp->p.width) - 1);
 
index a2b40a0001577d2579aa563341d1d890797f4b7f..488055ed944f2b9dff8ca1baa5059ea22879328b 100644 (file)
@@ -107,7 +107,7 @@ static unsigned long ccu_nkmp_recalc_rate(struct clk_hw *hw,
        p = reg >> nkmp->p.shift;
        p &= (1 << nkmp->p.width) - 1;
 
-       return parent_rate * n * k >> p / m;
+       return (parent_rate * n * k >> p) / m;
 }
 
 static long ccu_nkmp_round_rate(struct clk_hw *hw, unsigned long rate,
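
The added parentheses matter because '/' binds tighter than '>>' in C: the old expression shifted by p / m instead of dividing the shifted product by m. A quick numeric illustration with arbitrary values:

    unsigned long parent = 24, n = 2, k = 1, p = 2, m = 3;
    unsigned long before = parent * n * k >> p / m;    /* 48 >> (2 / 3) == 48 >> 0 == 48 */
    unsigned long after  = (parent * n * k >> p) / m;  /* (48 >> 2) / 3  == 12 / 3  == 4 */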
index 8c30fec86094df926b8fa23bd162d44051894006..eb89b502acbdfdb5f343a34fde38afc1aa55683b 100644 (file)
@@ -17,7 +17,7 @@
 
 #include <linux/init.h>
 #include <linux/of.h>
-#include <linux/clockchip.h>
+#include <linux/clockchips.h>
 
 extern struct of_device_id __clkevt_of_table[];
 
index 745844ee973e1deda08203725d9b9d1b8e412972..d4ca9962a7595a0206710a0dd4a95656f426ae8e 100644 (file)
@@ -10,7 +10,6 @@
 #include <linux/io.h>
 #include <linux/platform_device.h>
 #include <linux/atmel_tc.h>
-#include <linux/sched_clock.h>
 
 
 /*
@@ -57,14 +56,9 @@ static u64 tc_get_cycles(struct clocksource *cs)
        return (upper << 16) | lower;
 }
 
-static u32 tc_get_cv32(void)
-{
-       return __raw_readl(tcaddr + ATMEL_TC_REG(0, CV));
-}
-
 static u64 tc_get_cycles32(struct clocksource *cs)
 {
-       return tc_get_cv32();
+       return __raw_readl(tcaddr + ATMEL_TC_REG(0, CV));
 }
 
 static struct clocksource clksrc = {
@@ -75,11 +69,6 @@ static struct clocksource clksrc = {
        .flags          = CLOCK_SOURCE_IS_CONTINUOUS,
 };
 
-static u64 notrace tc_read_sched_clock(void)
-{
-       return tc_get_cv32();
-}
-
 #ifdef CONFIG_GENERIC_CLOCKEVENTS
 
 struct tc_clkevt_device {
@@ -350,9 +339,6 @@ static int __init tcb_clksrc_init(void)
                clksrc.read = tc_get_cycles32;
                /* setup only channel 0 */
                tcb_setup_single_chan(tc, best_divisor_idx);
-
-               /* register sched_clock on chips with single 32 bit counter */
-               sched_clock_register(tc_read_sched_clock, 32, divided_rate);
        } else {
                /* tclib will give us three clocks no matter what the
                 * underlying platform supports.
index a475432818642fee4547699011ba4cf5aa619f3a..bc96d423781aa8a300725f8fbe0a052be12cd4b5 100644 (file)
@@ -680,9 +680,11 @@ static ssize_t show_cpuinfo_cur_freq(struct cpufreq_policy *policy,
                                        char *buf)
 {
        unsigned int cur_freq = __cpufreq_get(policy);
-       if (!cur_freq)
-               return sprintf(buf, "<unknown>");
-       return sprintf(buf, "%u\n", cur_freq);
+
+       if (cur_freq)
+               return sprintf(buf, "%u\n", cur_freq);
+
+       return sprintf(buf, "<unknown>\n");
 }
 
 /**
@@ -916,11 +918,19 @@ static struct kobj_type ktype_cpufreq = {
        .release        = cpufreq_sysfs_release,
 };
 
-static int add_cpu_dev_symlink(struct cpufreq_policy *policy,
-                              struct device *dev)
+static void add_cpu_dev_symlink(struct cpufreq_policy *policy, unsigned int cpu)
 {
+       struct device *dev = get_cpu_device(cpu);
+
+       if (!dev)
+               return;
+
+       if (cpumask_test_and_set_cpu(cpu, policy->real_cpus))
+               return;
+
        dev_dbg(dev, "%s: Adding symlink\n", __func__);
-       return sysfs_create_link(&dev->kobj, &policy->kobj, "cpufreq");
+       if (sysfs_create_link(&dev->kobj, &policy->kobj, "cpufreq"))
+               dev_err(dev, "cpufreq symlink creation failed\n");
 }
 
 static void remove_cpu_dev_symlink(struct cpufreq_policy *policy,
@@ -1178,10 +1188,13 @@ static int cpufreq_online(unsigned int cpu)
                policy->user_policy.min = policy->min;
                policy->user_policy.max = policy->max;
 
-               write_lock_irqsave(&cpufreq_driver_lock, flags);
-               for_each_cpu(j, policy->related_cpus)
+               for_each_cpu(j, policy->related_cpus) {
                        per_cpu(cpufreq_cpu_data, j) = policy;
-               write_unlock_irqrestore(&cpufreq_driver_lock, flags);
+                       add_cpu_dev_symlink(policy, j);
+               }
+       } else {
+               policy->min = policy->user_policy.min;
+               policy->max = policy->user_policy.max;
        }
 
        if (cpufreq_driver->get && !cpufreq_driver->setpolicy) {
@@ -1270,13 +1283,15 @@ out_exit_policy:
 
        if (cpufreq_driver->exit)
                cpufreq_driver->exit(policy);
+
+       for_each_cpu(j, policy->real_cpus)
+               remove_cpu_dev_symlink(policy, get_cpu_device(j));
+
 out_free_policy:
        cpufreq_policy_free(policy);
        return ret;
 }
 
-static int cpufreq_offline(unsigned int cpu);
-
 /**
  * cpufreq_add_dev - the cpufreq interface for a CPU device.
  * @dev: CPU device.
@@ -1298,16 +1313,10 @@ static int cpufreq_add_dev(struct device *dev, struct subsys_interface *sif)
 
        /* Create sysfs link on CPU registration */
        policy = per_cpu(cpufreq_cpu_data, cpu);
-       if (!policy || cpumask_test_and_set_cpu(cpu, policy->real_cpus))
-               return 0;
-
-       ret = add_cpu_dev_symlink(policy, dev);
-       if (ret) {
-               cpumask_clear_cpu(cpu, policy->real_cpus);
-               cpufreq_offline(cpu);
-       }
+       if (policy)
+               add_cpu_dev_symlink(policy, cpu);
 
-       return ret;
+       return 0;
 }
 
 static int cpufreq_offline(unsigned int cpu)
@@ -2532,4 +2541,5 @@ static int __init cpufreq_core_init(void)
 
        return 0;
 }
+module_param(off, int, 0444);
 core_initcall(cpufreq_core_init);
index b1fbaa30ae0415c330b9b1069e17900b99a48868..283491f742d3d78659696bd58c48fc5a3bd7a370 100644 (file)
@@ -84,6 +84,11 @@ static inline u64 div_ext_fp(u64 x, u64 y)
        return div64_u64(x << EXT_FRAC_BITS, y);
 }
 
+static inline int32_t percent_ext_fp(int percent)
+{
+       return div_ext_fp(percent, 100);
+}
+
 /**
  * struct sample -     Store performance sample
  * @core_avg_perf:     Ratio of APERF/MPERF which is the actual average
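
Given div_ext_fp() above, the new helper reduces to percent_ext_fp(p) == (p << EXT_FRAC_BITS) / 100, so percent_ext_fp(100) is the extended fixed-point 1.0 (int_ext_tofp(1)) and percent_ext_fp(50) is 0.5; this is what lets the percent-based limit updates below collapse into single expressions.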
@@ -359,9 +364,7 @@ static bool driver_registered __read_mostly;
 static bool acpi_ppc;
 #endif
 
-static struct perf_limits performance_limits;
-static struct perf_limits powersave_limits;
-static struct perf_limits *limits;
+static struct perf_limits global;
 
 static void intel_pstate_init_limits(struct perf_limits *limits)
 {
@@ -372,13 +375,6 @@ static void intel_pstate_init_limits(struct perf_limits *limits)
        limits->max_sysfs_pct = 100;
 }
 
-static void intel_pstate_set_performance_limits(struct perf_limits *limits)
-{
-       intel_pstate_init_limits(limits);
-       limits->min_perf_pct = 100;
-       limits->min_perf = int_ext_tofp(1);
-}
-
 static DEFINE_MUTEX(intel_pstate_driver_lock);
 static DEFINE_MUTEX(intel_pstate_limits_lock);
 
@@ -501,7 +497,7 @@ static void intel_pstate_init_acpi_perf_limits(struct cpufreq_policy *policy)
         * correct max turbo frequency based on the turbo state.
         * Also need to convert to MHz as _PSS freq is in MHz.
         */
-       if (!limits->turbo_disabled)
+       if (!global.turbo_disabled)
                cpu->acpi_perf_data.states[0].core_frequency =
                                        policy->cpuinfo.max_freq / 1000;
        cpu->valid_pss_table = true;
@@ -620,7 +616,7 @@ static inline void update_turbo_state(void)
 
        cpu = all_cpu_data[0];
        rdmsrl(MSR_IA32_MISC_ENABLE, misc_en);
-       limits->turbo_disabled =
+       global.turbo_disabled =
                (misc_en & MSR_IA32_MISC_ENABLE_TURBO_DISABLE ||
                 cpu->pstate.max_pstate == cpu->pstate.turbo_pstate);
 }
@@ -844,12 +840,11 @@ static struct freq_attr *hwp_cpufreq_attrs[] = {
 
 static void intel_pstate_hwp_set(struct cpufreq_policy *policy)
 {
-       int min, hw_min, max, hw_max, cpu, range, adj_range;
-       struct perf_limits *perf_limits = limits;
+       int min, hw_min, max, hw_max, cpu;
+       struct perf_limits *perf_limits = &global;
        u64 value, cap;
 
        for_each_cpu(cpu, policy->cpus) {
-               int max_perf_pct, min_perf_pct;
                struct cpudata *cpu_data = all_cpu_data[cpu];
                s16 epp;
 
@@ -858,24 +853,22 @@ static void intel_pstate_hwp_set(struct cpufreq_policy *policy)
 
                rdmsrl_on_cpu(cpu, MSR_HWP_CAPABILITIES, &cap);
                hw_min = HWP_LOWEST_PERF(cap);
-               if (limits->no_turbo)
+               if (global.no_turbo)
                        hw_max = HWP_GUARANTEED_PERF(cap);
                else
                        hw_max = HWP_HIGHEST_PERF(cap);
-               range = hw_max - hw_min;
 
-               max_perf_pct = perf_limits->max_perf_pct;
-               min_perf_pct = perf_limits->min_perf_pct;
+               max = fp_ext_toint(hw_max * perf_limits->max_perf);
+               if (cpu_data->policy == CPUFREQ_POLICY_PERFORMANCE)
+                       min = max;
+               else
+                       min = fp_ext_toint(hw_max * perf_limits->min_perf);
 
                rdmsrl_on_cpu(cpu, MSR_HWP_REQUEST, &value);
-               adj_range = min_perf_pct * range / 100;
-               min = hw_min + adj_range;
+
                value &= ~HWP_MIN_PERF(~0L);
                value |= HWP_MIN_PERF(min);
 
-               adj_range = max_perf_pct * range / 100;
-               max = hw_min + adj_range;
-
                value &= ~HWP_MAX_PERF(~0L);
                value |= HWP_MAX_PERF(max);
 
@@ -979,6 +972,7 @@ static void intel_pstate_update_policies(void)
 static int pid_param_set(void *data, u64 val)
 {
        *(u32 *)data = val;
+       pid_params.sample_rate_ns = pid_params.sample_rate_ms * NSEC_PER_MSEC;
        intel_pstate_reset_all_pid();
        return 0;
 }
@@ -1050,7 +1044,7 @@ static void intel_pstate_debug_hide_params(void)
        static ssize_t show_##file_name                                 \
        (struct kobject *kobj, struct attribute *attr, char *buf)       \
        {                                                               \
-               return sprintf(buf, "%u\n", limits->object);            \
+               return sprintf(buf, "%u\n", global.object);             \
        }
 
 static ssize_t intel_pstate_show_status(char *buf);
@@ -1141,10 +1135,10 @@ static ssize_t show_no_turbo(struct kobject *kobj,
        }
 
        update_turbo_state();
-       if (limits->turbo_disabled)
-               ret = sprintf(buf, "%u\n", limits->turbo_disabled);
+       if (global.turbo_disabled)
+               ret = sprintf(buf, "%u\n", global.turbo_disabled);
        else
-               ret = sprintf(buf, "%u\n", limits->no_turbo);
+               ret = sprintf(buf, "%u\n", global.no_turbo);
 
        mutex_unlock(&intel_pstate_driver_lock);
 
@@ -1171,14 +1165,14 @@ static ssize_t store_no_turbo(struct kobject *a, struct attribute *b,
        mutex_lock(&intel_pstate_limits_lock);
 
        update_turbo_state();
-       if (limits->turbo_disabled) {
+       if (global.turbo_disabled) {
                pr_warn("Turbo disabled by BIOS or unavailable on processor\n");
                mutex_unlock(&intel_pstate_limits_lock);
                mutex_unlock(&intel_pstate_driver_lock);
                return -EPERM;
        }
 
-       limits->no_turbo = clamp_t(int, input, 0, 1);
+       global.no_turbo = clamp_t(int, input, 0, 1);
 
        mutex_unlock(&intel_pstate_limits_lock);
 
@@ -1208,14 +1202,11 @@ static ssize_t store_max_perf_pct(struct kobject *a, struct attribute *b,
 
        mutex_lock(&intel_pstate_limits_lock);
 
-       limits->max_sysfs_pct = clamp_t(int, input, 0 , 100);
-       limits->max_perf_pct = min(limits->max_policy_pct,
-                                  limits->max_sysfs_pct);
-       limits->max_perf_pct = max(limits->min_policy_pct,
-                                  limits->max_perf_pct);
-       limits->max_perf_pct = max(limits->min_perf_pct,
-                                  limits->max_perf_pct);
-       limits->max_perf = div_ext_fp(limits->max_perf_pct, 100);
+       global.max_sysfs_pct = clamp_t(int, input, 0 , 100);
+       global.max_perf_pct = min(global.max_policy_pct, global.max_sysfs_pct);
+       global.max_perf_pct = max(global.min_policy_pct, global.max_perf_pct);
+       global.max_perf_pct = max(global.min_perf_pct, global.max_perf_pct);
+       global.max_perf = percent_ext_fp(global.max_perf_pct);
 
        mutex_unlock(&intel_pstate_limits_lock);
 
@@ -1245,14 +1236,11 @@ static ssize_t store_min_perf_pct(struct kobject *a, struct attribute *b,
 
        mutex_lock(&intel_pstate_limits_lock);
 
-       limits->min_sysfs_pct = clamp_t(int, input, 0 , 100);
-       limits->min_perf_pct = max(limits->min_policy_pct,
-                                  limits->min_sysfs_pct);
-       limits->min_perf_pct = min(limits->max_policy_pct,
-                                  limits->min_perf_pct);
-       limits->min_perf_pct = min(limits->max_perf_pct,
-                                  limits->min_perf_pct);
-       limits->min_perf = div_ext_fp(limits->min_perf_pct, 100);
+       global.min_sysfs_pct = clamp_t(int, input, 0 , 100);
+       global.min_perf_pct = max(global.min_policy_pct, global.min_sysfs_pct);
+       global.min_perf_pct = min(global.max_policy_pct, global.min_perf_pct);
+       global.min_perf_pct = min(global.max_perf_pct, global.min_perf_pct);
+       global.min_perf = percent_ext_fp(global.min_perf_pct);
 
        mutex_unlock(&intel_pstate_limits_lock);
 
@@ -1377,7 +1365,7 @@ static u64 atom_get_val(struct cpudata *cpudata, int pstate)
        u32 vid;
 
        val = (u64)pstate << 8;
-       if (limits->no_turbo && !limits->turbo_disabled)
+       if (global.no_turbo && !global.turbo_disabled)
                val |= (u64)1 << 32;
 
        vid_fp = cpudata->vid.min + mul_fp(
@@ -1547,7 +1535,7 @@ static u64 core_get_val(struct cpudata *cpudata, int pstate)
        u64 val;
 
        val = (u64)pstate << 8;
-       if (limits->no_turbo && !limits->turbo_disabled)
+       if (global.no_turbo && !global.turbo_disabled)
                val |= (u64)1 << 32;
 
        return val;
@@ -1673,9 +1661,9 @@ static void intel_pstate_get_min_max(struct cpudata *cpu, int *min, int *max)
        int max_perf = cpu->pstate.turbo_pstate;
        int max_perf_adj;
        int min_perf;
-       struct perf_limits *perf_limits = limits;
+       struct perf_limits *perf_limits = &global;
 
-       if (limits->no_turbo || limits->turbo_disabled)
+       if (global.no_turbo || global.turbo_disabled)
                max_perf = cpu->pstate.max_pstate;
 
        if (per_cpu_limits)
@@ -1810,7 +1798,7 @@ static inline int32_t get_target_pstate_use_cpu_load(struct cpudata *cpu)
 
        sample->busy_scaled = busy_frac * 100;
 
-       target = limits->no_turbo || limits->turbo_disabled ?
+       target = global.no_turbo || global.turbo_disabled ?
                        cpu->pstate.max_pstate : cpu->pstate.turbo_pstate;
        target += target >> 2;
        target = mul_fp(target, busy_frac);
@@ -1874,13 +1862,11 @@ static int intel_pstate_prepare_request(struct cpudata *cpu, int pstate)
 
        intel_pstate_get_min_max(cpu, &min_perf, &max_perf);
        pstate = clamp_t(int, pstate, min_perf, max_perf);
-       trace_cpu_frequency(pstate * cpu->pstate.scaling, cpu->cpu);
        return pstate;
 }
 
 static void intel_pstate_update_pstate(struct cpudata *cpu, int pstate)
 {
-       pstate = intel_pstate_prepare_request(cpu, pstate);
        if (pstate == cpu->pstate.current_pstate)
                return;
 
@@ -1900,6 +1886,8 @@ static inline void intel_pstate_adjust_busy_pstate(struct cpudata *cpu)
 
        update_turbo_state();
 
+       target_pstate = intel_pstate_prepare_request(cpu, target_pstate);
+       trace_cpu_frequency(target_pstate * cpu->pstate.scaling, cpu->cpu);
        intel_pstate_update_pstate(cpu, target_pstate);
 
        sample = &cpu->sample;
@@ -2070,36 +2058,34 @@ static void intel_pstate_clear_update_util_hook(unsigned int cpu)
 static void intel_pstate_update_perf_limits(struct cpufreq_policy *policy,
                                            struct perf_limits *limits)
 {
+       int32_t max_policy_perf, min_policy_perf;
 
-       limits->max_policy_pct = DIV_ROUND_UP(policy->max * 100,
-                                             policy->cpuinfo.max_freq);
-       limits->max_policy_pct = clamp_t(int, limits->max_policy_pct, 0, 100);
+       max_policy_perf = div_ext_fp(policy->max, policy->cpuinfo.max_freq);
+       max_policy_perf = clamp_t(int32_t, max_policy_perf, 0, int_ext_tofp(1));
        if (policy->max == policy->min) {
-               limits->min_policy_pct = limits->max_policy_pct;
+               min_policy_perf = max_policy_perf;
        } else {
-               limits->min_policy_pct = DIV_ROUND_UP(policy->min * 100,
-                                                     policy->cpuinfo.max_freq);
-               limits->min_policy_pct = clamp_t(int, limits->min_policy_pct,
-                                                0, 100);
+               min_policy_perf = div_ext_fp(policy->min,
+                                            policy->cpuinfo.max_freq);
+               min_policy_perf = clamp_t(int32_t, min_policy_perf,
+                                         0, max_policy_perf);
        }
 
-       /* Normalize user input to [min_policy_pct, max_policy_pct] */
-       limits->min_perf_pct = max(limits->min_policy_pct,
-                                  limits->min_sysfs_pct);
-       limits->min_perf_pct = min(limits->max_policy_pct,
-                                  limits->min_perf_pct);
-       limits->max_perf_pct = min(limits->max_policy_pct,
-                                  limits->max_sysfs_pct);
-       limits->max_perf_pct = max(limits->min_policy_pct,
-                                  limits->max_perf_pct);
-
-       /* Make sure min_perf_pct <= max_perf_pct */
-       limits->min_perf_pct = min(limits->max_perf_pct, limits->min_perf_pct);
-
-       limits->min_perf = div_ext_fp(limits->min_perf_pct, 100);
-       limits->max_perf = div_ext_fp(limits->max_perf_pct, 100);
+       /* Normalize user input to [min_perf, max_perf] */
+       limits->min_perf = max(min_policy_perf,
+                              percent_ext_fp(limits->min_sysfs_pct));
+       limits->min_perf = min(limits->min_perf, max_policy_perf);
+       limits->max_perf = min(max_policy_perf,
+                              percent_ext_fp(limits->max_sysfs_pct));
+       limits->max_perf = max(min_policy_perf, limits->max_perf);
+
+       /* Make sure min_perf <= max_perf */
+       limits->min_perf = min(limits->min_perf, limits->max_perf);
+
        limits->max_perf = round_up(limits->max_perf, EXT_FRAC_BITS);
        limits->min_perf = round_up(limits->min_perf, EXT_FRAC_BITS);
+       limits->max_perf_pct = fp_ext_toint(limits->max_perf * 100);
+       limits->min_perf_pct = fp_ext_toint(limits->min_perf * 100);
 
        pr_debug("cpu:%d max_perf_pct:%d min_perf_pct:%d\n", policy->cpu,
                 limits->max_perf_pct, limits->min_perf_pct);
@@ -2108,7 +2094,7 @@ static void intel_pstate_update_perf_limits(struct cpufreq_policy *policy,
 static int intel_pstate_set_policy(struct cpufreq_policy *policy)
 {
        struct cpudata *cpu;
-       struct perf_limits *perf_limits = NULL;
+       struct perf_limits *perf_limits = &global;
 
        if (!policy->cpuinfo.max_freq)
                return -ENODEV;
@@ -2131,28 +2117,8 @@ static int intel_pstate_set_policy(struct cpufreq_policy *policy)
 
        mutex_lock(&intel_pstate_limits_lock);
 
-       if (policy->policy == CPUFREQ_POLICY_PERFORMANCE) {
-               if (!perf_limits) {
-                       limits = &performance_limits;
-                       perf_limits = limits;
-               }
-               if (policy->max >= policy->cpuinfo.max_freq &&
-                   !limits->no_turbo) {
-                       pr_debug("set performance\n");
-                       intel_pstate_set_performance_limits(perf_limits);
-                       goto out;
-               }
-       } else {
-               pr_debug("set powersave\n");
-               if (!perf_limits) {
-                       limits = &powersave_limits;
-                       perf_limits = limits;
-               }
-
-       }
-
        intel_pstate_update_perf_limits(policy, perf_limits);
- out:
+
        if (cpu->policy == CPUFREQ_POLICY_PERFORMANCE) {
                /*
                 * NOHZ_FULL CPUs need this as the governor callback may not
@@ -2174,16 +2140,9 @@ static int intel_pstate_set_policy(struct cpufreq_policy *policy)
 static int intel_pstate_verify_policy(struct cpufreq_policy *policy)
 {
        struct cpudata *cpu = all_cpu_data[policy->cpu];
-       struct perf_limits *perf_limits;
-
-       if (policy->policy == CPUFREQ_POLICY_PERFORMANCE)
-               perf_limits = &performance_limits;
-       else
-               perf_limits = &powersave_limits;
 
        update_turbo_state();
-       policy->cpuinfo.max_freq = perf_limits->turbo_disabled ||
-                                       perf_limits->no_turbo ?
+       policy->cpuinfo.max_freq = global.turbo_disabled || global.no_turbo ?
                                        cpu->pstate.max_freq :
                                        cpu->pstate.turbo_freq;
 
@@ -2198,9 +2157,9 @@ static int intel_pstate_verify_policy(struct cpufreq_policy *policy)
                unsigned int max_freq, min_freq;
 
                max_freq = policy->cpuinfo.max_freq *
-                                               limits->max_sysfs_pct / 100;
+                                       global.max_sysfs_pct / 100;
                min_freq = policy->cpuinfo.max_freq *
-                                               limits->min_sysfs_pct / 100;
+                                       global.min_sysfs_pct / 100;
                cpufreq_verify_within_limits(policy, min_freq, max_freq);
        }
 
@@ -2243,13 +2202,8 @@ static int __intel_pstate_cpu_init(struct cpufreq_policy *policy)
 
        cpu = all_cpu_data[policy->cpu];
 
-       /*
-        * We need sane value in the cpu->perf_limits, so inherit from global
-        * perf_limits limits, which are seeded with values based on the
-        * CONFIG_CPU_FREQ_DEFAULT_GOV_*, during boot up.
-        */
        if (per_cpu_limits)
-               memcpy(cpu->perf_limits, limits, sizeof(struct perf_limits));
+               intel_pstate_init_limits(cpu->perf_limits);
 
        policy->min = cpu->pstate.min_pstate * cpu->pstate.scaling;
        policy->max = cpu->pstate.turbo_pstate * cpu->pstate.scaling;
@@ -2257,7 +2211,7 @@ static int __intel_pstate_cpu_init(struct cpufreq_policy *policy)
        /* cpuinfo and default policy values */
        policy->cpuinfo.min_freq = cpu->pstate.min_pstate * cpu->pstate.scaling;
        update_turbo_state();
-       policy->cpuinfo.max_freq = limits->turbo_disabled ?
+       policy->cpuinfo.max_freq = global.turbo_disabled ?
                        cpu->pstate.max_pstate : cpu->pstate.turbo_pstate;
        policy->cpuinfo.max_freq *= cpu->pstate.scaling;
 
@@ -2277,7 +2231,7 @@ static int intel_pstate_cpu_init(struct cpufreq_policy *policy)
                return ret;
 
        policy->cpuinfo.transition_latency = CPUFREQ_ETERNAL;
-       if (limits->min_perf_pct == 100 && limits->max_perf_pct == 100)
+       if (IS_ENABLED(CONFIG_CPU_FREQ_DEFAULT_GOV_PERFORMANCE))
                policy->policy = CPUFREQ_POLICY_PERFORMANCE;
        else
                policy->policy = CPUFREQ_POLICY_POWERSAVE;
@@ -2301,46 +2255,16 @@ static struct cpufreq_driver intel_pstate = {
 static int intel_cpufreq_verify_policy(struct cpufreq_policy *policy)
 {
        struct cpudata *cpu = all_cpu_data[policy->cpu];
-       struct perf_limits *perf_limits = limits;
 
        update_turbo_state();
-       policy->cpuinfo.max_freq = limits->turbo_disabled ?
+       policy->cpuinfo.max_freq = global.no_turbo || global.turbo_disabled ?
                        cpu->pstate.max_freq : cpu->pstate.turbo_freq;
 
        cpufreq_verify_within_cpu_limits(policy);
 
-       if (per_cpu_limits)
-               perf_limits = cpu->perf_limits;
-
-       mutex_lock(&intel_pstate_limits_lock);
-
-       intel_pstate_update_perf_limits(policy, perf_limits);
-
-       mutex_unlock(&intel_pstate_limits_lock);
-
        return 0;
 }
 
-static unsigned int intel_cpufreq_turbo_update(struct cpudata *cpu,
-                                              struct cpufreq_policy *policy,
-                                              unsigned int target_freq)
-{
-       unsigned int max_freq;
-
-       update_turbo_state();
-
-       max_freq = limits->no_turbo || limits->turbo_disabled ?
-                       cpu->pstate.max_freq : cpu->pstate.turbo_freq;
-       policy->cpuinfo.max_freq = max_freq;
-       if (policy->max > max_freq)
-               policy->max = max_freq;
-
-       if (target_freq > max_freq)
-               target_freq = max_freq;
-
-       return target_freq;
-}
-
 static int intel_cpufreq_target(struct cpufreq_policy *policy,
                                unsigned int target_freq,
                                unsigned int relation)
@@ -2349,8 +2273,10 @@ static int intel_cpufreq_target(struct cpufreq_policy *policy,
        struct cpufreq_freqs freqs;
        int target_pstate;
 
+       update_turbo_state();
+
        freqs.old = policy->cur;
-       freqs.new = intel_cpufreq_turbo_update(cpu, policy, target_freq);
+       freqs.new = target_freq;
 
        cpufreq_freq_transition_begin(policy, &freqs);
        switch (relation) {
@@ -2370,6 +2296,7 @@ static int intel_cpufreq_target(struct cpufreq_policy *policy,
                wrmsrl_on_cpu(policy->cpu, MSR_IA32_PERF_CTL,
                              pstate_funcs.get_val(cpu, target_pstate));
        }
+       freqs.new = target_pstate * cpu->pstate.scaling;
        cpufreq_freq_transition_end(policy, &freqs, false);
 
        return 0;
@@ -2381,10 +2308,12 @@ static unsigned int intel_cpufreq_fast_switch(struct cpufreq_policy *policy,
        struct cpudata *cpu = all_cpu_data[policy->cpu];
        int target_pstate;
 
-       target_freq = intel_cpufreq_turbo_update(cpu, policy, target_freq);
+       update_turbo_state();
+
        target_pstate = DIV_ROUND_UP(target_freq, cpu->pstate.scaling);
+       target_pstate = intel_pstate_prepare_request(cpu, target_pstate);
        intel_pstate_update_pstate(cpu, target_pstate);
-       return target_freq;
+       return target_pstate * cpu->pstate.scaling;
 }
 
 static int intel_cpufreq_cpu_init(struct cpufreq_policy *policy)
@@ -2435,10 +2364,7 @@ static int intel_pstate_register_driver(void)
 {
        int ret;
 
-       intel_pstate_init_limits(&powersave_limits);
-       intel_pstate_set_performance_limits(&performance_limits);
-       limits = IS_ENABLED(CONFIG_CPU_FREQ_DEFAULT_GOV_PERFORMANCE) ?
-                       &performance_limits : &powersave_limits;
+       intel_pstate_init_limits(&global);
 
        ret = cpufreq_register_driver(intel_pstate_driver);
        if (ret) {
index 370593006f5f76db29a0433912966cb49820346d..cda8f62d555b57700daded5a5214c8e88ffca4ee 100644 (file)
@@ -175,6 +175,24 @@ static int powernv_cpuidle_driver_init(void)
                drv->state_count += 1;
        }
 
+       /*
+        * On the PowerNV platform cpu_present may be less than cpu_possible in
+        * cases when firmware detects the CPU, but it is not available to the
+        * OS.  If CONFIG_HOTPLUG_CPU=n, then such CPUs are not hotpluggable at
+        * run time and hence cpu_devices are not created for those CPUs by the
+        * generic topology_init().
+        *
+        * drv->cpumask defaults to cpu_possible_mask in
+        * __cpuidle_driver_init().  This breaks cpuidle on PowerNV where
+        * cpu_devices are not created for CPUs in cpu_possible_mask that
+        * cannot be hot-added later at run time.
+        *
+        * Trying cpuidle_register_device() on a CPU without a cpu_device is
+        * incorrect, so pass a correct CPU mask to the generic cpuidle driver.
+        */
+
+       drv->cpumask = (struct cpumask *)cpu_present_mask;
+
        return 0;
 }
 
index c5adc8c9ac43afeffb0f7ca842f730df76ef8eb1..ae948b1da93a379b12d16aaf98be5df6ac762da4 100644 (file)
@@ -615,6 +615,18 @@ int cpuidle_add_sysfs(struct cpuidle_device *dev)
        struct device *cpu_dev = get_cpu_device((unsigned long)dev->cpu);
        int error;
 
+       /*
+        * Return if cpu_device is not setup for this CPU.
+        *
+        * This could happen if the arch did not set up cpu_device
+        * since this CPU is not in cpu_present mask and the
+        * driver did not send a correct CPU mask during registration.
+        * Without this check we would end up passing a bogus
+        * value for &cpu_dev->kobj in kobject_init_and_add().
+        */
+       if (!cpu_dev)
+               return -ENODEV;
+
        kdev = kzalloc(sizeof(*kdev), GFP_KERNEL);
        if (!kdev)
                return -ENOMEM;
index 41cc853f8569cfd4825806028331ed5d98f005c0..fc08b4ed69d936f2866d5c9fef19450b852f900c 100644 (file)
@@ -1015,6 +1015,7 @@ const struct ccp_vdata ccpv5a = {
 
 const struct ccp_vdata ccpv5b = {
        .version = CCP_VERSION(5, 0),
+       .dma_chan_attr = DMA_PRIVATE,
        .setup = ccp5other_config,
        .perform = &ccp5_actions,
        .bar = 2,
index 511ab042b5e7939b008045129de0c9e268f2db46..92d1c6959f08b8943f513a9ed5fd8525c2d6b702 100644 (file)
@@ -283,11 +283,14 @@ EXPORT_SYMBOL_GPL(ccp_version);
  */
 int ccp_enqueue_cmd(struct ccp_cmd *cmd)
 {
-       struct ccp_device *ccp = ccp_get_device();
+       struct ccp_device *ccp;
        unsigned long flags;
        unsigned int i;
        int ret;
 
+       /* Some commands might need to be sent to a specific device */
+       ccp = cmd->ccp ? cmd->ccp : ccp_get_device();
+
        if (!ccp)
                return -ENODEV;
 
index 2b5c01fade05a526d5e78241eab38ec16e790aa6..aa36f3f81860560a442687518f78a0e752a9c11b 100644 (file)
 
 /* ------------------------ General CCP Defines ------------------------ */
 
+#define        CCP_DMA_DFLT                    0x0
+#define        CCP_DMA_PRIV                    0x1
+#define        CCP_DMA_PUB                     0x2
+
 #define CCP_DMAPOOL_MAX_SIZE           64
 #define CCP_DMAPOOL_ALIGN              BIT(5)
 
@@ -636,6 +640,7 @@ struct ccp_actions {
 /* Structure to hold CCP version-specific values */
 struct ccp_vdata {
        const unsigned int version;
+       const unsigned int dma_chan_attr;
        void (*setup)(struct ccp_device *);
        const struct ccp_actions *perform;
        const unsigned int bar;
index e5d9278f40197427e913993fe9249d405585fe87..e00be01fbf5a036fcd8a9f09234b581998b7b75f 100644 (file)
@@ -10,6 +10,7 @@
  * published by the Free Software Foundation.
  */
 
+#include <linux/module.h>
 #include <linux/kernel.h>
 #include <linux/dmaengine.h>
 #include <linux/spinlock.h>
        (mask == 0) ? 64 : fls64(mask); \
 })
 
+/* The CCP as a DMA provider can be configured for public or private
+ * channels. Default is specified in the vdata for the device (PCI ID).
+ * This module parameter will override for all channels on all devices:
+ *   dma_chan_attr = 0x2 to force all channels public
+ *                 = 0x1 to force all channels private
+ *                 = 0x0 to defer to the vdata setting
+ *                 = any other value: warning, revert to 0x0
+ */
+static unsigned int dma_chan_attr = CCP_DMA_DFLT;
+module_param(dma_chan_attr, uint, 0444);
+MODULE_PARM_DESC(dma_chan_attr, "Set DMA channel visibility: 0 (default) = device defaults, 1 = make private, 2 = make public");
+
+unsigned int ccp_get_dma_chan_attr(struct ccp_device *ccp)
+{
+       switch (dma_chan_attr) {
+       case CCP_DMA_DFLT:
+               return ccp->vdata->dma_chan_attr;
+
+       case CCP_DMA_PRIV:
+               return DMA_PRIVATE;
+
+       case CCP_DMA_PUB:
+               return 0;
+
+       default:
+               dev_info_once(ccp->dev, "Invalid value for dma_chan_attr: %d\n",
+                             dma_chan_attr);
+               return ccp->vdata->dma_chan_attr;
+       }
+}
+
 static void ccp_free_cmd_resources(struct ccp_device *ccp,
                                   struct list_head *list)
 {
@@ -390,6 +422,7 @@ static struct ccp_dma_desc *ccp_create_desc(struct dma_chan *dma_chan,
                        goto err;
 
                ccp_cmd = &cmd->ccp_cmd;
+               ccp_cmd->ccp = chan->ccp;
                ccp_pt = &ccp_cmd->u.passthru_nomap;
                ccp_cmd->flags = CCP_CMD_MAY_BACKLOG;
                ccp_cmd->flags |= CCP_CMD_PASSTHRU_NO_DMA_MAP;
@@ -674,6 +707,15 @@ int ccp_dmaengine_register(struct ccp_device *ccp)
        dma_cap_set(DMA_SG, dma_dev->cap_mask);
        dma_cap_set(DMA_INTERRUPT, dma_dev->cap_mask);
 
+       /* The DMA channels for this device can be set to public or private,
+        * and overridden by the module parameter dma_chan_attr.
+        * Default: according to the value in vdata (dma_chan_attr=0)
+        * dma_chan_attr=0x1: all channels private (override vdata)
+        * dma_chan_attr=0x2: all channels public (override vdata)
+        */
+       if (ccp_get_dma_chan_attr(ccp) == DMA_PRIVATE)
+               dma_cap_set(DMA_PRIVATE, dma_dev->cap_mask);
+
        INIT_LIST_HEAD(&dma_dev->channels);
        for (i = 0; i < ccp->cmd_q_count; i++) {
                chan = ccp->ccp_dma_chan + i;
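
With the parameter wired up above, the override can be exercised at module load time; for example, forcing every channel private on all CCP devices (assuming the driver is built as the usual ccp module):

    modprobe ccp dma_chan_attr=0x1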
index dce1af0ce85ce8ec6dbd7184f02776cb173c41f0..1b9da3dc799b05dff2971e6a5415cc3ae3d4bd85 100644 (file)
@@ -270,7 +270,7 @@ static void s5p_sg_copy_buf(void *buf, struct scatterlist *sg,
        scatterwalk_done(&walk, out, 0);
 }
 
-static void s5p_aes_complete(struct s5p_aes_dev *dev, int err)
+static void s5p_sg_done(struct s5p_aes_dev *dev)
 {
        if (dev->sg_dst_cpy) {
                dev_dbg(dev->dev,
@@ -281,8 +281,11 @@ static void s5p_aes_complete(struct s5p_aes_dev *dev, int err)
        }
        s5p_free_sg_cpy(dev, &dev->sg_src_cpy);
        s5p_free_sg_cpy(dev, &dev->sg_dst_cpy);
+}
 
-       /* holding a lock outside */
+/* Calls the completion. Cannot be called with dev->lock held. */
+static void s5p_aes_complete(struct s5p_aes_dev *dev, int err)
+{
        dev->req->base.complete(&dev->req->base, err);
        dev->busy = false;
 }
@@ -368,51 +371,44 @@ exit:
 }
 
 /*
- * Returns true if new transmitting (output) data is ready and its
- * address+length have to be written to device (by calling
- * s5p_set_dma_outdata()). False otherwise.
+ * Returns -ERRNO on error (mapping of new data failed).
+ * On success returns:
+ *  - 0 if there is no more data,
+ *  - 1 if new transmitting (output) data is ready and its address+length
+ *     have to be written to device (by calling s5p_set_dma_outdata()).
  */
-static bool s5p_aes_tx(struct s5p_aes_dev *dev)
+static int s5p_aes_tx(struct s5p_aes_dev *dev)
 {
-       int err = 0;
-       bool ret = false;
+       int ret = 0;
 
        s5p_unset_outdata(dev);
 
        if (!sg_is_last(dev->sg_dst)) {
-               err = s5p_set_outdata(dev, sg_next(dev->sg_dst));
-               if (err)
-                       s5p_aes_complete(dev, err);
-               else
-                       ret = true;
-       } else {
-               s5p_aes_complete(dev, err);
-
-               dev->busy = true;
-               tasklet_schedule(&dev->tasklet);
+               ret = s5p_set_outdata(dev, sg_next(dev->sg_dst));
+               if (!ret)
+                       ret = 1;
        }
 
        return ret;
 }
 
 /*
- * Returns true if new receiving (input) data is ready and its
- * address+length have to be written to device (by calling
- * s5p_set_dma_indata()). False otherwise.
+ * Returns -ERRNO on error (mapping of new data failed).
+ * On success returns:
+ *  - 0 if there is no more data,
+ *  - 1 if new receiving (input) data is ready and its address+length
+ *     have to be written to device (by calling s5p_set_dma_indata()).
  */
-static bool s5p_aes_rx(struct s5p_aes_dev *dev)
+static int s5p_aes_rx(struct s5p_aes_dev *dev)
 {
-       int err;
-       bool ret = false;
+       int ret = 0;
 
        s5p_unset_indata(dev);
 
        if (!sg_is_last(dev->sg_src)) {
-               err = s5p_set_indata(dev, sg_next(dev->sg_src));
-               if (err)
-                       s5p_aes_complete(dev, err);
-               else
-                       ret = true;
+               ret = s5p_set_indata(dev, sg_next(dev->sg_src));
+               if (!ret)
+                       ret = 1;
        }
 
        return ret;
@@ -422,33 +418,73 @@ static irqreturn_t s5p_aes_interrupt(int irq, void *dev_id)
 {
        struct platform_device *pdev = dev_id;
        struct s5p_aes_dev *dev = platform_get_drvdata(pdev);
-       bool set_dma_tx = false;
-       bool set_dma_rx = false;
+       int err_dma_tx = 0;
+       int err_dma_rx = 0;
+       bool tx_end = false;
        unsigned long flags;
        uint32_t status;
+       int err;
 
        spin_lock_irqsave(&dev->lock, flags);
 
+       /*
+        * Handle rx or tx interrupt. If there is still data (the scatterlist
+        * has not reached its end), then map the next scatterlist entry.
+        * In case of a mapping error, s5p_aes_complete() should be called.
+        *
+        * If there is no more data in tx scatter list, call s5p_aes_complete()
+        * and schedule new tasklet.
+        */
        status = SSS_READ(dev, FCINTSTAT);
        if (status & SSS_FCINTSTAT_BRDMAINT)
-               set_dma_rx = s5p_aes_rx(dev);
-       if (status & SSS_FCINTSTAT_BTDMAINT)
-               set_dma_tx = s5p_aes_tx(dev);
+               err_dma_rx = s5p_aes_rx(dev);
+
+       if (status & SSS_FCINTSTAT_BTDMAINT) {
+               if (sg_is_last(dev->sg_dst))
+                       tx_end = true;
+               err_dma_tx = s5p_aes_tx(dev);
+       }
 
        SSS_WRITE(dev, FCINTPEND, status);
 
-       /*
-        * Writing length of DMA block (either receiving or transmitting)
-        * will start the operation immediately, so this should be done
-        * at the end (even after clearing pending interrupts to not miss the
-        * interrupt).
-        */
-       if (set_dma_tx)
-               s5p_set_dma_outdata(dev, dev->sg_dst);
-       if (set_dma_rx)
-               s5p_set_dma_indata(dev, dev->sg_src);
+       if (err_dma_rx < 0) {
+               err = err_dma_rx;
+               goto error;
+       }
+       if (err_dma_tx < 0) {
+               err = err_dma_tx;
+               goto error;
+       }
+
+       if (tx_end) {
+               s5p_sg_done(dev);
+
+               spin_unlock_irqrestore(&dev->lock, flags);
+
+               s5p_aes_complete(dev, 0);
+               dev->busy = true;
+               tasklet_schedule(&dev->tasklet);
+       } else {
+               /*
+                * Writing length of DMA block (either receiving or
+                * transmitting) will start the operation immediately, so this
+                * should be done at the end (even after clearing pending
+                * interrupts to not miss the interrupt).
+                */
+               if (err_dma_tx == 1)
+                       s5p_set_dma_outdata(dev, dev->sg_dst);
+               if (err_dma_rx == 1)
+                       s5p_set_dma_indata(dev, dev->sg_src);
 
+               spin_unlock_irqrestore(&dev->lock, flags);
+       }
+
+       return IRQ_HANDLED;
+
+error:
+       s5p_sg_done(dev);
        spin_unlock_irqrestore(&dev->lock, flags);
+       s5p_aes_complete(dev, err);
 
        return IRQ_HANDLED;
 }
@@ -597,8 +633,9 @@ outdata_error:
        s5p_unset_indata(dev);
 
 indata_error:
-       s5p_aes_complete(dev, err);
+       s5p_sg_done(dev);
        spin_unlock_irqrestore(&dev->lock, flags);
+       s5p_aes_complete(dev, err);
 }
 
 static void s5p_tasklet_cb(unsigned long data)
@@ -805,8 +842,9 @@ static int s5p_aes_probe(struct platform_device *pdev)
                dev_warn(dev, "feed control interrupt is not available.\n");
                goto err_irq;
        }
-       err = devm_request_irq(dev, pdata->irq_fc, s5p_aes_interrupt,
-                              IRQF_SHARED, pdev->name, pdev);
+       err = devm_request_threaded_irq(dev, pdata->irq_fc, NULL,
+                                       s5p_aes_interrupt, IRQF_ONESHOT,
+                                       pdev->name, pdev);
        if (err < 0) {
                dev_warn(dev, "feed control interrupt is not available.\n");
                goto err_irq;
index 43a0c8a26ab0c56c25b56f425d9ce8192780e54b..00a16ab601cb07d4b525a89a8ca6bf3a5393c94b 100644 (file)
@@ -82,7 +82,7 @@ void cryp_activity(struct cryp_device_data *device_data,
 void cryp_flush_inoutfifo(struct cryp_device_data *device_data)
 {
        /*
-        * We always need to disble the hardware before trying to flush the
+        * We always need to disable the hardware before trying to flush the
         * FIFO. This is something that isn't written in the design
         * specification, but we have been informed by the hardware designers
         * that this must be done.
index 8d9829ff2a784de9490404a86a194e2304ed65c7..80c6db279ae10cb8558b2e90a91a4c4dafa917e0 100644 (file)
@@ -427,6 +427,7 @@ static int __dax_dev_pte_fault(struct dax_dev *dax_dev, struct vm_fault *vmf)
        int rc = VM_FAULT_SIGBUS;
        phys_addr_t phys;
        pfn_t pfn;
+       unsigned int fault_size = PAGE_SIZE;
 
        if (check_vma(dax_dev, vmf->vma, __func__))
                return VM_FAULT_SIGBUS;
@@ -437,9 +438,12 @@ static int __dax_dev_pte_fault(struct dax_dev *dax_dev, struct vm_fault *vmf)
                return VM_FAULT_SIGBUS;
        }
 
+       if (fault_size != dax_region->align)
+               return VM_FAULT_SIGBUS;
+
        phys = pgoff_to_phys(dax_dev, vmf->pgoff, PAGE_SIZE);
        if (phys == -1) {
-               dev_dbg(dev, "%s: phys_to_pgoff(%#lx) failed\n", __func__,
+               dev_dbg(dev, "%s: pgoff_to_phys(%#lx) failed\n", __func__,
                                vmf->pgoff);
                return VM_FAULT_SIGBUS;
        }
@@ -464,6 +468,7 @@ static int __dax_dev_pmd_fault(struct dax_dev *dax_dev, struct vm_fault *vmf)
        phys_addr_t phys;
        pgoff_t pgoff;
        pfn_t pfn;
+       unsigned int fault_size = PMD_SIZE;
 
        if (check_vma(dax_dev, vmf->vma, __func__))
                return VM_FAULT_SIGBUS;
@@ -480,10 +485,20 @@ static int __dax_dev_pmd_fault(struct dax_dev *dax_dev, struct vm_fault *vmf)
                return VM_FAULT_SIGBUS;
        }
 
+       if (fault_size < dax_region->align)
+               return VM_FAULT_SIGBUS;
+       else if (fault_size > dax_region->align)
+               return VM_FAULT_FALLBACK;
+
+       /* if we are outside of the VMA */
+       if (pmd_addr < vmf->vma->vm_start ||
+                       (pmd_addr + PMD_SIZE) > vmf->vma->vm_end)
+               return VM_FAULT_SIGBUS;
+
        pgoff = linear_page_index(vmf->vma, pmd_addr);
        phys = pgoff_to_phys(dax_dev, pgoff, PMD_SIZE);
        if (phys == -1) {
-               dev_dbg(dev, "%s: phys_to_pgoff(%#lx) failed\n", __func__,
+               dev_dbg(dev, "%s: pgoff_to_phys(%#lx) failed\n", __func__,
                                pgoff);
                return VM_FAULT_SIGBUS;
        }
@@ -503,6 +518,8 @@ static int __dax_dev_pud_fault(struct dax_dev *dax_dev, struct vm_fault *vmf)
        phys_addr_t phys;
        pgoff_t pgoff;
        pfn_t pfn;
+       unsigned int fault_size = PUD_SIZE;
+
 
        if (check_vma(dax_dev, vmf->vma, __func__))
                return VM_FAULT_SIGBUS;
@@ -519,10 +536,20 @@ static int __dax_dev_pud_fault(struct dax_dev *dax_dev, struct vm_fault *vmf)
                return VM_FAULT_SIGBUS;
        }
 
+       if (fault_size < dax_region->align)
+               return VM_FAULT_SIGBUS;
+       else if (fault_size > dax_region->align)
+               return VM_FAULT_FALLBACK;
+
+       /* if we are outside of the VMA */
+       if (pud_addr < vmf->vma->vm_start ||
+                       (pud_addr + PUD_SIZE) > vmf->vma->vm_end)
+               return VM_FAULT_SIGBUS;
+
        pgoff = linear_page_index(vmf->vma, pud_addr);
        phys = pgoff_to_phys(dax_dev, pgoff, PUD_SIZE);
        if (phys == -1) {
-               dev_dbg(dev, "%s: phys_to_pgoff(%#lx) failed\n", __func__,
+               dev_dbg(dev, "%s: pgoff_to_phys(%#lx) failed\n", __func__,
                                pgoff);
                return VM_FAULT_SIGBUS;
        }
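
Concretely, for a device-dax region created with a 2 MiB alignment: a 4 KiB PTE fault fails the fault_size != align check and gets VM_FAULT_SIGBUS, a 2 MiB PMD fault proceeds, and a 1 GiB PUD fault returns VM_FAULT_FALLBACK so the core mm can retry with a smaller mapping size.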
index e18dc596cf2447fa9ef7e41b62d9396e29043426..6204cc32d09c5096df8aec304c3c37b3bcb6be44 100644 (file)
@@ -251,8 +251,11 @@ static void bcm2835_dma_create_cb_set_length(
         */
 
        /* have we filled in period_length yet? */
-       if (*total_len + control_block->length < period_len)
+       if (*total_len + control_block->length < period_len) {
+               /* update number of bytes in this period so far */
+               *total_len += control_block->length;
                return;
+       }
 
        /* calculate the length that remains to reach period_length */
        control_block->length = period_len - *total_len;
index 24e0221fd66d1ff58eead62ee9f4a865eb87da03..d9118ec23025417eb6732542d653ac989cee05e1 100644 (file)
@@ -1108,12 +1108,14 @@ static struct dmaengine_unmap_pool *__get_unmap_pool(int nr)
        switch (order) {
        case 0 ... 1:
                return &unmap_pool[0];
+#if IS_ENABLED(CONFIG_DMA_ENGINE_RAID)
        case 2 ... 4:
                return &unmap_pool[1];
        case 5 ... 7:
                return &unmap_pool[2];
        case 8:
                return &unmap_pool[3];
+#endif
        default:
                BUG();
                return NULL;
index 82d85cce81f815b9b8bf70352e3bdeeece2d7d41..4773f286723414fec52bde6d8396bab8bfece386 100644 (file)
@@ -43,6 +43,7 @@ config EDAC_LEGACY_SYSFS
 
 config EDAC_DEBUG
        bool "Debugging"
+       select DEBUG_FS
        help
          This turns on debugging information for the entire EDAC subsystem.
          You do so by inserting edac_module with "edac_debug_level=x." Valid
@@ -259,6 +260,15 @@ config EDAC_SKX
          Support for error detection and correction on the Intel
          Skylake server Integrated Memory Controllers.
 
+config EDAC_PND2
+       tristate "Intel Pondicherry2"
+       depends on EDAC_MM_EDAC && PCI && X86_64 && X86_MCE_INTEL
+       help
+         Support for error detection and correction on the Intel
+         Pondicherry2 Integrated Memory Controller. This SoC IP is
+         first used on the Apollo Lake platform and Denverton
+         micro-server but may appear on others in the future.
+
 config EDAC_MPC85XX
        tristate "Freescale MPC83xx / MPC85xx"
        depends on EDAC_MM_EDAC && FSL_SOC
index 88e472e8b9a918e36e65b4385c9c412f92c7cea3..587107e909967d56056df688624474bca4da3cac 100644 (file)
@@ -32,6 +32,7 @@ obj-$(CONFIG_EDAC_I7300)              += i7300_edac.o
 obj-$(CONFIG_EDAC_I7CORE)              += i7core_edac.o
 obj-$(CONFIG_EDAC_SBRIDGE)             += sb_edac.o
 obj-$(CONFIG_EDAC_SKX)                 += skx_edac.o
+obj-$(CONFIG_EDAC_PND2)                        += pnd2_edac.o
 obj-$(CONFIG_EDAC_E7XXX)               += e7xxx_edac.o
 obj-$(CONFIG_EDAC_E752X)               += e752x_edac.o
 obj-$(CONFIG_EDAC_I82443BXGX)          += i82443bxgx_edac.o
index 1670d27bcac82d51cbb3de30d75e261100ddb83e..f683919981b06730090c2a11b98d45a5c0713944 100644 (file)
@@ -1293,7 +1293,7 @@ static int i5000_init_csrows(struct mem_ctl_info *mci)
                        dimm->mtype = MEM_FB_DDR2;
 
                        /* ask what device type on this row */
-                       if (MTR_DRAM_WIDTH(mtr))
+                       if (MTR_DRAM_WIDTH(mtr) == 8)
                                dimm->dtype = DEV_X8;
                        else
                                dimm->dtype = DEV_X4;
index abf6ef22e220602f48559504ec18e77c5115de5b..37a9ba71da449bab30c12438325ecc419d9fa3e8 100644 (file)
@@ -1207,13 +1207,14 @@ static int i5400_init_dimms(struct mem_ctl_info *mci)
 
                        dimm->nr_pages = size_mb << 8;
                        dimm->grain = 8;
-                       dimm->dtype = MTR_DRAM_WIDTH(mtr) ? DEV_X8 : DEV_X4;
+                       dimm->dtype = MTR_DRAM_WIDTH(mtr) == 8 ?
+                                     DEV_X8 : DEV_X4;
                        dimm->mtype = MEM_FB_DDR2;
                        /*
                         * The ECC mechanism is SDDC (aka SECC), which
                         * is similar to Chipkill.
                         */
-                       dimm->edac_mode = MTR_DRAM_WIDTH(mtr) ?
+                       dimm->edac_mode = MTR_DRAM_WIDTH(mtr) == 8 ?
                                          EDAC_S8ECD8ED : EDAC_S4ECD4ED;
                        ndimms++;
                }
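
This hunk and the i5000 one above fix the same bug: MTR_DRAM_WIDTH() evaluates to the device width in bits (4 or 8), not a boolean, so using it as a truth value classified x4 parts as DEV_X8 as well. A minimal illustration with a hypothetical width value:

    int width = 4;                  /* an x4 DIMM */
    int x8_before = width ? 1 : 0;  /* 1 -- wrong: any nonzero width */
    int x8_after  = (width == 8);   /* 0 -- correct: only true x8 parts */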
diff --git a/drivers/edac/pnd2_edac.c b/drivers/edac/pnd2_edac.c
new file mode 100644 (file)
index 0000000..928e0db
--- /dev/null
@@ -0,0 +1,1546 @@
+/*
+ * Driver for Pondicherry2 memory controller.
+ *
+ * Copyright (c) 2016, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * [Derived from sb_edac.c]
+ *
+ * Translation of system physical addresses to DIMM addresses
+ * is a two stage process:
+ *
+ * First the Pondicherry 2 memory controller handles slice and channel interleaving
+ * in "sys2pmi()". This is (almost) completely common between platforms.
+ *
+ * Then a platform specific dunit (DIMM unit) completes the process to provide DIMM,
+ * rank, bank, row and column using the appropriate "dunit_ops" functions/parameters.
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/pci.h>
+#include <linux/pci_ids.h>
+#include <linux/slab.h>
+#include <linux/delay.h>
+#include <linux/edac.h>
+#include <linux/mmzone.h>
+#include <linux/smp.h>
+#include <linux/bitmap.h>
+#include <linux/math64.h>
+#include <linux/mod_devicetable.h>
+#include <asm/cpu_device_id.h>
+#include <asm/intel-family.h>
+#include <asm/processor.h>
+#include <asm/mce.h>
+
+#include "edac_mc.h"
+#include "edac_module.h"
+#include "pnd2_edac.h"
+
+#define APL_NUM_CHANNELS       4
+#define DNV_NUM_CHANNELS       2
+#define DNV_MAX_DIMMS          2 /* Max DIMMs per channel */
+
+enum type {
+       APL,
+       DNV, /* All requests go to PMI CH0 on each slice (CH1 disabled) */
+};
+
+struct dram_addr {
+       int chan;
+       int dimm;
+       int rank;
+       int bank;
+       int row;
+       int col;
+};
+
+struct pnd2_pvt {
+       int dimm_geom[APL_NUM_CHANNELS];
+       u64 tolm, tohm;
+};
+
+/*
+ * System address space is divided into multiple regions with
+ * different interleave rules in each. The as0/as1 regions
+ * have no interleaving at all. The as2 region is interleaved
+ * between two channels. The mot region is magic and may overlap
+ * other regions, with its interleave rules taking precedence.
+ * Addresses not in any of these regions are interleaved across
+ * all four channels.
+ */
+static struct region {
+       u64     base;
+       u64     limit;
+       u8      enabled;
+} mot, as0, as1, as2;
+
+static struct dunit_ops {
+       char *name;
+       enum type type;
+       int pmiaddr_shift;
+       int pmiidx_shift;
+       int channels;
+       int dimms_per_channel;
+       int (*rd_reg)(int port, int off, int op, void *data, size_t sz, char *name);
+       int (*get_registers)(void);
+       int (*check_ecc)(void);
+       void (*mk_region)(char *name, struct region *rp, void *asym);
+       void (*get_dimm_config)(struct mem_ctl_info *mci);
+       int (*pmi2mem)(struct mem_ctl_info *mci, u64 pmiaddr, u32 pmiidx,
+                                  struct dram_addr *daddr, char *msg);
+} *ops;
+
+static struct mem_ctl_info *pnd2_mci;
+
+#define PND2_MSG_SIZE  256
+
+/* Debug macros */
+#define pnd2_printk(level, fmt, arg...)                        \
+       edac_printk(level, "pnd2", fmt, ##arg)
+
+#define pnd2_mc_printk(mci, level, fmt, arg...)        \
+       edac_mc_chipset_printk(mci, level, "pnd2", fmt, ##arg)
+
+#define MOT_CHAN_INTLV_BIT_1SLC_2CH 12
+#define MOT_CHAN_INTLV_BIT_2SLC_2CH 13
+#define SELECTOR_DISABLED (-1)
+#define _4GB (1ul << 32)
+
+#define PMI_ADDRESS_WIDTH      31
+#define PND_MAX_PHYS_BIT       39
+
+#define APL_ASYMSHIFT          28
+#define DNV_ASYMSHIFT          31
+#define CH_HASH_MASK_LSB       6
+#define SLICE_HASH_MASK_LSB    6
+#define MOT_SLC_INTLV_BIT      12
+#define LOG2_PMI_ADDR_GRANULARITY      5
+#define MOT_SHIFT      24
+
+#define GET_BITFIELD(v, lo, hi)        (((v) & GENMASK_ULL(hi, lo)) >> (lo))
+#define U64_LSHIFT(val, s)     ((u64)(val) << (s))
+
+#ifdef CONFIG_X86_INTEL_SBI_APL
+#include "linux/platform_data/sbi_apl.h"
+int sbi_send(int port, int off, int op, u32 *data)
+{
+       struct sbi_apl_message sbi_arg;
+       int ret, read = 0;
+
+       memset(&sbi_arg, 0, sizeof(sbi_arg));
+
+       if (op == 0 || op == 4 || op == 6)
+               read = 1;
+       else
+               sbi_arg.data = *data;
+
+       sbi_arg.opcode = op;
+       sbi_arg.port_address = port;
+       sbi_arg.register_offset = off;
+       ret = sbi_apl_commit(&sbi_arg);
+       if (ret || sbi_arg.status)
+               edac_dbg(2, "sbi_send status=%d ret=%d data=%x\n",
+                                sbi_arg.status, ret, sbi_arg.data);
+
+       if (ret == 0)
+               ret = sbi_arg.status;
+
+       if (ret == 0 && read)
+               *data = sbi_arg.data;
+
+       return ret;
+}
+#else
+int sbi_send(int port, int off, int op, u32 *data)
+{
+       return -EUNATCH;
+}
+#endif
+
+static int apl_rd_reg(int port, int off, int op, void *data, size_t sz, char *name)
+{
+       int     ret = 0;
+
+       edac_dbg(2, "Read %s port=%x off=%x op=%x\n", name, port, off, op);
+       switch (sz) {
+       case 8:
+               ret = sbi_send(port, off + 4, op, (u32 *)(data + 4));
+               /* fall through */
+       case 4:
+               ret |= sbi_send(port, off, op, (u32 *)data);
+               pnd2_printk(KERN_DEBUG, "%s=%x%08x ret=%d\n", name,
+                                       sz == 8 ? *((u32 *)(data + 4)) : 0, *((u32 *)data), ret);
+               break;
+       }
+
+       return ret;
+}
+
+static u64 get_mem_ctrl_hub_base_addr(void)
+{
+       struct b_cr_mchbar_lo_pci lo;
+       struct b_cr_mchbar_hi_pci hi;
+       struct pci_dev *pdev;
+
+       pdev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x1980, NULL);
+       if (pdev) {
+               pci_read_config_dword(pdev, 0x48, (u32 *)&lo);
+               pci_read_config_dword(pdev, 0x4c, (u32 *)&hi);
+               pci_dev_put(pdev);
+       } else {
+               return 0;
+       }
+
+       if (!lo.enable) {
+               edac_dbg(2, "MMIO via memory controller hub base address is disabled!\n");
+               return 0;
+       }
+
+       return U64_LSHIFT(hi.base, 32) | U64_LSHIFT(lo.base, 15);
+}
+
+static u64 get_sideband_reg_base_addr(void)
+{
+       struct pci_dev *pdev;
+       u32 hi, lo;
+
+       pdev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x19dd, NULL);
+       if (pdev) {
+               pci_read_config_dword(pdev, 0x10, &lo);
+               pci_read_config_dword(pdev, 0x14, &hi);
+               pci_dev_put(pdev);
+               return (U64_LSHIFT(hi, 32) | U64_LSHIFT(lo, 0));
+       } else {
+               return 0xfd000000;
+       }
+}
+
+static int dnv_rd_reg(int port, int off, int op, void *data, size_t sz, char *name)
+{
+       struct pci_dev *pdev;
+       char *base;
+       u64 addr;
+
+       if (op == 4) {
+               pdev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x1980, NULL);
+               if (!pdev)
+                       return -ENODEV;
+
+               pci_read_config_dword(pdev, off, data);
+               pci_dev_put(pdev);
+       } else {
+               /* MMIO via memory controller hub base address */
+               if (op == 0 && port == 0x4c) {
+                       addr = get_mem_ctrl_hub_base_addr();
+                       if (!addr)
+                               return -ENODEV;
+               } else {
+                       /* MMIO via sideband register base address */
+                       addr = get_sideband_reg_base_addr();
+                       if (!addr)
+                               return -ENODEV;
+                       addr += (port << 16);
+               }
+
+               base = ioremap((resource_size_t)addr, 0x10000);
+               if (!base)
+                       return -ENODEV;
+
+               if (sz == 8)
+                       *(u32 *)(data + 4) = *(u32 *)(base + off + 4);
+               *(u32 *)data = *(u32 *)(base + off);
+
+               iounmap(base);
+       }
+
+       edac_dbg(2, "Read %s=%.8x_%.8x\n", name,
+                       (sz == 8) ? *(u32 *)(data + 4) : 0, *(u32 *)data);
+
+       return 0;
+}
+
+#define RD_REGP(regp, regname, port)   \
+       ops->rd_reg(port,                                       \
+               regname##_offset,                               \
+               regname##_r_opcode,                             \
+               regp, sizeof(struct regname),   \
+               #regname)
+
+#define RD_REG(regp, regname)                  \
+       ops->rd_reg(regname ## _port,           \
+               regname##_offset,                               \
+               regname##_r_opcode,                             \
+               regp, sizeof(struct regname),   \
+               #regname)
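+
+/*
+ * E.g. RD_REG(&tolud, b_cr_tolud_pci) expands to:
+ *
+ *     ops->rd_reg(b_cr_tolud_pci_port, b_cr_tolud_pci_offset,
+ *                 b_cr_tolud_pci_r_opcode, &tolud,
+ *                 sizeof(struct b_cr_tolud_pci), "b_cr_tolud_pci");
+ *
+ * with the register's port/offset/opcode constants taken from
+ * pnd2_edac.h.
+ */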
+
+static u64 top_lm, top_hm;
+static bool two_slices;
+static bool two_channels; /* Both PMI channels in one slice enabled */
+
+static u8 sym_chan_mask;
+static u8 asym_chan_mask;
+static u8 chan_mask;
+
+static int slice_selector = -1;
+static int chan_selector = -1;
+static u64 slice_hash_mask;
+static u64 chan_hash_mask;
+
+static void mk_region(char *name, struct region *rp, u64 base, u64 limit)
+{
+       rp->enabled = 1;
+       rp->base = base;
+       rp->limit = limit;
+       edac_dbg(2, "Region:%s [%llx, %llx]\n", name, base, limit);
+}
+
+static void mk_region_mask(char *name, struct region *rp, u64 base, u64 mask)
+{
+       if (mask == 0) {
+               pr_info(FW_BUG "MOT mask cannot be zero\n");
+               return;
+       }
+       if (mask != GENMASK_ULL(PND_MAX_PHYS_BIT, __ffs(mask))) {
+               pr_info(FW_BUG "MOT mask not power of two\n");
+               return;
+       }
+       if (base & ~mask) {
+               pr_info(FW_BUG "MOT region base/mask alignment error\n");
+               return;
+       }
+       rp->base = base;
+       rp->limit = (base | ~mask) & GENMASK_ULL(PND_MAX_PHYS_BIT, 0);
+       rp->enabled = 1;
+       edac_dbg(2, "Region:%s [%llx, %llx]\n", name, base, rp->limit);
+}
+
+static bool in_region(struct region *rp, u64 addr)
+{
+       if (!rp->enabled)
+               return false;
+
+       return rp->base <= addr && addr <= rp->limit;
+}
+
+static int gen_sym_mask(struct b_cr_slice_channel_hash *p)
+{
+       int mask = 0;
+
+       if (!p->slice_0_mem_disabled)
+               mask |= p->sym_slice0_channel_enabled;
+
+       if (!p->slice_1_disabled)
+               mask |= p->sym_slice1_channel_enabled << 2;
+
+       if (p->ch_1_disabled || p->enable_pmi_dual_data_mode)
+               mask &= 0x5;
+
+       return mask;
+}
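+
+/*
+ * E.g. with both slices enabled and both channels enabled in each
+ * (sym_sliceX_channel_enabled == 3) the symmetric mask is 0xf; on DNV
+ * sym_sliceX_channel_enabled is forced to 1, giving at most 0x5.
+ */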
+
+static int gen_asym_mask(struct b_cr_slice_channel_hash *p,
+                        struct b_cr_asym_mem_region0_mchbar *as0,
+                        struct b_cr_asym_mem_region1_mchbar *as1,
+                        struct b_cr_asym_2way_mem_region_mchbar *as2way)
+{
+       const int intlv[] = { 0x5, 0xA, 0x3, 0xC };
+       int mask = 0;
+
+       if (as2way->asym_2way_interleave_enable)
+               mask = intlv[as2way->asym_2way_intlv_mode];
+       if (as0->slice0_asym_enable)
+               mask |= (1 << as0->slice0_asym_channel_select);
+       if (as1->slice1_asym_enable)
+               mask |= (4 << as1->slice1_asym_channel_select);
+       if (p->slice_0_mem_disabled)
+               mask &= 0xc;
+       if (p->slice_1_disabled)
+               mask &= 0x3;
+       if (p->ch_1_disabled || p->enable_pmi_dual_data_mode)
+               mask &= 0x5;
+
+       return mask;
+}
+
+static struct b_cr_tolud_pci tolud;
+static struct b_cr_touud_lo_pci touud_lo;
+static struct b_cr_touud_hi_pci touud_hi;
+static struct b_cr_asym_mem_region0_mchbar asym0;
+static struct b_cr_asym_mem_region1_mchbar asym1;
+static struct b_cr_asym_2way_mem_region_mchbar asym_2way;
+static struct b_cr_mot_out_base_mchbar mot_base;
+static struct b_cr_mot_out_mask_mchbar mot_mask;
+static struct b_cr_slice_channel_hash chash;
+
+/* Apollo Lake dunit */
+/*
+ * Validated on a board with just two DIMMs in the [0] and [2] positions
+ * in this array. The other port numbers match the documentation, but
+ * caution is advised.
+ */
+static const int apl_dports[APL_NUM_CHANNELS] = { 0x18, 0x10, 0x11, 0x19 };
+static struct d_cr_drp0 drp0[APL_NUM_CHANNELS];
+
+/* Denverton dunit */
+static const int dnv_dports[DNV_NUM_CHANNELS] = { 0x10, 0x12 };
+static struct d_cr_dsch dsch;
+static struct d_cr_ecc_ctrl ecc_ctrl[DNV_NUM_CHANNELS];
+static struct d_cr_drp drp[DNV_NUM_CHANNELS];
+static struct d_cr_dmap dmap[DNV_NUM_CHANNELS];
+static struct d_cr_dmap1 dmap1[DNV_NUM_CHANNELS];
+static struct d_cr_dmap2 dmap2[DNV_NUM_CHANNELS];
+static struct d_cr_dmap3 dmap3[DNV_NUM_CHANNELS];
+static struct d_cr_dmap4 dmap4[DNV_NUM_CHANNELS];
+static struct d_cr_dmap5 dmap5[DNV_NUM_CHANNELS];
+
+static void apl_mk_region(char *name, struct region *rp, void *asym)
+{
+       struct b_cr_asym_mem_region0_mchbar *a = asym;
+
+       mk_region(name, rp,
+                         U64_LSHIFT(a->slice0_asym_base, APL_ASYMSHIFT),
+                         U64_LSHIFT(a->slice0_asym_limit, APL_ASYMSHIFT) +
+                         GENMASK_ULL(APL_ASYMSHIFT - 1, 0));
+}
+
+static void dnv_mk_region(char *name, struct region *rp, void *asym)
+{
+       struct b_cr_asym_mem_region_denverton *a = asym;
+
+       mk_region(name, rp,
+                         U64_LSHIFT(a->slice_asym_base, DNV_ASYMSHIFT),
+                         U64_LSHIFT(a->slice_asym_limit, DNV_ASYMSHIFT) +
+                         GENMASK_ULL(DNV_ASYMSHIFT - 1, 0));
+}
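+
+/*
+ * The asym region registers hold base/limit in units of the ASYMSHIFT
+ * granularity: 256 MB (1 << 28) on Apollo Lake, 2 GB (1 << 31) on
+ * Denverton. E.g. an APL slice0_asym_base of 0x10 puts the region
+ * base at 4 GB.
+ */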
+
+static int apl_get_registers(void)
+{
+       int i;
+
+       if (RD_REG(&asym_2way, b_cr_asym_2way_mem_region_mchbar))
+               return -ENODEV;
+
+       for (i = 0; i < APL_NUM_CHANNELS; i++)
+               if (RD_REGP(&drp0[i], d_cr_drp0, apl_dports[i]))
+                       return -ENODEV;
+
+       return 0;
+}
+
+static int dnv_get_registers(void)
+{
+       int i;
+
+       if (RD_REG(&dsch, d_cr_dsch))
+               return -ENODEV;
+
+       for (i = 0; i < DNV_NUM_CHANNELS; i++)
+               if (RD_REGP(&ecc_ctrl[i], d_cr_ecc_ctrl, dnv_dports[i]) ||
+                       RD_REGP(&drp[i], d_cr_drp, dnv_dports[i]) ||
+                       RD_REGP(&dmap[i], d_cr_dmap, dnv_dports[i]) ||
+                       RD_REGP(&dmap1[i], d_cr_dmap1, dnv_dports[i]) ||
+                       RD_REGP(&dmap2[i], d_cr_dmap2, dnv_dports[i]) ||
+                       RD_REGP(&dmap3[i], d_cr_dmap3, dnv_dports[i]) ||
+                       RD_REGP(&dmap4[i], d_cr_dmap4, dnv_dports[i]) ||
+                       RD_REGP(&dmap5[i], d_cr_dmap5, dnv_dports[i]))
+                       return -ENODEV;
+
+       return 0;
+}
+
+/*
+ * Read all the h/w config registers once here (they don't
+ * change at run time). Figure out which address ranges have
+ * which interleave characteristics.
+ */
+static int get_registers(void)
+{
+       const int intlv[] = { 10, 11, 12, 12 };
+
+       if (RD_REG(&tolud, b_cr_tolud_pci) ||
+               RD_REG(&touud_lo, b_cr_touud_lo_pci) ||
+               RD_REG(&touud_hi, b_cr_touud_hi_pci) ||
+               RD_REG(&asym0, b_cr_asym_mem_region0_mchbar) ||
+               RD_REG(&asym1, b_cr_asym_mem_region1_mchbar) ||
+               RD_REG(&mot_base, b_cr_mot_out_base_mchbar) ||
+               RD_REG(&mot_mask, b_cr_mot_out_mask_mchbar) ||
+               RD_REG(&chash, b_cr_slice_channel_hash))
+               return -ENODEV;
+
+       if (ops->get_registers())
+               return -ENODEV;
+
+       if (ops->type == DNV) {
+               /* PMI channel idx (always 0) for asymmetric region */
+               asym0.slice0_asym_channel_select = 0;
+               asym1.slice1_asym_channel_select = 0;
+               /* PMI channel bitmap (always 1) for symmetric region */
+               chash.sym_slice0_channel_enabled = 0x1;
+               chash.sym_slice1_channel_enabled = 0x1;
+       }
+
+       if (asym0.slice0_asym_enable)
+               ops->mk_region("as0", &as0, &asym0);
+
+       if (asym1.slice1_asym_enable)
+               ops->mk_region("as1", &as1, &asym1);
+
+       if (asym_2way.asym_2way_interleave_enable) {
+               mk_region("as2way", &as2,
+                                 U64_LSHIFT(asym_2way.asym_2way_base, APL_ASYMSHIFT),
+                                 U64_LSHIFT(asym_2way.asym_2way_limit, APL_ASYMSHIFT) +
+                                 GENMASK_ULL(APL_ASYMSHIFT - 1, 0));
+       }
+
+       if (mot_base.imr_en) {
+               mk_region_mask("mot", &mot,
+                                          U64_LSHIFT(mot_base.mot_out_base, MOT_SHIFT),
+                                          U64_LSHIFT(mot_mask.mot_out_mask, MOT_SHIFT));
+       }
+
+       top_lm = U64_LSHIFT(tolud.tolud, 20);
+       top_hm = U64_LSHIFT(touud_hi.touud, 32) | U64_LSHIFT(touud_lo.touud, 20);
+
+       two_slices = !chash.slice_1_disabled &&
+                                !chash.slice_0_mem_disabled &&
+                                (chash.sym_slice0_channel_enabled != 0) &&
+                                (chash.sym_slice1_channel_enabled != 0);
+       two_channels = !chash.ch_1_disabled &&
+                                !chash.enable_pmi_dual_data_mode &&
+                                ((chash.sym_slice0_channel_enabled == 3) ||
+                                (chash.sym_slice1_channel_enabled == 3));
+
+       sym_chan_mask = gen_sym_mask(&chash);
+       asym_chan_mask = gen_asym_mask(&chash, &asym0, &asym1, &asym_2way);
+       chan_mask = sym_chan_mask | asym_chan_mask;
+
+       if (two_slices && !two_channels) {
+               if (chash.hvm_mode)
+                       slice_selector = 29;
+               else
+                       slice_selector = intlv[chash.interleave_mode];
+       } else if (!two_slices && two_channels) {
+               if (chash.hvm_mode)
+                       chan_selector = 29;
+               else
+                       chan_selector = intlv[chash.interleave_mode];
+       } else if (two_slices && two_channels) {
+               if (chash.hvm_mode) {
+                       slice_selector = 29;
+                       chan_selector = 30;
+               } else {
+                       slice_selector = intlv[chash.interleave_mode];
+                       chan_selector = intlv[chash.interleave_mode] + 1;
+               }
+       }
+
+       if (two_slices) {
+               if (!chash.hvm_mode)
+                       slice_hash_mask = chash.slice_hash_mask << SLICE_HASH_MASK_LSB;
+               if (!two_channels)
+                       slice_hash_mask |= BIT_ULL(slice_selector);
+       }
+
+       if (two_channels) {
+               if (!chash.hvm_mode)
+                       chan_hash_mask = chash.ch_hash_mask << CH_HASH_MASK_LSB;
+               if (!two_slices)
+                       chan_hash_mask |= BIT_ULL(chan_selector);
+       }
+
+       return 0;
+}
+
+/* Get a contiguous memory address (remove the MMIO gap) */
+static u64 remove_mmio_gap(u64 sys)
+{
+       return (sys < _4GB) ? sys : sys - (_4GB - top_lm);
+}
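+
+/*
+ * E.g. with top_lm (TOLUD) at 2 GB, the range [2 GB, 4 GB) is the MMIO
+ * gap, so system address 5 GB maps to contiguous address 3 GB.
+ */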
+
+/* Squeeze out one address bit, shift upper part down to fill gap */
+static void remove_addr_bit(u64 *addr, int bitidx)
+{
+       u64     mask;
+
+       if (bitidx == -1)
+               return;
+
+       mask = (1ull << bitidx) - 1;
+       *addr = ((*addr >> 1) & ~mask) | (*addr & mask);
+}
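+
+/*
+ * E.g. remove_addr_bit(&a, 2) turns a == 0b1101 into 0b101: bits
+ * below bit 2 stay put, all higher bits shift down one place.
+ */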
+
+/* XOR all the bits from addr specified in mask */
+static int hash_by_mask(u64 addr, u64 mask)
+{
+       u64 result = addr & mask;
+
+       result = (result >> 32) ^ result;
+       result = (result >> 16) ^ result;
+       result = (result >> 8) ^ result;
+       result = (result >> 4) ^ result;
+       result = (result >> 2) ^ result;
+       result = (result >> 1) ^ result;
+
+       return (int)result & 1;
+}
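+
+/*
+ * I.e. the parity of the masked address bits: hash_by_mask(0x6, 0x7)
+ * is 0 because 0b110 has an even number of bits set.
+ */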
+
+/*
+ * First stage decode. Take the system address and figure out which
+ * second stage will deal with it based on interleave modes.
+ */
+static int sys2pmi(const u64 addr, u32 *pmiidx, u64 *pmiaddr, char *msg)
+{
+       u64 contig_addr, contig_base, contig_offset, contig_base_adj;
+       int mot_intlv_bit = two_slices ? MOT_CHAN_INTLV_BIT_2SLC_2CH :
+                                               MOT_CHAN_INTLV_BIT_1SLC_2CH;
+       int slice_intlv_bit_rm = SELECTOR_DISABLED;
+       int chan_intlv_bit_rm = SELECTOR_DISABLED;
+       /* Determine if address is in the MOT region. */
+       bool mot_hit = in_region(&mot, addr);
+       /* Count the enabled symmetric channels. */
+       int sym_channels = hweight8(sym_chan_mask);
+
+       /*
+        * The amount we need to shift the asym base can be determined by the
+        * number of enabled symmetric channels.
+        * NOTE: This can only work because symmetric memory is not supposed
+        * to do a 3-way interleave.
+        */
+       int sym_chan_shift = sym_channels >> 1;
+
+       /* Give up if address is out of range, or in MMIO gap */
+       if (addr >= (1ul << PND_MAX_PHYS_BIT) ||
+          (addr >= top_lm && addr < _4GB) || addr >= top_hm) {
+               snprintf(msg, PND2_MSG_SIZE, "Error address 0x%llx is not DRAM", addr);
+               return -EINVAL;
+       }
+
+       /* Get a contiguous memory address (remove the MMIO gap) */
+       contig_addr = remove_mmio_gap(addr);
+
+       if (in_region(&as0, addr)) {
+               *pmiidx = asym0.slice0_asym_channel_select;
+
+               contig_base = remove_mmio_gap(as0.base);
+               contig_offset = contig_addr - contig_base;
+               contig_base_adj = (contig_base >> sym_chan_shift) *
+                                                 ((chash.sym_slice0_channel_enabled >> (*pmiidx & 1)) & 1);
+               contig_addr = contig_offset + ((sym_channels > 0) ? contig_base_adj : 0ull);
+       } else if (in_region(&as1, addr)) {
+               *pmiidx = 2u + asym1.slice1_asym_channel_select;
+
+               contig_base = remove_mmio_gap(as1.base);
+               contig_offset = contig_addr - contig_base;
+               contig_base_adj = (contig_base >> sym_chan_shift) *
+                                                 ((chash.sym_slice1_channel_enabled >> (*pmiidx & 1)) & 1);
+               contig_addr = contig_offset + ((sym_channels > 0) ? contig_base_adj : 0ull);
+       } else if (in_region(&as2, addr) && (asym_2way.asym_2way_intlv_mode == 0x3ul)) {
+               bool channel1;
+
+               mot_intlv_bit = MOT_CHAN_INTLV_BIT_1SLC_2CH;
+               *pmiidx = (asym_2way.asym_2way_intlv_mode & 1) << 1;
+               channel1 = mot_hit ? ((bool)((addr >> mot_intlv_bit) & 1)) :
+                       hash_by_mask(contig_addr, chan_hash_mask);
+               *pmiidx |= (u32)channel1;
+
+               contig_base = remove_mmio_gap(as2.base);
+               chan_intlv_bit_rm = mot_hit ? mot_intlv_bit : chan_selector;
+               contig_offset = contig_addr - contig_base;
+               remove_addr_bit(&contig_offset, chan_intlv_bit_rm);
+               contig_addr = (contig_base >> sym_chan_shift) + contig_offset;
+       } else {
+               /* Otherwise we're in normal, boring symmetric mode. */
+               *pmiidx = 0u;
+
+               if (two_slices) {
+                       bool slice1;
+
+                       if (mot_hit) {
+                               slice_intlv_bit_rm = MOT_SLC_INTLV_BIT;
+                               slice1 = (addr >> MOT_SLC_INTLV_BIT) & 1;
+                       } else {
+                               slice_intlv_bit_rm = slice_selector;
+                               slice1 = hash_by_mask(addr, slice_hash_mask);
+                       }
+
+                       *pmiidx = (u32)slice1 << 1;
+               }
+
+               if (two_channels) {
+                       bool channel1;
+
+                       mot_intlv_bit = two_slices ? MOT_CHAN_INTLV_BIT_2SLC_2CH :
+                                                       MOT_CHAN_INTLV_BIT_1SLC_2CH;
+
+                       if (mot_hit) {
+                               chan_intlv_bit_rm = mot_intlv_bit;
+                               channel1 = (addr >> mot_intlv_bit) & 1;
+                       } else {
+                               chan_intlv_bit_rm = chan_selector;
+                               channel1 = hash_by_mask(contig_addr, chan_hash_mask);
+                       }
+
+                       *pmiidx |= (u32)channel1;
+               }
+       }
+
+       /* Remove the chan_selector bit first */
+       remove_addr_bit(&contig_addr, chan_intlv_bit_rm);
+       /* Remove the slice bit (we remove it second because it must be lower) */
+       remove_addr_bit(&contig_addr, slice_intlv_bit_rm);
+       *pmiaddr = contig_addr;
+
+       return 0;
+}
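+
+/*
+ * Net effect for the symmetric case: *pmiidx is (slice << 1) | channel
+ * and *pmiaddr is the system address with the MMIO gap and the
+ * slice/channel interleave bits squeezed out.
+ */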
+
+/* Translate PMI address to memory (rank, row, bank, column) */
+#define C(n) (0x10 | (n))      /* column */
+#define B(n) (0x20 | (n))      /* bank */
+#define R(n) (0x40 | (n))      /* row */
+#define RS   (0x80)                    /* rank */
+
+/* addrdec values */
+#define AMAP_1KB       0
+#define AMAP_2KB       1
+#define AMAP_4KB       2
+#define AMAP_RSVD      3
+
+/* dden values */
+#define DEN_4Gb                0
+#define DEN_8Gb                2
+
+/* dwid values */
+#define X8             0
+#define X16            1
+
+static struct dimm_geometry {
+       u8      addrdec;
+       u8      dden;
+       u8      dwid;
+       u8      rowbits, colbits;
+       u16     bits[PMI_ADDRESS_WIDTH];
+} dimms[] = {
+       {
+               .addrdec = AMAP_1KB, .dden = DEN_4Gb, .dwid = X16,
+               .rowbits = 15, .colbits = 10,
+               .bits = {
+                       C(2),  C(3),  C(4),  C(5),  C(6),  B(0),  B(1),  B(2),  R(0),
+                       R(1),  R(2),  R(3),  R(4),  R(5),  R(6),  R(7),  R(8),  R(9),
+                       R(10), C(7),  C(8),  C(9),  R(11), RS,    R(12), R(13), R(14),
+                       0,     0,     0,     0
+               }
+       },
+       {
+               .addrdec = AMAP_1KB, .dden = DEN_4Gb, .dwid = X8,
+               .rowbits = 16, .colbits = 10,
+               .bits = {
+                       C(2),  C(3),  C(4),  C(5),  C(6),  B(0),  B(1),  B(2),  R(0),
+                       R(1),  R(2),  R(3),  R(4),  R(5),  R(6),  R(7),  R(8),  R(9),
+                       R(10), C(7),  C(8),  C(9),  R(11), RS,    R(12), R(13), R(14),
+                       R(15), 0,     0,     0
+               }
+       },
+       {
+               .addrdec = AMAP_1KB, .dden = DEN_8Gb, .dwid = X16,
+               .rowbits = 16, .colbits = 10,
+               .bits = {
+                       C(2),  C(3),  C(4),  C(5),  C(6),  B(0),  B(1),  B(2),  R(0),
+                       R(1),  R(2),  R(3),  R(4),  R(5),  R(6),  R(7),  R(8),  R(9),
+                       R(10), C(7),  C(8),  C(9),  R(11), RS,    R(12), R(13), R(14),
+                       R(15), 0,     0,     0
+               }
+       },
+       {
+               .addrdec = AMAP_1KB, .dden = DEN_8Gb, .dwid = X8,
+               .rowbits = 16, .colbits = 11,
+               .bits = {
+                       C(2),  C(3),  C(4),  C(5),  C(6),  B(0),  B(1),  B(2),  R(0),
+                       R(1),  R(2),  R(3),  R(4),  R(5),  R(6),  R(7),  R(8),  R(9),
+                       R(10), C(7),  C(8),  C(9),  R(11), RS,    C(11), R(12), R(13),
+                       R(14), R(15), 0,     0
+               }
+       },
+       {
+               .addrdec = AMAP_2KB, .dden = DEN_4Gb, .dwid = X16,
+               .rowbits = 15, .colbits = 10,
+               .bits = {
+                       C(2),  C(3),  C(4),  C(5),  C(6),  C(7),  B(0),  B(1),  B(2),
+                       R(0),  R(1),  R(2),  R(3),  R(4),  R(5),  R(6),  R(7),  R(8),
+                       R(9),  R(10), C(8),  C(9),  R(11), RS,    R(12), R(13), R(14),
+                       0,     0,     0,     0
+               }
+       },
+       {
+               .addrdec = AMAP_2KB, .dden = DEN_4Gb, .dwid = X8,
+               .rowbits = 16, .colbits = 10,
+               .bits = {
+                       C(2),  C(3),  C(4),  C(5),  C(6),  C(7),  B(0),  B(1),  B(2),
+                       R(0),  R(1),  R(2),  R(3),  R(4),  R(5),  R(6),  R(7),  R(8),
+                       R(9),  R(10), C(8),  C(9),  R(11), RS,    R(12), R(13), R(14),
+                       R(15), 0,     0,     0
+               }
+       },
+       {
+               .addrdec = AMAP_2KB, .dden = DEN_8Gb, .dwid = X16,
+               .rowbits = 16, .colbits = 10,
+               .bits = {
+                       C(2),  C(3),  C(4),  C(5),  C(6),  C(7),  B(0),  B(1),  B(2),
+                       R(0),  R(1),  R(2),  R(3),  R(4),  R(5),  R(6),  R(7),  R(8),
+                       R(9),  R(10), C(8),  C(9),  R(11), RS,    R(12), R(13), R(14),
+                       R(15), 0,     0,     0
+               }
+       },
+       {
+               .addrdec = AMAP_2KB, .dden = DEN_8Gb, .dwid = X8,
+               .rowbits = 16, .colbits = 11,
+               .bits = {
+                       C(2),  C(3),  C(4),  C(5),  C(6),  C(7),  B(0),  B(1),  B(2),
+                       R(0),  R(1),  R(2),  R(3),  R(4),  R(5),  R(6),  R(7),  R(8),
+                       R(9),  R(10), C(8),  C(9),  R(11), RS,    C(11), R(12), R(13),
+                       R(14), R(15), 0,     0
+               }
+       },
+       {
+               .addrdec = AMAP_4KB, .dden = DEN_4Gb, .dwid = X16,
+               .rowbits = 15, .colbits = 10,
+               .bits = {
+                       C(2),  C(3),  C(4),  C(5),  C(6),  C(7),  C(8),  B(0),  B(1),
+                       B(2),  R(0),  R(1),  R(2),  R(3),  R(4),  R(5),  R(6),  R(7),
+                       R(8),  R(9),  R(10), C(9),  R(11), RS,    R(12), R(13), R(14),
+                       0,     0,     0,     0
+               }
+       },
+       {
+               .addrdec = AMAP_4KB, .dden = DEN_4Gb, .dwid = X8,
+               .rowbits = 16, .colbits = 10,
+               .bits = {
+                       C(2),  C(3),  C(4),  C(5),  C(6),  C(7),  C(8),  B(0),  B(1),
+                       B(2),  R(0),  R(1),  R(2),  R(3),  R(4),  R(5),  R(6),  R(7),
+                       R(8),  R(9),  R(10), C(9),  R(11), RS,    R(12), R(13), R(14),
+                       R(15), 0,     0,     0
+               }
+       },
+       {
+               .addrdec = AMAP_4KB, .dden = DEN_8Gb, .dwid = X16,
+               .rowbits = 16, .colbits = 10,
+               .bits = {
+                       C(2),  C(3),  C(4),  C(5),  C(6),  C(7),  C(8),  B(0),  B(1),
+                       B(2),  R(0),  R(1),  R(2),  R(3),  R(4),  R(5),  R(6),  R(7),
+                       R(8),  R(9),  R(10), C(9),  R(11), RS,    R(12), R(13), R(14),
+                       R(15), 0,     0,     0
+               }
+       },
+       {
+               .addrdec = AMAP_4KB, .dden = DEN_8Gb, .dwid = X8,
+               .rowbits = 16, .colbits = 11,
+               .bits = {
+                       C(2),  C(3),  C(4),  C(5),  C(6),  C(7),  C(8),  B(0),  B(1),
+                       B(2),  R(0),  R(1),  R(2),  R(3),  R(4),  R(5),  R(6),  R(7),
+                       R(8),  R(9),  R(10), C(9),  R(11), RS,    C(11), R(12), R(13),
+                       R(14), R(15), 0,     0
+               }
+       }
+};
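+
+/*
+ * Read each bits[] entry as "PMI address bit i selects this DRAM
+ * address bit": in the first geometry above, PMI bits 0-4 map to
+ * column bits 2-6, PMI bits 5-7 to bank bits 0-2, and so on.
+ */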
+
+static int bank_hash(u64 pmiaddr, int idx, int shft)
+{
+       int bhash = 0;
+
+       switch (idx) {
+       case 0:
+               bhash ^= ((pmiaddr >> (12 + shft)) ^ (pmiaddr >> (9 + shft))) & 1;
+               break;
+       case 1:
+               bhash ^= (((pmiaddr >> (10 + shft)) ^ (pmiaddr >> (8 + shft))) & 1) << 1;
+               bhash ^= ((pmiaddr >> 22) & 1) << 1;
+               break;
+       case 2:
+               bhash ^= (((pmiaddr >> (13 + shft)) ^ (pmiaddr >> (11 + shft))) & 1) << 2;
+               break;
+       }
+
+       return bhash;
+}
+
+static int rank_hash(u64 pmiaddr)
+{
+       return ((pmiaddr >> 16) ^ (pmiaddr >> 10)) & 1;
+}
+
+/* Second stage decode. Compute rank, bank, row & column. */
+static int apl_pmi2mem(struct mem_ctl_info *mci, u64 pmiaddr, u32 pmiidx,
+                      struct dram_addr *daddr, char *msg)
+{
+       struct d_cr_drp0 *cr_drp0 = &drp0[pmiidx];
+       struct pnd2_pvt *pvt = mci->pvt_info;
+       int g = pvt->dimm_geom[pmiidx];
+       struct dimm_geometry *d = &dimms[g];
+       int column = 0, bank = 0, row = 0, rank = 0;
+       int i, idx, type, skiprs = 0;
+
+       for (i = 0; i < PMI_ADDRESS_WIDTH; i++) {
+               int     bit = (pmiaddr >> i) & 1;
+
+               if (i + skiprs >= PMI_ADDRESS_WIDTH) {
+                       snprintf(msg, PND2_MSG_SIZE, "Bad dimm_geometry[] table\n");
+                       return -EINVAL;
+               }
+
+               type = d->bits[i + skiprs] & ~0xf;
+               idx = d->bits[i + skiprs] & 0xf;
+
+               /*
+                * On single rank DIMMs ignore the rank select bit
+                * and shift remainder of "bits[]" down one place.
+                */
+               if (type == RS && (cr_drp0->rken0 + cr_drp0->rken1) == 1) {
+                       skiprs = 1;
+                       type = d->bits[i + skiprs] & ~0xf;
+                       idx = d->bits[i + skiprs] & 0xf;
+               }
+
+               switch (type) {
+               case C(0):
+                       column |= (bit << idx);
+                       break;
+               case B(0):
+                       bank |= (bit << idx);
+                       if (cr_drp0->bahen)
+                               bank ^= bank_hash(pmiaddr, idx, d->addrdec);
+                       break;
+               case R(0):
+                       row |= (bit << idx);
+                       break;
+               case RS:
+                       rank = bit;
+                       if (cr_drp0->rsien)
+                               rank ^= rank_hash(pmiaddr);
+                       break;
+               default:
+                       if (bit) {
+                               snprintf(msg, PND2_MSG_SIZE, "Bad translation\n");
+                               return -EINVAL;
+                       }
+                       goto done;
+               }
+       }
+
+done:
+       daddr->col = column;
+       daddr->bank = bank;
+       daddr->row = row;
+       daddr->rank = rank;
+       daddr->dimm = 0;
+
+       return 0;
+}
+
+/* Pluck bit "in" from pmiaddr and return value shifted to bit "out" */
+#define dnv_get_bit(pmi, in, out) ((int)(((pmi) >> (in)) & 1u) << (out))
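+/* E.g. dnv_get_bit(0x2000, 13, 4) == 0x10: PMI bit 13 lands in bit 4 */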
+
+static int dnv_pmi2mem(struct mem_ctl_info *mci, u64 pmiaddr, u32 pmiidx,
+                                          struct dram_addr *daddr, char *msg)
+{
+       /* Rank 0 or 1 */
+       daddr->rank = dnv_get_bit(pmiaddr, dmap[pmiidx].rs0 + 13, 0);
+       /* Rank 2 or 3 */
+       daddr->rank |= dnv_get_bit(pmiaddr, dmap[pmiidx].rs1 + 13, 1);
+
+       /*
+        * Normally ranks 0,1 are DIMM0, and 2,3 are DIMM1, but we
+        * flip them if DIMM1 is larger than DIMM0.
+        */
+       daddr->dimm = (daddr->rank >= 2) ^ drp[pmiidx].dimmflip;
+
+       daddr->bank = dnv_get_bit(pmiaddr, dmap[pmiidx].ba0 + 6, 0);
+       daddr->bank |= dnv_get_bit(pmiaddr, dmap[pmiidx].ba1 + 6, 1);
+       daddr->bank |= dnv_get_bit(pmiaddr, dmap[pmiidx].bg0 + 6, 2);
+       if (dsch.ddr4en)
+               daddr->bank |= dnv_get_bit(pmiaddr, dmap[pmiidx].bg1 + 6, 3);
+       if (dmap1[pmiidx].bxor) {
+               if (dsch.ddr4en) {
+                       daddr->bank ^= dnv_get_bit(pmiaddr, dmap3[pmiidx].row6 + 6, 0);
+                       daddr->bank ^= dnv_get_bit(pmiaddr, dmap3[pmiidx].row7 + 6, 1);
+                       if (dsch.chan_width == 0)
+                               /* 64/72 bit dram channel width */
+                               daddr->bank ^= dnv_get_bit(pmiaddr, dmap5[pmiidx].ca3 + 6, 2);
+                       else
+                               /* 32/40 bit dram channel width */
+                               daddr->bank ^= dnv_get_bit(pmiaddr, dmap5[pmiidx].ca4 + 6, 2);
+                       daddr->bank ^= dnv_get_bit(pmiaddr, dmap2[pmiidx].row2 + 6, 3);
+               } else {
+                       daddr->bank ^= dnv_get_bit(pmiaddr, dmap2[pmiidx].row2 + 6, 0);
+                       daddr->bank ^= dnv_get_bit(pmiaddr, dmap3[pmiidx].row6 + 6, 1);
+                       if (dsch.chan_width == 0)
+                               daddr->bank ^= dnv_get_bit(pmiaddr, dmap5[pmiidx].ca3 + 6, 2);
+                       else
+                               daddr->bank ^= dnv_get_bit(pmiaddr, dmap5[pmiidx].ca4 + 6, 2);
+               }
+       }
+
+       daddr->row = dnv_get_bit(pmiaddr, dmap2[pmiidx].row0 + 6, 0);
+       daddr->row |= dnv_get_bit(pmiaddr, dmap2[pmiidx].row1 + 6, 1);
+       daddr->row |= dnv_get_bit(pmiaddr, dmap2[pmiidx].row2 + 6, 2);
+       daddr->row |= dnv_get_bit(pmiaddr, dmap2[pmiidx].row3 + 6, 3);
+       daddr->row |= dnv_get_bit(pmiaddr, dmap2[pmiidx].row4 + 6, 4);
+       daddr->row |= dnv_get_bit(pmiaddr, dmap2[pmiidx].row5 + 6, 5);
+       daddr->row |= dnv_get_bit(pmiaddr, dmap3[pmiidx].row6 + 6, 6);
+       daddr->row |= dnv_get_bit(pmiaddr, dmap3[pmiidx].row7 + 6, 7);
+       daddr->row |= dnv_get_bit(pmiaddr, dmap3[pmiidx].row8 + 6, 8);
+       daddr->row |= dnv_get_bit(pmiaddr, dmap3[pmiidx].row9 + 6, 9);
+       daddr->row |= dnv_get_bit(pmiaddr, dmap3[pmiidx].row10 + 6, 10);
+       daddr->row |= dnv_get_bit(pmiaddr, dmap3[pmiidx].row11 + 6, 11);
+       daddr->row |= dnv_get_bit(pmiaddr, dmap4[pmiidx].row12 + 6, 12);
+       daddr->row |= dnv_get_bit(pmiaddr, dmap4[pmiidx].row13 + 6, 13);
+       if (dmap4[pmiidx].row14 != 31)
+               daddr->row |= dnv_get_bit(pmiaddr, dmap4[pmiidx].row14 + 6, 14);
+       if (dmap4[pmiidx].row15 != 31)
+               daddr->row |= dnv_get_bit(pmiaddr, dmap4[pmiidx].row15 + 6, 15);
+       if (dmap4[pmiidx].row16 != 31)
+               daddr->row |= dnv_get_bit(pmiaddr, dmap4[pmiidx].row16 + 6, 16);
+       if (dmap4[pmiidx].row17 != 31)
+               daddr->row |= dnv_get_bit(pmiaddr, dmap4[pmiidx].row17 + 6, 17);
+
+       daddr->col = dnv_get_bit(pmiaddr, dmap5[pmiidx].ca3 + 6, 3);
+       daddr->col |= dnv_get_bit(pmiaddr, dmap5[pmiidx].ca4 + 6, 4);
+       daddr->col |= dnv_get_bit(pmiaddr, dmap5[pmiidx].ca5 + 6, 5);
+       daddr->col |= dnv_get_bit(pmiaddr, dmap5[pmiidx].ca6 + 6, 6);
+       daddr->col |= dnv_get_bit(pmiaddr, dmap5[pmiidx].ca7 + 6, 7);
+       daddr->col |= dnv_get_bit(pmiaddr, dmap5[pmiidx].ca8 + 6, 8);
+       daddr->col |= dnv_get_bit(pmiaddr, dmap5[pmiidx].ca9 + 6, 9);
+       if (!dsch.ddr4en && dmap1[pmiidx].ca11 != 0x3f)
+               daddr->col |= dnv_get_bit(pmiaddr, dmap1[pmiidx].ca11 + 13, 11);
+
+       return 0;
+}
+
+static int check_channel(int ch)
+{
+       if (drp0[ch].dramtype != 0) {
+               pnd2_printk(KERN_INFO, "Unsupported DIMM in channel %d\n", ch);
+               return 1;
+       } else if (drp0[ch].eccen == 0) {
+               pnd2_printk(KERN_INFO, "ECC disabled on channel %d\n", ch);
+               return 1;
+       }
+       return 0;
+}
+
+static int apl_check_ecc_active(void)
+{
+       int     i, ret = 0;
+
+       /* Check dramtype and ECC mode for each present DIMM */
+       for (i = 0; i < APL_NUM_CHANNELS; i++)
+               if (chan_mask & BIT(i))
+                       ret += check_channel(i);
+       return ret ? -EINVAL : 0;
+}
+
+#define DIMMS_PRESENT(d) ((d)->rken0 + (d)->rken1 + (d)->rken2 + (d)->rken3)
+
+static int check_unit(int ch)
+{
+       struct d_cr_drp *d = &drp[ch];
+
+       if (DIMMS_PRESENT(d) && !ecc_ctrl[ch].eccen) {
+               pnd2_printk(KERN_INFO, "ECC disabled on channel %d\n", ch);
+               return 1;
+       }
+       return 0;
+}
+
+static int dnv_check_ecc_active(void)
+{
+       int     i, ret = 0;
+
+       for (i = 0; i < DNV_NUM_CHANNELS; i++)
+               ret += check_unit(i);
+       return ret ? -EINVAL : 0;
+}
+
+static int get_memory_error_data(struct mem_ctl_info *mci, u64 addr,
+                                                                struct dram_addr *daddr, char *msg)
+{
+       u64     pmiaddr;
+       u32     pmiidx;
+       int     ret;
+
+       ret = sys2pmi(addr, &pmiidx, &pmiaddr, msg);
+       if (ret)
+               return ret;
+
+       pmiaddr >>= ops->pmiaddr_shift;
+       /* pmi channel idx to dimm channel idx */
+       pmiidx >>= ops->pmiidx_shift;
+       daddr->chan = pmiidx;
+
+       ret = ops->pmi2mem(mci, pmiaddr, pmiidx, daddr, msg);
+       if (ret)
+               return ret;
+
+       edac_dbg(0, "SysAddr=%llx PmiAddr=%llx Channel=%d DIMM=%d Rank=%d Bank=%d Row=%d Column=%d\n",
+                        addr, pmiaddr, daddr->chan, daddr->dimm, daddr->rank, daddr->bank, daddr->row, daddr->col);
+
+       return 0;
+}
+
+static void pnd2_mce_output_error(struct mem_ctl_info *mci, const struct mce *m,
+                                 struct dram_addr *daddr)
+{
+       enum hw_event_mc_err_type tp_event;
+       char *optype, msg[PND2_MSG_SIZE];
+       bool ripv = m->mcgstatus & MCG_STATUS_RIPV;
+       bool overflow = m->status & MCI_STATUS_OVER;
+       bool uc_err = m->status & MCI_STATUS_UC;
+       bool recov = m->status & MCI_STATUS_S;
+       u32 core_err_cnt = GET_BITFIELD(m->status, 38, 52);
+       u32 mscod = GET_BITFIELD(m->status, 16, 31);
+       u32 errcode = GET_BITFIELD(m->status, 0, 15);
+       u32 optypenum = GET_BITFIELD(m->status, 4, 6);
+       int rc;
+
+       tp_event = uc_err ? (ripv ? HW_EVENT_ERR_FATAL : HW_EVENT_ERR_UNCORRECTED) :
+                                                HW_EVENT_ERR_CORRECTED;
+
+       /*
+        * According to Table 15-9 of the Intel Architecture spec vol 3A,
+        * memory errors should fit in this mask:
+        *      000f 0000 1mmm cccc (binary)
+        * where:
+        *      f = Correction Report Filtering Bit. If 1, subsequent errors
+        *          won't be shown
+        *      mmm = error type
+        *      cccc = channel
+        * If the mask doesn't match, report an error to the parsing logic
+        */
+       if (!((errcode & 0xef80) == 0x80)) {
+               optype = "Can't parse: it is not a mem";
+       } else {
+               switch (optypenum) {
+               case 0:
+                       optype = "generic undef request error";
+                       break;
+               case 1:
+                       optype = "memory read error";
+                       break;
+               case 2:
+                       optype = "memory write error";
+                       break;
+               case 3:
+                       optype = "addr/cmd error";
+                       break;
+               case 4:
+                       optype = "memory scrubbing error";
+                       break;
+               default:
+                       optype = "reserved";
+                       break;
+               }
+       }
+
+       /* Only decode errors with a valid address (ADDRV) */
+       if (!(m->status & MCI_STATUS_ADDRV))
+               return;
+
+       rc = get_memory_error_data(mci, m->addr, daddr, msg);
+       if (rc)
+               goto address_error;
+
+       snprintf(msg, sizeof(msg),
+                "%s%s err_code:%04x:%04x channel:%d DIMM:%d rank:%d row:%d bank:%d col:%d",
+                overflow ? " OVERFLOW" : "", (uc_err && recov) ? " recoverable" : "", mscod,
+                errcode, daddr->chan, daddr->dimm, daddr->rank, daddr->row, daddr->bank, daddr->col);
+
+       edac_dbg(0, "%s\n", msg);
+
+       /* Call the helper to output message */
+       edac_mc_handle_error(tp_event, mci, core_err_cnt, m->addr >> PAGE_SHIFT,
+                                                m->addr & ~PAGE_MASK, 0, daddr->chan, daddr->dimm, -1, optype, msg);
+
+       return;
+
+address_error:
+       edac_mc_handle_error(tp_event, mci, core_err_cnt, 0, 0, 0, -1, -1, -1, msg, "");
+}
+
+static void apl_get_dimm_config(struct mem_ctl_info *mci)
+{
+       struct pnd2_pvt *pvt = mci->pvt_info;
+       struct dimm_info *dimm;
+       struct d_cr_drp0 *d;
+       u64     capacity;
+       int     i, g;
+
+       for (i = 0; i < APL_NUM_CHANNELS; i++) {
+               if (!(chan_mask & BIT(i)))
+                       continue;
+
+               dimm = EDAC_DIMM_PTR(mci->layers, mci->dimms, mci->n_layers, i, 0, 0);
+               if (!dimm) {
+                       edac_dbg(0, "No allocated DIMM for channel %d\n", i);
+                       continue;
+               }
+
+               d = &drp0[i];
+               for (g = 0; g < ARRAY_SIZE(dimms); g++)
+                       if (dimms[g].addrdec == d->addrdec &&
+                           dimms[g].dden == d->dden &&
+                           dimms[g].dwid == d->dwid)
+                               break;
+
+               if (g == ARRAY_SIZE(dimms)) {
+                       edac_dbg(0, "Channel %d: unrecognized DIMM\n", i);
+                       continue;
+               }
+
+               pvt->dimm_geom[i] = g;
+               capacity = (d->rken0 + d->rken1) * 8 * (1ul << dimms[g].rowbits) *
+                                  (1ul << dimms[g].colbits);
+               edac_dbg(0, "Channel %d: %lld MByte DIMM\n", i, capacity >> (20 - 3));
+               dimm->nr_pages = MiB_TO_PAGES(capacity >> (20 - 3));
+               dimm->grain = 32;
+               dimm->dtype = (d->dwid == 0) ? DEV_X8 : DEV_X16;
+               dimm->mtype = MEM_DDR3;
+               dimm->edac_mode = EDAC_SECDED;
+               snprintf(dimm->label, sizeof(dimm->label), "Slice#%d_Chan#%d", i / 2, i % 2);
+       }
+}
+
+static const int dnv_dtypes[] = {
+       DEV_X8, DEV_X4, DEV_X16, DEV_UNKNOWN
+};
+
+static void dnv_get_dimm_config(struct mem_ctl_info *mci)
+{
+       int     i, j, ranks_of_dimm[DNV_MAX_DIMMS], banks, rowbits, colbits, memtype;
+       struct dimm_info *dimm;
+       struct d_cr_drp *d;
+       u64     capacity;
+
+       if (dsch.ddr4en) {
+               memtype = MEM_DDR4;
+               banks = 16;
+               colbits = 10;
+       } else {
+               memtype = MEM_DDR3;
+               banks = 8;
+       }
+
+       for (i = 0; i < DNV_NUM_CHANNELS; i++) {
+               if (dmap4[i].row14 == 31)
+                       rowbits = 14;
+               else if (dmap4[i].row15 == 31)
+                       rowbits = 15;
+               else if (dmap4[i].row16 == 31)
+                       rowbits = 16;
+               else if (dmap4[i].row17 == 31)
+                       rowbits = 17;
+               else
+                       rowbits = 18;
+
+               if (memtype == MEM_DDR3) {
+                       if (dmap1[i].ca11 != 0x3f)
+                               colbits = 12;
+                       else
+                               colbits = 10;
+               }
+
+               d = &drp[i];
+               /* DIMM0 is present if rank0 and/or rank1 is enabled */
+               ranks_of_dimm[0] = d->rken0 + d->rken1;
+               /* DIMM1 is present if rank2 and/or rank3 is enabled */
+               ranks_of_dimm[1] = d->rken2 + d->rken3;
+
+               for (j = 0; j < DNV_MAX_DIMMS; j++) {
+                       if (!ranks_of_dimm[j])
+                               continue;
+
+                       dimm = EDAC_DIMM_PTR(mci->layers, mci->dimms, mci->n_layers, i, j, 0);
+                       if (!dimm) {
+                               edac_dbg(0, "No allocated DIMM for channel %d DIMM %d\n", i, j);
+                               continue;
+                       }
+
+                       capacity = ranks_of_dimm[j] * banks * (1ul << rowbits) * (1ul << colbits);
+                       edac_dbg(0, "Channel %d DIMM %d: %lld MByte DIMM\n", i, j, capacity >> (20 - 3));
+                       dimm->nr_pages = MiB_TO_PAGES(capacity >> (20 - 3));
+                       dimm->grain = 32;
+                       dimm->dtype = dnv_dtypes[j ? d->dimmdwid1 : d->dimmdwid0];
+                       dimm->mtype = memtype;
+                       dimm->edac_mode = EDAC_SECDED;
+                       snprintf(dimm->label, sizeof(dimm->label), "Chan#%d_DIMM#%d", i, j);
+               }
+       }
+}
+
+static int pnd2_register_mci(struct mem_ctl_info **ppmci)
+{
+       struct edac_mc_layer layers[2];
+       struct mem_ctl_info *mci;
+       struct pnd2_pvt *pvt;
+       int rc;
+
+       rc = ops->check_ecc();
+       if (rc < 0)
+               return rc;
+
+       /* Allocate a new MC control structure */
+       layers[0].type = EDAC_MC_LAYER_CHANNEL;
+       layers[0].size = ops->channels;
+       layers[0].is_virt_csrow = false;
+       layers[1].type = EDAC_MC_LAYER_SLOT;
+       layers[1].size = ops->dimms_per_channel;
+       layers[1].is_virt_csrow = true;
+       mci = edac_mc_alloc(0, ARRAY_SIZE(layers), layers, sizeof(*pvt));
+       if (!mci)
+               return -ENOMEM;
+
+       pvt = mci->pvt_info;
+       memset(pvt, 0, sizeof(*pvt));
+
+       mci->mod_name = "pnd2_edac.c";
+       mci->dev_name = ops->name;
+       mci->ctl_name = "Pondicherry2";
+
+       /* Get dimm basic config and the memory layout */
+       ops->get_dimm_config(mci);
+
+       if (edac_mc_add_mc(mci)) {
+               edac_dbg(0, "MC: failed edac_mc_add_mc()\n");
+               edac_mc_free(mci);
+               return -EINVAL;
+       }
+
+       *ppmci = mci;
+
+       return 0;
+}
+
+static void pnd2_unregister_mci(struct mem_ctl_info *mci)
+{
+       if (unlikely(!mci || !mci->pvt_info)) {
+               pnd2_printk(KERN_ERR, "Couldn't find mci handler\n");
+               return;
+       }
+
+       /* Remove MC sysfs nodes */
+       edac_mc_del_mc(NULL);
+       edac_dbg(1, "%s: free mci struct\n", mci->ctl_name);
+       edac_mc_free(mci);
+}
+
+/*
+ * Callback function registered with core kernel mce code.
+ * Called once for each logged error.
+ */
+static int pnd2_mce_check_error(struct notifier_block *nb, unsigned long val, void *data)
+{
+       struct mce *mce = (struct mce *)data;
+       struct mem_ctl_info *mci;
+       struct dram_addr daddr;
+       char *type;
+
+       if (get_edac_report_status() == EDAC_REPORTING_DISABLED)
+               return NOTIFY_DONE;
+
+       mci = pnd2_mci;
+       if (!mci)
+               return NOTIFY_DONE;
+
+       /*
+        * Just let mcelog handle it if the error is
+        * outside the memory controller. A memory error
+        * is indicated by bit 7 = 1 and bits 8-11, 13-15 = 0.
+        * Bit 12 has a special meaning.
+        */
+       if ((mce->status & 0xefff) >> 7 != 1)
+               return NOTIFY_DONE;
+
+       if (mce->mcgstatus & MCG_STATUS_MCIP)
+               type = "Exception";
+       else
+               type = "Event";
+
+       pnd2_mc_printk(mci, KERN_INFO, "HANDLING MCE MEMORY ERROR\n");
+       pnd2_mc_printk(mci, KERN_INFO, "CPU %u: Machine Check %s: %llx Bank %u: %llx\n",
+                                  mce->extcpu, type, mce->mcgstatus, mce->bank, mce->status);
+       pnd2_mc_printk(mci, KERN_INFO, "TSC %llx ", mce->tsc);
+       pnd2_mc_printk(mci, KERN_INFO, "ADDR %llx ", mce->addr);
+       pnd2_mc_printk(mci, KERN_INFO, "MISC %llx ", mce->misc);
+       pnd2_mc_printk(mci, KERN_INFO, "PROCESSOR %u:%x TIME %llu SOCKET %u APIC %x\n",
+                                  mce->cpuvendor, mce->cpuid, mce->time, mce->socketid, mce->apicid);
+
+       pnd2_mce_output_error(mci, mce, &daddr);
+
+       /* Advise mcelog that the error was handled */
+       return NOTIFY_STOP;
+}
+
+static struct notifier_block pnd2_mce_dec = {
+       .notifier_call  = pnd2_mce_check_error,
+};
+
+#ifdef CONFIG_EDAC_DEBUG
+/*
+ * Write an address to this file to exercise the address decode
+ * logic in this driver.
+ */
+static u64 pnd2_fake_addr;
+#define PND2_BLOB_SIZE 1024
+static char pnd2_result[PND2_BLOB_SIZE];
+static struct dentry *pnd2_test;
+static struct debugfs_blob_wrapper pnd2_blob = {
+       .data = pnd2_result,
+       .size = 0
+};
+
+static int debugfs_u64_set(void *data, u64 val)
+{
+       struct dram_addr daddr;
+       struct mce m;
+
+       *(u64 *)data = val;
+       m.mcgstatus = 0;
+       /* ADDRV + MemRd + Unknown channel */
+       m.status = MCI_STATUS_ADDRV + 0x9f;
+       m.addr = val;
+       pnd2_mce_output_error(pnd2_mci, &m, &daddr);
+       snprintf(pnd2_blob.data, PND2_BLOB_SIZE,
+                        "SysAddr=%llx Channel=%d DIMM=%d Rank=%d Bank=%d Row=%d Column=%d\n",
+                        m.addr, daddr.chan, daddr.dimm, daddr.rank, daddr.bank, daddr.row, daddr.col);
+       pnd2_blob.size = strlen(pnd2_blob.data);
+
+       return 0;
+}
+DEFINE_DEBUGFS_ATTRIBUTE(fops_u64_wo, NULL, debugfs_u64_set, "%llu\n");
+
+static void setup_pnd2_debug(void)
+{
+       pnd2_test = edac_debugfs_create_dir("pnd2_test");
+       edac_debugfs_create_file("pnd2_debug_addr", 0200, pnd2_test,
+                                                        &pnd2_fake_addr, &fops_u64_wo);
+       debugfs_create_blob("pnd2_debug_results", 0400, pnd2_test, &pnd2_blob);
+}
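+
+/*
+ * Typical usage, assuming the usual EDAC debugfs location:
+ *
+ *   echo 0x12345678 > /sys/kernel/debug/edac/pnd2_test/pnd2_debug_addr
+ *   cat /sys/kernel/debug/edac/pnd2_test/pnd2_debug_results
+ */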
+
+static void teardown_pnd2_debug(void)
+{
+       debugfs_remove_recursive(pnd2_test);
+}
+#else
+static void setup_pnd2_debug(void)     {}
+static void teardown_pnd2_debug(void)  {}
+#endif /* CONFIG_EDAC_DEBUG */
+
+static int pnd2_probe(void)
+{
+       int rc;
+
+       edac_dbg(2, "\n");
+       rc = get_registers();
+       if (rc)
+               return rc;
+
+       return pnd2_register_mci(&pnd2_mci);
+}
+
+static void pnd2_remove(void)
+{
+       edac_dbg(0, "\n");
+       pnd2_unregister_mci(pnd2_mci);
+}
+
+static struct dunit_ops apl_ops = {
+               .name                   = "pnd2/apl",
+               .type                   = APL,
+               .pmiaddr_shift          = LOG2_PMI_ADDR_GRANULARITY,
+               .pmiidx_shift           = 0,
+               .channels               = APL_NUM_CHANNELS,
+               .dimms_per_channel      = 1,
+               .rd_reg                 = apl_rd_reg,
+               .get_registers          = apl_get_registers,
+               .check_ecc              = apl_check_ecc_active,
+               .mk_region              = apl_mk_region,
+               .get_dimm_config        = apl_get_dimm_config,
+               .pmi2mem                = apl_pmi2mem,
+};
+
+static struct dunit_ops dnv_ops = {
+               .name                   = "pnd2/dnv",
+               .type                   = DNV,
+               .pmiaddr_shift          = 0,
+               .pmiidx_shift           = 1,
+               .channels               = DNV_NUM_CHANNELS,
+               .dimms_per_channel      = 2,
+               .rd_reg                 = dnv_rd_reg,
+               .get_registers          = dnv_get_registers,
+               .check_ecc              = dnv_check_ecc_active,
+               .mk_region              = dnv_mk_region,
+               .get_dimm_config        = dnv_get_dimm_config,
+               .pmi2mem                = dnv_pmi2mem,
+};
+
+static const struct x86_cpu_id pnd2_cpuids[] = {
+       { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_GOLDMONT, 0, (kernel_ulong_t)&apl_ops },
+       { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_DENVERTON, 0, (kernel_ulong_t)&dnv_ops },
+       { }
+};
+MODULE_DEVICE_TABLE(x86cpu, pnd2_cpuids);
+
+static int __init pnd2_init(void)
+{
+       const struct x86_cpu_id *id;
+       int rc;
+
+       edac_dbg(2, "\n");
+
+       id = x86_match_cpu(pnd2_cpuids);
+       if (!id)
+               return -ENODEV;
+
+       ops = (struct dunit_ops *)id->driver_data;
+
+       /* Ensure that the OPSTATE is set correctly for POLL or NMI */
+       opstate_init();
+
+       rc = pnd2_probe();
+       if (rc < 0) {
+               pnd2_printk(KERN_ERR, "Failed to register device with error %d.\n", rc);
+               return rc;
+       }
+
+       if (!pnd2_mci)
+               return -ENODEV;
+
+       mce_register_decode_chain(&pnd2_mce_dec);
+       setup_pnd2_debug();
+
+       return 0;
+}
+
+static void __exit pnd2_exit(void)
+{
+       edac_dbg(2, "\n");
+       teardown_pnd2_debug();
+       mce_unregister_decode_chain(&pnd2_mce_dec);
+       pnd2_remove();
+}
+
+module_init(pnd2_init);
+module_exit(pnd2_exit);
+
+module_param(edac_op_state, int, 0444);
+MODULE_PARM_DESC(edac_op_state, "EDAC Error Reporting state: 0=Poll,1=NMI");
+
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Tony Luck");
+MODULE_DESCRIPTION("MC Driver for Intel SoC using Pondicherry memory controller");
diff --git a/drivers/edac/pnd2_edac.h b/drivers/edac/pnd2_edac.h
new file mode 100644 (file)
index 0000000..61b6e79
--- /dev/null
@@ -0,0 +1,301 @@
+/*
+ * Register bitfield descriptions for Pondicherry2 memory controller.
+ *
+ * Copyright (c) 2016, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef _PND2_REGS_H
+#define _PND2_REGS_H
+
+struct b_cr_touud_lo_pci {
+       u32     lock : 1;
+       u32     reserved_1 : 19;
+       u32     touud : 12;
+};
+
+#define b_cr_touud_lo_pci_port 0x4c
+#define b_cr_touud_lo_pci_offset 0xa8
+#define b_cr_touud_lo_pci_r_opcode 0x04
+
+struct b_cr_touud_hi_pci {
+       u32     touud : 7;
+       u32     reserved_0 : 25;
+};
+
+#define b_cr_touud_hi_pci_port 0x4c
+#define b_cr_touud_hi_pci_offset 0xac
+#define b_cr_touud_hi_pci_r_opcode 0x04
+
+struct b_cr_tolud_pci {
+       u32     lock : 1;
+       u32     reserved_0 : 19;
+       u32     tolud : 12;
+};
+
+#define b_cr_tolud_pci_port 0x4c
+#define b_cr_tolud_pci_offset 0xbc
+#define b_cr_tolud_pci_r_opcode 0x04
+
+struct b_cr_mchbar_lo_pci {
+       u32 enable : 1;
+       u32 pad_3_1 : 3;
+       u32 pad_14_4: 11;
+       u32 base: 17;
+};
+
+struct b_cr_mchbar_hi_pci {
+       u32 base : 7;
+       u32 pad_31_7 : 25;
+};
+
+/* Symmetric region */
+struct b_cr_slice_channel_hash {
+       u64     slice_1_disabled : 1;
+       u64     hvm_mode : 1;
+       u64     interleave_mode : 2;
+       u64     slice_0_mem_disabled : 1;
+       u64     reserved_0 : 1;
+       u64     slice_hash_mask : 14;
+       u64     reserved_1 : 11;
+       u64     enable_pmi_dual_data_mode : 1;
+       u64     ch_1_disabled : 1;
+       u64     reserved_2 : 1;
+       u64     sym_slice0_channel_enabled : 2;
+       u64     sym_slice1_channel_enabled : 2;
+       u64     ch_hash_mask : 14;
+       u64     reserved_3 : 11;
+       u64     lock : 1;
+};
+
+#define b_cr_slice_channel_hash_port 0x4c
+#define b_cr_slice_channel_hash_offset 0x4c58
+#define b_cr_slice_channel_hash_r_opcode 0x06
+
+struct b_cr_mot_out_base_mchbar {
+       u32     reserved_0 : 14;
+       u32     mot_out_base : 15;
+       u32     reserved_1 : 1;
+       u32     tr_en : 1;
+       u32     imr_en : 1;
+};
+
+#define b_cr_mot_out_base_mchbar_port 0x4c
+#define b_cr_mot_out_base_mchbar_offset 0x6af0
+#define b_cr_mot_out_base_mchbar_r_opcode 0x00
+
+struct b_cr_mot_out_mask_mchbar {
+       u32     reserved_0 : 14;
+       u32     mot_out_mask : 15;
+       u32     reserved_1 : 1;
+       u32     ia_iwb_en : 1;
+       u32     gt_iwb_en : 1;
+};
+
+#define b_cr_mot_out_mask_mchbar_port 0x4c
+#define b_cr_mot_out_mask_mchbar_offset 0x6af4
+#define b_cr_mot_out_mask_mchbar_r_opcode 0x00
+
+struct b_cr_asym_mem_region0_mchbar {
+       u32     pad : 4;
+       u32     slice0_asym_base : 11;
+       u32     pad_18_15 : 4;
+       u32     slice0_asym_limit : 11;
+       u32     slice0_asym_channel_select : 1;
+       u32     slice0_asym_enable : 1;
+};
+
+#define b_cr_asym_mem_region0_mchbar_port 0x4c
+#define b_cr_asym_mem_region0_mchbar_offset 0x6e40
+#define b_cr_asym_mem_region0_mchbar_r_opcode 0x00
+
+struct b_cr_asym_mem_region1_mchbar {
+       u32     pad : 4;
+       u32     slice1_asym_base : 11;
+       u32     pad_18_15 : 4;
+       u32     slice1_asym_limit : 11;
+       u32     slice1_asym_channel_select : 1;
+       u32     slice1_asym_enable : 1;
+};
+
+#define b_cr_asym_mem_region1_mchbar_port 0x4c
+#define b_cr_asym_mem_region1_mchbar_offset 0x6e44
+#define b_cr_asym_mem_region1_mchbar_r_opcode 0x00
+
+/* Some bit fields of the above two structs are laid out differently on Denverton */
+struct b_cr_asym_mem_region_denverton {
+       u32     pad : 4;
+       u32     slice_asym_base : 8;
+       u32     pad_19_12 : 8;
+       u32     slice_asym_limit : 8;
+       u32     pad_28_30 : 3;
+       u32     slice_asym_enable : 1;
+};
+
+struct b_cr_asym_2way_mem_region_mchbar {
+       u32     pad : 2;
+       u32     asym_2way_intlv_mode : 2;
+       u32     asym_2way_base : 11;
+       u32     pad_16_15 : 2;
+       u32     asym_2way_limit : 11;
+       u32     pad_30_28 : 3;
+       u32     asym_2way_interleave_enable : 1;
+};
+
+#define b_cr_asym_2way_mem_region_mchbar_port 0x4c
+#define b_cr_asym_2way_mem_region_mchbar_offset 0x6e50
+#define b_cr_asym_2way_mem_region_mchbar_r_opcode 0x00
+
+/* Apollo Lake d-unit */
+
+struct d_cr_drp0 {
+       u32     rken0 : 1;
+       u32     rken1 : 1;
+       u32     ddmen : 1;
+       u32     rsvd3 : 1;
+       u32     dwid : 2;
+       u32     dden : 3;
+       u32     rsvd13_9 : 5;
+       u32     rsien : 1;
+       u32     bahen : 1;
+       u32     rsvd18_16 : 3;
+       u32     caswizzle : 2;
+       u32     eccen : 1;
+       u32     dramtype : 3;
+       u32     blmode : 3;
+       u32     addrdec : 2;
+       u32     dramdevice_pr : 2;
+};
+
+#define d_cr_drp0_offset 0x1400
+#define d_cr_drp0_r_opcode 0x00
+
+/* Denverton d-unit */
+
+struct d_cr_dsch {
+       u32     ch0en : 1;
+       u32     ch1en : 1;
+       u32     ddr4en : 1;
+       u32     coldwake : 1;
+       u32     newbypdis : 1;
+       u32     chan_width : 1;
+       u32     rsvd6_6 : 1;
+       u32     ooodis : 1;
+       u32     rsvd18_8 : 11;
+       u32     ic : 1;
+       u32     rsvd31_20 : 12;
+};
+
+#define d_cr_dsch_port 0x16
+#define d_cr_dsch_offset 0x0
+#define d_cr_dsch_r_opcode 0x0
+
+struct d_cr_ecc_ctrl {
+       u32     eccen : 1;
+       u32     rsvd31_1 : 31;
+};
+
+#define d_cr_ecc_ctrl_offset 0x180
+#define d_cr_ecc_ctrl_r_opcode 0x0
+
+struct d_cr_drp {
+       u32     rken0 : 1;
+       u32     rken1 : 1;
+       u32     rken2 : 1;
+       u32     rken3 : 1;
+       u32     dimmdwid0 : 2;
+       u32     dimmdden0 : 2;
+       u32     dimmdwid1 : 2;
+       u32     dimmdden1 : 2;
+       u32     rsvd15_12 : 4;
+       u32     dimmflip : 1;
+       u32     rsvd31_17 : 15;
+};
+
+#define d_cr_drp_offset 0x158
+#define d_cr_drp_r_opcode 0x0
+
+struct d_cr_dmap {
+       u32     ba0 : 5;
+       u32     ba1 : 5;
+       u32     bg0 : 5; /* if ddr3, ba2 = bg0 */
+       u32     bg1 : 5; /* if ddr3, ba3 = bg1 */
+       u32     rs0 : 5;
+       u32     rs1 : 5;
+       u32     rsvd : 2;
+};
+
+#define d_cr_dmap_offset 0x174
+#define d_cr_dmap_r_opcode 0x0
+
+struct d_cr_dmap1 {
+       u32     ca11 : 6;
+       u32     bxor : 1;
+       u32     rsvd : 25;
+};
+
+#define d_cr_dmap1_offset 0xb4
+#define d_cr_dmap1_r_opcode 0x0
+
+struct d_cr_dmap2 {
+       u32     row0 : 5;
+       u32     row1 : 5;
+       u32     row2 : 5;
+       u32     row3 : 5;
+       u32     row4 : 5;
+       u32     row5 : 5;
+       u32     rsvd : 2;
+};
+
+#define d_cr_dmap2_offset 0x148
+#define d_cr_dmap2_r_opcode 0x0
+
+struct d_cr_dmap3 {
+       u32     row6 : 5;
+       u32     row7 : 5;
+       u32     row8 : 5;
+       u32     row9 : 5;
+       u32     row10 : 5;
+       u32     row11 : 5;
+       u32     rsvd : 2;
+};
+
+#define d_cr_dmap3_offset 0x14c
+#define d_cr_dmap3_r_opcode 0x0
+
+struct d_cr_dmap4 {
+       u32     row12 : 5;
+       u32     row13 : 5;
+       u32     row14 : 5;
+       u32     row15 : 5;
+       u32     row16 : 5;
+       u32     row17 : 5;
+       u32     rsvd : 2;
+};
+
+#define d_cr_dmap4_offset 0x150
+#define d_cr_dmap4_r_opcode 0x0
+
+struct d_cr_dmap5 {
+       u32     ca3 : 4;
+       u32     ca4 : 4;
+       u32     ca5 : 4;
+       u32     ca6 : 4;
+       u32     ca7 : 4;
+       u32     ca8 : 4;
+       u32     ca9 : 4;
+       u32     rsvd : 4;
+};
+
+#define d_cr_dmap5_offset 0x154
+#define d_cr_dmap5_r_opcode 0x0
+
+#endif /* _PND2_REGS_H */
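
Note: the bitfield structs above mirror hardware registers reached over the P-unit sideband, and each register carries a (port, offset, read-opcode) triple. A minimal sketch of how a driver might decode one of them follows; rd_sideband() is a hypothetical read primitive, not part of this header, and the union overlay relies on the x86-64 bitfield layout the driver already assumes.

    /* Hypothetical sideband read + decode; rd_sideband() is assumed,
     * and this would live alongside the header above.
     */
    static int dump_slice_hash(void)
    {
            union {
                    u64 raw;
                    struct b_cr_slice_channel_hash bits;
            } r;
            int ret;

            ret = rd_sideband(b_cr_slice_channel_hash_port,
                              b_cr_slice_channel_hash_offset,
                              b_cr_slice_channel_hash_r_opcode, &r.raw);
            if (ret)
                    return ret;

            pr_info("interleave_mode=%u ch_hash_mask=%#x lock=%u\n",
                    r.bits.interleave_mode, r.bits.ch_hash_mask,
                    r.bits.lock);
            return 0;
    }
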
index 6c270d9d304a8eafb7464f576fdff119cb4cf27a..669246056812e8d361040fb4ce6f098574460768 100644 (file)
@@ -1596,7 +1596,7 @@ static void xgene_edac_pa_report(struct edac_device_ctl_info *edac_dev)
        reg = readl(ctx->dev_csr + IOBPATRANSERRINTSTS);
        if (!reg)
                goto chk_iob_axi0;
-       dev_err(edac_dev->dev, "IOB procesing agent (PA) transaction error\n");
+       dev_err(edac_dev->dev, "IOB processing agent (PA) transaction error\n");
        if (reg & IOBPA_RDATA_CORRUPT_MASK)
                dev_err(edac_dev->dev, "IOB PA read data RAM error\n");
        if (reg & IOBPA_M_RDATA_CORRUPT_MASK)
index 96bbae579c0b01cfdc3798e8fdb03948cb391280..fc09c76248b41630c8e849db0d8bf6826fb0e9aa 100644 (file)
@@ -44,7 +44,7 @@ config EXTCON_GPIO
 
 config EXTCON_INTEL_INT3496
        tristate "Intel INT3496 ACPI device extcon driver"
-       depends on GPIOLIB && ACPI
+       depends on GPIOLIB && ACPI && (X86 || COMPILE_TEST)
        help
          Say Y here to enable extcon support for USB OTG ports controlled by
          an Intel INT3496 ACPI device.
index a3131b036de6810281854730c429a17117e6832d..9d17984bbbd49a810ac749f06013c282a006c46f 100644 (file)
@@ -45,6 +45,17 @@ static const unsigned int int3496_cable[] = {
        EXTCON_NONE,
 };
 
+static const struct acpi_gpio_params id_gpios = { INT3496_GPIO_USB_ID, 0, false };
+static const struct acpi_gpio_params vbus_gpios = { INT3496_GPIO_VBUS_EN, 0, false };
+static const struct acpi_gpio_params mux_gpios = { INT3496_GPIO_USB_MUX, 0, false };
+
+static const struct acpi_gpio_mapping acpi_int3496_default_gpios[] = {
+       { "id-gpios", &id_gpios, 1 },
+       { "vbus-gpios", &vbus_gpios, 1 },
+       { "mux-gpios", &mux_gpios, 1 },
+       { },
+};
+
 static void int3496_do_usb_id(struct work_struct *work)
 {
        struct int3496_data *data =
@@ -83,6 +94,13 @@ static int int3496_probe(struct platform_device *pdev)
        struct int3496_data *data;
        int ret;
 
+       ret = acpi_dev_add_driver_gpios(ACPI_COMPANION(dev),
+                                       acpi_int3496_default_gpios);
+       if (ret) {
+               dev_err(dev, "can't add GPIO ACPI mapping\n");
+               return ret;
+       }
+
        data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
        if (!data)
                return -ENOMEM;
@@ -90,30 +108,27 @@ static int int3496_probe(struct platform_device *pdev)
        data->dev = dev;
        INIT_DELAYED_WORK(&data->work, int3496_do_usb_id);
 
-       data->gpio_usb_id = devm_gpiod_get_index(dev, "id",
-                                               INT3496_GPIO_USB_ID,
-                                               GPIOD_IN);
+       data->gpio_usb_id = devm_gpiod_get(dev, "id", GPIOD_IN);
        if (IS_ERR(data->gpio_usb_id)) {
                ret = PTR_ERR(data->gpio_usb_id);
                dev_err(dev, "can't request USB ID GPIO: %d\n", ret);
                return ret;
+       } else if (gpiod_get_direction(data->gpio_usb_id) != GPIOF_DIR_IN) {
+               dev_warn(dev, FW_BUG "USB ID GPIO not in input mode, fixing\n");
+               gpiod_direction_input(data->gpio_usb_id);
        }
 
        data->usb_id_irq = gpiod_to_irq(data->gpio_usb_id);
-       if (data->usb_id_irq <= 0) {
+       if (data->usb_id_irq < 0) {
                dev_err(dev, "can't get USB ID IRQ: %d\n", data->usb_id_irq);
-               return -EINVAL;
+               return data->usb_id_irq;
        }
 
-       data->gpio_vbus_en = devm_gpiod_get_index(dev, "vbus en",
-                                                INT3496_GPIO_VBUS_EN,
-                                                GPIOD_ASIS);
+       data->gpio_vbus_en = devm_gpiod_get(dev, "vbus", GPIOD_ASIS);
        if (IS_ERR(data->gpio_vbus_en))
                dev_info(dev, "can't request VBUS EN GPIO\n");
 
-       data->gpio_usb_mux = devm_gpiod_get_index(dev, "usb mux",
-                                                INT3496_GPIO_USB_MUX,
-                                                GPIOD_ASIS);
+       data->gpio_usb_mux = devm_gpiod_get(dev, "mux", GPIOD_ASIS);
        if (IS_ERR(data->gpio_usb_mux))
                dev_info(dev, "can't request USB MUX GPIO\n");
 
@@ -154,6 +169,8 @@ static int int3496_remove(struct platform_device *pdev)
        devm_free_irq(&pdev->dev, data->usb_id_irq, data);
        cancel_delayed_work_sync(&data->work);
 
+       acpi_dev_remove_driver_gpios(ACPI_COMPANION(&pdev->dev));
+
        return 0;
 }
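
Note: the hunk above replaces index-based devm_gpiod_get_index() lookups with named lookups backed by an ACPI GPIO mapping table. The general pattern, reduced to a sketch (the "demo" names are invented, not part of this driver):

    /* Map a firmware GPIO index to a named connection ID, then look
     * the descriptor up by that name.
     */
    static const struct acpi_gpio_params demo_gpio = { 0, 0, false };

    static const struct acpi_gpio_mapping demo_gpios[] = {
            { "demo-gpios", &demo_gpio, 1 },
            { }
    };

    static int demo_probe(struct device *dev)
    {
            struct gpio_desc *gd;
            int ret;

            ret = acpi_dev_add_driver_gpios(ACPI_COMPANION(dev), demo_gpios);
            if (ret)
                    return ret;

            gd = devm_gpiod_get(dev, "demo", GPIOD_IN); /* matches "demo-gpios" */
            if (IS_ERR(gd)) {
                    acpi_dev_remove_driver_gpios(ACPI_COMPANION(dev));
                    return PTR_ERR(gd);
            }
            return 0;
    }

As in the patch, the mapping must be removed again on the error path and in the driver's remove hook.
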
 
index 349dc3e1e52e0a67bea26d971aa7d7e2764caabe..974c5a31a00598e0bcdb1742967cc7cca691a7cb 100644 (file)
@@ -65,6 +65,7 @@ static bool __init efi_virtmap_init(void)
        bool systab_found;
 
        efi_mm.pgd = pgd_alloc(&efi_mm);
+       mm_init_cpumask(&efi_mm);
        init_new_context(NULL, &efi_mm);
 
        systab_found = false;
index e7d404059b7316a5c5668f609ceb5957fdcdd97d..b372aad3b449c39a85daa7d8df1a417741a83a73 100644 (file)
@@ -389,7 +389,6 @@ int __init efi_mem_desc_lookup(u64 phys_addr, efi_memory_desc_t *out_md)
                        return 0;
                }
        }
-       pr_err_once("requested map not found.\n");
        return -ENOENT;
 }
 
index 08b026864d4e7d5f00b76cbd95b7398c6db943f8..8554d7aec31c640b6e845ec6502fb2d538408fae 100644 (file)
@@ -254,7 +254,7 @@ void __init efi_esrt_init(void)
 
        rc = efi_mem_desc_lookup(efi.esrt, &md);
        if (rc < 0) {
-               pr_err("ESRT header is not in the memory map.\n");
+               pr_warn("ESRT header is not in the memory map.\n");
                return;
        }
 
index 6def402bf5691f504292b88ff994b9a472fa2bc5..5da36e56b36a1cc29c971934559e5852bed0e7bc 100644 (file)
@@ -45,6 +45,8 @@ enum efi_secureboot_mode efi_get_secureboot(efi_system_table_t *sys_table_arg)
        size = sizeof(secboot);
        status = get_efi_var(efi_SecureBoot_name, &efi_variable_guid,
                             NULL, &size, &secboot);
+       if (status == EFI_NOT_FOUND)
+               return efi_secureboot_mode_disabled;
        if (status != EFI_SUCCESS)
                goto out_efi_err;
 
@@ -78,7 +80,5 @@ secure_boot_enabled:
 
 out_efi_err:
        pr_efi_err(sys_table_arg, "Could not determine UEFI Secure Boot status.\n");
-       if (status == EFI_NOT_FOUND)
-               return efi_secureboot_mode_disabled;
        return efi_secureboot_mode_unknown;
 }
index 9e1a138fed53372a56dd1b7d2982ec198f46b3f1..16a8951b2beda389368c858848beb2edaf06949f 100644 (file)
@@ -96,7 +96,7 @@ static int altr_a10sr_gpio_probe(struct platform_device *pdev)
        gpio->regmap = a10sr->regmap;
 
        gpio->gp = altr_a10sr_gc;
-
+       gpio->gp.parent = pdev->dev.parent;
        gpio->gp.of_node = pdev->dev.of_node;
 
        ret = devm_gpiochip_add_data(&pdev->dev, &gpio->gp, gpio);
index 5bddbd507ca9f105aa18cfe5f43b673b676d551d..3fe6a21e05a5718d8769bf2dd505cb5968f41207 100644 (file)
@@ -90,21 +90,18 @@ static int altera_gpio_irq_set_type(struct irq_data *d,
 
        altera_gc = gpiochip_get_data(irq_data_get_irq_chip_data(d));
 
-       if (type == IRQ_TYPE_NONE)
+       if (type == IRQ_TYPE_NONE) {
+               irq_set_handler_locked(d, handle_bad_irq);
                return 0;
-       if (type == IRQ_TYPE_LEVEL_HIGH &&
-               altera_gc->interrupt_trigger == IRQ_TYPE_LEVEL_HIGH)
-               return 0;
-       if (type == IRQ_TYPE_EDGE_RISING &&
-               altera_gc->interrupt_trigger == IRQ_TYPE_EDGE_RISING)
-               return 0;
-       if (type == IRQ_TYPE_EDGE_FALLING &&
-               altera_gc->interrupt_trigger == IRQ_TYPE_EDGE_FALLING)
-               return 0;
-       if (type == IRQ_TYPE_EDGE_BOTH &&
-               altera_gc->interrupt_trigger == IRQ_TYPE_EDGE_BOTH)
+       }
+       if (type == altera_gc->interrupt_trigger) {
+               if (type == IRQ_TYPE_LEVEL_HIGH)
+                       irq_set_handler_locked(d, handle_level_irq);
+               else
+                       irq_set_handler_locked(d, handle_simple_irq);
                return 0;
-
+       }
+       irq_set_handler_locked(d, handle_bad_irq);
        return -EINVAL;
 }
 
@@ -230,7 +227,6 @@ static void altera_gpio_irq_edge_handler(struct irq_desc *desc)
        chained_irq_exit(chip, desc);
 }
 
-
 static void altera_gpio_irq_leveL_high_handler(struct irq_desc *desc)
 {
        struct altera_gpio_chip *altera_gc;
@@ -310,7 +306,7 @@ static int altera_gpio_probe(struct platform_device *pdev)
        altera_gc->interrupt_trigger = reg;
 
        ret = gpiochip_irqchip_add(&altera_gc->mmchip.gc, &altera_irq_chip, 0,
-               handle_simple_irq, IRQ_TYPE_NONE);
+               handle_bad_irq, IRQ_TYPE_NONE);
 
        if (ret) {
                dev_err(&pdev->dev, "could not add irqchip\n");
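
Note: the rewrite above collapses five equality checks into one comparison against the supported trigger and, importantly, parks unsupported configurations on handle_bad_irq. Condensed, the new logic looks like this sketch (supported_trigger stands in for altera_gc->interrupt_trigger):

    static unsigned int supported_trigger; /* stand-in for altera_gc->interrupt_trigger */

    static int demo_irq_set_type(struct irq_data *d, unsigned int type)
    {
            if (type == IRQ_TYPE_NONE) {
                    irq_set_handler_locked(d, handle_bad_irq);
                    return 0;
            }
            if (type == supported_trigger) {
                    irq_set_handler_locked(d, type == IRQ_TYPE_LEVEL_HIGH ?
                                           handle_level_irq : handle_simple_irq);
                    return 0;
            }
            irq_set_handler_locked(d, handle_bad_irq);
            return -EINVAL;
    }
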
index bdb692345428ccc99c8f22bd3b460f25b41e3156..2a57d024481db8c354badd976843f83a365c72a9 100644 (file)
@@ -270,8 +270,10 @@ mcp23s08_direction_output(struct gpio_chip *chip, unsigned offset, int value)
 static irqreturn_t mcp23s08_irq(int irq, void *data)
 {
        struct mcp23s08 *mcp = data;
-       int intcap, intf, i;
+       int intcap, intf, i, gpio, gpio_orig, intcap_mask;
        unsigned int child_irq;
+       bool intf_set, intcap_changed, gpio_bit_changed,
+               defval_changed, gpio_set;
 
        mutex_lock(&mcp->lock);
        if (mcp_read(mcp, MCP_INTF, &intf) < 0) {
@@ -287,14 +289,67 @@ static irqreturn_t mcp23s08_irq(int irq, void *data)
        }
 
        mcp->cache[MCP_INTCAP] = intcap;
+
+       /* This clears the interrupt (configurable on S18) */
+       if (mcp_read(mcp, MCP_GPIO, &gpio) < 0) {
+               mutex_unlock(&mcp->lock);
+               return IRQ_HANDLED;
+       }
+       gpio_orig = mcp->cache[MCP_GPIO];
+       mcp->cache[MCP_GPIO] = gpio;
        mutex_unlock(&mcp->lock);
 
+       if (mcp->cache[MCP_INTF] == 0) {
+               /* There is no interrupt pending */
+               return IRQ_HANDLED;
+       }
+
+       dev_dbg(mcp->chip.parent,
+               "intcap 0x%04X intf 0x%04X gpio_orig 0x%04X gpio 0x%04X\n",
+               intcap, intf, gpio_orig, gpio);
 
        for (i = 0; i < mcp->chip.ngpio; i++) {
-               if ((BIT(i) & mcp->cache[MCP_INTF]) &&
-                   ((BIT(i) & intcap & mcp->irq_rise) ||
-                    (mcp->irq_fall & ~intcap & BIT(i)) ||
-                    (BIT(i) & mcp->cache[MCP_INTCON]))) {
+               /* We must check all of the inputs on the chip,
+                * otherwise we may not notice a change on >=2 pins.
+                *
+                * On at least the mcp23s17, INTCAP is only updated
+                * one byte at a time (INTCAPA and INTCAPB are
+                * not written to at the same time - only on a per-bank
+                * basis).
+                *
+                * INTF only contains the single bit that caused the
+                * interrupt per-bank.  On the mcp23s17, there is
+                * INTFA and INTFB.  If two pins are changed on the A
+                * side at the same time, INTF will only have one bit
+                * set.  If one pin on the A side and one pin on the B
+                * side are changed at the same time, INTF will have
+                * two bits set.  Thus, INTF can't be the only check
+                * to see if the input has changed.
+                */
+
+               intf_set = BIT(i) & mcp->cache[MCP_INTF];
+               if (i < 8 && intf_set)
+                       intcap_mask = 0x00FF;
+               else if (i >= 8 && intf_set)
+                       intcap_mask = 0xFF00;
+               else
+                       intcap_mask = 0x00;
+
+               intcap_changed = (intcap_mask &
+                       (BIT(i) & mcp->cache[MCP_INTCAP])) !=
+                       (intcap_mask & (BIT(i) & gpio_orig));
+               gpio_set = BIT(i) & mcp->cache[MCP_GPIO];
+               gpio_bit_changed = (BIT(i) & gpio_orig) !=
+                       (BIT(i) & mcp->cache[MCP_GPIO]);
+               defval_changed = (BIT(i) & mcp->cache[MCP_INTCON]) &&
+                       ((BIT(i) & mcp->cache[MCP_GPIO]) !=
+                       (BIT(i) & mcp->cache[MCP_DEFVAL]));
+
+               if (((gpio_bit_changed || intcap_changed) &&
+                       (BIT(i) & mcp->irq_rise) && gpio_set) ||
+                   ((gpio_bit_changed || intcap_changed) &&
+                       (BIT(i) & mcp->irq_fall) && !gpio_set) ||
+                   defval_changed) {
                        child_irq = irq_find_mapping(mcp->chip.irqdomain, i);
                        handle_nested_irq(child_irq);
                }
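
Note: the comment in the hunk explains why INTF alone cannot be trusted; the test ultimately reduces to edge detection on old/new GPIO snapshots. A self-contained toy (userspace C, all values invented) showing two simultaneously changed pins both being reported, which a single INTF bit would have missed:

    #include <stdint.h>
    #include <stdio.h>

    #define BIT(n) (1u << (n))

    int main(void)
    {
            uint16_t gpio_orig = 0x0001; /* pin 0 was high */
            uint16_t gpio_now  = 0x0100; /* pin 0 fell, pin 8 rose */
            uint16_t irq_rise  = 0x0100; /* pin 8 armed for rising edge */
            uint16_t irq_fall  = 0x0001; /* pin 0 armed for falling edge */
            int i;

            for (i = 0; i < 16; i++) {
                    int was = !!(gpio_orig & BIT(i));
                    int now = !!(gpio_now & BIT(i));

                    if (was == now)
                            continue; /* no change on this pin */
                    if ((now && (irq_rise & BIT(i))) ||
                        (!now && (irq_fall & BIT(i))))
                            printf("pin %d: report interrupt\n", i);
            }
            return 0;
    }

This deliberately ignores the INTCAP/INTCON paths of the real handler; it only illustrates why every input must be scanned.
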
index 06dac72cb69c0c1c6e9005c748a613985dea111b..d993386892138757b67be09b4df8a822e39e4017 100644 (file)
@@ -197,7 +197,7 @@ static ssize_t gpio_mockup_event_write(struct file *file,
        struct seq_file *sfile;
        struct gpio_desc *desc;
        struct gpio_chip *gc;
-       int status, val;
+       int val;
        char buf;
 
        sfile = file->private_data;
@@ -206,9 +206,8 @@ static ssize_t gpio_mockup_event_write(struct file *file,
        chip = priv->chip;
        gc = &chip->gc;
 
-       status = copy_from_user(&buf, usr_buf, 1);
-       if (status)
-               return status;
+       if (copy_from_user(&buf, usr_buf, 1))
+               return -EFAULT;
 
        if (buf == '0')
                val = 0;
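
Note: copy_from_user() returns the number of bytes it failed to copy, not an errno, so the old code could leak a small positive count back to userspace as a "return value". The correct idiom, as applied above:

    /* Any nonzero remainder from copy_from_user() means -EFAULT. */
    if (copy_from_user(&buf, usr_buf, sizeof(buf)))
            return -EFAULT;
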
index 40a8881c2ce882bc1eef7eb59fff492afa6f378b..f1c6ec17b90a8352ecaf2e350aa8309a317925d8 100644 (file)
@@ -42,9 +42,7 @@ struct xgene_gpio {
        struct gpio_chip        chip;
        void __iomem            *base;
        spinlock_t              lock;
-#ifdef CONFIG_PM
        u32                     set_dr_val[XGENE_MAX_GPIO_BANKS];
-#endif
 };
 
 static int xgene_gpio_get(struct gpio_chip *gc, unsigned int offset)
@@ -138,8 +136,7 @@ static int xgene_gpio_dir_out(struct gpio_chip *gc,
        return 0;
 }
 
-#ifdef CONFIG_PM
-static int xgene_gpio_suspend(struct device *dev)
+static __maybe_unused int xgene_gpio_suspend(struct device *dev)
 {
        struct xgene_gpio *gpio = dev_get_drvdata(dev);
        unsigned long bank_offset;
@@ -152,7 +149,7 @@ static int xgene_gpio_suspend(struct device *dev)
        return 0;
 }
 
-static int xgene_gpio_resume(struct device *dev)
+static __maybe_unused int xgene_gpio_resume(struct device *dev)
 {
        struct xgene_gpio *gpio = dev_get_drvdata(dev);
        unsigned long bank_offset;
@@ -166,10 +163,6 @@ static int xgene_gpio_resume(struct device *dev)
 }
 
 static SIMPLE_DEV_PM_OPS(xgene_gpio_pm, xgene_gpio_suspend, xgene_gpio_resume);
-#define XGENE_GPIO_PM_OPS      (&xgene_gpio_pm)
-#else
-#define XGENE_GPIO_PM_OPS      NULL
-#endif
 
 static int xgene_gpio_probe(struct platform_device *pdev)
 {
@@ -241,7 +234,7 @@ static struct platform_driver xgene_gpio_driver = {
                .name = "xgene-gpio",
                .of_match_table = xgene_gpio_of_match,
                .acpi_match_table = ACPI_PTR(xgene_gpio_acpi_match),
-               .pm     = XGENE_GPIO_PM_OPS,
+               .pm     = &xgene_gpio_pm,
        },
        .probe = xgene_gpio_probe,
 };
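
Note: dropping the #ifdef CONFIG_PM blocks in favor of __maybe_unused keeps the suspend/resume callbacks compiled (and type-checked) in every configuration, while SIMPLE_DEV_PM_OPS expands to an empty table when sleep support is disabled. The idiom in isolation, with placeholder names:

    #include <linux/platform_device.h>
    #include <linux/pm.h>

    static __maybe_unused int demo_suspend(struct device *dev)
    {
            return 0; /* save hardware state here */
    }

    static __maybe_unused int demo_resume(struct device *dev)
    {
            return 0; /* restore hardware state here */
    }

    static SIMPLE_DEV_PM_OPS(demo_pm_ops, demo_suspend, demo_resume);

    static struct platform_driver demo_driver = {
            .driver = {
                    .name = "demo",
                    .pm   = &demo_pm_ops, /* always safe to reference */
            },
    };
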
index 9b37a3692b3feed18578c39f4acc22ba03182e8c..2bd683e2be022dd2c9a2b5c6cd7285d4c68e69eb 100644 (file)
@@ -266,6 +266,9 @@ static acpi_status acpi_gpiochip_request_interrupt(struct acpi_resource *ares,
                goto fail_free_event;
        }
 
+       if (agpio->wake_capable == ACPI_WAKE_CAPABLE)
+               enable_irq_wake(irq);
+
        list_add_tail(&event->node, &acpi_gpio->events);
        return AE_OK;
 
@@ -339,6 +342,9 @@ void acpi_gpiochip_free_interrupts(struct gpio_chip *chip)
        list_for_each_entry_safe_reverse(event, ep, &acpi_gpio->events, node) {
                struct gpio_desc *desc;
 
+               if (irqd_is_wakeup_set(irq_get_irq_data(event->irq)))
+                       disable_irq_wake(event->irq);
+
                free_irq(event->irq, event);
                desc = event->desc;
                if (WARN_ON(IS_ERR(desc)))
@@ -571,8 +577,10 @@ struct gpio_desc *acpi_find_gpio(struct device *dev,
                }
 
                desc = acpi_get_gpiod_by_index(adev, propname, idx, &info);
-               if (!IS_ERR(desc) || (PTR_ERR(desc) == -EPROBE_DEFER))
+               if (!IS_ERR(desc))
                        break;
+               if (PTR_ERR(desc) == -EPROBE_DEFER)
+                       return ERR_CAST(desc);
        }
 
        /* Then from plain _CRS GPIOs */
index 8363cb57915b0b726c704b8be37805ecef2a18ee..8a08e81ee90d579774ca96bc70853093ba623f09 100644 (file)
@@ -3,6 +3,4 @@
 # of AMDSOC/AMDGPU drm driver.
 # It provides the HW control for ACP related functionalities.
 
-subdir-ccflags-y += -I$(AMDACPPATH)/ -I$(AMDACPPATH)/include
-
 AMD_ACP_FILES := $(AMDACPPATH)/acp_hw.o
index d2d0f60ff36d1f2fd4a80ef8b43d2d3d9737e1f9..99424cb8020bdf914b5627bffce01155ba8f6b73 100644 (file)
@@ -240,6 +240,8 @@ free_partial_kdata:
        for (; i >= 0; i--)
                drm_free_large(p->chunks[i].kdata);
        kfree(p->chunks);
+       p->chunks = NULL;
+       p->nchunks = 0;
 put_ctx:
        amdgpu_ctx_put(p->ctx);
 free_chunk:
index 6abb238b25c97e8acc9f76887fa4b042e6025c1c..de0cf3315484c997877b30a65a15e7e1af3b87a4 100644 (file)
@@ -475,7 +475,7 @@ static int amdgpu_wb_init(struct amdgpu_device *adev)
        int r;
 
        if (adev->wb.wb_obj == NULL) {
-               r = amdgpu_bo_create_kernel(adev, AMDGPU_MAX_WB * 4,
+               r = amdgpu_bo_create_kernel(adev, AMDGPU_MAX_WB * sizeof(uint32_t),
                                            PAGE_SIZE, AMDGPU_GEM_DOMAIN_GTT,
                                            &adev->wb.wb_obj, &adev->wb.gpu_addr,
                                            (void **)&adev->wb.wb);
@@ -488,7 +488,7 @@ static int amdgpu_wb_init(struct amdgpu_device *adev)
                memset(&adev->wb.used, 0, sizeof(adev->wb.used));
 
                /* clear wb memory */
-               memset((char *)adev->wb.wb, 0, AMDGPU_GPU_PAGE_SIZE);
+               memset((char *)adev->wb.wb, 0, AMDGPU_MAX_WB * sizeof(uint32_t));
        }
 
        return 0;
@@ -2094,8 +2094,11 @@ int amdgpu_device_resume(struct drm_device *dev, bool resume, bool fbcon)
        }
 
        r = amdgpu_late_init(adev);
-       if (r)
+       if (r) {
+               if (fbcon)
+                       console_unlock();
                return r;
+       }
 
        /* pin cursors */
        list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
@@ -2587,7 +2590,7 @@ static ssize_t amdgpu_debugfs_regs_read(struct file *f, char __user *buf,
                use_bank = 0;
        }
 
-       *pos &= 0x3FFFF;
+       *pos &= (1UL << 22) - 1;
 
        if (use_bank) {
                if ((sh_bank != 0xFFFFFFFF && sh_bank >= adev->gfx.config.max_sh_per_se) ||
@@ -2663,7 +2666,7 @@ static ssize_t amdgpu_debugfs_regs_write(struct file *f, const char __user *buf,
                use_bank = 0;
        }
 
-       *pos &= 0x3FFFF;
+       *pos &= (1UL << 22) - 1;
 
        if (use_bank) {
                if ((sh_bank != 0xFFFFFFFF && sh_bank >= adev->gfx.config.max_sh_per_se) ||
index 75fc376ba735874e3598d72a06e17f32eb6ec0da..b76cd699eb0d7357d79d68f9da4fd9bd02f3e132 100644 (file)
  * - 3.7.0 - Add support for VCE clock list packet
  * - 3.8.0 - Add support raster config init in the kernel
  * - 3.9.0 - Add support for memory query info about VRAM and GTT.
+ * - 3.10.0 - Add support for new fences ioctl, new gem ioctl flags
  */
 #define KMS_DRIVER_MAJOR       3
-#define KMS_DRIVER_MINOR       9
+#define KMS_DRIVER_MINOR       10
 #define KMS_DRIVER_PATCHLEVEL  0
 
 int amdgpu_vram_limit = 0;
@@ -420,6 +421,7 @@ static const struct pci_device_id pciidlist[] = {
        {0x1002, 0x6985, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS12},
        {0x1002, 0x6986, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS12},
        {0x1002, 0x6987, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS12},
+       {0x1002, 0x6995, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS12},
        {0x1002, 0x699F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS12},
 
        {0, 0, 0}
index 51d759463384602ef0c3ca90b9667b6de6e0a865..106cf83c2e6b46aa711b7e82381e22b8dd449aa7 100644 (file)
@@ -202,6 +202,27 @@ int amdgpu_gem_create_ioctl(struct drm_device *dev, void *data,
        bool kernel = false;
        int r;
 
+       /* reject invalid gem flags */
+       if (args->in.domain_flags & ~(AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
+                                     AMDGPU_GEM_CREATE_NO_CPU_ACCESS |
+                                     AMDGPU_GEM_CREATE_CPU_GTT_USWC |
+                                     AMDGPU_GEM_CREATE_VRAM_CLEARED |
+                                     AMDGPU_GEM_CREATE_SHADOW |
+                                     AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS)) {
+               r = -EINVAL;
+               goto error_unlock;
+       }
+       /* reject invalid gem domains */
+       if (args->in.domains & ~(AMDGPU_GEM_DOMAIN_CPU |
+                                AMDGPU_GEM_DOMAIN_GTT |
+                                AMDGPU_GEM_DOMAIN_VRAM |
+                                AMDGPU_GEM_DOMAIN_GDS |
+                                AMDGPU_GEM_DOMAIN_GWS |
+                                AMDGPU_GEM_DOMAIN_OA)) {
+               r = -EINVAL;
+               goto error_unlock;
+       }
+
        /* create a gem object to contain this object in */
        if (args->in.domains & (AMDGPU_GEM_DOMAIN_GDS |
            AMDGPU_GEM_DOMAIN_GWS | AMDGPU_GEM_DOMAIN_OA)) {
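
Note: both rejects above use the standard whitelist-mask pattern: OR together every bit userspace may legally set and fail if anything outside that mask is present. In isolation, with placeholder flag names:

    #include <linux/errno.h>

    #define FLAG_A 0x1
    #define FLAG_B 0x2
    #define FLAG_C 0x4
    #define VALID_FLAGS (FLAG_A | FLAG_B | FLAG_C)

    static int check_flags(unsigned int flags)
    {
            if (flags & ~VALID_FLAGS)
                    return -EINVAL; /* unknown bit set by userspace */
            return 0;
    }
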
index 31375bdde6f1769ec674082141c77fb7383e302a..011800f621c6ce5574f740e85188aec215e1f2e5 100644 (file)
@@ -788,7 +788,7 @@ static int sdma_v3_0_start(struct amdgpu_device *adev)
                }
        }
 
-       /* disble sdma engine before programing it */
+       /* disable sdma engine before programming it */
        sdma_v3_0_ctx_switch_enable(adev, false);
        sdma_v3_0_enable(adev, false);
 
index f55e45b52fbce2b658135bc5fc48b084332f811c..c5dec210d5299995c72ea79f95c05415306f034f 100644 (file)
@@ -3464,6 +3464,16 @@ static void si_apply_state_adjust_rules(struct amdgpu_device *adev,
                    (adev->pdev->device == 0x6667)) {
                        max_sclk = 75000;
                }
+       } else if (adev->asic_type == CHIP_OLAND) {
+               if ((adev->pdev->revision == 0xC7) ||
+                   (adev->pdev->revision == 0x80) ||
+                   (adev->pdev->revision == 0x81) ||
+                   (adev->pdev->revision == 0x83) ||
+                   (adev->pdev->revision == 0x87) ||
+                   (adev->pdev->device == 0x6604) ||
+                   (adev->pdev->device == 0x6605)) {
+                       max_sclk = 75000;
+               }
        }
 
        if (rps->vce_active) {
index 50bdb24ef8d6e9f7e828ea661d873659beb3ce42..4a785d6acfb9afbde3b4f4b86116512134075759 100644 (file)
@@ -1051,7 +1051,7 @@ static int vi_common_early_init(void *handle)
                /* rev0 hardware requires workarounds to support PG */
                adev->pg_flags = 0;
                if (adev->rev_id != 0x00) {
-                       adev->pg_flags |= AMD_PG_SUPPORT_GFX_PG |
+                       adev->pg_flags |=
                                AMD_PG_SUPPORT_GFX_SMG |
                                AMD_PG_SUPPORT_GFX_PIPELINE |
                                AMD_PG_SUPPORT_CP |
index 8cf71f3c6d0ea4706096222574c9d85871baba6c..261b828ad59086990f9f054906448a5526f4cbc4 100644 (file)
@@ -178,7 +178,7 @@ int smu7_powergate_vce(struct pp_hwmgr *hwmgr, bool bgate)
        if (bgate) {
                cgs_set_powergating_state(hwmgr->device,
                                                AMD_IP_BLOCK_TYPE_VCE,
-                                               AMD_PG_STATE_UNGATE);
+                                               AMD_PG_STATE_GATE);
                cgs_set_clockgating_state(hwmgr->device,
                                AMD_IP_BLOCK_TYPE_VCE,
                                AMD_CG_STATE_GATE);
index 08e6a71f5d05f412946496f39ee82303d19a56a4..294b53697334cc0855daa73925b8c58a19cf2222 100644 (file)
@@ -63,8 +63,7 @@ static void malidp_crtc_enable(struct drm_crtc *crtc)
 
        clk_prepare_enable(hwdev->pxlclk);
 
-       /* mclk needs to be set to the same or higher rate than pxlclk */
-       clk_set_rate(hwdev->mclk, crtc->state->adjusted_mode.crtc_clock * 1000);
+       /* We rely on firmware to set mclk to a sensible level. */
        clk_set_rate(hwdev->pxlclk, crtc->state->adjusted_mode.crtc_clock * 1000);
 
        hwdev->modeset(hwdev, &vm);
index 488aedf5b58d54e7997b2339c75b7a90f30dcfc1..9f5513006eeef8b4e54f6727b44b0e97562935d6 100644 (file)
@@ -83,7 +83,7 @@ static const struct malidp_layer malidp550_layers[] = {
        { DE_VIDEO1, MALIDP550_DE_LV1_BASE, MALIDP550_DE_LV1_PTR_BASE, MALIDP_DE_LV_STRIDE0 },
        { DE_GRAPHICS1, MALIDP550_DE_LG_BASE, MALIDP550_DE_LG_PTR_BASE, MALIDP_DE_LG_STRIDE },
        { DE_VIDEO2, MALIDP550_DE_LV2_BASE, MALIDP550_DE_LV2_PTR_BASE, MALIDP_DE_LV_STRIDE0 },
-       { DE_SMART, MALIDP550_DE_LS_BASE, MALIDP550_DE_LS_PTR_BASE, 0 },
+       { DE_SMART, MALIDP550_DE_LS_BASE, MALIDP550_DE_LS_PTR_BASE, MALIDP550_DE_LS_R1_STRIDE },
 };
 
 #define MALIDP_DE_DEFAULT_PREFETCH_START       5
index 414aada10fe5e7d43392aa835b4c01aba594bcb7..d5aec082294cbdde5a19986a5b1908aef974bb19 100644 (file)
@@ -37,6 +37,8 @@
 #define   LAYER_V_VAL(x)               (((x) & 0x1fff) << 16)
 #define MALIDP_LAYER_COMP_SIZE         0x010
 #define MALIDP_LAYER_OFFSET            0x014
+#define MALIDP550_LS_ENABLE            0x01c
+#define MALIDP550_LS_R1_IN_SIZE                0x020
 
 /*
  * This 4-entry look-up-table is used to determine the full 8-bit alpha value
@@ -242,6 +244,11 @@ static void malidp_de_plane_update(struct drm_plane *plane,
                        LAYER_V_VAL(plane->state->crtc_y),
                        mp->layer->base + MALIDP_LAYER_OFFSET);
 
+       if (mp->layer->id == DE_SMART)
+               malidp_hw_write(mp->hwdev,
+                               LAYER_H_VAL(src_w) | LAYER_V_VAL(src_h),
+                               mp->layer->base + MALIDP550_LS_R1_IN_SIZE);
+
        /* first clear the rotation bits */
        val = malidp_hw_read(mp->hwdev, mp->layer->base + MALIDP_LAYER_CONTROL);
        val &= ~LAYER_ROT_MASK;
@@ -330,9 +337,16 @@ int malidp_de_planes_init(struct drm_device *drm)
                plane->hwdev = malidp->dev;
                plane->layer = &map->layers[i];
 
-               /* Skip the features which the SMART layer doesn't have */
-               if (id == DE_SMART)
+               if (id == DE_SMART) {
+                       /*
+                        * Enable the first rectangle in the SMART layer to be
+                        * able to use it as a drm plane.
+                        */
+                       malidp_hw_write(malidp->dev, 1,
+                                       plane->layer->base + MALIDP550_LS_ENABLE);
+                       /* Skip the features which the SMART layer doesn't have. */
                        continue;
+               }
 
                drm_plane_create_rotation_property(&plane->base, DRM_ROTATE_0, flags);
                malidp_hw_write(malidp->dev, MALIDP_ALPHA_LUT,
index aff6d4a84e998c6cc1d01e3067d0f52712daa145..b816067a65c5727ab120000c5d5d080e022fee2c 100644 (file)
@@ -84,6 +84,7 @@
 /* Stride register offsets relative to Lx_BASE */
 #define MALIDP_DE_LG_STRIDE            0x18
 #define MALIDP_DE_LV_STRIDE0           0x18
+#define MALIDP550_DE_LS_R1_STRIDE      0x28
 
 /* macros to set values into registers */
 #define MALIDP_DE_H_FRONTPORCH(x)      (((x) & 0xfff) << 0)
index c8baab9bee0d05cf904153021db5601ade303586..ba58f1b11d1e16b141fe01359967a229a54da6b1 100644 (file)
@@ -148,6 +148,9 @@ static const struct edid_quirk {
 
        /* Panel in Samsung NP700G7A-S01PL notebook reports 6bpc */
        { "SEC", 0xd033, EDID_QUIRK_FORCE_8BPC },
+
+       /* Rotel RSX-1058 forwards sink's EDID but only does HDMI 1.1 */
+       { "ETR", 13896, EDID_QUIRK_FORCE_8BPC },
 };
 
 /*
index f6d4d9700734e6d48792e90d47c7bfe4081116a1..324a688b3f3013e9020ce2b9f0f76083ad91f890 100644 (file)
@@ -1260,9 +1260,9 @@ int drm_fb_helper_check_var(struct fb_var_screeninfo *var,
         * to KMS, hence fail if different settings are requested.
         */
        if (var->bits_per_pixel != fb->format->cpp[0] * 8 ||
-           var->xres != fb->width || var->yres != fb->height ||
-           var->xres_virtual != fb->width || var->yres_virtual != fb->height) {
-               DRM_DEBUG("fb userspace requested width/height/bpp different than current fb "
+           var->xres > fb->width || var->yres > fb->height ||
+           var->xres_virtual > fb->width || var->yres_virtual > fb->height) {
+               DRM_DEBUG("fb requested width/height/bpp can't fit in current fb "
                          "request %dx%d-%d (virtual %dx%d) > %dx%d-%d\n",
                          var->xres, var->yres, var->bits_per_pixel,
                          var->xres_virtual, var->yres_virtual,
index 130d7d517a19a180ca7f2e744131aeabc554564b..da48819ff2e6550c0a7d6206569d85e8a880c0c5 100644 (file)
@@ -1311,6 +1311,8 @@ int etnaviv_gpu_submit(struct etnaviv_gpu *gpu,
                goto out_pm_put;
        }
 
+       mutex_lock(&gpu->lock);
+
        fence = etnaviv_gpu_fence_alloc(gpu);
        if (!fence) {
                event_free(gpu, event);
@@ -1318,8 +1320,6 @@ int etnaviv_gpu_submit(struct etnaviv_gpu *gpu,
                goto out_pm_put;
        }
 
-       mutex_lock(&gpu->lock);
-
        gpu->event[event].fence = fence;
        submit->fence = fence->seqno;
        gpu->active_fence = submit->fence;
index 0fd6f7a18364a69ba67cb000674593fc94041e19..c0e8d3302292c9c9329ca4795b4d9ffb3465a178 100644 (file)
@@ -68,6 +68,8 @@ struct decon_context {
        unsigned long                   flags;
        unsigned long                   out_type;
        int                             first_win;
+       spinlock_t                      vblank_lock;
+       u32                             frame_id;
 };
 
 static const uint32_t decon_formats[] = {
@@ -103,7 +105,7 @@ static int decon_enable_vblank(struct exynos_drm_crtc *crtc)
                if (ctx->out_type & IFTYPE_I80)
                        val |= VIDINTCON0_FRAMEDONE;
                else
-                       val |= VIDINTCON0_INTFRMEN;
+                       val |= VIDINTCON0_INTFRMEN | VIDINTCON0_FRAMESEL_FP;
 
                writel(val, ctx->addr + DECON_VIDINTCON0);
        }
@@ -122,14 +124,56 @@ static void decon_disable_vblank(struct exynos_drm_crtc *crtc)
                writel(0, ctx->addr + DECON_VIDINTCON0);
 }
 
+/* return number of starts/ends of frame transmissions since reset */
+static u32 decon_get_frame_count(struct decon_context *ctx, bool end)
+{
+       u32 frm, pfrm, status, cnt = 2;
+
+       /* To get a consistent result, repeat the read until the frame id is
+        * stable. Usually the loop executes once; in the rare case where it
+        * runs right at a frame change, a second pass is needed.
+        */
+       frm = readl(ctx->addr + DECON_CRFMID);
+       do {
+               status = readl(ctx->addr + DECON_VIDCON1);
+               pfrm = frm;
+               frm = readl(ctx->addr + DECON_CRFMID);
+       } while (frm != pfrm && --cnt);
+
+       /* CRFMID is incremented on BPORCH in case of I80 and on VSYNC in case
+        * of RGB; this should be taken into account.
+        */
+       if (!frm)
+               return 0;
+
+       switch (status & (VIDCON1_VSTATUS_MASK | VIDCON1_I80_ACTIVE)) {
+       case VIDCON1_VSTATUS_VS:
+               if (!(ctx->out_type & IFTYPE_I80))
+                       --frm;
+               break;
+       case VIDCON1_VSTATUS_BP:
+               --frm;
+               break;
+       case VIDCON1_I80_ACTIVE:
+       case VIDCON1_VSTATUS_AC:
+               if (end)
+                       --frm;
+               break;
+       default:
+               break;
+       }
+
+       return frm;
+}
+
 static void decon_setup_trigger(struct decon_context *ctx)
 {
        if (!(ctx->out_type & (IFTYPE_I80 | I80_HW_TRG)))
                return;
 
        if (!(ctx->out_type & I80_HW_TRG)) {
-               writel(TRIGCON_TE_AUTO_MASK | TRIGCON_SWTRIGEN
-                      TRIGCON_TE_AUTO_MASK | TRIGCON_SWTRIGEN,
+               writel(TRIGCON_TRIGEN_PER_F | TRIGCON_TRIGEN_F |
+                      TRIGCON_TE_AUTO_MASK | TRIGCON_SWTRIGEN,
                       ctx->addr + DECON_TRIGCON);
                return;
        }
@@ -365,11 +409,14 @@ static void decon_disable_plane(struct exynos_drm_crtc *crtc,
 static void decon_atomic_flush(struct exynos_drm_crtc *crtc)
 {
        struct decon_context *ctx = crtc->ctx;
+       unsigned long flags;
        int i;
 
        if (test_bit(BIT_SUSPENDED, &ctx->flags))
                return;
 
+       spin_lock_irqsave(&ctx->vblank_lock, flags);
+
        for (i = ctx->first_win; i < WINDOWS_NR; i++)
                decon_shadow_protect_win(ctx, i, false);
 
@@ -378,11 +425,18 @@ static void decon_atomic_flush(struct exynos_drm_crtc *crtc)
 
        if (ctx->out_type & IFTYPE_I80)
                set_bit(BIT_WIN_UPDATED, &ctx->flags);
+
+       ctx->frame_id = decon_get_frame_count(ctx, true);
+
+       exynos_crtc_handle_event(crtc);
+
+       spin_unlock_irqrestore(&ctx->vblank_lock, flags);
 }
 
 static void decon_swreset(struct decon_context *ctx)
 {
        unsigned int tries;
+       unsigned long flags;
 
        writel(0, ctx->addr + DECON_VIDCON0);
        for (tries = 2000; tries; --tries) {
@@ -400,6 +454,10 @@ static void decon_swreset(struct decon_context *ctx)
 
        WARN(tries == 0, "failed to software reset DECON\n");
 
+       spin_lock_irqsave(&ctx->vblank_lock, flags);
+       ctx->frame_id = 0;
+       spin_unlock_irqrestore(&ctx->vblank_lock, flags);
+
        if (!(ctx->out_type & IFTYPE_HDMI))
                return;
 
@@ -578,6 +636,24 @@ static const struct component_ops decon_component_ops = {
        .unbind = decon_unbind,
 };
 
+static void decon_handle_vblank(struct decon_context *ctx)
+{
+       u32 frm;
+
+       spin_lock(&ctx->vblank_lock);
+
+       frm = decon_get_frame_count(ctx, true);
+
+       if (frm != ctx->frame_id) {
+               /* handle only if incremented; signed compare handles wrap-around */
+               if ((s32)(frm - ctx->frame_id) > 0)
+                       drm_crtc_handle_vblank(&ctx->crtc->base);
+               ctx->frame_id = frm;
+       }
+
+       spin_unlock(&ctx->vblank_lock);
+}
+
 static irqreturn_t decon_irq_handler(int irq, void *dev_id)
 {
        struct decon_context *ctx = dev_id;
@@ -598,7 +674,7 @@ static irqreturn_t decon_irq_handler(int irq, void *dev_id)
                            (VIDOUT_INTERLACE_EN_F | VIDOUT_INTERLACE_FIELD_F))
                                return IRQ_HANDLED;
                }
-               drm_crtc_handle_vblank(&ctx->crtc->base);
+               decon_handle_vblank(ctx);
        }
 
 out:
@@ -671,6 +747,7 @@ static int exynos5433_decon_probe(struct platform_device *pdev)
        __set_bit(BIT_SUSPENDED, &ctx->flags);
        ctx->dev = dev;
        ctx->out_type = (unsigned long)of_device_get_match_data(dev);
+       spin_lock_init(&ctx->vblank_lock);
 
        if (ctx->out_type & IFTYPE_HDMI) {
                ctx->first_win = 1;
@@ -678,7 +755,7 @@ static int exynos5433_decon_probe(struct platform_device *pdev)
                ctx->out_type |= IFTYPE_I80;
        }
 
-       if (ctx->out_type | I80_HW_TRG) {
+       if (ctx->out_type & I80_HW_TRG) {
                ctx->sysreg = syscon_regmap_lookup_by_phandle(dev->of_node,
                                                        "samsung,disp-sysreg");
                if (IS_ERR(ctx->sysreg)) {
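
Note: decon_get_frame_count() above uses a bounded read-until-stable loop: CRFMID can tick between two reads, so the value is only trusted once two consecutive reads agree, with at most two retries. The pattern in isolation — read_hw_counter() is an assumed primitive:

    typedef unsigned int u32; /* kernel type, spelled out for the sketch */

    extern u32 read_hw_counter(void); /* assumed volatile hardware read */

    static u32 read_stable_counter(void)
    {
            u32 prev, cur = read_hw_counter();
            int retries = 2;

            do {
                    prev = cur;
                    cur = read_hw_counter();
            } while (cur != prev && --retries);

            return cur;
    }
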
index f9ab19e205e243d931412257064455582747e92a..48811806fa2727c540e5f5e9d0ffb1700dc08c4f 100644 (file)
@@ -526,6 +526,7 @@ static void decon_atomic_flush(struct exynos_drm_crtc *crtc)
 
        for (i = 0; i < WINDOWS_NR; i++)
                decon_shadow_protect_win(ctx, i, false);
+       exynos_crtc_handle_event(crtc);
 }
 
 static void decon_init(struct decon_context *ctx)
index 5367b6664fe37d6ea1825eac48e620a7c6d902e7..c65f4509932c56f18f869f81b293d707ce2e47d7 100644 (file)
@@ -85,16 +85,28 @@ static void exynos_crtc_atomic_flush(struct drm_crtc *crtc,
                                     struct drm_crtc_state *old_crtc_state)
 {
        struct exynos_drm_crtc *exynos_crtc = to_exynos_crtc(crtc);
-       struct drm_pending_vblank_event *event;
-       unsigned long flags;
 
        if (exynos_crtc->ops->atomic_flush)
                exynos_crtc->ops->atomic_flush(exynos_crtc);
+}
+
+static const struct drm_crtc_helper_funcs exynos_crtc_helper_funcs = {
+       .enable         = exynos_drm_crtc_enable,
+       .disable        = exynos_drm_crtc_disable,
+       .mode_set_nofb  = exynos_drm_crtc_mode_set_nofb,
+       .atomic_check   = exynos_crtc_atomic_check,
+       .atomic_begin   = exynos_crtc_atomic_begin,
+       .atomic_flush   = exynos_crtc_atomic_flush,
+};
+
+void exynos_crtc_handle_event(struct exynos_drm_crtc *exynos_crtc)
+{
+       struct drm_crtc *crtc = &exynos_crtc->base;
+       struct drm_pending_vblank_event *event = crtc->state->event;
+       unsigned long flags;
 
-       event = crtc->state->event;
        if (event) {
                crtc->state->event = NULL;
-
                spin_lock_irqsave(&crtc->dev->event_lock, flags);
                if (drm_crtc_vblank_get(crtc) == 0)
                        drm_crtc_arm_vblank_event(crtc, event);
@@ -105,15 +117,6 @@ static void exynos_crtc_atomic_flush(struct drm_crtc *crtc,
 
 }
 
-static const struct drm_crtc_helper_funcs exynos_crtc_helper_funcs = {
-       .enable         = exynos_drm_crtc_enable,
-       .disable        = exynos_drm_crtc_disable,
-       .mode_set_nofb  = exynos_drm_crtc_mode_set_nofb,
-       .atomic_check   = exynos_crtc_atomic_check,
-       .atomic_begin   = exynos_crtc_atomic_begin,
-       .atomic_flush   = exynos_crtc_atomic_flush,
-};
-
 static void exynos_drm_crtc_destroy(struct drm_crtc *crtc)
 {
        struct exynos_drm_crtc *exynos_crtc = to_exynos_crtc(crtc);
index 6a581a8af4650fcf5f07ea3fa84c6b1dd5e81ad4..abd5d6ceac0c2fa0500650a139876d20ca9d3ea8 100644 (file)
@@ -40,4 +40,6 @@ int exynos_drm_crtc_get_pipe_from_type(struct drm_device *drm_dev,
  */
 void exynos_drm_crtc_te_handler(struct drm_crtc *crtc);
 
+void exynos_crtc_handle_event(struct exynos_drm_crtc *exynos_crtc);
+
 #endif
index 812e2ec0761d0b2b6ad17c71c99a37995e09ca51..d7ef26370e67c59fa2825a8be9303ab4f5b0c708 100644 (file)
@@ -86,7 +86,7 @@
 #define DSIM_SYNC_INFORM               (1 << 27)
 #define DSIM_EOT_DISABLE               (1 << 28)
 #define DSIM_MFLUSH_VS                 (1 << 29)
-/* This flag is valid only for exynos3250/3472/4415/5260/5430 */
+/* This flag is valid only for exynos3250/3472/5260/5430 */
 #define DSIM_CLKLANE_STOP              (1 << 30)
 
 /* DSIM_ESCMODE */
@@ -473,17 +473,6 @@ static const struct exynos_dsi_driver_data exynos4_dsi_driver_data = {
        .reg_values = reg_values,
 };
 
-static const struct exynos_dsi_driver_data exynos4415_dsi_driver_data = {
-       .reg_ofs = exynos_reg_ofs,
-       .plltmr_reg = 0x58,
-       .has_clklane_stop = 1,
-       .num_clks = 2,
-       .max_freq = 1000,
-       .wait_for_reset = 1,
-       .num_bits_resol = 11,
-       .reg_values = reg_values,
-};
-
 static const struct exynos_dsi_driver_data exynos5_dsi_driver_data = {
        .reg_ofs = exynos_reg_ofs,
        .plltmr_reg = 0x58,
@@ -521,8 +510,6 @@ static const struct of_device_id exynos_dsi_of_match[] = {
          .data = &exynos3_dsi_driver_data },
        { .compatible = "samsung,exynos4210-mipi-dsi",
          .data = &exynos4_dsi_driver_data },
-       { .compatible = "samsung,exynos4415-mipi-dsi",
-         .data = &exynos4415_dsi_driver_data },
        { .compatible = "samsung,exynos5410-mipi-dsi",
          .data = &exynos5_dsi_driver_data },
        { .compatible = "samsung,exynos5422-mipi-dsi",
@@ -979,7 +966,7 @@ static void exynos_dsi_send_to_fifo(struct exynos_dsi *dsi,
        bool first = !xfer->tx_done;
        u32 reg;
 
-       dev_dbg(dev, "< xfer %p: tx len %u, done %u, rx len %u, done %u\n",
+       dev_dbg(dev, "< xfer %pK: tx len %u, done %u, rx len %u, done %u\n",
                xfer, length, xfer->tx_done, xfer->rx_len, xfer->rx_done);
 
        if (length > DSI_TX_FIFO_SIZE)
@@ -1177,7 +1164,7 @@ static bool exynos_dsi_transfer_finish(struct exynos_dsi *dsi)
        spin_unlock_irqrestore(&dsi->transfer_lock, flags);
 
        dev_dbg(dsi->dev,
-               "> xfer %p, tx_len %zu, tx_done %u, rx_len %u, rx_done %u\n",
+               "> xfer %pK, tx_len %zu, tx_done %u, rx_len %u, rx_done %u\n",
                xfer, xfer->packet.payload_length, xfer->tx_done, xfer->rx_len,
                xfer->rx_done);
 
@@ -1348,9 +1335,12 @@ static int exynos_dsi_register_te_irq(struct exynos_dsi *dsi)
        int te_gpio_irq;
 
        dsi->te_gpio = of_get_named_gpio(dsi->panel_node, "te-gpios", 0);
+       if (dsi->te_gpio == -ENOENT)
+               return 0;
+
        if (!gpio_is_valid(dsi->te_gpio)) {
-               dev_err(dsi->dev, "no te-gpios specified\n");
                ret = dsi->te_gpio;
+               dev_err(dsi->dev, "cannot get te-gpios, %d\n", ret);
                goto out;
        }
 
index 95871577015d8a5389cdae3a0c87413af7f0448f..5b18b5c5fdf255f262d68fc6fc7103f8d4d06910 100644 (file)
@@ -1695,7 +1695,7 @@ static int fimc_probe(struct platform_device *pdev)
                goto err_put_clk;
        }
 
-       DRM_DEBUG_KMS("id[%d]ippdrv[%p]\n", ctx->id, ippdrv);
+       DRM_DEBUG_KMS("id[%d]ippdrv[%pK]\n", ctx->id, ippdrv);
 
        spin_lock_init(&ctx->lock);
        platform_set_drvdata(pdev, ctx);
index a9fa444c6053c0c8d048598198938d7fa2647cce..3f04d72c448d386fc9f646e1033c8528406b107e 100644 (file)
 #define TRIGCON                                0x1A4
 #define TRGMODE_ENABLE                 (1 << 0)
 #define SWTRGCMD_ENABLE                        (1 << 1)
-/* Exynos3250, 3472, 4415, 5260 5410, 5420 and 5422 only supported. */
+/* Exynos3250, 3472, 5260 5410, 5420 and 5422 only supported. */
 #define HWTRGEN_ENABLE                 (1 << 3)
 #define HWTRGMASK_ENABLE               (1 << 4)
-/* Exynos3250, 3472, 4415, 5260, 5420 and 5422 only supported. */
+/* Exynos3250, 3472, 5260, 5420 and 5422 only supported. */
 #define HWTRIGEN_PER_ENABLE            (1 << 31)
 
 /* display mode change control register except exynos4 */
@@ -138,18 +138,6 @@ static struct fimd_driver_data exynos4_fimd_driver_data = {
        .has_vtsel = 1,
 };
 
-static struct fimd_driver_data exynos4415_fimd_driver_data = {
-       .timing_base = 0x20000,
-       .lcdblk_offset = 0x210,
-       .lcdblk_vt_shift = 10,
-       .lcdblk_bypass_shift = 1,
-       .trg_type = I80_HW_TRG,
-       .has_shadowcon = 1,
-       .has_vidoutcon = 1,
-       .has_vtsel = 1,
-       .has_trigger_per_te = 1,
-};
-
 static struct fimd_driver_data exynos5_fimd_driver_data = {
        .timing_base = 0x20000,
        .lcdblk_offset = 0x214,
@@ -210,8 +198,6 @@ static const struct of_device_id fimd_driver_dt_match[] = {
          .data = &exynos3_fimd_driver_data },
        { .compatible = "samsung,exynos4210-fimd",
          .data = &exynos4_fimd_driver_data },
-       { .compatible = "samsung,exynos4415-fimd",
-         .data = &exynos4415_fimd_driver_data },
        { .compatible = "samsung,exynos5250-fimd",
          .data = &exynos5_fimd_driver_data },
        { .compatible = "samsung,exynos5420-fimd",
@@ -257,7 +243,7 @@ static int fimd_enable_vblank(struct exynos_drm_crtc *crtc)
                        val |= VIDINTCON0_INT_FRAME;
 
                        val &= ~VIDINTCON0_FRAMESEL0_MASK;
-                       val |= VIDINTCON0_FRAMESEL0_VSYNC;
+                       val |= VIDINTCON0_FRAMESEL0_FRONTPORCH;
                        val &= ~VIDINTCON0_FRAMESEL1_MASK;
                        val |= VIDINTCON0_FRAMESEL1_NONE;
                }
@@ -723,6 +709,8 @@ static void fimd_atomic_flush(struct exynos_drm_crtc *crtc)
 
        for (i = 0; i < WINDOWS_NR; i++)
                fimd_shadow_protect_win(ctx, i, false);
+
+       exynos_crtc_handle_event(crtc);
 }
 
 static void fimd_update_plane(struct exynos_drm_crtc *crtc,
index 4c28f7ffcc4dd1a0593e5c37bb878f6481f1ed70..55a1579d11b3d7c1ce604eeba2988de78de61e0b 100644 (file)
@@ -218,7 +218,7 @@ static struct exynos_drm_gem *exynos_drm_gem_init(struct drm_device *dev,
                return ERR_PTR(ret);
        }
 
-       DRM_DEBUG_KMS("created file object = %p\n", obj->filp);
+       DRM_DEBUG_KMS("created file object = %pK\n", obj->filp);
 
        return exynos_gem;
 }
index bef57987759d2c8d2da9451a80c02b7b1c370b7f..0506b2b17ac1c45a1bea639cb37ed90075be3677 100644 (file)
@@ -1723,7 +1723,7 @@ static int gsc_probe(struct platform_device *pdev)
                return ret;
        }
 
-       DRM_DEBUG_KMS("id[%d]ippdrv[%p]\n", ctx->id, ippdrv);
+       DRM_DEBUG_KMS("id[%d]ippdrv[%pK]\n", ctx->id, ippdrv);
 
        mutex_init(&ctx->lock);
        platform_set_drvdata(pdev, ctx);
index 9c84ee76f18adc0ab1b8dcc3f406a4021346ece8..3edda18cc2d2d61b7010b57817aeecababe1d4c5 100644 (file)
@@ -208,7 +208,7 @@ static struct exynos_drm_ippdrv *ipp_find_drv_by_handle(u32 prop_id)
         * e.g PAUSE state, queue buf, command control.
         */
        list_for_each_entry(ippdrv, &exynos_drm_ippdrv_list, drv_list) {
-               DRM_DEBUG_KMS("count[%d]ippdrv[%p]\n", count++, ippdrv);
+               DRM_DEBUG_KMS("count[%d]ippdrv[%pK]\n", count++, ippdrv);
 
                mutex_lock(&ippdrv->cmd_lock);
                list_for_each_entry(c_node, &ippdrv->cmd_list, list) {
@@ -388,7 +388,7 @@ int exynos_drm_ipp_set_property(struct drm_device *drm_dev, void *data,
        }
        property->prop_id = ret;
 
-       DRM_DEBUG_KMS("created prop_id[%d]cmd[%d]ippdrv[%p]\n",
+       DRM_DEBUG_KMS("created prop_id[%d]cmd[%d]ippdrv[%pK]\n",
                property->prop_id, property->cmd, ippdrv);
 
        /* stored property information and ippdrv in private data */
@@ -518,7 +518,7 @@ static int ipp_put_mem_node(struct drm_device *drm_dev,
 {
        int i;
 
-       DRM_DEBUG_KMS("node[%p]\n", m_node);
+       DRM_DEBUG_KMS("node[%pK]\n", m_node);
 
        if (!m_node) {
                DRM_ERROR("invalid dequeue node.\n");
@@ -562,7 +562,7 @@ static struct drm_exynos_ipp_mem_node
        m_node->buf_id = qbuf->buf_id;
        INIT_LIST_HEAD(&m_node->list);
 
-       DRM_DEBUG_KMS("m_node[%p]ops_id[%d]\n", m_node, qbuf->ops_id);
+       DRM_DEBUG_KMS("m_node[%pK]ops_id[%d]\n", m_node, qbuf->ops_id);
        DRM_DEBUG_KMS("prop_id[%d]buf_id[%d]\n", qbuf->prop_id, m_node->buf_id);
 
        for_each_ipp_planar(i) {
@@ -659,7 +659,7 @@ static void ipp_put_event(struct drm_exynos_ipp_cmd_node *c_node,
 
        mutex_lock(&c_node->event_lock);
        list_for_each_entry_safe(e, te, &c_node->event_list, base.link) {
-               DRM_DEBUG_KMS("count[%d]e[%p]\n", count++, e);
+               DRM_DEBUG_KMS("count[%d]e[%pK]\n", count++, e);
 
                /*
                 * qbuf == NULL condition means all event deletion.
@@ -750,7 +750,7 @@ static struct drm_exynos_ipp_mem_node
 
        /* find memory node from memory list */
        list_for_each_entry(m_node, head, list) {
-               DRM_DEBUG_KMS("count[%d]m_node[%p]\n", count++, m_node);
+               DRM_DEBUG_KMS("count[%d]m_node[%pK]\n", count++, m_node);
 
                /* compare buffer id */
                if (m_node->buf_id == qbuf->buf_id)
@@ -767,7 +767,7 @@ static int ipp_set_mem_node(struct exynos_drm_ippdrv *ippdrv,
        struct exynos_drm_ipp_ops *ops = NULL;
        int ret = 0;
 
-       DRM_DEBUG_KMS("node[%p]\n", m_node);
+       DRM_DEBUG_KMS("node[%pK]\n", m_node);
 
        if (!m_node) {
                DRM_ERROR("invalid queue node.\n");
@@ -1232,7 +1232,7 @@ static int ipp_start_property(struct exynos_drm_ippdrv *ippdrv,
                        m_node = list_first_entry(head,
                                struct drm_exynos_ipp_mem_node, list);
 
-                       DRM_DEBUG_KMS("m_node[%p]\n", m_node);
+                       DRM_DEBUG_KMS("m_node[%pK]\n", m_node);
 
                        ret = ipp_set_mem_node(ippdrv, c_node, m_node);
                        if (ret) {
@@ -1601,7 +1601,7 @@ static int ipp_subdrv_probe(struct drm_device *drm_dev, struct device *dev)
                }
                ippdrv->prop_list.ipp_id = ret;
 
-               DRM_DEBUG_KMS("count[%d]ippdrv[%p]ipp_id[%d]\n",
+               DRM_DEBUG_KMS("count[%d]ippdrv[%pK]ipp_id[%d]\n",
                        count++, ippdrv, ret);
 
                /* store parent device for node */
@@ -1659,7 +1659,7 @@ static int ipp_subdrv_open(struct drm_device *drm_dev, struct device *dev,
 
        file_priv->ipp_dev = dev;
 
-       DRM_DEBUG_KMS("done priv[%p]\n", dev);
+       DRM_DEBUG_KMS("done priv[%pK]\n", dev);
 
        return 0;
 }
@@ -1676,7 +1676,7 @@ static void ipp_subdrv_close(struct drm_device *drm_dev, struct device *dev,
                mutex_lock(&ippdrv->cmd_lock);
                list_for_each_entry_safe(c_node, tc_node,
                        &ippdrv->cmd_list, list) {
-                       DRM_DEBUG_KMS("count[%d]ippdrv[%p]\n",
+                       DRM_DEBUG_KMS("count[%d]ippdrv[%pK]\n",
                                count++, ippdrv);
 
                        if (c_node->filp == file) {
index 6591e406084c164d30c1945aa2f6d7ce559c67fd..79282a820ecce104a59c828beac7be68b65a21ee 100644 (file)
@@ -748,7 +748,7 @@ static int rotator_probe(struct platform_device *pdev)
                goto err_ippdrv_register;
        }
 
-       DRM_DEBUG_KMS("ippdrv[%p]\n", ippdrv);
+       DRM_DEBUG_KMS("ippdrv[%pK]\n", ippdrv);
 
        platform_set_drvdata(pdev, rot);
 
index 57fe514d5c5bf9adc9f423d9f46a51e25aedfa46..5d9a62a87eec75f574ea0e696f31059d5802fa7d 100644 (file)
@@ -170,6 +170,7 @@ static const struct exynos_drm_crtc_ops vidi_crtc_ops = {
        .enable_vblank = vidi_enable_vblank,
        .disable_vblank = vidi_disable_vblank,
        .update_plane = vidi_update_plane,
+       .atomic_flush = exynos_crtc_handle_event,
 };
 
 static void vidi_fake_vblank_timer(unsigned long arg)
index 72143ac1052526ffc03332c5b6b211ae7bf30216..25edb635a197621871d583e26b3d31d3717a82fa 100644 (file)
@@ -1012,6 +1012,7 @@ static void mixer_atomic_flush(struct exynos_drm_crtc *crtc)
                return;
 
        mixer_vsync_set_update(mixer_ctx, true);
+       exynos_crtc_handle_event(crtc);
 }
 
 static void mixer_enable(struct exynos_drm_crtc *crtc)
index 3b6caaca975135d6b8d595393ffc372bc14107bc..325618d969feedf035196c9e3d211affe2fd82f6 100644 (file)
@@ -242,7 +242,7 @@ static int alloc_resource(struct intel_vgpu *vgpu,
        const char *item;
 
        if (!param->low_gm_sz || !param->high_gm_sz || !param->fence_sz) {
-               gvt_err("Invalid vGPU creation params\n");
+               gvt_vgpu_err("Invalid vGPU creation params\n");
                return -EINVAL;
        }
 
@@ -285,9 +285,9 @@ static int alloc_resource(struct intel_vgpu *vgpu,
        return 0;
 
 no_enough_resource:
-       gvt_err("vgpu%d: fail to allocate resource %s\n", vgpu->id, item);
-       gvt_err("vgpu%d: request %luMB avail %luMB max %luMB taken %luMB\n",
-               vgpu->id, BYTES_TO_MB(request), BYTES_TO_MB(avail),
+       gvt_vgpu_err("fail to allocate resource %s\n", item);
+       gvt_vgpu_err("request %luMB avail %luMB max %luMB taken %luMB\n",
+               BYTES_TO_MB(request), BYTES_TO_MB(avail),
                BYTES_TO_MB(max), BYTES_TO_MB(taken));
        return -ENOSPC;
 }
index 4a6a2ed65732e1fde39457148165274deda52db6..b7d7721e72faddc2a2d4fc76d69d795b8053cfad 100644 (file)
@@ -41,6 +41,54 @@ enum {
        INTEL_GVT_PCI_BAR_MAX,
 };
 
+/* Per-byte bitmap of the writable bits (RW or RW1C bits, which cannot
+ * co-exist within one byte) in the standard PCI configuration space
+ * (not the full 256 bytes).
+ */
+static const u8 pci_cfg_space_rw_bmp[PCI_INTERRUPT_LINE + 4] = {
+       [PCI_COMMAND]           = 0xff, 0x07,
+       [PCI_STATUS]            = 0x00, 0xf9, /* the only one RW1C byte */
+       [PCI_CACHE_LINE_SIZE]   = 0xff,
+       [PCI_BASE_ADDRESS_0 ... PCI_CARDBUS_CIS - 1] = 0xff,
+       [PCI_ROM_ADDRESS]       = 0x01, 0xf8, 0xff, 0xff,
+       [PCI_INTERRUPT_LINE]    = 0xff,
+};
+
+/**
+ * vgpu_pci_cfg_mem_write - write virtual cfg space memory
+ *
+ * Use this function to write virtual cfg space memory.
+ * For standard cfg space, only RW bits can be changed,
+ * and we emulate the RW1C behavior of the PCI_STATUS register.
+ */
+static void vgpu_pci_cfg_mem_write(struct intel_vgpu *vgpu, unsigned int off,
+                                  u8 *src, unsigned int bytes)
+{
+       u8 *cfg_base = vgpu_cfg_space(vgpu);
+       u8 mask, new, old;
+       int i = 0;
+
+       for (; i < bytes && (off + i < sizeof(pci_cfg_space_rw_bmp)); i++) {
+               mask = pci_cfg_space_rw_bmp[off + i];
+               old = cfg_base[off + i];
+               new = src[i] & mask;
+
+               /**
+                * The PCI_STATUS high byte has RW1C bits; here we
+                * emulate their clear-on-write-1 behavior.
+                * Writing 0 to an RW1C bit has no effect.
+                */
+               if (off + i == PCI_STATUS + 1)
+                       new = (~new & old) & mask;
+
+               cfg_base[off + i] = (old & ~mask) | new;
+       }
+
+       /* For other configuration space directly copy as it is. */
+       if (i < bytes)
+               memcpy(cfg_base + off + i, src + i, bytes - i);
+}
+
 /**
  * intel_vgpu_emulate_cfg_read - emulate vGPU configuration space read
  *
@@ -123,7 +171,7 @@ static int emulate_pci_command_write(struct intel_vgpu *vgpu,
        u8 changed = old ^ new;
        int ret;
 
-       memcpy(vgpu_cfg_space(vgpu) + offset, p_data, bytes);
+       vgpu_pci_cfg_mem_write(vgpu, offset, p_data, bytes);
        if (!(changed & PCI_COMMAND_MEMORY))
                return 0;
 
@@ -237,6 +285,9 @@ int intel_vgpu_emulate_cfg_write(struct intel_vgpu *vgpu, unsigned int offset,
 {
        int ret;
 
+       if (vgpu->failsafe)
+               return 0;
+
        if (WARN_ON(bytes > 4))
                return -EINVAL;
 
@@ -274,10 +325,10 @@ int intel_vgpu_emulate_cfg_write(struct intel_vgpu *vgpu, unsigned int offset,
                if (ret)
                        return ret;
 
-               memcpy(vgpu_cfg_space(vgpu) + offset, p_data, bytes);
+               vgpu_pci_cfg_mem_write(vgpu, offset, p_data, bytes);
                break;
        default:
-               memcpy(vgpu_cfg_space(vgpu) + offset, p_data, bytes);
+               vgpu_pci_cfg_mem_write(vgpu, offset, p_data, bytes);
                break;
        }
        return 0;
index b9c8e2407682fc5af454d70d8b1e881aed9e171f..2b92cc8a7d1aa551778917ed038bc6aa7961ce3e 100644 (file)
@@ -668,7 +668,7 @@ static inline void print_opcode(u32 cmd, int ring_id)
        if (d_info == NULL)
                return;
 
-       gvt_err("opcode=0x%x %s sub_ops:",
+       gvt_dbg_cmd("opcode=0x%x %s sub_ops:",
                        cmd >> (32 - d_info->op_len), d_info->name);
 
        for (i = 0; i < d_info->nr_sub_op; i++)
@@ -693,23 +693,23 @@ static void parser_exec_state_dump(struct parser_exec_state *s)
        int cnt = 0;
        int i;
 
-       gvt_err("  vgpu%d RING%d: ring_start(%08lx) ring_end(%08lx)"
+       gvt_dbg_cmd("  vgpu%d RING%d: ring_start(%08lx) ring_end(%08lx)"
                        " ring_head(%08lx) ring_tail(%08lx)\n", s->vgpu->id,
                        s->ring_id, s->ring_start, s->ring_start + s->ring_size,
                        s->ring_head, s->ring_tail);
 
-       gvt_err("  %s %s ip_gma(%08lx) ",
+       gvt_dbg_cmd("  %s %s ip_gma(%08lx) ",
                        s->buf_type == RING_BUFFER_INSTRUCTION ?
                        "RING_BUFFER" : "BATCH_BUFFER",
                        s->buf_addr_type == GTT_BUFFER ?
                        "GTT" : "PPGTT", s->ip_gma);
 
        if (s->ip_va == NULL) {
-               gvt_err(" ip_va(NULL)");
+               gvt_dbg_cmd(" ip_va(NULL)");
                return;
        }
 
-       gvt_err("  ip_va=%p: %08x %08x %08x %08x\n",
+       gvt_dbg_cmd("  ip_va=%p: %08x %08x %08x %08x\n",
                        s->ip_va, cmd_val(s, 0), cmd_val(s, 1),
                        cmd_val(s, 2), cmd_val(s, 3));
 
@@ -817,6 +817,25 @@ static bool is_shadowed_mmio(unsigned int offset)
        return ret;
 }
 
+static inline bool is_force_nonpriv_mmio(unsigned int offset)
+{
+       return (offset >= 0x24d0 && offset < 0x2500);
+}
+
+static int force_nonpriv_reg_handler(struct parser_exec_state *s,
+                                    unsigned int offset, unsigned int index)
+{
+       struct intel_gvt *gvt = s->vgpu->gvt;
+       unsigned int data = cmd_val(s, index + 1);
+
+       if (!intel_gvt_in_force_nonpriv_whitelist(gvt, data)) {
+               gvt_err("Unexpected forcenonpriv 0x%x LRI write, value=0x%x\n",
+                       offset, data);
+               return -EINVAL;
+       }
+       return 0;
+}
+
 static int cmd_reg_handler(struct parser_exec_state *s,
        unsigned int offset, unsigned int index, char *cmd)
 {
@@ -824,23 +843,26 @@ static int cmd_reg_handler(struct parser_exec_state *s,
        struct intel_gvt *gvt = vgpu->gvt;
 
        if (offset + 4 > gvt->device_info.mmio_size) {
-               gvt_err("%s access to (%x) outside of MMIO range\n",
+               gvt_vgpu_err("%s access to (%x) outside of MMIO range\n",
                                cmd, offset);
                return -EINVAL;
        }
 
        if (!intel_gvt_mmio_is_cmd_access(gvt, offset)) {
-               gvt_err("vgpu%d: %s access to non-render register (%x)\n",
-                               s->vgpu->id, cmd, offset);
+               gvt_vgpu_err("%s access to non-render register (%x)\n",
+                               cmd, offset);
                return 0;
        }
 
        if (is_shadowed_mmio(offset)) {
-               gvt_err("vgpu%d: found access of shadowed MMIO %x\n",
-                               s->vgpu->id, offset);
+               gvt_vgpu_err("found access of shadowed MMIO %x\n", offset);
                return 0;
        }
 
+       if (is_force_nonpriv_mmio(offset) &&
+           force_nonpriv_reg_handler(s, offset, index))
+               return -EINVAL;
+
        if (offset == i915_mmio_reg_offset(DERRMR) ||
                offset == i915_mmio_reg_offset(FORCEWAKE_MT)) {
                /* Writing to HW VGT_PVINFO_PAGE offset will be discarded */
@@ -1008,7 +1030,7 @@ static int cmd_handler_pipe_control(struct parser_exec_state *s)
                        ret = cmd_reg_handler(s, 0x2358, 1, "pipe_ctrl");
                else if (post_sync == 1) {
                        /* check ggtt*/
-                       if ((cmd_val(s, 2) & (1 << 2))) {
+                       if ((cmd_val(s, 1) & PIPE_CONTROL_GLOBAL_GTT_IVB)) {
                                gma = cmd_val(s, 2) & GENMASK(31, 3);
                                if (gmadr_bytes == 8)
                                        gma |= (cmd_gma_hi(s, 3)) << 32;
@@ -1129,6 +1151,7 @@ static int skl_decode_mi_display_flip(struct parser_exec_state *s,
                struct mi_display_flip_command_info *info)
 {
        struct drm_i915_private *dev_priv = s->vgpu->gvt->dev_priv;
+       struct intel_vgpu *vgpu = s->vgpu;
        u32 dword0 = cmd_val(s, 0);
        u32 dword1 = cmd_val(s, 1);
        u32 dword2 = cmd_val(s, 2);
@@ -1167,7 +1190,7 @@ static int skl_decode_mi_display_flip(struct parser_exec_state *s,
                break;
 
        default:
-               gvt_err("unknown plane code %d\n", plane);
+               gvt_vgpu_err("unknown plane code %d\n", plane);
                return -EINVAL;
        }
 
@@ -1274,25 +1297,26 @@ static int update_plane_mmio_from_mi_display_flip(
 static int cmd_handler_mi_display_flip(struct parser_exec_state *s)
 {
        struct mi_display_flip_command_info info;
+       struct intel_vgpu *vgpu = s->vgpu;
        int ret;
        int i;
        int len = cmd_length(s);
 
        ret = decode_mi_display_flip(s, &info);
        if (ret) {
-               gvt_err("fail to decode MI display flip command\n");
+               gvt_vgpu_err("fail to decode MI display flip command\n");
                return ret;
        }
 
        ret = check_mi_display_flip(s, &info);
        if (ret) {
-               gvt_err("invalid MI display flip command\n");
+               gvt_vgpu_err("invalid MI display flip command\n");
                return ret;
        }
 
        ret = update_plane_mmio_from_mi_display_flip(s, &info);
        if (ret) {
-               gvt_err("fail to update plane mmio\n");
+               gvt_vgpu_err("fail to update plane mmio\n");
                return ret;
        }
 
@@ -1350,7 +1374,8 @@ static inline int cmd_address_audit(struct parser_exec_state *s,
        int ret;
 
        if (op_size > max_surface_size) {
-               gvt_err("command address audit fail name %s\n", s->info->name);
+               gvt_vgpu_err("command address audit fail name %s\n",
+                       s->info->name);
                return -EINVAL;
        }
 
@@ -1367,7 +1392,7 @@ static inline int cmd_address_audit(struct parser_exec_state *s,
        }
        return 0;
 err:
-       gvt_err("cmd_parser: Malicious %s detected, addr=0x%lx, len=%d!\n",
+       gvt_vgpu_err("cmd_parser: Malicious %s detected, addr=0x%lx, len=%d!\n",
                        s->info->name, guest_gma, op_size);
 
        pr_err("cmd dump: ");
@@ -1412,8 +1437,10 @@ static int cmd_handler_mi_store_data_imm(struct parser_exec_state *s)
 
 static inline int unexpected_cmd(struct parser_exec_state *s)
 {
-       gvt_err("vgpu%d: Unexpected %s in command buffer!\n",
-                       s->vgpu->id, s->info->name);
+       struct intel_vgpu *vgpu = s->vgpu;
+
+       gvt_vgpu_err("Unexpected %s in command buffer!\n", s->info->name);
+
        return -EINVAL;
 }
 
@@ -1516,7 +1543,7 @@ static int copy_gma_to_hva(struct intel_vgpu *vgpu, struct intel_vgpu_mm *mm,
        while (gma != end_gma) {
                gpa = intel_vgpu_gma_to_gpa(mm, gma);
                if (gpa == INTEL_GVT_INVALID_ADDR) {
-                       gvt_err("invalid gma address: %lx\n", gma);
+                       gvt_vgpu_err("invalid gma address: %lx\n", gma);
                        return -EFAULT;
                }
 
@@ -1557,6 +1584,7 @@ static uint32_t find_bb_size(struct parser_exec_state *s)
        uint32_t bb_size = 0;
        uint32_t cmd_len = 0;
        bool met_bb_end = false;
+       struct intel_vgpu *vgpu = s->vgpu;
        u32 cmd;
 
        /* get the start gm address of the batch buffer */
@@ -1565,7 +1593,7 @@ static uint32_t find_bb_size(struct parser_exec_state *s)
 
        info = get_cmd_info(s->vgpu->gvt, cmd, s->ring_id);
        if (info == NULL) {
-               gvt_err("unknown cmd 0x%x, opcode=0x%x\n",
+               gvt_vgpu_err("unknown cmd 0x%x, opcode=0x%x\n",
                                cmd, get_opcode(cmd, s->ring_id));
                return -EINVAL;
        }
@@ -1574,7 +1602,7 @@ static uint32_t find_bb_size(struct parser_exec_state *s)
                                gma, gma + 4, &cmd);
                info = get_cmd_info(s->vgpu->gvt, cmd, s->ring_id);
                if (info == NULL) {
-                       gvt_err("unknown cmd 0x%x, opcode=0x%x\n",
+                       gvt_vgpu_err("unknown cmd 0x%x, opcode=0x%x\n",
                                cmd, get_opcode(cmd, s->ring_id));
                        return -EINVAL;
                }
@@ -1599,6 +1627,7 @@ static uint32_t find_bb_size(struct parser_exec_state *s)
 static int perform_bb_shadow(struct parser_exec_state *s)
 {
        struct intel_shadow_bb_entry *entry_obj;
+       struct intel_vgpu *vgpu = s->vgpu;
        unsigned long gma = 0;
        uint32_t bb_size;
        void *dst = NULL;
@@ -1633,7 +1662,7 @@ static int perform_bb_shadow(struct parser_exec_state *s)
 
        ret = i915_gem_object_set_to_cpu_domain(entry_obj->obj, false);
        if (ret) {
-               gvt_err("failed to set shadow batch to CPU\n");
+               gvt_vgpu_err("failed to set shadow batch to CPU\n");
                goto unmap_src;
        }
 
@@ -1645,7 +1674,7 @@ static int perform_bb_shadow(struct parser_exec_state *s)
                              gma, gma + bb_size,
                              dst);
        if (ret) {
-               gvt_err("fail to copy guest ring buffer\n");
+               gvt_vgpu_err("fail to copy guest ring buffer\n");
                goto unmap_src;
        }
 
@@ -1676,15 +1705,16 @@ static int cmd_handler_mi_batch_buffer_start(struct parser_exec_state *s)
 {
        bool second_level;
        int ret = 0;
+       struct intel_vgpu *vgpu = s->vgpu;
 
        if (s->buf_type == BATCH_BUFFER_2ND_LEVEL) {
-               gvt_err("Found MI_BATCH_BUFFER_START in 2nd level BB\n");
+               gvt_vgpu_err("Found MI_BATCH_BUFFER_START in 2nd level BB\n");
                return -EINVAL;
        }
 
        second_level = BATCH_BUFFER_2ND_LEVEL_BIT(cmd_val(s, 0)) == 1;
        if (second_level && (s->buf_type != BATCH_BUFFER_INSTRUCTION)) {
-               gvt_err("Jumping to 2nd level BB from RB is not allowed\n");
+               gvt_vgpu_err("Jumping to 2nd level BB from RB is not allowed\n");
                return -EINVAL;
        }
 
@@ -1702,7 +1732,7 @@ static int cmd_handler_mi_batch_buffer_start(struct parser_exec_state *s)
        if (batch_buffer_needs_scan(s)) {
                ret = perform_bb_shadow(s);
                if (ret < 0)
-                       gvt_err("invalid shadow batch buffer\n");
+                       gvt_vgpu_err("invalid shadow batch buffer\n");
        } else {
                /* emulate a batch buffer end to do return right */
                ret = cmd_handler_mi_batch_buffer_end(s);
@@ -2429,6 +2459,7 @@ static int cmd_parser_exec(struct parser_exec_state *s)
        int ret = 0;
        cycles_t t0, t1, t2;
        struct parser_exec_state s_before_advance_custom;
+       struct intel_vgpu *vgpu = s->vgpu;
 
        t0 = get_cycles();
 
@@ -2436,7 +2467,7 @@ static int cmd_parser_exec(struct parser_exec_state *s)
 
        info = get_cmd_info(s->vgpu->gvt, cmd, s->ring_id);
        if (info == NULL) {
-               gvt_err("unknown cmd 0x%x, opcode=0x%x\n",
+               gvt_vgpu_err("unknown cmd 0x%x, opcode=0x%x\n",
                                cmd, get_opcode(cmd, s->ring_id));
                return -EINVAL;
        }
@@ -2452,7 +2483,7 @@ static int cmd_parser_exec(struct parser_exec_state *s)
        if (info->handler) {
                ret = info->handler(s);
                if (ret < 0) {
-                       gvt_err("%s handler error\n", info->name);
+                       gvt_vgpu_err("%s handler error\n", info->name);
                        return ret;
                }
        }
@@ -2463,7 +2494,7 @@ static int cmd_parser_exec(struct parser_exec_state *s)
        if (!(info->flag & F_IP_ADVANCE_CUSTOM)) {
                ret = cmd_advance_default(s);
                if (ret) {
-                       gvt_err("%s IP advance error\n", info->name);
+                       gvt_vgpu_err("%s IP advance error\n", info->name);
                        return ret;
                }
        }
@@ -2486,6 +2517,7 @@ static int command_scan(struct parser_exec_state *s,
 
        unsigned long gma_head, gma_tail, gma_bottom;
        int ret = 0;
+       struct intel_vgpu *vgpu = s->vgpu;
 
        gma_head = rb_start + rb_head;
        gma_tail = rb_start + rb_tail;
@@ -2497,7 +2529,7 @@ static int command_scan(struct parser_exec_state *s,
                if (s->buf_type == RING_BUFFER_INSTRUCTION) {
                        if (!(s->ip_gma >= rb_start) ||
                                !(s->ip_gma < gma_bottom)) {
-                               gvt_err("ip_gma %lx out of ring scope."
+                               gvt_vgpu_err("ip_gma %lx out of ring scope."
                                        "(base:0x%lx, bottom: 0x%lx)\n",
                                        s->ip_gma, rb_start,
                                        gma_bottom);
@@ -2505,7 +2537,7 @@ static int command_scan(struct parser_exec_state *s,
                                return -EINVAL;
                        }
                        if (gma_out_of_range(s->ip_gma, gma_head, gma_tail)) {
-                               gvt_err("ip_gma %lx out of range."
+                               gvt_vgpu_err("ip_gma %lx out of range."
                                        "base 0x%lx head 0x%lx tail 0x%lx\n",
                                        s->ip_gma, rb_start,
                                        rb_head, rb_tail);
@@ -2515,7 +2547,7 @@ static int command_scan(struct parser_exec_state *s,
                }
                ret = cmd_parser_exec(s);
                if (ret) {
-                       gvt_err("cmd parser error\n");
+                       gvt_vgpu_err("cmd parser error\n");
                        parser_exec_state_dump(s);
                        break;
                }
@@ -2639,7 +2671,7 @@ static int shadow_workload_ring_buffer(struct intel_vgpu_workload *workload)
                                gma_head, gma_top,
                                workload->shadow_ring_buffer_va);
                if (ret) {
-                       gvt_err("fail to copy guest ring buffer\n");
+                       gvt_vgpu_err("fail to copy guest ring buffer\n");
                        return ret;
                }
                copy_len = gma_top - gma_head;
@@ -2651,7 +2683,7 @@ static int shadow_workload_ring_buffer(struct intel_vgpu_workload *workload)
                        gma_head, gma_tail,
                        workload->shadow_ring_buffer_va + copy_len);
        if (ret) {
-               gvt_err("fail to copy guest ring buffer\n");
+               gvt_vgpu_err("fail to copy guest ring buffer\n");
                return ret;
        }
        ring->tail += workload->rb_len;
@@ -2662,16 +2694,17 @@ static int shadow_workload_ring_buffer(struct intel_vgpu_workload *workload)
 int intel_gvt_scan_and_shadow_workload(struct intel_vgpu_workload *workload)
 {
        int ret;
+       struct intel_vgpu *vgpu = workload->vgpu;
 
        ret = shadow_workload_ring_buffer(workload);
        if (ret) {
-               gvt_err("fail to shadow workload ring_buffer\n");
+               gvt_vgpu_err("fail to shadow workload ring_buffer\n");
                return ret;
        }
 
        ret = scan_workload(workload);
        if (ret) {
-               gvt_err("scan workload error\n");
+               gvt_vgpu_err("scan workload error\n");
                return ret;
        }
        return 0;
@@ -2681,6 +2714,7 @@ static int shadow_indirect_ctx(struct intel_shadow_wa_ctx *wa_ctx)
 {
        int ctx_size = wa_ctx->indirect_ctx.size;
        unsigned long guest_gma = wa_ctx->indirect_ctx.guest_gma;
+       struct intel_vgpu *vgpu = wa_ctx->workload->vgpu;
        struct drm_i915_gem_object *obj;
        int ret = 0;
        void *map;
@@ -2694,14 +2728,14 @@ static int shadow_indirect_ctx(struct intel_shadow_wa_ctx *wa_ctx)
        /* get the va of the shadow batch buffer */
        map = i915_gem_object_pin_map(obj, I915_MAP_WB);
        if (IS_ERR(map)) {
-               gvt_err("failed to vmap shadow indirect ctx\n");
+               gvt_vgpu_err("failed to vmap shadow indirect ctx\n");
                ret = PTR_ERR(map);
                goto put_obj;
        }
 
        ret = i915_gem_object_set_to_cpu_domain(obj, false);
        if (ret) {
-               gvt_err("failed to set shadow indirect ctx to CPU\n");
+               gvt_vgpu_err("failed to set shadow indirect ctx to CPU\n");
                goto unmap_src;
        }
 
@@ -2710,7 +2744,7 @@ static int shadow_indirect_ctx(struct intel_shadow_wa_ctx *wa_ctx)
                                guest_gma, guest_gma + ctx_size,
                                map);
        if (ret) {
-               gvt_err("fail to copy guest indirect ctx\n");
+               gvt_vgpu_err("fail to copy guest indirect ctx\n");
                goto unmap_src;
        }
 
@@ -2744,13 +2778,14 @@ static int combine_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx)
 int intel_gvt_scan_and_shadow_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx)
 {
        int ret;
+       struct intel_vgpu *vgpu = wa_ctx->workload->vgpu;
 
        if (wa_ctx->indirect_ctx.size == 0)
                return 0;
 
        ret = shadow_indirect_ctx(wa_ctx);
        if (ret) {
-               gvt_err("fail to shadow indirect ctx\n");
+               gvt_vgpu_err("fail to shadow indirect ctx\n");
                return ret;
        }
 
@@ -2758,7 +2793,7 @@ int intel_gvt_scan_and_shadow_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx)
 
        ret = scan_wa_ctx(wa_ctx);
        if (ret) {
-               gvt_err("scan wa ctx error\n");
+               gvt_vgpu_err("scan wa ctx error\n");
                return ret;
        }
 
index 68cba7bd980af8cb9a855ef3ff4a4c446947ebaf..b0cff4dc2684792271a5e648c889dbb5a4d04ac5 100644 (file)
 #define gvt_err(fmt, args...) \
        DRM_ERROR("gvt: "fmt, ##args)
 
+#define gvt_vgpu_err(fmt, args...)                                     \
+do {                                                                   \
+       if (IS_ERR_OR_NULL(vgpu))                                       \
+               DRM_DEBUG_DRIVER("gvt: "fmt, ##args);                   \
+       else                                                            \
+               DRM_DEBUG_DRIVER("gvt: vgpu %d: "fmt, vgpu->id, ##args);\
+} while (0)
+
 #define gvt_dbg_core(fmt, args...) \
        DRM_DEBUG_DRIVER("gvt: core: "fmt, ##args)
 
index 6d8fde880c39936f816eae411320c38dac87662c..5419ae6ec6339cecee3ea7704c61c02689a86d22 100644 (file)
@@ -83,44 +83,80 @@ static int pipe_is_enabled(struct intel_vgpu *vgpu, int pipe)
        return 0;
 }
 
+static unsigned char virtual_dp_monitor_edid[GVT_EDID_NUM][EDID_SIZE] = {
+       {
+/* EDID with 1024x768 as its resolution */
+               /*Header*/
+               0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00,
+               /* Vendor & Product Identification */
+               0x22, 0xf0, 0x54, 0x29, 0x00, 0x00, 0x00, 0x00, 0x04, 0x17,
+               /* Version & Revision */
+               0x01, 0x04,
+               /* Basic Display Parameters & Features */
+               0xa5, 0x34, 0x20, 0x78, 0x23,
+               /* Color Characteristics */
+               0xfc, 0x81, 0xa4, 0x55, 0x4d, 0x9d, 0x25, 0x12, 0x50, 0x54,
+               /* Established Timings: maximum resolution is 1024x768 */
+               0x21, 0x08, 0x00,
+               /* Standard Timings. All invalid */
+               0x00, 0xc0, 0x00, 0xc0, 0x00, 0x40, 0x00, 0x80, 0x00, 0x00,
+               0x00, 0x40, 0x00, 0x00, 0x00, 0x01,
+               /* 18 Byte Data Blocks 1: invalid */
+               0x00, 0x00, 0x80, 0xa0, 0x70, 0xb0,
+               0x23, 0x40, 0x30, 0x20, 0x36, 0x00, 0x06, 0x44, 0x21, 0x00, 0x00, 0x1a,
+               /* 18 Byte Data Blocks 2: invalid */
+               0x00, 0x00, 0x00, 0xfd, 0x00, 0x18, 0x3c, 0x18, 0x50, 0x11, 0x00, 0x0a,
+               0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
+               /* 18 Byte Data Blocks 3: invalid */
+               0x00, 0x00, 0x00, 0xfc, 0x00, 0x48,
+               0x50, 0x20, 0x5a, 0x52, 0x32, 0x34, 0x34, 0x30, 0x77, 0x0a, 0x20, 0x20,
+               /* 18 Byte Data Blocks 4: invalid */
+               0x00, 0x00, 0x00, 0xff, 0x00, 0x43, 0x4e, 0x34, 0x33, 0x30, 0x34, 0x30,
+               0x44, 0x58, 0x51, 0x0a, 0x20, 0x20,
+               /* Extension Block Count */
+               0x00,
+               /* Checksum */
+               0xef,
+       },
+       {
 /* EDID with 1920x1200 as its resolution */
-static unsigned char virtual_dp_monitor_edid[] = {
-       /*Header*/
-       0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00,
-       /* Vendor & Product Identification */
-       0x22, 0xf0, 0x54, 0x29, 0x00, 0x00, 0x00, 0x00, 0x04, 0x17,
-       /* Version & Revision */
-       0x01, 0x04,
-       /* Basic Display Parameters & Features */
-       0xa5, 0x34, 0x20, 0x78, 0x23,
-       /* Color Characteristics */
-       0xfc, 0x81, 0xa4, 0x55, 0x4d, 0x9d, 0x25, 0x12, 0x50, 0x54,
-       /* Established Timings: maximum resolution is 1024x768 */
-       0x21, 0x08, 0x00,
-       /*
-        * Standard Timings.
-        * below new resolutions can be supported:
-        * 1920x1080, 1280x720, 1280x960, 1280x1024,
-        * 1440x900, 1600x1200, 1680x1050
-        */
-       0xd1, 0xc0, 0x81, 0xc0, 0x81, 0x40, 0x81, 0x80, 0x95, 0x00,
-       0xa9, 0x40, 0xb3, 0x00, 0x01, 0x01,
-       /* 18 Byte Data Blocks 1: max resolution is 1920x1200 */
-       0x28, 0x3c, 0x80, 0xa0, 0x70, 0xb0,
-       0x23, 0x40, 0x30, 0x20, 0x36, 0x00, 0x06, 0x44, 0x21, 0x00, 0x00, 0x1a,
-       /* 18 Byte Data Blocks 2: invalid */
-       0x00, 0x00, 0x00, 0xfd, 0x00, 0x18, 0x3c, 0x18, 0x50, 0x11, 0x00, 0x0a,
-       0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
-       /* 18 Byte Data Blocks 3: invalid */
-       0x00, 0x00, 0x00, 0xfc, 0x00, 0x48,
-       0x50, 0x20, 0x5a, 0x52, 0x32, 0x34, 0x34, 0x30, 0x77, 0x0a, 0x20, 0x20,
-       /* 18 Byte Data Blocks 4: invalid */
-       0x00, 0x00, 0x00, 0xff, 0x00, 0x43, 0x4e, 0x34, 0x33, 0x30, 0x34, 0x30,
-       0x44, 0x58, 0x51, 0x0a, 0x20, 0x20,
-       /* Extension Block Count */
-       0x00,
-       /* Checksum */
-       0x45,
+               /*Header*/
+               0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00,
+               /* Vendor & Product Identification */
+               0x22, 0xf0, 0x54, 0x29, 0x00, 0x00, 0x00, 0x00, 0x04, 0x17,
+               /* Version & Revision */
+               0x01, 0x04,
+               /* Basic Display Parameters & Features */
+               0xa5, 0x34, 0x20, 0x78, 0x23,
+               /* Color Characteristics */
+               0xfc, 0x81, 0xa4, 0x55, 0x4d, 0x9d, 0x25, 0x12, 0x50, 0x54,
+               /* Established Timings: maximum resolution is 1024x768 */
+               0x21, 0x08, 0x00,
+               /*
+                * Standard Timings.
+                * the following resolutions can be supported:
+                * 1920x1080, 1280x720, 1280x960, 1280x1024,
+                * 1440x900, 1600x1200, 1680x1050
+                */
+               0xd1, 0xc0, 0x81, 0xc0, 0x81, 0x40, 0x81, 0x80, 0x95, 0x00,
+               0xa9, 0x40, 0xb3, 0x00, 0x01, 0x01,
+               /* 18 Byte Data Blocks 1: max resolution is 1920x1200 */
+               0x28, 0x3c, 0x80, 0xa0, 0x70, 0xb0,
+               0x23, 0x40, 0x30, 0x20, 0x36, 0x00, 0x06, 0x44, 0x21, 0x00, 0x00, 0x1a,
+               /* 18 Byte Data Blocks 2: invalid */
+               0x00, 0x00, 0x00, 0xfd, 0x00, 0x18, 0x3c, 0x18, 0x50, 0x11, 0x00, 0x0a,
+               0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
+               /* 18 Byte Data Blocks 3: invalid */
+               0x00, 0x00, 0x00, 0xfc, 0x00, 0x48,
+               0x50, 0x20, 0x5a, 0x52, 0x32, 0x34, 0x34, 0x30, 0x77, 0x0a, 0x20, 0x20,
+               /* 18 Byte Data Blocks 4: invalid */
+               0x00, 0x00, 0x00, 0xff, 0x00, 0x43, 0x4e, 0x34, 0x33, 0x30, 0x34, 0x30,
+               0x44, 0x58, 0x51, 0x0a, 0x20, 0x20,
+               /* Extension Block Count */
+               0x00,
+               /* Checksum */
+               0x45,
+       },
 };
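
Both tables end in a checksum byte (0xef and 0x45 respectively) chosen so that all bytes of the 128-byte EDID base block sum to 0 modulo 256, per the EDID base-block rule. A stand-alone sketch of that check (assuming EDID_SIZE is the standard 128-byte base block):

    static int edid_block_sum_ok(const unsigned char *block)
    {
            unsigned char sum = 0;
            int i;

            for (i = 0; i < 128; i++)
                    sum += block[i];
            return sum == 0;        /* 1 when the checksum byte is consistent */
    }
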
 
 #define DPCD_HEADER_SIZE        0xb
@@ -140,14 +176,20 @@ static void emulate_monitor_status_change(struct intel_vgpu *vgpu)
                vgpu_vreg(vgpu, SDEISR) &= ~(SDE_PORTA_HOTPLUG_SPT |
                                SDE_PORTE_HOTPLUG_SPT);
 
-       if (intel_vgpu_has_monitor_on_port(vgpu, PORT_B))
+       if (intel_vgpu_has_monitor_on_port(vgpu, PORT_B)) {
                vgpu_vreg(vgpu, SDEISR) |= SDE_PORTB_HOTPLUG_CPT;
+               vgpu_vreg(vgpu, SFUSE_STRAP) |= SFUSE_STRAP_DDIB_DETECTED;
+       }
 
-       if (intel_vgpu_has_monitor_on_port(vgpu, PORT_C))
+       if (intel_vgpu_has_monitor_on_port(vgpu, PORT_C)) {
                vgpu_vreg(vgpu, SDEISR) |= SDE_PORTC_HOTPLUG_CPT;
+               vgpu_vreg(vgpu, SFUSE_STRAP) |= SFUSE_STRAP_DDIC_DETECTED;
+       }
 
-       if (intel_vgpu_has_monitor_on_port(vgpu, PORT_D))
+       if (intel_vgpu_has_monitor_on_port(vgpu, PORT_D)) {
                vgpu_vreg(vgpu, SDEISR) |= SDE_PORTD_HOTPLUG_CPT;
+               vgpu_vreg(vgpu, SFUSE_STRAP) |= SFUSE_STRAP_DDID_DETECTED;
+       }
 
        if (IS_SKYLAKE(dev_priv) &&
                        intel_vgpu_has_monitor_on_port(vgpu, PORT_E)) {
@@ -160,6 +202,8 @@ static void emulate_monitor_status_change(struct intel_vgpu *vgpu)
                                GEN8_PORT_DP_A_HOTPLUG;
                else
                        vgpu_vreg(vgpu, SDEISR) |= SDE_PORTA_HOTPLUG_SPT;
+
+               vgpu_vreg(vgpu, DDI_BUF_CTL(PORT_A)) |= DDI_INIT_DISPLAY_DETECTED;
        }
 }
 
@@ -175,10 +219,13 @@ static void clean_virtual_dp_monitor(struct intel_vgpu *vgpu, int port_num)
 }
 
 static int setup_virtual_dp_monitor(struct intel_vgpu *vgpu, int port_num,
-               int type)
+                                   int type, unsigned int resolution)
 {
        struct intel_vgpu_port *port = intel_vgpu_port(vgpu, port_num);
 
+       if (WARN_ON(resolution >= GVT_EDID_NUM))
+               return -EINVAL;
+
        port->edid = kzalloc(sizeof(*(port->edid)), GFP_KERNEL);
        if (!port->edid)
                return -ENOMEM;
@@ -189,7 +236,7 @@ static int setup_virtual_dp_monitor(struct intel_vgpu *vgpu, int port_num,
                return -ENOMEM;
        }
 
-       memcpy(port->edid->edid_block, virtual_dp_monitor_edid,
+       memcpy(port->edid->edid_block, virtual_dp_monitor_edid[resolution],
                        EDID_SIZE);
        port->edid->data_valid = true;
 
@@ -322,16 +369,18 @@ void intel_vgpu_clean_display(struct intel_vgpu *vgpu)
  * Zero on success, negative error code if failed.
  *
  */
-int intel_vgpu_init_display(struct intel_vgpu *vgpu)
+int intel_vgpu_init_display(struct intel_vgpu *vgpu, u64 resolution)
 {
        struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
 
        intel_vgpu_init_i2c_edid(vgpu);
 
        if (IS_SKYLAKE(dev_priv))
-               return setup_virtual_dp_monitor(vgpu, PORT_D, GVT_DP_D);
+               return setup_virtual_dp_monitor(vgpu, PORT_D, GVT_DP_D,
+                                               resolution);
        else
-               return setup_virtual_dp_monitor(vgpu, PORT_B, GVT_DP_B);
+               return setup_virtual_dp_monitor(vgpu, PORT_B, GVT_DP_B,
+                                               resolution);
 }
 
 /**
index 8b234ea961f67b96a185e3cd8bfd35728360a05d..d73de22102e2b77f1c4c166ee0688b86d2e29391 100644 (file)
@@ -154,10 +154,28 @@ struct intel_vgpu_port {
        int type;
 };
 
+enum intel_vgpu_edid {
+       GVT_EDID_1024_768,
+       GVT_EDID_1920_1200,
+       GVT_EDID_NUM,
+};
+
+static inline char *vgpu_edid_str(enum intel_vgpu_edid id)
+{
+       switch (id) {
+       case GVT_EDID_1024_768:
+               return "1024x768";
+       case GVT_EDID_1920_1200:
+               return "1920x1200";
+       default:
+               return "";
+       }
+}
+
 void intel_gvt_emulate_vblank(struct intel_gvt *gvt);
 void intel_gvt_check_vblank_emulation(struct intel_gvt *gvt);
 
-int intel_vgpu_init_display(struct intel_vgpu *vgpu);
+int intel_vgpu_init_display(struct intel_vgpu *vgpu, u64 resolution);
 void intel_vgpu_reset_display(struct intel_vgpu *vgpu);
 void intel_vgpu_clean_display(struct intel_vgpu *vgpu);
 
index bda85dff7b2a998d68d1485479c9687eab206600..42cd09ec63fa7c41b69d684f8cf382a04f15b2ab 100644 (file)
@@ -52,16 +52,16 @@ static unsigned char edid_get_byte(struct intel_vgpu *vgpu)
        unsigned char chr = 0;
 
        if (edid->state == I2C_NOT_SPECIFIED || !edid->slave_selected) {
-               gvt_err("Driver tries to read EDID without proper sequence!\n");
+               gvt_vgpu_err("Driver tries to read EDID without proper sequence!\n");
                return 0;
        }
        if (edid->current_edid_read >= EDID_SIZE) {
-               gvt_err("edid_get_byte() exceeds the size of EDID!\n");
+               gvt_vgpu_err("edid_get_byte() exceeds the size of EDID!\n");
                return 0;
        }
 
        if (!edid->edid_available) {
-               gvt_err("Reading EDID but EDID is not available!\n");
+               gvt_vgpu_err("Reading EDID but EDID is not available!\n");
                return 0;
        }
 
@@ -72,7 +72,7 @@ static unsigned char edid_get_byte(struct intel_vgpu *vgpu)
                chr = edid_data->edid_block[edid->current_edid_read];
                edid->current_edid_read++;
        } else {
-               gvt_err("No EDID available during the reading?\n");
+               gvt_vgpu_err("No EDID available during the reading?\n");
        }
        return chr;
 }
@@ -223,7 +223,7 @@ static int gmbus1_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
                        vgpu_vreg(vgpu, PCH_GMBUS2) |= GMBUS_ACTIVE;
                        break;
                default:
-                       gvt_err("Unknown/reserved GMBUS cycle detected!\n");
+                       gvt_vgpu_err("Unknown/reserved GMBUS cycle detected!\n");
                        break;
                }
                /*
@@ -292,8 +292,7 @@ static int gmbus3_mmio_read(struct intel_vgpu *vgpu, unsigned int offset,
                 */
        } else {
                memcpy(p_data, &vgpu_vreg(vgpu, offset), bytes);
-               gvt_err("vgpu%d: warning: gmbus3 read with nothing returned\n",
-                               vgpu->id);
+               gvt_vgpu_err("warning: gmbus3 read with nothing returned\n");
        }
        return 0;
 }
@@ -496,7 +495,8 @@ void intel_gvt_i2c_handle_aux_ch_write(struct intel_vgpu *vgpu,
                        unsigned char val = edid_get_byte(vgpu);
 
                        aux_data_for_write = (val << 16);
-               }
+               } else
+                       aux_data_for_write = (0xff << 16);
        }
        /* write the return value in AUX_CH_DATA reg which includes:
         * ACK of I2C_WRITE
index 46eb9fd3c03f6b2fdb19fec6550a5f4347038f88..f1f426a97aa9d43826010d7be90f6bffdbe59426 100644 (file)
@@ -172,6 +172,7 @@ static int emulate_execlist_ctx_schedule_out(
                struct intel_vgpu_execlist *execlist,
                struct execlist_ctx_descriptor_format *ctx)
 {
+       struct intel_vgpu *vgpu = execlist->vgpu;
        struct intel_vgpu_execlist_slot *running = execlist->running_slot;
        struct intel_vgpu_execlist_slot *pending = execlist->pending_slot;
        struct execlist_ctx_descriptor_format *ctx0 = &running->ctx[0];
@@ -183,7 +184,7 @@ static int emulate_execlist_ctx_schedule_out(
        gvt_dbg_el("schedule out context id %x\n", ctx->context_id);
 
        if (WARN_ON(!same_context(ctx, execlist->running_context))) {
-               gvt_err("schedule out context is not running context,"
+               gvt_vgpu_err("schedule out context is not running context,"
                                "ctx id %x running ctx id %x\n",
                                ctx->context_id,
                                execlist->running_context->context_id);
@@ -254,7 +255,7 @@ static struct intel_vgpu_execlist_slot *get_next_execlist_slot(
        status.udw = vgpu_vreg(vgpu, status_reg + 4);
 
        if (status.execlist_queue_full) {
-               gvt_err("virtual execlist slots are full\n");
+               gvt_vgpu_err("virtual execlist slots are full\n");
                return NULL;
        }
 
@@ -270,11 +271,12 @@ static int emulate_execlist_schedule_in(struct intel_vgpu_execlist *execlist,
 
        struct execlist_ctx_descriptor_format *ctx0, *ctx1;
        struct execlist_context_status_format status;
+       struct intel_vgpu *vgpu = execlist->vgpu;
 
        gvt_dbg_el("emulate schedule-in\n");
 
        if (!slot) {
-               gvt_err("no available execlist slot\n");
+               gvt_vgpu_err("no available execlist slot\n");
                return -EINVAL;
        }
 
@@ -375,7 +377,6 @@ static void prepare_shadow_batch_buffer(struct intel_vgpu_workload *workload)
 
                vma = i915_gem_object_ggtt_pin(entry_obj->obj, NULL, 0, 4, 0);
                if (IS_ERR(vma)) {
-                       gvt_err("Cannot pin\n");
                        return;
                }
 
@@ -428,7 +429,6 @@ static void prepare_shadow_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx)
        vma = i915_gem_object_ggtt_pin(wa_ctx->indirect_ctx.obj, NULL,
                                       0, CACHELINE_BYTES, 0);
        if (IS_ERR(vma)) {
-               gvt_err("Cannot pin indirect ctx obj\n");
                return;
        }
 
@@ -561,6 +561,7 @@ static int prepare_mm(struct intel_vgpu_workload *workload)
 {
        struct execlist_ctx_descriptor_format *desc = &workload->ctx_desc;
        struct intel_vgpu_mm *mm;
+       struct intel_vgpu *vgpu = workload->vgpu;
        int page_table_level;
        u32 pdp[8];
 
@@ -569,7 +570,7 @@ static int prepare_mm(struct intel_vgpu_workload *workload)
        } else if (desc->addressing_mode == 3) { /* legacy 64 bit */
                page_table_level = 4;
        } else {
-               gvt_err("Advanced Context mode(SVM) is not supported!\n");
+               gvt_vgpu_err("Advanced Context mode(SVM) is not supported!\n");
                return -EINVAL;
        }
 
@@ -583,7 +584,7 @@ static int prepare_mm(struct intel_vgpu_workload *workload)
                mm = intel_vgpu_create_mm(workload->vgpu, INTEL_GVT_MM_PPGTT,
                                pdp, page_table_level, 0);
                if (IS_ERR(mm)) {
-                       gvt_err("fail to create mm object.\n");
+                       gvt_vgpu_err("fail to create mm object.\n");
                        return PTR_ERR(mm);
                }
        }
@@ -609,7 +610,7 @@ static int submit_context(struct intel_vgpu *vgpu, int ring_id,
        ring_context_gpa = intel_vgpu_gma_to_gpa(vgpu->gtt.ggtt_mm,
                        (u32)((desc->lrca + 1) << GTT_PAGE_SHIFT));
        if (ring_context_gpa == INTEL_GVT_INVALID_ADDR) {
-               gvt_err("invalid guest context LRCA: %x\n", desc->lrca);
+               gvt_vgpu_err("invalid guest context LRCA: %x\n", desc->lrca);
                return -EINVAL;
        }
 
@@ -724,8 +725,7 @@ int intel_vgpu_submit_execlist(struct intel_vgpu *vgpu, int ring_id)
                        continue;
 
                if (!desc[i]->privilege_access) {
-                       gvt_err("vgpu%d: unexpected GGTT elsp submission\n",
-                                       vgpu->id);
+                       gvt_vgpu_err("unexpected GGTT elsp submission\n");
                        return -EINVAL;
                }
 
@@ -735,15 +735,13 @@ int intel_vgpu_submit_execlist(struct intel_vgpu *vgpu, int ring_id)
        }
 
        if (!valid_desc_bitmap) {
-               gvt_err("vgpu%d: no valid desc in a elsp submission\n",
-                               vgpu->id);
+               gvt_vgpu_err("no valid desc in a elsp submission\n");
                return -EINVAL;
        }
 
        if (!test_bit(0, (void *)&valid_desc_bitmap) &&
                        test_bit(1, (void *)&valid_desc_bitmap)) {
-               gvt_err("vgpu%d: weird elsp submission, desc 0 is not valid\n",
-                               vgpu->id);
+               gvt_vgpu_err("weird elsp submission, desc 0 is not valid\n");
                return -EINVAL;
        }
 
@@ -752,8 +750,7 @@ int intel_vgpu_submit_execlist(struct intel_vgpu *vgpu, int ring_id)
                ret = submit_context(vgpu, ring_id, &valid_desc[i],
                                emulate_schedule_in);
                if (ret) {
-                       gvt_err("vgpu%d: fail to schedule workload\n",
-                                       vgpu->id);
+                       gvt_vgpu_err("fail to schedule workload\n");
                        return ret;
                }
                emulate_schedule_in = false;
index 1cb29b2d7dc638bd701b4ec16eec9edc8fea1133..933a7c211a1c29ab77357119e37b0de2bb3dd521 100644 (file)
@@ -80,7 +80,7 @@ static int expose_firmware_sysfs(struct intel_gvt *gvt)
        int ret;
 
        size = sizeof(*h) + info->mmio_size + info->cfg_space_size - 1;
-       firmware = vmalloc(size);
+       firmware = vzalloc(size);
        if (!firmware)
                return -ENOMEM;
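
vzalloc() behaves like vmalloc() followed by zeroing the allocation; the likely motivation here is that this function exposes the buffer through sysfs, so any bytes the snapshot code never overwrites would otherwise read back as stale kernel memory. A rough equivalent of the one-line fix, as a sketch:

    firmware = vmalloc(size);
    if (!firmware)
            return -ENOMEM;
    memset(firmware, 0, size);      /* what vzalloc() adds over vmalloc() */
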
 
index 28c92346db0e4e3615c2b5c484b4421eeb097e6e..b832bea64e0367ed6c25bf031a2a71db82d7edcc 100644 (file)
@@ -49,8 +49,8 @@ bool intel_gvt_ggtt_validate_range(struct intel_vgpu *vgpu, u64 addr, u32 size)
 {
        if ((!vgpu_gmadr_is_valid(vgpu, addr)) || (size
                        && !vgpu_gmadr_is_valid(vgpu, addr + size - 1))) {
-               gvt_err("vgpu%d: invalid range gmadr 0x%llx size 0x%x\n",
-                               vgpu->id, addr, size);
+               gvt_vgpu_err("invalid range gmadr 0x%llx size 0x%x\n",
+                               addr, size);
                return false;
        }
        return true;
@@ -430,7 +430,7 @@ static int gtt_entry_p2m(struct intel_vgpu *vgpu, struct intel_gvt_gtt_entry *p,
 
        mfn = intel_gvt_hypervisor_gfn_to_mfn(vgpu, gfn);
        if (mfn == INTEL_GVT_INVALID_ADDR) {
-               gvt_err("fail to translate gfn: 0x%lx\n", gfn);
+               gvt_vgpu_err("fail to translate gfn: 0x%lx\n", gfn);
                return -ENXIO;
        }
 
@@ -611,7 +611,7 @@ static inline int init_shadow_page(struct intel_vgpu *vgpu,
 
        daddr = dma_map_page(kdev, p->page, 0, 4096, PCI_DMA_BIDIRECTIONAL);
        if (dma_mapping_error(kdev, daddr)) {
-               gvt_err("fail to map dma addr\n");
+               gvt_vgpu_err("fail to map dma addr\n");
                return -EINVAL;
        }
 
@@ -735,7 +735,7 @@ retry:
                if (reclaim_one_mm(vgpu->gvt))
                        goto retry;
 
-               gvt_err("fail to allocate ppgtt shadow page\n");
+               gvt_vgpu_err("fail to allocate ppgtt shadow page\n");
                return ERR_PTR(-ENOMEM);
        }
 
@@ -750,14 +750,14 @@ retry:
         */
        ret = init_shadow_page(vgpu, &spt->shadow_page, type);
        if (ret) {
-               gvt_err("fail to initialize shadow page for spt\n");
+               gvt_vgpu_err("fail to initialize shadow page for spt\n");
                goto err;
        }
 
        ret = intel_vgpu_init_guest_page(vgpu, &spt->guest_page,
                        gfn, ppgtt_write_protection_handler, NULL);
        if (ret) {
-               gvt_err("fail to initialize guest page for spt\n");
+               gvt_vgpu_err("fail to initialize guest page for spt\n");
                goto err;
        }
 
@@ -776,8 +776,7 @@ static struct intel_vgpu_ppgtt_spt *ppgtt_find_shadow_page(
        if (p)
                return shadow_page_to_ppgtt_spt(p);
 
-       gvt_err("vgpu%d: fail to find ppgtt shadow page: 0x%lx\n",
-                       vgpu->id, mfn);
+       gvt_vgpu_err("fail to find ppgtt shadow page: 0x%lx\n", mfn);
        return NULL;
 }
 
@@ -827,8 +826,8 @@ static int ppgtt_invalidate_shadow_page_by_shadow_entry(struct intel_vgpu *vgpu,
        }
        s = ppgtt_find_shadow_page(vgpu, ops->get_pfn(e));
        if (!s) {
-               gvt_err("vgpu%d: fail to find shadow page: mfn: 0x%lx\n",
-                               vgpu->id, ops->get_pfn(e));
+               gvt_vgpu_err("fail to find shadow page: mfn: 0x%lx\n",
+                               ops->get_pfn(e));
                return -ENXIO;
        }
        return ppgtt_invalidate_shadow_page(s);
@@ -836,6 +835,7 @@ static int ppgtt_invalidate_shadow_page_by_shadow_entry(struct intel_vgpu *vgpu,
 
 static int ppgtt_invalidate_shadow_page(struct intel_vgpu_ppgtt_spt *spt)
 {
+       struct intel_vgpu *vgpu = spt->vgpu;
        struct intel_gvt_gtt_entry e;
        unsigned long index;
        int ret;
@@ -854,7 +854,7 @@ static int ppgtt_invalidate_shadow_page(struct intel_vgpu_ppgtt_spt *spt)
 
        for_each_present_shadow_entry(spt, &e, index) {
                if (!gtt_type_is_pt(get_next_pt_type(e.type))) {
-                       gvt_err("GVT doesn't support pse bit for now\n");
+                       gvt_vgpu_err("GVT doesn't support pse bit for now\n");
                        return -EINVAL;
                }
                ret = ppgtt_invalidate_shadow_page_by_shadow_entry(
@@ -868,8 +868,8 @@ release:
        ppgtt_free_shadow_page(spt);
        return 0;
 fail:
-       gvt_err("vgpu%d: fail: shadow page %p shadow entry 0x%llx type %d\n",
-                       spt->vgpu->id, spt, e.val64, e.type);
+       gvt_vgpu_err("fail: shadow page %p shadow entry 0x%llx type %d\n",
+                       spt, e.val64, e.type);
        return ret;
 }
 
@@ -914,8 +914,8 @@ static struct intel_vgpu_ppgtt_spt *ppgtt_populate_shadow_page_by_guest_entry(
        }
        return s;
 fail:
-       gvt_err("vgpu%d: fail: shadow page %p guest entry 0x%llx type %d\n",
-                       vgpu->id, s, we->val64, we->type);
+       gvt_vgpu_err("fail: shadow page %p guest entry 0x%llx type %d\n",
+                       s, we->val64, we->type);
        return ERR_PTR(ret);
 }
 
@@ -953,7 +953,7 @@ static int ppgtt_populate_shadow_page(struct intel_vgpu_ppgtt_spt *spt)
 
        for_each_present_guest_entry(spt, &ge, i) {
                if (!gtt_type_is_pt(get_next_pt_type(ge.type))) {
-                       gvt_err("GVT doesn't support pse bit now\n");
+                       gvt_vgpu_err("GVT doesn't support pse bit now\n");
                        ret = -EINVAL;
                        goto fail;
                }
@@ -969,8 +969,8 @@ static int ppgtt_populate_shadow_page(struct intel_vgpu_ppgtt_spt *spt)
        }
        return 0;
 fail:
-       gvt_err("vgpu%d: fail: shadow page %p guest entry 0x%llx type %d\n",
-                       vgpu->id, spt, ge.val64, ge.type);
+       gvt_vgpu_err("fail: shadow page %p guest entry 0x%llx type %d\n",
+                       spt, ge.val64, ge.type);
        return ret;
 }
 
@@ -999,7 +999,7 @@ static int ppgtt_handle_guest_entry_removal(struct intel_vgpu_guest_page *gpt,
                struct intel_vgpu_ppgtt_spt *s =
                        ppgtt_find_shadow_page(vgpu, ops->get_pfn(&e));
                if (!s) {
-                       gvt_err("fail to find guest page\n");
+                       gvt_vgpu_err("fail to find guest page\n");
                        ret = -ENXIO;
                        goto fail;
                }
@@ -1011,8 +1011,8 @@ static int ppgtt_handle_guest_entry_removal(struct intel_vgpu_guest_page *gpt,
        ppgtt_set_shadow_entry(spt, &e, index);
        return 0;
 fail:
-       gvt_err("vgpu%d: fail: shadow page %p guest entry 0x%llx type %d\n",
-                       vgpu->id, spt, e.val64, e.type);
+       gvt_vgpu_err("fail: shadow page %p guest entry 0x%llx type %d\n",
+                       spt, e.val64, e.type);
        return ret;
 }
 
@@ -1046,8 +1046,8 @@ static int ppgtt_handle_guest_entry_add(struct intel_vgpu_guest_page *gpt,
        }
        return 0;
 fail:
-       gvt_err("vgpu%d: fail: spt %p guest entry 0x%llx type %d\n", vgpu->id,
-                       spt, we->val64, we->type);
+       gvt_vgpu_err("fail: spt %p guest entry 0x%llx type %d\n",
+               spt, we->val64, we->type);
        return ret;
 }
 
@@ -1250,8 +1250,8 @@ static int ppgtt_handle_guest_write_page_table(
        }
        return 0;
 fail:
-       gvt_err("vgpu%d: fail: shadow page %p guest entry 0x%llx type %d.\n",
-                       vgpu->id, spt, we->val64, we->type);
+       gvt_vgpu_err("fail: shadow page %p guest entry 0x%llx type %d.\n",
+                       spt, we->val64, we->type);
        return ret;
 }
 
@@ -1493,7 +1493,7 @@ static int shadow_mm(struct intel_vgpu_mm *mm)
 
                spt = ppgtt_populate_shadow_page_by_guest_entry(vgpu, &ge);
                if (IS_ERR(spt)) {
-                       gvt_err("fail to populate guest root pointer\n");
+                       gvt_vgpu_err("fail to populate guest root pointer\n");
                        ret = PTR_ERR(spt);
                        goto fail;
                }
@@ -1566,7 +1566,7 @@ struct intel_vgpu_mm *intel_vgpu_create_mm(struct intel_vgpu *vgpu,
 
        ret = gtt->mm_alloc_page_table(mm);
        if (ret) {
-               gvt_err("fail to allocate page table for mm\n");
+               gvt_vgpu_err("fail to allocate page table for mm\n");
                goto fail;
        }
 
@@ -1584,7 +1584,7 @@ struct intel_vgpu_mm *intel_vgpu_create_mm(struct intel_vgpu *vgpu,
        }
        return mm;
 fail:
-       gvt_err("fail to create mm\n");
+       gvt_vgpu_err("fail to create mm\n");
        if (mm)
                intel_gvt_mm_unreference(mm);
        return ERR_PTR(ret);
@@ -1760,7 +1760,7 @@ unsigned long intel_vgpu_gma_to_gpa(struct intel_vgpu_mm *mm, unsigned long gma)
                        mm->page_table_level, gma, gpa);
        return gpa;
 err:
-       gvt_err("invalid mm type: %d gma %lx\n", mm->type, gma);
+       gvt_vgpu_err("invalid mm type: %d gma %lx\n", mm->type, gma);
        return INTEL_GVT_INVALID_ADDR;
 }
 
@@ -1825,11 +1825,8 @@ static int emulate_gtt_mmio_write(struct intel_vgpu *vgpu, unsigned int off,
        gma = g_gtt_index << GTT_PAGE_SHIFT;
 
        /* the VM may configure the whole GM space when ballooning is used */
-       if (WARN_ONCE(!vgpu_gmadr_is_valid(vgpu, gma),
-                               "vgpu%d: found oob ggtt write, offset %x\n",
-                               vgpu->id, off)) {
+       if (!vgpu_gmadr_is_valid(vgpu, gma))
                return 0;
-       }
 
        ggtt_get_guest_entry(ggtt_mm, &e, g_gtt_index);
 
@@ -1839,13 +1836,16 @@ static int emulate_gtt_mmio_write(struct intel_vgpu *vgpu, unsigned int off,
        if (ops->test_present(&e)) {
                ret = gtt_entry_p2m(vgpu, &e, &m);
                if (ret) {
-                       gvt_err("vgpu%d: fail to translate guest gtt entry\n",
-                                       vgpu->id);
-                       return ret;
+                       gvt_vgpu_err("fail to translate guest gtt entry\n");
+                       /* The guest driver may read/write the entry while
+                        * partially updating it; p2m fails in that case, so
+                        * set the shadow entry to point to a scratch page.
+                        */
+                       ops->set_pfn(&m, gvt->gtt.scratch_ggtt_mfn);
                }
        } else {
                m = e;
-               m.val64 = 0;
+               ops->set_pfn(&m, gvt->gtt.scratch_ggtt_mfn);
        }
 
        ggtt_set_shadow_entry(ggtt_mm, &m, g_gtt_index);
@@ -1896,14 +1896,14 @@ static int alloc_scratch_pages(struct intel_vgpu *vgpu,
 
        scratch_pt = (void *)get_zeroed_page(GFP_KERNEL);
        if (!scratch_pt) {
-               gvt_err("fail to allocate scratch page\n");
+               gvt_vgpu_err("fail to allocate scratch page\n");
                return -ENOMEM;
        }
 
        daddr = dma_map_page(dev, virt_to_page(scratch_pt), 0,
                        4096, PCI_DMA_BIDIRECTIONAL);
        if (dma_mapping_error(dev, daddr)) {
-               gvt_err("fail to dmamap scratch_pt\n");
+               gvt_vgpu_err("fail to dmamap scratch_pt\n");
                __free_page(virt_to_page(scratch_pt));
                return -ENOMEM;
        }
@@ -2006,7 +2006,7 @@ int intel_vgpu_init_gtt(struct intel_vgpu *vgpu)
        ggtt_mm = intel_vgpu_create_mm(vgpu, INTEL_GVT_MM_GGTT,
                        NULL, 1, 0);
        if (IS_ERR(ggtt_mm)) {
-               gvt_err("fail to create mm for ggtt.\n");
+               gvt_vgpu_err("fail to create mm for ggtt.\n");
                return PTR_ERR(ggtt_mm);
        }
 
@@ -2015,6 +2015,22 @@ int intel_vgpu_init_gtt(struct intel_vgpu *vgpu)
        return create_scratch_page_tree(vgpu);
 }
 
+static void intel_vgpu_free_mm(struct intel_vgpu *vgpu, int type)
+{
+       struct list_head *pos, *n;
+       struct intel_vgpu_mm *mm;
+
+       list_for_each_safe(pos, n, &vgpu->gtt.mm_list_head) {
+               mm = container_of(pos, struct intel_vgpu_mm, list);
+               if (mm->type == type) {
+                       vgpu->gvt->gtt.mm_free_page_table(mm);
+                       list_del(&mm->list);
+                       list_del(&mm->lru_list);
+                       kfree(mm);
+               }
+       }
+}
+
 /**
  * intel_vgpu_clean_gtt - clean up per-vGPU graphics memory virulization
  * @vgpu: a vGPU
@@ -2027,19 +2043,11 @@ int intel_vgpu_init_gtt(struct intel_vgpu *vgpu)
  */
 void intel_vgpu_clean_gtt(struct intel_vgpu *vgpu)
 {
-       struct list_head *pos, *n;
-       struct intel_vgpu_mm *mm;
-
        ppgtt_free_all_shadow_page(vgpu);
        release_scratch_page_tree(vgpu);
 
-       list_for_each_safe(pos, n, &vgpu->gtt.mm_list_head) {
-               mm = container_of(pos, struct intel_vgpu_mm, list);
-               vgpu->gvt->gtt.mm_free_page_table(mm);
-               list_del(&mm->list);
-               list_del(&mm->lru_list);
-               kfree(mm);
-       }
+       intel_vgpu_free_mm(vgpu, INTEL_GVT_MM_PPGTT);
+       intel_vgpu_free_mm(vgpu, INTEL_GVT_MM_GGTT);
 }
 
 static void clean_spt_oos(struct intel_gvt *gvt)
@@ -2071,7 +2079,6 @@ static int setup_spt_oos(struct intel_gvt *gvt)
        for (i = 0; i < preallocated_oos_pages; i++) {
                oos_page = kzalloc(sizeof(*oos_page), GFP_KERNEL);
                if (!oos_page) {
-                       gvt_err("fail to pre-allocate oos page\n");
                        ret = -ENOMEM;
                        goto fail;
                }
@@ -2161,7 +2168,7 @@ int intel_vgpu_g2v_create_ppgtt_mm(struct intel_vgpu *vgpu,
                mm = intel_vgpu_create_mm(vgpu, INTEL_GVT_MM_PPGTT,
                                pdp, page_table_level, 0);
                if (IS_ERR(mm)) {
-                       gvt_err("fail to create mm\n");
+                       gvt_vgpu_err("fail to create mm\n");
                        return PTR_ERR(mm);
                }
        }
@@ -2191,7 +2198,7 @@ int intel_vgpu_g2v_destroy_ppgtt_mm(struct intel_vgpu *vgpu,
 
        mm = intel_vgpu_find_ppgtt_mm(vgpu, page_table_level, pdp);
        if (!mm) {
-               gvt_err("fail to find ppgtt instance.\n");
+               gvt_vgpu_err("fail to find ppgtt instance.\n");
                return -EINVAL;
        }
        intel_gvt_mm_unreference(mm);
@@ -2322,6 +2329,13 @@ void intel_vgpu_reset_gtt(struct intel_vgpu *vgpu, bool dmlr)
        int i;
 
        ppgtt_free_all_shadow_page(vgpu);
+
+       /* Shadow pages are created only when there is no page-table
+        * tracking data, so remove the tracking data after removing
+        * the shadow pages.
+        */
+       intel_vgpu_free_mm(vgpu, INTEL_GVT_MM_PPGTT);
+
        if (!dmlr)
                return;
 
index e227caf5859ebdfd2c420bc994d42a5734ba4272..6dfc48b63b718b4c4e6f5c62794db5ce279b18a4 100644 (file)
@@ -143,6 +143,8 @@ struct intel_vgpu {
        int id;
        unsigned long handle; /* vGPU handle used by hypervisor MPT modules */
        bool active;
+       bool pv_notified;
+       bool failsafe;
        bool resetting;
        void *sched_data;
 
@@ -160,7 +162,6 @@ struct intel_vgpu {
        atomic_t running_workload_num;
        DECLARE_BITMAP(tlb_handle_pending, I915_NUM_ENGINES);
        struct i915_gem_context *shadow_ctx;
-       struct notifier_block shadow_ctx_notifier_block;
 
 #if IS_ENABLED(CONFIG_DRM_I915_GVT_KVMGT)
        struct {
@@ -203,18 +204,18 @@ struct intel_gvt_firmware {
 };
 
 struct intel_gvt_opregion {
-       void __iomem *opregion_va;
+       void *opregion_va;
        u32 opregion_pa;
 };
 
 #define NR_MAX_INTEL_VGPU_TYPES 20
 struct intel_vgpu_type {
        char name[16];
-       unsigned int max_instance;
        unsigned int avail_instance;
        unsigned int low_gm_size;
        unsigned int high_gm_size;
        unsigned int fence;
+       enum intel_vgpu_edid resolution;
 };
 
 struct intel_gvt {
@@ -231,6 +232,7 @@ struct intel_gvt {
        struct intel_gvt_gtt gtt;
        struct intel_gvt_opregion opregion;
        struct intel_gvt_workload_scheduler scheduler;
+       struct notifier_block shadow_ctx_notifier_block[I915_NUM_ENGINES];
        DECLARE_HASHTABLE(cmd_table, GVT_CMD_HASH_BITS);
        struct intel_vgpu_type *types;
        unsigned int num_types;
@@ -317,6 +319,7 @@ struct intel_vgpu_creation_params {
        __u64 low_gm_sz;  /* in MB */
        __u64 high_gm_sz; /* in MB */
        __u64 fence_sz;
+       __u64 resolution;
        __s32 primary;
        __u64 vgpu_id;
 };
@@ -449,6 +452,11 @@ struct intel_gvt_ops {
 };
 
 
+enum {
+       GVT_FAILSAFE_UNSUPPORTED_GUEST,
+       GVT_FAILSAFE_INSUFFICIENT_RESOURCE,
+};
+
 #include "mpt.h"
 
 #endif
index 1d450627ff654025b56119a181864ed4f2b1c607..6da9ae1618e35e39fb06e99caec00e1455132873 100644 (file)
@@ -121,6 +121,7 @@ static int new_mmio_info(struct intel_gvt *gvt,
                info->size = size;
                info->length = (i + 4) < end ? 4 : (end - i);
                info->addr_mask = addr_mask;
+               info->ro_mask = ro_mask;
                info->device = device;
                info->read = read ? read : intel_vgpu_default_mmio_read;
                info->write = write ? write : intel_vgpu_default_mmio_write;
@@ -150,15 +151,42 @@ static int render_mmio_to_ring_id(struct intel_gvt *gvt, unsigned int reg)
 #define fence_num_to_offset(num) \
        (num * 8 + i915_mmio_reg_offset(FENCE_REG_GEN6_LO(0)))
 
+
+static void enter_failsafe_mode(struct intel_vgpu *vgpu, int reason)
+{
+       switch (reason) {
+       case GVT_FAILSAFE_UNSUPPORTED_GUEST:
+               pr_err("Detected your guest driver doesn't support GVT-g.\n");
+               break;
+       case GVT_FAILSAFE_INSUFFICIENT_RESOURCE:
+               pr_err("Graphics resource is not enough for the guest\n");
+       default:
+               break;
+       }
+       pr_err("Now vgpu %d will enter failsafe mode.\n", vgpu->id);
+       vgpu->failsafe = true;
+}
+
 static int sanitize_fence_mmio_access(struct intel_vgpu *vgpu,
                unsigned int fence_num, void *p_data, unsigned int bytes)
 {
        if (fence_num >= vgpu_fence_sz(vgpu)) {
-               gvt_err("vgpu%d: found oob fence register access\n",
-                               vgpu->id);
-               gvt_err("vgpu%d: total fence num %d access fence num %d\n",
-                               vgpu->id, vgpu_fence_sz(vgpu), fence_num);
+
+               /* If the guest accesses OOB fence registers without
+                * reading pv_info first, treat it as a guest that does
+                * not support GVT and let the vGPU enter failsafe mode.
+                */
+               if (!vgpu->pv_notified)
+                       enter_failsafe_mode(vgpu,
+                                       GVT_FAILSAFE_UNSUPPORTED_GUEST);
+
+               if (!vgpu->mmio.disable_warn_untrack) {
+                       gvt_vgpu_err("found oob fence register access\n");
+                       gvt_vgpu_err("total fence %d, access fence %d\n",
+                                       vgpu_fence_sz(vgpu), fence_num);
+               }
                memset(p_data, 0, bytes);
+               return -EINVAL;
        }
        return 0;
 }
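
The hunk above encodes a simple heuristic: a guest that reaches for out-of-range fence registers before it has ever read the PVINFO page is presumed to run a driver with no GVT-g support, so the vGPU drops into failsafe mode instead of flooding the log. A stripped-down sketch of that state machine (names illustrative, not the driver's):

    #include <stdbool.h>
    #include <stdio.h>

    struct guest {
            bool pv_notified;   /* guest has read the PVINFO page */
            bool failsafe;      /* degrade to bland, log-quiet emulation */
    };

    /* An out-of-bounds access before any PVINFO read marks the guest
     * as GVT-unaware and trips failsafe mode once.
     */
    static void on_oob_fence_access(struct guest *g)
    {
            if (!g->pv_notified && !g->failsafe) {
                    fprintf(stderr, "guest looks GVT-unaware, entering failsafe\n");
                    g->failsafe = true;
            }
    }

    int main(void)
    {
            struct guest g = { false, false };

            on_oob_fence_access(&g);
            return g.failsafe ? 0 : 1;
    }
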
@@ -219,7 +247,7 @@ static int mul_force_wake_write(struct intel_vgpu *vgpu,
                        break;
                default:
                        /*should not hit here*/
-                       gvt_err("invalid forcewake offset 0x%x\n", offset);
+                       gvt_vgpu_err("invalid forcewake offset 0x%x\n", offset);
                        return -EINVAL;
                }
        } else {
@@ -369,6 +397,74 @@ static int pipeconf_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
        return 0;
 }
 
+/* sorted in ascending order */
+static i915_reg_t force_nonpriv_white_list[] = {
+       GEN9_CS_DEBUG_MODE1, //_MMIO(0x20ec)
+       GEN9_CTX_PREEMPT_REG,//_MMIO(0x2248)
+       GEN8_CS_CHICKEN1,//_MMIO(0x2580)
+       _MMIO(0x2690),
+       _MMIO(0x2694),
+       _MMIO(0x2698),
+       _MMIO(0x4de0),
+       _MMIO(0x4de4),
+       _MMIO(0x4dfc),
+       GEN7_COMMON_SLICE_CHICKEN1,//_MMIO(0x7010)
+       _MMIO(0x7014),
+       HDC_CHICKEN0,//_MMIO(0x7300)
+       GEN8_HDC_CHICKEN1,//_MMIO(0x7304)
+       _MMIO(0x7700),
+       _MMIO(0x7704),
+       _MMIO(0x7708),
+       _MMIO(0x770c),
+       _MMIO(0xb110),
+       GEN8_L3SQCREG4,//_MMIO(0xb118)
+       _MMIO(0xe100),
+       _MMIO(0xe18c),
+       _MMIO(0xe48c),
+       _MMIO(0xe5f4),
+};
+
+/* a simple bsearch */
+static inline bool in_whitelist(unsigned int reg)
+{
+       int left = 0, right = ARRAY_SIZE(force_nonpriv_white_list);
+       i915_reg_t *array = force_nonpriv_white_list;
+
+       while (left < right) {
+               int mid = (left + right)/2;
+
+               if (reg > array[mid].reg)
+                       left = mid + 1;
+               else if (reg < array[mid].reg)
+                       right = mid;
+               else
+                       return true;
+       }
+       return false;
+}
+
+static int force_nonpriv_write(struct intel_vgpu *vgpu,
+       unsigned int offset, void *p_data, unsigned int bytes)
+{
+       u32 reg_nonpriv = *(u32 *)p_data;
+       int ret = -EINVAL;
+
+       if ((bytes != 4) || ((offset & (bytes - 1)) != 0)) {
+               gvt_err("vgpu(%d) Invalid FORCE_NONPRIV offset %x(%dB)\n",
+                       vgpu->id, offset, bytes);
+               return ret;
+       }
+
+       if (in_whitelist(reg_nonpriv)) {
+               ret = intel_vgpu_default_mmio_write(vgpu, offset, p_data,
+                       bytes);
+       } else {
+               gvt_err("vgpu(%d) Invalid FORCE_NONPRIV write %x\n",
+                       vgpu->id, reg_nonpriv);
+       }
+       return ret;
+}
+
 static int ddi_buf_ctl_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
                void *p_data, unsigned int bytes)
 {
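
in_whitelist() above open-codes a binary search over the ascending-sorted register list; the kernel also offers a generic bsearch() in lib/bsearch.c. In user space the same membership test could lean on bsearch(3), as in this sketch that assumes offsets are bare 32-bit values:

    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>

    static const uint32_t whitelist[] = {
            0x20ec, 0x2248, 0x2580, 0x2690, 0x7010, 0xb118, 0xe5f4,
    };      /* must stay sorted ascending, as the list's comment requires */

    static int cmp_u32(const void *key, const void *elem)
    {
            uint32_t k = *(const uint32_t *)key, e = *(const uint32_t *)elem;

            return (k > e) - (k < e);
    }

    static int in_whitelist(uint32_t reg)
    {
            return bsearch(&reg, whitelist,
                           sizeof(whitelist) / sizeof(whitelist[0]),
                           sizeof(whitelist[0]), cmp_u32) != NULL;
    }

    int main(void)
    {
            printf("%d %d\n", in_whitelist(0x2580), in_whitelist(0x1234)); /* 1 0 */
            return 0;
    }
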
@@ -432,7 +528,7 @@ static int check_fdi_rx_train_status(struct intel_vgpu *vgpu,
                fdi_tx_train_bits = FDI_LINK_TRAIN_PATTERN_2;
                fdi_iir_check_bits = FDI_RX_SYMBOL_LOCK;
        } else {
-               gvt_err("Invalid train pattern %d\n", train_pattern);
+               gvt_vgpu_err("Invalid train pattern %d\n", train_pattern);
                return -EINVAL;
        }
 
@@ -490,7 +586,7 @@ static int update_fdi_rx_iir_status(struct intel_vgpu *vgpu,
        else if (FDI_RX_IMR_TO_PIPE(offset) != INVALID_INDEX)
                index = FDI_RX_IMR_TO_PIPE(offset);
        else {
-               gvt_err("Unsupport registers %x\n", offset);
+               gvt_vgpu_err("Unsupported register %x\n", offset);
                return -EINVAL;
        }
 
@@ -720,7 +816,7 @@ static int dp_aux_ch_ctl_mmio_write(struct intel_vgpu *vgpu,
        u32 data;
 
        if (!dpy_is_valid_port(port_index)) {
-               gvt_err("GVT(%d): Unsupported DP port access!\n", vgpu->id);
+               gvt_vgpu_err("Unsupported DP port access!\n");
                return 0;
        }
 
@@ -874,6 +970,14 @@ static int dp_aux_ch_ctl_mmio_write(struct intel_vgpu *vgpu,
        return 0;
 }
 
+static int mbctl_write(struct intel_vgpu *vgpu, unsigned int offset,
+               void *p_data, unsigned int bytes)
+{
+       *(u32 *)p_data &= (~GEN6_MBCTL_ENABLE_BOOT_FETCH);
+       write_vreg(vgpu, offset, p_data, bytes);
+       return 0;
+}
+
 static int vga_control_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
                void *p_data, unsigned int bytes)
 {
@@ -918,8 +1022,7 @@ static void write_virtual_sbi_register(struct intel_vgpu *vgpu,
 
        if (i == num) {
                if (num == SBI_REG_MAX) {
-                       gvt_err("vgpu%d: SBI caching meets maximum limits\n",
-                                       vgpu->id);
+                       gvt_vgpu_err("SBI caching has reached its maximum limit\n");
                        return;
                }
                display->sbi.number++;
@@ -999,8 +1102,9 @@ static int pvinfo_mmio_read(struct intel_vgpu *vgpu, unsigned int offset,
                break;
        }
        if (invalid_read)
-               gvt_err("invalid pvinfo read: [%x:%x] = %x\n",
+               gvt_vgpu_err("invalid pvinfo read: [%x:%x] = %x\n",
                                offset, bytes, *(u32 *)p_data);
+       vgpu->pv_notified = true;
        return 0;
 }
 
@@ -1026,7 +1130,7 @@ static int handle_g2v_notification(struct intel_vgpu *vgpu, int notification)
        case 1: /* Remove this in guest driver. */
                break;
        default:
-               gvt_err("Invalid PV notification %d\n", notification);
+               gvt_vgpu_err("Invalid PV notification %d\n", notification);
        }
        return ret;
 }
@@ -1039,7 +1143,7 @@ static int send_display_ready_uevent(struct intel_vgpu *vgpu, int ready)
        char vmid_str[20];
        char display_ready_str[20];
 
-       snprintf(display_ready_str, 20, "GVT_DISPLAY_READY=%d\n", ready);
+       snprintf(display_ready_str, 20, "GVT_DISPLAY_READY=%d", ready);
        env[0] = display_ready_str;
 
        snprintf(vmid_str, 20, "VMID=%d", vgpu->id);
@@ -1078,8 +1182,11 @@ static int pvinfo_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
        case _vgtif_reg(execlist_context_descriptor_lo):
        case _vgtif_reg(execlist_context_descriptor_hi):
                break;
+       case _vgtif_reg(rsv5[0])..._vgtif_reg(rsv5[3]):
+               enter_failsafe_mode(vgpu, GVT_FAILSAFE_INSUFFICIENT_RESOURCE);
+               break;
        default:
-               gvt_err("invalid pvinfo write offset %x bytes %x data %x\n",
+               gvt_vgpu_err("invalid pvinfo write offset %x bytes %x data %x\n",
                                offset, bytes, data);
                break;
        }
@@ -1203,26 +1310,37 @@ static int mailbox_write(struct intel_vgpu *vgpu, unsigned int offset,
        u32 *data0 = &vgpu_vreg(vgpu, GEN6_PCODE_DATA);
 
        switch (cmd) {
-       case 0x6:
-               /**
-                * "Read memory latency" command on gen9.
-                * Below memory latency values are read
-                * from skylake platform.
-                */
-               if (!*data0)
-                       *data0 = 0x1e1a1100;
-               else
-                       *data0 = 0x61514b3d;
+       case GEN9_PCODE_READ_MEM_LATENCY:
+               if (IS_SKYLAKE(vgpu->gvt->dev_priv)) {
+                       /*
+                        * "Read memory latency" command on gen9.
+                        * The memory latency values below were read
+                        * from the Skylake platform.
+                        */
+                       if (!*data0)
+                               *data0 = 0x1e1a1100;
+                       else
+                               *data0 = 0x61514b3d;
+               }
                break;
-       case 0x5:
+       case SKL_PCODE_CDCLK_CONTROL:
+               if (IS_SKYLAKE(vgpu->gvt->dev_priv))
+                       *data0 = SKL_CDCLK_READY_FOR_CHANGE;
+               break;
+       case GEN6_PCODE_READ_RC6VIDS:
                *data0 |= 0x1;
                break;
        }
 
        gvt_dbg_core("VM(%d) write %x to mailbox, return data0 %x\n",
                     vgpu->id, value, *data0);
-
-       value &= ~(1 << 31);
+       /*
+        * PCODE_READY clear means the pcode mailbox is ready for a
+        * read/write; PCODE_ERROR_MASK clear means no error occurred.
+        * GVT-g always emulates pcode accesses as successful and ready
+        * for access anytime, since no real physical registers are touched.
+        */
+       value &= ~(GEN6_PCODE_READY | GEN6_PCODE_ERROR_MASK);
        return intel_vgpu_default_mmio_write(vgpu, offset, &value, bytes);
 }
 
@@ -1302,7 +1420,8 @@ static int elsp_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
        if (execlist->elsp_dwords.index == 3) {
                ret = intel_vgpu_submit_execlist(vgpu, ring_id);
                if(ret)
-                       gvt_err("fail submit workload on ring %d\n", ring_id);
+                       gvt_vgpu_err("failed to submit workload on ring %d\n",
+                               ring_id);
        }
 
        ++execlist->elsp_dwords.index;
@@ -1318,6 +1437,17 @@ static int ring_mode_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
        bool enable_execlist;
 
        write_vreg(vgpu, offset, p_data, bytes);
+
+       /* When PPGTT mode is enabled, check whether the guest has
+        * called pvinfo; if not, treat it as a non-GVT-g-aware guest
+        * and stop emulating its cfg space, mmio, gtt, etc.
+        */
+       if (((data & _MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE)) ||
+                       (data & _MASKED_BIT_ENABLE(GFX_RUN_LIST_ENABLE)))
+                       && !vgpu->pv_notified) {
+               enter_failsafe_mode(vgpu, GVT_FAILSAFE_UNSUPPORTED_GUEST);
+               return 0;
+       }
        if ((data & _MASKED_BIT_ENABLE(GFX_RUN_LIST_ENABLE))
                        || (data & _MASKED_BIT_DISABLE(GFX_RUN_LIST_ENABLE))) {
                enable_execlist = !!(data & GFX_RUN_LIST_ENABLE);
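
ring_mode_mmio_write() can tell an explicit enable from an explicit disable in a single write because GFX_MODE-style registers are masked: the upper 16 bits select which of the lower 16 bits the write actually touches, so _MASKED_BIT_ENABLE(b) sets both the bit and its mask while _MASKED_BIT_DISABLE(b) sets only the mask. A sketch of the convention; the macro bodies match the common i915 shape but are assumptions here:

    #include <stdint.h>
    #include <stdio.h>

    #define MASKED_BIT_ENABLE(a)   (((a) << 16) | (a))
    #define MASKED_BIT_DISABLE(a)  ((a) << 16)

    #define GFX_RUN_LIST_ENABLE    (1u << 15)   /* illustrative bit */

    /* Hardware-style masked write: only bits whose mask bit is set change. */
    static uint16_t apply_masked_write(uint16_t reg, uint32_t val)
    {
            uint16_t mask = val >> 16;

            return (reg & ~mask) | (val & mask);
    }

    int main(void)
    {
            uint16_t reg = 0;

            reg = apply_masked_write(reg, MASKED_BIT_ENABLE(GFX_RUN_LIST_ENABLE));
            printf("enabled:  %#x\n", reg);     /* bit 15 set */
            reg = apply_masked_write(reg, MASKED_BIT_DISABLE(GFX_RUN_LIST_ENABLE));
            printf("disabled: %#x\n", reg);     /* bit 15 clear */
            return 0;
    }
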
@@ -1400,6 +1530,9 @@ static int ring_reset_ctl_write(struct intel_vgpu *vgpu,
 #define MMIO_GM(reg, d, r, w) \
        MMIO_F(reg, 4, F_GMADR, 0xFFFFF000, 0, d, r, w)
 
+#define MMIO_GM_RDR(reg, d, r, w) \
+       MMIO_F(reg, 4, F_GMADR | F_CMD_ACCESS, 0xFFFFF000, 0, d, r, w)
+
 #define MMIO_RO(reg, d, f, rm, r, w) \
        MMIO_F(reg, 4, F_RO | f, 0, rm, d, r, w)
 
@@ -1419,6 +1552,9 @@ static int ring_reset_ctl_write(struct intel_vgpu *vgpu,
 #define MMIO_RING_GM(prefix, d, r, w) \
        MMIO_RING_F(prefix, 4, F_GMADR, 0xFFFF0000, 0, d, r, w)
 
+#define MMIO_RING_GM_RDR(prefix, d, r, w) \
+       MMIO_RING_F(prefix, 4, F_GMADR | F_CMD_ACCESS, 0xFFFF0000, 0, d, r, w)
+
 #define MMIO_RING_RO(prefix, d, f, rm, r, w) \
        MMIO_RING_F(prefix, 4, F_RO | f, 0, rm, d, r, w)
 
@@ -1427,73 +1563,81 @@ static int init_generic_mmio_info(struct intel_gvt *gvt)
        struct drm_i915_private *dev_priv = gvt->dev_priv;
        int ret;
 
-       MMIO_RING_DFH(RING_IMR, D_ALL, 0, NULL, intel_vgpu_reg_imr_handler);
+       MMIO_RING_DFH(RING_IMR, D_ALL, F_CMD_ACCESS, NULL,
+               intel_vgpu_reg_imr_handler);
 
        MMIO_DFH(SDEIMR, D_ALL, 0, NULL, intel_vgpu_reg_imr_handler);
        MMIO_DFH(SDEIER, D_ALL, 0, NULL, intel_vgpu_reg_ier_handler);
        MMIO_DFH(SDEIIR, D_ALL, 0, NULL, intel_vgpu_reg_iir_handler);
        MMIO_D(SDEISR, D_ALL);
 
-       MMIO_RING_D(RING_HWSTAM, D_ALL);
+       MMIO_RING_DFH(RING_HWSTAM, D_ALL, F_CMD_ACCESS, NULL, NULL);
 
-       MMIO_GM(RENDER_HWS_PGA_GEN7, D_ALL, NULL, NULL);
-       MMIO_GM(BSD_HWS_PGA_GEN7, D_ALL, NULL, NULL);
-       MMIO_GM(BLT_HWS_PGA_GEN7, D_ALL, NULL, NULL);
-       MMIO_GM(VEBOX_HWS_PGA_GEN7, D_ALL, NULL, NULL);
+       MMIO_GM_RDR(RENDER_HWS_PGA_GEN7, D_ALL, NULL, NULL);
+       MMIO_GM_RDR(BSD_HWS_PGA_GEN7, D_ALL, NULL, NULL);
+       MMIO_GM_RDR(BLT_HWS_PGA_GEN7, D_ALL, NULL, NULL);
+       MMIO_GM_RDR(VEBOX_HWS_PGA_GEN7, D_ALL, NULL, NULL);
 
 #define RING_REG(base) (base + 0x28)
-       MMIO_RING_D(RING_REG, D_ALL);
+       MMIO_RING_DFH(RING_REG, D_ALL, F_CMD_ACCESS, NULL, NULL);
 #undef RING_REG
 
 #define RING_REG(base) (base + 0x134)
-       MMIO_RING_D(RING_REG, D_ALL);
+       MMIO_RING_DFH(RING_REG, D_ALL, F_CMD_ACCESS, NULL, NULL);
 #undef RING_REG
 
-       MMIO_GM(0x2148, D_ALL, NULL, NULL);
-       MMIO_GM(CCID, D_ALL, NULL, NULL);
-       MMIO_GM(0x12198, D_ALL, NULL, NULL);
+       MMIO_GM_RDR(0x2148, D_ALL, NULL, NULL);
+       MMIO_GM_RDR(CCID, D_ALL, NULL, NULL);
+       MMIO_GM_RDR(0x12198, D_ALL, NULL, NULL);
        MMIO_D(GEN7_CXT_SIZE, D_ALL);
 
-       MMIO_RING_D(RING_TAIL, D_ALL);
-       MMIO_RING_D(RING_HEAD, D_ALL);
-       MMIO_RING_D(RING_CTL, D_ALL);
-       MMIO_RING_D(RING_ACTHD, D_ALL);
-       MMIO_RING_GM(RING_START, D_ALL, NULL, NULL);
+       MMIO_RING_DFH(RING_TAIL, D_ALL, F_CMD_ACCESS, NULL, NULL);
+       MMIO_RING_DFH(RING_HEAD, D_ALL, F_CMD_ACCESS, NULL, NULL);
+       MMIO_RING_DFH(RING_CTL, D_ALL, F_CMD_ACCESS, NULL, NULL);
+       MMIO_RING_DFH(RING_ACTHD, D_ALL, F_CMD_ACCESS, NULL, NULL);
+       MMIO_RING_GM_RDR(RING_START, D_ALL, NULL, NULL);
 
        /* RING MODE */
 #define RING_REG(base) (base + 0x29c)
-       MMIO_RING_DFH(RING_REG, D_ALL, F_MODE_MASK, NULL, ring_mode_mmio_write);
+       MMIO_RING_DFH(RING_REG, D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL,
+               ring_mode_mmio_write);
 #undef RING_REG
 
-       MMIO_RING_DFH(RING_MI_MODE, D_ALL, F_MODE_MASK, NULL, NULL);
-       MMIO_RING_DFH(RING_INSTPM, D_ALL, F_MODE_MASK, NULL, NULL);
+       MMIO_RING_DFH(RING_MI_MODE, D_ALL, F_MODE_MASK | F_CMD_ACCESS,
+               NULL, NULL);
+       MMIO_RING_DFH(RING_INSTPM, D_ALL, F_MODE_MASK | F_CMD_ACCESS,
+                       NULL, NULL);
        MMIO_RING_DFH(RING_TIMESTAMP, D_ALL, F_CMD_ACCESS,
                        ring_timestamp_mmio_read, NULL);
        MMIO_RING_DFH(RING_TIMESTAMP_UDW, D_ALL, F_CMD_ACCESS,
                        ring_timestamp_mmio_read, NULL);
 
-       MMIO_DFH(GEN7_GT_MODE, D_ALL, F_MODE_MASK, NULL, NULL);
-       MMIO_DFH(CACHE_MODE_0_GEN7, D_ALL, F_MODE_MASK, NULL, NULL);
+       MMIO_DFH(GEN7_GT_MODE, D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
+       MMIO_DFH(CACHE_MODE_0_GEN7, D_ALL, F_MODE_MASK | F_CMD_ACCESS,
+               NULL, NULL);
        MMIO_DFH(CACHE_MODE_1, D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
-
-       MMIO_DFH(0x20dc, D_ALL, F_MODE_MASK, NULL, NULL);
-       MMIO_DFH(_3D_CHICKEN3, D_ALL, F_MODE_MASK, NULL, NULL);
-       MMIO_DFH(0x2088, D_ALL, F_MODE_MASK, NULL, NULL);
-       MMIO_DFH(0x20e4, D_ALL, F_MODE_MASK, NULL, NULL);
-       MMIO_DFH(0x2470, D_ALL, F_MODE_MASK, NULL, NULL);
-       MMIO_D(GAM_ECOCHK, D_ALL);
-       MMIO_DFH(GEN7_COMMON_SLICE_CHICKEN1, D_ALL, F_MODE_MASK, NULL, NULL);
+       MMIO_DFH(CACHE_MODE_0, D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
+       MMIO_DFH(0x2124, D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
+
+       MMIO_DFH(0x20dc, D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
+       MMIO_DFH(_3D_CHICKEN3, D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
+       MMIO_DFH(0x2088, D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
+       MMIO_DFH(0x20e4, D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
+       MMIO_DFH(0x2470, D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
+       MMIO_DFH(GAM_ECOCHK, D_ALL, F_CMD_ACCESS, NULL, NULL);
+       MMIO_DFH(GEN7_COMMON_SLICE_CHICKEN1, D_ALL, F_MODE_MASK | F_CMD_ACCESS,
+               NULL, NULL);
        MMIO_DFH(COMMON_SLICE_CHICKEN2, D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
-       MMIO_D(0x9030, D_ALL);
-       MMIO_D(0x20a0, D_ALL);
-       MMIO_D(0x2420, D_ALL);
-       MMIO_D(0x2430, D_ALL);
-       MMIO_D(0x2434, D_ALL);
-       MMIO_D(0x2438, D_ALL);
-       MMIO_D(0x243c, D_ALL);
-       MMIO_DFH(0x7018, D_ALL, F_MODE_MASK, NULL, NULL);
+       MMIO_DFH(0x9030, D_ALL, F_CMD_ACCESS, NULL, NULL);
+       MMIO_DFH(0x20a0, D_ALL, F_CMD_ACCESS, NULL, NULL);
+       MMIO_DFH(0x2420, D_ALL, F_CMD_ACCESS, NULL, NULL);
+       MMIO_DFH(0x2430, D_ALL, F_CMD_ACCESS, NULL, NULL);
+       MMIO_DFH(0x2434, D_ALL, F_CMD_ACCESS, NULL, NULL);
+       MMIO_DFH(0x2438, D_ALL, F_CMD_ACCESS, NULL, NULL);
+       MMIO_DFH(0x243c, D_ALL, F_CMD_ACCESS, NULL, NULL);
+       MMIO_DFH(0x7018, D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
        MMIO_DFH(HALF_SLICE_CHICKEN3, D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
-       MMIO_DFH(0xe100, D_ALL, F_MODE_MASK, NULL, NULL);
+       MMIO_DFH(GEN7_HALF_SLICE_CHICKEN1, D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
 
        /* display */
        MMIO_F(0x60220, 0x20, 0, 0, 0, D_ALL, NULL, NULL);
@@ -2022,8 +2166,8 @@ static int init_generic_mmio_info(struct intel_gvt *gvt)
        MMIO_D(FORCEWAKE_ACK, D_ALL);
        MMIO_D(GEN6_GT_CORE_STATUS, D_ALL);
        MMIO_D(GEN6_GT_THREAD_STATUS_REG, D_ALL);
-       MMIO_D(GTFIFODBG, D_ALL);
-       MMIO_D(GTFIFOCTL, D_ALL);
+       MMIO_DFH(GTFIFODBG, D_ALL, F_CMD_ACCESS, NULL, NULL);
+       MMIO_DFH(GTFIFOCTL, D_ALL, F_CMD_ACCESS, NULL, NULL);
        MMIO_DH(FORCEWAKE_MT, D_PRE_SKL, NULL, mul_force_wake_write);
        MMIO_DH(FORCEWAKE_ACK_HSW, D_HSW | D_BDW, NULL, NULL);
        MMIO_D(ECOBUS, D_ALL);
@@ -2080,7 +2224,7 @@ static int init_generic_mmio_info(struct intel_gvt *gvt)
 
        MMIO_F(0x4f000, 0x90, 0, 0, 0, D_ALL, NULL, NULL);
 
-       MMIO_D(GEN6_PCODE_MAILBOX, D_PRE_SKL);
+       MMIO_D(GEN6_PCODE_MAILBOX, D_PRE_BDW);
        MMIO_D(GEN6_PCODE_DATA, D_ALL);
        MMIO_D(0x13812c, D_ALL);
        MMIO_DH(GEN7_ERR_INT, D_ALL, NULL, NULL);
@@ -2102,7 +2246,7 @@ static int init_generic_mmio_info(struct intel_gvt *gvt)
        MMIO_D(0x7180, D_ALL);
        MMIO_D(0x7408, D_ALL);
        MMIO_D(0x7c00, D_ALL);
-       MMIO_D(GEN6_MBCTL, D_ALL);
+       MMIO_DH(GEN6_MBCTL, D_ALL, NULL, mbctl_write);
        MMIO_D(0x911c, D_ALL);
        MMIO_D(0x9120, D_ALL);
        MMIO_DFH(GEN7_UCGCTL4, D_ALL, F_CMD_ACCESS, NULL, NULL);
@@ -2159,36 +2303,35 @@ static int init_generic_mmio_info(struct intel_gvt *gvt)
        MMIO_D(0x1a054, D_ALL);
 
        MMIO_D(0x44070, D_ALL);
-
-       MMIO_D(0x215c, D_HSW_PLUS);
+       MMIO_DFH(0x215c, D_HSW_PLUS, F_CMD_ACCESS, NULL, NULL);
        MMIO_DFH(0x2178, D_ALL, F_CMD_ACCESS, NULL, NULL);
        MMIO_DFH(0x217c, D_ALL, F_CMD_ACCESS, NULL, NULL);
        MMIO_DFH(0x12178, D_ALL, F_CMD_ACCESS, NULL, NULL);
        MMIO_DFH(0x1217c, D_ALL, F_CMD_ACCESS, NULL, NULL);
 
-       MMIO_F(0x2290, 8, 0, 0, 0, D_HSW_PLUS, NULL, NULL);
-       MMIO_D(GEN7_OACONTROL, D_HSW);
+       MMIO_F(0x2290, 8, F_CMD_ACCESS, 0, 0, D_HSW_PLUS, NULL, NULL);
+       MMIO_DFH(GEN7_OACONTROL, D_HSW, F_CMD_ACCESS, NULL, NULL);
        MMIO_D(0x2b00, D_BDW_PLUS);
        MMIO_D(0x2360, D_BDW_PLUS);
-       MMIO_F(0x5200, 32, 0, 0, 0, D_ALL, NULL, NULL);
-       MMIO_F(0x5240, 32, 0, 0, 0, D_ALL, NULL, NULL);
-       MMIO_F(0x5280, 16, 0, 0, 0, D_ALL, NULL, NULL);
+       MMIO_F(0x5200, 32, F_CMD_ACCESS, 0, 0, D_ALL, NULL, NULL);
+       MMIO_F(0x5240, 32, F_CMD_ACCESS, 0, 0, D_ALL, NULL, NULL);
+       MMIO_F(0x5280, 16, F_CMD_ACCESS, 0, 0, D_ALL, NULL, NULL);
 
        MMIO_DFH(0x1c17c, D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
        MMIO_DFH(0x1c178, D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
-       MMIO_D(BCS_SWCTRL, D_ALL);
-
-       MMIO_F(HS_INVOCATION_COUNT, 8, 0, 0, 0, D_ALL, NULL, NULL);
-       MMIO_F(DS_INVOCATION_COUNT, 8, 0, 0, 0, D_ALL, NULL, NULL);
-       MMIO_F(IA_VERTICES_COUNT, 8, 0, 0, 0, D_ALL, NULL, NULL);
-       MMIO_F(IA_PRIMITIVES_COUNT, 8, 0, 0, 0, D_ALL, NULL, NULL);
-       MMIO_F(VS_INVOCATION_COUNT, 8, 0, 0, 0, D_ALL, NULL, NULL);
-       MMIO_F(GS_INVOCATION_COUNT, 8, 0, 0, 0, D_ALL, NULL, NULL);
-       MMIO_F(GS_PRIMITIVES_COUNT, 8, 0, 0, 0, D_ALL, NULL, NULL);
-       MMIO_F(CL_INVOCATION_COUNT, 8, 0, 0, 0, D_ALL, NULL, NULL);
-       MMIO_F(CL_PRIMITIVES_COUNT, 8, 0, 0, 0, D_ALL, NULL, NULL);
-       MMIO_F(PS_INVOCATION_COUNT, 8, 0, 0, 0, D_ALL, NULL, NULL);
-       MMIO_F(PS_DEPTH_COUNT, 8, 0, 0, 0, D_ALL, NULL, NULL);
+       MMIO_DFH(BCS_SWCTRL, D_ALL, F_CMD_ACCESS, NULL, NULL);
+
+       MMIO_F(HS_INVOCATION_COUNT, 8, F_CMD_ACCESS, 0, 0, D_ALL, NULL, NULL);
+       MMIO_F(DS_INVOCATION_COUNT, 8, F_CMD_ACCESS, 0, 0, D_ALL, NULL, NULL);
+       MMIO_F(IA_VERTICES_COUNT, 8, F_CMD_ACCESS, 0, 0, D_ALL, NULL, NULL);
+       MMIO_F(IA_PRIMITIVES_COUNT, 8, F_CMD_ACCESS, 0, 0, D_ALL, NULL, NULL);
+       MMIO_F(VS_INVOCATION_COUNT, 8, F_CMD_ACCESS, 0, 0, D_ALL, NULL, NULL);
+       MMIO_F(GS_INVOCATION_COUNT, 8, F_CMD_ACCESS, 0, 0, D_ALL, NULL, NULL);
+       MMIO_F(GS_PRIMITIVES_COUNT, 8, F_CMD_ACCESS, 0, 0, D_ALL, NULL, NULL);
+       MMIO_F(CL_INVOCATION_COUNT, 8, F_CMD_ACCESS, 0, 0, D_ALL, NULL, NULL);
+       MMIO_F(CL_PRIMITIVES_COUNT, 8, F_CMD_ACCESS, 0, 0, D_ALL, NULL, NULL);
+       MMIO_F(PS_INVOCATION_COUNT, 8, F_CMD_ACCESS, 0, 0, D_ALL, NULL, NULL);
+       MMIO_F(PS_DEPTH_COUNT, 8, F_CMD_ACCESS, 0, 0, D_ALL, NULL, NULL);
        MMIO_DH(0x4260, D_BDW_PLUS, NULL, gvt_reg_tlb_control_handler);
        MMIO_DH(0x4264, D_BDW_PLUS, NULL, gvt_reg_tlb_control_handler);
        MMIO_DH(0x4268, D_BDW_PLUS, NULL, gvt_reg_tlb_control_handler);
@@ -2196,6 +2339,17 @@ static int init_generic_mmio_info(struct intel_gvt *gvt)
        MMIO_DH(0x4270, D_BDW_PLUS, NULL, gvt_reg_tlb_control_handler);
        MMIO_DFH(0x4094, D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
 
+       MMIO_DFH(ARB_MODE, D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
+       MMIO_RING_GM_RDR(RING_BBADDR, D_ALL, NULL, NULL);
+       MMIO_DFH(0x2220, D_ALL, F_CMD_ACCESS, NULL, NULL);
+       MMIO_DFH(0x12220, D_ALL, F_CMD_ACCESS, NULL, NULL);
+       MMIO_DFH(0x22220, D_ALL, F_CMD_ACCESS, NULL, NULL);
+       MMIO_RING_DFH(RING_SYNC_1, D_ALL, F_CMD_ACCESS, NULL, NULL);
+       MMIO_RING_DFH(RING_SYNC_0, D_ALL, F_CMD_ACCESS, NULL, NULL);
+       MMIO_DFH(0x22178, D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
+       MMIO_DFH(0x1a178, D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
+       MMIO_DFH(0x1a17c, D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
+       MMIO_DFH(0x2217c, D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
        return 0;
 }
 
@@ -2204,7 +2358,7 @@ static int init_broadwell_mmio_info(struct intel_gvt *gvt)
        struct drm_i915_private *dev_priv = gvt->dev_priv;
        int ret;
 
-       MMIO_DH(RING_IMR(GEN8_BSD2_RING_BASE), D_BDW_PLUS, NULL,
+       MMIO_DFH(RING_IMR(GEN8_BSD2_RING_BASE), D_BDW_PLUS, F_CMD_ACCESS, NULL,
                        intel_vgpu_reg_imr_handler);
 
        MMIO_DH(GEN8_GT_IMR(0), D_BDW_PLUS, NULL, intel_vgpu_reg_imr_handler);
@@ -2269,24 +2423,31 @@ static int init_broadwell_mmio_info(struct intel_gvt *gvt)
        MMIO_DH(GEN8_MASTER_IRQ, D_BDW_PLUS, NULL,
                intel_vgpu_reg_master_irq_handler);
 
-       MMIO_D(RING_HWSTAM(GEN8_BSD2_RING_BASE), D_BDW_PLUS);
-       MMIO_D(0x1c134, D_BDW_PLUS);
-
-       MMIO_D(RING_TAIL(GEN8_BSD2_RING_BASE), D_BDW_PLUS);
-       MMIO_D(RING_HEAD(GEN8_BSD2_RING_BASE),  D_BDW_PLUS);
-       MMIO_GM(RING_START(GEN8_BSD2_RING_BASE), D_BDW_PLUS, NULL, NULL);
-       MMIO_D(RING_CTL(GEN8_BSD2_RING_BASE), D_BDW_PLUS);
-       MMIO_D(RING_ACTHD(GEN8_BSD2_RING_BASE), D_BDW_PLUS);
-       MMIO_D(RING_ACTHD_UDW(GEN8_BSD2_RING_BASE), D_BDW_PLUS);
-       MMIO_DFH(0x1c29c, D_BDW_PLUS, F_MODE_MASK, NULL, ring_mode_mmio_write);
-       MMIO_DFH(RING_MI_MODE(GEN8_BSD2_RING_BASE), D_BDW_PLUS, F_MODE_MASK,
-                       NULL, NULL);
-       MMIO_DFH(RING_INSTPM(GEN8_BSD2_RING_BASE), D_BDW_PLUS, F_MODE_MASK,
-                       NULL, NULL);
+       MMIO_DFH(RING_HWSTAM(GEN8_BSD2_RING_BASE), D_BDW_PLUS,
+               F_CMD_ACCESS, NULL, NULL);
+       MMIO_DFH(0x1c134, D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
+
+       MMIO_DFH(RING_TAIL(GEN8_BSD2_RING_BASE), D_BDW_PLUS, F_CMD_ACCESS,
+               NULL, NULL);
+       MMIO_DFH(RING_HEAD(GEN8_BSD2_RING_BASE),  D_BDW_PLUS,
+               F_CMD_ACCESS, NULL, NULL);
+       MMIO_GM_RDR(RING_START(GEN8_BSD2_RING_BASE), D_BDW_PLUS, NULL, NULL);
+       MMIO_DFH(RING_CTL(GEN8_BSD2_RING_BASE), D_BDW_PLUS, F_CMD_ACCESS,
+               NULL, NULL);
+       MMIO_DFH(RING_ACTHD(GEN8_BSD2_RING_BASE), D_BDW_PLUS,
+               F_CMD_ACCESS, NULL, NULL);
+       MMIO_DFH(RING_ACTHD_UDW(GEN8_BSD2_RING_BASE), D_BDW_PLUS,
+               F_CMD_ACCESS, NULL, NULL);
+       MMIO_DFH(0x1c29c, D_BDW_PLUS, F_MODE_MASK | F_CMD_ACCESS, NULL,
+               ring_mode_mmio_write);
+       MMIO_DFH(RING_MI_MODE(GEN8_BSD2_RING_BASE), D_BDW_PLUS,
+               F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
+       MMIO_DFH(RING_INSTPM(GEN8_BSD2_RING_BASE), D_BDW_PLUS,
+               F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
        MMIO_DFH(RING_TIMESTAMP(GEN8_BSD2_RING_BASE), D_BDW_PLUS, F_CMD_ACCESS,
                        ring_timestamp_mmio_read, NULL);
 
-       MMIO_RING_D(RING_ACTHD_UDW, D_BDW_PLUS);
+       MMIO_RING_DFH(RING_ACTHD_UDW, D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
 
 #define RING_REG(base) (base + 0xd0)
        MMIO_RING_F(RING_REG, 4, F_RO, 0,
@@ -2303,13 +2464,16 @@ static int init_broadwell_mmio_info(struct intel_gvt *gvt)
 #undef RING_REG
 
 #define RING_REG(base) (base + 0x234)
-       MMIO_RING_F(RING_REG, 8, F_RO, 0, ~0, D_BDW_PLUS, NULL, NULL);
-       MMIO_F(RING_REG(GEN8_BSD2_RING_BASE), 4, F_RO, 0, ~0LL, D_BDW_PLUS, NULL, NULL);
+       MMIO_RING_F(RING_REG, 8, F_RO | F_CMD_ACCESS, 0, ~0, D_BDW_PLUS,
+               NULL, NULL);
+       MMIO_F(RING_REG(GEN8_BSD2_RING_BASE), 4, F_RO | F_CMD_ACCESS, 0,
+               ~0LL, D_BDW_PLUS, NULL, NULL);
 #undef RING_REG
 
 #define RING_REG(base) (base + 0x244)
-       MMIO_RING_D(RING_REG, D_BDW_PLUS);
-       MMIO_D(RING_REG(GEN8_BSD2_RING_BASE), D_BDW_PLUS);
+       MMIO_RING_DFH(RING_REG, D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
+       MMIO_DFH(RING_REG(GEN8_BSD2_RING_BASE), D_BDW_PLUS, F_CMD_ACCESS,
+               NULL, NULL);
 #undef RING_REG
 
 #define RING_REG(base) (base + 0x370)
@@ -2331,6 +2495,8 @@ static int init_broadwell_mmio_info(struct intel_gvt *gvt)
        MMIO_D(GEN7_MISCCPCTL, D_BDW_PLUS);
        MMIO_D(0x1c054, D_BDW_PLUS);
 
+       MMIO_DH(GEN6_PCODE_MAILBOX, D_BDW_PLUS, NULL, mailbox_write);
+
        MMIO_D(GEN8_PRIVATE_PAT_LO, D_BDW_PLUS);
        MMIO_D(GEN8_PRIVATE_PAT_HI, D_BDW_PLUS);
 
@@ -2341,14 +2507,14 @@ static int init_broadwell_mmio_info(struct intel_gvt *gvt)
        MMIO_F(RING_REG(GEN8_BSD2_RING_BASE), 32, 0, 0, 0, D_BDW_PLUS, NULL, NULL);
 #undef RING_REG
 
-       MMIO_RING_GM(RING_HWS_PGA, D_BDW_PLUS, NULL, NULL);
-       MMIO_GM(0x1c080, D_BDW_PLUS, NULL, NULL);
+       MMIO_RING_GM_RDR(RING_HWS_PGA, D_BDW_PLUS, NULL, NULL);
+       MMIO_GM_RDR(RING_HWS_PGA(GEN8_BSD2_RING_BASE), D_BDW_PLUS, NULL, NULL);
 
        MMIO_DFH(HDC_CHICKEN0, D_BDW_PLUS, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
 
-       MMIO_D(CHICKEN_PIPESL_1(PIPE_A), D_BDW);
-       MMIO_D(CHICKEN_PIPESL_1(PIPE_B), D_BDW);
-       MMIO_D(CHICKEN_PIPESL_1(PIPE_C), D_BDW);
+       MMIO_D(CHICKEN_PIPESL_1(PIPE_A), D_BDW_PLUS);
+       MMIO_D(CHICKEN_PIPESL_1(PIPE_B), D_BDW_PLUS);
+       MMIO_D(CHICKEN_PIPESL_1(PIPE_C), D_BDW_PLUS);
 
        MMIO_D(WM_MISC, D_BDW);
        MMIO_D(BDW_EDP_PSR_BASE, D_BDW);
@@ -2362,27 +2528,31 @@ static int init_broadwell_mmio_info(struct intel_gvt *gvt)
        MMIO_D(GEN8_EU_DISABLE1, D_BDW_PLUS);
        MMIO_D(GEN8_EU_DISABLE2, D_BDW_PLUS);
 
-       MMIO_D(0xfdc, D_BDW);
-       MMIO_DFH(GEN8_ROW_CHICKEN, D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
-       MMIO_D(GEN7_ROW_CHICKEN2, D_BDW_PLUS);
-       MMIO_D(GEN8_UCGCTL6, D_BDW_PLUS);
+       MMIO_D(0xfdc, D_BDW_PLUS);
+       MMIO_DFH(GEN8_ROW_CHICKEN, D_BDW_PLUS, F_MODE_MASK | F_CMD_ACCESS,
+               NULL, NULL);
+       MMIO_DFH(GEN7_ROW_CHICKEN2, D_BDW_PLUS, F_MODE_MASK | F_CMD_ACCESS,
+               NULL, NULL);
+       MMIO_DFH(GEN8_UCGCTL6, D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
 
-       MMIO_D(0xb1f0, D_BDW);
-       MMIO_D(0xb1c0, D_BDW);
+       MMIO_DFH(0xb1f0, D_BDW, F_CMD_ACCESS, NULL, NULL);
+       MMIO_DFH(0xb1c0, D_BDW, F_CMD_ACCESS, NULL, NULL);
        MMIO_DFH(GEN8_L3SQCREG4, D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
-       MMIO_D(0xb100, D_BDW);
-       MMIO_D(0xb10c, D_BDW);
+       MMIO_DFH(0xb100, D_BDW, F_CMD_ACCESS, NULL, NULL);
+       MMIO_DFH(0xb10c, D_BDW, F_CMD_ACCESS, NULL, NULL);
        MMIO_D(0xb110, D_BDW);
 
-       MMIO_DFH(0x24d0, D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
-       MMIO_DFH(0x24d4, D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
-       MMIO_DFH(0x24d8, D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
-       MMIO_DFH(0x24dc, D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
+       MMIO_F(0x24d0, 48, F_CMD_ACCESS, 0, 0, D_BDW_PLUS,
+               NULL, force_nonpriv_write);
 
-       MMIO_D(0x83a4, D_BDW);
+       MMIO_D(0x22040, D_BDW_PLUS);
+       MMIO_D(0x44484, D_BDW_PLUS);
+       MMIO_D(0x4448c, D_BDW_PLUS);
+
+       MMIO_DFH(0x83a4, D_BDW, F_CMD_ACCESS, NULL, NULL);
        MMIO_D(GEN8_L3_LRA_1_GPGPU, D_BDW_PLUS);
 
-       MMIO_D(0x8430, D_BDW);
+       MMIO_DFH(0x8430, D_BDW, F_CMD_ACCESS, NULL, NULL);
 
        MMIO_D(0x110000, D_BDW_PLUS);
 
@@ -2394,10 +2564,19 @@ static int init_broadwell_mmio_info(struct intel_gvt *gvt)
        MMIO_DFH(0xe194, D_BDW_PLUS, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
        MMIO_DFH(0xe188, D_BDW_PLUS, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
        MMIO_DFH(HALF_SLICE_CHICKEN2, D_BDW_PLUS, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
-       MMIO_DFH(0x2580, D_BDW_PLUS, F_MODE_MASK, NULL, NULL);
-
-       MMIO_D(0x2248, D_BDW);
-
+       MMIO_DFH(0x2580, D_BDW_PLUS, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
+
+       MMIO_DFH(0x2248, D_BDW, F_CMD_ACCESS, NULL, NULL);
+
+       MMIO_DFH(0xe220, D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
+       MMIO_DFH(0xe230, D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
+       MMIO_DFH(0xe240, D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
+       MMIO_DFH(0xe260, D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
+       MMIO_DFH(0xe270, D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
+       MMIO_DFH(0xe280, D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
+       MMIO_DFH(0xe2a0, D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
+       MMIO_DFH(0xe2b0, D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
+       MMIO_DFH(0xe2c0, D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
        return 0;
 }
 
@@ -2420,7 +2599,6 @@ static int init_skl_mmio_info(struct intel_gvt *gvt)
        MMIO_D(HSW_PWR_WELL_BIOS, D_SKL);
        MMIO_DH(HSW_PWR_WELL_DRIVER, D_SKL, NULL, skl_power_well_ctl_write);
 
-       MMIO_DH(GEN6_PCODE_MAILBOX, D_SKL, NULL, mailbox_write);
        MMIO_D(0xa210, D_SKL_PLUS);
        MMIO_D(GEN9_MEDIA_PG_IDLE_HYSTERESIS, D_SKL_PLUS);
        MMIO_D(GEN9_RENDER_PG_IDLE_HYSTERESIS, D_SKL_PLUS);
@@ -2578,16 +2756,16 @@ static int init_skl_mmio_info(struct intel_gvt *gvt)
        MMIO_F(0xb020, 0x80, F_CMD_ACCESS, 0, 0, D_SKL, NULL, NULL);
 
        MMIO_D(0xd08, D_SKL);
-       MMIO_D(0x20e0, D_SKL);
-       MMIO_D(0x20ec, D_SKL);
+       MMIO_DFH(0x20e0, D_SKL, F_MODE_MASK, NULL, NULL);
+       MMIO_DFH(0x20ec, D_SKL, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
 
        /* TRTT */
-       MMIO_D(0x4de0, D_SKL);
-       MMIO_D(0x4de4, D_SKL);
-       MMIO_D(0x4de8, D_SKL);
-       MMIO_D(0x4dec, D_SKL);
-       MMIO_D(0x4df0, D_SKL);
-       MMIO_DH(0x4df4, D_SKL, NULL, gen9_trtte_write);
+       MMIO_DFH(0x4de0, D_SKL, F_CMD_ACCESS, NULL, NULL);
+       MMIO_DFH(0x4de4, D_SKL, F_CMD_ACCESS, NULL, NULL);
+       MMIO_DFH(0x4de8, D_SKL, F_CMD_ACCESS, NULL, NULL);
+       MMIO_DFH(0x4dec, D_SKL, F_CMD_ACCESS, NULL, NULL);
+       MMIO_DFH(0x4df0, D_SKL, F_CMD_ACCESS, NULL, NULL);
+       MMIO_DFH(0x4df4, D_SKL, F_CMD_ACCESS, NULL, gen9_trtte_write);
        MMIO_DH(0x4dfc, D_SKL, NULL, gen9_trtt_chicken_write);
 
        MMIO_D(0x45008, D_SKL);
@@ -2611,7 +2789,7 @@ static int init_skl_mmio_info(struct intel_gvt *gvt)
        MMIO_D(0x65f08, D_SKL);
        MMIO_D(0x320f0, D_SKL);
 
-       MMIO_D(_REG_VCS2_EXCC, D_SKL);
+       MMIO_DFH(_REG_VCS2_EXCC, D_SKL, F_CMD_ACCESS, NULL, NULL);
        MMIO_D(0x70034, D_SKL);
        MMIO_D(0x71034, D_SKL);
        MMIO_D(0x72034, D_SKL);
@@ -2624,6 +2802,9 @@ static int init_skl_mmio_info(struct intel_gvt *gvt)
        MMIO_D(_PLANE_KEYMSK_1(PIPE_C), D_SKL);
 
        MMIO_D(0x44500, D_SKL);
+       MMIO_DFH(GEN9_CSFE_CHICKEN1_RCS, D_SKL_PLUS, F_CMD_ACCESS, NULL, NULL);
+       MMIO_DFH(GEN8_HDC_CHICKEN1, D_SKL, F_MODE_MASK | F_CMD_ACCESS,
+               NULL, NULL);
        return 0;
 }
 
@@ -2813,3 +2994,20 @@ int intel_vgpu_default_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
        write_vreg(vgpu, offset, p_data, bytes);
        return 0;
 }
+
+/**
+ * intel_gvt_in_force_nonpriv_whitelist - check whether an MMIO offset is
+ * a whitelisted force-nonpriv register
+ *
+ * @gvt: a GVT device
+ * @offset: register offset
+ *
+ * Returns:
+ * True if the register is in the force-nonpriv whitelist;
+ * false otherwise.
+ */
+bool intel_gvt_in_force_nonpriv_whitelist(struct intel_gvt *gvt,
+                                         unsigned int offset)
+{
+       return in_whitelist(offset);
+}
index 0f7f5d97f5829d65aeaf7392d0fcb4b19fd4d713..d641214578a7dc6631e866bbc91c2d38f3e95a76 100644 (file)
@@ -96,10 +96,10 @@ static int gvt_dma_map_iova(struct intel_vgpu *vgpu, kvm_pfn_t pfn,
        struct device *dev = &vgpu->gvt->dev_priv->drm.pdev->dev;
        dma_addr_t daddr;
 
-       page = pfn_to_page(pfn);
-       if (is_error_page(page))
+       if (unlikely(!pfn_valid(pfn)))
                return -EFAULT;
 
+       page = pfn_to_page(pfn);
        daddr = dma_map_page(dev, page, 0, PAGE_SIZE,
                        PCI_DMA_BIDIRECTIONAL);
        if (dma_mapping_error(dev, daddr))
@@ -295,10 +295,10 @@ static ssize_t description_show(struct kobject *kobj, struct device *dev,
                return 0;
 
        return sprintf(buf, "low_gm_size: %dMB\nhigh_gm_size: %dMB\n"
-                               "fence: %d\n",
-                               BYTES_TO_MB(type->low_gm_size),
-                               BYTES_TO_MB(type->high_gm_size),
-                               type->fence);
+                      "fence: %d\nresolution: %s\n",
+                      BYTES_TO_MB(type->low_gm_size),
+                      BYTES_TO_MB(type->high_gm_size),
+                      type->fence, vgpu_edid_str(type->resolution));
 }
 
 static MDEV_TYPE_ATTR_RO(available_instances);
@@ -426,7 +426,7 @@ static void kvmgt_protect_table_del(struct kvmgt_guest_info *info,
 
 static int intel_vgpu_create(struct kobject *kobj, struct mdev_device *mdev)
 {
-       struct intel_vgpu *vgpu;
+       struct intel_vgpu *vgpu = NULL;
        struct intel_vgpu_type *type;
        struct device *pdev;
        void *gvt;
@@ -437,7 +437,7 @@ static int intel_vgpu_create(struct kobject *kobj, struct mdev_device *mdev)
 
        type = intel_gvt_find_vgpu_type(gvt, kobject_name(kobj));
        if (!type) {
-               gvt_err("failed to find type %s to create\n",
+               gvt_vgpu_err("failed to find type %s to create\n",
                                                kobject_name(kobj));
                ret = -EINVAL;
                goto out;
@@ -446,7 +446,7 @@ static int intel_vgpu_create(struct kobject *kobj, struct mdev_device *mdev)
        vgpu = intel_gvt_ops->vgpu_create(gvt, type);
        if (IS_ERR_OR_NULL(vgpu)) {
                ret = vgpu == NULL ? -EFAULT : PTR_ERR(vgpu);
-               gvt_err("failed to create intel vgpu: %d\n", ret);
+               gvt_vgpu_err("failed to create intel vgpu: %d\n", ret);
                goto out;
        }
 
@@ -526,7 +526,8 @@ static int intel_vgpu_open(struct mdev_device *mdev)
        ret = vfio_register_notifier(mdev_dev(mdev), VFIO_IOMMU_NOTIFY, &events,
                                &vgpu->vdev.iommu_notifier);
        if (ret != 0) {
-               gvt_err("vfio_register_notifier for iommu failed: %d\n", ret);
+               gvt_vgpu_err("vfio_register_notifier for iommu failed: %d\n",
+                       ret);
                goto out;
        }
 
@@ -534,7 +535,8 @@ static int intel_vgpu_open(struct mdev_device *mdev)
        ret = vfio_register_notifier(mdev_dev(mdev), VFIO_GROUP_NOTIFY, &events,
                                &vgpu->vdev.group_notifier);
        if (ret != 0) {
-               gvt_err("vfio_register_notifier for group failed: %d\n", ret);
+               gvt_vgpu_err("vfio_register_notifier for group failed: %d\n",
+                       ret);
                goto undo_iommu;
        }
 
@@ -635,7 +637,7 @@ static ssize_t intel_vgpu_rw(struct mdev_device *mdev, char *buf,
 
 
        if (index >= VFIO_PCI_NUM_REGIONS) {
-               gvt_err("invalid index: %u\n", index);
+               gvt_vgpu_err("invalid index: %u\n", index);
                return -EINVAL;
        }
 
@@ -669,7 +671,7 @@ static ssize_t intel_vgpu_rw(struct mdev_device *mdev, char *buf,
        case VFIO_PCI_VGA_REGION_INDEX:
        case VFIO_PCI_ROM_REGION_INDEX:
        default:
-               gvt_err("unsupported region: %u\n", index);
+               gvt_vgpu_err("unsupported region: %u\n", index);
        }
 
        return ret == 0 ? count : ret;
@@ -861,7 +863,7 @@ static int intel_vgpu_set_msi_trigger(struct intel_vgpu *vgpu,
 
                trigger = eventfd_ctx_fdget(fd);
                if (IS_ERR(trigger)) {
-                       gvt_err("eventfd_ctx_fdget failed\n");
+                       gvt_vgpu_err("eventfd_ctx_fdget failed\n");
                        return PTR_ERR(trigger);
                }
                vgpu->vdev.msi_trigger = trigger;
@@ -1120,7 +1122,7 @@ static long intel_vgpu_ioctl(struct mdev_device *mdev, unsigned int cmd,
                        ret = vfio_set_irqs_validate_and_prepare(&hdr, max,
                                                VFIO_PCI_NUM_IRQS, &data_size);
                        if (ret) {
-                               gvt_err("intel:vfio_set_irqs_validate_and_prepare failed\n");
+                               gvt_vgpu_err("intel:vfio_set_irqs_validate_and_prepare failed\n");
                                return -EINVAL;
                        }
                        if (data_size) {
@@ -1310,7 +1312,7 @@ static int kvmgt_guest_init(struct mdev_device *mdev)
 
        kvm = vgpu->vdev.kvm;
        if (!kvm || kvm->mm != current->mm) {
-               gvt_err("KVM is required to use Intel vGPU\n");
+               gvt_vgpu_err("KVM is required to use Intel vGPU\n");
                return -ESRCH;
        }
 
@@ -1324,6 +1326,7 @@ static int kvmgt_guest_init(struct mdev_device *mdev)
        vgpu->handle = (unsigned long)info;
        info->vgpu = vgpu;
        info->kvm = kvm;
+       kvm_get_kvm(info->kvm);
 
        kvmgt_protect_table_init(info);
        gvt_cache_init(vgpu);
@@ -1337,12 +1340,15 @@ static int kvmgt_guest_init(struct mdev_device *mdev)
 
 static bool kvmgt_guest_exit(struct kvmgt_guest_info *info)
 {
+       struct intel_vgpu *vgpu = info->vgpu;
+
        if (!info) {
-               gvt_err("kvmgt_guest_info invalid\n");
+               gvt_vgpu_err("kvmgt_guest_info invalid\n");
                return false;
        }
 
        kvm_page_track_unregister_notifier(info->kvm, &info->track_node);
+       kvm_put_kvm(info->kvm);
        kvmgt_protect_table_destroy(info);
        gvt_cache_destroy(info->vgpu);
        vfree(info);
@@ -1383,12 +1389,14 @@ static unsigned long kvmgt_gfn_to_pfn(unsigned long handle, unsigned long gfn)
        unsigned long iova, pfn;
        struct kvmgt_guest_info *info;
        struct device *dev;
+       struct intel_vgpu *vgpu;
        int rc;
 
        if (!handle_valid(handle))
                return INTEL_GVT_INVALID_ADDR;
 
        info = (struct kvmgt_guest_info *)handle;
+       vgpu = info->vgpu;
        iova = gvt_cache_find(info->vgpu, gfn);
        if (iova != INTEL_GVT_INVALID_ADDR)
                return iova;
@@ -1397,13 +1405,14 @@ static unsigned long kvmgt_gfn_to_pfn(unsigned long handle, unsigned long gfn)
        dev = mdev_dev(info->vgpu->vdev.mdev);
        rc = vfio_pin_pages(dev, &gfn, 1, IOMMU_READ | IOMMU_WRITE, &pfn);
        if (rc != 1) {
-               gvt_err("vfio_pin_pages failed for gfn 0x%lx: %d\n", gfn, rc);
+               gvt_vgpu_err("vfio_pin_pages failed for gfn 0x%lx: %d\n",
+                       gfn, rc);
                return INTEL_GVT_INVALID_ADDR;
        }
        /* transfer to host iova for GFX to use DMA */
        rc = gvt_dma_map_iova(info->vgpu, pfn, &iova);
        if (rc) {
-               gvt_err("gvt_dma_map_iova failed for gfn: 0x%lx\n", gfn);
+               gvt_vgpu_err("gvt_dma_map_iova failed for gfn: 0x%lx\n", gfn);
                vfio_unpin_pages(dev, &gfn, 1);
                return INTEL_GVT_INVALID_ADDR;
        }
@@ -1417,7 +1426,7 @@ static int kvmgt_rw_gpa(unsigned long handle, unsigned long gpa,
 {
        struct kvmgt_guest_info *info;
        struct kvm *kvm;
-       int ret;
+       int idx, ret;
        bool kthread = current->mm == NULL;
 
        if (!handle_valid(handle))
@@ -1429,8 +1438,10 @@ static int kvmgt_rw_gpa(unsigned long handle, unsigned long gpa,
        if (kthread)
                use_mm(kvm->mm);
 
+       idx = srcu_read_lock(&kvm->srcu);
        ret = write ? kvm_write_guest(kvm, gpa, buf, len) :
                      kvm_read_guest(kvm, gpa, buf, len);
+       srcu_read_unlock(&kvm->srcu, idx);
 
        if (kthread)
                unuse_mm(kvm->mm);
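
The srcu_read_lock()/srcu_read_unlock() pair added around the guest access is not decoration: KVM publishes its memslot array under SRCU, and kvm_read_guest()/kvm_write_guest() walk that array, so calling them outside a read-side critical section can race with memslot updates. The bracketing pattern as a small kernel-style sketch; the helper itself is illustrative:

    #include <linux/kvm_host.h>
    #include <linux/srcu.h>

    /* Illustrative helper: any guest-memory access through kvm must sit
     * inside an SRCU read-side critical section on kvm->srcu.
     */
    static int read_guest_u32(struct kvm *kvm, gpa_t gpa, u32 *val)
    {
            int idx, ret;

            idx = srcu_read_lock(&kvm->srcu);
            ret = kvm_read_guest(kvm, gpa, val, sizeof(*val));
            srcu_read_unlock(&kvm->srcu, idx);
            return ret;
    }
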
index 4df078bc5d042b1f4fc411fbb0f98c83a3cba729..1ba3bdb093416674c2f44014942971bcdcd8ea9e 100644 (file)
@@ -57,6 +57,58 @@ int intel_vgpu_gpa_to_mmio_offset(struct intel_vgpu *vgpu, u64 gpa)
        (reg >= gvt->device_info.gtt_start_offset \
         && reg < gvt->device_info.gtt_start_offset + gvt_ggtt_sz(gvt))
 
+static void failsafe_emulate_mmio_rw(struct intel_vgpu *vgpu, uint64_t pa,
+               void *p_data, unsigned int bytes, bool read)
+{
+       struct intel_gvt *gvt = NULL;
+       void *pt = NULL;
+       unsigned int offset = 0;
+
+       if (!vgpu || !p_data)
+               return;
+
+       gvt = vgpu->gvt;
+       mutex_lock(&gvt->lock);
+       offset = intel_vgpu_gpa_to_mmio_offset(vgpu, pa);
+       if (reg_is_mmio(gvt, offset)) {
+               if (read)
+                       intel_vgpu_default_mmio_read(vgpu, offset, p_data,
+                                       bytes);
+               else
+                       intel_vgpu_default_mmio_write(vgpu, offset, p_data,
+                                       bytes);
+       } else if (reg_is_gtt(gvt, offset) &&
+                       vgpu->gtt.ggtt_mm->virtual_page_table) {
+               offset -= gvt->device_info.gtt_start_offset;
+               pt = vgpu->gtt.ggtt_mm->virtual_page_table + offset;
+               if (read)
+                       memcpy(p_data, pt, bytes);
+               else
+                       memcpy(pt, p_data, bytes);
+
+       } else if (atomic_read(&vgpu->gtt.n_write_protected_guest_page)) {
+               struct intel_vgpu_guest_page *gp;
+
+               /* Since failsafe mode is entered early during guest boot,
+                * the guest may not have had a chance to set up its ppgtt
+                * table, so there should be no write-protected pages yet.
+                * Keep the wp code here in case it is needed in the future.
+                */
+               gp = intel_vgpu_find_guest_page(vgpu, pa >> PAGE_SHIFT);
+               if (gp) {
+                       /* remove write protection to prevent future traps */
+                       intel_vgpu_clean_guest_page(vgpu, gp);
+                       if (read)
+                               intel_gvt_hypervisor_read_gpa(vgpu, pa,
+                                               p_data, bytes);
+                       else
+                               intel_gvt_hypervisor_write_gpa(vgpu, pa,
+                                               p_data, bytes);
+               }
+       }
+       mutex_unlock(&gvt->lock);
+}
+
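
failsafe_emulate_mmio_rw() above reduces emulation to address classification: MMIO offsets hit the default vreg accessors, GTT offsets are served straight from the shadow copy of the page table, and a leftover write-protected page simply has its protection removed. A stripped-down user-space sketch of that dispatch, with illustrative ranges and names:

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    #define MMIO_SIZE   0x200000u   /* illustrative */
    #define GTT_START   0x800000u   /* illustrative */
    #define GTT_SIZE    0x800000u

    static uint8_t vreg[MMIO_SIZE];
    static uint8_t virtual_gtt[GTT_SIZE];

    /* Failsafe: no tracked handlers, just backing-store memcpy. */
    static void failsafe_rw(uint32_t off, void *buf, size_t len, int read)
    {
            void *backing;

            if (off < MMIO_SIZE)
                    backing = vreg + off;
            else if (off >= GTT_START && off < GTT_START + GTT_SIZE)
                    backing = virtual_gtt + (off - GTT_START);
            else
                    return;         /* unknown range: ignore */

            if (read)
                    memcpy(buf, backing, len);
            else
                    memcpy(backing, buf, len);
    }

    int main(void)
    {
            uint32_t v = 0xdeadbeef, r = 0;

            failsafe_rw(GTT_START + 8, &v, 4, 0);   /* write */
            failsafe_rw(GTT_START + 8, &r, 4, 1);   /* read back */
            printf("%#x\n", r);     /* 0xdeadbeef */
            return 0;
    }
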
 /**
  * intel_vgpu_emulate_mmio_read - emulate MMIO read
  * @vgpu: a vGPU
@@ -75,6 +127,11 @@ int intel_vgpu_emulate_mmio_read(struct intel_vgpu *vgpu, uint64_t pa,
        unsigned int offset = 0;
        int ret = -EINVAL;
 
+
+       if (vgpu->failsafe) {
+               failsafe_emulate_mmio_rw(vgpu, pa, p_data, bytes, true);
+               return 0;
+       }
        mutex_lock(&gvt->lock);
 
        if (atomic_read(&vgpu->gtt.n_write_protected_guest_page)) {
@@ -85,10 +142,10 @@ int intel_vgpu_emulate_mmio_read(struct intel_vgpu *vgpu, uint64_t pa,
                        ret = intel_gvt_hypervisor_read_gpa(vgpu, pa,
                                        p_data, bytes);
                        if (ret) {
-                               gvt_err("vgpu%d: guest page read error %d, "
+                               gvt_vgpu_err("guest page read error %d, "
                                        "gfn 0x%lx, pa 0x%llx, var 0x%x, len %d\n",
-                                       vgpu->id, ret,
-                                       gp->gfn, pa, *(u32 *)p_data, bytes);
+                                       ret, gp->gfn, pa, *(u32 *)p_data,
+                                       bytes);
                        }
                        mutex_unlock(&gvt->lock);
                        return ret;
@@ -143,14 +200,13 @@ int intel_vgpu_emulate_mmio_read(struct intel_vgpu *vgpu, uint64_t pa,
                ret = intel_vgpu_default_mmio_read(vgpu, offset, p_data, bytes);
 
                if (!vgpu->mmio.disable_warn_untrack) {
-                       gvt_err("vgpu%d: read untracked MMIO %x(%dB) val %x\n",
-                               vgpu->id, offset, bytes, *(u32 *)p_data);
+                       gvt_vgpu_err("read untracked MMIO %x(%dB) val %x\n",
+                               offset, bytes, *(u32 *)p_data);
 
                        if (offset == 0x206c) {
-                               gvt_err("------------------------------------------\n");
-                               gvt_err("vgpu%d: likely triggers a gfx reset\n",
-                                       vgpu->id);
-                               gvt_err("------------------------------------------\n");
+                               gvt_vgpu_err("------------------------------------------\n");
+                               gvt_vgpu_err("likely triggers a gfx reset\n");
+                               gvt_vgpu_err("------------------------------------------\n");
                                vgpu->mmio.disable_warn_untrack = true;
                        }
                }
@@ -163,8 +219,8 @@ int intel_vgpu_emulate_mmio_read(struct intel_vgpu *vgpu, uint64_t pa,
        mutex_unlock(&gvt->lock);
        return 0;
 err:
-       gvt_err("vgpu%d: fail to emulate MMIO read %08x len %d\n",
-                       vgpu->id, offset, bytes);
+       gvt_vgpu_err("failed to emulate MMIO read %08x len %d\n",
+                       offset, bytes);
        mutex_unlock(&gvt->lock);
        return ret;
 }
@@ -188,6 +244,11 @@ int intel_vgpu_emulate_mmio_write(struct intel_vgpu *vgpu, uint64_t pa,
        u32 old_vreg = 0, old_sreg = 0;
        int ret = -EINVAL;
 
+       if (vgpu->failsafe) {
+               failsafe_emulate_mmio_rw(vgpu, pa, p_data, bytes, false);
+               return 0;
+       }
+
        mutex_lock(&gvt->lock);
 
        if (atomic_read(&vgpu->gtt.n_write_protected_guest_page)) {
@@ -197,10 +258,11 @@ int intel_vgpu_emulate_mmio_write(struct intel_vgpu *vgpu, uint64_t pa,
                if (gp) {
                        ret = gp->handler(gp, pa, p_data, bytes);
                        if (ret) {
-                               gvt_err("vgpu%d: guest page write error %d, "
-                                       "gfn 0x%lx, pa 0x%llx, var 0x%x, len %d\n",
-                                       vgpu->id, ret,
-                                       gp->gfn, pa, *(u32 *)p_data, bytes);
+                               gvt_vgpu_err("guest page write error %d, "
+                                       "gfn 0x%lx, pa 0x%llx, "
+                                       "var 0x%x, len %d\n",
+                                       ret, gp->gfn, pa,
+                                       *(u32 *)p_data, bytes);
                        }
                        mutex_unlock(&gvt->lock);
                        return ret;
@@ -236,7 +298,7 @@ int intel_vgpu_emulate_mmio_write(struct intel_vgpu *vgpu, uint64_t pa,
 
        mmio = intel_gvt_find_mmio_info(gvt, rounddown(offset, 4));
        if (!mmio && !vgpu->mmio.disable_warn_untrack)
-               gvt_err("vgpu%d: write untracked MMIO %x len %d val %x\n",
+               gvt_dbg_mmio("vgpu%d: write untracked MMIO %x len %d val %x\n",
                                vgpu->id, offset, bytes, *(u32 *)p_data);
 
        if (!intel_gvt_mmio_is_unalign(gvt, offset)) {
@@ -267,8 +329,8 @@ int intel_vgpu_emulate_mmio_write(struct intel_vgpu *vgpu, uint64_t pa,
 
                        /* all register bits are RO. */
                        if (ro_mask == ~(u64)0) {
-                               gvt_err("vgpu%d: try to write RO reg %x\n",
-                                               vgpu->id, offset);
+                               gvt_vgpu_err("try to write RO reg %x\n",
+                                       offset);
                                ret = 0;
                                goto out;
                        }
@@ -298,8 +360,8 @@ out:
        mutex_unlock(&gvt->lock);
        return 0;
 err:
-       gvt_err("vgpu%d: fail to emulate MMIO write %08x len %d\n",
-                       vgpu->id, offset, bytes);
+       gvt_vgpu_err("failed to emulate MMIO write %08x len %d\n", offset,
+                    bytes);
        mutex_unlock(&gvt->lock);
        return ret;
 }
@@ -322,6 +384,8 @@ void intel_vgpu_reset_mmio(struct intel_vgpu *vgpu)
 
        /* set the bit 0:2(Core C-State ) to C0 */
        vgpu_vreg(vgpu, GEN6_GT_CORE_STATUS) = 0;
+
+       vgpu->mmio.disable_warn_untrack = false;
 }
 
 /**
index 3bc620f56f351e774dc8658c9f06c79d0b24446b..a3a027025cd0a40f9543e6ee76b385a4ad761dcc 100644 (file)
@@ -107,4 +107,7 @@ int intel_vgpu_default_mmio_read(struct intel_vgpu *vgpu, unsigned int offset,
                                 void *p_data, unsigned int bytes);
 int intel_vgpu_default_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
                                  void *p_data, unsigned int bytes);
+
+bool intel_gvt_in_force_nonpriv_whitelist(struct intel_gvt *gvt,
+                                         unsigned int offset);
 #endif
index d9fb41ab71198cb19b1ade4796f687af49444c80..311799136d7f6e9e2fd96537da3c69918d7258ee 100644 (file)
@@ -27,7 +27,6 @@
 
 static int init_vgpu_opregion(struct intel_vgpu *vgpu, u32 gpa)
 {
-       void __iomem *host_va = vgpu->gvt->opregion.opregion_va;
        u8 *buf;
        int i;
 
@@ -43,8 +42,8 @@ static int init_vgpu_opregion(struct intel_vgpu *vgpu, u32 gpa)
        if (!vgpu_opregion(vgpu)->va)
                return -ENOMEM;
 
-       memcpy_fromio(vgpu_opregion(vgpu)->va, host_va,
-                       INTEL_GVT_OPREGION_SIZE);
+       memcpy(vgpu_opregion(vgpu)->va, vgpu->gvt->opregion.opregion_va,
+              INTEL_GVT_OPREGION_SIZE);
 
        for (i = 0; i < INTEL_GVT_OPREGION_PAGES; i++)
                vgpu_opregion(vgpu)->gfn[i] = (gpa >> PAGE_SHIFT) + i;
@@ -68,14 +67,15 @@ static int map_vgpu_opregion(struct intel_vgpu *vgpu, bool map)
                mfn = intel_gvt_hypervisor_virt_to_mfn(vgpu_opregion(vgpu)->va
                        + i * PAGE_SIZE);
                if (mfn == INTEL_GVT_INVALID_ADDR) {
-                       gvt_err("fail to get MFN from VA\n");
+                       gvt_vgpu_err("failed to get MFN from VA\n");
                        return -EINVAL;
                }
                ret = intel_gvt_hypervisor_map_gfn_to_mfn(vgpu,
                                vgpu_opregion(vgpu)->gfn[i],
                                mfn, 1, map);
                if (ret) {
-                       gvt_err("fail to map GFN to MFN, errno: %d\n", ret);
+                       gvt_vgpu_err("failed to map GFN to MFN, errno: %d\n",
+                               ret);
                        return ret;
                }
        }
@@ -288,7 +288,7 @@ int intel_vgpu_emulate_opregion_request(struct intel_vgpu *vgpu, u32 swsci)
        parm = vgpu_opregion(vgpu)->va + INTEL_GVT_OPREGION_PARM;
 
        if (!(swsci & SWSCI_SCI_SELECT)) {
-               gvt_err("vgpu%d: requesting SMI service\n", vgpu->id);
+               gvt_vgpu_err("requesting SMI service\n");
                return 0;
        }
        /* ignore non 0->1 transitions */
@@ -301,9 +301,8 @@ int intel_vgpu_emulate_opregion_request(struct intel_vgpu *vgpu, u32 swsci)
        func = GVT_OPREGION_FUNC(*scic);
        subfunc = GVT_OPREGION_SUBFUNC(*scic);
        if (!querying_capabilities(*scic)) {
-               gvt_err("vgpu%d: requesting runtime service: func \"%s\","
+               gvt_vgpu_err("requesting runtime service: func \"%s\","
                                " subfunc \"%s\"\n",
-                               vgpu->id,
                                opregion_func_name(func),
                                opregion_subfunc_name(subfunc));
                /*
index 2b3a642284b6da67f8f5d821256314d896799298..0beb83563b0870edecae35cf5a5807ba1bfc4ae8 100644 (file)
@@ -53,6 +53,14 @@ static struct render_mmio gen8_render_mmio_list[] = {
        {RCS, _MMIO(0x24d4), 0, false},
        {RCS, _MMIO(0x24d8), 0, false},
        {RCS, _MMIO(0x24dc), 0, false},
+       {RCS, _MMIO(0x24e0), 0, false},
+       {RCS, _MMIO(0x24e4), 0, false},
+       {RCS, _MMIO(0x24e8), 0, false},
+       {RCS, _MMIO(0x24ec), 0, false},
+       {RCS, _MMIO(0x24f0), 0, false},
+       {RCS, _MMIO(0x24f4), 0, false},
+       {RCS, _MMIO(0x24f8), 0, false},
+       {RCS, _MMIO(0x24fc), 0, false},
        {RCS, _MMIO(0x7004), 0xffff, true},
        {RCS, _MMIO(0x7008), 0xffff, true},
        {RCS, _MMIO(0x7000), 0xffff, true},
@@ -76,6 +84,14 @@ static struct render_mmio gen9_render_mmio_list[] = {
        {RCS, _MMIO(0x24d4), 0, false},
        {RCS, _MMIO(0x24d8), 0, false},
        {RCS, _MMIO(0x24dc), 0, false},
+       {RCS, _MMIO(0x24e0), 0, false},
+       {RCS, _MMIO(0x24e4), 0, false},
+       {RCS, _MMIO(0x24e8), 0, false},
+       {RCS, _MMIO(0x24ec), 0, false},
+       {RCS, _MMIO(0x24f0), 0, false},
+       {RCS, _MMIO(0x24f4), 0, false},
+       {RCS, _MMIO(0x24f8), 0, false},
+       {RCS, _MMIO(0x24fc), 0, false},
        {RCS, _MMIO(0x7004), 0xffff, true},
        {RCS, _MMIO(0x7008), 0xffff, true},
        {RCS, _MMIO(0x7000), 0xffff, true},
@@ -151,7 +167,7 @@ static void handle_tlb_pending_event(struct intel_vgpu *vgpu, int ring_id)
        I915_WRITE_FW(reg, 0x1);
 
        if (wait_for_atomic((I915_READ_FW(reg) == 0), 50))
-               gvt_err("timeout in invalidate ring (%d) tlb\n", ring_id);
+               gvt_vgpu_err("timeout invalidating ring (%d) tlb\n", ring_id);
        else
                vgpu_vreg(vgpu, regs[ring_id]) = 0;
 
@@ -191,7 +207,7 @@ static void load_mocs(struct intel_vgpu *vgpu, int ring_id)
                l3_offset.reg = 0xb020;
                for (i = 0; i < 32; i++) {
                        gen9_render_mocs_L3[i] = I915_READ(l3_offset);
-                       I915_WRITE(l3_offset, vgpu_vreg(vgpu, offset));
+                       I915_WRITE(l3_offset, vgpu_vreg(vgpu, l3_offset));
                        POSTING_READ(l3_offset);
                        l3_offset.reg += 4;
                }
index 06c9584ac5f0333c28d628d797686b82d8f82806..34b9acdf34791c84170cd6c96203a5b84f860b77 100644 (file)
@@ -101,7 +101,7 @@ struct tbs_sched_data {
        struct list_head runq_head;
 };
 
-#define GVT_DEFAULT_TIME_SLICE (1 * HZ / 1000)
+#define GVT_DEFAULT_TIME_SLICE (msecs_to_jiffies(1))
 
 static void tbs_sched_func(struct work_struct *work)
 {
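
Replacing 1 * HZ / 1000 with msecs_to_jiffies(1) sidesteps an integer-division trap: with CONFIG_HZ below 1000 (say 250), HZ / 1000 truncates to 0 jiffies, turning the intended 1 ms time slice into no delay at all, whereas msecs_to_jiffies() rounds up to at least one jiffy. A quick user-space check of the arithmetic; the rounding below approximates the kernel's behaviour:

    #include <stdio.h>

    /* Round up so a nonzero delay never truncates to 0 jiffies. */
    static unsigned long msecs_to_jiffies(unsigned int m, unsigned int hz)
    {
            return ((unsigned long)m * hz + 999) / 1000;
    }

    int main(void)
    {
            unsigned int hz_values[] = { 100, 250, 300, 1000 };
            unsigned int i;

            for (i = 0; i < sizeof(hz_values) / sizeof(hz_values[0]); i++) {
                    unsigned int hz = hz_values[i];

                    printf("HZ=%4u: 1*HZ/1000 = %u, msecs_to_jiffies(1) = %lu\n",
                           hz, 1 * hz / 1000, msecs_to_jiffies(1, hz));
            }
            return 0;   /* every HZ < 1000 prints 0 vs 1 */
    }
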
@@ -223,7 +223,7 @@ static void tbs_sched_start_schedule(struct intel_vgpu *vgpu)
                return;
 
        list_add_tail(&vgpu_data->list, &sched_data->runq_head);
-       schedule_delayed_work(&sched_data->work, sched_data->period);
+       schedule_delayed_work(&sched_data->work, 0);
 }
 
 static void tbs_sched_stop_schedule(struct intel_vgpu *vgpu)
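The GVT_DEFAULT_TIME_SLICE change above is an integer-truncation fix: for any HZ below 1000, (1 * HZ / 1000) evaluates to zero jiffies, so the scheduler would rearm itself with a zero-length time slice, while msecs_to_jiffies() rounds up to at least one jiffy. A minimal userspace sketch of the arithmetic, assuming HZ=250 and a simplified round-up helper standing in for the kernel's msecs_to_jiffies():

#include <stdio.h>

#define HZ 250  /* assumed kernel config; 100, 250, 300 and 1000 are all common */

/* simplified stand-in for the kernel helper, which rounds up */
static unsigned long msecs_to_jiffies_sketch(unsigned int ms)
{
        return ((unsigned long)ms * HZ + 999) / 1000;
}

int main(void)
{
        printf("1 * HZ / 1000       = %d\n", 1 * HZ / 1000);               /* 0: truncated */
        printf("msecs_to_jiffies(1) = %lu\n", msecs_to_jiffies_sketch(1)); /* 1 */
        return 0;
}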
index d6b6d0efdd1aeef15463e9504a4054ff3f2c3f8f..a44782412f2c9922bd4ec4fa5160bdb588db6cf0 100644 (file)
@@ -84,7 +84,7 @@ static int populate_shadow_context(struct intel_vgpu_workload *workload)
                                (u32)((workload->ctx_desc.lrca + i) <<
                                GTT_PAGE_SHIFT));
                if (context_gpa == INTEL_GVT_INVALID_ADDR) {
-                       gvt_err("Invalid guest context descriptor\n");
+                       gvt_vgpu_err("Invalid guest context descriptor\n");
                        return -EINVAL;
                }
 
@@ -127,18 +127,24 @@ static int populate_shadow_context(struct intel_vgpu_workload *workload)
        return 0;
 }
 
+static inline bool is_gvt_request(struct drm_i915_gem_request *req)
+{
+       return i915_gem_context_force_single_submission(req->ctx);
+}
+
 static int shadow_context_status_change(struct notifier_block *nb,
                unsigned long action, void *data)
 {
-       struct intel_vgpu *vgpu = container_of(nb,
-                       struct intel_vgpu, shadow_ctx_notifier_block);
-       struct drm_i915_gem_request *req =
-               (struct drm_i915_gem_request *)data;
-       struct intel_gvt_workload_scheduler *scheduler =
-               &vgpu->gvt->scheduler;
+       struct drm_i915_gem_request *req = (struct drm_i915_gem_request *)data;
+       struct intel_gvt *gvt = container_of(nb, struct intel_gvt,
+                               shadow_ctx_notifier_block[req->engine->id]);
+       struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
        struct intel_vgpu_workload *workload =
                scheduler->current_workload[req->engine->id];
 
+       if (!is_gvt_request(req) || unlikely(!workload))
+               return NOTIFY_OK;
+
        switch (action) {
        case INTEL_CONTEXT_SCHEDULE_IN:
                intel_gvt_load_render_mmio(workload->vgpu,
@@ -148,6 +154,15 @@ static int shadow_context_status_change(struct notifier_block *nb,
        case INTEL_CONTEXT_SCHEDULE_OUT:
                intel_gvt_restore_render_mmio(workload->vgpu,
                                              workload->ring_id);
+               /* If the status is -EINPROGRESS, this workload hit no
+                * issue during dispatch, so on SCHEDULE_OUT the status
+                * can be set to zero for good. If the status is NOT
+                * -EINPROGRESS, something went wrong during dispatch
+                * and the status must not be overwritten with zero.
+                */
+               if (workload->status == -EINPROGRESS)
+                       workload->status = 0;
                atomic_set(&workload->shadow_ctx_active, 0);
                break;
        default:
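The status convention described in the comment above is compact enough to state as code. A hedged sketch (finalize_status is an illustrative name, not a GVT function):

#include <errno.h>
#include <stdio.h>

/* -EINPROGRESS means "dispatched, no error so far"; only that value is
 * promoted to 0 at schedule-out, and real errors are preserved. */
static int finalize_status(int status)
{
        return status == -EINPROGRESS ? 0 : status;
}

int main(void)
{
        printf("clean dispatch  -> %d\n", finalize_status(-EINPROGRESS)); /* 0 */
        printf("failed dispatch -> %d\n", finalize_status(-EINVAL));      /* -22 */
        return 0;
}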
@@ -163,7 +178,9 @@ static int dispatch_workload(struct intel_vgpu_workload *workload)
        int ring_id = workload->ring_id;
        struct i915_gem_context *shadow_ctx = workload->vgpu->shadow_ctx;
        struct drm_i915_private *dev_priv = workload->vgpu->gvt->dev_priv;
+       struct intel_engine_cs *engine = dev_priv->engine[ring_id];
        struct drm_i915_gem_request *rq;
+       struct intel_vgpu *vgpu = workload->vgpu;
        int ret;
 
        gvt_dbg_sched("ring id %d prepare to dispatch workload %p\n",
@@ -175,9 +192,24 @@ static int dispatch_workload(struct intel_vgpu_workload *workload)
 
        mutex_lock(&dev_priv->drm.struct_mutex);
 
+       /* Pin the shadow context by GVT even though it will also be
+        * pinned when i915 allocates the request: GVT updates the guest
+        * context from the shadow context once the workload completes,
+        * and by then i915 may already have unpinned the shadow context,
+        * making the shadow_ctx pages invalid. So GVT needs a pin of
+        * its own; after updating the guest context, it can safely
+        * unpin the shadow_ctx.
+        */
+       ret = engine->context_pin(engine, shadow_ctx);
+       if (ret) {
+               gvt_vgpu_err("fail to pin shadow context\n");
+               workload->status = ret;
+               mutex_unlock(&dev_priv->drm.struct_mutex);
+               return ret;
+       }
+
        rq = i915_gem_request_alloc(dev_priv->engine[ring_id], shadow_ctx);
        if (IS_ERR(rq)) {
-               gvt_err("fail to allocate gem request\n");
+               gvt_vgpu_err("fail to allocate gem request\n");
                ret = PTR_ERR(rq);
                goto out;
        }
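The pinning added above follows a pin-early/unpin-on-error discipline: the extra GVT-held pin is dropped immediately on a dispatch failure, and otherwise only later, once the guest context has been updated in complete_current_workload(). A hedged sketch of that pairing, with stub types standing in for the real engine API:

#include <stdio.h>

/* illustrative stand-ins, not the i915/GVT types */
struct engine { int pin_count; };

static int engine_pin(struct engine *e)    { e->pin_count++; return 0; }
static void engine_unpin(struct engine *e) { e->pin_count--; }
static int submit_work(struct engine *e)   { (void)e; return -1; /* simulate failure */ }

static int dispatch_sketch(struct engine *e)
{
        int ret = engine_pin(e);        /* pin before submission */
        if (ret)
                return ret;
        ret = submit_work(e);
        if (ret)
                engine_unpin(e);        /* error path: drop the pin now */
        return ret;                     /* success: unpinned later, at completion */
}

int main(void)
{
        struct engine e = { 0 };
        dispatch_sketch(&e);
        printf("pin_count after failed dispatch: %d\n", e.pin_count); /* 0 */
        return 0;
}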
@@ -190,9 +222,12 @@ static int dispatch_workload(struct intel_vgpu_workload *workload)
        if (ret)
                goto out;
 
-       ret = intel_gvt_scan_and_shadow_wa_ctx(&workload->wa_ctx);
-       if (ret)
-               goto out;
+       if ((workload->ring_id == RCS) &&
+           (workload->wa_ctx.indirect_ctx.size != 0)) {
+               ret = intel_gvt_scan_and_shadow_wa_ctx(&workload->wa_ctx);
+               if (ret)
+                       goto out;
+       }
 
        ret = populate_shadow_context(workload);
        if (ret)
@@ -215,6 +250,9 @@ out:
 
        if (!IS_ERR_OR_NULL(rq))
                i915_add_request_no_flush(rq);
+       else
+               engine->context_unpin(engine, shadow_ctx);
+
        mutex_unlock(&dev_priv->drm.struct_mutex);
        return ret;
 }
@@ -310,7 +348,7 @@ static void update_guest_context(struct intel_vgpu_workload *workload)
                                (u32)((workload->ctx_desc.lrca + i) <<
                                        GTT_PAGE_SHIFT));
                if (context_gpa == INTEL_GVT_INVALID_ADDR) {
-                       gvt_err("invalid guest context descriptor\n");
+                       gvt_vgpu_err("invalid guest context descriptor\n");
                        return;
                }
 
@@ -359,15 +397,31 @@ static void complete_current_workload(struct intel_gvt *gvt, int ring_id)
        workload = scheduler->current_workload[ring_id];
        vgpu = workload->vgpu;
 
-       if (!workload->status && !vgpu->resetting) {
+       /* For a workload with a request, wait for the context switch
+        * to make sure the request has completed.
+        * For a workload without a request, complete it directly.
+        */
+       if (workload->req) {
+               struct drm_i915_private *dev_priv =
+                       workload->vgpu->gvt->dev_priv;
+               struct intel_engine_cs *engine =
+                       dev_priv->engine[workload->ring_id];
                wait_event(workload->shadow_ctx_status_wq,
                           !atomic_read(&workload->shadow_ctx_active));
 
-               update_guest_context(workload);
+               i915_gem_request_put(fetch_and_zero(&workload->req));
+
+               if (!workload->status && !vgpu->resetting) {
+                       update_guest_context(workload);
 
-               for_each_set_bit(event, workload->pending_events,
-                                INTEL_GVT_EVENT_MAX)
-                       intel_vgpu_trigger_virtual_event(vgpu, event);
+                       for_each_set_bit(event, workload->pending_events,
+                                        INTEL_GVT_EVENT_MAX)
+                               intel_vgpu_trigger_virtual_event(vgpu, event);
+               }
+               mutex_lock(&dev_priv->drm.struct_mutex);
+               /* unpin shadow ctx as the shadow_ctx update is done */
+               engine->context_unpin(engine, workload->vgpu->shadow_ctx);
+               mutex_unlock(&dev_priv->drm.struct_mutex);
        }
 
        gvt_dbg_sched("ring id %d complete workload %p status %d\n",
@@ -397,7 +451,7 @@ static int workload_thread(void *priv)
        int ring_id = p->ring_id;
        struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
        struct intel_vgpu_workload *workload = NULL;
-       long lret;
+       struct intel_vgpu *vgpu = NULL;
        int ret;
        bool need_force_wake = IS_SKYLAKE(gvt->dev_priv);
        DEFINE_WAIT_FUNC(wait, woken_wake_function);
@@ -440,29 +494,19 @@ static int workload_thread(void *priv)
                mutex_unlock(&gvt->lock);
 
                if (ret) {
-                       gvt_err("fail to dispatch workload, skip\n");
+                       vgpu = workload->vgpu;
+                       gvt_vgpu_err("fail to dispatch workload, skip\n");
                        goto complete;
                }
 
                gvt_dbg_sched("ring id %d wait workload %p\n",
                                workload->ring_id, workload);
-
-               lret = i915_wait_request(workload->req,
-                                        0, MAX_SCHEDULE_TIMEOUT);
-               if (lret < 0) {
-                       workload->status = lret;
-                       gvt_err("fail to wait workload, skip\n");
-               } else {
-                       workload->status = 0;
-               }
+               i915_wait_request(workload->req, 0, MAX_SCHEDULE_TIMEOUT);
 
 complete:
                gvt_dbg_sched("will complete workload %p, status: %d\n",
                                workload, workload->status);
 
-               if (workload->req)
-                       i915_gem_request_put(fetch_and_zero(&workload->req));
-
                complete_current_workload(gvt, ring_id);
 
                if (need_force_wake)
@@ -493,15 +537,16 @@ void intel_gvt_wait_vgpu_idle(struct intel_vgpu *vgpu)
 void intel_gvt_clean_workload_scheduler(struct intel_gvt *gvt)
 {
        struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
-       int i;
+       struct intel_engine_cs *engine;
+       enum intel_engine_id i;
 
        gvt_dbg_core("clean workload scheduler\n");
 
-       for (i = 0; i < I915_NUM_ENGINES; i++) {
-               if (scheduler->thread[i]) {
-                       kthread_stop(scheduler->thread[i]);
-                       scheduler->thread[i] = NULL;
-               }
+       for_each_engine(engine, gvt->dev_priv, i) {
+               atomic_notifier_chain_unregister(
+                                       &engine->context_status_notifier,
+                                       &gvt->shadow_ctx_notifier_block[i]);
+               kthread_stop(scheduler->thread[i]);
        }
 }
 
@@ -509,18 +554,15 @@ int intel_gvt_init_workload_scheduler(struct intel_gvt *gvt)
 {
        struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
        struct workload_thread_param *param = NULL;
+       struct intel_engine_cs *engine;
+       enum intel_engine_id i;
        int ret;
-       int i;
 
        gvt_dbg_core("init workload scheduler\n");
 
        init_waitqueue_head(&scheduler->workload_complete_wq);
 
-       for (i = 0; i < I915_NUM_ENGINES; i++) {
-               /* check ring mask at init time */
-               if (!HAS_ENGINE(gvt->dev_priv, i))
-                       continue;
-
+       for_each_engine(engine, gvt->dev_priv, i) {
                init_waitqueue_head(&scheduler->waitq[i]);
 
                param = kzalloc(sizeof(*param), GFP_KERNEL);
@@ -539,6 +581,11 @@ int intel_gvt_init_workload_scheduler(struct intel_gvt *gvt)
                        ret = PTR_ERR(scheduler->thread[i]);
                        goto err;
                }
+
+               gvt->shadow_ctx_notifier_block[i].notifier_call =
+                                       shadow_context_status_change;
+               atomic_notifier_chain_register(&engine->context_status_notifier,
+                                       &gvt->shadow_ctx_notifier_block[i]);
        }
        return 0;
 err:
@@ -550,9 +597,6 @@ err:
 
 void intel_vgpu_clean_gvt_context(struct intel_vgpu *vgpu)
 {
-       atomic_notifier_chain_unregister(&vgpu->shadow_ctx->status_notifier,
-                       &vgpu->shadow_ctx_notifier_block);
-
        i915_gem_context_put_unlocked(vgpu->shadow_ctx);
 }
 
@@ -567,10 +611,5 @@ int intel_vgpu_init_gvt_context(struct intel_vgpu *vgpu)
 
        vgpu->shadow_ctx->engine[RCS].initialised = true;
 
-       vgpu->shadow_ctx_notifier_block.notifier_call =
-               shadow_context_status_change;
-
-       atomic_notifier_chain_register(&vgpu->shadow_ctx->status_notifier,
-                                      &vgpu->shadow_ctx_notifier_block);
        return 0;
 }
index 95a97aa0051e787430fff4266be7ac559973b78f..41cfa5ccae84ce4020c6b2ff4a051ce8e17f6298 100644 (file)
@@ -64,6 +64,20 @@ void populate_pvinfo_page(struct intel_vgpu *vgpu)
        WARN_ON(sizeof(struct vgt_if) != VGT_PVINFO_SIZE);
 }
 
+static struct {
+       unsigned int low_mm;
+       unsigned int high_mm;
+       unsigned int fence;
+       enum intel_vgpu_edid edid;
+       char *name;
+} vgpu_types[] = {
+/* Fixed vGPU type table */
+       { MB_TO_BYTES(64), MB_TO_BYTES(512), 4, GVT_EDID_1024_768, "8" },
+       { MB_TO_BYTES(128), MB_TO_BYTES(512), 4, GVT_EDID_1920_1200, "4" },
+       { MB_TO_BYTES(256), MB_TO_BYTES(1024), 4, GVT_EDID_1920_1200, "2" },
+       { MB_TO_BYTES(512), MB_TO_BYTES(2048), 4, GVT_EDID_1920_1200, "1" },
+};
+
 /**
  * intel_gvt_init_vgpu_types - initialize vGPU type list
  * @gvt : GVT device
@@ -78,9 +92,8 @@ int intel_gvt_init_vgpu_types(struct intel_gvt *gvt)
        unsigned int min_low;
 
        /* vGPU type name is defined as GVTg_Vx_y which contains
-        * physical GPU generation type and 'y' means maximum vGPU
-        * instances user can create on one physical GPU for this
-        * type.
+        * physical GPU generation type (e.g. V4 for BDW servers, V5
+        * for SKL servers).
         *
         * Depending on the physical SKU resources, one might see vGPU types like
         * GVTg_V4_8, GVTg_V4_4, GVTg_V4_2, etc. We can create
@@ -92,7 +105,7 @@ int intel_gvt_init_vgpu_types(struct intel_gvt *gvt)
         */
        low_avail = gvt_aperture_sz(gvt) - HOST_LOW_GM_SIZE;
        high_avail = gvt_hidden_sz(gvt) - HOST_HIGH_GM_SIZE;
-       num_types = 4;
+       num_types = sizeof(vgpu_types) / sizeof(vgpu_types[0]);
 
        gvt->types = kzalloc(num_types * sizeof(struct intel_vgpu_type),
                             GFP_KERNEL);
@@ -101,28 +114,29 @@ int intel_gvt_init_vgpu_types(struct intel_gvt *gvt)
 
        min_low = MB_TO_BYTES(32);
        for (i = 0; i < num_types; ++i) {
-               if (low_avail / min_low == 0)
+               if (low_avail / vgpu_types[i].low_mm == 0)
                        break;
-               gvt->types[i].low_gm_size = min_low;
-               gvt->types[i].high_gm_size = max((min_low<<3), MB_TO_BYTES(384U));
-               gvt->types[i].fence = 4;
-               gvt->types[i].max_instance = min(low_avail / min_low,
-                                                high_avail / gvt->types[i].high_gm_size);
-               gvt->types[i].avail_instance = gvt->types[i].max_instance;
+
+               gvt->types[i].low_gm_size = vgpu_types[i].low_mm;
+               gvt->types[i].high_gm_size = vgpu_types[i].high_mm;
+               gvt->types[i].fence = vgpu_types[i].fence;
+               gvt->types[i].resolution = vgpu_types[i].edid;
+               gvt->types[i].avail_instance = min(low_avail / vgpu_types[i].low_mm,
+                                                  high_avail / vgpu_types[i].high_mm);
 
                if (IS_GEN8(gvt->dev_priv))
-                       sprintf(gvt->types[i].name, "GVTg_V4_%u",
-                                               gvt->types[i].max_instance);
+                       sprintf(gvt->types[i].name, "GVTg_V4_%s",
+                                               vgpu_types[i].name);
                else if (IS_GEN9(gvt->dev_priv))
-                       sprintf(gvt->types[i].name, "GVTg_V5_%u",
-                                               gvt->types[i].max_instance);
+                       sprintf(gvt->types[i].name, "GVTg_V5_%s",
+                                               vgpu_types[i].name);
 
-               min_low <<= 1;
-               gvt_dbg_core("type[%d]: %s max %u avail %u low %u high %u fence %u\n",
-                            i, gvt->types[i].name, gvt->types[i].max_instance,
+               gvt_dbg_core("type[%d]: %s avail %u low %u high %u fence %u res %s\n",
+                            i, gvt->types[i].name,
                             gvt->types[i].avail_instance,
                             gvt->types[i].low_gm_size,
-                            gvt->types[i].high_gm_size, gvt->types[i].fence);
+                            gvt->types[i].high_gm_size, gvt->types[i].fence,
+                            vgpu_edid_str(gvt->types[i].resolution));
        }
 
        gvt->num_types = i;
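With the fixed table, each type's availability is just the tighter of the two memory constraints. A worked example with assumed aperture figures (not real SKU values), everything in MB to match the table:

#include <stdio.h>

int main(void)
{
        unsigned int low_avail  = 448;   /* MB of mappable GM left for vGPUs */
        unsigned int high_avail = 4096;  /* MB of hidden GM left for vGPUs   */
        unsigned int low_mm = 128, high_mm = 512; /* the GVTg_Vx_4 table row */

        unsigned int by_low  = low_avail / low_mm;    /* 3 */
        unsigned int by_high = high_avail / high_mm;  /* 8 */
        unsigned int avail   = by_low < by_high ? by_low : by_high;

        printf("avail_instance = min(%u, %u) = %u\n", by_low, by_high, avail);
        return 0;
}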
@@ -138,7 +152,7 @@ static void intel_gvt_update_vgpu_types(struct intel_gvt *gvt)
 {
        int i;
        unsigned int low_gm_avail, high_gm_avail, fence_avail;
-       unsigned int low_gm_min, high_gm_min, fence_min, total_min;
+       unsigned int low_gm_min, high_gm_min, fence_min;
 
        /* Need to depend on maximum hw resource size but keep on
         * static config for now.
@@ -154,12 +168,11 @@ static void intel_gvt_update_vgpu_types(struct intel_gvt *gvt)
                low_gm_min = low_gm_avail / gvt->types[i].low_gm_size;
                high_gm_min = high_gm_avail / gvt->types[i].high_gm_size;
                fence_min = fence_avail / gvt->types[i].fence;
-               total_min = min(min(low_gm_min, high_gm_min), fence_min);
-               gvt->types[i].avail_instance = min(gvt->types[i].max_instance,
-                                                  total_min);
+               gvt->types[i].avail_instance = min(min(low_gm_min, high_gm_min),
+                                                  fence_min);
 
-               gvt_dbg_core("update type[%d]: %s max %u avail %u low %u high %u fence %u\n",
-                      i, gvt->types[i].name, gvt->types[i].max_instance,
+               gvt_dbg_core("update type[%d]: %s avail %u low %u high %u fence %u\n",
+                      i, gvt->types[i].name,
                       gvt->types[i].avail_instance, gvt->types[i].low_gm_size,
                       gvt->types[i].high_gm_size, gvt->types[i].fence);
        }
@@ -248,7 +261,7 @@ static struct intel_vgpu *__intel_gvt_create_vgpu(struct intel_gvt *gvt,
        if (ret)
                goto out_detach_hypervisor_vgpu;
 
-       ret = intel_vgpu_init_display(vgpu);
+       ret = intel_vgpu_init_display(vgpu, param->resolution);
        if (ret)
                goto out_clean_gtt;
 
@@ -312,6 +325,7 @@ struct intel_vgpu *intel_gvt_create_vgpu(struct intel_gvt *gvt,
        param.low_gm_sz = type->low_gm_size;
        param.high_gm_sz = type->high_gm_size;
        param.fence_sz = type->fence;
+       param.resolution = type->resolution;
 
        /* XXX current param based on MB */
        param.low_gm_sz = BYTES_TO_MB(param.low_gm_sz);
@@ -387,8 +401,12 @@ void intel_gvt_reset_vgpu_locked(struct intel_vgpu *vgpu, bool dmlr,
                populate_pvinfo_page(vgpu);
                intel_vgpu_reset_display(vgpu);
 
-               if (dmlr)
+               if (dmlr) {
                        intel_vgpu_reset_cfg_space(vgpu);
+                       /* only reset the failsafe mode on a dmlr reset */
+                       vgpu->failsafe = false;
+                       vgpu->pv_notified = false;
+               }
        }
 
        vgpu->resetting = false;
index e703556eba999a95fd1a728538f6412db28068ae..1c75402a59c1377e7abd410f2dc58dcec0df7094 100644 (file)
@@ -248,6 +248,7 @@ static int i915_getparam(struct drm_device *dev, void *data,
        case I915_PARAM_IRQ_ACTIVE:
        case I915_PARAM_ALLOW_BATCHBUFFER:
        case I915_PARAM_LAST_DISPATCH:
+       case I915_PARAM_HAS_EXEC_CONSTANTS:
                /* Reject all old ums/dri params. */
                return -ENODEV;
        case I915_PARAM_CHIPSET_ID:
@@ -274,9 +275,6 @@ static int i915_getparam(struct drm_device *dev, void *data,
        case I915_PARAM_HAS_BSD2:
                value = !!dev_priv->engine[VCS2];
                break;
-       case I915_PARAM_HAS_EXEC_CONSTANTS:
-               value = INTEL_GEN(dev_priv) >= 4;
-               break;
        case I915_PARAM_HAS_LLC:
                value = HAS_LLC(dev_priv);
                break;
@@ -1788,7 +1786,7 @@ void i915_reset(struct drm_i915_private *dev_priv)
                goto error;
        }
 
-       i915_gem_reset_finish(dev_priv);
+       i915_gem_reset(dev_priv);
        intel_overlay_reset(dev_priv);
 
        /* Ok, now get things going again... */
@@ -1814,6 +1812,7 @@ void i915_reset(struct drm_i915_private *dev_priv)
        i915_queue_hangcheck(dev_priv);
 
 wakeup:
+       i915_gem_reset_finish(dev_priv);
        enable_irq(dev_priv->drm.irq);
        wake_up_bit(&error->flags, I915_RESET_IN_PROGRESS);
        return;
index 0a4b42d313912c3c5b56a449cfac33e63afeb16e..1e53c31b6826ec996b2d153e1dd32232b77dd9d7 100644 (file)
@@ -293,6 +293,7 @@ enum plane_id {
        PLANE_PRIMARY,
        PLANE_SPRITE0,
        PLANE_SPRITE1,
+       PLANE_SPRITE2,
        PLANE_CURSOR,
        I915_MAX_PLANES,
 };
@@ -1324,7 +1325,7 @@ struct intel_gen6_power_mgmt {
        unsigned boosts;
 
        /* manual wa residency calculations */
-       struct intel_rps_ei up_ei, down_ei;
+       struct intel_rps_ei ei;
 
        /*
         * Protects RPS/RC6 register access and PCU communication.
@@ -2063,8 +2064,6 @@ struct drm_i915_private {
 
        const struct intel_device_info info;
 
-       int relative_constants_mode;
-
        void __iomem *regs;
 
        struct intel_uncore uncore;
@@ -3341,6 +3340,7 @@ static inline u32 i915_reset_count(struct i915_gpu_error *error)
 }
 
 int i915_gem_reset_prepare(struct drm_i915_private *dev_priv);
+void i915_gem_reset(struct drm_i915_private *dev_priv);
 void i915_gem_reset_finish(struct drm_i915_private *dev_priv);
 void i915_gem_set_wedged(struct drm_i915_private *dev_priv);
 void i915_gem_clflush_object(struct drm_i915_gem_object *obj, bool force);
index 6908123162d17cd998c1e7f0bf54a27064e67588..67b1fc5a03313b80bc9459543b8dec3743ea953b 100644 (file)
@@ -1434,6 +1434,12 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
 
        trace_i915_gem_object_pwrite(obj, args->offset, args->size);
 
+       ret = -ENODEV;
+       if (obj->ops->pwrite)
+               ret = obj->ops->pwrite(obj, args);
+       if (ret != -ENODEV)
+               goto err;
+
        ret = i915_gem_object_wait(obj,
                                   I915_WAIT_INTERRUPTIBLE |
                                   I915_WAIT_ALL,
@@ -2119,6 +2125,7 @@ i915_gem_object_truncate(struct drm_i915_gem_object *obj)
         */
        shmem_truncate_range(file_inode(obj->base.filp), 0, (loff_t)-1);
        obj->mm.madv = __I915_MADV_PURGED;
+       obj->mm.pages = ERR_PTR(-EFAULT);
 }
 
 /* Try to discard unwanted pages */
@@ -2218,7 +2225,9 @@ void __i915_gem_object_put_pages(struct drm_i915_gem_object *obj,
 
        __i915_gem_object_reset_page_iter(obj);
 
-       obj->ops->put_pages(obj, pages);
+       if (!IS_ERR(pages))
+               obj->ops->put_pages(obj, pages);
+
 unlock:
        mutex_unlock(&obj->mm.lock);
 }
@@ -2437,7 +2446,7 @@ int __i915_gem_object_get_pages(struct drm_i915_gem_object *obj)
        if (err)
                return err;
 
-       if (unlikely(!obj->mm.pages)) {
+       if (unlikely(IS_ERR_OR_NULL(obj->mm.pages))) {
                err = ____i915_gem_object_get_pages(obj);
                if (err)
                        goto unlock;
@@ -2515,7 +2524,7 @@ void *i915_gem_object_pin_map(struct drm_i915_gem_object *obj,
 
        pinned = true;
        if (!atomic_inc_not_zero(&obj->mm.pages_pin_count)) {
-               if (unlikely(!obj->mm.pages)) {
+               if (unlikely(IS_ERR_OR_NULL(obj->mm.pages))) {
                        ret = ____i915_gem_object_get_pages(obj);
                        if (ret)
                                goto err_unlock;
@@ -2563,6 +2572,75 @@ err_unlock:
        goto out_unlock;
 }
 
+static int
+i915_gem_object_pwrite_gtt(struct drm_i915_gem_object *obj,
+                          const struct drm_i915_gem_pwrite *arg)
+{
+       struct address_space *mapping = obj->base.filp->f_mapping;
+       char __user *user_data = u64_to_user_ptr(arg->data_ptr);
+       u64 remain, offset;
+       unsigned int pg;
+
+       /* Before we instantiate/pin the backing store for our use, we
+        * can prepopulate the shmemfs filp efficiently using a write into
+        * the pagecache. We avoid the penalty of instantiating all the
+        * pages, important if the user is just writing to a few and never
+        * uses the object on the GPU, and using a direct write into shmemfs
+        * allows it to avoid the cost of retrieving a page (either swapin
+        * or clearing-before-use) before it is overwritten.
+        */
+       if (READ_ONCE(obj->mm.pages))
+               return -ENODEV;
+
+       /* Before the pages are instantiated the object is treated as being
+        * in the CPU domain. The pages will be clflushed as required before
+        * use, and we can freely write into the pages directly. If userspace
+        * races pwrite with any other operation, corruption will ensue -
+        * that is userspace's prerogative!
+        */
+
+       remain = arg->size;
+       offset = arg->offset;
+       pg = offset_in_page(offset);
+
+       do {
+               unsigned int len, unwritten;
+               struct page *page;
+               void *data, *vaddr;
+               int err;
+
+               len = PAGE_SIZE - pg;
+               if (len > remain)
+                       len = remain;
+
+               err = pagecache_write_begin(obj->base.filp, mapping,
+                                           offset, len, 0,
+                                           &page, &data);
+               if (err < 0)
+                       return err;
+
+               vaddr = kmap(page);
+               unwritten = copy_from_user(vaddr + pg, user_data, len);
+               kunmap(page);
+
+               err = pagecache_write_end(obj->base.filp, mapping,
+                                         offset, len, len - unwritten,
+                                         page, data);
+               if (err < 0)
+                       return err;
+
+               if (unwritten)
+                       return -EFAULT;
+
+               remain -= len;
+               user_data += len;
+               offset += len;
+               pg = 0;
+       } while (remain);
+
+       return 0;
+}
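i915_gem_object_pwrite_gtt() is wired in through the obj->ops->pwrite hook added earlier in this file, which uses -ENODEV as a "not applicable, fall back" sentinel: any other return, success or failure, short-circuits the generic path. A hedged sketch of that convention with made-up names:

#include <errno.h>
#include <stdio.h>

typedef int (*pwrite_hook)(int arg);

static int fast_path(int arg) { return arg ? 0 : -ENODEV; }

static int do_pwrite(pwrite_hook hook, int arg)
{
        int ret = -ENODEV;
        if (hook)
                ret = hook(arg);
        if (ret != -ENODEV) {
                printf("fast path handled it (ret=%d)\n", ret);
                return ret;
        }
        printf("falling back to the generic path\n");
        return 0;
}

int main(void)
{
        do_pwrite(fast_path, 1);  /* fast path handles it */
        do_pwrite(fast_path, 0);  /* -ENODEV: fall back    */
        return 0;
}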
+
 static bool ban_context(const struct i915_gem_context *ctx)
 {
        return (i915_gem_context_is_bannable(ctx) &&
@@ -2641,7 +2719,16 @@ int i915_gem_reset_prepare(struct drm_i915_private *dev_priv)
        for_each_engine(engine, dev_priv, id) {
                struct drm_i915_gem_request *request;
 
+               /* Prevent request submission to the hardware until we have
+                * completed the reset in i915_gem_reset_finish(). If a request
+                * is completed by one engine, it may then queue a request
+                * to a second via its engine->irq_tasklet *just* as we are
+                * calling engine->init_hw() and also writing the ELSP.
+                * Turning off the engine->irq_tasklet until the reset is over
+                * prevents the race.
+                */
                tasklet_kill(&engine->irq_tasklet);
+               tasklet_disable(&engine->irq_tasklet);
 
                if (engine_stalled(engine)) {
                        request = i915_gem_find_active_request(engine);
@@ -2756,7 +2843,7 @@ static void i915_gem_reset_engine(struct intel_engine_cs *engine)
        engine->reset_hw(engine, request);
 }
 
-void i915_gem_reset_finish(struct drm_i915_private *dev_priv)
+void i915_gem_reset(struct drm_i915_private *dev_priv)
 {
        struct intel_engine_cs *engine;
        enum intel_engine_id id;
@@ -2778,6 +2865,17 @@ void i915_gem_reset_finish(struct drm_i915_private *dev_priv)
        }
 }
 
+void i915_gem_reset_finish(struct drm_i915_private *dev_priv)
+{
+       struct intel_engine_cs *engine;
+       enum intel_engine_id id;
+
+       lockdep_assert_held(&dev_priv->drm.struct_mutex);
+
+       for_each_engine(engine, dev_priv, id)
+               tasklet_enable(&engine->irq_tasklet);
+}
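The prepare/finish split brackets the whole reset window: every engine's irq_tasklet is killed and disabled in i915_gem_reset_prepare() and re-enabled only here in i915_gem_reset_finish(), so a completing request on one engine cannot queue work to another mid-reset. A hedged sketch of the bracketing, with a plain flag in place of the tasklet API:

#include <stdbool.h>
#include <stdio.h>

struct engine { bool tasklet_enabled; };

static void reset_prepare(struct engine *e) { e->tasklet_enabled = false; }
static void reset_engines(struct engine *e) { (void)e; /* ... reset hw ... */ }
static void reset_finish(struct engine *e)  { e->tasklet_enabled = true; }

int main(void)
{
        struct engine e = { .tasklet_enabled = true };

        reset_prepare(&e);  /* no submission can race the reset from here on */
        reset_engines(&e);
        reset_finish(&e);   /* safe to submit again */

        printf("tasklet enabled: %d\n", e.tasklet_enabled);
        return 0;
}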
+
 static void nop_submit_request(struct drm_i915_gem_request *request)
 {
        dma_fence_set_error(&request->fence, -EIO);
@@ -3029,6 +3127,16 @@ i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
                args->timeout_ns -= ktime_to_ns(ktime_sub(ktime_get(), start));
                if (args->timeout_ns < 0)
                        args->timeout_ns = 0;
+
+               /*
+                * Apparently ktime isn't accurate enough and occasionally has a
+                * bit of mismatch in the jiffies<->nsecs<->ktime loop. So patch
+                * things up to make the test happy. We allow up to 1 jiffy.
+                *
+                * This is a regression from the timespec->ktime conversion.
+                */
+               if (ret == -ETIME && !nsecs_to_jiffies(args->timeout_ns))
+                       args->timeout_ns = 0;
        }
 
        i915_gem_object_put(obj);
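The fixup above only fires when the residual timeout is smaller than one scheduler tick. Worked numbers, assuming HZ=250 (one jiffy = 4,000,000 ns) and a truncating helper standing in for nsecs_to_jiffies():

#include <errno.h>
#include <stdio.h>

#define HZ 250

static unsigned long nsecs_to_jiffies_sketch(unsigned long long ns)
{
        return ns / (1000000000ull / HZ);  /* truncating division */
}

int main(void)
{
        long long timeout_ns = 3999999;  /* residual wait: just under one jiffy */
        int ret = -ETIME;                /* the wait expired */

        if (ret == -ETIME && !nsecs_to_jiffies_sketch(timeout_ns))
                timeout_ns = 0;          /* report the wait as fully elapsed */

        printf("reported timeout_ns = %lld\n", timeout_ns); /* 0 */
        return 0;
}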
@@ -3974,8 +4082,11 @@ void i915_gem_object_init(struct drm_i915_gem_object *obj,
 static const struct drm_i915_gem_object_ops i915_gem_object_ops = {
        .flags = I915_GEM_OBJECT_HAS_STRUCT_PAGE |
                 I915_GEM_OBJECT_IS_SHRINKABLE,
+
        .get_pages = i915_gem_object_get_pages_gtt,
        .put_pages = i915_gem_object_put_pages_gtt,
+
+       .pwrite = i915_gem_object_pwrite_gtt,
 };
 
 struct drm_i915_gem_object *
@@ -4583,8 +4694,6 @@ i915_gem_load_init(struct drm_i915_private *dev_priv)
        init_waitqueue_head(&dev_priv->gpu_error.wait_queue);
        init_waitqueue_head(&dev_priv->gpu_error.reset_queue);
 
-       dev_priv->relative_constants_mode = I915_EXEC_CONSTANTS_REL_GENERAL;
-
        init_waitqueue_head(&dev_priv->pending_flip_queue);
 
        dev_priv->mm.interruptible = true;
index 17f90c6182081c932652715ed34724f1c71b373d..e2d83b6d376b03e8bdef54bc9ed9e7c9a3d0138c 100644 (file)
@@ -311,7 +311,6 @@ __create_hw_context(struct drm_i915_private *dev_priv,
        ctx->ring_size = 4 * PAGE_SIZE;
        ctx->desc_template = GEN8_CTX_ADDRESSING_MODE(dev_priv) <<
                             GEN8_CTX_ADDRESSING_MODE_SHIFT;
-       ATOMIC_INIT_NOTIFIER_HEAD(&ctx->status_notifier);
 
        /* GuC requires the ring to be placed above GUC_WOPCM_TOP. If GuC is not
         * present or not in use we still need a small bias as ring wraparound
index 0ac750b90f3d33f5585351fe284da68eaf6cb1e2..e9c008fe14b1d77500e7d456ef37705053685130 100644 (file)
@@ -160,9 +160,6 @@ struct i915_gem_context {
        /** desc_template: invariant fields for the HW context descriptor */
        u32 desc_template;
 
-       /** status_notifier: list of callbacks for context-switch changes */
-       struct atomic_notifier_head status_notifier;
-
        /** guilty_count: How many times this context has caused a GPU hang. */
        unsigned int guilty_count;
        /**
index c181b1bb3d2c9e72addb040ee8a0d5a4b52f06c9..3be2503aa042c0c48cb2745ad26e9316a2409484 100644 (file)
@@ -293,12 +293,12 @@ int i915_gem_evict_for_node(struct i915_address_space *vm,
                 * those as well to make room for our guard pages.
                 */
                if (check_color) {
-                       if (vma->node.start + vma->node.size == node->start) {
-                               if (vma->node.color == node->color)
+                       if (node->start + node->size == target->start) {
+                               if (node->color == target->color)
                                        continue;
                        }
-                       if (vma->node.start == node->start + node->size) {
-                               if (vma->node.color == node->color)
+                       if (node->start == target->start + target->size) {
+                               if (node->color == target->color)
                                        continue;
                        }
                }
index d02cfaefe1c84e86cf39d74aa1ef09a4d1345af1..30e0675fd7dab7949d3cb3cb498be852d7448ac9 100644 (file)
@@ -1408,10 +1408,7 @@ execbuf_submit(struct i915_execbuffer_params *params,
               struct drm_i915_gem_execbuffer2 *args,
               struct list_head *vmas)
 {
-       struct drm_i915_private *dev_priv = params->request->i915;
        u64 exec_start, exec_len;
-       int instp_mode;
-       u32 instp_mask;
        int ret;
 
        ret = i915_gem_execbuffer_move_to_gpu(params->request, vmas);
@@ -1422,56 +1419,11 @@ execbuf_submit(struct i915_execbuffer_params *params,
        if (ret)
                return ret;
 
-       instp_mode = args->flags & I915_EXEC_CONSTANTS_MASK;
-       instp_mask = I915_EXEC_CONSTANTS_MASK;
-       switch (instp_mode) {
-       case I915_EXEC_CONSTANTS_REL_GENERAL:
-       case I915_EXEC_CONSTANTS_ABSOLUTE:
-       case I915_EXEC_CONSTANTS_REL_SURFACE:
-               if (instp_mode != 0 && params->engine->id != RCS) {
-                       DRM_DEBUG("non-0 rel constants mode on non-RCS\n");
-                       return -EINVAL;
-               }
-
-               if (instp_mode != dev_priv->relative_constants_mode) {
-                       if (INTEL_INFO(dev_priv)->gen < 4) {
-                               DRM_DEBUG("no rel constants on pre-gen4\n");
-                               return -EINVAL;
-                       }
-
-                       if (INTEL_INFO(dev_priv)->gen > 5 &&
-                           instp_mode == I915_EXEC_CONSTANTS_REL_SURFACE) {
-                               DRM_DEBUG("rel surface constants mode invalid on gen5+\n");
-                               return -EINVAL;
-                       }
-
-                       /* The HW changed the meaning on this bit on gen6 */
-                       if (INTEL_INFO(dev_priv)->gen >= 6)
-                               instp_mask &= ~I915_EXEC_CONSTANTS_REL_SURFACE;
-               }
-               break;
-       default:
-               DRM_DEBUG("execbuf with unknown constants: %d\n", instp_mode);
+       if (args->flags & I915_EXEC_CONSTANTS_MASK) {
+               DRM_DEBUG("I915_EXEC_CONSTANTS_* unsupported\n");
                return -EINVAL;
        }
 
-       if (params->engine->id == RCS &&
-           instp_mode != dev_priv->relative_constants_mode) {
-               struct intel_ring *ring = params->request->ring;
-
-               ret = intel_ring_begin(params->request, 4);
-               if (ret)
-                       return ret;
-
-               intel_ring_emit(ring, MI_NOOP);
-               intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
-               intel_ring_emit_reg(ring, INSTPM);
-               intel_ring_emit(ring, instp_mask << 16 | instp_mode);
-               intel_ring_advance(ring);
-
-               dev_priv->relative_constants_mode = instp_mode;
-       }
-
        if (args->flags & I915_EXEC_GEN7_SOL_RESET) {
                ret = i915_reset_gen7_sol_offsets(params->request);
                if (ret)
index bf90b07163d1266a6bb0c87f036e84fa78181991..76b80a0be79767be189c94694434c338c1f97e6a 100644 (file)
@@ -54,6 +54,9 @@ struct drm_i915_gem_object_ops {
        struct sg_table *(*get_pages)(struct drm_i915_gem_object *);
        void (*put_pages)(struct drm_i915_gem_object *, struct sg_table *);
 
+       int (*pwrite)(struct drm_i915_gem_object *,
+                     const struct drm_i915_gem_pwrite *);
+
        int (*dmabuf_export)(struct drm_i915_gem_object *);
        void (*release)(struct drm_i915_gem_object *);
 };
index 401006b4c6a36bf2a8058c2b47eb38105ab5baf4..d5d2b4c6ed382d687719a088d943580ccacbba15 100644 (file)
@@ -263,7 +263,7 @@ unsigned long i915_gem_shrink_all(struct drm_i915_private *dev_priv)
                                I915_SHRINK_BOUND |
                                I915_SHRINK_UNBOUND |
                                I915_SHRINK_ACTIVE);
-       rcu_barrier(); /* wait until our RCU delayed slab frees are completed */
+       synchronize_rcu(); /* wait for our earlier RCU delayed slab frees */
 
        return freed;
 }
index e6ffef2f707a01934a3a6f777b1dc7548ef370c8..b6c886ac901bd78cfa7beb1a3aaaf706821a6d2b 100644 (file)
@@ -1046,68 +1046,51 @@ static void vlv_c0_read(struct drm_i915_private *dev_priv,
        ei->media_c0 = I915_READ(VLV_MEDIA_C0_COUNT);
 }
 
-static bool vlv_c0_above(struct drm_i915_private *dev_priv,
-                        const struct intel_rps_ei *old,
-                        const struct intel_rps_ei *now,
-                        int threshold)
-{
-       u64 time, c0;
-       unsigned int mul = 100;
-
-       if (old->cz_clock == 0)
-               return false;
-
-       if (I915_READ(VLV_COUNTER_CONTROL) & VLV_COUNT_RANGE_HIGH)
-               mul <<= 8;
-
-       time = now->cz_clock - old->cz_clock;
-       time *= threshold * dev_priv->czclk_freq;
-
-       /* Workload can be split between render + media, e.g. SwapBuffers
-        * being blitted in X after being rendered in mesa. To account for
-        * this we need to combine both engines into our activity counter.
-        */
-       c0 = now->render_c0 - old->render_c0;
-       c0 += now->media_c0 - old->media_c0;
-       c0 *= mul * VLV_CZ_CLOCK_TO_MILLI_SEC;
-
-       return c0 >= time;
-}
-
 void gen6_rps_reset_ei(struct drm_i915_private *dev_priv)
 {
-       vlv_c0_read(dev_priv, &dev_priv->rps.down_ei);
-       dev_priv->rps.up_ei = dev_priv->rps.down_ei;
+       memset(&dev_priv->rps.ei, 0, sizeof(dev_priv->rps.ei));
 }
 
 static u32 vlv_wa_c0_ei(struct drm_i915_private *dev_priv, u32 pm_iir)
 {
+       const struct intel_rps_ei *prev = &dev_priv->rps.ei;
        struct intel_rps_ei now;
        u32 events = 0;
 
-       if ((pm_iir & (GEN6_PM_RP_DOWN_EI_EXPIRED | GEN6_PM_RP_UP_EI_EXPIRED)) == 0)
+       if ((pm_iir & GEN6_PM_RP_UP_EI_EXPIRED) == 0)
                return 0;
 
        vlv_c0_read(dev_priv, &now);
        if (now.cz_clock == 0)
                return 0;
 
-       if (pm_iir & GEN6_PM_RP_DOWN_EI_EXPIRED) {
-               if (!vlv_c0_above(dev_priv,
-                                 &dev_priv->rps.down_ei, &now,
-                                 dev_priv->rps.down_threshold))
-                       events |= GEN6_PM_RP_DOWN_THRESHOLD;
-               dev_priv->rps.down_ei = now;
-       }
+       if (prev->cz_clock) {
+               u64 time, c0;
+               unsigned int mul;
+
+               mul = VLV_CZ_CLOCK_TO_MILLI_SEC * 100; /* scale to threshold% */
+               if (I915_READ(VLV_COUNTER_CONTROL) & VLV_COUNT_RANGE_HIGH)
+                       mul <<= 8;
 
-       if (pm_iir & GEN6_PM_RP_UP_EI_EXPIRED) {
-               if (vlv_c0_above(dev_priv,
-                                &dev_priv->rps.up_ei, &now,
-                                dev_priv->rps.up_threshold))
-                       events |= GEN6_PM_RP_UP_THRESHOLD;
-               dev_priv->rps.up_ei = now;
+               time = now.cz_clock - prev->cz_clock;
+               time *= dev_priv->czclk_freq;
+
+               /* Workload can be split between render + media,
+                * e.g. SwapBuffers being blitted in X after being rendered in
+                * mesa. To account for this we need to combine both engines
+                * into our activity counter.
+                */
+               c0 = now.render_c0 - prev->render_c0;
+               c0 += now.media_c0 - prev->media_c0;
+               c0 *= mul;
+
+               if (c0 > time * dev_priv->rps.up_threshold)
+                       events = GEN6_PM_RP_UP_THRESHOLD;
+               else if (c0 < time * dev_priv->rps.down_threshold)
+                       events = GEN6_PM_RP_DOWN_THRESHOLD;
        }
 
+       dev_priv->rps.ei = now;
        return events;
 }
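The rewritten expiry handler reduces to a single comparison: busy ticks, pre-scaled by 100, against elapsed time multiplied by a percentage threshold. Worked numbers (all assumed) for a 92%-busy interval:

#include <stdio.h>

int main(void)
{
        unsigned long long time = 1000;       /* elapsed clock units           */
        unsigned long long c0   = 920 * 100;  /* busy units, pre-scaled by 100 */
        unsigned int up = 85, down = 60;      /* thresholds, in percent        */

        if (c0 > time * up)
                printf("UP_THRESHOLD event (%llu%% busy)\n", c0 / time);
        else if (c0 < time * down)
                printf("DOWN_THRESHOLD event (%llu%% busy)\n", c0 / time);
        else
                printf("no event (%llu%% busy)\n", c0 / time);
        return 0;
}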
 
@@ -4228,7 +4211,7 @@ void intel_irq_init(struct drm_i915_private *dev_priv)
        /* Let's track the enabled rps events */
        if (IS_VALLEYVIEW(dev_priv))
                /* WaGsvRC0ResidencyMethod:vlv */
-               dev_priv->pm_rps_events = GEN6_PM_RP_DOWN_EI_EXPIRED | GEN6_PM_RP_UP_EI_EXPIRED;
+               dev_priv->pm_rps_events = GEN6_PM_RP_UP_EI_EXPIRED;
        else
                dev_priv->pm_rps_events = GEN6_PM_RPS_EVENTS;
 
@@ -4266,6 +4249,16 @@ void intel_irq_init(struct drm_i915_private *dev_priv)
        if (!IS_GEN2(dev_priv))
                dev->vblank_disable_immediate = true;
 
+       /* Most platforms treat the display irq block as an always-on
+        * power domain. vlv/chv can disable it at runtime and need
+        * special care to avoid writing any of the display block registers
+        * outside of the power domain. We defer setting up the display irqs
+        * in this case to the runtime pm.
+        */
+       dev_priv->display_irqs_enabled = true;
+       if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
+               dev_priv->display_irqs_enabled = false;
+
        dev->driver->get_vblank_timestamp = i915_get_vblank_timestamp;
        dev->driver->get_scanout_position = i915_get_crtc_scanoutpos;
 
index 155906e848120ae2e1de533d81658080c546888d..df20e9bc1c0f3dee67eb555ae20741d907a6b430 100644 (file)
@@ -512,10 +512,36 @@ err_unpin:
        return ret;
 }
 
+static void
+i915_vma_remove(struct i915_vma *vma)
+{
+       struct drm_i915_gem_object *obj = vma->obj;
+
+       GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
+       GEM_BUG_ON(vma->flags & (I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND));
+
+       drm_mm_remove_node(&vma->node);
+       list_move_tail(&vma->vm_link, &vma->vm->unbound_list);
+
+       /* Since the unbound list is global, only move to that list if
+        * no more VMAs exist.
+        */
+       if (--obj->bind_count == 0)
+               list_move_tail(&obj->global_link,
+                              &to_i915(obj->base.dev)->mm.unbound_list);
+
+       /* And finally now the object is completely decoupled from this vma,
+        * we can drop its hold on the backing storage and allow it to be
+        * reaped by the shrinker.
+        */
+       i915_gem_object_unpin_pages(obj);
+       GEM_BUG_ON(atomic_read(&obj->mm.pages_pin_count) < obj->bind_count);
+}
+
 int __i915_vma_do_pin(struct i915_vma *vma,
                      u64 size, u64 alignment, u64 flags)
 {
-       unsigned int bound = vma->flags;
+       const unsigned int bound = vma->flags;
        int ret;
 
        lockdep_assert_held(&vma->vm->i915->drm.struct_mutex);
@@ -524,18 +550,18 @@ int __i915_vma_do_pin(struct i915_vma *vma,
 
        if (WARN_ON(bound & I915_VMA_PIN_OVERFLOW)) {
                ret = -EBUSY;
-               goto err;
+               goto err_unpin;
        }
 
        if ((bound & I915_VMA_BIND_MASK) == 0) {
                ret = i915_vma_insert(vma, size, alignment, flags);
                if (ret)
-                       goto err;
+                       goto err_unpin;
        }
 
        ret = i915_vma_bind(vma, vma->obj->cache_level, flags);
        if (ret)
-               goto err;
+               goto err_remove;
 
        if ((bound ^ vma->flags) & I915_VMA_GLOBAL_BIND)
                __i915_vma_set_map_and_fenceable(vma);
@@ -544,7 +570,12 @@ int __i915_vma_do_pin(struct i915_vma *vma,
        GEM_BUG_ON(i915_vma_misplaced(vma, size, alignment, flags));
        return 0;
 
-err:
+err_remove:
+       if ((bound & I915_VMA_BIND_MASK) == 0) {
+               GEM_BUG_ON(vma->pages);
+               i915_vma_remove(vma);
+       }
+err_unpin:
        __i915_vma_unpin(vma);
        return ret;
 }
@@ -657,9 +688,6 @@ int i915_vma_unbind(struct i915_vma *vma)
        }
        vma->flags &= ~(I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND);
 
-       drm_mm_remove_node(&vma->node);
-       list_move_tail(&vma->vm_link, &vma->vm->unbound_list);
-
        if (vma->pages != obj->mm.pages) {
                GEM_BUG_ON(!vma->pages);
                sg_free_table(vma->pages);
@@ -667,18 +695,7 @@ int i915_vma_unbind(struct i915_vma *vma)
        }
        vma->pages = NULL;
 
-       /* Since the unbound list is global, only move to that list if
-        * no more VMAs exist. */
-       if (--obj->bind_count == 0)
-               list_move_tail(&obj->global_link,
-                              &to_i915(obj->base.dev)->mm.unbound_list);
-
-       /* And finally now the object is completely decoupled from this vma,
-        * we can drop its hold on the backing storage and allow it to be
-        * reaped by the shrinker.
-        */
-       i915_gem_object_unpin_pages(obj);
-       GEM_BUG_ON(atomic_read(&obj->mm.pages_pin_count) < obj->bind_count);
+       i915_vma_remove(vma);
 
 destroy:
        if (unlikely(i915_vma_is_closed(vma)))
index 0085bc745f6aa5256cf21c1ae76dd2cbce80e387..de219b71fb76ecbfab51d58bd379064b555c8241 100644 (file)
@@ -35,7 +35,6 @@
  */
 
 #define I915_CSR_GLK "i915/glk_dmc_ver1_01.bin"
-MODULE_FIRMWARE(I915_CSR_GLK);
 #define GLK_CSR_VERSION_REQUIRED       CSR_VERSION(1, 1)
 
 #define I915_CSR_KBL "i915/kbl_dmc_ver1_01.bin"
index 01341670738fbb118d8402bbda62d7234c8c3863..ed1f4f272b4fb3907adeea175216173fea5253fb 100644 (file)
@@ -3669,10 +3669,6 @@ static void intel_update_pipe_config(struct intel_crtc *crtc,
        /* drm_atomic_helper_update_legacy_modeset_state might not be called. */
        crtc->base.mode = crtc->base.state->mode;
 
-       DRM_DEBUG_KMS("Updating pipe size %ix%i -> %ix%i\n",
-                     old_crtc_state->pipe_src_w, old_crtc_state->pipe_src_h,
-                     pipe_config->pipe_src_w, pipe_config->pipe_src_h);
-
        /*
         * Update pipe size and adjust fitter if needed: the reason for this is
         * that in compute_mode_changes we check the native mode (not the pfit
@@ -4796,23 +4792,17 @@ static void skylake_pfit_enable(struct intel_crtc *crtc)
        struct intel_crtc_scaler_state *scaler_state =
                &crtc->config->scaler_state;
 
-       DRM_DEBUG_KMS("for crtc_state = %p\n", crtc->config);
-
        if (crtc->config->pch_pfit.enabled) {
                int id;
 
-               if (WARN_ON(crtc->config->scaler_state.scaler_id < 0)) {
-                       DRM_ERROR("Requesting pfit without getting a scaler first\n");
+               if (WARN_ON(crtc->config->scaler_state.scaler_id < 0))
                        return;
-               }
 
                id = scaler_state->scaler_id;
                I915_WRITE(SKL_PS_CTRL(pipe, id), PS_SCALER_EN |
                        PS_FILTER_MEDIUM | scaler_state->scalers[id].mode);
                I915_WRITE(SKL_PS_WIN_POS(pipe, id), crtc->config->pch_pfit.pos);
                I915_WRITE(SKL_PS_WIN_SZ(pipe, id), crtc->config->pch_pfit.size);
-
-               DRM_DEBUG_KMS("for crtc_state = %p scaler_id = %d\n", crtc->config, id);
        }
 }
 
@@ -14379,6 +14369,24 @@ static void skl_update_crtcs(struct drm_atomic_state *state,
        } while (progress);
 }
 
+static void intel_atomic_helper_free_state(struct drm_i915_private *dev_priv)
+{
+       struct intel_atomic_state *state, *next;
+       struct llist_node *freed;
+
+       freed = llist_del_all(&dev_priv->atomic_helper.free_list);
+       llist_for_each_entry_safe(state, next, freed, freed)
+               drm_atomic_state_put(&state->base);
+}
+
+static void intel_atomic_helper_free_state_worker(struct work_struct *work)
+{
+       struct drm_i915_private *dev_priv =
+               container_of(work, typeof(*dev_priv), atomic_helper.free_work);
+
+       intel_atomic_helper_free_state(dev_priv);
+}
+
 static void intel_atomic_commit_tail(struct drm_atomic_state *state)
 {
        struct drm_device *dev = state->dev;
@@ -14545,6 +14553,8 @@ static void intel_atomic_commit_tail(struct drm_atomic_state *state)
         * can happen also when the device is completely off.
         */
        intel_uncore_arm_unclaimed_mmio_detection(dev_priv);
+
+       intel_atomic_helper_free_state(dev_priv);
 }
 
 static void intel_atomic_commit_work(struct work_struct *work)
@@ -14946,17 +14956,19 @@ static void intel_begin_crtc_commit(struct drm_crtc *crtc,
                to_intel_atomic_state(old_crtc_state->state);
        bool modeset = needs_modeset(crtc->state);
 
+       if (!modeset &&
+           (intel_cstate->base.color_mgmt_changed ||
+            intel_cstate->update_pipe)) {
+               intel_color_set_csc(crtc->state);
+               intel_color_load_luts(crtc->state);
+       }
+
        /* Perform vblank evasion around commit operation */
        intel_pipe_update_start(intel_crtc);
 
        if (modeset)
                goto out;
 
-       if (crtc->state->color_mgmt_changed || to_intel_crtc_state(crtc->state)->update_pipe) {
-               intel_color_set_csc(crtc->state);
-               intel_color_load_luts(crtc->state);
-       }
-
        if (intel_cstate->update_pipe)
                intel_update_pipe_config(intel_crtc, old_intel_cstate);
        else if (INTEL_GEN(dev_priv) >= 9)
@@ -16599,18 +16611,6 @@ fail:
        drm_modeset_acquire_fini(&ctx);
 }
 
-static void intel_atomic_helper_free_state(struct work_struct *work)
-{
-       struct drm_i915_private *dev_priv =
-               container_of(work, typeof(*dev_priv), atomic_helper.free_work);
-       struct intel_atomic_state *state, *next;
-       struct llist_node *freed;
-
-       freed = llist_del_all(&dev_priv->atomic_helper.free_list);
-       llist_for_each_entry_safe(state, next, freed, freed)
-               drm_atomic_state_put(&state->base);
-}
-
 int intel_modeset_init(struct drm_device *dev)
 {
        struct drm_i915_private *dev_priv = to_i915(dev);
@@ -16631,7 +16631,7 @@ int intel_modeset_init(struct drm_device *dev)
        dev->mode_config.funcs = &intel_mode_funcs;
 
        INIT_WORK(&dev_priv->atomic_helper.free_work,
-                 intel_atomic_helper_free_state);
+                 intel_atomic_helper_free_state_worker);
 
        intel_init_quirks(dev);
 
@@ -16696,12 +16696,11 @@ int intel_modeset_init(struct drm_device *dev)
                }
        }
 
-       intel_update_czclk(dev_priv);
-       intel_update_cdclk(dev_priv);
-       dev_priv->atomic_cdclk_freq = dev_priv->cdclk_freq;
-
        intel_shared_dpll_init(dev);
 
+       intel_update_czclk(dev_priv);
+       intel_modeset_init_hw(dev);
+
        if (dev_priv->max_cdclk_freq == 0)
                intel_update_max_cdclk(dev_priv);
 
@@ -17258,8 +17257,6 @@ void intel_modeset_gem_init(struct drm_device *dev)
 
        intel_init_gt_powersave(dev_priv);
 
-       intel_modeset_init_hw(dev);
-
        intel_setup_overlay(dev_priv);
 }
 
index 371acf109e343295ae060c4cf49908bf5607118e..ab1be5c80ea5960df983940e70f98f0d49cbd8e1 100644 (file)
@@ -105,6 +105,8 @@ intel_engine_setup(struct drm_i915_private *dev_priv,
        /* Nothing to do here, execute in order of dependencies */
        engine->schedule = NULL;
 
+       ATOMIC_INIT_NOTIFIER_HEAD(&engine->context_status_notifier);
+
        dev_priv->engine[id] = engine;
        return 0;
 }
index 1b8ba2e77539577f5eb997f9e1eb315f1f7ae078..2d449fb5d1d2b02dc016ebb50a026733b50acbf3 100644 (file)
@@ -357,14 +357,13 @@ static bool intel_fb_initial_config(struct drm_fb_helper *fb_helper,
                                    bool *enabled, int width, int height)
 {
        struct drm_i915_private *dev_priv = to_i915(fb_helper->dev);
-       unsigned long conn_configured, mask;
+       unsigned long conn_configured, conn_seq, mask;
        unsigned int count = min(fb_helper->connector_count, BITS_PER_LONG);
        int i, j;
        bool *save_enabled;
        bool fallback = true;
        int num_connectors_enabled = 0;
        int num_connectors_detected = 0;
-       int pass = 0;
 
        save_enabled = kcalloc(count, sizeof(bool), GFP_KERNEL);
        if (!save_enabled)
@@ -374,6 +373,7 @@ static bool intel_fb_initial_config(struct drm_fb_helper *fb_helper,
        mask = BIT(count) - 1;
        conn_configured = 0;
 retry:
+       conn_seq = conn_configured;
        for (i = 0; i < count; i++) {
                struct drm_fb_helper_connector *fb_conn;
                struct drm_connector *connector;
@@ -387,7 +387,7 @@ retry:
                if (conn_configured & BIT(i))
                        continue;
 
-               if (pass == 0 && !connector->has_tile)
+               if (conn_seq == 0 && !connector->has_tile)
                        continue;
 
                if (connector->status == connector_status_connected)
@@ -498,10 +498,8 @@ retry:
                conn_configured |= BIT(i);
        }
 
-       if ((conn_configured & mask) != mask) {
-               pass++;
+       if ((conn_configured & mask) != mask && conn_configured != conn_seq)
                goto retry;
-       }
 
        /*
         * If the BIOS didn't enable everything it could, fall back to have the
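The retry rework above swaps an unbounded pass counter for a progress check: loop again only if the previous sweep configured at least one new connector, which bounds the retries by construction. A hedged standalone sketch of that loop shape:

#include <stdio.h>

int main(void)
{
        unsigned long configured = 0, seq, mask = (1ul << 4) - 1;

        do {
                seq = configured;
                for (int i = 0; i < 4; i++) {
                        if (configured & (1ul << i))
                                continue;
                        if (i != 2)     /* pretend connector 2 never configures */
                                configured |= 1ul << i;
                }
        } while ((configured & mask) != mask && configured != seq);

        printf("configured mask: 0x%lx\n", configured); /* 0xb: bit 2 unconfigured */
        return 0;
}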
index d23c0fcff7516a9f3df4363010e2c3dfa84e3c00..8c04eca84351cbbe5d7f385fd6d262f40a080a28 100644 (file)
@@ -77,6 +77,11 @@ int intel_gvt_init(struct drm_i915_private *dev_priv)
                goto bail;
        }
 
+       if (!i915.enable_execlists) {
+               DRM_INFO("GPU guest virtualisation [GVT-g] disabled due to disabled execlist submission [i915.enable_execlists module parameter]\n");
+               goto bail;
+       }
+
        /*
         * We're not in host or fail to find a MPT module, disable GVT-g
         */
index ebae2bd839189c07588e88a526f3f804d08157b3..24b2fa5b62824dfa86d87c9e5d1c630957192867 100644 (file)
@@ -1298,16 +1298,34 @@ intel_hdmi_mode_valid(struct drm_connector *connector,
 
 static bool hdmi_12bpc_possible(struct intel_crtc_state *crtc_state)
 {
-       struct drm_device *dev = crtc_state->base.crtc->dev;
+       struct drm_i915_private *dev_priv =
+               to_i915(crtc_state->base.crtc->dev);
+       struct drm_atomic_state *state = crtc_state->base.state;
+       struct drm_connector_state *connector_state;
+       struct drm_connector *connector;
+       int i;
 
-       if (HAS_GMCH_DISPLAY(to_i915(dev)))
+       if (HAS_GMCH_DISPLAY(dev_priv))
                return false;
 
        /*
         * HDMI 12bpc affects the clocks, so it's only possible
         * when not cloning with other encoder types.
         */
-       return crtc_state->output_types == 1 << INTEL_OUTPUT_HDMI;
+       if (crtc_state->output_types != 1 << INTEL_OUTPUT_HDMI)
+               return false;
+
+       for_each_connector_in_state(state, connector, connector_state, i) {
+               const struct drm_display_info *info = &connector->display_info;
+
+               if (connector_state->crtc != crtc_state->base.crtc)
+                       continue;
+
+               if ((info->edid_hdmi_dc_modes & DRM_EDID_HDMI_DC_36) == 0)
+                       return false;
+       }
+
+       return true;
 }
 
 bool intel_hdmi_compute_config(struct intel_encoder *encoder,
index b62e3f8ad415f6173470c90a3cb1b35b04f91c4b..54208bef7a83561eb72c29d26079fa447211203c 100644 (file)
@@ -219,7 +219,7 @@ static void intel_hpd_irq_storm_reenable_work(struct work_struct *work)
                        }
                }
        }
-       if (dev_priv->display.hpd_irq_setup)
+       if (dev_priv->display_irqs_enabled && dev_priv->display.hpd_irq_setup)
                dev_priv->display.hpd_irq_setup(dev_priv);
        spin_unlock_irq(&dev_priv->irq_lock);
 
@@ -425,7 +425,7 @@ void intel_hpd_irq_handler(struct drm_i915_private *dev_priv,
                }
        }
 
-       if (storm_detected)
+       if (storm_detected && dev_priv->display_irqs_enabled)
                dev_priv->display.hpd_irq_setup(dev_priv);
        spin_unlock(&dev_priv->irq_lock);
 
@@ -471,10 +471,12 @@ void intel_hpd_init(struct drm_i915_private *dev_priv)
         * Interrupt setup is already guaranteed to be single-threaded, this is
         * just to make the assert_spin_locked checks happy.
         */
-       spin_lock_irq(&dev_priv->irq_lock);
-       if (dev_priv->display.hpd_irq_setup)
-               dev_priv->display.hpd_irq_setup(dev_priv);
-       spin_unlock_irq(&dev_priv->irq_lock);
+       if (dev_priv->display_irqs_enabled && dev_priv->display.hpd_irq_setup) {
+               spin_lock_irq(&dev_priv->irq_lock);
+               if (dev_priv->display_irqs_enabled)
+                       dev_priv->display.hpd_irq_setup(dev_priv);
+               spin_unlock_irq(&dev_priv->irq_lock);
+       }
 }
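Both hpd paths above now honor display_irqs_enabled, and intel_hpd_init() re-checks it under the lock since vlv/chv runtime pm can flip it. A hedged sketch of that double-checked gating, with plain fields in place of the locking and register writes:

#include <stdbool.h>
#include <stdio.h>

struct dev { bool display_irqs_enabled; bool hw_touched; };

static void hpd_irq_setup(struct dev *d) { d->hw_touched = true; }

static void hpd_init(struct dev *d)
{
        if (d->display_irqs_enabled) {
                /* spin_lock_irq(&irq_lock); */
                if (d->display_irqs_enabled)    /* re-check under the lock */
                        hpd_irq_setup(d);
                /* spin_unlock_irq(&irq_lock); */
        }
}

int main(void)
{
        struct dev vlv = { .display_irqs_enabled = false };
        hpd_init(&vlv);
        printf("display registers touched: %d\n", vlv.hw_touched); /* 0 */
        return 0;
}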
 
 static void i915_hpd_poll_init_work(struct work_struct *work)
index ebf8023d21e6fba52c01b54d46fb3456b4709731..471af3b480adc38a3a48c27d1999805836cf5630 100644 (file)
@@ -345,7 +345,8 @@ execlists_context_status_change(struct drm_i915_gem_request *rq,
        if (!IS_ENABLED(CONFIG_DRM_I915_GVT))
                return;
 
-       atomic_notifier_call_chain(&rq->ctx->status_notifier, status, rq);
+       atomic_notifier_call_chain(&rq->engine->context_status_notifier,
+                                  status, rq);
 }
 
 static void
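The change above moves the notifier chain from the context to the engine, so
GVT-g observes context-status events per engine. A notifier chain is, at
bottom, an ordered list of callbacks invoked with (status, data); a standalone
sketch of that shape (the real kernel API is atomic_notifier_call_chain()):

    #include <stddef.h>

    typedef int (*notifier_fn)(int status, void *data);

    struct notifier { notifier_fn fn; struct notifier *next; };

    static void call_chain(struct notifier *head, int status, void *data)
    {
            for (struct notifier *n = head; n; n = n->next)
                    n->fn(status, data);       /* each listener sees the event */
    }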
index 249623d45be0caa3e891e8a272706dff84dbc4be..6a29784d2b4137c9805e85ffb80265e05e46af53 100644 (file)
@@ -4891,6 +4891,12 @@ static void gen6_set_rps_thresholds(struct drm_i915_private *dev_priv, u8 val)
                break;
        }
 
+       /* Once BYT can survive dynamic software frequency adjustments
+        * without hanging the system, this restriction can be lifted.
+        */
+       if (IS_VALLEYVIEW(dev_priv))
+               goto skip_hw_write;
+
        I915_WRITE(GEN6_RP_UP_EI,
                   GT_INTERVAL_FROM_US(dev_priv, ei_up));
        I915_WRITE(GEN6_RP_UP_THRESHOLD,
@@ -4911,6 +4917,7 @@ static void gen6_set_rps_thresholds(struct drm_i915_private *dev_priv, u8 val)
                   GEN6_RP_UP_BUSY_AVG |
                   GEN6_RP_DOWN_IDLE_AVG);
 
+skip_hw_write:
        dev_priv->rps.power = new_power;
        dev_priv->rps.up_threshold = threshold_up;
        dev_priv->rps.down_threshold = threshold_down;
@@ -4921,8 +4928,9 @@ static u32 gen6_rps_pm_mask(struct drm_i915_private *dev_priv, u8 val)
 {
        u32 mask = 0;
 
+       /* We use UP_EI_EXPIRED interrupts for both up/down in manual mode */
        if (val > dev_priv->rps.min_freq_softlimit)
-               mask |= GEN6_PM_RP_DOWN_EI_EXPIRED | GEN6_PM_RP_DOWN_THRESHOLD | GEN6_PM_RP_DOWN_TIMEOUT;
+               mask |= GEN6_PM_RP_UP_EI_EXPIRED | GEN6_PM_RP_DOWN_THRESHOLD | GEN6_PM_RP_DOWN_TIMEOUT;
        if (val < dev_priv->rps.max_freq_softlimit)
                mask |= GEN6_PM_RP_UP_EI_EXPIRED | GEN6_PM_RP_UP_THRESHOLD;
 
@@ -5032,7 +5040,7 @@ void gen6_rps_busy(struct drm_i915_private *dev_priv)
 {
        mutex_lock(&dev_priv->rps.hw_lock);
        if (dev_priv->rps.enabled) {
-               if (dev_priv->pm_rps_events & (GEN6_PM_RP_DOWN_EI_EXPIRED | GEN6_PM_RP_UP_EI_EXPIRED))
+               if (dev_priv->pm_rps_events & GEN6_PM_RP_UP_EI_EXPIRED)
                        gen6_rps_reset_ei(dev_priv);
                I915_WRITE(GEN6_PMINTRMSK,
                           gen6_rps_pm_mask(dev_priv, dev_priv->rps.cur_freq));
@@ -7916,10 +7924,10 @@ static bool skl_pcode_try_request(struct drm_i915_private *dev_priv, u32 mbox,
  * @timeout_base_ms: timeout for polling with preemption enabled
  *
  * Keep resending the @request to @mbox until PCODE acknowledges it, PCODE
- * reports an error or an overall timeout of @timeout_base_ms+10 ms expires.
+ * reports an error or an overall timeout of @timeout_base_ms+50 ms expires.
  * The request is acknowledged once the PCODE reply dword equals @reply after
  * applying @reply_mask. Polling is first attempted with preemption enabled
- * for @timeout_base_ms and if this times out for another 10 ms with
+ * for @timeout_base_ms and if this times out for another 50 ms with
  * preemption disabled.
  *
  * Returns 0 on success, %-ETIMEDOUT in case of a timeout, <0 in case of some
@@ -7955,14 +7963,15 @@ int skl_pcode_request(struct drm_i915_private *dev_priv, u32 mbox, u32 request,
         * worst case) _and_ PCODE was busy for some reason even after a
         * (queued) request and @timeout_base_ms delay. As a workaround retry
         * the poll with preemption disabled to maximize the number of
-        * requests. Increase the timeout from @timeout_base_ms to 10ms to
+        * requests. Increase the timeout from @timeout_base_ms to 50ms to
         * account for interrupts that could reduce the number of these
-        * requests.
+        * requests, and for any quirks of the PCODE firmware that delays
+        * the request completion.
         */
        DRM_DEBUG_KMS("PCODE timeout, retrying with preemption disabled\n");
        WARN_ON_ONCE(timeout_base_ms > 3);
        preempt_disable();
-       ret = wait_for_atomic(COND, 10);
+       ret = wait_for_atomic(COND, 50);
        preempt_enable();
 
 out:
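A userspace sketch of the two-phase poll described in the comments above:
poll with sleeping allowed for timeout_base_ms, then busy-wait for another
50 ms. Only the 50 ms figure comes from the patch; everything else is
illustrative.

    #include <stdbool.h>
    #include <time.h>

    static long now_ms(void)
    {
            struct timespec ts;
            clock_gettime(CLOCK_MONOTONIC, &ts);
            return ts.tv_sec * 1000L + ts.tv_nsec / 1000000L;
    }

    int poll_cond(bool (*cond)(void), long timeout_base_ms)
    {
            long deadline = now_ms() + timeout_base_ms;
            while (now_ms() < deadline) {      /* phase 1: may sleep */
                    if (cond())
                            return 0;
                    nanosleep(&(struct timespec){ .tv_nsec = 500000 }, NULL);
            }
            deadline = now_ms() + 50;          /* phase 2: spin for 50 ms */
            while (now_ms() < deadline)
                    if (cond())
                            return 0;
            return -1;                         /* -ETIMEDOUT analogue */
    }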
index 91bc4abf5d3e578ae9dffe2dd2adfd9c4305e31e..6c5f9958197d5541381e7c2838c909fe19fa1520 100644 (file)
@@ -2024,6 +2024,8 @@ static int intel_ring_context_pin(struct intel_engine_cs *engine,
                ret = context_pin(ctx, flags);
                if (ret)
                        goto error;
+
+               ce->state->obj->mm.dirty = true;
        }
 
        /* The kernel context is only used as a placeholder for flushing the
index 79c2b8d72322cf58cacd3f732c5a55ce0ef68f3b..13dccb18cd43ed85aee58a6a15618c515094a83b 100644 (file)
@@ -403,6 +403,9 @@ struct intel_engine_cs {
         */
        struct i915_gem_context *legacy_active_context;
 
+       /* context_status_notifier: list of callbacks for context-switch changes */
+       struct atomic_notifier_head context_status_notifier;
+
        struct intel_engine_hangcheck hangcheck;
 
        bool needs_cmd_parser;
index 9ef54688872a86a70ab020a64b7209e040de70e0..9481ca9a3ae7e0a342957baf655a34f570a51eae 100644 (file)
@@ -254,9 +254,6 @@ skl_update_plane(struct drm_plane *drm_plane,
                int scaler_id = plane_state->scaler_id;
                const struct intel_scaler *scaler;
 
-               DRM_DEBUG_KMS("plane = %d PS_PLANE_SEL(plane) = 0x%x\n",
-                             plane_id, PS_PLANE_SEL(plane_id));
-
                scaler = &crtc_state->scaler_state.scalers[scaler_id];
 
                I915_WRITE(SKL_PS_CTRL(pipe, scaler_id),
index abe08885a5ba4ef1726d67809544534cf35a57df..b7ff592b14f5e00d68ff1cf6440dd45d6959606d 100644 (file)
@@ -119,6 +119,8 @@ fw_domains_get(struct drm_i915_private *dev_priv, enum forcewake_domains fw_doma
 
        for_each_fw_domain_masked(d, fw_domains, dev_priv)
                fw_domain_wait_ack(d);
+
+       dev_priv->uncore.fw_domains_active |= fw_domains;
 }
 
 static void
@@ -130,6 +132,8 @@ fw_domains_put(struct drm_i915_private *dev_priv, enum forcewake_domains fw_doma
                fw_domain_put(d);
                fw_domain_posting_read(d);
        }
+
+       dev_priv->uncore.fw_domains_active &= ~fw_domains;
 }
 
 static void
@@ -240,10 +244,8 @@ intel_uncore_fw_release_timer(struct hrtimer *timer)
        if (WARN_ON(domain->wake_count == 0))
                domain->wake_count++;
 
-       if (--domain->wake_count == 0) {
+       if (--domain->wake_count == 0)
                dev_priv->uncore.funcs.force_wake_put(dev_priv, domain->mask);
-               dev_priv->uncore.fw_domains_active &= ~domain->mask;
-       }
 
        spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
 
@@ -454,10 +456,8 @@ static void __intel_uncore_forcewake_get(struct drm_i915_private *dev_priv,
                        fw_domains &= ~domain->mask;
        }
 
-       if (fw_domains) {
+       if (fw_domains)
                dev_priv->uncore.funcs.force_wake_get(dev_priv, fw_domains);
-               dev_priv->uncore.fw_domains_active |= fw_domains;
-       }
 }
 
 /**
@@ -968,7 +968,6 @@ static noinline void ___force_wake_auto(struct drm_i915_private *dev_priv,
                fw_domain_arm_timer(domain);
 
        dev_priv->uncore.funcs.force_wake_get(dev_priv, fw_domains);
-       dev_priv->uncore.fw_domains_active |= fw_domains;
 }
 
 static inline void __force_wake_auto(struct drm_i915_private *dev_priv,
index 4414cf73735d26ccb655756327af8226f539f4f5..36602ac7e24835fb9350b3040037524f4c95b7d1 100644 (file)
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 The Linux Foundation. All rights reserved.
+/* Copyright (c) 2016-2017 The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -534,7 +534,7 @@ static void a5xx_destroy(struct msm_gpu *gpu)
        }
 
        if (a5xx_gpu->gpmu_bo) {
-               if (a5xx_gpu->gpmu_bo)
+               if (a5xx_gpu->gpmu_iova)
                        msm_gem_put_iova(a5xx_gpu->gpmu_bo, gpu->id);
                drm_gem_object_unreference_unlocked(a5xx_gpu->gpmu_bo);
        }
@@ -860,7 +860,9 @@ static const struct adreno_gpu_funcs funcs = {
                .idle = a5xx_idle,
                .irq = a5xx_irq,
                .destroy = a5xx_destroy,
+#ifdef CONFIG_DEBUG_FS
                .show = a5xx_show,
+#endif
        },
        .get_timestamp = a5xx_get_timestamp,
 };
index c9bd1e6225f4f96e3b8a79a101508f244de17baf..5ae65426b4e5593c5f88b19627c10b403995ed73 100644 (file)
@@ -418,18 +418,27 @@ int adreno_gpu_init(struct drm_device *drm, struct platform_device *pdev,
        return 0;
 }
 
-void adreno_gpu_cleanup(struct adreno_gpu *gpu)
+void adreno_gpu_cleanup(struct adreno_gpu *adreno_gpu)
 {
-       if (gpu->memptrs_bo) {
-               if (gpu->memptrs)
-                       msm_gem_put_vaddr(gpu->memptrs_bo);
+       struct msm_gpu *gpu = &adreno_gpu->base;
+
+       if (adreno_gpu->memptrs_bo) {
+               if (adreno_gpu->memptrs)
+                       msm_gem_put_vaddr(adreno_gpu->memptrs_bo);
+
+               if (adreno_gpu->memptrs_iova)
+                       msm_gem_put_iova(adreno_gpu->memptrs_bo, gpu->id);
+
+               drm_gem_object_unreference_unlocked(adreno_gpu->memptrs_bo);
+       }
+       release_firmware(adreno_gpu->pm4);
+       release_firmware(adreno_gpu->pfp);
 
-               if (gpu->memptrs_iova)
-                       msm_gem_put_iova(gpu->memptrs_bo, gpu->base.id);
+       msm_gpu_cleanup(gpu);
 
-               drm_gem_object_unreference_unlocked(gpu->memptrs_bo);
+       if (gpu->aspace) {
+               gpu->aspace->mmu->funcs->detach(gpu->aspace->mmu,
+                       iommu_ports, ARRAY_SIZE(iommu_ports));
+               msm_gem_address_space_destroy(gpu->aspace);
        }
-       release_firmware(gpu->pm4);
-       release_firmware(gpu->pfp);
-       msm_gpu_cleanup(&gpu->base);
 }
index 921270ea6059debadab1a785ed19ca79b064811c..a879ffa534b4d845007d3f974f9ce9c7df69dff0 100644 (file)
@@ -171,7 +171,7 @@ dsi_mgr_phy_enable(int id,
                        }
                }
        } else {
-               msm_dsi_host_reset_phy(mdsi->host);
+               msm_dsi_host_reset_phy(msm_dsi->host);
                ret = enable_phy(msm_dsi, src_pll_id, &shared_timings[id]);
                if (ret)
                        return ret;
index a54d3bb5baad9c01047a582ad3cad98a9efb7f66..8177e8511afd8c6827b1a731688e159da652a8d2 100644 (file)
 #include <linux/hdmi.h>
 #include "hdmi.h"
 
-
-/* Supported HDMI Audio channels */
-#define MSM_HDMI_AUDIO_CHANNEL_2               0
-#define MSM_HDMI_AUDIO_CHANNEL_4               1
-#define MSM_HDMI_AUDIO_CHANNEL_6               2
-#define MSM_HDMI_AUDIO_CHANNEL_8               3
-
 /* maps MSM_HDMI_AUDIO_CHANNEL_n consts used by audio driver to # of channels: */
 static int nchannels[] = { 2, 4, 6, 8 };
 
index 611da7a660c9426ed6c165ae90c8257f7a777360..238901987e00b0c1bc74979647a6cfadcfee084a 100644 (file)
@@ -18,7 +18,8 @@
 #ifndef __MDP5_PIPE_H__
 #define __MDP5_PIPE_H__
 
-#define SSPP_MAX       (SSPP_RGB3 + 1) /* TODO: Add SSPP_MAX in mdp5.xml.h */
+/* TODO: Add SSPP_MAX in mdp5.xml.h */
+#define SSPP_MAX       (SSPP_CURSOR1 + 1)
 
 /* represents a hw pipe, which is dynamically assigned to a plane */
 struct mdp5_hw_pipe {
index 59811f29607de60f2a22d53f9ec969e62bb39d98..68e509b3b9e4d08730e3901f46a397519c33e77c 100644 (file)
@@ -812,6 +812,12 @@ struct drm_gem_object *msm_gem_new(struct drm_device *dev,
 
        size = PAGE_ALIGN(size);
 
+       /* Disallow zero sized objects as they make the underlying
+        * infrastructure grumpy
+        */
+       if (size == 0)
+               return ERR_PTR(-EINVAL);
+
        ret = msm_gem_new_impl(dev, size, flags, NULL, &obj);
        if (ret)
                goto fail;
index 99e05aacbee181f4341625af8abbc91ad284f381..af5b6ba4095b06f6a24069f62e178618b547209e 100644 (file)
@@ -706,9 +706,6 @@ void msm_gpu_cleanup(struct msm_gpu *gpu)
                msm_ringbuffer_destroy(gpu->rb);
        }
 
-       if (gpu->aspace)
-               msm_gem_address_space_destroy(gpu->aspace);
-
        if (gpu->fctx)
                msm_fence_context_free(gpu->fctx);
 }
index e10a4eda4078ba9b211ca49530da2b659865c8ee..1144e0c9e8942ddb6226a7f409ed07f25a0eb96a 100644 (file)
@@ -65,13 +65,11 @@ static int mxsfb_set_pixel_fmt(struct mxsfb_drm_private *mxsfb)
        switch (format) {
        case DRM_FORMAT_RGB565:
                dev_dbg(drm->dev, "Setting up RGB565 mode\n");
-               ctrl |= CTRL_SET_BUS_WIDTH(STMLCDIF_16BIT);
                ctrl |= CTRL_SET_WORD_LENGTH(0);
                ctrl1 |= CTRL1_SET_BYTE_PACKAGING(0xf);
                break;
        case DRM_FORMAT_XRGB8888:
                dev_dbg(drm->dev, "Setting up XRGB8888 mode\n");
-               ctrl |= CTRL_SET_BUS_WIDTH(STMLCDIF_24BIT);
                ctrl |= CTRL_SET_WORD_LENGTH(3);
                /* Do not use packed pixels = one pixel per word instead. */
                ctrl1 |= CTRL1_SET_BYTE_PACKAGING(0x7);
@@ -87,6 +85,36 @@ static int mxsfb_set_pixel_fmt(struct mxsfb_drm_private *mxsfb)
        return 0;
 }
 
+static void mxsfb_set_bus_fmt(struct mxsfb_drm_private *mxsfb)
+{
+       struct drm_crtc *crtc = &mxsfb->pipe.crtc;
+       struct drm_device *drm = crtc->dev;
+       u32 bus_format = MEDIA_BUS_FMT_RGB888_1X24;
+       u32 reg;
+
+       reg = readl(mxsfb->base + LCDC_CTRL);
+
+       if (mxsfb->connector.display_info.num_bus_formats)
+               bus_format = mxsfb->connector.display_info.bus_formats[0];
+
+       reg &= ~CTRL_BUS_WIDTH_MASK;
+       switch (bus_format) {
+       case MEDIA_BUS_FMT_RGB565_1X16:
+               reg |= CTRL_SET_BUS_WIDTH(STMLCDIF_16BIT);
+               break;
+       case MEDIA_BUS_FMT_RGB666_1X18:
+               reg |= CTRL_SET_BUS_WIDTH(STMLCDIF_18BIT);
+               break;
+       case MEDIA_BUS_FMT_RGB888_1X24:
+               reg |= CTRL_SET_BUS_WIDTH(STMLCDIF_24BIT);
+               break;
+       default:
+               dev_err(drm->dev, "Unknown media bus format %d\n", bus_format);
+               break;
+       }
+       writel(reg, mxsfb->base + LCDC_CTRL);
+}
+
 static void mxsfb_enable_controller(struct mxsfb_drm_private *mxsfb)
 {
        u32 reg;
@@ -168,13 +196,22 @@ static void mxsfb_crtc_mode_set_nofb(struct mxsfb_drm_private *mxsfb)
                vdctrl0 |= VDCTRL0_HSYNC_ACT_HIGH;
        if (m->flags & DRM_MODE_FLAG_PVSYNC)
                vdctrl0 |= VDCTRL0_VSYNC_ACT_HIGH;
-       if (bus_flags & DRM_BUS_FLAG_DE_HIGH)
+       /* Make sure Data Enable is high active by default */
+       if (!(bus_flags & DRM_BUS_FLAG_DE_LOW))
                vdctrl0 |= VDCTRL0_ENABLE_ACT_HIGH;
-       if (bus_flags & DRM_BUS_FLAG_PIXDATA_NEGEDGE)
+       /*
+        * The DRM_BUS_FLAG_PIXDATA_* defines are controller centric,
+        * while the controller's VDCTRL0_DOTCLK flags are display centric.
+        * Drive on positive edge       -> display samples on falling edge
+        * DRM_BUS_FLAG_PIXDATA_POSEDGE -> VDCTRL0_DOTCLK_ACT_FALLING
+        */
+       if (bus_flags & DRM_BUS_FLAG_PIXDATA_POSEDGE)
                vdctrl0 |= VDCTRL0_DOTCLK_ACT_FALLING;
 
        writel(vdctrl0, mxsfb->base + LCDC_VDCTRL0);
 
+       mxsfb_set_bus_fmt(mxsfb);
+
        /* Frame length in lines. */
        writel(m->crtc_vtotal, mxsfb->base + LCDC_VDCTRL1);
 
@@ -184,8 +221,8 @@ static void mxsfb_crtc_mode_set_nofb(struct mxsfb_drm_private *mxsfb)
               VDCTRL2_SET_HSYNC_PERIOD(m->crtc_htotal),
               mxsfb->base + LCDC_VDCTRL2);
 
-       writel(SET_HOR_WAIT_CNT(m->crtc_hblank_end - m->crtc_hsync_end) |
-              SET_VERT_WAIT_CNT(m->crtc_vblank_end - m->crtc_vsync_end),
+       writel(SET_HOR_WAIT_CNT(m->crtc_htotal - m->crtc_hsync_start) |
+              SET_VERT_WAIT_CNT(m->crtc_vtotal - m->crtc_vsync_start),
               mxsfb->base + LCDC_VDCTRL3);
 
        writel(SET_DOTCLK_H_VALID_DATA_CNT(m->hdisplay),
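The new mxsfb_set_bus_fmt() above is a read-modify-write of a two-bit register
field. A standalone sketch of that pattern, with the register I/O stubbed by a
plain variable (the shift/mask values mirror the CTRL_BUS_WIDTH_MASK and
CTRL_SET_BUS_WIDTH macros, but this is not the driver code):

    #include <stdint.h>

    #define BUS_WIDTH_SHIFT  10
    #define BUS_WIDTH_MASK   (0x3u << BUS_WIDTH_SHIFT)
    #define SET_BUS_WIDTH(x) (((x) & 0x3u) << BUS_WIDTH_SHIFT)

    static uint32_t lcdc_ctrl;                 /* stands in for readl()/writel() */

    void set_bus_width(unsigned width_code)
    {
            uint32_t reg = lcdc_ctrl;          /* readl(base + LCDC_CTRL) */
            reg &= ~BUS_WIDTH_MASK;            /* clear the old field */
            reg |= SET_BUS_WIDTH(width_code);  /* install the new value */
            lcdc_ctrl = reg;                   /* writel(reg, base + LCDC_CTRL) */
    }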
index cdfbe0284635decf262db79c0b048291cc87c026..ff6d6a6f842e5a61def5c23264b10eef06fa1e75 100644 (file)
@@ -102,14 +102,18 @@ static void mxsfb_pipe_enable(struct drm_simple_display_pipe *pipe,
 {
        struct mxsfb_drm_private *mxsfb = drm_pipe_to_mxsfb_drm_private(pipe);
 
+       drm_panel_prepare(mxsfb->panel);
        mxsfb_crtc_enable(mxsfb);
+       drm_panel_enable(mxsfb->panel);
 }
 
 static void mxsfb_pipe_disable(struct drm_simple_display_pipe *pipe)
 {
        struct mxsfb_drm_private *mxsfb = drm_pipe_to_mxsfb_drm_private(pipe);
 
+       drm_panel_disable(mxsfb->panel);
        mxsfb_crtc_disable(mxsfb);
+       drm_panel_unprepare(mxsfb->panel);
 }
 
 static void mxsfb_pipe_update(struct drm_simple_display_pipe *pipe,
index fa8d173994071d64f0b772ec435e1e5038916a7f..b8e81422d4e26f9f8c3f692955753bb046aebcca 100644 (file)
@@ -112,6 +112,7 @@ static int mxsfb_attach_endpoint(struct drm_device *drm,
 
 int mxsfb_create_output(struct drm_device *drm)
 {
+       struct mxsfb_drm_private *mxsfb = drm->dev_private;
        struct device_node *ep_np = NULL;
        struct of_endpoint ep;
        int ret;
@@ -127,5 +128,8 @@ int mxsfb_create_output(struct drm_device *drm)
                }
        }
 
+       if (!mxsfb->panel)
+               return -EPROBE_DEFER;
+
        return 0;
 }
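The hunk above is the standard deferred-probe pattern: if a required resource
(here the panel) has not bound yet, return -EPROBE_DEFER so the driver core
retries the probe later instead of failing outright. A minimal sketch
(EPROBE_DEFER's value is kernel-internal and is defined here only so the
fragment stands alone):

    #define EPROBE_DEFER 517                   /* kernel-internal errno */

    struct output { void *panel; };

    int create_output(struct output *out)
    {
            if (!out->panel)
                    return -EPROBE_DEFER;      /* not ready yet: retry later */
            return 0;
    }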
index 31d62cd0d3d78abe465559992ea75f8dd988ff22..66a6ba9ec533ffe96c04915937bd6b8fba68a1b0 100644 (file)
@@ -44,6 +44,7 @@
 #define CTRL_DATA_SELECT               (1 << 16)
 #define CTRL_SET_BUS_WIDTH(x)          (((x) & 0x3) << 10)
 #define CTRL_GET_BUS_WIDTH(x)          (((x) >> 10) & 0x3)
+#define CTRL_BUS_WIDTH_MASK            (0x3 << 10)
 #define CTRL_SET_WORD_LENGTH(x)                (((x) & 0x3) << 8)
 #define CTRL_GET_WORD_LENGTH(x)                (((x) >> 8) & 0x3)
 #define CTRL_MASTER                    (1 << 5)
index af267c35d813cc7548f060ef5771d6cd4232b4c9..ee5883f59be5a1992c6bdd20c751285079f5d3c1 100644 (file)
@@ -147,9 +147,6 @@ static int omap_gem_dmabuf_mmap(struct dma_buf *buffer,
        struct drm_gem_object *obj = buffer->priv;
        int ret = 0;
 
-       if (WARN_ON(!obj->filp))
-               return -EINVAL;
-
        ret = drm_gem_mmap_obj(obj, omap_gem_mmap_size(obj), vma);
        if (ret < 0)
                return ret;
index 684f1703aa5c7189bc55ca1633aeafd78a97c55a..aaa3e80fecb425164a3e5bcf799081aa7d2cf049 100644 (file)
@@ -213,8 +213,8 @@ static void radeon_evict_flags(struct ttm_buffer_object *bo,
                        rbo->placement.num_busy_placement = 0;
                        for (i = 0; i < rbo->placement.num_placement; i++) {
                                if (rbo->placements[i].flags & TTM_PL_FLAG_VRAM) {
-                                       if (rbo->placements[0].fpfn < fpfn)
-                                               rbo->placements[0].fpfn = fpfn;
+                                       if (rbo->placements[i].fpfn < fpfn)
+                                               rbo->placements[i].fpfn = fpfn;
                                } else {
                                        rbo->placement.busy_placement =
                                                &rbo->placements[i];
index d12b8978142f69b52e19a159f9a628080f7a18e5..c7af9fdd20c729184654222dfc8b4bc6a870c066 100644 (file)
@@ -2984,6 +2984,16 @@ static void si_apply_state_adjust_rules(struct radeon_device *rdev,
                    (rdev->pdev->device == 0x6667)) {
                        max_sclk = 75000;
                }
+       } else if (rdev->family == CHIP_OLAND) {
+               if ((rdev->pdev->revision == 0xC7) ||
+                   (rdev->pdev->revision == 0x80) ||
+                   (rdev->pdev->revision == 0x81) ||
+                   (rdev->pdev->revision == 0x83) ||
+                   (rdev->pdev->revision == 0x87) ||
+                   (rdev->pdev->device == 0x6604) ||
+                   (rdev->pdev->device == 0x6605)) {
+                       max_sclk = 75000;
+               }
        }
 
        if (rps->vce_active) {
index b5bfbe50bd87167a7b28f528a74a034b1f68a738..b0ff304ce3dc4a9ac18f359a73498efede18cbd4 100644 (file)
@@ -32,6 +32,10 @@ void rcar_du_vsp_enable(struct rcar_du_crtc *crtc)
 {
        const struct drm_display_mode *mode = &crtc->crtc.state->adjusted_mode;
        struct rcar_du_device *rcdu = crtc->group->dev;
+       struct vsp1_du_lif_config cfg = {
+               .width = mode->hdisplay,
+               .height = mode->vdisplay,
+       };
        struct rcar_du_plane_state state = {
                .state = {
                        .crtc = &crtc->crtc,
@@ -66,12 +70,12 @@ void rcar_du_vsp_enable(struct rcar_du_crtc *crtc)
         */
        crtc->group->need_restart = true;
 
-       vsp1_du_setup_lif(crtc->vsp->vsp, mode->hdisplay, mode->vdisplay);
+       vsp1_du_setup_lif(crtc->vsp->vsp, &cfg);
 }
 
 void rcar_du_vsp_disable(struct rcar_du_crtc *crtc)
 {
-       vsp1_du_setup_lif(crtc->vsp->vsp, 0, 0);
+       vsp1_du_setup_lif(crtc->vsp->vsp, NULL);
 }
 
 void rcar_du_vsp_atomic_begin(struct rcar_du_crtc *crtc)
index f80bf9385e412db766424bf00cacd76458a64a8e..d745e8b50fb86458d09e400f5c35c9d257f4de2b 100644 (file)
@@ -464,6 +464,7 @@ static void tilcdc_crtc_enable(struct drm_crtc *crtc)
 {
        struct drm_device *dev = crtc->dev;
        struct tilcdc_crtc *tilcdc_crtc = to_tilcdc_crtc(crtc);
+       unsigned long flags;
 
        WARN_ON(!drm_modeset_is_locked(&crtc->mutex));
        mutex_lock(&tilcdc_crtc->enable_lock);
@@ -484,7 +485,17 @@ static void tilcdc_crtc_enable(struct drm_crtc *crtc)
        tilcdc_write_mask(dev, LCDC_RASTER_CTRL_REG,
                          LCDC_PALETTE_LOAD_MODE(DATA_ONLY),
                          LCDC_PALETTE_LOAD_MODE_MASK);
+
+       /* There is no real chance for a race here as the time stamp
+        * is taken before the raster DMA is started. The spin-lock is
+        * taken to have a memory barrier after taking the time-stamp
+        * and to avoid a context switch between taking the stamp and
+        * enabling the raster.
+        */
+       spin_lock_irqsave(&tilcdc_crtc->irq_lock, flags);
+       tilcdc_crtc->last_vblank = ktime_get();
        tilcdc_set(dev, LCDC_RASTER_CTRL_REG, LCDC_RASTER_ENABLE);
+       spin_unlock_irqrestore(&tilcdc_crtc->irq_lock, flags);
 
        drm_crtc_vblank_on(crtc);
 
@@ -539,7 +550,6 @@ static void tilcdc_crtc_off(struct drm_crtc *crtc, bool shutdown)
        }
 
        drm_flip_work_commit(&tilcdc_crtc->unref_work, priv->wq);
-       tilcdc_crtc->last_vblank = 0;
 
        tilcdc_crtc->enabled = false;
        mutex_unlock(&tilcdc_crtc->enable_lock);
@@ -602,7 +612,6 @@ int tilcdc_crtc_update_fb(struct drm_crtc *crtc,
 {
        struct tilcdc_crtc *tilcdc_crtc = to_tilcdc_crtc(crtc);
        struct drm_device *dev = crtc->dev;
-       unsigned long flags;
 
        WARN_ON(!drm_modeset_is_locked(&crtc->mutex));
 
@@ -614,28 +623,30 @@ int tilcdc_crtc_update_fb(struct drm_crtc *crtc,
        drm_framebuffer_reference(fb);
 
        crtc->primary->fb = fb;
+       tilcdc_crtc->event = event;
 
-       spin_lock_irqsave(&tilcdc_crtc->irq_lock, flags);
+       mutex_lock(&tilcdc_crtc->enable_lock);
 
-       if (crtc->hwmode.vrefresh && ktime_to_ns(tilcdc_crtc->last_vblank)) {
+       if (tilcdc_crtc->enabled) {
+               unsigned long flags;
                ktime_t next_vblank;
                s64 tdiff;
 
-               next_vblank = ktime_add_us(tilcdc_crtc->last_vblank,
-                       1000000 / crtc->hwmode.vrefresh);
+               spin_lock_irqsave(&tilcdc_crtc->irq_lock, flags);
 
+               next_vblank = ktime_add_us(tilcdc_crtc->last_vblank,
+                                          1000000 / crtc->hwmode.vrefresh);
                tdiff = ktime_to_us(ktime_sub(next_vblank, ktime_get()));
 
                if (tdiff < TILCDC_VBLANK_SAFETY_THRESHOLD_US)
                        tilcdc_crtc->next_fb = fb;
-       }
-
-       if (tilcdc_crtc->next_fb != fb)
-               set_scanout(crtc, fb);
+               else
+                       set_scanout(crtc, fb);
 
-       tilcdc_crtc->event = event;
+               spin_unlock_irqrestore(&tilcdc_crtc->irq_lock, flags);
+       }
 
-       spin_unlock_irqrestore(&tilcdc_crtc->irq_lock, flags);
+       mutex_unlock(&tilcdc_crtc->enable_lock);
 
        return 0;
 }
@@ -1036,5 +1047,5 @@ int tilcdc_crtc_create(struct drm_device *dev)
 
 fail:
        tilcdc_crtc_destroy(crtc);
-       return -ENOMEM;
+       return ret;
 }
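The reworked tilcdc_crtc_update_fb() above decides whether a flip is too close
to the next vblank to program immediately. The arithmetic in isolation
(microseconds throughout; the threshold name mirrors
TILCDC_VBLANK_SAFETY_THRESHOLD_US, but its value here is illustrative):

    #include <stdbool.h>
    #include <stdint.h>

    #define VBLANK_SAFETY_THRESHOLD_US 1000    /* illustrative value */

    bool must_defer_flip(int64_t last_vblank_us, int vrefresh, int64_t now_us)
    {
            int64_t next_vblank = last_vblank_us + 1000000 / vrefresh;
            int64_t tdiff = next_vblank - now_us;
            return tdiff < VBLANK_SAFETY_THRESHOLD_US;   /* too close: defer */
    }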
index fdb451e3ec01184a4642e6facd6ddf8e5f0cad47..26a7ad0f478978205be87f9409f309021b610884 100644 (file)
@@ -179,7 +179,7 @@ int ttm_base_object_init(struct ttm_object_file *tfile,
        if (unlikely(ret != 0))
                goto out_err0;
 
-       ret = ttm_ref_object_add(tfile, base, TTM_REF_USAGE, NULL);
+       ret = ttm_ref_object_add(tfile, base, TTM_REF_USAGE, NULL, false);
        if (unlikely(ret != 0))
                goto out_err1;
 
@@ -318,7 +318,8 @@ EXPORT_SYMBOL(ttm_ref_object_exists);
 
 int ttm_ref_object_add(struct ttm_object_file *tfile,
                       struct ttm_base_object *base,
-                      enum ttm_ref_type ref_type, bool *existed)
+                      enum ttm_ref_type ref_type, bool *existed,
+                      bool require_existed)
 {
        struct drm_open_hash *ht = &tfile->ref_hash[ref_type];
        struct ttm_ref_object *ref;
@@ -345,6 +346,9 @@ int ttm_ref_object_add(struct ttm_object_file *tfile,
                }
 
                rcu_read_unlock();
+               if (require_existed)
+                       return -EPERM;
+
                ret = ttm_mem_global_alloc(mem_glob, sizeof(*ref),
                                           false, false);
                if (unlikely(ret != 0))
@@ -449,10 +453,10 @@ void ttm_object_file_release(struct ttm_object_file **p_tfile)
                ttm_ref_object_release(&ref->kref);
        }
 
+       spin_unlock(&tfile->lock);
        for (i = 0; i < TTM_REF_NUM; ++i)
                drm_ht_remove(&tfile->ref_hash[i]);
 
-       spin_unlock(&tfile->lock);
        ttm_object_file_unref(&tfile);
 }
 EXPORT_SYMBOL(ttm_object_file_release);
@@ -529,9 +533,7 @@ void ttm_object_device_release(struct ttm_object_device **p_tdev)
 
        *p_tdev = NULL;
 
-       spin_lock(&tdev->object_lock);
        drm_ht_remove(&tdev->object_hash);
-       spin_unlock(&tdev->object_lock);
 
        kfree(tdev);
 }
@@ -635,7 +637,7 @@ int ttm_prime_fd_to_handle(struct ttm_object_file *tfile,
        prime = (struct ttm_prime_object *) dma_buf->priv;
        base = &prime->base;
        *handle = base->hash.key;
-       ret = ttm_ref_object_add(tfile, base, TTM_REF_USAGE, NULL);
+       ret = ttm_ref_object_add(tfile, base, TTM_REF_USAGE, NULL, false);
 
        dma_buf_put(dma_buf);
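The new require_existed argument threaded through ttm_ref_object_add() above
changes the contract: when set, the call may only take a reference on an
already-registered object and fails with -EPERM rather than creating one. A
toy standalone version of that contract (the flat table is a stand-in for the
real hash):

    #include <errno.h>
    #include <stdbool.h>
    #include <stddef.h>

    struct ref_entry { void *base; int refcount; };

    int ref_object_add(struct ref_entry *tbl, size_t n, void *base,
                       bool require_existed)
    {
            for (size_t i = 0; i < n; i++)
                    if (tbl[i].base == base) {
                            tbl[i].refcount++; /* existing ref: always OK */
                            return 0;
                    }
            if (require_existed)
                    return -EPERM;             /* refuse to create one */
            for (size_t i = 0; i < n; i++)
                    if (!tbl[i].base) {
                            tbl[i] = (struct ref_entry){ base, 1 };
                            return 0;
                    }
            return -ENOMEM;
    }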
 
index 0c06844af4455d6319e83c183fbbd594e61cd678..9fcf05ca492b0c065a323f69be32ed3cc93b630f 100644 (file)
@@ -846,6 +846,17 @@ static void vc4_crtc_destroy_state(struct drm_crtc *crtc,
        drm_atomic_helper_crtc_destroy_state(crtc, state);
 }
 
+static void
+vc4_crtc_reset(struct drm_crtc *crtc)
+{
+       if (crtc->state)
+               __drm_atomic_helper_crtc_destroy_state(crtc->state);
+
+       crtc->state = kzalloc(sizeof(struct vc4_crtc_state), GFP_KERNEL);
+       if (crtc->state)
+               crtc->state->crtc = crtc;
+}
+
 static const struct drm_crtc_funcs vc4_crtc_funcs = {
        .set_config = drm_atomic_helper_set_config,
        .destroy = vc4_crtc_destroy,
@@ -853,7 +864,7 @@ static const struct drm_crtc_funcs vc4_crtc_funcs = {
        .set_property = NULL,
        .cursor_set = NULL, /* handled by drm_mode_cursor_universal */
        .cursor_move = NULL, /* handled by drm_mode_cursor_universal */
-       .reset = drm_atomic_helper_crtc_reset,
+       .reset = vc4_crtc_reset,
        .atomic_duplicate_state = vc4_crtc_duplicate_state,
        .atomic_destroy_state = vc4_crtc_destroy_state,
        .gamma_set = vc4_crtc_gamma_set,
index 6541dd8b82dc0747433403b64795843a29e0544c..6b2708b4eafe84c832d41a10f75c2be9e65fc0b9 100644 (file)
@@ -538,7 +538,7 @@ int vmw_fence_create(struct vmw_fence_manager *fman,
                     struct vmw_fence_obj **p_fence)
 {
        struct vmw_fence_obj *fence;
-       int ret;
+       int ret;
 
        fence = kzalloc(sizeof(*fence), GFP_KERNEL);
        if (unlikely(fence == NULL))
@@ -701,6 +701,41 @@ void vmw_fence_fifo_up(struct vmw_fence_manager *fman)
 }
 
 
+/**
+ * vmw_fence_obj_lookup - Look up a user-space fence object
+ *
+ * @tfile: A struct ttm_object_file identifying the caller.
+ * @handle: A handle identifying the fence object.
+ * @return: A struct vmw_user_fence base ttm object on success or
+ * an error pointer on failure.
+ *
+ * The fence object is looked up and type-checked. The caller needs
+ * to have opened the fence object first, but since that happens on
+ * creation and fence objects aren't shareable, that's not an
+ * issue currently.
+ */
+static struct ttm_base_object *
+vmw_fence_obj_lookup(struct ttm_object_file *tfile, u32 handle)
+{
+       struct ttm_base_object *base = ttm_base_object_lookup(tfile, handle);
+
+       if (!base) {
+               pr_err("Invalid fence object handle 0x%08lx.\n",
+                      (unsigned long)handle);
+               return ERR_PTR(-EINVAL);
+       }
+
+       if (base->refcount_release != vmw_user_fence_base_release) {
+               pr_err("Invalid fence object handle 0x%08lx.\n",
+                      (unsigned long)handle);
+               ttm_base_object_unref(&base);
+               return ERR_PTR(-EINVAL);
+       }
+
+       return base;
+}
+
+
 int vmw_fence_obj_wait_ioctl(struct drm_device *dev, void *data,
                             struct drm_file *file_priv)
 {
@@ -726,13 +761,9 @@ int vmw_fence_obj_wait_ioctl(struct drm_device *dev, void *data,
                arg->kernel_cookie = jiffies + wait_timeout;
        }
 
-       base = ttm_base_object_lookup(tfile, arg->handle);
-       if (unlikely(base == NULL)) {
-               printk(KERN_ERR "Wait invalid fence object handle "
-                      "0x%08lx.\n",
-                      (unsigned long)arg->handle);
-               return -EINVAL;
-       }
+       base = vmw_fence_obj_lookup(tfile, arg->handle);
+       if (IS_ERR(base))
+               return PTR_ERR(base);
 
        fence = &(container_of(base, struct vmw_user_fence, base)->fence);
 
@@ -771,13 +802,9 @@ int vmw_fence_obj_signaled_ioctl(struct drm_device *dev, void *data,
        struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
        struct vmw_private *dev_priv = vmw_priv(dev);
 
-       base = ttm_base_object_lookup(tfile, arg->handle);
-       if (unlikely(base == NULL)) {
-               printk(KERN_ERR "Fence signaled invalid fence object handle "
-                      "0x%08lx.\n",
-                      (unsigned long)arg->handle);
-               return -EINVAL;
-       }
+       base = vmw_fence_obj_lookup(tfile, arg->handle);
+       if (IS_ERR(base))
+               return PTR_ERR(base);
 
        fence = &(container_of(base, struct vmw_user_fence, base)->fence);
        fman = fman_from_fence(fence);
@@ -1024,6 +1051,7 @@ int vmw_fence_event_ioctl(struct drm_device *dev, void *data,
                (struct drm_vmw_fence_event_arg *) data;
        struct vmw_fence_obj *fence = NULL;
        struct vmw_fpriv *vmw_fp = vmw_fpriv(file_priv);
+       struct ttm_object_file *tfile = vmw_fp->tfile;
        struct drm_vmw_fence_rep __user *user_fence_rep =
                (struct drm_vmw_fence_rep __user *)(unsigned long)
                arg->fence_rep;
@@ -1037,24 +1065,18 @@ int vmw_fence_event_ioctl(struct drm_device *dev, void *data,
         */
        if (arg->handle) {
                struct ttm_base_object *base =
-                       ttm_base_object_lookup_for_ref(dev_priv->tdev,
-                                                      arg->handle);
-
-               if (unlikely(base == NULL)) {
-                       DRM_ERROR("Fence event invalid fence object handle "
-                                 "0x%08lx.\n",
-                                 (unsigned long)arg->handle);
-                       return -EINVAL;
-               }
+                       vmw_fence_obj_lookup(tfile, arg->handle);
+
+               if (IS_ERR(base))
+                       return PTR_ERR(base);
+
                fence = &(container_of(base, struct vmw_user_fence,
                                       base)->fence);
                (void) vmw_fence_obj_reference(fence);
 
                if (user_fence_rep != NULL) {
-                       bool existed;
-
                        ret = ttm_ref_object_add(vmw_fp->tfile, base,
-                                                TTM_REF_USAGE, &existed);
+                                                TTM_REF_USAGE, NULL, false);
                        if (unlikely(ret != 0)) {
                                DRM_ERROR("Failed to reference a fence "
                                          "object.\n");
@@ -1097,8 +1119,7 @@ int vmw_fence_event_ioctl(struct drm_device *dev, void *data,
        return 0;
 out_no_create:
        if (user_fence_rep != NULL)
-               ttm_ref_object_base_unref(vmw_fpriv(file_priv)->tfile,
-                                         handle, TTM_REF_USAGE);
+               ttm_ref_object_base_unref(tfile, handle, TTM_REF_USAGE);
 out_no_ref_obj:
        vmw_fence_obj_unreference(&fence);
        return ret;
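vmw_fence_obj_lookup() above returns an error pointer rather than NULL, which
is the kernel's ERR_PTR/IS_ERR/PTR_ERR idiom. A self-contained miniature of
that idiom (the macros are reimplemented here so the sketch compiles on its
own; the lookup itself is a dummy):

    #include <errno.h>
    #include <stdint.h>
    #include <stdio.h>

    #define MAX_ERRNO 4095
    static void *ERR_PTR(long err) { return (void *)err; }
    static long PTR_ERR(const void *p) { return (long)p; }
    static int IS_ERR(const void *p)
    {
            return (uintptr_t)p >= (uintptr_t)-MAX_ERRNO;
    }

    static void *lookup(int handle)
    {
            static int object = 42;            /* stand-in for a real object */

            if (handle == 0)
                    return ERR_PTR(-EINVAL);   /* invalid handle */
            return &object;
    }

    int main(void)
    {
            void *base = lookup(0);
            if (IS_ERR(base))
                    printf("lookup failed: %ld\n", PTR_ERR(base));
            return 0;
    }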
index b8c6a03c8c54df15def2d359ee5253f213f1816e..5ec24fd801cd2bb1b32b66540b91edcca2ef3b6d 100644 (file)
@@ -114,8 +114,6 @@ int vmw_getparam_ioctl(struct drm_device *dev, void *data,
                param->value = dev_priv->has_dx;
                break;
        default:
-               DRM_ERROR("Illegal vmwgfx get param request: %d\n",
-                         param->param);
                return -EINVAL;
        }
 
@@ -186,7 +184,7 @@ int vmw_get_cap_3d_ioctl(struct drm_device *dev, void *data,
        bool gb_objects = !!(dev_priv->capabilities & SVGA_CAP_GBOBJECTS);
        struct vmw_fpriv *vmw_fp = vmw_fpriv(file_priv);
 
-       if (unlikely(arg->pad64 != 0)) {
+       if (unlikely(arg->pad64 != 0 || arg->max_size == 0)) {
                DRM_ERROR("Illegal GET_3D_CAP argument.\n");
                return -EINVAL;
        }
index 65b3f0369636710eda49086f72e250478dfe288d..bf23153d4f55515b54c66f5a250b8f04aa7f5051 100644 (file)
@@ -589,7 +589,7 @@ static int vmw_user_dmabuf_synccpu_grab(struct vmw_user_dma_buffer *user_bo,
                return ret;
 
        ret = ttm_ref_object_add(tfile, &user_bo->prime.base,
-                                TTM_REF_SYNCCPU_WRITE, &existed);
+                                TTM_REF_SYNCCPU_WRITE, &existed, false);
        if (ret != 0 || existed)
                ttm_bo_synccpu_write_release(&user_bo->dma.base);
 
@@ -773,7 +773,7 @@ int vmw_user_dmabuf_reference(struct ttm_object_file *tfile,
 
        *handle = user_bo->prime.base.hash.key;
        return ttm_ref_object_add(tfile, &user_bo->prime.base,
-                                 TTM_REF_USAGE, NULL);
+                                 TTM_REF_USAGE, NULL, false);
 }
 
 /*
index b445ce9b9757861ecc1ece1071f1c8c3a02a166f..05fa092c942beedb209b25777971a0c196d19d85 100644 (file)
@@ -713,11 +713,14 @@ int vmw_surface_define_ioctl(struct drm_device *dev, void *data,
                        128;
 
        num_sizes = 0;
-       for (i = 0; i < DRM_VMW_MAX_SURFACE_FACES; ++i)
+       for (i = 0; i < DRM_VMW_MAX_SURFACE_FACES; ++i) {
+               if (req->mip_levels[i] > DRM_VMW_MAX_MIP_LEVELS)
+                       return -EINVAL;
                num_sizes += req->mip_levels[i];
+       }
 
-       if (num_sizes > DRM_VMW_MAX_SURFACE_FACES *
-           DRM_VMW_MAX_MIP_LEVELS)
+       if (num_sizes > DRM_VMW_MAX_SURFACE_FACES * DRM_VMW_MAX_MIP_LEVELS ||
+           num_sizes == 0)
                return -EINVAL;
 
        size = vmw_user_surface_size + 128 +
@@ -891,17 +894,16 @@ vmw_surface_handle_reference(struct vmw_private *dev_priv,
        uint32_t handle;
        struct ttm_base_object *base;
        int ret;
+       bool require_exist = false;
 
        if (handle_type == DRM_VMW_HANDLE_PRIME) {
                ret = ttm_prime_fd_to_handle(tfile, u_handle, &handle);
                if (unlikely(ret != 0))
                        return ret;
        } else {
-               if (unlikely(drm_is_render_client(file_priv))) {
-                       DRM_ERROR("Render client refused legacy "
-                                 "surface reference.\n");
-                       return -EACCES;
-               }
+               if (unlikely(drm_is_render_client(file_priv)))
+                       require_exist = true;
+
                if (ACCESS_ONCE(vmw_fpriv(file_priv)->locked_master)) {
                        DRM_ERROR("Locked master refused legacy "
                                  "surface reference.\n");
@@ -929,17 +931,14 @@ vmw_surface_handle_reference(struct vmw_private *dev_priv,
 
                /*
                 * Make sure the surface creator has the same
-                * authenticating master.
+                * authenticating master, or is already registered with us.
                 */
                if (drm_is_primary_client(file_priv) &&
-                   user_srf->master != file_priv->master) {
-                       DRM_ERROR("Trying to reference surface outside of"
-                                 " master domain.\n");
-                       ret = -EACCES;
-                       goto out_bad_resource;
-               }
+                   user_srf->master != file_priv->master)
+                       require_exist = true;
 
-               ret = ttm_ref_object_add(tfile, base, TTM_REF_USAGE, NULL);
+               ret = ttm_ref_object_add(tfile, base, TTM_REF_USAGE, NULL,
+                                        require_exist);
                if (unlikely(ret != 0)) {
                        DRM_ERROR("Could not add a reference to a surface.\n");
                        goto out_bad_resource;
index 1aeb80e5242461830f1d4075f0fb59bcb6ddc898..8c54cb8f5d6d1013ec1f4a39e8f88fcfe3333758 100644 (file)
@@ -175,11 +175,11 @@ config HID_CHERRY
        Support for Cherry Cymotion keyboard.
 
 config HID_CHICONY
-       tristate "Chicony Tactical pad"
+       tristate "Chicony devices"
        depends on HID
        default !EXPERT
        ---help---
-       Support for Chicony Tactical pad.
+       Support for Chicony Tactical pad and special keys on Chicony keyboards.
 
 config HID_CORSAIR
        tristate "Corsair devices"
@@ -190,6 +190,7 @@ config HID_CORSAIR
 
        Supported devices:
        - Vengeance K90
+       - Scimitar PRO RGB
 
 config HID_PRODIKEYS
        tristate "Prodikeys PC-MIDI Keyboard support"
index bc3cec199feefdf437d0c0141c5ff6f73aa10308..f04ed9aabc3f9fea0baf5b074acd83b6d07527c6 100644 (file)
@@ -86,6 +86,7 @@ static const struct hid_device_id ch_devices[] = {
        { HID_USB_DEVICE(USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_CHICONY_WIRELESS2) },
        { HID_USB_DEVICE(USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_CHICONY_AK1D) },
        { HID_USB_DEVICE(USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_CHICONY_ACER_SWITCH12) },
+       { HID_USB_DEVICE(USB_VENDOR_ID_JESS, USB_DEVICE_ID_JESS_ZEN_AIO_KBD) },
        { }
 };
 MODULE_DEVICE_TABLE(hid, ch_devices);
index e9e87d337446918f672771551f41041755d83d22..63ec1993eaaa905af1583f7169e6be812cf0486d 100644 (file)
@@ -1870,6 +1870,7 @@ static const struct hid_device_id hid_have_special_driver[] = {
        { HID_USB_DEVICE(USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_CHICONY_AK1D) },
        { HID_USB_DEVICE(USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_CHICONY_ACER_SWITCH12) },
        { HID_USB_DEVICE(USB_VENDOR_ID_CORSAIR, USB_DEVICE_ID_CORSAIR_K90) },
+       { HID_USB_DEVICE(USB_VENDOR_ID_CORSAIR, USB_DEVICE_ID_CORSAIR_SCIMITAR_PRO_RGB) },
        { HID_USB_DEVICE(USB_VENDOR_ID_CREATIVELABS, USB_DEVICE_ID_PRODIKEYS_PCMIDI) },
        { HID_USB_DEVICE(USB_VENDOR_ID_CYGNAL, USB_DEVICE_ID_CYGNAL_CP2112) },
        { HID_USB_DEVICE(USB_VENDOR_ID_CYPRESS, USB_DEVICE_ID_CYPRESS_BARCODE_1) },
@@ -1910,6 +1911,7 @@ static const struct hid_device_id hid_have_special_driver[] = {
        { HID_USB_DEVICE(USB_VENDOR_ID_HOLTEK_ALT, USB_DEVICE_ID_HOLTEK_ALT_MOUSE_A081) },
        { HID_USB_DEVICE(USB_VENDOR_ID_HOLTEK_ALT, USB_DEVICE_ID_HOLTEK_ALT_MOUSE_A0C2) },
        { HID_USB_DEVICE(USB_VENDOR_ID_HUION, USB_DEVICE_ID_HUION_TABLET) },
+       { HID_USB_DEVICE(USB_VENDOR_ID_JESS, USB_DEVICE_ID_JESS_ZEN_AIO_KBD) },
        { HID_USB_DEVICE(USB_VENDOR_ID_JESS2, USB_DEVICE_ID_JESS2_COLOR_RUMBLE_PAD) },
        { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_ION, USB_DEVICE_ID_ICADE) },
        { HID_USB_DEVICE(USB_VENDOR_ID_KENSINGTON, USB_DEVICE_ID_KS_SLIMBLADE) },
@@ -2110,6 +2112,7 @@ static const struct hid_device_id hid_have_special_driver[] = {
        { HID_USB_DEVICE(USB_VENDOR_ID_WALTOP, USB_DEVICE_ID_WALTOP_SIRIUS_BATTERY_FREE_TABLET) },
        { HID_USB_DEVICE(USB_VENDOR_ID_X_TENSIONS, USB_DEVICE_ID_SPEEDLINK_VAD_CEZANNE) },
        { HID_USB_DEVICE(USB_VENDOR_ID_XIN_MO, USB_DEVICE_ID_XIN_MO_DUAL_ARCADE) },
+       { HID_USB_DEVICE(USB_VENDOR_ID_XIN_MO, USB_DEVICE_ID_THT_2P_ARCADE) },
        { HID_USB_DEVICE(USB_VENDOR_ID_ZEROPLUS, 0x0005) },
        { HID_USB_DEVICE(USB_VENDOR_ID_ZEROPLUS, 0x0030) },
        { HID_USB_DEVICE(USB_VENDOR_ID_ZYDACRON, USB_DEVICE_ID_ZYDACRON_REMOTE_CONTROL) },
index c0303f61c26a94f1998f6883d42a0fc8cb41f432..9ba5d98a118042a52dc40b895c3b2e8df67c0b39 100644 (file)
@@ -3,8 +3,10 @@
  *
  * Supported devices:
  *  - Vengeance K90 Keyboard
+ *  - Scimitar PRO RGB Gaming Mouse
  *
  * Copyright (c) 2015 Clement Vuchener
+ * Copyright (c) 2017 Oscar Campos
  */
 
 /*
@@ -670,10 +672,51 @@ static int corsair_input_mapping(struct hid_device *dev,
        return 0;
 }
 
+/*
+ * The report descriptor of the Corsair Scimitar RGB Pro gaming mouse is
+ * not parseable: it defines two consecutive Logical Minimum items for
+ * the Usage Page (Consumer) at rdesc bytes 75 and 77, byte 77 being
+ * 0x16 where it should obviously be 0x26, a 16-bit Logical Maximum.
+ * This prevents proper parsing of the report descriptor because the
+ * Logical Minimum ends up larger than the Logical Maximum.
+ *
+ * This driver fixes the report descriptor for:
+ * - USB ID b1c:1b3e, sold as Scimitar RGB Pro Gaming mouse
+ */
+
+static __u8 *corsair_mouse_report_fixup(struct hid_device *hdev, __u8 *rdesc,
+        unsigned int *rsize)
+{
+       struct usb_interface *intf = to_usb_interface(hdev->dev.parent);
+
+       if (intf->cur_altsetting->desc.bInterfaceNumber == 1) {
+               /*
+                * Corsair Scimitar RGB Pro report descriptor is broken and
+                * defines two different Logical Minimum for the Consumer
+                * Application. The byte 77 should be a 0x26 defining a 16
+                * bits integer for the Logical Maximum but it is a 0x16
+                * instead (Logical Minimum)
+                */
+               switch (hdev->product) {
+               case USB_DEVICE_ID_CORSAIR_SCIMITAR_PRO_RGB:
+                       if (*rsize >= 172 && rdesc[75] == 0x15 && rdesc[77] == 0x16
+                       && rdesc[78] == 0xff && rdesc[79] == 0x0f) {
+                               hid_info(hdev, "Fixing up report descriptor\n");
+                               rdesc[77] = 0x26;
+                       }
+                       break;
+               }
+
+       }
+       return rdesc;
+}
+
 static const struct hid_device_id corsair_devices[] = {
        { HID_USB_DEVICE(USB_VENDOR_ID_CORSAIR, USB_DEVICE_ID_CORSAIR_K90),
                .driver_data = CORSAIR_USE_K90_MACRO |
                               CORSAIR_USE_K90_BACKLIGHT },
+       { HID_USB_DEVICE(USB_VENDOR_ID_CORSAIR,
+            USB_DEVICE_ID_CORSAIR_SCIMITAR_PRO_RGB) },
        {}
 };
 
@@ -686,10 +729,14 @@ static struct hid_driver corsair_driver = {
        .event = corsair_event,
        .remove = corsair_remove,
        .input_mapping = corsair_input_mapping,
+       .report_fixup = corsair_mouse_report_fixup,
 };
 
 module_hid_driver(corsair_driver);
 
 MODULE_LICENSE("GPL");
+/* Original K90 driver author */
 MODULE_AUTHOR("Clement Vuchener");
+/* Scimitar PRO RGB driver author */
+MODULE_AUTHOR("Oscar Campos");
 MODULE_DESCRIPTION("HID driver for Corsair devices");
index 86c95d30ac801f2895caef97a575955289d352a4..4e2648c86c8c56142cd06b6a269433f72c5c07c9 100644 (file)
 #define USB_DEVICE_ID_CORSAIR_K70RGB    0x1b13
 #define USB_DEVICE_ID_CORSAIR_STRAFE    0x1b15
 #define USB_DEVICE_ID_CORSAIR_K65RGB    0x1b17
+#define USB_DEVICE_ID_CORSAIR_K70RGB_RAPIDFIRE  0x1b38
+#define USB_DEVICE_ID_CORSAIR_K65RGB_RAPIDFIRE  0x1b39
+#define USB_DEVICE_ID_CORSAIR_SCIMITAR_PRO_RGB  0x1b3e
 
 #define USB_VENDOR_ID_CREATIVELABS     0x041e
 #define USB_DEVICE_ID_CREATIVE_SB_OMNI_SURROUND_51     0x322c
 
 #define USB_VENDOR_ID_JESS             0x0c45
 #define USB_DEVICE_ID_JESS_YUREX       0x1010
+#define USB_DEVICE_ID_JESS_ZEN_AIO_KBD 0x5112
 
 #define USB_VENDOR_ID_JESS2            0x0f30
 #define USB_DEVICE_ID_JESS2_COLOR_RUMBLE_PAD 0x0111
 
 #define USB_VENDOR_ID_XIN_MO                   0x16c0
 #define USB_DEVICE_ID_XIN_MO_DUAL_ARCADE       0x05e1
+#define USB_DEVICE_ID_THT_2P_ARCADE            0x75e1
 
 #define USB_VENDOR_ID_XIROKU           0x1477
 #define USB_DEVICE_ID_XIROKU_SPX       0x1006
index f405b07d03816506215bd19fe3c878393370484a..740996f9bdd49dde3d26659f68d2addbff291c60 100644 (file)
@@ -2632,6 +2632,8 @@ err_stop:
                sony_leds_remove(sc);
        if (sc->quirks & SONY_BATTERY_SUPPORT)
                sony_battery_remove(sc);
+       if (sc->touchpad)
+               sony_unregister_touchpad(sc);
        sony_cancel_work_sync(sc);
        kfree(sc->output_report_dmabuf);
        sony_remove_dev_list(sc);
index 7df5227a7e61d6ff79acd62cb009e29ffa4b79d8..9ad7731d2e10dad45268b55fbcf9dfc025570682 100644 (file)
@@ -46,6 +46,7 @@ static int xinmo_event(struct hid_device *hdev, struct hid_field *field,
 
 static const struct hid_device_id xinmo_devices[] = {
        { HID_USB_DEVICE(USB_VENDOR_ID_XIN_MO, USB_DEVICE_ID_XIN_MO_DUAL_ARCADE) },
+       { HID_USB_DEVICE(USB_VENDOR_ID_XIN_MO, USB_DEVICE_ID_THT_2P_ARCADE) },
        { }
 };
 
index d6847a664446529831395a962aacab7cb49ab8f5..a69a3c88ab29f5fd736ad18a358fc185f63be99c 100644 (file)
@@ -80,6 +80,9 @@ static const struct hid_blacklist {
        { USB_VENDOR_ID_CORSAIR, USB_DEVICE_ID_CORSAIR_K70RGB, HID_QUIRK_NO_INIT_REPORTS },
        { USB_VENDOR_ID_CORSAIR, USB_DEVICE_ID_CORSAIR_K65RGB, HID_QUIRK_NO_INIT_REPORTS },
        { USB_VENDOR_ID_CORSAIR, USB_DEVICE_ID_CORSAIR_STRAFE, HID_QUIRK_NO_INIT_REPORTS | HID_QUIRK_ALWAYS_POLL },
+       { USB_VENDOR_ID_CORSAIR, USB_DEVICE_ID_CORSAIR_K70RGB_RAPIDFIRE, HID_QUIRK_NO_INIT_REPORTS | HID_QUIRK_ALWAYS_POLL },
+       { USB_VENDOR_ID_CORSAIR, USB_DEVICE_ID_CORSAIR_K65RGB_RAPIDFIRE, HID_QUIRK_NO_INIT_REPORTS | HID_QUIRK_ALWAYS_POLL },
+       { USB_VENDOR_ID_CORSAIR, USB_DEVICE_ID_CORSAIR_SCIMITAR_PRO_RGB, HID_QUIRK_NO_INIT_REPORTS | HID_QUIRK_ALWAYS_POLL },
        { USB_VENDOR_ID_CREATIVELABS, USB_DEVICE_ID_CREATIVE_SB_OMNI_SURROUND_51, HID_QUIRK_NOGET },
        { USB_VENDOR_ID_DMI, USB_DEVICE_ID_DMI_ENC, HID_QUIRK_NOGET },
        { USB_VENDOR_ID_DRAGONRISE, USB_DEVICE_ID_DRAGONRISE_WIIU, HID_QUIRK_MULTI_INPUT },
index be8f7e2a026f428f51200e395792dd715a612eeb..e2666ef84dc1ca479646fd1211f45341704755a8 100644 (file)
@@ -2165,6 +2165,14 @@ static int wacom_parse_and_register(struct wacom *wacom, bool wireless)
 
        wacom_update_name(wacom, wireless ? " (WL)" : "");
 
+       /* a pen-only Bamboo supports neither touch nor pad */
+       if ((features->type == BAMBOO_PEN) &&
+           ((features->device_type & WACOM_DEVICETYPE_TOUCH) ||
+           (features->device_type & WACOM_DEVICETYPE_PAD))) {
+               error = -ENODEV;
+               goto fail;
+       }
+
        error = wacom_add_shared_data(hdev);
        if (error)
                goto fail;
@@ -2208,14 +2216,8 @@ static int wacom_parse_and_register(struct wacom *wacom, bool wireless)
        /* touch only Bamboo doesn't support pen */
        if ((features->type == BAMBOO_TOUCH) &&
            (features->device_type & WACOM_DEVICETYPE_PEN)) {
-               error = -ENODEV;
-               goto fail_quirks;
-       }
-
-       /* pen only Bamboo neither support touch nor pad */
-       if ((features->type == BAMBOO_PEN) &&
-           ((features->device_type & WACOM_DEVICETYPE_TOUCH) ||
-           (features->device_type & WACOM_DEVICETYPE_PAD))) {
+               cancel_delayed_work_sync(&wacom->init_work);
+               _wacom_query_tablet_data(wacom);
                error = -ENODEV;
                goto fail_quirks;
        }
@@ -2579,7 +2581,9 @@ static void wacom_remove(struct hid_device *hdev)
 
        /* make sure we don't trigger the LEDs */
        wacom_led_groups_release(wacom);
-       wacom_release_resources(wacom);
+
+       if (wacom->wacom_wac.features.type != REMOTE)
+               wacom_release_resources(wacom);
 
        hid_set_drvdata(hdev, NULL);
 }
index 4aa3de9f1163b30eb64b4304f285a4167aef0cf0..94250c293be2a18b247e2be006a0e7e4faf4f6f8 100644 (file)
@@ -1959,8 +1959,10 @@ static void wacom_wac_pen_usage_mapping(struct hid_device *hdev,
                input_set_capability(input, EV_KEY, BTN_TOOL_BRUSH);
                input_set_capability(input, EV_KEY, BTN_TOOL_PENCIL);
                input_set_capability(input, EV_KEY, BTN_TOOL_AIRBRUSH);
-               input_set_capability(input, EV_KEY, BTN_TOOL_MOUSE);
-               input_set_capability(input, EV_KEY, BTN_TOOL_LENS);
+               if (!(features->device_type & WACOM_DEVICETYPE_DIRECT)) {
+                       input_set_capability(input, EV_KEY, BTN_TOOL_MOUSE);
+                       input_set_capability(input, EV_KEY, BTN_TOOL_LENS);
+               }
                break;
        case WACOM_HID_WD_FINGERWHEEL:
                wacom_map_usage(input, usage, field, EV_ABS, ABS_WHEEL, 0);
@@ -4197,10 +4199,10 @@ static const struct wacom_features wacom_features_0x343 =
          WACOM_DTU_OFFSET, WACOM_DTU_OFFSET };
 static const struct wacom_features wacom_features_0x360 =
        { "Wacom Intuos Pro M", 44800, 29600, 8191, 63,
-         INTUOSP2_BT, WACOM_INTUOS_RES, WACOM_INTUOS_RES, 9, .touch_max = 10 };
+         INTUOSP2_BT, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES, 9, .touch_max = 10 };
 static const struct wacom_features wacom_features_0x361 =
        { "Wacom Intuos Pro L", 62200, 43200, 8191, 63,
-         INTUOSP2_BT, WACOM_INTUOS_RES, WACOM_INTUOS_RES, 9, .touch_max = 10 };
+         INTUOSP2_BT, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES, 9, .touch_max = 10 };
 
 static const struct wacom_features wacom_features_HID_ANY_ID =
        { "Wacom HID", .type = HID_GENERIC, .oVid = HID_ANY_ID, .oPid = HID_ANY_ID };
index 81a80c82f1bd2b6a55df393a3df55376d709adfd..321b8833fa6f35a5a1941d319816ed2ffc83a6d2 100644 (file)
@@ -502,12 +502,15 @@ int vmbus_teardown_gpadl(struct vmbus_channel *channel, u32 gpadl_handle)
 
        wait_for_completion(&info->waitevent);
 
-       if (channel->rescind) {
-               ret = -ENODEV;
-               goto post_msg_err;
-       }
-
 post_msg_err:
+       /*
+        * If the channel has been rescinded, we will be awakened by the
+        * rescind handler; set the error code to zero so we don't leak
+        * memory.
+        */
+       if (channel->rescind)
+               ret = 0;
+
        spin_lock_irqsave(&vmbus_connection.channelmsg_lock, flags);
        list_del(&info->msglistentry);
        spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock, flags);
@@ -530,20 +533,18 @@ static int vmbus_close_internal(struct vmbus_channel *channel)
        int ret;
 
        /*
-        * vmbus_on_event(), running in the tasklet, can race
+        * vmbus_on_event(), running in the per-channel tasklet, can race
         * with vmbus_close_internal() in the case of SMP guest, e.g., when
         * the former is accessing channel->inbound.ring_buffer, the latter
-        * could be freeing the ring_buffer pages.
-        *
-        * To resolve the race, we can serialize them by disabling the
-        * tasklet when the latter is running here.
+        * could be freeing the ring_buffer pages, so here we must stop it
+        * first.
         */
-       hv_event_tasklet_disable(channel);
+       tasklet_disable(&channel->callback_event);
 
        /*
         * In case a device driver's probe() fails (e.g.,
         * util_probe() -> vmbus_open() returns -ENOMEM) and the device is
-        * rescinded later (e.g., we dynamically disble an Integrated Service
+        * rescinded later (e.g., we dynamically disable an Integrated Service
         * in Hyper-V Manager), the driver's remove() invokes vmbus_close():
         * here we should skip most of the below cleanup work.
         */
@@ -605,8 +606,6 @@ static int vmbus_close_internal(struct vmbus_channel *channel)
                get_order(channel->ringbuffer_pagecount * PAGE_SIZE));
 
 out:
-       hv_event_tasklet_enable(channel);
-
        return ret;
 }
 
index f33465d78a025680d7515978fdbcc432d51a8062..fbcb063523082840b61bf23d7852f36da4682647 100644 (file)
@@ -350,7 +350,8 @@ static struct vmbus_channel *alloc_channel(void)
 static void free_channel(struct vmbus_channel *channel)
 {
        tasklet_kill(&channel->callback_event);
-       kfree(channel);
+
+       kfree_rcu(channel, rcu);
 }
 
 static void percpu_channel_enq(void *arg)
@@ -359,14 +360,14 @@ static void percpu_channel_enq(void *arg)
        struct hv_per_cpu_context *hv_cpu
                = this_cpu_ptr(hv_context.cpu_context);
 
-       list_add_tail(&channel->percpu_list, &hv_cpu->chan_list);
+       list_add_tail_rcu(&channel->percpu_list, &hv_cpu->chan_list);
 }
 
 static void percpu_channel_deq(void *arg)
 {
        struct vmbus_channel *channel = arg;
 
-       list_del(&channel->percpu_list);
+       list_del_rcu(&channel->percpu_list);
 }
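The hunks above convert the per-CPU channel list to RCU: writers use the
_rcu list primitives and free through kfree_rcu(), so a reader walking the
list under rcu_read_lock() never dereferences memory that is being freed
underneath it. A kernel-style sketch of the pairing (assumes a kernel build
environment; this is not standalone userspace code):

    #include <linux/rculist.h>
    #include <linux/slab.h>

    struct chan {
            struct list_head node;
            struct rcu_head rcu;
    };

    static LIST_HEAD(chan_list);

    static void chan_add(struct chan *c)
    {
            list_add_tail_rcu(&c->node, &chan_list);
    }

    static void chan_remove(struct chan *c)
    {
            list_del_rcu(&c->node);
            kfree_rcu(c, rcu);                 /* free after a grace period */
    }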
 
 
@@ -381,19 +382,6 @@ static void vmbus_release_relid(u32 relid)
                       true);
 }
 
-void hv_event_tasklet_disable(struct vmbus_channel *channel)
-{
-       tasklet_disable(&channel->callback_event);
-}
-
-void hv_event_tasklet_enable(struct vmbus_channel *channel)
-{
-       tasklet_enable(&channel->callback_event);
-
-       /* In case there is any pending event */
-       tasklet_schedule(&channel->callback_event);
-}
-
 void hv_process_channel_removal(struct vmbus_channel *channel, u32 relid)
 {
        unsigned long flags;
@@ -402,7 +390,6 @@ void hv_process_channel_removal(struct vmbus_channel *channel, u32 relid)
        BUG_ON(!channel->rescind);
        BUG_ON(!mutex_is_locked(&vmbus_connection.channel_mutex));
 
-       hv_event_tasklet_disable(channel);
        if (channel->target_cpu != get_cpu()) {
                put_cpu();
                smp_call_function_single(channel->target_cpu,
@@ -411,7 +398,6 @@ void hv_process_channel_removal(struct vmbus_channel *channel, u32 relid)
                percpu_channel_deq(channel);
                put_cpu();
        }
-       hv_event_tasklet_enable(channel);
 
        if (channel->primary_channel == NULL) {
                list_del(&channel->listentry);
@@ -505,7 +491,6 @@ static void vmbus_process_offer(struct vmbus_channel *newchannel)
 
        init_vp_index(newchannel, dev_type);
 
-       hv_event_tasklet_disable(newchannel);
        if (newchannel->target_cpu != get_cpu()) {
                put_cpu();
                smp_call_function_single(newchannel->target_cpu,
@@ -515,7 +500,6 @@ static void vmbus_process_offer(struct vmbus_channel *newchannel)
                percpu_channel_enq(newchannel);
                put_cpu();
        }
-       hv_event_tasklet_enable(newchannel);
 
        /*
         * This state is used to indicate a successful open
@@ -565,7 +549,6 @@ err_deq_chan:
        list_del(&newchannel->listentry);
        mutex_unlock(&vmbus_connection.channel_mutex);
 
-       hv_event_tasklet_disable(newchannel);
        if (newchannel->target_cpu != get_cpu()) {
                put_cpu();
                smp_call_function_single(newchannel->target_cpu,
@@ -574,7 +557,6 @@ err_deq_chan:
                percpu_channel_deq(newchannel);
                put_cpu();
        }
-       hv_event_tasklet_enable(newchannel);
 
        vmbus_release_relid(newchannel->offermsg.child_relid);
 
@@ -814,6 +796,7 @@ static void vmbus_onoffer(struct vmbus_channel_message_header *hdr)
        /* Allocate the channel object and save this offer. */
        newchannel = alloc_channel();
        if (!newchannel) {
+               vmbus_release_relid(offer->child_relid);
                pr_err("Unable to allocate channel object\n");
                return;
        }
index 9aee6014339dffc8627d173446e7bfe0dc06247c..a5596a642ed06b31beb5b2e19347e10b9abb0ed2 100644 (file)
@@ -71,7 +71,6 @@ static DECLARE_WORK(fcopy_send_work, fcopy_send_data);
 static const char fcopy_devname[] = "vmbus/hv_fcopy";
 static u8 *recv_buffer;
 static struct hvutil_transport *hvt;
-static struct completion release_event;
 /*
  * This state maintains the version number registered by the daemon.
  */
@@ -331,7 +330,6 @@ static void fcopy_on_reset(void)
 
        if (cancel_delayed_work_sync(&fcopy_timeout_work))
                fcopy_respond_to_host(HV_E_FAIL);
-       complete(&release_event);
 }
 
 int hv_fcopy_init(struct hv_util_service *srv)
@@ -339,7 +337,6 @@ int hv_fcopy_init(struct hv_util_service *srv)
        recv_buffer = srv->recv_buffer;
        fcopy_transaction.recv_channel = srv->channel;
 
-       init_completion(&release_event);
        /*
         * When this driver loads, the user level daemon that
         * processes the host requests may not yet be running.
@@ -361,5 +358,4 @@ void hv_fcopy_deinit(void)
        fcopy_transaction.state = HVUTIL_DEVICE_DYING;
        cancel_delayed_work_sync(&fcopy_timeout_work);
        hvutil_transport_destroy(hvt);
-       wait_for_completion(&release_event);
 }
index de263712e247c2b8c47f450f3d1b7eb387f1e073..a1adfe2cfb34244e8f86a48cc1143c0f439be709 100644 (file)
@@ -101,7 +101,6 @@ static DECLARE_WORK(kvp_sendkey_work, kvp_send_key);
 static const char kvp_devname[] = "vmbus/hv_kvp";
 static u8 *recv_buffer;
 static struct hvutil_transport *hvt;
-static struct completion release_event;
 /*
  * Register the kernel component with the user-level daemon.
  * As part of this registration, pass the LIC version number.
@@ -714,7 +713,6 @@ static void kvp_on_reset(void)
        if (cancel_delayed_work_sync(&kvp_timeout_work))
                kvp_respond_to_host(NULL, HV_E_FAIL);
        kvp_transaction.state = HVUTIL_DEVICE_INIT;
-       complete(&release_event);
 }
 
 int
@@ -723,7 +721,6 @@ hv_kvp_init(struct hv_util_service *srv)
        recv_buffer = srv->recv_buffer;
        kvp_transaction.recv_channel = srv->channel;
 
-       init_completion(&release_event);
        /*
         * When this driver loads, the user level daemon that
         * processes the host requests may not yet be running.
@@ -747,5 +744,4 @@ void hv_kvp_deinit(void)
        cancel_delayed_work_sync(&kvp_timeout_work);
        cancel_work_sync(&kvp_sendkey_work);
        hvutil_transport_destroy(hvt);
-       wait_for_completion(&release_event);
 }
index bcc03f0748d61cd5a8c9f31447b77fd8b2e7e13b..e659d1b94a5794f11272086374e45b5fc99a3df6 100644 (file)
@@ -79,7 +79,6 @@ static int dm_reg_value;
 static const char vss_devname[] = "vmbus/hv_vss";
 static __u8 *recv_buffer;
 static struct hvutil_transport *hvt;
-static struct completion release_event;
 
 static void vss_timeout_func(struct work_struct *dummy);
 static void vss_handle_request(struct work_struct *dummy);
@@ -361,13 +360,11 @@ static void vss_on_reset(void)
        if (cancel_delayed_work_sync(&vss_timeout_work))
                vss_respond_to_host(HV_E_FAIL);
        vss_transaction.state = HVUTIL_DEVICE_INIT;
-       complete(&release_event);
 }
 
 int
 hv_vss_init(struct hv_util_service *srv)
 {
-       init_completion(&release_event);
        if (vmbus_proto_version < VERSION_WIN8_1) {
                pr_warn("Integration service 'Backup (volume snapshot)'"
                        " not supported on this host version.\n");
@@ -400,5 +397,4 @@ void hv_vss_deinit(void)
        cancel_delayed_work_sync(&vss_timeout_work);
        cancel_work_sync(&vss_handle_request_work);
        hvutil_transport_destroy(hvt);
-       wait_for_completion(&release_event);
 }
index 3042eaa13062bbdfbdba853521b7632d35e619e8..186b10083c552b1e026cc056bd131fe8548b2f03 100644 (file)
@@ -590,6 +590,8 @@ static int hv_timesync_init(struct hv_util_service *srv)
        if (!hyperv_cs)
                return -ENODEV;
 
+       spin_lock_init(&host_ts.lock);
+
        INIT_WORK(&wrk.work, hv_set_host_time);
 
        /*
index c235a95152671104cd7042a451b16ccd0d0d06ab..4402a71e23f7f7277c6d1561c4a8db5d46489b8d 100644 (file)
@@ -182,10 +182,11 @@ static int hvt_op_release(struct inode *inode, struct file *file)
         * connects back.
         */
        hvt_reset(hvt);
-       mutex_unlock(&hvt->lock);
 
        if (mode_old == HVUTIL_TRANSPORT_DESTROY)
-               hvt_transport_free(hvt);
+               complete(&hvt->release);
+
+       mutex_unlock(&hvt->lock);
 
        return 0;
 }
@@ -304,6 +305,7 @@ struct hvutil_transport *hvutil_transport_init(const char *name,
 
        init_waitqueue_head(&hvt->outmsg_q);
        mutex_init(&hvt->lock);
+       init_completion(&hvt->release);
 
        spin_lock(&hvt_list_lock);
        list_add(&hvt->list, &hvt_list);
@@ -351,6 +353,8 @@ void hvutil_transport_destroy(struct hvutil_transport *hvt)
        if (hvt->cn_id.idx > 0 && hvt->cn_id.val > 0)
                cn_del_callback(&hvt->cn_id);
 
-       if (mode_old != HVUTIL_TRANSPORT_CHARDEV)
-               hvt_transport_free(hvt);
+       if (mode_old == HVUTIL_TRANSPORT_CHARDEV)
+               wait_for_completion(&hvt->release);
+
+       hvt_transport_free(hvt);
 }
index d98f5225c3e693468fdb27ccc03de8d17e2a8576..79afb626e1668981beaa740b4e7d618f22ab194d 100644 (file)
@@ -41,6 +41,7 @@ struct hvutil_transport {
        int outmsg_len;                     /* its length */
        wait_queue_head_t outmsg_q;         /* poll/read wait queue */
        struct mutex lock;                  /* protects struct members */
+       struct completion release;          /* synchronize with fd release */
 };
 
 struct hvutil_transport *hvutil_transport_init(const char *name,
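Note on the hvutil_transport changes above: the per-service release_event completions (removed from fcopy, kvp and vss) are replaced by one completion owned by the transport itself, closing the window where hvt_transport_free() could run while the daemon's file descriptor was still open. A minimal sketch of the handshake, using the names from the diff with the locking condensed:

	/* fd release side: signal only if a destroy is already pending. */
	mutex_lock(&hvt->lock);
	hvt_reset(hvt);
	if (mode_old == HVUTIL_TRANSPORT_DESTROY)
		complete(&hvt->release);
	mutex_unlock(&hvt->lock);

	/* destroy side: wait for that signal before freeing. */
	if (mode_old == HVUTIL_TRANSPORT_CHARDEV)
		wait_for_completion(&hvt->release);
	hvt_transport_free(hvt);	/* safe: no open fd can remain */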
index da6b59ba594039e6d490e8cc9903f41f99032900..8370b9dc6037c17959abc3d9c45c9b233bd0004f 100644 (file)
@@ -939,8 +939,10 @@ static void vmbus_chan_sched(struct hv_per_cpu_context *hv_cpu)
                if (relid == 0)
                        continue;
 
+               rcu_read_lock();
+
                /* Find channel based on relid */
-               list_for_each_entry(channel, &hv_cpu->chan_list, percpu_list) {
+               list_for_each_entry_rcu(channel, &hv_cpu->chan_list, percpu_list) {
                        if (channel->offermsg.child_relid != relid)
                                continue;
 
@@ -956,6 +958,8 @@ static void vmbus_chan_sched(struct hv_per_cpu_context *hv_cpu)
                                tasklet_schedule(&channel->callback_event);
                        }
                }
+
+               rcu_read_unlock();
        }
 }
 
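Taken together, the channel_mgmt.c hunks above and this vmbus_drv.c hunk convert the per-CPU channel list to RCU: the interrupt-path reader traverses the list locklessly while teardown unlinks with list_del_rcu() and defers the free with kfree_rcu(), so a reader racing with removal never touches freed memory. A self-contained sketch of the pattern (stub types, not the driver's own):

	#include <linux/list.h>
	#include <linux/rcupdate.h>
	#include <linux/slab.h>

	struct chan {
		struct list_head node;
		struct rcu_head rcu;
	};

	static void reader(struct list_head *head)	/* e.g. IRQ context */
	{
		struct chan *c;

		rcu_read_lock();
		list_for_each_entry_rcu(c, head, node)
			;	/* c stays valid until rcu_read_unlock() */
		rcu_read_unlock();
	}

	static void writer(struct chan *c)
	{
		list_del_rcu(&c->node);	/* readers may still see c */
		kfree_rcu(c, rcu);	/* freed after a grace period */
	}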
index cccef87963e050afb99181d63d3ed5dcf401d5a1..975c43d446f8593d0e701efeaf8da133717cff74 100644 (file)
@@ -646,6 +646,9 @@ static int atk_read_value(struct atk_sensor_data *sensor, u64 *value)
                else
                        err = atk_read_value_new(sensor, value);
 
+               if (err)
+                       return err;
+
                sensor->is_valid = true;
                sensor->last_updated = jiffies;
                sensor->cached_value = *value;
index efb01c247e2d90680f02980af76148799f3823fc..4dfc7238313ebd393e0e4dc0e17bb4414d1e8b68 100644 (file)
@@ -3198,7 +3198,7 @@ static int __init sm_it87_init(void)
 {
        int sioaddr[2] = { REG_2E, REG_4E };
        struct it87_sio_data sio_data;
-       unsigned short isa_address;
+       unsigned short isa_address[2];
        bool found = false;
        int i, err;
 
@@ -3208,15 +3208,29 @@ static int __init sm_it87_init(void)
 
        for (i = 0; i < ARRAY_SIZE(sioaddr); i++) {
                memset(&sio_data, 0, sizeof(struct it87_sio_data));
-               isa_address = 0;
-               err = it87_find(sioaddr[i], &isa_address, &sio_data);
-               if (err || isa_address == 0)
+               isa_address[i] = 0;
+               err = it87_find(sioaddr[i], &isa_address[i], &sio_data);
+               if (err || isa_address[i] == 0)
                        continue;
+               /*
+                * Don't register second chip if its ISA address matches
+                * the first chip's ISA address.
+                */
+               if (i && isa_address[i] == isa_address[0])
+                       break;
 
-               err = it87_device_add(i, isa_address, &sio_data);
+               err = it87_device_add(i, isa_address[i], &sio_data);
                if (err)
                        goto exit_dev_unregister;
+
                found = true;
+
+               /*
+                * IT8705F may respond on both SIO addresses.
+                * Stop probing after finding one.
+                */
+               if (sio_data.type == it87)
+                       break;
        }
 
        if (!found) {
index c1b9275978f9d9ee9172e99e569de7dca48b491d..281491cca5103ad4b82e2f2a52be6f2408262bdb 100644 (file)
@@ -311,7 +311,7 @@ static int max31790_write_pwm(struct device *dev, u32 attr, int channel,
                data->pwm[channel] = val << 8;
                err = i2c_smbus_write_word_swapped(client,
                                                   MAX31790_REG_PWMOUT(channel),
-                                                  val);
+                                                  data->pwm[channel]);
                break;
        case hwmon_pwm_enable:
                fan_config = data->fan_config[channel];
index cdd9b3b26195aa38f03a910d10e483bca61edcca..7563eceeaaeaa3a4e70855a6d9622e960b0ab6c0 100644 (file)
@@ -221,8 +221,10 @@ static int intel_th_output_activate(struct intel_th_device *thdev)
        else
                intel_th_trace_enable(thdev);
 
-       if (ret)
+       if (ret) {
                pm_runtime_put(&thdev->dev);
+               module_put(thdrv->driver.owner);
+       }
 
        return ret;
 }
index 0bba3842336e6d6b9cb9a2b78ca91d7ff2712dcd..590cf90dd21a61a6855dd99885c99da7d2dbc33a 100644 (file)
@@ -85,6 +85,16 @@ static const struct pci_device_id intel_th_pci_id_table[] = {
                PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xa2a6),
                .driver_data = (kernel_ulong_t)0,
        },
+       {
+               /* Denverton */
+               PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x19e1),
+               .driver_data = (kernel_ulong_t)0,
+       },
+       {
+               /* Gemini Lake */
+               PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x318e),
+               .driver_data = (kernel_ulong_t)0,
+       },
        { 0 },
 };
 
index 0652281662a8b35b974d084c5d0636f8ac75e450..78792b4d6437c7cca6d84fd4977773f0c65781e2 100644 (file)
@@ -465,6 +465,7 @@ static int brcmstb_i2c_xfer(struct i2c_adapter *adapter,
        u8 *tmp_buf;
        int len = 0;
        int xfersz = brcmstb_i2c_get_xfersz(dev);
+       u32 cond, cond_per_msg;
 
        if (dev->is_suspended)
                return -EBUSY;
@@ -481,10 +482,11 @@ static int brcmstb_i2c_xfer(struct i2c_adapter *adapter,
                        pmsg->buf ? pmsg->buf[0] : '0', pmsg->len);
 
                if (i < (num - 1) && (msgs[i + 1].flags & I2C_M_NOSTART))
-                       brcmstb_set_i2c_start_stop(dev, ~(COND_START_STOP));
+                       cond = ~COND_START_STOP;
                else
-                       brcmstb_set_i2c_start_stop(dev,
-                                                  COND_RESTART | COND_NOSTOP);
+                       cond = COND_RESTART | COND_NOSTOP;
+
+               brcmstb_set_i2c_start_stop(dev, cond);
 
                /* Send slave address */
                if (!(pmsg->flags & I2C_M_NOSTART)) {
@@ -497,13 +499,24 @@ static int brcmstb_i2c_xfer(struct i2c_adapter *adapter,
                        }
                }
 
+               cond_per_msg = cond;
+
                /* Perform data transfer */
                while (len) {
                        bytes_to_xfer = min(len, xfersz);
 
-                       if (len <= xfersz && i == (num - 1))
-                               brcmstb_set_i2c_start_stop(dev,
-                                                          ~(COND_START_STOP));
+                       if (len <= xfersz) {
+                               if (i == (num - 1))
+                                       cond_per_msg = cond_per_msg &
+                                               ~(COND_RESTART | COND_NOSTOP);
+                               else
+                                       cond_per_msg = cond;
+                       } else {
+                               cond_per_msg = (cond_per_msg & ~COND_RESTART) |
+                                       COND_NOSTOP;
+                       }
+
+                       brcmstb_set_i2c_start_stop(dev, cond_per_msg);
 
                        rc = brcmstb_i2c_xfer_bsc_data(dev, tmp_buf,
                                                       bytes_to_xfer, pmsg);
@@ -512,6 +525,8 @@ static int brcmstb_i2c_xfer(struct i2c_adapter *adapter,
 
                        len -=  bytes_to_xfer;
                        tmp_buf += bytes_to_xfer;
+
+                       cond_per_msg = COND_NOSTART | COND_NOSTOP;
                }
        }
 
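The rewritten brcmstb loop above computes the bus condition per chunk instead of once per message. A condensed helper showing the selection logic (the flag macros are the driver's; the helper itself is illustrative only):

	static u32 chunk_cond(u32 msg_cond, u32 cur, bool last_chunk, bool last_msg)
	{
		if (!last_chunk)		/* keep the bus held */
			return (cur & ~COND_RESTART) | COND_NOSTOP;
		if (last_msg)			/* issue a real STOP */
			return cur & ~(COND_RESTART | COND_NOSTOP);
		return msg_cond;		/* restore the message default */
	}

After each chunk is sent the driver resets the state to COND_NOSTART | COND_NOSTOP, so follow-on chunks of the same message never emit a fresh START.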
index c1db3a5a340f599b6bee5c0165112703e2e082fc..d9aaf1790e0eff58dc6b26ca9d4881058e890f79 100644 (file)
@@ -88,6 +88,7 @@ struct dw_i2c_dev {
        void __iomem            *base;
        struct completion       cmd_complete;
        struct clk              *clk;
+       struct reset_control    *rst;
        u32                     (*get_clk_rate_khz) (struct dw_i2c_dev *dev);
        struct dw_pci_controller *controller;
        int                     cmd_err;
index 6ce4313231257f8251b62e02f5aaa390a4619edf..79c4b4ea053969e46226dc749fb12a5b59c3ac2c 100644 (file)
@@ -38,6 +38,7 @@
 #include <linux/pm_runtime.h>
 #include <linux/property.h>
 #include <linux/io.h>
+#include <linux/reset.h>
 #include <linux/slab.h>
 #include <linux/acpi.h>
 #include <linux/platform_data/i2c-designware.h>
@@ -199,6 +200,14 @@ static int dw_i2c_plat_probe(struct platform_device *pdev)
        dev->irq = irq;
        platform_set_drvdata(pdev, dev);
 
+       dev->rst = devm_reset_control_get_optional_exclusive(&pdev->dev, NULL);
+       if (IS_ERR(dev->rst)) {
+               if (PTR_ERR(dev->rst) == -EPROBE_DEFER)
+                       return -EPROBE_DEFER;
+       } else {
+               reset_control_deassert(dev->rst);
+       }
+
        if (pdata) {
                dev->clk_freq = pdata->i2c_scl_freq;
        } else {
@@ -235,12 +244,13 @@ static int dw_i2c_plat_probe(struct platform_device *pdev)
            && dev->clk_freq != 1000000 && dev->clk_freq != 3400000) {
                dev_err(&pdev->dev,
                        "Only 100kHz, 400kHz, 1MHz and 3.4MHz supported");
-               return -EINVAL;
+               r = -EINVAL;
+               goto exit_reset;
        }
 
        r = i2c_dw_eval_lock_support(dev);
        if (r)
-               return r;
+               goto exit_reset;
 
        dev->functionality = I2C_FUNC_10BIT_ADDR | DW_IC_DEFAULT_FUNCTIONALITY;
 
@@ -286,10 +296,18 @@ static int dw_i2c_plat_probe(struct platform_device *pdev)
        }
 
        r = i2c_dw_probe(dev);
-       if (r && !dev->pm_runtime_disabled)
-               pm_runtime_disable(&pdev->dev);
+       if (r)
+               goto exit_probe;
 
        return r;
+
+exit_probe:
+       if (!dev->pm_runtime_disabled)
+               pm_runtime_disable(&pdev->dev);
+exit_reset:
+       if (!IS_ERR_OR_NULL(dev->rst))
+               reset_control_assert(dev->rst);
+       return r;
 }
 
 static int dw_i2c_plat_remove(struct platform_device *pdev)
@@ -306,6 +324,8 @@ static int dw_i2c_plat_remove(struct platform_device *pdev)
        pm_runtime_put_sync(&pdev->dev);
        if (!dev->pm_runtime_disabled)
                pm_runtime_disable(&pdev->dev);
+       if (!IS_ERR_OR_NULL(dev->rst))
+               reset_control_assert(dev->rst);
 
        return 0;
 }
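The designware probe/remove changes follow the usual optional-reset recipe: a missing reset line is not an error, a deferred provider must propagate, and every later failure path has to re-assert what was deasserted. Condensed from the hunks above (error labels simplified):

	dev->rst = devm_reset_control_get_optional_exclusive(&pdev->dev, NULL);
	if (IS_ERR(dev->rst)) {
		if (PTR_ERR(dev->rst) == -EPROBE_DEFER)
			return -EPROBE_DEFER;	/* provider not ready */
	} else {
		reset_control_deassert(dev->rst);	/* leave reset */
	}

	/* ... on any later failure, and again in remove(): */
	if (!IS_ERR_OR_NULL(dev->rst))
		reset_control_assert(dev->rst);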
index cbd93ce0661f225dd0492baef8e91a255079ac21..736a82472101733d7ada08b360c9f159c35371cb 100644 (file)
@@ -457,7 +457,6 @@ static irqreturn_t exynos5_i2c_irq(int irqno, void *dev_id)
 
        int_status = readl(i2c->regs + HSI2C_INT_STATUS);
        writel(int_status, i2c->regs + HSI2C_INT_STATUS);
-       trans_status = readl(i2c->regs + HSI2C_TRANS_STATUS);
 
        /* handle interrupt related to the transfer status */
        if (i2c->variant->hw == HSI2C_EXYNOS7) {
@@ -482,11 +481,13 @@ static irqreturn_t exynos5_i2c_irq(int irqno, void *dev_id)
                        goto stop;
                }
 
+               trans_status = readl(i2c->regs + HSI2C_TRANS_STATUS);
                if ((trans_status & HSI2C_MASTER_ST_MASK) == HSI2C_MASTER_ST_LOSE) {
                        i2c->state = -EAGAIN;
                        goto stop;
                }
        } else if (int_status & HSI2C_INT_I2C) {
+               trans_status = readl(i2c->regs + HSI2C_TRANS_STATUS);
                if (trans_status & HSI2C_NO_DEV_ACK) {
                        dev_dbg(i2c->dev, "No ACK from device\n");
                        i2c->state = -ENXIO;
index 2aa61bbbd307b9aead3730e137bbf5b95e29a366..73b97c71a484ee186fcb488c1b758cbb04178898 100644 (file)
@@ -175,7 +175,7 @@ static void meson_i2c_put_data(struct meson_i2c *i2c, char *buf, int len)
                wdata1 |= *buf++ << ((i - 4) * 8);
 
        writel(wdata0, i2c->regs + REG_TOK_WDATA0);
-       writel(wdata0, i2c->regs + REG_TOK_WDATA1);
+       writel(wdata1, i2c->regs + REG_TOK_WDATA1);
 
        dev_dbg(i2c->dev, "%s: data %08x %08x len %d\n", __func__,
                wdata0, wdata1, len);
index 4a7d9bc2142ba31db7579ed140cfbe6ed5224c02..45d61714c81bd2cfdec86e7f95ffaaa11abc5565 100644 (file)
@@ -172,14 +172,6 @@ static const struct i2c_adapter_quirks mt6577_i2c_quirks = {
        .max_comb_2nd_msg_len = 31,
 };
 
-static const struct i2c_adapter_quirks mt8173_i2c_quirks = {
-       .max_num_msgs = 65535,
-       .max_write_len = 65535,
-       .max_read_len = 65535,
-       .max_comb_1st_msg_len = 65535,
-       .max_comb_2nd_msg_len = 65535,
-};
-
 static const struct mtk_i2c_compatible mt6577_compat = {
        .quirks = &mt6577_i2c_quirks,
        .pmic_i2c = 0,
@@ -199,7 +191,6 @@ static const struct mtk_i2c_compatible mt6589_compat = {
 };
 
 static const struct mtk_i2c_compatible mt8173_compat = {
-       .quirks = &mt8173_i2c_quirks,
        .pmic_i2c = 0,
        .dcm = 1,
        .auto_restart = 1,
index 8f11d347b3ec482815e37d3170fa6abef4537c31..c811af4c8d817bcf353068bf2e3f95f56953155b 100644 (file)
@@ -218,8 +218,12 @@ static irqreturn_t riic_tend_isr(int irq, void *data)
        }
 
        if (riic->is_last || riic->err) {
-               riic_clear_set_bit(riic, 0, ICIER_SPIE, RIIC_ICIER);
+               riic_clear_set_bit(riic, ICIER_TEIE, ICIER_SPIE, RIIC_ICIER);
                writeb(ICCR2_SP, riic->base + RIIC_ICCR2);
+       } else {
+               /* Transfer is complete, but do not send STOP */
+               riic_clear_set_bit(riic, ICIER_TEIE, 0, RIIC_ICIER);
+               complete(&riic->msg_done);
        }
 
        return IRQ_HANDLED;
index 83768e85a919cb5c6eb60af01ce5918027600962..2178266bca794825e948ce275d48a1b13064056c 100644 (file)
@@ -429,6 +429,7 @@ void i2c_mux_del_adapters(struct i2c_mux_core *muxc)
        while (muxc->num_adapters) {
                struct i2c_adapter *adap = muxc->adapter[--muxc->num_adapters];
                struct i2c_mux_priv *priv = adap->algo_data;
+               struct device_node *np = adap->dev.of_node;
 
                muxc->adapter[muxc->num_adapters] = NULL;
 
@@ -438,6 +439,7 @@ void i2c_mux_del_adapters(struct i2c_mux_core *muxc)
 
                sysfs_remove_link(&priv->adap.dev.kobj, "mux_device");
                i2c_del_adapter(adap);
+               of_node_put(np);
                kfree(priv);
        }
 }
index dfc1c0e37c4022b66da0facee93af030c70c4e73..ad31d21da3165f73ecc8f7beb1feef4fc186cbe5 100644 (file)
@@ -35,7 +35,6 @@
  * warranty of any kind, whether express or implied.
  */
 
-#include <linux/acpi.h>
 #include <linux/device.h>
 #include <linux/gpio/consumer.h>
 #include <linux/i2c.h>
@@ -117,6 +116,10 @@ static const struct chip_desc chips[] = {
                .has_irq = 1,
                .muxtype = pca954x_isswi,
        },
+       [pca_9546] = {
+               .nchans = 4,
+               .muxtype = pca954x_isswi,
+       },
        [pca_9547] = {
                .nchans = 8,
                .enable = 0x8,
@@ -134,28 +137,13 @@ static const struct i2c_device_id pca954x_id[] = {
        { "pca9543", pca_9543 },
        { "pca9544", pca_9544 },
        { "pca9545", pca_9545 },
-       { "pca9546", pca_9545 },
+       { "pca9546", pca_9546 },
        { "pca9547", pca_9547 },
        { "pca9548", pca_9548 },
        { }
 };
 MODULE_DEVICE_TABLE(i2c, pca954x_id);
 
-#ifdef CONFIG_ACPI
-static const struct acpi_device_id pca954x_acpi_ids[] = {
-       { .id = "PCA9540", .driver_data = pca_9540 },
-       { .id = "PCA9542", .driver_data = pca_9542 },
-       { .id = "PCA9543", .driver_data = pca_9543 },
-       { .id = "PCA9544", .driver_data = pca_9544 },
-       { .id = "PCA9545", .driver_data = pca_9545 },
-       { .id = "PCA9546", .driver_data = pca_9545 },
-       { .id = "PCA9547", .driver_data = pca_9547 },
-       { .id = "PCA9548", .driver_data = pca_9548 },
-       { }
-};
-MODULE_DEVICE_TABLE(acpi, pca954x_acpi_ids);
-#endif
-
 #ifdef CONFIG_OF
 static const struct of_device_id pca954x_of_match[] = {
        { .compatible = "nxp,pca9540", .data = &chips[pca_9540] },
@@ -393,17 +381,8 @@ static int pca954x_probe(struct i2c_client *client,
        match = of_match_device(of_match_ptr(pca954x_of_match), &client->dev);
        if (match)
                data->chip = of_device_get_match_data(&client->dev);
-       else if (id)
+       else
                data->chip = &chips[id->driver_data];
-       else {
-               const struct acpi_device_id *acpi_id;
-
-               acpi_id = acpi_match_device(ACPI_PTR(pca954x_acpi_ids),
-                                               &client->dev);
-               if (!acpi_id)
-                       return -ENODEV;
-               data->chip = &chips[acpi_id->driver_data];
-       }
 
        data->last_chan = 0;               /* force the first selection */
 
@@ -492,7 +471,6 @@ static struct i2c_driver pca954x_driver = {
                .name   = "pca954x",
                .pm     = &pca954x_pm,
                .of_match_table = of_match_ptr(pca954x_of_match),
-               .acpi_match_table = ACPI_PTR(pca954x_acpi_ids),
        },
        .probe          = pca954x_probe,
        .remove         = pca954x_remove,
index ad9dec30bb304ffbbb0f67f01b753f7bc88cd891..4282ceca3d8f9f417a1a511b3d608896a7ad4159 100644 (file)
@@ -169,7 +169,9 @@ static irqreturn_t tiadc_irq_h(int irq, void *private)
 {
        struct iio_dev *indio_dev = private;
        struct tiadc_device *adc_dev = iio_priv(indio_dev);
-       unsigned int status, config;
+       unsigned int status, config, adc_fsm;
+       unsigned short count = 0;
+
        status = tiadc_readl(adc_dev, REG_IRQSTATUS);
 
        /*
@@ -183,6 +185,15 @@ static irqreturn_t tiadc_irq_h(int irq, void *private)
                tiadc_writel(adc_dev, REG_CTRL, config);
                tiadc_writel(adc_dev, REG_IRQSTATUS, IRQENB_FIFO1OVRRUN
                                | IRQENB_FIFO1UNDRFLW | IRQENB_FIFO1THRES);
+
+               /* wait for idle state.
+                * ADC needs to finish the current conversion
+                * before disabling the module
+                */
+               do {
+                       adc_fsm = tiadc_readl(adc_dev, REG_ADCFSM);
+               } while (adc_fsm != 0x10 && count++ < 100);
+
                tiadc_writel(adc_dev, REG_CTRL, (config | CNTRLREG_TSCSSENB));
                return IRQ_HANDLED;
        } else if (status & IRQENB_FIFO1THRES) {
index a3cce3a38300796b6b0899aa7fe2190481c7137d..ecf592d69043ae82e2e85184096471bfebef95e2 100644 (file)
@@ -51,8 +51,6 @@ static int _hid_sensor_power_state(struct hid_sensor_common *st, bool state)
                        st->report_state.report_id,
                        st->report_state.index,
                        HID_USAGE_SENSOR_PROP_REPORTING_STATE_ALL_EVENTS_ENUM);
-
-               poll_value = hid_sensor_read_poll_value(st);
        } else {
                int val;
 
@@ -89,7 +87,9 @@ static int _hid_sensor_power_state(struct hid_sensor_common *st, bool state)
        sensor_hub_get_feature(st->hsdev, st->power_state.report_id,
                               st->power_state.index,
                               sizeof(state_val), &state_val);
-       if (state && poll_value)
+       if (state)
+               poll_value = hid_sensor_read_poll_value(st);
+       if (poll_value > 0)
                msleep_interruptible(poll_value * 2);
 
        return 0;
index 78532ce074497985dfa22fb881b3639e480ae7ed..81b572d7699a89bb17baa21321744c7b518e5c0a 100644 (file)
@@ -193,8 +193,8 @@ int st_lsm6dsx_update_watermark(struct st_lsm6dsx_sensor *sensor, u16 watermark)
        if (err < 0)
                goto out;
 
-       fifo_watermark = ((data & ~ST_LSM6DSX_FIFO_TH_MASK) << 8) |
-                         (fifo_watermark & ST_LSM6DSX_FIFO_TH_MASK);
+       fifo_watermark = ((data << 8) & ~ST_LSM6DSX_FIFO_TH_MASK) |
+                        (fifo_watermark & ST_LSM6DSX_FIFO_TH_MASK);
 
        wdata = cpu_to_le16(fifo_watermark);
        err = hw->tf->write(hw->dev, ST_LSM6DSX_REG_FIFO_THL_ADDR,
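The watermark fix above is a masking-order bug: `data` holds the register's read-back high byte, so it must be shifted into bit position before the preserve-mask is applied. A worked example, assuming ST_LSM6DSX_FIFO_TH_MASK is GENMASK(11, 0), i.e. 0x0fff, as in the driver source:

	/* data = 0xf5 (read-back high byte), fifo_watermark = 0x0321 */

	/* old: (data & ~0x0fff) << 8  ->  (0xf5 & 0xf000) << 8 == 0
	 *      an 8-bit value can never intersect ~0x0fff, so the
	 *      control bits above the threshold field were always lost */

	/* new: (data << 8) & ~0x0fff  ->  0xf500 & 0xf000 == 0xf000 */
	u16 merged = ((0xf5 << 8) & ~0x0fff) | (0x0321 & 0x0fff); /* 0xf321 */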
index 6dd8cbd7ce9531a5883173f34eb30edc45301083..e13370dc9b1cb424c4c2d43f0e92b9fadca78df9 100644 (file)
@@ -763,7 +763,7 @@ power_off:
        return ret;
 }
 
-static int __exit ak8974_remove(struct i2c_client *i2c)
+static int ak8974_remove(struct i2c_client *i2c)
 {
        struct iio_dev *indio_dev = i2c_get_clientdata(i2c);
        struct ak8974 *ak8974 = iio_priv(indio_dev);
@@ -845,7 +845,7 @@ static struct i2c_driver ak8974_driver = {
                .of_match_table = of_match_ptr(ak8974_of_match),
        },
        .probe    = ak8974_probe,
-       .remove   = __exit_p(ak8974_remove),
+       .remove   = ak8974_remove,
        .id_table = ak8974_id,
 };
 module_i2c_driver(ak8974_driver);
index e95510117a6dd7069f1251796eaebd1ea8283693..f2ae75fa3128b9101985bb92e882b596404c7ebc 100644 (file)
@@ -29,7 +29,13 @@ static int __ib_process_cq(struct ib_cq *cq, int budget)
 {
        int i, n, completed = 0;
 
-       while ((n = ib_poll_cq(cq, IB_POLL_BATCH, cq->wc)) > 0) {
+       /*
+        * budget might be (-1) if the caller does not
+        * want to bound this call, thus we need unsigned
+        * minimum here.
+        */
+       while ((n = ib_poll_cq(cq, min_t(u32, IB_POLL_BATCH,
+                       budget - completed), cq->wc)) > 0) {
                for (i = 0; i < n; i++) {
                        struct ib_wc *wc = &cq->wc[i];
 
@@ -196,7 +202,7 @@ void ib_free_cq(struct ib_cq *cq)
                irq_poll_disable(&cq->iop);
                break;
        case IB_POLL_WORKQUEUE:
-               flush_work(&cq->work);
+               cancel_work_sync(&cq->work);
                break;
        default:
                WARN_ON_ONCE(1);
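The min_t(u32, ...) in the __ib_process_cq hunk above is doing real work: budget is a signed int and callers pass -1 to mean "no bound", so the subtraction must be compared as unsigned for the batch size to win. Illustration (IB_POLL_BATCH is 16 in this file):

	int budget = -1, completed = 0;

	/* signed: min(16, -1) == -1, a nonsensical poll count      */
	/* u32:    (u32)(budget - completed) == 0xffffffff, so ...  */
	u32 n = min_t(u32, 16, budget - completed);	/* n == 16 */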
index 593d2ce6ec7cec115b58006a5cb13c49bec72d83..7c9e34d679d325d101f937e97ea25dfe76f79f80 100644 (file)
@@ -336,12 +336,26 @@ int ib_register_device(struct ib_device *device,
        struct device *parent = device->dev.parent;
 
        WARN_ON_ONCE(!parent);
-       if (!device->dev.dma_ops)
-               device->dev.dma_ops = parent->dma_ops;
-       if (!device->dev.dma_mask)
-               device->dev.dma_mask = parent->dma_mask;
-       if (!device->dev.coherent_dma_mask)
-               device->dev.coherent_dma_mask = parent->coherent_dma_mask;
+       WARN_ON_ONCE(device->dma_device);
+       if (device->dev.dma_ops) {
+               /*
+                * The caller provided custom DMA operations. Copy the
+                * DMA-related fields that are used by e.g. dma_alloc_coherent()
+                * into device->dev.
+                */
+               device->dma_device = &device->dev;
+               if (!device->dev.dma_mask)
+                       device->dev.dma_mask = parent->dma_mask;
+               if (!device->dev.coherent_dma_mask)
+                       device->dev.coherent_dma_mask =
+                               parent->coherent_dma_mask;
+       } else {
+               /*
+                * The caller did not provide custom DMA operations. Use the
+                * DMA mapping operations of the parent device.
+                */
+               device->dma_device = parent;
+       }
 
        mutex_lock(&device_mutex);
 
@@ -1015,8 +1029,7 @@ static int __init ib_core_init(void)
                return -ENOMEM;
 
        ib_comp_wq = alloc_workqueue("ib-comp-wq",
-                       WQ_UNBOUND | WQ_HIGHPRI | WQ_MEM_RECLAIM,
-                       WQ_UNBOUND_MAX_ACTIVE);
+                       WQ_HIGHPRI | WQ_MEM_RECLAIM | WQ_SYSFS, 0);
        if (!ib_comp_wq) {
                ret = -ENOMEM;
                goto err;
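The first device.c hunk replaces copying DMA fields out of the parent with a single dma_device selection: drivers that install custom DMA ops map through &device->dev, all others go straight through the parent (typically the PCI device). Core mapping helpers can then funnel through one pointer; a hedged sketch of the wrapper shape this enables, not the exact in-tree helper:

	static inline dma_addr_t sketch_map_single(struct ib_device *dev,
						   void *ptr, size_t size,
						   enum dma_data_direction dir)
	{
		/* dev->dma_device was chosen once at registration time */
		return dma_map_single(dev->dma_device, ptr, size, dir);
	}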
index 0f5d43d1f5fc30d812a6a3988d90b30cdb459da0..70c3e9e795082b7152ea754a52fc68c412dee7ca 100644 (file)
@@ -160,6 +160,9 @@ int i40iw_inetaddr_event(struct notifier_block *notifier,
                return NOTIFY_DONE;
 
        iwdev = &hdl->device;
+       if (iwdev->init_state < INET_NOTIFIER)
+               return NOTIFY_DONE;
+
        netdev = iwdev->ldev->netdev;
        upper_dev = netdev_master_upper_dev_get(netdev);
        if (netdev != event_netdev)
@@ -214,6 +217,9 @@ int i40iw_inet6addr_event(struct notifier_block *notifier,
                return NOTIFY_DONE;
 
        iwdev = &hdl->device;
+       if (iwdev->init_state < INET_NOTIFIER)
+               return NOTIFY_DONE;
+
        netdev = iwdev->ldev->netdev;
        if (netdev != event_netdev)
                return NOTIFY_DONE;
@@ -260,6 +266,8 @@ int i40iw_net_event(struct notifier_block *notifier, unsigned long event, void *
                if (!iwhdl)
                        return NOTIFY_DONE;
                iwdev = &iwhdl->device;
+               if (iwdev->init_state < INET_NOTIFIER)
+                       return NOTIFY_DONE;
                p = (__be32 *)neigh->primary_key;
                i40iw_copy_ip_ntohl(local_ipaddr, p);
                if (neigh->nud_state & NUD_VALID) {
index 85acd0843b503807e92eafb26bce1cf60abc55a7..3f9e56e8b3796d3f225e89c67df130d32d55d583 100644 (file)
@@ -36,6 +36,7 @@
 
 #include <linux/netdevice.h>
 #include <linux/inetdevice.h>
+#include <linux/interrupt.h>
 #include <linux/spinlock.h>
 #include <linux/kernel.h>
 #include <linux/delay.h>
index bc9fb144e57b8e3a05863223156f25b7f17e6a34..c52edeafd616a3be52fec913430405e5adff13d2 100644 (file)
@@ -372,7 +372,7 @@ static int _ocrdma_pd_mgr_put_bitmap(struct ocrdma_dev *dev, u16 pd_id,
        return 0;
 }
 
-static u8 ocrdma_put_pd_num(struct ocrdma_dev *dev, u16 pd_id,
+static int ocrdma_put_pd_num(struct ocrdma_dev *dev, u16 pd_id,
                                   bool dpp_pool)
 {
        int status;
index b9b47e5cc8b3bde5a053107c0ba6a4997754c1ee..ced0461d6e9ff822633d60505d4b390450f7a082 100644 (file)
@@ -587,9 +587,8 @@ void qedr_affiliated_event(void *context, u8 e_code, void *fw_handle)
 #define EVENT_TYPE_CQ          1
 #define EVENT_TYPE_QP          2
        struct qedr_dev *dev = (struct qedr_dev *)context;
-       union event_ring_data *data = fw_handle;
-       u64 roce_handle64 = ((u64)data->roce_handle.hi << 32) +
-                           data->roce_handle.lo;
+       struct regpair *async_handle = (struct regpair *)fw_handle;
+       u64 roce_handle64 = ((u64) async_handle->hi << 32) + async_handle->lo;
        u8 event_type = EVENT_TYPE_NOT_DEFINED;
        struct ib_event event;
        struct ib_cq *ibcq;
index bb32e4792ec9f022d201c0585bcce7a7cbae179c..5cb9195513bdd4c754bf7e19b2f0c7d0ee30dc08 100644 (file)
@@ -38,7 +38,8 @@
 #include <linux/qed/qed_chain.h>
 #include <linux/qed/qed_roce_if.h>
 #include <linux/qed/qede_roce.h>
-#include "qedr_hsi.h"
+#include <linux/qed/roce_common.h>
+#include "qedr_hsi_rdma.h"
 
 #define QEDR_MODULE_VERSION    "8.10.10.0"
 #define QEDR_NODE_DESC "QLogic 579xx RoCE HCA"
index 699632893dd9842c3a3b47153f631ae85765a531..a6280ce3e2a54c6cef7617c79862680e425f81d4 100644 (file)
 #include <rdma/ib_addr.h>
 #include <rdma/ib_cache.h>
 
-#include "qedr_hsi.h"
 #include <linux/qed/qed_if.h>
 #include <linux/qed/qed_roce_if.h>
 #include "qedr.h"
-#include "qedr_hsi.h"
 #include "verbs.h"
 #include <rdma/qedr-abi.h>
-#include "qedr_hsi.h"
 #include "qedr_cm.h"
 
 void qedr_inc_sw_gsi_cons(struct qedr_qp_hwq_info *info)
diff --git a/drivers/infiniband/hw/qedr/qedr_hsi.h b/drivers/infiniband/hw/qedr/qedr_hsi.h
deleted file mode 100644 (file)
index 66d2752..0000000
+++ /dev/null
@@ -1,56 +0,0 @@
-/* QLogic qedr NIC Driver
- * Copyright (c) 2015-2016  QLogic Corporation
- *
- * This software is available to you under a choice of one of two
- * licenses.  You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- *     Redistribution and use in source and binary forms, with or
- *     without modification, are permitted provided that the following
- *     conditions are met:
- *
- *      - Redistributions of source code must retain the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer.
- *
- *      - Redistributions in binary form must reproduce the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer in the documentation and /or other materials
- *        provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#ifndef __QED_HSI_ROCE__
-#define __QED_HSI_ROCE__
-
-#include <linux/qed/common_hsi.h>
-#include <linux/qed/roce_common.h>
-#include "qedr_hsi_rdma.h"
-
-/* Affiliated asynchronous events / errors enumeration */
-enum roce_async_events_type {
-       ROCE_ASYNC_EVENT_NONE = 0,
-       ROCE_ASYNC_EVENT_COMM_EST = 1,
-       ROCE_ASYNC_EVENT_SQ_DRAINED,
-       ROCE_ASYNC_EVENT_SRQ_LIMIT,
-       ROCE_ASYNC_EVENT_LAST_WQE_REACHED,
-       ROCE_ASYNC_EVENT_CQ_ERR,
-       ROCE_ASYNC_EVENT_LOCAL_INVALID_REQUEST_ERR,
-       ROCE_ASYNC_EVENT_LOCAL_CATASTROPHIC_ERR,
-       ROCE_ASYNC_EVENT_LOCAL_ACCESS_ERR,
-       ROCE_ASYNC_EVENT_QP_CATASTROPHIC_ERR,
-       ROCE_ASYNC_EVENT_CQ_OVERFLOW_ERR,
-       ROCE_ASYNC_EVENT_SRQ_EMPTY,
-       MAX_ROCE_ASYNC_EVENTS_TYPE
-};
-
-#endif /* __QED_HSI_ROCE__ */
index 6b3bb32803bd8661d9efebd14dcefff0b601f6f3..2091902848e6c47bb1f3dfdc4cc08cec99386dc0 100644 (file)
@@ -43,7 +43,8 @@
 #include <rdma/ib_addr.h>
 #include <rdma/ib_cache.h>
 
-#include "qedr_hsi.h"
+#include <linux/qed/common_hsi.h>
+#include "qedr_hsi_rdma.h"
 #include <linux/qed/qed_if.h>
 #include "qedr.h"
 #include "verbs.h"
index 12c4208fd7013b78c5e1bf736d82461c05c814c3..af9f596bb68b294cc587f13924a645ac70e6235c 100644 (file)
@@ -7068,7 +7068,7 @@ static void qib_7322_txchk_change(struct qib_devdata *dd, u32 start,
        unsigned long flags;
 
        while (wait) {
-               unsigned long shadow;
+               unsigned long shadow = 0;
                int cstart, previ = -1;
 
                /*
index 3cd96c1b95029d2d365279d3ccfeebe038e3a2e4..9fbe22d3467b222988cf33fee7c04f1996a84928 100644 (file)
@@ -69,6 +69,9 @@
  */
 #define PCI_DEVICE_ID_VMWARE_PVRDMA    0x0820
 
+#define PVRDMA_NUM_RING_PAGES          4
+#define PVRDMA_QP_NUM_HEADER_PAGES     1
+
 struct pvrdma_dev;
 
 struct pvrdma_page_dir {
index e69d6f3cae32b5018ea4a09efd299ec402aa4a4f..09078ccfaec719b8800ce87beaafbaa92b50cbff 100644 (file)
@@ -132,7 +132,7 @@ enum pvrdma_pci_resource {
 
 enum pvrdma_device_ctl {
        PVRDMA_DEVICE_CTL_ACTIVATE,     /* Activate device. */
-       PVRDMA_DEVICE_CTL_QUIESCE,      /* Quiesce device. */
+       PVRDMA_DEVICE_CTL_UNQUIESCE,    /* Unquiesce device. */
        PVRDMA_DEVICE_CTL_RESET,        /* Reset device. */
 };
 
index 100bea5c42ffb74375552131ebb1fbd5cbdc3659..34ebc7615411d9e3574e50fc0bf083056ef756cf 100644 (file)
@@ -56,7 +56,7 @@
 #include "pvrdma.h"
 
 #define DRV_NAME       "vmw_pvrdma"
-#define DRV_VERSION    "1.0.0.0-k"
+#define DRV_VERSION    "1.0.1.0-k"
 
 static DEFINE_MUTEX(pvrdma_device_list_lock);
 static LIST_HEAD(pvrdma_device_list);
@@ -660,7 +660,16 @@ static void pvrdma_netdevice_event_handle(struct pvrdma_dev *dev,
                pvrdma_dispatch_event(dev, 1, IB_EVENT_PORT_ERR);
                break;
        case NETDEV_UP:
-               pvrdma_dispatch_event(dev, 1, IB_EVENT_PORT_ACTIVE);
+               pvrdma_write_reg(dev, PVRDMA_REG_CTL,
+                                PVRDMA_DEVICE_CTL_UNQUIESCE);
+
+               mb();
+
+               if (pvrdma_read_reg(dev, PVRDMA_REG_ERR))
+                       dev_err(&dev->pdev->dev,
+                               "failed to activate device during link up\n");
+               else
+                       pvrdma_dispatch_event(dev, 1, IB_EVENT_PORT_ACTIVE);
                break;
        default:
                dev_dbg(&dev->pdev->dev, "ignore netdevice event %ld on %s\n",
@@ -858,7 +867,7 @@ static int pvrdma_pci_probe(struct pci_dev *pdev,
        dev->dsr->resp_slot_dma = (u64)slot_dma;
 
        /* Async event ring */
-       dev->dsr->async_ring_pages.num_pages = 4;
+       dev->dsr->async_ring_pages.num_pages = PVRDMA_NUM_RING_PAGES;
        ret = pvrdma_page_dir_init(dev, &dev->async_pdir,
                                   dev->dsr->async_ring_pages.num_pages, true);
        if (ret)
@@ -867,7 +876,7 @@ static int pvrdma_pci_probe(struct pci_dev *pdev,
        dev->dsr->async_ring_pages.pdir_dma = dev->async_pdir.dir_dma;
 
        /* CQ notification ring */
-       dev->dsr->cq_ring_pages.num_pages = 4;
+       dev->dsr->cq_ring_pages.num_pages = PVRDMA_NUM_RING_PAGES;
        ret = pvrdma_page_dir_init(dev, &dev->cq_pdir,
                                   dev->dsr->cq_ring_pages.num_pages, true);
        if (ret)
index dbbfd35e7da7ade590bc0b13784e82ba9266f213..30062aad3af1a2cfa6d14ce9bd60250644b85ab8 100644 (file)
@@ -170,8 +170,9 @@ static int pvrdma_set_sq_size(struct pvrdma_dev *dev, struct ib_qp_cap *req_cap,
                                             sizeof(struct pvrdma_sge) *
                                             qp->sq.max_sg);
        /* Note: one extra page for the header. */
-       qp->npages_send = 1 + (qp->sq.wqe_cnt * qp->sq.wqe_size +
-                              PAGE_SIZE - 1) / PAGE_SIZE;
+       qp->npages_send = PVRDMA_QP_NUM_HEADER_PAGES +
+                         (qp->sq.wqe_cnt * qp->sq.wqe_size + PAGE_SIZE - 1) /
+                                                               PAGE_SIZE;
 
        return 0;
 }
@@ -288,7 +289,7 @@ struct ib_qp *pvrdma_create_qp(struct ib_pd *pd,
                        qp->npages = qp->npages_send + qp->npages_recv;
 
                        /* Skip header page. */
-                       qp->sq.offset = PAGE_SIZE;
+                       qp->sq.offset = PVRDMA_QP_NUM_HEADER_PAGES * PAGE_SIZE;
 
                        /* Recv queue pages are after send pages. */
                        qp->rq.offset = qp->npages_send * PAGE_SIZE;
@@ -341,7 +342,7 @@ struct ib_qp *pvrdma_create_qp(struct ib_pd *pd,
        cmd->qp_type = ib_qp_type_to_pvrdma(init_attr->qp_type);
        cmd->access_flags = IB_ACCESS_LOCAL_WRITE;
        cmd->total_chunks = qp->npages;
-       cmd->send_chunks = qp->npages_send - 1;
+       cmd->send_chunks = qp->npages_send - PVRDMA_QP_NUM_HEADER_PAGES;
        cmd->pdir_dma = qp->pdir.dir_dma;
 
        dev_dbg(&dev->pdev->dev, "create queuepair with %d, %d, %d, %d\n",
@@ -554,13 +555,13 @@ out:
        return ret;
 }
 
-static inline void *get_sq_wqe(struct pvrdma_qp *qp, int n)
+static inline void *get_sq_wqe(struct pvrdma_qp *qp, unsigned int n)
 {
        return pvrdma_page_dir_get_ptr(&qp->pdir,
                                       qp->sq.offset + n * qp->sq.wqe_size);
 }
 
-static inline void *get_rq_wqe(struct pvrdma_qp *qp, int n)
+static inline void *get_rq_wqe(struct pvrdma_qp *qp, unsigned int n)
 {
        return pvrdma_page_dir_get_ptr(&qp->pdir,
                                       qp->rq.offset + n * qp->rq.wqe_size);
@@ -598,9 +599,7 @@ int pvrdma_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
        unsigned long flags;
        struct pvrdma_sq_wqe_hdr *wqe_hdr;
        struct pvrdma_sge *sge;
-       int i, index;
-       int nreq;
-       int ret;
+       int i, ret;
 
        /*
         * In states lower than RTS, we can fail immediately. In other states,
@@ -613,9 +612,8 @@ int pvrdma_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
 
        spin_lock_irqsave(&qp->sq.lock, flags);
 
-       index = pvrdma_idx(&qp->sq.ring->prod_tail, qp->sq.wqe_cnt);
-       for (nreq = 0; wr; nreq++, wr = wr->next) {
-               unsigned int tail;
+       while (wr) {
+               unsigned int tail = 0;
 
                if (unlikely(!pvrdma_idx_ring_has_space(
                                qp->sq.ring, qp->sq.wqe_cnt, &tail))) {
@@ -680,7 +678,7 @@ int pvrdma_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
                        }
                }
 
-               wqe_hdr = (struct pvrdma_sq_wqe_hdr *)get_sq_wqe(qp, index);
+               wqe_hdr = (struct pvrdma_sq_wqe_hdr *)get_sq_wqe(qp, tail);
                memset(wqe_hdr, 0, sizeof(*wqe_hdr));
                wqe_hdr->wr_id = wr->wr_id;
                wqe_hdr->num_sge = wr->num_sge;
@@ -771,12 +769,11 @@ int pvrdma_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
                /* Make sure wqe is written before index update */
                smp_wmb();
 
-               index++;
-               if (unlikely(index >= qp->sq.wqe_cnt))
-                       index = 0;
                /* Update shared sq ring */
                pvrdma_idx_ring_inc(&qp->sq.ring->prod_tail,
                                    qp->sq.wqe_cnt);
+
+               wr = wr->next;
        }
 
        ret = 0;
@@ -806,7 +803,6 @@ int pvrdma_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr,
        struct pvrdma_qp *qp = to_vqp(ibqp);
        struct pvrdma_rq_wqe_hdr *wqe_hdr;
        struct pvrdma_sge *sge;
-       int index, nreq;
        int ret = 0;
        int i;
 
@@ -821,9 +817,8 @@ int pvrdma_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr,
 
        spin_lock_irqsave(&qp->rq.lock, flags);
 
-       index = pvrdma_idx(&qp->rq.ring->prod_tail, qp->rq.wqe_cnt);
-       for (nreq = 0; wr; nreq++, wr = wr->next) {
-               unsigned int tail;
+       while (wr) {
+               unsigned int tail = 0;
 
                if (unlikely(wr->num_sge > qp->rq.max_sg ||
                             wr->num_sge < 0)) {
@@ -843,7 +838,7 @@ int pvrdma_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr,
                        goto out;
                }
 
-               wqe_hdr = (struct pvrdma_rq_wqe_hdr *)get_rq_wqe(qp, index);
+               wqe_hdr = (struct pvrdma_rq_wqe_hdr *)get_rq_wqe(qp, tail);
                wqe_hdr->wr_id = wr->wr_id;
                wqe_hdr->num_sge = wr->num_sge;
                wqe_hdr->total_len = 0;
@@ -859,12 +854,11 @@ int pvrdma_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr,
                /* Make sure wqe is written before index update */
                smp_wmb();
 
-               index++;
-               if (unlikely(index >= qp->rq.wqe_cnt))
-                       index = 0;
                /* Update shared rq ring */
                pvrdma_idx_ring_inc(&qp->rq.ring->prod_tail,
                                    qp->rq.wqe_cnt);
+
+               wr = wr->next;
        }
 
        spin_unlock_irqrestore(&qp->rq.lock, flags);
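Both posting paths above stop maintaining a private ring index: pvrdma_idx_ring_has_space() already yields the producer tail, so the WQE slot is taken from the shared ring itself and cannot drift out of sync with it. The reworked loop shape, with fill_wqe() as a hypothetical stand-in for the header and SGE setup:

	while (wr) {
		unsigned int tail = 0;

		if (!pvrdma_idx_ring_has_space(qp->sq.ring,
					       qp->sq.wqe_cnt, &tail))
			break;			/* ring full */

		fill_wqe(get_sq_wqe(qp, tail), wr);
		smp_wmb();			/* publish WQE before index */
		pvrdma_idx_ring_inc(&qp->sq.ring->prod_tail, qp->sq.wqe_cnt);

		wr = wr->next;
	}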
index e202b8142759f58e0c06317ce1e4f3d798c54f37..6b712eecbd37d9ca5a0bf312028dc7b23c1c2fd8 100644 (file)
@@ -170,9 +170,9 @@ struct rvt_mmap_info *rvt_create_mmap_info(struct rvt_dev_info *rdi,
 
        spin_lock_irq(&rdi->mmap_offset_lock);
        if (rdi->mmap_offset == 0)
-               rdi->mmap_offset = PAGE_SIZE;
+               rdi->mmap_offset = ALIGN(PAGE_SIZE, SHMLBA);
        ip->offset = rdi->mmap_offset;
-       rdi->mmap_offset += size;
+       rdi->mmap_offset += ALIGN(size, SHMLBA);
        spin_unlock_irq(&rdi->mmap_offset_lock);
 
        INIT_LIST_HEAD(&ip->pending_mmaps);
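Rounding both the first cookie and every increment to SHMLBA keeps each mmap offset cache-color aligned, which VIPT caches require so that user and kernel views of the same queue pages do not alias; the identical change lands in rxe just below. The allocator after the patch:

	spin_lock_irq(&rdi->mmap_offset_lock);
	if (rdi->mmap_offset == 0)
		rdi->mmap_offset = ALIGN(PAGE_SIZE, SHMLBA);
	ip->offset = rdi->mmap_offset;
	rdi->mmap_offset += ALIGN(size, SHMLBA); /* next cookie stays aligned */
	spin_unlock_irq(&rdi->mmap_offset_lock);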
index 7d1ac27ed2516dad367354f36b15903cb3197ade..6332dedc11e8a3306697e494ab89996b34d453ab 100644 (file)
@@ -22,4 +22,4 @@ config RDMA_RXE
        To configure and work with soft-RoCE driver please use the
        following wiki page under "configure Soft-RoCE (RXE)" section:
 
-       https://github.com/SoftRoCE/rxe-dev/wiki/rxe-dev:-Home
+       https://github.com/linux-rdma/rdma-core/blob/master/Documentation/rxe.md
index c572a4c09359c9f91fcf7f55466fadff012fbc82..bd812e00988ed32f5517ce952a0e2186c4678f87 100644 (file)
@@ -156,10 +156,10 @@ struct rxe_mmap_info *rxe_create_mmap_info(struct rxe_dev *rxe,
        spin_lock_bh(&rxe->mmap_offset_lock);
 
        if (rxe->mmap_offset == 0)
-               rxe->mmap_offset = PAGE_SIZE;
+               rxe->mmap_offset = ALIGN(PAGE_SIZE, SHMLBA);
 
        ip->info.offset = rxe->mmap_offset;
-       rxe->mmap_offset += size;
+       rxe->mmap_offset += ALIGN(size, SHMLBA);
 
        spin_unlock_bh(&rxe->mmap_offset_lock);
 
index dbfde0dc6ff7e7bcf3c6b182b5f49fc5daf65009..9f95f50b290904fe67797cacfa5af2f565e3188d 100644 (file)
@@ -729,11 +729,11 @@ next_wqe:
        ret = rxe_xmit_packet(to_rdev(qp->ibqp.device), qp, &pkt, skb);
        if (ret) {
                qp->need_req_skb = 1;
-               kfree_skb(skb);
 
                rollback_state(wqe, qp, &rollback_wqe, rollback_psn);
 
                if (ret == -EAGAIN) {
+                       kfree_skb(skb);
                        rxe_run_task(&qp->req.task, 1);
                        goto exit;
                }
index d404a8aba7afcaf8ab8addd38949ddf5adcd0847..c9dd385ce62e2c65b97e5bf16e5d8f6288b6f19c 100644 (file)
@@ -813,18 +813,17 @@ static enum resp_states execute(struct rxe_qp *qp, struct rxe_pkt_info *pkt)
                WARN_ON_ONCE(1);
        }
 
-       /* We successfully processed this new request. */
-       qp->resp.msn++;
-
        /* next expected psn, read handles this separately */
        qp->resp.psn = (pkt->psn + 1) & BTH_PSN_MASK;
 
        qp->resp.opcode = pkt->opcode;
        qp->resp.status = IB_WC_SUCCESS;
 
-       if (pkt->mask & RXE_COMP_MASK)
+       if (pkt->mask & RXE_COMP_MASK) {
+               /* We successfully processed this new request. */
+               qp->resp.msn++;
                return RESPST_COMPLETE;
-       else if (qp_type(qp) == IB_QPT_RC)
+       } else if (qp_type(qp) == IB_QPT_RC)
                return RESPST_ACKNOWLEDGE;
        else
                return RESPST_CLEANUP;
index 9d0b22ad58c15759c3b92472083da15af5c42bac..c1ae4aeae2f90e5a9c1376a296529d912d77d457 100644 (file)
@@ -430,6 +430,7 @@ struct iser_fr_desc {
        struct list_head                  list;
        struct iser_reg_resources         rsc;
        struct iser_pi_context           *pi_ctx;
+       struct list_head                  all_list;
 };
 
 /**
@@ -443,6 +444,7 @@ struct iser_fr_pool {
        struct list_head        list;
        spinlock_t              lock;
        int                     size;
+       struct list_head        all_list;
 };
 
 /**
index 30b622f2ab7382ca04a8affbd26dfd01ff98716c..c538a38c91ce95acf8e00fcdf43fa28588ad2f49 100644 (file)
@@ -362,6 +362,7 @@ int iser_alloc_fastreg_pool(struct ib_conn *ib_conn,
        int i, ret;
 
        INIT_LIST_HEAD(&fr_pool->list);
+       INIT_LIST_HEAD(&fr_pool->all_list);
        spin_lock_init(&fr_pool->lock);
        fr_pool->size = 0;
        for (i = 0; i < cmds_max; i++) {
@@ -373,6 +374,7 @@ int iser_alloc_fastreg_pool(struct ib_conn *ib_conn,
                }
 
                list_add_tail(&desc->list, &fr_pool->list);
+               list_add_tail(&desc->all_list, &fr_pool->all_list);
                fr_pool->size++;
        }
 
@@ -392,13 +394,13 @@ void iser_free_fastreg_pool(struct ib_conn *ib_conn)
        struct iser_fr_desc *desc, *tmp;
        int i = 0;
 
-       if (list_empty(&fr_pool->list))
+       if (list_empty(&fr_pool->all_list))
                return;
 
        iser_info("freeing conn %p fr pool\n", ib_conn);
 
-       list_for_each_entry_safe(desc, tmp, &fr_pool->list, list) {
-               list_del(&desc->list);
+       list_for_each_entry_safe(desc, tmp, &fr_pool->all_list, all_list) {
+               list_del(&desc->all_list);
                iser_free_reg_res(&desc->rsc);
                if (desc->pi_ctx)
                        iser_free_pi_ctx(desc->pi_ctx);
index d96aa27dfcdc9776260c72753b48b63691af1b04..db64adfbe1aff092b4418832c1c81cce0901e044 100644 (file)
@@ -141,6 +141,9 @@ static int iforce_usb_probe(struct usb_interface *intf,
 
        interface = intf->cur_altsetting;
 
+       if (interface->desc.bNumEndpoints < 2)
+               return -ENODEV;
+
        epirq = &interface->endpoint[0].desc;
        epout = &interface->endpoint[1].desc;
 
index 9cc6d057c302a1753f33e9072ab5f5384f1eec40..23c191a2a0715abe9584760742327b25f058d53b 100644 (file)
@@ -700,6 +700,10 @@ static int cm109_usb_probe(struct usb_interface *intf,
        int error = -ENOMEM;
 
        interface = intf->cur_altsetting;
+
+       if (interface->desc.bNumEndpoints < 1)
+               return -ENODEV;
+
        endpoint = &interface->endpoint[0].desc;
 
        if (!usb_endpoint_is_int_in(endpoint))
index 9c0ea36913b4a98293911f62a2d649dc08a855cd..f4e8fbec6a942a8ea7a48e268b1e96f9fe458369 100644 (file)
@@ -1667,6 +1667,10 @@ static int ims_pcu_parse_cdc_data(struct usb_interface *intf, struct ims_pcu *pc
                return -EINVAL;
 
        alt = pcu->ctrl_intf->cur_altsetting;
+
+       if (alt->desc.bNumEndpoints < 1)
+               return -ENODEV;
+
        pcu->ep_ctrl = &alt->endpoint[0].desc;
        pcu->max_ctrl_size = usb_endpoint_maxp(pcu->ep_ctrl);
 
index 79c964c075f14029a8072a6ad441927a0a40cf04..6e7ff9561d9261f31f919720925e12d8f1720b98 100644 (file)
@@ -875,6 +875,10 @@ static int usb_probe(struct usb_interface *intf, const struct usb_device_id *id)
        int ret, pipe, i;
 
        interface = intf->cur_altsetting;
+
+       if (interface->desc.bNumEndpoints < 1)
+               return -ENODEV;
+
        endpoint = &interface->endpoint[0].desc;
        if (!usb_endpoint_is_int_in(endpoint))
                return -ENODEV;
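The iforce, cm109, ims_pcu and yealink hunks above, and the hanwang hunk below, all add the same hardening: check bNumEndpoints before indexing cur_altsetting->endpoint[], because a malicious or malformed device can advertise fewer endpoints than the driver assumes. The generic guard:

	interface = intf->cur_altsetting;
	if (interface->desc.bNumEndpoints < 2)	/* driver touches [0] and [1] */
		return -ENODEV;

	epirq = &interface->endpoint[0].desc;
	epout = &interface->endpoint[1].desc;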
index 72b28ebfe360030cbeeadec8a52385a9de09a7f0..f210e19ddba66b86312b09c6925c55ea9fc0fab7 100644 (file)
@@ -1282,10 +1282,8 @@ static int alps_decode_ss4_v2(struct alps_fields *f,
        /* handle buttons */
        if (pkt_id == SS4_PACKET_ID_STICK) {
                f->ts_left = !!(SS4_BTN_V2(p) & 0x01);
-               if (!(priv->flags & ALPS_BUTTONPAD)) {
-                       f->ts_right = !!(SS4_BTN_V2(p) & 0x02);
-                       f->ts_middle = !!(SS4_BTN_V2(p) & 0x04);
-               }
+               f->ts_right = !!(SS4_BTN_V2(p) & 0x02);
+               f->ts_middle = !!(SS4_BTN_V2(p) & 0x04);
        } else {
                f->left = !!(SS4_BTN_V2(p) & 0x01);
                if (!(priv->flags & ALPS_BUTTONPAD)) {
@@ -2462,14 +2460,34 @@ static int alps_update_device_area_ss4_v2(unsigned char otp[][4],
        int num_y_electrode;
        int x_pitch, y_pitch, x_phys, y_phys;
 
-       num_x_electrode = SS4_NUMSENSOR_XOFFSET + (otp[1][0] & 0x0F);
-       num_y_electrode = SS4_NUMSENSOR_YOFFSET + ((otp[1][0] >> 4) & 0x0F);
+       if (IS_SS4PLUS_DEV(priv->dev_id)) {
+               num_x_electrode =
+                       SS4PLUS_NUMSENSOR_XOFFSET + (otp[0][2] & 0x0F);
+               num_y_electrode =
+                       SS4PLUS_NUMSENSOR_YOFFSET + ((otp[0][2] >> 4) & 0x0F);
+
+               priv->x_max =
+                       (num_x_electrode - 1) * SS4PLUS_COUNT_PER_ELECTRODE;
+               priv->y_max =
+                       (num_y_electrode - 1) * SS4PLUS_COUNT_PER_ELECTRODE;
 
-       priv->x_max = (num_x_electrode - 1) * SS4_COUNT_PER_ELECTRODE;
-       priv->y_max = (num_y_electrode - 1) * SS4_COUNT_PER_ELECTRODE;
+               x_pitch = (otp[0][1] & 0x0F) + SS4PLUS_MIN_PITCH_MM;
+               y_pitch = ((otp[0][1] >> 4) & 0x0F) + SS4PLUS_MIN_PITCH_MM;
 
-       x_pitch = ((otp[1][2] >> 2) & 0x07) + SS4_MIN_PITCH_MM;
-       y_pitch = ((otp[1][2] >> 5) & 0x07) + SS4_MIN_PITCH_MM;
+       } else {
+               num_x_electrode =
+                       SS4_NUMSENSOR_XOFFSET + (otp[1][0] & 0x0F);
+               num_y_electrode =
+                       SS4_NUMSENSOR_YOFFSET + ((otp[1][0] >> 4) & 0x0F);
+
+               priv->x_max =
+                       (num_x_electrode - 1) * SS4_COUNT_PER_ELECTRODE;
+               priv->y_max =
+                       (num_y_electrode - 1) * SS4_COUNT_PER_ELECTRODE;
+
+               x_pitch = ((otp[1][2] >> 2) & 0x07) + SS4_MIN_PITCH_MM;
+               y_pitch = ((otp[1][2] >> 5) & 0x07) + SS4_MIN_PITCH_MM;
+       }
 
        x_phys = x_pitch * (num_x_electrode - 1); /* In 0.1 mm units */
        y_phys = y_pitch * (num_y_electrode - 1); /* In 0.1 mm units */
@@ -2485,7 +2503,10 @@ static int alps_update_btn_info_ss4_v2(unsigned char otp[][4],
 {
        unsigned char is_btnless;
 
-       is_btnless = (otp[1][1] >> 3) & 0x01;
+       if (IS_SS4PLUS_DEV(priv->dev_id))
+               is_btnless = (otp[1][0] >> 1) & 0x01;
+       else
+               is_btnless = (otp[1][1] >> 3) & 0x01;
 
        if (is_btnless)
                priv->flags |= ALPS_BUTTONPAD;
@@ -2493,6 +2514,21 @@ static int alps_update_btn_info_ss4_v2(unsigned char otp[][4],
        return 0;
 }
 
+static int alps_update_dual_info_ss4_v2(unsigned char otp[][4],
+                                      struct alps_data *priv)
+{
+       bool is_dual = false;
+
+       if (IS_SS4PLUS_DEV(priv->dev_id))
+               is_dual = (otp[0][0] >> 4) & 0x01;
+
+       if (is_dual)
+               priv->flags |= ALPS_DUALPOINT |
+                                       ALPS_DUALPOINT_WITH_PRESSURE;
+
+       return 0;
+}
+
 static int alps_set_defaults_ss4_v2(struct psmouse *psmouse,
                                    struct alps_data *priv)
 {
@@ -2508,6 +2544,8 @@ static int alps_set_defaults_ss4_v2(struct psmouse *psmouse,
 
        alps_update_btn_info_ss4_v2(otp, priv);
 
+       alps_update_dual_info_ss4_v2(otp, priv);
+
        return 0;
 }
 
@@ -2753,10 +2791,6 @@ static int alps_set_protocol(struct psmouse *psmouse,
                if (alps_set_defaults_ss4_v2(psmouse, priv))
                        return -EIO;
 
-               if (priv->fw_ver[1] == 0x1)
-                       priv->flags |= ALPS_DUALPOINT |
-                                       ALPS_DUALPOINT_WITH_PRESSURE;
-
                break;
        }
 
@@ -2827,10 +2861,7 @@ static int alps_identify(struct psmouse *psmouse, struct alps_data *priv)
                           ec[2] >= 0x90 && ec[2] <= 0x9d) {
                        protocol = &alps_v3_protocol_data;
                } else if (e7[0] == 0x73 && e7[1] == 0x03 &&
-                          e7[2] == 0x14 && ec[1] == 0x02) {
-                       protocol = &alps_v8_protocol_data;
-               } else if (e7[0] == 0x73 && e7[1] == 0x03 &&
-                          e7[2] == 0x28 && ec[1] == 0x01) {
+                          (e7[2] == 0x14 || e7[2] == 0x28)) {
                        protocol = &alps_v8_protocol_data;
                } else {
                        psmouse_dbg(psmouse,
@@ -2840,7 +2871,8 @@ static int alps_identify(struct psmouse *psmouse, struct alps_data *priv)
        }
 
        if (priv) {
-               /* Save the Firmware version */
+               /* Save Device ID and Firmware version */
+               memcpy(priv->dev_id, e7, 3);
                memcpy(priv->fw_ver, ec, 3);
                error = alps_set_protocol(psmouse, priv, protocol);
                if (error)
index 6d279aa27cb9a10d70a2e732fe9599297883cbc0..4334f2805d93c7a3e8454a8cf0d950a14999278c 100644 (file)
@@ -54,6 +54,16 @@ enum SS4_PACKET_ID {
 
 #define SS4_MASK_NORMAL_BUTTONS                0x07
 
+#define SS4PLUS_COUNT_PER_ELECTRODE    128
+#define SS4PLUS_NUMSENSOR_XOFFSET      16
+#define SS4PLUS_NUMSENSOR_YOFFSET      5
+#define SS4PLUS_MIN_PITCH_MM           37
+
+#define IS_SS4PLUS_DEV(_b)     (((_b[0]) == 0x73) &&   \
+                                ((_b[1]) == 0x03) &&   \
+                                ((_b[2]) == 0x28)              \
+                               )
+
 #define SS4_IS_IDLE_V2(_b)     (((_b[0]) == 0x18) &&           \
                                 ((_b[1]) == 0x10) &&           \
                                 ((_b[2]) == 0x00) &&           \
@@ -283,6 +293,7 @@ struct alps_data {
        int addr_command;
        u16 proto_version;
        u8 byte0, mask0;
+       u8 dev_id[3];
        u8 fw_ver[3];
        int flags;
        int x_max;
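
The SS4-PLUS support above keys everything off the E7 device ID that alps_identify() now saves into priv->dev_id, and decodes the pad geometry from OTP bytes by shifting and masking. A minimal userspace sketch of that decoding, assuming an illustrative OTP byte and a placeholder pitch base (the driver's real constants live in alps.h):

#include <stdio.h>

/* Same signature test as IS_SS4PLUS_DEV() in alps.h above. */
#define IS_SS4PLUS_DEV(_b) \
	(((_b)[0] == 0x73) && ((_b)[1] == 0x03) && ((_b)[2] == 0x28))

#define MIN_PITCH_MM 46	/* placeholder base, not the driver's constant */

int main(void)
{
	unsigned char dev_id[3] = { 0x73, 0x03, 0x28 };	/* E7 response */
	unsigned char otp_byte = 0x6d;			/* illustrative OTP */

	if (IS_SS4PLUS_DEV(dev_id)) {
		/* bits 2..4 encode the X pitch, bits 5..7 the Y pitch */
		int x_pitch = ((otp_byte >> 2) & 0x07) + MIN_PITCH_MM;
		int y_pitch = ((otp_byte >> 5) & 0x07) + MIN_PITCH_MM;

		printf("SS4-PLUS pad: x_pitch=%d y_pitch=%d\n",
		       x_pitch, y_pitch);
	}
	return 0;
}
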
index 352050e9031dc31ab87e7d3cdb2948b2ead7dee1..d5ab9ddef3e37eeb553307c7406adc1f2c011057 100644 (file)
@@ -218,17 +218,19 @@ static int elan_query_product(struct elan_tp_data *data)
 
 static int elan_check_ASUS_special_fw(struct elan_tp_data *data)
 {
-       if (data->ic_type != 0x0E)
-               return false;
-
-       switch (data->product_id) {
-       case 0x05 ... 0x07:
-       case 0x09:
-       case 0x13:
+       if (data->ic_type == 0x0E) {
+               switch (data->product_id) {
+               case 0x05 ... 0x07:
+               case 0x09:
+               case 0x13:
+                       return true;
+               }
+       } else if (data->ic_type == 0x08 && data->product_id == 0x26) {
+               /* ASUS EeeBook X205TA */
                return true;
-       default:
-               return false;
        }
+
+       return false;
 }
 
 static int __elan_initialize(struct elan_tp_data *data)
index 1986786133824d5b7cc815ec98efe61afd2624e2..34dfee555b201b0577e6e5fc0c58c63e7cc8cac7 100644 (file)
@@ -170,6 +170,10 @@ static int rmi_f30_config(struct rmi_function *fn)
                                rmi_get_platform_data(fn->rmi_dev);
        int error;
 
+       /* can happen if f30_data.disable is set */
+       if (!f30)
+               return 0;
+
        if (pdata->f30_data.trackstick_buttons) {
                /* Try [re-]establish link to F03. */
                f30->f03 = rmi_find_function(fn->rmi_dev, 0x03);
index 05afd16ea9c9efc0bb52efe49510444478e1e220..312bd6ca919806f2593e12814e5037469d609c0f 100644 (file)
@@ -119,6 +119,13 @@ static const struct dmi_system_id __initconst i8042_dmi_noloop_table[] = {
                        DMI_MATCH(DMI_PRODUCT_VERSION, "DL760"),
                },
        },
+       {
+               /* Dell Embedded Box PC 3000 */
+               .matches = {
+                       DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+                       DMI_MATCH(DMI_PRODUCT_NAME, "Embedded Box PC 3000"),
+               },
+       },
        {
                /* OQO Model 01 */
                .matches = {
@@ -513,6 +520,13 @@ static const struct dmi_system_id __initconst i8042_dmi_nomux_table[] = {
                        DMI_MATCH(DMI_PRODUCT_NAME, "IC4I"),
                },
        },
+       {
+               /* TUXEDO BU1406 */
+               .matches = {
+                       DMI_MATCH(DMI_SYS_VENDOR, "Notebook"),
+                       DMI_MATCH(DMI_PRODUCT_NAME, "N24_25BU"),
+               },
+       },
        { }
 };
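
Both additions follow the standard dmi_system_id quirk shape: match on vendor plus product name, terminated by an empty entry. A hedged sketch of such an entry; the strings below are placeholders, not a real quirked machine:

#include <linux/init.h>
#include <linux/dmi.h>

static const struct dmi_system_id __initconst example_quirk_table[] = {
	{
		/* Hypothetical Example Vendor Model-X */
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "Example Vendor"),
			DMI_MATCH(DMI_PRODUCT_NAME, "Model-X"),
		},
	},
	{ }	/* empty entry terminates the table */
};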
 
index cd852059b99e81899f8436f31071c929eb470b83..df4bea96d7ed7d10e66478c929feb1094ff5f09c 100644 (file)
@@ -340,6 +340,9 @@ static int hanwang_probe(struct usb_interface *intf, const struct usb_device_id
        int error;
        int i;
 
+       if (intf->cur_altsetting->desc.bNumEndpoints < 1)
+               return -ENODEV;
+
        hanwang = kzalloc(sizeof(struct hanwang), GFP_KERNEL);
        input_dev = input_allocate_device();
        if (!hanwang || !input_dev) {
index e850d7e8afbc4d22afb1bc73b2e99fc3f1cf5e8f..4d9d64908b595f9828e8e82e47e57b6b2935c07f 100644 (file)
@@ -122,6 +122,9 @@ static int kbtab_probe(struct usb_interface *intf, const struct usb_device_id *i
        struct input_dev *input_dev;
        int error = -ENOMEM;
 
+       if (intf->cur_altsetting->desc.bNumEndpoints < 1)
+               return -ENODEV;
+
        kbtab = kzalloc(sizeof(struct kbtab), GFP_KERNEL);
        input_dev = input_allocate_device();
        if (!kbtab || !input_dev)
index aefb6e11f88a0838917b0a3b7a59dc9eae6b37f2..4c0eecae065c113a26400469d7def3c2b9bcc635 100644 (file)
@@ -527,6 +527,9 @@ static int sur40_probe(struct usb_interface *interface,
        if (iface_desc->desc.bInterfaceClass != 0xFF)
                return -ENODEV;
 
+       if (iface_desc->desc.bNumEndpoints < 5)
+               return -ENODEV;
+
        /* Use endpoint #4 (0x86). */
        endpoint = &iface_desc->endpoint[4].desc;
        if (endpoint->bEndpointAddress != TOUCH_ENDPOINT)
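
The hanwang, kbtab, and sur40 hunks (and the Gigaset one further down) all add the same probe-time guard: check bNumEndpoints before indexing the endpoint array, since a crafted or broken descriptor could otherwise cause an out-of-bounds read. A sketch of the pattern with a hypothetical driver; only the guard itself is taken from the hunks:

#include <linux/errno.h>
#include <linux/usb.h>

#define NEEDED_ENDPOINTS 5	/* sur40 dereferences endpoint[4] */

static int example_probe(struct usb_interface *intf,
			 const struct usb_device_id *id)
{
	struct usb_host_interface *alt = intf->cur_altsetting;

	/* reject descriptors that lack the endpoints used below */
	if (alt->desc.bNumEndpoints < NEEDED_ENDPOINTS)
		return -ENODEV;

	/* &alt->endpoint[NEEDED_ENDPOINTS - 1] is now known to exist */
	return 0;
}
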
index 98940d1392cb0cd19d648b6a25f2a5ba36c052d4..b17536d6e69bdbf956a61d8c3dff06351a1a51df 100644 (file)
@@ -3202,7 +3202,7 @@ static void amd_iommu_get_resv_regions(struct device *dev,
 
        region = iommu_alloc_resv_region(MSI_RANGE_START,
                                         MSI_RANGE_END - MSI_RANGE_START + 1,
-                                        0, IOMMU_RESV_RESERVED);
+                                        0, IOMMU_RESV_MSI);
        if (!region)
                return;
        list_add_tail(&region->list, head);
index 5806a6acc94ecd7543c2435558a0907ec0934ff2..591bb96047c9765fd3e0fb536d3f26ca0a5f187f 100644 (file)
@@ -1888,7 +1888,7 @@ static void arm_smmu_get_resv_regions(struct device *dev,
        int prot = IOMMU_WRITE | IOMMU_NOEXEC | IOMMU_MMIO;
 
        region = iommu_alloc_resv_region(MSI_IOVA_BASE, MSI_IOVA_LENGTH,
-                                        prot, IOMMU_RESV_MSI);
+                                        prot, IOMMU_RESV_SW_MSI);
        if (!region)
                return;
 
index abf6496843a617070289377ffad3fd1e119b0aa6..b493c99e17f74de338805167c3ddb104e3354b62 100644 (file)
@@ -1608,7 +1608,7 @@ static void arm_smmu_get_resv_regions(struct device *dev,
        int prot = IOMMU_WRITE | IOMMU_NOEXEC | IOMMU_MMIO;
 
        region = iommu_alloc_resv_region(MSI_IOVA_BASE, MSI_IOVA_LENGTH,
-                                        prot, IOMMU_RESV_MSI);
+                                        prot, IOMMU_RESV_SW_MSI);
        if (!region)
                return;
 
index a7e0821c9967e490258921238e6640723e79375d..c01bfcdb238316c049ae0dd6b4bf2d43c6c61440 100644 (file)
@@ -512,7 +512,13 @@ static void sysmmu_tlb_invalidate_flpdcache(struct sysmmu_drvdata *data,
        spin_lock_irqsave(&data->lock, flags);
        if (data->active && data->version >= MAKE_MMU_VER(3, 3)) {
                clk_enable(data->clk_master);
-               __sysmmu_tlb_invalidate_entry(data, iova, 1);
+               if (sysmmu_block(data)) {
+                       if (data->version >= MAKE_MMU_VER(5, 0))
+                               __sysmmu_tlb_invalidate(data);
+                       else
+                               __sysmmu_tlb_invalidate_entry(data, iova, 1);
+                       sysmmu_unblock(data);
+               }
                clk_disable(data->clk_master);
        }
        spin_unlock_irqrestore(&data->lock, flags);
index 238ad3447712d263ef9d67a109c2d86c03693a87..d412a313a37232997d406e53379e1466d10b93e7 100644 (file)
@@ -916,7 +916,7 @@ static struct intel_iommu *device_to_iommu(struct device *dev, u8 *bus, u8 *devf
                                 * which we used for the IOMMU lookup. Strictly speaking
                                 * we could do this for all PCI devices; we only need to
                                 * get the BDF# from the scope table for ACPI matches. */
-                               if (pdev->is_virtfn)
+                               if (pdev && pdev->is_virtfn)
                                        goto got_pdev;
 
                                *bus = drhd->devices[i].bus;
@@ -5249,7 +5249,7 @@ static void intel_iommu_get_resv_regions(struct device *device,
 
        reg = iommu_alloc_resv_region(IOAPIC_RANGE_START,
                                      IOAPIC_RANGE_END - IOAPIC_RANGE_START + 1,
-                                     0, IOMMU_RESV_RESERVED);
+                                     0, IOMMU_RESV_MSI);
        if (!reg)
                return;
        list_add_tail(&reg->list, head);
index 1c049e2e12bf0ddacbc0e8ff9cbb09751996a549..8d6ca28c3e1f14a6c364aae89640ee6535f8f0d6 100644 (file)
@@ -422,8 +422,12 @@ static int __arm_v7s_map(struct arm_v7s_io_pgtable *data, unsigned long iova,
                        pte |= ARM_V7S_ATTR_NS_TABLE;
 
                __arm_v7s_set_pte(ptep, pte, 1, cfg);
-       } else {
+       } else if (ARM_V7S_PTE_IS_TABLE(pte, lvl)) {
                cptep = iopte_deref(pte, lvl);
+       } else {
+               /* We require an unmap first */
+               WARN_ON(!selftest_running);
+               return -EEXIST;
        }
 
        /* Rinse, repeat */
index feacc54bec683b535fcba37e47ecb46af014ef5a..f9bc6ebb8140b06c845355560fd6e5d113912073 100644 (file)
@@ -335,8 +335,12 @@ static int __arm_lpae_map(struct arm_lpae_io_pgtable *data, unsigned long iova,
                if (cfg->quirks & IO_PGTABLE_QUIRK_ARM_NS)
                        pte |= ARM_LPAE_PTE_NSTABLE;
                __arm_lpae_set_pte(ptep, pte, cfg);
-       } else {
+       } else if (!iopte_leaf(pte, lvl)) {
                cptep = iopte_deref(pte, data);
+       } else {
+               /* We require an unmap first */
+               WARN_ON(!selftest_running);
+               return -EEXIST;
        }
 
        /* Rinse, repeat */
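
Both page-table formats now distinguish three cases at each level instead of two: an empty slot (install a new table), an existing table entry (descend), and an existing leaf (refuse with -EEXIST rather than silently overwrite it). A schematic of the control flow only; pte_is_empty() and friends are stand-ins for the format-specific tests above:

	if (pte_is_empty(pte)) {
		install_table(ptep, new_table);	/* grow the tree */
	} else if (pte_is_table(pte, lvl)) {
		cptep = deref_table(pte);	/* walk down one level */
	} else {
		/* valid leaf: callers must unmap before remapping */
		WARN_ON(!selftest_running);
		return -EEXIST;
	}
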
index 8ea14f41a979fd4e72e3a6093e5fa8d2a0eff24a..3b67144dead2e3811918af8fa44ec6e67a19c955 100644 (file)
@@ -72,6 +72,7 @@ static const char * const iommu_group_resv_type_string[] = {
        [IOMMU_RESV_DIRECT]     = "direct",
        [IOMMU_RESV_RESERVED]   = "reserved",
        [IOMMU_RESV_MSI]        = "msi",
+       [IOMMU_RESV_SW_MSI]     = "msi",
 };
 
 #define IOMMU_GROUP_ATTR(_name, _mode, _show, _store)          \
@@ -1743,8 +1744,8 @@ void iommu_put_resv_regions(struct device *dev, struct list_head *list)
 }
 
 struct iommu_resv_region *iommu_alloc_resv_region(phys_addr_t start,
-                                                 size_t length,
-                                                 int prot, int type)
+                                                 size_t length, int prot,
+                                                 enum iommu_resv_type type)
 {
        struct iommu_resv_region *region;
 
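
With the type parameter now an enum iommu_resv_type, callers state explicitly whether a region is a hardware MSI doorbell (IOMMU_RESV_MSI) or a software-managed MSI window (IOMMU_RESV_SW_MSI), as the SMMU hunks above do. A sketch of the SW-MSI case; the MSI_IOVA_* values mirror those drivers and should be treated as illustrative:

#include <linux/iommu.h>
#include <linux/list.h>

#define MSI_IOVA_BASE	0x8000000
#define MSI_IOVA_LENGTH	0x100000

static void example_get_resv_regions(struct device *dev,
				     struct list_head *head)
{
	int prot = IOMMU_WRITE | IOMMU_NOEXEC | IOMMU_MMIO;
	struct iommu_resv_region *region;

	region = iommu_alloc_resv_region(MSI_IOVA_BASE, MSI_IOVA_LENGTH,
					 prot, IOMMU_RESV_SW_MSI);
	if (!region)
		return;
	list_add_tail(&region->list, head);
}
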
index 125528f39e92c2377846f799a841e23ef1e98654..8162121bb1bcd766e06c74c7a8e6fc337a57a605 100644 (file)
@@ -262,6 +262,7 @@ config IRQ_MXS
 
 config MVEBU_ODMI
        bool
+       select GENERIC_MSI_IRQ_DOMAIN
 
 config MVEBU_PIC
        bool
index 1eef56a89b1fbff1ee348f08b4623f6fcf4f6851..f96601268f7194bb5aada8f0f07327f2871d1c76 100644 (file)
@@ -198,7 +198,8 @@ static const struct irq_domain_ops crossbar_domain_ops = {
 
 static int __init crossbar_of_init(struct device_node *node)
 {
-       int i, size, max = 0, reserved = 0, entry;
+       u32 max = 0, entry, reg_size;
+       int i, size, reserved = 0;
        const __be32 *irqsr;
        int ret = -ENOMEM;
 
@@ -275,9 +276,9 @@ static int __init crossbar_of_init(struct device_node *node)
        if (!cb->register_offsets)
                goto err_irq_map;
 
-       of_property_read_u32(node, "ti,reg-size", &size);
+       of_property_read_u32(node, "ti,reg-size", &reg_size);
 
-       switch (size) {
+       switch (reg_size) {
        case 1:
                cb->write = crossbar_writeb;
                break;
@@ -303,7 +304,7 @@ static int __init crossbar_of_init(struct device_node *node)
                        continue;
 
                cb->register_offsets[i] = reserved;
-               reserved += size;
+               reserved += reg_size;
        }
 
        of_property_read_u32(node, "ti,irqs-safe-map", &cb->safe_map);
index 23201004fd7a68e39055a69abbd41019aa66b12b..f77f840d2b5f7995ee0424445546a140079a5022 100644 (file)
@@ -1601,6 +1601,14 @@ static void __maybe_unused its_enable_quirk_cavium_23144(void *data)
        its->flags |= ITS_FLAGS_WORKAROUND_CAVIUM_23144;
 }
 
+static void __maybe_unused its_enable_quirk_qdf2400_e0065(void *data)
+{
+       struct its_node *its = data;
+
+       /* On QDF2400, the size of the ITE is 16 bytes */
+       its->ite_size = 16;
+}
+
 static const struct gic_quirk its_quirks[] = {
 #ifdef CONFIG_CAVIUM_ERRATUM_22375
        {
@@ -1617,6 +1625,14 @@ static const struct gic_quirk its_quirks[] = {
                .mask   = 0xffff0fff,
                .init   = its_enable_quirk_cavium_23144,
        },
+#endif
+#ifdef CONFIG_QCOM_QDF2400_ERRATUM_0065
+       {
+               .desc   = "ITS: QDF2400 erratum 0065",
+               .iidr   = 0x00001070, /* QDF2400 ITS rev 1.x */
+               .mask   = 0xffffffff,
+               .init   = its_enable_quirk_qdf2400_e0065,
+       },
 #endif
        {
        }
index 11d12bccc4e7f10d72fa41f5464ed4d6b6f02a5f..cd20df12d63d98f0e1ae7315b135493699feca4e 100644 (file)
@@ -991,8 +991,12 @@ static void __init gic_map_single_int(struct device_node *node,
 
 static void __init gic_map_interrupts(struct device_node *node)
 {
+       gic_map_single_int(node, GIC_LOCAL_INT_WD);
+       gic_map_single_int(node, GIC_LOCAL_INT_COMPARE);
        gic_map_single_int(node, GIC_LOCAL_INT_TIMER);
        gic_map_single_int(node, GIC_LOCAL_INT_PERFCTR);
+       gic_map_single_int(node, GIC_LOCAL_INT_SWINT0);
+       gic_map_single_int(node, GIC_LOCAL_INT_SWINT1);
        gic_map_single_int(node, GIC_LOCAL_INT_FDC);
 }
 
index 1dfd1085a04f87a016a2405e68200f9c09a2263e..9ca691d6c13b4d31cdb5b29221214f377a7d490a 100644 (file)
@@ -1032,6 +1032,7 @@ static int old_capi_manufacturer(unsigned int cmd, void __user *data)
                                                     sizeof(avmb1_carddef))))
                                return -EFAULT;
                        cdef.cardtype = AVM_CARDTYPE_B1;
+                       cdef.cardnr = 0;
                } else {
                        if ((retval = copy_from_user(&cdef, data,
                                                     sizeof(avmb1_extcarddef))))
index 50749a70c5cacb99ee2dfbb0a1957b6baf336247..060d357f107f8c7720b08e6791dc352893c08fff 100644 (file)
@@ -157,10 +157,8 @@ int cf_command(int drvid, int mode,
        /* allocate mem for information struct */
        if (!(cs = kmalloc(sizeof(struct call_struc), GFP_ATOMIC)))
                return (-ENOMEM); /* no memory */
-       init_timer(&cs->timer);
+       setup_timer(&cs->timer, deflect_timer_expire, (ulong)cs);
        cs->info[0] = '\0';
-       cs->timer.function = deflect_timer_expire;
-       cs->timer.data = (ulong) cs; /* pointer to own structure */
        cs->ics.driver = drvid;
        cs->ics.command = ISDN_CMD_PROT_IO; /* protocol specific io */
        cs->ics.arg = DSS1_CMD_INVOKE; /* invoke supplementary service */
@@ -452,10 +450,9 @@ static int isdn_divert_icall(isdn_ctrl *ic)
                                        return (0); /* no external deflection needed */
                        if (!(cs = kmalloc(sizeof(struct call_struc), GFP_ATOMIC)))
                                return (0); /* no memory */
-                       init_timer(&cs->timer);
+                       setup_timer(&cs->timer, deflect_timer_expire,
+                                   (ulong)cs);
                        cs->info[0] = '\0';
-                       cs->timer.function = deflect_timer_expire;
-                       cs->timer.data = (ulong) cs; /* pointer to own structure */
 
                        cs->ics = *ic; /* copy incoming data */
                        if (!cs->ics.parm.setup.phone[0]) strcpy(cs->ics.parm.setup.phone, "0");
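
This hunk and the long run of ISDN conversions below are mechanical: open-coded init_timer() plus manual function/data assignment becomes a single setup_timer() call with identical semantics. A self-contained sketch of the before/after shape, with a hypothetical callback and context struct:

#include <linux/timer.h>

struct my_ctx {					/* hypothetical */
	struct timer_list timer;
};

static void my_timeout(unsigned long data)
{
	struct my_ctx *ctx = (struct my_ctx *)data;

	/* ... handle expiry for ctx ... */
	(void)ctx;
}

static void my_init(struct my_ctx *ctx)
{
	/* Before:
	 *	init_timer(&ctx->timer);
	 *	ctx->timer.function = my_timeout;
	 *	ctx->timer.data = (unsigned long)ctx;
	 * After:
	 */
	setup_timer(&ctx->timer, my_timeout, (unsigned long)ctx);
}
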
index 11e13c56126fba31fca9c59d66252ffeced55c8a..2da3ff650e1d550cc50fd8572156ddd1cabd7961 100644 (file)
@@ -2317,6 +2317,9 @@ static int gigaset_probe(struct usb_interface *interface,
                return -ENODEV;
        }
 
+       if (hostif->desc.bNumEndpoints < 1)
+               return -ENODEV;
+
        dev_info(&udev->dev,
                 "%s: Device matched (Vendor: 0x%x, Product: 0x%x)\n",
                 __func__, le16_to_cpu(udev->descriptor.idVendor),
index cb88090f9cea3036af14b8e8bcaa9eaeb4d58434..c61049585cbd7b67f24e057238244b8938500a9e 100644 (file)
@@ -300,9 +300,8 @@ static int um_idi_open_adapter(struct file *file, int adapter_nr)
        p_os = (diva_um_idi_os_context_t *) diva_um_id_get_os_context(e);
        init_waitqueue_head(&p_os->read_wait);
        init_waitqueue_head(&p_os->close_wait);
-       init_timer(&p_os->diva_timer_id);
-       p_os->diva_timer_id.function = (void *) diva_um_timer_function;
-       p_os->diva_timer_id.data = (unsigned long) p_os;
+       setup_timer(&p_os->diva_timer_id, (void *)diva_um_timer_function,
+                   (unsigned long)p_os);
        p_os->aborted = 0;
        p_os->adapter_nr = adapter_nr;
        return (1);
index 09df54fc1fef2162bf06228dddc431f2f7a9feeb..fda912b0833ff9dad7c6e04898487074f094b2a3 100644 (file)
@@ -13,7 +13,7 @@ config MISDN_HFCPCI
 
 config MISDN_HFCMULTI
        tristate "Support for HFC multiport cards (HFC-4S/8S/E1)"
-       depends on PCI || 8xx
+       depends on PCI || CPM1
        depends on MISDN
        help
          Enable support for cards with Cologne Chip AG's HFC multiport
@@ -27,8 +27,8 @@ config MISDN_HFCMULTI_8xx
        bool "Support for XHFC embedded board in HFC multiport driver"
        depends on MISDN
        depends on MISDN_HFCMULTI
-       depends on 8xx
-       default 8xx
+       depends on CPM1
+       default CPM1
        help
          Enable support for the XHFC embedded solution from Speech Design.
 
index 0eafe9f04fca326a40f6e5bb549b78420bd39e26..8a254747768e9de4181d6b133da05b292830e0ae 100644 (file)
@@ -6,7 +6,7 @@
  *
  */
 
-#include <asm/8xx_immap.h>
+#include <asm/cpm1.h>
 
 /* Change this to the value used by your board */
 #ifndef IMAP_ADDR
index 480c2d7794ebdcfbcb38667a0807a156a1b2fac3..961c07ee47b7337005a2997b557b0539aebff2fa 100644 (file)
@@ -3878,9 +3878,8 @@ hfcmulti_initmode(struct dchannel *dch)
                if (hc->dnum[pt]) {
                        mode_hfcmulti(hc, dch->slot, dch->dev.D.protocol,
                                      -1, 0, -1, 0);
-                       dch->timer.function = (void *) hfcmulti_dbusy_timer;
-                       dch->timer.data = (long) dch;
-                       init_timer(&dch->timer);
+                       setup_timer(&dch->timer, (void *)hfcmulti_dbusy_timer,
+                                   (long)dch);
                }
                for (i = 1; i <= 31; i++) {
                        if (!((1 << i) & hc->bmask[pt])) /* skip unused chan */
@@ -3986,9 +3985,8 @@ hfcmulti_initmode(struct dchannel *dch)
                hc->chan[i].slot_rx = -1;
                hc->chan[i].conf = -1;
                mode_hfcmulti(hc, i, dch->dev.D.protocol, -1, 0, -1, 0);
-               dch->timer.function = (void *) hfcmulti_dbusy_timer;
-               dch->timer.data = (long) dch;
-               init_timer(&dch->timer);
+               setup_timer(&dch->timer, (void *)hfcmulti_dbusy_timer,
+                           (long)dch);
                hc->chan[i - 2].slot_tx = -1;
                hc->chan[i - 2].slot_rx = -1;
                hc->chan[i - 2].conf = -1;
index ff48da61c94c849bf06cbb9ab9cb149515dcd626..5dc246d71c167d5a69f449603cbd4a329a85fa9c 100644 (file)
@@ -1717,9 +1717,8 @@ static void
 inithfcpci(struct hfc_pci *hc)
 {
        printk(KERN_DEBUG "inithfcpci: entered\n");
-       hc->dch.timer.function = (void *) hfcpci_dbusy_timer;
-       hc->dch.timer.data = (long) &hc->dch;
-       init_timer(&hc->dch.timer);
+       setup_timer(&hc->dch.timer, (void *)hfcpci_dbusy_timer,
+                   (long)&hc->dch);
        hc->chanlimit = 2;
        mode_hfcpci(&hc->bch[0], 1, -1);
        mode_hfcpci(&hc->bch[1], 2, -1);
@@ -2044,9 +2043,7 @@ setup_hw(struct hfc_pci *hc)
        Write_hfc(hc, HFCPCI_INT_M1, hc->hw.int_m1);
        /* At this point the needed PCI config is done */
        /* fifos are still not enabled */
-       hc->hw.timer.function = (void *) hfcpci_Timer;
-       hc->hw.timer.data = (long) hc;
-       init_timer(&hc->hw.timer);
+       setup_timer(&hc->hw.timer, (void *)hfcpci_Timer, (long)hc);
        /* default PCM master */
        test_and_set_bit(HFC_CFG_MASTER, &hc->cfg);
        return 0;
index 77dec28ba874c7f220944afc07e6ebcb67fa00a0..6742b0dc082115df347955c77c619e5a47ffb2d6 100644 (file)
@@ -796,9 +796,8 @@ isac_init(struct isac_hw *isac)
        }
        isac->mon_tx = NULL;
        isac->mon_rx = NULL;
-       isac->dch.timer.function = (void *) dbusy_timer_handler;
-       isac->dch.timer.data = (long)isac;
-       init_timer(&isac->dch.timer);
+       setup_timer(&isac->dch.timer, (void *)dbusy_timer_handler,
+                   (long)isac);
        isac->mocr = 0xaa;
        if (isac->type & IPAC_TYPE_ISACX) {
                /* Disable all IRQ */
index feafa91c2ed99088c0958059e0de6c04dd832128..5b078591b6ee846455ac25642c25c8d932bde99d 100644 (file)
@@ -1635,13 +1635,11 @@ init_isar(struct isar_hw *isar)
        }
        if (isar->version != 1)
                return -EINVAL;
-       isar->ch[0].ftimer.function = &ftimer_handler;
-       isar->ch[0].ftimer.data = (long)&isar->ch[0];
-       init_timer(&isar->ch[0].ftimer);
+       setup_timer(&isar->ch[0].ftimer, &ftimer_handler,
+                   (long)&isar->ch[0]);
        test_and_set_bit(FLG_INITIALIZED, &isar->ch[0].bch.Flags);
-       isar->ch[1].ftimer.function = &ftimer_handler;
-       isar->ch[1].ftimer.data = (long)&isar->ch[1];
-       init_timer(&isar->ch[1].ftimer);
+       setup_timer(&isar->ch[1].ftimer, &ftimer_handler,
+                   (long)&isar->ch[1]);
        test_and_set_bit(FLG_INITIALIZED, &isar->ch[1].bch.Flags);
        return 0;
 }
index 3b067ea656bd9260d0172d7e2387d602d459d170..3052c836b89f70bc441520e439635a6e91dd2248 100644 (file)
@@ -852,9 +852,8 @@ static void initW6692(struct w6692_hw *card)
 {
        u8      val;
 
-       card->dch.timer.function = (void *)dbusy_timer_handler;
-       card->dch.timer.data = (u_long)&card->dch;
-       init_timer(&card->dch.timer);
+       setup_timer(&card->dch.timer, (void *)dbusy_timer_handler,
+                   (u_long)&card->dch);
        w6692_mode(&card->bc[0], ISDN_P_NONE);
        w6692_mode(&card->bc[1], ISDN_P_NONE);
        WriteW6692(card, W_D_CTL, 0x00);
index 36817e0a0b9465df6d1f9f8c4eee7cb0e5d24157..3a4c2f9e19e9adf81c4d93eb7847ed8b549745e2 100644 (file)
@@ -789,7 +789,5 @@ void Amd7930_init(struct IsdnCardState *cs)
 void setup_Amd7930(struct IsdnCardState *cs)
 {
        INIT_WORK(&cs->tqueue, Amd7930_bh);
-       cs->dbusytimer.function = (void *) dbusy_timer_handler;
-       cs->dbusytimer.data = (long) cs;
-       init_timer(&cs->dbusytimer);
+       setup_timer(&cs->dbusytimer, (void *)dbusy_timer_handler, (long)cs);
 }
index 29ec2dfbd155521022438964f24ea91ff333926d..9826bad49e2c1f60f11108661ecb610bcaf878b2 100644 (file)
@@ -125,9 +125,7 @@ clear_arcofi(struct IsdnCardState *cs) {
 
 void
 init_arcofi(struct IsdnCardState *cs) {
-       cs->dc.isac.arcofitimer.function = (void *) arcofi_timer;
-       cs->dc.isac.arcofitimer.data = (long) cs;
-       init_timer(&cs->dc.isac.arcofitimer);
+       setup_timer(&cs->dc.isac.arcofitimer, (void *)arcofi_timer, (long)cs);
        init_waitqueue_head(&cs->dc.isac.arcofi_wait);
        test_and_set_bit(HW_ARCOFI, &cs->HW_Flags);
 }
index 4fc90de68d18a46941cfd8c629dd5f6624600173..079336e593f95a2970b1b43606ee92b4b7d7ae0f 100644 (file)
@@ -976,9 +976,8 @@ static int setup_diva_common(struct IsdnCardState *cs)
                printk(KERN_INFO "Diva: IPACX Design Id: %x\n",
                       MemReadISAC_IPACX(cs, IPACX_ID) & 0x3F);
        } else { /* DIVA 2.0 */
-               cs->hw.diva.tl.function = (void *) diva_led_handler;
-               cs->hw.diva.tl.data = (long) cs;
-               init_timer(&cs->hw.diva.tl);
+               setup_timer(&cs->hw.diva.tl, (void *)diva_led_handler,
+                           (long)cs);
                cs->readisac  = &ReadISAC;
                cs->writeisac = &WriteISAC;
                cs->readisacfifo  = &ReadISACfifo;
index d8ef64da26f1fe6e0a25c5515401079ed19ff3cb..03bc5d504e2266774ab866c695a52546d72816b4 100644 (file)
@@ -1147,9 +1147,7 @@ static int setup_elsa_common(struct IsdnCard *card)
        init_arcofi(cs);
 #endif
        setup_isac(cs);
-       cs->hw.elsa.tl.function = (void *) elsa_led_handler;
-       cs->hw.elsa.tl.data = (long) cs;
-       init_timer(&cs->hw.elsa.tl);
+       setup_timer(&cs->hw.elsa.tl, (void *)elsa_led_handler, (long)cs);
        /* Teste Timer */
        if (cs->hw.elsa.timer) {
                byteout(cs->hw.elsa.trig, 0xff);
index c7a94713e9ec98fae0c36d61ffdc40cd27884d70..d63266fa8cbdb93bf37e630af6663f511292786d 100644 (file)
@@ -98,13 +98,11 @@ void
 FsmInitTimer(struct FsmInst *fi, struct FsmTimer *ft)
 {
        ft->fi = fi;
-       ft->tl.function = (void *) FsmExpireTimer;
-       ft->tl.data = (long) ft;
 #if FSM_TIMER_DEBUG
        if (ft->fi->debug)
                ft->fi->printdebug(ft->fi, "FsmInitTimer %lx", (long) ft);
 #endif
-       init_timer(&ft->tl);
+       setup_timer(&ft->tl, (void *)FsmExpireTimer, (long)ft);
 }
 
 void
index e034ed847ff32ca1fcf0a55e66eebe2171e3d32f..90f051ce02590f3e70ee2e1227d7aa507441a24c 100644 (file)
@@ -1396,9 +1396,8 @@ setup_instance(hfc4s8s_hw *hw)
                l1p = hw->l1 + i;
                spin_lock_init(&l1p->lock);
                l1p->hw = hw;
-               l1p->l1_timer.function = (void *) hfc_l1_timer;
-               l1p->l1_timer.data = (long) (l1p);
-               init_timer(&l1p->l1_timer);
+               setup_timer(&l1p->l1_timer, (void *)hfc_l1_timer,
+                           (long)(l1p));
                l1p->st_num = i;
                skb_queue_head_init(&l1p->d_tx_queue);
                l1p->d_if.ifc.priv = hw->l1 + i;
index a756e5cb6871cbbee83810875bfa29ed8ca93529..ad8597a1a07efd75e1979054fdcda31ecab8b7a6 100644 (file)
@@ -1073,8 +1073,6 @@ set_cs_func(struct IsdnCardState *cs)
        cs->writeisacfifo = &dummyf;
        cs->BC_Read_Reg = &ReadReg;
        cs->BC_Write_Reg = &WriteReg;
-       cs->dbusytimer.function = (void *) hfc_dbusy_timer;
-       cs->dbusytimer.data = (long) cs;
-       init_timer(&cs->dbusytimer);
+       setup_timer(&cs->dbusytimer, (void *)hfc_dbusy_timer, (long)cs);
        INIT_WORK(&cs->tqueue, hfcd_bh);
 }
index 90449e1e91e5a27924da01df6215dfa6c326651e..f9ca35cc32b135bc2ae8d31b7f2c87c05d5cfb73 100644 (file)
@@ -1582,9 +1582,7 @@ inithfcpci(struct IsdnCardState *cs)
        cs->bcs[1].BC_SetStack = setstack_2b;
        cs->bcs[0].BC_Close = close_hfcpci;
        cs->bcs[1].BC_Close = close_hfcpci;
-       cs->dbusytimer.function = (void *) hfcpci_dbusy_timer;
-       cs->dbusytimer.data = (long) cs;
-       init_timer(&cs->dbusytimer);
+       setup_timer(&cs->dbusytimer, (void *)hfcpci_dbusy_timer, (long)cs);
        mode_hfcpci(cs->bcs, 0, 0);
        mode_hfcpci(cs->bcs + 1, 0, 1);
 }
@@ -1746,9 +1744,7 @@ setup_hfcpci(struct IsdnCard *card)
        cs->BC_Write_Reg = NULL;
        cs->irq_func = &hfcpci_interrupt;
        cs->irq_flags |= IRQF_SHARED;
-       cs->hw.hfcpci.timer.function = (void *) hfcpci_Timer;
-       cs->hw.hfcpci.timer.data = (long) cs;
-       init_timer(&cs->hw.hfcpci.timer);
+       setup_timer(&cs->hw.hfcpci.timer, (void *)hfcpci_Timer, (long)cs);
        cs->cardmsg = &hfcpci_card_msg;
        cs->auxcmd = &hfcpci_auxcmd;
 
index 13b2151c10f54ff9fd5bcec76e1ceef2b99aa472..3aef8e1a90e4455c6b1aa1e5eb935c95cd018210 100644 (file)
@@ -1495,9 +1495,7 @@ int setup_hfcsx(struct IsdnCard *card)
        } else
                return (0);     /* no valid card type */
 
-       cs->dbusytimer.function = (void *) hfcsx_dbusy_timer;
-       cs->dbusytimer.data = (long) cs;
-       init_timer(&cs->dbusytimer);
+       setup_timer(&cs->dbusytimer, (void *)hfcsx_dbusy_timer, (long)cs);
        INIT_WORK(&cs->tqueue, hfcsx_bh);
        cs->readisac = NULL;
        cs->writeisac = NULL;
@@ -1507,11 +1505,9 @@ int setup_hfcsx(struct IsdnCard *card)
        cs->BC_Write_Reg = NULL;
        cs->irq_func = &hfcsx_interrupt;
 
-       cs->hw.hfcsx.timer.function = (void *) hfcsx_Timer;
-       cs->hw.hfcsx.timer.data = (long) cs;
        cs->hw.hfcsx.b_fifo_size = 0; /* fifo size still unknown */
        cs->hw.hfcsx.cirm = ccd_sp_irqtab[cs->irq & 0xF]; /* RAM not evaluated */
-       init_timer(&cs->hw.hfcsx.timer);
+       setup_timer(&cs->hw.hfcsx.timer, (void *)hfcsx_Timer, (long)cs);
 
        reset_hfcsx(cs);
        cs->cardmsg = &hfcsx_card_msg;
index 678bd5224bc338a2767a106fb8057f544f218050..6dbd1f1da14f1e8752caadf6c9f2ca192cc63a3d 100644 (file)
@@ -1165,14 +1165,10 @@ hfc_usb_init(hfcusb_data *hfc)
        hfc->old_led_state = 0;
 
        /* init the t3 timer */
-       init_timer(&hfc->t3_timer);
-       hfc->t3_timer.data = (long) hfc;
-       hfc->t3_timer.function = (void *) l1_timer_expire_t3;
+       setup_timer(&hfc->t3_timer, (void *)l1_timer_expire_t3, (long)hfc);
 
        /* init the t4 timer */
-       init_timer(&hfc->t4_timer);
-       hfc->t4_timer.data = (long) hfc;
-       hfc->t4_timer.function = (void *) l1_timer_expire_t4;
+       setup_timer(&hfc->t4_timer, (void *)l1_timer_expire_t4, (long)hfc);
 
        /* init the background machinery for control requests */
        hfc->ctrl_read.bRequestType = 0xc0;
index 394da646e97b7316e618f054519095f83c338caa..467287096918a6661c93ab552a1759fb83fd45c8 100644 (file)
@@ -253,9 +253,7 @@ int setup_hfcs(struct IsdnCard *card)
                outb(0x57, cs->hw.hfcD.addr | 1);
        }
        set_cs_func(cs);
-       cs->hw.hfcD.timer.function = (void *) hfcs_Timer;
-       cs->hw.hfcD.timer.data = (long) cs;
-       init_timer(&cs->hw.hfcD.timer);
+       setup_timer(&cs->hw.hfcD.timer, (void *)hfcs_Timer, (long)cs);
        cs->cardmsg = &hfcs_card_msg;
        cs->irq_func = &hfcs_interrupt;
        return (1);
index 96d1df05044fb48ffceb988dd90540db9f125cdd..c7c3797a817ebecc285248985fbb4dedc50d6493 100644 (file)
@@ -676,7 +676,5 @@ clear_pending_icc_ints(struct IsdnCardState *cs)
 void setup_icc(struct IsdnCardState *cs)
 {
        INIT_WORK(&cs->tqueue, icc_bh);
-       cs->dbusytimer.function = (void *) dbusy_timer_handler;
-       cs->dbusytimer.data = (long) cs;
-       init_timer(&cs->dbusytimer);
+       setup_timer(&cs->dbusytimer, (void *)dbusy_timer_handler, (long)cs);
 }
index 9cc26b40a43771dee4d670f2416711897bc9ac1a..43effe7082ed9bba9537648eef9d950a0e172785 100644 (file)
@@ -424,9 +424,7 @@ dch_init(struct IsdnCardState *cs)
 
        cs->setstack_d      = dch_setstack;
 
-       cs->dbusytimer.function = (void *) dbusy_timer_handler;
-       cs->dbusytimer.data = (long) cs;
-       init_timer(&cs->dbusytimer);
+       setup_timer(&cs->dbusytimer, (void *)dbusy_timer_handler, (long)cs);
 
        cs->writeisac(cs, IPACX_TR_CONF0, 0x00);  // clear LDD
        cs->writeisac(cs, IPACX_TR_CONF2, 0x00);  // enable transmitter
index df7e05ca8f9c197acf94040606555fcd45a51067..4273b4548825136624a3687cae20b971fa3e0244 100644 (file)
@@ -677,7 +677,5 @@ void clear_pending_isac_ints(struct IsdnCardState *cs)
 void setup_isac(struct IsdnCardState *cs)
 {
        INIT_WORK(&cs->tqueue, isac_bh);
-       cs->dbusytimer.function = (void *) dbusy_timer_handler;
-       cs->dbusytimer.data = (long) cs;
-       init_timer(&cs->dbusytimer);
+       setup_timer(&cs->dbusytimer, (void *)dbusy_timer_handler, (long)cs);
 }
index f4956c73aa116de71a99a9ae705c81088f3fbed2..0dc60b287c4b000b94f3bb97183ad10bfc1254b9 100644 (file)
@@ -1902,10 +1902,8 @@ void initisar(struct IsdnCardState *cs)
        cs->bcs[1].BC_SetStack = setstack_isar;
        cs->bcs[0].BC_Close = close_isarstate;
        cs->bcs[1].BC_Close = close_isarstate;
-       cs->bcs[0].hw.isar.ftimer.function = (void *) ftimer_handler;
-       cs->bcs[0].hw.isar.ftimer.data = (long) &cs->bcs[0];
-       init_timer(&cs->bcs[0].hw.isar.ftimer);
-       cs->bcs[1].hw.isar.ftimer.function = (void *) ftimer_handler;
-       cs->bcs[1].hw.isar.ftimer.data = (long) &cs->bcs[1];
-       init_timer(&cs->bcs[1].hw.isar.ftimer);
+       setup_timer(&cs->bcs[0].hw.isar.ftimer, (void *)ftimer_handler,
+                   (long)&cs->bcs[0]);
+       setup_timer(&cs->bcs[1].hw.isar.ftimer, (void *)ftimer_handler,
+                   (long)&cs->bcs[1]);
 }
index c754706f83cdc190ca18e299896590430e7b9824..569ce52c567b2beb9f42097bebe88af025d6dbed 100644 (file)
@@ -169,9 +169,7 @@ void
 L3InitTimer(struct l3_process *pc, struct L3Timer *t)
 {
        t->pc = pc;
-       t->tl.function = (void *) L3ExpireTimer;
-       t->tl.data = (long) t;
-       init_timer(&t->tl);
+       setup_timer(&t->tl, (void *)L3ExpireTimer, (long)t);
 }
 
 void
index 409849165838fba631eeb74983c7fc7e8e68b152..f64a36007800cf91132b015773a78436cf488227 100644 (file)
@@ -239,7 +239,7 @@ static void st5481B_mode(struct st5481_bcs *bcs, int mode)
                        }
                }
        } else {
-               // Disble B channel interrupts
+               // Disable B channel interrupts
                st5481_usb_device_ctrl_msg(adapter, FFMSK_B1+(bcs->channel * 2), 0, NULL, NULL);
 
                // Disable B channel FIFOs
index bf647545c70c45b9bc5b0e381af27f67f17a3662..950399f066ef109cc558f09e438dc729a0843643 100644 (file)
@@ -278,9 +278,7 @@ int setup_TeleInt(struct IsdnCard *card)
        cs->bcs[0].hw.hfc.send = NULL;
        cs->bcs[1].hw.hfc.send = NULL;
        cs->hw.hfc.fifosize = 7 * 1024 + 512;
-       cs->hw.hfc.timer.function = (void *) TeleInt_Timer;
-       cs->hw.hfc.timer.data = (long) cs;
-       init_timer(&cs->hw.hfc.timer);
+       setup_timer(&cs->hw.hfc.timer, (void *)TeleInt_Timer, (long)cs);
        if (!request_region(cs->hw.hfc.addr, 2, "TeleInt isdn")) {
                printk(KERN_WARNING
                       "HiSax: TeleInt config port %x-%x already in use\n",
index a85895585d906a6367db5cce95b00f031484d05d..c99f0ec58a0189885cdfdea5ae612f73e30c5925 100644 (file)
@@ -901,9 +901,8 @@ static void initW6692(struct IsdnCardState *cs, int part)
        if (part & 1) {
                cs->setstack_d = setstack_W6692;
                cs->DC_Close = DC_Close_W6692;
-               cs->dbusytimer.function = (void *) dbusy_timer_handler;
-               cs->dbusytimer.data = (long) cs;
-               init_timer(&cs->dbusytimer);
+               setup_timer(&cs->dbusytimer, (void *)dbusy_timer_handler,
+                           (long)cs);
                resetW6692(cs);
                ph_command(cs, W_L1CMD_RST);
                cs->dc.w6692.ph_state = W_L1CMD_RST;
index 9c1e8adaf4fc825c54ff84e9c85d36a68ecb5da7..d07dd5196ffca59c11532051fb88e2ecdc7326c9 100644 (file)
@@ -2370,9 +2370,8 @@ static struct ippp_ccp_reset_state *isdn_ppp_ccp_reset_alloc_state(struct ippp_s
                rs->state = CCPResetIdle;
                rs->is = is;
                rs->id = id;
-               init_timer(&rs->timer);
-               rs->timer.data = (unsigned long)rs;
-               rs->timer.function = isdn_ppp_ccp_timer_callback;
+               setup_timer(&rs->timer, isdn_ppp_ccp_timer_callback,
+                           (unsigned long)rs);
                is->reset->rs[id] = rs;
        }
        return rs;
index 1b169559a240b0c41b9fb8c34d4837ea55d96f77..ddd8207e4e54cf617e76a2f784bc56aeff76b22e 100644 (file)
@@ -1812,9 +1812,8 @@ isdn_tty_modem_init(void)
                info->isdn_channel = -1;
                info->drv_index = -1;
                info->xmit_size = ISDN_SERIAL_XMIT_SIZE;
-               init_timer(&info->nc_timer);
-               info->nc_timer.function = isdn_tty_modem_do_ncarrier;
-               info->nc_timer.data = (unsigned long) info;
+               setup_timer(&info->nc_timer, isdn_tty_modem_do_ncarrier,
+                           (unsigned long)info);
                skb_queue_head_init(&info->xmit_queue);
 #ifdef CONFIG_ISDN_AUDIO
                skb_queue_head_init(&info->dtmf_queue);
index 9b85295aa6578f5ac5c86803e0923f29404b2e13..880e9d367a399ae6efc450a4eca70623717b8f51 100644 (file)
@@ -1092,9 +1092,7 @@ dspcreate(struct channel_req *crq)
        ndsp->pcm_bank_tx = -1;
        ndsp->hfc_conf = -1; /* current conference number */
        /* set tone timer */
-       ndsp->tone.tl.function = (void *)dsp_tone_timeout;
-       ndsp->tone.tl.data = (long) ndsp;
-       init_timer(&ndsp->tone.tl);
+       setup_timer(&ndsp->tone.tl, (void *)dsp_tone_timeout, (long)ndsp);
 
        if (dtmfthreshold < 20 || dtmfthreshold > 500)
                dtmfthreshold = 200;
index 26477d48bbda99cba2697e93f075703047098bab..78fc5d5e90514353b258658da350a85c408b89db 100644 (file)
@@ -110,13 +110,11 @@ void
 mISDN_FsmInitTimer(struct FsmInst *fi, struct FsmTimer *ft)
 {
        ft->fi = fi;
-       ft->tl.function = (void *) FsmExpireTimer;
-       ft->tl.data = (long) ft;
 #if FSM_TIMER_DEBUG
        if (ft->fi->debug)
                ft->fi->printdebug(ft->fi, "mISDN_FsmInitTimer %lx", (long) ft);
 #endif
-       init_timer(&ft->tl);
+       setup_timer(&ft->tl, (void *)FsmExpireTimer, (long)ft);
 }
 EXPORT_SYMBOL(mISDN_FsmInitTimer);
 
index 6ceca7db62ad42c91c10561a2e6f3330ddbefa2a..6be2041248d34832e0ee9af75d753b96c1ca8df2 100644 (file)
@@ -1443,9 +1443,7 @@ init_card(struct l1oip *hc, int pri, int bundle)
        hc->keep_tl.expires = jiffies + 2 * HZ; /* two seconds first time */
        add_timer(&hc->keep_tl);
 
-       hc->timeout_tl.function = (void *)l1oip_timeout;
-       hc->timeout_tl.data = (ulong)hc;
-       init_timer(&hc->timeout_tl);
+       setup_timer(&hc->timeout_tl, (void *)l1oip_timeout, (ulong)hc);
        hc->timeout_on = 0; /* state that we have timer off */
 
        return 0;
index 3f041b1870335ab48daeb847b986a274f619677b..f757cef293f86881667333f3ecc05bbd5f41a197 100644 (file)
@@ -392,6 +392,7 @@ static struct macio_dev * macio_add_one_device(struct macio_chip *chip,
         * To get all the fields, copy all archdata
         */
        dev->ofdev.dev.archdata = chip->lbus.pdev->dev.archdata;
+       dev->ofdev.dev.dma_ops = chip->lbus.pdev->dev.dma_ops;
 #endif /* CONFIG_PCI */
 
 #ifdef DEBUG
index a126919ed102763e9d86da2a9ce615ff0b8a2001..5d13930f0f22fc42e40228cee8eee1ecb7cfa8e5 100644 (file)
@@ -4,7 +4,6 @@
 
 #include <linux/blkdev.h>
 #include <linux/errno.h>
-#include <linux/blkdev.h>
 #include <linux/kernel.h>
 #include <linux/sched/clock.h>
 #include <linux/llist.h>
index f4ffd1eb8f44c3d5c44c50277fb703545157dbcf..dfb75979e4555d806ea52a494e161d4c6f8fa86b 100644 (file)
@@ -989,26 +989,29 @@ static void flush_current_bio_list(struct blk_plug_cb *cb, bool from_schedule)
        struct dm_offload *o = container_of(cb, struct dm_offload, cb);
        struct bio_list list;
        struct bio *bio;
+       int i;
 
        INIT_LIST_HEAD(&o->cb.list);
 
        if (unlikely(!current->bio_list))
                return;
 
-       list = *current->bio_list;
-       bio_list_init(current->bio_list);
-
-       while ((bio = bio_list_pop(&list))) {
-               struct bio_set *bs = bio->bi_pool;
-               if (unlikely(!bs) || bs == fs_bio_set) {
-                       bio_list_add(current->bio_list, bio);
-                       continue;
+       for (i = 0; i < 2; i++) {
+               list = current->bio_list[i];
+               bio_list_init(&current->bio_list[i]);
+
+               while ((bio = bio_list_pop(&list))) {
+                       struct bio_set *bs = bio->bi_pool;
+                       if (unlikely(!bs) || bs == fs_bio_set) {
+                               bio_list_add(&current->bio_list[i], bio);
+                               continue;
+                       }
+
+                       spin_lock(&bs->rescue_lock);
+                       bio_list_add(&bs->rescue_list, bio);
+                       queue_work(bs->rescue_workqueue, &bs->rescue_work);
+                       spin_unlock(&bs->rescue_lock);
                }
-
-               spin_lock(&bs->rescue_lock);
-               bio_list_add(&bs->rescue_list, bio);
-               queue_work(bs->rescue_workqueue, &bs->rescue_work);
-               spin_unlock(&bs->rescue_lock);
        }
 }
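
current->bio_list is now an array of two lists (bios queued at the current level and bios punted to lower devices), so any code draining it must walk both entries, as the rewritten loop above does. Its shape in isolation; requeue_or_rescue() is a hypothetical stand-in for the keep-or-punt decision:

#include <linux/bio.h>
#include <linux/sched.h>

static void requeue_or_rescue(struct bio *bio, int level);	/* hypothetical */

static void drain_current_bio_lists(void)
{
	struct bio_list list;
	struct bio *bio;
	int i;

	for (i = 0; i < 2; i++) {
		list = current->bio_list[i];		/* take a private copy */
		bio_list_init(&current->bio_list[i]);	/* reset the shared one */

		while ((bio = bio_list_pop(&list)))
			requeue_or_rescue(bio, i);
	}
}
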
 
index 2b13117fb918cbe27775ba61cc68c6f78e5408ff..321ecac23027804d18ded577a5c05604ec46220a 100644 (file)
@@ -777,7 +777,6 @@ static int gather_all_resync_info(struct mddev *mddev, int total_slots)
                bm_lockres->flags |= DLM_LKF_NOQUEUE;
                ret = dlm_lock_sync(bm_lockres, DLM_LOCK_PW);
                if (ret == -EAGAIN) {
-                       memset(bm_lockres->lksb.sb_lvbptr, '\0', LVB_SIZE);
                        s = read_resync_info(mddev, bm_lockres);
                        if (s) {
                                pr_info("%s:%d Resync[%llu..%llu] in progress on %d\n",
@@ -974,6 +973,7 @@ static int leave(struct mddev *mddev)
        lockres_free(cinfo->bitmap_lockres);
        unlock_all_bitmaps(mddev);
        dlm_release_lockspace(cinfo->lockspace, 2);
+       kfree(cinfo);
        return 0;
 }
 
index 548d1b8014f89e9f4b1170daff8fa677d758f39a..f6ae1d67bcd02c6b743258ef3ff6a05896828cb5 100644 (file)
@@ -440,14 +440,6 @@ void md_flush_request(struct mddev *mddev, struct bio *bio)
 }
 EXPORT_SYMBOL(md_flush_request);
 
-void md_unplug(struct blk_plug_cb *cb, bool from_schedule)
-{
-       struct mddev *mddev = cb->data;
-       md_wakeup_thread(mddev->thread);
-       kfree(cb);
-}
-EXPORT_SYMBOL(md_unplug);
-
 static inline struct mddev *mddev_get(struct mddev *mddev)
 {
        atomic_inc(&mddev->active);
@@ -1887,7 +1879,7 @@ super_1_rdev_size_change(struct md_rdev *rdev, sector_t num_sectors)
        }
        sb = page_address(rdev->sb_page);
        sb->data_size = cpu_to_le64(num_sectors);
-       sb->super_offset = rdev->sb_start;
+       sb->super_offset = cpu_to_le64(rdev->sb_start);
        sb->sb_csum = calc_sb_1_csum(sb);
        do {
                md_super_write(rdev->mddev, rdev, rdev->sb_start, rdev->sb_size,
@@ -2295,7 +2287,7 @@ static bool does_sb_need_changing(struct mddev *mddev)
        /* Check if any mddev parameters have changed */
        if ((mddev->dev_sectors != le64_to_cpu(sb->size)) ||
            (mddev->reshape_position != le64_to_cpu(sb->reshape_position)) ||
-           (mddev->layout != le64_to_cpu(sb->layout)) ||
+           (mddev->layout != le32_to_cpu(sb->layout)) ||
            (mddev->raid_disks != le32_to_cpu(sb->raid_disks)) ||
            (mddev->chunk_sectors != le32_to_cpu(sb->chunksize)))
                return true;
@@ -6458,11 +6450,10 @@ static int set_array_info(struct mddev *mddev, mdu_array_info_t *info)
        mddev->layout        = info->layout;
        mddev->chunk_sectors = info->chunk_size >> 9;
 
-       mddev->max_disks     = MD_SB_DISKS;
-
        if (mddev->persistent) {
-               mddev->flags         = 0;
-               mddev->sb_flags         = 0;
+               mddev->max_disks = MD_SB_DISKS;
+               mddev->flags = 0;
+               mddev->sb_flags = 0;
        }
        set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
 
@@ -6533,8 +6524,12 @@ static int update_size(struct mddev *mddev, sector_t num_sectors)
                        return -ENOSPC;
        }
        rv = mddev->pers->resize(mddev, num_sectors);
-       if (!rv)
-               revalidate_disk(mddev->gendisk);
+       if (!rv) {
+               if (mddev->queue) {
+                       set_capacity(mddev->gendisk, mddev->array_sectors);
+                       revalidate_disk(mddev->gendisk);
+               }
+       }
        return rv;
 }
 
index b8859cbf84b618b39ed3d92a2887e8764c403919..dde8ecb760c87113ba36d50c0d6867bc6e215f02 100644 (file)
@@ -676,16 +676,10 @@ extern void mddev_resume(struct mddev *mddev);
 extern struct bio *bio_alloc_mddev(gfp_t gfp_mask, int nr_iovecs,
                                   struct mddev *mddev);
 
-extern void md_unplug(struct blk_plug_cb *cb, bool from_schedule);
 extern void md_reload_sb(struct mddev *mddev, int raid_disk);
 extern void md_update_sb(struct mddev *mddev, int force);
 extern void md_kick_rdev_from_array(struct md_rdev * rdev);
 struct md_rdev *md_find_rdev_nr_rcu(struct mddev *mddev, int nr);
-static inline int mddev_check_plugged(struct mddev *mddev)
-{
-       return !!blk_check_plugged(md_unplug, mddev,
-                                  sizeof(struct blk_plug_cb));
-}
 
 static inline void rdev_dec_pending(struct md_rdev *rdev, struct mddev *mddev)
 {
index fbc2d7851b497fec0cacd45832bbd9c9d258eaae..a34f58772022c9f40243e1d117a3473332bd76a2 100644 (file)
@@ -1027,7 +1027,7 @@ static int get_unqueued_pending(struct r1conf *conf)
 static void freeze_array(struct r1conf *conf, int extra)
 {
        /* Stop sync I/O and normal I/O and wait for everything to
-        * go quite.
+        * go quiet.
         * This is called in two situations:
         * 1) management command handlers (reshape, remove disk, quiesce).
         * 2) one normal I/O request failed.
@@ -1587,9 +1587,30 @@ static void raid1_make_request(struct mddev *mddev, struct bio *bio)
                        split = bio;
                }
 
-               if (bio_data_dir(split) == READ)
+               if (bio_data_dir(split) == READ) {
                        raid1_read_request(mddev, split);
-               else
+
+                       /*
+                        * If a bio is split, the first part of the bio will
+                        * pass the barrier but the bio is queued in
+                        * current->bio_list (see generic_make_request). If
+                        * there is a raise_barrier() called here, the second
+                        * part of the bio can't pass the barrier. But since
+                        * the first part isn't dispatched to the underlying
+                        * disks yet, the barrier is never released, hence
+                        * raise_barrier will always wait. We have a deadlock.
+                        * Note, this only happens in the read path. For the
+                        * write path, the first part of the bio is dispatched
+                        * in a schedule() call (because of the blk plug) or
+                        * offloaded to raid1d.
+                        * Returning from the function immediately can change
+                        * the bio order queued in bio_list and avoid the deadlock.
+                        */
+                       if (split != bio) {
+                               generic_make_request(bio);
+                               break;
+                       }
+               } else
                        raid1_write_request(mddev, split);
        } while (split != bio);
 }
@@ -3246,8 +3267,6 @@ static int raid1_resize(struct mddev *mddev, sector_t sectors)
                        return ret;
        }
        md_set_array_sectors(mddev, newsize);
-       set_capacity(mddev->gendisk, mddev->array_sectors);
-       revalidate_disk(mddev->gendisk);
        if (sectors > mddev->dev_sectors &&
            mddev->recovery_cp > mddev->dev_sectors) {
                mddev->recovery_cp = mddev->dev_sectors;
index 063c43d83b72c2f0f753edb7b08f8dd608fa15ad..e89a8d78a9ed537f417c414b2081ef5f9a97f291 100644 (file)
@@ -974,7 +974,8 @@ static void wait_barrier(struct r10conf *conf)
                                    !conf->barrier ||
                                    (atomic_read(&conf->nr_pending) &&
                                     current->bio_list &&
-                                    !bio_list_empty(current->bio_list)),
+                                    (!bio_list_empty(&current->bio_list[0]) ||
+                                     !bio_list_empty(&current->bio_list[1]))),
                                    conf->resync_lock);
                conf->nr_waiting--;
                if (!conf->nr_waiting)
@@ -1477,11 +1478,24 @@ retry_write:
                        mbio->bi_bdev = (void*)rdev;
 
                        atomic_inc(&r10_bio->remaining);
+
+                       cb = blk_check_plugged(raid10_unplug, mddev,
+                                              sizeof(*plug));
+                       if (cb)
+                               plug = container_of(cb, struct raid10_plug_cb,
+                                                   cb);
+                       else
+                               plug = NULL;
                        spin_lock_irqsave(&conf->device_lock, flags);
-                       bio_list_add(&conf->pending_bio_list, mbio);
-                       conf->pending_count++;
+                       if (plug) {
+                               bio_list_add(&plug->pending, mbio);
+                               plug->pending_cnt++;
+                       } else {
+                               bio_list_add(&conf->pending_bio_list, mbio);
+                               conf->pending_count++;
+                       }
                        spin_unlock_irqrestore(&conf->device_lock, flags);
-                       if (!mddev_check_plugged(mddev))
+                       if (!plug)
                                md_wakeup_thread(mddev->thread);
                }
        }
@@ -1571,7 +1585,25 @@ static void raid10_make_request(struct mddev *mddev, struct bio *bio)
                        split = bio;
                }
 
+               /*
+                * If a bio is split, the first part of the bio will pass the
+                * barrier but the bio is queued in current->bio_list (see
+                * generic_make_request). If there is a raise_barrier() called
+                * here, the second part of the bio can't pass the barrier. But
+                * since the first part isn't dispatched to the underlying
+                * disks yet, the barrier is never released, hence
+                * raise_barrier will always wait. We have a deadlock.
+                * Note, this only happens in the read path. For the write
+                * path, the first part of the bio is dispatched in a
+                * schedule() call (because of the blk plug) or offloaded to
+                * raid10d.
+                * Returning from the function immediately can change the bio
+                * order queued in bio_list and avoid the deadlock.
+                */
                __make_request(mddev, split);
+               if (split != bio && bio_data_dir(bio) == READ) {
+                       generic_make_request(bio);
+                       break;
+               }
        } while (split != bio);
 
        /* In case raid10d snuck in to freeze_array */
@@ -3943,10 +3975,6 @@ static int raid10_resize(struct mddev *mddev, sector_t sectors)
                        return ret;
        }
        md_set_array_sectors(mddev, size);
-       if (mddev->queue) {
-               set_capacity(mddev->gendisk, mddev->array_sectors);
-               revalidate_disk(mddev->gendisk);
-       }
        if (sectors > mddev->dev_sectors &&
            mddev->recovery_cp > oldsize) {
                mddev->recovery_cp = oldsize;
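
The core of the raid1/raid10 fix: when bio_split() leaves a remainder on a read, hand that remainder straight back to generic_make_request() instead of looping, so the already-queued first half can be dispatched and release the barrier. A schematic of the fixed loop; variables are as in the hunk above:

	do {
		if (sectors > max_sectors)
			split = bio_split(bio, max_sectors, GFP_NOIO,
					  fs_bio_set);
		else
			split = bio;

		__make_request(mddev, split);

		if (split != bio && bio_data_dir(bio) == READ) {
			/* requeue the remainder so the first half can run
			 * and drop the barrier before it is waited on */
			generic_make_request(bio);
			break;
		}
	} while (split != bio);
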
index 4fb09b3fcb410468a9b1939b93d9529e70dd592d..ed5cd705b985f13611d26b44e81aefbb0e93c306 100644 (file)
@@ -1401,7 +1401,8 @@ static int set_syndrome_sources(struct page **srcs,
                     (test_bit(R5_Wantdrain, &dev->flags) ||
                      test_bit(R5_InJournal, &dev->flags))) ||
                    (srctype == SYNDROME_SRC_WRITTEN &&
-                    dev->written)) {
+                    (dev->written ||
+                     test_bit(R5_InJournal, &dev->flags)))) {
                        if (test_bit(R5_InJournal, &dev->flags))
                                srcs[slot] = sh->dev[i].orig_page;
                        else
@@ -7605,8 +7606,6 @@ static int raid5_resize(struct mddev *mddev, sector_t sectors)
                        return ret;
        }
        md_set_array_sectors(mddev, newsize);
-       set_capacity(mddev->gendisk, mddev->array_sectors);
-       revalidate_disk(mddev->gendisk);
        if (sectors > mddev->dev_sectors &&
            mddev->recovery_cp > mddev->dev_sectors) {
                mddev->recovery_cp = mddev->dev_sectors;
index 7a681d8202c7ee9e9eed6dd2dbb4bf118d8f9508..4442e478db72a2420207efc2deca49d56c92c30c 100644 (file)
@@ -256,8 +256,7 @@ int drxbsp_tuner_default_i2c_write_read(struct tuner_instance *tuner,
 *
 * The actual DAP implementation may be restricted to only one of the modes.
 * A compiler warning or error will be generated if the DAP implementation
-* overides or cannot handle the mode defined below.
-*
+* overrides or cannot handle the mode defined below.
 */
 #ifndef DRXDAP_SINGLE_MASTER
 #define DRXDAP_SINGLE_MASTER 1
@@ -272,7 +271,7 @@ int drxbsp_tuner_default_i2c_write_read(struct tuner_instance *tuner,
 *
 * This maximum size may be restricted by the actual DAP implementation.
 * A compiler warning or error will be generated if the DAP implementation
-* overides or cannot handle the chunksize defined below.
+* overrides or cannot handle the chunksize defined below.
 *
 * Beware that the DAP uses  DRXDAP_MAX_WCHUNKSIZE to create a temporary data
 * buffer. Do not undefine or choose too large, unless your system is able to
@@ -292,8 +291,7 @@ int drxbsp_tuner_default_i2c_write_read(struct tuner_instance *tuner,
 *
 * This maximum size may be restricted by the actual DAP implementation.
 * A compiler warning or error will be generated if the DAP implementation
-* overides or cannot handle the chunksize defined below.
-*
+* overrides or cannot handle the chunksize defined below.
 */
 #ifndef DRXDAP_MAX_RCHUNKSIZE
 #define  DRXDAP_MAX_RCHUNKSIZE 60
index 67fd8ffa60a418f6538d6b358c3e3d91f4f385b6..669a4c82f1ffa4c79b5e47173536faedecf52c34 100644 (file)
@@ -321,7 +321,7 @@ static const struct of_device_id vdoa_dt_ids[] = {
 };
 MODULE_DEVICE_TABLE(of, vdoa_dt_ids);
 
-static const struct platform_driver vdoa_driver = {
+static struct platform_driver vdoa_driver = {
        .probe          = vdoa_probe,
        .remove         = vdoa_remove,
        .driver         = {
index cbb03768f5d73574b4d95939a08d7605e94dd6d7..0f0c389f889713ad024eda400ea1f70799710dae 100644 (file)
@@ -861,9 +861,7 @@ int gsc_prepare_addr(struct gsc_ctx *ctx, struct vb2_buffer *vb,
 
        if ((frame->fmt->pixelformat == V4L2_PIX_FMT_VYUY) ||
                (frame->fmt->pixelformat == V4L2_PIX_FMT_YVYU) ||
-               (frame->fmt->pixelformat == V4L2_PIX_FMT_NV61) ||
                (frame->fmt->pixelformat == V4L2_PIX_FMT_YVU420) ||
-               (frame->fmt->pixelformat == V4L2_PIX_FMT_NV21) ||
                (frame->fmt->pixelformat == V4L2_PIX_FMT_YVU420M))
                swap(addr->cb, addr->cr);
 
index 823608112d89c14272c9ca2f5f8ebf46ffd30383..7918b928f0589b59b52c17c8470c3e3159d33c1e 100644 (file)
@@ -632,8 +632,8 @@ static int bdisp_open(struct file *file)
 
 error_ctrls:
        bdisp_ctrls_delete(ctx);
-error_fh:
        v4l2_fh_del(&ctx->fh);
+error_fh:
        v4l2_fh_exit(&ctx->fh);
        bdisp_hw_free_nodes(ctx);
 mem_ctx:
index b4b583f7137a54eb86f8592724603b296afb9347..b4c0f10fc3b0f12eb9f114ac063ac5b0a85ecb1a 100644 (file)
@@ -54,12 +54,11 @@ EXPORT_SYMBOL_GPL(vsp1_du_init);
 /**
  * vsp1_du_setup_lif - Setup the output part of the VSP pipeline
  * @dev: the VSP device
- * @width: output frame width in pixels
- * @height: output frame height in pixels
+ * @cfg: the LIF configuration
  *
- * Configure the output part of VSP DRM pipeline for the given frame @width and
- * @height. This sets up formats on the BRU source pad, the WPF0 sink and source
- * pads, and the LIF sink pad.
+ * Configure the output part of VSP DRM pipeline for the given frame @cfg.width
+ * and @cfg.height. This sets up formats on the BRU source pad, the WPF0 sink
+ * and source pads, and the LIF sink pad.
  *
  * As the media bus code on the BRU source pad is conditioned by the
  * configuration of the BRU sink 0 pad, we also set up the formats on all BRU
@@ -69,8 +68,7 @@ EXPORT_SYMBOL_GPL(vsp1_du_init);
  *
  * Return 0 on success or a negative error code on failure.
  */
-int vsp1_du_setup_lif(struct device *dev, unsigned int width,
-                     unsigned int height)
+int vsp1_du_setup_lif(struct device *dev, const struct vsp1_du_lif_config *cfg)
 {
        struct vsp1_device *vsp1 = dev_get_drvdata(dev);
        struct vsp1_pipeline *pipe = &vsp1->drm->pipe;
@@ -79,11 +77,8 @@ int vsp1_du_setup_lif(struct device *dev, unsigned int width,
        unsigned int i;
        int ret;
 
-       dev_dbg(vsp1->dev, "%s: configuring LIF with format %ux%u\n",
-               __func__, width, height);
-
-       if (width == 0 || height == 0) {
-               /* Zero width or height means the CRTC is being disabled, stop
+       if (!cfg) {
+               /* NULL configuration means the CRTC is being disabled, stop
                 * the pipeline and turn the light off.
                 */
                ret = vsp1_pipeline_stop(pipe);
@@ -108,6 +103,9 @@ int vsp1_du_setup_lif(struct device *dev, unsigned int width,
                return 0;
        }
 
+       dev_dbg(vsp1->dev, "%s: configuring LIF with format %ux%u\n",
+               __func__, cfg->width, cfg->height);
+
        /* Configure the format at the BRU sinks and propagate it through the
         * pipeline.
         */
@@ -117,8 +115,8 @@ int vsp1_du_setup_lif(struct device *dev, unsigned int width,
        for (i = 0; i < bru->entity.source_pad; ++i) {
                format.pad = i;
 
-               format.format.width = width;
-               format.format.height = height;
+               format.format.width = cfg->width;
+               format.format.height = cfg->height;
                format.format.code = MEDIA_BUS_FMT_ARGB8888_1X32;
                format.format.field = V4L2_FIELD_NONE;
 
@@ -133,8 +131,8 @@ int vsp1_du_setup_lif(struct device *dev, unsigned int width,
        }
 
        format.pad = bru->entity.source_pad;
-       format.format.width = width;
-       format.format.height = height;
+       format.format.width = cfg->width;
+       format.format.height = cfg->height;
        format.format.code = MEDIA_BUS_FMT_ARGB8888_1X32;
        format.format.field = V4L2_FIELD_NONE;
 
@@ -180,7 +178,8 @@ int vsp1_du_setup_lif(struct device *dev, unsigned int width,
        /* Verify that the format at the output of the pipeline matches the
         * requested frame size and media bus code.
         */
-       if (format.format.width != width || format.format.height != height ||
+       if (format.format.width != cfg->width ||
+           format.format.height != cfg->height ||
            format.format.code != MEDIA_BUS_FMT_ARGB8888_1X32) {
                dev_dbg(vsp1->dev, "%s: format mismatch\n", __func__);
                return -EPIPE;
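
A hedged sketch of calling the reworked API from a DRM driver, with made-up frame sizes; per the hunk above, vsp1_du_lif_config carries .width and .height, and passing NULL stops the pipeline:

struct vsp1_du_lif_config cfg = {
	.width	= 1920,
	.height	= 1080,
};
int ret;

ret = vsp1_du_setup_lif(dev, &cfg);	/* CRTC enable */
if (ret < 0)
	return ret;

ret = vsp1_du_setup_lif(dev, NULL);	/* CRTC disable */
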
index 393dccaabdd02ac83744faf049ef1a08675fe7ae..1688893a65bb57d2d2ff0d667f82d27fbd88dc37 100644 (file)
@@ -436,6 +436,8 @@ int lirc_dev_fop_open(struct inode *inode, struct file *file)
                return -ERESTARTSYS;
 
        ir = irctls[iminor(inode)];
+       mutex_unlock(&lirc_dev_lock);
+
        if (!ir) {
                retval = -ENODEV;
                goto error;
@@ -476,8 +478,6 @@ int lirc_dev_fop_open(struct inode *inode, struct file *file)
        }
 
 error:
-       mutex_unlock(&lirc_dev_lock);
-
        nonseekable_open(inode, file);
 
        return retval;
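
Net effect of the two lirc hunks: lirc_dev_lock now guards only the irctls[] lookup rather than the whole open path, so the error path no longer needs (or has) an unlock. Roughly:

	mutex_lock(&lirc_dev_lock);
	ir = irctls[iminor(inode)];	/* the only shared access */
	mutex_unlock(&lirc_dev_lock);

	if (!ir)
		return -ENODEV;		/* no lock held on error paths */
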
index b109f8246b968d99cacde9b6ee73719f554a4bfd..ec4b25bd2ec29912f062ae1b654a5ac05434b6f7 100644 (file)
@@ -176,12 +176,13 @@ static void nvt_write_wakeup_codes(struct rc_dev *dev,
 {
        u8 tolerance, config;
        struct nvt_dev *nvt = dev->priv;
+       unsigned long flags;
        int i;
 
        /* hardcode the tolerance to 10% */
        tolerance = DIV_ROUND_UP(count, 10);
 
-       spin_lock(&nvt->lock);
+       spin_lock_irqsave(&nvt->lock, flags);
 
        nvt_clear_cir_wake_fifo(nvt);
        nvt_cir_wake_reg_write(nvt, count, CIR_WAKE_FIFO_CMP_DEEP);
@@ -203,7 +204,7 @@ static void nvt_write_wakeup_codes(struct rc_dev *dev,
 
        nvt_cir_wake_reg_write(nvt, config, CIR_WAKE_IRCON);
 
-       spin_unlock(&nvt->lock);
+       spin_unlock_irqrestore(&nvt->lock, flags);
 }
 
 static ssize_t wakeup_data_show(struct device *dev,
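
The spin_lock() to spin_lock_irqsave() switch matters when the same lock can also be taken from hard-IRQ context, as nvt->lock presumably can here: disabling local interrupts while holding it keeps the handler from deadlocking against this path. The generic idiom:

static DEFINE_SPINLOCK(demo_lock);	/* hypothetical lock */

static void demo_update(void)
{
	unsigned long flags;

	spin_lock_irqsave(&demo_lock, flags);
	/* ... touch state shared with the interrupt handler ... */
	spin_unlock_irqrestore(&demo_lock, flags);
}
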
index 2424946740e64fb602f55a30d5f158a212cc88ce..d84533699668d20e1797bc7feef1693f74e87be5 100644 (file)
@@ -1663,6 +1663,7 @@ static int rc_setup_rx_device(struct rc_dev *dev)
 {
        int rc;
        struct rc_map *rc_map;
+       u64 rc_type;
 
        if (!dev->map_name)
                return -EINVAL;
@@ -1677,15 +1678,18 @@ static int rc_setup_rx_device(struct rc_dev *dev)
        if (rc)
                return rc;
 
-       if (dev->change_protocol) {
-               u64 rc_type = (1ll << rc_map->rc_type);
+       rc_type = BIT_ULL(rc_map->rc_type);
 
+       if (dev->change_protocol) {
                rc = dev->change_protocol(dev, &rc_type);
                if (rc < 0)
                        goto out_table;
                dev->enabled_protocols = rc_type;
        }
 
+       if (dev->driver_type == RC_DRIVER_IR_RAW)
+               ir_raw_load_modules(&rc_type);
+
        set_bit(EV_KEY, dev->input_dev->evbit);
        set_bit(EV_REP, dev->input_dev->evbit);
        set_bit(EV_MSC, dev->input_dev->evbit);
@@ -1777,12 +1781,6 @@ int rc_register_device(struct rc_dev *dev)
                dev->input_name ?: "Unspecified device", path ?: "N/A");
        kfree(path);
 
-       if (dev->driver_type != RC_DRIVER_IR_RAW_TX) {
-               rc = rc_setup_rx_device(dev);
-               if (rc)
-                       goto out_dev;
-       }
-
        if (dev->driver_type == RC_DRIVER_IR_RAW ||
            dev->driver_type == RC_DRIVER_IR_RAW_TX) {
                if (!raw_init) {
@@ -1791,7 +1789,13 @@ int rc_register_device(struct rc_dev *dev)
                }
                rc = ir_raw_event_register(dev);
                if (rc < 0)
-                       goto out_rx;
+                       goto out_dev;
+       }
+
+       if (dev->driver_type != RC_DRIVER_IR_RAW_TX) {
+               rc = rc_setup_rx_device(dev);
+               if (rc)
+                       goto out_raw;
        }
 
        /* Allow the RC sysfs nodes to be accessible */
@@ -1803,8 +1807,8 @@ int rc_register_device(struct rc_dev *dev)
 
        return 0;
 
-out_rx:
-       rc_free_rx_device(dev);
+out_raw:
+       ir_raw_event_unregister(dev);
 out_dev:
        device_del(&dev->dev);
 out_unlock:
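
One detail from the rc_setup_rx_device() hunk: the open-coded (1ll << rc_map->rc_type) becomes BIT_ULL(), the standard helper for building a 64-bit mask from a bit index:

#include <linux/bitops.h>

/* BIT_ULL(n) is (1ULL << (n)): unsigned and 64 bits wide, so it stays
 * correct for bit positions >= 32, where a plain (1 << n) overflows
 * int. Example with an arbitrary index: */
u64 mask = BIT_ULL(33);			/* 0x0000000200000000 */
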
index 923fb2299553cb96c0db87368a322ea875da4652..41b54e40176c2393b846a1fb59f6e2cacf187c74 100644 (file)
@@ -487,10 +487,69 @@ static void serial_ir_timeout(unsigned long arg)
        ir_raw_event_handle(serial_ir.rcdev);
 }
 
+/* Needed by serial_ir_probe() */
+static int serial_ir_tx(struct rc_dev *dev, unsigned int *txbuf,
+                       unsigned int count);
+static int serial_ir_tx_duty_cycle(struct rc_dev *dev, u32 cycle);
+static int serial_ir_tx_carrier(struct rc_dev *dev, u32 carrier);
+static int serial_ir_open(struct rc_dev *rcdev);
+static void serial_ir_close(struct rc_dev *rcdev);
+
 static int serial_ir_probe(struct platform_device *dev)
 {
+       struct rc_dev *rcdev;
        int i, nlow, nhigh, result;
 
+       rcdev = devm_rc_allocate_device(&dev->dev, RC_DRIVER_IR_RAW);
+       if (!rcdev)
+               return -ENOMEM;
+
+       if (hardware[type].send_pulse && hardware[type].send_space)
+               rcdev->tx_ir = serial_ir_tx;
+       if (hardware[type].set_send_carrier)
+               rcdev->s_tx_carrier = serial_ir_tx_carrier;
+       if (hardware[type].set_duty_cycle)
+               rcdev->s_tx_duty_cycle = serial_ir_tx_duty_cycle;
+
+       switch (type) {
+       case IR_HOMEBREW:
+               rcdev->input_name = "Serial IR type home-brew";
+               break;
+       case IR_IRDEO:
+               rcdev->input_name = "Serial IR type IRdeo";
+               break;
+       case IR_IRDEO_REMOTE:
+               rcdev->input_name = "Serial IR type IRdeo remote";
+               break;
+       case IR_ANIMAX:
+               rcdev->input_name = "Serial IR type AnimaX";
+               break;
+       case IR_IGOR:
+               rcdev->input_name = "Serial IR type IgorPlug";
+               break;
+       }
+
+       rcdev->input_phys = KBUILD_MODNAME "/input0";
+       rcdev->input_id.bustype = BUS_HOST;
+       rcdev->input_id.vendor = 0x0001;
+       rcdev->input_id.product = 0x0001;
+       rcdev->input_id.version = 0x0100;
+       rcdev->open = serial_ir_open;
+       rcdev->close = serial_ir_close;
+       rcdev->dev.parent = &serial_ir.pdev->dev;
+       rcdev->allowed_protocols = RC_BIT_ALL_IR_DECODER;
+       rcdev->driver_name = KBUILD_MODNAME;
+       rcdev->map_name = RC_MAP_RC6_MCE;
+       rcdev->min_timeout = 1;
+       rcdev->timeout = IR_DEFAULT_TIMEOUT;
+       rcdev->max_timeout = 10 * IR_DEFAULT_TIMEOUT;
+       rcdev->rx_resolution = 250000;
+
+       serial_ir.rcdev = rcdev;
+
+       setup_timer(&serial_ir.timeout_timer, serial_ir_timeout,
+                   (unsigned long)&serial_ir);
+
        result = devm_request_irq(&dev->dev, irq, serial_ir_irq_handler,
                                  share_irq ? IRQF_SHARED : 0,
                                  KBUILD_MODNAME, &hardware);
@@ -516,9 +575,6 @@ static int serial_ir_probe(struct platform_device *dev)
                return -EBUSY;
        }
 
-       setup_timer(&serial_ir.timeout_timer, serial_ir_timeout,
-                   (unsigned long)&serial_ir);
-
        result = hardware_init_port();
        if (result < 0)
                return result;
@@ -552,7 +608,8 @@ static int serial_ir_probe(struct platform_device *dev)
                         sense ? "low" : "high");
 
        dev_dbg(&dev->dev, "Interrupt %d, port %04x obtained\n", irq, io);
-       return 0;
+
+       return devm_rc_register_device(&dev->dev, rcdev);
 }
 
 static int serial_ir_open(struct rc_dev *rcdev)
@@ -723,7 +780,6 @@ static void serial_ir_exit(void)
 
 static int __init serial_ir_init_module(void)
 {
-       struct rc_dev *rcdev;
        int result;
 
        switch (type) {
@@ -754,63 +810,9 @@ static int __init serial_ir_init_module(void)
                sense = !!sense;
 
        result = serial_ir_init();
-       if (result)
-               return result;
-
-       rcdev = devm_rc_allocate_device(&serial_ir.pdev->dev, RC_DRIVER_IR_RAW);
-       if (!rcdev) {
-               result = -ENOMEM;
-               goto serial_cleanup;
-       }
-
-       if (hardware[type].send_pulse && hardware[type].send_space)
-               rcdev->tx_ir = serial_ir_tx;
-       if (hardware[type].set_send_carrier)
-               rcdev->s_tx_carrier = serial_ir_tx_carrier;
-       if (hardware[type].set_duty_cycle)
-               rcdev->s_tx_duty_cycle = serial_ir_tx_duty_cycle;
-
-       switch (type) {
-       case IR_HOMEBREW:
-               rcdev->input_name = "Serial IR type home-brew";
-               break;
-       case IR_IRDEO:
-               rcdev->input_name = "Serial IR type IRdeo";
-               break;
-       case IR_IRDEO_REMOTE:
-               rcdev->input_name = "Serial IR type IRdeo remote";
-               break;
-       case IR_ANIMAX:
-               rcdev->input_name = "Serial IR type AnimaX";
-               break;
-       case IR_IGOR:
-               rcdev->input_name = "Serial IR type IgorPlug";
-               break;
-       }
-
-       rcdev->input_phys = KBUILD_MODNAME "/input0";
-       rcdev->input_id.bustype = BUS_HOST;
-       rcdev->input_id.vendor = 0x0001;
-       rcdev->input_id.product = 0x0001;
-       rcdev->input_id.version = 0x0100;
-       rcdev->open = serial_ir_open;
-       rcdev->close = serial_ir_close;
-       rcdev->dev.parent = &serial_ir.pdev->dev;
-       rcdev->allowed_protocols = RC_BIT_ALL_IR_DECODER;
-       rcdev->driver_name = KBUILD_MODNAME;
-       rcdev->map_name = RC_MAP_RC6_MCE;
-       rcdev->min_timeout = 1;
-       rcdev->timeout = IR_DEFAULT_TIMEOUT;
-       rcdev->max_timeout = 10 * IR_DEFAULT_TIMEOUT;
-       rcdev->rx_resolution = 250000;
-
-       serial_ir.rcdev = rcdev;
-
-       result = rc_register_device(rcdev);
-
        if (!result)
                return 0;
-serial_cleanup:
+
        serial_ir_exit();
        return result;
 }
@@ -818,7 +820,6 @@ serial_cleanup:
 static void __exit serial_ir_exit_module(void)
 {
        del_timer_sync(&serial_ir.timeout_timer);
-       rc_unregister_device(serial_ir.rcdev);
        serial_ir_exit();
 }
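
The serial_ir rework above moves all rc_dev setup into probe and onto the devm_* variants, so allocation and registration are tied to the device lifetime and the module exit path no longer unregisters by hand. A minimal sketch of the pattern, with a hypothetical probe:

static int demo_probe(struct platform_device *pdev)
{
	struct rc_dev *rcdev;

	rcdev = devm_rc_allocate_device(&pdev->dev, RC_DRIVER_IR_RAW);
	if (!rcdev)
		return -ENOMEM;

	/* ... fill in rcdev fields ... */

	/* released automatically if probe fails later or on unbind */
	return devm_rc_register_device(&pdev->dev, rcdev);
}
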
 
index ab9866024ec7983d597efd157476820222ad8134..04033efe7ad5394d4fd9493ce6b790de1404666b 100644 (file)
@@ -36,16 +36,18 @@ static int usb_cypress_writemem(struct usb_device *udev,u16 addr,u8 *data, u8 le
 int usb_cypress_load_firmware(struct usb_device *udev, const struct firmware *fw, int type)
 {
        struct hexline *hx;
-       u8 reset;
-       int ret,pos=0;
+       u8 *buf;
+       int ret, pos = 0;
+       u16 cpu_cs_register = cypress[type].cpu_cs_register;
 
-       hx = kmalloc(sizeof(*hx), GFP_KERNEL);
-       if (!hx)
+       buf = kmalloc(sizeof(*hx), GFP_KERNEL);
+       if (!buf)
                return -ENOMEM;
+       hx = (struct hexline *)buf;
 
        /* stop the CPU */
-       reset = 1;
-       if ((ret = usb_cypress_writemem(udev,cypress[type].cpu_cs_register,&reset,1)) != 1)
+       buf[0] = 1;
+       if (usb_cypress_writemem(udev, cpu_cs_register, buf, 1) != 1)
                err("could not stop the USB controller CPU.");
 
        while ((ret = dvb_usb_get_hexline(fw, hx, &pos)) > 0) {
@@ -61,21 +63,21 @@ int usb_cypress_load_firmware(struct usb_device *udev, const struct firmware *fw
        }
        if (ret < 0) {
                err("firmware download failed at %d with %d",pos,ret);
-               kfree(hx);
+               kfree(buf);
                return ret;
        }
 
        if (ret == 0) {
                /* restart the CPU */
-               reset = 0;
-               if (ret || usb_cypress_writemem(udev,cypress[type].cpu_cs_register,&reset,1) != 1) {
+               buf[0] = 0;
+               if (usb_cypress_writemem(udev, cpu_cs_register, buf, 1) != 1) {
                        err("could not restart the USB controller CPU.");
                        ret = -EINVAL;
                }
        } else
                ret = -EIO;
 
-       kfree(hx);
+       kfree(buf);
 
        return ret;
 }
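
The reason for the buf/kmalloc() change above: buffers handed to USB transfers must be DMA-capable, and the old on-stack `u8 reset` is not. The rule, sketched:

u8 *buf = kmalloc(1, GFP_KERNEL);	/* heap memory is DMA-capable */
if (!buf)
	return -ENOMEM;

buf[0] = 1;				/* command byte */
/* ... pass buf, never a stack variable, to the USB transfer ... */
kfree(buf);
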
index 6ca502d834b4f2cfcc0e6c6a3699bdaaea04d293..4f42d57f81d9541d25f02af65086f6465af90728 100644 (file)
@@ -68,6 +68,7 @@
 struct dw2102_state {
        u8 initialized;
        u8 last_lock;
+       u8 data[MAX_XFER_SIZE + 4];
        struct i2c_client *i2c_client_demod;
        struct i2c_client *i2c_client_tuner;
 
@@ -661,62 +662,72 @@ static int su3000_i2c_transfer(struct i2c_adapter *adap, struct i2c_msg msg[],
                                                                int num)
 {
        struct dvb_usb_device *d = i2c_get_adapdata(adap);
-       u8 obuf[0x40], ibuf[0x40];
+       struct dw2102_state *state;
 
        if (!d)
                return -ENODEV;
+
+       state = d->priv;
+
        if (mutex_lock_interruptible(&d->i2c_mutex) < 0)
                return -EAGAIN;
+       if (mutex_lock_interruptible(&d->data_mutex) < 0) {
+               mutex_unlock(&d->i2c_mutex);
+               return -EAGAIN;
+       }
 
        switch (num) {
        case 1:
                switch (msg[0].addr) {
                case SU3000_STREAM_CTRL:
-                       obuf[0] = msg[0].buf[0] + 0x36;
-                       obuf[1] = 3;
-                       obuf[2] = 0;
-                       if (dvb_usb_generic_rw(d, obuf, 3, ibuf, 0, 0) < 0)
+                       state->data[0] = msg[0].buf[0] + 0x36;
+                       state->data[1] = 3;
+                       state->data[2] = 0;
+                       if (dvb_usb_generic_rw(d, state->data, 3,
+                                       state->data, 0, 0) < 0)
                                err("i2c transfer failed.");
                        break;
                case DW2102_RC_QUERY:
-                       obuf[0] = 0x10;
-                       if (dvb_usb_generic_rw(d, obuf, 1, ibuf, 2, 0) < 0)
+                       state->data[0] = 0x10;
+                       if (dvb_usb_generic_rw(d, state->data, 1,
+                                       state->data, 2, 0) < 0)
                                err("i2c transfer failed.");
-                       msg[0].buf[1] = ibuf[0];
-                       msg[0].buf[0] = ibuf[1];
+                       msg[0].buf[1] = state->data[0];
+                       msg[0].buf[0] = state->data[1];
                        break;
                default:
                        /* always i2c write*/
-                       obuf[0] = 0x08;
-                       obuf[1] = msg[0].addr;
-                       obuf[2] = msg[0].len;
+                       state->data[0] = 0x08;
+                       state->data[1] = msg[0].addr;
+                       state->data[2] = msg[0].len;
 
-                       memcpy(&obuf[3], msg[0].buf, msg[0].len);
+                       memcpy(&state->data[3], msg[0].buf, msg[0].len);
 
-                       if (dvb_usb_generic_rw(d, obuf, msg[0].len + 3,
-                                               ibuf, 1, 0) < 0)
+                       if (dvb_usb_generic_rw(d, state->data, msg[0].len + 3,
+                                               state->data, 1, 0) < 0)
                                err("i2c transfer failed.");
 
                }
                break;
        case 2:
                /* always i2c read */
-               obuf[0] = 0x09;
-               obuf[1] = msg[0].len;
-               obuf[2] = msg[1].len;
-               obuf[3] = msg[0].addr;
-               memcpy(&obuf[4], msg[0].buf, msg[0].len);
-
-               if (dvb_usb_generic_rw(d, obuf, msg[0].len + 4,
-                                       ibuf, msg[1].len + 1, 0) < 0)
+               state->data[0] = 0x09;
+               state->data[1] = msg[0].len;
+               state->data[2] = msg[1].len;
+               state->data[3] = msg[0].addr;
+               memcpy(&state->data[4], msg[0].buf, msg[0].len);
+
+               if (dvb_usb_generic_rw(d, state->data, msg[0].len + 4,
+                                       state->data, msg[1].len + 1, 0) < 0)
                        err("i2c transfer failed.");
 
-               memcpy(msg[1].buf, &ibuf[1], msg[1].len);
+               memcpy(msg[1].buf, &state->data[1], msg[1].len);
                break;
        default:
                warn("more than 2 i2c messages at a time is not handled yet.");
                break;
        }
+       mutex_unlock(&d->data_mutex);
        mutex_unlock(&d->i2c_mutex);
        return num;
 }
@@ -844,17 +855,23 @@ static int su3000_streaming_ctrl(struct dvb_usb_adapter *adap, int onoff)
 static int su3000_power_ctrl(struct dvb_usb_device *d, int i)
 {
        struct dw2102_state *state = (struct dw2102_state *)d->priv;
-       u8 obuf[] = {0xde, 0};
+       int ret = 0;
 
        info("%s: %d, initialized %d", __func__, i, state->initialized);
 
        if (i && !state->initialized) {
+               mutex_lock(&d->data_mutex);
+
+               state->data[0] = 0xde;
+               state->data[1] = 0;
+
                state->initialized = 1;
                /* reset board */
-               return dvb_usb_generic_rw(d, obuf, 2, NULL, 0, 0);
+               ret = dvb_usb_generic_rw(d, state->data, 2, NULL, 0, 0);
+               mutex_unlock(&d->data_mutex);
        }
 
-       return 0;
+       return ret;
 }
 
 static int su3000_read_mac_address(struct dvb_usb_device *d, u8 mac[6])
@@ -1309,49 +1326,57 @@ static int prof_7500_frontend_attach(struct dvb_usb_adapter *d)
        return 0;
 }
 
-static int su3000_frontend_attach(struct dvb_usb_adapter *d)
+static int su3000_frontend_attach(struct dvb_usb_adapter *adap)
 {
-       u8 obuf[3] = { 0xe, 0x80, 0 };
-       u8 ibuf[] = { 0 };
+       struct dvb_usb_device *d = adap->dev;
+       struct dw2102_state *state = d->priv;
+
+       mutex_lock(&d->data_mutex);
+
+       state->data[0] = 0xe;
+       state->data[1] = 0x80;
+       state->data[2] = 0;
 
-       if (dvb_usb_generic_rw(d->dev, obuf, 3, ibuf, 1, 0) < 0)
+       if (dvb_usb_generic_rw(d, state->data, 3, state->data, 1, 0) < 0)
                err("command 0x0e transfer failed.");
 
-       obuf[0] = 0xe;
-       obuf[1] = 0x02;
-       obuf[2] = 1;
+       state->data[0] = 0xe;
+       state->data[1] = 0x02;
+       state->data[2] = 1;
 
-       if (dvb_usb_generic_rw(d->dev, obuf, 3, ibuf, 1, 0) < 0)
+       if (dvb_usb_generic_rw(d, state->data, 3, state->data, 1, 0) < 0)
                err("command 0x0e transfer failed.");
        msleep(300);
 
-       obuf[0] = 0xe;
-       obuf[1] = 0x83;
-       obuf[2] = 0;
+       state->data[0] = 0xe;
+       state->data[1] = 0x83;
+       state->data[2] = 0;
 
-       if (dvb_usb_generic_rw(d->dev, obuf, 3, ibuf, 1, 0) < 0)
+       if (dvb_usb_generic_rw(d, state->data, 3, state->data, 1, 0) < 0)
                err("command 0x0e transfer failed.");
 
-       obuf[0] = 0xe;
-       obuf[1] = 0x83;
-       obuf[2] = 1;
+       state->data[0] = 0xe;
+       state->data[1] = 0x83;
+       state->data[2] = 1;
 
-       if (dvb_usb_generic_rw(d->dev, obuf, 3, ibuf, 1, 0) < 0)
+       if (dvb_usb_generic_rw(d, state->data, 3, state->data, 1, 0) < 0)
                err("command 0x0e transfer failed.");
 
-       obuf[0] = 0x51;
+       state->data[0] = 0x51;
 
-       if (dvb_usb_generic_rw(d->dev, obuf, 1, ibuf, 1, 0) < 0)
+       if (dvb_usb_generic_rw(d, state->data, 1, state->data, 1, 0) < 0)
                err("command 0x51 transfer failed.");
 
-       d->fe_adap[0].fe = dvb_attach(ds3000_attach, &su3000_ds3000_config,
-                                       &d->dev->i2c_adap);
-       if (d->fe_adap[0].fe == NULL)
+       mutex_unlock(&d->data_mutex);
+
+       adap->fe_adap[0].fe = dvb_attach(ds3000_attach, &su3000_ds3000_config,
+                                       &d->i2c_adap);
+       if (adap->fe_adap[0].fe == NULL)
                return -EIO;
 
-       if (dvb_attach(ts2020_attach, d->fe_adap[0].fe,
+       if (dvb_attach(ts2020_attach, adap->fe_adap[0].fe,
                                &dw2104_ts2020_config,
-                               &d->dev->i2c_adap)) {
+                               &d->i2c_adap)) {
                info("Attached DS3000/TS2020!");
                return 0;
        }
@@ -1360,47 +1385,55 @@ static int su3000_frontend_attach(struct dvb_usb_adapter *d)
        return -EIO;
 }
 
-static int t220_frontend_attach(struct dvb_usb_adapter *d)
+static int t220_frontend_attach(struct dvb_usb_adapter *adap)
 {
-       u8 obuf[3] = { 0xe, 0x87, 0 };
-       u8 ibuf[] = { 0 };
+       struct dvb_usb_device *d = adap->dev;
+       struct dw2102_state *state = d->priv;
+
+       mutex_lock(&d->data_mutex);
 
-       if (dvb_usb_generic_rw(d->dev, obuf, 3, ibuf, 1, 0) < 0)
+       state->data[0] = 0xe;
+       state->data[1] = 0x87;
+       state->data[2] = 0x0;
+
+       if (dvb_usb_generic_rw(d, state->data, 3, state->data, 1, 0) < 0)
                err("command 0x0e transfer failed.");
 
-       obuf[0] = 0xe;
-       obuf[1] = 0x86;
-       obuf[2] = 1;
+       state->data[0] = 0xe;
+       state->data[1] = 0x86;
+       state->data[2] = 1;
 
-       if (dvb_usb_generic_rw(d->dev, obuf, 3, ibuf, 1, 0) < 0)
+       if (dvb_usb_generic_rw(d, state->data, 3, state->data, 1, 0) < 0)
                err("command 0x0e transfer failed.");
 
-       obuf[0] = 0xe;
-       obuf[1] = 0x80;
-       obuf[2] = 0;
+       state->data[0] = 0xe;
+       state->data[1] = 0x80;
+       state->data[2] = 0;
 
-       if (dvb_usb_generic_rw(d->dev, obuf, 3, ibuf, 1, 0) < 0)
+       if (dvb_usb_generic_rw(d, state->data, 3, state->data, 1, 0) < 0)
                err("command 0x0e transfer failed.");
 
        msleep(50);
 
-       obuf[0] = 0xe;
-       obuf[1] = 0x80;
-       obuf[2] = 1;
+       state->data[0] = 0xe;
+       state->data[1] = 0x80;
+       state->data[2] = 1;
 
-       if (dvb_usb_generic_rw(d->dev, obuf, 3, ibuf, 1, 0) < 0)
+       if (dvb_usb_generic_rw(d, state->data, 3, state->data, 1, 0) < 0)
                err("command 0x0e transfer failed.");
 
-       obuf[0] = 0x51;
+       state->data[0] = 0x51;
 
-       if (dvb_usb_generic_rw(d->dev, obuf, 1, ibuf, 1, 0) < 0)
+       if (dvb_usb_generic_rw(d, state->data, 1, state->data, 1, 0) < 0)
                err("command 0x51 transfer failed.");
 
-       d->fe_adap[0].fe = dvb_attach(cxd2820r_attach, &cxd2820r_config,
-                                       &d->dev->i2c_adap, NULL);
-       if (d->fe_adap[0].fe != NULL) {
-               if (dvb_attach(tda18271_attach, d->fe_adap[0].fe, 0x60,
-                                       &d->dev->i2c_adap, &tda18271_config)) {
+       mutex_unlock(&d->data_mutex);
+
+       adap->fe_adap[0].fe = dvb_attach(cxd2820r_attach, &cxd2820r_config,
+                                       &d->i2c_adap, NULL);
+       if (adap->fe_adap[0].fe != NULL) {
+               if (dvb_attach(tda18271_attach, adap->fe_adap[0].fe, 0x60,
+                                       &d->i2c_adap, &tda18271_config)) {
                        info("Attached TDA18271HD/CXD2820R!");
                        return 0;
                }
@@ -1410,23 +1443,30 @@ static int t220_frontend_attach(struct dvb_usb_adapter *d)
        return -EIO;
 }
 
-static int m88rs2000_frontend_attach(struct dvb_usb_adapter *d)
+static int m88rs2000_frontend_attach(struct dvb_usb_adapter *adap)
 {
-       u8 obuf[] = { 0x51 };
-       u8 ibuf[] = { 0 };
+       struct dvb_usb_device *d = adap->dev;
+       struct dw2102_state *state = d->priv;
+
+       mutex_lock(&d->data_mutex);
 
-       if (dvb_usb_generic_rw(d->dev, obuf, 1, ibuf, 1, 0) < 0)
+       state->data[0] = 0x51;
+
+       if (dvb_usb_generic_rw(d, state->data, 1, state->data, 1, 0) < 0)
                err("command 0x51 transfer failed.");
 
-       d->fe_adap[0].fe = dvb_attach(m88rs2000_attach, &s421_m88rs2000_config,
-                                       &d->dev->i2c_adap);
+       mutex_unlock(&d->data_mutex);
 
-       if (d->fe_adap[0].fe == NULL)
+       adap->fe_adap[0].fe = dvb_attach(m88rs2000_attach,
+                                       &s421_m88rs2000_config,
+                                       &d->i2c_adap);
+
+       if (adap->fe_adap[0].fe == NULL)
                return -EIO;
 
-       if (dvb_attach(ts2020_attach, d->fe_adap[0].fe,
+       if (dvb_attach(ts2020_attach, adap->fe_adap[0].fe,
                                &dw2104_ts2020_config,
-                               &d->dev->i2c_adap)) {
+                               &d->i2c_adap)) {
                info("Attached RS2000/TS2020!");
                return 0;
        }
@@ -1439,44 +1479,50 @@ static int tt_s2_4600_frontend_attach(struct dvb_usb_adapter *adap)
 {
        struct dvb_usb_device *d = adap->dev;
        struct dw2102_state *state = d->priv;
-       u8 obuf[3] = { 0xe, 0x80, 0 };
-       u8 ibuf[] = { 0 };
        struct i2c_adapter *i2c_adapter;
        struct i2c_client *client;
        struct i2c_board_info board_info;
        struct m88ds3103_platform_data m88ds3103_pdata = {};
        struct ts2020_config ts2020_config = {};
 
-       if (dvb_usb_generic_rw(d, obuf, 3, ibuf, 1, 0) < 0)
+       mutex_lock(&d->data_mutex);
+
+       state->data[0] = 0xe;
+       state->data[1] = 0x80;
+       state->data[2] = 0x0;
+
+       if (dvb_usb_generic_rw(d, state->data, 3, state->data, 1, 0) < 0)
                err("command 0x0e transfer failed.");
 
-       obuf[0] = 0xe;
-       obuf[1] = 0x02;
-       obuf[2] = 1;
+       state->data[0] = 0xe;
+       state->data[1] = 0x02;
+       state->data[2] = 1;
 
-       if (dvb_usb_generic_rw(d, obuf, 3, ibuf, 1, 0) < 0)
+       if (dvb_usb_generic_rw(d, state->data, 3, state->data, 1, 0) < 0)
                err("command 0x0e transfer failed.");
        msleep(300);
 
-       obuf[0] = 0xe;
-       obuf[1] = 0x83;
-       obuf[2] = 0;
+       state->data[0] = 0xe;
+       state->data[1] = 0x83;
+       state->data[2] = 0;
 
-       if (dvb_usb_generic_rw(d, obuf, 3, ibuf, 1, 0) < 0)
+       if (dvb_usb_generic_rw(d, state->data, 3, state->data, 1, 0) < 0)
                err("command 0x0e transfer failed.");
 
-       obuf[0] = 0xe;
-       obuf[1] = 0x83;
-       obuf[2] = 1;
+       state->data[0] = 0xe;
+       state->data[1] = 0x83;
+       state->data[2] = 1;
 
-       if (dvb_usb_generic_rw(d, obuf, 3, ibuf, 1, 0) < 0)
+       if (dvb_usb_generic_rw(d, state->data, 3, state->data, 1, 0) < 0)
                err("command 0x0e transfer failed.");
 
-       obuf[0] = 0x51;
+       state->data[0] = 0x51;
 
-       if (dvb_usb_generic_rw(d, obuf, 1, ibuf, 1, 0) < 0)
+       if (dvb_usb_generic_rw(d, state->data, 1, state->data, 1, 0) < 0)
                err("command 0x51 transfer failed.");
 
+       mutex_unlock(&d->data_mutex);
+
        /* attach demod */
        m88ds3103_pdata.clk = 27000000;
        m88ds3103_pdata.i2c_wr_max = 33;
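
All of the dw2102 hunks apply the same fix: the on-stack obuf/ibuf pairs (not DMA-safe, see the cypress change above) become the single state->data buffer in device state, and every user serializes on d->data_mutex so concurrent I2C, power-control and attach paths cannot clobber each other's transfer in flight. The recurring shape:

	mutex_lock(&d->data_mutex);	/* serialize users of state->data */

	state->data[0] = 0x51;		/* command byte, as in the hunks */
	if (dvb_usb_generic_rw(d, state->data, 1, state->data, 1, 0) < 0)
		err("command 0x51 transfer failed.");

	mutex_unlock(&d->data_mutex);
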
index 5457c361ad586424050c98958bdefd22a4c17db7..bf0fe0137dfed2c893001abb8cfe8b83861dae08 100644 (file)
@@ -1947,9 +1947,7 @@ static int gpmc_probe_onenand_child(struct platform_device *pdev,
        if (!of_property_read_u32(child, "dma-channel", &val))
                gpmc_onenand_data->dma_channel = val;
 
-       gpmc_onenand_init(gpmc_onenand_data);
-
-       return 0;
+       return gpmc_onenand_init(gpmc_onenand_data);
 }
 #else
 static int gpmc_probe_onenand_child(struct platform_device *pdev,
index 91f645992c9416ab07e765a05c006143fb15a17f..b27ea98b781f77747c010f90f20d9da6883a3070 100644 (file)
@@ -1792,15 +1792,14 @@ static pci_ers_result_t cxl_pci_error_detected(struct pci_dev *pdev,
 
        /* If we're permanently dead, give up. */
        if (state == pci_channel_io_perm_failure) {
-               /* Tell the AFU drivers; but we don't care what they
-                * say, we're going away.
-                */
                for (i = 0; i < adapter->slices; i++) {
                        afu = adapter->afu[i];
-                       /* Only participate in EEH if we are on a virtual PHB */
-                       if (afu->phb == NULL)
-                               return PCI_ERS_RESULT_NONE;
-                       cxl_vphb_error_detected(afu, state);
+                       /*
+                        * Tell the AFU drivers; but we don't care what they
+                        * say, we're going away.
+                        */
+                       if (afu->phb != NULL)
+                               cxl_vphb_error_detected(afu, state);
                }
                return PCI_ERS_RESULT_DISCONNECT;
        }
index 3600c9993a9830504d0cc5bcd61c4b8f19376762..29f2daed37e07b1e1359a2d36ff54bd04208233a 100644 (file)
@@ -112,11 +112,9 @@ struct mkhi_msg {
 
 static int mei_osver(struct mei_cl_device *cldev)
 {
-       int ret;
        const size_t size = sizeof(struct mkhi_msg_hdr) +
                            sizeof(struct mkhi_fwcaps) +
                            sizeof(struct mei_os_ver);
-       size_t length = 8;
        char buf[size];
        struct mkhi_msg *req;
        struct mkhi_fwcaps *fwcaps;
@@ -137,15 +135,7 @@ static int mei_osver(struct mei_cl_device *cldev)
        os_ver = (struct mei_os_ver *)fwcaps->data;
        os_ver->os_type = OSTYPE_LINUX;
 
-       ret = __mei_cl_send(cldev->cl, buf, size, mode);
-       if (ret < 0)
-               return ret;
-
-       ret = __mei_cl_recv(cldev->cl, buf, length, 0);
-       if (ret < 0)
-               return ret;
-
-       return 0;
+       return __mei_cl_send(cldev->cl, buf, size, mode);
 }
 
 static void mei_mkhi_fix(struct mei_cl_device *cldev)
@@ -160,7 +150,7 @@ static void mei_mkhi_fix(struct mei_cl_device *cldev)
                return;
 
        ret = mei_osver(cldev);
-       if (ret)
+       if (ret < 0)
                dev_err(&cldev->dev, "OS version command failed %d\n", ret);
 
        mei_cldev_disable(cldev);
index cfb1cdf176fa9001e83894c63c9eb6480516d230..13c55b8f9261861c20710c4ce039c0d605937909 100644 (file)
@@ -124,8 +124,6 @@ int mei_reset(struct mei_device *dev)
 
        mei_clear_interrupts(dev);
 
-       mei_synchronize_irq(dev);
-
        /* we're already in reset, cancel the init timer
         * if the reset was called due the hbm protocol error
         * we need to call it before hw start
@@ -304,6 +302,9 @@ static void mei_reset_work(struct work_struct *work)
                container_of(work, struct mei_device,  reset_work);
        int ret;
 
+       mei_clear_interrupts(dev);
+       mei_synchronize_irq(dev);
+
        mutex_lock(&dev->device_lock);
 
        ret = mei_reset(dev);
@@ -328,6 +329,9 @@ void mei_stop(struct mei_device *dev)
 
        mei_cancel_work(dev);
 
+       mei_clear_interrupts(dev);
+       mei_synchronize_irq(dev);
+
        mutex_lock(&dev->device_lock);
 
        dev->dev_state = MEI_DEV_POWER_DOWN;
index 6fb773dbcd0c3233d62136dcf673afb7b80efcea..93be82fc338ad8b2c3e454dad1e1491f20d47c1c 100644 (file)
@@ -219,15 +219,20 @@ static int atomic_pte_lookup(struct vm_area_struct *vma, unsigned long vaddr,
        int write, unsigned long *paddr, int *pageshift)
 {
        pgd_t *pgdp;
-       pmd_t *pmdp;
+       p4d_t *p4dp;
        pud_t *pudp;
+       pmd_t *pmdp;
        pte_t pte;
 
        pgdp = pgd_offset(vma->vm_mm, vaddr);
        if (unlikely(pgd_none(*pgdp)))
                goto err;
 
-       pudp = pud_offset(pgdp, vaddr);
+       p4dp = p4d_offset(pgdp, vaddr);
+       if (unlikely(p4d_none(*p4dp)))
+               goto err;
+
+       pudp = pud_offset(p4dp, vaddr);
        if (unlikely(pud_none(*pudp)))
                goto err;
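
The gru hunk inserts the p4d level that 5-level page tables add between pgd and pud; on 4-level configurations the p4d folds away. A generic sketch of the resulting walk:

static pmd_t *demo_walk(struct mm_struct *mm, unsigned long addr)
{
	pgd_t *pgd = pgd_offset(mm, addr);
	p4d_t *p4d;
	pud_t *pud;

	if (pgd_none(*pgd))
		return NULL;
	p4d = p4d_offset(pgd, addr);	/* the new level */
	if (p4d_none(*p4d))
		return NULL;
	pud = pud_offset(p4d, addr);	/* pud_offset() now takes a p4d */
	if (pud_none(*pud))
		return NULL;
	return pmd_offset(pud, addr);
}
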
 
index 9d659542a335b444914f2ead245f0975ac970a2b..dad5abee656ef550b0ac041e354e2c7ef89164e3 100644 (file)
@@ -566,10 +566,10 @@ static int vmci_guest_probe_device(struct pci_dev *pdev,
         */
        error = pci_alloc_irq_vectors(pdev, VMCI_MAX_INTRS, VMCI_MAX_INTRS,
                        PCI_IRQ_MSIX);
-       if (error) {
+       if (error < 0) {
                error = pci_alloc_irq_vectors(pdev, 1, 1,
                                PCI_IRQ_MSIX | PCI_IRQ_MSI | PCI_IRQ_LEGACY);
-               if (error)
+               if (error < 0)
                        goto err_remove_bitmap;
        } else {
                vmci_dev->exclusive_vectors = true;
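
The `error < 0` change is needed because pci_alloc_irq_vectors() returns the number of vectors allocated on success, so `if (error)` would treat every successful allocation as a failure:

int nvec = pci_alloc_irq_vectors(pdev, 1, 4,
				 PCI_IRQ_MSIX | PCI_IRQ_MSI | PCI_IRQ_LEGACY);
if (nvec < 0)			/* only negative returns are errors */
	return nvec;
/* nvec >= 1 vectors are now available */
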
index 1621fa08e2069298f6a8438c8babdf11ae4a817c..ff3da960c4736147b2c1348681412953508ac49b 100644 (file)
@@ -1560,11 +1560,8 @@ static bool mmc_blk_rw_cmd_err(struct mmc_blk_data *md, struct mmc_card *card,
                               struct mmc_blk_request *brq, struct request *req,
                               bool old_req_pending)
 {
-       struct mmc_queue_req *mq_rq;
        bool req_pending;
 
-       mq_rq = container_of(brq, struct mmc_queue_req, brq);
-
        /*
         * If this is an SD card and we're writing, we can first
         * mark the known good sectors as ok.
@@ -1701,7 +1698,8 @@ static void mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *new_req)
                case MMC_BLK_CMD_ERR:
                        req_pending = mmc_blk_rw_cmd_err(md, card, brq, old_req, req_pending);
                        if (mmc_blk_reset(md, card->host, type)) {
-                               mmc_blk_rw_cmd_abort(card, old_req);
+                               if (req_pending)
+                                       mmc_blk_rw_cmd_abort(card, old_req);
                                mmc_blk_rw_try_restart(mq, new_req);
                                return;
                        }
@@ -1817,6 +1815,7 @@ void mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req)
                mmc_blk_issue_flush(mq, req);
        } else {
                mmc_blk_issue_rw_rq(mq, req);
+               card->host->context_info.is_waiting_last_req = false;
        }
 
 out:
index 7fd722868875f396e3e4e8147774913ab860b0e2..b502601df228156c60e03e3ddabd64006bd515f7 100644 (file)
@@ -1730,7 +1730,7 @@ static int mmc_init_card(struct mmc_host *host, u32 ocr,
                err = mmc_select_hs400(card);
                if (err)
                        goto free_card;
-       } else {
+       } else if (!mmc_card_hs400es(card)) {
                /* Select the desired bus width optionally */
                err = mmc_select_bus_width(card);
                if (err > 0 && mmc_card_hs(card)) {
index 8e32580c12b520017eb73af884f1b04607c3a78b..b235d8da0602a84e78ff8a4478016f642c67bf43 100644 (file)
@@ -580,7 +580,7 @@ static void msdc_set_mclk(struct msdc_host *host, unsigned char timing, u32 hz)
                }
        }
        sdr_set_field(host->base + MSDC_CFG, MSDC_CFG_CKMOD | MSDC_CFG_CKDIV,
-                       (mode << 8) | (div % 0xff));
+                     (mode << 8) | div);
        sdr_set_bits(host->base + MSDC_CFG, MSDC_CFG_CKPDN);
        while (!(readl(host->base + MSDC_CFG) & MSDC_CFG_CKSTB))
                cpu_relax();
@@ -1559,7 +1559,7 @@ static int msdc_drv_probe(struct platform_device *pdev)
        host->src_clk_freq = clk_get_rate(host->src_clk);
        /* Set host parameters to mmc */
        mmc->ops = &mt_msdc_ops;
-       mmc->f_min = host->src_clk_freq / (4 * 255);
+       mmc->f_min = DIV_ROUND_UP(host->src_clk_freq, 4 * 255);
 
        mmc->caps |= MMC_CAP_ERASE | MMC_CAP_CMD23;
        /* MMC core transfer sizes tunable parameters */
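
Both mtk-sd hunks are rounding fixes: `div % 0xff` corrupts any divider value of 255 or more (modulo, not a mask) and is dropped, while f_min switches to DIV_ROUND_UP so the advertised minimum never undercuts what the divider can actually reach. Worked example with an assumed 400 MHz source clock:

unsigned int trunc = 400000000 / (4 * 255);		/* 392156 */
unsigned int f_min = DIV_ROUND_UP(400000000, 4 * 255);	/* 392157 */
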
index 410a55b1c25fe5f2ef32ff8f2d26c4c3286f4b71..1cfd7f90033944c6a6441aaf7256b1ce44350354 100644 (file)
 #include "sdhci-pltfm.h"
 #include <linux/of.h>
 
-#define SDHCI_ARASAN_CLK_CTRL_OFFSET   0x2c
 #define SDHCI_ARASAN_VENDOR_REGISTER   0x78
 
 #define VENDOR_ENHANCED_STROBE         BIT(0)
-#define CLK_CTRL_TIMEOUT_SHIFT         16
-#define CLK_CTRL_TIMEOUT_MASK          (0xf << CLK_CTRL_TIMEOUT_SHIFT)
-#define CLK_CTRL_TIMEOUT_MIN_EXP       13
 
 #define PHY_CLK_TOO_SLOW_HZ            400000
 
@@ -163,15 +159,15 @@ static int sdhci_arasan_syscon_write(struct sdhci_host *host,
 
 static unsigned int sdhci_arasan_get_timeout_clock(struct sdhci_host *host)
 {
-       u32 div;
        unsigned long freq;
        struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
 
-       div = readl(host->ioaddr + SDHCI_ARASAN_CLK_CTRL_OFFSET);
-       div = (div & CLK_CTRL_TIMEOUT_MASK) >> CLK_CTRL_TIMEOUT_SHIFT;
+       /* SDHCI timeout clock is in kHz */
+       freq = DIV_ROUND_UP(clk_get_rate(pltfm_host->clk), 1000);
 
-       freq = clk_get_rate(pltfm_host->clk);
-       freq /= 1 << (CLK_CTRL_TIMEOUT_MIN_EXP + div);
+       /* or in MHz */
+       if (host->caps & SDHCI_TIMEOUT_CLK_UNIT)
+               freq = DIV_ROUND_UP(freq, 1000);
 
        return freq;
 }
index 2f9ad213377a2ccb9091693ba749266fa4b420c4..d5430ed02a67896616bd1371a22f51bb59be8ee2 100644 (file)
@@ -29,6 +29,8 @@
 
 #include "sdhci-pltfm.h"
 
+#define SDMMC_MC1R     0x204
+#define                SDMMC_MC1R_DDR          BIT(3)
 #define SDMMC_CACR     0x230
 #define                SDMMC_CACR_CAPWREN      BIT(0)
 #define                SDMMC_CACR_KEY          (0x46 << 8)
@@ -85,11 +87,37 @@ static void sdhci_at91_set_clock(struct sdhci_host *host, unsigned int clock)
        sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);
 }
 
+/*
+ * In this specific implementation of the SDHCI controller, the power register
+ * needs to have a valid voltage set even when the power supply is managed by
+ * an external regulator.
+ */
+static void sdhci_at91_set_power(struct sdhci_host *host, unsigned char mode,
+                    unsigned short vdd)
+{
+       if (!IS_ERR(host->mmc->supply.vmmc)) {
+               struct mmc_host *mmc = host->mmc;
+
+               spin_unlock_irq(&host->lock);
+               mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, vdd);
+               spin_lock_irq(&host->lock);
+       }
+       sdhci_set_power_noreg(host, mode, vdd);
+}
+
+void sdhci_at91_set_uhs_signaling(struct sdhci_host *host, unsigned int timing)
+{
+       if (timing == MMC_TIMING_MMC_DDR52)
+               sdhci_writeb(host, SDMMC_MC1R_DDR, SDMMC_MC1R);
+       sdhci_set_uhs_signaling(host, timing);
+}
+
 static const struct sdhci_ops sdhci_at91_sama5d2_ops = {
        .set_clock              = sdhci_at91_set_clock,
        .set_bus_width          = sdhci_set_bus_width,
        .reset                  = sdhci_reset,
-       .set_uhs_signaling      = sdhci_set_uhs_signaling,
+       .set_uhs_signaling      = sdhci_at91_set_uhs_signaling,
+       .set_power              = sdhci_at91_set_power,
 };
 
 static const struct sdhci_pltfm_data soc_data_sama5d2 = {
index 982b3e349426141710abef87f75819c518bf4a45..86560d590786f3f62a65c8668f2e601fd27b75be 100644 (file)
@@ -451,6 +451,8 @@ static void sdhci_intel_set_power(struct sdhci_host *host, unsigned char mode,
        if (mode == MMC_POWER_OFF)
                return;
 
+       spin_unlock_irq(&host->lock);
+
        /*
         * Bus power might not enable after D3 -> D0 transition due to the
         * present state not yet having propagated. Retry for up to 2ms.
@@ -463,6 +465,8 @@ static void sdhci_intel_set_power(struct sdhci_host *host, unsigned char mode,
                reg |= SDHCI_POWER_ON;
                sdhci_writeb(host, reg, SDHCI_POWER_CONTROL);
        }
+
+       spin_lock_irq(&host->lock);
 }
 
 static const struct sdhci_ops sdhci_intel_byt_ops = {
index 6fdd7a70f229b8bfd08f6d8b9df509dd0fec2bbd..63bc33a54d0dd8e63b50197611be31aae6f288fc 100644 (file)
@@ -1362,7 +1362,9 @@ void sdhci_enable_clk(struct sdhci_host *host, u16 clk)
                        return;
                }
                timeout--;
-               mdelay(1);
+               spin_unlock_irq(&host->lock);
+               usleep_range(900, 1100);
+               spin_lock_irq(&host->lock);
        }
 
        clk |= SDHCI_CLOCK_CARD_EN;
@@ -1828,6 +1830,9 @@ static void sdhci_enable_sdio_irq(struct mmc_host *mmc, int enable)
        struct sdhci_host *host = mmc_priv(mmc);
        unsigned long flags;
 
+       if (enable)
+               pm_runtime_get_noresume(host->mmc->parent);
+
        spin_lock_irqsave(&host->lock, flags);
        if (enable)
                host->flags |= SDHCI_SDIO_IRQ_ENABLED;
@@ -1836,6 +1841,9 @@ static void sdhci_enable_sdio_irq(struct mmc_host *mmc, int enable)
 
        sdhci_enable_sdio_irq_nolock(host, enable);
        spin_unlock_irqrestore(&host->lock, flags);
+
+       if (!enable)
+               pm_runtime_put_noidle(host->mmc->parent);
 }
 
 static int sdhci_start_signal_voltage_switch(struct mmc_host *mmc,
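
Two sdhci hunks enforce the same rule: never sleep or trigger a resume while holding host->lock. sdhci_enable_clk() now drops the lock around usleep_range(), and the SDIO-IRQ path pairs the _noresume/_noidle runtime-PM helpers, which only move the usage counter. Sketch of the latter, with a hypothetical wrapper:

static void demo_sdio_irq_ref(struct device *dev, bool enable)
{
	if (enable)
		pm_runtime_get_noresume(dev);	/* bump count, no resume */
	else
		pm_runtime_put_noidle(dev);	/* drop count, no idle check */
}
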
index d2c386f09d69f4edd20b0ac778624b40f826f1e3..1d843357422e8a398590aa1bcd883ab644b5d56e 100644 (file)
@@ -426,6 +426,9 @@ static int ushc_probe(struct usb_interface *intf, const struct usb_device_id *id
        struct ushc_data *ushc;
        int ret;
 
+       if (intf->cur_altsetting->desc.bNumEndpoints < 1)
+               return -ENODEV;
+
        mmc = mmc_alloc_host(sizeof(struct ushc_data), &intf->dev);
        if (mmc == NULL)
                return -ENOMEM;
index 1ae872bfc3ba5be342993f4d176b5a2d4390da57..747645c74134de4cd620a284e0f26ca8a61d4991 100644 (file)
@@ -186,7 +186,7 @@ static inline int write_enable(struct spi_nor *nor)
 }
 
 /*
- * Send write disble instruction to the chip.
+ * Send write disable instruction to the chip.
  */
 static inline int write_disable(struct spi_nor *nor)
 {
index 98ed4d96987c87fda074a219a234cbf8e48f2b9a..57fc47ad5ab3cb79943f91fdb3969e63ee0c3bd8 100644 (file)
@@ -18,7 +18,7 @@ obj-$(CONFIG_MII) += mii.o
 obj-$(CONFIG_MDIO) += mdio.o
 obj-$(CONFIG_NET) += Space.o loopback.o
 obj-$(CONFIG_NETCONSOLE) += netconsole.o
-obj-$(CONFIG_PHYLIB) += phy/
+obj-y += phy/
 obj-$(CONFIG_RIONET) += rionet.o
 obj-$(CONFIG_NET_TEAM) += team/
 obj-$(CONFIG_TUN) += tun.o
index edc70ffad6607ac06d0a40b48316bef554c5f4c2..c5fd4259da331b27503644938ab22787e2eea8ae 100644 (file)
@@ -92,6 +92,7 @@ enum ad_link_speed_type {
        AD_LINK_SPEED_2500MBPS,
        AD_LINK_SPEED_10000MBPS,
        AD_LINK_SPEED_20000MBPS,
+       AD_LINK_SPEED_25000MBPS,
        AD_LINK_SPEED_40000MBPS,
        AD_LINK_SPEED_56000MBPS,
        AD_LINK_SPEED_100000MBPS,
@@ -260,6 +261,7 @@ static inline int __check_agg_selection_timer(struct port *port)
  *     %AD_LINK_SPEED_2500MBPS,
  *     %AD_LINK_SPEED_10000MBPS
  *     %AD_LINK_SPEED_20000MBPS
+ *     %AD_LINK_SPEED_25000MBPS
  *     %AD_LINK_SPEED_40000MBPS
  *     %AD_LINK_SPEED_56000MBPS
  *     %AD_LINK_SPEED_100000MBPS
@@ -302,6 +304,10 @@ static u16 __get_link_speed(struct port *port)
                        speed = AD_LINK_SPEED_20000MBPS;
                        break;
 
+               case SPEED_25000:
+                       speed = AD_LINK_SPEED_25000MBPS;
+                       break;
+
                case SPEED_40000:
                        speed = AD_LINK_SPEED_40000MBPS;
                        break;
@@ -707,6 +713,9 @@ static u32 __get_agg_bandwidth(struct aggregator *aggregator)
                case AD_LINK_SPEED_20000MBPS:
                        bandwidth = nports * 20000;
                        break;
+               case AD_LINK_SPEED_25000MBPS:
+                       bandwidth = nports * 25000;
+                       break;
                case AD_LINK_SPEED_40000MBPS:
                        bandwidth = nports * 40000;
                        break;
@@ -1052,8 +1061,7 @@ static void ad_rx_machine(struct lacpdu *lacpdu, struct port *port)
                port->sm_rx_state = AD_RX_INITIALIZE;
                port->sm_vars |= AD_PORT_CHURNED;
        /* check if port is not enabled */
-       } else if (!(port->sm_vars & AD_PORT_BEGIN)
-                && !port->is_enabled && !(port->sm_vars & AD_PORT_MOVED))
+       } else if (!(port->sm_vars & AD_PORT_BEGIN) && !port->is_enabled)
                port->sm_rx_state = AD_RX_PORT_DISABLED;
        /* check if new lacpdu arrived */
        else if (lacpdu && ((port->sm_rx_state == AD_RX_EXPIRED) ||
@@ -1081,11 +1089,8 @@ static void ad_rx_machine(struct lacpdu *lacpdu, struct port *port)
                        /* if no lacpdu arrived and no timer is on */
                        switch (port->sm_rx_state) {
                        case AD_RX_PORT_DISABLED:
-                               if (port->sm_vars & AD_PORT_MOVED)
-                                       port->sm_rx_state = AD_RX_INITIALIZE;
-                               else if (port->is_enabled
-                                        && (port->sm_vars
-                                            & AD_PORT_LACP_ENABLED))
+                               if (port->is_enabled &&
+                                   (port->sm_vars & AD_PORT_LACP_ENABLED))
                                        port->sm_rx_state = AD_RX_EXPIRED;
                                else if (port->is_enabled
                                         && ((port->sm_vars
@@ -1115,7 +1120,6 @@ static void ad_rx_machine(struct lacpdu *lacpdu, struct port *port)
                        port->sm_vars &= ~AD_PORT_SELECTED;
                        __record_default(port);
                        port->actor_oper_port_state &= ~AD_STATE_EXPIRED;
-                       port->sm_vars &= ~AD_PORT_MOVED;
                        port->sm_rx_state = AD_RX_PORT_DISABLED;
 
                        /* Fall Through */
@@ -2442,9 +2446,9 @@ void bond_3ad_adapter_speed_duplex_changed(struct slave *slave)
 
        spin_lock_bh(&slave->bond->mode_lock);
        ad_update_actor_keys(port, false);
+       spin_unlock_bh(&slave->bond->mode_lock);
        netdev_dbg(slave->bond->dev, "Port %d slave %s changed speed/duplex\n",
                   port->actor_port_number, slave->dev->name);
-       spin_unlock_bh(&slave->bond->mode_lock);
 }
 
 /**
@@ -2488,12 +2492,12 @@ void bond_3ad_handle_link_change(struct slave *slave, char link)
        agg = __get_first_agg(port);
        ad_agg_selection_logic(agg, &dummy);
 
+       spin_unlock_bh(&slave->bond->mode_lock);
+
        netdev_dbg(slave->bond->dev, "Port %d changed link status to %s\n",
                   port->actor_port_number,
                   link == BOND_LINK_UP ? "UP" : "DOWN");
 
-       spin_unlock_bh(&slave->bond->mode_lock);
-
        /* RTNL is held and mode_lock is released so it's safe
         * to update slave_array here.
         */
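
The two bond_3ad locking hunks also move the netdev_dbg() calls out from under mode_lock, presumably just to keep the critical sections minimal; the shape becomes:

	spin_lock_bh(&slave->bond->mode_lock);
	ad_update_actor_keys(port, false);	/* the actual state change */
	spin_unlock_bh(&slave->bond->mode_lock);

	netdev_dbg(slave->bond->dev, "Port %d slave %s changed speed/duplex\n",
		   port->actor_port_number, slave->dev->name);
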
index c80b023092dde89f6b361e3879e06d70a9d3ad6f..7d7a3cec149a61241715c092a2fff02c0ce30fb0 100644 (file)
@@ -687,7 +687,8 @@ static struct slave *rlb_arp_xmit(struct sk_buff *skb, struct bonding *bond)
                /* the arp must be sent on the selected rx channel */
                tx_slave = rlb_choose_channel(skb, bond);
                if (tx_slave)
-                       ether_addr_copy(arp->mac_src, tx_slave->dev->dev_addr);
+                       bond_hw_addr_copy(arp->mac_src, tx_slave->dev->dev_addr,
+                                         tx_slave->dev->addr_len);
                netdev_dbg(bond->dev, "Server sent ARP Reply packet\n");
        } else if (arp->op_code == htons(ARPOP_REQUEST)) {
                /* Create an entry in the rx_hashtbl for this client as a
@@ -1017,22 +1018,23 @@ static void alb_send_learning_packets(struct slave *slave, u8 mac_addr[],
        rcu_read_unlock();
 }
 
-static int alb_set_slave_mac_addr(struct slave *slave, u8 addr[])
+static int alb_set_slave_mac_addr(struct slave *slave, u8 addr[],
+                                 unsigned int len)
 {
        struct net_device *dev = slave->dev;
-       struct sockaddr s_addr;
+       struct sockaddr_storage ss;
 
        if (BOND_MODE(slave->bond) == BOND_MODE_TLB) {
-               memcpy(dev->dev_addr, addr, dev->addr_len);
+               memcpy(dev->dev_addr, addr, len);
                return 0;
        }
 
        /* for rlb each slave must have a unique hw mac addresses so that
         * each slave will receive packets destined to a different mac
         */
-       memcpy(s_addr.sa_data, addr, dev->addr_len);
-       s_addr.sa_family = dev->type;
-       if (dev_set_mac_address(dev, &s_addr)) {
+       memcpy(ss.__data, addr, len);
+       ss.ss_family = dev->type;
+       if (dev_set_mac_address(dev, (struct sockaddr *)&ss)) {
                netdev_err(slave->bond->dev, "dev_set_mac_address of dev %s failed! ALB mode requires that the base driver support setting the hw address also when the network device's interface is open\n",
                           dev->name);
                return -EOPNOTSUPP;
@@ -1046,11 +1048,14 @@ static int alb_set_slave_mac_addr(struct slave *slave, u8 addr[])
  */
 static void alb_swap_mac_addr(struct slave *slave1, struct slave *slave2)
 {
-       u8 tmp_mac_addr[ETH_ALEN];
+       u8 tmp_mac_addr[MAX_ADDR_LEN];
 
-       ether_addr_copy(tmp_mac_addr, slave1->dev->dev_addr);
-       alb_set_slave_mac_addr(slave1, slave2->dev->dev_addr);
-       alb_set_slave_mac_addr(slave2, tmp_mac_addr);
+       bond_hw_addr_copy(tmp_mac_addr, slave1->dev->dev_addr,
+                         slave1->dev->addr_len);
+       alb_set_slave_mac_addr(slave1, slave2->dev->dev_addr,
+                              slave2->dev->addr_len);
+       alb_set_slave_mac_addr(slave2, tmp_mac_addr,
+                              slave1->dev->addr_len);
 
 }
 
@@ -1177,7 +1182,8 @@ static int alb_handle_addr_collision_on_attach(struct bonding *bond, struct slav
                /* Try setting slave mac to bond address and fall-through
                 * to code handling that situation below...
                 */
-               alb_set_slave_mac_addr(slave, bond->dev->dev_addr);
+               alb_set_slave_mac_addr(slave, bond->dev->dev_addr,
+                                      bond->dev->addr_len);
        }
 
        /* The slave's address is equal to the address of the bond.
@@ -1202,7 +1208,8 @@ static int alb_handle_addr_collision_on_attach(struct bonding *bond, struct slav
        }
 
        if (free_mac_slave) {
-               alb_set_slave_mac_addr(slave, free_mac_slave->perm_hwaddr);
+               alb_set_slave_mac_addr(slave, free_mac_slave->perm_hwaddr,
+                                      free_mac_slave->dev->addr_len);
 
                netdev_warn(bond->dev, "the hw address of slave %s is in use by the bond; giving it the hw address of %s\n",
                            slave->dev->name, free_mac_slave->dev->name);
@@ -1234,8 +1241,8 @@ static int alb_set_mac_address(struct bonding *bond, void *addr)
 {
        struct slave *slave, *rollback_slave;
        struct list_head *iter;
-       struct sockaddr sa;
-       char tmp_addr[ETH_ALEN];
+       struct sockaddr_storage ss;
+       char tmp_addr[MAX_ADDR_LEN];
        int res;
 
        if (bond->alb_info.rlb_enabled)
@@ -1243,12 +1250,14 @@ static int alb_set_mac_address(struct bonding *bond, void *addr)
 
        bond_for_each_slave(bond, slave, iter) {
                /* save net_device's current hw address */
-               ether_addr_copy(tmp_addr, slave->dev->dev_addr);
+               bond_hw_addr_copy(tmp_addr, slave->dev->dev_addr,
+                                 slave->dev->addr_len);
 
                res = dev_set_mac_address(slave->dev, addr);
 
                /* restore net_device's hw address */
-               ether_addr_copy(slave->dev->dev_addr, tmp_addr);
+               bond_hw_addr_copy(slave->dev->dev_addr, tmp_addr,
+                                 slave->dev->addr_len);
 
                if (res)
                        goto unwind;
@@ -1257,16 +1266,19 @@ static int alb_set_mac_address(struct bonding *bond, void *addr)
        return 0;
 
 unwind:
-       memcpy(sa.sa_data, bond->dev->dev_addr, bond->dev->addr_len);
-       sa.sa_family = bond->dev->type;
+       memcpy(ss.__data, bond->dev->dev_addr, bond->dev->addr_len);
+       ss.ss_family = bond->dev->type;
 
        /* unwind from head to the slave that failed */
        bond_for_each_slave(bond, rollback_slave, iter) {
                if (rollback_slave == slave)
                        break;
-               ether_addr_copy(tmp_addr, rollback_slave->dev->dev_addr);
-               dev_set_mac_address(rollback_slave->dev, &sa);
-               ether_addr_copy(rollback_slave->dev->dev_addr, tmp_addr);
+               bond_hw_addr_copy(tmp_addr, rollback_slave->dev->dev_addr,
+                                 rollback_slave->dev->addr_len);
+               dev_set_mac_address(rollback_slave->dev,
+                                   (struct sockaddr *)&ss);
+               bond_hw_addr_copy(rollback_slave->dev->dev_addr, tmp_addr,
+                                 rollback_slave->dev->addr_len);
        }
 
        return res;
@@ -1582,7 +1594,8 @@ int bond_alb_init_slave(struct bonding *bond, struct slave *slave)
 {
        int res;
 
-       res = alb_set_slave_mac_addr(slave, slave->perm_hwaddr);
+       res = alb_set_slave_mac_addr(slave, slave->perm_hwaddr,
+                                    slave->dev->addr_len);
        if (res)
                return res;
 
@@ -1696,17 +1709,20 @@ void bond_alb_handle_active_change(struct bonding *bond, struct slave *new_slave
         * and thus filter bond->dev_addr's packets, so force bond's mac
         */
        if (BOND_MODE(bond) == BOND_MODE_TLB) {
-               struct sockaddr sa;
-               u8 tmp_addr[ETH_ALEN];
+               struct sockaddr_storage ss;
+               u8 tmp_addr[MAX_ADDR_LEN];
 
-               ether_addr_copy(tmp_addr, new_slave->dev->dev_addr);
+               bond_hw_addr_copy(tmp_addr, new_slave->dev->dev_addr,
+                                 new_slave->dev->addr_len);
 
-               memcpy(sa.sa_data, bond->dev->dev_addr, bond->dev->addr_len);
-               sa.sa_family = bond->dev->type;
+               bond_hw_addr_copy(ss.__data, bond->dev->dev_addr,
+                                 bond->dev->addr_len);
+               ss.ss_family = bond->dev->type;
                /* we don't care if it can't change its mac, best effort */
-               dev_set_mac_address(new_slave->dev, &sa);
+               dev_set_mac_address(new_slave->dev, (struct sockaddr *)&ss);
 
-               ether_addr_copy(new_slave->dev->dev_addr, tmp_addr);
+               bond_hw_addr_copy(new_slave->dev->dev_addr, tmp_addr,
+                                 new_slave->dev->addr_len);
        }
 
        /* curr_active_slave must be set before calling alb_swap_mac_addr */
@@ -1716,7 +1732,8 @@ void bond_alb_handle_active_change(struct bonding *bond, struct slave *new_slave
                alb_fasten_mac_swap(bond, swap_slave, new_slave);
        } else {
                /* set the new_slave to the bond mac address */
-               alb_set_slave_mac_addr(new_slave, bond->dev->dev_addr);
+               alb_set_slave_mac_addr(new_slave, bond->dev->dev_addr,
+                                      bond->dev->addr_len);
                alb_send_learning_packets(new_slave, bond->dev->dev_addr,
                                          false);
        }
@@ -1726,19 +1743,19 @@ void bond_alb_handle_active_change(struct bonding *bond, struct slave *new_slave
 int bond_alb_set_mac_address(struct net_device *bond_dev, void *addr)
 {
        struct bonding *bond = netdev_priv(bond_dev);
-       struct sockaddr *sa = addr;
+       struct sockaddr_storage *ss = addr;
        struct slave *curr_active;
        struct slave *swap_slave;
        int res;
 
-       if (!is_valid_ether_addr(sa->sa_data))
+       if (!is_valid_ether_addr(ss->__data))
                return -EADDRNOTAVAIL;
 
        res = alb_set_mac_address(bond, addr);
        if (res)
                return res;
 
-       memcpy(bond_dev->dev_addr, sa->sa_data, bond_dev->addr_len);
+       bond_hw_addr_copy(bond_dev->dev_addr, ss->__data, bond_dev->addr_len);
 
        /* If there is no curr_active_slave there is nothing else to do.
         * Otherwise we'll need to pass the new address to it and handle
@@ -1754,7 +1771,8 @@ int bond_alb_set_mac_address(struct net_device *bond_dev, void *addr)
                alb_swap_mac_addr(swap_slave, curr_active);
                alb_fasten_mac_swap(bond, swap_slave, curr_active);
        } else {
-               alb_set_slave_mac_addr(curr_active, bond_dev->dev_addr);
+               alb_set_slave_mac_addr(curr_active, bond_dev->dev_addr,
+                                      bond_dev->addr_len);
 
                alb_send_learning_packets(curr_active,
                                          bond_dev->dev_addr, false);
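
Note on the conversion pattern above: every hunk in bond_alb.c swaps fixed-size Ethernet buffers for link-layer-agnostic ones. struct sockaddr carries only 14 bytes of address data, and ETH_ALEN-sized temporaries assume 6-byte MACs, so neither can hold e.g. a 20-byte InfiniBand hardware address; struct sockaddr_storage and MAX_ADDR_LEN buffers can. A minimal sketch of the pattern, with dev/bond as placeholder variables and bond_hw_addr_copy() assumed to be the length-aware copy helper from the bonding headers:

	struct sockaddr_storage ss;	/* roomy enough for any link layer */
	u8 tmp_addr[MAX_ADDR_LEN];	/* was: u8 tmp_addr[ETH_ALEN] */

	bond_hw_addr_copy(tmp_addr, dev->dev_addr, dev->addr_len);
	bond_hw_addr_copy(ss.__data, bond->dev->dev_addr, bond->dev->addr_len);
	ss.ss_family = dev->type;
	/* dev_set_mac_address() still takes a struct sockaddr *, hence the cast */
	dev_set_mac_address(dev, (struct sockaddr *)&ss);
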
index 8a4ba8b88e52f9d5b1ba318e5dbfb53344f6ebca..aba7352906a5a3744cd96337cd3c391cc722177c 100644 (file)
@@ -201,12 +201,6 @@ atomic_t netpoll_block_tx = ATOMIC_INIT(0);
 
 unsigned int bond_net_id __read_mostly;
 
-static __be32 arp_target[BOND_MAX_ARP_TARGETS];
-static int arp_ip_count;
-static int bond_mode   = BOND_MODE_ROUNDROBIN;
-static int xmit_hashtype = BOND_XMIT_POLICY_LAYER2;
-static int lacp_fast;
-
 /*-------------------------- Forward declarations ---------------------------*/
 
 static int bond_init(struct net_device *bond_dev);
@@ -371,9 +365,10 @@ down:
 /* Get link speed and duplex from the slave's base driver
  * using ethtool. If for some reason the call fails or the
  * values are invalid, set speed and duplex to -1,
- * and return.
+ * and return. Return 1 if speed or duplex settings are
+ * UNKNOWN; 0 otherwise.
  */
-static void bond_update_speed_duplex(struct slave *slave)
+static int bond_update_speed_duplex(struct slave *slave)
 {
        struct net_device *slave_dev = slave->dev;
        struct ethtool_link_ksettings ecmd;
@@ -384,23 +379,21 @@ static void bond_update_speed_duplex(struct slave *slave)
 
        res = __ethtool_get_link_ksettings(slave_dev, &ecmd);
        if (res < 0)
-               return;
-
+               return 1;
        if (ecmd.base.speed == 0 || ecmd.base.speed == ((__u32)-1))
-               return;
-
+               return 1;
        switch (ecmd.base.duplex) {
        case DUPLEX_FULL:
        case DUPLEX_HALF:
                break;
        default:
-               return;
+               return 1;
        }
 
        slave->speed = ecmd.base.speed;
        slave->duplex = ecmd.base.duplex;
 
-       return;
+       return 0;
 }
 
 const char *bond_slave_link_status(s8 link)
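
With a void return, bond_update_speed_duplex() gave callers no way to tell a healthy slave from one whose driver reported unusable speed/duplex values. The new int return is picked up later in this patch at the bond_enslave() and bond_miimon_commit() call sites, which demote such slaves instead of bringing them up:

	if (bond_update_speed_duplex(new_slave))
		new_slave->link = BOND_LINK_DOWN;
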
@@ -652,8 +645,8 @@ static void bond_do_fail_over_mac(struct bonding *bond,
                                  struct slave *new_active,
                                  struct slave *old_active)
 {
-       u8 tmp_mac[ETH_ALEN];
-       struct sockaddr saddr;
+       u8 tmp_mac[MAX_ADDR_LEN];
+       struct sockaddr_storage ss;
        int rv;
 
        switch (bond->params.fail_over_mac) {
@@ -673,16 +666,20 @@ static void bond_do_fail_over_mac(struct bonding *bond,
                        old_active = bond_get_old_active(bond, new_active);
 
                if (old_active) {
-                       ether_addr_copy(tmp_mac, new_active->dev->dev_addr);
-                       ether_addr_copy(saddr.sa_data,
-                                       old_active->dev->dev_addr);
-                       saddr.sa_family = new_active->dev->type;
+                       bond_hw_addr_copy(tmp_mac, new_active->dev->dev_addr,
+                                         new_active->dev->addr_len);
+                       bond_hw_addr_copy(ss.__data,
+                                         old_active->dev->dev_addr,
+                                         old_active->dev->addr_len);
+                       ss.ss_family = new_active->dev->type;
                } else {
-                       ether_addr_copy(saddr.sa_data, bond->dev->dev_addr);
-                       saddr.sa_family = bond->dev->type;
+                       bond_hw_addr_copy(ss.__data, bond->dev->dev_addr,
+                                         bond->dev->addr_len);
+                       ss.ss_family = bond->dev->type;
                }
 
-               rv = dev_set_mac_address(new_active->dev, &saddr);
+               rv = dev_set_mac_address(new_active->dev,
+                                        (struct sockaddr *)&ss);
                if (rv) {
                        netdev_err(bond->dev, "Error %d setting MAC of slave %s\n",
                                   -rv, new_active->dev->name);
@@ -692,10 +689,12 @@ static void bond_do_fail_over_mac(struct bonding *bond,
                if (!old_active)
                        goto out;
 
-               ether_addr_copy(saddr.sa_data, tmp_mac);
-               saddr.sa_family = old_active->dev->type;
+               bond_hw_addr_copy(ss.__data, tmp_mac,
+                                 new_active->dev->addr_len);
+               ss.ss_family = old_active->dev->type;
 
-               rv = dev_set_mac_address(old_active->dev, &saddr);
+               rv = dev_set_mac_address(old_active->dev,
+                                        (struct sockaddr *)&ss);
                if (rv)
                        netdev_err(bond->dev, "Error %d setting MAC of slave %s\n",
                                   -rv, old_active->dev->name);
@@ -1191,7 +1190,8 @@ static rx_handler_result_t bond_handle_frame(struct sk_buff **pskb)
                        kfree_skb(skb);
                        return RX_HANDLER_CONSUMED;
                }
-               ether_addr_copy(eth_hdr(skb)->h_dest, bond->dev->dev_addr);
+               bond_hw_addr_copy(eth_hdr(skb)->h_dest, bond->dev->dev_addr,
+                                 bond->dev->addr_len);
        }
 
        return ret;
@@ -1330,7 +1330,7 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
        struct bonding *bond = netdev_priv(bond_dev);
        const struct net_device_ops *slave_ops = slave_dev->netdev_ops;
        struct slave *new_slave = NULL, *prev_slave;
-       struct sockaddr addr;
+       struct sockaddr_storage ss;
        int link_reporting;
        int res = 0, i;
 
@@ -1481,16 +1481,17 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
         * that need it, and for restoring it upon release, and then
         * set it to the master's address
         */
-       ether_addr_copy(new_slave->perm_hwaddr, slave_dev->dev_addr);
+       bond_hw_addr_copy(new_slave->perm_hwaddr, slave_dev->dev_addr,
+                         slave_dev->addr_len);
 
        if (!bond->params.fail_over_mac ||
            BOND_MODE(bond) != BOND_MODE_ACTIVEBACKUP) {
                /* Set slave to master's mac address.  The application already
                 * set the master's mac address to that of the first slave
                 */
-               memcpy(addr.sa_data, bond_dev->dev_addr, bond_dev->addr_len);
-               addr.sa_family = slave_dev->type;
-               res = dev_set_mac_address(slave_dev, &addr);
+               memcpy(ss.__data, bond_dev->dev_addr, bond_dev->addr_len);
+               ss.ss_family = slave_dev->type;
+               res = dev_set_mac_address(slave_dev, (struct sockaddr *)&ss);
                if (res) {
                        netdev_dbg(bond_dev, "Error %d calling set_mac_address\n", res);
                        goto err_restore_mtu;
@@ -1565,7 +1566,8 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
        new_slave->delay = 0;
        new_slave->link_failure_count = 0;
 
-       bond_update_speed_duplex(new_slave);
+       if (bond_update_speed_duplex(new_slave))
+               new_slave->link = BOND_LINK_DOWN;
 
        new_slave->last_rx = jiffies -
                (msecs_to_jiffies(bond->params.arp_interval) + 1);
@@ -1773,9 +1775,10 @@ err_restore_mac:
                 * MAC if this slave's MAC is in use by the bond, or at
                 * least print a warning.
                 */
-               ether_addr_copy(addr.sa_data, new_slave->perm_hwaddr);
-               addr.sa_family = slave_dev->type;
-               dev_set_mac_address(slave_dev, &addr);
+               bond_hw_addr_copy(ss.__data, new_slave->perm_hwaddr,
+                                 new_slave->dev->addr_len);
+               ss.ss_family = slave_dev->type;
+               dev_set_mac_address(slave_dev, (struct sockaddr *)&ss);
        }
 
 err_restore_mtu:
@@ -1818,7 +1821,7 @@ static int __bond_release_one(struct net_device *bond_dev,
 {
        struct bonding *bond = netdev_priv(bond_dev);
        struct slave *slave, *oldcurrent;
-       struct sockaddr addr;
+       struct sockaddr_storage ss;
        int old_flags = bond_dev->flags;
        netdev_features_t old_features = bond_dev->features;
 
@@ -1953,9 +1956,10 @@ static int __bond_release_one(struct net_device *bond_dev,
        if (bond->params.fail_over_mac != BOND_FOM_ACTIVE ||
            BOND_MODE(bond) != BOND_MODE_ACTIVEBACKUP) {
                /* restore original ("permanent") mac address */
-               ether_addr_copy(addr.sa_data, slave->perm_hwaddr);
-               addr.sa_family = slave_dev->type;
-               dev_set_mac_address(slave_dev, &addr);
+               bond_hw_addr_copy(ss.__data, slave->perm_hwaddr,
+                                 slave->dev->addr_len);
+               ss.ss_family = slave_dev->type;
+               dev_set_mac_address(slave_dev, (struct sockaddr *)&ss);
        }
 
        dev_set_mtu(slave_dev, slave->original_mtu);
@@ -2039,8 +2043,7 @@ static int bond_miimon_inspect(struct bonding *bond)
                        if (link_state)
                                continue;
 
-                       bond_set_slave_link_state(slave, BOND_LINK_FAIL,
-                                                 BOND_SLAVE_NOTIFY_LATER);
+                       bond_propose_link_state(slave, BOND_LINK_FAIL);
                        slave->delay = bond->params.downdelay;
                        if (slave->delay) {
                                netdev_info(bond->dev, "link status down for %sinterface %s, disabling it in %d ms\n",
@@ -2055,8 +2058,7 @@ static int bond_miimon_inspect(struct bonding *bond)
                case BOND_LINK_FAIL:
                        if (link_state) {
                                /* recovered before downdelay expired */
-                               bond_set_slave_link_state(slave, BOND_LINK_UP,
-                                                         BOND_SLAVE_NOTIFY_LATER);
+                               bond_propose_link_state(slave, BOND_LINK_UP);
                                slave->last_link_up = jiffies;
                                netdev_info(bond->dev, "link status up again after %d ms for interface %s\n",
                                            (bond->params.downdelay - slave->delay) *
@@ -2078,8 +2080,7 @@ static int bond_miimon_inspect(struct bonding *bond)
                        if (!link_state)
                                continue;
 
-                       bond_set_slave_link_state(slave, BOND_LINK_BACK,
-                                                 BOND_SLAVE_NOTIFY_LATER);
+                       bond_propose_link_state(slave, BOND_LINK_BACK);
                        slave->delay = bond->params.updelay;
 
                        if (slave->delay) {
@@ -2092,9 +2093,7 @@ static int bond_miimon_inspect(struct bonding *bond)
                        /*FALLTHRU*/
                case BOND_LINK_BACK:
                        if (!link_state) {
-                               bond_set_slave_link_state(slave,
-                                                         BOND_LINK_DOWN,
-                                                         BOND_SLAVE_NOTIFY_LATER);
+                               bond_propose_link_state(slave, BOND_LINK_DOWN);
                                netdev_info(bond->dev, "link status down again after %d ms for interface %s\n",
                                            (bond->params.updelay - slave->delay) *
                                            bond->params.miimon,
@@ -2132,7 +2131,13 @@ static void bond_miimon_commit(struct bonding *bond)
                        continue;
 
                case BOND_LINK_UP:
-                       bond_update_speed_duplex(slave);
+                       if (bond_update_speed_duplex(slave)) {
+                               slave->link = BOND_LINK_DOWN;
+                               netdev_warn(bond->dev,
+                                           "failed to get link speed/duplex for %s\n",
+                                           slave->dev->name);
+                               continue;
+                       }
                        bond_set_slave_link_state(slave, BOND_LINK_UP,
                                                  BOND_SLAVE_NOTIFY_NOW);
                        slave->last_link_up = jiffies;
@@ -2231,6 +2236,8 @@ static void bond_mii_monitor(struct work_struct *work)
                                            mii_work.work);
        bool should_notify_peers = false;
        unsigned long delay;
+       struct slave *slave;
+       struct list_head *iter;
 
        delay = msecs_to_jiffies(bond->params.miimon);
 
@@ -2251,6 +2258,9 @@ static void bond_mii_monitor(struct work_struct *work)
                        goto re_arm;
                }
 
+               bond_for_each_slave(bond, slave, iter) {
+                       bond_commit_link_state(slave, BOND_SLAVE_NOTIFY_LATER);
+               }
                bond_miimon_commit(bond);
 
                rtnl_unlock();  /* might sleep, hold no other locks */
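
The inspect phase of the MII monitor runs under RCU, where link state must not be applied or notified; it now only records a proposed state via bond_propose_link_state(), and the new loop above commits everything under RTNL before bond_miimon_commit() acts on it. A sketch of the helper pair this relies on, patterned on include/net/bonding.h (the notification hook named below is an assumption, not quoted from the header):

	static inline void bond_propose_link_state(struct slave *slave, int state)
	{
		slave->link_new_state = state;
	}

	static inline void bond_commit_link_state(struct slave *slave, bool notify)
	{
		if (slave->link == slave->link_new_state)
			return;

		slave->link = slave->link_new_state;
		if (notify)
			bond_queue_slave_event(slave);	/* assumed notification hook */
	}
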
@@ -2575,10 +2585,8 @@ static bool bond_time_in_interval(struct bonding *bond, unsigned long last_act,
  * arp is transmitted to generate traffic. see activebackup_arp_monitor for
  * arp monitoring in active backup mode.
  */
-static void bond_loadbalance_arp_mon(struct work_struct *work)
+static void bond_loadbalance_arp_mon(struct bonding *bond)
 {
-       struct bonding *bond = container_of(work, struct bonding,
-                                           arp_work.work);
        struct slave *slave, *oldcurrent;
        struct list_head *iter;
        int do_failover = 0, slave_state_changed = 0;
@@ -2916,10 +2924,8 @@ check_state:
        return should_notify_rtnl;
 }
 
-static void bond_activebackup_arp_mon(struct work_struct *work)
+static void bond_activebackup_arp_mon(struct bonding *bond)
 {
-       struct bonding *bond = container_of(work, struct bonding,
-                                           arp_work.work);
        bool should_notify_peers = false;
        bool should_notify_rtnl = false;
        int delta_in_ticks;
@@ -2972,6 +2978,17 @@ re_arm:
        }
 }
 
+static void bond_arp_monitor(struct work_struct *work)
+{
+       struct bonding *bond = container_of(work, struct bonding,
+                                           arp_work.work);
+
+       if (BOND_MODE(bond) == BOND_MODE_ACTIVEBACKUP)
+               bond_activebackup_arp_mon(bond);
+       else
+               bond_loadbalance_arp_mon(bond);
+}
+
 /*-------------------------- netdev event handling --------------------------*/
 
 /* Change device name */
@@ -3228,10 +3245,7 @@ static void bond_work_init_all(struct bonding *bond)
                          bond_resend_igmp_join_requests_delayed);
        INIT_DELAYED_WORK(&bond->alb_work, bond_alb_monitor);
        INIT_DELAYED_WORK(&bond->mii_work, bond_mii_monitor);
-       if (BOND_MODE(bond) == BOND_MODE_ACTIVEBACKUP)
-               INIT_DELAYED_WORK(&bond->arp_work, bond_activebackup_arp_mon);
-       else
-               INIT_DELAYED_WORK(&bond->arp_work, bond_loadbalance_arp_mon);
+       INIT_DELAYED_WORK(&bond->arp_work, bond_arp_monitor);
        INIT_DELAYED_WORK(&bond->ad_work, bond_3ad_state_machine_handler);
        INIT_DELAYED_WORK(&bond->slave_arr_work, bond_slave_arr_handler);
 }
@@ -3266,8 +3280,6 @@ static int bond_open(struct net_device *bond_dev)
                }
        }
 
-       bond_work_init_all(bond);
-
        if (bond_is_lb(bond)) {
                /* bond_alb_initialize must be called before the timer
                 * is started.
@@ -3327,12 +3339,17 @@ static void bond_fold_stats(struct rtnl_link_stats64 *_res,
        for (i = 0; i < sizeof(*_res) / sizeof(u64); i++) {
                u64 nv = new[i];
                u64 ov = old[i];
+               s64 delta = nv - ov;
 
                /* detects if this particular field is 32bit only */
                if (((nv | ov) >> 32) == 0)
-                       res[i] += (u32)nv - (u32)ov;
-               else
-                       res[i] += nv - ov;
+                       delta = (s64)(s32)((u32)nv - (u32)ov);
+
+               /* filter anomalies, some drivers reset their stats
+                * at down/up events.
+                */
+               if (delta > 0)
+                       res[i] += delta;
        }
 }
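
The reworked accumulation in bond_fold_stats() is easiest to check with numbers. For fields a driver keeps in 32 bits, the subtraction is done in u32 so a counter wrap still yields the correct small delta, and the result is then sign-extended so that a genuine decrease (a driver that zeroes its stats across a down/up cycle) comes out negative and is dropped instead of being added as a huge bogus value. Illustrative values, not from the patch:

	u64 ov = 0xFFFFFFF0;	/* old 32-bit counter, close to wrapping */
	u64 nv = 0x00000010;	/* new value, after the wrap */
	s64 delta = (s64)(s32)((u32)nv - (u32)ov);	/* = 32: correct delta */

	ov = 1000;		/* driver reset its stats on ifdown/ifup */
	nv = 0;
	delta = (s64)(s32)((u32)nv - (u32)ov);		/* = -1000: filtered out */
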
 
@@ -3619,7 +3636,7 @@ static int bond_set_mac_address(struct net_device *bond_dev, void *addr)
 {
        struct bonding *bond = netdev_priv(bond_dev);
        struct slave *slave, *rollback_slave;
-       struct sockaddr *sa = addr, tmp_sa;
+       struct sockaddr_storage *ss = addr, tmp_ss;
        struct list_head *iter;
        int res = 0;
 
@@ -3636,7 +3653,7 @@ static int bond_set_mac_address(struct net_device *bond_dev, void *addr)
            BOND_MODE(bond) == BOND_MODE_ACTIVEBACKUP)
                return 0;
 
-       if (!is_valid_ether_addr(sa->sa_data))
+       if (!is_valid_ether_addr(ss->__data))
                return -EADDRNOTAVAIL;
 
        bond_for_each_slave(bond, slave, iter) {
@@ -3655,12 +3672,12 @@ static int bond_set_mac_address(struct net_device *bond_dev, void *addr)
        }
 
        /* success */
-       memcpy(bond_dev->dev_addr, sa->sa_data, bond_dev->addr_len);
+       memcpy(bond_dev->dev_addr, ss->__data, bond_dev->addr_len);
        return 0;
 
 unwind:
-       memcpy(tmp_sa.sa_data, bond_dev->dev_addr, bond_dev->addr_len);
-       tmp_sa.sa_family = bond_dev->type;
+       memcpy(tmp_ss.__data, bond_dev->dev_addr, bond_dev->addr_len);
+       tmp_ss.ss_family = bond_dev->type;
 
        /* unwind from head to the slave that failed */
        bond_for_each_slave(bond, rollback_slave, iter) {
@@ -3669,7 +3686,8 @@ unwind:
                if (rollback_slave == slave)
                        break;
 
-               tmp_res = dev_set_mac_address(rollback_slave->dev, &tmp_sa);
+               tmp_res = dev_set_mac_address(rollback_slave->dev,
+                                             (struct sockaddr *)&tmp_ss);
                if (tmp_res) {
                        netdev_dbg(bond_dev, "unwind err %d dev %s\n",
                                   tmp_res, rollback_slave->dev->name);
@@ -4252,6 +4270,12 @@ static int bond_check_params(struct bond_params *params)
        int arp_all_targets_value;
        u16 ad_actor_sys_prio = 0;
        u16 ad_user_port_key = 0;
+       __be32 arp_target[BOND_MAX_ARP_TARGETS];
+       int arp_ip_count;
+       int bond_mode   = BOND_MODE_ROUNDROBIN;
+       int xmit_hashtype = BOND_XMIT_POLICY_LAYER2;
+       int lacp_fast = 0;
+       int tlb_dynamic_lb = 0;
 
        /* Convert string parameters. */
        if (mode) {
@@ -4564,6 +4588,17 @@ static int bond_check_params(struct bond_params *params)
        }
        ad_user_port_key = valptr->value;
 
+       if (bond_mode == BOND_MODE_TLB) {
+               bond_opt_initstr(&newval, "default");
+               valptr = bond_opt_parse(bond_opt_get(BOND_OPT_TLB_DYNAMIC_LB),
+                                       &newval);
+               if (!valptr) {
+                       pr_err("Error: No tlb_dynamic_lb default value\n");
+                       return -EINVAL;
+               }
+               tlb_dynamic_lb = valptr->value;
+       }
+
        if (lp_interval == 0) {
                pr_warn("Warning: ip_interval must be between 1 and %d, so it was reset to %d\n",
                        INT_MAX, BOND_ALB_DEFAULT_LP_INTERVAL);
@@ -4591,7 +4626,7 @@ static int bond_check_params(struct bond_params *params)
        params->min_links = min_links;
        params->lp_interval = lp_interval;
        params->packets_per_slave = packets_per_slave;
-       params->tlb_dynamic_lb = 1; /* Default value */
+       params->tlb_dynamic_lb = tlb_dynamic_lb;
        params->ad_actor_sys_prio = ad_actor_sys_prio;
        eth_zero_addr(params->ad_actor_system);
        params->ad_user_port_key = ad_user_port_key;
@@ -4687,6 +4722,8 @@ int bond_create(struct net *net, const char *name)
 
        netif_carrier_off(bond_dev);
 
+       bond_work_init_all(bond);
+
        rtnl_unlock();
        if (res < 0)
                bond_destructor(bond_dev);
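
Two scheduling fixes meet here: bond_work_init_all() moves from bond_open() to bond_create(), so the delayed-work items are initialized exactly once per bond rather than re-initialized (possibly while still armed) on every open, and the bond_arp_monitor() dispatcher added earlier picks the handler by BOND_MODE() at run time, so changing the mode after creation no longer leaves a stale handler bound at INIT_DELAYED_WORK() time. The resulting lifecycle, sketched under those assumptions:

	/* creation: bind handlers once */
	INIT_DELAYED_WORK(&bond->arp_work, bond_arp_monitor);

	/* open/close: only arm and disarm */
	queue_delayed_work(bond->wq, &bond->arp_work, 0);
	cancel_delayed_work_sync(&bond->arp_work);
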
index f514fe5e80a5369a6cc8f2bf3e293a3f09cb8b4f..d8d4ada034b7d6cbddbdf8f227cfe44fa15b4b27 100644 (file)
@@ -183,7 +183,8 @@ static void bond_info_show_slave(struct seq_file *seq,
        seq_printf(seq, "Link Failure Count: %u\n",
                   slave->link_failure_count);
 
-       seq_printf(seq, "Permanent HW addr: %pM\n", slave->perm_hwaddr);
+       seq_printf(seq, "Permanent HW addr: %*phC\n",
+                  slave->dev->addr_len, slave->perm_hwaddr);
        seq_printf(seq, "Slave queue ID: %d\n", slave->queue_id);
 
        if (BOND_MODE(bond) == BOND_MODE_8023AD) {
index 148cae5871a6baffa17b43f9e6104a6422ad3f09..8f2e0dd7b7565769c768543e1097caff53d555ff 100644 (file)
@@ -1,6 +1,12 @@
 menu "CAN SPI interfaces"
        depends on SPI
 
+config CAN_HI311X
+       tristate "Holt HI311x SPI CAN controllers"
+       depends on CAN_DEV && SPI && HAS_DMA
+       ---help---
+         Driver for the Holt HI311x SPI CAN controllers.
+
 config CAN_MCP251X
        tristate "Microchip MCP251x SPI CAN controllers"
        depends on HAS_DMA
index 0e86040cdd8ce9c2ca037ed27c234dd362812a5f..f59fa37310736531f6927ff3b75e8503f481de68 100644 (file)
@@ -3,4 +3,5 @@
 #
 
 
+obj-$(CONFIG_CAN_HI311X)       += hi311x.o
 obj-$(CONFIG_CAN_MCP251X)      += mcp251x.o
diff --git a/drivers/net/can/spi/hi311x.c b/drivers/net/can/spi/hi311x.c
new file mode 100644 (file)
index 0000000..5590c55
--- /dev/null
@@ -0,0 +1,1076 @@
+/* CAN bus driver for Holt HI3110 CAN Controller with SPI Interface
+ *
+ * Copyright(C) Timesys Corporation 2016
+ *
+ * Based on Microchip 251x CAN Controller (mcp251x) Linux kernel driver
+ * Copyright 2009 Christian Pellegrin EVOL S.r.l.
+ * Copyright 2007 Raymarine UK, Ltd. All Rights Reserved.
+ * Copyright 2006 Arcom Control Systems Ltd.
+ *
+ * Based on CAN bus driver for the CCAN controller written by
+ * - Sascha Hauer, Marc Kleine-Budde, Pengutronix
+ * - Simon Kallweit, intefo AG
+ * Copyright 2007
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/can/core.h>
+#include <linux/can/dev.h>
+#include <linux/can/led.h>
+#include <linux/clk.h>
+#include <linux/completion.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/dma-mapping.h>
+#include <linux/freezer.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/netdevice.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/regulator/consumer.h>
+#include <linux/slab.h>
+#include <linux/spi/spi.h>
+#include <linux/uaccess.h>
+
+#define HI3110_MASTER_RESET 0x56
+#define HI3110_READ_CTRL0 0xD2
+#define HI3110_READ_CTRL1 0xD4
+#define HI3110_READ_STATF 0xE2
+#define HI3110_WRITE_CTRL0 0x14
+#define HI3110_WRITE_CTRL1 0x16
+#define HI3110_WRITE_INTE 0x1C
+#define HI3110_WRITE_BTR0 0x18
+#define HI3110_WRITE_BTR1 0x1A
+#define HI3110_READ_BTR0 0xD6
+#define HI3110_READ_BTR1 0xD8
+#define HI3110_READ_INTF 0xDE
+#define HI3110_READ_ERR 0xDC
+#define HI3110_READ_FIFO_WOTIME 0x48
+#define HI3110_WRITE_FIFO 0x12
+#define HI3110_READ_MESSTAT 0xDA
+#define HI3110_READ_REC 0xEA
+#define HI3110_READ_TEC 0xEC
+
+#define HI3110_CTRL0_MODE_MASK (7 << 5)
+#define HI3110_CTRL0_NORMAL_MODE (0 << 5)
+#define HI3110_CTRL0_LOOPBACK_MODE (1 << 5)
+#define HI3110_CTRL0_MONITOR_MODE (2 << 5)
+#define HI3110_CTRL0_SLEEP_MODE (3 << 5)
+#define HI3110_CTRL0_INIT_MODE (4 << 5)
+
+#define HI3110_CTRL1_TXEN BIT(7)
+
+#define HI3110_INT_RXTMP BIT(7)
+#define HI3110_INT_RXFIFO BIT(6)
+#define HI3110_INT_TXCPLT BIT(5)
+#define HI3110_INT_BUSERR BIT(4)
+#define HI3110_INT_MCHG BIT(3)
+#define HI3110_INT_WAKEUP BIT(2)
+#define HI3110_INT_F1MESS BIT(1)
+#define HI3110_INT_F0MESS BIT(0)
+
+#define HI3110_ERR_BUSOFF BIT(7)
+#define HI3110_ERR_TXERRP BIT(6)
+#define HI3110_ERR_RXERRP BIT(5)
+#define HI3110_ERR_BITERR BIT(4)
+#define HI3110_ERR_FRMERR BIT(3)
+#define HI3110_ERR_CRCERR BIT(2)
+#define HI3110_ERR_ACKERR BIT(1)
+#define HI3110_ERR_STUFERR BIT(0)
+#define HI3110_ERR_PROTOCOL_MASK (0x1F)
+#define HI3110_ERR_PASSIVE_MASK (0x60)
+
+#define HI3110_STAT_RXFMTY BIT(1)
+#define HI3110_STAT_BUSOFF BIT(2)
+#define HI3110_STAT_ERRP BIT(3)
+#define HI3110_STAT_ERRW BIT(4)
+
+#define HI3110_BTR0_SJW_SHIFT 6
+#define HI3110_BTR0_BRP_SHIFT 0
+
+#define HI3110_BTR1_SAMP_3PERBIT (1 << 7)
+#define HI3110_BTR1_SAMP_1PERBIT (0 << 7)
+#define HI3110_BTR1_TSEG2_SHIFT 4
+#define HI3110_BTR1_TSEG1_SHIFT 0
+
+#define HI3110_FIFO_WOTIME_TAG_OFF 0
+#define HI3110_FIFO_WOTIME_ID_OFF 1
+#define HI3110_FIFO_WOTIME_DLC_OFF 5
+#define HI3110_FIFO_WOTIME_DAT_OFF 6
+
+#define HI3110_FIFO_WOTIME_TAG_IDE BIT(7)
+#define HI3110_FIFO_WOTIME_ID_RTR BIT(0)
+
+#define HI3110_FIFO_TAG_OFF 0
+#define HI3110_FIFO_ID_OFF 1
+#define HI3110_FIFO_STD_DLC_OFF 3
+#define HI3110_FIFO_STD_DATA_OFF 4
+#define HI3110_FIFO_EXT_DLC_OFF 5
+#define HI3110_FIFO_EXT_DATA_OFF 6
+
+#define HI3110_CAN_MAX_DATA_LEN 8
+#define HI3110_RX_BUF_LEN 15
+#define HI3110_TX_STD_BUF_LEN 12
+#define HI3110_TX_EXT_BUF_LEN 14
+#define HI3110_CAN_FRAME_MAX_BITS 128
+#define HI3110_EFF_FLAGS 0x18 /* IDE + SRR */
+
+#define HI3110_TX_ECHO_SKB_MAX 1
+
+#define HI3110_OST_DELAY_MS (10)
+
+#define DEVICE_NAME "hi3110"
+
+static int hi3110_enable_dma = 1; /* Enable SPI DMA. Default: 1 (On) */
+module_param(hi3110_enable_dma, int, 0444);
+MODULE_PARM_DESC(hi3110_enable_dma, "Enable SPI DMA. Default: 1 (On)");
+
+static const struct can_bittiming_const hi3110_bittiming_const = {
+       .name = DEVICE_NAME,
+       .tseg1_min = 2,
+       .tseg1_max = 16,
+       .tseg2_min = 2,
+       .tseg2_max = 8,
+       .sjw_max = 4,
+       .brp_min = 1,
+       .brp_max = 64,
+       .brp_inc = 1,
+};
+
+enum hi3110_model {
+       CAN_HI3110_HI3110 = 0x3110,
+};
+
+struct hi3110_priv {
+       struct can_priv can;
+       struct net_device *net;
+       struct spi_device *spi;
+       enum hi3110_model model;
+
+       struct mutex hi3110_lock; /* SPI device lock */
+
+       u8 *spi_tx_buf;
+       u8 *spi_rx_buf;
+       dma_addr_t spi_tx_dma;
+       dma_addr_t spi_rx_dma;
+
+       struct sk_buff *tx_skb;
+       int tx_len;
+
+       struct workqueue_struct *wq;
+       struct work_struct tx_work;
+       struct work_struct restart_work;
+
+       int force_quit;
+       int after_suspend;
+#define HI3110_AFTER_SUSPEND_UP 1
+#define HI3110_AFTER_SUSPEND_DOWN 2
+#define HI3110_AFTER_SUSPEND_POWER 4
+#define HI3110_AFTER_SUSPEND_RESTART 8
+       int restart_tx;
+       struct regulator *power;
+       struct regulator *transceiver;
+       struct clk *clk;
+};
+
+static void hi3110_clean(struct net_device *net)
+{
+       struct hi3110_priv *priv = netdev_priv(net);
+
+       if (priv->tx_skb || priv->tx_len)
+               net->stats.tx_errors++;
+       if (priv->tx_skb)
+               dev_kfree_skb(priv->tx_skb);
+       if (priv->tx_len)
+               can_free_echo_skb(priv->net, 0);
+       priv->tx_skb = NULL;
+       priv->tx_len = 0;
+}
+
+/* Note about the error return of hi3110_spi_trans: conceptually,
+ * accessing registers via SPI is no different from using normal I/O
+ * instructions, although it is much more complicated from a practical
+ * point of view. It is therefore not advisable to check the return
+ * value of this function on every call. If every read{b,l}, write{b,l}
+ * and friends were bracketed in "if ( < 0) error();", the result would
+ * be a great mess (though there are some situations where C++-style
+ * exception handling could be useful after all). So we just verify
+ * that transfers work at the beginning of our conversation with the
+ * chip, to avoid doing really nasty things (like injecting bogus
+ * packets into the network stack).
+ */
+static int hi3110_spi_trans(struct spi_device *spi, int len)
+{
+       struct hi3110_priv *priv = spi_get_drvdata(spi);
+       struct spi_transfer t = {
+               .tx_buf = priv->spi_tx_buf,
+               .rx_buf = priv->spi_rx_buf,
+               .len = len,
+               .cs_change = 0,
+       };
+       struct spi_message m;
+       int ret;
+
+       spi_message_init(&m);
+
+       if (hi3110_enable_dma) {
+               t.tx_dma = priv->spi_tx_dma;
+               t.rx_dma = priv->spi_rx_dma;
+               m.is_dma_mapped = 1;
+       }
+
+       spi_message_add_tail(&t, &m);
+
+       ret = spi_sync(spi, &m);
+
+       if (ret)
+               dev_err(&spi->dev, "spi transfer failed: ret = %d\n", ret);
+       return ret;
+}
+
+static u8 hi3110_cmd(struct spi_device *spi, u8 command)
+{
+       struct hi3110_priv *priv = spi_get_drvdata(spi);
+
+       priv->spi_tx_buf[0] = command;
+       dev_dbg(&spi->dev, "hi3110_cmd: %02X\n", command);
+
+       return hi3110_spi_trans(spi, 1);
+}
+
+static u8 hi3110_read(struct spi_device *spi, u8 command)
+{
+       struct hi3110_priv *priv = spi_get_drvdata(spi);
+       u8 val = 0;
+
+       priv->spi_tx_buf[0] = command;
+       hi3110_spi_trans(spi, 2);
+       val = priv->spi_rx_buf[1];
+
+       return val;
+}
+
+static void hi3110_write(struct spi_device *spi, u8 reg, u8 val)
+{
+       struct hi3110_priv *priv = spi_get_drvdata(spi);
+
+       priv->spi_tx_buf[0] = reg;
+       priv->spi_tx_buf[1] = val;
+       hi3110_spi_trans(spi, 2);
+}
+
+static void hi3110_hw_tx_frame(struct spi_device *spi, u8 *buf, int len)
+{
+       struct hi3110_priv *priv = spi_get_drvdata(spi);
+
+       priv->spi_tx_buf[0] = HI3110_WRITE_FIFO;
+       memcpy(priv->spi_tx_buf + 1, buf, len);
+       hi3110_spi_trans(spi, len + 1);
+}
+
+static void hi3110_hw_tx(struct spi_device *spi, struct can_frame *frame)
+{
+       u8 buf[HI3110_TX_EXT_BUF_LEN];
+
+       buf[HI3110_FIFO_TAG_OFF] = 0;
+
+       if (frame->can_id & CAN_EFF_FLAG) {
+               /* Extended frame */
+               buf[HI3110_FIFO_ID_OFF] = (frame->can_id & CAN_EFF_MASK) >> 21;
+               buf[HI3110_FIFO_ID_OFF + 1] =
+                       (((frame->can_id & CAN_EFF_MASK) >> 13) & 0xe0) |
+                       HI3110_EFF_FLAGS |
+                       (((frame->can_id & CAN_EFF_MASK) >> 15) & 0x07);
+               buf[HI3110_FIFO_ID_OFF + 2] =
+                       (frame->can_id & CAN_EFF_MASK) >> 7;
+               buf[HI3110_FIFO_ID_OFF + 3] =
+                       ((frame->can_id & CAN_EFF_MASK) << 1) |
+                       ((frame->can_id & CAN_RTR_FLAG) ? 1 : 0);
+
+               buf[HI3110_FIFO_EXT_DLC_OFF] = frame->can_dlc;
+
+               memcpy(buf + HI3110_FIFO_EXT_DATA_OFF,
+                      frame->data, frame->can_dlc);
+
+               hi3110_hw_tx_frame(spi, buf, HI3110_TX_EXT_BUF_LEN -
+                                  (HI3110_CAN_MAX_DATA_LEN - frame->can_dlc));
+       } else {
+               /* Standard frame */
+               buf[HI3110_FIFO_ID_OFF] =   (frame->can_id & CAN_SFF_MASK) >> 3;
+               buf[HI3110_FIFO_ID_OFF + 1] =
+                       ((frame->can_id & CAN_SFF_MASK) << 5) |
+                       ((frame->can_id & CAN_RTR_FLAG) ? (1 << 4) : 0);
+
+               buf[HI3110_FIFO_STD_DLC_OFF] = frame->can_dlc;
+
+               memcpy(buf + HI3110_FIFO_STD_DATA_OFF,
+                      frame->data, frame->can_dlc);
+
+               hi3110_hw_tx_frame(spi, buf, HI3110_TX_STD_BUF_LEN -
+                                  (HI3110_CAN_MAX_DATA_LEN - frame->can_dlc));
+       }
+}
+
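
The ID packing above is easiest to follow with a concrete value. For an extended data frame with can_id = 0x15A5A5A5 (29 bits, RTR clear), the shifts scatter the identifier across the four FIFO ID bytes, folding in the IDE and SRR bits (HI3110_EFF_FLAGS) and the RTR bit. The byte values below are worked out from the code above, not quoted from a datasheet:

	/* can_id = 0x15A5A5A5, data frame:
	 * buf[ID_OFF]     = id >> 21                       = 0xad  (ID28..21)
	 * buf[ID_OFF + 1] = ((id >> 13) & 0xe0)  -> 0x20   (ID20..18)
	 *                 | HI3110_EFF_FLAGS     -> 0x18   (IDE + SRR)
	 *                 | ((id >> 15) & 0x07)  -> 0x03   (ID17..15)
	 *                                         = 0x3b
	 * buf[ID_OFF + 2] = (u8)(id >> 7)         = 0x4b   (ID14..7)
	 * buf[ID_OFF + 3] = (u8)(id << 1) | RTR   = 0x4a   (ID6..0, RTR = 0)
	 */
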
+static void hi3110_hw_rx_frame(struct spi_device *spi, u8 *buf)
+{
+       struct hi3110_priv *priv = spi_get_drvdata(spi);
+
+       priv->spi_tx_buf[0] = HI3110_READ_FIFO_WOTIME;
+       hi3110_spi_trans(spi, HI3110_RX_BUF_LEN);
+       memcpy(buf, priv->spi_rx_buf + 1, HI3110_RX_BUF_LEN - 1);
+}
+
+static void hi3110_hw_rx(struct spi_device *spi)
+{
+       struct hi3110_priv *priv = spi_get_drvdata(spi);
+       struct sk_buff *skb;
+       struct can_frame *frame;
+       u8 buf[HI3110_RX_BUF_LEN - 1];
+
+       skb = alloc_can_skb(priv->net, &frame);
+       if (!skb) {
+               priv->net->stats.rx_dropped++;
+               return;
+       }
+
+       hi3110_hw_rx_frame(spi, buf);
+       if (buf[HI3110_FIFO_WOTIME_TAG_OFF] & HI3110_FIFO_WOTIME_TAG_IDE) {
+               /* IDE is recessive (1), indicating extended 29-bit frame */
+               frame->can_id = CAN_EFF_FLAG;
+               frame->can_id |=
+                       (buf[HI3110_FIFO_WOTIME_ID_OFF] << 21) |
+                       (((buf[HI3110_FIFO_WOTIME_ID_OFF + 1] & 0xE0) >> 5) << 18) |
+                       ((buf[HI3110_FIFO_WOTIME_ID_OFF + 1] & 0x07) << 15) |
+                       (buf[HI3110_FIFO_WOTIME_ID_OFF + 2] << 7) |
+                       (buf[HI3110_FIFO_WOTIME_ID_OFF + 3] >> 1);
+       } else {
+               /* IDE is dominant (0), indicating standard 11-bit frame */
+               frame->can_id =
+                       (buf[HI3110_FIFO_WOTIME_ID_OFF] << 3) |
+                       ((buf[HI3110_FIFO_WOTIME_ID_OFF + 1] & 0xE0) >> 5);
+       }
+
+       /* Data length */
+       frame->can_dlc = get_can_dlc(buf[HI3110_FIFO_WOTIME_DLC_OFF] & 0x0F);
+
+       if (buf[HI3110_FIFO_WOTIME_ID_OFF + 3] & HI3110_FIFO_WOTIME_ID_RTR)
+               frame->can_id |= CAN_RTR_FLAG;
+       else
+               memcpy(frame->data, buf + HI3110_FIFO_WOTIME_DAT_OFF,
+                      frame->can_dlc);
+
+       priv->net->stats.rx_packets++;
+       priv->net->stats.rx_bytes += frame->can_dlc;
+
+       can_led_event(priv->net, CAN_LED_EVENT_RX);
+
+       netif_rx_ni(skb);
+}
+
+static void hi3110_hw_sleep(struct spi_device *spi)
+{
+       hi3110_write(spi, HI3110_WRITE_CTRL0, HI3110_CTRL0_SLEEP_MODE);
+}
+
+static netdev_tx_t hi3110_hard_start_xmit(struct sk_buff *skb,
+                                         struct net_device *net)
+{
+       struct hi3110_priv *priv = netdev_priv(net);
+       struct spi_device *spi = priv->spi;
+
+       if (priv->tx_skb || priv->tx_len) {
+               dev_err(&spi->dev, "hard_xmit called while tx busy\n");
+               return NETDEV_TX_BUSY;
+       }
+
+       if (can_dropped_invalid_skb(net, skb))
+               return NETDEV_TX_OK;
+
+       netif_stop_queue(net);
+       priv->tx_skb = skb;
+       queue_work(priv->wq, &priv->tx_work);
+
+       return NETDEV_TX_OK;
+}
+
+static int hi3110_do_set_mode(struct net_device *net, enum can_mode mode)
+{
+       struct hi3110_priv *priv = netdev_priv(net);
+
+       switch (mode) {
+       case CAN_MODE_START:
+               hi3110_clean(net);
+               /* We have to delay work since SPI I/O may sleep */
+               priv->can.state = CAN_STATE_ERROR_ACTIVE;
+               priv->restart_tx = 1;
+               if (priv->can.restart_ms == 0)
+                       priv->after_suspend = HI3110_AFTER_SUSPEND_RESTART;
+               queue_work(priv->wq, &priv->restart_work);
+               break;
+       default:
+               return -EOPNOTSUPP;
+       }
+
+       return 0;
+}
+
+static int hi3110_get_berr_counter(const struct net_device *net,
+                                  struct can_berr_counter *bec)
+{
+       struct hi3110_priv *priv = netdev_priv(net);
+       struct spi_device *spi = priv->spi;
+
+       bec->txerr = hi3110_read(spi, HI3110_READ_TEC);
+       bec->rxerr = hi3110_read(spi, HI3110_READ_REC);
+
+       return 0;
+}
+
+static int hi3110_set_normal_mode(struct spi_device *spi)
+{
+       struct hi3110_priv *priv = spi_get_drvdata(spi);
+       u8 reg = 0;
+
+       hi3110_write(spi, HI3110_WRITE_INTE, HI3110_INT_BUSERR |
+                    HI3110_INT_RXFIFO | HI3110_INT_TXCPLT);
+
+       /* Enable TX */
+       hi3110_write(spi, HI3110_WRITE_CTRL1, HI3110_CTRL1_TXEN);
+
+       if (priv->can.ctrlmode & CAN_CTRLMODE_LOOPBACK)
+               reg = HI3110_CTRL0_LOOPBACK_MODE;
+       else if (priv->can.ctrlmode & CAN_CTRLMODE_LISTENONLY)
+               reg = HI3110_CTRL0_MONITOR_MODE;
+       else
+               reg = HI3110_CTRL0_NORMAL_MODE;
+
+       hi3110_write(spi, HI3110_WRITE_CTRL0, reg);
+
+       /* Wait for the device to enter the mode */
+       mdelay(HI3110_OST_DELAY_MS);
+       reg = hi3110_read(spi, HI3110_READ_CTRL0);
+       if ((reg & HI3110_CTRL0_MODE_MASK) != reg)
+               return -EBUSY;
+
+       priv->can.state = CAN_STATE_ERROR_ACTIVE;
+       return 0;
+}
+
+static int hi3110_do_set_bittiming(struct net_device *net)
+{
+       struct hi3110_priv *priv = netdev_priv(net);
+       struct can_bittiming *bt = &priv->can.bittiming;
+       struct spi_device *spi = priv->spi;
+
+       hi3110_write(spi, HI3110_WRITE_BTR0,
+                    ((bt->sjw - 1) << HI3110_BTR0_SJW_SHIFT) |
+                    ((bt->brp - 1) << HI3110_BTR0_BRP_SHIFT));
+
+       hi3110_write(spi, HI3110_WRITE_BTR1,
+                    (priv->can.ctrlmode &
+                     CAN_CTRLMODE_3_SAMPLES ?
+                     HI3110_BTR1_SAMP_3PERBIT : HI3110_BTR1_SAMP_1PERBIT) |
+                    ((bt->phase_seg1 + bt->prop_seg - 1)
+                     << HI3110_BTR1_TSEG1_SHIFT) |
+                    ((bt->phase_seg2 - 1) << HI3110_BTR1_TSEG2_SHIFT));
+
+       dev_dbg(&spi->dev, "BT: 0x%02x 0x%02x\n",
+               hi3110_read(spi, HI3110_READ_BTR0),
+               hi3110_read(spi, HI3110_READ_BTR1));
+
+       return 0;
+}
+
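
A worked timing example: the probe below sets priv->can.clock.freq = freq / 2, so with a hypothetical 16 MHz oscillator the CAN core computes bit timings against 8 MHz. One solution it can pick for 500 kbit/s is brp = 1 with 16 tq per bit; the register values then follow directly from the shifts above (tseg1 here means prop_seg + phase_seg1, as in the code). The oscillator and segment choices are illustrative assumptions, not driver requirements:

	/* 8 MHz CAN clock, brp = 1, sjw = 1:
	 *   tq  = brp / 8 MHz = 125 ns
	 *   bit = sync(1) + tseg1(10) + tseg2(5) = 16 tq  ->  500 kbit/s
	 * BTR0 = ((sjw - 1) << 6) | (brp - 1)                    = 0x00
	 * BTR1 = 1-sample mode | ((tseg2 - 1) << 4) | (tseg1 - 1) = 0x49
	 */
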
+static int hi3110_setup(struct net_device *net)
+{
+       hi3110_do_set_bittiming(net);
+       return 0;
+}
+
+static int hi3110_hw_reset(struct spi_device *spi)
+{
+       u8 reg;
+       int ret;
+
+       /* Wait for oscillator startup timer after power up */
+       mdelay(HI3110_OST_DELAY_MS);
+
+       ret = hi3110_cmd(spi, HI3110_MASTER_RESET);
+       if (ret)
+               return ret;
+
+       /* Wait for oscillator startup timer after reset */
+       mdelay(HI3110_OST_DELAY_MS);
+
+       reg = hi3110_read(spi, HI3110_READ_CTRL0);
+       if ((reg & HI3110_CTRL0_MODE_MASK) != HI3110_CTRL0_INIT_MODE)
+               return -ENODEV;
+
+       /* As per the datasheet it appears the error flags are
+        * not cleared on reset. Explicitly clear them by performing a read
+        */
+       hi3110_read(spi, HI3110_READ_ERR);
+
+       return 0;
+}
+
+static int hi3110_hw_probe(struct spi_device *spi)
+{
+       u8 statf;
+
+       hi3110_hw_reset(spi);
+
+       /* Confirm correct operation by checking against reset values
+        * in datasheet
+        */
+       statf = hi3110_read(spi, HI3110_READ_STATF);
+
+       dev_dbg(&spi->dev, "statf: %02X\n", statf);
+
+       if (statf != 0x82)
+               return -ENODEV;
+
+       return 0;
+}
+
+static int hi3110_power_enable(struct regulator *reg, int enable)
+{
+       if (IS_ERR_OR_NULL(reg))
+               return 0;
+
+       if (enable)
+               return regulator_enable(reg);
+       else
+               return regulator_disable(reg);
+}
+
+static int hi3110_stop(struct net_device *net)
+{
+       struct hi3110_priv *priv = netdev_priv(net);
+       struct spi_device *spi = priv->spi;
+
+       close_candev(net);
+
+       priv->force_quit = 1;
+       free_irq(spi->irq, priv);
+       destroy_workqueue(priv->wq);
+       priv->wq = NULL;
+
+       mutex_lock(&priv->hi3110_lock);
+
+       /* Disable transmit, interrupts and clear flags */
+       hi3110_write(spi, HI3110_WRITE_CTRL1, 0x0);
+       hi3110_write(spi, HI3110_WRITE_INTE, 0x0);
+       hi3110_read(spi, HI3110_READ_INTF);
+
+       hi3110_clean(net);
+
+       hi3110_hw_sleep(spi);
+
+       hi3110_power_enable(priv->transceiver, 0);
+
+       priv->can.state = CAN_STATE_STOPPED;
+
+       mutex_unlock(&priv->hi3110_lock);
+
+       can_led_event(net, CAN_LED_EVENT_STOP);
+
+       return 0;
+}
+
+static void hi3110_tx_work_handler(struct work_struct *ws)
+{
+       struct hi3110_priv *priv = container_of(ws, struct hi3110_priv,
+                                               tx_work);
+       struct spi_device *spi = priv->spi;
+       struct net_device *net = priv->net;
+       struct can_frame *frame;
+
+       mutex_lock(&priv->hi3110_lock);
+       if (priv->tx_skb) {
+               if (priv->can.state == CAN_STATE_BUS_OFF) {
+                       hi3110_clean(net);
+               } else {
+                       frame = (struct can_frame *)priv->tx_skb->data;
+                       hi3110_hw_tx(spi, frame);
+                       priv->tx_len = 1 + frame->can_dlc;
+                       can_put_echo_skb(priv->tx_skb, net, 0);
+                       priv->tx_skb = NULL;
+               }
+       }
+       mutex_unlock(&priv->hi3110_lock);
+}
+
+static void hi3110_restart_work_handler(struct work_struct *ws)
+{
+       struct hi3110_priv *priv = container_of(ws, struct hi3110_priv,
+                                               restart_work);
+       struct spi_device *spi = priv->spi;
+       struct net_device *net = priv->net;
+
+       mutex_lock(&priv->hi3110_lock);
+       if (priv->after_suspend) {
+               hi3110_hw_reset(spi);
+               hi3110_setup(net);
+               if (priv->after_suspend & HI3110_AFTER_SUSPEND_RESTART) {
+                       hi3110_set_normal_mode(spi);
+               } else if (priv->after_suspend & HI3110_AFTER_SUSPEND_UP) {
+                       netif_device_attach(net);
+                       hi3110_clean(net);
+                       hi3110_set_normal_mode(spi);
+                       netif_wake_queue(net);
+               } else {
+                       hi3110_hw_sleep(spi);
+               }
+               priv->after_suspend = 0;
+               priv->force_quit = 0;
+       }
+
+       if (priv->restart_tx) {
+               priv->restart_tx = 0;
+               hi3110_hw_reset(spi);
+               hi3110_setup(net);
+               hi3110_clean(net);
+               hi3110_set_normal_mode(spi);
+               netif_wake_queue(net);
+       }
+       mutex_unlock(&priv->hi3110_lock);
+}
+
+static irqreturn_t hi3110_can_ist(int irq, void *dev_id)
+{
+       struct hi3110_priv *priv = dev_id;
+       struct spi_device *spi = priv->spi;
+       struct net_device *net = priv->net;
+
+       mutex_lock(&priv->hi3110_lock);
+
+       while (!priv->force_quit) {
+               enum can_state new_state;
+               u8 intf, eflag, statf;
+
+               while (!(HI3110_STAT_RXFMTY &
+                        (statf = hi3110_read(spi, HI3110_READ_STATF)))) {
+                       hi3110_hw_rx(spi);
+               }
+
+               intf = hi3110_read(spi, HI3110_READ_INTF);
+               eflag = hi3110_read(spi, HI3110_READ_ERR);
+               /* Update can state */
+               if (eflag & HI3110_ERR_BUSOFF)
+                       new_state = CAN_STATE_BUS_OFF;
+               else if (eflag & HI3110_ERR_PASSIVE_MASK)
+                       new_state = CAN_STATE_ERROR_PASSIVE;
+               else if (statf & HI3110_STAT_ERRW)
+                       new_state = CAN_STATE_ERROR_WARNING;
+               else
+                       new_state = CAN_STATE_ERROR_ACTIVE;
+
+               if (new_state != priv->can.state) {
+                       struct can_frame *cf;
+                       struct sk_buff *skb;
+                       enum can_state rx_state, tx_state;
+                       u8 rxerr, txerr;
+
+                       skb = alloc_can_err_skb(net, &cf);
+                       if (!skb)
+                               break;
+
+                       txerr = hi3110_read(spi, HI3110_READ_TEC);
+                       rxerr = hi3110_read(spi, HI3110_READ_REC);
+                       cf->data[6] = txerr;
+                       cf->data[7] = rxerr;
+                       tx_state = txerr >= rxerr ? new_state : 0;
+                       rx_state = txerr <= rxerr ? new_state : 0;
+                       can_change_state(net, cf, tx_state, rx_state);
+                       netif_rx_ni(skb);
+
+                       if (new_state == CAN_STATE_BUS_OFF) {
+                               can_bus_off(net);
+                               if (priv->can.restart_ms == 0) {
+                                       priv->force_quit = 1;
+                                       hi3110_hw_sleep(spi);
+                                       break;
+                               }
+                       }
+               }
+
+               /* Update bus errors */
+               if ((intf & HI3110_INT_BUSERR) &&
+                   (priv->can.ctrlmode & CAN_CTRLMODE_BERR_REPORTING)) {
+                       struct can_frame *cf;
+                       struct sk_buff *skb;
+
+                       /* Check for protocol errors */
+                       if (eflag & HI3110_ERR_PROTOCOL_MASK) {
+                               skb = alloc_can_err_skb(net, &cf);
+                               if (!skb)
+                                       break;
+
+                               cf->can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR;
+                               priv->can.can_stats.bus_error++;
+                               priv->net->stats.rx_errors++;
+                               if (eflag & HI3110_ERR_BITERR)
+                                       cf->data[2] |= CAN_ERR_PROT_BIT;
+                               else if (eflag & HI3110_ERR_FRMERR)
+                                       cf->data[2] |= CAN_ERR_PROT_FORM;
+                               else if (eflag & HI3110_ERR_STUFERR)
+                                       cf->data[2] |= CAN_ERR_PROT_STUFF;
+                               else if (eflag & HI3110_ERR_CRCERR)
+                                       cf->data[3] |= CAN_ERR_PROT_LOC_CRC_SEQ;
+                               else if (eflag & HI3110_ERR_ACKERR)
+                                       cf->data[3] |= CAN_ERR_PROT_LOC_ACK;
+
+                               cf->data[6] = hi3110_read(spi, HI3110_READ_TEC);
+                               cf->data[7] = hi3110_read(spi, HI3110_READ_REC);
+                               netdev_dbg(priv->net, "Bus Error\n");
+                               netif_rx_ni(skb);
+                       }
+               }
+
+               if (intf == 0)
+                       break;
+
+               if (intf & HI3110_INT_TXCPLT) {
+                       net->stats.tx_packets++;
+                       net->stats.tx_bytes += priv->tx_len - 1;
+                       can_led_event(net, CAN_LED_EVENT_TX);
+                       if (priv->tx_len) {
+                               can_get_echo_skb(net, 0);
+                               priv->tx_len = 0;
+                       }
+                       netif_wake_queue(net);
+               }
+       }
+       mutex_unlock(&priv->hi3110_lock);
+       return IRQ_HANDLED;
+}
+
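
For orientation while reading the state machine above: the interrupt handler derives the CAN state from the chip's ERR and STATF bits rather than from raw error counters, but the states themselves follow the standard CAN fault-confinement rules:

	/* Generic CAN fault confinement (ISO 11898), not chip-specific:
	 *   error-active : TEC < 96   && REC < 96
	 *   error-warning: TEC >= 96  || REC >= 96
	 *   error-passive: TEC >= 128 || REC >= 128
	 *   bus-off      : TEC >= 256
	 * This is also why TEC/REC are copied into cf->data[6]/data[7] on
	 * every state change: user space can see how close the node is to
	 * the next threshold.
	 */
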
+static int hi3110_open(struct net_device *net)
+{
+       struct hi3110_priv *priv = netdev_priv(net);
+       struct spi_device *spi = priv->spi;
+       unsigned long flags = IRQF_ONESHOT | IRQF_TRIGGER_RISING;
+       int ret;
+
+       ret = open_candev(net);
+       if (ret)
+               return ret;
+
+       mutex_lock(&priv->hi3110_lock);
+       hi3110_power_enable(priv->transceiver, 1);
+
+       priv->force_quit = 0;
+       priv->tx_skb = NULL;
+       priv->tx_len = 0;
+
+       ret = request_threaded_irq(spi->irq, NULL, hi3110_can_ist,
+                                  flags, DEVICE_NAME, priv);
+       if (ret) {
+               dev_err(&spi->dev, "failed to acquire irq %d\n", spi->irq);
+               goto out_close;
+       }
+
+       priv->wq = alloc_workqueue("hi3110_wq", WQ_FREEZABLE | WQ_MEM_RECLAIM,
+                                  0);
+       if (!priv->wq) {
+               ret = -ENOMEM;
+               goto out_free_irq;
+       }
+       INIT_WORK(&priv->tx_work, hi3110_tx_work_handler);
+       INIT_WORK(&priv->restart_work, hi3110_restart_work_handler);
+
+       ret = hi3110_hw_reset(spi);
+       if (ret)
+               goto out_free_wq;
+
+       ret = hi3110_setup(net);
+       if (ret)
+               goto out_free_wq;
+
+       ret = hi3110_set_normal_mode(spi);
+       if (ret)
+               goto out_free_wq;
+
+       can_led_event(net, CAN_LED_EVENT_OPEN);
+       netif_wake_queue(net);
+       mutex_unlock(&priv->hi3110_lock);
+
+       return 0;
+
+ out_free_wq:
+       destroy_workqueue(priv->wq);
+ out_free_irq:
+       free_irq(spi->irq, priv);
+       hi3110_hw_sleep(spi);
+ out_close:
+       hi3110_power_enable(priv->transceiver, 0);
+       close_candev(net);
+       mutex_unlock(&priv->hi3110_lock);
+       return ret;
+}
+
+static const struct net_device_ops hi3110_netdev_ops = {
+       .ndo_open = hi3110_open,
+       .ndo_stop = hi3110_stop,
+       .ndo_start_xmit = hi3110_hard_start_xmit,
+};
+
+static const struct of_device_id hi3110_of_match[] = {
+       {
+               .compatible     = "holt,hi3110",
+               .data           = (void *)CAN_HI3110_HI3110,
+       },
+       { }
+};
+MODULE_DEVICE_TABLE(of, hi3110_of_match);
+
+static const struct spi_device_id hi3110_id_table[] = {
+       {
+               .name           = "hi3110",
+               .driver_data    = (kernel_ulong_t)CAN_HI3110_HI3110,
+       },
+       { }
+};
+MODULE_DEVICE_TABLE(spi, hi3110_id_table);
+
+static int hi3110_can_probe(struct spi_device *spi)
+{
+       const struct of_device_id *of_id = of_match_device(hi3110_of_match,
+                                                          &spi->dev);
+       struct net_device *net;
+       struct hi3110_priv *priv;
+       struct clk *clk;
+       int freq, ret;
+
+       clk = devm_clk_get(&spi->dev, NULL);
+       if (IS_ERR(clk)) {
+               dev_err(&spi->dev, "no CAN clock source defined\n");
+               return PTR_ERR(clk);
+       }
+       freq = clk_get_rate(clk);
+
+       /* Sanity check */
+       if (freq > 40000000)
+               return -ERANGE;
+
+       /* Allocate can/net device */
+       net = alloc_candev(sizeof(struct hi3110_priv), HI3110_TX_ECHO_SKB_MAX);
+       if (!net)
+               return -ENOMEM;
+
+       if (!IS_ERR(clk)) {
+               ret = clk_prepare_enable(clk);
+               if (ret)
+                       goto out_free;
+       }
+
+       net->netdev_ops = &hi3110_netdev_ops;
+       net->flags |= IFF_ECHO;
+
+       priv = netdev_priv(net);
+       priv->can.bittiming_const = &hi3110_bittiming_const;
+       priv->can.do_set_mode = hi3110_do_set_mode;
+       priv->can.do_get_berr_counter = hi3110_get_berr_counter;
+       priv->can.clock.freq = freq / 2;
+       priv->can.ctrlmode_supported = CAN_CTRLMODE_3_SAMPLES |
+               CAN_CTRLMODE_LOOPBACK |
+               CAN_CTRLMODE_LISTENONLY |
+               CAN_CTRLMODE_BERR_REPORTING;
+
+       if (of_id)
+               priv->model = (enum hi3110_model)of_id->data;
+       else
+               priv->model = spi_get_device_id(spi)->driver_data;
+       priv->net = net;
+       priv->clk = clk;
+
+       spi_set_drvdata(spi, priv);
+
+       /* Configure the SPI bus */
+       spi->bits_per_word = 8;
+       ret = spi_setup(spi);
+       if (ret)
+               goto out_clk;
+
+       priv->power = devm_regulator_get_optional(&spi->dev, "vdd");
+       priv->transceiver = devm_regulator_get_optional(&spi->dev, "xceiver");
+       if ((PTR_ERR(priv->power) == -EPROBE_DEFER) ||
+           (PTR_ERR(priv->transceiver) == -EPROBE_DEFER)) {
+               ret = -EPROBE_DEFER;
+               goto out_clk;
+       }
+
+       ret = hi3110_power_enable(priv->power, 1);
+       if (ret)
+               goto out_clk;
+
+       priv->spi = spi;
+       mutex_init(&priv->hi3110_lock);
+
+       /* If requested, allocate DMA buffers */
+       if (hi3110_enable_dma) {
+               spi->dev.coherent_dma_mask = ~0;
+
+               /* Minimum coherent DMA allocation is PAGE_SIZE, so allocate
+                * that much and share it between Tx and Rx DMA buffers.
+                */
+               priv->spi_tx_buf = dmam_alloc_coherent(&spi->dev,
+                                                      PAGE_SIZE,
+                                                      &priv->spi_tx_dma,
+                                                      GFP_DMA);
+
+               if (priv->spi_tx_buf) {
+                       priv->spi_rx_buf = (priv->spi_tx_buf + (PAGE_SIZE / 2));
+                       priv->spi_rx_dma = (dma_addr_t)(priv->spi_tx_dma +
+                                                       (PAGE_SIZE / 2));
+               } else {
+                       /* Fall back to non-DMA */
+                       hi3110_enable_dma = 0;
+               }
+       }
+
+       /* Allocate non-DMA buffers */
+       if (!hi3110_enable_dma) {
+               priv->spi_tx_buf = devm_kzalloc(&spi->dev, HI3110_RX_BUF_LEN,
+                                               GFP_KERNEL);
+               if (!priv->spi_tx_buf) {
+                       ret = -ENOMEM;
+                       goto error_probe;
+               }
+               priv->spi_rx_buf = devm_kzalloc(&spi->dev, HI3110_RX_BUF_LEN,
+                                               GFP_KERNEL);
+
+               if (!priv->spi_rx_buf) {
+                       ret = -ENOMEM;
+                       goto error_probe;
+               }
+       }
+
+       SET_NETDEV_DEV(net, &spi->dev);
+
+       ret = hi3110_hw_probe(spi);
+       if (ret) {
+               if (ret == -ENODEV)
+                       dev_err(&spi->dev, "Cannot initialize %x. Wrong wiring?\n",
+                               priv->model);
+               goto error_probe;
+       }
+       hi3110_hw_sleep(spi);
+
+       ret = register_candev(net);
+       if (ret)
+               goto error_probe;
+
+       devm_can_led_init(net);
+       netdev_info(net, "%x successfully initialized.\n", priv->model);
+
+       return 0;
+
+ error_probe:
+       hi3110_power_enable(priv->power, 0);
+
+ out_clk:
+       if (!IS_ERR(clk))
+               clk_disable_unprepare(clk);
+
+ out_free:
+       free_candev(net);
+
+       dev_err(&spi->dev, "Probe failed, err=%d\n", -ret);
+       return ret;
+}
+
+static int hi3110_can_remove(struct spi_device *spi)
+{
+       struct hi3110_priv *priv = spi_get_drvdata(spi);
+       struct net_device *net = priv->net;
+
+       unregister_candev(net);
+
+       hi3110_power_enable(priv->power, 0);
+
+       if (!IS_ERR(priv->clk))
+               clk_disable_unprepare(priv->clk);
+
+       free_candev(net);
+
+       return 0;
+}
+
+static int __maybe_unused hi3110_can_suspend(struct device *dev)
+{
+       struct spi_device *spi = to_spi_device(dev);
+       struct hi3110_priv *priv = spi_get_drvdata(spi);
+       struct net_device *net = priv->net;
+
+       priv->force_quit = 1;
+       disable_irq(spi->irq);
+
+       /* Note: at this point neither IST nor workqueues are running.
+        * open/stop cannot be called anyway so locking is not needed
+        */
+       if (netif_running(net)) {
+               netif_device_detach(net);
+
+               hi3110_hw_sleep(spi);
+               hi3110_power_enable(priv->transceiver, 0);
+               priv->after_suspend = HI3110_AFTER_SUSPEND_UP;
+       } else {
+               priv->after_suspend = HI3110_AFTER_SUSPEND_DOWN;
+       }
+
+       if (!IS_ERR_OR_NULL(priv->power)) {
+               regulator_disable(priv->power);
+               priv->after_suspend |= HI3110_AFTER_SUSPEND_POWER;
+       }
+
+       return 0;
+}
+
+static int __maybe_unused hi3110_can_resume(struct device *dev)
+{
+       struct spi_device *spi = to_spi_device(dev);
+       struct hi3110_priv *priv = spi_get_drvdata(spi);
+
+       if (priv->after_suspend & HI3110_AFTER_SUSPEND_POWER)
+               hi3110_power_enable(priv->power, 1);
+
+       if (priv->after_suspend & HI3110_AFTER_SUSPEND_UP) {
+               hi3110_power_enable(priv->transceiver, 1);
+               queue_work(priv->wq, &priv->restart_work);
+       } else {
+               priv->after_suspend = 0;
+       }
+
+       priv->force_quit = 0;
+       enable_irq(spi->irq);
+       return 0;
+}
+
+static SIMPLE_DEV_PM_OPS(hi3110_can_pm_ops, hi3110_can_suspend, hi3110_can_resume);
+
+static struct spi_driver hi3110_can_driver = {
+       .driver = {
+               .name = DEVICE_NAME,
+               .of_match_table = hi3110_of_match,
+               .pm = &hi3110_can_pm_ops,
+       },
+       .id_table = hi3110_id_table,
+       .probe = hi3110_can_probe,
+       .remove = hi3110_can_remove,
+};
+
+module_spi_driver(hi3110_can_driver);
+
+MODULE_AUTHOR("Akshay Bhat <akshay.bhat@timesys.com>");
+MODULE_AUTHOR("Casey Fitzpatrick <casey.fitzpatrick@timesys.com>");
+MODULE_DESCRIPTION("Holt HI-3110 CAN driver");
+MODULE_LICENSE("GPL v2");
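
The suspend/resume pair above communicates through priv->after_suspend: suspend records exactly what it tore down, and resume replays only those steps. A condensed sketch of that bookkeeping (the stand-in condition is hypothetical; the HI3110_AFTER_SUSPEND_* flags are the driver's own distinct bits):

        /* suspend: remember what was torn down */
        priv->after_suspend = netif_running(net) ? HI3110_AFTER_SUSPEND_UP
                                                 : HI3110_AFTER_SUSPEND_DOWN;
        if (power_was_disabled)         /* stand-in for the regulator check */
                priv->after_suspend |= HI3110_AFTER_SUSPEND_POWER;

        /* resume: replay only those steps, deferring hw re-init to the wq */
        if (priv->after_suspend & HI3110_AFTER_SUSPEND_POWER)
                hi3110_power_enable(priv->power, 1);
        if (priv->after_suspend & HI3110_AFTER_SUSPEND_UP)
                queue_work(priv->wq, &priv->restart_work);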
index 6749b1829469411315dedac1634b7be974cf21d8..b8aac538275c3e6c8da27533a98a1f00d53017c0 100644 (file)
  *
  */
 
-/*
- * Your platform definitions should specify module ram offsets and interrupt
- * number to use as follows:
- *
- * static struct ti_hecc_platform_data am3517_evm_hecc_pdata = {
- *         .scc_hecc_offset        = 0,
- *         .scc_ram_offset         = 0x3000,
- *         .hecc_ram_offset        = 0x3000,
- *         .mbx_offset             = 0x2000,
- *         .int_line               = 0,
- *         .revision               = 1,
- *         .transceiver_switch     = hecc_phy_control,
- * };
- *
- * Please see include/linux/can/platform/ti_hecc.h for description of
- * above fields.
- *
- */
-
 #include <linux/module.h>
 #include <linux/kernel.h>
 #include <linux/types.h>
 #include <linux/platform_device.h>
 #include <linux/clk.h>
 #include <linux/io.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/regulator/consumer.h>
 
 #include <linux/can/dev.h>
 #include <linux/can/error.h>
 #include <linux/can/led.h>
-#include <linux/can/platform/ti_hecc.h>
 
 #define DRV_NAME "ti_hecc"
 #define HECC_MODULE_VERSION     "0.7"
@@ -214,15 +197,14 @@ struct ti_hecc_priv {
        struct net_device *ndev;
        struct clk *clk;
        void __iomem *base;
-       u32 scc_ram_offset;
-       u32 hecc_ram_offset;
-       u32 mbx_offset;
-       u32 int_line;
+       void __iomem *hecc_ram;
+       void __iomem *mbx;
+       bool use_hecc1int;
        spinlock_t mbx_lock; /* CANME register needs protection */
        u32 tx_head;
        u32 tx_tail;
        u32 rx_next;
-       void (*transceiver_switch)(int);
+       struct regulator *reg_xceiver;
 };
 
 static inline int get_tx_head_mb(struct ti_hecc_priv *priv)
@@ -242,20 +224,18 @@ static inline int get_tx_head_prio(struct ti_hecc_priv *priv)
 
 static inline void hecc_write_lam(struct ti_hecc_priv *priv, u32 mbxno, u32 val)
 {
-       __raw_writel(val, priv->base + priv->hecc_ram_offset + mbxno * 4);
+       __raw_writel(val, priv->hecc_ram + mbxno * 4);
 }
 
 static inline void hecc_write_mbx(struct ti_hecc_priv *priv, u32 mbxno,
        u32 reg, u32 val)
 {
-       __raw_writel(val, priv->base + priv->mbx_offset + mbxno * 0x10 +
-                       reg);
+       __raw_writel(val, priv->mbx + mbxno * 0x10 + reg);
 }
 
 static inline u32 hecc_read_mbx(struct ti_hecc_priv *priv, u32 mbxno, u32 reg)
 {
-       return __raw_readl(priv->base + priv->mbx_offset + mbxno * 0x10 +
-                       reg);
+       return __raw_readl(priv->mbx + mbxno * 0x10 + reg);
 }
 
 static inline void hecc_write(struct ti_hecc_priv *priv, u32 reg, u32 val)
@@ -311,11 +291,16 @@ static int ti_hecc_set_btc(struct ti_hecc_priv *priv)
        return 0;
 }
 
-static void ti_hecc_transceiver_switch(const struct ti_hecc_priv *priv,
-                                       int on)
+static int ti_hecc_transceiver_switch(const struct ti_hecc_priv *priv,
+                                     int on)
 {
-       if (priv->transceiver_switch)
-               priv->transceiver_switch(on);
+       if (!priv->reg_xceiver)
+               return 0;
+
+       if (on)
+               return regulator_enable(priv->reg_xceiver);
+       else
+               return regulator_disable(priv->reg_xceiver);
 }
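
ti_hecc_transceiver_switch() now drives an optional regulator instead of a board-supplied callback. The matching lookup in ti_hecc_probe() further down treats the supply as optional: only probe deferral is propagated, and any other lookup failure degrades to running without transceiver control. A sketch of that idiom:

        /* optional "xceiver" supply, as acquired in ti_hecc_probe() below */
        reg_xceiver = devm_regulator_get(&pdev->dev, "xceiver");
        if (PTR_ERR(reg_xceiver) == -EPROBE_DEFER)
                return -EPROBE_DEFER;   /* supplier not ready, retry probe */
        else if (IS_ERR(reg_xceiver))
                reg_xceiver = NULL;     /* no supply: switch becomes a no-op */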
 
 static void ti_hecc_reset(struct net_device *ndev)
@@ -409,7 +394,7 @@ static void ti_hecc_start(struct net_device *ndev)
 
        /* Prevent message over-write & Enable interrupts */
        hecc_write(priv, HECC_CANOPC, HECC_SET_REG);
-       if (priv->int_line) {
+       if (priv->use_hecc1int) {
                hecc_write(priv, HECC_CANMIL, HECC_SET_REG);
                hecc_write(priv, HECC_CANGIM, HECC_CANGIM_DEF_MASK |
                        HECC_CANGIM_I1EN | HECC_CANGIM_SIL);
@@ -760,7 +745,7 @@ static irqreturn_t ti_hecc_interrupt(int irq, void *dev_id)
        unsigned long ack, flags;
 
        int_status = hecc_read(priv,
-               (priv->int_line) ? HECC_CANGIF1 : HECC_CANGIF0);
+               (priv->use_hecc1int) ? HECC_CANGIF1 : HECC_CANGIF0);
 
        if (!int_status)
                return IRQ_NONE;
@@ -806,7 +791,7 @@ static irqreturn_t ti_hecc_interrupt(int irq, void *dev_id)
        }
 
        /* clear all interrupt conditions - read back to avoid spurious ints */
-       if (priv->int_line) {
+       if (priv->use_hecc1int) {
                hecc_write(priv, HECC_CANGIF1, HECC_SET_REG);
                int_status = hecc_read(priv, HECC_CANGIF1);
        } else {
@@ -872,58 +857,87 @@ static const struct net_device_ops ti_hecc_netdev_ops = {
        .ndo_change_mtu         = can_change_mtu,
 };
 
+static const struct of_device_id ti_hecc_dt_ids[] = {
+       {
+               .compatible = "ti,am3517-hecc",
+       },
+       { }
+};
+MODULE_DEVICE_TABLE(of, ti_hecc_dt_ids);
+
 static int ti_hecc_probe(struct platform_device *pdev)
 {
        struct net_device *ndev = (struct net_device *)0;
        struct ti_hecc_priv *priv;
-       struct ti_hecc_platform_data *pdata;
-       struct resource *mem, *irq;
-       void __iomem *addr;
+       struct device_node *np = pdev->dev.of_node;
+       struct resource *res, *irq;
+       struct regulator *reg_xceiver;
        int err = -ENODEV;
 
-       pdata = dev_get_platdata(&pdev->dev);
-       if (!pdata) {
-               dev_err(&pdev->dev, "No platform data\n");
-               goto probe_exit;
+       if (!IS_ENABLED(CONFIG_OF) || !np)
+               return -EINVAL;
+
+       reg_xceiver = devm_regulator_get(&pdev->dev, "xceiver");
+       if (PTR_ERR(reg_xceiver) == -EPROBE_DEFER)
+               return -EPROBE_DEFER;
+       else if (IS_ERR(reg_xceiver))
+               reg_xceiver = NULL;
+
+       ndev = alloc_candev(sizeof(struct ti_hecc_priv), HECC_MAX_TX_MBOX);
+       if (!ndev) {
+               dev_err(&pdev->dev, "alloc_candev failed\n");
+               return -ENOMEM;
        }
+       priv = netdev_priv(ndev);
 
-       mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-       if (!mem) {
-               dev_err(&pdev->dev, "No mem resources\n");
-               goto probe_exit;
+       /* handle hecc memory */
+       res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "hecc");
+       if (!res) {
+               dev_err(&pdev->dev, "can't get IORESOURCE_MEM hecc\n");
+               return -EINVAL;
        }
-       irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
-       if (!irq) {
-               dev_err(&pdev->dev, "No irq resource\n");
-               goto probe_exit;
+
+       priv->base = devm_ioremap_resource(&pdev->dev, res);
+       if (IS_ERR(priv->base)) {
+               dev_err(&pdev->dev, "hecc ioremap failed\n");
+               return PTR_ERR(priv->base);
        }
-       if (!request_mem_region(mem->start, resource_size(mem), pdev->name)) {
-               dev_err(&pdev->dev, "HECC region already claimed\n");
-               err = -EBUSY;
-               goto probe_exit;
+
+       /* handle hecc-ram memory */
+       res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "hecc-ram");
+       if (!res) {
+               dev_err(&pdev->dev, "can't get IORESOURCE_MEM hecc-ram\n");
+               return -EINVAL;
        }
-       addr = ioremap(mem->start, resource_size(mem));
-       if (!addr) {
-               dev_err(&pdev->dev, "ioremap failed\n");
-               err = -ENOMEM;
-               goto probe_exit_free_region;
+
+       priv->hecc_ram = devm_ioremap_resource(&pdev->dev, res);
+       if (IS_ERR(priv->hecc_ram)) {
+               dev_err(&pdev->dev, "hecc-ram ioremap failed\n");
+               return PTR_ERR(priv->hecc_ram);
        }
 
-       ndev = alloc_candev(sizeof(struct ti_hecc_priv), HECC_MAX_TX_MBOX);
-       if (!ndev) {
-               dev_err(&pdev->dev, "alloc_candev failed\n");
-               err = -ENOMEM;
-               goto probe_exit_iounmap;
+       /* handle mbx memory */
+       res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "mbx");
+       if (!res) {
+               dev_err(&pdev->dev, "can't get IORESOURCE_MEM mbx\n");
+               return -EINVAL;
+       }
+
+       priv->mbx = devm_ioremap_resource(&pdev->dev, res);
+       if (IS_ERR(priv->mbx)) {
+               dev_err(&pdev->dev, "mbx ioremap failed\n");
+               return PTR_ERR(priv->mbx);
+       }
+
+       irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+       if (!irq) {
+               dev_err(&pdev->dev, "No irq resource\n");
+               goto probe_exit;
        }
 
-       priv = netdev_priv(ndev);
        priv->ndev = ndev;
-       priv->base = addr;
-       priv->scc_ram_offset = pdata->scc_ram_offset;
-       priv->hecc_ram_offset = pdata->hecc_ram_offset;
-       priv->mbx_offset = pdata->mbx_offset;
-       priv->int_line = pdata->int_line;
-       priv->transceiver_switch = pdata->transceiver_switch;
+       priv->reg_xceiver = reg_xceiver;
+       priv->use_hecc1int = of_property_read_bool(np, "ti,use-hecc1int");
 
        priv->can.bittiming_const = &ti_hecc_bittiming_const;
        priv->can.do_set_mode = ti_hecc_do_set_mode;
@@ -971,32 +985,23 @@ probe_exit_clk:
        clk_put(priv->clk);
 probe_exit_candev:
        free_candev(ndev);
-probe_exit_iounmap:
-       iounmap(addr);
-probe_exit_free_region:
-       release_mem_region(mem->start, resource_size(mem));
 probe_exit:
        return err;
 }
 
 static int ti_hecc_remove(struct platform_device *pdev)
 {
-       struct resource *res;
        struct net_device *ndev = platform_get_drvdata(pdev);
        struct ti_hecc_priv *priv = netdev_priv(ndev);
 
        unregister_candev(ndev);
        clk_disable_unprepare(priv->clk);
        clk_put(priv->clk);
-       res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-       iounmap(priv->base);
-       release_mem_region(res->start, resource_size(res));
        free_candev(ndev);
 
        return 0;
 }
 
-
 #ifdef CONFIG_PM
 static int ti_hecc_suspend(struct platform_device *pdev, pm_message_t state)
 {
@@ -1045,6 +1050,7 @@ static int ti_hecc_resume(struct platform_device *pdev)
 static struct platform_driver ti_hecc_driver = {
        .driver = {
                .name    = DRV_NAME,
+               .of_match_table = ti_hecc_dt_ids,
        },
        .probe = ti_hecc_probe,
        .remove = ti_hecc_remove,
index 91c876a0a647ba1b17bdc734db168c9ee456409e..da020418a6526bed0465eeaa67a3fd61d83afb34 100644 (file)
@@ -1412,31 +1412,39 @@ e100_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
        return rc;
 }
 
-static int e100_get_settings(struct net_device *dev,
-                            struct ethtool_cmd *cmd)
+static int e100_get_link_ksettings(struct net_device *dev,
+                                  struct ethtool_link_ksettings *cmd)
 {
        struct net_local *np = netdev_priv(dev);
+       u32 supported;
        int err;
 
        spin_lock_irq(&np->lock);
-       err = mii_ethtool_gset(&np->mii_if, cmd);
+       err = mii_ethtool_get_link_ksettings(&np->mii_if, cmd);
        spin_unlock_irq(&np->lock);
 
        /* The PHY may support 1000baseT, but the Etrax100 does not.  */
-       cmd->supported &= ~(SUPPORTED_1000baseT_Half
-                           | SUPPORTED_1000baseT_Full);
+       ethtool_convert_link_mode_to_legacy_u32(&supported,
+                                               cmd->link_modes.supported);
+
+       supported &= ~(SUPPORTED_1000baseT_Half | SUPPORTED_1000baseT_Full);
+
+       ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
+                                               supported);
+
        return err;
 }
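
Clearing modes in the new ksettings API takes the convert-mask-convert dance above. A helper sketch (the name is hypothetical) showing the same three steps in reusable form:

        /* Sketch: drop link modes the MAC cannot handle even when the PHY
         * advertises them; only legacy-u32-representable modes are touched. */
        static void sketch_mask_supported(struct ethtool_link_ksettings *cmd,
                                          u32 drop)
        {
                u32 supported;

                ethtool_convert_link_mode_to_legacy_u32(&supported,
                                                cmd->link_modes.supported);
                supported &= ~drop;
                ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
                                                        supported);
        }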
 
-static int e100_set_settings(struct net_device *dev,
-                            struct ethtool_cmd *ecmd)
+static int e100_set_link_ksettings(struct net_device *dev,
+                                  const struct ethtool_link_ksettings *ecmd)
 {
-       if (ecmd->autoneg == AUTONEG_ENABLE) {
+       if (ecmd->base.autoneg == AUTONEG_ENABLE) {
                e100_set_duplex(dev, autoneg);
                e100_set_speed(dev, 0);
        } else {
-               e100_set_duplex(dev, ecmd->duplex == DUPLEX_HALF ? half : full);
-               e100_set_speed(dev, ecmd->speed == SPEED_10 ? 10: 100);
+               e100_set_duplex(dev, ecmd->base.duplex == DUPLEX_HALF ?
+                               half : full);
+               e100_set_speed(dev, ecmd->base.speed == SPEED_10 ? 10 : 100);
        }
 
        return 0;
@@ -1459,11 +1467,11 @@ static int e100_nway_reset(struct net_device *dev)
 }
 
 static const struct ethtool_ops e100_ethtool_ops = {
-       .get_settings   = e100_get_settings,
-       .set_settings   = e100_set_settings,
        .get_drvinfo    = e100_get_drvinfo,
        .nway_reset     = e100_nway_reset,
        .get_link       = ethtool_op_get_link,
+       .get_link_ksettings     = e100_get_link_ksettings,
+       .set_link_ksettings     = e100_set_link_ksettings,
 };
 
 static int
index 065984670ff19a0a03c4818097f574de17907e87..31a2b229106dd40efd1b1a8525636e23515dbc87 100644 (file)
@@ -11,7 +11,7 @@ config NET_DSA_MV88E6060
 
 config NET_DSA_BCM_SF2
        tristate "Broadcom Starfighter 2 Ethernet switch support"
-       depends on HAS_IOMEM && NET_DSA
+       depends on HAS_IOMEM && NET_DSA && OF_MDIO
        select NET_DSA_TAG_BRCM
        select FIXED_PHY
        select BCM7XXX_PHY
@@ -34,4 +34,20 @@ config NET_DSA_QCA8K
          This enables support for the Qualcomm Atheros QCA8K Ethernet
          switch chips.
 
+config NET_DSA_LOOP
+       tristate "DSA mock-up Ethernet switch chip support"
+       depends on NET_DSA
+       select FIXED_PHY
+       ---help---
+         This enables support for a fake mock-up switch chip which
+         exercises the DSA APIs.
+
+config NET_DSA_MT7530
+       tristate "Mediatek MT7530 Ethernet switch support"
+       depends on NET_DSA
+       select NET_DSA_TAG_MTK
+       ---help---
+         This enables support for the Mediatek MT7530 Ethernet switch
+         chip.
+
 endmenu
index a3c94163221723c610791bd0e0a98df05c7dc221..2ae07f4fbf63501d767bfe172225952e8fbbd713 100644 (file)
@@ -2,6 +2,7 @@ obj-$(CONFIG_NET_DSA_MV88E6060) += mv88e6060.o
 obj-$(CONFIG_NET_DSA_BCM_SF2)  += bcm-sf2.o
 bcm-sf2-objs                   := bcm_sf2.o bcm_sf2_cfp.o
 obj-$(CONFIG_NET_DSA_QCA8K)    += qca8k.o
-
+obj-$(CONFIG_NET_DSA_MT7530)   += mt7530.o
 obj-y                          += b53/
 obj-y                          += mv88e6xxx/
+obj-$(CONFIG_NET_DSA_LOOP)     += dsa_loop.o dsa_loop_bdinfo.o
index 346dd9a1232dff12e24fef05b6e6352f106ed2be..2fb32d67065f8aa164b3ea03d5cb914120c4e0c6 100644 (file)
  */
 
 #include <linux/list.h>
-#include <net/dsa.h>
 #include <linux/ethtool.h>
 #include <linux/if_ether.h>
 #include <linux/in.h>
+#include <linux/netdevice.h>
+#include <net/dsa.h>
 #include <linux/bitmap.h>
 
 #include "bcm_sf2.h"
diff --git a/drivers/net/dsa/dsa_loop.c b/drivers/net/dsa/dsa_loop.c
new file mode 100644 (file)
index 0000000..f0fc4de
--- /dev/null
@@ -0,0 +1,328 @@
+/*
+ * Distributed Switch Architecture loopback driver
+ *
+ * Copyright (C) 2016, Florian Fainelli <f.fainelli@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/platform_device.h>
+#include <linux/netdevice.h>
+#include <linux/phy.h>
+#include <linux/phy_fixed.h>
+#include <linux/export.h>
+#include <linux/workqueue.h>
+#include <linux/module.h>
+#include <linux/if_bridge.h>
+#include <net/switchdev.h>
+#include <net/dsa.h>
+
+#include "dsa_loop.h"
+
+struct dsa_loop_vlan {
+       u16 members;
+       u16 untagged;
+};
+
+#define DSA_LOOP_VLANS 5
+
+struct dsa_loop_priv {
+       struct mii_bus  *bus;
+       unsigned int    port_base;
+       struct dsa_loop_vlan vlans[DSA_LOOP_VLANS];
+       struct net_device *netdev;
+       u16 pvid;
+};
+
+static struct phy_device *phydevs[PHY_MAX_ADDR];
+
+static enum dsa_tag_protocol dsa_loop_get_protocol(struct dsa_switch *ds)
+{
+       dev_dbg(ds->dev, "%s\n", __func__);
+
+       return DSA_TAG_PROTO_NONE;
+}
+
+static int dsa_loop_setup(struct dsa_switch *ds)
+{
+       dev_dbg(ds->dev, "%s\n", __func__);
+
+       return 0;
+}
+
+static int dsa_loop_set_addr(struct dsa_switch *ds, u8 *addr)
+{
+       dev_dbg(ds->dev, "%s\n", __func__);
+
+       return 0;
+}
+
+static int dsa_loop_phy_read(struct dsa_switch *ds, int port, int regnum)
+{
+       struct dsa_loop_priv *ps = ds->priv;
+       struct mii_bus *bus = ps->bus;
+
+       dev_dbg(ds->dev, "%s\n", __func__);
+
+       return mdiobus_read_nested(bus, ps->port_base + port, regnum);
+}
+
+static int dsa_loop_phy_write(struct dsa_switch *ds, int port,
+                             int regnum, u16 value)
+{
+       struct dsa_loop_priv *ps = ds->priv;
+       struct mii_bus *bus = ps->bus;
+
+       dev_dbg(ds->dev, "%s\n", __func__);
+
+       return mdiobus_write_nested(bus, ps->port_base + port, regnum, value);
+}
+
+static int dsa_loop_port_bridge_join(struct dsa_switch *ds, int port,
+                                    struct net_device *bridge)
+{
+       dev_dbg(ds->dev, "%s\n", __func__);
+
+       return 0;
+}
+
+static void dsa_loop_port_bridge_leave(struct dsa_switch *ds, int port,
+                                      struct net_device *bridge)
+{
+       dev_dbg(ds->dev, "%s\n", __func__);
+}
+
+static void dsa_loop_port_stp_state_set(struct dsa_switch *ds, int port,
+                                       u8 state)
+{
+       dev_dbg(ds->dev, "%s\n", __func__);
+}
+
+static int dsa_loop_port_vlan_filtering(struct dsa_switch *ds, int port,
+                                       bool vlan_filtering)
+{
+       dev_dbg(ds->dev, "%s\n", __func__);
+
+       return 0;
+}
+
+static int dsa_loop_port_vlan_prepare(struct dsa_switch *ds, int port,
+                                     const struct switchdev_obj_port_vlan *vlan,
+                                     struct switchdev_trans *trans)
+{
+       struct dsa_loop_priv *ps = ds->priv;
+       struct mii_bus *bus = ps->bus;
+
+       dev_dbg(ds->dev, "%s\n", __func__);
+
+       /* Just do a sleeping operation to make lockdep checks effective */
+       mdiobus_read(bus, ps->port_base + port, MII_BMSR);
+
+       if (vlan->vid_end >= DSA_LOOP_VLANS)
+               return -ERANGE;
+
+       return 0;
+}
+
+static void dsa_loop_port_vlan_add(struct dsa_switch *ds, int port,
+                                  const struct switchdev_obj_port_vlan *vlan,
+                                  struct switchdev_trans *trans)
+{
+       bool untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED;
+       bool pvid = vlan->flags & BRIDGE_VLAN_INFO_PVID;
+       struct dsa_loop_priv *ps = ds->priv;
+       struct mii_bus *bus = ps->bus;
+       struct dsa_loop_vlan *vl;
+       u16 vid;
+
+       dev_dbg(ds->dev, "%s\n", __func__);
+
+       /* Just do a sleeping operation to make lockdep checks effective */
+       mdiobus_read(bus, ps->port_base + port, MII_BMSR);
+
+       for (vid = vlan->vid_begin; vid <= vlan->vid_end; ++vid) {
+               vl = &ps->vlans[vid];
+
+               vl->members |= BIT(port);
+               if (untagged)
+                       vl->untagged |= BIT(port);
+               else
+                       vl->untagged &= ~BIT(port);
+       }
+
+       if (pvid)
+               ps->pvid = vid;
+}
+
+static int dsa_loop_port_vlan_del(struct dsa_switch *ds, int port,
+                                 const struct switchdev_obj_port_vlan *vlan)
+{
+       bool untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED;
+       struct dsa_loop_priv *ps = ds->priv;
+       struct mii_bus *bus = ps->bus;
+       struct dsa_loop_vlan *vl;
+       u16 vid, pvid = ps->pvid;
+
+       dev_dbg(ds->dev, "%s\n", __func__);
+
+       /* Just do a sleeping operation to make lockdep checks effective */
+       mdiobus_read(bus, ps->port_base + port, MII_BMSR);
+
+       for (vid = vlan->vid_begin; vid <= vlan->vid_end; ++vid) {
+               vl = &ps->vlans[vid];
+
+               vl->members &= ~BIT(port);
+               if (untagged)
+                       vl->untagged &= ~BIT(port);
+
+               if (pvid == vid)
+                       pvid = 1;
+       }
+       ps->pvid = pvid;
+
+       return 0;
+}
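
The add/del pair above keeps VLAN membership as two per-VLAN port bitmaps. Worked example for port 2 and VID 3 (illustrative statements, not driver code):

        vl = &ps->vlans[3];
        vl->members  |= BIT(2);         /* port 2 joins VID 3 */
        vl->untagged |= BIT(2);         /* ...as untagged */
        vl->untagged &= ~BIT(2);        /* re-added tagged: still a member */
        vl->members  &= ~BIT(2);        /* deleted: membership gone, and
                                         * pvid falls back to 1 if VID 3
                                         * was the pvid */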
+
+static int dsa_loop_port_vlan_dump(struct dsa_switch *ds, int port,
+                                  struct switchdev_obj_port_vlan *vlan,
+                                  int (*cb)(struct switchdev_obj *obj))
+{
+       struct dsa_loop_priv *ps = ds->priv;
+       struct mii_bus *bus = ps->bus;
+       struct dsa_loop_vlan *vl;
+       u16 vid, vid_start = 0;
+       int err = 0;
+
+       dev_dbg(ds->dev, "%s\n", __func__);
+
+       /* Just do a sleeping operation to make lockdep checks effective */
+       mdiobus_read(bus, ps->port_base + port, MII_BMSR);
+
+       for (vid = vid_start; vid < DSA_LOOP_VLANS; vid++) {
+               vl = &ps->vlans[vid];
+
+               if (!(vl->members & BIT(port)))
+                       continue;
+
+               vlan->vid_begin = vlan->vid_end = vid;
+               vlan->flags = 0;
+
+               if (vl->untagged & BIT(port))
+                       vlan->flags |= BRIDGE_VLAN_INFO_UNTAGGED;
+               if (ps->pvid == vid)
+                       vlan->flags |= BRIDGE_VLAN_INFO_PVID;
+
+               err = cb(&vlan->obj);
+               if (err)
+                       break;
+       }
+
+       return err;
+}
+
+static struct dsa_switch_ops dsa_loop_driver = {
+       .get_tag_protocol       = dsa_loop_get_protocol,
+       .setup                  = dsa_loop_setup,
+       .set_addr               = dsa_loop_set_addr,
+       .phy_read               = dsa_loop_phy_read,
+       .phy_write              = dsa_loop_phy_write,
+       .port_bridge_join       = dsa_loop_port_bridge_join,
+       .port_bridge_leave      = dsa_loop_port_bridge_leave,
+       .port_stp_state_set     = dsa_loop_port_stp_state_set,
+       .port_vlan_filtering    = dsa_loop_port_vlan_filtering,
+       .port_vlan_prepare      = dsa_loop_port_vlan_prepare,
+       .port_vlan_add          = dsa_loop_port_vlan_add,
+       .port_vlan_del          = dsa_loop_port_vlan_del,
+       .port_vlan_dump         = dsa_loop_port_vlan_dump,
+};
+
+static int dsa_loop_drv_probe(struct mdio_device *mdiodev)
+{
+       struct dsa_loop_pdata *pdata = mdiodev->dev.platform_data;
+       struct dsa_loop_priv *ps;
+       struct dsa_switch *ds;
+
+       if (!pdata)
+               return -ENODEV;
+
+       dev_info(&mdiodev->dev, "%s: 0x%x\n",
+                pdata->name, pdata->enabled_ports);
+
+       ds = dsa_switch_alloc(&mdiodev->dev, DSA_MAX_PORTS);
+       if (!ds)
+               return -ENOMEM;
+
+       ps = devm_kzalloc(&mdiodev->dev, sizeof(*ps), GFP_KERNEL);
+       if (!ps)
+               return -ENOMEM;
+
+       ps->netdev = dev_get_by_name(&init_net, pdata->netdev);
+       if (!ps->netdev)
+               return -EPROBE_DEFER;
+
+       pdata->cd.netdev[DSA_LOOP_CPU_PORT] = &ps->netdev->dev;
+
+       ds->dev = &mdiodev->dev;
+       ds->ops = &dsa_loop_driver;
+       ds->priv = ps;
+       ps->bus = mdiodev->bus;
+
+       dev_set_drvdata(&mdiodev->dev, ds);
+
+       return dsa_register_switch(ds, ds->dev);
+}
+
+static void dsa_loop_drv_remove(struct mdio_device *mdiodev)
+{
+       struct dsa_switch *ds = dev_get_drvdata(&mdiodev->dev);
+       struct dsa_loop_priv *ps = ds->priv;
+
+       dsa_unregister_switch(ds);
+       dev_put(ps->netdev);
+}
+
+static struct mdio_driver dsa_loop_drv = {
+       .mdiodrv.driver = {
+               .name   = "dsa-loop",
+       },
+       .probe  = dsa_loop_drv_probe,
+       .remove = dsa_loop_drv_remove,
+};
+
+#define NUM_FIXED_PHYS (DSA_LOOP_NUM_PORTS - 2)
+
+static void unregister_fixed_phys(void)
+{
+       unsigned int i;
+
+       for (i = 0; i < NUM_FIXED_PHYS; i++)
+               if (phydevs[i])
+                       fixed_phy_unregister(phydevs[i]);
+}
+
+static int __init dsa_loop_init(void)
+{
+       struct fixed_phy_status status = {
+               .link = 1,
+               .speed = SPEED_100,
+               .duplex = DUPLEX_FULL,
+       };
+       unsigned int i;
+
+       for (i = 0; i < NUM_FIXED_PHYS; i++)
+               phydevs[i] = fixed_phy_register(PHY_POLL, &status, -1, NULL);
+
+       return mdio_driver_register(&dsa_loop_drv);
+}
+module_init(dsa_loop_init);
+
+static void __exit dsa_loop_exit(void)
+{
+       mdio_driver_unregister(&dsa_loop_drv);
+       unregister_fixed_phys();
+}
+module_exit(dsa_loop_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Florian Fainelli");
+MODULE_DESCRIPTION("DSA loopback driver");
diff --git a/drivers/net/dsa/dsa_loop.h b/drivers/net/dsa/dsa_loop.h
new file mode 100644 (file)
index 0000000..dc39687
--- /dev/null
@@ -0,0 +1,19 @@
+#ifndef __DSA_LOOP_H
+#define __DSA_LOOP_H
+
+struct dsa_chip_data;
+
+struct dsa_loop_pdata {
+       /* Must be first, such that dsa_register_switch() can access this
+        * without gory pointer manipulations
+        */
+       struct dsa_chip_data cd;
+       const char *name;
+       unsigned int enabled_ports;
+       const char *netdev;
+};
+
+#define DSA_LOOP_NUM_PORTS     6
+#define DSA_LOOP_CPU_PORT      (DSA_LOOP_NUM_PORTS - 1)
+
+#endif /* __DSA_LOOP_H */
diff --git a/drivers/net/dsa/dsa_loop_bdinfo.c b/drivers/net/dsa/dsa_loop_bdinfo.c
new file mode 100644 (file)
index 0000000..fb8d5dc
--- /dev/null
@@ -0,0 +1,34 @@
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/phy.h>
+#include <net/dsa.h>
+
+#include "dsa_loop.h"
+
+static struct dsa_loop_pdata dsa_loop_pdata = {
+       .cd = {
+               .port_names[0] = "lan1",
+               .port_names[1] = "lan2",
+               .port_names[2] = "lan3",
+               .port_names[3] = "lan4",
+               .port_names[DSA_LOOP_CPU_PORT] = "cpu",
+       },
+       .name = "DSA mockup driver",
+       .enabled_ports = 0x1f,
+       .netdev = "eth0",
+};
+
+static const struct mdio_board_info bdinfo = {
+       .bus_id = "fixed-0",
+       .modalias = "dsa-loop",
+       .mdio_addr = 31,
+       .platform_data = &dsa_loop_pdata,
+};
+
+static int __init dsa_loop_bdinfo_init(void)
+{
+       return mdiobus_register_board_info(&bdinfo, 1);
+}
+arch_initcall(dsa_loop_bdinfo_init)
+
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/dsa/mt7530.c b/drivers/net/dsa/mt7530.c
new file mode 100644 (file)
index 0000000..b070c16
--- /dev/null
@@ -0,0 +1,1126 @@
+/*
+ * Mediatek MT7530 DSA Switch driver
+ * Copyright (C) 2017 Sean Wang <sean.wang@mediatek.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+#include <linux/etherdevice.h>
+#include <linux/if_bridge.h>
+#include <linux/iopoll.h>
+#include <linux/mdio.h>
+#include <linux/mfd/syscon.h>
+#include <linux/module.h>
+#include <linux/netdevice.h>
+#include <linux/of_gpio.h>
+#include <linux/of_mdio.h>
+#include <linux/of_net.h>
+#include <linux/of_platform.h>
+#include <linux/phy.h>
+#include <linux/regmap.h>
+#include <linux/regulator/consumer.h>
+#include <linux/reset.h>
+#include <linux/gpio/consumer.h>
+#include <net/dsa.h>
+#include <net/switchdev.h>
+
+#include "mt7530.h"
+
+/* String, offset, and register size in bytes if different from 4 bytes */
+static const struct mt7530_mib_desc mt7530_mib[] = {
+       MIB_DESC(1, 0x00, "TxDrop"),
+       MIB_DESC(1, 0x04, "TxCrcErr"),
+       MIB_DESC(1, 0x08, "TxUnicast"),
+       MIB_DESC(1, 0x0c, "TxMulticast"),
+       MIB_DESC(1, 0x10, "TxBroadcast"),
+       MIB_DESC(1, 0x14, "TxCollision"),
+       MIB_DESC(1, 0x18, "TxSingleCollision"),
+       MIB_DESC(1, 0x1c, "TxMultipleCollision"),
+       MIB_DESC(1, 0x20, "TxDeferred"),
+       MIB_DESC(1, 0x24, "TxLateCollision"),
+       MIB_DESC(1, 0x28, "TxExcessiveCollistion"),
+       MIB_DESC(1, 0x2c, "TxPause"),
+       MIB_DESC(1, 0x30, "TxPktSz64"),
+       MIB_DESC(1, 0x34, "TxPktSz65To127"),
+       MIB_DESC(1, 0x38, "TxPktSz128To255"),
+       MIB_DESC(1, 0x3c, "TxPktSz256To511"),
+       MIB_DESC(1, 0x40, "TxPktSz512To1023"),
+       MIB_DESC(1, 0x44, "Tx1024ToMax"),
+       MIB_DESC(2, 0x48, "TxBytes"),
+       MIB_DESC(1, 0x60, "RxDrop"),
+       MIB_DESC(1, 0x64, "RxFiltering"),
+       MIB_DESC(1, 0x6c, "RxMulticast"),
+       MIB_DESC(1, 0x70, "RxBroadcast"),
+       MIB_DESC(1, 0x74, "RxAlignErr"),
+       MIB_DESC(1, 0x78, "RxCrcErr"),
+       MIB_DESC(1, 0x7c, "RxUnderSizeErr"),
+       MIB_DESC(1, 0x80, "RxFragErr"),
+       MIB_DESC(1, 0x84, "RxOverSzErr"),
+       MIB_DESC(1, 0x88, "RxJabberErr"),
+       MIB_DESC(1, 0x8c, "RxPause"),
+       MIB_DESC(1, 0x90, "RxPktSz64"),
+       MIB_DESC(1, 0x94, "RxPktSz65To127"),
+       MIB_DESC(1, 0x98, "RxPktSz128To255"),
+       MIB_DESC(1, 0x9c, "RxPktSz256To511"),
+       MIB_DESC(1, 0xa0, "RxPktSz512To1023"),
+       MIB_DESC(1, 0xa4, "RxPktSz1024ToMax"),
+       MIB_DESC(2, 0xa8, "RxBytes"),
+       MIB_DESC(1, 0xb0, "RxCtrlDrop"),
+       MIB_DESC(1, 0xb4, "RxIngressDrop"),
+       MIB_DESC(1, 0xb8, "RxArlDrop"),
+};
+
+static int
+mt7623_trgmii_write(struct mt7530_priv *priv, u32 reg, u32 val)
+{
+       int ret;
+
+       ret = regmap_write(priv->ethernet, TRGMII_BASE(reg), val);
+       if (ret < 0)
+               dev_err(priv->dev,
+                       "failed to write trgmii register\n");
+       return ret;
+}
+
+static u32
+mt7623_trgmii_read(struct mt7530_priv *priv, u32 reg)
+{
+       int ret;
+       u32 val;
+
+       ret = regmap_read(priv->ethernet, TRGMII_BASE(reg), &val);
+       if (ret < 0) {
+               dev_err(priv->dev,
+                       "failed to read trgmii register\n");
+               return ret;
+       }
+
+       return val;
+}
+
+static void
+mt7623_trgmii_rmw(struct mt7530_priv *priv, u32 reg,
+                 u32 mask, u32 set)
+{
+       u32 val;
+
+       val = mt7623_trgmii_read(priv, reg);
+       val &= ~mask;
+       val |= set;
+       mt7623_trgmii_write(priv, reg, val);
+}
+
+static void
+mt7623_trgmii_set(struct mt7530_priv *priv, u32 reg, u32 val)
+{
+       mt7623_trgmii_rmw(priv, reg, 0, val);
+}
+
+static void
+mt7623_trgmii_clear(struct mt7530_priv *priv, u32 reg, u32 val)
+{
+       mt7623_trgmii_rmw(priv, reg, val, 0);
+}
+
+static int
+core_read_mmd_indirect(struct mt7530_priv *priv, int prtad, int devad)
+{
+       struct mii_bus *bus = priv->bus;
+       int value, ret;
+
+       /* Write the desired MMD Devad */
+       ret = bus->write(bus, 0, MII_MMD_CTRL, devad);
+       if (ret < 0)
+               goto err;
+
+       /* Write the desired MMD register address */
+       ret = bus->write(bus, 0, MII_MMD_DATA, prtad);
+       if (ret < 0)
+               goto err;
+
+       /* Select the Function : DATA with no post increment */
+       ret = bus->write(bus, 0, MII_MMD_CTRL, (devad | MII_MMD_CTRL_NOINCR));
+       if (ret < 0)
+               goto err;
+
+       /* Read the content of the MMD's selected register */
+       value = bus->read(bus, 0, MII_MMD_DATA);
+
+       return value;
+err:
+       dev_err(&bus->dev,  "failed to read mmd register\n");
+
+       return ret;
+}
+
+static int
+core_write_mmd_indirect(struct mt7530_priv *priv, int prtad,
+                       int devad, u32 data)
+{
+       struct mii_bus *bus = priv->bus;
+       int ret;
+
+       /* Write the desired MMD Devad */
+       ret = bus->write(bus, 0, MII_MMD_CTRL, devad);
+       if (ret < 0)
+               goto err;
+
+       /* Write the desired MMD register address */
+       ret = bus->write(bus, 0, MII_MMD_DATA, prtad);
+       if (ret < 0)
+               goto err;
+
+       /* Select the Function : DATA with no post increment */
+       ret = bus->write(bus, 0, MII_MMD_CTRL, (devad | MII_MMD_CTRL_NOINCR));
+       if (ret < 0)
+               goto err;
+
+       /* Write the data into MMD's selected register */
+       ret = bus->write(bus, 0, MII_MMD_DATA, data);
+err:
+       if (ret < 0)
+               dev_err(&bus->dev,
+                       "failed to write mmd register\n");
+       return ret;
+}
+
+static void
+core_write(struct mt7530_priv *priv, u32 reg, u32 val)
+{
+       struct mii_bus *bus = priv->bus;
+
+       mutex_lock_nested(&bus->mdio_lock, MDIO_MUTEX_NESTED);
+
+       core_write_mmd_indirect(priv, reg, MDIO_MMD_VEND2, val);
+
+       mutex_unlock(&bus->mdio_lock);
+}
+
+static void
+core_rmw(struct mt7530_priv *priv, u32 reg, u32 mask, u32 set)
+{
+       struct mii_bus *bus = priv->bus;
+       u32 val;
+
+       mutex_lock_nested(&bus->mdio_lock, MDIO_MUTEX_NESTED);
+
+       val = core_read_mmd_indirect(priv, reg, MDIO_MMD_VEND2);
+       val &= ~mask;
+       val |= set;
+       core_write_mmd_indirect(priv, reg, MDIO_MMD_VEND2, val);
+
+       mutex_unlock(&bus->mdio_lock);
+}
+
+static void
+core_set(struct mt7530_priv *priv, u32 reg, u32 val)
+{
+       core_rmw(priv, reg, 0, val);
+}
+
+static void
+core_clear(struct mt7530_priv *priv, u32 reg, u32 val)
+{
+       core_rmw(priv, reg, val, 0);
+}
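
core_set()/core_clear() are thin set/clear wrappers around the read-modify-write above. Typical use, taken from mt7530_pad_clk_setup() later in this file, is gating the core clock around PLL reprogramming:

        core_clear(priv, CORE_TRGMII_GSW_CLK_CG, REG_GSWCK_EN); /* clock off */
        /* ... reprogram GSW PLL dividers ... */
        core_set(priv, CORE_TRGMII_GSW_CLK_CG, REG_GSWCK_EN);   /* clock on */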
+
+static int
+mt7530_mii_write(struct mt7530_priv *priv, u32 reg, u32 val)
+{
+       struct mii_bus *bus = priv->bus;
+       u16 page, r, lo, hi;
+       int ret;
+
+       page = (reg >> 6) & 0x3ff;
+       r  = (reg >> 2) & 0xf;
+       lo = val & 0xffff;
+       hi = val >> 16;
+
+       /* MT7530 uses 31 as the pseudo port */
+       ret = bus->write(bus, 0x1f, 0x1f, page);
+       if (ret < 0)
+               goto err;
+
+       ret = bus->write(bus, 0x1f, r,  lo);
+       if (ret < 0)
+               goto err;
+
+       ret = bus->write(bus, 0x1f, 0x10, hi);
+err:
+       if (ret < 0)
+               dev_err(&bus->dev,
+                       "failed to write mt7530 register\n");
+       return ret;
+}
+
+static u32
+mt7530_mii_read(struct mt7530_priv *priv, u32 reg)
+{
+       struct mii_bus *bus = priv->bus;
+       u16 page, r, lo, hi;
+       int ret;
+
+       page = (reg >> 6) & 0x3ff;
+       r = (reg >> 2) & 0xf;
+
+       /* MT7530 uses 31 as the pseudo port */
+       ret = bus->write(bus, 0x1f, 0x1f, page);
+       if (ret < 0) {
+               dev_err(&bus->dev,
+                       "failed to read mt7530 register\n");
+               return ret;
+       }
+
+       lo = bus->read(bus, 0x1f, r);
+       hi = bus->read(bus, 0x1f, 0x10);
+
+       return (hi << 16) | (lo & 0xffff);
+}
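
Both accessors split the 32-bit register offset into a 10-bit page and a 4-bit in-page register, so one 32-bit access costs a page-select write plus two 16-bit data transfers through pseudo port 0x1f. Worked example for reg = 0x2004:

        page = (0x2004 >> 6) & 0x3ff;   /* = 0x80, written to (0x1f, 0x1f) */
        r    = (0x2004 >> 2) & 0xf;     /* = 0x1, low word at (0x1f, r) */
                                        /* high word always at (0x1f, 0x10) */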
+
+static void
+mt7530_write(struct mt7530_priv *priv, u32 reg, u32 val)
+{
+       struct mii_bus *bus = priv->bus;
+
+       mutex_lock_nested(&bus->mdio_lock, MDIO_MUTEX_NESTED);
+
+       mt7530_mii_write(priv, reg, val);
+
+       mutex_unlock(&bus->mdio_lock);
+}
+
+static u32
+_mt7530_read(struct mt7530_dummy_poll *p)
+{
+       struct mii_bus          *bus = p->priv->bus;
+       u32 val;
+
+       mutex_lock_nested(&bus->mdio_lock, MDIO_MUTEX_NESTED);
+
+       val = mt7530_mii_read(p->priv, p->reg);
+
+       mutex_unlock(&bus->mdio_lock);
+
+       return val;
+}
+
+static u32
+mt7530_read(struct mt7530_priv *priv, u32 reg)
+{
+       struct mt7530_dummy_poll p;
+
+       INIT_MT7530_DUMMY_POLL(&p, priv, reg);
+       return _mt7530_read(&p);
+}
+
+static void
+mt7530_rmw(struct mt7530_priv *priv, u32 reg,
+          u32 mask, u32 set)
+{
+       struct mii_bus *bus = priv->bus;
+       u32 val;
+
+       mutex_lock_nested(&bus->mdio_lock, MDIO_MUTEX_NESTED);
+
+       val = mt7530_mii_read(priv, reg);
+       val &= ~mask;
+       val |= set;
+       mt7530_mii_write(priv, reg, val);
+
+       mutex_unlock(&bus->mdio_lock);
+}
+
+static void
+mt7530_set(struct mt7530_priv *priv, u32 reg, u32 val)
+{
+       mt7530_rmw(priv, reg, 0, val);
+}
+
+static void
+mt7530_clear(struct mt7530_priv *priv, u32 reg, u32 val)
+{
+       mt7530_rmw(priv, reg, val, 0);
+}
+
+static int
+mt7530_fdb_cmd(struct mt7530_priv *priv, enum mt7530_fdb_cmd cmd, u32 *rsp)
+{
+       u32 val;
+       int ret;
+       struct mt7530_dummy_poll p;
+
+       /* Set the command operating upon the MAC address entries */
+       val = ATC_BUSY | ATC_MAT(0) | cmd;
+       mt7530_write(priv, MT7530_ATC, val);
+
+       INIT_MT7530_DUMMY_POLL(&p, priv, MT7530_ATC);
+       ret = readx_poll_timeout(_mt7530_read, &p, val,
+                                !(val & ATC_BUSY), 20, 20000);
+       if (ret < 0) {
+               dev_err(priv->dev, "ATC command timeout\n");
+               return ret;
+       }
+
+       /* Additional sanity check for the read command: fail if the
+        * specified entry is invalid
+        */
+       val = mt7530_read(priv, MT7530_ATC);
+       if ((cmd == MT7530_FDB_READ) && (val & ATC_INVALID))
+               return -EINVAL;
+
+       if (rsp)
+               *rsp = val;
+
+       return 0;
+}
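
The poll above expands roughly as follows: readx_poll_timeout() keeps re-evaluating _mt7530_read(&p) into val until the condition holds, sleeping about 20us between reads and failing after 20ms. A rough sketch of the iopoll helper's behavior, not its exact implementation:

        unsigned long timeout = jiffies + msecs_to_jiffies(20);

        for (;;) {
                val = _mt7530_read(&p);
                if (!(val & ATC_BUSY))
                        break;                  /* command completed */
                if (time_after(jiffies, timeout))
                        return -ETIMEDOUT;      /* controller stuck */
                usleep_range(20, 40);           /* ~20us between polls */
        }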
+
+static void
+mt7530_fdb_read(struct mt7530_priv *priv, struct mt7530_fdb *fdb)
+{
+       u32 reg[3];
+       int i;
+
+       /* Read from ARL table into an array */
+       for (i = 0; i < 3; i++) {
+               reg[i] = mt7530_read(priv, MT7530_TSRA1 + (i * 4));
+
+               dev_dbg(priv->dev, "%s(%d) reg[%d]=0x%x\n",
+                       __func__, __LINE__, i, reg[i]);
+       }
+
+       fdb->vid = (reg[1] >> CVID) & CVID_MASK;
+       fdb->aging = (reg[2] >> AGE_TIMER) & AGE_TIMER_MASK;
+       fdb->port_mask = (reg[2] >> PORT_MAP) & PORT_MAP_MASK;
+       fdb->mac[0] = (reg[0] >> MAC_BYTE_0) & MAC_BYTE_MASK;
+       fdb->mac[1] = (reg[0] >> MAC_BYTE_1) & MAC_BYTE_MASK;
+       fdb->mac[2] = (reg[0] >> MAC_BYTE_2) & MAC_BYTE_MASK;
+       fdb->mac[3] = (reg[0] >> MAC_BYTE_3) & MAC_BYTE_MASK;
+       fdb->mac[4] = (reg[1] >> MAC_BYTE_4) & MAC_BYTE_MASK;
+       fdb->mac[5] = (reg[1] >> MAC_BYTE_5) & MAC_BYTE_MASK;
+       fdb->noarp = ((reg[2] >> ENT_STATUS) & ENT_STATUS_MASK) == STATIC_ENT;
+}
+
+static void
+mt7530_fdb_write(struct mt7530_priv *priv, u16 vid,
+                u8 port_mask, const u8 *mac,
+                u8 aging, u8 type)
+{
+       u32 reg[3] = { 0 };
+       int i;
+
+       reg[1] |= vid & CVID_MASK;
+       reg[2] |= (aging & AGE_TIMER_MASK) << AGE_TIMER;
+       reg[2] |= (port_mask & PORT_MAP_MASK) << PORT_MAP;
+       /* STATIC_ENT indicates a static entry that won't be aged out,
+        * while STATIC_EMP is specified to erase an entry
+        */
+       reg[2] |= (type & ENT_STATUS_MASK) << ENT_STATUS;
+       reg[1] |= mac[5] << MAC_BYTE_5;
+       reg[1] |= mac[4] << MAC_BYTE_4;
+       reg[0] |= mac[3] << MAC_BYTE_3;
+       reg[0] |= mac[2] << MAC_BYTE_2;
+       reg[0] |= mac[1] << MAC_BYTE_1;
+       reg[0] |= mac[0] << MAC_BYTE_0;
+
+       /* Write array into the ARL table */
+       for (i = 0; i < 3; i++)
+               mt7530_write(priv, MT7530_ATA1 + (i * 4), reg[i]);
+}
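
Read together with mt7530_fdb_read() above, the three-word ARL record layout is fixed (shift positions per the MAC_BYTE_*/CVID/AGE_TIMER/PORT_MAP/ENT_STATUS definitions in mt7530.h):

        /* reg[0]: mac[0..3], one byte per MAC_BYTE_0..MAC_BYTE_3 slot */
        /* reg[1]: mac[4..5] plus the CVID                             */
        /* reg[2]: age timer, port map and entry status                */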
+
+static int
+mt7530_pad_clk_setup(struct dsa_switch *ds, int mode)
+{
+       struct mt7530_priv *priv = ds->priv;
+       u32 ncpo1, ssc_delta, trgint, i;
+
+       switch (mode) {
+       case PHY_INTERFACE_MODE_RGMII:
+               trgint = 0;
+               ncpo1 = 0x0c80;
+               ssc_delta = 0x87;
+               break;
+       case PHY_INTERFACE_MODE_TRGMII:
+               trgint = 1;
+               ncpo1 = 0x1400;
+               ssc_delta = 0x57;
+               break;
+       default:
+               dev_err(priv->dev, "xMII mode %d not supported\n", mode);
+               return -EINVAL;
+       }
+
+       mt7530_rmw(priv, MT7530_P6ECR, P6_INTF_MODE_MASK,
+                  P6_INTF_MODE(trgint));
+
+       /* Lower Tx Driving for TRGMII path */
+       for (i = 0 ; i < NUM_TRGMII_CTRL ; i++)
+               mt7530_write(priv, MT7530_TRGMII_TD_ODT(i),
+                            TD_DM_DRVP(8) | TD_DM_DRVN(8));
+
+       /* Setup core clock for MT7530 */
+       if (!trgint) {
+               /* Disable MT7530 core clock */
+               core_clear(priv, CORE_TRGMII_GSW_CLK_CG, REG_GSWCK_EN);
+
+               /* Disable PLL. Since the phy_device that
+                * phy_[read,write]_mmd_indirect would operate on has not
+                * been created yet, we provide our own
+                * core_write_mmd_indirect to complete this step.
+                */
+               core_write_mmd_indirect(priv,
+                                       CORE_GSWPLL_GRP1,
+                                       MDIO_MMD_VEND2,
+                                       0);
+
+               /* Set core clock to 500MHz */
+               core_write(priv, CORE_GSWPLL_GRP2,
+                          RG_GSWPLL_POSDIV_500M(1) |
+                          RG_GSWPLL_FBKDIV_500M(25));
+
+               /* Enable PLL */
+               core_write(priv, CORE_GSWPLL_GRP1,
+                          RG_GSWPLL_EN_PRE |
+                          RG_GSWPLL_POSDIV_200M(2) |
+                          RG_GSWPLL_FBKDIV_200M(32));
+
+               /* Enable MT7530 core clock */
+               core_set(priv, CORE_TRGMII_GSW_CLK_CG, REG_GSWCK_EN);
+       }
+
+       /* Setup the MT7530 TRGMII Tx Clock */
+       core_set(priv, CORE_TRGMII_GSW_CLK_CG, REG_GSWCK_EN);
+       core_write(priv, CORE_PLL_GROUP5, RG_LCDDS_PCW_NCPO1(ncpo1));
+       core_write(priv, CORE_PLL_GROUP6, RG_LCDDS_PCW_NCPO0(0));
+       core_write(priv, CORE_PLL_GROUP10, RG_LCDDS_SSC_DELTA(ssc_delta));
+       core_write(priv, CORE_PLL_GROUP11, RG_LCDDS_SSC_DELTA1(ssc_delta));
+       core_write(priv, CORE_PLL_GROUP4,
+                  RG_SYSPLL_DDSFBK_EN | RG_SYSPLL_BIAS_EN |
+                  RG_SYSPLL_BIAS_LPF_EN);
+       core_write(priv, CORE_PLL_GROUP2,
+                  RG_SYSPLL_EN_NORMAL | RG_SYSPLL_VODEN |
+                  RG_SYSPLL_POSDIV(1));
+       core_write(priv, CORE_PLL_GROUP7,
+                  RG_LCDDS_PCW_NCPO_CHG | RG_LCCDS_C(3) |
+                  RG_LCDDS_PWDB | RG_LCDDS_ISO_EN);
+       core_set(priv, CORE_TRGMII_GSW_CLK_CG,
+                REG_GSWCK_EN | REG_TRGMIICK_EN);
+
+       if (!trgint)
+               for (i = 0 ; i < NUM_TRGMII_CTRL; i++)
+                       mt7530_rmw(priv, MT7530_TRGMII_RD(i),
+                                  RD_TAP_MASK, RD_TAP(16));
+       else
+               mt7623_trgmii_set(priv, GSW_INTF_MODE, INTF_MODE_TRGMII);
+
+       return 0;
+}
+
+static int
+mt7623_pad_clk_setup(struct dsa_switch *ds)
+{
+       struct mt7530_priv *priv = ds->priv;
+       int i;
+
+       for (i = 0 ; i < NUM_TRGMII_CTRL; i++)
+               mt7623_trgmii_write(priv, GSW_TRGMII_TD_ODT(i),
+                                   TD_DM_DRVP(8) | TD_DM_DRVN(8));
+
+       mt7623_trgmii_set(priv, GSW_TRGMII_RCK_CTRL, RX_RST | RXC_DQSISEL);
+       mt7623_trgmii_clear(priv, GSW_TRGMII_RCK_CTRL, RX_RST);
+
+       return 0;
+}
+
+static void
+mt7530_mib_reset(struct dsa_switch *ds)
+{
+       struct mt7530_priv *priv = ds->priv;
+
+       mt7530_write(priv, MT7530_MIB_CCR, CCR_MIB_FLUSH);
+       mt7530_write(priv, MT7530_MIB_CCR, CCR_MIB_ACTIVATE);
+}
+
+static void
+mt7530_port_set_status(struct mt7530_priv *priv, int port, int enable)
+{
+       u32 mask = PMCR_TX_EN | PMCR_RX_EN;
+
+       if (enable)
+               mt7530_set(priv, MT7530_PMCR_P(port), mask);
+       else
+               mt7530_clear(priv, MT7530_PMCR_P(port), mask);
+}
+
+static int mt7530_phy_read(struct dsa_switch *ds, int port, int regnum)
+{
+       struct mt7530_priv *priv = ds->priv;
+
+       return mdiobus_read_nested(priv->bus, port, regnum);
+}
+
+static int
+mt7530_phy_write(struct dsa_switch *ds, int port, int regnum, u16 val)
+{
+       struct mt7530_priv *priv = ds->priv;
+
+       return mdiobus_write_nested(priv->bus, port, regnum, val);
+}
+
+static void
+mt7530_get_strings(struct dsa_switch *ds, int port, uint8_t *data)
+{
+       int i;
+
+       for (i = 0; i < ARRAY_SIZE(mt7530_mib); i++)
+               strncpy(data + i * ETH_GSTRING_LEN, mt7530_mib[i].name,
+                       ETH_GSTRING_LEN);
+}
+
+static void
+mt7530_get_ethtool_stats(struct dsa_switch *ds, int port,
+                        uint64_t *data)
+{
+       struct mt7530_priv *priv = ds->priv;
+       const struct mt7530_mib_desc *mib;
+       u32 reg, i;
+       u64 hi;
+
+       for (i = 0; i < ARRAY_SIZE(mt7530_mib); i++) {
+               mib = &mt7530_mib[i];
+               reg = MT7530_PORT_MIB_COUNTER(port) + mib->offset;
+
+               data[i] = mt7530_read(priv, reg);
+               if (mib->size == 2) {
+                       hi = mt7530_read(priv, reg + 4);
+                       data[i] |= hi << 32;
+               }
+       }
+}
+
+static int
+mt7530_get_sset_count(struct dsa_switch *ds)
+{
+       return ARRAY_SIZE(mt7530_mib);
+}
+
+static void mt7530_adjust_link(struct dsa_switch *ds, int port,
+                              struct phy_device *phydev)
+{
+       struct mt7530_priv *priv = ds->priv;
+
+       if (phy_is_pseudo_fixed_link(phydev)) {
+               dev_dbg(priv->dev, "phy-mode for master device = %x\n",
+                       phydev->interface);
+
+               /* Set up the TX circuit, including the relevant PAD and
+                * driving
+                */
+               mt7530_pad_clk_setup(ds, phydev->interface);
+
+               /* Set up the RX circuit, the relevant PAD and driving on
+                * the host side; this must happen after the setup on the
+                * device side has completely finished.
+                */
+               mt7623_pad_clk_setup(ds);
+       }
+}
+
+static int
+mt7530_cpu_port_enable(struct mt7530_priv *priv,
+                      int port)
+{
+       /* Enable Mediatek header mode on the cpu port */
+       mt7530_write(priv, MT7530_PVC_P(port),
+                    PORT_SPEC_TAG);
+
+       /* Setup the MAC by default for the cpu port */
+       mt7530_write(priv, MT7530_PMCR_P(port), PMCR_CPUP_LINK);
+
+       /* Disable auto learning on the cpu port */
+       mt7530_set(priv, MT7530_PSC_P(port), SA_DIS);
+
+       /* Forward unknown unicast frames to the cpu port */
+       mt7530_set(priv, MT7530_MFC, UNU_FFP(BIT(port)));
+
+       /* CPU port gets connected to all user ports of
+        * the switch
+        */
+       mt7530_write(priv, MT7530_PCR_P(port),
+                    PCR_MATRIX(priv->ds->enabled_port_mask));
+
+       return 0;
+}
+
+static int
+mt7530_port_enable(struct dsa_switch *ds, int port,
+                  struct phy_device *phy)
+{
+       struct mt7530_priv *priv = ds->priv;
+
+       mutex_lock(&priv->reg_mutex);
+
+       /* Setup the MAC for the user port */
+       mt7530_write(priv, MT7530_PMCR_P(port), PMCR_USERP_LINK);
+
+       /* Allow the user port to get connected to the cpu port, and also
+        * restore the port matrix if the port is a member of a certain
+        * bridge.
+        */
+       priv->ports[port].pm |= PCR_MATRIX(BIT(MT7530_CPU_PORT));
+       priv->ports[port].enable = true;
+       mt7530_rmw(priv, MT7530_PCR_P(port), PCR_MATRIX_MASK,
+                  priv->ports[port].pm);
+       mt7530_port_set_status(priv, port, 1);
+
+       mutex_unlock(&priv->reg_mutex);
+
+       return 0;
+}
+
+static void
+mt7530_port_disable(struct dsa_switch *ds, int port,
+                   struct phy_device *phy)
+{
+       struct mt7530_priv *priv = ds->priv;
+
+       mutex_lock(&priv->reg_mutex);
+
+       /* Clear the port matrix; it can be restored in the next
+        * enablement of the port.
+        */
+       priv->ports[port].enable = false;
+       mt7530_rmw(priv, MT7530_PCR_P(port), PCR_MATRIX_MASK,
+                  PCR_MATRIX_CLR);
+       mt7530_port_set_status(priv, port, 0);
+
+       mutex_unlock(&priv->reg_mutex);
+}
+
+static void
+mt7530_stp_state_set(struct dsa_switch *ds, int port, u8 state)
+{
+       struct mt7530_priv *priv = ds->priv;
+       u32 stp_state;
+
+       switch (state) {
+       case BR_STATE_DISABLED:
+               stp_state = MT7530_STP_DISABLED;
+               break;
+       case BR_STATE_BLOCKING:
+               stp_state = MT7530_STP_BLOCKING;
+               break;
+       case BR_STATE_LISTENING:
+               stp_state = MT7530_STP_LISTENING;
+               break;
+       case BR_STATE_LEARNING:
+               stp_state = MT7530_STP_LEARNING;
+               break;
+       case BR_STATE_FORWARDING:
+       default:
+               stp_state = MT7530_STP_FORWARDING;
+               break;
+       }
+
+       mt7530_rmw(priv, MT7530_SSP_P(port), FID_PST_MASK, stp_state);
+}
+
+static int
+mt7530_port_bridge_join(struct dsa_switch *ds, int port,
+                       struct net_device *bridge)
+{
+       struct mt7530_priv *priv = ds->priv;
+       u32 port_bitmap = BIT(MT7530_CPU_PORT);
+       int i;
+
+       mutex_lock(&priv->reg_mutex);
+
+       for (i = 0; i < MT7530_NUM_PORTS; i++) {
+               /* Add this port to the port matrix of the other ports in the
+                * same bridge. If the port is disabled, the port matrix is
+                * kept but not applied until the port becomes enabled.
+                */
+               if (ds->enabled_port_mask & BIT(i) && i != port) {
+                       if (ds->ports[i].bridge_dev != bridge)
+                               continue;
+                       if (priv->ports[i].enable)
+                               mt7530_set(priv, MT7530_PCR_P(i),
+                                          PCR_MATRIX(BIT(port)));
+                       priv->ports[i].pm |= PCR_MATRIX(BIT(port));
+
+                       port_bitmap |= BIT(i);
+               }
+       }
+
+       /* Add all the other ports to this port matrix. */
+       if (priv->ports[port].enable)
+               mt7530_rmw(priv, MT7530_PCR_P(port),
+                          PCR_MATRIX_MASK, PCR_MATRIX(port_bitmap));
+       priv->ports[port].pm |= PCR_MATRIX(port_bitmap);
+
+       mutex_unlock(&priv->reg_mutex);
+
+       return 0;
+}
+
+static void
+mt7530_port_bridge_leave(struct dsa_switch *ds, int port,
+                        struct net_device *bridge)
+{
+       struct mt7530_priv *priv = ds->priv;
+       int i;
+
+       mutex_lock(&priv->reg_mutex);
+
+       for (i = 0; i < MT7530_NUM_PORTS; i++) {
+               /* Remove this port from the port matrix of the other ports
+                * in the same bridge. If the port is disabled, the port
+                * matrix is kept but not applied until the port becomes
+                * enabled.
+                */
+               if (ds->enabled_port_mask & BIT(i) && i != port) {
+                       if (ds->ports[i].bridge_dev != bridge)
+                               continue;
+                       if (priv->ports[i].enable)
+                               mt7530_clear(priv, MT7530_PCR_P(i),
+                                            PCR_MATRIX(BIT(port)));
+                       priv->ports[i].pm &= ~PCR_MATRIX(BIT(port));
+               }
+       }
+
+       /* Set the cpu port to be the only one in the port matrix of
+        * this port.
+        */
+       if (priv->ports[port].enable)
+               mt7530_rmw(priv, MT7530_PCR_P(port), PCR_MATRIX_MASK,
+                          PCR_MATRIX(BIT(MT7530_CPU_PORT)));
+       priv->ports[port].pm = PCR_MATRIX(BIT(MT7530_CPU_PORT));
+
+       mutex_unlock(&priv->reg_mutex);
+}
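
The join/leave pair maintains a per-port forwarding matrix, cached in priv->ports[n].pm for ports that are currently disabled. Worked example with ports 1 and 2 in one bridge (illustrative values):

        /* bridged: each port forwards to the other and to the cpu port */
        priv->ports[1].pm = PCR_MATRIX(BIT(2) | BIT(MT7530_CPU_PORT));
        priv->ports[2].pm = PCR_MATRIX(BIT(1) | BIT(MT7530_CPU_PORT));

        /* after port 2 leaves: both fall back to the cpu port only */
        priv->ports[1].pm = PCR_MATRIX(BIT(MT7530_CPU_PORT));
        priv->ports[2].pm = PCR_MATRIX(BIT(MT7530_CPU_PORT));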
+
+static int
+mt7530_port_fdb_prepare(struct dsa_switch *ds, int port,
+                       const struct switchdev_obj_port_fdb *fdb,
+                       struct switchdev_trans *trans)
+{
+       struct mt7530_priv *priv = ds->priv;
+       int ret;
+
+       /* Because auto-learned entries share the same FDB table,
+        * an entry is reserved with no port_mask to make sure fdb_add
+        * is called while the entry is still available.
+        */
+       mutex_lock(&priv->reg_mutex);
+       mt7530_fdb_write(priv, fdb->vid, 0, fdb->addr, -1, STATIC_ENT);
+       ret = mt7530_fdb_cmd(priv, MT7530_FDB_WRITE, NULL);
+       mutex_unlock(&priv->reg_mutex);
+
+       return ret;
+}
+
+static void
+mt7530_port_fdb_add(struct dsa_switch *ds, int port,
+                   const struct switchdev_obj_port_fdb *fdb,
+                   struct switchdev_trans *trans)
+{
+       struct mt7530_priv *priv = ds->priv;
+       u8 port_mask = BIT(port);
+
+       mutex_lock(&priv->reg_mutex);
+       mt7530_fdb_write(priv, fdb->vid, port_mask, fdb->addr, -1, STATIC_ENT);
+       mt7530_fdb_cmd(priv, MT7530_FDB_WRITE, NULL);
+       mutex_unlock(&priv->reg_mutex);
+}
+
+static int
+mt7530_port_fdb_del(struct dsa_switch *ds, int port,
+                   const struct switchdev_obj_port_fdb *fdb)
+{
+       struct mt7530_priv *priv = ds->priv;
+       int ret;
+       u8 port_mask = BIT(port);
+
+       mutex_lock(&priv->reg_mutex);
+       mt7530_fdb_write(priv, fdb->vid, port_mask, fdb->addr, -1, STATIC_EMP);
+       ret = mt7530_fdb_cmd(priv, MT7530_FDB_WRITE, NULL);
+       mutex_unlock(&priv->reg_mutex);
+
+       return ret;
+}
+
+static int
+mt7530_port_fdb_dump(struct dsa_switch *ds, int port,
+                    struct switchdev_obj_port_fdb *fdb,
+                    int (*cb)(struct switchdev_obj *obj))
+{
+       struct mt7530_priv *priv = ds->priv;
+       struct mt7530_fdb _fdb = { 0 };
+       int cnt = MT7530_NUM_FDB_RECORDS;
+       int ret = 0;
+       u32 rsp = 0;
+
+       mutex_lock(&priv->reg_mutex);
+
+       ret = mt7530_fdb_cmd(priv, MT7530_FDB_START, &rsp);
+       if (ret < 0)
+               goto err;
+
+       do {
+               if (rsp & ATC_SRCH_HIT) {
+                       mt7530_fdb_read(priv, &_fdb);
+                       if (_fdb.port_mask & BIT(port)) {
+                               ether_addr_copy(fdb->addr, _fdb.mac);
+                               fdb->vid = _fdb.vid;
+                               fdb->ndm_state = _fdb.noarp ?
+                                               NUD_NOARP : NUD_REACHABLE;
+                               ret = cb(&fdb->obj);
+                               if (ret < 0)
+                                       break;
+                       }
+               }
+       } while (--cnt &&
+                !(rsp & ATC_SRCH_END) &&
+                !mt7530_fdb_cmd(priv, MT7530_FDB_NEXT, &rsp));
+err:
+       mutex_unlock(&priv->reg_mutex);
+
+       return 0;
+}
+
+static enum dsa_tag_protocol
+mtk_get_tag_protocol(struct dsa_switch *ds)
+{
+       struct mt7530_priv *priv = ds->priv;
+
+       if (!dsa_is_cpu_port(ds, MT7530_CPU_PORT)) {
+               dev_warn(priv->dev,
+                        "port not matched with tagging CPU port\n");
+               return DSA_TAG_PROTO_NONE;
+       } else {
+               return DSA_TAG_PROTO_MTK;
+       }
+}
+
+static int
+mt7530_setup(struct dsa_switch *ds)
+{
+       struct mt7530_priv *priv = ds->priv;
+       int ret, i;
+       u32 id, val;
+       struct device_node *dn;
+       struct mt7530_dummy_poll p;
+
+       /* The parent node of master_netdev, which holds the common system
+        * controller, is also the container for the two GMAC nodes
+        * representing the two netdev instances.
+        */
+       dn = ds->master_netdev->dev.of_node->parent;
+       priv->ethernet = syscon_node_to_regmap(dn);
+       if (IS_ERR(priv->ethernet))
+               return PTR_ERR(priv->ethernet);
+
+       regulator_set_voltage(priv->core_pwr, 1000000, 1000000);
+       ret = regulator_enable(priv->core_pwr);
+       if (ret < 0) {
+               dev_err(priv->dev,
+                       "Failed to enable core power: %d\n", ret);
+               return ret;
+       }
+
+       regulator_set_voltage(priv->io_pwr, 3300000, 3300000);
+       ret = regulator_enable(priv->io_pwr);
+       if (ret < 0) {
+               dev_err(priv->dev, "Failed to enable io pwr: %d\n",
+                       ret);
+               return ret;
+       }
+
+       /* Reset the whole chip through a gpio pin or memory-mapped
+        * registers, depending on the type of hardware
+        */
+       if (priv->mcm) {
+               reset_control_assert(priv->rstc);
+               usleep_range(1000, 1100);
+               reset_control_deassert(priv->rstc);
+       } else {
+               gpiod_set_value_cansleep(priv->reset, 0);
+               usleep_range(1000, 1100);
+               gpiod_set_value_cansleep(priv->reset, 1);
+       }
+
+       /* Wait for the MT7530 to become stable */
+       INIT_MT7530_DUMMY_POLL(&p, priv, MT7530_HWTRAP);
+       ret = readx_poll_timeout(_mt7530_read, &p, val, val != 0,
+                                20, 1000000);
+       if (ret < 0) {
+               dev_err(priv->dev, "reset timeout\n");
+               return ret;
+       }
+
+       id = mt7530_read(priv, MT7530_CREV);
+       id >>= CHIP_NAME_SHIFT;
+       if (id != MT7530_ID) {
+               dev_err(priv->dev, "chip %x can't be supported\n", id);
+               return -ENODEV;
+       }
+
+       /* Reset the switch through internal reset */
+       mt7530_write(priv, MT7530_SYS_CTRL,
+                    SYS_CTRL_PHY_RST | SYS_CTRL_SW_RST |
+                    SYS_CTRL_REG_RST);
+
+       /* Enable port 6 only; P5 as GMAC5 is currently unsupported */
+       val = mt7530_read(priv, MT7530_MHWTRAP);
+       val &= ~MHWTRAP_P6_DIS & ~MHWTRAP_PHY_ACCESS;
+       val |= MHWTRAP_MANUAL;
+       mt7530_write(priv, MT7530_MHWTRAP, val);
+
+       /* Enable and reset MIB counters */
+       mt7530_mib_reset(ds);
+
+       mt7530_clear(priv, MT7530_MFC, UNU_FFP_MASK);
+
+       for (i = 0; i < MT7530_NUM_PORTS; i++) {
+               /* Disable forwarding by default on all ports */
+               mt7530_rmw(priv, MT7530_PCR_P(i), PCR_MATRIX_MASK,
+                          PCR_MATRIX_CLR);
+
+               if (dsa_is_cpu_port(ds, i))
+                       mt7530_cpu_port_enable(priv, i);
+               else
+                       mt7530_port_disable(ds, i, NULL);
+       }
+
+       /* Flush the FDB table */
+       ret = mt7530_fdb_cmd(priv, MT7530_FDB_FLUSH, NULL);
+       if (ret < 0)
+               return ret;
+
+       return 0;
+}
+
+static struct dsa_switch_ops mt7530_switch_ops = {
+       .get_tag_protocol       = mtk_get_tag_protocol,
+       .setup                  = mt7530_setup,
+       .get_strings            = mt7530_get_strings,
+       .phy_read               = mt7530_phy_read,
+       .phy_write              = mt7530_phy_write,
+       .get_ethtool_stats      = mt7530_get_ethtool_stats,
+       .get_sset_count         = mt7530_get_sset_count,
+       .adjust_link            = mt7530_adjust_link,
+       .port_enable            = mt7530_port_enable,
+       .port_disable           = mt7530_port_disable,
+       .port_stp_state_set     = mt7530_stp_state_set,
+       .port_bridge_join       = mt7530_port_bridge_join,
+       .port_bridge_leave      = mt7530_port_bridge_leave,
+       .port_fdb_prepare       = mt7530_port_fdb_prepare,
+       .port_fdb_add           = mt7530_port_fdb_add,
+       .port_fdb_del           = mt7530_port_fdb_del,
+       .port_fdb_dump          = mt7530_port_fdb_dump,
+};
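
A DSA driver hands the core this ops table, and the core calls through
whichever function pointers are populated, treating absent ones as
unsupported. A compact stand-alone model of the dispatch pattern (struct and
function names below are invented for illustration, not the kernel API):

#include <stdio.h>

struct switch_ops {
        int (*setup)(void *priv);
        int (*port_enable)(void *priv, int port);
};

static int my_setup(void *priv)
{
        (void)priv;
        printf("setup called\n");
        return 0;
}

static const struct switch_ops ops = {
        .setup = my_setup,
        /* .port_enable left NULL: optional op not implemented */
};

int main(void)
{
        if (ops.setup)
                ops.setup(NULL);
        if (!ops.port_enable)
                printf("port_enable not supported\n");
        return 0;
}
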
+
+static int
+mt7530_probe(struct mdio_device *mdiodev)
+{
+       struct mt7530_priv *priv;
+       struct device_node *dn;
+
+       dn = mdiodev->dev.of_node;
+
+       priv = devm_kzalloc(&mdiodev->dev, sizeof(*priv), GFP_KERNEL);
+       if (!priv)
+               return -ENOMEM;
+
+       priv->ds = dsa_switch_alloc(&mdiodev->dev, DSA_MAX_PORTS);
+       if (!priv->ds)
+               return -ENOMEM;
+
+       /* Use the mediatek,mcm property to distinguish the hardware type,
+        * which causes slight differences in the power-on sequence.
+        */
+       priv->mcm = of_property_read_bool(dn, "mediatek,mcm");
+       if (priv->mcm) {
+               dev_info(&mdiodev->dev, "MT7530 operates as a multi-chip module\n");
+
+               priv->rstc = devm_reset_control_get(&mdiodev->dev, "mcm");
+               if (IS_ERR(priv->rstc)) {
+                       dev_err(&mdiodev->dev, "Couldn't get our reset line\n");
+                       return PTR_ERR(priv->rstc);
+               }
+       }
+
+       priv->core_pwr = devm_regulator_get(&mdiodev->dev, "core");
+       if (IS_ERR(priv->core_pwr))
+               return PTR_ERR(priv->core_pwr);
+
+       priv->io_pwr = devm_regulator_get(&mdiodev->dev, "io");
+       if (IS_ERR(priv->io_pwr))
+               return PTR_ERR(priv->io_pwr);
+
+       /* When not an MCM, the switch is a standalone integrated circuit
+        * and a GPIO pin is used to perform the reset; in the MCM case,
+        * the reset is instead performed through memory-mapped registers
+        * provided via syscon.
+        */
+       if (!priv->mcm) {
+               priv->reset = devm_gpiod_get_optional(&mdiodev->dev, "reset",
+                                                     GPIOD_OUT_LOW);
+               if (IS_ERR(priv->reset)) {
+                       dev_err(&mdiodev->dev, "Couldn't get our reset line\n");
+                       return PTR_ERR(priv->reset);
+               }
+       }
+
+       priv->bus = mdiodev->bus;
+       priv->dev = &mdiodev->dev;
+       priv->ds->priv = priv;
+       priv->ds->ops = &mt7530_switch_ops;
+       mutex_init(&priv->reg_mutex);
+       dev_set_drvdata(&mdiodev->dev, priv);
+
+       return dsa_register_switch(priv->ds, &mdiodev->dev);
+}
+
+static void
+mt7530_remove(struct mdio_device *mdiodev)
+{
+       struct mt7530_priv *priv = dev_get_drvdata(&mdiodev->dev);
+       int ret = 0;
+
+       ret = regulator_disable(priv->core_pwr);
+       if (ret < 0)
+               dev_err(priv->dev,
+                       "Failed to disable core power: %d\n", ret);
+
+       ret = regulator_disable(priv->io_pwr);
+       if (ret < 0)
+               dev_err(priv->dev, "Failed to disable io pwr: %d\n",
+                       ret);
+
+       dsa_unregister_switch(priv->ds);
+       mutex_destroy(&priv->reg_mutex);
+}
+
+static const struct of_device_id mt7530_of_match[] = {
+       { .compatible = "mediatek,mt7530" },
+       { /* sentinel */ },
+};
+
+static struct mdio_driver mt7530_mdio_driver = {
+       .probe  = mt7530_probe,
+       .remove = mt7530_remove,
+       .mdiodrv.driver = {
+               .name = "mt7530",
+               .of_match_table = mt7530_of_match,
+       },
+};
+
+mdio_module_driver(mt7530_mdio_driver);
+
+MODULE_AUTHOR("Sean Wang <sean.wang@mediatek.com>");
+MODULE_DESCRIPTION("Driver for Mediatek MT7530 Switch");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:mediatek-mt7530");
diff --git a/drivers/net/dsa/mt7530.h b/drivers/net/dsa/mt7530.h
new file mode 100644 (file)
index 0000000..b83d76b
--- /dev/null
@@ -0,0 +1,402 @@
+/*
+ * Copyright (C) 2017 Sean Wang <sean.wang@mediatek.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __MT7530_H
+#define __MT7530_H
+
+#define MT7530_NUM_PORTS               7
+#define MT7530_CPU_PORT                        6
+#define MT7530_NUM_FDB_RECORDS         2048
+
+#define        NUM_TRGMII_CTRL                 5
+
+#define TRGMII_BASE(x)                 (0x10000 + (x))
+
+/* Registers to ethsys access */
+#define ETHSYS_CLKCFG0                 0x2c
+#define  ETHSYS_TRGMII_CLK_SEL362_5    BIT(11)
+
+#define SYSC_REG_RSTCTRL               0x34
+#define  RESET_MCM                     BIT(2)
+
+/* Registers to mac forward control for unknown frames */
+#define MT7530_MFC                     0x10
+#define  BC_FFP(x)                     (((x) & 0xff) << 24)
+#define  UNM_FFP(x)                    (((x) & 0xff) << 16)
+#define  UNU_FFP(x)                    (((x) & 0xff) << 8)
+#define  UNU_FFP_MASK                  UNU_FFP(~0)
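
These field macros mask the argument and shift it into position, so feeding
~0 through the same macro, as UNU_FFP_MASK does, yields exactly the field's
mask. A self-contained check of the idiom (the register value below is
arbitrary):

#include <stdint.h>
#include <stdio.h>

#define UNU_FFP(x)      (((x) & 0xff) << 8)
#define UNU_FFP_MASK    UNU_FFP(~0)

int main(void)
{
        uint32_t reg = 0xdeadbeef;

        /* Replace the unknown-unicast flood-port field with ports 0 and 6 */
        reg = (reg & ~UNU_FFP_MASK) | UNU_FFP((1 << 0) | (1 << 6));
        printf("mask=0x%08x reg=0x%08x\n", UNU_FFP_MASK, reg);
        return 0;
}
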
+
+/* Registers for address table access */
+#define MT7530_ATA1                    0x74
+#define  STATIC_EMP                    0
+#define  STATIC_ENT                    3
+#define MT7530_ATA2                    0x78
+
+/* Register for address table write data */
+#define MT7530_ATWD                    0x7c
+
+/* Register for address table control */
+#define MT7530_ATC                     0x80
+#define  ATC_HASH(x)                   (((x) & 0xfff) << 16)
+#define  ATC_BUSY                      BIT(15)
+#define  ATC_SRCH_END                  BIT(14)
+#define  ATC_SRCH_HIT                  BIT(13)
+#define  ATC_INVALID                   BIT(12)
+#define  ATC_MAT(x)                    (((x) & 0xf) << 8)
+#define  ATC_MAT_MACTAB                        ATC_MAT(0)
+
+enum mt7530_fdb_cmd {
+       MT7530_FDB_READ = 0,
+       MT7530_FDB_WRITE = 1,
+       MT7530_FDB_FLUSH = 2,
+       MT7530_FDB_START = 4,
+       MT7530_FDB_NEXT = 5,
+};
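
FDB operations are issued through MT7530_ATC using the opcodes above. One
plausible composition of the command word, selecting the MAC table and
setting ATC_BUSY to kick the engine, is sketched below; the placement of the
opcode in the low bits is an assumption for illustration, not quoted from the
driver:

#include <stdint.h>
#include <stdio.h>

#define ATC_BUSY        (1u << 15)
#define ATC_MAT(x)      (((x) & 0xf) << 8)
#define ATC_MAT_MACTAB  ATC_MAT(0)

enum mt7530_fdb_cmd {
        MT7530_FDB_READ = 0,
        MT7530_FDB_WRITE = 1,
        MT7530_FDB_FLUSH = 2,
        MT7530_FDB_START = 4,
        MT7530_FDB_NEXT = 5,
};

/* Assumed layout: opcode in the low bits, MAC table selected, BUSY set */
static uint32_t atc_cmd(enum mt7530_fdb_cmd cmd)
{
        return ATC_BUSY | ATC_MAT_MACTAB | (cmd & 0x7);
}

int main(void)
{
        printf("START word: 0x%08x\n", atc_cmd(MT7530_FDB_START));
        printf("NEXT word:  0x%08x\n", atc_cmd(MT7530_FDB_NEXT));
        return 0;
}
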
+
+/* Registers for table search read address */
+#define MT7530_TSRA1                   0x84
+#define  MAC_BYTE_0                    24
+#define  MAC_BYTE_1                    16
+#define  MAC_BYTE_2                    8
+#define  MAC_BYTE_3                    0
+#define  MAC_BYTE_MASK                 0xff
+
+#define MT7530_TSRA2                   0x88
+#define  MAC_BYTE_4                    24
+#define  MAC_BYTE_5                    16
+#define  CVID                          0
+#define  CVID_MASK                     0xfff
+
+#define MT7530_ATRD                    0x8C
+#define         AGE_TIMER                      24
+#define  AGE_TIMER_MASK                        0xff
+#define  PORT_MAP                      4
+#define  PORT_MAP_MASK                 0xff
+#define  ENT_STATUS                    2
+#define  ENT_STATUS_MASK               0x3
+
+/* Register for vlan table control */
+#define MT7530_VTCR                    0x90
+#define  VTCR_BUSY                     BIT(31)
+#define  VTCR_FUNC(x)                  (((x) & 0xf) << 12)
+#define  VTCR_FUNC_RD_VID              0x1
+#define  VTCR_FUNC_WR_VID              0x2
+#define  VTCR_FUNC_INV_VID             0x3
+#define  VTCR_FUNC_VAL_VID             0x4
+#define  VTCR_VID(x)                   ((x) & 0xfff)
+
+/* Register for setup vlan and acl write data */
+#define MT7530_VAWD1                   0x94
+#define  PORT_STAG                     BIT(31)
+#define  IVL_MAC                       BIT(30)
+#define  PORT_MEM(x)                   (((x) & 0xff) << 16)
+#define  VALID                         BIT(1)
+
+#define MT7530_VAWD2                   0x98
+
+/* Register for port STP state control */
+#define MT7530_SSP_P(x)                        (0x2000 + ((x) * 0x100))
+#define  FID_PST(x)                    ((x) & 0x3)
+#define  FID_PST_MASK                  FID_PST(0x3)
+
+enum mt7530_stp_state {
+       MT7530_STP_DISABLED = 0,
+       MT7530_STP_BLOCKING = 1,
+       MT7530_STP_LISTENING = 1,
+       MT7530_STP_LEARNING = 2,
+       MT7530_STP_FORWARDING  = 3
+};
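
MT7530_STP_BLOCKING and MT7530_STP_LISTENING deliberately share the value 1:
the hardware collapses both non-learning, non-forwarding states into a single
port state. A sketch of the translation a port_stp_state_set callback would
perform (the mapping is assumed for illustration; the BR_STATE_* values match
include/uapi/linux/if_bridge.h):

#include <stdio.h>

enum { BR_STATE_DISABLED, BR_STATE_LISTENING, BR_STATE_LEARNING,
       BR_STATE_FORWARDING, BR_STATE_BLOCKING };

enum mt7530_stp_state {
        MT7530_STP_DISABLED = 0,
        MT7530_STP_BLOCKING = 1,
        MT7530_STP_LISTENING = 1,
        MT7530_STP_LEARNING = 2,
        MT7530_STP_FORWARDING = 3
};

static enum mt7530_stp_state to_hw(int br_state)
{
        switch (br_state) {
        case BR_STATE_LISTENING:        return MT7530_STP_LISTENING;
        case BR_STATE_LEARNING:         return MT7530_STP_LEARNING;
        case BR_STATE_FORWARDING:       return MT7530_STP_FORWARDING;
        case BR_STATE_BLOCKING:         return MT7530_STP_BLOCKING;
        default:                        return MT7530_STP_DISABLED;
        }
}

int main(void)
{
        printf("forwarding -> %d, blocking -> %d\n",
               to_hw(BR_STATE_FORWARDING), to_hw(BR_STATE_BLOCKING));
        return 0;
}
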
+
+/* Register for port control */
+#define MT7530_PCR_P(x)                        (0x2004 + ((x) * 0x100))
+#define  PORT_VLAN(x)                  ((x) & 0x3)
+#define  PCR_MATRIX(x)                 (((x) & 0xff) << 16)
+#define  PORT_PRI(x)                   (((x) & 0x7) << 24)
+#define  EG_TAG(x)                     (((x) & 0x3) << 28)
+#define  PCR_MATRIX_MASK               PCR_MATRIX(0xff)
+#define  PCR_MATRIX_CLR                        PCR_MATRIX(0)
+
+/* Register for port security control */
+#define MT7530_PSC_P(x)                        (0x200c + ((x) * 0x100))
+#define  SA_DIS                                BIT(4)
+
+/* Register for port vlan control */
+#define MT7530_PVC_P(x)                        (0x2010 + ((x) * 0x100))
+#define  PORT_SPEC_TAG                 BIT(5)
+#define  VLAN_ATTR(x)                  (((x) & 0x3) << 6)
+#define  STAG_VPID(x)                  (((x) & 0xffff) << 16)
+
+/* Register for port port-and-protocol based vlan 1 control */
+#define MT7530_PPBV1_P(x)              (0x2014 + ((x) * 0x100))
+
+/* Register for port MAC control register */
+#define MT7530_PMCR_P(x)               (0x3000 + ((x) * 0x100))
+#define  PMCR_IFG_XMIT(x)              (((x) & 0x3) << 18)
+#define  PMCR_MAC_MODE                 BIT(16)
+#define  PMCR_FORCE_MODE               BIT(15)
+#define  PMCR_TX_EN                    BIT(14)
+#define  PMCR_RX_EN                    BIT(13)
+#define  PMCR_BACKOFF_EN               BIT(9)
+#define  PMCR_BACKPR_EN                        BIT(8)
+#define  PMCR_TX_FC_EN                 BIT(5)
+#define  PMCR_RX_FC_EN                 BIT(4)
+#define  PMCR_FORCE_SPEED_1000         BIT(3)
+#define  PMCR_FORCE_FDX                        BIT(1)
+#define  PMCR_FORCE_LNK                        BIT(0)
+#define  PMCR_COMMON_LINK              (PMCR_IFG_XMIT(1) | PMCR_MAC_MODE | \
+                                        PMCR_BACKOFF_EN | PMCR_BACKPR_EN | \
+                                        PMCR_TX_EN | PMCR_RX_EN | \
+                                        PMCR_TX_FC_EN | PMCR_RX_FC_EN)
+#define  PMCR_CPUP_LINK                        (PMCR_COMMON_LINK | PMCR_FORCE_MODE | \
+                                        PMCR_FORCE_SPEED_1000 | \
+                                        PMCR_FORCE_FDX | \
+                                        PMCR_FORCE_LNK)
+#define  PMCR_USERP_LINK               PMCR_COMMON_LINK
+#define  PMCR_FIXED_LINK               (PMCR_IFG_XMIT(1) | PMCR_MAC_MODE | \
+                                        PMCR_FORCE_MODE | PMCR_TX_EN | \
+                                        PMCR_RX_EN | PMCR_BACKPR_EN | \
+                                        PMCR_BACKOFF_EN | \
+                                        PMCR_FORCE_SPEED_1000 | \
+                                        PMCR_FORCE_FDX | \
+                                        PMCR_FORCE_LNK)
+#define PMCR_FIXED_LINK_FC             (PMCR_FIXED_LINK | \
+                                        PMCR_TX_FC_EN | PMCR_RX_FC_EN)
+
+#define MT7530_PMSR_P(x)               (0x3008 + (x) * 0x100)
+
+/* Register for MIB */
+#define MT7530_PORT_MIB_COUNTER(x)     (0x4000 + (x) * 0x100)
+#define MT7530_MIB_CCR                 0x4fe0
+#define  CCR_MIB_ENABLE                        BIT(31)
+#define  CCR_RX_OCT_CNT_GOOD           BIT(7)
+#define  CCR_RX_OCT_CNT_BAD            BIT(6)
+#define  CCR_TX_OCT_CNT_GOOD           BIT(5)
+#define  CCR_TX_OCT_CNT_BAD            BIT(4)
+#define  CCR_MIB_FLUSH                 (CCR_RX_OCT_CNT_GOOD | \
+                                        CCR_RX_OCT_CNT_BAD | \
+                                        CCR_TX_OCT_CNT_GOOD | \
+                                        CCR_TX_OCT_CNT_BAD)
+#define  CCR_MIB_ACTIVATE              (CCR_MIB_ENABLE | \
+                                        CCR_RX_OCT_CNT_GOOD | \
+                                        CCR_RX_OCT_CNT_BAD | \
+                                        CCR_TX_OCT_CNT_GOOD | \
+                                        CCR_TX_OCT_CNT_BAD)
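
Resetting the counters is then a two-write sequence on MT7530_MIB_CCR: flush
the octet counters, then re-activate them with CCR_MIB_ENABLE set. A
plausible sketch of the sequence follows (write_ccr is a stand-in for the
real register write; mt7530_mib_reset(), defined earlier in this patch, may
differ in detail):

#include <stdint.h>
#include <stdio.h>

#define CCR_MIB_ENABLE          (1u << 31)
#define CCR_RX_OCT_CNT_GOOD     (1u << 7)
#define CCR_RX_OCT_CNT_BAD      (1u << 6)
#define CCR_TX_OCT_CNT_GOOD     (1u << 5)
#define CCR_TX_OCT_CNT_BAD      (1u << 4)
#define CCR_MIB_FLUSH           (CCR_RX_OCT_CNT_GOOD | CCR_RX_OCT_CNT_BAD | \
                                 CCR_TX_OCT_CNT_GOOD | CCR_TX_OCT_CNT_BAD)
#define CCR_MIB_ACTIVATE        (CCR_MIB_ENABLE | CCR_MIB_FLUSH)

static void write_ccr(uint32_t val)
{
        printf("MIB_CCR <- 0x%08x\n", val);     /* real driver uses MDIO */
}

int main(void)
{
        write_ccr(CCR_MIB_FLUSH);       /* clear the octet counters */
        write_ccr(CCR_MIB_ACTIVATE);    /* re-enable counting */
        return 0;
}
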
+/* Register for system reset */
+#define MT7530_SYS_CTRL                        0x7000
+#define  SYS_CTRL_PHY_RST              BIT(2)
+#define  SYS_CTRL_SW_RST               BIT(1)
+#define  SYS_CTRL_REG_RST              BIT(0)
+
+/* Register for hw trap status */
+#define MT7530_HWTRAP                  0x7800
+
+/* Register for hw trap modification */
+#define MT7530_MHWTRAP                 0x7804
+#define  MHWTRAP_MANUAL                        BIT(16)
+#define  MHWTRAP_P5_MAC_SEL            BIT(13)
+#define  MHWTRAP_P6_DIS                        BIT(8)
+#define  MHWTRAP_P5_RGMII_MODE         BIT(7)
+#define  MHWTRAP_P5_DIS                        BIT(6)
+#define  MHWTRAP_PHY_ACCESS            BIT(5)
+
+/* Register for TOP signal control */
+#define MT7530_TOP_SIG_CTRL            0x7808
+#define  TOP_SIG_CTRL_NORMAL           (BIT(17) | BIT(16))
+
+#define MT7530_IO_DRV_CR               0x7810
+#define  P5_IO_CLK_DRV(x)              ((x) & 0x3)
+#define  P5_IO_DATA_DRV(x)             (((x) & 0x3) << 4)
+
+#define MT7530_P6ECR                   0x7830
+#define  P6_INTF_MODE_MASK             0x3
+#define  P6_INTF_MODE(x)               ((x) & 0x3)
+
+/* Registers for TRGMII on both sides */
+#define MT7530_TRGMII_RCK_CTRL         0x7a00
+#define GSW_TRGMII_RCK_CTRL            0x300
+#define  RX_RST                                BIT(31)
+#define  RXC_DQSISEL                   BIT(30)
+#define  DQSI1_TAP_MASK                        (0x7f << 8)
+#define  DQSI0_TAP_MASK                        0x7f
+#define  DQSI1_TAP(x)                  (((x) & 0x7f) << 8)
+#define  DQSI0_TAP(x)                  ((x) & 0x7f)
+
+#define MT7530_TRGMII_RCK_RTT          0x7a04
+#define GSW_TRGMII_RCK_RTT             0x304
+#define  DQS1_GATE                     BIT(31)
+#define  DQS0_GATE                     BIT(30)
+
+#define MT7530_TRGMII_RD(x)            (0x7a10 + (x) * 8)
+#define GSW_TRGMII_RD(x)               (0x310 + (x) * 8)
+#define  BSLIP_EN                      BIT(31)
+#define  EDGE_CHK                      BIT(30)
+#define  RD_TAP_MASK                   0x7f
+#define  RD_TAP(x)                     ((x) & 0x7f)
+
+#define GSW_TRGMII_TXCTRL              0x340
+#define MT7530_TRGMII_TXCTRL           0x7a40
+#define  TRAIN_TXEN                    BIT(31)
+#define  TXC_INV                       BIT(30)
+#define  TX_RST                                BIT(28)
+
+#define MT7530_TRGMII_TD_ODT(i)                (0x7a54 + 8 * (i))
+#define GSW_TRGMII_TD_ODT(i)           (0x354 + 8 * (i))
+#define  TD_DM_DRVP(x)                 ((x) & 0xf)
+#define  TD_DM_DRVN(x)                 (((x) & 0xf) << 4)
+
+#define GSW_INTF_MODE                  0x390
+#define  INTF_MODE_TRGMII              BIT(1)
+
+#define MT7530_TRGMII_TCK_CTRL         0x7a78
+#define  TCK_TAP(x)                    (((x) & 0xf) << 8)
+
+#define MT7530_P5RGMIIRXCR             0x7b00
+#define  CSR_RGMII_EDGE_ALIGN          BIT(8)
+#define  CSR_RGMII_RXC_0DEG_CFG(x)     ((x) & 0xf)
+
+#define MT7530_P5RGMIITXCR             0x7b04
+#define  CSR_RGMII_TXC_CFG(x)          ((x) & 0x1f)
+
+#define MT7530_CREV                    0x7ffc
+#define  CHIP_NAME_SHIFT               16
+#define  MT7530_ID                     0x7530
+
+/* Registers for core PLL access through mmd indirect */
+#define CORE_PLL_GROUP2                        0x401
+#define  RG_SYSPLL_EN_NORMAL           BIT(15)
+#define  RG_SYSPLL_VODEN               BIT(14)
+#define  RG_SYSPLL_LF                  BIT(13)
+#define  RG_SYSPLL_RST_DLY(x)          (((x) & 0x3) << 12)
+#define  RG_SYSPLL_LVROD_EN            BIT(10)
+#define  RG_SYSPLL_PREDIV(x)           (((x) & 0x3) << 8)
+#define  RG_SYSPLL_POSDIV(x)           (((x) & 0x3) << 5)
+#define  RG_SYSPLL_FBKSEL              BIT(4)
+#define  RT_SYSPLL_EN_AFE_OLT          BIT(0)
+
+#define CORE_PLL_GROUP4                        0x403
+#define  RG_SYSPLL_DDSFBK_EN           BIT(12)
+#define  RG_SYSPLL_BIAS_EN             BIT(11)
+#define  RG_SYSPLL_BIAS_LPF_EN         BIT(10)
+
+#define CORE_PLL_GROUP5                        0x404
+#define  RG_LCDDS_PCW_NCPO1(x)         ((x) & 0xffff)
+
+#define CORE_PLL_GROUP6                        0x405
+#define  RG_LCDDS_PCW_NCPO0(x)         ((x) & 0xffff)
+
+#define CORE_PLL_GROUP7                        0x406
+#define  RG_LCDDS_PWDB                 BIT(15)
+#define  RG_LCDDS_ISO_EN               BIT(13)
+#define  RG_LCCDS_C(x)                 (((x) & 0x7) << 4)
+#define  RG_LCDDS_PCW_NCPO_CHG         BIT(3)
+
+#define CORE_PLL_GROUP10               0x409
+#define  RG_LCDDS_SSC_DELTA(x)         ((x) & 0xfff)
+
+#define CORE_PLL_GROUP11               0x40a
+#define  RG_LCDDS_SSC_DELTA1(x)                ((x) & 0xfff)
+
+#define CORE_GSWPLL_GRP1               0x40d
+#define  RG_GSWPLL_PREDIV(x)           (((x) & 0x3) << 14)
+#define  RG_GSWPLL_POSDIV_200M(x)      (((x) & 0x3) << 12)
+#define  RG_GSWPLL_EN_PRE              BIT(11)
+#define  RG_GSWPLL_FBKSEL              BIT(10)
+#define  RG_GSWPLL_BP                  BIT(9)
+#define  RG_GSWPLL_BR                  BIT(8)
+#define  RG_GSWPLL_FBKDIV_200M(x)      ((x) & 0xff)
+
+#define CORE_GSWPLL_GRP2               0x40e
+#define  RG_GSWPLL_POSDIV_500M(x)      (((x) & 0x3) << 8)
+#define  RG_GSWPLL_FBKDIV_500M(x)      ((x) & 0xff)
+
+#define CORE_TRGMII_GSW_CLK_CG         0x410
+#define  REG_GSWCK_EN                  BIT(0)
+#define  REG_TRGMIICK_EN               BIT(1)
+
+#define MIB_DESC(_s, _o, _n)   \
+       {                       \
+               .size = (_s),   \
+               .offset = (_o), \
+               .name = (_n),   \
+       }
+
+struct mt7530_mib_desc {
+       unsigned int size;
+       unsigned int offset;
+       const char *name;
+};
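
MIB_DESC expands to a designated initializer, so a counter table can be
declared one line per counter. A stand-alone use of the macro (the entries
below are invented and do not reflect the MT7530 MIB layout):

#include <stdio.h>

struct mt7530_mib_desc {
        unsigned int size;
        unsigned int offset;
        const char *name;
};

#define MIB_DESC(_s, _o, _n)    \
        {                       \
                .size = (_s),   \
                .offset = (_o), \
                .name = (_n),   \
        }

static const struct mt7530_mib_desc mibs[] = {
        MIB_DESC(1, 0x00, "TxDrop"),
        MIB_DESC(2, 0x18, "TxOctets"),
};

int main(void)
{
        unsigned int i;

        for (i = 0; i < sizeof(mibs) / sizeof(mibs[0]); i++)
                printf("%-10s %u word(s) at 0x%02x\n",
                       mibs[i].name, mibs[i].size, mibs[i].offset);
        return 0;
}
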
+
+struct mt7530_fdb {
+       u16 vid;
+       u8 port_mask;
+       u8 aging;
+       u8 mac[6];
+       bool noarp;
+};
+
+struct mt7530_port {
+       bool enable;
+       u32 pm;
+};
+
+/* struct mt7530_priv -        The main data structure holding the state of
+ *                     the driver
+ * @dev:               The device pointer
+ * @ds:                        The pointer to the dsa core structure
+ * @bus:               The bus used for the device and its built-in PHY
+ * @rstc:              The pointer to the reset control used by the MCM
+ * @ethernet:          The regmap used for accessing TRGMII-based registers
+ * @core_pwr:          The regulator supplying power to the core
+ * @io_pwr:            The regulator supplying power to the I/O
+ * @reset:             The descriptor for the GPIO line tied to the reset pin
+ * @mcm:               Flag distinguishing a standalone IC from a
+ *                     multi-chip module
+ * @ports:             The state of each port
+ * @reg_mutex:         The lock protecting register access across processes
+ */
+struct mt7530_priv {
+       struct device           *dev;
+       struct dsa_switch       *ds;
+       struct mii_bus          *bus;
+       struct reset_control    *rstc;
+       struct regmap           *ethernet;
+       struct regulator        *core_pwr;
+       struct regulator        *io_pwr;
+       struct gpio_desc        *reset;
+       bool                    mcm;
+
+       struct mt7530_port      ports[MT7530_NUM_PORTS];
+       /* protect register access across processes */
+       struct mutex reg_mutex;
+};
+
+struct mt7530_hw_stats {
+       const char      *string;
+       u16             reg;
+       u8              sizeof_stat;
+};
+
+struct mt7530_dummy_poll {
+       struct mt7530_priv *priv;
+       u32 reg;
+};
+
+static inline void INIT_MT7530_DUMMY_POLL(struct mt7530_dummy_poll *p,
+                                         struct mt7530_priv *priv, u32 reg)
+{
+       p->priv = priv;
+       p->reg = reg;
+}
+
+#endif /* __MT7530_H */
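
The dummy-poll cookie exists because readx_poll_timeout()-style helpers call
a read op that takes a single argument; bundling the device handle and
register offset into struct mt7530_dummy_poll lets _mt7530_read() fit that
shape. A userspace model of the pattern (an array stands in for the
MDIO-backed register file):

#include <stdint.h>
#include <stdio.h>

struct dummy_poll {
        const uint32_t *regs;   /* stand-in for the real device handle */
        uint32_t reg;
};

/* Single-argument read op, as a poll-timeout helper expects */
static uint32_t poll_read(struct dummy_poll *p)
{
        return p->regs[p->reg];
}

int main(void)
{
        uint32_t fake_regs[8] = { [3] = 0x7530 };
        struct dummy_poll p = { .regs = fake_regs, .reg = 3 };
        uint32_t val;
        int tries = 1000;

        /* Spin-until-nonzero loop standing in for
         * readx_poll_timeout(_mt7530_read, &p, val, val != 0, 20, 1000000)
         */
        do {
                val = poll_read(&p);
        } while (!val && --tries);

        printf("val=0x%x, %d tries left\n", val, tries);
        return 0;
}
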
index c36be318de1aaf1d6d936ad97194b1524112bcbd..31d37a90cec7f853940664a313296bab6743d3a2 100644 (file)
@@ -1,5 +1,6 @@
 obj-$(CONFIG_NET_DSA_MV88E6XXX) += mv88e6xxx.o
 mv88e6xxx-objs := chip.o
 mv88e6xxx-objs += global1.o
+mv88e6xxx-objs += global1_atu.o
 mv88e6xxx-$(CONFIG_NET_DSA_MV88E6XXX_GLOBAL2) += global2.o
 mv88e6xxx-objs += port.o
index 03dc886ed3d6be1747d5cef7616f2eb3074a5492..44ba8cff5631ca7360106189badc9704c2372100 100644 (file)
@@ -8,6 +8,9 @@
  *
  * Copyright (c) 2016 Andrew Lunn <andrew@lunn.ch>
  *
+ * Copyright (c) 2016-2017 Savoir-faire Linux Inc.
+ *     Vivien Didelot <vivien.didelot@savoirfairelinux.com>
+ *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
  * the Free Software Foundation; either version 2 of the License, or
@@ -687,11 +690,6 @@ static bool mv88e6xxx_6165_family(struct mv88e6xxx_chip *chip)
        return chip->info->family == MV88E6XXX_FAMILY_6165;
 }
 
-static bool mv88e6xxx_6320_family(struct mv88e6xxx_chip *chip)
-{
-       return chip->info->family == MV88E6XXX_FAMILY_6320;
-}
-
 static bool mv88e6xxx_6341_family(struct mv88e6xxx_chip *chip)
 {
        return chip->info->family == MV88E6XXX_FAMILY_6341;
@@ -1066,11 +1064,6 @@ static void mv88e6xxx_get_regs(struct dsa_switch *ds, int port,
        mutex_unlock(&chip->reg_lock);
 }
 
-static int _mv88e6xxx_atu_wait(struct mv88e6xxx_chip *chip)
-{
-       return mv88e6xxx_g1_wait(chip, GLOBAL_ATU_OP, GLOBAL_ATU_OP_BUSY);
-}
-
 static int mv88e6xxx_get_eee(struct dsa_switch *ds, int port,
                             struct ethtool_eee *e)
 {
@@ -1130,143 +1123,42 @@ out:
        return err;
 }
 
-static int _mv88e6xxx_atu_cmd(struct mv88e6xxx_chip *chip, u16 fid, u16 cmd)
-{
-       u16 val;
-       int err;
-
-       if (mv88e6xxx_has(chip, MV88E6XXX_FLAG_G1_ATU_FID)) {
-               err = mv88e6xxx_g1_write(chip, GLOBAL_ATU_FID, fid);
-               if (err)
-                       return err;
-       } else if (mv88e6xxx_num_databases(chip) == 256) {
-               /* ATU DBNum[7:4] are located in ATU Control 15:12 */
-               err = mv88e6xxx_g1_read(chip, GLOBAL_ATU_CONTROL, &val);
-               if (err)
-                       return err;
-
-               err = mv88e6xxx_g1_write(chip, GLOBAL_ATU_CONTROL,
-                                        (val & 0xfff) | ((fid << 8) & 0xf000));
-               if (err)
-                       return err;
-
-               /* ATU DBNum[3:0] are located in ATU Operation 3:0 */
-               cmd |= fid & 0xf;
-       }
-
-       err = mv88e6xxx_g1_write(chip, GLOBAL_ATU_OP, cmd);
-       if (err)
-               return err;
-
-       return _mv88e6xxx_atu_wait(chip);
-}
-
-static int _mv88e6xxx_atu_data_write(struct mv88e6xxx_chip *chip,
-                                    struct mv88e6xxx_atu_entry *entry)
-{
-       u16 data = entry->state & GLOBAL_ATU_DATA_STATE_MASK;
-
-       if (entry->state != GLOBAL_ATU_DATA_STATE_UNUSED) {
-               unsigned int mask, shift;
-
-               if (entry->trunk) {
-                       data |= GLOBAL_ATU_DATA_TRUNK;
-                       mask = GLOBAL_ATU_DATA_TRUNK_ID_MASK;
-                       shift = GLOBAL_ATU_DATA_TRUNK_ID_SHIFT;
-               } else {
-                       mask = GLOBAL_ATU_DATA_PORT_VECTOR_MASK;
-                       shift = GLOBAL_ATU_DATA_PORT_VECTOR_SHIFT;
-               }
-
-               data |= (entry->portv_trunkid << shift) & mask;
-       }
-
-       return mv88e6xxx_g1_write(chip, GLOBAL_ATU_DATA, data);
-}
-
-static int _mv88e6xxx_atu_flush_move(struct mv88e6xxx_chip *chip,
-                                    struct mv88e6xxx_atu_entry *entry,
-                                    bool static_too)
+static u16 mv88e6xxx_port_vlan(struct mv88e6xxx_chip *chip, int dev, int port)
 {
-       int op;
-       int err;
-
-       err = _mv88e6xxx_atu_wait(chip);
-       if (err)
-               return err;
-
-       err = _mv88e6xxx_atu_data_write(chip, entry);
-       if (err)
-               return err;
-
-       if (entry->fid) {
-               op = static_too ? GLOBAL_ATU_OP_FLUSH_MOVE_ALL_DB :
-                       GLOBAL_ATU_OP_FLUSH_MOVE_NON_STATIC_DB;
-       } else {
-               op = static_too ? GLOBAL_ATU_OP_FLUSH_MOVE_ALL :
-                       GLOBAL_ATU_OP_FLUSH_MOVE_NON_STATIC;
-       }
-
-       return _mv88e6xxx_atu_cmd(chip, entry->fid, op);
-}
-
-static int _mv88e6xxx_atu_flush(struct mv88e6xxx_chip *chip,
-                               u16 fid, bool static_too)
-{
-       struct mv88e6xxx_atu_entry entry = {
-               .fid = fid,
-               .state = 0, /* EntryState bits must be 0 */
-       };
+       struct dsa_switch *ds = NULL;
+       struct net_device *br;
+       u16 pvlan;
+       int i;
 
-       return _mv88e6xxx_atu_flush_move(chip, &entry, static_too);
-}
+       if (dev < DSA_MAX_SWITCHES)
+               ds = chip->ds->dst->ds[dev];
 
-static int _mv88e6xxx_atu_move(struct mv88e6xxx_chip *chip, u16 fid,
-                              int from_port, int to_port, bool static_too)
-{
-       struct mv88e6xxx_atu_entry entry = {
-               .trunk = false,
-               .fid = fid,
-       };
+       /* Prevent frames from unknown switch or port */
+       if (!ds || port >= ds->num_ports)
+               return 0;
 
-       /* EntryState bits must be 0xF */
-       entry.state = GLOBAL_ATU_DATA_STATE_MASK;
+       /* Frames from DSA links and CPU ports can egress any local port */
+       if (dsa_is_cpu_port(ds, port) || dsa_is_dsa_port(ds, port))
+               return mv88e6xxx_port_mask(chip);
 
-       /* ToPort and FromPort are respectively in PortVec bits 7:4 and 3:0 */
-       entry.portv_trunkid = (to_port & 0x0f) << 4;
-       entry.portv_trunkid |= from_port & 0x0f;
+       br = ds->ports[port].bridge_dev;
+       pvlan = 0;
 
-       return _mv88e6xxx_atu_flush_move(chip, &entry, static_too);
-}
+       /* Frames from user ports can egress any local DSA links and CPU ports,
+        * as well as any local member of their bridge group.
+        */
+       for (i = 0; i < mv88e6xxx_num_ports(chip); ++i)
+               if (dsa_is_cpu_port(chip->ds, i) ||
+                   dsa_is_dsa_port(chip->ds, i) ||
+                   (br && chip->ds->ports[i].bridge_dev == br))
+                       pvlan |= BIT(i);
 
-static int _mv88e6xxx_atu_remove(struct mv88e6xxx_chip *chip, u16 fid,
-                                int port, bool static_too)
-{
-       /* Destination port 0xF means remove the entries */
-       return _mv88e6xxx_atu_move(chip, fid, port, 0x0f, static_too);
+       return pvlan;
 }
 
-static int _mv88e6xxx_port_based_vlan_map(struct mv88e6xxx_chip *chip, int port)
+static int mv88e6xxx_port_vlan_map(struct mv88e6xxx_chip *chip, int port)
 {
-       struct dsa_switch *ds = chip->ds;
-       struct net_device *bridge = ds->ports[port].bridge_dev;
-       u16 output_ports = 0;
-       int i;
-
-       /* allow CPU port or DSA link(s) to send frames to every port */
-       if (dsa_is_cpu_port(ds, port) || dsa_is_dsa_port(ds, port)) {
-               output_ports = ~0;
-       } else {
-               for (i = 0; i < mv88e6xxx_num_ports(chip); ++i) {
-                       /* allow sending frames to every group member */
-                       if (bridge && ds->ports[i].bridge_dev == bridge)
-                               output_ports |= BIT(i);
-
-                       /* allow sending frames to CPU port and DSA link(s) */
-                       if (dsa_is_cpu_port(ds, i) || dsa_is_dsa_port(ds, i))
-                               output_ports |= BIT(i);
-               }
-       }
+       u16 output_ports = mv88e6xxx_port_vlan(chip, chip->ds->index, port);
 
        /* prevent frames from going back out of the port they came in on */
        output_ports &= ~BIT(port);
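
mv88e6xxx_port_vlan() above computes, for a source switch and port, the set
of local ports its frames may egress: everything for CPU and DSA sources, and
only CPU/DSA ports plus same-bridge peers for user ports. A toy model of that
policy (the port roles and bridge memberships below are invented); note that
the in-chip caller then clears the source port's own bit, as the
"prevent frames from going back out" line shows:

#include <stdint.h>
#include <stdio.h>

#define NUM_PORTS 7

enum port_kind { USER, CPU, DSA_LINK };

/* Port 5 is the CPU port, port 6 a DSA link; ports 1 and 2 share bridge 1 */
static const enum port_kind kind[NUM_PORTS] = {
        USER, USER, USER, USER, USER, CPU, DSA_LINK };
static const int bridge[NUM_PORTS] = { 0, 1, 1, 0, 0, 0, 0 };

static uint16_t port_vlan(int src)
{
        uint16_t pvlan = 0;
        int i;

        /* CPU and DSA sources may egress any local port */
        if (kind[src] != USER)
                return (1 << NUM_PORTS) - 1;

        /* User ports may egress CPU/DSA ports and bridge group members */
        for (i = 0; i < NUM_PORTS; i++)
                if (kind[i] != USER ||
                    (bridge[src] && bridge[i] == bridge[src]))
                        pvlan |= 1 << i;

        return pvlan;
}

int main(void)
{
        printf("port 1 -> 0x%02x\n", port_vlan(1));     /* bits 1,2,5,6 */
        printf("port 3 -> 0x%02x\n", port_vlan(3));     /* bits 5,6 only */
        return 0;
}
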
@@ -1306,13 +1198,68 @@ static void mv88e6xxx_port_stp_state_set(struct dsa_switch *ds, int port,
                netdev_err(ds->ports[port].netdev, "failed to update state\n");
 }
 
+static int mv88e6xxx_atu_setup(struct mv88e6xxx_chip *chip)
+{
+       int err;
+
+       err = mv88e6xxx_g1_atu_flush(chip, 0, true);
+       if (err)
+               return err;
+
+       err = mv88e6xxx_g1_atu_set_learn2all(chip, true);
+       if (err)
+               return err;
+
+       return mv88e6xxx_g1_atu_set_age_time(chip, 300000);
+}
+
+static int mv88e6xxx_pvt_map(struct mv88e6xxx_chip *chip, int dev, int port)
+{
+       u16 pvlan = 0;
+
+       if (!mv88e6xxx_has_pvt(chip))
+               return -EOPNOTSUPP;
+
+       /* Skip the local source device, which uses in-chip port VLAN */
+       if (dev != chip->ds->index)
+               pvlan = mv88e6xxx_port_vlan(chip, dev, port);
+
+       return mv88e6xxx_g2_pvt_write(chip, dev, port, pvlan);
+}
+
+static int mv88e6xxx_pvt_setup(struct mv88e6xxx_chip *chip)
+{
+       int dev, port;
+       int err;
+
+       if (!mv88e6xxx_has_pvt(chip))
+               return 0;
+
+       /* Clear the 5-bit port mode for use with Marvell Link Street
+        * devices: use 4 bits for the Src_Port/Src_Trunk and 5 bits for
+        * the Src_Dev.
+        */
+       err = mv88e6xxx_g2_misc_4_bit_port(chip);
+       if (err)
+               return err;
+
+       for (dev = 0; dev < MV88E6XXX_MAX_PVT_SWITCHES; ++dev) {
+               for (port = 0; port < MV88E6XXX_MAX_PVT_PORTS; ++port) {
+                       err = mv88e6xxx_pvt_map(chip, dev, port);
+                       if (err)
+                               return err;
+               }
+       }
+
+       return 0;
+}
+
 static void mv88e6xxx_port_fast_age(struct dsa_switch *ds, int port)
 {
        struct mv88e6xxx_chip *chip = ds->priv;
        int err;
 
        mutex_lock(&chip->reg_lock);
-       err = _mv88e6xxx_atu_remove(chip, 0, port, false);
+       err = mv88e6xxx_g1_atu_remove(chip, 0, port, false);
        mutex_unlock(&chip->reg_lock);
 
        if (err)
@@ -1662,7 +1609,7 @@ loadpurge:
        return _mv88e6xxx_vtu_cmd(chip, GLOBAL_VTU_OP_STU_LOAD_PURGE);
 }
 
-static int _mv88e6xxx_fid_new(struct mv88e6xxx_chip *chip, u16 *fid)
+static int mv88e6xxx_atu_new(struct mv88e6xxx_chip *chip, u16 *fid)
 {
        DECLARE_BITMAP(fid_bitmap, MV88E6XXX_N_FID);
        struct mv88e6xxx_vtu_entry vlan;
@@ -1703,7 +1650,7 @@ static int _mv88e6xxx_fid_new(struct mv88e6xxx_chip *chip, u16 *fid)
                return -ENOSPC;
 
        /* Clear the database */
-       return _mv88e6xxx_atu_flush(chip, *fid, true);
+       return mv88e6xxx_g1_atu_flush(chip, *fid, true);
 }
 
 static int _mv88e6xxx_vtu_new(struct mv88e6xxx_chip *chip, u16 vid,
@@ -1716,7 +1663,7 @@ static int _mv88e6xxx_vtu_new(struct mv88e6xxx_chip *chip, u16 vid,
        };
        int i, err;
 
-       err = _mv88e6xxx_fid_new(chip, &vlan.fid);
+       err = mv88e6xxx_atu_new(chip, &vlan.fid);
        if (err)
                return err;
 
@@ -1964,7 +1911,7 @@ static int _mv88e6xxx_port_vlan_del(struct mv88e6xxx_chip *chip,
        if (err)
                return err;
 
-       return _mv88e6xxx_atu_remove(chip, vlan.fid, port, false);
+       return mv88e6xxx_g1_atu_remove(chip, vlan.fid, port, false);
 }
 
 static int mv88e6xxx_port_vlan_del(struct dsa_switch *ds, int port,
@@ -2001,96 +1948,6 @@ unlock:
        return err;
 }
 
-static int _mv88e6xxx_atu_mac_write(struct mv88e6xxx_chip *chip,
-                                   const unsigned char *addr)
-{
-       int i, err;
-
-       for (i = 0; i < 3; i++) {
-               err = mv88e6xxx_g1_write(chip, GLOBAL_ATU_MAC_01 + i,
-                                        (addr[i * 2] << 8) | addr[i * 2 + 1]);
-               if (err)
-                       return err;
-       }
-
-       return 0;
-}
-
-static int _mv88e6xxx_atu_mac_read(struct mv88e6xxx_chip *chip,
-                                  unsigned char *addr)
-{
-       u16 val;
-       int i, err;
-
-       for (i = 0; i < 3; i++) {
-               err = mv88e6xxx_g1_read(chip, GLOBAL_ATU_MAC_01 + i, &val);
-               if (err)
-                       return err;
-
-               addr[i * 2] = val >> 8;
-               addr[i * 2 + 1] = val & 0xff;
-       }
-
-       return 0;
-}
-
-static int _mv88e6xxx_atu_load(struct mv88e6xxx_chip *chip,
-                              struct mv88e6xxx_atu_entry *entry)
-{
-       int ret;
-
-       ret = _mv88e6xxx_atu_wait(chip);
-       if (ret < 0)
-               return ret;
-
-       ret = _mv88e6xxx_atu_mac_write(chip, entry->mac);
-       if (ret < 0)
-               return ret;
-
-       ret = _mv88e6xxx_atu_data_write(chip, entry);
-       if (ret < 0)
-               return ret;
-
-       return _mv88e6xxx_atu_cmd(chip, entry->fid, GLOBAL_ATU_OP_LOAD_DB);
-}
-
-static int _mv88e6xxx_atu_getnext(struct mv88e6xxx_chip *chip, u16 fid,
-                                 struct mv88e6xxx_atu_entry *entry);
-
-static int mv88e6xxx_atu_get(struct mv88e6xxx_chip *chip, int fid,
-                            const u8 *addr, struct mv88e6xxx_atu_entry *entry)
-{
-       struct mv88e6xxx_atu_entry next;
-       int err;
-
-       memcpy(next.mac, addr, ETH_ALEN);
-       eth_addr_dec(next.mac);
-
-       err = _mv88e6xxx_atu_mac_write(chip, next.mac);
-       if (err)
-               return err;
-
-       do {
-               err = _mv88e6xxx_atu_getnext(chip, fid, &next);
-               if (err)
-                       return err;
-
-               if (next.state == GLOBAL_ATU_DATA_STATE_UNUSED)
-                       break;
-
-               if (ether_addr_equal(next.mac, addr)) {
-                       *entry = next;
-                       return 0;
-               }
-       } while (ether_addr_greater(addr, next.mac));
-
-       memset(entry, 0, sizeof(*entry));
-       entry->fid = fid;
-       ether_addr_copy(entry->mac, addr);
-
-       return 0;
-}
-
 static int mv88e6xxx_port_db_load_purge(struct mv88e6xxx_chip *chip, int port,
                                        const unsigned char *addr, u16 vid,
                                        u8 state)
@@ -2107,21 +1964,32 @@ static int mv88e6xxx_port_db_load_purge(struct mv88e6xxx_chip *chip, int port,
        if (err)
                return err;
 
-       err = mv88e6xxx_atu_get(chip, vlan.fid, addr, &entry);
+       entry.state = GLOBAL_ATU_DATA_STATE_UNUSED;
+       ether_addr_copy(entry.mac, addr);
+       eth_addr_dec(entry.mac);
+
+       err = mv88e6xxx_g1_atu_getnext(chip, vlan.fid, &entry);
        if (err)
                return err;
 
+       /* Initialize a fresh ATU entry if it isn't found */
+       if (entry.state == GLOBAL_ATU_DATA_STATE_UNUSED ||
+           !ether_addr_equal(entry.mac, addr)) {
+               memset(&entry, 0, sizeof(entry));
+               ether_addr_copy(entry.mac, addr);
+       }
+
        /* Purge the ATU entry only if no port is using it anymore */
        if (state == GLOBAL_ATU_DATA_STATE_UNUSED) {
-               entry.portv_trunkid &= ~BIT(port);
-               if (!entry.portv_trunkid)
+               entry.portvec &= ~BIT(port);
+               if (!entry.portvec)
                        entry.state = GLOBAL_ATU_DATA_STATE_UNUSED;
        } else {
-               entry.portv_trunkid |= BIT(port);
+               entry.portvec |= BIT(port);
                entry.state = state;
        }
 
-       return _mv88e6xxx_atu_load(chip, &entry);
+       return mv88e6xxx_g1_atu_loadpurge(chip, vlan.fid, &entry);
 }
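
The load/purge path now reuses the GetNext engine for lookups: decrement the
target MAC with eth_addr_dec() and issue GetNext, and the entry itself is
returned first if it exists in the (sorted) address table. The
decrement-with-borrow step looks like this (a minimal sketch of what
eth_addr_dec() does; the helper itself lives elsewhere in the tree):

#include <stdint.h>
#include <stdio.h>

static void mac_dec(uint8_t mac[6])
{
        int i;

        /* Subtract 1, borrowing from higher bytes across zeros */
        for (i = 5; i >= 0; i--)
                if (mac[i]-- != 0)
                        break;
}

int main(void)
{
        uint8_t mac[6] = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x00 };

        mac_dec(mac);
        printf("%02x:%02x:%02x:%02x:%02x:%02x\n",      /* 00:11:22:33:43:ff */
               mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
        return 0;
}
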
 
 static int mv88e6xxx_port_fdb_prepare(struct dsa_switch *ds, int port,
@@ -2161,75 +2029,26 @@ static int mv88e6xxx_port_fdb_del(struct dsa_switch *ds, int port,
        return err;
 }
 
-static int _mv88e6xxx_atu_getnext(struct mv88e6xxx_chip *chip, u16 fid,
-                                 struct mv88e6xxx_atu_entry *entry)
-{
-       struct mv88e6xxx_atu_entry next = { 0 };
-       u16 val;
-       int err;
-
-       next.fid = fid;
-
-       err = _mv88e6xxx_atu_wait(chip);
-       if (err)
-               return err;
-
-       err = _mv88e6xxx_atu_cmd(chip, fid, GLOBAL_ATU_OP_GET_NEXT_DB);
-       if (err)
-               return err;
-
-       err = _mv88e6xxx_atu_mac_read(chip, next.mac);
-       if (err)
-               return err;
-
-       err = mv88e6xxx_g1_read(chip, GLOBAL_ATU_DATA, &val);
-       if (err)
-               return err;
-
-       next.state = val & GLOBAL_ATU_DATA_STATE_MASK;
-       if (next.state != GLOBAL_ATU_DATA_STATE_UNUSED) {
-               unsigned int mask, shift;
-
-               if (val & GLOBAL_ATU_DATA_TRUNK) {
-                       next.trunk = true;
-                       mask = GLOBAL_ATU_DATA_TRUNK_ID_MASK;
-                       shift = GLOBAL_ATU_DATA_TRUNK_ID_SHIFT;
-               } else {
-                       next.trunk = false;
-                       mask = GLOBAL_ATU_DATA_PORT_VECTOR_MASK;
-                       shift = GLOBAL_ATU_DATA_PORT_VECTOR_SHIFT;
-               }
-
-               next.portv_trunkid = (val & mask) >> shift;
-       }
-
-       *entry = next;
-       return 0;
-}
-
 static int mv88e6xxx_port_db_dump_fid(struct mv88e6xxx_chip *chip,
                                      u16 fid, u16 vid, int port,
                                      struct switchdev_obj *obj,
                                      int (*cb)(struct switchdev_obj *obj))
 {
-       struct mv88e6xxx_atu_entry addr = {
-               .mac = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff },
-       };
+       struct mv88e6xxx_atu_entry addr;
        int err;
 
-       err = _mv88e6xxx_atu_mac_write(chip, addr.mac);
-       if (err)
-               return err;
+       addr.state = GLOBAL_ATU_DATA_STATE_UNUSED;
+       eth_broadcast_addr(addr.mac);
 
        do {
-               err = _mv88e6xxx_atu_getnext(chip, fid, &addr);
+               err = mv88e6xxx_g1_atu_getnext(chip, fid, &addr);
                if (err)
                        return err;
 
                if (addr.state == GLOBAL_ATU_DATA_STATE_UNUSED)
                        break;
 
-               if (addr.trunk || (addr.portv_trunkid & BIT(port)) == 0)
+               if (addr.trunk || (addr.portvec & BIT(port)) == 0)
                        continue;
 
                if (obj->id == SWITCHDEV_OBJ_ID_PORT_FDB) {
@@ -2321,23 +2140,52 @@ static int mv88e6xxx_port_fdb_dump(struct dsa_switch *ds, int port,
        return err;
 }
 
-static int mv88e6xxx_port_bridge_join(struct dsa_switch *ds, int port,
-                                     struct net_device *br)
+static int mv88e6xxx_bridge_map(struct mv88e6xxx_chip *chip,
+                               struct net_device *br)
 {
-       struct mv88e6xxx_chip *chip = ds->priv;
-       int i, err = 0;
-
-       mutex_lock(&chip->reg_lock);
+       struct dsa_switch *ds;
+       int port;
+       int dev;
+       int err;
 
-       /* Remap each port's VLANTable */
-       for (i = 0; i < mv88e6xxx_num_ports(chip); ++i) {
-               if (ds->ports[i].bridge_dev == br) {
-                       err = _mv88e6xxx_port_based_vlan_map(chip, i);
+       /* Remap the Port VLAN of each local bridge group member */
+       for (port = 0; port < mv88e6xxx_num_ports(chip); ++port) {
+               if (chip->ds->ports[port].bridge_dev == br) {
+                       err = mv88e6xxx_port_vlan_map(chip, port);
                        if (err)
-                               break;
+                               return err;
                }
        }
 
+       if (!mv88e6xxx_has_pvt(chip))
+               return 0;
+
+       /* Remap the Port VLAN of each cross-chip bridge group member */
+       for (dev = 0; dev < DSA_MAX_SWITCHES; ++dev) {
+               ds = chip->ds->dst->ds[dev];
+               if (!ds)
+                       break;
+
+               for (port = 0; port < ds->num_ports; ++port) {
+                       if (ds->ports[port].bridge_dev == br) {
+                               err = mv88e6xxx_pvt_map(chip, dev, port);
+                               if (err)
+                                       return err;
+                       }
+               }
+       }
+
+       return 0;
+}
+
+static int mv88e6xxx_port_bridge_join(struct dsa_switch *ds, int port,
+                                     struct net_device *br)
+{
+       struct mv88e6xxx_chip *chip = ds->priv;
+       int err;
+
+       mutex_lock(&chip->reg_lock);
+       err = mv88e6xxx_bridge_map(chip, br);
        mutex_unlock(&chip->reg_lock);
 
        return err;
@@ -2347,17 +2195,41 @@ static void mv88e6xxx_port_bridge_leave(struct dsa_switch *ds, int port,
                                        struct net_device *br)
 {
        struct mv88e6xxx_chip *chip = ds->priv;
-       int i;
 
        mutex_lock(&chip->reg_lock);
+       if (mv88e6xxx_bridge_map(chip, br) ||
+           mv88e6xxx_port_vlan_map(chip, port))
+               dev_err(ds->dev, "failed to remap in-chip Port VLAN\n");
+       mutex_unlock(&chip->reg_lock);
+}
 
-       /* Remap each port's VLANTable */
-       for (i = 0; i < mv88e6xxx_num_ports(chip); ++i)
-               if (i == port || ds->ports[i].bridge_dev == br)
-                       if (_mv88e6xxx_port_based_vlan_map(chip, i))
-                               netdev_warn(ds->ports[i].netdev,
-                                           "failed to remap\n");
+static int mv88e6xxx_crosschip_bridge_join(struct dsa_switch *ds, int dev,
+                                          int port, struct net_device *br)
+{
+       struct mv88e6xxx_chip *chip = ds->priv;
+       int err;
+
+       if (!mv88e6xxx_has_pvt(chip))
+               return 0;
 
+       mutex_lock(&chip->reg_lock);
+       err = mv88e6xxx_pvt_map(chip, dev, port);
+       mutex_unlock(&chip->reg_lock);
+
+       return err;
+}
+
+static void mv88e6xxx_crosschip_bridge_leave(struct dsa_switch *ds, int dev,
+                                            int port, struct net_device *br)
+{
+       struct mv88e6xxx_chip *chip = ds->priv;
+
+       if (!mv88e6xxx_has_pvt(chip))
+               return;
+
+       mutex_lock(&chip->reg_lock);
+       if (mv88e6xxx_pvt_map(chip, dev, port))
+               dev_err(ds->dev, "failed to remap cross-chip Port VLAN\n");
        mutex_unlock(&chip->reg_lock);
 }
 
@@ -2433,70 +2305,85 @@ static int mv88e6xxx_serdes_power_on(struct mv88e6xxx_chip *chip)
        return err;
 }
 
-static int mv88e6xxx_setup_port_dsa(struct mv88e6xxx_chip *chip, int port,
-                                   int upstream_port)
+static int mv88e6xxx_set_port_mode(struct mv88e6xxx_chip *chip, int port,
+                                  enum mv88e6xxx_frame_mode frame, u16 egress,
+                                  u16 etype)
 {
        int err;
 
-       err = chip->info->ops->port_set_frame_mode(
-               chip, port, MV88E6XXX_FRAME_MODE_DSA);
+       if (!chip->info->ops->port_set_frame_mode)
+               return -EOPNOTSUPP;
+
+       err = mv88e6xxx_port_set_egress_mode(chip, port, egress);
        if (err)
                return err;
 
-       return chip->info->ops->port_set_egress_unknowns(
-               chip, port, port == upstream_port);
+       err = chip->info->ops->port_set_frame_mode(chip, port, frame);
+       if (err)
+               return err;
+
+       if (chip->info->ops->port_set_ether_type)
+               return chip->info->ops->port_set_ether_type(chip, port, etype);
+
+       return 0;
 }
 
-static int mv88e6xxx_setup_port_cpu(struct mv88e6xxx_chip *chip, int port)
+static int mv88e6xxx_set_port_mode_normal(struct mv88e6xxx_chip *chip, int port)
 {
-       int err;
+       return mv88e6xxx_set_port_mode(chip, port, MV88E6XXX_FRAME_MODE_NORMAL,
+                                      PORT_CONTROL_EGRESS_UNMODIFIED,
+                                      PORT_ETH_TYPE_DEFAULT);
+}
 
-       switch (chip->info->tag_protocol) {
-       case DSA_TAG_PROTO_EDSA:
-               err = chip->info->ops->port_set_frame_mode(
-                       chip, port, MV88E6XXX_FRAME_MODE_ETHERTYPE);
-               if (err)
-                       return err;
+static int mv88e6xxx_set_port_mode_dsa(struct mv88e6xxx_chip *chip, int port)
+{
+       return mv88e6xxx_set_port_mode(chip, port, MV88E6XXX_FRAME_MODE_DSA,
+                                      PORT_CONTROL_EGRESS_UNMODIFIED,
+                                      PORT_ETH_TYPE_DEFAULT);
+}
 
-               err = mv88e6xxx_port_set_egress_mode(
-                       chip, port, PORT_CONTROL_EGRESS_ADD_TAG);
-               if (err)
-                       return err;
+static int mv88e6xxx_set_port_mode_edsa(struct mv88e6xxx_chip *chip, int port)
+{
+       return mv88e6xxx_set_port_mode(chip, port,
+                                      MV88E6XXX_FRAME_MODE_ETHERTYPE,
+                                      PORT_CONTROL_EGRESS_ADD_TAG, ETH_P_EDSA);
+}
 
-               if (chip->info->ops->port_set_ether_type)
-                       err = chip->info->ops->port_set_ether_type(
-                               chip, port, ETH_P_EDSA);
-               break;
+static int mv88e6xxx_setup_port_mode(struct mv88e6xxx_chip *chip, int port)
+{
+       if (dsa_is_dsa_port(chip->ds, port))
+               return mv88e6xxx_set_port_mode_dsa(chip, port);
 
-       case DSA_TAG_PROTO_DSA:
-               err = chip->info->ops->port_set_frame_mode(
-                       chip, port, MV88E6XXX_FRAME_MODE_DSA);
-               if (err)
-                       return err;
+       if (dsa_is_normal_port(chip->ds, port))
+               return mv88e6xxx_set_port_mode_normal(chip, port);
 
-               err = mv88e6xxx_port_set_egress_mode(
-                       chip, port, PORT_CONTROL_EGRESS_UNMODIFIED);
-               break;
-       default:
-               err = -EINVAL;
-       }
+       /* Setup CPU port mode depending on its supported tag format */
+       if (chip->info->tag_protocol == DSA_TAG_PROTO_DSA)
+               return mv88e6xxx_set_port_mode_dsa(chip, port);
 
-       if (err)
-               return err;
+       if (chip->info->tag_protocol == DSA_TAG_PROTO_EDSA)
+               return mv88e6xxx_set_port_mode_edsa(chip, port);
 
-       return chip->info->ops->port_set_egress_unknowns(chip, port, true);
+       return -EINVAL;
 }
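
Port mode setup is thus reduced to a role-based dispatch: DSA and user ports
get fixed modes, while the CPU port follows the chip's supported tag
protocol. A compact model of the dispatch (enum names below are stand-ins for
the kernel's types):

#include <stdio.h>

enum role { ROLE_DSA, ROLE_NORMAL, ROLE_CPU };
enum tag { TAG_DSA, TAG_EDSA };

static const char *port_mode(enum role role, enum tag tag)
{
        if (role == ROLE_DSA)
                return "dsa";
        if (role == ROLE_NORMAL)
                return "normal";
        /* CPU port: mode depends on the tagging format */
        return tag == TAG_DSA ? "dsa" : "edsa";
}

int main(void)
{
        printf("cpu port, EDSA tagging -> %s mode\n",
               port_mode(ROLE_CPU, TAG_EDSA));
        return 0;
}
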
 
-static int mv88e6xxx_setup_port_normal(struct mv88e6xxx_chip *chip, int port)
+static int mv88e6xxx_setup_message_port(struct mv88e6xxx_chip *chip, int port)
 {
-       int err;
+       bool message = dsa_is_dsa_port(chip->ds, port);
 
-       err = chip->info->ops->port_set_frame_mode(
-               chip, port, MV88E6XXX_FRAME_MODE_NORMAL);
-       if (err)
-               return err;
+       return mv88e6xxx_port_set_message_port(chip, port, message);
+}
 
-       return chip->info->ops->port_set_egress_unknowns(chip, port, false);
+static int mv88e6xxx_setup_egress_floods(struct mv88e6xxx_chip *chip, int port)
+{
+       bool flood = port == dsa_upstream_port(chip->ds);
+
+       /* Upstream ports flood frames with unknown unicast or multicast DA */
+       if (chip->info->ops->port_set_egress_floods)
+               return chip->info->ops->port_set_egress_floods(chip, port,
+                                                              flood, flood);
+
+       return 0;
 }
 
 static int mv88e6xxx_setup_port(struct mv88e6xxx_chip *chip, int port)
@@ -2541,14 +2428,11 @@ static int mv88e6xxx_setup_port(struct mv88e6xxx_chip *chip, int port)
        if (err)
                return err;
 
-       if (dsa_is_cpu_port(ds, port)) {
-               err = mv88e6xxx_setup_port_cpu(chip, port);
-       } else if (dsa_is_dsa_port(ds, port)) {
-               err = mv88e6xxx_setup_port_dsa(chip, port,
-                                              dsa_upstream_port(ds));
-       } else {
-               err = mv88e6xxx_setup_port_normal(chip, port);
-       }
+       err = mv88e6xxx_setup_port_mode(chip, port);
+       if (err)
+               return err;
+
+       err = mv88e6xxx_setup_egress_floods(chip, port);
        if (err)
                return err;
 
@@ -2623,20 +2507,14 @@ static int mv88e6xxx_setup_port(struct mv88e6xxx_chip *chip, int port)
                        return err;
        }
 
-       if (mv88e6xxx_6352_family(chip) || mv88e6xxx_6351_family(chip) ||
-           mv88e6xxx_6165_family(chip) || mv88e6xxx_6097_family(chip) ||
-           mv88e6xxx_6320_family(chip) || mv88e6xxx_6341_family(chip)) {
-               /* Port ATU control: disable limiting the number of
-                * address database entries that this port is allowed
-                * to use.
-                */
-               err = mv88e6xxx_port_write(chip, port, PORT_ATU_CONTROL,
-                                          0x0000);
-               /* Priority Override: disable DA, SA and VTU priority
-                * override.
-                */
-               err = mv88e6xxx_port_write(chip, port, PORT_PRI_OVERRIDE,
-                                          0x0000);
+       if (chip->info->ops->port_disable_learn_limit) {
+               err = chip->info->ops->port_disable_learn_limit(chip, port);
+               if (err)
+                       return err;
+       }
+
+       if (chip->info->ops->port_disable_pri_override) {
+               err = chip->info->ops->port_disable_pri_override(chip, port);
                if (err)
                        return err;
        }
@@ -2653,10 +2531,7 @@ static int mv88e6xxx_setup_port(struct mv88e6xxx_chip *chip, int port)
                        return err;
        }
 
-       /* Port Control 1: disable trunking, disable sending
-        * learning messages to this port.
-        */
-       err = mv88e6xxx_port_write(chip, port, PORT_CONTROL_1, 0x0000);
+       err = mv88e6xxx_setup_message_port(chip, port);
        if (err)
                return err;
 
@@ -2668,7 +2543,7 @@ static int mv88e6xxx_setup_port(struct mv88e6xxx_chip *chip, int port)
        if (err)
                return err;
 
-       err = _mv88e6xxx_port_based_vlan_map(chip, port);
+       err = mv88e6xxx_port_vlan_map(chip, port);
        if (err)
                return err;
 
@@ -2697,33 +2572,6 @@ static int mv88e6xxx_g1_set_switch_mac(struct mv88e6xxx_chip *chip, u8 *addr)
        return 0;
 }
 
-static int mv88e6xxx_g1_set_age_time(struct mv88e6xxx_chip *chip,
-                                    unsigned int msecs)
-{
-       const unsigned int coeff = chip->info->age_time_coeff;
-       const unsigned int min = 0x01 * coeff;
-       const unsigned int max = 0xff * coeff;
-       u8 age_time;
-       u16 val;
-       int err;
-
-       if (msecs < min || msecs > max)
-               return -ERANGE;
-
-       /* Round to nearest multiple of coeff */
-       age_time = (msecs + coeff / 2) / coeff;
-
-       err = mv88e6xxx_g1_read(chip, GLOBAL_ATU_CONTROL, &val);
-       if (err)
-               return err;
-
-       /* AgeTime is 11:4 bits */
-       val &= ~0xff0;
-       val |= age_time << 4;
-
-       return mv88e6xxx_g1_write(chip, GLOBAL_ATU_CONTROL, val);
-}
-
 static int mv88e6xxx_set_ageing_time(struct dsa_switch *ds,
                                     unsigned int ageing_time)
 {
@@ -2731,7 +2579,7 @@ static int mv88e6xxx_set_ageing_time(struct dsa_switch *ds,
        int err;
 
        mutex_lock(&chip->reg_lock);
-       err = mv88e6xxx_g1_set_age_time(chip, ageing_time);
+       err = mv88e6xxx_g1_atu_set_age_time(chip, ageing_time);
        mutex_unlock(&chip->reg_lock);
 
        return err;
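
The removed helper survives as mv88e6xxx_g1_atu_set_age_time() in the new
global1_atu.c (note the global1_atu.o object added to the Makefile above). It
stores the age time as an 8-bit multiple of the chip's age_time_coeff,
rounded to the nearest step. A quick check of the rounding arithmetic (coeff
15000 is an example value; the real one comes from
chip->info->age_time_coeff):

#include <stdio.h>

int main(void)
{
        const unsigned int coeff = 15000;
        unsigned int msecs = 300000;    /* the 5 minute default */
        unsigned int age_time = (msecs + coeff / 2) / coeff;

        printf("AgeTime field = %u (actual %u ms)\n",
               age_time, age_time * coeff);
        return 0;
}
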
@@ -2774,24 +2622,6 @@ static int mv88e6xxx_g1_setup(struct mv88e6xxx_chip *chip)
        if (err < 0)
                return err;
 
-       /* Set the default address aging time to 5 minutes, and
-        * enable address learn messages to be sent to all message
-        * ports.
-        */
-       err = mv88e6xxx_g1_write(chip, GLOBAL_ATU_CONTROL,
-                                GLOBAL_ATU_CONTROL_LEARN2ALL);
-       if (err)
-               return err;
-
-       err = mv88e6xxx_g1_set_age_time(chip, 300000);
-       if (err)
-               return err;
-
-       /* Clear all ATU entries */
-       err = _mv88e6xxx_atu_flush(chip, 0, true);
-       if (err)
-               return err;
-
        /* Configure the IP ToS mapping registers. */
        err = mv88e6xxx_g1_write(chip, GLOBAL_IP_PRI_0, 0x0000);
        if (err)
@@ -2872,6 +2702,14 @@ static int mv88e6xxx_setup(struct dsa_switch *ds)
                        goto unlock;
        }
 
+       err = mv88e6xxx_pvt_setup(chip);
+       if (err)
+               goto unlock;
+
+       err = mv88e6xxx_atu_setup(chip);
+       if (err)
+               goto unlock;
+
        /* Some generations have the configuration of sending reserved
         * management frames to the CPU in global2, others in
         * global1. Hence it does not fit the two setup functions
@@ -3101,10 +2939,12 @@ static const struct mv88e6xxx_ops mv88e6085_ops = {
        .port_set_speed = mv88e6185_port_set_speed,
        .port_tag_remap = mv88e6095_port_tag_remap,
        .port_set_frame_mode = mv88e6351_port_set_frame_mode,
-       .port_set_egress_unknowns = mv88e6351_port_set_egress_unknowns,
+       .port_set_egress_floods = mv88e6352_port_set_egress_floods,
        .port_set_ether_type = mv88e6351_port_set_ether_type,
        .port_egress_rate_limiting = mv88e6097_port_egress_rate_limiting,
        .port_pause_config = mv88e6097_port_pause_config,
+       .port_disable_learn_limit = mv88e6xxx_port_disable_learn_limit,
+       .port_disable_pri_override = mv88e6xxx_port_disable_pri_override,
        .stats_snapshot = mv88e6xxx_g1_stats_snapshot,
        .stats_get_sset_count = mv88e6095_stats_get_sset_count,
        .stats_get_strings = mv88e6095_stats_get_strings,
@@ -3127,7 +2967,7 @@ static const struct mv88e6xxx_ops mv88e6095_ops = {
        .port_set_duplex = mv88e6xxx_port_set_duplex,
        .port_set_speed = mv88e6185_port_set_speed,
        .port_set_frame_mode = mv88e6085_port_set_frame_mode,
-       .port_set_egress_unknowns = mv88e6095_port_set_egress_unknowns,
+       .port_set_egress_floods = mv88e6185_port_set_egress_floods,
        .port_set_upstream_port = mv88e6095_port_set_upstream_port,
        .stats_snapshot = mv88e6xxx_g1_stats_snapshot,
        .stats_get_sset_count = mv88e6095_stats_get_sset_count,
@@ -3149,11 +2989,13 @@ static const struct mv88e6xxx_ops mv88e6097_ops = {
        .port_set_speed = mv88e6185_port_set_speed,
        .port_tag_remap = mv88e6095_port_tag_remap,
        .port_set_frame_mode = mv88e6351_port_set_frame_mode,
-       .port_set_egress_unknowns = mv88e6351_port_set_egress_unknowns,
+       .port_set_egress_floods = mv88e6352_port_set_egress_floods,
        .port_set_ether_type = mv88e6351_port_set_ether_type,
        .port_jumbo_config = mv88e6165_port_jumbo_config,
        .port_egress_rate_limiting = mv88e6095_port_egress_rate_limiting,
        .port_pause_config = mv88e6097_port_pause_config,
+       .port_disable_learn_limit = mv88e6xxx_port_disable_learn_limit,
+       .port_disable_pri_override = mv88e6xxx_port_disable_pri_override,
        .stats_snapshot = mv88e6xxx_g1_stats_snapshot,
        .stats_get_sset_count = mv88e6095_stats_get_sset_count,
        .stats_get_strings = mv88e6095_stats_get_strings,
@@ -3174,7 +3016,9 @@ static const struct mv88e6xxx_ops mv88e6123_ops = {
        .port_set_duplex = mv88e6xxx_port_set_duplex,
        .port_set_speed = mv88e6185_port_set_speed,
        .port_set_frame_mode = mv88e6085_port_set_frame_mode,
-       .port_set_egress_unknowns = mv88e6085_port_set_egress_unknowns,
+       .port_set_egress_floods = mv88e6352_port_set_egress_floods,
+       .port_disable_learn_limit = mv88e6xxx_port_disable_learn_limit,
+       .port_disable_pri_override = mv88e6xxx_port_disable_pri_override,
        .stats_snapshot = mv88e6xxx_g1_stats_snapshot,
        .stats_get_sset_count = mv88e6095_stats_get_sset_count,
        .stats_get_strings = mv88e6095_stats_get_strings,
@@ -3196,7 +3040,7 @@ static const struct mv88e6xxx_ops mv88e6131_ops = {
        .port_set_speed = mv88e6185_port_set_speed,
        .port_tag_remap = mv88e6095_port_tag_remap,
        .port_set_frame_mode = mv88e6351_port_set_frame_mode,
-       .port_set_egress_unknowns = mv88e6095_port_set_egress_unknowns,
+       .port_set_egress_floods = mv88e6185_port_set_egress_floods,
        .port_set_ether_type = mv88e6351_port_set_ether_type,
        .port_set_upstream_port = mv88e6095_port_set_upstream_port,
        .port_jumbo_config = mv88e6165_port_jumbo_config,
@@ -3215,6 +3059,37 @@ static const struct mv88e6xxx_ops mv88e6131_ops = {
        .reset = mv88e6185_g1_reset,
 };
 
+static const struct mv88e6xxx_ops mv88e6141_ops = {
+       /* MV88E6XXX_FAMILY_6341 */
+       .get_eeprom = mv88e6xxx_g2_get_eeprom8,
+       .set_eeprom = mv88e6xxx_g2_set_eeprom8,
+       .set_switch_mac = mv88e6xxx_g2_set_switch_mac,
+       .phy_read = mv88e6xxx_g2_smi_phy_read,
+       .phy_write = mv88e6xxx_g2_smi_phy_write,
+       .port_set_link = mv88e6xxx_port_set_link,
+       .port_set_duplex = mv88e6xxx_port_set_duplex,
+       .port_set_rgmii_delay = mv88e6390_port_set_rgmii_delay,
+       .port_set_speed = mv88e6390_port_set_speed,
+       .port_tag_remap = mv88e6095_port_tag_remap,
+       .port_set_frame_mode = mv88e6351_port_set_frame_mode,
+       .port_set_egress_floods = mv88e6352_port_set_egress_floods,
+       .port_set_ether_type = mv88e6351_port_set_ether_type,
+       .port_jumbo_config = mv88e6165_port_jumbo_config,
+       .port_egress_rate_limiting = mv88e6097_port_egress_rate_limiting,
+       .port_pause_config = mv88e6097_port_pause_config,
+       .port_disable_learn_limit = mv88e6xxx_port_disable_learn_limit,
+       .port_disable_pri_override = mv88e6xxx_port_disable_pri_override,
+       .stats_snapshot = mv88e6390_g1_stats_snapshot,
+       .stats_get_sset_count = mv88e6320_stats_get_sset_count,
+       .stats_get_strings = mv88e6320_stats_get_strings,
+       .stats_get_stats = mv88e6390_stats_get_stats,
+       .g1_set_cpu_port = mv88e6390_g1_set_cpu_port,
+       .g1_set_egress_port = mv88e6390_g1_set_egress_port,
+       .watchdog_ops = &mv88e6390_watchdog_ops,
+       .mgmt_rsvd2cpu = mv88e6390_g1_mgmt_rsvd2cpu,
+       .reset = mv88e6352_g1_reset,
+};
+
 static const struct mv88e6xxx_ops mv88e6161_ops = {
        /* MV88E6XXX_FAMILY_6165 */
        .set_switch_mac = mv88e6xxx_g2_set_switch_mac,
@@ -3225,11 +3100,13 @@ static const struct mv88e6xxx_ops mv88e6161_ops = {
        .port_set_speed = mv88e6185_port_set_speed,
        .port_tag_remap = mv88e6095_port_tag_remap,
        .port_set_frame_mode = mv88e6351_port_set_frame_mode,
-       .port_set_egress_unknowns = mv88e6351_port_set_egress_unknowns,
+       .port_set_egress_floods = mv88e6352_port_set_egress_floods,
        .port_set_ether_type = mv88e6351_port_set_ether_type,
        .port_jumbo_config = mv88e6165_port_jumbo_config,
        .port_egress_rate_limiting = mv88e6097_port_egress_rate_limiting,
        .port_pause_config = mv88e6097_port_pause_config,
+       .port_disable_learn_limit = mv88e6xxx_port_disable_learn_limit,
+       .port_disable_pri_override = mv88e6xxx_port_disable_pri_override,
        .stats_snapshot = mv88e6xxx_g1_stats_snapshot,
        .stats_get_sset_count = mv88e6095_stats_get_sset_count,
        .stats_get_strings = mv88e6095_stats_get_strings,
@@ -3249,6 +3126,8 @@ static const struct mv88e6xxx_ops mv88e6165_ops = {
        .port_set_link = mv88e6xxx_port_set_link,
        .port_set_duplex = mv88e6xxx_port_set_duplex,
        .port_set_speed = mv88e6185_port_set_speed,
+       .port_disable_learn_limit = mv88e6xxx_port_disable_learn_limit,
+       .port_disable_pri_override = mv88e6xxx_port_disable_pri_override,
        .stats_snapshot = mv88e6xxx_g1_stats_snapshot,
        .stats_get_sset_count = mv88e6095_stats_get_sset_count,
        .stats_get_strings = mv88e6095_stats_get_strings,
@@ -3271,11 +3150,13 @@ static const struct mv88e6xxx_ops mv88e6171_ops = {
        .port_set_speed = mv88e6185_port_set_speed,
        .port_tag_remap = mv88e6095_port_tag_remap,
        .port_set_frame_mode = mv88e6351_port_set_frame_mode,
-       .port_set_egress_unknowns = mv88e6351_port_set_egress_unknowns,
+       .port_set_egress_floods = mv88e6352_port_set_egress_floods,
        .port_set_ether_type = mv88e6351_port_set_ether_type,
        .port_jumbo_config = mv88e6165_port_jumbo_config,
        .port_egress_rate_limiting = mv88e6097_port_egress_rate_limiting,
        .port_pause_config = mv88e6097_port_pause_config,
+       .port_disable_learn_limit = mv88e6xxx_port_disable_learn_limit,
+       .port_disable_pri_override = mv88e6xxx_port_disable_pri_override,
        .stats_snapshot = mv88e6320_g1_stats_snapshot,
        .stats_get_sset_count = mv88e6095_stats_get_sset_count,
        .stats_get_strings = mv88e6095_stats_get_strings,
@@ -3300,11 +3181,13 @@ static const struct mv88e6xxx_ops mv88e6172_ops = {
        .port_set_speed = mv88e6352_port_set_speed,
        .port_tag_remap = mv88e6095_port_tag_remap,
        .port_set_frame_mode = mv88e6351_port_set_frame_mode,
-       .port_set_egress_unknowns = mv88e6351_port_set_egress_unknowns,
+       .port_set_egress_floods = mv88e6352_port_set_egress_floods,
        .port_set_ether_type = mv88e6351_port_set_ether_type,
        .port_jumbo_config = mv88e6165_port_jumbo_config,
        .port_egress_rate_limiting = mv88e6097_port_egress_rate_limiting,
        .port_pause_config = mv88e6097_port_pause_config,
+       .port_disable_learn_limit = mv88e6xxx_port_disable_learn_limit,
+       .port_disable_pri_override = mv88e6xxx_port_disable_pri_override,
        .stats_snapshot = mv88e6320_g1_stats_snapshot,
        .stats_get_sset_count = mv88e6095_stats_get_sset_count,
        .stats_get_strings = mv88e6095_stats_get_strings,
@@ -3327,11 +3210,13 @@ static const struct mv88e6xxx_ops mv88e6175_ops = {
        .port_set_speed = mv88e6185_port_set_speed,
        .port_tag_remap = mv88e6095_port_tag_remap,
        .port_set_frame_mode = mv88e6351_port_set_frame_mode,
-       .port_set_egress_unknowns = mv88e6351_port_set_egress_unknowns,
+       .port_set_egress_floods = mv88e6352_port_set_egress_floods,
        .port_set_ether_type = mv88e6351_port_set_ether_type,
        .port_jumbo_config = mv88e6165_port_jumbo_config,
        .port_egress_rate_limiting = mv88e6097_port_egress_rate_limiting,
        .port_pause_config = mv88e6097_port_pause_config,
+       .port_disable_learn_limit = mv88e6xxx_port_disable_learn_limit,
+       .port_disable_pri_override = mv88e6xxx_port_disable_pri_override,
        .stats_snapshot = mv88e6320_g1_stats_snapshot,
        .stats_get_sset_count = mv88e6095_stats_get_sset_count,
        .stats_get_strings = mv88e6095_stats_get_strings,
@@ -3356,11 +3241,13 @@ static const struct mv88e6xxx_ops mv88e6176_ops = {
        .port_set_speed = mv88e6352_port_set_speed,
        .port_tag_remap = mv88e6095_port_tag_remap,
        .port_set_frame_mode = mv88e6351_port_set_frame_mode,
-       .port_set_egress_unknowns = mv88e6351_port_set_egress_unknowns,
+       .port_set_egress_floods = mv88e6352_port_set_egress_floods,
        .port_set_ether_type = mv88e6351_port_set_ether_type,
        .port_jumbo_config = mv88e6165_port_jumbo_config,
        .port_egress_rate_limiting = mv88e6097_port_egress_rate_limiting,
        .port_pause_config = mv88e6097_port_pause_config,
+       .port_disable_learn_limit = mv88e6xxx_port_disable_learn_limit,
+       .port_disable_pri_override = mv88e6xxx_port_disable_pri_override,
        .stats_snapshot = mv88e6320_g1_stats_snapshot,
        .stats_get_sset_count = mv88e6095_stats_get_sset_count,
        .stats_get_strings = mv88e6095_stats_get_strings,
@@ -3381,7 +3268,7 @@ static const struct mv88e6xxx_ops mv88e6185_ops = {
        .port_set_duplex = mv88e6xxx_port_set_duplex,
        .port_set_speed = mv88e6185_port_set_speed,
        .port_set_frame_mode = mv88e6085_port_set_frame_mode,
-       .port_set_egress_unknowns = mv88e6095_port_set_egress_unknowns,
+       .port_set_egress_floods = mv88e6185_port_set_egress_floods,
        .port_egress_rate_limiting = mv88e6095_port_egress_rate_limiting,
        .port_set_upstream_port = mv88e6095_port_set_upstream_port,
        .stats_snapshot = mv88e6xxx_g1_stats_snapshot,
@@ -3410,9 +3297,11 @@ static const struct mv88e6xxx_ops mv88e6190_ops = {
        .port_set_speed = mv88e6390_port_set_speed,
        .port_tag_remap = mv88e6390_port_tag_remap,
        .port_set_frame_mode = mv88e6351_port_set_frame_mode,
-       .port_set_egress_unknowns = mv88e6351_port_set_egress_unknowns,
+       .port_set_egress_floods = mv88e6352_port_set_egress_floods,
        .port_set_ether_type = mv88e6351_port_set_ether_type,
        .port_pause_config = mv88e6390_port_pause_config,
+       .port_disable_learn_limit = mv88e6xxx_port_disable_learn_limit,
+       .port_disable_pri_override = mv88e6xxx_port_disable_pri_override,
        .stats_snapshot = mv88e6390_g1_stats_snapshot,
        .stats_set_histogram = mv88e6390_g1_stats_set_histogram,
        .stats_get_sset_count = mv88e6320_stats_get_sset_count,
@@ -3438,9 +3327,11 @@ static const struct mv88e6xxx_ops mv88e6190x_ops = {
        .port_set_speed = mv88e6390x_port_set_speed,
        .port_tag_remap = mv88e6390_port_tag_remap,
        .port_set_frame_mode = mv88e6351_port_set_frame_mode,
-       .port_set_egress_unknowns = mv88e6351_port_set_egress_unknowns,
+       .port_set_egress_floods = mv88e6352_port_set_egress_floods,
        .port_set_ether_type = mv88e6351_port_set_ether_type,
        .port_pause_config = mv88e6390_port_pause_config,
+       .port_disable_learn_limit = mv88e6xxx_port_disable_learn_limit,
+       .port_disable_pri_override = mv88e6xxx_port_disable_pri_override,
        .stats_snapshot = mv88e6390_g1_stats_snapshot,
        .stats_set_histogram = mv88e6390_g1_stats_set_histogram,
        .stats_get_sset_count = mv88e6320_stats_get_sset_count,
@@ -3466,9 +3357,11 @@ static const struct mv88e6xxx_ops mv88e6191_ops = {
        .port_set_speed = mv88e6390_port_set_speed,
        .port_tag_remap = mv88e6390_port_tag_remap,
        .port_set_frame_mode = mv88e6351_port_set_frame_mode,
-       .port_set_egress_unknowns = mv88e6351_port_set_egress_unknowns,
+       .port_set_egress_floods = mv88e6352_port_set_egress_floods,
        .port_set_ether_type = mv88e6351_port_set_ether_type,
        .port_pause_config = mv88e6390_port_pause_config,
+       .port_disable_learn_limit = mv88e6xxx_port_disable_learn_limit,
+       .port_disable_pri_override = mv88e6xxx_port_disable_pri_override,
        .stats_snapshot = mv88e6390_g1_stats_snapshot,
        .stats_set_histogram = mv88e6390_g1_stats_set_histogram,
        .stats_get_sset_count = mv88e6320_stats_get_sset_count,
@@ -3494,11 +3387,13 @@ static const struct mv88e6xxx_ops mv88e6240_ops = {
        .port_set_speed = mv88e6352_port_set_speed,
        .port_tag_remap = mv88e6095_port_tag_remap,
        .port_set_frame_mode = mv88e6351_port_set_frame_mode,
-       .port_set_egress_unknowns = mv88e6351_port_set_egress_unknowns,
+       .port_set_egress_floods = mv88e6352_port_set_egress_floods,
        .port_set_ether_type = mv88e6351_port_set_ether_type,
        .port_jumbo_config = mv88e6165_port_jumbo_config,
        .port_egress_rate_limiting = mv88e6097_port_egress_rate_limiting,
        .port_pause_config = mv88e6097_port_pause_config,
+       .port_disable_learn_limit = mv88e6xxx_port_disable_learn_limit,
+       .port_disable_pri_override = mv88e6xxx_port_disable_pri_override,
        .stats_snapshot = mv88e6320_g1_stats_snapshot,
        .stats_get_sset_count = mv88e6095_stats_get_sset_count,
        .stats_get_strings = mv88e6095_stats_get_strings,
@@ -3523,10 +3418,12 @@ static const struct mv88e6xxx_ops mv88e6290_ops = {
        .port_set_speed = mv88e6390_port_set_speed,
        .port_tag_remap = mv88e6390_port_tag_remap,
        .port_set_frame_mode = mv88e6351_port_set_frame_mode,
-       .port_set_egress_unknowns = mv88e6351_port_set_egress_unknowns,
+       .port_set_egress_floods = mv88e6352_port_set_egress_floods,
        .port_set_ether_type = mv88e6351_port_set_ether_type,
        .port_pause_config = mv88e6390_port_pause_config,
        .port_set_cmode = mv88e6390x_port_set_cmode,
+       .port_disable_learn_limit = mv88e6xxx_port_disable_learn_limit,
+       .port_disable_pri_override = mv88e6xxx_port_disable_pri_override,
        .stats_snapshot = mv88e6390_g1_stats_snapshot,
        .stats_set_histogram = mv88e6390_g1_stats_set_histogram,
        .stats_get_sset_count = mv88e6320_stats_get_sset_count,
@@ -3551,11 +3448,13 @@ static const struct mv88e6xxx_ops mv88e6320_ops = {
        .port_set_speed = mv88e6185_port_set_speed,
        .port_tag_remap = mv88e6095_port_tag_remap,
        .port_set_frame_mode = mv88e6351_port_set_frame_mode,
-       .port_set_egress_unknowns = mv88e6351_port_set_egress_unknowns,
+       .port_set_egress_floods = mv88e6352_port_set_egress_floods,
        .port_set_ether_type = mv88e6351_port_set_ether_type,
        .port_jumbo_config = mv88e6165_port_jumbo_config,
        .port_egress_rate_limiting = mv88e6097_port_egress_rate_limiting,
        .port_pause_config = mv88e6097_port_pause_config,
+       .port_disable_learn_limit = mv88e6xxx_port_disable_learn_limit,
+       .port_disable_pri_override = mv88e6xxx_port_disable_pri_override,
        .stats_snapshot = mv88e6320_g1_stats_snapshot,
        .stats_get_sset_count = mv88e6320_stats_get_sset_count,
        .stats_get_strings = mv88e6320_stats_get_strings,
@@ -3578,11 +3477,13 @@ static const struct mv88e6xxx_ops mv88e6321_ops = {
        .port_set_speed = mv88e6185_port_set_speed,
        .port_tag_remap = mv88e6095_port_tag_remap,
        .port_set_frame_mode = mv88e6351_port_set_frame_mode,
-       .port_set_egress_unknowns = mv88e6351_port_set_egress_unknowns,
+       .port_set_egress_floods = mv88e6352_port_set_egress_floods,
        .port_set_ether_type = mv88e6351_port_set_ether_type,
        .port_jumbo_config = mv88e6165_port_jumbo_config,
        .port_egress_rate_limiting = mv88e6097_port_egress_rate_limiting,
        .port_pause_config = mv88e6097_port_pause_config,
+       .port_disable_learn_limit = mv88e6xxx_port_disable_learn_limit,
+       .port_disable_pri_override = mv88e6xxx_port_disable_pri_override,
        .stats_snapshot = mv88e6320_g1_stats_snapshot,
        .stats_get_sset_count = mv88e6320_stats_get_sset_count,
        .stats_get_strings = mv88e6320_stats_get_strings,
@@ -3592,6 +3493,37 @@ static const struct mv88e6xxx_ops mv88e6321_ops = {
        .reset = mv88e6352_g1_reset,
 };
 
+static const struct mv88e6xxx_ops mv88e6341_ops = {
+       /* MV88E6XXX_FAMILY_6341 */
+       .get_eeprom = mv88e6xxx_g2_get_eeprom8,
+       .set_eeprom = mv88e6xxx_g2_set_eeprom8,
+       .set_switch_mac = mv88e6xxx_g2_set_switch_mac,
+       .phy_read = mv88e6xxx_g2_smi_phy_read,
+       .phy_write = mv88e6xxx_g2_smi_phy_write,
+       .port_set_link = mv88e6xxx_port_set_link,
+       .port_set_duplex = mv88e6xxx_port_set_duplex,
+       .port_set_rgmii_delay = mv88e6390_port_set_rgmii_delay,
+       .port_set_speed = mv88e6390_port_set_speed,
+       .port_tag_remap = mv88e6095_port_tag_remap,
+       .port_set_frame_mode = mv88e6351_port_set_frame_mode,
+       .port_set_egress_floods = mv88e6352_port_set_egress_floods,
+       .port_set_ether_type = mv88e6351_port_set_ether_type,
+       .port_jumbo_config = mv88e6165_port_jumbo_config,
+       .port_egress_rate_limiting = mv88e6097_port_egress_rate_limiting,
+       .port_pause_config = mv88e6097_port_pause_config,
+       .port_disable_learn_limit = mv88e6xxx_port_disable_learn_limit,
+       .port_disable_pri_override = mv88e6xxx_port_disable_pri_override,
+       .stats_snapshot = mv88e6390_g1_stats_snapshot,
+       .stats_get_sset_count = mv88e6320_stats_get_sset_count,
+       .stats_get_strings = mv88e6320_stats_get_strings,
+       .stats_get_stats = mv88e6390_stats_get_stats,
+       .g1_set_cpu_port = mv88e6390_g1_set_cpu_port,
+       .g1_set_egress_port = mv88e6390_g1_set_egress_port,
+       .watchdog_ops = &mv88e6390_watchdog_ops,
+       .mgmt_rsvd2cpu = mv88e6390_g1_mgmt_rsvd2cpu,
+       .reset = mv88e6352_g1_reset,
+};
+
 static const struct mv88e6xxx_ops mv88e6350_ops = {
        /* MV88E6XXX_FAMILY_6351 */
        .set_switch_mac = mv88e6xxx_g2_set_switch_mac,
@@ -3603,11 +3535,13 @@ static const struct mv88e6xxx_ops mv88e6350_ops = {
        .port_set_speed = mv88e6185_port_set_speed,
        .port_tag_remap = mv88e6095_port_tag_remap,
        .port_set_frame_mode = mv88e6351_port_set_frame_mode,
-       .port_set_egress_unknowns = mv88e6351_port_set_egress_unknowns,
+       .port_set_egress_floods = mv88e6352_port_set_egress_floods,
        .port_set_ether_type = mv88e6351_port_set_ether_type,
        .port_jumbo_config = mv88e6165_port_jumbo_config,
        .port_egress_rate_limiting = mv88e6097_port_egress_rate_limiting,
        .port_pause_config = mv88e6097_port_pause_config,
+       .port_disable_learn_limit = mv88e6xxx_port_disable_learn_limit,
+       .port_disable_pri_override = mv88e6xxx_port_disable_pri_override,
        .stats_snapshot = mv88e6320_g1_stats_snapshot,
        .stats_get_sset_count = mv88e6095_stats_get_sset_count,
        .stats_get_strings = mv88e6095_stats_get_strings,
@@ -3630,11 +3564,13 @@ static const struct mv88e6xxx_ops mv88e6351_ops = {
        .port_set_speed = mv88e6185_port_set_speed,
        .port_tag_remap = mv88e6095_port_tag_remap,
        .port_set_frame_mode = mv88e6351_port_set_frame_mode,
-       .port_set_egress_unknowns = mv88e6351_port_set_egress_unknowns,
+       .port_set_egress_floods = mv88e6352_port_set_egress_floods,
        .port_set_ether_type = mv88e6351_port_set_ether_type,
        .port_jumbo_config = mv88e6165_port_jumbo_config,
        .port_egress_rate_limiting = mv88e6097_port_egress_rate_limiting,
        .port_pause_config = mv88e6097_port_pause_config,
+       .port_disable_learn_limit = mv88e6xxx_port_disable_learn_limit,
+       .port_disable_pri_override = mv88e6xxx_port_disable_pri_override,
        .stats_snapshot = mv88e6320_g1_stats_snapshot,
        .stats_get_sset_count = mv88e6095_stats_get_sset_count,
        .stats_get_strings = mv88e6095_stats_get_strings,
@@ -3659,11 +3595,13 @@ static const struct mv88e6xxx_ops mv88e6352_ops = {
        .port_set_speed = mv88e6352_port_set_speed,
        .port_tag_remap = mv88e6095_port_tag_remap,
        .port_set_frame_mode = mv88e6351_port_set_frame_mode,
-       .port_set_egress_unknowns = mv88e6351_port_set_egress_unknowns,
+       .port_set_egress_floods = mv88e6352_port_set_egress_floods,
        .port_set_ether_type = mv88e6351_port_set_ether_type,
        .port_jumbo_config = mv88e6165_port_jumbo_config,
        .port_egress_rate_limiting = mv88e6097_port_egress_rate_limiting,
        .port_pause_config = mv88e6097_port_pause_config,
+       .port_disable_learn_limit = mv88e6xxx_port_disable_learn_limit,
+       .port_disable_pri_override = mv88e6xxx_port_disable_pri_override,
        .stats_snapshot = mv88e6320_g1_stats_snapshot,
        .stats_get_sset_count = mv88e6095_stats_get_sset_count,
        .stats_get_strings = mv88e6095_stats_get_strings,
@@ -3675,64 +3613,6 @@ static const struct mv88e6xxx_ops mv88e6352_ops = {
        .reset = mv88e6352_g1_reset,
 };
 
-static const struct mv88e6xxx_ops mv88e6141_ops = {
-       /* MV88E6XXX_FAMILY_6341 */
-       .get_eeprom = mv88e6xxx_g2_get_eeprom8,
-       .set_eeprom = mv88e6xxx_g2_set_eeprom8,
-       .set_switch_mac = mv88e6xxx_g2_set_switch_mac,
-       .phy_read = mv88e6xxx_g2_smi_phy_read,
-       .phy_write = mv88e6xxx_g2_smi_phy_write,
-       .port_set_link = mv88e6xxx_port_set_link,
-       .port_set_duplex = mv88e6xxx_port_set_duplex,
-       .port_set_rgmii_delay = mv88e6390_port_set_rgmii_delay,
-       .port_set_speed = mv88e6390_port_set_speed,
-       .port_tag_remap = mv88e6095_port_tag_remap,
-       .port_set_frame_mode = mv88e6351_port_set_frame_mode,
-       .port_set_egress_unknowns = mv88e6351_port_set_egress_unknowns,
-       .port_set_ether_type = mv88e6351_port_set_ether_type,
-       .port_jumbo_config = mv88e6165_port_jumbo_config,
-       .port_egress_rate_limiting = mv88e6097_port_egress_rate_limiting,
-       .port_pause_config = mv88e6097_port_pause_config,
-       .stats_snapshot = mv88e6390_g1_stats_snapshot,
-       .stats_get_sset_count = mv88e6320_stats_get_sset_count,
-       .stats_get_strings = mv88e6320_stats_get_strings,
-       .stats_get_stats = mv88e6390_stats_get_stats,
-       .g1_set_cpu_port = mv88e6390_g1_set_cpu_port,
-       .g1_set_egress_port = mv88e6390_g1_set_egress_port,
-       .watchdog_ops = &mv88e6390_watchdog_ops,
-       .mgmt_rsvd2cpu =  mv88e6390_g1_mgmt_rsvd2cpu,
-       .reset = mv88e6352_g1_reset,
-};
-
-static const struct mv88e6xxx_ops mv88e6341_ops = {
-       /* MV88E6XXX_FAMILY_6341 */
-       .get_eeprom = mv88e6xxx_g2_get_eeprom8,
-       .set_eeprom = mv88e6xxx_g2_set_eeprom8,
-       .set_switch_mac = mv88e6xxx_g2_set_switch_mac,
-       .phy_read = mv88e6xxx_g2_smi_phy_read,
-       .phy_write = mv88e6xxx_g2_smi_phy_write,
-       .port_set_link = mv88e6xxx_port_set_link,
-       .port_set_duplex = mv88e6xxx_port_set_duplex,
-       .port_set_rgmii_delay = mv88e6390_port_set_rgmii_delay,
-       .port_set_speed = mv88e6390_port_set_speed,
-       .port_tag_remap = mv88e6095_port_tag_remap,
-       .port_set_frame_mode = mv88e6351_port_set_frame_mode,
-       .port_set_egress_unknowns = mv88e6351_port_set_egress_unknowns,
-       .port_set_ether_type = mv88e6351_port_set_ether_type,
-       .port_jumbo_config = mv88e6165_port_jumbo_config,
-       .port_egress_rate_limiting = mv88e6097_port_egress_rate_limiting,
-       .port_pause_config = mv88e6097_port_pause_config,
-       .stats_snapshot = mv88e6390_g1_stats_snapshot,
-       .stats_get_sset_count = mv88e6320_stats_get_sset_count,
-       .stats_get_strings = mv88e6320_stats_get_strings,
-       .stats_get_stats = mv88e6390_stats_get_stats,
-       .g1_set_cpu_port = mv88e6390_g1_set_cpu_port,
-       .g1_set_egress_port = mv88e6390_g1_set_egress_port,
-       .watchdog_ops = &mv88e6390_watchdog_ops,
-       .mgmt_rsvd2cpu =  mv88e6390_g1_mgmt_rsvd2cpu,
-       .reset = mv88e6352_g1_reset,
-};
-
 static const struct mv88e6xxx_ops mv88e6390_ops = {
        /* MV88E6XXX_FAMILY_6390 */
        .get_eeprom = mv88e6xxx_g2_get_eeprom8,
@@ -3746,12 +3626,14 @@ static const struct mv88e6xxx_ops mv88e6390_ops = {
        .port_set_speed = mv88e6390_port_set_speed,
        .port_tag_remap = mv88e6390_port_tag_remap,
        .port_set_frame_mode = mv88e6351_port_set_frame_mode,
-       .port_set_egress_unknowns = mv88e6351_port_set_egress_unknowns,
+       .port_set_egress_floods = mv88e6352_port_set_egress_floods,
        .port_set_ether_type = mv88e6351_port_set_ether_type,
        .port_jumbo_config = mv88e6165_port_jumbo_config,
        .port_egress_rate_limiting = mv88e6097_port_egress_rate_limiting,
        .port_pause_config = mv88e6390_port_pause_config,
        .port_set_cmode = mv88e6390x_port_set_cmode,
+       .port_disable_learn_limit = mv88e6xxx_port_disable_learn_limit,
+       .port_disable_pri_override = mv88e6xxx_port_disable_pri_override,
        .stats_snapshot = mv88e6390_g1_stats_snapshot,
        .stats_set_histogram = mv88e6390_g1_stats_set_histogram,
        .stats_get_sset_count = mv88e6320_stats_get_sset_count,
@@ -3777,11 +3659,13 @@ static const struct mv88e6xxx_ops mv88e6390x_ops = {
        .port_set_speed = mv88e6390x_port_set_speed,
        .port_tag_remap = mv88e6390_port_tag_remap,
        .port_set_frame_mode = mv88e6351_port_set_frame_mode,
-       .port_set_egress_unknowns = mv88e6351_port_set_egress_unknowns,
+       .port_set_egress_floods = mv88e6352_port_set_egress_floods,
        .port_set_ether_type = mv88e6351_port_set_ether_type,
        .port_jumbo_config = mv88e6165_port_jumbo_config,
        .port_egress_rate_limiting = mv88e6097_port_egress_rate_limiting,
        .port_pause_config = mv88e6390_port_pause_config,
+       .port_disable_learn_limit = mv88e6xxx_port_disable_learn_limit,
+       .port_disable_pri_override = mv88e6xxx_port_disable_pri_override,
        .stats_snapshot = mv88e6390_g1_stats_snapshot,
        .stats_set_histogram = mv88e6390_g1_stats_set_histogram,
        .stats_get_sset_count = mv88e6320_stats_get_sset_count,
@@ -3794,50 +3678,6 @@ static const struct mv88e6xxx_ops mv88e6390x_ops = {
        .reset = mv88e6352_g1_reset,
 };
 
-static const struct mv88e6xxx_ops mv88e6391_ops = {
-       /* MV88E6XXX_FAMILY_6390 */
-       .get_eeprom = mv88e6xxx_g2_get_eeprom8,
-       .set_eeprom = mv88e6xxx_g2_set_eeprom8,
-       .set_switch_mac = mv88e6xxx_g2_set_switch_mac,
-       .phy_read = mv88e6xxx_g2_smi_phy_read,
-       .phy_write = mv88e6xxx_g2_smi_phy_write,
-       .port_set_link = mv88e6xxx_port_set_link,
-       .port_set_duplex = mv88e6xxx_port_set_duplex,
-       .port_set_rgmii_delay = mv88e6390_port_set_rgmii_delay,
-       .port_set_speed = mv88e6390_port_set_speed,
-       .port_tag_remap = mv88e6390_port_tag_remap,
-       .port_set_frame_mode = mv88e6351_port_set_frame_mode,
-       .port_set_egress_unknowns = mv88e6351_port_set_egress_unknowns,
-       .port_set_ether_type = mv88e6351_port_set_ether_type,
-       .port_pause_config = mv88e6390_port_pause_config,
-       .stats_snapshot = mv88e6390_g1_stats_snapshot,
-       .stats_set_histogram = mv88e6390_g1_stats_set_histogram,
-       .stats_get_sset_count = mv88e6320_stats_get_sset_count,
-       .stats_get_strings = mv88e6320_stats_get_strings,
-       .stats_get_stats = mv88e6390_stats_get_stats,
-       .g1_set_cpu_port = mv88e6390_g1_set_cpu_port,
-       .g1_set_egress_port = mv88e6390_g1_set_egress_port,
-       .watchdog_ops = &mv88e6390_watchdog_ops,
-       .mgmt_rsvd2cpu = mv88e6390_g1_mgmt_rsvd2cpu,
-       .reset = mv88e6352_g1_reset,
-};
-
-static int mv88e6xxx_verify_madatory_ops(struct mv88e6xxx_chip *chip,
-                                        const struct mv88e6xxx_ops *ops)
-{
-       if (!ops->port_set_frame_mode) {
-               dev_err(chip->dev, "Missing port_set_frame_mode");
-               return -EINVAL;
-       }
-
-       if (!ops->port_set_egress_unknowns) {
-               dev_err(chip->dev, "Missing port_set_egress_mode");
-               return -EINVAL;
-       }
-
-       return 0;
-}
-
 static const struct mv88e6xxx_info mv88e6xxx_table[] = {
        [MV88E6085] = {
                .prod_num = PORT_SWITCH_ID_PROD_NUM_6085,
@@ -3849,6 +3689,8 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
                .global1_addr = 0x1b,
                .age_time_coeff = 15000,
                .g1_irqs = 8,
+               .atu_move_port_mask = 0xf,
+               .pvt = true,
                .tag_protocol = DSA_TAG_PROTO_DSA,
                .flags = MV88E6XXX_FLAGS_FAMILY_6097,
                .ops = &mv88e6085_ops,
@@ -3864,6 +3706,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
                .global1_addr = 0x1b,
                .age_time_coeff = 15000,
                .g1_irqs = 8,
+               .atu_move_port_mask = 0xf,
                .tag_protocol = DSA_TAG_PROTO_DSA,
                .flags = MV88E6XXX_FLAGS_FAMILY_6095,
                .ops = &mv88e6095_ops,
@@ -3879,6 +3722,8 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
                .global1_addr = 0x1b,
                .age_time_coeff = 15000,
                .g1_irqs = 8,
+               .atu_move_port_mask = 0xf,
+               .pvt = true,
                .tag_protocol = DSA_TAG_PROTO_EDSA,
                .flags = MV88E6XXX_FLAGS_FAMILY_6097,
                .ops = &mv88e6097_ops,
@@ -3894,6 +3739,8 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
                .global1_addr = 0x1b,
                .age_time_coeff = 15000,
                .g1_irqs = 9,
+               .atu_move_port_mask = 0xf,
+               .pvt = true,
                .tag_protocol = DSA_TAG_PROTO_DSA,
                .flags = MV88E6XXX_FLAGS_FAMILY_6165,
                .ops = &mv88e6123_ops,
@@ -3909,11 +3756,28 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
                .global1_addr = 0x1b,
                .age_time_coeff = 15000,
                .g1_irqs = 9,
+               .atu_move_port_mask = 0xf,
                .tag_protocol = DSA_TAG_PROTO_DSA,
                .flags = MV88E6XXX_FLAGS_FAMILY_6185,
                .ops = &mv88e6131_ops,
        },
 
+       [MV88E6141] = {
+               .prod_num = PORT_SWITCH_ID_PROD_NUM_6141,
+               .family = MV88E6XXX_FAMILY_6341,
+               .name = "Marvell 88E6341",
+               .num_databases = 4096,
+               .num_ports = 6,
+               .port_base_addr = 0x10,
+               .global1_addr = 0x1b,
+               .age_time_coeff = 3750,
+               .atu_move_port_mask = 0x1f,
+               .pvt = true,
+               .tag_protocol = DSA_TAG_PROTO_EDSA,
+               .flags = MV88E6XXX_FLAGS_FAMILY_6341,
+               .ops = &mv88e6141_ops,
+       },
+
        [MV88E6161] = {
                .prod_num = PORT_SWITCH_ID_PROD_NUM_6161,
                .family = MV88E6XXX_FAMILY_6165,
@@ -3924,6 +3788,8 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
                .global1_addr = 0x1b,
                .age_time_coeff = 15000,
                .g1_irqs = 9,
+               .atu_move_port_mask = 0xf,
+               .pvt = true,
                .tag_protocol = DSA_TAG_PROTO_DSA,
                .flags = MV88E6XXX_FLAGS_FAMILY_6165,
                .ops = &mv88e6161_ops,
@@ -3939,6 +3805,8 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
                .global1_addr = 0x1b,
                .age_time_coeff = 15000,
                .g1_irqs = 9,
+               .atu_move_port_mask = 0xf,
+               .pvt = true,
                .tag_protocol = DSA_TAG_PROTO_DSA,
                .flags = MV88E6XXX_FLAGS_FAMILY_6165,
                .ops = &mv88e6165_ops,
@@ -3954,6 +3822,8 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
                .global1_addr = 0x1b,
                .age_time_coeff = 15000,
                .g1_irqs = 9,
+               .atu_move_port_mask = 0xf,
+               .pvt = true,
                .tag_protocol = DSA_TAG_PROTO_EDSA,
                .flags = MV88E6XXX_FLAGS_FAMILY_6351,
                .ops = &mv88e6171_ops,
@@ -3969,6 +3839,8 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
                .global1_addr = 0x1b,
                .age_time_coeff = 15000,
                .g1_irqs = 9,
+               .atu_move_port_mask = 0xf,
+               .pvt = true,
                .tag_protocol = DSA_TAG_PROTO_EDSA,
                .flags = MV88E6XXX_FLAGS_FAMILY_6352,
                .ops = &mv88e6172_ops,
@@ -3984,6 +3856,8 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
                .global1_addr = 0x1b,
                .age_time_coeff = 15000,
                .g1_irqs = 9,
+               .atu_move_port_mask = 0xf,
+               .pvt = true,
                .tag_protocol = DSA_TAG_PROTO_EDSA,
                .flags = MV88E6XXX_FLAGS_FAMILY_6351,
                .ops = &mv88e6175_ops,
@@ -3999,6 +3873,8 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
                .global1_addr = 0x1b,
                .age_time_coeff = 15000,
                .g1_irqs = 9,
+               .atu_move_port_mask = 0xf,
+               .pvt = true,
                .tag_protocol = DSA_TAG_PROTO_EDSA,
                .flags = MV88E6XXX_FLAGS_FAMILY_6352,
                .ops = &mv88e6176_ops,
@@ -4014,6 +3890,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
                .global1_addr = 0x1b,
                .age_time_coeff = 15000,
                .g1_irqs = 8,
+               .atu_move_port_mask = 0xf,
                .tag_protocol = DSA_TAG_PROTO_EDSA,
                .flags = MV88E6XXX_FLAGS_FAMILY_6185,
                .ops = &mv88e6185_ops,
@@ -4030,6 +3907,8 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
                .tag_protocol = DSA_TAG_PROTO_DSA,
                .age_time_coeff = 3750,
                .g1_irqs = 9,
+               .pvt = true,
+               .atu_move_port_mask = 0x1f,
                .flags = MV88E6XXX_FLAGS_FAMILY_6390,
                .ops = &mv88e6190_ops,
        },
@@ -4044,6 +3923,8 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
                .global1_addr = 0x1b,
                .age_time_coeff = 3750,
                .g1_irqs = 9,
+               .atu_move_port_mask = 0x1f,
+               .pvt = true,
                .tag_protocol = DSA_TAG_PROTO_DSA,
                .flags = MV88E6XXX_FLAGS_FAMILY_6390,
                .ops = &mv88e6190x_ops,
@@ -4059,9 +3940,11 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
                .global1_addr = 0x1b,
                .age_time_coeff = 3750,
                .g1_irqs = 9,
+               .atu_move_port_mask = 0x1f,
+               .pvt = true,
                .tag_protocol = DSA_TAG_PROTO_DSA,
                .flags = MV88E6XXX_FLAGS_FAMILY_6390,
-               .ops = &mv88e6391_ops,
+               .ops = &mv88e6191_ops,
        },
 
        [MV88E6240] = {
@@ -4074,6 +3957,8 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
                .global1_addr = 0x1b,
                .age_time_coeff = 15000,
                .g1_irqs = 9,
+               .atu_move_port_mask = 0xf,
+               .pvt = true,
                .tag_protocol = DSA_TAG_PROTO_EDSA,
                .flags = MV88E6XXX_FLAGS_FAMILY_6352,
                .ops = &mv88e6240_ops,
@@ -4089,6 +3974,8 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
                .global1_addr = 0x1b,
                .age_time_coeff = 3750,
                .g1_irqs = 9,
+               .atu_move_port_mask = 0x1f,
+               .pvt = true,
                .tag_protocol = DSA_TAG_PROTO_DSA,
                .flags = MV88E6XXX_FLAGS_FAMILY_6390,
                .ops = &mv88e6290_ops,
@@ -4104,6 +3991,8 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
                .global1_addr = 0x1b,
                .age_time_coeff = 15000,
                .g1_irqs = 8,
+               .atu_move_port_mask = 0xf,
+               .pvt = true,
                .tag_protocol = DSA_TAG_PROTO_EDSA,
                .flags = MV88E6XXX_FLAGS_FAMILY_6320,
                .ops = &mv88e6320_ops,
@@ -4119,25 +4008,12 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
                .global1_addr = 0x1b,
                .age_time_coeff = 15000,
                .g1_irqs = 8,
+               .atu_move_port_mask = 0xf,
                .tag_protocol = DSA_TAG_PROTO_EDSA,
                .flags = MV88E6XXX_FLAGS_FAMILY_6320,
                .ops = &mv88e6321_ops,
        },
 
-       [MV88E6141] = {
-               .prod_num = PORT_SWITCH_ID_PROD_NUM_6141,
-               .family = MV88E6XXX_FAMILY_6341,
-               .name = "Marvell 88E6341",
-               .num_databases = 4096,
-               .num_ports = 6,
-               .port_base_addr = 0x10,
-               .global1_addr = 0x1b,
-               .age_time_coeff = 3750,
-               .tag_protocol = DSA_TAG_PROTO_EDSA,
-               .flags = MV88E6XXX_FLAGS_FAMILY_6341,
-               .ops = &mv88e6141_ops,
-       },
-
        [MV88E6341] = {
                .prod_num = PORT_SWITCH_ID_PROD_NUM_6341,
                .family = MV88E6XXX_FAMILY_6341,
@@ -4147,6 +4023,8 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
                .port_base_addr = 0x10,
                .global1_addr = 0x1b,
                .age_time_coeff = 3750,
+               .atu_move_port_mask = 0x1f,
+               .pvt = true,
                .tag_protocol = DSA_TAG_PROTO_EDSA,
                .flags = MV88E6XXX_FLAGS_FAMILY_6341,
                .ops = &mv88e6341_ops,
@@ -4162,6 +4040,8 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
                .global1_addr = 0x1b,
                .age_time_coeff = 15000,
                .g1_irqs = 9,
+               .atu_move_port_mask = 0xf,
+               .pvt = true,
                .tag_protocol = DSA_TAG_PROTO_EDSA,
                .flags = MV88E6XXX_FLAGS_FAMILY_6351,
                .ops = &mv88e6350_ops,
@@ -4177,6 +4057,8 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
                .global1_addr = 0x1b,
                .age_time_coeff = 15000,
                .g1_irqs = 9,
+               .atu_move_port_mask = 0xf,
+               .pvt = true,
                .tag_protocol = DSA_TAG_PROTO_EDSA,
                .flags = MV88E6XXX_FLAGS_FAMILY_6351,
                .ops = &mv88e6351_ops,
@@ -4192,6 +4074,8 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
                .global1_addr = 0x1b,
                .age_time_coeff = 15000,
                .g1_irqs = 9,
+               .atu_move_port_mask = 0xf,
+               .pvt = true,
                .tag_protocol = DSA_TAG_PROTO_EDSA,
                .flags = MV88E6XXX_FLAGS_FAMILY_6352,
                .ops = &mv88e6352_ops,
@@ -4206,6 +4090,8 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
                .global1_addr = 0x1b,
                .age_time_coeff = 3750,
                .g1_irqs = 9,
+               .atu_move_port_mask = 0x1f,
+               .pvt = true,
                .tag_protocol = DSA_TAG_PROTO_DSA,
                .flags = MV88E6XXX_FLAGS_FAMILY_6390,
                .ops = &mv88e6390_ops,
@@ -4220,6 +4106,8 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
                .global1_addr = 0x1b,
                .age_time_coeff = 3750,
                .g1_irqs = 9,
+               .atu_move_port_mask = 0x1f,
+               .pvt = true,
                .tag_protocol = DSA_TAG_PROTO_DSA,
                .flags = MV88E6XXX_FLAGS_FAMILY_6390,
                .ops = &mv88e6390x_ops,
@@ -4455,6 +4343,8 @@ static const struct dsa_switch_ops mv88e6xxx_switch_ops = {
        .port_mdb_add           = mv88e6xxx_port_mdb_add,
        .port_mdb_del           = mv88e6xxx_port_mdb_del,
        .port_mdb_dump          = mv88e6xxx_port_mdb_dump,
+       .crosschip_bridge_join  = mv88e6xxx_crosschip_bridge_join,
+       .crosschip_bridge_leave = mv88e6xxx_crosschip_bridge_leave,
 };
 
 static struct dsa_switch_driver mv88e6xxx_switch_drv = {
@@ -4466,12 +4356,14 @@ static int mv88e6xxx_register_switch(struct mv88e6xxx_chip *chip)
        struct device *dev = chip->dev;
        struct dsa_switch *ds;
 
-       ds = dsa_switch_alloc(dev, DSA_MAX_PORTS);
+       ds = dsa_switch_alloc(dev, mv88e6xxx_num_ports(chip));
        if (!ds)
                return -ENOMEM;
 
        ds->priv = chip;
        ds->ops = &mv88e6xxx_switch_ops;
+       ds->ageing_time_min = chip->info->age_time_coeff;
+       ds->ageing_time_max = chip->info->age_time_coeff * U8_MAX;
 
        dev_set_drvdata(dev, ds);
 
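
The ageing bounds exposed to the DSA core above follow directly from the 8-bit AgeTime field: one coefficient step at the low end, U8_MAX (255) steps at the top. A quick standalone sanity check of that range, assuming the common 15000 ms coefficient from the info table:

/* Hypothetical check, not part of the patch: ageing range for a chip
 * whose age_time_coeff is 15000 ms.
 */
#include <stdio.h>

int main(void)
{
	const unsigned int coeff = 15000;	/* ms per AgeTime step */

	/* min = 15000 ms (15 s), max = 3825000 ms (~63.75 min) */
	printf("min=%u ms max=%u ms\n", coeff, coeff * 255u);
	return 0;
}
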
@@ -4502,10 +4394,6 @@ static int mv88e6xxx_probe(struct mdio_device *mdiodev)
 
        chip->info = compat_info;
 
-       err = mv88e6xxx_verify_madatory_ops(chip, chip->info->ops);
-       if (err)
-               return err;
-
        err = mv88e6xxx_smi_init(chip, mdiodev->bus, mdiodev->addr);
        if (err)
                return err;
index 75af86a7fad80feb606fa6bab4cf78720d228eb0..39825837a1c9c062accd80af5a8dc3d6ccd4b771 100644 (file)
@@ -3,7 +3,8 @@
  *
  * Copyright (c) 2008 Marvell Semiconductor
  *
- * Copyright (c) 2016 Vivien Didelot <vivien.didelot@savoirfairelinux.com>
+ * Copyright (c) 2016-2017 Savoir-faire Linux Inc.
+ *     Vivien Didelot <vivien.didelot@savoirfairelinux.com>
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
index 1aec7382c02dff90864995d9910ab6f94676a792..e30cbe480d5b71efe212cb332833aaf759eb0a5a 100644 (file)
@@ -3,7 +3,8 @@
  *
  * Copyright (c) 2008 Marvell Semiconductor
  *
- * Copyright (c) 2016 Vivien Didelot <vivien.didelot@savoirfairelinux.com>
+ * Copyright (c) 2016-2017 Savoir-faire Linux Inc.
+ *     Vivien Didelot <vivien.didelot@savoirfairelinux.com>
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
@@ -38,4 +39,15 @@ int mv88e6095_g1_set_cpu_port(struct mv88e6xxx_chip *chip, int port);
 int mv88e6390_g1_set_cpu_port(struct mv88e6xxx_chip *chip, int port);
 int mv88e6390_g1_mgmt_rsvd2cpu(struct mv88e6xxx_chip *chip);
 
+int mv88e6xxx_g1_atu_set_learn2all(struct mv88e6xxx_chip *chip, bool learn2all);
+int mv88e6xxx_g1_atu_set_age_time(struct mv88e6xxx_chip *chip,
+                                 unsigned int msecs);
+int mv88e6xxx_g1_atu_getnext(struct mv88e6xxx_chip *chip, u16 fid,
+                            struct mv88e6xxx_atu_entry *entry);
+int mv88e6xxx_g1_atu_loadpurge(struct mv88e6xxx_chip *chip, u16 fid,
+                              struct mv88e6xxx_atu_entry *entry);
+int mv88e6xxx_g1_atu_flush(struct mv88e6xxx_chip *chip, u16 fid, bool all);
+int mv88e6xxx_g1_atu_remove(struct mv88e6xxx_chip *chip, u16 fid, int port,
+                           bool all);
+
 #endif /* _MV88E6XXX_GLOBAL1_H */
diff --git a/drivers/net/dsa/mv88e6xxx/global1_atu.c b/drivers/net/dsa/mv88e6xxx/global1_atu.c
new file mode 100644 (file)
index 0000000..fa7e7db
--- /dev/null
@@ -0,0 +1,305 @@
+/*
+ * Marvell 88E6xxx Address Translation Unit (ATU) support
+ *
+ * Copyright (c) 2008 Marvell Semiconductor
+ * Copyright (c) 2017 Savoir-faire Linux, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include "mv88e6xxx.h"
+#include "global1.h"
+
+/* Offset 0x01: ATU FID Register */
+
+static int mv88e6xxx_g1_atu_fid_write(struct mv88e6xxx_chip *chip, u16 fid)
+{
+       return mv88e6xxx_g1_write(chip, GLOBAL_ATU_FID, fid & 0xfff);
+}
+
+/* Offset 0x0A: ATU Control Register */
+
+int mv88e6xxx_g1_atu_set_learn2all(struct mv88e6xxx_chip *chip, bool learn2all)
+{
+       u16 val;
+       int err;
+
+       err = mv88e6xxx_g1_read(chip, GLOBAL_ATU_CONTROL, &val);
+       if (err)
+               return err;
+
+       if (learn2all)
+               val |= GLOBAL_ATU_CONTROL_LEARN2ALL;
+       else
+               val &= ~GLOBAL_ATU_CONTROL_LEARN2ALL;
+
+       return mv88e6xxx_g1_write(chip, GLOBAL_ATU_CONTROL, val);
+}
+
+int mv88e6xxx_g1_atu_set_age_time(struct mv88e6xxx_chip *chip,
+                                 unsigned int msecs)
+{
+       const unsigned int coeff = chip->info->age_time_coeff;
+       const unsigned int min = 0x01 * coeff;
+       const unsigned int max = 0xff * coeff;
+       u8 age_time;
+       u16 val;
+       int err;
+
+       if (msecs < min || msecs > max)
+               return -ERANGE;
+
+       /* Round to nearest multiple of coeff */
+       age_time = (msecs + coeff / 2) / coeff;
+
+       err = mv88e6xxx_g1_read(chip, GLOBAL_ATU_CONTROL, &val);
+       if (err)
+               return err;
+
+       /* AgeTime occupies bits 11:4 */
+       val &= ~0xff0;
+       val |= age_time << 4;
+
+       err = mv88e6xxx_g1_write(chip, GLOBAL_ATU_CONTROL, val);
+       if (err)
+               return err;
+
+       dev_dbg(chip->dev, "AgeTime set to 0x%02x (%d ms)\n", age_time,
+               age_time * coeff);
+
+       return 0;
+}
+
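
The AgeTime field is an 8-bit multiplier of the per-chip coefficient, so the helper above rounds the requested interval to the nearest multiple of age_time_coeff before programming it. A standalone sketch of the same arithmetic, with the coefficient value assumed:

/* Sketch of the rounding above; 15000 ms mirrors the age_time_coeff
 * used by most chips in the info table further down.
 */
#include <stdio.h>

int main(void)
{
	const unsigned int coeff = 15000;
	const unsigned int msecs = 300000;	/* 5 minutes */
	unsigned int age_time = (msecs + coeff / 2) / coeff;

	/* 300000 ms -> 20 steps, i.e. register value 0x14 */
	printf("0x%02x (%u ms)\n", age_time, age_time * coeff);
	return 0;
}
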
+/* Offset 0x0B: ATU Operation Register */
+
+static int mv88e6xxx_g1_atu_op_wait(struct mv88e6xxx_chip *chip)
+{
+       return mv88e6xxx_g1_wait(chip, GLOBAL_ATU_OP, GLOBAL_ATU_OP_BUSY);
+}
+
+static int mv88e6xxx_g1_atu_op(struct mv88e6xxx_chip *chip, u16 fid, u16 op)
+{
+       u16 val;
+       int err;
+
+       /* FID bits live in different registers depending on how many databases the chip supports */
+       if (mv88e6xxx_num_databases(chip) > 256) {
+               err = mv88e6xxx_g1_atu_fid_write(chip, fid);
+               if (err)
+                       return err;
+       } else {
+               if (mv88e6xxx_num_databases(chip) > 16) {
+                       /* ATU DBNum[7:4] are located in ATU Control 15:12 */
+                       err = mv88e6xxx_g1_read(chip, GLOBAL_ATU_CONTROL, &val);
+                       if (err)
+                               return err;
+
+                       val = (val & 0x0fff) | ((fid << 8) & 0xf000);
+                       err = mv88e6xxx_g1_write(chip, GLOBAL_ATU_CONTROL, val);
+                       if (err)
+                               return err;
+               }
+
+               /* ATU DBNum[3:0] are located in ATU Operation 3:0 */
+               op |= fid & 0xf;
+       }
+
+       err = mv88e6xxx_g1_write(chip, GLOBAL_ATU_OP, op);
+       if (err)
+               return err;
+
+       return mv88e6xxx_g1_atu_op_wait(chip);
+}
+
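
mv88e6xxx_g1_atu_op dispatches the FID according to database count: more than 256 databases means a dedicated ATU FID register, 17 to 256 split DBNum between ATU Control bits 15:12 and ATU Operation bits 3:0, and 16 or fewer use the operation bits alone. A sketch of the middle case, with illustrative register values:

/* Sketch: splitting an 8-bit FID (here 0xab) on a 256-database chip.
 * Masks mirror the code above; the register values are made up.
 */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint16_t fid = 0xab;
	uint16_t ctrl = 0x0000;		/* pretend ATU Control read */
	uint16_t op = 0xb000;		/* pretend ATU opcode */

	ctrl = (ctrl & 0x0fff) | ((fid << 8) & 0xf000);	/* DBNum[7:4] -> 15:12 */
	op |= fid & 0xf;				/* DBNum[3:0] -> 3:0 */

	printf("ctrl=0x%04x op=0x%04x\n", ctrl, op);	/* ctrl=0xa000 op=0xb00b */
	return 0;
}
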
+/* Offset 0x0C: ATU Data Register */
+
+static int mv88e6xxx_g1_atu_data_read(struct mv88e6xxx_chip *chip,
+                                     struct mv88e6xxx_atu_entry *entry)
+{
+       u16 val;
+       int err;
+
+       err = mv88e6xxx_g1_read(chip, GLOBAL_ATU_DATA, &val);
+       if (err)
+               return err;
+
+       entry->state = val & 0xf;
+       if (entry->state != GLOBAL_ATU_DATA_STATE_UNUSED) {
+               entry->trunk = !!(val & GLOBAL_ATU_DATA_TRUNK);
+               entry->portvec = (val >> 4) & mv88e6xxx_port_mask(chip);
+       }
+
+       return 0;
+}
+
+static int mv88e6xxx_g1_atu_data_write(struct mv88e6xxx_chip *chip,
+                                      struct mv88e6xxx_atu_entry *entry)
+{
+       u16 data = entry->state & 0xf;
+
+       if (entry->state != GLOBAL_ATU_DATA_STATE_UNUSED) {
+               if (entry->trunk)
+                       data |= GLOBAL_ATU_DATA_TRUNK;
+
+               data |= (entry->portvec & mv88e6xxx_port_mask(chip)) << 4;
+       }
+
+       return mv88e6xxx_g1_write(chip, GLOBAL_ATU_DATA, data);
+}
+
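
The two helpers above pack and unpack the ATU Data register: EntryState sits in bits 3:0 and the port vector starts at bit 4, with the trunk flag (GLOBAL_ATU_DATA_TRUNK) as a single high bit. A minimal encoding sketch, assuming an 11-bit port vector:

/* Sketch of the ATU Data layout used above; the 0x7ff port vector
 * width is an assumption standing in for mv88e6xxx_port_mask().
 */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	unsigned int state = 0xe;	/* e.g. a static unicast entry */
	unsigned int portvec = 0x05;	/* ports 0 and 2 */
	uint16_t data = (state & 0xf) | ((portvec & 0x7ff) << 4);

	printf("data = 0x%04x\n", data);	/* 0x005e */
	return 0;
}
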
+/* Offset 0x0D: ATU MAC Address Register Bytes 0 & 1
+ * Offset 0x0E: ATU MAC Address Register Bytes 2 & 3
+ * Offset 0x0F: ATU MAC Address Register Bytes 4 & 5
+ */
+
+static int mv88e6xxx_g1_atu_mac_read(struct mv88e6xxx_chip *chip,
+                                    struct mv88e6xxx_atu_entry *entry)
+{
+       u16 val;
+       int i, err;
+
+       for (i = 0; i < 3; i++) {
+               err = mv88e6xxx_g1_read(chip, GLOBAL_ATU_MAC_01 + i, &val);
+               if (err)
+                       return err;
+
+               entry->mac[i * 2] = val >> 8;
+               entry->mac[i * 2 + 1] = val & 0xff;
+       }
+
+       return 0;
+}
+
+static int mv88e6xxx_g1_atu_mac_write(struct mv88e6xxx_chip *chip,
+                                     struct mv88e6xxx_atu_entry *entry)
+{
+       u16 val;
+       int i, err;
+
+       for (i = 0; i < 3; i++) {
+               val = (entry->mac[i * 2] << 8) | entry->mac[i * 2 + 1];
+               err = mv88e6xxx_g1_write(chip, GLOBAL_ATU_MAC_01 + i, val);
+               if (err)
+                       return err;
+       }
+
+       return 0;
+}
+
+/* Address Translation Unit operations */
+
+int mv88e6xxx_g1_atu_getnext(struct mv88e6xxx_chip *chip, u16 fid,
+                            struct mv88e6xxx_atu_entry *entry)
+{
+       int err;
+
+       err = mv88e6xxx_g1_atu_op_wait(chip);
+       if (err)
+               return err;
+
+       /* Write the MAC address to start iterating from, but only once */
+       if (entry->state == GLOBAL_ATU_DATA_STATE_UNUSED) {
+               err = mv88e6xxx_g1_atu_mac_write(chip, entry);
+               if (err)
+                       return err;
+       }
+
+       err = mv88e6xxx_g1_atu_op(chip, fid, GLOBAL_ATU_OP_GET_NEXT_DB);
+       if (err)
+               return err;
+
+       err = mv88e6xxx_g1_atu_data_read(chip, entry);
+       if (err)
+               return err;
+
+       return mv88e6xxx_g1_atu_mac_read(chip, entry);
+}
+
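
GetNext is meant to be called in a loop: seed the entry with the all-ones broadcast address and a zeroed (unused) state so the starting MAC is written exactly once, then iterate until the hardware returns an unused entry or wraps back to broadcast. A hedged usage sketch, not taken from this patch:

/* Usage sketch only: walk every entry of a FID with the helper above.
 * Assumes kernel context (is_broadcast_ether_addr() from
 * <linux/etherdevice.h>) and that chip->reg_lock is already held.
 */
static int mv88e6xxx_g1_atu_walk(struct mv88e6xxx_chip *chip, u16 fid)
{
	struct mv88e6xxx_atu_entry entry = {
		.state = GLOBAL_ATU_DATA_STATE_UNUSED,
		.mac = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff },
	};
	int err;

	do {
		err = mv88e6xxx_g1_atu_getnext(chip, fid, &entry);
		if (err)
			return err;

		if (entry.state == GLOBAL_ATU_DATA_STATE_UNUSED)
			break;

		/* entry.mac and entry.portvec describe one valid entry */
	} while (!is_broadcast_ether_addr(entry.mac));

	return 0;
}
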
+int mv88e6xxx_g1_atu_loadpurge(struct mv88e6xxx_chip *chip, u16 fid,
+                              struct mv88e6xxx_atu_entry *entry)
+{
+       int err;
+
+       err = mv88e6xxx_g1_atu_op_wait(chip);
+       if (err)
+               return err;
+
+       err = mv88e6xxx_g1_atu_mac_write(chip, entry);
+       if (err)
+               return err;
+
+       err = mv88e6xxx_g1_atu_data_write(chip, entry);
+       if (err)
+               return err;
+
+       return mv88e6xxx_g1_atu_op(chip, fid, GLOBAL_ATU_OP_LOAD_DB);
+}
+
+static int mv88e6xxx_g1_atu_flushmove(struct mv88e6xxx_chip *chip, u16 fid,
+                                     struct mv88e6xxx_atu_entry *entry,
+                                     bool all)
+{
+       u16 op;
+       int err;
+
+       err = mv88e6xxx_g1_atu_op_wait(chip);
+       if (err)
+               return err;
+
+       err = mv88e6xxx_g1_atu_data_write(chip, entry);
+       if (err)
+               return err;
+
+       /* Flush/Move all or non-static entries from all or a given database */
+       if (all && fid)
+               op = GLOBAL_ATU_OP_FLUSH_MOVE_ALL_DB;
+       else if (fid)
+               op = GLOBAL_ATU_OP_FLUSH_MOVE_NON_STATIC_DB;
+       else if (all)
+               op = GLOBAL_ATU_OP_FLUSH_MOVE_ALL;
+       else
+               op = GLOBAL_ATU_OP_FLUSH_MOVE_NON_STATIC;
+
+       return mv88e6xxx_g1_atu_op(chip, fid, op);
+}
+
+int mv88e6xxx_g1_atu_flush(struct mv88e6xxx_chip *chip, u16 fid, bool all)
+{
+       struct mv88e6xxx_atu_entry entry = {
+               .state = 0, /* Null EntryState means Flush */
+       };
+
+       return mv88e6xxx_g1_atu_flushmove(chip, fid, &entry, all);
+}
+
+static int mv88e6xxx_g1_atu_move(struct mv88e6xxx_chip *chip, u16 fid,
+                                int from_port, int to_port, bool all)
+{
+       struct mv88e6xxx_atu_entry entry = { 0 };
+       unsigned long mask;
+       int shift;
+
+       if (!chip->info->atu_move_port_mask)
+               return -EOPNOTSUPP;
+
+       mask = chip->info->atu_move_port_mask;
+       shift = bitmap_weight(&mask, 16);
+
+       entry.state = 0xf; /* Full EntryState means Move */
+       entry.portvec = from_port & mask;
+       entry.portvec |= (to_port & mask) << shift;
+
+       return mv88e6xxx_g1_atu_flushmove(chip, fid, &entry, all);
+}
+
+int mv88e6xxx_g1_atu_remove(struct mv88e6xxx_chip *chip, u16 fid, int port,
+                           bool all)
+{
+       int from_port = port;
+       int to_port = chip->info->atu_move_port_mask;
+
+       return mv88e6xxx_g1_atu_move(chip, fid, from_port, to_port, all);
+}
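
The Move encoding splits the port vector in two fields whose width is the population count of atu_move_port_mask: the low field carries the source port, the one above it the destination, and a destination of all ones turns the Move into the Remove built on top of it. A quick sketch for the common 0xf mask:

/* Sketch of the Move port vector packing above, for mask 0xf (shift 4).
 * Moving port 2 -> port 5 yields 0x52; a destination of 0xf removes.
 */
#include <stdio.h>

int main(void)
{
	unsigned int mask = 0xf, shift = 4;	/* bitmap_weight(0xf) == 4 */
	int from_port = 2, to_port = 5;

	unsigned int portvec = (from_port & mask) | ((to_port & mask) << shift);

	printf("portvec = 0x%02x\n", portvec);	/* 0x52 */
	return 0;
}
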
index 8f15bc7b1f5f88d2e8150f78cdf79b9be28cb07c..b3fea55071e3799063b04b485790e4bf73bc9f94 100644 (file)
@@ -4,7 +4,8 @@
  *
  * Copyright (c) 2008 Marvell Semiconductor
  *
- * Copyright (c) 2016 Vivien Didelot <vivien.didelot@savoirfairelinux.com>
+ * Copyright (c) 2016-2017 Savoir-faire Linux Inc.
+ *     Vivien Didelot <vivien.didelot@savoirfairelinux.com>
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
@@ -12,6 +13,7 @@
  * (at your option) any later version.
  */
 
+#include <linux/interrupt.h>
 #include <linux/irqdomain.h>
 #include "mv88e6xxx.h"
 #include "global2.h"
@@ -170,6 +172,50 @@ static int mv88e6xxx_g2_clear_irl(struct mv88e6xxx_chip *chip)
        return err;
 }
 
+/* Offset 0x0B: Cross-chip Port VLAN (Addr) Register
+ * Offset 0x0C: Cross-chip Port VLAN Data Register
+ */
+
+static int mv88e6xxx_g2_pvt_op_wait(struct mv88e6xxx_chip *chip)
+{
+       return mv88e6xxx_g2_wait(chip, GLOBAL2_PVT_ADDR, GLOBAL2_PVT_ADDR_BUSY);
+}
+
+static int mv88e6xxx_g2_pvt_op(struct mv88e6xxx_chip *chip, int src_dev,
+                              int src_port, u16 op)
+{
+       int err;
+
+       /* 9-bit Cross-chip PVT pointer: with GLOBAL2_MISC_5_BIT_PORT cleared,
+        * source device is 5-bit, source port is 4-bit.
+        */
+       op |= (src_dev & 0x1f) << 4;
+       op |= (src_port & 0xf);
+
+       err = mv88e6xxx_g2_write(chip, GLOBAL2_PVT_ADDR, op);
+       if (err)
+               return err;
+
+       return mv88e6xxx_g2_pvt_op_wait(chip);
+}
+
+int mv88e6xxx_g2_pvt_write(struct mv88e6xxx_chip *chip, int src_dev,
+                          int src_port, u16 data)
+{
+       int err;
+
+       err = mv88e6xxx_g2_pvt_op_wait(chip);
+       if (err)
+               return err;
+
+       err = mv88e6xxx_g2_write(chip, GLOBAL2_PVT_DATA, data);
+       if (err)
+               return err;
+
+       return mv88e6xxx_g2_pvt_op(chip, src_dev, src_port,
+                                  GLOBAL2_PVT_ADDR_OP_WRITE_PVLAN);
+}
+
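
In 4-bit port mode the nine PVT pointer bits are the source device in bits 8:4 and the source port in bits 3:0, exactly as packed by mv88e6xxx_g2_pvt_op above. A minimal sketch of the packing, with the opcode bits left out:

/* Sketch of the 9-bit PVT pointer built above; opcode bits omitted. */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	int src_dev = 3, src_port = 7;
	uint16_t op = 0;

	op |= (src_dev & 0x1f) << 4;	/* device in bits 8:4 */
	op |= src_port & 0xf;		/* port in bits 3:0 */

	printf("pointer = 0x%03x\n", op);	/* 0x037 */
	return 0;
}
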
 /* Offset 0x0D: Switch MAC/WoL/WoF register */
 
 static int mv88e6xxx_g2_switch_mac_write(struct mv88e6xxx_chip *chip,
@@ -522,8 +568,9 @@ static int mv88e6xxx_g2_smi_phy_write_addr(struct mv88e6xxx_chip *chip,
        return mv88e6xxx_g2_smi_phy_cmd(chip, cmd);
 }
 
-int mv88e6xxx_g2_smi_phy_read_c45(struct mv88e6xxx_chip *chip, int addr,
-                                 int reg_c45, u16 *val, bool external)
+static int mv88e6xxx_g2_smi_phy_read_c45(struct mv88e6xxx_chip *chip,
+                                        int addr, int reg_c45, u16 *val,
+                                        bool external)
 {
        int device = (reg_c45 >> 16) & 0x1f;
        int reg = reg_c45 & 0xffff;
@@ -553,8 +600,9 @@ int mv88e6xxx_g2_smi_phy_read_c45(struct mv88e6xxx_chip *chip, int addr,
        return 0;
 }
 
-int mv88e6xxx_g2_smi_phy_read_c22(struct mv88e6xxx_chip *chip, int addr,
-                                 int reg, u16 *val, bool external)
+static int mv88e6xxx_g2_smi_phy_read_c22(struct mv88e6xxx_chip *chip,
+                                        int addr, int reg, u16 *val,
+                                        bool external)
 {
        u16 cmd = GLOBAL2_SMI_PHY_CMD_OP_22_READ_DATA | (addr << 5) | reg;
        int err;
@@ -586,8 +634,9 @@ int mv88e6xxx_g2_smi_phy_read(struct mv88e6xxx_chip *chip,
        return mv88e6xxx_g2_smi_phy_read_c22(chip, addr, reg, val, external);
 }
 
-int mv88e6xxx_g2_smi_phy_write_c45(struct mv88e6xxx_chip *chip, int addr,
-                                  int reg_c45, u16 val, bool external)
+static int mv88e6xxx_g2_smi_phy_write_c45(struct mv88e6xxx_chip *chip,
+                                         int addr, int reg_c45, u16 val,
+                                         bool external)
 {
        int device = (reg_c45 >> 16) & 0x1f;
        int reg = reg_c45 & 0xffff;
@@ -615,8 +664,9 @@ int mv88e6xxx_g2_smi_phy_write_c45(struct mv88e6xxx_chip *chip, int addr,
        return 0;
 }
 
-int mv88e6xxx_g2_smi_phy_write_c22(struct mv88e6xxx_chip *chip, int addr,
-                                  int reg, u16 val, bool external)
+static int mv88e6xxx_g2_smi_phy_write_c22(struct mv88e6xxx_chip *chip,
+                                         int addr, int reg, u16 val,
+                                         bool external)
 {
        u16 cmd = GLOBAL2_SMI_PHY_CMD_OP_22_WRITE_DATA | (addr << 5) | reg;
        int err;
@@ -782,6 +832,31 @@ static int mv88e6xxx_g2_watchdog_setup(struct mv88e6xxx_chip *chip)
        return err;
 }
 
+/* Offset 0x1D: Misc Register */
+
+static int mv88e6xxx_g2_misc_5_bit_port(struct mv88e6xxx_chip *chip,
+                                       bool port_5_bit)
+{
+       u16 val;
+       int err;
+
+       err = mv88e6xxx_g2_read(chip, GLOBAL2_MISC, &val);
+       if (err)
+               return err;
+
+       if (port_5_bit)
+               val |= GLOBAL2_MISC_5_BIT_PORT;
+       else
+               val &= ~GLOBAL2_MISC_5_BIT_PORT;
+
+       return mv88e6xxx_g2_write(chip, GLOBAL2_MISC, val);
+}
+
+int mv88e6xxx_g2_misc_4_bit_port(struct mv88e6xxx_chip *chip)
+{
+       return mv88e6xxx_g2_misc_5_bit_port(chip, false);
+}
+
 static void mv88e6xxx_g2_irq_mask(struct irq_data *d)
 {
        struct mv88e6xxx_chip *chip = irq_data_get_irq_chip_data(d);
@@ -964,14 +1039,6 @@ int mv88e6xxx_g2_setup(struct mv88e6xxx_chip *chip)
                                return err;
        }
 
-       if (mv88e6xxx_has(chip, MV88E6XXX_FLAGS_PVT)) {
-               /* Initialize Cross-chip Port VLAN Table to reset defaults */
-               err = mv88e6xxx_g2_write(chip, GLOBAL2_PVT_ADDR,
-                                        GLOBAL2_PVT_ADDR_OP_INIT_ONES);
-               if (err)
-                       return err;
-       }
-
        if (mv88e6xxx_has(chip, MV88E6XXX_FLAG_G2_POT)) {
                /* Clear the priority override table. */
                err = mv88e6xxx_g2_clear_pot(chip);
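
For context, the per-entry PVT writer added above is meant to be driven by a setup loop deciding which external device/port pairs may egress which local ports. A minimal sketch of such a caller, built only from helpers visible in this patch (the function name and the grant-everything policy are illustrative, not part of this commit):

static int mv88e6xxx_pvt_setup(struct mv88e6xxx_chip *chip)
{
        int dev, port;
        int err;

        if (!mv88e6xxx_has_pvt(chip))
                return 0;

        /* Use 4-bit port numbering, leaving 5 bits for the source device */
        err = mv88e6xxx_g2_misc_4_bit_port(chip);
        if (err)
                return err;

        /* Grant every possible external device/port pair access to all
         * local ports; a real policy would restrict this per bridge.
         */
        for (dev = 0; dev < MV88E6XXX_MAX_PVT_SWITCHES; dev++) {
                for (port = 0; port < MV88E6XXX_MAX_PVT_PORTS; port++) {
                        err = mv88e6xxx_g2_pvt_write(chip, dev, port,
                                                     mv88e6xxx_port_mask(chip));
                        if (err)
                                return err;
                }
        }

        return 0;
}
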
index a8b2f9486a4abad1227030f57ce00b6d8cbb1fbb..96046bb12ca17333530f237fddb46438c3298dea 100644 (file)
@@ -3,7 +3,8 @@
  *
  * Copyright (c) 2008 Marvell Semiconductor
  *
- * Copyright (c) 2016 Vivien Didelot <vivien.didelot@savoirfairelinux.com>
+ * Copyright (c) 2016-2017 Savoir-faire Linux Inc.
+ *     Vivien Didelot <vivien.didelot@savoirfairelinux.com>
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
@@ -41,6 +42,10 @@ int mv88e6xxx_g2_get_eeprom16(struct mv88e6xxx_chip *chip,
 int mv88e6xxx_g2_set_eeprom16(struct mv88e6xxx_chip *chip,
                              struct ethtool_eeprom *eeprom, u8 *data);
 
+int mv88e6xxx_g2_pvt_write(struct mv88e6xxx_chip *chip, int src_dev,
+                          int src_port, u16 data);
+int mv88e6xxx_g2_misc_4_bit_port(struct mv88e6xxx_chip *chip);
+
 int mv88e6xxx_g2_setup(struct mv88e6xxx_chip *chip);
 int mv88e6xxx_g2_irq_setup(struct mv88e6xxx_chip *chip);
 void mv88e6xxx_g2_irq_free(struct mv88e6xxx_chip *chip);
@@ -109,6 +114,17 @@ static inline int mv88e6xxx_g2_set_eeprom16(struct mv88e6xxx_chip *chip,
        return -EOPNOTSUPP;
 }
 
+static inline int mv88e6xxx_g2_pvt_write(struct mv88e6xxx_chip *chip,
+                                        int src_dev, int src_port, u16 data)
+{
+       return -EOPNOTSUPP;
+}
+
+static inline int mv88e6xxx_g2_misc_4_bit_port(struct mv88e6xxx_chip *chip)
+{
+       return -EOPNOTSUPP;
+}
+
 static inline int mv88e6xxx_g2_setup(struct mv88e6xxx_chip *chip)
 {
        return -EOPNOTSUPP;
index 6033f2f6260a464418fcc981e5430e155e836bc3..c8f54986996b73893efbebff09e7bc40273c1959 100644 (file)
@@ -16,6 +16,7 @@
 #include <linux/irq.h>
 #include <linux/gpio/consumer.h>
 #include <linux/phy.h>
+#include <net/dsa.h>
 
 #ifndef UINT64_MAX
 #define UINT64_MAX             (u64)(~((u64)0))
 #define PORT_CONTROL_TAG_IF_BOTH       BIT(6)
 #define PORT_CONTROL_USE_IP            BIT(5)
 #define PORT_CONTROL_USE_TAG           BIT(4)
-#define PORT_CONTROL_FORWARD_UNKNOWN_MC        BIT(3)
 #define PORT_CONTROL_FORWARD_UNKNOWN   BIT(2)
-#define PORT_CONTROL_NOT_EGRESS_UNKNOWN_DA             (0x0 << 2)
-#define PORT_CONTROL_NOT_EGRESS_UNKNOWN_MULTICAST_DA   (0x1 << 2)
-#define PORT_CONTROL_NOT_EGRESS_UNKNOWN_UNITCAST_DA    (0x2 << 2)
-#define PORT_CONTROL_EGRESS_ALL_UNKNOWN_DA             (0x3 << 2)
+#define PORT_CONTROL_EGRESS_FLOODS_MASK                        (0x3 << 2)
+#define PORT_CONTROL_EGRESS_FLOODS_NO_UNKNOWN_DA       (0x0 << 2)
+#define PORT_CONTROL_EGRESS_FLOODS_NO_UNKNOWN_MC_DA    (0x1 << 2)
+#define PORT_CONTROL_EGRESS_FLOODS_NO_UNKNOWN_UC_DA    (0x2 << 2)
+#define PORT_CONTROL_EGRESS_FLOODS_ALL_UNKNOWN_DA      (0x3 << 2)
 #define PORT_CONTROL_STATE_MASK                0x03
 #define PORT_CONTROL_STATE_DISABLED    0x00
 #define PORT_CONTROL_STATE_BLOCKING    0x01
 #define PORT_CONTROL_STATE_LEARNING    0x02
 #define PORT_CONTROL_STATE_FORWARDING  0x03
 #define PORT_CONTROL_1         0x05
+#define PORT_CONTROL_1_MESSAGE_PORT    BIT(15)
 #define PORT_CONTROL_1_FID_11_4_MASK   (0xff << 0)
 #define PORT_BASE_VLAN         0x06
 #define PORT_BASE_VLAN_FID_3_0_MASK    (0xf << 12)
 #define PORT_CONTROL_2_DISCARD_UNTAGGED        BIT(8)
 #define PORT_CONTROL_2_MAP_DA          BIT(7)
 #define PORT_CONTROL_2_DEFAULT_FORWARD BIT(6)
-#define PORT_CONTROL_2_FORWARD_UNKNOWN BIT(6)
 #define PORT_CONTROL_2_EGRESS_MONITOR  BIT(5)
 #define PORT_CONTROL_2_INGRESS_MONITOR BIT(4)
 #define PORT_CONTROL_2_UPSTREAM_MASK   0x0f
 #define PORT_ATU_CONTROL       0x0c
 #define PORT_PRI_OVERRIDE      0x0d
 #define PORT_ETH_TYPE          0x0f
+#define PORT_ETH_TYPE_DEFAULT  0x9100
 #define PORT_IN_DISCARD_LO     0x10
 #define PORT_IN_DISCARD_HI     0x11
 #define PORT_IN_FILTERED       0x12
 #define GLOBAL2_WDOG_FORCE_IRQ                 BIT(0)
 #define GLOBAL2_QOS_WEIGHT     0x1c
 #define GLOBAL2_MISC           0x1d
+#define GLOBAL2_MISC_5_BIT_PORT        BIT(14)
 
 #define MV88E6XXX_N_FID                4096
 
+/* PVT limits for 4-bit port and 5-bit switch */
+#define MV88E6XXX_MAX_PVT_SWITCHES     32
+#define MV88E6XXX_MAX_PVT_PORTS                16
+
 enum mv88e6xxx_frame_mode {
        MV88E6XXX_FRAME_MODE_NORMAL,
        MV88E6XXX_FRAME_MODE_DSA,
@@ -525,8 +532,6 @@ enum mv88e6xxx_cap {
        MV88E6XXX_CAP_G2_MGMT_EN_0X,    /* (0x03) MGMT Enable Register 0x */
        MV88E6XXX_CAP_G2_IRL_CMD,       /* (0x09) Ingress Rate Command */
        MV88E6XXX_CAP_G2_IRL_DATA,      /* (0x0a) Ingress Rate Data */
-       MV88E6XXX_CAP_G2_PVT_ADDR,      /* (0x0b) Cross Chip Port VLAN Addr */
-       MV88E6XXX_CAP_G2_PVT_DATA,      /* (0x0c) Cross Chip Port VLAN Data */
        MV88E6XXX_CAP_G2_POT,           /* (0x0f) Priority Override Table */
 
        /* Per VLAN Spanning Tree Unit (STU).
@@ -551,7 +556,6 @@ enum mv88e6xxx_cap {
 
 #define MV88E6XXX_FLAG_SERDES          BIT_ULL(MV88E6XXX_CAP_SERDES)
 
-#define MV88E6XXX_FLAG_G1_ATU_FID      BIT_ULL(MV88E6XXX_CAP_G1_ATU_FID)
 #define MV88E6XXX_FLAG_G1_VTU_FID      BIT_ULL(MV88E6XXX_CAP_G1_VTU_FID)
 
 #define MV88E6XXX_FLAG_GLOBAL2         BIT_ULL(MV88E6XXX_CAP_GLOBAL2)
@@ -560,8 +564,6 @@ enum mv88e6xxx_cap {
 #define MV88E6XXX_FLAG_G2_MGMT_EN_0X   BIT_ULL(MV88E6XXX_CAP_G2_MGMT_EN_0X)
 #define MV88E6XXX_FLAG_G2_IRL_CMD      BIT_ULL(MV88E6XXX_CAP_G2_IRL_CMD)
 #define MV88E6XXX_FLAG_G2_IRL_DATA     BIT_ULL(MV88E6XXX_CAP_G2_IRL_DATA)
-#define MV88E6XXX_FLAG_G2_PVT_ADDR     BIT_ULL(MV88E6XXX_CAP_G2_PVT_ADDR)
-#define MV88E6XXX_FLAG_G2_PVT_DATA     BIT_ULL(MV88E6XXX_CAP_G2_PVT_DATA)
 #define MV88E6XXX_FLAG_G2_POT          BIT_ULL(MV88E6XXX_CAP_G2_POT)
 
 #define MV88E6XXX_FLAG_STU             BIT_ULL(MV88E6XXX_CAP_STU)
@@ -577,11 +579,6 @@ enum mv88e6xxx_cap {
        (MV88E6XXX_FLAG_SMI_CMD |       \
         MV88E6XXX_FLAG_SMI_DATA)
 
-/* Cross-chip Port VLAN Table */
-#define MV88E6XXX_FLAGS_PVT            \
-       (MV88E6XXX_FLAG_G2_PVT_ADDR |   \
-        MV88E6XXX_FLAG_G2_PVT_DATA)
-
 /* Fiber/SERDES Registers at SMI address F, page 1 */
 #define MV88E6XXX_FLAGS_SERDES         \
        (MV88E6XXX_FLAG_PHY_PAGE |      \
@@ -594,8 +591,7 @@ enum mv88e6xxx_cap {
         MV88E6XXX_FLAGS_MULTI_CHIP)
 
 #define MV88E6XXX_FLAGS_FAMILY_6097    \
-       (MV88E6XXX_FLAG_G1_ATU_FID |    \
-        MV88E6XXX_FLAG_G1_VTU_FID |    \
+       (MV88E6XXX_FLAG_G1_VTU_FID |    \
         MV88E6XXX_FLAG_GLOBAL2 |       \
         MV88E6XXX_FLAG_G2_INT |        \
         MV88E6XXX_FLAG_G2_MGMT_EN_2X | \
@@ -604,12 +600,10 @@ enum mv88e6xxx_cap {
         MV88E6XXX_FLAG_STU |           \
         MV88E6XXX_FLAG_VTU |           \
         MV88E6XXX_FLAGS_IRL |          \
-        MV88E6XXX_FLAGS_MULTI_CHIP |   \
-        MV88E6XXX_FLAGS_PVT)
+        MV88E6XXX_FLAGS_MULTI_CHIP)
 
 #define MV88E6XXX_FLAGS_FAMILY_6165    \
-       (MV88E6XXX_FLAG_G1_ATU_FID |    \
-        MV88E6XXX_FLAG_G1_VTU_FID |    \
+       (MV88E6XXX_FLAG_G1_VTU_FID |    \
         MV88E6XXX_FLAG_GLOBAL2 |       \
         MV88E6XXX_FLAG_G2_INT |        \
         MV88E6XXX_FLAG_G2_MGMT_EN_2X | \
@@ -618,8 +612,7 @@ enum mv88e6xxx_cap {
         MV88E6XXX_FLAG_STU |           \
         MV88E6XXX_FLAG_VTU |           \
         MV88E6XXX_FLAGS_IRL |          \
-        MV88E6XXX_FLAGS_MULTI_CHIP |   \
-        MV88E6XXX_FLAGS_PVT)
+        MV88E6XXX_FLAGS_MULTI_CHIP)
 
 #define MV88E6XXX_FLAGS_FAMILY_6185    \
        (MV88E6XXX_FLAG_GLOBAL2 |       \
@@ -636,12 +629,10 @@ enum mv88e6xxx_cap {
         MV88E6XXX_FLAG_G2_POT |        \
         MV88E6XXX_FLAG_VTU |           \
         MV88E6XXX_FLAGS_IRL |          \
-        MV88E6XXX_FLAGS_MULTI_CHIP |   \
-        MV88E6XXX_FLAGS_PVT)
+        MV88E6XXX_FLAGS_MULTI_CHIP)
 
 #define MV88E6XXX_FLAGS_FAMILY_6341    \
        (MV88E6XXX_FLAG_EEE |           \
-        MV88E6XXX_FLAG_G1_ATU_FID |    \
         MV88E6XXX_FLAG_G1_VTU_FID |    \
         MV88E6XXX_FLAG_GLOBAL2 |       \
         MV88E6XXX_FLAG_G2_INT |        \
@@ -650,12 +641,10 @@ enum mv88e6xxx_cap {
         MV88E6XXX_FLAG_VTU |           \
         MV88E6XXX_FLAGS_IRL |          \
         MV88E6XXX_FLAGS_MULTI_CHIP |   \
-        MV88E6XXX_FLAGS_PVT |          \
         MV88E6XXX_FLAGS_SERDES)
 
 #define MV88E6XXX_FLAGS_FAMILY_6351    \
-       (MV88E6XXX_FLAG_G1_ATU_FID |    \
-        MV88E6XXX_FLAG_G1_VTU_FID |    \
+       (MV88E6XXX_FLAG_G1_VTU_FID |    \
         MV88E6XXX_FLAG_GLOBAL2 |       \
         MV88E6XXX_FLAG_G2_INT |        \
         MV88E6XXX_FLAG_G2_MGMT_EN_2X | \
@@ -664,12 +653,10 @@ enum mv88e6xxx_cap {
         MV88E6XXX_FLAG_STU |           \
         MV88E6XXX_FLAG_VTU |           \
         MV88E6XXX_FLAGS_IRL |          \
-        MV88E6XXX_FLAGS_MULTI_CHIP |   \
-        MV88E6XXX_FLAGS_PVT)
+        MV88E6XXX_FLAGS_MULTI_CHIP)
 
 #define MV88E6XXX_FLAGS_FAMILY_6352    \
        (MV88E6XXX_FLAG_EEE |           \
-        MV88E6XXX_FLAG_G1_ATU_FID |    \
         MV88E6XXX_FLAG_G1_VTU_FID |    \
         MV88E6XXX_FLAG_GLOBAL2 |       \
         MV88E6XXX_FLAG_G2_INT |        \
@@ -680,7 +667,6 @@ enum mv88e6xxx_cap {
         MV88E6XXX_FLAG_VTU |           \
         MV88E6XXX_FLAGS_IRL |          \
         MV88E6XXX_FLAGS_MULTI_CHIP |   \
-        MV88E6XXX_FLAGS_PVT |          \
         MV88E6XXX_FLAGS_SERDES)
 
 #define MV88E6XXX_FLAGS_FAMILY_6390    \
@@ -690,8 +676,7 @@ enum mv88e6xxx_cap {
         MV88E6XXX_FLAG_STU |           \
         MV88E6XXX_FLAG_VTU |           \
         MV88E6XXX_FLAGS_IRL |          \
-        MV88E6XXX_FLAGS_MULTI_CHIP |   \
-        MV88E6XXX_FLAGS_PVT)
+        MV88E6XXX_FLAGS_MULTI_CHIP)
 
 struct mv88e6xxx_ops;
 
@@ -705,16 +690,21 @@ struct mv88e6xxx_info {
        unsigned int global1_addr;
        unsigned int age_time_coeff;
        unsigned int g1_irqs;
+       bool pvt;
        enum dsa_tag_protocol tag_protocol;
        unsigned long long flags;
+
+       /* Mask for the FromPort and ToPort values of PortVec used in the
+        * ATU Move operation. 0 means ATU Move is not supported.
+        */
+       u8 atu_move_port_mask;
        const struct mv88e6xxx_ops *ops;
 };
 
 struct mv88e6xxx_atu_entry {
-       u16     fid;
        u8      state;
        bool    trunk;
-       u16     portv_trunkid;
+       u16     portvec;
        u8      mac[ETH_ALEN];
 };
 
@@ -864,14 +854,16 @@ struct mv88e6xxx_ops {
 
        int (*port_set_frame_mode)(struct mv88e6xxx_chip *chip, int port,
                                   enum mv88e6xxx_frame_mode mode);
-       int (*port_set_egress_unknowns)(struct mv88e6xxx_chip *chip, int port,
-                                       bool on);
+       int (*port_set_egress_floods)(struct mv88e6xxx_chip *chip, int port,
+                                     bool unicast, bool multicast);
        int (*port_set_ether_type)(struct mv88e6xxx_chip *chip, int port,
                                   u16 etype);
        int (*port_jumbo_config)(struct mv88e6xxx_chip *chip, int port);
 
        int (*port_egress_rate_limiting)(struct mv88e6xxx_chip *chip, int port);
        int (*port_pause_config)(struct mv88e6xxx_chip *chip, int port);
+       int (*port_disable_learn_limit)(struct mv88e6xxx_chip *chip, int port);
+       int (*port_disable_pri_override)(struct mv88e6xxx_chip *chip, int port);
 
        /* CMODE controls what PHY mode the MAC will use, e.g. SGMII, RGMII, etc.
         * Some chips allow this to be configured on specific ports.
@@ -934,6 +926,11 @@ static inline bool mv88e6xxx_has(struct mv88e6xxx_chip *chip,
        return (chip->info->flags & flags) == flags;
 }
 
+static inline bool mv88e6xxx_has_pvt(struct mv88e6xxx_chip *chip)
+{
+       return chip->info->pvt;
+}
+
 static inline unsigned int mv88e6xxx_num_databases(struct mv88e6xxx_chip *chip)
 {
        return chip->info->num_databases;
@@ -944,6 +941,11 @@ static inline unsigned int mv88e6xxx_num_ports(struct mv88e6xxx_chip *chip)
        return chip->info->num_ports;
 }
 
+static inline u16 mv88e6xxx_port_mask(struct mv88e6xxx_chip *chip)
+{
+       return GENMASK(mv88e6xxx_num_ports(chip) - 1, 0);
+}
+
 int mv88e6xxx_read(struct mv88e6xxx_chip *chip, int addr, int reg, u16 *val);
 int mv88e6xxx_write(struct mv88e6xxx_chip *chip, int addr, int reg, u16 val);
 int mv88e6xxx_update(struct mv88e6xxx_chip *chip, int addr, int reg,
index 8875784c4718feee699355f91c2092622417cce1..548a956637eec56b4aac9d7e4b2890e078f51285 100644 (file)
@@ -3,7 +3,8 @@
  *
  * Copyright (c) 2008 Marvell Semiconductor
  *
- * Copyright (c) 2016 Vivien Didelot <vivien.didelot@savoirfairelinux.com>
+ * Copyright (c) 2016-2017 Savoir-faire Linux Inc.
+ *     Vivien Didelot <vivien.didelot@savoirfairelinux.com>
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
@@ -497,8 +498,8 @@ int mv88e6351_port_set_frame_mode(struct mv88e6xxx_chip *chip, int port,
        return mv88e6xxx_port_write(chip, port, PORT_CONTROL, reg);
 }
 
-int mv88e6085_port_set_egress_unknowns(struct mv88e6xxx_chip *chip, int port,
-                                      bool on)
+static int mv88e6185_port_set_forward_unknown(struct mv88e6xxx_chip *chip,
+                                             int port, bool unicast)
 {
        int err;
        u16 reg;
@@ -507,7 +508,7 @@ int mv88e6085_port_set_egress_unknowns(struct mv88e6xxx_chip *chip, int port,
        if (err)
                return err;
 
-       if (on)
+       if (unicast)
                reg |= PORT_CONTROL_FORWARD_UNKNOWN;
        else
                reg &= ~PORT_CONTROL_FORWARD_UNKNOWN;
@@ -515,8 +516,8 @@ int mv88e6085_port_set_egress_unknowns(struct mv88e6xxx_chip *chip, int port,
        return mv88e6xxx_port_write(chip, port, PORT_CONTROL, reg);
 }
 
-int mv88e6351_port_set_egress_unknowns(struct mv88e6xxx_chip *chip, int port,
-                                      bool on)
+int mv88e6352_port_set_egress_floods(struct mv88e6xxx_chip *chip, int port,
+                                    bool unicast, bool multicast)
 {
        int err;
        u16 reg;
@@ -525,21 +526,45 @@ int mv88e6351_port_set_egress_unknowns(struct mv88e6xxx_chip *chip, int port,
        if (err)
                return err;
 
-       if (on)
-               reg |= PORT_CONTROL_EGRESS_ALL_UNKNOWN_DA;
+       reg &= ~PORT_CONTROL_EGRESS_FLOODS_MASK;
+
+       if (unicast && multicast)
+               reg |= PORT_CONTROL_EGRESS_FLOODS_ALL_UNKNOWN_DA;
+       else if (unicast)
+               reg |= PORT_CONTROL_EGRESS_FLOODS_NO_UNKNOWN_MC_DA;
+       else if (multicast)
+               reg |= PORT_CONTROL_EGRESS_FLOODS_NO_UNKNOWN_UC_DA;
        else
-               reg &= ~PORT_CONTROL_EGRESS_ALL_UNKNOWN_DA;
+               reg |= PORT_CONTROL_EGRESS_FLOODS_NO_UNKNOWN_DA;
 
        return mv88e6xxx_port_write(chip, port, PORT_CONTROL, reg);
 }
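
The unicast/multicast flags map onto the 2-bit EGRESS_FLOODS field: both true selects ALL_UNKNOWN_DA, unicast only selects NO_UNKNOWN_MC_DA, multicast only selects NO_UNKNOWN_UC_DA, and neither selects NO_UNKNOWN_DA. A hypothetical caller flooding both kinds of unknown traffic on every port, sketched as a bridge-less default (the function name is illustrative):

static int example_flood_all_ports(struct mv88e6xxx_chip *chip)
{
        int port, err;

        for (port = 0; port < mv88e6xxx_num_ports(chip); port++) {
                /* unicast = true, multicast = true -> ALL_UNKNOWN_DA */
                err = mv88e6352_port_set_egress_floods(chip, port,
                                                       true, true);
                if (err)
                        return err;
        }

        return 0;
}
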
 
 /* Offset 0x05: Port Control 1 */
 
+int mv88e6xxx_port_set_message_port(struct mv88e6xxx_chip *chip, int port,
+                                   bool message_port)
+{
+       u16 val;
+       int err;
+
+       err = mv88e6xxx_port_read(chip, port, PORT_CONTROL_1, &val);
+       if (err)
+               return err;
+
+       if (message_port)
+               val |= PORT_CONTROL_1_MESSAGE_PORT;
+       else
+               val &= ~PORT_CONTROL_1_MESSAGE_PORT;
+
+       return mv88e6xxx_port_write(chip, port, PORT_CONTROL_1, val);
+}
+
 /* Offset 0x06: Port Based VLAN Map */
 
 int mv88e6xxx_port_set_vlan_map(struct mv88e6xxx_chip *chip, int port, u16 map)
 {
-       const u16 mask = GENMASK(mv88e6xxx_num_ports(chip) - 1, 0);
+       const u16 mask = mv88e6xxx_port_mask(chip);
        u16 reg;
        int err;
 
@@ -672,8 +697,8 @@ static const char * const mv88e6xxx_port_8021q_mode_names[] = {
        [PORT_CONTROL_2_8021Q_SECURE] = "Secure",
 };
 
-int mv88e6095_port_set_egress_unknowns(struct mv88e6xxx_chip *chip, int port,
-                                      bool on)
+static int mv88e6185_port_set_default_forward(struct mv88e6xxx_chip *chip,
+                                             int port, bool multicast)
 {
        int err;
        u16 reg;
@@ -682,14 +707,26 @@ int mv88e6095_port_set_egress_unknowns(struct mv88e6xxx_chip *chip, int port,
        if (err)
                return err;
 
-       if (on)
-               reg |= PORT_CONTROL_2_FORWARD_UNKNOWN;
+       if (multicast)
+               reg |= PORT_CONTROL_2_DEFAULT_FORWARD;
        else
-               reg &= ~PORT_CONTROL_2_FORWARD_UNKNOWN;
+               reg &= ~PORT_CONTROL_2_DEFAULT_FORWARD;
 
        return mv88e6xxx_port_write(chip, port, PORT_CONTROL_2, reg);
 }
 
+int mv88e6185_port_set_egress_floods(struct mv88e6xxx_chip *chip, int port,
+                                    bool unicast, bool multicast)
+{
+       int err;
+
+       err = mv88e6185_port_set_forward_unknown(chip, port, unicast);
+       if (err)
+               return err;
+
+       return mv88e6185_port_set_default_forward(chip, port, multicast);
+}
+
 int mv88e6095_port_set_upstream_port(struct mv88e6xxx_chip *chip, int port,
                                     int upstream_port)
 {
@@ -769,6 +806,20 @@ int mv88e6097_port_egress_rate_limiting(struct mv88e6xxx_chip *chip, int port)
        return mv88e6xxx_port_write(chip, port, PORT_RATE_CONTROL, 0x0001);
 }
 
+/* Offset 0x0C: Port ATU Control */
+
+int mv88e6xxx_port_disable_learn_limit(struct mv88e6xxx_chip *chip, int port)
+{
+       return mv88e6xxx_port_write(chip, port, PORT_ATU_CONTROL, 0);
+}
+
+/* Offset 0x0D: (Priority) Override Register */
+
+int mv88e6xxx_port_disable_pri_override(struct mv88e6xxx_chip *chip, int port)
+{
+       return mv88e6xxx_port_write(chip, port, PORT_PRI_OVERRIDE, 0);
+}
+
 /* Offset 0x0f: Port Ether type */
 
 int mv88e6351_port_set_ether_type(struct mv88e6xxx_chip *chip, int port,
index c83cbb3f449182317a21c9fb894a114d99a08ebb..86f40887b6d28d998316706fe36333c471ec1c25 100644 (file)
@@ -3,7 +3,8 @@
  *
  * Copyright (c) 2008 Marvell Semiconductor
  *
- * Copyright (c) 2016 Vivien Didelot <vivien.didelot@savoirfairelinux.com>
+ * Copyright (c) 2016-2017 Savoir-faire Linux Inc.
+ *     Vivien Didelot <vivien.didelot@savoirfairelinux.com>
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
@@ -56,14 +57,14 @@ int mv88e6085_port_set_frame_mode(struct mv88e6xxx_chip *chip, int port,
                                  enum mv88e6xxx_frame_mode mode);
 int mv88e6351_port_set_frame_mode(struct mv88e6xxx_chip *chip, int port,
                                  enum mv88e6xxx_frame_mode mode);
-int mv88e6085_port_set_egress_unknowns(struct mv88e6xxx_chip *chip, int port,
-                                      bool on);
-int mv88e6095_port_set_egress_unknowns(struct mv88e6xxx_chip *chip, int port,
-                                      bool on);
-int mv88e6351_port_set_egress_unknowns(struct mv88e6xxx_chip *chip, int port,
-                                      bool on);
+int mv88e6185_port_set_egress_floods(struct mv88e6xxx_chip *chip, int port,
+                                    bool unicast, bool multicast);
+int mv88e6352_port_set_egress_floods(struct mv88e6xxx_chip *chip, int port,
+                                    bool unicast, bool multicast);
 int mv88e6351_port_set_ether_type(struct mv88e6xxx_chip *chip, int port,
                                  u16 etype);
+int mv88e6xxx_port_set_message_port(struct mv88e6xxx_chip *chip, int port,
+                                   bool message_port);
 int mv88e6165_port_jumbo_config(struct mv88e6xxx_chip *chip, int port);
 int mv88e6095_port_egress_rate_limiting(struct mv88e6xxx_chip *chip, int port);
 int mv88e6097_port_egress_rate_limiting(struct mv88e6xxx_chip *chip, int port);
@@ -75,4 +76,8 @@ int mv88e6xxx_port_get_cmode(struct mv88e6xxx_chip *chip, int port, u8 *cmode);
 int mv88e6xxx_port_set_map_da(struct mv88e6xxx_chip *chip, int port);
 int mv88e6095_port_set_upstream_port(struct mv88e6xxx_chip *chip, int port,
                                     int upstream_port);
+
+int mv88e6xxx_port_disable_learn_limit(struct mv88e6xxx_chip *chip, int port);
+int mv88e6xxx_port_disable_pri_override(struct mv88e6xxx_chip *chip, int port);
+
 #endif /* _MV88E6XXX_PORT_H */
index 2c80611b94aef3c5ce9d0f98e8d92e497542123a..149244aac20aa765551b93ca25d78018b28f17f9 100644 (file)
@@ -35,6 +35,7 @@
 #include <linux/init.h>
 #include <linux/moduleparam.h>
 #include <linux/rtnetlink.h>
+#include <linux/net_tstamp.h>
 #include <net/rtnetlink.h>
 #include <linux/u64_stats_sync.h>
 
@@ -125,6 +126,7 @@ static netdev_tx_t dummy_xmit(struct sk_buff *skb, struct net_device *dev)
        dstats->tx_bytes += skb->len;
        u64_stats_update_end(&dstats->syncp);
 
+       skb_tx_timestamp(skb);
        dev_kfree_skb(skb);
        return NETDEV_TX_OK;
 }
@@ -304,8 +306,21 @@ static void dummy_get_drvinfo(struct net_device *dev,
        strlcpy(info->version, DRV_VERSION, sizeof(info->version));
 }
 
+static int dummy_get_ts_info(struct net_device *dev,
+                             struct ethtool_ts_info *ts_info)
+{
+       ts_info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
+                                  SOF_TIMESTAMPING_RX_SOFTWARE |
+                                  SOF_TIMESTAMPING_SOFTWARE;
+
+       ts_info->phc_index = -1;
+
+       return 0;
+}
+
 static const struct ethtool_ops dummy_ethtool_ops = {
        .get_drvinfo            = dummy_get_drvinfo,
+       .get_ts_info            = dummy_get_ts_info,
 };
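
Once .get_ts_info is wired up, the advertised software timestamping can be read back from userspace via the ETHTOOL_GET_TS_INFO ioctl; a minimal sketch (the interface name "dummy0" is an assumption):

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/ethtool.h>
#include <linux/sockios.h>

int main(void)
{
        struct ethtool_ts_info info = { .cmd = ETHTOOL_GET_TS_INFO };
        struct ifreq ifr;
        int fd = socket(AF_INET, SOCK_DGRAM, 0);

        if (fd < 0)
                return 1;

        memset(&ifr, 0, sizeof(ifr));
        strncpy(ifr.ifr_name, "dummy0", IFNAMSIZ - 1); /* assumed ifname */
        ifr.ifr_data = (void *)&info;

        if (ioctl(fd, SIOCETHTOOL, &ifr) == 0)
                printf("so_timestamping: 0x%x, phc_index: %d\n",
                       info.so_timestamping, info.phc_index);

        close(fd);
        return 0;
}
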
 
 static void dummy_free_netdev(struct net_device *dev)
index 084a6d58543a71b35e0aa184cf03a9f9e5af3044..be823c186517a54e6d0317263008267f30bfce9b 100644 (file)
@@ -283,7 +283,6 @@ struct typhoon {
        spinlock_t              command_lock    ____cacheline_aligned;
        struct basic_ring       cmdRing;
        struct basic_ring       respRing;
-       struct net_device_stats stats;
        struct net_device_stats stats_saved;
        struct typhoon_shared * shared;
        dma_addr_t              shared_dma;
@@ -898,7 +897,7 @@ typhoon_set_rx_mode(struct net_device *dev)
 static int
 typhoon_do_get_stats(struct typhoon *tp)
 {
-       struct net_device_stats *stats = &tp->stats;
+       struct net_device_stats *stats = &tp->dev->stats;
        struct net_device_stats *saved = &tp->stats_saved;
        struct cmd_desc xp_cmd;
        struct resp_desc xp_resp[7];
@@ -951,7 +950,7 @@ static struct net_device_stats *
 typhoon_get_stats(struct net_device *dev)
 {
        struct typhoon *tp = netdev_priv(dev);
-       struct net_device_stats *stats = &tp->stats;
+       struct net_device_stats *stats = &tp->dev->stats;
        struct net_device_stats *saved = &tp->stats_saved;
 
        smp_rmb();
@@ -1991,7 +1990,7 @@ typhoon_stop_runtime(struct typhoon *tp, int wait_type)
        tp->card_state = Sleeping;
        smp_wmb();
        typhoon_do_get_stats(tp);
-       memcpy(&tp->stats_saved, &tp->stats, sizeof(struct net_device_stats));
+       memcpy(&tp->stats_saved, &tp->dev->stats, sizeof(struct net_device_stats));
 
        INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_HALT);
        typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
index 8c08f9deef9268e4cacc939a2534110a42be6c3b..edae15ac0e982e7a1678627253dc5bcd696737bc 100644 (file)
@@ -180,5 +180,6 @@ source "drivers/net/ethernet/via/Kconfig"
 source "drivers/net/ethernet/wiznet/Kconfig"
 source "drivers/net/ethernet/xilinx/Kconfig"
 source "drivers/net/ethernet/xircom/Kconfig"
+source "drivers/net/ethernet/synopsys/Kconfig"
 
 endif # ETHERNET
index 26dce5bf2c18c966c5b378cf79b385aa726f9b4f..bf7f4502cabcf2b40f735d3a928a475f6d03c061 100644 (file)
@@ -91,3 +91,4 @@ obj-$(CONFIG_NET_VENDOR_VIA) += via/
 obj-$(CONFIG_NET_VENDOR_WIZNET) += wiznet/
 obj-$(CONFIG_NET_VENDOR_XILINX) += xilinx/
 obj-$(CONFIG_NET_VENDOR_XIRCOM) += xircom/
+obj-$(CONFIG_NET_VENDOR_SYNOPSYS) += synopsys/
index 8c3b56198e4b731125648284086850eab1fdf0f2..4ad5b9be3f84c52b9e04f6919431c198a6d5a011 100644 (file)
@@ -68,13 +68,6 @@ struct net_dma_desc_tx {
 };
 
 struct bfin_mac_local {
-       /*
-        * these are things that the kernel wants me to keep, so users
-        * can find out semi-useless statistics of how well the card is
-        * performing
-        */
-       struct net_device_stats stats;
-
        spinlock_t lock;
 
        int wol;                /* Wake On Lan */
index 9f7422ada704e9484b79d336ece46a0547667abb..d8e133ced7b8a026682ee5f60396e380b4fa5ee6 100644 (file)
@@ -34,6 +34,7 @@
 #include <linux/crc32.h>
 #include <linux/mii.h>
 #include <linux/of_device.h>
+#include <linux/of_net.h>
 #include <linux/of_platform.h>
 #include <linux/slab.h>
 #include <asm/cacheflush.h>
@@ -1454,11 +1455,10 @@ static int greth_of_probe(struct platform_device *ofdev)
                        break;
        }
        if (i == 6) {
-               const unsigned char *addr;
-               int len;
-               addr = of_get_property(ofdev->dev.of_node, "local-mac-address",
-                                       &len);
-               if (addr != NULL && len == 6) {
+               const u8 *addr;
+
+               addr = of_get_mac_address(ofdev->dev.of_node);
+               if (addr) {
                        for (i = 0; i < 6; i++)
                                macaddr[i] = (unsigned int) addr[i];
                } else {
index 35f19430c84ae2dc7ed0e098acbe3516f3e166b5..7c1214d7885566ded4dfc05e85c2ee86b8d3c949 100644 (file)
@@ -133,7 +133,7 @@ static int ena_init_rx_cpu_rmap(struct ena_adapter *adapter)
                int irq_idx = ENA_IO_IRQ_IDX(i);
 
                rc = irq_cpu_rmap_add(adapter->netdev->rx_cpu_rmap,
-                                     adapter->msix_entries[irq_idx].vector);
+                                     pci_irq_vector(adapter->pdev, irq_idx));
                if (rc) {
                        free_irq_cpu_rmap(adapter->netdev->rx_cpu_rmap);
                        adapter->netdev->rx_cpu_rmap = NULL;
@@ -1208,13 +1208,7 @@ static irqreturn_t ena_intr_msix_io(int irq, void *data)
 
 static int ena_enable_msix(struct ena_adapter *adapter, int num_queues)
 {
-       int i, msix_vecs, rc;
-
-       if (test_bit(ENA_FLAG_MSIX_ENABLED, &adapter->flags)) {
-               netif_err(adapter, probe, adapter->netdev,
-                         "Error, MSI-X is already enabled\n");
-               return -EPERM;
-       }
+       int msix_vecs, rc;
 
        /* Reserve the max MSI-X vectors we might need */
        msix_vecs = ENA_MAX_MSIX_VEC(num_queues);
@@ -1222,16 +1216,9 @@ static int ena_enable_msix(struct ena_adapter *adapter, int num_queues)
        netif_dbg(adapter, probe, adapter->netdev,
                  "trying to enable MSI-X, vectors %d\n", msix_vecs);
 
-       adapter->msix_entries = vzalloc(msix_vecs * sizeof(struct msix_entry));
-
-       if (!adapter->msix_entries)
-               return -ENOMEM;
-
-       for (i = 0; i < msix_vecs; i++)
-               adapter->msix_entries[i].entry = i;
-
-       rc = pci_enable_msix(adapter->pdev, adapter->msix_entries, msix_vecs);
-       if (rc != 0) {
+       rc = pci_alloc_irq_vectors(adapter->pdev, msix_vecs, msix_vecs,
+                       PCI_IRQ_MSIX);
+       if (rc < 0) {
                netif_err(adapter, probe, adapter->netdev,
                          "Failed to enable MSI-X, vectors %d rc %d\n",
                          msix_vecs, rc);
@@ -1248,7 +1235,6 @@ static int ena_enable_msix(struct ena_adapter *adapter, int num_queues)
        }
 
        adapter->msix_vecs = msix_vecs;
-       set_bit(ENA_FLAG_MSIX_ENABLED, &adapter->flags);
 
        return 0;
 }
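
The conversion above is the stock pci_alloc_irq_vectors() life cycle: allocate the vectors once, translate index to Linux IRQ number with pci_irq_vector(), and release everything with pci_free_irq_vectors(), with no driver-private msix_entry array to manage. Condensed into one place (the example_* names are placeholders, not part of this driver):

#include <linux/interrupt.h>
#include <linux/pci.h>

static irqreturn_t example_isr(int irq, void *data)
{
        return IRQ_HANDLED;
}

static int example_setup_irqs(struct pci_dev *pdev, int nvec)
{
        int i, rc;

        /* min == max requests an all-or-nothing MSI-X allocation */
        rc = pci_alloc_irq_vectors(pdev, nvec, nvec, PCI_IRQ_MSIX);
        if (rc < 0)
                return rc;

        for (i = 0; i < nvec; i++) {
                /* pci_irq_vector() maps a vector index to a Linux IRQ */
                rc = request_irq(pci_irq_vector(pdev, i), example_isr, 0,
                                 "example", pdev);
                if (rc)
                        goto err_free;
        }

        return 0;

err_free:
        while (--i >= 0)
                free_irq(pci_irq_vector(pdev, i), pdev);
        pci_free_irq_vectors(pdev);
        return rc;
}
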
@@ -1264,7 +1250,7 @@ static void ena_setup_mgmnt_intr(struct ena_adapter *adapter)
                ena_intr_msix_mgmnt;
        adapter->irq_tbl[ENA_MGMNT_IRQ_IDX].data = adapter;
        adapter->irq_tbl[ENA_MGMNT_IRQ_IDX].vector =
-               adapter->msix_entries[ENA_MGMNT_IRQ_IDX].vector;
+               pci_irq_vector(adapter->pdev, ENA_MGMNT_IRQ_IDX);
        cpu = cpumask_first(cpu_online_mask);
        adapter->irq_tbl[ENA_MGMNT_IRQ_IDX].cpu = cpu;
        cpumask_set_cpu(cpu,
@@ -1287,7 +1273,7 @@ static void ena_setup_io_intr(struct ena_adapter *adapter)
                adapter->irq_tbl[irq_idx].handler = ena_intr_msix_io;
                adapter->irq_tbl[irq_idx].data = &adapter->ena_napi[i];
                adapter->irq_tbl[irq_idx].vector =
-                       adapter->msix_entries[irq_idx].vector;
+                       pci_irq_vector(adapter->pdev, irq_idx);
                adapter->irq_tbl[irq_idx].cpu = cpu;
 
                cpumask_set_cpu(cpu,
@@ -1325,12 +1311,6 @@ static int ena_request_io_irq(struct ena_adapter *adapter)
        struct ena_irq *irq;
        int rc = 0, i, k;
 
-       if (!test_bit(ENA_FLAG_MSIX_ENABLED, &adapter->flags)) {
-               netif_err(adapter, ifup, adapter->netdev,
-                         "Failed to request I/O IRQ: MSI-X is not enabled\n");
-               return -EINVAL;
-       }
-
        for (i = ENA_IO_IRQ_FIRST_IDX; i < adapter->msix_vecs; i++) {
                irq = &adapter->irq_tbl[i];
                rc = request_irq(irq->vector, irq->handler, flags, irq->name,
@@ -1389,16 +1369,6 @@ static void ena_free_io_irq(struct ena_adapter *adapter)
        }
 }
 
-static void ena_disable_msix(struct ena_adapter *adapter)
-{
-       if (test_and_clear_bit(ENA_FLAG_MSIX_ENABLED, &adapter->flags))
-               pci_disable_msix(adapter->pdev);
-
-       if (adapter->msix_entries)
-               vfree(adapter->msix_entries);
-       adapter->msix_entries = NULL;
-}
-
 static void ena_disable_io_intr_sync(struct ena_adapter *adapter)
 {
        int i;
@@ -2479,8 +2449,7 @@ static int ena_enable_msix_and_set_admin_interrupts(struct ena_adapter *adapter,
        return 0;
 
 err_disable_msix:
-       ena_disable_msix(adapter);
-
+       pci_free_irq_vectors(adapter->pdev);
        return rc;
 }
 
@@ -2518,7 +2487,7 @@ static void ena_fw_reset_device(struct work_struct *work)
 
        ena_free_mgmnt_irq(adapter);
 
-       ena_disable_msix(adapter);
+       pci_free_irq_vectors(adapter->pdev);
 
        ena_com_abort_admin_commands(ena_dev);
 
@@ -2569,7 +2538,7 @@ static void ena_fw_reset_device(struct work_struct *work)
        return;
 err_disable_msix:
        ena_free_mgmnt_irq(adapter);
-       ena_disable_msix(adapter);
+       pci_free_irq_vectors(adapter->pdev);
 err_device_destroy:
        ena_com_admin_destroy(ena_dev);
 err:
@@ -3103,7 +3072,7 @@ err_rss:
 err_free_msix:
        ena_com_dev_reset(ena_dev);
        ena_free_mgmnt_irq(adapter);
-       ena_disable_msix(adapter);
+       pci_free_irq_vectors(adapter->pdev);
 err_worker_destroy:
        ena_com_destroy_interrupt_moderation(ena_dev);
        del_timer(&adapter->timer_service);
@@ -3188,7 +3157,7 @@ static void ena_remove(struct pci_dev *pdev)
 
        ena_free_mgmnt_irq(adapter);
 
-       ena_disable_msix(adapter);
+       pci_free_irq_vectors(adapter->pdev);
 
        free_netdev(netdev);
 
index ed62d8e231a155a693a1278862a967f3594635b4..0e22bce6239d0e06c73a366e0d98a2348a9b7fa9 100644 (file)
@@ -248,7 +248,6 @@ enum ena_flags_t {
        ENA_FLAG_DEVICE_RUNNING,
        ENA_FLAG_DEV_UP,
        ENA_FLAG_LINK_UP,
-       ENA_FLAG_MSIX_ENABLED,
        ENA_FLAG_TRIGGER_RESET
 };
 
@@ -267,7 +266,6 @@ struct ena_adapter {
 
        int num_queues;
 
-       struct msix_entry *msix_entries;
        int msix_vecs;
 
        u32 tx_usecs, rx_usecs; /* interrupt moderation */
index b556c926557a10599b1148ba52d7fcf39d6030bd..9c152d85840d79add7dc32bd1ee47579208ced25 100644 (file)
@@ -359,7 +359,6 @@ typedef struct _mace_statistics {
 
 typedef struct _mace_private {
        struct pcmcia_device    *p_dev;
-    struct net_device_stats linux_stats; /* Linux statistics counters */
     mace_statistics mace_stats; /* MACE chip statistics counters */
 
     /* restore_multicast_list() state variables */
@@ -879,7 +878,7 @@ static netdev_tx_t mace_start_xmit(struct sk_buff *skb,
        service a transmit interrupt while we are in here.
     */
 
-    lp->linux_stats.tx_bytes += skb->len;
+    dev->stats.tx_bytes += skb->len;
     lp->tx_free_frames--;
 
     /* WARNING: Write the _exact_ number of bytes written in the header! */
@@ -967,7 +966,7 @@ static irqreturn_t mace_interrupt(int irq, void *dev_id)
 
       fifofc = inb(ioaddr + AM2150_MACE_BASE + MACE_FIFOFC);
       if ((fifofc & MACE_FIFOFC_XMTFC)==0) {
-       lp->linux_stats.tx_errors++;
+       dev->stats.tx_errors++;
        outb(0xFF, ioaddr + AM2150_XMT_SKIP);
       }
 
@@ -1016,7 +1015,7 @@ static irqreturn_t mace_interrupt(int irq, void *dev_id)
 
       } /* if (xmtfs & MACE_XMTFS_XMTSV) */
 
-      lp->linux_stats.tx_packets++;
+      dev->stats.tx_packets++;
       lp->tx_free_frames++;
       netif_wake_queue(dev);
     } /* if (status & MACE_IR_XMTINT) */
@@ -1077,7 +1076,7 @@ static int mace_rx(struct net_device *dev, unsigned char RxCnt)
          " 0x%X.\n", dev->name, rx_framecnt, rx_status);
 
     if (rx_status & MACE_RCVFS_RCVSTS) { /* Error, update stats. */
-      lp->linux_stats.rx_errors++;
+      dev->stats.rx_errors++;
       if (rx_status & MACE_RCVFS_OFLO) {
         lp->mace_stats.oflo++;
       }
@@ -1114,14 +1113,14 @@ static int mace_rx(struct net_device *dev, unsigned char RxCnt)
        
        netif_rx(skb); /* Send the packet to the upper (protocol) layers. */
 
-       lp->linux_stats.rx_packets++;
-       lp->linux_stats.rx_bytes += pkt_len;
+       dev->stats.rx_packets++;
+       dev->stats.rx_bytes += pkt_len;
        outb(0xFF, ioaddr + AM2150_RCV_NEXT); /* skip to next frame */
        continue;
       } else {
        pr_debug("%s: couldn't allocate a sk_buff of size"
              " %d.\n", dev->name, pkt_len);
-       lp->linux_stats.rx_dropped++;
+       dev->stats.rx_dropped++;
       }
     }
     outb(0xFF, ioaddr + AM2150_RCV_NEXT); /* skip to next frame */
@@ -1231,13 +1230,13 @@ static void update_stats(unsigned int ioaddr, struct net_device *dev)
   lp->mace_stats.rntpc += mace_read(lp, ioaddr, MACE_RNTPC);
   lp->mace_stats.mpc += mace_read(lp, ioaddr, MACE_MPC);
   /* At this point, mace_stats is fully updated for this call.
-     We may now update the linux_stats. */
+     We may now update the netdev stats. */
 
-  /* The MACE has no equivalent for linux_stats field which are commented
+  /* The MACE has no equivalents for the netdev stats fields which are commented
      out. */
 
-  /* lp->linux_stats.multicast; */
-  lp->linux_stats.collisions = 
+  /* dev->stats.multicast; */
+  dev->stats.collisions =
     lp->mace_stats.rcvcco * 256 + lp->mace_stats.rcvcc;
     /* Collision: The MACE may retry sending a packet 15 times
        before giving up.  The retry count is in XMTRC.
@@ -1245,22 +1244,22 @@ static void update_stats(unsigned int ioaddr, struct net_device *dev)
        If so, why doesn't the RCVCC record these collisions? */
 
   /* detailed rx_errors: */
-  lp->linux_stats.rx_length_errors = 
+  dev->stats.rx_length_errors =
     lp->mace_stats.rntpco * 256 + lp->mace_stats.rntpc;
-  /* lp->linux_stats.rx_over_errors */
-  lp->linux_stats.rx_crc_errors = lp->mace_stats.fcs;
-  lp->linux_stats.rx_frame_errors = lp->mace_stats.fram;
-  lp->linux_stats.rx_fifo_errors = lp->mace_stats.oflo;
-  lp->linux_stats.rx_missed_errors = 
+  /* dev->stats.rx_over_errors */
+  dev->stats.rx_crc_errors = lp->mace_stats.fcs;
+  dev->stats.rx_frame_errors = lp->mace_stats.fram;
+  dev->stats.rx_fifo_errors = lp->mace_stats.oflo;
+  dev->stats.rx_missed_errors =
     lp->mace_stats.mpco * 256 + lp->mace_stats.mpc;
 
   /* detailed tx_errors */
-  lp->linux_stats.tx_aborted_errors = lp->mace_stats.rtry;
-  lp->linux_stats.tx_carrier_errors = lp->mace_stats.lcar;
+  dev->stats.tx_aborted_errors = lp->mace_stats.rtry;
+  dev->stats.tx_carrier_errors = lp->mace_stats.lcar;
     /* LCAR usually results from bad cabling. */
-  lp->linux_stats.tx_fifo_errors = lp->mace_stats.uflo;
-  lp->linux_stats.tx_heartbeat_errors = lp->mace_stats.cerr;
-  /* lp->linux_stats.tx_window_errors; */
+  dev->stats.tx_fifo_errors = lp->mace_stats.uflo;
+  dev->stats.tx_heartbeat_errors = lp->mace_stats.cerr;
+  /* dev->stats.tx_window_errors; */
 } /* update_stats */
 
 /* ----------------------------------------------------------------------------
@@ -1274,10 +1273,10 @@ static struct net_device_stats *mace_get_stats(struct net_device *dev)
   update_stats(dev->base_addr, dev);
 
   pr_debug("%s: updating the statistics.\n", dev->name);
-  pr_linux_stats(&lp->linux_stats);
+  pr_linux_stats(&dev->stats);
   pr_mace_stats(&lp->mace_stats);
 
-  return &lp->linux_stats;
+  return &dev->stats;
 } /* net_device_stats */
 
 /* ----------------------------------------------------------------------------
index 8a280e7d66bddc998763288a5756b2ae6a7f70bc..127adbeefb105cc031f3782b534d175f29fb7143 100644 (file)
 #define XP_ECC_CNT1_DESC_DED_WIDTH             8
 #define XP_ECC_CNT1_DESC_SEC_INDEX             0
 #define XP_ECC_CNT1_DESC_SEC_WIDTH             8
-#define XP_ECC_IER_DESC_DED_INDEX              0
+#define XP_ECC_IER_DESC_DED_INDEX              5
 #define XP_ECC_IER_DESC_DED_WIDTH              1
-#define XP_ECC_IER_DESC_SEC_INDEX              1
+#define XP_ECC_IER_DESC_SEC_INDEX              4
 #define XP_ECC_IER_DESC_SEC_WIDTH              1
-#define XP_ECC_IER_RX_DED_INDEX                        2
+#define XP_ECC_IER_RX_DED_INDEX                        3
 #define XP_ECC_IER_RX_DED_WIDTH                        1
-#define XP_ECC_IER_RX_SEC_INDEX                        3
+#define XP_ECC_IER_RX_SEC_INDEX                        2
 #define XP_ECC_IER_RX_SEC_WIDTH                        1
-#define XP_ECC_IER_TX_DED_INDEX                        4
+#define XP_ECC_IER_TX_DED_INDEX                        1
 #define XP_ECC_IER_TX_DED_WIDTH                        1
-#define XP_ECC_IER_TX_SEC_INDEX                        5
+#define XP_ECC_IER_TX_SEC_INDEX                        0
 #define XP_ECC_IER_TX_SEC_WIDTH                        1
-#define XP_ECC_ISR_DESC_DED_INDEX              0
+#define XP_ECC_ISR_DESC_DED_INDEX              5
 #define XP_ECC_ISR_DESC_DED_WIDTH              1
-#define XP_ECC_ISR_DESC_SEC_INDEX              1
+#define XP_ECC_ISR_DESC_SEC_INDEX              4
 #define XP_ECC_ISR_DESC_SEC_WIDTH              1
-#define XP_ECC_ISR_RX_DED_INDEX                        2
+#define XP_ECC_ISR_RX_DED_INDEX                        3
 #define XP_ECC_ISR_RX_DED_WIDTH                        1
-#define XP_ECC_ISR_RX_SEC_INDEX                        3
+#define XP_ECC_ISR_RX_SEC_INDEX                        2
 #define XP_ECC_ISR_RX_SEC_WIDTH                        1
-#define XP_ECC_ISR_TX_DED_INDEX                        4
+#define XP_ECC_ISR_TX_DED_INDEX                        1
 #define XP_ECC_ISR_TX_DED_WIDTH                        1
-#define XP_ECC_ISR_TX_SEC_INDEX                        5
+#define XP_ECC_ISR_TX_SEC_INDEX                        0
 #define XP_ECC_ISR_TX_SEC_WIDTH                        1
 #define XP_I2C_MUTEX_BUSY_INDEX                        31
 #define XP_I2C_MUTEX_BUSY_WIDTH                        1
 #define RX_PACKET_ATTRIBUTES_CSUM_DONE_WIDTH   1
 #define RX_PACKET_ATTRIBUTES_VLAN_CTAG_INDEX   1
 #define RX_PACKET_ATTRIBUTES_VLAN_CTAG_WIDTH   1
-#define RX_PACKET_ATTRIBUTES_INCOMPLETE_INDEX  2
-#define RX_PACKET_ATTRIBUTES_INCOMPLETE_WIDTH  1
+#define RX_PACKET_ATTRIBUTES_LAST_INDEX                2
+#define RX_PACKET_ATTRIBUTES_LAST_WIDTH                1
 #define RX_PACKET_ATTRIBUTES_CONTEXT_NEXT_INDEX        3
 #define RX_PACKET_ATTRIBUTES_CONTEXT_NEXT_WIDTH        1
 #define RX_PACKET_ATTRIBUTES_CONTEXT_INDEX     4
 #define RX_PACKET_ATTRIBUTES_RX_TSTAMP_WIDTH   1
 #define RX_PACKET_ATTRIBUTES_RSS_HASH_INDEX    6
 #define RX_PACKET_ATTRIBUTES_RSS_HASH_WIDTH    1
+#define RX_PACKET_ATTRIBUTES_FIRST_INDEX       7
+#define RX_PACKET_ATTRIBUTES_FIRST_WIDTH       1
 
 #define RX_NORMAL_DESC0_OVT_INDEX              0
 #define RX_NORMAL_DESC0_OVT_WIDTH              16
index 937f37a5dcb2cded9963b3732bddb08557ad50ba..24a687ce4388182716438770c49e2dca7ff81114 100644 (file)
@@ -1896,10 +1896,15 @@ static int xgbe_dev_read(struct xgbe_channel *channel)
 
        /* Get the header length */
        if (XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, FD)) {
+               XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES,
+                              FIRST, 1);
                rdata->rx.hdr_len = XGMAC_GET_BITS_LE(rdesc->desc2,
                                                      RX_NORMAL_DESC2, HL);
                if (rdata->rx.hdr_len)
                        pdata->ext_stats.rx_split_header_packets++;
+       } else {
+               XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES,
+                              FIRST, 0);
        }
 
        /* Get the RSS hash */
@@ -1922,19 +1927,16 @@ static int xgbe_dev_read(struct xgbe_channel *channel)
                }
        }
 
-       /* Get the packet length */
-       rdata->rx.len = XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, PL);
-
-       if (!XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, LD)) {
-               /* Not all the data has been transferred for this packet */
-               XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES,
-                              INCOMPLETE, 1);
+       /* Not all the data has been transferred for this packet */
+       if (!XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, LD))
                return 0;
-       }
 
        /* This is the last of the data for this packet */
        XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES,
-                      INCOMPLETE, 0);
+                      LAST, 1);
+
+       /* Get the packet length */
+       rdata->rx.len = XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, PL);
 
        /* Set checksum done indicator as appropriate */
        if (netdev->features & NETIF_F_RXCSUM)
index 248f60d171a5a0ce76744a95e2d59039939e6538..c772420fa41caf57907610bce4528da9c70be63e 100644 (file)
 #include <linux/spinlock.h>
 #include <linux/tcp.h>
 #include <linux/if_vlan.h>
+#include <linux/interrupt.h>
 #include <net/busy_poll.h>
 #include <linux/clk.h>
 #include <linux/if_ether.h>
@@ -1854,7 +1855,8 @@ static int xgbe_setup_tc(struct net_device *netdev, u32 handle, __be16 proto,
        if (tc_to_netdev->type != TC_SETUP_MQPRIO)
                return -EINVAL;
 
-       tc = tc_to_netdev->tc;
+       tc_to_netdev->mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS;
+       tc = tc_to_netdev->mqprio->num_tc;
 
        if (tc > pdata->hw_feat.tc_cnt)
                return -EINVAL;
@@ -1971,13 +1973,12 @@ static struct sk_buff *xgbe_create_skb(struct xgbe_prv_data *pdata,
 {
        struct sk_buff *skb;
        u8 *packet;
-       unsigned int copy_len;
 
        skb = napi_alloc_skb(napi, rdata->rx.hdr.dma_len);
        if (!skb)
                return NULL;
 
-       /* Start with the header buffer which may contain just the header
+       /* Pull in the header buffer which may contain just the header
         * or the header plus data
         */
        dma_sync_single_range_for_cpu(pdata->dev, rdata->rx.hdr.dma_base,
@@ -1986,30 +1987,49 @@ static struct sk_buff *xgbe_create_skb(struct xgbe_prv_data *pdata,
 
        packet = page_address(rdata->rx.hdr.pa.pages) +
                 rdata->rx.hdr.pa.pages_offset;
-       copy_len = (rdata->rx.hdr_len) ? rdata->rx.hdr_len : len;
-       copy_len = min(rdata->rx.hdr.dma_len, copy_len);
-       skb_copy_to_linear_data(skb, packet, copy_len);
-       skb_put(skb, copy_len);
-
-       len -= copy_len;
-       if (len) {
-               /* Add the remaining data as a frag */
-               dma_sync_single_range_for_cpu(pdata->dev,
-                                             rdata->rx.buf.dma_base,
-                                             rdata->rx.buf.dma_off,
-                                             rdata->rx.buf.dma_len,
-                                             DMA_FROM_DEVICE);
-
-               skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
-                               rdata->rx.buf.pa.pages,
-                               rdata->rx.buf.pa.pages_offset,
-                               len, rdata->rx.buf.dma_len);
-               rdata->rx.buf.pa.pages = NULL;
-       }
+       skb_copy_to_linear_data(skb, packet, len);
+       skb_put(skb, len);
 
        return skb;
 }
 
+static unsigned int xgbe_rx_buf1_len(struct xgbe_ring_data *rdata,
+                                    struct xgbe_packet_data *packet)
+{
+       /* Always zero if not the first descriptor */
+       if (!XGMAC_GET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES, FIRST))
+               return 0;
+
+       /* First descriptor with split header, return header length */
+       if (rdata->rx.hdr_len)
+               return rdata->rx.hdr_len;
+
+       /* First descriptor but not the last descriptor and no split header,
+        * so the full buffer was used
+        */
+       if (!XGMAC_GET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES, LAST))
+               return rdata->rx.hdr.dma_len;
+
+       /* First descriptor and last descriptor and no split header, so
+        * calculate how much of the buffer was used
+        */
+       return min_t(unsigned int, rdata->rx.hdr.dma_len, rdata->rx.len);
+}
+
+static unsigned int xgbe_rx_buf2_len(struct xgbe_ring_data *rdata,
+                                    struct xgbe_packet_data *packet,
+                                    unsigned int len)
+{
+       /* Always the full buffer if not the last descriptor */
+       if (!XGMAC_GET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES, LAST))
+               return rdata->rx.buf.dma_len;
+
+       /* Last descriptor so calculate how much of the buffer was used
+        * for the last bit of data
+        */
+       return rdata->rx.len - len;
+}
+
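
A worked example of how the two helpers split a frame across descriptors, using hypothetical sizes (256-byte header buffer, 2048-byte data buffer, 3000-byte frame, no split header):

/* Descriptor 1 (FIRST set, LAST clear; rx.len not yet valid):
 *   buf1_len = rx.hdr.dma_len            = 256
 *   buf2_len = rx.buf.dma_len            = 2048   -> len = 2304
 * Descriptor 2 (FIRST clear, LAST set; cumulative rx.len = 3000):
 *   buf1_len = 0                                    (not FIRST)
 *   buf2_len = rx.len - len = 3000 - 2304 = 696   -> len = 3000
 */
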
 static int xgbe_tx_poll(struct xgbe_channel *channel)
 {
        struct xgbe_prv_data *pdata = channel->pdata;
@@ -2092,8 +2112,8 @@ static int xgbe_rx_poll(struct xgbe_channel *channel, int budget)
        struct napi_struct *napi;
        struct sk_buff *skb;
        struct skb_shared_hwtstamps *hwtstamps;
-       unsigned int incomplete, error, context_next, context;
-       unsigned int len, rdesc_len, max_len;
+       unsigned int last, error, context_next, context;
+       unsigned int len, buf1_len, buf2_len, max_len;
        unsigned int received = 0;
        int packet_count = 0;
 
@@ -2103,7 +2123,7 @@ static int xgbe_rx_poll(struct xgbe_channel *channel, int budget)
        if (!ring)
                return 0;
 
-       incomplete = 0;
+       last = 0;
        context_next = 0;
 
        napi = (pdata->per_channel_irq) ? &channel->napi : &pdata->napi;
@@ -2137,9 +2157,8 @@ read_again:
                received++;
                ring->cur++;
 
-               incomplete = XGMAC_GET_BITS(packet->attributes,
-                                           RX_PACKET_ATTRIBUTES,
-                                           INCOMPLETE);
+               last = XGMAC_GET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES,
+                                     LAST);
                context_next = XGMAC_GET_BITS(packet->attributes,
                                              RX_PACKET_ATTRIBUTES,
                                              CONTEXT_NEXT);
@@ -2148,7 +2167,7 @@ read_again:
                                         CONTEXT);
 
                /* Earlier error, just drain the remaining data */
-               if ((incomplete || context_next) && error)
+               if ((!last || context_next) && error)
                        goto read_again;
 
                if (error || packet->errors) {
@@ -2160,16 +2179,22 @@ read_again:
                }
 
                if (!context) {
-                       /* Length is cumulative, get this descriptor's length */
-                       rdesc_len = rdata->rx.len - len;
-                       len += rdesc_len;
+                       /* Get the data length in the descriptor buffers */
+                       buf1_len = xgbe_rx_buf1_len(rdata, packet);
+                       len += buf1_len;
+                       buf2_len = xgbe_rx_buf2_len(rdata, packet, len);
+                       len += buf2_len;
 
-                       if (rdesc_len && !skb) {
+                       if (!skb) {
                                skb = xgbe_create_skb(pdata, napi, rdata,
-                                                     rdesc_len);
-                               if (!skb)
+                                                     buf1_len);
+                               if (!skb) {
                                        error = 1;
-                       } else if (rdesc_len) {
+                                       goto skip_data;
+                               }
+                       }
+
+                       if (buf2_len) {
                                dma_sync_single_range_for_cpu(pdata->dev,
                                                        rdata->rx.buf.dma_base,
                                                        rdata->rx.buf.dma_off,
@@ -2179,13 +2204,14 @@ read_again:
                                skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
                                                rdata->rx.buf.pa.pages,
                                                rdata->rx.buf.pa.pages_offset,
-                                               rdesc_len,
+                                               buf2_len,
                                                rdata->rx.buf.dma_len);
                                rdata->rx.buf.pa.pages = NULL;
                        }
                }
 
-               if (incomplete || context_next)
+skip_data:
+               if (!last || context_next)
                        goto read_again;
 
                if (!skb)
@@ -2243,7 +2269,7 @@ next_packet:
        }
 
        /* Check if we need to save state before leaving */
-       if (received && (incomplete || context_next)) {
+       if (received && (!last || context_next)) {
                rdata = XGBE_GET_DESC_DATA(ring, ring->cur);
                rdata->state_saved = 1;
                rdata->state.skb = skb;
@@ -2272,10 +2298,7 @@ static int xgbe_one_poll(struct napi_struct *napi, int budget)
        processed = xgbe_rx_poll(channel, budget);
 
        /* If we processed everything, we are done */
-       if (processed < budget) {
-               /* Turn off polling */
-               napi_complete_done(napi, processed);
-
+       if ((processed < budget) && napi_complete_done(napi, processed)) {
                /* Enable Tx and Rx interrupts */
                if (pdata->channel_irq_mode)
                        xgbe_enable_rx_tx_int(pdata, channel);
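
Gating the interrupt re-enable on napi_complete_done() returning true is the race-free completion idiom: if another context re-scheduled the NAPI instance in the meantime, the function returns false and polling continues with device interrupts still masked. The same idiom in isolation (the example_* names are placeholders):

static int example_poll(struct napi_struct *napi, int budget)
{
        int work_done = example_do_rx(napi, budget);    /* placeholder */

        /* Only unmask device interrupts if NAPI really completed */
        if (work_done < budget && napi_complete_done(napi, work_done))
                example_enable_irqs(napi);              /* placeholder */

        return work_done;
}
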
@@ -2317,10 +2340,7 @@ static int xgbe_all_poll(struct napi_struct *napi, int budget)
        } while ((processed < budget) && (processed != last_processed));
 
        /* If we processed everything, we are done */
-       if (processed < budget) {
-               /* Turn off polling */
-               napi_complete_done(napi, processed);
-
+       if ((processed < budget) && napi_complete_done(napi, processed)) {
                /* Enable Tx and Rx interrupts */
                xgbe_enable_rx_tx_ints(pdata);
        }
index 0c7088a426e90ddaa32c09795506bc8805a377f6..417bdb5982a93ca252962f7fcd6141a2f53fca37 100644 (file)
  */
 
 #include <linux/module.h>
+#include <linux/interrupt.h>
 #include <linux/kmod.h>
 #include <linux/delay.h>
 #include <linux/completion.h>
index 4c5b90eea4af2e389decec1d80b31e7134cee140..b672d92495397bb3132c90e25db17872c4589fa6 100644 (file)
  *     THE POSSIBILITY OF SUCH DAMAGE.
  */
 
+#include <linux/interrupt.h>
 #include <linux/module.h>
 #include <linux/kmod.h>
 #include <linux/mdio.h>
index ec63d706d464710af057591df62cfd0c4500b14b..59efe5b145ddf562e11fda61acad07b2da82548e 100644 (file)
@@ -1 +1,2 @@
 source "drivers/net/ethernet/apm/xgene/Kconfig"
+source "drivers/net/ethernet/apm/xgene-v2/Kconfig"
index 65ce32ad1b2cc66a2c017cf8a06f8e2099bbaa9e..946b2a4c882d3cb627849ac25817e9d4e45591de 100644 (file)
@@ -3,3 +3,4 @@
 #
 
 obj-$(CONFIG_NET_XGENE) += xgene/
+obj-$(CONFIG_NET_XGENE_V2) += xgene-v2/
diff --git a/drivers/net/ethernet/apm/xgene-v2/Kconfig b/drivers/net/ethernet/apm/xgene-v2/Kconfig
new file mode 100644 (file)
index 0000000..1205861
--- /dev/null
@@ -0,0 +1,11 @@
+config NET_XGENE_V2
+       tristate "APM X-Gene SoC Ethernet-v2 Driver"
+       depends on HAS_DMA
+       depends on ARCH_XGENE || COMPILE_TEST
+       help
+         This is the Ethernet driver for the on-chip Ethernet interface
+         on APM X-Gene SoCs, which uses a linked-list DMA descriptor
+         architecture (v2).
+
+         To compile this driver as a module, choose M here. This module will
+         be called xgene-enet-v2.
diff --git a/drivers/net/ethernet/apm/xgene-v2/Makefile b/drivers/net/ethernet/apm/xgene-v2/Makefile
new file mode 100644 (file)
index 0000000..f16a2b3
--- /dev/null
@@ -0,0 +1,6 @@
+#
+# Makefile for APM X-Gene Ethernet v2 driver
+#
+
+xgene-enet-v2-objs := main.o mac.o enet.o ring.o mdio.o ethtool.o
+obj-$(CONFIG_NET_XGENE_V2) += xgene-enet-v2.o
diff --git a/drivers/net/ethernet/apm/xgene-v2/enet.c b/drivers/net/ethernet/apm/xgene-v2/enet.c
new file mode 100644 (file)
index 0000000..5998da0
--- /dev/null
@@ -0,0 +1,83 @@
+/*
+ * Applied Micro X-Gene SoC Ethernet v2 Driver
+ *
+ * Copyright (c) 2017, Applied Micro Circuits Corporation
+ * Author(s): Iyappan Subramanian <isubramanian@apm.com>
+ *           Keyur Chudgar <kchudgar@apm.com>
+ *
+ * This program is free software; you can redistribute  it and/or modify it
+ * under  the terms of  the GNU General  Public License as published by the
+ * Free Software Foundation;  either version 2 of the  License, or (at your
+ * option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "main.h"
+
+void xge_wr_csr(struct xge_pdata *pdata, u32 offset, u32 val)
+{
+       void __iomem *addr = pdata->resources.base_addr + offset;
+
+       iowrite32(val, addr);
+}
+
+u32 xge_rd_csr(struct xge_pdata *pdata, u32 offset)
+{
+       void __iomem *addr = pdata->resources.base_addr + offset;
+
+       return ioread32(addr);
+}
+
+int xge_port_reset(struct net_device *ndev)
+{
+       struct xge_pdata *pdata = netdev_priv(ndev);
+       struct device *dev = &pdata->pdev->dev;
+       u32 data, wait = 10;
+
+       xge_wr_csr(pdata, ENET_CLKEN, 0x3);
+       xge_wr_csr(pdata, ENET_SRST, 0xf);
+       xge_wr_csr(pdata, ENET_SRST, 0);
+       xge_wr_csr(pdata, CFG_MEM_RAM_SHUTDOWN, 1);
+       xge_wr_csr(pdata, CFG_MEM_RAM_SHUTDOWN, 0);
+
+       do {
+               usleep_range(100, 110);
+               data = xge_rd_csr(pdata, BLOCK_MEM_RDY);
+       } while (data != MEM_RDY && wait--);
+
+       if (data != MEM_RDY) {
+               dev_err(dev, "ECC init failed: %x\n", data);
+               return -ETIMEDOUT;
+       }
+
+       xge_wr_csr(pdata, ENET_SHIM, DEVM_ARAUX_COH | DEVM_AWAUX_COH);
+
+       return 0;
+}
+
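
The open-coded ready poll in xge_port_reset() could also be written with the kernel's iopoll helper; a sketch of the equivalent, assuming the register definitions from enet.h (~100 us poll interval, ~1.1 ms total, matching the ten usleep_range(100, 110) iterations above):

#include <linux/iopoll.h>

static int example_wait_mem_ready(struct xge_pdata *pdata)
{
        void __iomem *addr = pdata->resources.base_addr + BLOCK_MEM_RDY;
        u32 data;

        /* Poll BLOCK_MEM_RDY until it reads MEM_RDY or ~1.1 ms elapse */
        return readl_poll_timeout(addr, data, data == MEM_RDY, 100, 1100);
}
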
+static void xge_traffic_resume(struct net_device *ndev)
+{
+       struct xge_pdata *pdata = netdev_priv(ndev);
+
+       xge_wr_csr(pdata, CFG_FORCE_LINK_STATUS_EN, 1);
+       xge_wr_csr(pdata, FORCE_LINK_STATUS, 1);
+
+       xge_wr_csr(pdata, CFG_LINK_AGGR_RESUME, 1);
+       xge_wr_csr(pdata, RX_DV_GATE_REG, 1);
+}
+
+void xge_port_init(struct net_device *ndev)
+{
+       struct xge_pdata *pdata = netdev_priv(ndev);
+
+       pdata->phy_speed = SPEED_1000;
+       xge_mac_init(pdata);
+       xge_traffic_resume(ndev);
+}
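
The ECC wait in xge_port_reset() is an open-coded poll loop. On kernels that
provide read_poll_timeout() in linux/iopoll.h (an assumption; this driver
targets an older tree), the same wait could be expressed more compactly, as
sketched here:

    #include <linux/iopoll.h>

    /* Poll BLOCK_MEM_RDY every ~100us until it reads MEM_RDY, giving up
     * after ~1.1ms (roughly the ten iterations of the loop above).
     * Returns 0 on success or -ETIMEDOUT.
     */
    static int xge_wait_mem_rdy(struct xge_pdata *pdata)
    {
            u32 data;

            return read_poll_timeout(xge_rd_csr, data, data == MEM_RDY,
                                     100, 1100, false, pdata, BLOCK_MEM_RDY);
    }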
diff --git a/drivers/net/ethernet/apm/xgene-v2/enet.h b/drivers/net/ethernet/apm/xgene-v2/enet.h
new file mode 100644 (file)
index 0000000..3fd36dc
--- /dev/null
@@ -0,0 +1,45 @@
+/*
+ * Applied Micro X-Gene SoC Ethernet v2 Driver
+ *
+ * Copyright (c) 2017, Applied Micro Circuits Corporation
+ * Author(s): Iyappan Subramanian <isubramanian@apm.com>
+ *           Keyur Chudgar <kchudgar@apm.com>
+ *
+ * This program is free software; you can redistribute  it and/or modify it
+ * under  the terms of  the GNU General  Public License as published by the
+ * Free Software Foundation;  either version 2 of the  License, or (at your
+ * option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef __XGENE_ENET_V2_ENET_H__
+#define __XGENE_ENET_V2_ENET_H__
+
+#define ENET_CLKEN             0xc008
+#define ENET_SRST              0xc000
+#define ENET_SHIM              0xc010
+#define CFG_MEM_RAM_SHUTDOWN   0xd070
+#define BLOCK_MEM_RDY          0xd074
+
+#define MEM_RDY                        0xffffffff
+#define DEVM_ARAUX_COH         BIT(19)
+#define DEVM_AWAUX_COH         BIT(3)
+
+#define CFG_FORCE_LINK_STATUS_EN       0x229c
+#define FORCE_LINK_STATUS              0x22a0
+#define CFG_LINK_AGGR_RESUME           0x27c8
+#define RX_DV_GATE_REG                 0x2dfc
+
+void xge_wr_csr(struct xge_pdata *pdata, u32 offset, u32 val);
+u32 xge_rd_csr(struct xge_pdata *pdata, u32 offset);
+int xge_port_reset(struct net_device *ndev);
+void xge_port_init(struct net_device *ndev);
+
+#endif  /* __XGENE_ENET_V2_ENET_H__ */
diff --git a/drivers/net/ethernet/apm/xgene-v2/ethtool.c b/drivers/net/ethernet/apm/xgene-v2/ethtool.c
new file mode 100644 (file)
index 0000000..0c426f5
--- /dev/null
@@ -0,0 +1,121 @@
+/*
+ * Applied Micro X-Gene SoC Ethernet v2 Driver
+ *
+ * Copyright (c) 2017, Applied Micro Circuits Corporation
+ * Author(s): Iyappan Subramanian <isubramanian@apm.com>
+ *           Keyur Chudgar <kchudgar@apm.com>
+ *
+ * This program is free software; you can redistribute  it and/or modify it
+ * under  the terms of  the GNU General  Public License as published by the
+ * Free Software Foundation;  either version 2 of the  License, or (at your
+ * option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "main.h"
+
+struct xge_gstrings_stats {
+       char name[ETH_GSTRING_LEN];
+       int offset;
+};
+
+#define XGE_STAT(m)            { #m, offsetof(struct xge_pdata, stats.m) }
+
+static const struct xge_gstrings_stats gstrings_stats[] = {
+       XGE_STAT(rx_packets),
+       XGE_STAT(tx_packets),
+       XGE_STAT(rx_bytes),
+       XGE_STAT(tx_bytes),
+       XGE_STAT(rx_errors)
+};
+
+#define XGE_STATS_LEN          ARRAY_SIZE(gstrings_stats)
+
+static void xge_get_drvinfo(struct net_device *ndev,
+                           struct ethtool_drvinfo *info)
+{
+       struct xge_pdata *pdata = netdev_priv(ndev);
+       struct platform_device *pdev = pdata->pdev;
+
+       strcpy(info->driver, "xgene-enet-v2");
+       strcpy(info->version, XGENE_ENET_V2_VERSION);
+       snprintf(info->fw_version, ETHTOOL_FWVERS_LEN, "N/A");
+       snprintf(info->bus_info, sizeof(info->bus_info), "%s", pdev->name);
+}
+
+static void xge_get_strings(struct net_device *ndev, u32 stringset, u8 *data)
+{
+       u8 *p = data;
+       int i;
+
+       if (stringset != ETH_SS_STATS)
+               return;
+
+       for (i = 0; i < XGE_STATS_LEN; i++) {
+               memcpy(p, gstrings_stats[i].name, ETH_GSTRING_LEN);
+               p += ETH_GSTRING_LEN;
+       }
+}
+
+static int xge_get_sset_count(struct net_device *ndev, int sset)
+{
+       if (sset != ETH_SS_STATS)
+               return -EINVAL;
+
+       return XGE_STATS_LEN;
+}
+
+static void xge_get_ethtool_stats(struct net_device *ndev,
+                                 struct ethtool_stats *dummy,
+                                 u64 *data)
+{
+       void *pdata = netdev_priv(ndev);
+       int i;
+
+       for (i = 0; i < XGE_STATS_LEN; i++)
+               *data++ = *(u64 *)(pdata + gstrings_stats[i].offset);
+}
+
+static int xge_get_link_ksettings(struct net_device *ndev,
+                                 struct ethtool_link_ksettings *cmd)
+{
+       struct phy_device *phydev = ndev->phydev;
+
+       if (!phydev)
+               return -ENODEV;
+
+       return phy_ethtool_ksettings_get(phydev, cmd);
+}
+
+static int xge_set_link_ksettings(struct net_device *ndev,
+                                 const struct ethtool_link_ksettings *cmd)
+{
+       struct phy_device *phydev = ndev->phydev;
+
+       if (!phydev)
+               return -ENODEV;
+
+       return phy_ethtool_ksettings_set(phydev, cmd);
+}
+
+static const struct ethtool_ops xge_ethtool_ops = {
+       .get_drvinfo = xge_get_drvinfo,
+       .get_link = ethtool_op_get_link,
+       .get_strings = xge_get_strings,
+       .get_sset_count = xge_get_sset_count,
+       .get_ethtool_stats = xge_get_ethtool_stats,
+       .get_link_ksettings = xge_get_link_ksettings,
+       .set_link_ksettings = xge_set_link_ksettings,
+};
+
+void xge_set_ethtool_ops(struct net_device *ndev)
+{
+       ndev->ethtool_ops = &xge_ethtool_ops;
+}
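
The gstrings_stats[] table above pairs each counter name with its offsetof()
into struct xge_pdata, letting xge_get_ethtool_stats() walk one array instead
of copying fields by hand. A standalone sketch of the same offset trick, with
hypothetical struct and field names, compiles as plain C:

    #include <stddef.h>
    #include <stdint.h>
    #include <stdio.h>

    struct demo_stats { uint64_t rx_packets, tx_packets; };
    struct demo_priv { int unrelated_state; struct demo_stats stats; };

    struct stat_desc { const char *name; size_t offset; };
    #define DEMO_STAT(m) { #m, offsetof(struct demo_priv, stats.m) }

    static const struct stat_desc descs[] = {
            DEMO_STAT(rx_packets),
            DEMO_STAT(tx_packets),
    };

    int main(void)
    {
            struct demo_priv p = { .stats = { .rx_packets = 7, .tx_packets = 3 } };
            const char *base = (const char *)&p;
            size_t i;

            /* Same walk as xge_get_ethtool_stats(): base pointer plus offset. */
            for (i = 0; i < sizeof(descs) / sizeof(descs[0]); i++)
                    printf("%s: %llu\n", descs[i].name,
                           (unsigned long long)*(const uint64_t *)(base + descs[i].offset));
            return 0;
    }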
diff --git a/drivers/net/ethernet/apm/xgene-v2/mac.c b/drivers/net/ethernet/apm/xgene-v2/mac.c
new file mode 100644 (file)
index 0000000..ee431e3
--- /dev/null
@@ -0,0 +1,116 @@
+/*
+ * Applied Micro X-Gene SoC Ethernet v2 Driver
+ *
+ * Copyright (c) 2017, Applied Micro Circuits Corporation
+ * Author(s): Iyappan Subramanian <isubramanian@apm.com>
+ *           Keyur Chudgar <kchudgar@apm.com>
+ *
+ * This program is free software; you can redistribute  it and/or modify it
+ * under  the terms of  the GNU General  Public License as published by the
+ * Free Software Foundation;  either version 2 of the  License, or (at your
+ * option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "main.h"
+
+void xge_mac_reset(struct xge_pdata *pdata)
+{
+       xge_wr_csr(pdata, MAC_CONFIG_1, SOFT_RESET);
+       xge_wr_csr(pdata, MAC_CONFIG_1, 0);
+}
+
+void xge_mac_set_speed(struct xge_pdata *pdata)
+{
+       u32 icm0, icm2, ecm0, mc2;
+       u32 intf_ctrl, rgmii;
+
+       icm0 = xge_rd_csr(pdata, ICM_CONFIG0_REG_0);
+       icm2 = xge_rd_csr(pdata, ICM_CONFIG2_REG_0);
+       ecm0 = xge_rd_csr(pdata, ECM_CONFIG0_REG_0);
+       rgmii = xge_rd_csr(pdata, RGMII_REG_0);
+       mc2 = xge_rd_csr(pdata, MAC_CONFIG_2);
+       intf_ctrl = xge_rd_csr(pdata, INTERFACE_CONTROL);
+       icm2 |= CFG_WAITASYNCRD_EN;
+
+       switch (pdata->phy_speed) {
+       case SPEED_10:
+               SET_REG_BITS(&mc2, INTF_MODE, 1);
+               SET_REG_BITS(&intf_ctrl, HD_MODE, 0);
+               SET_REG_BITS(&icm0, CFG_MACMODE, 0);
+               SET_REG_BITS(&icm2, CFG_WAITASYNCRD, 500);
+               SET_REG_BIT(&rgmii, CFG_SPEED_125, 0);
+               break;
+       case SPEED_100:
+               SET_REG_BITS(&mc2, INTF_MODE, 1);
+               SET_REG_BITS(&intf_ctrl, HD_MODE, 1);
+               SET_REG_BITS(&icm0, CFG_MACMODE, 1);
+               SET_REG_BITS(&icm2, CFG_WAITASYNCRD, 80);
+               SET_REG_BIT(&rgmii, CFG_SPEED_125, 0);
+               break;
+       default:
+               SET_REG_BITS(&mc2, INTF_MODE, 2);
+               SET_REG_BITS(&intf_ctrl, HD_MODE, 2);
+               SET_REG_BITS(&icm0, CFG_MACMODE, 2);
+               SET_REG_BITS(&icm2, CFG_WAITASYNCRD, 16);
+               SET_REG_BIT(&rgmii, CFG_SPEED_125, 1);
+               break;
+       }
+
+       mc2 |= FULL_DUPLEX | CRC_EN | PAD_CRC;
+       SET_REG_BITS(&ecm0, CFG_WFIFOFULLTHR, 0x32);
+
+       xge_wr_csr(pdata, MAC_CONFIG_2, mc2);
+       xge_wr_csr(pdata, INTERFACE_CONTROL, intf_ctrl);
+       xge_wr_csr(pdata, RGMII_REG_0, rgmii);
+       xge_wr_csr(pdata, ICM_CONFIG0_REG_0, icm0);
+       xge_wr_csr(pdata, ICM_CONFIG2_REG_0, icm2);
+       xge_wr_csr(pdata, ECM_CONFIG0_REG_0, ecm0);
+}
+
+void xge_mac_set_station_addr(struct xge_pdata *pdata)
+{
+       u8 *dev_addr = pdata->ndev->dev_addr;
+       u32 addr0, addr1;
+
+       addr0 = (dev_addr[3] << 24) | (dev_addr[2] << 16) |
+               (dev_addr[1] << 8) | dev_addr[0];
+       addr1 = (dev_addr[5] << 24) | (dev_addr[4] << 16);
+
+       xge_wr_csr(pdata, STATION_ADDR0, addr0);
+       xge_wr_csr(pdata, STATION_ADDR1, addr1);
+}
+
+void xge_mac_init(struct xge_pdata *pdata)
+{
+       xge_mac_reset(pdata);
+       xge_mac_set_speed(pdata);
+       xge_mac_set_station_addr(pdata);
+}
+
+void xge_mac_enable(struct xge_pdata *pdata)
+{
+       u32 data;
+
+       data = xge_rd_csr(pdata, MAC_CONFIG_1);
+       data |= TX_EN | RX_EN;
+       xge_wr_csr(pdata, MAC_CONFIG_1, data);
+
+       data = xge_rd_csr(pdata, MAC_CONFIG_1);
+}
+
+void xge_mac_disable(struct xge_pdata *pdata)
+{
+       u32 data;
+
+       data = xge_rd_csr(pdata, MAC_CONFIG_1);
+       data &= ~(TX_EN | RX_EN);
+       xge_wr_csr(pdata, MAC_CONFIG_1, data);
+}
diff --git a/drivers/net/ethernet/apm/xgene-v2/mac.h b/drivers/net/ethernet/apm/xgene-v2/mac.h
new file mode 100644 (file)
index 0000000..18a9c9d
--- /dev/null
@@ -0,0 +1,110 @@
+/*
+ * Applied Micro X-Gene SoC Ethernet v2 Driver
+ *
+ * Copyright (c) 2017, Applied Micro Circuits Corporation
+ * Author(s): Iyappan Subramanian <isubramanian@apm.com>
+ *           Keyur Chudgar <kchudgar@apm.com>
+ *
+ * This program is free software; you can redistribute  it and/or modify it
+ * under  the terms of  the GNU General  Public License as published by the
+ * Free Software Foundation;  either version 2 of the  License, or (at your
+ * option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef __XGENE_ENET_V2_MAC_H__
+#define __XGENE_ENET_V2_MAC_H__
+
+/* Register offsets */
+#define MAC_CONFIG_1           0xa000
+#define MAC_CONFIG_2           0xa004
+#define MII_MGMT_CONFIG                0xa020
+#define MII_MGMT_COMMAND       0xa024
+#define MII_MGMT_ADDRESS       0xa028
+#define MII_MGMT_CONTROL       0xa02c
+#define MII_MGMT_STATUS                0xa030
+#define MII_MGMT_INDICATORS    0xa034
+#define INTERFACE_CONTROL      0xa038
+#define STATION_ADDR0          0xa040
+#define STATION_ADDR1          0xa044
+#define RBYT                   0xa09c
+#define RPKT                   0xa0a0
+#define RFCS                   0xa0a4
+
+#define RGMII_REG_0            0x27e0
+#define ICM_CONFIG0_REG_0      0x2c00
+#define ICM_CONFIG2_REG_0      0x2c08
+#define ECM_CONFIG0_REG_0      0x2d00
+
+/* Register fields */
+#define SOFT_RESET             BIT(31)
+#define TX_EN                  BIT(0)
+#define RX_EN                  BIT(2)
+#define PAD_CRC                        BIT(2)
+#define CRC_EN                 BIT(1)
+#define FULL_DUPLEX            BIT(0)
+
+#define INTF_MODE_POS          8
+#define INTF_MODE_LEN          2
+#define HD_MODE_POS            25
+#define HD_MODE_LEN            2
+#define CFG_MACMODE_POS                18
+#define CFG_MACMODE_LEN                2
+#define CFG_WAITASYNCRD_POS    0
+#define CFG_WAITASYNCRD_LEN    16
+#define CFG_SPEED_125_POS      24
+#define CFG_WFIFOFULLTHR_POS   0
+#define CFG_WFIFOFULLTHR_LEN   7
+#define MGMT_CLOCK_SEL_POS     0
+#define MGMT_CLOCK_SEL_LEN     3
+#define PHY_ADDR_POS           8
+#define PHY_ADDR_LEN           5
+#define REG_ADDR_POS           0
+#define REG_ADDR_LEN           5
+#define MII_MGMT_BUSY          BIT(0)
+#define MII_READ_CYCLE         BIT(0)
+#define CFG_WAITASYNCRD_EN     BIT(16)
+
+static inline void xgene_set_reg_bits(u32 *var, int pos, int len, u32 val)
+{
+       u32 mask = GENMASK(pos + len - 1, pos);
+
+       *var &= ~mask;
+       *var |= ((val << pos) & mask);
+}
+
+static inline u32 xgene_get_reg_bits(u32 var, int pos, int len)
+{
+       u32 mask = GENMASK(pos + len - 1, pos);
+
+       return (var & mask) >> pos;
+}
+
+#define SET_REG_BITS(var, field, val)                                  \
+       xgene_set_reg_bits(var, field ## _POS, field ## _LEN, val)
+
+#define SET_REG_BIT(var, field, val)                                   \
+       xgene_set_reg_bits(var, field ## _POS, 1, val)
+
+#define GET_REG_BITS(var, field)                                       \
+       xgene_get_reg_bits(var, field ## _POS, field ## _LEN)
+
+#define GET_REG_BIT(var, field)                ((var) & (field))
+
+struct xge_pdata;
+
+void xge_mac_reset(struct xge_pdata *pdata);
+void xge_mac_set_speed(struct xge_pdata *pdata);
+void xge_mac_enable(struct xge_pdata *pdata);
+void xge_mac_disable(struct xge_pdata *pdata);
+void xge_mac_init(struct xge_pdata *pdata);
+void xge_mac_set_station_addr(struct xge_pdata *pdata);
+
+#endif /* __XGENE_ENET_V2_MAC_H__ */
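
A minimal illustration of the register-field helpers above, using the
INTF_MODE field (bits [9:8] of MAC_CONFIG_2) that xge_mac_set_speed()
programs; this fragment is illustrative, not driver code:

    u32 mc2 = 0, val;

    SET_REG_BITS(&mc2, INTF_MODE, 2);   /* 1G mode: mc2 == 0x200 */
    val = GET_REG_BITS(mc2, INTF_MODE); /* val == 2 */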
diff --git a/drivers/net/ethernet/apm/xgene-v2/main.c b/drivers/net/ethernet/apm/xgene-v2/main.c
new file mode 100644 (file)
index 0000000..0f2ad50
--- /dev/null
@@ -0,0 +1,759 @@
+/*
+ * Applied Micro X-Gene SoC Ethernet v2 Driver
+ *
+ * Copyright (c) 2017, Applied Micro Circuits Corporation
+ * Author(s): Iyappan Subramanian <isubramanian@apm.com>
+ *           Keyur Chudgar <kchudgar@apm.com>
+ *
+ * This program is free software; you can redistribute  it and/or modify it
+ * under  the terms of  the GNU General  Public License as published by the
+ * Free Software Foundation;  either version 2 of the  License, or (at your
+ * option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "main.h"
+
+static const struct acpi_device_id xge_acpi_match[];
+
+static int xge_get_resources(struct xge_pdata *pdata)
+{
+       struct platform_device *pdev;
+       struct net_device *ndev;
+       int phy_mode, ret = 0;
+       struct resource *res;
+       struct device *dev;
+
+       pdev = pdata->pdev;
+       dev = &pdev->dev;
+       ndev = pdata->ndev;
+
+       res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+       if (!res) {
+               dev_err(dev, "Resource enet_csr not defined\n");
+               return -ENODEV;
+       }
+
+       pdata->resources.base_addr = devm_ioremap(dev, res->start,
+                                                 resource_size(res));
+       if (!pdata->resources.base_addr) {
+               dev_err(dev, "Unable to retrieve ENET Port CSR region\n");
+               return -ENOMEM;
+       }
+
+       if (!device_get_mac_address(dev, ndev->dev_addr, ETH_ALEN))
+               eth_hw_addr_random(ndev);
+
+       memcpy(ndev->perm_addr, ndev->dev_addr, ndev->addr_len);
+
+       phy_mode = device_get_phy_mode(dev);
+       if (phy_mode < 0) {
+               dev_err(dev, "Unable to get phy-connection-type\n");
+               return phy_mode;
+       }
+       pdata->resources.phy_mode = phy_mode;
+
+       if (pdata->resources.phy_mode != PHY_INTERFACE_MODE_RGMII) {
+               dev_err(dev, "Incorrect phy-connection-type specified\n");
+               return -ENODEV;
+       }
+
+       ret = platform_get_irq(pdev, 0);
+       if (ret < 0) {
+               dev_err(dev, "Unable to get irq\n");
+               return ret;
+       }
+       pdata->resources.irq = ret;
+
+       return 0;
+}
+
+static int xge_refill_buffers(struct net_device *ndev, u32 nbuf)
+{
+       struct xge_pdata *pdata = netdev_priv(ndev);
+       struct xge_desc_ring *ring = pdata->rx_ring;
+       const u8 slots = XGENE_ENET_NUM_DESC - 1;
+       struct device *dev = &pdata->pdev->dev;
+       struct xge_raw_desc *raw_desc;
+       u64 addr_lo, addr_hi;
+       u8 tail = ring->tail;
+       struct sk_buff *skb;
+       dma_addr_t dma_addr;
+       u16 len;
+       int i;
+
+       for (i = 0; i < nbuf; i++) {
+               raw_desc = &ring->raw_desc[tail];
+
+               len = XGENE_ENET_STD_MTU;
+               skb = netdev_alloc_skb(ndev, len);
+               if (unlikely(!skb))
+                       return -ENOMEM;
+
+               dma_addr = dma_map_single(dev, skb->data, len, DMA_FROM_DEVICE);
+               if (dma_mapping_error(dev, dma_addr)) {
+                       netdev_err(ndev, "DMA mapping error\n");
+                       dev_kfree_skb_any(skb);
+                       return -EINVAL;
+               }
+
+               ring->pkt_info[tail].skb = skb;
+               ring->pkt_info[tail].dma_addr = dma_addr;
+
+               addr_hi = GET_BITS(NEXT_DESC_ADDRH, le64_to_cpu(raw_desc->m1));
+               addr_lo = GET_BITS(NEXT_DESC_ADDRL, le64_to_cpu(raw_desc->m1));
+               raw_desc->m1 = cpu_to_le64(SET_BITS(NEXT_DESC_ADDRL, addr_lo) |
+                                          SET_BITS(NEXT_DESC_ADDRH, addr_hi) |
+                                          SET_BITS(PKT_ADDRH,
+                                                   upper_32_bits(dma_addr)));
+
+               dma_wmb();
+               raw_desc->m0 = cpu_to_le64(SET_BITS(PKT_ADDRL, dma_addr) |
+                                          SET_BITS(E, 1));
+               tail = (tail + 1) & slots;
+       }
+
+       ring->tail = tail;
+
+       return 0;
+}
+
+static int xge_init_hw(struct net_device *ndev)
+{
+       struct xge_pdata *pdata = netdev_priv(ndev);
+       int ret;
+
+       ret = xge_port_reset(ndev);
+       if (ret)
+               return ret;
+
+       xge_port_init(ndev);
+       pdata->nbufs = NUM_BUFS;
+
+       return 0;
+}
+
+static irqreturn_t xge_irq(const int irq, void *data)
+{
+       struct xge_pdata *pdata = data;
+
+       if (napi_schedule_prep(&pdata->napi)) {
+               xge_intr_disable(pdata);
+               __napi_schedule(&pdata->napi);
+       }
+
+       return IRQ_HANDLED;
+}
+
+static int xge_request_irq(struct net_device *ndev)
+{
+       struct xge_pdata *pdata = netdev_priv(ndev);
+       int ret;
+
+       snprintf(pdata->irq_name, IRQ_ID_SIZE, "%s", ndev->name);
+
+       ret = request_irq(pdata->resources.irq, xge_irq, 0, pdata->irq_name,
+                         pdata);
+       if (ret)
+               netdev_err(ndev, "Failed to request irq %s\n", pdata->irq_name);
+
+       return ret;
+}
+
+static void xge_free_irq(struct net_device *ndev)
+{
+       struct xge_pdata *pdata = netdev_priv(ndev);
+
+       free_irq(pdata->resources.irq, pdata);
+}
+
+static bool is_tx_slot_available(struct xge_raw_desc *raw_desc)
+{
+       if (GET_BITS(E, le64_to_cpu(raw_desc->m0)) &&
+           (GET_BITS(PKT_SIZE, le64_to_cpu(raw_desc->m0)) == SLOT_EMPTY))
+               return true;
+
+       return false;
+}
+
+static netdev_tx_t xge_start_xmit(struct sk_buff *skb, struct net_device *ndev)
+{
+       struct xge_pdata *pdata = netdev_priv(ndev);
+       struct device *dev = &pdata->pdev->dev;
+       struct xge_desc_ring *tx_ring;
+       struct xge_raw_desc *raw_desc;
+       dma_addr_t dma_addr;
+       u64 addr_lo, addr_hi;
+       void *pkt_buf;
+       u8 tail;
+       u16 len;
+
+       tx_ring = pdata->tx_ring;
+       tail = tx_ring->tail;
+       len = skb_headlen(skb);
+       raw_desc = &tx_ring->raw_desc[tail];
+
+       if (!is_tx_slot_available(raw_desc)) {
+               netif_stop_queue(ndev);
+               return NETDEV_TX_BUSY;
+       }
+
+       /* Packet buffers should be 64B aligned */
+       pkt_buf = dma_zalloc_coherent(dev, XGENE_ENET_STD_MTU, &dma_addr,
+                                     GFP_ATOMIC);
+       if (unlikely(!pkt_buf)) {
+               dev_kfree_skb_any(skb);
+               return NETDEV_TX_OK;
+       }
+       memcpy(pkt_buf, skb->data, len);
+
+       addr_hi = GET_BITS(NEXT_DESC_ADDRH, le64_to_cpu(raw_desc->m1));
+       addr_lo = GET_BITS(NEXT_DESC_ADDRL, le64_to_cpu(raw_desc->m1));
+       raw_desc->m1 = cpu_to_le64(SET_BITS(NEXT_DESC_ADDRL, addr_lo) |
+                                  SET_BITS(NEXT_DESC_ADDRH, addr_hi) |
+                                  SET_BITS(PKT_ADDRH,
+                                           upper_32_bits(dma_addr)));
+
+       tx_ring->pkt_info[tail].skb = skb;
+       tx_ring->pkt_info[tail].dma_addr = dma_addr;
+       tx_ring->pkt_info[tail].pkt_buf = pkt_buf;
+
+       dma_wmb();
+
+       raw_desc->m0 = cpu_to_le64(SET_BITS(PKT_ADDRL, dma_addr) |
+                                  SET_BITS(PKT_SIZE, len) |
+                                  SET_BITS(E, 0));
+       skb_tx_timestamp(skb);
+       xge_wr_csr(pdata, DMATXCTRL, 1);
+
+       tx_ring->tail = (tail + 1) & (XGENE_ENET_NUM_DESC - 1);
+
+       return NETDEV_TX_OK;
+}
+
+static bool is_tx_hw_done(struct xge_raw_desc *raw_desc)
+{
+       if (GET_BITS(E, le64_to_cpu(raw_desc->m0)) &&
+           !GET_BITS(PKT_SIZE, le64_to_cpu(raw_desc->m0)))
+               return true;
+
+       return false;
+}
+
+static void xge_txc_poll(struct net_device *ndev)
+{
+       struct xge_pdata *pdata = netdev_priv(ndev);
+       struct device *dev = &pdata->pdev->dev;
+       struct xge_desc_ring *tx_ring;
+       struct xge_raw_desc *raw_desc;
+       dma_addr_t dma_addr;
+       struct sk_buff *skb;
+       void *pkt_buf;
+       u32 data;
+       u8 head;
+
+       tx_ring = pdata->tx_ring;
+       head = tx_ring->head;
+
+       data = xge_rd_csr(pdata, DMATXSTATUS);
+       if (!GET_BITS(TXPKTCOUNT, data))
+               return;
+
+       while (1) {
+               raw_desc = &tx_ring->raw_desc[head];
+
+               if (!is_tx_hw_done(raw_desc))
+                       break;
+
+               dma_rmb();
+
+               skb = tx_ring->pkt_info[head].skb;
+               dma_addr = tx_ring->pkt_info[head].dma_addr;
+               pkt_buf = tx_ring->pkt_info[head].pkt_buf;
+               pdata->stats.tx_packets++;
+               pdata->stats.tx_bytes += skb->len;
+               dma_free_coherent(dev, XGENE_ENET_STD_MTU, pkt_buf, dma_addr);
+               dev_kfree_skb_any(skb);
+
+               /* clear pktstart address and pktsize */
+               raw_desc->m0 = cpu_to_le64(SET_BITS(E, 1) |
+                                          SET_BITS(PKT_SIZE, SLOT_EMPTY));
+               xge_wr_csr(pdata, DMATXSTATUS, 1);
+
+               head = (head + 1) & (XGENE_ENET_NUM_DESC - 1);
+       }
+
+       if (netif_queue_stopped(ndev))
+               netif_wake_queue(ndev);
+
+       tx_ring->head = head;
+}
+
+static int xge_rx_poll(struct net_device *ndev, unsigned int budget)
+{
+       struct xge_pdata *pdata = netdev_priv(ndev);
+       struct device *dev = &pdata->pdev->dev;
+       struct xge_desc_ring *rx_ring;
+       struct xge_raw_desc *raw_desc;
+       struct sk_buff *skb;
+       dma_addr_t dma_addr;
+       int processed = 0;
+       u8 head, rx_error;
+       int i, ret;
+       u32 data;
+       u16 len;
+
+       rx_ring = pdata->rx_ring;
+       head = rx_ring->head;
+
+       data = xge_rd_csr(pdata, DMARXSTATUS);
+       if (!GET_BITS(RXPKTCOUNT, data))
+               return 0;
+
+       for (i = 0; i < budget; i++) {
+               raw_desc = &rx_ring->raw_desc[head];
+
+               if (GET_BITS(E, le64_to_cpu(raw_desc->m0)))
+                       break;
+
+               dma_rmb();
+
+               skb = rx_ring->pkt_info[head].skb;
+               rx_ring->pkt_info[head].skb = NULL;
+               dma_addr = rx_ring->pkt_info[head].dma_addr;
+               len = GET_BITS(PKT_SIZE, le64_to_cpu(raw_desc->m0));
+               dma_unmap_single(dev, dma_addr, XGENE_ENET_STD_MTU,
+                                DMA_FROM_DEVICE);
+
+               rx_error = GET_BITS(D, le64_to_cpu(raw_desc->m2));
+               if (unlikely(rx_error)) {
+                       pdata->stats.rx_errors++;
+                       dev_kfree_skb_any(skb);
+                       goto out;
+               }
+
+               skb_put(skb, len);
+               skb->protocol = eth_type_trans(skb, ndev);
+
+               pdata->stats.rx_packets++;
+               pdata->stats.rx_bytes += len;
+               napi_gro_receive(&pdata->napi, skb);
+out:
+               ret = xge_refill_buffers(ndev, 1);
+               xge_wr_csr(pdata, DMARXSTATUS, 1);
+               xge_wr_csr(pdata, DMARXCTRL, 1);
+
+               if (ret)
+                       break;
+
+               head = (head + 1) & (XGENE_ENET_NUM_DESC - 1);
+               processed++;
+       }
+
+       rx_ring->head = head;
+
+       return processed;
+}
+
+static void xge_delete_desc_ring(struct net_device *ndev,
+                                struct xge_desc_ring *ring)
+{
+       struct xge_pdata *pdata = netdev_priv(ndev);
+       struct device *dev = &pdata->pdev->dev;
+       u16 size;
+
+       if (!ring)
+               return;
+
+       size = XGENE_ENET_DESC_SIZE * XGENE_ENET_NUM_DESC;
+       if (ring->desc_addr)
+               dma_free_coherent(dev, size, ring->desc_addr, ring->dma_addr);
+
+       kfree(ring->pkt_info);
+       kfree(ring);
+}
+
+static void xge_free_buffers(struct net_device *ndev)
+{
+       struct xge_pdata *pdata = netdev_priv(ndev);
+       struct xge_desc_ring *ring = pdata->rx_ring;
+       struct device *dev = &pdata->pdev->dev;
+       struct sk_buff *skb;
+       dma_addr_t dma_addr;
+       int i;
+
+       for (i = 0; i < XGENE_ENET_NUM_DESC; i++) {
+               skb = ring->pkt_info[i].skb;
+               dma_addr = ring->pkt_info[i].dma_addr;
+
+               if (!skb)
+                       continue;
+
+               dma_unmap_single(dev, dma_addr, XGENE_ENET_STD_MTU,
+                                DMA_FROM_DEVICE);
+               dev_kfree_skb_any(skb);
+       }
+}
+
+static void xge_delete_desc_rings(struct net_device *ndev)
+{
+       struct xge_pdata *pdata = netdev_priv(ndev);
+
+       xge_txc_poll(ndev);
+       xge_delete_desc_ring(ndev, pdata->tx_ring);
+
+       xge_rx_poll(ndev, 64);
+       xge_free_buffers(ndev);
+       xge_delete_desc_ring(ndev, pdata->rx_ring);
+}
+
+static struct xge_desc_ring *xge_create_desc_ring(struct net_device *ndev)
+{
+       struct xge_pdata *pdata = netdev_priv(ndev);
+       struct device *dev = &pdata->pdev->dev;
+       struct xge_desc_ring *ring;
+       u16 size;
+
+       ring = kzalloc(sizeof(*ring), GFP_KERNEL);
+       if (!ring)
+               return NULL;
+
+       ring->ndev = ndev;
+
+       size = XGENE_ENET_DESC_SIZE * XGENE_ENET_NUM_DESC;
+       ring->desc_addr = dma_zalloc_coherent(dev, size, &ring->dma_addr,
+                                             GFP_KERNEL);
+       if (!ring->desc_addr)
+               goto err;
+
+       ring->pkt_info = kcalloc(XGENE_ENET_NUM_DESC, sizeof(*ring->pkt_info),
+                                GFP_KERNEL);
+       if (!ring->pkt_info)
+               goto err;
+
+       xge_setup_desc(ring);
+
+       return ring;
+
+err:
+       xge_delete_desc_ring(ndev, ring);
+
+       return NULL;
+}
+
+static int xge_create_desc_rings(struct net_device *ndev)
+{
+       struct xge_pdata *pdata = netdev_priv(ndev);
+       struct xge_desc_ring *ring;
+       int ret;
+
+       /* create tx ring */
+       ring = xge_create_desc_ring(ndev);
+       if (!ring)
+               goto err;
+
+       pdata->tx_ring = ring;
+       xge_update_tx_desc_addr(pdata);
+
+       /* create rx ring */
+       ring = xge_create_desc_ring(ndev);
+       if (!ring)
+               goto err;
+
+       pdata->rx_ring = ring;
+       xge_update_rx_desc_addr(pdata);
+
+       ret = xge_refill_buffers(ndev, XGENE_ENET_NUM_DESC);
+       if (ret)
+               goto err;
+
+       return 0;
+err:
+       xge_delete_desc_rings(ndev);
+
+       return -ENOMEM;
+}
+
+static int xge_open(struct net_device *ndev)
+{
+       struct xge_pdata *pdata = netdev_priv(ndev);
+       int ret;
+
+       ret = xge_create_desc_rings(ndev);
+       if (ret)
+               return ret;
+
+       napi_enable(&pdata->napi);
+       ret = xge_request_irq(ndev);
+       if (ret)
+               return ret;
+
+       xge_intr_enable(pdata);
+       xge_wr_csr(pdata, DMARXCTRL, 1);
+
+       phy_start(ndev->phydev);
+       xge_mac_enable(pdata);
+       netif_start_queue(ndev);
+
+       return 0;
+}
+
+static int xge_close(struct net_device *ndev)
+{
+       struct xge_pdata *pdata = netdev_priv(ndev);
+
+       netif_stop_queue(ndev);
+       xge_mac_disable(pdata);
+       phy_stop(ndev->phydev);
+
+       xge_intr_disable(pdata);
+       xge_free_irq(ndev);
+       napi_disable(&pdata->napi);
+       xge_delete_desc_rings(ndev);
+
+       return 0;
+}
+
+static int xge_napi(struct napi_struct *napi, const int budget)
+{
+       struct net_device *ndev = napi->dev;
+       struct xge_pdata *pdata;
+       int processed;
+
+       pdata = netdev_priv(ndev);
+
+       xge_txc_poll(ndev);
+       processed = xge_rx_poll(ndev, budget);
+
+       if (processed < budget) {
+               napi_complete_done(napi, processed);
+               xge_intr_enable(pdata);
+       }
+
+       return processed;
+}
+
+static int xge_set_mac_addr(struct net_device *ndev, void *addr)
+{
+       struct xge_pdata *pdata = netdev_priv(ndev);
+       int ret;
+
+       ret = eth_mac_addr(ndev, addr);
+       if (ret)
+               return ret;
+
+       xge_mac_set_station_addr(pdata);
+
+       return 0;
+}
+
+static bool is_tx_pending(struct xge_raw_desc *raw_desc)
+{
+       if (!GET_BITS(E, le64_to_cpu(raw_desc->m0)))
+               return true;
+
+       return false;
+}
+
+static void xge_free_pending_skb(struct net_device *ndev)
+{
+       struct xge_pdata *pdata = netdev_priv(ndev);
+       struct device *dev = &pdata->pdev->dev;
+       struct xge_desc_ring *tx_ring;
+       struct xge_raw_desc *raw_desc;
+       dma_addr_t dma_addr;
+       struct sk_buff *skb;
+       void *pkt_buf;
+       int i;
+
+       tx_ring = pdata->tx_ring;
+
+       for (i = 0; i < XGENE_ENET_NUM_DESC; i++) {
+               raw_desc = &tx_ring->raw_desc[i];
+
+               if (!is_tx_pending(raw_desc))
+                       continue;
+
+               skb = tx_ring->pkt_info[i].skb;
+               dma_addr = tx_ring->pkt_info[i].dma_addr;
+               pkt_buf = tx_ring->pkt_info[i].pkt_buf;
+               dma_free_coherent(dev, XGENE_ENET_STD_MTU, pkt_buf, dma_addr);
+               dev_kfree_skb_any(skb);
+       }
+}
+
+static void xge_timeout(struct net_device *ndev)
+{
+       struct xge_pdata *pdata = netdev_priv(ndev);
+
+       rtnl_lock();
+
+       if (!netif_running(ndev))
+               goto out;
+
+       netif_stop_queue(ndev);
+       xge_intr_disable(pdata);
+       napi_disable(&pdata->napi);
+
+       xge_wr_csr(pdata, DMATXCTRL, 0);
+       xge_txc_poll(ndev);
+       xge_free_pending_skb(ndev);
+       xge_wr_csr(pdata, DMATXSTATUS, ~0U);
+
+       xge_setup_desc(pdata->tx_ring);
+       xge_update_tx_desc_addr(pdata);
+       xge_mac_init(pdata);
+
+       napi_enable(&pdata->napi);
+       xge_intr_enable(pdata);
+       xge_mac_enable(pdata);
+       netif_start_queue(ndev);
+
+out:
+       rtnl_unlock();
+}
+
+static void xge_get_stats64(struct net_device *ndev,
+                           struct rtnl_link_stats64 *storage)
+{
+       struct xge_pdata *pdata = netdev_priv(ndev);
+       struct xge_stats *stats = &pdata->stats;
+
+       storage->tx_packets += stats->tx_packets;
+       storage->tx_bytes += stats->tx_bytes;
+
+       storage->rx_packets += stats->rx_packets;
+       storage->rx_bytes += stats->rx_bytes;
+       storage->rx_errors += stats->rx_errors;
+}
+
+static const struct net_device_ops xgene_ndev_ops = {
+       .ndo_open = xge_open,
+       .ndo_stop = xge_close,
+       .ndo_start_xmit = xge_start_xmit,
+       .ndo_set_mac_address = xge_set_mac_addr,
+       .ndo_tx_timeout = xge_timeout,
+       .ndo_get_stats64 = xge_get_stats64,
+};
+
+static int xge_probe(struct platform_device *pdev)
+{
+       struct device *dev = &pdev->dev;
+       struct net_device *ndev;
+       struct xge_pdata *pdata;
+       int ret;
+
+       ndev = alloc_etherdev(sizeof(*pdata));
+       if (!ndev)
+               return -ENOMEM;
+
+       pdata = netdev_priv(ndev);
+
+       pdata->pdev = pdev;
+       pdata->ndev = ndev;
+       SET_NETDEV_DEV(ndev, dev);
+       platform_set_drvdata(pdev, pdata);
+       ndev->netdev_ops = &xgene_ndev_ops;
+
+       ndev->features |= NETIF_F_GSO |
+                         NETIF_F_GRO;
+
+       ret = xge_get_resources(pdata);
+       if (ret)
+               goto err;
+
+       ndev->hw_features = ndev->features;
+       xge_set_ethtool_ops(ndev);
+
+       ret = dma_coerce_mask_and_coherent(dev, DMA_BIT_MASK(64));
+       if (ret) {
+               netdev_err(ndev, "No usable DMA configuration\n");
+               goto err;
+       }
+
+       ret = xge_init_hw(ndev);
+       if (ret)
+               goto err;
+
+       ret = xge_mdio_config(ndev);
+       if (ret)
+               goto err;
+
+       netif_napi_add(ndev, &pdata->napi, xge_napi, NAPI_POLL_WEIGHT);
+
+       ret = register_netdev(ndev);
+       if (ret) {
+               netdev_err(ndev, "Failed to register netdev\n");
+               goto err;
+       }
+
+       return 0;
+
+err:
+       free_netdev(ndev);
+
+       return ret;
+}
+
+static int xge_remove(struct platform_device *pdev)
+{
+       struct xge_pdata *pdata;
+       struct net_device *ndev;
+
+       pdata = platform_get_drvdata(pdev);
+       ndev = pdata->ndev;
+
+       rtnl_lock();
+       if (netif_running(ndev))
+               dev_close(ndev);
+       rtnl_unlock();
+
+       xge_mdio_remove(ndev);
+       unregister_netdev(ndev);
+       free_netdev(ndev);
+
+       return 0;
+}
+
+static void xge_shutdown(struct platform_device *pdev)
+{
+       struct xge_pdata *pdata;
+
+       pdata = platform_get_drvdata(pdev);
+       if (!pdata)
+               return;
+
+       if (!pdata->ndev)
+               return;
+
+       xge_remove(pdev);
+}
+
+static const struct acpi_device_id xge_acpi_match[] = {
+       { "APMC0D80" },
+       { }
+};
+MODULE_DEVICE_TABLE(acpi, xge_acpi_match);
+
+static struct platform_driver xge_driver = {
+       .driver = {
+                  .name = "xgene-enet-v2",
+                  .acpi_match_table = ACPI_PTR(xge_acpi_match),
+       },
+       .probe = xge_probe,
+       .remove = xge_remove,
+       .shutdown = xge_shutdown,
+};
+module_platform_driver(xge_driver);
+
+MODULE_DESCRIPTION("APM X-Gene SoC Ethernet v2 driver");
+MODULE_AUTHOR("Iyappan Subramanian <isubramanian@apm.com>");
+MODULE_VERSION(XGENE_ENET_V2_VERSION);
+MODULE_LICENSE("GPL");
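
The TX path above encodes a small slot state machine in descriptor word m0:
xge_setup_desc() and xge_txc_poll() publish a free slot as E=1 with
PKT_SIZE=SLOT_EMPTY, xge_start_xmit() hands a slot to hardware by writing E=0
with the real length, and completion is observed as E=1 with PKT_SIZE=0,
which is what is_tx_slot_available() and is_tx_hw_done() test. A compact
restatement of that convention (an illustrative helper, not part of the
driver):

    enum tx_slot_state { TX_SLOT_FREE, TX_SLOT_PENDING, TX_SLOT_DONE };

    static enum tx_slot_state xge_tx_slot_state(const struct xge_raw_desc *desc)
    {
            u64 m0 = le64_to_cpu(desc->m0);

            if (!GET_BITS(E, m0))
                    return TX_SLOT_PENDING;  /* queued; hardware owns the slot */
            if (GET_BITS(PKT_SIZE, m0) == SLOT_EMPTY)
                    return TX_SLOT_FREE;     /* initialized or reclaimed */
            return TX_SLOT_DONE;             /* sent; hardware cleared the size */
    }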
diff --git a/drivers/net/ethernet/apm/xgene-v2/main.h b/drivers/net/ethernet/apm/xgene-v2/main.h
new file mode 100644 (file)
index 0000000..db1178e
--- /dev/null
@@ -0,0 +1,80 @@
+/*
+ * Applied Micro X-Gene SoC Ethernet v2 Driver
+ *
+ * Copyright (c) 2017, Applied Micro Circuits Corporation
+ * Author(s): Iyappan Subramanian <isubramanian@apm.com>
+ *           Keyur Chudgar <kchudgar@apm.com>
+ *
+ * This program is free software; you can redistribute  it and/or modify it
+ * under  the terms of  the GNU General  Public License as published by the
+ * Free Software Foundation;  either version 2 of the  License, or (at your
+ * option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef __XGENE_ENET_V2_MAIN_H__
+#define __XGENE_ENET_V2_MAIN_H__
+
+#include <linux/acpi.h>
+#include <linux/clk.h>
+#include <linux/efi.h>
+#include <linux/if_vlan.h>
+#include <linux/irq.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/of_platform.h>
+#include <linux/of_net.h>
+#include <linux/of_mdio.h>
+#include <linux/prefetch.h>
+#include <linux/phy.h>
+#include <net/ip.h>
+#include "mac.h"
+#include "enet.h"
+#include "ring.h"
+
+#define XGENE_ENET_V2_VERSION  "v1.0"
+#define XGENE_ENET_STD_MTU     1536
+#define XGENE_ENET_MIN_FRAME   60
+#define IRQ_ID_SIZE             16
+
+struct xge_resource {
+       void __iomem *base_addr;
+       int phy_mode;
+       u32 irq;
+};
+
+struct xge_stats {
+       u64 tx_packets;
+       u64 tx_bytes;
+       u64 rx_packets;
+       u64 rx_bytes;
+       u64 rx_errors;
+};
+
+/* ethernet private data */
+struct xge_pdata {
+       struct xge_resource resources;
+       struct xge_desc_ring *tx_ring;
+       struct xge_desc_ring *rx_ring;
+       struct platform_device *pdev;
+       char irq_name[IRQ_ID_SIZE];
+       struct mii_bus *mdio_bus;
+       struct net_device *ndev;
+       struct napi_struct napi;
+       struct xge_stats stats;
+       int phy_speed;
+       u8 nbufs;
+};
+
+int xge_mdio_config(struct net_device *ndev);
+void xge_mdio_remove(struct net_device *ndev);
+void xge_set_ethtool_ops(struct net_device *ndev);
+
+#endif /* __XGENE_ENET_V2_MAIN_H__ */
diff --git a/drivers/net/ethernet/apm/xgene-v2/mdio.c b/drivers/net/ethernet/apm/xgene-v2/mdio.c
new file mode 100644 (file)
index 0000000..a583c6a
--- /dev/null
@@ -0,0 +1,168 @@
+/*
+ * Applied Micro X-Gene SoC Ethernet v2 Driver
+ *
+ * Copyright (c) 2017, Applied Micro Circuits Corporation
+ * Author(s): Iyappan Subramanian <isubramanian@apm.com>
+ *           Keyur Chudgar <kchudgar@apm.com>
+ *
+ * This program is free software; you can redistribute  it and/or modify it
+ * under  the terms of  the GNU General  Public License as published by the
+ * Free Software Foundation;  either version 2 of the  License, or (at your
+ * option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "main.h"
+
+static int xge_mdio_write(struct mii_bus *bus, int phy_id, int reg, u16 data)
+{
+       struct xge_pdata *pdata = bus->priv;
+       u32 done, val = 0;
+       u8 wait = 10;
+
+       SET_REG_BITS(&val, PHY_ADDR, phy_id);
+       SET_REG_BITS(&val, REG_ADDR, reg);
+       xge_wr_csr(pdata, MII_MGMT_ADDRESS, val);
+
+       xge_wr_csr(pdata, MII_MGMT_CONTROL, data);
+       do {
+               usleep_range(5, 10);
+               done = xge_rd_csr(pdata, MII_MGMT_INDICATORS);
+       } while ((done & MII_MGMT_BUSY) && wait--);
+
+       if (done & MII_MGMT_BUSY) {
+               dev_err(&bus->dev, "MII_MGMT write failed\n");
+               return -ETIMEDOUT;
+       }
+
+       return 0;
+}
+
+static int xge_mdio_read(struct mii_bus *bus, int phy_id, int reg)
+{
+       struct xge_pdata *pdata = bus->priv;
+       u32 data, done, val = 0;
+       u8 wait = 10;
+
+       SET_REG_BITS(&val, PHY_ADDR, phy_id);
+       SET_REG_BITS(&val, REG_ADDR, reg);
+       xge_wr_csr(pdata, MII_MGMT_ADDRESS, val);
+
+       xge_wr_csr(pdata, MII_MGMT_COMMAND, MII_READ_CYCLE);
+       do {
+               usleep_range(5, 10);
+               done = xge_rd_csr(pdata, MII_MGMT_INDICATORS);
+       } while ((done & MII_MGMT_BUSY) && wait--);
+
+       if (done & MII_MGMT_BUSY) {
+               dev_err(&bus->dev, "MII_MGMT read failed\n");
+               return -ETIMEDOUT;
+       }
+
+       data = xge_rd_csr(pdata, MII_MGMT_STATUS);
+       xge_wr_csr(pdata, MII_MGMT_COMMAND, 0);
+
+       return data;
+}
+
+static void xge_adjust_link(struct net_device *ndev)
+{
+       struct xge_pdata *pdata = netdev_priv(ndev);
+       struct phy_device *phydev = ndev->phydev;
+
+       if (phydev->link) {
+               if (pdata->phy_speed != phydev->speed) {
+                       pdata->phy_speed = phydev->speed;
+                       xge_mac_set_speed(pdata);
+                       xge_mac_enable(pdata);
+                       phy_print_status(phydev);
+               }
+       } else {
+               if (pdata->phy_speed != SPEED_UNKNOWN) {
+                       pdata->phy_speed = SPEED_UNKNOWN;
+                       xge_mac_disable(pdata);
+                       phy_print_status(phydev);
+               }
+       }
+}
+
+void xge_mdio_remove(struct net_device *ndev)
+{
+       struct xge_pdata *pdata = netdev_priv(ndev);
+       struct mii_bus *mdio_bus = pdata->mdio_bus;
+
+       if (ndev->phydev)
+               phy_disconnect(ndev->phydev);
+
+       if (mdio_bus->state == MDIOBUS_REGISTERED)
+               mdiobus_unregister(mdio_bus);
+
+       mdiobus_free(mdio_bus);
+}
+
+int xge_mdio_config(struct net_device *ndev)
+{
+       struct xge_pdata *pdata = netdev_priv(ndev);
+       struct device *dev = &pdata->pdev->dev;
+       struct mii_bus *mdio_bus;
+       struct phy_device *phydev;
+       int ret;
+
+       mdio_bus = mdiobus_alloc();
+       if (!mdio_bus)
+               return -ENOMEM;
+
+       mdio_bus->name = "APM X-Gene Ethernet (v2) MDIO Bus";
+       mdio_bus->read = xge_mdio_read;
+       mdio_bus->write = xge_mdio_write;
+       mdio_bus->priv = pdata;
+       mdio_bus->parent = dev;
+       snprintf(mdio_bus->id, MII_BUS_ID_SIZE, "%s-mii", dev_name(dev));
+       pdata->mdio_bus = mdio_bus;
+
+       mdio_bus->phy_mask = 0x1;
+       ret = mdiobus_register(mdio_bus);
+       if (ret)
+               goto err;
+
+       phydev = phy_find_first(mdio_bus);
+       if (!phydev) {
+               dev_err(dev, "no PHY found\n");
+               goto err;
+       }
+       phydev = phy_connect(ndev, phydev_name(phydev),
+                            &xge_adjust_link,
+                            pdata->resources.phy_mode);
+
+       if (IS_ERR(phydev)) {
+               netdev_err(ndev, "Could not attach to PHY\n");
+               ret = PTR_ERR(phydev);
+               goto err;
+       }
+
+       phydev->supported &= ~(SUPPORTED_10baseT_Half |
+                              SUPPORTED_10baseT_Full |
+                              SUPPORTED_100baseT_Half |
+                              SUPPORTED_100baseT_Full |
+                              SUPPORTED_1000baseT_Half |
+                              SUPPORTED_AUI |
+                              SUPPORTED_MII |
+                              SUPPORTED_FIBRE |
+                              SUPPORTED_BNC);
+       phydev->advertising = phydev->supported;
+       pdata->phy_speed = SPEED_UNKNOWN;
+
+       return 0;
+err:
+       xge_mdio_remove(ndev);
+
+       return ret;
+}
diff --git a/drivers/net/ethernet/apm/xgene-v2/ring.c b/drivers/net/ethernet/apm/xgene-v2/ring.c
new file mode 100644 (file)
index 0000000..3881082
--- /dev/null
@@ -0,0 +1,81 @@
+/*
+ * Applied Micro X-Gene SoC Ethernet v2 Driver
+ *
+ * Copyright (c) 2017, Applied Micro Circuits Corporation
+ * Author(s): Iyappan Subramanian <isubramanian@apm.com>
+ *           Keyur Chudgar <kchudgar@apm.com>
+ *
+ * This program is free software; you can redistribute  it and/or modify it
+ * under  the terms of  the GNU General  Public License as published by the
+ * Free Software Foundation;  either version 2 of the  License, or (at your
+ * option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "main.h"
+
+/* create circular linked list of descriptors */
+void xge_setup_desc(struct xge_desc_ring *ring)
+{
+       struct xge_raw_desc *raw_desc;
+       dma_addr_t dma_h, next_dma;
+       u16 offset;
+       int i;
+
+       for (i = 0; i < XGENE_ENET_NUM_DESC; i++) {
+               raw_desc = &ring->raw_desc[i];
+
+               offset = (i + 1) & (XGENE_ENET_NUM_DESC - 1);
+               next_dma = ring->dma_addr + (offset * XGENE_ENET_DESC_SIZE);
+
+               raw_desc->m0 = cpu_to_le64(SET_BITS(E, 1) |
+                                          SET_BITS(PKT_SIZE, SLOT_EMPTY));
+               dma_h = upper_32_bits(next_dma);
+               raw_desc->m1 = cpu_to_le64(SET_BITS(NEXT_DESC_ADDRL, next_dma) |
+                                          SET_BITS(NEXT_DESC_ADDRH, dma_h));
+       }
+}
+
+void xge_update_tx_desc_addr(struct xge_pdata *pdata)
+{
+       struct xge_desc_ring *ring = pdata->tx_ring;
+       dma_addr_t dma_addr = ring->dma_addr;
+
+       xge_wr_csr(pdata, DMATXDESCL, dma_addr);
+       xge_wr_csr(pdata, DMATXDESCH, upper_32_bits(dma_addr));
+
+       ring->head = 0;
+       ring->tail = 0;
+}
+
+void xge_update_rx_desc_addr(struct xge_pdata *pdata)
+{
+       struct xge_desc_ring *ring = pdata->rx_ring;
+       dma_addr_t dma_addr = ring->dma_addr;
+
+       xge_wr_csr(pdata, DMARXDESCL, dma_addr);
+       xge_wr_csr(pdata, DMARXDESCH, upper_32_bits(dma_addr));
+
+       ring->head = 0;
+       ring->tail = 0;
+}
+
+void xge_intr_enable(struct xge_pdata *pdata)
+{
+       u32 data;
+
+       data = RX_PKT_RCVD | TX_PKT_SENT;
+       xge_wr_csr(pdata, DMAINTRMASK, data);
+}
+
+void xge_intr_disable(struct xge_pdata *pdata)
+{
+       xge_wr_csr(pdata, DMAINTRMASK, 0);
+}
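
xge_setup_desc() above chains slot i to slot (i + 1) & (XGENE_ENET_NUM_DESC - 1),
so the last descriptor links back to the first and the DMA engine can follow
NEXT_DESC_ADDRL/H around the ring indefinitely. With the ring.h values (256
descriptors of 64 bytes each), the wrap for the final slot works out as in
this fragment (illustrative, reusing the driver's own names):

    u16 offset;
    dma_addr_t next_dma;

    /* Last slot, i == 255: the mask wraps the index back to zero. */
    offset = (255 + 1) & (XGENE_ENET_NUM_DESC - 1);         /* == 0 */
    next_dma = ring->dma_addr + offset * XGENE_ENET_DESC_SIZE;
    /* next_dma == ring->dma_addr: the list closes on itself. */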
diff --git a/drivers/net/ethernet/apm/xgene-v2/ring.h b/drivers/net/ethernet/apm/xgene-v2/ring.h
new file mode 100644 (file)
index 0000000..abc8c9a
--- /dev/null
@@ -0,0 +1,119 @@
+/*
+ * Applied Micro X-Gene SoC Ethernet v2 Driver
+ *
+ * Copyright (c) 2017, Applied Micro Circuits Corporation
+ * Author(s): Iyappan Subramanian <isubramanian@apm.com>
+ *           Keyur Chudgar <kchudgar@apm.com>
+ *
+ * This program is free software; you can redistribute  it and/or modify it
+ * under  the terms of  the GNU General  Public License as published by the
+ * Free Software Foundation;  either version 2 of the  License, or (at your
+ * option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef __XGENE_ENET_V2_RING_H__
+#define __XGENE_ENET_V2_RING_H__
+
+#define XGENE_ENET_DESC_SIZE   64
+#define XGENE_ENET_NUM_DESC    256
+#define NUM_BUFS               8
+#define SLOT_EMPTY             0xfff
+
+#define DMATXCTRL              0xa180
+#define DMATXDESCL             0xa184
+#define DMATXDESCH             0xa1a0
+#define DMATXSTATUS            0xa188
+#define DMARXCTRL              0xa18c
+#define DMARXDESCL             0xa190
+#define DMARXDESCH             0xa1a4
+#define DMARXSTATUS            0xa194
+#define DMAINTRMASK            0xa198
+#define DMAINTERRUPT           0xa19c
+
+#define D_POS                  62
+#define D_LEN                  2
+#define E_POS                  63
+#define E_LEN                  1
+#define PKT_ADDRL_POS          0
+#define PKT_ADDRL_LEN          32
+#define PKT_ADDRH_POS          32
+#define PKT_ADDRH_LEN          10
+#define PKT_SIZE_POS           32
+#define PKT_SIZE_LEN           12
+#define NEXT_DESC_ADDRL_POS    0
+#define NEXT_DESC_ADDRL_LEN    32
+#define NEXT_DESC_ADDRH_POS    48
+#define NEXT_DESC_ADDRH_LEN    10
+
+#define TXPKTCOUNT_POS         16
+#define TXPKTCOUNT_LEN         8
+#define RXPKTCOUNT_POS         16
+#define RXPKTCOUNT_LEN         8
+
+#define TX_PKT_SENT            BIT(0)
+#define TX_BUS_ERROR           BIT(3)
+#define RX_PKT_RCVD            BIT(4)
+#define RX_BUS_ERROR           BIT(7)
+#define RXSTATUS_RXPKTRCVD     BIT(0)
+
+struct xge_raw_desc {
+       __le64 m0;
+       __le64 m1;
+       __le64 m2;
+       __le64 m3;
+       __le64 m4;
+       __le64 m5;
+       __le64 m6;
+       __le64 m7;
+};
+
+struct pkt_info {
+       struct sk_buff *skb;
+       dma_addr_t dma_addr;
+       void *pkt_buf;
+};
+
+/* software context of a descriptor ring */
+struct xge_desc_ring {
+       struct net_device *ndev;
+       dma_addr_t dma_addr;
+       u8 head;
+       u8 tail;
+       union {
+               void *desc_addr;
+               struct xge_raw_desc *raw_desc;
+       };
+       struct pkt_info *pkt_info;
+};
+
+static inline u64 xge_set_desc_bits(int pos, int len, u64 val)
+{
+       return (val & ((1ULL << len) - 1)) << pos;
+}
+
+static inline u64 xge_get_desc_bits(int pos, int len, u64 src)
+{
+       return (src >> pos) & ((1ULL << len) - 1);
+}
+
+#define SET_BITS(field, val) \
+               xge_set_desc_bits(field ## _POS, field ## _LEN, val)
+
+#define GET_BITS(field, src) \
+               xge_get_desc_bits(field ## _POS, field ## _LEN, src)
+
+void xge_setup_desc(struct xge_desc_ring *ring);
+void xge_update_tx_desc_addr(struct xge_pdata *pdata);
+void xge_update_rx_desc_addr(struct xge_pdata *pdata);
+void xge_intr_enable(struct xge_pdata *pdata);
+void xge_intr_disable(struct xge_pdata *pdata);
+
+#endif  /* __XGENE_ENET_V2_RING_H__ */
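
As a worked example of the SET_BITS()/GET_BITS() descriptor macros: the
"empty slot" marker that xge_setup_desc() writes packs E (bit 63) and
PKT_SIZE (bits [43:32]) into one 64-bit word. Illustrative fragment:

    u64 m0;

    /* E = 1, PKT_SIZE = SLOT_EMPTY (0xfff). */
    m0 = SET_BITS(E, 1) | SET_BITS(PKT_SIZE, SLOT_EMPTY);
    /* m0 == 0x80000fff00000000 */

    /* Decoding recovers the fields: */
    /* GET_BITS(E, m0)        == 1     */
    /* GET_BITS(PKT_SIZE, m0) == 0xfff */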
index 06e681697c1734872b2317550f17f28c306a63bc..2a835e07adfb58b2b6e4021e4b93a2555be8ff73 100644 (file)
@@ -494,7 +494,7 @@ static void xgene_gmac_set_speed(struct xgene_enet_pdata *pdata)
                break;
        }
 
-       mc2 |= FULL_DUPLEX2 | PAD_CRC;
+       mc2 |= FULL_DUPLEX2 | PAD_CRC | LENGTH_CHK;
        xgene_enet_wr_mcx_mac(pdata, MAC_CONFIG_2_ADDR, mc2);
        xgene_enet_wr_mcx_mac(pdata, INTERFACE_CONTROL_ADDR, intf_ctl);
        xgene_enet_wr_csr(pdata, RGMII_REG_0_ADDR, rgmii);
@@ -623,6 +623,7 @@ static void xgene_enet_cle_bypass(struct xgene_enet_pdata *pdata,
        xgene_enet_rd_csr(pdata, CLE_BYPASS_REG0_0_ADDR, &cb);
        cb |= CFG_CLE_BYPASS_EN0;
        CFG_CLE_IP_PROTOCOL0_SET(&cb, 3);
+       CFG_CLE_IP_HDR_LEN_SET(&cb, 0);
        xgene_enet_wr_csr(pdata, CLE_BYPASS_REG0_0_ADDR, cb);
 
        xgene_enet_rd_csr(pdata, CLE_BYPASS_REG1_0_ADDR, &cb);
index 5f83037bb96b5f4e78b3ad9e7afc110112fe5ff3..d250bfe94d24cb8f2a3a1f19487ac98a9659332e 100644 (file)
@@ -163,6 +163,7 @@ enum xgene_enet_rm {
 #define CFG_RXCLK_MUXSEL0_SET(dst, val)        xgene_set_bits(dst, val, 26, 3)
 
 #define CFG_CLE_IP_PROTOCOL0_SET(dst, val)     xgene_set_bits(dst, val, 16, 2)
+#define CFG_CLE_IP_HDR_LEN_SET(dst, val)       xgene_set_bits(dst, val, 8, 5)
 #define CFG_CLE_DSTQID0_SET(dst, val)          xgene_set_bits(dst, val, 0, 12)
 #define CFG_CLE_FPSEL0_SET(dst, val)           xgene_set_bits(dst, val, 16, 4)
 #define CFG_CLE_NXTFPSEL0_SET(dst, val)                xgene_set_bits(dst, val, 20, 4)
@@ -215,6 +216,7 @@ enum xgene_enet_rm {
 #define ENET_GHD_MODE                  BIT(26)
 #define FULL_DUPLEX2                   BIT(0)
 #define PAD_CRC                                BIT(2)
+#define LENGTH_CHK                     BIT(4)
 #define SCAN_AUTO_INCR                 BIT(5)
 #define TBYT_ADDR                      0x38
 #define TPKT_ADDR                      0x39
index b3568c453b1451f179a3c6ebe18ac524825840ac..5f37ed3506d571d6ddaad170f2147f430a88e51c 100644 (file)
@@ -601,14 +601,24 @@ static netdev_tx_t xgene_enet_start_xmit(struct sk_buff *skb,
        return NETDEV_TX_OK;
 }
 
-static void xgene_enet_skip_csum(struct sk_buff *skb)
+static void xgene_enet_rx_csum(struct sk_buff *skb)
 {
+       struct net_device *ndev = skb->dev;
        struct iphdr *iph = ip_hdr(skb);
 
-       if (!ip_is_fragment(iph) ||
-           (iph->protocol != IPPROTO_TCP && iph->protocol != IPPROTO_UDP)) {
-               skb->ip_summed = CHECKSUM_UNNECESSARY;
-       }
+       if (!(ndev->features & NETIF_F_RXCSUM))
+               return;
+
+       if (skb->protocol != htons(ETH_P_IP))
+               return;
+
+       if (ip_is_fragment(iph))
+               return;
+
+       if (iph->protocol != IPPROTO_TCP && iph->protocol != IPPROTO_UDP)
+               return;
+
+       skb->ip_summed = CHECKSUM_UNNECESSARY;
 }
 
 static void xgene_enet_free_pagepool(struct xgene_enet_desc_ring *buf_pool,
@@ -648,12 +658,24 @@ static void xgene_enet_free_pagepool(struct xgene_enet_desc_ring *buf_pool,
        buf_pool->head = head;
 }
 
+/* Errata 10GE_8 and ENET_11 - accept valid short frames padded to 64B */
+static bool xgene_enet_errata_10GE_8(struct sk_buff *skb, u32 len, u8 status)
+{
+       if (status == INGRESS_PKT_LEN && len == ETHER_MIN_PACKET) {
+               if (ntohs(eth_hdr(skb)->h_proto) < 46)
+                       return true;
+       }
+
+       return false;
+}
+
 static int xgene_enet_rx_frame(struct xgene_enet_desc_ring *rx_ring,
                               struct xgene_enet_raw_desc *raw_desc,
                               struct xgene_enet_raw_desc *exp_desc)
 {
        struct xgene_enet_desc_ring *buf_pool, *page_pool;
        u32 datalen, frag_size, skb_index;
+       struct xgene_enet_pdata *pdata;
        struct net_device *ndev;
        dma_addr_t dma_addr;
        struct sk_buff *skb;
@@ -666,6 +688,7 @@ static int xgene_enet_rx_frame(struct xgene_enet_desc_ring *rx_ring,
        bool nv;
 
        ndev = rx_ring->ndev;
+       pdata = netdev_priv(ndev);
        dev = ndev_to_dev(rx_ring->ndev);
        buf_pool = rx_ring->buf_pool;
        page_pool = rx_ring->page_pool;
@@ -676,30 +699,29 @@ static int xgene_enet_rx_frame(struct xgene_enet_desc_ring *rx_ring,
        skb = buf_pool->rx_skb[skb_index];
        buf_pool->rx_skb[skb_index] = NULL;
 
+       datalen = xgene_enet_get_data_len(le64_to_cpu(raw_desc->m1));
+       skb_put(skb, datalen);
+       prefetch(skb->data - NET_IP_ALIGN);
+       skb->protocol = eth_type_trans(skb, ndev);
+
        /* checking for error */
-       status = (GET_VAL(ELERR, le64_to_cpu(raw_desc->m0)) << LERR_LEN) ||
+       status = (GET_VAL(ELERR, le64_to_cpu(raw_desc->m0)) << LERR_LEN) |
                  GET_VAL(LERR, le64_to_cpu(raw_desc->m0));
-       if (unlikely(status > 2)) {
-               dev_kfree_skb_any(skb);
-               xgene_enet_free_pagepool(page_pool, raw_desc, exp_desc);
-               xgene_enet_parse_error(rx_ring, netdev_priv(rx_ring->ndev),
-                                      status);
-               ret = -EIO;
-               goto out;
+       if (unlikely(status)) {
+               if (!xgene_enet_errata_10GE_8(skb, datalen, status)) {
+                       dev_kfree_skb_any(skb);
+                       xgene_enet_free_pagepool(page_pool, raw_desc, exp_desc);
+                       xgene_enet_parse_error(rx_ring, pdata, status);
+                       goto out;
+               }
        }
 
-       /* strip off CRC as HW isn't doing this */
-       datalen = xgene_enet_get_data_len(le64_to_cpu(raw_desc->m1));
-
        nv = GET_VAL(NV, le64_to_cpu(raw_desc->m0));
-       if (!nv)
+       if (!nv) {
+               /* strip off CRC as HW isn't doing this */
                datalen -= 4;
-
-       skb_put(skb, datalen);
-       prefetch(skb->data - NET_IP_ALIGN);
-
-       if (!nv)
                goto skip_jumbo;
+       }
 
        slots = page_pool->slots - 1;
        head = page_pool->head;
@@ -728,11 +750,7 @@ static int xgene_enet_rx_frame(struct xgene_enet_desc_ring *rx_ring,
 
 skip_jumbo:
        skb_checksum_none_assert(skb);
-       skb->protocol = eth_type_trans(skb, ndev);
-       if (likely((ndev->features & NETIF_F_IP_CSUM) &&
-                  skb->protocol == htons(ETH_P_IP))) {
-               xgene_enet_skip_csum(skb);
-       }
+       xgene_enet_rx_csum(skb);
 
        rx_ring->rx_packets++;
        rx_ring->rx_bytes += datalen;
@@ -2039,7 +2057,7 @@ static int xgene_enet_probe(struct platform_device *pdev)
        xgene_enet_setup_ops(pdata);
 
        if (pdata->phy_mode == PHY_INTERFACE_MODE_XGMII) {
-               ndev->features |= NETIF_F_TSO;
+               ndev->features |= NETIF_F_TSO | NETIF_F_RXCSUM;
                spin_lock_init(&pdata->mss_lock);
        }
        ndev->hw_features = ndev->features;
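A note on the xgene_enet_errata_10GE_8() helper added above: it accepts 64-byte frames that the hardware flags with an INGRESS_PKT_LEN error. For 802.3-style frames, h_proto carries a payload length rather than an EtherType, and a value below 46 means the payload was padded up to the 64-byte minimum frame, so the length mismatch is expected rather than a real error. A minimal standalone sketch of that acceptance test, assuming the same 64-byte minimum frame and 46-byte minimum payload (the names and constants below are local to the sketch):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define ETHER_MIN_PACKET   64  /* minimum Ethernet frame, as in the patch */
#define ETH_MIN_PAYLOAD    46  /* payloads below this are padded by the sender */

/* Model of the errata test: a length-errored 64B frame whose 802.3
 * length field is under 46 bytes was merely padded, not corrupted. */
static bool frame_is_padded_runt(uint16_t h_proto_host, uint32_t len,
                                 bool len_error)
{
        return len_error && len == ETHER_MIN_PACKET &&
               h_proto_host < ETH_MIN_PAYLOAD;
}

int main(void)
{
        /* 802.3 frame with a 10-byte payload, padded to 64 bytes */
        printf("%d\n", frame_is_padded_runt(10, 64, true));     /* 1: accept */
        /* genuine length error on an EtherType frame (IPv4 = 0x0800) */
        printf("%d\n", frame_is_padded_runt(0x0800, 64, true)); /* 0: drop */
        return 0;
}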
index 52571741da9f5559145e5ce172a7ee910818bbc4..0d4be2425ebc2c24b4ecfa26fc4637f76ff76f91 100644 (file)
@@ -41,6 +41,7 @@
 #include "../../../phy/mdio-xgene.h"
 
 #define XGENE_DRV_VERSION      "v1.0"
+#define ETHER_MIN_PACKET       64
 #define XGENE_ENET_STD_MTU     1536
 #define XGENE_ENET_MAX_MTU     9600
 #define SKB_BUFFER_SIZE                (XGENE_ENET_STD_MTU - NET_IP_ALIGN)
index ece19e6d68e3bc53e0dddc8dbbb65e1bdf4af0e1..423240c97d398735d649d401dd6c1825911735cf 100644 (file)
@@ -341,8 +341,15 @@ static void xgene_xgmac_init(struct xgene_enet_pdata *pdata)
 
        xgene_enet_rd_csr(pdata, XG_RSIF_CONFIG_REG_ADDR, &data);
        data |= CFG_RSIF_FPBUFF_TIMEOUT_EN;
+       /* Errata 10GE_1 - FIFO threshold default value incorrect */
+       RSIF_CLE_BUFF_THRESH_SET(&data, XG_RSIF_CLE_BUFF_THRESH);
        xgene_enet_wr_csr(pdata, XG_RSIF_CONFIG_REG_ADDR, data);
 
+       /* Errata 10GE_1 - FIFO threshold default value incorrect */
+       xgene_enet_rd_csr(pdata, XG_RSIF_CONFIG1_REG_ADDR, &data);
+       RSIF_PLC_CLE_BUFF_THRESH_SET(&data, XG_RSIF_PLC_CLE_BUFF_THRESH);
+       xgene_enet_wr_csr(pdata, XG_RSIF_CONFIG1_REG_ADDR, data);
+
        xgene_enet_rd_csr(pdata, XG_ENET_SPARE_CFG_REG_ADDR, &data);
        data |= BIT(12);
        xgene_enet_wr_csr(pdata, XG_ENET_SPARE_CFG_REG_ADDR, data);
index 03b847ad89370ec2f79d2b731f9dbc24760a7224..e644a429ebf448dbba856b40a3bec57912cf1b8c 100644 (file)
 #define XG_DEF_PAUSE_THRES             0x390
 #define XG_DEF_PAUSE_OFF_THRES         0x2c0
 #define XG_RSIF_CONFIG_REG_ADDR                0x00a0
+#define XG_RSIF_CLE_BUFF_THRESH                0x3
+#define RSIF_CLE_BUFF_THRESH_SET(dst, val)     xgene_set_bits(dst, val, 0, 3)
+#define XG_RSIF_CONFIG1_REG_ADDR       0x00b8
+#define XG_RSIF_PLC_CLE_BUFF_THRESH    0x1
+#define RSIF_PLC_CLE_BUFF_THRESH_SET(dst, val) xgene_set_bits(dst, val, 0, 2)
 #define XCLE_BYPASS_REG0_ADDR           0x0160
 #define XCLE_BYPASS_REG1_ADDR           0x0164
 #define XG_CFG_BYPASS_ADDR             0x0204
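The *_SET macros above pack a threshold value into a small field of the CSR image through the driver's xgene_set_bits() helper. A standalone sketch of that insert-field operation, assuming xgene_set_bits(dst, val, start, len) writes val into the len-bit field of *dst beginning at bit start:

#include <assert.h>
#include <stdint.h>

/* Sketch of an xgene_set_bits-style helper: place val into the
 * len-bit field of *dst that starts at bit position start. */
static void set_bits(uint32_t *dst, uint32_t val, unsigned start,
                     unsigned len)
{
        uint32_t mask = ((1u << len) - 1) << start;

        *dst = (*dst & ~mask) | ((val << start) & mask);
}

int main(void)
{
        uint32_t csr = 0xffffffff;

        set_bits(&csr, 0x3, 0, 3);      /* like RSIF_CLE_BUFF_THRESH_SET */
        assert((csr & 0x7) == 0x3);
        set_bits(&csr, 0x1, 0, 2);      /* like RSIF_PLC_CLE_BUFF_THRESH_SET */
        assert((csr & 0x3) == 0x1);
        return 0;
}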
index dad63623be6a93672974fb43ee50b518fff59ab5..5d6c40d86775dd0189de49173210f4d38ee6934c 100644 (file)
@@ -98,11 +98,7 @@ static int aq_ndev_change_mtu(struct net_device *ndev, int new_mtu)
 
        if (err < 0)
                goto err_exit;
-
-       if (netif_running(ndev)) {
-               aq_ndev_close(ndev);
-               aq_ndev_open(ndev);
-       }
+       ndev->mtu = new_mtu;
 
 err_exit:
        return err;
index ee78444bfb8851214709920795e26d658c4ca9b5..cdb02991f249c6354b7095d9d777316617c2be42 100644 (file)
@@ -487,6 +487,9 @@ static unsigned int aq_nic_map_skb(struct aq_nic_s *self,
                dx_buff->mss = skb_shinfo(skb)->gso_size;
                dx_buff->is_txc = 1U;
 
+               dx_buff->is_ipv6 =
+                       (ip_hdr(skb)->version == 6) ? 1U : 0U;
+
                dx = aq_ring_next_dx(ring, dx);
                dx_buff = &ring->buff_ring[dx];
                ++ret;
@@ -510,10 +513,22 @@ static unsigned int aq_nic_map_skb(struct aq_nic_s *self,
        if (skb->ip_summed == CHECKSUM_PARTIAL) {
                dx_buff->is_ip_cso = (htons(ETH_P_IP) == skb->protocol) ?
                        1U : 0U;
-               dx_buff->is_tcp_cso =
-                       (ip_hdr(skb)->protocol == IPPROTO_TCP) ? 1U : 0U;
-               dx_buff->is_udp_cso =
-                       (ip_hdr(skb)->protocol == IPPROTO_UDP) ? 1U : 0U;
+
+               if (ip_hdr(skb)->version == 4) {
+                       dx_buff->is_tcp_cso =
+                               (ip_hdr(skb)->protocol == IPPROTO_TCP) ?
+                                       1U : 0U;
+                       dx_buff->is_udp_cso =
+                               (ip_hdr(skb)->protocol == IPPROTO_UDP) ?
+                                       1U : 0U;
+               } else if (ip_hdr(skb)->version == 6) {
+                       dx_buff->is_tcp_cso =
+                               (ipv6_hdr(skb)->nexthdr == NEXTHDR_TCP) ?
+                                       1U : 0U;
+                       dx_buff->is_udp_cso =
+                               (ipv6_hdr(skb)->nexthdr == NEXTHDR_UDP) ?
+                                       1U : 0U;
+               }
        }
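The hunk above makes the TX checksum-offload flags version-aware: the driver reads iphdr->protocol for IPv4 and ipv6hdr->nexthdr for IPv6. Since the IPv6 next-header values for TCP and UDP coincide with the IP protocol numbers (6 and 17), the two branches can be modeled by one classifier; the sketch below is a standalone simplification under that assumption, not the driver's exact code:

#include <stdbool.h>
#include <stdint.h>

#define PROTO_TCP 6   /* IPPROTO_TCP and NEXTHDR_TCP share the value 6 */
#define PROTO_UDP 17  /* IPPROTO_UDP and NEXTHDR_UDP share the value 17 */

struct cso_flags {
        bool is_tcp_cso;
        bool is_udp_cso;
};

/* Sketch of the aq_nic_map_skb() classification: version is the L3
 * header's version nibble, l4proto the protocol/next-header field. */
static struct cso_flags classify(uint8_t version, uint8_t l4proto)
{
        struct cso_flags f = { false, false };

        if (version == 4 || version == 6) {
                f.is_tcp_cso = (l4proto == PROTO_TCP);
                f.is_udp_cso = (l4proto == PROTO_UDP);
        }
        return f;
}

int main(void)
{
        return classify(6, PROTO_TCP).is_tcp_cso ? 0 : 1;
}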
 
        for (; nr_frags--; ++frag_count) {
index 581de71a958a3519682272a49dec55432e9eebed..4c6c882c6a1c424238473ea40ecf9f0ebf7cee28 100644 (file)
@@ -213,9 +213,9 @@ void aq_pci_func_free_irqs(struct aq_pci_func_s *self)
                if (!((1U << i) & self->msix_entry_mask))
                        continue;
 
-               free_irq(pci_irq_vector(pdev, i), self->aq_vec[i]);
                if (pdev->msix_enabled)
                        irq_set_affinity_hint(pci_irq_vector(pdev, i), NULL);
+               free_irq(pci_irq_vector(pdev, i), self->aq_vec[i]);
                self->msix_entry_mask &= ~(1U << i);
        }
 }
index 0358e6072d45ab94181409de0dd17e80106fbf2d..3a8a4aa13687ff42510e7a260b1de6715a55d8d5 100644 (file)
@@ -101,6 +101,7 @@ int aq_ring_init(struct aq_ring_s *self)
        self->hw_head = 0;
        self->sw_head = 0;
        self->sw_tail = 0;
+       spin_lock_init(&self->header.lock);
        return 0;
 }
 
index 2572546450685d25c8ca4beb71150c356e7c82f6..eecd6d1c4d731a4e648e6811a5615498ee3a8965 100644 (file)
@@ -58,7 +58,8 @@ struct __packed aq_ring_buff_s {
                        u8 len_l2;
                        u8 len_l3;
                        u8 len_l4;
-                       u8 rsvd2;
+                       u8 is_ipv6:1;
+                       u8 rsvd2:7;
                        u32 len_pkt;
                };
        };
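Carving is_ipv6 out of the former rsvd2 byte leaves the __packed descriptor layout unchanged: the 1-bit flag and the 7 reserved bits share the byte that rsvd2 used to occupy alone. A standalone sketch (hypothetical struct names) that asserts the size is preserved:

#include <stdint.h>

/* Sketch mirroring the descriptor change: one byte split into a
 * 1-bit flag plus 7 reserved bits, overall size unchanged. */
struct before {
        uint8_t len_l2, len_l3, len_l4;
        uint8_t rsvd2;
        uint32_t len_pkt;
} __attribute__((packed));

struct after {
        uint8_t len_l2, len_l3, len_l4;
        uint8_t is_ipv6:1;
        uint8_t rsvd2:7;
        uint32_t len_pkt;
} __attribute__((packed));

int main(void)
{
        _Static_assert(sizeof(struct before) == sizeof(struct after),
                       "bitfield split must not change descriptor size");
        return 0;
}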
index a2b746a2dd50b8825250f5ab1de8a01b5afb32c9..4ee15ff06a448b72dbd6763427d9d02dfadad268 100644 (file)
@@ -433,6 +433,9 @@ static int hw_atl_a0_hw_ring_tx_xmit(struct aq_hw_s *self,
                                    buff->len_l3 +
                                    buff->len_l2);
                        is_gso = true;
+
+                       if (buff->is_ipv6)
+                               txd->ctl |= HW_ATL_A0_TXD_CTL_CMD_IPV6;
                } else {
                        buff_pa_len = buff->len;
 
@@ -458,6 +461,7 @@ static int hw_atl_a0_hw_ring_tx_xmit(struct aq_hw_s *self,
                        if (unlikely(buff->is_eop)) {
                                txd->ctl |= HW_ATL_A0_TXD_CTL_EOP;
                                txd->ctl |= HW_ATL_A0_TXD_CTL_CMD_WB;
+                               is_gso = false;
                        }
                }
 
index 1093ea18823a32fc6cb441ab45b0b3a9a82fecc2..0592a0330cf0d601f4b9a27f0d349aeccc66f833 100644 (file)
@@ -137,6 +137,7 @@ static struct aq_hw_caps_s hw_atl_a0_hw_caps_ = {
        .tx_rings = HW_ATL_A0_TX_RINGS,
        .rx_rings = HW_ATL_A0_RX_RINGS,
        .hw_features = NETIF_F_HW_CSUM |
+                       NETIF_F_RXCSUM |
                        NETIF_F_RXHASH |
                        NETIF_F_SG |
                        NETIF_F_TSO,
index cab2931dab9ac354821e4a30bb8517ccfe0041eb..42150708191dbf67d91b33218a2a275e9b5fd45d 100644 (file)
@@ -471,6 +471,9 @@ static int hw_atl_b0_hw_ring_tx_xmit(struct aq_hw_s *self,
                                    buff->len_l3 +
                                    buff->len_l2);
                        is_gso = true;
+
+                       if (buff->is_ipv6)
+                               txd->ctl |= HW_ATL_B0_TXD_CTL_CMD_IPV6;
                } else {
                        buff_pa_len = buff->len;
 
@@ -496,6 +499,7 @@ static int hw_atl_b0_hw_ring_tx_xmit(struct aq_hw_s *self,
                        if (unlikely(buff->is_eop)) {
                                txd->ctl |= HW_ATL_B0_TXD_CTL_EOP;
                                txd->ctl |= HW_ATL_B0_TXD_CTL_CMD_WB;
+                               is_gso = false;
                        }
                }
 
index 8bdee3ddd5a0bd9044063caf5686fefcc6b5465f..f3957e9303405c3f26c9f7f7d6507009d5804534 100644 (file)
@@ -188,6 +188,7 @@ static struct aq_hw_caps_s hw_atl_b0_hw_caps_ = {
        .tx_rings = HW_ATL_B0_TX_RINGS,
        .rx_rings = HW_ATL_B0_RX_RINGS,
        .hw_features = NETIF_F_HW_CSUM |
+                       NETIF_F_RXCSUM |
                        NETIF_F_RXHASH |
                        NETIF_F_SG |
                        NETIF_F_TSO |
index d4a409139ea2c9cc13a03d7d63511d322210cc7a..78c5de467426f1e4276cebe8ee81cc0091d4c6fa 100644 (file)
@@ -102,9 +102,6 @@ struct alx_napi {
 
 #define ALX_MAX_NAPIS 8
 
-#define ALX_FLAG_USING_MSIX    BIT(0)
-#define ALX_FLAG_USING_MSI     BIT(1)
-
 struct alx_priv {
        struct net_device *dev;
 
@@ -112,7 +109,6 @@ struct alx_priv {
 
        /* msi-x vectors */
        int num_vec;
-       struct msix_entry *msix_entries;
 
        /* all descriptor memory */
        struct {
@@ -139,8 +135,6 @@ struct alx_priv {
 
        u16 msg_enable;
 
-       int flags;
-
        /* protects hw.stats */
        spinlock_t stats_lock;
 };
index 6a27c266267587685956b8720cd0a6bc9b3fcb69..a8c2db881b75354f6297094ddedbd26773e65601 100644 (file)
@@ -314,7 +314,7 @@ static int alx_poll(struct napi_struct *napi, int budget)
        napi_complete_done(&np->napi, work);
 
        /* enable interrupt */
-       if (alx->flags & ALX_FLAG_USING_MSIX) {
+       if (alx->hw.pdev->msix_enabled) {
                alx_mask_msix(hw, np->vec_idx, false);
        } else {
                spin_lock_irqsave(&alx->irq_lock, flags);
@@ -811,7 +811,7 @@ static void alx_config_vector_mapping(struct alx_priv *alx)
        u32 tbl[2] = {0, 0};
        int i, vector, idx, shift;
 
-       if (alx->flags & ALX_FLAG_USING_MSIX) {
+       if (alx->hw.pdev->msix_enabled) {
                /* tx mappings */
                for (i = 0, vector = 1; i < alx->num_txq; i++, vector++) {
                        idx = txq_vec_mapping_shift[i * 2];
@@ -828,29 +828,19 @@ static void alx_config_vector_mapping(struct alx_priv *alx)
        alx_write_mem32(hw, ALX_MSI_ID_MAP, 0);
 }
 
-static bool alx_enable_msix(struct alx_priv *alx)
+static int alx_enable_msix(struct alx_priv *alx)
 {
-       int i, err, num_vec, num_txq, num_rxq;
+       int err, num_vec, num_txq, num_rxq;
 
        num_txq = min_t(int, num_online_cpus(), ALX_MAX_TX_QUEUES);
        num_rxq = 1;
        num_vec = max_t(int, num_txq, num_rxq) + 1;
 
-       alx->msix_entries = kcalloc(num_vec, sizeof(struct msix_entry),
-                                   GFP_KERNEL);
-       if (!alx->msix_entries) {
-               netdev_warn(alx->dev, "Allocation of msix entries failed!\n");
-               return false;
-       }
-
-       for (i = 0; i < num_vec; i++)
-               alx->msix_entries[i].entry = i;
-
-       err = pci_enable_msix(alx->hw.pdev, alx->msix_entries, num_vec);
+       err = pci_alloc_irq_vectors(alx->hw.pdev, num_vec, num_vec,
+                       PCI_IRQ_MSIX);
        if (err < 0) {
-               kfree(alx->msix_entries);
                netdev_warn(alx->dev, "Enabling MSI-X interrupts failed!\n");
-               return false;
+               return err;
        }
 
        alx->num_vec = num_vec;
@@ -858,7 +848,7 @@ static bool alx_enable_msix(struct alx_priv *alx)
        alx->num_txq = num_txq;
        alx->num_rxq = num_rxq;
 
-       return true;
+       return err;
 }
 
 static int alx_request_msix(struct alx_priv *alx)
@@ -866,7 +856,7 @@ static int alx_request_msix(struct alx_priv *alx)
        struct net_device *netdev = alx->dev;
        int i, err, vector = 0, free_vector = 0;
 
-       err = request_irq(alx->msix_entries[0].vector, alx_intr_msix_misc,
+       err = request_irq(pci_irq_vector(alx->hw.pdev, 0), alx_intr_msix_misc,
                          0, netdev->name, alx);
        if (err)
                goto out_err;
@@ -889,7 +879,7 @@ static int alx_request_msix(struct alx_priv *alx)
                        sprintf(np->irq_lbl, "%s-unused", netdev->name);
 
                np->vec_idx = vector;
-               err = request_irq(alx->msix_entries[vector].vector,
+               err = request_irq(pci_irq_vector(alx->hw.pdev, vector),
                                  alx_intr_msix_ring, 0, np->irq_lbl, np);
                if (err)
                        goto out_free;
@@ -897,47 +887,31 @@ static int alx_request_msix(struct alx_priv *alx)
        return 0;
 
 out_free:
-       free_irq(alx->msix_entries[free_vector++].vector, alx);
+       free_irq(pci_irq_vector(alx->hw.pdev, free_vector++), alx);
 
        vector--;
        for (i = 0; i < vector; i++)
-               free_irq(alx->msix_entries[free_vector++].vector,
+               free_irq(pci_irq_vector(alx->hw.pdev, free_vector++),
                         alx->qnapi[i]);
 
 out_err:
        return err;
 }
 
-static void alx_init_intr(struct alx_priv *alx, bool msix)
+static int alx_init_intr(struct alx_priv *alx)
 {
-       if (msix) {
-               if (alx_enable_msix(alx))
-                       alx->flags |= ALX_FLAG_USING_MSIX;
-       }
+       int ret;
 
-       if (!(alx->flags & ALX_FLAG_USING_MSIX)) {
-               alx->num_vec = 1;
-               alx->num_napi = 1;
-               alx->num_txq = 1;
-               alx->num_rxq = 1;
-
-               if (!pci_enable_msi(alx->hw.pdev))
-                       alx->flags |= ALX_FLAG_USING_MSI;
-       }
-}
-
-static void alx_disable_advanced_intr(struct alx_priv *alx)
-{
-       if (alx->flags & ALX_FLAG_USING_MSIX) {
-               kfree(alx->msix_entries);
-               pci_disable_msix(alx->hw.pdev);
-               alx->flags &= ~ALX_FLAG_USING_MSIX;
-       }
+       ret = pci_alloc_irq_vectors(alx->hw.pdev, 1, 1,
+                       PCI_IRQ_MSI | PCI_IRQ_LEGACY);
+       if (ret)
+               return ret;
 
-       if (alx->flags & ALX_FLAG_USING_MSI) {
-               pci_disable_msi(alx->hw.pdev);
-               alx->flags &= ~ALX_FLAG_USING_MSI;
-       }
+       alx->num_vec = 1;
+       alx->num_napi = 1;
+       alx->num_txq = 1;
+       alx->num_rxq = 1;
+       return 0;
 }
 
 static void alx_irq_enable(struct alx_priv *alx)
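The alx conversion above drops the driver-managed msix_entry array in favor of the generic pci_alloc_irq_vectors()/pci_irq_vector() API, and pdev->msix_enabled/msi_enabled replace the old ALX_FLAG_USING_* bookkeeping. A condensed sketch of the resulting pattern, with hypothetical function and handler names; vector 0 is assumed to carry misc/link events as in this driver:

#include <linux/pci.h>
#include <linux/interrupt.h>

/* Sketch: try multi-vector MSI-X first, fall back to a single MSI
 * or legacy vector, then address vectors only by index. */
static int sketch_setup_irqs(struct pci_dev *pdev, int nvec,
                             irq_handler_t misc_handler, void *ctx)
{
        int ret;

        ret = pci_alloc_irq_vectors(pdev, nvec, nvec, PCI_IRQ_MSIX);
        if (ret < 0)
                ret = pci_alloc_irq_vectors(pdev, 1, 1,
                                            PCI_IRQ_MSI | PCI_IRQ_LEGACY);
        if (ret < 0)
                return ret;

        /* vector 0 handles misc/link events in this layout */
        ret = request_irq(pci_irq_vector(pdev, 0), misc_handler, 0,
                          "sketch-misc", ctx);
        if (ret)
                pci_free_irq_vectors(pdev);
        return ret;
}

Note that pci_alloc_irq_vectors() returns the number of vectors allocated on success, so only negative values indicate failure.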
@@ -950,10 +924,11 @@ static void alx_irq_enable(struct alx_priv *alx)
        alx_write_mem32(hw, ALX_IMR, alx->int_mask);
        alx_post_write(hw);
 
-       if (alx->flags & ALX_FLAG_USING_MSIX)
+       if (alx->hw.pdev->msix_enabled) {
                /* enable all msix irqs */
                for (i = 0; i < alx->num_vec; i++)
                        alx_mask_msix(hw, i, false);
+       }
 }
 
 static void alx_irq_disable(struct alx_priv *alx)
@@ -965,13 +940,13 @@ static void alx_irq_disable(struct alx_priv *alx)
        alx_write_mem32(hw, ALX_IMR, 0);
        alx_post_write(hw);
 
-       if (alx->flags & ALX_FLAG_USING_MSIX) {
+       if (alx->hw.pdev->msix_enabled) {
                for (i = 0; i < alx->num_vec; i++) {
                        alx_mask_msix(hw, i, true);
-                       synchronize_irq(alx->msix_entries[i].vector);
+                       synchronize_irq(pci_irq_vector(alx->hw.pdev, i));
                }
        } else {
-               synchronize_irq(alx->hw.pdev->irq);
+               synchronize_irq(pci_irq_vector(alx->hw.pdev, 0));
        }
 }
 
@@ -981,8 +956,11 @@ static int alx_realloc_resources(struct alx_priv *alx)
 
        alx_free_rings(alx);
        alx_free_napis(alx);
-       alx_disable_advanced_intr(alx);
-       alx_init_intr(alx, false);
+       pci_free_irq_vectors(alx->hw.pdev);
+
+       err = alx_init_intr(alx);
+       if (err)
+               return err;
 
        err = alx_alloc_napis(alx);
        if (err)
@@ -1004,7 +982,7 @@ static int alx_request_irq(struct alx_priv *alx)
 
        msi_ctrl = (hw->imt >> 1) << ALX_MSI_RETRANS_TM_SHIFT;
 
-       if (alx->flags & ALX_FLAG_USING_MSIX) {
+       if (alx->hw.pdev->msix_enabled) {
                alx_write_mem32(hw, ALX_MSI_RETRANS_TIMER, msi_ctrl);
                err = alx_request_msix(alx);
                if (!err)
@@ -1016,20 +994,20 @@ static int alx_request_irq(struct alx_priv *alx)
                        goto out;
        }
 
-       if (alx->flags & ALX_FLAG_USING_MSI) {
+       if (alx->hw.pdev->msi_enabled) {
                alx_write_mem32(hw, ALX_MSI_RETRANS_TIMER,
                                msi_ctrl | ALX_MSI_MASK_SEL_LINE);
-               err = request_irq(pdev->irq, alx_intr_msi, 0,
+               err = request_irq(pci_irq_vector(pdev, 0), alx_intr_msi, 0,
                                  alx->dev->name, alx);
                if (!err)
                        goto out;
+
                /* fall back to legacy interrupt */
-               alx->flags &= ~ALX_FLAG_USING_MSI;
-               pci_disable_msi(alx->hw.pdev);
+               pci_free_irq_vectors(alx->hw.pdev);
        }
 
        alx_write_mem32(hw, ALX_MSI_RETRANS_TIMER, 0);
-       err = request_irq(pdev->irq, alx_intr_legacy, IRQF_SHARED,
+       err = request_irq(pci_irq_vector(pdev, 0), alx_intr_legacy, IRQF_SHARED,
                          alx->dev->name, alx);
 out:
        if (!err)
@@ -1042,18 +1020,15 @@ out:
 static void alx_free_irq(struct alx_priv *alx)
 {
        struct pci_dev *pdev = alx->hw.pdev;
-       int i, vector = 0;
+       int i;
 
-       if (alx->flags & ALX_FLAG_USING_MSIX) {
-               free_irq(alx->msix_entries[vector++].vector, alx);
+       free_irq(pci_irq_vector(pdev, 0), alx);
+       if (alx->hw.pdev->msix_enabled) {
                for (i = 0; i < alx->num_napi; i++)
-                       free_irq(alx->msix_entries[vector++].vector,
-                                alx->qnapi[i]);
-       } else {
-               free_irq(pdev->irq, alx);
+                       free_irq(pci_irq_vector(pdev, i + 1), alx->qnapi[i]);
        }
 
-       alx_disable_advanced_intr(alx);
+       pci_free_irq_vectors(pdev);
 }
 
 static int alx_identify_hw(struct alx_priv *alx)
@@ -1221,7 +1196,12 @@ static int __alx_open(struct alx_priv *alx, bool resume)
 {
        int err;
 
-       alx_init_intr(alx, true);
+       err = alx_enable_msix(alx);
+       if (err < 0) {
+               err = alx_init_intr(alx);
+               if (err)
+                       return err;
+       }
 
        if (!resume)
                netif_carrier_off(alx->dev);
@@ -1264,7 +1244,7 @@ out_free_rings:
        alx_free_rings(alx);
        alx_free_napis(alx);
 out_disable_adv_intr:
-       alx_disable_advanced_intr(alx);
+       pci_free_irq_vectors(alx->hw.pdev);
        return err;
 }
 
@@ -1637,11 +1617,11 @@ static void alx_poll_controller(struct net_device *netdev)
        struct alx_priv *alx = netdev_priv(netdev);
        int i;
 
-       if (alx->flags & ALX_FLAG_USING_MSIX) {
+       if (alx->hw.pdev->msix_enabled) {
                alx_intr_msix_misc(0, alx);
                for (i = 0; i < alx->num_txq; i++)
                        alx_intr_msix_ring(0, alx->qnapi[i]);
-       } else if (alx->flags & ALX_FLAG_USING_MSI)
+       } else if (alx->hw.pdev->msi_enabled)
                alx_intr_msi(0, alx);
        else
                alx_intr_legacy(0, alx);
@@ -1783,7 +1763,7 @@ static int alx_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 
        netdev->netdev_ops = &alx_netdev_ops;
        netdev->ethtool_ops = &alx_ethtool_ops;
-       netdev->irq = pdev->irq;
+       netdev->irq = pci_irq_vector(pdev, 0);
        netdev->watchdog_timeo = ALX_WATCHDOG_TIME;
 
        if (ent->driver_data & ALX_DEV_QUIRK_MSI_INTX_DISABLE_BUG)
index 940fb24bba210ecd73f968082fefda0697628be1..96413808c72699319573e82481b73f587db612d8 100644 (file)
@@ -109,7 +109,6 @@ config TIGON3
        tristate "Broadcom Tigon3 support"
        depends on PCI
        select PHYLIB
-       select HWMON
        imply PTP_1588_CLOCK
        ---help---
          This driver supports Broadcom Tigon3 based gigabit Ethernet cards.
@@ -117,6 +116,13 @@ config TIGON3
          To compile this driver as a module, choose M here: the module
          will be called tg3.  This is recommended.
 
+config TIGON3_HWMON
+       bool "Broadcom Tigon3 HWMON support"
+       default y
+       depends on TIGON3 && HWMON && !(TIGON3=y && HWMON=m)
+       ---help---
+         Say Y if you want to expose the thermal sensor on Tigon3 devices.
+
 config BNX2X
        tristate "Broadcom NetXtremeII 10Gb support"
        depends on PCI
index a68d4889f5db74d895f1bfb9e74c46bd2b892dbc..099b374c1b17bbd8e9cabe68cdc7cd991a258737 100644 (file)
@@ -22,6 +22,7 @@
 #include <linux/of_mdio.h>
 #include <linux/phy.h>
 #include <linux/phy_fixed.h>
+#include <net/dsa.h>
 #include <net/ip.h>
 #include <net/ipv6.h>
 
@@ -284,6 +285,7 @@ static const struct bcm_sysport_stats bcm_sysport_gstrings_stats[] = {
        STAT_MIB_SOFT("alloc_rx_buff_failed", mib.alloc_rx_buff_failed),
        STAT_MIB_SOFT("rx_dma_failed", mib.rx_dma_failed),
        STAT_MIB_SOFT("tx_dma_failed", mib.tx_dma_failed),
+       /* Per TX-queue statistics are dynamically appended */
 };
 
 #define BCM_SYSPORT_STATS_LEN  ARRAY_SIZE(bcm_sysport_gstrings_stats)
@@ -338,7 +340,8 @@ static int bcm_sysport_get_sset_count(struct net_device *dev, int string_set)
                                continue;
                        j++;
                }
-               return j;
+               /* Include per-queue statistics */
+               return j + dev->num_tx_queues * NUM_SYSPORT_TXQ_STAT;
        default:
                return -EOPNOTSUPP;
        }
@@ -349,6 +352,7 @@ static void bcm_sysport_get_strings(struct net_device *dev,
 {
        struct bcm_sysport_priv *priv = netdev_priv(dev);
        const struct bcm_sysport_stats *s;
+       char buf[128];
        int i, j;
 
        switch (stringset) {
@@ -363,6 +367,18 @@ static void bcm_sysport_get_strings(struct net_device *dev,
                               ETH_GSTRING_LEN);
                        j++;
                }
+
+               for (i = 0; i < dev->num_tx_queues; i++) {
+                       snprintf(buf, sizeof(buf), "txq%d_packets", i);
+                       memcpy(data + j * ETH_GSTRING_LEN, buf,
+                              ETH_GSTRING_LEN);
+                       j++;
+
+                       snprintf(buf, sizeof(buf), "txq%d_bytes", i);
+                       memcpy(data + j * ETH_GSTRING_LEN, buf,
+                              ETH_GSTRING_LEN);
+                       j++;
+               }
                break;
        default:
                break;
@@ -418,6 +434,7 @@ static void bcm_sysport_get_stats(struct net_device *dev,
                                  struct ethtool_stats *stats, u64 *data)
 {
        struct bcm_sysport_priv *priv = netdev_priv(dev);
+       struct bcm_sysport_tx_ring *ring;
        int i, j;
 
        if (netif_running(dev))
@@ -436,6 +453,22 @@ static void bcm_sysport_get_stats(struct net_device *dev,
                data[j] = *(unsigned long *)p;
                j++;
        }
+
+       /* For SYSTEMPORT Lite, since we have holes in our statistics, j
+        * would equal BCM_SYSPORT_STATS_LEN at the end of the loop, but it
+        * needs to point to the total number of statistics minus the
+        * number of per-TX-queue statistics
+        */
+       j = bcm_sysport_get_sset_count(dev, ETH_SS_STATS) -
+           dev->num_tx_queues * NUM_SYSPORT_TXQ_STAT;
+
+       for (i = 0; i < dev->num_tx_queues; i++) {
+               ring = &priv->tx_rings[i];
+               data[j] = ring->packets;
+               j++;
+               data[j] = ring->bytes;
+               j++;
+       }
 }
 
 static void bcm_sysport_get_wol(struct net_device *dev,
@@ -637,6 +670,9 @@ static unsigned int bcm_sysport_desc_rx(struct bcm_sysport_priv *priv,
        u16 len, status;
        struct bcm_rsb *rsb;
 
+       /* Clear status before servicing to reduce spurious interrupts */
+       intrl2_0_writel(priv, INTRL2_0_RDMA_MBDONE, INTRL2_CPU_CLEAR);
+
        /* Determine how much we should process since last call, SYSTEMPORT Lite
         * groups the producer and consumer indexes into the same 32-bit
         * register, which we access using RDMA_CONS_INDEX
@@ -647,11 +683,7 @@ static unsigned int bcm_sysport_desc_rx(struct bcm_sysport_priv *priv,
                p_index = rdma_readl(priv, RDMA_CONS_INDEX);
        p_index &= RDMA_PROD_INDEX_MASK;
 
-       if (p_index < priv->rx_c_index)
-               to_process = (RDMA_CONS_INDEX_MASK + 1) -
-                       priv->rx_c_index + p_index;
-       else
-               to_process = p_index - priv->rx_c_index;
+       to_process = (p_index - priv->rx_c_index) & RDMA_CONS_INDEX_MASK;
 
        netif_dbg(priv, rx_status, ndev,
                  "p_index=%d rx_c_index=%d to_process=%d\n",
@@ -746,26 +778,26 @@ next:
        return processed;
 }
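The masked subtraction introduced above replaces the explicit wraparound branch: unsigned arithmetic wraps modulo 2^32 and the AND reduces the distance modulo the power-of-two index space, so the result is correct whether or not the producer index has wrapped past the consumer. A standalone sketch with a hypothetical 16-bit mask:

#include <assert.h>
#include <stdint.h>

#define INDEX_MASK 0xffff  /* hypothetical power-of-two-minus-one mask */

/* Distance from consumer to producer on a wrapping index space:
 * unsigned subtraction plus masking replaces the explicit
 * "if (producer < consumer)" wraparound branch. */
static uint32_t ring_distance(uint32_t producer, uint32_t consumer)
{
        return (producer - consumer) & INDEX_MASK;
}

int main(void)
{
        assert(ring_distance(100, 90) == 10);    /* no wrap */
        assert(ring_distance(5, 0xfffb) == 10);  /* wrapped past zero */
        return 0;
}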
 
-static void bcm_sysport_tx_reclaim_one(struct bcm_sysport_priv *priv,
+static void bcm_sysport_tx_reclaim_one(struct bcm_sysport_tx_ring *ring,
                                       struct bcm_sysport_cb *cb,
                                       unsigned int *bytes_compl,
                                       unsigned int *pkts_compl)
 {
+       struct bcm_sysport_priv *priv = ring->priv;
        struct device *kdev = &priv->pdev->dev;
-       struct net_device *ndev = priv->netdev;
 
        if (cb->skb) {
-               ndev->stats.tx_bytes += cb->skb->len;
+               ring->bytes += cb->skb->len;
                *bytes_compl += cb->skb->len;
                dma_unmap_single(kdev, dma_unmap_addr(cb, dma_addr),
                                 dma_unmap_len(cb, dma_len),
                                 DMA_TO_DEVICE);
-               ndev->stats.tx_packets++;
+               ring->packets++;
                (*pkts_compl)++;
                bcm_sysport_free_cb(cb);
        /* SKB fragment */
        } else if (dma_unmap_addr(cb, dma_addr)) {
-               ndev->stats.tx_bytes += dma_unmap_len(cb, dma_len);
+               ring->bytes += dma_unmap_len(cb, dma_len);
                dma_unmap_page(kdev, dma_unmap_addr(cb, dma_addr),
                               dma_unmap_len(cb, dma_len), DMA_TO_DEVICE);
                dma_unmap_addr_set(cb, dma_addr, 0);
@@ -782,6 +814,13 @@ static unsigned int __bcm_sysport_tx_reclaim(struct bcm_sysport_priv *priv,
        struct bcm_sysport_cb *cb;
        u32 hw_ind;
 
+       /* Clear status before servicing to reduce spurious interrupts */
+       if (!ring->priv->is_lite)
+               intrl2_1_writel(ring->priv, BIT(ring->index), INTRL2_CPU_CLEAR);
+       else
+               intrl2_0_writel(ring->priv, BIT(ring->index +
+                               INTRL2_0_TDMA_MBDONE_SHIFT), INTRL2_CPU_CLEAR);
+
        /* Compute how many descriptors have been processed since last call */
        hw_ind = tdma_readl(priv, TDMA_DESC_RING_PROD_CONS_INDEX(ring->index));
        c_index = (hw_ind >> RING_CONS_INDEX_SHIFT) & RING_CONS_INDEX_MASK;
@@ -803,7 +842,7 @@ static unsigned int __bcm_sysport_tx_reclaim(struct bcm_sysport_priv *priv,
 
        while (last_tx_cn-- > 0) {
                cb = ring->cbs + last_c_index;
-               bcm_sysport_tx_reclaim_one(priv, cb, &bytes_compl, &pkts_compl);
+               bcm_sysport_tx_reclaim_one(ring, cb, &bytes_compl, &pkts_compl);
 
                ring->desc_count++;
                last_c_index++;
@@ -1632,6 +1671,24 @@ static int bcm_sysport_change_mac(struct net_device *dev, void *p)
        return 0;
 }
 
+static struct net_device_stats *bcm_sysport_get_nstats(struct net_device *dev)
+{
+       struct bcm_sysport_priv *priv = netdev_priv(dev);
+       unsigned long tx_bytes = 0, tx_packets = 0;
+       struct bcm_sysport_tx_ring *ring;
+       unsigned int q;
+
+       for (q = 0; q < dev->num_tx_queues; q++) {
+               ring = &priv->tx_rings[q];
+               tx_bytes += ring->bytes;
+               tx_packets += ring->packets;
+       }
+
+       dev->stats.tx_bytes = tx_bytes;
+       dev->stats.tx_packets = tx_packets;
+       return &dev->stats;
+}
+
 static void bcm_sysport_netif_start(struct net_device *dev)
 {
        struct bcm_sysport_priv *priv = netdev_priv(dev);
@@ -1893,6 +1950,7 @@ static const struct net_device_ops bcm_sysport_netdev_ops = {
 #ifdef CONFIG_NET_POLL_CONTROLLER
        .ndo_poll_controller    = bcm_sysport_poll_controller,
 #endif
+       .ndo_get_stats          = bcm_sysport_get_nstats,
 };
 
 #define REV_FMT        "v%2x.%02x"
index 863ddd7870b77d2ce963685098fff71211f395a8..77a51c167a694734b5983f524464e9b94725e1d7 100644 (file)
@@ -647,6 +647,9 @@ enum bcm_sysport_stat_type {
        .reg_offset = ofs, \
 }
 
+/* TX bytes and packets */
+#define NUM_SYSPORT_TXQ_STAT   2
+
 struct bcm_sysport_stats {
        char stat_string[ETH_GSTRING_LEN];
        int stat_sizeof;
@@ -690,6 +693,8 @@ struct bcm_sysport_tx_ring {
        struct bcm_sysport_cb *cbs;     /* Transmit control blocks */
        struct dma_desc *desc_cpu;      /* CPU view of the descriptor */
        struct bcm_sysport_priv *priv;  /* private context backpointer */
+       unsigned long   packets;        /* packets statistics */
+       unsigned long   bytes;          /* bytes statistics */
 };
 
 /* Driver private structure */
index cf15b7e2929ca18c10c7d33581ea6a61a04434e9..6322594ab2600ac100a63140c9420855f07c86dc 100644 (file)
@@ -11,6 +11,7 @@
 #include <linux/bcma/bcma.h>
 #include <linux/brcmphy.h>
 #include <linux/etherdevice.h>
+#include <linux/of_net.h>
 #include "bgmac.h"
 
 static inline bool bgmac_is_bcm4707_family(struct bcma_device *core)
@@ -114,7 +115,7 @@ static int bgmac_probe(struct bcma_device *core)
        struct ssb_sprom *sprom = &core->bus->sprom;
        struct mii_bus *mii_bus;
        struct bgmac *bgmac;
-       u8 *mac;
+       const u8 *mac = NULL;
        int err;
 
        bgmac = bgmac_alloc(&core->dev);
@@ -127,21 +128,27 @@ static int bgmac_probe(struct bcma_device *core)
 
        bcma_set_drvdata(core, bgmac);
 
-       switch (core->core_unit) {
-       case 0:
-               mac = sprom->et0mac;
-               break;
-       case 1:
-               mac = sprom->et1mac;
-               break;
-       case 2:
-               mac = sprom->et2mac;
-               break;
-       default:
-               dev_err(bgmac->dev, "Unsupported core_unit %d\n",
-                       core->core_unit);
-               err = -ENOTSUPP;
-               goto err;
+       if (bgmac->dev->of_node)
+               mac = of_get_mac_address(bgmac->dev->of_node);
+
+       /* If no MAC address assigned via device tree, check SPROM */
+       if (!mac) {
+               switch (core->core_unit) {
+               case 0:
+                       mac = sprom->et0mac;
+                       break;
+               case 1:
+                       mac = sprom->et1mac;
+                       break;
+               case 2:
+                       mac = sprom->et2mac;
+                       break;
+               default:
+                       dev_err(bgmac->dev, "Unsupported core_unit %d\n",
+                               core->core_unit);
+                       err = -ENOTSUPP;
+                       goto err;
+               }
        }
 
        ether_addr_copy(bgmac->net_dev->dev_addr, mac);
index e1a24ee6ab8b14b30c723bd682519e1079d059a3..ba4d2e145bb9bb81c32ade2e3855743e493c1c59 100644 (file)
@@ -11,6 +11,7 @@
 
 #include <linux/bcma/bcma.h>
 #include <linux/etherdevice.h>
+#include <linux/interrupt.h>
 #include <linux/bcm47xx_nvram.h>
 #include <linux/phy.h>
 #include <linux/phy_fixed.h>
index 0a23034bbe3ff8d392483e7a5a201caf30c76526..352beff796ae5b090d8e3fa831078cc7182d3a2c 100644 (file)
@@ -2277,7 +2277,7 @@ void bnx2x_igu_clear_sb_gen(struct bnx2x *bp, u8 func, u8 idu_sb_id,
                                 GENERAL_ATTEN_OFFSET(LATCHED_ATTN_RBCP) | \
                                 GENERAL_ATTEN_OFFSET(LATCHED_ATTN_RSVD_GRC))
 
-#define HW_INTERRUT_ASSERT_SET_0 \
+#define HW_INTERRUPT_ASSERT_SET_0 \
                                (AEU_INPUTS_ATTN_BITS_TSDM_HW_INTERRUPT | \
                                 AEU_INPUTS_ATTN_BITS_TCM_HW_INTERRUPT | \
                                 AEU_INPUTS_ATTN_BITS_TSEMI_HW_INTERRUPT | \
@@ -2290,7 +2290,7 @@ void bnx2x_igu_clear_sb_gen(struct bnx2x *bp, u8 func, u8 idu_sb_id,
                                 AEU_INPUTS_ATTN_BITS_TSEMI_PARITY_ERROR |\
                                 AEU_INPUTS_ATTN_BITS_TCM_PARITY_ERROR |\
                                 AEU_INPUTS_ATTN_BITS_PBCLIENT_PARITY_ERROR)
-#define HW_INTERRUT_ASSERT_SET_1 \
+#define HW_INTERRUPT_ASSERT_SET_1 \
                                (AEU_INPUTS_ATTN_BITS_QM_HW_INTERRUPT | \
                                 AEU_INPUTS_ATTN_BITS_TIMERS_HW_INTERRUPT | \
                                 AEU_INPUTS_ATTN_BITS_XSDM_HW_INTERRUPT | \
@@ -2318,7 +2318,7 @@ void bnx2x_igu_clear_sb_gen(struct bnx2x *bp, u8 func, u8 idu_sb_id,
                                 AEU_INPUTS_ATTN_BITS_UPB_PARITY_ERROR | \
                                 AEU_INPUTS_ATTN_BITS_CSDM_PARITY_ERROR |\
                                 AEU_INPUTS_ATTN_BITS_CCM_PARITY_ERROR)
-#define HW_INTERRUT_ASSERT_SET_2 \
+#define HW_INTERRUPT_ASSERT_SET_2 \
                                (AEU_INPUTS_ATTN_BITS_CSEMI_HW_INTERRUPT | \
                                 AEU_INPUTS_ATTN_BITS_CDU_HW_INTERRUPT | \
                                 AEU_INPUTS_ATTN_BITS_DMAE_HW_INTERRUPT | \
index 9e8c06130c092d3f061089448797c1da74e15043..ad3e0631877e799d2b1cd8e3c07495bbb5a1fd96 100644 (file)
@@ -4277,7 +4277,10 @@ int __bnx2x_setup_tc(struct net_device *dev, u32 handle, __be16 proto,
 {
        if (tc->type != TC_SETUP_MQPRIO)
                return -EINVAL;
-       return bnx2x_setup_tc(dev, tc->tc);
+
+       tc->mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS;
+
+       return bnx2x_setup_tc(dev, tc->mqprio->num_tc);
 }
 
 /* called with rtnl_lock */
index d8d06fdfc42b9d685244513c1542b69bd78d7ca9..a851f95c307a3331a889972bc3c60273219f8a7b 100644 (file)
@@ -4166,14 +4166,14 @@ static void bnx2x_attn_int_deasserted0(struct bnx2x *bp, u32 attn)
                bnx2x_release_phy_lock(bp);
        }
 
-       if (attn & HW_INTERRUT_ASSERT_SET_0) {
+       if (attn & HW_INTERRUPT_ASSERT_SET_0) {
 
                val = REG_RD(bp, reg_offset);
-               val &= ~(attn & HW_INTERRUT_ASSERT_SET_0);
+               val &= ~(attn & HW_INTERRUPT_ASSERT_SET_0);
                REG_WR(bp, reg_offset, val);
 
                BNX2X_ERR("FATAL HW block attention set0 0x%x\n",
-                         (u32)(attn & HW_INTERRUT_ASSERT_SET_0));
+                         (u32)(attn & HW_INTERRUPT_ASSERT_SET_0));
                bnx2x_panic();
        }
 }
@@ -4191,7 +4191,7 @@ static void bnx2x_attn_int_deasserted1(struct bnx2x *bp, u32 attn)
                        BNX2X_ERR("FATAL error from DORQ\n");
        }
 
-       if (attn & HW_INTERRUT_ASSERT_SET_1) {
+       if (attn & HW_INTERRUPT_ASSERT_SET_1) {
 
                int port = BP_PORT(bp);
                int reg_offset;
@@ -4200,11 +4200,11 @@ static void bnx2x_attn_int_deasserted1(struct bnx2x *bp, u32 attn)
                                     MISC_REG_AEU_ENABLE1_FUNC_0_OUT_1);
 
                val = REG_RD(bp, reg_offset);
-               val &= ~(attn & HW_INTERRUT_ASSERT_SET_1);
+               val &= ~(attn & HW_INTERRUPT_ASSERT_SET_1);
                REG_WR(bp, reg_offset, val);
 
                BNX2X_ERR("FATAL HW block attention set1 0x%x\n",
-                         (u32)(attn & HW_INTERRUT_ASSERT_SET_1));
+                         (u32)(attn & HW_INTERRUPT_ASSERT_SET_1));
                bnx2x_panic();
        }
 }
@@ -4235,7 +4235,7 @@ static void bnx2x_attn_int_deasserted2(struct bnx2x *bp, u32 attn)
                }
        }
 
-       if (attn & HW_INTERRUT_ASSERT_SET_2) {
+       if (attn & HW_INTERRUPT_ASSERT_SET_2) {
 
                int port = BP_PORT(bp);
                int reg_offset;
@@ -4244,11 +4244,11 @@ static void bnx2x_attn_int_deasserted2(struct bnx2x *bp, u32 attn)
                                     MISC_REG_AEU_ENABLE1_FUNC_0_OUT_2);
 
                val = REG_RD(bp, reg_offset);
-               val &= ~(attn & HW_INTERRUT_ASSERT_SET_2);
+               val &= ~(attn & HW_INTERRUPT_ASSERT_SET_2);
                REG_WR(bp, reg_offset, val);
 
                BNX2X_ERR("FATAL HW block attention set2 0x%x\n",
-                         (u32)(attn & HW_INTERRUT_ASSERT_SET_2));
+                         (u32)(attn & HW_INTERRUPT_ASSERT_SET_2));
                bnx2x_panic();
        }
 }
@@ -13292,17 +13292,15 @@ static int bnx2x_init_dev(struct bnx2x *bp, struct pci_dev *pdev,
        dev->vlan_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
                NETIF_F_TSO | NETIF_F_TSO_ECN | NETIF_F_TSO6 | NETIF_F_HIGHDMA;
 
-       /* VF with OLD Hypervisor or old PF do not support filtering */
        if (IS_PF(bp)) {
                if (chip_is_e1x)
                        bp->accept_any_vlan = true;
                else
                        dev->hw_features |= NETIF_F_HW_VLAN_CTAG_FILTER;
-#ifdef CONFIG_BNX2X_SRIOV
-       } else if (bp->acquire_resp.pfdev_info.pf_cap & PFVF_CAP_VLAN_FILTER) {
-               dev->hw_features |= NETIF_F_HW_VLAN_CTAG_FILTER;
-#endif
        }
+       /* For VF we'll know whether to enable VLAN filtering after
+        * getting a response to CHANNEL_TLV_ACQUIRE from PF.
+        */
 
        dev->features |= dev->hw_features | NETIF_F_HW_VLAN_CTAG_RX;
        dev->features |= NETIF_F_HIGHDMA;
@@ -13738,7 +13736,7 @@ static int bnx2x_ptp_adjfreq(struct ptp_clock_info *ptp, s32 ppb)
        if (!netif_running(bp->dev)) {
                DP(BNX2X_MSG_PTP,
                   "PTP adjfreq called while the interface is down\n");
-               return -EFAULT;
+               return -ENETDOWN;
        }
 
        if (ppb < 0) {
@@ -13797,6 +13795,12 @@ static int bnx2x_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
 {
        struct bnx2x *bp = container_of(ptp, struct bnx2x, ptp_clock_info);
 
+       if (!netif_running(bp->dev)) {
+               DP(BNX2X_MSG_PTP,
+                  "PTP adjtime called while the interface is down\n");
+               return -ENETDOWN;
+       }
+
        DP(BNX2X_MSG_PTP, "PTP adjtime called, delta = %llx\n", delta);
 
        timecounter_adjtime(&bp->timecounter, delta);
@@ -13809,6 +13813,12 @@ static int bnx2x_ptp_gettime(struct ptp_clock_info *ptp, struct timespec64 *ts)
        struct bnx2x *bp = container_of(ptp, struct bnx2x, ptp_clock_info);
        u64 ns;
 
+       if (!netif_running(bp->dev)) {
+               DP(BNX2X_MSG_PTP,
+                  "PTP gettime called while the interface is down\n");
+               return -ENETDOWN;
+       }
+
        ns = timecounter_read(&bp->timecounter);
 
        DP(BNX2X_MSG_PTP, "PTP gettime called, ns = %llu\n", ns);
@@ -13824,6 +13834,12 @@ static int bnx2x_ptp_settime(struct ptp_clock_info *ptp,
        struct bnx2x *bp = container_of(ptp, struct bnx2x, ptp_clock_info);
        u64 ns;
 
+       if (!netif_running(bp->dev)) {
+               DP(BNX2X_MSG_PTP,
+                  "PTP settime called while the interface is down\n");
+               return -ENETDOWN;
+       }
+
        ns = timespec64_to_ns(ts);
 
        DP(BNX2X_MSG_PTP, "PTP settime called, ns = %llu\n", ns);
@@ -13991,6 +14007,14 @@ static int bnx2x_init_one(struct pci_dev *pdev,
                rc = bnx2x_vfpf_acquire(bp, tx_count, rx_count);
                if (rc)
                        goto init_one_freemem;
+
+#ifdef CONFIG_BNX2X_SRIOV
+               /* VF with OLD Hypervisor or old PF do not support filtering */
+               if (bp->acquire_resp.pfdev_info.pf_cap & PFVF_CAP_VLAN_FILTER) {
+                       dev->hw_features |= NETIF_F_HW_VLAN_CTAG_FILTER;
+                       dev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
+               }
+#endif
        }
 
        /* Enable SRIOV if capability found in configuration space */
index 6fad22adbbb9e72fda1208d2b689ef371ac77d09..bdfd53b46bc568286ac9debc70bb14563329040b 100644 (file)
@@ -434,7 +434,9 @@ static int bnx2x_vf_mac_vlan_config(struct bnx2x *bp,
 
        /* Add/Remove the filter */
        rc = bnx2x_config_vlan_mac(bp, &ramrod);
-       if (rc && rc != -EEXIST) {
+       if (rc == -EEXIST)
+               return 0;
+       if (rc) {
                BNX2X_ERR("Failed to %s %s\n",
                          filter->add ? "add" : "delete",
                          (filter->type == BNX2X_VF_FILTER_VLAN_MAC) ?
@@ -444,6 +446,8 @@ static int bnx2x_vf_mac_vlan_config(struct bnx2x *bp,
                return rc;
        }
 
+       filter->applied = true;
+
        return 0;
 }
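The new applied flag set above lets the rollback loop in bnx2x_vf_mac_vlan_config_list() (next hunk) skip filters that never reached the hardware: entries that hit -EEXIST are treated as success but left unmarked, and only marked entries get their add/delete sense inverted and replayed. A generic standalone sketch of this apply-then-roll-back pattern, with a hypothetical stub in place of the hardware call:

#include <stdbool.h>
#include <stddef.h>

#define RC_OK       0
#define RC_EXISTS   1   /* already present: success, nothing to undo */
#define RC_FAIL    -1

struct filter {
        bool add;       /* true = add, false = delete */
        bool applied;   /* set once the operation reached hardware */
};

/* Hypothetical stand-in for the hardware configuration call. */
static int hw_config(struct filter *f)
{
        (void)f;
        return RC_OK;
}

/* Apply all filters; on failure, undo only the entries actually
 * applied by inverting their add/delete sense (bnx2x-style). */
static int config_list(struct filter *list, size_t count)
{
        size_t i;

        for (i = 0; i < count; i++) {
                int rc = hw_config(&list[i]);

                if (rc == RC_EXISTS)
                        continue;       /* nothing to roll back later */
                if (rc != RC_OK)
                        break;
                list[i].applied = true;
        }
        if (i == count)
                return 0;

        while (i-- > 0) {
                if (!list[i].applied)
                        continue;
                list[i].add = !list[i].add;
                hw_config(&list[i]);    /* best-effort rollback */
        }
        return RC_FAIL;
}

int main(void)
{
        struct filter f[2] = { { true, false }, { false, false } };

        return config_list(f, 2);
}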
 
@@ -469,8 +473,10 @@ int bnx2x_vf_mac_vlan_config_list(struct bnx2x *bp, struct bnx2x_virtf *vf,
        /* Rollback if needed */
        if (i != filters->count) {
                BNX2X_ERR("Managed only %d/%d filters - rolling back\n",
-                         i, filters->count + 1);
+                         i, filters->count);
                while (--i >= 0) {
+                       if (!filters->filters[i].applied)
+                               continue;
                        filters->filters[i].add = !filters->filters[i].add;
                        bnx2x_vf_mac_vlan_config(bp, vf, qid,
                                                 &filters->filters[i],
@@ -1899,7 +1905,8 @@ void bnx2x_iov_adjust_stats_req(struct bnx2x *bp)
                        continue;
                }
 
-               DP(BNX2X_MSG_IOV, "add addresses for vf %d\n", vf->abs_vfid);
+               DP_AND((BNX2X_MSG_IOV | BNX2X_MSG_STATS),
+                      "add addresses for vf %d\n", vf->abs_vfid);
                for_each_vfq(vf, j) {
                        struct bnx2x_vf_queue *rxq = vfq_get(vf, j);
 
@@ -1920,11 +1927,12 @@ void bnx2x_iov_adjust_stats_req(struct bnx2x *bp)
                                cpu_to_le32(U64_HI(q_stats_addr));
                        cur_query_entry->address.lo =
                                cpu_to_le32(U64_LO(q_stats_addr));
-                       DP(BNX2X_MSG_IOV,
-                          "added address %x %x for vf %d queue %d client %d\n",
-                          cur_query_entry->address.hi,
-                          cur_query_entry->address.lo, cur_query_entry->funcID,
-                          j, cur_query_entry->index);
+                       DP_AND((BNX2X_MSG_IOV | BNX2X_MSG_STATS),
+                              "added address %x %x for vf %d queue %d client %d\n",
+                              cur_query_entry->address.hi,
+                              cur_query_entry->address.lo,
+                              cur_query_entry->funcID,
+                              j, cur_query_entry->index);
                        cur_query_entry++;
                        cur_data_offset += sizeof(struct per_queue_stats);
                        stats_count++;
index 7a6d406f4c111774ea606c98253608273ae8787b..888d0b6632e86f2f7ab7e2f9e605be87fa4c7061 100644 (file)
@@ -114,6 +114,7 @@ struct bnx2x_vf_mac_vlan_filter {
        (BNX2X_VF_FILTER_MAC | BNX2X_VF_FILTER_VLAN) /*shortcut*/
 
        bool add;
+       bool applied;
        u8 *mac;
        u16 vid;
 };
index bfae300cf25ff881292dc36ad56e51e37132cd76..76a4668c50fe98edb3d6e955351a357a3e3f0608 100644 (file)
@@ -868,7 +868,7 @@ int bnx2x_vfpf_set_mcast(struct net_device *dev)
        struct bnx2x *bp = netdev_priv(dev);
        struct vfpf_set_q_filters_tlv *req = &bp->vf2pf_mbox->req.set_q_filters;
        struct pfvf_general_resp_tlv *resp = &bp->vf2pf_mbox->resp.general_resp;
-       int rc, i = 0;
+       int rc = 0, i = 0;
        struct netdev_hw_addr *ha;
 
        if (bp->state != BNX2X_STATE_OPEN) {
@@ -883,6 +883,15 @@ int bnx2x_vfpf_set_mcast(struct net_device *dev)
        /* Get Rx mode requested */
        DP(NETIF_MSG_IFUP, "dev->flags = %x\n", dev->flags);
 
+       /* We support at most PFVF_MAX_MULTICAST_PER_VF mcast addresses */
+       if (netdev_mc_count(dev) > PFVF_MAX_MULTICAST_PER_VF) {
+               DP(NETIF_MSG_IFUP,
+                  "VF supports no more than %d multicast MAC addresses\n",
+                  PFVF_MAX_MULTICAST_PER_VF);
+               rc = -EINVAL;
+               goto out;
+       }
+
        netdev_for_each_mc_addr(ha, dev) {
                DP(NETIF_MSG_IFUP, "Adding mcast MAC: %pM\n",
                   bnx2x_mc_addr(ha));
@@ -890,16 +899,6 @@ int bnx2x_vfpf_set_mcast(struct net_device *dev)
                i++;
        }
 
-       /* We support four PFVF_MAX_MULTICAST_PER_VF mcast
-         * addresses tops
-         */
-       if (i >= PFVF_MAX_MULTICAST_PER_VF) {
-               DP(NETIF_MSG_IFUP,
-                  "VF supports not more than %d multicast MAC addresses\n",
-                  PFVF_MAX_MULTICAST_PER_VF);
-               return -EINVAL;
-       }
-
        req->n_multicast = i;
        req->flags |= VFPF_SET_Q_FILTERS_MULTICAST_CHANGED;
        req->vf_qid = 0;
@@ -924,7 +923,7 @@ int bnx2x_vfpf_set_mcast(struct net_device *dev)
 out:
        bnx2x_vfpf_finalize(bp, &req->first_tlv);
 
-       return 0;
+       return rc;
 }
 
 /* request pf to add a vlan for the vf */
@@ -1778,6 +1777,23 @@ static int bnx2x_vf_mbx_qfilters(struct bnx2x *bp, struct bnx2x_virtf *vf)
                                goto op_err;
                }
 
+               /* build vlan list */
+               fl = NULL;
+
+               rc = bnx2x_vf_mbx_macvlan_list(bp, vf, msg, &fl,
+                                              VFPF_VLAN_FILTER);
+               if (rc)
+                       goto op_err;
+
+               if (fl) {
+                       /* set vlan list */
+                       rc = bnx2x_vf_mac_vlan_config_list(bp, vf, fl,
+                                                          msg->vf_qid,
+                                                          false);
+                       if (rc)
+                               goto op_err;
+               }
+
        }
 
        if (msg->flags & VFPF_SET_Q_FILTERS_RX_MASK_CHANGED) {
index 235733e91c791b8ff04951cbdd42ec10ab435bae..129b8101b93256366af883a6944eb03de78c9824 100644 (file)
@@ -1983,20 +1983,25 @@ static void bnxt_free_rx_skbs(struct bnxt *bp)
 
                for (j = 0; j < max_idx; j++) {
                        struct bnxt_sw_rx_bd *rx_buf = &rxr->rx_buf_ring[j];
+                       dma_addr_t mapping = rx_buf->mapping;
                        void *data = rx_buf->data;
 
                        if (!data)
                                continue;
 
-                       dma_unmap_single(&pdev->dev, rx_buf->mapping,
-                                        bp->rx_buf_use_size, bp->rx_dir);
-
                        rx_buf->data = NULL;
 
-                       if (BNXT_RX_PAGE_MODE(bp))
+                       if (BNXT_RX_PAGE_MODE(bp)) {
+                               mapping -= bp->rx_dma_offset;
+                               dma_unmap_page(&pdev->dev, mapping,
+                                              PAGE_SIZE, bp->rx_dir);
                                __free_page(data);
-                       else
+                       } else {
+                               dma_unmap_single(&pdev->dev, mapping,
+                                                bp->rx_buf_use_size,
+                                                bp->rx_dir);
                                kfree(data);
+                       }
                }
 
                for (j = 0; j < max_agg_idx; j++) {
@@ -2455,6 +2460,18 @@ static int bnxt_init_one_rx_ring(struct bnxt *bp, int ring_nr)
        return 0;
 }
 
+static void bnxt_init_cp_rings(struct bnxt *bp)
+{
+       int i;
+
+       for (i = 0; i < bp->cp_nr_rings; i++) {
+               struct bnxt_cp_ring_info *cpr = &bp->bnapi[i]->cp_ring;
+               struct bnxt_ring_struct *ring = &cpr->cp_ring_struct;
+
+               ring->fw_ring_id = INVALID_HW_RING_ID;
+       }
+}
+
 static int bnxt_init_rx_rings(struct bnxt *bp)
 {
        int i, rc = 0;
@@ -4465,6 +4482,10 @@ static int bnxt_hwrm_func_qcfg(struct bnxt *bp)
                vf->vlan = le16_to_cpu(resp->vlan) & VLAN_VID_MASK;
        }
 #endif
+       if (BNXT_PF(bp) && (le16_to_cpu(resp->flags) &
+                           FUNC_QCFG_RESP_FLAGS_FW_DCBX_AGENT_ENABLED))
+               bp->flags |= BNXT_FLAG_FW_LLDP_AGENT;
+
        switch (resp->port_partition_type) {
        case FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR1_0:
        case FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR1_5:
@@ -4528,6 +4549,9 @@ static int bnxt_hwrm_func_qcaps(struct bnxt *bp)
                pf->max_tx_wm_flows = le32_to_cpu(resp->max_tx_wm_flows);
                pf->max_rx_em_flows = le32_to_cpu(resp->max_rx_em_flows);
                pf->max_rx_wm_flows = le32_to_cpu(resp->max_rx_wm_flows);
+               if (resp->flags &
+                   cpu_to_le32(FUNC_QCAPS_RESP_FLAGS_WOL_MAGICPKT_SUPPORTED))
+                       bp->flags |= BNXT_FLAG_WOL_CAP;
        } else {
 #ifdef CONFIG_BNXT_SRIOV
                struct bnxt_vf_info *vf = &bp->vf;
@@ -4728,7 +4752,7 @@ static int bnxt_set_tpa(struct bnxt *bp, bool set_tpa)
                rc = bnxt_hwrm_vnic_set_tpa(bp, i, tpa_flags);
                if (rc) {
                        netdev_err(bp->dev, "hwrm vnic set tpa failure rc for vnic %d: %x\n",
-                                  rc, i);
+                                  i, rc);
                        return rc;
                }
        }
@@ -5002,6 +5026,7 @@ static int bnxt_shutdown_nic(struct bnxt *bp, bool irq_re_init)
 
 static int bnxt_init_nic(struct bnxt *bp, bool irq_re_init)
 {
+       bnxt_init_cp_rings(bp);
        bnxt_init_rx_rings(bp);
        bnxt_init_tx_rings(bp);
        bnxt_init_ring_grps(bp, irq_re_init);
@@ -5176,9 +5201,10 @@ static unsigned int bnxt_get_max_func_irqs(struct bnxt *bp)
 {
 #if defined(CONFIG_BNXT_SRIOV)
        if (BNXT_VF(bp))
-               return bp->vf.max_irqs;
+               return min_t(unsigned int, bp->vf.max_irqs,
+                            bp->vf.max_cp_rings);
 #endif
-       return bp->pf.max_irqs;
+       return min_t(unsigned int, bp->pf.max_irqs, bp->pf.max_cp_rings);
 }
 
 void bnxt_set_max_func_irqs(struct bnxt *bp, unsigned int max_irqs)
@@ -5507,8 +5533,9 @@ static int bnxt_hwrm_phy_qcaps(struct bnxt *bp)
                bp->lpi_tmr_hi = le32_to_cpu(resp->valid_tx_lpi_timer_high) &
                                 PORT_PHY_QCAPS_RESP_TX_LPI_TIMER_HIGH_MASK;
        }
-       link_info->support_auto_speeds =
-               le16_to_cpu(resp->supported_speeds_auto_mode);
+       if (resp->supported_speeds_auto_mode)
+               link_info->support_auto_speeds =
+                       le16_to_cpu(resp->supported_speeds_auto_mode);
 
 hwrm_phy_qcaps_exit:
        mutex_unlock(&bp->hwrm_cmd_lock);
@@ -5834,6 +5861,76 @@ static int bnxt_hwrm_port_led_qcaps(struct bnxt *bp)
        return 0;
 }
 
+int bnxt_hwrm_alloc_wol_fltr(struct bnxt *bp)
+{
+       struct hwrm_wol_filter_alloc_input req = {0};
+       struct hwrm_wol_filter_alloc_output *resp = bp->hwrm_cmd_resp_addr;
+       int rc;
+
+       bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_WOL_FILTER_ALLOC, -1, -1);
+       req.port_id = cpu_to_le16(bp->pf.port_id);
+       req.wol_type = WOL_FILTER_ALLOC_REQ_WOL_TYPE_MAGICPKT;
+       req.enables = cpu_to_le32(WOL_FILTER_ALLOC_REQ_ENABLES_MAC_ADDRESS);
+       memcpy(req.mac_address, bp->dev->dev_addr, ETH_ALEN);
+       mutex_lock(&bp->hwrm_cmd_lock);
+       rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+       if (!rc)
+               bp->wol_filter_id = resp->wol_filter_id;
+       mutex_unlock(&bp->hwrm_cmd_lock);
+       return rc;
+}
+
+int bnxt_hwrm_free_wol_fltr(struct bnxt *bp)
+{
+       struct hwrm_wol_filter_free_input req = {0};
+       int rc;
+
+       bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_WOL_FILTER_FREE, -1, -1);
+       req.port_id = cpu_to_le16(bp->pf.port_id);
+       req.enables = cpu_to_le32(WOL_FILTER_FREE_REQ_ENABLES_WOL_FILTER_ID);
+       req.wol_filter_id = bp->wol_filter_id;
+       rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+       return rc;
+}
+
+static u16 bnxt_hwrm_get_wol_fltrs(struct bnxt *bp, u16 handle)
+{
+       struct hwrm_wol_filter_qcfg_input req = {0};
+       struct hwrm_wol_filter_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
+       u16 next_handle = 0;
+       int rc;
+
+       bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_WOL_FILTER_QCFG, -1, -1);
+       req.port_id = cpu_to_le16(bp->pf.port_id);
+       req.handle = cpu_to_le16(handle);
+       mutex_lock(&bp->hwrm_cmd_lock);
+       rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+       if (!rc) {
+               next_handle = le16_to_cpu(resp->next_handle);
+               if (next_handle != 0) {
+                       if (resp->wol_type ==
+                           WOL_FILTER_ALLOC_REQ_WOL_TYPE_MAGICPKT) {
+                               bp->wol = 1;
+                               bp->wol_filter_id = resp->wol_filter_id;
+                       }
+               }
+       }
+       mutex_unlock(&bp->hwrm_cmd_lock);
+       return next_handle;
+}
+
+static void bnxt_get_wol_settings(struct bnxt *bp)
+{
+       u16 handle = 0;
+
+       if (!BNXT_PF(bp) || !(bp->flags & BNXT_FLAG_WOL_CAP))
+               return;
+
+       do {
+               handle = bnxt_hwrm_get_wol_fltrs(bp, handle);
+       } while (handle && handle != 0xffff);
+}
+
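bnxt_get_wol_settings() above walks a firmware-maintained filter list cursor-style: each HWRM_WOL_FILTER_QCFG reply carries the handle of the next entry, and a handle of 0 or 0xffff terminates the walk. A standalone sketch of the same enumeration over a hypothetical in-memory chain standing in for the firmware query:

#include <stdint.h>
#include <stdio.h>

#define END_HANDLE 0xffffu

/* Hypothetical stand-in for the HWRM query: given the current
 * cursor, return the handle of the next filter in the list. */
static uint16_t query_next(uint16_t handle)
{
        switch (handle) {
        case 0:  return 3;          /* first entry */
        case 3:  return 7;          /* second entry */
        default: return END_HANDLE; /* list exhausted */
        }
}

int main(void)
{
        uint16_t handle = 0;

        /* cursor walk mirroring bnxt_get_wol_settings() */
        do {
                handle = query_next(handle);
                if (handle && handle != END_HANDLE)
                        printf("filter handle %u\n", handle);
        } while (handle && handle != END_HANDLE);
        return 0;
}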
 static bool bnxt_eee_config_ok(struct bnxt *bp)
 {
        struct ethtool_eee *eee = &bp->eee;
@@ -6019,6 +6116,43 @@ int bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
        return rc;
 }
 
+/* rtnl_lock held, open the NIC halfway by allocating all resources, but
+ * NAPI, IRQ, and TX are not enabled.  This is mainly used for offline
+ * self tests.
+ */
+int bnxt_half_open_nic(struct bnxt *bp)
+{
+       int rc = 0;
+
+       rc = bnxt_alloc_mem(bp, false);
+       if (rc) {
+               netdev_err(bp->dev, "bnxt_alloc_mem err: %x\n", rc);
+               goto half_open_err;
+       }
+       rc = bnxt_init_nic(bp, false);
+       if (rc) {
+               netdev_err(bp->dev, "bnxt_init_nic err: %x\n", rc);
+               goto half_open_err;
+       }
+       return 0;
+
+half_open_err:
+       bnxt_free_skbs(bp);
+       bnxt_free_mem(bp, false);
+       dev_close(bp->dev);
+       return rc;
+}
+
+/* rtnl_lock held, this call can only be made after a previous successful
+ * call to bnxt_half_open_nic().
+ */
+void bnxt_half_close_nic(struct bnxt *bp)
+{
+       bnxt_hwrm_resource_free(bp, false, false);
+       bnxt_free_skbs(bp);
+       bnxt_free_mem(bp, false);
+}
+
 static int bnxt_open(struct net_device *dev)
 {
        struct bnxt *bp = netdev_priv(dev);
@@ -6495,8 +6629,14 @@ static void bnxt_reset_task(struct bnxt *bp, bool silent)
        if (!silent)
                bnxt_dbg_dump_states(bp);
        if (netif_running(bp->dev)) {
+               int rc;
+
+               if (!silent)
+                       bnxt_ulp_stop(bp);
                bnxt_close_nic(bp, false, false);
-               bnxt_open_nic(bp, false, false);
+               rc = bnxt_open_nic(bp, false, false);
+               if (!silent && !rc)
+                       bnxt_ulp_start(bp);
        }
 }
 
@@ -6894,7 +7034,9 @@ static int bnxt_setup_tc(struct net_device *dev, u32 handle, __be16 proto,
        if (ntc->type != TC_SETUP_MQPRIO)
                return -EINVAL;
 
-       return bnxt_setup_mq_tc(dev, ntc->tc);
+       ntc->mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS;
+
+       return bnxt_setup_mq_tc(dev, ntc->mqprio->num_tc);
 }
 
 #ifdef CONFIG_RFS_ACCEL
@@ -7195,6 +7337,7 @@ static void bnxt_remove_one(struct pci_dev *pdev)
        bnxt_clear_int_mode(bp);
        bnxt_hwrm_func_drv_unrgtr(bp);
        bnxt_free_hwrm_resources(bp);
+       bnxt_ethtool_free(bp);
        bnxt_dcb_free(bp);
        kfree(bp->edev);
        bp->edev = NULL;
@@ -7444,6 +7587,10 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
        if (rc)
                goto init_err_pci_clean;
 
+       rc = bnxt_hwrm_func_reset(bp);
+       if (rc)
+               goto init_err_pci_clean;
+
        bnxt_hwrm_fw_set_time(bp);
 
        dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_SG |
@@ -7513,6 +7660,7 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
 
        bnxt_hwrm_func_qcfg(bp);
        bnxt_hwrm_port_led_qcaps(bp);
+       bnxt_ethtool_init(bp);
 
        bnxt_set_rx_skb_mode(bp, false);
        bnxt_set_tpa_flags(bp);
@@ -7554,14 +7702,16 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
        if (rc)
                goto init_err_pci_clean;
 
-       rc = bnxt_hwrm_func_reset(bp);
-       if (rc)
-               goto init_err_pci_clean;
-
        rc = bnxt_init_int_mode(bp);
        if (rc)
                goto init_err_pci_clean;
 
+       bnxt_get_wol_settings(bp);
+       if (bp->flags & BNXT_FLAG_WOL_CAP)
+               device_set_wakeup_enable(&pdev->dev, bp->wol);
+       else
+               device_set_wakeup_capable(&pdev->dev, false);
+
        rc = register_netdev(dev);
        if (rc)
                goto init_err_clr_int;
@@ -7585,6 +7735,88 @@ init_err_free:
        return rc;
 }
 
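+/* PCI shutdown hook: close the netdev; if the system is powering off,
+ * arm wake-on-LAN and put the function into D3hot.
+ */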
+static void bnxt_shutdown(struct pci_dev *pdev)
+{
+       struct net_device *dev = pci_get_drvdata(pdev);
+       struct bnxt *bp;
+
+       if (!dev)
+               return;
+
+       rtnl_lock();
+       bp = netdev_priv(dev);
+       if (!bp)
+               goto shutdown_exit;
+
+       if (netif_running(dev))
+               dev_close(dev);
+
+       if (system_state == SYSTEM_POWER_OFF) {
+               bnxt_clear_int_mode(bp);
+               pci_wake_from_d3(pdev, bp->wol);
+               pci_set_power_state(pdev, PCI_D3hot);
+       }
+
+shutdown_exit:
+       rtnl_unlock();
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int bnxt_suspend(struct device *device)
+{
+       struct pci_dev *pdev = to_pci_dev(device);
+       struct net_device *dev = pci_get_drvdata(pdev);
+       struct bnxt *bp = netdev_priv(dev);
+       int rc = 0;
+
+       rtnl_lock();
+       if (netif_running(dev)) {
+               netif_device_detach(dev);
+               rc = bnxt_close(dev);
+       }
+       bnxt_hwrm_func_drv_unrgtr(bp);
+       rtnl_unlock();
+       return rc;
+}
+
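+/* Resume re-establishes the firmware session (version handshake and
+ * driver registration), resets the function, and re-reads the WoL
+ * settings before the netdev is reopened.
+ */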
+static int bnxt_resume(struct device *device)
+{
+       struct pci_dev *pdev = to_pci_dev(device);
+       struct net_device *dev = pci_get_drvdata(pdev);
+       struct bnxt *bp = netdev_priv(dev);
+       int rc = 0;
+
+       rtnl_lock();
+       if (bnxt_hwrm_ver_get(bp) || bnxt_hwrm_func_drv_rgtr(bp)) {
+               rc = -ENODEV;
+               goto resume_exit;
+       }
+       rc = bnxt_hwrm_func_reset(bp);
+       if (rc) {
+               rc = -EBUSY;
+               goto resume_exit;
+       }
+       bnxt_get_wol_settings(bp);
+       if (netif_running(dev)) {
+               rc = bnxt_open(dev);
+               if (!rc)
+                       netif_device_attach(dev);
+       }
+
+resume_exit:
+       rtnl_unlock();
+       return rc;
+}
+
+static SIMPLE_DEV_PM_OPS(bnxt_pm_ops, bnxt_suspend, bnxt_resume);
+#define BNXT_PM_OPS (&bnxt_pm_ops)
+
+#else
+
+#define BNXT_PM_OPS NULL
+
+#endif /* CONFIG_PM_SLEEP */
+
 /**
  * bnxt_io_error_detected - called when PCI error is detected
  * @pdev: Pointer to PCI device
@@ -7701,6 +7933,8 @@ static struct pci_driver bnxt_pci_driver = {
        .id_table       = bnxt_pci_tbl,
        .probe          = bnxt_init_one,
        .remove         = bnxt_remove_one,
+       .shutdown       = bnxt_shutdown,
+       .driver.pm      = BNXT_PM_OPS,
        .err_handler    = &bnxt_err_handler,
 #if defined(CONFIG_BNXT_SRIOV)
        .sriov_configure = bnxt_sriov_configure,
index faf26a2f726b808792fd837437bf7abb9279a8c7..c9a1688a65dedd851148af34534c6342779e7636 100644 (file)
@@ -18,6 +18,8 @@
 #define DRV_VER_MIN    7
 #define DRV_VER_UPD    0
 
+#include <linux/interrupt.h>
+
 struct tx_bd {
        __le32 tx_bd_len_flags_type;
        #define TX_BD_TYPE                                      (0x3f << 0)
@@ -424,8 +426,6 @@ struct rx_tpa_end_cmp_ext {
 
 #define BNXT_MIN_PKT_SIZE      52
 
-#define BNXT_NUM_TESTS(bp)     0
-
 #define BNXT_DEFAULT_RX_RING_SIZE      511
 #define BNXT_DEFAULT_TX_RING_SIZE      511
 
@@ -909,6 +909,14 @@ struct bnxt_led_info {
        __le16  led_color_caps;
 };
 
+#define BNXT_MAX_TEST  8
+
+struct bnxt_test_info {
+       u8 offline_mask;
+       u16 timeout;
+       char string[BNXT_MAX_TEST][ETH_GSTRING_LEN];
+};
+
 #define BNXT_GRCPF_REG_WINDOW_BASE_OUT 0x400
 #define BNXT_CAG_REG_LEGACY_INT_STATUS 0x4014
 #define BNXT_CAG_REG_BASE              0x300000
@@ -987,12 +995,14 @@ struct bnxt {
        #define BNXT_FLAG_UDP_RSS_CAP   0x800
        #define BNXT_FLAG_EEE_CAP       0x1000
        #define BNXT_FLAG_NEW_RSS_CAP   0x2000
+       #define BNXT_FLAG_WOL_CAP       0x4000
        #define BNXT_FLAG_ROCEV1_CAP    0x8000
        #define BNXT_FLAG_ROCEV2_CAP    0x10000
        #define BNXT_FLAG_ROCE_CAP      (BNXT_FLAG_ROCEV1_CAP | \
                                         BNXT_FLAG_ROCEV2_CAP)
        #define BNXT_FLAG_NO_AGG_RINGS  0x20000
        #define BNXT_FLAG_RX_PAGE_MODE  0x40000
+       #define BNXT_FLAG_FW_LLDP_AGENT 0x80000
        #define BNXT_FLAG_CHIP_NITRO_A0 0x1000000
 
        #define BNXT_FLAG_ALL_CONFIG_FEATS (BNXT_FLAG_TPA |             \
@@ -1177,6 +1187,12 @@ struct bnxt {
        u32                     lpi_tmr_lo;
        u32                     lpi_tmr_hi;
 
+       u8                      num_tests;
+       struct bnxt_test_info   *test_info;
+
+       u8                      wol_filter_id;
+       u8                      wol;
+
        u8                      num_leds;
        struct bnxt_led_info    leds[BNXT_MAX_LED];
 
@@ -1235,8 +1251,12 @@ void bnxt_tx_disable(struct bnxt *bp);
 void bnxt_tx_enable(struct bnxt *bp);
 int bnxt_hwrm_set_pause(struct bnxt *);
 int bnxt_hwrm_set_link_setting(struct bnxt *, bool, bool);
+int bnxt_hwrm_alloc_wol_fltr(struct bnxt *bp);
+int bnxt_hwrm_free_wol_fltr(struct bnxt *bp);
 int bnxt_hwrm_fw_set_time(struct bnxt *);
 int bnxt_open_nic(struct bnxt *, bool, bool);
+int bnxt_half_open_nic(struct bnxt *bp);
+void bnxt_half_close_nic(struct bnxt *bp);
 int bnxt_close_nic(struct bnxt *, bool, bool);
 int bnxt_reserve_rings(struct bnxt *bp, int tx, int rx, int tcs, int tx_xdp);
 int bnxt_setup_mq_tc(struct net_device *dev, u8 tc);
index fdf2d8caf7bfaae56b4b39c415feaa343f29d579..03532061d211b168d1bd7774d2aeea9b4ed3a776 100644 (file)
@@ -474,7 +474,7 @@ void bnxt_dcb_init(struct bnxt *bp)
                return;
 
        bp->dcbx_cap = DCB_CAP_DCBX_VER_IEEE;
-       if (BNXT_PF(bp))
+       if (BNXT_PF(bp) && !(bp->flags & BNXT_FLAG_FW_LLDP_AGENT))
                bp->dcbx_cap |= DCB_CAP_DCBX_HOST;
        else
                bp->dcbx_cap |= DCB_CAP_DCBX_LLD_MANAGED;
index 6903a873f072ae14f4a7638514446d6ad7b1c6a0..848ecf212b8f58fec7e00ab2891d0add3ebb77a5 100644 (file)
@@ -1,6 +1,7 @@
 /* Broadcom NetXtreme-C/E network driver.
  *
  * Copyright (c) 2014-2016 Broadcom Corporation
+ * Copyright (c) 2016-2017 Broadcom Limited
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
@@ -17,6 +18,7 @@
 #include <linux/firmware.h>
 #include "bnxt_hsi.h"
 #include "bnxt.h"
+#include "bnxt_xdp.h"
 #include "bnxt_ethtool.h"
 #include "bnxt_nvm_defs.h"     /* NVRAM content constant and structure defs */
 #include "bnxt_fw_hdr.h"       /* Firmware hdr constant and structure defs */
@@ -209,6 +211,10 @@ static int bnxt_get_sset_count(struct net_device *dev, int sset)
 
                return num_stats;
        }
+       case ETH_SS_TEST:
+               if (!bp->num_tests)
+                       return -EOPNOTSUPP;
+               return bp->num_tests;
        default:
                return -EOPNOTSUPP;
        }
@@ -306,6 +312,11 @@ static void bnxt_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
                        }
                }
                break;
+       case ETH_SS_TEST:
+               if (bp->num_tests)
+                       memcpy(buf, bp->test_info->string,
+                              bp->num_tests * ETH_GSTRING_LEN);
+               break;
        default:
                netdev_err(bp->dev, "bnxt_get_strings invalid request %x\n",
                           stringset);
@@ -824,7 +835,7 @@ static void bnxt_get_drvinfo(struct net_device *dev,
                        sizeof(info->fw_version));
        strlcpy(info->bus_info, pci_name(bp->pdev), sizeof(info->bus_info));
        info->n_stats = BNXT_NUM_STATS * bp->cp_nr_rings;
-       info->testinfo_len = BNXT_NUM_TESTS(bp);
+       info->testinfo_len = bp->num_tests;
        /* TODO CHIMP_FW: eeprom dump details */
        info->eedump_len = 0;
        /* TODO CHIMP FW: reg dump details */
@@ -832,6 +843,45 @@ static void bnxt_get_drvinfo(struct net_device *dev,
        kfree(pkglog);
 }
 
+static void bnxt_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
+{
+       struct bnxt *bp = netdev_priv(dev);
+
+       wol->supported = 0;
+       wol->wolopts = 0;
+       memset(&wol->sopass, 0, sizeof(wol->sopass));
+       if (bp->flags & BNXT_FLAG_WOL_CAP) {
+               wol->supported = WAKE_MAGIC;
+               if (bp->wol)
+                       wol->wolopts = WAKE_MAGIC;
+       }
+}
+
+static int bnxt_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
+{
+       struct bnxt *bp = netdev_priv(dev);
+
+       if (wol->wolopts & ~WAKE_MAGIC)
+               return -EINVAL;
+
+       if (wol->wolopts & WAKE_MAGIC) {
+               if (!(bp->flags & BNXT_FLAG_WOL_CAP))
+                       return -EINVAL;
+               if (!bp->wol) {
+                       if (bnxt_hwrm_alloc_wol_fltr(bp))
+                               return -EBUSY;
+                       bp->wol = 1;
+               }
+       } else {
+               if (bp->wol) {
+                       if (bnxt_hwrm_free_wol_fltr(bp))
+                               return -EBUSY;
+                       bp->wol = 0;
+               }
+       }
+       return 0;
+}
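+
+/* These hooks back the standard ethtool WoL interface; e.g. magic-packet
+ * wake can be toggled from userspace with "ethtool -s <dev> wol g" or
+ * "ethtool -s <dev> wol d" (usage sketch, not part of this change).
+ */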
+
 u32 _bnxt_fw_to_ethtool_adv_spds(u16 fw_speeds, u8 fw_pause)
 {
        u32 speed_mask = 0;
@@ -2128,12 +2178,372 @@ static int bnxt_set_phys_id(struct net_device *dev,
        return rc;
 }
 
+static int bnxt_hwrm_selftest_irq(struct bnxt *bp, u16 cmpl_ring)
+{
+       struct hwrm_selftest_irq_input req = {0};
+
+       bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_SELFTEST_IRQ, cmpl_ring, -1);
+       return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+}
+
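+/* Ask the firmware to generate a test interrupt on every completion
+ * ring; the IRQ self-test fails if any request is rejected.
+ */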
+static int bnxt_test_irq(struct bnxt *bp)
+{
+       int i;
+
+       for (i = 0; i < bp->cp_nr_rings; i++) {
+               u16 cmpl_ring = bp->grp_info[i].cp_fw_ring_id;
+               int rc;
+
+               rc = bnxt_hwrm_selftest_irq(bp, cmpl_ring);
+               if (rc)
+                       return rc;
+       }
+       return 0;
+}
+
+static int bnxt_hwrm_mac_loopback(struct bnxt *bp, bool enable)
+{
+       struct hwrm_port_mac_cfg_input req = {0};
+
+       bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_MAC_CFG, -1, -1);
+
+       req.enables = cpu_to_le32(PORT_MAC_CFG_REQ_ENABLES_LPBK);
+       if (enable)
+               req.lpbk = PORT_MAC_CFG_REQ_LPBK_LOCAL;
+       else
+               req.lpbk = PORT_MAC_CFG_REQ_LPBK_NONE;
+       return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+}
+
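+/* Autoneg cannot complete while the PHY is in loopback, so force a
+ * fixed speed first: the current link speed if the link is up,
+ * otherwise the fastest advertised speed, defaulting to 1 Gbps.
+ */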
+static int bnxt_disable_an_for_lpbk(struct bnxt *bp,
+                                   struct hwrm_port_phy_cfg_input *req)
+{
+       struct bnxt_link_info *link_info = &bp->link_info;
+       u16 fw_advertising = link_info->advertising;
+       u16 fw_speed;
+       int rc;
+
+       if (!link_info->autoneg)
+               return 0;
+
+       fw_speed = PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_1GB;
+       if (netif_carrier_ok(bp->dev))
+               fw_speed = bp->link_info.link_speed;
+       else if (fw_advertising & BNXT_LINK_SPEED_MSK_10GB)
+               fw_speed = PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_10GB;
+       else if (fw_advertising & BNXT_LINK_SPEED_MSK_25GB)
+               fw_speed = PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_25GB;
+       else if (fw_advertising & BNXT_LINK_SPEED_MSK_40GB)
+               fw_speed = PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_40GB;
+       else if (fw_advertising & BNXT_LINK_SPEED_MSK_50GB)
+               fw_speed = PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_50GB;
+
+       req->force_link_speed = cpu_to_le16(fw_speed);
+       req->flags |= cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_FORCE |
+                                 PORT_PHY_CFG_REQ_FLAGS_RESET_PHY);
+       rc = hwrm_send_message(bp, req, sizeof(*req), HWRM_CMD_TIMEOUT);
+       req->flags = 0;
+       req->force_link_speed = cpu_to_le16(0);
+       return rc;
+}
+
+static int bnxt_hwrm_phy_loopback(struct bnxt *bp, bool enable)
+{
+       struct hwrm_port_phy_cfg_input req = {0};
+
+       bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_CFG, -1, -1);
+
+       if (enable) {
+               bnxt_disable_an_for_lpbk(bp, &req);
+               req.lpbk = PORT_PHY_CFG_REQ_LPBK_LOCAL;
+       } else {
+               req.lpbk = PORT_PHY_CFG_REQ_LPBK_NONE;
+       }
+       req.enables = cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_LPBK);
+       return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+}
+
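+/* Validate a looped-back frame: the length must match, the source MAC
+ * at offset ETH_ALEN must be our own address, and the payload must be
+ * the incrementing byte pattern written by bnxt_run_loopback().
+ */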
+static int bnxt_rx_loopback(struct bnxt *bp, struct bnxt_napi *bnapi,
+                           u32 raw_cons, int pkt_size)
+{
+       struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
+       struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
+       struct bnxt_sw_rx_bd *rx_buf;
+       struct rx_cmp *rxcmp;
+       u16 cp_cons, cons;
+       u8 *data;
+       u32 len;
+       int i;
+
+       cp_cons = RING_CMP(raw_cons);
+       rxcmp = (struct rx_cmp *)
+               &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
+       cons = rxcmp->rx_cmp_opaque;
+       rx_buf = &rxr->rx_buf_ring[cons];
+       data = rx_buf->data_ptr;
+       len = le32_to_cpu(rxcmp->rx_cmp_len_flags_type) >> RX_CMP_LEN_SHIFT;
+       if (len != pkt_size)
+               return -EIO;
+       i = ETH_ALEN;
+       if (!ether_addr_equal(data + i, bnapi->bp->dev->dev_addr))
+               return -EIO;
+       i += ETH_ALEN;
+       for ( ; i < pkt_size; i++) {
+               if (data[i] != (u8)(i & 0xff))
+                       return -EIO;
+       }
+       return 0;
+}
+
+static int bnxt_poll_loopback(struct bnxt *bp, int pkt_size)
+{
+       struct bnxt_napi *bnapi = bp->bnapi[0];
+       struct bnxt_cp_ring_info *cpr;
+       struct tx_cmp *txcmp;
+       int rc = -EIO;
+       u32 raw_cons;
+       u32 cons;
+       int i;
+
+       cpr = &bnapi->cp_ring;
+       raw_cons = cpr->cp_raw_cons;
+       for (i = 0; i < 200; i++) {
+               cons = RING_CMP(raw_cons);
+               txcmp = &cpr->cp_desc_ring[CP_RING(cons)][CP_IDX(cons)];
+
+               if (!TX_CMP_VALID(txcmp, raw_cons)) {
+                       udelay(5);
+                       continue;
+               }
+
+               /* The validity of the entry must be checked before
+                * reading any further.
+                */
+               dma_rmb();
+               if (TX_CMP_TYPE(txcmp) == CMP_TYPE_RX_L2_CMP) {
+                       rc = bnxt_rx_loopback(bp, bnapi, raw_cons, pkt_size);
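+                       /* an RX L2 completion occupies two 16-byte
+                        * ring entries, so advance the consumer twice
+                        */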
+                       raw_cons = NEXT_RAW_CMP(raw_cons);
+                       raw_cons = NEXT_RAW_CMP(raw_cons);
+                       break;
+               }
+               raw_cons = NEXT_RAW_CMP(raw_cons);
+       }
+       cpr->cp_raw_cons = raw_cons;
+       return rc;
+}
+
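+/* Build one test frame (broadcast destination, our source MAC, then an
+ * incrementing byte pattern), transmit it on TX ring 0 via the XDP
+ * xmit helper, and poll the completion ring for the looped-back copy.
+ */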
+static int bnxt_run_loopback(struct bnxt *bp)
+{
+       struct bnxt_tx_ring_info *txr = &bp->tx_ring[0];
+       int pkt_size, i = 0;
+       struct sk_buff *skb;
+       dma_addr_t map;
+       u8 *data;
+       int rc;
+
+       pkt_size = min(bp->dev->mtu + ETH_HLEN, bp->rx_copy_thresh);
+       skb = netdev_alloc_skb(bp->dev, pkt_size);
+       if (!skb)
+               return -ENOMEM;
+       data = skb_put(skb, pkt_size);
+       eth_broadcast_addr(data);
+       i += ETH_ALEN;
+       ether_addr_copy(&data[i], bp->dev->dev_addr);
+       i += ETH_ALEN;
+       for ( ; i < pkt_size; i++)
+               data[i] = (u8)(i & 0xff);
+
+       map = dma_map_single(&bp->pdev->dev, skb->data, pkt_size,
+                            PCI_DMA_TODEVICE);
+       if (dma_mapping_error(&bp->pdev->dev, map)) {
+               dev_kfree_skb(skb);
+               return -EIO;
+       }
+       bnxt_xmit_xdp(bp, txr, map, pkt_size, 0);
+
+       /* Sync BD data before updating doorbell */
+       wmb();
+
+       writel(DB_KEY_TX | txr->tx_prod, txr->tx_doorbell);
+       writel(DB_KEY_TX | txr->tx_prod, txr->tx_doorbell);
+       rc = bnxt_poll_loopback(bp, pkt_size);
+
+       dma_unmap_single(&bp->pdev->dev, map, pkt_size, PCI_DMA_TODEVICE);
+       dev_kfree_skb(skb);
+       return rc;
+}
+
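+/* Ask the firmware to execute the tests in @test_mask; on return,
+ * @test_results carries one bit per test that passed.
+ */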
+static int bnxt_run_fw_tests(struct bnxt *bp, u8 test_mask, u8 *test_results)
+{
+       struct hwrm_selftest_exec_output *resp = bp->hwrm_cmd_resp_addr;
+       struct hwrm_selftest_exec_input req = {0};
+       int rc;
+
+       bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_SELFTEST_EXEC, -1, -1);
+       mutex_lock(&bp->hwrm_cmd_lock);
+       resp->test_success = 0;
+       req.flags = test_mask;
+       rc = _hwrm_send_message(bp, &req, sizeof(req), bp->test_info->timeout);
+       *test_results = resp->test_success;
+       mutex_unlock(&bp->hwrm_cmd_lock);
+       return rc;
+}
+
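+/* The last BNXT_DRV_TESTS entries are implemented by the driver (MAC
+ * loopback, PHY loopback, IRQ); everything before them comes from the
+ * firmware's HWRM_SELFTEST_QLIST response.
+ */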
+#define BNXT_DRV_TESTS                 3
+#define BNXT_MACLPBK_TEST_IDX          (bp->num_tests - BNXT_DRV_TESTS)
+#define BNXT_PHYLPBK_TEST_IDX          (BNXT_MACLPBK_TEST_IDX + 1)
+#define BNXT_IRQ_TEST_IDX              (BNXT_MACLPBK_TEST_IDX + 2)
+
+static void bnxt_self_test(struct net_device *dev, struct ethtool_test *etest,
+                          u64 *buf)
+{
+       struct bnxt *bp = netdev_priv(dev);
+       bool offline = false;
+       u8 test_results = 0;
+       u8 test_mask = 0;
+       int rc, i;
+
+       if (!bp->num_tests || !BNXT_SINGLE_PF(bp))
+               return;
+       memset(buf, 0, sizeof(u64) * bp->num_tests);
+       if (!netif_running(dev)) {
+               etest->flags |= ETH_TEST_FL_FAILED;
+               return;
+       }
+
+       if (etest->flags & ETH_TEST_FL_OFFLINE) {
+               if (bp->pf.active_vfs) {
+                       etest->flags |= ETH_TEST_FL_FAILED;
+                       netdev_warn(dev, "Offline tests cannot be run with active VFs\n");
+                       return;
+               }
+               offline = true;
+       }
+
+       for (i = 0; i < bp->num_tests - BNXT_DRV_TESTS; i++) {
+               u8 bit_val = 1 << i;
+
+               if (!(bp->test_info->offline_mask & bit_val))
+                       test_mask |= bit_val;
+               else if (offline)
+                       test_mask |= bit_val;
+       }
+       if (!offline) {
+               bnxt_run_fw_tests(bp, test_mask, &test_results);
+       } else {
+               rc = bnxt_close_nic(bp, false, false);
+               if (rc)
+                       return;
+               bnxt_run_fw_tests(bp, test_mask, &test_results);
+
+               buf[BNXT_MACLPBK_TEST_IDX] = 1;
+               bnxt_hwrm_mac_loopback(bp, true);
+               msleep(250);
+               rc = bnxt_half_open_nic(bp);
+               if (rc) {
+                       bnxt_hwrm_mac_loopback(bp, false);
+                       etest->flags |= ETH_TEST_FL_FAILED;
+                       return;
+               }
+               if (bnxt_run_loopback(bp))
+                       etest->flags |= ETH_TEST_FL_FAILED;
+               else
+                       buf[BNXT_MACLPBK_TEST_IDX] = 0;
+
+               bnxt_hwrm_mac_loopback(bp, false);
+               bnxt_hwrm_phy_loopback(bp, true);
+               msleep(1000);
+               if (bnxt_run_loopback(bp)) {
+                       buf[BNXT_PHYLPBK_TEST_IDX] = 1;
+                       etest->flags |= ETH_TEST_FL_FAILED;
+               }
+               bnxt_hwrm_phy_loopback(bp, false);
+               bnxt_half_close_nic(bp);
+               bnxt_open_nic(bp, false, true);
+       }
+       if (bnxt_test_irq(bp)) {
+               buf[BNXT_IRQ_TEST_IDX] = 1;
+               etest->flags |= ETH_TEST_FL_FAILED;
+       }
+       for (i = 0; i < bp->num_tests - BNXT_DRV_TESTS; i++) {
+               u8 bit_val = 1 << i;
+
+               if ((test_mask & bit_val) && !(test_results & bit_val)) {
+                       buf[i] = 1;
+                       etest->flags |= ETH_TEST_FL_FAILED;
+               }
+       }
+}
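+
+/* Userspace reaches bnxt_self_test() through the standard ethtool
+ * self-test interface, e.g. "ethtool -t <dev> offline" or
+ * "ethtool -t <dev> online" (usage sketch, not part of this change).
+ */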
+
+void bnxt_ethtool_init(struct bnxt *bp)
+{
+       struct hwrm_selftest_qlist_output *resp = bp->hwrm_cmd_resp_addr;
+       struct hwrm_selftest_qlist_input req = {0};
+       struct bnxt_test_info *test_info;
+       int i, rc;
+
+       if (bp->hwrm_spec_code < 0x10704 || !BNXT_SINGLE_PF(bp))
+               return;
+
+       bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_SELFTEST_QLIST, -1, -1);
+       mutex_lock(&bp->hwrm_cmd_lock);
+       rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+       if (rc)
+               goto ethtool_init_exit;
+
+       test_info = kzalloc(sizeof(*bp->test_info), GFP_KERNEL);
+       if (!test_info)
+               goto ethtool_init_exit;
+
+       bp->test_info = test_info;
+       bp->num_tests = resp->num_tests + BNXT_DRV_TESTS;
+       if (bp->num_tests > BNXT_MAX_TEST)
+               bp->num_tests = BNXT_MAX_TEST;
+
+       test_info->offline_mask = resp->offline_tests;
+       test_info->timeout = le16_to_cpu(resp->test_timeout);
+       if (!test_info->timeout)
+               test_info->timeout = HWRM_CMD_TIMEOUT;
+       for (i = 0; i < bp->num_tests; i++) {
+               char *str = test_info->string[i];
+               char *fw_str = resp->test0_name + i * 32;
+
+               if (i == BNXT_MACLPBK_TEST_IDX) {
+                       strcpy(str, "Mac loopback test (offline)");
+               } else if (i == BNXT_PHYLPBK_TEST_IDX) {
+                       strcpy(str, "Phy loopback test (offline)");
+               } else if (i == BNXT_IRQ_TEST_IDX) {
+                       strcpy(str, "Interrupt test (offline)");
+               } else {
+                       strlcpy(str, fw_str, ETH_GSTRING_LEN);
+                       strncat(str, " test", ETH_GSTRING_LEN - strlen(str));
+                       if (test_info->offline_mask & (1 << i))
+                               strncat(str, " (offline)",
+                                       ETH_GSTRING_LEN - strlen(str));
+                       else
+                               strncat(str, " (online)",
+                                       ETH_GSTRING_LEN - strlen(str));
+               }
+       }
+
+ethtool_init_exit:
+       mutex_unlock(&bp->hwrm_cmd_lock);
+}
+
+void bnxt_ethtool_free(struct bnxt *bp)
+{
+       kfree(bp->test_info);
+       bp->test_info = NULL;
+}
+
 const struct ethtool_ops bnxt_ethtool_ops = {
        .get_link_ksettings     = bnxt_get_link_ksettings,
        .set_link_ksettings     = bnxt_set_link_ksettings,
        .get_pauseparam         = bnxt_get_pauseparam,
        .set_pauseparam         = bnxt_set_pauseparam,
        .get_drvinfo            = bnxt_get_drvinfo,
+       .get_wol                = bnxt_get_wol,
+       .set_wol                = bnxt_set_wol,
        .get_coalesce           = bnxt_get_coalesce,
        .set_coalesce           = bnxt_set_coalesce,
        .get_msglevel           = bnxt_get_msglevel,
@@ -2161,4 +2571,5 @@ const struct ethtool_ops bnxt_ethtool_ops = {
        .get_module_eeprom      = bnxt_get_module_eeprom,
        .nway_reset             = bnxt_nway_reset,
        .set_phys_id            = bnxt_set_phys_id,
+       .self_test              = bnxt_self_test,
 };
index ed1e555292e9ce404b44017eca84fac5edb23d85..f1bc90b6fb5bdbec7fbc7896fa32c46b71d104e9 100644 (file)
@@ -1,6 +1,7 @@
 /* Broadcom NetXtreme-C/E network driver.
  *
  * Copyright (c) 2014-2016 Broadcom Corporation
+ * Copyright (c) 2016-2017 Broadcom Limited
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
@@ -38,5 +39,7 @@ extern const struct ethtool_ops bnxt_ethtool_ops;
 u32 _bnxt_fw_to_ethtool_adv_spds(u16, u8);
 u32 bnxt_fw_to_ethtool_speed(u16);
 u16 bnxt_get_fw_auto_link_speeds(u32);
+void bnxt_ethtool_init(struct bnxt *bp);
+void bnxt_ethtool_free(struct bnxt *bp);
 
 #endif
index 6e275c23d68bfe22561903e9b1d538a160ab8948..7dc71bb95837b8c48eb1668fc094ece6fba07b4b 100644 (file)
 #ifndef BNXT_HSI_H
 #define BNXT_HSI_H
 
-/* HSI and HWRM Specification 1.7.0 */
+/* HSI and HWRM Specification 1.7.6 */
 #define HWRM_VERSION_MAJOR     1
 #define HWRM_VERSION_MINOR     7
-#define HWRM_VERSION_UPDATE    0
+#define HWRM_VERSION_UPDATE    6
 
-#define HWRM_VERSION_STR       "1.7.0"
+#define HWRM_VERSION_RSVD      2 /* non-zero means beta version */
+
+#define HWRM_VERSION_STR       "1.7.6.2"
 /*
  * Following is the signature for the HWRM message field that indicates
  * not applicable (all F's). Cast it to the size of the field if needed.
  */
 #define HWRM_NA_SIGNATURE      ((__le32)(-1))
 #define HWRM_MAX_REQ_LEN    (128)  /* hwrm_func_buf_rgtr */
-#define HWRM_MAX_RESP_LEN    (176)  /* hwrm_func_qstats */
+#define HWRM_MAX_RESP_LEN    (248)  /* hwrm_selftest_qlist */
 #define HW_HASH_INDEX_SIZE      0x80    /* 7 bit indirection table index. */
 #define HW_HASH_KEY_SIZE       40
 #define HWRM_RESP_VALID_KEY      1 /* valid key for HWRM response */
@@ -571,9 +573,10 @@ struct hwrm_ver_get_output {
        __le16 max_req_win_len;
        __le16 max_resp_len;
        __le16 def_req_timeout;
+       u8 init_pending;
+       #define VER_GET_RESP_INIT_PENDING_DEV_NOT_RDY               0x1UL
        u8 unused_0;
        u8 unused_1;
-       u8 unused_2;
        u8 valid;
 };
 
@@ -809,6 +812,8 @@ struct hwrm_func_qcfg_output {
        #define FUNC_QCFG_RESP_FLAGS_OOB_WOL_BMP_ENABLED            0x2UL
        #define FUNC_QCFG_RESP_FLAGS_FW_DCBX_AGENT_ENABLED          0x4UL
        #define FUNC_QCFG_RESP_FLAGS_STD_TX_RING_MODE_ENABLED      0x8UL
+       #define FUNC_QCFG_RESP_FLAGS_FW_LLDP_AGENT_ENABLED          0x10UL
+       #define FUNC_QCFG_RESP_FLAGS_MULTI_HOST                     0x20UL
        u8 mac_address[6];
        __le16 pci_id;
        __le16 alloc_rsscos_ctx;
@@ -827,10 +832,12 @@ struct hwrm_func_qcfg_output {
        #define FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR1_5         0x3UL
        #define FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR2_0         0x4UL
        #define FUNC_QCFG_RESP_PORT_PARTITION_TYPE_UNKNOWN         0xffUL
-       u8 unused_0;
+       u8 port_pf_cnt;
+       #define FUNC_QCFG_RESP_PORT_PF_CNT_UNAVAIL                 0x0UL
        __le16 dflt_vnic_id;
-       u8 unused_1;
-       u8 unused_2;
+       u8 host_cnt;
+       #define FUNC_QCFG_RESP_HOST_CNT_UNAVAIL            0x0UL
+       u8 unused_0;
        __le32 min_bw;
        #define FUNC_QCFG_RESP_MIN_BW_BW_VALUE_MASK                 0xfffffffUL
        #define FUNC_QCFG_RESP_MIN_BW_BW_VALUE_SFT                  0
@@ -867,12 +874,12 @@ struct hwrm_func_qcfg_output {
        #define FUNC_QCFG_RESP_EVB_MODE_NO_EVB                     0x0UL
        #define FUNC_QCFG_RESP_EVB_MODE_VEB                        0x1UL
        #define FUNC_QCFG_RESP_EVB_MODE_VEPA                       0x2UL
-       u8 unused_3;
+       u8 unused_1;
        __le16 alloc_vfs;
        __le32 alloc_mcast_filters;
        __le32 alloc_hw_ring_grps;
        __le16 alloc_sp_tx_rings;
-       u8 unused_4;
+       u8 unused_2;
        u8 valid;
 };
 
@@ -888,16 +895,13 @@ struct hwrm_func_cfg_input {
        u8 unused_0;
        u8 unused_1;
        __le32 flags;
-       #define FUNC_CFG_REQ_FLAGS_PROM_MODE                        0x1UL
-       #define FUNC_CFG_REQ_FLAGS_SRC_MAC_ADDR_CHECK               0x2UL
-       #define FUNC_CFG_REQ_FLAGS_SRC_IP_ADDR_CHECK                0x4UL
-       #define FUNC_CFG_REQ_FLAGS_VLAN_PRI_MATCH                   0x8UL
-       #define FUNC_CFG_REQ_FLAGS_DFLT_PRI_NOMATCH                 0x10UL
-       #define FUNC_CFG_REQ_FLAGS_DISABLE_PAUSE                    0x20UL
-       #define FUNC_CFG_REQ_FLAGS_DISABLE_STP                      0x40UL
-       #define FUNC_CFG_REQ_FLAGS_DISABLE_LLDP             0x80UL
-       #define FUNC_CFG_REQ_FLAGS_DISABLE_PTPV2                    0x100UL
-       #define FUNC_CFG_REQ_FLAGS_STD_TX_RING_MODE                 0x200UL
+       #define FUNC_CFG_REQ_FLAGS_SRC_MAC_ADDR_CHECK_DISABLE      0x1UL
+       #define FUNC_CFG_REQ_FLAGS_SRC_MAC_ADDR_CHECK_ENABLE       0x2UL
+       #define FUNC_CFG_REQ_FLAGS_RSVD_MASK                        0x1fcUL
+       #define FUNC_CFG_REQ_FLAGS_RSVD_SFT                         2
+       #define FUNC_CFG_REQ_FLAGS_STD_TX_RING_MODE_ENABLE          0x200UL
+       #define FUNC_CFG_REQ_FLAGS_STD_TX_RING_MODE_DISABLE         0x400UL
+       #define FUNC_CFG_REQ_FLAGS_VIRT_MAC_PERSIST                 0x800UL
        __le32 enables;
        #define FUNC_CFG_REQ_ENABLES_MTU                            0x1UL
        #define FUNC_CFG_REQ_ENABLES_MRU                            0x2UL
@@ -1013,7 +1017,7 @@ struct hwrm_func_qstats_output {
        __le64 tx_ucast_pkts;
        __le64 tx_mcast_pkts;
        __le64 tx_bcast_pkts;
-       __le64 tx_err_pkts;
+       __le64 tx_discard_pkts;
        __le64 tx_drop_pkts;
        __le64 tx_ucast_bytes;
        __le64 tx_mcast_bytes;
@@ -1021,7 +1025,7 @@ struct hwrm_func_qstats_output {
        __le64 rx_ucast_pkts;
        __le64 rx_mcast_pkts;
        __le64 rx_bcast_pkts;
-       __le64 rx_err_pkts;
+       __le64 rx_discard_pkts;
        __le64 rx_drop_pkts;
        __le64 rx_ucast_bytes;
        __le64 rx_mcast_bytes;
@@ -4743,25 +4747,72 @@ struct hwrm_temp_monitor_query_output {
        u8 valid;
 };
 
-/* hwrm_nvm_read */
-/* Input (40 bytes) */
-struct hwrm_nvm_read_input {
+/* hwrm_wol_filter_alloc */
+/* Input (64 bytes) */
+struct hwrm_wol_filter_alloc_input {
        __le16 req_type;
        __le16 cmpl_ring;
        __le16 seq_id;
        __le16 target_id;
        __le64 resp_addr;
-       __le64 host_dest_addr;
-       __le16 dir_idx;
+       __le32 flags;
+       __le32 enables;
+       #define WOL_FILTER_ALLOC_REQ_ENABLES_MAC_ADDRESS            0x1UL
+       #define WOL_FILTER_ALLOC_REQ_ENABLES_PATTERN_OFFSET         0x2UL
+       #define WOL_FILTER_ALLOC_REQ_ENABLES_PATTERN_BUF_SIZE      0x4UL
+       #define WOL_FILTER_ALLOC_REQ_ENABLES_PATTERN_BUF_ADDR      0x8UL
+       #define WOL_FILTER_ALLOC_REQ_ENABLES_PATTERN_MASK_ADDR     0x10UL
+       #define WOL_FILTER_ALLOC_REQ_ENABLES_PATTERN_MASK_SIZE     0x20UL
+       __le16 port_id;
+       u8 wol_type;
+       #define WOL_FILTER_ALLOC_REQ_WOL_TYPE_MAGICPKT             0x0UL
+       #define WOL_FILTER_ALLOC_REQ_WOL_TYPE_BMP                  0x1UL
+       #define WOL_FILTER_ALLOC_REQ_WOL_TYPE_INVALID              0xffUL
        u8 unused_0;
-       u8 unused_1;
-       __le32 offset;
-       __le32 len;
+       __le32 unused_1;
+       u8 mac_address[6];
+       __le16 pattern_offset;
+       __le16 pattern_buf_size;
+       __le16 pattern_mask_size;
        __le32 unused_2;
+       __le64 pattern_buf_addr;
+       __le64 pattern_mask_addr;
 };
 
 /* Output (16 bytes) */
-struct hwrm_nvm_read_output {
+struct hwrm_wol_filter_alloc_output {
+       __le16 error_code;
+       __le16 req_type;
+       __le16 seq_id;
+       __le16 resp_len;
+       u8 wol_filter_id;
+       u8 unused_0;
+       __le16 unused_1;
+       u8 unused_2;
+       u8 unused_3;
+       u8 unused_4;
+       u8 valid;
+};
+
+/* hwrm_wol_filter_free */
+/* Input (32 bytes) */
+struct hwrm_wol_filter_free_input {
+       __le16 req_type;
+       __le16 cmpl_ring;
+       __le16 seq_id;
+       __le16 target_id;
+       __le64 resp_addr;
+       __le32 flags;
+       #define WOL_FILTER_FREE_REQ_FLAGS_FREE_ALL_WOL_FILTERS     0x1UL
+       __le32 enables;
+       #define WOL_FILTER_FREE_REQ_ENABLES_WOL_FILTER_ID           0x1UL
+       __le16 port_id;
+       u8 wol_filter_id;
+       u8 unused_0[5];
+};
+
+/* Output (16 bytes) */
+struct hwrm_wol_filter_free_output {
        __le16 error_code;
        __le16 req_type;
        __le16 seq_id;
@@ -4773,21 +4824,107 @@ struct hwrm_nvm_read_output {
        u8 valid;
 };
 
-/* hwrm_nvm_raw_dump */
-/* Input (32 bytes) */
-struct hwrm_nvm_raw_dump_input {
+/* hwrm_wol_filter_qcfg */
+/* Input (56 bytes) */
+struct hwrm_wol_filter_qcfg_input {
+       __le16 req_type;
+       __le16 cmpl_ring;
+       __le16 seq_id;
+       __le16 target_id;
+       __le64 resp_addr;
+       __le16 port_id;
+       __le16 handle;
+       __le32 unused_0;
+       __le64 pattern_buf_addr;
+       __le16 pattern_buf_size;
+       u8 unused_1;
+       u8 unused_2;
+       u8 unused_3[3];
+       u8 unused_4;
+       __le64 pattern_mask_addr;
+       __le16 pattern_mask_size;
+       __le16 unused_5[3];
+};
+
+/* Output (32 bytes) */
+struct hwrm_wol_filter_qcfg_output {
+       __le16 error_code;
+       __le16 req_type;
+       __le16 seq_id;
+       __le16 resp_len;
+       __le16 next_handle;
+       u8 wol_filter_id;
+       u8 wol_type;
+       #define WOL_FILTER_QCFG_RESP_WOL_TYPE_MAGICPKT             0x0UL
+       #define WOL_FILTER_QCFG_RESP_WOL_TYPE_BMP                  0x1UL
+       #define WOL_FILTER_QCFG_RESP_WOL_TYPE_INVALID              0xffUL
+       __le32 unused_0;
+       u8 mac_address[6];
+       __le16 pattern_offset;
+       __le16 pattern_size;
+       __le16 pattern_mask_size;
+       u8 unused_1;
+       u8 unused_2;
+       u8 unused_3;
+       u8 valid;
+};
+
+/* hwrm_wol_reason_qcfg */
+/* Input (40 bytes) */
+struct hwrm_wol_reason_qcfg_input {
+       __le16 req_type;
+       __le16 cmpl_ring;
+       __le16 seq_id;
+       __le16 target_id;
+       __le64 resp_addr;
+       __le16 port_id;
+       u8 unused_0;
+       u8 unused_1;
+       u8 unused_2[3];
+       u8 unused_3;
+       __le64 wol_pkt_buf_addr;
+       __le16 wol_pkt_buf_size;
+       __le16 unused_4[3];
+};
+
+/* Output (16 bytes) */
+struct hwrm_wol_reason_qcfg_output {
+       __le16 error_code;
+       __le16 req_type;
+       __le16 seq_id;
+       __le16 resp_len;
+       u8 wol_filter_id;
+       u8 wol_reason;
+       #define WOL_REASON_QCFG_RESP_WOL_REASON_MAGICPKT           0x0UL
+       #define WOL_REASON_QCFG_RESP_WOL_REASON_BMP                0x1UL
+       #define WOL_REASON_QCFG_RESP_WOL_REASON_INVALID    0xffUL
+       u8 wol_pkt_len;
+       u8 unused_0;
+       u8 unused_1;
+       u8 unused_2;
+       u8 unused_3;
+       u8 valid;
+};
+
+/* hwrm_nvm_read */
+/* Input (40 bytes) */
+struct hwrm_nvm_read_input {
        __le16 req_type;
        __le16 cmpl_ring;
        __le16 seq_id;
        __le16 target_id;
        __le64 resp_addr;
        __le64 host_dest_addr;
+       __le16 dir_idx;
+       u8 unused_0;
+       u8 unused_1;
        __le32 offset;
        __le32 len;
+       __le32 unused_2;
 };
 
 /* Output (16 bytes) */
-struct hwrm_nvm_raw_dump_output {
+struct hwrm_nvm_read_output {
        __le16 error_code;
        __le16 req_type;
        __le16 seq_id;
@@ -4881,6 +5018,15 @@ struct hwrm_nvm_write_output {
        u8 valid;
 };
 
+/* Command specific Error Codes (8 bytes) */
+struct hwrm_nvm_write_cmd_err {
+       u8 code;
+       #define NVM_WRITE_CMD_ERR_CODE_UNKNOWN                     0x0UL
+       #define NVM_WRITE_CMD_ERR_CODE_FRAG_ERR            0x1UL
+       #define NVM_WRITE_CMD_ERR_CODE_NO_SPACE            0x2UL
+       u8 unused_0[7];
+};
+
 /* hwrm_nvm_modify */
 /* Input (40 bytes) */
 struct hwrm_nvm_modify_input {
@@ -5112,6 +5258,100 @@ struct hwrm_nvm_install_update_cmd_err {
        u8 unused_0[7];
 };
 
+/* hwrm_selftest_qlist */
+/* Input (16 bytes) */
+struct hwrm_selftest_qlist_input {
+       __le16 req_type;
+       __le16 cmpl_ring;
+       __le16 seq_id;
+       __le16 target_id;
+       __le64 resp_addr;
+};
+
+/* Output (248 bytes) */
+struct hwrm_selftest_qlist_output {
+       __le16 error_code;
+       __le16 req_type;
+       __le16 seq_id;
+       __le16 resp_len;
+       u8 num_tests;
+       u8 available_tests;
+       #define SELFTEST_QLIST_RESP_AVAILABLE_TESTS_NVM_TEST       0x1UL
+       #define SELFTEST_QLIST_RESP_AVAILABLE_TESTS_LINK_TEST      0x2UL
+       #define SELFTEST_QLIST_RESP_AVAILABLE_TESTS_REGISTER_TEST  0x4UL
+       #define SELFTEST_QLIST_RESP_AVAILABLE_TESTS_MEMORY_TEST    0x8UL
+       u8 offline_tests;
+       #define SELFTEST_QLIST_RESP_OFFLINE_TESTS_NVM_TEST          0x1UL
+       #define SELFTEST_QLIST_RESP_OFFLINE_TESTS_LINK_TEST         0x2UL
+       #define SELFTEST_QLIST_RESP_OFFLINE_TESTS_REGISTER_TEST    0x4UL
+       #define SELFTEST_QLIST_RESP_OFFLINE_TESTS_MEMORY_TEST      0x8UL
+       u8 unused_0;
+       __le16 test_timeout;
+       u8 unused_1;
+       u8 unused_2;
+       char test0_name[32];
+       char test1_name[32];
+       char test2_name[32];
+       char test3_name[32];
+       char test4_name[32];
+       char test5_name[32];
+       char test6_name[32];
+       char test7_name[32];
+};
+
+/* hwrm_selftest_exec */
+/* Input (24 bytes) */
+struct hwrm_selftest_exec_input {
+       __le16 req_type;
+       __le16 cmpl_ring;
+       __le16 seq_id;
+       __le16 target_id;
+       __le64 resp_addr;
+       u8 flags;
+       #define SELFTEST_EXEC_REQ_FLAGS_NVM_TEST                    0x1UL
+       #define SELFTEST_EXEC_REQ_FLAGS_LINK_TEST                   0x2UL
+       #define SELFTEST_EXEC_REQ_FLAGS_REGISTER_TEST               0x4UL
+       #define SELFTEST_EXEC_REQ_FLAGS_MEMORY_TEST                 0x8UL
+       u8 unused_0[7];
+};
+
+/* Output (16 bytes) */
+struct hwrm_selftest_exec_output {
+       __le16 error_code;
+       __le16 req_type;
+       __le16 seq_id;
+       __le16 resp_len;
+       u8 requested_tests;
+       #define SELFTEST_EXEC_RESP_REQUESTED_TESTS_NVM_TEST         0x1UL
+       #define SELFTEST_EXEC_RESP_REQUESTED_TESTS_LINK_TEST       0x2UL
+       #define SELFTEST_EXEC_RESP_REQUESTED_TESTS_REGISTER_TEST   0x4UL
+       #define SELFTEST_EXEC_RESP_REQUESTED_TESTS_MEMORY_TEST     0x8UL
+       u8 test_success;
+       #define SELFTEST_EXEC_RESP_TEST_SUCCESS_NVM_TEST            0x1UL
+       #define SELFTEST_EXEC_RESP_TEST_SUCCESS_LINK_TEST           0x2UL
+       #define SELFTEST_EXEC_RESP_TEST_SUCCESS_REGISTER_TEST      0x4UL
+       #define SELFTEST_EXEC_RESP_TEST_SUCCESS_MEMORY_TEST         0x8UL
+       __le16 unused_0[3];
+};
+
+/* hwrm_selftest_irq */
+/* Input (16 bytes) */
+struct hwrm_selftest_irq_input {
+       __le16 req_type;
+       __le16 cmpl_ring;
+       __le16 seq_id;
+       __le16 target_id;
+       __le64 resp_addr;
+};
+
+/* Output (8 bytes) */
+struct hwrm_selftest_irq_output {
+       __le16 error_code;
+       __le16 req_type;
+       __le16 seq_id;
+       __le16 resp_len;
+};
+
 /* Hardware Resource Manager Specification */
 /* Input (16 bytes) */
 struct input {
@@ -5130,6 +5370,16 @@ struct output {
        __le16 resp_len;
 };
 
+/* Short Command Structure (16 bytes) */
+struct hwrm_short_input {
+       __le16 req_type;
+       __le16 signature;
+       #define SHORT_REQ_SIGNATURE_SHORT_CMD                      0x4321UL
+       __le16 unused_0;
+       __le16 size;
+       __le64 req_addr;
+};
+
 /* Command numbering (8 bytes) */
 struct cmd_nums {
        __le16 req_type;
@@ -5252,11 +5502,15 @@ struct cmd_nums {
        #define HWRM_CFA_FLOW_FLUSH                                (0x105UL)
        #define HWRM_CFA_FLOW_STATS                                (0x106UL)
        #define HWRM_CFA_FLOW_INFO                                 (0x107UL)
+       #define HWRM_SELFTEST_QLIST                                (0x200UL)
+       #define HWRM_SELFTEST_EXEC                                 (0x201UL)
+       #define HWRM_SELFTEST_IRQ                                  (0x202UL)
        #define HWRM_DBG_READ_DIRECT                               (0xff10UL)
        #define HWRM_DBG_READ_INDIRECT                             (0xff11UL)
        #define HWRM_DBG_WRITE_DIRECT                              (0xff12UL)
        #define HWRM_DBG_WRITE_INDIRECT                    (0xff13UL)
        #define HWRM_DBG_DUMP                                      (0xff14UL)
+       #define HWRM_NVM_FACTORY_DEFAULTS                          (0xffeeUL)
        #define HWRM_NVM_VALIDATE_OPTION                           (0xffefUL)
        #define HWRM_NVM_FLUSH                                     (0xfff0UL)
        #define HWRM_NVM_GET_VARIABLE                              (0xfff1UL)
@@ -5464,6 +5718,7 @@ struct hwrm_struct_hdr {
        #define STRUCT_HDR_STRUCT_ID_DCBX_FEATURE_STATE    0x422UL
        #define STRUCT_HDR_STRUCT_ID_LLDP_GENERIC                  0x424UL
        #define STRUCT_HDR_STRUCT_ID_LLDP_DEVICE                   0x426UL
+       #define STRUCT_HDR_STRUCT_ID_AFM_OPAQUE            0x1UL
        #define STRUCT_HDR_STRUCT_ID_PORT_DESCRIPTION              0xaUL
        __le16 len;
        u8 version;
index 0b8cd7443843241efcff2e632869c6ccc7a40fbe..f89353175e6b9328e219efe5ccb34c0d95d5ecf2 100644 (file)
@@ -1,6 +1,7 @@
 /* Broadcom NetXtreme-C/E network driver.
  *
  * Copyright (c) 2014-2016 Broadcom Corporation
+ * Copyright (c) 2016-2017 Broadcom Limited
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
@@ -84,6 +85,9 @@ int bnxt_set_vf_spoofchk(struct net_device *dev, int vf_id, bool setting)
        u32 func_flags;
        int rc;
 
+       if (bp->hwrm_spec_code < 0x10701)
+               return -ENOTSUPP;
+
        rc = bnxt_vf_ndo_prep(bp, vf_id);
        if (rc)
                return rc;
@@ -96,9 +100,9 @@ int bnxt_set_vf_spoofchk(struct net_device *dev, int vf_id, bool setting)
 
        func_flags = vf->func_flags;
        if (setting)
-               func_flags |= FUNC_CFG_REQ_FLAGS_SRC_MAC_ADDR_CHECK;
+               func_flags |= FUNC_CFG_REQ_FLAGS_SRC_MAC_ADDR_CHECK_ENABLE;
        else
-               func_flags &= ~FUNC_CFG_REQ_FLAGS_SRC_MAC_ADDR_CHECK;
+               func_flags |= FUNC_CFG_REQ_FLAGS_SRC_MAC_ADDR_CHECK_DISABLE;
        /*TODO: if the driver supports VLAN filter on guest VLAN,
         * the spoof check should also include vlan anti-spoofing
         */
index 1ab72e4820af18bde648c992f8ced320ca35fe1b..dbc8d977fc5a3a06a9a23fc86585326bb4284f58 100644 (file)
@@ -1,6 +1,7 @@
 /* Broadcom NetXtreme-C/E network driver.
  *
  * Copyright (c) 2014-2016 Broadcom Corporation
+ * Copyright (c) 2016-2017 Broadcom Limited
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
index 899c30fb51886d78f4de3d0f89124116b7bb4ef1..9dae32756767cff8c4869b90c5c702bcc5ff00b9 100644 (file)
 #include "bnxt.h"
 #include "bnxt_xdp.h"
 
-static void bnxt_xmit_xdp(struct bnxt *bp, struct bnxt_tx_ring_info *txr,
-                         dma_addr_t mapping, u32 len, u16 rx_prod)
+void bnxt_xmit_xdp(struct bnxt *bp, struct bnxt_tx_ring_info *txr,
+                  dma_addr_t mapping, u32 len, u16 rx_prod)
 {
        struct bnxt_sw_tx_bd *tx_buf;
-       struct tx_bd_ext *txbd1;
        struct tx_bd *txbd;
        u32 flags;
        u16 prod;
@@ -33,22 +32,12 @@ static void bnxt_xmit_xdp(struct bnxt *bp, struct bnxt_tx_ring_info *txr,
        tx_buf->rx_prod = rx_prod;
 
        txbd = &txr->tx_desc_ring[TX_RING(prod)][TX_IDX(prod)];
-       flags = (len << TX_BD_LEN_SHIFT) | TX_BD_TYPE_LONG_TX_BD |
-               (2 << TX_BD_FLAGS_BD_CNT_SHIFT) | TX_BD_FLAGS_COAL_NOW |
+       flags = (len << TX_BD_LEN_SHIFT) | (1 << TX_BD_FLAGS_BD_CNT_SHIFT) |
                TX_BD_FLAGS_PACKET_END | bnxt_lhint_arr[len >> 9];
        txbd->tx_bd_len_flags_type = cpu_to_le32(flags);
        txbd->tx_bd_opaque = prod;
        txbd->tx_bd_haddr = cpu_to_le64(mapping);
 
-       prod = NEXT_TX(prod);
-       txbd1 = (struct tx_bd_ext *)
-               &txr->tx_desc_ring[TX_RING(prod)][TX_IDX(prod)];
-
-       txbd1->tx_bd_hsize_lflags = cpu_to_le32(0);
-       txbd1->tx_bd_mss = cpu_to_le32(0);
-       txbd1->tx_bd_cfa_action = cpu_to_le32(0);
-       txbd1->tx_bd_cfa_meta = cpu_to_le32(0);
-
        prod = NEXT_TX(prod);
        txr->tx_prod = prod;
 }
@@ -66,7 +55,6 @@ void bnxt_tx_int_xdp(struct bnxt *bp, struct bnxt_napi *bnapi, int nr_pkts)
        for (i = 0; i < nr_pkts; i++) {
                last_tx_cons = tx_cons;
                tx_cons = NEXT_TX(tx_cons);
-               tx_cons = NEXT_TX(tx_cons);
        }
        txr->tx_cons = tx_cons;
        if (bnxt_tx_avail(bp, txr) == bp->tx_ring_size) {
@@ -133,7 +121,7 @@ bool bnxt_rx_xdp(struct bnxt *bp, struct bnxt_rx_ring_info *rxr, u16 cons,
                return false;
 
        case XDP_TX:
-               if (tx_avail < 2) {
+               if (tx_avail < 1) {
                        trace_xdp_exception(bp->dev, xdp_prog, act);
                        bnxt_reuse_rx_data(rxr, cons, page);
                        return true;
index b529f2c5355b4361b93086c44fe0a94eb2cac512..12a5ad66b564cbfdbbf20c7753fe0a679e88e065 100644 (file)
@@ -10,6 +10,8 @@
 #ifndef BNXT_XDP_H
 #define BNXT_XDP_H
 
+void bnxt_xmit_xdp(struct bnxt *bp, struct bnxt_tx_ring_info *txr,
+                  dma_addr_t mapping, u32 len, u16 rx_prod);
 void bnxt_tx_int_xdp(struct bnxt *bp, struct bnxt_napi *bnapi, int nr_pkts);
 bool bnxt_rx_xdp(struct bnxt *bp, struct bnxt_rx_ring_info *rxr, u16 cons,
                 struct page *page, u8 **data_ptr, unsigned int *len,
index f92896835d2a4ceb18e7d69fdc53ce7730402667..a205a9ff9e179ba9b5f854b9e9707a29b36a45bb 100644 (file)
@@ -1,7 +1,7 @@
 /*
  * Broadcom GENET (Gigabit Ethernet) controller driver
  *
- * Copyright (c) 2014 Broadcom Corporation
+ * Copyright (c) 2014-2017 Broadcom
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 as
@@ -450,6 +450,22 @@ static inline void bcmgenet_rdma_ring_writel(struct bcmgenet_priv *priv,
                        genet_dma_ring_regs[r]);
 }
 
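+/* ethtool .begin/.complete hooks: the ethtool core calls .begin before
+ * and .complete after every ethtool operation, so the GENET register
+ * clock is enabled only for the duration of each call.
+ */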
+static int bcmgenet_begin(struct net_device *dev)
+{
+       struct bcmgenet_priv *priv = netdev_priv(dev);
+
+       /* Turn on the clock */
+       return clk_prepare_enable(priv->clk);
+}
+
+static void bcmgenet_complete(struct net_device *dev)
+{
+       struct bcmgenet_priv *priv = netdev_priv(dev);
+
+       /* Turn off the clock */
+       clk_disable_unprepare(priv->clk);
+}
+
 static int bcmgenet_get_link_ksettings(struct net_device *dev,
                                       struct ethtool_link_ksettings *cmd)
 {
@@ -605,7 +621,7 @@ static int bcmgenet_set_coalesce(struct net_device *dev,
 
        /* GENET TDMA hardware does not support a configurable timeout, but will
         * always generate an interrupt either after MBDONE packets have been
-        * transmitted, or when the ring is emtpy.
+        * transmitted, or when the ring is empty.
         */
        if (ec->tx_coalesce_usecs || ec->tx_coalesce_usecs_high ||
            ec->tx_coalesce_usecs_irq || ec->tx_coalesce_usecs_low)
@@ -691,6 +707,19 @@ struct bcmgenet_stats {
        .reg_offset = offset, \
 }
 
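+/* Expands to the six per-queue software counters for queue @num:
+ * txq<num>_packets/bytes and rxq<num>_bytes/packets/errors/dropped.
+ */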
+#define STAT_GENET_Q(num) \
+       STAT_GENET_SOFT_MIB("txq" __stringify(num) "_packets", \
+                       tx_rings[num].packets), \
+       STAT_GENET_SOFT_MIB("txq" __stringify(num) "_bytes", \
+                       tx_rings[num].bytes), \
+       STAT_GENET_SOFT_MIB("rxq" __stringify(num) "_bytes", \
+                       rx_rings[num].bytes),    \
+       STAT_GENET_SOFT_MIB("rxq" __stringify(num) "_packets", \
+                       rx_rings[num].packets), \
+       STAT_GENET_SOFT_MIB("rxq" __stringify(num) "_errors", \
+                       rx_rings[num].errors), \
+       STAT_GENET_SOFT_MIB("rxq" __stringify(num) "_dropped", \
+                       rx_rings[num].dropped)
 
 /* There is a 0xC gap between the end of RX and beginning of TX stats and then
  * between the end of TX stats and the beginning of the RX RUNT
@@ -778,12 +807,19 @@ static const struct bcmgenet_stats bcmgenet_gstrings_stats[] = {
        STAT_GENET_RUNT("rx_runt_bytes", mib.rx_runt_bytes),
        /* Misc UniMAC counters */
        STAT_GENET_MISC("rbuf_ovflow_cnt", mib.rbuf_ovflow_cnt,
-                       UMAC_RBUF_OVFL_CNT),
-       STAT_GENET_MISC("rbuf_err_cnt", mib.rbuf_err_cnt, UMAC_RBUF_ERR_CNT),
+                       UMAC_RBUF_OVFL_CNT_V1),
+       STAT_GENET_MISC("rbuf_err_cnt", mib.rbuf_err_cnt,
+                       UMAC_RBUF_ERR_CNT_V1),
        STAT_GENET_MISC("mdf_err_cnt", mib.mdf_err_cnt, UMAC_MDF_ERR_CNT),
        STAT_GENET_SOFT_MIB("alloc_rx_buff_failed", mib.alloc_rx_buff_failed),
        STAT_GENET_SOFT_MIB("rx_dma_failed", mib.rx_dma_failed),
        STAT_GENET_SOFT_MIB("tx_dma_failed", mib.tx_dma_failed),
+       /* Per TX queues */
+       STAT_GENET_Q(0),
+       STAT_GENET_Q(1),
+       STAT_GENET_Q(2),
+       STAT_GENET_Q(3),
+       STAT_GENET_Q(16),
 };
 
 #define BCMGENET_STATS_LEN     ARRAY_SIZE(bcmgenet_gstrings_stats)
@@ -821,6 +857,45 @@ static void bcmgenet_get_strings(struct net_device *dev, u32 stringset,
        }
 }
 
+static u32 bcmgenet_update_stat_misc(struct bcmgenet_priv *priv, u16 offset)
+{
+       u16 new_offset;
+       u32 val;
+
+       switch (offset) {
+       case UMAC_RBUF_OVFL_CNT_V1:
+               if (GENET_IS_V2(priv))
+                       new_offset = RBUF_OVFL_CNT_V2;
+               else
+                       new_offset = RBUF_OVFL_CNT_V3PLUS;
+
+               val = bcmgenet_rbuf_readl(priv, new_offset);
+               /* clear if overflowed */
+               if (val == ~0)
+                       bcmgenet_rbuf_writel(priv, 0, new_offset);
+               break;
+       case UMAC_RBUF_ERR_CNT_V1:
+               if (GENET_IS_V2(priv))
+                       new_offset = RBUF_ERR_CNT_V2;
+               else
+                       new_offset = RBUF_ERR_CNT_V3PLUS;
+
+               val = bcmgenet_rbuf_readl(priv, new_offset);
+               /* clear if overflowed */
+               if (val == ~0)
+                       bcmgenet_rbuf_writel(priv, 0, new_offset);
+               break;
+       default:
+               val = bcmgenet_umac_readl(priv, offset);
+               /* clear if overflowed */
+               if (val == ~0)
+                       bcmgenet_umac_writel(priv, 0, offset);
+               break;
+       }
+
+       return val;
+}
+
 static void bcmgenet_update_mib_counters(struct bcmgenet_priv *priv)
 {
        int i, j = 0;
@@ -836,19 +911,28 @@ static void bcmgenet_update_mib_counters(struct bcmgenet_priv *priv)
                case BCMGENET_STAT_NETDEV:
                case BCMGENET_STAT_SOFT:
                        continue;
-               case BCMGENET_STAT_MIB_RX:
-               case BCMGENET_STAT_MIB_TX:
                case BCMGENET_STAT_RUNT:
-                       if (s->type != BCMGENET_STAT_MIB_RX)
-                               offset = BCMGENET_STAT_OFFSET;
+                       offset += BCMGENET_STAT_OFFSET;
+                       /* fall through */
+               case BCMGENET_STAT_MIB_TX:
+                       offset += BCMGENET_STAT_OFFSET;
+                       /* fall through */
+               case BCMGENET_STAT_MIB_RX:
                        val = bcmgenet_umac_readl(priv,
                                                  UMAC_MIB_START + j + offset);
+                       offset = 0;     /* Reset Offset */
                        break;
                case BCMGENET_STAT_MISC:
-                       val = bcmgenet_umac_readl(priv, s->reg_offset);
-                       /* clear if overflowed */
-                       if (val == ~0)
-                               bcmgenet_umac_writel(priv, 0, s->reg_offset);
+                       if (GENET_IS_V1(priv)) {
+                               val = bcmgenet_umac_readl(priv, s->reg_offset);
+                               /* clear if overflowed */
+                               if (val == ~0)
+                                       bcmgenet_umac_writel(priv, 0,
+                                                            s->reg_offset);
+                       } else {
+                               val = bcmgenet_update_stat_misc(priv,
+                                                               s->reg_offset);
+                       }
                        break;
                }
 
@@ -973,6 +1057,8 @@ static int bcmgenet_set_eee(struct net_device *dev, struct ethtool_eee *e)
 
 /* standard ethtool support functions. */
 static const struct ethtool_ops bcmgenet_ethtool_ops = {
+       .begin                  = bcmgenet_begin,
+       .complete               = bcmgenet_complete,
        .get_strings            = bcmgenet_get_strings,
        .get_sset_count         = bcmgenet_get_sset_count,
        .get_ethtool_stats      = bcmgenet_get_ethtool_stats,
@@ -1011,8 +1097,17 @@ static int bcmgenet_power_down(struct bcmgenet_priv *priv,
                /* Power down LED */
                if (priv->hw_params->flags & GENET_HAS_EXT) {
                        reg = bcmgenet_ext_readl(priv, EXT_EXT_PWR_MGMT);
-                       reg |= (EXT_PWR_DOWN_PHY |
-                               EXT_PWR_DOWN_DLL | EXT_PWR_DOWN_BIAS);
+                       if (GENET_IS_V5(priv))
+                               reg |= EXT_PWR_DOWN_PHY_EN |
+                                      EXT_PWR_DOWN_PHY_RD |
+                                      EXT_PWR_DOWN_PHY_SD |
+                                      EXT_PWR_DOWN_PHY_RX |
+                                      EXT_PWR_DOWN_PHY_TX |
+                                      EXT_IDDQ_GLBL_PWR;
+                       else
+                               reg |= EXT_PWR_DOWN_PHY;
+
+                       reg |= (EXT_PWR_DOWN_DLL | EXT_PWR_DOWN_BIAS);
                        bcmgenet_ext_writel(priv, reg, EXT_EXT_PWR_MGMT);
 
                        bcmgenet_phy_power_set(priv->dev, false);
@@ -1037,12 +1132,34 @@ static void bcmgenet_power_up(struct bcmgenet_priv *priv,
 
        switch (mode) {
        case GENET_POWER_PASSIVE:
-               reg &= ~(EXT_PWR_DOWN_DLL | EXT_PWR_DOWN_PHY |
-                               EXT_PWR_DOWN_BIAS);
-               /* fallthrough */
+               reg &= ~(EXT_PWR_DOWN_DLL | EXT_PWR_DOWN_BIAS);
+               if (GENET_IS_V5(priv)) {
+                       reg &= ~(EXT_PWR_DOWN_PHY_EN |
+                                EXT_PWR_DOWN_PHY_RD |
+                                EXT_PWR_DOWN_PHY_SD |
+                                EXT_PWR_DOWN_PHY_RX |
+                                EXT_PWR_DOWN_PHY_TX |
+                                EXT_IDDQ_GLBL_PWR);
+                       reg |=   EXT_PHY_RESET;
+                       bcmgenet_ext_writel(priv, reg, EXT_EXT_PWR_MGMT);
+                       mdelay(1);
+
+                       reg &=  ~EXT_PHY_RESET;
+               } else {
+                       reg &= ~EXT_PWR_DOWN_PHY;
+                       reg |= EXT_PWR_DN_EN_LD;
+               }
+               bcmgenet_ext_writel(priv, reg, EXT_EXT_PWR_MGMT);
+               bcmgenet_phy_power_set(priv->dev, true);
+               bcmgenet_mii_reset(priv->dev);
+               break;
+
        case GENET_POWER_CABLE_SENSE:
                /* enable APD */
-               reg |= EXT_PWR_DN_EN_LD;
+               if (!GENET_IS_V5(priv)) {
+                       reg |= EXT_PWR_DN_EN_LD;
+                       bcmgenet_ext_writel(priv, reg, EXT_EXT_PWR_MGMT);
+               }
                break;
        case GENET_POWER_WOL_MAGIC:
                bcmgenet_wol_power_up_cfg(priv, mode);
@@ -1050,39 +1167,20 @@ static void bcmgenet_power_up(struct bcmgenet_priv *priv,
        default:
                break;
        }
-
-       bcmgenet_ext_writel(priv, reg, EXT_EXT_PWR_MGMT);
-       if (mode == GENET_POWER_PASSIVE) {
-               bcmgenet_phy_power_set(priv->dev, true);
-               bcmgenet_mii_reset(priv->dev);
-       }
 }
 
 /* ioctl handle special commands that are not present in ethtool. */
 static int bcmgenet_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
 {
        struct bcmgenet_priv *priv = netdev_priv(dev);
-       int val = 0;
 
        if (!netif_running(dev))
                return -EINVAL;
 
-       switch (cmd) {
-       case SIOCGMIIPHY:
-       case SIOCGMIIREG:
-       case SIOCSMIIREG:
-               if (!priv->phydev)
-                       val = -ENODEV;
-               else
-                       val = phy_mii_ioctl(priv->phydev, rq, cmd);
-               break;
-
-       default:
-               val = -EINVAL;
-               break;
-       }
+       if (!priv->phydev)
+               return -ENODEV;
 
-       return val;
+       return phy_mii_ioctl(priv->phydev, rq, cmd);
 }
 
 static struct enet_cb *bcmgenet_get_txcb(struct bcmgenet_priv *priv,
@@ -1167,21 +1265,24 @@ static unsigned int __bcmgenet_tx_reclaim(struct net_device *dev,
        struct bcmgenet_priv *priv = netdev_priv(dev);
        struct device *kdev = &priv->pdev->dev;
        struct enet_cb *tx_cb_ptr;
-       struct netdev_queue *txq;
        unsigned int pkts_compl = 0;
        unsigned int bytes_compl = 0;
        unsigned int c_index;
        unsigned int txbds_ready;
        unsigned int txbds_processed = 0;
 
-       /* Compute how many buffers are transmitted since last xmit call */
-       c_index = bcmgenet_tdma_ring_readl(priv, ring->index, TDMA_CONS_INDEX);
-       c_index &= DMA_C_INDEX_MASK;
-
-       if (likely(c_index >= ring->c_index))
-               txbds_ready = c_index - ring->c_index;
+       /* Clear status before servicing to reduce spurious interrupts */
+       if (ring->index == DESC_INDEX)
+               bcmgenet_intrl2_0_writel(priv, UMAC_IRQ_TXDMA_DONE,
+                                        INTRL2_CPU_CLEAR);
        else
-               txbds_ready = (DMA_C_INDEX_MASK + 1) - ring->c_index + c_index;
+               bcmgenet_intrl2_1_writel(priv, (1 << ring->index),
+                                        INTRL2_CPU_CLEAR);
+
+       /* Compute how many buffers have been transmitted since the last call */
+       c_index = bcmgenet_tdma_ring_readl(priv, ring->index, TDMA_CONS_INDEX)
+               & DMA_C_INDEX_MASK;
+       txbds_ready = (c_index - ring->c_index) & DMA_C_INDEX_MASK;
 
        netif_dbg(priv, tx_done, dev,
                  "%s ring=%d old_c_index=%u c_index=%u txbds_ready=%u\n",
@@ -1214,20 +1315,15 @@ static unsigned int __bcmgenet_tx_reclaim(struct net_device *dev,
        }
 
        ring->free_bds += txbds_processed;
-       ring->c_index = (ring->c_index + txbds_processed) & DMA_C_INDEX_MASK;
+       ring->c_index = c_index;
 
-       dev->stats.tx_packets += pkts_compl;
-       dev->stats.tx_bytes += bytes_compl;
+       ring->packets += pkts_compl;
+       ring->bytes += bytes_compl;
 
-       txq = netdev_get_tx_queue(dev, ring->queue);
-       netdev_tx_completed_queue(txq, pkts_compl, bytes_compl);
-
-       if (ring->free_bds > (MAX_SKB_FRAGS + 1)) {
-               if (netif_tx_queue_stopped(txq))
-                       netif_tx_wake_queue(txq);
-       }
+       netdev_tx_completed_queue(netdev_get_tx_queue(dev, ring->queue),
+                                 pkts_compl, bytes_compl);
 
-       return pkts_compl;
+       return txbds_processed;
 }
 
 static unsigned int bcmgenet_tx_reclaim(struct net_device *dev,
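The reworked reclaim also collapses the old two-branch consumer-index computation into a single masked subtraction: because the index space is a power of two, unsigned subtraction followed by DMA_C_INDEX_MASK yields the ring distance whether or not the hardware counter has wrapped. A minimal, runnable sketch of the idiom (the 0xFFFF mask value is the only assumption carried over from the driver):

#include <assert.h>

#define DMA_C_INDEX_MASK 0xFFFF /* assumption: 16-bit index space */

/* Distance from old_idx to new_idx in a circular index space whose
 * size is a power of two: unsigned subtraction wraps modulo 2^32,
 * and the mask reduces it modulo 2^16, covering both cases.
 */
static unsigned int ring_distance(unsigned int new_idx, unsigned int old_idx)
{
        return (new_idx - old_idx) & DMA_C_INDEX_MASK;
}

int main(void)
{
        assert(ring_distance(100, 90) == 10);   /* no wrap */
        assert(ring_distance(5, 0xFFFB) == 10); /* wrapped past 0xFFFF */
        return 0;
}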
@@ -1248,8 +1344,16 @@ static int bcmgenet_tx_poll(struct napi_struct *napi, int budget)
        struct bcmgenet_tx_ring *ring =
                container_of(napi, struct bcmgenet_tx_ring, napi);
        unsigned int work_done = 0;
+       struct netdev_queue *txq;
+       unsigned long flags;
 
-       work_done = bcmgenet_tx_reclaim(ring->priv->dev, ring);
+       spin_lock_irqsave(&ring->lock, flags);
+       work_done = __bcmgenet_tx_reclaim(ring->priv->dev, ring);
+       if (ring->free_bds > (MAX_SKB_FRAGS + 1)) {
+               txq = netdev_get_tx_queue(ring->priv->dev, ring->queue);
+               netif_tx_wake_queue(txq);
+       }
+       spin_unlock_irqrestore(&ring->lock, flags);
 
        if (work_done == 0) {
                napi_complete(napi);
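Taking ring->lock around both the reclaim and the conditional netif_tx_wake_queue() pairs this path with the transmit side, which stops the queue under the same lock when descriptors run low; serializing the two sides closes the window where a stop could land between the descriptor refill and the wake test. A userspace analogue of the pairing (a mutex standing in for the spinlock; the MAX_SKB_FRAGS value is illustrative):

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

#define MAX_SKB_FRAGS 17        /* illustrative value */

static pthread_mutex_t ring_lock = PTHREAD_MUTEX_INITIALIZER;
static unsigned int free_bds = 256;
static bool queue_stopped;

/* Transmit side: consume descriptors, stop the queue when low. */
static void xmit(unsigned int n)
{
        pthread_mutex_lock(&ring_lock);
        free_bds -= n;
        if (free_bds <= MAX_SKB_FRAGS + 1)
                queue_stopped = true;
        pthread_mutex_unlock(&ring_lock);
}

/* Reclaim side: refill and wake under the same lock, so a stop can
 * never slip in between the refill and the wake test.
 */
static void reclaim(unsigned int n)
{
        pthread_mutex_lock(&ring_lock);
        free_bds += n;
        if (free_bds > MAX_SKB_FRAGS + 1)
                queue_stopped = false;
        pthread_mutex_unlock(&ring_lock);
}

int main(void)
{
        xmit(240);
        printf("stopped=%d\n", queue_stopped);  /* 1: only 16 left */
        reclaim(100);
        printf("stopped=%d\n", queue_stopped);  /* 0: queue woken */
        return 0;
}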
@@ -1588,18 +1692,28 @@ static unsigned int bcmgenet_desc_rx(struct bcmgenet_rx_ring *ring,
        unsigned long dma_flag;
        int len;
        unsigned int rxpktprocessed = 0, rxpkttoprocess;
-       unsigned int p_index;
+       unsigned int p_index, mask;
        unsigned int discards;
        unsigned int chksum_ok = 0;
 
+       /* Clear status before servicing to reduce spurious interrupts */
+       if (ring->index == DESC_INDEX) {
+               bcmgenet_intrl2_0_writel(priv, UMAC_IRQ_RXDMA_DONE,
+                                        INTRL2_CPU_CLEAR);
+       } else {
+               mask = 1 << (UMAC_IRQ1_RX_INTR_SHIFT + ring->index);
+               bcmgenet_intrl2_1_writel(priv, mask, INTRL2_CPU_CLEAR);
+       }
+
        p_index = bcmgenet_rdma_ring_readl(priv, ring->index, RDMA_PROD_INDEX);
 
        discards = (p_index >> DMA_P_INDEX_DISCARD_CNT_SHIFT) &
                   DMA_P_INDEX_DISCARD_CNT_MASK;
        if (discards > ring->old_discards) {
                discards = discards - ring->old_discards;
-               dev->stats.rx_missed_errors += discards;
-               dev->stats.rx_errors += discards;
+               ring->errors += discards;
                ring->old_discards += discards;
 
                /* Clear HW register when we reach 75% of maximum 0xFFFF */
@@ -1611,12 +1725,7 @@ static unsigned int bcmgenet_desc_rx(struct bcmgenet_rx_ring *ring,
        }
 
        p_index &= DMA_P_INDEX_MASK;
-
-       if (likely(p_index >= ring->c_index))
-               rxpkttoprocess = p_index - ring->c_index;
-       else
-               rxpkttoprocess = (DMA_C_INDEX_MASK + 1) - ring->c_index +
-                                p_index;
+       rxpkttoprocess = (p_index - ring->c_index) & DMA_C_INDEX_MASK;
 
        netif_dbg(priv, rx_status, dev,
                  "RDMA: rxpkttoprocess=%d\n", rxpkttoprocess);
@@ -1627,7 +1736,7 @@ static unsigned int bcmgenet_desc_rx(struct bcmgenet_rx_ring *ring,
                skb = bcmgenet_rx_refill(priv, cb);
 
                if (unlikely(!skb)) {
-                       dev->stats.rx_dropped++;
+                       ring->dropped++;
                        goto next;
                }
 
@@ -1655,7 +1764,7 @@ static unsigned int bcmgenet_desc_rx(struct bcmgenet_rx_ring *ring,
                if (unlikely(!(dma_flag & DMA_EOP) || !(dma_flag & DMA_SOP))) {
                        netif_err(priv, rx_status, dev,
                                  "dropping fragmented packet!\n");
-                       dev->stats.rx_errors++;
+                       ring->errors++;
                        dev_kfree_skb_any(skb);
                        goto next;
                }
@@ -1704,8 +1813,8 @@ static unsigned int bcmgenet_desc_rx(struct bcmgenet_rx_ring *ring,
 
                /* Finish setting up the received SKB and send it to the kernel */
                skb->protocol = eth_type_trans(skb, priv->dev);
-               dev->stats.rx_packets++;
-               dev->stats.rx_bytes += len;
+               ring->packets++;
+               ring->bytes += len;
                if (dma_flag & DMA_RX_MULT)
                        dev->stats.multicast++;
 
@@ -1843,10 +1952,8 @@ static void bcmgenet_intr_disable(struct bcmgenet_priv *priv)
        /* Mask all interrupts. */
        bcmgenet_intrl2_0_writel(priv, 0xFFFFFFFF, INTRL2_CPU_MASK_SET);
        bcmgenet_intrl2_0_writel(priv, 0xFFFFFFFF, INTRL2_CPU_CLEAR);
-       bcmgenet_intrl2_0_writel(priv, 0, INTRL2_CPU_MASK_CLEAR);
        bcmgenet_intrl2_1_writel(priv, 0xFFFFFFFF, INTRL2_CPU_MASK_SET);
        bcmgenet_intrl2_1_writel(priv, 0xFFFFFFFF, INTRL2_CPU_CLEAR);
-       bcmgenet_intrl2_1_writel(priv, 0, INTRL2_CPU_MASK_CLEAR);
 }
 
 static void bcmgenet_link_intr_enable(struct bcmgenet_priv *priv)
@@ -1873,8 +1980,6 @@ static int init_umac(struct bcmgenet_priv *priv)
        int ret;
        u32 reg;
        u32 int0_enable = 0;
-       u32 int1_enable = 0;
-       int i;
 
        dev_dbg(&priv->pdev->dev, "bcmgenet: init_umac\n");
 
@@ -1901,12 +2006,6 @@ static int init_umac(struct bcmgenet_priv *priv)
 
        bcmgenet_intr_disable(priv);
 
-       /* Enable Rx default queue 16 interrupts */
-       int0_enable |= UMAC_IRQ_RXDMA_DONE;
-
-       /* Enable Tx default queue 16 interrupts */
-       int0_enable |= UMAC_IRQ_TXDMA_DONE;
-
        /* Configure backpressure vectors for MoCA */
        if (priv->phy_interface == PHY_INTERFACE_MODE_MOCA) {
                reg = bcmgenet_bp_mc_get(priv);
@@ -1924,18 +2023,8 @@ static int init_umac(struct bcmgenet_priv *priv)
        if (priv->hw_params->flags & GENET_HAS_MDIO_INTR)
                int0_enable |= (UMAC_IRQ_MDIO_DONE | UMAC_IRQ_MDIO_ERROR);
 
-       /* Enable Rx priority queue interrupts */
-       for (i = 0; i < priv->hw_params->rx_queues; ++i)
-               int1_enable |= (1 << (UMAC_IRQ1_RX_INTR_SHIFT + i));
-
-       /* Enable Tx priority queue interrupts */
-       for (i = 0; i < priv->hw_params->tx_queues; ++i)
-               int1_enable |= (1 << i);
-
        bcmgenet_intrl2_0_writel(priv, int0_enable, INTRL2_CPU_MASK_CLEAR);
-       bcmgenet_intrl2_1_writel(priv, int1_enable, INTRL2_CPU_MASK_CLEAR);
 
-       /* Enable rx/tx engine.*/
        dev_dbg(kdev, "done init umac\n");
 
        return 0;
@@ -2067,22 +2156,33 @@ static void bcmgenet_init_tx_napi(struct bcmgenet_priv *priv)
 static void bcmgenet_enable_tx_napi(struct bcmgenet_priv *priv)
 {
        unsigned int i;
+       u32 int0_enable = UMAC_IRQ_TXDMA_DONE;
+       u32 int1_enable = 0;
        struct bcmgenet_tx_ring *ring;
 
        for (i = 0; i < priv->hw_params->tx_queues; ++i) {
                ring = &priv->tx_rings[i];
                napi_enable(&ring->napi);
+               int1_enable |= (1 << i);
        }
 
        ring = &priv->tx_rings[DESC_INDEX];
        napi_enable(&ring->napi);
+
+       bcmgenet_intrl2_0_writel(priv, int0_enable, INTRL2_CPU_MASK_CLEAR);
+       bcmgenet_intrl2_1_writel(priv, int1_enable, INTRL2_CPU_MASK_CLEAR);
 }
 
 static void bcmgenet_disable_tx_napi(struct bcmgenet_priv *priv)
 {
        unsigned int i;
+       u32 int0_disable = UMAC_IRQ_TXDMA_DONE;
+       u32 int1_disable = 0xffff;
        struct bcmgenet_tx_ring *ring;
 
+       bcmgenet_intrl2_0_writel(priv, int0_disable, INTRL2_CPU_MASK_SET);
+       bcmgenet_intrl2_1_writel(priv, int1_disable, INTRL2_CPU_MASK_SET);
+
        for (i = 0; i < priv->hw_params->tx_queues; ++i) {
                ring = &priv->tx_rings[i];
                napi_disable(&ring->napi);
@@ -2195,22 +2295,33 @@ static void bcmgenet_init_rx_napi(struct bcmgenet_priv *priv)
 static void bcmgenet_enable_rx_napi(struct bcmgenet_priv *priv)
 {
        unsigned int i;
+       u32 int0_enable = UMAC_IRQ_RXDMA_DONE;
+       u32 int1_enable = 0;
        struct bcmgenet_rx_ring *ring;
 
        for (i = 0; i < priv->hw_params->rx_queues; ++i) {
                ring = &priv->rx_rings[i];
                napi_enable(&ring->napi);
+               int1_enable |= (1 << (UMAC_IRQ1_RX_INTR_SHIFT + i));
        }
 
        ring = &priv->rx_rings[DESC_INDEX];
        napi_enable(&ring->napi);
+
+       bcmgenet_intrl2_0_writel(priv, int0_enable, INTRL2_CPU_MASK_CLEAR);
+       bcmgenet_intrl2_1_writel(priv, int1_enable, INTRL2_CPU_MASK_CLEAR);
 }
 
 static void bcmgenet_disable_rx_napi(struct bcmgenet_priv *priv)
 {
        unsigned int i;
+       u32 int0_disable = UMAC_IRQ_RXDMA_DONE;
+       u32 int1_disable = 0xffff << UMAC_IRQ1_RX_INTR_SHIFT;
        struct bcmgenet_rx_ring *ring;
 
+       bcmgenet_intrl2_0_writel(priv, int0_disable, INTRL2_CPU_MASK_SET);
+       bcmgenet_intrl2_1_writel(priv, int1_disable, INTRL2_CPU_MASK_SET);
+
        for (i = 0; i < priv->hw_params->rx_queues; ++i) {
                ring = &priv->rx_rings[i];
                napi_disable(&ring->napi);
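With this change the per-ring interrupts are armed and masked in lockstep with NAPI: the enable paths build the INTRL2_1 bitmask they need (TX queue i maps to bit i, RX queue i to bit UMAC_IRQ1_RX_INTR_SHIFT + i), and the disable paths mask everything before napi_disable() so no late interrupt can schedule a poll on a context being torn down. The mask construction in isolation (the shift value 16 is an assumption, consistent with the 0xffff << UMAC_IRQ1_RX_INTR_SHIFT expression above):

#include <assert.h>

#define UMAC_IRQ1_RX_INTR_SHIFT 16      /* assumed; RX bits above the 16 TX bits */

/* Build the INTRL2_1 mask covering the given TX and RX priority queues. */
static unsigned int ring_irq_mask(unsigned int tx_queues, unsigned int rx_queues)
{
        unsigned int mask = 0, i;

        for (i = 0; i < tx_queues; i++)
                mask |= 1u << i;
        for (i = 0; i < rx_queues; i++)
                mask |= 1u << (UMAC_IRQ1_RX_INTR_SHIFT + i);
        return mask;
}

int main(void)
{
        assert(ring_irq_mask(4, 0) == 0x0000000f);
        assert(ring_irq_mask(0, 2) == 0x00030000);
        return 0;
}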
@@ -2457,24 +2568,28 @@ static int bcmgenet_init_dma(struct bcmgenet_priv *priv)
 /* Interrupt bottom half */
 static void bcmgenet_irq_task(struct work_struct *work)
 {
+       unsigned long flags;
+       unsigned int status;
        struct bcmgenet_priv *priv = container_of(
                        work, struct bcmgenet_priv, bcmgenet_irq_work);
 
        netif_dbg(priv, intr, priv->dev, "%s\n", __func__);
 
-       if (priv->irq0_stat & UMAC_IRQ_MPD_R) {
-               priv->irq0_stat &= ~UMAC_IRQ_MPD_R;
+       spin_lock_irqsave(&priv->lock, flags);
+       status = priv->irq0_stat;
+       priv->irq0_stat = 0;
+       spin_unlock_irqrestore(&priv->lock, flags);
+
+       if (status & UMAC_IRQ_MPD_R) {
                netif_dbg(priv, wol, priv->dev,
                          "magic packet detected, waking up\n");
                bcmgenet_power_up(priv, GENET_POWER_WOL_MAGIC);
        }
 
        /* Link UP/DOWN event */
-       if (priv->irq0_stat & UMAC_IRQ_LINK_EVENT) {
+       if (status & UMAC_IRQ_LINK_EVENT)
                phy_mac_interrupt(priv->phydev,
-                                 !!(priv->irq0_stat & UMAC_IRQ_LINK_UP));
-               priv->irq0_stat &= ~UMAC_IRQ_LINK_EVENT;
-       }
+                                 !!(status & UMAC_IRQ_LINK_UP));
 }
 
 /* bcmgenet_isr1: handle Rx and Tx priority queues */
@@ -2483,22 +2598,21 @@ static irqreturn_t bcmgenet_isr1(int irq, void *dev_id)
        struct bcmgenet_priv *priv = dev_id;
        struct bcmgenet_rx_ring *rx_ring;
        struct bcmgenet_tx_ring *tx_ring;
-       unsigned int index;
+       unsigned int index, status;
 
-       /* Save irq status for bottom-half processing. */
-       priv->irq1_stat =
-               bcmgenet_intrl2_1_readl(priv, INTRL2_CPU_STAT) &
+       /* Read irq status */
+       status = bcmgenet_intrl2_1_readl(priv, INTRL2_CPU_STAT) &
                ~bcmgenet_intrl2_1_readl(priv, INTRL2_CPU_MASK_STATUS);
 
        /* clear interrupts */
-       bcmgenet_intrl2_1_writel(priv, priv->irq1_stat, INTRL2_CPU_CLEAR);
+       bcmgenet_intrl2_1_writel(priv, status, INTRL2_CPU_CLEAR);
 
        netif_dbg(priv, intr, priv->dev,
-                 "%s: IRQ=0x%x\n", __func__, priv->irq1_stat);
+                 "%s: IRQ=0x%x\n", __func__, status);
 
        /* Check Rx priority queue interrupts */
        for (index = 0; index < priv->hw_params->rx_queues; index++) {
-               if (!(priv->irq1_stat & BIT(UMAC_IRQ1_RX_INTR_SHIFT + index)))
+               if (!(status & BIT(UMAC_IRQ1_RX_INTR_SHIFT + index)))
                        continue;
 
                rx_ring = &priv->rx_rings[index];
@@ -2511,7 +2625,7 @@ static irqreturn_t bcmgenet_isr1(int irq, void *dev_id)
 
        /* Check Tx priority queue interrupts */
        for (index = 0; index < priv->hw_params->tx_queues; index++) {
-               if (!(priv->irq1_stat & BIT(index)))
+               if (!(status & BIT(index)))
                        continue;
 
                tx_ring = &priv->tx_rings[index];
@@ -2531,19 +2645,20 @@ static irqreturn_t bcmgenet_isr0(int irq, void *dev_id)
        struct bcmgenet_priv *priv = dev_id;
        struct bcmgenet_rx_ring *rx_ring;
        struct bcmgenet_tx_ring *tx_ring;
+       unsigned int status;
+       unsigned long flags;
 
-       /* Save irq status for bottom-half processing. */
-       priv->irq0_stat =
-               bcmgenet_intrl2_0_readl(priv, INTRL2_CPU_STAT) &
+       /* Read irq status */
+       status = bcmgenet_intrl2_0_readl(priv, INTRL2_CPU_STAT) &
                ~bcmgenet_intrl2_0_readl(priv, INTRL2_CPU_MASK_STATUS);
 
        /* clear interrupts */
-       bcmgenet_intrl2_0_writel(priv, priv->irq0_stat, INTRL2_CPU_CLEAR);
+       bcmgenet_intrl2_0_writel(priv, status, INTRL2_CPU_CLEAR);
 
        netif_dbg(priv, intr, priv->dev,
-                 "IRQ=0x%x\n", priv->irq0_stat);
+                 "IRQ=0x%x\n", status);
 
-       if (priv->irq0_stat & UMAC_IRQ_RXDMA_DONE) {
+       if (status & UMAC_IRQ_RXDMA_DONE) {
                rx_ring = &priv->rx_rings[DESC_INDEX];
 
                if (likely(napi_schedule_prep(&rx_ring->napi))) {
@@ -2552,7 +2667,7 @@ static irqreturn_t bcmgenet_isr0(int irq, void *dev_id)
                }
        }
 
-       if (priv->irq0_stat & UMAC_IRQ_TXDMA_DONE) {
+       if (status & UMAC_IRQ_TXDMA_DONE) {
                tx_ring = &priv->tx_rings[DESC_INDEX];
 
                if (likely(napi_schedule_prep(&tx_ring->napi))) {
@@ -2565,18 +2680,28 @@ static irqreturn_t bcmgenet_isr0(int irq, void *dev_id)
                                UMAC_IRQ_PHY_DET_F |
                                UMAC_IRQ_LINK_EVENT |
                                UMAC_IRQ_HFB_SM |
-                               UMAC_IRQ_HFB_MM |
-                               UMAC_IRQ_MPD_R)) {
+                               UMAC_IRQ_HFB_MM)) {
                /* all other interrupts of interest are handled in the bottom half */
                schedule_work(&priv->bcmgenet_irq_work);
        }
 
        if ((priv->hw_params->flags & GENET_HAS_MDIO_INTR) &&
-           priv->irq0_stat & (UMAC_IRQ_MDIO_DONE | UMAC_IRQ_MDIO_ERROR)) {
-               priv->irq0_stat &= ~(UMAC_IRQ_MDIO_DONE | UMAC_IRQ_MDIO_ERROR);
+               status & (UMAC_IRQ_MDIO_DONE | UMAC_IRQ_MDIO_ERROR)) {
                wake_up(&priv->wq);
        }
 
+       /* all other interrupts of interest are handled in the bottom half */
+       status &= (UMAC_IRQ_LINK_EVENT |
+                  UMAC_IRQ_MPD_R);
+       if (status) {
+               /* Save irq status for bottom-half processing. */
+               spin_lock_irqsave(&priv->lock, flags);
+               priv->irq0_stat |= status;
+               spin_unlock_irqrestore(&priv->lock, flags);
+
+               schedule_work(&priv->bcmgenet_irq_work);
+       }
+
        return IRQ_HANDLED;
 }
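irq0_stat now behaves as a shared event accumulator: the hard IRQ ORs newly seen bits in under priv->lock, and bcmgenet_irq_task() snapshots and clears it in one critical section, so an event posted between the snapshot and the clear can be neither lost nor handled twice. The drain idiom in miniature (userspace, a mutex standing in for the irqsave spinlock):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static unsigned int irq_stat;   /* accumulated event bits */

/* Top half: OR in newly observed events. */
static void post_events(unsigned int bits)
{
        pthread_mutex_lock(&lock);
        irq_stat |= bits;
        pthread_mutex_unlock(&lock);
}

/* Bottom half: snapshot and clear in one critical section. */
static unsigned int drain_events(void)
{
        unsigned int status;

        pthread_mutex_lock(&lock);
        status = irq_stat;
        irq_stat = 0;
        pthread_mutex_unlock(&lock);
        return status;
}

int main(void)
{
        unsigned int first, second;

        post_events(0x1);
        post_events(0x4);
        first = drain_events();
        second = drain_events();
        printf("drained 0x%x, then 0x%x\n", first, second);     /* 0x5, then 0x0 */
        return 0;
}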
 
@@ -2801,6 +2926,8 @@ err_irq0:
 err_fini_dma:
        bcmgenet_fini_dma(priv);
 err_clk_disable:
+       if (priv->internal_phy)
+               bcmgenet_power_down(priv, GENET_POWER_PASSIVE);
        clk_disable_unprepare(priv->clk);
        return ret;
 }
@@ -2845,7 +2972,7 @@ static int bcmgenet_close(struct net_device *dev)
        if (ret)
                return ret;
 
-       /* Disable MAC transmit. TX DMA disabled have to done before this */
+       /* Disable MAC transmit. TX DMA must be disabled before this */
        umac_enable_set(priv, CMD_TX_EN, false);
 
        /* tx reclaim */
@@ -3025,6 +3152,48 @@ static int bcmgenet_set_mac_addr(struct net_device *dev, void *p)
        return 0;
 }
 
+static struct net_device_stats *bcmgenet_get_stats(struct net_device *dev)
+{
+       struct bcmgenet_priv *priv = netdev_priv(dev);
+       unsigned long tx_bytes = 0, tx_packets = 0;
+       unsigned long rx_bytes = 0, rx_packets = 0;
+       unsigned long rx_errors = 0, rx_dropped = 0;
+       struct bcmgenet_tx_ring *tx_ring;
+       struct bcmgenet_rx_ring *rx_ring;
+       unsigned int q;
+
+       for (q = 0; q < priv->hw_params->tx_queues; q++) {
+               tx_ring = &priv->tx_rings[q];
+               tx_bytes += tx_ring->bytes;
+               tx_packets += tx_ring->packets;
+       }
+       tx_ring = &priv->tx_rings[DESC_INDEX];
+       tx_bytes += tx_ring->bytes;
+       tx_packets += tx_ring->packets;
+
+       for (q = 0; q < priv->hw_params->rx_queues; q++) {
+               rx_ring = &priv->rx_rings[q];
+
+               rx_bytes += rx_ring->bytes;
+               rx_packets += rx_ring->packets;
+               rx_errors += rx_ring->errors;
+               rx_dropped += rx_ring->dropped;
+       }
+       rx_ring = &priv->rx_rings[DESC_INDEX];
+       rx_bytes += rx_ring->bytes;
+       rx_packets += rx_ring->packets;
+       rx_errors += rx_ring->errors;
+       rx_dropped += rx_ring->dropped;
+
+       dev->stats.tx_bytes = tx_bytes;
+       dev->stats.tx_packets = tx_packets;
+       dev->stats.rx_bytes = rx_bytes;
+       dev->stats.rx_packets = rx_packets;
+       dev->stats.rx_errors = rx_errors;
+       dev->stats.rx_missed_errors = rx_errors;
+       return &dev->stats;
+}
+
 static const struct net_device_ops bcmgenet_netdev_ops = {
        .ndo_open               = bcmgenet_open,
        .ndo_stop               = bcmgenet_close,
@@ -3037,6 +3206,7 @@ static const struct net_device_ops bcmgenet_netdev_ops = {
 #ifdef CONFIG_NET_POLL_CONTROLLER
        .ndo_poll_controller    = bcmgenet_poll_controller,
 #endif
+       .ndo_get_stats          = bcmgenet_get_stats,
 };
 
 /* Array of GENET hardware parameters/characteristics */
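The bcmgenet_get_stats() addition above folds the per-ring packet/byte/error/dropped counters (maintained lock-free by the NAPI paths earlier in this patch) into dev->stats on demand, rather than having the hot paths bump dev->stats directly. The aggregation step, reduced to a runnable sketch (the driver additionally folds in the default DESC_INDEX ring, omitted here):

#include <stdio.h>

#define NUM_Q 4

struct ring_stats {
        unsigned long packets;
        unsigned long bytes;
};

/* Fold per-queue counters into one total, the way bcmgenet_get_stats()
 * walks tx_rings[] and rx_rings[].
 */
static struct ring_stats sum_rings(const struct ring_stats *rings, int n)
{
        struct ring_stats total = { 0, 0 };
        int q;

        for (q = 0; q < n; q++) {
                total.packets += rings[q].packets;
                total.bytes += rings[q].bytes;
        }
        return total;
}

int main(void)
{
        struct ring_stats rings[NUM_Q] = {
                { 10, 1500 }, { 3, 180 }, { 0, 0 }, { 7, 910 },
        };
        struct ring_stats t = sum_rings(rings, NUM_Q);

        printf("packets=%lu bytes=%lu\n", t.packets, t.bytes);
        return 0;
}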
@@ -3110,6 +3280,25 @@ static struct bcmgenet_hw_params bcmgenet_hw_params[] = {
                .flags = GENET_HAS_40BITS | GENET_HAS_EXT |
                         GENET_HAS_MDIO_INTR | GENET_HAS_MOCA_LINK_DET,
        },
+       [GENET_V5] = {
+               .tx_queues = 4,
+               .tx_bds_per_q = 32,
+               .rx_queues = 0,
+               .rx_bds_per_q = 0,
+               .bp_in_en_shift = 17,
+               .bp_in_mask = 0x1ffff,
+               .hfb_filter_cnt = 48,
+               .hfb_filter_size = 128,
+               .qtag_mask = 0x3F,
+               .tbuf_offset = 0x0600,
+               .hfb_offset = 0x8000,
+               .hfb_reg_offset = 0xfc00,
+               .rdma_offset = 0x2000,
+               .tdma_offset = 0x4000,
+               .words_per_bd = 3,
+               .flags = GENET_HAS_40BITS | GENET_HAS_EXT |
+                        GENET_HAS_MDIO_INTR | GENET_HAS_MOCA_LINK_DET,
+       },
 };
 
 /* Infer hardware parameters from the detected GENET version */
@@ -3120,26 +3309,22 @@ static void bcmgenet_set_hw_params(struct bcmgenet_priv *priv)
        u8 major;
        u16 gphy_rev;
 
-       if (GENET_IS_V4(priv)) {
+       if (GENET_IS_V5(priv) || GENET_IS_V4(priv)) {
                bcmgenet_dma_regs = bcmgenet_dma_regs_v3plus;
                genet_dma_ring_regs = genet_dma_ring_regs_v4;
                priv->dma_rx_chk_bit = DMA_RX_CHK_V3PLUS;
-               priv->version = GENET_V4;
        } else if (GENET_IS_V3(priv)) {
                bcmgenet_dma_regs = bcmgenet_dma_regs_v3plus;
                genet_dma_ring_regs = genet_dma_ring_regs_v123;
                priv->dma_rx_chk_bit = DMA_RX_CHK_V3PLUS;
-               priv->version = GENET_V3;
        } else if (GENET_IS_V2(priv)) {
                bcmgenet_dma_regs = bcmgenet_dma_regs_v2;
                genet_dma_ring_regs = genet_dma_ring_regs_v123;
                priv->dma_rx_chk_bit = DMA_RX_CHK_V12;
-               priv->version = GENET_V2;
        } else if (GENET_IS_V1(priv)) {
                bcmgenet_dma_regs = bcmgenet_dma_regs_v1;
                genet_dma_ring_regs = genet_dma_ring_regs_v123;
                priv->dma_rx_chk_bit = DMA_RX_CHK_V12;
-               priv->version = GENET_V1;
        }
 
        /* enum genet_version starts at 1 */
@@ -3149,7 +3334,9 @@ static void bcmgenet_set_hw_params(struct bcmgenet_priv *priv)
        /* Read GENET HW version */
        reg = bcmgenet_sys_readl(priv, SYS_REV_CTRL);
        major = (reg >> 24 & 0x0f);
-       if (major == 5)
+       if (major == 6)
+               major = 5;
+       else if (major == 5)
                major = 4;
        else if (major == 0)
                major = 1;
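The SYS_REV_CTRL major field does not map one-to-one onto enum genet_version: silicon reports 6 for GENET_V5, 5 for GENET_V4 and 0 for GENET_V1, hence the translation above. As a standalone function with a couple of checks:

#include <assert.h>

/* Translate the raw SYS_REV_CTRL major field (bits 27:24) into the
 * 1-based numbering used by enum genet_version.
 */
static unsigned int genet_major(unsigned int rev_ctrl)
{
        unsigned int major = (rev_ctrl >> 24) & 0x0f;

        if (major == 6)
                major = 5;      /* GENET_V5 silicon reports 6 */
        else if (major == 5)
                major = 4;      /* GENET_V4 silicon reports 5 */
        else if (major == 0)
                major = 1;      /* GENET_V1 silicon reports 0 */
        return major;
}

int main(void)
{
        assert(genet_major(0x06000000) == 5);
        assert(genet_major(0x05000000) == 4);
        assert(genet_major(0x00000000) == 1);
        return 0;
}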
@@ -3177,18 +3364,24 @@ static void bcmgenet_set_hw_params(struct bcmgenet_priv *priv)
         */
        gphy_rev = reg & 0xffff;
 
+       if (GENET_IS_V5(priv)) {
+               /* The EPHY revision should come from the MDIO registers of
+                * the PHY, not from GENET.
+                */
+               if (gphy_rev != 0) {
+                       pr_warn("GENET is reporting EPHY revision: 0x%04x\n",
+                               gphy_rev);
+               }
+       /* These values are reserved and require special treatment */
+       } else if (gphy_rev == 0 || gphy_rev == 0x01ff) {
+               pr_warn("Invalid GPHY revision detected: 0x%04x\n", gphy_rev);
+               return;
        /* This is the good old scheme, just GPHY major, no minor nor patch */
-       if ((gphy_rev & 0xf0) != 0)
+       } else if ((gphy_rev & 0xf0) != 0) {
                priv->gphy_rev = gphy_rev << 8;
-
        /* This is the new scheme, GPHY major rolls over with 0x10 = rev G0 */
-       else if ((gphy_rev & 0xff00) != 0)
+       } else if ((gphy_rev & 0xff00) != 0) {
                priv->gphy_rev = gphy_rev;
-
-       /* This is reserved so should require special treatment */
-       else if (gphy_rev == 0 || gphy_rev == 0x01ff) {
-               pr_warn("Invalid GPHY revision detected: 0x%04x\n", gphy_rev);
-               return;
        }
 
 #ifdef CONFIG_PHYS_ADDR_T_64BIT
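The reordered revision checks amount to a three-way classifier over the 16-bit GPHY revision word: reserved values are rejected first, then the old scheme (major in the low byte) and the new scheme (major in the high byte) are tried in turn. The same decision tree as a pure function:

#include <stdio.h>

enum gphy_scheme { GPHY_RESERVED, GPHY_OLD, GPHY_NEW, GPHY_UNKNOWN };

static enum gphy_scheme classify_gphy_rev(unsigned int rev)
{
        if (rev == 0 || rev == 0x01ff)
                return GPHY_RESERVED;   /* warn and bail out */
        if ((rev & 0xf0) != 0)
                return GPHY_OLD;        /* major only; driver stores rev << 8 */
        if ((rev & 0xff00) != 0)
                return GPHY_NEW;        /* major rolls over, 0x10 = rev G0 */
        return GPHY_UNKNOWN;            /* matches none of the checks above */
}

int main(void)
{
        printf("%d %d %d\n",
               classify_gphy_rev(0x01ff),       /* reserved */
               classify_gphy_rev(0x00a0),       /* old scheme */
               classify_gphy_rev(0x1000));      /* new scheme */
        return 0;
}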
@@ -3219,6 +3412,7 @@ static const struct of_device_id bcmgenet_match[] = {
        { .compatible = "brcm,genet-v2", .data = (void *)GENET_V2 },
        { .compatible = "brcm,genet-v3", .data = (void *)GENET_V3 },
        { .compatible = "brcm,genet-v4", .data = (void *)GENET_V4 },
+       { .compatible = "brcm,genet-v5", .data = (void *)GENET_V5 },
        { },
 };
 MODULE_DEVICE_TABLE(of, bcmgenet_match);
@@ -3233,6 +3427,7 @@ static int bcmgenet_probe(struct platform_device *pdev)
        const void *macaddr;
        struct resource *r;
        int err = -EIO;
+       const char *phy_mode_str;
 
        /* Up to GENET_MAX_MQ_CNT + 1 TX queues and RX queues */
        dev = alloc_etherdev_mqs(sizeof(*priv), GENET_MAX_MQ_CNT + 1,
@@ -3276,6 +3471,8 @@ static int bcmgenet_probe(struct platform_device *pdev)
                goto err;
        }
 
+       spin_lock_init(&priv->lock);
+
        SET_NETDEV_DEV(dev, &pdev->dev);
        dev_set_drvdata(&pdev->dev, dev);
        ether_addr_copy(dev->dev_addr, macaddr);
@@ -3338,6 +3535,13 @@ static int bcmgenet_probe(struct platform_device *pdev)
                priv->clk_eee = NULL;
        }
 
+       /* If this is an internal GPHY, power it on now, before UniMAC is
+        * brought out of reset, as absolutely no UniMAC activity is allowed
+        */
+       if (dn && !of_property_read_string(dn, "phy-mode", &phy_mode_str) &&
+           !strcasecmp(phy_mode_str, "internal"))
+               bcmgenet_power_up(priv, GENET_POWER_PASSIVE);
+
        err = reset_umac(priv);
        if (err)
                goto err_clk_disable;
@@ -3395,7 +3599,8 @@ static int bcmgenet_suspend(struct device *d)
 
        bcmgenet_netif_stop(dev);
 
-       phy_suspend(priv->phydev);
+       if (!device_may_wakeup(d))
+               phy_suspend(priv->phydev);
 
        netif_device_detach(dev);
 
@@ -3406,7 +3611,7 @@ static int bcmgenet_suspend(struct device *d)
        if (ret)
                return ret;
 
-       /* Disable MAC transmit. TX DMA disabled have to done before this */
+       /* Disable MAC transmit. TX DMA must be disabled before this */
        umac_enable_set(priv, CMD_TX_EN, false);
 
        /* tx reclaim */
@@ -3492,7 +3697,8 @@ static int bcmgenet_resume(struct device *d)
 
        netif_device_attach(dev);
 
-       phy_resume(priv->phydev);
+       if (!device_may_wakeup(d))
+               phy_resume(priv->phydev);
 
        if (priv->eee.eee_enabled)
                bcmgenet_eee_enable_set(dev, true);
@@ -3502,6 +3708,8 @@ static int bcmgenet_resume(struct device *d)
        return 0;
 
 out_clk_disable:
+       if (priv->internal_phy)
+               bcmgenet_power_down(priv, GENET_POWER_PASSIVE);
        clk_disable_unprepare(priv->clk);
        return ret;
 }
index 1e2dc34d331a49e05a8fc9a66156dfeeb00ee10f..efd07020b89fc3a7bd3c68fce1bbd7fe406acfcf 100644
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2014 Broadcom Corporation
+ * Copyright (c) 2014-2017 Broadcom
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 as
@@ -214,7 +214,9 @@ struct bcmgenet_mib_counters {
 #define  MDIO_REG_SHIFT                        16
 #define  MDIO_REG_MASK                 0x1F
 
-#define UMAC_RBUF_OVFL_CNT             0x61C
+#define UMAC_RBUF_OVFL_CNT_V1          0x61C
+#define RBUF_OVFL_CNT_V2               0x80
+#define RBUF_OVFL_CNT_V3PLUS           0x94
 
 #define UMAC_MPD_CTRL                  0x620
 #define  MPD_EN                                (1 << 0)
@@ -224,7 +226,9 @@ struct bcmgenet_mib_counters {
 
 #define UMAC_MPD_PW_MS                 0x624
 #define UMAC_MPD_PW_LS                 0x628
-#define UMAC_RBUF_ERR_CNT              0x634
+#define UMAC_RBUF_ERR_CNT_V1           0x634
+#define RBUF_ERR_CNT_V2                        0x84
+#define RBUF_ERR_CNT_V3PLUS            0x98
 #define UMAC_MDF_ERR_CNT               0x638
 #define UMAC_MDF_CTRL                  0x650
 #define UMAC_MDF_ADDR                  0x654
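Splitting UMAC_RBUF_OVFL_CNT and UMAC_RBUF_ERR_CNT into per-version offsets means readers of these counters must now dispatch on the detected GENET version; on v1 the counter still lives in the UMAC block, while v2 and v3+ moved it into the RBUF block. A hypothetical helper showing the shape of that dispatch (the driver's actual accessor is not part of this hunk, so the function name here is illustrative):

#include <assert.h>

/* Offsets copied from the header changes above. */
#define UMAC_RBUF_OVFL_CNT_V1   0x61C
#define RBUF_OVFL_CNT_V2        0x80
#define RBUF_OVFL_CNT_V3PLUS    0x94

enum genet_version { GENET_V1 = 1, GENET_V2, GENET_V3, GENET_V4, GENET_V5 };

/* Hypothetical: pick the overflow-counter offset for a given core.
 * The v1 offset is relative to the UMAC block, the others to the
 * RBUF block, so a real accessor must also pick the register base.
 */
static unsigned int rbuf_ovfl_cnt_offset(enum genet_version v)
{
        switch (v) {
        case GENET_V1:
                return UMAC_RBUF_OVFL_CNT_V1;
        case GENET_V2:
                return RBUF_OVFL_CNT_V2;
        default:
                return RBUF_OVFL_CNT_V3PLUS;    /* v3 and newer */
        }
}

int main(void)
{
        assert(rbuf_ovfl_cnt_offset(GENET_V1) == 0x61C);
        assert(rbuf_ovfl_cnt_offset(GENET_V5) == 0x94);
        return 0;
}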
@@ -351,8 +355,14 @@ struct bcmgenet_mib_counters {
 #define  EXT_PWR_DN_EN_LD              (1 << 3)
 #define  EXT_ENERGY_DET                        (1 << 4)
 #define  EXT_IDDQ_FROM_PHY             (1 << 5)
+#define  EXT_IDDQ_GLBL_PWR             (1 << 7)
 #define  EXT_PHY_RESET                 (1 << 8)
 #define  EXT_ENERGY_DET_MASK           (1 << 12)
+#define  EXT_PWR_DOWN_PHY_TX           (1 << 16)
+#define  EXT_PWR_DOWN_PHY_RX           (1 << 17)
+#define  EXT_PWR_DOWN_PHY_SD           (1 << 18)
+#define  EXT_PWR_DOWN_PHY_RD           (1 << 19)
+#define  EXT_PWR_DOWN_PHY_EN           (1 << 20)
 
 #define EXT_RGMII_OOB_CTRL             0x0C
 #define  RGMII_LINK                    (1 << 4)
@@ -495,13 +505,15 @@ enum bcmgenet_version {
        GENET_V1 = 1,
        GENET_V2,
        GENET_V3,
-       GENET_V4
+       GENET_V4,
+       GENET_V5
 };
 
 #define GENET_IS_V1(p) ((p)->version == GENET_V1)
 #define GENET_IS_V2(p) ((p)->version == GENET_V2)
 #define GENET_IS_V3(p) ((p)->version == GENET_V3)
 #define GENET_IS_V4(p) ((p)->version == GENET_V4)
+#define GENET_IS_V5(p) ((p)->version == GENET_V5)
 
 /* Hardware flags */
 #define GENET_HAS_40BITS       (1 << 0)
@@ -540,6 +552,8 @@ struct bcmgenet_skb_cb {
 struct bcmgenet_tx_ring {
        spinlock_t      lock;           /* ring lock */
        struct napi_struct napi;        /* NAPI per tx queue */
+       unsigned long   packets;
+       unsigned long   bytes;
        unsigned int    index;          /* ring index */
        unsigned int    queue;          /* queue index */
        struct enet_cb  *cbs;           /* tx ring buffer control block*/
@@ -558,6 +572,10 @@ struct bcmgenet_tx_ring {
 
 struct bcmgenet_rx_ring {
        struct napi_struct napi;        /* Rx NAPI struct */
+       unsigned long   bytes;
+       unsigned long   packets;
+       unsigned long   errors;
+       unsigned long   dropped;
        unsigned int    index;          /* Rx ring index */
        struct enet_cb  *cbs;           /* Rx ring buffer control block */
        unsigned int    size;           /* Rx ring size */
@@ -619,11 +637,13 @@ struct bcmgenet_priv {
        struct work_struct bcmgenet_irq_work;
        int irq0;
        int irq1;
-       unsigned int irq0_stat;
-       unsigned int irq1_stat;
        int wol_irq;
        bool wol_irq_disabled;
 
+       /* shared status */
+       spinlock_t lock;
+       unsigned int irq0_stat;
+
        /* HW descriptors/checksum variables */
        bool desc_64b_en;
        bool desc_rxchk_en;
index b97122926d3aa91210a8945d45f268d370c86ee4..2fbd027f0148f96003a08405177883e9df832769 100644
@@ -1,7 +1,7 @@
 /*
  * Broadcom GENET (Gigabit Ethernet) Wake-on-LAN support
  *
- * Copyright (c) 2014 Broadcom Corporation
+ * Copyright (c) 2014-2017 Broadcom
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 as
@@ -127,7 +127,6 @@ int bcmgenet_wol_power_down_cfg(struct bcmgenet_priv *priv,
                                enum bcmgenet_power_mode mode)
 {
        struct net_device *dev = priv->dev;
-       u32 cpu_mask_clear;
        int retries = 0;
        u32 reg;
 
@@ -173,18 +172,12 @@ int bcmgenet_wol_power_down_cfg(struct bcmgenet_priv *priv,
                bcmgenet_ext_writel(priv, reg, EXT_EXT_PWR_MGMT);
        }
 
-       /* Enable the MPD interrupt */
-       cpu_mask_clear = UMAC_IRQ_MPD_R;
-
-       bcmgenet_intrl2_0_writel(priv, cpu_mask_clear, INTRL2_CPU_MASK_CLEAR);
-
        return 0;
 }
 
 void bcmgenet_wol_power_up_cfg(struct bcmgenet_priv *priv,
                               enum bcmgenet_power_mode mode)
 {
-       u32 cpu_mask_set;
        u32 reg;
 
        if (mode != GENET_POWER_WOL_MAGIC) {
@@ -201,10 +194,4 @@ void bcmgenet_wol_power_up_cfg(struct bcmgenet_priv *priv,
        reg &= ~CMD_CRC_FWD;
        bcmgenet_umac_writel(priv, reg, UMAC_CMD);
        priv->crc_fwd_en = 0;
-
-       /* Stop monitoring magic packet IRQ */
-       cpu_mask_set = UMAC_IRQ_MPD_R;
-
-       /* Stop monitoring magic packet IRQ */
-       bcmgenet_intrl2_0_writel(priv, cpu_mask_set, INTRL2_CPU_MASK_SET);
 }
index e87607621e62a076104d67046a10603305d66ecf..285676f8da6bf3b9b0bb42e8a5407be56e478796 100644
@@ -1,7 +1,7 @@
 /*
  * Broadcom GENET MDIO routines
  *
- * Copyright (c) 2014 Broadcom Corporation
+ * Copyright (c) 2014-2017 Broadcom
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 as
@@ -195,53 +195,43 @@ void bcmgenet_phy_power_set(struct net_device *dev, bool enable)
        u32 reg = 0;
 
        /* EXT_GPHY_CTRL is only valid for GENETv4 and onward */
-       if (!GENET_IS_V4(priv))
-               return;
-
-       reg = bcmgenet_ext_readl(priv, EXT_GPHY_CTRL);
-       if (enable) {
-               reg &= ~EXT_CK25_DIS;
-               bcmgenet_ext_writel(priv, reg, EXT_GPHY_CTRL);
-               mdelay(1);
-
-               reg &= ~(EXT_CFG_IDDQ_BIAS | EXT_CFG_PWR_DOWN);
-               reg |= EXT_GPHY_RESET;
+       if (GENET_IS_V4(priv)) {
+               reg = bcmgenet_ext_readl(priv, EXT_GPHY_CTRL);
+               if (enable) {
+                       reg &= ~EXT_CK25_DIS;
+                       bcmgenet_ext_writel(priv, reg, EXT_GPHY_CTRL);
+                       mdelay(1);
+
+                       reg &= ~(EXT_CFG_IDDQ_BIAS | EXT_CFG_PWR_DOWN);
+                       reg |= EXT_GPHY_RESET;
+                       bcmgenet_ext_writel(priv, reg, EXT_GPHY_CTRL);
+                       mdelay(1);
+
+                       reg &= ~EXT_GPHY_RESET;
+               } else {
+                       reg |= EXT_CFG_IDDQ_BIAS | EXT_CFG_PWR_DOWN |
+                              EXT_GPHY_RESET;
+                       bcmgenet_ext_writel(priv, reg, EXT_GPHY_CTRL);
+                       mdelay(1);
+                       reg |= EXT_CK25_DIS;
+               }
                bcmgenet_ext_writel(priv, reg, EXT_GPHY_CTRL);
-               mdelay(1);
-
-               reg &= ~EXT_GPHY_RESET;
+               udelay(60);
        } else {
-               reg |= EXT_CFG_IDDQ_BIAS | EXT_CFG_PWR_DOWN | EXT_GPHY_RESET;
-               bcmgenet_ext_writel(priv, reg, EXT_GPHY_CTRL);
                mdelay(1);
-               reg |= EXT_CK25_DIS;
        }
-       bcmgenet_ext_writel(priv, reg, EXT_GPHY_CTRL);
-       udelay(60);
-}
-
-static void bcmgenet_internal_phy_setup(struct net_device *dev)
-{
-       struct bcmgenet_priv *priv = netdev_priv(dev);
-       u32 reg;
-
-       /* Power up PHY */
-       bcmgenet_phy_power_set(dev, true);
-       /* enable APD */
-       reg = bcmgenet_ext_readl(priv, EXT_EXT_PWR_MGMT);
-       reg |= EXT_PWR_DN_EN_LD;
-       bcmgenet_ext_writel(priv, reg, EXT_EXT_PWR_MGMT);
-       bcmgenet_mii_reset(dev);
 }
 
 static void bcmgenet_moca_phy_setup(struct bcmgenet_priv *priv)
 {
        u32 reg;
 
-       /* Speed settings are set in bcmgenet_mii_setup() */
-       reg = bcmgenet_sys_readl(priv, SYS_PORT_CTRL);
-       reg |= LED_ACT_SOURCE_MAC;
-       bcmgenet_sys_writel(priv, reg, SYS_PORT_CTRL);
+       if (!GENET_IS_V5(priv)) {
+               /* Speed settings are set in bcmgenet_mii_setup() */
+               reg = bcmgenet_sys_readl(priv, SYS_PORT_CTRL);
+               reg |= LED_ACT_SOURCE_MAC;
+               bcmgenet_sys_writel(priv, reg, SYS_PORT_CTRL);
+       }
 
        if (priv->hw_params->flags & GENET_HAS_MOCA_LINK_DET)
                fixed_phy_set_link_update(priv->phydev,
@@ -281,7 +271,6 @@ int bcmgenet_mii_config(struct net_device *dev)
 
                if (priv->internal_phy) {
                        phy_name = "internal PHY";
-                       bcmgenet_internal_phy_setup(dev);
                } else if (priv->phy_interface == PHY_INTERFACE_MODE_MOCA) {
                        phy_name = "MoCA";
                        bcmgenet_moca_phy_setup(priv);
index 30d1eb9ebec9afab2271db1f8c0b4f448b64da08..f395b951f5e77bca9a926ea3f1210bf1fcb13ded 100644
@@ -825,6 +825,7 @@ static int tg3_ape_event_lock(struct tg3 *tp, u32 timeout_us)
        return timeout_us ? 0 : -EBUSY;
 }
 
+#ifdef CONFIG_TIGON3_HWMON
 static int tg3_ape_wait_for_event(struct tg3 *tp, u32 timeout_us)
 {
        u32 i, apedata;
@@ -904,6 +905,7 @@ static int tg3_ape_scratchpad_read(struct tg3 *tp, u32 *data, u32 base_off,
 
        return 0;
 }
+#endif
 
 static int tg3_ape_send_event(struct tg3 *tp, u32 event)
 {
@@ -10744,6 +10746,7 @@ static int tg3_init_hw(struct tg3 *tp, bool reset_phy)
        return tg3_reset_hw(tp, reset_phy);
 }
 
+#ifdef CONFIG_TIGON3_HWMON
 static void tg3_sd_scan_scratchpad(struct tg3 *tp, struct tg3_ocir *ocir)
 {
        int i;
@@ -10826,6 +10829,10 @@ static void tg3_hwmon_open(struct tg3 *tp)
                dev_err(&pdev->dev, "Cannot register hwmon device, aborting\n");
        }
 }
+#else
+static inline void tg3_hwmon_close(struct tg3 *tp) { }
+static inline void tg3_hwmon_open(struct tg3 *tp) { }
+#endif /* CONFIG_TIGON3_HWMON */
 
 
 #define TG3_STAT_ADD32(PSTAT, REG) \
index 9e59663a6eadb012de6f4a4474484800401fce3b..0f6811860ad51de9b871e806f3f254a1abfcf2eb 100644
@@ -1930,13 +1930,13 @@ static void
 bfa_ioc_send_enable(struct bfa_ioc *ioc)
 {
        struct bfi_ioc_ctrl_req enable_req;
-       struct timeval tv;
 
        bfi_h2i_set(enable_req.mh, BFI_MC_IOC, BFI_IOC_H2I_ENABLE_REQ,
                    bfa_ioc_portid(ioc));
        enable_req.clscode = htons(ioc->clscode);
-       do_gettimeofday(&tv);
-       enable_req.tv_sec = ntohl(tv.tv_sec);
+       enable_req.rsvd = htons(0);
+       /* overflow in 2106 */
+       enable_req.tv_sec = ntohl(ktime_get_real_seconds());
        bfa_ioc_mbox_send(ioc, &enable_req, sizeof(struct bfi_ioc_ctrl_req));
 }
 
@@ -1947,6 +1947,10 @@ bfa_ioc_send_disable(struct bfa_ioc *ioc)
 
        bfi_h2i_set(disable_req.mh, BFI_MC_IOC, BFI_IOC_H2I_DISABLE_REQ,
                    bfa_ioc_portid(ioc));
+       disable_req.clscode = htons(ioc->clscode);
+       disable_req.rsvd = htons(0);
+       /* overflow in 2106 */
+       disable_req.tv_sec = ntohl(ktime_get_real_seconds());
        bfa_ioc_mbox_send(ioc, &disable_req, sizeof(struct bfi_ioc_ctrl_req));
 }
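Both request builders drop do_gettimeofday(), whose 32-bit time_t overflows in 2038 on 32-bit systems, in favor of ktime_get_real_seconds(), which returns a 64-bit count; the value is then truncated to the 32-bit tv_sec field on the wire, which is what the "overflow in 2106" comments refer to (2^32 seconds after 1970). A userspace illustration of that truncation:

#include <stdint.h>
#include <stdio.h>
#include <time.h>

int main(void)
{
        int64_t now = (int64_t)time(NULL);      /* time64_t analogue */
        uint32_t wire = (uint32_t)now;          /* what fits in tv_sec on the wire */

        /* A u32 seconds counter wraps after 2^32 s, roughly 136 years,
         * i.e. in February 2106 for the 1970 epoch.
         */
        printf("now=%lld wire=%u\n", (long long)now, wire);
        return 0;
}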
 
index 05c1c1dd7751bd720fac026876c7fcf7392eca03..cebfe3bd086e36f60f717579f03037058b1d1d9e 100644
@@ -325,7 +325,7 @@ bnad_debugfs_write_regrd(struct file *file, const char __user *buf,
                return PTR_ERR(kern_buf);
 
        rc = sscanf(kern_buf, "%x:%x", &addr, &len);
-       if (rc < 2) {
+       if (rc < 2 || len > UINT_MAX >> 2) {
                netdev_warn(bnad->netdev, "failed to read user buffer\n");
                kfree(kern_buf);
                return -EINVAL;
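The added upper bound on len matters because the requested length is later used as a count of 32-bit registers, i.e. scaled by four; any value above UINT_MAX >> 2 would wrap that multiplication and under-allocate the buffer. The guard in isolation (the scaling context is inferred from the bound, not shown in this hunk):

#include <limits.h>
#include <stdio.h>

/* Reject word counts whose byte size would overflow an unsigned int. */
static int words_to_bytes(unsigned int len, unsigned int *bytes)
{
        if (len > UINT_MAX >> 2)
                return -1;      /* len * 4 would wrap */
        *bytes = len << 2;
        return 0;
}

int main(void)
{
        unsigned int n;

        printf("%d\n", words_to_bytes(8, &n));                  /* 0: 32 bytes */
        printf("%d\n", words_to_bytes(UINT_MAX / 2, &n));       /* -1: rejected */
        return 0;
}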
index 30606b11b128e9d169f421cacbe6b80b962ed821..5cbd1e7a926aa2f1eaf3a2f73aa75fe231b219f2 100644
@@ -684,8 +684,8 @@ static void macb_tx_error_task(struct work_struct *work)
                                netdev_vdbg(bp->dev, "txerr skb %u (data %p) TX complete\n",
                                            macb_tx_ring_wrap(bp, tail),
                                            skb->data);
-                               bp->stats.tx_packets++;
-                               bp->stats.tx_bytes += skb->len;
+                               bp->dev->stats.tx_packets++;
+                               bp->dev->stats.tx_bytes += skb->len;
                        }
                } else {
                        /* "Buffers exhausted mid-frame" errors may only happen
@@ -778,8 +778,8 @@ static void macb_tx_interrupt(struct macb_queue *queue)
                                netdev_vdbg(bp->dev, "skb %u (data %p) TX complete\n",
                                            macb_tx_ring_wrap(bp, tail),
                                            skb->data);
-                               bp->stats.tx_packets++;
-                               bp->stats.tx_bytes += skb->len;
+                               bp->dev->stats.tx_packets++;
+                               bp->dev->stats.tx_bytes += skb->len;
                        }
 
                        /* Now we can safely release resources */
@@ -911,14 +911,14 @@ static int gem_rx(struct macb *bp, int budget)
                if (!(ctrl & MACB_BIT(RX_SOF) && ctrl & MACB_BIT(RX_EOF))) {
                        netdev_err(bp->dev,
                                   "not whole frame pointed by descriptor\n");
-                       bp->stats.rx_dropped++;
+                       bp->dev->stats.rx_dropped++;
                        break;
                }
                skb = bp->rx_skbuff[entry];
                if (unlikely(!skb)) {
                        netdev_err(bp->dev,
                                   "inconsistent Rx descriptor chain\n");
-                       bp->stats.rx_dropped++;
+                       bp->dev->stats.rx_dropped++;
                        break;
                }
                /* now everything is ready for receiving packet */
@@ -938,8 +938,8 @@ static int gem_rx(struct macb *bp, int budget)
                    GEM_BFEXT(RX_CSUM, ctrl) & GEM_RX_CSUM_CHECKED_MASK)
                        skb->ip_summed = CHECKSUM_UNNECESSARY;
 
-               bp->stats.rx_packets++;
-               bp->stats.rx_bytes += skb->len;
+               bp->dev->stats.rx_packets++;
+               bp->dev->stats.rx_bytes += skb->len;
 
 #if defined(DEBUG) && defined(VERBOSE_DEBUG)
                netdev_vdbg(bp->dev, "received skb of length %u, csum: %08x\n",
@@ -984,7 +984,7 @@ static int macb_rx_frame(struct macb *bp, unsigned int first_frag,
         */
        skb = netdev_alloc_skb(bp->dev, len + NET_IP_ALIGN);
        if (!skb) {
-               bp->stats.rx_dropped++;
+               bp->dev->stats.rx_dropped++;
                for (frag = first_frag; ; frag++) {
                        desc = macb_rx_desc(bp, frag);
                        desc->addr &= ~MACB_BIT(RX_USED);
@@ -1030,8 +1030,8 @@ static int macb_rx_frame(struct macb *bp, unsigned int first_frag,
        __skb_pull(skb, NET_IP_ALIGN);
        skb->protocol = eth_type_trans(skb, bp->dev);
 
-       bp->stats.rx_packets++;
-       bp->stats.rx_bytes += skb->len;
+       bp->dev->stats.rx_packets++;
+       bp->dev->stats.rx_bytes += skb->len;
        netdev_vdbg(bp->dev, "received skb of length %u, csum: %08x\n",
                    skb->len, skb->csum);
        netif_receive_skb(skb);
@@ -2210,7 +2210,7 @@ static void gem_update_stats(struct macb *bp)
 static struct net_device_stats *gem_get_stats(struct macb *bp)
 {
        struct gem_stats *hwstat = &bp->hw_stats.gem;
-       struct net_device_stats *nstat = &bp->stats;
+       struct net_device_stats *nstat = &bp->dev->stats;
 
        gem_update_stats(bp);
 
@@ -2281,7 +2281,7 @@ static void gem_get_ethtool_strings(struct net_device *dev, u32 sset, u8 *p)
 static struct net_device_stats *macb_get_stats(struct net_device *dev)
 {
        struct macb *bp = netdev_priv(dev);
-       struct net_device_stats *nstat = &bp->stats;
+       struct net_device_stats *nstat = &bp->dev->stats;
        struct macb_stats *hwstat = &bp->hw_stats.macb;
 
        if (macb_is_gem(bp))
@@ -2993,15 +2993,15 @@ static void at91ether_rx(struct net_device *dev)
                        memcpy(skb_put(skb, pktlen), p_recv, pktlen);
 
                        skb->protocol = eth_type_trans(skb, dev);
-                       lp->stats.rx_packets++;
-                       lp->stats.rx_bytes += pktlen;
+                       dev->stats.rx_packets++;
+                       dev->stats.rx_bytes += pktlen;
                        netif_rx(skb);
                } else {
-                       lp->stats.rx_dropped++;
+                       dev->stats.rx_dropped++;
                }
 
                if (desc->ctrl & MACB_BIT(RX_MHASH_MATCH))
-                       lp->stats.multicast++;
+                       dev->stats.multicast++;
 
                /* reset ownership bit */
                desc->addr &= ~MACB_BIT(RX_USED);
@@ -3036,15 +3036,15 @@ static irqreturn_t at91ether_interrupt(int irq, void *dev_id)
        if (intstatus & MACB_BIT(TCOMP)) {
                /* The TCOM bit is set even if the transmission failed */
                if (intstatus & (MACB_BIT(ISR_TUND) | MACB_BIT(ISR_RLE)))
-                       lp->stats.tx_errors++;
+                       dev->stats.tx_errors++;
 
                if (lp->skb) {
                        dev_kfree_skb_irq(lp->skb);
                        lp->skb = NULL;
                        dma_unmap_single(NULL, lp->skb_physaddr,
                                         lp->skb_length, DMA_TO_DEVICE);
-                       lp->stats.tx_packets++;
-                       lp->stats.tx_bytes += lp->skb_length;
+                       dev->stats.tx_packets++;
+                       dev->stats.tx_bytes += lp->skb_length;
                }
                netif_wake_queue(dev);
        }
index 234a49eaccfd2dd2f80f5e402e262af957285cec..ec037b0fa2a4d53ae77ff5ff5ed5fb5eb9866319 100644
@@ -919,7 +919,6 @@ struct macb {
        struct clk              *rx_clk;
        struct net_device       *dev;
        struct napi_struct      napi;
-       struct net_device_stats stats;
        union {
                struct macb_stats       macb;
                struct gem_stats        gem;
index 2fedd91f3df88fb5ea88f288e5121a8fc8fb3cf0..dee604651ba7d309686cca04f1b41222221adfe1 100644
@@ -43,6 +43,8 @@ struct octeon_cn23xx_pf {
        struct octeon_config *conf;
 };
 
+#define CN23XX_SLI_DEF_BP                      0x40
+
 int setup_cn23xx_octeon_pf_device(struct octeon_device *oct);
 
 int validate_cn23xx_pf_config_info(struct octeon_device *oct,
index f629c2fe04a44b16794db66d2a3e4c6bf5c09876..796c2cbc11f6b5de044c5c465806184d5ad17825 100644
@@ -26,6 +26,9 @@
 #include "octeon_main.h"
 #include "octeon_network.h"
 
+/* OOM task polling interval */
+#define LIO_OOM_POLL_INTERVAL_MS 250
+
 int liquidio_set_feature(struct net_device *netdev, int cmd, u16 param1)
 {
        struct lio *lio = GET_LIO(netdev);
@@ -124,6 +127,17 @@ void liquidio_link_ctrl_cmd_completion(void *nctrl_ptr)
        struct octeon_device *oct = lio->oct_dev;
        u8 *mac;
 
+       if (nctrl->completion && nctrl->response_code) {
+               /* Signal whoever is interested that the response code from the
+                * firmware has arrived.
+                */
+               WRITE_ONCE(*nctrl->response_code, nctrl->status);
+               complete(nctrl->completion);
+       }
+
+       if (nctrl->status)
+               return;
+
        switch (nctrl->ncmd.s.cmd) {
        case OCTNET_CMD_CHANGE_DEVFLAGS:
        case OCTNET_CMD_SET_MULTI_LIST:
@@ -131,11 +145,20 @@ void liquidio_link_ctrl_cmd_completion(void *nctrl_ptr)
 
        case OCTNET_CMD_CHANGE_MACADDR:
                mac = ((u8 *)&nctrl->udd[0]) + 2;
-               netif_info(lio, probe, lio->netdev,
-                          "MACAddr changed to %2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x\n",
-                          mac[0], mac[1],
-                          mac[2], mac[3],
-                          mac[4], mac[5]);
+               if (nctrl->ncmd.s.param1) {
+                       /* vfidx is 0 based, but vf_num (param1) is 1 based */
+                       int vfidx = nctrl->ncmd.s.param1 - 1;
+                       bool mac_is_admin_assigned = nctrl->ncmd.s.param2;
+
+                       if (mac_is_admin_assigned)
+                               netif_info(lio, probe, lio->netdev,
+                                          "MAC Address %pM is configured for VF %d\n",
+                                          mac, vfidx);
+               } else {
+                       netif_info(lio, probe, lio->netdev,
+                                  "MACAddr changed to %pM\n",
+                                  mac);
+               }
                break;
 
        case OCTNET_CMD_CHANGE_MTU:
@@ -284,3 +307,56 @@ void octeon_pf_changed_vf_macaddr(struct octeon_device *oct, u8 *mac)
         * the PF did that already
         */
 }
+
+static void octnet_poll_check_rxq_oom_status(struct work_struct *work)
+{
+       struct cavium_wk *wk = (struct cavium_wk *)work;
+       struct lio *lio = (struct lio *)wk->ctxptr;
+       struct octeon_device *oct = lio->oct_dev;
+       struct octeon_droq *droq;
+       int q, q_no = 0;
+
+       if (ifstate_check(lio, LIO_IFSTATE_RUNNING)) {
+               for (q = 0; q < lio->linfo.num_rxpciq; q++) {
+                       q_no = lio->linfo.rxpciq[q].s.q_no;
+                       droq = oct->droq[q_no];
+                       if (!droq)
+                               continue;
+                       octeon_droq_check_oom(droq);
+               }
+       }
+       queue_delayed_work(lio->rxq_status_wq.wq,
+                          &lio->rxq_status_wq.wk.work,
+                          msecs_to_jiffies(LIO_OOM_POLL_INTERVAL_MS));
+}
+
+int setup_rx_oom_poll_fn(struct net_device *netdev)
+{
+       struct lio *lio = GET_LIO(netdev);
+       struct octeon_device *oct = lio->oct_dev;
+
+       lio->rxq_status_wq.wq = alloc_workqueue("rxq-oom-status",
+                                               WQ_MEM_RECLAIM, 0);
+       if (!lio->rxq_status_wq.wq) {
+               dev_err(&oct->pci_dev->dev, "unable to create cavium rxq oom status wq\n");
+               return -ENOMEM;
+       }
+       INIT_DELAYED_WORK(&lio->rxq_status_wq.wk.work,
+                         octnet_poll_check_rxq_oom_status);
+       lio->rxq_status_wq.wk.ctxptr = lio;
+       queue_delayed_work(lio->rxq_status_wq.wq,
+                          &lio->rxq_status_wq.wk.work,
+                          msecs_to_jiffies(LIO_OOM_POLL_INTERVAL_MS));
+       return 0;
+}
+
+void cleanup_rx_oom_poll_fn(struct net_device *netdev)
+{
+       struct lio *lio = GET_LIO(netdev);
+
+       if (lio->rxq_status_wq.wq) {
+               cancel_delayed_work_sync(&lio->rxq_status_wq.wk.work);
+               flush_workqueue(lio->rxq_status_wq.wq);
+               destroy_workqueue(lio->rxq_status_wq.wq);
+       }
+}
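octnet_poll_check_rxq_oom_status() is a self-rearming delayed work item: each invocation makes one pass over the receive queues and unconditionally queues itself again LIO_OOM_POLL_INTERVAL_MS later, so the cancel_delayed_work_sync() in the cleanup path is what finally breaks the cycle. A userspace analogue of the rearm-until-cancelled shape (a thread with a flag standing in for the workqueue):

#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>

#define POLL_INTERVAL_MS 250

static atomic_bool running = true;

/* Stand-in for the delayed-work handler: one pass, then re-arm. */
static void *oom_poll(void *arg)
{
        (void)arg;
        while (atomic_load(&running)) {
                /* one pass over the queues would go here */
                usleep(POLL_INTERVAL_MS * 1000);
        }
        return NULL;
}

int main(void)
{
        pthread_t t;

        pthread_create(&t, NULL, oom_poll, NULL);
        sleep(1);                       /* let it poll a few times */
        atomic_store(&running, false);  /* cancel_delayed_work_sync() analogue */
        pthread_join(t, NULL);
        puts("stopped");
        return 0;
}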
index 50384cede8be9b84431690074022bbff4bbc9199..dab10c7e4443df10c702898e23c198965b7a5b38 100644
 
 static int octnet_get_link_stats(struct net_device *netdev);
 
+struct oct_intrmod_context {
+       int octeon_id;
+       wait_queue_head_t wc;
+       int cond;
+       int status;
+};
+
+struct oct_intrmod_resp {
+       u64     rh;
+       struct oct_intrmod_cfg intrmod;
+       u64     status;
+};
+
 struct oct_mdio_cmd_context {
        int octeon_id;
        wait_queue_head_t wc;
@@ -213,17 +226,23 @@ static int lio_get_link_ksettings(struct net_device *netdev,
        struct lio *lio = GET_LIO(netdev);
        struct octeon_device *oct = lio->oct_dev;
        struct oct_link_info *linfo;
-       u32 supported, advertising;
+       u32 supported = 0, advertising = 0;
 
        linfo = &lio->linfo;
 
        if (linfo->link.s.if_mode == INTERFACE_MODE_XAUI ||
            linfo->link.s.if_mode == INTERFACE_MODE_RXAUI ||
+           linfo->link.s.if_mode == INTERFACE_MODE_XLAUI ||
            linfo->link.s.if_mode == INTERFACE_MODE_XFI) {
                ecmd->base.port = PORT_FIBRE;
-               supported = (SUPPORTED_10000baseT_Full | SUPPORTED_FIBRE |
-                            SUPPORTED_Pause);
-               advertising = (ADVERTISED_10000baseT_Full | ADVERTISED_Pause);
+
+               if (linfo->link.s.speed == SPEED_10000) {
+                       supported = SUPPORTED_10000baseT_Full;
+                       advertising = ADVERTISED_10000baseT_Full;
+               }
+
+               supported |= SUPPORTED_FIBRE | SUPPORTED_Pause;
+               advertising |= ADVERTISED_Pause;
                ethtool_convert_legacy_u32_to_link_mode(
                        ecmd->link_modes.supported, supported);
                ethtool_convert_legacy_u32_to_link_mode(
@@ -1292,95 +1311,103 @@ static int lio_vf_get_sset_count(struct net_device *netdev, int sset)
        }
 }
 
-static int lio_get_intr_coalesce(struct net_device *netdev,
-                                struct ethtool_coalesce *intr_coal)
+/* Callback function for intrmod */
+static void octnet_intrmod_callback(struct octeon_device *oct_dev,
+                                   u32 status,
+                                   void *ptr)
 {
-       struct lio *lio = GET_LIO(netdev);
-       struct octeon_device *oct = lio->oct_dev;
-       struct octeon_instr_queue *iq;
-       struct oct_intrmod_cfg *intrmod_cfg;
+       struct octeon_soft_command *sc = (struct octeon_soft_command *)ptr;
+       struct oct_intrmod_context *ctx;
 
-       intrmod_cfg = &oct->intrmod;
+       ctx  = (struct oct_intrmod_context *)sc->ctxptr;
 
-       switch (oct->chip_id) {
-       case OCTEON_CN23XX_PF_VID:
-       case OCTEON_CN23XX_VF_VID:
-               if (!intrmod_cfg->rx_enable) {
-                       intr_coal->rx_coalesce_usecs = intrmod_cfg->rx_usecs;
-                       intr_coal->rx_max_coalesced_frames =
-                               intrmod_cfg->rx_frames;
-               }
-               if (!intrmod_cfg->tx_enable)
-                       intr_coal->tx_max_coalesced_frames =
-                               intrmod_cfg->tx_frames;
-               break;
-       case OCTEON_CN68XX:
-       case OCTEON_CN66XX: {
-               struct octeon_cn6xxx *cn6xxx =
-                       (struct octeon_cn6xxx *)oct->chip;
+       ctx->status = status;
 
-               if (!intrmod_cfg->rx_enable) {
-                       intr_coal->rx_coalesce_usecs =
-                               CFG_GET_OQ_INTR_TIME(cn6xxx->conf);
-                       intr_coal->rx_max_coalesced_frames =
-                               CFG_GET_OQ_INTR_PKT(cn6xxx->conf);
-               }
-               iq = oct->instr_queue[lio->linfo.txpciq[0].s.q_no];
-               intr_coal->tx_max_coalesced_frames = iq->fill_threshold;
-               break;
-       }
-       default:
-               netif_info(lio, drv, lio->netdev, "Unknown Chip !!\n");
+       oct_dev = lio_get_device(ctx->octeon_id);
+
+       WRITE_ONCE(ctx->cond, 1);
+
+       /* This barrier ensures that the response has been written
+        * fully before waking up the handler.
+        */
+       wmb();
+
+       wake_up_interruptible(&ctx->wc);
+}
+
+/* Get interrupt moderation parameters */
+static int octnet_get_intrmod_cfg(struct lio *lio,
+                                 struct oct_intrmod_cfg *intr_cfg)
+{
+       struct octeon_soft_command *sc;
+       struct oct_intrmod_context *ctx;
+       struct oct_intrmod_resp *resp;
+       int retval;
+       struct octeon_device *oct_dev = lio->oct_dev;
+
+       /* Alloc soft command */
+       sc = (struct octeon_soft_command *)
+               octeon_alloc_soft_command(oct_dev,
+                                         0,
+                                         sizeof(struct oct_intrmod_resp),
+                                         sizeof(struct oct_intrmod_context));
+
+       if (!sc)
+               return -ENOMEM;
+
+       resp = (struct oct_intrmod_resp *)sc->virtrptr;
+       memset(resp, 0, sizeof(struct oct_intrmod_resp));
+
+       ctx = (struct oct_intrmod_context *)sc->ctxptr;
+       memset(ctx, 0, sizeof(struct oct_intrmod_context));
+       WRITE_ONCE(ctx->cond, 0);
+       ctx->octeon_id = lio_get_device_id(oct_dev);
+       init_waitqueue_head(&ctx->wc);
+
+       sc->iq_no = lio->linfo.txpciq[0].s.q_no;
+
+       octeon_prepare_soft_command(oct_dev, sc, OPCODE_NIC,
+                                   OPCODE_NIC_INTRMOD_PARAMS, 0, 0, 0);
+
+       sc->callback = octnet_intrmod_callback;
+       sc->callback_arg = sc;
+       sc->wait_time = 1000;
+
+       retval = octeon_send_soft_command(oct_dev, sc);
+       if (retval == IQ_SEND_FAILED) {
+               octeon_free_soft_command(oct_dev, sc);
                return -EINVAL;
        }
-       if (intrmod_cfg->rx_enable) {
-               intr_coal->use_adaptive_rx_coalesce =
-                       intrmod_cfg->rx_enable;
-               intr_coal->rate_sample_interval =
-                       intrmod_cfg->check_intrvl;
-               intr_coal->pkt_rate_high =
-                       intrmod_cfg->maxpkt_ratethr;
-               intr_coal->pkt_rate_low =
-                       intrmod_cfg->minpkt_ratethr;
-               intr_coal->rx_max_coalesced_frames_high =
-                       intrmod_cfg->rx_maxcnt_trigger;
-               intr_coal->rx_coalesce_usecs_high =
-                       intrmod_cfg->rx_maxtmr_trigger;
-               intr_coal->rx_coalesce_usecs_low =
-                       intrmod_cfg->rx_mintmr_trigger;
-               intr_coal->rx_max_coalesced_frames_low =
-                   intrmod_cfg->rx_mincnt_trigger;
+
+       /* Sleep on a wait queue until the cond flag indicates that the
+        * response arrived or the request timed out.
+        */
+       if (sleep_cond(&ctx->wc, &ctx->cond) == -EINTR) {
+               dev_err(&oct_dev->pci_dev->dev, "Wait interrupted\n");
+               goto intrmod_info_wait_intr;
        }
-       if ((OCTEON_CN23XX_PF(oct) || OCTEON_CN23XX_VF(oct)) &&
-           (intrmod_cfg->tx_enable)) {
-               intr_coal->use_adaptive_tx_coalesce = intrmod_cfg->tx_enable;
-               intr_coal->tx_max_coalesced_frames_high =
-                   intrmod_cfg->tx_maxcnt_trigger;
-               intr_coal->tx_max_coalesced_frames_low =
-                   intrmod_cfg->tx_mincnt_trigger;
+
+       retval = ctx->status || resp->status;
+       if (retval) {
+               dev_err(&oct_dev->pci_dev->dev,
+                       "Get interrupt moderation parameters failed\n");
+               goto intrmod_info_wait_fail;
        }
-       return 0;
-}
 
-/* Callback function for intrmod */
-static void octnet_intrmod_callback(struct octeon_device *oct_dev,
-                                   u32 status,
-                                   void *ptr)
-{
-       struct oct_intrmod_cmd *cmd = ptr;
-       struct octeon_soft_command *sc = cmd->sc;
+       octeon_swap_8B_data((u64 *)&resp->intrmod,
+                           (sizeof(struct oct_intrmod_cfg)) / 8);
+       memcpy(intr_cfg, &resp->intrmod, sizeof(struct oct_intrmod_cfg));
+       octeon_free_soft_command(oct_dev, sc);
 
-       oct_dev = cmd->oct_dev;
+       return 0;
 
-       if (status)
-               dev_err(&oct_dev->pci_dev->dev, "intrmod config failed. Status: %llx\n",
-                       CVM_CAST64(status));
-       else
-               dev_info(&oct_dev->pci_dev->dev,
-                        "Rx-Adaptive Interrupt moderation enabled:%llx\n",
-                        oct_dev->intrmod.rx_enable);
+intrmod_info_wait_fail:
 
        octeon_free_soft_command(oct_dev, sc);
+
+intrmod_info_wait_intr:
+
+       return -ENODEV;
 }
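
The response buffer comes back from the NIC as big-endian 64-bit words, so octeon_swap_8B_data() converts it in place before the memcpy() into the caller's structure. A hedged sketch of what such a helper amounts to on a little-endian host (the driver's actual implementation is not part of this patch):

    #include <linux/swab.h>
    #include <linux/types.h>

    static void demo_swap_8b_words(u64 *data, size_t nwords)
    {
            size_t i;

            for (i = 0; i < nwords; i++)
                    data[i] = swab64(data[i]);   /* 8-byte endian swap */
    }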
 
 /*  Configure interrupt moderation parameters */
@@ -1388,7 +1415,7 @@ static int octnet_set_intrmod_cfg(struct lio *lio,
                                  struct oct_intrmod_cfg *intr_cfg)
 {
        struct octeon_soft_command *sc;
-       struct oct_intrmod_cmd *cmd;
+       struct oct_intrmod_context *ctx;
        struct oct_intrmod_cfg *cfg;
        int retval;
        struct octeon_device *oct_dev = lio->oct_dev;
@@ -1398,19 +1425,21 @@ static int octnet_set_intrmod_cfg(struct lio *lio,
                octeon_alloc_soft_command(oct_dev,
                                          sizeof(struct oct_intrmod_cfg),
                                          0,
-                                         sizeof(struct oct_intrmod_cmd));
+                                         sizeof(struct oct_intrmod_context));
 
        if (!sc)
                return -ENOMEM;
 
-       cmd = (struct oct_intrmod_cmd *)sc->ctxptr;
+       ctx = (struct oct_intrmod_context *)sc->ctxptr;
+
+       WRITE_ONCE(ctx->cond, 0);
+       ctx->octeon_id = lio_get_device_id(oct_dev);
+       init_waitqueue_head(&ctx->wc);
+
        cfg = (struct oct_intrmod_cfg *)sc->virtdptr;
 
        memcpy(cfg, intr_cfg, sizeof(struct oct_intrmod_cfg));
        octeon_swap_8B_data((u64 *)cfg, (sizeof(struct oct_intrmod_cfg)) / 8);
-       cmd->sc = sc;
-       cmd->cfg = cfg;
-       cmd->oct_dev = oct_dev;
 
        sc->iq_no = lio->linfo.txpciq[0].s.q_no;
 
@@ -1418,7 +1447,7 @@ static int octnet_set_intrmod_cfg(struct lio *lio,
                                    OPCODE_NIC_INTRMOD_CFG, 0, 0, 0);
 
        sc->callback = octnet_intrmod_callback;
-       sc->callback_arg = cmd;
+       sc->callback_arg = sc;
        sc->wait_time = 1000;
 
        retval = octeon_send_soft_command(oct_dev, sc);
@@ -1427,7 +1456,29 @@ static int octnet_set_intrmod_cfg(struct lio *lio,
                return -EINVAL;
        }
 
-       return 0;
+       /* Sleep on a wait queue until the cond flag indicates that the
+        * response arrived or the request timed out.
+        */
+       if (sleep_cond(&ctx->wc, &ctx->cond) != -EINTR) {
+               retval = ctx->status;
+               if (retval)
+                       dev_err(&oct_dev->pci_dev->dev,
+                               "intrmod config failed. Status: %llx\n",
+                               CVM_CAST64(retval));
+               else
+                       dev_info(&oct_dev->pci_dev->dev,
+                                "Rx-Adaptive Interrupt moderation %s\n",
+                                (intr_cfg->rx_enable) ?
+                                "enabled" : "disabled");
+
+               octeon_free_soft_command(oct_dev, sc);
+
+               return ((retval) ? -ENODEV : 0);
+       }
+
+       dev_err(&oct_dev->pci_dev->dev, "iq/oq config failed\n");
+
+       return -EINTR;
 }
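
sleep_cond() is a liquidio helper rather than a core kernel API; functionally it blocks until the callback above sets ctx->cond, or a signal arrives. A rough equivalent built from standard primitives could look like the following (the 1000 ms budget mirroring sc->wait_time is an assumption for illustration):

    static int demo_sleep_cond(wait_queue_head_t *wc, int *cond)
    {
            long left;

            left = wait_event_interruptible_timeout(*wc, READ_ONCE(*cond),
                                                    msecs_to_jiffies(1000));
            if (left == -ERESTARTSYS)
                    return -EINTR;  /* interrupted by a signal */
            /* 0 on completion or timeout; caller checks ctx->status */
            return 0;
    }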
 
 static void
@@ -1584,80 +1635,106 @@ static int octnet_get_link_stats(struct net_device *netdev)
        return 0;
 }
 
-/* Enable/Disable auto interrupt Moderation */
-static int oct_cfg_adaptive_intr(struct lio *lio, struct ethtool_coalesce
-                                *intr_coal)
+static int lio_get_intr_coalesce(struct net_device *netdev,
+                                struct ethtool_coalesce *intr_coal)
 {
-       int ret = 0;
+       struct lio *lio = GET_LIO(netdev);
        struct octeon_device *oct = lio->oct_dev;
-       struct oct_intrmod_cfg *intrmod_cfg;
-
-       intrmod_cfg = &oct->intrmod;
-
-       if (oct->intrmod.rx_enable || oct->intrmod.tx_enable) {
-               if (intr_coal->rate_sample_interval)
-                       intrmod_cfg->check_intrvl =
-                               intr_coal->rate_sample_interval;
-               else
-                       intrmod_cfg->check_intrvl =
-                               LIO_INTRMOD_CHECK_INTERVAL;
+       struct octeon_instr_queue *iq;
+       struct oct_intrmod_cfg intrmod_cfg;
 
-               if (intr_coal->pkt_rate_high)
-                       intrmod_cfg->maxpkt_ratethr =
-                               intr_coal->pkt_rate_high;
-               else
-                       intrmod_cfg->maxpkt_ratethr =
-                               LIO_INTRMOD_MAXPKT_RATETHR;
+       if (octnet_get_intrmod_cfg(lio, &intrmod_cfg))
+               return -ENODEV;
 
-               if (intr_coal->pkt_rate_low)
-                       intrmod_cfg->minpkt_ratethr =
-                               intr_coal->pkt_rate_low;
-               else
-                       intrmod_cfg->minpkt_ratethr =
-                               LIO_INTRMOD_MINPKT_RATETHR;
+       switch (oct->chip_id) {
+       case OCTEON_CN23XX_PF_VID:
+       case OCTEON_CN23XX_VF_VID: {
+               if (!intrmod_cfg.rx_enable) {
+                       intr_coal->rx_coalesce_usecs = oct->rx_coalesce_usecs;
+                       intr_coal->rx_max_coalesced_frames =
+                               oct->rx_max_coalesced_frames;
+               }
+               if (!intrmod_cfg.tx_enable)
+                       intr_coal->tx_max_coalesced_frames =
+                               oct->tx_max_coalesced_frames;
+               break;
        }
-       if (oct->intrmod.rx_enable) {
-               if (intr_coal->rx_max_coalesced_frames_high)
-                       intrmod_cfg->rx_maxcnt_trigger =
-                               intr_coal->rx_max_coalesced_frames_high;
-               else
-                       intrmod_cfg->rx_maxcnt_trigger =
-                               LIO_INTRMOD_RXMAXCNT_TRIGGER;
+       case OCTEON_CN68XX:
+       case OCTEON_CN66XX: {
+               struct octeon_cn6xxx *cn6xxx =
+                       (struct octeon_cn6xxx *)oct->chip;
 
-               if (intr_coal->rx_coalesce_usecs_high)
-                       intrmod_cfg->rx_maxtmr_trigger =
-                               intr_coal->rx_coalesce_usecs_high;
-               else
-                       intrmod_cfg->rx_maxtmr_trigger =
-                               LIO_INTRMOD_RXMAXTMR_TRIGGER;
+               if (!intrmod_cfg.rx_enable) {
+                       intr_coal->rx_coalesce_usecs =
+                               CFG_GET_OQ_INTR_TIME(cn6xxx->conf);
+                       intr_coal->rx_max_coalesced_frames =
+                               CFG_GET_OQ_INTR_PKT(cn6xxx->conf);
+               }
+               iq = oct->instr_queue[lio->linfo.txpciq[0].s.q_no];
+               intr_coal->tx_max_coalesced_frames = iq->fill_threshold;
+               break;
+       }
+       default:
+               netif_info(lio, drv, lio->netdev, "Unknown Chip !!\n");
+               return -EINVAL;
+       }
+       if (intrmod_cfg.rx_enable) {
+               intr_coal->use_adaptive_rx_coalesce =
+                       intrmod_cfg.rx_enable;
+               intr_coal->rate_sample_interval =
+                       intrmod_cfg.check_intrvl;
+               intr_coal->pkt_rate_high =
+                       intrmod_cfg.maxpkt_ratethr;
+               intr_coal->pkt_rate_low =
+                       intrmod_cfg.minpkt_ratethr;
+               intr_coal->rx_max_coalesced_frames_high =
+                       intrmod_cfg.rx_maxcnt_trigger;
+               intr_coal->rx_coalesce_usecs_high =
+                       intrmod_cfg.rx_maxtmr_trigger;
+               intr_coal->rx_coalesce_usecs_low =
+                       intrmod_cfg.rx_mintmr_trigger;
+               intr_coal->rx_max_coalesced_frames_low =
+                       intrmod_cfg.rx_mincnt_trigger;
+       }
+       if ((OCTEON_CN23XX_PF(oct) || OCTEON_CN23XX_VF(oct)) &&
+           (intrmod_cfg.tx_enable)) {
+               intr_coal->use_adaptive_tx_coalesce =
+                       intrmod_cfg.tx_enable;
+               intr_coal->tx_max_coalesced_frames_high =
+                       intrmod_cfg.tx_maxcnt_trigger;
+               intr_coal->tx_max_coalesced_frames_low =
+                       intrmod_cfg.tx_mincnt_trigger;
+       }
+       return 0;
+}
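
lio_get_intr_coalesce() is what backs "ethtool -c" for this driver: the intrmod settings fetched from firmware are translated into the standard struct ethtool_coalesce fields. A small user-space sketch that reads them back (interface name is a placeholder; error handling trimmed):

    #include <stdio.h>
    #include <string.h>
    #include <unistd.h>
    #include <sys/ioctl.h>
    #include <sys/socket.h>
    #include <net/if.h>
    #include <linux/ethtool.h>
    #include <linux/sockios.h>

    int main(void)
    {
            struct ethtool_coalesce ec = { .cmd = ETHTOOL_GCOALESCE };
            struct ifreq ifr;
            int fd = socket(AF_INET, SOCK_DGRAM, 0);

            memset(&ifr, 0, sizeof(ifr));
            strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);  /* placeholder */
            ifr.ifr_data = (void *)&ec;

            if (fd >= 0 && ioctl(fd, SIOCETHTOOL, &ifr) == 0)
                    printf("adaptive-rx=%u rx-usecs=%u rx-frames=%u\n",
                           ec.use_adaptive_rx_coalesce,
                           ec.rx_coalesce_usecs,
                           ec.rx_max_coalesced_frames);
            close(fd);
            return 0;
    }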
 
-               if (intr_coal->rx_coalesce_usecs_low)
-                       intrmod_cfg->rx_mintmr_trigger =
-                               intr_coal->rx_coalesce_usecs_low;
-               else
-                       intrmod_cfg->rx_mintmr_trigger =
-                               LIO_INTRMOD_RXMINTMR_TRIGGER;
+/* Enable/Disable auto interrupt Moderation */
+static int oct_cfg_adaptive_intr(struct lio *lio,
+                                struct oct_intrmod_cfg *intrmod_cfg,
+                                struct ethtool_coalesce *intr_coal)
+{
+       int ret = 0;
 
-               if (intr_coal->rx_max_coalesced_frames_low)
-                       intrmod_cfg->rx_mincnt_trigger =
-                               intr_coal->rx_max_coalesced_frames_low;
-               else
-                       intrmod_cfg->rx_mincnt_trigger =
-                               LIO_INTRMOD_RXMINCNT_TRIGGER;
+       if (intrmod_cfg->rx_enable || intrmod_cfg->tx_enable) {
+               intrmod_cfg->check_intrvl = intr_coal->rate_sample_interval;
+               intrmod_cfg->maxpkt_ratethr = intr_coal->pkt_rate_high;
+               intrmod_cfg->minpkt_ratethr = intr_coal->pkt_rate_low;
        }
-       if (oct->intrmod.tx_enable) {
-               if (intr_coal->tx_max_coalesced_frames_high)
-                       intrmod_cfg->tx_maxcnt_trigger =
-                               intr_coal->tx_max_coalesced_frames_high;
-               else
-                       intrmod_cfg->tx_maxcnt_trigger =
-                               LIO_INTRMOD_TXMAXCNT_TRIGGER;
-               if (intr_coal->tx_max_coalesced_frames_low)
-                       intrmod_cfg->tx_mincnt_trigger =
-                               intr_coal->tx_max_coalesced_frames_low;
-               else
-                       intrmod_cfg->tx_mincnt_trigger =
-                               LIO_INTRMOD_TXMINCNT_TRIGGER;
+       if (intrmod_cfg->rx_enable) {
+               intrmod_cfg->rx_maxcnt_trigger =
+                       intr_coal->rx_max_coalesced_frames_high;
+               intrmod_cfg->rx_maxtmr_trigger =
+                       intr_coal->rx_coalesce_usecs_high;
+               intrmod_cfg->rx_mintmr_trigger =
+                       intr_coal->rx_coalesce_usecs_low;
+               intrmod_cfg->rx_mincnt_trigger =
+                       intr_coal->rx_max_coalesced_frames_low;
+       }
+       if (intrmod_cfg->tx_enable) {
+               intrmod_cfg->tx_maxcnt_trigger =
+                       intr_coal->tx_max_coalesced_frames_high;
+               intrmod_cfg->tx_mincnt_trigger =
+                       intr_coal->tx_max_coalesced_frames_low;
        }
 
        ret = octnet_set_intrmod_cfg(lio, intrmod_cfg);
@@ -1666,7 +1743,9 @@ static int oct_cfg_adaptive_intr(struct lio *lio, struct ethtool_coalesce
 }
 
 static int
-oct_cfg_rx_intrcnt(struct lio *lio, struct ethtool_coalesce *intr_coal)
+oct_cfg_rx_intrcnt(struct lio *lio,
+                  struct oct_intrmod_cfg *intrmod,
+                  struct ethtool_coalesce *intr_coal)
 {
        struct octeon_device *oct = lio->oct_dev;
        u32 rx_max_coalesced_frames;
@@ -1692,7 +1771,7 @@ oct_cfg_rx_intrcnt(struct lio *lio, struct ethtool_coalesce *intr_coal)
                int q_no;
 
                if (!intr_coal->rx_max_coalesced_frames)
-                       rx_max_coalesced_frames = oct->intrmod.rx_frames;
+                       rx_max_coalesced_frames = intrmod->rx_frames;
                else
                        rx_max_coalesced_frames =
                            intr_coal->rx_max_coalesced_frames;
@@ -1703,17 +1782,18 @@ oct_cfg_rx_intrcnt(struct lio *lio, struct ethtool_coalesce *intr_coal)
                            (octeon_read_csr64(
                                 oct, CN23XX_SLI_OQ_PKT_INT_LEVELS(q_no)) &
                             (0x3fffff00000000UL)) |
-                               rx_max_coalesced_frames);
+                               (rx_max_coalesced_frames - 1));
                        /*consider setting resend bit*/
                }
-               oct->intrmod.rx_frames = rx_max_coalesced_frames;
+               intrmod->rx_frames = rx_max_coalesced_frames;
+               oct->rx_max_coalesced_frames = rx_max_coalesced_frames;
                break;
        }
        case OCTEON_CN23XX_VF_VID: {
                int q_no;
 
                if (!intr_coal->rx_max_coalesced_frames)
-                       rx_max_coalesced_frames = oct->intrmod.rx_frames;
+                       rx_max_coalesced_frames = intrmod->rx_frames;
                else
                        rx_max_coalesced_frames =
                            intr_coal->rx_max_coalesced_frames;
@@ -1724,9 +1804,10 @@ oct_cfg_rx_intrcnt(struct lio *lio, struct ethtool_coalesce *intr_coal)
                                 oct, CN23XX_VF_SLI_OQ_PKT_INT_LEVELS(q_no)) &
                             (0x3fffff00000000UL)) |
                                rx_max_coalesced_frames);
-                       /* consider writing to resend bit here */
+                       /*consider writing to resend bit here*/
                }
-               oct->intrmod.rx_frames = rx_max_coalesced_frames;
+               intrmod->rx_frames = rx_max_coalesced_frames;
+               oct->rx_max_coalesced_frames = rx_max_coalesced_frames;
                break;
        }
        default:
@@ -1736,6 +1817,7 @@ oct_cfg_rx_intrcnt(struct lio *lio, struct ethtool_coalesce *intr_coal)
 }
 
 static int oct_cfg_rx_intrtime(struct lio *lio,
+                              struct oct_intrmod_cfg *intrmod,
                               struct ethtool_coalesce *intr_coal)
 {
        struct octeon_device *oct = lio->oct_dev;
@@ -1766,7 +1848,7 @@ static int oct_cfg_rx_intrtime(struct lio *lio,
                int q_no;
 
                if (!intr_coal->rx_coalesce_usecs)
-                       rx_coalesce_usecs = oct->intrmod.rx_usecs;
+                       rx_coalesce_usecs = intrmod->rx_usecs;
                else
                        rx_coalesce_usecs = intr_coal->rx_coalesce_usecs;
                time_threshold =
@@ -1775,11 +1857,12 @@ static int oct_cfg_rx_intrtime(struct lio *lio,
                        q_no += oct->sriov_info.pf_srn;
                        octeon_write_csr64(oct,
                                           CN23XX_SLI_OQ_PKT_INT_LEVELS(q_no),
-                                          (oct->intrmod.rx_frames |
-                                           (time_threshold << 32)));
+                                          (intrmod->rx_frames |
+                                           ((u64)time_threshold << 32)));
                        /*consider writing to resend bit here*/
                }
-               oct->intrmod.rx_usecs = rx_coalesce_usecs;
+               intrmod->rx_usecs = rx_coalesce_usecs;
+               oct->rx_coalesce_usecs = rx_coalesce_usecs;
                break;
        }
        case OCTEON_CN23XX_VF_VID: {
@@ -1787,7 +1870,7 @@ static int oct_cfg_rx_intrtime(struct lio *lio,
                int q_no;
 
                if (!intr_coal->rx_coalesce_usecs)
-                       rx_coalesce_usecs = oct->intrmod.rx_usecs;
+                       rx_coalesce_usecs = intrmod->rx_usecs;
                else
                        rx_coalesce_usecs = intr_coal->rx_coalesce_usecs;
 
@@ -1796,11 +1879,12 @@ static int oct_cfg_rx_intrtime(struct lio *lio,
                for (q_no = 0; q_no < oct->num_oqs; q_no++) {
                        octeon_write_csr64(
                                oct, CN23XX_VF_SLI_OQ_PKT_INT_LEVELS(q_no),
-                               (oct->intrmod.rx_frames |
-                                (time_threshold << 32)));
-                       /* consider setting resend bit */
+                               (intrmod->rx_frames |
+                                ((u64)time_threshold << 32)));
+                       /*consider setting resend bit*/
                }
-               oct->intrmod.rx_usecs = rx_coalesce_usecs;
+               intrmod->rx_usecs = rx_coalesce_usecs;
+               oct->rx_coalesce_usecs = rx_coalesce_usecs;
                break;
        }
        default:
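
Note the added (u64) cast on time_threshold in both hunks above: time_threshold is a 32-bit value, and shifting a 32-bit operand left by 32 is undefined in C, so without the cast the timer field could never reach the upper half of the 64-bit register. A standalone demonstration:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            uint32_t time_threshold = 0x100;
            uint64_t ok = (uint64_t)time_threshold << 32;

            /* without the cast, 'time_threshold << 32' is undefined
             * behaviour: the shift count equals the operand width
             */
            printf("0x%016llx\n", (unsigned long long)ok);
            return 0;
    }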
@@ -1811,8 +1895,9 @@ static int oct_cfg_rx_intrtime(struct lio *lio,
 }
 
 static int
-oct_cfg_tx_intrcnt(struct lio *lio, struct ethtool_coalesce *intr_coal
-                  __attribute__((unused)))
+oct_cfg_tx_intrcnt(struct lio *lio,
+                  struct oct_intrmod_cfg *intrmod,
+                  struct ethtool_coalesce *intr_coal)
 {
        struct octeon_device *oct = lio->oct_dev;
        u32 iq_intr_pkt;
@@ -1839,12 +1924,13 @@ oct_cfg_tx_intrcnt(struct lio *lio, struct ethtool_coalesce *intr_coal
                        val = readq(inst_cnt_reg);
                        /* clear wmark and count; don't want to write count back */
                        val = (val & 0xFFFF000000000000ULL) |
-                             ((u64)iq_intr_pkt
+                             ((u64)(iq_intr_pkt - 1)
                               << CN23XX_PKT_IN_DONE_WMARK_BIT_POS);
                        writeq(val, inst_cnt_reg);
                        /*consider setting resend bit*/
                }
-               oct->intrmod.tx_frames = iq_intr_pkt;
+               intrmod->tx_frames = iq_intr_pkt;
+               oct->tx_max_coalesced_frames = iq_intr_pkt;
                break;
        }
        default:
@@ -1859,6 +1945,7 @@ static int lio_set_intr_coalesce(struct net_device *netdev,
        struct lio *lio = GET_LIO(netdev);
        int ret;
        struct octeon_device *oct = lio->oct_dev;
+       struct oct_intrmod_cfg intrmod = {0};
        u32 j, q_no;
        int db_max, db_min;
 
@@ -1877,8 +1964,8 @@ static int lio_set_intr_coalesce(struct net_device *netdev,
                } else {
                        dev_err(&oct->pci_dev->dev,
                                "LIQUIDIO: Invalid tx-frames:%d. Range is min:%d max:%d\n",
-                               intr_coal->tx_max_coalesced_frames, db_min,
-                               db_max);
+                               intr_coal->tx_max_coalesced_frames,
+                               db_min, db_max);
                        return -EINVAL;
                }
                break;
@@ -1889,24 +1976,36 @@ static int lio_set_intr_coalesce(struct net_device *netdev,
                return -EINVAL;
        }
 
-       oct->intrmod.rx_enable = intr_coal->use_adaptive_rx_coalesce ? 1 : 0;
-       oct->intrmod.tx_enable = intr_coal->use_adaptive_tx_coalesce ? 1 : 0;
+       intrmod.rx_enable = intr_coal->use_adaptive_rx_coalesce ? 1 : 0;
+       intrmod.tx_enable = intr_coal->use_adaptive_tx_coalesce ? 1 : 0;
+       intrmod.rx_frames = CFG_GET_OQ_INTR_PKT(octeon_get_conf(oct));
+       intrmod.rx_usecs = CFG_GET_OQ_INTR_TIME(octeon_get_conf(oct));
+       intrmod.tx_frames = CFG_GET_IQ_INTR_PKT(octeon_get_conf(oct));
 
-       ret = oct_cfg_adaptive_intr(lio, intr_coal);
+       ret = oct_cfg_adaptive_intr(lio, &intrmod, intr_coal);
 
        if (!intr_coal->use_adaptive_rx_coalesce) {
-               ret = oct_cfg_rx_intrtime(lio, intr_coal);
+               ret = oct_cfg_rx_intrtime(lio, &intrmod, intr_coal);
                if (ret)
                        goto ret_intrmod;
 
-               ret = oct_cfg_rx_intrcnt(lio, intr_coal);
+               ret = oct_cfg_rx_intrcnt(lio, &intrmod, intr_coal);
                if (ret)
                        goto ret_intrmod;
+       } else {
+               oct->rx_coalesce_usecs =
+                       CFG_GET_OQ_INTR_TIME(octeon_get_conf(oct));
+               oct->rx_max_coalesced_frames =
+                       CFG_GET_OQ_INTR_PKT(octeon_get_conf(oct));
        }
+
        if (!intr_coal->use_adaptive_tx_coalesce) {
-               ret = oct_cfg_tx_intrcnt(lio, intr_coal);
+               ret = oct_cfg_tx_intrcnt(lio, &intrmod, intr_coal);
                if (ret)
                        goto ret_intrmod;
+       } else {
+               oct->tx_max_coalesced_frames =
+                       CFG_GET_IQ_INTR_PKT(octeon_get_conf(oct));
        }
 
        return 0;
index be9c0e3f5ade7d4e61694da214702f0223ab5d59..927617cbf6a91a4c8738ffcb161eed0d61e9483d 100644 (file)
@@ -16,6 +16,7 @@
  * NONINFRINGEMENT.  See the GNU General Public License for more details.
  ***********************************************************************/
 #include <linux/module.h>
+#include <linux/interrupt.h>
 #include <linux/pci.h>
 #include <linux/firmware.h>
 #include <net/vxlan.h>
@@ -60,12 +61,6 @@ MODULE_PARM_DESC(fw_type, "Type of firmware to be loaded. Default \"nic\"");
 
 static int ptp_enable = 1;
 
-/* Bit mask values for lio->ifstate */
-#define   LIO_IFSTATE_DROQ_OPS             0x01
-#define   LIO_IFSTATE_REGISTERED           0x02
-#define   LIO_IFSTATE_RUNNING              0x04
-#define   LIO_IFSTATE_RX_TIMESTAMP_ENABLED 0x08
-
 /* Polling interval for determining when NIC application is alive */
 #define LIQUIDIO_STARTER_POLL_INTERVAL_MS 100
 
@@ -152,7 +147,7 @@ struct octnic_gather {
         */
        struct octeon_sg_entry *sg;
 
-       u64 sg_dma_ptr;
+       dma_addr_t sg_dma_ptr;
 };
 
 struct handshake {
@@ -178,6 +173,8 @@ static int liquidio_stop(struct net_device *netdev);
 static void liquidio_remove(struct pci_dev *pdev);
 static int liquidio_probe(struct pci_dev *pdev,
                          const struct pci_device_id *ent);
+static int liquidio_set_vf_link_state(struct net_device *netdev, int vfidx,
+                                     int linkstate);
 
 static struct handshake handshake[MAX_OCTEON_DEVICES];
 static struct completion first_stage;
@@ -530,36 +527,6 @@ static void liquidio_deinit_pci(void)
        pci_unregister_driver(&liquidio_pci_driver);
 }
 
-/**
- * \brief check interface state
- * @param lio per-network private data
- * @param state_flag flag state to check
- */
-static inline int ifstate_check(struct lio *lio, int state_flag)
-{
-       return atomic_read(&lio->ifstate) & state_flag;
-}
-
-/**
- * \brief set interface state
- * @param lio per-network private data
- * @param state_flag flag state to set
- */
-static inline void ifstate_set(struct lio *lio, int state_flag)
-{
-       atomic_set(&lio->ifstate, (atomic_read(&lio->ifstate) | state_flag));
-}
-
-/**
- * \brief clear interface state
- * @param lio per-network private data
- * @param state_flag flag state to clear
- */
-static inline void ifstate_reset(struct lio *lio, int state_flag)
-{
-       atomic_set(&lio->ifstate, (atomic_read(&lio->ifstate) & ~(state_flag)));
-}
-
 /**
  * \brief Stop Tx queues
  * @param netdev network device
@@ -734,6 +701,9 @@ static void delete_glists(struct lio *lio)
        struct octnic_gather *g;
        int i;
 
+       kfree(lio->glist_lock);
+       lio->glist_lock = NULL;
+
        if (!lio->glist)
                return;
 
@@ -741,23 +711,27 @@ static void delete_glists(struct lio *lio)
                do {
                        g = (struct octnic_gather *)
                                list_delete_head(&lio->glist[i]);
-                       if (g) {
-                               if (g->sg) {
-                                       dma_unmap_single(&lio->oct_dev->
-                                                        pci_dev->dev,
-                                                        g->sg_dma_ptr,
-                                                        g->sg_size,
-                                                        DMA_TO_DEVICE);
-                                       kfree((void *)((unsigned long)g->sg -
-                                                      g->adjust));
-                               }
+                       if (g)
                                kfree(g);
-                       }
                } while (g);
+
+               if (lio->glists_virt_base && lio->glists_virt_base[i] &&
+                   lio->glists_dma_base && lio->glists_dma_base[i]) {
+                       lio_dma_free(lio->oct_dev,
+                                    lio->glist_entry_size * lio->tx_qsize,
+                                    lio->glists_virt_base[i],
+                                    lio->glists_dma_base[i]);
+               }
        }
 
-       kfree((void *)lio->glist);
-       kfree((void *)lio->glist_lock);
+       kfree(lio->glists_virt_base);
+       lio->glists_virt_base = NULL;
+
+       kfree(lio->glists_dma_base);
+       lio->glists_dma_base = NULL;
+
+       kfree(lio->glist);
+       lio->glist = NULL;
 }
 
 /**
@@ -772,22 +746,49 @@ static int setup_glists(struct octeon_device *oct, struct lio *lio, int num_iqs)
        lio->glist_lock = kcalloc(num_iqs, sizeof(*lio->glist_lock),
                                  GFP_KERNEL);
        if (!lio->glist_lock)
-               return 1;
+               return -ENOMEM;
 
        lio->glist = kcalloc(num_iqs, sizeof(*lio->glist),
                             GFP_KERNEL);
        if (!lio->glist) {
-               kfree((void *)lio->glist_lock);
-               return 1;
+               kfree(lio->glist_lock);
+               lio->glist_lock = NULL;
+               return -ENOMEM;
+       }
+
+       lio->glist_entry_size =
+               ROUNDUP8((ROUNDUP4(OCTNIC_MAX_SG) >> 2) * OCT_SG_ENTRY_SIZE);
+
+       /* allocate memory to store virtual and dma base address of
+        * per glist consistent memory
+        */
+       lio->glists_virt_base = kcalloc(num_iqs, sizeof(*lio->glists_virt_base),
+                                       GFP_KERNEL);
+       lio->glists_dma_base = kcalloc(num_iqs, sizeof(*lio->glists_dma_base),
+                                      GFP_KERNEL);
+
+       if (!lio->glists_virt_base || !lio->glists_dma_base) {
+               delete_glists(lio);
+               return -ENOMEM;
        }
 
        for (i = 0; i < num_iqs; i++) {
-               int numa_node = cpu_to_node(i % num_online_cpus());
+               int numa_node = dev_to_node(&oct->pci_dev->dev);
 
                spin_lock_init(&lio->glist_lock[i]);
 
                INIT_LIST_HEAD(&lio->glist[i]);
 
+               lio->glists_virt_base[i] =
+                       lio_dma_alloc(oct,
+                                     lio->glist_entry_size * lio->tx_qsize,
+                                     &lio->glists_dma_base[i]);
+
+               if (!lio->glists_virt_base[i]) {
+                       delete_glists(lio);
+                       return -ENOMEM;
+               }
+
                for (j = 0; j < lio->tx_qsize; j++) {
                        g = kzalloc_node(sizeof(*g), GFP_KERNEL,
                                         numa_node);
@@ -796,43 +797,18 @@ static int setup_glists(struct octeon_device *oct, struct lio *lio, int num_iqs)
                        if (!g)
                                break;
 
-                       g->sg_size = ((ROUNDUP4(OCTNIC_MAX_SG) >> 2) *
-                                     OCT_SG_ENTRY_SIZE);
+                       g->sg = lio->glists_virt_base[i] +
+                               (j * lio->glist_entry_size);
 
-                       g->sg = kmalloc_node(g->sg_size + 8,
-                                            GFP_KERNEL, numa_node);
-                       if (!g->sg)
-                               g->sg = kmalloc(g->sg_size + 8, GFP_KERNEL);
-                       if (!g->sg) {
-                               kfree(g);
-                               break;
-                       }
-
-                       /* The gather component should be aligned on 64-bit
-                        * boundary
-                        */
-                       if (((unsigned long)g->sg) & 7) {
-                               g->adjust = 8 - (((unsigned long)g->sg) & 7);
-                               g->sg = (struct octeon_sg_entry *)
-                                       ((unsigned long)g->sg + g->adjust);
-                       }
-                       g->sg_dma_ptr = dma_map_single(&oct->pci_dev->dev,
-                                                      g->sg, g->sg_size,
-                                                      DMA_TO_DEVICE);
-                       if (dma_mapping_error(&oct->pci_dev->dev,
-                                             g->sg_dma_ptr)) {
-                               kfree((void *)((unsigned long)g->sg -
-                                              g->adjust));
-                               kfree(g);
-                               break;
-                       }
+                       g->sg_dma_ptr = lio->glists_dma_base[i] +
+                                       (j * lio->glist_entry_size);
 
                        list_add_tail(&g->list, &lio->glist[i]);
                }
 
                if (j != lio->tx_qsize) {
                        delete_glists(lio);
-                       return 1;
+                       return -ENOMEM;
                }
        }
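
The gather lists are now carved out of one DMA-coherent block per transmit queue instead of a kmalloc() plus dma_map_single() per entry, which is also what lets the per-packet dma_sync_single_for_*() calls be dropped later in this patch. A sketch of the layout, assuming lio_dma_alloc() is a thin wrapper around dma_alloc_coherent() (sizes and names below are stand-ins):

    static int demo_setup_glist_region(struct device *dev,
                                       size_t entry_size, int nentries)
    {
            dma_addr_t dma_base;
            void *virt_base;
            int j;

            virt_base = dma_alloc_coherent(dev, entry_size * nentries,
                                           &dma_base, GFP_KERNEL);
            if (!virt_base)
                    return -ENOMEM;

            for (j = 0; j < nentries; j++) {
                    void *sg_virt = virt_base + j * entry_size;
                    dma_addr_t sg_dma = dma_base + j * entry_size;

                    /* sg_virt/sg_dma stay paired at fixed offsets; no
                     * per-packet map, unmap, or sync is needed
                     */
            }
            return 0;
    }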
 
@@ -959,14 +935,13 @@ static void update_txq_status(struct octeon_device *oct, int iq_num)
                        INCR_INSTRQUEUE_PKT_COUNT(lio->oct_dev, iq_num,
                                                  tx_restart, 1);
                        netif_wake_subqueue(netdev, iq->q_index);
-               } else {
-                       if (!octnet_iq_is_full(oct, lio->txq)) {
-                               INCR_INSTRQUEUE_PKT_COUNT(lio->oct_dev,
-                                                         lio->txq,
-                                                         tx_restart, 1);
-                               wake_q(netdev, lio->txq);
-                       }
                }
+       } else if (netif_queue_stopped(netdev) &&
+                  lio->linfo.link.s.link_up &&
+                  (!octnet_iq_is_full(oct, lio->txq))) {
+               INCR_INSTRQUEUE_PKT_COUNT(lio->oct_dev,
+                                         lio->txq, tx_restart, 1);
+               netif_wake_queue(netdev);
        }
 }
 
@@ -1076,16 +1051,35 @@ static int octeon_setup_interrupt(struct octeon_device *oct)
        int i;
        int num_ioq_vectors;
        int num_alloc_ioq_vectors;
+       char *queue_irq_names = NULL;
+       char *aux_irq_name = NULL;
 
        if (OCTEON_CN23XX_PF(oct) && oct->msix_on) {
                oct->num_msix_irqs = oct->sriov_info.num_pf_rings;
                /* one non ioq interrupt for handling sli_mac_pf_int_sum */
                oct->num_msix_irqs += 1;
 
+               /* allocate storage for the names assigned to each irq */
+               oct->irq_name_storage =
+                       kcalloc((MAX_IOQ_INTERRUPTS_PER_PF + 1), INTRNAMSIZ,
+                               GFP_KERNEL);
+               if (!oct->irq_name_storage) {
+                       dev_err(&oct->pci_dev->dev, "Irq name storage alloc failed...\n");
+                       return -ENOMEM;
+               }
+
+               queue_irq_names = oct->irq_name_storage;
+               aux_irq_name = &queue_irq_names
+                               [IRQ_NAME_OFF(MAX_IOQ_INTERRUPTS_PER_PF)];
+
                oct->msix_entries = kcalloc(
                    oct->num_msix_irqs, sizeof(struct msix_entry), GFP_KERNEL);
-               if (!oct->msix_entries)
-                       return 1;
+               if (!oct->msix_entries) {
+                       dev_err(&oct->pci_dev->dev, "Memory Alloc failed...\n");
+                       kfree(oct->irq_name_storage);
+                       oct->irq_name_storage = NULL;
+                       return -ENOMEM;
+               }
 
                msix_entries = (struct msix_entry *)oct->msix_entries;
                /*Assumption is that pf msix vectors start from pf srn to pf to
@@ -1103,7 +1097,9 @@ static int octeon_setup_interrupt(struct octeon_device *oct)
                        dev_err(&oct->pci_dev->dev, "unable to Allocate MSI-X interrupts\n");
                        kfree(oct->msix_entries);
                        oct->msix_entries = NULL;
-                       return 1;
+                       kfree(oct->irq_name_storage);
+                       oct->irq_name_storage = NULL;
+                       return num_alloc_ioq_vectors;
                }
                dev_dbg(&oct->pci_dev->dev, "OCTEON: Enough MSI-X interrupts are allocated...\n");
 
@@ -1111,9 +1107,12 @@ static int octeon_setup_interrupt(struct octeon_device *oct)
 
                /** For PF, there is one non-ioq interrupt handler */
                num_ioq_vectors -= 1;
+
+               snprintf(aux_irq_name, INTRNAMSIZ,
+                        "LiquidIO%u-pf%u-aux", oct->octeon_id, oct->pf_num);
                irqret = request_irq(msix_entries[num_ioq_vectors].vector,
-                                    liquidio_legacy_intr_handler, 0, "octeon",
-                                    oct);
+                                    liquidio_legacy_intr_handler, 0,
+                                    aux_irq_name, oct);
                if (irqret) {
                        dev_err(&oct->pci_dev->dev,
                                "OCTEON: Request_irq failed for MSIX interrupt Error: %d\n",
@@ -1121,13 +1120,20 @@ static int octeon_setup_interrupt(struct octeon_device *oct)
                        pci_disable_msix(oct->pci_dev);
                        kfree(oct->msix_entries);
                        oct->msix_entries = NULL;
-                       return 1;
+                       kfree(oct->irq_name_storage);
+                       oct->irq_name_storage = NULL;
+                       return irqret;
                }
 
                for (i = 0; i < num_ioq_vectors; i++) {
+                       snprintf(&queue_irq_names[IRQ_NAME_OFF(i)], INTRNAMSIZ,
+                                "LiquidIO%u-pf%u-rxtx-%u",
+                                oct->octeon_id, oct->pf_num, i);
+
                        irqret = request_irq(msix_entries[i].vector,
                                             liquidio_msix_intr_handler, 0,
-                                            "octeon", &oct->ioq_vector[i]);
+                                            &queue_irq_names[IRQ_NAME_OFF(i)],
+                                            &oct->ioq_vector[i]);
                        if (irqret) {
                                dev_err(&oct->pci_dev->dev,
                                        "OCTEON: Request_irq failed for MSIX interrupt Error: %d\n",
@@ -1147,7 +1153,9 @@ static int octeon_setup_interrupt(struct octeon_device *oct)
                                pci_disable_msix(oct->pci_dev);
                                kfree(oct->msix_entries);
                                oct->msix_entries = NULL;
-                               return 1;
+                               kfree(oct->irq_name_storage);
+                               oct->irq_name_storage = NULL;
+                               return irqret;
                        }
                        oct->ioq_vector[i].vector = msix_entries[i].vector;
                        /* assign the cpu mask for this msix interrupt vector */
@@ -1165,111 +1173,150 @@ static int octeon_setup_interrupt(struct octeon_device *oct)
                else
                        oct->flags |= LIO_FLAG_MSI_ENABLED;
 
+               /* allocate storage for the names assigned to the irq */
+               oct->irq_name_storage = kcalloc(1, INTRNAMSIZ, GFP_KERNEL);
+               if (!oct->irq_name_storage)
+                       return -ENOMEM;
+
+               queue_irq_names = oct->irq_name_storage;
+
+               snprintf(&queue_irq_names[IRQ_NAME_OFF(0)], INTRNAMSIZ,
+                        "LiquidIO%u-pf%u-rxtx-%u",
+                        oct->octeon_id, oct->pf_num, 0);
+
                irqret = request_irq(oct->pci_dev->irq,
-                                    liquidio_legacy_intr_handler, IRQF_SHARED,
-                                    "octeon", oct);
+                                    liquidio_legacy_intr_handler,
+                                    IRQF_SHARED,
+                                    &queue_irq_names[IRQ_NAME_OFF(0)], oct);
                if (irqret) {
                        if (oct->flags & LIO_FLAG_MSI_ENABLED)
                                pci_disable_msi(oct->pci_dev);
                        dev_err(&oct->pci_dev->dev, "Request IRQ failed with code: %d\n",
                                irqret);
-                       return 1;
+                       kfree(oct->irq_name_storage);
+                       oct->irq_name_storage = NULL;
+                       return irqret;
                }
        }
        return 0;
 }
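
request_irq() keeps the name pointer it is given for the lifetime of the handler (it is what shows up in /proc/interrupts), so the per-queue names need storage that outlives this function; hence oct->irq_name_storage, freed only after the IRQs are released. The pattern in miniature (sizes, names, and variables here are illustrative):

    #define DEMO_NAMSIZ 32

    char *storage = kcalloc(nvecs, DEMO_NAMSIZ, GFP_KERNEL);
    int i;

    if (!storage)
            return -ENOMEM;

    for (i = 0; i < nvecs; i++) {
            char *name = storage + i * DEMO_NAMSIZ;

            snprintf(name, DEMO_NAMSIZ, "demo%u-rxtx-%d", dev_id, i);
            request_irq(vectors[i], demo_handler, 0, name, &priv[i]);
    }
    /* kfree(storage) only after every free_irq() */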
 
+static struct octeon_device *get_other_octeon_device(struct octeon_device *oct)
+{
+       struct octeon_device *other_oct;
+
+       other_oct = lio_get_device(oct->octeon_id + 1);
+
+       if (other_oct && other_oct->pci_dev) {
+               int oct_busnum, other_oct_busnum;
+
+               oct_busnum = oct->pci_dev->bus->number;
+               other_oct_busnum = other_oct->pci_dev->bus->number;
+
+               if (oct_busnum == other_oct_busnum) {
+                       int oct_slot, other_oct_slot;
+
+                       oct_slot = PCI_SLOT(oct->pci_dev->devfn);
+                       other_oct_slot = PCI_SLOT(other_oct->pci_dev->devfn);
+
+                       if (oct_slot == other_oct_slot)
+                               return other_oct;
+               }
+       }
+
+       return NULL;
+}
+
+static void disable_all_vf_links(struct octeon_device *oct)
+{
+       struct net_device *netdev;
+       int max_vfs, vf, i;
+
+       if (!oct)
+               return;
+
+       max_vfs = oct->sriov_info.max_vfs;
+
+       for (i = 0; i < oct->ifcount; i++) {
+               netdev = oct->props[i].netdev;
+               if (!netdev)
+                       continue;
+
+               for (vf = 0; vf < max_vfs; vf++)
+                       liquidio_set_vf_link_state(netdev, vf,
+                                                  IFLA_VF_LINK_STATE_DISABLE);
+       }
+}
+
 static int liquidio_watchdog(void *param)
 {
-       u64 wdog;
-       u16 mask_of_stuck_cores = 0;
-       u16 mask_of_crashed_cores = 0;
-       int core_num;
-       u8 core_is_stuck[LIO_MAX_CORES];
-       u8 core_crashed[LIO_MAX_CORES];
+       bool err_msg_was_printed[LIO_MAX_CORES];
+       u16 mask_of_crashed_or_stuck_cores = 0;
+       bool all_vf_links_are_disabled = false;
        struct octeon_device *oct = param;
+       struct octeon_device *other_oct;
+#ifdef CONFIG_MODULE_UNLOAD
+       long refcount, vfs_referencing_pf;
+       u64 vfs_mask1, vfs_mask2;
+#endif
+       int core;
 
-       memset(core_is_stuck, 0, sizeof(core_is_stuck));
-       memset(core_crashed, 0, sizeof(core_crashed));
+       memset(err_msg_was_printed, 0, sizeof(err_msg_was_printed));
 
        while (!kthread_should_stop()) {
-               mask_of_crashed_cores =
+               /* sleep for a couple of seconds so that we don't hog the CPU */
+               set_current_state(TASK_INTERRUPTIBLE);
+               schedule_timeout(msecs_to_jiffies(2000));
+
+               mask_of_crashed_or_stuck_cores =
                    (u16)octeon_read_csr64(oct, CN23XX_SLI_SCRATCH2);
 
-               for (core_num = 0; core_num < LIO_MAX_CORES; core_num++) {
-                       if (!core_is_stuck[core_num]) {
-                               wdog = lio_pci_readq(oct, CIU3_WDOG(core_num));
-
-                               /* look at watchdog state field */
-                               wdog &= CIU3_WDOG_MASK;
-                               if (wdog) {
-                                       /* this watchdog timer has expired */
-                                       core_is_stuck[core_num] =
-                                               LIO_MONITOR_WDOG_EXPIRE;
-                                       mask_of_stuck_cores |= (1 << core_num);
-                               }
-                       }
+               if (!mask_of_crashed_or_stuck_cores)
+                       continue;
 
-                       if (!core_crashed[core_num])
-                               core_crashed[core_num] =
-                                   (mask_of_crashed_cores >> core_num) & 1;
-               }
+               WRITE_ONCE(oct->cores_crashed, true);
+               other_oct = get_other_octeon_device(oct);
+               if (other_oct)
+                       WRITE_ONCE(other_oct->cores_crashed, true);
 
-               if (mask_of_stuck_cores) {
-                       for (core_num = 0; core_num < LIO_MAX_CORES;
-                            core_num++) {
-                               if (core_is_stuck[core_num] == 1) {
-                                       dev_err(&oct->pci_dev->dev,
-                                               "ERROR: Octeon core %d is stuck!\n",
-                                               core_num);
-                                       /* 2 means we have printk'd  an error
-                                        * so no need to repeat the same printk
-                                        */
-                                       core_is_stuck[core_num] =
-                                               LIO_MONITOR_CORE_STUCK_MSGD;
-                               }
-                       }
-               }
+               for (core = 0; core < LIO_MAX_CORES; core++) {
+                       bool core_crashed_or_got_stuck;
 
-               if (mask_of_crashed_cores) {
-                       for (core_num = 0; core_num < LIO_MAX_CORES;
-                            core_num++) {
-                               if (core_crashed[core_num] == 1) {
-                                       dev_err(&oct->pci_dev->dev,
-                                               "ERROR: Octeon core %d crashed!  See oct-fwdump for details.\n",
-                                               core_num);
-                                       /* 2 means we have printk'd  an error
-                                        * so no need to repeat the same printk
-                                        */
-                                       core_crashed[core_num] =
-                                               LIO_MONITOR_CORE_STUCK_MSGD;
-                               }
+                       core_crashed_or_got_stuck =
+                                               (mask_of_crashed_or_stuck_cores
+                                                >> core) & 1;
+
+                       if (core_crashed_or_got_stuck &&
+                           !err_msg_was_printed[core]) {
+                               dev_err(&oct->pci_dev->dev,
+                                       "ERROR: Octeon core %d crashed or got stuck!  See oct-fwdump for details.\n",
+                                       core);
+                               err_msg_was_printed[core] = true;
                        }
                }
+
+               if (all_vf_links_are_disabled)
+                       continue;
+
+               disable_all_vf_links(oct);
+               disable_all_vf_links(other_oct);
+               all_vf_links_are_disabled = true;
+
 #ifdef CONFIG_MODULE_UNLOAD
-               if (mask_of_stuck_cores || mask_of_crashed_cores) {
-                       /* make module refcount=0 so that rmmod will work */
-                       long refcount;
+               vfs_mask1 = READ_ONCE(oct->sriov_info.vf_drv_loaded_mask);
+               vfs_mask2 = other_oct ?
+                       READ_ONCE(other_oct->sriov_info.vf_drv_loaded_mask) : 0;
 
-                       refcount = module_refcount(THIS_MODULE);
+               vfs_referencing_pf  = hweight64(vfs_mask1);
+               vfs_referencing_pf += hweight64(vfs_mask2);
 
-                       while (refcount > 0) {
+               refcount = module_refcount(THIS_MODULE);
+               if (refcount >= vfs_referencing_pf) {
+                       while (vfs_referencing_pf) {
                                module_put(THIS_MODULE);
-                               refcount = module_refcount(THIS_MODULE);
-                       }
-
-                       /* compensate for and withstand an unlikely (but still
-                        * possible) race condition
-                        */
-                       while (refcount < 0) {
-                               try_module_get(THIS_MODULE);
-                               refcount = module_refcount(THIS_MODULE);
+                               vfs_referencing_pf--;
                        }
                }
 #endif
-               /* sleep for two seconds */
-               set_current_state(TASK_INTERRUPTIBLE);
-               schedule_timeout(2 * HZ);
        }
 
        return 0;
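
Each loaded VF driver takes one reference on the PF module (see octeon_recv_vf_drv_notice() further down), so once the NIC cores are dead the watchdog releases exactly that many references and no more, letting rmmod proceed without the old unconditional drop-to-zero loop. The arithmetic in isolation (the mask value is illustrative):

    u64 vfs_mask = 0x5;                  /* VFs 0 and 2 loaded */
    long pinned = hweight64(vfs_mask);   /* 2 references held */

    if (module_refcount(THIS_MODULE) >= pinned)
            while (pinned--)
                    module_put(THIS_MODULE);  /* rmmod can now proceed */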
@@ -1361,6 +1408,12 @@ liquidio_probe(struct pci_dev *pdev,
        return 0;
 }
 
+static bool fw_type_is_none(void)
+{
+       return strncmp(fw_type, LIO_FW_NAME_TYPE_NONE,
+                      sizeof(LIO_FW_NAME_TYPE_NONE)) == 0;
+}
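
Because sizeof(LIO_FW_NAME_TYPE_NONE) includes the terminating NUL, this strncmp() is an exact-match test rather than a prefix test. A quick standalone check:

    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
            printf("%d %d\n",
                   strncmp("none", "none", sizeof("none")) == 0,     /* 1 */
                   strncmp("nonefoo", "none", sizeof("none")) == 0); /* 0 */
            return 0;
    }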
+
 /**
  *\brief Destroy resources associated with octeon device
  * @param pdev PCI device structure
@@ -1441,6 +1494,9 @@ static void octeon_destroy_resources(struct octeon_device *oct)
                                pci_disable_msi(oct->pci_dev);
                }
 
+               kfree(oct->irq_name_storage);
+               oct->irq_name_storage = NULL;
+
        /* fallthrough */
        case OCT_DEV_MSIX_ALLOC_VECTOR_DONE:
                if (OCTEON_CN23XX_PF(oct))
@@ -1500,9 +1556,12 @@ static void octeon_destroy_resources(struct octeon_device *oct)
 
                /* fallthrough */
        case OCT_DEV_PCI_MAP_DONE:
-               /* Soft reset the octeon device before exiting */
-               if ((!OCTEON_CN23XX_PF(oct)) || !oct->octeon_id)
-                       oct->fn_list.soft_reset(oct);
+               if (!fw_type_is_none()) {
+                       /* Soft reset the octeon device before exiting */
+                       if (!OCTEON_CN23XX_PF(oct) ||
+                           (OCTEON_CN23XX_PF(oct) && !oct->octeon_id))
+                               oct->fn_list.soft_reset(oct);
+               }
 
                octeon_unmap_pci_barx(oct, 0);
                octeon_unmap_pci_barx(oct, 1);
@@ -1635,6 +1694,15 @@ static void liquidio_destroy_nic_device(struct octeon_device *oct, int ifidx)
        if (atomic_read(&lio->ifstate) & LIO_IFSTATE_RUNNING)
                liquidio_stop(netdev);
 
+       if (fw_type_is_none()) {
+               struct octnic_ctrl_pkt nctrl;
+
+               memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));
+               nctrl.ncmd.s.cmd = OCTNET_CMD_RESET_PF;
+               nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
+               octnet_send_nic_ctrl_pkt(oct, &nctrl);
+       }
+
        if (oct->props[lio->ifidx].napi_enabled == 1) {
                list_for_each_entry_safe(napi, n, &netdev->napi_list, dev_list)
                        napi_disable(napi);
@@ -1650,6 +1718,8 @@ static void liquidio_destroy_nic_device(struct octeon_device *oct, int ifidx)
 
        cleanup_link_status_change_wq(netdev);
 
+       cleanup_rx_oom_poll_fn(netdev);
+
        delete_glists(lio);
 
        free_netdev(netdev);
@@ -1885,9 +1955,6 @@ static void free_netsgbuf(void *buf)
                i++;
        }
 
-       dma_sync_single_for_cpu(&lio->oct_dev->pci_dev->dev,
-                               g->sg_dma_ptr, g->sg_size, DMA_TO_DEVICE);
-
        iq = skb_iq(lio, skb);
        spin_lock(&lio->glist_lock[iq]);
        list_add_tail(&g->list, &lio->glist[iq]);
@@ -1933,9 +2000,6 @@ static void free_netsgbuf_with_resp(void *buf)
                i++;
        }
 
-       dma_sync_single_for_cpu(&lio->oct_dev->pci_dev->dev,
-                               g->sg_dma_ptr, g->sg_size, DMA_TO_DEVICE);
-
        iq = skb_iq(lio, skb);
 
        spin_lock(&lio->glist_lock[iq]);
@@ -2124,8 +2188,7 @@ static int load_firmware(struct octeon_device *oct)
        char fw_name[LIO_MAX_FW_FILENAME_LEN];
        char *tmp_fw_type;
 
-       if (strncmp(fw_type, LIO_FW_NAME_TYPE_NONE,
-                   sizeof(LIO_FW_NAME_TYPE_NONE)) == 0) {
+       if (fw_type_is_none()) {
                dev_info(&oct->pci_dev->dev, "Skipping firmware load\n");
                return ret;
        }
@@ -2209,8 +2272,8 @@ static void if_cfg_callback(struct octeon_device *oct,
 
        oct = lio_get_device(ctx->octeon_id);
        if (resp->status)
-               dev_err(&oct->pci_dev->dev, "nic if cfg instruction failed. Status: %llx\n",
-                       CVM_CAST64(resp->status));
+               dev_err(&oct->pci_dev->dev, "nic if cfg instruction failed. Status: 0x%llx (0x%08x)\n",
+                       CVM_CAST64(resp->status), status);
        WRITE_ONCE(ctx->cond, 1);
 
        snprintf(oct->fw_info.liquidio_firmware_version, 32, "%s",
@@ -2435,8 +2498,11 @@ static int liquidio_napi_poll(struct napi_struct *napi, int budget)
        /* Flush the instruction queue */
        iq = oct->instr_queue[iq_no];
        if (iq) {
-               /* Process iq buffers with in the budget limits */
-               tx_done = octeon_flush_iq(oct, iq, budget);
+               if (atomic_read(&iq->instr_pending))
+                       /* Process iq buffers within the budget limits */
+                       tx_done = octeon_flush_iq(oct, iq, budget);
+               else
+                       tx_done = 1;
                /* Update iq read-index rather than waiting for next interrupt.
                 * Return back if tx_done is false.
                 */
@@ -2553,6 +2619,15 @@ static inline int setup_io_queues(struct octeon_device *octeon_dev,
                                __func__);
                        return 1;
                }
+
+               if (octeon_dev->ioq_vector) {
+                       struct octeon_ioq_vector *ioq_vector;
+
+                       ioq_vector = &octeon_dev->ioq_vector[q];
+                       netif_set_xps_queue(netdev,
+                                           &ioq_vector->affinity_mask,
+                                           ioq_vector->iq_index);
+               }
        }
 
        return 0;
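
netif_set_xps_queue() records a transmit-queue-to-CPU mapping (XPS), so packets generated on the CPUs that service a queue's MSI-X vector are also transmitted on that queue, keeping completion processing cache-local. In isolation:

    #include <linux/netdevice.h>

    /* bind TX queue 'qidx' to the CPUs in 'mask'; a nonzero return is
     * non-fatal, the stack simply keeps its default queue selection
     */
    static void demo_bind_xps(struct net_device *netdev,
                              const struct cpumask *mask, u16 qidx)
    {
            netif_set_xps_queue(netdev, mask, qidx);
    }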
@@ -3273,8 +3348,6 @@ static int liquidio_xmit(struct sk_buff *skb, struct net_device *netdev)
                        i++;
                }
 
-               dma_sync_single_for_device(&oct->pci_dev->dev, g->sg_dma_ptr,
-                                          g->sg_size, DMA_TO_DEVICE);
                dptr = g->sg_dma_ptr;
 
                if (OCTEON_CN23XX_PF(oct))
@@ -3426,6 +3499,8 @@ static int liquidio_set_rxcsum_command(struct net_device *netdev, int command,
        struct octnic_ctrl_pkt nctrl;
        int ret = 0;
 
+       memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));
+
        nctrl.ncmd.u64 = 0;
        nctrl.ncmd.s.cmd = command;
        nctrl.ncmd.s.param1 = rx_cmd;
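
struct octnic_ctrl_pkt lives on the stack and only a handful of its fields are assigned, so without the memset() the remaining fields (and padding bytes) would carry stack garbage into the control command sent to firmware; the same fix is applied to the VXLAN path just below. The hazard in miniature (the demo struct stands in for octnic_ctrl_pkt):

    #include <linux/string.h>
    #include <linux/types.h>

    struct demo_cmd {
            u64 cmd;
            u64 param1;
            u64 reserved[6];   /* holds stack garbage unless zeroed */
    };

    struct demo_cmd c;

    memset(&c, 0, sizeof(c));  /* zero every field and padding byte */
    c.cmd = 1;
    c.param1 = 42;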
@@ -3459,6 +3534,8 @@ static int liquidio_vxlan_port_command(struct net_device *netdev, int command,
        struct octnic_ctrl_pkt nctrl;
        int ret = 0;
 
+       memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));
+
        nctrl.ncmd.u64 = 0;
        nctrl.ncmd.s.cmd = command;
        nctrl.ncmd.s.more = vxlan_cmd_bit;
@@ -3596,7 +3673,8 @@ static int __liquidio_set_vf_mac(struct net_device *netdev, int vfidx,
        nctrl.ncmd.s.param2 = (is_admin_assigned ? 1 : 0);
        nctrl.ncmd.s.more = 1;
        nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
-       nctrl.cb_fn = 0;
+       nctrl.netpndev = (u64)netdev;
+       nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;
        nctrl.wait_time = LIO_CMD_WAIT_TM;
 
        nctrl.udd[0] = 0;
@@ -4122,6 +4200,9 @@ static int setup_nic_devices(struct octeon_device *octeon_dev)
                if (setup_link_status_change_wq(netdev))
                        goto setup_nic_dev_fail;
 
+               if (setup_rx_oom_poll_fn(netdev))
+                       goto setup_nic_dev_fail;
+
                /* Register the network device with the OS */
                if (register_netdev(netdev)) {
                        dev_err(&octeon_dev->pci_dev->dev, "Device registration failed\n");
@@ -4271,7 +4352,6 @@ static int liquidio_enable_sriov(struct pci_dev *dev, int num_vfs)
  */
 static int liquidio_init_nic_module(struct octeon_device *oct)
 {
-       struct oct_intrmod_cfg *intrmod_cfg;
        int i, retval = 0;
        int num_nic_ports = CFG_GET_NUM_NIC_PORTS(octeon_get_conf(oct));
 
@@ -4296,22 +4376,6 @@ static int liquidio_init_nic_module(struct octeon_device *oct)
 
        liquidio_ptp_init(oct);
 
-       /* Initialize interrupt moderation params */
-       intrmod_cfg = &((struct octeon_device *)oct)->intrmod;
-       intrmod_cfg->rx_enable = 1;
-       intrmod_cfg->check_intrvl = LIO_INTRMOD_CHECK_INTERVAL;
-       intrmod_cfg->maxpkt_ratethr = LIO_INTRMOD_MAXPKT_RATETHR;
-       intrmod_cfg->minpkt_ratethr = LIO_INTRMOD_MINPKT_RATETHR;
-       intrmod_cfg->rx_maxcnt_trigger = LIO_INTRMOD_RXMAXCNT_TRIGGER;
-       intrmod_cfg->rx_maxtmr_trigger = LIO_INTRMOD_RXMAXTMR_TRIGGER;
-       intrmod_cfg->rx_mintmr_trigger = LIO_INTRMOD_RXMINTMR_TRIGGER;
-       intrmod_cfg->rx_mincnt_trigger = LIO_INTRMOD_RXMINCNT_TRIGGER;
-       intrmod_cfg->tx_enable = 1;
-       intrmod_cfg->tx_maxcnt_trigger = LIO_INTRMOD_TXMAXCNT_TRIGGER;
-       intrmod_cfg->tx_mincnt_trigger = LIO_INTRMOD_TXMINCNT_TRIGGER;
-       intrmod_cfg->rx_frames = CFG_GET_OQ_INTR_PKT(octeon_get_conf(oct));
-       intrmod_cfg->rx_usecs = CFG_GET_OQ_INTR_TIME(octeon_get_conf(oct));
-       intrmod_cfg->tx_frames = CFG_GET_IQ_INTR_PKT(octeon_get_conf(oct));
        dev_dbg(&oct->pci_dev->dev, "Network interfaces ready\n");
 
        return retval;
@@ -4373,6 +4437,7 @@ octeon_recv_vf_drv_notice(struct octeon_recv_info *recv_info, void *buf)
        struct octeon_device *oct = (struct octeon_device *)buf;
        struct octeon_recv_pkt *recv_pkt = recv_info->recv_pkt;
        int i, notice, vf_idx;
+       bool cores_crashed;
        u64 *data, vf_num;
 
        notice = recv_pkt->rh.r.ossp;
@@ -4383,19 +4448,23 @@ octeon_recv_vf_drv_notice(struct octeon_recv_info *recv_info, void *buf)
        octeon_swap_8B_data(&vf_num, 1);
        vf_idx = (int)vf_num - 1;
 
+       cores_crashed = READ_ONCE(oct->cores_crashed);
+
        if (notice == VF_DRV_LOADED) {
                if (!(oct->sriov_info.vf_drv_loaded_mask & BIT_ULL(vf_idx))) {
                        oct->sriov_info.vf_drv_loaded_mask |= BIT_ULL(vf_idx);
                        dev_info(&oct->pci_dev->dev,
                                 "driver for VF%d was loaded\n", vf_idx);
-                       try_module_get(THIS_MODULE);
+                       if (!cores_crashed)
+                               try_module_get(THIS_MODULE);
                }
        } else if (notice == VF_DRV_REMOVED) {
                if (oct->sriov_info.vf_drv_loaded_mask & BIT_ULL(vf_idx)) {
                        oct->sriov_info.vf_drv_loaded_mask &= ~BIT_ULL(vf_idx);
                        dev_info(&oct->pci_dev->dev,
                                 "driver for VF%d was removed\n", vf_idx);
-                       module_put(THIS_MODULE);
+                       if (!cores_crashed)
+                               module_put(THIS_MODULE);
                }
        } else if (notice == VF_DRV_MACADDR_CHANGED) {
                u8 *b = (u8 *)&data[1];
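The hunk above makes the PF's module pinning conditional on a READ_ONCE() snapshot of oct->cores_crashed, presumably so a PF whose firmware cores have crashed no longer holds a reference on itself and can be unloaded for recovery. A minimal sketch of the pattern; vf_notice() and its bool argument are hypothetical stand-ins for the notice handler:

    /* Illustrative only: once the cores have crashed, VF load/remove
     * notices stop touching the PF module refcount, so the crashed
     * device does not keep the PF driver pinned in memory.
     */
    static void vf_notice(struct octeon_device *oct, bool loaded)
    {
            if (READ_ONCE(oct->cores_crashed))
                    return;
            if (loaded)
                    try_module_get(THIS_MODULE);
            else
                    module_put(THIS_MODULE);
    }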
@@ -4447,14 +4516,16 @@ static int octeon_device_init(struct octeon_device *octeon_dev)
        if (OCTEON_CN23XX_PF(octeon_dev)) {
                if (!cn23xx_fw_loaded(octeon_dev)) {
                        fw_loaded = 0;
-                       /* Do a soft reset of the Octeon device. */
-                       if (octeon_dev->fn_list.soft_reset(octeon_dev))
-                               return 1;
-                       /* things might have changed */
-                       if (!cn23xx_fw_loaded(octeon_dev))
-                               fw_loaded = 0;
-                       else
-                               fw_loaded = 1;
+                       if (!fw_type_is_none()) {
+                               /* Do a soft reset of the Octeon device. */
+                               if (octeon_dev->fn_list.soft_reset(octeon_dev))
+                                       return 1;
+                               /* things might have changed */
+                               if (!cn23xx_fw_loaded(octeon_dev))
+                                       fw_loaded = 0;
+                               else
+                                       fw_loaded = 1;
+                       }
                } else {
                        fw_loaded = 1;
                }
index 9d5e03502c76cbfe3c8372a5d3e73c67e07e3a03..34c77821fad9f2a49d49841bef2b0c601ca402aa 100644 (file)
@@ -16,6 +16,7 @@
  * NONINFRINGEMENT.  See the GNU General Public License for more details.
  ***********************************************************************/
 #include <linux/module.h>
+#include <linux/interrupt.h>
 #include <linux/pci.h>
 #include <net/vxlan.h>
 #include "liquidio_common.h"
@@ -39,12 +40,6 @@ MODULE_PARM_DESC(debug, "NETIF_MSG debug bits");
 
 #define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK)
 
-/* Bit mask values for lio->ifstate */
-#define   LIO_IFSTATE_DROQ_OPS             0x01
-#define   LIO_IFSTATE_REGISTERED           0x02
-#define   LIO_IFSTATE_RUNNING              0x04
-#define   LIO_IFSTATE_RX_TIMESTAMP_ENABLED 0x08
-
 struct liquidio_if_cfg_context {
        int octeon_id;
 
@@ -108,6 +103,8 @@ struct octnic_gather {
         * received from the IP layer.
         */
        struct octeon_sg_entry *sg;
+
+       dma_addr_t sg_dma_ptr;
 };
 
 struct octeon_device_priv {
@@ -333,36 +330,6 @@ static struct pci_driver liquidio_vf_pci_driver = {
        .err_handler    = &liquidio_vf_err_handler,    /* For AER */
 };
 
-/**
- * \brief check interface state
- * @param lio per-network private data
- * @param state_flag flag state to check
- */
-static int ifstate_check(struct lio *lio, int state_flag)
-{
-       return atomic_read(&lio->ifstate) & state_flag;
-}
-
-/**
- * \brief set interface state
- * @param lio per-network private data
- * @param state_flag flag state to set
- */
-static void ifstate_set(struct lio *lio, int state_flag)
-{
-       atomic_set(&lio->ifstate, (atomic_read(&lio->ifstate) | state_flag));
-}
-
-/**
- * \brief clear interface state
- * @param lio per-network private data
- * @param state_flag flag state to clear
- */
-static void ifstate_reset(struct lio *lio, int state_flag)
-{
-       atomic_set(&lio->ifstate, (atomic_read(&lio->ifstate) & ~(state_flag)));
-}
-
 /**
  * \brief Stop Tx queues
  * @param netdev network device
@@ -490,6 +457,9 @@ static void delete_glists(struct lio *lio)
        struct octnic_gather *g;
        int i;
 
+       kfree(lio->glist_lock);
+       lio->glist_lock = NULL;
+
        if (!lio->glist)
                return;
 
@@ -497,17 +467,27 @@ static void delete_glists(struct lio *lio)
                do {
                        g = (struct octnic_gather *)
                            list_delete_head(&lio->glist[i]);
-                       if (g) {
-                               if (g->sg)
-                                       kfree((void *)((unsigned long)g->sg -
-                                                       g->adjust));
+                       if (g)
                                kfree(g);
-                       }
                } while (g);
+
+               if (lio->glists_virt_base && lio->glists_virt_base[i] &&
+                   lio->glists_dma_base && lio->glists_dma_base[i]) {
+                       lio_dma_free(lio->oct_dev,
+                                    lio->glist_entry_size * lio->tx_qsize,
+                                    lio->glists_virt_base[i],
+                                    lio->glists_dma_base[i]);
+               }
        }
 
+       kfree(lio->glists_virt_base);
+       lio->glists_virt_base = NULL;
+
+       kfree(lio->glists_dma_base);
+       lio->glists_dma_base = NULL;
+
        kfree(lio->glist);
-       kfree(lio->glist_lock);
+       lio->glist = NULL;
 }
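The reordered teardown above frees lio->glist_lock up front and resets every freed pointer to NULL, which makes delete_glists() safe to call more than once, including from a half-finished setup_glists() error path. The rule it applies throughout, as a sketch:

    /* free-then-NULL: kfree(NULL) is a no-op, so clearing the pointer
     * makes a repeated cleanup pass over the same struct harmless.
     */
    static void free_once(void **p)
    {
            kfree(*p);
            *p = NULL;
    }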
 
 /**
@@ -522,13 +502,30 @@ static int setup_glists(struct lio *lio, int num_iqs)
        lio->glist_lock =
            kzalloc(sizeof(*lio->glist_lock) * num_iqs, GFP_KERNEL);
        if (!lio->glist_lock)
-               return 1;
+               return -ENOMEM;
 
        lio->glist =
            kzalloc(sizeof(*lio->glist) * num_iqs, GFP_KERNEL);
        if (!lio->glist) {
                kfree(lio->glist_lock);
-               return 1;
+               lio->glist_lock = NULL;
+               return -ENOMEM;
+       }
+
+       lio->glist_entry_size =
+               ROUNDUP8((ROUNDUP4(OCTNIC_MAX_SG) >> 2) * OCT_SG_ENTRY_SIZE);
+
+       /* allocate memory to store the virtual and DMA base addresses of
+        * the per-glist coherent memory
+        */
+       lio->glists_virt_base = kcalloc(num_iqs, sizeof(*lio->glists_virt_base),
+                                       GFP_KERNEL);
+       lio->glists_dma_base = kcalloc(num_iqs, sizeof(*lio->glists_dma_base),
+                                      GFP_KERNEL);
+
+       if (!lio->glists_virt_base || !lio->glists_dma_base) {
+               delete_glists(lio);
+               return -ENOMEM;
        }
 
        for (i = 0; i < num_iqs; i++) {
@@ -536,34 +533,33 @@ static int setup_glists(struct lio *lio, int num_iqs)
 
                INIT_LIST_HEAD(&lio->glist[i]);
 
+               lio->glists_virt_base[i] =
+                       lio_dma_alloc(lio->oct_dev,
+                                     lio->glist_entry_size * lio->tx_qsize,
+                                     &lio->glists_dma_base[i]);
+
+               if (!lio->glists_virt_base[i]) {
+                       delete_glists(lio);
+                       return -ENOMEM;
+               }
+
                for (j = 0; j < lio->tx_qsize; j++) {
                        g = kzalloc(sizeof(*g), GFP_KERNEL);
                        if (!g)
                                break;
 
-                       g->sg_size = ((ROUNDUP4(OCTNIC_MAX_SG) >> 2) *
-                                     OCT_SG_ENTRY_SIZE);
+                       g->sg = lio->glists_virt_base[i] +
+                               (j * lio->glist_entry_size);
 
-                       g->sg = kmalloc(g->sg_size + 8, GFP_KERNEL);
-                       if (!g->sg) {
-                               kfree(g);
-                               break;
-                       }
+                       g->sg_dma_ptr = lio->glists_dma_base[i] +
+                                       (j * lio->glist_entry_size);
 
-                       /* The gather component should be aligned on 64-bit
-                        * boundary
-                        */
-                       if (((unsigned long)g->sg) & 7) {
-                               g->adjust = 8 - (((unsigned long)g->sg) & 7);
-                               g->sg = (struct octeon_sg_entry *)
-                                       ((unsigned long)g->sg + g->adjust);
-                       }
                        list_add_tail(&g->list, &lio->glist[i]);
                }
 
                if (j != lio->tx_qsize) {
                        delete_glists(lio);
-                       return 1;
+                       return -ENOMEM;
                }
        }
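setup_glists() now carves each queue's gather components out of one coherent DMA region instead of kmalloc'ing, manually aligning, and later streaming-mapping every scatter list. Entry j then sits at a fixed offset in both the CPU and the device address space, which is what lets the transmit path further down use g->sg_dma_ptr directly. A sketch of the arithmetic, assuming lio_dma_alloc() wraps dma_alloc_coherent() the way lio_dma_free() (shown later in this diff) wraps dma_free_coherent():

    /* one coherent region per IQ, tx_qsize fixed-size slots */
    entry_size = ROUNDUP8((ROUNDUP4(OCTNIC_MAX_SG) >> 2) * OCT_SG_ENTRY_SIZE);
    virt_base  = lio_dma_alloc(oct, entry_size * tx_qsize, &dma_base);

    /* slot j is at the same offset in both address spaces */
    g->sg         = virt_base + j * entry_size;
    g->sg_dma_ptr = dma_base  + j * entry_size;

Coherent allocations are aligned well beyond 8 bytes, which is why the old g->adjust alignment fixup can be deleted.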
 
@@ -692,13 +688,12 @@ static void update_txq_status(struct octeon_device *oct, int iq_num)
                        netif_wake_subqueue(netdev, iq->q_index);
                        INCR_INSTRQUEUE_PKT_COUNT(lio->oct_dev, iq_num,
                                                  tx_restart, 1);
-               } else {
-                       if (!octnet_iq_is_full(oct, lio->txq)) {
-                               INCR_INSTRQUEUE_PKT_COUNT(
-                                   lio->oct_dev, lio->txq, tx_restart, 1);
-                               wake_q(netdev, lio->txq);
-                       }
                }
+       } else if (netif_queue_stopped(netdev) && lio->linfo.link.s.link_up &&
+                  (!octnet_iq_is_full(oct, lio->txq))) {
+               INCR_INSTRQUEUE_PKT_COUNT(lio->oct_dev,
+                                         lio->txq, tx_restart, 1);
+               netif_wake_queue(netdev);
        }
 }
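The rewritten wake-up above only restarts transmission when the queue was actually stopped, the link is up, and the instruction queue has drained below its threshold; the old else branch could wake a queue that had never been stopped. Condensed, the VF's single-queue condition is:

    if (netif_queue_stopped(netdev) &&
        lio->linfo.link.s.link_up &&
        !octnet_iq_is_full(oct, lio->txq))
            netif_wake_queue(netdev);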
 
@@ -750,6 +745,7 @@ liquidio_msix_intr_handler(int irq __attribute__((unused)), void *dev)
 static int octeon_setup_interrupt(struct octeon_device *oct)
 {
        struct msix_entry *msix_entries;
+       char *queue_irq_names = NULL;
        int num_alloc_ioq_vectors;
        int num_ioq_vectors;
        int irqret;
@@ -758,10 +754,25 @@ static int octeon_setup_interrupt(struct octeon_device *oct)
        if (oct->msix_on) {
                oct->num_msix_irqs = oct->sriov_info.rings_per_vf;
 
+               /* allocate storage for the names assigned to each irq */
+               oct->irq_name_storage =
+                       kcalloc(MAX_IOQ_INTERRUPTS_PER_VF, INTRNAMSIZ,
+                               GFP_KERNEL);
+               if (!oct->irq_name_storage) {
+                       dev_err(&oct->pci_dev->dev, "Irq name storage alloc failed...\n");
+                       return -ENOMEM;
+               }
+
+               queue_irq_names = oct->irq_name_storage;
+
                oct->msix_entries = kcalloc(
                    oct->num_msix_irqs, sizeof(struct msix_entry), GFP_KERNEL);
-               if (!oct->msix_entries)
-                       return 1;
+               if (!oct->msix_entries) {
+                       dev_err(&oct->pci_dev->dev, "Memory Alloc failed...\n");
+                       kfree(oct->irq_name_storage);
+                       oct->irq_name_storage = NULL;
+                       return -ENOMEM;
+               }
 
                msix_entries = (struct msix_entry *)oct->msix_entries;
 
@@ -775,16 +786,23 @@ static int octeon_setup_interrupt(struct octeon_device *oct)
                        dev_err(&oct->pci_dev->dev, "unable to Allocate MSI-X interrupts\n");
                        kfree(oct->msix_entries);
                        oct->msix_entries = NULL;
-                       return 1;
+                       kfree(oct->irq_name_storage);
+                       oct->irq_name_storage = NULL;
+                       return num_alloc_ioq_vectors;
                }
                dev_dbg(&oct->pci_dev->dev, "OCTEON: Enough MSI-X interrupts are allocated...\n");
 
                num_ioq_vectors = oct->num_msix_irqs;
 
                for (i = 0; i < num_ioq_vectors; i++) {
+                       snprintf(&queue_irq_names[IRQ_NAME_OFF(i)], INTRNAMSIZ,
+                                "LiquidIO%u-vf%u-rxtx-%u",
+                                oct->octeon_id, oct->vf_num, i);
+
                        irqret = request_irq(msix_entries[i].vector,
                                             liquidio_msix_intr_handler, 0,
-                                            "octeon", &oct->ioq_vector[i]);
+                                            &queue_irq_names[IRQ_NAME_OFF(i)],
+                                            &oct->ioq_vector[i]);
                        if (irqret) {
                                dev_err(&oct->pci_dev->dev,
                                        "OCTEON: Request_irq failed for MSIX interrupt Error: %d\n",
@@ -800,7 +818,9 @@ static int octeon_setup_interrupt(struct octeon_device *oct)
                                pci_disable_msix(oct->pci_dev);
                                kfree(oct->msix_entries);
                                oct->msix_entries = NULL;
-                               return 1;
+                               kfree(oct->irq_name_storage);
+                               oct->irq_name_storage = NULL;
+                               return irqret;
                        }
                        oct->ioq_vector[i].vector = msix_entries[i].vector;
                        /* assign the cpu mask for this msix interrupt vector */
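request_irq() keeps only the pointer to the name it is given, so every vector needs its own string that stays valid for the IRQ's lifetime; the old code registered every vector under the single literal "octeon". The flat table indexed by IRQ_NAME_OFF() (the macros appear in liquidio_common.h later in this diff) provides that storage. In outline:

    /* one INTRNAMSIZ-byte slot per vector; the buffer must outlive the
     * IRQs because the kernel stores the name pointer, not a copy
     */
    char *names = kcalloc(MAX_IOQ_INTERRUPTS_PER_VF, INTRNAMSIZ, GFP_KERNEL);

    snprintf(&names[IRQ_NAME_OFF(i)], INTRNAMSIZ,
             "LiquidIO%u-vf%u-rxtx-%u", oct->octeon_id, oct->vf_num, i);
    request_irq(msix_entries[i].vector, liquidio_msix_intr_handler, 0,
                &names[IRQ_NAME_OFF(i)], &oct->ioq_vector[i]);

The per-vector names are also what make the interrupts distinguishable in /proc/interrupts.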
@@ -945,6 +965,8 @@ static void octeon_destroy_resources(struct octeon_device *oct)
                        pci_disable_msix(oct->pci_dev);
                        kfree(oct->msix_entries);
                        oct->msix_entries = NULL;
+                       kfree(oct->irq_name_storage);
+                       oct->irq_name_storage = NULL;
                }
                /* Soft reset the octeon device before exiting */
                if (oct->pci_dev->reset_fn)
@@ -1133,6 +1155,8 @@ static void liquidio_destroy_nic_device(struct octeon_device *oct, int ifidx)
        if (atomic_read(&lio->ifstate) & LIO_IFSTATE_REGISTERED)
                unregister_netdev(netdev);
 
+       cleanup_rx_oom_poll_fn(netdev);
+
        cleanup_link_status_change_wq(netdev);
 
        delete_glists(lio);
@@ -1324,10 +1348,6 @@ static void free_netsgbuf(void *buf)
                i++;
        }
 
-       dma_unmap_single(&lio->oct_dev->pci_dev->dev,
-                        finfo->dptr, g->sg_size,
-                        DMA_TO_DEVICE);
-
        iq = skb_iq(lio, skb);
 
        spin_lock(&lio->glist_lock[iq]);
@@ -1374,10 +1394,6 @@ static void free_netsgbuf_with_resp(void *buf)
                i++;
        }
 
-       dma_unmap_single(&lio->oct_dev->pci_dev->dev,
-                        finfo->dptr, g->sg_size,
-                        DMA_TO_DEVICE);
-
        iq = skb_iq(lio, skb);
 
        spin_lock(&lio->glist_lock[iq]);
@@ -1620,8 +1636,12 @@ static int liquidio_napi_poll(struct napi_struct *napi, int budget)
        /* Flush the instruction queue */
        iq = oct->instr_queue[iq_no];
        if (iq) {
-               /* Process iq buffers with in the budget limits */
-               tx_done = octeon_flush_iq(oct, iq, budget);
+               if (atomic_read(&iq->instr_pending))
+                       /* Process iq buffers within the budget limits */
+                       tx_done = octeon_flush_iq(oct, iq, budget);
+               else
+                       tx_done = 1;
+
                /* Update iq read-index rather than waiting for next interrupt.
                 * Return back if tx_done is false.
                 */
@@ -2382,23 +2402,7 @@ static int liquidio_xmit(struct sk_buff *skb, struct net_device *netdev)
                        i++;
                }
 
-               dptr = dma_map_single(&oct->pci_dev->dev,
-                                     g->sg, g->sg_size,
-                                     DMA_TO_DEVICE);
-               if (dma_mapping_error(&oct->pci_dev->dev, dptr)) {
-                       dev_err(&oct->pci_dev->dev, "%s DMA mapping error 4\n",
-                               __func__);
-                       dma_unmap_single(&oct->pci_dev->dev, g->sg[0].ptr[0],
-                                        skb->len - skb->data_len,
-                                        DMA_TO_DEVICE);
-                       for (j = 1; j <= frags; j++) {
-                               frag = &skb_shinfo(skb)->frags[j - 1];
-                               dma_unmap_page(&oct->pci_dev->dev,
-                                              g->sg[j >> 2].ptr[j & 3],
-                                              frag->size, DMA_TO_DEVICE);
-                       }
-                       return NETDEV_TX_BUSY;
-               }
+               dptr = g->sg_dma_ptr;
 
                ndata.cmd.cmd3.dptr = dptr;
                finfo->dptr = dptr;
@@ -2480,6 +2484,8 @@ liquidio_vlan_rx_add_vid(struct net_device *netdev,
        struct lio *lio = GET_LIO(netdev);
        struct octeon_device *oct = lio->oct_dev;
        struct octnic_ctrl_pkt nctrl;
+       struct completion compl;
+       u16 response_code;
        int ret = 0;
 
        memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));
@@ -2491,14 +2497,25 @@ liquidio_vlan_rx_add_vid(struct net_device *netdev,
        nctrl.wait_time = 100;
        nctrl.netpndev = (u64)netdev;
        nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;
+       init_completion(&compl);
+       nctrl.completion = &compl;
+       nctrl.response_code = &response_code;
 
        ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl);
        if (ret < 0) {
                dev_err(&oct->pci_dev->dev, "Add VLAN filter failed in core (ret: 0x%x)\n",
                        ret);
+               return -EIO;
        }
 
-       return ret;
+       if (!wait_for_completion_timeout(&compl,
+                                        msecs_to_jiffies(nctrl.wait_time)))
+               return -EPERM;
+
+       if (READ_ONCE(response_code))
+               return -EPERM;
+
+       return 0;
 }
 
 static int
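liquidio_vlan_rx_add_vid() above turns an asynchronous control command into a synchronous one: the caller plants a struct completion plus a response-code slot in the packet, the completion callback fills them in (assumed here to copy nctrl->status into *response_code and call complete(), matching the fields added to struct octnic_ctrl_pkt later in this diff), and the caller maps a send failure, a timeout, or a non-zero firmware code onto an errno. Condensed:

    struct completion compl;
    u16 response_code = 0;

    init_completion(&compl);
    nctrl.completion    = &compl;           /* completed by the callback */
    nctrl.response_code = &response_code;   /* firmware status lands here */

    if (octnet_send_nic_ctrl_pkt(oct, &nctrl) < 0)
            return -EIO;                    /* command was never posted */
    if (!wait_for_completion_timeout(&compl,
                                     msecs_to_jiffies(nctrl.wait_time)))
            return -EPERM;                  /* firmware did not answer */
    return READ_ONCE(response_code) ? -EPERM : 0;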
@@ -2543,6 +2560,8 @@ static int liquidio_set_rxcsum_command(struct net_device *netdev, int command,
        struct octnic_ctrl_pkt nctrl;
        int ret = 0;
 
+       memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));
+
        nctrl.ncmd.u64 = 0;
        nctrl.ncmd.s.cmd = command;
        nctrl.ncmd.s.param1 = rx_cmd;
@@ -2575,6 +2594,8 @@ static int liquidio_vxlan_port_command(struct net_device *netdev, int command,
        struct octnic_ctrl_pkt nctrl;
        int ret = 0;
 
+       memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));
+
        nctrl.ncmd.u64 = 0;
        nctrl.ncmd.s.cmd = command;
        nctrl.ncmd.s.more = vxlan_cmd_bit;
@@ -2997,6 +3018,9 @@ static int setup_nic_devices(struct octeon_device *octeon_dev)
                if (setup_link_status_change_wq(netdev))
                        goto setup_nic_dev_fail;
 
+               if (setup_rx_oom_poll_fn(netdev))
+                       goto setup_nic_dev_fail;
+
                /* Register the network device with the OS */
                if (register_netdev(netdev)) {
                        dev_err(&octeon_dev->pci_dev->dev, "Device registration failed\n");
@@ -3051,7 +3075,6 @@ setup_nic_wait_intr:
  */
 static int liquidio_init_nic_module(struct octeon_device *oct)
 {
-       struct oct_intrmod_cfg *intrmod_cfg;
        int num_nic_ports = 1;
        int i, retval = 0;
 
@@ -3073,22 +3096,6 @@ static int liquidio_init_nic_module(struct octeon_device *oct)
                goto octnet_init_failure;
        }
 
-       /* Initialize interrupt moderation params */
-       intrmod_cfg = &((struct octeon_device *)oct)->intrmod;
-       intrmod_cfg->rx_enable = 1;
-       intrmod_cfg->check_intrvl = LIO_INTRMOD_CHECK_INTERVAL;
-       intrmod_cfg->maxpkt_ratethr = LIO_INTRMOD_MAXPKT_RATETHR;
-       intrmod_cfg->minpkt_ratethr = LIO_INTRMOD_MINPKT_RATETHR;
-       intrmod_cfg->rx_maxcnt_trigger = LIO_INTRMOD_RXMAXCNT_TRIGGER;
-       intrmod_cfg->rx_maxtmr_trigger = LIO_INTRMOD_RXMAXTMR_TRIGGER;
-       intrmod_cfg->rx_mintmr_trigger = LIO_INTRMOD_RXMINTMR_TRIGGER;
-       intrmod_cfg->rx_mincnt_trigger = LIO_INTRMOD_RXMINCNT_TRIGGER;
-       intrmod_cfg->tx_enable = 1;
-       intrmod_cfg->tx_maxcnt_trigger = LIO_INTRMOD_TXMAXCNT_TRIGGER;
-       intrmod_cfg->tx_mincnt_trigger = LIO_INTRMOD_TXMINCNT_TRIGGER;
-       intrmod_cfg->rx_frames = CFG_GET_OQ_INTR_PKT(octeon_get_conf(oct));
-       intrmod_cfg->rx_usecs = CFG_GET_OQ_INTR_TIME(octeon_get_conf(oct));
-       intrmod_cfg->tx_frames = CFG_GET_IQ_INTR_PKT(octeon_get_conf(oct));
        dev_dbg(&oct->pci_dev->dev, "Network interfaces ready\n");
 
        return retval;
index 294c6f3c6b48254044c610c78625c9c3c86e9b1f..8ea2323d8d676a0b96b5653b3fba966bfd5dcd5d 100644 (file)
@@ -27,7 +27,7 @@
 
 #define LIQUIDIO_PACKAGE ""
 #define LIQUIDIO_BASE_MAJOR_VERSION 1
-#define LIQUIDIO_BASE_MINOR_VERSION 4
+#define LIQUIDIO_BASE_MINOR_VERSION 5
 #define LIQUIDIO_BASE_MICRO_VERSION 1
 #define LIQUIDIO_BASE_VERSION   __stringify(LIQUIDIO_BASE_MAJOR_VERSION) "." \
                                __stringify(LIQUIDIO_BASE_MINOR_VERSION)
@@ -83,6 +83,7 @@ enum octeon_tag_type {
 #define OPCODE_NIC_INTRMOD_CFG         0x08
 #define OPCODE_NIC_IF_CFG              0x09
 #define OPCODE_NIC_VF_DRV_NOTICE       0x0A
+#define OPCODE_NIC_INTRMOD_PARAMS      0x0B
 #define VF_DRV_LOADED                  1
 #define VF_DRV_REMOVED                -1
 #define VF_DRV_MACADDR_CHANGED         2
@@ -100,6 +101,11 @@ enum octeon_tag_type {
 
 #define BYTES_PER_DHLEN_UNIT        8
 #define MAX_REG_CNT                 2000000U
+#define INTRNAMSIZ                  32
+#define IRQ_NAME_OFF(i)             ((i) * INTRNAMSIZ)
+#define MAX_IOQ_INTERRUPTS_PER_PF   (64 * 2)
+#define MAX_IOQ_INTERRUPTS_PER_VF   (8 * 2)
+
 
 static inline u32 incr_index(u32 index, u32 count, u32 max)
 {
@@ -181,6 +187,7 @@ static inline void add_sg_size(struct octeon_sg_entry *sg_entry,
 #define   OCTNET_CMD_Q                0
 
 /* NIC Command types */
+#define   OCTNET_CMD_RESET_PF         0x0
 #define   OCTNET_CMD_CHANGE_MTU       0x1
 #define   OCTNET_CMD_CHANGE_MACADDR   0x2
 #define   OCTNET_CMD_CHANGE_DEVFLAGS  0x3
@@ -845,29 +852,6 @@ struct oct_mdio_cmd {
 
 #define OCT_LINK_STATS_SIZE   (sizeof(struct oct_link_stats))
 
-/* intrmod: max. packet rate threshold */
-#define LIO_INTRMOD_MAXPKT_RATETHR     196608
-/* intrmod: min. packet rate threshold */
-#define LIO_INTRMOD_MINPKT_RATETHR     9216
-/* intrmod: max. packets to trigger interrupt */
-#define LIO_INTRMOD_RXMAXCNT_TRIGGER   384
-/* intrmod: min. packets to trigger interrupt */
-#define LIO_INTRMOD_RXMINCNT_TRIGGER   0
-/* intrmod: max. time to trigger interrupt */
-#define LIO_INTRMOD_RXMAXTMR_TRIGGER   128
-/* 66xx:intrmod: min. time to trigger interrupt
- * (value of 1 is optimum for TCP_RR)
- */
-#define LIO_INTRMOD_RXMINTMR_TRIGGER   1
-
-/* intrmod: max. packets to trigger interrupt */
-#define LIO_INTRMOD_TXMAXCNT_TRIGGER   64
-/* intrmod: min. packets to trigger interrupt */
-#define LIO_INTRMOD_TXMINCNT_TRIGGER   0
-
-/* intrmod: poll interval in seconds */
-#define LIO_INTRMOD_CHECK_INTERVAL  1
-
 struct oct_intrmod_cfg {
        u64 rx_enable;
        u64 tx_enable;
index b3dc2e9651a8e205d7e6e451109f98e96065de2c..d29ebc531151f0fe85cb83826d3af9b069d75f52 100644 (file)
 #define   CN23XX_MAX_RINGS_PER_VF          8
 
 #define   CN23XX_MAX_INPUT_QUEUES      CN23XX_MAX_RINGS_PER_PF
-#define   CN23XX_MAX_IQ_DESCRIPTORS    2048
+#define   CN23XX_MAX_IQ_DESCRIPTORS    512
 #define   CN23XX_DB_MIN                 1
 #define   CN23XX_DB_MAX                 8
 #define   CN23XX_DB_TIMEOUT             1
 
 #define   CN23XX_MAX_OUTPUT_QUEUES     CN23XX_MAX_RINGS_PER_PF
-#define   CN23XX_MAX_OQ_DESCRIPTORS    2048
+#define   CN23XX_MAX_OQ_DESCRIPTORS    512
 #define   CN23XX_OQ_BUF_SIZE           1536
 #define   CN23XX_OQ_PKTSPER_INTR       128
 /*#define CAVIUM_ONLY_CN23XX_RX_PERF*/
-#define   CN23XX_OQ_REFIL_THRESHOLD    128
+#define   CN23XX_OQ_REFIL_THRESHOLD    16
 
 #define   CN23XX_OQ_INTR_PKT           64
 #define   CN23XX_OQ_INTR_TIME          100
index 9675ffbf25e6bd9bf34d346204f1b0fbcbcfa185..e21b477d0159f1e17570259e2db3bcb155378728 100644 (file)
@@ -793,7 +793,7 @@ int octeon_setup_instr_queues(struct octeon_device *oct)
        u32 num_descs = 0;
        u32 iq_no = 0;
        union oct_txpciq txpciq;
-       int numa_node = cpu_to_node(iq_no % num_online_cpus());
+       int numa_node = dev_to_node(&oct->pci_dev->dev);
 
        if (OCTEON_CN6XXX(oct))
                num_descs =
@@ -837,7 +837,7 @@ int octeon_setup_output_queues(struct octeon_device *oct)
        u32 num_descs = 0;
        u32 desc_size = 0;
        u32 oq_no = 0;
-       int numa_node = cpu_to_node(oq_no % num_online_cpus());
+       int numa_node = dev_to_node(&oct->pci_dev->dev);
 
        if (OCTEON_CN6XXX(oct)) {
                num_descs =
index c301a3852482845ee65bf260c48dbd69853e9522..92f67de111aa0ac942a92fbff2120141e10f32b8 100644 (file)
@@ -453,9 +453,6 @@ struct octeon_device {
        /** List of dispatch functions */
        struct octeon_dispatch_list dispatch;
 
-       /* Interrupt Moderation */
-       struct oct_intrmod_cfg intrmod;
-
        u32 int_status;
 
        u64 droq_intr;
@@ -517,6 +514,9 @@ struct octeon_device {
 
        void *msix_entries;
 
+       /* when requesting IRQs, the names are stored here */
+       void *irq_name_storage;
+
        struct octeon_sriov_info sriov_info;
 
        struct octeon_pf_vf_hs_word pfvf_hsword;
@@ -538,6 +538,12 @@ struct octeon_device {
        u32 priv_flags;
 
        void *watchdog_task;
+
+       u32 rx_coalesce_usecs;
+       u32 rx_max_coalesced_frames;
+       u32 tx_max_coalesced_frames;
+
+       bool cores_crashed;
 };
 
 #define  OCT_DRV_ONLINE 1
@@ -551,12 +557,6 @@ struct octeon_device {
 #define CHIP_CONF(oct, TYPE)             \
        (((struct octeon_ ## TYPE  *)((oct)->chip))->conf)
 
-struct oct_intrmod_cmd {
-       struct octeon_device *oct_dev;
-       struct octeon_soft_command *sc;
-       struct oct_intrmod_cfg *cfg;
-};
-
 /*------------------ Function Prototypes ----------------------*/
 
 /** Initialize device list memory */
index 0be87d119a979ea70117e13b2213987460da2a81..286be5539cef707c9464f1c4488cdd6134e9b343 100644 (file)
@@ -155,11 +155,6 @@ octeon_droq_destroy_ring_buffers(struct octeon_device *oct,
                        recv_buffer_destroy(droq->recv_buf_list[i].buffer,
                                            pg_info);
 
-               if (droq->desc_ring && droq->desc_ring[i].info_ptr)
-                       lio_unmap_ring_info(oct->pci_dev,
-                                           (u64)droq->
-                                           desc_ring[i].info_ptr,
-                                           OCT_DROQ_INFO_SIZE);
                droq->recv_buf_list[i].buffer = NULL;
        }
 
@@ -211,10 +206,7 @@ int octeon_delete_droq(struct octeon_device *oct, u32 q_no)
        vfree(droq->recv_buf_list);
 
        if (droq->info_base_addr)
-               cnnic_free_aligned_dma(oct->pci_dev, droq->info_list,
-                                      droq->info_alloc_size,
-                                      droq->info_base_addr,
-                                      droq->info_list_dma);
+               lio_free_info_buffer(oct, droq);
 
        if (droq->desc_ring)
                lio_dma_free(oct, (droq->max_count * OCT_DROQ_DESC_SIZE),
@@ -234,8 +226,7 @@ int octeon_init_droq(struct octeon_device *oct,
        struct octeon_droq *droq;
        u32 desc_ring_size = 0, c_num_descs = 0, c_buf_size = 0;
        u32 c_pkts_per_intr = 0, c_refill_threshold = 0;
-       int orig_node = dev_to_node(&oct->pci_dev->dev);
-       int numa_node = cpu_to_node(q_no % num_online_cpus());
+       int numa_node = dev_to_node(&oct->pci_dev->dev);
 
        dev_dbg(&oct->pci_dev->dev, "%s[%d]\n", __func__, q_no);
 
@@ -275,13 +266,8 @@ int octeon_init_droq(struct octeon_device *oct,
        droq->buffer_size = c_buf_size;
 
        desc_ring_size = droq->max_count * OCT_DROQ_DESC_SIZE;
-       set_dev_node(&oct->pci_dev->dev, numa_node);
        droq->desc_ring = lio_dma_alloc(oct, desc_ring_size,
                                        (dma_addr_t *)&droq->desc_ring_dma);
-       set_dev_node(&oct->pci_dev->dev, orig_node);
-       if (!droq->desc_ring)
-               droq->desc_ring = lio_dma_alloc(oct, desc_ring_size,
-                                       (dma_addr_t *)&droq->desc_ring_dma);
 
        if (!droq->desc_ring) {
                dev_err(&oct->pci_dev->dev,
@@ -294,12 +280,7 @@ int octeon_init_droq(struct octeon_device *oct,
        dev_dbg(&oct->pci_dev->dev, "droq[%d]: num_desc: %d\n", q_no,
                droq->max_count);
 
-       droq->info_list =
-               cnnic_numa_alloc_aligned_dma((droq->max_count *
-                                             OCT_DROQ_INFO_SIZE),
-                                            &droq->info_alloc_size,
-                                            &droq->info_base_addr,
-                                            numa_node);
+       droq->info_list = lio_alloc_info_buffer(oct, droq);
        if (!droq->info_list) {
                dev_err(&oct->pci_dev->dev, "Cannot allocate memory for info list.\n");
                lio_dma_free(oct, (droq->max_count * OCT_DROQ_DESC_SIZE),
@@ -532,6 +513,32 @@ octeon_droq_refill(struct octeon_device *octeon_dev, struct octeon_droq *droq)
        return desc_refilled;
 }
 
+/** \brief Check whether receive buffers can be allocated to get the
+ *  DROQ out of an out-of-memory condition.
+ *  @param droq - DROQ being checked.
+ */
+void octeon_droq_check_oom(struct octeon_droq *droq)
+{
+       int desc_refilled;
+       struct octeon_device *oct = droq->oct_dev;
+
+       if (readl(droq->pkts_credit_reg) <= CN23XX_SLI_DEF_BP) {
+               spin_lock_bh(&droq->lock);
+               desc_refilled = octeon_droq_refill(oct, droq);
+               if (desc_refilled) {
+                       /* Flush the droq descriptor data to memory to be sure
+                        * that when we update the credits the data in memory
+                        * is accurate.
+                        */
+                       wmb();
+                       writel(desc_refilled, droq->pkts_credit_reg);
+                       /* make sure mmio write completes */
+                       mmiowb();
+               }
+               spin_unlock_bh(&droq->lock);
+       }
+}
+
 static inline u32
 octeon_droq_get_bufcount(u32 buf_size, u32 total_len)
 {
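octeon_droq_check_oom() above publishes refilled descriptors under a specific ordering contract that is easy to miss: the descriptor-ring stores must be globally visible before the credit-register doorbell, and the doorbell itself must not leak past the unlock on architectures with weakly ordered MMIO. Annotated restatement of the same steps:

    spin_lock_bh(&droq->lock);
    n = octeon_droq_refill(oct, droq);        /* 1: normal memory stores */
    if (n) {
            wmb();                            /* 2: descriptors before credit */
            writel(n, droq->pkts_credit_reg); /* 3: doorbell to the NIC */
            mmiowb();                         /* 4: MMIO ordered vs. unlock */
    }
    spin_unlock_bh(&droq->lock);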
@@ -983,7 +990,7 @@ int octeon_create_droq(struct octeon_device *oct,
                       u32 desc_size, void *app_ctx)
 {
        struct octeon_droq *droq;
-       int numa_node = cpu_to_node(q_no % num_online_cpus());
+       int numa_node = dev_to_node(&oct->pci_dev->dev);
 
        if (oct->droq[q_no]) {
                dev_dbg(&oct->pci_dev->dev, "Droq already in use. Cannot create droq %d again\n",
index e62074090681d3597f973f54fadb2133b21b2931..9781577115e76ff7d1ef27966bf3cbdba84ee7b4 100644 (file)
@@ -325,10 +325,10 @@ struct octeon_droq {
        size_t desc_ring_dma;
 
        /** Info ptr list are allocated at this virtual address. */
-       size_t info_base_addr;
+       void *info_base_addr;
 
        /** DMA mapped address of the info list */
-       size_t info_list_dma;
+       dma_addr_t info_list_dma;
 
        /** Allocated size of info list. */
        u32 info_alloc_size;
@@ -426,4 +426,6 @@ int octeon_droq_process_packets(struct octeon_device *oct,
 int octeon_process_droq_poll_cmd(struct octeon_device *oct, u32 q_no,
                                 int cmd, u32 arg);
 
+void octeon_droq_check_oom(struct octeon_droq *droq);
+
 #endif /*__OCTEON_DROQ_H__ */
index 4608a5af35a3204b54378dc03eef94c976370ac1..5063a12613e53646b9930f10090eae13c32301e3 100644 (file)
@@ -152,7 +152,7 @@ struct octeon_instr_queue {
        struct oct_iq_stats stats;
 
        /** DMA mapped base address of the input descriptor ring. */
-       u64 base_addr_dma;
+       dma_addr_t base_addr_dma;
 
        /** Application context */
        void *app_ctx;
index aa36e9ae7676556e562a8bb4c9cb46aacc709890..bed9ef17bc26b4526cf3c57dcb4823cdc2586491 100644 (file)
@@ -140,48 +140,6 @@ err_release_region:
        return 1;
 }
 
-static inline void *
-cnnic_numa_alloc_aligned_dma(u32 size,
-                            u32 *alloc_size,
-                            size_t *orig_ptr,
-                            int numa_node)
-{
-       int retries = 0;
-       void *ptr = NULL;
-
-#define OCTEON_MAX_ALLOC_RETRIES     1
-       do {
-               struct page *page = NULL;
-
-               page = alloc_pages_node(numa_node,
-                                       GFP_KERNEL,
-                                       get_order(size));
-               if (!page)
-                       page = alloc_pages(GFP_KERNEL,
-                                          get_order(size));
-               ptr = (void *)page_address(page);
-               if ((unsigned long)ptr & 0x07) {
-                       __free_pages(page, get_order(size));
-                       ptr = NULL;
-                       /* Increment the size required if the first
-                        * attempt failed.
-                        */
-                       if (!retries)
-                               size += 7;
-               }
-               retries++;
-       } while ((retries <= OCTEON_MAX_ALLOC_RETRIES) && !ptr);
-
-       *alloc_size = size;
-       *orig_ptr = (unsigned long)ptr;
-       if ((unsigned long)ptr & 0x07)
-               ptr = (void *)(((unsigned long)ptr + 7) & ~(7UL));
-       return ptr;
-}
-
-#define cnnic_free_aligned_dma(pci_dev, ptr, size, orig_ptr, dma_addr) \
-               free_pages(orig_ptr, get_order(size))
-
 static inline int
 sleep_cond(wait_queue_head_t *wait_queue, int *condition)
 {
index 6bb89419006eb5635cc65c415ad6183f5296f6d3..bf483932ff25021652ecf44e550426713c3af1d8 100644 (file)
 #define LIO_MAX_MTU_SIZE (OCTNET_MAX_FRM_SIZE - OCTNET_FRM_HEADER_SIZE)
 #define LIO_MIN_MTU_SIZE ETH_MIN_MTU
 
+/* Bit mask values for lio->ifstate */
+#define   LIO_IFSTATE_DROQ_OPS             0x01
+#define   LIO_IFSTATE_REGISTERED           0x02
+#define   LIO_IFSTATE_RUNNING              0x04
+#define   LIO_IFSTATE_RX_TIMESTAMP_ENABLED 0x08
+
 struct oct_nic_stats_resp {
        u64     rh;
        struct oct_link_stats stats;
@@ -62,6 +68,9 @@ struct lio {
 
        /** Array of gather component linked lists */
        struct list_head *glist;
+       void **glists_virt_base;
+       dma_addr_t *glists_dma_base;
+       u32 glist_entry_size;
 
        /** Pointer to the NIC properties for the Octeon device this network
         *  interface is associated with.
@@ -120,6 +129,9 @@ struct lio {
        /* work queue for  txq status */
        struct cavium_wq        txq_status_wq;
 
+       /* work queue for  rxq oom status */
+       struct cavium_wq        rxq_status_wq;
+
        /* work queue for  link status */
        struct cavium_wq        link_status_wq;
 
@@ -129,10 +141,6 @@ struct lio {
 #define LIO_SIZE         (sizeof(struct lio))
 #define GET_LIO(netdev)  ((struct lio *)netdev_priv(netdev))
 
-#define CIU3_WDOG(c)                 (0x1010000020000ULL + ((c) << 3))
-#define CIU3_WDOG_MASK               12ULL
-#define LIO_MONITOR_WDOG_EXPIRE      1
-#define LIO_MONITOR_CORE_STUCK_MSGD  2
 #define LIO_MAX_CORES                12
 
 /**
@@ -143,6 +151,10 @@ struct lio {
  */
 int liquidio_set_feature(struct net_device *netdev, int cmd, u16 param1);
 
+int setup_rx_oom_poll_fn(struct net_device *netdev);
+
+void cleanup_rx_oom_poll_fn(struct net_device *netdev);
+
 /**
  * \brief Link control command completion callback
  * @param nctrl_ptr pointer to control packet structure
@@ -344,6 +356,29 @@ static inline void tx_buffer_free(void *buffer)
 #define lio_dma_free(oct, size, virt_addr, dma_addr) \
        dma_free_coherent(&(oct)->pci_dev->dev, size, virt_addr, dma_addr)
 
+static inline void *
+lio_alloc_info_buffer(struct octeon_device *oct,
+                     struct octeon_droq *droq)
+{
+       void *virt_ptr;
+
+       virt_ptr = lio_dma_alloc(oct, (droq->max_count * OCT_DROQ_INFO_SIZE),
+                                &droq->info_list_dma);
+       if (virt_ptr) {
+               droq->info_alloc_size = droq->max_count * OCT_DROQ_INFO_SIZE;
+               droq->info_base_addr = virt_ptr;
+       }
+
+       return virt_ptr;
+}
+
+static inline void lio_free_info_buffer(struct octeon_device *oct,
+                                       struct octeon_droq *droq)
+{
+       lio_dma_free(oct, droq->info_alloc_size, droq->info_base_addr,
+                    droq->info_list_dma);
+}
+
 static inline
 void *get_rbd(struct sk_buff *skb)
 {
@@ -359,22 +394,7 @@ void *get_rbd(struct sk_buff *skb)
 static inline u64
 lio_map_ring_info(struct octeon_droq *droq, u32 i)
 {
-       dma_addr_t dma_addr;
-       struct octeon_device *oct = droq->oct_dev;
-
-       dma_addr = dma_map_single(&oct->pci_dev->dev, &droq->info_list[i],
-                                 OCT_DROQ_INFO_SIZE, DMA_FROM_DEVICE);
-
-       WARN_ON(dma_mapping_error(&oct->pci_dev->dev, dma_addr));
-
-       return (u64)dma_addr;
-}
-
-static inline void
-lio_unmap_ring_info(struct pci_dev *pci_dev,
-                   u64 info_ptr, u32 size)
-{
-       dma_unmap_single(&pci_dev->dev, info_ptr, size, DMA_FROM_DEVICE);
+       return droq->info_list_dma + (i * sizeof(struct octeon_droq_info));
 }
 
 static inline u64
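Because the info list is now one dma_alloc_coherent() region (see lio_alloc_info_buffer() above), the device address of entry i is plain offset arithmetic; the old per-entry dma_map_single()/dma_unmap_single() pair, and the cache maintenance it implied, disappears entirely:

    /* coherent memory: both addresses advance by the same fixed stride */
    void       *virt = droq->info_base_addr +
                       i * sizeof(struct octeon_droq_info);
    dma_addr_t  dma  = droq->info_list_dma +
                       i * sizeof(struct octeon_droq_info);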
@@ -427,4 +447,34 @@ static inline void octeon_fast_packet_next(struct octeon_droq *droq,
               get_rbd(droq->recv_buf_list[idx].buffer), copy_len);
 }
 
+/**
+ * \brief check interface state
+ * @param lio per-network private data
+ * @param state_flag flag state to check
+ */
+static inline int ifstate_check(struct lio *lio, int state_flag)
+{
+       return atomic_read(&lio->ifstate) & state_flag;
+}
+
+/**
+ * \brief set interface state
+ * @param lio per-network private data
+ * @param state_flag flag state to set
+ */
+static inline void ifstate_set(struct lio *lio, int state_flag)
+{
+       atomic_set(&lio->ifstate, (atomic_read(&lio->ifstate) | state_flag));
+}
+
+/**
+ * \brief clear interface state
+ * @param lio per-network private data
+ * @param state_flag flag state to clear
+ */
+static inline void ifstate_reset(struct lio *lio, int state_flag)
+{
+       atomic_set(&lio->ifstate, (atomic_read(&lio->ifstate) & ~(state_flag)));
+}
+
 #endif
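The ifstate helpers land here in octeon_network.h so the PF and VF drivers share one definition instead of carrying private copies. Note that set and reset are read-modify-write sequences (atomic_read() followed by atomic_set()), so concurrent updaters still need external serialization; the atomic_t only makes the individual load and store well-defined. Typical lifecycle of one flag:

    ifstate_set(lio, LIO_IFSTATE_REGISTERED);      /* after register_netdev() */
    if (ifstate_check(lio, LIO_IFSTATE_REGISTERED))
            unregister_netdev(netdev);             /* on teardown */
    ifstate_reset(lio, LIO_IFSTATE_REGISTERED);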
index 0243be8dd56fc32736c13cef0487514efcfe6a68..b457cf23fce6196e1f5ec5cc144644a5441d641c 100644 (file)
@@ -100,14 +100,16 @@ static void octnet_link_ctrl_callback(struct octeon_device *oct,
 
        nctrl = (struct octnic_ctrl_pkt *)sc->ctxptr;
 
-       /* Call the callback function if status is OK.
-        * Status is OK only if a response was expected and core returned
-        * success.
+       /* Call the callback function if status is zero (meaning OK) or if
+        * status carries a firmware status code greater than zero (meaning
+        * the firmware is reporting an error).
         * If no response was expected, status is OK if the command was posted
         * successfully.
         */
-       if (!status && nctrl->cb_fn)
+       if ((!status || status > FIRMWARE_STATUS_CODE(0)) && nctrl->cb_fn) {
+               nctrl->status = status;
                nctrl->cb_fn(nctrl);
+       }
 
        octeon_free_soft_command(oct, sc);
 }
index 0c7a5c9b2932d4db89066d3496b28d134a8bcedc..6480ef8634418dca43ab0a4187ee046276dac7ad 100644 (file)
@@ -62,6 +62,10 @@ struct octnic_ctrl_pkt {
 
        /** Callback function called when the command has been fetched */
        octnic_ctrl_pkt_cb_fn_t cb_fn;
+
+       u32 status;
+       u16 *response_code;
+       struct completion *completion;
 };
 
 #define MAX_UDD_SIZE(nctrl) (sizeof((nctrl)->udd))
index 707bc15adec61351c1384b8454c85a87a2c4b437..261f448f9de23d059d0e5a5f1552bef5069e4c65 100644 (file)
@@ -62,8 +62,7 @@ int octeon_init_instr_queue(struct octeon_device *oct,
        u32 iq_no = (u32)txpciq.s.q_no;
        u32 q_size;
        struct cavium_wq *db_wq;
-       int orig_node = dev_to_node(&oct->pci_dev->dev);
-       int numa_node = cpu_to_node(iq_no % num_online_cpus());
+       int numa_node = dev_to_node(&oct->pci_dev->dev);
 
        if (OCTEON_CN6XXX(oct))
                conf = &(CFG_GET_IQ_CFG(CHIP_CONF(oct, cn6xxx)));
@@ -91,13 +90,7 @@ int octeon_init_instr_queue(struct octeon_device *oct,
 
        iq->oct_dev = oct;
 
-       set_dev_node(&oct->pci_dev->dev, numa_node);
-       iq->base_addr = lio_dma_alloc(oct, q_size,
-                                     (dma_addr_t *)&iq->base_addr_dma);
-       set_dev_node(&oct->pci_dev->dev, orig_node);
-       if (!iq->base_addr)
-               iq->base_addr = lio_dma_alloc(oct, q_size,
-                                             (dma_addr_t *)&iq->base_addr_dma);
+       iq->base_addr = lio_dma_alloc(oct, q_size, &iq->base_addr_dma);
        if (!iq->base_addr) {
                dev_err(&oct->pci_dev->dev, "Cannot allocate memory for instr queue %d\n",
                        iq_no);
@@ -211,7 +204,7 @@ int octeon_setup_iq(struct octeon_device *oct,
                    void *app_ctx)
 {
        u32 iq_no = (u32)txpciq.s.q_no;
-       int numa_node = cpu_to_node(iq_no % num_online_cpus());
+       int numa_node = dev_to_node(&oct->pci_dev->dev);
 
        if (oct->instr_queue[iq_no]) {
                dev_dbg(&oct->pci_dev->dev, "IQ is in use. Cannot create the IQ: %d again\n",
index 2fbaae96b505fbd0dcd45e85d366ee2585ef9e55..3d691c69f74d1cf7567b2d58730dcfd656c0eefd 100644 (file)
@@ -69,50 +69,53 @@ int lio_process_ordered_list(struct octeon_device *octeon_dev,
        int resp_to_process = MAX_ORD_REQS_TO_PROCESS;
        u32 status;
        u64 status64;
-       struct octeon_instr_rdp *rdp;
-       u64 rptr;
 
        ordered_sc_list = &octeon_dev->response_list[OCTEON_ORDERED_SC_LIST];
 
        do {
                spin_lock_bh(&ordered_sc_list->lock);
 
-               if (ordered_sc_list->head.next == &ordered_sc_list->head) {
+               if (list_empty(&ordered_sc_list->head)) {
                        spin_unlock_bh(&ordered_sc_list->lock);
                        return 1;
                }
 
-               sc = (struct octeon_soft_command *)ordered_sc_list->
-                   head.next;
-               if (OCTEON_CN23XX_PF(octeon_dev) ||
-                   OCTEON_CN23XX_VF(octeon_dev)) {
-                       rdp = (struct octeon_instr_rdp *)&sc->cmd.cmd3.rdp;
-                       rptr = sc->cmd.cmd3.rptr;
-               } else {
-                       rdp = (struct octeon_instr_rdp *)&sc->cmd.cmd2.rdp;
-                       rptr = sc->cmd.cmd2.rptr;
-               }
+               sc = list_first_entry(&ordered_sc_list->head,
+                                     struct octeon_soft_command, node);
 
                status = OCTEON_REQUEST_PENDING;
 
                /* check if octeon has finished DMA'ing a response
                 * to where rptr is pointing to
                 */
-               dma_sync_single_for_cpu(&octeon_dev->pci_dev->dev,
-                                       rptr, rdp->rlen,
-                                       DMA_FROM_DEVICE);
                status64 = *sc->status_word;
 
                if (status64 != COMPLETION_WORD_INIT) {
+                       /* This logic ensures that all 64 bits have been written.
+                        * 1. check byte 0 for non-FF
+                        * 2. if non-FF, then swap result from BE to host order
+                        * 3. check byte 7 (swapped to 0) for non-FF
+                        * 4. if non-FF, use the low 32-bit status code
+                        * 5. if either byte 0 or byte 7 is FF, don't use status
+                        */
                        if ((status64 & 0xff) != 0xff) {
                                octeon_swap_8B_data(&status64, 1);
                                if (((status64 & 0xff) != 0xff)) {
-                                       status = (u32)(status64 &
-                                                      0xffffffffULL);
+                                       /* retrieve 16-bit firmware status */
+                                       status = (u32)(status64 & 0xffffULL);
+                                       if (status) {
+                                               status =
+                                                 FIRMWARE_STATUS_CODE(status);
+                                       } else {
+                                               /* i.e. no error */
+                                               status = OCTEON_REQUEST_DONE;
+                                       }
                                }
                        }
                } else if (force_quit || (sc->timeout &&
                        time_after(jiffies, (unsigned long)sc->timeout))) {
+                       dev_err(&octeon_dev->pci_dev->dev, "%s: cmd failed, timeout (%ld, %ld)\n",
+                               __func__, (long)jiffies, (long)sc->timeout);
                        status = OCTEON_REQUEST_TIMEOUT;
                }
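The decode above handles a 64-bit completion word that the firmware DMAs in big-endian order into memory the host pre-filled with all ones (COMPLETION_WORD_INIT). Checking the low byte before the swap and again after it (when it holds what was byte 7) proves both ends of the word were written before the low 16 bits are trusted as a firmware status and tagged with FIRMWARE_STATUS_CODE() from the next hunk. Condensed:

    u64 w = *sc->status_word;               /* device-written, big-endian */

    if (w != COMPLETION_WORD_INIT && (w & 0xff) != 0xff) {
            octeon_swap_8B_data(&w, 1);     /* BE -> host order */
            if ((w & 0xff) != 0xff) {       /* was byte 7 before the swap */
                    u32 fw = (u32)(w & 0xffffULL);
                    status = fw ? FIRMWARE_STATUS_CODE(fw)
                                : OCTEON_REQUEST_DONE;
            }
    }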
 
index cbb2d84e89323aea4852c31c164c2ff49c40257c..9169c2815dba36c59b7b8cea5642a7fb27bff439 100644 (file)
@@ -78,6 +78,8 @@ enum {
 
 /*------------   Error codes used by host driver   -----------------*/
 #define DRIVER_MAJOR_ERROR_CODE           0x0000
+/*------   Error codes used by firmware (bits 15..0 set by firmware)   ----*/
+#define FIRMWARE_MAJOR_ERROR_CODE         0x0001
 
 /**  A value of 0x00000000 indicates no error i.e. success */
 #define DRIVER_ERROR_NONE                 0x00000000
@@ -116,6 +118,9 @@ enum {
 
 };
 
+#define FIRMWARE_STATUS_CODE(status) \
+       ((FIRMWARE_MAJOR_ERROR_CODE << 16) | (status))
+
 /** Initialize the response lists. The number of response lists to create is
  * given by count.
  * @param octeon_dev      - the octeon device structure.
index e739c715356283553f4ace131a251bc4b30d6de2..6fb44218bf555f84d736d08ca2b383a3a99540f1 100644 (file)
@@ -269,6 +269,7 @@ struct nicvf {
 #define        MAX_QUEUES_PER_QSET                     8
        struct queue_set        *qs;
        struct nicvf_cq_poll    *napi[8];
+       void                    *iommu_domain;
        u8                      vf_id;
        u8                      sqs_id;
        bool                    sqs_mode;
@@ -318,9 +319,7 @@ struct nicvf {
        struct bgx_stats        bgx_stats;
 
        /* MSI-X  */
-       bool                    msix_enabled;
        u8                      num_vec;
-       struct msix_entry       msix_entries[NIC_VF_MSIX_VECTORS];
        char                    irq_name[NIC_VF_MSIX_VECTORS][IFNAMSIZ + 15];
        bool                    irq_allocated[NIC_VF_MSIX_VECTORS];
        cpumask_var_t           affinity_mask[NIC_VF_MSIX_VECTORS];
index 767234e2e8f94bb0520bee29e1813f8934cae7a8..fb770b0182d3766324fe88f13d8f8506afd29134 100644 (file)
@@ -65,9 +65,7 @@ struct nicpf {
        bool                    mbx_lock[MAX_NUM_VFS_SUPPORTED];
 
        /* MSI-X */
-       bool                    msix_enabled;
        u8                      num_vec;
-       struct msix_entry       *msix_entries;
        bool                    irq_allocated[NIC_PF_MSIX_VECTORS];
        char                    irq_name[NIC_PF_MSIX_VECTORS][20];
 };
@@ -1088,7 +1086,7 @@ static irqreturn_t nic_mbx_intr_handler(int irq, void *nic_irq)
        u64 intr;
        u8  vf, vf_per_mbx_reg = 64;
 
-       if (irq == nic->msix_entries[NIC_PF_INTR_ID_MBOX0].vector)
+       if (irq == pci_irq_vector(nic->pdev, NIC_PF_INTR_ID_MBOX0))
                mbx = 0;
        else
                mbx = 1;
@@ -1107,51 +1105,13 @@ static irqreturn_t nic_mbx_intr_handler(int irq, void *nic_irq)
        return IRQ_HANDLED;
 }
 
-static int nic_enable_msix(struct nicpf *nic)
-{
-       int i, ret;
-
-       nic->num_vec = pci_msix_vec_count(nic->pdev);
-
-       nic->msix_entries = kmalloc_array(nic->num_vec,
-                                         sizeof(struct msix_entry),
-                                         GFP_KERNEL);
-       if (!nic->msix_entries)
-               return -ENOMEM;
-
-       for (i = 0; i < nic->num_vec; i++)
-               nic->msix_entries[i].entry = i;
-
-       ret = pci_enable_msix(nic->pdev, nic->msix_entries, nic->num_vec);
-       if (ret) {
-               dev_err(&nic->pdev->dev,
-                       "Request for #%d msix vectors failed, returned %d\n",
-                          nic->num_vec, ret);
-               kfree(nic->msix_entries);
-               return ret;
-       }
-
-       nic->msix_enabled = 1;
-       return 0;
-}
-
-static void nic_disable_msix(struct nicpf *nic)
-{
-       if (nic->msix_enabled) {
-               pci_disable_msix(nic->pdev);
-               kfree(nic->msix_entries);
-               nic->msix_enabled = 0;
-               nic->num_vec = 0;
-       }
-}
-
 static void nic_free_all_interrupts(struct nicpf *nic)
 {
        int irq;
 
        for (irq = 0; irq < nic->num_vec; irq++) {
                if (nic->irq_allocated[irq])
-                       free_irq(nic->msix_entries[irq].vector, nic);
+                       free_irq(pci_irq_vector(nic->pdev, irq), nic);
                nic->irq_allocated[irq] = false;
        }
 }
@@ -1159,18 +1119,24 @@ static void nic_free_all_interrupts(struct nicpf *nic)
 static int nic_register_interrupts(struct nicpf *nic)
 {
        int i, ret;
+       nic->num_vec = pci_msix_vec_count(nic->pdev);
 
        /* Enable MSI-X */
-       ret = nic_enable_msix(nic);
-       if (ret)
-               return ret;
+       ret = pci_alloc_irq_vectors(nic->pdev, nic->num_vec, nic->num_vec,
+                                   PCI_IRQ_MSIX);
+       if (ret < 0) {
+               dev_err(&nic->pdev->dev,
+                       "Request for #%d msix vectors failed, returned %d\n",
+                          nic->num_vec, ret);
+               return 1;
+       }
 
        /* Register mailbox interrupt handler */
        for (i = NIC_PF_INTR_ID_MBOX0; i < nic->num_vec; i++) {
                sprintf(nic->irq_name[i],
                        "NICPF Mbox%d", (i - NIC_PF_INTR_ID_MBOX0));
 
-               ret = request_irq(nic->msix_entries[i].vector,
+               ret = request_irq(pci_irq_vector(nic->pdev, i),
                                  nic_mbx_intr_handler, 0,
                                  nic->irq_name[i], nic);
                if (ret)
@@ -1186,14 +1152,16 @@ static int nic_register_interrupts(struct nicpf *nic)
 fail:
        dev_err(&nic->pdev->dev, "Request irq failed\n");
        nic_free_all_interrupts(nic);
-       nic_disable_msix(nic);
+       pci_free_irq_vectors(nic->pdev);
+       nic->num_vec = 0;
        return ret;
 }
 
 static void nic_unregister_interrupts(struct nicpf *nic)
 {
        nic_free_all_interrupts(nic);
-       nic_disable_msix(nic);
+       pci_free_irq_vectors(nic->pdev);
+       nic->num_vec = 0;
 }
 
 static int nic_num_sqs_en(struct nicpf *nic, int vf_en)
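Both thunder drivers (nicpf here, nicvf below) retire their private msix_entries arrays in favor of the managed PCI IRQ API: pci_alloc_irq_vectors() replaces pci_enable_msix() plus the entry bookkeeping, pci_irq_vector() translates a vector index into a Linux IRQ number on demand, and pci_free_irq_vectors() replaces the disable-and-kfree pair. The resulting shape, condensed:

    int nvec = pci_msix_vec_count(pdev);

    /* min == max: all-or-nothing allocation, as these drivers require */
    if (pci_alloc_irq_vectors(pdev, nvec, nvec, PCI_IRQ_MSIX) < 0)
            return -ENOMEM;

    err = request_irq(pci_irq_vector(pdev, vec), handler, 0, name, data);

    /* teardown mirrors setup */
    free_irq(pci_irq_vector(pdev, vec), data);
    pci_free_irq_vectors(pdev);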
index 6feaa24bcfd42bb9647298a0b665e6bf3b11d496..81a2fcb3cb1b31ef18d65d52f4eaf5dde689a4e4 100644 (file)
@@ -16,6 +16,7 @@
 #include <linux/log2.h>
 #include <linux/prefetch.h>
 #include <linux/irq.h>
+#include <linux/iommu.h>
 
 #include "nic_reg.h"
 #include "nic.h"
@@ -525,7 +526,12 @@ static void nicvf_snd_pkt_handler(struct net_device *netdev,
                        /* Get actual TSO descriptors and free them */
                        tso_sqe =
                         (struct sq_hdr_subdesc *)GET_SQ_DESC(sq, hdr->rsvd2);
+                       nicvf_unmap_sndq_buffers(nic, sq, hdr->rsvd2,
+                                                tso_sqe->subdesc_cnt);
                        nicvf_put_sq_desc(sq, tso_sqe->subdesc_cnt + 1);
+               } else {
+                       nicvf_unmap_sndq_buffers(nic, sq, cqe_tx->sqe_ptr,
+                                                hdr->subdesc_cnt);
                }
                nicvf_put_sq_desc(sq, hdr->subdesc_cnt + 1);
                prefetch(skb);
@@ -576,6 +582,7 @@ static void nicvf_rcv_pkt_handler(struct net_device *netdev,
 {
        struct sk_buff *skb;
        struct nicvf *nic = netdev_priv(netdev);
+       struct nicvf *snic = nic;
        int err = 0;
        int rq_idx;
 
@@ -592,7 +599,7 @@ static void nicvf_rcv_pkt_handler(struct net_device *netdev,
        if (err && !cqe_rx->rb_cnt)
                return;
 
-       skb = nicvf_get_rcv_skb(nic, cqe_rx);
+       skb = nicvf_get_rcv_skb(snic, cqe_rx);
        if (!skb) {
                netdev_dbg(nic->netdev, "Packet not received\n");
                return;
@@ -875,38 +882,9 @@ static irqreturn_t nicvf_qs_err_intr_handler(int irq, void *nicvf_irq)
        return IRQ_HANDLED;
 }
 
-static int nicvf_enable_msix(struct nicvf *nic)
-{
-       int ret, vec;
-
-       nic->num_vec = NIC_VF_MSIX_VECTORS;
-
-       for (vec = 0; vec < nic->num_vec; vec++)
-               nic->msix_entries[vec].entry = vec;
-
-       ret = pci_enable_msix(nic->pdev, nic->msix_entries, nic->num_vec);
-       if (ret) {
-               netdev_err(nic->netdev,
-                          "Req for #%d msix vectors failed\n", nic->num_vec);
-               return 0;
-       }
-       nic->msix_enabled = 1;
-       return 1;
-}
-
-static void nicvf_disable_msix(struct nicvf *nic)
-{
-       if (nic->msix_enabled) {
-               pci_disable_msix(nic->pdev);
-               nic->msix_enabled = 0;
-               nic->num_vec = 0;
-       }
-}
-
 static void nicvf_set_irq_affinity(struct nicvf *nic)
 {
        int vec, cpu;
-       int irqnum;
 
        for (vec = 0; vec < nic->num_vec; vec++) {
                if (!nic->irq_allocated[vec])
@@ -923,15 +901,14 @@ static void nicvf_set_irq_affinity(struct nicvf *nic)
 
                cpumask_set_cpu(cpumask_local_spread(cpu, nic->node),
                                nic->affinity_mask[vec]);
-               irqnum = nic->msix_entries[vec].vector;
-               irq_set_affinity_hint(irqnum, nic->affinity_mask[vec]);
+               irq_set_affinity_hint(pci_irq_vector(nic->pdev, vec),
+                                     nic->affinity_mask[vec]);
        }
 }
 
 static int nicvf_register_interrupts(struct nicvf *nic)
 {
        int irq, ret = 0;
-       int vector;
 
        for_each_cq_irq(irq)
                sprintf(nic->irq_name[irq], "%s-rxtx-%d",
@@ -950,8 +927,8 @@ static int nicvf_register_interrupts(struct nicvf *nic)
 
        /* Register CQ interrupts */
        for (irq = 0; irq < nic->qs->cq_cnt; irq++) {
-               vector = nic->msix_entries[irq].vector;
-               ret = request_irq(vector, nicvf_intr_handler,
+               ret = request_irq(pci_irq_vector(nic->pdev, irq),
+                                 nicvf_intr_handler,
                                  0, nic->irq_name[irq], nic->napi[irq]);
                if (ret)
                        goto err;
@@ -961,8 +938,8 @@ static int nicvf_register_interrupts(struct nicvf *nic)
        /* Register RBDR interrupt */
        for (irq = NICVF_INTR_ID_RBDR;
             irq < (NICVF_INTR_ID_RBDR + nic->qs->rbdr_cnt); irq++) {
-               vector = nic->msix_entries[irq].vector;
-               ret = request_irq(vector, nicvf_rbdr_intr_handler,
+               ret = request_irq(pci_irq_vector(nic->pdev, irq),
+                                 nicvf_rbdr_intr_handler,
                                  0, nic->irq_name[irq], nic);
                if (ret)
                        goto err;
@@ -974,7 +951,7 @@ static int nicvf_register_interrupts(struct nicvf *nic)
                nic->pnicvf->netdev->name,
                nic->sqs_mode ? (nic->sqs_id + 1) : 0);
        irq = NICVF_INTR_ID_QS_ERR;
-       ret = request_irq(nic->msix_entries[irq].vector,
+       ret = request_irq(pci_irq_vector(nic->pdev, irq),
                          nicvf_qs_err_intr_handler,
                          0, nic->irq_name[irq], nic);
        if (ret)
@@ -994,6 +971,7 @@ err:
 
 static void nicvf_unregister_interrupts(struct nicvf *nic)
 {
+       struct pci_dev *pdev = nic->pdev;
        int irq;
 
        /* Free registered interrupts */
@@ -1001,19 +979,20 @@ static void nicvf_unregister_interrupts(struct nicvf *nic)
                if (!nic->irq_allocated[irq])
                        continue;
 
-               irq_set_affinity_hint(nic->msix_entries[irq].vector, NULL);
+               irq_set_affinity_hint(pci_irq_vector(pdev, irq), NULL);
                free_cpumask_var(nic->affinity_mask[irq]);
 
                if (irq < NICVF_INTR_ID_SQ)
-                       free_irq(nic->msix_entries[irq].vector, nic->napi[irq]);
+                       free_irq(pci_irq_vector(pdev, irq), nic->napi[irq]);
                else
-                       free_irq(nic->msix_entries[irq].vector, nic);
+                       free_irq(pci_irq_vector(pdev, irq), nic);
 
                nic->irq_allocated[irq] = false;
        }
 
        /* Disable MSI-X */
-       nicvf_disable_msix(nic);
+       pci_free_irq_vectors(pdev);
+       nic->num_vec = 0;
 }
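
Context note: the hunks above replace the driver's hand-rolled msix_entries[] bookkeeping with the managed PCI IRQ vector API: pci_alloc_irq_vectors() allocates the vectors, pci_irq_vector() turns a vector index into a Linux IRQ number, and pci_free_irq_vectors() releases them all. A minimal sketch of the same pattern, with hypothetical foo_* names that are not part of this patch:

	#include <linux/pci.h>
	#include <linux/interrupt.h>

	static irqreturn_t foo_irq_handler(int irq, void *data)
	{
		return IRQ_HANDLED;	/* placeholder handler */
	}

	static int foo_setup_irqs(struct pci_dev *pdev, void *ctx, int nvec)
	{
		int i, ret;

		/* min == max: fail unless every requested vector is granted */
		ret = pci_alloc_irq_vectors(pdev, nvec, nvec, PCI_IRQ_MSIX);
		if (ret < 0)
			return ret;

		for (i = 0; i < nvec; i++) {
			/* pci_irq_vector() maps vector index -> Linux IRQ number */
			ret = request_irq(pci_irq_vector(pdev, i), foo_irq_handler,
					  0, "foo", ctx);
			if (ret)
				goto err_free;
		}
		return 0;

	err_free:
		while (--i >= 0)
			free_irq(pci_irq_vector(pdev, i), ctx);
		pci_free_irq_vectors(pdev);
		return ret;
	}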
 
 /* Initialize MSIX vectors and register MISC interrupt.
@@ -1025,16 +1004,22 @@ static int nicvf_register_misc_interrupt(struct nicvf *nic)
        int irq = NICVF_INTR_ID_MISC;
 
        /* Return if mailbox interrupt is already registered */
-       if (nic->msix_enabled)
+       if (nic->pdev->msix_enabled)
                return 0;
 
        /* Enable MSI-X */
-       if (!nicvf_enable_msix(nic))
+       nic->num_vec = pci_msix_vec_count(nic->pdev);
+       ret = pci_alloc_irq_vectors(nic->pdev, nic->num_vec, nic->num_vec,
+                                   PCI_IRQ_MSIX);
+       if (ret < 0) {
+               netdev_err(nic->netdev,
+                          "Request for #%d MSI-X vectors failed\n", nic->num_vec);

                return 1;
+       }
 
        sprintf(nic->irq_name[irq], "%s Mbox", "NICVF");
        /* Register Misc interrupt */
-       ret = request_irq(nic->msix_entries[irq].vector,
+       ret = request_irq(pci_irq_vector(nic->pdev, irq),
                          nicvf_misc_intr_handler, 0, nic->irq_name[irq], nic);
 
        if (ret)
@@ -1157,7 +1142,7 @@ int nicvf_stop(struct net_device *netdev)
 
        /* Wait for pending IRQ handlers to finish */
        for (irq = 0; irq < nic->num_vec; irq++)
-               synchronize_irq(nic->msix_entries[irq].vector);
+               synchronize_irq(pci_irq_vector(nic->pdev, irq));
 
        tasklet_kill(&nic->rbdr_task);
        tasklet_kill(&nic->qs_err_task);
@@ -1358,7 +1343,7 @@ static int nicvf_set_mac_address(struct net_device *netdev, void *p)
 
        memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
 
-       if (nic->msix_enabled) {
+       if (nic->pdev->msix_enabled) {
                if (nicvf_hw_set_mac_addr(nic, netdev))
                        return -EBUSY;
        } else {
@@ -1643,6 +1628,9 @@ static int nicvf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
        if (!pass1_silicon(nic->pdev))
                nic->hw_tso = true;
 
+       /* Get iommu domain for iova to physical addr conversion */
+       nic->iommu_domain = iommu_get_domain_for_dev(dev);
+
        pci_read_config_word(nic->pdev, PCI_SUBSYSTEM_ID, &sdevid);
        if (sdevid == 0xA134)
                nic->t88 = true;
@@ -1655,8 +1643,9 @@ static int nicvf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
        if (err)
                goto err_unregister_interrupts;
 
-       netdev->hw_features = (NETIF_F_RXCSUM | NETIF_F_IP_CSUM | NETIF_F_SG |
-                              NETIF_F_TSO | NETIF_F_GRO |
+       netdev->hw_features = (NETIF_F_RXCSUM | NETIF_F_SG |
+                              NETIF_F_TSO | NETIF_F_GRO | NETIF_F_TSO6 |
+                              NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
                               NETIF_F_HW_VLAN_CTAG_RX);
 
        netdev->hw_features |= NETIF_F_RXHASH;
@@ -1664,7 +1653,8 @@ static int nicvf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
        netdev->features |= netdev->hw_features;
        netdev->hw_features |= NETIF_F_LOOPBACK;
 
-       netdev->vlan_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO;
+       netdev->vlan_features = NETIF_F_SG | NETIF_F_IP_CSUM |
+                               NETIF_F_IPV6_CSUM | NETIF_F_TSO | NETIF_F_TSO6;
 
        netdev->netdev_ops = &nicvf_netdev_ops;
        netdev->watchdog_timeo = NICVF_TX_TIMEOUT;
index ac0390be3b126e957071bde64daebdd29b536c34..7b0fd8d871ccdb2813473c1030714f7d4e1678a2 100644 (file)
@@ -10,6 +10,7 @@
 #include <linux/netdevice.h>
 #include <linux/ip.h>
 #include <linux/etherdevice.h>
+#include <linux/iommu.h>
 #include <net/ip.h>
 #include <net/tso.h>
 
 #include "q_struct.h"
 #include "nicvf_queues.h"
 
+#define NICVF_PAGE_ORDER ((PAGE_SIZE <= 4096) ?  PAGE_ALLOC_COSTLY_ORDER : 0)
+
+static inline u64 nicvf_iova_to_phys(struct nicvf *nic, dma_addr_t dma_addr)
+{
+       /* Translation is installed only when IOMMU is present */
+       if (nic->iommu_domain)
+               return iommu_iova_to_phys(nic->iommu_domain, dma_addr);
+       return dma_addr;
+}
+
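
Context note: once the device sits behind an IOMMU, the addresses it hands back in completions are IOVAs, so phys_to_virt()/virt_to_page() may only be applied after translating through the domain, which is what the helper above enables. A hedged illustration of the lookup, with a hypothetical foo_ name:

	#include <linux/iommu.h>
	#include <linux/io.h>
	#include <linux/mm.h>

	/* Sketch: recover the struct page behind a device-visible address */
	static struct page *foo_addr_to_page(struct iommu_domain *dom,
					     dma_addr_t dev_addr)
	{
		phys_addr_t phys = dom ? iommu_iova_to_phys(dom, dev_addr)
				       : (phys_addr_t)dev_addr;

		/* iommu_iova_to_phys() returns 0 if no mapping exists */
		if (!phys)
			return NULL;
		return virt_to_page(phys_to_virt(phys));
	}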
 static void nicvf_get_page(struct nicvf *nic)
 {
        if (!nic->rb_pageref || !nic->rb_page)
@@ -87,7 +98,7 @@ static void nicvf_free_q_desc_mem(struct nicvf *nic, struct q_desc_mem *dmem)
 static inline int nicvf_alloc_rcv_buffer(struct nicvf *nic, gfp_t gfp,
                                         u32 buf_len, u64 **rbuf)
 {
-       int order = (PAGE_SIZE <= 4096) ?  PAGE_ALLOC_COSTLY_ORDER : 0;
+       int order = NICVF_PAGE_ORDER;
 
        /* Check if request can be accommodated in the previously allocated page */
        if (nic->rb_page &&
@@ -97,22 +108,27 @@ static inline int nicvf_alloc_rcv_buffer(struct nicvf *nic, gfp_t gfp,
        }
 
        nicvf_get_page(nic);
-       nic->rb_page = NULL;
 
        /* Allocate a new page */
+       nic->rb_page = alloc_pages(gfp | __GFP_COMP | __GFP_NOWARN,
+                                  order);
        if (!nic->rb_page) {
-               nic->rb_page = alloc_pages(gfp | __GFP_COMP | __GFP_NOWARN,
-                                          order);
-               if (!nic->rb_page) {
-                       this_cpu_inc(nic->pnicvf->drv_stats->
-                                    rcv_buffer_alloc_failures);
-                       return -ENOMEM;
-               }
-               nic->rb_page_offset = 0;
+               this_cpu_inc(nic->pnicvf->drv_stats->rcv_buffer_alloc_failures);
+               return -ENOMEM;
        }
-
+       nic->rb_page_offset = 0;
 ret:
-       *rbuf = (u64 *)((u64)page_address(nic->rb_page) + nic->rb_page_offset);
+       /* HW will ensure data coherency, CPU sync not required */
+       *rbuf = (u64 *)((u64)dma_map_page_attrs(&nic->pdev->dev, nic->rb_page,
+                                               nic->rb_page_offset, buf_len,
+                                               DMA_FROM_DEVICE,
+                                               DMA_ATTR_SKIP_CPU_SYNC));
+       if (dma_mapping_error(&nic->pdev->dev, (dma_addr_t)*rbuf)) {
+               if (!nic->rb_page_offset)
+                       __free_pages(nic->rb_page, order);
+               nic->rb_page = NULL;
+               return -ENOMEM;
+       }
        nic->rb_page_offset += buf_len;
 
        return 0;
@@ -158,16 +174,21 @@ static int  nicvf_init_rbdr(struct nicvf *nic, struct rbdr *rbdr,
        rbdr->dma_size = buf_size;
        rbdr->enable = true;
        rbdr->thresh = RBDR_THRESH;
+       rbdr->head = 0;
+       rbdr->tail = 0;
 
        nic->rb_page = NULL;
        for (idx = 0; idx < ring_len; idx++) {
                err = nicvf_alloc_rcv_buffer(nic, GFP_KERNEL, RCV_FRAG_LEN,
                                             &rbuf);
-               if (err)
+               if (err) {
+                       /* Set tail so already allocated and mapped buffers get freed */
+                       rbdr->tail = idx - 1;
                        return err;
+               }
 
                desc = GET_RBDR_DESC(rbdr, idx);
-               desc->buf_addr = virt_to_phys(rbuf) >> NICVF_RCV_BUF_ALIGN;
+               desc->buf_addr = (u64)rbuf >> NICVF_RCV_BUF_ALIGN;
        }
 
        nicvf_get_page(nic);
@@ -179,7 +200,7 @@ static int  nicvf_init_rbdr(struct nicvf *nic, struct rbdr *rbdr,
 static void nicvf_free_rbdr(struct nicvf *nic, struct rbdr *rbdr)
 {
        int head, tail;
-       u64 buf_addr;
+       u64 buf_addr, phys_addr;
        struct rbdr_entry_t *desc;
 
        if (!rbdr)
@@ -192,18 +213,26 @@ static void nicvf_free_rbdr(struct nicvf *nic, struct rbdr *rbdr)
        head = rbdr->head;
        tail = rbdr->tail;
 
-       /* Free SKBs */
+       /* Release page references */
        while (head != tail) {
                desc = GET_RBDR_DESC(rbdr, head);
-               buf_addr = desc->buf_addr << NICVF_RCV_BUF_ALIGN;
-               put_page(virt_to_page(phys_to_virt(buf_addr)));
+               buf_addr = ((u64)desc->buf_addr) << NICVF_RCV_BUF_ALIGN;
+               phys_addr = nicvf_iova_to_phys(nic, buf_addr);
+               dma_unmap_page_attrs(&nic->pdev->dev, buf_addr, RCV_FRAG_LEN,
+                                    DMA_FROM_DEVICE, DMA_ATTR_SKIP_CPU_SYNC);
+               if (phys_addr)
+                       put_page(virt_to_page(phys_to_virt(phys_addr)));
                head++;
                head &= (rbdr->dmem.q_len - 1);
        }
-       /* Free SKB of tail desc */
+       /* Release buffer of tail desc */
        desc = GET_RBDR_DESC(rbdr, tail);
-       buf_addr = desc->buf_addr << NICVF_RCV_BUF_ALIGN;
-       put_page(virt_to_page(phys_to_virt(buf_addr)));
+       buf_addr = ((u64)desc->buf_addr) << NICVF_RCV_BUF_ALIGN;
+       phys_addr = nicvf_iova_to_phys(nic, buf_addr);
+       dma_unmap_page_attrs(&nic->pdev->dev, buf_addr, RCV_FRAG_LEN,
+                            DMA_FROM_DEVICE, DMA_ATTR_SKIP_CPU_SYNC);
+       if (phys_addr)
+               put_page(virt_to_page(phys_to_virt(phys_addr)));
 
        /* Free RBDR ring */
        nicvf_free_q_desc_mem(nic, &rbdr->dmem);
@@ -250,7 +279,7 @@ refill:
                        break;
 
                desc = GET_RBDR_DESC(rbdr, tail);
-               desc->buf_addr = virt_to_phys(rbuf) >> NICVF_RCV_BUF_ALIGN;
+               desc->buf_addr = (u64)rbuf >> NICVF_RCV_BUF_ALIGN;
                refill_rb_cnt--;
                new_rb++;
        }
@@ -361,9 +390,29 @@ static int nicvf_init_snd_queue(struct nicvf *nic,
        return 0;
 }
 
+void nicvf_unmap_sndq_buffers(struct nicvf *nic, struct snd_queue *sq,
+                             int hdr_sqe, u8 subdesc_cnt)
+{
+       u8 idx;
+       struct sq_gather_subdesc *gather;
+
+       /* Unmap DMA mapped skb data buffers */
+       for (idx = 0; idx < subdesc_cnt; idx++) {
+               hdr_sqe++;
+               hdr_sqe &= (sq->dmem.q_len - 1);
+               gather = (struct sq_gather_subdesc *)GET_SQ_DESC(sq, hdr_sqe);
+               /* HW will ensure data coherency, CPU sync not required */
+               dma_unmap_page_attrs(&nic->pdev->dev, gather->addr,
+                                    gather->size, DMA_TO_DEVICE,
+                                    DMA_ATTR_SKIP_CPU_SYNC);
+       }
+}
+
 static void nicvf_free_snd_queue(struct nicvf *nic, struct snd_queue *sq)
 {
        struct sk_buff *skb;
+       struct sq_hdr_subdesc *hdr;
+       struct sq_hdr_subdesc *tso_sqe;
 
        if (!sq)
                return;
@@ -379,8 +428,22 @@ static void nicvf_free_snd_queue(struct nicvf *nic, struct snd_queue *sq)
        smp_rmb();
        while (sq->head != sq->tail) {
                skb = (struct sk_buff *)sq->skbuff[sq->head];
-               if (skb)
-                       dev_kfree_skb_any(skb);
+               if (!skb)
+                       goto next;
+               hdr = (struct sq_hdr_subdesc *)GET_SQ_DESC(sq, sq->head);
+               /* Check for dummy descriptor used for HW TSO offload on 88xx */
+               if (hdr->dont_send) {
+                       /* Get actual TSO descriptors and unmap them */
+                       tso_sqe =
+                        (struct sq_hdr_subdesc *)GET_SQ_DESC(sq, hdr->rsvd2);
+                       nicvf_unmap_sndq_buffers(nic, sq, hdr->rsvd2,
+                                                tso_sqe->subdesc_cnt);
+               } else {
+                       nicvf_unmap_sndq_buffers(nic, sq, sq->head,
+                                                hdr->subdesc_cnt);
+               }
+               dev_kfree_skb_any(skb);
+next:
                sq->head++;
                sq->head &= (sq->dmem.q_len - 1);
        }
@@ -559,9 +622,11 @@ static void nicvf_rcv_queue_config(struct nicvf *nic, struct queue_set *qs,
        nicvf_send_msg_to_pf(nic, &mbx);
 
        if (!nic->sqs_mode && (qidx == 0)) {
-               /* Enable checking L3/L4 length and TCP/UDP checksums */
+               /* Enable checking L3/L4 length and TCP/UDP checksums
+                * Also allow IPv6 pkts with zero UDP checksum.
+                */
                nicvf_queue_reg_write(nic, NIC_QSET_RQ_GEN_CFG, 0,
-                                     (BIT(24) | BIT(23) | BIT(21)));
+                                     (BIT(24) | BIT(23) | BIT(21) | BIT(20)));
                nicvf_config_vlan_stripping(nic, nic->netdev->features);
        }
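
Context note: these register bits are otherwise undocumented in the diff; going only by the new comment, one plausible naming (invented here for illustration, not from the Cavium headers) would be:

	#include <linux/bitops.h>

	/* Hypothetical names for the NIC_QSET_RQ_GEN_CFG bits written above */
	#define RQ_CFG_LEN_CSUM_CHECKS		(BIT(24) | BIT(23) | BIT(21))
	#define RQ_CFG_ALLOW_ZERO_UDP6_CSUM	BIT(20)	/* new in this hunk */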
 
@@ -882,6 +947,14 @@ static inline int nicvf_get_sq_desc(struct snd_queue *sq, int desc_cnt)
        return qentry;
 }
 
+/* Rollback to previous tail pointer when descriptors not used */
+static inline void nicvf_rollback_sq_desc(struct snd_queue *sq,
+                                         int qentry, int desc_cnt)
+{
+       sq->tail = qentry;
+       atomic_add(desc_cnt, &sq->free_cnt);
+}
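
Context note: nicvf_rollback_sq_desc() exists for the transmit path's reserve / map / publish sequence: descriptors are reserved first, and if a later DMA mapping fails the tail and free count are restored as if nothing happened. A generic sketch of that idiom, with hypothetical foo_* ring helpers:

	#include <linux/dma-mapping.h>

	/* Sketch: reserve ring slots up front, roll back if mapping fails */
	static int foo_xmit_buf(struct device *dev, struct page *pg,
				size_t off, size_t len)
	{
		int start = foo_reserve_descs(1);	/* hypothetical: advances sq->tail */
		dma_addr_t dma;

		dma = dma_map_page_attrs(dev, pg, off, len, DMA_TO_DEVICE,
					 DMA_ATTR_SKIP_CPU_SYNC);
		if (dma_mapping_error(dev, dma)) {
			foo_rollback_descs(start, 1);	/* restore tail + free_cnt */
			return -ENOMEM;
		}
		foo_fill_gather_desc(start, dma, len);	/* hypothetical */
		foo_ring_doorbell();
		return 0;
	}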
+
 /* Free descriptor back to SQ for future use */
 void nicvf_put_sq_desc(struct snd_queue *sq, int desc_cnt)
 {
@@ -1021,7 +1094,13 @@ nicvf_sq_add_hdr_subdesc(struct nicvf *nic, struct snd_queue *sq, int qentry,
 {
        int proto;
        struct sq_hdr_subdesc *hdr;
+       union {
+               struct iphdr *v4;
+               struct ipv6hdr *v6;
+               unsigned char *hdr;
+       } ip;
 
+       ip.hdr = skb_network_header(skb);
        hdr = (struct sq_hdr_subdesc *)GET_SQ_DESC(sq, qentry);
        memset(hdr, 0, SND_QUEUE_DESC_SIZE);
        hdr->subdesc_type = SQ_DESC_TYPE_HEADER;
@@ -1046,7 +1125,9 @@ nicvf_sq_add_hdr_subdesc(struct nicvf *nic, struct snd_queue *sq, int qentry,
                hdr->l3_offset = skb_network_offset(skb);
                hdr->l4_offset = skb_transport_offset(skb);
 
-               proto = ip_hdr(skb)->protocol;
+               proto = (ip.v4->version == 4) ? ip.v4->protocol :
+                       ip.v6->nexthdr;
+
                switch (proto) {
                case IPPROTO_TCP:
                        hdr->csum_l4 = SEND_L4_CSUM_TCP;
@@ -1207,8 +1288,9 @@ int nicvf_sq_append_skb(struct nicvf *nic, struct snd_queue *sq,
                        struct sk_buff *skb, u8 sq_num)
 {
        int i, size;
-       int subdesc_cnt, tso_sqe = 0;
+       int subdesc_cnt, hdr_sqe = 0;
        int qentry;
+       u64 dma_addr;
 
        subdesc_cnt = nicvf_sq_subdesc_required(nic, skb);
        if (subdesc_cnt > atomic_read(&sq->free_cnt))
@@ -1223,12 +1305,21 @@ int nicvf_sq_append_skb(struct nicvf *nic, struct snd_queue *sq,
        /* Add SQ header subdesc */
        nicvf_sq_add_hdr_subdesc(nic, sq, qentry, subdesc_cnt - 1,
                                 skb, skb->len);
-       tso_sqe = qentry;
+       hdr_sqe = qentry;
 
        /* Add SQ gather subdescs */
        qentry = nicvf_get_nxt_sqentry(sq, qentry);
        size = skb_is_nonlinear(skb) ? skb_headlen(skb) : skb->len;
-       nicvf_sq_add_gather_subdesc(sq, qentry, size, virt_to_phys(skb->data));
+       /* HW will ensure data coherency, CPU sync not required */
+       dma_addr = dma_map_page_attrs(&nic->pdev->dev, virt_to_page(skb->data),
+                                     offset_in_page(skb->data), size,
+                                     DMA_TO_DEVICE, DMA_ATTR_SKIP_CPU_SYNC);
+       if (dma_mapping_error(&nic->pdev->dev, dma_addr)) {
+               nicvf_rollback_sq_desc(sq, qentry, subdesc_cnt);
+               return 0;
+       }
+
+       nicvf_sq_add_gather_subdesc(sq, qentry, size, dma_addr);
 
        /* Check for scattered buffer */
        if (!skb_is_nonlinear(skb))
@@ -1241,15 +1332,26 @@ int nicvf_sq_append_skb(struct nicvf *nic, struct snd_queue *sq,
 
                qentry = nicvf_get_nxt_sqentry(sq, qentry);
                size = skb_frag_size(frag);
-               nicvf_sq_add_gather_subdesc(sq, qentry, size,
-                                           virt_to_phys(
-                                           skb_frag_address(frag)));
+               dma_addr = dma_map_page_attrs(&nic->pdev->dev,
+                                             skb_frag_page(frag),
+                                             frag->page_offset, size,
+                                             DMA_TO_DEVICE,
+                                             DMA_ATTR_SKIP_CPU_SYNC);
+               if (dma_mapping_error(&nic->pdev->dev, dma_addr)) {
+                       /* Free entire chain of mapped buffers
+                        * here 'i' = frags mapped + above mapped skb->data
+                        */
+                       nicvf_unmap_sndq_buffers(nic, sq, hdr_sqe, i);
+                       nicvf_rollback_sq_desc(sq, qentry, subdesc_cnt);
+                       return 0;
+               }
+               nicvf_sq_add_gather_subdesc(sq, qentry, size, dma_addr);
        }
 
 doorbell:
        if (nic->t88 && skb_shinfo(skb)->gso_size) {
                qentry = nicvf_get_nxt_sqentry(sq, qentry);
-               nicvf_sq_add_cqe_subdesc(sq, qentry, tso_sqe, skb);
+               nicvf_sq_add_cqe_subdesc(sq, qentry, hdr_sqe, skb);
        }
 
        nicvf_sq_doorbell(nic, skb, sq_num, subdesc_cnt);
@@ -1282,6 +1384,7 @@ struct sk_buff *nicvf_get_rcv_skb(struct nicvf *nic, struct cqe_rx_t *cqe_rx)
        int offset;
        u16 *rb_lens = NULL;
        u64 *rb_ptrs = NULL;
+       u64 phys_addr;
 
        rb_lens = (void *)cqe_rx + (3 * sizeof(u64));
        /* Except 88xx pass1 on all other chips CQE_RX2_S is added to
@@ -1296,15 +1399,23 @@ struct sk_buff *nicvf_get_rcv_skb(struct nicvf *nic, struct cqe_rx_t *cqe_rx)
        else
                rb_ptrs = (void *)cqe_rx + (7 * sizeof(u64));
 
-       netdev_dbg(nic->netdev, "%s rb_cnt %d rb0_ptr %llx rb0_sz %d\n",
-                  __func__, cqe_rx->rb_cnt, cqe_rx->rb0_ptr, cqe_rx->rb0_sz);
-
        for (frag = 0; frag < cqe_rx->rb_cnt; frag++) {
                payload_len = rb_lens[frag_num(frag)];
+               phys_addr = nicvf_iova_to_phys(nic, *rb_ptrs);
+               if (!phys_addr) {
+                       if (skb)
+                               dev_kfree_skb_any(skb);
+                       return NULL;
+               }
+
                if (!frag) {
                        /* First fragment */
+                       dma_unmap_page_attrs(&nic->pdev->dev,
+                                            *rb_ptrs - cqe_rx->align_pad,
+                                            RCV_FRAG_LEN, DMA_FROM_DEVICE,
+                                            DMA_ATTR_SKIP_CPU_SYNC);
                        skb = nicvf_rb_ptr_to_skb(nic,
-                                                 *rb_ptrs - cqe_rx->align_pad,
+                                                 phys_addr - cqe_rx->align_pad,
                                                  payload_len);
                        if (!skb)
                                return NULL;
@@ -1312,8 +1423,11 @@ struct sk_buff *nicvf_get_rcv_skb(struct nicvf *nic, struct cqe_rx_t *cqe_rx)
                        skb_put(skb, payload_len);
                } else {
                        /* Add fragments */
-                       page = virt_to_page(phys_to_virt(*rb_ptrs));
-                       offset = phys_to_virt(*rb_ptrs) - page_address(page);
+                       dma_unmap_page_attrs(&nic->pdev->dev, *rb_ptrs,
+                                            RCV_FRAG_LEN, DMA_FROM_DEVICE,
+                                            DMA_ATTR_SKIP_CPU_SYNC);
+                       page = virt_to_page(phys_to_virt(phys_addr));
+                       offset = phys_to_virt(phys_addr) - page_address(page);
                        skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page,
                                        offset, payload_len, RCV_FRAG_LEN);
                }
index 5cb84da99a2de5bc594464db8759c8359d20447f..10cb4b84625b14a0446996776689ae6733f4ccee 100644 (file)
@@ -87,7 +87,7 @@
 #define RCV_BUF_COUNT          (1ULL << (RBDR_SIZE + 13))
 #define MAX_RCV_BUF_COUNT      (1ULL << (RBDR_SIZE6 + 13))
 #define RBDR_THRESH            (RCV_BUF_COUNT / 2)
-#define DMA_BUFFER_LEN         2048 /* In multiples of 128bytes */
+#define DMA_BUFFER_LEN         1536 /* In multiples of 128bytes */
 #define RCV_FRAG_LEN    (SKB_DATA_ALIGN(DMA_BUFFER_LEN + NET_SKB_PAD) + \
                         SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))
 
@@ -301,6 +301,8 @@ struct queue_set {
 
 #define        CQ_ERR_MASK     (CQ_WR_FULL | CQ_WR_DISABLE | CQ_WR_FAULT)
 
+void nicvf_unmap_sndq_buffers(struct nicvf *nic, struct snd_queue *sq,
+                             int hdr_sqe, u8 subdesc_cnt);
 void nicvf_config_vlan_stripping(struct nicvf *nic,
                                 netdev_features_t features);
 int nicvf_set_qset_resources(struct nicvf *nic);
index 4c8e8cf730bbc2ee1d488d42d9d42163d442fb75..64a1095e4d1495c1e32c3ff7008882789f6b6f6e 100644 (file)
@@ -123,14 +123,44 @@ static int bgx_poll_reg(struct bgx *bgx, u8 lmac, u64 reg, u64 mask, bool zero)
        return 1;
 }
 
+static int max_bgx_per_node;
+static void set_max_bgx_per_node(struct pci_dev *pdev)
+{
+       u16 sdevid;
+
+       if (max_bgx_per_node)
+               return;
+
+       pci_read_config_word(pdev, PCI_SUBSYSTEM_ID, &sdevid);
+       switch (sdevid) {
+       case PCI_SUBSYS_DEVID_81XX_BGX:
+               max_bgx_per_node = MAX_BGX_PER_CN81XX;
+               break;
+       case PCI_SUBSYS_DEVID_83XX_BGX:
+               max_bgx_per_node = MAX_BGX_PER_CN83XX;
+               break;
+       case PCI_SUBSYS_DEVID_88XX_BGX:
+       default:
+               max_bgx_per_node = MAX_BGX_PER_CN88XX;
+               break;
+       }
+}
+
+static struct bgx *get_bgx(int node, int bgx_idx)
+{
+       int idx = (node * max_bgx_per_node) + bgx_idx;
+
+       return bgx_vnic[idx];
+}
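
Context note: the stride fix matters because bgx_vnic[] is node-major. Before this change most call sites indexed with MAX_BGX_PER_NODE (4) while the PFC helpers below used MAX_BGX_PER_CN88XX (2), so the same (node, bgx_idx) pair could resolve to two different slots on a multi-node system. A worked sketch:

	/* Sketch: the same (node, idx) resolved with the two old strides */
	static inline int bgx_slot(int node, int idx, int stride)
	{
		return node * stride + idx;
	}

	/* node 1, idx 0:
	 *	bgx_slot(1, 0, 4) == 4	(MAX_BGX_PER_NODE, most call sites)
	 *	bgx_slot(1, 0, 2) == 2	(MAX_BGX_PER_CN88XX, old PFC helpers)
	 * get_bgx() now gives every caller the single per-SoC stride.
	 */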
+
 /* Return number of BGX present in HW */
 unsigned bgx_get_map(int node)
 {
        int i;
        unsigned map = 0;
 
-       for (i = 0; i < MAX_BGX_PER_NODE; i++) {
-               if (bgx_vnic[(node * MAX_BGX_PER_NODE) + i])
+       for (i = 0; i < max_bgx_per_node; i++) {
+               if (bgx_vnic[(node * max_bgx_per_node) + i])
                        map |= (1 << i);
        }
 
@@ -143,7 +173,7 @@ int bgx_get_lmac_count(int node, int bgx_idx)
 {
        struct bgx *bgx;
 
-       bgx = bgx_vnic[(node * MAX_BGX_PER_NODE) + bgx_idx];
+       bgx = get_bgx(node, bgx_idx);
        if (bgx)
                return bgx->lmac_count;
 
@@ -158,7 +188,7 @@ void bgx_get_lmac_link_state(int node, int bgx_idx, int lmacid, void *status)
        struct bgx *bgx;
        struct lmac *lmac;
 
-       bgx = bgx_vnic[(node * MAX_BGX_PER_NODE) + bgx_idx];
+       bgx = get_bgx(node, bgx_idx);
        if (!bgx)
                return;
 
@@ -172,7 +202,7 @@ EXPORT_SYMBOL(bgx_get_lmac_link_state);
 
 const u8 *bgx_get_lmac_mac(int node, int bgx_idx, int lmacid)
 {
-       struct bgx *bgx = bgx_vnic[(node * MAX_BGX_PER_NODE) + bgx_idx];
+       struct bgx *bgx = get_bgx(node, bgx_idx);
 
        if (bgx)
                return bgx->lmac[lmacid].mac;
@@ -183,7 +213,7 @@ EXPORT_SYMBOL(bgx_get_lmac_mac);
 
 void bgx_set_lmac_mac(int node, int bgx_idx, int lmacid, const u8 *mac)
 {
-       struct bgx *bgx = bgx_vnic[(node * MAX_BGX_PER_NODE) + bgx_idx];
+       struct bgx *bgx = get_bgx(node, bgx_idx);
 
        if (!bgx)
                return;
@@ -194,7 +224,7 @@ EXPORT_SYMBOL(bgx_set_lmac_mac);
 
 void bgx_lmac_rx_tx_enable(int node, int bgx_idx, int lmacid, bool enable)
 {
-       struct bgx *bgx = bgx_vnic[(node * MAX_BGX_PER_NODE) + bgx_idx];
+       struct bgx *bgx = get_bgx(node, bgx_idx);
        struct lmac *lmac;
        u64 cfg;
 
@@ -217,7 +247,7 @@ EXPORT_SYMBOL(bgx_lmac_rx_tx_enable);
 void bgx_lmac_get_pfc(int node, int bgx_idx, int lmacid, void *pause)
 {
        struct pfc *pfc = (struct pfc *)pause;
-       struct bgx *bgx = bgx_vnic[(node * MAX_BGX_PER_CN88XX) + bgx_idx];
+       struct bgx *bgx = get_bgx(node, bgx_idx);
        struct lmac *lmac;
        u64 cfg;
 
@@ -237,7 +267,7 @@ EXPORT_SYMBOL(bgx_lmac_get_pfc);
 void bgx_lmac_set_pfc(int node, int bgx_idx, int lmacid, void *pause)
 {
        struct pfc *pfc = (struct pfc *)pause;
-       struct bgx *bgx = bgx_vnic[(node * MAX_BGX_PER_CN88XX) + bgx_idx];
+       struct bgx *bgx = get_bgx(node, bgx_idx);
        struct lmac *lmac;
        u64 cfg;
 
@@ -369,7 +399,7 @@ u64 bgx_get_rx_stats(int node, int bgx_idx, int lmac, int idx)
 {
        struct bgx *bgx;
 
-       bgx = bgx_vnic[(node * MAX_BGX_PER_NODE) + bgx_idx];
+       bgx = get_bgx(node, bgx_idx);
        if (!bgx)
                return 0;
 
@@ -383,7 +413,7 @@ u64 bgx_get_tx_stats(int node, int bgx_idx, int lmac, int idx)
 {
        struct bgx *bgx;
 
-       bgx = bgx_vnic[(node * MAX_BGX_PER_NODE) + bgx_idx];
+       bgx = get_bgx(node, bgx_idx);
        if (!bgx)
                return 0;
 
@@ -411,7 +441,7 @@ void bgx_lmac_internal_loopback(int node, int bgx_idx,
        struct lmac *lmac;
        u64    cfg;
 
-       bgx = bgx_vnic[(node * MAX_BGX_PER_NODE) + bgx_idx];
+       bgx = get_bgx(node, bgx_idx);
        if (!bgx)
                return;
 
@@ -1011,12 +1041,6 @@ static void bgx_print_qlm_mode(struct bgx *bgx, u8 lmacid)
                        dev_info(dev, "%s: 40G_KR4\n", (char *)str);
                break;
        case BGX_MODE_QSGMII:
-               if ((lmacid == 0) &&
-                   (bgx_get_lane2sds_cfg(bgx, lmac) != lmacid))
-                       return;
-               if ((lmacid == 2) &&
-                   (bgx_get_lane2sds_cfg(bgx, lmac) == lmacid))
-                       return;
                dev_info(dev, "%s: QSGMII\n", (char *)str);
                break;
        case BGX_MODE_RGMII:
@@ -1334,11 +1358,13 @@ static int bgx_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
                goto err_release_regions;
        }
 
+       set_max_bgx_per_node(pdev);
+
        pci_read_config_word(pdev, PCI_DEVICE_ID, &sdevid);
        if (sdevid != PCI_DEVICE_ID_THUNDER_RGX) {
                bgx->bgx_id = (pci_resource_start(pdev,
                        PCI_CFG_REG_BAR_NUM) >> 24) & BGX_ID_MASK;
-               bgx->bgx_id += nic_get_node_id(pdev) * MAX_BGX_PER_NODE;
+               bgx->bgx_id += nic_get_node_id(pdev) * max_bgx_per_node;
                bgx->max_lmac = MAX_LMAC_PER_BGX;
                bgx_vnic[bgx->bgx_id] = bgx;
        } else {
index a60f189429bb658cb5ab8383982f86ddd9090fc3..c5080f2cead5d0efc435fd827038eb7dbe4b5830 100644 (file)
@@ -22,7 +22,6 @@
 #define    MAX_BGX_PER_CN88XX                  2
 #define    MAX_BGX_PER_CN81XX                  3 /* 2 BGXs + 1 RGX */
 #define    MAX_BGX_PER_CN83XX                  4
-#define    MAX_BGX_PER_NODE                    4
 #define    MAX_LMAC_PER_BGX                    4
 #define    MAX_BGX_CHANS_PER_LMAC              16
 #define    MAX_DMAC_PER_LMAC                   8
index 6916c62f2487dd9a9bca381685cea6b4c771ef1f..94b9482f14a55da8dc015c6b2e25e23d1309e00f 100644 (file)
@@ -223,7 +223,6 @@ struct port_info {
        struct cmac *mac;
        struct cphy *phy;
        struct link_config link_config;
-       struct net_device_stats netstats;
 };
 
 struct sge;
index d8aff7a4b3c7cc087139cfefc022b4e8cf75fda5..8623be13bf86f2cc82f579daadc8121dcfb8432d 100644 (file)
@@ -296,7 +296,7 @@ static struct net_device_stats *t1_get_stats(struct net_device *dev)
 {
        struct adapter *adapter = dev->ml_priv;
        struct port_info *p = &adapter->port[dev->if_port];
-       struct net_device_stats *ns = &p->netstats;
+       struct net_device_stats *ns = &dev->stats;
        const struct cmac_statistics *pstats;
 
        /* Do a full update of the MAC stats */
index 8b395b537330507a21df8a0705f292e077b77e36..087ff0ffb59777e625395b6caf9753116316289c 100644 (file)
@@ -72,7 +72,6 @@ struct port_info {
        struct cphy phy;
        struct cmac mac;
        struct link_config link_config;
-       struct net_device_stats netstats;
        int activity;
        __be32 iscsi_ipv4addr;
        struct iscsi_config iscsic;
index d76491676b5175e52b383e8c17760859b4f69040..2ff6bd139c96ccf077acab356a4464b13fa61b94 100644 (file)
@@ -1489,7 +1489,7 @@ static struct net_device_stats *cxgb_get_stats(struct net_device *dev)
 {
        struct port_info *pi = netdev_priv(dev);
        struct adapter *adapter = pi->adapter;
-       struct net_device_stats *ns = &pi->netstats;
+       struct net_device_stats *ns = &dev->stats;
        const struct mac_stats *pstats;
 
        spin_lock(&adapter->stats_lock);
index 87000cd397372ab999d25ea2c935f0385fa941ee..0de8eb72325c53ae50cd3ec535915486a9fb5064 100644 (file)
@@ -6369,7 +6369,6 @@ int t4_fixup_host_params(struct adapter *adap, unsigned int page_size,
        unsigned int stat_len = cache_line_size > 64 ? 128 : 64;
        unsigned int fl_align = cache_line_size < 32 ? 32 : cache_line_size;
        unsigned int fl_align_log = fls(fl_align) - 1;
-       unsigned int ingpad;
 
        t4_write_reg(adap, SGE_HOST_PAGE_SIZE_A,
                     HOSTPAGESIZEPF0_V(sge_hps) |
@@ -6389,6 +6388,10 @@ int t4_fixup_host_params(struct adapter *adap, unsigned int page_size,
                                                  INGPADBOUNDARY_SHIFT_X) |
                                 EGRSTATUSPAGESIZE_V(stat_len != 64));
        } else {
+               unsigned int pack_align;
+               unsigned int ingpad, ingpack;
+               unsigned int pcie_cap;
+
                /* T5 introduced the separation of the Free List Padding and
                 * Packing Boundaries.  Thus, we can select a smaller Padding
                 * Boundary to avoid uselessly chewing up PCIe Link and Memory
@@ -6401,27 +6404,62 @@ int t4_fixup_host_params(struct adapter *adap, unsigned int page_size,
                 * Size (the minimum unit of transfer to/from Memory).  If we
                 * have a Padding Boundary which is smaller than the Memory
                 * Line Size, that'll involve a Read-Modify-Write cycle on the
-                * Memory Controller which is never good.  For T5 the smallest
-                * Padding Boundary which we can select is 32 bytes which is
-                * larger than any known Memory Controller Line Size so we'll
-                * use that.
-                *
-                * T5 has a different interpretation of the "0" value for the
-                * Packing Boundary.  This corresponds to 16 bytes instead of
-                * the expected 32 bytes.  We never have a Packing Boundary
-                * less than 32 bytes so we can't use that special value but
-                * on the other hand, if we wanted 32 bytes, the best we can
-                * really do is 64 bytes.
-               */
-               if (fl_align <= 32) {
+                * Memory Controller which is never good.
+                */
+
+               /* We want the Packing Boundary to be based on the Cache Line
+                * Size in order to help avoid False Sharing performance
+                * issues between CPUs, etc.  We also want the Packing
+                * Boundary to incorporate the PCI-E Maximum Payload Size.  We
+                * get best performance when the Packing Boundary is a
+                * multiple of the Maximum Payload Size.
+                */
+               pack_align = fl_align;
+               pcie_cap = pci_find_capability(adap->pdev, PCI_CAP_ID_EXP);
+               if (pcie_cap) {
+                       unsigned int mps, mps_log;
+                       u16 devctl;
+
+                       /* The PCIe Device Control Maximum Payload Size field
+                        * [bits 7:5] encodes sizes as powers of 2 starting at
+                        * 128 bytes.
+                        */
+                       pci_read_config_word(adap->pdev,
+                                            pcie_cap + PCI_EXP_DEVCTL,
+                                            &devctl);
+                       mps_log = ((devctl & PCI_EXP_DEVCTL_PAYLOAD) >> 5) + 7;
+                       mps = 1 << mps_log;
+                       if (mps > pack_align)
+                               pack_align = mps;
+               }
+
+               /* N.B. T5/T6 have a crazy special interpretation of the "0"
+                * value for the Packing Boundary.  This corresponds to 16
+                * bytes instead of the expected 32 bytes.  So if we want 32
+                * bytes, the best we can really do is 64 bytes ...
+                */
+               if (pack_align <= 16) {
+                       ingpack = INGPACKBOUNDARY_16B_X;
+                       fl_align = 16;
+               } else if (pack_align == 32) {
+                       ingpack = INGPACKBOUNDARY_64B_X;
                        fl_align = 64;
-                       fl_align_log = 6;
+               } else {
+                       unsigned int pack_align_log = fls(pack_align) - 1;
+
+                       ingpack = pack_align_log - INGPACKBOUNDARY_SHIFT_X;
+                       fl_align = pack_align;
                }
 
+               /* Use the smallest Ingress Padding which isn't smaller than
+                * the Memory Controller Read/Write Size.  We'll take that as
+                * being 8 bytes since we don't know of any system with a
+                * wider Memory Controller Bus Width.
+                */
                if (is_t5(adap->params.chip))
-                       ingpad = INGPCIEBOUNDARY_32B_X;
+                       ingpad = INGPADBOUNDARY_32B_X;
                else
-                       ingpad = T6_INGPADBOUNDARY_32B_X;
+                       ingpad = T6_INGPADBOUNDARY_8B_X;
 
                t4_set_reg_field(adap, SGE_CONTROL_A,
                                 INGPADBOUNDARY_V(INGPADBOUNDARY_M) |
@@ -6430,8 +6468,7 @@ int t4_fixup_host_params(struct adapter *adap, unsigned int page_size,
                                 EGRSTATUSPAGESIZE_V(stat_len != 64));
                t4_set_reg_field(adap, SGE_CONTROL2_A,
                                 INGPACKBOUNDARY_V(INGPACKBOUNDARY_M),
-                                INGPACKBOUNDARY_V(fl_align_log -
-                                                  INGPACKBOUNDARY_SHIFT_X));
+                                INGPACKBOUNDARY_V(ingpack));
        }
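
Context note: the Maximum Payload Size decode in the hunk above (Device Control bits 7:5, encoding 128 << n bytes) can be condensed into a small helper. A sketch using the same config-space accessors as the patch, with a hypothetical foo_ name:

	#include <linux/pci.h>

	/* Sketch: decode PCIe MPS from the Device Control register */
	static unsigned int foo_pcie_mps(struct pci_dev *pdev)
	{
		unsigned int pcie_cap = pci_find_capability(pdev, PCI_CAP_ID_EXP);
		u16 devctl;

		if (!pcie_cap)
			return 0;	/* not a PCIe function */
		pci_read_config_word(pdev, pcie_cap + PCI_EXP_DEVCTL, &devctl);
		return 128 << ((devctl & PCI_EXP_DEVCTL_PAYLOAD) >> 5);
	}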
        /*
         * Adjust various SGE Free List Host Buffer Sizes.
index 36cf3073ca37d59c2cf3a129603637fc54619c13..f6558cbfc54ec3f7369699d137003cafb003f4c7 100644 (file)
 #define INGPADBOUNDARY_SHIFT_X         5
 
 #define T6_INGPADBOUNDARY_SHIFT_X      3
+#define T6_INGPADBOUNDARY_8B_X         0
 #define T6_INGPADBOUNDARY_32B_X                2
 
+#define INGPADBOUNDARY_32B_X           0
+
 /* CONTROL2 register */
 #define INGPACKBOUNDARY_SHIFT_X                5
 #define INGPACKBOUNDARY_16B_X          0
+#define INGPACKBOUNDARY_64B_X          1
 
 /* GTS register */
 #define SGE_TIMERREGS                  6
index 127ce9707378c151c3037c92dcfc6eb8da16dcb3..91b8f6f5a7653cd74b40f77c1b0e71180c2bbcfd 100644 (file)
@@ -312,8 +312,6 @@ struct de_private {
 
        u32                     msg_enable;
 
-       struct net_device_stats net_stats;
-
        struct pci_dev          *pdev;
 
        u16                     setup_frame[DE_SETUP_FRAME_WORDS];
@@ -388,14 +386,14 @@ static void de_rx_err_acct (struct de_private *de, unsigned rx_tail,
                        netif_warn(de, rx_err, de->dev,
                                   "Oversized Ethernet frame spanned multiple buffers, status %08x!\n",
                                   status);
-                       de->net_stats.rx_length_errors++;
+                       de->dev->stats.rx_length_errors++;
                }
        } else if (status & RxError) {
                /* There was a fatal error. */
-               de->net_stats.rx_errors++; /* end of a packet.*/
-               if (status & 0x0890) de->net_stats.rx_length_errors++;
-               if (status & RxErrCRC) de->net_stats.rx_crc_errors++;
-               if (status & RxErrFIFO) de->net_stats.rx_fifo_errors++;
+               de->dev->stats.rx_errors++; /* end of a packet.*/
+               if (status & 0x0890) de->dev->stats.rx_length_errors++;
+               if (status & RxErrCRC) de->dev->stats.rx_crc_errors++;
+               if (status & RxErrFIFO) de->dev->stats.rx_fifo_errors++;
        }
 }
 
@@ -423,7 +421,7 @@ static void de_rx (struct de_private *de)
                mapping = de->rx_skb[rx_tail].mapping;
 
                if (unlikely(drop)) {
-                       de->net_stats.rx_dropped++;
+                       de->dev->stats.rx_dropped++;
                        goto rx_next;
                }
 
@@ -441,7 +439,7 @@ static void de_rx (struct de_private *de)
                buflen = copying_skb ? (len + RX_OFFSET) : de->rx_buf_sz;
                copy_skb = netdev_alloc_skb(de->dev, buflen);
                if (unlikely(!copy_skb)) {
-                       de->net_stats.rx_dropped++;
+                       de->dev->stats.rx_dropped++;
                        drop = 1;
                        rx_work = 100;
                        goto rx_next;
@@ -470,8 +468,8 @@ static void de_rx (struct de_private *de)
 
                skb->protocol = eth_type_trans (skb, de->dev);
 
-               de->net_stats.rx_packets++;
-               de->net_stats.rx_bytes += skb->len;
+               de->dev->stats.rx_packets++;
+               de->dev->stats.rx_bytes += skb->len;
                rc = netif_rx (skb);
                if (rc == NET_RX_DROP)
                        drop = 1;
@@ -572,18 +570,18 @@ static void de_tx (struct de_private *de)
                                netif_dbg(de, tx_err, de->dev,
                                          "tx err, status 0x%x\n",
                                          status);
-                               de->net_stats.tx_errors++;
+                               de->dev->stats.tx_errors++;
                                if (status & TxOWC)
-                                       de->net_stats.tx_window_errors++;
+                                       de->dev->stats.tx_window_errors++;
                                if (status & TxMaxCol)
-                                       de->net_stats.tx_aborted_errors++;
+                                       de->dev->stats.tx_aborted_errors++;
                                if (status & TxLinkFail)
-                                       de->net_stats.tx_carrier_errors++;
+                                       de->dev->stats.tx_carrier_errors++;
                                if (status & TxFIFOUnder)
-                                       de->net_stats.tx_fifo_errors++;
+                                       de->dev->stats.tx_fifo_errors++;
                        } else {
-                               de->net_stats.tx_packets++;
-                               de->net_stats.tx_bytes += skb->len;
+                               de->dev->stats.tx_packets++;
+                               de->dev->stats.tx_bytes += skb->len;
                                netif_dbg(de, tx_done, de->dev,
                                          "tx done, slot %d\n", tx_tail);
                        }
@@ -814,9 +812,9 @@ static void de_set_rx_mode (struct net_device *dev)
 static inline void de_rx_missed(struct de_private *de, u32 rx_missed)
 {
        if (unlikely(rx_missed & RxMissedOver))
-               de->net_stats.rx_missed_errors += RxMissedMask;
+               de->dev->stats.rx_missed_errors += RxMissedMask;
        else
-               de->net_stats.rx_missed_errors += (rx_missed & RxMissedMask);
+               de->dev->stats.rx_missed_errors += (rx_missed & RxMissedMask);
 }
 
 static void __de_get_stats(struct de_private *de)
@@ -836,7 +834,7 @@ static struct net_device_stats *de_get_stats(struct net_device *dev)
                __de_get_stats(de);
        spin_unlock_irq(&de->lock);
 
-       return &de->net_stats;
+       return &dev->stats;
 }
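
Context note: the Chelsio hunks above and the dl2k hunks below make the same change as this one: the driver-private net_device_stats copy goes away and counters are accounted directly in the dev->stats instance that struct net_device already embeds, so the ndo_get_stats callback shrinks to returning it. In sketch form, with a hypothetical foo_ name:

	#include <linux/netdevice.h>

	/* Sketch: counters live in the netdev itself, no private copy */
	static struct net_device_stats *foo_get_stats(struct net_device *dev)
	{
		/* hot paths bump dev->stats fields directly,
		 * e.g. dev->stats.rx_packets++;
		 */
		return &dev->stats;
	}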
 
 static inline int de_is_running (struct de_private *de)
@@ -1348,7 +1346,7 @@ static void de_clean_rings (struct de_private *de)
                struct sk_buff *skb = de->tx_skb[i].skb;
                if ((skb) && (skb != DE_DUMMY_SKB)) {
                        if (skb != DE_SETUP_SKB) {
-                               de->net_stats.tx_dropped++;
+                               de->dev->stats.tx_dropped++;
                                pci_unmap_single(de->pdev,
                                        de->tx_skb[i].mapping,
                                        skb->len, PCI_DMA_TODEVICE);
index 1e350135f11d9a3b13b8e0300f063d927f65a365..778f974e2928bfdd32391f3ecb8b424843f72374 100644 (file)
@@ -878,10 +878,10 @@ tx_error (struct net_device *dev, int tx_status)
        frame_id = (tx_status & 0xffff0000);
        printk (KERN_ERR "%s: Transmit error, TxStatus %4.4x, FrameId %d.\n",
                dev->name, tx_status, frame_id);
-       np->stats.tx_errors++;
+       dev->stats.tx_errors++;
        /* Transmit Underrun */
        if (tx_status & 0x10) {
-               np->stats.tx_fifo_errors++;
+               dev->stats.tx_fifo_errors++;
                dw16(TxStartThresh, dr16(TxStartThresh) + 0x10);
                /* Transmit Underrun need to set TxReset, DMARest, FIFOReset */
                dw16(ASICCtrl + 2,
@@ -903,7 +903,7 @@ tx_error (struct net_device *dev, int tx_status)
        }
        /* Late Collision */
        if (tx_status & 0x04) {
-               np->stats.tx_fifo_errors++;
+               dev->stats.tx_fifo_errors++;
                /* TxReset and clear FIFO */
                dw16(ASICCtrl + 2, TxReset | FIFOReset);
                /* Wait reset done */
@@ -916,13 +916,8 @@ tx_error (struct net_device *dev, int tx_status)
                /* Let TxStartThresh stay default value */
        }
        /* Maximum Collisions */
-#ifdef ETHER_STATS
        if (tx_status & 0x08)
-               np->stats.collisions16++;
-#else
-       if (tx_status & 0x08)
-               np->stats.collisions++;
-#endif
+               dev->stats.collisions++;
        /* Restart the Tx */
        dw32(MACCtrl, dr16(MACCtrl) | TxEnable);
 }
@@ -952,15 +947,15 @@ receive_packet (struct net_device *dev)
                        break;
                /* Update rx error statistics, drop packet. */
                if (frame_status & RFS_Errors) {
-                       np->stats.rx_errors++;
+                       dev->stats.rx_errors++;
                        if (frame_status & (RxRuntFrame | RxLengthError))
-                               np->stats.rx_length_errors++;
+                               dev->stats.rx_length_errors++;
                        if (frame_status & RxFCSError)
-                               np->stats.rx_crc_errors++;
+                               dev->stats.rx_crc_errors++;
                        if (frame_status & RxAlignmentError && np->speed != 1000)
-                               np->stats.rx_frame_errors++;
+                               dev->stats.rx_frame_errors++;
                        if (frame_status & RxFIFOOverrun)
-                               np->stats.rx_fifo_errors++;
+                               dev->stats.rx_fifo_errors++;
                } else {
                        struct sk_buff *skb;
 
@@ -1096,23 +1091,23 @@ get_stats (struct net_device *dev)
        /* All statistics registers need to be acknowledged,
           else a statistics overflow could cause problems */
 
-       np->stats.rx_packets += dr32(FramesRcvOk);
-       np->stats.tx_packets += dr32(FramesXmtOk);
-       np->stats.rx_bytes += dr32(OctetRcvOk);
-       np->stats.tx_bytes += dr32(OctetXmtOk);
+       dev->stats.rx_packets += dr32(FramesRcvOk);
+       dev->stats.tx_packets += dr32(FramesXmtOk);
+       dev->stats.rx_bytes += dr32(OctetRcvOk);
+       dev->stats.tx_bytes += dr32(OctetXmtOk);
 
-       np->stats.multicast = dr32(McstFramesRcvdOk);
-       np->stats.collisions += dr32(SingleColFrames)
+       dev->stats.multicast = dr32(McstFramesRcvdOk);
+       dev->stats.collisions += dr32(SingleColFrames)
                             +  dr32(MultiColFrames);
 
        /* detailed tx errors */
        stat_reg = dr16(FramesAbortXSColls);
-       np->stats.tx_aborted_errors += stat_reg;
-       np->stats.tx_errors += stat_reg;
+       dev->stats.tx_aborted_errors += stat_reg;
+       dev->stats.tx_errors += stat_reg;
 
        stat_reg = dr16(CarrierSenseErrors);
-       np->stats.tx_carrier_errors += stat_reg;
-       np->stats.tx_errors += stat_reg;
+       dev->stats.tx_carrier_errors += stat_reg;
+       dev->stats.tx_errors += stat_reg;
 
        /* Clear all other statistic register. */
        dr32(McstOctetXmtOk);
@@ -1142,7 +1137,7 @@ get_stats (struct net_device *dev)
        dr16(TCPCheckSumErrors);
        dr16(UDPCheckSumErrors);
        dr16(IPCheckSumErrors);
-       return &np->stats;
+       return &dev->stats;
 }
 
 static int
index 5d8ae5320242635ced82ce1267277c63a9c91a0f..10e98ba33ebf520010a07e53861d769a35b45ec2 100644 (file)
@@ -377,7 +377,6 @@ struct netdev_private {
        void __iomem *eeprom_addr;
        spinlock_t tx_lock;
        spinlock_t rx_lock;
-       struct net_device_stats stats;
        unsigned int rx_buf_sz;         /* Based on MTU+slack. */
        unsigned int speed;             /* Operating speed */
        unsigned int vlan;              /* VLAN Id */
index 30e855004c57592f9ab6c0cea2eb73f63b59b7ca..02dd5246dfae9a99b20f2bb4b2b13185d3239a3c 100644 (file)
@@ -4939,8 +4939,9 @@ static int
 __be_cmd_set_logical_link_config(struct be_adapter *adapter,
                                 int link_state, int version, u8 domain)
 {
-       struct be_mcc_wrb *wrb;
        struct be_cmd_req_set_ll_link *req;
+       struct be_mcc_wrb *wrb;
+       u32 link_config = 0;
        int status;
 
        mutex_lock(&adapter->mcc_lock);
@@ -4962,10 +4963,12 @@ __be_cmd_set_logical_link_config(struct be_adapter *adapter,
 
        if (link_state == IFLA_VF_LINK_STATE_ENABLE ||
            link_state == IFLA_VF_LINK_STATE_AUTO)
-               req->link_config |= PLINK_ENABLE;
+               link_config |= PLINK_ENABLE;
 
        if (link_state == IFLA_VF_LINK_STATE_AUTO)
-               req->link_config |= PLINK_TRACK;
+               link_config |= PLINK_TRACK;
+
+       req->link_config = cpu_to_le32(link_config);
 
        status = be_mcc_notify_wait(adapter);
 err:
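
Context note: the bug pattern fixed here is that req->link_config is a little-endian wire field, so OR-ing host-order flags into it directly corrupts the value on big-endian hosts. The flags are now accumulated in a CPU-order local and converted exactly once. A sketch of the idiom, with a hypothetical struct layout and bit values:

	#include <linux/types.h>
	#include <linux/bitops.h>
	#include <asm/byteorder.h>

	struct foo_req {
		__le32 link_config;		/* little-endian on the wire */
	};

	static void foo_fill(struct foo_req *req, bool enable, bool track)
	{
		u32 cfg = 0;			/* build flags in CPU byte order */

		if (enable)
			cfg |= BIT(0);		/* stands in for PLINK_ENABLE */
		if (track)
			cfg |= BIT(1);		/* stands in for PLINK_TRACK */

		req->link_config = cpu_to_le32(cfg);	/* single conversion */
	}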
index 23d82748f52b9aa19ecd17226444f3326dfaaed4..e863ba74d005d7f255931b336825df2abadd2fc8 100644 (file)
@@ -1148,14 +1148,14 @@ static int ethoc_probe(struct platform_device *pdev)
 
        /* Allow the platform setup code to pass in a MAC address. */
        if (pdata) {
-               memcpy(netdev->dev_addr, pdata->hwaddr, IFHWADDRLEN);
+               ether_addr_copy(netdev->dev_addr, pdata->hwaddr);
                priv->phy_id = pdata->phy_id;
        } else {
                const void *mac;
 
                mac = of_get_mac_address(pdev->dev.of_node);
                if (mac)
-                       memcpy(netdev->dev_addr, mac, IFHWADDRLEN);
+                       ether_addr_copy(netdev->dev_addr, mac);
                priv->phy_id = -1;
        }
 
index 992ebe973d25bfbccff7b5c42dc1801ea41fc9ea..659f1ad37e96a823a4ee3cba04be905d5c9c5dee 100644 (file)
@@ -16,6 +16,7 @@
 
 #include <linux/module.h>
 #include <linux/etherdevice.h>
+#include <linux/interrupt.h>
 #include <linux/of_address.h>
 #include <linux/of_irq.h>
 #include <linux/of_net.h>
@@ -189,11 +190,9 @@ static int nps_enet_poll(struct napi_struct *napi, int budget)
 
        nps_enet_tx_handler(ndev);
        work_done = nps_enet_rx_handler(ndev);
-       if (work_done < budget) {
+       if ((work_done < budget) && napi_complete_done(napi, work_done)) {
                u32 buf_int_enable_value = 0;
 
-               napi_complete_done(napi, work_done);
-
                /* set tx_done and rx_rdy bits */
                buf_int_enable_value |= NPS_ENET_ENABLE << RX_RDY_SHIFT;
                buf_int_enable_value |= NPS_ENET_ENABLE << TX_DONE_SHIFT;
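
Context note: napi_complete_done() now participates in the condition because, since the 4.10-era busy-poll rework, it returns false when NAPI is to stay in polling mode, and in that case the driver must not re-enable its interrupts. The resulting idiom, sketched with hypothetical foo_* helpers:

	/* Sketch: only unmask device interrupts if NAPI really completed */
	static int foo_poll(struct napi_struct *napi, int budget)
	{
		int work_done = foo_clean_rings(napi, budget);	/* hypothetical */

		if (work_done < budget && napi_complete_done(napi, work_done))
			foo_enable_irqs(napi);			/* hypothetical */

		return work_done;
	}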
index 928b0df2b8e033e2b784759e32a0218e0b7e16f2..2153c5bbdd1267416f399e5742e509b8fd0b12cd 100644 (file)
 #include <linux/io.h>
 #include <linux/module.h>
 #include <linux/netdevice.h>
+#include <linux/of.h>
 #include <linux/phy.h>
 #include <linux/platform_device.h>
+#include <linux/property.h>
 #include <net/ip.h>
 #include <net/ncsi.h>
 
 #define DRV_NAME       "ftgmac100"
 #define DRV_VERSION    "0.7"
 
-#define RX_QUEUE_ENTRIES       256     /* must be power of 2 */
-#define TX_QUEUE_ENTRIES       512     /* must be power of 2 */
+/* Arbitrary values, I am not sure the HW has limits */
+#define MAX_RX_QUEUE_ENTRIES   1024
+#define MAX_TX_QUEUE_ENTRIES   1024
+#define MIN_RX_QUEUE_ENTRIES   32
+#define MIN_TX_QUEUE_ENTRIES   32
 
-#define MAX_PKT_SIZE           1518
-#define RX_BUF_SIZE            PAGE_SIZE       /* must be smaller than 0x3fff */
+/* Defaults */
+#define DEF_RX_QUEUE_ENTRIES   128
+#define DEF_TX_QUEUE_ENTRIES   128
 
-/******************************************************************************
- * private data
- *****************************************************************************/
-struct ftgmac100_descs {
-       struct ftgmac100_rxdes rxdes[RX_QUEUE_ENTRIES];
-       struct ftgmac100_txdes txdes[TX_QUEUE_ENTRIES];
-};
+#define MAX_PKT_SIZE           1536
+#define RX_BUF_SIZE            MAX_PKT_SIZE    /* must be smaller than 0x3fff */
+
+/* Min number of tx ring entries before stopping queue */
+#define TX_THRESHOLD           (MAX_SKB_FRAGS + 1)
 
 struct ftgmac100 {
+       /* Registers */
        struct resource *res;
        void __iomem *base;
-       int irq;
-
-       struct ftgmac100_descs *descs;
-       dma_addr_t descs_dma_addr;
-
-       struct page *rx_pages[RX_QUEUE_ENTRIES];
 
+       /* Rx ring */
+       unsigned int rx_q_entries;
+       struct ftgmac100_rxdes *rxdes;
+       dma_addr_t rxdes_dma;
+       struct sk_buff **rx_skbs;
        unsigned int rx_pointer;
+       u32 rxdes0_edorr_mask;
+
+       /* Tx ring */
+       unsigned int tx_q_entries;
+       struct ftgmac100_txdes *txdes;
+       dma_addr_t txdes_dma;
+       struct sk_buff **tx_skbs;
        unsigned int tx_clean_pointer;
        unsigned int tx_pointer;
-       unsigned int tx_pending;
+       u32 txdes0_edotr_mask;
 
-       spinlock_t tx_lock;
+       /* Used to signal the reset task of ring change request */
+       unsigned int new_rx_q_entries;
+       unsigned int new_tx_q_entries;
 
+       /* Scratch page to use when rx skb alloc fails */
+       void *rx_scratch;
+       dma_addr_t rx_scratch_dma;
+
+       /* Component structures */
        struct net_device *netdev;
        struct device *dev;
        struct ncsi_dev *ndev;
        struct napi_struct napi;
-
+       struct work_struct reset_task;
        struct mii_bus *mii_bus;
-       int old_speed;
-       int int_mask_all;
+
+       /* Link management */
+       int cur_speed;
+       int cur_duplex;
        bool use_ncsi;
-       bool enabled;
 
-       u32 rxdes0_edorr_mask;
-       u32 txdes0_edotr_mask;
+       /* Misc */
+       bool need_mac_restart;
+       bool is_aspeed;
 };
 
-static int ftgmac100_alloc_rx_page(struct ftgmac100 *priv,
-                                  struct ftgmac100_rxdes *rxdes, gfp_t gfp);
-
-/******************************************************************************
- * internal functions (hardware register access)
- *****************************************************************************/
-static void ftgmac100_set_rx_ring_base(struct ftgmac100 *priv, dma_addr_t addr)
-{
-       iowrite32(addr, priv->base + FTGMAC100_OFFSET_RXR_BADR);
-}
-
-static void ftgmac100_set_rx_buffer_size(struct ftgmac100 *priv,
-               unsigned int size)
-{
-       size = FTGMAC100_RBSR_SIZE(size);
-       iowrite32(size, priv->base + FTGMAC100_OFFSET_RBSR);
-}
-
-static void ftgmac100_set_normal_prio_tx_ring_base(struct ftgmac100 *priv,
-                                                  dma_addr_t addr)
-{
-       iowrite32(addr, priv->base + FTGMAC100_OFFSET_NPTXR_BADR);
-}
-
-static void ftgmac100_txdma_normal_prio_start_polling(struct ftgmac100 *priv)
-{
-       iowrite32(1, priv->base + FTGMAC100_OFFSET_NPTXPD);
-}
-
-static int ftgmac100_reset_hw(struct ftgmac100 *priv)
+static int ftgmac100_reset_mac(struct ftgmac100 *priv, u32 maccr)
 {
        struct net_device *netdev = priv->netdev;
        int i;
 
        /* NOTE: reset clears all registers */
-       iowrite32(FTGMAC100_MACCR_SW_RST, priv->base + FTGMAC100_OFFSET_MACCR);
-       for (i = 0; i < 5; i++) {
+       iowrite32(maccr, priv->base + FTGMAC100_OFFSET_MACCR);
+       iowrite32(maccr | FTGMAC100_MACCR_SW_RST,
+                 priv->base + FTGMAC100_OFFSET_MACCR);
+       for (i = 0; i < 50; i++) {
                unsigned int maccr;
 
                maccr = ioread32(priv->base + FTGMAC100_OFFSET_MACCR);
                if (!(maccr & FTGMAC100_MACCR_SW_RST))
                        return 0;
 
-               udelay(1000);
+               udelay(1);
        }
 
-       netdev_err(netdev, "software reset failed\n");
+       netdev_err(netdev, "Hardware reset failed\n");
        return -EIO;
 }
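
Context note: the reset wait above now polls 50 times at 1us instead of 5 times at 1ms. The same wait could also be expressed with the readl_poll_timeout() helper from <linux/iopoll.h>; a hedged equivalent (not what the driver uses):

	#include <linux/iopoll.h>

	/* Sketch: wait up to 50us for FTGMAC100_MACCR_SW_RST to self-clear */
	static int foo_wait_reset(void __iomem *maccr_reg)
	{
		u32 val;

		/* poll every 1us, returns -ETIMEDOUT after 50us */
		return readl_poll_timeout(maccr_reg, val,
					  !(val & FTGMAC100_MACCR_SW_RST), 1, 50);
	}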
 
-static void ftgmac100_set_mac(struct ftgmac100 *priv, const unsigned char *mac)
+static int ftgmac100_reset_and_config_mac(struct ftgmac100 *priv)
+{
+       u32 maccr = 0;
+
+       switch (priv->cur_speed) {
+       case SPEED_10:
+       case 0: /* no link */
+               break;
+
+       case SPEED_100:
+               maccr |= FTGMAC100_MACCR_FAST_MODE;
+               break;
+
+       case SPEED_1000:
+               maccr |= FTGMAC100_MACCR_GIGA_MODE;
+               break;
+       default:
+               netdev_err(priv->netdev, "Unknown speed %d!\n",
+                          priv->cur_speed);
+               break;
+       }
+
+       /* (Re)initialize the queue pointers */
+       priv->rx_pointer = 0;
+       priv->tx_clean_pointer = 0;
+       priv->tx_pointer = 0;
+
+       /* The doc says reset twice with 10us interval */
+       if (ftgmac100_reset_mac(priv, maccr))
+               return -EIO;
+       usleep_range(10, 1000);
+       return ftgmac100_reset_mac(priv, maccr);
+}
+
+static void ftgmac100_write_mac_addr(struct ftgmac100 *priv, const u8 *mac)
 {
        unsigned int maddr = mac[0] << 8 | mac[1];
        unsigned int laddr = mac[2] << 24 | mac[3] << 16 | mac[4] << 8 | mac[5];
@@ -143,7 +170,7 @@ static void ftgmac100_set_mac(struct ftgmac100 *priv, const unsigned char *mac)
        iowrite32(laddr, priv->base + FTGMAC100_OFFSET_MAC_LADR);
 }
 
-static void ftgmac100_setup_mac(struct ftgmac100 *priv)
+static void ftgmac100_initial_mac(struct ftgmac100 *priv)
 {
        u8 mac[ETH_ALEN];
        unsigned int m;
@@ -187,55 +214,91 @@ static int ftgmac100_set_mac_addr(struct net_device *dev, void *p)
                return ret;
 
        eth_commit_mac_addr_change(dev, p);
-       ftgmac100_set_mac(netdev_priv(dev), dev->dev_addr);
+       ftgmac100_write_mac_addr(netdev_priv(dev), dev->dev_addr);
 
        return 0;
 }
 
 static void ftgmac100_init_hw(struct ftgmac100 *priv)
 {
-       /* setup ring buffer base registers */
-       ftgmac100_set_rx_ring_base(priv,
-                                  priv->descs_dma_addr +
-                                  offsetof(struct ftgmac100_descs, rxdes));
-       ftgmac100_set_normal_prio_tx_ring_base(priv,
-                                              priv->descs_dma_addr +
-                                              offsetof(struct ftgmac100_descs, txdes));
+       u32 reg, rfifo_sz, tfifo_sz;
 
-       ftgmac100_set_rx_buffer_size(priv, RX_BUF_SIZE);
+       /* Clear stale interrupts */
+       reg = ioread32(priv->base + FTGMAC100_OFFSET_ISR);
+       iowrite32(reg, priv->base + FTGMAC100_OFFSET_ISR);
 
-       iowrite32(FTGMAC100_APTC_RXPOLL_CNT(1), priv->base + FTGMAC100_OFFSET_APTC);
+       /* Setup RX ring buffer base */
+       iowrite32(priv->rxdes_dma, priv->base + FTGMAC100_OFFSET_RXR_BADR);
 
-       ftgmac100_set_mac(priv, priv->netdev->dev_addr);
-}
+       /* Setup TX ring buffer base */
+       iowrite32(priv->txdes_dma, priv->base + FTGMAC100_OFFSET_NPTXR_BADR);
 
-#define MACCR_ENABLE_ALL       (FTGMAC100_MACCR_TXDMA_EN       | \
-                                FTGMAC100_MACCR_RXDMA_EN       | \
-                                FTGMAC100_MACCR_TXMAC_EN       | \
-                                FTGMAC100_MACCR_RXMAC_EN       | \
-                                FTGMAC100_MACCR_FULLDUP        | \
-                                FTGMAC100_MACCR_CRC_APD        | \
-                                FTGMAC100_MACCR_RX_RUNT        | \
-                                FTGMAC100_MACCR_RX_BROADPKT)
+       /* Configure RX buffer size */
+       iowrite32(FTGMAC100_RBSR_SIZE(RX_BUF_SIZE),
+                 priv->base + FTGMAC100_OFFSET_RBSR);
 
-static void ftgmac100_start_hw(struct ftgmac100 *priv, int speed)
-{
-       int maccr = MACCR_ENABLE_ALL;
+       /* Set RX descriptor autopoll */
+       iowrite32(FTGMAC100_APTC_RXPOLL_CNT(1),
+                 priv->base + FTGMAC100_OFFSET_APTC);
 
-       switch (speed) {
-       default:
-       case 10:
-               break;
-
-       case 100:
-               maccr |= FTGMAC100_MACCR_FAST_MODE;
-               break;
-
-       case 1000:
-               maccr |= FTGMAC100_MACCR_GIGA_MODE;
-               break;
-       }
+       /* Write MAC address */
+       ftgmac100_write_mac_addr(priv, priv->netdev->dev_addr);
 
+       /* Configure descriptor sizes and increase burst sizes according
+        * to the values in the Aspeed SDK. The FIFO arbitration is
+        * enabled and the thresholds are set based on the recommended
+        * values in the AST2400 specification.
+        */
+       iowrite32(FTGMAC100_DBLAC_RXDES_SIZE(2) |   /* 2*8 bytes RX descs */
+                 FTGMAC100_DBLAC_TXDES_SIZE(2) |   /* 2*8 bytes TX descs */
+                 FTGMAC100_DBLAC_RXBURST_SIZE(3) | /* 512 bytes max RX bursts */
+                 FTGMAC100_DBLAC_TXBURST_SIZE(3) | /* 512 bytes max TX bursts */
+                 FTGMAC100_DBLAC_RX_THR_EN |       /* Enable fifo threshold arb */
+                 FTGMAC100_DBLAC_RXFIFO_HTHR(6) |  /* 6/8 of FIFO high threshold */
+                 FTGMAC100_DBLAC_RXFIFO_LTHR(2),   /* 2/8 of FIFO low threshold */
+                 priv->base + FTGMAC100_OFFSET_DBLAC);
+
+       /* Interrupt mitigation configured for 1 interrupt/packet. HW interrupt
+        * mitigation doesn't seem to provide any benefit with NAPI so leave
+        * it at that.
+        */
+       iowrite32(FTGMAC100_ITC_RXINT_THR(1) |
+                 FTGMAC100_ITC_TXINT_THR(1),
+                 priv->base + FTGMAC100_OFFSET_ITC);
+
+       /* Configure FIFO sizes in the TPAFCR register */
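+       /* (rfifo_sz/tfifo_sz come from FEAR bits 0-2 and 3-5 and are
+        * mirrored into TPAFCR bits 24-26 (RX) and 27-29 (TX) below)
+        */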
+       reg = ioread32(priv->base + FTGMAC100_OFFSET_FEAR);
+       rfifo_sz = reg & 0x00000007;
+       tfifo_sz = (reg >> 3) & 0x00000007;
+       reg = ioread32(priv->base + FTGMAC100_OFFSET_TPAFCR);
+       reg &= ~0x3f000000;
+       reg |= (tfifo_sz << 27);
+       reg |= (rfifo_sz << 24);
+       iowrite32(reg, priv->base + FTGMAC100_OFFSET_TPAFCR);
+}
+
+static void ftgmac100_start_hw(struct ftgmac100 *priv)
+{
+       u32 maccr = ioread32(priv->base + FTGMAC100_OFFSET_MACCR);
+
+       /* Keep the original GMAC and FAST bits */
+       maccr &= (FTGMAC100_MACCR_FAST_MODE | FTGMAC100_MACCR_GIGA_MODE);
+
+       /* Add all the main enable bits */
+       maccr |= FTGMAC100_MACCR_TXDMA_EN       |
+                FTGMAC100_MACCR_RXDMA_EN       |
+                FTGMAC100_MACCR_TXMAC_EN       |
+                FTGMAC100_MACCR_RXMAC_EN       |
+                FTGMAC100_MACCR_CRC_APD        |
+                FTGMAC100_MACCR_PHY_LINK_LEVEL |
+                FTGMAC100_MACCR_RX_RUNT        |
+                FTGMAC100_MACCR_RX_BROADPKT;
+
+       /* Add other bits as needed */
+       if (priv->cur_duplex == DUPLEX_FULL)
+               maccr |= FTGMAC100_MACCR_FULLDUP;
+
+       /* Hit the HW */
        iowrite32(maccr, priv->base + FTGMAC100_OFFSET_MACCR);
 }
 
@@ -244,656 +307,641 @@ static void ftgmac100_stop_hw(struct ftgmac100 *priv)
        iowrite32(0, priv->base + FTGMAC100_OFFSET_MACCR);
 }
 
-/******************************************************************************
- * internal functions (receive descriptor)
- *****************************************************************************/
-static bool ftgmac100_rxdes_first_segment(struct ftgmac100_rxdes *rxdes)
-{
-       return rxdes->rxdes0 & cpu_to_le32(FTGMAC100_RXDES0_FRS);
-}
-
-static bool ftgmac100_rxdes_last_segment(struct ftgmac100_rxdes *rxdes)
-{
-       return rxdes->rxdes0 & cpu_to_le32(FTGMAC100_RXDES0_LRS);
-}
-
-static bool ftgmac100_rxdes_packet_ready(struct ftgmac100_rxdes *rxdes)
-{
-       return rxdes->rxdes0 & cpu_to_le32(FTGMAC100_RXDES0_RXPKT_RDY);
-}
-
-static void ftgmac100_rxdes_set_dma_own(const struct ftgmac100 *priv,
-                                       struct ftgmac100_rxdes *rxdes)
-{
-       /* clear status bits */
-       rxdes->rxdes0 &= cpu_to_le32(priv->rxdes0_edorr_mask);
-}
-
-static bool ftgmac100_rxdes_rx_error(struct ftgmac100_rxdes *rxdes)
-{
-       return rxdes->rxdes0 & cpu_to_le32(FTGMAC100_RXDES0_RX_ERR);
-}
-
-static bool ftgmac100_rxdes_crc_error(struct ftgmac100_rxdes *rxdes)
-{
-       return rxdes->rxdes0 & cpu_to_le32(FTGMAC100_RXDES0_CRC_ERR);
-}
-
-static bool ftgmac100_rxdes_frame_too_long(struct ftgmac100_rxdes *rxdes)
-{
-       return rxdes->rxdes0 & cpu_to_le32(FTGMAC100_RXDES0_FTL);
-}
-
-static bool ftgmac100_rxdes_runt(struct ftgmac100_rxdes *rxdes)
-{
-       return rxdes->rxdes0 & cpu_to_le32(FTGMAC100_RXDES0_RUNT);
-}
-
-static bool ftgmac100_rxdes_odd_nibble(struct ftgmac100_rxdes *rxdes)
-{
-       return rxdes->rxdes0 & cpu_to_le32(FTGMAC100_RXDES0_RX_ODD_NB);
-}
-
-static unsigned int ftgmac100_rxdes_data_length(struct ftgmac100_rxdes *rxdes)
-{
-       return le32_to_cpu(rxdes->rxdes0) & FTGMAC100_RXDES0_VDBC;
-}
-
-static bool ftgmac100_rxdes_multicast(struct ftgmac100_rxdes *rxdes)
+static int ftgmac100_alloc_rx_buf(struct ftgmac100 *priv, unsigned int entry,
+                                 struct ftgmac100_rxdes *rxdes, gfp_t gfp)
 {
-       return rxdes->rxdes0 & cpu_to_le32(FTGMAC100_RXDES0_MULTICAST);
-}
-
-static void ftgmac100_rxdes_set_end_of_ring(const struct ftgmac100 *priv,
-                                           struct ftgmac100_rxdes *rxdes)
-{
-       rxdes->rxdes0 |= cpu_to_le32(priv->rxdes0_edorr_mask);
-}
-
-static void ftgmac100_rxdes_set_dma_addr(struct ftgmac100_rxdes *rxdes,
-                                        dma_addr_t addr)
-{
-       rxdes->rxdes3 = cpu_to_le32(addr);
-}
-
-static dma_addr_t ftgmac100_rxdes_get_dma_addr(struct ftgmac100_rxdes *rxdes)
-{
-       return le32_to_cpu(rxdes->rxdes3);
-}
-
-static bool ftgmac100_rxdes_is_tcp(struct ftgmac100_rxdes *rxdes)
-{
-       return (rxdes->rxdes1 & cpu_to_le32(FTGMAC100_RXDES1_PROT_MASK)) ==
-              cpu_to_le32(FTGMAC100_RXDES1_PROT_TCPIP);
-}
-
-static bool ftgmac100_rxdes_is_udp(struct ftgmac100_rxdes *rxdes)
-{
-       return (rxdes->rxdes1 & cpu_to_le32(FTGMAC100_RXDES1_PROT_MASK)) ==
-              cpu_to_le32(FTGMAC100_RXDES1_PROT_UDPIP);
-}
-
-static bool ftgmac100_rxdes_tcpcs_err(struct ftgmac100_rxdes *rxdes)
-{
-       return rxdes->rxdes1 & cpu_to_le32(FTGMAC100_RXDES1_TCP_CHKSUM_ERR);
-}
-
-static bool ftgmac100_rxdes_udpcs_err(struct ftgmac100_rxdes *rxdes)
-{
-       return rxdes->rxdes1 & cpu_to_le32(FTGMAC100_RXDES1_UDP_CHKSUM_ERR);
-}
-
-static bool ftgmac100_rxdes_ipcs_err(struct ftgmac100_rxdes *rxdes)
-{
-       return rxdes->rxdes1 & cpu_to_le32(FTGMAC100_RXDES1_IP_CHKSUM_ERR);
-}
+       struct net_device *netdev = priv->netdev;
+       struct sk_buff *skb;
+       dma_addr_t map;
+       int err;
 
-static inline struct page **ftgmac100_rxdes_page_slot(struct ftgmac100 *priv,
-                                                     struct ftgmac100_rxdes *rxdes)
-{
-       return &priv->rx_pages[rxdes - priv->descs->rxdes];
-}
+       skb = netdev_alloc_skb_ip_align(netdev, RX_BUF_SIZE);
+       if (unlikely(!skb)) {
+               if (net_ratelimit())
+                       netdev_warn(netdev, "failed to allocate rx skb\n");
+               err = -ENOMEM;
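+               /* Point the descriptor at the scratch buffer so the
+                * MAC still has a valid DMA address to write to; the
+                * received data will simply be dropped
+                */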
+               map = priv->rx_scratch_dma;
+       } else {
+               map = dma_map_single(priv->dev, skb->data, RX_BUF_SIZE,
+                                    DMA_FROM_DEVICE);
+               if (unlikely(dma_mapping_error(priv->dev, map))) {
+                       if (net_ratelimit())
+                               netdev_err(netdev, "failed to map rx page\n");
+                       dev_kfree_skb_any(skb);
+                       map = priv->rx_scratch_dma;
+                       skb = NULL;
+                       err = -ENOMEM;
+               }
+       }
 
-/*
- * rxdes2 is not used by hardware. We use it to keep track of page.
- * Since hardware does not touch it, we can skip cpu_to_le32()/le32_to_cpu().
- */
-static void ftgmac100_rxdes_set_page(struct ftgmac100 *priv,
-                                    struct ftgmac100_rxdes *rxdes,
-                                    struct page *page)
-{
-       *ftgmac100_rxdes_page_slot(priv, rxdes) = page;
-}
+       /* Store skb */
+       priv->rx_skbs[entry] = skb;
 
-static struct page *ftgmac100_rxdes_get_page(struct ftgmac100 *priv,
-                                            struct ftgmac100_rxdes *rxdes)
-{
-       return *ftgmac100_rxdes_page_slot(priv, rxdes);
-}
+       /* Store DMA address into RX desc */
+       rxdes->rxdes3 = cpu_to_le32(map);
 
-/******************************************************************************
- * internal functions (receive)
- *****************************************************************************/
-static int ftgmac100_next_rx_pointer(int pointer)
-{
-       return (pointer + 1) & (RX_QUEUE_ENTRIES - 1);
-}
+       /* Ensure the above is ordered vs clearing the OWN bit */
+       dma_wmb();
 
-static void ftgmac100_rx_pointer_advance(struct ftgmac100 *priv)
-{
-       priv->rx_pointer = ftgmac100_next_rx_pointer(priv->rx_pointer);
-}
+       /* Clean status (which resets own bit) */
+       if (entry == (priv->rx_q_entries - 1))
+               rxdes->rxdes0 = cpu_to_le32(priv->rxdes0_edorr_mask);
+       else
+               rxdes->rxdes0 = 0;
 
-static struct ftgmac100_rxdes *ftgmac100_current_rxdes(struct ftgmac100 *priv)
-{
-       return &priv->descs->rxdes[priv->rx_pointer];
+       return 0;
 }
 
-static struct ftgmac100_rxdes *
-ftgmac100_rx_locate_first_segment(struct ftgmac100 *priv)
+static unsigned int ftgmac100_next_rx_pointer(struct ftgmac100 *priv,
+                                             unsigned int pointer)
 {
-       struct ftgmac100_rxdes *rxdes = ftgmac100_current_rxdes(priv);
-
-       while (ftgmac100_rxdes_packet_ready(rxdes)) {
-               if (ftgmac100_rxdes_first_segment(rxdes))
-                       return rxdes;
-
-               ftgmac100_rxdes_set_dma_own(priv, rxdes);
-               ftgmac100_rx_pointer_advance(priv);
-               rxdes = ftgmac100_current_rxdes(priv);
-       }
-
-       return NULL;
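+       /* The ring size is a power of two (enforced by
+        * ftgmac100_set_ringparam), so masking with size - 1 wraps the
+        * pointer; e.g. with 256 entries, (255 + 1) & 255 == 0
+        */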
+       return (pointer + 1) & (priv->rx_q_entries - 1);
 }
 
-static bool ftgmac100_rx_packet_error(struct ftgmac100 *priv,
-                                     struct ftgmac100_rxdes *rxdes)
+static void ftgmac100_rx_packet_error(struct ftgmac100 *priv, u32 status)
 {
        struct net_device *netdev = priv->netdev;
-       bool error = false;
-
-       if (unlikely(ftgmac100_rxdes_rx_error(rxdes))) {
-               if (net_ratelimit())
-                       netdev_info(netdev, "rx err\n");
 
+       if (status & FTGMAC100_RXDES0_RX_ERR)
                netdev->stats.rx_errors++;
-               error = true;
-       }
-
-       if (unlikely(ftgmac100_rxdes_crc_error(rxdes))) {
-               if (net_ratelimit())
-                       netdev_info(netdev, "rx crc err\n");
 
+       if (status & FTGMAC100_RXDES0_CRC_ERR)
                netdev->stats.rx_crc_errors++;
-               error = true;
-       } else if (unlikely(ftgmac100_rxdes_ipcs_err(rxdes))) {
-               if (net_ratelimit())
-                       netdev_info(netdev, "rx IP checksum err\n");
-
-               error = true;
-       }
-
-       if (unlikely(ftgmac100_rxdes_frame_too_long(rxdes))) {
-               if (net_ratelimit())
-                       netdev_info(netdev, "rx frame too long\n");
-
-               netdev->stats.rx_length_errors++;
-               error = true;
-       } else if (unlikely(ftgmac100_rxdes_runt(rxdes))) {
-               if (net_ratelimit())
-                       netdev_info(netdev, "rx runt\n");
-
-               netdev->stats.rx_length_errors++;
-               error = true;
-       } else if (unlikely(ftgmac100_rxdes_odd_nibble(rxdes))) {
-               if (net_ratelimit())
-                       netdev_info(netdev, "rx odd nibble\n");
 
+       if (status & (FTGMAC100_RXDES0_FTL |
+                     FTGMAC100_RXDES0_RUNT |
+                     FTGMAC100_RXDES0_RX_ODD_NB))
                netdev->stats.rx_length_errors++;
-               error = true;
-       }
-
-       return error;
 }
 
-static void ftgmac100_rx_drop_packet(struct ftgmac100 *priv)
+static bool ftgmac100_rx_packet(struct ftgmac100 *priv, int *processed)
 {
        struct net_device *netdev = priv->netdev;
-       struct ftgmac100_rxdes *rxdes = ftgmac100_current_rxdes(priv);
-       bool done = false;
+       struct ftgmac100_rxdes *rxdes;
+       struct sk_buff *skb;
+       unsigned int pointer, size;
+       u32 status, csum_vlan;
+       dma_addr_t map;
 
-       if (net_ratelimit())
-               netdev_dbg(netdev, "drop packet %p\n", rxdes);
+       /* Grab next RX descriptor */
+       pointer = priv->rx_pointer;
+       rxdes = &priv->rxdes[pointer];
 
-       do {
-               if (ftgmac100_rxdes_last_segment(rxdes))
-                       done = true;
+       /* Grab descriptor status */
+       status = le32_to_cpu(rxdes->rxdes0);
 
-               ftgmac100_rxdes_set_dma_own(priv, rxdes);
-               ftgmac100_rx_pointer_advance(priv);
-               rxdes = ftgmac100_current_rxdes(priv);
-       } while (!done && ftgmac100_rxdes_packet_ready(rxdes));
+       /* Do we have a packet? */
+       if (!(status & FTGMAC100_RXDES0_RXPKT_RDY))
+               return false;
 
-       netdev->stats.rx_dropped++;
-}
+       /* Order subsequent reads with the test for the ready bit */
+       dma_rmb();
 
-static bool ftgmac100_rx_packet(struct ftgmac100 *priv, int *processed)
-{
-       struct net_device *netdev = priv->netdev;
-       struct ftgmac100_rxdes *rxdes;
-       struct sk_buff *skb;
-       bool done = false;
+       /* We don't cope with fragmented RX packets */
+       if (unlikely(!(status & FTGMAC100_RXDES0_FRS) ||
+                    !(status & FTGMAC100_RXDES0_LRS)))
+               goto drop;
 
-       rxdes = ftgmac100_rx_locate_first_segment(priv);
-       if (!rxdes)
-               return false;
+       /* Grab received size and csum vlan field in the descriptor */
+       size = status & FTGMAC100_RXDES0_VDBC;
+       csum_vlan = le32_to_cpu(rxdes->rxdes1);
 
-       if (unlikely(ftgmac100_rx_packet_error(priv, rxdes))) {
-               ftgmac100_rx_drop_packet(priv);
-               return true;
+       /* Any error (other than csum offload) flagged? */
+       if (unlikely(status & RXDES0_ANY_ERROR)) {
+               /* Correct for incorrect flagging of runt packets
+                * with vlan tags... Just accept a runt packet that
+                * has been flagged as vlan and whose size is at
+                * least 60 bytes.
+                */
+               if ((status & FTGMAC100_RXDES0_RUNT) &&
+                   (csum_vlan & FTGMAC100_RXDES1_VLANTAG_AVAIL) &&
+                   (size >= 60))
+                       status &= ~FTGMAC100_RXDES0_RUNT;
+
+               /* Any error still in there? */
+               if (status & RXDES0_ANY_ERROR) {
+                       ftgmac100_rx_packet_error(priv, status);
+                       goto drop;
+               }
        }
 
-       /* start processing */
-       skb = netdev_alloc_skb_ip_align(netdev, 128);
-       if (unlikely(!skb)) {
-               if (net_ratelimit())
-                       netdev_err(netdev, "rx skb alloc failed\n");
-
-               ftgmac100_rx_drop_packet(priv);
-               return true;
+       /* If the packet had no skb (failed to allocate earlier)
+        * then try to allocate one and skip
+        */
+       skb = priv->rx_skbs[pointer];
+       if (unlikely(!skb)) {
+               ftgmac100_alloc_rx_buf(priv, pointer, rxdes, GFP_ATOMIC);
+               goto drop;
        }
 
-       if (unlikely(ftgmac100_rxdes_multicast(rxdes)))
+       if (unlikely(status & FTGMAC100_RXDES0_MULTICAST))
                netdev->stats.multicast++;
 
-       /*
-        * It seems that HW does checksum incorrectly with fragmented packets,
-        * so we are conservative here - if HW checksum error, let software do
-        * the checksum again.
+       /* If the HW found checksum errors, bounce it to software.
+        *
+        * If we didn't, we need to see if the packet was recognized
+        * by HW as one of the supported checksummed protocols before
+        * we accept the HW test results.
         */
-       if ((ftgmac100_rxdes_is_tcp(rxdes) && !ftgmac100_rxdes_tcpcs_err(rxdes)) ||
-           (ftgmac100_rxdes_is_udp(rxdes) && !ftgmac100_rxdes_udpcs_err(rxdes)))
-               skb->ip_summed = CHECKSUM_UNNECESSARY;
-
-       do {
-               dma_addr_t map = ftgmac100_rxdes_get_dma_addr(rxdes);
-               struct page *page = ftgmac100_rxdes_get_page(priv, rxdes);
-               unsigned int size;
-
-               dma_unmap_page(priv->dev, map, RX_BUF_SIZE, DMA_FROM_DEVICE);
+       if (netdev->features & NETIF_F_RXCSUM) {
+               u32 err_bits = FTGMAC100_RXDES1_TCP_CHKSUM_ERR |
+                       FTGMAC100_RXDES1_UDP_CHKSUM_ERR |
+                       FTGMAC100_RXDES1_IP_CHKSUM_ERR;
+               if ((csum_vlan & err_bits) ||
+                   !(csum_vlan & FTGMAC100_RXDES1_PROT_MASK))
+                       skb->ip_summed = CHECKSUM_NONE;
+               else
+                       skb->ip_summed = CHECKSUM_UNNECESSARY;
+       }
 
-               size = ftgmac100_rxdes_data_length(rxdes);
-               skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags, page, 0, size);
+       /* Transfer received size to skb */
+       skb_put(skb, size);
 
-               skb->len += size;
-               skb->data_len += size;
-               skb->truesize += PAGE_SIZE;
+       /* Tear down DMA mapping, do necessary cache management */
+       map = le32_to_cpu(rxdes->rxdes3);
 
-               if (ftgmac100_rxdes_last_segment(rxdes))
-                       done = true;
+#if defined(CONFIG_ARM) && !defined(CONFIG_ARM_DMA_USE_IOMMU)
+       /* When we don't have an iommu, we can save cycles by not
+        * invalidating the cache for the part of the packet that
+        * wasn't received.
+        */
+       dma_unmap_single(priv->dev, map, size, DMA_FROM_DEVICE);
+#else
+       dma_unmap_single(priv->dev, map, RX_BUF_SIZE, DMA_FROM_DEVICE);
+#endif
 
-               ftgmac100_alloc_rx_page(priv, rxdes, GFP_ATOMIC);
 
-               ftgmac100_rx_pointer_advance(priv);
-               rxdes = ftgmac100_current_rxdes(priv);
-       } while (!done);
+       /* Replenish RX ring */
+       ftgmac100_alloc_rx_buf(priv, pointer, rxdes, GFP_ATOMIC);
+       priv->rx_pointer = ftgmac100_next_rx_pointer(priv, pointer);
 
-       /* Small frames are copied into linear part of skb to free one page */
-       if (skb->len <= 128) {
-               skb->truesize -= PAGE_SIZE;
-               __pskb_pull_tail(skb, skb->len);
-       } else {
-               /* We pull the minimum amount into linear part */
-               __pskb_pull_tail(skb, ETH_HLEN);
-       }
        skb->protocol = eth_type_trans(skb, netdev);
 
        netdev->stats.rx_packets++;
-       netdev->stats.rx_bytes += skb->len;
+       netdev->stats.rx_bytes += size;
 
        /* push packet to protocol stack */
-       napi_gro_receive(&priv->napi, skb);
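+       /* Only feed GRO packets whose checksum the hardware already
+        * validated; everything else takes the plain receive path
+        */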
+       if (skb->ip_summed == CHECKSUM_NONE)
+               netif_receive_skb(skb);
+       else
+               napi_gro_receive(&priv->napi, skb);
 
        (*processed)++;
        return true;
+
+ drop:
+       /* Clean rxdes0 (which resets own bit) */
+       rxdes->rxdes0 = cpu_to_le32(status & priv->rxdes0_edorr_mask);
+       priv->rx_pointer = ftgmac100_next_rx_pointer(priv, pointer);
+       netdev->stats.rx_dropped++;
+       return true;
 }
 
-/******************************************************************************
- * internal functions (transmit descriptor)
- *****************************************************************************/
-static void ftgmac100_txdes_reset(const struct ftgmac100 *priv,
-                                 struct ftgmac100_txdes *txdes)
+static u32 ftgmac100_base_tx_ctlstat(struct ftgmac100 *priv,
+                                    unsigned int index)
 {
-       /* clear all except end of ring bit */
-       txdes->txdes0 &= cpu_to_le32(priv->txdes0_edotr_mask);
-       txdes->txdes1 = 0;
-       txdes->txdes2 = 0;
-       txdes->txdes3 = 0;
+       if (index == (priv->tx_q_entries - 1))
+               return priv->txdes0_edotr_mask;
+       else
+               return 0;
 }
 
-static bool ftgmac100_txdes_owned_by_dma(struct ftgmac100_txdes *txdes)
+static unsigned int ftgmac100_next_tx_pointer(struct ftgmac100 *priv,
+                                             unsigned int pointer)
 {
-       return txdes->txdes0 & cpu_to_le32(FTGMAC100_TXDES0_TXDMA_OWN);
+       return (pointer + 1) & (priv->tx_q_entries - 1);
 }
 
-static void ftgmac100_txdes_set_dma_own(struct ftgmac100_txdes *txdes)
+static u32 ftgmac100_tx_buf_avail(struct ftgmac100 *priv)
 {
-       /*
-        * Make sure dma own bit will not be set before any other
-        * descriptor fields.
+       /* Returns the number of available slots in the TX queue
+        *
+        * This always leaves one free slot so we don't have to
+        * worry about empty vs. full, and this simplifies the
+        * test for ftgmac100_tx_buf_cleanable() below
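+        *
+        * For example, with 128 entries, tx_clean_pointer == 5 and
+        * tx_pointer == 10 yield (5 - 10 - 1) & 127 == 122 free slots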
         */
-       wmb();
-       txdes->txdes0 |= cpu_to_le32(FTGMAC100_TXDES0_TXDMA_OWN);
+       return (priv->tx_clean_pointer - priv->tx_pointer - 1) &
+               (priv->tx_q_entries - 1);
 }
 
-static void ftgmac100_txdes_set_end_of_ring(const struct ftgmac100 *priv,
-                                           struct ftgmac100_txdes *txdes)
+static bool ftgmac100_tx_buf_cleanable(struct ftgmac100 *priv)
 {
-       txdes->txdes0 |= cpu_to_le32(priv->txdes0_edotr_mask);
+       return priv->tx_pointer != priv->tx_clean_pointer;
 }
 
-static void ftgmac100_txdes_set_first_segment(struct ftgmac100_txdes *txdes)
+static void ftgmac100_free_tx_packet(struct ftgmac100 *priv,
+                                    unsigned int pointer,
+                                    struct sk_buff *skb,
+                                    struct ftgmac100_txdes *txdes,
+                                    u32 ctl_stat)
 {
-       txdes->txdes0 |= cpu_to_le32(FTGMAC100_TXDES0_FTS);
-}
+       dma_addr_t map = le32_to_cpu(txdes->txdes3);
+       size_t len;
 
-static void ftgmac100_txdes_set_last_segment(struct ftgmac100_txdes *txdes)
-{
-       txdes->txdes0 |= cpu_to_le32(FTGMAC100_TXDES0_LTS);
-}
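+       /* The first segment maps the skb head (dma_map_single() in
+        * ftgmac100_hard_start_xmit()), later segments map page
+        * fragments, so each needs the matching unmap variant
+        */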
+       if (ctl_stat & FTGMAC100_TXDES0_FTS) {
+               len = skb_headlen(skb);
+               dma_unmap_single(priv->dev, map, len, DMA_TO_DEVICE);
+       } else {
+               len = FTGMAC100_TXDES0_TXBUF_SIZE(ctl_stat);
+               dma_unmap_page(priv->dev, map, len, DMA_TO_DEVICE);
+       }
 
-static void ftgmac100_txdes_set_buffer_size(struct ftgmac100_txdes *txdes,
-                                           unsigned int len)
-{
-       txdes->txdes0 |= cpu_to_le32(FTGMAC100_TXDES0_TXBUF_SIZE(len));
+       /* Free SKB on last segment */
+       if (ctl_stat & FTGMAC100_TXDES0_LTS)
+               dev_kfree_skb(skb);
+       priv->tx_skbs[pointer] = NULL;
 }
 
-static void ftgmac100_txdes_set_txint(struct ftgmac100_txdes *txdes)
+static bool ftgmac100_tx_complete_packet(struct ftgmac100 *priv)
 {
-       txdes->txdes1 |= cpu_to_le32(FTGMAC100_TXDES1_TXIC);
-}
+       struct net_device *netdev = priv->netdev;
+       struct ftgmac100_txdes *txdes;
+       struct sk_buff *skb;
+       unsigned int pointer;
+       u32 ctl_stat;
 
-static void ftgmac100_txdes_set_tcpcs(struct ftgmac100_txdes *txdes)
-{
-       txdes->txdes1 |= cpu_to_le32(FTGMAC100_TXDES1_TCP_CHKSUM);
-}
+       pointer = priv->tx_clean_pointer;
+       txdes = &priv->txdes[pointer];
 
-static void ftgmac100_txdes_set_udpcs(struct ftgmac100_txdes *txdes)
-{
-       txdes->txdes1 |= cpu_to_le32(FTGMAC100_TXDES1_UDP_CHKSUM);
-}
+       ctl_stat = le32_to_cpu(txdes->txdes0);
+       if (ctl_stat & FTGMAC100_TXDES0_TXDMA_OWN)
+               return false;
 
-static void ftgmac100_txdes_set_ipcs(struct ftgmac100_txdes *txdes)
-{
-       txdes->txdes1 |= cpu_to_le32(FTGMAC100_TXDES1_IP_CHKSUM);
-}
+       skb = priv->tx_skbs[pointer];
+       netdev->stats.tx_packets++;
+       netdev->stats.tx_bytes += skb->len;
+       ftgmac100_free_tx_packet(priv, pointer, skb, txdes, ctl_stat);
+       txdes->txdes0 = cpu_to_le32(ctl_stat & priv->txdes0_edotr_mask);
 
-static void ftgmac100_txdes_set_dma_addr(struct ftgmac100_txdes *txdes,
-                                        dma_addr_t addr)
-{
-       txdes->txdes3 = cpu_to_le32(addr);
-}
+       priv->tx_clean_pointer = ftgmac100_next_tx_pointer(priv, pointer);
 
-static dma_addr_t ftgmac100_txdes_get_dma_addr(struct ftgmac100_txdes *txdes)
-{
-       return le32_to_cpu(txdes->txdes3);
+       return true;
 }
 
-/*
- * txdes2 is not used by hardware. We use it to keep track of socket buffer.
- * Since hardware does not touch it, we can skip cpu_to_le32()/le32_to_cpu().
- */
-static void ftgmac100_txdes_set_skb(struct ftgmac100_txdes *txdes,
-                                   struct sk_buff *skb)
+static void ftgmac100_tx_complete(struct ftgmac100 *priv)
 {
-       txdes->txdes2 = (unsigned int)skb;
-}
+       struct net_device *netdev = priv->netdev;
 
-static struct sk_buff *ftgmac100_txdes_get_skb(struct ftgmac100_txdes *txdes)
-{
-       return (struct sk_buff *)txdes->txdes2;
-}
+       /* Process all completed packets */
+       while (ftgmac100_tx_buf_cleanable(priv) &&
+              ftgmac100_tx_complete_packet(priv))
+               ;
 
-/******************************************************************************
- * internal functions (transmit)
- *****************************************************************************/
-static int ftgmac100_next_tx_pointer(int pointer)
-{
-       return (pointer + 1) & (TX_QUEUE_ENTRIES - 1);
+       /* Restart queue if needed */
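+       /* This barrier pairs with the smp_mb() in
+        * ftgmac100_hard_start_xmit() so that queue stops and ring
+        * updates are observed in a consistent order
+        */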
+       smp_mb();
+       if (unlikely(netif_queue_stopped(netdev) &&
+                    ftgmac100_tx_buf_avail(priv) >= TX_THRESHOLD)) {
+               struct netdev_queue *txq;
+
+               txq = netdev_get_tx_queue(netdev, 0);
+               __netif_tx_lock(txq, smp_processor_id());
+               if (netif_queue_stopped(netdev) &&
+                   ftgmac100_tx_buf_avail(priv) >= TX_THRESHOLD)
+                       netif_wake_queue(netdev);
+               __netif_tx_unlock(txq);
+       }
 }
 
-static void ftgmac100_tx_pointer_advance(struct ftgmac100 *priv)
+static bool ftgmac100_prep_tx_csum(struct sk_buff *skb, u32 *csum_vlan)
 {
-       priv->tx_pointer = ftgmac100_next_tx_pointer(priv->tx_pointer);
-}
+       if (skb->protocol == cpu_to_be16(ETH_P_IP)) {
+               u8 ip_proto = ip_hdr(skb)->protocol;
 
-static void ftgmac100_tx_clean_pointer_advance(struct ftgmac100 *priv)
-{
-       priv->tx_clean_pointer = ftgmac100_next_tx_pointer(priv->tx_clean_pointer);
+               *csum_vlan |= FTGMAC100_TXDES1_IP_CHKSUM;
+               switch (ip_proto) {
+               case IPPROTO_TCP:
+                       *csum_vlan |= FTGMAC100_TXDES1_TCP_CHKSUM;
+                       return true;
+               case IPPROTO_UDP:
+                       *csum_vlan |= FTGMAC100_TXDES1_UDP_CHKSUM;
+                       return true;
+               case IPPROTO_IP:
+                       return true;
+               }
+       }
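+       /* Not an IPv4 packet the HW can offload: fall back to a
+        * software checksum (skb_checksum_help() returns 0 on success)
+        */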
+       return skb_checksum_help(skb) == 0;
 }
 
-static struct ftgmac100_txdes *ftgmac100_current_txdes(struct ftgmac100 *priv)
+static int ftgmac100_hard_start_xmit(struct sk_buff *skb,
+                                    struct net_device *netdev)
 {
-       return &priv->descs->txdes[priv->tx_pointer];
-}
+       struct ftgmac100 *priv = netdev_priv(netdev);
+       struct ftgmac100_txdes *txdes, *first;
+       unsigned int pointer, nfrags, len, i, j;
+       u32 f_ctl_stat, ctl_stat, csum_vlan;
+       dma_addr_t map;
 
-static struct ftgmac100_txdes *
-ftgmac100_current_clean_txdes(struct ftgmac100 *priv)
-{
-       return &priv->descs->txdes[priv->tx_clean_pointer];
-}
+       /* The HW doesn't pad small frames */
+       if (eth_skb_pad(skb)) {
+               netdev->stats.tx_dropped++;
+               return NETDEV_TX_OK;
+       }
 
-static bool ftgmac100_tx_complete_packet(struct ftgmac100 *priv)
-{
-       struct net_device *netdev = priv->netdev;
-       struct ftgmac100_txdes *txdes;
-       struct sk_buff *skb;
-       dma_addr_t map;
+       /* Reject oversize packets */
+       if (unlikely(skb->len > MAX_PKT_SIZE)) {
+               if (net_ratelimit())
+                       netdev_dbg(netdev, "tx packet too big\n");
+               goto drop;
+       }
 
-       if (priv->tx_pending == 0)
-               return false;
+       /* Do we have a limit on the number of fragments? I have yet to
+        * get a reply from Aspeed. If there is one, I haven't hit it.
+        */
+       nfrags = skb_shinfo(skb)->nr_frags;
 
-       txdes = ftgmac100_current_clean_txdes(priv);
+       /* Get header len */
+       len = skb_headlen(skb);
 
-       if (ftgmac100_txdes_owned_by_dma(txdes))
-               return false;
+       /* Map the packet head */
+       map = dma_map_single(priv->dev, skb->data, len, DMA_TO_DEVICE);
+       if (dma_mapping_error(priv->dev, map)) {
+               if (net_ratelimit())
+                       netdev_err(netdev, "map tx packet head failed\n");
+               goto drop;
+       }
 
-       skb = ftgmac100_txdes_get_skb(txdes);
-       map = ftgmac100_txdes_get_dma_addr(txdes);
+       /* Grab the next free tx descriptor */
+       pointer = priv->tx_pointer;
+       txdes = first = &priv->txdes[pointer];
 
-       netdev->stats.tx_packets++;
-       netdev->stats.tx_bytes += skb->len;
+       /* Set it up with the packet head. Don't write the control word
+        * (which carries the OWN bit) to the ring just yet
+        */
+       priv->tx_skbs[pointer] = skb;
+       f_ctl_stat = ftgmac100_base_tx_ctlstat(priv, pointer);
+       f_ctl_stat |= FTGMAC100_TXDES0_TXDMA_OWN;
+       f_ctl_stat |= FTGMAC100_TXDES0_TXBUF_SIZE(len);
+       f_ctl_stat |= FTGMAC100_TXDES0_FTS;
+       if (nfrags == 0)
+               f_ctl_stat |= FTGMAC100_TXDES0_LTS;
+       txdes->txdes3 = cpu_to_le32(map);
+
+       /* Setup HW checksumming */
+       csum_vlan = 0;
+       if (skb->ip_summed == CHECKSUM_PARTIAL &&
+           !ftgmac100_prep_tx_csum(skb, &csum_vlan))
+               goto drop;
+       txdes->txdes1 = cpu_to_le32(csum_vlan);
+
+       /* Next descriptor */
+       pointer = ftgmac100_next_tx_pointer(priv, pointer);
+
+       /* Add the fragments */
+       for (i = 0; i < nfrags; i++) {
+               skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
+
+               len = frag->size;
+
+               /* Map it */
+               map = skb_frag_dma_map(priv->dev, frag, 0, len,
+                                      DMA_TO_DEVICE);
+               if (dma_mapping_error(priv->dev, map))
+                       goto dma_err;
+
+               /* Setup descriptor */
+               priv->tx_skbs[pointer] = skb;
+               txdes = &priv->txdes[pointer];
+               ctl_stat = ftgmac100_base_tx_ctlstat(priv, pointer);
+               ctl_stat |= FTGMAC100_TXDES0_TXDMA_OWN;
+               ctl_stat |= FTGMAC100_TXDES0_TXBUF_SIZE(len);
+               if (i == (nfrags - 1))
+                       ctl_stat |= FTGMAC100_TXDES0_LTS;
+               txdes->txdes0 = cpu_to_le32(ctl_stat);
+               txdes->txdes1 = 0;
+               txdes->txdes3 = cpu_to_le32(map);
+
+               /* Next one */
+               pointer = ftgmac100_next_tx_pointer(priv, pointer);
+       }
 
-       dma_unmap_single(priv->dev, map, skb_headlen(skb), DMA_TO_DEVICE);
+       /* Order the previous packet and descriptor updates
+        * before setting the OWN bit on the first descriptor.
+        */
+       dma_wmb();
+       first->txdes0 = cpu_to_le32(f_ctl_stat);
 
-       dev_kfree_skb(skb);
+       /* Update next TX pointer */
+       priv->tx_pointer = pointer;
 
-       ftgmac100_txdes_reset(priv, txdes);
+       /* If there isn't enough room for all the fragments of a new packet
+        * in the TX ring, stop the queue. The sequence below is race free
+        * vs. a concurrent restart in ftgmac100_poll()
+        */
+       if (unlikely(ftgmac100_tx_buf_avail(priv) < TX_THRESHOLD)) {
+               netif_stop_queue(netdev);
+               /* Order the queue stop with the test below */
+               smp_mb();
+               if (ftgmac100_tx_buf_avail(priv) >= TX_THRESHOLD)
+                       netif_wake_queue(netdev);
+       }
 
-       ftgmac100_tx_clean_pointer_advance(priv);
+       /* Poke transmitter to read the updated TX descriptors */
+       iowrite32(1, priv->base + FTGMAC100_OFFSET_NPTXPD);
 
-       spin_lock(&priv->tx_lock);
-       priv->tx_pending--;
-       spin_unlock(&priv->tx_lock);
-       netif_wake_queue(netdev);
+       return NETDEV_TX_OK;
 
-       return true;
-}
+ dma_err:
+       if (net_ratelimit())
+               netdev_err(netdev, "map tx fragment failed\n");
+
+       /* Free head */
+       pointer = priv->tx_pointer;
+       ftgmac100_free_tx_packet(priv, pointer, skb, first, f_ctl_stat);
+       first->txdes0 = cpu_to_le32(f_ctl_stat & priv->txdes0_edotr_mask);
+
+       /* Then all fragments */
+       for (j = 0; j < i; j++) {
+               pointer = ftgmac100_next_tx_pointer(priv, pointer);
+               txdes = &priv->txdes[pointer];
+               ctl_stat = le32_to_cpu(txdes->txdes0);
+               ftgmac100_free_tx_packet(priv, pointer, skb, txdes, ctl_stat);
+               txdes->txdes0 = cpu_to_le32(ctl_stat & priv->txdes0_edotr_mask);
+       }
 
-static void ftgmac100_tx_complete(struct ftgmac100 *priv)
-{
-       while (ftgmac100_tx_complete_packet(priv))
-               ;
+       /* This cannot be reached if we successfully mapped the
+        * last fragment, so we know ftgmac100_free_tx_packet()
+        * hasn't freed the skb yet.
+        */
+ drop:
+       /* Drop the packet */
+       dev_kfree_skb_any(skb);
+       netdev->stats.tx_dropped++;
+
+       return NETDEV_TX_OK;
 }
 
-static int ftgmac100_xmit(struct ftgmac100 *priv, struct sk_buff *skb,
-                         dma_addr_t map)
+static void ftgmac100_free_buffers(struct ftgmac100 *priv)
 {
-       struct net_device *netdev = priv->netdev;
-       struct ftgmac100_txdes *txdes;
-       unsigned int len = (skb->len < ETH_ZLEN) ? ETH_ZLEN : skb->len;
-
-       txdes = ftgmac100_current_txdes(priv);
-       ftgmac100_tx_pointer_advance(priv);
-
-       /* setup TX descriptor */
-       ftgmac100_txdes_set_skb(txdes, skb);
-       ftgmac100_txdes_set_dma_addr(txdes, map);
-       ftgmac100_txdes_set_buffer_size(txdes, len);
-
-       ftgmac100_txdes_set_first_segment(txdes);
-       ftgmac100_txdes_set_last_segment(txdes);
-       ftgmac100_txdes_set_txint(txdes);
-       if (skb->ip_summed == CHECKSUM_PARTIAL) {
-               __be16 protocol = skb->protocol;
-
-               if (protocol == cpu_to_be16(ETH_P_IP)) {
-                       u8 ip_proto = ip_hdr(skb)->protocol;
-
-                       ftgmac100_txdes_set_ipcs(txdes);
-                       if (ip_proto == IPPROTO_TCP)
-                               ftgmac100_txdes_set_tcpcs(txdes);
-                       else if (ip_proto == IPPROTO_UDP)
-                               ftgmac100_txdes_set_udpcs(txdes);
-               }
+       int i;
+
+       /* Free all RX buffers */
+       for (i = 0; i < priv->rx_q_entries; i++) {
+               struct ftgmac100_rxdes *rxdes = &priv->rxdes[i];
+               struct sk_buff *skb = priv->rx_skbs[i];
+               dma_addr_t map = le32_to_cpu(rxdes->rxdes3);
+
+               if (!skb)
+                       continue;
+
+               priv->rx_skbs[i] = NULL;
+               dma_unmap_single(priv->dev, map, RX_BUF_SIZE, DMA_FROM_DEVICE);
+               dev_kfree_skb_any(skb);
        }
 
-       spin_lock(&priv->tx_lock);
-       priv->tx_pending++;
-       if (priv->tx_pending == TX_QUEUE_ENTRIES)
-               netif_stop_queue(netdev);
+       /* Free all TX buffers */
+       for (i = 0; i < priv->tx_q_entries; i++) {
+               struct ftgmac100_txdes *txdes = &priv->txdes[i];
+               struct sk_buff *skb = priv->tx_skbs[i];
 
-       /* start transmit */
-       ftgmac100_txdes_set_dma_own(txdes);
-       spin_unlock(&priv->tx_lock);
+               if (!skb)
+                       continue;
+               ftgmac100_free_tx_packet(priv, i, skb, txdes,
+                                        le32_to_cpu(txdes->txdes0));
+       }
+}
+
+static void ftgmac100_free_rings(struct ftgmac100 *priv)
+{
+       /* Free skb arrays */
+       kfree(priv->rx_skbs);
+       kfree(priv->tx_skbs);
 
-       ftgmac100_txdma_normal_prio_start_polling(priv);
+       /* Free descriptors */
+       if (priv->rxdes)
+               dma_free_coherent(priv->dev, MAX_RX_QUEUE_ENTRIES *
+                                 sizeof(struct ftgmac100_rxdes),
+                                 priv->rxdes, priv->rxdes_dma);
+       priv->rxdes = NULL;
 
-       return NETDEV_TX_OK;
+       if (priv->txdes)
+               dma_free_coherent(priv->dev, MAX_TX_QUEUE_ENTRIES *
+                                 sizeof(struct ftgmac100_txdes),
+                                 priv->txdes, priv->txdes_dma);
+       priv->txdes = NULL;
+
+       /* Free scratch packet buffer */
+       if (priv->rx_scratch)
+               dma_free_coherent(priv->dev, RX_BUF_SIZE,
+                                 priv->rx_scratch, priv->rx_scratch_dma);
 }
 
-/******************************************************************************
- * internal functions (buffer)
- *****************************************************************************/
-static int ftgmac100_alloc_rx_page(struct ftgmac100 *priv,
-                                  struct ftgmac100_rxdes *rxdes, gfp_t gfp)
+static int ftgmac100_alloc_rings(struct ftgmac100 *priv)
 {
-       struct net_device *netdev = priv->netdev;
-       struct page *page;
-       dma_addr_t map;
+       /* Allocate skb arrays */
+       priv->rx_skbs = kcalloc(MAX_RX_QUEUE_ENTRIES, sizeof(void *),
+                               GFP_KERNEL);
+       if (!priv->rx_skbs)
+               return -ENOMEM;
+       priv->tx_skbs = kcalloc(MAX_TX_QUEUE_ENTRIES, sizeof(void *),
+                               GFP_KERNEL);
+       if (!priv->tx_skbs)
+               return -ENOMEM;
 
-       page = alloc_page(gfp);
-       if (!page) {
-               if (net_ratelimit())
-                       netdev_err(netdev, "failed to allocate rx page\n");
+       /* Allocate descriptors */
+       priv->rxdes = dma_zalloc_coherent(priv->dev,
+                                         MAX_RX_QUEUE_ENTRIES *
+                                         sizeof(struct ftgmac100_rxdes),
+                                         &priv->rxdes_dma, GFP_KERNEL);
+       if (!priv->rxdes)
+               return -ENOMEM;
+       priv->txdes = dma_zalloc_coherent(priv->dev,
+                                         MAX_TX_QUEUE_ENTRIES *
+                                         sizeof(struct ftgmac100_txdes),
+                                         &priv->txdes_dma, GFP_KERNEL);
+       if (!priv->txdes)
                return -ENOMEM;
-       }
 
-       map = dma_map_page(priv->dev, page, 0, RX_BUF_SIZE, DMA_FROM_DEVICE);
-       if (unlikely(dma_mapping_error(priv->dev, map))) {
-               if (net_ratelimit())
-                       netdev_err(netdev, "failed to map rx page\n");
-               __free_page(page);
+       /* Allocate scratch packet buffer */
+       priv->rx_scratch = dma_alloc_coherent(priv->dev,
+                                             RX_BUF_SIZE,
+                                             &priv->rx_scratch_dma,
+                                             GFP_KERNEL);
+       if (!priv->rx_scratch)
                return -ENOMEM;
-       }
 
-       ftgmac100_rxdes_set_page(priv, rxdes, page);
-       ftgmac100_rxdes_set_dma_addr(rxdes, map);
-       ftgmac100_rxdes_set_dma_own(priv, rxdes);
        return 0;
 }
 
-static void ftgmac100_free_buffers(struct ftgmac100 *priv)
+static void ftgmac100_init_rings(struct ftgmac100 *priv)
 {
+       struct ftgmac100_rxdes *rxdes = NULL;
+       struct ftgmac100_txdes *txdes = NULL;
        int i;
 
-       for (i = 0; i < RX_QUEUE_ENTRIES; i++) {
-               struct ftgmac100_rxdes *rxdes = &priv->descs->rxdes[i];
-               struct page *page = ftgmac100_rxdes_get_page(priv, rxdes);
-               dma_addr_t map = ftgmac100_rxdes_get_dma_addr(rxdes);
+       /* Update entries counts */
+       priv->rx_q_entries = priv->new_rx_q_entries;
+       priv->tx_q_entries = priv->new_tx_q_entries;
 
-               if (!page)
-                       continue;
+       if (WARN_ON(priv->rx_q_entries < MIN_RX_QUEUE_ENTRIES))
+               return;
 
-               dma_unmap_page(priv->dev, map, RX_BUF_SIZE, DMA_FROM_DEVICE);
-               __free_page(page);
+       /* Initialize RX ring */
+       for (i = 0; i < priv->rx_q_entries; i++) {
+               rxdes = &priv->rxdes[i];
+               rxdes->rxdes0 = 0;
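+               /* Point at the scratch buffer until real skbs are
+                * attached by ftgmac100_alloc_rx_buffers()
+                */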
+               rxdes->rxdes3 = cpu_to_le32(priv->rx_scratch_dma);
        }
+       /* Mark the end of the ring */
+       rxdes->rxdes0 |= cpu_to_le32(priv->rxdes0_edorr_mask);
 
-       for (i = 0; i < TX_QUEUE_ENTRIES; i++) {
-               struct ftgmac100_txdes *txdes = &priv->descs->txdes[i];
-               struct sk_buff *skb = ftgmac100_txdes_get_skb(txdes);
-               dma_addr_t map = ftgmac100_txdes_get_dma_addr(txdes);
-
-               if (!skb)
-                       continue;
+       if (WARN_ON(priv->tx_q_entries < MIN_TX_QUEUE_ENTRIES))
+               return;
 
-               dma_unmap_single(priv->dev, map, skb_headlen(skb), DMA_TO_DEVICE);
-               kfree_skb(skb);
+       /* Initialize TX ring */
+       for (i = 0; i < priv->tx_q_entries; i++) {
+               txdes = &priv->txdes[i];
+               txdes->txdes0 = 0;
        }
-
-       dma_free_coherent(priv->dev, sizeof(struct ftgmac100_descs),
-                         priv->descs, priv->descs_dma_addr);
+       txdes->txdes0 |= cpu_to_le32(priv->txdes0_edotr_mask);
 }
 
-static int ftgmac100_alloc_buffers(struct ftgmac100 *priv)
+static int ftgmac100_alloc_rx_buffers(struct ftgmac100 *priv)
 {
        int i;
 
-       priv->descs = dma_zalloc_coherent(priv->dev,
-                                         sizeof(struct ftgmac100_descs),
-                                         &priv->descs_dma_addr, GFP_KERNEL);
-       if (!priv->descs)
-               return -ENOMEM;
-
-       /* initialize RX ring */
-       ftgmac100_rxdes_set_end_of_ring(priv,
-                                       &priv->descs->rxdes[RX_QUEUE_ENTRIES - 1]);
-
-       for (i = 0; i < RX_QUEUE_ENTRIES; i++) {
-               struct ftgmac100_rxdes *rxdes = &priv->descs->rxdes[i];
+       for (i = 0; i < priv->rx_q_entries; i++) {
+               struct ftgmac100_rxdes *rxdes = &priv->rxdes[i];
 
-               if (ftgmac100_alloc_rx_page(priv, rxdes, GFP_KERNEL))
-                       goto err;
+               if (ftgmac100_alloc_rx_buf(priv, i, rxdes, GFP_KERNEL))
+                       return -ENOMEM;
        }
-
-       /* initialize TX ring */
-       ftgmac100_txdes_set_end_of_ring(priv,
-                                       &priv->descs->txdes[TX_QUEUE_ENTRIES - 1]);
        return 0;
-
-err:
-       ftgmac100_free_buffers(priv);
-       return -ENOMEM;
 }
 
-/******************************************************************************
- * internal functions (mdio)
- *****************************************************************************/
 static void ftgmac100_adjust_link(struct net_device *netdev)
 {
        struct ftgmac100 *priv = netdev_priv(netdev);
        struct phy_device *phydev = netdev->phydev;
-       int ier;
+       int new_speed;
 
-       if (phydev->speed == priv->old_speed)
-               return;
+       /* We store "no link" as speed 0 */
+       if (!phydev->link)
+               new_speed = 0;
+       else
+               new_speed = phydev->speed;
 
-       priv->old_speed = phydev->speed;
+       if (new_speed == priv->cur_speed &&
+           phydev->duplex == priv->cur_duplex)
+               return;
 
-       ier = ioread32(priv->base + FTGMAC100_OFFSET_IER);
+       /* Print status if we have a link or we had one and just lost it;
+        * don't print otherwise.
+        */
+       if (new_speed || priv->cur_speed)
+               phy_print_status(phydev);
 
-       /* disable all interrupts */
-       iowrite32(0, priv->base + FTGMAC100_OFFSET_IER);
+       priv->cur_speed = new_speed;
+       priv->cur_duplex = phydev->duplex;
 
-       netif_stop_queue(netdev);
-       ftgmac100_stop_hw(priv);
+       /* Link is down, do nothing else */
+       if (!new_speed)
+               return;
 
-       netif_start_queue(netdev);
-       ftgmac100_init_hw(priv);
-       ftgmac100_start_hw(priv, phydev->speed);
+       /* Disable all interrupts */
+       iowrite32(0, priv->base + FTGMAC100_OFFSET_IER);
 
-       /* re-enable interrupts */
-       iowrite32(ier, priv->base + FTGMAC100_OFFSET_IER);
+       /* Reset the adapter asynchronously */
+       schedule_work(&priv->reset_task);
 }
 
 static int ftgmac100_mii_probe(struct ftgmac100 *priv)
@@ -918,9 +966,6 @@ static int ftgmac100_mii_probe(struct ftgmac100 *priv)
        return 0;
 }
 
-/******************************************************************************
- * struct mii_bus functions
- *****************************************************************************/
 static int ftgmac100_mdiobus_read(struct mii_bus *bus, int phy_addr, int regnum)
 {
        struct net_device *netdev = bus->priv;
@@ -992,9 +1037,6 @@ static int ftgmac100_mdiobus_write(struct mii_bus *bus, int phy_addr,
        return -EIO;
 }
 
-/******************************************************************************
- * struct ethtool_ops functions
- *****************************************************************************/
 static void ftgmac100_get_drvinfo(struct net_device *netdev,
                                  struct ethtool_drvinfo *info)
 {
@@ -1003,175 +1045,319 @@ static void ftgmac100_get_drvinfo(struct net_device *netdev,
        strlcpy(info->bus_info, dev_name(&netdev->dev), sizeof(info->bus_info));
 }
 
+static int ftgmac100_nway_reset(struct net_device *ndev)
+{
+       if (!ndev->phydev)
+               return -ENXIO;
+       return phy_start_aneg(ndev->phydev);
+}
+
+static void ftgmac100_get_ringparam(struct net_device *netdev,
+                                   struct ethtool_ringparam *ering)
+{
+       struct ftgmac100 *priv = netdev_priv(netdev);
+
+       memset(ering, 0, sizeof(*ering));
+       ering->rx_max_pending = MAX_RX_QUEUE_ENTRIES;
+       ering->tx_max_pending = MAX_TX_QUEUE_ENTRIES;
+       ering->rx_pending = priv->rx_q_entries;
+       ering->tx_pending = priv->tx_q_entries;
+}
+
+static int ftgmac100_set_ringparam(struct net_device *netdev,
+                                  struct ethtool_ringparam *ering)
+{
+       struct ftgmac100 *priv = netdev_priv(netdev);
+
+       if (ering->rx_pending > MAX_RX_QUEUE_ENTRIES ||
+           ering->tx_pending > MAX_TX_QUEUE_ENTRIES ||
+           ering->rx_pending < MIN_RX_QUEUE_ENTRIES ||
+           ering->tx_pending < MIN_TX_QUEUE_ENTRIES ||
+           !is_power_of_2(ering->rx_pending) ||
+           !is_power_of_2(ering->tx_pending))
+               return -EINVAL;
+
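+       /* The new sizes take effect the next time the rings are
+        * re-initialized; e.g. "ethtool -G <iface> rx 256 tx 128"
+        * schedules the reset task below when the interface is up
+        */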
+       priv->new_rx_q_entries = ering->rx_pending;
+       priv->new_tx_q_entries = ering->tx_pending;
+       if (netif_running(netdev))
+               schedule_work(&priv->reset_task);
+
+       return 0;
+}
+
 static const struct ethtool_ops ftgmac100_ethtool_ops = {
        .get_drvinfo            = ftgmac100_get_drvinfo,
        .get_link               = ethtool_op_get_link,
        .get_link_ksettings     = phy_ethtool_get_link_ksettings,
        .set_link_ksettings     = phy_ethtool_set_link_ksettings,
+       .get_ringparam          = ftgmac100_get_ringparam,
+       .set_ringparam          = ftgmac100_set_ringparam,
 };
 
-/******************************************************************************
- * interrupt handler
- *****************************************************************************/
 static irqreturn_t ftgmac100_interrupt(int irq, void *dev_id)
 {
        struct net_device *netdev = dev_id;
        struct ftgmac100 *priv = netdev_priv(netdev);
+       unsigned int status, new_mask = FTGMAC100_INT_BAD;
 
-       /* When running in NCSI mode, the interface should be ready for
-        * receiving or transmitting NCSI packets before it's opened.
-        */
-       if (likely(priv->use_ncsi || netif_running(netdev))) {
-               /* Disable interrupts for polling */
-               iowrite32(0, priv->base + FTGMAC100_OFFSET_IER);
-               napi_schedule(&priv->napi);
+       /* Fetch and clear interrupt bits, process abnormal ones */
+       status = ioread32(priv->base + FTGMAC100_OFFSET_ISR);
+       iowrite32(status, priv->base + FTGMAC100_OFFSET_ISR);
+       if (unlikely(status & FTGMAC100_INT_BAD)) {
+               /* RX buffer unavailable */
+               if (status & FTGMAC100_INT_NO_RXBUF)
+                       netdev->stats.rx_over_errors++;
+
+               /* received packet lost due to RX FIFO full */
+               if (status & FTGMAC100_INT_RPKT_LOST)
+                       netdev->stats.rx_fifo_errors++;
+
+               /* sent packet lost due to excessive TX collision */
+               if (status & FTGMAC100_INT_XPKT_LOST)
+                       netdev->stats.tx_fifo_errors++;
+
+               /* AHB error -> Reset the chip */
+               if (status & FTGMAC100_INT_AHB_ERR) {
+                       if (net_ratelimit())
+                               netdev_warn(netdev,
+                                          "AHB bus error ! Resetting chip.\n");
+                       iowrite32(0, priv->base + FTGMAC100_OFFSET_IER);
+                       schedule_work(&priv->reset_task);
+                       return IRQ_HANDLED;
+               }
+
+               /* We may need to restart the MAC after such errors; delay
+                * this until after we have freed some Rx buffers though
+                */
+               priv->need_mac_restart = true;
+
+               /* Disable those errors until we restart */
+               new_mask &= ~status;
        }
 
+       /* Only keep the "bad" interrupts enabled while NAPI runs; RX/TX
+        * interrupts stay masked until ftgmac100_poll() re-enables them
+        * via FTGMAC100_INT_ALL
+        */
+       iowrite32(new_mask, priv->base + FTGMAC100_OFFSET_IER);
+
+       /* Schedule NAPI bh */
+       napi_schedule_irqoff(&priv->napi);
+
        return IRQ_HANDLED;
 }
 
-/******************************************************************************
- * struct napi_struct functions
- *****************************************************************************/
+static bool ftgmac100_check_rx(struct ftgmac100 *priv)
+{
+       struct ftgmac100_rxdes *rxdes = &priv->rxdes[priv->rx_pointer];
+
+       /* Do we have a packet? */
+       return !!(rxdes->rxdes0 & cpu_to_le32(FTGMAC100_RXDES0_RXPKT_RDY));
+}
+
 static int ftgmac100_poll(struct napi_struct *napi, int budget)
 {
        struct ftgmac100 *priv = container_of(napi, struct ftgmac100, napi);
-       struct net_device *netdev = priv->netdev;
-       unsigned int status;
-       bool completed = true;
-       int rx = 0;
+       int work_done = 0;
+       bool more;
 
-       status = ioread32(priv->base + FTGMAC100_OFFSET_ISR);
-       iowrite32(status, priv->base + FTGMAC100_OFFSET_ISR);
+       /* Handle TX completions */
+       if (ftgmac100_tx_buf_cleanable(priv))
+               ftgmac100_tx_complete(priv);
 
-       if (status & (FTGMAC100_INT_RPKT_BUF | FTGMAC100_INT_NO_RXBUF)) {
-               /*
-                * FTGMAC100_INT_RPKT_BUF:
-                *      RX DMA has received packets into RX buffer successfully
-                *
-                * FTGMAC100_INT_NO_RXBUF:
-                *      RX buffer unavailable
-                */
-               bool retry;
+       /* Handle RX packets */
+       do {
+               more = ftgmac100_rx_packet(priv, &work_done);
+       } while (more && work_done < budget);
 
-               do {
-                       retry = ftgmac100_rx_packet(priv, &rx);
-               } while (retry && rx < budget);
-               if (retry && rx == budget)
-                       completed = false;
+       /* The interrupt is telling us to kick the MAC back to life
+        * after an RX overflow
+        */
+       if (unlikely(priv->need_mac_restart)) {
+               ftgmac100_start_hw(priv);
+
+               /* Re-enable "bad" interrupts */
+               iowrite32(FTGMAC100_INT_BAD,
+                         priv->base + FTGMAC100_OFFSET_IER);
        }
 
-       if (status & (FTGMAC100_INT_XPKT_ETH | FTGMAC100_INT_XPKT_LOST)) {
-               /*
-                * FTGMAC100_INT_XPKT_ETH:
-                *      packet transmitted to ethernet successfully
-                *
-                * FTGMAC100_INT_XPKT_LOST:
-                *      packet transmitted to ethernet lost due to late
-                *      collision or excessive collision
+       /* As long as we are waiting for transmit packets to be
+        * completed we keep NAPI going
+        */
+       if (ftgmac100_tx_buf_cleanable(priv))
+               work_done = budget;
+
+       if (work_done < budget) {
+               /* We are about to re-enable all interrupts. However
+                * the HW has been latching RX/TX packet interrupts while
+                * they were masked. So we clear them first, then we need
+                * to re-check if there's something to process
                 */
-               ftgmac100_tx_complete(priv);
+               iowrite32(FTGMAC100_INT_RXTX,
+                         priv->base + FTGMAC100_OFFSET_ISR);
+               if (ftgmac100_check_rx(priv) ||
+                   ftgmac100_tx_buf_cleanable(priv))
+                       return budget;
+
+               /* deschedule NAPI */
+               napi_complete(napi);
+
+               /* enable all interrupts */
+               iowrite32(FTGMAC100_INT_ALL,
+                         priv->base + FTGMAC100_OFFSET_IER);
        }
 
-       if (status & priv->int_mask_all & (FTGMAC100_INT_NO_RXBUF |
-                       FTGMAC100_INT_RPKT_LOST | FTGMAC100_INT_AHB_ERR)) {
-               if (net_ratelimit())
-                       netdev_info(netdev, "[ISR] = 0x%x: %s%s%s\n", status,
-                                   status & FTGMAC100_INT_NO_RXBUF ? "NO_RXBUF " : "",
-                                   status & FTGMAC100_INT_RPKT_LOST ? "RPKT_LOST " : "",
-                                   status & FTGMAC100_INT_AHB_ERR ? "AHB_ERR " : "");
+       return work_done;
+}
 
-               if (status & FTGMAC100_INT_NO_RXBUF) {
-                       /* RX buffer unavailable */
-                       netdev->stats.rx_over_errors++;
-               }
+static int ftgmac100_init_all(struct ftgmac100 *priv, bool ignore_alloc_err)
+{
+       int err = 0;
 
-               if (status & FTGMAC100_INT_RPKT_LOST) {
-                       /* received packet lost due to RX FIFO full */
-                       netdev->stats.rx_fifo_errors++;
-               }
-       }
+       /* Re-init descriptors (adjust queue sizes) */
+       ftgmac100_init_rings(priv);
 
-       if (completed) {
-               napi_complete(napi);
+       /* (Re)allocate RX buffers */
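+       /* On the reset path (ignore_alloc_err set) we keep going even
+        * if some allocations fail: those descriptors still point at
+        * the scratch buffer and ftgmac100_rx_packet() will retry the
+        * allocation later
+        */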
+       err = ftgmac100_alloc_rx_buffers(priv);
+       if (err && !ignore_alloc_err)
+               return err;
 
-               /* enable all interrupts */
-               iowrite32(priv->int_mask_all,
-                         priv->base + FTGMAC100_OFFSET_IER);
+       /* Reinit and restart HW */
+       ftgmac100_init_hw(priv);
+       ftgmac100_start_hw(priv);
+
+       /* Re-enable the device */
+       napi_enable(&priv->napi);
+       netif_start_queue(priv->netdev);
+
+       /* Enable all interrupts */
+       iowrite32(FTGMAC100_INT_ALL, priv->base + FTGMAC100_OFFSET_IER);
+
+       return err;
+}
+
+static void ftgmac100_reset_task(struct work_struct *work)
+{
+       struct ftgmac100 *priv = container_of(work, struct ftgmac100,
+                                             reset_task);
+       struct net_device *netdev = priv->netdev;
+       int err;
+
+       netdev_dbg(netdev, "Resetting NIC...\n");
+
+       /* Lock the world */
+       rtnl_lock();
+       if (netdev->phydev)
+               mutex_lock(&netdev->phydev->lock);
+       if (priv->mii_bus)
+               mutex_lock(&priv->mii_bus->mdio_lock);
+
+       /* Check if the interface is still up */
+       if (!netif_running(netdev))
+               goto bail;
+
+       /* Stop the network stack */
+       netif_trans_update(netdev);
+       napi_disable(&priv->napi);
+       netif_tx_disable(netdev);
+
+       /* Stop and reset the MAC */
+       ftgmac100_stop_hw(priv);
+       err = ftgmac100_reset_and_config_mac(priv);
+       if (err) {
+               /* Not much we can do ... it might come back... */
+               netdev_err(netdev, "attempting to continue...\n");
        }
 
-       return rx;
+       /* Free all rx and tx buffers */
+       ftgmac100_free_buffers(priv);
+
+       /* Setup everything again and restart chip */
+       ftgmac100_init_all(priv, true);
+
+       netdev_dbg(netdev, "Reset done !\n");
+ bail:
+       if (priv->mii_bus)
+               mutex_unlock(&priv->mii_bus->mdio_lock);
+       if (netdev->phydev)
+               mutex_unlock(&netdev->phydev->lock);
+       rtnl_unlock();
 }
 
-/******************************************************************************
- * struct net_device_ops functions
- *****************************************************************************/
 static int ftgmac100_open(struct net_device *netdev)
 {
        struct ftgmac100 *priv = netdev_priv(netdev);
-       unsigned int status;
        int err;
 
-       err = ftgmac100_alloc_buffers(priv);
+       /* Allocate ring buffers  */
+       err = ftgmac100_alloc_rings(priv);
        if (err) {
-               netdev_err(netdev, "failed to allocate buffers\n");
-               goto err_alloc;
+               netdev_err(netdev, "Failed to allocate descriptors\n");
+               return err;
        }
 
-       err = request_irq(priv->irq, ftgmac100_interrupt, 0, netdev->name, netdev);
-       if (err) {
-               netdev_err(netdev, "failed to request irq %d\n", priv->irq);
-               goto err_irq;
+       /* When using NC-SI we force the speed to 100Mbit/s full duplex.
+        *
+        * Otherwise we leave it set to 0 (no link); the link
+        * message from the PHY layer will set it up to
+        * something else if needed.
+        */
+       if (priv->use_ncsi) {
+               priv->cur_duplex = DUPLEX_FULL;
+               priv->cur_speed = SPEED_100;
+       } else {
+               priv->cur_duplex = 0;
+               priv->cur_speed = 0;
        }
 
-       priv->rx_pointer = 0;
-       priv->tx_clean_pointer = 0;
-       priv->tx_pointer = 0;
-       priv->tx_pending = 0;
-
-       err = ftgmac100_reset_hw(priv);
+       /* Reset the hardware */
+       err = ftgmac100_reset_and_config_mac(priv);
        if (err)
                goto err_hw;
 
-       ftgmac100_init_hw(priv);
-       ftgmac100_start_hw(priv, priv->use_ncsi ? 100 : 10);
+       /* Initialize NAPI */
+       netif_napi_add(netdev, &priv->napi, ftgmac100_poll, 64);
 
-       /* Clear stale interrupts */
-       status = ioread32(priv->base + FTGMAC100_OFFSET_ISR);
-       iowrite32(status, priv->base + FTGMAC100_OFFSET_ISR);
+       /* Grab our interrupt */
+       err = request_irq(netdev->irq, ftgmac100_interrupt, 0, netdev->name, netdev);
+       if (err) {
+               netdev_err(netdev, "failed to request irq %d\n", netdev->irq);
+               goto err_irq;
+       }
 
-       if (netdev->phydev)
+       /* Start things up */
+       err = ftgmac100_init_all(priv, false);
+       if (err) {
+               netdev_err(netdev, "Failed to allocate packet buffers\n");
+               goto err_alloc;
+       }
+
+       if (netdev->phydev) {
+               /* If we have a PHY, start polling */
                phy_start(netdev->phydev);
-       else if (priv->use_ncsi)
+       } else if (priv->use_ncsi) {
+               /* If using NC-SI, set our carrier on and start the stack */
                netif_carrier_on(netdev);
 
-       napi_enable(&priv->napi);
-       netif_start_queue(netdev);
-
-       /* enable all interrupts */
-       iowrite32(priv->int_mask_all, priv->base + FTGMAC100_OFFSET_IER);
-
-       /* Start the NCSI device */
-       if (priv->use_ncsi) {
+               /* Start the NCSI device */
                err = ncsi_start_dev(priv->ndev);
                if (err)
                        goto err_ncsi;
        }
 
-       priv->enabled = true;
-
        return 0;
 
-err_ncsi:
+ err_ncsi:
        napi_disable(&priv->napi);
        netif_stop_queue(netdev);
-       iowrite32(0, priv->base + FTGMAC100_OFFSET_IER);
-err_hw:
-       free_irq(priv->irq, netdev);
-err_irq:
+ err_alloc:
        ftgmac100_free_buffers(priv);
-err_alloc:
+       free_irq(netdev->irq, netdev);
+ err_irq:
+       netif_napi_del(&priv->napi);
+ err_hw:
+       iowrite32(0, priv->base + FTGMAC100_OFFSET_IER);
+       ftgmac100_free_rings(priv);
        return err;
 }
 
@@ -1179,56 +1365,33 @@ static int ftgmac100_stop(struct net_device *netdev)
 {
        struct ftgmac100 *priv = netdev_priv(netdev);
 
-       if (!priv->enabled)
-               return 0;
+       /* Note about the reset task: We are called with the rtnl lock
+        * held, so we are synchronized against the core of the reset
+        * task. We must not try to synchronously cancel it, otherwise
+        * we can deadlock. But since it tests for netif_running(),
+        * which has already been cleared by the net core, we have
+        * nothing special to do.
+        */
 
        /* disable all interrupts */
-       priv->enabled = false;
        iowrite32(0, priv->base + FTGMAC100_OFFSET_IER);
 
        netif_stop_queue(netdev);
        napi_disable(&priv->napi);
+       netif_napi_del(&priv->napi);
        if (netdev->phydev)
                phy_stop(netdev->phydev);
        else if (priv->use_ncsi)
                ncsi_stop_dev(priv->ndev);
 
        ftgmac100_stop_hw(priv);
-       free_irq(priv->irq, netdev);
+       free_irq(netdev->irq, netdev);
        ftgmac100_free_buffers(priv);
+       ftgmac100_free_rings(priv);
 
        return 0;
 }
 
-static int ftgmac100_hard_start_xmit(struct sk_buff *skb,
-                                    struct net_device *netdev)
-{
-       struct ftgmac100 *priv = netdev_priv(netdev);
-       dma_addr_t map;
-
-       if (unlikely(skb->len > MAX_PKT_SIZE)) {
-               if (net_ratelimit())
-                       netdev_dbg(netdev, "tx packet too big\n");
-
-               netdev->stats.tx_dropped++;
-               kfree_skb(skb);
-               return NETDEV_TX_OK;
-       }
-
-       map = dma_map_single(priv->dev, skb->data, skb_headlen(skb), DMA_TO_DEVICE);
-       if (unlikely(dma_mapping_error(priv->dev, map))) {
-               /* drop packet */
-               if (net_ratelimit())
-                       netdev_err(netdev, "map socket buffer failed\n");
-
-               netdev->stats.tx_dropped++;
-               kfree_skb(skb);
-               return NETDEV_TX_OK;
-       }
-
-       return ftgmac100_xmit(priv, skb, map);
-}
-
 /* optional */
 static int ftgmac100_do_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
 {
@@ -1238,6 +1401,17 @@ static int ftgmac100_do_ioctl(struct net_device *netdev, struct ifreq *ifr, int
        return phy_mii_ioctl(netdev->phydev, ifr, cmd);
 }
 
+static void ftgmac100_tx_timeout(struct net_device *netdev)
+{
+       struct ftgmac100 *priv = netdev_priv(netdev);
+
+       /* Disable all interrupts */
+       iowrite32(0, priv->base + FTGMAC100_OFFSET_IER);
+
+       /* Do the reset outside of interrupt context */
+       schedule_work(&priv->reset_task);
+}
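
ndo_tx_timeout runs from the netdev watchdog in softirq context, so it cannot take rtnl_lock or the PHY/MDIO mutexes itself; it only masks interrupts and defers the heavy lifting to the work item. The lifecycle of that work item, condensed from this patch:

	/* probe: set up the deferred-reset machinery */
	INIT_WORK(&priv->reset_task, ftgmac100_reset_task);
	netdev->watchdog_timeo = 5 * HZ;	/* arm the TX watchdog */

	/* ndo_tx_timeout (softirq context): defer, never sleep here */
	schedule_work(&priv->reset_task);

	/* remove, after unregister_netdev(): the task may have been
	 * re-queued during stop, so flush it before freeing priv
	 */
	cancel_work_sync(&priv->reset_task);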
+
 static const struct net_device_ops ftgmac100_netdev_ops = {
        .ndo_open               = ftgmac100_open,
        .ndo_stop               = ftgmac100_stop,
@@ -1245,6 +1419,7 @@ static const struct net_device_ops ftgmac100_netdev_ops = {
        .ndo_set_mac_address    = ftgmac100_set_mac_addr,
        .ndo_validate_addr      = eth_validate_addr,
        .ndo_do_ioctl           = ftgmac100_do_ioctl,
+       .ndo_tx_timeout         = ftgmac100_tx_timeout,
 };
 
 static int ftgmac100_setup_mdio(struct net_device *netdev)
@@ -1259,8 +1434,7 @@ static int ftgmac100_setup_mdio(struct net_device *netdev)
        if (!priv->mii_bus)
                return -EIO;
 
-       if (of_machine_is_compatible("aspeed,ast2400") ||
-           of_machine_is_compatible("aspeed,ast2500")) {
+       if (priv->is_aspeed) {
                /* This driver supports the old MDIO interface */
                reg = ioread32(priv->base + FTGMAC100_OFFSET_REVR);
                reg &= ~FTGMAC100_REVR_NEW_MDIO_INTERFACE;
@@ -1319,15 +1493,13 @@ static void ftgmac100_ncsi_handler(struct ncsi_dev *nd)
                    nd->link_up ? "up" : "down");
 }
 
-/******************************************************************************
- * struct platform_driver functions
- *****************************************************************************/
 static int ftgmac100_probe(struct platform_device *pdev)
 {
        struct resource *res;
        int irq;
        struct net_device *netdev;
        struct ftgmac100 *priv;
+       struct device_node *np;
        int err = 0;
 
        if (!pdev)
@@ -1352,6 +1524,7 @@ static int ftgmac100_probe(struct platform_device *pdev)
 
        netdev->ethtool_ops = &ftgmac100_ethtool_ops;
        netdev->netdev_ops = &ftgmac100_netdev_ops;
+       netdev->watchdog_timeo = 5 * HZ;
 
        platform_set_drvdata(pdev, netdev);
 
@@ -1359,11 +1532,7 @@ static int ftgmac100_probe(struct platform_device *pdev)
        priv = netdev_priv(netdev);
        priv->netdev = netdev;
        priv->dev = &pdev->dev;
-
-       spin_lock_init(&priv->tx_lock);
-
-       /* initialize NAPI */
-       netif_napi_add(netdev, &priv->napi, ftgmac100_poll, 64);
+       INIT_WORK(&priv->reset_task, ftgmac100_reset_task);
 
        /* map io memory */
        priv->res = request_mem_region(res->start, resource_size(res),
@@ -1381,29 +1550,23 @@ static int ftgmac100_probe(struct platform_device *pdev)
                goto err_ioremap;
        }
 
-       priv->irq = irq;
+       netdev->irq = irq;
 
        /* MAC address from chip or random one */
-       ftgmac100_setup_mac(priv);
-
-       priv->int_mask_all = (FTGMAC100_INT_RPKT_LOST |
-                             FTGMAC100_INT_XPKT_ETH |
-                             FTGMAC100_INT_XPKT_LOST |
-                             FTGMAC100_INT_AHB_ERR |
-                             FTGMAC100_INT_RPKT_BUF |
-                             FTGMAC100_INT_NO_RXBUF);
+       ftgmac100_initial_mac(priv);
 
-       if (of_machine_is_compatible("aspeed,ast2400") ||
-           of_machine_is_compatible("aspeed,ast2500")) {
+       np = pdev->dev.of_node;
+       if (np && (of_device_is_compatible(np, "aspeed,ast2400-mac") ||
+                  of_device_is_compatible(np, "aspeed,ast2500-mac"))) {
                priv->rxdes0_edorr_mask = BIT(30);
                priv->txdes0_edotr_mask = BIT(30);
+               priv->is_aspeed = true;
        } else {
                priv->rxdes0_edorr_mask = BIT(15);
                priv->txdes0_edotr_mask = BIT(15);
        }
 
-       if (pdev->dev.of_node &&
-           of_get_property(pdev->dev.of_node, "use-ncsi", NULL)) {
+       if (np && of_get_property(np, "use-ncsi", NULL)) {
                if (!IS_ENABLED(CONFIG_NET_NCSI)) {
                        dev_err(&pdev->dev, "NCSI stack not enabled\n");
                        goto err_ncsi_dev;
@@ -1421,15 +1584,20 @@ static int ftgmac100_probe(struct platform_device *pdev)
                        goto err_setup_mdio;
        }
 
-       /* We have to disable on-chip IP checksum functionality
-        * when NCSI is enabled on the interface. It doesn't work
-        * in that case.
-        */
-       netdev->features = NETIF_F_IP_CSUM | NETIF_F_GRO;
-       if (priv->use_ncsi &&
-           of_get_property(pdev->dev.of_node, "no-hw-checksum", NULL))
-               netdev->features &= ~NETIF_F_IP_CSUM;
+       /* Default ring sizes */
+       priv->rx_q_entries = priv->new_rx_q_entries = DEF_RX_QUEUE_ENTRIES;
+       priv->tx_q_entries = priv->new_tx_q_entries = DEF_TX_QUEUE_ENTRIES;
+
+       /* Base feature set */
+       netdev->hw_features = NETIF_F_RXCSUM | NETIF_F_HW_CSUM |
+               NETIF_F_GRO | NETIF_F_SG;
 
+       /* AST2400 doesn't have working HW checksum generation */
+       if (np && (of_device_is_compatible(np, "aspeed,ast2400-mac")))
+               netdev->hw_features &= ~NETIF_F_HW_CSUM;
+       if (np && of_get_property(np, "no-hw-checksum", NULL))
+               netdev->hw_features &= ~(NETIF_F_HW_CSUM | NETIF_F_RXCSUM);
+       netdev->features |= netdev->hw_features;
 
        /* register network device */
        err = register_netdev(netdev);
@@ -1438,7 +1606,7 @@ static int ftgmac100_probe(struct platform_device *pdev)
                goto err_register_netdev;
        }
 
-       netdev_info(netdev, "irq %d, mapped at %p\n", priv->irq, priv->base);
+       netdev_info(netdev, "irq %d, mapped at %p\n", netdev->irq, priv->base);
 
        return 0;
 
@@ -1465,6 +1633,12 @@ static int ftgmac100_remove(struct platform_device *pdev)
        priv = netdev_priv(netdev);
 
        unregister_netdev(netdev);
+
+       /* There's a small chance the reset task will have been re-queued
+        * during stop; make sure it's gone before we free the structure.
+        */
+       cancel_work_sync(&priv->reset_task);
+
        ftgmac100_destroy_mdio(netdev);
 
        iounmap(priv->base);
index a7ce0ac8858a5f1a123be8ca46a03bc5fa18f9f0..97912c456e800ac27aa7836a86fef4e777c66b84 100644 (file)
 #define FTGMAC100_INT_PHYSTS_CHG       (1 << 9)
 #define FTGMAC100_INT_NO_HPTXBUF       (1 << 10)
 
+/* Interrupts we care about in NAPI mode */
+#define FTGMAC100_INT_BAD  (FTGMAC100_INT_RPKT_LOST | \
+                           FTGMAC100_INT_XPKT_LOST | \
+                           FTGMAC100_INT_AHB_ERR   | \
+                           FTGMAC100_INT_NO_RXBUF)
+
+/* Normal RX/TX interrupts, enabled when NAPI off */
+#define FTGMAC100_INT_RXTX (FTGMAC100_INT_XPKT_ETH  | \
+                           FTGMAC100_INT_RPKT_BUF)
+
+/* All the interrupts we care about */
+#define FTGMAC100_INT_ALL (FTGMAC100_INT_RPKT_BUF  |  \
+                          FTGMAC100_INT_BAD)
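
These groupings let the hard interrupt handler and the poll loop trade masks: the handler disables everything and schedules NAPI, and the poll function re-enables FTGMAC100_INT_ALL once it goes idle. The handler itself is not part of this hunk; a plausible shape, reusing names that do appear in the driver (a sketch, not the patch's actual code):

static irqreturn_t ftgmac100_interrupt(int irq, void *dev_id)
{
	struct net_device *netdev = dev_id;
	struct ftgmac100 *priv = netdev_priv(netdev);

	/* Mask everything; NAPI polls with sources disabled and
	 * re-enables FTGMAC100_INT_ALL when it completes.
	 */
	iowrite32(0, priv->base + FTGMAC100_OFFSET_IER);
	napi_schedule(&priv->napi);

	return IRQ_HANDLED;
}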
+
 /*
  * Interrupt timer control register
  */
  * Transmit descriptor, aligned to 16 bytes
  */
 struct ftgmac100_txdes {
-       unsigned int    txdes0;
-       unsigned int    txdes1;
-       unsigned int    txdes2; /* not used by HW */
-       unsigned int    txdes3; /* TXBUF_BADR */
+       __le32  txdes0; /* Control & status bits */
+       __le32  txdes1; /* Irq, checksum and vlan control */
+       __le32  txdes2; /* Reserved */
+       __le32  txdes3; /* DMA buffer address */
 } __attribute__ ((aligned(16)));
 
 #define FTGMAC100_TXDES0_TXBUF_SIZE(x) ((x) & 0x3fff)
@@ -213,10 +227,10 @@ struct ftgmac100_txdes {
  * Receive descriptor, aligned to 16 bytes
  */
 struct ftgmac100_rxdes {
-       unsigned int    rxdes0;
-       unsigned int    rxdes1;
-       unsigned int    rxdes2; /* not used by HW */
-       unsigned int    rxdes3; /* RXBUF_BADR */
+       __le32  rxdes0; /* Control & status bits */
+       __le32  rxdes1; /* Checksum and vlan status */
+       __le32  rxdes2; /* length/type on AST2500 */
+       __le32  rxdes3; /* DMA buffer address */
 } __attribute__ ((aligned(16)));
 
 #define FTGMAC100_RXDES0_VDBC          0x3fff
@@ -234,6 +248,14 @@ struct ftgmac100_rxdes {
 #define FTGMAC100_RXDES0_FRS           (1 << 29)
 #define FTGMAC100_RXDES0_RXPKT_RDY     (1 << 31)
 
+/* Errors we care about for dropping packets */
+#define RXDES0_ANY_ERROR               ( \
+       FTGMAC100_RXDES0_RX_ERR         | \
+       FTGMAC100_RXDES0_CRC_ERR        | \
+       FTGMAC100_RXDES0_FTL            | \
+       FTGMAC100_RXDES0_RUNT           | \
+       FTGMAC100_RXDES0_RX_ODD_NB)
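
Folding the five error conditions into one mask lets the RX hot path test and drop with a single compare. The intended use looks roughly like this (a sketch; the exact accessor and label names are not shown in this hunk):

	/* rxdes0 already converted from little endian */
	if (unlikely(rxdes0 & RXDES0_ANY_ERROR)) {
		netdev->stats.rx_errors++;
		goto drop;	/* recycle the buffer, keep polling */
	}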
+
 #define FTGMAC100_RXDES1_VLANTAG_CI    0xffff
 #define FTGMAC100_RXDES1_PROT_MASK     (0x3 << 20)
 #define FTGMAC100_RXDES1_PROT_NONIP    (0x0 << 20)
index e2ca107f9d94f162cdab87e406c72095a6746651..9a520e4f0df9a0d47b75f71f01557414ba3d4eab 100644 (file)
@@ -137,6 +137,13 @@ MODULE_PARM_DESC(tx_timeout, "The Tx timeout in ms");
 /* L4 Type field: TCP */
 #define FM_L4_PARSE_RESULT_TCP 0x20
 
+/* FD status field indicating whether the FM Parser has attempted to validate
+ * the L4 csum of the frame.
+ * Note that having this bit set doesn't necessarily imply that the checksum
+ * is valid. One would have to check the parse results to find that out.
+ */
+#define FM_FD_STAT_L4CV         0x00000004
+
 #define DPAA_SGT_MAX_ENTRIES 16 /* maximum number of entries in SG Table */
 #define DPAA_BUFF_RELEASE_MAX 8 /* maximum number of buffers released at once */
 
@@ -235,6 +242,7 @@ static int dpaa_netdev_init(struct net_device *net_dev,
         * For conformity, we'll still declare GSO explicitly.
         */
        net_dev->features |= NETIF_F_GSO;
+       net_dev->features |= NETIF_F_RXCSUM;
 
        net_dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
        /* we do not want shared skbs on TX */
@@ -334,6 +342,45 @@ static void dpaa_get_stats64(struct net_device *net_dev,
        }
 }
 
+static int dpaa_setup_tc(struct net_device *net_dev, u32 handle, __be16 proto,
+                        struct tc_to_netdev *tc)
+{
+       struct dpaa_priv *priv = netdev_priv(net_dev);
+       u8 num_tc;
+       int i;
+
+       if (tc->type != TC_SETUP_MQPRIO)
+               return -EINVAL;
+
+       tc->mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS;
+       num_tc = tc->mqprio->num_tc;
+
+       if (num_tc == priv->num_tc)
+               return 0;
+
+       if (!num_tc) {
+               netdev_reset_tc(net_dev);
+               goto out;
+       }
+
+       if (num_tc > DPAA_TC_NUM) {
+               netdev_err(net_dev, "Too many traffic classes: max %d supported.\n",
+                          DPAA_TC_NUM);
+               return -EINVAL;
+       }
+
+       netdev_set_num_tc(net_dev, num_tc);
+
+       for (i = 0; i < num_tc; i++)
+               netdev_set_tc_queue(net_dev, i, DPAA_TC_TXQ_NUM,
+                                   i * DPAA_TC_TXQ_NUM);
+
+out:
+       priv->num_tc = num_tc ? : 1;
+       netif_set_real_num_tx_queues(net_dev, priv->num_tc * DPAA_TC_TXQ_NUM);
+       return 0;
+}
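
With TC_MQPRIO_HW_OFFLOAD_TCS, each traffic class owns a contiguous block of DPAA_TC_TXQ_NUM (one per CPU) hardware queues, which is exactly the layout the netdev_set_tc_queue() loop above establishes. The resulting queue index for a class/CPU pair works out to (an illustrative helper, not part of the driver):

/* class i owns queues [i * DPAA_TC_TXQ_NUM, (i + 1) * DPAA_TC_TXQ_NUM) */
static u16 dpaa_queue_for(u8 tc, unsigned int cpu)
{
	return tc * DPAA_TC_TXQ_NUM + (cpu % DPAA_TC_TXQ_NUM);
}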
+
 static struct mac_device *dpaa_mac_dev_get(struct platform_device *pdev)
 {
        struct platform_device *of_dev;
@@ -557,16 +604,18 @@ static void dpaa_bps_free(struct dpaa_priv *priv)
 
 /* Use multiple WQs for FQ assignment:
  *     - Tx Confirmation queues go to WQ1.
- *     - Rx Error and Tx Error queues go to WQ2 (giving them a better chance
- *       to be scheduled, in case there are many more FQs in WQ3).
- *     - Rx Default and Tx queues go to WQ3 (no differentiation between
- *       Rx and Tx traffic).
+ *     - Rx Error and Tx Error queues go to WQ5 (giving them a better chance
+ *       to be scheduled, in case there are many more FQs in WQ6).
+ *     - Rx Default goes to WQ6.
+ *     - Tx queues go to different WQs depending on their priority. Equal
+ *       chunks of NR_CPUS queues go to WQ6 (lowest priority), WQ2, WQ1 and
+ *       WQ0 (highest priority).
  * This ensures that Tx-confirmed buffers are timely released. In particular,
  * it avoids congestion on the Tx Confirm FQs, which can pile up PFDRs if they
  * are greatly outnumbered by other FQs in the system, while
  * dequeue scheduling is round-robin.
  */
-static inline void dpaa_assign_wq(struct dpaa_fq *fq)
+static inline void dpaa_assign_wq(struct dpaa_fq *fq, int idx)
 {
        switch (fq->fq_type) {
        case FQ_TYPE_TX_CONFIRM:
@@ -575,11 +624,33 @@ static inline void dpaa_assign_wq(struct dpaa_fq *fq)
                break;
        case FQ_TYPE_RX_ERROR:
        case FQ_TYPE_TX_ERROR:
-               fq->wq = 2;
+               fq->wq = 5;
                break;
        case FQ_TYPE_RX_DEFAULT:
+               fq->wq = 6;
+               break;
        case FQ_TYPE_TX:
-               fq->wq = 3;
+               switch (idx / DPAA_TC_TXQ_NUM) {
+               case 0:
+                       /* Low priority (best effort) */
+                       fq->wq = 6;
+                       break;
+               case 1:
+                       /* Medium priority */
+                       fq->wq = 2;
+                       break;
+               case 2:
+                       /* High priority */
+                       fq->wq = 1;
+                       break;
+               case 3:
+                       /* Very high priority */
+                       fq->wq = 0;
+                       break;
+               default:
+                       WARN(1, "Too many TX FQs: more than %d!\n",
+                            DPAA_ETH_TXQ_NUM);
+               }
                break;
        default:
                WARN(1, "Invalid FQ type %d for FQID %d!\n",
@@ -607,7 +678,7 @@ static struct dpaa_fq *dpaa_fq_alloc(struct device *dev,
        }
 
        for (i = 0; i < count; i++)
-               dpaa_assign_wq(dpaa_fq + i);
+               dpaa_assign_wq(dpaa_fq + i, i);
 
        return dpaa_fq;
 }
@@ -903,7 +974,7 @@ static int dpaa_fq_init(struct dpaa_fq *dpaa_fq, bool td_enable)
                 * Tx Confirmation FQs.
                 */
                if (dpaa_fq->fq_type == FQ_TYPE_TX_CONFIRM)
-                       initfq.fqd.fq_ctrl |= cpu_to_be16(QM_FQCTRL_HOLDACTIVE);
+                       initfq.fqd.fq_ctrl |= cpu_to_be16(QM_FQCTRL_AVOIDBLOCK);
 
                /* FQ placement */
                initfq.we_mask |= cpu_to_be16(QM_INITFQ_WE_DESTWQ);
@@ -985,7 +1056,8 @@ static int dpaa_fq_init(struct dpaa_fq *dpaa_fq, bool td_enable)
                /* Initialization common to all ingress queues */
                if (dpaa_fq->flags & QMAN_FQ_FLAG_NO_ENQUEUE) {
                        initfq.we_mask |= cpu_to_be16(QM_INITFQ_WE_CONTEXTA);
-                       initfq.fqd.fq_ctrl |= cpu_to_be16(QM_FQCTRL_HOLDACTIVE);
+                       initfq.fqd.fq_ctrl |= cpu_to_be16(QM_FQCTRL_HOLDACTIVE |
+                                               QM_FQCTRL_CTXASTASHING);
                        initfq.fqd.context_a.stashing.exclusive =
                                QM_STASHING_EXCL_DATA | QM_STASHING_EXCL_CTX |
                                QM_STASHING_EXCL_ANNOTATION;
@@ -1055,9 +1127,9 @@ static int dpaa_fq_free(struct device *dev, struct list_head *list)
        return err;
 }
 
-static void dpaa_eth_init_tx_port(struct fman_port *port, struct dpaa_fq *errq,
-                                 struct dpaa_fq *defq,
-                                 struct dpaa_buffer_layout *buf_layout)
+static int dpaa_eth_init_tx_port(struct fman_port *port, struct dpaa_fq *errq,
+                                struct dpaa_fq *defq,
+                                struct dpaa_buffer_layout *buf_layout)
 {
        struct fman_buffer_prefix_content buf_prefix_content;
        struct fman_port_params params;
@@ -1076,23 +1148,29 @@ static void dpaa_eth_init_tx_port(struct fman_port *port, struct dpaa_fq *errq,
        params.specific_params.non_rx_params.dflt_fqid = defq->fqid;
 
        err = fman_port_config(port, &params);
-       if (err)
+       if (err) {
                pr_err("%s: fman_port_config failed\n", __func__);
+               return err;
+       }
 
        err = fman_port_cfg_buf_prefix_content(port, &buf_prefix_content);
-       if (err)
+       if (err) {
                pr_err("%s: fman_port_cfg_buf_prefix_content failed\n",
                       __func__);
+               return err;
+       }
 
        err = fman_port_init(port);
        if (err)
                pr_err("%s: fm_port_init failed\n", __func__);
+
+       return err;
 }
 
-static void dpaa_eth_init_rx_port(struct fman_port *port, struct dpaa_bp **bps,
-                                 size_t count, struct dpaa_fq *errq,
-                                 struct dpaa_fq *defq,
-                                 struct dpaa_buffer_layout *buf_layout)
+static int dpaa_eth_init_rx_port(struct fman_port *port, struct dpaa_bp **bps,
+                                size_t count, struct dpaa_fq *errq,
+                                struct dpaa_fq *defq,
+                                struct dpaa_buffer_layout *buf_layout)
 {
        struct fman_buffer_prefix_content buf_prefix_content;
        struct fman_port_rx_params *rx_p;
@@ -1120,32 +1198,44 @@ static void dpaa_eth_init_rx_port(struct fman_port *port, struct dpaa_bp **bps,
        }
 
        err = fman_port_config(port, &params);
-       if (err)
+       if (err) {
                pr_err("%s: fman_port_config failed\n", __func__);
+               return err;
+       }
 
        err = fman_port_cfg_buf_prefix_content(port, &buf_prefix_content);
-       if (err)
+       if (err) {
                pr_err("%s: fman_port_cfg_buf_prefix_content failed\n",
                       __func__);
+               return err;
+       }
 
        err = fman_port_init(port);
        if (err)
                pr_err("%s: fm_port_init failed\n", __func__);
+
+       return err;
 }
 
-static void dpaa_eth_init_ports(struct mac_device *mac_dev,
-                               struct dpaa_bp **bps, size_t count,
-                               struct fm_port_fqs *port_fqs,
-                               struct dpaa_buffer_layout *buf_layout,
-                               struct device *dev)
+static int dpaa_eth_init_ports(struct mac_device *mac_dev,
+                              struct dpaa_bp **bps, size_t count,
+                              struct fm_port_fqs *port_fqs,
+                              struct dpaa_buffer_layout *buf_layout,
+                              struct device *dev)
 {
        struct fman_port *rxport = mac_dev->port[RX];
        struct fman_port *txport = mac_dev->port[TX];
+       int err;
+
+       err = dpaa_eth_init_tx_port(txport, port_fqs->tx_errq,
+                                   port_fqs->tx_defq, &buf_layout[TX]);
+       if (err)
+               return err;
+
+       err = dpaa_eth_init_rx_port(rxport, bps, count, port_fqs->rx_errq,
+                                   port_fqs->rx_defq, &buf_layout[RX]);
 
-       dpaa_eth_init_tx_port(txport, port_fqs->tx_errq,
-                             port_fqs->tx_defq, &buf_layout[TX]);
-       dpaa_eth_init_rx_port(rxport, bps, count, port_fqs->rx_errq,
-                             port_fqs->rx_defq, &buf_layout[RX]);
+       return err;
 }
 
 static int dpaa_bman_release(const struct dpaa_bp *dpaa_bp,
@@ -1526,6 +1616,23 @@ static struct sk_buff *dpaa_cleanup_tx_fd(const struct dpaa_priv *priv,
        return skb;
 }
 
+static u8 rx_csum_offload(const struct dpaa_priv *priv, const struct qm_fd *fd)
+{
+       /* The parser has run and performed L4 checksum validation.
+        * We know there were no parser errors (and implicitly no
+        * L4 csum error), otherwise we wouldn't be here.
+        */
+       if ((priv->net_dev->features & NETIF_F_RXCSUM) &&
+           (be32_to_cpu(fd->status) & FM_FD_STAT_L4CV))
+               return CHECKSUM_UNNECESSARY;
+
+       /* We're here because either the parser didn't run or the L4 checksum
+        * was not verified. This may include the case of a UDP frame with
+        * checksum zero or an L4 proto other than TCP/UDP.
+        */
+       return CHECKSUM_NONE;
+}
+
 /* Build a linear skb around the received buffer.
  * We are guaranteed there is enough room at the end of the data buffer to
  * accommodate the shared info area of the skb.
@@ -1556,7 +1663,7 @@ static struct sk_buff *contig_fd_to_skb(const struct dpaa_priv *priv,
        skb_reserve(skb, fd_off);
        skb_put(skb, qm_fd_get_length(fd));
 
-       skb->ip_summed = CHECKSUM_NONE;
+       skb->ip_summed = rx_csum_offload(priv, fd);
 
        return skb;
 
@@ -1616,7 +1723,7 @@ static struct sk_buff *sg_fd_to_skb(const struct dpaa_priv *priv,
                        if (WARN_ON(unlikely(!skb)))
                                goto free_buffers;
 
-                       skb->ip_summed = CHECKSUM_NONE;
+                       skb->ip_summed = rx_csum_offload(priv, fd);
 
                        /* Make sure forwarded skbs will have enough space
                         * on Tx, if extra headers are added.
@@ -2093,7 +2200,7 @@ static enum qman_cb_dqrr_result rx_default_dqrr(struct qman_portal *portal,
        dma_addr_t addr = qm_fd_addr(fd);
        enum qm_fd_format fd_format;
        struct net_device *net_dev;
-       u32 fd_status = fd->status;
+       u32 fd_status;
        struct dpaa_bp *dpaa_bp;
        struct dpaa_priv *priv;
        unsigned int skb_len;
@@ -2350,6 +2457,7 @@ static const struct net_device_ops dpaa_ops = {
        .ndo_validate_addr = eth_validate_addr,
        .ndo_set_rx_mode = dpaa_set_rx_mode,
        .ndo_do_ioctl = dpaa_ioctl,
+       .ndo_setup_tc = dpaa_setup_tc,
 };
 
 static int dpaa_napi_add(struct net_device *net_dev)
@@ -2624,8 +2732,10 @@ static int dpaa_eth_probe(struct platform_device *pdev)
        priv->rx_headroom = dpaa_get_headroom(&priv->buf_layout[RX]);
 
        /* All real interfaces need their ports initialized */
-       dpaa_eth_init_ports(mac_dev, dpaa_bps, DPAA_BPS_NUM, &port_fqs,
-                           &priv->buf_layout[0], dev);
+       err = dpaa_eth_init_ports(mac_dev, dpaa_bps, DPAA_BPS_NUM, &port_fqs,
+                                 &priv->buf_layout[0], dev);
+       if (err)
+               goto init_ports_failed;
 
        priv->percpu_priv = devm_alloc_percpu(dev, *priv->percpu_priv);
        if (!priv->percpu_priv) {
@@ -2638,6 +2748,9 @@ static int dpaa_eth_probe(struct platform_device *pdev)
                memset(percpu_priv, 0, sizeof(*percpu_priv));
        }
 
+       priv->num_tc = 1;
+       netif_set_real_num_tx_queues(net_dev, priv->num_tc * DPAA_TC_TXQ_NUM);
+
        /* Initialize NAPI */
        err = dpaa_napi_add(net_dev);
        if (err < 0)
@@ -2658,6 +2771,7 @@ netdev_init_failed:
 napi_add_failed:
        dpaa_napi_del(net_dev);
 alloc_percpu_failed:
+init_ports_failed:
        dpaa_fq_free(dev, &priv->dpaa_fq_list);
 fq_alloc_failed:
        qman_delete_cgr_safe(&priv->ingress_cgr);
index 1f9aebf3f3c514517816ae92e379176c6861f219..9941a7866ebea43b8153c8a4150dc17f52e3afb7 100644 (file)
 #include "mac.h"
 #include "dpaa_eth_trace.h"
 
-#define DPAA_ETH_TXQ_NUM       NR_CPUS
+/* Number of prioritised traffic classes */
+#define DPAA_TC_NUM            4
+/* Number of Tx queues per traffic class */
+#define DPAA_TC_TXQ_NUM                NR_CPUS
+/* Total number of Tx queues */
+#define DPAA_ETH_TXQ_NUM       (DPAA_TC_NUM * DPAA_TC_TXQ_NUM)
 
 #define DPAA_BPS_NUM 3 /* number of bpools per interface */
 
@@ -152,6 +157,7 @@ struct dpaa_priv {
        u16 channel;
        struct list_head dpaa_fq_list;
 
+       u8 num_tc;
        u32 msg_enable; /* net_device message level */
 
        struct {
index 91a16641e8514b7f1f99be03fe280d71e54de37d..a92bf94f8e94321899c28da6d36d6b9d30d571a5 100644 (file)
@@ -117,8 +117,9 @@ static struct platform_device_id fec_devtype[] = {
                .name = "imx6ul-fec",
                .driver_data = FEC_QUIRK_ENET_MAC | FEC_QUIRK_HAS_GBIT |
                                FEC_QUIRK_HAS_BUFDESC_EX | FEC_QUIRK_HAS_CSUM |
-                               FEC_QUIRK_HAS_VLAN | FEC_QUIRK_BUG_CAPTURE |
-                               FEC_QUIRK_HAS_RACC | FEC_QUIRK_HAS_COALESCE,
+                               FEC_QUIRK_HAS_VLAN | FEC_QUIRK_ERR007885 |
+                               FEC_QUIRK_BUG_CAPTURE | FEC_QUIRK_HAS_RACC |
+                               FEC_QUIRK_HAS_COALESCE,
        }, {
                /* sentinel */
        }
@@ -235,14 +236,14 @@ static struct bufdesc *fec_enet_get_nextdesc(struct bufdesc *bdp,
                                             struct bufdesc_prop *bd)
 {
        return (bdp >= bd->last) ? bd->base
-                       : (struct bufdesc *)(((unsigned)bdp) + bd->dsize);
+                       : (struct bufdesc *)(((void *)bdp) + bd->dsize);
 }
 
 static struct bufdesc *fec_enet_get_prevdesc(struct bufdesc *bdp,
                                             struct bufdesc_prop *bd)
 {
        return (bdp <= bd->base) ? bd->last
-                       : (struct bufdesc *)(((unsigned)bdp) - bd->dsize);
+                       : (struct bufdesc *)(((void *)bdp) - bd->dsize);
 }
 
 static int fec_enet_get_bd_index(struct bufdesc *bdp,
@@ -1266,7 +1267,7 @@ skb_done:
                }
        }
 
-       /* ERR006538: Keep the transmitter going */
+       /* ERR006358: Keep the transmitter going */
        if (bdp != txq->bd.cur &&
            readl(txq->bd.reg_desc_active) == 0)
                writel(0, txq->bd.reg_desc_active);
@@ -2651,7 +2652,7 @@ static void fec_enet_free_queue(struct net_device *ndev)
        for (i = 0; i < fep->num_tx_queues; i++)
                if (fep->tx_queue[i] && fep->tx_queue[i]->tso_hdrs) {
                        txq = fep->tx_queue[i];
-                       dma_free_coherent(NULL,
+                       dma_free_coherent(&fep->pdev->dev,
                                          txq->bd.ring_size * TSO_HEADER_SIZE,
                                          txq->tso_hdrs,
                                          txq->tso_hdrs_dma);
@@ -2685,7 +2686,7 @@ static int fec_enet_alloc_queue(struct net_device *ndev)
                txq->tx_wake_threshold =
                        (txq->bd.ring_size - txq->tx_stop_threshold) / 2;
 
-               txq->tso_hdrs = dma_alloc_coherent(NULL,
+               txq->tso_hdrs = dma_alloc_coherent(&fep->pdev->dev,
                                        txq->bd.ring_size * TSO_HEADER_SIZE,
                                        &txq->tso_hdrs_dma,
                                        GFP_KERNEL);
@@ -3187,7 +3188,7 @@ static int fec_enet_init(struct net_device *ndev)
 }
 
 #ifdef CONFIG_OF
-static void fec_reset_phy(struct platform_device *pdev)
+static int fec_reset_phy(struct platform_device *pdev)
 {
        int err, phy_reset;
        bool active_high = false;
@@ -3195,16 +3196,18 @@ static void fec_reset_phy(struct platform_device *pdev)
        struct device_node *np = pdev->dev.of_node;
 
        if (!np)
-               return;
+               return 0;
 
-       of_property_read_u32(np, "phy-reset-duration", &msec);
+       err = of_property_read_u32(np, "phy-reset-duration", &msec);
        /* A sane reset duration should not be longer than 1s */
-       if (msec > 1000)
+       if (!err && msec > 1000)
                msec = 1;
 
        phy_reset = of_get_named_gpio(np, "phy-reset-gpios", 0);
-       if (!gpio_is_valid(phy_reset))
-               return;
+       if (phy_reset == -EPROBE_DEFER)
+               return phy_reset;
+       else if (!gpio_is_valid(phy_reset))
+               return 0;
 
        active_high = of_property_read_bool(np, "phy-reset-active-high");
 
@@ -3213,7 +3216,7 @@ static void fec_reset_phy(struct platform_device *pdev)
                        "phy-reset");
        if (err) {
                dev_err(&pdev->dev, "failed to get phy-reset-gpios: %d\n", err);
-               return;
+               return err;
        }
 
        if (msec > 20)
@@ -3222,14 +3225,17 @@ static void fec_reset_phy(struct platform_device *pdev)
                usleep_range(msec * 1000, msec * 1000 + 1000);
 
        gpio_set_value_cansleep(phy_reset, !active_high);
+
+       return 0;
 }
 #else /* CONFIG_OF */
-static void fec_reset_phy(struct platform_device *pdev)
+static int fec_reset_phy(struct platform_device *pdev)
 {
        /*
         * In case of platform probe, the reset has been done
         * by machine code.
         */
+       return 0;
 }
 #endif /* CONFIG_OF */
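
Propagating -EPROBE_DEFER out of fec_reset_phy() (and now out of probe) lets the driver core retry the whole probe once the GPIO controller backing phy-reset-gpios has itself been probed. The general pattern, shown for a hypothetical consumer:

#include <linux/gpio.h>
#include <linux/of_gpio.h>
#include <linux/platform_device.h>

/* Sketch: defer probe until the reset GPIO's provider shows up. */
static int demo_probe(struct platform_device *pdev)
{
	int gpio = of_get_named_gpio(pdev->dev.of_node,
				     "phy-reset-gpios", 0);

	if (gpio == -EPROBE_DEFER)
		return gpio;	/* core re-queues this probe later */
	if (!gpio_is_valid(gpio))
		return 0;	/* property absent: nothing to do */

	/* ...request and toggle the GPIO as fec_reset_phy() does... */
	return 0;
}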
 
@@ -3400,6 +3406,7 @@ fec_probe(struct platform_device *pdev)
                if (ret) {
                        dev_err(&pdev->dev,
                                "Failed to enable phy regulator: %d\n", ret);
+                       clk_disable_unprepare(fep->clk_ipg);
                        goto failed_regulator;
                }
        } else {
@@ -3412,7 +3419,9 @@ fec_probe(struct platform_device *pdev)
        pm_runtime_set_active(&pdev->dev);
        pm_runtime_enable(&pdev->dev);
 
-       fec_reset_phy(pdev);
+       ret = fec_reset_phy(pdev);
+       if (ret)
+               goto failed_reset;
 
        if (fep->bufdesc_ex)
                fec_ptp_init(pdev);
@@ -3473,8 +3482,10 @@ failed_init:
        fec_ptp_stop(pdev);
        if (fep->reg_phy)
                regulator_disable(fep->reg_phy);
+failed_reset:
+       pm_runtime_put(&pdev->dev);
+       pm_runtime_disable(&pdev->dev);
 failed_regulator:
-       clk_disable_unprepare(fep->clk_ipg);
 failed_clk_ipg:
        fec_enet_clk_enable(ndev, false);
 failed_clk:
index f60845f0c6cad060b193fecdf00dd3a93127fb9c..4aefe24389695457ee307a9eb3403715091d283c 100644 (file)
@@ -59,6 +59,7 @@
 #define DMA_OFFSET             0x000C2000
 #define FPM_OFFSET             0x000C3000
 #define IMEM_OFFSET            0x000C4000
+#define HWP_OFFSET             0x000C7000
 #define CGP_OFFSET             0x000DB000
 
 /* Exceptions bit map */
 
 #define QMI_GS_HALT_NOT_BUSY           0x00000002
 
+/* HWP defines */
+#define HWP_RPIMAC_PEN                 0x00000001
+
 /* IRAM defines */
 #define IRAM_IADD_AIE                  0x80000000
 #define IRAM_READY                     0x80000000
@@ -475,6 +479,12 @@ struct fman_dma_regs {
        u32 res00e0[0x400 - 56];
 };
 
+struct fman_hwp_regs {
+       u32 res0000[0x844 / 4];         /* 0x000..0x843 */
+       u32 fmprrpimac; /* FM Parser Internal memory access control */
+       u32 res[(0x1000 - 0x848) / 4];  /* 0x848..0xFFF */
+};
+
 /* Structure that holds current FMan state.
  * Used for saving run time information.
  */
@@ -606,6 +616,7 @@ struct fman {
        struct fman_bmi_regs __iomem *bmi_regs;
        struct fman_qmi_regs __iomem *qmi_regs;
        struct fman_dma_regs __iomem *dma_regs;
+       struct fman_hwp_regs __iomem *hwp_regs;
        fman_exceptions_cb *exception_cb;
        fman_bus_error_cb *bus_error_cb;
        /* Spinlock for FMan use */
@@ -999,6 +1010,12 @@ static void qmi_init(struct fman_qmi_regs __iomem *qmi_rg,
        iowrite32be(tmp_reg, &qmi_rg->fmqm_ien);
 }
 
+static void hwp_init(struct fman_hwp_regs __iomem *hwp_rg)
+{
+       /* enable HW Parser */
+       iowrite32be(HWP_RPIMAC_PEN, &hwp_rg->fmprrpimac);
+}
+
 static int enable(struct fman *fman, struct fman_cfg *cfg)
 {
        u32 cfg_reg = 0;
@@ -1195,7 +1212,7 @@ static int fill_soc_specific_params(struct fman_state_struct *state)
                state->max_num_of_open_dmas     = 32;
                state->fm_port_num_of_cg        = 256;
                state->num_of_rx_ports  = 6;
-               state->total_fifo_size  = 122 * 1024;
+               state->total_fifo_size  = 136 * 1024;
                break;
 
        case 2:
@@ -1793,6 +1810,7 @@ static int fman_config(struct fman *fman)
        fman->bmi_regs = base_addr + BMI_OFFSET;
        fman->qmi_regs = base_addr + QMI_OFFSET;
        fman->dma_regs = base_addr + DMA_OFFSET;
+       fman->hwp_regs = base_addr + HWP_OFFSET;
        fman->base_addr = base_addr;
 
        spin_lock_init(&fman->spinlock);
@@ -2062,6 +2080,9 @@ static int fman_init(struct fman *fman)
        /* Init QMI Registers */
        qmi_init(fman->qmi_regs, fman->cfg);
 
+       /* Init HW Parser */
+       hwp_init(fman->hwp_regs);
+
        err = enable(fman, cfg);
        if (err != 0)
                return err;
index 57aae8d17d7710a9392fb91f7dee67904189d1ba..f53e1473dbccd667bebf8701a9b7c4cfac871663 100644 (file)
@@ -134,14 +134,14 @@ enum fman_exceptions {
 struct fman_prs_result {
        u8 lpid;                /* Logical port id */
        u8 shimr;               /* Shim header result  */
-       u16 l2r;                /* Layer 2 result */
-       u16 l3r;                /* Layer 3 result */
+       __be16 l2r;             /* Layer 2 result */
+       __be16 l3r;             /* Layer 3 result */
        u8 l4r;         /* Layer 4 result */
        u8 cplan;               /* Classification plan id */
-       u16 nxthdr;             /* Next Header  */
-       u16 cksum;              /* Running-sum */
+       __be16 nxthdr;          /* Next Header  */
+       __be16 cksum;           /* Running-sum */
        /* Flags&fragment-offset field of the last IP-header */
-       u16 flags_frag_off;
+       __be16 flags_frag_off;
        /* Routing type field of a IPV6 routing extension header */
        u8 route_type;
        /* Routing Extension Header Present; last bit is IP valid */
index 84ea130eed365b405655650e7999351135b533e5..98bba10fc38c1a5916108fc0ec4b1f6f136f0032 100644 (file)
@@ -381,6 +381,9 @@ static int init(struct dtsec_regs __iomem *regs, struct dtsec_cfg *cfg,
 
        /* check RGMII support */
        if (iface == PHY_INTERFACE_MODE_RGMII ||
+           iface == PHY_INTERFACE_MODE_RGMII_ID ||
+           iface == PHY_INTERFACE_MODE_RGMII_RXID ||
+           iface == PHY_INTERFACE_MODE_RGMII_TXID ||
            iface == PHY_INTERFACE_MODE_RMII)
                if (tmp & DTSEC_ID2_INT_REDUCED_OFF)
                        return -EINVAL;
@@ -390,7 +393,10 @@ static int init(struct dtsec_regs __iomem *regs, struct dtsec_cfg *cfg,
                if (tmp & DTSEC_ID2_INT_REDUCED_OFF)
                        return -EINVAL;
 
-       is_rgmii = iface == PHY_INTERFACE_MODE_RGMII;
+       is_rgmii = iface == PHY_INTERFACE_MODE_RGMII ||
+                  iface == PHY_INTERFACE_MODE_RGMII_ID ||
+                  iface == PHY_INTERFACE_MODE_RGMII_RXID ||
+                  iface == PHY_INTERFACE_MODE_RGMII_TXID;
        is_sgmii = iface == PHY_INTERFACE_MODE_SGMII;
        is_qsgmii = iface == PHY_INTERFACE_MODE_QSGMII;
 
index cd6a53eaf1614f7ffbeaadc7bbf6b28d8163e4c0..c0296880feba7f1afa505d4f9c08003e496de429 100644 (file)
@@ -443,7 +443,10 @@ static int init(struct memac_regs __iomem *regs, struct memac_cfg *cfg,
                break;
        default:
                tmp |= IF_MODE_GMII;
-               if (phy_if == PHY_INTERFACE_MODE_RGMII)
+               if (phy_if == PHY_INTERFACE_MODE_RGMII ||
+                   phy_if == PHY_INTERFACE_MODE_RGMII_ID ||
+                   phy_if == PHY_INTERFACE_MODE_RGMII_RXID ||
+                   phy_if == PHY_INTERFACE_MODE_RGMII_TXID)
                        tmp |= IF_MODE_RGMII | IF_MODE_RGMII_AUTO;
        }
        iowrite32be(tmp, &regs->if_mode);
index 173d8e0fd71668afe4292a506ef120d853f249f5..c4a66469a9074daab469f304751acc06df8f9839 100644 (file)
@@ -36,6 +36,7 @@
 #include "fman_mac.h"
 
 #include <linux/netdevice.h>
+#include <linux/phy_fixed.h>
 
 struct fman_mac *memac_config(struct fman_mac_params *params);
 int memac_set_promiscuous(struct fman_mac *memac, bool new_val);
index 9f3bb50a23651a4fd9edf51ff2d2fc29d321cae5..57bf44fa16a10ad54e1f56fa5cb5304d3039ed2c 100644 (file)
@@ -62,6 +62,7 @@
 
 #define BMI_PORT_REGS_OFFSET                           0
 #define QMI_PORT_REGS_OFFSET                           0x400
+#define HWP_PORT_REGS_OFFSET                           0x800
 
 /* Default values */
 #define DFLT_PORT_BUFFER_PREFIX_CONTEXT_DATA_ALIGN             \
 #define NIA_ENG_BMI                                    0x00500000
 #define NIA_ENG_QMI_ENQ                                        0x00540000
 #define NIA_ENG_QMI_DEQ                                        0x00580000
-
+#define NIA_ENG_HWP                                    0x00440000
 #define NIA_BMI_AC_ENQ_FRAME                           0x00000002
 #define NIA_BMI_AC_TX_RELEASE                          0x000002C0
 #define NIA_BMI_AC_RELEASE                             0x000000C0
@@ -317,6 +318,19 @@ struct fman_port_qmi_regs {
        u32 fmqm_pndcc;         /* PortID n Dequeue Confirm Counter */
 };
 
+#define HWP_HXS_COUNT 16
+#define HWP_HXS_PHE_REPORT 0x00000800
+#define HWP_HXS_PCAC_PSTAT 0x00000100
+#define HWP_HXS_PCAC_PSTOP 0x00000001
+struct fman_port_hwp_regs {
+       struct {
+               u32 ssa; /* Soft Sequence Attachment */
+               u32 lcv; /* Line-up Enable Confirmation Mask */
+       } pmda[HWP_HXS_COUNT]; /* Parse Memory Direct Access Registers */
+       u32 reserved080[(0x3f8 - 0x080) / 4]; /* (0x080-0x3f7) */
+       u32 fmpr_pcac; /* Configuration Access Control */
+};
+
 /* QMI dequeue prefetch modes */
 enum fman_port_deq_prefetch {
        FMAN_PORT_DEQ_NO_PREFETCH, /* No prefetch mode */
@@ -436,6 +450,7 @@ struct fman_port {
 
        union fman_port_bmi_regs __iomem *bmi_regs;
        struct fman_port_qmi_regs __iomem *qmi_regs;
+       struct fman_port_hwp_regs __iomem *hwp_regs;
 
        struct fman_sp_buffer_offsets buffer_offsets;
 
@@ -521,9 +536,12 @@ static int init_bmi_rx(struct fman_port *port)
        /* NIA */
        tmp = (u32)cfg->rx_fd_bits << BMI_NEXT_ENG_FD_BITS_SHIFT;
 
-       tmp |= NIA_ENG_BMI | NIA_BMI_AC_ENQ_FRAME;
+       tmp |= NIA_ENG_HWP;
        iowrite32be(tmp, &regs->fmbm_rfne);
 
+       /* Parser Next Engine NIA */
+       iowrite32be(NIA_ENG_BMI | NIA_BMI_AC_ENQ_FRAME, &regs->fmbm_rfpne);
+
        /* Enqueue NIA */
        iowrite32be(NIA_ENG_QMI_ENQ | NIA_ORDER_RESTOR, &regs->fmbm_rfene);
 
@@ -665,6 +683,50 @@ static int init_qmi(struct fman_port *port)
        return 0;
 }
 
+static void stop_port_hwp(struct fman_port *port)
+{
+       struct fman_port_hwp_regs __iomem *regs = port->hwp_regs;
+       int cnt = 100;
+
+       iowrite32be(HWP_HXS_PCAC_PSTOP, &regs->fmpr_pcac);
+
+       while (cnt-- > 0 &&
+              (ioread32be(&regs->fmpr_pcac) & HWP_HXS_PCAC_PSTAT))
+               udelay(10);
+       if (cnt < 0)
+               pr_err("Timeout stopping HW Parser\n");
+}
+
+static void start_port_hwp(struct fman_port *port)
+{
+       struct fman_port_hwp_regs __iomem *regs = port->hwp_regs;
+       int cnt = 100;
+
+       iowrite32be(0, &regs->fmpr_pcac);
+
+       while (cnt-- > 0 &&
+              !(ioread32be(&regs->fmpr_pcac) & HWP_HXS_PCAC_PSTAT))
+               udelay(10);
+       if (cnt < 0)
+               pr_err("Timeout starting HW Parser\n");
+}
+
+static void init_hwp(struct fman_port *port)
+{
+       struct fman_port_hwp_regs __iomem *regs = port->hwp_regs;
+       int i;
+
+       stop_port_hwp(port);
+
+       for (i = 0; i < HWP_HXS_COUNT; i++) {
+               /* enable HXS error reporting into FD[STATUS] PHE */
+               iowrite32be(0x00000000, &regs->pmda[i].ssa);
+               iowrite32be(0xffffffff, &regs->pmda[i].lcv);
+       }
+
+       start_port_hwp(port);
+}
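
The bounded udelay() loops in stop_port_hwp() and start_port_hwp() are the idiom the read*_poll_timeout helpers in <linux/iopoll.h> capture. Assuming atomic context is acceptable here, the stop side could be written as (a sketch, not the patch's code):

#include <linux/iopoll.h>

static void stop_port_hwp_alt(struct fman_port *port)
{
	struct fman_port_hwp_regs __iomem *regs = port->hwp_regs;
	u32 val;

	iowrite32be(HWP_HXS_PCAC_PSTOP, &regs->fmpr_pcac);

	/* same 100 x 10us budget as the open-coded loop above */
	if (readx_poll_timeout_atomic(ioread32be, &regs->fmpr_pcac, val,
				      !(val & HWP_HXS_PCAC_PSTAT), 10, 1000))
		pr_err("Timeout stopping HW Parser\n");
}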
+
 static int init(struct fman_port *port)
 {
        int err;
@@ -673,6 +735,8 @@ static int init(struct fman_port *port)
        switch (port->port_type) {
        case FMAN_PORT_TYPE_RX:
                err = init_bmi_rx(port);
+               if (!err)
+                       init_hwp(port);
                break;
        case FMAN_PORT_TYPE_TX:
                err = init_bmi_tx(port);
@@ -686,7 +750,8 @@ static int init(struct fman_port *port)
 
        /* Init QMI registers */
        err = init_qmi(port);
-       return err;
+       if (err)
+               return err;
 
        return 0;
 }
@@ -1247,7 +1312,7 @@ int fman_port_config(struct fman_port *port, struct fman_port_params *params)
        /* Allocate the FM driver's parameters structure */
        port->cfg = kzalloc(sizeof(*port->cfg), GFP_KERNEL);
        if (!port->cfg)
-               goto err_params;
+               return -EINVAL;
 
        /* Initialize FM port parameters which will be kept by the driver */
        port->port_type = port->dts_params.type;
@@ -1276,6 +1341,7 @@ int fman_port_config(struct fman_port *port, struct fman_port_params *params)
        /* set memory map pointers */
        port->bmi_regs = base_addr + BMI_PORT_REGS_OFFSET;
        port->qmi_regs = base_addr + QMI_PORT_REGS_OFFSET;
+       port->hwp_regs = base_addr + HWP_PORT_REGS_OFFSET;
 
        port->max_frame_length = DFLT_PORT_MAX_FRAME_LENGTH;
        /* resource distribution. */
@@ -1327,8 +1393,6 @@ int fman_port_config(struct fman_port *port, struct fman_port_params *params)
 
 err_port_cfg:
        kfree(port->cfg);
-err_params:
-       kfree(port);
        return -EINVAL;
 }
 EXPORT_SYMBOL(fman_port_config);
index db9c0bcf54cd9308cfac3533e2603f21955b99b5..1fc27c97e3b23205fe3466ddf9f5bb45f72faf09 100644 (file)
 #include <asm/irq.h>
 #include <linux/uaccess.h>
 
-#ifdef CONFIG_8xx
-#include <asm/8xx_immap.h>
-#include <asm/pgtable.h>
-#include <asm/cpm1.h>
-#endif
-
 #include "fs_enet.h"
 #include "fec.h"
 
index 96d44cf44fe09f4e3d8b5ff0270869a92315ad59..64300ac13e0253452ef885fcd786336de61929c9 100644 (file)
 #include <asm/irq.h>
 #include <linux/uaccess.h>
 
-#ifdef CONFIG_8xx
-#include <asm/8xx_immap.h>
-#include <asm/pgtable.h>
-#include <asm/cpm1.h>
-#endif
-
 #include "fs_enet.h"
 
 /*************************************************/
index b6ed818f78fffe21ee2b4c385c7c6222bc5df9f3..9d9b6e6dd9884fdb835e80043f2cdc83d5283bbf 100644 (file)
@@ -9,9 +9,9 @@
 
 #include <linux/dma-mapping.h>
 #include <linux/interrupt.h>
+#include <linux/of.h>
 #include <linux/skbuff.h>
 #include <linux/slab.h>
-
 #include "hnae.h"
 
 #define cls_to_ae_dev(dev) container_of(dev, struct hnae_ae_dev, cls_dev)
@@ -57,11 +57,15 @@ static int hnae_alloc_buffer(struct hnae_ring *ring, struct hnae_desc_cb *cb)
 
 static void hnae_free_buffer(struct hnae_ring *ring, struct hnae_desc_cb *cb)
 {
+       if (unlikely(!cb->priv))
+               return;
+
        if (cb->type == DESC_TYPE_SKB)
                dev_kfree_skb_any((struct sk_buff *)cb->priv);
        else if (unlikely(is_rx_ring(ring)))
                put_page((struct page *)cb->priv);
-       memset(cb, 0, sizeof(*cb));
+
+       cb->priv = NULL;
 }
 
 static int hnae_map_buffer(struct hnae_ring *ring, struct hnae_desc_cb *cb)
@@ -197,6 +201,7 @@ hnae_init_ring(struct hnae_queue *q, struct hnae_ring *ring, int flags)
 
        ring->q = q;
        ring->flags = flags;
+       spin_lock_init(&ring->lock);
        assert(!ring->desc && !ring->desc_cb && !ring->desc_dma_addr);
 
        /* whether for the tx or rx ring, ntc and ntu both start from 0 */
index 8016854796fb7fbe4eacd5799ccf40810b72b008..04211ac73b36a3152b6642a4c797f738076bd601 100644 (file)
@@ -67,6 +67,8 @@ do { \
 #define AE_IS_VER1(ver) ((ver) == AE_VERSION_1)
 #define AE_NAME_SIZE 16
 
+#define BD_SIZE_2048_MAX_MTU   6000
+
 /* Some have said the RX and TX RCB formats should diverge in the future,
  * but they are the same for now...
  */
@@ -101,7 +103,6 @@ enum hnae_led_state {
 #define HNS_RX_FLAG_L4ID_TCP 0x1
 #define HNS_RX_FLAG_L4ID_SCTP 0x3
 
-
 #define HNS_TXD_ASID_S 0
 #define HNS_TXD_ASID_M (0xff << HNS_TXD_ASID_S)
 #define HNS_TXD_BUFNUM_S 8
@@ -273,6 +274,9 @@ struct hnae_ring {
        /* statistic */
        struct ring_stats stats;
 
+       /* ring lock for poll one */
+       spinlock_t lock;
+
        dma_addr_t desc_dma_addr;
        u32 buf_size;       /* size for hnae_desc->addr, preset by AE */
        u16 desc_num;       /* total number of desc */
@@ -483,11 +487,11 @@ struct hnae_ae_ops {
                              u32 auto_neg, u32 rx_en, u32 tx_en);
        void (*get_coalesce_usecs)(struct hnae_handle *handle,
                                   u32 *tx_usecs, u32 *rx_usecs);
-       void (*get_rx_max_coalesced_frames)(struct hnae_handle *handle,
-                                           u32 *tx_frames, u32 *rx_frames);
+       void (*get_max_coalesced_frames)(struct hnae_handle *handle,
+                                        u32 *tx_frames, u32 *rx_frames);
        int (*set_coalesce_usecs)(struct hnae_handle *handle, u32 timeout);
        int (*set_coalesce_frames)(struct hnae_handle *handle,
-                                  u32 coalesce_frames);
+                                  u32 tx_frames, u32 rx_frames);
        void (*get_coalesce_range)(struct hnae_handle *handle,
                                   u32 *tx_frames_low, u32 *rx_frames_low,
                                   u32 *tx_frames_high, u32 *rx_frames_high,
@@ -646,6 +650,41 @@ static inline void hnae_reuse_buffer(struct hnae_ring *ring, int i)
        ring->desc[i].rx.ipoff_bnum_pid_flag = 0;
 }
 
+/* when the buffer size is reinitialized, reinit the buffer descriptors */
+static inline void hnae_reinit_all_ring_desc(struct hnae_handle *h)
+{
+       int i, j;
+       struct hnae_ring *ring;
+
+       for (i = 0; i < h->q_num; i++) {
+               ring = &h->qs[i]->rx_ring;
+               for (j = 0; j < ring->desc_num; j++)
+                       ring->desc[j].addr = cpu_to_le64(ring->desc_cb[j].dma);
+       }
+
+       wmb();  /* commit all data before submit */
+}
+
+/* when reinit buffer size, we should reinit page offset */
+static inline void hnae_reinit_all_ring_page_off(struct hnae_handle *h)
+{
+       int i, j;
+       struct hnae_ring *ring;
+
+       for (i = 0; i < h->q_num; i++) {
+               ring = &h->qs[i]->rx_ring;
+               for (j = 0; j < ring->desc_num; j++) {
+                       ring->desc_cb[j].page_offset = 0;
+                       if (ring->desc[j].addr !=
+                           cpu_to_le64(ring->desc_cb[j].dma))
+                               ring->desc[j].addr =
+                                       cpu_to_le64(ring->desc_cb[j].dma);
+               }
+       }
+
+       wmb();  /* commit all data before submit */
+}
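
The wmb() in both helpers publishes the rewritten descriptors before whatever doorbell write the caller issues next. For pure CPU-to-device descriptor ordering, dma_wmb() is the documented lighter-weight barrier; the requirement in miniature (the tail register and io_base field are hypothetical):

	/* publish the descriptor update, then ring the doorbell */
	ring->desc[i].addr = cpu_to_le64(ring->desc_cb[i].dma);
	dma_wmb();	/* descriptor visible to device before the kick */
	writel(i, ring->io_base + RING_TAIL_REG);	/* hypothetical */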
+
 #define hnae_set_field(origin, mask, shift, val) \
        do { \
                (origin) &= (~(mask)); \
index 0a9cdf00b31afa9608414a4ad3de3089e4f61d04..ff864a187d5a71fa277f2907659621b9f87ffdcf 100644 (file)
@@ -267,8 +267,32 @@ static int hns_ae_clr_multicast(struct hnae_handle *handle)
 static int hns_ae_set_mtu(struct hnae_handle *handle, int new_mtu)
 {
        struct hns_mac_cb *mac_cb = hns_get_mac_cb(handle);
+       struct hnae_queue *q;
+       u32 rx_buf_size;
+       int i, ret;
+
+       /* when buf_size is 2048, the max MTU is 6K: an RX frame may
+        * span at most 3 BDs
+        */
+       if (!AE_IS_VER1(mac_cb->dsaf_dev->dsaf_ver)) {
+               if (new_mtu <= BD_SIZE_2048_MAX_MTU)
+                       rx_buf_size = 2048;
+               else
+                       rx_buf_size = 4096;
+       } else {
+               rx_buf_size = mac_cb->dsaf_dev->buf_size;
+       }
+
+       ret = hns_mac_set_mtu(mac_cb, new_mtu, rx_buf_size);
 
-       return hns_mac_set_mtu(mac_cb, new_mtu);
+       if (!ret) {
+               /* reinit ring buf_size */
+               for (i = 0; i < handle->q_num; i++) {
+                       q = handle->qs[i];
+                       q->rx_ring.buf_size = rx_buf_size;
+                       hns_rcb_set_rx_ring_bs(q, rx_buf_size);
+               }
+       }
+
+       return ret;
 }
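
The 6K cutoff follows from the ring geometry: a received frame may span at most 3 buffer descriptors, so 2048-byte buffers cover roughly 3 * 2048 = 6144 bytes, and BD_SIZE_2048_MAX_MTU (6000) leaves headroom for headers. The same arithmetic as a helper (illustrative only):

#define HNS_MAX_BD_PER_FRAME	3	/* per the comment above */

/* pick the smallest buffer size whose 3-BD span still fits the MTU */
static u32 hns_rx_buf_size_for(int new_mtu)
{
	return (new_mtu <= BD_SIZE_2048_MAX_MTU) ? 2048 : 4096;
}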
 
 static void hns_ae_set_tso_stats(struct hnae_handle *handle, int enable)
@@ -463,15 +487,21 @@ static void hns_ae_get_coalesce_usecs(struct hnae_handle *handle,
                                               ring_pair->port_id_in_comm);
 }
 
-static void hns_ae_get_rx_max_coalesced_frames(struct hnae_handle *handle,
-                                              u32 *tx_frames, u32 *rx_frames)
+static void hns_ae_get_max_coalesced_frames(struct hnae_handle *handle,
+                                           u32 *tx_frames, u32 *rx_frames)
 {
        struct ring_pair_cb *ring_pair =
                container_of(handle->qs[0], struct ring_pair_cb, q);
+       struct dsaf_device *dsaf_dev = hns_ae_get_dsaf_dev(handle->dev);
 
-       *tx_frames = hns_rcb_get_coalesced_frames(ring_pair->rcb_common,
-                                                 ring_pair->port_id_in_comm);
-       *rx_frames = hns_rcb_get_coalesced_frames(ring_pair->rcb_common,
+       if (AE_IS_VER1(dsaf_dev->dsaf_ver) ||
+           handle->port_type == HNAE_PORT_DEBUG)
+               *tx_frames = hns_rcb_get_rx_coalesced_frames(
+                       ring_pair->rcb_common, ring_pair->port_id_in_comm);
+       else
+               *tx_frames = hns_rcb_get_tx_coalesced_frames(
+                       ring_pair->rcb_common, ring_pair->port_id_in_comm);
+       *rx_frames = hns_rcb_get_rx_coalesced_frames(ring_pair->rcb_common,
                                                  ring_pair->port_id_in_comm);
 }
 
@@ -485,15 +515,34 @@ static int hns_ae_set_coalesce_usecs(struct hnae_handle *handle,
                ring_pair->rcb_common, ring_pair->port_id_in_comm, timeout);
 }
 
-static int  hns_ae_set_coalesce_frames(struct hnae_handle *handle,
-                                      u32 coalesce_frames)
+static int hns_ae_set_coalesce_frames(struct hnae_handle *handle,
+                                     u32 tx_frames, u32 rx_frames)
 {
+       int ret;
        struct ring_pair_cb *ring_pair =
                container_of(handle->qs[0], struct ring_pair_cb, q);
+       struct dsaf_device *dsaf_dev = hns_ae_get_dsaf_dev(handle->dev);
 
-       return hns_rcb_set_coalesced_frames(
-               ring_pair->rcb_common,
-               ring_pair->port_id_in_comm, coalesce_frames);
+       if (AE_IS_VER1(dsaf_dev->dsaf_ver) ||
+           handle->port_type == HNAE_PORT_DEBUG) {
+               if (tx_frames != rx_frames)
+                       return -EINVAL;
+               return hns_rcb_set_rx_coalesced_frames(
+                       ring_pair->rcb_common,
+                       ring_pair->port_id_in_comm, rx_frames);
+       } else {
+               if (tx_frames != 1)
+                       return -EINVAL;
+               ret = hns_rcb_set_tx_coalesced_frames(
+                       ring_pair->rcb_common,
+                       ring_pair->port_id_in_comm, tx_frames);
+               if (ret)
+                       return ret;
+
+               return hns_rcb_set_rx_coalesced_frames(
+                       ring_pair->rcb_common,
+                       ring_pair->port_id_in_comm, rx_frames);
+       }
 }
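
The tx/rx split above changes what an ethtool -C request will accept per hardware revision. A hedged sketch of the v2 policy, using the 1..1023 rx range from the HNS_RCB_*_FRAMES_LOW/HIGH constants added later in this patch:

#include <errno.h>

/* Sketch of the v2 rule enforced above: the tx frame threshold is fixed
 * at 1, while rx may use the 1..1023 range advertised through
 * HNS_RCB_RX_FRAMES_LOW/HIGH. */
static int check_coalesce_frames_v2(unsigned int tx_frames,
                                    unsigned int rx_frames)
{
        if (tx_frames != 1)
                return -EINVAL;
        if (rx_frames < 1 || rx_frames > 1023)
                return -EINVAL;
        return 0;
}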
 
 static void hns_ae_get_coalesce_range(struct hnae_handle *handle,
@@ -504,20 +553,27 @@ static void hns_ae_get_coalesce_range(struct hnae_handle *handle,
 {
        struct dsaf_device *dsaf_dev;
 
+       assert(handle);
+
        dsaf_dev = hns_ae_get_dsaf_dev(handle->dev);
 
-       *tx_frames_low  = HNS_RCB_MIN_COALESCED_FRAMES;
-       *rx_frames_low  = HNS_RCB_MIN_COALESCED_FRAMES;
-       *tx_frames_high =
-               (dsaf_dev->desc_num - 1 > HNS_RCB_MAX_COALESCED_FRAMES) ?
-               HNS_RCB_MAX_COALESCED_FRAMES : dsaf_dev->desc_num - 1;
-       *rx_frames_high =
-               (dsaf_dev->desc_num - 1 > HNS_RCB_MAX_COALESCED_FRAMES) ?
-                HNS_RCB_MAX_COALESCED_FRAMES : dsaf_dev->desc_num - 1;
-       *tx_usecs_low   = 0;
-       *rx_usecs_low   = 0;
-       *tx_usecs_high  = HNS_RCB_MAX_COALESCED_USECS;
-       *rx_usecs_high  = HNS_RCB_MAX_COALESCED_USECS;
+       *tx_frames_low  = HNS_RCB_TX_FRAMES_LOW;
+       *rx_frames_low  = HNS_RCB_RX_FRAMES_LOW;
+
+       if (AE_IS_VER1(dsaf_dev->dsaf_ver) ||
+           handle->port_type == HNAE_PORT_DEBUG)
+               *tx_frames_high =
+                       (dsaf_dev->desc_num - 1 > HNS_RCB_TX_FRAMES_HIGH) ?
+                       HNS_RCB_TX_FRAMES_HIGH : dsaf_dev->desc_num - 1;
+       else
+               *tx_frames_high = 1;
+
+       *rx_frames_high = (dsaf_dev->desc_num - 1 > HNS_RCB_RX_FRAMES_HIGH) ?
+               HNS_RCB_RX_FRAMES_HIGH : dsaf_dev->desc_num - 1;
+       *tx_usecs_low   = HNS_RCB_TX_USECS_LOW;
+       *rx_usecs_low   = HNS_RCB_RX_USECS_LOW;
+       *tx_usecs_high  = HNS_RCB_TX_USECS_HIGH;
+       *rx_usecs_high  = HNS_RCB_RX_USECS_HIGH;
 }
 
 void hns_ae_update_stats(struct hnae_handle *handle,
@@ -802,8 +858,9 @@ static int hns_ae_get_rss(struct hnae_handle *handle, u32 *indir, u8 *key,
                memcpy(key, ppe_cb->rss_key, HNS_PPEV2_RSS_KEY_SIZE);
 
        /* update the current hash->queue mappings from the shadow RSS table */
-       memcpy(indir, ppe_cb->rss_indir_table,
-              HNS_PPEV2_RSS_IND_TBL_SIZE * sizeof(*indir));
+       if (indir)
+               memcpy(indir, ppe_cb->rss_indir_table,
+                      HNS_PPEV2_RSS_IND_TBL_SIZE * sizeof(*indir));
 
        return 0;
 }
@@ -814,15 +871,19 @@ static int hns_ae_set_rss(struct hnae_handle *handle, const u32 *indir,
        struct hns_ppe_cb *ppe_cb = hns_get_ppe_cb(handle);
 
        /* set the RSS Hash Key if specified by the user */
-       if (key)
-               hns_ppe_set_rss_key(ppe_cb, (u32 *)key);
+       if (key) {
+               memcpy(ppe_cb->rss_key, key, HNS_PPEV2_RSS_KEY_SIZE);
+               hns_ppe_set_rss_key(ppe_cb, ppe_cb->rss_key);
+       }
 
-       /* update the shadow RSS table with user specified qids */
-       memcpy(ppe_cb->rss_indir_table, indir,
-              HNS_PPEV2_RSS_IND_TBL_SIZE * sizeof(*indir));
+       if (indir) {
+               /* update the shadow RSS table with user specified qids */
+               memcpy(ppe_cb->rss_indir_table, indir,
+                      HNS_PPEV2_RSS_IND_TBL_SIZE * sizeof(*indir));
 
-       /* now update the hardware */
-       hns_ppe_set_indir_table(ppe_cb, ppe_cb->rss_indir_table);
+               /* now update the hardware */
+               hns_ppe_set_indir_table(ppe_cb, ppe_cb->rss_indir_table);
+       }
 
        return 0;
 }
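
The NULL guards added here follow the ethtool get_rxfh/set_rxfh convention, where the caller may pass only the key or only the indirection table. A tiny sketch of the guarded copy, with a hypothetical table size standing in for HNS_PPEV2_RSS_IND_TBL_SIZE:

#include <stdint.h>
#include <string.h>

#define IND_TBL_SIZE 256 /* hypothetical stand-in for HNS_PPEV2_RSS_IND_TBL_SIZE */

/* Copy the shadow indirection table out only if a buffer was supplied. */
static void copy_indir(uint32_t *dst, const uint32_t *shadow)
{
        if (dst)
                memcpy(dst, shadow, IND_TBL_SIZE * sizeof(*dst));
}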
@@ -846,7 +907,7 @@ static struct hnae_ae_ops hns_dsaf_ops = {
        .get_autoneg = hns_ae_get_autoneg,
        .set_pauseparam = hns_ae_set_pauseparam,
        .get_coalesce_usecs = hns_ae_get_coalesce_usecs,
-       .get_rx_max_coalesced_frames = hns_ae_get_rx_max_coalesced_frames,
+       .get_max_coalesced_frames = hns_ae_get_max_coalesced_frames,
        .set_coalesce_usecs = hns_ae_set_coalesce_usecs,
        .set_coalesce_frames = hns_ae_set_coalesce_frames,
        .get_coalesce_range = hns_ae_get_coalesce_range,
index 3382441fe7b51e84bb5e815ffc1e4fa192b09a91..74bd260ca02a887869a507f8746dfc928522d4be 100644 (file)
@@ -86,12 +86,11 @@ static void hns_gmac_disable(void *mac_drv, enum mac_commom_mode mode)
                dsaf_set_dev_bit(drv, GMAC_PORT_EN_REG, GMAC_PORT_RX_EN_B, 0);
 }
 
-/**
-*hns_gmac_get_en - get port enable
-*@mac_drv:mac device
-*@rx:rx enable
-*@tx:tx enable
-*/
+/* hns_gmac_get_en - get port enable
+ * @mac_drv:mac device
+ * @rx:rx enable
+ * @tx:tx enable
+ */
 static void hns_gmac_get_en(void *mac_drv, u32 *rx, u32 *tx)
 {
        struct mac_driver *drv = (struct mac_driver *)mac_drv;
@@ -148,6 +147,17 @@ static void hns_gmac_config_max_frame_length(void *mac_drv, u16 newval)
                           GMAC_MAX_FRM_SIZE_S, newval);
 }
 
+static void hns_gmac_config_pad_and_crc(void *mac_drv, u8 newval)
+{
+       u32 tx_ctrl;
+       struct mac_driver *drv = (struct mac_driver *)mac_drv;
+
+       tx_ctrl = dsaf_read_dev(drv, GMAC_TRANSMIT_CONTROL_REG);
+       dsaf_set_bit(tx_ctrl, GMAC_TX_PAD_EN_B, !!newval);
+       dsaf_set_bit(tx_ctrl, GMAC_TX_CRC_ADD_B, !!newval);
+       dsaf_write_dev(drv, GMAC_TRANSMIT_CONTROL_REG, tx_ctrl);
+}
+
 static void hns_gmac_config_an_mode(void *mac_drv, u8 newval)
 {
        struct mac_driver *drv = (struct mac_driver *)mac_drv;
@@ -250,7 +260,6 @@ static void hns_gmac_get_pausefrm_cfg(void *mac_drv, u32 *rx_pause_en,
 static int hns_gmac_adjust_link(void *mac_drv, enum mac_speed speed,
                                u32 full_duplex)
 {
-       u32 tx_ctrl;
        struct mac_driver *drv = (struct mac_driver *)mac_drv;
 
        dsaf_set_dev_bit(drv, GMAC_DUPLEX_TYPE_REG,
@@ -279,14 +288,6 @@ static int hns_gmac_adjust_link(void *mac_drv, enum mac_speed speed,
                return -EINVAL;
        }
 
-       tx_ctrl = dsaf_read_dev(drv, GMAC_TRANSMIT_CONTROL_REG);
-       dsaf_set_bit(tx_ctrl, GMAC_TX_PAD_EN_B, 1);
-       dsaf_set_bit(tx_ctrl, GMAC_TX_CRC_ADD_B, 1);
-       dsaf_write_dev(drv, GMAC_TRANSMIT_CONTROL_REG, tx_ctrl);
-
-       dsaf_set_dev_bit(drv, GMAC_MODE_CHANGE_EN_REG,
-                        GMAC_MODE_CHANGE_EB_B, 1);
-
        return 0;
 }
 
@@ -325,6 +326,17 @@ static void hns_gmac_init(void *mac_drv)
        hns_gmac_tx_loop_pkt_dis(mac_drv);
        if (drv->mac_cb->mac_type == HNAE_PORT_DEBUG)
                hns_gmac_set_uc_match(mac_drv, 0);
+
+       hns_gmac_config_pad_and_crc(mac_drv, 1);
+
+       dsaf_set_dev_bit(drv, GMAC_MODE_CHANGE_EN_REG,
+                        GMAC_MODE_CHANGE_EB_B, 1);
+
+       /* reduce the gmac tx water line to avoid a gmac hang-up
+        * at 100M speed and half duplex.
+        */
+       dsaf_set_dev_field(drv, GMAC_TX_WATER_LINE_REG, GMAC_TX_WATER_LINE_MASK,
+                          GMAC_TX_WATER_LINE_SHIFT, 8);
 }
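
The water-line write uses the mask/shift field helpers whose shape is visible in the hnae_set_field macro at the top of this patch; a standalone model of that read-modify-write, with the mask and shift taken from the GMAC_TX_WATER_LINE_* defines added below:

#include <stdint.h>

#define TX_WATER_LINE_MASK  ((1UL << 8) - 1) /* GMAC_TX_WATER_LINE_MASK */
#define TX_WATER_LINE_SHIFT 0                /* GMAC_TX_WATER_LINE_SHIFT */

/* Clear the field's bits, then or in the new value at the field's shift;
 * dsaf_set_dev_field wraps this pattern around a register read/write. */
static uint32_t set_field(uint32_t reg, uint32_t mask, uint32_t shift,
                          uint32_t val)
{
        reg &= ~mask;
        reg |= (val << shift) & mask;
        return reg;
}

/* e.g. reg = set_field(reg, TX_WATER_LINE_MASK, TX_WATER_LINE_SHIFT, 8); */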
 
 void hns_gmac_update_stats(void *mac_drv)
@@ -453,24 +465,6 @@ static int hns_gmac_config_loopback(void *mac_drv, enum hnae_loop loop_mode,
        return 0;
 }
 
-static void hns_gmac_config_pad_and_crc(void *mac_drv, u8 newval)
-{
-       u32 tx_ctrl;
-       struct mac_driver *drv = (struct mac_driver *)mac_drv;
-
-       tx_ctrl = dsaf_read_dev(drv, GMAC_TRANSMIT_CONTROL_REG);
-       dsaf_set_bit(tx_ctrl, GMAC_TX_PAD_EN_B, !!newval);
-       dsaf_set_bit(tx_ctrl, GMAC_TX_CRC_ADD_B, !!newval);
-       dsaf_write_dev(drv, GMAC_TRANSMIT_CONTROL_REG, tx_ctrl);
-}
-
-static void hns_gmac_get_id(void *mac_drv, u8 *mac_id)
-{
-       struct mac_driver *drv = (struct mac_driver *)mac_drv;
-
-       *mac_id = drv->mac_id;
-}
-
 static void hns_gmac_get_info(void *mac_drv, struct mac_info *mac_info)
 {
        enum hns_gmac_duplex_mdoe duplex;
@@ -712,7 +706,6 @@ void *hns_gmac_config(struct hns_mac_cb *mac_cb, struct mac_params *mac_param)
        mac_drv->config_pad_and_crc = hns_gmac_config_pad_and_crc;
        mac_drv->config_half_duplex = hns_gmac_set_duplex_type;
        mac_drv->set_rx_ignore_pause_frames = hns_gmac_set_rx_auto_pause_frames;
-       mac_drv->mac_get_id = hns_gmac_get_id;
        mac_drv->get_info = hns_gmac_get_info;
        mac_drv->autoneg_stat = hns_gmac_autoneg_stat;
        mac_drv->get_pause_enable = hns_gmac_get_pausefrm_cfg;
index 3239d27143b935dc0056490b32f700093163c74a..0c1f56e5807402d39e14a2e48277794a2e7e78c1 100644 (file)
@@ -82,9 +82,12 @@ void hns_mac_get_link_status(struct hns_mac_cb *mac_cb, u32 *link_status)
        else
                *link_status = 0;
 
-       ret = mac_cb->dsaf_dev->misc_op->get_sfp_prsnt(mac_cb, &sfp_prsnt);
-       if (!ret)
-               *link_status = *link_status && sfp_prsnt;
+       if (mac_cb->media_type == HNAE_MEDIA_TYPE_FIBER) {
+               ret = mac_cb->dsaf_dev->misc_op->get_sfp_prsnt(mac_cb,
+                                                              &sfp_prsnt);
+               if (!ret)
+                       *link_status = *link_status && sfp_prsnt;
+       }
 
        mac_cb->link = *link_status;
 }
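
After this hunk the SFP presence check only gates fiber ports; the combined link state reduces to the boolean below (assuming the presence query itself succeeded):

#include <stdbool.h>

/* Fiber needs the module present as well; other media rely on the MAC
 * link bit alone. */
static bool link_up(bool mac_link, bool is_fiber, bool sfp_present)
{
        return mac_link && (!is_fiber || sfp_present);
}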
@@ -332,44 +335,6 @@ int hns_mac_set_multi(struct hns_mac_cb *mac_cb,
        return 0;
 }
 
-/**
- *hns_mac_del_mac - delete mac address into dsaf table,can't delete the same
- *                  address twice
- *@net_dev: net device
- *@vfn :   vf lan
- *@mac : mac address
- *return status
- */
-int hns_mac_del_mac(struct hns_mac_cb *mac_cb, u32 vfn, char *mac)
-{
-       struct mac_entry_idx *old_mac;
-       struct dsaf_device *dsaf_dev;
-       u32 ret;
-
-       dsaf_dev = mac_cb->dsaf_dev;
-
-       if (vfn < DSAF_MAX_VM_NUM) {
-               old_mac = &mac_cb->addr_entry_idx[vfn];
-       } else {
-               dev_err(mac_cb->dev,
-                       "vf queue is too large, %s mac%d queue = %#x!\n",
-                       mac_cb->dsaf_dev->ae_dev.name, mac_cb->mac_id, vfn);
-               return -EINVAL;
-       }
-
-       if (dsaf_dev) {
-               ret = hns_dsaf_del_mac_entry(dsaf_dev, old_mac->vlan_id,
-                                            mac_cb->mac_id, old_mac->addr);
-               if (ret)
-                       return ret;
-
-               if (memcmp(old_mac->addr, mac, sizeof(old_mac->addr)) == 0)
-                       old_mac->valid = 0;
-       }
-
-       return 0;
-}
-
 int hns_mac_clr_multicast(struct hns_mac_cb *mac_cb, int vfn)
 {
        struct dsaf_device *dsaf_dev = mac_cb->dsaf_dev;
@@ -491,10 +456,9 @@ void hns_mac_reset(struct hns_mac_cb *mac_cb)
        }
 }
 
-int hns_mac_set_mtu(struct hns_mac_cb *mac_cb, u32 new_mtu)
+int hns_mac_set_mtu(struct hns_mac_cb *mac_cb, u32 new_mtu, u32 buf_size)
 {
        struct mac_driver *drv = hns_mac_get_drv(mac_cb);
-       u32 buf_size = mac_cb->dsaf_dev->buf_size;
        u32 new_frm = new_mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
        u32 max_frm = AE_IS_VER1(mac_cb->dsaf_dev->dsaf_ver) ?
                        MAC_MAX_MTU : MAC_MAX_MTU_V2;
@@ -855,7 +819,7 @@ static int  hns_mac_get_info(struct hns_mac_cb *mac_cb)
                of_node_put(np);
 
                np = of_parse_phandle(to_of_node(mac_cb->fw_port),
-                                       "serdes-syscon", 0);
+                                     "serdes-syscon", 0);
                syscon = syscon_node_to_regmap(np);
                of_node_put(np);
                if (IS_ERR_OR_NULL(syscon)) {
index 2bb3d1e93c64a315c92f0e493573add3e5f1e023..24dfba53a0f216c5ce55850b4a5703a1a02772e5 100644 (file)
@@ -373,8 +373,6 @@ struct mac_driver {
        void (*set_rx_ignore_pause_frames)(void *mac_drv, u32 enable);
        /* config rx mode for promiscuous*/
        void (*set_promiscuous)(void *mac_drv, u8 enable);
-       /* get mac id */
-       void (*mac_get_id)(void *mac_drv, u8 *mac_id);
        void (*mac_pausefrm_cfg)(void *mac_drv, u32 rx_en, u32 tx_en);
 
        void (*autoneg_stat)(void *mac_drv, u32 *enable);
@@ -436,7 +434,6 @@ int hns_mac_set_multi(struct hns_mac_cb *mac_cb,
 int hns_mac_vm_config_bc_en(struct hns_mac_cb *mac_cb, u32 vm, bool enable);
 void hns_mac_start(struct hns_mac_cb *mac_cb);
 void hns_mac_stop(struct hns_mac_cb *mac_cb);
-int hns_mac_del_mac(struct hns_mac_cb *mac_cb, u32 vfn, char *mac);
 void hns_mac_uninit(struct dsaf_device *dsaf_dev);
 void hns_mac_adjust_link(struct hns_mac_cb *mac_cb, int speed, int duplex);
 void hns_mac_reset(struct hns_mac_cb *mac_cb);
@@ -444,7 +441,7 @@ void hns_mac_get_autoneg(struct hns_mac_cb *mac_cb, u32 *auto_neg);
 void hns_mac_get_pauseparam(struct hns_mac_cb *mac_cb, u32 *rx_en, u32 *tx_en);
 int hns_mac_set_autoneg(struct hns_mac_cb *mac_cb, u8 enable);
 int hns_mac_set_pauseparam(struct hns_mac_cb *mac_cb, u32 rx_en, u32 tx_en);
-int hns_mac_set_mtu(struct hns_mac_cb *mac_cb, u32 new_mtu);
+int hns_mac_set_mtu(struct hns_mac_cb *mac_cb, u32 new_mtu, u32 buf_size);
 int hns_mac_get_port_info(struct hns_mac_cb *mac_cb,
                          u8 *auto_neg, u16 *speed, u8 *duplex);
 int hns_mac_config_mac_loopback(struct hns_mac_cb *mac_cb,
index 90dbda7926144a41120d18c28a2c7d033f245f8c..e0bc79ea3d88091c5723403c724a5d4460f93e99 100644 (file)
@@ -510,10 +510,10 @@ static void hns_dsafv2_sbm_bp_wl_cfg(struct dsaf_device *dsaf_dev)
                o_sbm_bp_cfg = dsaf_read_dev(dsaf_dev, reg);
                dsaf_set_field(o_sbm_bp_cfg,
                               DSAFV2_SBM_CFG3_SET_BUF_NUM_NO_PFC_M,
-                              DSAFV2_SBM_CFG3_SET_BUF_NUM_NO_PFC_S, 48);
+                              DSAFV2_SBM_CFG3_SET_BUF_NUM_NO_PFC_S, 55);
                dsaf_set_field(o_sbm_bp_cfg,
                               DSAFV2_SBM_CFG3_RESET_BUF_NUM_NO_PFC_M,
-                              DSAFV2_SBM_CFG3_RESET_BUF_NUM_NO_PFC_S, 80);
+                              DSAFV2_SBM_CFG3_RESET_BUF_NUM_NO_PFC_S, 110);
                dsaf_write_dev(dsaf_dev, reg, o_sbm_bp_cfg);
 
                /* for no enable pfc mode */
@@ -521,10 +521,10 @@ static void hns_dsafv2_sbm_bp_wl_cfg(struct dsaf_device *dsaf_dev)
                o_sbm_bp_cfg = dsaf_read_dev(dsaf_dev, reg);
                dsaf_set_field(o_sbm_bp_cfg,
                               DSAFV2_SBM_CFG4_SET_BUF_NUM_NO_PFC_M,
-                              DSAFV2_SBM_CFG4_SET_BUF_NUM_NO_PFC_S, 192);
+                              DSAFV2_SBM_CFG4_SET_BUF_NUM_NO_PFC_S, 128);
                dsaf_set_field(o_sbm_bp_cfg,
                               DSAFV2_SBM_CFG4_RESET_BUF_NUM_NO_PFC_M,
-                              DSAFV2_SBM_CFG4_RESET_BUF_NUM_NO_PFC_S, 240);
+                              DSAFV2_SBM_CFG4_RESET_BUF_NUM_NO_PFC_S, 192);
                dsaf_write_dev(dsaf_dev, reg, o_sbm_bp_cfg);
        }
 
@@ -1519,6 +1519,7 @@ static void hns_dsaf_set_mac_key(
        mac_key->high.bits.mac_3 = addr[3];
        mac_key->low.bits.mac_4 = addr[4];
        mac_key->low.bits.mac_5 = addr[5];
+       mac_key->low.bits.port_vlan = 0;
        dsaf_set_field(mac_key->low.bits.port_vlan, DSAF_TBL_TCAM_KEY_VLAN_M,
                       DSAF_TBL_TCAM_KEY_VLAN_S, vlan_id);
        dsaf_set_field(mac_key->low.bits.port_vlan, DSAF_TBL_TCAM_KEY_PORT_M,
@@ -1647,87 +1648,6 @@ int hns_dsaf_rm_mac_addr(
                                      mac_entry->addr);
 }
 
-/**
- * hns_dsaf_set_mac_mc_entry - set mac mc-entry
- * @dsaf_dev: dsa fabric device struct pointer
- * @mac_entry: mc-mac entry
- */
-int hns_dsaf_set_mac_mc_entry(
-       struct dsaf_device *dsaf_dev,
-       struct dsaf_drv_mac_multi_dest_entry *mac_entry)
-{
-       u16 entry_index = DSAF_INVALID_ENTRY_IDX;
-       struct dsaf_drv_tbl_tcam_key mac_key;
-       struct dsaf_tbl_tcam_mcast_cfg mac_data;
-       struct dsaf_drv_priv *priv =
-           (struct dsaf_drv_priv *)hns_dsaf_dev_priv(dsaf_dev);
-       struct dsaf_drv_soft_mac_tbl *soft_mac_entry = priv->soft_mac_tbl;
-       struct dsaf_drv_tbl_tcam_key tmp_mac_key;
-       struct dsaf_tbl_tcam_data tcam_data;
-
-       /* mac addr check */
-       if (MAC_IS_ALL_ZEROS(mac_entry->addr)) {
-               dev_err(dsaf_dev->dev, "set uc %s Mac %pM err!\n",
-                       dsaf_dev->ae_dev.name, mac_entry->addr);
-               return -EINVAL;
-       }
-
-       /*config key */
-       hns_dsaf_set_mac_key(dsaf_dev, &mac_key,
-                            mac_entry->in_vlan_id,
-                            mac_entry->in_port_num, mac_entry->addr);
-
-       /* entry ie exist? */
-       entry_index = hns_dsaf_find_soft_mac_entry(dsaf_dev, &mac_key);
-       if (entry_index == DSAF_INVALID_ENTRY_IDX) {
-               /*if hasnot, find enpty entry*/
-               entry_index = hns_dsaf_find_empty_mac_entry(dsaf_dev);
-               if (entry_index == DSAF_INVALID_ENTRY_IDX) {
-                       /*if hasnot empty, error*/
-                       dev_err(dsaf_dev->dev,
-                               "set_uc_entry failed, %s Mac key(%#x:%#x)\n",
-                               dsaf_dev->ae_dev.name,
-                               mac_key.high.val, mac_key.low.val);
-                       return -EINVAL;
-               }
-
-               /* config hardware entry */
-               memset(mac_data.tbl_mcast_port_msk,
-                      0, sizeof(mac_data.tbl_mcast_port_msk));
-       } else {
-               /* config hardware entry */
-               hns_dsaf_tcam_mc_get(dsaf_dev, entry_index, &tcam_data,
-                                    &mac_data);
-
-               tmp_mac_key.high.val =
-                       le32_to_cpu(tcam_data.tbl_tcam_data_high);
-               tmp_mac_key.low.val = le32_to_cpu(tcam_data.tbl_tcam_data_low);
-       }
-       mac_data.tbl_mcast_old_en = 0;
-       mac_data.tbl_mcast_item_vld = 1;
-       dsaf_set_field(mac_data.tbl_mcast_port_msk[0],
-                      0x3F, 0, mac_entry->port_mask[0]);
-
-       dev_dbg(dsaf_dev->dev,
-               "set_uc_entry, %s key(%#x:%#x) entry_index%d\n",
-               dsaf_dev->ae_dev.name, mac_key.high.val,
-               mac_key.low.val, entry_index);
-
-       tcam_data.tbl_tcam_data_high = cpu_to_le32(mac_key.high.val);
-       tcam_data.tbl_tcam_data_low = cpu_to_le32(mac_key.low.val);
-
-       hns_dsaf_tcam_mc_cfg(dsaf_dev, entry_index, &tcam_data, NULL,
-                            &mac_data);
-
-       /* config software entry */
-       soft_mac_entry += entry_index;
-       soft_mac_entry->index = entry_index;
-       soft_mac_entry->tcam_key.high.val = mac_key.high.val;
-       soft_mac_entry->tcam_key.low.val = mac_key.low.val;
-
-       return 0;
-}
-
 static void hns_dsaf_mc_mask_bit_clear(char *dst, const char *src)
 {
        u16 *a = (u16 *)dst;
@@ -2089,166 +2009,6 @@ int hns_dsaf_clr_mac_mc_port(struct dsaf_device *dsaf_dev, u8 mac_id,
        return ret;
 }
 
-/**
- * hns_dsaf_get_mac_uc_entry - get mac uc entry
- * @dsaf_dev: dsa fabric device struct pointer
- * @mac_entry: mac entry
- */
-int hns_dsaf_get_mac_uc_entry(struct dsaf_device *dsaf_dev,
-                             struct dsaf_drv_mac_single_dest_entry *mac_entry)
-{
-       u16 entry_index = DSAF_INVALID_ENTRY_IDX;
-       struct dsaf_drv_tbl_tcam_key mac_key;
-
-       struct dsaf_tbl_tcam_ucast_cfg mac_data;
-       struct dsaf_tbl_tcam_data tcam_data;
-
-       /* check macaddr */
-       if (MAC_IS_ALL_ZEROS(mac_entry->addr) ||
-           MAC_IS_BROADCAST(mac_entry->addr)) {
-               dev_err(dsaf_dev->dev, "get_entry failed,addr %pM\n",
-                       mac_entry->addr);
-               return -EINVAL;
-       }
-
-       /*config key */
-       hns_dsaf_set_mac_key(dsaf_dev, &mac_key, mac_entry->in_vlan_id,
-                            mac_entry->in_port_num, mac_entry->addr);
-
-       /*check exist? */
-       entry_index = hns_dsaf_find_soft_mac_entry(dsaf_dev, &mac_key);
-       if (entry_index == DSAF_INVALID_ENTRY_IDX) {
-               /*find none, error */
-               dev_err(dsaf_dev->dev,
-                       "get_uc_entry failed, %s Mac key(%#x:%#x)\n",
-                       dsaf_dev->ae_dev.name,
-                       mac_key.high.val, mac_key.low.val);
-               return -EINVAL;
-       }
-       dev_dbg(dsaf_dev->dev,
-               "get_uc_entry, %s Mac key(%#x:%#x) entry_index%d\n",
-               dsaf_dev->ae_dev.name, mac_key.high.val,
-               mac_key.low.val, entry_index);
-
-       /* read entry */
-       hns_dsaf_tcam_uc_get(dsaf_dev, entry_index, &tcam_data, &mac_data);
-
-       mac_key.high.val = le32_to_cpu(tcam_data.tbl_tcam_data_high);
-       mac_key.low.val = le32_to_cpu(tcam_data.tbl_tcam_data_low);
-
-       mac_entry->port_num = mac_data.tbl_ucast_out_port;
-
-       return 0;
-}
-
-/**
- * hns_dsaf_get_mac_mc_entry - get mac mc entry
- * @dsaf_dev: dsa fabric device struct pointer
- * @mac_entry: mac entry
- */
-int hns_dsaf_get_mac_mc_entry(struct dsaf_device *dsaf_dev,
-                             struct dsaf_drv_mac_multi_dest_entry *mac_entry)
-{
-       u16 entry_index = DSAF_INVALID_ENTRY_IDX;
-       struct dsaf_drv_tbl_tcam_key mac_key;
-
-       struct dsaf_tbl_tcam_mcast_cfg mac_data;
-       struct dsaf_tbl_tcam_data tcam_data;
-
-       /*check mac addr */
-       if (MAC_IS_ALL_ZEROS(mac_entry->addr) ||
-           MAC_IS_BROADCAST(mac_entry->addr)) {
-               dev_err(dsaf_dev->dev, "get_entry failed,addr %pM\n",
-                       mac_entry->addr);
-               return -EINVAL;
-       }
-
-       /*config key */
-       hns_dsaf_set_mac_key(dsaf_dev, &mac_key, mac_entry->in_vlan_id,
-                            mac_entry->in_port_num, mac_entry->addr);
-
-       /*check exist? */
-       entry_index = hns_dsaf_find_soft_mac_entry(dsaf_dev, &mac_key);
-       if (entry_index == DSAF_INVALID_ENTRY_IDX) {
-               /* find none, error */
-               dev_err(dsaf_dev->dev,
-                       "get_mac_uc_entry failed, %s Mac key(%#x:%#x)\n",
-                       dsaf_dev->ae_dev.name, mac_key.high.val,
-                       mac_key.low.val);
-               return -EINVAL;
-       }
-       dev_dbg(dsaf_dev->dev,
-               "get_mac_uc_entry, %s Mac key(%#x:%#x) entry_index%d\n",
-               dsaf_dev->ae_dev.name, mac_key.high.val,
-               mac_key.low.val, entry_index);
-
-       /*read entry */
-       hns_dsaf_tcam_mc_get(dsaf_dev, entry_index, &tcam_data, &mac_data);
-
-       mac_key.high.val = le32_to_cpu(tcam_data.tbl_tcam_data_high);
-       mac_key.low.val = le32_to_cpu(tcam_data.tbl_tcam_data_low);
-
-       mac_entry->port_mask[0] = mac_data.tbl_mcast_port_msk[0] & 0x3F;
-       return 0;
-}
-
-/**
- * hns_dsaf_get_mac_entry_by_index - get mac entry by tab index
- * @dsaf_dev: dsa fabric device struct pointer
- * @entry_index: tab entry index
- * @mac_entry: mac entry
- */
-int hns_dsaf_get_mac_entry_by_index(
-       struct dsaf_device *dsaf_dev,
-       u16 entry_index, struct dsaf_drv_mac_multi_dest_entry *mac_entry)
-{
-       struct dsaf_drv_tbl_tcam_key mac_key;
-
-       struct dsaf_tbl_tcam_mcast_cfg mac_data;
-       struct dsaf_tbl_tcam_ucast_cfg mac_uc_data;
-       struct dsaf_tbl_tcam_data tcam_data;
-       char mac_addr[ETH_ALEN] = {0};
-
-       if (entry_index >= dsaf_dev->tcam_max_num) {
-               /* find none, del error */
-               dev_err(dsaf_dev->dev, "get_uc_entry failed, %s\n",
-                       dsaf_dev->ae_dev.name);
-               return -EINVAL;
-       }
-
-       /* mc entry, do read opt */
-       hns_dsaf_tcam_mc_get(dsaf_dev, entry_index, &tcam_data, &mac_data);
-
-       mac_key.high.val = le32_to_cpu(tcam_data.tbl_tcam_data_high);
-       mac_key.low.val = le32_to_cpu(tcam_data.tbl_tcam_data_low);
-
-       mac_entry->port_mask[0] = mac_data.tbl_mcast_port_msk[0] & 0x3F;
-
-       /***get mac addr*/
-       mac_addr[0] = mac_key.high.bits.mac_0;
-       mac_addr[1] = mac_key.high.bits.mac_1;
-       mac_addr[2] = mac_key.high.bits.mac_2;
-       mac_addr[3] = mac_key.high.bits.mac_3;
-       mac_addr[4] = mac_key.low.bits.mac_4;
-       mac_addr[5] = mac_key.low.bits.mac_5;
-       /**is mc or uc*/
-       if (MAC_IS_MULTICAST((u8 *)mac_addr) ||
-           MAC_IS_L3_MULTICAST((u8 *)mac_addr)) {
-               /**mc donot do*/
-       } else {
-               /*is not mc, just uc... */
-               hns_dsaf_tcam_uc_get(dsaf_dev, entry_index, &tcam_data,
-                                    &mac_uc_data);
-
-               mac_key.high.val = le32_to_cpu(tcam_data.tbl_tcam_data_high);
-               mac_key.low.val = le32_to_cpu(tcam_data.tbl_tcam_data_low);
-
-               mac_entry->port_mask[0] = (1 << mac_uc_data.tbl_ucast_out_port);
-       }
-
-       return 0;
-}
-
 static struct dsaf_device *hns_dsaf_alloc_dev(struct device *dev,
                                              size_t sizeof_priv)
 {
@@ -2924,10 +2684,11 @@ void hns_dsaf_set_promisc_tcam(struct dsaf_device *dsaf_dev,
        /* find the tcam entry index for promisc */
        entry_index = dsaf_promisc_tcam_entry(port);
 
+       memset(&tbl_tcam_data, 0, sizeof(tbl_tcam_data));
+       memset(&tbl_tcam_mask, 0, sizeof(tbl_tcam_mask));
+
        /* config key mask */
        if (enable) {
-               memset(&tbl_tcam_data, 0, sizeof(tbl_tcam_data));
-               memset(&tbl_tcam_mask, 0, sizeof(tbl_tcam_mask));
                dsaf_set_field(tbl_tcam_data.low.bits.port_vlan,
                               DSAF_TBL_TCAM_KEY_PORT_M,
                               DSAF_TBL_TCAM_KEY_PORT_S, port);
index cef6bf46ae9309bf84c9f5ff466982d59d8bed93..4507e8222683c112c05eeca4633e65990b789b8f 100644 (file)
@@ -68,7 +68,7 @@ enum dsaf_roce_qos_sl {
 };
 
 #define DSAF_STATS_READ(p, offset) (*((u64 *)((u8 *)(p) + (offset))))
-#define HNS_DSAF_IS_DEBUG(dev) (dev->dsaf_mode == DSAF_MODE_DISABLE_SP)
+#define HNS_DSAF_IS_DEBUG(dev) ((dev)->dsaf_mode == DSAF_MODE_DISABLE_SP)
 
 enum hal_dsaf_mode {
        HRD_DSAF_NO_DSAF_MODE   = 0x0,
@@ -429,23 +429,12 @@ static inline struct hnae_vf_cb *hns_ae_get_vf_cb(
 
 int hns_dsaf_set_mac_uc_entry(struct dsaf_device *dsaf_dev,
                              struct dsaf_drv_mac_single_dest_entry *mac_entry);
-int hns_dsaf_set_mac_mc_entry(struct dsaf_device *dsaf_dev,
-                             struct dsaf_drv_mac_multi_dest_entry *mac_entry);
 int hns_dsaf_add_mac_mc_port(struct dsaf_device *dsaf_dev,
                             struct dsaf_drv_mac_single_dest_entry *mac_entry);
 int hns_dsaf_del_mac_entry(struct dsaf_device *dsaf_dev, u16 vlan_id,
                           u8 in_port_num, u8 *addr);
 int hns_dsaf_del_mac_mc_port(struct dsaf_device *dsaf_dev,
                             struct dsaf_drv_mac_single_dest_entry *mac_entry);
-int hns_dsaf_get_mac_uc_entry(struct dsaf_device *dsaf_dev,
-                             struct dsaf_drv_mac_single_dest_entry *mac_entry);
-int hns_dsaf_get_mac_mc_entry(struct dsaf_device *dsaf_dev,
-                             struct dsaf_drv_mac_multi_dest_entry *mac_entry);
-int hns_dsaf_get_mac_entry_by_index(
-       struct dsaf_device *dsaf_dev,
-       u16 entry_index,
-       struct dsaf_drv_mac_multi_dest_entry *mac_entry);
-
 void hns_dsaf_fix_mac_mode(struct hns_mac_cb *mac_cb);
 
 int hns_dsaf_ae_init(struct dsaf_device *dsaf_dev);
@@ -475,5 +464,4 @@ int hns_dsaf_rm_mac_addr(
 int hns_dsaf_clr_mac_mc_port(struct dsaf_device *dsaf_dev,
                             u8 mac_id, u8 port_num);
 
-
 #endif /* __HNS_DSAF_MAIN_H__ */
index a2c22d084ce90cb03337ee09e4b4f1b723046ef4..e13aa064a8e943da7c9538e88bc425f299e944ca 100644 (file)
@@ -461,6 +461,32 @@ int hns_mac_get_sfp_prsnt(struct hns_mac_cb *mac_cb, int *sfp_prsnt)
        return 0;
 }
 
+int hns_mac_get_sfp_prsnt_acpi(struct hns_mac_cb *mac_cb, int *sfp_prsnt)
+{
+       union acpi_object *obj;
+       union acpi_object obj_args, argv4;
+
+       obj_args.integer.type = ACPI_TYPE_INTEGER;
+       obj_args.integer.value = mac_cb->mac_id;
+
+       argv4.type = ACPI_TYPE_PACKAGE;
+       argv4.package.count = 1;
+       argv4.package.elements = &obj_args;
+
+       obj = acpi_evaluate_dsm(ACPI_HANDLE(mac_cb->dev),
+                               hns_dsaf_acpi_dsm_uuid, 0,
+                               HNS_OP_GET_SFP_STAT_FUNC, &argv4);
+
+       if (!obj || obj->type != ACPI_TYPE_INTEGER)
+               return -ENODEV;
+
+       *sfp_prsnt = obj->integer.value;
+
+       ACPI_FREE(obj);
+
+       return 0;
+}
+
 /**
  * hns_mac_config_sds_loopback - set loop back for serdes
  * @mac_cb: mac control block
@@ -592,7 +618,7 @@ struct dsaf_misc_op *hns_misc_op_get(struct dsaf_device *dsaf_dev)
                misc_op->hns_dsaf_roce_srst = hns_dsaf_roce_srst_acpi;
 
                misc_op->get_phy_if = hns_mac_get_phy_if_acpi;
-               misc_op->get_sfp_prsnt = hns_mac_get_sfp_prsnt;
+               misc_op->get_sfp_prsnt = hns_mac_get_sfp_prsnt_acpi;
 
                misc_op->cfg_serdes_loopback = hns_mac_config_sds_loopback_acpi;
        } else {
index 6ea872287307bd85b13436a42c1d8aacbad05f2d..eba406bea52fba77381f2aa4196014eef6678cd1 100644 (file)
@@ -496,17 +496,17 @@ void hns_ppe_get_stats(struct hns_ppe_cb *ppe_cb, u64 *data)
  */
 int hns_ppe_init(struct dsaf_device *dsaf_dev)
 {
-       int i, k;
        int ret;
+       int i;
 
        for (i = 0; i < HNS_PPE_COM_NUM; i++) {
                ret = hns_ppe_common_get_cfg(dsaf_dev, i);
                if (ret)
-                       goto get_ppe_cfg_fail;
+                       goto get_cfg_fail;
 
                ret = hns_rcb_common_get_cfg(dsaf_dev, i);
                if (ret)
-                       goto get_rcb_cfg_fail;
+                       goto get_cfg_fail;
 
                hns_ppe_get_cfg(dsaf_dev->ppe_common[i]);
 
@@ -518,13 +518,12 @@ int hns_ppe_init(struct dsaf_device *dsaf_dev)
 
        return 0;
 
-get_rcb_cfg_fail:
-       hns_ppe_common_free_cfg(dsaf_dev, i);
-get_ppe_cfg_fail:
-       for (k = i - 1; k >= 0; k--) {
-               hns_rcb_common_free_cfg(dsaf_dev, k);
-               hns_ppe_common_free_cfg(dsaf_dev, k);
+get_cfg_fail:
+       for (i = 0; i < HNS_PPE_COM_NUM; i++) {
+               hns_rcb_common_free_cfg(dsaf_dev, i);
+               hns_ppe_common_free_cfg(dsaf_dev, i);
        }
+
        return ret;
 }
 
index f0ed80d6ef9cd45a8408c987ab4315646098f438..c20a0f4f8f02b351eb44fe538ae7b1e0b34ac10d 100644 (file)
@@ -32,6 +32,9 @@
 #define RCB_RESET_WAIT_TIMES 30
 #define RCB_RESET_TRY_TIMES 10
 
+/* Because the default mtu is 1500, an rcb buffer size of 2048 is enough */
+#define RCB_DEFAULT_BUFFER_SIZE 2048
+
 /**
  *hns_rcb_wait_fbd_clean - clean fbd
  *@qs: ring struct pointer array
@@ -192,6 +195,30 @@ void hns_rcb_common_init_commit_hw(struct rcb_common_cb *rcb_common)
        wmb();  /* Sync point after breakpoint */
 }
 
+/* hns_rcb_set_tx_ring_bs - init rcb ring buf size register
+ *@q: hnae_queue
+ *@buf_size: buffer size set to hw
+ */
+void hns_rcb_set_tx_ring_bs(struct hnae_queue *q, u32 buf_size)
+{
+       u32 bd_size_type = hns_rcb_buf_size2type(buf_size);
+
+       dsaf_write_dev(q, RCB_RING_TX_RING_BD_LEN_REG,
+                      bd_size_type);
+}
+
+/* hns_rcb_set_rx_ring_bs - init rcb ring buf size register
+ *@q: hnae_queue
+ *@buf_size: buffer size set to hw
+ */
+void hns_rcb_set_rx_ring_bs(struct hnae_queue *q, u32 buf_size)
+{
+       u32 bd_size_type = hns_rcb_buf_size2type(buf_size);
+
+       dsaf_write_dev(q, RCB_RING_RX_RING_BD_LEN_REG,
+                      bd_size_type);
+}
+
 /**
  *hns_rcb_ring_init - init rcb ring
  *@ring_pair: ring pair control block
@@ -200,8 +227,6 @@ void hns_rcb_common_init_commit_hw(struct rcb_common_cb *rcb_common)
 static void hns_rcb_ring_init(struct ring_pair_cb *ring_pair, int ring_type)
 {
        struct hnae_queue *q = &ring_pair->q;
-       struct rcb_common_cb *rcb_common = ring_pair->rcb_common;
-       u32 bd_size_type = rcb_common->dsaf_dev->buf_size_type;
        struct hnae_ring *ring =
                (ring_type == RX_RING) ? &q->rx_ring : &q->tx_ring;
        dma_addr_t dma = ring->desc_dma_addr;
@@ -212,8 +237,8 @@ static void hns_rcb_ring_init(struct ring_pair_cb *ring_pair, int ring_type)
                dsaf_write_dev(q, RCB_RING_RX_RING_BASEADDR_H_REG,
                               (u32)((dma >> 31) >> 1));
 
-               dsaf_write_dev(q, RCB_RING_RX_RING_BD_LEN_REG,
-                              bd_size_type);
+               hns_rcb_set_rx_ring_bs(q, ring->buf_size);
+
                dsaf_write_dev(q, RCB_RING_RX_RING_BD_NUM_REG,
                               ring_pair->port_id_in_comm);
                dsaf_write_dev(q, RCB_RING_RX_RING_PKTLINE_REG,
@@ -224,12 +249,12 @@ static void hns_rcb_ring_init(struct ring_pair_cb *ring_pair, int ring_type)
                dsaf_write_dev(q, RCB_RING_TX_RING_BASEADDR_H_REG,
                               (u32)((dma >> 31) >> 1));
 
-               dsaf_write_dev(q, RCB_RING_TX_RING_BD_LEN_REG,
-                              bd_size_type);
+               hns_rcb_set_tx_ring_bs(q, ring->buf_size);
+
                dsaf_write_dev(q, RCB_RING_TX_RING_BD_NUM_REG,
                               ring_pair->port_id_in_comm);
                dsaf_write_dev(q, RCB_RING_TX_RING_PKTLINE_REG,
-                              ring_pair->port_id_in_comm);
+                       ring_pair->port_id_in_comm + HNS_RCB_TX_PKTLINE_OFFSET);
        }
 }
 
@@ -259,13 +284,27 @@ static void hns_rcb_set_port_desc_cnt(struct rcb_common_cb *rcb_common,
 static void hns_rcb_set_port_timeout(
        struct rcb_common_cb *rcb_common, u32 port_idx, u32 timeout)
 {
-       if (AE_IS_VER1(rcb_common->dsaf_dev->dsaf_ver))
+       if (AE_IS_VER1(rcb_common->dsaf_dev->dsaf_ver)) {
                dsaf_write_dev(rcb_common, RCB_CFG_OVERTIME_REG,
                               timeout * HNS_RCB_CLK_FREQ_MHZ);
-       else
+       } else if (!HNS_DSAF_IS_DEBUG(rcb_common->dsaf_dev)) {
+               if (timeout > HNS_RCB_DEF_GAP_TIME_USECS)
+                       dsaf_write_dev(rcb_common,
+                                      RCB_PORT_INT_GAPTIME_REG + port_idx * 4,
+                                      HNS_RCB_DEF_GAP_TIME_USECS);
+               else
+                       dsaf_write_dev(rcb_common,
+                                      RCB_PORT_INT_GAPTIME_REG + port_idx * 4,
+                                      timeout);
+
+               dsaf_write_dev(rcb_common,
+                              RCB_PORT_CFG_OVERTIME_REG + port_idx * 4,
+                              timeout);
+       } else {
                dsaf_write_dev(rcb_common,
                               RCB_PORT_CFG_OVERTIME_REG + port_idx * 4,
                               timeout);
+       }
 }
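
On v2 non-debug ports the timeout is now written to two registers, with the per-port gap time clamped to HNS_RCB_DEF_GAP_TIME_USECS (20, per the header change in this patch). The clamp in isolation:

#include <stdint.h>

#define DEF_GAP_TIME_USECS 20 /* HNS_RCB_DEF_GAP_TIME_USECS */

/* The GAPTIME register receives min(timeout, 20us); the OVERTIME
 * register receives the full timeout. */
static uint32_t gap_time_for(uint32_t timeout)
{
        return timeout > DEF_GAP_TIME_USECS ? DEF_GAP_TIME_USECS : timeout;
}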
 
 static int hns_rcb_common_get_port_num(struct rcb_common_cb *rcb_common)
@@ -327,8 +366,12 @@ int hns_rcb_common_init_hw(struct rcb_common_cb *rcb_common)
 
        for (i = 0; i < port_num; i++) {
                hns_rcb_set_port_desc_cnt(rcb_common, i, rcb_common->desc_num);
-               (void)hns_rcb_set_coalesced_frames(
-                       rcb_common, i, HNS_RCB_DEF_COALESCED_FRAMES);
+               hns_rcb_set_rx_coalesced_frames(
+                       rcb_common, i, HNS_RCB_DEF_RX_COALESCED_FRAMES);
+               if (!AE_IS_VER1(rcb_common->dsaf_dev->dsaf_ver) &&
+                   !HNS_DSAF_IS_DEBUG(rcb_common->dsaf_dev))
+                       hns_rcb_set_tx_coalesced_frames(
+                               rcb_common, i, HNS_RCB_DEF_TX_COALESCED_FRAMES);
                hns_rcb_set_port_timeout(
                        rcb_common, i, HNS_RCB_DEF_COALESCED_USECS);
        }
@@ -380,7 +423,6 @@ static void hns_rcb_ring_get_cfg(struct hnae_queue *q, int ring_type)
        struct hnae_ring *ring;
        struct rcb_common_cb *rcb_common;
        struct ring_pair_cb *ring_pair_cb;
-       u32 buf_size;
        u16 desc_num, mdnum_ppkt;
        bool irq_idx, is_ver1;
 
@@ -401,7 +443,6 @@ static void hns_rcb_ring_get_cfg(struct hnae_queue *q, int ring_type)
        }
 
        rcb_common = ring_pair_cb->rcb_common;
-       buf_size = rcb_common->dsaf_dev->buf_size;
        desc_num = rcb_common->dsaf_dev->desc_num;
 
        ring->desc = NULL;
@@ -410,7 +451,7 @@ static void hns_rcb_ring_get_cfg(struct hnae_queue *q, int ring_type)
        ring->irq = ring_pair_cb->virq[irq_idx];
        ring->desc_dma_addr = 0;
 
-       ring->buf_size = buf_size;
+       ring->buf_size = RCB_DEFAULT_BUFFER_SIZE;
        ring->desc_num = desc_num;
        ring->max_desc_num_per_pkt = mdnum_ppkt;
        ring->max_raw_data_sz_per_desc = HNS_RCB_MAX_PKT_SIZE;
@@ -430,7 +471,6 @@ static void hns_rcb_ring_pair_get_cfg(struct ring_pair_cb *ring_pair_cb)
 static int hns_rcb_get_port_in_comm(
        struct rcb_common_cb *rcb_common, int ring_idx)
 {
-
        return ring_idx / (rcb_common->max_q_per_vf * rcb_common->max_vfn);
 }
 
@@ -484,18 +524,34 @@ void hns_rcb_get_cfg(struct rcb_common_cb *rcb_common)
 }
 
 /**
- *hns_rcb_get_coalesced_frames - get rcb port coalesced frames
+ *hns_rcb_get_rx_coalesced_frames - get rcb port rx coalesced frames
  *@rcb_common: rcb_common device
  *@port_idx:port id in comm
  *
  *Returns: coalesced_frames
  */
-u32 hns_rcb_get_coalesced_frames(
+u32 hns_rcb_get_rx_coalesced_frames(
        struct rcb_common_cb *rcb_common, u32 port_idx)
 {
        return dsaf_read_dev(rcb_common, RCB_CFG_PKTLINE_REG + port_idx * 4);
 }
 
+/**
+ *hns_rcb_get_tx_coalesced_frames - get rcb port tx coalesced frames
+ *@rcb_common: rcb_common device
+ *@port_idx:port id in comm
+ *
+ *Returns: coalesced_frames
+ */
+u32 hns_rcb_get_tx_coalesced_frames(
+       struct rcb_common_cb *rcb_common, u32 port_idx)
+{
+       u64 reg;
+
+       reg = RCB_CFG_PKTLINE_REG + (port_idx + HNS_RCB_TX_PKTLINE_OFFSET) * 4;
+       return dsaf_read_dev(rcb_common, reg);
+}
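
The tx thresholds share the rx register bank, HNS_RCB_TX_PKTLINE_OFFSET (8) port slots further in at a 4-byte stride; a sketch of the address math with a hypothetical base (RCB_CFG_PKTLINE_REG's real value lives in the regs header, not this hunk):

#include <stdint.h>

#define CFG_PKTLINE_REG   0x9200u /* hypothetical; stands in for RCB_CFG_PKTLINE_REG */
#define TX_PKTLINE_OFFSET 8       /* HNS_RCB_TX_PKTLINE_OFFSET */

static uint64_t rx_pktline_reg(uint32_t port)
{
        return CFG_PKTLINE_REG + port * 4; /* as in the rx getter above */
}

static uint64_t tx_pktline_reg(uint32_t port)
{
        return CFG_PKTLINE_REG + (port + TX_PKTLINE_OFFSET) * 4;
}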
+
 /**
  *hns_rcb_get_coalesce_usecs - get rcb port coalesced time_out
  *@rcb_common: rcb_common device
@@ -538,33 +594,47 @@ int hns_rcb_set_coalesce_usecs(
                        return -EINVAL;
                }
        }
-       if (timeout > HNS_RCB_MAX_COALESCED_USECS) {
+       if (timeout > HNS_RCB_MAX_COALESCED_USECS || timeout == 0) {
                dev_err(rcb_common->dsaf_dev->dev,
-                       "error: coalesce_usecs setting supports 0~1023us\n");
+                       "error: coalesce_usecs setting supports 1~1023us\n");
                return -EINVAL;
        }
+       hns_rcb_set_port_timeout(rcb_common, port_idx, timeout);
+       return 0;
+}
 
-       if (!AE_IS_VER1(rcb_common->dsaf_dev->dsaf_ver)) {
-               if (timeout == 0)
-                       /* set timeout to 0, Disable gap time */
-                       dsaf_set_reg_field(rcb_common->io_base,
-                                          RCB_INT_GAP_TIME_REG + port_idx * 4,
-                                          PPE_INT_GAPTIME_M, PPE_INT_GAPTIME_B,
-                                          0);
-               else
-                       /* set timeout non 0, restore gap time to 1 */
-                       dsaf_set_reg_field(rcb_common->io_base,
-                                          RCB_INT_GAP_TIME_REG + port_idx * 4,
-                                          PPE_INT_GAPTIME_M, PPE_INT_GAPTIME_B,
-                                          1);
+/**
+ *hns_rcb_set_tx_coalesced_frames - set rcb coalesced frames
+ *@rcb_common: rcb_common device
+ *@port_idx:port id in comm
+ *@coalesced_frames:tx/rx BD num for coalesced frames
+ *
+ * Returns:
+ * Zero for success, or an error code in case of failure
+ */
+int hns_rcb_set_tx_coalesced_frames(
+       struct rcb_common_cb *rcb_common, u32 port_idx, u32 coalesced_frames)
+{
+       u32 old_waterline =
+               hns_rcb_get_tx_coalesced_frames(rcb_common, port_idx);
+       u64 reg;
+
+       if (coalesced_frames == old_waterline)
+               return 0;
+
+       if (coalesced_frames != 1) {
+               dev_err(rcb_common->dsaf_dev->dev,
+                       "error: not support tx coalesce_frames setting!\n");
+               return -EINVAL;
        }
 
-       hns_rcb_set_port_timeout(rcb_common, port_idx, timeout);
+       reg = RCB_CFG_PKTLINE_REG + (port_idx + HNS_RCB_TX_PKTLINE_OFFSET) * 4;
+       dsaf_write_dev(rcb_common, reg, coalesced_frames);
        return 0;
 }
 
 /**
- *hns_rcb_set_coalesced_frames - set rcb coalesced frames
+ *hns_rcb_set_rx_coalesced_frames - set rcb rx coalesced frames
  *@rcb_common: rcb_common device
  *@port_idx:port id in comm
  *@coalesced_frames:tx/rx BD num for coalesced frames
@@ -572,10 +642,11 @@ int hns_rcb_set_coalesce_usecs(
  * Returns:
  * Zero for success, or an error code in case of failure
  */
-int hns_rcb_set_coalesced_frames(
+int hns_rcb_set_rx_coalesced_frames(
        struct rcb_common_cb *rcb_common, u32 port_idx, u32 coalesced_frames)
 {
-       u32 old_waterline = hns_rcb_get_coalesced_frames(rcb_common, port_idx);
+       u32 old_waterline =
+               hns_rcb_get_rx_coalesced_frames(rcb_common, port_idx);
 
        if (coalesced_frames == old_waterline)
                return 0;
index 99b4e1ba0a9411a9889cbf8343605a8e79616bb6..a664ee88ab457ced89f759deb85ad853b3e8ab11 100644 (file)
@@ -35,12 +35,23 @@ struct rcb_common_cb;
 
 #define HNS_RCB_REG_OFFSET                     0x10000
 
+#define HNS_RCB_TX_FRAMES_LOW          1
+#define HNS_RCB_RX_FRAMES_LOW          1
+#define HNS_RCB_TX_FRAMES_HIGH         1023
+#define HNS_RCB_RX_FRAMES_HIGH         1023
+#define HNS_RCB_TX_USECS_LOW           1
+#define HNS_RCB_RX_USECS_LOW           1
+#define HNS_RCB_TX_USECS_HIGH          1023
+#define HNS_RCB_RX_USECS_HIGH          1023
 #define HNS_RCB_MAX_COALESCED_FRAMES           1023
 #define HNS_RCB_MIN_COALESCED_FRAMES           1
-#define HNS_RCB_DEF_COALESCED_FRAMES           50
+#define HNS_RCB_DEF_RX_COALESCED_FRAMES                50
+#define HNS_RCB_DEF_TX_COALESCED_FRAMES                1
 #define HNS_RCB_CLK_FREQ_MHZ                   350
 #define HNS_RCB_MAX_COALESCED_USECS            0x3ff
-#define HNS_RCB_DEF_COALESCED_USECS            50
+#define HNS_RCB_DEF_COALESCED_USECS            30
+#define HNS_RCB_DEF_GAP_TIME_USECS             20
+#define HNS_RCB_TX_PKTLINE_OFFSET              8
 
 #define HNS_RCB_COMMON_ENDIAN                  1
 
@@ -125,13 +136,17 @@ void hns_rcbv2_int_clr_hw(struct hnae_queue *q, u32 flag);
 void hns_rcb_init_hw(struct ring_pair_cb *ring);
 void hns_rcb_reset_ring_hw(struct hnae_queue *q);
 void hns_rcb_wait_fbd_clean(struct hnae_queue **qs, int q_num, u32 flag);
-u32 hns_rcb_get_coalesced_frames(
+u32 hns_rcb_get_rx_coalesced_frames(
+       struct rcb_common_cb *rcb_common, u32 port_idx);
+u32 hns_rcb_get_tx_coalesced_frames(
        struct rcb_common_cb *rcb_common, u32 port_idx);
 u32 hns_rcb_get_coalesce_usecs(
        struct rcb_common_cb *rcb_common, u32 port_idx);
 int hns_rcb_set_coalesce_usecs(
        struct rcb_common_cb *rcb_common, u32 port_idx, u32 timeout);
-int hns_rcb_set_coalesced_frames(
+int hns_rcb_set_rx_coalesced_frames(
+       struct rcb_common_cb *rcb_common, u32 port_idx, u32 coalesced_frames);
+int hns_rcb_set_tx_coalesced_frames(
        struct rcb_common_cb *rcb_common, u32 port_idx, u32 coalesced_frames);
 void hns_rcb_update_stats(struct hnae_queue *queue);
 
@@ -146,4 +161,7 @@ int hns_rcb_get_ring_regs_count(void);
 void hns_rcb_get_ring_regs(struct hnae_queue *queue, void *data);
 
 void hns_rcb_get_strings(int stringset, u8 *data, int index);
+void hns_rcb_set_rx_ring_bs(struct hnae_queue *q, u32 buf_size);
+void hns_rcb_set_tx_ring_bs(struct hnae_queue *q, u32 buf_size);
+
 #endif /* _HNS_DSAF_RCB_H */
index 8fa18fc17cd2e25f2e3458e608abe6f5a96b60d9..46a52d9bb196326e5da7481f43616dab0afd12a2 100644 (file)
 #define RCB_CFG_OVERTIME_REG                   0x9300
 #define RCB_CFG_PKTLINE_INT_NUM_REG            0x9304
 #define RCB_CFG_OVERTIME_INT_NUM_REG           0x9308
-#define RCB_INT_GAP_TIME_REG                   0x9400
+#define RCB_PORT_INT_GAPTIME_REG               0x9400
 #define RCB_PORT_CFG_OVERTIME_REG              0x9430
 
 #define RCB_RING_RX_RING_BASEADDR_L_REG                0x00000
 
 #define GMAC_DUPLEX_TYPE_REG                   0x0008UL
 #define GMAC_FD_FC_TYPE_REG                    0x000CUL
+#define GMAC_TX_WATER_LINE_REG                 0x0010UL
 #define GMAC_FC_TX_TIMER_REG                   0x001CUL
 #define GMAC_FD_FC_ADDR_LOW_REG                        0x0020UL
 #define GMAC_FD_FC_ADDR_HIGH_REG               0x0024UL
 
 #define GMAC_DUPLEX_TYPE_B 0
 
+#define GMAC_TX_WATER_LINE_MASK                ((1UL << 8) - 1)
+#define GMAC_TX_WATER_LINE_SHIFT       0
+
 #define GMAC_FC_TX_TIMER_S 0
 #define GMAC_FC_TX_TIMER_M 0xffff
 
index aae830a93050ad5f99ece2b6901dd30531852d87..37a2fc35148f7f4201f529baa85563a56165114e 100644 (file)
@@ -299,18 +299,6 @@ static void hns_xgmac_set_tx_auto_pause_frames(void *mac_drv, u16 enable)
                dsaf_write_dev(drv, XGMAC_MAC_PAUSE_TIME_REG, enable);
 }
 
-/**
- *hns_xgmac_get_id - get xgmac port id
- *@mac_drv: mac driver
- *@newval:xgmac max frame length
- */
-static void hns_xgmac_get_id(void *mac_drv, u8 *mac_id)
-{
-       struct mac_driver *drv = (struct mac_driver *)mac_drv;
-
-       *mac_id = drv->mac_id;
-}
-
 /**
  *hns_xgmac_config_max_frame_length - set xgmac max frame length
  *@mac_drv: mac driver
@@ -833,7 +821,6 @@ void *hns_xgmac_config(struct hns_mac_cb *mac_cb, struct mac_params *mac_param)
        mac_drv->config_half_duplex = NULL;
        mac_drv->set_rx_ignore_pause_frames =
                hns_xgmac_set_rx_ignore_pause_frames;
-       mac_drv->mac_get_id = hns_xgmac_get_id;
        mac_drv->mac_free = hns_xgmac_free;
        mac_drv->adjust_link = NULL;
        mac_drv->set_tx_auto_pause_frames = hns_xgmac_set_tx_auto_pause_frames;
index fca37e2c7f017d76aa537daede5f7af14cb8e152..c6700b91a2dfd3da02dc8a4b6728fbf92beff4c1 100644 (file)
@@ -512,7 +512,8 @@ static void hns_nic_reuse_page(struct sk_buff *skb, int i,
        int last_offset;
        bool twobufs;
 
-       twobufs = ((PAGE_SIZE < 8192) && hnae_buf_size(ring) == HNS_BUFFER_SIZE_2048);
+       twobufs = ((PAGE_SIZE < 8192) &&
+               hnae_buf_size(ring) == HNS_BUFFER_SIZE_2048);
 
        desc = &ring->desc[ring->next_to_clean];
        size = le16_to_cpu(desc->rx.size);
@@ -859,7 +860,7 @@ out:
        return recv_pkts;
 }
 
-static void hns_nic_rx_fini_pro(struct hns_nic_ring_data *ring_data)
+static bool hns_nic_rx_fini_pro(struct hns_nic_ring_data *ring_data)
 {
        struct hnae_ring *ring = ring_data->ring;
        int num = 0;
@@ -873,22 +874,23 @@ static void hns_nic_rx_fini_pro(struct hns_nic_ring_data *ring_data)
                ring_data->ring->q->handle->dev->ops->toggle_ring_irq(
                        ring_data->ring, 1);
 
-               napi_schedule(&ring_data->napi);
+               return false;
+       } else {
+               return true;
        }
 }
 
-static void hns_nic_rx_fini_pro_v2(struct hns_nic_ring_data *ring_data)
+static bool hns_nic_rx_fini_pro_v2(struct hns_nic_ring_data *ring_data)
 {
        struct hnae_ring *ring = ring_data->ring;
-       int num = 0;
+       int num;
 
        num = readl_relaxed(ring->io_base + RCB_REG_FBDNUM);
 
-       if (num == 0)
-               ring_data->ring->q->handle->dev->ops->toggle_ring_irq(
-                       ring, 0);
+       if (!num)
+               return true;
        else
-               napi_schedule(&ring_data->napi);
+               return false;
 }
 
 static inline void hns_nic_reclaim_one_desc(struct hnae_ring *ring,
@@ -921,12 +923,13 @@ static int is_valid_clean_head(struct hnae_ring *ring, int h)
 
 /* netif_tx_lock will turn down the performance, set only when necessary */
 #ifdef CONFIG_NET_POLL_CONTROLLER
-#define NETIF_TX_LOCK(ndev) netif_tx_lock(ndev)
-#define NETIF_TX_UNLOCK(ndev) netif_tx_unlock(ndev)
+#define NETIF_TX_LOCK(ring) spin_lock(&(ring)->lock)
+#define NETIF_TX_UNLOCK(ring) spin_unlock(&(ring)->lock)
 #else
-#define NETIF_TX_LOCK(ndev)
-#define NETIF_TX_UNLOCK(ndev)
+#define NETIF_TX_LOCK(ring)
+#define NETIF_TX_UNLOCK(ring)
 #endif
+
 /* reclaim all desc in one budget
  * return error or number of desc left
  */
@@ -940,13 +943,13 @@ static int hns_nic_tx_poll_one(struct hns_nic_ring_data *ring_data,
        int head;
        int bytes, pkts;
 
-       NETIF_TX_LOCK(ndev);
+       NETIF_TX_LOCK(ring);
 
        head = readl_relaxed(ring->io_base + RCB_REG_HEAD);
        rmb(); /* make sure head is ready before touch any data */
 
        if (is_ring_empty(ring) || head == ring->next_to_clean) {
-               NETIF_TX_UNLOCK(ndev);
+               NETIF_TX_UNLOCK(ring);
                return 0; /* no data to poll */
        }
 
@@ -954,7 +957,7 @@ static int hns_nic_tx_poll_one(struct hns_nic_ring_data *ring_data,
                netdev_err(ndev, "wrong head (%d, %d-%d)\n", head,
                           ring->next_to_use, ring->next_to_clean);
                ring->stats.io_err_cnt++;
-               NETIF_TX_UNLOCK(ndev);
+               NETIF_TX_UNLOCK(ring);
                return -EIO;
        }
 
@@ -966,7 +969,7 @@ static int hns_nic_tx_poll_one(struct hns_nic_ring_data *ring_data,
                prefetch(&ring->desc_cb[ring->next_to_clean]);
        }
 
-       NETIF_TX_UNLOCK(ndev);
+       NETIF_TX_UNLOCK(ring);
 
        dev_queue = netdev_get_tx_queue(ndev, ring_data->queue_index);
        netdev_tx_completed_queue(dev_queue, pkts, bytes);
@@ -989,7 +992,7 @@ static int hns_nic_tx_poll_one(struct hns_nic_ring_data *ring_data,
        return 0;
 }
 
-static void hns_nic_tx_fini_pro(struct hns_nic_ring_data *ring_data)
+static bool hns_nic_tx_fini_pro(struct hns_nic_ring_data *ring_data)
 {
        struct hnae_ring *ring = ring_data->ring;
        int head;
@@ -1002,20 +1005,21 @@ static void hns_nic_tx_fini_pro(struct hns_nic_ring_data *ring_data)
                ring_data->ring->q->handle->dev->ops->toggle_ring_irq(
                        ring_data->ring, 1);
 
-               napi_schedule(&ring_data->napi);
+               return false;
+       } else {
+               return true;
        }
 }
 
-static void hns_nic_tx_fini_pro_v2(struct hns_nic_ring_data *ring_data)
+static bool hns_nic_tx_fini_pro_v2(struct hns_nic_ring_data *ring_data)
 {
        struct hnae_ring *ring = ring_data->ring;
        int head = readl_relaxed(ring->io_base + RCB_REG_HEAD);
 
        if (head == ring->next_to_clean)
-               ring_data->ring->q->handle->dev->ops->toggle_ring_irq(
-                       ring, 0);
+               return true;
        else
-               napi_schedule(&ring_data->napi);
+               return false;
 }
 
 static void hns_nic_tx_clr_all_bufs(struct hns_nic_ring_data *ring_data)
@@ -1026,7 +1030,7 @@ static void hns_nic_tx_clr_all_bufs(struct hns_nic_ring_data *ring_data)
        int head;
        int bytes, pkts;
 
-       NETIF_TX_LOCK(ndev);
+       NETIF_TX_LOCK(ring);
 
        head = ring->next_to_use; /* ntu: software-set ring position */
        bytes = 0;
@@ -1034,7 +1038,7 @@ static void hns_nic_tx_clr_all_bufs(struct hns_nic_ring_data *ring_data)
        while (head != ring->next_to_clean)
                hns_nic_reclaim_one_desc(ring, &bytes, &pkts);
 
-       NETIF_TX_UNLOCK(ndev);
+       NETIF_TX_UNLOCK(ring);
 
        dev_queue = netdev_get_tx_queue(ndev, ring_data->queue_index);
        netdev_tx_reset_queue(dev_queue);
@@ -1042,15 +1046,23 @@ static void hns_nic_tx_clr_all_bufs(struct hns_nic_ring_data *ring_data)
 
 static int hns_nic_common_poll(struct napi_struct *napi, int budget)
 {
+       int clean_complete = 0;
        struct hns_nic_ring_data *ring_data =
                container_of(napi, struct hns_nic_ring_data, napi);
-       int clean_complete = ring_data->poll_one(
-                               ring_data, budget, ring_data->ex_process);
+       struct hnae_ring *ring = ring_data->ring;
 
-       if (clean_complete >= 0 && clean_complete < budget) {
-               napi_complete(napi);
-               ring_data->fini_process(ring_data);
-               return 0;
+try_again:
+       clean_complete += ring_data->poll_one(
+                               ring_data, budget - clean_complete,
+                               ring_data->ex_process);
+
+       if (clean_complete < budget) {
+               if (ring_data->fini_process(ring_data)) {
+                       napi_complete(napi);
+                       ring->q->handle->dev->ops->toggle_ring_irq(ring, 0);
+               } else {
+                       goto try_again;
+               }
        }
 
        return clean_complete;
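
The reworked poll keeps a running total against the budget and only completes napi when fini_process reports the ring drained; otherwise it loops instead of re-scheduling. A userspace model of the control flow, with hypothetical callbacks standing in for poll_one/fini_process/toggle_ring_irq:

#include <stdbool.h>

static int common_poll(int budget,
                       int (*poll_one)(int budget),
                       bool (*drained)(void),
                       void (*irq_enable)(void))
{
        int done = 0;

        for (;;) {
                done += poll_one(budget - done);
                if (done >= budget)
                        return done; /* budget spent: napi stays scheduled */
                if (drained()) {
                        irq_enable(); /* napi_complete + re-arm the ring irq */
                        return done;
                }
                /* work arrived between the poll and the drained check:
                 * poll again rather than napi_schedule() */
        }
}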
@@ -1196,54 +1208,31 @@ static void hns_nic_ring_close(struct net_device *netdev, int idx)
        napi_disable(&priv->ring_data[idx].napi);
 }
 
-static void hns_set_irq_affinity(struct hns_nic_priv *priv)
+static int hns_nic_init_affinity_mask(int q_num, int ring_idx,
+                                     struct hnae_ring *ring, cpumask_t *mask)
 {
-       struct hnae_handle *h = priv->ae_handle;
-       struct hns_nic_ring_data *rd;
-       int i;
        int cpu;
-       cpumask_var_t mask;
-
-       if (!alloc_cpumask_var(&mask, GFP_KERNEL))
-               return;
 
-       /*diffrent irq banlance for 16core and 32core*/
-       if (h->q_num == num_possible_cpus()) {
-               for (i = 0; i < h->q_num * 2; i++) {
-                       rd = &priv->ring_data[i];
-                       if (cpu_online(rd->queue_index)) {
-                               cpumask_clear(mask);
-                               cpu = rd->queue_index;
-                               cpumask_set_cpu(cpu, mask);
-                               (void)irq_set_affinity_hint(rd->ring->irq,
-                                                           mask);
-                       }
-               }
+       /* Different irq balance between 16-core and 32-core.
+        * The cpu mask is set by ring index according to the ring flag,
+        * which indicates whether the ring is tx or rx.
+        */
+       if (q_num == num_possible_cpus()) {
+               if (is_tx_ring(ring))
+                       cpu = ring_idx;
+               else
+                       cpu = ring_idx - q_num;
        } else {
-               for (i = 0; i < h->q_num; i++) {
-                       rd = &priv->ring_data[i];
-                       if (cpu_online(rd->queue_index * 2)) {
-                               cpumask_clear(mask);
-                               cpu = rd->queue_index * 2;
-                               cpumask_set_cpu(cpu, mask);
-                               (void)irq_set_affinity_hint(rd->ring->irq,
-                                                           mask);
-                       }
-               }
-
-               for (i = h->q_num; i < h->q_num * 2; i++) {
-                       rd = &priv->ring_data[i];
-                       if (cpu_online(rd->queue_index * 2 + 1)) {
-                               cpumask_clear(mask);
-                               cpu = rd->queue_index * 2 + 1;
-                               cpumask_set_cpu(cpu, mask);
-                               (void)irq_set_affinity_hint(rd->ring->irq,
-                                                           mask);
-                       }
-               }
+               if (is_tx_ring(ring))
+                       cpu = ring_idx * 2;
+               else
+                       cpu = (ring_idx - q_num) * 2 + 1;
        }
 
-       free_cpumask_var(mask);
+       cpumask_clear(mask);
+       cpumask_set_cpu(cpu, mask);
+
+       return cpu;
 }
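
The helper above replaces the three per-case loops that follow: when q_num equals the cpu count, a tx/rx ring pair shares one cpu; otherwise tx rings take even cpus and rx rings the odd ones (e.g. q_num = 16 on 32 cpus: tx0 -> cpu0, rx0 -> cpu1, tx1 -> cpu2, ...). The pure computation:

/* ring_idx counts tx rings first (0..q_num-1), then rx rings. */
static int ring_cpu(int q_num, int ring_idx, int is_tx, int nr_cpus)
{
        if (q_num == nr_cpus)
                return is_tx ? ring_idx : ring_idx - q_num;
        return is_tx ? ring_idx * 2 : (ring_idx - q_num) * 2 + 1;
}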
 
 static int hns_nic_init_irq(struct hns_nic_priv *priv)
@@ -1252,6 +1241,7 @@ static int hns_nic_init_irq(struct hns_nic_priv *priv)
        struct hns_nic_ring_data *rd;
        int i;
        int ret;
+       int cpu;
 
        for (i = 0; i < h->q_num * 2; i++) {
                rd = &priv->ring_data[i];
@@ -1261,7 +1251,7 @@ static int hns_nic_init_irq(struct hns_nic_priv *priv)
 
                snprintf(rd->ring->ring_name, RCB_RING_NAME_LEN,
                         "%s-%s%d", priv->netdev->name,
-                        (i < h->q_num ? "tx" : "rx"), rd->queue_index);
+                        (is_tx_ring(rd->ring) ? "tx" : "rx"), rd->queue_index);
 
                rd->ring->ring_name[RCB_RING_NAME_LEN - 1] = '\0';
 
@@ -1273,12 +1263,17 @@ static int hns_nic_init_irq(struct hns_nic_priv *priv)
                        return ret;
                }
                disable_irq(rd->ring->irq);
+
+               cpu = hns_nic_init_affinity_mask(h->q_num, i,
+                                                rd->ring, &rd->mask);
+
+               if (cpu_online(cpu))
+                       irq_set_affinity_hint(rd->ring->irq,
+                                             &rd->mask);
+
                rd->ring->irq_init_flag = RCB_IRQ_INITED;
        }
 
-       /*set cpu affinity*/
-       hns_set_irq_affinity(priv);
-
        return 0;
 }
 
@@ -1487,32 +1482,259 @@ static netdev_tx_t hns_nic_net_xmit(struct sk_buff *skb,
        return (netdev_tx_t)ret;
 }
 
+static void hns_nic_drop_rx_fetch(struct hns_nic_ring_data *ring_data,
+                                 struct sk_buff *skb)
+{
+       dev_kfree_skb_any(skb);
+}
+
+#define HNS_LB_TX_RING 0
+static struct sk_buff *hns_assemble_skb(struct net_device *ndev)
+{
+       struct sk_buff *skb;
+       struct ethhdr *ethhdr;
+       int frame_len;
+
+       /* allocate test skb */
+       skb = alloc_skb(64, GFP_KERNEL);
+       if (!skb)
+               return NULL;
+
+       skb_put(skb, 64);
+       skb->dev = ndev;
+       memset(skb->data, 0xFF, skb->len);
+
+       /* must be a tcp/ip packet */
+       ethhdr = (struct ethhdr *)skb->data;
+       ethhdr->h_proto = htons(ETH_P_IP);
+
+       frame_len = skb->len & (~1ul);
+       memset(&skb->data[frame_len / 2], 0xAA,
+              frame_len / 2 - 1);
+
+       skb->queue_mapping = HNS_LB_TX_RING;
+
+       return skb;
+}
+
+static int hns_enable_serdes_lb(struct net_device *ndev)
+{
+       struct hns_nic_priv *priv = netdev_priv(ndev);
+       struct hnae_handle *h = priv->ae_handle;
+       struct hnae_ae_ops *ops = h->dev->ops;
+       int speed, duplex;
+       int ret;
+
+       ret = ops->set_loopback(h, MAC_INTERNALLOOP_SERDES, 1);
+       if (ret)
+               return ret;
+
+       ret = ops->start ? ops->start(h) : 0;
+       if (ret)
+               return ret;
+
+       /* adjust link speed and duplex */
+       if (h->phy_if != PHY_INTERFACE_MODE_XGMII)
+               speed = 1000;
+       else
+               speed = 10000;
+       duplex = 1;
+
+       ops->adjust_link(h, speed, duplex);
+
+       /* wait for the hardware to become ready */
+       mdelay(300);
+
+       return 0;
+}
+
+static void hns_disable_serdes_lb(struct net_device *ndev)
+{
+       struct hns_nic_priv *priv = netdev_priv(ndev);
+       struct hnae_handle *h = priv->ae_handle;
+       struct hnae_ae_ops *ops = h->dev->ops;
+
+       ops->stop(h);
+       ops->set_loopback(h, MAC_INTERNALLOOP_SERDES, 0);
+}
+
+/**
+ * hns_nic_clear_all_rx_fetch - clear the descriptors the chip has fetched.
+ * The function works as follows:
+ *    1. if an rx ring finds a page_offset that is not 0 between head and
+ *       tail, the chip has fetched the wrong descriptors for the ring
+ *       whose buffer size is 4096.
+ *    2. we enable the chip serdes loopback and point rss indirection at the ring.
+ *    3. construct 64-byte ip broadcast packets, wait for the associated rx
+ *       ring to receive them all, and the chip will fetch new descriptors.
+ *    4. restore the original state.
+ *
+ * @ndev: net device
+ */
+static int hns_nic_clear_all_rx_fetch(struct net_device *ndev)
+{
+       struct hns_nic_priv *priv = netdev_priv(ndev);
+       struct hnae_handle *h = priv->ae_handle;
+       struct hnae_ae_ops *ops = h->dev->ops;
+       struct hns_nic_ring_data *rd;
+       struct hnae_ring *ring;
+       struct sk_buff *skb;
+       u32 *org_indir;
+       u32 *cur_indir;
+       int indir_size;
+       int head, tail;
+       int fetch_num;
+       int i, j;
+       bool found;
+       int retry_times;
+       int ret = 0;
+
+       /* alloc indir memory */
+       indir_size = ops->get_rss_indir_size(h) * sizeof(*org_indir);
+       org_indir = kzalloc(indir_size, GFP_KERNEL);
+       if (!org_indir)
+               return -ENOMEM;
+
+       /* store the original indirection table */
+       ops->get_rss(h, org_indir, NULL, NULL);
+
+       cur_indir = kzalloc(indir_size, GFP_KERNEL);
+       if (!cur_indir) {
+               ret = -ENOMEM;
+               goto cur_indir_alloc_err;
+       }
+
+       /* set loopback */
+       if (hns_enable_serdes_lb(ndev)) {
+               ret = -EINVAL;
+               goto enable_serdes_lb_err;
+       }
+
+       /* walk every rx ring and clear its fetched descriptors */
+       for (i = 0; i < h->q_num; i++) {
+               ring = &h->qs[i]->rx_ring;
+               head = readl_relaxed(ring->io_base + RCB_REG_HEAD);
+               tail = readl_relaxed(ring->io_base + RCB_REG_TAIL);
+               found = false;
+               fetch_num = ring_dist(ring, head, tail);
+
+               while (head != tail) {
+                       if (ring->desc_cb[head].page_offset != 0) {
+                               found = true;
+                               break;
+                       }
+
+                       head++;
+                       if (head == ring->desc_num)
+                               head = 0;
+               }
+
+               if (found) {
+                       for (j = 0; j < indir_size / sizeof(*org_indir); j++)
+                               cur_indir[j] = i;
+                       ops->set_rss(h, cur_indir, NULL, 0);
+
+                       for (j = 0; j < fetch_num; j++) {
+                               /* alloc one skb and init */
+                               skb = hns_assemble_skb(ndev);
+                               if (!skb)
+                                       goto out;
+                               rd = &tx_ring_data(priv, skb->queue_mapping);
+                               hns_nic_net_xmit_hw(ndev, skb, rd);
+
+                               retry_times = 0;
+                               while (retry_times++ < 10) {
+                                       mdelay(10);
+                                       /* clean rx */
+                                       rd = &rx_ring_data(priv, i);
+                                       if (rd->poll_one(rd, fetch_num,
+                                                        hns_nic_drop_rx_fetch))
+                                               break;
+                               }
+
+                               retry_times = 0;
+                               while (retry_times++ < 10) {
+                                       mdelay(10);
+                                       /* reclaim the packet sent on tx ring 0 */
+                                       rd = &tx_ring_data(priv,
+                                                          HNS_LB_TX_RING);
+                                       if (rd->poll_one(rd, fetch_num, NULL))
+                                               break;
+                               }
+                       }
+               }
+       }
+
+out:
+       /* restore everything */
+       ops->set_rss(h, org_indir, NULL, 0);
+       hns_disable_serdes_lb(ndev);
+enable_serdes_lb_err:
+       kfree(cur_indir);
+cur_indir_alloc_err:
+       kfree(org_indir);
+
+       return ret;
+}
+
 static int hns_nic_change_mtu(struct net_device *ndev, int new_mtu)
 {
        struct hns_nic_priv *priv = netdev_priv(ndev);
        struct hnae_handle *h = priv->ae_handle;
+       bool if_running = netif_running(ndev);
        int ret;
 
+       /* MTU < 68 is an error and causes problems on some kernels */
+       if (new_mtu < 68)
+               return -EINVAL;
+
+       /* no change in MTU, nothing to do */
+       if (new_mtu == ndev->mtu)
+               return 0;
+
        if (!h->dev->ops->set_mtu)
                return -ENOTSUPP;
 
-       if (netif_running(ndev)) {
+       if (if_running) {
                (void)hns_nic_net_stop(ndev);
                msleep(100);
+       }
 
-               ret = h->dev->ops->set_mtu(h, new_mtu);
-               if (ret)
-                       netdev_err(ndev, "set mtu fail, return value %d\n",
-                                  ret);
+       if (priv->enet_ver != AE_VERSION_1 &&
+           ndev->mtu <= BD_SIZE_2048_MAX_MTU &&
+           new_mtu > BD_SIZE_2048_MAX_MTU) {
+               /* update desc */
+               hnae_reinit_all_ring_desc(h);
 
-               if (hns_nic_net_open(ndev))
-                       netdev_err(ndev, "hns net open fail\n");
-       } else {
-               ret = h->dev->ops->set_mtu(h, new_mtu);
+               /* flush the descriptors the chip has already fetched */
+               ret = hns_nic_clear_all_rx_fetch(ndev);
+
+               /* the page offset must be consistent with the descriptors */
+               hnae_reinit_all_ring_page_off(h);
+
+               if (ret) {
+                       netdev_err(ndev, "failed to clear the fetched descriptors\n");
+                       goto out;
+               }
+       }
+
+       ret = h->dev->ops->set_mtu(h, new_mtu);
+       if (ret) {
+               netdev_err(ndev, "failed to set mtu, return value %d\n",
+                          ret);
+               goto out;
        }
 
-       if (!ret)
-               ndev->mtu = new_mtu;
+       /* finally, record the new mtu on the netdevice */
+       ndev->mtu = new_mtu;
+
+out:
+       if (if_running) {
+               if (hns_nic_net_open(ndev)) {
+                       netdev_err(ndev, "hns net open fail\n");
+                       ret = -EINVAL;
+               }
+       }
 
        return ret;
 }
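A side note: a change_mtu path like the one above is driven from userspace through the standard SIOCSIFMTU ioctl (or `ip link set ... mtu`). A minimal sketch, assuming a placeholder interface name and an MTU large enough to cross BD_SIZE_2048_MAX_MTU:

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>

int main(void)
{
	struct ifreq ifr;
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	if (fd < 0)
		return 1;

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);	/* placeholder name */
	ifr.ifr_mtu = 4000;	/* large enough to trigger the reinit path */

	if (ioctl(fd, SIOCSIFMTU, &ifr) < 0)	/* ends up in ndo_change_mtu */
		perror("SIOCSIFMTU");

	close(fd);
	return 0;
}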
@@ -1791,7 +2013,7 @@ static void hns_nic_reset_subtask(struct hns_nic_priv *priv)
 static void hns_nic_service_event_complete(struct hns_nic_priv *priv)
 {
        WARN_ON(!test_bit(NIC_STATE_SERVICE_SCHED, &priv->state));
-
+       /* make sure the state update is visible before clearing the bit */
        smp_mb__before_atomic();
        clear_bit(NIC_STATE_SERVICE_SCHED, &priv->state);
 }
index 5b412de350aa28e9099ee251e0824ed66f37b2b1..1b83232082b2a22244b77a05f4125a69cc4a5f32 100644 (file)
@@ -37,10 +37,11 @@ enum hns_nic_state {
 struct hns_nic_ring_data {
        struct hnae_ring *ring;
        struct napi_struct napi;
+       cpumask_t mask; /* affinity mask */
        int queue_index;
        int (*poll_one)(struct hns_nic_ring_data *, int, void *);
        void (*ex_process)(struct hns_nic_ring_data *, struct sk_buff *);
-       void (*fini_process)(struct hns_nic_ring_data *);
+       bool (*fini_process)(struct hns_nic_ring_data *);
 };
 
 /* compatible the difference between two versions */
index 3ac2183dbd2119e0746d35510fcfc1e390ab6d9b..b8fab149690f880394f0d973560aecca69d1171e 100644 (file)
@@ -146,7 +146,7 @@ static int hns_nic_get_link_ksettings(struct net_device *net_dev,
 
        /* When there is no phy, autoneg is off. */
        cmd->base.autoneg = false;
-       cmd->base.cmd = speed;
+       cmd->base.speed = speed;
        cmd->base.duplex = duplex;
 
        if (net_dev->phydev)
@@ -764,14 +764,14 @@ static int hns_get_coalesce(struct net_device *net_dev,
        ec->use_adaptive_tx_coalesce = 1;
 
        if ((!ops->get_coalesce_usecs) ||
-           (!ops->get_rx_max_coalesced_frames))
+           (!ops->get_max_coalesced_frames))
                return -ESRCH;
 
        ops->get_coalesce_usecs(priv->ae_handle,
                                        &ec->tx_coalesce_usecs,
                                        &ec->rx_coalesce_usecs);
 
-       ops->get_rx_max_coalesced_frames(
+       ops->get_max_coalesced_frames(
                priv->ae_handle,
                &ec->tx_max_coalesced_frames,
                &ec->rx_max_coalesced_frames);
@@ -801,30 +801,28 @@ static int hns_set_coalesce(struct net_device *net_dev,
 {
        struct hns_nic_priv *priv = netdev_priv(net_dev);
        struct hnae_ae_ops *ops;
-       int ret;
+       int rc1, rc2;
 
        ops = priv->ae_handle->dev->ops;
 
        if (ec->tx_coalesce_usecs != ec->rx_coalesce_usecs)
                return -EINVAL;
 
-       if (ec->rx_max_coalesced_frames != ec->tx_max_coalesced_frames)
-               return -EINVAL;
-
        if ((!ops->set_coalesce_usecs) ||
            (!ops->set_coalesce_frames))
                return -ESRCH;
 
-       ret = ops->set_coalesce_usecs(priv->ae_handle,
+       rc1 = ops->set_coalesce_usecs(priv->ae_handle,
                                      ec->rx_coalesce_usecs);
-       if (ret)
-               return ret;
 
-       ret = ops->set_coalesce_frames(
-               priv->ae_handle,
-               ec->rx_max_coalesced_frames);
+       rc2 = ops->set_coalesce_frames(priv->ae_handle,
+                                      ec->tx_max_coalesced_frames,
+                                      ec->rx_max_coalesced_frames);
 
-       return ret;
+       if (rc1 || rc2)
+               return -EINVAL;
+
+       return 0;
 }
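For context, settings like these reach hns_set_coalesce through the ethtool ioctl. A minimal userspace sketch using SIOCETHTOOL with ETHTOOL_SCOALESCE (device name and values are placeholders; note the driver above rejects mismatched tx/rx usecs):

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <linux/ethtool.h>
#include <linux/sockios.h>
#include <net/if.h>

int main(void)
{
	struct ethtool_coalesce ec;
	struct ifreq ifr;
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	if (fd < 0)
		return 1;

	memset(&ec, 0, sizeof(ec));
	ec.cmd = ETHTOOL_SCOALESCE;
	ec.tx_coalesce_usecs = ec.rx_coalesce_usecs = 50;	/* must match */
	ec.tx_max_coalesced_frames = 32;
	ec.rx_max_coalesced_frames = 32;

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);	/* placeholder */
	ifr.ifr_data = (void *)&ec;

	if (ioctl(fd, SIOCETHTOOL, &ifr) < 0)
		perror("ETHTOOL_SCOALESCE");

	close(fd);
	return 0;
}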
 
 /**
@@ -1253,12 +1251,10 @@ hns_set_rss(struct net_device *netdev, const u32 *indir, const u8 *key,
 
        ops = priv->ae_handle->dev->ops;
 
-       /* currently hfunc can only be Toeplitz hash */
-       if (key ||
-           (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP))
+       if (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP) {
+               netdev_err(netdev, "Invalid hfunc!\n");
                return -EOPNOTSUPP;
-       if (!indir)
-               return 0;
+       }
 
        return ops->set_rss(priv->ae_handle, indir, key, hfunc);
 }
index 501eb2090ca62bcd118abc136e4c433bdaa38eb6..e5221d95afe195583b7496130cf1a544d1b1f97f 100644 (file)
 #include <linux/phy.h>
 #include <linux/platform_device.h>
 #include <linux/regmap.h>
-#include <linux/spinlock_types.h>
 
 #define MDIO_DRV_NAME "Hi-HNS_MDIO"
 #define MDIO_BUS_NAME "Hisilicon MII Bus"
-#define MDIO_DRV_VERSION "1.3.0"
-#define MDIO_COPYRIGHT "Copyright(c) 2015 Huawei Corporation."
-#define MDIO_DRV_STRING MDIO_BUS_NAME
-#define MDIO_DEFAULT_DEVICE_DESCR MDIO_BUS_NAME
-
-#define MDIO_CTL_DEV_ADDR(x)   (x & 0x1f)
-#define MDIO_CTL_PORT_ADDR(x)  ((x & 0x1f) << 5)
 
 #define MDIO_TIMEOUT                   1000000
 
@@ -64,9 +56,7 @@ struct hns_mdio_device {
 #define MDIO_CMD_DEVAD_S       0
 #define MDIO_CMD_PRTAD_M       0x1f
 #define MDIO_CMD_PRTAD_S       5
-#define MDIO_CMD_OP_M          0x3
 #define MDIO_CMD_OP_S          10
-#define MDIO_CMD_ST_M          0x3
 #define MDIO_CMD_ST_S          12
 #define MDIO_CMD_START_B       14
 
@@ -185,18 +175,20 @@ static int mdio_sc_cfg_reg_write(struct hns_mdio_device *mdio_dev,
 static int hns_mdio_wait_ready(struct mii_bus *bus)
 {
        struct hns_mdio_device *mdio_dev = bus->priv;
+       u32 cmd_reg_value;
        int i;
-       u32 cmd_reg_value = 1;
 
        /* wait for MDIO_COMMAND_REG's mdio_start == 0 */
        /* after that, a read or write can be done */
-       for (i = 0; cmd_reg_value; i++) {
+       for (i = 0; i < MDIO_TIMEOUT; i++) {
                cmd_reg_value = MDIO_GET_REG_BIT(mdio_dev,
                                                 MDIO_COMMAND_REG,
                                                 MDIO_CMD_START_B);
-               if (i == MDIO_TIMEOUT)
-                       return -ETIMEDOUT;
+               if (!cmd_reg_value)
+                       break;
        }
+       if ((i == MDIO_TIMEOUT) && cmd_reg_value)
+               return -ETIMEDOUT;
 
        return 0;
 }
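The fix above replaces an unbounded poll with a bounded one. The same shape in isolation, as a sketch (read_start_bit() is a hypothetical stand-in for MDIO_GET_REG_BIT, simulated here so the example runs on its own):

#include <stdio.h>

static int busy_polls_left = 3;

/* hypothetical register accessor: returns nonzero while the MDIO cycle runs */
static int read_start_bit(void)
{
	return busy_polls_left-- > 0;
}

/* returns 0 once the start bit clears, -1 after 'limit' polls */
static int wait_bit_clear(int limit)
{
	int i;

	for (i = 0; i < limit; i++)
		if (!read_start_bit())
			return 0;	/* hardware finished in time */

	return -1;	/* bounded: report a timeout instead of spinning forever */
}

int main(void)
{
	printf("wait result: %d\n", wait_bit_clear(10));
	return 0;
}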
index eba21835d90de8f88280ff82618fa9925c777c05..98768ba0955a1b3b45518b020f9eb6fda0770b5b 100644 (file)
@@ -8,4 +8,3 @@ ibm_emac-y := mal.o core.o phy.o
 ibm_emac-$(CONFIG_IBM_EMAC_ZMII) += zmii.o
 ibm_emac-$(CONFIG_IBM_EMAC_RGMII) += rgmii.o
 ibm_emac-$(CONFIG_IBM_EMAC_TAH) += tah.o
-ibm_emac-$(CONFIG_IBM_EMAC_DEBUG) += debug.o
index 275c2e2349ad92de224df1939769238d83ea5f3b..508923f39ccfe0cbc39ed23305af46860eef32c9 100644 (file)
@@ -1929,7 +1929,7 @@ static struct net_device_stats *emac_stats(struct net_device *ndev)
        struct emac_instance *dev = netdev_priv(ndev);
        struct emac_stats *st = &dev->stats;
        struct emac_error_stats *est = &dev->estats;
-       struct net_device_stats *nst = &dev->nstats;
+       struct net_device_stats *nst = &ndev->stats;
        unsigned long flags;
 
        DBG2(dev, "stats" NL);
@@ -2589,8 +2589,6 @@ static int emac_dt_mdio_probe(struct emac_instance *dev)
 static int emac_dt_phy_connect(struct emac_instance *dev,
                               struct device_node *phy_handle)
 {
-       int res;
-
        dev->phy.def = devm_kzalloc(&dev->ofdev->dev, sizeof(*dev->phy.def),
                                    GFP_KERNEL);
        if (!dev->phy.def)
@@ -2617,7 +2615,7 @@ static int emac_dt_phy_probe(struct emac_instance *dev)
 {
        struct device_node *np = dev->ofdev->dev.of_node;
        struct device_node *phy_handle;
-       int res = 0;
+       int res = 1;
 
        phy_handle = of_parse_phandle(np, "phy-handle", 0);
 
@@ -2714,13 +2712,24 @@ static int emac_init_phy(struct emac_instance *dev)
        if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII)) {
                int res = emac_dt_phy_probe(dev);
 
-               mutex_unlock(&emac_phy_map_lock);
-               if (!res)
+               switch (res) {
+               case 1:
+                       /* No phy-handle property configured.
+                        * Continue with the existing phy probe
+                        * and setup code.
+                        */
+                       break;
+
+               case 0:
+                       mutex_unlock(&emac_phy_map_lock);
                        goto init_phy;
 
-               dev_err(&dev->ofdev->dev, "failed to attach dt phy (%d).\n",
-                       res);
-               return res;
+               default:
+                       mutex_unlock(&emac_phy_map_lock);
+                       dev_err(&dev->ofdev->dev, "failed to attach dt phy (%d).\n",
+                               res);
+                       return res;
+               }
        }
 
        if (dev->phy_address != 0xffffffff)
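A side note on the convention above: emac_dt_phy_probe() now returns a negative errno for hard failures, 0 when a DT phy was attached, and 1 when no phy-handle is configured so the legacy probe should run. A tiny sketch of the same tri-state pattern, with hypothetical names:

#include <stdio.h>

/* hypothetical probe: <0 = hard error, 0 = attached, 1 = not configured */
static int probe_dt_phy(int has_phy_handle, int attach_ok)
{
	if (!has_phy_handle)
		return 1;	/* fall back to legacy probing */

	return attach_ok ? 0 : -5;	/* errno-style hard failure */
}

int main(void)
{
	printf("%d %d %d\n",
	       probe_dt_phy(0, 0),	/* 1: fall back */
	       probe_dt_phy(1, 1),	/* 0: success */
	       probe_dt_phy(1, 0));	/* <0: error */
	return 0;
}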
@@ -3164,8 +3173,6 @@ static int emac_probe(struct platform_device *ofdev)
                printk("%s: found %s PHY (0x%02x)\n", ndev->name,
                       dev->phy.def->name, dev->phy.address);
 
-       emac_dbg_register(dev);
-
        /* Life is good */
        return 0;
 
@@ -3234,7 +3241,6 @@ static int emac_remove(struct platform_device *ofdev)
        mal_unregister_commac(dev->mal, &dev->commac);
        emac_put_deps(dev);
 
-       emac_dbg_unregister(dev);
        iounmap(dev->emacp);
 
        if (dev->wol_irq)
@@ -3317,9 +3323,6 @@ static int __init emac_init(void)
 
        printk(KERN_INFO DRV_DESC ", version " DRV_VERSION "\n");
 
-       /* Init debug stuff */
-       emac_init_debug();
-
        /* Build EMAC boot list */
        emac_make_bootlist();
 
@@ -3364,7 +3367,6 @@ static void __exit emac_exit(void)
        rgmii_exit();
        zmii_exit();
        mal_exit();
-       emac_fini_debug();
 
        /* Destroy EMAC boot list */
        for (i = 0; i < EMAC_BOOT_LIST_SIZE; i++)
index 0710a6685489355fdd74b89259be8b360b52c557..f10e156641d511d2d5d6a133dd3a8df18498cf69 100644 (file)
@@ -265,7 +265,6 @@ struct emac_instance {
        /* Stats
         */
        struct emac_error_stats         estats;
-       struct net_device_stats         nstats;
        struct emac_stats               stats;
 
        /* Misc
diff --git a/drivers/net/ethernet/ibm/emac/debug.c b/drivers/net/ethernet/ibm/emac/debug.c
deleted file mode 100644 (file)
index a559f32..0000000
+++ /dev/null
@@ -1,270 +0,0 @@
-/*
- * drivers/net/ethernet/ibm/emac/debug.c
- *
- * Driver for PowerPC 4xx on-chip ethernet controller, debug print routines.
- *
- * Copyright 2007 Benjamin Herrenschmidt, IBM Corp.
- *                <benh@kernel.crashing.org>
- *
- * Based on the arch/ppc version of the driver:
- *
- * Copyright (c) 2004, 2005 Zultys Technologies
- * Eugene Surovegin <eugene.surovegin@zultys.com> or <ebs@ebshome.net>
- *
- * This program is free software; you can redistribute  it and/or modify it
- * under  the terms of  the GNU General  Public License as published by the
- * Free Software Foundation;  either version 2 of the  License, or (at your
- * option) any later version.
- *
- */
-#include <linux/init.h>
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/netdevice.h>
-#include <linux/sysrq.h>
-#include <asm/io.h>
-
-#include "core.h"
-
-static DEFINE_SPINLOCK(emac_dbg_lock);
-
-static void emac_desc_dump(struct emac_instance *p)
-{
-       int i;
-       printk("** EMAC %s TX BDs **\n"
-              " tx_cnt = %d tx_slot = %d ack_slot = %d\n",
-              p->ofdev->dev.of_node->full_name,
-              p->tx_cnt, p->tx_slot, p->ack_slot);
-       for (i = 0; i < NUM_TX_BUFF / 2; ++i)
-               printk
-                   ("bd[%2d] 0x%08x %c 0x%04x %4u - bd[%2d] 0x%08x %c 0x%04x %4u\n",
-                    i, p->tx_desc[i].data_ptr, p->tx_skb[i] ? 'V' : ' ',
-                    p->tx_desc[i].ctrl, p->tx_desc[i].data_len,
-                    NUM_TX_BUFF / 2 + i,
-                    p->tx_desc[NUM_TX_BUFF / 2 + i].data_ptr,
-                    p->tx_skb[NUM_TX_BUFF / 2 + i] ? 'V' : ' ',
-                    p->tx_desc[NUM_TX_BUFF / 2 + i].ctrl,
-                    p->tx_desc[NUM_TX_BUFF / 2 + i].data_len);
-
-       printk("** EMAC %s RX BDs **\n"
-              " rx_slot = %d flags = 0x%lx rx_skb_size = %d rx_sync_size = %d\n"
-              " rx_sg_skb = 0x%p\n",
-              p->ofdev->dev.of_node->full_name,
-              p->rx_slot, p->commac.flags, p->rx_skb_size,
-              p->rx_sync_size, p->rx_sg_skb);
-       for (i = 0; i < NUM_RX_BUFF / 2; ++i)
-               printk
-                   ("bd[%2d] 0x%08x %c 0x%04x %4u - bd[%2d] 0x%08x %c 0x%04x %4u\n",
-                    i, p->rx_desc[i].data_ptr, p->rx_skb[i] ? 'V' : ' ',
-                    p->rx_desc[i].ctrl, p->rx_desc[i].data_len,
-                    NUM_RX_BUFF / 2 + i,
-                    p->rx_desc[NUM_RX_BUFF / 2 + i].data_ptr,
-                    p->rx_skb[NUM_RX_BUFF / 2 + i] ? 'V' : ' ',
-                    p->rx_desc[NUM_RX_BUFF / 2 + i].ctrl,
-                    p->rx_desc[NUM_RX_BUFF / 2 + i].data_len);
-}
-
-static void emac_mac_dump(struct emac_instance *dev)
-{
-       struct emac_regs __iomem *p = dev->emacp;
-       const int xaht_regs = EMAC_XAHT_REGS(dev);
-       u32 *gaht_base = emac_gaht_base(dev);
-       u32 *iaht_base = emac_iaht_base(dev);
-       int emac4sync = emac_has_feature(dev, EMAC_FTR_EMAC4SYNC);
-       int n;
-
-       printk("** EMAC %s registers **\n"
-              "MR0 = 0x%08x MR1 = 0x%08x TMR0 = 0x%08x TMR1 = 0x%08x\n"
-              "RMR = 0x%08x ISR = 0x%08x ISER = 0x%08x\n"
-              "IAR = %04x%08x VTPID = 0x%04x VTCI = 0x%04x\n",
-              dev->ofdev->dev.of_node->full_name,
-              in_be32(&p->mr0), in_be32(&p->mr1),
-              in_be32(&p->tmr0), in_be32(&p->tmr1),
-              in_be32(&p->rmr), in_be32(&p->isr), in_be32(&p->iser),
-              in_be32(&p->iahr), in_be32(&p->ialr), in_be32(&p->vtpid),
-              in_be32(&p->vtci)
-              );
-
-       if (emac4sync)
-               printk("MAR = %04x%08x MMAR = %04x%08x\n",
-                      in_be32(&p->u0.emac4sync.mahr),
-                      in_be32(&p->u0.emac4sync.malr),
-                      in_be32(&p->u0.emac4sync.mmahr),
-                      in_be32(&p->u0.emac4sync.mmalr)
-                      );
-
-       for (n = 0; n < xaht_regs; n++)
-               printk("IAHT%02d = 0x%08x\n", n + 1, in_be32(iaht_base + n));
-
-       for (n = 0; n < xaht_regs; n++)
-               printk("GAHT%02d = 0x%08x\n", n + 1, in_be32(gaht_base + n));
-
-       printk("LSA = %04x%08x IPGVR = 0x%04x\n"
-              "STACR = 0x%08x TRTR = 0x%08x RWMR = 0x%08x\n"
-              "OCTX = 0x%08x OCRX = 0x%08x\n",
-              in_be32(&p->lsah), in_be32(&p->lsal), in_be32(&p->ipgvr),
-              in_be32(&p->stacr), in_be32(&p->trtr), in_be32(&p->rwmr),
-              in_be32(&p->octx), in_be32(&p->ocrx)
-              );
-
-       if (!emac4sync) {
-               printk("IPCR = 0x%08x\n",
-                      in_be32(&p->u1.emac4.ipcr)
-                      );
-       } else {
-               printk("REVID = 0x%08x TPC = 0x%08x\n",
-                      in_be32(&p->u1.emac4sync.revid),
-                      in_be32(&p->u1.emac4sync.tpc)
-                      );
-       }
-
-       emac_desc_dump(dev);
-}
-
-static void emac_mal_dump(struct mal_instance *mal)
-{
-       int i;
-
-       printk("** MAL %s Registers **\n"
-              "CFG = 0x%08x ESR = 0x%08x IER = 0x%08x\n"
-              "TX|CASR = 0x%08x CARR = 0x%08x EOBISR = 0x%08x DEIR = 0x%08x\n"
-              "RX|CASR = 0x%08x CARR = 0x%08x EOBISR = 0x%08x DEIR = 0x%08x\n",
-              mal->ofdev->dev.of_node->full_name,
-              get_mal_dcrn(mal, MAL_CFG), get_mal_dcrn(mal, MAL_ESR),
-              get_mal_dcrn(mal, MAL_IER),
-              get_mal_dcrn(mal, MAL_TXCASR), get_mal_dcrn(mal, MAL_TXCARR),
-              get_mal_dcrn(mal, MAL_TXEOBISR), get_mal_dcrn(mal, MAL_TXDEIR),
-              get_mal_dcrn(mal, MAL_RXCASR), get_mal_dcrn(mal, MAL_RXCARR),
-              get_mal_dcrn(mal, MAL_RXEOBISR), get_mal_dcrn(mal, MAL_RXDEIR)
-           );
-
-       printk("TX|");
-       for (i = 0; i < mal->num_tx_chans; ++i) {
-               if (i && !(i % 4))
-                       printk("\n   ");
-               printk("CTP%d = 0x%08x ", i, get_mal_dcrn(mal, MAL_TXCTPR(i)));
-       }
-       printk("\nRX|");
-       for (i = 0; i < mal->num_rx_chans; ++i) {
-               if (i && !(i % 4))
-                       printk("\n   ");
-               printk("CTP%d = 0x%08x ", i, get_mal_dcrn(mal, MAL_RXCTPR(i)));
-       }
-       printk("\n   ");
-       for (i = 0; i < mal->num_rx_chans; ++i) {
-               u32 r = get_mal_dcrn(mal, MAL_RCBS(i));
-               if (i && !(i % 3))
-                       printk("\n   ");
-               printk("RCBS%d = 0x%08x (%d) ", i, r, r * 16);
-       }
-       printk("\n");
-}
-
-static struct emac_instance *__emacs[4];
-static struct mal_instance *__mals[1];
-
-void emac_dbg_register(struct emac_instance *dev)
-{
-       unsigned long flags;
-       int i;
-
-       spin_lock_irqsave(&emac_dbg_lock, flags);
-       for (i = 0; i < ARRAY_SIZE(__emacs); i++)
-               if (__emacs[i] == NULL) {
-                       __emacs[i] = dev;
-                       break;
-               }
-       spin_unlock_irqrestore(&emac_dbg_lock, flags);
-}
-
-void emac_dbg_unregister(struct emac_instance *dev)
-{
-       unsigned long flags;
-       int i;
-
-       spin_lock_irqsave(&emac_dbg_lock, flags);
-       for (i = 0; i < ARRAY_SIZE(__emacs); i++)
-               if (__emacs[i] == dev) {
-                       __emacs[i] = NULL;
-                       break;
-               }
-       spin_unlock_irqrestore(&emac_dbg_lock, flags);
-}
-
-void mal_dbg_register(struct mal_instance *mal)
-{
-       unsigned long flags;
-       int i;
-
-       spin_lock_irqsave(&emac_dbg_lock, flags);
-       for (i = 0; i < ARRAY_SIZE(__mals); i++)
-               if (__mals[i] == NULL) {
-                       __mals[i] = mal;
-                       break;
-               }
-       spin_unlock_irqrestore(&emac_dbg_lock, flags);
-}
-
-void mal_dbg_unregister(struct mal_instance *mal)
-{
-       unsigned long flags;
-       int i;
-
-       spin_lock_irqsave(&emac_dbg_lock, flags);
-       for (i = 0; i < ARRAY_SIZE(__mals); i++)
-               if (__mals[i] == mal) {
-                       __mals[i] = NULL;
-                       break;
-               }
-       spin_unlock_irqrestore(&emac_dbg_lock, flags);
-}
-
-void emac_dbg_dump_all(void)
-{
-       unsigned int i;
-       unsigned long flags;
-
-       spin_lock_irqsave(&emac_dbg_lock, flags);
-
-       for (i = 0; i < ARRAY_SIZE(__mals); ++i)
-               if (__mals[i])
-                       emac_mal_dump(__mals[i]);
-
-       for (i = 0; i < ARRAY_SIZE(__emacs); ++i)
-               if (__emacs[i])
-                       emac_mac_dump(__emacs[i]);
-
-       spin_unlock_irqrestore(&emac_dbg_lock, flags);
-}
-
-#if defined(CONFIG_MAGIC_SYSRQ)
-static void emac_sysrq_handler(int key)
-{
-       emac_dbg_dump_all();
-}
-
-static struct sysrq_key_op emac_sysrq_op = {
-       .handler = emac_sysrq_handler,
-       .help_msg = "emac(c)",
-       .action_msg = "Show EMAC(s) status",
-};
-
-int __init emac_init_debug(void)
-{
-       return register_sysrq_key('c', &emac_sysrq_op);
-}
-
-void __exit emac_fini_debug(void)
-{
-       unregister_sysrq_key('c', &emac_sysrq_op);
-}
-
-#else
-int __init emac_init_debug(void)
-{
-       return 0;
-}
-void __exit emac_fini_debug(void)
-{
-}
-#endif                         /* CONFIG_MAGIC_SYSRQ */
index 9c45efe4c8fecfdc0e16371ee67a622e4bfcbdbe..5bdfc174a07e2cb25b040dbb1d14a00f5abb2852 100644 (file)
 #include "core.h"
 
 #if defined(CONFIG_IBM_EMAC_DEBUG)
-
-struct emac_instance;
-struct mal_instance;
-
-void emac_dbg_register(struct emac_instance *dev);
-void emac_dbg_unregister(struct emac_instance *dev);
-void mal_dbg_register(struct mal_instance *mal);
-void mal_dbg_unregister(struct mal_instance *mal);
-int emac_init_debug(void) __init;
-void emac_fini_debug(void) __exit;
-void emac_dbg_dump_all(void);
-
 # define DBG_LEVEL             1
-
 #else
-
-# define emac_dbg_register(x)  do { } while(0)
-# define emac_dbg_unregister(x)        do { } while(0)
-# define mal_dbg_register(x)   do { } while(0)
-# define mal_dbg_unregister(x) do { } while(0)
-# define emac_init_debug()     do { } while(0)
-# define emac_fini_debug()     do { } while(0)
-# define emac_dbg_dump_all()   do { } while(0)
-
 # define DBG_LEVEL             0
-
 #endif
 
 #define EMAC_DBG(d, name, fmt, arg...) \
index cd3227b088b73f51b68951401eefc7e2d1e11470..91b1a558f37d65d218404c802c8e932c8828f43b 100644 (file)
@@ -695,8 +695,6 @@ static int mal_probe(struct platform_device *ofdev)
        wmb();
        platform_set_drvdata(ofdev, mal);
 
-       mal_dbg_register(mal);
-
        return 0;
 
  fail6:
@@ -740,8 +738,6 @@ static int mal_remove(struct platform_device *ofdev)
 
        mal_reset(mal);
 
-       mal_dbg_unregister(mal);
-
        dma_free_coherent(&ofdev->dev,
                          sizeof(struct mal_descriptor) *
                          (NUM_TX_BUFF * mal->num_tx_chans +
index 7acda04d034e909269bf7c75befe125313150870..ed8780cca982cd6935eadb8a39a2e80cdc00448e 100644 (file)
@@ -146,7 +146,6 @@ struct ibmveth_adapter {
     struct vio_dev *vdev;
     struct net_device *netdev;
     struct napi_struct napi;
-    struct net_device_stats stats;
     unsigned int mcastFilterSize;
     void * buffer_list_addr;
     void * filter_list_addr;
index 9198e6bd5160f9559075f348a6d595252ea88819..7ba43cfadf3a86e4694f78632d72c40df3b466f6 100644 (file)
@@ -65,7 +65,6 @@
 #include <linux/irq.h>
 #include <linux/kthread.h>
 #include <linux/seq_file.h>
-#include <linux/debugfs.h>
 #include <linux/interrupt.h>
 #include <net/net_namespace.h>
 #include <asm/hvcall.h>
@@ -89,7 +88,6 @@ MODULE_VERSION(IBMVNIC_DRIVER_VERSION);
 static int ibmvnic_version = IBMVNIC_INITIAL_VERSION;
 static int ibmvnic_remove(struct vio_dev *);
 static void release_sub_crqs(struct ibmvnic_adapter *);
-static void release_sub_crqs_no_irqs(struct ibmvnic_adapter *);
 static int ibmvnic_reset_crq(struct ibmvnic_adapter *);
 static int ibmvnic_send_crq_init(struct ibmvnic_adapter *);
 static int ibmvnic_reenable_crq_queue(struct ibmvnic_adapter *);
@@ -110,6 +108,11 @@ static int ibmvnic_poll(struct napi_struct *napi, int data);
 static void send_map_query(struct ibmvnic_adapter *adapter);
 static void send_request_map(struct ibmvnic_adapter *, dma_addr_t, __be32, u8);
 static void send_request_unmap(struct ibmvnic_adapter *, u8);
+static void send_login(struct ibmvnic_adapter *adapter);
+static void send_cap_queries(struct ibmvnic_adapter *adapter);
+static int init_sub_crq_irqs(struct ibmvnic_adapter *adapter);
+static int ibmvnic_init(struct ibmvnic_adapter *);
+static void release_crq_queue(struct ibmvnic_adapter *);
 
 struct ibmvnic_stat {
        char name[ETH_GSTRING_LEN];
@@ -159,21 +162,6 @@ static long h_reg_sub_crq(unsigned long unit_address, unsigned long token,
        return rc;
 }
 
-/* net_device_ops functions */
-
-static void init_rx_pool(struct ibmvnic_adapter *adapter,
-                        struct ibmvnic_rx_pool *rx_pool, int num, int index,
-                        int buff_size, int active)
-{
-       netdev_dbg(adapter->netdev,
-                  "Initializing rx_pool %d, %d buffs, %d bytes each\n",
-                  index, num, buff_size);
-       rx_pool->size = num;
-       rx_pool->index = index;
-       rx_pool->buff_size = buff_size;
-       rx_pool->active = active;
-}
-
 static int alloc_long_term_buff(struct ibmvnic_adapter *adapter,
                                struct ibmvnic_long_term_buff *ltb, int size)
 {
@@ -202,47 +190,14 @@ static void free_long_term_buff(struct ibmvnic_adapter *adapter,
 {
        struct device *dev = &adapter->vdev->dev;
 
+       if (!ltb->buff)
+               return;
+
        dma_free_coherent(dev, ltb->size, ltb->buff, ltb->addr);
        if (!adapter->failover)
                send_request_unmap(adapter, ltb->map_id);
 }
 
-static int alloc_rx_pool(struct ibmvnic_adapter *adapter,
-                        struct ibmvnic_rx_pool *pool)
-{
-       struct device *dev = &adapter->vdev->dev;
-       int i;
-
-       pool->free_map = kcalloc(pool->size, sizeof(int), GFP_KERNEL);
-       if (!pool->free_map)
-               return -ENOMEM;
-
-       pool->rx_buff = kcalloc(pool->size, sizeof(struct ibmvnic_rx_buff),
-                               GFP_KERNEL);
-
-       if (!pool->rx_buff) {
-               dev_err(dev, "Couldn't alloc rx buffers\n");
-               kfree(pool->free_map);
-               return -ENOMEM;
-       }
-
-       if (alloc_long_term_buff(adapter, &pool->long_term_buff,
-                                pool->size * pool->buff_size)) {
-               kfree(pool->free_map);
-               kfree(pool->rx_buff);
-               return -ENOMEM;
-       }
-
-       for (i = 0; i < pool->size; ++i)
-               pool->free_map[i] = i;
-
-       atomic_set(&pool->available, 0);
-       pool->next_alloc = 0;
-       pool->next_free = 0;
-
-       return 0;
-}
-
 static void replenish_rx_pool(struct ibmvnic_adapter *adapter,
                              struct ibmvnic_rx_pool *pool)
 {
@@ -347,114 +302,349 @@ static void replenish_pools(struct ibmvnic_adapter *adapter)
        }
 }
 
-static void free_rx_pool(struct ibmvnic_adapter *adapter,
-                        struct ibmvnic_rx_pool *pool)
+static void release_stats_token(struct ibmvnic_adapter *adapter)
 {
-       int i;
+       struct device *dev = &adapter->vdev->dev;
+
+       if (!adapter->stats_token)
+               return;
+
+       dma_unmap_single(dev, adapter->stats_token,
+                        sizeof(struct ibmvnic_statistics),
+                        DMA_FROM_DEVICE);
+       adapter->stats_token = 0;
+}
 
-       kfree(pool->free_map);
-       pool->free_map = NULL;
+static int init_stats_token(struct ibmvnic_adapter *adapter)
+{
+       struct device *dev = &adapter->vdev->dev;
+       dma_addr_t stok;
+
+       stok = dma_map_single(dev, &adapter->stats,
+                             sizeof(struct ibmvnic_statistics),
+                             DMA_FROM_DEVICE);
+       if (dma_mapping_error(dev, stok)) {
+               dev_err(dev, "Couldn't map stats buffer\n");
+               return -1;
+       }
+
+       adapter->stats_token = stok;
+       return 0;
+}
+
+static void release_rx_pools(struct ibmvnic_adapter *adapter)
+{
+       struct ibmvnic_rx_pool *rx_pool;
+       int rx_scrqs;
+       int i, j;
 
-       if (!pool->rx_buff)
+       if (!adapter->rx_pool)
                return;
 
-       for (i = 0; i < pool->size; i++) {
-               if (pool->rx_buff[i].skb) {
-                       dev_kfree_skb_any(pool->rx_buff[i].skb);
-                       pool->rx_buff[i].skb = NULL;
+       rx_scrqs = be32_to_cpu(adapter->login_rsp_buf->num_rxadd_subcrqs);
+       for (i = 0; i < rx_scrqs; i++) {
+               rx_pool = &adapter->rx_pool[i];
+
+               kfree(rx_pool->free_map);
+               free_long_term_buff(adapter, &rx_pool->long_term_buff);
+
+               if (!rx_pool->rx_buff)
+                       continue;
+
+               for (j = 0; j < rx_pool->size; j++) {
+                       if (rx_pool->rx_buff[j].skb) {
+                               dev_kfree_skb_any(rx_pool->rx_buff[j].skb);
+                               rx_pool->rx_buff[j].skb = NULL;
+                       }
                }
+
+               kfree(rx_pool->rx_buff);
        }
-       kfree(pool->rx_buff);
-       pool->rx_buff = NULL;
+
+       kfree(adapter->rx_pool);
+       adapter->rx_pool = NULL;
 }
 
-static int ibmvnic_open(struct net_device *netdev)
+static int init_rx_pools(struct net_device *netdev)
 {
        struct ibmvnic_adapter *adapter = netdev_priv(netdev);
        struct device *dev = &adapter->vdev->dev;
-       struct ibmvnic_tx_pool *tx_pool;
-       union ibmvnic_crq crq;
+       struct ibmvnic_rx_pool *rx_pool;
        int rxadd_subcrqs;
        u64 *size_array;
-       int tx_subcrqs;
        int i, j;
 
        rxadd_subcrqs =
-           be32_to_cpu(adapter->login_rsp_buf->num_rxadd_subcrqs);
-       tx_subcrqs =
-           be32_to_cpu(adapter->login_rsp_buf->num_txsubm_subcrqs);
+               be32_to_cpu(adapter->login_rsp_buf->num_rxadd_subcrqs);
        size_array = (u64 *)((u8 *)(adapter->login_rsp_buf) +
-                                 be32_to_cpu(adapter->login_rsp_buf->
-                                             off_rxadd_buff_size));
-       adapter->map_id = 1;
-       adapter->napi = kcalloc(adapter->req_rx_queues,
-                               sizeof(struct napi_struct), GFP_KERNEL);
-       if (!adapter->napi)
-               goto alloc_napi_failed;
-       for (i = 0; i < adapter->req_rx_queues; i++) {
-               netif_napi_add(netdev, &adapter->napi[i], ibmvnic_poll,
-                              NAPI_POLL_WEIGHT);
-               napi_enable(&adapter->napi[i]);
+               be32_to_cpu(adapter->login_rsp_buf->off_rxadd_buff_size));
+
+       adapter->rx_pool = kcalloc(rxadd_subcrqs,
+                                  sizeof(struct ibmvnic_rx_pool),
+                                  GFP_KERNEL);
+       if (!adapter->rx_pool) {
+               dev_err(dev, "Failed to allocate rx pools\n");
+               return -1;
        }
-       adapter->rx_pool =
-           kcalloc(rxadd_subcrqs, sizeof(struct ibmvnic_rx_pool), GFP_KERNEL);
 
-       if (!adapter->rx_pool)
-               goto rx_pool_arr_alloc_failed;
-       send_map_query(adapter);
        for (i = 0; i < rxadd_subcrqs; i++) {
-               init_rx_pool(adapter, &adapter->rx_pool[i],
-                            IBMVNIC_BUFFS_PER_POOL, i,
-                            be64_to_cpu(size_array[i]), 1);
-               if (alloc_rx_pool(adapter, &adapter->rx_pool[i])) {
-                       dev_err(dev, "Couldn't alloc rx pool\n");
-                       goto rx_pool_alloc_failed;
+               rx_pool = &adapter->rx_pool[i];
+
+               netdev_dbg(adapter->netdev,
+                          "Initializing rx_pool %d, %lld buffs, %lld bytes each\n",
+                          i, adapter->req_rx_add_entries_per_subcrq,
+                          be64_to_cpu(size_array[i]));
+
+               rx_pool->size = adapter->req_rx_add_entries_per_subcrq;
+               rx_pool->index = i;
+               rx_pool->buff_size = be64_to_cpu(size_array[i]);
+               rx_pool->active = 1;
+
+               rx_pool->free_map = kcalloc(rx_pool->size, sizeof(int),
+                                           GFP_KERNEL);
+               if (!rx_pool->free_map) {
+                       release_rx_pools(adapter);
+                       return -1;
+               }
+
+               rx_pool->rx_buff = kcalloc(rx_pool->size,
+                                          sizeof(struct ibmvnic_rx_buff),
+                                          GFP_KERNEL);
+               if (!rx_pool->rx_buff) {
+                       dev_err(dev, "Couldn't alloc rx buffers\n");
+                       release_rx_pools(adapter);
+                       return -1;
                }
+
+               if (alloc_long_term_buff(adapter, &rx_pool->long_term_buff,
+                                        rx_pool->size * rx_pool->buff_size)) {
+                       release_rx_pools(adapter);
+                       return -1;
+               }
+
+               for (j = 0; j < rx_pool->size; ++j)
+                       rx_pool->free_map[j] = j;
+
+               atomic_set(&rx_pool->available, 0);
+               rx_pool->next_alloc = 0;
+               rx_pool->next_free = 0;
+       }
+
+       return 0;
+}
+
+static void release_tx_pools(struct ibmvnic_adapter *adapter)
+{
+       struct ibmvnic_tx_pool *tx_pool;
+       int i, tx_scrqs;
+
+       if (!adapter->tx_pool)
+               return;
+
+       tx_scrqs = be32_to_cpu(adapter->login_rsp_buf->num_txsubm_subcrqs);
+       for (i = 0; i < tx_scrqs; i++) {
+               tx_pool = &adapter->tx_pool[i];
+               kfree(tx_pool->tx_buff);
+               free_long_term_buff(adapter, &tx_pool->long_term_buff);
+               kfree(tx_pool->free_map);
        }
-       adapter->tx_pool =
-           kcalloc(tx_subcrqs, sizeof(struct ibmvnic_tx_pool), GFP_KERNEL);
 
+       kfree(adapter->tx_pool);
+       adapter->tx_pool = NULL;
+}
+
+static int init_tx_pools(struct net_device *netdev)
+{
+       struct ibmvnic_adapter *adapter = netdev_priv(netdev);
+       struct device *dev = &adapter->vdev->dev;
+       struct ibmvnic_tx_pool *tx_pool;
+       int tx_subcrqs;
+       int i, j;
+
+       tx_subcrqs = be32_to_cpu(adapter->login_rsp_buf->num_txsubm_subcrqs);
+       adapter->tx_pool = kcalloc(tx_subcrqs,
+                                  sizeof(struct ibmvnic_tx_pool), GFP_KERNEL);
        if (!adapter->tx_pool)
-               goto tx_pool_arr_alloc_failed;
+               return -1;
+
        for (i = 0; i < tx_subcrqs; i++) {
                tx_pool = &adapter->tx_pool[i];
-               tx_pool->tx_buff =
-                   kcalloc(adapter->max_tx_entries_per_subcrq,
-                           sizeof(struct ibmvnic_tx_buff), GFP_KERNEL);
-               if (!tx_pool->tx_buff)
-                       goto tx_pool_alloc_failed;
+               tx_pool->tx_buff = kcalloc(adapter->req_tx_entries_per_subcrq,
+                                          sizeof(struct ibmvnic_tx_buff),
+                                          GFP_KERNEL);
+               if (!tx_pool->tx_buff) {
+                       dev_err(dev, "tx pool buffer allocation failed\n");
+                       release_tx_pools(adapter);
+                       return -1;
+               }
 
                if (alloc_long_term_buff(adapter, &tx_pool->long_term_buff,
-                                        adapter->max_tx_entries_per_subcrq *
-                                        adapter->req_mtu))
-                       goto tx_ltb_alloc_failed;
+                                        adapter->req_tx_entries_per_subcrq *
+                                        adapter->req_mtu)) {
+                       release_tx_pools(adapter);
+                       return -1;
+               }
 
-               tx_pool->free_map =
-                   kcalloc(adapter->max_tx_entries_per_subcrq,
-                           sizeof(int), GFP_KERNEL);
-               if (!tx_pool->free_map)
-                       goto tx_fm_alloc_failed;
+               tx_pool->free_map = kcalloc(adapter->req_tx_entries_per_subcrq,
+                                           sizeof(int), GFP_KERNEL);
+               if (!tx_pool->free_map) {
+                       release_tx_pools(adapter);
+                       return -1;
+               }
 
-               for (j = 0; j < adapter->max_tx_entries_per_subcrq; j++)
+               for (j = 0; j < adapter->req_tx_entries_per_subcrq; j++)
                        tx_pool->free_map[j] = j;
 
                tx_pool->consumer_index = 0;
                tx_pool->producer_index = 0;
        }
-       adapter->bounce_buffer_size =
-           (netdev->mtu + ETH_HLEN - 1) / PAGE_SIZE + 1;
-       adapter->bounce_buffer = kmalloc(adapter->bounce_buffer_size,
-                                        GFP_KERNEL);
+
+       return 0;
+}
+
+static void release_bounce_buffer(struct ibmvnic_adapter *adapter)
+{
+       struct device *dev = &adapter->vdev->dev;
+
        if (!adapter->bounce_buffer)
-               goto bounce_alloc_failed;
+               return;
 
-       adapter->bounce_buffer_dma = dma_map_single(dev, adapter->bounce_buffer,
-                                                   adapter->bounce_buffer_size,
-                                                   DMA_TO_DEVICE);
-       if (dma_mapping_error(dev, adapter->bounce_buffer_dma)) {
-               dev_err(dev, "Couldn't map tx bounce buffer\n");
-               goto bounce_map_failed;
+       if (!dma_mapping_error(dev, adapter->bounce_buffer_dma)) {
+               dma_unmap_single(dev, adapter->bounce_buffer_dma,
+                                adapter->bounce_buffer_size,
+                                DMA_BIDIRECTIONAL);
+               adapter->bounce_buffer_dma = DMA_ERROR_CODE;
+       }
+
+       kfree(adapter->bounce_buffer);
+       adapter->bounce_buffer = NULL;
+}
+
+static int init_bounce_buffer(struct net_device *netdev)
+{
+       struct ibmvnic_adapter *adapter = netdev_priv(netdev);
+       struct device *dev = &adapter->vdev->dev;
+       char *buf;
+       int buf_sz;
+       dma_addr_t map_addr;
+
+       buf_sz = (netdev->mtu + ETH_HLEN - 1) / PAGE_SIZE + 1;
+       buf = kmalloc(buf_sz, GFP_KERNEL);
+       if (!buf)
+               return -1;
+
+       map_addr = dma_map_single(dev, buf, buf_sz, DMA_TO_DEVICE);
+       if (dma_mapping_error(dev, map_addr)) {
+               dev_err(dev, "Couldn't map bounce buffer\n");
+               kfree(buf);
+               return -1;
+       }
+
+       adapter->bounce_buffer = buf;
+       adapter->bounce_buffer_size = buf_sz;
+       adapter->bounce_buffer_dma = map_addr;
+       return 0;
+}
+
+static int ibmvnic_login(struct net_device *netdev)
+{
+       struct ibmvnic_adapter *adapter = netdev_priv(netdev);
+       unsigned long timeout = msecs_to_jiffies(30000);
+       struct device *dev = &adapter->vdev->dev;
+
+       do {
+               if (adapter->renegotiate) {
+                       adapter->renegotiate = false;
+                       release_sub_crqs(adapter);
+
+                       reinit_completion(&adapter->init_done);
+                       send_cap_queries(adapter);
+                       if (!wait_for_completion_timeout(&adapter->init_done,
+                                                        timeout)) {
+                               dev_err(dev, "Capabilities query timeout\n");
+                               return -1;
+                       }
+               }
+
+               reinit_completion(&adapter->init_done);
+               send_login(adapter);
+               if (!wait_for_completion_timeout(&adapter->init_done,
+                                                timeout)) {
+                       dev_err(dev, "Login timeout\n");
+                       return -1;
+               }
+       } while (adapter->renegotiate);
+
+       return 0;
+}
+
+static void release_resources(struct ibmvnic_adapter *adapter)
+{
+       release_bounce_buffer(adapter);
+       release_tx_pools(adapter);
+       release_rx_pools(adapter);
+
+       release_sub_crqs(adapter);
+       release_crq_queue(adapter);
+
+       release_stats_token(adapter);
+}
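The refactor above trades ibmvnic_open()'s long goto unwind for paired init_*/release_* helpers whose release side is idempotent (each checks for an unset resource before freeing). A generic sketch of that shape, with hypothetical names:

#include <stdlib.h>

struct resources {
	int *a;
	int *b;
};

/* idempotent: safe on partially initialized state, so every failure
 * path can simply call it
 */
static void release_all(struct resources *r)
{
	free(r->a);
	r->a = NULL;
	free(r->b);
	r->b = NULL;
}

static int init_all(struct resources *r)
{
	r->a = calloc(16, sizeof(*r->a));
	r->b = calloc(16, sizeof(*r->b));
	if (!r->a || !r->b) {
		release_all(r);	/* one cleanup path for every failure */
		return -1;
	}
	return 0;
}

int main(void)
{
	struct resources r = { NULL, NULL };

	if (init_all(&r))
		return 1;
	release_all(&r);
	return 0;
}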
+
+static int ibmvnic_open(struct net_device *netdev)
+{
+       struct ibmvnic_adapter *adapter = netdev_priv(netdev);
+       struct device *dev = &adapter->vdev->dev;
+       union ibmvnic_crq crq;
+       int rc = 0;
+       int i;
+
+       if (adapter->is_closed) {
+               rc = ibmvnic_init(adapter);
+               if (rc)
+                       return rc;
+       }
+
+       rc = ibmvnic_login(netdev);
+       if (rc)
+               return rc;
+
+       rc = netif_set_real_num_tx_queues(netdev, adapter->req_tx_queues);
+       if (rc) {
+               dev_err(dev, "failed to set the number of tx queues\n");
+               return -1;
+       }
+
+       rc = init_sub_crq_irqs(adapter);
+       if (rc) {
+               dev_err(dev, "failed to initialize sub crq irqs\n");
+               return -1;
+       }
+
+       adapter->map_id = 1;
+       adapter->napi = kcalloc(adapter->req_rx_queues,
+                               sizeof(struct napi_struct), GFP_KERNEL);
+       if (!adapter->napi)
+               goto ibmvnic_open_fail;
+       for (i = 0; i < adapter->req_rx_queues; i++) {
+               netif_napi_add(netdev, &adapter->napi[i], ibmvnic_poll,
+                              NAPI_POLL_WEIGHT);
+               napi_enable(&adapter->napi[i]);
        }
+
+       send_map_query(adapter);
+
+       rc = init_rx_pools(netdev);
+       if (rc)
+               goto ibmvnic_open_fail;
+
+       rc = init_tx_pools(netdev);
+       if (rc)
+               goto ibmvnic_open_fail;
+
+       rc = init_bounce_buffer(netdev);
+       if (rc)
+               goto ibmvnic_open_fail;
+
        replenish_pools(adapter);
 
        /* We're ready to receive frames, enable the sub-crq interrupts and
@@ -473,48 +663,20 @@ static int ibmvnic_open(struct net_device *netdev)
        ibmvnic_send_crq(adapter, &crq);
 
        netif_tx_start_all_queues(netdev);
+       adapter->is_closed = false;
 
        return 0;
 
-bounce_map_failed:
-       kfree(adapter->bounce_buffer);
-bounce_alloc_failed:
-       i = tx_subcrqs - 1;
-       kfree(adapter->tx_pool[i].free_map);
-tx_fm_alloc_failed:
-       free_long_term_buff(adapter, &adapter->tx_pool[i].long_term_buff);
-tx_ltb_alloc_failed:
-       kfree(adapter->tx_pool[i].tx_buff);
-tx_pool_alloc_failed:
-       for (j = 0; j < i; j++) {
-               kfree(adapter->tx_pool[j].tx_buff);
-               free_long_term_buff(adapter,
-                                   &adapter->tx_pool[j].long_term_buff);
-               kfree(adapter->tx_pool[j].free_map);
-       }
-       kfree(adapter->tx_pool);
-       adapter->tx_pool = NULL;
-tx_pool_arr_alloc_failed:
-       i = rxadd_subcrqs;
-rx_pool_alloc_failed:
-       for (j = 0; j < i; j++) {
-               free_rx_pool(adapter, &adapter->rx_pool[j]);
-               free_long_term_buff(adapter,
-                                   &adapter->rx_pool[j].long_term_buff);
-       }
-       kfree(adapter->rx_pool);
-       adapter->rx_pool = NULL;
-rx_pool_arr_alloc_failed:
+ibmvnic_open_fail:
        for (i = 0; i < adapter->req_rx_queues; i++)
                napi_disable(&adapter->napi[i]);
-alloc_napi_failed:
+       release_resources(adapter);
        return -ENOMEM;
 }
 
 static int ibmvnic_close(struct net_device *netdev)
 {
        struct ibmvnic_adapter *adapter = netdev_priv(netdev);
-       struct device *dev = &adapter->vdev->dev;
        union ibmvnic_crq crq;
        int i;
 
@@ -526,45 +688,16 @@ static int ibmvnic_close(struct net_device *netdev)
        if (!adapter->failover)
                netif_tx_stop_all_queues(netdev);
 
-       if (adapter->bounce_buffer) {
-               if (!dma_mapping_error(dev, adapter->bounce_buffer_dma)) {
-                       dma_unmap_single(&adapter->vdev->dev,
-                                        adapter->bounce_buffer_dma,
-                                        adapter->bounce_buffer_size,
-                                        DMA_BIDIRECTIONAL);
-                       adapter->bounce_buffer_dma = DMA_ERROR_CODE;
-               }
-               kfree(adapter->bounce_buffer);
-               adapter->bounce_buffer = NULL;
-       }
-
        memset(&crq, 0, sizeof(crq));
        crq.logical_link_state.first = IBMVNIC_CRQ_CMD;
        crq.logical_link_state.cmd = LOGICAL_LINK_STATE;
        crq.logical_link_state.link_state = IBMVNIC_LOGICAL_LNK_DN;
        ibmvnic_send_crq(adapter, &crq);
 
-       for (i = 0; i < be32_to_cpu(adapter->login_rsp_buf->num_txsubm_subcrqs);
-            i++) {
-               kfree(adapter->tx_pool[i].tx_buff);
-               free_long_term_buff(adapter,
-                                   &adapter->tx_pool[i].long_term_buff);
-               kfree(adapter->tx_pool[i].free_map);
-       }
-       kfree(adapter->tx_pool);
-       adapter->tx_pool = NULL;
-
-       for (i = 0; i < be32_to_cpu(adapter->login_rsp_buf->num_rxadd_subcrqs);
-            i++) {
-               free_rx_pool(adapter, &adapter->rx_pool[i]);
-               free_long_term_buff(adapter,
-                                   &adapter->rx_pool[i].long_term_buff);
-       }
-       kfree(adapter->rx_pool);
-       adapter->rx_pool = NULL;
+       release_resources(adapter);
 
+       adapter->is_closed = true;
        adapter->closing = false;
-
        return 0;
 }
 
@@ -705,6 +838,7 @@ static int ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev)
        u8 *hdrs = (u8 *)&adapter->tx_rx_desc_req;
        struct device *dev = &adapter->vdev->dev;
        struct ibmvnic_tx_buff *tx_buff = NULL;
+       struct ibmvnic_sub_crq_queue *tx_scrq;
        struct ibmvnic_tx_pool *tx_pool;
        unsigned int tx_send_failed = 0;
        unsigned int tx_map_failed = 0;
@@ -724,6 +858,7 @@ static int ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev)
        int ret = 0;
 
        tx_pool = &adapter->tx_pool[queue_num];
+       tx_scrq = adapter->tx_scrq[queue_num];
        txq = netdev_get_tx_queue(netdev, skb_get_queue_mapping(skb));
        handle_array = (u64 *)((u8 *)(adapter->login_rsp_buf) +
                                   be32_to_cpu(adapter->login_rsp_buf->
@@ -744,7 +879,7 @@ static int ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev)
 
        tx_pool->consumer_index =
            (tx_pool->consumer_index + 1) %
-               adapter->max_tx_entries_per_subcrq;
+               adapter->req_tx_entries_per_subcrq;
 
        tx_buff = &tx_pool->tx_buff[index];
        tx_buff->skb = skb;
@@ -817,7 +952,7 @@ static int ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev)
 
                if (tx_pool->consumer_index == 0)
                        tx_pool->consumer_index =
-                               adapter->max_tx_entries_per_subcrq - 1;
+                               adapter->req_tx_entries_per_subcrq - 1;
                else
                        tx_pool->consumer_index--;
 
@@ -826,6 +961,14 @@ static int ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev)
                ret = NETDEV_TX_BUSY;
                goto out;
        }
+
+       atomic_inc(&tx_scrq->used);
+
+       if (atomic_read(&tx_scrq->used) >= adapter->req_tx_entries_per_subcrq) {
+               netdev_info(netdev, "Stopping queue %d\n", queue_num);
+               netif_stop_subqueue(netdev, queue_num);
+       }
+
        tx_packets++;
        tx_bytes += skb->len;
        txq->trans_start = jiffies;
@@ -1213,6 +1356,7 @@ static struct ibmvnic_sub_crq_queue *init_sub_crq_queue(struct ibmvnic_adapter
        scrq->adapter = adapter;
        scrq->size = 4 * PAGE_SIZE / sizeof(*scrq->msgs);
        scrq->cur = 0;
+       atomic_set(&scrq->used, 0);
        scrq->rx_skb_top = NULL;
        spin_lock_init(&scrq->lock);
 
@@ -1238,47 +1382,40 @@ static void release_sub_crqs(struct ibmvnic_adapter *adapter)
        int i;
 
        if (adapter->tx_scrq) {
-               for (i = 0; i < adapter->req_tx_queues; i++)
-                       if (adapter->tx_scrq[i]) {
+               for (i = 0; i < adapter->req_tx_queues; i++) {
+                       if (!adapter->tx_scrq[i])
+                               continue;
+
+                       if (adapter->tx_scrq[i]->irq) {
                                free_irq(adapter->tx_scrq[i]->irq,
                                         adapter->tx_scrq[i]);
                                irq_dispose_mapping(adapter->tx_scrq[i]->irq);
-                               release_sub_crq_queue(adapter,
-                                                     adapter->tx_scrq[i]);
+                               adapter->tx_scrq[i]->irq = 0;
                        }
+
+                       release_sub_crq_queue(adapter, adapter->tx_scrq[i]);
+               }
+
+               kfree(adapter->tx_scrq);
                adapter->tx_scrq = NULL;
        }
 
        if (adapter->rx_scrq) {
-               for (i = 0; i < adapter->req_rx_queues; i++)
-                       if (adapter->rx_scrq[i]) {
+               for (i = 0; i < adapter->req_rx_queues; i++) {
+                       if (!adapter->rx_scrq[i])
+                               continue;
+
+                       if (adapter->rx_scrq[i]->irq) {
                                free_irq(adapter->rx_scrq[i]->irq,
                                         adapter->rx_scrq[i]);
                                irq_dispose_mapping(adapter->rx_scrq[i]->irq);
-                               release_sub_crq_queue(adapter,
-                                                     adapter->rx_scrq[i]);
+                               adapter->rx_scrq[i]->irq = 0;
                        }
-               adapter->rx_scrq = NULL;
-       }
-}
-
-static void release_sub_crqs_no_irqs(struct ibmvnic_adapter *adapter)
-{
-       int i;
 
-       if (adapter->tx_scrq) {
-               for (i = 0; i < adapter->req_tx_queues; i++)
-                       if (adapter->tx_scrq[i])
-                               release_sub_crq_queue(adapter,
-                                                     adapter->tx_scrq[i]);
-               adapter->tx_scrq = NULL;
-       }
+                       release_sub_crq_queue(adapter, adapter->rx_scrq[i]);
+               }
 
-       if (adapter->rx_scrq) {
-               for (i = 0; i < adapter->req_rx_queues; i++)
-                       if (adapter->rx_scrq[i])
-                               release_sub_crq_queue(adapter,
-                                                     adapter->rx_scrq[i]);
+               kfree(adapter->rx_scrq);
                adapter->rx_scrq = NULL;
        }
 }
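
Folding release_sub_crqs_no_irqs() into a single release_sub_crqs() works because each queue now records whether an IRQ was ever mapped: the handler is freed only when ->irq is non-zero and the field is zeroed afterwards, so one function serves both the fully-wired and the partially-initialised case. The rewrite also kfree()s the tx_scrq/rx_scrq pointer arrays themselves before NULLing them, which the old loops never did. The discipline in isolation (illustrative types):

        static void release_queue(struct my_queue *q)
        {
                if (!q)
                        return;
                if (q->irq) {
                        free_irq(q->irq, q);
                        irq_dispose_mapping(q->irq);
                        q->irq = 0;     /* a second release becomes a no-op */
                }
                release_queue_memory(q);        /* always safe by this point */
        }
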
@@ -1355,14 +1492,28 @@ restart_loop:
                                                 DMA_TO_DEVICE);
                        }
 
-                       if (txbuff->last_frag)
+                       if (txbuff->last_frag) {
+                               atomic_dec(&scrq->used);
+
+                               if (atomic_read(&scrq->used) <=
+                                   (adapter->req_tx_entries_per_subcrq / 2) &&
+                                   netif_subqueue_stopped(adapter->netdev,
+                                                          txbuff->skb)) {
+                                       netif_wake_subqueue(adapter->netdev,
+                                                           scrq->pool_index);
+                                       netdev_dbg(adapter->netdev,
+                                                  "Started queue %d\n",
+                                                  scrq->pool_index);
+                               }
+
                                dev_kfree_skb_any(txbuff->skb);
+                       }
 
                        adapter->tx_pool[pool].free_map[adapter->tx_pool[pool].
                                                     producer_index] = index;
                        adapter->tx_pool[pool].producer_index =
                            (adapter->tx_pool[pool].producer_index + 1) %
-                           adapter->max_tx_entries_per_subcrq;
+                           adapter->req_tx_entries_per_subcrq;
                }
                /* remove tx_comp scrq*/
                next->tx_comp.first = 0;
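
The completion side mirrors the transmit hook: each completed last_frag decrements scrq->used, and the subqueue is woken only once occupancy falls to half the negotiated depth. Waking at depth/2 rather than just below full adds hysteresis, so a queue hovering near capacity does not flap between stopped and started on every completion. Roughly, with the same illustrative names as before:

        static void tx_complete(struct net_device *netdev, struct tx_ring *ring,
                                int queue_num, struct sk_buff *skb)
        {
                atomic_dec(&ring->used);
                /* wake only after draining to half depth: cheap hysteresis */
                if (atomic_read(&ring->used) <= ring->depth / 2 &&
                    netif_subqueue_stopped(netdev, skb))
                        netif_wake_subqueue(netdev, queue_num);
        }
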
@@ -1460,7 +1611,7 @@ req_tx_irq_failed:
                free_irq(adapter->tx_scrq[j]->irq, adapter->tx_scrq[j]);
                irq_dispose_mapping(adapter->rx_scrq[j]->irq);
        }
-       release_sub_crqs_no_irqs(adapter);
+       release_sub_crqs(adapter);
        return rc;
 }
 
@@ -2215,72 +2366,21 @@ static void handle_error_info_rsp(union ibmvnic_crq *crq,
        kfree(error_buff);
 }
 
-static void handle_dump_size_rsp(union ibmvnic_crq *crq,
-                                struct ibmvnic_adapter *adapter)
+static void handle_error_indication(union ibmvnic_crq *crq,
+                                   struct ibmvnic_adapter *adapter)
 {
-       int len = be32_to_cpu(crq->request_dump_size_rsp.len);
+       int detail_len = be32_to_cpu(crq->error_indication.detail_error_sz);
        struct ibmvnic_inflight_cmd *inflight_cmd;
        struct device *dev = &adapter->vdev->dev;
-       union ibmvnic_crq newcrq;
+       struct ibmvnic_error_buff *error_buff;
+       union ibmvnic_crq new_crq;
        unsigned long flags;
 
-       /* allocate and map buffer */
-       adapter->dump_data = kmalloc(len, GFP_KERNEL);
-       if (!adapter->dump_data) {
-               complete(&adapter->fw_done);
-               return;
-       }
-
-       adapter->dump_data_token = dma_map_single(dev, adapter->dump_data, len,
-                                                 DMA_FROM_DEVICE);
-
-       if (dma_mapping_error(dev, adapter->dump_data_token)) {
-               if (!firmware_has_feature(FW_FEATURE_CMO))
-                       dev_err(dev, "Couldn't map dump data\n");
-               kfree(adapter->dump_data);
-               complete(&adapter->fw_done);
-               return;
-       }
-
-       inflight_cmd = kmalloc(sizeof(*inflight_cmd), GFP_ATOMIC);
-       if (!inflight_cmd) {
-               dma_unmap_single(dev, adapter->dump_data_token, len,
-                                DMA_FROM_DEVICE);
-               kfree(adapter->dump_data);
-               complete(&adapter->fw_done);
-               return;
-       }
-
-       memset(&newcrq, 0, sizeof(newcrq));
-       newcrq.request_dump.first = IBMVNIC_CRQ_CMD;
-       newcrq.request_dump.cmd = REQUEST_DUMP;
-       newcrq.request_dump.ioba = cpu_to_be32(adapter->dump_data_token);
-       newcrq.request_dump.len = cpu_to_be32(adapter->dump_data_size);
-
-       memcpy(&inflight_cmd->crq, &newcrq, sizeof(newcrq));
-
-       spin_lock_irqsave(&adapter->inflight_lock, flags);
-       list_add_tail(&inflight_cmd->list, &adapter->inflight);
-       spin_unlock_irqrestore(&adapter->inflight_lock, flags);
-
-       ibmvnic_send_crq(adapter, &newcrq);
-}
-
-static void handle_error_indication(union ibmvnic_crq *crq,
-                                   struct ibmvnic_adapter *adapter)
-{
-       int detail_len = be32_to_cpu(crq->error_indication.detail_error_sz);
-       struct ibmvnic_inflight_cmd *inflight_cmd;
-       struct device *dev = &adapter->vdev->dev;
-       struct ibmvnic_error_buff *error_buff;
-       union ibmvnic_crq new_crq;
-       unsigned long flags;
-
-       dev_err(dev, "Firmware reports %serror id %x, cause %d\n",
-               crq->error_indication.
-                   flags & IBMVNIC_FATAL_ERROR ? "FATAL " : "",
-               be32_to_cpu(crq->error_indication.error_id),
-               be16_to_cpu(crq->error_indication.error_cause));
+       dev_err(dev, "Firmware reports %serror id %x, cause %d\n",
+               crq->error_indication.
+                   flags & IBMVNIC_FATAL_ERROR ? "FATAL " : "",
+               be32_to_cpu(crq->error_indication.error_id),
+               be16_to_cpu(crq->error_indication.error_cause));
 
        error_buff = kmalloc(sizeof(*error_buff), GFP_ATOMIC);
        if (!error_buff)
@@ -2401,7 +2501,7 @@ static void handle_request_cap_rsp(union ibmvnic_crq *crq,
                         *req_value,
                         (long int)be64_to_cpu(crq->request_capability_rsp.
                                               number), name);
-               release_sub_crqs_no_irqs(adapter);
+               release_sub_crqs(adapter);
                *req_value = be64_to_cpu(crq->request_capability_rsp.number);
                init_sub_crqs(adapter, 1);
                return;
@@ -2446,7 +2546,6 @@ static int handle_login_rsp(union ibmvnic_crq *login_rsp_crq,
        struct device *dev = &adapter->vdev->dev;
        struct ibmvnic_login_rsp_buffer *login_rsp = adapter->login_rsp_buf;
        struct ibmvnic_login_buffer *login = adapter->login_buf;
-       union ibmvnic_crq crq;
        int i;
 
        dma_unmap_single(dev, adapter->login_buf_token, adapter->login_buf_sz,
@@ -2481,11 +2580,6 @@ static int handle_login_rsp(union ibmvnic_crq *login_rsp_crq,
        }
        complete(&adapter->init_done);
 
-       memset(&crq, 0, sizeof(crq));
-       crq.request_ras_comp_num.first = IBMVNIC_CRQ_CMD;
-       crq.request_ras_comp_num.cmd = REQUEST_RAS_COMP_NUM;
-       ibmvnic_send_crq(adapter, &crq);
-
        return 0;
 }
 
@@ -2721,476 +2815,6 @@ out:
        }
 }
 
-static void handle_control_ras_rsp(union ibmvnic_crq *crq,
-                                  struct ibmvnic_adapter *adapter)
-{
-       u8 correlator = crq->control_ras_rsp.correlator;
-       struct device *dev = &adapter->vdev->dev;
-       bool found = false;
-       int i;
-
-       if (crq->control_ras_rsp.rc.code) {
-               dev_warn(dev, "Control ras failed rc=%d\n",
-                        crq->control_ras_rsp.rc.code);
-               return;
-       }
-
-       for (i = 0; i < adapter->ras_comp_num; i++) {
-               if (adapter->ras_comps[i].correlator == correlator) {
-                       found = true;
-                       break;
-               }
-       }
-
-       if (!found) {
-               dev_warn(dev, "Correlator not found on control_ras_rsp\n");
-               return;
-       }
-
-       switch (crq->control_ras_rsp.op) {
-       case IBMVNIC_TRACE_LEVEL:
-               adapter->ras_comps[i].trace_level = crq->control_ras.level;
-               break;
-       case IBMVNIC_ERROR_LEVEL:
-               adapter->ras_comps[i].error_check_level =
-                   crq->control_ras.level;
-               break;
-       case IBMVNIC_TRACE_PAUSE:
-               adapter->ras_comp_int[i].paused = 1;
-               break;
-       case IBMVNIC_TRACE_RESUME:
-               adapter->ras_comp_int[i].paused = 0;
-               break;
-       case IBMVNIC_TRACE_ON:
-               adapter->ras_comps[i].trace_on = 1;
-               break;
-       case IBMVNIC_TRACE_OFF:
-               adapter->ras_comps[i].trace_on = 0;
-               break;
-       case IBMVNIC_CHG_TRACE_BUFF_SZ:
-               /* trace_buff_sz is 3 bytes, stuff it into an int */
-               ((u8 *)(&adapter->ras_comps[i].trace_buff_size))[0] = 0;
-               ((u8 *)(&adapter->ras_comps[i].trace_buff_size))[1] =
-                   crq->control_ras_rsp.trace_buff_sz[0];
-               ((u8 *)(&adapter->ras_comps[i].trace_buff_size))[2] =
-                   crq->control_ras_rsp.trace_buff_sz[1];
-               ((u8 *)(&adapter->ras_comps[i].trace_buff_size))[3] =
-                   crq->control_ras_rsp.trace_buff_sz[2];
-               break;
-       default:
-               dev_err(dev, "invalid op %d on control_ras_rsp",
-                       crq->control_ras_rsp.op);
-       }
-}
-
-static ssize_t trace_read(struct file *file, char __user *user_buf, size_t len,
-                         loff_t *ppos)
-{
-       struct ibmvnic_fw_comp_internal *ras_comp_int = file->private_data;
-       struct ibmvnic_adapter *adapter = ras_comp_int->adapter;
-       struct device *dev = &adapter->vdev->dev;
-       struct ibmvnic_fw_trace_entry *trace;
-       int num = ras_comp_int->num;
-       union ibmvnic_crq crq;
-       dma_addr_t trace_tok;
-
-       if (*ppos >= be32_to_cpu(adapter->ras_comps[num].trace_buff_size))
-               return 0;
-
-       trace =
-           dma_alloc_coherent(dev,
-                              be32_to_cpu(adapter->ras_comps[num].
-                                          trace_buff_size), &trace_tok,
-                              GFP_KERNEL);
-       if (!trace) {
-               dev_err(dev, "Couldn't alloc trace buffer\n");
-               return 0;
-       }
-
-       memset(&crq, 0, sizeof(crq));
-       crq.collect_fw_trace.first = IBMVNIC_CRQ_CMD;
-       crq.collect_fw_trace.cmd = COLLECT_FW_TRACE;
-       crq.collect_fw_trace.correlator = adapter->ras_comps[num].correlator;
-       crq.collect_fw_trace.ioba = cpu_to_be32(trace_tok);
-       crq.collect_fw_trace.len = adapter->ras_comps[num].trace_buff_size;
-
-       init_completion(&adapter->fw_done);
-       ibmvnic_send_crq(adapter, &crq);
-       wait_for_completion(&adapter->fw_done);
-
-       if (*ppos + len > be32_to_cpu(adapter->ras_comps[num].trace_buff_size))
-               len =
-                   be32_to_cpu(adapter->ras_comps[num].trace_buff_size) -
-                   *ppos;
-
-       copy_to_user(user_buf, &((u8 *)trace)[*ppos], len);
-
-       dma_free_coherent(dev,
-                         be32_to_cpu(adapter->ras_comps[num].trace_buff_size),
-                         trace, trace_tok);
-       *ppos += len;
-       return len;
-}
-
-static const struct file_operations trace_ops = {
-       .owner          = THIS_MODULE,
-       .open           = simple_open,
-       .read           = trace_read,
-};
-
-static ssize_t paused_read(struct file *file, char __user *user_buf, size_t len,
-                          loff_t *ppos)
-{
-       struct ibmvnic_fw_comp_internal *ras_comp_int = file->private_data;
-       struct ibmvnic_adapter *adapter = ras_comp_int->adapter;
-       int num = ras_comp_int->num;
-       char buff[5]; /*  1 or 0 plus \n and \0 */
-       int size;
-
-       size = sprintf(buff, "%d\n", adapter->ras_comp_int[num].paused);
-
-       if (*ppos >= size)
-               return 0;
-
-       copy_to_user(user_buf, buff, size);
-       *ppos += size;
-       return size;
-}
-
-static ssize_t paused_write(struct file *file, const char __user *user_buf,
-                           size_t len, loff_t *ppos)
-{
-       struct ibmvnic_fw_comp_internal *ras_comp_int = file->private_data;
-       struct ibmvnic_adapter *adapter = ras_comp_int->adapter;
-       int num = ras_comp_int->num;
-       union ibmvnic_crq crq;
-       unsigned long val;
-       char buff[9]; /* decimal max int plus \n and \0 */
-
-       copy_from_user(buff, user_buf, sizeof(buff));
-       val = kstrtoul(buff, 10, NULL);
-
-       adapter->ras_comp_int[num].paused = val ? 1 : 0;
-
-       memset(&crq, 0, sizeof(crq));
-       crq.control_ras.first = IBMVNIC_CRQ_CMD;
-       crq.control_ras.cmd = CONTROL_RAS;
-       crq.control_ras.correlator = adapter->ras_comps[num].correlator;
-       crq.control_ras.op = val ? IBMVNIC_TRACE_PAUSE : IBMVNIC_TRACE_RESUME;
-       ibmvnic_send_crq(adapter, &crq);
-
-       return len;
-}
-
-static const struct file_operations paused_ops = {
-       .owner          = THIS_MODULE,
-       .open           = simple_open,
-       .read           = paused_read,
-       .write          = paused_write,
-};
-
-static ssize_t tracing_read(struct file *file, char __user *user_buf,
-                           size_t len, loff_t *ppos)
-{
-       struct ibmvnic_fw_comp_internal *ras_comp_int = file->private_data;
-       struct ibmvnic_adapter *adapter = ras_comp_int->adapter;
-       int num = ras_comp_int->num;
-       char buff[5]; /*  1 or 0 plus \n and \0 */
-       int size;
-
-       size = sprintf(buff, "%d\n", adapter->ras_comps[num].trace_on);
-
-       if (*ppos >= size)
-               return 0;
-
-       copy_to_user(user_buf, buff, size);
-       *ppos += size;
-       return size;
-}
-
-static ssize_t tracing_write(struct file *file, const char __user *user_buf,
-                            size_t len, loff_t *ppos)
-{
-       struct ibmvnic_fw_comp_internal *ras_comp_int = file->private_data;
-       struct ibmvnic_adapter *adapter = ras_comp_int->adapter;
-       int num = ras_comp_int->num;
-       union ibmvnic_crq crq;
-       unsigned long val;
-       char buff[9]; /* decimal max int plus \n and \0 */
-
-       copy_from_user(buff, user_buf, sizeof(buff));
-       val = kstrtoul(buff, 10, NULL);
-
-       memset(&crq, 0, sizeof(crq));
-       crq.control_ras.first = IBMVNIC_CRQ_CMD;
-       crq.control_ras.cmd = CONTROL_RAS;
-       crq.control_ras.correlator = adapter->ras_comps[num].correlator;
-       crq.control_ras.op = val ? IBMVNIC_TRACE_ON : IBMVNIC_TRACE_OFF;
-
-       return len;
-}
-
-static const struct file_operations tracing_ops = {
-       .owner          = THIS_MODULE,
-       .open           = simple_open,
-       .read           = tracing_read,
-       .write          = tracing_write,
-};
-
-static ssize_t error_level_read(struct file *file, char __user *user_buf,
-                               size_t len, loff_t *ppos)
-{
-       struct ibmvnic_fw_comp_internal *ras_comp_int = file->private_data;
-       struct ibmvnic_adapter *adapter = ras_comp_int->adapter;
-       int num = ras_comp_int->num;
-       char buff[5]; /* decimal max char plus \n and \0 */
-       int size;
-
-       size = sprintf(buff, "%d\n", adapter->ras_comps[num].error_check_level);
-
-       if (*ppos >= size)
-               return 0;
-
-       copy_to_user(user_buf, buff, size);
-       *ppos += size;
-       return size;
-}
-
-static ssize_t error_level_write(struct file *file, const char __user *user_buf,
-                                size_t len, loff_t *ppos)
-{
-       struct ibmvnic_fw_comp_internal *ras_comp_int = file->private_data;
-       struct ibmvnic_adapter *adapter = ras_comp_int->adapter;
-       int num = ras_comp_int->num;
-       union ibmvnic_crq crq;
-       unsigned long val;
-       char buff[9]; /* decimal max int plus \n and \0 */
-
-       copy_from_user(buff, user_buf, sizeof(buff));
-       val = kstrtoul(buff, 10, NULL);
-
-       if (val > 9)
-               val = 9;
-
-       memset(&crq, 0, sizeof(crq));
-       crq.control_ras.first = IBMVNIC_CRQ_CMD;
-       crq.control_ras.cmd = CONTROL_RAS;
-       crq.control_ras.correlator = adapter->ras_comps[num].correlator;
-       crq.control_ras.op = IBMVNIC_ERROR_LEVEL;
-       crq.control_ras.level = val;
-       ibmvnic_send_crq(adapter, &crq);
-
-       return len;
-}
-
-static const struct file_operations error_level_ops = {
-       .owner          = THIS_MODULE,
-       .open           = simple_open,
-       .read           = error_level_read,
-       .write          = error_level_write,
-};
-
-static ssize_t trace_level_read(struct file *file, char __user *user_buf,
-                               size_t len, loff_t *ppos)
-{
-       struct ibmvnic_fw_comp_internal *ras_comp_int = file->private_data;
-       struct ibmvnic_adapter *adapter = ras_comp_int->adapter;
-       int num = ras_comp_int->num;
-       char buff[5]; /* decimal max char plus \n and \0 */
-       int size;
-
-       size = sprintf(buff, "%d\n", adapter->ras_comps[num].trace_level);
-       if (*ppos >= size)
-               return 0;
-
-       copy_to_user(user_buf, buff, size);
-       *ppos += size;
-       return size;
-}
-
-static ssize_t trace_level_write(struct file *file, const char __user *user_buf,
-                                size_t len, loff_t *ppos)
-{
-       struct ibmvnic_fw_comp_internal *ras_comp_int = file->private_data;
-       struct ibmvnic_adapter *adapter = ras_comp_int->adapter;
-       union ibmvnic_crq crq;
-       unsigned long val;
-       char buff[9]; /* decimal max int plus \n and \0 */
-
-       copy_from_user(buff, user_buf, sizeof(buff));
-       val = kstrtoul(buff, 10, NULL);
-       if (val > 9)
-               val = 9;
-
-       memset(&crq, 0, sizeof(crq));
-       crq.control_ras.first = IBMVNIC_CRQ_CMD;
-       crq.control_ras.cmd = CONTROL_RAS;
-       crq.control_ras.correlator =
-           adapter->ras_comps[ras_comp_int->num].correlator;
-       crq.control_ras.op = IBMVNIC_TRACE_LEVEL;
-       crq.control_ras.level = val;
-       ibmvnic_send_crq(adapter, &crq);
-
-       return len;
-}
-
-static const struct file_operations trace_level_ops = {
-       .owner          = THIS_MODULE,
-       .open           = simple_open,
-       .read           = trace_level_read,
-       .write          = trace_level_write,
-};
-
-static ssize_t trace_buff_size_read(struct file *file, char __user *user_buf,
-                                   size_t len, loff_t *ppos)
-{
-       struct ibmvnic_fw_comp_internal *ras_comp_int = file->private_data;
-       struct ibmvnic_adapter *adapter = ras_comp_int->adapter;
-       int num = ras_comp_int->num;
-       char buff[9]; /* decimal max int plus \n and \0 */
-       int size;
-
-       size = sprintf(buff, "%d\n", adapter->ras_comps[num].trace_buff_size);
-       if (*ppos >= size)
-               return 0;
-
-       copy_to_user(user_buf, buff, size);
-       *ppos += size;
-       return size;
-}
-
-static ssize_t trace_buff_size_write(struct file *file,
-                                    const char __user *user_buf, size_t len,
-                                    loff_t *ppos)
-{
-       struct ibmvnic_fw_comp_internal *ras_comp_int = file->private_data;
-       struct ibmvnic_adapter *adapter = ras_comp_int->adapter;
-       union ibmvnic_crq crq;
-       unsigned long val;
-       char buff[9]; /* decimal max int plus \n and \0 */
-
-       copy_from_user(buff, user_buf, sizeof(buff));
-       val = kstrtoul(buff, 10, NULL);
-
-       memset(&crq, 0, sizeof(crq));
-       crq.control_ras.first = IBMVNIC_CRQ_CMD;
-       crq.control_ras.cmd = CONTROL_RAS;
-       crq.control_ras.correlator =
-           adapter->ras_comps[ras_comp_int->num].correlator;
-       crq.control_ras.op = IBMVNIC_CHG_TRACE_BUFF_SZ;
-       /* trace_buff_sz is 3 bytes, stuff an int into it */
-       crq.control_ras.trace_buff_sz[0] = ((u8 *)(&val))[5];
-       crq.control_ras.trace_buff_sz[1] = ((u8 *)(&val))[6];
-       crq.control_ras.trace_buff_sz[2] = ((u8 *)(&val))[7];
-       ibmvnic_send_crq(adapter, &crq);
-
-       return len;
-}
-
-static const struct file_operations trace_size_ops = {
-       .owner          = THIS_MODULE,
-       .open           = simple_open,
-       .read           = trace_buff_size_read,
-       .write          = trace_buff_size_write,
-};
-
-static void handle_request_ras_comps_rsp(union ibmvnic_crq *crq,
-                                        struct ibmvnic_adapter *adapter)
-{
-       struct device *dev = &adapter->vdev->dev;
-       struct dentry *dir_ent;
-       struct dentry *ent;
-       int i;
-
-       debugfs_remove_recursive(adapter->ras_comps_ent);
-
-       adapter->ras_comps_ent = debugfs_create_dir("ras_comps",
-                                                   adapter->debugfs_dir);
-       if (!adapter->ras_comps_ent || IS_ERR(adapter->ras_comps_ent)) {
-               dev_info(dev, "debugfs create ras_comps dir failed\n");
-               return;
-       }
-
-       for (i = 0; i < adapter->ras_comp_num; i++) {
-               dir_ent = debugfs_create_dir(adapter->ras_comps[i].name,
-                                            adapter->ras_comps_ent);
-               if (!dir_ent || IS_ERR(dir_ent)) {
-                       dev_info(dev, "debugfs create %s dir failed\n",
-                                adapter->ras_comps[i].name);
-                       continue;
-               }
-
-               adapter->ras_comp_int[i].adapter = adapter;
-               adapter->ras_comp_int[i].num = i;
-               adapter->ras_comp_int[i].desc_blob.data =
-                   &adapter->ras_comps[i].description;
-               adapter->ras_comp_int[i].desc_blob.size =
-                   sizeof(adapter->ras_comps[i].description);
-
-               /* Don't need to remember the dentry's because the debugfs dir
-                * gets removed recursively
-                */
-               ent = debugfs_create_blob("description", S_IRUGO, dir_ent,
-                                         &adapter->ras_comp_int[i].desc_blob);
-               ent = debugfs_create_file("trace_buf_size", S_IRUGO | S_IWUSR,
-                                         dir_ent, &adapter->ras_comp_int[i],
-                                         &trace_size_ops);
-               ent = debugfs_create_file("trace_level",
-                                         S_IRUGO |
-                                         (adapter->ras_comps[i].trace_level !=
-                                          0xFF  ? S_IWUSR : 0),
-                                          dir_ent, &adapter->ras_comp_int[i],
-                                          &trace_level_ops);
-               ent = debugfs_create_file("error_level",
-                                         S_IRUGO |
-                                         (adapter->
-                                          ras_comps[i].error_check_level !=
-                                          0xFF ? S_IWUSR : 0),
-                                         dir_ent, &adapter->ras_comp_int[i],
-                                         &trace_level_ops);
-               ent = debugfs_create_file("tracing", S_IRUGO | S_IWUSR,
-                                         dir_ent, &adapter->ras_comp_int[i],
-                                         &tracing_ops);
-               ent = debugfs_create_file("paused", S_IRUGO | S_IWUSR,
-                                         dir_ent, &adapter->ras_comp_int[i],
-                                         &paused_ops);
-               ent = debugfs_create_file("trace", S_IRUGO, dir_ent,
-                                         &adapter->ras_comp_int[i],
-                                         &trace_ops);
-       }
-}
-
-static void handle_request_ras_comp_num_rsp(union ibmvnic_crq *crq,
-                                           struct ibmvnic_adapter *adapter)
-{
-       int len = adapter->ras_comp_num * sizeof(struct ibmvnic_fw_component);
-       struct device *dev = &adapter->vdev->dev;
-       union ibmvnic_crq newcrq;
-
-       adapter->ras_comps = dma_alloc_coherent(dev, len,
-                                               &adapter->ras_comps_tok,
-                                               GFP_KERNEL);
-       if (!adapter->ras_comps) {
-               if (!firmware_has_feature(FW_FEATURE_CMO))
-                       dev_err(dev, "Couldn't alloc fw comps buffer\n");
-               return;
-       }
-
-       adapter->ras_comp_int = kmalloc(adapter->ras_comp_num *
-                                       sizeof(struct ibmvnic_fw_comp_internal),
-                                       GFP_KERNEL);
-       if (!adapter->ras_comp_int)
-               dma_free_coherent(dev, len, adapter->ras_comps,
-                                 adapter->ras_comps_tok);
-
-       memset(&newcrq, 0, sizeof(newcrq));
-       newcrq.request_ras_comps.first = IBMVNIC_CRQ_CMD;
-       newcrq.request_ras_comps.cmd = REQUEST_RAS_COMPS;
-       newcrq.request_ras_comps.ioba = cpu_to_be32(adapter->ras_comps_tok);
-       newcrq.request_ras_comps.len = cpu_to_be32(len);
-       ibmvnic_send_crq(adapter, &newcrq);
-}
-
 static void ibmvnic_free_inflight(struct ibmvnic_adapter *adapter)
 {
        struct ibmvnic_inflight_cmd *inflight_cmd, *tmp1;
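
The roughly 470 lines deleted above were the firmware RAS/trace debugfs interface: per-component trace, paused, tracing, error_level, trace_level and trace_buff_size files plus their CRQ plumbing. The removed read handlers open-coded the *ppos bookkeeping and ignored copy_to_user()'s return value, and the write handlers used kstrtoul() as if it returned the parsed value (it returns an error code and stores the result through its third argument, which they passed as NULL). For contrast only, not part of the commit, the usual idiom for such small attributes is simple_read_from_buffer():

        static ssize_t example_read(struct file *file, char __user *ubuf,
                                    size_t len, loff_t *ppos)
        {
                char buf[16];
                int size = scnprintf(buf, sizeof(buf), "%d\n", 1);

                /* handles the offset, short reads and copy_to_user() failure */
                return simple_read_from_buffer(ubuf, len, ppos, buf, size);
        }
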
@@ -3212,9 +2836,6 @@ static void ibmvnic_free_inflight(struct ibmvnic_adapter *adapter)
                        kfree(adapter->login_rsp_buf);
                        kfree(adapter->login_buf);
                        break;
-               case REQUEST_DUMP:
-                       complete(&adapter->fw_done);
-                       break;
                case REQUEST_ERROR_INFO:
                        spin_lock_irqsave(&adapter->error_list_lock, flags2);
                        list_for_each_entry_safe(error_buff, tmp2,
@@ -3374,14 +2995,6 @@ static void ibmvnic_handle_crq(union ibmvnic_crq *crq,
                netdev_dbg(netdev, "Got Statistics Response\n");
                complete(&adapter->stats_done);
                break;
-       case REQUEST_DUMP_SIZE_RSP:
-               netdev_dbg(netdev, "Got Request Dump Size Response\n");
-               handle_dump_size_rsp(crq, adapter);
-               break;
-       case REQUEST_DUMP_RSP:
-               netdev_dbg(netdev, "Got Request Dump Response\n");
-               complete(&adapter->fw_done);
-               break;
        case QUERY_IP_OFFLOAD_RSP:
                netdev_dbg(netdev, "Got Query IP offload Response\n");
                handle_query_ip_offload_rsp(adapter);
@@ -3394,26 +3007,7 @@ static void ibmvnic_handle_crq(union ibmvnic_crq *crq,
                dma_unmap_single(dev, adapter->ip_offload_ctrl_tok,
                                 sizeof(adapter->ip_offload_ctrl),
                                 DMA_TO_DEVICE);
-               /* We're done with the queries, perform the login */
-               send_login(adapter);
-               break;
-       case REQUEST_RAS_COMP_NUM_RSP:
-               netdev_dbg(netdev, "Got Request RAS Comp Num Response\n");
-               if (crq->request_ras_comp_num_rsp.rc.code == 10) {
-                       netdev_dbg(netdev, "Request RAS Comp Num not supported\n");
-                       break;
-               }
-               adapter->ras_comp_num =
-                   be32_to_cpu(crq->request_ras_comp_num_rsp.num_components);
-               handle_request_ras_comp_num_rsp(crq, adapter);
-               break;
-       case REQUEST_RAS_COMPS_RSP:
-               netdev_dbg(netdev, "Got Request RAS Comps Response\n");
-               handle_request_ras_comps_rsp(crq, adapter);
-               break;
-       case CONTROL_RAS_RSP:
-               netdev_dbg(netdev, "Got Control RAS Response\n");
-               handle_control_ras_rsp(crq, adapter);
+               complete(&adapter->init_done);
                break;
        case COLLECT_FW_TRACE_RSP:
                netdev_dbg(netdev, "Got Collect firmware trace Response\n");
@@ -3520,12 +3114,15 @@ static int ibmvnic_reset_crq(struct ibmvnic_adapter *adapter)
        return rc;
 }
 
-static void ibmvnic_release_crq_queue(struct ibmvnic_adapter *adapter)
+static void release_crq_queue(struct ibmvnic_adapter *adapter)
 {
        struct ibmvnic_crq_queue *crq = &adapter->crq;
        struct vio_dev *vdev = adapter->vdev;
        long rc;
 
+       if (!crq->msgs)
+               return;
+
        netdev_dbg(adapter->netdev, "Releasing CRQ\n");
        free_irq(vdev->irq, adapter);
        tasklet_kill(&adapter->tasklet);
@@ -3536,15 +3133,19 @@ static void ibmvnic_release_crq_queue(struct ibmvnic_adapter *adapter)
        dma_unmap_single(&vdev->dev, crq->msg_token, PAGE_SIZE,
                         DMA_BIDIRECTIONAL);
        free_page((unsigned long)crq->msgs);
+       crq->msgs = NULL;
 }
 
-static int ibmvnic_init_crq_queue(struct ibmvnic_adapter *adapter)
+static int init_crq_queue(struct ibmvnic_adapter *adapter)
 {
        struct ibmvnic_crq_queue *crq = &adapter->crq;
        struct device *dev = &adapter->vdev->dev;
        struct vio_dev *vdev = adapter->vdev;
        int rc, retrc = -ENOMEM;
 
+       if (crq->msgs)
+               return 0;
+
        crq->msgs = (union ibmvnic_crq *)get_zeroed_page(GFP_KERNEL);
        /* Should we allocate more than one page? */
 
@@ -3606,48 +3207,10 @@ reg_crq_failed:
        dma_unmap_single(dev, crq->msg_token, PAGE_SIZE, DMA_BIDIRECTIONAL);
 map_failed:
        free_page((unsigned long)crq->msgs);
+       crq->msgs = NULL;
        return retrc;
 }
 
-/* debugfs for dump */
-static int ibmvnic_dump_show(struct seq_file *seq, void *v)
-{
-       struct net_device *netdev = seq->private;
-       struct ibmvnic_adapter *adapter = netdev_priv(netdev);
-       struct device *dev = &adapter->vdev->dev;
-       union ibmvnic_crq crq;
-
-       memset(&crq, 0, sizeof(crq));
-       crq.request_dump_size.first = IBMVNIC_CRQ_CMD;
-       crq.request_dump_size.cmd = REQUEST_DUMP_SIZE;
-
-       init_completion(&adapter->fw_done);
-       ibmvnic_send_crq(adapter, &crq);
-       wait_for_completion(&adapter->fw_done);
-
-       seq_write(seq, adapter->dump_data, adapter->dump_data_size);
-
-       dma_unmap_single(dev, adapter->dump_data_token, adapter->dump_data_size,
-                        DMA_BIDIRECTIONAL);
-
-       kfree(adapter->dump_data);
-
-       return 0;
-}
-
-static int ibmvnic_dump_open(struct inode *inode, struct file *file)
-{
-       return single_open(file, ibmvnic_dump_show, inode->i_private);
-}
-
-static const struct file_operations ibmvnic_dump_ops = {
-       .owner          = THIS_MODULE,
-       .open           = ibmvnic_dump_open,
-       .read           = seq_read,
-       .llseek         = seq_lseek,
-       .release        = single_release,
-};
-
 static void handle_crq_init_rsp(struct work_struct *work)
 {
        struct ibmvnic_adapter *adapter = container_of(work,
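
release_crq_queue() and init_crq_queue(), renamed above from their ibmvnic_-prefixed forms, now treat crq->msgs as the single source of truth: init returns early if the page is already allocated, release returns early if it is not, and both the error and teardown paths clear the pointer. That makes the pair idempotent and safe to call from reset or error handling in any order. Distilled to the guard itself (illustrative struct, same allocator calls):

        struct my_crq {
                void *msgs;     /* non-NULL exactly while initialised */
        };

        static int my_crq_init(struct my_crq *crq)
        {
                if (crq->msgs)                  /* already up: nothing to do */
                        return 0;
                crq->msgs = (void *)get_zeroed_page(GFP_KERNEL);
                return crq->msgs ? 0 : -ENOMEM;
        }

        static void my_crq_release(struct my_crq *crq)
        {
                if (!crq->msgs)                 /* never up, or already down */
                        return;
                free_page((unsigned long)crq->msgs);
                crq->msgs = NULL;               /* re-arm the guard */
        }
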
@@ -3675,26 +3238,6 @@ static void handle_crq_init_rsp(struct work_struct *work)
                goto task_failed;
        }
 
-       do {
-               if (adapter->renegotiate) {
-                       adapter->renegotiate = false;
-                       release_sub_crqs_no_irqs(adapter);
-
-                       reinit_completion(&adapter->init_done);
-                       send_cap_queries(adapter);
-                       if (!wait_for_completion_timeout(&adapter->init_done,
-                                                        timeout)) {
-                               dev_err(dev, "Passive init timeout\n");
-                               goto task_failed;
-                       }
-               }
-       } while (adapter->renegotiate);
-       rc = init_sub_crq_irqs(adapter);
-
-       if (rc)
-               goto task_failed;
-
-       netdev->real_num_tx_queues = adapter->req_tx_queues;
        netdev->mtu = adapter->req_mtu - ETH_HLEN;
 
        if (adapter->failover) {
@@ -3726,14 +3269,40 @@ task_failed:
        dev_err(dev, "Passive initialization was not successful\n");
 }
 
-static int ibmvnic_probe(struct vio_dev *dev, const struct vio_device_id *id)
+static int ibmvnic_init(struct ibmvnic_adapter *adapter)
 {
+       struct device *dev = &adapter->vdev->dev;
        unsigned long timeout = msecs_to_jiffies(30000);
+       int rc;
+
+       rc = init_crq_queue(adapter);
+       if (rc) {
+               dev_err(dev, "Couldn't initialize crq. rc=%d\n", rc);
+               return rc;
+       }
+
+       rc = init_stats_token(adapter);
+       if (rc) {
+               release_crq_queue(adapter);
+               return rc;
+       }
+
+       init_completion(&adapter->init_done);
+       ibmvnic_send_crq_init(adapter);
+       if (!wait_for_completion_timeout(&adapter->init_done, timeout)) {
+               dev_err(dev, "Initialization sequence timed out\n");
+               release_crq_queue(adapter);
+               return -1;
+       }
+
+       return 0;
+}
+
+static int ibmvnic_probe(struct vio_dev *dev, const struct vio_device_id *id)
+{
        struct ibmvnic_adapter *adapter;
        struct net_device *netdev;
        unsigned char *mac_addr_p;
-       struct dentry *ent;
-       char buf[17]; /* debugfs name buf */
        int rc;
 
        dev_dbg(&dev->dev, "entering ibmvnic_probe for UA 0x%x\n",
@@ -3771,118 +3340,36 @@ static int ibmvnic_probe(struct vio_dev *dev, const struct vio_device_id *id)
 
        spin_lock_init(&adapter->stats_lock);
 
-       rc = ibmvnic_init_crq_queue(adapter);
-       if (rc) {
-               dev_err(&dev->dev, "Couldn't initialize crq. rc=%d\n", rc);
-               goto free_netdev;
-       }
-
        INIT_LIST_HEAD(&adapter->errors);
        INIT_LIST_HEAD(&adapter->inflight);
        spin_lock_init(&adapter->error_list_lock);
        spin_lock_init(&adapter->inflight_lock);
 
-       adapter->stats_token = dma_map_single(&dev->dev, &adapter->stats,
-                                             sizeof(struct ibmvnic_statistics),
-                                             DMA_FROM_DEVICE);
-       if (dma_mapping_error(&dev->dev, adapter->stats_token)) {
-               if (!firmware_has_feature(FW_FEATURE_CMO))
-                       dev_err(&dev->dev, "Couldn't map stats buffer\n");
-               rc = -ENOMEM;
-               goto free_crq;
-       }
-
-       snprintf(buf, sizeof(buf), "ibmvnic_%x", dev->unit_address);
-       ent = debugfs_create_dir(buf, NULL);
-       if (!ent || IS_ERR(ent)) {
-               dev_info(&dev->dev, "debugfs create directory failed\n");
-               adapter->debugfs_dir = NULL;
-       } else {
-               adapter->debugfs_dir = ent;
-               ent = debugfs_create_file("dump", S_IRUGO, adapter->debugfs_dir,
-                                         netdev, &ibmvnic_dump_ops);
-               if (!ent || IS_ERR(ent)) {
-                       dev_info(&dev->dev,
-                                "debugfs create dump file failed\n");
-                       adapter->debugfs_dump = NULL;
-               } else {
-                       adapter->debugfs_dump = ent;
-               }
-       }
-
-       init_completion(&adapter->init_done);
-       ibmvnic_send_crq_init(adapter);
-       if (!wait_for_completion_timeout(&adapter->init_done, timeout))
-               return 0;
-
-       do {
-               if (adapter->renegotiate) {
-                       adapter->renegotiate = false;
-                       release_sub_crqs_no_irqs(adapter);
-
-                       reinit_completion(&adapter->init_done);
-                       send_cap_queries(adapter);
-                       if (!wait_for_completion_timeout(&adapter->init_done,
-                                                        timeout))
-                               return 0;
-               }
-       } while (adapter->renegotiate);
-
-       rc = init_sub_crq_irqs(adapter);
+       rc = ibmvnic_init(adapter);
        if (rc) {
-               dev_err(&dev->dev, "failed to initialize sub crq irqs\n");
-               goto free_debugfs;
+               free_netdev(netdev);
+               return rc;
        }
 
-       netdev->real_num_tx_queues = adapter->req_tx_queues;
        netdev->mtu = adapter->req_mtu - ETH_HLEN;
+       adapter->is_closed = false;
 
        rc = register_netdev(netdev);
        if (rc) {
                dev_err(&dev->dev, "failed to register netdev rc=%d\n", rc);
-               goto free_sub_crqs;
+               free_netdev(netdev);
+               return rc;
        }
        dev_info(&dev->dev, "ibmvnic registered\n");
 
        return 0;
-
-free_sub_crqs:
-       release_sub_crqs(adapter);
-free_debugfs:
-       if (adapter->debugfs_dir && !IS_ERR(adapter->debugfs_dir))
-               debugfs_remove_recursive(adapter->debugfs_dir);
-free_crq:
-       ibmvnic_release_crq_queue(adapter);
-free_netdev:
-       free_netdev(netdev);
-       return rc;
 }
 
 static int ibmvnic_remove(struct vio_dev *dev)
 {
        struct net_device *netdev = dev_get_drvdata(&dev->dev);
-       struct ibmvnic_adapter *adapter = netdev_priv(netdev);
 
        unregister_netdev(netdev);
-
-       release_sub_crqs(adapter);
-
-       ibmvnic_release_crq_queue(adapter);
-
-       if (adapter->debugfs_dir && !IS_ERR(adapter->debugfs_dir))
-               debugfs_remove_recursive(adapter->debugfs_dir);
-
-       dma_unmap_single(&dev->dev, adapter->stats_token,
-                        sizeof(struct ibmvnic_statistics), DMA_FROM_DEVICE);
-
-       if (adapter->ras_comps)
-               dma_free_coherent(&dev->dev,
-                                 adapter->ras_comp_num *
-                                 sizeof(struct ibmvnic_fw_component),
-                                 adapter->ras_comps, adapter->ras_comps_tok);
-
-       kfree(adapter->ras_comp_int);
-
        free_netdev(netdev);
        dev_set_drvdata(&dev->dev, NULL);
 
index 422824f1f42a8accdbbe7a97c70baa9882fbcce8..b0d0b890d033a4d7d03632b1daa97798a3db5237 100644 (file)
@@ -772,20 +772,10 @@ enum ibmvnic_commands {
        ERROR_INDICATION = 0x08,
        REQUEST_ERROR_INFO = 0x09,
        REQUEST_ERROR_RSP = 0x89,
-       REQUEST_DUMP_SIZE = 0x0A,
-       REQUEST_DUMP_SIZE_RSP = 0x8A,
-       REQUEST_DUMP = 0x0B,
-       REQUEST_DUMP_RSP = 0x8B,
        LOGICAL_LINK_STATE = 0x0C,
        LOGICAL_LINK_STATE_RSP = 0x8C,
        REQUEST_STATISTICS = 0x0D,
        REQUEST_STATISTICS_RSP = 0x8D,
-       REQUEST_RAS_COMP_NUM = 0x0E,
-       REQUEST_RAS_COMP_NUM_RSP = 0x8E,
-       REQUEST_RAS_COMPS = 0x0F,
-       REQUEST_RAS_COMPS_RSP = 0x8F,
-       CONTROL_RAS = 0x10,
-       CONTROL_RAS_RSP = 0x90,
        COLLECT_FW_TRACE = 0x11,
        COLLECT_FW_TRACE_RSP = 0x91,
        LINK_STATE_INDICATION = 0x12,
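
Dropping the dump and RAS opcodes leaves deliberate gaps in the command space (0x0A-0x0B and 0x0E-0x10 here, 0x1C/0x9C in the next hunk) rather than renumbering: every entry carries an explicit value, so the commands that survive keep their on-the-wire encoding. The same technique in miniature:

        enum example_cmds {
                CMD_KEEP_A = 0x09,
                /* 0x0A-0x0B retired by the removal; the gap is intentional */
                CMD_KEEP_B = 0x0C,      /* wire value unchanged */
        };
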
@@ -806,8 +796,6 @@ enum ibmvnic_commands {
        ACL_CHANGE_INDICATION = 0x1A,
        ACL_QUERY = 0x1B,
        ACL_QUERY_RSP = 0x9B,
-       REQUEST_DEBUG_STATS = 0x1C,
-       REQUEST_DEBUG_STATS_RSP = 0x9C,
        QUERY_MAP = 0x1D,
        QUERY_MAP_RSP = 0x9D,
        REQUEST_MAP = 0x1E,
@@ -863,6 +851,7 @@ struct ibmvnic_sub_crq_queue {
        spinlock_t lock;
        struct sk_buff *rx_skb_top;
        struct ibmvnic_adapter *adapter;
+       atomic_t used;
 };
 
 struct ibmvnic_long_term_buff {
@@ -924,13 +913,6 @@ struct ibmvnic_error_buff {
        __be32 error_id;
 };
 
-struct ibmvnic_fw_comp_internal {
-       struct ibmvnic_adapter *adapter;
-       int num;
-       struct debugfs_blob_wrapper desc_blob;
-       int paused;
-};
-
 struct ibmvnic_inflight_cmd {
        union ibmvnic_crq crq;
        struct list_head list;
@@ -952,7 +934,6 @@ struct ibmvnic_adapter {
        dma_addr_t bounce_buffer_dma;
 
        /* Statistics */
-       struct net_device_stats net_stats;
        struct ibmvnic_statistics stats;
        dma_addr_t stats_token;
        struct completion stats_done;
@@ -995,18 +976,7 @@ struct ibmvnic_adapter {
        struct list_head errors;
        spinlock_t error_list_lock;
 
-       /* debugfs */
-       struct dentry *debugfs_dir;
-       struct dentry *debugfs_dump;
        struct completion fw_done;
-       char *dump_data;
-       dma_addr_t dump_data_token;
-       int dump_data_size;
-       int ras_comp_num;
-       struct ibmvnic_fw_component *ras_comps;
-       struct ibmvnic_fw_comp_internal *ras_comp_int;
-       dma_addr_t ras_comps_tok;
-       struct dentry *ras_comps_ent;
 
        /* in-flight commands that allocate and/or map memory*/
        struct list_head inflight;
@@ -1051,4 +1021,5 @@ struct ibmvnic_adapter {
        struct work_struct ibmvnic_xport;
        struct tasklet_struct tasklet;
        bool failover;
+       bool is_closed;
 };
index 1349b45f014dd1d5ce29fa9a9f38189bca2cbe3d..1542a2158e962d66915d6330e6d94fb215735b5c 100644 (file)
@@ -235,17 +235,6 @@ config I40E_DCB
 
          If unsure, say N.
 
-config I40E_FCOE
-       bool "Fibre Channel over Ethernet (FCoE)"
-       default n
-       depends on I40E && DCB && FCOE
-       ---help---
-         Say Y here if you want to use Fibre Channel over Ethernet (FCoE)
-         in the driver. This will create new netdev for exclusive FCoE
-         use with XL710 FCoE offloads enabled.
-
-         If unsure, say N.
-
 config I40EVF
        tristate "Intel(R) XL710 X710 Virtual Function Ethernet support"
        depends on PCI_MSI
index 975eeb885ca2b52bb6a3f28e2b95d140cdf47777..ec8aa4562cc90a90dff844872278722b24daec3c 100644 (file)
@@ -103,104 +103,104 @@ static const char e1000_gstrings_test[][ETH_GSTRING_LEN] = {
 
 #define E1000_TEST_LEN ARRAY_SIZE(e1000_gstrings_test)
 
-static int e1000_get_settings(struct net_device *netdev,
-                             struct ethtool_cmd *ecmd)
+static int e1000_get_link_ksettings(struct net_device *netdev,
+                                   struct ethtool_link_ksettings *cmd)
 {
        struct e1000_adapter *adapter = netdev_priv(netdev);
        struct e1000_hw *hw = &adapter->hw;
+       u32 supported, advertising;
 
        if (hw->media_type == e1000_media_type_copper) {
-               ecmd->supported = (SUPPORTED_10baseT_Half |
-                                  SUPPORTED_10baseT_Full |
-                                  SUPPORTED_100baseT_Half |
-                                  SUPPORTED_100baseT_Full |
-                                  SUPPORTED_1000baseT_Full|
-                                  SUPPORTED_Autoneg |
-                                  SUPPORTED_TP);
-               ecmd->advertising = ADVERTISED_TP;
+               supported = (SUPPORTED_10baseT_Half |
+                            SUPPORTED_10baseT_Full |
+                            SUPPORTED_100baseT_Half |
+                            SUPPORTED_100baseT_Full |
+                            SUPPORTED_1000baseT_Full|
+                            SUPPORTED_Autoneg |
+                            SUPPORTED_TP);
+               advertising = ADVERTISED_TP;
 
                if (hw->autoneg == 1) {
-                       ecmd->advertising |= ADVERTISED_Autoneg;
+                       advertising |= ADVERTISED_Autoneg;
                        /* the e1000 autoneg seems to match ethtool nicely */
-                       ecmd->advertising |= hw->autoneg_advertised;
+                       advertising |= hw->autoneg_advertised;
                }
 
-               ecmd->port = PORT_TP;
-               ecmd->phy_address = hw->phy_addr;
-
-               if (hw->mac_type == e1000_82543)
-                       ecmd->transceiver = XCVR_EXTERNAL;
-               else
-                       ecmd->transceiver = XCVR_INTERNAL;
-
+               cmd->base.port = PORT_TP;
+               cmd->base.phy_address = hw->phy_addr;
        } else {
-               ecmd->supported   = (SUPPORTED_1000baseT_Full |
-                                    SUPPORTED_FIBRE |
-                                    SUPPORTED_Autoneg);
+               supported   = (SUPPORTED_1000baseT_Full |
+                              SUPPORTED_FIBRE |
+                              SUPPORTED_Autoneg);
 
-               ecmd->advertising = (ADVERTISED_1000baseT_Full |
-                                    ADVERTISED_FIBRE |
-                                    ADVERTISED_Autoneg);
+               advertising = (ADVERTISED_1000baseT_Full |
+                              ADVERTISED_FIBRE |
+                              ADVERTISED_Autoneg);
 
-               ecmd->port = PORT_FIBRE;
-
-               if (hw->mac_type >= e1000_82545)
-                       ecmd->transceiver = XCVR_INTERNAL;
-               else
-                       ecmd->transceiver = XCVR_EXTERNAL;
+               cmd->base.port = PORT_FIBRE;
        }
 
        if (er32(STATUS) & E1000_STATUS_LU) {
                e1000_get_speed_and_duplex(hw, &adapter->link_speed,
                                           &adapter->link_duplex);
-               ethtool_cmd_speed_set(ecmd, adapter->link_speed);
+               cmd->base.speed = adapter->link_speed;
 
                /* unfortunately FULL_DUPLEX != DUPLEX_FULL
                 * and HALF_DUPLEX != DUPLEX_HALF
                 */
                if (adapter->link_duplex == FULL_DUPLEX)
-                       ecmd->duplex = DUPLEX_FULL;
+                       cmd->base.duplex = DUPLEX_FULL;
                else
-                       ecmd->duplex = DUPLEX_HALF;
+                       cmd->base.duplex = DUPLEX_HALF;
        } else {
-               ethtool_cmd_speed_set(ecmd, SPEED_UNKNOWN);
-               ecmd->duplex = DUPLEX_UNKNOWN;
+               cmd->base.speed = SPEED_UNKNOWN;
+               cmd->base.duplex = DUPLEX_UNKNOWN;
        }
 
-       ecmd->autoneg = ((hw->media_type == e1000_media_type_fiber) ||
+       cmd->base.autoneg = ((hw->media_type == e1000_media_type_fiber) ||
                         hw->autoneg) ? AUTONEG_ENABLE : AUTONEG_DISABLE;
 
        /* MDI-X => 1; MDI => 0 */
        if ((hw->media_type == e1000_media_type_copper) &&
            netif_carrier_ok(netdev))
-               ecmd->eth_tp_mdix = (!!adapter->phy_info.mdix_mode ?
+               cmd->base.eth_tp_mdix = (!!adapter->phy_info.mdix_mode ?
                                     ETH_TP_MDI_X : ETH_TP_MDI);
        else
-               ecmd->eth_tp_mdix = ETH_TP_MDI_INVALID;
+               cmd->base.eth_tp_mdix = ETH_TP_MDI_INVALID;
 
        if (hw->mdix == AUTO_ALL_MODES)
-               ecmd->eth_tp_mdix_ctrl = ETH_TP_MDI_AUTO;
+               cmd->base.eth_tp_mdix_ctrl = ETH_TP_MDI_AUTO;
        else
-               ecmd->eth_tp_mdix_ctrl = hw->mdix;
+               cmd->base.eth_tp_mdix_ctrl = hw->mdix;
+
+       ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
+                                               supported);
+       ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
+                                               advertising);
+
        return 0;
 }
 
-static int e1000_set_settings(struct net_device *netdev,
-                             struct ethtool_cmd *ecmd)
+static int e1000_set_link_ksettings(struct net_device *netdev,
+                                   const struct ethtool_link_ksettings *cmd)
 {
        struct e1000_adapter *adapter = netdev_priv(netdev);
        struct e1000_hw *hw = &adapter->hw;
+       u32 advertising;
+
+       ethtool_convert_link_mode_to_legacy_u32(&advertising,
+                                               cmd->link_modes.advertising);
 
        /* MDI setting is only allowed when autoneg enabled because
         * some hardware doesn't allow MDI setting when speed or
         * duplex is forced.
         */
-       if (ecmd->eth_tp_mdix_ctrl) {
+       if (cmd->base.eth_tp_mdix_ctrl) {
                if (hw->media_type != e1000_media_type_copper)
                        return -EOPNOTSUPP;
 
-               if ((ecmd->eth_tp_mdix_ctrl != ETH_TP_MDI_AUTO) &&
-                   (ecmd->autoneg != AUTONEG_ENABLE)) {
+               if ((cmd->base.eth_tp_mdix_ctrl != ETH_TP_MDI_AUTO) &&
+                   (cmd->base.autoneg != AUTONEG_ENABLE)) {
                        e_err(drv, "forcing MDI/MDI-X state is not supported when link speed and/or duplex are forced\n");
                        return -EINVAL;
                }
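
This and the remaining e1000/e1000e hunks are a mechanical conversion from the legacy ethtool {get,set}_settings callbacks to {get,set}_link_ksettings: speed, duplex, port and MDI-X state move into cmd->base, the transceiver field is dropped (the new API has no equivalent), and the legacy u32 supported/advertising bitmaps are translated with ethtool_convert_legacy_u32_to_link_mode() and ethtool_convert_link_mode_to_legacy_u32(). Note too that the new set callback takes a const cmd, so the old write-back of the resolved advertising mask disappears. The skeleton of the conversion, reduced to essentials for a hypothetical "foo" driver:

        static int foo_get_link_ksettings(struct net_device *netdev,
                                          struct ethtool_link_ksettings *cmd)
        {
                u32 supported = SUPPORTED_1000baseT_Full | SUPPORTED_Autoneg;
                u32 advertising = ADVERTISED_1000baseT_Full | ADVERTISED_Autoneg;

                cmd->base.speed = SPEED_1000;   /* was ethtool_cmd_speed_set() */
                cmd->base.duplex = DUPLEX_FULL;
                cmd->base.autoneg = AUTONEG_ENABLE;

                /* legacy u32 bitmaps -> arbitrary-length link-mode masks */
                ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
                                                        supported);
                ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
                                                        advertising);
                return 0;
        }

        static int foo_set_link_ksettings(struct net_device *netdev,
                                          const struct ethtool_link_ksettings *cmd)
        {
                u32 advertising;

                /* ...and back, where the driver still wants the old form */
                ethtool_convert_link_mode_to_legacy_u32(&advertising,
                                                        cmd->link_modes.advertising);
                /* program the MAC/PHY from advertising and cmd->base here */
                return 0;
        }
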
@@ -209,32 +209,31 @@ static int e1000_set_settings(struct net_device *netdev,
        while (test_and_set_bit(__E1000_RESETTING, &adapter->flags))
                msleep(1);
 
-       if (ecmd->autoneg == AUTONEG_ENABLE) {
+       if (cmd->base.autoneg == AUTONEG_ENABLE) {
                hw->autoneg = 1;
                if (hw->media_type == e1000_media_type_fiber)
                        hw->autoneg_advertised = ADVERTISED_1000baseT_Full |
-                                    ADVERTISED_FIBRE |
-                                    ADVERTISED_Autoneg;
+                                                ADVERTISED_FIBRE |
+                                                ADVERTISED_Autoneg;
                else
-                       hw->autoneg_advertised = ecmd->advertising |
+                       hw->autoneg_advertised = advertising |
                                                 ADVERTISED_TP |
                                                 ADVERTISED_Autoneg;
-               ecmd->advertising = hw->autoneg_advertised;
        } else {
-               u32 speed = ethtool_cmd_speed(ecmd);
+               u32 speed = cmd->base.speed;
                /* calling this overrides forced MDI setting */
-               if (e1000_set_spd_dplx(adapter, speed, ecmd->duplex)) {
+               if (e1000_set_spd_dplx(adapter, speed, cmd->base.duplex)) {
                        clear_bit(__E1000_RESETTING, &adapter->flags);
                        return -EINVAL;
                }
        }
 
        /* MDI-X => 2; MDI => 1; Auto => 3 */
-       if (ecmd->eth_tp_mdix_ctrl) {
-               if (ecmd->eth_tp_mdix_ctrl == ETH_TP_MDI_AUTO)
+       if (cmd->base.eth_tp_mdix_ctrl) {
+               if (cmd->base.eth_tp_mdix_ctrl == ETH_TP_MDI_AUTO)
                        hw->mdix = AUTO_ALL_MODES;
                else
-                       hw->mdix = ecmd->eth_tp_mdix_ctrl;
+                       hw->mdix = cmd->base.eth_tp_mdix_ctrl;
        }
 
        /* reset the link */
@@ -1875,8 +1874,6 @@ static void e1000_get_strings(struct net_device *netdev, u32 stringset,
 }
 
 static const struct ethtool_ops e1000_ethtool_ops = {
-       .get_settings           = e1000_get_settings,
-       .set_settings           = e1000_set_settings,
        .get_drvinfo            = e1000_get_drvinfo,
        .get_regs_len           = e1000_get_regs_len,
        .get_regs               = e1000_get_regs,
@@ -1901,6 +1898,8 @@ static const struct ethtool_ops e1000_ethtool_ops = {
        .get_coalesce           = e1000_get_coalesce,
        .set_coalesce           = e1000_set_coalesce,
        .get_ts_info            = ethtool_op_get_ts_info,
+       .get_link_ksettings     = e1000_get_link_ksettings,
+       .set_link_ksettings     = e1000_set_link_ksettings,
 };
 
 void e1000_set_ethtool_ops(struct net_device *netdev)
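
Registration follows suit: the .get_settings/.set_settings entries leave the ethtool_ops table and the ksettings pair is added. In kernels of this vintage the ethtool core prefers the ksettings callbacks and falls back to the legacy pair only when they are absent, so converting a driver amounts to swapping the table entries once the callbacks exist; the e1000e hunks below repeat the exercise for that driver. In outline:

        static const struct ethtool_ops foo_ethtool_ops = {
                .get_drvinfo        = foo_get_drvinfo,
                /* legacy .get_settings/.set_settings removed */
                .get_link_ksettings = foo_get_link_ksettings,
                .set_link_ksettings = foo_set_link_ksettings,
        };
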
index 7aff68a4a4df527d26c50da69d0cad2dd91c2767..e70b1ebff60df2d0a20a876c04613938b89432cc 100644 (file)
@@ -117,55 +117,52 @@ static const char e1000_gstrings_test[][ETH_GSTRING_LEN] = {
 
 #define E1000_TEST_LEN ARRAY_SIZE(e1000_gstrings_test)
 
-static int e1000_get_settings(struct net_device *netdev,
-                             struct ethtool_cmd *ecmd)
+static int e1000_get_link_ksettings(struct net_device *netdev,
+                                   struct ethtool_link_ksettings *cmd)
 {
        struct e1000_adapter *adapter = netdev_priv(netdev);
        struct e1000_hw *hw = &adapter->hw;
-       u32 speed;
+       u32 speed, supported, advertising;
 
        if (hw->phy.media_type == e1000_media_type_copper) {
-               ecmd->supported = (SUPPORTED_10baseT_Half |
-                                  SUPPORTED_10baseT_Full |
-                                  SUPPORTED_100baseT_Half |
-                                  SUPPORTED_100baseT_Full |
-                                  SUPPORTED_1000baseT_Full |
-                                  SUPPORTED_Autoneg |
-                                  SUPPORTED_TP);
+               supported = (SUPPORTED_10baseT_Half |
+                            SUPPORTED_10baseT_Full |
+                            SUPPORTED_100baseT_Half |
+                            SUPPORTED_100baseT_Full |
+                            SUPPORTED_1000baseT_Full |
+                            SUPPORTED_Autoneg |
+                            SUPPORTED_TP);
                if (hw->phy.type == e1000_phy_ife)
-                       ecmd->supported &= ~SUPPORTED_1000baseT_Full;
-               ecmd->advertising = ADVERTISED_TP;
+                       supported &= ~SUPPORTED_1000baseT_Full;
+               advertising = ADVERTISED_TP;
 
                if (hw->mac.autoneg == 1) {
-                       ecmd->advertising |= ADVERTISED_Autoneg;
+                       advertising |= ADVERTISED_Autoneg;
                        /* the e1000 autoneg seems to match ethtool nicely */
-                       ecmd->advertising |= hw->phy.autoneg_advertised;
+                       advertising |= hw->phy.autoneg_advertised;
                }
 
-               ecmd->port = PORT_TP;
-               ecmd->phy_address = hw->phy.addr;
-               ecmd->transceiver = XCVR_INTERNAL;
-
+               cmd->base.port = PORT_TP;
+               cmd->base.phy_address = hw->phy.addr;
        } else {
-               ecmd->supported   = (SUPPORTED_1000baseT_Full |
-                                    SUPPORTED_FIBRE |
-                                    SUPPORTED_Autoneg);
+               supported   = (SUPPORTED_1000baseT_Full |
+                              SUPPORTED_FIBRE |
+                              SUPPORTED_Autoneg);
 
-               ecmd->advertising = (ADVERTISED_1000baseT_Full |
-                                    ADVERTISED_FIBRE |
-                                    ADVERTISED_Autoneg);
+               advertising = (ADVERTISED_1000baseT_Full |
+                              ADVERTISED_FIBRE |
+                              ADVERTISED_Autoneg);
 
-               ecmd->port = PORT_FIBRE;
-               ecmd->transceiver = XCVR_EXTERNAL;
+               cmd->base.port = PORT_FIBRE;
        }
 
        speed = SPEED_UNKNOWN;
-       ecmd->duplex = DUPLEX_UNKNOWN;
+       cmd->base.duplex = DUPLEX_UNKNOWN;
 
        if (netif_running(netdev)) {
                if (netif_carrier_ok(netdev)) {
                        speed = adapter->link_speed;
-                       ecmd->duplex = adapter->link_duplex - 1;
+                       cmd->base.duplex = adapter->link_duplex - 1;
                }
        } else if (!pm_runtime_suspended(netdev->dev.parent)) {
                u32 status = er32(STATUS);
@@ -179,30 +176,36 @@ static int e1000_get_settings(struct net_device *netdev,
                                speed = SPEED_10;
 
                        if (status & E1000_STATUS_FD)
-                               ecmd->duplex = DUPLEX_FULL;
+                               cmd->base.duplex = DUPLEX_FULL;
                        else
-                               ecmd->duplex = DUPLEX_HALF;
+                               cmd->base.duplex = DUPLEX_HALF;
                }
        }
 
-       ethtool_cmd_speed_set(ecmd, speed);
-       ecmd->autoneg = ((hw->phy.media_type == e1000_media_type_fiber) ||
+       cmd->base.speed = speed;
+       cmd->base.autoneg = ((hw->phy.media_type == e1000_media_type_fiber) ||
                         hw->mac.autoneg) ? AUTONEG_ENABLE : AUTONEG_DISABLE;
 
        /* MDI-X => 2; MDI => 1; Invalid => 0 */
        if ((hw->phy.media_type == e1000_media_type_copper) &&
            netif_carrier_ok(netdev))
-               ecmd->eth_tp_mdix = hw->phy.is_mdix ? ETH_TP_MDI_X : ETH_TP_MDI;
+               cmd->base.eth_tp_mdix = hw->phy.is_mdix ?
+                       ETH_TP_MDI_X : ETH_TP_MDI;
        else
-               ecmd->eth_tp_mdix = ETH_TP_MDI_INVALID;
+               cmd->base.eth_tp_mdix = ETH_TP_MDI_INVALID;
 
        if (hw->phy.mdix == AUTO_ALL_MODES)
-               ecmd->eth_tp_mdix_ctrl = ETH_TP_MDI_AUTO;
+               cmd->base.eth_tp_mdix_ctrl = ETH_TP_MDI_AUTO;
        else
-               ecmd->eth_tp_mdix_ctrl = hw->phy.mdix;
+               cmd->base.eth_tp_mdix_ctrl = hw->phy.mdix;
 
        if (hw->phy.media_type != e1000_media_type_copper)
-               ecmd->eth_tp_mdix_ctrl = ETH_TP_MDI_INVALID;
+               cmd->base.eth_tp_mdix_ctrl = ETH_TP_MDI_INVALID;
+
+       ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
+                                               supported);
+       ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
+                                               advertising);
 
        return 0;
 }
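
The hunk above converts e1000e from the legacy get_settings/ethtool_cmd pair to the link_ksettings API, where supported and advertised modes live in wide bitmaps rather than a single u32. Below is a minimal userspace sketch of the round-trip performed by the kernel helpers ethtool_convert_legacy_u32_to_link_mode() and ethtool_convert_link_mode_to_legacy_u32(); LINK_MODE_NBITS and the example bits are illustrative only (the kernel sizes its bitmaps with __ETHTOOL_LINK_MODE_MASK_NBITS).

/* Hedged sketch: a userspace model of the legacy u32 <-> link-mode
 * bitmap conversion. All sizes and bit positions are illustrative.
 */
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

#define LINK_MODE_NBITS 64
#define BITS_PER_LONG   (8 * sizeof(unsigned long))
#define BITMAP_LONGS    ((LINK_MODE_NBITS + BITS_PER_LONG - 1) / BITS_PER_LONG)

/* legacy u32 -> wide bitmap: legacy bits occupy bit positions 0..31 */
static void legacy_u32_to_link_mode(unsigned long *dst, uint32_t legacy)
{
        for (size_t i = 0; i < BITMAP_LONGS; i++)
                dst[i] = 0;
        dst[0] = legacy;
}

/* wide bitmap -> legacy u32: returns 1 if nothing above bit 31 was lost */
static int link_mode_to_legacy_u32(uint32_t *legacy, const unsigned long *src)
{
        int retained = 1;

        *legacy = (uint32_t)src[0];
        for (size_t bit = 32; bit < LINK_MODE_NBITS; bit++)
                if (src[bit / BITS_PER_LONG] & (1UL << (bit % BITS_PER_LONG)))
                        retained = 0;
        return retained;
}

int main(void)
{
        unsigned long modes[BITMAP_LONGS];
        uint32_t supported = (1u << 0) | (1u << 6); /* two stand-in SUPPORTED_* bits */
        uint32_t legacy_again;

        legacy_u32_to_link_mode(modes, supported);
        link_mode_to_legacy_u32(&legacy_again, modes);
        printf("round-trip: %#" PRIx32 " -> %#" PRIx32 "\n", supported, legacy_again);
        return 0;
}

The wider bitmap is the reason for the new API: link modes above bit 31 simply cannot be expressed in the legacy u32, and the in-kernel conversion helper likewise reports whether any such modes were lost.
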
@@ -262,12 +265,16 @@ err_inval:
        return -EINVAL;
 }
 
-static int e1000_set_settings(struct net_device *netdev,
-                             struct ethtool_cmd *ecmd)
+static int e1000_set_link_ksettings(struct net_device *netdev,
+                                   const struct ethtool_link_ksettings *cmd)
 {
        struct e1000_adapter *adapter = netdev_priv(netdev);
        struct e1000_hw *hw = &adapter->hw;
        int ret_val = 0;
+       u32 advertising;
+
+       ethtool_convert_link_mode_to_legacy_u32(&advertising,
+                                               cmd->link_modes.advertising);
 
        pm_runtime_get_sync(netdev->dev.parent);
 
@@ -285,14 +292,14 @@ static int e1000_set_settings(struct net_device *netdev,
         * some hardware doesn't allow MDI setting when speed or
         * duplex is forced.
         */
-       if (ecmd->eth_tp_mdix_ctrl) {
+       if (cmd->base.eth_tp_mdix_ctrl) {
                if (hw->phy.media_type != e1000_media_type_copper) {
                        ret_val = -EOPNOTSUPP;
                        goto out;
                }
 
-               if ((ecmd->eth_tp_mdix_ctrl != ETH_TP_MDI_AUTO) &&
-                   (ecmd->autoneg != AUTONEG_ENABLE)) {
+               if ((cmd->base.eth_tp_mdix_ctrl != ETH_TP_MDI_AUTO) &&
+                   (cmd->base.autoneg != AUTONEG_ENABLE)) {
                        e_err("forcing MDI/MDI-X state is not supported when link speed and/or duplex are forced\n");
                        ret_val = -EINVAL;
                        goto out;
@@ -302,35 +309,35 @@ static int e1000_set_settings(struct net_device *netdev,
        while (test_and_set_bit(__E1000_RESETTING, &adapter->state))
                usleep_range(1000, 2000);
 
-       if (ecmd->autoneg == AUTONEG_ENABLE) {
+       if (cmd->base.autoneg == AUTONEG_ENABLE) {
                hw->mac.autoneg = 1;
                if (hw->phy.media_type == e1000_media_type_fiber)
                        hw->phy.autoneg_advertised = ADVERTISED_1000baseT_Full |
                            ADVERTISED_FIBRE | ADVERTISED_Autoneg;
                else
-                       hw->phy.autoneg_advertised = ecmd->advertising |
+                       hw->phy.autoneg_advertised = advertising |
                            ADVERTISED_TP | ADVERTISED_Autoneg;
-               ecmd->advertising = hw->phy.autoneg_advertised;
+               advertising = hw->phy.autoneg_advertised;
                if (adapter->fc_autoneg)
                        hw->fc.requested_mode = e1000_fc_default;
        } else {
-               u32 speed = ethtool_cmd_speed(ecmd);
+               u32 speed = cmd->base.speed;
                /* calling this overrides forced MDI setting */
-               if (e1000_set_spd_dplx(adapter, speed, ecmd->duplex)) {
+               if (e1000_set_spd_dplx(adapter, speed, cmd->base.duplex)) {
                        ret_val = -EINVAL;
                        goto out;
                }
        }
 
        /* MDI-X => 2; MDI => 1; Auto => 3 */
-       if (ecmd->eth_tp_mdix_ctrl) {
+       if (cmd->base.eth_tp_mdix_ctrl) {
                /* fix up the value for auto (3 => 0) as zero is mapped
                 * internally to auto
                 */
-               if (ecmd->eth_tp_mdix_ctrl == ETH_TP_MDI_AUTO)
+               if (cmd->base.eth_tp_mdix_ctrl == ETH_TP_MDI_AUTO)
                        hw->phy.mdix = AUTO_ALL_MODES;
                else
-                       hw->phy.mdix = ecmd->eth_tp_mdix_ctrl;
+                       hw->phy.mdix = cmd->base.eth_tp_mdix_ctrl;
        }
 
        /* reset the link */
@@ -2313,8 +2320,6 @@ static int e1000e_get_ts_info(struct net_device *netdev,
 }
 
 static const struct ethtool_ops e1000_ethtool_ops = {
-       .get_settings           = e1000_get_settings,
-       .set_settings           = e1000_set_settings,
        .get_drvinfo            = e1000_get_drvinfo,
        .get_regs_len           = e1000_get_regs_len,
        .get_regs               = e1000_get_regs,
@@ -2342,6 +2347,8 @@ static const struct ethtool_ops e1000_ethtool_ops = {
        .get_ts_info            = e1000e_get_ts_info,
        .get_eee                = e1000e_get_eee,
        .set_eee                = e1000e_set_eee,
+       .get_link_ksettings     = e1000_get_link_ksettings,
+       .set_link_ksettings     = e1000_set_link_ksettings,
 };
 
 void e1000e_set_ethtool_ops(struct net_device *netdev)
index 2175cced402f7fe84dd260eecc5f3f0ab712132a..e9af89ad039c6f0e227878b9de85ea7819cd19d9 100644 (file)
@@ -6274,8 +6274,8 @@ static int e1000e_pm_freeze(struct device *dev)
                /* Quiesce the device without resetting the hardware */
                e1000e_down(adapter, false);
                e1000_free_irq(adapter);
-               e1000e_reset_interrupt_capability(adapter);
        }
+       e1000e_reset_interrupt_capability(adapter);
 
        /* Allow time for pending master requests to run */
        e1000e_disable_pcie_master(&adapter->hw);
index 52b979443cdecd702177ff40bf71c1162a0d0783..689c413b7782f353aa7a261ecd8800b9f7b24ee9 100644 (file)
@@ -1,5 +1,5 @@
 /* Intel(R) Ethernet Switch Host Interface Driver
- * Copyright(c) 2013 - 2016 Intel Corporation.
+ * Copyright(c) 2013 - 2017 Intel Corporation.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms and conditions of the GNU General Public License,
@@ -65,14 +65,16 @@ enum fm10k_ring_state_t {
        __FM10K_TX_DETECT_HANG,
        __FM10K_HANG_CHECK_ARMED,
        __FM10K_TX_XPS_INIT_DONE,
+       /* This must be last and is used to calculate BITMAP size */
+       __FM10K_TX_STATE_SIZE__,
 };
 
 #define check_for_tx_hang(ring) \
-       test_bit(__FM10K_TX_DETECT_HANG, &(ring)->state)
+       test_bit(__FM10K_TX_DETECT_HANG, (ring)->state)
 #define set_check_for_tx_hang(ring) \
-       set_bit(__FM10K_TX_DETECT_HANG, &(ring)->state)
+       set_bit(__FM10K_TX_DETECT_HANG, (ring)->state)
 #define clear_check_for_tx_hang(ring) \
-       clear_bit(__FM10K_TX_DETECT_HANG, &(ring)->state)
+       clear_bit(__FM10K_TX_DETECT_HANG, (ring)->state)
 
 struct fm10k_tx_buffer {
        struct fm10k_tx_desc *next_to_watch;
@@ -126,7 +128,7 @@ struct fm10k_ring {
                struct fm10k_rx_buffer *rx_buffer;
        };
        u32 __iomem *tail;
-       unsigned long state;
+       DECLARE_BITMAP(state, __FM10K_TX_STATE_SIZE__);
        dma_addr_t dma;                 /* phys. address of descriptor ring */
        unsigned int size;              /* length in bytes */
 
@@ -249,18 +251,46 @@ struct fm10k_udp_port {
 /* one work queue for entire driver */
 extern struct workqueue_struct *fm10k_workqueue;
 
+/* The following enumeration contains flags which indicate or enable modified
+ * driver behaviors. To avoid race conditions, the flags are stored in
+ * a BITMAP in the fm10k_intfc structure. The BITMAP should be accessed using
+ * atomic *_bit() operations.
+ */
+enum fm10k_flags_t {
+       FM10K_FLAG_RESET_REQUESTED,
+       FM10K_FLAG_RSS_FIELD_IPV4_UDP,
+       FM10K_FLAG_RSS_FIELD_IPV6_UDP,
+       FM10K_FLAG_SWPRI_CONFIG,
+       /* __FM10K_FLAGS_SIZE__ is used to calculate the size of
+        * interface->flags and must be the last value in this
+        * enumeration.
+        */
+       __FM10K_FLAGS_SIZE__
+};
+
+enum fm10k_state_t {
+       __FM10K_RESETTING,
+       __FM10K_DOWN,
+       __FM10K_SERVICE_SCHED,
+       __FM10K_SERVICE_REQUEST,
+       __FM10K_SERVICE_DISABLE,
+       __FM10K_MBX_LOCK,
+       __FM10K_LINK_DOWN,
+       __FM10K_UPDATING_STATS,
+       /* This value must be last and determines the BITMAP size */
+       __FM10K_STATE_SIZE__,
+};
+
 struct fm10k_intfc {
        unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)];
        struct net_device *netdev;
        struct fm10k_l2_accel *l2_accel; /* pointer to L2 acceleration list */
        struct pci_dev *pdev;
-       unsigned long state;
+       DECLARE_BITMAP(state, __FM10K_STATE_SIZE__);
+
+       /* Access flag values using atomic *_bit() operations */
+       DECLARE_BITMAP(flags, __FM10K_FLAGS_SIZE__);
 
-       u32 flags;
-#define FM10K_FLAG_RESET_REQUESTED             (u32)(BIT(0))
-#define FM10K_FLAG_RSS_FIELD_IPV4_UDP          (u32)(BIT(1))
-#define FM10K_FLAG_RSS_FIELD_IPV6_UDP          (u32)(BIT(2))
-#define FM10K_FLAG_SWPRI_CONFIG                        (u32)(BIT(3))
        int xcast_mode;
 
        /* Tx fast path data */
@@ -352,22 +382,12 @@ struct fm10k_intfc {
        u16 vid;
 };
 
-enum fm10k_state_t {
-       __FM10K_RESETTING,
-       __FM10K_DOWN,
-       __FM10K_SERVICE_SCHED,
-       __FM10K_SERVICE_DISABLE,
-       __FM10K_MBX_LOCK,
-       __FM10K_LINK_DOWN,
-       __FM10K_UPDATING_STATS,
-};
-
 static inline void fm10k_mbx_lock(struct fm10k_intfc *interface)
 {
        /* busy loop if we cannot obtain the lock as some calls
         * such as ndo_set_rx_mode may be made in atomic context
         */
-       while (test_and_set_bit(__FM10K_MBX_LOCK, &interface->state))
+       while (test_and_set_bit(__FM10K_MBX_LOCK, interface->state))
                udelay(20);
 }
 
@@ -375,12 +395,12 @@ static inline void fm10k_mbx_unlock(struct fm10k_intfc *interface)
 {
        /* flush memory to make sure state is correct */
        smp_mb__before_atomic();
-       clear_bit(__FM10K_MBX_LOCK, &interface->state);
+       clear_bit(__FM10K_MBX_LOCK, interface->state);
 }
 
 static inline int fm10k_mbx_trylock(struct fm10k_intfc *interface)
 {
-       return !test_and_set_bit(__FM10K_MBX_LOCK, &interface->state);
+       return !test_and_set_bit(__FM10K_MBX_LOCK, interface->state);
 }
 
 /* fm10k_test_staterr - test bits in Rx descriptor status and error fields */
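
For readers following the fm10k changes: both the ring state above and the interface flags move from a plain unsigned long to a DECLARE_BITMAP() array sized by a sentinel enum value, accessed through the atomic *_bit() helpers. Here is a runnable plain-C sketch of that sizing/access idiom; the stand-in helpers below are not atomic and all names are invented.

/* Hedged userspace sketch: a sentinel enum value sizes a
 * DECLARE_BITMAP() array, and state is manipulated via *_bit()
 * helpers taking the array directly (no more &state).
 */
#include <stdio.h>

#define BITS_PER_LONG    (8 * sizeof(unsigned long))
#define BITS_TO_LONGS(n) (((n) + BITS_PER_LONG - 1) / BITS_PER_LONG)
#define DECLARE_BITMAP(name, bits) unsigned long name[BITS_TO_LONGS(bits)]

enum demo_state_t {
        DEMO_RESETTING,
        DEMO_DOWN,
        DEMO_MBX_LOCK,
        __DEMO_STATE_SIZE__,    /* must stay last: it sizes the bitmap */
};

static void set_bit(unsigned int nr, unsigned long *map)
{
        map[nr / BITS_PER_LONG] |= 1UL << (nr % BITS_PER_LONG);
}

static int test_bit(unsigned int nr, const unsigned long *map)
{
        return !!(map[nr / BITS_PER_LONG] & (1UL << (nr % BITS_PER_LONG)));
}

int main(void)
{
        DECLARE_BITMAP(state, __DEMO_STATE_SIZE__) = { 0 };

        set_bit(DEMO_DOWN, state);      /* the array decays to a pointer */
        printf("down=%d resetting=%d\n",
               test_bit(DEMO_DOWN, state), test_bit(DEMO_RESETTING, state));
        return 0;
}

This also explains why every call site in the diff drops the '&': the state member is now an array, which already decays to the unsigned long pointer the bit helpers expect.
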
index 0c84fef750f43a2c9230a580003bbd9b9a5d4ef9..c7234f35f8ff3462dc667a01dafd10208dcc9d82 100644 (file)
@@ -1,5 +1,5 @@
 /* Intel(R) Ethernet Switch Host Interface Driver
- * Copyright(c) 2013 - 2016 Intel Corporation.
+ * Copyright(c) 2013 - 2017 Intel Corporation.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms and conditions of the GNU General Public License,
@@ -562,7 +562,7 @@ static int fm10k_set_ringparam(struct net_device *netdev,
                return 0;
        }
 
-       while (test_and_set_bit(__FM10K_RESETTING, &interface->state))
+       while (test_and_set_bit(__FM10K_RESETTING, interface->state))
                usleep_range(1000, 2000);
 
        if (!netif_running(interface->netdev)) {
@@ -648,7 +648,7 @@ err_setup:
        fm10k_up(interface);
        vfree(temp_ring);
 clear_reset:
-       clear_bit(__FM10K_RESETTING, &interface->state);
+       clear_bit(__FM10K_RESETTING, interface->state);
        return err;
 }
 
@@ -716,7 +716,8 @@ static int fm10k_get_rss_hash_opts(struct fm10k_intfc *interface,
                cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
                /* fall through */
        case UDP_V4_FLOW:
-               if (interface->flags & FM10K_FLAG_RSS_FIELD_IPV4_UDP)
+               if (test_bit(FM10K_FLAG_RSS_FIELD_IPV4_UDP,
+                            interface->flags))
                        cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
                /* fall through */
        case SCTP_V4_FLOW:
@@ -732,7 +733,8 @@ static int fm10k_get_rss_hash_opts(struct fm10k_intfc *interface,
                cmd->data |= RXH_IP_SRC | RXH_IP_DST;
                break;
        case UDP_V6_FLOW:
-               if (interface->flags & FM10K_FLAG_RSS_FIELD_IPV6_UDP)
+               if (test_bit(FM10K_FLAG_RSS_FIELD_IPV6_UDP,
+                            interface->flags))
                        cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
                cmd->data |= RXH_IP_SRC | RXH_IP_DST;
                break;
@@ -764,12 +766,13 @@ static int fm10k_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd,
        return ret;
 }
 
-#define UDP_RSS_FLAGS (FM10K_FLAG_RSS_FIELD_IPV4_UDP | \
-                      FM10K_FLAG_RSS_FIELD_IPV6_UDP)
 static int fm10k_set_rss_hash_opt(struct fm10k_intfc *interface,
                                  struct ethtool_rxnfc *nfc)
 {
-       u32 flags = interface->flags;
+       int rss_ipv4_udp = test_bit(FM10K_FLAG_RSS_FIELD_IPV4_UDP,
+                                   interface->flags);
+       int rss_ipv6_udp = test_bit(FM10K_FLAG_RSS_FIELD_IPV6_UDP,
+                                   interface->flags);
 
        /* RSS does not support anything other than hashing
         * to queues on src and dst IPs and ports
@@ -793,10 +796,12 @@ static int fm10k_set_rss_hash_opt(struct fm10k_intfc *interface,
                        return -EINVAL;
                switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
                case 0:
-                       flags &= ~FM10K_FLAG_RSS_FIELD_IPV4_UDP;
+                       clear_bit(FM10K_FLAG_RSS_FIELD_IPV4_UDP,
+                                 interface->flags);
                        break;
                case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
-                       flags |= FM10K_FLAG_RSS_FIELD_IPV4_UDP;
+                       set_bit(FM10K_FLAG_RSS_FIELD_IPV4_UDP,
+                               interface->flags);
                        break;
                default:
                        return -EINVAL;
@@ -808,10 +813,12 @@ static int fm10k_set_rss_hash_opt(struct fm10k_intfc *interface,
                        return -EINVAL;
                switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
                case 0:
-                       flags &= ~FM10K_FLAG_RSS_FIELD_IPV6_UDP;
+                       clear_bit(FM10K_FLAG_RSS_FIELD_IPV6_UDP,
+                                 interface->flags);
                        break;
                case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
-                       flags |= FM10K_FLAG_RSS_FIELD_IPV6_UDP;
+                       set_bit(FM10K_FLAG_RSS_FIELD_IPV6_UDP,
+                               interface->flags);
                        break;
                default:
                        return -EINVAL;
@@ -835,28 +842,41 @@ static int fm10k_set_rss_hash_opt(struct fm10k_intfc *interface,
                return -EINVAL;
        }
 
-       /* if we changed something we need to update flags */
-       if (flags != interface->flags) {
+       /* If something changed, we need to update the MRQC register. Note that
+        * test_bit() is guaranteed to return strictly 0 or 1, so testing for
+        * equality is safe.
+        */
+       if ((rss_ipv4_udp != test_bit(FM10K_FLAG_RSS_FIELD_IPV4_UDP,
+                                     interface->flags)) ||
+           (rss_ipv6_udp != test_bit(FM10K_FLAG_RSS_FIELD_IPV6_UDP,
+                                     interface->flags))) {
                struct fm10k_hw *hw = &interface->hw;
+               bool warn = false;
                u32 mrqc;
 
-               if ((flags & UDP_RSS_FLAGS) &&
-                   !(interface->flags & UDP_RSS_FLAGS))
-                       netif_warn(interface, drv, interface->netdev,
-                                  "enabling UDP RSS: fragmented packets may arrive out of order to the stack above\n");
-
-               interface->flags = flags;
-
                /* Perform hash on these packet types */
                mrqc = FM10K_MRQC_IPV4 |
                       FM10K_MRQC_TCP_IPV4 |
                       FM10K_MRQC_IPV6 |
                       FM10K_MRQC_TCP_IPV6;
 
-               if (flags & FM10K_FLAG_RSS_FIELD_IPV4_UDP)
+               if (test_bit(FM10K_FLAG_RSS_FIELD_IPV4_UDP,
+                            interface->flags)) {
                        mrqc |= FM10K_MRQC_UDP_IPV4;
-               if (flags & FM10K_FLAG_RSS_FIELD_IPV6_UDP)
+                       warn = true;
+               }
+               if (test_bit(FM10K_FLAG_RSS_FIELD_IPV6_UDP,
+                            interface->flags)) {
                        mrqc |= FM10K_MRQC_UDP_IPV6;
+                       warn = true;
+               }
+
+               /* If we enable UDP RSS display a warning that this may cause
+                * fragmented UDP packets to arrive out of order.
+                */
+               if (warn)
+                       netif_warn(interface, drv, interface->netdev,
+                                  "enabling UDP RSS: fragmented packets may arrive out of order to the stack above\n");
 
                fm10k_write_reg(hw, FM10K_MRQC(0), mrqc);
        }
@@ -939,7 +959,7 @@ static void fm10k_self_test(struct net_device *dev,
 
        memset(data, 0, sizeof(*data) * FM10K_TEST_LEN);
 
-       if (FM10K_REMOVED(hw)) {
+       if (FM10K_REMOVED(hw->hw_addr)) {
                netif_err(interface, drv, dev,
                          "Interface removed - test blocked\n");
                eth_test->flags |= ETH_TEST_FL_FAILED;
index 5bb233a9614c1cc172d87a7f40bc68003f4af673..9dffaba85ae6bbd92fd608824ef480f802c452b6 100644 (file)
@@ -1,5 +1,5 @@
 /* Intel(R) Ethernet Switch Host Interface Driver
- * Copyright(c) 2013 - 2016 Intel Corporation.
+ * Copyright(c) 2013 - 2017 Intel Corporation.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms and conditions of the GNU General Public License,
@@ -34,7 +34,7 @@ const char fm10k_driver_version[] = DRV_VERSION;
 char fm10k_driver_name[] = "fm10k";
 static const char fm10k_driver_string[] = DRV_SUMMARY;
 static const char fm10k_copyright[] =
-       "Copyright (c) 2013 - 2016 Intel Corporation.";
+       "Copyright(c) 2013 - 2017 Intel Corporation.";
 
 MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
 MODULE_DESCRIPTION(DRV_SUMMARY);
@@ -1175,13 +1175,13 @@ bool fm10k_check_tx_hang(struct fm10k_ring *tx_ring)
                /* update completed stats and continue */
                tx_ring->tx_stats.tx_done_old = tx_done;
                /* reset the countdown */
-               clear_bit(__FM10K_HANG_CHECK_ARMED, &tx_ring->state);
+               clear_bit(__FM10K_HANG_CHECK_ARMED, tx_ring->state);
 
                return false;
        }
 
        /* make sure it is true for two checks in a row */
-       return test_and_set_bit(__FM10K_HANG_CHECK_ARMED, &tx_ring->state);
+       return test_and_set_bit(__FM10K_HANG_CHECK_ARMED, tx_ring->state);
 }
 
 /**
@@ -1191,9 +1191,9 @@ bool fm10k_check_tx_hang(struct fm10k_ring *tx_ring)
 void fm10k_tx_timeout_reset(struct fm10k_intfc *interface)
 {
        /* Do the reset outside of interrupt context */
-       if (!test_bit(__FM10K_DOWN, &interface->state)) {
+       if (!test_bit(__FM10K_DOWN, interface->state)) {
                interface->tx_timeout_count++;
-               interface->flags |= FM10K_FLAG_RESET_REQUESTED;
+               set_bit(FM10K_FLAG_RESET_REQUESTED, interface->flags);
                fm10k_service_event_schedule(interface);
        }
 }
@@ -1214,7 +1214,7 @@ static bool fm10k_clean_tx_irq(struct fm10k_q_vector *q_vector,
        unsigned int budget = q_vector->tx.work_limit;
        unsigned int i = tx_ring->next_to_clean;
 
-       if (test_bit(__FM10K_DOWN, &interface->state))
+       if (test_bit(__FM10K_DOWN, interface->state))
                return true;
 
        tx_buffer = &tx_ring->tx_buffer[i];
@@ -1344,7 +1344,7 @@ static bool fm10k_clean_tx_irq(struct fm10k_q_vector *q_vector,
                smp_mb();
                if (__netif_subqueue_stopped(tx_ring->netdev,
                                             tx_ring->queue_index) &&
-                   !test_bit(__FM10K_DOWN, &interface->state)) {
+                   !test_bit(__FM10K_DOWN, interface->state)) {
                        netif_wake_subqueue(tx_ring->netdev,
                                            tx_ring->queue_index);
                        ++tx_ring->tx_stats.restart_queue;
index 01db688cf5398d434e81c2d4016e5764bbd49820..24f2f6f86f5a3a50929eb52b5dbe6eb08e0e8635 100644 (file)
@@ -1,5 +1,5 @@
 /* Intel(R) Ethernet Switch Host Interface Driver
- * Copyright(c) 2013 - 2016 Intel Corporation.
+ * Copyright(c) 2013 - 2017 Intel Corporation.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms and conditions of the GNU General Public License,
@@ -737,6 +737,23 @@ static void fm10k_tx_timeout(struct net_device *netdev)
        }
 }
 
+/**
+ * fm10k_host_mbx_ready - Check PF interface's mailbox readiness
+ * @interface: board private structure
+ *
+ * This function checks if the PF interface's mailbox is ready before queueing
+ * mailbox messages for transmission. This will prevent filling the TX mailbox
+ * queue when the receiver is not ready. VF interfaces are exempt from this
+ * check, since it would otherwise block all VF-to-PF mailbox messages from
+ * being sent at initialization.
+ **/
+static bool fm10k_host_mbx_ready(struct fm10k_intfc *interface)
+{
+       struct fm10k_hw *hw = &interface->hw;
+
+       return (hw->mac.type == fm10k_mac_vf || interface->host_ready);
+}
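+
+/* Reduced sketch of the gating pattern this helper enables throughout the
+ * file: callers default err to -EHOSTDOWN and only attempt the mailbox
+ * operation when the host reports ready. Types and names below are invented;
+ * only the shape mirrors the diff.
+ *
+ *     struct demo_intfc { bool is_vf; bool host_ready; };
+ *
+ *     static bool demo_host_mbx_ready(const struct demo_intfc *i)
+ *     {
+ *             return i->is_vf || i->host_ready;
+ *     }
+ *
+ *     static int demo_update_addr(struct demo_intfc *i)
+ *     {
+ *             int err = -EHOSTDOWN;   // default when the mailbox is not ready
+ *
+ *             if (demo_host_mbx_ready(i))
+ *                     err = 0;        // real code queues the mailbox message
+ *             return err;
+ *     }
+ */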
+
 static int fm10k_uc_vlan_unsync(struct net_device *netdev,
                                const unsigned char *uc_addr)
 {
@@ -745,12 +762,15 @@ static int fm10k_uc_vlan_unsync(struct net_device *netdev,
        u16 glort = interface->glort;
        u16 vid = interface->vid;
        bool set = !!(vid / VLAN_N_VID);
-       int err;
+       int err = -EHOSTDOWN;
 
        /* drop any leading bits on the VLAN ID */
        vid &= VLAN_N_VID - 1;
 
-       err = hw->mac.ops.update_uc_addr(hw, glort, uc_addr, vid, set, 0);
+       if (fm10k_host_mbx_ready(interface))
+               err = hw->mac.ops.update_uc_addr(hw, glort, uc_addr,
+                                                vid, set, 0);
+
        if (err)
                return err;
 
@@ -766,12 +786,14 @@ static int fm10k_mc_vlan_unsync(struct net_device *netdev,
        u16 glort = interface->glort;
        u16 vid = interface->vid;
        bool set = !!(vid / VLAN_N_VID);
-       int err;
+       int err = -EHOSTDOWN;
 
        /* drop any leading bits on the VLAN ID */
        vid &= VLAN_N_VID - 1;
 
-       err = hw->mac.ops.update_mc_addr(hw, glort, mc_addr, vid, set);
+       if (fm10k_host_mbx_ready(interface))
+               err = hw->mac.ops.update_mc_addr(hw, glort, mc_addr, vid, set);
+
        if (err)
                return err;
 
@@ -822,7 +844,7 @@ static int fm10k_update_vid(struct net_device *netdev, u16 vid, bool set)
        /* Do not throw an error if the interface is down. We will sync once
         * we come up
         */
-       if (test_bit(__FM10K_DOWN, &interface->state))
+       if (test_bit(__FM10K_DOWN, interface->state))
                return 0;
 
        fm10k_mbx_lock(interface);
@@ -834,9 +856,13 @@ static int fm10k_update_vid(struct net_device *netdev, u16 vid, bool set)
                        goto err_out;
        }
 
-       /* update our base MAC address */
-       err = hw->mac.ops.update_uc_addr(hw, interface->glort, hw->mac.addr,
-                                        vid, set, 0);
+       /* update our base MAC address if host's mailbox is ready */
+       if (fm10k_host_mbx_ready(interface))
+               err = hw->mac.ops.update_uc_addr(hw, interface->glort,
+                                                hw->mac.addr, vid, set, 0);
+       else
+               err = -EHOSTDOWN;
+
        if (err)
                goto err_out;
 
@@ -907,12 +933,15 @@ static int __fm10k_uc_sync(struct net_device *dev,
        if (!is_valid_ether_addr(addr))
                return -EADDRNOTAVAIL;
 
-       /* update table with current entries */
+       /* update table with current entries if host's mailbox is ready */
+       if (!fm10k_host_mbx_ready(interface))
+               return -EHOSTDOWN;
+
        for (vid = hw->mac.default_vid ? fm10k_find_next_vlan(interface, 0) : 1;
             vid < VLAN_N_VID;
             vid = fm10k_find_next_vlan(interface, vid)) {
                err = hw->mac.ops.update_uc_addr(hw, glort, addr,
-                                                 vid, sync, 0);
+                                                vid, sync, 0);
                if (err)
                        return err;
        }
@@ -970,7 +999,10 @@ static int __fm10k_mc_sync(struct net_device *dev,
        struct fm10k_hw *hw = &interface->hw;
        u16 vid, glort = interface->glort;
 
-       /* update table with current entries */
+       /* update table with current entries if host's mailbox is ready */
+       if (!fm10k_host_mbx_ready(interface))
+               return 0;
+
        for (vid = hw->mac.default_vid ? fm10k_find_next_vlan(interface, 0) : 1;
             vid < VLAN_N_VID;
             vid = fm10k_find_next_vlan(interface, vid)) {
@@ -1018,8 +1050,10 @@ static void fm10k_set_rx_mode(struct net_device *dev)
                if (interface->xcast_mode == FM10K_XCAST_MODE_PROMISC)
                        fm10k_clear_unused_vlans(interface);
 
-               /* update xcast mode */
-               hw->mac.ops.update_xcast_mode(hw, interface->glort, xcast_mode);
+               /* update xcast mode if host's mailbox is ready */
+               if (fm10k_host_mbx_ready(interface))
+                       hw->mac.ops.update_xcast_mode(hw, interface->glort,
+                                                     xcast_mode);
 
                /* record updated xcast mode state */
                interface->xcast_mode = xcast_mode;
@@ -1054,8 +1088,10 @@ void fm10k_restore_rx_state(struct fm10k_intfc *interface)
 
        fm10k_mbx_lock(interface);
 
-       /* Enable logical port */
-       hw->mac.ops.update_lport_state(hw, glort, interface->glort_count, true);
+       /* Enable logical port if host's mailbox is ready */
+       if (fm10k_host_mbx_ready(interface))
+               hw->mac.ops.update_lport_state(hw, glort,
+                                              interface->glort_count, true);
 
        /* update VLAN table */
        hw->mac.ops.update_vlan(hw, FM10K_VLAN_ALL, 0,
@@ -1069,12 +1105,18 @@ void fm10k_restore_rx_state(struct fm10k_intfc *interface)
             vid < VLAN_N_VID;
             vid = fm10k_find_next_vlan(interface, vid)) {
                hw->mac.ops.update_vlan(hw, vid, 0, true);
-               hw->mac.ops.update_uc_addr(hw, glort, hw->mac.addr,
-                                          vid, true, 0);
+
+               /* Update unicast entries if host's mailbox is ready */
+               if (fm10k_host_mbx_ready(interface))
+                       hw->mac.ops.update_uc_addr(hw, glort, hw->mac.addr,
+                                                  vid, true, 0);
        }
 
-       /* update xcast mode before synchronizing addresses */
-       hw->mac.ops.update_xcast_mode(hw, glort, xcast_mode);
+       /* update xcast mode before synchronizing addresses if host's mailbox
+        * is ready
+        */
+       if (fm10k_host_mbx_ready(interface))
+               hw->mac.ops.update_xcast_mode(hw, glort, xcast_mode);
 
        /* synchronize all of the addresses */
        __dev_uc_sync(netdev, fm10k_uc_sync, fm10k_uc_unsync);
@@ -1096,9 +1138,12 @@ void fm10k_reset_rx_state(struct fm10k_intfc *interface)
 
        fm10k_mbx_lock(interface);
 
-       /* clear the logical port state on lower device */
-       hw->mac.ops.update_lport_state(hw, interface->glort,
-                                      interface->glort_count, false);
+       /* clear the logical port state on lower device if host's mailbox is
+        * ready
+        */
+       if (fm10k_host_mbx_ready(interface))
+               hw->mac.ops.update_lport_state(hw, interface->glort,
+                                              interface->glort_count, false);
 
        fm10k_mbx_unlock(interface);
 
@@ -1115,8 +1160,8 @@ void fm10k_reset_rx_state(struct fm10k_intfc *interface)
  * @netdev: network interface device structure
  * @stats: storage space for 64bit statistics
  *
- * Returns 64bit statistics, for use in the ndo_get_stats64 callback. This
- * function replaces fm10k_get_stats for kernels which support it.
+ * Obtain 64bit statistics in a way that is safe for both 32bit and 64bit
+ * architectures.
  */
 static void fm10k_get_stats64(struct net_device *netdev,
                              struct rtnl_link_stats64 *stats)
@@ -1207,7 +1252,7 @@ int fm10k_setup_tc(struct net_device *dev, u8 tc)
                goto err_open;
 
        /* flag to indicate SWPRI has yet to be updated */
-       interface->flags |= FM10K_FLAG_SWPRI_CONFIG;
+       set_bit(FM10K_FLAG_SWPRI_CONFIG, interface->flags);
 
        return 0;
 err_open:
@@ -1226,7 +1271,9 @@ static int __fm10k_setup_tc(struct net_device *dev, u32 handle, __be16 proto,
        if (tc->type != TC_SETUP_MQPRIO)
                return -EINVAL;
 
-       return fm10k_setup_tc(dev, tc->tc);
+       tc->mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS;
+
+       return fm10k_setup_tc(dev, tc->mqprio->num_tc);
 }
 
 static void fm10k_assign_l2_accel(struct fm10k_intfc *interface,
@@ -1317,8 +1364,13 @@ static void *fm10k_dfwd_add_station(struct net_device *dev,
        fm10k_mbx_lock(interface);
 
        glort = l2_accel->dglort + 1 + i;
-       hw->mac.ops.update_xcast_mode(hw, glort, FM10K_XCAST_MODE_MULTI);
-       hw->mac.ops.update_uc_addr(hw, glort, sdev->dev_addr, 0, true, 0);
+
+       if (fm10k_host_mbx_ready(interface)) {
+               hw->mac.ops.update_xcast_mode(hw, glort,
+                                             FM10K_XCAST_MODE_MULTI);
+               hw->mac.ops.update_uc_addr(hw, glort, sdev->dev_addr,
+                                          0, true, 0);
+       }
 
        fm10k_mbx_unlock(interface);
 
@@ -1352,8 +1404,13 @@ static void fm10k_dfwd_del_station(struct net_device *dev, void *priv)
        fm10k_mbx_lock(interface);
 
        glort = l2_accel->dglort + 1 + i;
-       hw->mac.ops.update_xcast_mode(hw, glort, FM10K_XCAST_MODE_NONE);
-       hw->mac.ops.update_uc_addr(hw, glort, sdev->dev_addr, 0, false, 0);
+
+       if (fm10k_host_mbx_ready(interface)) {
+               hw->mac.ops.update_xcast_mode(hw, glort,
+                                             FM10K_XCAST_MODE_NONE);
+               hw->mac.ops.update_uc_addr(hw, glort, sdev->dev_addr,
+                                          0, false, 0);
+       }
 
        fm10k_mbx_unlock(interface);
 
index e372a582348015355e5406eae522fbda148558bd..3e26d27ad213362bcc473262435740ccf8d2c697 100644 (file)
@@ -1,5 +1,5 @@
 /* Intel(R) Ethernet Switch Host Interface Driver
- * Copyright(c) 2013 - 2016 Intel Corporation.
+ * Copyright(c) 2013 - 2017 Intel Corporation.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms and conditions of the GNU General Public License,
@@ -19,6 +19,7 @@
  */
 
 #include <linux/module.h>
+#include <linux/interrupt.h>
 #include <linux/aer.h>
 
 #include "fm10k.h"
@@ -92,18 +93,29 @@ static int fm10k_hw_ready(struct fm10k_intfc *interface)
 
 void fm10k_service_event_schedule(struct fm10k_intfc *interface)
 {
-       if (!test_bit(__FM10K_SERVICE_DISABLE, &interface->state) &&
-           !test_and_set_bit(__FM10K_SERVICE_SCHED, &interface->state))
+       if (!test_bit(__FM10K_SERVICE_DISABLE, interface->state) &&
+           !test_and_set_bit(__FM10K_SERVICE_SCHED, interface->state)) {
+               clear_bit(__FM10K_SERVICE_REQUEST, interface->state);
                queue_work(fm10k_workqueue, &interface->service_task);
+       } else {
+               set_bit(__FM10K_SERVICE_REQUEST, interface->state);
+       }
 }
 
 static void fm10k_service_event_complete(struct fm10k_intfc *interface)
 {
-       WARN_ON(!test_bit(__FM10K_SERVICE_SCHED, &interface->state));
+       WARN_ON(!test_bit(__FM10K_SERVICE_SCHED, interface->state));
 
        /* flush memory to make sure state is correct before next watchdog */
        smp_mb__before_atomic();
-       clear_bit(__FM10K_SERVICE_SCHED, &interface->state);
+       clear_bit(__FM10K_SERVICE_SCHED, interface->state);
+
+       /* If a service event was requested since we started, immediately
+        * re-schedule now. This ensures a request is not silently dropped
+        * and left waiting until the next timer event.
+        */
+       if (test_bit(__FM10K_SERVICE_REQUEST, interface->state))
+               fm10k_service_event_schedule(interface);
 }
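
The new __FM10K_SERVICE_REQUEST bit closes a lost-wakeup window: a schedule attempt that arrives while the task is already queued or disabled is remembered and replayed on completion. A toy model with plain booleans (the driver itself uses atomic bit ops and a real workqueue):

/* Hedged sketch of the request-coalescing idiom; not thread-safe,
 * purely to show why the REQUEST flag prevents dropped events.
 */
#include <stdbool.h>
#include <stdio.h>

static bool sched;      /* __FM10K_SERVICE_SCHED analogue */
static bool request;    /* __FM10K_SERVICE_REQUEST analogue */

static void schedule_service(void)
{
        if (!sched) {
                sched = true;
                request = false;
                printf("queued service task\n");
        } else {
                /* task already queued: remember that more work arrived */
                request = true;
        }
}

static void service_complete(void)
{
        sched = false;
        if (request)            /* an event arrived while we were running */
                schedule_service();
}

int main(void)
{
        schedule_service();     /* first event queues the task */
        schedule_service();     /* second event only sets the request bit */
        service_complete();     /* completion re-queues immediately */
        return 0;
}
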
 
 /**
@@ -136,7 +148,7 @@ static void fm10k_detach_subtask(struct fm10k_intfc *interface)
        if (~value) {
                interface->hw.hw_addr = interface->uc_addr;
                netif_device_attach(netdev);
-               interface->flags |= FM10K_FLAG_RESET_REQUESTED;
+               set_bit(FM10K_FLAG_RESET_REQUESTED, interface->flags);
                netdev_warn(netdev, "PCIe link restored, device now attached\n");
                return;
        }
@@ -158,7 +170,7 @@ static void fm10k_prepare_for_reset(struct fm10k_intfc *interface)
        /* put off any impending NetWatchDogTimeout */
        netif_trans_update(netdev);
 
-       while (test_and_set_bit(__FM10K_RESETTING, &interface->state))
+       while (test_and_set_bit(__FM10K_RESETTING, interface->state))
                usleep_range(1000, 2000);
 
        rtnl_lock();
@@ -241,7 +253,7 @@ static int fm10k_handle_reset(struct fm10k_intfc *interface)
 
        rtnl_unlock();
 
-       clear_bit(__FM10K_RESETTING, &interface->state);
+       clear_bit(__FM10K_RESETTING, interface->state);
 
        return err;
 err_open:
@@ -253,7 +265,7 @@ reinit_err:
 
        rtnl_unlock();
 
-       clear_bit(__FM10K_RESETTING, &interface->state);
+       clear_bit(__FM10K_RESETTING, interface->state);
 
        return err;
 }
@@ -272,11 +284,10 @@ static void fm10k_reinit(struct fm10k_intfc *interface)
 
 static void fm10k_reset_subtask(struct fm10k_intfc *interface)
 {
-       if (!(interface->flags & FM10K_FLAG_RESET_REQUESTED))
+       if (!test_and_clear_bit(FM10K_FLAG_RESET_REQUESTED,
+                               interface->flags))
                return;
 
-       interface->flags &= ~FM10K_FLAG_RESET_REQUESTED;
-
        netdev_err(interface->netdev, "Reset interface\n");
 
        fm10k_reinit(interface);
@@ -295,7 +306,7 @@ static void fm10k_configure_swpri_map(struct fm10k_intfc *interface)
        int i;
 
        /* clear flag indicating update is needed */
-       interface->flags &= ~FM10K_FLAG_SWPRI_CONFIG;
+       clear_bit(FM10K_FLAG_SWPRI_CONFIG, interface->flags);
 
        /* these registers are only available on the PF */
        if (hw->mac.type != fm10k_mac_pf)
@@ -316,14 +327,14 @@ static void fm10k_watchdog_update_host_state(struct fm10k_intfc *interface)
        struct fm10k_hw *hw = &interface->hw;
        s32 err;
 
-       if (test_bit(__FM10K_LINK_DOWN, &interface->state)) {
+       if (test_bit(__FM10K_LINK_DOWN, interface->state)) {
                interface->host_ready = false;
                if (time_is_after_jiffies(interface->link_down_event))
                        return;
-               clear_bit(__FM10K_LINK_DOWN, &interface->state);
+               clear_bit(__FM10K_LINK_DOWN, interface->state);
        }
 
-       if (interface->flags & FM10K_FLAG_SWPRI_CONFIG) {
+       if (test_bit(FM10K_FLAG_SWPRI_CONFIG, interface->flags)) {
                if (rtnl_trylock()) {
                        fm10k_configure_swpri_map(interface);
                        rtnl_unlock();
@@ -335,7 +346,7 @@ static void fm10k_watchdog_update_host_state(struct fm10k_intfc *interface)
 
        err = hw->mac.ops.get_host_state(hw, &interface->host_ready);
        if (err && time_is_before_jiffies(interface->last_reset))
-               interface->flags |= FM10K_FLAG_RESET_REQUESTED;
+               set_bit(FM10K_FLAG_RESET_REQUESTED, interface->flags);
 
        /* free the lock */
        fm10k_mbx_unlock(interface);
@@ -411,7 +422,7 @@ void fm10k_update_stats(struct fm10k_intfc *interface)
        int i;
 
        /* ensure only one thread updates stats at a time */
-       if (test_and_set_bit(__FM10K_UPDATING_STATS, &interface->state))
+       if (test_and_set_bit(__FM10K_UPDATING_STATS, interface->state))
                return;
 
        /* do not allow stats update via service task for next second */
@@ -492,7 +503,7 @@ void fm10k_update_stats(struct fm10k_intfc *interface)
        net_stats->rx_errors = rx_errors;
        net_stats->rx_dropped = interface->stats.nodesc_drop.count;
 
-       clear_bit(__FM10K_UPDATING_STATS, &interface->state);
+       clear_bit(__FM10K_UPDATING_STATS, interface->state);
 }
 
 /**
@@ -522,7 +533,7 @@ static void fm10k_watchdog_flush_tx(struct fm10k_intfc *interface)
         * controller to flush Tx.
         */
        if (some_tx_pending)
-               interface->flags |= FM10K_FLAG_RESET_REQUESTED;
+               set_bit(FM10K_FLAG_RESET_REQUESTED, interface->flags);
 }
 
 /**
@@ -532,8 +543,8 @@ static void fm10k_watchdog_flush_tx(struct fm10k_intfc *interface)
 static void fm10k_watchdog_subtask(struct fm10k_intfc *interface)
 {
        /* if interface is down do nothing */
-       if (test_bit(__FM10K_DOWN, &interface->state) ||
-           test_bit(__FM10K_RESETTING, &interface->state))
+       if (test_bit(__FM10K_DOWN, interface->state) ||
+           test_bit(__FM10K_RESETTING, interface->state))
                return;
 
        if (interface->host_ready)
@@ -563,8 +574,8 @@ static void fm10k_check_hang_subtask(struct fm10k_intfc *interface)
        int i;
 
        /* If we're down or resetting, just bail */
-       if (test_bit(__FM10K_DOWN, &interface->state) ||
-           test_bit(__FM10K_RESETTING, &interface->state))
+       if (test_bit(__FM10K_DOWN, interface->state) ||
+           test_bit(__FM10K_RESETTING, interface->state))
                return;
 
        /* rate limit tx hang checks to only once every 2 seconds */
@@ -663,7 +674,7 @@ static void fm10k_configure_tx_ring(struct fm10k_intfc *interface,
                        FM10K_PFVTCTL_FTAG_DESC_ENABLE);
 
        /* Initialize XPS */
-       if (!test_and_set_bit(__FM10K_TX_XPS_INIT_DONE, &ring->state) &&
+       if (!test_and_set_bit(__FM10K_TX_XPS_INIT_DONE, ring->state) &&
            ring->q_vector)
                netif_set_xps_queue(ring->netdev,
                                    &ring->q_vector->affinity_mask,
@@ -743,6 +754,7 @@ static void fm10k_configure_rx_ring(struct fm10k_intfc *interface,
        /* disable queue to avoid issues while updating state */
        rxqctl = fm10k_read_reg(hw, FM10K_RXQCTL(reg_idx));
        rxqctl &= ~FM10K_RXQCTL_ENABLE;
+       fm10k_write_reg(hw, FM10K_RXQCTL(reg_idx), rxqctl);
        fm10k_write_flush(hw);
 
        /* possible poll here to verify ring resources have been cleaned */
@@ -863,9 +875,9 @@ static void fm10k_configure_dglort(struct fm10k_intfc *interface)
               FM10K_MRQC_IPV6 |
               FM10K_MRQC_TCP_IPV6;
 
-       if (interface->flags & FM10K_FLAG_RSS_FIELD_IPV4_UDP)
+       if (test_bit(FM10K_FLAG_RSS_FIELD_IPV4_UDP, interface->flags))
                mrqc |= FM10K_MRQC_UDP_IPV4;
-       if (interface->flags & FM10K_FLAG_RSS_FIELD_IPV6_UDP)
+       if (test_bit(FM10K_FLAG_RSS_FIELD_IPV6_UDP, interface->flags))
                mrqc |= FM10K_MRQC_UDP_IPV6;
 
        fm10k_write_reg(hw, FM10K_MRQC(0), mrqc);
@@ -980,7 +992,7 @@ void fm10k_netpoll(struct net_device *netdev)
        int i;
 
        /* if interface is down do nothing */
-       if (test_bit(__FM10K_DOWN, &interface->state))
+       if (test_bit(__FM10K_DOWN, interface->state))
                return;
 
        for (i = 0; i < interface->num_q_vectors; i++)
@@ -1167,13 +1179,13 @@ static irqreturn_t fm10k_msix_mbx_pf(int __always_unused irq, void *data)
        }
 
        if (err == FM10K_ERR_RESET_REQUESTED)
-               interface->flags |= FM10K_FLAG_RESET_REQUESTED;
+               set_bit(FM10K_FLAG_RESET_REQUESTED, interface->flags);
 
        /* if switch toggled state we should reset GLORTs */
        if (eicr & FM10K_EICR_SWITCHNOTREADY) {
                /* force link down for at least 4 seconds */
                interface->link_down_event = jiffies + (4 * HZ);
-               set_bit(__FM10K_LINK_DOWN, &interface->state);
+               set_bit(__FM10K_LINK_DOWN, interface->state);
 
                /* reset dglort_map back to no config */
                hw->mac.dglort_map = FM10K_DGLORTMAP_NONE;
@@ -1246,12 +1258,12 @@ static s32 fm10k_mbx_mac_addr(struct fm10k_hw *hw, u32 **results,
        /* MAC was changed so we need reset */
        if (is_valid_ether_addr(hw->mac.perm_addr) &&
            !ether_addr_equal(hw->mac.perm_addr, hw->mac.addr))
-               interface->flags |= FM10K_FLAG_RESET_REQUESTED;
+               set_bit(FM10K_FLAG_RESET_REQUESTED, interface->flags);
 
        /* VLAN override was changed, or default VLAN changed */
        if ((vlan_override != hw->mac.vlan_override) ||
            (default_vid != hw->mac.default_vid))
-               interface->flags |= FM10K_FLAG_RESET_REQUESTED;
+               set_bit(FM10K_FLAG_RESET_REQUESTED, interface->flags);
 
        return 0;
 }
@@ -1325,7 +1337,7 @@ static s32 fm10k_lport_map(struct fm10k_hw *hw, u32 **results,
        if (!err && hw->swapi.status) {
                /* force link down for a reasonable delay */
                interface->link_down_event = jiffies + (2 * HZ);
-               set_bit(__FM10K_LINK_DOWN, &interface->state);
+               set_bit(__FM10K_LINK_DOWN, interface->state);
 
                /* reset dglort_map back to no config */
                hw->mac.dglort_map = FM10K_DGLORTMAP_NONE;
@@ -1356,7 +1368,7 @@ static s32 fm10k_lport_map(struct fm10k_hw *hw, u32 **results,
 
        /* we need to reset if port count was just updated */
        if (dglort_map != hw->mac.dglort_map)
-               interface->flags |= FM10K_FLAG_RESET_REQUESTED;
+               set_bit(FM10K_FLAG_RESET_REQUESTED, interface->flags);
 
        return 0;
 }
@@ -1395,7 +1407,7 @@ static s32 fm10k_update_pvid(struct fm10k_hw *hw, u32 **results,
 
        /* we need to reset if default VLAN was just updated */
        if (pvid != hw->mac.default_vid)
-               interface->flags |= FM10K_FLAG_RESET_REQUESTED;
+               set_bit(FM10K_FLAG_RESET_REQUESTED, interface->flags);
 
        hw->mac.default_vid = pvid;
 
@@ -1623,10 +1635,10 @@ void fm10k_up(struct fm10k_intfc *interface)
        hw->mac.ops.update_int_moderator(hw);
 
        /* enable statistics capture again */
-       clear_bit(__FM10K_UPDATING_STATS, &interface->state);
+       clear_bit(__FM10K_UPDATING_STATS, interface->state);
 
        /* clear down bit to indicate we are ready to go */
-       clear_bit(__FM10K_DOWN, &interface->state);
+       clear_bit(__FM10K_DOWN, interface->state);
 
        /* enable polling cleanups */
        fm10k_napi_enable_all(interface);
@@ -1660,7 +1672,7 @@ void fm10k_down(struct fm10k_intfc *interface)
        int err, i = 0, count = 0;
 
        /* signal that we are down to the interrupt handler and service task */
-       if (test_and_set_bit(__FM10K_DOWN, &interface->state))
+       if (test_and_set_bit(__FM10K_DOWN, interface->state))
                return;
 
        /* call carrier off first to avoid false dev_watchdog timeouts */
@@ -1680,7 +1692,7 @@ void fm10k_down(struct fm10k_intfc *interface)
        fm10k_update_stats(interface);
 
        /* prevent updating statistics while we're down */
-       while (test_and_set_bit(__FM10K_UPDATING_STATS, &interface->state))
+       while (test_and_set_bit(__FM10K_UPDATING_STATS, interface->state))
                usleep_range(1000, 2000);
 
        /* skip waiting for TX DMA if we lost PCIe link */
@@ -1849,8 +1861,8 @@ static int fm10k_sw_init(struct fm10k_intfc *interface,
        memcpy(interface->rssrk, rss_key, sizeof(rss_key));
 
        /* Start off interface as being down */
-       set_bit(__FM10K_DOWN, &interface->state);
-       set_bit(__FM10K_UPDATING_STATS, &interface->state);
+       set_bit(__FM10K_DOWN, interface->state);
+       set_bit(__FM10K_UPDATING_STATS, interface->state);
 
        return 0;
 }
@@ -2027,7 +2039,7 @@ static int fm10k_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
         * must ensure it is disabled since we haven't yet requested the timer
         * or work item.
         */
-       set_bit(__FM10K_SERVICE_DISABLE, &interface->state);
+       set_bit(__FM10K_SERVICE_DISABLE, interface->state);
 
        err = fm10k_mbx_request_irq(interface);
        if (err)
@@ -2068,7 +2080,7 @@ static int fm10k_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
        fm10k_iov_configure(pdev, 0);
 
        /* clear the service task disable bit to allow service task to start */
-       clear_bit(__FM10K_SERVICE_DISABLE, &interface->state);
+       clear_bit(__FM10K_SERVICE_DISABLE, interface->state);
 
        return 0;
 
@@ -2106,7 +2118,7 @@ static void fm10k_remove(struct pci_dev *pdev)
 
        del_timer_sync(&interface->service_timer);
 
-       set_bit(__FM10K_SERVICE_DISABLE, &interface->state);
+       set_bit(__FM10K_SERVICE_DISABLE, interface->state);
        cancel_work_sync(&interface->service_task);
 
        /* free netdev, this may bounce the interrupts due to setup_tc */
@@ -2145,7 +2157,7 @@ static void fm10k_prepare_suspend(struct fm10k_intfc *interface)
         * stopped. We stop the watchdog task until after we resume software
         * activity.
         */
-       set_bit(__FM10K_SERVICE_DISABLE, &interface->state);
+       set_bit(__FM10K_SERVICE_DISABLE, interface->state);
        cancel_work_sync(&interface->service_task);
 
        fm10k_prepare_for_reset(interface);
@@ -2171,10 +2183,10 @@ static int fm10k_handle_resume(struct fm10k_intfc *interface)
 
        /* force link to stay down for a second to prevent link flutter */
        interface->link_down_event = jiffies + (HZ);
-       set_bit(__FM10K_LINK_DOWN, &interface->state);
+       set_bit(__FM10K_LINK_DOWN, interface->state);
 
        /* clear the service task disable bit to allow service task to start */
-       clear_bit(__FM10K_SERVICE_DISABLE, &interface->state);
+       clear_bit(__FM10K_SERVICE_DISABLE, interface->state);
        fm10k_service_event_schedule(interface);
 
        return err;
index 3b3c63e54ed638f03278506b91c0eaa854a6c575..4f454d364d0d35c33e0ab60c6c075d3764413b5b 100644 (file)
@@ -45,4 +45,3 @@ i40e-objs := i40e_main.o \
        i40e_virtchnl_pf.o
 
 i40e-$(CONFIG_I40E_DCB) += i40e_dcb.o i40e_dcb_nl.o
-i40e-$(CONFIG_I40E_FCOE) += i40e_fcoe.o
index 82d8040fa418a3cf905d3f27335d1ec177f00b99..e987503f8517d4211590bb555786102424b17713 100644 (file)
@@ -56,9 +56,6 @@
 #include <linux/ptp_clock_kernel.h>
 #include "i40e_type.h"
 #include "i40e_prototype.h"
-#ifdef I40E_FCOE
-#include "i40e_fcoe.h"
-#endif
 #include "i40e_client.h"
 #include "i40e_virtchnl.h"
 #include "i40e_virtchnl_pf.h"
                (((pf)->flags & I40E_FLAG_128_QP_RSS_CAPABLE) ? 128 : 64)
 #define I40E_FDIR_RING                 0
 #define I40E_FDIR_RING_COUNT           32
-#ifdef I40E_FCOE
-#define I40E_DEFAULT_FCOE              8 /* default number of QPs for FCoE */
-#define I40E_MINIMUM_FCOE              1 /* minimum number of QPs for FCoE */
-#endif /* I40E_FCOE */
 #define I40E_MAX_AQ_BUF_SIZE           4096
 #define I40E_AQ_LEN                    256
 #define I40E_AQ_WORK_LIMIT             66 /* max number of VFs + a little */
 #define I40E_QUEUE_WAIT_RETRY_LIMIT    10
 #define I40E_INT_NAME_STR_LEN          (IFNAMSIZ + 16)
 
-/* Ethtool Private Flags */
-#define I40E_PRIV_FLAGS_MFP_FLAG               BIT(0)
-#define I40E_PRIV_FLAGS_LINKPOLL_FLAG          BIT(1)
-#define I40E_PRIV_FLAGS_FD_ATR                 BIT(2)
-#define I40E_PRIV_FLAGS_VEB_STATS              BIT(3)
-#define I40E_PRIV_FLAGS_HW_ATR_EVICT           BIT(4)
-#define I40E_PRIV_FLAGS_TRUE_PROMISC_SUPPORT   BIT(5)
-
 #define I40E_NVM_VERSION_LO_SHIFT      0
 #define I40E_NVM_VERSION_LO_MASK       (0xff << I40E_NVM_VERSION_LO_SHIFT)
 #define I40E_NVM_VERSION_HI_SHIFT      12
@@ -202,17 +187,32 @@ enum i40e_fd_stat_idx {
 #define I40E_FD_ATR_TUNNEL_STAT_IDX(pf_id) \
                        (I40E_FD_STAT_PF_IDX(pf_id) + I40E_FD_STAT_ATR_TUNNEL)
 
+/* The following structure contains the data parsed from the user-defined
+ * field of the ethtool_rx_flow_spec structure.
+ */
+struct i40e_rx_flow_userdef {
+       bool flex_filter;
+       u16 flex_word;
+       u16 flex_offset;
+};
+
 struct i40e_fdir_filter {
        struct hlist_node fdir_node;
        /* filter input set */
        u8 flow_type;
        u8 ip4_proto;
        /* TX packet view of src and dst */
-       __be32 dst_ip[4];
-       __be32 src_ip[4];
+       __be32 dst_ip;
+       __be32 src_ip;
        __be16 src_port;
        __be16 dst_port;
        __be32 sctp_v_tag;
+
+       /* Flexible data to match within the packet payload */
+       __be16 flex_word;
+       u16 flex_offset;
+       bool flex_filter;
+
        /* filter control */
        u16 q_index;
        u8  flex_off;
@@ -244,10 +244,80 @@ struct i40e_tc_configuration {
 };
 
 struct i40e_udp_port_config {
-       __be16 index;
+       /* AdminQ command interface expects port number in Host byte order */
+       u16 index;
        u8 type;
 };
 
+/* macros related to FLX_PIT */
+#define I40E_FLEX_SET_FSIZE(fsize) (((fsize) << \
+                                   I40E_PRTQF_FLX_PIT_FSIZE_SHIFT) & \
+                                   I40E_PRTQF_FLX_PIT_FSIZE_MASK)
+#define I40E_FLEX_SET_DST_WORD(dst) (((dst) << \
+                                    I40E_PRTQF_FLX_PIT_DEST_OFF_SHIFT) & \
+                                    I40E_PRTQF_FLX_PIT_DEST_OFF_MASK)
+#define I40E_FLEX_SET_SRC_WORD(src) (((src) << \
+                                    I40E_PRTQF_FLX_PIT_SOURCE_OFF_SHIFT) & \
+                                    I40E_PRTQF_FLX_PIT_SOURCE_OFF_MASK)
+#define I40E_FLEX_PREP_VAL(dst, fsize, src) (I40E_FLEX_SET_DST_WORD(dst) | \
+                                            I40E_FLEX_SET_FSIZE(fsize) | \
+                                            I40E_FLEX_SET_SRC_WORD(src))
+
+#define I40E_FLEX_PIT_GET_SRC(flex) (((flex) & \
+                                    I40E_PRTQF_FLX_PIT_SOURCE_OFF_MASK) >> \
+                                    I40E_PRTQF_FLX_PIT_SOURCE_OFF_SHIFT)
+#define I40E_FLEX_PIT_GET_DST(flex) (((flex) & \
+                                    I40E_PRTQF_FLX_PIT_DEST_OFF_MASK) >> \
+                                    I40E_PRTQF_FLX_PIT_DEST_OFF_SHIFT)
+#define I40E_FLEX_PIT_GET_FSIZE(flex) (((flex) & \
+                                      I40E_PRTQF_FLX_PIT_FSIZE_MASK) >> \
+                                      I40E_PRTQF_FLX_PIT_FSIZE_SHIFT)
+
+#define I40E_MAX_FLEX_SRC_OFFSET 0x1F
+
+/* macros related to GLQF_ORT */
+#define I40E_ORT_SET_IDX(idx)          (((idx) << \
+                                         I40E_GLQF_ORT_PIT_INDX_SHIFT) & \
+                                        I40E_GLQF_ORT_PIT_INDX_MASK)
+
+#define I40E_ORT_SET_COUNT(count)      (((count) << \
+                                         I40E_GLQF_ORT_FIELD_CNT_SHIFT) & \
+                                        I40E_GLQF_ORT_FIELD_CNT_MASK)
+
+#define I40E_ORT_SET_PAYLOAD(payload)  (((payload) << \
+                                         I40E_GLQF_ORT_FLX_PAYLOAD_SHIFT) & \
+                                        I40E_GLQF_ORT_FLX_PAYLOAD_MASK)
+
+#define I40E_ORT_PREP_VAL(idx, count, payload) (I40E_ORT_SET_IDX(idx) | \
+                                               I40E_ORT_SET_COUNT(count) | \
+                                               I40E_ORT_SET_PAYLOAD(payload))
+
+#define I40E_L3_GLQF_ORT_IDX           34
+#define I40E_L4_GLQF_ORT_IDX           35
+
+/* Flex PIT register index */
+#define I40E_FLEX_PIT_IDX_START_L2     0
+#define I40E_FLEX_PIT_IDX_START_L3     3
+#define I40E_FLEX_PIT_IDX_START_L4     6
+
+#define I40E_FLEX_PIT_TABLE_SIZE       3
+
+#define I40E_FLEX_DEST_UNUSED          63
+
+#define I40E_FLEX_INDEX_ENTRIES                8
+
+/* Flex MASK to disable all flexible entries */
+#define I40E_FLEX_INPUT_MASK   (I40E_FLEX_50_MASK | I40E_FLEX_51_MASK | \
+                                I40E_FLEX_52_MASK | I40E_FLEX_53_MASK | \
+                                I40E_FLEX_54_MASK | I40E_FLEX_55_MASK | \
+                                I40E_FLEX_56_MASK | I40E_FLEX_57_MASK)
+
+struct i40e_flex_pit {
+       struct list_head list;
+       u16 src_offset;
+       u8 pit_index;
+};
+
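
The FLX_PIT macros above all follow the same shift-and-mask composition. Below is a self-contained sketch of that composition with made-up shift/mask constants; the real values come from the i40e register definitions.

/* Hedged sketch of the I40E_FLEX_PREP_VAL() style of register packing.
 * DEMO_* constants are invented for illustration only.
 */
#include <stdint.h>
#include <stdio.h>

#define DEMO_SRC_OFF_SHIFT 0
#define DEMO_SRC_OFF_MASK  (0x1Fu << DEMO_SRC_OFF_SHIFT)
#define DEMO_FSIZE_SHIFT   5
#define DEMO_FSIZE_MASK    (0x1Fu << DEMO_FSIZE_SHIFT)
#define DEMO_DEST_SHIFT    10
#define DEMO_DEST_MASK     (0x3Fu << DEMO_DEST_SHIFT)

static uint32_t demo_flex_prep_val(uint32_t dst, uint32_t fsize, uint32_t src)
{
        /* each field is shifted into place, then masked to its width */
        return ((dst << DEMO_DEST_SHIFT) & DEMO_DEST_MASK) |
               ((fsize << DEMO_FSIZE_SHIFT) & DEMO_FSIZE_MASK) |
               ((src << DEMO_SRC_OFF_SHIFT) & DEMO_SRC_OFF_MASK);
}

int main(void)
{
        /* program one word from payload offset 3 into destination word 50
         * (values purely illustrative) */
        printf("FLX_PIT = %#x\n", demo_flex_prep_val(50, 1, 3));
        return 0;
}
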
 /* struct that defines the Ethernet device */
 struct i40e_pf {
        struct pci_dev *pdev;
@@ -262,10 +332,6 @@ struct i40e_pf {
        u16 num_vmdq_msix;         /* num queue vectors per vmdq pool */
        u16 num_req_vfs;           /* num VFs requested for this VF */
        u16 num_vf_qps;            /* num queue pairs per VF */
-#ifdef I40E_FCOE
-       u16 num_fcoe_qps;          /* num fcoe queues this PF has set up */
-       u16 num_fcoe_msix;         /* num queue vectors per fcoe pool */
-#endif /* I40E_FCOE */
        u16 num_lan_qps;           /* num lan queues this PF has set up */
        u16 num_lan_msix;          /* num queue vectors for the base PF vsi */
        u16 num_fdsb_msix;         /* num queue vectors for sideband Fdir */
@@ -285,7 +351,23 @@ struct i40e_pf {
        u32 fd_flush_cnt;
        u32 fd_add_err;
        u32 fd_atr_cnt;
-       u32 fd_tcp_rule;
+
+       /* Book-keeping of side-band filter count per flow-type.
+        * This is used to detect and handle input set changes for
+        * respective flow-type.
+        */
+       u16 fd_tcp4_filter_cnt;
+       u16 fd_udp4_filter_cnt;
+       u16 fd_sctp4_filter_cnt;
+       u16 fd_ip4_filter_cnt;
+
+       /* Flexible filter table values that need to be programmed into
+        * hardware, which expects L3 and L4 to be programmed separately. We
+        * need to ensure that the values are in ascending order and don't have
+        * duplicates, so we track each L3 and L4 values in separate lists.
+        */
+       struct list_head l3_flex_pit_list;
+       struct list_head l4_flex_pit_list;
 
        struct i40e_udp_port_config udp_ports[I40E_MAX_PF_UDP_OFFLOAD_PORTS];
        u16 pending_udp_bitmap;
@@ -307,17 +389,9 @@ struct i40e_pf {
 #define I40E_FLAG_MSIX_ENABLED                 BIT_ULL(3)
 #define I40E_FLAG_RSS_ENABLED                  BIT_ULL(6)
 #define I40E_FLAG_VMDQ_ENABLED                 BIT_ULL(7)
-#define I40E_FLAG_FDIR_REQUIRES_REINIT         BIT_ULL(8)
-#define I40E_FLAG_NEED_LINK_UPDATE             BIT_ULL(9)
 #define I40E_FLAG_IWARP_ENABLED                        BIT_ULL(10)
-#ifdef I40E_FCOE
-#define I40E_FLAG_FCOE_ENABLED                 BIT_ULL(11)
-#endif /* I40E_FCOE */
-#define I40E_FLAG_CLEAN_ADMINQ                 BIT_ULL(14)
 #define I40E_FLAG_FILTER_SYNC                  BIT_ULL(15)
 #define I40E_FLAG_SERVICE_CLIENT_REQUESTED     BIT_ULL(16)
-#define I40E_FLAG_PROCESS_MDD_EVENT            BIT_ULL(17)
-#define I40E_FLAG_PROCESS_VFLR_EVENT           BIT_ULL(18)
 #define I40E_FLAG_SRIOV_ENABLED                        BIT_ULL(19)
 #define I40E_FLAG_DCB_ENABLED                  BIT_ULL(20)
 #define I40E_FLAG_FD_SB_ENABLED                        BIT_ULL(21)
@@ -348,16 +422,20 @@ struct i40e_pf {
 #define I40E_FLAG_TRUE_PROMISC_SUPPORT         BIT_ULL(51)
 #define I40E_FLAG_HAVE_CRT_RETIMER             BIT_ULL(52)
 #define I40E_FLAG_PTP_L4_CAPABLE               BIT_ULL(53)
-#define I40E_FLAG_WOL_MC_MAGIC_PKT_WAKE                BIT_ULL(54)
+#define I40E_FLAG_CLIENT_RESET                 BIT_ULL(54)
 #define I40E_FLAG_TEMP_LINK_POLLING            BIT_ULL(55)
+#define I40E_FLAG_CLIENT_L2_CHANGE             BIT_ULL(56)
+#define I40E_FLAG_WOL_MC_MAGIC_PKT_WAKE                BIT_ULL(57)
+#define I40E_FLAG_LEGACY_RX                    BIT_ULL(58)
+
+       /* Tracks features that are disabled due to hw limitations.
+        * If a bit is set here, it means that the corresponding
+        * bit in the 'flags' field is cleared, i.e. that feature
+        * is disabled.
+        */
+       u64 hw_disabled_flags;
 
-       /* tracks features that get auto disabled by errors */
-       u64 auto_disable_flags;
-
-#ifdef I40E_FCOE
-       struct i40e_fcoe fcoe;
-
-#endif /* I40E_FCOE */
+       struct i40e_client_instance *cinst;
        bool stat_offsets_loaded;
        struct i40e_hw_port_stats stats;
        struct i40e_hw_port_stats stats_offsets;
@@ -412,8 +490,6 @@ struct i40e_pf {
         */
        u16 dcbx_cap;
 
-       u32 fcoe_hmc_filt_num;
-       u32 fcoe_hmc_cntx_num;
        struct i40e_filter_control_settings filter_settings;
 
        struct ptp_clock *ptp_clock;
@@ -533,16 +609,10 @@ struct i40e_vsi {
        struct rtnl_link_stats64 net_stats_offsets;
        struct i40e_eth_stats eth_stats;
        struct i40e_eth_stats eth_stats_offsets;
-#ifdef I40E_FCOE
-       struct i40e_fcoe_stats fcoe_stats;
-       struct i40e_fcoe_stats fcoe_stats_offsets;
-       bool fcoe_stat_offsets_loaded;
-#endif
        u32 tx_restart;
        u32 tx_busy;
        u64 tx_linearize;
        u64 tx_force_wb;
-       u64 tx_lost_interrupt;
        u32 rx_buf_failed;
        u32 rx_page_failed;
 
@@ -628,9 +698,6 @@ struct i40e_q_vector {
 
        u8 num_ringpairs;       /* total number of ring pairs in vector */
 
-#define I40E_Q_VECTOR_HUNG_DETECT 0 /* Bit Index for hung detection logic */
-       unsigned long hung_detected; /* Set/Reset for hung_detection logic */
-
        cpumask_t affinity_mask;
        struct irq_affinity_notify affinity_notify;
 
@@ -719,13 +786,50 @@ static inline int i40e_get_fd_cnt_all(struct i40e_pf *pf)
        return pf->hw.fdir_shared_filter_count + pf->fdir_pf_filter_count;
 }
 
+/**
+ * i40e_read_fd_input_set - reads value of flow director input set register
+ * @pf: pointer to the PF struct
+ * @addr: register addr
+ *
+ * This function reads the value of the flow director input set register
+ * specified by 'addr' (which is specific to the flow type).
+ **/
+static inline u64 i40e_read_fd_input_set(struct i40e_pf *pf, u16 addr)
+{
+       u64 val;
+
+       val = i40e_read_rx_ctl(&pf->hw, I40E_PRTQF_FD_INSET(addr, 1));
+       val <<= 32;
+       val += i40e_read_rx_ctl(&pf->hw, I40E_PRTQF_FD_INSET(addr, 0));
+
+       return val;
+}
+
+/**
+ * i40e_write_fd_input_set - writes value into flow director input set register
+ * @pf: pointer to the PF struct
+ * @addr: register addr
+ * @val: value to be written
+ *
+ * This function writes the specified value to the flow director input set
+ * register specified by 'addr' (which is specific to the flow type).
+ **/
+static inline void i40e_write_fd_input_set(struct i40e_pf *pf,
+                                          u16 addr, u64 val)
+{
+       i40e_write_rx_ctl(&pf->hw, I40E_PRTQF_FD_INSET(addr, 1),
+                         (u32)(val >> 32));
+       i40e_write_rx_ctl(&pf->hw, I40E_PRTQF_FD_INSET(addr, 0),
+                         (u32)(val & 0xFFFFFFFFULL));
+}
+
 /* needed by i40e_ethtool.c */
 int i40e_up(struct i40e_vsi *vsi);
 void i40e_down(struct i40e_vsi *vsi);
 extern const char i40e_driver_name[];
 extern const char i40e_driver_version_str[];
 void i40e_do_reset_safe(struct i40e_pf *pf, u32 reset_flags);
-void i40e_do_reset(struct i40e_pf *pf, u32 reset_flags);
+void i40e_do_reset(struct i40e_pf *pf, u32 reset_flags, bool lock_acquired);
 int i40e_config_rss(struct i40e_vsi *vsi, u8 *seed, u8 *lut, u16 lut_size);
 int i40e_get_rss(struct i40e_vsi *vsi, u8 *seed, u8 *lut, u16 lut_size);
 void i40e_fill_rss_lut(struct i40e_pf *pf, u8 *lut,
@@ -773,11 +877,6 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi);
 struct i40e_vsi *i40e_vsi_setup(struct i40e_pf *pf, u8 type,
                                u16 uplink, u32 param1);
 int i40e_vsi_release(struct i40e_vsi *vsi);
-#ifdef I40E_FCOE
-void i40e_vsi_setup_queue_map(struct i40e_vsi *vsi,
-                             struct i40e_vsi_context *ctxt,
-                             u8 enabled_tc, bool is_add);
-#endif
 void i40e_service_event_schedule(struct i40e_pf *pf);
 void i40e_notify_client_of_vf_msg(struct i40e_vsi *vsi, u32 vf_id,
                                  u8 *msg, u16 len);
@@ -813,8 +912,7 @@ void i40e_notify_client_of_l2_param_changes(struct i40e_vsi *vsi);
 void i40e_notify_client_of_netdev_close(struct i40e_vsi *vsi, bool reset);
 void i40e_notify_client_of_vf_enable(struct i40e_pf *pf, u32 num_vfs);
 void i40e_notify_client_of_vf_reset(struct i40e_pf *pf, u32 vf_id);
-int i40e_vf_client_capable(struct i40e_pf *pf, u32 vf_id,
-                          enum i40e_client_type type);
+int i40e_vf_client_capable(struct i40e_pf *pf, u32 vf_id);
 /**
  * i40e_irq_dynamic_enable - Enable default interrupt generation settings
  * @vsi: pointer to a vsi
@@ -838,20 +936,7 @@ static inline void i40e_irq_dynamic_enable(struct i40e_vsi *vsi, int vector)
 
 void i40e_irq_dynamic_disable_icr0(struct i40e_pf *pf);
 void i40e_irq_dynamic_enable_icr0(struct i40e_pf *pf, bool clearpba);
-#ifdef I40E_FCOE
-void i40e_get_netdev_stats_struct(struct net_device *netdev,
-                                 struct rtnl_link_stats64 *storage);
-int i40e_set_mac(struct net_device *netdev, void *p);
-void i40e_set_rx_mode(struct net_device *netdev);
-#endif
 int i40e_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd);
-#ifdef I40E_FCOE
-void i40e_tx_timeout(struct net_device *netdev);
-int i40e_vlan_rx_add_vid(struct net_device *netdev,
-                        __always_unused __be16 proto, u16 vid);
-int i40e_vlan_rx_kill_vid(struct net_device *netdev,
-                         __always_unused __be16 proto, u16 vid);
-#endif
 int i40e_open(struct net_device *netdev);
 int i40e_close(struct net_device *netdev);
 int i40e_vsi_open(struct i40e_vsi *vsi);
@@ -865,25 +950,6 @@ struct i40e_mac_filter *i40e_add_mac_filter(struct i40e_vsi *vsi,
 int i40e_del_mac_filter(struct i40e_vsi *vsi, const u8 *macaddr);
 bool i40e_is_vsi_in_vlan(struct i40e_vsi *vsi);
 struct i40e_mac_filter *i40e_find_mac(struct i40e_vsi *vsi, const u8 *macaddr);
-#ifdef I40E_FCOE
-int __i40e_setup_tc(struct net_device *netdev, u32 handle, __be16 proto,
-                   struct tc_to_netdev *tc);
-void i40e_netpoll(struct net_device *netdev);
-int i40e_fcoe_enable(struct net_device *netdev);
-int i40e_fcoe_disable(struct net_device *netdev);
-int i40e_fcoe_vsi_init(struct i40e_vsi *vsi, struct i40e_vsi_context *ctxt);
-u8 i40e_get_fcoe_tc_map(struct i40e_pf *pf);
-void i40e_fcoe_config_netdev(struct net_device *netdev, struct i40e_vsi *vsi);
-void i40e_fcoe_vsi_setup(struct i40e_pf *pf);
-void i40e_init_pf_fcoe(struct i40e_pf *pf);
-int i40e_fcoe_setup_ddp_resources(struct i40e_vsi *vsi);
-void i40e_fcoe_free_ddp_resources(struct i40e_vsi *vsi);
-int i40e_fcoe_handle_offload(struct i40e_ring *rx_ring,
-                            union i40e_rx_desc *rx_desc,
-                            struct sk_buff *skb);
-void i40e_fcoe_handle_status(struct i40e_ring *rx_ring,
-                            union i40e_rx_desc *rx_desc, u8 prog_id);
-#endif /* I40E_FCOE */
 void i40e_vlan_stripping_enable(struct i40e_vsi *vsi);
 #ifdef CONFIG_I40E_DCB
 void i40e_dcbnl_flush_apps(struct i40e_pf *pf,
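
The two inline helpers above combine the split 32-bit halves of the I40E_PRTQF_FD_INSET register into one 64-bit input set value per flow type. A hedged usage sketch (assumptions: I40E_FILTER_PCTYPE_NONF_IPV4_TCP, used elsewhere in the driver, is a valid 'addr'; the function name is made up):

	/* Hypothetical caller: mask off all flexible-payload words
	 * (FLEX_50..FLEX_57) from the TCPv4 input set.
	 */
	static void example_clear_flex_words(struct i40e_pf *pf)
	{
		u64 inset;

		inset = i40e_read_fd_input_set(pf,
					       I40E_FILTER_PCTYPE_NONF_IPV4_TCP);
		inset &= ~I40E_FLEX_INPUT_MASK;
		i40e_write_fd_input_set(pf, I40E_FILTER_PCTYPE_NONF_IPV4_TCP,
					inset);
	}
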
index 451f48b7540aa0360615599b6681e2d31f2b6554..251074c677c497ceac12632976baf15af699a525 100644
@@ -132,6 +132,10 @@ enum i40e_admin_queue_opc {
        i40e_aqc_opc_list_func_capabilities     = 0x000A,
        i40e_aqc_opc_list_dev_capabilities      = 0x000B,
 
+       /* Proxy commands */
+       i40e_aqc_opc_set_proxy_config           = 0x0104,
+       i40e_aqc_opc_set_ns_proxy_table_entry   = 0x0105,
+
        /* LAA */
        i40e_aqc_opc_mac_address_read   = 0x0107,
        i40e_aqc_opc_mac_address_write  = 0x0108,
@@ -139,6 +143,10 @@ enum i40e_admin_queue_opc {
        /* PXE */
        i40e_aqc_opc_clear_pxe_mode     = 0x0110,
 
+       /* WoL commands */
+       i40e_aqc_opc_set_wol_filter     = 0x0120,
+       i40e_aqc_opc_get_wake_reason    = 0x0121,
+
        /* internal switch commands */
        i40e_aqc_opc_get_switch_config          = 0x0200,
        i40e_aqc_opc_add_statistics             = 0x0201,
@@ -177,6 +185,7 @@ enum i40e_admin_queue_opc {
        i40e_aqc_opc_remove_control_packet_filter       = 0x025B,
        i40e_aqc_opc_add_cloud_filters          = 0x025C,
        i40e_aqc_opc_remove_cloud_filters       = 0x025D,
+       i40e_aqc_opc_clear_wol_switch_filters   = 0x025E,
 
        i40e_aqc_opc_add_mirror_rule    = 0x0260,
        i40e_aqc_opc_delete_mirror_rule = 0x0261,
@@ -563,6 +572,56 @@ struct i40e_aqc_clear_pxe {
 
 I40E_CHECK_CMD_LENGTH(i40e_aqc_clear_pxe);
 
+/* Set WoL Filter (0x0120) */
+
+struct i40e_aqc_set_wol_filter {
+       __le16 filter_index;
+#define I40E_AQC_MAX_NUM_WOL_FILTERS   8
+#define I40E_AQC_SET_WOL_FILTER_TYPE_MAGIC_SHIFT       15
+#define I40E_AQC_SET_WOL_FILTER_TYPE_MAGIC_MASK        (0x1 << \
+               I40E_AQC_SET_WOL_FILTER_TYPE_MAGIC_SHIFT)
+
+#define I40E_AQC_SET_WOL_FILTER_INDEX_SHIFT            0
+#define I40E_AQC_SET_WOL_FILTER_INDEX_MASK     (0x7 << \
+               I40E_AQC_SET_WOL_FILTER_INDEX_SHIFT)
+       __le16 cmd_flags;
+#define I40E_AQC_SET_WOL_FILTER                                0x8000
+#define I40E_AQC_SET_WOL_FILTER_NO_TCO_WOL             0x4000
+#define I40E_AQC_SET_WOL_FILTER_ACTION_CLEAR           0
+#define I40E_AQC_SET_WOL_FILTER_ACTION_SET             1
+       __le16 valid_flags;
+#define I40E_AQC_SET_WOL_FILTER_ACTION_VALID           0x8000
+#define I40E_AQC_SET_WOL_FILTER_NO_TCO_ACTION_VALID    0x4000
+       u8 reserved[2];
+       __le32  address_high;
+       __le32  address_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_set_wol_filter);
+
+struct i40e_aqc_set_wol_filter_data {
+       u8 filter[128];
+       u8 mask[16];
+};
+
+I40E_CHECK_STRUCT_LEN(0x90, i40e_aqc_set_wol_filter_data);
+
+/* Get Wake Reason (0x0121) */
+
+struct i40e_aqc_get_wake_reason_completion {
+       u8 reserved_1[2];
+       __le16 wake_reason;
+#define I40E_AQC_GET_WAKE_UP_REASON_WOL_REASON_MATCHED_INDEX_SHIFT     0
+#define I40E_AQC_GET_WAKE_UP_REASON_WOL_REASON_MATCHED_INDEX_MASK (0xFF << \
+               I40E_AQC_GET_WAKE_UP_REASON_WOL_REASON_MATCHED_INDEX_SHIFT)
+#define I40E_AQC_GET_WAKE_UP_REASON_WOL_REASON_RESERVED_SHIFT  8
+#define I40E_AQC_GET_WAKE_UP_REASON_WOL_REASON_RESERVED_MASK   (0xFF << \
+               I40E_AQC_GET_WAKE_UP_REASON_WOL_REASON_RESERVED_SHIFT)
+       u8 reserved_2[12];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_get_wake_reason_completion);
+
 /* Switch configuration commands (0x02xx) */
 
 /* Used by many indirect commands that only pass an seid and a buffer in the
@@ -645,6 +704,8 @@ struct i40e_aqc_set_port_parameters {
 #define I40E_AQ_SET_P_PARAMS_PAD_SHORT_PACKETS 2 /* must set! */
 #define I40E_AQ_SET_P_PARAMS_DOUBLE_VLAN_ENA   4
        __le16  bad_frame_vsi;
+#define I40E_AQ_SET_P_PARAMS_BFRAME_SEID_SHIFT 0x0
+#define I40E_AQ_SET_P_PARAMS_BFRAME_SEID_MASK  0x3FF
        __le16  default_seid;        /* reserved for command */
        u8      reserved[10];
 };
@@ -696,6 +757,7 @@ I40E_CHECK_STRUCT_LEN(0x10, i40e_aqc_switch_resource_alloc_element_resp);
 /* Set Switch Configuration (direct 0x0205) */
 struct i40e_aqc_set_switch_config {
        __le16  flags;
+/* flags used for both fields below */
 #define I40E_AQ_SET_SWITCH_CFG_PROMISC         0x0001
 #define I40E_AQ_SET_SWITCH_CFG_L2_FILTER       0x0002
        __le16  valid_flags;
@@ -1844,11 +1906,12 @@ struct i40e_aqc_get_link_status {
 #define I40E_AQ_CONFIG_FEC_RS_ENA      0x02
 #define I40E_AQ_CONFIG_CRC_ENA         0x04
 #define I40E_AQ_CONFIG_PACING_MASK     0x78
-       u8      external_power_ability;
+       u8      power_desc;
 #define I40E_AQ_LINK_POWER_CLASS_1     0x00
 #define I40E_AQ_LINK_POWER_CLASS_2     0x01
 #define I40E_AQ_LINK_POWER_CLASS_3     0x02
 #define I40E_AQ_LINK_POWER_CLASS_4     0x03
+#define I40E_AQ_PWR_CLASS_MASK         0x03
        u8      reserved[4];
 };
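
The Set WoL Filter descriptor above packs a filter index and a magic-packet type bit into the same __le16 via the SHIFT/MASK pairs. A sketch of filling those fields (the function name is hypothetical and the real admin-queue submission path is not shown here):

	/* Hypothetical illustration of packing the 0x0120 descriptor;
	 * cpu_to_le16() matches the __le16 members.
	 */
	static void example_fill_wol_filter(struct i40e_aqc_set_wol_filter *cmd,
					    u8 index, bool magic_pkt)
	{
		u16 idx = (index << I40E_AQC_SET_WOL_FILTER_INDEX_SHIFT) &
			  I40E_AQC_SET_WOL_FILTER_INDEX_MASK;

		if (magic_pkt)
			idx |= I40E_AQC_SET_WOL_FILTER_TYPE_MAGIC_MASK;

		cmd->filter_index = cpu_to_le16(idx);
		cmd->cmd_flags = cpu_to_le16(I40E_AQC_SET_WOL_FILTER);
		cmd->valid_flags =
			cpu_to_le16(I40E_AQC_SET_WOL_FILTER_ACTION_VALID);
	}
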
 
index d570219efd9f33b8934fdfe8ad3e256fb78a2e97..eb2896fd52a628817393fbc98ee0c6f28bb15927 100644
 #include "i40e_client.h"
 
 static const char i40e_client_interface_version_str[] = I40E_CLIENT_VERSION_STR;
-
+static struct i40e_client *registered_client;
 static LIST_HEAD(i40e_devices);
 static DEFINE_MUTEX(i40e_device_mutex);
 
-static LIST_HEAD(i40e_clients);
-static DEFINE_MUTEX(i40e_client_mutex);
-
-static LIST_HEAD(i40e_client_instances);
-static DEFINE_MUTEX(i40e_client_instance_mutex);
-
 static int i40e_client_virtchnl_send(struct i40e_info *ldev,
                                     struct i40e_client *client,
                                     u32 vf_id, u8 *msg, u16 len);
@@ -66,28 +60,6 @@ static struct i40e_ops i40e_lan_ops = {
        .update_vsi_ctxt = i40e_client_update_vsi_ctxt,
 };
 
-/**
- * i40e_client_type_to_vsi_type - convert client type to vsi type
- * @client_type: the i40e_client type
- *
- * returns the related vsi type value
- **/
-static
-enum i40e_vsi_type i40e_client_type_to_vsi_type(enum i40e_client_type type)
-{
-       switch (type) {
-       case I40E_CLIENT_IWARP:
-               return I40E_VSI_IWARP;
-
-       case I40E_CLIENT_VMDQ2:
-               return I40E_VSI_VMDQ2;
-
-       default:
-               pr_err("i40e: Client type unknown\n");
-               return I40E_VSI_TYPE_UNKNOWN;
-       }
-}
-
 /**
  * i40e_client_get_params - Get the params that can change at runtime
  * @vsi: the VSI with the message
@@ -134,31 +106,22 @@ int i40e_client_get_params(struct i40e_vsi *vsi, struct i40e_params *params)
 void
 i40e_notify_client_of_vf_msg(struct i40e_vsi *vsi, u32 vf_id, u8 *msg, u16 len)
 {
-       struct i40e_client_instance *cdev;
+       struct i40e_pf *pf = vsi->back;
+       struct i40e_client_instance *cdev = pf->cinst;
 
-       if (!vsi)
+       if (!cdev || !cdev->client)
+               return;
+       if (!cdev->client->ops || !cdev->client->ops->virtchnl_receive) {
+               dev_dbg(&pf->pdev->dev,
+                       "Cannot locate client instance virtual channel receive routine\n");
                return;
-       mutex_lock(&i40e_client_instance_mutex);
-       list_for_each_entry(cdev, &i40e_client_instances, list) {
-               if (cdev->lan_info.pf == vsi->back) {
-                       if (!cdev->client ||
-                           !cdev->client->ops ||
-                           !cdev->client->ops->virtchnl_receive) {
-                               dev_dbg(&vsi->back->pdev->dev,
-                                       "Cannot locate client instance virtual channel receive routine\n");
-                               continue;
-                       }
-                       if (!test_bit(__I40E_CLIENT_INSTANCE_OPENED,
-                                     &cdev->state)) {
-                               dev_dbg(&vsi->back->pdev->dev, "Client is not open, abort virtchnl_receive\n");
-                               continue;
-                       }
-                       cdev->client->ops->virtchnl_receive(&cdev->lan_info,
-                                                           cdev->client,
-                                                           vf_id, msg, len);
-               }
        }
-       mutex_unlock(&i40e_client_instance_mutex);
+       if (!test_bit(__I40E_CLIENT_INSTANCE_OPENED, &cdev->state)) {
+               dev_dbg(&pf->pdev->dev, "Client is not open, abort virtchnl_receive\n");
+               return;
+       }
+       cdev->client->ops->virtchnl_receive(&cdev->lan_info, cdev->client,
+                                           vf_id, msg, len);
 }
 
 /**
@@ -169,39 +132,30 @@ i40e_notify_client_of_vf_msg(struct i40e_vsi *vsi, u32 vf_id, u8 *msg, u16 len)
  **/
 void i40e_notify_client_of_l2_param_changes(struct i40e_vsi *vsi)
 {
-       struct i40e_client_instance *cdev;
+       struct i40e_pf *pf = vsi->back;
+       struct i40e_client_instance *cdev = pf->cinst;
        struct i40e_params params;
 
-       if (!vsi)
+       if (!cdev || !cdev->client)
                return;
-       mutex_lock(&i40e_client_instance_mutex);
-       list_for_each_entry(cdev, &i40e_client_instances, list) {
-               if (cdev->lan_info.pf == vsi->back) {
-                       if (!cdev->client ||
-                           !cdev->client->ops ||
-                           !cdev->client->ops->l2_param_change) {
-                               dev_dbg(&vsi->back->pdev->dev,
-                                       "Cannot locate client instance l2_param_change routine\n");
-                               continue;
-                       }
+       if (!cdev->client->ops || !cdev->client->ops->l2_param_change) {
+               dev_dbg(&vsi->back->pdev->dev,
+                       "Cannot locate client instance l2_param_change routine\n");
+               return;
+       }
+       if (!test_bit(__I40E_CLIENT_INSTANCE_OPENED, &cdev->state)) {
+               dev_dbg(&vsi->back->pdev->dev, "Client is not open, abort l2 param change\n");
+               return;
+       }
        memset(&params, 0, sizeof(params));
        i40e_client_get_params(vsi, &params);
-                       if (!test_bit(__I40E_CLIENT_INSTANCE_OPENED,
-                                     &cdev->state)) {
-                               dev_dbg(&vsi->back->pdev->dev, "Client is not open, abort l2 param change\n");
-                               continue;
-                       }
-                       cdev->lan_info.params = params;
-                       cdev->client->ops->l2_param_change(&cdev->lan_info,
-                                                          cdev->client,
-                                                          &params);
-               }
-       }
-       mutex_unlock(&i40e_client_instance_mutex);
+       memcpy(&cdev->lan_info.params, &params, sizeof(struct i40e_params));
+       cdev->client->ops->l2_param_change(&cdev->lan_info, cdev->client,
+                                          &params);
 }
 
 /**
- * i40e_client_release_qvlist
+ * i40e_client_release_qvlist - release MSI-X vector mapping for client
  * @ldev: pointer to L2 context.
  *
  **/
@@ -237,26 +191,19 @@ static void i40e_client_release_qvlist(struct i40e_info *ldev)
  **/
 void i40e_notify_client_of_netdev_close(struct i40e_vsi *vsi, bool reset)
 {
-       struct i40e_client_instance *cdev;
+       struct i40e_pf *pf = vsi->back;
+       struct i40e_client_instance *cdev = pf->cinst;
 
-       if (!vsi)
+       if (!cdev || !cdev->client)
+               return;
+       if (!cdev->client->ops || !cdev->client->ops->close) {
+               dev_dbg(&vsi->back->pdev->dev,
+                       "Cannot locate client instance close routine\n");
                return;
-       mutex_lock(&i40e_client_instance_mutex);
-       list_for_each_entry(cdev, &i40e_client_instances, list) {
-               if (cdev->lan_info.netdev == vsi->netdev) {
-                       if (!cdev->client ||
-                           !cdev->client->ops || !cdev->client->ops->close) {
-                               dev_dbg(&vsi->back->pdev->dev,
-                                       "Cannot locate client instance close routine\n");
-                               continue;
-                       }
-                       cdev->client->ops->close(&cdev->lan_info, cdev->client,
-                                                reset);
-                       clear_bit(__I40E_CLIENT_INSTANCE_OPENED, &cdev->state);
-                       i40e_client_release_qvlist(&cdev->lan_info);
-               }
        }
-       mutex_unlock(&i40e_client_instance_mutex);
+       cdev->client->ops->close(&cdev->lan_info, cdev->client, reset);
+       clear_bit(__I40E_CLIENT_INSTANCE_OPENED, &cdev->state);
+       i40e_client_release_qvlist(&cdev->lan_info);
 }
 
 /**
@@ -268,30 +215,20 @@ void i40e_notify_client_of_netdev_close(struct i40e_vsi *vsi, bool reset)
  **/
 void i40e_notify_client_of_vf_reset(struct i40e_pf *pf, u32 vf_id)
 {
-       struct i40e_client_instance *cdev;
+       struct i40e_client_instance *cdev = pf->cinst;
 
-       if (!pf)
+       if (!cdev || !cdev->client)
+               return;
+       if (!cdev->client->ops || !cdev->client->ops->vf_reset) {
+               dev_dbg(&pf->pdev->dev,
+                       "Cannot locate client instance VF reset routine\n");
+               return;
+       }
+       if (!test_bit(__I40E_CLIENT_INSTANCE_OPENED, &cdev->state)) {
+               dev_dbg(&pf->pdev->dev, "Client is not open, abort vf-reset\n");
                return;
-       mutex_lock(&i40e_client_instance_mutex);
-       list_for_each_entry(cdev, &i40e_client_instances, list) {
-               if (cdev->lan_info.pf == pf) {
-                       if (!cdev->client ||
-                           !cdev->client->ops ||
-                           !cdev->client->ops->vf_reset) {
-                               dev_dbg(&pf->pdev->dev,
-                                       "Cannot locate client instance VF reset routine\n");
-                               continue;
-                       }
-                       if (!test_bit(__I40E_CLIENT_INSTANCE_OPENED,
-                                     &cdev->state)) {
-                               dev_dbg(&pf->pdev->dev, "Client is not open, abort vf-reset\n");
-                               continue;
-                       }
-                       cdev->client->ops->vf_reset(&cdev->lan_info,
-                                                   cdev->client, vf_id);
-               }
        }
-       mutex_unlock(&i40e_client_instance_mutex);
+       cdev->client->ops->vf_reset(&cdev->lan_info, cdev->client, vf_id);
 }
 
 /**
@@ -303,30 +240,21 @@ void i40e_notify_client_of_vf_reset(struct i40e_pf *pf, u32 vf_id)
  **/
 void i40e_notify_client_of_vf_enable(struct i40e_pf *pf, u32 num_vfs)
 {
-       struct i40e_client_instance *cdev;
+       struct i40e_client_instance *cdev = pf->cinst;
 
-       if (!pf)
+       if (!cdev || !cdev->client)
+               return;
+       if (!cdev->client->ops || !cdev->client->ops->vf_enable) {
+               dev_dbg(&pf->pdev->dev,
+                       "Cannot locate client instance VF enable routine\n");
+               return;
+       }
+       if (!test_bit(__I40E_CLIENT_INSTANCE_OPENED, &cdev->state)) {
+               dev_dbg(&pf->pdev->dev, "Client is not open, abort vf-enable\n");
                return;
-       mutex_lock(&i40e_client_instance_mutex);
-       list_for_each_entry(cdev, &i40e_client_instances, list) {
-               if (cdev->lan_info.pf == pf) {
-                       if (!cdev->client ||
-                           !cdev->client->ops ||
-                           !cdev->client->ops->vf_enable) {
-                               dev_dbg(&pf->pdev->dev,
-                                       "Cannot locate client instance VF enable routine\n");
-                               continue;
-                       }
-                       if (!test_bit(__I40E_CLIENT_INSTANCE_OPENED,
-                                     &cdev->state)) {
-                               dev_dbg(&pf->pdev->dev, "Client is not open, abort vf-enable\n");
-                               continue;
-                       }
-                       cdev->client->ops->vf_enable(&cdev->lan_info,
-                                                    cdev->client, num_vfs);
-               }
        }
-       mutex_unlock(&i40e_client_instance_mutex);
+       cdev->client->ops->vf_enable(&cdev->lan_info, cdev->client, num_vfs);
 }
 
 /**
@@ -337,37 +265,25 @@ void i40e_notify_client_of_vf_enable(struct i40e_pf *pf, u32 num_vfs)
 * If there is a client attached to this PF, call its
 * vf_capable routine
  **/
-int i40e_vf_client_capable(struct i40e_pf *pf, u32 vf_id,
-                          enum i40e_client_type type)
+int i40e_vf_client_capable(struct i40e_pf *pf, u32 vf_id)
 {
-       struct i40e_client_instance *cdev;
+       struct i40e_client_instance *cdev = pf->cinst;
        int capable = false;
 
-       if (!pf)
-               return false;
-       mutex_lock(&i40e_client_instance_mutex);
-       list_for_each_entry(cdev, &i40e_client_instances, list) {
-               if (cdev->lan_info.pf == pf) {
-                       if (!cdev->client ||
-                           !cdev->client->ops ||
-                           !cdev->client->ops->vf_capable ||
-                           !(cdev->client->type == type)) {
-                               dev_dbg(&pf->pdev->dev,
-                                       "Cannot locate client instance VF capability routine\n");
-                               continue;
-                       }
-                       if (!test_bit(__I40E_CLIENT_INSTANCE_OPENED,
-                                     &cdev->state)) {
-                               dev_dbg(&pf->pdev->dev, "Client is not open, abort vf-capable\n");
-                               continue;
-                       }
-                       capable = cdev->client->ops->vf_capable(&cdev->lan_info,
-                                                               cdev->client,
-                                                               vf_id);
-                       break;
-               }
+       if (!cdev || !cdev->client)
+               goto out;
+       if (!cdev->client->ops || !cdev->client->ops->vf_capable) {
+               dev_info(&pf->pdev->dev,
+                        "Cannot locate client instance VF capability routine\n");
+               goto out;
        }
-       mutex_unlock(&i40e_client_instance_mutex);
+       if (!test_bit(__I40E_CLIENT_INSTANCE_OPENED, &cdev->state))
+               goto out;
+
+       capable = cdev->client->ops->vf_capable(&cdev->lan_info,
+                                               cdev->client,
+                                               vf_id);
+out:
        return capable;
 }
 
@@ -377,27 +293,19 @@ int i40e_vf_client_capable(struct i40e_pf *pf, u32 vf_id,
  *
- * Returns cdev ptr on success or if already exists, NULL on failure
  **/
-static
-struct i40e_client_instance *i40e_client_add_instance(struct i40e_pf *pf,
-                                                    struct i40e_client *client,
-                                                    bool *existing)
+static void i40e_client_add_instance(struct i40e_pf *pf)
 {
-       struct i40e_client_instance *cdev;
+       struct i40e_client_instance *cdev = NULL;
        struct netdev_hw_addr *mac = NULL;
        struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
 
-       mutex_lock(&i40e_client_instance_mutex);
-       list_for_each_entry(cdev, &i40e_client_instances, list) {
-               if ((cdev->lan_info.pf == pf) && (cdev->client == client)) {
-                       *existing = true;
-                       goto out;
-               }
-       }
+       if (!registered_client || pf->cinst)
+               return;
+
        cdev = kzalloc(sizeof(*cdev), GFP_KERNEL);
        if (!cdev)
-               goto out;
+               return;
 
        cdev->lan_info.pf = (void *)pf;
        cdev->lan_info.netdev = vsi->netdev;
@@ -417,7 +325,7 @@ struct i40e_client_instance *i40e_client_add_instance(struct i40e_pf *pf,
        if (i40e_client_get_params(vsi, &cdev->lan_info.params)) {
                kfree(cdev);
                cdev = NULL;
-               goto out;
+               return;
        }
 
        cdev->lan_info.msix_count = pf->num_iwarp_msix;
@@ -430,41 +338,20 @@ struct i40e_client_instance *i40e_client_add_instance(struct i40e_pf *pf,
        else
                dev_err(&pf->pdev->dev, "MAC address list is empty!\n");
 
-       cdev->client = client;
-       INIT_LIST_HEAD(&cdev->list);
-       list_add(&cdev->list, &i40e_client_instances);
-out:
-       mutex_unlock(&i40e_client_instance_mutex);
-       return cdev;
+       cdev->client = registered_client;
+       pf->cinst = cdev;
 }
 
 /**
  * i40e_client_del_instance - removes a client instance from the list
  * @pf: pointer to the board struct
  *
- * Returns 0 on success or non-0 on error
  **/
 static
-int i40e_client_del_instance(struct i40e_pf *pf, struct i40e_client *client)
+void i40e_client_del_instance(struct i40e_pf *pf)
 {
-       struct i40e_client_instance *cdev, *tmp;
-       int ret = -ENODEV;
-
-       mutex_lock(&i40e_client_instance_mutex);
-       list_for_each_entry_safe(cdev, tmp, &i40e_client_instances, list) {
-               if ((cdev->lan_info.pf != pf) || (cdev->client != client))
-                       continue;
-
-               dev_info(&pf->pdev->dev, "Deleted instance of Client %s, of dev %d bus=0x%02x func=0x%02x)\n",
-                        client->name, pf->hw.pf_id,
-                        pf->hw.bus.device, pf->hw.bus.func);
-               list_del(&cdev->list);
-               kfree(cdev);
-               ret = 0;
-               break;
-       }
-       mutex_unlock(&i40e_client_instance_mutex);
-       return ret;
+       kfree(pf->cinst);
+       pf->cinst = NULL;
 }
 
 /**
@@ -473,67 +360,50 @@ int i40e_client_del_instance(struct i40e_pf *pf, struct i40e_client *client)
  **/
 void i40e_client_subtask(struct i40e_pf *pf)
 {
+       struct i40e_client *client = registered_client;
        struct i40e_client_instance *cdev;
-       struct i40e_client *client;
-       bool existing = false;
+       struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
        int ret = 0;
 
        if (!(pf->flags & I40E_FLAG_SERVICE_CLIENT_REQUESTED))
                return;
        pf->flags &= ~I40E_FLAG_SERVICE_CLIENT_REQUESTED;
+       cdev = pf->cinst;
 
        /* If we're down or resetting, just bail */
        if (test_bit(__I40E_DOWN, &pf->state) ||
            test_bit(__I40E_CONFIG_BUSY, &pf->state))
                return;
 
-       /* Check client state and instantiate client if client registered */
-       mutex_lock(&i40e_client_mutex);
-       list_for_each_entry(client, &i40e_clients, list) {
-               /* first check client is registered */
-               if (!test_bit(__I40E_CLIENT_REGISTERED, &client->state))
-                       continue;
-
-               /* Do we also need the LAN VSI to be up, to create instance */
-               if (!(client->flags & I40E_CLIENT_FLAGS_LAUNCH_ON_PROBE)) {
-                       /* check if L2 VSI is up, if not we are not ready */
-                       if (test_bit(__I40E_DOWN, &pf->vsi[pf->lan_vsi]->state))
-                               continue;
-               } else {
-                       dev_warn(&pf->pdev->dev, "This client %s is being instantiated at probe\n",
-                                client->name);
-               }
-
-               /* Add the client instance to the instance list */
-               cdev = i40e_client_add_instance(pf, client, &existing);
-               if (!cdev)
-                       continue;
-
-               if (!existing) {
-                       dev_info(&pf->pdev->dev, "Added instance of Client %s to PF%d bus=0x%02x dev=0x%02x func=0x%02x\n",
-                                client->name, pf->hw.pf_id,
-                                pf->hw.bus.bus_id, pf->hw.bus.device,
-                                pf->hw.bus.func);
-               }
+       if (!client || !cdev)
+               return;
 
-               mutex_lock(&i40e_client_instance_mutex);
-               if (!test_bit(__I40E_CLIENT_INSTANCE_OPENED,
-                             &cdev->state)) {
-                       /* Send an Open request to the client */
-                       if (client->ops && client->ops->open)
-                               ret = client->ops->open(&cdev->lan_info,
-                                                       client);
-                       if (!ret) {
-                               set_bit(__I40E_CLIENT_INSTANCE_OPENED,
-                                       &cdev->state);
-                       } else {
-                               /* remove client instance */
-                               i40e_client_del_instance(pf, client);
+       /* Here we handle client opens. If the client is down, but
+        * the netdev is up, then open the client.
+        */
+       if (!test_bit(__I40E_CLIENT_INSTANCE_OPENED, &cdev->state)) {
+               if (!test_bit(__I40E_DOWN, &vsi->state) &&
+                   client->ops && client->ops->open) {
+                       set_bit(__I40E_CLIENT_INSTANCE_OPENED, &cdev->state);
+                       ret = client->ops->open(&cdev->lan_info, client);
+                       if (ret) {
+                               /* Remove failed client instance */
+                               clear_bit(__I40E_CLIENT_INSTANCE_OPENED,
+                                         &cdev->state);
+                               i40e_client_del_instance(pf);
                        }
                }
-               mutex_unlock(&i40e_client_instance_mutex);
+       } else {
+               /* Likewise for client close. If the client is up, but the
+                * netdev is down, then close the client.
+                */
+               if (test_bit(__I40E_DOWN, &vsi->state) &&
+                   client->ops && client->ops->close) {
+                       clear_bit(__I40E_CLIENT_INSTANCE_OPENED, &cdev->state);
+                       client->ops->close(&cdev->lan_info, client, false);
+                       i40e_client_release_qvlist(&cdev->lan_info);
+               }
        }
-       mutex_unlock(&i40e_client_mutex);
 }
 
 /**
@@ -566,6 +436,12 @@ int i40e_lan_add_device(struct i40e_pf *pf)
                 pf->hw.pf_id, pf->hw.bus.bus_id,
                 pf->hw.bus.device, pf->hw.bus.func);
 
+       /* If a client has already been registered, we need to add an instance
+        * of it to our new LAN device.
+        */
+       if (registered_client)
+               i40e_client_add_instance(pf);
+
        /* Since in some cases register may have happened before a device gets
         * added, we can schedule a subtask to go initiate the clients if
         * they can be launched at probe time.
@@ -589,6 +465,9 @@ int i40e_lan_del_device(struct i40e_pf *pf)
        struct i40e_device *ldev, *tmp;
        int ret = -ENODEV;
 
+       /* First, remove any client instance. */
+       i40e_client_del_instance(pf);
+
        mutex_lock(&i40e_device_mutex);
        list_for_each_entry_safe(ldev, tmp, &i40e_devices, list) {
                if (ldev->pf == pf) {
@@ -601,7 +480,6 @@ int i40e_lan_del_device(struct i40e_pf *pf)
                        break;
                }
        }
-
        mutex_unlock(&i40e_device_mutex);
        return ret;
 }
@@ -610,22 +488,24 @@ int i40e_lan_del_device(struct i40e_pf *pf)
  * i40e_client_release - release client specific resources
  * @client: pointer to the registered client
  *
- * Return 0 on success or < 0 on error
  **/
-static int i40e_client_release(struct i40e_client *client)
+static void i40e_client_release(struct i40e_client *client)
 {
-       struct i40e_client_instance *cdev, *tmp;
+       struct i40e_client_instance *cdev;
+       struct i40e_device *ldev;
        struct i40e_pf *pf;
-       int ret = 0;
 
-       LIST_HEAD(cdevs_tmp);
-
-       mutex_lock(&i40e_client_instance_mutex);
-       list_for_each_entry_safe(cdev, tmp, &i40e_client_instances, list) {
-               if (strncmp(cdev->client->name, client->name,
-                           I40E_CLIENT_STR_LENGTH))
+       mutex_lock(&i40e_device_mutex);
+       list_for_each_entry(ldev, &i40e_devices, list) {
+               pf = ldev->pf;
+               cdev = pf->cinst;
+               if (!cdev)
                        continue;
-               pf = (struct i40e_pf *)cdev->lan_info.pf;
+
+               while (test_and_set_bit(__I40E_SERVICE_SCHED,
+                                       &pf->state))
+                       usleep_range(500, 1000);
+
                if (test_bit(__I40E_CLIENT_INSTANCE_OPENED, &cdev->state)) {
                        if (client->ops && client->ops->close)
                                client->ops->close(&cdev->lan_info, client,
@@ -637,18 +517,13 @@ static int i40e_client_release(struct i40e_client *client)
                                 "Client %s instance for PF id %d closed\n",
                                 client->name, pf->hw.pf_id);
                }
-               /* delete the client instance from the list */
-               list_move(&cdev->list, &cdevs_tmp);
+               /* delete the client instance */
+               i40e_client_del_instance(pf);
                dev_info(&pf->pdev->dev, "Deleted client instance of Client %s\n",
                         client->name);
+               clear_bit(__I40E_SERVICE_SCHED, &pf->state);
        }
-       mutex_unlock(&i40e_client_instance_mutex);
-
-       /* free the client device and release its vsi */
-       list_for_each_entry_safe(cdev, tmp, &cdevs_tmp, list) {
-               kfree(cdev);
-       }
-       return ret;
+       mutex_unlock(&i40e_device_mutex);
 }
 
 /**
@@ -664,6 +539,7 @@ static void i40e_client_prepare(struct i40e_client *client)
        mutex_lock(&i40e_device_mutex);
        list_for_each_entry(ldev, &i40e_devices, list) {
                pf = ldev->pf;
+               i40e_client_add_instance(pf);
                /* Start the client subtask */
                pf->flags |= I40E_FLAG_SERVICE_CLIENT_REQUESTED;
                i40e_service_event_schedule(pf);
@@ -792,8 +668,8 @@ static void i40e_client_request_reset(struct i40e_info *ldev,
                break;
        default:
                dev_warn(&pf->pdev->dev,
-                        "Client %s instance for PF id %d request an unsupported reset: %d.\n",
-                        client->name, pf->hw.pf_id, reset_level);
+                        "Client for PF id %d requested an unsupported reset: %d.\n",
+                        pf->hw.pf_id, reset_level);
                break;
        }
 
@@ -852,8 +728,8 @@ static int i40e_client_update_vsi_ctxt(struct i40e_info *ldev,
        } else {
                update = false;
                dev_warn(&pf->pdev->dev,
-                        "Client %s instance for PF id %d request an unsupported Config: %x.\n",
-                        client->name, pf->hw.pf_id, flag);
+                        "Client for PF id %d request an unsupported Config: %x.\n",
+                        pf->hw.pf_id, flag);
        }
 
        if (update) {
@@ -878,7 +754,6 @@ static int i40e_client_update_vsi_ctxt(struct i40e_info *ldev,
 int i40e_register_client(struct i40e_client *client)
 {
        int ret = 0;
-       enum i40e_vsi_type vsi_type;
 
        if (!client) {
                ret = -EIO;
@@ -891,11 +766,9 @@ int i40e_register_client(struct i40e_client *client)
                goto out;
        }
 
-       mutex_lock(&i40e_client_mutex);
-       if (i40e_client_is_registered(client)) {
+       if (registered_client) {
                pr_info("i40e: Client %s has already been registered!\n",
                        client->name);
-               mutex_unlock(&i40e_client_mutex);
                ret = -EEXIST;
                goto out;
        }
@@ -908,22 +781,11 @@ int i40e_register_client(struct i40e_client *client)
                        client->version.major, client->version.minor,
                        client->version.build,
                        i40e_client_interface_version_str);
-               mutex_unlock(&i40e_client_mutex);
                ret = -EIO;
                goto out;
        }
 
-       vsi_type = i40e_client_type_to_vsi_type(client->type);
-       if (vsi_type == I40E_VSI_TYPE_UNKNOWN) {
-               pr_info("i40e: Failed to register client %s due to unknown client type %d\n",
-                       client->name, client->type);
-               mutex_unlock(&i40e_client_mutex);
-               ret = -EIO;
-               goto out;
-       }
-       list_add(&client->list, &i40e_clients);
-       set_bit(__I40E_CLIENT_REGISTERED, &client->state);
-       mutex_unlock(&i40e_client_mutex);
+       registered_client = client;
 
        i40e_client_prepare(client);
 
@@ -943,29 +805,21 @@ int i40e_unregister_client(struct i40e_client *client)
 {
        int ret = 0;
 
-       /* When a unregister request comes through we would have to send
-        * a close for each of the client instances that were opened.
-        * client_release function is called to handle this.
-        */
-       mutex_lock(&i40e_client_mutex);
-       if (!client || i40e_client_release(client)) {
-               ret = -EIO;
-               goto out;
-       }
-
-       /* TODO: check if device is in reset, or if that matters? */
-       if (!i40e_client_is_registered(client)) {
+       if (registered_client != client) {
                pr_info("i40e: Client %s has not been registered\n",
                        client->name);
                ret = -ENODEV;
                goto out;
        }
-       clear_bit(__I40E_CLIENT_REGISTERED, &client->state);
-       list_del(&client->list);
-       pr_info("i40e: Unregistered client %s with return code %d\n",
-               client->name, ret);
+       registered_client = NULL;
+       /* When an unregister request comes through we need to send
+        * a close for each of the client instances that were opened.
+        * The client_release function is called to handle this.
+        */
+       i40e_client_release(client);
+
+       pr_info("i40e: Unregistered client %s\n", client->name);
 out:
-       mutex_unlock(&i40e_client_mutex);
        return ret;
 }
 EXPORT_SYMBOL(i40e_unregister_client);
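
With the per-client lists gone, registration is a single-slot handoff into registered_client. A consumer-side sketch under stated assumptions (the ops body is a stub, the version initializers must match the I40E_CLIENT_VERSION_* values checked in i40e_register_client(), and i40iw is the in-tree client this interface serves):

	/* Hypothetical minimal client; a real client also provides
	 * close/l2_param_change/virtchnl_receive etc.
	 */
	static int example_open(struct i40e_info *ldev,
				struct i40e_client *client)
	{
		return 0;	/* claim the queues/MSI-X described in ldev */
	}

	static const struct i40e_client_ops example_ops = {
		.open = example_open,
	};

	static struct i40e_client example_client = {
		.name = "example",
		.version = {
			.major = I40E_CLIENT_VERSION_MAJOR,
			.minor = I40E_CLIENT_VERSION_MINOR,
			.build = I40E_CLIENT_VERSION_BUILD,
		},
		.type = I40E_CLIENT_IWARP,
		.ops = &example_ops,
	};

	/* Fails with -EEXIST if another client already holds the slot. */
	static int __init example_init(void)
	{
		return i40e_register_client(&example_client);
	}
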
index 528bd79b05fecc68d981ea08b144d9898c6aaaa0..15b21a5315b597a0ee4cd933fc10bfa5cec0a510 100644
@@ -57,11 +57,6 @@ enum i40e_client_instance_state {
        __I40E_CLIENT_INSTANCE_OPENED,
 };
 
-enum i40e_client_type {
-       I40E_CLIENT_IWARP,
-       I40E_CLIENT_VMDQ2
-};
-
 struct i40e_ops;
 struct i40e_client;
 
@@ -214,7 +209,8 @@ struct i40e_client {
        u32 flags;
 #define I40E_CLIENT_FLAGS_LAUNCH_ON_PROBE      BIT(0)
 #define I40E_TX_FLAGS_NOTIFY_OTHER_EVENTS      BIT(2)
-       enum i40e_client_type type;
+       u8 type;
+#define I40E_CLIENT_IWARP 0
        const struct i40e_client_ops *ops; /* client ops provided by the client */
 };
 
index ece57d6a6e232f93ca28f493f91e27428b7cb41e..f9db95aa3a2036940a2fcb8d43b6a5bba201b863 100644
@@ -1088,33 +1088,6 @@ void i40e_pre_tx_queue_cfg(struct i40e_hw *hw, u32 queue, bool enable)
 
        wr32(hw, I40E_GLLAN_TXPRE_QDIS(reg_block), reg_val);
 }
-#ifdef I40E_FCOE
-
-/**
- * i40e_get_san_mac_addr - get SAN MAC address
- * @hw: pointer to the HW structure
- * @mac_addr: pointer to SAN MAC address
- *
- * Reads the adapter's SAN MAC address from NVM
- **/
-i40e_status i40e_get_san_mac_addr(struct i40e_hw *hw, u8 *mac_addr)
-{
-       struct i40e_aqc_mac_address_read_data addrs;
-       i40e_status status;
-       u16 flags = 0;
-
-       status = i40e_aq_mac_address_read(hw, &flags, &addrs, NULL);
-       if (status)
-               return status;
-
-       if (flags & I40E_AQC_SAN_ADDR_VALID)
-               ether_addr_copy(mac_addr, addrs.pf_san_mac);
-       else
-               status = I40E_ERR_INVALID_MAC_ADDR;
-
-       return status;
-}
-#endif
 
 /**
  *  i40e_read_pba_string - Reads part number string from EEPROM
@@ -4990,7 +4963,9 @@ u32 i40e_read_rx_ctl(struct i40e_hw *hw, u32 reg_addr)
        int retry = 5;
        u32 val = 0;
 
-       use_register = (hw->aq.api_maj_ver == 1) && (hw->aq.api_min_ver < 5);
+       use_register = (((hw->aq.api_maj_ver == 1) &&
+                       (hw->aq.api_min_ver < 5)) ||
+                       (hw->mac.type == I40E_MAC_X722));
        if (!use_register) {
 do_retry:
                status = i40e_aq_rx_ctl_read_register(hw, reg_addr, &val, NULL);
@@ -5049,7 +5024,9 @@ void i40e_write_rx_ctl(struct i40e_hw *hw, u32 reg_addr, u32 reg_val)
        bool use_register;
        int retry = 5;
 
-       use_register = (hw->aq.api_maj_ver == 1) && (hw->aq.api_min_ver < 5);
+       use_register = (((hw->aq.api_maj_ver == 1) &&
+                       (hw->aq.api_min_ver < 5)) ||
+                       (hw->mac.type == I40E_MAC_X722));
        if (!use_register) {
 do_retry:
                status = i40e_aq_rx_ctl_write_register(hw, reg_addr,
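
Both i40e_read_rx_ctl() and i40e_write_rx_ctl() now take the direct-register path either when the admin-queue API predates 1.5 or on X722 MACs. The duplicated condition could be captured once; a sketch (the helper name is assumed):

	/* Hypothetical predicate shared by the rx_ctl read/write paths. */
	static bool example_use_direct_reg_access(struct i40e_hw *hw)
	{
		return (hw->aq.api_maj_ver == 1 && hw->aq.api_min_ver < 5) ||
		       hw->mac.type == I40E_MAC_X722;
	}
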
index 267ad2588255deeb196340da7788b10bc89d1f3e..c5f68cc1edcdf7919c9f82e02e85362bdc9794f6 100644
@@ -484,25 +484,6 @@ static void i40e_dbg_dump_vsi_seid(struct i40e_pf *pf, int seid)
                         vsi->bw_ets_limit_credits[i],
                         vsi->bw_ets_max_quanta[i]);
        }
-#ifdef I40E_FCOE
-       if (vsi->type == I40E_VSI_FCOE) {
-               dev_info(&pf->pdev->dev,
-                        "    fcoe_stats: rx_packets = %llu, rx_dwords = %llu, rx_dropped = %llu\n",
-                        vsi->fcoe_stats.rx_fcoe_packets,
-                        vsi->fcoe_stats.rx_fcoe_dwords,
-                        vsi->fcoe_stats.rx_fcoe_dropped);
-               dev_info(&pf->pdev->dev,
-                        "    fcoe_stats: tx_packets = %llu, tx_dwords = %llu\n",
-                        vsi->fcoe_stats.tx_fcoe_packets,
-                        vsi->fcoe_stats.tx_fcoe_dwords);
-               dev_info(&pf->pdev->dev,
-                        "    fcoe_stats: bad_crc = %llu, last_error = %llu\n",
-                        vsi->fcoe_stats.fcoe_bad_fccrc,
-                        vsi->fcoe_stats.fcoe_last_error);
-               dev_info(&pf->pdev->dev, "    fcoe_stats: ddp_count = %llu\n",
-                        vsi->fcoe_stats.fcoe_ddp_count);
-       }
-#endif
 }
 
 /**
index a22e26200bccb1a7e79716a7429e0eb7fe700ecc..10325b5a98055530de92b17b42bf366063f327e7 100644
@@ -89,7 +89,6 @@ static const struct i40e_stats i40e_gstrings_misc_stats[] = {
        I40E_VSI_STAT("rx_unknown_protocol", eth_stats.rx_unknown_protocol),
        I40E_VSI_STAT("tx_linearize", tx_linearize),
        I40E_VSI_STAT("tx_force_wb", tx_force_wb),
-       I40E_VSI_STAT("tx_lost_interrupt", tx_lost_interrupt),
        I40E_VSI_STAT("rx_alloc_fail", rx_buf_failed),
        I40E_VSI_STAT("rx_pg_alloc_fail", rx_page_failed),
 };
@@ -162,19 +161,6 @@ static const struct i40e_stats i40e_gstrings_stats[] = {
        I40E_PF_STAT("rx_lpi_count", stats.rx_lpi_count),
 };
 
-#ifdef I40E_FCOE
-static const struct i40e_stats i40e_gstrings_fcoe_stats[] = {
-       I40E_VSI_STAT("fcoe_bad_fccrc", fcoe_stats.fcoe_bad_fccrc),
-       I40E_VSI_STAT("rx_fcoe_dropped", fcoe_stats.rx_fcoe_dropped),
-       I40E_VSI_STAT("rx_fcoe_packets", fcoe_stats.rx_fcoe_packets),
-       I40E_VSI_STAT("rx_fcoe_dwords", fcoe_stats.rx_fcoe_dwords),
-       I40E_VSI_STAT("fcoe_ddp_count", fcoe_stats.fcoe_ddp_count),
-       I40E_VSI_STAT("fcoe_last_error", fcoe_stats.fcoe_last_error),
-       I40E_VSI_STAT("tx_fcoe_packets", fcoe_stats.tx_fcoe_packets),
-       I40E_VSI_STAT("tx_fcoe_dwords", fcoe_stats.tx_fcoe_dwords),
-};
-
-#endif /* I40E_FCOE */
 #define I40E_QUEUE_STATS_LEN(n) \
        (((struct i40e_netdev_priv *)netdev_priv((n)))->vsi->num_queue_pairs \
            * 2 /* Tx and Rx together */                                     \
@@ -182,17 +168,9 @@ static const struct i40e_stats i40e_gstrings_fcoe_stats[] = {
 #define I40E_GLOBAL_STATS_LEN  ARRAY_SIZE(i40e_gstrings_stats)
 #define I40E_NETDEV_STATS_LEN   ARRAY_SIZE(i40e_gstrings_net_stats)
 #define I40E_MISC_STATS_LEN    ARRAY_SIZE(i40e_gstrings_misc_stats)
-#ifdef I40E_FCOE
-#define I40E_FCOE_STATS_LEN    ARRAY_SIZE(i40e_gstrings_fcoe_stats)
-#define I40E_VSI_STATS_LEN(n)  (I40E_NETDEV_STATS_LEN + \
-                                I40E_FCOE_STATS_LEN + \
-                                I40E_MISC_STATS_LEN + \
-                                I40E_QUEUE_STATS_LEN((n)))
-#else
 #define I40E_VSI_STATS_LEN(n)   (I40E_NETDEV_STATS_LEN + \
                                 I40E_MISC_STATS_LEN + \
                                 I40E_QUEUE_STATS_LEN((n)))
-#endif /* I40E_FCOE */
 #define I40E_PFC_STATS_LEN ( \
                (FIELD_SIZEOF(struct i40e_pf, stats.priority_xoff_rx) + \
                 FIELD_SIZEOF(struct i40e_pf, stats.priority_xon_rx) + \
@@ -228,22 +206,37 @@ static const char i40e_gstrings_test[][ETH_GSTRING_LEN] = {
 
 #define I40E_TEST_LEN (sizeof(i40e_gstrings_test) / ETH_GSTRING_LEN)
 
-static const char i40e_priv_flags_strings[][ETH_GSTRING_LEN] = {
-       "MFP",
-       "LinkPolling",
-       "flow-director-atr",
-       "veb-stats",
-       "hw-atr-eviction",
+struct i40e_priv_flags {
+       char flag_string[ETH_GSTRING_LEN];
+       u64 flag;
+       bool read_only;
+};
+
+#define I40E_PRIV_FLAG(_name, _flag, _read_only) { \
+       .flag_string = _name, \
+       .flag = _flag, \
+       .read_only = _read_only, \
+}
+
+static const struct i40e_priv_flags i40e_gstrings_priv_flags[] = {
+       /* NOTE: MFP setting cannot be changed */
+       I40E_PRIV_FLAG("MFP", I40E_FLAG_MFP_ENABLED, 1),
+       I40E_PRIV_FLAG("LinkPolling", I40E_FLAG_LINK_POLLING_ENABLED, 0),
+       I40E_PRIV_FLAG("flow-director-atr", I40E_FLAG_FD_ATR_ENABLED, 0),
+       I40E_PRIV_FLAG("veb-stats", I40E_FLAG_VEB_STATS_ENABLED, 0),
+       I40E_PRIV_FLAG("hw-atr-eviction", I40E_FLAG_HW_ATR_EVICT_CAPABLE, 0),
+       I40E_PRIV_FLAG("legacy-rx", I40E_FLAG_LEGACY_RX, 0),
 };
 
-#define I40E_PRIV_FLAGS_STR_LEN ARRAY_SIZE(i40e_priv_flags_strings)
+#define I40E_PRIV_FLAGS_STR_LEN ARRAY_SIZE(i40e_gstrings_priv_flags)
 
 /* Private flags with a global effect, restricted to PF 0 */
-static const char i40e_gl_priv_flags_strings[][ETH_GSTRING_LEN] = {
-       "vf-true-promisc-support",
+static const struct i40e_priv_flags i40e_gl_gstrings_priv_flags[] = {
+       I40E_PRIV_FLAG("vf-true-promisc-support",
+                      I40E_FLAG_TRUE_PROMISC_SUPPORT, 0),
 };
 
-#define I40E_GL_PRIV_FLAGS_STR_LEN ARRAY_SIZE(i40e_gl_priv_flags_strings)
+#define I40E_GL_PRIV_FLAGS_STR_LEN ARRAY_SIZE(i40e_gl_gstrings_priv_flags)
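
Because each entry now carries its flag bit and a read_only marker, the ethtool get/set paths can walk the table instead of hard-coding string order. A sketch of a table-driven getter (the example_* name is made up; the patch's real i40e_get_priv_flags is outside this excerpt):

	/* Hypothetical getter: BIT(i) maps table position to the ethtool
	 * priv-flags bitmap.
	 */
	static u32 example_get_priv_flags(struct i40e_pf *pf)
	{
		u32 i, ret_flags = 0;

		for (i = 0; i < I40E_PRIV_FLAGS_STR_LEN; i++) {
			const struct i40e_priv_flags *priv =
				&i40e_gstrings_priv_flags[i];

			if (priv->flag & pf->flags)
				ret_flags |= BIT(i);
		}

		return ret_flags;
	}
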
 
 /**
  * i40e_partition_setting_complaint - generic complaint for MFP restriction
@@ -387,7 +380,7 @@ static void i40e_phy_type_to_ethtool(struct i40e_pf *pf, u32 *supported,
  *
  **/
 static void i40e_get_settings_link_up(struct i40e_hw *hw,
-                                     struct ethtool_cmd *ecmd,
+                                     struct ethtool_link_ksettings *cmd,
                                      struct net_device *netdev,
                                      struct i40e_pf *pf)
 {
@@ -395,90 +388,96 @@ static void i40e_get_settings_link_up(struct i40e_hw *hw,
        u32 link_speed = hw_link_info->link_speed;
        u32 e_advertising = 0x0;
        u32 e_supported = 0x0;
+       u32 supported, advertising;
+
+       ethtool_convert_link_mode_to_legacy_u32(&supported,
+                                               cmd->link_modes.supported);
+       ethtool_convert_link_mode_to_legacy_u32(&advertising,
+                                               cmd->link_modes.advertising);
 
        /* Initialize supported and advertised settings based on phy settings */
        switch (hw_link_info->phy_type) {
        case I40E_PHY_TYPE_40GBASE_CR4:
        case I40E_PHY_TYPE_40GBASE_CR4_CU:
-               ecmd->supported = SUPPORTED_Autoneg |
-                                 SUPPORTED_40000baseCR4_Full;
-               ecmd->advertising = ADVERTISED_Autoneg |
-                                   ADVERTISED_40000baseCR4_Full;
+               supported = SUPPORTED_Autoneg |
+                           SUPPORTED_40000baseCR4_Full;
+               advertising = ADVERTISED_Autoneg |
+                             ADVERTISED_40000baseCR4_Full;
                break;
        case I40E_PHY_TYPE_XLAUI:
        case I40E_PHY_TYPE_XLPPI:
        case I40E_PHY_TYPE_40GBASE_AOC:
-               ecmd->supported = SUPPORTED_40000baseCR4_Full;
+               supported = SUPPORTED_40000baseCR4_Full;
                break;
        case I40E_PHY_TYPE_40GBASE_SR4:
-               ecmd->supported = SUPPORTED_40000baseSR4_Full;
+               supported = SUPPORTED_40000baseSR4_Full;
                break;
        case I40E_PHY_TYPE_40GBASE_LR4:
-               ecmd->supported = SUPPORTED_40000baseLR4_Full;
+               supported = SUPPORTED_40000baseLR4_Full;
                break;
        case I40E_PHY_TYPE_10GBASE_SR:
        case I40E_PHY_TYPE_10GBASE_LR:
        case I40E_PHY_TYPE_1000BASE_SX:
        case I40E_PHY_TYPE_1000BASE_LX:
-               ecmd->supported = SUPPORTED_10000baseT_Full;
+               supported = SUPPORTED_10000baseT_Full;
                if (hw_link_info->module_type[2] &
                    I40E_MODULE_TYPE_1000BASE_SX ||
                    hw_link_info->module_type[2] &
                    I40E_MODULE_TYPE_1000BASE_LX) {
-                       ecmd->supported |= SUPPORTED_1000baseT_Full;
+                       supported |= SUPPORTED_1000baseT_Full;
                        if (hw_link_info->requested_speeds &
                            I40E_LINK_SPEED_1GB)
-                               ecmd->advertising |= ADVERTISED_1000baseT_Full;
+                               advertising |= ADVERTISED_1000baseT_Full;
                }
                if (hw_link_info->requested_speeds & I40E_LINK_SPEED_10GB)
-                       ecmd->advertising |= ADVERTISED_10000baseT_Full;
+                       advertising |= ADVERTISED_10000baseT_Full;
                break;
        case I40E_PHY_TYPE_10GBASE_T:
        case I40E_PHY_TYPE_1000BASE_T:
        case I40E_PHY_TYPE_100BASE_TX:
-               ecmd->supported = SUPPORTED_Autoneg |
-                                 SUPPORTED_10000baseT_Full |
-                                 SUPPORTED_1000baseT_Full |
-                                 SUPPORTED_100baseT_Full;
-               ecmd->advertising = ADVERTISED_Autoneg;
+               supported = SUPPORTED_Autoneg |
+                           SUPPORTED_10000baseT_Full |
+                           SUPPORTED_1000baseT_Full |
+                           SUPPORTED_100baseT_Full;
+               advertising = ADVERTISED_Autoneg;
                if (hw_link_info->requested_speeds & I40E_LINK_SPEED_10GB)
-                       ecmd->advertising |= ADVERTISED_10000baseT_Full;
+                       advertising |= ADVERTISED_10000baseT_Full;
                if (hw_link_info->requested_speeds & I40E_LINK_SPEED_1GB)
-                       ecmd->advertising |= ADVERTISED_1000baseT_Full;
+                       advertising |= ADVERTISED_1000baseT_Full;
                if (hw_link_info->requested_speeds & I40E_LINK_SPEED_100MB)
-                       ecmd->advertising |= ADVERTISED_100baseT_Full;
+                       advertising |= ADVERTISED_100baseT_Full;
                break;
        case I40E_PHY_TYPE_1000BASE_T_OPTICAL:
-               ecmd->supported = SUPPORTED_Autoneg |
-                                 SUPPORTED_1000baseT_Full;
-               ecmd->advertising = ADVERTISED_Autoneg |
-                                   ADVERTISED_1000baseT_Full;
+               supported = SUPPORTED_Autoneg |
+                           SUPPORTED_1000baseT_Full;
+               advertising = ADVERTISED_Autoneg |
+                             ADVERTISED_1000baseT_Full;
                break;
        case I40E_PHY_TYPE_10GBASE_CR1_CU:
        case I40E_PHY_TYPE_10GBASE_CR1:
-               ecmd->supported = SUPPORTED_Autoneg |
-                                 SUPPORTED_10000baseT_Full;
-               ecmd->advertising = ADVERTISED_Autoneg |
-                                   ADVERTISED_10000baseT_Full;
+               supported = SUPPORTED_Autoneg |
+                           SUPPORTED_10000baseT_Full;
+               advertising = ADVERTISED_Autoneg |
+                             ADVERTISED_10000baseT_Full;
                break;
        case I40E_PHY_TYPE_XAUI:
        case I40E_PHY_TYPE_XFI:
        case I40E_PHY_TYPE_SFI:
        case I40E_PHY_TYPE_10GBASE_SFPP_CU:
        case I40E_PHY_TYPE_10GBASE_AOC:
-               ecmd->supported = SUPPORTED_10000baseT_Full;
-               ecmd->advertising = SUPPORTED_10000baseT_Full;
+               supported = SUPPORTED_10000baseT_Full;
+               advertising = ADVERTISED_10000baseT_Full;
                break;
        case I40E_PHY_TYPE_SGMII:
-               ecmd->supported = SUPPORTED_Autoneg |
-                                 SUPPORTED_1000baseT_Full;
+               supported = SUPPORTED_Autoneg |
+                           SUPPORTED_1000baseT_Full;
                if (hw_link_info->requested_speeds & I40E_LINK_SPEED_1GB)
-                       ecmd->advertising |= ADVERTISED_1000baseT_Full;
+                       advertising |= ADVERTISED_1000baseT_Full;
                if (pf->flags & I40E_FLAG_100M_SGMII_CAPABLE) {
-                       ecmd->supported |= SUPPORTED_100baseT_Full;
+                       supported |= SUPPORTED_100baseT_Full;
                        if (hw_link_info->requested_speeds &
                            I40E_LINK_SPEED_100MB)
-                               ecmd->advertising |= ADVERTISED_100baseT_Full;
+                               advertising |= ADVERTISED_100baseT_Full;
                }
                break;
        case I40E_PHY_TYPE_40GBASE_KR4:
@@ -486,25 +485,25 @@ static void i40e_get_settings_link_up(struct i40e_hw *hw,
        case I40E_PHY_TYPE_10GBASE_KR:
        case I40E_PHY_TYPE_10GBASE_KX4:
        case I40E_PHY_TYPE_1000BASE_KX:
-               ecmd->supported |= SUPPORTED_40000baseKR4_Full |
-                                  SUPPORTED_20000baseKR2_Full |
-                                  SUPPORTED_10000baseKR_Full |
-                                  SUPPORTED_10000baseKX4_Full |
-                                  SUPPORTED_1000baseKX_Full |
-                                  SUPPORTED_Autoneg;
-               ecmd->advertising |= ADVERTISED_40000baseKR4_Full |
-                                    ADVERTISED_20000baseKR2_Full |
-                                    ADVERTISED_10000baseKR_Full |
-                                    ADVERTISED_10000baseKX4_Full |
-                                    ADVERTISED_1000baseKX_Full |
-                                    ADVERTISED_Autoneg;
+               supported |= SUPPORTED_40000baseKR4_Full |
+                            SUPPORTED_20000baseKR2_Full |
+                            SUPPORTED_10000baseKR_Full |
+                            SUPPORTED_10000baseKX4_Full |
+                            SUPPORTED_1000baseKX_Full |
+                            SUPPORTED_Autoneg;
+               advertising |= ADVERTISED_40000baseKR4_Full |
+                              ADVERTISED_20000baseKR2_Full |
+                              ADVERTISED_10000baseKR_Full |
+                              ADVERTISED_10000baseKX4_Full |
+                              ADVERTISED_1000baseKX_Full |
+                              ADVERTISED_Autoneg;
                break;
        case I40E_PHY_TYPE_25GBASE_KR:
        case I40E_PHY_TYPE_25GBASE_CR:
        case I40E_PHY_TYPE_25GBASE_SR:
        case I40E_PHY_TYPE_25GBASE_LR:
-               ecmd->supported = SUPPORTED_Autoneg;
-               ecmd->advertising = ADVERTISED_Autoneg;
+               supported = SUPPORTED_Autoneg;
+               advertising = ADVERTISED_Autoneg;
                /* TODO: add speeds when ethtool is ready to support */
                break;
        default:
@@ -520,38 +519,43 @@ static void i40e_get_settings_link_up(struct i40e_hw *hw,
        i40e_phy_type_to_ethtool(pf, &e_supported,
                                 &e_advertising);
 
-       ecmd->supported = ecmd->supported & e_supported;
-       ecmd->advertising = ecmd->advertising & e_advertising;
+       supported = supported & e_supported;
+       advertising = advertising & e_advertising;
 
        /* Set speed and duplex */
        switch (link_speed) {
        case I40E_LINK_SPEED_40GB:
-               ethtool_cmd_speed_set(ecmd, SPEED_40000);
+               cmd->base.speed = SPEED_40000;
                break;
        case I40E_LINK_SPEED_25GB:
 #ifdef SPEED_25000
-               ethtool_cmd_speed_set(ecmd, SPEED_25000);
+               cmd->base.speed = SPEED_25000;
 #else
                netdev_info(netdev,
                            "Speed is 25G, display not supported by this version of ethtool.\n");
 #endif
                break;
        case I40E_LINK_SPEED_20GB:
-               ethtool_cmd_speed_set(ecmd, SPEED_20000);
+               cmd->base.speed = SPEED_20000;
                break;
        case I40E_LINK_SPEED_10GB:
-               ethtool_cmd_speed_set(ecmd, SPEED_10000);
+               cmd->base.speed = SPEED_10000;
                break;
        case I40E_LINK_SPEED_1GB:
-               ethtool_cmd_speed_set(ecmd, SPEED_1000);
+               cmd->base.speed = SPEED_1000;
                break;
        case I40E_LINK_SPEED_100MB:
-               ethtool_cmd_speed_set(ecmd, SPEED_100);
+               cmd->base.speed = SPEED_100;
                break;
        default:
                break;
        }
-       ecmd->duplex = DUPLEX_FULL;
+       cmd->base.duplex = DUPLEX_FULL;
+
+       ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
+                                               supported);
+       ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
+                                               advertising);
 }
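
The two conversion helpers used above come from the ethtool core. A minimal sketch of
their assumed semantics (per include/linux/ethtool.h; not part of this patch):

        /* Legacy u32 -> link-mode bitmap: the legacy bits map onto the
         * first 32 link modes.
         */
        void ethtool_convert_legacy_u32_to_link_mode(unsigned long *dst,
                                                     u32 legacy_u32)
        {
                bitmap_zero(dst, __ETHTOOL_LINK_MODE_MASK_NBITS);
                dst[0] = legacy_u32;
        }

        /* Link-mode bitmap -> legacy u32: returns false if any mode beyond
         * bit 31 was set and thus cannot be represented in the legacy word.
         */
        bool ethtool_convert_link_mode_to_legacy_u32(u32 *legacy_u32,
                                                     const unsigned long *src)
        {
                *legacy_u32 = src[0];
                return find_next_bit(src, __ETHTOOL_LINK_MODE_MASK_NBITS,
                                     32) == __ETHTOOL_LINK_MODE_MASK_NBITS;
        }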
 
 /**
@@ -562,18 +566,24 @@ static void i40e_get_settings_link_up(struct i40e_hw *hw,
  * Reports link settings that can be determined when link is down
  **/
 static void i40e_get_settings_link_down(struct i40e_hw *hw,
-                                       struct ethtool_cmd *ecmd,
+                                       struct ethtool_link_ksettings *cmd,
                                        struct i40e_pf *pf)
 {
+       u32 supported, advertising;
+
        /* link is down and the driver needs to fall back on
         * supported phy types to figure out what info to display
         */
-       i40e_phy_type_to_ethtool(pf, &ecmd->supported,
-                                &ecmd->advertising);
+       i40e_phy_type_to_ethtool(pf, &supported, &advertising);
+
+       ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
+                                               supported);
+       ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
+                                               advertising);
 
        /* With no link, speed and duplex are unknown */
-       ethtool_cmd_speed_set(ecmd, SPEED_UNKNOWN);
-       ecmd->duplex = DUPLEX_UNKNOWN;
+       cmd->base.speed = SPEED_UNKNOWN;
+       cmd->base.duplex = DUPLEX_UNKNOWN;
 }
 
 /**
@@ -583,74 +593,85 @@ static void i40e_get_settings_link_down(struct i40e_hw *hw,
  *
  * Reports speed/duplex settings based on media_type
  **/
-static int i40e_get_settings(struct net_device *netdev,
-                            struct ethtool_cmd *ecmd)
+static int i40e_get_link_ksettings(struct net_device *netdev,
+                                  struct ethtool_link_ksettings *cmd)
 {
        struct i40e_netdev_priv *np = netdev_priv(netdev);
        struct i40e_pf *pf = np->vsi->back;
        struct i40e_hw *hw = &pf->hw;
        struct i40e_link_status *hw_link_info = &hw->phy.link_info;
        bool link_up = hw_link_info->link_info & I40E_AQ_LINK_UP;
+       u32 advertising;
 
        if (link_up)
-               i40e_get_settings_link_up(hw, ecmd, netdev, pf);
+               i40e_get_settings_link_up(hw, cmd, netdev, pf);
        else
-               i40e_get_settings_link_down(hw, ecmd, pf);
+               i40e_get_settings_link_down(hw, cmd, pf);
 
        /* Now set the settings that don't rely on link being up/down */
        /* Set autoneg settings */
-       ecmd->autoneg = ((hw_link_info->an_info & I40E_AQ_AN_COMPLETED) ?
+       cmd->base.autoneg = ((hw_link_info->an_info & I40E_AQ_AN_COMPLETED) ?
                          AUTONEG_ENABLE : AUTONEG_DISABLE);
 
        switch (hw->phy.media_type) {
        case I40E_MEDIA_TYPE_BACKPLANE:
-               ecmd->supported |= SUPPORTED_Autoneg |
-                                  SUPPORTED_Backplane;
-               ecmd->advertising |= ADVERTISED_Autoneg |
-                                    ADVERTISED_Backplane;
-               ecmd->port = PORT_NONE;
+               ethtool_link_ksettings_add_link_mode(cmd, supported,
+                                                    Autoneg);
+               ethtool_link_ksettings_add_link_mode(cmd, supported,
+                                                    Backplane);
+               ethtool_link_ksettings_add_link_mode(cmd, advertising,
+                                                    Autoneg);
+               ethtool_link_ksettings_add_link_mode(cmd, advertising,
+                                                    Backplane);
+               cmd->base.port = PORT_NONE;
                break;
        case I40E_MEDIA_TYPE_BASET:
-               ecmd->supported |= SUPPORTED_TP;
-               ecmd->advertising |= ADVERTISED_TP;
-               ecmd->port = PORT_TP;
+               ethtool_link_ksettings_add_link_mode(cmd, supported, TP);
+               ethtool_link_ksettings_add_link_mode(cmd, advertising, TP);
+               cmd->base.port = PORT_TP;
                break;
        case I40E_MEDIA_TYPE_DA:
        case I40E_MEDIA_TYPE_CX4:
-               ecmd->supported |= SUPPORTED_FIBRE;
-               ecmd->advertising |= ADVERTISED_FIBRE;
-               ecmd->port = PORT_DA;
+               ethtool_link_ksettings_add_link_mode(cmd, supported, FIBRE);
+               ethtool_link_ksettings_add_link_mode(cmd, advertising, FIBRE);
+               cmd->base.port = PORT_DA;
                break;
        case I40E_MEDIA_TYPE_FIBER:
-               ecmd->supported |= SUPPORTED_FIBRE;
-               ecmd->port = PORT_FIBRE;
+               ethtool_link_ksettings_add_link_mode(cmd, supported, FIBRE);
+               cmd->base.port = PORT_FIBRE;
                break;
        case I40E_MEDIA_TYPE_UNKNOWN:
        default:
-               ecmd->port = PORT_OTHER;
+               cmd->base.port = PORT_OTHER;
                break;
        }
 
-       /* Set transceiver */
-       ecmd->transceiver = XCVR_EXTERNAL;
-
        /* Set flow control settings */
-       ecmd->supported |= SUPPORTED_Pause;
+       ethtool_link_ksettings_add_link_mode(cmd, supported, Pause);
 
        switch (hw->fc.requested_mode) {
        case I40E_FC_FULL:
-               ecmd->advertising |= ADVERTISED_Pause;
+               ethtool_link_ksettings_add_link_mode(cmd, advertising,
+                                                    Pause);
                break;
        case I40E_FC_TX_PAUSE:
-               ecmd->advertising |= ADVERTISED_Asym_Pause;
+               ethtool_link_ksettings_add_link_mode(cmd, advertising,
+                                                    Asym_Pause);
                break;
        case I40E_FC_RX_PAUSE:
-               ecmd->advertising |= (ADVERTISED_Pause |
-                                     ADVERTISED_Asym_Pause);
+               ethtool_link_ksettings_add_link_mode(cmd, advertising,
+                                                    Pause);
+               ethtool_link_ksettings_add_link_mode(cmd, advertising,
+                                                    Asym_Pause);
                break;
        default:
-               ecmd->advertising &= ~(ADVERTISED_Pause |
-                                      ADVERTISED_Asym_Pause);
+               ethtool_convert_link_mode_to_legacy_u32(
+                       &advertising, cmd->link_modes.advertising);
+
+               advertising &= ~(ADVERTISED_Pause | ADVERTISED_Asym_Pause);
+
+               ethtool_convert_legacy_u32_to_link_mode(
+                       cmd->link_modes.advertising, advertising);
                break;
        }
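
The ethtool_link_ksettings_add_link_mode()/..._test_link_mode() helpers used throughout
this function are, under the assumed upstream definitions, thin macros over the
link-mode bitmaps:

        #define ethtool_link_ksettings_add_link_mode(ptr, name, mode)       \
                __set_bit(ETHTOOL_LINK_MODE_ ## mode ## _BIT,               \
                          (ptr)->link_modes.name)

        #define ethtool_link_ksettings_test_link_mode(ptr, name, mode)      \
                test_bit(ETHTOOL_LINK_MODE_ ## mode ## _BIT,                \
                         (ptr)->link_modes.name)

so e.g. ethtool_link_ksettings_add_link_mode(cmd, supported, Pause) sets
ETHTOOL_LINK_MODE_Pause_BIT in cmd->link_modes.supported.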
 
@@ -664,8 +685,8 @@ static int i40e_get_settings(struct net_device *netdev,
  *
  * Set speed/duplex per media_types advertised/forced
  **/
-static int i40e_set_settings(struct net_device *netdev,
-                            struct ethtool_cmd *ecmd)
+static int i40e_set_link_ksettings(struct net_device *netdev,
+                                  const struct ethtool_link_ksettings *cmd)
 {
        struct i40e_netdev_priv *np = netdev_priv(netdev);
        struct i40e_aq_get_phy_abilities_resp abilities;
@@ -673,12 +694,14 @@ static int i40e_set_settings(struct net_device *netdev,
        struct i40e_pf *pf = np->vsi->back;
        struct i40e_vsi *vsi = np->vsi;
        struct i40e_hw *hw = &pf->hw;
-       struct ethtool_cmd safe_ecmd;
+       struct ethtool_link_ksettings safe_cmd;
+       struct ethtool_link_ksettings copy_cmd;
        i40e_status status = 0;
        bool change = false;
        int err = 0;
-       u8 autoneg;
+       u32 autoneg;
        u32 advertise;
+       u32 tmp;
 
        /* Changing port settings is not supported if this isn't the
         * port's controlling PF
@@ -706,23 +729,31 @@ static int i40e_set_settings(struct net_device *netdev,
                return -EOPNOTSUPP;
        }
 
+       /* copy the cmd to copy_cmd to avoid modifying the original */
+       memcpy(&copy_cmd, cmd, sizeof(struct ethtool_link_ksettings));
+
        /* get our own copy of the bits to check against */
-       memset(&safe_ecmd, 0, sizeof(struct ethtool_cmd));
-       i40e_get_settings(netdev, &safe_ecmd);
+       memset(&safe_cmd, 0, sizeof(struct ethtool_link_ksettings));
+       i40e_get_link_ksettings(netdev, &safe_cmd);
 
-       /* save autoneg and speed out of ecmd */
-       autoneg = ecmd->autoneg;
-       advertise = ecmd->advertising;
+       /* save autoneg and speed out of cmd */
+       autoneg = cmd->base.autoneg;
+       ethtool_convert_link_mode_to_legacy_u32(&advertise,
+                                               cmd->link_modes.advertising);
 
        /* set autoneg and speed back to what they currently are */
-       ecmd->autoneg = safe_ecmd.autoneg;
-       ecmd->advertising = safe_ecmd.advertising;
+       copy_cmd.base.autoneg = safe_cmd.base.autoneg;
+       ethtool_convert_link_mode_to_legacy_u32(
+               &tmp, safe_cmd.link_modes.advertising);
+       ethtool_convert_legacy_u32_to_link_mode(
+               copy_cmd.link_modes.advertising, tmp);
+
+       copy_cmd.base.cmd = safe_cmd.base.cmd;
 
-       ecmd->cmd = safe_ecmd.cmd;
-       /* If ecmd and safe_ecmd are not the same now, then they are
+       /* If copy_cmd and safe_cmd are not the same now, then they are
         * trying to set something that we do not support
         */
-       if (memcmp(ecmd, &safe_ecmd, sizeof(struct ethtool_cmd)))
+       if (memcmp(&copy_cmd, &safe_cmd, sizeof(struct ethtool_link_ksettings)))
                return -EOPNOTSUPP;
 
        while (test_bit(__I40E_CONFIG_BUSY, &vsi->state))
@@ -745,7 +776,8 @@ static int i40e_set_settings(struct net_device *netdev,
                /* If autoneg was not already enabled */
                if (!(hw->phy.link_info.an_info & I40E_AQ_AN_COMPLETED)) {
                        /* If autoneg is not supported, return error */
-                       if (!(safe_ecmd.supported & SUPPORTED_Autoneg)) {
+                       if (!ethtool_link_ksettings_test_link_mode(
+                                   &safe_cmd, supported, Autoneg)) {
                                netdev_info(netdev, "Autoneg not supported on this phy\n");
                                return -EINVAL;
                        }
@@ -760,7 +792,8 @@ static int i40e_set_settings(struct net_device *netdev,
                        /* If autoneg is supported 10GBASE_T is the only PHY
                         * that can disable it, so otherwise return error
                         */
-                       if (safe_ecmd.supported & SUPPORTED_Autoneg &&
+                       if (ethtool_link_ksettings_test_link_mode(
+                                   &safe_cmd, supported, Autoneg) &&
                            hw->phy.link_info.phy_type !=
                            I40E_PHY_TYPE_10GBASE_T) {
                                netdev_info(netdev, "Autoneg cannot be disabled on this phy\n");
@@ -773,7 +806,9 @@ static int i40e_set_settings(struct net_device *netdev,
                }
        }
 
-       if (advertise & ~safe_ecmd.supported)
+       ethtool_convert_link_mode_to_legacy_u32(&tmp,
+                                               safe_cmd.link_modes.supported);
+       if (advertise & ~tmp)
                return -EINVAL;
 
        if (advertise & ADVERTISED_100baseT_Full)
@@ -1165,6 +1200,11 @@ static int i40e_get_eeprom_len(struct net_device *netdev)
        struct i40e_hw *hw = &np->vsi->back->hw;
        u32 val;
 
+#define X722_EEPROM_SCOPE_LIMIT 0x5B9FFF
+       if (hw->mac.type == I40E_MAC_X722) {
+               val = X722_EEPROM_SCOPE_LIMIT + 1;
+               return val;
+       }
        val = (rd32(hw, I40E_GLPCI_LBARCTRL)
                & I40E_GLPCI_LBARCTRL_FL_SIZE_MASK)
                >> I40E_GLPCI_LBARCTRL_FL_SIZE_SHIFT;
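
(For reference: X722_EEPROM_SCOPE_LIMIT + 1 = 0x5BA000 bytes, roughly 5.7 MiB, so the
X722 reports a fixed EEPROM length instead of deriving it from the I40E_GLPCI_LBARCTRL
flash-size field used below.)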
@@ -1483,13 +1523,6 @@ static void i40e_get_ethtool_stats(struct net_device *netdev,
                data[i++] = (i40e_gstrings_misc_stats[j].sizeof_stat ==
                            sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
        }
-#ifdef I40E_FCOE
-       for (j = 0; j < I40E_FCOE_STATS_LEN; j++) {
-               p = (char *)vsi + i40e_gstrings_fcoe_stats[j].stat_offset;
-               data[i++] = (i40e_gstrings_fcoe_stats[j].sizeof_stat ==
-                       sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
-       }
-#endif
        rcu_read_lock();
        for (j = 0; j < vsi->num_queue_pairs; j++) {
                tx_ring = ACCESS_ONCE(vsi->tx_rings[j]);
@@ -1577,13 +1610,6 @@ static void i40e_get_strings(struct net_device *netdev, u32 stringset,
                                 i40e_gstrings_misc_stats[i].stat_string);
                        p += ETH_GSTRING_LEN;
                }
-#ifdef I40E_FCOE
-               for (i = 0; i < I40E_FCOE_STATS_LEN; i++) {
-                       snprintf(p, ETH_GSTRING_LEN, "%s",
-                                i40e_gstrings_fcoe_stats[i].stat_string);
-                       p += ETH_GSTRING_LEN;
-               }
-#endif
                for (i = 0; i < vsi->num_queue_pairs; i++) {
                        snprintf(p, ETH_GSTRING_LEN, "tx-%d.tx_packets", i);
                        p += ETH_GSTRING_LEN;
@@ -1648,12 +1674,18 @@ static void i40e_get_strings(struct net_device *netdev, u32 stringset,
                /* BUG_ON(p - data != I40E_STATS_LEN * ETH_GSTRING_LEN); */
                break;
        case ETH_SS_PRIV_FLAGS:
-               memcpy(data, i40e_priv_flags_strings,
-                      I40E_PRIV_FLAGS_STR_LEN * ETH_GSTRING_LEN);
-               data += I40E_PRIV_FLAGS_STR_LEN * ETH_GSTRING_LEN;
-               if (pf->hw.pf_id == 0)
-                       memcpy(data, i40e_gl_priv_flags_strings,
-                              I40E_GL_PRIV_FLAGS_STR_LEN * ETH_GSTRING_LEN);
+               for (i = 0; i < I40E_PRIV_FLAGS_STR_LEN; i++) {
+                       snprintf(p, ETH_GSTRING_LEN, "%s",
+                                i40e_gstrings_priv_flags[i].flag_string);
+                       p += ETH_GSTRING_LEN;
+               }
+               if (pf->hw.pf_id != 0)
+                       break;
+               for (i = 0; i < I40E_GL_PRIV_FLAGS_STR_LEN; i++) {
+                       snprintf(p, ETH_GSTRING_LEN, "%s",
+                                i40e_gl_gstrings_priv_flags[i].flag_string);
+                       p += ETH_GSTRING_LEN;
+               }
                break;
        default:
                break;
@@ -1819,7 +1851,7 @@ static void i40e_diag_test(struct net_device *netdev,
                         * link then the following link test would have
                         * to be moved to before the reset
                         */
-                       i40e_do_reset(pf, BIT(__I40E_PF_RESET_REQUESTED));
+                       i40e_do_reset(pf, BIT(__I40E_PF_RESET_REQUESTED), true);
 
                if (i40e_link_test(netdev, &data[I40E_ETH_TEST_LINK]))
                        eth_test->flags |= ETH_TEST_FL_FAILED;
@@ -1835,7 +1867,7 @@ static void i40e_diag_test(struct net_device *netdev,
                        eth_test->flags |= ETH_TEST_FL_FAILED;
 
                clear_bit(__I40E_TESTING, &pf->state);
-               i40e_do_reset(pf, BIT(__I40E_PF_RESET_REQUESTED));
+               i40e_do_reset(pf, BIT(__I40E_PF_RESET_REQUESTED), true);
 
                if (if_running)
                        i40e_open(netdev);
@@ -2284,6 +2316,102 @@ static int i40e_get_rss_hash_opts(struct i40e_pf *pf, struct ethtool_rxnfc *cmd)
        return 0;
 }
 
+/**
+ * i40e_check_mask - Check whether a mask field is set
+ * @mask: the full mask value
+ * @field: mask of the field to check
+ *
+ * If the given mask is fully set, return a positive value. If the mask for the
+ * field is fully unset, return zero. Otherwise return a negative error code.
+ **/
+static int i40e_check_mask(u64 mask, u64 field)
+{
+       u64 value = mask & field;
+
+       if (value == field)
+               return 1;
+       else if (!value)
+               return 0;
+       else
+               return -1;
+}
+
+/**
+ * i40e_parse_rx_flow_user_data - Deconstruct user-defined data
+ * @fsp: pointer to rx flow specification
+ * @data: pointer to userdef data structure for storage
+ *
+ * Read the user-defined data and deconstruct the value into a structure. No
+ * other code should read the user-defined data, so as to ensure that every
+ * place consistently reads the value correctly.
+ *
+ * The user-defined field is a 64-bit big-endian value, which we
+ * deconstruct by reading bits or bit fields from it. Single bit flags shall
+ * be defined starting from the highest bits, while small bit field values
+ * shall be defined starting from the lowest bits.
+ *
+ * Returns 0 if the data is valid, and non-zero if the userdef data is invalid
+ * and the filter should be rejected. The data structure will always be
+ * modified even if FLOW_EXT is not set.
+ *
+ **/
+static int i40e_parse_rx_flow_user_data(struct ethtool_rx_flow_spec *fsp,
+                                       struct i40e_rx_flow_userdef *data)
+{
+       u64 value, mask;
+       int valid;
+
+       /* Zero memory first so it's always consistent. */
+       memset(data, 0, sizeof(*data));
+
+       if (!(fsp->flow_type & FLOW_EXT))
+               return 0;
+
+       value = be64_to_cpu(*((__be64 *)fsp->h_ext.data));
+       mask = be64_to_cpu(*((__be64 *)fsp->m_ext.data));
+
+#define I40E_USERDEF_FLEX_WORD         GENMASK_ULL(15, 0)
+#define I40E_USERDEF_FLEX_OFFSET       GENMASK_ULL(31, 16)
+#define I40E_USERDEF_FLEX_FILTER       GENMASK_ULL(31, 0)
+
+       valid = i40e_check_mask(mask, I40E_USERDEF_FLEX_FILTER);
+       if (valid < 0) {
+               return -EINVAL;
+       } else if (valid) {
+               data->flex_word = value & I40E_USERDEF_FLEX_WORD;
+               data->flex_offset =
+                       (value & I40E_USERDEF_FLEX_OFFSET) >> 16;
+               data->flex_filter = true;
+       }
+
+       return 0;
+}
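
Bits 15:0 of the (host-order) user-def value therefore carry the 16-bit flex word to
match, and bits 31:16 the byte offset of that word within the packet payload. A hedged
illustration of composing such a value in a caller (the field and mask names are from
this patch; the surrounding code is hypothetical):

        /* Match flex word 0x1234 at payload byte offset 6 (offsets must be
         * even; see i40e_check_fdir_input_set() below).
         */
        u64 value = (6ULL << 16) | 0x1234;            /* 0x00061234 */
        u64 mask  = I40E_USERDEF_FLEX_FILTER;         /* bits 31:0 fully set */

        *(__be64 *)fsp->h_ext.data = cpu_to_be64(value);
        *(__be64 *)fsp->m_ext.data = cpu_to_be64(mask);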
+
+/**
+ * i40e_fill_rx_flow_user_data - Fill in user-defined data field
+ * @fsp: pointer to rx_flow specification
+ *
+ * Reads the userdef data structure and properly fills in the user defined
+ * fields of the rx_flow_spec.
+ **/
+static void i40e_fill_rx_flow_user_data(struct ethtool_rx_flow_spec *fsp,
+                                       struct i40e_rx_flow_userdef *data)
+{
+       u64 value = 0, mask = 0;
+
+       if (data->flex_filter) {
+               value |= data->flex_word;
+               value |= (u64)data->flex_offset << 16;
+               mask |= I40E_USERDEF_FLEX_FILTER;
+       }
+
+       if (value || mask)
+               fsp->flow_type |= FLOW_EXT;
+
+       *((__be64 *)fsp->h_ext.data) = cpu_to_be64(value);
+       *((__be64 *)fsp->m_ext.data) = cpu_to_be64(mask);
+}
+
 /**
  * i40e_get_ethtool_fdir_all - Populates the rule count of a command
  * @pf: Pointer to the physical function struct
@@ -2335,8 +2463,11 @@ static int i40e_get_ethtool_fdir_entry(struct i40e_pf *pf,
 {
        struct ethtool_rx_flow_spec *fsp =
                        (struct ethtool_rx_flow_spec *)&cmd->fs;
+       struct i40e_rx_flow_userdef userdef = {0};
        struct i40e_fdir_filter *rule = NULL;
        struct hlist_node *node2;
+       u64 input_set;
+       u16 index;
 
        hlist_for_each_entry_safe(rule, node2,
                                  &pf->fdir_filter_list, fdir_node) {
@@ -2359,8 +2490,48 @@ static int i40e_get_ethtool_fdir_entry(struct i40e_pf *pf,
         */
        fsp->h_u.tcp_ip4_spec.psrc = rule->dst_port;
        fsp->h_u.tcp_ip4_spec.pdst = rule->src_port;
-       fsp->h_u.tcp_ip4_spec.ip4src = rule->dst_ip[0];
-       fsp->h_u.tcp_ip4_spec.ip4dst = rule->src_ip[0];
+       fsp->h_u.tcp_ip4_spec.ip4src = rule->dst_ip;
+       fsp->h_u.tcp_ip4_spec.ip4dst = rule->src_ip;
+
+       switch (rule->flow_type) {
+       case SCTP_V4_FLOW:
+               index = I40E_FILTER_PCTYPE_NONF_IPV4_SCTP;
+               break;
+       case TCP_V4_FLOW:
+               index = I40E_FILTER_PCTYPE_NONF_IPV4_TCP;
+               break;
+       case UDP_V4_FLOW:
+               index = I40E_FILTER_PCTYPE_NONF_IPV4_UDP;
+               break;
+       case IP_USER_FLOW:
+               index = I40E_FILTER_PCTYPE_NONF_IPV4_OTHER;
+               break;
+       default:
+               /* If we have stored a filter with a flow type not listed here
+                * it is almost certainly a driver bug. WARN(), and then
+                * assign the input_set as if all fields are enabled to avoid
+                * reading unassigned memory.
+                */
+               WARN(1, "Missing input set index for flow_type %d\n",
+                    rule->flow_type);
+               input_set = 0xFFFFFFFFFFFFFFFFULL;
+               goto no_input_set;
+       }
+
+       input_set = i40e_read_fd_input_set(pf, index);
+
+no_input_set:
+       if (input_set & I40E_L3_SRC_MASK)
+               fsp->m_u.tcp_ip4_spec.ip4src = htonl(0xFFFFFFFF);
+
+       if (input_set & I40E_L3_DST_MASK)
+               fsp->m_u.tcp_ip4_spec.ip4dst = htonl(0xFFFFFFFF);
+
+       if (input_set & I40E_L4_SRC_MASK)
+               fsp->m_u.tcp_ip4_spec.psrc = htons(0xFFFF);
+
+       if (input_set & I40E_L4_DST_MASK)
+               fsp->m_u.tcp_ip4_spec.pdst = htons(0xFFFF);
 
        if (rule->dest_ctl == I40E_FILTER_PROGRAM_DESC_DEST_DROP_PACKET)
                fsp->ring_cookie = RX_CLS_FLOW_DISC;
@@ -2372,11 +2543,24 @@ static int i40e_get_ethtool_fdir_entry(struct i40e_pf *pf,
 
                vsi = i40e_find_vsi_from_id(pf, rule->dest_vsi);
                if (vsi && vsi->type == I40E_VSI_SRIOV) {
-                       fsp->h_ext.data[1] = htonl(vsi->vf_id);
-                       fsp->m_ext.data[1] = htonl(0x1);
+                       /* VFs are zero-indexed by the driver, but ethtool
+                        * expects them to be one-indexed, so add one here
+                        */
+                       u64 ring_vf = vsi->vf_id + 1;
+
+                       ring_vf <<= ETHTOOL_RX_FLOW_SPEC_RING_VF_OFF;
+                       fsp->ring_cookie |= ring_vf;
                }
        }
 
+       if (rule->flex_filter) {
+               userdef.flex_filter = true;
+               userdef.flex_word = be16_to_cpu(rule->flex_word);
+               userdef.flex_offset = rule->flex_offset;
+       }
+
+       i40e_fill_rx_flow_user_data(fsp, &userdef);
+
        return 0;
 }
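
ring_cookie thus carries both the queue and, for VF-bound filters, the one-indexed VF.
A sketch of the decode side, assuming the standard masks from
include/uapi/linux/ethtool.h:

        u64 cookie = fsp->ring_cookie;
        u32 ring = cookie & ETHTOOL_RX_FLOW_SPEC_RING;
        u8 vf = (cookie & ETHTOOL_RX_FLOW_SPEC_RING_VF) >>
                ETHTOOL_RX_FLOW_SPEC_RING_VF_OFF;     /* 0 means "PF/no VF" */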
 
@@ -2573,24 +2757,6 @@ static int i40e_set_rss_hash_opt(struct i40e_pf *pf, struct ethtool_rxnfc *nfc)
        return 0;
 }
 
-/**
- * i40e_match_fdir_input_set - Match a new filter against an existing one
- * @rule: The filter already added
- * @input: The new filter to comapre against
- *
- * Returns true if the two input set match
- **/
-static bool i40e_match_fdir_input_set(struct i40e_fdir_filter *rule,
-                                     struct i40e_fdir_filter *input)
-{
-       if ((rule->dst_ip[0] != input->dst_ip[0]) ||
-           (rule->src_ip[0] != input->src_ip[0]) ||
-           (rule->dst_port != input->dst_port) ||
-           (rule->src_port != input->src_port))
-               return false;
-       return true;
-}
-
 /**
  * i40e_update_ethtool_fdir_entry - Updates the fdir filter entry
  * @vsi: Pointer to the targeted VSI
@@ -2626,22 +2792,22 @@ static int i40e_update_ethtool_fdir_entry(struct i40e_vsi *vsi,
 
        /* if there is an old rule occupying our place remove it */
        if (rule && (rule->fd_id == sw_idx)) {
-               if (input && !i40e_match_fdir_input_set(rule, input))
-                       err = i40e_add_del_fdir(vsi, rule, false);
-               else if (!input)
-                       err = i40e_add_del_fdir(vsi, rule, false);
+               /* Remove this rule, since we're either deleting it, or
+                * replacing it.
+                */
+               err = i40e_add_del_fdir(vsi, rule, false);
                hlist_del(&rule->fdir_node);
                kfree(rule);
                pf->fdir_pf_active_filters--;
        }
 
-       /* If no input this was a delete, err should be 0 if a rule was
-        * successfully found and removed from the list else -EINVAL
+       /* If we weren't given an input, this is a delete, so just return the
+        * error code indicating if there was an entry at the requested slot
         */
        if (!input)
                return err;
 
-       /* initialize node and set software index */
+       /* Otherwise, install the new rule as requested */
        INIT_HLIST_NODE(&input->fdir_node);
 
        /* add filter to the list */
@@ -2657,6 +2823,69 @@ static int i40e_update_ethtool_fdir_entry(struct i40e_vsi *vsi,
        return 0;
 }
 
+/**
+ * i40e_prune_flex_pit_list - Cleanup unused entries in FLX_PIT table
+ * @pf: pointer to PF structure
+ *
+ * This function searches the list of filters and determines which FLX_PIT
+ * entries are still required. It will prune any entries which are no longer
+ * in use after the deletion.
+ **/
+static void i40e_prune_flex_pit_list(struct i40e_pf *pf)
+{
+       struct i40e_flex_pit *entry, *tmp;
+       struct i40e_fdir_filter *rule;
+
+       /* First, we'll check the l3 table */
+       list_for_each_entry_safe(entry, tmp, &pf->l3_flex_pit_list, list) {
+               bool found = false;
+
+               hlist_for_each_entry(rule, &pf->fdir_filter_list, fdir_node) {
+                       if (rule->flow_type != IP_USER_FLOW)
+                               continue;
+                       if (rule->flex_filter &&
+                           rule->flex_offset == entry->src_offset) {
+                               found = true;
+                               break;
+                       }
+               }
+
+               /* If we didn't find the filter, then we can prune this entry
+                * from the list.
+                */
+               if (!found) {
+                       list_del(&entry->list);
+                       kfree(entry);
+               }
+       }
+
+       /* Followed by the L4 table */
+       list_for_each_entry_safe(entry, tmp, &pf->l4_flex_pit_list, list) {
+               bool found = false;
+
+               hlist_for_each_entry(rule, &pf->fdir_filter_list, fdir_node) {
+                       /* Skip this filter if it's L3, since we already
+                        * checked those in the above loop
+                        */
+                       if (rule->flow_type == IP_USER_FLOW)
+                               continue;
+                       if (rule->flex_filter &&
+                           rule->flex_offset == entry->src_offset) {
+                               found = true;
+                               break;
+                       }
+               }
+
+               /* If we didn't find the filter, then we can prune this entry
+                * from the list.
+                */
+               if (!found) {
+                       list_del(&entry->list);
+                       kfree(entry);
+               }
+       }
+}
+
 /**
  * i40e_del_fdir_entry - Deletes a Flow Director filter entry
  * @vsi: Pointer to the targeted VSI
@@ -2684,10 +2913,690 @@ static int i40e_del_fdir_entry(struct i40e_vsi *vsi,
 
        ret = i40e_update_ethtool_fdir_entry(vsi, NULL, fsp->location, cmd);
 
+       i40e_prune_flex_pit_list(pf);
+
        i40e_fdir_check_and_reenable(pf);
        return ret;
 }
 
+/**
+ * i40e_unused_pit_index - Find an unused flexible PIT index
+ * @pf: the PF data structure
+ *
+ * Find the first unused flexible PIT index entry. We search both the L3 and
+ * L4 flexible PIT lists so that the returned index is unique and unused by
+ * either currently programmed L3 or L4 filters. We use a bit field as storage
+ * to track which indexes are already used.
+ **/
+static u8 i40e_unused_pit_index(struct i40e_pf *pf)
+{
+       unsigned long available_index = 0xFF;
+       struct i40e_flex_pit *entry;
+
+       /* We need to make sure that the new index isn't in use by either L3
+        * or L4 filters so that IP_USER_FLOW filters can program both L3 and
+        * L4 to use the same index.
+        */
+
+       list_for_each_entry(entry, &pf->l4_flex_pit_list, list)
+               clear_bit(entry->pit_index, &available_index);
+
+       list_for_each_entry(entry, &pf->l3_flex_pit_list, list)
+               clear_bit(entry->pit_index, &available_index);
+
+       return find_first_bit(&available_index, 8);
+}
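
The 8-bit word acts as a free map of PIT indexes 0-7. A standalone illustration of the
same pattern (hypothetical occupancy):

        unsigned long available_index = 0xFF;   /* indexes 0..7 start out free */
        clear_bit(0, &available_index);         /* taken by an L3 entry */
        clear_bit(3, &available_index);         /* taken by an L4 entry */
        /* find_first_bit() now returns 1, the lowest free index */
        u8 next = find_first_bit(&available_index, 8);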
+
+/**
+ * i40e_find_flex_offset - Find an existing flex src_offset
+ * @flex_pit_list: L3 or L4 flex PIT list
+ * @src_offset: new src_offset to find
+ *
+ * Searches the flex_pit_list for an existing src_offset. If the offset is
+ * found, its entry is returned. If it is not found, NULL is returned when
+ * there is still room to program it, or an ERR_PTR when the table is full.
+ **/
+static
+struct i40e_flex_pit *i40e_find_flex_offset(struct list_head *flex_pit_list,
+                                           u16 src_offset)
+{
+       struct i40e_flex_pit *entry;
+       int size = 0;
+
+       /* Search for the src_offset first. If we find a matching entry
+        * already programmed, we can simply re-use it.
+        */
+       list_for_each_entry(entry, flex_pit_list, list) {
+               size++;
+               if (entry->src_offset == src_offset)
+                       return entry;
+       }
+
+       /* If we haven't found an entry yet, then the provided src offset has
+        * not yet been programmed. We will program the src offset later on,
+        * but we need to indicate whether there is enough space to do so
+        * here. We'll make use of ERR_PTR for this purpose.
+        */
+       if (size >= I40E_FLEX_PIT_TABLE_SIZE)
+               return ERR_PTR(-ENOSPC);
+
+       return NULL;
+}
+
+/**
+ * i40e_add_flex_offset - Add src_offset to flex PIT table list
+ * @flex_pit_list: L3 or L4 flex PIT list
+ * @src_offset: new src_offset to add
+ * @pit_index: the PIT index to program
+ *
+ * This function programs the new src_offset to the list. It is expected that
+ * i40e_find_flex_offset has already been tried and returned NULL, indicating
+ * that this offset is not programmed, and that the list has enough space to
+ * store another offset.
+ *
+ * Returns 0 on success, and negative value on error.
+ **/
+static int i40e_add_flex_offset(struct list_head *flex_pit_list,
+                               u16 src_offset,
+                               u8 pit_index)
+{
+       struct i40e_flex_pit *new_pit, *entry;
+
+       new_pit = kzalloc(sizeof(*new_pit), GFP_KERNEL);
+       if (!new_pit)
+               return -ENOMEM;
+
+       new_pit->src_offset = src_offset;
+       new_pit->pit_index = pit_index;
+
+       /* We need to insert this item such that the list is sorted by
+        * src_offset in ascending order.
+        */
+       list_for_each_entry(entry, flex_pit_list, list) {
+               if (new_pit->src_offset < entry->src_offset) {
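+                       /* list_add_tail() on &entry->list links new_pit
+                        * immediately before entry, keeping the list
+                        * sorted by ascending src_offset.
+                        */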
+                       list_add_tail(&new_pit->list, &entry->list);
+                       return 0;
+               }
+
+               /* If we found an entry with our offset already programmed we
+                * can simply return here, after freeing the memory. However,
+                * if the pit_index does not match we need to report an error.
+                */
+               if (new_pit->src_offset == entry->src_offset) {
+                       int err = 0;
+
+                       /* If the PIT index is not the same we can't re-use
+                        * the entry, so we must report an error.
+                        */
+                       if (new_pit->pit_index != entry->pit_index)
+                               err = -EINVAL;
+
+                       kfree(new_pit);
+                       return err;
+               }
+       }
+
+       /* If we reached here, then we haven't yet added the item. This means
+        * that we should add the item at the end of the list.
+        */
+       list_add_tail(&new_pit->list, flex_pit_list);
+       return 0;
+}
+
+/**
+ * __i40e_reprogram_flex_pit - Re-program specific FLX_PIT table
+ * @pf: Pointer to the PF structure
+ * @flex_pit_list: list of flexible src offsets in use
+ * @flex_pit_start: index of the first entry for this section of the table
+ *
+ * In order to handle flexible data, the hardware uses a table of values
+ * called the FLX_PIT table. This table is used to indicate which sections of
+ * the input correspond to what PIT index values. Unfortunately, hardware is
+ * very restrictive about programming this table. Entries must be ordered by
+ * src_offset in ascending order, without duplicates. Additionally, unused
+ * entries must be set to the unused index value, and must have valid size and
+ * length according to the src_offset ordering.
+ *
+ * This function will reprogram the FLX_PIT register from a book-keeping
+ * structure that we guarantee is already ordered correctly, and has no more
+ * than 3 entries.
+ *
+ * To make things easier, we only support flexible values of one word length,
+ * rather than allowing variable length flexible values.
+ **/
+static void __i40e_reprogram_flex_pit(struct i40e_pf *pf,
+                                     struct list_head *flex_pit_list,
+                                     int flex_pit_start)
+{
+       struct i40e_flex_pit *entry = NULL;
+       u16 last_offset = 0;
+       int i = 0, j = 0;
+
+       /* First, loop over the list of flex PIT entries, and reprogram the
+        * registers.
+        */
+       list_for_each_entry(entry, flex_pit_list, list) {
+               /* We have to be careful when programming values for the
+                * largest SRC_OFFSET value. It is possible that adding
+                * additional empty values at the end would overflow the space
+                * for the SRC_OFFSET in the FLX_PIT register. To avoid this,
+                * we check here and add the empty values prior to adding the
+                * largest value.
+                *
+                * To determine this, we will use a loop from i+1 to 3, which
+                * will determine whether the unused entries would have valid
+                * SRC_OFFSET. Note that there cannot be extra entries past
+                * this value, because the only valid values would have been
+                * larger than I40E_MAX_FLEX_SRC_OFFSET, and thus would not
+                * have been added to the list in the first place.
+                */
+               for (j = i + 1; j < 3; j++) {
+                       u16 offset = entry->src_offset + j;
+                       int index = flex_pit_start + i;
+                       u32 value = I40E_FLEX_PREP_VAL(I40E_FLEX_DEST_UNUSED,
+                                                      1,
+                                                      offset - 3);
+
+                       if (offset > I40E_MAX_FLEX_SRC_OFFSET) {
+                               i40e_write_rx_ctl(&pf->hw,
+                                                 I40E_PRTQF_FLX_PIT(index),
+                                                 value);
+                               i++;
+                       }
+               }
+
+               /* Now, we can program the actual value into the table */
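+               /* Destination 50 + pit_index selects one of the flexible
+                * payload fields FLEX_50..FLEX_57, matching
+                * i40e_pit_index_to_mask() below.
+                */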
+               i40e_write_rx_ctl(&pf->hw,
+                                 I40E_PRTQF_FLX_PIT(flex_pit_start + i),
+                                 I40E_FLEX_PREP_VAL(entry->pit_index + 50,
+                                                    1,
+                                                    entry->src_offset));
+               i++;
+       }
+
+       /* In order to program the last entries in the table, we need to
+        * determine the valid offset. If the list is empty, we'll just start
+        * with 0. Otherwise, we'll start with the last item offset and add 1.
+        * This ensures that all entries have valid sizes. If we don't do this
+        * correctly, the hardware will disable flexible field parsing.
+        */
+       if (!list_empty(flex_pit_list))
+               last_offset = list_prev_entry(entry, list)->src_offset + 1;
+
+       for (; i < 3; i++, last_offset++) {
+               i40e_write_rx_ctl(&pf->hw,
+                                 I40E_PRTQF_FLX_PIT(flex_pit_start + i),
+                                 I40E_FLEX_PREP_VAL(I40E_FLEX_DEST_UNUSED,
+                                                    1,
+                                                    last_offset));
+       }
+}
+
+/**
+ * i40e_reprogram_flex_pit - Reprogram all FLX_PIT tables after input set change
+ * @pf: pointer to the PF structure
+ *
+ * This function reprograms both the L3 and L4 FLX_PIT tables. See the
+ * internal helper function for implementation details.
+ **/
+static void i40e_reprogram_flex_pit(struct i40e_pf *pf)
+{
+       __i40e_reprogram_flex_pit(pf, &pf->l3_flex_pit_list,
+                                 I40E_FLEX_PIT_IDX_START_L3);
+
+       __i40e_reprogram_flex_pit(pf, &pf->l4_flex_pit_list,
+                                 I40E_FLEX_PIT_IDX_START_L4);
+
+       /* We also need to program the L3 and L4 GLQF ORT register */
+       i40e_write_rx_ctl(&pf->hw,
+                         I40E_GLQF_ORT(I40E_L3_GLQF_ORT_IDX),
+                         I40E_ORT_PREP_VAL(I40E_FLEX_PIT_IDX_START_L3,
+                                           3, 1));
+
+       i40e_write_rx_ctl(&pf->hw,
+                         I40E_GLQF_ORT(I40E_L4_GLQF_ORT_IDX),
+                         I40E_ORT_PREP_VAL(I40E_FLEX_PIT_IDX_START_L4,
+                                           3, 1));
+}
+
+/**
+ * i40e_flow_str - Converts a flow_type into a human readable string
+ * @fsp: the flow specification to convert
+ *
+ * Currently only flow types we support are included here, and the string
+ * value attempts to match what ethtool would use to configure this flow type.
+ **/
+static const char *i40e_flow_str(struct ethtool_rx_flow_spec *fsp)
+{
+       switch (fsp->flow_type & ~FLOW_EXT) {
+       case TCP_V4_FLOW:
+               return "tcp4";
+       case UDP_V4_FLOW:
+               return "udp4";
+       case SCTP_V4_FLOW:
+               return "sctp4";
+       case IP_USER_FLOW:
+               return "ip4";
+       default:
+               return "unknown";
+       }
+}
+
+/**
+ * i40e_pit_index_to_mask - Return the FLEX mask for a given PIT index
+ * @pit_index: PIT index to convert
+ *
+ * Returns the mask for a given PIT index. Will return 0 if the pit_index is
+ * out of range.
+ **/
+static u64 i40e_pit_index_to_mask(int pit_index)
+{
+       switch (pit_index) {
+       case 0:
+               return I40E_FLEX_50_MASK;
+       case 1:
+               return I40E_FLEX_51_MASK;
+       case 2:
+               return I40E_FLEX_52_MASK;
+       case 3:
+               return I40E_FLEX_53_MASK;
+       case 4:
+               return I40E_FLEX_54_MASK;
+       case 5:
+               return I40E_FLEX_55_MASK;
+       case 6:
+               return I40E_FLEX_56_MASK;
+       case 7:
+               return I40E_FLEX_57_MASK;
+       default:
+               return 0;
+       }
+}
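
Because the eight FLEX masks are consecutive, an equivalent table-driven form (a
hypothetical alternative, not what the patch uses) would be:

        static const u64 i40e_flex_masks[] = {
                I40E_FLEX_50_MASK, I40E_FLEX_51_MASK, I40E_FLEX_52_MASK,
                I40E_FLEX_53_MASK, I40E_FLEX_54_MASK, I40E_FLEX_55_MASK,
                I40E_FLEX_56_MASK, I40E_FLEX_57_MASK,
        };

        static u64 i40e_pit_index_to_mask(int pit_index)
        {
                if (pit_index < 0 || pit_index >= ARRAY_SIZE(i40e_flex_masks))
                        return 0;
                return i40e_flex_masks[pit_index];
        }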
+
+/**
+ * i40e_print_input_set - Show changes between two input sets
+ * @vsi: the vsi being configured
+ * @old: the old input set
+ * @new: the new input set
+ *
+ * Print the difference between old and new input sets by showing which series
+ * of words are toggled on or off. Only displays the bits we actually support
+ * changing.
+ **/
+static void i40e_print_input_set(struct i40e_vsi *vsi, u64 old, u64 new)
+{
+       struct i40e_pf *pf = vsi->back;
+       bool old_value, new_value;
+       int i;
+
+       old_value = !!(old & I40E_L3_SRC_MASK);
+       new_value = !!(new & I40E_L3_SRC_MASK);
+       if (old_value != new_value)
+               netif_info(pf, drv, vsi->netdev, "L3 source address: %s -> %s\n",
+                          old_value ? "ON" : "OFF",
+                          new_value ? "ON" : "OFF");
+
+       old_value = !!(old & I40E_L3_DST_MASK);
+       new_value = !!(new & I40E_L3_DST_MASK);
+       if (old_value != new_value)
+               netif_info(pf, drv, vsi->netdev, "L3 destination address: %s -> %s\n",
+                          old_value ? "ON" : "OFF",
+                          new_value ? "ON" : "OFF");
+
+       old_value = !!(old & I40E_L4_SRC_MASK);
+       new_value = !!(new & I40E_L4_SRC_MASK);
+       if (old_value != new_value)
+               netif_info(pf, drv, vsi->netdev, "L4 source port: %s -> %s\n",
+                          old_value ? "ON" : "OFF",
+                          new_value ? "ON" : "OFF");
+
+       old_value = !!(old & I40E_L4_DST_MASK);
+       new_value = !!(new & I40E_L4_DST_MASK);
+       if (old_value != new_value)
+               netif_info(pf, drv, vsi->netdev, "L4 destination port: %s -> %s\n",
+                          old_value ? "ON" : "OFF",
+                          new_value ? "ON" : "OFF");
+
+       old_value = !!(old & I40E_VERIFY_TAG_MASK);
+       new_value = !!(new & I40E_VERIFY_TAG_MASK);
+       if (old_value != new_value)
+               netif_info(pf, drv, vsi->netdev, "SCTP verification tag: %s -> %s\n",
+                          old_value ? "ON" : "OFF",
+                          new_value ? "ON" : "OFF");
+
+       /* Show change of flexible filter entries */
+       for (i = 0; i < I40E_FLEX_INDEX_ENTRIES; i++) {
+               u64 flex_mask = i40e_pit_index_to_mask(i);
+
+               old_value = !!(old & flex_mask);
+               new_value = !!(new & flex_mask);
+               if (old_value != new_value)
+                       netif_info(pf, drv, vsi->netdev, "FLEX index %d: %s -> %s\n",
+                                  i,
+                                  old_value ? "ON" : "OFF",
+                                  new_value ? "ON" : "OFF");
+       }
+
+       netif_info(pf, drv, vsi->netdev, "  Current input set: %0llx\n",
+                  old);
+       netif_info(pf, drv, vsi->netdev, "Requested input set: %0llx\n",
+                  new);
+}
+
+/**
+ * i40e_check_fdir_input_set - Check that a given rx_flow_spec mask is valid
+ * @vsi: pointer to the targeted VSI
+ * @fsp: pointer to Rx flow specification
+ * @userdef: userdefined data from flow specification
+ *
+ * Ensures that a given ethtool_rx_flow_spec has a valid mask. Some support
+ * for partial matches exists with a few limitations. First, hardware only
+ * supports masking by word boundary (2 bytes) and not per individual bit.
+ * Second, hardware is limited to using one mask for a flow type and cannot
+ * use a separate mask for each filter.
+ *
+ * To support these limitations, if we already have a configured filter for
+ * the specified type, this function enforces that new filters of the type
+ * match the configured input set. Otherwise, if we do not have a filter of
+ * the specified type, we allow the input set to be updated to match the
+ * desired filter.
+ *
+ * To help administrators understand why a filter was rejected, we print a
+ * diagnostic message showing how the input set would change, warning that
+ * any preexisting filters must be deleted first if required.
+ *
+ * Returns 0 on successful input set match, and a negative return code on
+ * failure.
+ **/
+static int i40e_check_fdir_input_set(struct i40e_vsi *vsi,
+                                    struct ethtool_rx_flow_spec *fsp,
+                                    struct i40e_rx_flow_userdef *userdef)
+{
+       struct i40e_pf *pf = vsi->back;
+       struct ethtool_tcpip4_spec *tcp_ip4_spec;
+       struct ethtool_usrip4_spec *usr_ip4_spec;
+       u64 current_mask, new_mask;
+       bool new_flex_offset = false;
+       bool flex_l3 = false;
+       u16 *fdir_filter_count;
+       u16 index, src_offset = 0;
+       u8 pit_index = 0;
+       int err;
+
+       switch (fsp->flow_type & ~FLOW_EXT) {
+       case SCTP_V4_FLOW:
+               index = I40E_FILTER_PCTYPE_NONF_IPV4_SCTP;
+               fdir_filter_count = &pf->fd_sctp4_filter_cnt;
+               break;
+       case TCP_V4_FLOW:
+               index = I40E_FILTER_PCTYPE_NONF_IPV4_TCP;
+               fdir_filter_count = &pf->fd_tcp4_filter_cnt;
+               break;
+       case UDP_V4_FLOW:
+               index = I40E_FILTER_PCTYPE_NONF_IPV4_UDP;
+               fdir_filter_count = &pf->fd_udp4_filter_cnt;
+               break;
+       case IP_USER_FLOW:
+               index = I40E_FILTER_PCTYPE_NONF_IPV4_OTHER;
+               fdir_filter_count = &pf->fd_ip4_filter_cnt;
+               flex_l3 = true;
+               break;
+       default:
+               return -EOPNOTSUPP;
+       }
+
+       /* Read the current input set from register memory. */
+       current_mask = i40e_read_fd_input_set(pf, index);
+       new_mask = current_mask;
+
+       /* Determine, if any, the required changes to the input set in order
+        * to support the provided mask.
+        *
+        * Hardware only supports masking at word (2 byte) granularity and does
+        * not support full bitwise masking. This implementation simplifies
+        * even further and only supports fully enabled or fully disabled
+        * masks for each field, even though we could split the ip4src and
+        * ip4dst fields.
+        */
+       switch (fsp->flow_type & ~FLOW_EXT) {
+       case SCTP_V4_FLOW:
+               new_mask &= ~I40E_VERIFY_TAG_MASK;
+               /* Fall through */
+       case TCP_V4_FLOW:
+       case UDP_V4_FLOW:
+               tcp_ip4_spec = &fsp->m_u.tcp_ip4_spec;
+
+               /* IPv4 source address */
+               if (tcp_ip4_spec->ip4src == htonl(0xFFFFFFFF))
+                       new_mask |= I40E_L3_SRC_MASK;
+               else if (!tcp_ip4_spec->ip4src)
+                       new_mask &= ~I40E_L3_SRC_MASK;
+               else
+                       return -EOPNOTSUPP;
+
+               /* IPv4 destination address */
+               if (tcp_ip4_spec->ip4dst == htonl(0xFFFFFFFF))
+                       new_mask |= I40E_L3_DST_MASK;
+               else if (!tcp_ip4_spec->ip4dst)
+                       new_mask &= ~I40E_L3_DST_MASK;
+               else
+                       return -EOPNOTSUPP;
+
+               /* L4 source port */
+               if (tcp_ip4_spec->psrc == htons(0xFFFF))
+                       new_mask |= I40E_L4_SRC_MASK;
+               else if (!tcp_ip4_spec->psrc)
+                       new_mask &= ~I40E_L4_SRC_MASK;
+               else
+                       return -EOPNOTSUPP;
+
+               /* L4 destination port */
+               if (tcp_ip4_spec->pdst == htons(0xFFFF))
+                       new_mask |= I40E_L4_DST_MASK;
+               else if (!tcp_ip4_spec->pdst)
+                       new_mask &= ~I40E_L4_DST_MASK;
+               else
+                       return -EOPNOTSUPP;
+
+               /* Filtering on Type of Service is not supported. */
+               if (tcp_ip4_spec->tos)
+                       return -EOPNOTSUPP;
+
+               break;
+       case IP_USER_FLOW:
+               usr_ip4_spec = &fsp->m_u.usr_ip4_spec;
+
+               /* IPv4 source address */
+               if (usr_ip4_spec->ip4src == htonl(0xFFFFFFFF))
+                       new_mask |= I40E_L3_SRC_MASK;
+               else if (!usr_ip4_spec->ip4src)
+                       new_mask &= ~I40E_L3_SRC_MASK;
+               else
+                       return -EOPNOTSUPP;
+
+               /* IPv4 destination address */
+               if (usr_ip4_spec->ip4dst == htonl(0xFFFFFFFF))
+                       new_mask |= I40E_L3_DST_MASK;
+               else if (!usr_ip4_spec->ip4dst)
+                       new_mask &= ~I40E_L3_DST_MASK;
+               else
+                       return -EOPNOTSUPP;
+
+               /* First 4 bytes of L4 header */
+               if (usr_ip4_spec->l4_4_bytes == htonl(0xFFFFFFFF))
+                       new_mask |= I40E_L4_SRC_MASK | I40E_L4_DST_MASK;
+               else if (!usr_ip4_spec->l4_4_bytes)
+                       new_mask &= ~(I40E_L4_SRC_MASK | I40E_L4_DST_MASK);
+               else
+                       return -EOPNOTSUPP;
+
+               /* Filtering on Type of Service is not supported. */
+               if (usr_ip4_spec->tos)
+                       return -EOPNOTSUPP;
+
+               /* Filtering on IP version is not supported */
+               if (usr_ip4_spec->ip_ver)
+                       return -EINVAL;
+
+               /* Filtering on L4 protocol is not supported */
+               if (usr_ip4_spec->proto)
+                       return -EINVAL;
+
+               break;
+       default:
+               return -EOPNOTSUPP;
+       }
+
+       /* First, clear all flexible filter entries */
+       new_mask &= ~I40E_FLEX_INPUT_MASK;
+
+       /* If we have a flexible filter, try to add this offset to the correct
+        * flexible filter PIT list. Once finished, we can update the mask.
+        * If the src_offset changed, we will get a new mask value which will
+        * trigger an input set change.
+        */
+       if (userdef->flex_filter) {
+               struct i40e_flex_pit *l3_flex_pit = NULL, *flex_pit = NULL;
+
+               /* Flexible offset must be even, since the flexible payload
+                * must be aligned on a 2-byte boundary.
+                */
+               if (userdef->flex_offset & 0x1) {
+                       dev_warn(&pf->pdev->dev,
+                                "Flexible data offset must be 2-byte aligned\n");
+                       return -EINVAL;
+               }
+
+               src_offset = userdef->flex_offset >> 1;
+
+               /* FLX_PIT source offset value is only so large */
+               if (src_offset > I40E_MAX_FLEX_SRC_OFFSET) {
+                       dev_warn(&pf->pdev->dev,
+                                "Flexible data must reside within first 64 bytes of the packet payload\n");
+                       return -EINVAL;
+               }
+
+               /* See if this offset has already been programmed. If we get
+                * an ERR_PTR, then the filter is not safe to add. Otherwise,
+                * if we get a NULL pointer, this means we will need to add
+                * the offset.
+                */
+               flex_pit = i40e_find_flex_offset(&pf->l4_flex_pit_list,
+                                                src_offset);
+               if (IS_ERR(flex_pit))
+                       return PTR_ERR(flex_pit);
+
+               /* IP_USER_FLOW filters match both L4 (ICMP) and L3 (unknown)
+                * packet types, and thus we need to program both L3 and L4
+                * flexible values. These must have identical flexible index,
+                * as otherwise we can't correctly program the input set. So
+                * we'll find both an L3 and L4 index and make sure they are
+                * the same.
+                */
+               if (flex_l3) {
+                       l3_flex_pit =
+                               i40e_find_flex_offset(&pf->l3_flex_pit_list,
+                                                     src_offset);
+                       if (IS_ERR(l3_flex_pit))
+                               return PTR_ERR(l3_flex_pit);
+
+                       if (flex_pit) {
+                               /* If we already had a matching L4 entry, we
+                                * need to make sure that the L3 entry we
+                                * obtained uses the same index.
+                                */
+                               if (l3_flex_pit) {
+                                       if (l3_flex_pit->pit_index !=
+                                           flex_pit->pit_index) {
+                                               return -EINVAL;
+                                       }
+                               } else {
+                                       new_flex_offset = true;
+                               }
+                       } else {
+                               flex_pit = l3_flex_pit;
+                       }
+               }
+
+               /* If we didn't find an existing flex offset, we need to
+                * program a new one. However, we don't immediately program it
+                * here because we will wait to program until after we check
+                * that it is safe to change the input set.
+                */
+               if (!flex_pit) {
+                       new_flex_offset = true;
+                       pit_index = i40e_unused_pit_index(pf);
+               } else {
+                       pit_index = flex_pit->pit_index;
+               }
+
+               /* Update the mask with the new offset */
+               new_mask |= i40e_pit_index_to_mask(pit_index);
+       }
+
+       /* If the mask and flexible filter offsets for this filter match the
+        * currently programmed values we don't need any input set change, so
+        * this filter is safe to install.
+        */
+       if (new_mask == current_mask && !new_flex_offset)
+               return 0;
+
+       netif_info(pf, drv, vsi->netdev, "Input set change requested for %s flows:\n",
+                  i40e_flow_str(fsp));
+       i40e_print_input_set(vsi, current_mask, new_mask);
+       if (new_flex_offset) {
+               netif_info(pf, drv, vsi->netdev, "FLEX index %d: Offset -> %d",
+                          pit_index, src_offset);
+       }
+
+       /* Hardware input sets are global across multiple ports, so even the
+        * main port cannot change them when in MFP mode as this would impact
+        * any filters on the other ports.
+        */
+       if (pf->flags & I40E_FLAG_MFP_ENABLED) {
+               netif_err(pf, drv, vsi->netdev, "Cannot change Flow Director input sets while MFP is enabled\n");
+               return -EOPNOTSUPP;
+       }
+
+       /* This filter requires us to update the input set. However, hardware
+        * only supports one input set per flow type, and does not support
+        * separate masks for each filter. This means that we can only support
+        * a single mask for all filters of a specific type.
+        *
+        * If we have preexisting filters, they obviously depend on the
+        * currently programmed input set. Display a diagnostic message in this
+        * case explaining why the filter could not be accepted.
+        */
+       if (*fdir_filter_count) {
+               netif_err(pf, drv, vsi->netdev, "Cannot change input set for %s flows until %d preexisting filters are removed\n",
+                         i40e_flow_str(fsp),
+                         *fdir_filter_count);
+               return -EOPNOTSUPP;
+       }
+
+       i40e_write_fd_input_set(pf, index, new_mask);
+
+       /* Add the new offset and update table, if necessary */
+       if (new_flex_offset) {
+               err = i40e_add_flex_offset(&pf->l4_flex_pit_list, src_offset,
+                                          pit_index);
+               if (err)
+                       return err;
+
+               if (flex_l3) {
+                       err = i40e_add_flex_offset(&pf->l3_flex_pit_list,
+                                                  src_offset,
+                                                  pit_index);
+                       if (err)
+                               return err;
+               }
+
+               i40e_reprogram_flex_pit(pf);
+       }
+
+       return 0;
+}
+
 /**
  * i40e_add_fdir_ethtool - Add/Remove Flow Director filters
  * @vsi: pointer to the targeted VSI
@@ -2699,11 +3608,13 @@ static int i40e_del_fdir_entry(struct i40e_vsi *vsi,
 static int i40e_add_fdir_ethtool(struct i40e_vsi *vsi,
                                 struct ethtool_rxnfc *cmd)
 {
+       struct i40e_rx_flow_userdef userdef;
        struct ethtool_rx_flow_spec *fsp;
        struct i40e_fdir_filter *input;
+       u16 dest_vsi = 0, q_index = 0;
        struct i40e_pf *pf;
        int ret = -EINVAL;
-       u16 vf_id;
+       u8 dest_ctl;
 
        if (!vsi)
                return -EINVAL;
@@ -2712,7 +3623,7 @@ static int i40e_add_fdir_ethtool(struct i40e_vsi *vsi,
        if (!(pf->flags & I40E_FLAG_FD_SB_ENABLED))
                return -EOPNOTSUPP;
 
-       if (pf->auto_disable_flags & I40E_FLAG_FD_SB_ENABLED)
+       if (pf->hw_disabled_flags & I40E_FLAG_FD_SB_ENABLED)
                return -ENOSPC;
 
        if (test_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state) ||
@@ -2724,14 +3635,49 @@ static int i40e_add_fdir_ethtool(struct i40e_vsi *vsi,
 
        fsp = (struct ethtool_rx_flow_spec *)&cmd->fs;
 
+       /* Parse the user-defined field */
+       if (i40e_parse_rx_flow_user_data(fsp, &userdef))
+               return -EINVAL;
+
+       /* Extended MAC field is not supported */
+       if (fsp->flow_type & FLOW_MAC_EXT)
+               return -EINVAL;
+
+       ret = i40e_check_fdir_input_set(vsi, fsp, &userdef);
+       if (ret)
+               return ret;
+
        if (fsp->location >= (pf->hw.func_caps.fd_filters_best_effort +
                              pf->hw.func_caps.fd_filters_guaranteed)) {
                return -EINVAL;
        }
 
-       if ((fsp->ring_cookie != RX_CLS_FLOW_DISC) &&
-           (fsp->ring_cookie >= vsi->num_queue_pairs))
-               return -EINVAL;
+       /* ring_cookie is either the drop index, or is a mask of the queue
+        * index and VF id we wish to target.
+        */
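+       /* Hypothetical example: a ring_cookie of 0x0000000100000004 has a
+        * one-based VF field of 1 and a ring index of 4, which targets
+        * queue 4 of VF 0 once the VF field is decremented below.
+        */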
+       if (fsp->ring_cookie == RX_CLS_FLOW_DISC) {
+               dest_ctl = I40E_FILTER_PROGRAM_DESC_DEST_DROP_PACKET;
+       } else {
+               u32 ring = ethtool_get_flow_spec_ring(fsp->ring_cookie);
+               u8 vf = ethtool_get_flow_spec_ring_vf(fsp->ring_cookie);
+
+               if (!vf) {
+                       if (ring >= vsi->num_queue_pairs)
+                               return -EINVAL;
+                       dest_vsi = vsi->id;
+               } else {
+                       /* VFs are zero-indexed, so we subtract one here */
+                       vf--;
+
+                       if (vf >= pf->num_alloc_vfs)
+                               return -EINVAL;
+                       if (ring >= pf->vf[vf].num_queue_pairs)
+                               return -EINVAL;
+                       dest_vsi = pf->vf[vf].lan_vsi_id;
+               }
+               dest_ctl = I40E_FILTER_PROGRAM_DESC_DEST_DIRECT_PACKET_QINDEX;
+               q_index = ring;
+       }
 
        input = kzalloc(sizeof(*input), GFP_KERNEL);
 
@@ -2739,20 +3685,14 @@ static int i40e_add_fdir_ethtool(struct i40e_vsi *vsi,
                return -ENOMEM;
 
        input->fd_id = fsp->location;
-
-       if (fsp->ring_cookie == RX_CLS_FLOW_DISC)
-               input->dest_ctl = I40E_FILTER_PROGRAM_DESC_DEST_DROP_PACKET;
-       else
-               input->dest_ctl =
-                            I40E_FILTER_PROGRAM_DESC_DEST_DIRECT_PACKET_QINDEX;
-
-       input->q_index = fsp->ring_cookie;
-       input->flex_off = 0;
-       input->pctype = 0;
-       input->dest_vsi = vsi->id;
+       input->q_index = q_index;
+       input->dest_vsi = dest_vsi;
+       input->dest_ctl = dest_ctl;
        input->fd_status = I40E_FILTER_PROGRAM_DESC_FD_STATUS_FD_ID;
        input->cnt_index  = I40E_FD_SB_STAT_IDX(pf->hw.pf_id);
-       input->flow_type = fsp->flow_type;
+       input->dst_ip = fsp->h_u.tcp_ip4_spec.ip4src;
+       input->src_ip = fsp->h_u.tcp_ip4_spec.ip4dst;
+       input->flow_type = fsp->flow_type & ~FLOW_EXT;
        input->ip4_proto = fsp->h_u.usr_ip4_spec.proto;
 
        /* Reverse the src and dest notion, since the HW expects them to be from
@@ -2760,33 +3700,29 @@ static int i40e_add_fdir_ethtool(struct i40e_vsi *vsi,
         */
        input->dst_port = fsp->h_u.tcp_ip4_spec.psrc;
        input->src_port = fsp->h_u.tcp_ip4_spec.pdst;
-       input->dst_ip[0] = fsp->h_u.tcp_ip4_spec.ip4src;
-       input->src_ip[0] = fsp->h_u.tcp_ip4_spec.ip4dst;
-
-       if (ntohl(fsp->m_ext.data[1])) {
-               vf_id = ntohl(fsp->h_ext.data[1]);
-               if (vf_id >= pf->num_alloc_vfs) {
-                       netif_info(pf, drv, vsi->netdev,
-                                  "Invalid VF id %d\n", vf_id);
-                       goto free_input;
-               }
-               /* Find vsi id from vf id and override dest vsi */
-               input->dest_vsi = pf->vf[vf_id].lan_vsi_id;
-               if (input->q_index >= pf->vf[vf_id].num_queue_pairs) {
-                       netif_info(pf, drv, vsi->netdev,
-                                  "Invalid queue id %d for VF %d\n",
-                                  input->q_index, vf_id);
-                       goto free_input;
-               }
+       input->dst_ip = fsp->h_u.tcp_ip4_spec.ip4src;
+       input->src_ip = fsp->h_u.tcp_ip4_spec.ip4dst;
+
+       if (userdef.flex_filter) {
+               input->flex_filter = true;
+               input->flex_word = cpu_to_be16(userdef.flex_word);
+               input->flex_offset = userdef.flex_offset;
        }
 
        ret = i40e_add_del_fdir(vsi, input, true);
-free_input:
        if (ret)
-               kfree(input);
-       else
-               i40e_update_ethtool_fdir_entry(vsi, input, fsp->location, NULL);
+               goto free_input;
+
+       /* Add the input filter to the fdir_input_list, possibly replacing
+        * a previous filter. Do not free the input structure after adding it
+        * to the list as this would cause a use-after-free bug.
+        */
+       i40e_update_ethtool_fdir_entry(vsi, input, fsp->location, NULL);
 
+       return 0;
+
+free_input:
+       kfree(input);
        return ret;
 }
 
@@ -3036,7 +3972,7 @@ static int i40e_set_rxfh(struct net_device *netdev, const u32 *indir,
  * @dev: network interface device structure
  *
  * The get string set count and the string set should be matched for each
- * flag returned.  Add new strings for each flag to the i40e_priv_flags_strings
+ * flag returned.  Add new strings for each flag to the i40e_gstrings_priv_flags
  * array.
  *
  * Returns a u32 bitmap of flags.
@@ -3046,19 +3982,27 @@ static u32 i40e_get_priv_flags(struct net_device *dev)
        struct i40e_netdev_priv *np = netdev_priv(dev);
        struct i40e_vsi *vsi = np->vsi;
        struct i40e_pf *pf = vsi->back;
-       u32 ret_flags = 0;
+       u32 i, j, ret_flags = 0;
+
+       for (i = 0; i < I40E_PRIV_FLAGS_STR_LEN; i++) {
+               const struct i40e_priv_flags *priv_flags;
+
+               priv_flags = &i40e_gstrings_priv_flags[i];
+
+               if (priv_flags->flag & pf->flags)
+                       ret_flags |= BIT(i);
+       }
+
+       if (pf->hw.pf_id != 0)
+               return ret_flags;
+
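+       /* Global flags are exposed only on PF 0 and occupy the bits
+        * immediately after the per-PF flags, i.e. starting at BIT(i).
+        */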
+       for (j = 0; j < I40E_GL_PRIV_FLAGS_STR_LEN; j++) {
+               const struct i40e_priv_flags *priv_flags;
 
-       ret_flags |= pf->flags & I40E_FLAG_LINK_POLLING_ENABLED ?
-               I40E_PRIV_FLAGS_LINKPOLL_FLAG : 0;
-       ret_flags |= pf->flags & I40E_FLAG_FD_ATR_ENABLED ?
-               I40E_PRIV_FLAGS_FD_ATR : 0;
-       ret_flags |= pf->flags & I40E_FLAG_VEB_STATS_ENABLED ?
-               I40E_PRIV_FLAGS_VEB_STATS : 0;
-       ret_flags |= pf->auto_disable_flags & I40E_FLAG_HW_ATR_EVICT_CAPABLE ?
-               0 : I40E_PRIV_FLAGS_HW_ATR_EVICT;
-       if (pf->hw.pf_id == 0) {
-               ret_flags |= pf->flags & I40E_FLAG_TRUE_PROMISC_SUPPORT ?
-                       I40E_PRIV_FLAGS_TRUE_PROMISC_SUPPORT : 0;
+               priv_flags = &i40e_gl_gstrings_priv_flags[j];
+
+               if (priv_flags->flag & pf->flags)
+                       ret_flags |= BIT(i + j);
        }
 
        return ret_flags;
@@ -3074,54 +4018,66 @@ static int i40e_set_priv_flags(struct net_device *dev, u32 flags)
        struct i40e_netdev_priv *np = netdev_priv(dev);
        struct i40e_vsi *vsi = np->vsi;
        struct i40e_pf *pf = vsi->back;
-       u16 sw_flags = 0, valid_flags = 0;
-       bool reset_required = false;
-       bool promisc_change = false;
-       int ret;
+       u64 changed_flags;
+       u32 i, j;
 
-       /* NOTE: MFP is not settable */
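+       /* Snapshot the old flags; XORing with the updated flags at
+        * flags_complete below leaves only the bits that changed.
+        */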
+       changed_flags = pf->flags;
 
-       if (flags & I40E_PRIV_FLAGS_LINKPOLL_FLAG)
-               pf->flags |= I40E_FLAG_LINK_POLLING_ENABLED;
-       else
-               pf->flags &= ~I40E_FLAG_LINK_POLLING_ENABLED;
+       for (i = 0; i < I40E_PRIV_FLAGS_STR_LEN; i++) {
+               const struct i40e_priv_flags *priv_flags;
 
-       /* allow the user to control the state of the Flow
-        * Director ATR (Application Targeted Routing) feature
-        * of the driver
+               priv_flags = &i40e_gstrings_priv_flags[i];
+
+               if (priv_flags->read_only)
+                       continue;
+
+               if (flags & BIT(i))
+                       pf->flags |= priv_flags->flag;
+               else
+                       pf->flags &= ~(priv_flags->flag);
+       }
+
+       if (pf->hw.pf_id != 0)
+               goto flags_complete;
+
+       for (j = 0; j < I40E_GL_PRIV_FLAGS_STR_LEN; j++) {
+               const struct i40e_priv_flags *priv_flags;
+
+               priv_flags = &i40e_gl_gstrings_priv_flags[j];
+
+               if (priv_flags->read_only)
+                       continue;
+
+               if (flags & BIT(i + j))
+                       pf->flags |= priv_flags->flag;
+               else
+                       pf->flags &= ~(priv_flags->flag);
+       }
+
+flags_complete:
+       /* check for flags that changed */
+       changed_flags ^= pf->flags;
+
+       /* Process any additional changes needed as a result of flag changes.
+        * The changed_flags value reflects the list of bits that were
+        * changed in the code above.
         */
-       if (flags & I40E_PRIV_FLAGS_FD_ATR) {
-               pf->flags |= I40E_FLAG_FD_ATR_ENABLED;
-       } else {
-               pf->flags &= ~I40E_FLAG_FD_ATR_ENABLED;
-               pf->auto_disable_flags |= I40E_FLAG_FD_ATR_ENABLED;
 
-               /* flush current ATR settings */
+       /* Flush current ATR settings if ATR was disabled */
+       if ((changed_flags & I40E_FLAG_FD_ATR_ENABLED) &&
+           !(pf->flags & I40E_FLAG_FD_ATR_ENABLED)) {
+               pf->hw_disabled_flags |= I40E_FLAG_FD_ATR_ENABLED;
                set_bit(__I40E_FD_FLUSH_REQUESTED, &pf->state);
        }
 
-       if ((flags & I40E_PRIV_FLAGS_VEB_STATS) &&
-           !(pf->flags & I40E_FLAG_VEB_STATS_ENABLED)) {
-               pf->flags |= I40E_FLAG_VEB_STATS_ENABLED;
-               reset_required = true;
-       } else if (!(flags & I40E_PRIV_FLAGS_VEB_STATS) &&
-                  (pf->flags & I40E_FLAG_VEB_STATS_ENABLED)) {
-               pf->flags &= ~I40E_FLAG_VEB_STATS_ENABLED;
-               reset_required = true;
-       }
-
-       if (pf->hw.pf_id == 0) {
-               if ((flags & I40E_PRIV_FLAGS_TRUE_PROMISC_SUPPORT) &&
-                   !(pf->flags & I40E_FLAG_TRUE_PROMISC_SUPPORT)) {
-                       pf->flags |= I40E_FLAG_TRUE_PROMISC_SUPPORT;
-                       promisc_change = true;
-               } else if (!(flags & I40E_PRIV_FLAGS_TRUE_PROMISC_SUPPORT) &&
-                          (pf->flags & I40E_FLAG_TRUE_PROMISC_SUPPORT)) {
-                       pf->flags &= ~I40E_FLAG_TRUE_PROMISC_SUPPORT;
-                       promisc_change = true;
-               }
-       }
-       if (promisc_change) {
+       /* Only allow ATR evict on hardware that is capable of handling it */
+       if (pf->hw_disabled_flags & I40E_FLAG_HW_ATR_EVICT_CAPABLE)
+               pf->flags &= ~I40E_FLAG_HW_ATR_EVICT_CAPABLE;
+
+       if (changed_flags & I40E_FLAG_TRUE_PROMISC_SUPPORT) {
+               u16 sw_flags = 0, valid_flags = 0;
+               int ret;
+
                if (!(pf->flags & I40E_FLAG_TRUE_PROMISC_SUPPORT))
                        sw_flags = I40E_AQ_SET_SWITCH_CFG_PROMISC;
                valid_flags = I40E_AQ_SET_SWITCH_CFG_PROMISC;
@@ -3137,22 +4093,17 @@ static int i40e_set_priv_flags(struct net_device *dev, u32 flags)
                }
        }
 
-       if ((flags & I40E_PRIV_FLAGS_HW_ATR_EVICT) &&
-           (pf->flags & I40E_FLAG_HW_ATR_EVICT_CAPABLE))
-               pf->auto_disable_flags &= ~I40E_FLAG_HW_ATR_EVICT_CAPABLE;
-       else
-               pf->auto_disable_flags |= I40E_FLAG_HW_ATR_EVICT_CAPABLE;
-
-       /* if needed, issue reset to cause things to take effect */
-       if (reset_required)
-               i40e_do_reset(pf, BIT(__I40E_PF_RESET_REQUESTED));
+       /* Issue reset to cause things to take effect, as additional bits
+        * are added we will need to create a mask of bits requiring reset
+        */
+       if ((changed_flags & I40E_FLAG_VEB_STATS_ENABLED) ||
+           ((changed_flags & I40E_FLAG_LEGACY_RX) && netif_running(dev)))
+               i40e_do_reset(pf, BIT(__I40E_PF_RESET_REQUESTED), true);
 
        return 0;
 }
 
 static const struct ethtool_ops i40e_ethtool_ops = {
-       .get_settings           = i40e_get_settings,
-       .set_settings           = i40e_set_settings,
        .get_drvinfo            = i40e_get_drvinfo,
        .get_regs_len           = i40e_get_regs_len,
        .get_regs               = i40e_get_regs,
@@ -3189,6 +4140,8 @@ static const struct ethtool_ops i40e_ethtool_ops = {
        .set_priv_flags         = i40e_set_priv_flags,
        .get_per_queue_coalesce = i40e_get_per_queue_coalesce,
        .set_per_queue_coalesce = i40e_set_per_queue_coalesce,
+       .get_link_ksettings     = i40e_get_link_ksettings,
+       .set_link_ksettings     = i40e_set_link_ksettings,
 };
 
 void i40e_set_ethtool_ops(struct net_device *netdev)
index e8a8351c8ea998a141bd8fb5f27d619b5b477b67..b6ec9beeebff27162924e2b6c2bd8307759e0489 100644 (file)
@@ -39,9 +39,9 @@ static const char i40e_driver_string[] =
 
 #define DRV_KERN "-k"
 
-#define DRV_VERSION_MAJOR 1
-#define DRV_VERSION_MINOR 6
-#define DRV_VERSION_BUILD 27
+#define DRV_VERSION_MAJOR 2
+#define DRV_VERSION_MINOR 1
+#define DRV_VERSION_BUILD 7
 #define DRV_VERSION __stringify(DRV_VERSION_MAJOR) "." \
             __stringify(DRV_VERSION_MINOR) "." \
             __stringify(DRV_VERSION_BUILD)    DRV_KERN
@@ -50,13 +50,16 @@ static const char i40e_copyright[] = "Copyright (c) 2013 - 2014 Intel Corporatio
 
 /* a bit of forward declarations */
 static void i40e_vsi_reinit_locked(struct i40e_vsi *vsi);
-static void i40e_handle_reset_warning(struct i40e_pf *pf);
+static void i40e_handle_reset_warning(struct i40e_pf *pf, bool lock_acquired);
 static int i40e_add_vsi(struct i40e_vsi *vsi);
 static int i40e_add_veb(struct i40e_veb *veb, struct i40e_vsi *vsi);
 static int i40e_setup_pf_switch(struct i40e_pf *pf, bool reinit);
 static int i40e_setup_misc_vector(struct i40e_pf *pf);
 static void i40e_determine_queue_usage(struct i40e_pf *pf);
 static int i40e_setup_pf_filter_control(struct i40e_pf *pf);
+static void i40e_prep_for_reset(struct i40e_pf *pf, bool lock_acquired);
+static int i40e_reset(struct i40e_pf *pf);
+static void i40e_rebuild(struct i40e_pf *pf, bool reinit, bool lock_acquired);
 static void i40e_fdir_sb_setup(struct i40e_pf *pf);
 static int i40e_veb_get_bw_info(struct i40e_veb *veb);
 
@@ -299,11 +302,7 @@ void i40e_service_event_schedule(struct i40e_pf *pf)
  * device is munged, not just the one netdev port, so go for the full
  * reset.
  **/
-#ifdef I40E_FCOE
-void i40e_tx_timeout(struct net_device *netdev)
-#else
 static void i40e_tx_timeout(struct net_device *netdev)
-#endif
 {
        struct i40e_netdev_priv *np = netdev_priv(netdev);
        struct i40e_vsi *vsi = np->vsi;
@@ -408,10 +407,7 @@ struct rtnl_link_stats64 *i40e_get_vsi_stats_struct(struct i40e_vsi *vsi)
  * Returns the address of the device statistics structure.
  * The statistics are actually updated from the service task.
  **/
-#ifndef I40E_FCOE
-static
-#endif
-void i40e_get_netdev_stats_struct(struct net_device *netdev,
+static void i40e_get_netdev_stats_struct(struct net_device *netdev,
                                  struct rtnl_link_stats64 *stats)
 {
        struct i40e_netdev_priv *np = netdev_priv(netdev);
@@ -723,55 +719,6 @@ static void i40e_update_veb_stats(struct i40e_veb *veb)
        veb->stat_offsets_loaded = true;
 }
 
-#ifdef I40E_FCOE
-/**
- * i40e_update_fcoe_stats - Update FCoE-specific ethernet statistics counters.
- * @vsi: the VSI that is capable of doing FCoE
- **/
-static void i40e_update_fcoe_stats(struct i40e_vsi *vsi)
-{
-       struct i40e_pf *pf = vsi->back;
-       struct i40e_hw *hw = &pf->hw;
-       struct i40e_fcoe_stats *ofs;
-       struct i40e_fcoe_stats *fs;     /* device's eth stats */
-       int idx;
-
-       if (vsi->type != I40E_VSI_FCOE)
-               return;
-
-       idx = hw->pf_id + I40E_FCOE_PF_STAT_OFFSET;
-       fs = &vsi->fcoe_stats;
-       ofs = &vsi->fcoe_stats_offsets;
-
-       i40e_stat_update32(hw, I40E_GL_FCOEPRC(idx),
-                          vsi->fcoe_stat_offsets_loaded,
-                          &ofs->rx_fcoe_packets, &fs->rx_fcoe_packets);
-       i40e_stat_update48(hw, I40E_GL_FCOEDWRCH(idx), I40E_GL_FCOEDWRCL(idx),
-                          vsi->fcoe_stat_offsets_loaded,
-                          &ofs->rx_fcoe_dwords, &fs->rx_fcoe_dwords);
-       i40e_stat_update32(hw, I40E_GL_FCOERPDC(idx),
-                          vsi->fcoe_stat_offsets_loaded,
-                          &ofs->rx_fcoe_dropped, &fs->rx_fcoe_dropped);
-       i40e_stat_update32(hw, I40E_GL_FCOEPTC(idx),
-                          vsi->fcoe_stat_offsets_loaded,
-                          &ofs->tx_fcoe_packets, &fs->tx_fcoe_packets);
-       i40e_stat_update48(hw, I40E_GL_FCOEDWTCH(idx), I40E_GL_FCOEDWTCL(idx),
-                          vsi->fcoe_stat_offsets_loaded,
-                          &ofs->tx_fcoe_dwords, &fs->tx_fcoe_dwords);
-       i40e_stat_update32(hw, I40E_GL_FCOECRC(idx),
-                          vsi->fcoe_stat_offsets_loaded,
-                          &ofs->fcoe_bad_fccrc, &fs->fcoe_bad_fccrc);
-       i40e_stat_update32(hw, I40E_GL_FCOELAST(idx),
-                          vsi->fcoe_stat_offsets_loaded,
-                          &ofs->fcoe_last_error, &fs->fcoe_last_error);
-       i40e_stat_update32(hw, I40E_GL_FCOEDDPC(idx),
-                          vsi->fcoe_stat_offsets_loaded,
-                          &ofs->fcoe_ddp_count, &fs->fcoe_ddp_count);
-
-       vsi->fcoe_stat_offsets_loaded = true;
-}
-
-#endif
 /**
  * i40e_update_vsi_stats - Update the vsi statistics counters.
  * @vsi: the VSI to be updated
@@ -790,7 +737,6 @@ static void i40e_update_vsi_stats(struct i40e_vsi *vsi)
        struct i40e_eth_stats *oes;
        struct i40e_eth_stats *es;     /* device's eth stats */
        u32 tx_restart, tx_busy;
-       u64 tx_lost_interrupt;
        struct i40e_ring *p;
        u32 rx_page, rx_buf;
        u64 bytes, packets;
@@ -816,7 +762,6 @@ static void i40e_update_vsi_stats(struct i40e_vsi *vsi)
        rx_b = rx_p = 0;
        tx_b = tx_p = 0;
        tx_restart = tx_busy = tx_linearize = tx_force_wb = 0;
-       tx_lost_interrupt = 0;
        rx_page = 0;
        rx_buf = 0;
        rcu_read_lock();
@@ -835,7 +780,6 @@ static void i40e_update_vsi_stats(struct i40e_vsi *vsi)
                tx_busy += p->tx_stats.tx_busy;
                tx_linearize += p->tx_stats.tx_linearize;
                tx_force_wb += p->tx_stats.tx_force_wb;
-               tx_lost_interrupt += p->tx_stats.tx_lost_interrupt;
 
                /* Rx queue is part of the same block as Tx queue */
                p = &p[1];
@@ -854,7 +798,6 @@ static void i40e_update_vsi_stats(struct i40e_vsi *vsi)
        vsi->tx_busy = tx_busy;
        vsi->tx_linearize = tx_linearize;
        vsi->tx_force_wb = tx_force_wb;
-       vsi->tx_lost_interrupt = tx_lost_interrupt;
        vsi->rx_page_failed = rx_page;
        vsi->rx_buf_failed = rx_buf;
 
@@ -1101,13 +1044,13 @@ static void i40e_update_pf_stats(struct i40e_pf *pf)
                           &osd->rx_lpi_count, &nsd->rx_lpi_count);
 
        if (pf->flags & I40E_FLAG_FD_SB_ENABLED &&
-           !(pf->auto_disable_flags & I40E_FLAG_FD_SB_ENABLED))
+           !(pf->hw_disabled_flags & I40E_FLAG_FD_SB_ENABLED))
                nsd->fd_sb_status = true;
        else
                nsd->fd_sb_status = false;
 
        if (pf->flags & I40E_FLAG_FD_ATR_ENABLED &&
-           !(pf->auto_disable_flags & I40E_FLAG_FD_ATR_ENABLED))
+           !(pf->hw_disabled_flags & I40E_FLAG_FD_ATR_ENABLED))
                nsd->fd_atr_status = true;
        else
                nsd->fd_atr_status = false;
@@ -1129,9 +1072,6 @@ void i40e_update_stats(struct i40e_vsi *vsi)
                i40e_update_pf_stats(pf);
 
        i40e_update_vsi_stats(vsi);
-#ifdef I40E_FCOE
-       i40e_update_fcoe_stats(vsi);
-#endif
 }
 
 /**
@@ -1562,11 +1502,7 @@ int i40e_del_mac_filter(struct i40e_vsi *vsi, const u8 *macaddr)
  *
  * Returns 0 on success, negative on failure
  **/
-#ifdef I40E_FCOE
-int i40e_set_mac(struct net_device *netdev, void *p)
-#else
 static int i40e_set_mac(struct net_device *netdev, void *p)
-#endif
 {
        struct i40e_netdev_priv *np = netdev_priv(netdev);
        struct i40e_vsi *vsi = np->vsi;
@@ -1626,17 +1562,10 @@ static int i40e_set_mac(struct net_device *netdev, void *p)
  *
  * Setup VSI queue mapping for enabled traffic classes.
  **/
-#ifdef I40E_FCOE
-void i40e_vsi_setup_queue_map(struct i40e_vsi *vsi,
-                             struct i40e_vsi_context *ctxt,
-                             u8 enabled_tc,
-                             bool is_add)
-#else
 static void i40e_vsi_setup_queue_map(struct i40e_vsi *vsi,
                                     struct i40e_vsi_context *ctxt,
                                     u8 enabled_tc,
                                     bool is_add)
-#endif
 {
        struct i40e_pf *pf = vsi->back;
        u16 sections = 0;
@@ -1686,11 +1615,6 @@ static void i40e_vsi_setup_queue_map(struct i40e_vsi *vsi,
                                qcount = min_t(int, pf->alloc_rss_size,
                                               num_tc_qps);
                                break;
-#ifdef I40E_FCOE
-                       case I40E_VSI_FCOE:
-                               qcount = num_tc_qps;
-                               break;
-#endif
                        case I40E_VSI_FDIR:
                        case I40E_VSI_SRIOV:
                        case I40E_VSI_VMDQ2:
@@ -1800,11 +1724,7 @@ static int i40e_addr_unsync(struct net_device *netdev, const u8 *addr)
  * i40e_set_rx_mode - NDO callback to set the netdev filters
  * @netdev: network interface device structure
  **/
-#ifdef I40E_FCOE
-void i40e_set_rx_mode(struct net_device *netdev)
-#else
 static void i40e_set_rx_mode(struct net_device *netdev)
-#endif
 {
        struct i40e_netdev_priv *np = netdev_priv(netdev);
        struct i40e_vsi *vsi = np->vsi;
@@ -1883,19 +1803,12 @@ static void i40e_undo_add_filter_entries(struct i40e_vsi *vsi,
 static
 struct i40e_new_mac_filter *i40e_next_filter(struct i40e_new_mac_filter *next)
 {
-       while (next) {
-               next = hlist_entry(next->hlist.next,
-                                  typeof(struct i40e_new_mac_filter),
-                                  hlist);
-
-               /* keep going if we found a broadcast filter */
-               if (next && is_broadcast_ether_addr(next->f->macaddr))
-                       continue;
-
-               break;
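+       /* Walk forward from the current node, skipping broadcast filters */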
+       hlist_for_each_entry_continue(next, hlist) {
+               if (!is_broadcast_ether_addr(next->f->macaddr))
+                       return next;
        }
 
-       return next;
+       return NULL;
 }
 
 /**
@@ -2487,13 +2400,15 @@ static int i40e_change_mtu(struct net_device *netdev, int new_mtu)
 {
        struct i40e_netdev_priv *np = netdev_priv(netdev);
        struct i40e_vsi *vsi = np->vsi;
+       struct i40e_pf *pf = vsi->back;
 
        netdev_info(netdev, "changing MTU from %d to %d\n",
                    netdev->mtu, new_mtu);
        netdev->mtu = new_mtu;
        if (netif_running(netdev))
                i40e_vsi_reinit_locked(vsi);
-       i40e_notify_client_of_l2_param_changes(vsi);
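+       /* Defer the client L2 notification to the service task instead of
+        * calling the client directly from here.
+        */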
+       pf->flags |= (I40E_FLAG_SERVICE_CLIENT_REQUESTED |
+                     I40E_FLAG_CLIENT_L2_CHANGE);
        return 0;
 }
 
@@ -2707,13 +2622,8 @@ void i40e_vsi_kill_vlan(struct i40e_vsi *vsi, u16 vid)
  *
  * net_device_ops implementation for adding vlan ids
  **/
-#ifdef I40E_FCOE
-int i40e_vlan_rx_add_vid(struct net_device *netdev,
-                        __always_unused __be16 proto, u16 vid)
-#else
 static int i40e_vlan_rx_add_vid(struct net_device *netdev,
                                __always_unused __be16 proto, u16 vid)
-#endif
 {
        struct i40e_netdev_priv *np = netdev_priv(netdev);
        struct i40e_vsi *vsi = np->vsi;
@@ -2744,13 +2654,8 @@ static int i40e_vlan_rx_add_vid(struct net_device *netdev,
  *
  * net_device_ops implementation for removing vlan ids
  **/
-#ifdef I40E_FCOE
-int i40e_vlan_rx_kill_vid(struct net_device *netdev,
-                         __always_unused __be16 proto, u16 vid)
-#else
 static int i40e_vlan_rx_kill_vid(struct net_device *netdev,
                                 __always_unused __be16 proto, u16 vid)
-#endif
 {
        struct i40e_netdev_priv *np = netdev_priv(netdev);
        struct i40e_vsi *vsi = np->vsi;
@@ -2920,9 +2825,6 @@ static int i40e_vsi_setup_rx_resources(struct i40e_vsi *vsi)
 
        for (i = 0; i < vsi->num_queue_pairs && !err; i++)
                err = i40e_setup_rx_descriptors(vsi->rx_rings[i]);
-#ifdef I40E_FCOE
-       i40e_fcoe_setup_ddp_resources(vsi);
-#endif
        return err;
 }
 
@@ -2942,9 +2844,6 @@ static void i40e_vsi_free_rx_resources(struct i40e_vsi *vsi)
        for (i = 0; i < vsi->num_queue_pairs; i++)
                if (vsi->rx_rings[i] && vsi->rx_rings[i]->desc)
                        i40e_free_rx_resources(vsi->rx_rings[i]);
-#ifdef I40E_FCOE
-       i40e_fcoe_free_ddp_resources(vsi);
-#endif
 }
 
 /**
@@ -3015,9 +2914,6 @@ static int i40e_configure_tx_ring(struct i40e_ring *ring)
        tx_ctx.qlen = ring->count;
        tx_ctx.fd_ena = !!(vsi->back->flags & (I40E_FLAG_FD_SB_ENABLED |
                                               I40E_FLAG_FD_ATR_ENABLED));
-#ifdef I40E_FCOE
-       tx_ctx.fc_ena = (vsi->type == I40E_VSI_FCOE);
-#endif
        tx_ctx.timesync_ena = !!(vsi->back->flags & I40E_FLAG_PTP);
        /* FDIR VSI tx ring can still use RS bit and writebacks */
        if (vsi->type != I40E_VSI_FDIR)
@@ -3098,7 +2994,8 @@ static int i40e_configure_rx_ring(struct i40e_ring *ring)
 
        ring->rx_buf_len = vsi->rx_buf_len;
 
-       rx_ctx.dbuff = ring->rx_buf_len >> I40E_RXQ_CTX_DBUFF_SHIFT;
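+       /* Round up to the DBUFF granularity rather than truncating, so a
+        * buffer length that is not an exact multiple is never undersized.
+        */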
+       rx_ctx.dbuff = DIV_ROUND_UP(ring->rx_buf_len,
+                                   BIT_ULL(I40E_RXQ_CTX_DBUFF_SHIFT));
 
        rx_ctx.base = (ring->dma / 128);
        rx_ctx.qlen = ring->count;
@@ -3120,9 +3017,6 @@ static int i40e_configure_rx_ring(struct i40e_ring *ring)
        rx_ctx.l2tsel = 1;
        /* this controls whether VLAN is stripped from inner headers */
        rx_ctx.showiv = 0;
-#ifdef I40E_FCOE
-       rx_ctx.fc_ena = (vsi->type == I40E_VSI_FCOE);
-#endif
        /* set the prefena field to 1 because the manual says to */
        rx_ctx.prefena = 1;
 
@@ -3144,6 +3038,12 @@ static int i40e_configure_rx_ring(struct i40e_ring *ring)
                return -ENOMEM;
        }
 
+       /* configure Rx buffer alignment */
+       if (!vsi->netdev || (vsi->back->flags & I40E_FLAG_LEGACY_RX))
+               clear_ring_build_skb_enabled(ring);
+       else
+               set_ring_build_skb_enabled(ring);
+
        /* cache tail for quicker writes, and clear the reg before use */
        ring->tail = hw->hw_addr + I40E_QRX_TAIL(pf_q);
        writel(0, ring->tail);
@@ -3181,27 +3081,21 @@ static int i40e_vsi_configure_rx(struct i40e_vsi *vsi)
        int err = 0;
        u16 i;
 
-       if (vsi->netdev && (vsi->netdev->mtu > ETH_DATA_LEN))
-               vsi->max_frame = vsi->netdev->mtu + ETH_HLEN
-                              + ETH_FCS_LEN + VLAN_HLEN;
-       else
-               vsi->max_frame = I40E_RXBUFFER_2048;
-
-       vsi->rx_buf_len = I40E_RXBUFFER_2048;
-
-#ifdef I40E_FCOE
-       /* setup rx buffer for FCoE */
-       if ((vsi->type == I40E_VSI_FCOE) &&
-           (vsi->back->flags & I40E_FLAG_FCOE_ENABLED)) {
-               vsi->rx_buf_len = I40E_RXBUFFER_3072;
-               vsi->max_frame = I40E_RXBUFFER_3072;
+       if (!vsi->netdev || (vsi->back->flags & I40E_FLAG_LEGACY_RX)) {
+               vsi->max_frame = I40E_MAX_RXBUFFER;
+               vsi->rx_buf_len = I40E_RXBUFFER_2048;
+#if (PAGE_SIZE < 8192)
+       } else if (!I40E_2K_TOO_SMALL_WITH_PADDING &&
+                  (vsi->netdev->mtu <= ETH_DATA_LEN)) {
+               vsi->max_frame = I40E_RXBUFFER_1536 - NET_IP_ALIGN;
+               vsi->rx_buf_len = I40E_RXBUFFER_1536 - NET_IP_ALIGN;
+#endif
+       } else {
+               vsi->max_frame = I40E_MAX_RXBUFFER;
+               vsi->rx_buf_len = (PAGE_SIZE < 8192) ? I40E_RXBUFFER_3072 :
+                                                      I40E_RXBUFFER_2048;
        }
 
-#endif /* I40E_FCOE */
-       /* round up for the chip's needs */
-       vsi->rx_buf_len = ALIGN(vsi->rx_buf_len,
-                               BIT_ULL(I40E_RXQ_CTX_DBUFF_SHIFT));
-
        /* set up individual rings */
        for (i = 0; i < vsi->num_queue_pairs && !err; i++)
                err = i40e_configure_rx_ring(vsi->rx_rings[i]);
@@ -3281,6 +3175,12 @@ static void i40e_fdir_filter_restore(struct i40e_vsi *vsi)
        if (!(pf->flags & I40E_FLAG_FD_SB_ENABLED))
                return;
 
+       /* Reset FDir counters as we're replaying all existing filters */
+       pf->fd_tcp4_filter_cnt = 0;
+       pf->fd_udp4_filter_cnt = 0;
+       pf->fd_sctp4_filter_cnt = 0;
+       pf->fd_ip4_filter_cnt = 0;
+
        hlist_for_each_entry_safe(filter, node,
                                  &pf->fdir_filter_list, fdir_node) {
                i40e_add_del_fdir(vsi, filter, true);
@@ -3993,11 +3893,7 @@ static int i40e_vsi_request_irq(struct i40e_vsi *vsi, char *basename)
  * This is used by netconsole to send skbs without having to re-enable
  * interrupts.  It's not called while the normal interrupt routine is executing.
  **/
-#ifdef I40E_FCOE
-void i40e_netpoll(struct net_device *netdev)
-#else
 static void i40e_netpoll(struct net_device *netdev)
-#endif
 {
        struct i40e_netdev_priv *np = netdev_priv(netdev);
        struct i40e_vsi *vsi = np->vsi;
@@ -4100,8 +3996,6 @@ static int i40e_vsi_control_tx(struct i40e_vsi *vsi, bool enable)
                }
        }
 
-       if (hw->revision_id == 0)
-               mdelay(50);
        return ret;
 }
 
@@ -4180,6 +4074,12 @@ static int i40e_vsi_control_rx(struct i40e_vsi *vsi, bool enable)
                }
        }
 
+       /* Due to HW errata, on Rx disable only, the register can indicate done
+        * before it really is. Wait 50ms to be sure.
+        */
+       if (!enable)
+               mdelay(50);
+
        return ret;
 }
 
@@ -4438,8 +4338,12 @@ static void i40e_napi_enable_all(struct i40e_vsi *vsi)
        if (!vsi->netdev)
                return;
 
-       for (q_idx = 0; q_idx < vsi->num_q_vectors; q_idx++)
-               napi_enable(&vsi->q_vectors[q_idx]->napi);
+       for (q_idx = 0; q_idx < vsi->num_q_vectors; q_idx++) {
+               struct i40e_q_vector *q_vector = vsi->q_vectors[q_idx];
+
+               if (q_vector->rx.ring || q_vector->tx.ring)
+                       napi_enable(&q_vector->napi);
+       }
 }
 
 /**
@@ -4453,8 +4357,12 @@ static void i40e_napi_disable_all(struct i40e_vsi *vsi)
        if (!vsi->netdev)
                return;
 
-       for (q_idx = 0; q_idx < vsi->num_q_vectors; q_idx++)
-               napi_disable(&vsi->q_vectors[q_idx]->napi);
+       for (q_idx = 0; q_idx < vsi->num_q_vectors; q_idx++) {
+               struct i40e_q_vector *q_vector = vsi->q_vectors[q_idx];
+
+               if (q_vector->rx.ring || q_vector->tx.ring)
+                       napi_disable(&q_vector->napi);
+       }
 }
 
 /**
@@ -4463,17 +4371,16 @@ static void i40e_napi_disable_all(struct i40e_vsi *vsi)
  **/
 static void i40e_vsi_close(struct i40e_vsi *vsi)
 {
-       bool reset = false;
-
+       struct i40e_pf *pf = vsi->back;
+
        if (!test_and_set_bit(__I40E_DOWN, &vsi->state))
                i40e_down(vsi);
        i40e_vsi_free_irq(vsi);
        i40e_vsi_free_tx_resources(vsi);
        i40e_vsi_free_rx_resources(vsi);
        vsi->current_netdev_flags = 0;
-       if (test_bit(__I40E_RESET_RECOVERY_PENDING, &vsi->back->state))
-               reset = true;
-       i40e_notify_client_of_netdev_close(vsi, reset);
+       pf->flags |= I40E_FLAG_SERVICE_CLIENT_REQUESTED;
+       if (test_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state))
+               pf->flags |= I40E_FLAG_CLIENT_RESET;
 }
 
 /**
@@ -4485,14 +4392,6 @@ static void i40e_quiesce_vsi(struct i40e_vsi *vsi)
        if (test_bit(__I40E_DOWN, &vsi->state))
                return;
 
-       /* No need to disable FCoE VSI when Tx suspended */
-       if ((test_bit(__I40E_PORT_TX_SUSPENDED, &vsi->back->state)) &&
-           vsi->type == I40E_VSI_FCOE) {
-               dev_dbg(&vsi->back->pdev->dev,
-                        "VSI seid %d skipping FCoE VSI disable\n", vsi->seid);
-               return;
-       }
-
        set_bit(__I40E_NEEDS_RESTART, &vsi->state);
        if (vsi->netdev && netif_running(vsi->netdev))
                vsi->netdev->netdev_ops->ndo_stop(vsi->netdev);
@@ -4549,7 +4448,7 @@ static void i40e_pf_unquiesce_all_vsi(struct i40e_pf *pf)
  * i40e_vsi_wait_queues_disabled - Wait for VSI's queues to be disabled
  * @vsi: the VSI being configured
  *
- * This function waits for the given VSI's queues to be disabled.
+ * Wait until all queues on a given VSI have been disabled.
  **/
 static int i40e_vsi_wait_queues_disabled(struct i40e_vsi *vsi)
 {
@@ -4558,7 +4457,7 @@ static int i40e_vsi_wait_queues_disabled(struct i40e_vsi *vsi)
 
        pf_q = vsi->base_queue;
        for (i = 0; i < vsi->num_queue_pairs; i++, pf_q++) {
-               /* Check and wait for the disable status of the queue */
+               /* Check and wait for the Tx queue */
                ret = i40e_pf_txq_wait(pf, pf_q, false);
                if (ret) {
                        dev_info(&pf->pdev->dev,
@@ -4566,11 +4465,7 @@ static int i40e_vsi_wait_queues_disabled(struct i40e_vsi *vsi)
                                 vsi->seid, pf_q);
                        return ret;
                }
-       }
-
-       pf_q = vsi->base_queue;
-       for (i = 0; i < vsi->num_queue_pairs; i++, pf_q++) {
-               /* Check and wait for the disable status of the queue */
+               /* Check and wait for the Rx queue */
                ret = i40e_pf_rxq_wait(pf, pf_q, false);
                if (ret) {
                        dev_info(&pf->pdev->dev,
@@ -4595,8 +4490,7 @@ static int i40e_pf_wait_queues_disabled(struct i40e_pf *pf)
        int v, ret = 0;
 
        for (v = 0; v < pf->hw.func_caps.num_vsis; v++) {
-               /* No need to wait for FCoE VSI queues */
-               if (pf->vsi[v] && pf->vsi[v]->type != I40E_VSI_FCOE) {
+               if (pf->vsi[v]) {
                        ret = i40e_vsi_wait_queues_disabled(pf->vsi[v]);
                        if (ret)
                                break;
@@ -4614,16 +4508,15 @@ static int i40e_pf_wait_queues_disabled(struct i40e_pf *pf)
  * @vsi: Pointer to VSI struct
  *
  * This function checks specified queue for given VSI. Detects hung condition.
- * Sets hung bit since it is two step process. Before next run of service task
- * if napi_poll runs, it reset 'hung' bit for respective q_vector. If not,
- * hung condition remain unchanged and during subsequent run, this function
- * issues SW interrupt to recover from hung condition.
+ * We proactively detect hung TX queues by checking if interrupts are disabled
+ * but there are pending descriptors.  If it appears hung, attempt to recover
+ * by triggering a SW interrupt.
  **/
 static void i40e_detect_recover_hung_queue(int q_idx, struct i40e_vsi *vsi)
 {
        struct i40e_ring *tx_ring = NULL;
        struct i40e_pf  *pf;
-       u32 head, val, tx_pending_hw;
+       u32 val, tx_pending;
        int i;
 
        pf = vsi->back;
@@ -4649,47 +4542,15 @@ static void i40e_detect_recover_hung_queue(int q_idx, struct i40e_vsi *vsi)
        else
                val = rd32(&pf->hw, I40E_PFINT_DYN_CTL0);
 
-       head = i40e_get_head(tx_ring);
-
-       tx_pending_hw = i40e_get_tx_pending(tx_ring, false);
+       tx_pending = i40e_get_tx_pending(tx_ring);
 
-       /* HW is done executing descriptors, updated HEAD write back,
-        * but SW hasn't processed those descriptors. If interrupt is
-        * not generated from this point ON, it could result into
-        * dev_watchdog detecting timeout on those netdev_queue,
-        * hence proactively trigger SW interrupt.
+       /* If interrupts are disabled while Tx descriptors are still
+        * pending, trigger the SW interrupt right away (don't wait).
+        * Worst case we raise one extra interrupt that finds nothing to
+        * clean because the queues were already cleaned.
         */
-       if (tx_pending_hw && (!(val & I40E_PFINT_DYN_CTLN_INTENA_MASK))) {
-               /* NAPI Poll didn't run and clear since it was set */
-               if (test_and_clear_bit(I40E_Q_VECTOR_HUNG_DETECT,
-                                      &tx_ring->q_vector->hung_detected)) {
-                       netdev_info(vsi->netdev, "VSI_seid %d, Hung TX queue %d, tx_pending_hw: %d, NTC:0x%x, HWB: 0x%x, NTU: 0x%x, TAIL: 0x%x\n",
-                                   vsi->seid, q_idx, tx_pending_hw,
-                                   tx_ring->next_to_clean, head,
-                                   tx_ring->next_to_use,
-                                   readl(tx_ring->tail));
-                       netdev_info(vsi->netdev, "VSI_seid %d, Issuing force_wb for TX queue %d, Interrupt Reg: 0x%x\n",
-                                   vsi->seid, q_idx, val);
-                       i40e_force_wb(vsi, tx_ring->q_vector);
-               } else {
-                       /* First Chance - detected possible hung */
-                       set_bit(I40E_Q_VECTOR_HUNG_DETECT,
-                               &tx_ring->q_vector->hung_detected);
-               }
-       }
-
-       /* This is the case where we have interrupts missing,
-        * so the tx_pending in HW will most likely be 0, but we
-        * will have tx_pending in SW since the WB happened but the
-        * interrupt got lost.
-        */
-       if ((!tx_pending_hw) && i40e_get_tx_pending(tx_ring, true) &&
-           (!(val & I40E_PFINT_DYN_CTLN_INTENA_MASK))) {
-               local_bh_disable();
-               if (napi_reschedule(&tx_ring->q_vector->napi))
-                       tx_ring->tx_stats.tx_lost_interrupt++;
-               local_bh_enable();
-       }
+       if (tx_pending && (!(val & I40E_PFINT_DYN_CTLN_INTENA_MASK)))
+               i40e_force_wb(vsi, tx_ring->q_vector);
 }
 
 /**
@@ -5220,20 +5081,12 @@ static void i40e_dcb_reconfigure(struct i40e_pf *pf)
                        continue;
 
                /* - Enable all TCs for the LAN VSI
-#ifdef I40E_FCOE
-                * - For FCoE VSI only enable the TC configured
-                *   as per the APP TLV
-#endif
                 * - For all others keep them at TC0 for now
                 */
                if (v == pf->lan_vsi)
                        tc_map = i40e_pf_get_tc_map(pf);
                else
                        tc_map = I40E_DEFAULT_TRAFFIC_CLASS;
-#ifdef I40E_FCOE
-               if (pf->vsi[v]->type == I40E_VSI_FCOE)
-                       tc_map = i40e_get_fcoe_tc_map(pf);
-#endif /* #ifdef I40E_FCOE */
 
                ret = i40e_vsi_config_tc(pf->vsi[v], tc_map);
                if (ret) {
@@ -5300,10 +5153,6 @@ static int i40e_init_pf_dcb(struct i40e_pf *pf)
                    (hw->dcbx_status == I40E_DCBX_STATUS_DISABLED)) {
                        dev_info(&pf->pdev->dev,
                                 "DCBX offload is not supported or is disabled for this PF.\n");
-
-                       if (pf->flags & I40E_FLAG_MFP_ENABLED)
-                               goto out;
-
                } else {
                        /* When status is not DISABLED then DCBX in FW */
                        pf->dcbx_cap = DCB_CAP_DCBX_LLD_MANAGED |
@@ -5464,13 +5313,8 @@ static int i40e_up_complete(struct i40e_vsi *vsi)
        /* replay FDIR SB filters */
        if (vsi->type == I40E_VSI_FDIR) {
                /* reset fd counters */
-               pf->fd_add_err = pf->fd_atr_cnt = 0;
-               if (pf->fd_tcp_rule > 0) {
-                       pf->auto_disable_flags |= I40E_FLAG_FD_ATR_ENABLED;
-                       if (I40E_DEBUG_FD & pf->hw.debug_mask)
-                               dev_info(&pf->pdev->dev, "Forcing ATR off, sideband rules for TCP/IPv4 exist\n");
-                       pf->fd_tcp_rule = 0;
-               }
+               pf->fd_add_err = 0;
+               pf->fd_atr_cnt = 0;
                i40e_fdir_filter_restore(vsi);
        }
 
@@ -5542,8 +5386,6 @@ void i40e_down(struct i40e_vsi *vsi)
                i40e_clean_rx_ring(vsi->rx_rings[i]);
        }
 
-       i40e_notify_client_of_netdev_close(vsi, false);
-
 }
 
 /**
@@ -5604,17 +5446,15 @@ exit:
        return ret;
 }
 
-#ifdef I40E_FCOE
-int __i40e_setup_tc(struct net_device *netdev, u32 handle, __be16 proto,
-                   struct tc_to_netdev *tc)
-#else
 static int __i40e_setup_tc(struct net_device *netdev, u32 handle, __be16 proto,
                           struct tc_to_netdev *tc)
-#endif
 {
-       if (handle != TC_H_ROOT || tc->type != TC_SETUP_MQPRIO)
+       if (tc->type != TC_SETUP_MQPRIO)
                return -EINVAL;
-       return i40e_setup_tc(netdev, tc->tc);
+
+       tc->mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS;
+
+       return i40e_setup_tc(netdev, tc->mqprio->num_tc);
 }
 
 /**
@@ -5667,6 +5507,8 @@ int i40e_open(struct net_device *netdev)
  * Finish initialization of the VSI.
  *
  * Returns 0 on success, negative value on failure
+ *
+ * Note: expects to be called while under rtnl_lock()
  **/
 int i40e_vsi_open(struct i40e_vsi *vsi)
 {
@@ -5730,7 +5572,7 @@ err_setup_rx:
 err_setup_tx:
        i40e_vsi_free_tx_resources(vsi);
        if (vsi == pf->vsi[pf->lan_vsi])
-               i40e_do_reset(pf, BIT_ULL(__I40E_PF_RESET_REQUESTED));
+               i40e_do_reset(pf, BIT_ULL(__I40E_PF_RESET_REQUESTED), true);
 
        return err;
 }
@@ -5745,6 +5587,7 @@ err_setup_tx:
 static void i40e_fdir_filter_exit(struct i40e_pf *pf)
 {
        struct i40e_fdir_filter *filter;
+       struct i40e_flex_pit *pit_entry, *tmp;
        struct hlist_node *node2;
 
        hlist_for_each_entry_safe(filter, node2,
@@ -5752,7 +5595,43 @@ static void i40e_fdir_filter_exit(struct i40e_pf *pf)
                hlist_del(&filter->fdir_node);
                kfree(filter);
        }
+
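+       /* Also release any flexible PIT entries programmed for the
+        * filters we just removed.
+        */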
+       list_for_each_entry_safe(pit_entry, tmp, &pf->l3_flex_pit_list, list) {
+               list_del(&pit_entry->list);
+               kfree(pit_entry);
+       }
+       INIT_LIST_HEAD(&pf->l3_flex_pit_list);
+
+       list_for_each_entry_safe(pit_entry, tmp, &pf->l4_flex_pit_list, list) {
+               list_del(&pit_entry->list);
+               kfree(pit_entry);
+       }
+       INIT_LIST_HEAD(&pf->l4_flex_pit_list);
+
        pf->fdir_pf_active_filters = 0;
+       pf->fd_tcp4_filter_cnt = 0;
+       pf->fd_udp4_filter_cnt = 0;
+       pf->fd_sctp4_filter_cnt = 0;
+       pf->fd_ip4_filter_cnt = 0;
+
+       /* Reprogram the default input set for TCP/IPv4 */
+       i40e_write_fd_input_set(pf, I40E_FILTER_PCTYPE_NONF_IPV4_TCP,
+                               I40E_L3_SRC_MASK | I40E_L3_DST_MASK |
+                               I40E_L4_SRC_MASK | I40E_L4_DST_MASK);
+
+       /* Reprogram the default input set for UDP/IPv4 */
+       i40e_write_fd_input_set(pf, I40E_FILTER_PCTYPE_NONF_IPV4_UDP,
+                               I40E_L3_SRC_MASK | I40E_L3_DST_MASK |
+                               I40E_L4_SRC_MASK | I40E_L4_DST_MASK);
+
+       /* Reprogram the default input set for SCTP/IPv4 */
+       i40e_write_fd_input_set(pf, I40E_FILTER_PCTYPE_NONF_IPV4_SCTP,
+                               I40E_L3_SRC_MASK | I40E_L3_DST_MASK |
+                               I40E_L4_SRC_MASK | I40E_L4_DST_MASK);
+
+       /* Reprogram the default input set for Other/IPv4 */
+       i40e_write_fd_input_set(pf, I40E_FILTER_PCTYPE_NONF_IPV4_OTHER,
+                               I40E_L3_SRC_MASK | I40E_L3_DST_MASK);
 }
 
 /**
@@ -5779,12 +5658,14 @@ int i40e_close(struct net_device *netdev)
  * i40e_do_reset - Start a PF or Core Reset sequence
  * @pf: board private structure
  * @reset_flags: which reset is requested
+ * @lock_acquired: indicates whether or not the lock has been acquired
+ * before this function was called.
  *
  * The essential difference in resets is that the PF Reset
  * doesn't clear the packet buffers, doesn't reset the PE
  * firmware, and doesn't bother the other PFs on the chip.
  **/
-void i40e_do_reset(struct i40e_pf *pf, u32 reset_flags)
+void i40e_do_reset(struct i40e_pf *pf, u32 reset_flags, bool lock_acquired)
 {
        u32 val;
 
@@ -5830,7 +5711,7 @@ void i40e_do_reset(struct i40e_pf *pf, u32 reset_flags)
                 * for the Core Reset.
                 */
                dev_dbg(&pf->pdev->dev, "PFR requested\n");
-               i40e_handle_reset_warning(pf);
+               i40e_handle_reset_warning(pf, lock_acquired);
 
        } else if (reset_flags & BIT_ULL(__I40E_REINIT_REQUESTED)) {
                int v;
@@ -6021,8 +5902,8 @@ static int i40e_handle_lldp_event(struct i40e_pf *pf,
                i40e_service_event_schedule(pf);
        } else {
                i40e_pf_unquiesce_all_vsi(pf);
-               /* Notify the client for the DCB changes */
-               i40e_notify_client_of_l2_param_changes(pf->vsi[pf->lan_vsi]);
+               pf->flags |= (I40E_FLAG_SERVICE_CLIENT_REQUESTED |
+                             I40E_FLAG_CLIENT_L2_CHANGE);
        }
 
 exit:
@@ -6039,7 +5920,7 @@ exit:
 void i40e_do_reset_safe(struct i40e_pf *pf, u32 reset_flags)
 {
        rtnl_lock();
-       i40e_do_reset(pf, reset_flags);
+       i40e_do_reset(pf, reset_flags, true);
        rtnl_unlock();
 }
 
@@ -6144,8 +6025,8 @@ void i40e_fdir_check_and_reenable(struct i40e_pf *pf)
            (pf->fd_add_err == 0) ||
            (i40e_get_current_atr_cnt(pf) < pf->fd_atr_cnt)) {
                if ((pf->flags & I40E_FLAG_FD_SB_ENABLED) &&
-                   (pf->auto_disable_flags & I40E_FLAG_FD_SB_ENABLED)) {
-                       pf->auto_disable_flags &= ~I40E_FLAG_FD_SB_ENABLED;
+                   (pf->hw_disabled_flags & I40E_FLAG_FD_SB_ENABLED)) {
+                       pf->hw_disabled_flags &= ~I40E_FLAG_FD_SB_ENABLED;
                        if (I40E_DEBUG_FD & pf->hw.debug_mask)
                                dev_info(&pf->pdev->dev, "FD Sideband/ntuple is being enabled since we have space in the table now\n");
                }
@@ -6156,9 +6037,9 @@ void i40e_fdir_check_and_reenable(struct i40e_pf *pf)
         */
        if (fcnt_prog < (fcnt_avail - I40E_FDIR_BUFFER_HEAD_ROOM * 2)) {
                if ((pf->flags & I40E_FLAG_FD_ATR_ENABLED) &&
-                   (pf->auto_disable_flags & I40E_FLAG_FD_ATR_ENABLED) &&
-                   (pf->fd_tcp_rule == 0)) {
-                       pf->auto_disable_flags &= ~I40E_FLAG_FD_ATR_ENABLED;
+                   (pf->hw_disabled_flags & I40E_FLAG_FD_ATR_ENABLED) &&
+                   (pf->fd_tcp4_filter_cnt == 0)) {
+                       pf->hw_disabled_flags &= ~I40E_FLAG_FD_ATR_ENABLED;
                        if (I40E_DEBUG_FD & pf->hw.debug_mask)
                                dev_info(&pf->pdev->dev, "ATR is being enabled since we have space in the table and there are no conflicting ntuple rules\n");
                }
@@ -6210,7 +6091,7 @@ static void i40e_fdir_flush_and_replay(struct i40e_pf *pf)
        }
 
        pf->fd_flush_timestamp = jiffies;
-       pf->auto_disable_flags |= I40E_FLAG_FD_ATR_ENABLED;
+       pf->hw_disabled_flags |= I40E_FLAG_FD_ATR_ENABLED;
        /* flush all filters */
        wr32(&pf->hw, I40E_PFQF_CTL_1,
             I40E_PFQF_CTL_1_CLEARFDTABLE_MASK);
@@ -6229,8 +6110,8 @@ static void i40e_fdir_flush_and_replay(struct i40e_pf *pf)
        } else {
                /* replay sideband filters */
                i40e_fdir_filter_restore(pf->vsi[pf->lan_vsi]);
-               if (!disable_atr)
-                       pf->auto_disable_flags &= ~I40E_FLAG_FD_ATR_ENABLED;
+               if (!disable_atr && !pf->fd_tcp4_filter_cnt)
+                       pf->hw_disabled_flags &= ~I40E_FLAG_FD_ATR_ENABLED;
                clear_bit(__I40E_FD_FLUSH_REQUESTED, &pf->state);
                if (I40E_DEBUG_FD & pf->hw.debug_mask)
                        dev_info(&pf->pdev->dev, "FD Filter table flushed and FD-SB replayed.\n");
@@ -6283,9 +6164,6 @@ static void i40e_vsi_link_event(struct i40e_vsi *vsi, bool link_up)
 
        switch (vsi->type) {
        case I40E_VSI_MAIN:
-#ifdef I40E_FCOE
-       case I40E_VSI_FCOE:
-#endif
                if (!vsi->netdev || !vsi->netdev_registered)
                        break;
 
@@ -6444,7 +6322,6 @@ static void i40e_reset_subtask(struct i40e_pf *pf)
 {
        u32 reset_flags = 0;
 
-       rtnl_lock();
        if (test_bit(__I40E_REINIT_REQUESTED, &pf->state)) {
                reset_flags |= BIT(__I40E_REINIT_REQUESTED);
                clear_bit(__I40E_REINIT_REQUESTED, &pf->state);
@@ -6470,18 +6347,19 @@ static void i40e_reset_subtask(struct i40e_pf *pf)
         * precedence before starting a new reset sequence.
         */
        if (test_bit(__I40E_RESET_INTR_RECEIVED, &pf->state)) {
-               i40e_handle_reset_warning(pf);
-               goto unlock;
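+               /* rtnl is not held here (lock_acquired == false), so the
+                * prep and rebuild steps take it themselves where needed.
+                */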
+               i40e_prep_for_reset(pf, false);
+               i40e_reset(pf);
+               i40e_rebuild(pf, false, false);
        }
 
        /* If we're already down or resetting, just bail */
        if (reset_flags &&
            !test_bit(__I40E_DOWN, &pf->state) &&
-           !test_bit(__I40E_CONFIG_BUSY, &pf->state))
-               i40e_do_reset(pf, reset_flags);
-
-unlock:
-       rtnl_unlock();
+           !test_bit(__I40E_CONFIG_BUSY, &pf->state)) {
+               rtnl_lock();
+               i40e_do_reset(pf, reset_flags, true);
+               rtnl_unlock();
+       }
 }
 
 /**
@@ -6627,9 +6505,11 @@ static void i40e_clean_adminq_subtask(struct i40e_pf *pf)
                                 opcode);
                        break;
                }
-       } while (pending && (i++ < pf->adminq_work_limit));
+       } while (i++ < pf->adminq_work_limit);
+
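+       /* Only clear the pending bit if the AQ was drained within the
+        * work limit; otherwise leave it set so we process events again.
+        */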
+       if (i < pf->adminq_work_limit)
+               clear_bit(__I40E_ADMINQ_EVENT_PENDING, &pf->state);
 
-       clear_bit(__I40E_ADMINQ_EVENT_PENDING, &pf->state);
        /* re-enable Admin queue interrupt cause */
        val = rd32(hw, I40E_PFINT_ICR0_ENA);
        val |=  I40E_PFINT_ICR0_ENA_ADMINQ_MASK;
@@ -6967,10 +6847,12 @@ static void i40e_fdir_teardown(struct i40e_pf *pf)
 /**
  * i40e_prep_for_reset - prep for the core to reset
  * @pf: board private structure
+ * @lock_acquired: indicates whether or not the lock has been acquired
+ * before this function was called.
  *
  * Close up the VFs and other things in prep for PF Reset.
   **/
-static void i40e_prep_for_reset(struct i40e_pf *pf)
+static void i40e_prep_for_reset(struct i40e_pf *pf, bool lock_acquired)
 {
        struct i40e_hw *hw = &pf->hw;
        i40e_status ret = 0;
@@ -6985,7 +6867,12 @@ static void i40e_prep_for_reset(struct i40e_pf *pf)
        dev_dbg(&pf->pdev->dev, "Tearing down internal switch for reset\n");
 
        /* quiesce the VSIs and their queues that are not already DOWN */
+       /* pf_quiesce_all_vsi modifies netdev structures - rtnl_lock needed */
+       if (!lock_acquired)
+               rtnl_lock();
        i40e_pf_quiesce_all_vsi(pf);
+       if (!lock_acquired)
+               rtnl_unlock();
 
        for (v = 0; v < pf->num_alloc_vsi; v++) {
                if (pf->vsi[v])
@@ -7020,29 +6907,39 @@ static void i40e_send_version(struct i40e_pf *pf)
 }
 
 /**
- * i40e_reset_and_rebuild - reset and rebuild using a saved config
+ * i40e_reset - wait for core reset to finish, reset the PF if CORER was not seen
  * @pf: board private structure
- * @reinit: if the Main VSI needs to re-initialized.
  **/
-static void i40e_reset_and_rebuild(struct i40e_pf *pf, bool reinit)
+static int i40e_reset(struct i40e_pf *pf)
 {
        struct i40e_hw *hw = &pf->hw;
-       u8 set_fc_aq_fail = 0;
        i40e_status ret;
-       u32 val;
-       u32 v;
 
-       /* Now we wait for GRST to settle out.
-        * We don't have to delete the VEBs or VSIs from the hw switch
-        * because the reset will make them disappear.
-        */
        ret = i40e_pf_reset(hw);
        if (ret) {
                dev_info(&pf->pdev->dev, "PF reset failed, %d\n", ret);
                set_bit(__I40E_RESET_FAILED, &pf->state);
-               goto clear_recovery;
+               clear_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state);
+       } else {
+               pf->pfr_count++;
        }
-       pf->pfr_count++;
+       return ret;
+}
+
+/**
+ * i40e_rebuild - rebuild using a saved config
+ * @pf: board private structure
+ * @reinit: if the Main VSI needs to be re-initialized.
+ * @lock_acquired: indicates whether or not the lock has been acquired
+ * before this function was called.
+ **/
+static void i40e_rebuild(struct i40e_pf *pf, bool reinit, bool lock_acquired)
+{
+       struct i40e_hw *hw = &pf->hw;
+       u8 set_fc_aq_fail = 0;
+       i40e_status ret;
+       u32 val;
+       int v;
 
        if (test_bit(__I40E_DOWN, &pf->state))
                goto clear_recovery;
@@ -7067,8 +6964,7 @@ static void i40e_reset_and_rebuild(struct i40e_pf *pf, bool reinit)
                goto end_core_reset;
 
        ret = i40e_init_lan_hmc(hw, hw->func_caps.num_tx_qp,
-                               hw->func_caps.num_rx_qp,
-                               pf->fcoe_hmc_cntx_num, pf->fcoe_hmc_filt_num);
+                               hw->func_caps.num_rx_qp, 0, 0);
        if (ret) {
                dev_info(&pf->pdev->dev, "init_lan_hmc failed: %d\n", ret);
                goto end_core_reset;
@@ -7087,14 +6983,12 @@ static void i40e_reset_and_rebuild(struct i40e_pf *pf, bool reinit)
                /* Continue without DCB enabled */
        }
 #endif /* CONFIG_I40E_DCB */
-#ifdef I40E_FCOE
-       i40e_init_pf_fcoe(pf);
-
-#endif
        /* do basic switch setup */
+       if (!lock_acquired)
+               rtnl_lock();
        ret = i40e_setup_pf_switch(pf, reinit);
        if (ret)
-               goto end_core_reset;
+               goto end_unlock;
 
        /* The driver only wants link up/down and module qualification
         * reports from firmware.  Note the negative logic.
@@ -7165,7 +7059,7 @@ static void i40e_reset_and_rebuild(struct i40e_pf *pf, bool reinit)
                if (ret) {
                        dev_info(&pf->pdev->dev,
                                 "rebuild of Main VSI failed: %d\n", ret);
-                       goto end_core_reset;
+                       goto end_unlock;
                }
        }
 
@@ -7216,23 +7110,48 @@ static void i40e_reset_and_rebuild(struct i40e_pf *pf, bool reinit)
        /* tell the firmware that we're starting */
        i40e_send_version(pf);
 
+end_unlock:
+       if (!lock_acquired)
+               rtnl_unlock();
 end_core_reset:
        clear_bit(__I40E_RESET_FAILED, &pf->state);
 clear_recovery:
        clear_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state);
 }
 
+/**
+ * i40e_reset_and_rebuild - reset and rebuild using a saved config
+ * @pf: board private structure
+ * @reinit: if the Main VSI needs to be re-initialized.
+ * @lock_acquired: indicates whether or not the lock has been acquired
+ * before this function was called.
+ **/
+static void i40e_reset_and_rebuild(struct i40e_pf *pf, bool reinit,
+                                  bool lock_acquired)
+{
+       int ret;
+       /* Now we wait for GRST to settle out.
+        * We don't have to delete the VEBs or VSIs from the hw switch
+        * because the reset will make them disappear.
+        */
+       ret = i40e_reset(pf);
+       if (!ret)
+               i40e_rebuild(pf, reinit, lock_acquired);
+}
+
 /**
  * i40e_handle_reset_warning - prep for the PF to reset, reset and rebuild
  * @pf: board private structure
+ * @lock_acquired: indicates whether or not the lock has been acquired
+ * before this function was called.
  *
  * Close up the VFs and other things in prep for a Core Reset,
  * then get ready to rebuild the world.
  **/
-static void i40e_handle_reset_warning(struct i40e_pf *pf)
+static void i40e_handle_reset_warning(struct i40e_pf *pf, bool lock_acquired)
 {
-       i40e_prep_for_reset(pf);
-       i40e_reset_and_rebuild(pf, false);
+       i40e_prep_for_reset(pf, lock_acquired);
+       i40e_reset_and_rebuild(pf, false, lock_acquired);
 }
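
With the split above, i40e_reset_and_rebuild is a thin wrapper over i40e_reset and i40e_rebuild, and callers such as i40e_reset_subtask can invoke the steps individually. A sketch of the resulting call shape (hypothetical caller, kernel context assumed):

    static void example_core_reset(struct i40e_pf *pf, bool have_rtnl)
    {
            i40e_prep_for_reset(pf, have_rtnl);
            if (!i40e_reset(pf))    /* rebuild only if the PF reset took */
                    i40e_rebuild(pf, false, have_rtnl);
    }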
 
 /**
@@ -7351,7 +7270,7 @@ static void i40e_sync_udp_filters_subtask(struct i40e_pf *pf)
 {
        struct i40e_hw *hw = &pf->hw;
        i40e_status ret;
-       __be16 port;
+       u16 port;
        int i;
 
        if (!(pf->flags & I40E_FLAG_UDP_FILTER_SYNC))
@@ -7375,7 +7294,7 @@ static void i40e_sync_udp_filters_subtask(struct i40e_pf *pf)
                                        "%s %s port %d, index %d failed, err %s aq_err %s\n",
                                        pf->udp_ports[i].type ? "vxlan" : "geneve",
                                        port ? "add" : "delete",
-                                       ntohs(port), i,
+                                       port, i,
                                        i40e_stat_str(&pf->hw, ret),
                                        i40e_aq_str(&pf->hw,
                                                    pf->hw.aq.asq_last_status));
@@ -7411,7 +7330,18 @@ static void i40e_service_task(struct work_struct *work)
        i40e_vc_process_vflr_event(pf);
        i40e_watchdog_subtask(pf);
        i40e_fdir_reinit_subtask(pf);
-       i40e_client_subtask(pf);
+       if (pf->flags & I40E_FLAG_CLIENT_RESET) {
+               /* Client subtask will reopen next time through. */
+               i40e_notify_client_of_netdev_close(pf->vsi[pf->lan_vsi], true);
+               pf->flags &= ~I40E_FLAG_CLIENT_RESET;
+       } else {
+               i40e_client_subtask(pf);
+               if (pf->flags & I40E_FLAG_CLIENT_L2_CHANGE) {
+                       i40e_notify_client_of_l2_param_changes(
+                                                       pf->vsi[pf->lan_vsi]);
+                       pf->flags &= ~I40E_FLAG_CLIENT_L2_CHANGE;
+               }
+       }
        i40e_sync_filters_subtask(pf);
        i40e_sync_udp_filters_subtask(pf);
        i40e_clean_adminq_subtask(pf);
@@ -7484,15 +7414,6 @@ static int i40e_set_num_rings_in_vsi(struct i40e_vsi *vsi)
                                      I40E_REQ_DESCRIPTOR_MULTIPLE);
                break;
 
-#ifdef I40E_FCOE
-       case I40E_VSI_FCOE:
-               vsi->alloc_queue_pairs = pf->num_fcoe_qps;
-               vsi->num_desc = ALIGN(I40E_DEFAULT_NUM_DESCRIPTORS,
-                                     I40E_REQ_DESCRIPTOR_MULTIPLE);
-               vsi->num_q_vectors = pf->num_fcoe_msix;
-               break;
-
-#endif /* I40E_FCOE */
        default:
                WARN_ON(1);
                return -ENODATA;
@@ -7809,6 +7730,7 @@ static int i40e_reserve_msix_vectors(struct i40e_pf *pf, int vectors)
 static int i40e_init_msix(struct i40e_pf *pf)
 {
        struct i40e_hw *hw = &pf->hw;
+       int cpus, extra_vectors;
        int vectors_left;
        int v_budget, i;
        int v_actual;
@@ -7827,9 +7749,6 @@ static int i40e_init_msix(struct i40e_pf *pf)
         *      - assumes symmetric Tx/Rx pairing
         *   - The number of VMDq pairs
         *   - The CPU count within the NUMA node if iWARP is enabled
-#ifdef I40E_FCOE
-        *   - The number of FCOE qps.
-#endif
         * Once we count this up, try the request.
         *
         * If we can't get what we want, we'll simplify to nearly nothing
@@ -7844,10 +7763,16 @@ static int i40e_init_msix(struct i40e_pf *pf)
                vectors_left--;
        }
 
-       /* reserve vectors for the main PF traffic queues */
-       pf->num_lan_msix = min_t(int, num_online_cpus(), vectors_left);
+       /* reserve some vectors for the main PF traffic queues. Initially we
+        * only reserve at most 50% of the available vectors, in the case that
+        * the number of online CPUs is large. This ensures that we can enable
+        * extra features as well. Once we've enabled the other features, we
+        * will use any remaining vectors to reach as close as we can to the
+        * number of online CPUs.
+        */
+       cpus = num_online_cpus();
+       pf->num_lan_msix = min_t(int, cpus, vectors_left / 2);
        vectors_left -= pf->num_lan_msix;
-       v_budget += pf->num_lan_msix;
 
        /* reserve one vector for sideband flow director */
        if (pf->flags & I40E_FLAG_FD_SB_ENABLED) {
@@ -7860,20 +7785,6 @@ static int i40e_init_msix(struct i40e_pf *pf)
                }
        }
 
-#ifdef I40E_FCOE
-       /* can we reserve enough for FCoE? */
-       if (pf->flags & I40E_FLAG_FCOE_ENABLED) {
-               if (!vectors_left)
-                       pf->num_fcoe_msix = 0;
-               else if (vectors_left >= pf->num_fcoe_qps)
-                       pf->num_fcoe_msix = pf->num_fcoe_qps;
-               else
-                       pf->num_fcoe_msix = 1;
-               v_budget += pf->num_fcoe_msix;
-               vectors_left -= pf->num_fcoe_msix;
-       }
-
-#endif
        /* can we reserve enough for iWARP? */
        if (pf->flags & I40E_FLAG_IWARP_ENABLED) {
                iwarp_requested = pf->num_iwarp_msix;
@@ -7910,6 +7821,23 @@ static int i40e_init_msix(struct i40e_pf *pf)
                }
        }
 
+       /* On systems with a large number of SMP cores, we previously limited
+        * the number of vectors for num_lan_msix to be at most 50% of the
+        * available vectors, to allow for other features. Now, we add back
+        * the remaining vectors. However, we ensure that the total
+        * num_lan_msix will not exceed num_online_cpus(). To do this, we
+        * calculate the number of vectors we can add without going over the
+        * cap of CPUs. For systems with a small number of CPUs this will be
+        * zero.
+        */
+       extra_vectors = min_t(int, cpus - pf->num_lan_msix, vectors_left);
+       pf->num_lan_msix += extra_vectors;
+       vectors_left -= extra_vectors;
+
+       WARN(vectors_left < 0,
+            "Calculation of remaining vectors underflowed. This is an accounting bug when determining total MSI-X vectors.\n");
+
+       v_budget += pf->num_lan_msix;
        pf->msix_entries = kcalloc(v_budget, sizeof(struct msix_entry),
                                   GFP_KERNEL);
        if (!pf->msix_entries)
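
The two comment blocks above describe a two-phase budget: reserve at most half the vectors for LAN traffic, let the other features claim theirs, then hand the leftovers back up to the CPU count. A self-contained user-space sketch of that arithmetic (all numbers illustrative):

    #include <stdio.h>

    static int min_int(int a, int b) { return a < b ? a : b; }

    int main(void)
    {
            int cpus = 16, vectors_left = 20;
            int other_features = 6;         /* assumed: FD-SB, iWARP, VMDq, ... */

            int num_lan_msix = min_int(cpus, vectors_left / 2);     /* phase 1: 10 */
            vectors_left -= num_lan_msix + other_features;          /* 4 left */

            int extra = min_int(cpus - num_lan_msix, vectors_left); /* phase 2: 4 */
            num_lan_msix += extra;
            vectors_left -= extra;

            printf("LAN vectors %d, remaining %d\n", num_lan_msix, vectors_left);
            return 0;
    }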
@@ -7950,10 +7878,6 @@ static int i40e_init_msix(struct i40e_pf *pf)
                pf->num_vmdq_msix = 1;    /* force VMDqs to only one vector */
                pf->num_vmdq_vsis = 1;
                pf->num_vmdq_qps = 1;
-#ifdef I40E_FCOE
-               pf->num_fcoe_qps = 0;
-               pf->num_fcoe_msix = 0;
-#endif
 
                /* partition out the remaining vectors */
                switch (vec) {
@@ -7967,13 +7891,6 @@ static int i40e_init_msix(struct i40e_pf *pf)
                        } else {
                                pf->num_lan_msix = 2;
                        }
-#ifdef I40E_FCOE
-                       /* give one vector to FCoE */
-                       if (pf->flags & I40E_FLAG_FCOE_ENABLED) {
-                               pf->num_lan_msix = 1;
-                               pf->num_fcoe_msix = 1;
-                       }
-#endif
                        break;
                default:
                        if (pf->flags & I40E_FLAG_IWARP_ENABLED) {
@@ -7993,13 +7910,6 @@ static int i40e_init_msix(struct i40e_pf *pf)
                               (vec - (pf->num_iwarp_msix + pf->num_vmdq_vsis)),
                                                              pf->num_lan_msix);
                        pf->num_lan_qps = pf->num_lan_msix;
-#ifdef I40E_FCOE
-                       /* give one vector to FCoE */
-                       if (pf->flags & I40E_FLAG_FCOE_ENABLED) {
-                               pf->num_fcoe_msix = 1;
-                               vec--;
-                       }
-#endif
                        break;
                }
        }
@@ -8020,13 +7930,6 @@ static int i40e_init_msix(struct i40e_pf *pf)
                dev_info(&pf->pdev->dev, "IWARP disabled, not enough MSI-X vectors\n");
                pf->flags &= ~I40E_FLAG_IWARP_ENABLED;
        }
-#ifdef I40E_FCOE
-
-       if ((pf->flags & I40E_FLAG_FCOE_ENABLED) && (pf->num_fcoe_msix == 0)) {
-               dev_info(&pf->pdev->dev, "FCOE disabled, not enough MSI-X vectors\n");
-               pf->flags &= ~I40E_FLAG_FCOE_ENABLED;
-       }
-#endif
        i40e_debug(&pf->hw, I40E_DEBUG_INIT,
                   "MSI-X vector distribution: PF %d, VMDq %d, FDSB %d, iWARP %d\n",
                   pf->num_lan_msix,
@@ -8125,9 +8028,6 @@ static int i40e_init_interrupt_scheme(struct i40e_pf *pf)
                if (vectors < 0) {
                        pf->flags &= ~(I40E_FLAG_MSIX_ENABLED   |
                                       I40E_FLAG_IWARP_ENABLED  |
-#ifdef I40E_FCOE
-                                      I40E_FLAG_FCOE_ENABLED   |
-#endif
                                       I40E_FLAG_RSS_ENABLED    |
                                       I40E_FLAG_DCB_CAPABLE    |
                                       I40E_FLAG_DCB_ENABLED    |
@@ -8360,13 +8260,10 @@ static int i40e_config_rss_reg(struct i40e_vsi *vsi, const u8 *seed,
 
                if (vsi->type == I40E_VSI_MAIN) {
                        for (i = 0; i <= I40E_PFQF_HKEY_MAX_INDEX; i++)
-                               i40e_write_rx_ctl(hw, I40E_PFQF_HKEY(i),
-                                                 seed_dw[i]);
+                               wr32(hw, I40E_PFQF_HKEY(i), seed_dw[i]);
                } else if (vsi->type == I40E_VSI_SRIOV) {
                        for (i = 0; i <= I40E_VFQF_HKEY1_MAX_INDEX; i++)
-                               i40e_write_rx_ctl(hw,
-                                                 I40E_VFQF_HKEY1(i, vf_id),
-                                                 seed_dw[i]);
+                               wr32(hw, I40E_VFQF_HKEY1(i, vf_id), seed_dw[i]);
                } else {
                        dev_err(&pf->pdev->dev, "Cannot set RSS seed - invalid VSI type\n");
                }
@@ -8384,9 +8281,7 @@ static int i40e_config_rss_reg(struct i40e_vsi *vsi, const u8 *seed,
                        if (lut_size != I40E_VF_HLUT_ARRAY_SIZE)
                                return -EINVAL;
                        for (i = 0; i <= I40E_VFQF_HLUT_MAX_INDEX; i++)
-                               i40e_write_rx_ctl(hw,
-                                                 I40E_VFQF_HLUT1(i, vf_id),
-                                                 lut_dw[i]);
+                               wr32(hw, I40E_VFQF_HLUT1(i, vf_id), lut_dw[i]);
                } else {
                        dev_err(&pf->pdev->dev, "Cannot set RSS LUT - invalid VSI type\n");
                }
@@ -8514,9 +8409,12 @@ static int i40e_pf_config_rss(struct i40e_pf *pf)
        i40e_write_rx_ctl(hw, I40E_PFQF_CTL_0, reg_val);
 
        /* Determine the RSS size of the VSI */
-       if (!vsi->rss_size)
-               vsi->rss_size = min_t(int, pf->alloc_rss_size,
-                                     vsi->num_queue_pairs);
+       if (!vsi->rss_size) {
+               u16 qcount;
+
+               qcount = vsi->num_queue_pairs / vsi->tc_config.numtc;
+               vsi->rss_size = min_t(int, pf->alloc_rss_size, qcount);
+       }
        if (!vsi->rss_size)
                return -EINVAL;
 
@@ -8550,6 +8448,7 @@ static int i40e_pf_config_rss(struct i40e_pf *pf)
  *
  * returns 0 if rss is not enabled; if enabled, returns the final rss queue
  * count, which may differ from the requested queue count.
+ * Note: expects to be called while under rtnl_lock()
  **/
 int i40e_reconfig_rss_queues(struct i40e_pf *pf, int queue_count)
 {
@@ -8562,12 +8461,14 @@ int i40e_reconfig_rss_queues(struct i40e_pf *pf, int queue_count)
        new_rss_size = min_t(int, queue_count, pf->rss_size_max);
 
        if (queue_count != vsi->num_queue_pairs) {
+               u16 qcount;
+
                vsi->req_queue_pairs = queue_count;
-               i40e_prep_for_reset(pf);
+               i40e_prep_for_reset(pf, true);
 
                pf->alloc_rss_size = new_rss_size;
 
-               i40e_reset_and_rebuild(pf, true);
+               i40e_reset_and_rebuild(pf, true, true);
 
                /* Discard the user configured hash keys and lut, if less
                 * queues are enabled.
@@ -8579,8 +8480,8 @@ int i40e_reconfig_rss_queues(struct i40e_pf *pf, int queue_count)
                }
 
                /* Reset vsi->rss_size, as number of enabled queues changed */
-               vsi->rss_size = min_t(int, pf->alloc_rss_size,
-                                     vsi->num_queue_pairs);
+               qcount = vsi->num_queue_pairs / vsi->tc_config.numtc;
+               vsi->rss_size = min_t(int, pf->alloc_rss_size, qcount);
 
                i40e_pf_config_rss(pf);
        }
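
Both hunks replace a per-VSI cap with a per-traffic-class one: the RSS table may only span the queues of a single TC, so the cap becomes num_queue_pairs divided by numtc. A user-space sketch of the computation (values made up):

    #include <stdio.h>

    static int min_int(int a, int b) { return a < b ? a : b; }

    int main(void)
    {
            int num_queue_pairs = 16, numtc = 4, alloc_rss_size = 8;
            int qcount = num_queue_pairs / numtc;            /* 4 queues per TC */
            int rss_size = min_int(alloc_rss_size, qcount);  /* capped at 4, not 8 */

            printf("rss_size = %d\n", rss_size);
            return 0;
    }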
@@ -8813,10 +8714,6 @@ static int i40e_sw_init(struct i40e_pf *pf)
                pf->num_iwarp_msix = (int)num_online_cpus() + 1;
        }
 
-#ifdef I40E_FCOE
-       i40e_init_pf_fcoe(pf);
-
-#endif /* I40E_FCOE */
 #ifdef CONFIG_PCI_IOV
        if (pf->hw.func_caps.num_vfs && pf->hw.partition_id == 1) {
                pf->num_vf_qps = I40E_DEFAULT_QUEUES_PER_VF;
@@ -8843,9 +8740,9 @@ static int i40e_sw_init(struct i40e_pf *pf)
                    (pf->hw.aq.api_min_ver > 4))) {
                /* Supported in FW API version higher than 1.4 */
                pf->flags |= I40E_FLAG_GENEVE_OFFLOAD_CAPABLE;
-               pf->auto_disable_flags = I40E_FLAG_HW_ATR_EVICT_CAPABLE;
+               pf->hw_disabled_flags = I40E_FLAG_HW_ATR_EVICT_CAPABLE;
        } else {
-               pf->auto_disable_flags = I40E_FLAG_HW_ATR_EVICT_CAPABLE;
+               pf->hw_disabled_flags = I40E_FLAG_HW_ATR_EVICT_CAPABLE;
        }
 
        pf->eeprom_version = 0xDEAD;
@@ -8906,14 +8803,14 @@ bool i40e_set_ntuple(struct i40e_pf *pf, netdev_features_t features)
                        i40e_fdir_filter_exit(pf);
                }
                pf->flags &= ~I40E_FLAG_FD_SB_ENABLED;
-               pf->auto_disable_flags &= ~I40E_FLAG_FD_SB_ENABLED;
+               pf->hw_disabled_flags &= ~I40E_FLAG_FD_SB_ENABLED;
                /* reset fd counters */
-               pf->fd_add_err = pf->fd_atr_cnt = pf->fd_tcp_rule = 0;
-               pf->fdir_pf_active_filters = 0;
+               pf->fd_add_err = 0;
+               pf->fd_atr_cnt = 0;
                /* if ATR was auto disabled it can be re-enabled. */
                if ((pf->flags & I40E_FLAG_FD_ATR_ENABLED) &&
-                   (pf->auto_disable_flags & I40E_FLAG_FD_ATR_ENABLED)) {
-                       pf->auto_disable_flags &= ~I40E_FLAG_FD_ATR_ENABLED;
+                   (pf->hw_disabled_flags & I40E_FLAG_FD_ATR_ENABLED)) {
+                       pf->hw_disabled_flags &= ~I40E_FLAG_FD_ATR_ENABLED;
                        if (I40E_DEBUG_FD & pf->hw.debug_mask)
                                dev_info(&pf->pdev->dev, "ATR re-enabled.\n");
                }
@@ -8947,6 +8844,7 @@ static void i40e_clear_rss_lut(struct i40e_vsi *vsi)
  * i40e_set_features - set the netdev feature flags
  * @netdev: ptr to the netdev being adjusted
  * @features: the feature set that the stack is suggesting
+ * Note: expects to be called while under rtnl_lock()
  **/
 static int i40e_set_features(struct net_device *netdev,
                             netdev_features_t features)
@@ -8970,7 +8868,7 @@ static int i40e_set_features(struct net_device *netdev,
        need_reset = i40e_set_ntuple(pf, features);
 
        if (need_reset)
-               i40e_do_reset(pf, BIT_ULL(__I40E_PF_RESET_REQUESTED));
+               i40e_do_reset(pf, BIT_ULL(__I40E_PF_RESET_REQUESTED), true);
 
        return 0;
 }
@@ -8982,7 +8880,7 @@ static int i40e_set_features(struct net_device *netdev,
  *
  * Returns the index number or I40E_MAX_PF_UDP_OFFLOAD_PORTS if port not found
  **/
-static u8 i40e_get_udp_port_idx(struct i40e_pf *pf, __be16 port)
+static u8 i40e_get_udp_port_idx(struct i40e_pf *pf, u16 port)
 {
        u8 i;
 
@@ -9005,7 +8903,7 @@ static void i40e_udp_tunnel_add(struct net_device *netdev,
        struct i40e_netdev_priv *np = netdev_priv(netdev);
        struct i40e_vsi *vsi = np->vsi;
        struct i40e_pf *pf = vsi->back;
-       __be16 port = ti->port;
+       u16 port = ntohs(ti->port);
        u8 next_idx;
        u8 idx;
 
@@ -9013,8 +8911,7 @@ static void i40e_udp_tunnel_add(struct net_device *netdev,
 
        /* Check if port already exists */
        if (idx < I40E_MAX_PF_UDP_OFFLOAD_PORTS) {
-               netdev_info(netdev, "port %d already offloaded\n",
-                           ntohs(port));
+               netdev_info(netdev, "port %d already offloaded\n", port);
                return;
        }
 
@@ -9023,7 +8920,7 @@ static void i40e_udp_tunnel_add(struct net_device *netdev,
 
        if (next_idx == I40E_MAX_PF_UDP_OFFLOAD_PORTS) {
                netdev_info(netdev, "maximum number of offloaded UDP ports reached, not adding port %d\n",
-                           ntohs(port));
+                           port);
                return;
        }
 
@@ -9057,7 +8954,7 @@ static void i40e_udp_tunnel_del(struct net_device *netdev,
        struct i40e_netdev_priv *np = netdev_priv(netdev);
        struct i40e_vsi *vsi = np->vsi;
        struct i40e_pf *pf = vsi->back;
-       __be16 port = ti->port;
+       u16 port = ntohs(ti->port);
        u8 idx;
 
        idx = i40e_get_udp_port_idx(pf, port);
@@ -9089,7 +8986,7 @@ static void i40e_udp_tunnel_del(struct net_device *netdev,
        return;
 not_found:
        netdev_warn(netdev, "UDP port %d was not found, not deleting\n",
-                   ntohs(port));
+                   port);
 }
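
The __be16-to-u16 changes above convert the tunnel port to host order exactly once, in the .ndo_udp_tunnel_add/del callbacks, so the lookups and log messages no longer need ntohs() sprinkled through them. A user-space sketch of the convention (port number illustrative):

    #include <stdint.h>
    #include <stdio.h>
    #include <arpa/inet.h>

    int main(void)
    {
            uint16_t wire = htons(4789);    /* network order, as in ti->port */
            uint16_t port = ntohs(wire);    /* convert once at the boundary */

            printf("offloading UDP port %u\n", port);
            return 0;
    }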
 
 static int i40e_get_phys_port_id(struct net_device *netdev,
@@ -9166,6 +9063,8 @@ static int i40e_ndo_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
  * is to change the mode then that requires a PF reset to
  * allow rebuild of the components with required hardware
  * bridge mode enabled.
+ *
+ * Note: expects to be called while under rtnl_lock()
  **/
 static int i40e_ndo_bridge_setlink(struct net_device *dev,
                                   struct nlmsghdr *nlh,
@@ -9221,7 +9120,8 @@ static int i40e_ndo_bridge_setlink(struct net_device *dev,
                                pf->flags |= I40E_FLAG_VEB_MODE_ENABLED;
                        else
                                pf->flags &= ~I40E_FLAG_VEB_MODE_ENABLED;
-                       i40e_do_reset(pf, BIT_ULL(__I40E_PF_RESET_REQUESTED));
+                       i40e_do_reset(pf, BIT_ULL(__I40E_PF_RESET_REQUESTED),
+                                     true);
                        break;
                }
        }
@@ -9344,10 +9244,6 @@ static const struct net_device_ops i40e_netdev_ops = {
        .ndo_poll_controller    = i40e_netpoll,
 #endif
        .ndo_setup_tc           = __i40e_setup_tc,
-#ifdef I40E_FCOE
-       .ndo_fcoe_enable        = i40e_fcoe_enable,
-       .ndo_fcoe_disable       = i40e_fcoe_disable,
-#endif
        .ndo_set_features       = i40e_set_features,
        .ndo_set_vf_mac         = i40e_ndo_set_vf_mac,
        .ndo_set_vf_vlan        = i40e_ndo_set_vf_port_vlan,
@@ -9380,6 +9276,8 @@ static int i40e_config_netdev(struct i40e_vsi *vsi)
        u8 broadcast[ETH_ALEN];
        u8 mac_addr[ETH_ALEN];
        int etherdev_size;
+       netdev_features_t hw_enc_features;
+       netdev_features_t hw_features;
 
        etherdev_size = sizeof(struct i40e_netdev_priv);
        netdev = alloc_etherdev_mq(etherdev_size, vsi->alloc_queue_pairs);
@@ -9390,52 +9288,57 @@ static int i40e_config_netdev(struct i40e_vsi *vsi)
        np = netdev_priv(netdev);
        np->vsi = vsi;
 
-       netdev->hw_enc_features |= NETIF_F_SG                   |
-                                  NETIF_F_IP_CSUM              |
-                                  NETIF_F_IPV6_CSUM            |
-                                  NETIF_F_HIGHDMA              |
-                                  NETIF_F_SOFT_FEATURES        |
-                                  NETIF_F_TSO                  |
-                                  NETIF_F_TSO_ECN              |
-                                  NETIF_F_TSO6                 |
-                                  NETIF_F_GSO_GRE              |
-                                  NETIF_F_GSO_GRE_CSUM         |
-                                  NETIF_F_GSO_IPXIP4           |
-                                  NETIF_F_GSO_IPXIP6           |
-                                  NETIF_F_GSO_UDP_TUNNEL       |
-                                  NETIF_F_GSO_UDP_TUNNEL_CSUM  |
-                                  NETIF_F_GSO_PARTIAL          |
-                                  NETIF_F_SCTP_CRC             |
-                                  NETIF_F_RXHASH               |
-                                  NETIF_F_RXCSUM               |
-                                  0;
+       hw_enc_features = NETIF_F_SG                    |
+                         NETIF_F_IP_CSUM               |
+                         NETIF_F_IPV6_CSUM             |
+                         NETIF_F_HIGHDMA               |
+                         NETIF_F_SOFT_FEATURES         |
+                         NETIF_F_TSO                   |
+                         NETIF_F_TSO_ECN               |
+                         NETIF_F_TSO6                  |
+                         NETIF_F_GSO_GRE               |
+                         NETIF_F_GSO_GRE_CSUM          |
+                         NETIF_F_GSO_PARTIAL           |
+                         NETIF_F_GSO_UDP_TUNNEL        |
+                         NETIF_F_GSO_UDP_TUNNEL_CSUM   |
+                         NETIF_F_SCTP_CRC              |
+                         NETIF_F_RXHASH                |
+                         NETIF_F_RXCSUM                |
+                         0;
 
        if (!(pf->flags & I40E_FLAG_OUTER_UDP_CSUM_CAPABLE))
                netdev->gso_partial_features |= NETIF_F_GSO_UDP_TUNNEL_CSUM;
 
        netdev->gso_partial_features |= NETIF_F_GSO_GRE_CSUM;
 
+       netdev->hw_enc_features |= hw_enc_features;
+
        /* record features VLANs can make use of */
-       netdev->vlan_features |= netdev->hw_enc_features |
-                                NETIF_F_TSO_MANGLEID;
+       netdev->vlan_features |= hw_enc_features | NETIF_F_TSO_MANGLEID;
 
        if (!(pf->flags & I40E_FLAG_MFP_ENABLED))
                netdev->hw_features |= NETIF_F_NTUPLE;
+       hw_features = hw_enc_features           |
+                     NETIF_F_HW_VLAN_CTAG_TX   |
+                     NETIF_F_HW_VLAN_CTAG_RX;
 
-       netdev->hw_features |= netdev->hw_enc_features  |
-                              NETIF_F_HW_VLAN_CTAG_TX  |
-                              NETIF_F_HW_VLAN_CTAG_RX;
+       netdev->hw_features |= hw_features;
 
-       netdev->features |= netdev->hw_features | NETIF_F_HW_VLAN_CTAG_FILTER;
+       netdev->features |= hw_features | NETIF_F_HW_VLAN_CTAG_FILTER;
        netdev->hw_enc_features |= NETIF_F_TSO_MANGLEID;
 
        if (vsi->type == I40E_VSI_MAIN) {
                SET_NETDEV_DEV(netdev, &pf->pdev->dev);
                ether_addr_copy(mac_addr, hw->mac.perm_addr);
-               /* The following steps are necessary to prevent reception
-                * of tagged packets - some older NVM configurations load a
-                * default a MAC-VLAN filter that accepts any tagged packet
-                * which must be replaced by a normal filter.
+               /* The following steps are necessary for two reasons. First,
+                * some older NVM configurations load a default MAC-VLAN
+                * filter that will accept any tagged packet, and we want to
+                * replace this with a normal filter. Additionally, it is
+                * possible our MAC address was provided by the platform using
+                * Open Firmware or similar.
+                *
+                * Thus, we need to remove the default filter and install one
+                * specific to the MAC address.
                 */
                i40e_rm_default_mac_filter(vsi, mac_addr);
                spin_lock_bh(&vsi->mac_filter_hash_lock);
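
Building hw_enc_features and hw_features in locals first means the netdev fields are assigned complete masks instead of being composed from partially-written fields. A tiny user-space sketch of the idea (flag values invented):

    #include <stdio.h>

    #define F_SG    (1u << 0)
    #define F_CSUM  (1u << 1)
    #define F_TSO   (1u << 2)
    #define F_VLAN  (1u << 3)

    int main(void)
    {
            unsigned int enc = F_SG | F_CSUM | F_TSO;  /* accumulate in a local */
            unsigned int hw  = enc | F_VLAN;           /* derive, then publish once */

            printf("enc=0x%x hw=0x%x\n", enc, hw);
            return 0;
    }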
@@ -9481,9 +9384,6 @@ static int i40e_config_netdev(struct i40e_vsi *vsi)
        netdev->netdev_ops = &i40e_netdev_ops;
        netdev->watchdog_timeo = 5 * HZ;
        i40e_set_ethtool_ops(netdev);
-#ifdef I40E_FCOE
-       i40e_fcoe_config_netdev(netdev, vsi);
-#endif
 
        /* MTU range: 68 - 9706 */
        netdev->min_mtu = ETH_MIN_MTU;
@@ -9707,16 +9607,6 @@ static int i40e_add_vsi(struct i40e_vsi *vsi)
                i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, true);
                break;
 
-#ifdef I40E_FCOE
-       case I40E_VSI_FCOE:
-               ret = i40e_fcoe_vsi_init(vsi, &ctxt);
-               if (ret) {
-                       dev_info(&pf->pdev->dev, "failed to initialize FCoE VSI\n");
-                       return ret;
-               }
-               break;
-
-#endif /* I40E_FCOE */
        case I40E_VSI_IWARP:
                /* send down message to iWARP */
                break;
@@ -10133,7 +10023,6 @@ struct i40e_vsi *i40e_vsi_setup(struct i40e_pf *pf, u8 type,
                        }
                }
        case I40E_VSI_VMDQ2:
-       case I40E_VSI_FCOE:
                ret = i40e_config_netdev(vsi);
                if (ret)
                        goto err_netdev;
@@ -10793,9 +10682,6 @@ static void i40e_determine_queue_usage(struct i40e_pf *pf)
        int queues_left;
 
        pf->num_lan_qps = 0;
-#ifdef I40E_FCOE
-       pf->num_fcoe_qps = 0;
-#endif
 
        /* Find the max queues to be put into basic use.  We'll always be
         * using TC0, whether or not DCB is running, and TC0 will get the
@@ -10812,9 +10698,6 @@ static void i40e_determine_queue_usage(struct i40e_pf *pf)
                /* make sure all the fancies are disabled */
                pf->flags &= ~(I40E_FLAG_RSS_ENABLED    |
                               I40E_FLAG_IWARP_ENABLED  |
-#ifdef I40E_FCOE
-                              I40E_FLAG_FCOE_ENABLED   |
-#endif
                               I40E_FLAG_FD_SB_ENABLED  |
                               I40E_FLAG_FD_ATR_ENABLED |
                               I40E_FLAG_DCB_CAPABLE    |
@@ -10831,9 +10714,6 @@ static void i40e_determine_queue_usage(struct i40e_pf *pf)
 
                pf->flags &= ~(I40E_FLAG_RSS_ENABLED    |
                               I40E_FLAG_IWARP_ENABLED  |
-#ifdef I40E_FCOE
-                              I40E_FLAG_FCOE_ENABLED   |
-#endif
                               I40E_FLAG_FD_SB_ENABLED  |
                               I40E_FLAG_FD_ATR_ENABLED |
                               I40E_FLAG_DCB_ENABLED    |
@@ -10854,22 +10734,6 @@ static void i40e_determine_queue_usage(struct i40e_pf *pf)
                queues_left -= pf->num_lan_qps;
        }
 
-#ifdef I40E_FCOE
-       if (pf->flags & I40E_FLAG_FCOE_ENABLED) {
-               if (I40E_DEFAULT_FCOE <= queues_left) {
-                       pf->num_fcoe_qps = I40E_DEFAULT_FCOE;
-               } else if (I40E_MINIMUM_FCOE <= queues_left) {
-                       pf->num_fcoe_qps = I40E_MINIMUM_FCOE;
-               } else {
-                       pf->num_fcoe_qps = 0;
-                       pf->flags &= ~I40E_FLAG_FCOE_ENABLED;
-                       dev_info(&pf->pdev->dev, "not enough queues for FCoE. FCoE feature will be disabled\n");
-               }
-
-               queues_left -= pf->num_fcoe_qps;
-       }
-
-#endif
        if (pf->flags & I40E_FLAG_FD_SB_ENABLED) {
                if (queues_left > 1) {
                        queues_left -= 1; /* save 1 queue for FD */
@@ -10901,9 +10765,6 @@ static void i40e_determine_queue_usage(struct i40e_pf *pf)
                pf->num_lan_qps, pf->alloc_rss_size, pf->num_req_vfs,
                pf->num_vf_qps, pf->num_vmdq_vsis, pf->num_vmdq_qps,
                queues_left);
-#ifdef I40E_FCOE
-       dev_dbg(&pf->pdev->dev, "fcoe queues = %d\n", pf->num_fcoe_qps);
-#endif
 }
 
 /**
@@ -10970,10 +10831,6 @@ static void i40e_print_features(struct i40e_pf *pf)
        i += snprintf(&buf[i], REMAIN(i), " Geneve");
        if (pf->flags & I40E_FLAG_PTP)
                i += snprintf(&buf[i], REMAIN(i), " PTP");
-#ifdef I40E_FCOE
-       if (pf->flags & I40E_FLAG_FCOE_ENABLED)
-               i += snprintf(&buf[i], REMAIN(i), " FCOE");
-#endif
        if (pf->flags & I40E_FLAG_VEB_MODE_ENABLED)
                i += snprintf(&buf[i], REMAIN(i), " VEB");
        else
@@ -10986,20 +10843,18 @@ static void i40e_print_features(struct i40e_pf *pf)
 
 /**
  * i40e_get_platform_mac_addr - get platform-specific MAC address
- *
  * @pdev: PCI device information struct
  * @pf: board private structure
  *
- * Look up the MAC address in Open Firmware  on systems that support it,
- * and use IDPROM on SPARC if no OF address is found. On return, the
- * I40E_FLAG_PF_MAC will be wset in pf->flags if a platform-specific value
- * has been selected.
+ * Look up the MAC address for the device. First we'll try
+ * eth_platform_get_mac_address, which will check Open Firmware or an
+ * arch-specific fallback. Otherwise, we'll default to the value stored
+ * in firmware.
  **/
 static void i40e_get_platform_mac_addr(struct pci_dev *pdev, struct i40e_pf *pf)
 {
-       pf->flags &= ~I40E_FLAG_PF_MAC;
-       if (!eth_platform_get_mac_address(&pdev->dev, pf->hw.mac.addr))
-               pf->flags |= I40E_FLAG_PF_MAC;
+       if (eth_platform_get_mac_address(&pdev->dev, pf->hw.mac.addr))
+               i40e_get_mac_addr(&pf->hw, pf->hw.mac.addr);
 }
 
 /**
@@ -11090,6 +10945,9 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
        hw->bus.bus_id = pdev->bus->number;
        pf->instance = pfs_found;
 
+       INIT_LIST_HEAD(&pf->l3_flex_pit_list);
+       INIT_LIST_HEAD(&pf->l4_flex_pit_list);
+
        /* set up the locks for the AQ, do this only once in probe
         * and destroy them only once in remove
         */
@@ -11188,8 +11046,7 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
        }
 
        err = i40e_init_lan_hmc(hw, hw->func_caps.num_tx_qp,
-                               hw->func_caps.num_rx_qp,
-                               pf->fcoe_hmc_cntx_num, pf->fcoe_hmc_filt_num);
+                               hw->func_caps.num_rx_qp, 0, 0);
        if (err) {
                dev_info(&pdev->dev, "init_lan_hmc failed: %d\n", err);
                goto err_init_lan_hmc;
@@ -11211,9 +11068,9 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
                i40e_aq_stop_lldp(hw, true, NULL);
        }
 
-       i40e_get_mac_addr(hw, hw->mac.addr);
        /* allow a platform config to override the HW addr */
        i40e_get_platform_mac_addr(pdev, pf);
+
        if (!is_valid_ether_addr(hw->mac.addr)) {
                dev_info(&pdev->dev, "invalid MAC address %pM\n", hw->mac.addr);
                err = -EIO;
@@ -11224,18 +11081,6 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
        i40e_get_port_mac_addr(hw, hw->mac.port_addr);
        if (is_valid_ether_addr(hw->mac.port_addr))
                pf->flags |= I40E_FLAG_PORT_ID_VALID;
-#ifdef I40E_FCOE
-       err = i40e_get_san_mac_addr(hw, hw->mac.san_addr);
-       if (err)
-               dev_info(&pdev->dev,
-                        "(non-fatal) SAN MAC retrieval failed: %d\n", err);
-       if (!is_valid_ether_addr(hw->mac.san_addr)) {
-               dev_warn(&pdev->dev, "invalid SAN MAC address %pM, falling back to LAN MAC\n",
-                        hw->mac.san_addr);
-               ether_addr_copy(hw->mac.san_addr, hw->mac.addr);
-       }
-       dev_info(&pf->pdev->dev, "SAN MAC: %pM\n", hw->mac.san_addr);
-#endif /* I40E_FCOE */
 
        pci_set_drvdata(pdev, pf);
        pci_save_state(pdev);
@@ -11254,7 +11099,6 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 
        INIT_WORK(&pf->service_task, i40e_service_task);
        clear_bit(__I40E_SERVICE_SCHED, &pf->state);
-       pf->flags |= I40E_FLAG_NEED_LINK_UPDATE;
 
        /* NVM bit on means WoL disabled for the port */
        i40e_read_nvm_word(hw, I40E_SR_NVM_WAKE_ON_LAN, &wol_nvm_bits);
@@ -11426,16 +11270,13 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
                  round_jiffies(jiffies + pf->service_timer_period));
 
        /* add this PF to client device list and launch a client service task */
-       err = i40e_lan_add_device(pf);
-       if (err)
-               dev_info(&pdev->dev, "Failed to add PF to client API service list: %d\n",
-                        err);
-
-#ifdef I40E_FCOE
-       /* create FCoE interface */
-       i40e_fcoe_vsi_setup(pf);
+       if (pf->flags & I40E_FLAG_IWARP_ENABLED) {
+               err = i40e_lan_add_device(pf);
+               if (err)
+                       dev_info(&pdev->dev, "Failed to add PF to client API service list: %d\n",
+                                err);
+       }
 
-#endif
 #define PCI_SPEED_SIZE 8
 #define PCI_WIDTH_SIZE 8
        /* Devices on the IOSF bus do not have this information
@@ -11581,6 +11422,11 @@ static void i40e_remove(struct pci_dev *pdev)
        if (pf->service_task.func)
                cancel_work_sync(&pf->service_task);
 
+       /* Client close must be called explicitly here because the timer
+        * has been stopped.
+        */
+       i40e_notify_client_of_netdev_close(pf->vsi[pf->lan_vsi], false);
+
        if (pf->flags & I40E_FLAG_SRIOV_ENABLED) {
                i40e_free_vfs(pf);
                pf->flags &= ~I40E_FLAG_SRIOV_ENABLED;
@@ -11607,10 +11453,11 @@ static void i40e_remove(struct pci_dev *pdev)
                i40e_vsi_release(pf->vsi[pf->lan_vsi]);
 
        /* remove attached clients */
-       ret_code = i40e_lan_del_device(pf);
-       if (ret_code) {
-               dev_warn(&pdev->dev, "Failed to delete client device: %d\n",
-                        ret_code);
+       if (pf->flags & I40E_FLAG_IWARP_ENABLED) {
+               ret_code = i40e_lan_del_device(pf);
+               if (ret_code)
+                       dev_warn(&pdev->dev, "Failed to delete client device: %d\n",
+                                ret_code);
        }
 
        /* shutdown and destroy the HMC */
@@ -11679,7 +11526,7 @@ static pci_ers_result_t i40e_pci_error_detected(struct pci_dev *pdev,
        /* shutdown all operations */
        if (!test_bit(__I40E_SUSPENDED, &pf->state)) {
                rtnl_lock();
-               i40e_prep_for_reset(pf);
+               i40e_prep_for_reset(pf, true);
                rtnl_unlock();
        }
 
@@ -11748,7 +11595,7 @@ static void i40e_pci_error_resume(struct pci_dev *pdev)
                return;
 
        rtnl_lock();
-       i40e_handle_reset_warning(pf);
+       i40e_handle_reset_warning(pf, true);
        rtnl_unlock();
 }
 
@@ -11811,7 +11658,7 @@ static void i40e_shutdown(struct pci_dev *pdev)
        set_bit(__I40E_SUSPENDED, &pf->state);
        set_bit(__I40E_DOWN, &pf->state);
        rtnl_lock();
-       i40e_prep_for_reset(pf);
+       i40e_prep_for_reset(pf, true);
        rtnl_unlock();
 
        wr32(hw, I40E_PFPM_APM, (pf->wol_en ? I40E_PFPM_APM_APME_MASK : 0));
@@ -11821,11 +11668,16 @@ static void i40e_shutdown(struct pci_dev *pdev)
        cancel_work_sync(&pf->service_task);
        i40e_fdir_teardown(pf);
 
+       /* Client close must be called explicitly here because the timer
+        * has been stopped.
+        */
+       i40e_notify_client_of_netdev_close(pf->vsi[pf->lan_vsi], false);
+
        if (pf->wol_en && (pf->flags & I40E_FLAG_WOL_MC_MAGIC_PKT_WAKE))
                i40e_enable_mc_magic_wake(pf);
 
        rtnl_lock();
-       i40e_prep_for_reset(pf);
+       i40e_prep_for_reset(pf, true);
        rtnl_unlock();
 
        wr32(hw, I40E_PFPM_APM,
@@ -11859,7 +11711,7 @@ static int i40e_suspend(struct pci_dev *pdev, pm_message_t state)
                i40e_enable_mc_magic_wake(pf);
 
        rtnl_lock();
-       i40e_prep_for_reset(pf);
+       i40e_prep_for_reset(pf, true);
        rtnl_unlock();
 
        wr32(hw, I40E_PFPM_APM, (pf->wol_en ? I40E_PFPM_APM_APME_MASK : 0));
@@ -11907,7 +11759,7 @@ static int i40e_resume(struct pci_dev *pdev)
        if (test_and_clear_bit(__I40E_SUSPENDED, &pf->state)) {
                clear_bit(__I40E_DOWN, &pf->state);
                rtnl_lock();
-               i40e_reset_and_rebuild(pf, false);
+               i40e_reset_and_rebuild(pf, false, true);
                rtnl_unlock();
        }
 
index 38ee18f1112444df1ad753f845d1dd1b1fd6ddcc..800bd55d0159c083c3d2267c5eda0289b1d6435c 100644 (file)
@@ -292,14 +292,14 @@ i40e_status i40e_read_nvm_word(struct i40e_hw *hw, u16 offset,
 {
        enum i40e_status_code ret_code = 0;
 
-       if (hw->flags & I40E_HW_FLAG_AQ_SRCTL_ACCESS_ENABLE) {
-               ret_code = i40e_acquire_nvm(hw, I40E_RESOURCE_READ);
-               if (!ret_code) {
+       ret_code = i40e_acquire_nvm(hw, I40E_RESOURCE_READ);
+       if (!ret_code) {
+               if (hw->flags & I40E_HW_FLAG_AQ_SRCTL_ACCESS_ENABLE) {
                        ret_code = i40e_read_nvm_word_aq(hw, offset, data);
-                       i40e_release_nvm(hw);
+               } else {
+                       ret_code = i40e_read_nvm_word_srctl(hw, offset, data);
                }
-       } else {
-               ret_code = i40e_read_nvm_word_srctl(hw, offset, data);
+               i40e_release_nvm(hw);
        }
        return ret_code;
 }
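
The restructuring above takes the NVM semaphore before choosing the access method, so both the AQ and SRCTL paths run inside a single acquire/release bracket. A user-space sketch of the control flow (stub functions stand in for the real ones):

    #include <stdio.h>

    static int  acquire_nvm(void)     { puts("acquire"); return 0; }
    static void release_nvm(void)     { puts("release"); }
    static int  read_word_aq(void)    { puts("read via AQ");    return 0; }
    static int  read_word_srctl(void) { puts("read via SRCTL"); return 0; }

    static int read_nvm_word(int aq_capable)
    {
            int ret = acquire_nvm();
            if (ret)
                    return ret;
            ret = aq_capable ? read_word_aq() : read_word_srctl();
            release_nvm();          /* released on both paths */
            return ret;
    }

    int main(void) { return read_nvm_word(1); }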
index fea81ed065db8a57a26d89e31b87758b5c069d20..80e66da6b145e07ac4faaf761b9788d67118fc40 100644 (file)
@@ -78,7 +78,4 @@ do {                                                          \
 } while (0)
 
 typedef enum i40e_status_code i40e_status;
-#ifdef CONFIG_I40E_FCOE
-#define I40E_FCOE
-#endif
 #endif /* _I40E_OSDEP_H_ */
index 2551fc8274441f81196c64d34c6eef7d9dbecad1..dfc5e5901be5c54a1ff8e6a01b5d67ab7bc4fc49 100644 (file)
@@ -304,9 +304,6 @@ i40e_status i40e_read_pba_string(struct i40e_hw *hw, u8 *pba_num,
                                 u32 pba_num_size);
 i40e_status i40e_validate_mac_addr(u8 *mac_addr);
 void i40e_pre_tx_queue_cfg(struct i40e_hw *hw, u32 queue, bool enable);
-#ifdef I40E_FCOE
-i40e_status i40e_get_san_mac_addr(struct i40e_hw *hw, u8 *mac_addr);
-#endif
 /* prototype for functions used for NVM access */
 i40e_status i40e_init_nvm(struct i40e_hw *hw);
 i40e_status i40e_acquire_nvm(struct i40e_hw *hw,
index 97d46058d71d3c118dfa8f5408b888a63fd1a933..20691d2bf113af872834c8afac642ddcf83dda57 100644 (file)
@@ -71,6 +71,9 @@ static void i40e_fdir(struct i40e_ring *tx_ring,
        flex_ptype |= I40E_TXD_FLTR_QW0_PCTYPE_MASK &
                      (fdata->pctype << I40E_TXD_FLTR_QW0_PCTYPE_SHIFT);
 
+       flex_ptype |= I40E_TXD_FLTR_QW0_FLEXOFF_MASK &
+                     (fdata->flex_offset << I40E_TXD_FLTR_QW0_FLEXOFF_SHIFT);
+
        /* Use LAN VSI Id if not programmed by user */
        flex_ptype |= I40E_TXD_FLTR_QW0_DEST_VSI_MASK &
                      ((u32)(fdata->dest_vsi ? : pf->vsi[pf->lan_vsi]->id) <<
@@ -203,7 +206,6 @@ static int i40e_add_del_fdir_udpv4(struct i40e_vsi *vsi,
        struct i40e_pf *pf = vsi->back;
        struct udphdr *udp;
        struct iphdr *ip;
-       bool err = false;
        u8 *raw_packet;
        int ret;
        static char packet[] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x08, 0,
@@ -219,18 +221,28 @@ static int i40e_add_del_fdir_udpv4(struct i40e_vsi *vsi,
        udp = (struct udphdr *)(raw_packet + IP_HEADER_OFFSET
              + sizeof(struct iphdr));
 
-       ip->daddr = fd_data->dst_ip[0];
+       ip->daddr = fd_data->dst_ip;
        udp->dest = fd_data->dst_port;
-       ip->saddr = fd_data->src_ip[0];
+       ip->saddr = fd_data->src_ip;
        udp->source = fd_data->src_port;
 
+       if (fd_data->flex_filter) {
+               u8 *payload = raw_packet + I40E_UDPIP_DUMMY_PACKET_LEN;
+               __be16 pattern = fd_data->flex_word;
+               u16 off = fd_data->flex_offset;
+
+               *((__force __be16 *)(payload + off)) = pattern;
+       }
+
        fd_data->pctype = I40E_FILTER_PCTYPE_NONF_IPV4_UDP;
        ret = i40e_program_fdir_filter(fd_data, raw_packet, pf, add);
        if (ret) {
                dev_info(&pf->pdev->dev,
                         "PCTYPE:%d, Filter command send failed for fd_id:%d (ret = %d)\n",
                         fd_data->pctype, fd_data->fd_id, ret);
-               err = true;
+               /* Free the packet buffer since it wasn't added to the ring */
+               kfree(raw_packet);
+               return -EOPNOTSUPP;
        } else if (I40E_DEBUG_FD & pf->hw.debug_mask) {
                if (add)
                        dev_info(&pf->pdev->dev,
@@ -241,10 +253,13 @@ static int i40e_add_del_fdir_udpv4(struct i40e_vsi *vsi,
                                 "Filter deleted for PCTYPE %d loc = %d\n",
                                 fd_data->pctype, fd_data->fd_id);
        }
-       if (err)
-               kfree(raw_packet);
 
-       return err ? -EOPNOTSUPP : 0;
+       if (add)
+               pf->fd_udp4_filter_cnt++;
+       else
+               pf->fd_udp4_filter_cnt--;
+
+       return 0;
 }
 
 #define I40E_TCPIP_DUMMY_PACKET_LEN 54
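
The flex_filter block above stamps a 16-bit pattern into the dummy packet at a user-chosen offset past the headers; the hardware then matches that word in real traffic. A user-space sketch of the stuffing (lengths and offsets illustrative):

    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
            unsigned char raw_packet[512] = {0};
            unsigned int hdr_len = 42;      /* assumed dummy L2+L3+L4 length */
            unsigned int off = 6;           /* flex offset into the payload */
            unsigned char pattern[2] = {0xBE, 0xEF};  /* big-endian word, like __be16 */

            memcpy(raw_packet + hdr_len + off, pattern, sizeof(pattern));
            printf("payload[%u..%u] = %02x %02x\n", off, off + 1,
                   raw_packet[hdr_len + off], raw_packet[hdr_len + off + 1]);
            return 0;
    }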
@@ -263,7 +278,6 @@ static int i40e_add_del_fdir_tcpv4(struct i40e_vsi *vsi,
        struct i40e_pf *pf = vsi->back;
        struct tcphdr *tcp;
        struct iphdr *ip;
-       bool err = false;
        u8 *raw_packet;
        int ret;
        /* Dummy packet */
@@ -281,39 +295,116 @@ static int i40e_add_del_fdir_tcpv4(struct i40e_vsi *vsi,
        tcp = (struct tcphdr *)(raw_packet + IP_HEADER_OFFSET
              + sizeof(struct iphdr));
 
-       ip->daddr = fd_data->dst_ip[0];
+       ip->daddr = fd_data->dst_ip;
        tcp->dest = fd_data->dst_port;
-       ip->saddr = fd_data->src_ip[0];
+       ip->saddr = fd_data->src_ip;
        tcp->source = fd_data->src_port;
 
+       if (fd_data->flex_filter) {
+               u8 *payload = raw_packet + I40E_TCPIP_DUMMY_PACKET_LEN;
+               __be16 pattern = fd_data->flex_word;
+               u16 off = fd_data->flex_offset;
+
+               *((__force __be16 *)(payload + off)) = pattern;
+       }
+
+       fd_data->pctype = I40E_FILTER_PCTYPE_NONF_IPV4_TCP;
+       ret = i40e_program_fdir_filter(fd_data, raw_packet, pf, add);
+       if (ret) {
+               dev_info(&pf->pdev->dev,
+                        "PCTYPE:%d, Filter command send failed for fd_id:%d (ret = %d)\n",
+                        fd_data->pctype, fd_data->fd_id, ret);
+               /* Free the packet buffer since it wasn't added to the ring */
+               kfree(raw_packet);
+               return -EOPNOTSUPP;
+       } else if (I40E_DEBUG_FD & pf->hw.debug_mask) {
+               if (add)
+                       dev_info(&pf->pdev->dev, "Filter OK for PCTYPE %d loc = %d)\n",
+                                fd_data->pctype, fd_data->fd_id);
+               else
+                       dev_info(&pf->pdev->dev,
+                                "Filter deleted for PCTYPE %d loc = %d\n",
+                                fd_data->pctype, fd_data->fd_id);
+       }
+
        if (add) {
-               pf->fd_tcp_rule++;
+               pf->fd_tcp4_filter_cnt++;
                if ((pf->flags & I40E_FLAG_FD_ATR_ENABLED) &&
                    I40E_DEBUG_FD & pf->hw.debug_mask)
                        dev_info(&pf->pdev->dev, "Forcing ATR off, sideband rules for TCP/IPv4 flow being applied\n");
-               pf->auto_disable_flags |= I40E_FLAG_FD_ATR_ENABLED;
+               pf->hw_disabled_flags |= I40E_FLAG_FD_ATR_ENABLED;
        } else {
-               pf->fd_tcp_rule = (pf->fd_tcp_rule > 0) ?
-                                 (pf->fd_tcp_rule - 1) : 0;
-               if (pf->fd_tcp_rule == 0) {
+               pf->fd_tcp4_filter_cnt--;
+               if (pf->fd_tcp4_filter_cnt == 0) {
                        if ((pf->flags & I40E_FLAG_FD_ATR_ENABLED) &&
                            I40E_DEBUG_FD & pf->hw.debug_mask)
                                dev_info(&pf->pdev->dev, "ATR re-enabled due to no sideband TCP/IPv4 rules\n");
-                       pf->auto_disable_flags &= ~I40E_FLAG_FD_ATR_ENABLED;
+                       pf->hw_disabled_flags &= ~I40E_FLAG_FD_ATR_ENABLED;
                }
        }
 
-       fd_data->pctype = I40E_FILTER_PCTYPE_NONF_IPV4_TCP;
-       ret = i40e_program_fdir_filter(fd_data, raw_packet, pf, add);
+       return 0;
+}
+
+#define I40E_SCTPIP_DUMMY_PACKET_LEN 46
+/**
+ * i40e_add_del_fdir_sctpv4 - Add/Remove SCTPv4 Flow Director filters for
+ * a specific flow spec
+ * @vsi: pointer to the targeted VSI
+ * @fd_data: the flow director data required for the FDir descriptor
+ * @add: true adds a filter, false removes it
+ *
+ * Returns 0 if the filters were successfully added or removed
+ **/
+static int i40e_add_del_fdir_sctpv4(struct i40e_vsi *vsi,
+                                   struct i40e_fdir_filter *fd_data,
+                                   bool add)
+{
+       struct i40e_pf *pf = vsi->back;
+       struct sctphdr *sctp;
+       struct iphdr *ip;
+       u8 *raw_packet;
+       int ret;
+       /* Dummy packet */
+       static char packet[] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x08, 0,
+               0x45, 0, 0, 0x20, 0, 0, 0x40, 0, 0x40, 0x84, 0, 0, 0, 0, 0, 0,
+               0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
 
+       raw_packet = kzalloc(I40E_FDIR_MAX_RAW_PACKET_SIZE, GFP_KERNEL);
+       if (!raw_packet)
+               return -ENOMEM;
+       memcpy(raw_packet, packet, I40E_SCTPIP_DUMMY_PACKET_LEN);
+
+       ip = (struct iphdr *)(raw_packet + IP_HEADER_OFFSET);
+       sctp = (struct sctphdr *)(raw_packet + IP_HEADER_OFFSET
+             + sizeof(struct iphdr));
+
+       ip->daddr = fd_data->dst_ip;
+       sctp->dest = fd_data->dst_port;
+       ip->saddr = fd_data->src_ip;
+       sctp->source = fd_data->src_port;
+
+       if (fd_data->flex_filter) {
+               u8 *payload = raw_packet + I40E_SCTPIP_DUMMY_PACKET_LEN;
+               __be16 pattern = fd_data->flex_word;
+               u16 off = fd_data->flex_offset;
+
+               *((__force __be16 *)(payload + off)) = pattern;
+       }
+
+       fd_data->pctype = I40E_FILTER_PCTYPE_NONF_IPV4_SCTP;
+       ret = i40e_program_fdir_filter(fd_data, raw_packet, pf, add);
        if (ret) {
                dev_info(&pf->pdev->dev,
                         "PCTYPE:%d, Filter command send failed for fd_id:%d (ret = %d)\n",
                         fd_data->pctype, fd_data->fd_id, ret);
-               err = true;
+               /* Free the packet buffer since it wasn't added to the ring */
+               kfree(raw_packet);
+               return -EOPNOTSUPP;
        } else if (I40E_DEBUG_FD & pf->hw.debug_mask) {
                if (add)
-                       dev_info(&pf->pdev->dev, "Filter OK for PCTYPE %d loc = %d)\n",
+                       dev_info(&pf->pdev->dev,
+                                "Filter OK for PCTYPE %d loc = %d\n",
                                 fd_data->pctype, fd_data->fd_id);
                else
                        dev_info(&pf->pdev->dev,
@@ -321,10 +412,12 @@ static int i40e_add_del_fdir_tcpv4(struct i40e_vsi *vsi,
                                 fd_data->pctype, fd_data->fd_id);
        }
 
-       if (err)
-               kfree(raw_packet);
+       if (add)
+               pf->fd_sctp4_filter_cnt++;
+       else
+               pf->fd_sctp4_filter_cnt--;
 
-       return err ? -EOPNOTSUPP : 0;
+       return 0;
 }
 
 #define I40E_IP_DUMMY_PACKET_LEN 34
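
All of the add/del helpers now follow one ownership rule: if programming the filter fails, the buffer was never attached to the Tx ring, so the helper frees it and bails out; on success the ring frees it after write-back. A user-space sketch of the rule (program() stands in for i40e_program_fdir_filter):

    #include <stdlib.h>

    static int program(unsigned char *buf)
    {
            (void)buf;
            return -1;      /* pretend the filter was rejected */
    }

    int main(void)
    {
            unsigned char *raw_packet = calloc(1, 512);
            if (!raw_packet)
                    return 1;

            if (program(raw_packet)) {
                    free(raw_packet);  /* never handed off: caller frees */
                    return 1;
            }
            /* handed off: the ring owns and frees the buffer */
            return 0;
    }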
@@ -343,7 +436,6 @@ static int i40e_add_del_fdir_ipv4(struct i40e_vsi *vsi,
 {
        struct i40e_pf *pf = vsi->back;
        struct iphdr *ip;
-       bool err = false;
        u8 *raw_packet;
        int ret;
        int i;
@@ -359,18 +451,29 @@ static int i40e_add_del_fdir_ipv4(struct i40e_vsi *vsi,
                memcpy(raw_packet, packet, I40E_IP_DUMMY_PACKET_LEN);
                ip = (struct iphdr *)(raw_packet + IP_HEADER_OFFSET);
 
-               ip->saddr = fd_data->src_ip[0];
-               ip->daddr = fd_data->dst_ip[0];
+               ip->saddr = fd_data->src_ip;
+               ip->daddr = fd_data->dst_ip;
                ip->protocol = 0;
 
+               if (fd_data->flex_filter) {
+                       u8 *payload = raw_packet + I40E_IP_DUMMY_PACKET_LEN;
+                       __be16 pattern = fd_data->flex_word;
+                       u16 off = fd_data->flex_offset;
+
+                       *((__force __be16 *)(payload + off)) = pattern;
+               }
+
                fd_data->pctype = i;
                ret = i40e_program_fdir_filter(fd_data, raw_packet, pf, add);
-
                if (ret) {
                        dev_info(&pf->pdev->dev,
                                 "PCTYPE:%d, Filter command send failed for fd_id:%d (ret = %d)\n",
                                 fd_data->pctype, fd_data->fd_id, ret);
-                       err = true;
+                       /* The packet buffer wasn't added to the ring so we
+                        * need to free it now.
+                        */
+                       kfree(raw_packet);
+                       return -EOPNOTSUPP;
                } else if (I40E_DEBUG_FD & pf->hw.debug_mask) {
                        if (add)
                                dev_info(&pf->pdev->dev,
@@ -383,10 +486,12 @@ static int i40e_add_del_fdir_ipv4(struct i40e_vsi *vsi,
                }
        }
 
-       if (err)
-               kfree(raw_packet);
+       if (add)
+               pf->fd_ip4_filter_cnt++;
+       else
+               pf->fd_ip4_filter_cnt--;
 
-       return err ? -EOPNOTSUPP : 0;
+       return 0;
 }
 
 /**
@@ -409,6 +514,9 @@ int i40e_add_del_fdir(struct i40e_vsi *vsi,
        case UDP_V4_FLOW:
                ret = i40e_add_del_fdir_udpv4(vsi, input, add);
                break;
+       case SCTP_V4_FLOW:
+               ret = i40e_add_del_fdir_sctpv4(vsi, input, add);
+               break;
        case IP_USER_FLOW:
                switch (input->ip4_proto) {
                case IPPROTO_TCP:
@@ -417,19 +525,23 @@ int i40e_add_del_fdir(struct i40e_vsi *vsi,
                case IPPROTO_UDP:
                        ret = i40e_add_del_fdir_udpv4(vsi, input, add);
                        break;
+               case IPPROTO_SCTP:
+                       ret = i40e_add_del_fdir_sctpv4(vsi, input, add);
+                       break;
                case IPPROTO_IP:
                        ret = i40e_add_del_fdir_ipv4(vsi, input, add);
                        break;
                default:
                        /* We cannot support masking based on protocol */
-                       goto unsupported_flow;
+                       dev_info(&pf->pdev->dev, "Unsupported IPv4 protocol 0x%02x\n",
+                                input->ip4_proto);
+                       return -EINVAL;
                }
                break;
        default:
-unsupported_flow:
-               dev_info(&pf->pdev->dev, "Could not specify spec type %d\n",
+               dev_info(&pf->pdev->dev, "Unsupported flow type 0x%02x\n",
                         input->flow_type);
-               ret = -EINVAL;
+               return -EINVAL;
        }
 
        /* The buffer allocated here will normally be freed by
@@ -484,8 +596,8 @@ static void i40e_fd_handle_status(struct i40e_ring *rx_ring,
                pf->fd_atr_cnt = i40e_get_current_atr_cnt(pf);
 
                if ((rx_desc->wb.qword0.hi_dword.fd_id == 0) &&
-                   (pf->auto_disable_flags & I40E_FLAG_FD_SB_ENABLED)) {
-                       pf->auto_disable_flags |= I40E_FLAG_FD_ATR_ENABLED;
+                   (pf->hw_disabled_flags & I40E_FLAG_FD_SB_ENABLED)) {
+                       pf->hw_disabled_flags |= I40E_FLAG_FD_ATR_ENABLED;
                        set_bit(__I40E_FD_FLUSH_REQUESTED, &pf->state);
                }
 
@@ -498,11 +610,11 @@ static void i40e_fd_handle_status(struct i40e_ring *rx_ring,
                 */
                if (fcnt_prog >= (fcnt_avail - I40E_FDIR_BUFFER_FULL_MARGIN)) {
                        if ((pf->flags & I40E_FLAG_FD_SB_ENABLED) &&
-                           !(pf->auto_disable_flags &
+                           !(pf->hw_disabled_flags &
                                     I40E_FLAG_FD_SB_ENABLED)) {
                                if (I40E_DEBUG_FD & pf->hw.debug_mask)
                                        dev_warn(&pdev->dev, "FD filter space full, new ntuple rules will not be added\n");
-                               pf->auto_disable_flags |=
+                               pf->hw_disabled_flags |=
                                                        I40E_FLAG_FD_SB_ENABLED;
                        }
                }
@@ -599,19 +711,15 @@ void i40e_free_tx_resources(struct i40e_ring *tx_ring)
 /**
  * i40e_get_tx_pending - how many tx descriptors not processed
  * @tx_ring: the ring of descriptors
- * @in_sw: is tx_pending being checked in SW or HW
  *
  * Since there is no access to the ring head register
  * in XL710, we need to use our local copies
  **/
-u32 i40e_get_tx_pending(struct i40e_ring *ring, bool in_sw)
+u32 i40e_get_tx_pending(struct i40e_ring *ring)
 {
        u32 head, tail;
 
-       if (!in_sw)
-               head = i40e_get_head(ring);
-       else
-               head = ring->next_to_clean;
+       head = i40e_get_head(ring);
        tail = readl(ring->tail);
 
        if (head != tail)
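
The hunk is cut off just before the return statement, so for clarity here is a standalone sketch of the wraparound arithmetic i40e_get_tx_pending() performs (names illustrative):

/* Pending descriptors between head and tail on a ring of 'count'
 * entries, handling the case where tail has wrapped past the end.
 * e.g. ring_pending(510, 2, 512) == 4.
 */
static unsigned int ring_pending(unsigned int head, unsigned int tail,
                                 unsigned int count)
{
        if (head == tail)
                return 0;
        return head < tail ? tail - head : tail + count - head;
}
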
@@ -734,7 +842,7 @@ static bool i40e_clean_tx_irq(struct i40e_vsi *vsi,
                 * them to be written back in case we stay in NAPI.
                 * In this mode on X722 we do not enable Interrupt.
                 */
-               unsigned int j = i40e_get_tx_pending(tx_ring, false);
+               unsigned int j = i40e_get_tx_pending(tx_ring);
 
                if (budget &&
                    ((j / WB_STRIDE) == 0) && (j > 0) &&
@@ -951,11 +1059,6 @@ static void i40e_clean_programming_status(struct i40e_ring *rx_ring,
 
        if (id == I40E_RX_PROG_STATUS_DESC_FD_FILTER_STATUS)
                i40e_fd_handle_status(rx_ring, rx_desc, id);
-#ifdef I40E_FCOE
-       else if ((id == I40E_RX_PROG_STATUS_DESC_FCOE_CTXT_PROG_STATUS) ||
-                (id == I40E_RX_PROG_STATUS_DESC_FCOE_CTXT_INVL_STATUS))
-               i40e_fcoe_handle_status(rx_ring, rx_desc, id);
-#endif
 }
 
 /**
@@ -1010,7 +1113,6 @@ err:
  **/
 void i40e_clean_rx_ring(struct i40e_ring *rx_ring)
 {
-       struct device *dev = rx_ring->dev;
        unsigned long bi_size;
        u16 i;
 
@@ -1030,8 +1132,22 @@ void i40e_clean_rx_ring(struct i40e_ring *rx_ring)
                if (!rx_bi->page)
                        continue;
 
-               dma_unmap_page(dev, rx_bi->dma, PAGE_SIZE, DMA_FROM_DEVICE);
-               __free_pages(rx_bi->page, 0);
+               /* Invalidate cache lines that may have been written to by
+                * the device so that we avoid corrupting memory.
+                */
+               dma_sync_single_range_for_cpu(rx_ring->dev,
+                                             rx_bi->dma,
+                                             rx_bi->page_offset,
+                                             rx_ring->rx_buf_len,
+                                             DMA_FROM_DEVICE);
+
+               /* free resources associated with mapping */
+               dma_unmap_page_attrs(rx_ring->dev, rx_bi->dma,
+                                    i40e_rx_pg_size(rx_ring),
+                                    DMA_FROM_DEVICE,
+                                    I40E_RX_DMA_ATTR);
+
+               __page_frag_cache_drain(rx_bi->page, rx_bi->pagecnt_bias);
 
                rx_bi->page = NULL;
                rx_bi->page_offset = 0;
@@ -1131,6 +1247,17 @@ static inline void i40e_release_rx_desc(struct i40e_ring *rx_ring, u32 val)
        writel(val, rx_ring->tail);
 }
 
+/**
+ * i40e_rx_offset - Return expected offset into page to access data
+ * @rx_ring: Ring we are requesting offset of
+ *
+ * Returns the expected offset into the data buffer for this ring.
+ */
+static inline unsigned int i40e_rx_offset(struct i40e_ring *rx_ring)
+{
+       return ring_uses_build_skb(rx_ring) ? I40E_SKB_PAD : 0;
+}
+
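
When the build_skb path is enabled, each buffer must reserve headroom for the skb data pointer and tailroom for the shared info. A sketch of the intended layout, assuming 4K pages and the I40E_SKB_PAD computation introduced later in this series:

/*
 *  page_offset -> +--------------------------------+
 *                 | I40E_SKB_PAD headroom          |
 *                 +--------------------------------+ <- data handed to stack
 *                 | received frame                 |
 *                 | (up to rx_buf_len bytes)       |
 *                 +--------------------------------+
 *                 | tailroom + skb_shared_info     |
 *                 +--------------------------------+ <- end of 2K half page
 */
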
 /**
  * i40e_alloc_mapped_page - recycle or make a new page
  * @rx_ring: ring to use
@@ -1152,27 +1279,33 @@ static bool i40e_alloc_mapped_page(struct i40e_ring *rx_ring,
        }
 
        /* alloc new page for storage */
-       page = dev_alloc_page();
+       page = dev_alloc_pages(i40e_rx_pg_order(rx_ring));
        if (unlikely(!page)) {
                rx_ring->rx_stats.alloc_page_failed++;
                return false;
        }
 
        /* map page for use */
-       dma = dma_map_page(rx_ring->dev, page, 0, PAGE_SIZE, DMA_FROM_DEVICE);
+       dma = dma_map_page_attrs(rx_ring->dev, page, 0,
+                                i40e_rx_pg_size(rx_ring),
+                                DMA_FROM_DEVICE,
+                                I40E_RX_DMA_ATTR);
 
        /* if mapping failed free memory back to system since
         * there isn't much point in holding memory we can't use
         */
        if (dma_mapping_error(rx_ring->dev, dma)) {
-               __free_pages(page, 0);
+               __free_pages(page, i40e_rx_pg_order(rx_ring));
                rx_ring->rx_stats.alloc_page_failed++;
                return false;
        }
 
        bi->dma = dma;
        bi->page = page;
-       bi->page_offset = 0;
+       bi->page_offset = i40e_rx_offset(rx_ring);
+
+       /* initialize pagecnt_bias to 1, representing that we fully own the page */
+       bi->pagecnt_bias = 1;
 
        return true;
 }
@@ -1219,6 +1352,12 @@ bool i40e_alloc_rx_buffers(struct i40e_ring *rx_ring, u16 cleaned_count)
                if (!i40e_alloc_mapped_page(rx_ring, bi))
                        goto no_buffers;
 
+               /* sync the buffer for use by the device */
+               dma_sync_single_range_for_device(rx_ring->dev, bi->dma,
+                                                bi->page_offset,
+                                                rx_ring->rx_buf_len,
+                                                DMA_FROM_DEVICE);
+
                /* Refresh the desc even if buffer_addrs didn't change
                 * because each write-back erases this info.
                 */
@@ -1259,8 +1398,6 @@ no_buffers:
  * @vsi: the VSI we care about
  * @skb: skb currently being received and modified
  * @rx_desc: the receive descriptor
- *
- * skb->protocol must be set before this function is called
  **/
 static inline void i40e_rx_checksum(struct i40e_vsi *vsi,
                                    struct sk_buff *skb,
@@ -1422,12 +1559,12 @@ void i40e_process_skb_fields(struct i40e_ring *rx_ring,
 
        i40e_rx_hash(rx_ring, rx_desc, skb, rx_ptype);
 
-       /* modifies the skb - consumes the enet header */
-       skb->protocol = eth_type_trans(skb, rx_ring->netdev);
-
        i40e_rx_checksum(rx_ring->vsi, skb, rx_desc);
 
        skb_record_rx_queue(skb, rx_ring->queue_index);
+
+       /* modifies the skb - consumes the enet header */
+       skb->protocol = eth_type_trans(skb, rx_ring->netdev);
 }
 
 /**
@@ -1472,7 +1609,10 @@ static void i40e_reuse_rx_page(struct i40e_ring *rx_ring,
        rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0;
 
        /* transfer page from old buffer to new buffer */
-       *new_buff = *old_buff;
+       new_buff->dma           = old_buff->dma;
+       new_buff->page          = old_buff->page;
+       new_buff->page_offset   = old_buff->page_offset;
+       new_buff->pagecnt_bias  = old_buff->pagecnt_bias;
 }
 
 /**
@@ -1493,8 +1633,6 @@ static inline bool i40e_page_is_reusable(struct page *page)
  * the adapter for another receive
  *
  * @rx_buffer: buffer containing the page
- * @page: page address from rx_buffer
- * @truesize: actual size of the buffer in this page
  *
  * If page is reusable, rx_buffer->page_offset is adjusted to point to
  * an unused region in the page.
@@ -1517,13 +1655,10 @@ static inline bool i40e_page_is_reusable(struct page *page)
  *
  * In either case, if the page is reusable its refcount is increased.
  **/
-static bool i40e_can_reuse_rx_page(struct i40e_rx_buffer *rx_buffer,
-                                  struct page *page,
-                                  const unsigned int truesize)
+static bool i40e_can_reuse_rx_page(struct i40e_rx_buffer *rx_buffer)
 {
-#if (PAGE_SIZE >= 8192)
-       unsigned int last_offset = PAGE_SIZE - I40E_RXBUFFER_2048;
-#endif
+       unsigned int pagecnt_bias = rx_buffer->pagecnt_bias;
+       struct page *page = rx_buffer->page;
 
        /* Is any reuse possible? */
        if (unlikely(!i40e_page_is_reusable(page)))
@@ -1531,21 +1666,23 @@ static bool i40e_can_reuse_rx_page(struct i40e_rx_buffer *rx_buffer,
 
 #if (PAGE_SIZE < 8192)
        /* if we are only owner of page we can reuse it */
-       if (unlikely(page_count(page) != 1))
+       if (unlikely((page_count(page) - pagecnt_bias) > 1))
                return false;
-
-       /* flip page offset to other buffer */
-       rx_buffer->page_offset ^= truesize;
 #else
-       /* move offset up to the next cache line */
-       rx_buffer->page_offset += truesize;
-
-       if (rx_buffer->page_offset > last_offset)
+#define I40E_LAST_OFFSET \
+       (SKB_WITH_OVERHEAD(PAGE_SIZE) - I40E_RXBUFFER_2048)
+       if (rx_buffer->page_offset > I40E_LAST_OFFSET)
                return false;
 #endif
 
-       /* Inc ref count on page before passing it up to the stack */
-       get_page(page);
+       /* If we have drained the page fragment pool we need to update
+        * the pagecnt_bias and page count so that we fully restock the
+        * number of references the driver holds.
+        */
+       if (unlikely(!pagecnt_bias)) {
+               page_ref_add(page, USHRT_MAX);
+               rx_buffer->pagecnt_bias = USHRT_MAX;
+       }
 
        return true;
 }
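
The pagecnt_bias scheme above trades a get_page() per received frame for one bulk page_ref_add() every USHRT_MAX frames. A compilable user-space model of the accounting (all names are illustrative, not the driver's):

#include <limits.h>

struct rx_buf_model {
        unsigned int refcount;        /* stands in for page_count(page) */
        unsigned short pagecnt_bias;  /* references the driver still owns */
};

/* taking a buffer hands one reference to the stack: cheap local decrement */
static void take_buffer(struct rx_buf_model *b)
{
        b->pagecnt_bias--;
}

static int can_reuse(struct rx_buf_model *b)
{
        /* we are sole owner iff every reference but one is our bias */
        if (b->refcount - b->pagecnt_bias > 1)
                return 0;
        if (!b->pagecnt_bias) {
                /* restock USHRT_MAX references with a single bulk add */
                b->refcount += USHRT_MAX;
                b->pagecnt_bias = USHRT_MAX;
        }
        return 1;
}
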
@@ -1554,145 +1691,201 @@ static bool i40e_can_reuse_rx_page(struct i40e_rx_buffer *rx_buffer,
  * i40e_add_rx_frag - Add contents of Rx buffer to sk_buff
  * @rx_ring: rx descriptor ring to transact packets on
  * @rx_buffer: buffer containing page to add
- * @size: packet length from rx_desc
  * @skb: sk_buff to place the data into
+ * @size: packet length from rx_desc
  *
  * This function will add the data contained in rx_buffer->page to the skb.
- * This is done either through a direct copy if the data in the buffer is
- * less than the skb header size, otherwise it will just attach the page as
- * a frag to the skb.
+ * It will just attach the page as a frag to the skb.
  *
- * The function will then update the page offset if necessary and return
- * true if the buffer can be reused by the adapter.
+ * The function will then update the page offset.
  **/
-static bool i40e_add_rx_frag(struct i40e_ring *rx_ring,
+static void i40e_add_rx_frag(struct i40e_ring *rx_ring,
                             struct i40e_rx_buffer *rx_buffer,
-                            unsigned int size,
-                            struct sk_buff *skb)
+                            struct sk_buff *skb,
+                            unsigned int size)
 {
-       struct page *page = rx_buffer->page;
-       unsigned char *va = page_address(page) + rx_buffer->page_offset;
 #if (PAGE_SIZE < 8192)
-       unsigned int truesize = I40E_RXBUFFER_2048;
+       unsigned int truesize = i40e_rx_pg_size(rx_ring) / 2;
 #else
-       unsigned int truesize = ALIGN(size, L1_CACHE_BYTES);
+       unsigned int truesize = SKB_DATA_ALIGN(size + i40e_rx_offset(rx_ring));
 #endif
-       unsigned int pull_len;
 
-       if (unlikely(skb_is_nonlinear(skb)))
-               goto add_tail_frag;
+       skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_buffer->page,
+                       rx_buffer->page_offset, size, truesize);
 
-       /* will the data fit in the skb we allocated? if so, just
-        * copy it as it is pretty small anyway
-        */
-       if (size <= I40E_RX_HDR_SIZE) {
-               memcpy(__skb_put(skb, size), va, ALIGN(size, sizeof(long)));
-
-               /* page is reusable, we can reuse buffer as-is */
-               if (likely(i40e_page_is_reusable(page)))
-                       return true;
-
-               /* this page cannot be reused so discard it */
-               __free_pages(page, 0);
-               return false;
-       }
+       /* page is being used so we must update the page offset */
+#if (PAGE_SIZE < 8192)
+       rx_buffer->page_offset ^= truesize;
+#else
+       rx_buffer->page_offset += truesize;
+#endif
+}
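
On systems with 4K pages the driver ping-pongs between the two 2K halves of each page, and the XOR with truesize is what flips the offset. A minimal illustration:

static void flip_demo(void)
{
        unsigned int offset = 0;
        const unsigned int truesize = 2048;   /* half of a 4K page */

        offset ^= truesize;   /* 0    -> 2048: hand out the second half */
        offset ^= truesize;   /* 2048 -> 0:    back to the first half   */
        (void)offset;
}
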
 
-       /* we need the header to contain the greater of either
-        * ETH_HLEN or 60 bytes if the skb->len is less than
-        * 60 for skb_pad.
-        */
-       pull_len = eth_get_headlen(va, I40E_RX_HDR_SIZE);
+/**
+ * i40e_get_rx_buffer - Fetch Rx buffer and synchronize data for use
+ * @rx_ring: rx descriptor ring to transact packets on
+ * @size: size of buffer to add to skb
+ *
+ * This function will pull an Rx buffer from the ring and synchronize it
+ * for use by the CPU.
+ */
+static struct i40e_rx_buffer *i40e_get_rx_buffer(struct i40e_ring *rx_ring,
+                                                const unsigned int size)
+{
+       struct i40e_rx_buffer *rx_buffer;
 
-       /* align pull length to size of long to optimize
-        * memcpy performance
-        */
-       memcpy(__skb_put(skb, pull_len), va, ALIGN(pull_len, sizeof(long)));
+       rx_buffer = &rx_ring->rx_bi[rx_ring->next_to_clean];
+       prefetchw(rx_buffer->page);
 
-       /* update all of the pointers */
-       va += pull_len;
-       size -= pull_len;
+       /* we are reusing so sync this buffer for CPU use */
+       dma_sync_single_range_for_cpu(rx_ring->dev,
+                                     rx_buffer->dma,
+                                     rx_buffer->page_offset,
+                                     size,
+                                     DMA_FROM_DEVICE);
 
-add_tail_frag:
-       skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page,
-                       (unsigned long)va & ~PAGE_MASK, size, truesize);
+       /* We have pulled a buffer for use, so decrement pagecnt_bias */
+       rx_buffer->pagecnt_bias--;
 
-       return i40e_can_reuse_rx_page(rx_buffer, page, truesize);
+       return rx_buffer;
 }
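
i40e_get_rx_buffer() is one leg of the streaming-DMA discipline this series adopts: pages are mapped once with DMA_ATTR_SKIP_CPU_SYNC, and ownership then bounces between CPU and device through ranged syncs rather than per-frame map/unmap. A condensed sketch of the whole lifecycle (dev, page, pg_size, offset and len are placeholders):

#define RX_DMA_ATTR (DMA_ATTR_SKIP_CPU_SYNC | DMA_ATTR_WEAK_ORDERING)

/* map once at allocation time; no implicit sync */
dma = dma_map_page_attrs(dev, page, 0, pg_size,
                         DMA_FROM_DEVICE, RX_DMA_ATTR);

/* before the CPU reads a received frame */
dma_sync_single_range_for_cpu(dev, dma, offset, len, DMA_FROM_DEVICE);

/* before handing the (possibly flipped) half back to the NIC */
dma_sync_single_range_for_device(dev, dma, offset, len, DMA_FROM_DEVICE);

/* teardown, after a final sync_for_cpu of any dirty region */
dma_unmap_page_attrs(dev, dma, pg_size, DMA_FROM_DEVICE, RX_DMA_ATTR);
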
 
 /**
- * i40e_fetch_rx_buffer - Allocate skb and populate it
+ * i40e_construct_skb - Allocate skb and populate it
  * @rx_ring: rx descriptor ring to transact packets on
- * @rx_desc: descriptor containing info written by hardware
+ * @rx_buffer: rx buffer to pull data from
+ * @size: size of buffer to add to skb
  *
- * This function allocates an skb on the fly, and populates it with the page
- * data from the current receive descriptor, taking care to set up the skb
- * correctly, as well as handling calling the page recycle function if
- * necessary.
+ * This function allocates an skb.  It then populates it with the page
+ * data from the current receive descriptor, taking care to set up the
+ * skb correctly.
  */
-static inline
-struct sk_buff *i40e_fetch_rx_buffer(struct i40e_ring *rx_ring,
-                                    union i40e_rx_desc *rx_desc,
-                                    struct sk_buff *skb)
+static struct sk_buff *i40e_construct_skb(struct i40e_ring *rx_ring,
+                                         struct i40e_rx_buffer *rx_buffer,
+                                         unsigned int size)
 {
-       u64 local_status_error_len =
-               le64_to_cpu(rx_desc->wb.qword1.status_error_len);
-       unsigned int size =
-               (local_status_error_len & I40E_RXD_QW1_LENGTH_PBUF_MASK) >>
-               I40E_RXD_QW1_LENGTH_PBUF_SHIFT;
-       struct i40e_rx_buffer *rx_buffer;
-       struct page *page;
+       void *va = page_address(rx_buffer->page) + rx_buffer->page_offset;
+#if (PAGE_SIZE < 8192)
+       unsigned int truesize = i40e_rx_pg_size(rx_ring) / 2;
+#else
+       unsigned int truesize = SKB_DATA_ALIGN(size);
+#endif
+       unsigned int headlen;
+       struct sk_buff *skb;
 
-       rx_buffer = &rx_ring->rx_bi[rx_ring->next_to_clean];
-       page = rx_buffer->page;
-       prefetchw(page);
+       /* prefetch first cache line of first page */
+       prefetch(va);
+#if L1_CACHE_BYTES < 128
+       prefetch(va + L1_CACHE_BYTES);
+#endif
+
+       /* allocate a skb to store the frags */
+       skb = __napi_alloc_skb(&rx_ring->q_vector->napi,
+                              I40E_RX_HDR_SIZE,
+                              GFP_ATOMIC | __GFP_NOWARN);
+       if (unlikely(!skb))
+               return NULL;
+
+       /* Determine available headroom for copy */
+       headlen = size;
+       if (headlen > I40E_RX_HDR_SIZE)
+               headlen = eth_get_headlen(va, I40E_RX_HDR_SIZE);
+
+       /* align pull length to size of long to optimize memcpy performance */
+       memcpy(__skb_put(skb, headlen), va, ALIGN(headlen, sizeof(long)));
+
+       /* update all of the pointers */
+       size -= headlen;
+       if (size) {
+               skb_add_rx_frag(skb, 0, rx_buffer->page,
+                               rx_buffer->page_offset + headlen,
+                               size, truesize);
+
+               /* buffer is used by skb, update page_offset */
+#if (PAGE_SIZE < 8192)
+               rx_buffer->page_offset ^= truesize;
+#else
+               rx_buffer->page_offset += truesize;
+#endif
+       } else {
+               /* buffer is unused, reset bias back to rx_buffer */
+               rx_buffer->pagecnt_bias++;
+       }
 
-       if (likely(!skb)) {
-               void *page_addr = page_address(page) + rx_buffer->page_offset;
+       return skb;
+}
 
-               /* prefetch first cache line of first page */
-               prefetch(page_addr);
+/**
+ * i40e_build_skb - Build skb around an existing buffer
+ * @rx_ring: Rx descriptor ring to transact packets on
+ * @rx_buffer: Rx buffer to pull data from
+ * @size: size of buffer to add to skb
+ *
+ * This function builds an skb around an existing Rx buffer, taking care
+ * to set up the skb correctly and avoid any memcpy overhead.
+ */
+static struct sk_buff *i40e_build_skb(struct i40e_ring *rx_ring,
+                                     struct i40e_rx_buffer *rx_buffer,
+                                     unsigned int size)
+{
+       void *va = page_address(rx_buffer->page) + rx_buffer->page_offset;
+#if (PAGE_SIZE < 8192)
+       unsigned int truesize = i40e_rx_pg_size(rx_ring) / 2;
+#else
+       unsigned int truesize = SKB_DATA_ALIGN(size);
+#endif
+       struct sk_buff *skb;
+
+       /* prefetch first cache line of first page */
+       prefetch(va);
 #if L1_CACHE_BYTES < 128
-               prefetch(page_addr + L1_CACHE_BYTES);
+       prefetch(va + L1_CACHE_BYTES);
 #endif
+       /* build an skb around the page buffer */
+       skb = build_skb(va - I40E_SKB_PAD, truesize);
+       if (unlikely(!skb))
+               return NULL;
 
-               /* allocate a skb to store the frags */
-               skb = __napi_alloc_skb(&rx_ring->q_vector->napi,
-                                      I40E_RX_HDR_SIZE,
-                                      GFP_ATOMIC | __GFP_NOWARN);
-               if (unlikely(!skb)) {
-                       rx_ring->rx_stats.alloc_buff_failed++;
-                       return NULL;
-               }
+       /* update pointers within the skb to store the data */
+       skb_reserve(skb, I40E_SKB_PAD);
+       __skb_put(skb, size);
 
-               /* we will be copying header into skb->data in
-                * pskb_may_pull so it is in our interest to prefetch
-                * it now to avoid a possible cache miss
-                */
-               prefetchw(skb->data);
-       }
+       /* buffer is used by skb, update page_offset */
+#if (PAGE_SIZE < 8192)
+       rx_buffer->page_offset ^= truesize;
+#else
+       rx_buffer->page_offset += truesize;
+#endif
 
-       /* we are reusing so sync this buffer for CPU use */
-       dma_sync_single_range_for_cpu(rx_ring->dev,
-                                     rx_buffer->dma,
-                                     rx_buffer->page_offset,
-                                     size,
-                                     DMA_FROM_DEVICE);
+       return skb;
+}
 
-       /* pull page into skb */
-       if (i40e_add_rx_frag(rx_ring, rx_buffer, size, skb)) {
+/**
+ * i40e_put_rx_buffer - Clean up used buffer and either recycle or free
+ * @rx_ring: rx descriptor ring to transact packets on
+ * @rx_buffer: rx buffer to pull data from
+ *
+ * This function will clean up the contents of the rx_buffer.  It will
+ * either recycle the buffer or unmap it and free the associated resources.
+ */
+static void i40e_put_rx_buffer(struct i40e_ring *rx_ring,
+                              struct i40e_rx_buffer *rx_buffer)
+{
+       if (i40e_can_reuse_rx_page(rx_buffer)) {
                /* hand second half of page back to the ring */
                i40e_reuse_rx_page(rx_ring, rx_buffer);
                rx_ring->rx_stats.page_reuse_count++;
        } else {
                /* we are not reusing the buffer so unmap it */
-               dma_unmap_page(rx_ring->dev, rx_buffer->dma, PAGE_SIZE,
-                              DMA_FROM_DEVICE);
+               dma_unmap_page_attrs(rx_ring->dev, rx_buffer->dma,
+                                    i40e_rx_pg_size(rx_ring),
+                                    DMA_FROM_DEVICE, I40E_RX_DMA_ATTR);
+               __page_frag_cache_drain(rx_buffer->page,
+                                       rx_buffer->pagecnt_bias);
        }
 
        /* clear contents of buffer_info */
        rx_buffer->page = NULL;
-
-       return skb;
 }
 
 /**
@@ -1753,7 +1946,9 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget)
        bool failure = false;
 
        while (likely(total_rx_packets < budget)) {
+               struct i40e_rx_buffer *rx_buffer;
                union i40e_rx_desc *rx_desc;
+               unsigned int size;
                u16 vlan_tag;
                u8 rx_ptype;
                u64 qword;
@@ -1770,22 +1965,38 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget)
                /* status_error_len will always be zero for unused descriptors
                 * because it's cleared in cleanup, and overlaps with hdr_addr
                 * which is always zero because packet split isn't used; if the
-                * hardware wrote DD then it will be non-zero
+                * hardware wrote DD then the length will be non-zero
                 */
-               if (!i40e_test_staterr(rx_desc,
-                                      BIT(I40E_RX_DESC_STATUS_DD_SHIFT)))
+               qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
+               size = (qword & I40E_RXD_QW1_LENGTH_PBUF_MASK) >>
+                      I40E_RXD_QW1_LENGTH_PBUF_SHIFT;
+               if (!size)
                        break;
 
                /* This memory barrier is needed to keep us from reading
-                * any other fields out of the rx_desc until we know the
-                * DD bit is set.
+                * any other fields out of the rx_desc until we have
+                * verified the descriptor has been written back.
                 */
                dma_rmb();
 
-               skb = i40e_fetch_rx_buffer(rx_ring, rx_desc, skb);
-               if (!skb)
+               rx_buffer = i40e_get_rx_buffer(rx_ring, size);
+
+               /* retrieve a buffer from the ring */
+               if (skb)
+                       i40e_add_rx_frag(rx_ring, rx_buffer, skb, size);
+               else if (ring_uses_build_skb(rx_ring))
+                       skb = i40e_build_skb(rx_ring, rx_buffer, size);
+               else
+                       skb = i40e_construct_skb(rx_ring, rx_buffer, size);
+
+               /* exit if we failed to retrieve a buffer */
+               if (!skb) {
+                       rx_ring->rx_stats.alloc_buff_failed++;
+                       rx_buffer->pagecnt_bias++;
                        break;
+               }
 
+               i40e_put_rx_buffer(rx_ring, rx_buffer);
                cleaned_count++;
 
                if (i40e_is_non_eop(rx_ring, rx_desc, skb))
@@ -1798,6 +2009,7 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget)
                 */
                if (unlikely(i40e_test_staterr(rx_desc, BIT(I40E_RXD_QW1_ERROR_SHIFT)))) {
                        dev_kfree_skb_any(skb);
+                       skb = NULL;
                        continue;
                }
 
@@ -1816,15 +2028,6 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget)
                /* populate checksum, VLAN, and protocol */
                i40e_process_skb_fields(rx_ring, rx_desc, skb, rx_ptype);
 
-#ifdef I40E_FCOE
-               if (unlikely(
-                   i40e_rx_is_fcoe(rx_ptype) &&
-                   !i40e_fcoe_handle_offload(rx_ring, rx_desc, skb))) {
-                       dev_kfree_skb_any(skb);
-                       continue;
-               }
-#endif
-
                vlan_tag = (qword & BIT(I40E_RX_DESC_STATUS_L2TAG1P_SHIFT)) ?
                           le16_to_cpu(rx_desc->wb.qword0.lo_dword.l2tag1) : 0;
 
@@ -1978,8 +2181,6 @@ int i40e_napi_poll(struct napi_struct *napi, int budget)
                return 0;
        }
 
-       /* Clear hung_detected bit */
-       clear_bit(I40E_Q_VECTOR_HUNG_DETECT, &q_vector->hung_detected);
        /* Since the actual Tx work is minimal, we can give the Tx a larger
         * budget and be more aggressive about cleaning up the Tx descriptors.
         */
@@ -2079,7 +2280,7 @@ static void i40e_atr(struct i40e_ring *tx_ring, struct sk_buff *skb,
        if (!(pf->flags & I40E_FLAG_FD_ATR_ENABLED))
                return;
 
-       if ((pf->auto_disable_flags & I40E_FLAG_FD_ATR_ENABLED))
+       if ((pf->hw_disabled_flags & I40E_FLAG_FD_ATR_ENABLED))
                return;
 
        /* if sampling is disabled do nothing */
@@ -2113,10 +2314,9 @@ static void i40e_atr(struct i40e_ring *tx_ring, struct sk_buff *skb,
        th = (struct tcphdr *)(hdr.network + hlen);
 
        /* Due to lack of space, no more new filters can be programmed */
-       if (th->syn && (pf->auto_disable_flags & I40E_FLAG_FD_ATR_ENABLED))
+       if (th->syn && (pf->hw_disabled_flags & I40E_FLAG_FD_ATR_ENABLED))
                return;
-       if ((pf->flags & I40E_FLAG_HW_ATR_EVICT_CAPABLE) &&
-           (!(pf->auto_disable_flags & I40E_FLAG_HW_ATR_EVICT_CAPABLE))) {
+       if (pf->flags & I40E_FLAG_HW_ATR_EVICT_CAPABLE) {
                /* HW ATR eviction will take care of removing filters on FIN
                 * and RST packets.
                 */
@@ -2178,8 +2378,7 @@ static void i40e_atr(struct i40e_ring *tx_ring, struct sk_buff *skb,
                        I40E_TXD_FLTR_QW1_CNTINDEX_SHIFT) &
                        I40E_TXD_FLTR_QW1_CNTINDEX_MASK;
 
-       if ((pf->flags & I40E_FLAG_HW_ATR_EVICT_CAPABLE) &&
-           (!(pf->auto_disable_flags & I40E_FLAG_HW_ATR_EVICT_CAPABLE)))
+       if (pf->flags & I40E_FLAG_HW_ATR_EVICT_CAPABLE)
                dtype_cmd |= I40E_TXD_FLTR_QW1_ATR_MASK;
 
        fdir_desc->qindex_flex_ptype_vsi = cpu_to_le32(flex_ptype);
@@ -2200,15 +2399,9 @@ static void i40e_atr(struct i40e_ring *tx_ring, struct sk_buff *skb,
  * Returns an error code to indicate the frame should be dropped upon error,
  * and otherwise returns 0 to indicate the flags have been set properly.
  **/
-#ifdef I40E_FCOE
-inline int i40e_tx_prepare_vlan_flags(struct sk_buff *skb,
-                                     struct i40e_ring *tx_ring,
-                                     u32 *flags)
-#else
 static inline int i40e_tx_prepare_vlan_flags(struct sk_buff *skb,
                                             struct i40e_ring *tx_ring,
                                             u32 *flags)
-#endif
 {
        __be16 protocol = skb->protocol;
        u32  tx_flags = 0;
@@ -2716,15 +2909,9 @@ bool __i40e_chk_linearize(struct sk_buff *skb)
  * @td_cmd:   the command field in the descriptor
  * @td_offset: offset for checksum or crc
  **/
-#ifdef I40E_FCOE
-inline void i40e_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
-                       struct i40e_tx_buffer *first, u32 tx_flags,
-                       const u8 hdr_len, u32 td_cmd, u32 td_offset)
-#else
 static inline void i40e_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
                               struct i40e_tx_buffer *first, u32 tx_flags,
                               const u8 hdr_len, u32 td_cmd, u32 td_offset)
-#endif
 {
        unsigned int data_len = skb->data_len;
        unsigned int size = skb_headlen(skb);
index f80979025c0131a07e7b956826f17877b8b7081a..f5de51124caee489cac1db58826f8c28a7ecccb6 100644 (file)
@@ -117,10 +117,9 @@ enum i40e_dyn_idx_t {
 
 /* Supported Rx Buffer Sizes (a multiple of 128) */
 #define I40E_RXBUFFER_256   256
+#define I40E_RXBUFFER_1536  1536  /* 128B aligned standard Ethernet frame */
 #define I40E_RXBUFFER_2048  2048
-#define I40E_RXBUFFER_3072  3072   /* For FCoE MTU of 2158 */
-#define I40E_RXBUFFER_4096  4096
-#define I40E_RXBUFFER_8192  8192
+#define I40E_RXBUFFER_3072  3072  /* Used for large frames w/ padding */
 #define I40E_MAX_RXBUFFER   9728  /* largest size for single descriptor */
 
 /* NOTE: netdev_alloc_skb reserves up to 64 bytes, NET_IP_ALIGN means we
@@ -133,6 +132,61 @@ enum i40e_dyn_idx_t {
 #define I40E_RX_HDR_SIZE I40E_RXBUFFER_256
 #define i40e_rx_desc i40e_32byte_rx_desc
 
+#define I40E_RX_DMA_ATTR \
+       (DMA_ATTR_SKIP_CPU_SYNC | DMA_ATTR_WEAK_ORDERING)
+
+/* Attempt to maximize the headroom available for incoming frames.  We
+ * use a 2K buffer for receives and need 1536/1534 to store the data for
+ * the frame.  This leaves us with 512 bytes of room.  From that we need
+ * to deduct the space needed for the shared info and the padding needed
+ * to IP align the frame.
+ *
+ * Note: For cache line sizes 256 or larger this value is going to end
+ *      up negative.  In these cases we should fall back to the legacy
+ *      receive path.
+ */
+#if (PAGE_SIZE < 8192)
+#define I40E_2K_TOO_SMALL_WITH_PADDING \
+((NET_SKB_PAD + I40E_RXBUFFER_1536) > SKB_WITH_OVERHEAD(I40E_RXBUFFER_2048))
+
+static inline int i40e_compute_pad(int rx_buf_len)
+{
+       int page_size, pad_size;
+
+       page_size = ALIGN(rx_buf_len, PAGE_SIZE / 2);
+       pad_size = SKB_WITH_OVERHEAD(page_size) - rx_buf_len;
+
+       return pad_size;
+}
+
+static inline int i40e_skb_pad(void)
+{
+       int rx_buf_len;
+
+       /* If a 2K buffer cannot handle a standard Ethernet frame then
+        * optimize padding for a 3K buffer instead of a 1.5K buffer.
+        *
+        * For a 3K buffer we need to add enough padding to allow for
+        * tailroom due to NET_IP_ALIGN possibly shifting us out of
+        * cache-line alignment.
+        */
+       if (I40E_2K_TOO_SMALL_WITH_PADDING)
+               rx_buf_len = I40E_RXBUFFER_3072 + SKB_DATA_ALIGN(NET_IP_ALIGN);
+       else
+               rx_buf_len = I40E_RXBUFFER_1536;
+
+       /* if needed make room for NET_IP_ALIGN */
+       rx_buf_len -= NET_IP_ALIGN;
+
+       return i40e_compute_pad(rx_buf_len);
+}
+
+#define I40E_SKB_PAD i40e_skb_pad()
+#else
+#define I40E_2K_TOO_SMALL_WITH_PADDING false
+#define I40E_SKB_PAD (NET_SKB_PAD + NET_IP_ALIGN)
+#endif
+
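
To make the arithmetic concrete, here is a user-space rendering of the pad computation with the kernel constants replaced by assumed x86_64 values; sizeof(struct skb_shared_info) in particular varies by architecture and config:

#include <stdio.h>

#define CACHE_LINE           64                    /* assumed */
#define SHINFO_SIZE          320                   /* assumed shared-info size */
#define ALIGN_UP(x, a)       (((x) + (a) - 1) & ~((a) - 1))
#define SKB_WITH_OVERHEAD(x) ((x) - ALIGN_UP(SHINFO_SIZE, CACHE_LINE))
#define NET_IP_ALIGN         2

static int compute_pad(int rx_buf_len)
{
        int half_page = ALIGN_UP(rx_buf_len, 2048); /* round to half a 4K page */

        return SKB_WITH_OVERHEAD(half_page) - rx_buf_len;
}

int main(void)
{
        /* 1536-byte buffer, IP-aligned: with the assumed sizes this prints
         * 194, the headroom left in a 2K half page once frame data and
         * shared info are deducted.
         */
        printf("pad = %d\n", compute_pad(1536 - NET_IP_ALIGN));
        return 0;
}
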
 /**
  * i40e_test_staterr - tests bits in Rx descriptor status and error fields
  * @rx_desc: pointer to receive descriptor (in le64 format)
@@ -255,7 +309,12 @@ struct i40e_tx_buffer {
 struct i40e_rx_buffer {
        dma_addr_t dma;
        struct page *page;
-       unsigned int page_offset;
+#if (BITS_PER_LONG > 32) || (PAGE_SIZE >= 65536)
+       __u32 page_offset;
+#else
+       __u16 page_offset;
+#endif
+       __u16 pagecnt_bias;
 };
 
 struct i40e_queue_stats {
@@ -269,7 +328,6 @@ struct i40e_tx_queue_stats {
        u64 tx_done_old;
        u64 tx_linearize;
        u64 tx_force_wb;
-       u64 tx_lost_interrupt;
 };
 
 struct i40e_rx_queue_stats {
@@ -335,7 +393,8 @@ struct i40e_ring {
        u8 packet_stride;
 
        u16 flags;
-#define I40E_TXR_FLAGS_WB_ON_ITR       BIT(0)
+#define I40E_TXR_FLAGS_WB_ON_ITR               BIT(0)
+#define I40E_RXR_FLAGS_BUILD_SKB_ENABLED       BIT(1)
 
        /* stats structs */
        struct i40e_queue_stats stats;
@@ -363,6 +422,21 @@ struct i40e_ring {
                                         */
 } ____cacheline_internodealigned_in_smp;
 
+static inline bool ring_uses_build_skb(struct i40e_ring *ring)
+{
+       return !!(ring->flags & I40E_RXR_FLAGS_BUILD_SKB_ENABLED);
+}
+
+static inline void set_ring_build_skb_enabled(struct i40e_ring *ring)
+{
+       ring->flags |= I40E_RXR_FLAGS_BUILD_SKB_ENABLED;
+}
+
+static inline void clear_ring_build_skb_enabled(struct i40e_ring *ring)
+{
+       ring->flags &= ~I40E_RXR_FLAGS_BUILD_SKB_ENABLED;
+}
+
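
A hypothetical ring-setup fragment showing how these helpers are meant to gate the two receive paths; the surrounding function and the buffer-length choice are assumptions for illustration, not code from this patch:

if (!I40E_2K_TOO_SMALL_WITH_PADDING)
        set_ring_build_skb_enabled(rx_ring);
else
        clear_ring_build_skb_enabled(rx_ring);

if (ring_uses_build_skb(rx_ring))
        /* headroom is reserved per buffer via i40e_rx_offset() */
        rx_ring->rx_buf_len = I40E_RXBUFFER_1536 - NET_IP_ALIGN;
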
 enum i40e_latency_range {
        I40E_LOWEST_LATENCY = 0,
        I40E_LOW_LATENCY = 1,
@@ -384,6 +458,17 @@ struct i40e_ring_container {
 #define i40e_for_each_ring(pos, head) \
        for (pos = (head).ring; pos != NULL; pos = pos->next)
 
+static inline unsigned int i40e_rx_pg_order(struct i40e_ring *ring)
+{
+#if (PAGE_SIZE < 8192)
+       if (ring->rx_buf_len > (PAGE_SIZE / 2))
+               return 1;
+#endif
+       return 0;
+}
+
+#define i40e_rx_pg_size(_ring) (PAGE_SIZE << i40e_rx_pg_order(_ring))
+
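
Concretely, with 4K pages a 2K buffer fits two per page, while the 3K padded-frame case needs a compound allocation:

/* Assuming PAGE_SIZE == 4096:
 *   rx_buf_len == 2048 -> order 0, i40e_rx_pg_size == 4096 (two 2K halves)
 *   rx_buf_len == 3072 -> order 1, i40e_rx_pg_size == 8192 (two 3K+pad halves)
 */
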
 bool i40e_alloc_rx_buffers(struct i40e_ring *rxr, u16 cleaned_count);
 netdev_tx_t i40e_lan_xmit_frame(struct sk_buff *skb, struct net_device *netdev);
 void i40e_clean_tx_ring(struct i40e_ring *tx_ring);
@@ -393,15 +478,8 @@ int i40e_setup_rx_descriptors(struct i40e_ring *rx_ring);
 void i40e_free_tx_resources(struct i40e_ring *tx_ring);
 void i40e_free_rx_resources(struct i40e_ring *rx_ring);
 int i40e_napi_poll(struct napi_struct *napi, int budget);
-#ifdef I40E_FCOE
-void i40e_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
-                struct i40e_tx_buffer *first, u32 tx_flags,
-                const u8 hdr_len, u32 td_cmd, u32 td_offset);
-int i40e_tx_prepare_vlan_flags(struct sk_buff *skb,
-                              struct i40e_ring *tx_ring, u32 *flags);
-#endif
 void i40e_force_wb(struct i40e_vsi *vsi, struct i40e_q_vector *q_vector);
-u32 i40e_get_tx_pending(struct i40e_ring *ring, bool in_sw);
+u32 i40e_get_tx_pending(struct i40e_ring *ring);
 int __i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size);
 bool __i40e_chk_linearize(struct sk_buff *skb);
 
@@ -482,16 +560,6 @@ static inline bool i40e_chk_linearize(struct sk_buff *skb, int count)
        return count != I40E_MAX_BUFFER_TXD;
 }
 
-/**
- * i40e_rx_is_fcoe - returns true if the Rx packet type is FCoE
- * @ptype: the packet type field from Rx descriptor write-back
- **/
-static inline bool i40e_rx_is_fcoe(u16 ptype)
-{
-       return (ptype >= I40E_RX_PTYPE_L2_FCOE_PAY3) &&
-              (ptype <= I40E_RX_PTYPE_L2_FCOE_VFT_FCOTHER);
-}
-
 /**
  * txring_txq - Find the netdev Tx ring based on the i40e Tx ring
  * @ring: Tx ring to find the netdev equivalent of
index 939f9fdc8f8573fa35554d6917e1722348d2da29..9200f2d9c752b18d3525b3b8174dac67778e3819 100644 (file)
@@ -1213,25 +1213,6 @@ struct i40e_veb_tc_stats {
        u64 tc_tx_bytes[I40E_MAX_TRAFFIC_CLASS];
 };
 
-#ifdef I40E_FCOE
-/* Statistics collected per function for FCoE */
-struct i40e_fcoe_stats {
-       u64 rx_fcoe_packets;            /* fcoeprc */
-       u64 rx_fcoe_dwords;             /* focedwrc */
-       u64 rx_fcoe_dropped;            /* fcoerpdc */
-       u64 tx_fcoe_packets;            /* fcoeptc */
-       u64 tx_fcoe_dwords;             /* focedwtc */
-       u64 fcoe_bad_fccrc;             /* fcoecrc */
-       u64 fcoe_last_error;            /* fcoelast */
-       u64 fcoe_ddp_count;             /* fcoeddpc */
-};
-
-/* offset to per function FCoE statistics block */
-#define I40E_FCOE_VF_STAT_OFFSET       0
-#define I40E_FCOE_PF_STAT_OFFSET       128
-#define I40E_FCOE_STAT_MAX             (I40E_FCOE_PF_STAT_OFFSET + I40E_MAX_PF)
-
-#endif
 /* Statistics collected by the MAC */
 struct i40e_hw_port_stats {
        /* eth stats collected by the port */
@@ -1319,125 +1300,6 @@ struct i40e_hw_port_stats {
 
 #define I40E_SRRD_SRCTL_ATTEMPTS       100000
 
-#ifdef I40E_FCOE
-/* FCoE Tx context descriptor - Use the i40e_tx_context_desc struct */
-
-enum i40E_fcoe_tx_ctx_desc_cmd_bits {
-       I40E_FCOE_TX_CTX_DESC_OPCODE_SINGLE_SEND        = 0x00, /* 4 BITS */
-       I40E_FCOE_TX_CTX_DESC_OPCODE_TSO_FC_CLASS2      = 0x01, /* 4 BITS */
-       I40E_FCOE_TX_CTX_DESC_OPCODE_TSO_FC_CLASS3      = 0x05, /* 4 BITS */
-       I40E_FCOE_TX_CTX_DESC_OPCODE_ETSO_FC_CLASS2     = 0x02, /* 4 BITS */
-       I40E_FCOE_TX_CTX_DESC_OPCODE_ETSO_FC_CLASS3     = 0x06, /* 4 BITS */
-       I40E_FCOE_TX_CTX_DESC_OPCODE_DWO_FC_CLASS2      = 0x03, /* 4 BITS */
-       I40E_FCOE_TX_CTX_DESC_OPCODE_DWO_FC_CLASS3      = 0x07, /* 4 BITS */
-       I40E_FCOE_TX_CTX_DESC_OPCODE_DDP_CTX_INVL       = 0x08, /* 4 BITS */
-       I40E_FCOE_TX_CTX_DESC_OPCODE_DWO_CTX_INVL       = 0x09, /* 4 BITS */
-       I40E_FCOE_TX_CTX_DESC_RELOFF                    = 0x10,
-       I40E_FCOE_TX_CTX_DESC_CLRSEQ                    = 0x20,
-       I40E_FCOE_TX_CTX_DESC_DIFENA                    = 0x40,
-       I40E_FCOE_TX_CTX_DESC_IL2TAG2                   = 0x80
-};
-
-/* FCoE DDP Context descriptor */
-struct i40e_fcoe_ddp_context_desc {
-       __le64 rsvd;
-       __le64 type_cmd_foff_lsize;
-};
-
-#define I40E_FCOE_DDP_CTX_QW1_DTYPE_SHIFT      0
-#define I40E_FCOE_DDP_CTX_QW1_DTYPE_MASK       (0xFULL << \
-                                       I40E_FCOE_DDP_CTX_QW1_DTYPE_SHIFT)
-
-#define I40E_FCOE_DDP_CTX_QW1_CMD_SHIFT        4
-#define I40E_FCOE_DDP_CTX_QW1_CMD_MASK (0xFULL << \
-                                        I40E_FCOE_DDP_CTX_QW1_CMD_SHIFT)
-
-enum i40e_fcoe_ddp_ctx_desc_cmd_bits {
-       I40E_FCOE_DDP_CTX_DESC_BSIZE_512B       = 0x00, /* 2 BITS */
-       I40E_FCOE_DDP_CTX_DESC_BSIZE_4K         = 0x01, /* 2 BITS */
-       I40E_FCOE_DDP_CTX_DESC_BSIZE_8K         = 0x02, /* 2 BITS */
-       I40E_FCOE_DDP_CTX_DESC_BSIZE_16K        = 0x03, /* 2 BITS */
-       I40E_FCOE_DDP_CTX_DESC_DIFENA           = 0x04, /* 1 BIT  */
-       I40E_FCOE_DDP_CTX_DESC_LASTSEQH         = 0x08, /* 1 BIT  */
-};
-
-#define I40E_FCOE_DDP_CTX_QW1_FOFF_SHIFT       16
-#define I40E_FCOE_DDP_CTX_QW1_FOFF_MASK        (0x3FFFULL << \
-                                        I40E_FCOE_DDP_CTX_QW1_FOFF_SHIFT)
-
-#define I40E_FCOE_DDP_CTX_QW1_LSIZE_SHIFT      32
-#define I40E_FCOE_DDP_CTX_QW1_LSIZE_MASK       (0x3FFFULL << \
-                                       I40E_FCOE_DDP_CTX_QW1_LSIZE_SHIFT)
-
-/* FCoE DDP/DWO Queue Context descriptor */
-struct i40e_fcoe_queue_context_desc {
-       __le64 dmaindx_fbase;           /* 0:11 DMAINDX, 12:63 FBASE */
-       __le64 flen_tph;                /* 0:12 FLEN, 13:15 TPH */
-};
-
-#define I40E_FCOE_QUEUE_CTX_QW0_DMAINDX_SHIFT  0
-#define I40E_FCOE_QUEUE_CTX_QW0_DMAINDX_MASK   (0xFFFULL << \
-                                       I40E_FCOE_QUEUE_CTX_QW0_DMAINDX_SHIFT)
-
-#define I40E_FCOE_QUEUE_CTX_QW0_FBASE_SHIFT    12
-#define I40E_FCOE_QUEUE_CTX_QW0_FBASE_MASK     (0xFFFFFFFFFFFFFULL << \
-                                       I40E_FCOE_QUEUE_CTX_QW0_FBASE_SHIFT)
-
-#define I40E_FCOE_QUEUE_CTX_QW1_FLEN_SHIFT     0
-#define I40E_FCOE_QUEUE_CTX_QW1_FLEN_MASK      (0x1FFFULL << \
-                                       I40E_FCOE_QUEUE_CTX_QW1_FLEN_SHIFT)
-
-#define I40E_FCOE_QUEUE_CTX_QW1_TPH_SHIFT      13
-#define I40E_FCOE_QUEUE_CTX_QW1_TPH_MASK       (0x7ULL << \
-                                       I40E_FCOE_QUEUE_CTX_QW1_FLEN_SHIFT)
-
-enum i40e_fcoe_queue_ctx_desc_tph_bits {
-       I40E_FCOE_QUEUE_CTX_DESC_TPHRDESC       = 0x1,
-       I40E_FCOE_QUEUE_CTX_DESC_TPHDATA        = 0x2
-};
-
-#define I40E_FCOE_QUEUE_CTX_QW1_RECIPE_SHIFT   30
-#define I40E_FCOE_QUEUE_CTX_QW1_RECIPE_MASK    (0x3ULL << \
-                                       I40E_FCOE_QUEUE_CTX_QW1_RECIPE_SHIFT)
-
-/* FCoE DDP/DWO Filter Context descriptor */
-struct i40e_fcoe_filter_context_desc {
-       __le32 param;
-       __le16 seqn;
-
-       /* 48:51(0:3) RSVD, 52:63(4:15) DMAINDX */
-       __le16 rsvd_dmaindx;
-
-       /* 0:7 FLAGS, 8:52 RSVD, 53:63 LANQ */
-       __le64 flags_rsvd_lanq;
-};
-
-#define I40E_FCOE_FILTER_CTX_QW0_DMAINDX_SHIFT 4
-#define I40E_FCOE_FILTER_CTX_QW0_DMAINDX_MASK  (0xFFF << \
-                                       I40E_FCOE_FILTER_CTX_QW0_DMAINDX_SHIFT)
-
-enum i40e_fcoe_filter_ctx_desc_flags_bits {
-       I40E_FCOE_FILTER_CTX_DESC_CTYP_DDP      = 0x00,
-       I40E_FCOE_FILTER_CTX_DESC_CTYP_DWO      = 0x01,
-       I40E_FCOE_FILTER_CTX_DESC_ENODE_INIT    = 0x00,
-       I40E_FCOE_FILTER_CTX_DESC_ENODE_RSP     = 0x02,
-       I40E_FCOE_FILTER_CTX_DESC_FC_CLASS2     = 0x00,
-       I40E_FCOE_FILTER_CTX_DESC_FC_CLASS3     = 0x04
-};
-
-#define I40E_FCOE_FILTER_CTX_QW1_FLAGS_SHIFT   0
-#define I40E_FCOE_FILTER_CTX_QW1_FLAGS_MASK    (0xFFULL << \
-                                       I40E_FCOE_FILTER_CTX_QW1_FLAGS_SHIFT)
-
-#define I40E_FCOE_FILTER_CTX_QW1_PCTYPE_SHIFT     8
-#define I40E_FCOE_FILTER_CTX_QW1_PCTYPE_MASK      (0x3FULL << \
-                       I40E_FCOE_FILTER_CTX_QW1_PCTYPE_SHIFT)
-
-#define I40E_FCOE_FILTER_CTX_QW1_LANQINDX_SHIFT     53
-#define I40E_FCOE_FILTER_CTX_QW1_LANQINDX_MASK      (0x7FFULL << \
-                       I40E_FCOE_FILTER_CTX_QW1_LANQINDX_SHIFT)
-
-#endif /* I40E_FCOE */
 enum i40e_switch_element_types {
        I40E_SWITCH_ELEMENT_TYPE_MAC    = 1,
        I40E_SWITCH_ELEMENT_TYPE_PF     = 2,
index 974ba2baf6ea006d2f3dae4e7aa841e7bffb96d3..8552192a5bde7b804bb7f38c9142ac36f9031926 100644 (file)
@@ -163,7 +163,8 @@ struct i40e_virtchnl_vsi_resource {
 #define I40E_VIRTCHNL_VF_OFFLOAD_RX_POLLING    0x00020000
 #define I40E_VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2 0x00040000
 #define I40E_VIRTCHNL_VF_OFFLOAD_RSS_PF                0X00080000
-#define I40E_VIRTCHNL_VF_OFFLOAD_ENCAP_CSUM    0X00100000
+#define I40E_VIRTCHNL_VF_OFFLOAD_ENCAP         0X00100000
+#define I40E_VIRTCHNL_VF_OFFLOAD_ENCAP_CSUM    0X00200000
 
 #define I40E_VF_BASE_MODE_OFFLOADS (I40E_VIRTCHNL_VF_OFFLOAD_L2 | \
                                    I40E_VIRTCHNL_VF_OFFLOAD_VLAN | \
index 78460c52b7c445112cd777385f0e81058f26e918..65c95ffc15ec9c1e97ed3a0ffbc5dc9d15f3f1ef 100644 (file)
@@ -702,10 +702,8 @@ static int i40e_alloc_vsi_res(struct i40e_vf *vf, enum i40e_vsi_type type)
                        dev_info(&pf->pdev->dev,
                                 "Could not allocate VF broadcast filter\n");
                spin_unlock_bh(&vsi->mac_filter_hash_lock);
-               i40e_write_rx_ctl(&pf->hw, I40E_VFQF_HENA1(0, vf->vf_id),
-                                 (u32)hena);
-               i40e_write_rx_ctl(&pf->hw, I40E_VFQF_HENA1(1, vf->vf_id),
-                                 (u32)(hena >> 32));
+               wr32(&pf->hw, I40E_VFQF_HENA1(0, vf->vf_id), (u32)hena);
+               wr32(&pf->hw, I40E_VFQF_HENA1(1, vf->vf_id), (u32)(hena >> 32));
        }
 
        /* program mac filter */
@@ -811,6 +809,11 @@ static void i40e_free_vf_res(struct i40e_vf *vf)
        u32 reg_idx, reg;
        int i, msix_vf;
 
+       /* Start by disabling VF's configuration API to prevent the OS from
+        * accessing the VF's VSI after it's freed / invalidated.
+        */
+       clear_bit(I40E_VF_STAT_INIT, &vf->vf_states);
+
        /* free vsi & disconnect it from the parent uplink */
        if (vf->lan_vsi_idx) {
                i40e_vsi_release(pf->vsi[vf->lan_vsi_idx]);
@@ -850,7 +853,6 @@ static void i40e_free_vf_res(struct i40e_vf *vf)
        /* reset some of the state variables keeping track of the resources */
        vf->num_queue_pairs = 0;
        vf->vf_states = 0;
-       clear_bit(I40E_VF_STAT_INIT, &vf->vf_states);
 }
 
 /**
@@ -941,6 +943,14 @@ void i40e_reset_vf(struct i40e_vf *vf, bool flr)
        /* warn the VF */
        clear_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states);
 
+       /* Disable VF's configuration API during reset. The flag is re-enabled
+        * in i40e_alloc_vf_res(), when it's safe again to access VF's VSI.
+        * It's normally disabled in i40e_free_vf_res(), but it's safer
+        * to do it earlier to give any VF config functions that may still
+        * be running at this point time to finish.
+        */
+       clear_bit(I40E_VF_STAT_INIT, &vf->vf_states);
+
        /* In the case of a VFLR, the HW has already reset the VF and we
         * just need to clean up, so don't hit the VFRTRIG register.
         */
@@ -984,11 +994,6 @@ void i40e_reset_vf(struct i40e_vf *vf, bool flr)
        if (!rsd)
                dev_err(&pf->pdev->dev, "VF reset check timeout on VF %d\n",
                        vf->vf_id);
-       wr32(hw, I40E_VFGEN_RSTAT1(vf->vf_id), I40E_VFR_COMPLETED);
-       /* clear the reset bit in the VPGEN_VFRTRIG reg */
-       reg = rd32(hw, I40E_VPGEN_VFRTRIG(vf->vf_id));
-       reg &= ~I40E_VPGEN_VFRTRIG_VFSWR_MASK;
-       wr32(hw, I40E_VPGEN_VFRTRIG(vf->vf_id), reg);
 
        /* On initial reset, we won't have any queues */
        if (vf->lan_vsi_idx == 0)
@@ -996,8 +1001,24 @@ void i40e_reset_vf(struct i40e_vf *vf, bool flr)
 
        i40e_vsi_stop_rings(pf->vsi[vf->lan_vsi_idx]);
 complete_reset:
-       /* reallocate VF resources to reset the VSI state */
+       /* free VF resources to begin resetting the VSI state */
        i40e_free_vf_res(vf);
+
+       /* Enable hardware by clearing the reset bit in the VPGEN_VFRTRIG reg.
+        * By doing this we allow HW to access VF memory at any point. If we
+        * did it any sooner, HW could access memory while it was being freed
+        * in i40e_free_vf_res(), causing an IOMMU fault.
+        *
+        * On the other hand, this needs to be done ASAP, because the VF driver
+        * is waiting for this to happen and may report a timeout. It's
+        * harmless, but it gets logged into the guest OS kernel log, so it
+        * is best avoided.
+        */
+       reg = rd32(hw, I40E_VPGEN_VFRTRIG(vf->vf_id));
+       reg &= ~I40E_VPGEN_VFRTRIG_VFSWR_MASK;
+       wr32(hw, I40E_VPGEN_VFRTRIG(vf->vf_id), reg);
+
+       /* reallocate VF resources to finish resetting the VSI state */
        if (!i40e_alloc_vf_res(vf)) {
                int abs_vf_id = vf->vf_id + hw->func_caps.vf_base_id;
                i40e_enable_vf_mappings(vf);
@@ -1008,7 +1029,11 @@ complete_reset:
                        i40e_notify_client_of_vf_reset(pf, abs_vf_id);
                vf->num_vlan = 0;
        }
-       /* tell the VF the reset is done */
+
+       /* Tell the VF driver the reset is done. This needs to be done only
+        * after the VF has been fully initialized, because the VF driver may
+        * request resources immediately after setting this flag.
+        */
        wr32(hw, I40E_VFGEN_RSTAT1(vf->vf_id), I40E_VFR_VFACTIVE);
 
        i40e_flush(hw);
@@ -1359,7 +1384,7 @@ static int i40e_vc_get_vf_resources_msg(struct i40e_vf *vf, u8 *msg)
        if (!vsi->info.pvid)
                vfres->vf_offload_flags |= I40E_VIRTCHNL_VF_OFFLOAD_VLAN;
 
-       if (i40e_vf_client_capable(pf, vf->vf_id, I40E_CLIENT_IWARP) &&
+       if (i40e_vf_client_capable(pf, vf->vf_id) &&
            (vf->driver_caps & I40E_VIRTCHNL_VF_OFFLOAD_IWARP)) {
                vfres->vf_offload_flags |= I40E_VIRTCHNL_VF_OFFLOAD_IWARP;
                set_bit(I40E_VF_STAT_IWARPENA, &vf->vf_states);
@@ -1383,6 +1408,13 @@ static int i40e_vc_get_vf_resources_msg(struct i40e_vf *vf, u8 *msg)
                                I40E_VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2;
        }
 
+       if (vf->driver_caps & I40E_VIRTCHNL_VF_OFFLOAD_ENCAP)
+               vfres->vf_offload_flags |= I40E_VIRTCHNL_VF_OFFLOAD_ENCAP;
+
+       if ((pf->flags & I40E_FLAG_OUTER_UDP_CSUM_CAPABLE) &&
+           (vf->driver_caps & I40E_VIRTCHNL_VF_OFFLOAD_ENCAP_CSUM))
+               vfres->vf_offload_flags |= I40E_VIRTCHNL_VF_OFFLOAD_ENCAP_CSUM;
+
        if (vf->driver_caps & I40E_VIRTCHNL_VF_OFFLOAD_RX_POLLING) {
                if (pf->flags & I40E_FLAG_MFP_ENABLED) {
                        dev_err(&pf->pdev->dev,
@@ -1853,7 +1885,7 @@ error_param:
 }
 
 /* If the VF is not trusted restrict the number of MAC/VLAN it can program */
-#define I40E_VC_MAX_MAC_ADDR_PER_VF 8
+#define I40E_VC_MAX_MAC_ADDR_PER_VF 12
 #define I40E_VC_MAX_VLAN_PER_VF 8
 
 /**
index 4012d069939ab3211cd9f668a3de89dbe4561a07..37af437daa5daa499b16f3f5c469b023fb3d18de 100644 (file)
@@ -87,7 +87,6 @@ struct i40e_vf {
        u16 stag;
 
        struct i40e_virtchnl_ether_addr default_lan_addr;
-       struct i40e_virtchnl_ether_addr default_fcoe_addr;
        u16 port_vlan_id;
        bool pf_set_mac;        /* The VMM admin set the VF MAC address */
        bool trusted;
index 3a423836a565294aa82dadbeb93e4d45619ccd86..827c7a6ed0bafc7dc2d8300ed46701053d78eb34 100644 (file)
@@ -32,5 +32,5 @@
 obj-$(CONFIG_I40EVF) += i40evf.o
 
 i40evf-objs := i40evf_main.o i40evf_ethtool.o i40evf_virtchnl.o \
-               i40e_txrx.o i40e_common.o i40e_adminq.o
+               i40e_txrx.o i40e_common.o i40e_adminq.o i40evf_client.o
 
index eeb9864bc5b152a90508af879cd5f32b43c3bead..c28cb8f27243f36c8bd240f88bd24897288eea2c 100644 (file)
@@ -132,6 +132,10 @@ enum i40e_admin_queue_opc {
        i40e_aqc_opc_list_func_capabilities     = 0x000A,
        i40e_aqc_opc_list_dev_capabilities      = 0x000B,
 
+       /* Proxy commands */
+       i40e_aqc_opc_set_proxy_config           = 0x0104,
+       i40e_aqc_opc_set_ns_proxy_table_entry   = 0x0105,
+
        /* LAA */
        i40e_aqc_opc_mac_address_read   = 0x0107,
        i40e_aqc_opc_mac_address_write  = 0x0108,
@@ -139,6 +143,10 @@ enum i40e_admin_queue_opc {
        /* PXE */
        i40e_aqc_opc_clear_pxe_mode     = 0x0110,
 
+       /* WoL commands */
+       i40e_aqc_opc_set_wol_filter     = 0x0120,
+       i40e_aqc_opc_get_wake_reason    = 0x0121,
+
        /* internal switch commands */
        i40e_aqc_opc_get_switch_config          = 0x0200,
        i40e_aqc_opc_add_statistics             = 0x0201,
@@ -177,6 +185,7 @@ enum i40e_admin_queue_opc {
        i40e_aqc_opc_remove_control_packet_filter       = 0x025B,
        i40e_aqc_opc_add_cloud_filters          = 0x025C,
        i40e_aqc_opc_remove_cloud_filters       = 0x025D,
+       i40e_aqc_opc_clear_wol_switch_filters   = 0x025E,
 
        i40e_aqc_opc_add_mirror_rule    = 0x0260,
        i40e_aqc_opc_delete_mirror_rule = 0x0261,
@@ -558,6 +567,56 @@ struct i40e_aqc_clear_pxe {
 
 I40E_CHECK_CMD_LENGTH(i40e_aqc_clear_pxe);
 
+/* Set WoL Filter (0x0120) */
+
+struct i40e_aqc_set_wol_filter {
+       __le16 filter_index;
+#define I40E_AQC_MAX_NUM_WOL_FILTERS   8
+#define I40E_AQC_SET_WOL_FILTER_TYPE_MAGIC_SHIFT       15
+#define I40E_AQC_SET_WOL_FILTER_TYPE_MAGIC_MASK        (0x1 << \
+               I40E_AQC_SET_WOL_FILTER_TYPE_MAGIC_SHIFT)
+
+#define I40E_AQC_SET_WOL_FILTER_INDEX_SHIFT            0
+#define I40E_AQC_SET_WOL_FILTER_INDEX_MASK     (0x7 << \
+               I40E_AQC_SET_WOL_FILTER_INDEX_SHIFT)
+       __le16 cmd_flags;
+#define I40E_AQC_SET_WOL_FILTER                                0x8000
+#define I40E_AQC_SET_WOL_FILTER_NO_TCO_WOL             0x4000
+#define I40E_AQC_SET_WOL_FILTER_ACTION_CLEAR           0
+#define I40E_AQC_SET_WOL_FILTER_ACTION_SET             1
+       __le16 valid_flags;
+#define I40E_AQC_SET_WOL_FILTER_ACTION_VALID           0x8000
+#define I40E_AQC_SET_WOL_FILTER_NO_TCO_ACTION_VALID    0x4000
+       u8 reserved[2];
+       __le32  address_high;
+       __le32  address_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_set_wol_filter);
+
+struct i40e_aqc_set_wol_filter_data {
+       u8 filter[128];
+       u8 mask[16];
+};
+
+I40E_CHECK_STRUCT_LEN(0x90, i40e_aqc_set_wol_filter_data);
+
+/* Get Wake Reason (0x0121) */
+
+struct i40e_aqc_get_wake_reason_completion {
+       u8 reserved_1[2];
+       __le16 wake_reason;
+#define I40E_AQC_GET_WAKE_UP_REASON_WOL_REASON_MATCHED_INDEX_SHIFT     0
+#define I40E_AQC_GET_WAKE_UP_REASON_WOL_REASON_MATCHED_INDEX_MASK (0xFF << \
+               I40E_AQC_GET_WAKE_UP_REASON_WOL_REASON_MATCHED_INDEX_SHIFT)
+#define I40E_AQC_GET_WAKE_UP_REASON_WOL_REASON_RESERVED_SHIFT  8
+#define I40E_AQC_GET_WAKE_UP_REASON_WOL_REASON_RESERVED_MASK   (0xFF << \
+               I40E_AQC_GET_WAKE_UP_REASON_WOL_REASON_RESERVED_SHIFT)
+       u8 reserved_2[12];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_get_wake_reason_completion);
+
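
A hedged example of composing the filter_index word for Set WoL Filter (0x0120) from the masks above, selecting slot 3 with the magic-packet type; 'cmd' and the rest of the descriptor setup are assumed:

u16 fi = 0;

fi |= (3 << I40E_AQC_SET_WOL_FILTER_INDEX_SHIFT) &
      I40E_AQC_SET_WOL_FILTER_INDEX_MASK;        /* filter slot 3 */
fi |= I40E_AQC_SET_WOL_FILTER_TYPE_MAGIC_MASK;   /* magic-packet filter */
cmd->filter_index = cpu_to_le16(fi);             /* cmd is hypothetical */
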
 /* Switch configuration commands (0x02xx) */
 
 /* Used by many indirect commands that only pass an seid and a buffer in the
@@ -640,6 +699,8 @@ struct i40e_aqc_set_port_parameters {
 #define I40E_AQ_SET_P_PARAMS_PAD_SHORT_PACKETS 2 /* must set! */
 #define I40E_AQ_SET_P_PARAMS_DOUBLE_VLAN_ENA   4
        __le16  bad_frame_vsi;
+#define I40E_AQ_SET_P_PARAMS_BFRAME_SEID_SHIFT 0x0
+#define I40E_AQ_SET_P_PARAMS_BFRAME_SEID_MASK  0x3FF
        __le16  default_seid;        /* reserved for command */
        u8      reserved[10];
 };
@@ -691,6 +752,7 @@ I40E_CHECK_STRUCT_LEN(0x10, i40e_aqc_switch_resource_alloc_element_resp);
 /* Set Switch Configuration (direct 0x0205) */
 struct i40e_aqc_set_switch_config {
        __le16  flags;
+/* flags used for both fields below */
 #define I40E_AQ_SET_SWITCH_CFG_PROMISC         0x0001
 #define I40E_AQ_SET_SWITCH_CFG_L2_FILTER       0x0002
        __le16  valid_flags;
@@ -1839,11 +1901,12 @@ struct i40e_aqc_get_link_status {
 #define I40E_AQ_CONFIG_FEC_RS_ENA      0x02
 #define I40E_AQ_CONFIG_CRC_ENA         0x04
 #define I40E_AQ_CONFIG_PACING_MASK     0x78
-       u8      external_power_ability;
+       u8      power_desc;
 #define I40E_AQ_LINK_POWER_CLASS_1     0x00
 #define I40E_AQ_LINK_POWER_CLASS_2     0x01
 #define I40E_AQ_LINK_POWER_CLASS_3     0x02
 #define I40E_AQ_LINK_POWER_CLASS_4     0x03
+#define I40E_AQ_PWR_CLASS_MASK         0x03
        u8      reserved[4];
 };
 
index 89dfdbca13db794afe5ac0cbecee9c5ee4720626..626fbf1ead4d3acca9935427073819e916d2ff2e 100644 (file)
@@ -958,7 +958,9 @@ u32 i40evf_read_rx_ctl(struct i40e_hw *hw, u32 reg_addr)
        int retry = 5;
        u32 val = 0;
 
-       use_register = (hw->aq.api_maj_ver == 1) && (hw->aq.api_min_ver < 5);
+       use_register = (((hw->aq.api_maj_ver == 1) &&
+                       (hw->aq.api_min_ver < 5)) ||
+                       (hw->mac.type == I40E_MAC_X722));
        if (!use_register) {
 do_retry:
                status = i40evf_aq_rx_ctl_read_register(hw, reg_addr,
@@ -1019,7 +1021,9 @@ void i40evf_write_rx_ctl(struct i40e_hw *hw, u32 reg_addr, u32 reg_val)
        bool use_register;
        int retry = 5;
 
-       use_register = (hw->aq.api_maj_ver == 1) && (hw->aq.api_min_ver < 5);
+       use_register = (((hw->aq.api_maj_ver == 1) &&
+                       (hw->aq.api_min_ver < 5)) ||
+                       (hw->mac.type == I40E_MAC_X722));
        if (!use_register) {
 do_retry:
                status = i40evf_aq_rx_ctl_write_register(hw, reg_addr,
index c91fcf43ccbc5eb7bfe95a9a3eec9c559519fcd0..460171edc412e88eab3cbbbf48cf0e41c6e8dc43 100644 (file)
@@ -137,10 +137,7 @@ u32 i40evf_get_tx_pending(struct i40e_ring *ring, bool in_sw)
 {
        u32 head, tail;
 
-       if (!in_sw)
-               head = i40e_get_head(ring);
-       else
-               head = ring->next_to_clean;
+       head = ring->next_to_clean;
        tail = readl(ring->tail);
 
        if (head != tail)
@@ -165,7 +162,6 @@ static bool i40e_clean_tx_irq(struct i40e_vsi *vsi,
 {
        u16 i = tx_ring->next_to_clean;
        struct i40e_tx_buffer *tx_buf;
-       struct i40e_tx_desc *tx_head;
        struct i40e_tx_desc *tx_desc;
        unsigned int total_bytes = 0, total_packets = 0;
        unsigned int budget = vsi->work_limit;
@@ -174,8 +170,6 @@ static bool i40e_clean_tx_irq(struct i40e_vsi *vsi,
        tx_desc = I40E_TX_DESC(tx_ring, i);
        i -= tx_ring->count;
 
-       tx_head = I40E_TX_DESC(tx_ring, i40e_get_head(tx_ring));
-
        do {
                struct i40e_tx_desc *eop_desc = tx_buf->next_to_watch;
 
@@ -186,8 +180,9 @@ static bool i40e_clean_tx_irq(struct i40e_vsi *vsi,
                /* prevent any other reads prior to eop_desc */
                read_barrier_depends();
 
-               /* we have caught up to head, no work left to do */
-               if (tx_head == tx_desc)
+               /* if the descriptor isn't done, no work yet to do */
+               if (!(eop_desc->cmd_type_offset_bsz &
+                     cpu_to_le64(I40E_TX_DESC_DTYPE_DESC_DONE)))
                        break;
 
                /* clear next_to_watch to prevent false hangs */
@@ -464,10 +459,6 @@ int i40evf_setup_tx_descriptors(struct i40e_ring *tx_ring)
 
        /* round up to nearest 4K */
        tx_ring->size = tx_ring->count * sizeof(struct i40e_tx_desc);
-       /* add u32 for head writeback, align after this takes care of
-        * guaranteeing this is at least one cache line in size
-        */
-       tx_ring->size += sizeof(u32);
        tx_ring->size = ALIGN(tx_ring->size, 4096);
        tx_ring->desc = dma_alloc_coherent(dev, tx_ring->size,
                                           &tx_ring->dma, GFP_KERNEL);
@@ -493,7 +484,6 @@ err:
  **/
 void i40evf_clean_rx_ring(struct i40e_ring *rx_ring)
 {
-       struct device *dev = rx_ring->dev;
        unsigned long bi_size;
        u16 i;
 
@@ -513,8 +503,22 @@ void i40evf_clean_rx_ring(struct i40e_ring *rx_ring)
                if (!rx_bi->page)
                        continue;
 
-               dma_unmap_page(dev, rx_bi->dma, PAGE_SIZE, DMA_FROM_DEVICE);
-               __free_pages(rx_bi->page, 0);
+               /* Invalidate cache lines that may have been written to by
+                * device so that we avoid corrupting memory.
+                */
+               dma_sync_single_range_for_cpu(rx_ring->dev,
+                                             rx_bi->dma,
+                                             rx_bi->page_offset,
+                                             rx_ring->rx_buf_len,
+                                             DMA_FROM_DEVICE);
+
+               /* free resources associated with mapping */
+               dma_unmap_page_attrs(rx_ring->dev, rx_bi->dma,
+                                    i40e_rx_pg_size(rx_ring),
+                                    DMA_FROM_DEVICE,
+                                    I40E_RX_DMA_ATTR);
+
+               __page_frag_cache_drain(rx_bi->page, rx_bi->pagecnt_bias);
 
                rx_bi->page = NULL;
                rx_bi->page_offset = 0;
@@ -614,6 +618,17 @@ static inline void i40e_release_rx_desc(struct i40e_ring *rx_ring, u32 val)
        writel(val, rx_ring->tail);
 }
 
+/**
+ * i40e_rx_offset - Return expected offset into page to access data
+ * @rx_ring: Ring we are requesting offset of
+ *
+ * Returns the offset into the Rx buffer at which packet data begins for this ring.
+ */
+static inline unsigned int i40e_rx_offset(struct i40e_ring *rx_ring)
+{
+       return ring_uses_build_skb(rx_ring) ? I40E_SKB_PAD : 0;
+}
+
 /**
  * i40e_alloc_mapped_page - recycle or make a new page
  * @rx_ring: ring to use
@@ -635,27 +650,33 @@ static bool i40e_alloc_mapped_page(struct i40e_ring *rx_ring,
        }
 
        /* alloc new page for storage */
-       page = dev_alloc_page();
+       page = dev_alloc_pages(i40e_rx_pg_order(rx_ring));
        if (unlikely(!page)) {
                rx_ring->rx_stats.alloc_page_failed++;
                return false;
        }
 
        /* map page for use */
-       dma = dma_map_page(rx_ring->dev, page, 0, PAGE_SIZE, DMA_FROM_DEVICE);
+       dma = dma_map_page_attrs(rx_ring->dev, page, 0,
+                                i40e_rx_pg_size(rx_ring),
+                                DMA_FROM_DEVICE,
+                                I40E_RX_DMA_ATTR);
 
        /* if mapping failed free memory back to system since
         * there isn't much point in holding memory we can't use
         */
        if (dma_mapping_error(rx_ring->dev, dma)) {
-               __free_pages(page, 0);
+               __free_pages(page, i40e_rx_pg_order(rx_ring));
                rx_ring->rx_stats.alloc_page_failed++;
                return false;
        }
 
        bi->dma = dma;
        bi->page = page;
-       bi->page_offset = 0;
+       bi->page_offset = i40e_rx_offset(rx_ring);
+
+       /* initialize pagecnt_bias to 1 representing we fully own page */
+       bi->pagecnt_bias = 1;
 
        return true;
 }
@@ -702,6 +723,12 @@ bool i40evf_alloc_rx_buffers(struct i40e_ring *rx_ring, u16 cleaned_count)
                if (!i40e_alloc_mapped_page(rx_ring, bi))
                        goto no_buffers;
 
+               /* sync the buffer for use by the device */
+               dma_sync_single_range_for_device(rx_ring->dev, bi->dma,
+                                                bi->page_offset,
+                                                rx_ring->rx_buf_len,
+                                                DMA_FROM_DEVICE);
+
                /* Refresh the desc even if buffer_addrs didn't change
                 * because each write-back erases this info.
                 */
@@ -742,8 +769,6 @@ no_buffers:
  * @vsi: the VSI we care about
  * @skb: skb currently being received and modified
  * @rx_desc: the receive descriptor
- *
- * skb->protocol must be set before this function is called
  **/
 static inline void i40e_rx_checksum(struct i40e_vsi *vsi,
                                    struct sk_buff *skb,
@@ -895,12 +920,12 @@ void i40evf_process_skb_fields(struct i40e_ring *rx_ring,
 {
        i40e_rx_hash(rx_ring, rx_desc, skb, rx_ptype);
 
-       /* modifies the skb - consumes the enet header */
-       skb->protocol = eth_type_trans(skb, rx_ring->netdev);
-
        i40e_rx_checksum(rx_ring->vsi, skb, rx_desc);
 
        skb_record_rx_queue(skb, rx_ring->queue_index);
+
+       /* modifies the skb - consumes the enet header */
+       skb->protocol = eth_type_trans(skb, rx_ring->netdev);
 }
 
 /**
@@ -945,7 +970,10 @@ static void i40e_reuse_rx_page(struct i40e_ring *rx_ring,
        rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0;
 
        /* transfer page from old buffer to new buffer */
-       *new_buff = *old_buff;
+       new_buff->dma           = old_buff->dma;
+       new_buff->page          = old_buff->page;
+       new_buff->page_offset   = old_buff->page_offset;
+       new_buff->pagecnt_bias  = old_buff->pagecnt_bias;
 }
 
 /**
@@ -966,8 +994,6 @@ static inline bool i40e_page_is_reusable(struct page *page)
  * the adapter for another receive
  *
  * @rx_buffer: buffer containing the page
- * @page: page address from rx_buffer
- * @truesize: actual size of the buffer in this page
  *
  * If page is reusable, rx_buffer->page_offset is adjusted to point to
  * an unused region in the page.
@@ -990,13 +1016,10 @@ static inline bool i40e_page_is_reusable(struct page *page)
  *
  * In either case, if the page is reusable its refcount is increased.
  **/
-static bool i40e_can_reuse_rx_page(struct i40e_rx_buffer *rx_buffer,
-                                  struct page *page,
-                                  const unsigned int truesize)
+static bool i40e_can_reuse_rx_page(struct i40e_rx_buffer *rx_buffer)
 {
-#if (PAGE_SIZE >= 8192)
-       unsigned int last_offset = PAGE_SIZE - I40E_RXBUFFER_2048;
-#endif
+       unsigned int pagecnt_bias = rx_buffer->pagecnt_bias;
+       struct page *page = rx_buffer->page;
 
        /* Is any reuse possible? */
        if (unlikely(!i40e_page_is_reusable(page)))
@@ -1004,21 +1027,23 @@ static bool i40e_can_reuse_rx_page(struct i40e_rx_buffer *rx_buffer,
 
 #if (PAGE_SIZE < 8192)
        /* if we are only owner of page we can reuse it */
-       if (unlikely(page_count(page) != 1))
+       if (unlikely((page_count(page) - pagecnt_bias) > 1))
                return false;
-
-       /* flip page offset to other buffer */
-       rx_buffer->page_offset ^= truesize;
 #else
-       /* move offset up to the next cache line */
-       rx_buffer->page_offset += truesize;
-
-       if (rx_buffer->page_offset > last_offset)
+#define I40E_LAST_OFFSET \
+       (SKB_WITH_OVERHEAD(PAGE_SIZE) - I40E_RXBUFFER_2048)
+       if (rx_buffer->page_offset > I40E_LAST_OFFSET)
                return false;
 #endif
 
-       /* Inc ref count on page before passing it up to the stack */
-       get_page(page);
+       /* If we have drained the page fragment pool we need to update
+        * the pagecnt_bias and page count so that we fully restock the
+        * number of references the driver holds.
+        */
+       if (unlikely(!pagecnt_bias)) {
+               page_ref_add(page, USHRT_MAX);
+               rx_buffer->pagecnt_bias = USHRT_MAX;
+       }
 
        return true;
 }
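Editor's note: the pagecnt_bias scheme trades one atomic get_page() per received frame for plain arithmetic on a batched reference count. A hedged summary of the bookkeeping implemented above (comment only, not driver code):

/* pagecnt_bias lifecycle, PAGE_SIZE < 8192 case:
 *
 *   alloc:    page_count = 1, pagecnt_bias = 1
 *   per use:  pagecnt_bias-- (one reference handed to the stack,
 *             which will put_page() it when the skb is freed)
 *   reuse ok: page_count(page) - pagecnt_bias <= 1, i.e. the driver
 *             is the only remaining owner of the page
 *   restock:  when pagecnt_bias reaches 0, page_ref_add(page, USHRT_MAX)
 *             and reset pagecnt_bias to USHRT_MAX in one shot
 *   teardown: __page_frag_cache_drain(page, pagecnt_bias) returns all
 *             unspent references at once
 */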
@@ -1027,145 +1052,201 @@ static bool i40e_can_reuse_rx_page(struct i40e_rx_buffer *rx_buffer,
  * i40e_add_rx_frag - Add contents of Rx buffer to sk_buff
  * @rx_ring: rx descriptor ring to transact packets on
  * @rx_buffer: buffer containing page to add
- * @size: packet length from rx_desc
  * @skb: sk_buff to place the data into
+ * @size: packet length from rx_desc
  *
  * This function will add the data contained in rx_buffer->page to the skb.
- * This is done either through a direct copy if the data in the buffer is
- * less than the skb header size, otherwise it will just attach the page as
- * a frag to the skb.
+ * It will just attach the page as a frag to the skb.
  *
- * The function will then update the page offset if necessary and return
- * true if the buffer can be reused by the adapter.
+ * The function will then update the page offset.
  **/
-static bool i40e_add_rx_frag(struct i40e_ring *rx_ring,
+static void i40e_add_rx_frag(struct i40e_ring *rx_ring,
                             struct i40e_rx_buffer *rx_buffer,
-                            unsigned int size,
-                            struct sk_buff *skb)
+                            struct sk_buff *skb,
+                            unsigned int size)
 {
-       struct page *page = rx_buffer->page;
-       unsigned char *va = page_address(page) + rx_buffer->page_offset;
 #if (PAGE_SIZE < 8192)
-       unsigned int truesize = I40E_RXBUFFER_2048;
+       unsigned int truesize = i40e_rx_pg_size(rx_ring) / 2;
 #else
-       unsigned int truesize = ALIGN(size, L1_CACHE_BYTES);
+       unsigned int truesize = SKB_DATA_ALIGN(size + i40e_rx_offset(rx_ring));
 #endif
-       unsigned int pull_len;
-
-       if (unlikely(skb_is_nonlinear(skb)))
-               goto add_tail_frag;
-
-       /* will the data fit in the skb we allocated? if so, just
-        * copy it as it is pretty small anyway
-        */
-       if (size <= I40E_RX_HDR_SIZE) {
-               memcpy(__skb_put(skb, size), va, ALIGN(size, sizeof(long)));
 
-               /* page is reusable, we can reuse buffer as-is */
-               if (likely(i40e_page_is_reusable(page)))
-                       return true;
+       skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_buffer->page,
+                       rx_buffer->page_offset, size, truesize);
 
-               /* this page cannot be reused so discard it */
-               __free_pages(page, 0);
-               return false;
-       }
+       /* page is being used so we must update the page offset */
+#if (PAGE_SIZE < 8192)
+       rx_buffer->page_offset ^= truesize;
+#else
+       rx_buffer->page_offset += truesize;
+#endif
+}
 
-       /* we need the header to contain the greater of either
-        * ETH_HLEN or 60 bytes if the skb->len is less than
-        * 60 for skb_pad.
-        */
-       pull_len = eth_get_headlen(va, I40E_RX_HDR_SIZE);
+/**
+ * i40e_get_rx_buffer - Fetch Rx buffer and synchronize data for use
+ * @rx_ring: rx descriptor ring to transact packets on
+ * @size: size of buffer to add to skb
+ *
+ * This function will pull an Rx buffer from the ring and synchronize it
+ * for use by the CPU.
+ */
+static struct i40e_rx_buffer *i40e_get_rx_buffer(struct i40e_ring *rx_ring,
+                                                const unsigned int size)
+{
+       struct i40e_rx_buffer *rx_buffer;
 
-       /* align pull length to size of long to optimize
-        * memcpy performance
-        */
-       memcpy(__skb_put(skb, pull_len), va, ALIGN(pull_len, sizeof(long)));
+       rx_buffer = &rx_ring->rx_bi[rx_ring->next_to_clean];
+       prefetchw(rx_buffer->page);
 
-       /* update all of the pointers */
-       va += pull_len;
-       size -= pull_len;
+       /* we are reusing so sync this buffer for CPU use */
+       dma_sync_single_range_for_cpu(rx_ring->dev,
+                                     rx_buffer->dma,
+                                     rx_buffer->page_offset,
+                                     size,
+                                     DMA_FROM_DEVICE);
 
-add_tail_frag:
-       skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page,
-                       (unsigned long)va & ~PAGE_MASK, size, truesize);
+       /* We have pulled a buffer for use, so decrement pagecnt_bias */
+       rx_buffer->pagecnt_bias--;
 
-       return i40e_can_reuse_rx_page(rx_buffer, page, truesize);
+       return rx_buffer;
 }
 
 /**
- * i40evf_fetch_rx_buffer - Allocate skb and populate it
+ * i40e_construct_skb - Allocate skb and populate it
  * @rx_ring: rx descriptor ring to transact packets on
- * @rx_desc: descriptor containing info written by hardware
+ * @rx_buffer: rx buffer to pull data from
+ * @size: size of buffer to add to skb
  *
- * This function allocates an skb on the fly, and populates it with the page
- * data from the current receive descriptor, taking care to set up the skb
- * correctly, as well as handling calling the page recycle function if
- * necessary.
+ * This function allocates an skb.  It then populates it with the page
+ * data from the current receive descriptor, taking care to set up the
+ * skb correctly.
  */
-static inline
-struct sk_buff *i40evf_fetch_rx_buffer(struct i40e_ring *rx_ring,
-                                      union i40e_rx_desc *rx_desc,
-                                      struct sk_buff *skb)
+static struct sk_buff *i40e_construct_skb(struct i40e_ring *rx_ring,
+                                         struct i40e_rx_buffer *rx_buffer,
+                                         unsigned int size)
 {
-       u64 local_status_error_len =
-               le64_to_cpu(rx_desc->wb.qword1.status_error_len);
-       unsigned int size =
-               (local_status_error_len & I40E_RXD_QW1_LENGTH_PBUF_MASK) >>
-               I40E_RXD_QW1_LENGTH_PBUF_SHIFT;
-       struct i40e_rx_buffer *rx_buffer;
-       struct page *page;
+       void *va = page_address(rx_buffer->page) + rx_buffer->page_offset;
+#if (PAGE_SIZE < 8192)
+       unsigned int truesize = i40e_rx_pg_size(rx_ring) / 2;
+#else
+       unsigned int truesize = SKB_DATA_ALIGN(size);
+#endif
+       unsigned int headlen;
+       struct sk_buff *skb;
 
-       rx_buffer = &rx_ring->rx_bi[rx_ring->next_to_clean];
-       page = rx_buffer->page;
-       prefetchw(page);
+       /* prefetch first cache line of first page */
+       prefetch(va);
+#if L1_CACHE_BYTES < 128
+       prefetch(va + L1_CACHE_BYTES);
+#endif
+
+       /* allocate a skb to store the frags */
+       skb = __napi_alloc_skb(&rx_ring->q_vector->napi,
+                              I40E_RX_HDR_SIZE,
+                              GFP_ATOMIC | __GFP_NOWARN);
+       if (unlikely(!skb))
+               return NULL;
+
+       /* Determine available headroom for copy */
+       headlen = size;
+       if (headlen > I40E_RX_HDR_SIZE)
+               headlen = eth_get_headlen(va, I40E_RX_HDR_SIZE);
+
+       /* align pull length to size of long to optimize memcpy performance */
+       memcpy(__skb_put(skb, headlen), va, ALIGN(headlen, sizeof(long)));
+
+       /* update all of the pointers */
+       size -= headlen;
+       if (size) {
+               skb_add_rx_frag(skb, 0, rx_buffer->page,
+                               rx_buffer->page_offset + headlen,
+                               size, truesize);
+
+               /* buffer is used by skb, update page_offset */
+#if (PAGE_SIZE < 8192)
+               rx_buffer->page_offset ^= truesize;
+#else
+               rx_buffer->page_offset += truesize;
+#endif
+       } else {
+               /* buffer is unused, reset bias back to rx_buffer */
+               rx_buffer->pagecnt_bias++;
+       }
 
-       if (likely(!skb)) {
-               void *page_addr = page_address(page) + rx_buffer->page_offset;
+       return skb;
+}
+
+/**
+ * i40e_build_skb - Build skb around an existing buffer
+ * @rx_ring: Rx descriptor ring to transact packets on
+ * @rx_buffer: Rx buffer to pull data from
+ * @size: size of buffer to add to skb
+ *
+ * This function builds an skb around an existing Rx buffer, taking care
+ * to set up the skb correctly and avoid any memcpy overhead.
+ */
+static struct sk_buff *i40e_build_skb(struct i40e_ring *rx_ring,
+                                     struct i40e_rx_buffer *rx_buffer,
+                                     unsigned int size)
+{
+       void *va = page_address(rx_buffer->page) + rx_buffer->page_offset;
+#if (PAGE_SIZE < 8192)
+       unsigned int truesize = i40e_rx_pg_size(rx_ring) / 2;
+#else
+       unsigned int truesize = SKB_DATA_ALIGN(size);
+#endif
+       struct sk_buff *skb;
 
-               /* prefetch first cache line of first page */
-               prefetch(page_addr);
+       /* prefetch first cache line of first page */
+       prefetch(va);
 #if L1_CACHE_BYTES < 128
-               prefetch(page_addr + L1_CACHE_BYTES);
+       prefetch(va + L1_CACHE_BYTES);
 #endif
+       /* build an skb around the page buffer */
+       skb = build_skb(va - I40E_SKB_PAD, truesize);
+       if (unlikely(!skb))
+               return NULL;
 
-               /* allocate a skb to store the frags */
-               skb = __napi_alloc_skb(&rx_ring->q_vector->napi,
-                                      I40E_RX_HDR_SIZE,
-                                      GFP_ATOMIC | __GFP_NOWARN);
-               if (unlikely(!skb)) {
-                       rx_ring->rx_stats.alloc_buff_failed++;
-                       return NULL;
-               }
+       /* update pointers within the skb to store the data */
+       skb_reserve(skb, I40E_SKB_PAD);
+       __skb_put(skb, size);
 
-               /* we will be copying header into skb->data in
-                * pskb_may_pull so it is in our interest to prefetch
-                * it now to avoid a possible cache miss
-                */
-               prefetchw(skb->data);
-       }
+       /* buffer is used by skb, update page_offset */
+#if (PAGE_SIZE < 8192)
+       rx_buffer->page_offset ^= truesize;
+#else
+       rx_buffer->page_offset += truesize;
+#endif
 
-       /* we are reusing so sync this buffer for CPU use */
-       dma_sync_single_range_for_cpu(rx_ring->dev,
-                                     rx_buffer->dma,
-                                     rx_buffer->page_offset,
-                                     size,
-                                     DMA_FROM_DEVICE);
+       return skb;
+}
 
-       /* pull page into skb */
-       if (i40e_add_rx_frag(rx_ring, rx_buffer, size, skb)) {
+/**
+ * i40e_put_rx_buffer - Clean up used buffer and either recycle or free
+ * @rx_ring: rx descriptor ring to transact packets on
+ * @rx_buffer: rx buffer to pull data from
+ *
+ * This function will clean up the contents of the rx_buffer.  It will
+ * either recycle the buffer or unmap it and free the associated resources.
+ */
+static void i40e_put_rx_buffer(struct i40e_ring *rx_ring,
+                              struct i40e_rx_buffer *rx_buffer)
+{
+       if (i40e_can_reuse_rx_page(rx_buffer)) {
                /* hand second half of page back to the ring */
                i40e_reuse_rx_page(rx_ring, rx_buffer);
                rx_ring->rx_stats.page_reuse_count++;
        } else {
                /* we are not reusing the buffer so unmap it */
-               dma_unmap_page(rx_ring->dev, rx_buffer->dma, PAGE_SIZE,
-                              DMA_FROM_DEVICE);
+               dma_unmap_page_attrs(rx_ring->dev, rx_buffer->dma,
+                                    i40e_rx_pg_size(rx_ring),
+                                    DMA_FROM_DEVICE, I40E_RX_DMA_ATTR);
+               __page_frag_cache_drain(rx_buffer->page,
+                                       rx_buffer->pagecnt_bias);
        }
 
        /* clear contents of buffer_info */
        rx_buffer->page = NULL;
-
-       return skb;
 }
 
 /**
@@ -1221,7 +1302,9 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget)
        bool failure = false;
 
        while (likely(total_rx_packets < budget)) {
+               struct i40e_rx_buffer *rx_buffer;
                union i40e_rx_desc *rx_desc;
+               unsigned int size;
                u16 vlan_tag;
                u8 rx_ptype;
                u64 qword;
@@ -1238,22 +1321,38 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget)
                /* status_error_len will always be zero for unused descriptors
                 * because it's cleared in cleanup, and overlaps with hdr_addr
                 * which is always zero because packet split isn't used, if the
-                * hardware wrote DD then it will be non-zero
+                * hardware wrote DD then the length will be non-zero
                 */
-               if (!i40e_test_staterr(rx_desc,
-                                      BIT(I40E_RX_DESC_STATUS_DD_SHIFT)))
+               qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
+               size = (qword & I40E_RXD_QW1_LENGTH_PBUF_MASK) >>
+                      I40E_RXD_QW1_LENGTH_PBUF_SHIFT;
+               if (!size)
                        break;
 
                /* This memory barrier is needed to keep us from reading
-                * any other fields out of the rx_desc until we know the
-                * DD bit is set.
+                * any other fields out of the rx_desc until we have
+                * verified the descriptor has been written back.
                 */
                dma_rmb();
 
-               skb = i40evf_fetch_rx_buffer(rx_ring, rx_desc, skb);
-               if (!skb)
+               rx_buffer = i40e_get_rx_buffer(rx_ring, size);
+
+               /* retrieve a buffer from the ring */
+               if (skb)
+                       i40e_add_rx_frag(rx_ring, rx_buffer, skb, size);
+               else if (ring_uses_build_skb(rx_ring))
+                       skb = i40e_build_skb(rx_ring, rx_buffer, size);
+               else
+                       skb = i40e_construct_skb(rx_ring, rx_buffer, size);
+
+               /* exit if we failed to retrieve a buffer */
+               if (!skb) {
+                       rx_ring->rx_stats.alloc_buff_failed++;
+                       rx_buffer->pagecnt_bias++;
                        break;
+               }
 
+               i40e_put_rx_buffer(rx_ring, rx_buffer);
                cleaned_count++;
 
                if (i40e_is_non_eop(rx_ring, rx_desc, skb))
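Editor's note: the reworked receive loop separates buffer fetch, skb creation, and buffer disposal, and the skb step is a three-way dispatch. A hedged sketch that restates it as a helper (the called functions are the driver's own from this patch; the wrapper is illustrative):

static struct sk_buff *i40e_rx_make_skb(struct i40e_ring *rx_ring,
					struct i40e_rx_buffer *rx_buffer,
					struct sk_buff *skb,
					unsigned int size)
{
	if (skb)
		/* frame spans descriptors: attach this page as a frag */
		i40e_add_rx_frag(rx_ring, rx_buffer, skb, size);
	else if (ring_uses_build_skb(rx_ring))
		/* headroom was reserved: wrap an skb around the buffer */
		skb = i40e_build_skb(rx_ring, rx_buffer, size);
	else
		/* legacy path: allocate an skb and copy the headers */
		skb = i40e_construct_skb(rx_ring, rx_buffer, size);
	return skb;
}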
@@ -1266,6 +1365,7 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget)
                 */
                if (unlikely(i40e_test_staterr(rx_desc, BIT(I40E_RXD_QW1_ERROR_SHIFT)))) {
                        dev_kfree_skb_any(skb);
+                       skb = NULL;
                        continue;
                }
 
@@ -1980,7 +2080,6 @@ static inline void i40evf_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
        u16 i = tx_ring->next_to_use;
        u32 td_tag = 0;
        dma_addr_t dma;
-       u16 desc_count = 1;
 
        if (tx_flags & I40E_TX_FLAGS_HW_VLAN) {
                td_cmd |= I40E_TX_DESC_CMD_IL2TAG1;
@@ -2016,7 +2115,6 @@ static inline void i40evf_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
 
                        tx_desc++;
                        i++;
-                       desc_count++;
 
                        if (i == tx_ring->count) {
                                tx_desc = I40E_TX_DESC(tx_ring, 0);
@@ -2038,7 +2136,6 @@ static inline void i40evf_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
 
                tx_desc++;
                i++;
-               desc_count++;
 
                if (i == tx_ring->count) {
                        tx_desc = I40E_TX_DESC(tx_ring, 0);
@@ -2064,46 +2161,8 @@ static inline void i40evf_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
 
        i40e_maybe_stop_tx(tx_ring, DESC_NEEDED);
 
-       /* write last descriptor with EOP bit */
-       td_cmd |= I40E_TX_DESC_CMD_EOP;
-
-       /* We can OR these values together as they both are checked against
-        * 4 below and at this point desc_count will be used as a boolean value
-        * after this if/else block.
-        */
-       desc_count |= ++tx_ring->packet_stride;
-
-       /* Algorithm to optimize tail and RS bit setting:
-        * if queue is stopped
-        *      mark RS bit
-        *      reset packet counter
-        * else if xmit_more is supported and is true
-        *      advance packet counter to 4
-        *      reset desc_count to 0
-        *
-        * if desc_count >= 4
-        *      mark RS bit
-        *      reset packet counter
-        * if desc_count > 0
-        *      update tail
-        *
-        * Note: If there are less than 4 descriptors
-        * pending and interrupts were disabled the service task will
-        * trigger a force WB.
-        */
-       if (netif_xmit_stopped(txring_txq(tx_ring))) {
-               goto do_rs;
-       } else if (skb->xmit_more) {
-               /* set stride to arm on next packet and reset desc_count */
-               tx_ring->packet_stride = WB_STRIDE;
-               desc_count = 0;
-       } else if (desc_count >= WB_STRIDE) {
-do_rs:
-               /* write last descriptor with RS bit set */
-               td_cmd |= I40E_TX_DESC_CMD_RS;
-               tx_ring->packet_stride = 0;
-       }
-
+       /* write last descriptor with RS and EOP bits */
+       td_cmd |= I40E_TXD_CMD;
        tx_desc->cmd_type_offset_bsz =
                        build_ctob(td_cmd, td_offset, size, td_tag);
 
@@ -2119,7 +2178,7 @@ do_rs:
        first->next_to_watch = tx_desc;
 
        /* notify HW of packet */
-       if (desc_count) {
+       if (netif_xmit_stopped(txring_txq(tx_ring)) || !skb->xmit_more) {
                writel(i, tx_ring->tail);
 
                /* we need this if more than one processor can write to our tail
index 8274ba68bd32a6583538f7af6fd80417dc25e6ec..901282c87cf6ca67f44edcc51b4f990eedbba1d9 100644 (file)
@@ -104,10 +104,9 @@ enum i40e_dyn_idx_t {
 
 /* Supported Rx Buffer Sizes (a multiple of 128) */
 #define I40E_RXBUFFER_256   256
+#define I40E_RXBUFFER_1536  1536  /* 128B aligned standard Ethernet frame */
 #define I40E_RXBUFFER_2048  2048
-#define I40E_RXBUFFER_3072  3072   /* For FCoE MTU of 2158 */
-#define I40E_RXBUFFER_4096  4096
-#define I40E_RXBUFFER_8192  8192
+#define I40E_RXBUFFER_3072  3072  /* Used for large frames w/ padding */
 #define I40E_MAX_RXBUFFER   9728  /* largest size for single descriptor */
 
 /* NOTE: netdev_alloc_skb reserves up to 64 bytes, NET_IP_ALIGN means we
@@ -120,6 +119,61 @@ enum i40e_dyn_idx_t {
 #define I40E_RX_HDR_SIZE I40E_RXBUFFER_256
 #define i40e_rx_desc i40e_32byte_rx_desc
 
+#define I40E_RX_DMA_ATTR \
+       (DMA_ATTR_SKIP_CPU_SYNC | DMA_ATTR_WEAK_ORDERING)
+
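Editor's note: DMA_ATTR_SKIP_CPU_SYNC hands cache maintenance to the driver, which is why every map/unmap in this patch is paired with explicit sync calls around buffer use. The ownership hand-off, reduced to its core (a sketch using the same kernel DMA API calls as the patch):

/* manual-sync Rx buffer lifecycle under I40E_RX_DMA_ATTR */
dma = dma_map_page_attrs(dev, page, 0, pg_size,
			 DMA_FROM_DEVICE, I40E_RX_DMA_ATTR);
dma_sync_single_range_for_device(dev, dma, offset, len, DMA_FROM_DEVICE);
/* ... NIC DMAs the frame into the buffer ... */
dma_sync_single_range_for_cpu(dev, dma, offset, len, DMA_FROM_DEVICE);
/* ... CPU reads the frame; on recycle, sync for device again ... */
dma_unmap_page_attrs(dev, dma, pg_size, DMA_FROM_DEVICE, I40E_RX_DMA_ATTR);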
+/* Attempt to maximize the headroom available for incoming frames.  We
+ * use a 2K buffer for receives and need 1536/1534 to store the data for
+ * the frame.  This leaves us with 512 bytes of room.  From that we need
+ * to deduct the space needed for the shared info and the padding needed
+ * to IP align the frame.
+ *
+ * Note: For cache line sizes 256 or larger this value is going to end
+ *      up negative.  In these cases we should fall back to the legacy
+ *      receive path.
+ */
+#if (PAGE_SIZE < 8192)
+#define I40E_2K_TOO_SMALL_WITH_PADDING \
+((NET_SKB_PAD + I40E_RXBUFFER_1536) > SKB_WITH_OVERHEAD(I40E_RXBUFFER_2048))
+
+static inline int i40e_compute_pad(int rx_buf_len)
+{
+       int page_size, pad_size;
+
+       page_size = ALIGN(rx_buf_len, PAGE_SIZE / 2);
+       pad_size = SKB_WITH_OVERHEAD(page_size) - rx_buf_len;
+
+       return pad_size;
+}
+
+static inline int i40e_skb_pad(void)
+{
+       int rx_buf_len;
+
+       /* If a 2K buffer cannot handle a standard Ethernet frame then
+        * optimize padding for a 3K buffer instead of a 1.5K buffer.
+        *
+        * For a 3K buffer we need to add enough padding to allow for
+        * tailroom due to NET_IP_ALIGN possibly shifting us out of
+        * cache-line alignment.
+        */
+       if (I40E_2K_TOO_SMALL_WITH_PADDING)
+               rx_buf_len = I40E_RXBUFFER_3072 + SKB_DATA_ALIGN(NET_IP_ALIGN);
+       else
+               rx_buf_len = I40E_RXBUFFER_1536;
+
+       /* if needed make room for NET_IP_ALIGN */
+       rx_buf_len -= NET_IP_ALIGN;
+
+       return i40e_compute_pad(rx_buf_len);
+}
+
+#define I40E_SKB_PAD i40e_skb_pad()
+#else
+#define I40E_2K_TOO_SMALL_WITH_PADDING false
+#define I40E_SKB_PAD (NET_SKB_PAD + NET_IP_ALIGN)
+#endif
+
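Editor's note: a worked instance of the padding math may help. Assuming 4K pages, NET_IP_ALIGN == 2, and SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) coming to 320 bytes (typical for x86_64 kernels of this vintage, but configuration dependent):

/* rx_buf_len = I40E_RXBUFFER_1536 - NET_IP_ALIGN       = 1534
 * page_size  = ALIGN(1534, PAGE_SIZE / 2)              = 2048
 * pad_size   = SKB_WITH_OVERHEAD(2048) - 1534
 *            = (2048 - 320) - 1534                     = 194
 *
 * so I40E_SKB_PAD reserves roughly 194 bytes of headroom per buffer
 * under these assumptions.  With 256-byte or larger cache lines the
 * shared-info overhead grows, the subtraction can go negative, and the
 * legacy receive path is the fallback, as the note above warns.
 */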
 /**
  * i40e_test_staterr - tests bits in Rx descriptor status and error fields
  * @rx_desc: pointer to receive descriptor (in le64 format)
@@ -241,7 +295,12 @@ struct i40e_tx_buffer {
 struct i40e_rx_buffer {
        dma_addr_t dma;
        struct page *page;
-       unsigned int page_offset;
+#if (BITS_PER_LONG > 32) || (PAGE_SIZE >= 65536)
+       __u32 page_offset;
+#else
+       __u16 page_offset;
+#endif
+       __u16 pagecnt_bias;
 };
 
 struct i40e_queue_stats {
@@ -321,7 +380,8 @@ struct i40e_ring {
        u8 packet_stride;
 
        u16 flags;
-#define I40E_TXR_FLAGS_WB_ON_ITR       BIT(0)
+#define I40E_TXR_FLAGS_WB_ON_ITR               BIT(0)
+#define I40E_RXR_FLAGS_BUILD_SKB_ENABLED       BIT(1)
 
        /* stats structs */
        struct i40e_queue_stats stats;
@@ -349,6 +409,21 @@ struct i40e_ring {
                                         */
 } ____cacheline_internodealigned_in_smp;
 
+static inline bool ring_uses_build_skb(struct i40e_ring *ring)
+{
+       return !!(ring->flags & I40E_RXR_FLAGS_BUILD_SKB_ENABLED);
+}
+
+static inline void set_ring_build_skb_enabled(struct i40e_ring *ring)
+{
+       ring->flags |= I40E_RXR_FLAGS_BUILD_SKB_ENABLED;
+}
+
+static inline void clear_ring_build_skb_enabled(struct i40e_ring *ring)
+{
+       ring->flags &= ~I40E_RXR_FLAGS_BUILD_SKB_ENABLED;
+}
+
 enum i40e_latency_range {
        I40E_LOWEST_LATENCY = 0,
        I40E_LOW_LATENCY = 1,
@@ -370,6 +445,17 @@ struct i40e_ring_container {
 #define i40e_for_each_ring(pos, head) \
        for (pos = (head).ring; pos != NULL; pos = pos->next)
 
+static inline unsigned int i40e_rx_pg_order(struct i40e_ring *ring)
+{
+#if (PAGE_SIZE < 8192)
+       if (ring->rx_buf_len > (PAGE_SIZE / 2))
+               return 1;
+#endif
+       return 0;
+}
+
+#define i40e_rx_pg_size(_ring) (PAGE_SIZE << i40e_rx_pg_order(_ring))
+
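Editor's note: with 4K pages, the order/size helpers keep the two-buffers-per-page recycling scheme working for both supported buffer sizes (values below assume PAGE_SIZE == 4096):

/* rx_buf_len == 2048: 2048 <= PAGE_SIZE / 2 -> order 0, pg_size 4096,
 *                     truesize = pg_size / 2 = 2048, 2 buffers/page
 * rx_buf_len == 3072: 3072 >  PAGE_SIZE / 2 -> order 1, pg_size 8192,
 *                     truesize = pg_size / 2 = 4096, 2 buffers/page
 */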
 bool i40evf_alloc_rx_buffers(struct i40e_ring *rxr, u16 cleaned_count);
 netdev_tx_t i40evf_xmit_frame(struct sk_buff *skb, struct net_device *netdev);
 void i40evf_clean_tx_ring(struct i40e_ring *tx_ring);
@@ -384,20 +470,6 @@ u32 i40evf_get_tx_pending(struct i40e_ring *ring, bool in_sw);
 int __i40evf_maybe_stop_tx(struct i40e_ring *tx_ring, int size);
 bool __i40evf_chk_linearize(struct sk_buff *skb);
 
-/**
- * i40e_get_head - Retrieve head from head writeback
- * @tx_ring: Tx ring to fetch head of
- *
- * Returns value of Tx ring head based on value stored
- * in head write-back location
- **/
-static inline u32 i40e_get_head(struct i40e_ring *tx_ring)
-{
-       void *head = (struct i40e_tx_desc *)tx_ring->desc + tx_ring->count;
-
-       return le32_to_cpu(*(volatile __le32 *)head);
-}
-
 /**
  * i40e_xmit_descriptor_count - calculate number of Tx descriptors needed
  * @skb:     send buffer
@@ -460,19 +532,7 @@ static inline bool i40e_chk_linearize(struct sk_buff *skb, int count)
        /* we can support up to 8 data buffers for a single send */
        return count != I40E_MAX_BUFFER_TXD;
 }
-
-/**
- * i40e_rx_is_fcoe - returns true if the Rx packet type is FCoE
- * @ptype: the packet type field from Rx descriptor write-back
- **/
-static inline bool i40e_rx_is_fcoe(u16 ptype)
-{
-       return (ptype >= I40E_RX_PTYPE_L2_FCOE_PAY3) &&
-              (ptype <= I40E_RX_PTYPE_L2_FCOE_VFT_FCOTHER);
-}
-
 /**
- * txring_txq - Find the netdev Tx ring based on the i40e Tx ring
  * @ring: Tx ring to find the netdev equivalent of
  **/
 static inline struct netdev_queue *txring_txq(const struct i40e_ring *ring)
index d38a2b2aea2b20d2b0bbf9700c90578ba9fc0197..c5ad0388c3d577be4991ff0b36283151f0be3824 100644 (file)
@@ -81,7 +81,9 @@ enum i40e_virtchnl_ops {
        I40E_VIRTCHNL_OP_GET_STATS = 15,
        I40E_VIRTCHNL_OP_FCOE = 16,
        I40E_VIRTCHNL_OP_EVENT = 17, /* must ALWAYS be 17 */
+       I40E_VIRTCHNL_OP_IWARP = 20,
        I40E_VIRTCHNL_OP_CONFIG_IWARP_IRQ_MAP = 21,
+       I40E_VIRTCHNL_OP_RELEASE_IWARP_IRQ_MAP = 22,
        I40E_VIRTCHNL_OP_CONFIG_RSS_KEY = 23,
        I40E_VIRTCHNL_OP_CONFIG_RSS_LUT = 24,
        I40E_VIRTCHNL_OP_GET_RSS_HENA_CAPS = 25,
@@ -161,7 +163,8 @@ struct i40e_virtchnl_vsi_resource {
 #define I40E_VIRTCHNL_VF_OFFLOAD_RX_POLLING    0x00020000
 #define I40E_VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2 0x00040000
 #define I40E_VIRTCHNL_VF_OFFLOAD_RSS_PF                0X00080000
-#define I40E_VIRTCHNL_VF_OFFLOAD_ENCAP_CSUM    0X00100000
+#define I40E_VIRTCHNL_VF_OFFLOAD_ENCAP         0X00100000
+#define I40E_VIRTCHNL_VF_OFFLOAD_ENCAP_CSUM    0X00200000
 
 #define I40E_VF_BASE_MODE_OFFLOADS (I40E_VIRTCHNL_VF_OFFLOAD_L2 | \
                                    I40E_VIRTCHNL_VF_OFFLOAD_VLAN | \
@@ -393,6 +396,37 @@ struct i40e_virtchnl_pf_event {
        int severity;
 };
 
+/* I40E_VIRTCHNL_OP_CONFIG_IWARP_IRQ_MAP
+ * The VF uses this message to request that the PF map IWARP vectors to
+ * IWARP queues.  The request originates from the VF IWARP driver through
+ * a client interface between the VF LAN and VF IWARP drivers.
+ * A vector could have both an AEQ and a CEQ attached to it.  Since there
+ * is a single AEQ per VF IWARP instance, most vectors will carry an
+ * INVALID_IDX for the aeq and a valid idx for the ceq.
+ * There will never be multiple CEQs attached to a single vector.
+ * The PF configures the interrupt mapping and returns status.
+ */
+
+/* HW does not define a type value for AEQ; only for RX/TX and CEQ.
+ * In order for us to keep the interface simple, SW will define a
+ * unique type value for AEQ.
+ */
+#define I40E_QUEUE_TYPE_PE_AEQ  0x80
+#define I40E_QUEUE_INVALID_IDX  0xFFFF
+
+struct i40e_virtchnl_iwarp_qv_info {
+       u32 v_idx; /* msix_vector */
+       u16 ceq_idx;
+       u16 aeq_idx;
+       u8 itr_idx;
+};
+
+struct i40e_virtchnl_iwarp_qvlist_info {
+       u32 num_vectors;
+       struct i40e_virtchnl_iwarp_qv_info qv_info[1];
+};
+
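Editor's note: because qv_info[1] is a pre-C99 flexible array, the wire size of a qvlist message is the struct size plus (num_vectors - 1) further entries, which is exactly the msg_size arithmetic i40evf_client_setup_qvlist() performs below. A hedged allocation sketch (the helper name is illustrative):

static struct i40e_virtchnl_iwarp_qvlist_info *i40e_alloc_qvlist(u32 n)
{
	/* "- 1" accounts for the qv_info entry embedded in the struct */
	size_t len = sizeof(struct i40e_virtchnl_iwarp_qvlist_info) +
		     (n - 1) * sizeof(struct i40e_virtchnl_iwarp_qv_info);
	struct i40e_virtchnl_iwarp_qvlist_info *q = kzalloc(len, GFP_KERNEL);

	if (q)
		q->num_vectors = n;
	return q;
}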
 /* VF reset states - these are written into the RSTAT register:
  * I40E_VFGEN_RSTAT1 on the PF
  * I40E_VFGEN_RSTAT on the VF
index 00c42d80327668ef3cc8c614570c44ae58738791..35ded19e9cc207a43075ff0b549b52b2d5f22f2f 100644 (file)
@@ -60,6 +60,7 @@ struct i40e_vsi {
        int base_vector;
        u16 work_limit;
        u16 qs_handle;
+       void *priv;     /* client driver data reference. */
 };
 
 /* How many Rx Buffers do we bundle into one write to the hardware ? */
@@ -71,10 +72,6 @@ struct i40e_vsi {
 #define I40EVF_MAX_RXD         4096
 #define I40EVF_MIN_RXD         64
 #define I40EVF_REQ_DESCRIPTOR_MULTIPLE 32
-
-/* Supported Rx Buffer Sizes */
-#define I40EVF_RXBUFFER_2048   2048
-#define I40EVF_MAX_RXBUFFER    16384  /* largest size for single descriptor */
 #define I40EVF_MAX_AQ_BUF_SIZE 4096
 #define I40EVF_AQ_LEN          32
 #define I40EVF_AQ_MAX_ERR      20 /* times to try before resetting AQ */
@@ -169,6 +166,7 @@ enum i40evf_state_t {
 
 enum i40evf_critical_section_t {
        __I40EVF_IN_CRITICAL_TASK,      /* cannot be interrupted */
+       __I40EVF_IN_CLIENT_TASK,
 };
 /* make common code happy */
 #define __I40E_DOWN __I40EVF_DOWN
@@ -178,6 +176,7 @@ struct i40evf_adapter {
        struct timer_list watchdog_timer;
        struct work_struct reset_task;
        struct work_struct adminq_task;
+       struct delayed_work client_task;
        struct delayed_work init_task;
        struct i40e_q_vector *q_vectors;
        struct list_head vlan_filter_list;
@@ -195,7 +194,10 @@ struct i40evf_adapter {
        u64 hw_csum_rx_error;
        u32 rx_desc_count;
        int num_msix_vectors;
+       int num_iwarp_msix;
+       int iwarp_base_vector;
        u32 client_pending;
+       struct i40e_client_instance *cinst;
        struct msix_entry *msix_entries;
 
        u32 flags;
@@ -203,7 +205,6 @@ struct i40evf_adapter {
 #define I40EVF_FLAG_IN_NETPOLL                 BIT(4)
 #define I40EVF_FLAG_IMIR_ENABLED               BIT(5)
 #define I40EVF_FLAG_MQ_CAPABLE                 BIT(6)
-#define I40EVF_FLAG_NEED_LINK_UPDATE           BIT(7)
 #define I40EVF_FLAG_PF_COMMS_FAILED            BIT(8)
 #define I40EVF_FLAG_RESET_PENDING              BIT(9)
 #define I40EVF_FLAG_RESET_NEEDED               BIT(10)
@@ -211,8 +212,12 @@ struct i40evf_adapter {
 #define I40EVF_FLAG_OUTER_UDP_CSUM_CAPABLE     BIT(12)
 #define I40EVF_FLAG_ADDR_SET_BY_PF             BIT(13)
 #define I40EVF_FLAG_SERVICE_CLIENT_REQUESTED   BIT(14)
-#define I40EVF_FLAG_PROMISC_ON                 BIT(15)
-#define I40EVF_FLAG_ALLMULTI_ON                        BIT(16)
+#define I40EVF_FLAG_CLIENT_NEEDS_OPEN          BIT(15)
+#define I40EVF_FLAG_CLIENT_NEEDS_CLOSE         BIT(16)
+#define I40EVF_FLAG_CLIENT_NEEDS_L2_PARAMS     BIT(17)
+#define I40EVF_FLAG_PROMISC_ON                 BIT(18)
+#define I40EVF_FLAG_ALLMULTI_ON                        BIT(19)
+#define I40EVF_FLAG_LEGACY_RX                  BIT(20)
 /* duplicates for common code */
 #define I40E_FLAG_FDIR_ATR_ENABLED             0
 #define I40E_FLAG_DCB_ENABLED                  0
@@ -220,6 +225,7 @@ struct i40evf_adapter {
 #define I40E_FLAG_RX_CSUM_ENABLED              I40EVF_FLAG_RX_CSUM_ENABLED
 #define I40E_FLAG_WB_ON_ITR_CAPABLE            I40EVF_FLAG_WB_ON_ITR_CAPABLE
 #define I40E_FLAG_OUTER_UDP_CSUM_CAPABLE       I40EVF_FLAG_OUTER_UDP_CSUM_CAPABLE
+#define I40E_FLAG_LEGACY_RX                    I40EVF_FLAG_LEGACY_RX
        /* flags for admin queue service task */
        u32 aq_required;
 #define I40EVF_FLAG_AQ_ENABLE_QUEUES           BIT(0)
@@ -258,10 +264,11 @@ struct i40evf_adapter {
        bool link_up;
        enum i40e_aq_link_speed link_speed;
        enum i40e_virtchnl_ops current_op;
-#define CLIENT_ENABLED(_a) ((_a)->vf_res ? \
+#define CLIENT_ALLOWED(_a) ((_a)->vf_res ? \
                            (_a)->vf_res->vf_offload_flags & \
                                I40E_VIRTCHNL_VF_OFFLOAD_IWARP : \
                            0)
+#define CLIENT_ENABLED(_a) ((_a)->cinst)
 /* RSS by the PF should be preferred over RSS via other methods. */
 #define RSS_PF(_a) ((_a)->vf_res->vf_offload_flags & \
                    I40E_VIRTCHNL_VF_OFFLOAD_RSS_PF)
@@ -292,6 +299,12 @@ struct i40evf_adapter {
 
 /* Ethtool Private Flags */
 
+/* lan device */
+struct i40e_device {
+       struct list_head list;
+       struct i40evf_adapter *vf;
+};
+
 /* needed by i40evf_ethtool.c */
 extern char i40evf_driver_name[];
 extern const char i40evf_driver_version[];
@@ -337,4 +350,11 @@ void i40evf_virtchnl_completion(struct i40evf_adapter *adapter,
                                enum i40e_virtchnl_ops v_opcode,
                                i40e_status v_retval, u8 *msg, u16 msglen);
 int i40evf_config_rss(struct i40evf_adapter *adapter);
+int i40evf_lan_add_device(struct i40evf_adapter *adapter);
+int i40evf_lan_del_device(struct i40evf_adapter *adapter);
+void i40evf_client_subtask(struct i40evf_adapter *adapter);
+void i40evf_notify_client_message(struct i40e_vsi *vsi, u8 *msg, u16 len);
+void i40evf_notify_client_l2_params(struct i40e_vsi *vsi);
+void i40evf_notify_client_open(struct i40e_vsi *vsi);
+void i40evf_notify_client_close(struct i40e_vsi *vsi, bool reset);
 #endif /* _I40EVF_H_ */
diff --git a/drivers/net/ethernet/intel/i40evf/i40evf_client.c b/drivers/net/ethernet/intel/i40evf/i40evf_client.c
new file mode 100644 (file)
index 0000000..ee73768
--- /dev/null
@@ -0,0 +1,564 @@
+#include <linux/list.h>
+#include <linux/errno.h>
+
+#include "i40evf.h"
+#include "i40e_prototype.h"
+#include "i40evf_client.h"
+
+static
+const char i40evf_client_interface_version_str[] = I40EVF_CLIENT_VERSION_STR;
+static struct i40e_client *vf_registered_client;
+static LIST_HEAD(i40evf_devices);
+static DEFINE_MUTEX(i40evf_device_mutex);
+
+static u32 i40evf_client_virtchnl_send(struct i40e_info *ldev,
+                                      struct i40e_client *client,
+                                      u8 *msg, u16 len);
+
+static int i40evf_client_setup_qvlist(struct i40e_info *ldev,
+                                     struct i40e_client *client,
+                                     struct i40e_qvlist_info *qvlist_info);
+
+static struct i40e_ops i40evf_lan_ops = {
+       .virtchnl_send = i40evf_client_virtchnl_send,
+       .setup_qvlist = i40evf_client_setup_qvlist,
+};
+
+/**
+ * i40evf_notify_client_message - call the client message receive callback
+ * @vsi: the VSI associated with this client
+ * @msg: message buffer
+ * @len: length of message
+ *
+ * If there is a client to this VSI, call the client
+ **/
+void i40evf_notify_client_message(struct i40e_vsi *vsi, u8 *msg, u16 len)
+{
+       struct i40e_client_instance *cinst;
+
+       if (!vsi)
+               return;
+
+       cinst = vsi->back->cinst;
+       if (!cinst || !cinst->client || !cinst->client->ops ||
+           !cinst->client->ops->virtchnl_receive) {
+               dev_dbg(&vsi->back->pdev->dev,
+                       "Cannot locate client instance virtchnl_receive function\n");
+               return;
+       }
+       cinst->client->ops->virtchnl_receive(&cinst->lan_info,  cinst->client,
+                                            msg, len);
+}
+
+/**
+ * i40evf_notify_client_l2_params - call the client notify callback
+ * @vsi: the VSI with l2 param changes
+ *
+ * If there is a client to this VSI, call the client
+ **/
+void i40evf_notify_client_l2_params(struct i40e_vsi *vsi)
+{
+       struct i40e_client_instance *cinst;
+       struct i40e_params params;
+
+       if (!vsi)
+               return;
+
+       cinst = vsi->back->cinst;
+       memset(&params, 0, sizeof(params));
+       params.mtu = vsi->netdev->mtu;
+       params.link_up = vsi->back->link_up;
+       params.qos.prio_qos[0].qs_handle = vsi->qs_handle;
+
+       if (!cinst || !cinst->client || !cinst->client->ops ||
+           !cinst->client->ops->l2_param_change) {
+               dev_dbg(&vsi->back->pdev->dev,
+                       "Cannot locate client instance l2_param_change function\n");
+               return;
+       }
+       cinst->client->ops->l2_param_change(&cinst->lan_info, cinst->client,
+                                           &params);
+}
+
+/**
+ * i40evf_notify_client_open - call the client open callback
+ * @vsi: the VSI with netdev opened
+ *
+ * If there is a client to this netdev, call the client with open
+ **/
+void i40evf_notify_client_open(struct i40e_vsi *vsi)
+{
+       struct i40evf_adapter *adapter = vsi->back;
+       struct i40e_client_instance *cinst = adapter->cinst;
+       int ret;
+
+       if (!cinst || !cinst->client || !cinst->client->ops ||
+           !cinst->client->ops->open) {
+               dev_dbg(&vsi->back->pdev->dev,
+                       "Cannot locate client instance open function\n");
+               return;
+       }
+       if (!(test_bit(__I40E_CLIENT_INSTANCE_OPENED, &cinst->state))) {
+               ret = cinst->client->ops->open(&cinst->lan_info, cinst->client);
+               if (!ret)
+                       set_bit(__I40E_CLIENT_INSTANCE_OPENED, &cinst->state);
+       }
+}
+
+/**
+ * i40evf_client_release_qvlist - send a message to the PF to release iwarp qv map
+ * @ldev: pointer to L2 context.
+ *
+ * Return 0 on success or < 0 on error
+ **/
+static int i40evf_client_release_qvlist(struct i40e_info *ldev)
+{
+       struct i40evf_adapter *adapter = ldev->vf;
+       i40e_status err;
+
+       if (adapter->aq_required)
+               return -EAGAIN;
+
+       err = i40e_aq_send_msg_to_pf(&adapter->hw,
+                       I40E_VIRTCHNL_OP_RELEASE_IWARP_IRQ_MAP,
+                       I40E_SUCCESS, NULL, 0, NULL);
+
+       if (err)
+               dev_err(&adapter->pdev->dev,
+                       "Unable to send iWarp vector release message to PF, error %d, aq status %d\n",
+                       err, adapter->hw.aq.asq_last_status);
+
+       return err;
+}
+
+/**
+ * i40evf_notify_client_close - call the client close callback
+ * @vsi: the VSI with netdev closed
+ * @reset: true when close called due to reset pending
+ *
+ * If there is a client to this netdev, call the client with close
+ **/
+void i40evf_notify_client_close(struct i40e_vsi *vsi, bool reset)
+{
+       struct i40evf_adapter *adapter = vsi->back;
+       struct i40e_client_instance *cinst = adapter->cinst;
+
+       if (!cinst || !cinst->client || !cinst->client->ops ||
+           !cinst->client->ops->close) {
+               dev_dbg(&vsi->back->pdev->dev,
+                       "Cannot locate client instance close function\n");
+               return;
+       }
+       cinst->client->ops->close(&cinst->lan_info, cinst->client, reset);
+       i40evf_client_release_qvlist(&cinst->lan_info);
+       clear_bit(__I40E_CLIENT_INSTANCE_OPENED, &cinst->state);
+}
+
+/**
+ * i40evf_client_add_instance - add a client instance to the instance list
+ * @adapter: pointer to the board struct
+ *
+ * Returns cinst ptr on success, NULL on failure
+ **/
+static struct i40e_client_instance *
+i40evf_client_add_instance(struct i40evf_adapter *adapter)
+{
+       struct i40e_client_instance *cinst = NULL;
+       struct netdev_hw_addr *mac = NULL;
+       struct i40e_vsi *vsi = &adapter->vsi;
+       int i;
+
+       if (!vf_registered_client)
+               goto out;
+
+       if (adapter->cinst) {
+               cinst = adapter->cinst;
+               goto out;
+       }
+
+       cinst = kzalloc(sizeof(*cinst), GFP_KERNEL);
+       if (!cinst)
+               goto out;
+
+       cinst->lan_info.vf = (void *)adapter;
+       cinst->lan_info.netdev = vsi->netdev;
+       cinst->lan_info.pcidev = adapter->pdev;
+       cinst->lan_info.fid = 0;
+       cinst->lan_info.ftype = I40E_CLIENT_FTYPE_VF;
+       cinst->lan_info.hw_addr = adapter->hw.hw_addr;
+       cinst->lan_info.ops = &i40evf_lan_ops;
+       cinst->lan_info.version.major = I40EVF_CLIENT_VERSION_MAJOR;
+       cinst->lan_info.version.minor = I40EVF_CLIENT_VERSION_MINOR;
+       cinst->lan_info.version.build = I40EVF_CLIENT_VERSION_BUILD;
+       set_bit(__I40E_CLIENT_INSTANCE_NONE, &cinst->state);
+
+       cinst->lan_info.msix_count = adapter->num_iwarp_msix;
+       cinst->lan_info.msix_entries =
+                       &adapter->msix_entries[adapter->iwarp_base_vector];
+
+       for (i = 0; i < I40E_MAX_USER_PRIORITY; i++) {
+               cinst->lan_info.params.qos.prio_qos[i].tc = 0;
+               cinst->lan_info.params.qos.prio_qos[i].qs_handle =
+                                                               vsi->qs_handle;
+       }
+
+       mac = list_first_entry(&cinst->lan_info.netdev->dev_addrs.list,
+                              struct netdev_hw_addr, list);
+       if (mac)
+               ether_addr_copy(cinst->lan_info.lanmac, mac->addr);
+       else
+               dev_err(&adapter->pdev->dev, "MAC address list is empty!\n");
+
+       cinst->client = vf_registered_client;
+       adapter->cinst = cinst;
+out:
+       return cinst;
+}
+
+/**
+ * i40evf_client_del_instance - removes a client instance from the list
+ * @adapter: pointer to the board struct
+ *
+ **/
+static
+void i40evf_client_del_instance(struct i40evf_adapter *adapter)
+{
+       kfree(adapter->cinst);
+       adapter->cinst = NULL;
+}
+
+/**
+ * i40evf_client_subtask - client maintenance work
+ * @adapter: board private structure
+ **/
+void i40evf_client_subtask(struct i40evf_adapter *adapter)
+{
+       struct i40e_client *client = vf_registered_client;
+       struct i40e_client_instance *cinst;
+       int ret = 0;
+
+       if (adapter->state < __I40EVF_DOWN)
+               return;
+
+       /* first check client is registered */
+       if (!client)
+               return;
+
+       /* Add the client instance to the instance list */
+       cinst = i40evf_client_add_instance(adapter);
+       if (!cinst)
+               return;
+
+       dev_info(&adapter->pdev->dev, "Added instance of Client %s\n",
+                client->name);
+
+       if (!test_bit(__I40E_CLIENT_INSTANCE_OPENED, &cinst->state)) {
+               /* Send an Open request to the client */
+
+               if (client->ops && client->ops->open)
+                       ret = client->ops->open(&cinst->lan_info, client);
+               if (!ret)
+                       set_bit(__I40E_CLIENT_INSTANCE_OPENED,
+                               &cinst->state);
+               else
+                       /* remove client instance */
+                       i40evf_client_del_instance(adapter);
+       }
+}
+
+/**
+ * i40evf_lan_add_device - add a lan device struct to the list of lan devices
+ * @adapter: pointer to the board struct
+ *
+ * Returns 0 on success or non-0 on error
+ **/
+int i40evf_lan_add_device(struct i40evf_adapter *adapter)
+{
+       struct i40e_device *ldev;
+       int ret = 0;
+
+       mutex_lock(&i40evf_device_mutex);
+       list_for_each_entry(ldev, &i40evf_devices, list) {
+               if (ldev->vf == adapter) {
+                       ret = -EEXIST;
+                       goto out;
+               }
+       }
+       ldev = kzalloc(sizeof(*ldev), GFP_KERNEL);
+       if (!ldev) {
+               ret = -ENOMEM;
+               goto out;
+       }
+       ldev->vf = adapter;
+       INIT_LIST_HEAD(&ldev->list);
+       list_add(&ldev->list, &i40evf_devices);
+       dev_info(&adapter->pdev->dev, "Added LAN device bus=0x%02x dev=0x%02x func=0x%02x\n",
+                adapter->hw.bus.bus_id, adapter->hw.bus.device,
+                adapter->hw.bus.func);
+
+       /* Since client registration may have happened before this device was
+        * added, schedule a subtask to initiate the clients.
+        */
+       adapter->flags |= I40EVF_FLAG_SERVICE_CLIENT_REQUESTED;
+
+out:
+       mutex_unlock(&i40evf_device_mutex);
+       return ret;
+}
+
+/**
+ * i40evf_lan_del_device - removes a lan device from the device list
+ * @adapter: pointer to the board struct
+ *
+ * Returns 0 on success or non-0 on error
+ **/
+int i40evf_lan_del_device(struct i40evf_adapter *adapter)
+{
+       struct i40e_device *ldev, *tmp;
+       int ret = -ENODEV;
+
+       mutex_lock(&i40evf_device_mutex);
+       list_for_each_entry_safe(ldev, tmp, &i40evf_devices, list) {
+               if (ldev->vf == adapter) {
+                       dev_info(&adapter->pdev->dev,
+                                "Deleted LAN device bus=0x%02x dev=0x%02x func=0x%02x\n",
+                                adapter->hw.bus.bus_id, adapter->hw.bus.device,
+                                adapter->hw.bus.func);
+                       list_del(&ldev->list);
+                       kfree(ldev);
+                       ret = 0;
+                       break;
+               }
+       }
+
+       mutex_unlock(&i40evf_device_mutex);
+       return ret;
+}
+
+/**
+ * i40evf_client_release - release client specific resources
+ * @client: pointer to the registered client
+ *
+ **/
+static void i40evf_client_release(struct i40e_client *client)
+{
+       struct i40e_client_instance *cinst;
+       struct i40e_device *ldev;
+       struct i40evf_adapter *adapter;
+
+       mutex_lock(&i40evf_device_mutex);
+       list_for_each_entry(ldev, &i40evf_devices, list) {
+               adapter = ldev->vf;
+               cinst = adapter->cinst;
+               if (!cinst)
+                       continue;
+               if (test_bit(__I40E_CLIENT_INSTANCE_OPENED, &cinst->state)) {
+                       if (client->ops && client->ops->close)
+                               client->ops->close(&cinst->lan_info, client,
+                                                  false);
+                       i40evf_client_release_qvlist(&cinst->lan_info);
+                       clear_bit(__I40E_CLIENT_INSTANCE_OPENED, &cinst->state);
+
+                       dev_warn(&adapter->pdev->dev,
+                                "Client %s instance closed\n", client->name);
+               }
+               /* delete the client instance */
+               i40evf_client_del_instance(adapter);
+               dev_info(&adapter->pdev->dev, "Deleted client instance of Client %s\n",
+                        client->name);
+       }
+       mutex_unlock(&i40evf_device_mutex);
+}
+
+/**
+ * i40evf_client_prepare - prepare client specific resources
+ * @client: pointer to the registered client
+ *
+ **/
+static void i40evf_client_prepare(struct i40e_client *client)
+{
+       struct i40e_device *ldev;
+       struct i40evf_adapter *adapter;
+
+       mutex_lock(&i40evf_device_mutex);
+       list_for_each_entry(ldev, &i40evf_devices, list) {
+               adapter = ldev->vf;
+               /* Signal the watchdog to service the client */
+               adapter->flags |= I40EVF_FLAG_SERVICE_CLIENT_REQUESTED;
+       }
+       mutex_unlock(&i40evf_device_mutex);
+}
+
+/**
+ * i40evf_client_virtchnl_send - send a message to the PF instance
+ * @ldev: pointer to L2 context.
+ * @client: Client pointer.
+ * @msg: pointer to message buffer
+ * @len: message length
+ *
+ * Return 0 on success or < 0 on error
+ **/
+static u32 i40evf_client_virtchnl_send(struct i40e_info *ldev,
+                                      struct i40e_client *client,
+                                      u8 *msg, u16 len)
+{
+       struct i40evf_adapter *adapter = ldev->vf;
+       i40e_status err;
+
+       if (adapter->aq_required)
+               return -EAGAIN;
+
+       err = i40e_aq_send_msg_to_pf(&adapter->hw, I40E_VIRTCHNL_OP_IWARP,
+                                    I40E_SUCCESS, msg, len, NULL);
+       if (err)
+               dev_err(&adapter->pdev->dev, "Unable to send iWarp message to PF, error %d, aq status %d\n",
+                       err, adapter->hw.aq.asq_last_status);
+
+       return err;
+}
+
+/**
+ * i40evf_client_setup_qvlist - send a message to the PF to setup iwarp qv map
+ * @ldev: pointer to L2 context.
+ * @client: Client pointer.
+ * @qv_info: queue and vector list
+ *
+ * Return 0 on success or < 0 on error
+ **/
+static int i40evf_client_setup_qvlist(struct i40e_info *ldev,
+                                     struct i40e_client *client,
+                                     struct i40e_qvlist_info *qvlist_info)
+{
+       struct i40e_virtchnl_iwarp_qvlist_info *v_qvlist_info;
+       struct i40evf_adapter *adapter = ldev->vf;
+       struct i40e_qv_info *qv_info;
+       i40e_status err;
+       u32 v_idx, i;
+       u32 msg_size;
+
+       if (adapter->aq_required)
+               return -EAGAIN;
+
+       /* A quick check on whether the vectors belong to the client */
+       for (i = 0; i < qvlist_info->num_vectors; i++) {
+               qv_info = &qvlist_info->qv_info[i];
+               if (!qv_info)
+                       continue;
+               v_idx = qv_info->v_idx;
+               if ((v_idx >=
+                   (adapter->iwarp_base_vector + adapter->num_iwarp_msix)) ||
+                   (v_idx < adapter->iwarp_base_vector))
+                       return -EINVAL;
+       }
+
+       v_qvlist_info = (struct i40e_virtchnl_iwarp_qvlist_info *)qvlist_info;
+       msg_size = sizeof(struct i40e_virtchnl_iwarp_qvlist_info) +
+                       (sizeof(struct i40e_virtchnl_iwarp_qv_info) *
+                       (v_qvlist_info->num_vectors - 1));
+
+       adapter->client_pending |= BIT(I40E_VIRTCHNL_OP_CONFIG_IWARP_IRQ_MAP);
+       err = i40e_aq_send_msg_to_pf(&adapter->hw,
+                       I40E_VIRTCHNL_OP_CONFIG_IWARP_IRQ_MAP,
+                       I40E_SUCCESS, (u8 *)v_qvlist_info, msg_size, NULL);
+
+       if (err) {
+               dev_err(&adapter->pdev->dev,
+                       "Unable to send iWarp vector config message to PF, error %d, aq status %d\n",
+                       err, adapter->hw.aq.asq_last_status);
+               goto out;
+       }
+
+       err = -EBUSY;
+       for (i = 0; i < 5; i++) {
+               msleep(100);
+               if (!(adapter->client_pending &
+                     BIT(I40E_VIRTCHNL_OP_CONFIG_IWARP_IRQ_MAP))) {
+                       err = 0;
+                       break;
+               }
+       }
+out:
+       return err;
+}
+
+/**
+ * i40evf_register_client - Register an i40e client driver with the L2 driver
+ * @client: pointer to the i40e_client struct
+ *
+ * Returns 0 on success or non-0 on error
+ **/
+int i40evf_register_client(struct i40e_client *client)
+{
+       int ret = 0;
+
+       if (!client) {
+               ret = -EIO;
+               goto out;
+       }
+
+       if (strlen(client->name) == 0) {
+               pr_info("i40evf: Failed to register client with no name\n");
+               ret = -EIO;
+               goto out;
+       }
+
+       if (vf_registered_client) {
+               pr_info("i40evf: Client %s has already been registered!\n",
+                       client->name);
+               ret = -EEXIST;
+               goto out;
+       }
+
+       if ((client->version.major != I40EVF_CLIENT_VERSION_MAJOR) ||
+           (client->version.minor != I40EVF_CLIENT_VERSION_MINOR)) {
+               pr_info("i40evf: Failed to register client %s due to mismatched client interface version\n",
+                       client->name);
+               pr_info("Client is using version: %02d.%02d.%02d while LAN driver supports %s\n",
+                       client->version.major, client->version.minor,
+                       client->version.build,
+                       i40evf_client_interface_version_str);
+               ret = -EIO;
+               goto out;
+       }
+
+       vf_registered_client = client;
+
+       i40evf_client_prepare(client);
+
+       pr_info("i40evf: Registered client %s with return code %d\n",
+               client->name, ret);
+out:
+       return ret;
+}
+EXPORT_SYMBOL(i40evf_register_client);
+
+/**
+ * i40evf_unregister_client - Unregister an i40e client driver from the L2 driver
+ * @client: pointer to the i40e_client struct
+ *
+ * Returns 0 on success or non-0 on error
+ **/
+int i40evf_unregister_client(struct i40e_client *client)
+{
+       int ret = 0;
+
+       /* When an unregister request comes through, we must send a close
+        * to each client instance that was opened. The client_release
+        * function handles this.
+        */
+       i40evf_client_release(client);
+
+       if (vf_registered_client != client) {
+               pr_info("i40evf: Client %s has not been registered\n",
+                       client->name);
+               ret = -ENODEV;
+               goto out;
+       }
+       vf_registered_client = NULL;
+       pr_info("i40evf: Unregistered client %s\n", client->name);
+out:
+       return ret;
+}
+EXPORT_SYMBOL(i40evf_unregister_client);
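+
+/* Illustrative sketch (not part of this patch): a hypothetical RDMA client
+ * would pair these entry points roughly as follows.
+ *
+ *	static struct i40e_client my_client = {
+ *		.name    = "my-iwarp-client",
+ *		.version = { I40EVF_CLIENT_VERSION_MAJOR,
+ *			     I40EVF_CLIENT_VERSION_MINOR,
+ *			     I40EVF_CLIENT_VERSION_BUILD, 0 },
+ *		.type    = I40E_CLIENT_IWARP,
+ *		.ops     = &my_client_ops,
+ *	};
+ *
+ *	err = i40evf_register_client(&my_client);
+ *	...
+ *	err = i40evf_unregister_client(&my_client);
+ */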
diff --git a/drivers/net/ethernet/intel/i40evf/i40evf_client.h b/drivers/net/ethernet/intel/i40evf/i40evf_client.h
new file mode 100644 (file)
index 0000000..7d283c7
--- /dev/null
@@ -0,0 +1,166 @@
+#ifndef _I40E_CLIENT_H_
+#define _I40E_CLIENT_H_
+
+#define I40EVF_CLIENT_STR_LENGTH 10
+
+/* Client interface version should be updated anytime there is a change in the
+ * existing APIs or data structures.
+ */
+#define I40EVF_CLIENT_VERSION_MAJOR 0
+#define I40EVF_CLIENT_VERSION_MINOR 01
+#define I40EVF_CLIENT_VERSION_BUILD 00
+#define I40EVF_CLIENT_VERSION_STR     \
+       __stringify(I40EVF_CLIENT_VERSION_MAJOR) "." \
+       __stringify(I40EVF_CLIENT_VERSION_MINOR) "." \
+       __stringify(I40EVF_CLIENT_VERSION_BUILD)
+
+struct i40e_client_version {
+       u8 major;
+       u8 minor;
+       u8 build;
+       u8 rsvd;
+};
+
+enum i40e_client_state {
+       __I40E_CLIENT_NULL,
+       __I40E_CLIENT_REGISTERED
+};
+
+enum i40e_client_instance_state {
+       __I40E_CLIENT_INSTANCE_NONE,
+       __I40E_CLIENT_INSTANCE_OPENED,
+};
+
+struct i40e_ops;
+struct i40e_client;
+
+/* HW does not define a type value for AEQ; only for RX/TX and CEQ.
+ * In order for us to keep the interface simple, SW will define a
+ * unique type value for AEQ.
+ */
+#define I40E_QUEUE_TYPE_PE_AEQ  0x80
+#define I40E_QUEUE_INVALID_IDX 0xFFFF
+
+struct i40e_qv_info {
+       u32 v_idx; /* msix_vector */
+       u16 ceq_idx;
+       u16 aeq_idx;
+       u8 itr_idx;
+};
+
+struct i40e_qvlist_info {
+       u32 num_vectors;
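+       /* One entry is declared here; callers allocate storage for
+        * (num_vectors - 1) additional entries immediately after it.
+        */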
+       struct i40e_qv_info qv_info[1];
+};
+
+#define I40E_CLIENT_MSIX_ALL 0xFFFFFFFF
+
+/* set of LAN parameters useful for clients managed by LAN */
+
+/* Struct to hold per priority info */
+struct i40e_prio_qos_params {
+       u16 qs_handle; /* qs handle for prio */
+       u8 tc; /* TC mapped to prio */
+       u8 reserved;
+};
+
+#define I40E_CLIENT_MAX_USER_PRIORITY        8
+/* Struct to hold Client QoS */
+struct i40e_qos_params {
+       struct i40e_prio_qos_params prio_qos[I40E_CLIENT_MAX_USER_PRIORITY];
+};
+
+struct i40e_params {
+       struct i40e_qos_params qos;
+       u16 mtu;
+       u16 link_up; /* boolean */
+};
+
+/* Structure to hold LAN device info for a client device */
+struct i40e_info {
+       struct i40e_client_version version;
+       u8 lanmac[6];
+       struct net_device *netdev;
+       struct pci_dev *pcidev;
+       u8 __iomem *hw_addr;
+       u8 fid; /* function id, PF id or VF id */
+#define I40E_CLIENT_FTYPE_PF 0
+#define I40E_CLIENT_FTYPE_VF 1
+       u8 ftype; /* function type, PF or VF */
+       void *vf; /* cast to i40evf_adapter */
+
+       /* All L2 params that could change during the life span of the device
+        * and need to be communicated to the client when they change
+        */
+       struct i40e_params params;
+       struct i40e_ops *ops;
+
+       u16 msix_count;  /* number of MSI-X vectors */
+       /* The array below is dynamically allocated based on msix_count */
+       struct msix_entry *msix_entries;
+       u16 itr_index; /* Which ITR index the PE driver is supposed to use */
+};
+
+struct i40e_ops {
+       /* setup_qvlist enables queues with a particular vector */
+       int (*setup_qvlist)(struct i40e_info *ldev, struct i40e_client *client,
+                           struct i40e_qvlist_info *qv_info);
+
+       u32 (*virtchnl_send)(struct i40e_info *ldev, struct i40e_client *client,
+                            u8 *msg, u16 len);
+
+       /* If the PE Engine is unresponsive, the RDMA driver can request a reset. */
+       void (*request_reset)(struct i40e_info *ldev,
+                             struct i40e_client *client);
+};
+
+struct i40e_client_ops {
+       /* Should be called from register_client() or whenever the driver is
+        * ready to create a specific client instance.
+        */
+       int (*open)(struct i40e_info *ldev, struct i40e_client *client);
+
+       /* Should be called when the netdev is unavailable or when an
+        * unregister call comes in. If the close happens due to a reset,
+        * set the reset bit to true.
+        */
+       void (*close)(struct i40e_info *ldev, struct i40e_client *client,
+                     bool reset);
+
+       /* called when some L2 managed parameter changes - mss */
+       void (*l2_param_change)(struct i40e_info *ldev,
+                               struct i40e_client *client,
+                               struct i40e_params *params);
+
+       /* called when a message is received from the PF */
+       int (*virtchnl_receive)(struct i40e_info *ldev,
+                               struct i40e_client *client,
+                               u8 *msg, u16 len);
+};
+
+/* Client device */
+struct i40e_client_instance {
+       struct list_head list;
+       struct i40e_info lan_info;
+       struct i40e_client *client;
+       unsigned long  state;
+};
+
+struct i40e_client {
+       struct list_head list;          /* list of registered clients */
+       char name[I40EVF_CLIENT_STR_LENGTH];
+       struct i40e_client_version version;
+       unsigned long state;            /* client state */
+       atomic_t ref_cnt;  /* Count of all the client devices of this kind */
+       u32 flags;
+#define I40E_CLIENT_FLAGS_LAUNCH_ON_PROBE      BIT(0)
+#define I40E_TX_FLAGS_NOTIFY_OTHER_EVENTS      BIT(2)
+       u8 type;
+#define I40E_CLIENT_IWARP 0
+       struct i40e_client_ops *ops;    /* client ops provided by the client */
+};
+
+/* used by clients */
+int i40evf_register_client(struct i40e_client *client);
+int i40evf_unregister_client(struct i40e_client *client);
+#endif /* _I40E_CLIENT_H_ */
index 272d600c1ed06bf7b891bd88b4576c2964512ca4..9bb2cc7dd4e4afa5d98ac4f76b3794ffa359e7aa 100644 (file)
@@ -63,52 +63,74 @@ static const struct i40evf_stats i40evf_gstrings_stats[] = {
 #define I40EVF_STATS_LEN(_dev) \
        (I40EVF_GLOBAL_STATS_LEN + I40EVF_QUEUE_STATS_LEN(_dev))
 
+/* For now we have one and only one private flag and it is only defined
+ * when we have support for the SKIP_CPU_SYNC DMA attribute.  Instead
+ * of leaving all this code sitting around empty we will strip it unless
+ * our one private flag is actually available.
+ */
+struct i40evf_priv_flags {
+       char flag_string[ETH_GSTRING_LEN];
+       u32 flag;
+       bool read_only;
+};
+
+#define I40EVF_PRIV_FLAG(_name, _flag, _read_only) { \
+       .flag_string = _name, \
+       .flag = _flag, \
+       .read_only = _read_only, \
+}
+
+static const struct i40evf_priv_flags i40evf_gstrings_priv_flags[] = {
+       I40EVF_PRIV_FLAG("legacy-rx", I40EVF_FLAG_LEGACY_RX, 0),
+};
+
+#define I40EVF_PRIV_FLAGS_STR_LEN ARRAY_SIZE(i40evf_gstrings_priv_flags)
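+
+/* Userspace example (hypothetical interface name):
+ *   ethtool --show-priv-flags eth0
+ *   ethtool --set-priv-flags eth0 legacy-rx on
+ * Toggling legacy-rx schedules a reset (see i40evf_set_priv_flags below)
+ * so the Rx path is rebuilt with the new buffer layout.
+ */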
+
 /**
- * i40evf_get_settings - Get Link Speed and Duplex settings
+ * i40evf_get_link_ksettings - Get Link Speed and Duplex settings
  * @netdev: network interface device structure
- * @ecmd: ethtool command
+ * @cmd: ethtool command
  *
  * Reports speed/duplex settings. Because this is a VF, we don't know what
  * kind of link we really have, so we fake it.
  **/
-static int i40evf_get_settings(struct net_device *netdev,
-                              struct ethtool_cmd *ecmd)
+static int i40evf_get_link_ksettings(struct net_device *netdev,
+                                    struct ethtool_link_ksettings *cmd)
 {
        struct i40evf_adapter *adapter = netdev_priv(netdev);
 
-       ecmd->supported = 0;
-       ecmd->autoneg = AUTONEG_DISABLE;
-       ecmd->transceiver = XCVR_DUMMY1;
-       ecmd->port = PORT_NONE;
+       ethtool_link_ksettings_zero_link_mode(cmd, supported);
+       cmd->base.autoneg = AUTONEG_DISABLE;
+       cmd->base.port = PORT_NONE;
        /* Set speed and duplex */
        switch (adapter->link_speed) {
        case I40E_LINK_SPEED_40GB:
-               ethtool_cmd_speed_set(ecmd, SPEED_40000);
+               cmd->base.speed = SPEED_40000;
                break;
        case I40E_LINK_SPEED_25GB:
 #ifdef SPEED_25000
-               ethtool_cmd_speed_set(ecmd, SPEED_25000);
+               cmd->base.speed = SPEED_25000;
 #else
                netdev_info(netdev,
                            "Speed is 25G, display not supported by this version of ethtool.\n");
 #endif
                break;
        case I40E_LINK_SPEED_20GB:
-               ethtool_cmd_speed_set(ecmd, SPEED_20000);
+               cmd->base.speed = SPEED_20000;
                break;
        case I40E_LINK_SPEED_10GB:
-               ethtool_cmd_speed_set(ecmd, SPEED_10000);
+               cmd->base.speed = SPEED_10000;
                break;
        case I40E_LINK_SPEED_1GB:
-               ethtool_cmd_speed_set(ecmd, SPEED_1000);
+               cmd->base.speed = SPEED_1000;
                break;
        case I40E_LINK_SPEED_100MB:
-               ethtool_cmd_speed_set(ecmd, SPEED_100);
+               cmd->base.speed = SPEED_100;
                break;
        default:
                break;
        }
-       ecmd->duplex = DUPLEX_FULL;
+       cmd->base.duplex = DUPLEX_FULL;
 
        return 0;
 }
@@ -125,6 +147,8 @@ static int i40evf_get_sset_count(struct net_device *netdev, int sset)
 {
        if (sset == ETH_SS_STATS)
                return I40EVF_STATS_LEN(netdev);
+       else if (sset == ETH_SS_PRIV_FLAGS)
+               return I40EVF_PRIV_FLAGS_STR_LEN;
        else
                return -EINVAL;
 }
@@ -190,7 +214,83 @@ static void i40evf_get_strings(struct net_device *netdev, u32 sset, u8 *data)
                        snprintf(p, ETH_GSTRING_LEN, "rx-%u.bytes", i);
                        p += ETH_GSTRING_LEN;
                }
+       } else if (sset == ETH_SS_PRIV_FLAGS) {
+               for (i = 0; i < I40EVF_PRIV_FLAGS_STR_LEN; i++) {
+                       snprintf(p, ETH_GSTRING_LEN, "%s",
+                                i40evf_gstrings_priv_flags[i].flag_string);
+                       p += ETH_GSTRING_LEN;
+               }
+       }
+}
+
+/**
+ * i40evf_get_priv_flags - report device private flags
+ * @netdev: network interface device structure
+ *
+ * The get string set count and the string set should be matched for each
+ * flag returned.  Add new strings for each flag to the
+ * i40evf_gstrings_priv_flags array.
+ *
+ * Returns a u32 bitmap of flags.
+ **/
+static u32 i40evf_get_priv_flags(struct net_device *netdev)
+{
+       struct i40evf_adapter *adapter = netdev_priv(netdev);
+       u32 i, ret_flags = 0;
+
+       for (i = 0; i < I40EVF_PRIV_FLAGS_STR_LEN; i++) {
+               const struct i40evf_priv_flags *priv_flags;
+
+               priv_flags = &i40evf_gstrings_priv_flags[i];
+
+               if (priv_flags->flag & adapter->flags)
+                       ret_flags |= BIT(i);
+       }
+
+       return ret_flags;
+}
+
+/**
+ * i40evf_set_priv_flags - set private flags
+ * @netdev: network interface device structure
+ * @flags: bit flags to be set
+ **/
+static int i40evf_set_priv_flags(struct net_device *netdev, u32 flags)
+{
+       struct i40evf_adapter *adapter = netdev_priv(netdev);
+       u64 changed_flags;
+       u32 i;
+
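+       /* Snapshot the current flags; XORing with the updated value below
+        * yields exactly the bits that changed.
+        */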
+       changed_flags = adapter->flags;
+
+       for (i = 0; i < I40EVF_PRIV_FLAGS_STR_LEN; i++) {
+               const struct i40evf_priv_flags *priv_flags;
+
+               priv_flags = &i40evf_gstrings_priv_flags[i];
+
+               if (priv_flags->read_only)
+                       continue;
+
+               if (flags & BIT(i))
+                       adapter->flags |= priv_flags->flag;
+               else
+                       adapter->flags &= ~(priv_flags->flag);
+       }
+
+       /* check for flags that changed */
+       changed_flags ^= adapter->flags;
+
+       /* Process any additional changes needed as a result of flag changes. */
+
+       /* issue a reset to force legacy-rx change to take effect */
+       if (changed_flags & I40EVF_FLAG_LEGACY_RX) {
+               if (netif_running(netdev)) {
+                       adapter->flags |= I40EVF_FLAG_RESET_NEEDED;
+                       schedule_work(&adapter->reset_task);
+               }
        }
+
+       return 0;
 }
 
 /**
@@ -239,6 +339,7 @@ static void i40evf_get_drvinfo(struct net_device *netdev,
        strlcpy(drvinfo->version, i40evf_driver_version, 32);
        strlcpy(drvinfo->fw_version, "N/A", 4);
        strlcpy(drvinfo->bus_info, pci_name(adapter->pdev), 32);
+       drvinfo->n_priv_flags = I40EVF_PRIV_FLAGS_STR_LEN;
 }
 
 /**
@@ -643,7 +744,6 @@ static int i40evf_set_rxfh(struct net_device *netdev, const u32 *indir,
 }
 
 static const struct ethtool_ops i40evf_ethtool_ops = {
-       .get_settings           = i40evf_get_settings,
        .get_drvinfo            = i40evf_get_drvinfo,
        .get_link               = ethtool_op_get_link,
        .get_ringparam          = i40evf_get_ringparam,
@@ -651,6 +751,8 @@ static const struct ethtool_ops i40evf_ethtool_ops = {
        .get_strings            = i40evf_get_strings,
        .get_ethtool_stats      = i40evf_get_ethtool_stats,
        .get_sset_count         = i40evf_get_sset_count,
+       .get_priv_flags         = i40evf_get_priv_flags,
+       .set_priv_flags         = i40evf_set_priv_flags,
        .get_msglevel           = i40evf_get_msglevel,
        .set_msglevel           = i40evf_set_msglevel,
        .get_coalesce           = i40evf_get_coalesce,
@@ -663,6 +765,7 @@ static const struct ethtool_ops i40evf_ethtool_ops = {
        .set_rxfh               = i40evf_set_rxfh,
        .get_channels           = i40evf_get_channels,
        .get_rxfh_key_size      = i40evf_get_rxfh_key_size,
+       .get_link_ksettings     = i40evf_get_link_ksettings,
 };
 
 /**
index f35dcaac5bb7bd9bf86412c1bb40f8e971d086c7..12a930e879af3d47f30f238800fc57d228e1e9dd 100644 (file)
@@ -26,6 +26,7 @@
 
 #include "i40evf.h"
 #include "i40e_prototype.h"
+#include "i40evf_client.h"
 static int i40evf_setup_all_tx_resources(struct i40evf_adapter *adapter);
 static int i40evf_setup_all_rx_resources(struct i40evf_adapter *adapter);
 static int i40evf_close(struct net_device *netdev);
@@ -36,9 +37,9 @@ static const char i40evf_driver_string[] =
 
 #define DRV_KERN "-k"
 
-#define DRV_VERSION_MAJOR 1
-#define DRV_VERSION_MINOR 6
-#define DRV_VERSION_BUILD 27
+#define DRV_VERSION_MAJOR 2
+#define DRV_VERSION_MINOR 1
+#define DRV_VERSION_BUILD 7
 #define DRV_VERSION __stringify(DRV_VERSION_MAJOR) "." \
             __stringify(DRV_VERSION_MINOR) "." \
             __stringify(DRV_VERSION_BUILD) \
@@ -685,12 +686,38 @@ static void i40evf_configure_tx(struct i40evf_adapter *adapter)
  **/
 static void i40evf_configure_rx(struct i40evf_adapter *adapter)
 {
+       unsigned int rx_buf_len = I40E_RXBUFFER_2048;
+       struct net_device *netdev = adapter->netdev;
        struct i40e_hw *hw = &adapter->hw;
        int i;
 
+       /* Legacy Rx will always default to a 2048 buffer size. */
+#if (PAGE_SIZE < 8192)
+       if (!(adapter->flags & I40EVF_FLAG_LEGACY_RX)) {
+               /* For jumbo frames on systems with 4K pages we have to use
+                * an order 1 page, so we might as well increase the size
+                * of our Rx buffer to make better use of the available space
+                */
+               rx_buf_len = I40E_RXBUFFER_3072;
+
+               /* We use a 1536 buffer size for configurations with
+                * standard Ethernet mtu.  On x86 this gives us enough room
+                * for shared info and 192 bytes of padding.
+                */
+               if (!I40E_2K_TOO_SMALL_WITH_PADDING &&
+                   (netdev->mtu <= ETH_DATA_LEN))
+                       rx_buf_len = I40E_RXBUFFER_1536 - NET_IP_ALIGN;
+       }
+#endif
+
        for (i = 0; i < adapter->num_active_queues; i++) {
                adapter->rx_rings[i].tail = hw->hw_addr + I40E_QRX_TAIL1(i);
-               adapter->rx_rings[i].rx_buf_len = I40EVF_RXBUFFER_2048;
+               adapter->rx_rings[i].rx_buf_len = rx_buf_len;
+
+               if (adapter->flags & I40EVF_FLAG_LEGACY_RX)
+                       clear_ring_build_skb_enabled(&adapter->rx_rings[i]);
+               else
+                       set_ring_build_skb_enabled(&adapter->rx_rings[i]);
        }
 }
 
@@ -1058,6 +1085,8 @@ static void i40evf_up_complete(struct i40evf_adapter *adapter)
        i40evf_napi_enable_all(adapter);
 
        adapter->aq_required |= I40EVF_FLAG_AQ_ENABLE_QUEUES;
+       if (CLIENT_ENABLED(adapter))
+               adapter->flags |= I40EVF_FLAG_CLIENT_NEEDS_OPEN;
        mod_timer_pending(&adapter->watchdog_timer, jiffies + 1);
 }
 
@@ -1685,6 +1714,7 @@ static void i40evf_watchdog_task(struct work_struct *work)
                i40evf_set_promiscuous(adapter, 0);
                goto watchdog_done;
        }
+       schedule_delayed_work(&adapter->client_task, msecs_to_jiffies(5));
 
        if (adapter->state == __I40EVF_RUNNING)
                i40evf_request_stats(adapter);
@@ -1773,10 +1803,17 @@ static void i40evf_reset_task(struct work_struct *work)
        u32 reg_val;
        int i = 0, err;
 
-       while (test_and_set_bit(__I40EVF_IN_CRITICAL_TASK,
+       while (test_and_set_bit(__I40EVF_IN_CLIENT_TASK,
                                &adapter->crit_section))
                usleep_range(500, 1000);
-
+       if (CLIENT_ENABLED(adapter)) {
+               adapter->flags &= ~(I40EVF_FLAG_CLIENT_NEEDS_OPEN |
+                                   I40EVF_FLAG_CLIENT_NEEDS_CLOSE |
+                                   I40EVF_FLAG_CLIENT_NEEDS_L2_PARAMS |
+                                   I40EVF_FLAG_SERVICE_CLIENT_REQUESTED);
+               cancel_delayed_work_sync(&adapter->client_task);
+               i40evf_notify_client_close(&adapter->vsi, true);
+       }
        i40evf_misc_irq_disable(adapter);
        if (adapter->flags & I40EVF_FLAG_RESET_NEEDED) {
                adapter->flags &= ~I40EVF_FLAG_RESET_NEEDED;
@@ -1819,6 +1856,7 @@ static void i40evf_reset_task(struct work_struct *work)
                dev_err(&adapter->pdev->dev, "Reset never finished (%x)\n",
                        reg_val);
                i40evf_disable_vf(adapter);
+               clear_bit(__I40EVF_IN_CLIENT_TASK, &adapter->crit_section);
                return; /* Do not attempt to reinit. It's dead, Jim. */
        }
 
@@ -1861,9 +1899,8 @@ continue_reset:
        }
        adapter->aq_required |= I40EVF_FLAG_AQ_ADD_MAC_FILTER;
        adapter->aq_required |= I40EVF_FLAG_AQ_ADD_VLAN_FILTER;
-       /* Open RDMA Client again */
-       adapter->aq_required |= I40EVF_FLAG_SERVICE_CLIENT_REQUESTED;
        clear_bit(__I40EVF_IN_CRITICAL_TASK, &adapter->crit_section);
+       clear_bit(__I40EVF_IN_CLIENT_TASK, &adapter->crit_section);
        i40evf_misc_irq_enable(adapter);
 
        mod_timer(&adapter->watchdog_timer, jiffies + 2);
@@ -1979,6 +2016,48 @@ out:
        i40evf_misc_irq_enable(adapter);
 }
 
+/**
+ * i40evf_client_task - worker thread to perform client work
+ * @work: pointer to work_struct containing our data
+ *
+ * This task handles client interactions. Because client calls can be
+ * reentrant, we can't handle them in the watchdog.
+ **/
+static void i40evf_client_task(struct work_struct *work)
+{
+       struct i40evf_adapter *adapter =
+               container_of(work, struct i40evf_adapter, client_task.work);
+
+       /* If we can't get the client bit, just give up. We'll be rescheduled
+        * later.
+        */
+
+       if (test_and_set_bit(__I40EVF_IN_CLIENT_TASK, &adapter->crit_section))
+               return;
+
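+       /* Handle at most one client request per invocation; the watchdog
+        * reschedules this task, so any remaining work is picked up later.
+        */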
+       if (adapter->flags & I40EVF_FLAG_SERVICE_CLIENT_REQUESTED) {
+               i40evf_client_subtask(adapter);
+               adapter->flags &= ~I40EVF_FLAG_SERVICE_CLIENT_REQUESTED;
+               goto out;
+       }
+       if (adapter->flags & I40EVF_FLAG_CLIENT_NEEDS_CLOSE) {
+               i40evf_notify_client_close(&adapter->vsi, false);
+               adapter->flags &= ~I40EVF_FLAG_CLIENT_NEEDS_CLOSE;
+               goto out;
+       }
+       if (adapter->flags & I40EVF_FLAG_CLIENT_NEEDS_OPEN) {
+               i40evf_notify_client_open(&adapter->vsi);
+               adapter->flags &= ~I40EVF_FLAG_CLIENT_NEEDS_OPEN;
+               goto out;
+       }
+       if (adapter->flags & I40EVF_FLAG_CLIENT_NEEDS_L2_PARAMS) {
+               i40evf_notify_client_l2_params(&adapter->vsi);
+               adapter->flags &= ~I40EVF_FLAG_CLIENT_NEEDS_L2_PARAMS;
+       }
+out:
+       clear_bit(__I40EVF_IN_CLIENT_TASK, &adapter->crit_section);
+}
+
 /**
  * i40evf_free_all_tx_resources - Free Tx Resources for All Queues
  * @adapter: board private structure
@@ -2148,6 +2227,8 @@ static int i40evf_close(struct net_device *netdev)
 
 
        set_bit(__I40E_DOWN, &adapter->vsi.state);
+       if (CLIENT_ENABLED(adapter))
+               adapter->flags |= I40EVF_FLAG_CLIENT_NEEDS_CLOSE;
 
        i40evf_down(adapter);
        adapter->state = __I40EVF_DOWN_PENDING;
@@ -2188,6 +2269,10 @@ static int i40evf_change_mtu(struct net_device *netdev, int new_mtu)
        struct i40evf_adapter *adapter = netdev_priv(netdev);
 
        netdev->mtu = new_mtu;
+       if (CLIENT_ENABLED(adapter)) {
+               i40evf_notify_client_l2_params(&adapter->vsi);
+               adapter->flags |= I40EVF_FLAG_SERVICE_CLIENT_REQUESTED;
+       }
        adapter->flags |= I40EVF_FLAG_RESET_NEEDED;
        schedule_work(&adapter->reset_task);
 
@@ -2328,6 +2413,8 @@ int i40evf_process_config(struct i40evf_adapter *adapter)
        struct net_device *netdev = adapter->netdev;
        struct i40e_vsi *vsi = &adapter->vsi;
        int i;
+       netdev_features_t hw_enc_features;
+       netdev_features_t hw_features;
 
        /* got VF config message back from PF, now we can parse it */
        for (i = 0; i < vfres->num_vsis; i++) {
@@ -2339,46 +2426,52 @@ int i40evf_process_config(struct i40evf_adapter *adapter)
                return -ENODEV;
        }
 
-       netdev->hw_enc_features |= NETIF_F_SG                   |
-                                  NETIF_F_IP_CSUM              |
-                                  NETIF_F_IPV6_CSUM            |
-                                  NETIF_F_HIGHDMA              |
-                                  NETIF_F_SOFT_FEATURES        |
-                                  NETIF_F_TSO                  |
-                                  NETIF_F_TSO_ECN              |
-                                  NETIF_F_TSO6                 |
+       hw_enc_features = NETIF_F_SG                    |
+                         NETIF_F_IP_CSUM               |
+                         NETIF_F_IPV6_CSUM             |
+                         NETIF_F_HIGHDMA               |
+                         NETIF_F_SOFT_FEATURES |
+                         NETIF_F_TSO                   |
+                         NETIF_F_TSO_ECN               |
+                         NETIF_F_TSO6                  |
+                         NETIF_F_SCTP_CRC              |
+                         NETIF_F_RXHASH                |
+                         NETIF_F_RXCSUM                |
+                         0;
+
+       /* advertise to stack only if offloads for encapsulated packets are
+        * supported
+        */
+       if (vfres->vf_offload_flags & I40E_VIRTCHNL_VF_OFFLOAD_ENCAP) {
+               hw_enc_features |= NETIF_F_GSO_UDP_TUNNEL       |
                                   NETIF_F_GSO_GRE              |
                                   NETIF_F_GSO_GRE_CSUM         |
                                   NETIF_F_GSO_IPXIP4           |
                                   NETIF_F_GSO_IPXIP6           |
-                                  NETIF_F_GSO_UDP_TUNNEL       |
                                   NETIF_F_GSO_UDP_TUNNEL_CSUM  |
                                   NETIF_F_GSO_PARTIAL          |
-                                  NETIF_F_SCTP_CRC             |
-                                  NETIF_F_RXHASH               |
-                                  NETIF_F_RXCSUM               |
                                   0;
 
-       if (!(adapter->flags & I40EVF_FLAG_OUTER_UDP_CSUM_CAPABLE))
-               netdev->gso_partial_features |= NETIF_F_GSO_UDP_TUNNEL_CSUM;
-
-       netdev->gso_partial_features |= NETIF_F_GSO_GRE_CSUM;
+               if (!(vfres->vf_offload_flags &
+                     I40E_VIRTCHNL_VF_OFFLOAD_ENCAP_CSUM))
+                       netdev->gso_partial_features |=
+                               NETIF_F_GSO_UDP_TUNNEL_CSUM;
 
+               netdev->gso_partial_features |= NETIF_F_GSO_GRE_CSUM;
+               netdev->hw_enc_features |= NETIF_F_TSO_MANGLEID;
+               netdev->hw_enc_features |= hw_enc_features;
+       }
        /* record features VLANs can make use of */
-       netdev->vlan_features |= netdev->hw_enc_features |
-                                NETIF_F_TSO_MANGLEID;
+       netdev->vlan_features |= hw_enc_features | NETIF_F_TSO_MANGLEID;
 
        /* Write features and hw_features separately to avoid polluting
-        * with, or dropping, features that are set when we registgered.
+        * with, or dropping, features that are set when we registered.
         */
-       netdev->hw_features |= netdev->hw_enc_features;
+       hw_features = hw_enc_features;
 
-       netdev->features |= netdev->hw_enc_features | I40EVF_VLAN_FEATURES;
-       netdev->hw_enc_features |= NETIF_F_TSO_MANGLEID;
+       netdev->hw_features |= hw_features;
 
-       /* disable VLAN features if not supported */
-       if (!(vfres->vf_offload_flags & I40E_VIRTCHNL_VF_OFFLOAD_VLAN))
-               netdev->features ^= I40EVF_VLAN_FEATURES;
+       netdev->features |= hw_features | I40EVF_VLAN_FEATURES;
 
        adapter->vsi.id = adapter->vsi_res->vsi_id;
 
@@ -2519,9 +2612,6 @@ static void i40evf_init_task(struct work_struct *work)
                goto err_alloc;
        }
 
-       if (hw->mac.type == I40E_MAC_X722_VF)
-               adapter->flags |= I40EVF_FLAG_OUTER_UDP_CSUM_CAPABLE;
-
        if (i40evf_process_config(adapter))
                goto err_alloc;
        adapter->current_op = I40E_VIRTCHNL_OP_UNKNOWN;
@@ -2581,6 +2671,12 @@ static void i40evf_init_task(struct work_struct *work)
        adapter->netdev_registered = true;
 
        netif_tx_stop_all_queues(netdev);
+       if (CLIENT_ALLOWED(adapter)) {
+               err = i40evf_lan_add_device(adapter);
+               if (err)
+                       dev_info(&pdev->dev, "Failed to add VF to client API service list: %d\n",
+                                err);
+       }
 
        dev_info(&pdev->dev, "MAC address: %pM\n", adapter->hw.mac.addr);
        if (netdev->features & NETIF_F_GRO)
@@ -2745,6 +2841,7 @@ static int i40evf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
        INIT_WORK(&adapter->reset_task, i40evf_reset_task);
        INIT_WORK(&adapter->adminq_task, i40evf_adminq_task);
        INIT_WORK(&adapter->watchdog_task, i40evf_watchdog_task);
+       INIT_DELAYED_WORK(&adapter->client_task, i40evf_client_task);
        INIT_DELAYED_WORK(&adapter->init_task, i40evf_init_task);
        schedule_delayed_work(&adapter->init_task,
                              msecs_to_jiffies(5 * (pdev->devfn & 0x07)));
@@ -2857,14 +2954,21 @@ static void i40evf_remove(struct pci_dev *pdev)
        struct i40evf_adapter *adapter = netdev_priv(netdev);
        struct i40evf_mac_filter *f, *ftmp;
        struct i40e_hw *hw = &adapter->hw;
+       int err;
 
        cancel_delayed_work_sync(&adapter->init_task);
        cancel_work_sync(&adapter->reset_task);
-
+       cancel_delayed_work_sync(&adapter->client_task);
        if (adapter->netdev_registered) {
                unregister_netdev(netdev);
                adapter->netdev_registered = false;
        }
+       if (CLIENT_ALLOWED(adapter)) {
+               err = i40evf_lan_del_device(adapter);
+               if (err)
+                       dev_warn(&pdev->dev, "Failed to delete client device: %d\n",
+                                err);
+       }
 
        /* Shut down all the garbage mashers on the detention level */
        adapter->state = __I40EVF_REMOVE;
index bee58af390e1c633006ddc38efddf256a307161a..3bccfbb1db14b4f2d03e49a2a0d17032892d15bb 100644 (file)
@@ -26,6 +26,7 @@
 
 #include "i40evf.h"
 #include "i40e_prototype.h"
+#include "i40evf_client.h"
 
 /* busy wait delay in msec */
 #define I40EVF_BUSY_WAIT_DELAY 10
@@ -158,7 +159,9 @@ int i40evf_send_vf_config_msg(struct i40evf_adapter *adapter)
               I40E_VIRTCHNL_VF_OFFLOAD_RSS_REG |
               I40E_VIRTCHNL_VF_OFFLOAD_VLAN |
               I40E_VIRTCHNL_VF_OFFLOAD_WB_ON_ITR |
-              I40E_VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2;
+              I40E_VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2 |
+              I40E_VIRTCHNL_VF_OFFLOAD_ENCAP |
+              I40E_VIRTCHNL_VF_OFFLOAD_ENCAP_CSUM;
 
        adapter->current_op = I40E_VIRTCHNL_OP_GET_VF_RESOURCES;
        adapter->aq_required &= ~I40EVF_FLAG_AQ_GET_CONFIG;
@@ -233,7 +236,7 @@ void i40evf_configure_queues(struct i40evf_adapter *adapter)
        struct i40e_virtchnl_vsi_queue_config_info *vqci;
        struct i40e_virtchnl_queue_pair_info *vqpi;
        int pairs = adapter->num_active_queues;
-       int i, len;
+       int i, len, max_frame = I40E_MAX_RXBUFFER;
 
        if (adapter->current_op != I40E_VIRTCHNL_OP_UNKNOWN) {
                /* bail because we already have a command pending */
@@ -248,6 +251,11 @@ void i40evf_configure_queues(struct i40evf_adapter *adapter)
        if (!vqci)
                return;
 
+       /* Limit maximum frame size when jumbo frames are not enabled */
+       if (!(adapter->flags & I40EVF_FLAG_LEGACY_RX) &&
+           (adapter->netdev->mtu <= ETH_DATA_LEN))
+               max_frame = I40E_RXBUFFER_1536 - NET_IP_ALIGN;
+
        vqci->vsi_id = adapter->vsi_res->vsi_id;
        vqci->num_queue_pairs = pairs;
        vqpi = vqci->qpair;
@@ -259,17 +267,14 @@ void i40evf_configure_queues(struct i40evf_adapter *adapter)
                vqpi->txq.queue_id = i;
                vqpi->txq.ring_len = adapter->tx_rings[i].count;
                vqpi->txq.dma_ring_addr = adapter->tx_rings[i].dma;
-               vqpi->txq.headwb_enabled = 1;
-               vqpi->txq.dma_headwb_addr = vqpi->txq.dma_ring_addr +
-                   (vqpi->txq.ring_len * sizeof(struct i40e_tx_desc));
-
                vqpi->rxq.vsi_id = vqci->vsi_id;
                vqpi->rxq.queue_id = i;
                vqpi->rxq.ring_len = adapter->rx_rings[i].count;
                vqpi->rxq.dma_ring_addr = adapter->rx_rings[i].dma;
-               vqpi->rxq.max_pkt_size = adapter->netdev->mtu
-                                       + ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN;
-               vqpi->rxq.databuffer_size = adapter->rx_rings[i].rx_buf_len;
+               vqpi->rxq.max_pkt_size = max_frame;
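+               /* The Rx queue context counts buffer sizes in units of
+                * BIT(I40E_RXQ_CTX_DBUFF_SHIFT) bytes, so round up to that
+                * granularity.
+                */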
+               vqpi->rxq.databuffer_size =
+                       ALIGN(adapter->rx_rings[i].rx_buf_len,
+                             BIT_ULL(I40E_RXQ_CTX_DBUFF_SHIFT));
                vqpi++;
        }
 
@@ -999,6 +1004,16 @@ void i40evf_virtchnl_completion(struct i40evf_adapter *adapter,
                if (v_opcode != adapter->current_op)
                        return;
                break;
+       case I40E_VIRTCHNL_OP_IWARP:
+               /* Gobble zero-length replies from the PF. They indicate that
+                * a previous message was received OK, and the client doesn't
+                * care about that.
+                */
+               if (msglen && CLIENT_ENABLED(adapter))
+                       i40evf_notify_client_message(&adapter->vsi,
+                                                    msg, msglen);
+               break;
+
        case I40E_VIRTCHNL_OP_CONFIG_IWARP_IRQ_MAP:
                adapter->client_pending &=
                                ~(BIT(I40E_VIRTCHNL_OP_CONFIG_IWARP_IRQ_MAP));
@@ -1014,7 +1029,7 @@ void i40evf_virtchnl_completion(struct i40evf_adapter *adapter,
                }
                break;
        default:
-               if (v_opcode != adapter->current_op)
+               if (adapter->current_op && (v_opcode != adapter->current_op))
                        dev_warn(&adapter->pdev->dev, "Expected response %d from PF, received %d\n",
                                 adapter->current_op, v_opcode);
                break;
index acbc3abe2dddfc7bdf87a3724a95b136237a519e..dc6e2980718f5d09c34a28660ebf47c4d5ff360d 100644 (file)
@@ -142,12 +142,24 @@ struct vf_data_storage {
 /* Supported Rx Buffer Sizes */
 #define IGB_RXBUFFER_256       256
 #define IGB_RXBUFFER_2048      2048
+#define IGB_RXBUFFER_3072      3072
 #define IGB_RX_HDR_LEN         IGB_RXBUFFER_256
-#define IGB_RX_BUFSZ           IGB_RXBUFFER_2048
+#define IGB_TS_HDR_LEN         16
+
+#define IGB_SKB_PAD            (NET_SKB_PAD + NET_IP_ALIGN)
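+/* Largest frame that fits a build_skb Rx buffer once shared info, head
+ * padding, and the timestamp header are accounted for.
+ */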
+#if (PAGE_SIZE < 8192)
+#define IGB_MAX_FRAME_BUILD_SKB \
+       (SKB_WITH_OVERHEAD(IGB_RXBUFFER_2048) - IGB_SKB_PAD - IGB_TS_HDR_LEN)
+#else
+#define IGB_MAX_FRAME_BUILD_SKB (IGB_RXBUFFER_2048 - IGB_TS_HDR_LEN)
+#endif
 
 /* How many Rx Buffers do we bundle into one write to the hardware ? */
 #define IGB_RX_BUFFER_WRITE    16 /* Must be power of 2 */
 
+#define IGB_RX_DMA_ATTR \
+       (DMA_ATTR_SKIP_CPU_SYNC | DMA_ATTR_WEAK_ORDERING)
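+/* Rx buffer mappings skip the automatic CPU sync (the driver syncs just
+ * the region it touches) and allow weakly ordered writes.
+ */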
+
 #define AUTO_ALL_MODES         0
 #define IGB_EEPROM_APME                0x0400
 
@@ -301,12 +313,51 @@ struct igb_q_vector {
 };
 
 enum e1000_ring_flags_t {
+       IGB_RING_FLAG_RX_3K_BUFFER,
+       IGB_RING_FLAG_RX_BUILD_SKB_ENABLED,
        IGB_RING_FLAG_RX_SCTP_CSUM,
        IGB_RING_FLAG_RX_LB_VLAN_BSWAP,
        IGB_RING_FLAG_TX_CTX_IDX,
        IGB_RING_FLAG_TX_DETECT_HANG
 };
 
+#define ring_uses_large_buffer(ring) \
+       test_bit(IGB_RING_FLAG_RX_3K_BUFFER, &(ring)->flags)
+#define set_ring_uses_large_buffer(ring) \
+       set_bit(IGB_RING_FLAG_RX_3K_BUFFER, &(ring)->flags)
+#define clear_ring_uses_large_buffer(ring) \
+       clear_bit(IGB_RING_FLAG_RX_3K_BUFFER, &(ring)->flags)
+
+#define ring_uses_build_skb(ring) \
+       test_bit(IGB_RING_FLAG_RX_BUILD_SKB_ENABLED, &(ring)->flags)
+#define set_ring_build_skb_enabled(ring) \
+       set_bit(IGB_RING_FLAG_RX_BUILD_SKB_ENABLED, &(ring)->flags)
+#define clear_ring_build_skb_enabled(ring) \
+       clear_bit(IGB_RING_FLAG_RX_BUILD_SKB_ENABLED, &(ring)->flags)
+
+static inline unsigned int igb_rx_bufsz(struct igb_ring *ring)
+{
+#if (PAGE_SIZE < 8192)
+       if (ring_uses_large_buffer(ring))
+               return IGB_RXBUFFER_3072;
+
+       if (ring_uses_build_skb(ring))
+               return IGB_MAX_FRAME_BUILD_SKB + IGB_TS_HDR_LEN;
+#endif
+       return IGB_RXBUFFER_2048;
+}
+
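+/* 3K buffers cannot be packed two per 4K page, so rings using them need
+ * order-1 pages.
+ */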
+static inline unsigned int igb_rx_pg_order(struct igb_ring *ring)
+{
+#if (PAGE_SIZE < 8192)
+       if (ring_uses_large_buffer(ring))
+               return 1;
+#endif
+       return 0;
+}
+
+#define igb_rx_pg_size(_ring) (PAGE_SIZE << igb_rx_pg_order(_ring))
+
 #define IGB_TXD_DCMD (E1000_ADVTXD_DCMD_EOP | E1000_ADVTXD_DCMD_RS)
 
 #define IGB_RX_DESC(R, i)      \
@@ -545,6 +596,7 @@ struct igb_adapter {
 #define IGB_FLAG_HAS_MSIX              BIT(13)
 #define IGB_FLAG_EEE                   BIT(14)
 #define IGB_FLAG_VLAN_PROMISC          BIT(15)
+#define IGB_FLAG_RX_LEGACY             BIT(16)
 
 /* Media Auto Sense */
 #define IGB_MAS_ENABLE_0               0X0001
@@ -558,7 +610,6 @@ struct igb_adapter {
 #define IGB_DMCTLX_DCFLUSH_DIS 0x80000000  /* Disable DMA Coal Flush */
 
 #define IGB_82576_TSYNC_SHIFT  19
-#define IGB_TS_HDR_LEN         16
 enum e1000_state_t {
        __IGB_TESTING,
        __IGB_RESETTING,
@@ -591,7 +642,6 @@ void igb_configure_rx_ring(struct igb_adapter *, struct igb_ring *);
 void igb_setup_tctl(struct igb_adapter *);
 void igb_setup_rctl(struct igb_adapter *);
 netdev_tx_t igb_xmit_frame_ring(struct sk_buff *, struct igb_ring *);
-void igb_unmap_and_free_tx_resource(struct igb_ring *, struct igb_tx_buffer *);
 void igb_alloc_rx_buffers(struct igb_ring *, u16);
 void igb_update_stats(struct igb_adapter *, struct rtnl_link_stats64 *);
 bool igb_has_link(struct igb_adapter *adapter);
@@ -604,7 +654,7 @@ void igb_ptp_reset(struct igb_adapter *adapter);
 void igb_ptp_suspend(struct igb_adapter *adapter);
 void igb_ptp_rx_hang(struct igb_adapter *adapter);
 void igb_ptp_rx_rgtstamp(struct igb_q_vector *q_vector, struct sk_buff *skb);
-void igb_ptp_rx_pktstamp(struct igb_q_vector *q_vector, unsigned char *va,
+void igb_ptp_rx_pktstamp(struct igb_q_vector *q_vector, void *va,
                         struct sk_buff *skb);
 int igb_ptp_set_ts_config(struct net_device *netdev, struct ifreq *ifr);
 int igb_ptp_get_ts_config(struct net_device *netdev, struct ifreq *ifr);
index 737b664d004cbb372222f6f63acc5c9f38ad4c95..0efb62db6efdd0fa2bfa7d531343790a3156f2be 100644 (file)
@@ -144,7 +144,15 @@ static const char igb_gstrings_test[][ETH_GSTRING_LEN] = {
 };
 #define IGB_TEST_LEN (sizeof(igb_gstrings_test) / ETH_GSTRING_LEN)
 
-static int igb_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
+static const char igb_priv_flags_strings[][ETH_GSTRING_LEN] = {
+#define IGB_PRIV_FLAGS_LEGACY_RX       BIT(0)
+       "legacy-rx",
+};
+
+#define IGB_PRIV_FLAGS_STR_LEN ARRAY_SIZE(igb_priv_flags_strings)
+
+static int igb_get_link_ksettings(struct net_device *netdev,
+                                 struct ethtool_link_ksettings *cmd)
 {
        struct igb_adapter *adapter = netdev_priv(netdev);
        struct e1000_hw *hw = &adapter->hw;
@@ -152,76 +160,73 @@ static int igb_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
        struct e1000_sfp_flags *eth_flags = &dev_spec->eth_flags;
        u32 status;
        u32 speed;
+       u32 supported, advertising;
 
        status = rd32(E1000_STATUS);
        if (hw->phy.media_type == e1000_media_type_copper) {
 
-               ecmd->supported = (SUPPORTED_10baseT_Half |
-                                  SUPPORTED_10baseT_Full |
-                                  SUPPORTED_100baseT_Half |
-                                  SUPPORTED_100baseT_Full |
-                                  SUPPORTED_1000baseT_Full|
-                                  SUPPORTED_Autoneg |
-                                  SUPPORTED_TP |
-                                  SUPPORTED_Pause);
-               ecmd->advertising = ADVERTISED_TP;
+               supported = (SUPPORTED_10baseT_Half |
+                            SUPPORTED_10baseT_Full |
+                            SUPPORTED_100baseT_Half |
+                            SUPPORTED_100baseT_Full |
+                            SUPPORTED_1000baseT_Full|
+                            SUPPORTED_Autoneg |
+                            SUPPORTED_TP |
+                            SUPPORTED_Pause);
+               advertising = ADVERTISED_TP;
 
                if (hw->mac.autoneg == 1) {
-                       ecmd->advertising |= ADVERTISED_Autoneg;
+                       advertising |= ADVERTISED_Autoneg;
                        /* the e1000 autoneg seems to match ethtool nicely */
-                       ecmd->advertising |= hw->phy.autoneg_advertised;
+                       advertising |= hw->phy.autoneg_advertised;
                }
 
-               ecmd->port = PORT_TP;
-               ecmd->phy_address = hw->phy.addr;
-               ecmd->transceiver = XCVR_INTERNAL;
+               cmd->base.port = PORT_TP;
+               cmd->base.phy_address = hw->phy.addr;
        } else {
-               ecmd->supported = (SUPPORTED_FIBRE |
-                                  SUPPORTED_1000baseKX_Full |
-                                  SUPPORTED_Autoneg |
-                                  SUPPORTED_Pause);
-               ecmd->advertising = (ADVERTISED_FIBRE |
-                                    ADVERTISED_1000baseKX_Full);
+               supported = (SUPPORTED_FIBRE |
+                            SUPPORTED_1000baseKX_Full |
+                            SUPPORTED_Autoneg |
+                            SUPPORTED_Pause);
+               advertising = (ADVERTISED_FIBRE |
+                              ADVERTISED_1000baseKX_Full);
                if (hw->mac.type == e1000_i354) {
                        if ((hw->device_id ==
                             E1000_DEV_ID_I354_BACKPLANE_2_5GBPS) &&
                            !(status & E1000_STATUS_2P5_SKU_OVER)) {
-                               ecmd->supported |= SUPPORTED_2500baseX_Full;
-                               ecmd->supported &=
-                                       ~SUPPORTED_1000baseKX_Full;
-                               ecmd->advertising |= ADVERTISED_2500baseX_Full;
-                               ecmd->advertising &=
-                                       ~ADVERTISED_1000baseKX_Full;
+                               supported |= SUPPORTED_2500baseX_Full;
+                               supported &= ~SUPPORTED_1000baseKX_Full;
+                               advertising |= ADVERTISED_2500baseX_Full;
+                               advertising &= ~ADVERTISED_1000baseKX_Full;
                        }
                }
                if (eth_flags->e100_base_fx) {
-                       ecmd->supported |= SUPPORTED_100baseT_Full;
-                       ecmd->advertising |= ADVERTISED_100baseT_Full;
+                       supported |= SUPPORTED_100baseT_Full;
+                       advertising |= ADVERTISED_100baseT_Full;
                }
                if (hw->mac.autoneg == 1)
-                       ecmd->advertising |= ADVERTISED_Autoneg;
+                       advertising |= ADVERTISED_Autoneg;
 
-               ecmd->port = PORT_FIBRE;
-               ecmd->transceiver = XCVR_EXTERNAL;
+               cmd->base.port = PORT_FIBRE;
        }
        if (hw->mac.autoneg != 1)
-               ecmd->advertising &= ~(ADVERTISED_Pause |
-                                      ADVERTISED_Asym_Pause);
+               advertising &= ~(ADVERTISED_Pause |
+                                ADVERTISED_Asym_Pause);
 
        switch (hw->fc.requested_mode) {
        case e1000_fc_full:
-               ecmd->advertising |= ADVERTISED_Pause;
+               advertising |= ADVERTISED_Pause;
                break;
        case e1000_fc_rx_pause:
-               ecmd->advertising |= (ADVERTISED_Pause |
-                                     ADVERTISED_Asym_Pause);
+               advertising |= (ADVERTISED_Pause |
+                               ADVERTISED_Asym_Pause);
                break;
        case e1000_fc_tx_pause:
-               ecmd->advertising |=  ADVERTISED_Asym_Pause;
+               advertising |=  ADVERTISED_Asym_Pause;
                break;
        default:
-               ecmd->advertising &= ~(ADVERTISED_Pause |
-                                      ADVERTISED_Asym_Pause);
+               advertising &= ~(ADVERTISED_Pause |
+                                ADVERTISED_Asym_Pause);
        }
        if (status & E1000_STATUS_LU) {
                if ((status & E1000_STATUS_2P5_SKU) &&
@@ -236,39 +241,46 @@ static int igb_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
                }
                if ((status & E1000_STATUS_FD) ||
                    hw->phy.media_type != e1000_media_type_copper)
-                       ecmd->duplex = DUPLEX_FULL;
+                       cmd->base.duplex = DUPLEX_FULL;
                else
-                       ecmd->duplex = DUPLEX_HALF;
+                       cmd->base.duplex = DUPLEX_HALF;
        } else {
                speed = SPEED_UNKNOWN;
-               ecmd->duplex = DUPLEX_UNKNOWN;
+               cmd->base.duplex = DUPLEX_UNKNOWN;
        }
-       ethtool_cmd_speed_set(ecmd, speed);
+       cmd->base.speed = speed;
        if ((hw->phy.media_type == e1000_media_type_fiber) ||
            hw->mac.autoneg)
-               ecmd->autoneg = AUTONEG_ENABLE;
+               cmd->base.autoneg = AUTONEG_ENABLE;
        else
-               ecmd->autoneg = AUTONEG_DISABLE;
+               cmd->base.autoneg = AUTONEG_DISABLE;
 
        /* MDI-X => 2; MDI =>1; Invalid =>0 */
        if (hw->phy.media_type == e1000_media_type_copper)
-               ecmd->eth_tp_mdix = hw->phy.is_mdix ? ETH_TP_MDI_X :
+               cmd->base.eth_tp_mdix = hw->phy.is_mdix ? ETH_TP_MDI_X :
                                                      ETH_TP_MDI;
        else
-               ecmd->eth_tp_mdix = ETH_TP_MDI_INVALID;
+               cmd->base.eth_tp_mdix = ETH_TP_MDI_INVALID;
 
        if (hw->phy.mdix == AUTO_ALL_MODES)
-               ecmd->eth_tp_mdix_ctrl = ETH_TP_MDI_AUTO;
+               cmd->base.eth_tp_mdix_ctrl = ETH_TP_MDI_AUTO;
        else
-               ecmd->eth_tp_mdix_ctrl = hw->phy.mdix;
+               cmd->base.eth_tp_mdix_ctrl = hw->phy.mdix;
+
+       ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
+                                               supported);
+       ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
+                                               advertising);
 
        return 0;
 }
 
-static int igb_set_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
+static int igb_set_link_ksettings(struct net_device *netdev,
+                                 const struct ethtool_link_ksettings *cmd)
 {
        struct igb_adapter *adapter = netdev_priv(netdev);
        struct e1000_hw *hw = &adapter->hw;
+       u32 advertising;
 
        /* When SoL/IDER sessions are active, autoneg/speed/duplex
         * cannot be changed
@@ -283,12 +295,12 @@ static int igb_set_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
         * some hardware doesn't allow MDI setting when speed or
         * duplex is forced.
         */
-       if (ecmd->eth_tp_mdix_ctrl) {
+       if (cmd->base.eth_tp_mdix_ctrl) {
                if (hw->phy.media_type != e1000_media_type_copper)
                        return -EOPNOTSUPP;
 
-               if ((ecmd->eth_tp_mdix_ctrl != ETH_TP_MDI_AUTO) &&
-                   (ecmd->autoneg != AUTONEG_ENABLE)) {
+               if ((cmd->base.eth_tp_mdix_ctrl != ETH_TP_MDI_AUTO) &&
+                   (cmd->base.autoneg != AUTONEG_ENABLE)) {
                        dev_err(&adapter->pdev->dev, "forcing MDI/MDI-X state is not supported when link speed and/or duplex are forced\n");
                        return -EINVAL;
                }
@@ -297,10 +309,13 @@ static int igb_set_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
        while (test_and_set_bit(__IGB_RESETTING, &adapter->state))
                usleep_range(1000, 2000);
 
-       if (ecmd->autoneg == AUTONEG_ENABLE) {
+       ethtool_convert_link_mode_to_legacy_u32(&advertising,
+                                               cmd->link_modes.advertising);
+
+       if (cmd->base.autoneg == AUTONEG_ENABLE) {
                hw->mac.autoneg = 1;
                if (hw->phy.media_type == e1000_media_type_fiber) {
-                       hw->phy.autoneg_advertised = ecmd->advertising |
+                       hw->phy.autoneg_advertised = advertising |
                                                     ADVERTISED_FIBRE |
                                                     ADVERTISED_Autoneg;
                        switch (adapter->link_speed) {
@@ -320,31 +335,31 @@ static int igb_set_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
                                break;
                        }
                } else {
-                       hw->phy.autoneg_advertised = ecmd->advertising |
+                       hw->phy.autoneg_advertised = advertising |
                                                     ADVERTISED_TP |
                                                     ADVERTISED_Autoneg;
                }
-               ecmd->advertising = hw->phy.autoneg_advertised;
+               advertising = hw->phy.autoneg_advertised;
                if (adapter->fc_autoneg)
                        hw->fc.requested_mode = e1000_fc_default;
        } else {
-               u32 speed = ethtool_cmd_speed(ecmd);
+               u32 speed = cmd->base.speed;
                /* calling this overrides forced MDI setting */
-               if (igb_set_spd_dplx(adapter, speed, ecmd->duplex)) {
+               if (igb_set_spd_dplx(adapter, speed, cmd->base.duplex)) {
                        clear_bit(__IGB_RESETTING, &adapter->state);
                        return -EINVAL;
                }
        }
 
        /* MDI-X => 2; MDI => 1; Auto => 3 */
-       if (ecmd->eth_tp_mdix_ctrl) {
+       if (cmd->base.eth_tp_mdix_ctrl) {
                /* fix up the value for auto (3 => 0) as zero is mapped
                 * internally to auto
                 */
-               if (ecmd->eth_tp_mdix_ctrl == ETH_TP_MDI_AUTO)
+               if (cmd->base.eth_tp_mdix_ctrl == ETH_TP_MDI_AUTO)
                        hw->phy.mdix = AUTO_ALL_MODES;
                else
-                       hw->phy.mdix = ecmd->eth_tp_mdix_ctrl;
+                       hw->phy.mdix = cmd->base.eth_tp_mdix_ctrl;
        }
 
        /* reset the link */
@@ -852,6 +867,8 @@ static void igb_get_drvinfo(struct net_device *netdev,
                sizeof(drvinfo->fw_version));
        strlcpy(drvinfo->bus_info, pci_name(adapter->pdev),
                sizeof(drvinfo->bus_info));
+
+       drvinfo->n_priv_flags = IGB_PRIV_FLAGS_STR_LEN;
 }
 
 static void igb_get_ringparam(struct net_device *netdev,
@@ -1811,14 +1828,14 @@ static int igb_clean_test_rings(struct igb_ring *rx_ring,
        tx_ntc = tx_ring->next_to_clean;
        rx_desc = IGB_RX_DESC(rx_ring, rx_ntc);
 
-       while (igb_test_staterr(rx_desc, E1000_RXD_STAT_DD)) {
+       while (rx_desc->wb.upper.length) {
                /* check Rx buffer */
                rx_buffer_info = &rx_ring->rx_buffer_info[rx_ntc];
 
                /* sync Rx buffer for CPU read */
                dma_sync_single_for_cpu(rx_ring->dev,
                                        rx_buffer_info->dma,
-                                       IGB_RX_BUFSZ,
+                                       size,
                                        DMA_FROM_DEVICE);
 
                /* verify contents of skb */
@@ -1828,12 +1845,21 @@ static int igb_clean_test_rings(struct igb_ring *rx_ring,
                /* sync Rx buffer for device write */
                dma_sync_single_for_device(rx_ring->dev,
                                           rx_buffer_info->dma,
-                                          IGB_RX_BUFSZ,
+                                          size,
                                           DMA_FROM_DEVICE);
 
                /* unmap buffer on Tx side */
                tx_buffer_info = &tx_ring->tx_buffer_info[tx_ntc];
-               igb_unmap_and_free_tx_resource(tx_ring, tx_buffer_info);
+
+               /* Free all the Tx ring sk_buffs */
+               dev_kfree_skb_any(tx_buffer_info->skb);
+
+               /* unmap skb header data */
+               dma_unmap_single(tx_ring->dev,
+                                dma_unmap_addr(tx_buffer_info, dma),
+                                dma_unmap_len(tx_buffer_info, len),
+                                DMA_TO_DEVICE);
+               dma_unmap_len_set(tx_buffer_info, len, 0);
 
                /* increment Rx/Tx next to clean counters */
                rx_ntc++;
@@ -2271,6 +2297,8 @@ static int igb_get_sset_count(struct net_device *netdev, int sset)
                return IGB_STATS_LEN;
        case ETH_SS_TEST:
                return IGB_TEST_LEN;
+       case ETH_SS_PRIV_FLAGS:
+               return IGB_PRIV_FLAGS_STR_LEN;
        default:
                return -ENOTSUPP;
        }
@@ -2376,6 +2404,10 @@ static void igb_get_strings(struct net_device *netdev, u32 stringset, u8 *data)
                }
                /* BUG_ON(p - data != IGB_STATS_LEN * ETH_GSTRING_LEN); */
                break;
+       case ETH_SS_PRIV_FLAGS:
+               memcpy(data, igb_priv_flags_strings,
+                      IGB_PRIV_FLAGS_STR_LEN * ETH_GSTRING_LEN);
+               break;
        }
 }
 
@@ -3388,9 +3420,38 @@ static int igb_set_channels(struct net_device *netdev,
        return 0;
 }
 
+static u32 igb_get_priv_flags(struct net_device *netdev)
+{
+       struct igb_adapter *adapter = netdev_priv(netdev);
+       u32 priv_flags = 0;
+
+       if (adapter->flags & IGB_FLAG_RX_LEGACY)
+               priv_flags |= IGB_PRIV_FLAGS_LEGACY_RX;
+
+       return priv_flags;
+}
+
+static int igb_set_priv_flags(struct net_device *netdev, u32 priv_flags)
+{
+       struct igb_adapter *adapter = netdev_priv(netdev);
+       unsigned int flags = adapter->flags;
+
+       flags &= ~IGB_FLAG_RX_LEGACY;
+       if (priv_flags & IGB_PRIV_FLAGS_LEGACY_RX)
+               flags |= IGB_FLAG_RX_LEGACY;
+
+       if (flags != adapter->flags) {
+               adapter->flags = flags;
+
+               /* reset interface to repopulate queues */
+               if (netif_running(netdev))
+                       igb_reinit_locked(adapter);
+       }
+
+       return 0;
+}
+
 static const struct ethtool_ops igb_ethtool_ops = {
-       .get_settings           = igb_get_settings,
-       .set_settings           = igb_set_settings,
        .get_drvinfo            = igb_get_drvinfo,
        .get_regs_len           = igb_get_regs_len,
        .get_regs               = igb_get_regs,
@@ -3426,8 +3487,12 @@ static const struct ethtool_ops igb_ethtool_ops = {
        .set_rxfh               = igb_set_rxfh,
        .get_channels           = igb_get_channels,
        .set_channels           = igb_set_channels,
+       .get_priv_flags         = igb_get_priv_flags,
+       .set_priv_flags         = igb_set_priv_flags,
        .begin                  = igb_ethtool_begin,
        .complete               = igb_ethtool_complete,
+       .get_link_ksettings     = igb_get_link_ksettings,
+       .set_link_ksettings     = igb_set_link_ksettings,
 };
 
 void igb_set_ethtool_ops(struct net_device *netdev)
index be456bae816906e24338006a8b3597b539f86959..26a821fcd22012884843fbe3d81357cc4bcff985 100644 (file)
@@ -554,7 +554,7 @@ rx_ring_summary:
                                          16, 1,
                                          page_address(buffer_info->page) +
                                                      buffer_info->page_offset,
-                                         IGB_RX_BUFSZ, true);
+                                         igb_rx_bufsz(rx_ring), true);
                                }
                        }
                }
@@ -3293,7 +3293,7 @@ int igb_setup_tx_resources(struct igb_ring *tx_ring)
 
        size = sizeof(struct igb_tx_buffer) * tx_ring->count;
 
-       tx_ring->tx_buffer_info = vzalloc(size);
+       tx_ring->tx_buffer_info = vmalloc(size);
        if (!tx_ring->tx_buffer_info)
                goto err;
 
@@ -3404,6 +3404,10 @@ void igb_configure_tx_ring(struct igb_adapter *adapter,
        txdctl |= IGB_TX_HTHRESH << 8;
        txdctl |= IGB_TX_WTHRESH << 16;
 
+       /* reinitialize tx_buffer_info */
+       memset(ring->tx_buffer_info, 0,
+              sizeof(struct igb_tx_buffer) * ring->count);
+
        txdctl |= E1000_TXDCTL_QUEUE_ENABLE;
        wr32(E1000_TXDCTL(reg_idx), txdctl);
 }
@@ -3435,7 +3439,7 @@ int igb_setup_rx_resources(struct igb_ring *rx_ring)
 
        size = sizeof(struct igb_rx_buffer) * rx_ring->count;
 
-       rx_ring->rx_buffer_info = vzalloc(size);
+       rx_ring->rx_buffer_info = vmalloc(size);
        if (!rx_ring->rx_buffer_info)
                goto err;
 
@@ -3720,6 +3724,7 @@ void igb_configure_rx_ring(struct igb_adapter *adapter,
                           struct igb_ring *ring)
 {
        struct e1000_hw *hw = &adapter->hw;
+       union e1000_adv_rx_desc *rx_desc;
        u64 rdba = ring->dma;
        int reg_idx = ring->reg_idx;
        u32 srrctl = 0, rxdctl = 0;
@@ -3741,7 +3746,10 @@ void igb_configure_rx_ring(struct igb_adapter *adapter,
 
        /* set descriptor configuration */
        srrctl = IGB_RX_HDR_LEN << E1000_SRRCTL_BSIZEHDRSIZE_SHIFT;
-       srrctl |= IGB_RX_BUFSZ >> E1000_SRRCTL_BSIZEPKT_SHIFT;
+       if (ring_uses_large_buffer(ring))
+               srrctl |= IGB_RXBUFFER_3072 >> E1000_SRRCTL_BSIZEPKT_SHIFT;
+       else
+               srrctl |= IGB_RXBUFFER_2048 >> E1000_SRRCTL_BSIZEPKT_SHIFT;
        srrctl |= E1000_SRRCTL_DESCTYPE_ADV_ONEBUF;
        if (hw->mac.type >= e1000_82580)
                srrctl |= E1000_SRRCTL_TIMESTAMP;
@@ -3758,11 +3766,39 @@ void igb_configure_rx_ring(struct igb_adapter *adapter,
        rxdctl |= IGB_RX_HTHRESH << 8;
        rxdctl |= IGB_RX_WTHRESH << 16;
 
+       /* initialize rx_buffer_info */
+       memset(ring->rx_buffer_info, 0,
+              sizeof(struct igb_rx_buffer) * ring->count);
+
+       /* initialize Rx descriptor 0 */
+       rx_desc = IGB_RX_DESC(ring, 0);
+       rx_desc->wb.upper.length = 0;
+
        /* enable receive descriptor fetching */
        rxdctl |= E1000_RXDCTL_QUEUE_ENABLE;
        wr32(E1000_RXDCTL(reg_idx), rxdctl);
 }
 
+static void igb_set_rx_buffer_len(struct igb_adapter *adapter,
+                                 struct igb_ring *rx_ring)
+{
+       /* set build_skb and buffer size flags */
+       clear_ring_build_skb_enabled(rx_ring);
+       clear_ring_uses_large_buffer(rx_ring);
+
+       if (adapter->flags & IGB_FLAG_RX_LEGACY)
+               return;
+
+       set_ring_build_skb_enabled(rx_ring);
+
+#if (PAGE_SIZE < 8192)
+       if (adapter->max_frame_size <= IGB_MAX_FRAME_BUILD_SKB)
+               return;
+
+       set_ring_uses_large_buffer(rx_ring);
+#endif
+}
+
 /**
  *  igb_configure_rx - Configure receive Unit after Reset
  *  @adapter: board private structure
@@ -3780,8 +3816,12 @@ static void igb_configure_rx(struct igb_adapter *adapter)
        /* Setup the HW Rx Head and Tail Descriptor Pointers and
         * the Base and Length of the Rx Descriptor Ring
         */
-       for (i = 0; i < adapter->num_rx_queues; i++)
-               igb_configure_rx_ring(adapter, adapter->rx_ring[i]);
+       for (i = 0; i < adapter->num_rx_queues; i++) {
+               struct igb_ring *rx_ring = adapter->rx_ring[i];
+
+               igb_set_rx_buffer_len(adapter, rx_ring);
+               igb_configure_rx_ring(adapter, rx_ring);
+       }
 }
 
 /**
@@ -3822,55 +3862,63 @@ static void igb_free_all_tx_resources(struct igb_adapter *adapter)
                        igb_free_tx_resources(adapter->tx_ring[i]);
 }
 
-void igb_unmap_and_free_tx_resource(struct igb_ring *ring,
-                                   struct igb_tx_buffer *tx_buffer)
-{
-       if (tx_buffer->skb) {
-               dev_kfree_skb_any(tx_buffer->skb);
-               if (dma_unmap_len(tx_buffer, len))
-                       dma_unmap_single(ring->dev,
-                                        dma_unmap_addr(tx_buffer, dma),
-                                        dma_unmap_len(tx_buffer, len),
-                                        DMA_TO_DEVICE);
-       } else if (dma_unmap_len(tx_buffer, len)) {
-               dma_unmap_page(ring->dev,
-                              dma_unmap_addr(tx_buffer, dma),
-                              dma_unmap_len(tx_buffer, len),
-                              DMA_TO_DEVICE);
-       }
-       tx_buffer->next_to_watch = NULL;
-       tx_buffer->skb = NULL;
-       dma_unmap_len_set(tx_buffer, len, 0);
-       /* buffer_info must be completely set up in the transmit path */
-}
-
 /**
  *  igb_clean_tx_ring - Free Tx Buffers
  *  @tx_ring: ring to be cleaned
  **/
 static void igb_clean_tx_ring(struct igb_ring *tx_ring)
 {
-       struct igb_tx_buffer *buffer_info;
-       unsigned long size;
-       u16 i;
+       u16 i = tx_ring->next_to_clean;
+       struct igb_tx_buffer *tx_buffer = &tx_ring->tx_buffer_info[i];
 
-       if (!tx_ring->tx_buffer_info)
-               return;
-       /* Free all the Tx ring sk_buffs */
+       while (i != tx_ring->next_to_use) {
+               union e1000_adv_tx_desc *eop_desc, *tx_desc;
 
-       for (i = 0; i < tx_ring->count; i++) {
-               buffer_info = &tx_ring->tx_buffer_info[i];
-               igb_unmap_and_free_tx_resource(tx_ring, buffer_info);
-       }
+               /* Free all the Tx ring sk_buffs */
+               dev_kfree_skb_any(tx_buffer->skb);
 
-       netdev_tx_reset_queue(txring_txq(tx_ring));
+               /* unmap skb header data */
+               dma_unmap_single(tx_ring->dev,
+                                dma_unmap_addr(tx_buffer, dma),
+                                dma_unmap_len(tx_buffer, len),
+                                DMA_TO_DEVICE);
 
-       size = sizeof(struct igb_tx_buffer) * tx_ring->count;
-       memset(tx_ring->tx_buffer_info, 0, size);
+               /* check for eop_desc to determine the end of the packet */
+               eop_desc = tx_buffer->next_to_watch;
+               tx_desc = IGB_TX_DESC(tx_ring, i);
+
+               /* unmap remaining buffers */
+               while (tx_desc != eop_desc) {
+                       tx_buffer++;
+                       tx_desc++;
+                       i++;
+                       if (unlikely(i == tx_ring->count)) {
+                               i = 0;
+                               tx_buffer = tx_ring->tx_buffer_info;
+                               tx_desc = IGB_TX_DESC(tx_ring, 0);
+                       }
+
+                       /* unmap any remaining paged data */
+                       if (dma_unmap_len(tx_buffer, len))
+                               dma_unmap_page(tx_ring->dev,
+                                              dma_unmap_addr(tx_buffer, dma),
+                                              dma_unmap_len(tx_buffer, len),
+                                              DMA_TO_DEVICE);
+               }
 
-       /* Zero out the descriptor ring */
-       memset(tx_ring->desc, 0, tx_ring->size);
+               /* move us one more past the eop_desc for start of next pkt */
+               tx_buffer++;
+               i++;
+               if (unlikely(i == tx_ring->count)) {
+                       i = 0;
+                       tx_buffer = tx_ring->tx_buffer_info;
+               }
+       }
 
+       /* reset BQL for queue */
+       netdev_tx_reset_queue(txring_txq(tx_ring));
+
+       /* reset next_to_use and next_to_clean */
        tx_ring->next_to_use = 0;
        tx_ring->next_to_clean = 0;
 }
@@ -3932,50 +3980,39 @@ static void igb_free_all_rx_resources(struct igb_adapter *adapter)
  **/
 static void igb_clean_rx_ring(struct igb_ring *rx_ring)
 {
-       unsigned long size;
-       u16 i;
+       u16 i = rx_ring->next_to_clean;
 
        if (rx_ring->skb)
                dev_kfree_skb(rx_ring->skb);
        rx_ring->skb = NULL;
 
-       if (!rx_ring->rx_buffer_info)
-               return;
-
        /* Free all the Rx ring sk_buffs */
-       for (i = 0; i < rx_ring->count; i++) {
+       while (i != rx_ring->next_to_alloc) {
                struct igb_rx_buffer *buffer_info = &rx_ring->rx_buffer_info[i];
 
-               if (!buffer_info->page)
-                       continue;
-
                /* Invalidate cache lines that may have been written to by
                 * device so that we avoid corrupting memory.
                 */
                dma_sync_single_range_for_cpu(rx_ring->dev,
                                              buffer_info->dma,
                                              buffer_info->page_offset,
-                                             IGB_RX_BUFSZ,
+                                             igb_rx_bufsz(rx_ring),
                                              DMA_FROM_DEVICE);
 
                /* free resources associated with mapping */
                dma_unmap_page_attrs(rx_ring->dev,
                                     buffer_info->dma,
-                                    PAGE_SIZE,
+                                    igb_rx_pg_size(rx_ring),
                                     DMA_FROM_DEVICE,
-                                    DMA_ATTR_SKIP_CPU_SYNC);
+                                    IGB_RX_DMA_ATTR);
                __page_frag_cache_drain(buffer_info->page,
                                        buffer_info->pagecnt_bias);
 
-               buffer_info->page = NULL;
+               i++;
+               if (i == rx_ring->count)
+                       i = 0;
        }
 
-       size = sizeof(struct igb_rx_buffer) * rx_ring->count;
-       memset(rx_ring->rx_buffer_info, 0, size);
-
-       /* Zero out the descriptor ring */
-       memset(rx_ring->desc, 0, rx_ring->size);
-
        rx_ring->next_to_alloc = 0;
        rx_ring->next_to_clean = 0;
        rx_ring->next_to_use = 0;
@@ -4240,7 +4277,7 @@ static void igb_set_rx_mode(struct net_device *netdev)
        struct igb_adapter *adapter = netdev_priv(netdev);
        struct e1000_hw *hw = &adapter->hw;
        unsigned int vfn = adapter->vfs_allocated_count;
-       u32 rctl = 0, vmolr = 0;
+       u32 rctl = 0, vmolr = 0, rlpml = MAX_JUMBO_FRAME_SIZE;
        int count;
 
        /* Check for Promiscuous and All Multicast modes */
@@ -4298,6 +4335,14 @@ static void igb_set_rx_mode(struct net_device *netdev)
                                     E1000_RCTL_VFE);
        wr32(E1000_RCTL, rctl);
 
+#if (PAGE_SIZE < 8192)
+       if (!adapter->vfs_allocated_count) {
+               if (adapter->max_frame_size <= IGB_MAX_FRAME_BUILD_SKB)
+                       rlpml = IGB_MAX_FRAME_BUILD_SKB;
+       }
+#endif
+       wr32(E1000_RLPML, rlpml);
+
        /* In order to support SR-IOV and eventually VMDq it is necessary to set
         * the VMOLR to enable the appropriate modes.  Without this workaround
         * we will have issues with VLAN tag stripping not being done for frames
@@ -4312,12 +4357,17 @@ static void igb_set_rx_mode(struct net_device *netdev)
        vmolr |= rd32(E1000_VMOLR(vfn)) &
                 ~(E1000_VMOLR_ROPE | E1000_VMOLR_MPME | E1000_VMOLR_ROMPE);
 
-       /* enable Rx jumbo frames, no need for restriction */
+       /* enable Rx jumbo frames, restrict as needed to support build_skb */
        vmolr &= ~E1000_VMOLR_RLPML_MASK;
-       vmolr |= MAX_JUMBO_FRAME_SIZE | E1000_VMOLR_LPE;
+#if (PAGE_SIZE < 8192)
+       if (adapter->max_frame_size <= IGB_MAX_FRAME_BUILD_SKB)
+               vmolr |= IGB_MAX_FRAME_BUILD_SKB;
+       else
+#endif
+               vmolr |= MAX_JUMBO_FRAME_SIZE;
+       vmolr |= E1000_VMOLR_LPE;
 
        wr32(E1000_VMOLR(vfn), vmolr);
-       wr32(E1000_RLPML, MAX_JUMBO_FRAME_SIZE);
 
        igb_restore_vf_multicasts(adapter);
 }
@@ -5256,18 +5306,32 @@ static void igb_tx_map(struct igb_ring *tx_ring,
 
 dma_error:
        dev_err(tx_ring->dev, "TX DMA map failed\n");
+       tx_buffer = &tx_ring->tx_buffer_info[i];
 
        /* clear dma mappings for failed tx_buffer_info map */
-       for (;;) {
+       while (tx_buffer != first) {
+               if (dma_unmap_len(tx_buffer, len))
+                       dma_unmap_page(tx_ring->dev,
+                                      dma_unmap_addr(tx_buffer, dma),
+                                      dma_unmap_len(tx_buffer, len),
+                                      DMA_TO_DEVICE);
+               dma_unmap_len_set(tx_buffer, len, 0);
+
+               if (i-- == 0)
+                       i += tx_ring->count;
                tx_buffer = &tx_ring->tx_buffer_info[i];
-               igb_unmap_and_free_tx_resource(tx_ring, tx_buffer);
-               if (tx_buffer == first)
-                       break;
-               if (i == 0)
-                       i = tx_ring->count;
-               i--;
        }
 
+       if (dma_unmap_len(tx_buffer, len))
+               dma_unmap_single(tx_ring->dev,
+                                dma_unmap_addr(tx_buffer, dma),
+                                dma_unmap_len(tx_buffer, len),
+                                DMA_TO_DEVICE);
+       dma_unmap_len_set(tx_buffer, len, 0);
+
+       dev_kfree_skb_any(tx_buffer->skb);
+       tx_buffer->skb = NULL;
+
        tx_ring->next_to_use = i;
 }
 
@@ -5339,7 +5403,8 @@ netdev_tx_t igb_xmit_frame_ring(struct sk_buff *skb,
        return NETDEV_TX_OK;
 
 out_drop:
-       igb_unmap_and_free_tx_resource(tx_ring, first);
+       dev_kfree_skb_any(first->skb);
+       first->skb = NULL;
 
        return NETDEV_TX_OK;
 }
@@ -6686,7 +6751,6 @@ static bool igb_clean_tx_irq(struct igb_q_vector *q_vector, int napi_budget)
                                 DMA_TO_DEVICE);
 
                /* clear tx_buffer data */
-               tx_buffer->skb = NULL;
                dma_unmap_len_set(tx_buffer, len, 0);
 
                /* clear last DMA location and unmap remaining buffers */
@@ -6822,8 +6886,14 @@ static void igb_reuse_rx_page(struct igb_ring *rx_ring,
        nta++;
        rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0;
 
-       /* transfer page from old buffer to new buffer */
-       *new_buff = *old_buff;
+       /* Transfer page from old buffer to new buffer.
+        * Move each member individually to avoid possible store
+        * forwarding stalls.
+        */
+       new_buff->dma           = old_buff->dma;
+       new_buff->page          = old_buff->page;
+       new_buff->page_offset   = old_buff->page_offset;
+       new_buff->pagecnt_bias  = old_buff->pagecnt_bias;
 }
 
 static inline bool igb_page_is_reserved(struct page *page)
@@ -6831,11 +6901,10 @@ static inline bool igb_page_is_reserved(struct page *page)
        return (page_to_nid(page) != numa_mem_id()) || page_is_pfmemalloc(page);
 }
 
-static bool igb_can_reuse_rx_page(struct igb_rx_buffer *rx_buffer,
-                                 struct page *page,
-                                 unsigned int truesize)
+static bool igb_can_reuse_rx_page(struct igb_rx_buffer *rx_buffer)
 {
-       unsigned int pagecnt_bias = rx_buffer->pagecnt_bias--;
+       unsigned int pagecnt_bias = rx_buffer->pagecnt_bias;
+       struct page *page = rx_buffer->page;
 
        /* avoid re-using remote pages */
        if (unlikely(igb_page_is_reserved(page)))
@@ -6843,16 +6912,13 @@ static bool igb_can_reuse_rx_page(struct igb_rx_buffer *rx_buffer,
 
 #if (PAGE_SIZE < 8192)
        /* if we are only owner of page we can reuse it */
-       if (unlikely(page_ref_count(page) != pagecnt_bias))
+       if (unlikely((page_ref_count(page) - pagecnt_bias) > 1))
                return false;
-
-       /* flip page offset to other buffer */
-       rx_buffer->page_offset ^= IGB_RX_BUFSZ;
 #else
-       /* move offset up to the next cache line */
-       rx_buffer->page_offset += truesize;
+#define IGB_LAST_OFFSET \
+       (SKB_WITH_OVERHEAD(PAGE_SIZE) - IGB_RXBUFFER_2048)
 
-       if (rx_buffer->page_offset > (PAGE_SIZE - IGB_RX_BUFSZ))
+       if (rx_buffer->page_offset > IGB_LAST_OFFSET)
                return false;
 #endif
 
@@ -6860,7 +6926,7 @@ static bool igb_can_reuse_rx_page(struct igb_rx_buffer *rx_buffer,
         * the pagecnt_bias and page count so that we fully restock the
         * number of references the driver holds.
         */
-       if (unlikely(pagecnt_bias == 1)) {
+       if (unlikely(!pagecnt_bias)) {
                page_ref_add(page, USHRT_MAX);
                rx_buffer->pagecnt_bias = USHRT_MAX;
        }
@@ -6872,34 +6938,56 @@ static bool igb_can_reuse_rx_page(struct igb_rx_buffer *rx_buffer,
  *  igb_add_rx_frag - Add contents of Rx buffer to sk_buff
  *  @rx_ring: rx descriptor ring to transact packets on
  *  @rx_buffer: buffer containing page to add
- *  @rx_desc: descriptor containing length of buffer written by hardware
  *  @skb: sk_buff to place the data into
+ *  @size: size of buffer to be added
  *
  *  This function will add the data contained in rx_buffer->page to the skb.
- *  This is done either through a direct copy if the data in the buffer is
- *  less than the skb header size, otherwise it will just attach the page as
- *  a frag to the skb.
- *
- *  The function will then update the page offset if necessary and return
- *  true if the buffer can be reused by the adapter.
  **/
-static bool igb_add_rx_frag(struct igb_ring *rx_ring,
+static void igb_add_rx_frag(struct igb_ring *rx_ring,
                            struct igb_rx_buffer *rx_buffer,
-                           unsigned int size,
-                           union e1000_adv_rx_desc *rx_desc,
-                           struct sk_buff *skb)
+                           struct sk_buff *skb,
+                           unsigned int size)
 {
-       struct page *page = rx_buffer->page;
-       unsigned char *va = page_address(page) + rx_buffer->page_offset;
 #if (PAGE_SIZE < 8192)
-       unsigned int truesize = IGB_RX_BUFSZ;
+       unsigned int truesize = igb_rx_pg_size(rx_ring) / 2;
+#else
+       unsigned int truesize = ring_uses_build_skb(rx_ring) ?
+                               SKB_DATA_ALIGN(IGB_SKB_PAD + size) :
+                               SKB_DATA_ALIGN(size);
+#endif
+       skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_buffer->page,
+                       rx_buffer->page_offset, size, truesize);
+#if (PAGE_SIZE < 8192)
+       rx_buffer->page_offset ^= truesize;
+#else
+       rx_buffer->page_offset += truesize;
+#endif
+}
+
+static struct sk_buff *igb_construct_skb(struct igb_ring *rx_ring,
+                                        struct igb_rx_buffer *rx_buffer,
+                                        union e1000_adv_rx_desc *rx_desc,
+                                        unsigned int size)
+{
+       void *va = page_address(rx_buffer->page) + rx_buffer->page_offset;
+#if (PAGE_SIZE < 8192)
+       unsigned int truesize = igb_rx_pg_size(rx_ring) / 2;
 #else
        unsigned int truesize = SKB_DATA_ALIGN(size);
 #endif
-       unsigned int pull_len;
+       unsigned int headlen;
+       struct sk_buff *skb;
 
-       if (unlikely(skb_is_nonlinear(skb)))
-               goto add_tail_frag;
+       /* prefetch first cache line of first page */
+       prefetch(va);
+#if L1_CACHE_BYTES < 128
+       prefetch(va + L1_CACHE_BYTES);
+#endif
+
+       /* allocate a skb to store the frags */
+       skb = napi_alloc_skb(&rx_ring->q_vector->napi, IGB_RX_HDR_LEN);
+       if (unlikely(!skb))
+               return NULL;
 
        if (unlikely(igb_test_staterr(rx_desc, E1000_RXDADV_STAT_TSIP))) {
                igb_ptp_rx_pktstamp(rx_ring->q_vector, va, skb);
@@ -6907,95 +6995,73 @@ static bool igb_add_rx_frag(struct igb_ring *rx_ring,
                size -= IGB_TS_HDR_LEN;
        }
 
-       if (likely(size <= IGB_RX_HDR_LEN)) {
-               memcpy(__skb_put(skb, size), va, ALIGN(size, sizeof(long)));
-
-               /* page is not reserved, we can reuse buffer as-is */
-               if (likely(!igb_page_is_reserved(page)))
-                       return true;
-
-               /* this page cannot be reused so discard it */
-               return false;
-       }
-
-       /* we need the header to contain the greater of either ETH_HLEN or
-        * 60 bytes if the skb->len is less than 60 for skb_pad.
-        */
-       pull_len = eth_get_headlen(va, IGB_RX_HDR_LEN);
+       /* Determine available headroom for copy */
+       headlen = size;
+       if (headlen > IGB_RX_HDR_LEN)
+               headlen = eth_get_headlen(va, IGB_RX_HDR_LEN);
 
        /* align pull length to size of long to optimize memcpy performance */
-       memcpy(__skb_put(skb, pull_len), va, ALIGN(pull_len, sizeof(long)));
+       memcpy(__skb_put(skb, headlen), va, ALIGN(headlen, sizeof(long)));
 
        /* update all of the pointers */
-       va += pull_len;
-       size -= pull_len;
-
-add_tail_frag:
-       skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page,
-                       (unsigned long)va & ~PAGE_MASK, size, truesize);
+       size -= headlen;
+       if (size) {
+               skb_add_rx_frag(skb, 0, rx_buffer->page,
+                               (va + headlen) - page_address(rx_buffer->page),
+                               size, truesize);
+#if (PAGE_SIZE < 8192)
+               rx_buffer->page_offset ^= truesize;
+#else
+               rx_buffer->page_offset += truesize;
+#endif
+       } else {
+               rx_buffer->pagecnt_bias++;
+       }
 
-       return igb_can_reuse_rx_page(rx_buffer, page, truesize);
+       return skb;
 }
 
-static struct sk_buff *igb_fetch_rx_buffer(struct igb_ring *rx_ring,
-                                          union e1000_adv_rx_desc *rx_desc,
-                                          struct sk_buff *skb)
+static struct sk_buff *igb_build_skb(struct igb_ring *rx_ring,
+                                    struct igb_rx_buffer *rx_buffer,
+                                    union e1000_adv_rx_desc *rx_desc,
+                                    unsigned int size)
 {
-       unsigned int size = le16_to_cpu(rx_desc->wb.upper.length);
-       struct igb_rx_buffer *rx_buffer;
-       struct page *page;
-
-       rx_buffer = &rx_ring->rx_buffer_info[rx_ring->next_to_clean];
-       page = rx_buffer->page;
-       prefetchw(page);
-
-       /* we are reusing so sync this buffer for CPU use */
-       dma_sync_single_range_for_cpu(rx_ring->dev,
-                                     rx_buffer->dma,
-                                     rx_buffer->page_offset,
-                                     size,
-                                     DMA_FROM_DEVICE);
-
-       if (likely(!skb)) {
-               void *page_addr = page_address(page) +
-                                 rx_buffer->page_offset;
+       void *va = page_address(rx_buffer->page) + rx_buffer->page_offset;
+#if (PAGE_SIZE < 8192)
+       unsigned int truesize = igb_rx_pg_size(rx_ring) / 2;
+#else
+       unsigned int truesize = SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) +
+                               SKB_DATA_ALIGN(IGB_SKB_PAD + size);
+#endif
+       struct sk_buff *skb;
 
-               /* prefetch first cache line of first page */
-               prefetch(page_addr);
+       /* prefetch first cache line of first page */
+       prefetch(va);
 #if L1_CACHE_BYTES < 128
-               prefetch(page_addr + L1_CACHE_BYTES);
+       prefetch(va + L1_CACHE_BYTES);
 #endif
 
-               /* allocate a skb to store the frags */
-               skb = napi_alloc_skb(&rx_ring->q_vector->napi, IGB_RX_HDR_LEN);
-               if (unlikely(!skb)) {
-                       rx_ring->rx_stats.alloc_failed++;
-                       return NULL;
-               }
+       /* build an skb around the page buffer */
+       skb = build_skb(va - IGB_SKB_PAD, truesize);
+       if (unlikely(!skb))
+               return NULL;
 
-               /* we will be copying header into skb->data in
-                * pskb_may_pull so it is in our interest to prefetch
-                * it now to avoid a possible cache miss
-                */
-               prefetchw(skb->data);
-       }
+       /* update pointers within the skb to store the data */
+       skb_reserve(skb, IGB_SKB_PAD);
+       __skb_put(skb, size);
 
-       /* pull page into skb */
-       if (igb_add_rx_frag(rx_ring, rx_buffer, size, rx_desc, skb)) {
-               /* hand second half of page back to the ring */
-               igb_reuse_rx_page(rx_ring, rx_buffer);
-       } else {
-               /* We are not reusing the buffer so unmap it and free
-                * any references we are holding to it
-                */
-               dma_unmap_page_attrs(rx_ring->dev, rx_buffer->dma,
-                                    PAGE_SIZE, DMA_FROM_DEVICE,
-                                    DMA_ATTR_SKIP_CPU_SYNC);
-               __page_frag_cache_drain(page, rx_buffer->pagecnt_bias);
+       /* pull timestamp out of packet data */
+       if (igb_test_staterr(rx_desc, E1000_RXDADV_STAT_TSIP)) {
+               igb_ptp_rx_pktstamp(rx_ring->q_vector, skb->data, skb);
+               __skb_pull(skb, IGB_TS_HDR_LEN);
        }
 
-       /* clear contents of rx_buffer */
-       rx_buffer->page = NULL;
+       /* update buffer offset */
+#if (PAGE_SIZE < 8192)
+       rx_buffer->page_offset ^= truesize;
+#else
+       rx_buffer->page_offset += truesize;
+#endif
 
        return skb;
 }
@@ -7154,6 +7220,47 @@ static void igb_process_skb_fields(struct igb_ring *rx_ring,
        skb->protocol = eth_type_trans(skb, rx_ring->netdev);
 }
 
+static struct igb_rx_buffer *igb_get_rx_buffer(struct igb_ring *rx_ring,
+                                              const unsigned int size)
+{
+       struct igb_rx_buffer *rx_buffer;
+
+       rx_buffer = &rx_ring->rx_buffer_info[rx_ring->next_to_clean];
+       prefetchw(rx_buffer->page);
+
+       /* we are reusing so sync this buffer for CPU use */
+       dma_sync_single_range_for_cpu(rx_ring->dev,
+                                     rx_buffer->dma,
+                                     rx_buffer->page_offset,
+                                     size,
+                                     DMA_FROM_DEVICE);
+
+       rx_buffer->pagecnt_bias--;
+
+       return rx_buffer;
+}
+
+static void igb_put_rx_buffer(struct igb_ring *rx_ring,
+                             struct igb_rx_buffer *rx_buffer)
+{
+       if (igb_can_reuse_rx_page(rx_buffer)) {
+               /* hand second half of page back to the ring */
+               igb_reuse_rx_page(rx_ring, rx_buffer);
+       } else {
+               /* We are not reusing the buffer so unmap it and free
+                * any references we are holding to it
+                */
+               dma_unmap_page_attrs(rx_ring->dev, rx_buffer->dma,
+                                    igb_rx_pg_size(rx_ring), DMA_FROM_DEVICE,
+                                    IGB_RX_DMA_ATTR);
+               __page_frag_cache_drain(rx_buffer->page,
+                                       rx_buffer->pagecnt_bias);
+       }
+
+       /* clear contents of rx_buffer */
+       rx_buffer->page = NULL;
+}
+
 static int igb_clean_rx_irq(struct igb_q_vector *q_vector, const int budget)
 {
        struct igb_ring *rx_ring = q_vector->rx.ring;
@@ -7163,6 +7270,8 @@ static int igb_clean_rx_irq(struct igb_q_vector *q_vector, const int budget)
 
        while (likely(total_packets < budget)) {
                union e1000_adv_rx_desc *rx_desc;
+               struct igb_rx_buffer *rx_buffer;
+               unsigned int size;
 
                /* return some buffers to hardware, one at a time is too slow */
                if (cleaned_count >= IGB_RX_BUFFER_WRITE) {
@@ -7171,8 +7280,8 @@ static int igb_clean_rx_irq(struct igb_q_vector *q_vector, const int budget)
                }
 
                rx_desc = IGB_RX_DESC(rx_ring, rx_ring->next_to_clean);
-
-               if (!rx_desc->wb.upper.status_error)
+               size = le16_to_cpu(rx_desc->wb.upper.length);
+               if (!size)
                        break;
 
                /* This memory barrier is needed to keep us from reading
@@ -7181,13 +7290,25 @@ static int igb_clean_rx_irq(struct igb_q_vector *q_vector, const int budget)
                 */
                dma_rmb();
 
+               rx_buffer = igb_get_rx_buffer(rx_ring, size);
+
                /* retrieve a buffer from the ring */
-               skb = igb_fetch_rx_buffer(rx_ring, rx_desc, skb);
+               if (skb)
+                       igb_add_rx_frag(rx_ring, rx_buffer, skb, size);
+               else if (ring_uses_build_skb(rx_ring))
+                       skb = igb_build_skb(rx_ring, rx_buffer, rx_desc, size);
+               else
+                       skb = igb_construct_skb(rx_ring, rx_buffer,
+                                               rx_desc, size);
 
                /* exit if we failed to retrieve a buffer */
-               if (!skb)
+               if (!skb) {
+                       rx_ring->rx_stats.alloc_failed++;
+                       rx_buffer->pagecnt_bias++;
                        break;
+               }
 
+               igb_put_rx_buffer(rx_ring, rx_buffer);
                cleaned_count++;
 
                /* fetch next buffer in frame if non-eop */
@@ -7231,6 +7352,11 @@ static int igb_clean_rx_irq(struct igb_q_vector *q_vector, const int budget)
        return total_packets;
 }
 
+static inline unsigned int igb_rx_offset(struct igb_ring *rx_ring)
+{
+       return ring_uses_build_skb(rx_ring) ? IGB_SKB_PAD : 0;
+}
+
 static bool igb_alloc_mapped_page(struct igb_ring *rx_ring,
                                  struct igb_rx_buffer *bi)
 {
@@ -7242,21 +7368,23 @@ static bool igb_alloc_mapped_page(struct igb_ring *rx_ring,
                return true;
 
        /* alloc new page for storage */
-       page = dev_alloc_page();
+       page = dev_alloc_pages(igb_rx_pg_order(rx_ring));
        if (unlikely(!page)) {
                rx_ring->rx_stats.alloc_failed++;
                return false;
        }
 
        /* map page for use */
-       dma = dma_map_page_attrs(rx_ring->dev, page, 0, PAGE_SIZE,
-                                DMA_FROM_DEVICE, DMA_ATTR_SKIP_CPU_SYNC);
+       dma = dma_map_page_attrs(rx_ring->dev, page, 0,
+                                igb_rx_pg_size(rx_ring),
+                                DMA_FROM_DEVICE,
+                                IGB_RX_DMA_ATTR);
 
        /* if mapping failed free memory back to system since
         * there isn't much point in holding memory we can't use
         */
        if (dma_mapping_error(rx_ring->dev, dma)) {
-               __free_page(page);
+               __free_pages(page, igb_rx_pg_order(rx_ring));
 
                rx_ring->rx_stats.alloc_failed++;
                return false;
@@ -7264,7 +7392,7 @@ static bool igb_alloc_mapped_page(struct igb_ring *rx_ring,
 
        bi->dma = dma;
        bi->page = page;
-       bi->page_offset = 0;
+       bi->page_offset = igb_rx_offset(rx_ring);
        bi->pagecnt_bias = 1;
 
        return true;
@@ -7279,6 +7407,7 @@ void igb_alloc_rx_buffers(struct igb_ring *rx_ring, u16 cleaned_count)
        union e1000_adv_rx_desc *rx_desc;
        struct igb_rx_buffer *bi;
        u16 i = rx_ring->next_to_use;
+       u16 bufsz;
 
        /* nothing to do */
        if (!cleaned_count)
@@ -7288,14 +7417,15 @@ void igb_alloc_rx_buffers(struct igb_ring *rx_ring, u16 cleaned_count)
        bi = &rx_ring->rx_buffer_info[i];
        i -= rx_ring->count;
 
+       bufsz = igb_rx_bufsz(rx_ring);
+
        do {
                if (!igb_alloc_mapped_page(rx_ring, bi))
                        break;
 
                /* sync the buffer for use by the device */
                dma_sync_single_range_for_device(rx_ring->dev, bi->dma,
-                                                bi->page_offset,
-                                                IGB_RX_BUFSZ,
+                                                bi->page_offset, bufsz,
                                                 DMA_FROM_DEVICE);
 
                /* Refresh the desc even if buffer_addrs didn't change
@@ -7312,8 +7442,8 @@ void igb_alloc_rx_buffers(struct igb_ring *rx_ring, u16 cleaned_count)
                        i -= rx_ring->count;
                }
 
-               /* clear the status bits for the next_to_use descriptor */
-               rx_desc->wb.upper.status_error = 0;
+               /* clear the length for the next_to_use descriptor */
+               rx_desc->wb.upper.length = 0;
 
                cleaned_count--;
        } while (cleaned_count);
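
The Rx buffer changes in this file center on the pagecnt_bias scheme: instead of taking and releasing a page reference per received frame, the driver charges itself a large batch of references once (page_ref_add(page, USHRT_MAX)) and pays them out by decrementing a driver-private bias counter in igb_get_rx_buffer(); igb_can_reuse_rx_page() then treats "page_ref_count(page) - pagecnt_bias > 1" as meaning someone besides the driver and the one in-flight skb still holds the page. A toy user-space model of that accounting, with plain integers standing in for struct page (names here are illustrative, not kernel API):

/* Toy model of the pagecnt_bias accounting used in the Rx hunks above.
 * page_refs stands in for the atomic page refcount; bias is the
 * driver-private counter. Only the recharge path would touch atomics.
 */
#include <stdio.h>
#include <limits.h>

static unsigned int page_refs = 1;	/* ref taken at page allocation */
static unsigned int bias = 1;		/* bi->pagecnt_bias = 1 above */

static void give_half_page_to_skb(void)	/* like igb_get_rx_buffer() */
{
	bias--;				/* pay with a prepaid reference */
}

static void stack_frees_skb(void)	/* put_page() from the stack */
{
	page_refs--;
}

static int can_reuse(void)		/* like igb_can_reuse_rx_page() */
{
	if (page_refs - bias > 1)	/* page escaped to someone else */
		return 0;
	if (bias == 0) {		/* pool drained: recharge once */
		page_refs += USHRT_MAX;	/* page_ref_add(page, USHRT_MAX) */
		bias = USHRT_MAX;
	}
	return 1;
}

int main(void)
{
	for (int i = 0; i < 3; i++) {
		give_half_page_to_skb();
		printf("reuse=%d refs=%u bias=%u\n",
		       can_reuse(), page_refs, bias);
		stack_frees_skb();
	}
	return 0;
}

The recharge in one USHRT_MAX-sized step is the point of the design: the hot path costs a plain decrement, and the atomic page_ref_add() runs only once per ~64k reuses.
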
index c4477552ce9ef2e153eb1678c783ff9e9fb08eb8..7a3fd4d745928c809961c589e05ce25abc0077a9 100644 (file)
@@ -764,8 +764,7 @@ static void igb_ptp_tx_hwtstamp(struct igb_adapter *adapter)
  * incoming frame.  The value is stored in little endian format starting on
  * byte 8.
  **/
-void igb_ptp_rx_pktstamp(struct igb_q_vector *q_vector,
-                        unsigned char *va,
+void igb_ptp_rx_pktstamp(struct igb_q_vector *q_vector, void *va,
                         struct sk_buff *skb)
 {
        __le64 *regval = (__le64 *)va;
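
The hunk above only loosens the type of va; the layout documented in the comment is unchanged: the packet-prepended timestamp header carries the value as a little-endian 64-bit word starting at byte 8, which is what the regval cast reads. A self-contained sketch of that extraction (the sample bytes are made up):

/* Read a little-endian u64 at byte offset 8 of a packet-prepended
 * timestamp header, as the comment above describes. Buffer contents
 * are made-up sample data.
 */
#include <stdio.h>
#include <stdint.h>

static uint64_t get_le64(const uint8_t *b)
{
	uint64_t v = 0;

	for (int i = 7; i >= 0; i--)
		v = (v << 8) | b[i];
	return v;
}

int main(void)
{
	uint8_t hdr[16] = { 0 };
	uint64_t ts = 0x0123456789abcdefULL;

	/* device writes the timestamp little-endian at byte 8 */
	for (int i = 0; i < 8; i++)
		hdr[8 + i] = (ts >> (8 * i)) & 0xff;

	printf("raw timestamp: %#llx\n",
	       (unsigned long long)get_le64(hdr + 8));
	return 0;
}
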
index 8dea1b1367ef65603592d9949fe55af0521cbcf8..34faa113a8a018e1e42a5ff1eebc867be65ad10d 100644 (file)
@@ -71,45 +71,45 @@ static const char igbvf_gstrings_test[][ETH_GSTRING_LEN] = {
 
 #define IGBVF_TEST_LEN ARRAY_SIZE(igbvf_gstrings_test)
 
-static int igbvf_get_settings(struct net_device *netdev,
-                             struct ethtool_cmd *ecmd)
+static int igbvf_get_link_ksettings(struct net_device *netdev,
+                                   struct ethtool_link_ksettings *cmd)
 {
        struct igbvf_adapter *adapter = netdev_priv(netdev);
        struct e1000_hw *hw = &adapter->hw;
        u32 status;
 
-       ecmd->supported   = SUPPORTED_1000baseT_Full;
+       ethtool_link_ksettings_zero_link_mode(cmd, supported);
+       ethtool_link_ksettings_add_link_mode(cmd, supported, 1000baseT_Full);
+       ethtool_link_ksettings_zero_link_mode(cmd, advertising);
+       ethtool_link_ksettings_add_link_mode(cmd, advertising, 1000baseT_Full);
 
-       ecmd->advertising = ADVERTISED_1000baseT_Full;
-
-       ecmd->port = -1;
-       ecmd->transceiver = XCVR_DUMMY1;
+       cmd->base.port = -1;
 
        status = er32(STATUS);
        if (status & E1000_STATUS_LU) {
                if (status & E1000_STATUS_SPEED_1000)
-                       ethtool_cmd_speed_set(ecmd, SPEED_1000);
+                       cmd->base.speed = SPEED_1000;
                else if (status & E1000_STATUS_SPEED_100)
-                       ethtool_cmd_speed_set(ecmd, SPEED_100);
+                       cmd->base.speed = SPEED_100;
                else
-                       ethtool_cmd_speed_set(ecmd, SPEED_10);
+                       cmd->base.speed = SPEED_10;
 
                if (status & E1000_STATUS_FD)
-                       ecmd->duplex = DUPLEX_FULL;
+                       cmd->base.duplex = DUPLEX_FULL;
                else
-                       ecmd->duplex = DUPLEX_HALF;
+                       cmd->base.duplex = DUPLEX_HALF;
        } else {
-               ethtool_cmd_speed_set(ecmd, SPEED_UNKNOWN);
-               ecmd->duplex = DUPLEX_UNKNOWN;
+               cmd->base.speed = SPEED_UNKNOWN;
+               cmd->base.duplex = DUPLEX_UNKNOWN;
        }
 
-       ecmd->autoneg = AUTONEG_DISABLE;
+       cmd->base.autoneg = AUTONEG_DISABLE;
 
        return 0;
 }
 
-static int igbvf_set_settings(struct net_device *netdev,
-                             struct ethtool_cmd *ecmd)
+static int igbvf_set_link_ksettings(struct net_device *netdev,
+                                   const struct ethtool_link_ksettings *cmd)
 {
        return -EOPNOTSUPP;
 }
@@ -443,8 +443,6 @@ static void igbvf_get_strings(struct net_device *netdev, u32 stringset,
 }
 
 static const struct ethtool_ops igbvf_ethtool_ops = {
-       .get_settings           = igbvf_get_settings,
-       .set_settings           = igbvf_set_settings,
        .get_drvinfo            = igbvf_get_drvinfo,
        .get_regs_len           = igbvf_get_regs_len,
        .get_regs               = igbvf_get_regs,
@@ -467,6 +465,8 @@ static const struct ethtool_ops igbvf_ethtool_ops = {
        .get_ethtool_stats      = igbvf_get_ethtool_stats,
        .get_coalesce           = igbvf_get_coalesce,
        .set_coalesce           = igbvf_set_coalesce,
+       .get_link_ksettings     = igbvf_get_link_ksettings,
+       .set_link_ksettings     = igbvf_set_link_ksettings,
 };
 
 void igbvf_set_ethtool_ops(struct net_device *netdev)
index e5d72559cca9b060002525e5086ccfc008cc99d6..d10a0d242dda5db4f8c474a0814bc7a1b9eae491 100644 (file)
@@ -94,24 +94,30 @@ static struct ixgb_stats ixgb_gstrings_stats[] = {
 #define IXGB_STATS_LEN ARRAY_SIZE(ixgb_gstrings_stats)
 
 static int
-ixgb_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
+ixgb_get_link_ksettings(struct net_device *netdev,
+                       struct ethtool_link_ksettings *cmd)
 {
        struct ixgb_adapter *adapter = netdev_priv(netdev);
 
-       ecmd->supported = (SUPPORTED_10000baseT_Full | SUPPORTED_FIBRE);
-       ecmd->advertising = (ADVERTISED_10000baseT_Full | ADVERTISED_FIBRE);
-       ecmd->port = PORT_FIBRE;
-       ecmd->transceiver = XCVR_EXTERNAL;
+       ethtool_link_ksettings_zero_link_mode(cmd, supported);
+       ethtool_link_ksettings_add_link_mode(cmd, supported, 10000baseT_Full);
+       ethtool_link_ksettings_add_link_mode(cmd, supported, FIBRE);
+
+       ethtool_link_ksettings_zero_link_mode(cmd, advertising);
+       ethtool_link_ksettings_add_link_mode(cmd, advertising, 10000baseT_Full);
+       ethtool_link_ksettings_add_link_mode(cmd, advertising, FIBRE);
+
+       cmd->base.port = PORT_FIBRE;
 
        if (netif_carrier_ok(adapter->netdev)) {
-               ethtool_cmd_speed_set(ecmd, SPEED_10000);
-               ecmd->duplex = DUPLEX_FULL;
+               cmd->base.speed = SPEED_10000;
+               cmd->base.duplex = DUPLEX_FULL;
        } else {
-               ethtool_cmd_speed_set(ecmd, SPEED_UNKNOWN);
-               ecmd->duplex = DUPLEX_UNKNOWN;
+               cmd->base.speed = SPEED_UNKNOWN;
+               cmd->base.duplex = DUPLEX_UNKNOWN;
        }
 
-       ecmd->autoneg = AUTONEG_DISABLE;
+       cmd->base.autoneg = AUTONEG_DISABLE;
        return 0;
 }
 
@@ -126,13 +132,14 @@ void ixgb_set_speed_duplex(struct net_device *netdev)
 }
 
 static int
-ixgb_set_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
+ixgb_set_link_ksettings(struct net_device *netdev,
+                       const struct ethtool_link_ksettings *cmd)
 {
        struct ixgb_adapter *adapter = netdev_priv(netdev);
-       u32 speed = ethtool_cmd_speed(ecmd);
+       u32 speed = cmd->base.speed;
 
-       if (ecmd->autoneg == AUTONEG_ENABLE ||
-           (speed + ecmd->duplex != SPEED_10000 + DUPLEX_FULL))
+       if (cmd->base.autoneg == AUTONEG_ENABLE ||
+           (speed + cmd->base.duplex != SPEED_10000 + DUPLEX_FULL))
                return -EINVAL;
 
        if (netif_running(adapter->netdev)) {
@@ -630,8 +637,6 @@ ixgb_get_strings(struct net_device *netdev, u32 stringset, u8 *data)
 }
 
 static const struct ethtool_ops ixgb_ethtool_ops = {
-       .get_settings = ixgb_get_settings,
-       .set_settings = ixgb_set_settings,
        .get_drvinfo = ixgb_get_drvinfo,
        .get_regs_len = ixgb_get_regs_len,
        .get_regs = ixgb_get_regs,
@@ -649,6 +654,8 @@ static const struct ethtool_ops ixgb_ethtool_ops = {
        .set_phys_id = ixgb_set_phys_id,
        .get_sset_count = ixgb_get_sset_count,
        .get_ethtool_stats = ixgb_get_ethtool_stats,
+       .get_link_ksettings = ixgb_get_link_ksettings,
+       .set_link_ksettings = ixgb_set_link_ksettings,
 };
 
 void ixgb_set_ethtool_ops(struct net_device *netdev)
index 90fa5bf23d1b5f6d636478626b7d7f45d6a8871c..0da0752fedef1db2988ee1b1d9d63831760b73de 100644 (file)
@@ -186,60 +186,62 @@ static u32 ixgbe_get_supported_10gtypes(struct ixgbe_hw *hw)
        }
 }
 
-static int ixgbe_get_settings(struct net_device *netdev,
-                             struct ethtool_cmd *ecmd)
+static int ixgbe_get_link_ksettings(struct net_device *netdev,
+                                   struct ethtool_link_ksettings *cmd)
 {
        struct ixgbe_adapter *adapter = netdev_priv(netdev);
        struct ixgbe_hw *hw = &adapter->hw;
        ixgbe_link_speed supported_link;
        bool autoneg = false;
+       u32 supported, advertising;
+
+       ethtool_convert_link_mode_to_legacy_u32(&supported,
+                                               cmd->link_modes.supported);
 
        hw->mac.ops.get_link_capabilities(hw, &supported_link, &autoneg);
 
        /* set the supported link speeds */
        if (supported_link & IXGBE_LINK_SPEED_10GB_FULL)
-               ecmd->supported |= ixgbe_get_supported_10gtypes(hw);
+               supported |= ixgbe_get_supported_10gtypes(hw);
        if (supported_link & IXGBE_LINK_SPEED_1GB_FULL)
-               ecmd->supported |= (ixgbe_isbackplane(hw->phy.media_type)) ?
+               supported |= (ixgbe_isbackplane(hw->phy.media_type)) ?
                                   SUPPORTED_1000baseKX_Full :
                                   SUPPORTED_1000baseT_Full;
        if (supported_link & IXGBE_LINK_SPEED_100_FULL)
-               ecmd->supported |= SUPPORTED_100baseT_Full;
+               supported |= SUPPORTED_100baseT_Full;
        if (supported_link & IXGBE_LINK_SPEED_10_FULL)
-               ecmd->supported |= SUPPORTED_10baseT_Full;
+               supported |= SUPPORTED_10baseT_Full;
 
        /* default advertised speed if phy.autoneg_advertised isn't set */
-       ecmd->advertising = ecmd->supported;
+       advertising = supported;
        /* set the advertised speeds */
        if (hw->phy.autoneg_advertised) {
-               ecmd->advertising = 0;
+               advertising = 0;
                if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_10_FULL)
-                       ecmd->advertising |= ADVERTISED_10baseT_Full;
+                       advertising |= ADVERTISED_10baseT_Full;
                if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_100_FULL)
-                       ecmd->advertising |= ADVERTISED_100baseT_Full;
+                       advertising |= ADVERTISED_100baseT_Full;
                if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_10GB_FULL)
-                       ecmd->advertising |= ecmd->supported & ADVRTSD_MSK_10G;
+                       advertising |= supported & ADVRTSD_MSK_10G;
                if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_1GB_FULL) {
-                       if (ecmd->supported & SUPPORTED_1000baseKX_Full)
-                               ecmd->advertising |= ADVERTISED_1000baseKX_Full;
+                       if (supported & SUPPORTED_1000baseKX_Full)
+                               advertising |= ADVERTISED_1000baseKX_Full;
                        else
-                               ecmd->advertising |= ADVERTISED_1000baseT_Full;
+                               advertising |= ADVERTISED_1000baseT_Full;
                }
        } else {
                if (hw->phy.multispeed_fiber && !autoneg) {
                        if (supported_link & IXGBE_LINK_SPEED_10GB_FULL)
-                               ecmd->advertising = ADVERTISED_10000baseT_Full;
+                               advertising = ADVERTISED_10000baseT_Full;
                }
        }
 
        if (autoneg) {
-               ecmd->supported |= SUPPORTED_Autoneg;
-               ecmd->advertising |= ADVERTISED_Autoneg;
-               ecmd->autoneg = AUTONEG_ENABLE;
+               supported |= SUPPORTED_Autoneg;
+               advertising |= ADVERTISED_Autoneg;
+               cmd->base.autoneg = AUTONEG_ENABLE;
        } else
-               ecmd->autoneg = AUTONEG_DISABLE;
-
-       ecmd->transceiver = XCVR_EXTERNAL;
+               cmd->base.autoneg = AUTONEG_DISABLE;
 
        /* Determine the remaining settings based on the PHY type. */
        switch (adapter->hw.phy.type) {
@@ -248,14 +250,14 @@ static int ixgbe_get_settings(struct net_device *netdev,
        case ixgbe_phy_x550em_ext_t:
        case ixgbe_phy_fw:
        case ixgbe_phy_cu_unknown:
-               ecmd->supported |= SUPPORTED_TP;
-               ecmd->advertising |= ADVERTISED_TP;
-               ecmd->port = PORT_TP;
+               supported |= SUPPORTED_TP;
+               advertising |= ADVERTISED_TP;
+               cmd->base.port = PORT_TP;
                break;
        case ixgbe_phy_qt:
-               ecmd->supported |= SUPPORTED_FIBRE;
-               ecmd->advertising |= ADVERTISED_FIBRE;
-               ecmd->port = PORT_FIBRE;
+               supported |= SUPPORTED_FIBRE;
+               advertising |= ADVERTISED_FIBRE;
+               cmd->base.port = PORT_FIBRE;
                break;
        case ixgbe_phy_nl:
        case ixgbe_phy_sfp_passive_tyco:
@@ -273,9 +275,9 @@ static int ixgbe_get_settings(struct net_device *netdev,
                case ixgbe_sfp_type_da_cu:
                case ixgbe_sfp_type_da_cu_core0:
                case ixgbe_sfp_type_da_cu_core1:
-                       ecmd->supported |= SUPPORTED_FIBRE;
-                       ecmd->advertising |= ADVERTISED_FIBRE;
-                       ecmd->port = PORT_DA;
+                       supported |= SUPPORTED_FIBRE;
+                       advertising |= ADVERTISED_FIBRE;
+                       cmd->base.port = PORT_DA;
                        break;
                case ixgbe_sfp_type_sr:
                case ixgbe_sfp_type_lr:
@@ -285,102 +287,113 @@ static int ixgbe_get_settings(struct net_device *netdev,
                case ixgbe_sfp_type_1g_sx_core1:
                case ixgbe_sfp_type_1g_lx_core0:
                case ixgbe_sfp_type_1g_lx_core1:
-                       ecmd->supported |= SUPPORTED_FIBRE;
-                       ecmd->advertising |= ADVERTISED_FIBRE;
-                       ecmd->port = PORT_FIBRE;
+                       supported |= SUPPORTED_FIBRE;
+                       advertising |= ADVERTISED_FIBRE;
+                       cmd->base.port = PORT_FIBRE;
                        break;
                case ixgbe_sfp_type_not_present:
-                       ecmd->supported |= SUPPORTED_FIBRE;
-                       ecmd->advertising |= ADVERTISED_FIBRE;
-                       ecmd->port = PORT_NONE;
+                       supported |= SUPPORTED_FIBRE;
+                       advertising |= ADVERTISED_FIBRE;
+                       cmd->base.port = PORT_NONE;
                        break;
                case ixgbe_sfp_type_1g_cu_core0:
                case ixgbe_sfp_type_1g_cu_core1:
-                       ecmd->supported |= SUPPORTED_TP;
-                       ecmd->advertising |= ADVERTISED_TP;
-                       ecmd->port = PORT_TP;
+                       supported |= SUPPORTED_TP;
+                       advertising |= ADVERTISED_TP;
+                       cmd->base.port = PORT_TP;
                        break;
                case ixgbe_sfp_type_unknown:
                default:
-                       ecmd->supported |= SUPPORTED_FIBRE;
-                       ecmd->advertising |= ADVERTISED_FIBRE;
-                       ecmd->port = PORT_OTHER;
+                       supported |= SUPPORTED_FIBRE;
+                       advertising |= ADVERTISED_FIBRE;
+                       cmd->base.port = PORT_OTHER;
                        break;
                }
                break;
        case ixgbe_phy_xaui:
-               ecmd->supported |= SUPPORTED_FIBRE;
-               ecmd->advertising |= ADVERTISED_FIBRE;
-               ecmd->port = PORT_NONE;
+               supported |= SUPPORTED_FIBRE;
+               advertising |= ADVERTISED_FIBRE;
+               cmd->base.port = PORT_NONE;
                break;
        case ixgbe_phy_unknown:
        case ixgbe_phy_generic:
        case ixgbe_phy_sfp_unsupported:
        default:
-               ecmd->supported |= SUPPORTED_FIBRE;
-               ecmd->advertising |= ADVERTISED_FIBRE;
-               ecmd->port = PORT_OTHER;
+               supported |= SUPPORTED_FIBRE;
+               advertising |= ADVERTISED_FIBRE;
+               cmd->base.port = PORT_OTHER;
                break;
        }
 
        /* Indicate pause support */
-       ecmd->supported |= SUPPORTED_Pause;
+       supported |= SUPPORTED_Pause;
 
        switch (hw->fc.requested_mode) {
        case ixgbe_fc_full:
-               ecmd->advertising |= ADVERTISED_Pause;
+               advertising |= ADVERTISED_Pause;
                break;
        case ixgbe_fc_rx_pause:
-               ecmd->advertising |= ADVERTISED_Pause |
+               advertising |= ADVERTISED_Pause |
                                     ADVERTISED_Asym_Pause;
                break;
        case ixgbe_fc_tx_pause:
-               ecmd->advertising |= ADVERTISED_Asym_Pause;
+               advertising |= ADVERTISED_Asym_Pause;
                break;
        default:
-               ecmd->advertising &= ~(ADVERTISED_Pause |
+               advertising &= ~(ADVERTISED_Pause |
                                       ADVERTISED_Asym_Pause);
        }
 
        if (netif_carrier_ok(netdev)) {
                switch (adapter->link_speed) {
                case IXGBE_LINK_SPEED_10GB_FULL:
-                       ethtool_cmd_speed_set(ecmd, SPEED_10000);
+                       cmd->base.speed = SPEED_10000;
                        break;
                case IXGBE_LINK_SPEED_5GB_FULL:
-                       ethtool_cmd_speed_set(ecmd, SPEED_5000);
+                       cmd->base.speed = SPEED_5000;
                        break;
                case IXGBE_LINK_SPEED_2_5GB_FULL:
-                       ethtool_cmd_speed_set(ecmd, SPEED_2500);
+                       cmd->base.speed = SPEED_2500;
                        break;
                case IXGBE_LINK_SPEED_1GB_FULL:
-                       ethtool_cmd_speed_set(ecmd, SPEED_1000);
+                       cmd->base.speed = SPEED_1000;
                        break;
                case IXGBE_LINK_SPEED_100_FULL:
-                       ethtool_cmd_speed_set(ecmd, SPEED_100);
+                       cmd->base.speed = SPEED_100;
                        break;
                case IXGBE_LINK_SPEED_10_FULL:
-                       ethtool_cmd_speed_set(ecmd, SPEED_10);
+                       cmd->base.speed = SPEED_10;
                        break;
                default:
                        break;
                }
-               ecmd->duplex = DUPLEX_FULL;
+               cmd->base.duplex = DUPLEX_FULL;
        } else {
-               ethtool_cmd_speed_set(ecmd, SPEED_UNKNOWN);
-               ecmd->duplex = DUPLEX_UNKNOWN;
+               cmd->base.speed = SPEED_UNKNOWN;
+               cmd->base.duplex = DUPLEX_UNKNOWN;
        }
 
+       ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
+                                               supported);
+       ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
+                                               advertising);
+
        return 0;
 }
 
-static int ixgbe_set_settings(struct net_device *netdev,
-                             struct ethtool_cmd *ecmd)
+static int ixgbe_set_link_ksettings(struct net_device *netdev,
+                                   const struct ethtool_link_ksettings *cmd)
 {
        struct ixgbe_adapter *adapter = netdev_priv(netdev);
        struct ixgbe_hw *hw = &adapter->hw;
        u32 advertised, old;
        s32 err = 0;
+       u32 supported, advertising;
+
+       ethtool_convert_link_mode_to_legacy_u32(&supported,
+                                               cmd->link_modes.supported);
+       ethtool_convert_link_mode_to_legacy_u32(&advertising,
+                                               cmd->link_modes.advertising);
 
        if ((hw->phy.media_type == ixgbe_media_type_copper) ||
            (hw->phy.multispeed_fiber)) {
@@ -388,12 +401,12 @@ static int ixgbe_set_settings(struct net_device *netdev,
                 * this function does not support duplex forcing, but can
                 * limit the advertising of the adapter to the specified speed
                 */
-               if (ecmd->advertising & ~ecmd->supported)
+               if (advertising & ~supported)
                        return -EINVAL;
 
                /* only allow one speed at a time if no autoneg */
-               if (!ecmd->autoneg && hw->phy.multispeed_fiber) {
-                       if (ecmd->advertising ==
+               if (!cmd->base.autoneg && hw->phy.multispeed_fiber) {
+                       if (advertising ==
                            (ADVERTISED_10000baseT_Full |
                             ADVERTISED_1000baseT_Full))
                                return -EINVAL;
@@ -401,16 +414,16 @@ static int ixgbe_set_settings(struct net_device *netdev,
 
                old = hw->phy.autoneg_advertised;
                advertised = 0;
-               if (ecmd->advertising & ADVERTISED_10000baseT_Full)
+               if (advertising & ADVERTISED_10000baseT_Full)
                        advertised |= IXGBE_LINK_SPEED_10GB_FULL;
 
-               if (ecmd->advertising & ADVERTISED_1000baseT_Full)
+               if (advertising & ADVERTISED_1000baseT_Full)
                        advertised |= IXGBE_LINK_SPEED_1GB_FULL;
 
-               if (ecmd->advertising & ADVERTISED_100baseT_Full)
+               if (advertising & ADVERTISED_100baseT_Full)
                        advertised |= IXGBE_LINK_SPEED_100_FULL;
 
-               if (ecmd->advertising & ADVERTISED_10baseT_Full)
+               if (advertising & ADVERTISED_10baseT_Full)
                        advertised |= IXGBE_LINK_SPEED_10_FULL;
 
                if (old == advertised)
@@ -428,10 +441,11 @@ static int ixgbe_set_settings(struct net_device *netdev,
                clear_bit(__IXGBE_IN_SFP_INIT, &adapter->state);
        } else {
                /* in this case we currently only support 10Gb/FULL */
-               u32 speed = ethtool_cmd_speed(ecmd);
-               if ((ecmd->autoneg == AUTONEG_ENABLE) ||
-                   (ecmd->advertising != ADVERTISED_10000baseT_Full) ||
-                   (speed + ecmd->duplex != SPEED_10000 + DUPLEX_FULL))
+               u32 speed = cmd->base.speed;
+
+               if ((cmd->base.autoneg == AUTONEG_ENABLE) ||
+                   (advertising != ADVERTISED_10000baseT_Full) ||
+                   (speed + cmd->base.duplex != SPEED_10000 + DUPLEX_FULL))
                        return -EINVAL;
        }
 
@@ -3402,8 +3416,6 @@ static int ixgbe_set_priv_flags(struct net_device *netdev, u32 priv_flags)
 }
 
 static const struct ethtool_ops ixgbe_ethtool_ops = {
-       .get_settings           = ixgbe_get_settings,
-       .set_settings           = ixgbe_set_settings,
        .get_drvinfo            = ixgbe_get_drvinfo,
        .get_regs_len           = ixgbe_get_regs_len,
        .get_regs               = ixgbe_get_regs,
@@ -3442,6 +3454,8 @@ static const struct ethtool_ops ixgbe_ethtool_ops = {
        .get_ts_info            = ixgbe_get_ts_info,
        .get_module_info        = ixgbe_get_module_info,
        .get_module_eeprom      = ixgbe_get_module_eeprom,
+       .get_link_ksettings     = ixgbe_get_link_ksettings,
+       .set_link_ksettings     = ixgbe_set_link_ksettings,
 };
 
 void ixgbe_set_ethtool_ops(struct net_device *netdev)
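
Unlike the igbvf and ixgb conversions above, ixgbe keeps computing supported/advertising as the old u32 masks and only converts at the boundary with ethtool_convert_legacy_u32_to_link_mode() and ethtool_convert_link_mode_to_legacy_u32(). The conversion itself is widening/narrowing between a u32 and the kernel's multi-word link-mode bitmap, where the narrowing direction can be lossy. A toy user-space model, with a 3-word array standing in for the kernel's mask (function names here are illustrative, not the kernel API):

/* Toy model of the legacy-u32 <-> link-mode bitmap conversion that the
 * ixgbe hunks above rely on. Only bits 0..31 have legacy equivalents.
 */
#include <stdio.h>
#include <stdint.h>
#include <string.h>

#define LINK_MODE_WORDS 3

static void legacy_u32_to_link_mode(uint32_t *dst, uint32_t legacy)
{
	memset(dst, 0, LINK_MODE_WORDS * sizeof(*dst));
	dst[0] = legacy;	/* legacy bits map 1:1 onto word 0 */
}

/* returns 1 if the narrowing lost nothing, 0 if newer modes were set */
static int link_mode_to_legacy_u32(uint32_t *legacy, const uint32_t *src)
{
	*legacy = src[0];
	for (int i = 1; i < LINK_MODE_WORDS; i++)
		if (src[i])
			return 0;
	return 1;
}

int main(void)
{
	uint32_t mode[LINK_MODE_WORDS], legacy;

	legacy_u32_to_link_mode(mode, 0x1020u);	/* arbitrary mask */
	mode[1] = 0x1;				/* a "new" mode bit */

	printf("lossless=%d legacy=%#x\n",
	       link_mode_to_legacy_u32(&legacy, mode), legacy);
	return 0;
}

This boundary-conversion style lets a driver with purely legacy link modes migrate to the ksettings ops without rewriting its speed/advertising logic, at the cost of never reporting modes beyond the legacy u32 range.
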
index a7a430a7be2cd9201cc36022249219e94bfb41ca..852a2e7e25ed185917732df098174820c56e1295 100644 (file)
@@ -2122,7 +2122,7 @@ static struct sk_buff *ixgbe_build_skb(struct ixgbe_ring *rx_ring,
        prefetch(va + L1_CACHE_BYTES);
 #endif
 
-       /* build an skb to around the page buffer */
+       /* build an skb around the page buffer */
        skb = build_skb(va - IXGBE_SKB_PAD, truesize);
        if (unlikely(!skb))
                return NULL;
@@ -8948,7 +8948,9 @@ static int __ixgbe_setup_tc(struct net_device *dev, u32 handle, __be16 proto,
        if (tc->type != TC_SETUP_MQPRIO)
                return -EINVAL;
 
-       return ixgbe_setup_tc(dev, tc->tc);
+       tc->mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS;
+
+       return ixgbe_setup_tc(dev, tc->mqprio->num_tc);
 }
 
 #ifdef CONFIG_PCI_IOV
index d2555e8b947ee2a93053f4ac54191193c4a50529..da6fb825afeafd28a56909aa3baa4e97c3704d58 100644 (file)
@@ -82,13 +82,13 @@ config MVNETA_BM
          that all dependencies are met.
 
 config MVPP2
-       tristate "Marvell Armada 375 network interface support"
+       tristate "Marvell Armada 375/7K/8K network interface support"
        depends on ARCH_MVEBU || COMPILE_TEST
        depends on HAS_DMA
        select MVMDIO
        ---help---
          This driver supports the network interface units in the
-         Marvell ARMADA 375 SoC.
+         Marvell ARMADA 375, 7K and 8K SoCs.
 
 config PXA168_ETH
        tristate "Marvell pxa168 ethernet support"
index 61dd4462411c03511d6121d75ca65dc36c7f688f..34a3686d2ce669091510f5dc85773a84d7880c92 100644 (file)
@@ -431,6 +431,7 @@ struct mvneta_port {
        /* Flags for special SoC configurations */
        bool neta_armada3700;
        u16 rx_offset_correction;
+       const struct mbus_dram_target_info *dram_target_info;
 };
 
 /* The mvneta_tx_desc and mvneta_rx_desc structures describe the
@@ -4098,6 +4099,8 @@ static int mvneta_port_power_up(struct mvneta_port *pp, int phy_mode)
                break;
        case PHY_INTERFACE_MODE_RGMII:
        case PHY_INTERFACE_MODE_RGMII_ID:
+       case PHY_INTERFACE_MODE_RGMII_RXID:
+       case PHY_INTERFACE_MODE_RGMII_TXID:
                ctrl |= MVNETA_GMAC2_PORT_RGMII;
                break;
        default:
@@ -4118,7 +4121,6 @@ static int mvneta_port_power_up(struct mvneta_port *pp, int phy_mode)
 /* Device initialization routine */
 static int mvneta_probe(struct platform_device *pdev)
 {
-       const struct mbus_dram_target_info *dram_target_info;
        struct resource *res;
        struct device_node *dn = pdev->dev.of_node;
        struct device_node *phy_node;
@@ -4267,13 +4269,13 @@ static int mvneta_probe(struct platform_device *pdev)
 
        pp->tx_csum_limit = tx_csum_limit;
 
-       dram_target_info = mv_mbus_dram_info();
+       pp->dram_target_info = mv_mbus_dram_info();
        /* Armada3700 requires setting the default configuration of Mbus
         * windows, but without using a filled mbus_dram_target_info
         * structure.
         */
-       if (dram_target_info || pp->neta_armada3700)
-               mvneta_conf_mbus_windows(pp, dram_target_info);
+       if (pp->dram_target_info || pp->neta_armada3700)
+               mvneta_conf_mbus_windows(pp, pp->dram_target_info);
 
        pp->tx_ring_size = MVNETA_MAX_TXD;
        pp->rx_ring_size = MVNETA_MAX_RXD;
@@ -4405,6 +4407,61 @@ static int mvneta_remove(struct platform_device *pdev)
        return 0;
 }
 
+#ifdef CONFIG_PM_SLEEP
+static int mvneta_suspend(struct device *device)
+{
+       struct net_device *dev = dev_get_drvdata(device);
+       struct mvneta_port *pp = netdev_priv(dev);
+
+       if (netif_running(dev))
+               mvneta_stop(dev);
+       netif_device_detach(dev);
+       clk_disable_unprepare(pp->clk_bus);
+       clk_disable_unprepare(pp->clk);
+       return 0;
+}
+
+static int mvneta_resume(struct device *device)
+{
+       struct platform_device *pdev = to_platform_device(device);
+       struct net_device *dev = dev_get_drvdata(device);
+       struct mvneta_port *pp = netdev_priv(dev);
+       int err;
+
+       clk_prepare_enable(pp->clk);
+       if (!IS_ERR(pp->clk_bus))
+               clk_prepare_enable(pp->clk_bus);
+       if (pp->dram_target_info || pp->neta_armada3700)
+               mvneta_conf_mbus_windows(pp, pp->dram_target_info);
+       if (pp->bm_priv) {
+               err = mvneta_bm_port_init(pdev, pp);
+               if (err < 0) {
+                       dev_info(&pdev->dev, "use SW buffer management\n");
+                       pp->bm_priv = NULL;
+               }
+       }
+       mvneta_defaults_set(pp);
+       err = mvneta_port_power_up(pp, pp->phy_interface);
+       if (err < 0) {
+               dev_err(device, "can't power up port\n");
+               return err;
+       }
+
+       if (pp->use_inband_status)
+               mvneta_fixed_link_update(pp, dev->phydev);
+
+       netif_device_attach(dev);
+       if (netif_running(dev)) {
+               mvneta_open(dev);
+               mvneta_set_rx_mode(dev);
+       }
+
+       return 0;
+}
+#endif
+
+static SIMPLE_DEV_PM_OPS(mvneta_pm_ops, mvneta_suspend, mvneta_resume);
+
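SIMPLE_DEV_PM_OPS() declares the dev_pm_ops structure and routes all system-sleep entry points (suspend/resume, freeze/thaw, poweroff/restore) to the two callbacks; its effect is roughly the following sketch, not the macro's literal expansion:

	static const struct dev_pm_ops mvneta_pm_ops = {
		SET_SYSTEM_SLEEP_PM_OPS(mvneta_suspend, mvneta_resume)
	};

With CONFIG_PM_SLEEP disabled, SET_SYSTEM_SLEEP_PM_OPS() expands to nothing, which is why the callbacks themselves are guarded by the matching #ifdef above.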
 static const struct of_device_id mvneta_match[] = {
        { .compatible = "marvell,armada-370-neta" },
        { .compatible = "marvell,armada-xp-neta" },
@@ -4419,6 +4476,7 @@ static struct platform_driver mvneta_driver = {
        .driver = {
                .name = MVNETA_DRIVER_NAME,
                .of_match_table = mvneta_match,
+               .pm = &mvneta_pm_ops,
        },
 };
 
index d00421b9ffea7c0569417e4ee3814469532802c8..af5bfa13d976d20ffa75129f27ce69a87ddc7bc1 100644 (file)
@@ -25,6 +25,7 @@
 #include <linux/of_mdio.h>
 #include <linux/of_net.h>
 #include <linux/of_address.h>
+#include <linux/of_device.h>
 #include <linux/phy.h>
 #include <linux/clk.h>
 #include <linux/hrtimer.h>
 #define     MVPP2_SNOOP_PKT_SIZE_MASK          0x1ff
 #define     MVPP2_SNOOP_BUF_HDR_MASK           BIT(9)
 #define     MVPP2_RXQ_POOL_SHORT_OFFS          20
-#define     MVPP2_RXQ_POOL_SHORT_MASK          0x700000
+#define     MVPP21_RXQ_POOL_SHORT_MASK         0x700000
+#define     MVPP22_RXQ_POOL_SHORT_MASK         0xf00000
 #define     MVPP2_RXQ_POOL_LONG_OFFS           24
-#define     MVPP2_RXQ_POOL_LONG_MASK           0x7000000
+#define     MVPP21_RXQ_POOL_LONG_MASK          0x7000000
+#define     MVPP22_RXQ_POOL_LONG_MASK          0xf000000
 #define     MVPP2_RXQ_PACKET_OFFSET_OFFS       28
 #define     MVPP2_RXQ_PACKET_OFFSET_MASK       0x70000000
 #define     MVPP2_RXQ_DISABLE_MASK             BIT(31)
 /* Descriptor Manager Top Registers */
 #define MVPP2_RXQ_NUM_REG                      0x2040
 #define MVPP2_RXQ_DESC_ADDR_REG                        0x2044
+#define     MVPP22_DESC_ADDR_OFFS              8
 #define MVPP2_RXQ_DESC_SIZE_REG                        0x2048
 #define     MVPP2_RXQ_DESC_SIZE_MASK           0x3ff0
 #define MVPP2_RXQ_STATUS_UPDATE_REG(rxq)       (0x3000 + 4 * (rxq))
 #define MVPP2_TXQ_DESC_SIZE_REG                        0x2088
 #define     MVPP2_TXQ_DESC_SIZE_MASK           0x3ff0
 #define MVPP2_AGGR_TXQ_UPDATE_REG              0x2090
-#define MVPP2_TXQ_THRESH_REG                   0x2094
-#define     MVPP2_TRANSMITTED_THRESH_OFFSET    16
-#define     MVPP2_TRANSMITTED_THRESH_MASK      0x3fff0000
 #define MVPP2_TXQ_INDEX_REG                    0x2098
 #define MVPP2_TXQ_PREF_BUF_REG                 0x209c
 #define     MVPP2_PREF_BUF_PTR(desc)           ((desc) & 0xfff)
 #define MVPP2_TXQ_RSVD_CLR_REG                 0x20b8
 #define     MVPP2_TXQ_RSVD_CLR_OFFSET          16
 #define MVPP2_AGGR_TXQ_DESC_ADDR_REG(cpu)      (0x2100 + 4 * (cpu))
+#define     MVPP22_AGGR_TXQ_DESC_ADDR_OFFS     8
 #define MVPP2_AGGR_TXQ_DESC_SIZE_REG(cpu)      (0x2140 + 4 * (cpu))
 #define     MVPP2_AGGR_TXQ_DESC_SIZE_MASK      0x3ff0
 #define MVPP2_AGGR_TXQ_STATUS_REG(cpu)         (0x2180 + 4 * (cpu))
 #define MVPP2_WIN_REMAP(w)                     (0x4040 + ((w) << 2))
 #define MVPP2_BASE_ADDR_ENABLE                 0x4060
 
+/* AXI Bridge Registers */
+#define MVPP22_AXI_BM_WR_ATTR_REG              0x4100
+#define MVPP22_AXI_BM_RD_ATTR_REG              0x4104
+#define MVPP22_AXI_AGGRQ_DESCR_RD_ATTR_REG     0x4110
+#define MVPP22_AXI_TXQ_DESCR_WR_ATTR_REG       0x4114
+#define MVPP22_AXI_TXQ_DESCR_RD_ATTR_REG       0x4118
+#define MVPP22_AXI_RXQ_DESCR_WR_ATTR_REG       0x411c
+#define MVPP22_AXI_RX_DATA_WR_ATTR_REG         0x4120
+#define MVPP22_AXI_TX_DATA_RD_ATTR_REG         0x4130
+#define MVPP22_AXI_RD_NORMAL_CODE_REG          0x4150
+#define MVPP22_AXI_RD_SNOOP_CODE_REG           0x4154
+#define MVPP22_AXI_WR_NORMAL_CODE_REG          0x4160
+#define MVPP22_AXI_WR_SNOOP_CODE_REG           0x4164
+
+/* Values for AXI Bridge registers */
+#define MVPP22_AXI_ATTR_CACHE_OFFS             0
+#define MVPP22_AXI_ATTR_DOMAIN_OFFS            12
+
+#define MVPP22_AXI_CODE_CACHE_OFFS             0
+#define MVPP22_AXI_CODE_DOMAIN_OFFS            4
+
+#define MVPP22_AXI_CODE_CACHE_NON_CACHE                0x3
+#define MVPP22_AXI_CODE_CACHE_WR_CACHE         0x7
+#define MVPP22_AXI_CODE_CACHE_RD_CACHE         0xb
+
+#define MVPP22_AXI_CODE_DOMAIN_OUTER_DOM       2
+#define MVPP22_AXI_CODE_DOMAIN_SYSTEM          3
+
 /* Interrupt Cause and Mask registers */
 #define MVPP2_ISR_RX_THRESHOLD_REG(rxq)                (0x5200 + 4 * (rxq))
 #define     MVPP2_MAX_ISR_RX_THRESHOLD         0xfffff0
-#define MVPP2_ISR_RXQ_GROUP_REG(rxq)           (0x5400 + 4 * (rxq))
+#define MVPP21_ISR_RXQ_GROUP_REG(rxq)          (0x5400 + 4 * (rxq))
+
+#define MVPP22_ISR_RXQ_GROUP_INDEX_REG          0x5400
+#define MVPP22_ISR_RXQ_GROUP_INDEX_SUBGROUP_MASK 0xf
+#define MVPP22_ISR_RXQ_GROUP_INDEX_GROUP_MASK   0x380
+#define MVPP22_ISR_RXQ_GROUP_INDEX_GROUP_OFFSET 7
+
+#define MVPP22_ISR_RXQ_SUB_GROUP_CONFIG_REG     0x5404
+#define MVPP22_ISR_RXQ_SUB_GROUP_STARTQ_MASK    0x1f
+#define MVPP22_ISR_RXQ_SUB_GROUP_SIZE_MASK      0xf00
+#define MVPP22_ISR_RXQ_SUB_GROUP_SIZE_OFFSET    8
+
 #define MVPP2_ISR_ENABLE_REG(port)             (0x5420 + 4 * (port))
 #define     MVPP2_ISR_ENABLE_INTERRUPT(mask)   ((mask) & 0xffff)
 #define     MVPP2_ISR_DISABLE_INTERRUPT(mask)  (((mask) << 16) & 0xffff0000)
 #define MVPP2_BM_PHY_ALLOC_REG(pool)           (0x6400 + ((pool) * 4))
 #define     MVPP2_BM_PHY_ALLOC_GRNTD_MASK      BIT(0)
 #define MVPP2_BM_VIRT_ALLOC_REG                        0x6440
+#define MVPP22_BM_ADDR_HIGH_ALLOC              0x6444
+#define     MVPP22_BM_ADDR_HIGH_PHYS_MASK      0xff
+#define     MVPP22_BM_ADDR_HIGH_VIRT_MASK      0xff00
+#define     MVPP22_BM_ADDR_HIGH_VIRT_SHIFT     8
 #define MVPP2_BM_PHY_RLS_REG(pool)             (0x6480 + ((pool) * 4))
 #define     MVPP2_BM_PHY_RLS_MC_BUFF_MASK      BIT(0)
 #define     MVPP2_BM_PHY_RLS_PRIO_EN_MASK      BIT(1)
 #define     MVPP2_BM_PHY_RLS_GRNTD_MASK                BIT(2)
 #define MVPP2_BM_VIRT_RLS_REG                  0x64c0
-#define MVPP2_BM_MC_RLS_REG                    0x64c4
-#define     MVPP2_BM_MC_ID_MASK                        0xfff
-#define     MVPP2_BM_FORCE_RELEASE_MASK                BIT(12)
+#define MVPP22_BM_ADDR_HIGH_RLS_REG            0x64c4
+#define     MVPP22_BM_ADDR_HIGH_PHYS_RLS_MASK  0xff
+#define     MVPP22_BM_ADDR_HIGH_VIRT_RLS_MASK  0xff00
+#define     MVPP22_BM_ADDR_HIGH_VIRT_RLS_SHIFT 8
 
 /* TX Scheduler registers */
 #define MVPP2_TXP_SCHED_PORT_INDEX_REG         0x8000
 #define      MVPP2_GMAC_TX_FIFO_MIN_TH_ALL_MASK        0x1fc0
 #define      MVPP2_GMAC_TX_FIFO_MIN_TH_MASK(v) (((v) << 6) & \
                                        MVPP2_GMAC_TX_FIFO_MIN_TH_ALL_MASK)
+#define MVPP22_GMAC_CTRL_4_REG                 0x90
+#define      MVPP22_CTRL4_EXT_PIN_GMII_SEL     BIT(0)
+#define      MVPP22_CTRL4_DP_CLK_SEL           BIT(5)
+#define      MVPP22_CTRL4_SYNC_BYPASS          BIT(6)
+#define      MVPP22_CTRL4_QSGMII_BYPASS_ACTIVE BIT(7)
+
+/* Per-port XGMAC registers. PPv2.2 only; present only on GOP port 0,
+ * relative to port->base.
+ */
+#define MVPP22_XLG_CTRL3_REG                   0x11c
+#define      MVPP22_XLG_CTRL3_MACMODESELECT_MASK       (7 << 13)
+#define      MVPP22_XLG_CTRL3_MACMODESELECT_GMAC       (0 << 13)
+
+/* SMI registers. PPv2.2 only, relative to priv->iface_base. */
+#define MVPP22_SMI_MISC_CFG_REG                        0x1204
+#define      MVPP22_SMI_POLLING_EN             BIT(10)
+
+#define MVPP22_GMAC_BASE(port)         (0x7000 + (port) * 0x1000 + 0xe00)
 
 #define MVPP2_CAUSE_TXQ_SENT_DESC_ALL_MASK     0xff
 
 /* Maximum number of TXQs used by single port */
 #define MVPP2_MAX_TXQ                  8
 
-/* Maximum number of RXQs used by single port */
-#define MVPP2_MAX_RXQ                  8
-
 /* Default number of RXQs in use */
 #define MVPP2_DEFAULT_RXQ              4
 
-/* Total number of RXQs available to all ports */
-#define MVPP2_RXQ_TOTAL_NUM            (MVPP2_MAX_PORTS * MVPP2_MAX_RXQ)
-
 /* Max number of Rx descriptors */
 #define MVPP2_MAX_RXD                  128
 
@@ -615,6 +676,11 @@ enum mvpp2_prs_l3_cast {
  */
 #define MVPP2_BM_SHORT_PKT_SIZE                MVPP2_RX_MAX_PKT_SIZE(512)
 
+#define MVPP21_ADDR_SPACE_SZ           0
+#define MVPP22_ADDR_SPACE_SZ           SZ_64K
+
+#define MVPP2_MAX_CPUS                 4
+
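These constants give the stride between per-CPU register windows: 0 on PPv2.1, where every CPU shares one window, and 64 KB on PPv2.2. The probe path (outside this hunk) presumably fills the cpu_base[] array added to struct mvpp2 below along these lines, as a sketch:

	void __iomem *base;	/* from devm_ioremap_resource() */
	size_t sz = priv->hw_version == MVPP21 ?
			MVPP21_ADDR_SPACE_SZ : MVPP22_ADDR_SPACE_SZ;
	int cpu;

	for (cpu = 0; cpu < MVPP2_MAX_CPUS; cpu++)
		priv->cpu_base[cpu] = base + cpu * sz;

On PPv2.1 the zero stride makes every entry alias the same window, so the per-CPU accessors introduced further down degenerate to plain mvpp2_write()/mvpp2_read().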
 enum mvpp2_bm_type {
        MVPP2_BM_FREE,
        MVPP2_BM_SWF_LONG,
@@ -626,12 +692,19 @@ enum mvpp2_bm_type {
 /* Shared Packet Processor resources */
 struct mvpp2 {
        /* Shared registers' base addresses */
-       void __iomem *base;
        void __iomem *lms_base;
+       void __iomem *iface_base;
+
+       /* On PPv2.2, each CPU can access the base registers through a
+        * separate address space, each one 64 KB apart from the
+        * others.
+        */
+       void __iomem *cpu_base[MVPP2_MAX_CPUS];
 
        /* Common clocks */
        struct clk *pp_clk;
        struct clk *gop_clk;
+       struct clk *mg_clk;
 
        /* List of pointers to port structures */
        struct mvpp2_port **port_list;
@@ -649,6 +722,12 @@ struct mvpp2 {
 
        /* Tclk value */
        u32 tclk;
+
+       /* HW version */
+       enum { MVPP21, MVPP22 } hw_version;
+
+       /* Maximum number of RXQs per port */
+       unsigned int max_port_rxqs;
 };
 
 struct mvpp2_pcpu_stats {
@@ -670,6 +749,11 @@ struct mvpp2_port_pcpu {
 struct mvpp2_port {
        u8 id;
 
+       /* Index of the port from the "group of ports" complex point
+        * of view
+        */
+       int gop_id;
+
        int irq;
 
        struct mvpp2 *priv;
@@ -741,22 +825,24 @@ struct mvpp2_port {
 #define MVPP2_RXD_L3_IP6               BIT(30)
 #define MVPP2_RXD_BUF_HDR              BIT(31)
 
-struct mvpp2_tx_desc {
+/* HW TX descriptor for PPv2.1 */
+struct mvpp21_tx_desc {
        u32 command;            /* Options used by HW for packet transmitting.*/
        u8  packet_offset;      /* the offset from the buffer beginning */
        u8  phys_txq;           /* destination queue ID                 */
        u16 data_size;          /* data size of transmitted packet in bytes */
-       u32 buf_phys_addr;      /* physical addr of transmitted buffer  */
+       u32 buf_dma_addr;       /* physical addr of transmitted buffer  */
        u32 buf_cookie;         /* cookie for access to TX buffer in tx path */
        u32 reserved1[3];       /* hw_cmd (for future use, BM, PON, PNC) */
        u32 reserved2;          /* reserved (for future use)            */
 };
 
-struct mvpp2_rx_desc {
+/* HW RX descriptor for PPv2.1 */
+struct mvpp21_rx_desc {
        u32 status;             /* info about received packet           */
        u16 reserved1;          /* parser_info (for future use, PnC)    */
        u16 data_size;          /* size of received packet in bytes     */
-       u32 buf_phys_addr;      /* physical address of the buffer       */
+       u32 buf_dma_addr;       /* physical address of the buffer       */
        u32 buf_cookie;         /* cookie for access to RX buffer in rx path */
        u16 reserved2;          /* gem_port_id (for future use, PON)    */
        u16 reserved3;          /* csum_l4 (for future use, PnC)        */
@@ -767,12 +853,51 @@ struct mvpp2_rx_desc {
        u32 reserved8;
 };
 
+/* HW TX descriptor for PPv2.2 */
+struct mvpp22_tx_desc {
+       u32 command;
+       u8  packet_offset;
+       u8  phys_txq;
+       u16 data_size;
+       u64 reserved1;
+       u64 buf_dma_addr_ptp;
+       u64 buf_cookie_misc;
+};
+
+/* HW RX descriptor for PPv2.2 */
+struct mvpp22_rx_desc {
+       u32 status;
+       u16 reserved1;
+       u16 data_size;
+       u32 reserved2;
+       u32 reserved3;
+       u64 buf_dma_addr_key_hash;
+       u64 buf_cookie_misc;
+};
+
+/* Opaque type used by the driver to manipulate the HW TX and RX
+ * descriptors
+ */
+struct mvpp2_tx_desc {
+       union {
+               struct mvpp21_tx_desc pp21;
+               struct mvpp22_tx_desc pp22;
+       };
+};
+
+struct mvpp2_rx_desc {
+       union {
+               struct mvpp21_rx_desc pp21;
+               struct mvpp22_rx_desc pp22;
+       };
+};
+
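Both generations pad their descriptors to the same 32-byte footprint (4+1+1+2+4+4+12+4 bytes for the PPv2.1 TX layout, 4+1+1+2+8+8+8 for PPv2.2), so ring sizing via MVPP2_DESC_ALIGNED_SIZE is unchanged. A compile-time assertion documenting that invariant, illustrative only and placed inside a function such as probe:

	BUILD_BUG_ON(sizeof(struct mvpp2_tx_desc) != MVPP2_DESC_ALIGNED_SIZE);
	BUILD_BUG_ON(sizeof(struct mvpp2_rx_desc) != MVPP2_DESC_ALIGNED_SIZE);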
 struct mvpp2_txq_pcpu_buf {
        /* Transmitted SKB */
        struct sk_buff *skb;
 
        /* Physical address of transmitted buffer */
-       dma_addr_t phys;
+       dma_addr_t dma;
 
        /* Size transmitted */
        size_t size;
@@ -825,7 +950,7 @@ struct mvpp2_tx_queue {
        struct mvpp2_tx_desc *descs;
 
        /* DMA address of the Tx DMA descriptors array */
-       dma_addr_t descs_phys;
+       dma_addr_t descs_dma;
 
        /* Index of the last Tx DMA descriptor */
        int last_desc;
@@ -848,7 +973,7 @@ struct mvpp2_rx_queue {
        struct mvpp2_rx_desc *descs;
 
        /* DMA address of the RX DMA descriptors array */
-       dma_addr_t descs_phys;
+       dma_addr_t descs_dma;
 
        /* Index of the last RX DMA descriptor */
        int last_desc;
@@ -912,6 +1037,8 @@ struct mvpp2_bm_pool {
 
        /* Buffer Pointers Pool External (BPPE) size */
        int size;
+       /* BPPE size in bytes */
+       int size_bytes;
        /* Number of buffers for this pool */
        int buf_num;
        /* Pool buffer size */
@@ -922,29 +1049,13 @@ struct mvpp2_bm_pool {
 
        /* BPPE virtual base address */
        u32 *virt_addr;
-       /* BPPE physical base address */
-       dma_addr_t phys_addr;
+       /* BPPE DMA base address */
+       dma_addr_t dma_addr;
 
        /* Ports using BM pool */
        u32 port_map;
 };
 
-struct mvpp2_buff_hdr {
-       u32 next_buff_phys_addr;
-       u32 next_buff_virt_addr;
-       u16 byte_count;
-       u16 info;
-       u8  reserved1;          /* bm_qset (for future use, BM)         */
-};
-
-/* Buffer header info bits */
-#define MVPP2_B_HDR_INFO_MC_ID_MASK    0xfff
-#define MVPP2_B_HDR_INFO_MC_ID(info)   ((info) & MVPP2_B_HDR_INFO_MC_ID_MASK)
-#define MVPP2_B_HDR_INFO_LAST_OFFS     12
-#define MVPP2_B_HDR_INFO_LAST_MASK     BIT(12)
-#define MVPP2_B_HDR_INFO_IS_LAST(info) \
-          ((info & MVPP2_B_HDR_INFO_LAST_MASK) >> MVPP2_B_HDR_INFO_LAST_OFFS)
-
 /* Static declarations */
 
 /* Number of RXQs used by single port */
@@ -959,12 +1070,177 @@ static int txq_number = MVPP2_MAX_TXQ;
 
 static void mvpp2_write(struct mvpp2 *priv, u32 offset, u32 data)
 {
-       writel(data, priv->base + offset);
+       writel(data, priv->cpu_base[0] + offset);
 }
 
 static u32 mvpp2_read(struct mvpp2 *priv, u32 offset)
 {
-       return readl(priv->base + offset);
+       return readl(priv->cpu_base[0] + offset);
+}
+
+/* These accessors should be used to access:
+ *
+ * - per-CPU registers, where each CPU has its own copy of the
+ *   register.
+ *
+ *   MVPP2_BM_VIRT_ALLOC_REG
+ *   MVPP2_BM_ADDR_HIGH_ALLOC
+ *   MVPP22_BM_ADDR_HIGH_RLS_REG
+ *   MVPP2_BM_VIRT_RLS_REG
+ *   MVPP2_ISR_RX_TX_CAUSE_REG
+ *   MVPP2_ISR_RX_TX_MASK_REG
+ *   MVPP2_TXQ_NUM_REG
+ *   MVPP2_AGGR_TXQ_UPDATE_REG
+ *   MVPP2_TXQ_RSVD_REQ_REG
+ *   MVPP2_TXQ_RSVD_RSLT_REG
+ *   MVPP2_TXQ_SENT_REG
+ *   MVPP2_RXQ_NUM_REG
+ *
+ * - global registers that must be accessed through a specific CPU
+ *   window, because they are related to an access to a per-CPU
+ *   register
+ *
+ *   MVPP2_BM_PHY_ALLOC_REG    (related to MVPP2_BM_VIRT_ALLOC_REG)
+ *   MVPP2_BM_PHY_RLS_REG      (related to MVPP2_BM_VIRT_RLS_REG)
+ *   MVPP2_RXQ_THRESH_REG      (related to MVPP2_RXQ_NUM_REG)
+ *   MVPP2_RXQ_DESC_ADDR_REG   (related to MVPP2_RXQ_NUM_REG)
+ *   MVPP2_RXQ_DESC_SIZE_REG   (related to MVPP2_RXQ_NUM_REG)
+ *   MVPP2_RXQ_INDEX_REG       (related to MVPP2_RXQ_NUM_REG)
+ *   MVPP2_TXQ_PENDING_REG     (related to MVPP2_TXQ_NUM_REG)
+ *   MVPP2_TXQ_DESC_ADDR_REG   (related to MVPP2_TXQ_NUM_REG)
+ *   MVPP2_TXQ_DESC_SIZE_REG   (related to MVPP2_TXQ_NUM_REG)
+ *   MVPP2_TXQ_INDEX_REG       (related to MVPP2_TXQ_NUM_REG)
+ *   MVPP2_TXQ_PREF_BUF_REG    (related to MVPP2_TXQ_NUM_REG)
+ */
+static void mvpp2_percpu_write(struct mvpp2 *priv, int cpu,
+                              u32 offset, u32 data)
+{
+       writel(data, priv->cpu_base[cpu] + offset);
+}
+
+static u32 mvpp2_percpu_read(struct mvpp2 *priv, int cpu,
+                            u32 offset)
+{
+       return readl(priv->cpu_base[cpu] + offset);
+}
+
+static dma_addr_t mvpp2_txdesc_dma_addr_get(struct mvpp2_port *port,
+                                           struct mvpp2_tx_desc *tx_desc)
+{
+       if (port->priv->hw_version == MVPP21)
+               return tx_desc->pp21.buf_dma_addr;
+       else
+               return tx_desc->pp22.buf_dma_addr_ptp & GENMASK_ULL(40, 0);
+}
+
+static void mvpp2_txdesc_dma_addr_set(struct mvpp2_port *port,
+                                     struct mvpp2_tx_desc *tx_desc,
+                                     dma_addr_t dma_addr)
+{
+       if (port->priv->hw_version == MVPP21) {
+               tx_desc->pp21.buf_dma_addr = dma_addr;
+       } else {
+               u64 val = (u64)dma_addr;
+
+               tx_desc->pp22.buf_dma_addr_ptp &= ~GENMASK_ULL(40, 0);
+               tx_desc->pp22.buf_dma_addr_ptp |= val;
+       }
+}
+
+static size_t mvpp2_txdesc_size_get(struct mvpp2_port *port,
+                                   struct mvpp2_tx_desc *tx_desc)
+{
+       if (port->priv->hw_version == MVPP21)
+               return tx_desc->pp21.data_size;
+       else
+               return tx_desc->pp22.data_size;
+}
+
+static void mvpp2_txdesc_size_set(struct mvpp2_port *port,
+                                 struct mvpp2_tx_desc *tx_desc,
+                                 size_t size)
+{
+       if (port->priv->hw_version == MVPP21)
+               tx_desc->pp21.data_size = size;
+       else
+               tx_desc->pp22.data_size = size;
+}
+
+static void mvpp2_txdesc_txq_set(struct mvpp2_port *port,
+                                struct mvpp2_tx_desc *tx_desc,
+                                unsigned int txq)
+{
+       if (port->priv->hw_version == MVPP21)
+               tx_desc->pp21.phys_txq = txq;
+       else
+               tx_desc->pp22.phys_txq = txq;
+}
+
+static void mvpp2_txdesc_cmd_set(struct mvpp2_port *port,
+                                struct mvpp2_tx_desc *tx_desc,
+                                unsigned int command)
+{
+       if (port->priv->hw_version == MVPP21)
+               tx_desc->pp21.command = command;
+       else
+               tx_desc->pp22.command = command;
+}
+
+static void mvpp2_txdesc_offset_set(struct mvpp2_port *port,
+                                   struct mvpp2_tx_desc *tx_desc,
+                                   unsigned int offset)
+{
+       if (port->priv->hw_version == MVPP21)
+               tx_desc->pp21.packet_offset = offset;
+       else
+               tx_desc->pp22.packet_offset = offset;
+}
+
+static unsigned int mvpp2_txdesc_offset_get(struct mvpp2_port *port,
+                                           struct mvpp2_tx_desc *tx_desc)
+{
+       if (port->priv->hw_version == MVPP21)
+               return tx_desc->pp21.packet_offset;
+       else
+               return tx_desc->pp22.packet_offset;
+}
+
+static dma_addr_t mvpp2_rxdesc_dma_addr_get(struct mvpp2_port *port,
+                                           struct mvpp2_rx_desc *rx_desc)
+{
+       if (port->priv->hw_version == MVPP21)
+               return rx_desc->pp21.buf_dma_addr;
+       else
+               return rx_desc->pp22.buf_dma_addr_key_hash & GENMASK_ULL(40, 0);
+}
+
+static unsigned long mvpp2_rxdesc_cookie_get(struct mvpp2_port *port,
+                                            struct mvpp2_rx_desc *rx_desc)
+{
+       if (port->priv->hw_version == MVPP21)
+               return rx_desc->pp21.buf_cookie;
+       else
+               return rx_desc->pp22.buf_cookie_misc & GENMASK_ULL(40, 0);
+}
+
+static size_t mvpp2_rxdesc_size_get(struct mvpp2_port *port,
+                                   struct mvpp2_rx_desc *rx_desc)
+{
+       if (port->priv->hw_version == MVPP21)
+               return rx_desc->pp21.data_size;
+       else
+               return rx_desc->pp22.data_size;
+}
+
+static u32 mvpp2_rxdesc_status_get(struct mvpp2_port *port,
+                                  struct mvpp2_rx_desc *rx_desc)
+{
+       if (port->priv->hw_version == MVPP21)
+               return rx_desc->pp21.status;
+       else
+               return rx_desc->pp22.status;
 }
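The idiom the comment above prescribes, condensed from the queue-init code further down: select the object through its indirection register, then touch the dependent registers through the same CPU window. The caller must not migrate between the two accesses, which holds in the driver's init and softirq contexts:

	int cpu = smp_processor_id();

	/* select the RX queue, then program one of its indirect registers */
	mvpp2_percpu_write(port->priv, cpu, MVPP2_RXQ_NUM_REG, rxq->id);
	mvpp2_percpu_write(port->priv, cpu, MVPP2_RXQ_DESC_SIZE_REG, rxq->size);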
 
 static void mvpp2_txq_inc_get(struct mvpp2_txq_pcpu *txq_pcpu)
@@ -974,15 +1250,17 @@ static void mvpp2_txq_inc_get(struct mvpp2_txq_pcpu *txq_pcpu)
                txq_pcpu->txq_get_index = 0;
 }
 
-static void mvpp2_txq_inc_put(struct mvpp2_txq_pcpu *txq_pcpu,
+static void mvpp2_txq_inc_put(struct mvpp2_port *port,
+                             struct mvpp2_txq_pcpu *txq_pcpu,
                              struct sk_buff *skb,
                              struct mvpp2_tx_desc *tx_desc)
 {
        struct mvpp2_txq_pcpu_buf *tx_buf =
                txq_pcpu->buffs + txq_pcpu->txq_put_index;
        tx_buf->skb = skb;
-       tx_buf->size = tx_desc->data_size;
-       tx_buf->phys = tx_desc->buf_phys_addr + tx_desc->packet_offset;
+       tx_buf->size = mvpp2_txdesc_size_get(port, tx_desc);
+       tx_buf->dma = mvpp2_txdesc_dma_addr_get(port, tx_desc) +
+               mvpp2_txdesc_offset_get(port, tx_desc);
        txq_pcpu->txq_put_index++;
        if (txq_pcpu->txq_put_index == txq_pcpu->size)
                txq_pcpu->txq_put_index = 0;
@@ -3378,27 +3656,39 @@ static int mvpp2_bm_pool_create(struct platform_device *pdev,
                                struct mvpp2 *priv,
                                struct mvpp2_bm_pool *bm_pool, int size)
 {
-       int size_bytes;
        u32 val;
 
-       size_bytes = sizeof(u32) * size;
-       bm_pool->virt_addr = dma_alloc_coherent(&pdev->dev, size_bytes,
-                                               &bm_pool->phys_addr,
+       /* Number of buffer pointers must be a multiple of 16, as per
+        * hardware constraints
+        */
+       if (!IS_ALIGNED(size, 16))
+               return -EINVAL;
+
+       /* PPv2.1 needs 8 bytes per buffer pointer, PPv2.2 needs 16
+        * bytes per buffer pointer
+        */
+       if (priv->hw_version == MVPP21)
+               bm_pool->size_bytes = 2 * sizeof(u32) * size;
+       else
+               bm_pool->size_bytes = 2 * sizeof(u64) * size;
+
+       bm_pool->virt_addr = dma_alloc_coherent(&pdev->dev, bm_pool->size_bytes,
+                                               &bm_pool->dma_addr,
                                                GFP_KERNEL);
        if (!bm_pool->virt_addr)
                return -ENOMEM;
 
        if (!IS_ALIGNED((unsigned long)bm_pool->virt_addr,
                        MVPP2_BM_POOL_PTR_ALIGN)) {
-               dma_free_coherent(&pdev->dev, size_bytes, bm_pool->virt_addr,
-                                 bm_pool->phys_addr);
+               dma_free_coherent(&pdev->dev, bm_pool->size_bytes,
+                                 bm_pool->virt_addr, bm_pool->dma_addr);
                dev_err(&pdev->dev, "BM pool %d is not %d bytes aligned\n",
                        bm_pool->id, MVPP2_BM_POOL_PTR_ALIGN);
                return -ENOMEM;
        }
 
        mvpp2_write(priv, MVPP2_BM_POOL_BASE_REG(bm_pool->id),
-                   bm_pool->phys_addr);
+                   lower_32_bits(bm_pool->dma_addr));
        mvpp2_write(priv, MVPP2_BM_POOL_SIZE_REG(bm_pool->id), size);
 
        val = mvpp2_read(priv, MVPP2_BM_POOL_CTRL_REG(bm_pool->id));
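Each BPPE entry holds a (DMA address, cookie) pair: two u32s on PPv2.1, two u64s on PPv2.2, which is where the 8 vs. 16 bytes per buffer pointer come from. Worked example with an assumed pool size:

	int size = 2048;	/* multiple of 16, per the check above */
	size_t pp21_bytes = 2 * sizeof(u32) * size;	/* 16384 bytes */
	size_t pp22_bytes = 2 * sizeof(u64) * size;	/* 32768 bytes */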
@@ -3426,6 +3716,34 @@ static void mvpp2_bm_pool_bufsize_set(struct mvpp2 *priv,
        mvpp2_write(priv, MVPP2_POOL_BUF_SIZE_REG(bm_pool->id), val);
 }
 
+static void mvpp2_bm_bufs_get_addrs(struct device *dev, struct mvpp2 *priv,
+                                   struct mvpp2_bm_pool *bm_pool,
+                                   dma_addr_t *dma_addr,
+                                   phys_addr_t *phys_addr)
+{
+       int cpu = smp_processor_id();
+
+       *dma_addr = mvpp2_percpu_read(priv, cpu,
+                                     MVPP2_BM_PHY_ALLOC_REG(bm_pool->id));
+       *phys_addr = mvpp2_percpu_read(priv, cpu, MVPP2_BM_VIRT_ALLOC_REG);
+
+       if (priv->hw_version == MVPP22) {
+               u32 val;
+               u32 dma_addr_highbits, phys_addr_highbits;
+
+               val = mvpp2_percpu_read(priv, cpu, MVPP22_BM_ADDR_HIGH_ALLOC);
+               dma_addr_highbits = (val & MVPP22_BM_ADDR_HIGH_PHYS_MASK);
+               phys_addr_highbits = (val & MVPP22_BM_ADDR_HIGH_VIRT_MASK) >>
+                       MVPP22_BM_ADDR_HIGH_VIRT_SHIFT;
+
+               if (sizeof(dma_addr_t) == 8)
+                       *dma_addr |= (u64)dma_addr_highbits << 32;
+
+               if (sizeof(phys_addr_t) == 8)
+                       *phys_addr |= (u64)phys_addr_highbits << 32;
+       }
+}
+
 /* Free all buffers from the pool */
 static void mvpp2_bm_bufs_free(struct device *dev, struct mvpp2 *priv,
                               struct mvpp2_bm_pool *bm_pool)
@@ -3433,21 +3751,21 @@ static void mvpp2_bm_bufs_free(struct device *dev, struct mvpp2 *priv,
        int i;
 
        for (i = 0; i < bm_pool->buf_num; i++) {
-               dma_addr_t buf_phys_addr;
-               unsigned long vaddr;
+               dma_addr_t buf_dma_addr;
+               phys_addr_t buf_phys_addr;
+               void *data;
 
-               /* Get buffer virtual address (indirect access) */
-               buf_phys_addr = mvpp2_read(priv,
-                                          MVPP2_BM_PHY_ALLOC_REG(bm_pool->id));
-               vaddr = mvpp2_read(priv, MVPP2_BM_VIRT_ALLOC_REG);
+               mvpp2_bm_bufs_get_addrs(dev, priv, bm_pool,
+                                       &buf_dma_addr, &buf_phys_addr);
 
-               dma_unmap_single(dev, buf_phys_addr,
+               dma_unmap_single(dev, buf_dma_addr,
                                 bm_pool->buf_size, DMA_FROM_DEVICE);
 
-               if (!vaddr)
+               data = (void *)phys_to_virt(buf_phys_addr);
+               if (!data)
                        break;
 
-               mvpp2_frag_free(bm_pool, (void *)vaddr);
+               mvpp2_frag_free(bm_pool, data);
        }
 
        /* Update BM driver with number of buffers removed from pool */
@@ -3471,9 +3789,9 @@ static int mvpp2_bm_pool_destroy(struct platform_device *pdev,
        val |= MVPP2_BM_STOP_MASK;
        mvpp2_write(priv, MVPP2_BM_POOL_CTRL_REG(bm_pool->id), val);
 
-       dma_free_coherent(&pdev->dev, sizeof(u32) * bm_pool->size,
+       dma_free_coherent(&pdev->dev, bm_pool->size_bytes,
                          bm_pool->virt_addr,
-                         bm_pool->phys_addr);
+                         bm_pool->dma_addr);
        return 0;
 }
 
@@ -3529,17 +3847,20 @@ static int mvpp2_bm_init(struct platform_device *pdev, struct mvpp2 *priv)
 static void mvpp2_rxq_long_pool_set(struct mvpp2_port *port,
                                    int lrxq, int long_pool)
 {
-       u32 val;
+       u32 val, mask;
        int prxq;
 
        /* Get queue physical ID */
        prxq = port->rxqs[lrxq]->id;
 
-       val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(prxq));
-       val &= ~MVPP2_RXQ_POOL_LONG_MASK;
-       val |= ((long_pool << MVPP2_RXQ_POOL_LONG_OFFS) &
-                   MVPP2_RXQ_POOL_LONG_MASK);
+       if (port->priv->hw_version == MVPP21)
+               mask = MVPP21_RXQ_POOL_LONG_MASK;
+       else
+               mask = MVPP22_RXQ_POOL_LONG_MASK;
 
+       val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(prxq));
+       val &= ~mask;
+       val |= (long_pool << MVPP2_RXQ_POOL_LONG_OFFS) & mask;
        mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(prxq), val);
 }
 
@@ -3547,40 +3868,45 @@ static void mvpp2_rxq_long_pool_set(struct mvpp2_port *port,
 static void mvpp2_rxq_short_pool_set(struct mvpp2_port *port,
                                     int lrxq, int short_pool)
 {
-       u32 val;
+       u32 val, mask;
        int prxq;
 
        /* Get queue physical ID */
        prxq = port->rxqs[lrxq]->id;
 
-       val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(prxq));
-       val &= ~MVPP2_RXQ_POOL_SHORT_MASK;
-       val |= ((short_pool << MVPP2_RXQ_POOL_SHORT_OFFS) &
-                   MVPP2_RXQ_POOL_SHORT_MASK);
+       if (port->priv->hw_version == MVPP21)
+               mask = MVPP21_RXQ_POOL_SHORT_MASK;
+       else
+               mask = MVPP22_RXQ_POOL_SHORT_MASK;
 
+       val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(prxq));
+       val &= ~mask;
+       val |= (short_pool << MVPP2_RXQ_POOL_SHORT_OFFS) & mask;
        mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(prxq), val);
 }
 
 static void *mvpp2_buf_alloc(struct mvpp2_port *port,
                             struct mvpp2_bm_pool *bm_pool,
-                            dma_addr_t *buf_phys_addr,
+                            dma_addr_t *buf_dma_addr,
+                            phys_addr_t *buf_phys_addr,
                             gfp_t gfp_mask)
 {
-       dma_addr_t phys_addr;
+       dma_addr_t dma_addr;
        void *data;
 
        data = mvpp2_frag_alloc(bm_pool);
        if (!data)
                return NULL;
 
-       phys_addr = dma_map_single(port->dev->dev.parent, data,
-                                  MVPP2_RX_BUF_SIZE(bm_pool->pkt_size),
-                                   DMA_FROM_DEVICE);
-       if (unlikely(dma_mapping_error(port->dev->dev.parent, phys_addr))) {
+       dma_addr = dma_map_single(port->dev->dev.parent, data,
+                                 MVPP2_RX_BUF_SIZE(bm_pool->pkt_size),
+                                 DMA_FROM_DEVICE);
+       if (unlikely(dma_mapping_error(port->dev->dev.parent, dma_addr))) {
                mvpp2_frag_free(bm_pool, data);
                return NULL;
        }
-       *buf_phys_addr = phys_addr;
+       *buf_dma_addr = dma_addr;
+       *buf_phys_addr = virt_to_phys(data);
 
        return data;
 }
@@ -3604,37 +3930,46 @@ static inline int mvpp2_bm_cookie_pool_get(unsigned long cookie)
 
 /* Release buffer to BM */
 static inline void mvpp2_bm_pool_put(struct mvpp2_port *port, int pool,
-                                    dma_addr_t buf_phys_addr,
-                                    unsigned long buf_virt_addr)
+                                    dma_addr_t buf_dma_addr,
+                                    phys_addr_t buf_phys_addr)
 {
-       mvpp2_write(port->priv, MVPP2_BM_VIRT_RLS_REG, buf_virt_addr);
-       mvpp2_write(port->priv, MVPP2_BM_PHY_RLS_REG(pool), buf_phys_addr);
-}
+       int cpu = smp_processor_id();
 
-/* Release multicast buffer */
-static void mvpp2_bm_pool_mc_put(struct mvpp2_port *port, int pool,
-                                dma_addr_t buf_phys_addr,
-                                unsigned long buf_virt_addr,
-                                int mc_id)
-{
-       u32 val = 0;
+       if (port->priv->hw_version == MVPP22) {
+               u32 val = 0;
+
+               if (sizeof(dma_addr_t) == 8)
+                       val |= upper_32_bits(buf_dma_addr) &
+                               MVPP22_BM_ADDR_HIGH_PHYS_RLS_MASK;
+
+               if (sizeof(phys_addr_t) == 8)
+                       val |= (upper_32_bits(buf_phys_addr)
+                               << MVPP22_BM_ADDR_HIGH_VIRT_RLS_SHIFT) &
+                               MVPP22_BM_ADDR_HIGH_VIRT_RLS_MASK;
 
-       val |= (mc_id & MVPP2_BM_MC_ID_MASK);
-       mvpp2_write(port->priv, MVPP2_BM_MC_RLS_REG, val);
+               mvpp2_percpu_write(port->priv, cpu,
+                                  MVPP22_BM_ADDR_HIGH_RLS_REG, val);
+       }
 
-       mvpp2_bm_pool_put(port, pool,
-                         buf_phys_addr | MVPP2_BM_PHY_RLS_MC_BUFF_MASK,
-                         buf_virt_addr);
+       /* MVPP2_BM_VIRT_RLS_REG is not interpreted by HW; it is simply
+        * returned in the "cookie" field of the RX descriptor.
+        * Instead of storing the virtual address, we store the
+        * physical address.
+        */
+       mvpp2_percpu_write(port->priv, cpu,
+                          MVPP2_BM_VIRT_RLS_REG, buf_phys_addr);
+       mvpp2_percpu_write(port->priv, cpu,
+                          MVPP2_BM_PHY_RLS_REG(pool), buf_dma_addr);
 }
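A concrete picture of the PPv2.2 release path, with an assumed 40-bit buffer address: the high byte is staged in MVPP22_BM_ADDR_HIGH_RLS_REG and the low word is written last to MVPP2_BM_PHY_RLS_REG(), the ordering the function above relies on (the low-word write appears to be the committing access):

	dma_addr_t dma = 0xAB12345678ULL;	/* assumed example address */
	u32 high = upper_32_bits(dma) &
		   MVPP22_BM_ADDR_HIGH_PHYS_RLS_MASK;	/* 0xAB */
	u32 low = lower_32_bits(dma);			/* 0x12345678 */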
 
 /* Refill BM pool */
 static void mvpp2_pool_refill(struct mvpp2_port *port, u32 bm,
-                             dma_addr_t phys_addr,
-                             unsigned long cookie)
+                             dma_addr_t dma_addr,
+                             phys_addr_t phys_addr)
 {
        int pool = mvpp2_bm_cookie_pool_get(bm);
 
-       mvpp2_bm_pool_put(port, pool, phys_addr, cookie);
+       mvpp2_bm_pool_put(port, pool, dma_addr, phys_addr);
 }
 
 /* Allocate buffers for the pool */
@@ -3642,7 +3977,8 @@ static int mvpp2_bm_bufs_add(struct mvpp2_port *port,
                             struct mvpp2_bm_pool *bm_pool, int buf_num)
 {
        int i, buf_size, total_size;
-       dma_addr_t phys_addr;
+       dma_addr_t dma_addr;
+       phys_addr_t phys_addr;
        void *buf;
 
        buf_size = MVPP2_RX_BUF_SIZE(bm_pool->pkt_size);
@@ -3657,12 +3993,13 @@ static int mvpp2_bm_bufs_add(struct mvpp2_port *port,
        }
 
        for (i = 0; i < buf_num; i++) {
-               buf = mvpp2_buf_alloc(port, bm_pool, &phys_addr, GFP_KERNEL);
+               buf = mvpp2_buf_alloc(port, bm_pool, &dma_addr,
+                                     &phys_addr, GFP_KERNEL);
                if (!buf)
                        break;
 
-               mvpp2_bm_pool_put(port, bm_pool->id, phys_addr,
-                                 (unsigned long)buf);
+               mvpp2_bm_pool_put(port, bm_pool->id, dma_addr,
+                                 phys_addr);
        }
 
        /* Update BM driver with number of buffers added to pool */
@@ -3830,7 +4167,8 @@ static void mvpp2_interrupts_mask(void *arg)
 {
        struct mvpp2_port *port = arg;
 
-       mvpp2_write(port->priv, MVPP2_ISR_RX_TX_MASK_REG(port->id), 0);
+       mvpp2_percpu_write(port->priv, smp_processor_id(),
+                          MVPP2_ISR_RX_TX_MASK_REG(port->id), 0);
 }
 
 /* Unmask the current CPU's Rx/Tx interrupts */
@@ -3838,17 +4176,46 @@ static void mvpp2_interrupts_unmask(void *arg)
 {
        struct mvpp2_port *port = arg;
 
-       mvpp2_write(port->priv, MVPP2_ISR_RX_TX_MASK_REG(port->id),
-                   (MVPP2_CAUSE_MISC_SUM_MASK |
-                    MVPP2_CAUSE_RXQ_OCCUP_DESC_ALL_MASK));
+       mvpp2_percpu_write(port->priv, smp_processor_id(),
+                          MVPP2_ISR_RX_TX_MASK_REG(port->id),
+                          (MVPP2_CAUSE_MISC_SUM_MASK |
+                           MVPP2_CAUSE_RXQ_OCCUP_DESC_ALL_MASK));
 }
 
 /* Port configuration routines */
 
+static void mvpp22_port_mii_set(struct mvpp2_port *port)
+{
+       u32 val;
+
+       /* Only GOP port 0 has an XLG MAC */
+       if (port->gop_id == 0) {
+               val = readl(port->base + MVPP22_XLG_CTRL3_REG);
+               val &= ~MVPP22_XLG_CTRL3_MACMODESELECT_MASK;
+               val |= MVPP22_XLG_CTRL3_MACMODESELECT_GMAC;
+               writel(val, port->base + MVPP22_XLG_CTRL3_REG);
+       }
+
+       val = readl(port->base + MVPP22_GMAC_CTRL_4_REG);
+       if (port->phy_interface == PHY_INTERFACE_MODE_RGMII)
+               val |= MVPP22_CTRL4_EXT_PIN_GMII_SEL;
+       else
+               val &= ~MVPP22_CTRL4_EXT_PIN_GMII_SEL;
+       val &= ~MVPP22_CTRL4_DP_CLK_SEL;
+       val |= MVPP22_CTRL4_SYNC_BYPASS;
+       val |= MVPP22_CTRL4_QSGMII_BYPASS_ACTIVE;
+       writel(val, port->base + MVPP22_GMAC_CTRL_4_REG);
+}
+
 static void mvpp2_port_mii_set(struct mvpp2_port *port)
 {
        u32 val;
 
+       if (port->priv->hw_version == MVPP22)
+               mvpp22_port_mii_set(port);
+
        val = readl(port->base + MVPP2_GMAC_CTRL_2_REG);
 
        switch (port->phy_interface) {
@@ -3952,16 +4319,18 @@ static void mvpp2_defaults_set(struct mvpp2_port *port)
 {
        int tx_port_num, val, queue, ptxq, lrxq;
 
-       /* Configure port to loopback if needed */
-       if (port->flags & MVPP2_F_LOOPBACK)
-               mvpp2_port_loopback_set(port);
+       if (port->priv->hw_version == MVPP21) {
+               /* Configure port to loopback if needed */
+               if (port->flags & MVPP2_F_LOOPBACK)
+                       mvpp2_port_loopback_set(port);
 
-       /* Update TX FIFO MIN Threshold */
-       val = readl(port->base + MVPP2_GMAC_PORT_FIFO_CFG_1_REG);
-       val &= ~MVPP2_GMAC_TX_FIFO_MIN_TH_ALL_MASK;
-       /* Min. TX threshold must be less than minimal packet length */
-       val |= MVPP2_GMAC_TX_FIFO_MIN_TH_MASK(64 - 4 - 2);
-       writel(val, port->base + MVPP2_GMAC_PORT_FIFO_CFG_1_REG);
+               /* Update TX FIFO MIN Threshold */
+               val = readl(port->base + MVPP2_GMAC_PORT_FIFO_CFG_1_REG);
+               val &= ~MVPP2_GMAC_TX_FIFO_MIN_TH_ALL_MASK;
+               /* Min. TX threshold must be less than minimal packet length */
+               val |= MVPP2_GMAC_TX_FIFO_MIN_TH_MASK(64 - 4 - 2);
+               writel(val, port->base + MVPP2_GMAC_PORT_FIFO_CFG_1_REG);
+       }
 
        /* Disable Legacy WRR, Disable EJP, Release from reset */
        tx_port_num = mvpp2_egress_port(port);
@@ -4149,11 +4518,15 @@ static void mvpp2_rxq_offset_set(struct mvpp2_port *port,
 }
 
 /* Obtain BM cookie information from descriptor */
-static u32 mvpp2_bm_cookie_build(struct mvpp2_rx_desc *rx_desc)
+static u32 mvpp2_bm_cookie_build(struct mvpp2_port *port,
+                                struct mvpp2_rx_desc *rx_desc)
 {
-       int pool = (rx_desc->status & MVPP2_RXD_BM_POOL_ID_MASK) >>
-                  MVPP2_RXD_BM_POOL_ID_OFFS;
        int cpu = smp_processor_id();
+       int pool;
+
+       pool = (mvpp2_rxdesc_status_get(port, rx_desc) &
+               MVPP2_RXD_BM_POOL_ID_MASK) >>
+               MVPP2_RXD_BM_POOL_ID_OFFS;
 
        return ((pool & 0xFF) << MVPP2_BM_COOKIE_POOL_OFFS) |
               ((cpu & 0xFF) << MVPP2_BM_COOKIE_CPU_OFFS);
@@ -4161,18 +4534,6 @@ static u32 mvpp2_bm_cookie_build(struct mvpp2_rx_desc *rx_desc)
 
 /* Tx descriptors helper methods */
 
-/* Get number of Tx descriptors waiting to be transmitted by HW */
-static int mvpp2_txq_pend_desc_num_get(struct mvpp2_port *port,
-                                      struct mvpp2_tx_queue *txq)
-{
-       u32 val;
-
-       mvpp2_write(port->priv, MVPP2_TXQ_NUM_REG, txq->id);
-       val = mvpp2_read(port->priv, MVPP2_TXQ_PENDING_REG);
-
-       return val & MVPP2_TXQ_PENDING_MASK;
-}
-
 /* Get pointer to next Tx descriptor to be processed (send) by HW */
 static struct mvpp2_tx_desc *
 mvpp2_txq_next_desc_get(struct mvpp2_tx_queue *txq)
@@ -4187,7 +4548,8 @@ mvpp2_txq_next_desc_get(struct mvpp2_tx_queue *txq)
 static void mvpp2_aggr_txq_pend_desc_add(struct mvpp2_port *port, int pending)
 {
        /* aggregated access - relevant TXQ number is written in TX desc */
-       mvpp2_write(port->priv, MVPP2_AGGR_TXQ_UPDATE_REG, pending);
+       mvpp2_percpu_write(port->priv, smp_processor_id(),
+                          MVPP2_AGGR_TXQ_UPDATE_REG, pending);
 }
 
 
@@ -4216,11 +4578,12 @@ static int mvpp2_txq_alloc_reserved_desc(struct mvpp2 *priv,
                                         struct mvpp2_tx_queue *txq, int num)
 {
        u32 val;
+       int cpu = smp_processor_id();
 
        val = (txq->id << MVPP2_TXQ_RSVD_REQ_Q_OFFSET) | num;
-       mvpp2_write(priv, MVPP2_TXQ_RSVD_REQ_REG, val);
+       mvpp2_percpu_write(priv, cpu, MVPP2_TXQ_RSVD_REQ_REG, val);
 
-       val = mvpp2_read(priv, MVPP2_TXQ_RSVD_RSLT_REG);
+       val = mvpp2_percpu_read(priv, cpu, MVPP2_TXQ_RSVD_RSLT_REG);
 
        return val & MVPP2_TXQ_RSVD_RSLT_MASK;
 }
@@ -4321,7 +4684,8 @@ static inline int mvpp2_txq_sent_desc_proc(struct mvpp2_port *port,
        u32 val;
 
        /* Reading status reg resets transmitted descriptor counter */
-       val = mvpp2_read(port->priv, MVPP2_TXQ_SENT_REG(txq->id));
+       val = mvpp2_percpu_read(port->priv, smp_processor_id(),
+                               MVPP2_TXQ_SENT_REG(txq->id));
 
        return (val & MVPP2_TRANSMITTED_COUNT_MASK) >>
                MVPP2_TRANSMITTED_COUNT_OFFSET;
@@ -4335,7 +4699,8 @@ static void mvpp2_txq_sent_counter_clear(void *arg)
        for (queue = 0; queue < txq_number; queue++) {
                int id = port->txqs[queue]->id;
 
-               mvpp2_read(port->priv, MVPP2_TXQ_SENT_REG(id));
+               mvpp2_percpu_read(port->priv, smp_processor_id(),
+                                 MVPP2_TXQ_SENT_REG(id));
        }
 }
 
@@ -4394,12 +4759,14 @@ static void mvpp2_txp_max_tx_size_set(struct mvpp2_port *port)
 static void mvpp2_rx_pkts_coal_set(struct mvpp2_port *port,
                                   struct mvpp2_rx_queue *rxq)
 {
+       int cpu = smp_processor_id();
+
        if (rxq->pkts_coal > MVPP2_OCCUPIED_THRESH_MASK)
                rxq->pkts_coal = MVPP2_OCCUPIED_THRESH_MASK;
 
-       mvpp2_write(port->priv, MVPP2_RXQ_NUM_REG, rxq->id);
-       mvpp2_write(port->priv, MVPP2_RXQ_THRESH_REG,
-                   rxq->pkts_coal);
+       mvpp2_percpu_write(port->priv, cpu, MVPP2_RXQ_NUM_REG, rxq->id);
+       mvpp2_percpu_write(port->priv, cpu, MVPP2_RXQ_THRESH_REG,
+                          rxq->pkts_coal);
 }
 
 static u32 mvpp2_usec_to_cycles(u32 usec, unsigned long clk_hz)
@@ -4449,7 +4816,7 @@ static void mvpp2_txq_bufs_free(struct mvpp2_port *port,
                struct mvpp2_txq_pcpu_buf *tx_buf =
                        txq_pcpu->buffs + txq_pcpu->txq_get_index;
 
-               dma_unmap_single(port->dev->dev.parent, tx_buf->phys,
+               dma_unmap_single(port->dev->dev.parent, tx_buf->dma,
                                 tx_buf->size, DMA_TO_DEVICE);
                if (tx_buf->skb)
                        dev_kfree_skb_any(tx_buf->skb);
@@ -4527,10 +4894,12 @@ static int mvpp2_aggr_txq_init(struct platform_device *pdev,
                               int desc_num, int cpu,
                               struct mvpp2 *priv)
 {
+       u32 txq_dma;
+
        /* Allocate memory for TX descriptors */
        aggr_txq->descs = dma_alloc_coherent(&pdev->dev,
                                desc_num * MVPP2_DESC_ALIGNED_SIZE,
-                               &aggr_txq->descs_phys, GFP_KERNEL);
+                               &aggr_txq->descs_dma, GFP_KERNEL);
        if (!aggr_txq->descs)
                return -ENOMEM;
 
@@ -4540,10 +4909,16 @@ static int mvpp2_aggr_txq_init(struct platform_device *pdev,
        aggr_txq->next_desc_to_proc = mvpp2_read(priv,
                                                 MVPP2_AGGR_TXQ_INDEX_REG(cpu));
 
-       /* Set Tx descriptors queue starting address */
-       /* indirect access */
-       mvpp2_write(priv, MVPP2_AGGR_TXQ_DESC_ADDR_REG(cpu),
-                   aggr_txq->descs_phys);
+       /* Set Tx descriptors queue starting address - indirect
+        * access
+        */
+       if (priv->hw_version == MVPP21)
+               txq_dma = aggr_txq->descs_dma;
+       else
+               txq_dma = aggr_txq->descs_dma >>
+                       MVPP22_AGGR_TXQ_DESC_ADDR_OFFS;
+
+       mvpp2_write(priv, MVPP2_AGGR_TXQ_DESC_ADDR_REG(cpu), txq_dma);
        mvpp2_write(priv, MVPP2_AGGR_TXQ_DESC_SIZE_REG(cpu), desc_num);
 
        return 0;
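On PPv2.2 the 32-bit register receives bits [39:8] of the DMA address, so the descriptor array must be at least 256-byte aligned; dma_alloc_coherent() returns page-aligned memory, which satisfies this. Numerically, with an assumed example address:

	u64 descs_dma = 0x1234567800ULL;	/* 40-bit, 256-byte aligned */
	u32 reg = descs_dma >> MVPP22_AGGR_TXQ_DESC_ADDR_OFFS;	/* 0x12345678 */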
@@ -4554,12 +4929,15 @@ static int mvpp2_rxq_init(struct mvpp2_port *port,
                          struct mvpp2_rx_queue *rxq)
 
 {
+       u32 rxq_dma;
+       int cpu;
+
        rxq->size = port->rx_ring_size;
 
        /* Allocate memory for RX descriptors */
        rxq->descs = dma_alloc_coherent(port->dev->dev.parent,
                                        rxq->size * MVPP2_DESC_ALIGNED_SIZE,
-                                       &rxq->descs_phys, GFP_KERNEL);
+                                       &rxq->descs_dma, GFP_KERNEL);
        if (!rxq->descs)
                return -ENOMEM;
 
@@ -4569,10 +4947,15 @@ static int mvpp2_rxq_init(struct mvpp2_port *port,
        mvpp2_write(port->priv, MVPP2_RXQ_STATUS_REG(rxq->id), 0);
 
        /* Set Rx descriptors queue starting address - indirect access */
-       mvpp2_write(port->priv, MVPP2_RXQ_NUM_REG, rxq->id);
-       mvpp2_write(port->priv, MVPP2_RXQ_DESC_ADDR_REG, rxq->descs_phys);
-       mvpp2_write(port->priv, MVPP2_RXQ_DESC_SIZE_REG, rxq->size);
-       mvpp2_write(port->priv, MVPP2_RXQ_INDEX_REG, 0);
+       cpu = smp_processor_id();
+       mvpp2_percpu_write(port->priv, cpu, MVPP2_RXQ_NUM_REG, rxq->id);
+       if (port->priv->hw_version == MVPP21)
+               rxq_dma = rxq->descs_dma;
+       else
+               rxq_dma = rxq->descs_dma >> MVPP22_DESC_ADDR_OFFS;
+       mvpp2_percpu_write(port->priv, cpu, MVPP2_RXQ_DESC_ADDR_REG, rxq_dma);
+       mvpp2_percpu_write(port->priv, cpu, MVPP2_RXQ_DESC_SIZE_REG, rxq->size);
+       mvpp2_percpu_write(port->priv, cpu, MVPP2_RXQ_INDEX_REG, 0);
 
        /* Set Offset */
        mvpp2_rxq_offset_set(port, rxq->id, NET_SKB_PAD);
@@ -4599,10 +4982,11 @@ static void mvpp2_rxq_drop_pkts(struct mvpp2_port *port,
 
        for (i = 0; i < rx_received; i++) {
                struct mvpp2_rx_desc *rx_desc = mvpp2_rxq_next_desc_get(rxq);
-               u32 bm = mvpp2_bm_cookie_build(rx_desc);
+               u32 bm = mvpp2_bm_cookie_build(port, rx_desc);
 
-               mvpp2_pool_refill(port, bm, rx_desc->buf_phys_addr,
-                                 rx_desc->buf_cookie);
+               mvpp2_pool_refill(port, bm,
+                                 mvpp2_rxdesc_dma_addr_get(port, rx_desc),
+                                 mvpp2_rxdesc_cookie_get(port, rx_desc));
        }
        mvpp2_rxq_status_update(port, rxq->id, rx_received, rx_received);
 }
@@ -4611,26 +4995,29 @@ static void mvpp2_rxq_drop_pkts(struct mvpp2_port *port,
 static void mvpp2_rxq_deinit(struct mvpp2_port *port,
                             struct mvpp2_rx_queue *rxq)
 {
+       int cpu;
+
        mvpp2_rxq_drop_pkts(port, rxq);
 
        if (rxq->descs)
                dma_free_coherent(port->dev->dev.parent,
                                  rxq->size * MVPP2_DESC_ALIGNED_SIZE,
                                  rxq->descs,
-                                 rxq->descs_phys);
+                                 rxq->descs_dma);
 
        rxq->descs             = NULL;
        rxq->last_desc         = 0;
        rxq->next_desc_to_proc = 0;
-       rxq->descs_phys        = 0;
+       rxq->descs_dma         = 0;
 
        /* Clear Rx descriptors queue starting address and size;
         * free descriptor number
         */
        mvpp2_write(port->priv, MVPP2_RXQ_STATUS_REG(rxq->id), 0);
-       mvpp2_write(port->priv, MVPP2_RXQ_NUM_REG, rxq->id);
-       mvpp2_write(port->priv, MVPP2_RXQ_DESC_ADDR_REG, 0);
-       mvpp2_write(port->priv, MVPP2_RXQ_DESC_SIZE_REG, 0);
+       cpu = smp_processor_id();
+       mvpp2_percpu_write(port->priv, cpu, MVPP2_RXQ_NUM_REG, rxq->id);
+       mvpp2_percpu_write(port->priv, cpu, MVPP2_RXQ_DESC_ADDR_REG, 0);
+       mvpp2_percpu_write(port->priv, cpu, MVPP2_RXQ_DESC_SIZE_REG, 0);
 }
 
 /* Create and initialize a Tx queue */
@@ -4646,23 +5033,25 @@ static int mvpp2_txq_init(struct mvpp2_port *port,
        /* Allocate memory for Tx descriptors */
        txq->descs = dma_alloc_coherent(port->dev->dev.parent,
                                txq->size * MVPP2_DESC_ALIGNED_SIZE,
-                               &txq->descs_phys, GFP_KERNEL);
+                               &txq->descs_dma, GFP_KERNEL);
        if (!txq->descs)
                return -ENOMEM;
 
        txq->last_desc = txq->size - 1;
 
        /* Set Tx descriptors queue starting address - indirect access */
-       mvpp2_write(port->priv, MVPP2_TXQ_NUM_REG, txq->id);
-       mvpp2_write(port->priv, MVPP2_TXQ_DESC_ADDR_REG, txq->descs_phys);
-       mvpp2_write(port->priv, MVPP2_TXQ_DESC_SIZE_REG, txq->size &
-                                            MVPP2_TXQ_DESC_SIZE_MASK);
-       mvpp2_write(port->priv, MVPP2_TXQ_INDEX_REG, 0);
-       mvpp2_write(port->priv, MVPP2_TXQ_RSVD_CLR_REG,
-                   txq->id << MVPP2_TXQ_RSVD_CLR_OFFSET);
-       val = mvpp2_read(port->priv, MVPP2_TXQ_PENDING_REG);
+       cpu = smp_processor_id();
+       mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_NUM_REG, txq->id);
+       mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_DESC_ADDR_REG,
+                          txq->descs_dma);
+       mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_DESC_SIZE_REG,
+                          txq->size & MVPP2_TXQ_DESC_SIZE_MASK);
+       mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_INDEX_REG, 0);
+       mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_RSVD_CLR_REG,
+                          txq->id << MVPP2_TXQ_RSVD_CLR_OFFSET);
+       val = mvpp2_percpu_read(port->priv, cpu, MVPP2_TXQ_PENDING_REG);
        val &= ~MVPP2_TXQ_PENDING_MASK;
-       mvpp2_write(port->priv, MVPP2_TXQ_PENDING_REG, val);
+       mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_PENDING_REG, val);
 
        /* Calculate base address in prefetch buffer. We reserve 16 descriptors
         * for each existing TXQ.
@@ -4673,9 +5062,9 @@ static int mvpp2_txq_init(struct mvpp2_port *port,
        desc = (port->id * MVPP2_MAX_TXQ * desc_per_txq) +
               (txq->log_id * desc_per_txq);
 
-       mvpp2_write(port->priv, MVPP2_TXQ_PREF_BUF_REG,
-                   MVPP2_PREF_BUF_PTR(desc) | MVPP2_PREF_BUF_SIZE_16 |
-                   MVPP2_PREF_BUF_THRESH(desc_per_txq/2));
+       mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_PREF_BUF_REG,
+                          MVPP2_PREF_BUF_PTR(desc) | MVPP2_PREF_BUF_SIZE_16 |
+                          MVPP2_PREF_BUF_THRESH(desc_per_txq / 2));
 
        /* WRR / EJP configuration - indirect access */
        tx_port_num = mvpp2_egress_port(port);
@@ -4716,7 +5105,7 @@ error:
 
        dma_free_coherent(port->dev->dev.parent,
                          txq->size * MVPP2_DESC_ALIGNED_SIZE,
-                         txq->descs, txq->descs_phys);
+                         txq->descs, txq->descs_dma);
 
        return -ENOMEM;
 }
@@ -4736,20 +5125,21 @@ static void mvpp2_txq_deinit(struct mvpp2_port *port,
        if (txq->descs)
                dma_free_coherent(port->dev->dev.parent,
                                  txq->size * MVPP2_DESC_ALIGNED_SIZE,
-                                 txq->descs, txq->descs_phys);
+                                 txq->descs, txq->descs_dma);
 
        txq->descs             = NULL;
        txq->last_desc         = 0;
        txq->next_desc_to_proc = 0;
-       txq->descs_phys        = 0;
+       txq->descs_dma         = 0;
 
        /* Set minimum bandwidth for disabled TXQs */
        mvpp2_write(port->priv, MVPP2_TXQ_SCHED_TOKEN_CNTR_REG(txq->id), 0);
 
        /* Set Tx descriptors queue starting address and size */
-       mvpp2_write(port->priv, MVPP2_TXQ_NUM_REG, txq->id);
-       mvpp2_write(port->priv, MVPP2_TXQ_DESC_ADDR_REG, 0);
-       mvpp2_write(port->priv, MVPP2_TXQ_DESC_SIZE_REG, 0);
+       cpu = smp_processor_id();
+       mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_NUM_REG, txq->id);
+       mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_DESC_ADDR_REG, 0);
+       mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_DESC_SIZE_REG, 0);
 }
 
 /* Cleanup Tx ports */
@@ -4759,10 +5149,11 @@ static void mvpp2_txq_clean(struct mvpp2_port *port, struct mvpp2_tx_queue *txq)
        int delay, pending, cpu;
        u32 val;
 
-       mvpp2_write(port->priv, MVPP2_TXQ_NUM_REG, txq->id);
-       val = mvpp2_read(port->priv, MVPP2_TXQ_PREF_BUF_REG);
+       cpu = smp_processor_id();
+       mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_NUM_REG, txq->id);
+       val = mvpp2_percpu_read(port->priv, cpu, MVPP2_TXQ_PREF_BUF_REG);
        val |= MVPP2_TXQ_DRAIN_EN_MASK;
-       mvpp2_write(port->priv, MVPP2_TXQ_PREF_BUF_REG, val);
+       mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_PREF_BUF_REG, val);
 
        /* The napi queue has been stopped so wait for all packets
         * to be transmitted.
@@ -4778,11 +5169,13 @@ static void mvpp2_txq_clean(struct mvpp2_port *port, struct mvpp2_tx_queue *txq)
                mdelay(1);
                delay++;
 
-               pending = mvpp2_txq_pend_desc_num_get(port, txq);
+               pending = mvpp2_percpu_read(port->priv, cpu,
+                                           MVPP2_TXQ_PENDING_REG);
+               pending &= MVPP2_TXQ_PENDING_MASK;
        } while (pending);
 
        val &= ~MVPP2_TXQ_DRAIN_EN_MASK;
-       mvpp2_write(port->priv, MVPP2_TXQ_PREF_BUF_REG, val);
+       mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_PREF_BUF_REG, val);
 
        for_each_present_cpu(cpu) {
                txq_pcpu = per_cpu_ptr(txq->pcpu, cpu);
@@ -4991,20 +5384,21 @@ static enum hrtimer_restart mvpp2_hr_timer_cb(struct hrtimer *timer)
 static void mvpp2_rx_error(struct mvpp2_port *port,
                           struct mvpp2_rx_desc *rx_desc)
 {
-       u32 status = rx_desc->status;
+       u32 status = mvpp2_rxdesc_status_get(port, rx_desc);
+       size_t sz = mvpp2_rxdesc_size_get(port, rx_desc);
 
        switch (status & MVPP2_RXD_ERR_CODE_MASK) {
        case MVPP2_RXD_ERR_CRC:
-               netdev_err(port->dev, "bad rx status %08x (crc error), size=%d\n",
-                          status, rx_desc->data_size);
+               netdev_err(port->dev, "bad rx status %08x (crc error), size=%zu\n",
+                          status, sz);
                break;
        case MVPP2_RXD_ERR_OVERRUN:
-               netdev_err(port->dev, "bad rx status %08x (overrun error), size=%d\n",
-                          status, rx_desc->data_size);
+               netdev_err(port->dev, "bad rx status %08x (overrun error), size=%zu\n",
+                          status, sz);
                break;
        case MVPP2_RXD_ERR_RESOURCE:
-               netdev_err(port->dev, "bad rx status %08x (resource error), size=%d\n",
-                          status, rx_desc->data_size);
+               netdev_err(port->dev, "bad rx status %08x (resource error), size=%zu\n",
+                          status, sz);
                break;
        }
 }
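
mvpp2_rxdesc_status_get() and mvpp2_rxdesc_size_get() belong to the accessor layer this patch introduces: PPv2.1 and PPv2.2 lay out their RX/TX descriptors differently, so every field access dispatches on hw_version instead of dereferencing struct members directly. A sketch of the likely shape (the pp21/pp22 union members are an assumption):

static u32 mvpp2_rxdesc_status_get(struct mvpp2_port *port,
                                   struct mvpp2_rx_desc *rx_desc)
{
        if (port->priv->hw_version == MVPP21)
                return rx_desc->pp21.status;  /* assumed union member */
        return rx_desc->pp22.status;          /* assumed union member */
}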
@@ -5031,15 +5425,17 @@ static void mvpp2_rx_csum(struct mvpp2_port *port, u32 status,
 static int mvpp2_rx_refill(struct mvpp2_port *port,
                           struct mvpp2_bm_pool *bm_pool, u32 bm)
 {
-       dma_addr_t phys_addr;
+       dma_addr_t dma_addr;
+       phys_addr_t phys_addr;
        void *buf;
 
        /* No recycle or too many buffers are in use, so allocate a new skb */
-       buf = mvpp2_buf_alloc(port, bm_pool, &phys_addr, GFP_ATOMIC);
+       buf = mvpp2_buf_alloc(port, bm_pool, &dma_addr, &phys_addr,
+                             GFP_ATOMIC);
        if (!buf)
                return -ENOMEM;
 
-       mvpp2_pool_refill(port, bm, phys_addr, (unsigned long)buf);
+       mvpp2_pool_refill(port, bm, dma_addr, phys_addr);
 
        return 0;
 }
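
The dma_addr_t/phys_addr_t split above is the core of the rework: the address programmed into the hardware (possibly IOMMU-remapped, and up to 40 bits wide on PPv2.2) no longer has to equal the CPU physical address, which the driver now keeps as the buffer-manager cookie and converts back with phys_to_virt(). A sketch of the dual-address allocator matching the call above (the allocation and cleanup calls in the body are placeholders):

static void *mvpp2_buf_alloc(struct mvpp2_port *port,
                             struct mvpp2_bm_pool *bm_pool,
                             dma_addr_t *buf_dma_addr,
                             phys_addr_t *buf_phys_addr,
                             gfp_t gfp_mask)
{
        void *data = kmalloc(bm_pool->buf_size, gfp_mask); /* placeholder */
        dma_addr_t dma_addr;

        if (!data)
                return NULL;

        dma_addr = dma_map_single(port->dev->dev.parent, data,
                                  bm_pool->buf_size, DMA_FROM_DEVICE);
        if (unlikely(dma_mapping_error(port->dev->dev.parent, dma_addr))) {
                kfree(data);
                return NULL;
        }
        /* Device-visible address and CPU physical address diverge here */
        *buf_dma_addr = dma_addr;
        *buf_phys_addr = virt_to_phys(data);
        return data;
}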
@@ -5075,43 +5471,6 @@ static u32 mvpp2_skb_tx_csum(struct mvpp2_port *port, struct sk_buff *skb)
        return MVPP2_TXD_L4_CSUM_NOT | MVPP2_TXD_IP_CSUM_DISABLE;
 }
 
-static void mvpp2_buff_hdr_rx(struct mvpp2_port *port,
-                             struct mvpp2_rx_desc *rx_desc)
-{
-       struct mvpp2_buff_hdr *buff_hdr;
-       struct sk_buff *skb;
-       u32 rx_status = rx_desc->status;
-       dma_addr_t buff_phys_addr;
-       unsigned long buff_virt_addr;
-       dma_addr_t buff_phys_addr_next;
-       unsigned long buff_virt_addr_next;
-       int mc_id;
-       int pool_id;
-
-       pool_id = (rx_status & MVPP2_RXD_BM_POOL_ID_MASK) >>
-                  MVPP2_RXD_BM_POOL_ID_OFFS;
-       buff_phys_addr = rx_desc->buf_phys_addr;
-       buff_virt_addr = rx_desc->buf_cookie;
-
-       do {
-               skb = (struct sk_buff *)buff_virt_addr;
-               buff_hdr = (struct mvpp2_buff_hdr *)skb->head;
-
-               mc_id = MVPP2_B_HDR_INFO_MC_ID(buff_hdr->info);
-
-               buff_phys_addr_next = buff_hdr->next_buff_phys_addr;
-               buff_virt_addr_next = buff_hdr->next_buff_virt_addr;
-
-               /* Release buffer */
-               mvpp2_bm_pool_mc_put(port, pool_id, buff_phys_addr,
-                                    buff_virt_addr, mc_id);
-
-               buff_phys_addr = buff_phys_addr_next;
-               buff_virt_addr = buff_virt_addr_next;
-
-       } while (!MVPP2_B_HDR_INFO_IS_LAST(buff_hdr->info));
-}
-
 /* Main rx processing */
 static int mvpp2_rx(struct mvpp2_port *port, int rx_todo,
                    struct mvpp2_rx_queue *rxq)
@@ -5132,25 +5491,23 @@ static int mvpp2_rx(struct mvpp2_port *port, int rx_todo,
                struct mvpp2_bm_pool *bm_pool;
                struct sk_buff *skb;
                unsigned int frag_size;
-               dma_addr_t phys_addr;
+               dma_addr_t dma_addr;
+               phys_addr_t phys_addr;
                u32 bm, rx_status;
                int pool, rx_bytes, err;
                void *data;
 
                rx_done++;
-               rx_status = rx_desc->status;
-               rx_bytes = rx_desc->data_size - MVPP2_MH_SIZE;
-               phys_addr = rx_desc->buf_phys_addr;
-               data = (void *)(uintptr_t)rx_desc->buf_cookie;
-
-               bm = mvpp2_bm_cookie_build(rx_desc);
+               rx_status = mvpp2_rxdesc_status_get(port, rx_desc);
+               rx_bytes = mvpp2_rxdesc_size_get(port, rx_desc);
+               rx_bytes -= MVPP2_MH_SIZE;
+               dma_addr = mvpp2_rxdesc_dma_addr_get(port, rx_desc);
+               phys_addr = mvpp2_rxdesc_cookie_get(port, rx_desc);
+               data = (void *)phys_to_virt(phys_addr);
+
+               bm = mvpp2_bm_cookie_build(port, rx_desc);
                pool = mvpp2_bm_cookie_pool_get(bm);
                bm_pool = &port->priv->bm_pools[pool];
-               /* Check if buffer header is used */
-               if (rx_status & MVPP2_RXD_BUF_HDR) {
-                       mvpp2_buff_hdr_rx(port, rx_desc);
-                       continue;
-               }
 
                /* In case of an error, release the requested buffer pointer
                 * to the Buffer Manager. This request process is controlled
@@ -5162,9 +5519,7 @@ static int mvpp2_rx(struct mvpp2_port *port, int rx_todo,
                        dev->stats.rx_errors++;
                        mvpp2_rx_error(port, rx_desc);
                        /* Return the buffer to the pool */
-
-                       mvpp2_pool_refill(port, bm, rx_desc->buf_phys_addr,
-                                         rx_desc->buf_cookie);
+                       mvpp2_pool_refill(port, bm, dma_addr, phys_addr);
                        continue;
                }
 
@@ -5185,7 +5540,7 @@ static int mvpp2_rx(struct mvpp2_port *port, int rx_todo,
                        goto err_drop_frame;
                }
 
-               dma_unmap_single(dev->dev.parent, phys_addr,
+               dma_unmap_single(dev->dev.parent, dma_addr,
                                 bm_pool->buf_size, DMA_FROM_DEVICE);
 
                rcvd_pkts++;
@@ -5216,11 +5571,15 @@ static int mvpp2_rx(struct mvpp2_port *port, int rx_todo,
 }
 
 static inline void
-tx_desc_unmap_put(struct device *dev, struct mvpp2_tx_queue *txq,
+tx_desc_unmap_put(struct mvpp2_port *port, struct mvpp2_tx_queue *txq,
                  struct mvpp2_tx_desc *desc)
 {
-       dma_unmap_single(dev, desc->buf_phys_addr,
-                        desc->data_size, DMA_TO_DEVICE);
+       dma_addr_t buf_dma_addr =
+               mvpp2_txdesc_dma_addr_get(port, desc);
+       size_t buf_sz =
+               mvpp2_txdesc_size_get(port, desc);
+       dma_unmap_single(port->dev->dev.parent, buf_dma_addr,
+                        buf_sz, DMA_TO_DEVICE);
        mvpp2_txq_desc_put(txq);
 }
 
@@ -5232,35 +5591,38 @@ static int mvpp2_tx_frag_process(struct mvpp2_port *port, struct sk_buff *skb,
        struct mvpp2_txq_pcpu *txq_pcpu = this_cpu_ptr(txq->pcpu);
        struct mvpp2_tx_desc *tx_desc;
        int i;
-       dma_addr_t buf_phys_addr;
+       dma_addr_t buf_dma_addr;
 
        for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
                skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
                void *addr = page_address(frag->page.p) + frag->page_offset;
 
                tx_desc = mvpp2_txq_next_desc_get(aggr_txq);
-               tx_desc->phys_txq = txq->id;
-               tx_desc->data_size = frag->size;
+               mvpp2_txdesc_txq_set(port, tx_desc, txq->id);
+               mvpp2_txdesc_size_set(port, tx_desc, frag->size);
 
-               buf_phys_addr = dma_map_single(port->dev->dev.parent, addr,
-                                              tx_desc->data_size,
+               buf_dma_addr = dma_map_single(port->dev->dev.parent, addr,
+                                              frag->size,
                                               DMA_TO_DEVICE);
-               if (dma_mapping_error(port->dev->dev.parent, buf_phys_addr)) {
+               if (dma_mapping_error(port->dev->dev.parent, buf_dma_addr)) {
                        mvpp2_txq_desc_put(txq);
                        goto error;
                }
 
-               tx_desc->packet_offset = buf_phys_addr & MVPP2_TX_DESC_ALIGN;
-               tx_desc->buf_phys_addr = buf_phys_addr & (~MVPP2_TX_DESC_ALIGN);
+               mvpp2_txdesc_offset_set(port, tx_desc,
+                                       buf_dma_addr & MVPP2_TX_DESC_ALIGN);
+               mvpp2_txdesc_dma_addr_set(port, tx_desc,
+                                         buf_dma_addr & ~MVPP2_TX_DESC_ALIGN);
 
                if (i == (skb_shinfo(skb)->nr_frags - 1)) {
                        /* Last descriptor */
-                       tx_desc->command = MVPP2_TXD_L_DESC;
-                       mvpp2_txq_inc_put(txq_pcpu, skb, tx_desc);
+                       mvpp2_txdesc_cmd_set(port, tx_desc,
+                                            MVPP2_TXD_L_DESC);
+                       mvpp2_txq_inc_put(port, txq_pcpu, skb, tx_desc);
                } else {
                        /* Descriptor in the middle: Not First, Not Last */
-                       tx_desc->command = 0;
-                       mvpp2_txq_inc_put(txq_pcpu, NULL, tx_desc);
+                       mvpp2_txdesc_cmd_set(port, tx_desc, 0);
+                       mvpp2_txq_inc_put(port, txq_pcpu, NULL, tx_desc);
                }
        }
 
@@ -5272,7 +5634,7 @@ error:
         */
        for (i = i - 1; i >= 0; i--) {
                tx_desc = txq->descs + i;
-               tx_desc_unmap_put(port->dev->dev.parent, txq, tx_desc);
+               tx_desc_unmap_put(port, txq, tx_desc);
        }
 
        return -ENOMEM;
@@ -5285,7 +5647,7 @@ static int mvpp2_tx(struct sk_buff *skb, struct net_device *dev)
        struct mvpp2_tx_queue *txq, *aggr_txq;
        struct mvpp2_txq_pcpu *txq_pcpu;
        struct mvpp2_tx_desc *tx_desc;
-       dma_addr_t buf_phys_addr;
+       dma_addr_t buf_dma_addr;
        int frags = 0;
        u16 txq_id;
        u32 tx_cmd;
@@ -5307,35 +5669,38 @@ static int mvpp2_tx(struct sk_buff *skb, struct net_device *dev)
 
        /* Get a descriptor for the first part of the packet */
        tx_desc = mvpp2_txq_next_desc_get(aggr_txq);
-       tx_desc->phys_txq = txq->id;
-       tx_desc->data_size = skb_headlen(skb);
+       mvpp2_txdesc_txq_set(port, tx_desc, txq->id);
+       mvpp2_txdesc_size_set(port, tx_desc, skb_headlen(skb));
 
-       buf_phys_addr = dma_map_single(dev->dev.parent, skb->data,
-                                      tx_desc->data_size, DMA_TO_DEVICE);
-       if (unlikely(dma_mapping_error(dev->dev.parent, buf_phys_addr))) {
+       buf_dma_addr = dma_map_single(dev->dev.parent, skb->data,
+                                     skb_headlen(skb), DMA_TO_DEVICE);
+       if (unlikely(dma_mapping_error(dev->dev.parent, buf_dma_addr))) {
                mvpp2_txq_desc_put(txq);
                frags = 0;
                goto out;
        }
-       tx_desc->packet_offset = buf_phys_addr & MVPP2_TX_DESC_ALIGN;
-       tx_desc->buf_phys_addr = buf_phys_addr & ~MVPP2_TX_DESC_ALIGN;
+
+       mvpp2_txdesc_offset_set(port, tx_desc,
+                               buf_dma_addr & MVPP2_TX_DESC_ALIGN);
+       mvpp2_txdesc_dma_addr_set(port, tx_desc,
+                                 buf_dma_addr & ~MVPP2_TX_DESC_ALIGN);
 
        tx_cmd = mvpp2_skb_tx_csum(port, skb);
 
        if (frags == 1) {
                /* First and Last descriptor */
                tx_cmd |= MVPP2_TXD_F_DESC | MVPP2_TXD_L_DESC;
-               tx_desc->command = tx_cmd;
-               mvpp2_txq_inc_put(txq_pcpu, skb, tx_desc);
+               mvpp2_txdesc_cmd_set(port, tx_desc, tx_cmd);
+               mvpp2_txq_inc_put(port, txq_pcpu, skb, tx_desc);
        } else {
                /* First but not Last */
                tx_cmd |= MVPP2_TXD_F_DESC | MVPP2_TXD_PADDING_DISABLE;
-               tx_desc->command = tx_cmd;
-               mvpp2_txq_inc_put(txq_pcpu, NULL, tx_desc);
+               mvpp2_txdesc_cmd_set(port, tx_desc, tx_cmd);
+               mvpp2_txq_inc_put(port, txq_pcpu, NULL, tx_desc);
 
                /* Continue with other skb fragments */
                if (mvpp2_tx_frag_process(port, skb, aggr_txq, txq)) {
-                       tx_desc_unmap_put(port->dev->dev.parent, txq, tx_desc);
+                       tx_desc_unmap_put(port, txq, tx_desc);
                        frags = 0;
                        goto out;
                }
@@ -5396,6 +5761,7 @@ static int mvpp2_poll(struct napi_struct *napi, int budget)
        u32 cause_rx_tx, cause_rx, cause_misc;
        int rx_done = 0;
        struct mvpp2_port *port = netdev_priv(napi->dev);
+       int cpu = smp_processor_id();
 
        /* Rx/Tx cause register
         *
@@ -5407,8 +5773,8 @@ static int mvpp2_poll(struct napi_struct *napi, int budget)
         *
         * Each CPU has its own Rx/Tx cause register
         */
-       cause_rx_tx = mvpp2_read(port->priv,
-                                MVPP2_ISR_RX_TX_CAUSE_REG(port->id));
+       cause_rx_tx = mvpp2_percpu_read(port->priv, cpu,
+                                       MVPP2_ISR_RX_TX_CAUSE_REG(port->id));
        cause_rx_tx &= ~MVPP2_CAUSE_TXQ_OCCUP_DESC_ALL_MASK;
        cause_misc = cause_rx_tx & MVPP2_CAUSE_MISC_SUM_MASK;
 
@@ -5417,8 +5783,9 @@ static int mvpp2_poll(struct napi_struct *napi, int budget)
 
                /* Clear the cause register */
                mvpp2_write(port->priv, MVPP2_ISR_MISC_CAUSE_REG, 0);
-               mvpp2_write(port->priv, MVPP2_ISR_RX_TX_CAUSE_REG(port->id),
-                           cause_rx_tx & ~MVPP2_CAUSE_MISC_SUM_MASK);
+               mvpp2_percpu_write(port->priv, cpu,
+                                  MVPP2_ISR_RX_TX_CAUSE_REG(port->id),
+                                  cause_rx_tx & ~MVPP2_CAUSE_MISC_SUM_MASK);
        }
 
        cause_rx = cause_rx_tx & MVPP2_CAUSE_RXQ_OCCUP_DESC_ALL_MASK;
@@ -5530,7 +5897,7 @@ static int mvpp2_check_ringparam_valid(struct net_device *dev,
        return 0;
 }
 
-static void mvpp2_get_mac_address(struct mvpp2_port *port, unsigned char *addr)
+static void mvpp21_get_mac_address(struct mvpp2_port *port, unsigned char *addr)
 {
        u32 mac_addr_l, mac_addr_m, mac_addr_h;
 
@@ -5975,16 +6342,6 @@ static const struct ethtool_ops mvpp2_eth_tool_ops = {
        .set_link_ksettings = phy_ethtool_set_link_ksettings,
 };
 
-/* Driver initialization */
-
-static void mvpp2_port_power_up(struct mvpp2_port *port)
-{
-       mvpp2_port_mii_set(port);
-       mvpp2_port_periodic_xon_disable(port);
-       mvpp2_port_fc_adv_enable(port);
-       mvpp2_port_reset(port);
-}
-
 /* Initialize port HW */
 static int mvpp2_port_init(struct mvpp2_port *port)
 {
@@ -5993,7 +6350,8 @@ static int mvpp2_port_init(struct mvpp2_port *port)
        struct mvpp2_txq_pcpu *txq_pcpu;
        int queue, cpu, err;
 
-       if (port->first_rxq + rxq_number > MVPP2_RXQ_TOTAL_NUM)
+       if (port->first_rxq + rxq_number >
+           MVPP2_MAX_PORTS * priv->max_port_rxqs)
                return -EINVAL;
 
        /* Disable port */
@@ -6061,7 +6419,18 @@ static int mvpp2_port_init(struct mvpp2_port *port)
        }
 
        /* Configure Rx queue group interrupt for this port */
-       mvpp2_write(priv, MVPP2_ISR_RXQ_GROUP_REG(port->id), rxq_number);
+       if (priv->hw_version == MVPP21) {
+               mvpp2_write(priv, MVPP21_ISR_RXQ_GROUP_REG(port->id),
+                           rxq_number);
+       } else {
+               u32 val;
+
+               val = (port->id << MVPP22_ISR_RXQ_GROUP_INDEX_GROUP_OFFSET);
+               mvpp2_write(priv, MVPP22_ISR_RXQ_GROUP_INDEX_REG, val);
+
+               val = (rxq_number << MVPP22_ISR_RXQ_SUB_GROUP_SIZE_OFFSET);
+               mvpp2_write(priv, MVPP22_ISR_RXQ_SUB_GROUP_CONFIG_REG, val);
+       }
 
        /* Create Rx descriptor rings */
        for (queue = 0; queue < rxq_number; queue++) {
@@ -6103,8 +6472,7 @@ err_free_percpu:
 /* Ports initialization */
 static int mvpp2_port_probe(struct platform_device *pdev,
                            struct device_node *port_node,
-                           struct mvpp2 *priv,
-                           int *next_first_rxq)
+                           struct mvpp2 *priv)
 {
        struct device_node *phy_node;
        struct mvpp2_port *port;
@@ -6117,7 +6485,6 @@ static int mvpp2_port_probe(struct platform_device *pdev,
        u32 id;
        int features;
        int phy_mode;
-       int priv_common_regs_num = 2;
        int err, i, cpu;
 
        dev = alloc_etherdev_mqs(sizeof(struct mvpp2_port), txq_number,
@@ -6163,16 +6530,30 @@ static int mvpp2_port_probe(struct platform_device *pdev,
 
        port->priv = priv;
        port->id = id;
-       port->first_rxq = *next_first_rxq;
+       if (priv->hw_version == MVPP21)
+               port->first_rxq = port->id * rxq_number;
+       else
+               port->first_rxq = port->id * priv->max_port_rxqs;
+
        port->phy_node = phy_node;
        port->phy_interface = phy_mode;
 
-       res = platform_get_resource(pdev, IORESOURCE_MEM,
-                                   priv_common_regs_num + id);
-       port->base = devm_ioremap_resource(&pdev->dev, res);
-       if (IS_ERR(port->base)) {
-               err = PTR_ERR(port->base);
-               goto err_free_irq;
+       if (priv->hw_version == MVPP21) {
+               res = platform_get_resource(pdev, IORESOURCE_MEM, 2 + id);
+               port->base = devm_ioremap_resource(&pdev->dev, res);
+               if (IS_ERR(port->base)) {
+                       err = PTR_ERR(port->base);
+                       goto err_free_irq;
+               }
+       } else {
+               if (of_property_read_u32(port_node, "gop-port-id",
+                                        &port->gop_id)) {
+                       err = -EINVAL;
+                       dev_err(&pdev->dev, "missing gop-port-id value\n");
+                       goto err_free_irq;
+               }
+
+               port->base = priv->iface_base + MVPP22_GMAC_BASE(port->gop_id);
        }
 
        /* Alloc per-cpu stats */
@@ -6187,7 +6568,8 @@ static int mvpp2_port_probe(struct platform_device *pdev,
                mac_from = "device tree";
                ether_addr_copy(dev->dev_addr, dt_mac_addr);
        } else {
-               mvpp2_get_mac_address(port, hw_mac_addr);
+               if (priv->hw_version == MVPP21)
+                       mvpp21_get_mac_address(port, hw_mac_addr);
                if (is_valid_ether_addr(hw_mac_addr)) {
                        mac_from = "hardware";
                        ether_addr_copy(dev->dev_addr, hw_mac_addr);
@@ -6207,7 +6589,14 @@ static int mvpp2_port_probe(struct platform_device *pdev,
                dev_err(&pdev->dev, "failed to init port %d\n", id);
                goto err_free_stats;
        }
-       mvpp2_port_power_up(port);
+
+       mvpp2_port_mii_set(port);
+       mvpp2_port_periodic_xon_disable(port);
+
+       if (priv->hw_version == MVPP21)
+               mvpp2_port_fc_adv_enable(port);
+
+       mvpp2_port_reset(port);
 
        port->pcpu = alloc_percpu(struct mvpp2_port_pcpu);
        if (!port->pcpu) {
@@ -6245,8 +6634,6 @@ static int mvpp2_port_probe(struct platform_device *pdev,
        }
        netdev_info(dev, "Using %s mac address %pM\n", mac_from, dev->dev_addr);
 
-       /* Increment the first Rx queue number to be used by the next port */
-       *next_first_rxq += rxq_number;
        priv->port_list[id] = port;
        return 0;
 
@@ -6330,6 +6717,60 @@ static void mvpp2_rx_fifo_init(struct mvpp2 *priv)
        mvpp2_write(priv, MVPP2_RX_FIFO_INIT_REG, 0x1);
 }
 
+static void mvpp2_axi_init(struct mvpp2 *priv)
+{
+       u32 val, rdval, wrval;
+
+       mvpp2_write(priv, MVPP22_BM_ADDR_HIGH_RLS_REG, 0x0);
+
+       /* AXI Bridge Configuration */
+
+       rdval = MVPP22_AXI_CODE_CACHE_RD_CACHE
+               << MVPP22_AXI_ATTR_CACHE_OFFS;
+       rdval |= MVPP22_AXI_CODE_DOMAIN_OUTER_DOM
+               << MVPP22_AXI_ATTR_DOMAIN_OFFS;
+
+       wrval = MVPP22_AXI_CODE_CACHE_WR_CACHE
+               << MVPP22_AXI_ATTR_CACHE_OFFS;
+       wrval |= MVPP22_AXI_CODE_DOMAIN_OUTER_DOM
+               << MVPP22_AXI_ATTR_DOMAIN_OFFS;
+
+       /* BM */
+       mvpp2_write(priv, MVPP22_AXI_BM_WR_ATTR_REG, wrval);
+       mvpp2_write(priv, MVPP22_AXI_BM_RD_ATTR_REG, rdval);
+
+       /* Descriptors */
+       mvpp2_write(priv, MVPP22_AXI_AGGRQ_DESCR_RD_ATTR_REG, rdval);
+       mvpp2_write(priv, MVPP22_AXI_TXQ_DESCR_WR_ATTR_REG, wrval);
+       mvpp2_write(priv, MVPP22_AXI_TXQ_DESCR_RD_ATTR_REG, rdval);
+       mvpp2_write(priv, MVPP22_AXI_RXQ_DESCR_WR_ATTR_REG, wrval);
+
+       /* Buffer Data */
+       mvpp2_write(priv, MVPP22_AXI_TX_DATA_RD_ATTR_REG, rdval);
+       mvpp2_write(priv, MVPP22_AXI_RX_DATA_WR_ATTR_REG, wrval);
+
+       val = MVPP22_AXI_CODE_CACHE_NON_CACHE
+               << MVPP22_AXI_CODE_CACHE_OFFS;
+       val |= MVPP22_AXI_CODE_DOMAIN_SYSTEM
+               << MVPP22_AXI_CODE_DOMAIN_OFFS;
+       mvpp2_write(priv, MVPP22_AXI_RD_NORMAL_CODE_REG, val);
+       mvpp2_write(priv, MVPP22_AXI_WR_NORMAL_CODE_REG, val);
+
+       val = MVPP22_AXI_CODE_CACHE_RD_CACHE
+               << MVPP22_AXI_CODE_CACHE_OFFS;
+       val |= MVPP22_AXI_CODE_DOMAIN_OUTER_DOM
+               << MVPP22_AXI_CODE_DOMAIN_OFFS;
+
+       mvpp2_write(priv, MVPP22_AXI_RD_SNOOP_CODE_REG, val);
+
+       val = MVPP22_AXI_CODE_CACHE_WR_CACHE
+               << MVPP22_AXI_CODE_CACHE_OFFS;
+       val |= MVPP22_AXI_CODE_DOMAIN_OUTER_DOM
+               << MVPP22_AXI_CODE_DOMAIN_OFFS;
+
+       mvpp2_write(priv, MVPP22_AXI_WR_SNOOP_CODE_REG, val);
+}
+
 /* Initialize network controller common part HW */
 static int mvpp2_init(struct platform_device *pdev, struct mvpp2 *priv)
 {
@@ -6338,7 +6779,7 @@ static int mvpp2_init(struct platform_device *pdev, struct mvpp2 *priv)
        u32 val;
 
        /* Checks for hardware constraints */
-       if (rxq_number % 4 || (rxq_number > MVPP2_MAX_RXQ) ||
+       if (rxq_number % 4 || (rxq_number > priv->max_port_rxqs) ||
            (txq_number > MVPP2_MAX_TXQ)) {
                dev_err(&pdev->dev, "invalid queue size parameter\n");
                return -EINVAL;
@@ -6349,10 +6790,19 @@ static int mvpp2_init(struct platform_device *pdev, struct mvpp2 *priv)
        if (dram_target_info)
                mvpp2_conf_mbus_windows(dram_target_info, priv);
 
+       if (priv->hw_version == MVPP22)
+               mvpp2_axi_init(priv);
+
        /* Disable HW PHY polling */
-       val = readl(priv->lms_base + MVPP2_PHY_AN_CFG0_REG);
-       val |= MVPP2_PHY_AN_STOP_SMI0_MASK;
-       writel(val, priv->lms_base + MVPP2_PHY_AN_CFG0_REG);
+       if (priv->hw_version == MVPP21) {
+               val = readl(priv->lms_base + MVPP2_PHY_AN_CFG0_REG);
+               val |= MVPP2_PHY_AN_STOP_SMI0_MASK;
+               writel(val, priv->lms_base + MVPP2_PHY_AN_CFG0_REG);
+       } else {
+               val = readl(priv->iface_base + MVPP22_SMI_MISC_CFG_REG);
+               val &= ~MVPP22_SMI_POLLING_EN;
+               writel(val, priv->iface_base + MVPP22_SMI_MISC_CFG_REG);
+       }
 
        /* Allocate and initialize aggregated TXQs */
        priv->aggr_txqs = devm_kcalloc(&pdev->dev, num_present_cpus(),
@@ -6374,11 +6824,25 @@ static int mvpp2_init(struct platform_device *pdev, struct mvpp2 *priv)
        mvpp2_rx_fifo_init(priv);
 
        /* Reset Rx queue group interrupt configuration */
-       for (i = 0; i < MVPP2_MAX_PORTS; i++)
-               mvpp2_write(priv, MVPP2_ISR_RXQ_GROUP_REG(i), rxq_number);
+       for (i = 0; i < MVPP2_MAX_PORTS; i++) {
+               if (priv->hw_version == MVPP21) {
+                       mvpp2_write(priv, MVPP21_ISR_RXQ_GROUP_REG(i),
+                                   rxq_number);
+                       continue;
+               } else {
+                       u32 val;
+
+                       val = (i << MVPP22_ISR_RXQ_GROUP_INDEX_GROUP_OFFSET);
+                       mvpp2_write(priv, MVPP22_ISR_RXQ_GROUP_INDEX_REG, val);
 
-       writel(MVPP2_EXT_GLOBAL_CTRL_DEFAULT,
-              priv->lms_base + MVPP2_MNG_EXTENDED_GLOBAL_CTRL_REG);
+                       val = (rxq_number << MVPP22_ISR_RXQ_SUB_GROUP_SIZE_OFFSET);
+                       mvpp2_write(priv, MVPP22_ISR_RXQ_SUB_GROUP_CONFIG_REG, val);
+               }
+       }
+
+       if (priv->hw_version == MVPP21)
+               writel(MVPP2_EXT_GLOBAL_CTRL_DEFAULT,
+                      priv->lms_base + MVPP2_MNG_EXTENDED_GLOBAL_CTRL_REG);
 
        /* Allow cache snoop when transmitting packets */
        mvpp2_write(priv, MVPP2_TX_SNOOP_REG, 0x1);
@@ -6405,22 +6869,46 @@ static int mvpp2_probe(struct platform_device *pdev)
        struct device_node *port_node;
        struct mvpp2 *priv;
        struct resource *res;
-       int port_count, first_rxq;
+       void __iomem *base;
+       int port_count, cpu;
        int err;
 
        priv = devm_kzalloc(&pdev->dev, sizeof(struct mvpp2), GFP_KERNEL);
        if (!priv)
                return -ENOMEM;
 
+       priv->hw_version =
+               (unsigned long)of_device_get_match_data(&pdev->dev);
+
        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-       priv->base = devm_ioremap_resource(&pdev->dev, res);
-       if (IS_ERR(priv->base))
-               return PTR_ERR(priv->base);
+       base = devm_ioremap_resource(&pdev->dev, res);
+       if (IS_ERR(base))
+               return PTR_ERR(base);
+
+       if (priv->hw_version == MVPP21) {
+               res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+               priv->lms_base = devm_ioremap_resource(&pdev->dev, res);
+               if (IS_ERR(priv->lms_base))
+                       return PTR_ERR(priv->lms_base);
+       } else {
+               res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+               priv->iface_base = devm_ioremap_resource(&pdev->dev, res);
+               if (IS_ERR(priv->iface_base))
+                       return PTR_ERR(priv->iface_base);
+       }
+
+       for_each_present_cpu(cpu) {
+               u32 addr_space_sz;
+
+               addr_space_sz = (priv->hw_version == MVPP21 ?
+                                MVPP21_ADDR_SPACE_SZ : MVPP22_ADDR_SPACE_SZ);
+               priv->cpu_base[cpu] = base + cpu * addr_space_sz;
+       }
 
-       res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
-       priv->lms_base = devm_ioremap_resource(&pdev->dev, res);
-       if (IS_ERR(priv->lms_base))
-               return PTR_ERR(priv->lms_base);
+       if (priv->hw_version == MVPP21)
+               priv->max_port_rxqs = 8;
+       else
+               priv->max_port_rxqs = 32;
 
        priv->pp_clk = devm_clk_get(&pdev->dev, "pp_clk");
        if (IS_ERR(priv->pp_clk))
@@ -6438,21 +6926,47 @@ static int mvpp2_probe(struct platform_device *pdev)
        if (err < 0)
                goto err_pp_clk;
 
+       if (priv->hw_version == MVPP22) {
+               priv->mg_clk = devm_clk_get(&pdev->dev, "mg_clk");
+               if (IS_ERR(priv->mg_clk)) {
+                       err = PTR_ERR(priv->mg_clk);
+                       goto err_gop_clk;
+               }
+
+               err = clk_prepare_enable(priv->mg_clk);
+               if (err < 0)
+                       goto err_gop_clk;
+       }
+
        /* Get system's tclk rate */
        priv->tclk = clk_get_rate(priv->pp_clk);
 
+       if (priv->hw_version == MVPP22) {
+               err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(40));
+               if (err)
+                       goto err_mg_clk;
+               /* Sadly, the BM pools all share the same register to
+                * store the high 32 bits of their address. So they
+                * must all have the same high 32 bits, which forces
+                * us to restrict coherent memory to DMA_BIT_MASK(32).
+                */
+               err = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
+               if (err)
+                       goto err_mg_clk;
+       }
+
        /* Initialize network controller */
        err = mvpp2_init(pdev, priv);
        if (err < 0) {
                dev_err(&pdev->dev, "failed to initialize controller\n");
-               goto err_gop_clk;
+               goto err_mg_clk;
        }
 
        port_count = of_get_available_child_count(dn);
        if (port_count == 0) {
                dev_err(&pdev->dev, "no ports enabled\n");
                err = -ENODEV;
-               goto err_gop_clk;
+               goto err_mg_clk;
        }
 
        priv->port_list = devm_kcalloc(&pdev->dev, port_count,
@@ -6460,20 +6974,22 @@ static int mvpp2_probe(struct platform_device *pdev)
                                      GFP_KERNEL);
        if (!priv->port_list) {
                err = -ENOMEM;
-               goto err_gop_clk;
+               goto err_mg_clk;
        }
 
        /* Initialize ports */
-       first_rxq = 0;
        for_each_available_child_of_node(dn, port_node) {
-               err = mvpp2_port_probe(pdev, port_node, priv, &first_rxq);
+               err = mvpp2_port_probe(pdev, port_node, priv);
                if (err < 0)
-                       goto err_gop_clk;
+                       goto err_mg_clk;
        }
 
        platform_set_drvdata(pdev, priv);
        return 0;
 
+err_mg_clk:
+       if (priv->hw_version == MVPP22)
+               clk_disable_unprepare(priv->mg_clk);
 err_gop_clk:
        clk_disable_unprepare(priv->gop_clk);
 err_pp_clk:
@@ -6506,9 +7022,10 @@ static int mvpp2_remove(struct platform_device *pdev)
                dma_free_coherent(&pdev->dev,
                                  MVPP2_AGGR_TXQ_SIZE * MVPP2_DESC_ALIGNED_SIZE,
                                  aggr_txq->descs,
-                                 aggr_txq->descs_phys);
+                                 aggr_txq->descs_dma);
        }
 
+       clk_disable_unprepare(priv->mg_clk);
        clk_disable_unprepare(priv->pp_clk);
        clk_disable_unprepare(priv->gop_clk);
 
@@ -6516,7 +7033,14 @@ static int mvpp2_remove(struct platform_device *pdev)
 }
 
 static const struct of_device_id mvpp2_match[] = {
-       { .compatible = "marvell,armada-375-pp2" },
+       {
+               .compatible = "marvell,armada-375-pp2",
+               .data = (void *)MVPP21,
+       },
+       {
+               .compatible = "marvell,armada-7k-pp22",
+               .data = (void *)MVPP22,
+       },
        { }
 };
 MODULE_DEVICE_TABLE(of, mvpp2_match);
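
The MVPP21/MVPP22 tokens stashed in .data above are presumably a small enum, smuggled through the void pointer and recovered in mvpp2_probe() via of_device_get_match_data(), as seen earlier in this patch. A sketch of the assumed definition (actual values may differ):

enum { MVPP21, MVPP22 };  /* assumed; only used as match-data tags */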
index 9e757684816d48b903f62cdac2d6a1123e6c3305..d81d3b6dfd8726b9ee78b08622f6e259150e8cfb 100644 (file)
@@ -1846,6 +1846,12 @@ static int mtk_hw_init(struct mtk_eth *eth)
        /* GE2, Force 1000M/FD, FC ON */
        mtk_w32(eth, MAC_MCR_FIXED_LINK, MTK_MAC_MCR(1));
 
+       /* Tell the CDM engine to parse the MTK special tag on packets
+        * coming from the CPU; this also works for untagged packets.
+        */
+       val = mtk_r32(eth, MTK_CDMQ_IG_CTRL);
+       mtk_w32(eth, val | MTK_CDMQ_STAG_EN, MTK_CDMQ_IG_CTRL);
+
        /* Enable RX VLAN offloading */
        mtk_w32(eth, 1, MTK_CDMP_EG_CTRL);
 
@@ -1908,10 +1914,9 @@ static int __init mtk_init(struct net_device *dev)
 
        /* If the MAC address is invalid, use a random MAC address */
        if (!is_valid_ether_addr(dev->dev_addr)) {
-               random_ether_addr(dev->dev_addr);
+               eth_hw_addr_random(dev);
                dev_err(eth->dev, "generated random MAC address %pM\n",
                        dev->dev_addr);
-               dev->addr_assign_type = NET_ADDR_RANDOM;
        }
 
        return mtk_phy_connect(dev);
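
The eth_hw_addr_random() conversion above is equivalent to the two removed lines: on kernels of this era the helper is roughly the following (sketch, from memory of include/linux/etherdevice.h), so the explicit addr_assign_type store became redundant:

static inline void eth_hw_addr_random(struct net_device *dev)
{
        dev->addr_assign_type = NET_ADDR_RANDOM;
        eth_random_addr(dev->dev_addr);
}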
@@ -2317,6 +2322,8 @@ static int mtk_add_mac(struct mtk_eth *eth, struct device_node *np)
        eth->netdev[id]->ethtool_ops = &mtk_ethtool_ops;
 
        eth->netdev[id]->irq = eth->irq[0];
+       eth->netdev[id]->dev.of_node = np;
+
        return 0;
 
 free_netdev:
index 99b1c8e9f16f981a0603f906280dcd98f7fa1b54..996024d02668a3235b7cedec09ddbfc07efd7eff 100644 (file)
 /* Frame Engine Interrupt Grouping Register */
 #define MTK_FE_INT_GRP         0x20
 
+/* CDMQ Ingress Control Register */
+#define MTK_CDMQ_IG_CTRL       0x1400
+#define MTK_CDMQ_STAG_EN       BIT(0)
+
 /* CDMP Egress Control Register */
 #define MTK_CDMP_EG_CTRL       0x404
 
index e8c105164931f31ff0cf5ed12acef455d0010eda..0e0fa70305659521ed50d1cf1bc40fd38aa3ad04 100644 (file)
@@ -2305,6 +2305,17 @@ static int sync_toggles(struct mlx4_dev *dev)
                rd_toggle = swab32(readl(&priv->mfunc.comm->slave_read));
                if (wr_toggle == 0xffffffff || rd_toggle == 0xffffffff) {
                        /* PCI might be offline */
+
+                       /* If device removal has been requested,
+                        * do not continue retrying.
+                        */
+                       if (dev->persist->interface_state &
+                           MLX4_INTERFACE_STATE_NOWAIT) {
+                               mlx4_warn(dev,
+                                         "communication channel is offline\n");
+                               return -EIO;
+                       }
+
                        msleep(100);
                        wr_toggle = swab32(readl(&priv->mfunc.comm->
                                           slave_write));
index c4d714fcc7dae759998a49a1f90f9ab1ee9bdda3..ffbcb27c05e55f43630a812249bab21609886dd9 100644 (file)
@@ -117,7 +117,7 @@ static const char main_strings[][ETH_GSTRING_LEN] = {
        /* port statistics */
        "tso_packets",
        "xmit_more",
-       "queue_stopped", "wake_queue", "tx_timeout", "rx_alloc_failed",
+       "queue_stopped", "wake_queue", "tx_timeout", "rx_alloc_pages",
        "rx_csum_good", "rx_csum_none", "rx_csum_complete", "tx_chksum_offload",
 
        /* pf statistics */
index 61420473fe5fb57032fa50de9a0d2abfa71831d6..94fab20ef146bd5874a21ec2ecbe3dea16180aec 100644 (file)
@@ -92,7 +92,9 @@ static int __mlx4_en_setup_tc(struct net_device *dev, u32 handle, __be16 proto,
        if (tc->type != TC_SETUP_MQPRIO)
                return -EINVAL;
 
-       return mlx4_en_setup_tc(dev, tc->tc);
+       tc->mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS;
+
+       return mlx4_en_setup_tc(dev, tc->mqprio->num_tc);
 }
 
 #ifdef CONFIG_RFS_ACCEL
index 9166d90e732858610b1407fe85cbf6cbe27f5e0b..e0eb695318e64ebcaf58d6edb5f9a57be6f9ddf6 100644 (file)
@@ -213,6 +213,7 @@ int mlx4_en_DUMP_ETH_STATS(struct mlx4_en_dev *mdev, u8 port, u8 reset)
        priv->port_stats.rx_chksum_good = 0;
        priv->port_stats.rx_chksum_none = 0;
        priv->port_stats.rx_chksum_complete = 0;
+       priv->port_stats.rx_alloc_pages = 0;
        priv->xdp_stats.rx_xdp_drop    = 0;
        priv->xdp_stats.rx_xdp_tx      = 0;
        priv->xdp_stats.rx_xdp_tx_full = 0;
@@ -223,6 +224,7 @@ int mlx4_en_DUMP_ETH_STATS(struct mlx4_en_dev *mdev, u8 port, u8 reset)
                priv->port_stats.rx_chksum_good += READ_ONCE(ring->csum_ok);
                priv->port_stats.rx_chksum_none += READ_ONCE(ring->csum_none);
                priv->port_stats.rx_chksum_complete += READ_ONCE(ring->csum_complete);
+               priv->port_stats.rx_alloc_pages += READ_ONCE(ring->rx_alloc_pages);
                priv->xdp_stats.rx_xdp_drop     += READ_ONCE(ring->xdp_drop);
                priv->xdp_stats.rx_xdp_tx       += READ_ONCE(ring->xdp_tx);
                priv->xdp_stats.rx_xdp_tx_full  += READ_ONCE(ring->xdp_tx_full);
index 867292880c07a15124a0cf099d1fcda09926548e..aa074e57ce06fb2842fa1faabd156c3cd2fe10f5 100644 (file)
 
 #include "mlx4_en.h"
 
-static int mlx4_alloc_pages(struct mlx4_en_priv *priv,
-                           struct mlx4_en_rx_alloc *page_alloc,
-                           const struct mlx4_en_frag_info *frag_info,
-                           gfp_t _gfp)
+static int mlx4_alloc_page(struct mlx4_en_priv *priv,
+                          struct mlx4_en_rx_alloc *frag,
+                          gfp_t gfp)
 {
-       int order;
        struct page *page;
        dma_addr_t dma;
 
-       for (order = frag_info->order; ;) {
-               gfp_t gfp = _gfp;
-
-               if (order)
-                       gfp |= __GFP_COMP | __GFP_NOWARN | __GFP_NOMEMALLOC;
-               page = alloc_pages(gfp, order);
-               if (likely(page))
-                       break;
-               if (--order < 0 ||
-                   ((PAGE_SIZE << order) < frag_info->frag_size))
-                       return -ENOMEM;
-       }
-       dma = dma_map_page(priv->ddev, page, 0, PAGE_SIZE << order,
-                          frag_info->dma_dir);
+       page = alloc_page(gfp);
+       if (unlikely(!page))
+               return -ENOMEM;
+       dma = dma_map_page(priv->ddev, page, 0, PAGE_SIZE, priv->dma_dir);
        if (unlikely(dma_mapping_error(priv->ddev, dma))) {
-               put_page(page);
+               __free_page(page);
                return -ENOMEM;
        }
-       page_alloc->page_size = PAGE_SIZE << order;
-       page_alloc->page = page;
-       page_alloc->dma = dma;
-       page_alloc->page_offset = 0;
-       /* Not doing get_page() for each frag is a big win
-        * on asymetric workloads. Note we can not use atomic_set().
-        */
-       page_ref_add(page, page_alloc->page_size / frag_info->frag_stride - 1);
+       frag->page = page;
+       frag->dma = dma;
+       frag->page_offset = priv->rx_headroom;
        return 0;
 }
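
mlx4_alloc_page() deliberately drops the old multi-order allocation fallback: one order-0 page per fragment is far more robust under memory fragmentation and maps cleanly onto XDP's page-per-frame model, at the cost of more allocator calls, which the new rx_alloc_pages counter exposes through ethtool. A usage sketch consistent with mlx4_en_alloc_frags() below:

struct mlx4_en_rx_alloc frag = {};

if (mlx4_alloc_page(priv, &frag, GFP_ATOMIC | __GFP_COLD))
        return -ENOMEM;        /* no free page, or DMA mapping failed */
ring->rx_alloc_pages++;        /* feeds the new ethtool counter */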
 
 static int mlx4_en_alloc_frags(struct mlx4_en_priv *priv,
+                              struct mlx4_en_rx_ring *ring,
                               struct mlx4_en_rx_desc *rx_desc,
                               struct mlx4_en_rx_alloc *frags,
-                              struct mlx4_en_rx_alloc *ring_alloc,
                               gfp_t gfp)
 {
-       struct mlx4_en_rx_alloc page_alloc[MLX4_EN_MAX_RX_FRAGS];
-       const struct mlx4_en_frag_info *frag_info;
-       struct page *page;
        int i;
 
-       for (i = 0; i < priv->num_frags; i++) {
-               frag_info = &priv->frag_info[i];
-               page_alloc[i] = ring_alloc[i];
-               page_alloc[i].page_offset += frag_info->frag_stride;
-
-               if (page_alloc[i].page_offset + frag_info->frag_stride <=
-                   ring_alloc[i].page_size)
-                       continue;
-
-               if (unlikely(mlx4_alloc_pages(priv, &page_alloc[i],
-                                             frag_info, gfp)))
-                       goto out;
-       }
-
-       for (i = 0; i < priv->num_frags; i++) {
-               frags[i] = ring_alloc[i];
-               frags[i].page_offset += priv->frag_info[i].rx_headroom;
-               rx_desc->data[i].addr = cpu_to_be64(frags[i].dma +
-                                                   frags[i].page_offset);
-               ring_alloc[i] = page_alloc[i];
-       }
-
-       return 0;
-
-out:
-       while (i--) {
-               if (page_alloc[i].page != ring_alloc[i].page) {
-                       dma_unmap_page(priv->ddev, page_alloc[i].dma,
-                               page_alloc[i].page_size,
-                               priv->frag_info[i].dma_dir);
-                       page = page_alloc[i].page;
-                       /* Revert changes done by mlx4_alloc_pages */
-                       page_ref_sub(page, page_alloc[i].page_size /
-                                          priv->frag_info[i].frag_stride - 1);
-                       put_page(page);
+       for (i = 0; i < priv->num_frags; i++, frags++) {
+               if (!frags->page) {
+                       if (mlx4_alloc_page(priv, frags, gfp))
+                               return -ENOMEM;
+                       ring->rx_alloc_pages++;
                }
-       }
-       return -ENOMEM;
-}
-
-static void mlx4_en_free_frag(struct mlx4_en_priv *priv,
-                             struct mlx4_en_rx_alloc *frags,
-                             int i)
-{
-       const struct mlx4_en_frag_info *frag_info = &priv->frag_info[i];
-       u32 next_frag_end = frags[i].page_offset + 2 * frag_info->frag_stride;
-
-
-       if (next_frag_end > frags[i].page_size)
-               dma_unmap_page(priv->ddev, frags[i].dma, frags[i].page_size,
-                              frag_info->dma_dir);
-
-       if (frags[i].page)
-               put_page(frags[i].page);
-}
-
-static int mlx4_en_init_allocator(struct mlx4_en_priv *priv,
-                                 struct mlx4_en_rx_ring *ring)
-{
-       int i;
-       struct mlx4_en_rx_alloc *page_alloc;
-
-       for (i = 0; i < priv->num_frags; i++) {
-               const struct mlx4_en_frag_info *frag_info = &priv->frag_info[i];
-
-               if (mlx4_alloc_pages(priv, &ring->page_alloc[i],
-                                    frag_info, GFP_KERNEL | __GFP_COLD))
-                       goto out;
-
-               en_dbg(DRV, priv, "  frag %d allocator: - size:%d frags:%d\n",
-                      i, ring->page_alloc[i].page_size,
-                      page_ref_count(ring->page_alloc[i].page));
+               rx_desc->data[i].addr = cpu_to_be64(frags->dma +
+                                                   frags->page_offset);
        }
        return 0;
-
-out:
-       while (i--) {
-               struct page *page;
-
-               page_alloc = &ring->page_alloc[i];
-               dma_unmap_page(priv->ddev, page_alloc->dma,
-                              page_alloc->page_size,
-                              priv->frag_info[i].dma_dir);
-               page = page_alloc->page;
-               /* Revert changes done by mlx4_alloc_pages */
-               page_ref_sub(page, page_alloc->page_size /
-                                  priv->frag_info[i].frag_stride - 1);
-               put_page(page);
-               page_alloc->page = NULL;
-       }
-       return -ENOMEM;
 }
 
-static void mlx4_en_destroy_allocator(struct mlx4_en_priv *priv,
-                                     struct mlx4_en_rx_ring *ring)
+static void mlx4_en_free_frag(const struct mlx4_en_priv *priv,
+                             struct mlx4_en_rx_alloc *frag)
 {
-       struct mlx4_en_rx_alloc *page_alloc;
-       int i;
-
-       for (i = 0; i < priv->num_frags; i++) {
-               const struct mlx4_en_frag_info *frag_info = &priv->frag_info[i];
-
-               page_alloc = &ring->page_alloc[i];
-               en_dbg(DRV, priv, "Freeing allocator:%d count:%d\n",
-                      i, page_count(page_alloc->page));
-
-               dma_unmap_page(priv->ddev, page_alloc->dma,
-                               page_alloc->page_size, frag_info->dma_dir);
-               while (page_alloc->page_offset + frag_info->frag_stride <
-                      page_alloc->page_size) {
-                       put_page(page_alloc->page);
-                       page_alloc->page_offset += frag_info->frag_stride;
-               }
-               page_alloc->page = NULL;
+       if (frag->page) {
+               dma_unmap_page(priv->ddev, frag->dma,
+                              PAGE_SIZE, priv->dma_dir);
+               __free_page(frag->page);
        }
+       /* We need to clear all fields, otherwise a change of priv->log_rx_info
+        * could lead to stale garbage being read later from frag->page.
+        */
+       memset(frag, 0, sizeof(*frag));
 }
 
-static void mlx4_en_init_rx_desc(struct mlx4_en_priv *priv,
+static void mlx4_en_init_rx_desc(const struct mlx4_en_priv *priv,
                                 struct mlx4_en_rx_ring *ring, int index)
 {
        struct mlx4_en_rx_desc *rx_desc = ring->buf + ring->stride * index;
@@ -248,18 +137,23 @@ static int mlx4_en_prepare_rx_desc(struct mlx4_en_priv *priv,
        struct mlx4_en_rx_desc *rx_desc = ring->buf + (index * ring->stride);
        struct mlx4_en_rx_alloc *frags = ring->rx_info +
                                        (index << priv->log_rx_info);
-
        if (ring->page_cache.index > 0) {
-               frags[0] = ring->page_cache.buf[--ring->page_cache.index];
-               rx_desc->data[0].addr = cpu_to_be64(frags[0].dma +
-                                                   frags[0].page_offset);
+               /* XDP uses a single page per frame */
+               if (!frags->page) {
+                       ring->page_cache.index--;
+                       frags->page = ring->page_cache.buf[ring->page_cache.index].page;
+                       frags->dma  = ring->page_cache.buf[ring->page_cache.index].dma;
+               }
+               frags->page_offset = XDP_PACKET_HEADROOM;
+               rx_desc->data[0].addr = cpu_to_be64(frags->dma +
+                                                   XDP_PACKET_HEADROOM);
                return 0;
        }
 
-       return mlx4_en_alloc_frags(priv, rx_desc, frags, ring->page_alloc, gfp);
+       return mlx4_en_alloc_frags(priv, ring, rx_desc, frags, gfp);
 }
 
-static inline bool mlx4_en_is_ring_empty(struct mlx4_en_rx_ring *ring)
+static bool mlx4_en_is_ring_empty(const struct mlx4_en_rx_ring *ring)
 {
        return ring->prod == ring->cons;
 }
@@ -269,7 +163,8 @@ static inline void mlx4_en_update_rx_prod_db(struct mlx4_en_rx_ring *ring)
        *ring->wqres.db.db = cpu_to_be32(ring->prod & 0xffff);
 }
 
-static void mlx4_en_free_rx_desc(struct mlx4_en_priv *priv,
+/* slow path */
+static void mlx4_en_free_rx_desc(const struct mlx4_en_priv *priv,
                                 struct mlx4_en_rx_ring *ring,
                                 int index)
 {
@@ -279,7 +174,7 @@ static void mlx4_en_free_rx_desc(struct mlx4_en_priv *priv,
        frags = ring->rx_info + (index << priv->log_rx_info);
        for (nr = 0; nr < priv->num_frags; nr++) {
                en_dbg(DRV, priv, "Freeing fragment:%d\n", nr);
-               mlx4_en_free_frag(priv, frags, nr);
+               mlx4_en_free_frag(priv, frags + nr);
        }
 }
 
@@ -335,12 +230,12 @@ static void mlx4_en_free_rx_buf(struct mlx4_en_priv *priv,
               ring->cons, ring->prod);
 
        /* Unmap and free Rx buffers */
-       while (!mlx4_en_is_ring_empty(ring)) {
-               index = ring->cons & ring->size_mask;
+       for (index = 0; index < ring->size; index++) {
                en_dbg(DRV, priv, "Processing descriptor:%d\n", index);
                mlx4_en_free_rx_desc(priv, ring, index);
-               ++ring->cons;
        }
+       ring->cons = 0;
+       ring->prod = 0;
 }
 
 void mlx4_en_set_num_rx_rings(struct mlx4_en_dev *mdev)
@@ -392,9 +287,9 @@ int mlx4_en_create_rx_ring(struct mlx4_en_priv *priv,
 
        tmp = size * roundup_pow_of_two(MLX4_EN_MAX_RX_FRAGS *
                                        sizeof(struct mlx4_en_rx_alloc));
-       ring->rx_info = vmalloc_node(tmp, node);
+       ring->rx_info = vzalloc_node(tmp, node);
        if (!ring->rx_info) {
-               ring->rx_info = vmalloc(tmp);
+               ring->rx_info = vzalloc(tmp);
                if (!ring->rx_info) {
                        err = -ENOMEM;
                        goto err_ring;
@@ -464,16 +359,6 @@ int mlx4_en_activate_rx_rings(struct mlx4_en_priv *priv)
                /* Initialize all descriptors */
                for (i = 0; i < ring->size; i++)
                        mlx4_en_init_rx_desc(priv, ring, i);
-
-               /* Initialize page allocators */
-               err = mlx4_en_init_allocator(priv, ring);
-               if (err) {
-                       en_err(priv, "Failed initializing ring allocator\n");
-                       if (ring->stride <= TXBB_SIZE)
-                               ring->buf -= TXBB_SIZE;
-                       ring_ind--;
-                       goto err_allocator;
-               }
        }
        err = mlx4_en_fill_rx_buffers(priv);
        if (err)
@@ -493,11 +378,9 @@ err_buffers:
                mlx4_en_free_rx_buf(priv, priv->rx_ring[ring_ind]);
 
        ring_ind = priv->rx_ring_num - 1;
-err_allocator:
        while (ring_ind >= 0) {
                if (priv->rx_ring[ring_ind]->stride <= TXBB_SIZE)
                        priv->rx_ring[ring_ind]->buf -= TXBB_SIZE;
-               mlx4_en_destroy_allocator(priv, priv->rx_ring[ring_ind]);
                ring_ind--;
        }
        return err;
@@ -537,7 +420,9 @@ bool mlx4_en_rx_recycle(struct mlx4_en_rx_ring *ring,
        if (cache->index >= MLX4_EN_CACHE_SIZE)
                return false;
 
-       cache->buf[cache->index++] = *frame;
+       cache->buf[cache->index].page = frame->page;
+       cache->buf[cache->index].dma = frame->dma;
+       cache->index++;
        return true;
 }
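
With page_size gone from the frame descriptor, the XDP page cache only has to remember the page and its DMA mapping; the offset is reimposed as XDP_PACKET_HEADROOM when the entry is reused in mlx4_en_prepare_rx_desc() above. A sketch of the assumed minimal cache layout (the real struct may carry more fields):

struct mlx4_en_page_cache {
        u32 index;
        struct {
                struct page *page;
                dma_addr_t   dma;
        } buf[MLX4_EN_CACHE_SIZE];
};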
 
@@ -567,136 +452,91 @@ void mlx4_en_deactivate_rx_ring(struct mlx4_en_priv *priv,
        int i;
 
        for (i = 0; i < ring->page_cache.index; i++) {
-               struct mlx4_en_rx_alloc *frame = &ring->page_cache.buf[i];
-
-               dma_unmap_page(priv->ddev, frame->dma, frame->page_size,
-                              priv->frag_info[0].dma_dir);
-               put_page(frame->page);
+               dma_unmap_page(priv->ddev, ring->page_cache.buf[i].dma,
+                              PAGE_SIZE, priv->dma_dir);
+               put_page(ring->page_cache.buf[i].page);
        }
        ring->page_cache.index = 0;
        mlx4_en_free_rx_buf(priv, ring);
        if (ring->stride <= TXBB_SIZE)
                ring->buf -= TXBB_SIZE;
-       mlx4_en_destroy_allocator(priv, ring);
 }
 
 
 static int mlx4_en_complete_rx_desc(struct mlx4_en_priv *priv,
-                                   struct mlx4_en_rx_desc *rx_desc,
                                    struct mlx4_en_rx_alloc *frags,
                                    struct sk_buff *skb,
                                    int length)
 {
-       struct skb_frag_struct *skb_frags_rx = skb_shinfo(skb)->frags;
-       struct mlx4_en_frag_info *frag_info;
-       int nr;
+       const struct mlx4_en_frag_info *frag_info = priv->frag_info;
+       unsigned int truesize = 0;
+       int nr, frag_size;
+       struct page *page;
        dma_addr_t dma;
+       bool release;
 
        /* Collect used fragments while replacing them in the HW descriptors */
-       for (nr = 0; nr < priv->num_frags; nr++) {
-               frag_info = &priv->frag_info[nr];
-               if (length <= frag_info->frag_prefix_size)
-                       break;
-               if (unlikely(!frags[nr].page))
+       for (nr = 0;; frags++) {
+               frag_size = min_t(int, length, frag_info->frag_size);
+
+               page = frags->page;
+               if (unlikely(!page))
                        goto fail;
 
-               dma = be64_to_cpu(rx_desc->data[nr].addr);
-               dma_sync_single_for_cpu(priv->ddev, dma, frag_info->frag_size,
-                                       DMA_FROM_DEVICE);
+               dma = frags->dma;
+               dma_sync_single_range_for_cpu(priv->ddev, dma, frags->page_offset,
+                                             frag_size, priv->dma_dir);
+
+               __skb_fill_page_desc(skb, nr, page, frags->page_offset,
+                                    frag_size);
 
-               __skb_fill_page_desc(skb, nr, frags[nr].page,
-                                    frags[nr].page_offset,
-                                    frag_info->frag_size);
+               truesize += frag_info->frag_stride;
+               if (frag_info->frag_stride == PAGE_SIZE / 2) {
+                       frags->page_offset ^= PAGE_SIZE / 2;
+                       release = page_count(page) != 1 ||
+                                 page_is_pfmemalloc(page) ||
+                                 page_to_nid(page) != numa_mem_id();
+               } else {
+                       u32 sz_align = ALIGN(frag_size, SMP_CACHE_BYTES);
 
-               skb->truesize += frag_info->frag_stride;
-               frags[nr].page = NULL;
+                       frags->page_offset += sz_align;
+                       release = frags->page_offset + frag_info->frag_size > PAGE_SIZE;
+               }
+               if (release) {
+                       dma_unmap_page(priv->ddev, dma, PAGE_SIZE, priv->dma_dir);
+                       frags->page = NULL;
+               } else {
+                       page_ref_inc(page);
+               }
+
+               nr++;
+               length -= frag_size;
+               if (!length)
+                       break;
+               frag_info++;
        }
-       /* Adjust size of last fragment to match actual length */
-       if (nr > 0)
-               skb_frag_size_set(&skb_frags_rx[nr - 1],
-                       length - priv->frag_info[nr - 1].frag_prefix_size);
+       skb->truesize += truesize;
        return nr;
 
 fail:
        while (nr > 0) {
                nr--;
-               __skb_frag_unref(&skb_frags_rx[nr]);
+               __skb_frag_unref(skb_shinfo(skb)->frags + nr);
        }
        return 0;
 }
 
-
-static struct sk_buff *mlx4_en_rx_skb(struct mlx4_en_priv *priv,
-                                     struct mlx4_en_rx_desc *rx_desc,
-                                     struct mlx4_en_rx_alloc *frags,
-                                     unsigned int length)
-{
-       struct sk_buff *skb;
-       void *va;
-       int used_frags;
-       dma_addr_t dma;
-
-       skb = netdev_alloc_skb(priv->dev, SMALL_PACKET_SIZE + NET_IP_ALIGN);
-       if (unlikely(!skb)) {
-               en_dbg(RX_ERR, priv, "Failed allocating skb\n");
-               return NULL;
-       }
-       skb_reserve(skb, NET_IP_ALIGN);
-       skb->len = length;
-
-       /* Get pointer to first fragment so we could copy the headers into the
-        * (linear part of the) skb */
-       va = page_address(frags[0].page) + frags[0].page_offset;
-
-       if (length <= SMALL_PACKET_SIZE) {
-               /* We are copying all relevant data to the skb - temporarily
-                * sync buffers for the copy */
-               dma = be64_to_cpu(rx_desc->data[0].addr);
-               dma_sync_single_for_cpu(priv->ddev, dma, length,
-                                       DMA_FROM_DEVICE);
-               skb_copy_to_linear_data(skb, va, length);
-               skb->tail += length;
-       } else {
-               unsigned int pull_len;
-
-               /* Move relevant fragments to skb */
-               used_frags = mlx4_en_complete_rx_desc(priv, rx_desc, frags,
-                                                       skb, length);
-               if (unlikely(!used_frags)) {
-                       kfree_skb(skb);
-                       return NULL;
-               }
-               skb_shinfo(skb)->nr_frags = used_frags;
-
-               pull_len = eth_get_headlen(va, SMALL_PACKET_SIZE);
-               /* Copy headers into the skb linear buffer */
-               memcpy(skb->data, va, pull_len);
-               skb->tail += pull_len;
-
-               /* Skip headers in first fragment */
-               skb_shinfo(skb)->frags[0].page_offset += pull_len;
-
-               /* Adjust size of first fragment */
-               skb_frag_size_sub(&skb_shinfo(skb)->frags[0], pull_len);
-               skb->data_len = length - pull_len;
-       }
-       return skb;
-}
-
-static void validate_loopback(struct mlx4_en_priv *priv, struct sk_buff *skb)
+static void validate_loopback(struct mlx4_en_priv *priv, void *va)
 {
+       const unsigned char *data = va + ETH_HLEN;
        int i;
-       int offset = ETH_HLEN;
 
-       for (i = 0; i < MLX4_LOOPBACK_TEST_PAYLOAD; i++, offset++) {
-               if (*(skb->data + offset) != (unsigned char) (i & 0xff))
-                       goto out_loopback;
+       for (i = 0; i < MLX4_LOOPBACK_TEST_PAYLOAD; i++) {
+               if (data[i] != (unsigned char)i)
+                       return;
        }
        /* Loopback found */
        priv->loopback_ok = 1;
-
-out_loopback:
-       dev_kfree_skb_any(skb);
 }
 
 static bool mlx4_en_refill_rx_buffers(struct mlx4_en_priv *priv,
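
The rebuilt mlx4_en_complete_rx_desc() above replaces the retired allocator with in-place page recycling: a half-page-stride fragment simply flips to the other half of the page (page_offset ^= PAGE_SIZE / 2) and keeps the page when it is safe to do so, taking one extra reference so ring and skb each hold their own; otherwise the page is unmapped and ownership passes to the skb. The reuse test, restated as a standalone predicate under the assumption frag_stride == PAGE_SIZE / 2:

static bool mlx4_en_half_page_reusable(struct page *page)
{
        /* Safe to hand the other half back to the ring only if we hold
         * the sole reference, the page is not from emergency reserves,
         * and it sits on the local NUMA node.
         */
        return page_count(page) == 1 &&
               !page_is_pfmemalloc(page) &&
               page_to_nid(page) == numa_mem_id();
}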
@@ -801,7 +641,6 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
        struct mlx4_cqe *cqe;
        struct mlx4_en_rx_ring *ring = priv->rx_ring[cq->ring];
        struct mlx4_en_rx_alloc *frags;
-       struct mlx4_en_rx_desc *rx_desc;
        struct bpf_prog *xdp_prog;
        int doorbell_pending;
        struct sk_buff *skb;
@@ -834,10 +673,10 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
        /* Process all completed CQEs */
        while (XNOR(cqe->owner_sr_opcode & MLX4_CQE_OWNER_MASK,
                    cq->mcq.cons_index & cq->size)) {
+               void *va;
 
                frags = ring->rx_info + (index << priv->log_rx_info);
-               rx_desc = ring->buf + (index << ring->log_stride);
-
+               va = page_address(frags[0].page) + frags[0].page_offset;
                /*
                 * make sure we read the CQE after we read the ownership bit
                 */
@@ -860,16 +699,14 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
                 * and not performing the selftest or flb disabled
                 */
                if (priv->flags & MLX4_EN_FLAG_RX_FILTER_NEEDED) {
-                       struct ethhdr *ethh;
+                       const struct ethhdr *ethh = va;
                        dma_addr_t dma;
                        /* Get a pointer to the first fragment (we do not
                         * have an skb yet) and cast it to an ethhdr struct.
                         */
-                       dma = be64_to_cpu(rx_desc->data[0].addr);
+                       dma = frags[0].dma + frags[0].page_offset;
                        dma_sync_single_for_cpu(priv->ddev, dma, sizeof(*ethh),
                                                DMA_FROM_DEVICE);
-                       ethh = (struct ethhdr *)(page_address(frags[0].page) +
-                                                frags[0].page_offset);
 
                        if (is_multicast_ether_addr(ethh->h_dest)) {
                                struct mlx4_mac_entry *entry;
@@ -887,13 +724,16 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
                        }
                }
 
+               if (unlikely(priv->validate_loopback)) {
+                       validate_loopback(priv, va);
+                       goto next;
+               }
+
                /*
                 * Packet is OK - process it.
                 */
                length = be32_to_cpu(cqe->byte_cnt);
                length -= ring->fcs_del;
-               l2_tunnel = (dev->hw_enc_features & NETIF_F_RXCSUM) &&
-                       (cqe->vlan_my_qpn & cpu_to_be32(MLX4_CQE_L2_TUNNEL));
 
                /* A bpf program gets first chance to drop the packet. It may
                 * read bytes but not past the end of the frag.
@@ -904,13 +744,13 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
                        void *orig_data;
                        u32 act;
 
-                       dma = be64_to_cpu(rx_desc->data[0].addr);
+                       dma = frags[0].dma + frags[0].page_offset;
                        dma_sync_single_for_cpu(priv->ddev, dma,
                                                priv->frag_info[0].frag_size,
                                                DMA_FROM_DEVICE);
 
-                       xdp.data_hard_start = page_address(frags[0].page);
-                       xdp.data = xdp.data_hard_start + frags[0].page_offset;
+                       xdp.data_hard_start = va - frags[0].page_offset;
+                       xdp.data = va;
                        xdp.data_end = xdp.data + length;
                        orig_data = xdp.data;
 
@@ -920,6 +760,7 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
                                length = xdp.data_end - xdp.data;
                                frags[0].page_offset = xdp.data -
                                        xdp.data_hard_start;
+                               va = xdp.data;
                        }
 
                        switch (act) {
@@ -928,8 +769,10 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
                        case XDP_TX:
                                if (likely(!mlx4_en_xmit_frame(ring, frags, dev,
                                                        length, cq->ring,
-                                                       &doorbell_pending)))
-                                       goto consumed;
+                                                       &doorbell_pending))) {
+                                       frags[0].page = NULL;
+                                       goto next;
+                               }
                                trace_xdp_exception(dev, xdp_prog, act);
                                goto xdp_drop_no_cnt; /* Drop on xmit failure */
                        default:
@@ -939,8 +782,6 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
                        case XDP_DROP:
                                ring->xdp_drop++;
 xdp_drop_no_cnt:
-                               if (likely(mlx4_en_rx_recycle(ring, frags)))
-                                       goto consumed;
                                goto next;
                        }
                }
@@ -948,129 +789,51 @@ xdp_drop_no_cnt:
                ring->bytes += length;
                ring->packets++;
 
+               skb = napi_get_frags(&cq->napi);
+               if (!skb)
+                       goto next;
+
+               if (unlikely(ring->hwtstamp_rx_filter == HWTSTAMP_FILTER_ALL)) {
+                       timestamp = mlx4_en_get_cqe_ts(cqe);
+                       mlx4_en_fill_hwtstamps(mdev, skb_hwtstamps(skb),
+                                              timestamp);
+               }
+               skb_record_rx_queue(skb, cq->ring);
+
                if (likely(dev->features & NETIF_F_RXCSUM)) {
                        if (cqe->status & cpu_to_be16(MLX4_CQE_STATUS_TCP |
                                                      MLX4_CQE_STATUS_UDP)) {
                                if ((cqe->status & cpu_to_be16(MLX4_CQE_STATUS_IPOK)) &&
                                    cqe->checksum == cpu_to_be16(0xffff)) {
                                        ip_summed = CHECKSUM_UNNECESSARY;
+                                       l2_tunnel = (dev->hw_enc_features & NETIF_F_RXCSUM) &&
+                                               (cqe->vlan_my_qpn & cpu_to_be32(MLX4_CQE_L2_TUNNEL));
+                                       if (l2_tunnel)
+                                               skb->csum_level = 1;
                                        ring->csum_ok++;
                                } else {
-                                       ip_summed = CHECKSUM_NONE;
-                                       ring->csum_none++;
+                                       goto csum_none;
                                }
                        } else {
                                if (priv->flags & MLX4_EN_FLAG_RX_CSUM_NON_TCP_UDP &&
                                    (cqe->status & cpu_to_be16(MLX4_CQE_STATUS_IPV4 |
                                                               MLX4_CQE_STATUS_IPV6))) {
-                                       ip_summed = CHECKSUM_COMPLETE;
-                                       ring->csum_complete++;
+                                       if (check_csum(cqe, skb, va, dev->features)) {
+                                               goto csum_none;
+                                       } else {
+                                               ip_summed = CHECKSUM_COMPLETE;
+                                               ring->csum_complete++;
+                                       }
                                } else {
-                                       ip_summed = CHECKSUM_NONE;
-                                       ring->csum_none++;
+                                       goto csum_none;
                                }
                        }
                } else {
+csum_none:
                        ip_summed = CHECKSUM_NONE;
                        ring->csum_none++;
                }
-
-               /* This packet is eligible for GRO if it is:
-                * - DIX Ethernet (type interpretation)
-                * - TCP/IP (v4)
-                * - without IP options
-                * - not an IP fragment
-                */
-               if (dev->features & NETIF_F_GRO) {
-                       struct sk_buff *gro_skb = napi_get_frags(&cq->napi);
-                       if (!gro_skb)
-                               goto next;
-
-                       nr = mlx4_en_complete_rx_desc(priv,
-                               rx_desc, frags, gro_skb,
-                               length);
-                       if (!nr)
-                               goto next;
-
-                       if (ip_summed == CHECKSUM_COMPLETE) {
-                               void *va = skb_frag_address(skb_shinfo(gro_skb)->frags);
-                               if (check_csum(cqe, gro_skb, va,
-                                              dev->features)) {
-                                       ip_summed = CHECKSUM_NONE;
-                                       ring->csum_none++;
-                                       ring->csum_complete--;
-                               }
-                       }
-
-                       skb_shinfo(gro_skb)->nr_frags = nr;
-                       gro_skb->len = length;
-                       gro_skb->data_len = length;
-                       gro_skb->ip_summed = ip_summed;
-
-                       if (l2_tunnel && ip_summed == CHECKSUM_UNNECESSARY)
-                               gro_skb->csum_level = 1;
-
-                       if ((cqe->vlan_my_qpn &
-                           cpu_to_be32(MLX4_CQE_CVLAN_PRESENT_MASK)) &&
-                           (dev->features & NETIF_F_HW_VLAN_CTAG_RX)) {
-                               u16 vid = be16_to_cpu(cqe->sl_vid);
-
-                               __vlan_hwaccel_put_tag(gro_skb, htons(ETH_P_8021Q), vid);
-                       } else if ((be32_to_cpu(cqe->vlan_my_qpn) &
-                                 MLX4_CQE_SVLAN_PRESENT_MASK) &&
-                                (dev->features & NETIF_F_HW_VLAN_STAG_RX)) {
-                               __vlan_hwaccel_put_tag(gro_skb,
-                                                      htons(ETH_P_8021AD),
-                                                      be16_to_cpu(cqe->sl_vid));
-                       }
-
-                       if (dev->features & NETIF_F_RXHASH)
-                               skb_set_hash(gro_skb,
-                                            be32_to_cpu(cqe->immed_rss_invalid),
-                                            (ip_summed == CHECKSUM_UNNECESSARY) ?
-                                               PKT_HASH_TYPE_L4 :
-                                               PKT_HASH_TYPE_L3);
-
-                       skb_record_rx_queue(gro_skb, cq->ring);
-
-                       if (ring->hwtstamp_rx_filter == HWTSTAMP_FILTER_ALL) {
-                               timestamp = mlx4_en_get_cqe_ts(cqe);
-                               mlx4_en_fill_hwtstamps(mdev,
-                                                      skb_hwtstamps(gro_skb),
-                                                      timestamp);
-                       }
-
-                       napi_gro_frags(&cq->napi);
-                       goto next;
-               }
-
-               /* GRO not possible, complete processing here */
-               skb = mlx4_en_rx_skb(priv, rx_desc, frags, length);
-               if (unlikely(!skb)) {
-                       ring->dropped++;
-                       goto next;
-               }
-
-               if (unlikely(priv->validate_loopback)) {
-                       validate_loopback(priv, skb);
-                       goto next;
-               }
-
-               if (ip_summed == CHECKSUM_COMPLETE) {
-                       if (check_csum(cqe, skb, skb->data, dev->features)) {
-                               ip_summed = CHECKSUM_NONE;
-                               ring->csum_complete--;
-                               ring->csum_none++;
-                       }
-               }
-
                skb->ip_summed = ip_summed;
-               skb->protocol = eth_type_trans(skb, dev);
-               skb_record_rx_queue(skb, cq->ring);
-
-               if (l2_tunnel && ip_summed == CHECKSUM_UNNECESSARY)
-                       skb->csum_level = 1;
-
                if (dev->features & NETIF_F_RXHASH)
                        skb_set_hash(skb,
                                     be32_to_cpu(cqe->immed_rss_invalid),
@@ -1078,36 +841,36 @@ xdp_drop_no_cnt:
                                        PKT_HASH_TYPE_L4 :
                                        PKT_HASH_TYPE_L3);
 
-               if ((be32_to_cpu(cqe->vlan_my_qpn) &
-                   MLX4_CQE_CVLAN_PRESENT_MASK) &&
+               if ((cqe->vlan_my_qpn &
+                    cpu_to_be32(MLX4_CQE_CVLAN_PRESENT_MASK)) &&
                    (dev->features & NETIF_F_HW_VLAN_CTAG_RX))
-                       __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), be16_to_cpu(cqe->sl_vid));
-               else if ((be32_to_cpu(cqe->vlan_my_qpn) &
-                         MLX4_CQE_SVLAN_PRESENT_MASK) &&
+                       __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
+                                              be16_to_cpu(cqe->sl_vid));
+               else if ((cqe->vlan_my_qpn &
+                         cpu_to_be32(MLX4_CQE_SVLAN_PRESENT_MASK)) &&
                         (dev->features & NETIF_F_HW_VLAN_STAG_RX))
                        __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021AD),
                                               be16_to_cpu(cqe->sl_vid));
 
-               if (ring->hwtstamp_rx_filter == HWTSTAMP_FILTER_ALL) {
-                       timestamp = mlx4_en_get_cqe_ts(cqe);
-                       mlx4_en_fill_hwtstamps(mdev, skb_hwtstamps(skb),
-                                              timestamp);
+               nr = mlx4_en_complete_rx_desc(priv, frags, skb, length);
+               if (likely(nr)) {
+                       skb_shinfo(skb)->nr_frags = nr;
+                       skb->len = length;
+                       skb->data_len = length;
+                       napi_gro_frags(&cq->napi);
+               } else {
+                       skb->vlan_tci = 0;
+                       skb_clear_hash(skb);
                }
-
-               napi_gro_receive(&cq->napi, skb);
 next:
-               for (nr = 0; nr < priv->num_frags; nr++)
-                       mlx4_en_free_frag(priv, frags, nr);
-
-consumed:
                ++cq->mcq.cons_index;
                index = (cq->mcq.cons_index) & ring->size_mask;
                cqe = mlx4_en_get_cqe(cq->buf, index, priv->cqe_size) + factor;
                if (++polled == budget)
-                       goto out;
+                       break;
        }
 
-out:
        rcu_read_unlock();
 
        if (polled) {
@@ -1178,13 +941,6 @@ int mlx4_en_poll_rx_cq(struct napi_struct *napi, int budget)
        return done;
 }
 
-static const int frag_sizes[] = {
-       FRAG_SZ0,
-       FRAG_SZ1,
-       FRAG_SZ2,
-       FRAG_SZ3
-};
-
 void mlx4_en_calc_rx_buf(struct net_device *dev)
 {
        struct mlx4_en_priv *priv = netdev_priv(dev);
@@ -1195,33 +951,43 @@ void mlx4_en_calc_rx_buf(struct net_device *dev)
         * This only works when num_frags == 1.
         */
        if (priv->tx_ring_num[TX_XDP]) {
-               priv->frag_info[0].order = 0;
                priv->frag_info[0].frag_size = eff_mtu;
-               priv->frag_info[0].frag_prefix_size = 0;
                /* This will gain efficient xdp frame recycling at the
                 * expense of more costly truesize accounting
                 */
                priv->frag_info[0].frag_stride = PAGE_SIZE;
-               priv->frag_info[0].dma_dir = PCI_DMA_BIDIRECTIONAL;
-               priv->frag_info[0].rx_headroom = XDP_PACKET_HEADROOM;
+               priv->dma_dir = PCI_DMA_BIDIRECTIONAL;
+               priv->rx_headroom = XDP_PACKET_HEADROOM;
                i = 1;
        } else {
-               int buf_size = 0;
+               int frag_size_max = 2048, buf_size = 0;
+
+               /* Should not happen: fall back to full-page fragments for oversized MTUs */
+               if (eff_mtu > PAGE_SIZE + (MLX4_EN_MAX_RX_FRAGS - 1) * 2048)
+                       frag_size_max = PAGE_SIZE;
 
                while (buf_size < eff_mtu) {
-                       priv->frag_info[i].order = MLX4_EN_ALLOC_PREFER_ORDER;
-                       priv->frag_info[i].frag_size =
-                               (eff_mtu > buf_size + frag_sizes[i]) ?
-                                       frag_sizes[i] : eff_mtu - buf_size;
-                       priv->frag_info[i].frag_prefix_size = buf_size;
-                       priv->frag_info[i].frag_stride =
-                               ALIGN(priv->frag_info[i].frag_size,
-                                     SMP_CACHE_BYTES);
-                       priv->frag_info[i].dma_dir = PCI_DMA_FROMDEVICE;
-                       priv->frag_info[i].rx_headroom = 0;
-                       buf_size += priv->frag_info[i].frag_size;
+                       int frag_stride, frag_size = eff_mtu - buf_size;
+                       int pad, nb;
+
+                       if (i < MLX4_EN_MAX_RX_FRAGS - 1)
+                               frag_size = min(frag_size, frag_size_max);
+
+                       priv->frag_info[i].frag_size = frag_size;
+                       frag_stride = ALIGN(frag_size, SMP_CACHE_BYTES);
+                       /* We can only pack two 1536-byte frames in one 4K page.
+                        * Therefore, each frame consumes more bytes (truesize).
+                        */
+                       nb = PAGE_SIZE / frag_stride;
+                       pad = (PAGE_SIZE - nb * frag_stride) / nb;
+                       pad &= ~(SMP_CACHE_BYTES - 1);
+                       priv->frag_info[i].frag_stride = frag_stride + pad;
+
+                       buf_size += frag_size;
                        i++;
                }
+               priv->dma_dir = PCI_DMA_FROMDEVICE;
+               priv->rx_headroom = 0;
        }
 
        priv->num_frags = i;
@@ -1232,10 +998,9 @@ void mlx4_en_calc_rx_buf(struct net_device *dev)
               eff_mtu, priv->num_frags);
        for (i = 0; i < priv->num_frags; i++) {
                en_err(priv,
-                      "  frag:%d - size:%d prefix:%d stride:%d\n",
+                      "  frag:%d - size:%d stride:%d\n",
                       i,
                       priv->frag_info[i].frag_size,
-                      priv->frag_info[i].frag_prefix_size,
                       priv->frag_info[i].frag_stride);
        }
 }
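The stride computation above packs as many frames as possible into each page and then spreads the leftover bytes across them, so truesize accounting stays close to reality. The arithmetic can be checked in isolation; a user-space sketch with PAGE_SIZE and SMP_CACHE_BYTES stubbed to common x86 values:

	#include <stdio.h>

	#define PAGE_SIZE	4096
	#define SMP_CACHE_BYTES	64
	#define ALIGN(x, a)	(((x) + (a) - 1) & ~((a) - 1))

	int main(void)
	{
		int frag_size = 1536;
		int frag_stride = ALIGN(frag_size, SMP_CACHE_BYTES);	/* 1536 */
		int nb = PAGE_SIZE / frag_stride;			/* 2 frames per 4K page */
		int pad = (PAGE_SIZE - nb * frag_stride) / nb;		/* 512 leftover bytes each */

		pad &= ~(SMP_CACHE_BYTES - 1);				/* keep cache alignment */
		printf("stride=%d\n", frag_stride + pad);		/* prints stride=2048 */
		return 0;
	}
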
index 95290e1fc9fe7600b2e3bcca334f3fad7d733c09..17112faafbccc5f7a75ee82a287be7952859ae9e 100644 (file)
@@ -81,14 +81,11 @@ static int mlx4_en_test_loopback(struct mlx4_en_priv *priv)
 {
        u32 loopback_ok = 0;
        int i;
-       bool gro_enabled;
 
        priv->loopback_ok = 0;
        priv->validate_loopback = 1;
-       gro_enabled = priv->dev->features & NETIF_F_GRO;
 
        mlx4_en_update_loopback_state(priv->dev, priv->dev->features);
-       priv->dev->features &= ~NETIF_F_GRO;
 
        /* xmit */
        if (mlx4_en_test_loopback_xmit(priv)) {
@@ -111,9 +108,6 @@ mlx4_en_test_loopback_exit:
 
        priv->validate_loopback = 0;
 
-       if (gro_enabled)
-               priv->dev->features |= NETIF_F_GRO;
-
        mlx4_en_update_loopback_state(priv->dev, priv->dev->features);
        return !loopback_ok;
 }
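With the GRO toggling gone, the selftest depends only on validate_loopback() recognizing its payload: byte i after the Ethernet header must equal (unsigned char)i. A sketch of the matching transmit-side fill, assuming the same MLX4_LOOPBACK_TEST_PAYLOAD length:

	/* Fill a loopback test frame's payload in the exact pattern that
	 * validate_loopback() checks on the RX side.
	 */
	static void fill_loopback_payload(void *va)
	{
		unsigned char *data = (unsigned char *)va + ETH_HLEN;
		int i;

		for (i = 0; i < MLX4_LOOPBACK_TEST_PAYLOAD; i++)
			data[i] = (unsigned char)i;
	}
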
index 3ed42199d3f1275f77560e92a430c0dde181e95a..3ba89bc43d74d8c023776079bcd0bbadd70fb5c6 100644 (file)
@@ -354,13 +354,11 @@ u32 mlx4_en_recycle_tx_desc(struct mlx4_en_priv *priv,
        struct mlx4_en_rx_alloc frame = {
                .page = tx_info->page,
                .dma = tx_info->map0_dma,
-               .page_offset = XDP_PACKET_HEADROOM,
-               .page_size = PAGE_SIZE,
        };
 
        if (!mlx4_en_rx_recycle(ring->recycle_ring, &frame)) {
                dma_unmap_page(priv->ddev, tx_info->map0_dma,
-                              PAGE_SIZE, priv->frag_info[0].dma_dir);
+                              PAGE_SIZE, priv->dma_dir);
                put_page(tx_info->page);
        }
 
@@ -980,8 +978,7 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
 
                ring->tso_packets++;
 
-               i = ((skb->len - lso_header_size) / shinfo->gso_size) +
-                       !!((skb->len - lso_header_size) % shinfo->gso_size);
+               i = shinfo->gso_segs;
                tx_info->nr_bytes = skb->len + (i - 1) * lso_header_size;
                ring->packets += i;
        } else {
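The change above drops the open-coded segment count in favor of skb_shinfo(skb)->gso_segs, which the stack computes once. Both forms are the same ceiling division; a sketch of the equivalence:

	/* Equivalent ways to count TSO segments for a given payload. */
	static unsigned int tso_segs(unsigned int skb_len,
				     unsigned int lso_header_size,
				     unsigned int gso_size)
	{
		unsigned int payload = skb_len - lso_header_size;

		/* The removed expression,
		 *	payload / gso_size + !!(payload % gso_size),
		 * is exactly DIV_ROUND_UP(payload, gso_size), which is the
		 * value the stack already stored in shinfo->gso_segs.
		 */
		return DIV_ROUND_UP(payload, gso_size);
	}
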
index 21377c315083b686d8db25033583dd020d7e50a6..703205475524d689cd2762f2d2ce3abfd2b6ebcb 100644 (file)
@@ -1940,6 +1940,14 @@ static int mlx4_comm_check_offline(struct mlx4_dev *dev)
                               (u32)(1 << COMM_CHAN_OFFLINE_OFFSET));
                if (!offline_bit)
                        return 0;
+
+               /* If device removal has been requested,
+                * do not continue retrying.
+                */
+               if (dev->persist->interface_state &
+                   MLX4_INTERFACE_STATE_NOWAIT)
+                       break;
+
                /* There are cases as part of AER/Reset flow that PF needs
                 * around 100 msec to load. We therefore sleep for 100 msec
                 * to allow other tasks to make use of that CPU during this
@@ -3955,6 +3963,9 @@ static void mlx4_remove_one(struct pci_dev *pdev)
        struct devlink *devlink = priv_to_devlink(priv);
        int active_vfs = 0;
 
+       if (mlx4_is_slave(dev))
+               persist->interface_state |= MLX4_INTERFACE_STATE_NOWAIT;
+
        mutex_lock(&persist->interface_state_mutex);
        persist->interface_state |= MLX4_INTERFACE_STATE_DELETION;
        mutex_unlock(&persist->interface_state_mutex);
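Taken together, the two hunks above form a cooperative-cancellation pair: mlx4_remove_one() publishes MLX4_INTERFACE_STATE_NOWAIT, and the offline-check loop tests it between retries instead of sleeping out its full budget. A simplified sketch of the loop's shape (read_offline_bit() and NUM_RETRIES are hypothetical stand-ins):

	/* Simplified shape of the retry loop after this change. */
	int i;

	for (i = 0; i < NUM_RETRIES; i++) {
		if (!read_offline_bit(dev))	/* hypothetical register read */
			return 0;		/* device came back online */

		/* Removal requested: stop waiting rather than burning
		 * the remaining retries during driver unload.
		 */
		if (dev->persist->interface_state & MLX4_INTERFACE_STATE_NOWAIT)
			break;

		msleep(100);			/* give the PF time to load */
	}
	return -EIO;
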
index 3629ce11a68b9dec5c1659539bdc6f2c11114e35..39f401aa30474e61c0b0029463b23a829ec35fa3 100644 (file)
 /* Use the maximum between 16384 and a single page */
 #define MLX4_EN_ALLOC_SIZE     PAGE_ALIGN(16384)
 
-#define MLX4_EN_ALLOC_PREFER_ORDER min_t(int, get_order(32768),                \
-                                        PAGE_ALLOC_COSTLY_ORDER)
-
-/* Receive fragment sizes; we use at most 3 fragments (for 9600 byte MTU
- * and 4K allocations) */
-enum {
-       FRAG_SZ0 = 1536 - NET_IP_ALIGN,
-       FRAG_SZ1 = 4096,
-       FRAG_SZ2 = 4096,
-       FRAG_SZ3 = MLX4_EN_ALLOC_SIZE
-};
 #define MLX4_EN_MAX_RX_FRAGS   4
 
 /* Maximum ring sizes */
@@ -264,13 +253,16 @@ struct mlx4_en_rx_alloc {
        struct page     *page;
        dma_addr_t      dma;
        u32             page_offset;
-       u32             page_size;
 };
 
 #define MLX4_EN_CACHE_SIZE (2 * NAPI_POLL_WEIGHT)
+
 struct mlx4_en_page_cache {
        u32 index;
-       struct mlx4_en_rx_alloc buf[MLX4_EN_CACHE_SIZE];
+       struct {
+               struct page     *page;
+               dma_addr_t      dma;
+       } buf[MLX4_EN_CACHE_SIZE];
 };
 
 struct mlx4_en_priv;
@@ -335,7 +327,6 @@ struct mlx4_en_rx_desc {
 
 struct mlx4_en_rx_ring {
        struct mlx4_hwq_resources wqres;
-       struct mlx4_en_rx_alloc page_alloc[MLX4_EN_MAX_RX_FRAGS];
        u32 size;       /* number of Rx descs */
        u32 actual_size;
        u32 size_mask;
@@ -355,6 +346,7 @@ struct mlx4_en_rx_ring {
        unsigned long csum_ok;
        unsigned long csum_none;
        unsigned long csum_complete;
+       unsigned long rx_alloc_pages;
        unsigned long xdp_drop;
        unsigned long xdp_tx;
        unsigned long xdp_tx_full;
@@ -472,11 +464,7 @@ struct mlx4_en_mc_list {
 
 struct mlx4_en_frag_info {
        u16 frag_size;
-       u16 frag_prefix_size;
        u32 frag_stride;
-       enum dma_data_direction dma_dir;
-       u16 order;
-       u16 rx_headroom;
 };
 
 #ifdef CONFIG_MLX4_EN_DCB
@@ -584,8 +572,10 @@ struct mlx4_en_priv {
        u32 rx_ring_num;
        u32 rx_skb_size;
        struct mlx4_en_frag_info frag_info[MLX4_EN_MAX_RX_FRAGS];
-       u16 num_frags;
-       u16 log_rx_info;
+       u8 num_frags;
+       u8 log_rx_info;
+       u8 dma_dir;
+       u16 rx_headroom;
 
        struct mlx4_en_tx_ring **tx_ring[MLX4_EN_NUM_TX_TYPES];
        struct mlx4_en_rx_ring *rx_ring[MAX_RX_RINGS];
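Shrinking num_frags and log_rx_info to u8 is safe because both are tiny: log_rx_info is just the log2 of each descriptor's slot in the flat rx_info array, rounded up to a power of two so the hot-path lookup is a single shift. A sketch of that addressing scheme (slot sizing simplified from the driver's rounding logic):

	/* Each RX descriptor owns a power-of-two-sized slot in rx_info,
	 * so indexing is a shift instead of a multiply.
	 */
	size_t slot = roundup_pow_of_two(priv->num_frags *
					 sizeof(struct mlx4_en_rx_alloc));

	priv->log_rx_info = ilog2(slot);

	/* hot path, as in mlx4_en_process_rx_cq(): */
	frags = ring->rx_info + (index << priv->log_rx_info);
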
index 48641cb0367f251a07537b82d0a16bf50d8479ef..926f3c3f3665c5d28fe5d35c41afaa0e5917c007 100644 (file)
@@ -37,7 +37,7 @@ struct mlx4_en_port_stats {
        unsigned long queue_stopped;
        unsigned long wake_queue;
        unsigned long tx_timeout;
-       unsigned long rx_alloc_failed;
+       unsigned long rx_alloc_pages;
        unsigned long rx_chksum_good;
        unsigned long rx_chksum_none;
        unsigned long rx_chksum_complete;
index ddb4ca4ff930a74b38a97b04b98ad54262da1a7b..117170014e8897f0f91cfc25464e3a03aba044ec 100644 (file)
@@ -14,6 +14,7 @@ config MLX5_CORE
 config MLX5_CORE_EN
        bool "Mellanox Technologies ConnectX-4 Ethernet support"
        depends on NETDEVICES && ETHERNET && PCI && MLX5_CORE
+       depends on IPV6=y || IPV6=n || MLX5_CORE=m
        imply PTP_1588_CLOCK
        default n
        ---help---
index caa837e5e2b991fc3666776d2050fe20b1c6c7f6..5bdaf3d545b2fc656a318d5b562f940e14ecd9d9 100644 (file)
@@ -279,6 +279,8 @@ static int mlx5_internal_err_ret_value(struct mlx5_core_dev *dev, u16 op,
        case MLX5_CMD_OP_DESTROY_XRC_SRQ:
        case MLX5_CMD_OP_DESTROY_DCT:
        case MLX5_CMD_OP_DEALLOC_Q_COUNTER:
+       case MLX5_CMD_OP_DESTROY_SCHEDULING_ELEMENT:
+       case MLX5_CMD_OP_DESTROY_QOS_PARA_VPORT:
        case MLX5_CMD_OP_DEALLOC_PD:
        case MLX5_CMD_OP_DEALLOC_UAR:
        case MLX5_CMD_OP_DETACH_FROM_MCG:
@@ -305,8 +307,7 @@ static int mlx5_internal_err_ret_value(struct mlx5_core_dev *dev, u16 op,
        case MLX5_CMD_OP_SET_FLOW_TABLE_ENTRY:
        case MLX5_CMD_OP_SET_FLOW_TABLE_ROOT:
        case MLX5_CMD_OP_DEALLOC_ENCAP_HEADER:
-       case MLX5_CMD_OP_DESTROY_SCHEDULING_ELEMENT:
-       case MLX5_CMD_OP_DESTROY_QOS_PARA_VPORT:
+       case MLX5_CMD_OP_DEALLOC_MODIFY_HEADER_CONTEXT:
                return MLX5_CMD_STAT_OK;
 
        case MLX5_CMD_OP_QUERY_HCA_CAP:
@@ -361,6 +362,12 @@ static int mlx5_internal_err_ret_value(struct mlx5_core_dev *dev, u16 op,
        case MLX5_CMD_OP_QUERY_VPORT_COUNTER:
        case MLX5_CMD_OP_ALLOC_Q_COUNTER:
        case MLX5_CMD_OP_QUERY_Q_COUNTER:
+       case MLX5_CMD_OP_SET_RATE_LIMIT:
+       case MLX5_CMD_OP_QUERY_RATE_LIMIT:
+       case MLX5_CMD_OP_CREATE_SCHEDULING_ELEMENT:
+       case MLX5_CMD_OP_QUERY_SCHEDULING_ELEMENT:
+       case MLX5_CMD_OP_MODIFY_SCHEDULING_ELEMENT:
+       case MLX5_CMD_OP_CREATE_QOS_PARA_VPORT:
        case MLX5_CMD_OP_ALLOC_PD:
        case MLX5_CMD_OP_ALLOC_UAR:
        case MLX5_CMD_OP_CONFIG_INT_MODERATION:
@@ -412,10 +419,7 @@ static int mlx5_internal_err_ret_value(struct mlx5_core_dev *dev, u16 op,
        case MLX5_CMD_OP_ALLOC_FLOW_COUNTER:
        case MLX5_CMD_OP_QUERY_FLOW_COUNTER:
        case MLX5_CMD_OP_ALLOC_ENCAP_HEADER:
-       case MLX5_CMD_OP_CREATE_SCHEDULING_ELEMENT:
-       case MLX5_CMD_OP_QUERY_SCHEDULING_ELEMENT:
-       case MLX5_CMD_OP_MODIFY_SCHEDULING_ELEMENT:
-       case MLX5_CMD_OP_CREATE_QOS_PARA_VPORT:
+       case MLX5_CMD_OP_ALLOC_MODIFY_HEADER_CONTEXT:
                *status = MLX5_DRIVER_STATUS_ABORTED;
                *synd = MLX5_DRIVER_SYND;
                return -EIO;
@@ -497,6 +501,14 @@ const char *mlx5_command_str(int command)
        MLX5_COMMAND_STR_CASE(ALLOC_Q_COUNTER);
        MLX5_COMMAND_STR_CASE(DEALLOC_Q_COUNTER);
        MLX5_COMMAND_STR_CASE(QUERY_Q_COUNTER);
+       MLX5_COMMAND_STR_CASE(SET_RATE_LIMIT);
+       MLX5_COMMAND_STR_CASE(QUERY_RATE_LIMIT);
+       MLX5_COMMAND_STR_CASE(CREATE_SCHEDULING_ELEMENT);
+       MLX5_COMMAND_STR_CASE(DESTROY_SCHEDULING_ELEMENT);
+       MLX5_COMMAND_STR_CASE(QUERY_SCHEDULING_ELEMENT);
+       MLX5_COMMAND_STR_CASE(MODIFY_SCHEDULING_ELEMENT);
+       MLX5_COMMAND_STR_CASE(CREATE_QOS_PARA_VPORT);
+       MLX5_COMMAND_STR_CASE(DESTROY_QOS_PARA_VPORT);
        MLX5_COMMAND_STR_CASE(ALLOC_PD);
        MLX5_COMMAND_STR_CASE(DEALLOC_PD);
        MLX5_COMMAND_STR_CASE(ALLOC_UAR);
@@ -572,12 +584,8 @@ const char *mlx5_command_str(int command)
        MLX5_COMMAND_STR_CASE(MODIFY_FLOW_TABLE);
        MLX5_COMMAND_STR_CASE(ALLOC_ENCAP_HEADER);
        MLX5_COMMAND_STR_CASE(DEALLOC_ENCAP_HEADER);
-       MLX5_COMMAND_STR_CASE(CREATE_SCHEDULING_ELEMENT);
-       MLX5_COMMAND_STR_CASE(DESTROY_SCHEDULING_ELEMENT);
-       MLX5_COMMAND_STR_CASE(QUERY_SCHEDULING_ELEMENT);
-       MLX5_COMMAND_STR_CASE(MODIFY_SCHEDULING_ELEMENT);
-       MLX5_COMMAND_STR_CASE(CREATE_QOS_PARA_VPORT);
-       MLX5_COMMAND_STR_CASE(DESTROY_QOS_PARA_VPORT);
+       MLX5_COMMAND_STR_CASE(ALLOC_MODIFY_HEADER_CONTEXT);
+       MLX5_COMMAND_STR_CASE(DEALLOC_MODIFY_HEADER_CONTEXT);
        default: return "unknown command opcode";
        }
 }
index f6a6ded204f61cda53c6233d80b3db7cde678c6e..b7feecfbb5a5739a946ee042f33ce95b35108097 100644 (file)
 #define MLX5E_MAX_NUM_SQS              (MLX5E_MAX_NUM_CHANNELS * MLX5E_MAX_NUM_TC)
 #define MLX5E_TX_CQ_POLL_BUDGET        128
 #define MLX5E_UPDATE_STATS_INTERVAL    200 /* msecs */
-#define MLX5E_SQ_BF_BUDGET             16
 
 #define MLX5E_ICOSQ_MAX_WQEBBS \
        (DIV_ROUND_UP(sizeof(struct mlx5e_umr_wqe), MLX5_SEND_WQE_BB))
 
 #define MLX5E_XDP_MIN_INLINE (ETH_HLEN + VLAN_HLEN)
-#define MLX5E_XDP_IHS_DS_COUNT \
-       DIV_ROUND_UP(MLX5E_XDP_MIN_INLINE - 2, MLX5_SEND_WQE_DS)
 #define MLX5E_XDP_TX_DS_COUNT \
        ((sizeof(struct mlx5e_tx_wqe) / MLX5_SEND_WQE_DS) + 1 /* SG DS */)
-#define MLX5E_XDP_TX_WQEBBS \
-       DIV_ROUND_UP(MLX5E_XDP_TX_DS_COUNT, MLX5_SEND_WQEBB_NUM_DS)
 
 #define MLX5E_NUM_MAIN_GROUPS 9
 
@@ -187,15 +182,15 @@ enum mlx5e_priv_flag {
        MLX5E_PFLAG_RX_CQE_COMPRESS = (1 << 1),
 };
 
-#define MLX5E_SET_PFLAG(priv, pflag, enable)                   \
+#define MLX5E_SET_PFLAG(params, pflag, enable)                 \
        do {                                                    \
                if (enable)                                     \
-                       (priv)->params.pflags |= (pflag);       \
+                       (params)->pflags |= (pflag);            \
                else                                            \
-                       (priv)->params.pflags &= ~(pflag);      \
+                       (params)->pflags &= ~(pflag);           \
        } while (0)
 
-#define MLX5E_GET_PFLAG(priv, pflag) (!!((priv)->params.pflags & (pflag)))
+#define MLX5E_GET_PFLAG(params, pflag) (!!((params)->pflags & (pflag)))
 
 #ifdef CONFIG_MLX5_CORE_EN_DCB
 #define MLX5E_MAX_BW_ALLOC 100 /* Max percentage of BW allocation */
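MLX5E_SET_PFLAG/MLX5E_GET_PFLAG now take a struct mlx5e_params pointer instead of dereferencing priv, which lets callers stage flag changes on a scratch copy of the parameters before committing them. A hedged usage sketch:

	/* Stage a private-flag change on a copy; the live configuration is
	 * untouched until the copy is applied (e.g. by switching channels).
	 */
	struct mlx5e_params new_params = priv->channels.params;

	MLX5E_SET_PFLAG(&new_params, MLX5E_PFLAG_RX_CQE_COMPRESS, enable);

	if (MLX5E_GET_PFLAG(&new_params, MLX5E_PFLAG_RX_CQE_COMPRESS) !=
	    MLX5E_GET_PFLAG(&priv->channels.params, MLX5E_PFLAG_RX_CQE_COMPRESS)) {
		/* open fresh channels with new_params, then swap them in */
	}
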
@@ -218,7 +213,6 @@ struct mlx5e_params {
        bool rx_cqe_compress_def;
        struct mlx5e_cq_moder rx_cq_moderation;
        struct mlx5e_cq_moder tx_cq_moderation;
-       u16 min_rx_wqes;
        bool lro_en;
        u32 lro_wqe_sz;
        u16 tx_max_inline;
@@ -227,9 +221,11 @@ struct mlx5e_params {
        u8  toeplitz_hash_key[40];
        u32 indirection_rqt[MLX5E_INDIR_RQT_SIZE];
        bool vlan_strip_disable;
+       bool scatter_fcs_en;
        bool rx_am_enabled;
        u32 lro_timeout;
        u32 pflags;
+       struct bpf_prog *xdp_prog;
 };
 
 #ifdef CONFIG_MLX5_CORE_EN_DCB
@@ -285,7 +281,6 @@ struct mlx5e_cq {
        struct napi_struct        *napi;
        struct mlx5_core_cq        mcq;
        struct mlx5e_channel      *channel;
-       struct mlx5e_priv         *priv;
 
        /* cqe decompression */
        struct mlx5_cqe64          title;
@@ -295,22 +290,163 @@ struct mlx5e_cq {
        u16                        decmprs_wqe_counter;
 
        /* control */
+       struct mlx5_core_dev      *mdev;
        struct mlx5_frag_wq_ctrl   wq_ctrl;
 } ____cacheline_aligned_in_smp;
 
-struct mlx5e_rq;
-typedef void (*mlx5e_fp_handle_rx_cqe)(struct mlx5e_rq *rq,
-                                      struct mlx5_cqe64 *cqe);
-typedef int (*mlx5e_fp_alloc_wqe)(struct mlx5e_rq *rq, struct mlx5e_rx_wqe *wqe,
-                                 u16 ix);
+struct mlx5e_tx_wqe_info {
+       u32 num_bytes;
+       u8  num_wqebbs;
+       u8  num_dma;
+};
+
+enum mlx5e_dma_map_type {
+       MLX5E_DMA_MAP_SINGLE,
+       MLX5E_DMA_MAP_PAGE
+};
+
+struct mlx5e_sq_dma {
+       dma_addr_t              addr;
+       u32                     size;
+       enum mlx5e_dma_map_type type;
+};
+
+enum {
+       MLX5E_SQ_STATE_ENABLED,
+};
+
+struct mlx5e_sq_wqe_info {
+       u8  opcode;
+       u8  num_wqebbs;
+};
+
+struct mlx5e_txqsq {
+       /* data path */
+
+       /* dirtied @completion */
+       u16                        cc;
+       u32                        dma_fifo_cc;
+
+       /* dirtied @xmit */
+       u16                        pc ____cacheline_aligned_in_smp;
+       u32                        dma_fifo_pc;
+       struct mlx5e_sq_stats      stats;
+
+       struct mlx5e_cq            cq;
+
+       /* write@xmit, read@completion */
+       struct {
+               struct sk_buff           **skb;
+               struct mlx5e_sq_dma       *dma_fifo;
+               struct mlx5e_tx_wqe_info  *wqe_info;
+       } db;
+
+       /* read only */
+       struct mlx5_wq_cyc         wq;
+       u32                        dma_fifo_mask;
+       void __iomem              *uar_map;
+       struct netdev_queue       *txq;
+       u32                        sqn;
+       u16                        max_inline;
+       u8                         min_inline_mode;
+       u16                        edge;
+       struct device             *pdev;
+       struct mlx5e_tstamp       *tstamp;
+       __be32                     mkey_be;
+       unsigned long              state;
+
+       /* control path */
+       struct mlx5_wq_ctrl        wq_ctrl;
+       struct mlx5e_channel      *channel;
+       int                        txq_ix;
+       u32                        rate_limit;
+} ____cacheline_aligned_in_smp;
+
+struct mlx5e_xdpsq {
+       /* data path */
+
+       /* dirtied @rx completion */
+       u16                        cc;
+       u16                        pc;
+
+       struct mlx5e_cq            cq;
+
+       /* write@xmit, read@completion */
+       struct {
+               struct mlx5e_dma_info     *di;
+               bool                       doorbell;
+       } db;
+
+       /* read only */
+       struct mlx5_wq_cyc         wq;
+       void __iomem              *uar_map;
+       u32                        sqn;
+       struct device             *pdev;
+       __be32                     mkey_be;
+       u8                         min_inline_mode;
+       unsigned long              state;
+
+       /* control path */
+       struct mlx5_wq_ctrl        wq_ctrl;
+       struct mlx5e_channel      *channel;
+} ____cacheline_aligned_in_smp;
+
+struct mlx5e_icosq {
+       /* data path */
+
+       /* dirtied @completion */
+       u16                        cc;
+
+       /* dirtied @xmit */
+       u16                        pc ____cacheline_aligned_in_smp;
+       u32                        dma_fifo_pc;
+       u16                        prev_cc;
+
+       struct mlx5e_cq            cq;
+
+       /* write@xmit, read@completion */
+       struct {
+               struct mlx5e_sq_wqe_info *ico_wqe;
+       } db;
 
-typedef void (*mlx5e_fp_dealloc_wqe)(struct mlx5e_rq *rq, u16 ix);
+       /* read only */
+       struct mlx5_wq_cyc         wq;
+       void __iomem              *uar_map;
+       u32                        sqn;
+       u16                        edge;
+       struct device             *pdev;
+       __be32                     mkey_be;
+       unsigned long              state;
+
+       /* control path */
+       struct mlx5_wq_ctrl        wq_ctrl;
+       struct mlx5e_channel      *channel;
+} ____cacheline_aligned_in_smp;
+
+static inline bool
+mlx5e_wqc_has_room_for(struct mlx5_wq_cyc *wq, u16 cc, u16 pc, u16 n)
+{
+       return (((wq->sz_m1 & (cc - pc)) >= n) || (cc == pc));
+}
 
 struct mlx5e_dma_info {
        struct page     *page;
        dma_addr_t      addr;
 };
 
+struct mlx5e_umr_dma_info {
+       __be64                *mtt;
+       dma_addr_t             mtt_addr;
+       struct mlx5e_dma_info  dma_info[MLX5_MPWRQ_PAGES_PER_WQE];
+       struct mlx5e_umr_wqe   wqe;
+};
+
+struct mlx5e_mpw_info {
+       struct mlx5e_umr_dma_info umr;
+       u16 consumed_strides;
+       u16 skbs_frags[MLX5_MPWRQ_PAGES_PER_WQE];
+};
+
 struct mlx5e_rx_am_stats {
        int ppms; /* packets per msec */
        int epms; /* events per msec */
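mlx5e_wqc_has_room_for() above relies on unsigned wrap-around: cc and pc are free-running u16 counters, and masking their difference with sz_m1 (ring size minus one, sizes being powers of two) recovers the free-slot count. A standalone, user-space check of that arithmetic:

	#include <assert.h>
	#include <stdint.h>

	/* Ring of 8 entries (sz_m1 == 7).  cc (consumer) and pc (producer)
	 * are free-running and may wrap; the mask yields the free slots.
	 */
	static int has_room_for(uint16_t sz_m1, uint16_t cc, uint16_t pc, uint16_t n)
	{
		return ((uint16_t)(sz_m1 & (uint16_t)(cc - pc)) >= n) || (cc == pc);
	}

	int main(void)
	{
		assert(has_room_for(7, 0, 0, 8));	/* empty ring: all 8 free */
		assert(has_room_for(7, 2, 7, 3));	/* 5 in flight, 3 free */
		assert(!has_room_for(7, 2, 7, 4));	/* ...but not 4 */
		assert(has_room_for(7, 65534, 1, 5));	/* wrapped counters still work */
		return 0;
	}
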
@@ -347,6 +483,11 @@ struct mlx5e_page_cache {
        struct mlx5e_dma_info page_cache[MLX5E_CACHE_SIZE];
 };
 
+struct mlx5e_rq;
+typedef void (*mlx5e_fp_handle_rx_cqe)(struct mlx5e_rq*, struct mlx5_cqe64*);
+typedef int (*mlx5e_fp_alloc_wqe)(struct mlx5e_rq*, struct mlx5e_rx_wqe*, u16);
+typedef void (*mlx5e_fp_dealloc_wqe)(struct mlx5e_rq*, u16);
+
 struct mlx5e_rq {
        /* data path */
        struct mlx5_wq_ll      wq;
@@ -381,7 +522,10 @@ struct mlx5e_rq {
        u16                    rx_headroom;
 
        struct mlx5e_rx_am     am; /* Adaptive Moderation */
+
+       /* XDP */
        struct bpf_prog       *xdp_prog;
+       struct mlx5e_xdpsq     xdpsq;
 
        /* control */
        struct mlx5_wq_ctrl    wq_ctrl;
@@ -390,118 +534,10 @@ struct mlx5e_rq {
        u32                    mpwqe_num_strides;
        u32                    rqn;
        struct mlx5e_channel  *channel;
-       struct mlx5e_priv     *priv;
+       struct mlx5_core_dev  *mdev;
        struct mlx5_core_mkey  umr_mkey;
 } ____cacheline_aligned_in_smp;
 
-struct mlx5e_umr_dma_info {
-       __be64                *mtt;
-       dma_addr_t             mtt_addr;
-       struct mlx5e_dma_info  dma_info[MLX5_MPWRQ_PAGES_PER_WQE];
-       struct mlx5e_umr_wqe   wqe;
-};
-
-struct mlx5e_mpw_info {
-       struct mlx5e_umr_dma_info umr;
-       u16 consumed_strides;
-       u16 skbs_frags[MLX5_MPWRQ_PAGES_PER_WQE];
-};
-
-struct mlx5e_tx_wqe_info {
-       u32 num_bytes;
-       u8  num_wqebbs;
-       u8  num_dma;
-};
-
-enum mlx5e_dma_map_type {
-       MLX5E_DMA_MAP_SINGLE,
-       MLX5E_DMA_MAP_PAGE
-};
-
-struct mlx5e_sq_dma {
-       dma_addr_t              addr;
-       u32                     size;
-       enum mlx5e_dma_map_type type;
-};
-
-enum {
-       MLX5E_SQ_STATE_ENABLED,
-       MLX5E_SQ_STATE_BF_ENABLE,
-};
-
-struct mlx5e_sq_wqe_info {
-       u8  opcode;
-       u8  num_wqebbs;
-};
-
-enum mlx5e_sq_type {
-       MLX5E_SQ_TXQ,
-       MLX5E_SQ_ICO,
-       MLX5E_SQ_XDP
-};
-
-struct mlx5e_sq {
-       /* data path */
-
-       /* dirtied @completion */
-       u16                        cc;
-       u32                        dma_fifo_cc;
-
-       /* dirtied @xmit */
-       u16                        pc ____cacheline_aligned_in_smp;
-       u32                        dma_fifo_pc;
-       u16                        bf_offset;
-       u16                        prev_cc;
-       u8                         bf_budget;
-       struct mlx5e_sq_stats      stats;
-
-       struct mlx5e_cq            cq;
-
-       /* pointers to per tx element info: write@xmit, read@completion */
-       union {
-               struct {
-                       struct sk_buff           **skb;
-                       struct mlx5e_sq_dma       *dma_fifo;
-                       struct mlx5e_tx_wqe_info  *wqe_info;
-               } txq;
-               struct mlx5e_sq_wqe_info *ico_wqe;
-               struct {
-                       struct mlx5e_sq_wqe_info  *wqe_info;
-                       struct mlx5e_dma_info     *di;
-                       bool                       doorbell;
-               } xdp;
-       } db;
-
-       /* read only */
-       struct mlx5_wq_cyc         wq;
-       u32                        dma_fifo_mask;
-       void __iomem              *uar_map;
-       struct netdev_queue       *txq;
-       u32                        sqn;
-       u16                        bf_buf_size;
-       u16                        max_inline;
-       u8                         min_inline_mode;
-       u16                        edge;
-       struct device             *pdev;
-       struct mlx5e_tstamp       *tstamp;
-       __be32                     mkey_be;
-       unsigned long              state;
-
-       /* control path */
-       struct mlx5_wq_ctrl        wq_ctrl;
-       struct mlx5_sq_bfreg       bfreg;
-       struct mlx5e_channel      *channel;
-       int                        tc;
-       u32                        rate_limit;
-       u8                         type;
-} ____cacheline_aligned_in_smp;
-
-static inline bool mlx5e_sq_has_room_for(struct mlx5e_sq *sq, u16 n)
-{
-       return (((sq->wq.sz_m1 & (sq->cc - sq->pc)) >= n) ||
-               (sq->cc  == sq->pc));
-}
-
 enum channel_flags {
        MLX5E_CHANNEL_NAPI_SCHED = 1,
 };
@@ -509,9 +545,8 @@ enum channel_flags {
 struct mlx5e_channel {
        /* data path */
        struct mlx5e_rq            rq;
-       struct mlx5e_sq            xdp_sq;
-       struct mlx5e_sq            sq[MLX5E_MAX_NUM_TC];
-       struct mlx5e_sq            icosq;   /* internal control operations */
+       struct mlx5e_txqsq         sq[MLX5E_MAX_NUM_TC];
+       struct mlx5e_icosq         icosq;   /* internal control operations */
        bool                       xdp;
        struct napi_struct         napi;
        struct device             *pdev;
@@ -522,10 +557,18 @@ struct mlx5e_channel {
 
        /* control */
        struct mlx5e_priv         *priv;
+       struct mlx5_core_dev      *mdev;
+       struct mlx5e_tstamp       *tstamp;
        int                        ix;
        int                        cpu;
 };
 
+struct mlx5e_channels {
+       struct mlx5e_channel **c;
+       unsigned int           num;
+       struct mlx5e_params    params;
+};
+
 enum mlx5e_traffic_types {
        MLX5E_TT_IPV4_TCP,
        MLX5E_TT_IPV6_TCP,
@@ -675,34 +718,17 @@ enum {
        MLX5E_NIC_PRIO
 };
 
-struct mlx5e_profile {
-       void    (*init)(struct mlx5_core_dev *mdev,
-                       struct net_device *netdev,
-                       const struct mlx5e_profile *profile, void *ppriv);
-       void    (*cleanup)(struct mlx5e_priv *priv);
-       int     (*init_rx)(struct mlx5e_priv *priv);
-       void    (*cleanup_rx)(struct mlx5e_priv *priv);
-       int     (*init_tx)(struct mlx5e_priv *priv);
-       void    (*cleanup_tx)(struct mlx5e_priv *priv);
-       void    (*enable)(struct mlx5e_priv *priv);
-       void    (*disable)(struct mlx5e_priv *priv);
-       void    (*update_stats)(struct mlx5e_priv *priv);
-       int     (*max_nch)(struct mlx5_core_dev *mdev);
-       int     max_tc;
-};
-
 struct mlx5e_priv {
        /* priv data path fields - start */
-       struct mlx5e_sq            **txq_to_sq_map;
-       int channeltc_to_txq_map[MLX5E_MAX_NUM_CHANNELS][MLX5E_MAX_NUM_TC];
-       struct bpf_prog *xdp_prog;
+       struct mlx5e_txqsq *txq2sq[MLX5E_MAX_NUM_CHANNELS * MLX5E_MAX_NUM_TC];
+       int channel_tc2txq[MLX5E_MAX_NUM_CHANNELS][MLX5E_MAX_NUM_TC];
        /* priv data path fields - end */
 
        unsigned long              state;
        struct mutex               state_lock; /* Protects Interface state */
        struct mlx5e_rq            drop_rq;
 
-       struct mlx5e_channel     **channel;
+       struct mlx5e_channels      channels;
        u32                        tisn[MLX5E_MAX_NUM_TC];
        struct mlx5e_rqt           indir_rqt;
        struct mlx5e_tir           indir_tir[MLX5E_NUM_INDIR_TIRS];
@@ -712,7 +738,6 @@ struct mlx5e_priv {
        struct mlx5e_flow_steering fs;
        struct mlx5e_vxlan_db      vxlan;
 
-       struct mlx5e_params        params;
        struct workqueue_struct    *wq;
        struct work_struct         update_carrier_work;
        struct work_struct         set_rx_mode_work;
@@ -732,9 +757,24 @@ struct mlx5e_priv {
        void                      *ppriv;
 };
 
+struct mlx5e_profile {
+       void    (*init)(struct mlx5_core_dev *mdev,
+                       struct net_device *netdev,
+                       const struct mlx5e_profile *profile, void *ppriv);
+       void    (*cleanup)(struct mlx5e_priv *priv);
+       int     (*init_rx)(struct mlx5e_priv *priv);
+       void    (*cleanup_rx)(struct mlx5e_priv *priv);
+       int     (*init_tx)(struct mlx5e_priv *priv);
+       void    (*cleanup_tx)(struct mlx5e_priv *priv);
+       void    (*enable)(struct mlx5e_priv *priv);
+       void    (*disable)(struct mlx5e_priv *priv);
+       void    (*update_stats)(struct mlx5e_priv *priv);
+       int     (*max_nch)(struct mlx5_core_dev *mdev);
+       int     max_tc;
+};
+
 void mlx5e_build_ptys2ethtool_map(void);
 
-void mlx5e_send_nop(struct mlx5e_sq *sq, bool notify_hw);
 u16 mlx5e_select_queue(struct net_device *dev, struct sk_buff *skb,
                       void *accel_priv, select_queue_fallback_t fallback);
 netdev_tx_t mlx5e_xmit(struct sk_buff *skb, struct net_device *dev);
@@ -744,7 +784,9 @@ void mlx5e_cq_error_event(struct mlx5_core_cq *mcq, enum mlx5_event event);
 int mlx5e_napi_poll(struct napi_struct *napi, int budget);
 bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq, int napi_budget);
 int mlx5e_poll_rx_cq(struct mlx5e_cq *cq, int budget);
-void mlx5e_free_sq_descs(struct mlx5e_sq *sq);
+bool mlx5e_poll_xdpsq_cq(struct mlx5e_cq *cq);
+void mlx5e_free_txqsq_descs(struct mlx5e_txqsq *sq);
+void mlx5e_free_xdpsq_descs(struct mlx5e_xdpsq *sq);
 
 void mlx5e_page_release(struct mlx5e_rq *rq, struct mlx5e_dma_info *dma_info,
                        bool recycle);
@@ -792,7 +834,7 @@ void mlx5e_pps_event_handler(struct mlx5e_priv *priv,
                             struct ptp_clock_event *event);
 int mlx5e_hwstamp_set(struct net_device *dev, struct ifreq *ifr);
 int mlx5e_hwstamp_get(struct net_device *dev, struct ifreq *ifr);
-void mlx5e_modify_rx_cqe_compression_locked(struct mlx5e_priv *priv, bool val);
+int mlx5e_modify_rx_cqe_compression_locked(struct mlx5e_priv *priv, bool val);
 
 int mlx5e_vlan_rx_add_vid(struct net_device *dev, __always_unused __be16 proto,
                          u16 vid);
@@ -801,14 +843,38 @@ int mlx5e_vlan_rx_kill_vid(struct net_device *dev, __always_unused __be16 proto,
 void mlx5e_enable_vlan_filter(struct mlx5e_priv *priv);
 void mlx5e_disable_vlan_filter(struct mlx5e_priv *priv);
 
-int mlx5e_modify_rqs_vsd(struct mlx5e_priv *priv, bool vsd);
+struct mlx5e_redirect_rqt_param {
+       bool is_rss;
+       union {
+               u32 rqn; /* Direct RQN (Non-RSS) */
+               struct {
+                       u8 hfunc;
+                       struct mlx5e_channels *channels;
+               } rss; /* RSS data */
+       };
+};
 
-int mlx5e_redirect_rqt(struct mlx5e_priv *priv, u32 rqtn, int sz, int ix);
-void mlx5e_build_indir_tir_ctx_hash(struct mlx5e_priv *priv, void *tirc,
-                                   enum mlx5e_traffic_types tt);
+int mlx5e_redirect_rqt(struct mlx5e_priv *priv, u32 rqtn, int sz,
+                      struct mlx5e_redirect_rqt_param rrp);
+void mlx5e_build_indir_tir_ctx_hash(struct mlx5e_params *params,
+                                   enum mlx5e_traffic_types tt,
+                                   void *tirc);
 
 int mlx5e_open_locked(struct net_device *netdev);
 int mlx5e_close_locked(struct net_device *netdev);
+
+int mlx5e_open_channels(struct mlx5e_priv *priv,
+                       struct mlx5e_channels *chs);
+void mlx5e_close_channels(struct mlx5e_channels *chs);
+
+/* Function pointer used to modify HW settings while
+ * switching channels
+ */
+typedef int (*mlx5e_fp_hw_modify)(struct mlx5e_priv *priv);
+void mlx5e_switch_priv_channels(struct mlx5e_priv *priv,
+                               struct mlx5e_channels *new_chs,
+                               mlx5e_fp_hw_modify hw_modify);
+
 void mlx5e_build_default_indir_rqt(struct mlx5_core_dev *mdev,
                                   u32 *indirection_rqt, int len,
                                   int num_channels);
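struct mlx5e_redirect_rqt_param above folds the direct and RSS redirect cases into one tagged union. A hedged sketch of how a caller would fill each leg (the RQN and RQT values are placeholders):

	/* Direct (non-RSS): point the RQT at one RQ, e.g. the drop RQ. */
	struct mlx5e_redirect_rqt_param direct_rrp = {
		.is_rss = false,
		.rqn    = drop_rqn,		/* placeholder RQN */
	};

	/* RSS: spread over the current channel set with Toeplitz hashing. */
	struct mlx5e_redirect_rqt_param rss_rrp = {
		.is_rss = true,
		.rss    = {
			.hfunc    = ETH_RSS_HASH_TOP,
			.channels = &priv->channels,
		},
	};

	mlx5e_redirect_rqt(priv, rqtn, MLX5E_INDIR_RQT_SIZE, rss_rrp);
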
@@ -816,30 +882,43 @@ int mlx5e_get_max_linkspeed(struct mlx5_core_dev *mdev, u32 *speed);
 
 void mlx5e_set_rx_cq_mode_params(struct mlx5e_params *params,
                                 u8 cq_period_mode);
-void mlx5e_set_rq_type_params(struct mlx5e_priv *priv, u8 rq_type);
+void mlx5e_set_rq_type_params(struct mlx5_core_dev *mdev,
+                             struct mlx5e_params *params, u8 rq_type);
 
-static inline void mlx5e_tx_notify_hw(struct mlx5e_sq *sq,
-                                     struct mlx5_wqe_ctrl_seg *ctrl, int bf_sz)
+static inline
+struct mlx5e_tx_wqe *mlx5e_post_nop(struct mlx5_wq_cyc *wq, u32 sqn, u16 *pc)
 {
-       u16 ofst = sq->bf_offset;
+       u16                         pi   = *pc & wq->sz_m1;
+       struct mlx5e_tx_wqe        *wqe  = mlx5_wq_cyc_get_wqe(wq, pi);
+       struct mlx5_wqe_ctrl_seg   *cseg = &wqe->ctrl;
+
+       memset(cseg, 0, sizeof(*cseg));
+
+       cseg->opmod_idx_opcode = cpu_to_be32((*pc << 8) | MLX5_OPCODE_NOP);
+       cseg->qpn_ds           = cpu_to_be32((sqn << 8) | 0x01);
+
+       (*pc)++;
 
+       return wqe;
+}
+
+static inline
+void mlx5e_notify_hw(struct mlx5_wq_cyc *wq, u16 pc,
+                    void __iomem *uar_map,
+                    struct mlx5_wqe_ctrl_seg *ctrl)
+{
+       ctrl->fm_ce_se = MLX5_WQE_CTRL_CQ_UPDATE;
        /* ensure wqe is visible to device before updating doorbell record */
        dma_wmb();
 
-       *sq->wq.db = cpu_to_be32(sq->pc);
+       *wq->db = cpu_to_be32(pc);
 
        /* ensure doorbell record is visible to device before ringing the
         * doorbell
         */
        wmb();
-       if (bf_sz)
-               __iowrite64_copy(sq->uar_map + ofst, ctrl, bf_sz);
-       else
-               mlx5_write64((__be32 *)ctrl, sq->uar_map + ofst, NULL);
-       /* flush the write-combining mapped buffer */
-       wmb();
 
-       sq->bf_offset ^= sq->bf_buf_size;
+       mlx5_write64((__be32 *)ctrl, uar_map, NULL);
 }
 
 static inline void mlx5e_cq_arm(struct mlx5e_cq *cq)
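mlx5e_post_nop() and mlx5e_notify_hw() above replace the per-SQ blue-flame path with one shared doorbell helper; a typical caller posts a WQE (here a NOP) and rings once. A minimal usage sketch against the signatures above:

	/* Pad the SQ with a NOP and ring the doorbell once.  Each of the
	 * split SQ types embeds wq, pc, sqn and uar_map, which is all the
	 * shared helpers consume.
	 */
	static void sq_post_nop_and_ring(struct mlx5e_txqsq *sq)
	{
		struct mlx5e_tx_wqe *nop = mlx5e_post_nop(&sq->wq, sq->sqn, &sq->pc);

		mlx5e_notify_hw(&sq->wq, sq->pc, sq->uar_map, &nop->ctrl);
	}
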
@@ -895,8 +974,7 @@ void mlx5e_destroy_tir(struct mlx5_core_dev *mdev,
                       struct mlx5e_tir *tir);
 int mlx5e_create_mdev_resources(struct mlx5_core_dev *mdev);
 void mlx5e_destroy_mdev_resources(struct mlx5_core_dev *mdev);
-int mlx5e_refresh_tirs_self_loopback(struct mlx5_core_dev *mdev,
-                                    bool enable_uc_lb);
+int mlx5e_refresh_tirs(struct mlx5e_priv *priv, bool enable_uc_lb);
 
 struct mlx5_eswitch_rep;
 int mlx5e_vport_rep_load(struct mlx5_eswitch *esw,
@@ -928,10 +1006,6 @@ void mlx5e_destroy_netdev(struct mlx5_core_dev *mdev, struct mlx5e_priv *priv);
 int mlx5e_attach_netdev(struct mlx5_core_dev *mdev, struct net_device *netdev);
 void mlx5e_detach_netdev(struct mlx5_core_dev *mdev, struct net_device *netdev);
 u32 mlx5e_choose_lro_timeout(struct mlx5_core_dev *mdev, u32 wanted_timeout);
-void mlx5e_add_vxlan_port(struct net_device *netdev,
-                         struct udp_tunnel_info *ti);
-void mlx5e_del_vxlan_port(struct net_device *netdev,
-                         struct udp_tunnel_info *ti);
 
 int mlx5e_get_offload_stats(int attr_id, const struct net_device *dev,
                            void *sp);
index 68419a01db36e33765b1cc366455da8b55420da7..c4e9cc79f5c77054029748c6d9785d62f82c8b41 100644 (file)
@@ -174,13 +174,9 @@ static int arfs_add_default_rule(struct mlx5e_priv *priv,
                                 enum arfs_type type)
 {
        struct arfs_table *arfs_t = &priv->fs.arfs.arfs_tables[type];
-       struct mlx5_flow_act flow_act = {
-               .action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
-               .flow_tag = MLX5_FS_DEFAULT_FLOW_TAG,
-               .encap_id = 0,
-       };
-       struct mlx5_flow_destination dest;
        struct mlx5e_tir *tir = priv->indir_tir;
+       struct mlx5_flow_destination dest;
+       MLX5_DECLARE_FLOW_ACT(flow_act);
        struct mlx5_flow_spec *spec;
        int err = 0;
 
@@ -469,15 +465,11 @@ static struct arfs_table *arfs_get_table(struct mlx5e_arfs_tables *arfs,
 static struct mlx5_flow_handle *arfs_add_rule(struct mlx5e_priv *priv,
                                              struct arfs_rule *arfs_rule)
 {
-       struct mlx5_flow_act flow_act = {
-               .action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
-               .flow_tag = MLX5_FS_DEFAULT_FLOW_TAG,
-               .encap_id = 0,
-       };
        struct mlx5e_arfs_tables *arfs = &priv->fs.arfs;
        struct arfs_tuple *tuple = &arfs_rule->tuple;
        struct mlx5_flow_handle *rule = NULL;
        struct mlx5_flow_destination dest;
+       MLX5_DECLARE_FLOW_ACT(flow_act);
        struct arfs_table *arfs_table;
        struct mlx5_flow_spec *spec;
        struct mlx5_flow_table *ft;
index 37e66eef6fb5ea62576e0a8b012b04e6ba579d56..e706a87fc8b2b06b8f620a96f66d316e90070e04 100644 (file)
@@ -90,6 +90,7 @@ int mlx5e_hwstamp_set(struct net_device *dev, struct ifreq *ifr)
 {
        struct mlx5e_priv *priv = netdev_priv(dev);
        struct hwtstamp_config config;
+       int err;
 
        if (!MLX5_CAP_GEN(priv->mdev, device_frequency_khz))
                return -EOPNOTSUPP;
@@ -111,7 +112,7 @@ int mlx5e_hwstamp_set(struct net_device *dev, struct ifreq *ifr)
        switch (config.rx_filter) {
        case HWTSTAMP_FILTER_NONE:
                /* Reset CQE compression to Admin default */
-               mlx5e_modify_rx_cqe_compression_locked(priv, priv->params.rx_cqe_compress_def);
+               mlx5e_modify_rx_cqe_compression_locked(priv, priv->channels.params.rx_cqe_compress_def);
                break;
        case HWTSTAMP_FILTER_ALL:
        case HWTSTAMP_FILTER_SOME:
@@ -129,7 +130,12 @@ int mlx5e_hwstamp_set(struct net_device *dev, struct ifreq *ifr)
        case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
                /* Disable CQE compression */
                netdev_warn(dev, "Disabling cqe compression");
-               mlx5e_modify_rx_cqe_compression_locked(priv, false);
+               err = mlx5e_modify_rx_cqe_compression_locked(priv, false);
+               if (err) {
+                       netdev_err(dev, "Failed disabling cqe compression err=%d\n", err);
+                       mutex_unlock(&priv->state_lock);
+                       return err;
+               }
                config.rx_filter = HWTSTAMP_FILTER_ALL;
                break;
        default:
index bd898d8deda0ce0c4d6dca7f1ac26722eacf96c4..f1f17f7a3cd049de412abdff58b00f314160be22 100644 (file)
@@ -107,10 +107,18 @@ int mlx5e_create_mdev_resources(struct mlx5_core_dev *mdev)
                goto err_dealloc_transport_domain;
        }
 
+       err = mlx5_alloc_bfreg(mdev, &res->bfreg, false, false);
+       if (err) {
+               mlx5_core_err(mdev, "alloc bfreg failed, %d\n", err);
+               goto err_destroy_mkey;
+       }
+
        INIT_LIST_HEAD(&mdev->mlx5e_res.td.tirs_list);
 
        return 0;
 
+err_destroy_mkey:
+       mlx5_core_destroy_mkey(mdev, &res->mkey);
 err_dealloc_transport_domain:
        mlx5_core_dealloc_transport_domain(mdev, res->td.tdn);
 err_dealloc_pd:
@@ -122,23 +130,26 @@ void mlx5e_destroy_mdev_resources(struct mlx5_core_dev *mdev)
 {
        struct mlx5e_resources *res = &mdev->mlx5e_res;
 
+       mlx5_free_bfreg(mdev, &res->bfreg);
        mlx5_core_destroy_mkey(mdev, &res->mkey);
        mlx5_core_dealloc_transport_domain(mdev, res->td.tdn);
        mlx5_core_dealloc_pd(mdev, res->pdn);
 }
 
-int mlx5e_refresh_tirs_self_loopback(struct mlx5_core_dev *mdev,
-                                    bool enable_uc_lb)
+int mlx5e_refresh_tirs(struct mlx5e_priv *priv, bool enable_uc_lb)
 {
+       struct mlx5_core_dev *mdev = priv->mdev;
        struct mlx5e_tir *tir;
-       void *in;
+       int err = -ENOMEM;
+       u32 tirn = 0;
        int inlen;
-       int err = 0;
+       void *in;
+
        inlen = MLX5_ST_SZ_BYTES(modify_tir_in);
        in = mlx5_vzalloc(inlen);
        if (!in)
-               return -ENOMEM;
+               goto out;
 
        if (enable_uc_lb)
                MLX5_SET(modify_tir_in, in, ctx.self_lb_block,
@@ -147,13 +158,16 @@ int mlx5e_refresh_tirs_self_loopback(struct mlx5_core_dev *mdev,
        MLX5_SET(modify_tir_in, in, bitmask.self_lb_en, 1);
 
        list_for_each_entry(tir, &mdev->mlx5e_res.td.tirs_list, list) {
-               err = mlx5_core_modify_tir(mdev, tir->tirn, in, inlen);
+               tirn = tir->tirn;
+               err = mlx5_core_modify_tir(mdev, tirn, in, inlen);
                if (err)
                        goto out;
        }
 
 out:
        kvfree(in);
+       if (err)
+               netdev_err(priv->netdev, "refresh tir(0x%x) failed, %d\n", tirn, err);
 
        return err;
 }
index 0523ed47f597c715296c5ea843245625bf3dac62..8fa23f6a1f67f6494168455a58c5a7b1ee35cae5 100644 (file)
@@ -302,6 +302,9 @@ static u8 mlx5e_dcbnl_setdcbx(struct net_device *dev, u8 mode)
        struct mlx5e_priv *priv = netdev_priv(dev);
        struct mlx5e_dcbx *dcbx = &priv->dcbx;
 
+       if (mode & DCB_CAP_DCBX_LLD_MANAGED)
+               return 1;
+
        if ((!mode) && MLX5_CAP_GEN(priv->mdev, dcbx)) {
                if (dcbx->mode == MLX5E_DCBX_PARAM_VER_OPER_AUTO)
                        return 0;
@@ -315,13 +318,10 @@ static u8 mlx5e_dcbnl_setdcbx(struct net_device *dev, u8 mode)
                return 1;
        }
 
-       if (mlx5e_dcbnl_switch_to_host_mode(netdev_priv(dev)))
+       if (!(mode & DCB_CAP_DCBX_HOST))
                return 1;
 
-       if ((mode & DCB_CAP_DCBX_LLD_MANAGED) ||
-           !(mode & DCB_CAP_DCBX_VER_CEE) ||
-           !(mode & DCB_CAP_DCBX_VER_IEEE) ||
-           !(mode & DCB_CAP_DCBX_HOST))
+       if (mlx5e_dcbnl_switch_to_host_mode(netdev_priv(dev)))
                return 1;
 
        return 0;
index a004a5a1a4c22a742ef3f9939769c6b5c9445f46..ce7b09d72ff68b1850f6df3845f2fd9ae7f57bbd 100644 (file)
@@ -42,8 +42,9 @@ static void mlx5e_get_drvinfo(struct net_device *dev,
        strlcpy(drvinfo->version, DRIVER_VERSION " (" DRIVER_RELDATE ")",
                sizeof(drvinfo->version));
        snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
-                "%d.%d.%d",
-                fw_rev_maj(mdev), fw_rev_min(mdev), fw_rev_sub(mdev));
+                "%d.%d.%04d (%.16s)",
+                fw_rev_maj(mdev), fw_rev_min(mdev), fw_rev_sub(mdev),
+                mdev->board_id);
        strlcpy(drvinfo->bus_info, pci_name(mdev->pdev),
                sizeof(drvinfo->bus_info));
 }
@@ -152,12 +153,9 @@ static bool mlx5e_query_global_pause_combined(struct mlx5e_priv *priv)
 }
 
 #define MLX5E_NUM_Q_CNTRS(priv) (NUM_Q_COUNTERS * (!!priv->q_counter))
-#define MLX5E_NUM_RQ_STATS(priv) \
-       (NUM_RQ_STATS * priv->params.num_channels * \
-        test_bit(MLX5E_STATE_OPENED, &priv->state))
+#define MLX5E_NUM_RQ_STATS(priv) (NUM_RQ_STATS * (priv)->channels.num)
 #define MLX5E_NUM_SQ_STATS(priv) \
-       (NUM_SQ_STATS * priv->params.num_channels * priv->params.num_tc * \
-        test_bit(MLX5E_STATE_OPENED, &priv->state))
+       (NUM_SQ_STATS * (priv)->channels.num * (priv)->channels.params.num_tc)
 #define MLX5E_NUM_PFC_COUNTERS(priv) \
        ((mlx5e_query_global_pause_combined(priv) + hweight8(mlx5e_query_pfc_combined(priv))) * \
          NUM_PPORT_PER_PRIO_PFC_COUNTERS)
@@ -262,17 +260,17 @@ static void mlx5e_fill_stats_strings(struct mlx5e_priv *priv, uint8_t *data)
                return;
 
        /* per channel counters */
-       for (i = 0; i < priv->params.num_channels; i++)
+       for (i = 0; i < priv->channels.num; i++)
                for (j = 0; j < NUM_RQ_STATS; j++)
                        sprintf(data + (idx++) * ETH_GSTRING_LEN,
                                rq_stats_desc[j].format, i);
 
-       for (tc = 0; tc < priv->params.num_tc; tc++)
-               for (i = 0; i < priv->params.num_channels; i++)
+       for (tc = 0; tc < priv->channels.params.num_tc; tc++)
+               for (i = 0; i < priv->channels.num; i++)
                        for (j = 0; j < NUM_SQ_STATS; j++)
                                sprintf(data + (idx++) * ETH_GSTRING_LEN,
                                        sq_stats_desc[j].format,
-                                       priv->channeltc_to_txq_map[i][tc]);
+                                       priv->channel_tc2txq[i][tc]);
 }
 
 static void mlx5e_get_strings(struct net_device *dev,
@@ -303,6 +301,7 @@ static void mlx5e_get_ethtool_stats(struct net_device *dev,
                                    struct ethtool_stats *stats, u64 *data)
 {
        struct mlx5e_priv *priv = netdev_priv(dev);
+       struct mlx5e_channels *channels;
        struct mlx5_priv *mlx5_priv;
        int i, j, tc, prio, idx = 0;
        unsigned long pfc_combined;
@@ -313,6 +312,7 @@ static void mlx5e_get_ethtool_stats(struct net_device *dev,
        mutex_lock(&priv->state_lock);
        if (test_bit(MLX5E_STATE_OPENED, &priv->state))
                mlx5e_update_stats(priv);
+       channels = &priv->channels;
        mutex_unlock(&priv->state_lock);
 
        for (i = 0; i < NUM_SW_COUNTERS; i++)
@@ -382,16 +382,16 @@ static void mlx5e_get_ethtool_stats(struct net_device *dev,
                return;
 
        /* per channel counters */
-       for (i = 0; i < priv->params.num_channels; i++)
+       for (i = 0; i < channels->num; i++)
                for (j = 0; j < NUM_RQ_STATS; j++)
                        data[idx++] =
-                              MLX5E_READ_CTR64_CPU(&priv->channel[i]->rq.stats,
+                              MLX5E_READ_CTR64_CPU(&channels->c[i]->rq.stats,
                                                    rq_stats_desc, j);
 
-       for (tc = 0; tc < priv->params.num_tc; tc++)
-               for (i = 0; i < priv->params.num_channels; i++)
+       for (tc = 0; tc < priv->channels.params.num_tc; tc++)
+               for (i = 0; i < channels->num; i++)
                        for (j = 0; j < NUM_SQ_STATS; j++)
-                               data[idx++] = MLX5E_READ_CTR64_CPU(&priv->channel[i]->sq[tc].stats,
+                               data[idx++] = MLX5E_READ_CTR64_CPU(&channels->c[i]->sq[tc].stats,
                                                                   sq_stats_desc, j);
 }
 
@@ -406,8 +406,8 @@ static u32 mlx5e_rx_wqes_to_packets(struct mlx5e_priv *priv, int rq_wq_type,
        if (rq_wq_type != MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ)
                return num_wqe;
 
-       stride_size = 1 << priv->params.mpwqe_log_stride_sz;
-       num_strides = 1 << priv->params.mpwqe_log_num_strides;
+       stride_size = 1 << priv->channels.params.mpwqe_log_stride_sz;
+       num_strides = 1 << priv->channels.params.mpwqe_log_num_strides;
        wqe_size = stride_size * num_strides;
 
        packets_per_wqe = wqe_size /
@@ -427,8 +427,8 @@ static u32 mlx5e_packets_to_rx_wqes(struct mlx5e_priv *priv, int rq_wq_type,
        if (rq_wq_type != MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ)
                return num_packets;
 
-       stride_size = 1 << priv->params.mpwqe_log_stride_sz;
-       num_strides = 1 << priv->params.mpwqe_log_num_strides;
+       stride_size = 1 << priv->channels.params.mpwqe_log_stride_sz;
+       num_strides = 1 << priv->channels.params.mpwqe_log_num_strides;
        wqe_size = stride_size * num_strides;
 
        num_packets = (1 << order_base_2(num_packets));
@@ -443,26 +443,25 @@ static void mlx5e_get_ringparam(struct net_device *dev,
                                struct ethtool_ringparam *param)
 {
        struct mlx5e_priv *priv = netdev_priv(dev);
-       int rq_wq_type = priv->params.rq_wq_type;
+       int rq_wq_type = priv->channels.params.rq_wq_type;
 
        param->rx_max_pending = mlx5e_rx_wqes_to_packets(priv, rq_wq_type,
                                                         1 << mlx5_max_log_rq_size(rq_wq_type));
        param->tx_max_pending = 1 << MLX5E_PARAMS_MAXIMUM_LOG_SQ_SIZE;
        param->rx_pending = mlx5e_rx_wqes_to_packets(priv, rq_wq_type,
-                                                    1 << priv->params.log_rq_size);
-       param->tx_pending     = 1 << priv->params.log_sq_size;
+                                                    1 << priv->channels.params.log_rq_size);
+       param->tx_pending     = 1 << priv->channels.params.log_sq_size;
 }
 
 static int mlx5e_set_ringparam(struct net_device *dev,
                               struct ethtool_ringparam *param)
 {
        struct mlx5e_priv *priv = netdev_priv(dev);
-       bool was_opened;
-       int rq_wq_type = priv->params.rq_wq_type;
+       int rq_wq_type = priv->channels.params.rq_wq_type;
+       struct mlx5e_channels new_channels = {};
        u32 rx_pending_wqes;
        u32 min_rq_size;
        u32 max_rq_size;
-       u16 min_rx_wqes;
        u8 log_rq_size;
        u8 log_sq_size;
        u32 num_mtts;
@@ -500,7 +499,7 @@ static int mlx5e_set_ringparam(struct net_device *dev,
        }
 
        num_mtts = MLX5E_REQUIRED_MTTS(rx_pending_wqes);
-       if (priv->params.rq_wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ &&
+       if (priv->channels.params.rq_wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ &&
            !MLX5E_VALID_NUM_MTTS(num_mtts)) {
                netdev_info(dev, "%s: rx_pending (%d) request can't be satisfied, try to reduce.\n",
                            __func__, param->rx_pending);
@@ -522,26 +521,29 @@ static int mlx5e_set_ringparam(struct net_device *dev,
 
        log_rq_size = order_base_2(rx_pending_wqes);
        log_sq_size = order_base_2(param->tx_pending);
-       min_rx_wqes = mlx5_min_rx_wqes(rq_wq_type, rx_pending_wqes);
 
-       if (log_rq_size == priv->params.log_rq_size &&
-           log_sq_size == priv->params.log_sq_size &&
-           min_rx_wqes == priv->params.min_rx_wqes)
+       if (log_rq_size == priv->channels.params.log_rq_size &&
+           log_sq_size == priv->channels.params.log_sq_size)
                return 0;
 
        mutex_lock(&priv->state_lock);
 
-       was_opened = test_bit(MLX5E_STATE_OPENED, &priv->state);
-       if (was_opened)
-               mlx5e_close_locked(dev);
+       new_channels.params = priv->channels.params;
+       new_channels.params.log_rq_size = log_rq_size;
+       new_channels.params.log_sq_size = log_sq_size;
 
-       priv->params.log_rq_size = log_rq_size;
-       priv->params.log_sq_size = log_sq_size;
-       priv->params.min_rx_wqes = min_rx_wqes;
+       if (!test_bit(MLX5E_STATE_OPENED, &priv->state)) {
+               priv->channels.params = new_channels.params;
+               goto unlock;
+       }
+
+       err = mlx5e_open_channels(priv, &new_channels);
+       if (err)
+               goto unlock;
 
-       if (was_opened)
-               err = mlx5e_open_locked(dev);
+       mlx5e_switch_priv_channels(priv, &new_channels, NULL);
 
+unlock:
        mutex_unlock(&priv->state_lock);
 
        return err;
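
This set_ringparam conversion is the template for every ethtool handler in this series: copy the live configuration into a scratch mlx5e_channels, edit the copy, and if the interface is closed just store the new parameters; if it is open, bring up the replacement channels first and only switch traffic over (closing the old set) once that succeeded, so a failed resize leaves the running channels untouched. A self-contained sketch of the pattern with toy types (mlx5e_open_channels() and mlx5e_switch_priv_channels() are the real driver entry points; everything below is illustrative):

struct params   { int log_rq_size, log_sq_size; };
struct channels { struct params params; int hw_handle; };

static int open_channels(struct channels *ch)
{
	ch->hw_handle = 1;	/* allocate + create the new HW objects */
	return 0;
}

static void switch_channels(struct channels *live, struct channels *fresh)
{
	struct channels old = *live;	/* traffic moves to the fresh set */

	*live = *fresh;
	old.hw_handle = 0;		/* the old set is closed here */
}

static int set_ring_sketch(struct channels *live, int opened,
			   int log_rq, int log_sq)
{
	struct channels new_ch = { { 0, 0 }, 0 };

	new_ch.params = live->params;		/* start from the live config */
	new_ch.params.log_rq_size = log_rq;
	new_ch.params.log_sq_size = log_sq;

	if (!opened) {				/* closed: just stash params */
		live->params = new_ch.params;
		return 0;
	}

	if (open_channels(&new_ch))		/* failure: live is untouched */
		return -1;

	switch_channels(live, &new_ch);
	return 0;
}

The same prepare/open/switch sequence repeats below in set_channels, set_coalesce, set_tunable and the priv-flag handlers.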
@@ -553,7 +555,7 @@ static void mlx5e_get_channels(struct net_device *dev,
        struct mlx5e_priv *priv = netdev_priv(dev);
 
        ch->max_combined   = priv->profile->max_nch(priv->mdev);
-       ch->combined_count = priv->params.num_channels;
+       ch->combined_count = priv->channels.params.num_channels;
 }
 
 static int mlx5e_set_channels(struct net_device *dev,
@@ -561,8 +563,8 @@ static int mlx5e_set_channels(struct net_device *dev,
 {
        struct mlx5e_priv *priv = netdev_priv(dev);
        unsigned int count = ch->combined_count;
+       struct mlx5e_channels new_channels = {};
        bool arfs_enabled;
-       bool was_opened;
        int err = 0;
 
        if (!count) {
@@ -571,27 +573,32 @@ static int mlx5e_set_channels(struct net_device *dev,
                return -EINVAL;
        }
 
-       if (priv->params.num_channels == count)
+       if (priv->channels.params.num_channels == count)
                return 0;
 
        mutex_lock(&priv->state_lock);
 
-       was_opened = test_bit(MLX5E_STATE_OPENED, &priv->state);
-       if (was_opened)
-               mlx5e_close_locked(dev);
+       new_channels.params = priv->channels.params;
+       new_channels.params.num_channels = count;
+       mlx5e_build_default_indir_rqt(priv->mdev, new_channels.params.indirection_rqt,
+                                     MLX5E_INDIR_RQT_SIZE, count);
+
+       if (!test_bit(MLX5E_STATE_OPENED, &priv->state)) {
+               priv->channels.params = new_channels.params;
+               goto out;
+       }
+
+       /* Create fresh channels with new parameters */
+       err = mlx5e_open_channels(priv, &new_channels);
+       if (err)
+               goto out;
 
        arfs_enabled = dev->features & NETIF_F_NTUPLE;
        if (arfs_enabled)
                mlx5e_arfs_disable(priv);
 
-       priv->params.num_channels = count;
-       mlx5e_build_default_indir_rqt(priv->mdev, priv->params.indirection_rqt,
-                                     MLX5E_INDIR_RQT_SIZE, count);
-
-       if (was_opened)
-               err = mlx5e_open_locked(dev);
-       if (err)
-               goto out;
+       /* Switch to new channels, set new parameters and close old ones */
+       mlx5e_switch_priv_channels(priv, &new_channels, NULL);
 
        if (arfs_enabled) {
                err = mlx5e_arfs_enable(priv);
@@ -614,49 +621,24 @@ static int mlx5e_get_coalesce(struct net_device *netdev,
        if (!MLX5_CAP_GEN(priv->mdev, cq_moderation))
                return -EOPNOTSUPP;
 
-       coal->rx_coalesce_usecs       = priv->params.rx_cq_moderation.usec;
-       coal->rx_max_coalesced_frames = priv->params.rx_cq_moderation.pkts;
-       coal->tx_coalesce_usecs       = priv->params.tx_cq_moderation.usec;
-       coal->tx_max_coalesced_frames = priv->params.tx_cq_moderation.pkts;
-       coal->use_adaptive_rx_coalesce = priv->params.rx_am_enabled;
+       coal->rx_coalesce_usecs       = priv->channels.params.rx_cq_moderation.usec;
+       coal->rx_max_coalesced_frames = priv->channels.params.rx_cq_moderation.pkts;
+       coal->tx_coalesce_usecs       = priv->channels.params.tx_cq_moderation.usec;
+       coal->tx_max_coalesced_frames = priv->channels.params.tx_cq_moderation.pkts;
+       coal->use_adaptive_rx_coalesce = priv->channels.params.rx_am_enabled;
 
        return 0;
 }
 
-static int mlx5e_set_coalesce(struct net_device *netdev,
-                             struct ethtool_coalesce *coal)
+static void
+mlx5e_set_priv_channels_coalesce(struct mlx5e_priv *priv, struct ethtool_coalesce *coal)
 {
-       struct mlx5e_priv *priv    = netdev_priv(netdev);
        struct mlx5_core_dev *mdev = priv->mdev;
-       struct mlx5e_channel *c;
-       bool restart =
-               !!coal->use_adaptive_rx_coalesce != priv->params.rx_am_enabled;
-       bool was_opened;
-       int err = 0;
        int tc;
        int i;
 
-       if (!MLX5_CAP_GEN(mdev, cq_moderation))
-               return -EOPNOTSUPP;
-
-       mutex_lock(&priv->state_lock);
-
-       was_opened = test_bit(MLX5E_STATE_OPENED, &priv->state);
-       if (was_opened && restart) {
-               mlx5e_close_locked(netdev);
-               priv->params.rx_am_enabled = !!coal->use_adaptive_rx_coalesce;
-       }
-
-       priv->params.tx_cq_moderation.usec = coal->tx_coalesce_usecs;
-       priv->params.tx_cq_moderation.pkts = coal->tx_max_coalesced_frames;
-       priv->params.rx_cq_moderation.usec = coal->rx_coalesce_usecs;
-       priv->params.rx_cq_moderation.pkts = coal->rx_max_coalesced_frames;
-
-       if (!was_opened || restart)
-               goto out;
-
-       for (i = 0; i < priv->params.num_channels; ++i) {
-               c = priv->channel[i];
+       for (i = 0; i < priv->channels.num; ++i) {
+               struct mlx5e_channel *c = priv->channels.c[i];
 
                for (tc = 0; tc < c->num_tc; tc++) {
                        mlx5_core_modify_cq_moderation(mdev,
@@ -669,11 +651,50 @@ static int mlx5e_set_coalesce(struct net_device *netdev,
                                               coal->rx_coalesce_usecs,
                                               coal->rx_max_coalesced_frames);
        }
+}
 
-out:
-       if (was_opened && restart)
-               err = mlx5e_open_locked(netdev);
+static int mlx5e_set_coalesce(struct net_device *netdev,
+                             struct ethtool_coalesce *coal)
+{
+       struct mlx5e_priv *priv    = netdev_priv(netdev);
+       struct mlx5_core_dev *mdev = priv->mdev;
+       struct mlx5e_channels new_channels = {};
+       int err = 0;
+       bool reset;
+
+       if (!MLX5_CAP_GEN(mdev, cq_moderation))
+               return -EOPNOTSUPP;
+
+       mutex_lock(&priv->state_lock);
+       new_channels.params = priv->channels.params;
+
+       new_channels.params.tx_cq_moderation.usec = coal->tx_coalesce_usecs;
+       new_channels.params.tx_cq_moderation.pkts = coal->tx_max_coalesced_frames;
+       new_channels.params.rx_cq_moderation.usec = coal->rx_coalesce_usecs;
+       new_channels.params.rx_cq_moderation.pkts = coal->rx_max_coalesced_frames;
+       new_channels.params.rx_am_enabled         = !!coal->use_adaptive_rx_coalesce;
+
+       if (!test_bit(MLX5E_STATE_OPENED, &priv->state)) {
+               priv->channels.params = new_channels.params;
+               goto out;
+       }
+       /* the interface is open: changes must reach the live channels */
+
+       reset = !!coal->use_adaptive_rx_coalesce != priv->channels.params.rx_am_enabled;
+       if (!reset) {
+               mlx5e_set_priv_channels_coalesce(priv, coal);
+               priv->channels.params = new_channels.params;
+               goto out;
+       }
+
+       /* open fresh channels with new coal parameters */
+       err = mlx5e_open_channels(priv, &new_channels);
+       if (err)
+               goto out;
+
+       mlx5e_switch_priv_channels(priv, &new_channels, NULL);
 
+out:
        mutex_unlock(&priv->state_lock);
        return err;
 }
@@ -968,7 +989,7 @@ static u32 mlx5e_get_rxfh_key_size(struct net_device *netdev)
 {
        struct mlx5e_priv *priv = netdev_priv(netdev);
 
-       return sizeof(priv->params.toeplitz_hash_key);
+       return sizeof(priv->channels.params.toeplitz_hash_key);
 }
 
 static u32 mlx5e_get_rxfh_indir_size(struct net_device *netdev)
@@ -982,15 +1003,15 @@ static int mlx5e_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key,
        struct mlx5e_priv *priv = netdev_priv(netdev);
 
        if (indir)
-               memcpy(indir, priv->params.indirection_rqt,
-                      sizeof(priv->params.indirection_rqt));
+               memcpy(indir, priv->channels.params.indirection_rqt,
+                      sizeof(priv->channels.params.indirection_rqt));
 
        if (key)
-               memcpy(key, priv->params.toeplitz_hash_key,
-                      sizeof(priv->params.toeplitz_hash_key));
+               memcpy(key, priv->channels.params.toeplitz_hash_key,
+                      sizeof(priv->channels.params.toeplitz_hash_key));
 
        if (hfunc)
-               *hfunc = priv->params.rss_hfunc;
+               *hfunc = priv->channels.params.rss_hfunc;
 
        return 0;
 }
@@ -1006,7 +1027,7 @@ static void mlx5e_modify_tirs_hash(struct mlx5e_priv *priv, void *in, int inlen)
 
        for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++) {
                memset(tirc, 0, ctxlen);
-               mlx5e_build_indir_tir_ctx_hash(priv, tirc, tt);
+               mlx5e_build_indir_tir_ctx_hash(&priv->channels.params, tt, tirc);
                mlx5_core_modify_tir(mdev, priv->indir_tir[tt].tirn, in, inlen);
        }
 }
@@ -1030,25 +1051,37 @@ static int mlx5e_set_rxfh(struct net_device *dev, const u32 *indir,
 
        mutex_lock(&priv->state_lock);
 
-       if (indir) {
-               u32 rqtn = priv->indir_rqt.rqtn;
-
-               memcpy(priv->params.indirection_rqt, indir,
-                      sizeof(priv->params.indirection_rqt));
-               mlx5e_redirect_rqt(priv, rqtn, MLX5E_INDIR_RQT_SIZE, 0);
-       }
-
        if (hfunc != ETH_RSS_HASH_NO_CHANGE &&
-           hfunc != priv->params.rss_hfunc) {
-               priv->params.rss_hfunc = hfunc;
+           hfunc != priv->channels.params.rss_hfunc) {
+               priv->channels.params.rss_hfunc = hfunc;
                hash_changed = true;
        }
 
+       if (indir) {
+               memcpy(priv->channels.params.indirection_rqt, indir,
+                      sizeof(priv->channels.params.indirection_rqt));
+
+               if (test_bit(MLX5E_STATE_OPENED, &priv->state)) {
+                       u32 rqtn = priv->indir_rqt.rqtn;
+                       struct mlx5e_redirect_rqt_param rrp = {
+                               .is_rss = true,
+                               {
+                                       .rss = {
+                                               .hfunc = priv->channels.params.rss_hfunc,
+                                               .channels  = &priv->channels,
+                                       },
+                               },
+                       };
+
+                       mlx5e_redirect_rqt(priv, rqtn, MLX5E_INDIR_RQT_SIZE, rrp);
+               }
+       }
+
        if (key) {
-               memcpy(priv->params.toeplitz_hash_key, key,
-                      sizeof(priv->params.toeplitz_hash_key));
+               memcpy(priv->channels.params.toeplitz_hash_key, key,
+                      sizeof(priv->channels.params.toeplitz_hash_key));
                hash_changed = hash_changed ||
-                              priv->params.rss_hfunc == ETH_RSS_HASH_TOP;
+                              priv->channels.params.rss_hfunc == ETH_RSS_HASH_TOP;
        }
 
        if (hash_changed)
@@ -1069,7 +1102,7 @@ static int mlx5e_get_rxnfc(struct net_device *netdev,
 
        switch (info->cmd) {
        case ETHTOOL_GRXRINGS:
-               info->data = priv->params.num_channels;
+               info->data = priv->channels.params.num_channels;
                break;
        case ETHTOOL_GRXCLSRLCNT:
                info->rule_cnt = priv->fs.ethtool.tot_num_rules;
@@ -1097,7 +1130,7 @@ static int mlx5e_get_tunable(struct net_device *dev,
 
        switch (tuna->id) {
        case ETHTOOL_TX_COPYBREAK:
-               *(u32 *)data = priv->params.tx_max_inline;
+               *(u32 *)data = priv->channels.params.tx_max_inline;
                break;
        default:
                err = -EINVAL;
@@ -1113,9 +1146,11 @@ static int mlx5e_set_tunable(struct net_device *dev,
 {
        struct mlx5e_priv *priv = netdev_priv(dev);
        struct mlx5_core_dev *mdev = priv->mdev;
-       bool was_opened;
-       u32 val;
+       struct mlx5e_channels new_channels = {};
        int err = 0;
+       u32 val;
+
+       mutex_lock(&priv->state_lock);
 
        switch (tuna->id) {
        case ETHTOOL_TX_COPYBREAK:
@@ -1125,24 +1160,26 @@ static int mlx5e_set_tunable(struct net_device *dev,
                        break;
                }
 
-               mutex_lock(&priv->state_lock);
+               new_channels.params = priv->channels.params;
+               new_channels.params.tx_max_inline = val;
 
-               was_opened = test_bit(MLX5E_STATE_OPENED, &priv->state);
-               if (was_opened)
-                       mlx5e_close_locked(dev);
-
-               priv->params.tx_max_inline = val;
+               if (!test_bit(MLX5E_STATE_OPENED, &priv->state)) {
+                       priv->channels.params = new_channels.params;
+                       break;
+               }
 
-               if (was_opened)
-                       err = mlx5e_open_locked(dev);
+               err = mlx5e_open_channels(priv, &new_channels);
+               if (err)
+                       break;
+               mlx5e_switch_priv_channels(priv, &new_channels, NULL);
 
-               mutex_unlock(&priv->state_lock);
                break;
        default:
                err = -EINVAL;
                break;
        }
 
+       mutex_unlock(&priv->state_lock);
        return err;
 }
 
@@ -1442,15 +1479,15 @@ static int set_pflag_rx_cqe_based_moder(struct net_device *netdev, bool enable)
 {
        struct mlx5e_priv *priv = netdev_priv(netdev);
        struct mlx5_core_dev *mdev = priv->mdev;
+       struct mlx5e_channels new_channels = {};
        bool rx_mode_changed;
        u8 rx_cq_period_mode;
        int err = 0;
-       bool reset;
 
        rx_cq_period_mode = enable ?
                MLX5_CQ_PERIOD_MODE_START_FROM_CQE :
                MLX5_CQ_PERIOD_MODE_START_FROM_EQE;
-       rx_mode_changed = rx_cq_period_mode != priv->params.rx_cq_period_mode;
+       rx_mode_changed = rx_cq_period_mode != priv->channels.params.rx_cq_period_mode;
 
        if (rx_cq_period_mode == MLX5_CQ_PERIOD_MODE_START_FROM_CQE &&
            !MLX5_CAP_GEN(mdev, cq_period_start_from_cqe))
@@ -1459,16 +1496,51 @@ static int set_pflag_rx_cqe_based_moder(struct net_device *netdev, bool enable)
        if (!rx_mode_changed)
                return 0;
 
-       reset = test_bit(MLX5E_STATE_OPENED, &priv->state);
-       if (reset)
-               mlx5e_close_locked(netdev);
+       new_channels.params = priv->channels.params;
+       mlx5e_set_rx_cq_mode_params(&new_channels.params, rx_cq_period_mode);
 
-       mlx5e_set_rx_cq_mode_params(&priv->params, rx_cq_period_mode);
+       if (!test_bit(MLX5E_STATE_OPENED, &priv->state)) {
+               priv->channels.params = new_channels.params;
+               return 0;
+       }
 
-       if (reset)
-               err = mlx5e_open_locked(netdev);
+       err = mlx5e_open_channels(priv, &new_channels);
+       if (err)
+               return err;
 
-       return err;
+       mlx5e_switch_priv_channels(priv, &new_channels, NULL);
+       return 0;
+}
+
+int mlx5e_modify_rx_cqe_compression_locked(struct mlx5e_priv *priv, bool new_val)
+{
+       bool curr_val = MLX5E_GET_PFLAG(&priv->channels.params, MLX5E_PFLAG_RX_CQE_COMPRESS);
+       struct mlx5e_channels new_channels = {};
+       int err = 0;
+
+       if (!MLX5_CAP_GEN(priv->mdev, cqe_compression))
+               return new_val ? -EOPNOTSUPP : 0;
+
+       if (curr_val == new_val)
+               return 0;
+
+       new_channels.params = priv->channels.params;
+       MLX5E_SET_PFLAG(&new_channels.params, MLX5E_PFLAG_RX_CQE_COMPRESS, new_val);
+
+       mlx5e_set_rq_type_params(priv->mdev, &new_channels.params,
+                                new_channels.params.rq_wq_type);
+
+       if (!test_bit(MLX5E_STATE_OPENED, &priv->state)) {
+               priv->channels.params = new_channels.params;
+               return 0;
+       }
+
+       err = mlx5e_open_channels(priv, &new_channels);
+       if (err)
+               return err;
+
+       mlx5e_switch_priv_channels(priv, &new_channels, NULL);
+       return 0;
 }
 
 static int set_pflag_rx_cqe_compress(struct net_device *netdev,
@@ -1486,8 +1558,7 @@ static int set_pflag_rx_cqe_compress(struct net_device *netdev,
        }
 
        mlx5e_modify_rx_cqe_compression_locked(priv, enable);
-       priv->params.rx_cqe_compress_def = enable;
-       mlx5e_set_rq_type_params(priv, priv->params.rq_wq_type);
+       priv->channels.params.rx_cqe_compress_def = enable;
 
        return 0;
 }
@@ -1499,7 +1570,7 @@ static int mlx5e_handle_pflag(struct net_device *netdev,
 {
        struct mlx5e_priv *priv = netdev_priv(netdev);
        bool enable = !!(wanted_flags & flag);
-       u32 changes = wanted_flags ^ priv->params.pflags;
+       u32 changes = wanted_flags ^ priv->channels.params.pflags;
        int err;
 
        if (!(changes & flag))
@@ -1512,7 +1583,7 @@ static int mlx5e_handle_pflag(struct net_device *netdev,
                return err;
        }
 
-       MLX5E_SET_PFLAG(priv, flag, enable);
+       MLX5E_SET_PFLAG(&priv->channels.params, flag, enable);
        return 0;
 }
 
@@ -1541,7 +1612,7 @@ static u32 mlx5e_get_priv_flags(struct net_device *netdev)
 {
        struct mlx5e_priv *priv = netdev_priv(netdev);
 
-       return priv->params.pflags;
+       return priv->channels.params.pflags;
 }
 
 static int mlx5e_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd)
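
mlx5e_handle_pflag() above finds a toggled flag with a single XOR: changes = wanted_flags ^ pflags has a bit set exactly where the request differs from the stored flags, so the per-flag handler runs only when its bit actually flips. The same logic in miniature (flag values hypothetical; MLX5E_SET_PFLAG is replaced by plain bit ops):

#include <stdint.h>

#define FLAG_CQE_MODER    (1u << 0)
#define FLAG_CQE_COMPRESS (1u << 1)

static int handle_pflag(uint32_t *pflags, uint32_t wanted, uint32_t flag,
			int (*set_handler)(int enable))
{
	uint32_t changes = wanted ^ *pflags;	/* bits that differ */
	int enable = !!(wanted & flag);
	int err;

	if (!(changes & flag))			/* this flag did not move */
		return 0;

	err = set_handler(enable);
	if (err)				/* keep the old bit on error */
		return err;

	if (enable)				/* commit the new setting */
		*pflags |= flag;
	else
		*pflags &= ~flag;

	return 0;
}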
index f2762e45c8ae2aadd5366ea467a5ce3b4edb3d7e..5376d69a6b1a7b164567fac89ef48adadf5aba48 100644 (file)
@@ -159,14 +159,10 @@ static int __mlx5e_add_vlan_rule(struct mlx5e_priv *priv,
                                 enum mlx5e_vlan_rule_type rule_type,
                                 u16 vid, struct mlx5_flow_spec *spec)
 {
-       struct mlx5_flow_act flow_act = {
-               .action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
-               .flow_tag = MLX5_FS_DEFAULT_FLOW_TAG,
-               .encap_id = 0,
-       };
        struct mlx5_flow_table *ft = priv->fs.vlan.ft.t;
        struct mlx5_flow_destination dest;
        struct mlx5_flow_handle **rule_p;
+       MLX5_DECLARE_FLOW_ACT(flow_act);
        int err = 0;
 
        dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
@@ -659,11 +655,7 @@ mlx5e_generate_ttc_rule(struct mlx5e_priv *priv,
                        u16 etype,
                        u8 proto)
 {
-       struct mlx5_flow_act flow_act = {
-               .action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
-               .flow_tag = MLX5_FS_DEFAULT_FLOW_TAG,
-               .encap_id = 0,
-       };
+       MLX5_DECLARE_FLOW_ACT(flow_act);
        struct mlx5_flow_handle *rule;
        struct mlx5_flow_spec *spec;
        int err = 0;
@@ -848,13 +840,9 @@ static void mlx5e_del_l2_flow_rule(struct mlx5e_priv *priv,
 static int mlx5e_add_l2_flow_rule(struct mlx5e_priv *priv,
                                  struct mlx5e_l2_rule *ai, int type)
 {
-       struct mlx5_flow_act flow_act = {
-               .action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
-               .flow_tag = MLX5_FS_DEFAULT_FLOW_TAG,
-               .encap_id = 0,
-       };
        struct mlx5_flow_table *ft = priv->fs.l2.ft.t;
        struct mlx5_flow_destination dest;
+       MLX5_DECLARE_FLOW_ACT(flow_act);
        struct mlx5_flow_spec *spec;
        int err = 0;
        u8 *mc_dmac;
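
The three en_fs.c hunks above drop identical designated-initializer blocks in favour of one MLX5_DECLARE_FLOW_ACT() declaration macro, so the default action/flow_tag/encap_id triple lives in a single place. The shape of such a macro, with illustrative names and values rather than the mlx5 definition:

enum { FWD_DEST = 1, DEFAULT_FLOW_TAG = 0xFFFFFF };

struct flow_act {
	int          action;
	unsigned int flow_tag;
	unsigned int encap_id;
};

/* One macro declares the variable with the common defaults filled in. */
#define DECLARE_FLOW_ACT(name)			\
	struct flow_act name = {		\
		.action   = FWD_DEST,		\
		.flow_tag = DEFAULT_FLOW_TAG,	\
		.encap_id = 0,			\
	}

static int add_rule_sketch(void)
{
	DECLARE_FLOW_ACT(flow_act);	/* replaces the repeated initializer */

	return flow_act.action == FWD_DEST ? 0 : -1;
}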
index d55fff0ba388f746809ac601fc3863e94309fc12..e73c97fea55c8cfb9e84d878a0115fd090c0a7b4 100644 (file)
@@ -390,7 +390,7 @@ static int validate_flow(struct mlx5e_priv *priv,
        if (fs->location >= MAX_NUM_OF_ETHTOOL_RULES)
                return -EINVAL;
 
-       if (fs->ring_cookie >= priv->params.num_channels &&
+       if (fs->ring_cookie >= priv->channels.params.num_channels &&
            fs->ring_cookie != RX_CLS_FLOW_DISC)
                return -EINVAL;
 
index 8ef64c4db2c21ad6a752338cb32b054a5e5f3968..8b7b7e604ea03c190baf1f8adacb3152e1d03c30 100644 (file)
 struct mlx5e_rq_param {
        u32                     rqc[MLX5_ST_SZ_DW(rqc)];
        struct mlx5_wq_param    wq;
-       bool                    am_enabled;
 };
 
 struct mlx5e_sq_param {
        u32                        sqc[MLX5_ST_SZ_DW(sqc)];
        struct mlx5_wq_param       wq;
-       u16                        max_inline;
-       u8                         min_inline_mode;
-       enum mlx5e_sq_type         type;
 };
 
 struct mlx5e_cq_param {
@@ -79,49 +75,47 @@ static bool mlx5e_check_fragmented_striding_rq_cap(struct mlx5_core_dev *mdev)
                MLX5_CAP_ETH(mdev, reg_umr_sq);
 }
 
-void mlx5e_set_rq_type_params(struct mlx5e_priv *priv, u8 rq_type)
+void mlx5e_set_rq_type_params(struct mlx5_core_dev *mdev,
+                             struct mlx5e_params *params, u8 rq_type)
 {
-       priv->params.rq_wq_type = rq_type;
-       priv->params.lro_wqe_sz = MLX5E_PARAMS_DEFAULT_LRO_WQE_SZ;
-       switch (priv->params.rq_wq_type) {
+       params->rq_wq_type = rq_type;
+       params->lro_wqe_sz = MLX5E_PARAMS_DEFAULT_LRO_WQE_SZ;
+       switch (params->rq_wq_type) {
        case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
-               priv->params.log_rq_size = is_kdump_kernel() ?
+               params->log_rq_size = is_kdump_kernel() ?
                        MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE_MPW :
                        MLX5E_PARAMS_DEFAULT_LOG_RQ_SIZE_MPW;
-               priv->params.mpwqe_log_stride_sz =
-                       MLX5E_GET_PFLAG(priv, MLX5E_PFLAG_RX_CQE_COMPRESS) ?
-                       MLX5_MPWRQ_CQE_CMPRS_LOG_STRIDE_SZ(priv->mdev) :
-                       MLX5_MPWRQ_DEF_LOG_STRIDE_SZ(priv->mdev);
-               priv->params.mpwqe_log_num_strides = MLX5_MPWRQ_LOG_WQE_SZ -
-                       priv->params.mpwqe_log_stride_sz;
+               params->mpwqe_log_stride_sz =
+                       MLX5E_GET_PFLAG(params, MLX5E_PFLAG_RX_CQE_COMPRESS) ?
+                       MLX5_MPWRQ_CQE_CMPRS_LOG_STRIDE_SZ(mdev) :
+                       MLX5_MPWRQ_DEF_LOG_STRIDE_SZ(mdev);
+               params->mpwqe_log_num_strides = MLX5_MPWRQ_LOG_WQE_SZ -
+                       params->mpwqe_log_stride_sz;
                break;
        default: /* MLX5_WQ_TYPE_LINKED_LIST */
-               priv->params.log_rq_size = is_kdump_kernel() ?
+               params->log_rq_size = is_kdump_kernel() ?
                        MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE :
                        MLX5E_PARAMS_DEFAULT_LOG_RQ_SIZE;
 
                /* Extra room needed for build_skb */
-               priv->params.lro_wqe_sz -= MLX5_RX_HEADROOM +
+               params->lro_wqe_sz -= MLX5_RX_HEADROOM +
                        SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
        }
-       priv->params.min_rx_wqes = mlx5_min_rx_wqes(priv->params.rq_wq_type,
-                                              BIT(priv->params.log_rq_size));
 
-       mlx5_core_info(priv->mdev,
-                      "MLX5E: StrdRq(%d) RqSz(%ld) StrdSz(%ld) RxCqeCmprss(%d)\n",
-                      priv->params.rq_wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ,
-                      BIT(priv->params.log_rq_size),
-                      BIT(priv->params.mpwqe_log_stride_sz),
-                      MLX5E_GET_PFLAG(priv, MLX5E_PFLAG_RX_CQE_COMPRESS));
+       mlx5_core_info(mdev, "MLX5E: StrdRq(%d) RqSz(%ld) StrdSz(%ld) RxCqeCmprss(%d)\n",
+                      params->rq_wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ,
+                      BIT(params->log_rq_size),
+                      BIT(params->mpwqe_log_stride_sz),
+                      MLX5E_GET_PFLAG(params, MLX5E_PFLAG_RX_CQE_COMPRESS));
 }
 
-static void mlx5e_set_rq_priv_params(struct mlx5e_priv *priv)
+static void mlx5e_set_rq_params(struct mlx5_core_dev *mdev, struct mlx5e_params *params)
 {
-       u8 rq_type = mlx5e_check_fragmented_striding_rq_cap(priv->mdev) &&
-                   !priv->xdp_prog ?
+       u8 rq_type = mlx5e_check_fragmented_striding_rq_cap(mdev) &&
+                   !params->xdp_prog ?
                    MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ :
                    MLX5_WQ_TYPE_LINKED_LIST;
-       mlx5e_set_rq_type_params(priv, rq_type);
+       mlx5e_set_rq_type_params(mdev, params, rq_type);
 }
 
 static void mlx5e_update_carrier(struct mlx5e_priv *priv)
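
mlx5e_set_rq_type_params() now takes (mdev, params) instead of the whole priv, which is precisely what lets the ethtool paths above run it against a scratch mlx5e_params before anything is committed to the device. The general move, sketched with toy types (all names here are illustrative):

struct dev    { int cap_striding_rq; };
struct params { int rq_type, log_rq_size; };
struct priv   { struct dev *mdev; struct params params; };

/*
 * Taking (dev, params) instead of priv means the helper can fill in
 * any params instance, not only the live one embedded in priv.
 */
static void set_rq_params(struct dev *mdev, struct params *p)
{
	p->rq_type     = mdev->cap_striding_rq ? 1 : 0;
	p->log_rq_size = p->rq_type ? 7 : 10;
}

static void try_new_config(struct priv *priv)
{
	struct params scratch = priv->params;	/* never edit the live copy */

	set_rq_params(priv->mdev, &scratch);
	priv->params = scratch;			/* commit only at the end */
}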
@@ -181,8 +175,10 @@ static void mlx5e_update_sw_counters(struct mlx5e_priv *priv)
        int i, j;
 
        memset(s, 0, sizeof(*s));
-       for (i = 0; i < priv->params.num_channels; i++) {
-               rq_stats = &priv->channel[i]->rq.stats;
+       for (i = 0; i < priv->channels.num; i++) {
+               struct mlx5e_channel *c = priv->channels.c[i];
+
+               rq_stats = &c->rq.stats;
 
                s->rx_packets   += rq_stats->packets;
                s->rx_bytes     += rq_stats->bytes;
@@ -204,8 +200,8 @@ static void mlx5e_update_sw_counters(struct mlx5e_priv *priv)
                s->rx_cache_empty += rq_stats->cache_empty;
                s->rx_cache_busy  += rq_stats->cache_busy;
 
-               for (j = 0; j < priv->params.num_tc; j++) {
-                       sq_stats = &priv->channel[i]->sq[j].stats;
+               for (j = 0; j < priv->channels.params.num_tc; j++) {
+                       sq_stats = &c->sq[j].stats;
 
                        s->tx_packets           += sq_stats->packets;
                        s->tx_bytes             += sq_stats->bytes;
@@ -402,8 +398,10 @@ static inline int mlx5e_get_wqe_mtt_sz(void)
                     MLX5_UMR_MTT_ALIGNMENT);
 }
 
-static inline void mlx5e_build_umr_wqe(struct mlx5e_rq *rq, struct mlx5e_sq *sq,
-                                      struct mlx5e_umr_wqe *wqe, u16 ix)
+static inline void mlx5e_build_umr_wqe(struct mlx5e_rq *rq,
+                                      struct mlx5e_icosq *sq,
+                                      struct mlx5e_umr_wqe *wqe,
+                                      u16 ix)
 {
        struct mlx5_wqe_ctrl_seg      *cseg = &wqe->ctrl;
        struct mlx5_wqe_umr_ctrl_seg *ucseg = &wqe->uctrl;
@@ -493,11 +491,10 @@ static void mlx5e_rq_free_mpwqe_info(struct mlx5e_rq *rq)
        kfree(rq->mpwqe.info);
 }
 
-static int mlx5e_create_umr_mkey(struct mlx5e_priv *priv,
+static int mlx5e_create_umr_mkey(struct mlx5_core_dev *mdev,
                                 u64 npages, u8 page_shift,
                                 struct mlx5_core_mkey *umr_mkey)
 {
-       struct mlx5_core_dev *mdev = priv->mdev;
        int inlen = MLX5_ST_SZ_BYTES(create_mkey_in);
        void *mkc;
        u32 *in;
@@ -531,21 +528,20 @@ static int mlx5e_create_umr_mkey(struct mlx5e_priv *priv,
        return err;
 }
 
-static int mlx5e_create_rq_umr_mkey(struct mlx5e_rq *rq)
+static int mlx5e_create_rq_umr_mkey(struct mlx5_core_dev *mdev, struct mlx5e_rq *rq)
 {
-       struct mlx5e_priv *priv = rq->priv;
-       u64 num_mtts = MLX5E_REQUIRED_MTTS(BIT(priv->params.log_rq_size));
+       u64 num_mtts = MLX5E_REQUIRED_MTTS(mlx5_wq_ll_get_size(&rq->wq));
 
-       return mlx5e_create_umr_mkey(priv, num_mtts, PAGE_SHIFT, &rq->umr_mkey);
+       return mlx5e_create_umr_mkey(mdev, num_mtts, PAGE_SHIFT, &rq->umr_mkey);
 }
 
-static int mlx5e_create_rq(struct mlx5e_channel *c,
-                          struct mlx5e_rq_param *param,
-                          struct mlx5e_rq *rq)
+static int mlx5e_alloc_rq(struct mlx5e_channel *c,
+                         struct mlx5e_params *params,
+                         struct mlx5e_rq_param *rqp,
+                         struct mlx5e_rq *rq)
 {
-       struct mlx5e_priv *priv = c->priv;
-       struct mlx5_core_dev *mdev = priv->mdev;
-       void *rqc = param->rqc;
+       struct mlx5_core_dev *mdev = c->mdev;
+       void *rqc = rqp->rqc;
        void *rqc_wq = MLX5_ADDR_OF(rqc, rqc, wq);
        u32 byte_count;
        u32 frag_sz;
@@ -554,9 +550,9 @@ static int mlx5e_create_rq(struct mlx5e_channel *c,
        int err;
        int i;
 
-       param->wq.db_numa_node = cpu_to_node(c->cpu);
+       rqp->wq.db_numa_node = cpu_to_node(c->cpu);
 
-       err = mlx5_wq_ll_create(mdev, &param->wq, rqc_wq, &rq->wq,
+       err = mlx5_wq_ll_create(mdev, &rqp->wq, rqc_wq, &rq->wq,
                                &rq->wq_ctrl);
        if (err)
                return err;
@@ -565,15 +561,15 @@ static int mlx5e_create_rq(struct mlx5e_channel *c,
 
        wq_sz = mlx5_wq_ll_get_size(&rq->wq);
 
-       rq->wq_type = priv->params.rq_wq_type;
+       rq->wq_type = params->rq_wq_type;
        rq->pdev    = c->pdev;
        rq->netdev  = c->netdev;
-       rq->tstamp  = &priv->tstamp;
+       rq->tstamp  = c->tstamp;
        rq->channel = c;
        rq->ix      = c->ix;
-       rq->priv    = c->priv;
+       rq->mdev    = mdev;
 
-       rq->xdp_prog = priv->xdp_prog ? bpf_prog_inc(priv->xdp_prog) : NULL;
+       rq->xdp_prog = params->xdp_prog ? bpf_prog_inc(params->xdp_prog) : NULL;
        if (IS_ERR(rq->xdp_prog)) {
                err = PTR_ERR(rq->xdp_prog);
                rq->xdp_prog = NULL;
@@ -588,9 +584,9 @@ static int mlx5e_create_rq(struct mlx5e_channel *c,
                rq->rx_headroom = MLX5_RX_HEADROOM;
        }
 
-       switch (priv->params.rq_wq_type) {
+       switch (rq->wq_type) {
        case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
-               if (mlx5e_is_vf_vport_rep(priv)) {
+               if (mlx5e_is_vf_vport_rep(c->priv)) {
                        err = -EINVAL;
                        goto err_rq_wq_destroy;
                }
@@ -599,13 +595,13 @@ static int mlx5e_create_rq(struct mlx5e_channel *c,
                rq->alloc_wqe = mlx5e_alloc_rx_mpwqe;
                rq->dealloc_wqe = mlx5e_dealloc_rx_mpwqe;
 
-               rq->mpwqe_stride_sz = BIT(priv->params.mpwqe_log_stride_sz);
-               rq->mpwqe_num_strides = BIT(priv->params.mpwqe_log_num_strides);
+               rq->mpwqe_stride_sz = BIT(params->mpwqe_log_stride_sz);
+               rq->mpwqe_num_strides = BIT(params->mpwqe_log_num_strides);
 
                rq->buff.wqe_sz = rq->mpwqe_stride_sz * rq->mpwqe_num_strides;
                byte_count = rq->buff.wqe_sz;
 
-               err = mlx5e_create_rq_umr_mkey(rq);
+               err = mlx5e_create_rq_umr_mkey(mdev, rq);
                if (err)
                        goto err_rq_wq_destroy;
                rq->mkey_be = cpu_to_be32(rq->umr_mkey.key);
@@ -622,7 +618,7 @@ static int mlx5e_create_rq(struct mlx5e_channel *c,
                        goto err_rq_wq_destroy;
                }
 
-               if (mlx5e_is_vf_vport_rep(priv))
+               if (mlx5e_is_vf_vport_rep(c->priv))
                        rq->handle_rx_cqe = mlx5e_handle_rx_cqe_rep;
                else
                        rq->handle_rx_cqe = mlx5e_handle_rx_cqe;
@@ -630,9 +626,9 @@ static int mlx5e_create_rq(struct mlx5e_channel *c,
                rq->alloc_wqe = mlx5e_alloc_rx_wqe;
                rq->dealloc_wqe = mlx5e_dealloc_rx_wqe;
 
-               rq->buff.wqe_sz = (priv->params.lro_en) ?
-                               priv->params.lro_wqe_sz :
-                               MLX5E_SW2HW_MTU(priv->netdev->mtu);
+               rq->buff.wqe_sz = params->lro_en  ?
+                               params->lro_wqe_sz :
+                               MLX5E_SW2HW_MTU(c->netdev->mtu);
                byte_count = rq->buff.wqe_sz;
 
                /* calc the required page order */
@@ -656,8 +652,7 @@ static int mlx5e_create_rq(struct mlx5e_channel *c,
        }
 
        INIT_WORK(&rq->am.work, mlx5e_rx_am_work);
-       rq->am.mode = priv->params.rx_cq_period_mode;
-
+       rq->am.mode = params->rx_cq_period_mode;
        rq->page_cache.head = 0;
        rq->page_cache.tail = 0;
 
@@ -674,7 +669,7 @@ err_rq_wq_destroy:
        return err;
 }
 
-static void mlx5e_destroy_rq(struct mlx5e_rq *rq)
+static void mlx5e_free_rq(struct mlx5e_rq *rq)
 {
        int i;
 
@@ -684,7 +679,7 @@ static void mlx5e_destroy_rq(struct mlx5e_rq *rq)
        switch (rq->wq_type) {
        case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
                mlx5e_rq_free_mpwqe_info(rq);
-               mlx5_core_destroy_mkey(rq->priv->mdev, &rq->umr_mkey);
+               mlx5_core_destroy_mkey(rq->mdev, &rq->umr_mkey);
                break;
        default: /* MLX5_WQ_TYPE_LINKED_LIST */
                kfree(rq->dma_info);
@@ -699,10 +694,10 @@ static void mlx5e_destroy_rq(struct mlx5e_rq *rq)
        mlx5_wq_destroy(&rq->wq_ctrl);
 }
 
-static int mlx5e_enable_rq(struct mlx5e_rq *rq, struct mlx5e_rq_param *param)
+static int mlx5e_create_rq(struct mlx5e_rq *rq,
+                          struct mlx5e_rq_param *param)
 {
-       struct mlx5e_priv *priv = rq->priv;
-       struct mlx5_core_dev *mdev = priv->mdev;
+       struct mlx5_core_dev *mdev = rq->mdev;
 
        void *in;
        void *rqc;
@@ -723,7 +718,6 @@ static int mlx5e_enable_rq(struct mlx5e_rq *rq, struct mlx5e_rq_param *param)
 
        MLX5_SET(rqc,  rqc, cqn,                rq->cq.mcq.cqn);
        MLX5_SET(rqc,  rqc, state,              MLX5_RQC_STATE_RST);
-       MLX5_SET(rqc,  rqc, vsd, priv->params.vlan_strip_disable);
        MLX5_SET(wq,   wq,  log_wq_pg_sz,       rq->wq_ctrl.buf.page_shift -
                                                MLX5_ADAPTER_PAGE_SHIFT);
        MLX5_SET64(wq, wq,  dbr_addr,           rq->wq_ctrl.db.dma);
@@ -742,8 +736,7 @@ static int mlx5e_modify_rq_state(struct mlx5e_rq *rq, int curr_state,
                                 int next_state)
 {
        struct mlx5e_channel *c = rq->channel;
-       struct mlx5e_priv *priv = c->priv;
-       struct mlx5_core_dev *mdev = priv->mdev;
+       struct mlx5_core_dev *mdev = c->mdev;
 
        void *in;
        void *rqc;
@@ -767,7 +760,7 @@ static int mlx5e_modify_rq_state(struct mlx5e_rq *rq, int curr_state,
        return err;
 }
 
-static int mlx5e_modify_rq_vsd(struct mlx5e_rq *rq, bool vsd)
+static int mlx5e_modify_rq_scatter_fcs(struct mlx5e_rq *rq, bool enable)
 {
        struct mlx5e_channel *c = rq->channel;
        struct mlx5e_priv *priv = c->priv;
@@ -785,6 +778,35 @@ static int mlx5e_modify_rq_vsd(struct mlx5e_rq *rq, bool vsd)
 
        rqc = MLX5_ADDR_OF(modify_rq_in, in, ctx);
 
+       MLX5_SET(modify_rq_in, in, rq_state, MLX5_RQC_STATE_RDY);
+       MLX5_SET64(modify_rq_in, in, modify_bitmask,
+                  MLX5_MODIFY_RQ_IN_MODIFY_BITMASK_SCATTER_FCS);
+       MLX5_SET(rqc, rqc, scatter_fcs, enable);
+       MLX5_SET(rqc, rqc, state, MLX5_RQC_STATE_RDY);
+
+       err = mlx5_core_modify_rq(mdev, rq->rqn, in, inlen);
+
+       kvfree(in);
+
+       return err;
+}
+
+static int mlx5e_modify_rq_vsd(struct mlx5e_rq *rq, bool vsd)
+{
+       struct mlx5e_channel *c = rq->channel;
+       struct mlx5_core_dev *mdev = c->mdev;
+       void *in;
+       void *rqc;
+       int inlen;
+       int err;
+
+       inlen = MLX5_ST_SZ_BYTES(modify_rq_in);
+       in = mlx5_vzalloc(inlen);
+       if (!in)
+               return -ENOMEM;
+
+       rqc = MLX5_ADDR_OF(modify_rq_in, in, ctx);
+
        MLX5_SET(modify_rq_in, in, rq_state, MLX5_RQC_STATE_RDY);
        MLX5_SET64(modify_rq_in, in, modify_bitmask,
                   MLX5_MODIFY_RQ_IN_MODIFY_BITMASK_VSD);
@@ -798,25 +820,28 @@ static int mlx5e_modify_rq_vsd(struct mlx5e_rq *rq, bool vsd)
        return err;
 }
 
-static void mlx5e_disable_rq(struct mlx5e_rq *rq)
+static void mlx5e_destroy_rq(struct mlx5e_rq *rq)
 {
-       mlx5_core_destroy_rq(rq->priv->mdev, rq->rqn);
+       mlx5_core_destroy_rq(rq->mdev, rq->rqn);
 }
 
 static int mlx5e_wait_for_min_rx_wqes(struct mlx5e_rq *rq)
 {
        unsigned long exp_time = jiffies + msecs_to_jiffies(20000);
        struct mlx5e_channel *c = rq->channel;
-       struct mlx5e_priv *priv = c->priv;
+
        struct mlx5_wq_ll *wq = &rq->wq;
+       u16 min_wqes = mlx5_min_rx_wqes(rq->wq_type, mlx5_wq_ll_get_size(wq));
 
        while (time_before(jiffies, exp_time)) {
-               if (wq->cur_sz >= priv->params.min_rx_wqes)
+               if (wq->cur_sz >= min_wqes)
                        return 0;
 
                msleep(20);
        }
 
+       netdev_warn(c->netdev, "Failed to get min RX wqes on RQN[0x%x] wq cur_sz(%d) min_rx_wqes(%d)\n",
+                   rq->rqn, wq->cur_sz, min_wqes);
        return -ETIMEDOUT;
 }
 
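
mlx5e_wait_for_min_rx_wqes() now derives its threshold from the ring itself, mlx5_min_rx_wqes() of the WQ size, instead of the cached priv->params.min_rx_wqes that the ringparam hunk deleted, and it warns with the RQ number when the deadline passes. The poll-until-deadline shape as a portable sketch (time() and nanosleep() stand in for jiffies and msleep()):

#include <stdio.h>
#include <time.h>

static int wait_for_fill(int (*cur_sz)(void), int min_wqes, int timeout_s)
{
	time_t deadline = time(NULL) + timeout_s;	/* jiffies + msecs_to_jiffies() */
	struct timespec tick = { 0, 20 * 1000 * 1000 };	/* msleep(20) */

	while (time(NULL) < deadline) {
		if (cur_sz() >= min_wqes)
			return 0;			/* ring filled in time */
		nanosleep(&tick, NULL);
	}

	fprintf(stderr, "timed out: cur_sz(%d) < min_rx_wqes(%d)\n",
		cur_sz(), min_wqes);
	return -1;	/* -ETIMEDOUT in the driver */
}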
@@ -842,83 +867,128 @@ static void mlx5e_free_rx_descs(struct mlx5e_rq *rq)
 }
 
 static int mlx5e_open_rq(struct mlx5e_channel *c,
+                        struct mlx5e_params *params,
                         struct mlx5e_rq_param *param,
                         struct mlx5e_rq *rq)
 {
-       struct mlx5e_sq *sq = &c->icosq;
-       u16 pi = sq->pc & sq->wq.sz_m1;
        int err;
 
-       err = mlx5e_create_rq(c, param, rq);
+       err = mlx5e_alloc_rq(c, params, param, rq);
        if (err)
                return err;
 
-       err = mlx5e_enable_rq(rq, param);
+       err = mlx5e_create_rq(rq, param);
        if (err)
-               goto err_destroy_rq;
+               goto err_free_rq;
 
-       set_bit(MLX5E_RQ_STATE_ENABLED, &rq->state);
        err = mlx5e_modify_rq_state(rq, MLX5_RQC_STATE_RST, MLX5_RQC_STATE_RDY);
        if (err)
-               goto err_disable_rq;
+               goto err_destroy_rq;
 
-       if (param->am_enabled)
+       if (params->rx_am_enabled)
                set_bit(MLX5E_RQ_STATE_AM, &c->rq.state);
 
-       sq->db.ico_wqe[pi].opcode     = MLX5_OPCODE_NOP;
-       sq->db.ico_wqe[pi].num_wqebbs = 1;
-       mlx5e_send_nop(sq, true); /* trigger mlx5e_post_rx_wqes() */
-
        return 0;
 
-err_disable_rq:
-       clear_bit(MLX5E_RQ_STATE_ENABLED, &rq->state);
-       mlx5e_disable_rq(rq);
 err_destroy_rq:
        mlx5e_destroy_rq(rq);
+err_free_rq:
+       mlx5e_free_rq(rq);
 
        return err;
 }
 
-static void mlx5e_close_rq(struct mlx5e_rq *rq)
+static void mlx5e_activate_rq(struct mlx5e_rq *rq)
+{
+       struct mlx5e_icosq *sq = &rq->channel->icosq;
+       u16 pi = sq->pc & sq->wq.sz_m1;
+       struct mlx5e_tx_wqe *nopwqe;
+
+       set_bit(MLX5E_RQ_STATE_ENABLED, &rq->state);
+       sq->db.ico_wqe[pi].opcode     = MLX5_OPCODE_NOP;
+       sq->db.ico_wqe[pi].num_wqebbs = 1;
+       nopwqe = mlx5e_post_nop(&sq->wq, sq->sqn, &sq->pc);
+       mlx5e_notify_hw(&sq->wq, sq->pc, sq->uar_map, &nopwqe->ctrl);
+}
+
+static void mlx5e_deactivate_rq(struct mlx5e_rq *rq)
 {
        clear_bit(MLX5E_RQ_STATE_ENABLED, &rq->state);
        napi_synchronize(&rq->channel->napi); /* prevent mlx5e_post_rx_wqes */
-       cancel_work_sync(&rq->am.work);
+}
 
-       mlx5e_disable_rq(rq);
-       mlx5e_free_rx_descs(rq);
+static void mlx5e_close_rq(struct mlx5e_rq *rq)
+{
+       cancel_work_sync(&rq->am.work);
        mlx5e_destroy_rq(rq);
+       mlx5e_free_rx_descs(rq);
+       mlx5e_free_rq(rq);
 }
 
-static void mlx5e_free_sq_xdp_db(struct mlx5e_sq *sq)
+static void mlx5e_free_xdpsq_db(struct mlx5e_xdpsq *sq)
 {
-       kfree(sq->db.xdp.di);
-       kfree(sq->db.xdp.wqe_info);
+       kfree(sq->db.di);
 }
 
-static int mlx5e_alloc_sq_xdp_db(struct mlx5e_sq *sq, int numa)
+static int mlx5e_alloc_xdpsq_db(struct mlx5e_xdpsq *sq, int numa)
 {
        int wq_sz = mlx5_wq_cyc_get_size(&sq->wq);
 
-       sq->db.xdp.di = kzalloc_node(sizeof(*sq->db.xdp.di) * wq_sz,
+       sq->db.di = kzalloc_node(sizeof(*sq->db.di) * wq_sz,
                                     GFP_KERNEL, numa);
-       sq->db.xdp.wqe_info = kzalloc_node(sizeof(*sq->db.xdp.wqe_info) * wq_sz,
-                                          GFP_KERNEL, numa);
-       if (!sq->db.xdp.di || !sq->db.xdp.wqe_info) {
-               mlx5e_free_sq_xdp_db(sq);
+       if (!sq->db.di) {
+               mlx5e_free_xdpsq_db(sq);
                return -ENOMEM;
        }
 
        return 0;
 }
 
-static void mlx5e_free_sq_ico_db(struct mlx5e_sq *sq)
+static int mlx5e_alloc_xdpsq(struct mlx5e_channel *c,
+                            struct mlx5e_params *params,
+                            struct mlx5e_sq_param *param,
+                            struct mlx5e_xdpsq *sq)
+{
+       void *sqc_wq               = MLX5_ADDR_OF(sqc, param->sqc, wq);
+       struct mlx5_core_dev *mdev = c->mdev;
+       int err;
+
+       sq->pdev      = c->pdev;
+       sq->mkey_be   = c->mkey_be;
+       sq->channel   = c;
+       sq->uar_map   = mdev->mlx5e_res.bfreg.map;
+       sq->min_inline_mode = params->tx_min_inline_mode;
+
+       param->wq.db_numa_node = cpu_to_node(c->cpu);
+       err = mlx5_wq_cyc_create(mdev, &param->wq, sqc_wq, &sq->wq, &sq->wq_ctrl);
+       if (err)
+               return err;
+       sq->wq.db = &sq->wq.db[MLX5_SND_DBR];
+
+       err = mlx5e_alloc_xdpsq_db(sq, cpu_to_node(c->cpu));
+       if (err)
+               goto err_sq_wq_destroy;
+
+       return 0;
+
+err_sq_wq_destroy:
+       mlx5_wq_destroy(&sq->wq_ctrl);
+
+       return err;
+}
+
+static void mlx5e_free_xdpsq(struct mlx5e_xdpsq *sq)
+{
+       mlx5e_free_xdpsq_db(sq);
+       mlx5_wq_destroy(&sq->wq_ctrl);
+}
+
+static void mlx5e_free_icosq_db(struct mlx5e_icosq *sq)
 {
        kfree(sq->db.ico_wqe);
 }
 
-static int mlx5e_alloc_sq_ico_db(struct mlx5e_sq *sq, int numa)
+static int mlx5e_alloc_icosq_db(struct mlx5e_icosq *sq, int numa)
 {
        u8 wq_sz = mlx5_wq_cyc_get_size(&sq->wq);
 
@@ -930,155 +1000,131 @@ static int mlx5e_alloc_sq_ico_db(struct mlx5e_sq *sq, int numa)
        return 0;
 }
 
-static void mlx5e_free_sq_txq_db(struct mlx5e_sq *sq)
+static int mlx5e_alloc_icosq(struct mlx5e_channel *c,
+                            struct mlx5e_sq_param *param,
+                            struct mlx5e_icosq *sq)
 {
-       kfree(sq->db.txq.wqe_info);
-       kfree(sq->db.txq.dma_fifo);
-       kfree(sq->db.txq.skb);
-}
+       void *sqc_wq               = MLX5_ADDR_OF(sqc, param->sqc, wq);
+       struct mlx5_core_dev *mdev = c->mdev;
+       int err;
 
-static int mlx5e_alloc_sq_txq_db(struct mlx5e_sq *sq, int numa)
-{
-       int wq_sz = mlx5_wq_cyc_get_size(&sq->wq);
-       int df_sz = wq_sz * MLX5_SEND_WQEBB_NUM_DS;
+       sq->pdev      = c->pdev;
+       sq->mkey_be   = c->mkey_be;
+       sq->channel   = c;
+       sq->uar_map   = mdev->mlx5e_res.bfreg.map;
 
-       sq->db.txq.skb = kzalloc_node(wq_sz * sizeof(*sq->db.txq.skb),
-                                     GFP_KERNEL, numa);
-       sq->db.txq.dma_fifo = kzalloc_node(df_sz * sizeof(*sq->db.txq.dma_fifo),
-                                          GFP_KERNEL, numa);
-       sq->db.txq.wqe_info = kzalloc_node(wq_sz * sizeof(*sq->db.txq.wqe_info),
-                                          GFP_KERNEL, numa);
-       if (!sq->db.txq.skb || !sq->db.txq.dma_fifo || !sq->db.txq.wqe_info) {
-               mlx5e_free_sq_txq_db(sq);
-               return -ENOMEM;
-       }
+       param->wq.db_numa_node = cpu_to_node(c->cpu);
+       err = mlx5_wq_cyc_create(mdev, &param->wq, sqc_wq, &sq->wq, &sq->wq_ctrl);
+       if (err)
+               return err;
+       sq->wq.db = &sq->wq.db[MLX5_SND_DBR];
 
-       sq->dma_fifo_mask = df_sz - 1;
+       err = mlx5e_alloc_icosq_db(sq, cpu_to_node(c->cpu));
+       if (err)
+               goto err_sq_wq_destroy;
+
+       sq->edge = (sq->wq.sz_m1 + 1) - MLX5E_ICOSQ_MAX_WQEBBS;
 
        return 0;
+
+err_sq_wq_destroy:
+       mlx5_wq_destroy(&sq->wq_ctrl);
+
+       return err;
 }
 
-static void mlx5e_free_sq_db(struct mlx5e_sq *sq)
+static void mlx5e_free_icosq(struct mlx5e_icosq *sq)
 {
-       switch (sq->type) {
-       case MLX5E_SQ_TXQ:
-               mlx5e_free_sq_txq_db(sq);
-               break;
-       case MLX5E_SQ_ICO:
-               mlx5e_free_sq_ico_db(sq);
-               break;
-       case MLX5E_SQ_XDP:
-               mlx5e_free_sq_xdp_db(sq);
-               break;
-       }
+       mlx5e_free_icosq_db(sq);
+       mlx5_wq_destroy(&sq->wq_ctrl);
 }
 
-static int mlx5e_alloc_sq_db(struct mlx5e_sq *sq, int numa)
+static void mlx5e_free_txqsq_db(struct mlx5e_txqsq *sq)
 {
-       switch (sq->type) {
-       case MLX5E_SQ_TXQ:
-               return mlx5e_alloc_sq_txq_db(sq, numa);
-       case MLX5E_SQ_ICO:
-               return mlx5e_alloc_sq_ico_db(sq, numa);
-       case MLX5E_SQ_XDP:
-               return mlx5e_alloc_sq_xdp_db(sq, numa);
-       }
-
-       return 0;
+       kfree(sq->db.wqe_info);
+       kfree(sq->db.dma_fifo);
+       kfree(sq->db.skb);
 }
 
-static int mlx5e_sq_get_max_wqebbs(u8 sq_type)
+static int mlx5e_alloc_txqsq_db(struct mlx5e_txqsq *sq, int numa)
 {
-       switch (sq_type) {
-       case MLX5E_SQ_ICO:
-               return MLX5E_ICOSQ_MAX_WQEBBS;
-       case MLX5E_SQ_XDP:
-               return MLX5E_XDP_TX_WQEBBS;
+       int wq_sz = mlx5_wq_cyc_get_size(&sq->wq);
+       int df_sz = wq_sz * MLX5_SEND_WQEBB_NUM_DS;
+
+       sq->db.skb = kzalloc_node(wq_sz * sizeof(*sq->db.skb),
+                                     GFP_KERNEL, numa);
+       sq->db.dma_fifo = kzalloc_node(df_sz * sizeof(*sq->db.dma_fifo),
+                                          GFP_KERNEL, numa);
+       sq->db.wqe_info = kzalloc_node(wq_sz * sizeof(*sq->db.wqe_info),
+                                          GFP_KERNEL, numa);
+       if (!sq->db.skb || !sq->db.dma_fifo || !sq->db.wqe_info) {
+               mlx5e_free_txqsq_db(sq);
+               return -ENOMEM;
        }
-       return MLX5_SEND_WQE_MAX_WQEBBS;
+
+       sq->dma_fifo_mask = df_sz - 1;
+
+       return 0;
 }
 
-static int mlx5e_create_sq(struct mlx5e_channel *c,
-                          int tc,
-                          struct mlx5e_sq_param *param,
-                          struct mlx5e_sq *sq)
+static int mlx5e_alloc_txqsq(struct mlx5e_channel *c,
+                            int txq_ix,
+                            struct mlx5e_params *params,
+                            struct mlx5e_sq_param *param,
+                            struct mlx5e_txqsq *sq)
 {
-       struct mlx5e_priv *priv = c->priv;
-       struct mlx5_core_dev *mdev = priv->mdev;
-
-       void *sqc = param->sqc;
-       void *sqc_wq = MLX5_ADDR_OF(sqc, sqc, wq);
+       void *sqc_wq               = MLX5_ADDR_OF(sqc, param->sqc, wq);
+       struct mlx5_core_dev *mdev = c->mdev;
        int err;
 
-       sq->type      = param->type;
        sq->pdev      = c->pdev;
-       sq->tstamp    = &priv->tstamp;
+       sq->tstamp    = c->tstamp;
        sq->mkey_be   = c->mkey_be;
        sq->channel   = c;
-       sq->tc        = tc;
+       sq->txq_ix    = txq_ix;
+       sq->uar_map   = mdev->mlx5e_res.bfreg.map;
+       sq->max_inline      = params->tx_max_inline;
+       sq->min_inline_mode = params->tx_min_inline_mode;
 
-       err = mlx5_alloc_bfreg(mdev, &sq->bfreg, MLX5_CAP_GEN(mdev, bf), false);
-       if (err)
-               return err;
-
-       sq->uar_map = sq->bfreg.map;
        param->wq.db_numa_node = cpu_to_node(c->cpu);
-
-       err = mlx5_wq_cyc_create(mdev, &param->wq, sqc_wq, &sq->wq,
-                                &sq->wq_ctrl);
+       err = mlx5_wq_cyc_create(mdev, &param->wq, sqc_wq, &sq->wq, &sq->wq_ctrl);
        if (err)
-               goto err_unmap_free_uar;
-
-       sq->wq.db       = &sq->wq.db[MLX5_SND_DBR];
-       if (sq->bfreg.wc)
-               set_bit(MLX5E_SQ_STATE_BF_ENABLE, &sq->state);
-
-       sq->bf_buf_size = (1 << MLX5_CAP_GEN(mdev, log_bf_reg_size)) / 2;
-       sq->max_inline  = param->max_inline;
-       sq->min_inline_mode = param->min_inline_mode;
+               return err;
+       sq->wq.db    = &sq->wq.db[MLX5_SND_DBR];
 
-       err = mlx5e_alloc_sq_db(sq, cpu_to_node(c->cpu));
+       err = mlx5e_alloc_txqsq_db(sq, cpu_to_node(c->cpu));
        if (err)
                goto err_sq_wq_destroy;
 
-       if (sq->type == MLX5E_SQ_TXQ) {
-               int txq_ix;
-
-               txq_ix = c->ix + tc * priv->params.num_channels;
-               sq->txq = netdev_get_tx_queue(priv->netdev, txq_ix);
-               priv->txq_to_sq_map[txq_ix] = sq;
-       }
-
-       sq->edge = (sq->wq.sz_m1 + 1) - mlx5e_sq_get_max_wqebbs(sq->type);
-       sq->bf_budget = MLX5E_SQ_BF_BUDGET;
+       sq->edge = (sq->wq.sz_m1 + 1) - MLX5_SEND_WQE_MAX_WQEBBS;
 
        return 0;
 
 err_sq_wq_destroy:
        mlx5_wq_destroy(&sq->wq_ctrl);
 
-err_unmap_free_uar:
-       mlx5_free_bfreg(mdev, &sq->bfreg);
-
        return err;
 }
 
-static void mlx5e_destroy_sq(struct mlx5e_sq *sq)
+static void mlx5e_free_txqsq(struct mlx5e_txqsq *sq)
 {
-       struct mlx5e_channel *c = sq->channel;
-       struct mlx5e_priv *priv = c->priv;
-
-       mlx5e_free_sq_db(sq);
+       mlx5e_free_txqsq_db(sq);
        mlx5_wq_destroy(&sq->wq_ctrl);
-       mlx5_free_bfreg(priv->mdev, &sq->bfreg);
 }
 
-static int mlx5e_enable_sq(struct mlx5e_sq *sq, struct mlx5e_sq_param *param)
-{
-       struct mlx5e_channel *c = sq->channel;
-       struct mlx5e_priv *priv = c->priv;
-       struct mlx5_core_dev *mdev = priv->mdev;
+struct mlx5e_create_sq_param {
+       struct mlx5_wq_ctrl        *wq_ctrl;
+       u32                         cqn;
+       u32                         tisn;
+       u8                          tis_lst_sz;
+       u8                          min_inline_mode;
+};
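+
+/* The FW create-side attributes (CQN, TIS, inline mode, WQ control) are
+ * bundled in mlx5e_create_sq_param so a single mlx5e_create_sq() can
+ * serve the TXQ, ICO and XDP SQ flavors opened further below.
+ */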
 
+static int mlx5e_create_sq(struct mlx5_core_dev *mdev,
+                          struct mlx5e_sq_param *param,
+                          struct mlx5e_create_sq_param *csp,
+                          u32 *sqn)
+{
        void *in;
        void *sqc;
        void *wq;
@@ -1086,7 +1132,7 @@ static int mlx5e_enable_sq(struct mlx5e_sq *sq, struct mlx5e_sq_param *param)
        int err;
 
        inlen = MLX5_ST_SZ_BYTES(create_sq_in) +
-               sizeof(u64) * sq->wq_ctrl.buf.npages;
+               sizeof(u64) * csp->wq_ctrl->buf.npages;
        in = mlx5_vzalloc(inlen);
        if (!in)
                return -ENOMEM;
@@ -1095,40 +1141,40 @@ static int mlx5e_enable_sq(struct mlx5e_sq *sq, struct mlx5e_sq_param *param)
        wq = MLX5_ADDR_OF(sqc, sqc, wq);
 
        memcpy(sqc, param->sqc, sizeof(param->sqc));
-
-       MLX5_SET(sqc,  sqc, tis_num_0, param->type == MLX5E_SQ_ICO ?
-                                      0 : priv->tisn[sq->tc]);
-       MLX5_SET(sqc,  sqc, cqn,                sq->cq.mcq.cqn);
+       MLX5_SET(sqc,  sqc, tis_lst_sz, csp->tis_lst_sz);
+       MLX5_SET(sqc,  sqc, tis_num_0, csp->tisn);
+       MLX5_SET(sqc,  sqc, cqn, csp->cqn);
 
        if (MLX5_CAP_ETH(mdev, wqe_inline_mode) == MLX5_CAP_INLINE_MODE_VPORT_CONTEXT)
-               MLX5_SET(sqc,  sqc, min_wqe_inline_mode, sq->min_inline_mode);
+               MLX5_SET(sqc,  sqc, min_wqe_inline_mode, csp->min_inline_mode);
 
-       MLX5_SET(sqc,  sqc, state,              MLX5_SQC_STATE_RST);
-       MLX5_SET(sqc,  sqc, tis_lst_sz, param->type == MLX5E_SQ_ICO ? 0 : 1);
+       MLX5_SET(sqc,  sqc, state, MLX5_SQC_STATE_RST);
 
        MLX5_SET(wq,   wq, wq_type,       MLX5_WQ_TYPE_CYCLIC);
-       MLX5_SET(wq,   wq, uar_page,      sq->bfreg.index);
-       MLX5_SET(wq,   wq, log_wq_pg_sz,  sq->wq_ctrl.buf.page_shift -
+       MLX5_SET(wq,   wq, uar_page,      mdev->mlx5e_res.bfreg.index);
+       MLX5_SET(wq,   wq, log_wq_pg_sz,  csp->wq_ctrl->buf.page_shift -
                                          MLX5_ADAPTER_PAGE_SHIFT);
-       MLX5_SET64(wq, wq, dbr_addr,      sq->wq_ctrl.db.dma);
+       MLX5_SET64(wq, wq, dbr_addr,      csp->wq_ctrl->db.dma);
 
-       mlx5_fill_page_array(&sq->wq_ctrl.buf,
-                            (__be64 *)MLX5_ADDR_OF(wq, wq, pas));
+       mlx5_fill_page_array(&csp->wq_ctrl->buf, (__be64 *)MLX5_ADDR_OF(wq, wq, pas));
 
-       err = mlx5_core_create_sq(mdev, in, inlen, &sq->sqn);
+       err = mlx5_core_create_sq(mdev, in, inlen, sqn);
 
        kvfree(in);
 
        return err;
 }
 
-static int mlx5e_modify_sq(struct mlx5e_sq *sq, int curr_state,
-                          int next_state, bool update_rl, int rl_index)
-{
-       struct mlx5e_channel *c = sq->channel;
-       struct mlx5e_priv *priv = c->priv;
-       struct mlx5_core_dev *mdev = priv->mdev;
+struct mlx5e_modify_sq_param {
+       int curr_state;
+       int next_state;
+       bool rl_update;
+       int rl_index;
+};
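+
+/* One modify path covers both the RST->RDY bring-up transition and
+ * runtime rate-limit updates (rl_update/rl_index on a RDY->RDY modify,
+ * see mlx5e_set_sq_maxrate()).
+ */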
 
+static int mlx5e_modify_sq(struct mlx5_core_dev *mdev, u32 sqn,
+                          struct mlx5e_modify_sq_param *p)
+{
        void *in;
        void *sqc;
        int inlen;
@@ -1141,68 +1187,94 @@ static int mlx5e_modify_sq(struct mlx5e_sq *sq, int curr_state,
 
        sqc = MLX5_ADDR_OF(modify_sq_in, in, ctx);
 
-       MLX5_SET(modify_sq_in, in, sq_state, curr_state);
-       MLX5_SET(sqc, sqc, state, next_state);
-       if (update_rl && next_state == MLX5_SQC_STATE_RDY) {
+       MLX5_SET(modify_sq_in, in, sq_state, p->curr_state);
+       MLX5_SET(sqc, sqc, state, p->next_state);
+       if (p->rl_update && p->next_state == MLX5_SQC_STATE_RDY) {
                MLX5_SET64(modify_sq_in, in, modify_bitmask, 1);
-               MLX5_SET(sqc,  sqc, packet_pacing_rate_limit_index, rl_index);
+               MLX5_SET(sqc,  sqc, packet_pacing_rate_limit_index, p->rl_index);
        }
 
-       err = mlx5_core_modify_sq(mdev, sq->sqn, in, inlen);
+       err = mlx5_core_modify_sq(mdev, sqn, in, inlen);
 
        kvfree(in);
 
        return err;
 }
 
-static void mlx5e_disable_sq(struct mlx5e_sq *sq)
+static void mlx5e_destroy_sq(struct mlx5_core_dev *mdev, u32 sqn)
 {
-       struct mlx5e_channel *c = sq->channel;
-       struct mlx5e_priv *priv = c->priv;
-       struct mlx5_core_dev *mdev = priv->mdev;
-
-       mlx5_core_destroy_sq(mdev, sq->sqn);
-       if (sq->rate_limit)
-               mlx5_rl_remove_rate(mdev, sq->rate_limit);
+       mlx5_core_destroy_sq(mdev, sqn);
 }
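+
+/* Bring-up helper: create the SQ in RST state and move it straight to
+ * RDY, destroying it again if the modify fails so callers never see a
+ * half-initialized SQ.
+ */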
 
-static int mlx5e_open_sq(struct mlx5e_channel *c,
-                        int tc,
-                        struct mlx5e_sq_param *param,
-                        struct mlx5e_sq *sq)
+static int mlx5e_create_sq_rdy(struct mlx5_core_dev *mdev,
+                              struct mlx5e_sq_param *param,
+                              struct mlx5e_create_sq_param *csp,
+                              u32 *sqn)
 {
+       struct mlx5e_modify_sq_param msp = {0};
        int err;
 
-       err = mlx5e_create_sq(c, tc, param, sq);
+       err = mlx5e_create_sq(mdev, param, csp, sqn);
        if (err)
                return err;
 
-       err = mlx5e_enable_sq(sq, param);
+       msp.curr_state = MLX5_SQC_STATE_RST;
+       msp.next_state = MLX5_SQC_STATE_RDY;
+       err = mlx5e_modify_sq(mdev, *sqn, &msp);
        if (err)
-               goto err_destroy_sq;
+               mlx5e_destroy_sq(mdev, *sqn);
 
-       set_bit(MLX5E_SQ_STATE_ENABLED, &sq->state);
-       err = mlx5e_modify_sq(sq, MLX5_SQC_STATE_RST, MLX5_SQC_STATE_RDY,
-                             false, 0);
+       return err;
+}
+
+static int mlx5e_set_sq_maxrate(struct net_device *dev,
+                               struct mlx5e_txqsq *sq, u32 rate);
+
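+/* mlx5e_open_txqsq() composes the helpers above: allocate the SW ring,
+ * create the HW SQ in RDY state, then reapply any tx rate limit that was
+ * configured for this txq before the channel was (re)opened.
+ */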
+static int mlx5e_open_txqsq(struct mlx5e_channel *c,
+                           u32 tisn,
+                           int txq_ix,
+                           struct mlx5e_params *params,
+                           struct mlx5e_sq_param *param,
+                           struct mlx5e_txqsq *sq)
+{
+       struct mlx5e_create_sq_param csp = {};
+       u32 tx_rate;
+       int err;
+
+       err = mlx5e_alloc_txqsq(c, txq_ix, params, param, sq);
        if (err)
-               goto err_disable_sq;
+               return err;
 
-       if (sq->txq) {
-               netdev_tx_reset_queue(sq->txq);
-               netif_tx_start_queue(sq->txq);
-       }
+       csp.tisn            = tisn;
+       csp.tis_lst_sz      = 1;
+       csp.cqn             = sq->cq.mcq.cqn;
+       csp.wq_ctrl         = &sq->wq_ctrl;
+       csp.min_inline_mode = sq->min_inline_mode;
+       err = mlx5e_create_sq_rdy(c->mdev, param, &csp, &sq->sqn);
+       if (err)
+               goto err_free_txqsq;
+
+       tx_rate = c->priv->tx_rates[sq->txq_ix];
+       if (tx_rate)
+               mlx5e_set_sq_maxrate(c->netdev, sq, tx_rate);
 
        return 0;
 
-err_disable_sq:
+err_free_txqsq:
        clear_bit(MLX5E_SQ_STATE_ENABLED, &sq->state);
-       mlx5e_disable_sq(sq);
-err_destroy_sq:
-       mlx5e_destroy_sq(sq);
+       mlx5e_free_txqsq(sq);
 
        return err;
 }
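+
+/* Activation is split from open: binding the netdev txq and starting the
+ * queue happen only once the whole channel set is up, presumably to keep
+ * the stack from transmitting on a channel that is still being built.
+ */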
 
+static void mlx5e_activate_txqsq(struct mlx5e_txqsq *sq)
+{
+       sq->txq = netdev_get_tx_queue(sq->channel->netdev, sq->txq_ix);
+       set_bit(MLX5E_SQ_STATE_ENABLED, &sq->state);
+       netdev_tx_reset_queue(sq->txq);
+       netif_tx_start_queue(sq->txq);
+}
+
 static inline void netif_tx_disable_queue(struct netdev_queue *txq)
 {
        __netif_tx_lock_bh(txq);
@@ -1210,43 +1282,153 @@ static inline void netif_tx_disable_queue(struct netdev_queue *txq)
        __netif_tx_unlock_bh(txq);
 }
 
-static void mlx5e_close_sq(struct mlx5e_sq *sq)
+static void mlx5e_deactivate_txqsq(struct mlx5e_txqsq *sq)
 {
+       struct mlx5e_channel *c = sq->channel;
+
        clear_bit(MLX5E_SQ_STATE_ENABLED, &sq->state);
        /* prevent netif_tx_wake_queue */
-       napi_synchronize(&sq->channel->napi);
+       napi_synchronize(&c->napi);
 
-       if (sq->txq) {
-               netif_tx_disable_queue(sq->txq);
+       netif_tx_disable_queue(sq->txq);
 
-               /* last doorbell out, godspeed .. */
-               if (mlx5e_sq_has_room_for(sq, 1)) {
-                       sq->db.txq.skb[(sq->pc & sq->wq.sz_m1)] = NULL;
-                       mlx5e_send_nop(sq, true);
-               }
+       /* last doorbell out, godspeed .. */
+       if (mlx5e_wqc_has_room_for(&sq->wq, sq->cc, sq->pc, 1)) {
+               struct mlx5e_tx_wqe *nop;
+
+               sq->db.skb[(sq->pc & sq->wq.sz_m1)] = NULL;
+               nop = mlx5e_post_nop(&sq->wq, sq->sqn, &sq->pc);
+               mlx5e_notify_hw(&sq->wq, sq->pc, sq->uar_map, &nop->ctrl);
        }
+}
+
+static void mlx5e_close_txqsq(struct mlx5e_txqsq *sq)
+{
+       struct mlx5e_channel *c = sq->channel;
+       struct mlx5_core_dev *mdev = c->mdev;
 
-       mlx5e_disable_sq(sq);
-       mlx5e_free_sq_descs(sq);
-       mlx5e_destroy_sq(sq);
+       mlx5e_destroy_sq(mdev, sq->sqn);
+       if (sq->rate_limit)
+               mlx5_rl_remove_rate(mdev, sq->rate_limit);
+       mlx5e_free_txqsq_descs(sq);
+       mlx5e_free_txqsq(sq);
 }
 
-static int mlx5e_create_cq(struct mlx5e_channel *c,
-                          struct mlx5e_cq_param *param,
-                          struct mlx5e_cq *cq)
+static int mlx5e_open_icosq(struct mlx5e_channel *c,
+                           struct mlx5e_params *params,
+                           struct mlx5e_sq_param *param,
+                           struct mlx5e_icosq *sq)
+{
+       struct mlx5e_create_sq_param csp = {};
+       int err;
+
+       err = mlx5e_alloc_icosq(c, param, sq);
+       if (err)
+               return err;
+
+       csp.cqn             = sq->cq.mcq.cqn;
+       csp.wq_ctrl         = &sq->wq_ctrl;
+       csp.min_inline_mode = params->tx_min_inline_mode;
+       set_bit(MLX5E_SQ_STATE_ENABLED, &sq->state);
+       err = mlx5e_create_sq_rdy(c->mdev, param, &csp, &sq->sqn);
+       if (err)
+               goto err_free_icosq;
+
+       return 0;
+
+err_free_icosq:
+       clear_bit(MLX5E_SQ_STATE_ENABLED, &sq->state);
+       mlx5e_free_icosq(sq);
+
+       return err;
+}
+
+static void mlx5e_close_icosq(struct mlx5e_icosq *sq)
+{
+       struct mlx5e_channel *c = sq->channel;
+
+       clear_bit(MLX5E_SQ_STATE_ENABLED, &sq->state);
+       napi_synchronize(&c->napi);
+
+       mlx5e_destroy_sq(c->mdev, sq->sqn);
+       mlx5e_free_icosq(sq);
+}
+
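+/* XDP SQs send fixed-layout single-packet WQEs, so most WQE fields can be
+ * pre-initialized once at open time (the loop below); only the per-packet
+ * data segment contents change on the transmit path.
+ */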
+static int mlx5e_open_xdpsq(struct mlx5e_channel *c,
+                           struct mlx5e_params *params,
+                           struct mlx5e_sq_param *param,
+                           struct mlx5e_xdpsq *sq)
+{
+       unsigned int ds_cnt = MLX5E_XDP_TX_DS_COUNT;
+       struct mlx5e_create_sq_param csp = {};
+       unsigned int inline_hdr_sz = 0;
+       int err;
+       int i;
+
+       err = mlx5e_alloc_xdpsq(c, params, param, sq);
+       if (err)
+               return err;
+
+       csp.tis_lst_sz      = 1;
+       csp.tisn            = c->priv->tisn[0]; /* tc = 0 */
+       csp.cqn             = sq->cq.mcq.cqn;
+       csp.wq_ctrl         = &sq->wq_ctrl;
+       csp.min_inline_mode = sq->min_inline_mode;
+       set_bit(MLX5E_SQ_STATE_ENABLED, &sq->state);
+       err = mlx5e_create_sq_rdy(c->mdev, param, &csp, &sq->sqn);
+       if (err)
+               goto err_free_xdpsq;
+
+       if (sq->min_inline_mode != MLX5_INLINE_MODE_NONE) {
+               inline_hdr_sz = MLX5E_XDP_MIN_INLINE;
+               ds_cnt++;
+       }
+
+       /* Pre-initialize fixed WQE fields */
+       for (i = 0; i < mlx5_wq_cyc_get_size(&sq->wq); i++) {
+               struct mlx5e_tx_wqe      *wqe  = mlx5_wq_cyc_get_wqe(&sq->wq, i);
+               struct mlx5_wqe_ctrl_seg *cseg = &wqe->ctrl;
+               struct mlx5_wqe_eth_seg  *eseg = &wqe->eth;
+               struct mlx5_wqe_data_seg *dseg;
+
+               cseg->qpn_ds = cpu_to_be32((sq->sqn << 8) | ds_cnt);
+               eseg->inline_hdr.sz = cpu_to_be16(inline_hdr_sz);
+
+               dseg = (struct mlx5_wqe_data_seg *)cseg + (ds_cnt - 1);
+               dseg->lkey = sq->mkey_be;
+       }
+
+       return 0;
+
+err_free_xdpsq:
+       clear_bit(MLX5E_SQ_STATE_ENABLED, &sq->state);
+       mlx5e_free_xdpsq(sq);
+
+       return err;
+}
+
+static void mlx5e_close_xdpsq(struct mlx5e_xdpsq *sq)
+{
+       struct mlx5e_channel *c = sq->channel;
+
+       clear_bit(MLX5E_SQ_STATE_ENABLED, &sq->state);
+       napi_synchronize(&c->napi);
+
+       mlx5e_destroy_sq(c->mdev, sq->sqn);
+       mlx5e_free_xdpsq_descs(sq);
+       mlx5e_free_xdpsq(sq);
+}
+
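+/* CQ allocation is split into a common part that needs only the core
+ * device (reused by the drop RQ path further below) and a channel-aware
+ * wrapper that binds the CQ to the channel's NUMA node, EQ index and
+ * NAPI context.
+ */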
+static int mlx5e_alloc_cq_common(struct mlx5_core_dev *mdev,
+                                struct mlx5e_cq_param *param,
+                                struct mlx5e_cq *cq)
 {
-       struct mlx5e_priv *priv = c->priv;
-       struct mlx5_core_dev *mdev = priv->mdev;
        struct mlx5_core_cq *mcq = &cq->mcq;
        int eqn_not_used;
        unsigned int irqn;
        int err;
        u32 i;
 
-       param->wq.buf_numa_node = cpu_to_node(c->cpu);
-       param->wq.db_numa_node  = cpu_to_node(c->cpu);
-       param->eq_ix   = c->ix;
-
        err = mlx5_cqwq_create(mdev, &param->wq, param->cqc, &cq->wq,
                               &cq->wq_ctrl);
        if (err)
@@ -1254,8 +1436,6 @@ static int mlx5e_create_cq(struct mlx5e_channel *c,
 
        mlx5_vector2eqn(mdev, param->eq_ix, &eqn_not_used, &irqn);
 
-       cq->napi        = &c->napi;
-
        mcq->cqe_sz     = 64;
        mcq->set_ci_db  = cq->wq_ctrl.db.db;
        mcq->arm_db     = cq->wq_ctrl.db.db + 1;
@@ -1272,21 +1452,38 @@ static int mlx5e_create_cq(struct mlx5e_channel *c,
                cqe->op_own = 0xf1;
        }
 
-       cq->channel = c;
-       cq->priv = priv;
+       cq->mdev = mdev;
 
        return 0;
 }
 
-static void mlx5e_destroy_cq(struct mlx5e_cq *cq)
+static int mlx5e_alloc_cq(struct mlx5e_channel *c,
+                         struct mlx5e_cq_param *param,
+                         struct mlx5e_cq *cq)
+{
+       struct mlx5_core_dev *mdev = c->priv->mdev;
+       int err;
+
+       param->wq.buf_numa_node = cpu_to_node(c->cpu);
+       param->wq.db_numa_node  = cpu_to_node(c->cpu);
+       param->eq_ix   = c->ix;
+
+       err = mlx5e_alloc_cq_common(mdev, param, cq);
+
+       cq->napi    = &c->napi;
+       cq->channel = c;
+
+       return err;
+}
+
+static void mlx5e_free_cq(struct mlx5e_cq *cq)
 {
        mlx5_cqwq_destroy(&cq->wq_ctrl);
 }
 
-static int mlx5e_enable_cq(struct mlx5e_cq *cq, struct mlx5e_cq_param *param)
+static int mlx5e_create_cq(struct mlx5e_cq *cq, struct mlx5e_cq_param *param)
 {
-       struct mlx5e_priv *priv = cq->priv;
-       struct mlx5_core_dev *mdev = priv->mdev;
+       struct mlx5_core_dev *mdev = cq->mdev;
        struct mlx5_core_cq *mcq = &cq->mcq;
 
        void *in;
@@ -1330,47 +1527,41 @@ static int mlx5e_enable_cq(struct mlx5e_cq *cq, struct mlx5e_cq_param *param)
        return 0;
 }
 
-static void mlx5e_disable_cq(struct mlx5e_cq *cq)
+static void mlx5e_destroy_cq(struct mlx5e_cq *cq)
 {
-       struct mlx5e_priv *priv = cq->priv;
-       struct mlx5_core_dev *mdev = priv->mdev;
-
-       mlx5_core_destroy_cq(mdev, &cq->mcq);
+       mlx5_core_destroy_cq(cq->mdev, &cq->mcq);
 }
 
 static int mlx5e_open_cq(struct mlx5e_channel *c,
+                        struct mlx5e_cq_moder moder,
                         struct mlx5e_cq_param *param,
-                        struct mlx5e_cq *cq,
-                        struct mlx5e_cq_moder moderation)
+                        struct mlx5e_cq *cq)
 {
+       struct mlx5_core_dev *mdev = c->mdev;
        int err;
-       struct mlx5e_priv *priv = c->priv;
-       struct mlx5_core_dev *mdev = priv->mdev;
 
-       err = mlx5e_create_cq(c, param, cq);
+       err = mlx5e_alloc_cq(c, param, cq);
        if (err)
                return err;
 
-       err = mlx5e_enable_cq(cq, param);
+       err = mlx5e_create_cq(cq, param);
        if (err)
-               goto err_destroy_cq;
+               goto err_free_cq;
 
        if (MLX5_CAP_GEN(mdev, cq_moderation))
-               mlx5_core_modify_cq_moderation(mdev, &cq->mcq,
-                                              moderation.usec,
-                                              moderation.pkts);
+               mlx5_core_modify_cq_moderation(mdev, &cq->mcq, moder.usec, moder.pkts);
        return 0;
 
-err_destroy_cq:
-       mlx5e_destroy_cq(cq);
+err_free_cq:
+       mlx5e_free_cq(cq);
 
        return err;
 }
 
 static void mlx5e_close_cq(struct mlx5e_cq *cq)
 {
-       mlx5e_disable_cq(cq);
        mlx5e_destroy_cq(cq);
+       mlx5e_free_cq(cq);
 }
 
 static int mlx5e_get_cpu(struct mlx5e_priv *priv, int ix)
@@ -1379,15 +1570,15 @@ static int mlx5e_get_cpu(struct mlx5e_priv *priv, int ix)
 }
 
 static int mlx5e_open_tx_cqs(struct mlx5e_channel *c,
+                            struct mlx5e_params *params,
                             struct mlx5e_channel_param *cparam)
 {
-       struct mlx5e_priv *priv = c->priv;
        int err;
        int tc;
 
        for (tc = 0; tc < c->num_tc; tc++) {
-               err = mlx5e_open_cq(c, &cparam->tx_cq, &c->sq[tc].cq,
-                                   priv->params.tx_cq_moderation);
+               err = mlx5e_open_cq(c, params->tx_cq_moderation,
+                                   &cparam->tx_cq, &c->sq[tc].cq);
                if (err)
                        goto err_close_tx_cqs;
        }
@@ -1410,13 +1601,17 @@ static void mlx5e_close_tx_cqs(struct mlx5e_channel *c)
 }
 
 static int mlx5e_open_sqs(struct mlx5e_channel *c,
+                         struct mlx5e_params *params,
                          struct mlx5e_channel_param *cparam)
 {
        int err;
        int tc;
 
-       for (tc = 0; tc < c->num_tc; tc++) {
-               err = mlx5e_open_sq(c, tc, &cparam->sq, &c->sq[tc]);
+       for (tc = 0; tc < params->num_tc; tc++) {
+               int txq_ix = c->ix + tc * params->num_channels;
+
+               err = mlx5e_open_txqsq(c, c->priv->tisn[tc], txq_ix,
+                                      params, &cparam->sq, &c->sq[tc]);
                if (err)
                        goto err_close_sqs;
        }
@@ -1425,7 +1620,7 @@ static int mlx5e_open_sqs(struct mlx5e_channel *c,
 
 err_close_sqs:
        for (tc--; tc >= 0; tc--)
-               mlx5e_close_sq(&c->sq[tc]);
+               mlx5e_close_txqsq(&c->sq[tc]);
 
        return err;
 }
@@ -1435,23 +1630,15 @@ static void mlx5e_close_sqs(struct mlx5e_channel *c)
        int tc;
 
        for (tc = 0; tc < c->num_tc; tc++)
-               mlx5e_close_sq(&c->sq[tc]);
-}
-
-static void mlx5e_build_channeltc_to_txq_map(struct mlx5e_priv *priv, int ix)
-{
-       int i;
-
-       for (i = 0; i < priv->profile->max_tc; i++)
-               priv->channeltc_to_txq_map[ix][i] =
-                       ix + i * priv->params.num_channels;
+               mlx5e_close_txqsq(&c->sq[tc]);
 }
 
 static int mlx5e_set_sq_maxrate(struct net_device *dev,
-                               struct mlx5e_sq *sq, u32 rate)
+                               struct mlx5e_txqsq *sq, u32 rate)
 {
        struct mlx5e_priv *priv = netdev_priv(dev);
        struct mlx5_core_dev *mdev = priv->mdev;
+       struct mlx5e_modify_sq_param msp = {0};
        u16 rl_index = 0;
        int err;
 
@@ -1474,8 +1661,11 @@ static int mlx5e_set_sq_maxrate(struct net_device *dev,
                }
        }
 
-       err = mlx5e_modify_sq(sq, MLX5_SQC_STATE_RDY,
-                             MLX5_SQC_STATE_RDY, true, rl_index);
+       msp.curr_state = MLX5_SQC_STATE_RDY;
+       msp.next_state = MLX5_SQC_STATE_RDY;
+       msp.rl_index   = rl_index;
+       msp.rl_update  = true;
+       err = mlx5e_modify_sq(mdev, sq->sqn, &msp);
        if (err) {
                netdev_err(dev, "Failed configuring rate %u: %d\n",
                           rate, err);
@@ -1493,7 +1683,7 @@ static int mlx5e_set_tx_maxrate(struct net_device *dev, int index, u32 rate)
 {
        struct mlx5e_priv *priv = netdev_priv(dev);
        struct mlx5_core_dev *mdev = priv->mdev;
-       struct mlx5e_sq *sq = priv->txq_to_sq_map[index];
+       struct mlx5e_txqsq *sq = priv->txq2sq[index];
        int err = 0;
 
        if (!mlx5_rl_is_supported(mdev)) {
@@ -1529,105 +1719,86 @@ static inline int mlx5e_get_max_num_channels(struct mlx5_core_dev *mdev)
 }
 
 static int mlx5e_open_channel(struct mlx5e_priv *priv, int ix,
+                             struct mlx5e_params *params,
                              struct mlx5e_channel_param *cparam,
                              struct mlx5e_channel **cp)
 {
-       struct mlx5e_cq_moder icosq_cq_moder = {0, 0};
+       struct mlx5e_cq_moder icocq_moder = {0, 0};
        struct net_device *netdev = priv->netdev;
-       struct mlx5e_cq_moder rx_cq_profile;
        int cpu = mlx5e_get_cpu(priv, ix);
        struct mlx5e_channel *c;
-       struct mlx5e_sq *sq;
        int err;
-       int i;
 
        c = kzalloc_node(sizeof(*c), GFP_KERNEL, cpu_to_node(cpu));
        if (!c)
                return -ENOMEM;
 
        c->priv     = priv;
+       c->mdev     = priv->mdev;
+       c->tstamp   = &priv->tstamp;
        c->ix       = ix;
        c->cpu      = cpu;
        c->pdev     = &priv->mdev->pdev->dev;
        c->netdev   = priv->netdev;
        c->mkey_be  = cpu_to_be32(priv->mdev->mlx5e_res.mkey.key);
-       c->num_tc   = priv->params.num_tc;
-       c->xdp      = !!priv->xdp_prog;
-
-       if (priv->params.rx_am_enabled)
-               rx_cq_profile = mlx5e_am_get_def_profile(priv->params.rx_cq_period_mode);
-       else
-               rx_cq_profile = priv->params.rx_cq_moderation;
-
-       mlx5e_build_channeltc_to_txq_map(priv, ix);
+       c->num_tc   = params->num_tc;
+       c->xdp      = !!params->xdp_prog;
 
        netif_napi_add(netdev, &c->napi, mlx5e_napi_poll, 64);
 
-       err = mlx5e_open_cq(c, &cparam->icosq_cq, &c->icosq.cq, icosq_cq_moder);
+       err = mlx5e_open_cq(c, icocq_moder, &cparam->icosq_cq, &c->icosq.cq);
        if (err)
                goto err_napi_del;
 
-       err = mlx5e_open_tx_cqs(c, cparam);
+       err = mlx5e_open_tx_cqs(c, params, cparam);
        if (err)
                goto err_close_icosq_cq;
 
-       err = mlx5e_open_cq(c, &cparam->rx_cq, &c->rq.cq,
-                           rx_cq_profile);
+       err = mlx5e_open_cq(c, params->rx_cq_moderation, &cparam->rx_cq, &c->rq.cq);
        if (err)
                goto err_close_tx_cqs;
 
        /* XDP SQ CQ params are the same as normal TXQ SQ CQ params */
-       err = c->xdp ? mlx5e_open_cq(c, &cparam->tx_cq, &c->xdp_sq.cq,
-                                    priv->params.tx_cq_moderation) : 0;
+       err = c->xdp ? mlx5e_open_cq(c, params->tx_cq_moderation,
+                                    &cparam->tx_cq, &c->rq.xdpsq.cq) : 0;
        if (err)
                goto err_close_rx_cq;
 
        napi_enable(&c->napi);
 
-       err = mlx5e_open_sq(c, 0, &cparam->icosq, &c->icosq);
+       err = mlx5e_open_icosq(c, params, &cparam->icosq, &c->icosq);
        if (err)
                goto err_disable_napi;
 
-       err = mlx5e_open_sqs(c, cparam);
+       err = mlx5e_open_sqs(c, params, cparam);
        if (err)
                goto err_close_icosq;
 
-       for (i = 0; i < priv->params.num_tc; i++) {
-               u32 txq_ix = priv->channeltc_to_txq_map[ix][i];
-
-               if (priv->tx_rates[txq_ix]) {
-                       sq = priv->txq_to_sq_map[txq_ix];
-                       mlx5e_set_sq_maxrate(priv->netdev, sq,
-                                            priv->tx_rates[txq_ix]);
-               }
-       }
-
-       err = c->xdp ? mlx5e_open_sq(c, 0, &cparam->xdp_sq, &c->xdp_sq) : 0;
+       err = c->xdp ? mlx5e_open_xdpsq(c, params, &cparam->xdp_sq, &c->rq.xdpsq) : 0;
        if (err)
                goto err_close_sqs;
 
-       err = mlx5e_open_rq(c, &cparam->rq, &c->rq);
+       err = mlx5e_open_rq(c, params, &cparam->rq, &c->rq);
        if (err)
                goto err_close_xdp_sq;
 
-       netif_set_xps_queue(netdev, get_cpu_mask(c->cpu), ix);
        *cp = c;
 
        return 0;
 err_close_xdp_sq:
        if (c->xdp)
-               mlx5e_close_sq(&c->xdp_sq);
+               mlx5e_close_xdpsq(&c->rq.xdpsq);
 
 err_close_sqs:
        mlx5e_close_sqs(c);
 
 err_close_icosq:
-       mlx5e_close_sq(&c->icosq);
+       mlx5e_close_icosq(&c->icosq);
 
 err_disable_napi:
        napi_disable(&c->napi);
        if (c->xdp)
-               mlx5e_close_cq(&c->xdp_sq.cq);
+               mlx5e_close_cq(&c->rq.xdpsq.cq);
 
 err_close_rx_cq:
        mlx5e_close_cq(&c->rq.cq);
@@ -1645,16 +1816,35 @@ err_napi_del:
        return err;
 }
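+
+/* Channel activate/deactivate is a lightweight state on top of
+ * open/close: SQs and RQs are attached to or detached from the data path
+ * without tearing down their HW objects.
+ */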
 
+static void mlx5e_activate_channel(struct mlx5e_channel *c)
+{
+       int tc;
+
+       for (tc = 0; tc < c->num_tc; tc++)
+               mlx5e_activate_txqsq(&c->sq[tc]);
+       mlx5e_activate_rq(&c->rq);
+       netif_set_xps_queue(c->netdev, get_cpu_mask(c->cpu), c->ix);
+}
+
+static void mlx5e_deactivate_channel(struct mlx5e_channel *c)
+{
+       int tc;
+
+       mlx5e_deactivate_rq(&c->rq);
+       for (tc = 0; tc < c->num_tc; tc++)
+               mlx5e_deactivate_txqsq(&c->sq[tc]);
+}
+
 static void mlx5e_close_channel(struct mlx5e_channel *c)
 {
        mlx5e_close_rq(&c->rq);
        if (c->xdp)
-               mlx5e_close_sq(&c->xdp_sq);
+               mlx5e_close_xdpsq(&c->rq.xdpsq);
        mlx5e_close_sqs(c);
-       mlx5e_close_sq(&c->icosq);
+       mlx5e_close_icosq(&c->icosq);
        napi_disable(&c->napi);
        if (c->xdp)
-               mlx5e_close_cq(&c->xdp_sq.cq);
+               mlx5e_close_cq(&c->rq.xdpsq.cq);
        mlx5e_close_cq(&c->rq.cq);
        mlx5e_close_tx_cqs(c);
        mlx5e_close_cq(&c->icosq.cq);
@@ -1664,17 +1854,16 @@ static void mlx5e_close_channel(struct mlx5e_channel *c)
 }
 
 static void mlx5e_build_rq_param(struct mlx5e_priv *priv,
+                                struct mlx5e_params *params,
                                 struct mlx5e_rq_param *param)
 {
        void *rqc = param->rqc;
        void *wq = MLX5_ADDR_OF(rqc, rqc, wq);
 
-       switch (priv->params.rq_wq_type) {
+       switch (params->rq_wq_type) {
        case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
-               MLX5_SET(wq, wq, log_wqe_num_of_strides,
-                        priv->params.mpwqe_log_num_strides - 9);
-               MLX5_SET(wq, wq, log_wqe_stride_size,
-                        priv->params.mpwqe_log_stride_sz - 6);
+               MLX5_SET(wq, wq, log_wqe_num_of_strides, params->mpwqe_log_num_strides - 9);
+               MLX5_SET(wq, wq, log_wqe_stride_size, params->mpwqe_log_stride_sz - 6);
                MLX5_SET(wq, wq, wq_type, MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ);
                break;
        default: /* MLX5_WQ_TYPE_LINKED_LIST */
@@ -1683,14 +1872,14 @@ static void mlx5e_build_rq_param(struct mlx5e_priv *priv,
 
        MLX5_SET(wq, wq, end_padding_mode, MLX5_WQ_END_PAD_MODE_ALIGN);
        MLX5_SET(wq, wq, log_wq_stride,    ilog2(sizeof(struct mlx5e_rx_wqe)));
-       MLX5_SET(wq, wq, log_wq_sz,        priv->params.log_rq_size);
+       MLX5_SET(wq, wq, log_wq_sz,        params->log_rq_size);
        MLX5_SET(wq, wq, pd,               priv->mdev->mlx5e_res.pdn);
        MLX5_SET(rqc, rqc, counter_set_id, priv->q_counter);
+       MLX5_SET(rqc, rqc, vsd,            params->vlan_strip_disable);
+       MLX5_SET(rqc, rqc, scatter_fcs,    params->scatter_fcs_en);
 
        param->wq.buf_numa_node = dev_to_node(&priv->mdev->pdev->dev);
        param->wq.linear = 1;
-
-       param->am_enabled = priv->params.rx_am_enabled;
 }
 
 static void mlx5e_build_drop_rq_param(struct mlx5e_rq_param *param)
@@ -1715,17 +1904,14 @@ static void mlx5e_build_sq_param_common(struct mlx5e_priv *priv,
 }
 
 static void mlx5e_build_sq_param(struct mlx5e_priv *priv,
+                                struct mlx5e_params *params,
                                 struct mlx5e_sq_param *param)
 {
        void *sqc = param->sqc;
        void *wq = MLX5_ADDR_OF(sqc, sqc, wq);
 
        mlx5e_build_sq_param_common(priv, param);
-       MLX5_SET(wq, wq, log_wq_sz,     priv->params.log_sq_size);
-
-       param->max_inline = priv->params.tx_max_inline;
-       param->min_inline_mode = priv->params.tx_min_inline_mode;
-       param->type = MLX5E_SQ_TXQ;
+       MLX5_SET(wq, wq, log_wq_sz, params->log_sq_size);
 }
 
 static void mlx5e_build_common_cq_param(struct mlx5e_priv *priv,
@@ -1737,37 +1923,36 @@ static void mlx5e_build_common_cq_param(struct mlx5e_priv *priv,
 }
 
 static void mlx5e_build_rx_cq_param(struct mlx5e_priv *priv,
+                                   struct mlx5e_params *params,
                                    struct mlx5e_cq_param *param)
 {
        void *cqc = param->cqc;
        u8 log_cq_size;
 
-       switch (priv->params.rq_wq_type) {
+       switch (params->rq_wq_type) {
        case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
-               log_cq_size = priv->params.log_rq_size +
-                       priv->params.mpwqe_log_num_strides;
+               log_cq_size = params->log_rq_size + params->mpwqe_log_num_strides;
                break;
        default: /* MLX5_WQ_TYPE_LINKED_LIST */
-               log_cq_size = priv->params.log_rq_size;
+               log_cq_size = params->log_rq_size;
        }
 
        MLX5_SET(cqc, cqc, log_cq_size, log_cq_size);
-       if (MLX5E_GET_PFLAG(priv, MLX5E_PFLAG_RX_CQE_COMPRESS)) {
+       if (MLX5E_GET_PFLAG(params, MLX5E_PFLAG_RX_CQE_COMPRESS)) {
                MLX5_SET(cqc, cqc, mini_cqe_res_format, MLX5_CQE_FORMAT_CSUM);
                MLX5_SET(cqc, cqc, cqe_comp_en, 1);
        }
 
        mlx5e_build_common_cq_param(priv, param);
-
-       param->cq_period_mode = priv->params.rx_cq_period_mode;
 }
 
 static void mlx5e_build_tx_cq_param(struct mlx5e_priv *priv,
+                                   struct mlx5e_params *params,
                                    struct mlx5e_cq_param *param)
 {
        void *cqc = param->cqc;
 
-       MLX5_SET(cqc, cqc, log_cq_size, priv->params.log_sq_size);
+       MLX5_SET(cqc, cqc, log_cq_size, params->log_sq_size);
 
        mlx5e_build_common_cq_param(priv, param);
 
@@ -1775,8 +1960,8 @@ static void mlx5e_build_tx_cq_param(struct mlx5e_priv *priv,
 }
 
 static void mlx5e_build_ico_cq_param(struct mlx5e_priv *priv,
-                                    struct mlx5e_cq_param *param,
-                                    u8 log_wq_size)
+                                    u8 log_wq_size,
+                                    struct mlx5e_cq_param *param)
 {
        void *cqc = param->cqc;
 
@@ -1788,8 +1973,8 @@ static void mlx5e_build_ico_cq_param(struct mlx5e_priv *priv,
 }
 
 static void mlx5e_build_icosq_param(struct mlx5e_priv *priv,
-                                   struct mlx5e_sq_param *param,
-                                   u8 log_wq_size)
+                                   u8 log_wq_size,
+                                   struct mlx5e_sq_param *param)
 {
        void *sqc = param->sqc;
        void *wq = MLX5_ADDR_OF(sqc, sqc, wq);
@@ -1798,162 +1983,119 @@ static void mlx5e_build_icosq_param(struct mlx5e_priv *priv,
 
        MLX5_SET(wq, wq, log_wq_sz, log_wq_size);
        MLX5_SET(sqc, sqc, reg_umr, MLX5_CAP_ETH(priv->mdev, reg_umr_sq));
-
-       param->type = MLX5E_SQ_ICO;
 }
 
 static void mlx5e_build_xdpsq_param(struct mlx5e_priv *priv,
+                                   struct mlx5e_params *params,
                                    struct mlx5e_sq_param *param)
 {
        void *sqc = param->sqc;
        void *wq = MLX5_ADDR_OF(sqc, sqc, wq);
 
        mlx5e_build_sq_param_common(priv, param);
-       MLX5_SET(wq, wq, log_wq_sz,     priv->params.log_sq_size);
-
-       param->max_inline = priv->params.tx_max_inline;
-       param->min_inline_mode = priv->params.tx_min_inline_mode;
-       param->type = MLX5E_SQ_XDP;
+       MLX5_SET(wq, wq, log_wq_sz, params->log_sq_size);
 }
 
-static void mlx5e_build_channel_param(struct mlx5e_priv *priv, struct mlx5e_channel_param *cparam)
+static void mlx5e_build_channel_param(struct mlx5e_priv *priv,
+                                     struct mlx5e_params *params,
+                                     struct mlx5e_channel_param *cparam)
 {
        u8 icosq_log_wq_sz = MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE;
 
-       mlx5e_build_rq_param(priv, &cparam->rq);
-       mlx5e_build_sq_param(priv, &cparam->sq);
-       mlx5e_build_xdpsq_param(priv, &cparam->xdp_sq);
-       mlx5e_build_icosq_param(priv, &cparam->icosq, icosq_log_wq_sz);
-       mlx5e_build_rx_cq_param(priv, &cparam->rx_cq);
-       mlx5e_build_tx_cq_param(priv, &cparam->tx_cq);
-       mlx5e_build_ico_cq_param(priv, &cparam->icosq_cq, icosq_log_wq_sz);
+       mlx5e_build_rq_param(priv, params, &cparam->rq);
+       mlx5e_build_sq_param(priv, params, &cparam->sq);
+       mlx5e_build_xdpsq_param(priv, params, &cparam->xdp_sq);
+       mlx5e_build_icosq_param(priv, icosq_log_wq_sz, &cparam->icosq);
+       mlx5e_build_rx_cq_param(priv, params, &cparam->rx_cq);
+       mlx5e_build_tx_cq_param(priv, params, &cparam->tx_cq);
+       mlx5e_build_ico_cq_param(priv, icosq_log_wq_sz, &cparam->icosq_cq);
 }
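+
+/* mlx5e_open_channels() is no longer static and operates on a
+ * caller-owned mlx5e_channels, so a new set can be opened while the
+ * current one keeps serving traffic (see mlx5e_switch_priv_channels()).
+ */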
 
-static int mlx5e_open_channels(struct mlx5e_priv *priv)
+int mlx5e_open_channels(struct mlx5e_priv *priv,
+                       struct mlx5e_channels *chs)
 {
        struct mlx5e_channel_param *cparam;
-       int nch = priv->params.num_channels;
        int err = -ENOMEM;
        int i;
-       int j;
 
-       priv->channel = kcalloc(nch, sizeof(struct mlx5e_channel *),
-                               GFP_KERNEL);
-
-       priv->txq_to_sq_map = kcalloc(nch * priv->params.num_tc,
-                                     sizeof(struct mlx5e_sq *), GFP_KERNEL);
+       chs->num = chs->params.num_channels;
 
+       chs->c = kcalloc(chs->num, sizeof(struct mlx5e_channel *), GFP_KERNEL);
        cparam = kzalloc(sizeof(struct mlx5e_channel_param), GFP_KERNEL);
+       if (!chs->c || !cparam)
+               goto err_free;
 
-       if (!priv->channel || !priv->txq_to_sq_map || !cparam)
-               goto err_free_txq_to_sq_map;
-
-       mlx5e_build_channel_param(priv, cparam);
-
-       for (i = 0; i < nch; i++) {
-               err = mlx5e_open_channel(priv, i, cparam, &priv->channel[i]);
+       mlx5e_build_channel_param(priv, &chs->params, cparam);
+       for (i = 0; i < chs->num; i++) {
+               err = mlx5e_open_channel(priv, i, &chs->params, cparam, &chs->c[i]);
                if (err)
                        goto err_close_channels;
        }
 
-       for (j = 0; j < nch; j++) {
-               err = mlx5e_wait_for_min_rx_wqes(&priv->channel[j]->rq);
-               if (err)
-                       goto err_close_channels;
-       }
-
-       /* FIXME: This is a W/A for tx timeout watch dog false alarm when
-        * polling for inactive tx queues.
-        */
-       netif_tx_start_all_queues(priv->netdev);
-
        kfree(cparam);
        return 0;
 
 err_close_channels:
        for (i--; i >= 0; i--)
-               mlx5e_close_channel(priv->channel[i]);
+               mlx5e_close_channel(chs->c[i]);
 
-err_free_txq_to_sq_map:
-       kfree(priv->txq_to_sq_map);
-       kfree(priv->channel);
+err_free:
+       kfree(chs->c);
        kfree(cparam);
-
+       chs->num = 0;
        return err;
 }
 
-static void mlx5e_close_channels(struct mlx5e_priv *priv)
+static void mlx5e_activate_channels(struct mlx5e_channels *chs)
 {
        int i;
 
-       /* FIXME: This is a W/A only for tx timeout watch dog false alarm when
-        * polling for inactive tx queues.
-        */
-       netif_tx_stop_all_queues(priv->netdev);
-       netif_tx_disable(priv->netdev);
-
-       for (i = 0; i < priv->params.num_channels; i++)
-               mlx5e_close_channel(priv->channel[i]);
-
-       kfree(priv->txq_to_sq_map);
-       kfree(priv->channel);
-}
-
-static int mlx5e_rx_hash_fn(int hfunc)
-{
-       return (hfunc == ETH_RSS_HASH_TOP) ?
-              MLX5_RX_HASH_FN_TOEPLITZ :
-              MLX5_RX_HASH_FN_INVERTED_XOR8;
+       for (i = 0; i < chs->num; i++)
+               mlx5e_activate_channel(chs->c[i]);
 }
 
-static int mlx5e_bits_invert(unsigned long a, int size)
+static int mlx5e_wait_channels_min_rx_wqes(struct mlx5e_channels *chs)
 {
-       int inv = 0;
+       int err = 0;
        int i;
 
-       for (i = 0; i < size; i++)
-               inv |= (test_bit(size - i - 1, &a) ? 1 : 0) << i;
+       for (i = 0; i < chs->num; i++) {
+               err = mlx5e_wait_for_min_rx_wqes(&chs->c[i]->rq);
+               if (err)
+                       break;
+       }
 
-       return inv;
+       return err;
 }
 
-static void mlx5e_fill_indir_rqt_rqns(struct mlx5e_priv *priv, void *rqtc)
+static void mlx5e_deactivate_channels(struct mlx5e_channels *chs)
 {
        int i;
 
-       for (i = 0; i < MLX5E_INDIR_RQT_SIZE; i++) {
-               int ix = i;
-               u32 rqn;
-
-               if (priv->params.rss_hfunc == ETH_RSS_HASH_XOR)
-                       ix = mlx5e_bits_invert(i, MLX5E_LOG_INDIR_RQT_SIZE);
-
-               ix = priv->params.indirection_rqt[ix];
-               rqn = test_bit(MLX5E_STATE_OPENED, &priv->state) ?
-                               priv->channel[ix]->rq.rqn :
-                               priv->drop_rq.rqn;
-               MLX5_SET(rqtc, rqtc, rq_num[i], rqn);
-       }
+       for (i = 0; i < chs->num; i++)
+               mlx5e_deactivate_channel(chs->c[i]);
 }
 
-static void mlx5e_fill_direct_rqt_rqn(struct mlx5e_priv *priv, void *rqtc,
-                                     int ix)
+void mlx5e_close_channels(struct mlx5e_channels *chs)
 {
-       u32 rqn = test_bit(MLX5E_STATE_OPENED, &priv->state) ?
-                       priv->channel[ix]->rq.rqn :
-                       priv->drop_rq.rqn;
+       int i;
 
-       MLX5_SET(rqtc, rqtc, rq_num[0], rqn);
+       for (i = 0; i < chs->num; i++)
+               mlx5e_close_channel(chs->c[i]);
+
+       kfree(chs->c);
+       chs->num = 0;
 }
 
-static int mlx5e_create_rqt(struct mlx5e_priv *priv, int sz,
-                           int ix, struct mlx5e_rqt *rqt)
+static int
+mlx5e_create_rqt(struct mlx5e_priv *priv, int sz, struct mlx5e_rqt *rqt)
 {
        struct mlx5_core_dev *mdev = priv->mdev;
        void *rqtc;
        int inlen;
        int err;
        u32 *in;
+       int i;
 
        inlen = MLX5_ST_SZ_BYTES(create_rqt_in) + sizeof(u32) * sz;
        in = mlx5_vzalloc(inlen);
@@ -1965,10 +2107,8 @@ static int mlx5e_create_rqt(struct mlx5e_priv *priv, int sz,
        MLX5_SET(rqtc, rqtc, rqt_actual_size, sz);
        MLX5_SET(rqtc, rqtc, rqt_max_size, sz);
 
-       if (sz > 1) /* RSS */
-               mlx5e_fill_indir_rqt_rqns(priv, rqtc);
-       else
-               mlx5e_fill_direct_rqt_rqn(priv, rqtc, ix);
+       for (i = 0; i < sz; i++)
+               MLX5_SET(rqtc, rqtc, rq_num[i], priv->drop_rq.rqn);
 
        err = mlx5_core_create_rqt(mdev, in, inlen, &rqt->rqtn);
        if (!err)
@@ -1988,7 +2128,7 @@ static int mlx5e_create_indirect_rqts(struct mlx5e_priv *priv)
 {
        struct mlx5e_rqt *rqt = &priv->indir_rqt;
 
-       return mlx5e_create_rqt(priv, MLX5E_INDIR_RQT_SIZE, 0, rqt);
+       return mlx5e_create_rqt(priv, MLX5E_INDIR_RQT_SIZE, rqt);
 }
 
 int mlx5e_create_direct_rqts(struct mlx5e_priv *priv)
@@ -1999,7 +2139,7 @@ int mlx5e_create_direct_rqts(struct mlx5e_priv *priv)
 
        for (ix = 0; ix < priv->profile->max_nch(priv->mdev); ix++) {
                rqt = &priv->direct_tir[ix].rqt;
-               err = mlx5e_create_rqt(priv, 1 /*size */, ix, rqt);
+               err = mlx5e_create_rqt(priv, 1 /*size */, rqt);
                if (err)
                        goto err_destroy_rqts;
        }
@@ -2013,7 +2153,49 @@ err_destroy_rqts:
        return err;
 }
 
-int mlx5e_redirect_rqt(struct mlx5e_priv *priv, u32 rqtn, int sz, int ix)
+static int mlx5e_rx_hash_fn(int hfunc)
+{
+       return (hfunc == ETH_RSS_HASH_TOP) ?
+              MLX5_RX_HASH_FN_TOEPLITZ :
+              MLX5_RX_HASH_FN_INVERTED_XOR8;
+}
+
+static int mlx5e_bits_invert(unsigned long a, int size)
+{
+       int inv = 0;
+       int i;
+
+       for (i = 0; i < size; i++)
+               inv |= (test_bit(size - i - 1, &a) ? 1 : 0) << i;
+
+       return inv;
+}
+
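+/* When the XOR hash is in use the indirection table is walked in
+ * bit-reversed order (mlx5e_bits_invert), which appears to match the
+ * inverted-XOR8 RX hash function selected in mlx5e_rx_hash_fn().
+ */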
+static void mlx5e_fill_rqt_rqns(struct mlx5e_priv *priv, int sz,
+                               struct mlx5e_redirect_rqt_param rrp, void *rqtc)
+{
+       int i;
+
+       for (i = 0; i < sz; i++) {
+               u32 rqn;
+
+               if (rrp.is_rss) {
+                       int ix = i;
+
+                       if (rrp.rss.hfunc == ETH_RSS_HASH_XOR)
+                               ix = mlx5e_bits_invert(i, ilog2(sz));
+
+                       ix = priv->channels.params.indirection_rqt[ix];
+                       rqn = rrp.rss.channels->c[ix]->rq.rqn;
+               } else {
+                       rqn = rrp.rqn;
+               }
+               MLX5_SET(rqtc, rqtc, rq_num[i], rqn);
+       }
+}
+
+int mlx5e_redirect_rqt(struct mlx5e_priv *priv, u32 rqtn, int sz,
+                      struct mlx5e_redirect_rqt_param rrp)
 {
        struct mlx5_core_dev *mdev = priv->mdev;
        void *rqtc;
@@ -2029,41 +2211,86 @@ int mlx5e_redirect_rqt(struct mlx5e_priv *priv, u32 rqtn, int sz, int ix)
        rqtc = MLX5_ADDR_OF(modify_rqt_in, in, ctx);
 
        MLX5_SET(rqtc, rqtc, rqt_actual_size, sz);
-       if (sz > 1) /* RSS */
-               mlx5e_fill_indir_rqt_rqns(priv, rqtc);
-       else
-               mlx5e_fill_direct_rqt_rqn(priv, rqtc, ix);
-
        MLX5_SET(modify_rqt_in, in, bitmask.rqn_list, 1);
-
+       mlx5e_fill_rqt_rqns(priv, sz, rrp, rqtc);
        err = mlx5_core_modify_rqt(mdev, rqtn, in, inlen);
 
        kvfree(in);
-
        return err;
 }
 
-static void mlx5e_redirect_rqts(struct mlx5e_priv *priv)
+static u32 mlx5e_get_direct_rqn(struct mlx5e_priv *priv, int ix,
+                               struct mlx5e_redirect_rqt_param rrp)
+{
+       if (!rrp.is_rss)
+               return rrp.rqn;
+
+       if (ix >= rrp.rss.channels->num)
+               return priv->drop_rq.rqn;
+
+       return rrp.rss.channels->c[ix]->rq.rqn;
+}
+
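+/* RQT redirection now takes an explicit target (RSS over a channel set,
+ * or a single RQN such as the drop RQ) instead of inferring it from
+ * device state, keeping the to_channels/to_drop helpers below trivial.
+ */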
+static void mlx5e_redirect_rqts(struct mlx5e_priv *priv,
+                               struct mlx5e_redirect_rqt_param rrp)
 {
        u32 rqtn;
        int ix;
 
        if (priv->indir_rqt.enabled) {
+               /* RSS RQ table */
                rqtn = priv->indir_rqt.rqtn;
-               mlx5e_redirect_rqt(priv, rqtn, MLX5E_INDIR_RQT_SIZE, 0);
+               mlx5e_redirect_rqt(priv, rqtn, MLX5E_INDIR_RQT_SIZE, rrp);
        }
 
-       for (ix = 0; ix < priv->params.num_channels; ix++) {
+       for (ix = 0; ix < priv->profile->max_nch(priv->mdev); ix++) {
+               struct mlx5e_redirect_rqt_param direct_rrp = {
+                       .is_rss = false,
+                       {
+                               .rqn    = mlx5e_get_direct_rqn(priv, ix, rrp)
+                       },
+               };
+
+               /* Direct RQ tables */
                if (!priv->direct_tir[ix].rqt.enabled)
                        continue;
+
                rqtn = priv->direct_tir[ix].rqt.rqtn;
-               mlx5e_redirect_rqt(priv, rqtn, 1, ix);
+               mlx5e_redirect_rqt(priv, rqtn, 1, direct_rrp);
        }
 }
 
-static void mlx5e_build_tir_ctx_lro(void *tirc, struct mlx5e_priv *priv)
+static void mlx5e_redirect_rqts_to_channels(struct mlx5e_priv *priv,
+                                           struct mlx5e_channels *chs)
+{
+       struct mlx5e_redirect_rqt_param rrp = {
+               .is_rss        = true,
+               {
+                       .rss = {
+                               .channels  = chs,
+                               .hfunc     = chs->params.rss_hfunc,
+                       }
+               },
+       };
+
+       mlx5e_redirect_rqts(priv, rrp);
+}
+
+static void mlx5e_redirect_rqts_to_drop(struct mlx5e_priv *priv)
 {
-       if (!priv->params.lro_en)
+       struct mlx5e_redirect_rqt_param drop_rrp = {
+               .is_rss = false,
+               {
+                       .rqn = priv->drop_rq.rqn,
+               },
+       };
+
+       mlx5e_redirect_rqts(priv, drop_rrp);
+}
+
+static void mlx5e_build_tir_ctx_lro(struct mlx5e_params *params, void *tirc)
+{
+       if (!params->lro_en)
                return;
 
 #define ROUGH_MAX_L2_L3_HDR_SZ 256
@@ -2072,13 +2299,13 @@ static void mlx5e_build_tir_ctx_lro(void *tirc, struct mlx5e_priv *priv)
                 MLX5_TIRC_LRO_ENABLE_MASK_IPV4_LRO |
                 MLX5_TIRC_LRO_ENABLE_MASK_IPV6_LRO);
        MLX5_SET(tirc, tirc, lro_max_ip_payload_size,
-                (priv->params.lro_wqe_sz -
-                 ROUGH_MAX_L2_L3_HDR_SZ) >> 8);
-       MLX5_SET(tirc, tirc, lro_timeout_period_usecs, priv->params.lro_timeout);
+                (params->lro_wqe_sz - ROUGH_MAX_L2_L3_HDR_SZ) >> 8);
+       MLX5_SET(tirc, tirc, lro_timeout_period_usecs, params->lro_timeout);
 }
 
-void mlx5e_build_indir_tir_ctx_hash(struct mlx5e_priv *priv, void *tirc,
-                                   enum mlx5e_traffic_types tt)
+void mlx5e_build_indir_tir_ctx_hash(struct mlx5e_params *params,
+                                   enum mlx5e_traffic_types tt,
+                                   void *tirc)
 {
        void *hfso = MLX5_ADDR_OF(tirc, tirc, rx_hash_field_selector_outer);
 
@@ -2094,16 +2321,15 @@ void mlx5e_build_indir_tir_ctx_hash(struct mlx5e_priv *priv, void *tirc,
                                 MLX5_HASH_FIELD_SEL_DST_IP   |\
                                 MLX5_HASH_FIELD_SEL_IPSEC_SPI)
 
-       MLX5_SET(tirc, tirc, rx_hash_fn,
-                mlx5e_rx_hash_fn(priv->params.rss_hfunc));
-       if (priv->params.rss_hfunc == ETH_RSS_HASH_TOP) {
+       MLX5_SET(tirc, tirc, rx_hash_fn, mlx5e_rx_hash_fn(params->rss_hfunc));
+       if (params->rss_hfunc == ETH_RSS_HASH_TOP) {
                void *rss_key = MLX5_ADDR_OF(tirc, tirc,
                                             rx_hash_toeplitz_key);
                size_t len = MLX5_FLD_SZ_BYTES(tirc,
                                               rx_hash_toeplitz_key);
 
                MLX5_SET(tirc, tirc, rx_hash_symmetric, 1);
-               memcpy(rss_key, priv->params.toeplitz_hash_key, len);
+               memcpy(rss_key, params->toeplitz_hash_key, len);
        }
 
        switch (tt) {
@@ -2208,7 +2434,7 @@ static int mlx5e_modify_tirs_lro(struct mlx5e_priv *priv)
        MLX5_SET(modify_tir_in, in, bitmask.lro, 1);
        tirc = MLX5_ADDR_OF(modify_tir_in, in, ctx);
 
-       mlx5e_build_tir_ctx_lro(tirc, priv);
+       mlx5e_build_tir_ctx_lro(&priv->channels.params, tirc);
 
        for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++) {
                err = mlx5_core_modify_tir(mdev, priv->indir_tir[tt].tirn, in,
@@ -2258,9 +2484,9 @@ static void mlx5e_query_mtu(struct mlx5e_priv *priv, u16 *mtu)
        *mtu = MLX5E_HW2SW_MTU(hw_mtu);
 }
 
-static int mlx5e_set_dev_port_mtu(struct net_device *netdev)
+static int mlx5e_set_dev_port_mtu(struct mlx5e_priv *priv)
 {
-       struct mlx5e_priv *priv = netdev_priv(netdev);
+       struct net_device *netdev = priv->netdev;
        u16 mtu;
        int err;
 
@@ -2280,8 +2506,8 @@ static int mlx5e_set_dev_port_mtu(struct net_device *netdev)
 static void mlx5e_netdev_set_tcs(struct net_device *netdev)
 {
        struct mlx5e_priv *priv = netdev_priv(netdev);
-       int nch = priv->params.num_channels;
-       int ntc = priv->params.num_tc;
+       int nch = priv->channels.params.num_channels;
+       int ntc = priv->channels.params.num_tc;
        int tc;
 
        netdev_reset_tc(netdev);
@@ -2298,53 +2524,110 @@ static void mlx5e_netdev_set_tcs(struct net_device *netdev)
                netdev_set_tc_queue(netdev, tc, nch, 0);
 }
 
+static void mlx5e_build_channels_tx_maps(struct mlx5e_priv *priv)
+{
+       struct mlx5e_channel *c;
+       struct mlx5e_txqsq *sq;
+       int i, tc;
+
+       for (i = 0; i < priv->channels.num; i++)
+               for (tc = 0; tc < priv->profile->max_tc; tc++)
+                       priv->channel_tc2txq[i][tc] = i + tc * priv->channels.num;
+
+       for (i = 0; i < priv->channels.num; i++) {
+               c = priv->channels.c[i];
+               for (tc = 0; tc < c->num_tc; tc++) {
+                       sq = &c->sq[tc];
+                       priv->txq2sq[sq->txq_ix] = sq;
+               }
+       }
+}
+
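+/* Bring-up order: publish queue counts and the txq<->sq maps, start the
+ * tx queues, then point the RQTs at the new channels only after their
+ * RQs have reached a minimal fill level.
+ */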
+static void mlx5e_activate_priv_channels(struct mlx5e_priv *priv)
+{
+       int num_txqs = priv->channels.num * priv->channels.params.num_tc;
+       struct net_device *netdev = priv->netdev;
+
+       mlx5e_netdev_set_tcs(netdev);
+       netif_set_real_num_tx_queues(netdev, num_txqs);
+       netif_set_real_num_rx_queues(netdev, priv->channels.num);
+
+       mlx5e_build_channels_tx_maps(priv);
+       mlx5e_activate_channels(&priv->channels);
+       netif_tx_start_all_queues(priv->netdev);
+
+       if (MLX5_CAP_GEN(priv->mdev, vport_group_manager))
+               mlx5e_add_sqs_fwd_rules(priv);
+
+       mlx5e_wait_channels_min_rx_wqes(&priv->channels);
+       mlx5e_redirect_rqts_to_channels(priv, &priv->channels);
+}
+
+static void mlx5e_deactivate_priv_channels(struct mlx5e_priv *priv)
+{
+       mlx5e_redirect_rqts_to_drop(priv);
+
+       if (MLX5_CAP_GEN(priv->mdev, vport_group_manager))
+               mlx5e_remove_sqs_fwd_rules(priv);
+
+       /* FIXME: This is a workaround only for the tx timeout watchdog
+        * false alarm that fires when polling for inactive tx queues.
+        */
+       netif_tx_stop_all_queues(priv->netdev);
+       netif_tx_disable(priv->netdev);
+       mlx5e_deactivate_channels(&priv->channels);
+}
+
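+/* Safe config change: callers are expected to open the new channel set
+ * first; this helper then quiesces and closes the old one, installs the
+ * new set, applies optional HW changes via hw_modify, and reactivates
+ * the data path.
+ */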
+void mlx5e_switch_priv_channels(struct mlx5e_priv *priv,
+                               struct mlx5e_channels *new_chs,
+                               mlx5e_fp_hw_modify hw_modify)
+{
+       struct net_device *netdev = priv->netdev;
+       int new_num_txqs;
+
+       new_num_txqs = new_chs->num * new_chs->params.num_tc;
+
+       netif_carrier_off(netdev);
+
+       if (new_num_txqs < netdev->real_num_tx_queues)
+               netif_set_real_num_tx_queues(netdev, new_num_txqs);
+
+       mlx5e_deactivate_priv_channels(priv);
+       mlx5e_close_channels(&priv->channels);
+
+       priv->channels = *new_chs;
+
+       /* New channels are ready to roll, modify HW settings if needed */
+       if (hw_modify)
+               hw_modify(priv);
+
+       mlx5e_refresh_tirs(priv, false);
+       mlx5e_activate_priv_channels(priv);
+
+       mlx5e_update_carrier(priv);
+}
+
 int mlx5e_open_locked(struct net_device *netdev)
 {
        struct mlx5e_priv *priv = netdev_priv(netdev);
-       struct mlx5_core_dev *mdev = priv->mdev;
-       int num_txqs;
        int err;
 
        set_bit(MLX5E_STATE_OPENED, &priv->state);
 
-       mlx5e_netdev_set_tcs(netdev);
-
-       num_txqs = priv->params.num_channels * priv->params.num_tc;
-       netif_set_real_num_tx_queues(netdev, num_txqs);
-       netif_set_real_num_rx_queues(netdev, priv->params.num_channels);
-
-       err = mlx5e_open_channels(priv);
-       if (err) {
-               netdev_err(netdev, "%s: mlx5e_open_channels failed, %d\n",
-                          __func__, err);
+       err = mlx5e_open_channels(priv, &priv->channels);
+       if (err)
                goto err_clear_state_opened_flag;
-       }
 
-       err = mlx5e_refresh_tirs_self_loopback(priv->mdev, false);
-       if (err) {
-               netdev_err(netdev, "%s: mlx5e_refresh_tirs_self_loopback_enable failed, %d\n",
-                          __func__, err);
-               goto err_close_channels;
-       }
-
-       mlx5e_redirect_rqts(priv);
+       mlx5e_refresh_tirs(priv, false);
+       mlx5e_activate_priv_channels(priv);
        mlx5e_update_carrier(priv);
        mlx5e_timestamp_init(priv);
-#ifdef CONFIG_RFS_ACCEL
-       priv->netdev->rx_cpu_rmap = priv->mdev->rmap;
-#endif
+
        if (priv->profile->update_stats)
                queue_delayed_work(priv->wq, &priv->update_stats_work, 0);
 
-       if (MLX5_CAP_GEN(mdev, vport_group_manager)) {
-               err = mlx5e_add_sqs_fwd_rules(priv);
-               if (err)
-                       goto err_close_channels;
-       }
        return 0;
 
-err_close_channels:
-       mlx5e_close_channels(priv);
 err_clear_state_opened_flag:
        clear_bit(MLX5E_STATE_OPENED, &priv->state);
        return err;
@@ -2365,7 +2648,6 @@ int mlx5e_open(struct net_device *netdev)
 int mlx5e_close_locked(struct net_device *netdev)
 {
        struct mlx5e_priv *priv = netdev_priv(netdev);
-       struct mlx5_core_dev *mdev = priv->mdev;
 
        /* May already be CLOSED if a previous configuration operation
         * that involves close & open (e.g. an RX/TX queue size change) failed.
@@ -2375,13 +2657,10 @@ int mlx5e_close_locked(struct net_device *netdev)
 
        clear_bit(MLX5E_STATE_OPENED, &priv->state);
 
-       if (MLX5_CAP_GEN(mdev, vport_group_manager))
-               mlx5e_remove_sqs_fwd_rules(priv);
-
        mlx5e_timestamp_cleanup(priv);
        netif_carrier_off(priv->netdev);
-       mlx5e_redirect_rqts(priv);
-       mlx5e_close_channels(priv);
+       mlx5e_deactivate_priv_channels(priv);
+       mlx5e_close_channels(&priv->channels);
 
        return 0;
 }
@@ -2401,11 +2680,10 @@ int mlx5e_close(struct net_device *netdev)
        return err;
 }
 
-static int mlx5e_create_drop_rq(struct mlx5e_priv *priv,
-                               struct mlx5e_rq *rq,
-                               struct mlx5e_rq_param *param)
+static int mlx5e_alloc_drop_rq(struct mlx5_core_dev *mdev,
+                              struct mlx5e_rq *rq,
+                              struct mlx5e_rq_param *param)
 {
-       struct mlx5_core_dev *mdev = priv->mdev;
        void *rqc = param->rqc;
        void *rqc_wq = MLX5_ADDR_OF(rqc, rqc, wq);
        int err;
@@ -2417,91 +2695,64 @@ static int mlx5e_create_drop_rq(struct mlx5e_priv *priv,
        if (err)
                return err;
 
-       rq->priv = priv;
+       rq->mdev = mdev;
 
        return 0;
 }
 
-static int mlx5e_create_drop_cq(struct mlx5e_priv *priv,
-                               struct mlx5e_cq *cq,
-                               struct mlx5e_cq_param *param)
+static int mlx5e_alloc_drop_cq(struct mlx5_core_dev *mdev,
+                              struct mlx5e_cq *cq,
+                              struct mlx5e_cq_param *param)
 {
-       struct mlx5_core_dev *mdev = priv->mdev;
-       struct mlx5_core_cq *mcq = &cq->mcq;
-       int eqn_not_used;
-       unsigned int irqn;
-       int err;
-
-       err = mlx5_cqwq_create(mdev, &param->wq, param->cqc, &cq->wq,
-                              &cq->wq_ctrl);
-       if (err)
-               return err;
-
-       mlx5_vector2eqn(mdev, param->eq_ix, &eqn_not_used, &irqn);
-
-       mcq->cqe_sz     = 64;
-       mcq->set_ci_db  = cq->wq_ctrl.db.db;
-       mcq->arm_db     = cq->wq_ctrl.db.db + 1;
-       *mcq->set_ci_db = 0;
-       *mcq->arm_db    = 0;
-       mcq->vector     = param->eq_ix;
-       mcq->comp       = mlx5e_completion_event;
-       mcq->event      = mlx5e_cq_error_event;
-       mcq->irqn       = irqn;
-
-       cq->priv = priv;
-
-       return 0;
+       return mlx5e_alloc_cq_common(mdev, param, cq);
 }
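+
+/* The drop RQ needs only the core device, not a priv/channel context, so
+ * it reuses mlx5e_alloc_cq_common() directly and can be brought up before
+ * any channels exist.
+ */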
 
-static int mlx5e_open_drop_rq(struct mlx5e_priv *priv)
+static int mlx5e_open_drop_rq(struct mlx5_core_dev *mdev,
+                             struct mlx5e_rq *drop_rq)
 {
-       struct mlx5e_cq_param cq_param;
-       struct mlx5e_rq_param rq_param;
-       struct mlx5e_rq *rq = &priv->drop_rq;
-       struct mlx5e_cq *cq = &priv->drop_rq.cq;
+       struct mlx5e_cq_param cq_param = {};
+       struct mlx5e_rq_param rq_param = {};
+       struct mlx5e_cq *cq = &drop_rq->cq;
        int err;
 
-       memset(&cq_param, 0, sizeof(cq_param));
-       memset(&rq_param, 0, sizeof(rq_param));
        mlx5e_build_drop_rq_param(&rq_param);
 
-       err = mlx5e_create_drop_cq(priv, cq, &cq_param);
+       err = mlx5e_alloc_drop_cq(mdev, cq, &cq_param);
        if (err)
                return err;
 
-       err = mlx5e_enable_cq(cq, &cq_param);
+       err = mlx5e_create_cq(cq, &cq_param);
        if (err)
-               goto err_destroy_cq;
+               goto err_free_cq;
 
-       err = mlx5e_create_drop_rq(priv, rq, &rq_param);
+       err = mlx5e_alloc_drop_rq(mdev, drop_rq, &rq_param);
        if (err)
-               goto err_disable_cq;
+               goto err_destroy_cq;
 
-       err = mlx5e_enable_rq(rq, &rq_param);
+       err = mlx5e_create_rq(drop_rq, &rq_param);
        if (err)
-               goto err_destroy_rq;
+               goto err_free_rq;
 
        return 0;
 
-err_destroy_rq:
-       mlx5e_destroy_rq(&priv->drop_rq);
-
-err_disable_cq:
-       mlx5e_disable_cq(&priv->drop_rq.cq);
+err_free_rq:
+       mlx5e_free_rq(drop_rq);
 
 err_destroy_cq:
-       mlx5e_destroy_cq(&priv->drop_rq.cq);
+       mlx5e_destroy_cq(cq);
+
+err_free_cq:
+       mlx5e_free_cq(cq);
 
        return err;
 }
 
-static void mlx5e_close_drop_rq(struct mlx5e_priv *priv)
+static void mlx5e_close_drop_rq(struct mlx5e_rq *drop_rq)
 {
-       mlx5e_disable_rq(&priv->drop_rq);
-       mlx5e_destroy_rq(&priv->drop_rq);
-       mlx5e_disable_cq(&priv->drop_rq.cq);
-       mlx5e_destroy_cq(&priv->drop_rq.cq);
+       mlx5e_destroy_rq(drop_rq);
+       mlx5e_free_rq(drop_rq);
+       mlx5e_destroy_cq(&drop_rq->cq);
+       mlx5e_free_cq(&drop_rq->cq);
 }
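
The drop-RQ open/close pair above shows the refactor's two-stage resource convention: an alloc step builds software state, a create step instantiates the hardware object, failures unwind via labels in reverse setup order, and close repeats the same reverse sequence unconditionally. A minimal sketch of the pattern, using hypothetical resources rather than the real RQ/CQ helpers:

#include <stdio.h>

/* Hypothetical two-stage resource mirroring the alloc/create split above:
 * "alloc" builds software state, "create" instantiates the HW object.
 */
static int  alloc_sw_state(void)    { return 0; }
static void free_sw_state(void)     { }
static int  create_hw_object(void)  { return 0; }
static void destroy_hw_object(void) { }

static int open_resource(void)
{
	int err;

	err = alloc_sw_state();
	if (err)
		return err;

	err = create_hw_object();
	if (err)
		goto err_free_sw_state;  /* unwind in reverse order of setup */

	return 0;

err_free_sw_state:
	free_sw_state();
	return err;
}

static void close_resource(void)
{
	/* teardown repeats the error-unwind sequence unconditionally */
	destroy_hw_object();
	free_sw_state();
}

int main(void)
{
	if (!open_resource()) {
		puts("resource open");
		close_resource();
	}
	return 0;
}
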
 
 static int mlx5e_create_tis(struct mlx5e_priv *priv, int tc)
@@ -2552,24 +2803,24 @@ void mlx5e_cleanup_nic_tx(struct mlx5e_priv *priv)
                mlx5e_destroy_tis(priv, tc);
 }
 
-static void mlx5e_build_indir_tir_ctx(struct mlx5e_priv *priv, u32 *tirc,
-                                     enum mlx5e_traffic_types tt)
+static void mlx5e_build_indir_tir_ctx(struct mlx5e_priv *priv,
+                                     enum mlx5e_traffic_types tt,
+                                     u32 *tirc)
 {
        MLX5_SET(tirc, tirc, transport_domain, priv->mdev->mlx5e_res.td.tdn);
 
-       mlx5e_build_tir_ctx_lro(tirc, priv);
+       mlx5e_build_tir_ctx_lro(&priv->channels.params, tirc);
 
        MLX5_SET(tirc, tirc, disp_type, MLX5_TIRC_DISP_TYPE_INDIRECT);
        MLX5_SET(tirc, tirc, indirect_table, priv->indir_rqt.rqtn);
-       mlx5e_build_indir_tir_ctx_hash(priv, tirc, tt);
+       mlx5e_build_indir_tir_ctx_hash(&priv->channels.params, tt, tirc);
 }
 
-static void mlx5e_build_direct_tir_ctx(struct mlx5e_priv *priv, u32 *tirc,
-                                      u32 rqtn)
+static void mlx5e_build_direct_tir_ctx(struct mlx5e_priv *priv, u32 rqtn, u32 *tirc)
 {
        MLX5_SET(tirc, tirc, transport_domain, priv->mdev->mlx5e_res.td.tdn);
 
-       mlx5e_build_tir_ctx_lro(tirc, priv);
+       mlx5e_build_tir_ctx_lro(&priv->channels.params, tirc);
 
        MLX5_SET(tirc, tirc, disp_type, MLX5_TIRC_DISP_TYPE_INDIRECT);
        MLX5_SET(tirc, tirc, indirect_table, rqtn);
@@ -2594,7 +2845,7 @@ static int mlx5e_create_indirect_tirs(struct mlx5e_priv *priv)
                memset(in, 0, inlen);
                tir = &priv->indir_tir[tt];
                tirc = MLX5_ADDR_OF(create_tir_in, in, ctx);
-               mlx5e_build_indir_tir_ctx(priv, tirc, tt);
+               mlx5e_build_indir_tir_ctx(priv, tt, tirc);
                err = mlx5e_create_tir(priv->mdev, tir, in, inlen);
                if (err)
                        goto err_destroy_tirs;
@@ -2632,8 +2883,7 @@ int mlx5e_create_direct_tirs(struct mlx5e_priv *priv)
                memset(in, 0, inlen);
                tir = &priv->direct_tir[ix];
                tirc = MLX5_ADDR_OF(create_tir_in, in, ctx);
-               mlx5e_build_direct_tir_ctx(priv, tirc,
-                                          priv->direct_tir[ix].rqt.rqtn);
+               mlx5e_build_direct_tir_ctx(priv, priv->direct_tir[ix].rqt.rqtn, tirc);
                err = mlx5e_create_tir(priv->mdev, tir, in, inlen);
                if (err)
                        goto err_destroy_ch_tirs;
@@ -2669,16 +2919,27 @@ void mlx5e_destroy_direct_tirs(struct mlx5e_priv *priv)
                mlx5e_destroy_tir(priv->mdev, &priv->direct_tir[i]);
 }
 
-int mlx5e_modify_rqs_vsd(struct mlx5e_priv *priv, bool vsd)
+static int mlx5e_modify_channels_scatter_fcs(struct mlx5e_channels *chs, bool enable)
 {
        int err = 0;
        int i;
 
-       if (!test_bit(MLX5E_STATE_OPENED, &priv->state))
-               return 0;
+       for (i = 0; i < chs->num; i++) {
+               err = mlx5e_modify_rq_scatter_fcs(&chs->c[i]->rq, enable);
+               if (err)
+                       return err;
+       }
+
+       return 0;
+}
 
-       for (i = 0; i < priv->params.num_channels; i++) {
-               err = mlx5e_modify_rq_vsd(&priv->channel[i]->rq, vsd);
+static int mlx5e_modify_channels_vsd(struct mlx5e_channels *chs, bool vsd)
+{
+       int err = 0;
+       int i;
+
+       for (i = 0; i < chs->num; i++) {
+               err = mlx5e_modify_rq_vsd(&chs->c[i]->rq, vsd);
                if (err)
                        return err;
        }
@@ -2689,7 +2950,7 @@ int mlx5e_modify_rqs_vsd(struct mlx5e_priv *priv, bool vsd)
 static int mlx5e_setup_tc(struct net_device *netdev, u8 tc)
 {
        struct mlx5e_priv *priv = netdev_priv(netdev);
-       bool was_opened;
+       struct mlx5e_channels new_channels = {};
        int err = 0;
 
        if (tc && tc != MLX5E_MAX_NUM_TC)
@@ -2697,17 +2958,21 @@ static int mlx5e_setup_tc(struct net_device *netdev, u8 tc)
 
        mutex_lock(&priv->state_lock);
 
-       was_opened = test_bit(MLX5E_STATE_OPENED, &priv->state);
-       if (was_opened)
-               mlx5e_close_locked(priv->netdev);
+       new_channels.params = priv->channels.params;
+       new_channels.params.num_tc = tc ? tc : 1;
 
-       priv->params.num_tc = tc ? tc : 1;
+       if (!test_bit(MLX5E_STATE_OPENED, &priv->state)) {
+               priv->channels.params = new_channels.params;
+               goto out;
+       }
 
-       if (was_opened)
-               err = mlx5e_open_locked(priv->netdev);
+       err = mlx5e_open_channels(priv, &new_channels);
+       if (err)
+               goto out;
 
+       mlx5e_switch_priv_channels(priv, &new_channels, NULL);
+out:
        mutex_unlock(&priv->state_lock);
-
        return err;
 }
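
mlx5e_setup_tc() demonstrates the staged-reconfiguration flow introduced here: copy the current parameters, mutate the copy, open a complete new channel set from it, and only then switch over and retire the old set, so a failed open leaves the running configuration untouched (if the device is closed, the staged parameters are simply stored for the next open). A compact sketch of the idea, with illustrative stand-in types:

#include <string.h>

struct params   { int num_tc; };
struct channels { struct params params; int open; };

static int open_channels(struct channels *c)
{
	c->open = 1;             /* stand-in for allocating a full channel set */
	return 0;
}

static void close_channels(struct channels *c)
{
	c->open = 0;
}

static int reconfigure(struct channels *active, int new_tc)
{
	struct channels staged;
	int err;

	memset(&staged, 0, sizeof(staged));
	staged.params = active->params;  /* copy the live configuration ... */
	staged.params.num_tc = new_tc;   /* ... and mutate only the copy */

	err = open_channels(&staged);    /* a failure leaves *active running */
	if (err)
		return err;

	close_channels(active);          /* only now retire the old set */
	*active = staged;
	return 0;
}

int main(void)
{
	struct channels active = { { 1 }, 1 };

	return reconfigure(&active, 4);
}

The important property is that open_channels() can fail without disturbing the active set, which is what lets these reconfigurations avoid the old close-then-reopen window.
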
 
@@ -2737,7 +3002,9 @@ mqprio:
        if (tc->type != TC_SETUP_MQPRIO)
                return -EINVAL;
 
-       return mlx5e_setup_tc(dev, tc->tc);
+       tc->mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS;
+
+       return mlx5e_setup_tc(dev, tc->mqprio->num_tc);
 }
 
 static void
@@ -2822,26 +3089,31 @@ typedef int (*mlx5e_feature_handler)(struct net_device *netdev, bool enable);
 static int set_feature_lro(struct net_device *netdev, bool enable)
 {
        struct mlx5e_priv *priv = netdev_priv(netdev);
-       bool was_opened = test_bit(MLX5E_STATE_OPENED, &priv->state);
-       int err;
+       struct mlx5e_channels new_channels = {};
+       int err = 0;
+       bool reset;
 
        mutex_lock(&priv->state_lock);
 
-       if (was_opened && (priv->params.rq_wq_type == MLX5_WQ_TYPE_LINKED_LIST))
-               mlx5e_close_locked(priv->netdev);
+       reset = (priv->channels.params.rq_wq_type == MLX5_WQ_TYPE_LINKED_LIST);
+       reset = reset && test_bit(MLX5E_STATE_OPENED, &priv->state);
 
-       priv->params.lro_en = enable;
-       err = mlx5e_modify_tirs_lro(priv);
-       if (err) {
-               netdev_err(netdev, "lro modify failed, %d\n", err);
-               priv->params.lro_en = !enable;
+       new_channels.params = priv->channels.params;
+       new_channels.params.lro_en = enable;
+
+       if (!reset) {
+               priv->channels.params = new_channels.params;
+               err = mlx5e_modify_tirs_lro(priv);
+               goto out;
        }
 
-       if (was_opened && (priv->params.rq_wq_type == MLX5_WQ_TYPE_LINKED_LIST))
-               mlx5e_open_locked(priv->netdev);
+       err = mlx5e_open_channels(priv, &new_channels);
+       if (err)
+               goto out;
 
+       mlx5e_switch_priv_channels(priv, &new_channels, mlx5e_modify_tirs_lro);
+out:
        mutex_unlock(&priv->state_lock);
-
        return err;
 }
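
set_feature_lro() adds a hardware-update callback to the same flow: when a full channel switch is required, mlx5e_modify_tirs_lro is handed to mlx5e_switch_priv_channels() so that TIRs referencing the channels are refreshed as part of the switch. A sketch of that hook shape, assuming hypothetical helper names and an illustrative call order:

struct ctx { int dummy; };

typedef int (*hw_modify_fn)(struct ctx *ctx);

static void deactivate_old(struct ctx *ctx) { (void)ctx; }
static void activate_new(struct ctx *ctx)   { (void)ctx; }

static int switch_channels(struct ctx *ctx, hw_modify_fn hw_modify)
{
	int err = 0;

	deactivate_old(ctx);
	if (hw_modify)                 /* e.g. refresh TIRs mid-switch */
		err = hw_modify(ctx);
	activate_new(ctx);
	return err;
}

int main(void)
{
	struct ctx c = { 0 };

	return switch_channels(&c, 0);
}
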
 
@@ -2878,18 +3150,39 @@ static int set_feature_rx_all(struct net_device *netdev, bool enable)
        return mlx5_set_port_fcs(mdev, !enable);
 }
 
-static int set_feature_rx_vlan(struct net_device *netdev, bool enable)
+static int set_feature_rx_fcs(struct net_device *netdev, bool enable)
 {
        struct mlx5e_priv *priv = netdev_priv(netdev);
        int err;
 
        mutex_lock(&priv->state_lock);
 
-       priv->params.vlan_strip_disable = !enable;
-       err = mlx5e_modify_rqs_vsd(priv, !enable);
+       priv->channels.params.scatter_fcs_en = enable;
+       err = mlx5e_modify_channels_scatter_fcs(&priv->channels, enable);
+       if (err)
+               priv->channels.params.scatter_fcs_en = !enable;
+
+       mutex_unlock(&priv->state_lock);
+
+       return err;
+}
+
+static int set_feature_rx_vlan(struct net_device *netdev, bool enable)
+{
+       struct mlx5e_priv *priv = netdev_priv(netdev);
+       int err = 0;
+
+       mutex_lock(&priv->state_lock);
+
+       priv->channels.params.vlan_strip_disable = !enable;
+       if (!test_bit(MLX5E_STATE_OPENED, &priv->state))
+               goto unlock;
+
+       err = mlx5e_modify_channels_vsd(&priv->channels, !enable);
        if (err)
-               priv->params.vlan_strip_disable = enable;
+               priv->channels.params.vlan_strip_disable = enable;
 
+unlock:
        mutex_unlock(&priv->state_lock);
 
        return err;
@@ -2947,6 +3240,8 @@ static int mlx5e_set_features(struct net_device *netdev,
                                    set_feature_tc_num_filters);
        err |= mlx5e_handle_feature(netdev, features, NETIF_F_RXALL,
                                    set_feature_rx_all);
+       err |= mlx5e_handle_feature(netdev, features, NETIF_F_RXFCS,
+                                   set_feature_rx_fcs);
        err |= mlx5e_handle_feature(netdev, features, NETIF_F_HW_VLAN_CTAG_RX,
                                    set_feature_rx_vlan);
 #ifdef CONFIG_RFS_ACCEL
@@ -2960,28 +3255,38 @@ static int mlx5e_set_features(struct net_device *netdev,
 static int mlx5e_change_mtu(struct net_device *netdev, int new_mtu)
 {
        struct mlx5e_priv *priv = netdev_priv(netdev);
-       bool was_opened;
+       struct mlx5e_channels new_channels = {};
+       int curr_mtu;
        int err = 0;
        bool reset;
 
        mutex_lock(&priv->state_lock);
 
-       reset = !priv->params.lro_en &&
-               (priv->params.rq_wq_type !=
+       reset = !priv->channels.params.lro_en &&
+               (priv->channels.params.rq_wq_type !=
                 MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ);
 
-       was_opened = test_bit(MLX5E_STATE_OPENED, &priv->state);
-       if (was_opened && reset)
-               mlx5e_close_locked(netdev);
+       reset = reset && test_bit(MLX5E_STATE_OPENED, &priv->state);
 
+       curr_mtu    = netdev->mtu;
        netdev->mtu = new_mtu;
-       mlx5e_set_dev_port_mtu(netdev);
 
-       if (was_opened && reset)
-               err = mlx5e_open_locked(netdev);
+       if (!reset) {
+               mlx5e_set_dev_port_mtu(priv);
+               goto out;
+       }
 
-       mutex_unlock(&priv->state_lock);
+       new_channels.params = priv->channels.params;
+       err = mlx5e_open_channels(priv, &new_channels);
+       if (err) {
+               netdev->mtu = curr_mtu;
+               goto out;
+       }
 
+       mlx5e_switch_priv_channels(priv, &new_channels, mlx5e_set_dev_port_mtu);
+
+out:
+       mutex_unlock(&priv->state_lock);
        return err;
 }
 
@@ -3100,8 +3405,8 @@ static int mlx5e_get_vf_stats(struct net_device *dev,
                                            vf_stats);
 }
 
-void mlx5e_add_vxlan_port(struct net_device *netdev,
-                         struct udp_tunnel_info *ti)
+static void mlx5e_add_vxlan_port(struct net_device *netdev,
+                                struct udp_tunnel_info *ti)
 {
        struct mlx5e_priv *priv = netdev_priv(netdev);
 
@@ -3114,8 +3419,8 @@ void mlx5e_add_vxlan_port(struct net_device *netdev,
        mlx5e_vxlan_queue_work(priv, ti->sa_family, be16_to_cpu(ti->port), 1);
 }
 
-void mlx5e_del_vxlan_port(struct net_device *netdev,
-                         struct udp_tunnel_info *ti)
+static void mlx5e_del_vxlan_port(struct net_device *netdev,
+                                struct udp_tunnel_info *ti)
 {
        struct mlx5e_priv *priv = netdev_priv(netdev);
 
@@ -3186,8 +3491,8 @@ static void mlx5e_tx_timeout(struct net_device *dev)
 
        netdev_err(dev, "TX timeout detected\n");
 
-       for (i = 0; i < priv->params.num_channels * priv->params.num_tc; i++) {
-               struct mlx5e_sq *sq = priv->txq_to_sq_map[i];
+       for (i = 0; i < priv->channels.num * priv->channels.params.num_tc; i++) {
+               struct mlx5e_txqsq *sq = priv->txq2sq[i];
 
                if (!netif_xmit_stopped(netdev_get_tx_queue(dev, i)))
                        continue;
@@ -3219,7 +3524,7 @@ static int mlx5e_xdp_set(struct net_device *netdev, struct bpf_prog *prog)
 
        was_opened = test_bit(MLX5E_STATE_OPENED, &priv->state);
        /* no need for full reset when exchanging programs */
-       reset = (!priv->xdp_prog || !prog);
+       reset = (!priv->channels.params.xdp_prog || !prog);
 
        if (was_opened && reset)
                mlx5e_close_locked(netdev);
@@ -3227,7 +3532,7 @@ static int mlx5e_xdp_set(struct net_device *netdev, struct bpf_prog *prog)
                /* num_channels is invariant here, so we can take the
                 * batched reference right upfront.
                 */
-               prog = bpf_prog_add(prog, priv->params.num_channels);
+               prog = bpf_prog_add(prog, priv->channels.num);
                if (IS_ERR(prog)) {
                        err = PTR_ERR(prog);
                        goto unlock;
@@ -3237,12 +3542,12 @@ static int mlx5e_xdp_set(struct net_device *netdev, struct bpf_prog *prog)
        /* exchange programs, extra prog reference we got from caller
         * as long as we don't fail from this point onwards.
         */
-       old_prog = xchg(&priv->xdp_prog, prog);
+       old_prog = xchg(&priv->channels.params.xdp_prog, prog);
        if (old_prog)
                bpf_prog_put(old_prog);
 
        if (reset) /* change RQ type according to priv->xdp_prog */
-               mlx5e_set_rq_priv_params(priv);
+               mlx5e_set_rq_params(priv->mdev, &priv->channels.params);
 
        if (was_opened && reset)
                mlx5e_open_locked(netdev);
@@ -3253,8 +3558,8 @@ static int mlx5e_xdp_set(struct net_device *netdev, struct bpf_prog *prog)
        /* exchanging programs w/o reset, we update ref counts on behalf
         * of the channels RQs here.
         */
-       for (i = 0; i < priv->params.num_channels; i++) {
-               struct mlx5e_channel *c = priv->channel[i];
+       for (i = 0; i < priv->channels.num; i++) {
+               struct mlx5e_channel *c = priv->channels.c[i];
 
                clear_bit(MLX5E_RQ_STATE_ENABLED, &c->rq.state);
                napi_synchronize(&c->napi);
@@ -3280,7 +3585,7 @@ static bool mlx5e_xdp_attached(struct net_device *dev)
 {
        struct mlx5e_priv *priv = netdev_priv(dev);
 
-       return !!priv->xdp_prog;
+       return !!priv->channels.params.xdp_prog;
 }
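
In mlx5e_xdp_set() above, the XDP program now lives in the channel parameters and is republished with xchg(): references for the new program are taken upfront (batched with bpf_prog_add(), one per channel), the pointer is swapped, and only then is the old program's reference dropped, so no reader can observe a program about to be freed. A simplified user-space sketch of the swap-then-release ordering with a stand-in refcounted type (the per-channel batching is omitted):

#include <stdatomic.h>
#include <stdlib.h>

struct prog { atomic_int refs; };

static void prog_get(struct prog *p)
{
	atomic_fetch_add(&p->refs, 1);
}

static void prog_put(struct prog *p)
{
	if (atomic_fetch_sub(&p->refs, 1) == 1)
		free(p);
}

static void swap_prog(_Atomic(struct prog *) *slot, struct prog *new_prog)
{
	struct prog *old;

	if (new_prog)
		prog_get(new_prog);              /* reference owned by the slot */

	old = atomic_exchange(slot, new_prog);   /* publish before releasing */
	if (old)
		prog_put(old);
}

int main(void)
{
	_Atomic(struct prog *) slot = NULL;
	struct prog *p = calloc(1, sizeof(*p));

	p->refs = 1;          /* caller's reference */
	swap_prog(&slot, p);
	prog_put(p);          /* drop the caller's reference */
	swap_prog(&slot, NULL);
	return 0;
}
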
 
 static int mlx5e_xdp(struct net_device *dev, struct netdev_xdp *xdp)
@@ -3303,10 +3608,12 @@ static int mlx5e_xdp(struct net_device *dev, struct netdev_xdp *xdp)
 static void mlx5e_netpoll(struct net_device *dev)
 {
        struct mlx5e_priv *priv = netdev_priv(dev);
+       struct mlx5e_channels *chs = &priv->channels;
+
        int i;
 
-       for (i = 0; i < priv->params.num_channels; i++)
-               napi_schedule(&priv->channel[i]->napi);
+       for (i = 0; i < chs->num; i++)
+               napi_schedule(&chs->c[i]->napi);
 }
 #endif
 
@@ -3475,6 +3782,13 @@ void mlx5e_set_rx_cq_mode_params(struct mlx5e_params *params, u8 cq_period_mode)
        if (cq_period_mode == MLX5_CQ_PERIOD_MODE_START_FROM_CQE)
                params->rx_cq_moderation.usec =
                        MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_USEC_FROM_CQE;
+
+       if (params->rx_am_enabled)
+               params->rx_cq_moderation =
+                       mlx5e_am_get_def_profile(params->rx_cq_period_mode);
+
+       MLX5E_SET_PFLAG(params, MLX5E_PFLAG_RX_CQE_BASED_MODER,
+                       params->rx_cq_period_mode == MLX5_CQ_PERIOD_MODE_START_FROM_CQE);
 }
 
 u32 mlx5e_choose_lro_timeout(struct mlx5_core_dev *mdev, u32 wanted_timeout)
@@ -3489,75 +3803,79 @@ u32 mlx5e_choose_lro_timeout(struct mlx5_core_dev *mdev, u32 wanted_timeout)
        return MLX5_CAP_ETH(mdev, lro_timer_supported_periods[i]);
 }
 
-static void mlx5e_build_nic_netdev_priv(struct mlx5_core_dev *mdev,
-                                       struct net_device *netdev,
-                                       const struct mlx5e_profile *profile,
-                                       void *ppriv)
+static void mlx5e_build_nic_params(struct mlx5_core_dev *mdev,
+                                  struct mlx5e_params *params,
+                                  u16 max_channels)
 {
-       struct mlx5e_priv *priv = netdev_priv(netdev);
+       u8 cq_period_mode = 0;
        u32 link_speed = 0;
        u32 pci_bw = 0;
-       u8 cq_period_mode = MLX5_CAP_GEN(mdev, cq_period_start_from_cqe) ?
-                                        MLX5_CQ_PERIOD_MODE_START_FROM_CQE :
-                                        MLX5_CQ_PERIOD_MODE_START_FROM_EQE;
-
-       priv->mdev                         = mdev;
-       priv->netdev                       = netdev;
-       priv->params.num_channels          = profile->max_nch(mdev);
-       priv->profile                      = profile;
-       priv->ppriv                        = ppriv;
 
-       priv->params.lro_timeout =
-               mlx5e_choose_lro_timeout(mdev, MLX5E_DEFAULT_LRO_TIMEOUT);
+       params->num_channels = max_channels;
+       params->num_tc       = 1;
 
-       priv->params.log_sq_size = is_kdump_kernel() ?
+       /* SQ */
+       params->log_sq_size = is_kdump_kernel() ?
                MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE :
                MLX5E_PARAMS_DEFAULT_LOG_SQ_SIZE;
 
        /* set CQE compression */
-       priv->params.rx_cqe_compress_def = false;
+       params->rx_cqe_compress_def = false;
        if (MLX5_CAP_GEN(mdev, cqe_compression) &&
-           MLX5_CAP_GEN(mdev, vport_group_manager)) {
+            MLX5_CAP_GEN(mdev, vport_group_manager)) {
                mlx5e_get_max_linkspeed(mdev, &link_speed);
                mlx5e_get_pci_bw(mdev, &pci_bw);
                mlx5_core_dbg(mdev, "Max link speed = %d, PCI BW = %d\n",
-                             link_speed, pci_bw);
-               priv->params.rx_cqe_compress_def =
-                       cqe_compress_heuristic(link_speed, pci_bw);
+                              link_speed, pci_bw);
+               params->rx_cqe_compress_def = cqe_compress_heuristic(link_speed, pci_bw);
        }
-
-       MLX5E_SET_PFLAG(priv, MLX5E_PFLAG_RX_CQE_COMPRESS,
-                       priv->params.rx_cqe_compress_def);
-
-       mlx5e_set_rq_priv_params(priv);
-       if (priv->params.rq_wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ)
-               priv->params.lro_en = true;
-
-       priv->params.rx_am_enabled = MLX5_CAP_GEN(mdev, cq_moderation);
-       mlx5e_set_rx_cq_mode_params(&priv->params, cq_period_mode);
-
-       priv->params.tx_cq_moderation.usec =
-               MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_USEC;
-       priv->params.tx_cq_moderation.pkts =
-               MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_PKTS;
-       priv->params.tx_max_inline         = mlx5e_get_max_inline_cap(mdev);
-       mlx5_query_min_inline(mdev, &priv->params.tx_min_inline_mode);
-       if (priv->params.tx_min_inline_mode == MLX5_INLINE_MODE_NONE &&
+       MLX5E_SET_PFLAG(params, MLX5E_PFLAG_RX_CQE_COMPRESS, params->rx_cqe_compress_def);
+
+       /* RQ */
+       mlx5e_set_rq_params(mdev, params);
+
+       /* HW LRO */
+       if (params->rq_wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ)
+               params->lro_en = true;
+       params->lro_timeout = mlx5e_choose_lro_timeout(mdev, MLX5E_DEFAULT_LRO_TIMEOUT);
+
+       /* CQ moderation params */
+       cq_period_mode = MLX5_CAP_GEN(mdev, cq_period_start_from_cqe) ?
+                       MLX5_CQ_PERIOD_MODE_START_FROM_CQE :
+                       MLX5_CQ_PERIOD_MODE_START_FROM_EQE;
+       params->rx_am_enabled = MLX5_CAP_GEN(mdev, cq_moderation);
+       mlx5e_set_rx_cq_mode_params(params, cq_period_mode);
+
+       params->tx_cq_moderation.usec = MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_USEC;
+       params->tx_cq_moderation.pkts = MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_PKTS;
+
+       /* TX inline */
+       params->tx_max_inline = mlx5e_get_max_inline_cap(mdev);
+       mlx5_query_min_inline(mdev, &params->tx_min_inline_mode);
+       if (params->tx_min_inline_mode == MLX5_INLINE_MODE_NONE &&
            !MLX5_CAP_ETH(mdev, wqe_vlan_insert))
-               priv->params.tx_min_inline_mode = MLX5_INLINE_MODE_L2;
+               params->tx_min_inline_mode = MLX5_INLINE_MODE_L2;
 
-       priv->params.num_tc                = 1;
-       priv->params.rss_hfunc             = ETH_RSS_HASH_XOR;
+       /* RSS */
+       params->rss_hfunc = ETH_RSS_HASH_XOR;
+       netdev_rss_key_fill(params->toeplitz_hash_key, sizeof(params->toeplitz_hash_key));
+       mlx5e_build_default_indir_rqt(mdev, params->indirection_rqt,
+                                     MLX5E_INDIR_RQT_SIZE, max_channels);
+}
 
-       netdev_rss_key_fill(priv->params.toeplitz_hash_key,
-                           sizeof(priv->params.toeplitz_hash_key));
+static void mlx5e_build_nic_netdev_priv(struct mlx5_core_dev *mdev,
+                                       struct net_device *netdev,
+                                       const struct mlx5e_profile *profile,
+                                       void *ppriv)
+{
+       struct mlx5e_priv *priv = netdev_priv(netdev);
 
-       mlx5e_build_default_indir_rqt(mdev, priv->params.indirection_rqt,
-                                     MLX5E_INDIR_RQT_SIZE, profile->max_nch(mdev));
+       priv->mdev        = mdev;
+       priv->netdev      = netdev;
+       priv->profile     = profile;
+       priv->ppriv       = ppriv;
 
-       /* Initialize pflags */
-       MLX5E_SET_PFLAG(priv, MLX5E_PFLAG_RX_CQE_BASED_MODER,
-                       priv->params.rx_cq_period_mode == MLX5_CQ_PERIOD_MODE_START_FROM_CQE);
+       mlx5e_build_nic_params(mdev, &priv->channels.params, profile->max_nch(mdev));
 
        mutex_init(&priv->state_lock);
 
@@ -3642,13 +3960,19 @@ static void mlx5e_build_nic_netdev(struct net_device *netdev)
        if (fcs_supported)
                netdev->hw_features |= NETIF_F_RXALL;
 
+       if (MLX5_CAP_ETH(mdev, scatter_fcs))
+               netdev->hw_features |= NETIF_F_RXFCS;
+
        netdev->features          = netdev->hw_features;
-       if (!priv->params.lro_en)
+       if (!priv->channels.params.lro_en)
                netdev->features  &= ~NETIF_F_LRO;
 
        if (fcs_enabled)
                netdev->features  &= ~NETIF_F_RXALL;
 
+       if (!priv->channels.params.scatter_fcs_en)
+               netdev->features  &= ~NETIF_F_RXFCS;
+
 #define FT_CAP(f) MLX5_CAP_FLOWTABLE(mdev, flow_table_properties_nic_receive.f)
        if (FT_CAP(flow_modify_en) &&
            FT_CAP(modify_root) &&
@@ -3708,8 +4032,8 @@ static void mlx5e_nic_cleanup(struct mlx5e_priv *priv)
 {
        mlx5e_vxlan_cleanup(priv);
 
-       if (priv->xdp_prog)
-               bpf_prog_put(priv->xdp_prog);
+       if (priv->channels.params.xdp_prog)
+               bpf_prog_put(priv->channels.params.xdp_prog);
 }
 
 static int mlx5e_init_nic_rx(struct mlx5e_priv *priv)
@@ -3872,6 +4196,10 @@ struct net_device *mlx5e_create_netdev(struct mlx5_core_dev *mdev,
                return NULL;
        }
 
+#ifdef CONFIG_RFS_ACCEL
+       netdev->rx_cpu_rmap = mdev->rmap;
+#endif
+
        profile->init(mdev, netdev, profile, ppriv);
 
        netif_carrier_off(netdev);
@@ -3906,7 +4234,7 @@ int mlx5e_attach_netdev(struct mlx5_core_dev *mdev, struct net_device *netdev)
        if (err)
                goto out;
 
-       err = mlx5e_open_drop_rq(priv);
+       err = mlx5e_open_drop_rq(mdev, &priv->drop_rq);
        if (err) {
                mlx5_core_err(mdev, "open drop rq failed, %d\n", err);
                goto err_cleanup_tx;
@@ -3925,7 +4253,7 @@ int mlx5e_attach_netdev(struct mlx5_core_dev *mdev, struct net_device *netdev)
        mlx5_query_port_max_mtu(priv->mdev, &max_mtu, 1);
        netdev->max_mtu = MLX5E_HW2SW_MTU(max_mtu);
 
-       mlx5e_set_dev_port_mtu(netdev);
+       mlx5e_set_dev_port_mtu(priv);
 
        if (profile->enable)
                profile->enable(priv);
@@ -3939,7 +4267,7 @@ int mlx5e_attach_netdev(struct mlx5_core_dev *mdev, struct net_device *netdev)
        return 0;
 
 err_close_drop_rq:
-       mlx5e_close_drop_rq(priv);
+       mlx5e_close_drop_rq(&priv->drop_rq);
 
 err_cleanup_tx:
        profile->cleanup_tx(priv);
@@ -4003,7 +4331,7 @@ void mlx5e_detach_netdev(struct mlx5_core_dev *mdev, struct net_device *netdev)
 
        mlx5e_destroy_q_counter(priv);
        profile->cleanup_rx(priv);
-       mlx5e_close_drop_rq(priv);
+       mlx5e_close_drop_rq(&priv->drop_rq);
        profile->cleanup_tx(priv);
        cancel_delayed_work_sync(&priv->update_stats_work);
 }
index 2c864574a9d5faeaa3b329f3bc0ab0d4e0cc7b55..53db5ec2c1225a7960e78965403aced80eecc94d 100644 (file)
@@ -102,14 +102,16 @@ static void mlx5e_rep_update_sw_counters(struct mlx5e_priv *priv)
        int i, j;
 
        memset(s, 0, sizeof(*s));
-       for (i = 0; i < priv->params.num_channels; i++) {
-               rq_stats = &priv->channel[i]->rq.stats;
+       for (i = 0; i < priv->channels.num; i++) {
+               struct mlx5e_channel *c = priv->channels.c[i];
+
+               rq_stats = &c->rq.stats;
 
                s->rx_packets   += rq_stats->packets;
                s->rx_bytes     += rq_stats->bytes;
 
-               for (j = 0; j < priv->params.num_tc; j++) {
-                       sq_stats = &priv->channel[i]->sq[j].stats;
+               for (j = 0; j < priv->channels.params.num_tc; j++) {
+                       sq_stats = &c->sq[j].stats;
 
                        s->tx_packets           += sq_stats->packets;
                        s->tx_bytes             += sq_stats->bytes;
@@ -187,22 +189,26 @@ int mlx5e_add_sqs_fwd_rules(struct mlx5e_priv *priv)
        struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
        struct mlx5_eswitch_rep *rep = priv->ppriv;
        struct mlx5e_channel *c;
-       int n, tc, err, num_sqs = 0;
+       int n, tc, num_sqs = 0;
+       int err = -ENOMEM;
        u16 *sqs;
 
-       sqs = kcalloc(priv->params.num_channels * priv->params.num_tc, sizeof(u16), GFP_KERNEL);
+       sqs = kcalloc(priv->channels.num * priv->channels.params.num_tc, sizeof(u16), GFP_KERNEL);
        if (!sqs)
-               return -ENOMEM;
+               goto out;
 
-       for (n = 0; n < priv->params.num_channels; n++) {
-               c = priv->channel[n];
+       for (n = 0; n < priv->channels.num; n++) {
+               c = priv->channels.c[n];
                for (tc = 0; tc < c->num_tc; tc++)
                        sqs[num_sqs++] = c->sq[tc].sqn;
        }
 
        err = mlx5_eswitch_sqs2vport_start(esw, rep, sqs, num_sqs);
-
        kfree(sqs);
+
+out:
+       if (err)
+               netdev_warn(priv->netdev, "Failed to add SQs FWD rules %d\n", err);
        return err;
 }
 
@@ -393,48 +399,27 @@ static const struct net_device_ops mlx5e_netdev_ops_rep = {
        .ndo_get_phys_port_name  = mlx5e_rep_get_phys_port_name,
        .ndo_setup_tc            = mlx5e_rep_ndo_setup_tc,
        .ndo_get_stats64         = mlx5e_rep_get_stats,
-       .ndo_udp_tunnel_add      = mlx5e_add_vxlan_port,
-       .ndo_udp_tunnel_del      = mlx5e_del_vxlan_port,
        .ndo_has_offload_stats   = mlx5e_has_offload_stats,
        .ndo_get_offload_stats   = mlx5e_get_offload_stats,
 };
 
-static void mlx5e_build_rep_netdev_priv(struct mlx5_core_dev *mdev,
-                                       struct net_device *netdev,
-                                       const struct mlx5e_profile *profile,
-                                       void *ppriv)
+static void mlx5e_build_rep_params(struct mlx5_core_dev *mdev,
+                                  struct mlx5e_params *params)
 {
-       struct mlx5e_priv *priv = netdev_priv(netdev);
        u8 cq_period_mode = MLX5_CAP_GEN(mdev, cq_period_start_from_cqe) ?
                                         MLX5_CQ_PERIOD_MODE_START_FROM_CQE :
                                         MLX5_CQ_PERIOD_MODE_START_FROM_EQE;
 
-       priv->params.log_sq_size           =
-               MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE;
-       priv->params.rq_wq_type = MLX5_WQ_TYPE_LINKED_LIST;
-       priv->params.log_rq_size = MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE;
-
-       priv->params.min_rx_wqes = mlx5_min_rx_wqes(priv->params.rq_wq_type,
-                                           BIT(priv->params.log_rq_size));
-
-       priv->params.rx_am_enabled = MLX5_CAP_GEN(mdev, cq_moderation);
-       mlx5e_set_rx_cq_mode_params(&priv->params, cq_period_mode);
+       params->log_sq_size = MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE;
+       params->rq_wq_type  = MLX5_WQ_TYPE_LINKED_LIST;
+       params->log_rq_size = MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE;
 
-       priv->params.tx_max_inline         = mlx5e_get_max_inline_cap(mdev);
-       priv->params.num_tc                = 1;
+       params->rx_am_enabled = MLX5_CAP_GEN(mdev, cq_moderation);
+       mlx5e_set_rx_cq_mode_params(params, cq_period_mode);
 
-       priv->params.lro_wqe_sz            =
-               MLX5E_PARAMS_DEFAULT_LRO_WQE_SZ;
-
-       priv->mdev                         = mdev;
-       priv->netdev                       = netdev;
-       priv->params.num_channels          = profile->max_nch(mdev);
-       priv->profile                      = profile;
-       priv->ppriv                        = ppriv;
-
-       mutex_init(&priv->state_lock);
-
-       INIT_DELAYED_WORK(&priv->update_stats_work, mlx5e_update_stats_work);
+       params->tx_max_inline         = mlx5e_get_max_inline_cap(mdev);
+       params->num_tc                = 1;
+       params->lro_wqe_sz            = MLX5E_PARAMS_DEFAULT_LRO_WQE_SZ;
 }
 
 static void mlx5e_build_rep_netdev(struct net_device *netdev)
@@ -460,7 +445,19 @@ static void mlx5e_init_rep(struct mlx5_core_dev *mdev,
                           const struct mlx5e_profile *profile,
                           void *ppriv)
 {
-       mlx5e_build_rep_netdev_priv(mdev, netdev, profile, ppriv);
+       struct mlx5e_priv *priv = netdev_priv(netdev);
+
+       priv->mdev                         = mdev;
+       priv->netdev                       = netdev;
+       priv->profile                      = profile;
+       priv->ppriv                        = ppriv;
+
+       mutex_init(&priv->state_lock);
+
+       INIT_DELAYED_WORK(&priv->update_stats_work, mlx5e_update_stats_work);
+
+       priv->channels.params.num_channels = profile->max_nch(mdev);
+       mlx5e_build_rep_params(mdev, &priv->channels.params);
        mlx5e_build_rep_netdev(netdev);
 }
 
@@ -505,7 +502,7 @@ err_del_flow_rule:
 err_destroy_direct_tirs:
        mlx5e_destroy_direct_tirs(priv);
 err_destroy_direct_rqts:
-       for (i = 0; i < priv->params.num_channels; i++)
+       for (i = 0; i < priv->channels.params.num_channels; i++)
                mlx5e_destroy_rqt(priv, &priv->direct_tir[i].rqt);
        return err;
 }
@@ -518,7 +515,7 @@ static void mlx5e_cleanup_rep_rx(struct mlx5e_priv *priv)
        mlx5e_tc_cleanup(priv);
        mlx5_del_flow_rules(rep->vport_rx_rule);
        mlx5e_destroy_direct_tirs(priv);
-       for (i = 0; i < priv->params.num_channels; i++)
+       for (i = 0; i < priv->channels.params.num_channels; i++)
                mlx5e_destroy_rqt(priv, &priv->direct_tir[i].rqt);
 }
 
index 3d371688fbbbf3544170468840829e15095ea3a0..1a9532b31635f3dca7d8d6d57e590b1c18122315 100644 (file)
@@ -156,28 +156,6 @@ static inline u32 mlx5e_decompress_cqes_start(struct mlx5e_rq *rq,
        return mlx5e_decompress_cqes_cont(rq, cq, 1, budget_rem) - 1;
 }
 
-void mlx5e_modify_rx_cqe_compression_locked(struct mlx5e_priv *priv, bool val)
-{
-       bool was_opened;
-
-       if (!MLX5_CAP_GEN(priv->mdev, cqe_compression))
-               return;
-
-       if (MLX5E_GET_PFLAG(priv, MLX5E_PFLAG_RX_CQE_COMPRESS) == val)
-               return;
-
-       was_opened = test_bit(MLX5E_STATE_OPENED, &priv->state);
-       if (was_opened)
-               mlx5e_close_locked(priv->netdev);
-
-       MLX5E_SET_PFLAG(priv, MLX5E_PFLAG_RX_CQE_COMPRESS, val);
-       mlx5e_set_rq_type_params(priv, priv->params.rq_wq_type);
-
-       if (was_opened)
-               mlx5e_open_locked(priv->netdev);
-
-}
-
 #define RQ_PAGE_SIZE(rq) ((1 << rq->buff.page_order) << PAGE_SHIFT)
 
 static inline bool mlx5e_rx_cache_put(struct mlx5e_rq *rq,
@@ -331,7 +309,7 @@ mlx5e_copy_skb_header_mpwqe(struct device *pdev,
 static inline void mlx5e_post_umr_wqe(struct mlx5e_rq *rq, u16 ix)
 {
        struct mlx5e_mpw_info *wi = &rq->mpwqe.info[ix];
-       struct mlx5e_sq *sq = &rq->channel->icosq;
+       struct mlx5e_icosq *sq = &rq->channel->icosq;
        struct mlx5_wq_cyc *wq = &sq->wq;
        struct mlx5e_umr_wqe *wqe;
        u8 num_wqebbs = DIV_ROUND_UP(sizeof(*wqe), MLX5_SEND_WQE_BB);
@@ -341,7 +319,7 @@ static inline void mlx5e_post_umr_wqe(struct mlx5e_rq *rq, u16 ix)
        while ((pi = (sq->pc & wq->sz_m1)) > sq->edge) {
                sq->db.ico_wqe[pi].opcode = MLX5_OPCODE_NOP;
                sq->db.ico_wqe[pi].num_wqebbs = 1;
-               mlx5e_send_nop(sq, false);
+               mlx5e_post_nop(wq, sq->sqn, &sq->pc);
        }
 
        wqe = mlx5_wq_cyc_get_wqe(wq, pi);
@@ -353,7 +331,7 @@ static inline void mlx5e_post_umr_wqe(struct mlx5e_rq *rq, u16 ix)
        sq->db.ico_wqe[pi].opcode = MLX5_OPCODE_UMR;
        sq->db.ico_wqe[pi].num_wqebbs = num_wqebbs;
        sq->pc += num_wqebbs;
-       mlx5e_tx_notify_hw(sq, &wqe->ctrl, 0);
+       mlx5e_notify_hw(&sq->wq, sq->pc, sq->uar_map, &wqe->ctrl);
 }
 
 static int mlx5e_alloc_rx_umr_mpwqe(struct mlx5e_rq *rq,
@@ -601,6 +579,10 @@ static inline void mlx5e_build_rx_skb(struct mlx5_cqe64 *cqe,
        if (lro_num_seg > 1) {
                mlx5e_lro_update_hdr(skb, cqe, cqe_bcnt);
                skb_shinfo(skb)->gso_size = DIV_ROUND_UP(cqe_bcnt, lro_num_seg);
+               /* Subtract one since we already counted this as one
+                * "regular" packet in mlx5e_complete_rx_cqe()
+                */
+               rq->stats.packets += lro_num_seg - 1;
                rq->stats.lro_packets++;
                rq->stats.lro_bytes += cqe_bcnt;
        }
@@ -633,37 +615,36 @@ static inline void mlx5e_complete_rx_cqe(struct mlx5e_rq *rq,
        mlx5e_build_rx_skb(cqe, cqe_bcnt, rq, skb);
 }
 
-static inline void mlx5e_xmit_xdp_doorbell(struct mlx5e_sq *sq)
+static inline void mlx5e_xmit_xdp_doorbell(struct mlx5e_xdpsq *sq)
 {
        struct mlx5_wq_cyc *wq = &sq->wq;
        struct mlx5e_tx_wqe *wqe;
-       u16 pi = (sq->pc - MLX5E_XDP_TX_WQEBBS) & wq->sz_m1; /* last pi */
+       u16 pi = (sq->pc - 1) & wq->sz_m1; /* last pi */
 
        wqe  = mlx5_wq_cyc_get_wqe(wq, pi);
 
-       wqe->ctrl.fm_ce_se = MLX5_WQE_CTRL_CQ_UPDATE;
-       mlx5e_tx_notify_hw(sq, &wqe->ctrl, 0);
+       mlx5e_notify_hw(wq, sq->pc, sq->uar_map, &wqe->ctrl);
 }
 
 static inline bool mlx5e_xmit_xdp_frame(struct mlx5e_rq *rq,
                                        struct mlx5e_dma_info *di,
                                        const struct xdp_buff *xdp)
 {
-       struct mlx5e_sq          *sq   = &rq->channel->xdp_sq;
+       struct mlx5e_xdpsq       *sq   = &rq->xdpsq;
        struct mlx5_wq_cyc       *wq   = &sq->wq;
-       u16                      pi    = sq->pc & wq->sz_m1;
+       u16                       pi   = sq->pc & wq->sz_m1;
        struct mlx5e_tx_wqe      *wqe  = mlx5_wq_cyc_get_wqe(wq, pi);
-       struct mlx5e_sq_wqe_info *wi   = &sq->db.xdp.wqe_info[pi];
 
        struct mlx5_wqe_ctrl_seg *cseg = &wqe->ctrl;
        struct mlx5_wqe_eth_seg  *eseg = &wqe->eth;
        struct mlx5_wqe_data_seg *dseg;
-       u8 ds_cnt = MLX5E_XDP_TX_DS_COUNT;
 
        ptrdiff_t data_offset = xdp->data - xdp->data_hard_start;
        dma_addr_t dma_addr  = di->addr + data_offset;
        unsigned int dma_len = xdp->data_end - xdp->data;
 
+       prefetchw(wqe);
+
        if (unlikely(dma_len < MLX5E_XDP_MIN_INLINE ||
                     MLX5E_SW2HW_MTU(rq->netdev->mtu) < dma_len)) {
                rq->stats.xdp_drop++;
@@ -671,48 +652,42 @@ static inline bool mlx5e_xmit_xdp_frame(struct mlx5e_rq *rq,
                return false;
        }
 
-       if (unlikely(!mlx5e_sq_has_room_for(sq, MLX5E_XDP_TX_WQEBBS))) {
-               if (sq->db.xdp.doorbell) {
+       if (unlikely(!mlx5e_wqc_has_room_for(wq, sq->cc, sq->pc, 1))) {
+               if (sq->db.doorbell) {
                        /* SQ is full, ring doorbell */
                        mlx5e_xmit_xdp_doorbell(sq);
-                       sq->db.xdp.doorbell = false;
+                       sq->db.doorbell = false;
                }
                rq->stats.xdp_tx_full++;
                mlx5e_page_release(rq, di, true);
                return false;
        }
 
-       dma_sync_single_for_device(sq->pdev, dma_addr, dma_len,
-                                  PCI_DMA_TODEVICE);
+       dma_sync_single_for_device(sq->pdev, dma_addr, dma_len, PCI_DMA_TODEVICE);
 
-       memset(wqe, 0, sizeof(*wqe));
+       cseg->fm_ce_se = 0;
 
        dseg = (struct mlx5_wqe_data_seg *)eseg + 1;
+
        /* copy the inline part if required */
        if (sq->min_inline_mode != MLX5_INLINE_MODE_NONE) {
                memcpy(eseg->inline_hdr.start, xdp->data, MLX5E_XDP_MIN_INLINE);
                eseg->inline_hdr.sz = cpu_to_be16(MLX5E_XDP_MIN_INLINE);
                dma_len  -= MLX5E_XDP_MIN_INLINE;
                dma_addr += MLX5E_XDP_MIN_INLINE;
-
-               ds_cnt   += MLX5E_XDP_IHS_DS_COUNT;
                dseg++;
        }
 
        /* write the dma part */
        dseg->addr       = cpu_to_be64(dma_addr);
        dseg->byte_count = cpu_to_be32(dma_len);
-       dseg->lkey       = sq->mkey_be;
 
        cseg->opmod_idx_opcode = cpu_to_be32((sq->pc << 8) | MLX5_OPCODE_SEND);
-       cseg->qpn_ds = cpu_to_be32((sq->sqn << 8) | ds_cnt);
 
-       sq->db.xdp.di[pi] = *di;
-       wi->opcode     = MLX5_OPCODE_SEND;
-       wi->num_wqebbs = MLX5E_XDP_TX_WQEBBS;
-       sq->pc += MLX5E_XDP_TX_WQEBBS;
+       sq->db.di[pi] = *di;
+       sq->pc++;
 
-       sq->db.xdp.doorbell = true;
+       sq->db.doorbell = true;
        rq->stats.xdp_tx++;
        return true;
 }
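
mlx5e_xmit_xdp_frame() no longer rings the doorbell per packet: it only marks sq->db.doorbell, and the poll loop flushes once per batch (see mlx5e_poll_rx_cq() below), collapsing many MMIO writes into one. The same trick in miniature, with illustrative types:

#include <stdbool.h>
#include <stdio.h>

struct xdp_sq { unsigned int pc; bool doorbell; };

static void ring_doorbell(struct xdp_sq *sq)
{
	/* stands in for the MMIO write that tells HW about sq->pc */
	printf("doorbell: pc=%u\n", sq->pc);
}

static void post_descriptor(struct xdp_sq *sq)
{
	sq->pc++;            /* producer counter advances per descriptor */
	sq->doorbell = true; /* defer the expensive MMIO write */
}

static void flush_doorbell(struct xdp_sq *sq)
{
	if (sq->doorbell) {
		ring_doorbell(sq);  /* one write covers the whole batch */
		sq->doorbell = false;
	}
}

int main(void)
{
	struct xdp_sq sq = { 0, false };

	for (int i = 0; i < 8; i++)
		post_descriptor(&sq);
	flush_doorbell(&sq);
	return 0;
}
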
@@ -946,7 +921,7 @@ mpwrq_cqe_out:
 int mlx5e_poll_rx_cq(struct mlx5e_cq *cq, int budget)
 {
        struct mlx5e_rq *rq = container_of(cq, struct mlx5e_rq, cq);
-       struct mlx5e_sq *xdp_sq = &rq->channel->xdp_sq;
+       struct mlx5e_xdpsq *xdpsq = &rq->xdpsq;
        int work_done = 0;
 
        if (unlikely(!test_bit(MLX5E_RQ_STATE_ENABLED, &rq->state)))
@@ -973,9 +948,9 @@ int mlx5e_poll_rx_cq(struct mlx5e_cq *cq, int budget)
                rq->handle_rx_cqe(rq, cqe);
        }
 
-       if (xdp_sq->db.xdp.doorbell) {
-               mlx5e_xmit_xdp_doorbell(xdp_sq);
-               xdp_sq->db.xdp.doorbell = false;
+       if (xdpsq->db.doorbell) {
+               mlx5e_xmit_xdp_doorbell(xdpsq);
+               xdpsq->db.doorbell = false;
        }
 
        mlx5_cqwq_update_db_record(&cq->wq);
@@ -985,3 +960,74 @@ int mlx5e_poll_rx_cq(struct mlx5e_cq *cq, int budget)
 
        return work_done;
 }
+
+bool mlx5e_poll_xdpsq_cq(struct mlx5e_cq *cq)
+{
+       struct mlx5e_xdpsq *sq;
+       struct mlx5e_rq *rq;
+       u16 sqcc;
+       int i;
+
+       sq = container_of(cq, struct mlx5e_xdpsq, cq);
+
+       if (unlikely(!test_bit(MLX5E_SQ_STATE_ENABLED, &sq->state)))
+               return false;
+
+       rq = container_of(sq, struct mlx5e_rq, xdpsq);
+
+       /* sq->cc must be updated only after mlx5_cqwq_update_db_record(),
+        * otherwise a cq overrun may occur
+        */
+       sqcc = sq->cc;
+
+       for (i = 0; i < MLX5E_TX_CQ_POLL_BUDGET; i++) {
+               struct mlx5_cqe64 *cqe;
+               u16 wqe_counter;
+               bool last_wqe;
+
+               cqe = mlx5e_get_cqe(cq);
+               if (!cqe)
+                       break;
+
+               mlx5_cqwq_pop(&cq->wq);
+
+               wqe_counter = be16_to_cpu(cqe->wqe_counter);
+
+               do {
+                       struct mlx5e_dma_info *di;
+                       u16 ci;
+
+                       last_wqe = (sqcc == wqe_counter);
+
+                       ci = sqcc & sq->wq.sz_m1;
+                       di = &sq->db.di[ci];
+
+                       sqcc++;
+                       /* Recycle RX page */
+                       mlx5e_page_release(rq, di, true);
+               } while (!last_wqe);
+       }
+
+       mlx5_cqwq_update_db_record(&cq->wq);
+
+       /* ensure cq space is freed before enabling more cqes */
+       wmb();
+
+       sq->cc = sqcc;
+       return (i == MLX5E_TX_CQ_POLL_BUDGET);
+}
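
mlx5e_poll_xdpsq_cq() reconciles completions by counter rather than per entry: each CQE carries the index of the last WQE it covers, and the consumer counter walks forward, releasing every ring slot up to and including that index. A compact sketch of the walk, assuming a power-of-two ring so that masking with size minus one yields a slot index:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define SQ_SIZE  64                 /* illustrative power-of-two ring size */
#define SQ_SZ_M1 (SQ_SIZE - 1)

struct entry { int page; };

static void release_page(int page)
{
	printf("release page %d\n", page);
}

static void complete_up_to(struct entry *ring, uint16_t *cc, uint16_t wqe_counter)
{
	bool last_wqe;

	do {
		uint16_t ci = *cc & SQ_SZ_M1;  /* counter -> slot index */

		last_wqe = (*cc == wqe_counter);
		release_page(ring[ci].page);
		(*cc)++;                       /* counters wrap naturally */
	} while (!last_wqe);
}

int main(void)
{
	struct entry ring[SQ_SIZE] = { { 0 } };
	uint16_t cc = 0;

	for (int i = 0; i < SQ_SIZE; i++)
		ring[i].page = i;
	complete_up_to(ring, &cc, 3);  /* releases slots 0..3 */
	return 0;
}
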
+
+void mlx5e_free_xdpsq_descs(struct mlx5e_xdpsq *sq)
+{
+       struct mlx5e_rq *rq = container_of(sq, struct mlx5e_rq, xdpsq);
+       struct mlx5e_dma_info *di;
+       u16 ci;
+
+       while (sq->cc != sq->pc) {
+               ci = sq->cc & sq->wq.sz_m1;
+               di = &sq->db.di[ci];
+               sq->cc++;
+
+               mlx5e_page_release(rq, di, false);
+       }
+}
index cbfac06b7ffd1d5140226ccb87331db57d4880d8..02dd3a95ed8f013d0d4795d5054bd79ae4ca1201 100644 (file)
@@ -293,7 +293,7 @@ void mlx5e_rx_am_work(struct work_struct *work)
        struct mlx5e_rq *rq = container_of(am, struct mlx5e_rq, am);
        struct mlx5e_cq_moder cur_profile = profile[am->mode][am->profile_ix];
 
-       mlx5_core_modify_cq_moderation(rq->priv->mdev, &rq->cq.mcq,
+       mlx5_core_modify_cq_moderation(rq->mdev, &rq->cq.mcq,
                                       cur_profile.usec, cur_profile.pkts);
 
        am->state = MLX5E_AM_START_MEASURE;
index 31e3cb7ee5feeb35ce383c5582bc800733b81643..5225f2226a67cc25761d265e3d5f69b510d0df3b 100644 (file)
@@ -204,9 +204,6 @@ mlx5e_test_loopback_validate(struct sk_buff *skb,
        struct iphdr *iph;
 
        /* We are only going to peek, no need to clone the SKB */
-       if (skb->protocol != htons(ETH_P_IP))
-               goto out;
-
        if (MLX5E_TEST_PKT_SIZE - ETH_HLEN > skb_headlen(skb))
                goto out;
 
@@ -239,17 +236,14 @@ static int mlx5e_test_loopback_setup(struct mlx5e_priv *priv,
 {
        int err = 0;
 
-       err = mlx5e_refresh_tirs_self_loopback(priv->mdev, true);
-       if (err) {
-               netdev_err(priv->netdev,
-                          "\tFailed to enable UC loopback err(%d)\n", err);
+       err = mlx5e_refresh_tirs(priv, true);
+       if (err)
                return err;
-       }
 
        lbtp->loopback_ok = false;
        init_completion(&lbtp->comp);
 
-       lbtp->pt.type = htons(ETH_P_ALL);
+       lbtp->pt.type = htons(ETH_P_IP);
        lbtp->pt.func = mlx5e_test_loopback_validate;
        lbtp->pt.dev = priv->netdev;
        lbtp->pt.af_packet_priv = lbtp;
@@ -261,7 +255,7 @@ static void mlx5e_test_loopback_cleanup(struct mlx5e_priv *priv,
                                        struct mlx5e_lbt_priv *lbtp)
 {
        dev_remove_pack(&lbtp->pt);
-       mlx5e_refresh_tirs_self_loopback(priv->mdev, false);
+       mlx5e_refresh_tirs(priv, false);
 }
 
 #define MLX5E_LB_VERIFY_TIMEOUT (msecs_to_jiffies(200))
index 44406a5ec15d96a6ca45d30b609864f8cccb07e1..9dec11c00a49b379bc0524ecfaebf72d775d3b2f 100644 (file)
 #include <net/tc_act/tc_mirred.h>
 #include <net/tc_act/tc_vlan.h>
 #include <net/tc_act/tc_tunnel_key.h>
+#include <net/tc_act/tc_pedit.h>
 #include <net/vxlan.h>
 #include "en.h"
 #include "en_tc.h"
 #include "eswitch.h"
 #include "vxlan.h"
 
+struct mlx5_nic_flow_attr {
+       u32 action;
+       u32 flow_tag;
+       u32 mod_hdr_id;
+};
+
+enum {
+       MLX5E_TC_FLOW_ESWITCH   = BIT(0),
+       MLX5E_TC_FLOW_NIC       = BIT(1),
+};
+
 struct mlx5e_tc_flow {
        struct rhash_head       node;
        u64                     cookie;
+       u8                      flags;
        struct mlx5_flow_handle *rule;
        struct list_head        encap; /* flows sharing the same encap */
-       struct mlx5_esw_flow_attr *attr;
+       union {
+               struct mlx5_esw_flow_attr esw_attr[0];
+               struct mlx5_nic_flow_attr nic_attr[0];
+       };
+};
+
+struct mlx5e_tc_flow_parse_attr {
+       struct mlx5_flow_spec spec;
+       int num_mod_hdr_actions;
+       void *mod_hdr_actions;
 };
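
struct mlx5e_tc_flow above carries its per-path attributes (eswitch vs. NIC) as zero-length array members of a union, so a single allocation holds the common header plus exactly the attribute type this flow needs, selected via flow->flags. A small sketch of the layout trick, with made-up attribute types:

#include <stdio.h>
#include <stdlib.h>

struct nic_attr { unsigned int flow_tag; };
struct esw_attr { unsigned int vport; };

enum { FLOW_ESW = 1 << 0, FLOW_NIC = 1 << 1 };

struct flow {
	unsigned char flags;
	union {                       /* storage follows the struct itself */
		struct nic_attr nic[0];
		struct esw_attr esw[0];
	};
};

static struct flow *flow_alloc(unsigned char flags)
{
	size_t attr_size = (flags & FLOW_ESW) ? sizeof(struct esw_attr)
					      : sizeof(struct nic_attr);
	struct flow *f = calloc(1, sizeof(*f) + attr_size);

	if (f)
		f->flags = flags;
	return f;
}

int main(void)
{
	struct flow *f = flow_alloc(FLOW_NIC);

	if (!f)
		return 1;
	f->nic[0].flow_tag = 42;     /* attribute lives in the trailing storage */
	printf("flow_tag=%u\n", f->nic[0].flow_tag);
	free(f);
	return 0;
}
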
 
 enum {
@@ -66,24 +88,26 @@ enum {
 
 static struct mlx5_flow_handle *
 mlx5e_tc_add_nic_flow(struct mlx5e_priv *priv,
-                     struct mlx5_flow_spec *spec,
-                     u32 action, u32 flow_tag)
+                     struct mlx5e_tc_flow_parse_attr *parse_attr,
+                     struct mlx5e_tc_flow *flow)
 {
+       struct mlx5_nic_flow_attr *attr = flow->nic_attr;
        struct mlx5_core_dev *dev = priv->mdev;
-       struct mlx5_flow_destination dest = { 0 };
+       struct mlx5_flow_destination dest = {};
        struct mlx5_flow_act flow_act = {
-               .action = action,
-               .flow_tag = flow_tag,
+               .action = attr->action,
+               .flow_tag = attr->flow_tag,
                .encap_id = 0,
        };
        struct mlx5_fc *counter = NULL;
        struct mlx5_flow_handle *rule;
        bool table_created = false;
+       int err;
 
-       if (action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) {
+       if (attr->action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) {
                dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
                dest.ft = priv->fs.vlan.ft.t;
-       } else if (action & MLX5_FLOW_CONTEXT_ACTION_COUNT) {
+       } else if (attr->action & MLX5_FLOW_CONTEXT_ACTION_COUNT) {
                counter = mlx5_fc_create(dev, true);
                if (IS_ERR(counter))
                        return ERR_CAST(counter);
@@ -92,6 +116,19 @@ mlx5e_tc_add_nic_flow(struct mlx5e_priv *priv,
                dest.counter = counter;
        }
 
+       if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR) {
+               err = mlx5_modify_header_alloc(dev, MLX5_FLOW_NAMESPACE_KERNEL,
+                                              parse_attr->num_mod_hdr_actions,
+                                              parse_attr->mod_hdr_actions,
+                                              &attr->mod_hdr_id);
+               flow_act.modify_id = attr->mod_hdr_id;
+               kfree(parse_attr->mod_hdr_actions);
+               if (err) {
+                       rule = ERR_PTR(err);
+                       goto err_create_mod_hdr_id;
+               }
+       }
+
        if (IS_ERR_OR_NULL(priv->fs.tc.t)) {
                priv->fs.tc.t =
                        mlx5_create_auto_grouped_flow_table(priv->fs.ns,
@@ -109,8 +146,9 @@ mlx5e_tc_add_nic_flow(struct mlx5e_priv *priv,
                table_created = true;
        }
 
-       spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
-       rule = mlx5_add_flow_rules(priv->fs.tc.t, spec, &flow_act, &dest, 1);
+       parse_attr->spec.match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
+       rule = mlx5_add_flow_rules(priv->fs.tc.t, &parse_attr->spec,
+                                  &flow_act, &dest, 1);
 
        if (IS_ERR(rule))
                goto err_add_rule;
@@ -123,28 +161,104 @@ err_add_rule:
                priv->fs.tc.t = NULL;
        }
 err_create_ft:
+       if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)
+               mlx5_modify_header_dealloc(priv->mdev,
+                                          attr->mod_hdr_id);
+err_create_mod_hdr_id:
        mlx5_fc_destroy(dev, counter);
 
        return rule;
 }
 
+static void mlx5e_tc_del_nic_flow(struct mlx5e_priv *priv,
+                                 struct mlx5e_tc_flow *flow)
+{
+       struct mlx5_fc *counter = NULL;
+
+       counter = mlx5_flow_rule_counter(flow->rule);
+       mlx5_del_flow_rules(flow->rule);
+       mlx5_fc_destroy(priv->mdev, counter);
+
+       if (!mlx5e_tc_num_filters(priv) && (priv->fs.tc.t)) {
+               mlx5_destroy_flow_table(priv->fs.tc.t);
+               priv->fs.tc.t = NULL;
+       }
+
+       if (flow->nic_attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)
+               mlx5_modify_header_dealloc(priv->mdev,
+                                          flow->nic_attr->mod_hdr_id);
+}
+
+static void mlx5e_detach_encap(struct mlx5e_priv *priv,
+                              struct mlx5e_tc_flow *flow);
+
 static struct mlx5_flow_handle *
 mlx5e_tc_add_fdb_flow(struct mlx5e_priv *priv,
-                     struct mlx5_flow_spec *spec,
-                     struct mlx5_esw_flow_attr *attr)
+                     struct mlx5e_tc_flow_parse_attr *parse_attr,
+                     struct mlx5e_tc_flow *flow)
 {
        struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
+       struct mlx5_esw_flow_attr *attr = flow->esw_attr;
+       struct mlx5_flow_handle *rule;
        int err;
 
        err = mlx5_eswitch_add_vlan_action(esw, attr);
-       if (err)
-               return ERR_PTR(err);
+       if (err) {
+               rule = ERR_PTR(err);
+               goto err_add_vlan;
+       }
+
+       if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR) {
+               err = mlx5_modify_header_alloc(priv->mdev, MLX5_FLOW_NAMESPACE_FDB,
+                                              parse_attr->num_mod_hdr_actions,
+                                              parse_attr->mod_hdr_actions,
+                                              &attr->mod_hdr_id);
+               kfree(parse_attr->mod_hdr_actions);
+               if (err) {
+                       rule = ERR_PTR(err);
+                       goto err_mod_hdr;
+               }
+       }
 
-       return mlx5_eswitch_add_offloaded_rule(esw, spec, attr);
+       rule = mlx5_eswitch_add_offloaded_rule(esw, &parse_attr->spec, attr);
+       if (IS_ERR(rule))
+               goto err_add_rule;
+
+       return rule;
+
+err_add_rule:
+       if (flow->esw_attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)
+               mlx5_modify_header_dealloc(priv->mdev,
+                                          attr->mod_hdr_id);
+err_mod_hdr:
+       mlx5_eswitch_del_vlan_action(esw, attr);
+err_add_vlan:
+       if (attr->action & MLX5_FLOW_CONTEXT_ACTION_ENCAP)
+               mlx5e_detach_encap(priv, flow);
+       return rule;
+}
+
+static void mlx5e_tc_del_fdb_flow(struct mlx5e_priv *priv,
+                                 struct mlx5e_tc_flow *flow)
+{
+       struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
+       struct mlx5_esw_flow_attr *attr = flow->esw_attr;
+
+       mlx5_eswitch_del_offloaded_rule(esw, flow->rule, flow->esw_attr);
+
+       mlx5_eswitch_del_vlan_action(esw, flow->esw_attr);
+
+       if (flow->esw_attr->action & MLX5_FLOW_CONTEXT_ACTION_ENCAP)
+               mlx5e_detach_encap(priv, flow);
+
+       if (flow->esw_attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)
+               mlx5_modify_header_dealloc(priv->mdev,
+                                          attr->mod_hdr_id);
 }
 
 static void mlx5e_detach_encap(struct mlx5e_priv *priv,
-                              struct mlx5e_tc_flow *flow) {
+                              struct mlx5e_tc_flow *flow)
+{
        struct list_head *next = flow->encap.next;
 
        list_del(&flow->encap);
@@ -161,32 +275,13 @@ static void mlx5e_detach_encap(struct mlx5e_priv *priv,
        }
 }
 
-/* we get here also when setting rule to the FW failed, etc. It means that the
- * flow rule itself might not exist, but some offloading related to the actions
- * should be cleaned.
- */
 static void mlx5e_tc_del_flow(struct mlx5e_priv *priv,
                              struct mlx5e_tc_flow *flow)
 {
-       struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
-       struct mlx5_fc *counter = NULL;
-
-       if (!IS_ERR(flow->rule)) {
-               counter = mlx5_flow_rule_counter(flow->rule);
-               mlx5_del_flow_rules(flow->rule);
-               mlx5_fc_destroy(priv->mdev, counter);
-       }
-
-       if (esw && esw->mode == SRIOV_OFFLOADS) {
-               mlx5_eswitch_del_vlan_action(esw, flow->attr);
-               if (flow->attr->action & MLX5_FLOW_CONTEXT_ACTION_ENCAP)
-                       mlx5e_detach_encap(priv, flow);
-       }
-
-       if (!mlx5e_tc_num_filters(priv) && (priv->fs.tc.t)) {
-               mlx5_destroy_flow_table(priv->fs.tc.t);
-               priv->fs.tc.t = NULL;
-       }
+       if (flow->flags & MLX5E_TC_FLOW_ESWITCH)
+               mlx5e_tc_del_fdb_flow(priv, flow);
+       else
+               mlx5e_tc_del_nic_flow(priv, flow);
 }
 
 static void parse_vxlan_attr(struct mlx5_flow_spec *spec,
@@ -243,12 +338,15 @@ static int parse_tunnel_attr(struct mlx5e_priv *priv,
                        skb_flow_dissector_target(f->dissector,
                                                  FLOW_DISSECTOR_KEY_ENC_PORTS,
                                                  f->mask);
+               struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
+               struct net_device *up_dev = mlx5_eswitch_get_uplink_netdev(esw);
+               struct mlx5e_priv *up_priv = netdev_priv(up_dev);
 
                /* Full udp dst port must be given */
                if (memchr_inv(&mask->dst, 0xff, sizeof(mask->dst)))
                        goto vxlan_match_offload_err;
 
-               if (mlx5e_vxlan_lookup_port(priv, be16_to_cpu(key->dst)) &&
+               if (mlx5e_vxlan_lookup_port(up_priv, be16_to_cpu(key->dst)) &&
                    MLX5_CAP_ESW(priv->mdev, vxlan_encap_decap))
                        parse_vxlan_attr(spec, f);
                else {
@@ -598,6 +696,7 @@ static int __parse_cls_flower(struct mlx5e_priv *priv,
 }
 
 static int parse_cls_flower(struct mlx5e_priv *priv,
+                           struct mlx5e_tc_flow *flow,
                            struct mlx5_flow_spec *spec,
                            struct tc_cls_flower_offload *f)
 {
@@ -609,7 +708,7 @@ static int parse_cls_flower(struct mlx5e_priv *priv,
 
        err = __parse_cls_flower(priv, spec, f, &min_inline);
 
-       if (!err && esw->mode == SRIOV_OFFLOADS &&
+       if (!err && (flow->flags & MLX5E_TC_FLOW_ESWITCH) &&
            rep->vport != FDB_UPLINK_VPORT) {
                if (min_inline > esw->offloads.inline_mode) {
                        netdev_warn(priv->netdev,
@@ -622,29 +721,313 @@ static int parse_cls_flower(struct mlx5e_priv *priv,
        return err;
 }
 
+struct pedit_headers {
+       struct ethhdr  eth;
+       struct iphdr   ip4;
+       struct ipv6hdr ip6;
+       struct tcphdr  tcp;
+       struct udphdr  udp;
+};
+
+static int pedit_header_offsets[] = {
+       [TCA_PEDIT_KEY_EX_HDR_TYPE_ETH] = offsetof(struct pedit_headers, eth),
+       [TCA_PEDIT_KEY_EX_HDR_TYPE_IP4] = offsetof(struct pedit_headers, ip4),
+       [TCA_PEDIT_KEY_EX_HDR_TYPE_IP6] = offsetof(struct pedit_headers, ip6),
+       [TCA_PEDIT_KEY_EX_HDR_TYPE_TCP] = offsetof(struct pedit_headers, tcp),
+       [TCA_PEDIT_KEY_EX_HDR_TYPE_UDP] = offsetof(struct pedit_headers, udp),
+};
+
+#define pedit_header(_ph, _htype) ((void *)(_ph) + pedit_header_offsets[_htype])
+
+static int set_pedit_val(u8 hdr_type, u32 mask, u32 val, u32 offset,
+                        struct pedit_headers *masks,
+                        struct pedit_headers *vals)
+{
+       u32 *curr_pmask, *curr_pval;
+
+       if (hdr_type >= __PEDIT_HDR_TYPE_MAX)
+               goto out_err;
+
+       curr_pmask = (u32 *)(pedit_header(masks, hdr_type) + offset);
+       curr_pval  = (u32 *)(pedit_header(vals, hdr_type) + offset);
+
+       if (*curr_pmask & mask)  /* disallow acting twice on the same location */
+               goto out_err;
+
+       *curr_pmask |= mask;
+       *curr_pval  |= (val & mask);
+
+       return 0;
+
+out_err:
+       return -EOPNOTSUPP;
+}
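
set_pedit_val() and the fields[] table below lean on one aggregate, struct pedit_headers, holding a copy of every editable header, with offsetof() tables letting generic code address a field by a (header type, offset) pair instead of per-protocol branches. The same technique in miniature, with simplified headers:

#include <stddef.h>
#include <stdio.h>
#include <string.h>

struct eth_hdr { unsigned char dst[6], src[6]; unsigned short proto; };
struct ip4_hdr { unsigned char tos, ttl; unsigned int saddr, daddr; };

struct all_headers {
	struct eth_hdr eth;
	struct ip4_hdr ip4;
};

enum hdr_type { HDR_ETH, HDR_IP4, HDR_MAX };

static const size_t hdr_offsets[HDR_MAX] = {
	[HDR_ETH] = offsetof(struct all_headers, eth),
	[HDR_IP4] = offsetof(struct all_headers, ip4),
};

static unsigned int *field_ptr(struct all_headers *h, enum hdr_type t, size_t off)
{
	/* header-type table plus field offset locates any field generically */
	return (unsigned int *)((char *)h + hdr_offsets[t] + off);
}

int main(void)
{
	struct all_headers masks;

	memset(&masks, 0, sizeof(masks));
	*field_ptr(&masks, HDR_IP4, offsetof(struct ip4_hdr, saddr)) = 0xffffffffu;
	printf("ip4.saddr mask: %#x\n", masks.ip4.saddr);
	return 0;
}
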
+
+struct mlx5_fields {
+       u8  field;
+       u8  size;
+       u32 offset;
+};
+
+static struct mlx5_fields fields[] = {
+       {MLX5_ACTION_IN_FIELD_OUT_DMAC_47_16, 4, offsetof(struct pedit_headers, eth.h_dest[0])},
+       {MLX5_ACTION_IN_FIELD_OUT_DMAC_15_0,  2, offsetof(struct pedit_headers, eth.h_dest[4])},
+       {MLX5_ACTION_IN_FIELD_OUT_SMAC_47_16, 4, offsetof(struct pedit_headers, eth.h_source[0])},
+       {MLX5_ACTION_IN_FIELD_OUT_SMAC_15_0,  2, offsetof(struct pedit_headers, eth.h_source[4])},
+       {MLX5_ACTION_IN_FIELD_OUT_ETHERTYPE,  2, offsetof(struct pedit_headers, eth.h_proto)},
+
+       {MLX5_ACTION_IN_FIELD_OUT_IP_DSCP, 1, offsetof(struct pedit_headers, ip4.tos)},
+       {MLX5_ACTION_IN_FIELD_OUT_IP_TTL,  1, offsetof(struct pedit_headers, ip4.ttl)},
+       {MLX5_ACTION_IN_FIELD_OUT_SIPV4,   4, offsetof(struct pedit_headers, ip4.saddr)},
+       {MLX5_ACTION_IN_FIELD_OUT_DIPV4,   4, offsetof(struct pedit_headers, ip4.daddr)},
+
+       {MLX5_ACTION_IN_FIELD_OUT_SIPV6_127_96, 4, offsetof(struct pedit_headers, ip6.saddr.s6_addr32[0])},
+       {MLX5_ACTION_IN_FIELD_OUT_SIPV6_95_64,  4, offsetof(struct pedit_headers, ip6.saddr.s6_addr32[1])},
+       {MLX5_ACTION_IN_FIELD_OUT_SIPV6_63_32,  4, offsetof(struct pedit_headers, ip6.saddr.s6_addr32[2])},
+       {MLX5_ACTION_IN_FIELD_OUT_SIPV6_31_0,   4, offsetof(struct pedit_headers, ip6.saddr.s6_addr32[3])},
+       {MLX5_ACTION_IN_FIELD_OUT_DIPV6_127_96, 4, offsetof(struct pedit_headers, ip6.daddr.s6_addr32[0])},
+       {MLX5_ACTION_IN_FIELD_OUT_DIPV6_95_64,  4, offsetof(struct pedit_headers, ip6.daddr.s6_addr32[1])},
+       {MLX5_ACTION_IN_FIELD_OUT_DIPV6_63_32,  4, offsetof(struct pedit_headers, ip6.daddr.s6_addr32[2])},
+       {MLX5_ACTION_IN_FIELD_OUT_DIPV6_31_0,   4, offsetof(struct pedit_headers, ip6.daddr.s6_addr32[3])},
+
+       {MLX5_ACTION_IN_FIELD_OUT_TCP_SPORT, 2, offsetof(struct pedit_headers, tcp.source)},
+       {MLX5_ACTION_IN_FIELD_OUT_TCP_DPORT, 2, offsetof(struct pedit_headers, tcp.dest)},
+       {MLX5_ACTION_IN_FIELD_OUT_TCP_FLAGS, 1, offsetof(struct pedit_headers, tcp.ack_seq) + 5},
+
+       {MLX5_ACTION_IN_FIELD_OUT_UDP_SPORT, 2, offsetof(struct pedit_headers, udp.source)},
+       {MLX5_ACTION_IN_FIELD_OUT_UDP_DPORT, 2, offsetof(struct pedit_headers, udp.dest)},
+};
+
+/* On input, parse_attr->num_mod_hdr_actions holds the maximum number of HW
+ * actions that can be parsed from the SW pedit action. On success, it is
+ * set to the number of HW actions actually parsed.
+ */
+static int offload_pedit_fields(struct pedit_headers *masks,
+                               struct pedit_headers *vals,
+                               struct mlx5e_tc_flow_parse_attr *parse_attr)
+{
+       struct pedit_headers *set_masks, *add_masks, *set_vals, *add_vals;
+       int i, action_size, nactions, max_actions, first, last;
+       void *s_masks_p, *a_masks_p, *vals_p;
+       u32 s_mask, a_mask, val;
+       struct mlx5_fields *f;
+       u8 cmd, field_bsize;
+       unsigned long mask;
+       void *action;
+
+       set_masks = &masks[TCA_PEDIT_KEY_EX_CMD_SET];
+       add_masks = &masks[TCA_PEDIT_KEY_EX_CMD_ADD];
+       set_vals = &vals[TCA_PEDIT_KEY_EX_CMD_SET];
+       add_vals = &vals[TCA_PEDIT_KEY_EX_CMD_ADD];
+
+       action_size = MLX5_UN_SZ_BYTES(set_action_in_add_action_in_auto);
+       action = parse_attr->mod_hdr_actions;
+       max_actions = parse_attr->num_mod_hdr_actions;
+       nactions = 0;
+
+       for (i = 0; i < ARRAY_SIZE(fields); i++) {
+               f = &fields[i];
+               /* avoid seeing bits set from previous iterations */
+               s_mask = a_mask = mask = val = 0;
+
+               s_masks_p = (void *)set_masks + f->offset;
+               a_masks_p = (void *)add_masks + f->offset;
+
+               memcpy(&s_mask, s_masks_p, f->size);
+               memcpy(&a_mask, a_masks_p, f->size);
+
+               if (!s_mask && !a_mask) /* nothing to offload here */
+                       continue;
+
+               if (s_mask && a_mask) {
+                       printk(KERN_WARNING "mlx5: can't set and add to the same HW field (%x)\n", f->field);
+                       return -EOPNOTSUPP;
+               }
+
+               if (nactions == max_actions) {
+                       printk(KERN_WARNING "mlx5: parsed %d pedit actions, can't do more\n", nactions);
+                       return -EOPNOTSUPP;
+               }
+
+               if (s_mask) {
+                       cmd  = MLX5_ACTION_TYPE_SET;
+                       mask = s_mask;
+                       vals_p = (void *)set_vals + f->offset;
+                       /* clear to denote we consumed this field */
+                       memset(s_masks_p, 0, f->size);
+               } else {
+                       cmd  = MLX5_ACTION_TYPE_ADD;
+                       mask = a_mask;
+                       vals_p = (void *)add_vals + f->offset;
+                       /* clear to denote we consumed this field */
+                       memset(a_masks_p, 0, f->size);
+               }
+
+               memcpy(&val, vals_p, f->size);
+
+               field_bsize = f->size * BITS_PER_BYTE;
+               first = find_first_bit(&mask, field_bsize);
+               last  = find_last_bit(&mask, field_bsize);
+               if (first > 0 || last != (field_bsize - 1)) {
+                       printk(KERN_WARNING "mlx5: partial rewrite (mask %lx) is currently not offloaded\n",
+                              mask);
+                       return -EOPNOTSUPP;
+               }
+
+               MLX5_SET(set_action_in, action, action_type, cmd);
+               MLX5_SET(set_action_in, action, field, f->field);
+
+               if (cmd == MLX5_ACTION_TYPE_SET) {
+                       MLX5_SET(set_action_in, action, offset, 0);
+                       /* length is num of bits to be written, zero means length of 32 */
+                       MLX5_SET(set_action_in, action, length, field_bsize);
+               }
+
+               if (field_bsize == 32)
+                       MLX5_SET(set_action_in, action, data, ntohl(val));
+               else if (field_bsize == 16)
+                       MLX5_SET(set_action_in, action, data, ntohs(val));
+               else if (field_bsize == 8)
+                       MLX5_SET(set_action_in, action, data, val);
+
+               action += action_size;
+               nactions++;
+       }
+
+       parse_attr->num_mod_hdr_actions = nactions;
+       return 0;
+}
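
offload_pedit_fields() refuses partial rewrites: the consumed mask must start at bit 0 and end at the field's last bit. Below is a hedged stand-alone equivalent of that first/last-bit test for fields up to 32 bits wide, using compiler builtins in place of find_first_bit()/find_last_bit(); mask_covers_field() is an illustrative name.

/* Sketch only: the whole-field test from offload_pedit_fields(). Like
 * the kernel check, it inspects only the first and last set bits, so a
 * mask with interior holes that still spans the field would pass.
 */
#include <stdint.h>
#include <stdio.h>

static int mask_covers_field(uint32_t mask, unsigned int field_bits)
{
	unsigned int first, last;

	if (!mask)
		return 0;	/* the builtins are undefined for 0 */

	first = __builtin_ctz(mask);		/* lowest set bit  */
	last  = 31 - __builtin_clz(mask);	/* highest set bit */

	return first == 0 && last == field_bits - 1;
}

int main(void)
{
	printf("%d\n", mask_covers_field(0xffffffff, 32));	/* 1: full field    */
	printf("%d\n", mask_covers_field(0x0000ffff, 16));	/* 1: full 16 bits  */
	printf("%d\n", mask_covers_field(0x00ff0000, 32));	/* 0: partial write */
	return 0;
}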
+
+static int alloc_mod_hdr_actions(struct mlx5e_priv *priv,
+                                const struct tc_action *a, int namespace,
+                                struct mlx5e_tc_flow_parse_attr *parse_attr)
+{
+       int nkeys, action_size, max_actions;
+
+       nkeys = tcf_pedit_nkeys(a);
+       action_size = MLX5_UN_SZ_BYTES(set_action_in_add_action_in_auto);
+
+       if (namespace == MLX5_FLOW_NAMESPACE_FDB) /* FDB offloading */
+               max_actions = MLX5_CAP_ESW_FLOWTABLE_FDB(priv->mdev, max_modify_header_actions);
+       else /* namespace is MLX5_FLOW_NAMESPACE_KERNEL - NIC offloading */
+               max_actions = MLX5_CAP_FLOWTABLE_NIC_RX(priv->mdev, max_modify_header_actions);
+
+       /* a single 32-bit pedit SW key can expand to as many as 16 HW actions */
+       max_actions = min(max_actions, nkeys * 16);
+
+       parse_attr->mod_hdr_actions = kcalloc(max_actions, action_size, GFP_KERNEL);
+       if (!parse_attr->mod_hdr_actions)
+               return -ENOMEM;
+
+       parse_attr->num_mod_hdr_actions = max_actions;
+       return 0;
+}
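
The sizing in alloc_mod_hdr_actions() is a simple min(): never more than the device cap, never more than the worst case of 16 single-field HW actions per 32-bit SW key. A hedged arithmetic sketch with invented inputs:

/* Sketch only: buffer sizing from alloc_mod_hdr_actions(); fw_cap and
 * nkeys are made-up example values.
 */
#include <stdio.h>

int main(void)
{
	int fw_cap = 128;	/* max_modify_header_actions from HCA caps */
	int nkeys  = 3;		/* 32-bit SW keys in the pedit action */
	int max_actions = nkeys * 16 < fw_cap ? nkeys * 16 : fw_cap;

	printf("room for %d HW actions\n", max_actions);	/* 48 */
	return 0;
}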
+
+static const struct pedit_headers zero_masks = {};
+
+static int parse_tc_pedit_action(struct mlx5e_priv *priv,
+                                const struct tc_action *a, int namespace,
+                                struct mlx5e_tc_flow_parse_attr *parse_attr)
+{
+       struct pedit_headers masks[__PEDIT_CMD_MAX], vals[__PEDIT_CMD_MAX], *cmd_masks;
+       int nkeys, i, err = -EOPNOTSUPP;
+       u32 mask, val, offset;
+       u8 cmd, htype;
+
+       nkeys = tcf_pedit_nkeys(a);
+
+       memset(masks, 0, sizeof(struct pedit_headers) * __PEDIT_CMD_MAX);
+       memset(vals,  0, sizeof(struct pedit_headers) * __PEDIT_CMD_MAX);
+
+       for (i = 0; i < nkeys; i++) {
+               htype = tcf_pedit_htype(a, i);
+               cmd = tcf_pedit_cmd(a, i);
+               err = -EOPNOTSUPP; /* can't be all optimistic */
+
+               if (htype == TCA_PEDIT_KEY_EX_HDR_TYPE_NETWORK) {
+                       printk(KERN_WARNING "mlx5: legacy pedit isn't offloaded\n");
+                       goto out_err;
+               }
+
+               if (cmd != TCA_PEDIT_KEY_EX_CMD_SET && cmd != TCA_PEDIT_KEY_EX_CMD_ADD) {
+                       printk(KERN_WARNING "mlx5: pedit cmd %d isn't offloaded\n", cmd);
+                       goto out_err;
+               }
+
+               mask = tcf_pedit_mask(a, i);
+               val = tcf_pedit_val(a, i);
+               offset = tcf_pedit_offset(a, i);
+
+               err = set_pedit_val(htype, ~mask, val, offset, &masks[cmd], &vals[cmd]);
+               if (err)
+                       goto out_err;
+       }
+
+       err = alloc_mod_hdr_actions(priv, a, namespace, parse_attr);
+       if (err)
+               goto out_err;
+
+       err = offload_pedit_fields(masks, vals, parse_attr);
+       if (err < 0)
+               goto out_dealloc_parsed_actions;
+
+       for (cmd = 0; cmd < __PEDIT_CMD_MAX; cmd++) {
+               cmd_masks = &masks[cmd];
+               if (memcmp(cmd_masks, &zero_masks, sizeof(zero_masks))) {
+                       printk(KERN_WARNING "mlx5: attempt to offload an unsupported field (cmd %d)\n",
+                              cmd);
+                       print_hex_dump(KERN_WARNING, "mask: ", DUMP_PREFIX_ADDRESS,
+                                      16, 1, cmd_masks, sizeof(zero_masks), true);
+                       err = -EOPNOTSUPP;
+                       goto out_dealloc_parsed_actions;
+               }
+       }
+
+       return 0;
+
+out_dealloc_parsed_actions:
+       kfree(parse_attr->mod_hdr_actions);
+out_err:
+       return err;
+}
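
After offload_pedit_fields() returns, parse_tc_pedit_action() sweeps the per-command masks once more: any byte still set marks a header field the translation loop did not consume, and the whole flow is rejected. A hedged stand-alone version of that leftover check follows; CMD_MAX and the 64-byte shadow are illustrative sizes, not the kernel's.

/* Sketch only: leftover-mask detection as in parse_tc_pedit_action();
 * a zeroed reference block plays the role of zero_masks.
 */
#include <stdio.h>
#include <string.h>

#define CMD_MAX   2	/* SET and ADD */
#define HDR_BYTES 64

int main(void)
{
	unsigned char masks[CMD_MAX][HDR_BYTES] = {{0}};
	static const unsigned char zero[HDR_BYTES];
	int cmd;

	masks[1][10] = 0xf0;	/* pretend one ADD field was not consumed */

	for (cmd = 0; cmd < CMD_MAX; cmd++) {
		if (memcmp(masks[cmd], zero, sizeof(zero))) {
			printf("cmd %d: unsupported field left over\n", cmd);
			return 1;
		}
	}
	printf("all pedit keys offloaded\n");
	return 0;
}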
+
 static int parse_tc_nic_actions(struct mlx5e_priv *priv, struct tcf_exts *exts,
-                               u32 *action, u32 *flow_tag)
+                               struct mlx5e_tc_flow_parse_attr *parse_attr,
+                               struct mlx5e_tc_flow *flow)
 {
+       struct mlx5_nic_flow_attr *attr = flow->nic_attr;
        const struct tc_action *a;
        LIST_HEAD(actions);
+       int err;
 
        if (tc_no_actions(exts))
                return -EINVAL;
 
-       *flow_tag = MLX5_FS_DEFAULT_FLOW_TAG;
-       *action = 0;
+       attr->flow_tag = MLX5_FS_DEFAULT_FLOW_TAG;
+       attr->action = 0;
 
        tcf_exts_to_list(exts, &actions);
        list_for_each_entry(a, &actions, list) {
                /* Only support a single action per rule */
-               if (*action)
+               if (attr->action)
                        return -EINVAL;
 
                if (is_tcf_gact_shot(a)) {
-                       *action |= MLX5_FLOW_CONTEXT_ACTION_DROP;
+                       attr->action |= MLX5_FLOW_CONTEXT_ACTION_DROP;
                        if (MLX5_CAP_FLOWTABLE(priv->mdev,
                                               flow_table_properties_nic_receive.flow_counter))
-                               *action |= MLX5_FLOW_CONTEXT_ACTION_COUNT;
+                               attr->action |= MLX5_FLOW_CONTEXT_ACTION_COUNT;
+                       continue;
+               }
+
+               if (is_tcf_pedit(a)) {
+                       err = parse_tc_pedit_action(priv, a, MLX5_FLOW_NAMESPACE_KERNEL,
+                                                   parse_attr);
+                       if (err)
+                               return err;
+
+                       attr->action |= MLX5_FLOW_CONTEXT_ACTION_MOD_HDR |
+                                       MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
                        continue;
                }
 
@@ -657,8 +1040,8 @@ static int parse_tc_nic_actions(struct mlx5e_priv *priv, struct tcf_exts *exts,
                                return -EINVAL;
                        }
 
-                       *flow_tag = mark;
-                       *action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
+                       attr->flow_tag = mark;
+                       attr->action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
                        continue;
                }
 
@@ -970,6 +1353,8 @@ static int mlx5e_attach_encap(struct mlx5e_priv *priv,
                              struct mlx5_esw_flow_attr *attr)
 {
        struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
+       struct net_device *up_dev = mlx5_eswitch_get_uplink_netdev(esw);
+       struct mlx5e_priv *up_priv = netdev_priv(up_dev);
        unsigned short family = ip_tunnel_info_af(tun_info);
        struct ip_tunnel_key *key = &tun_info->key;
        struct mlx5_encap_entry *e;
@@ -990,7 +1375,7 @@ vxlan_encap_offload_err:
                return -EOPNOTSUPP;
        }
 
-       if (mlx5e_vxlan_lookup_port(priv, be16_to_cpu(key->tp_dst)) &&
+       if (mlx5e_vxlan_lookup_port(up_priv, be16_to_cpu(key->tp_dst)) &&
            MLX5_CAP_ESW(priv->mdev, vxlan_encap_decap)) {
                tunnel_type = MLX5_HEADER_TYPE_VXLAN;
        } else {
@@ -1041,9 +1426,10 @@ out_err:
 }
 
 static int parse_tc_fdb_actions(struct mlx5e_priv *priv, struct tcf_exts *exts,
+                               struct mlx5e_tc_flow_parse_attr *parse_attr,
                                struct mlx5e_tc_flow *flow)
 {
-       struct mlx5_esw_flow_attr *attr = flow->attr;
+       struct mlx5_esw_flow_attr *attr = flow->esw_attr;
        struct ip_tunnel_info *info = NULL;
        const struct tc_action *a;
        LIST_HEAD(actions);
@@ -1064,6 +1450,16 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv, struct tcf_exts *exts,
                        continue;
                }
 
+               if (is_tcf_pedit(a)) {
+                       err = parse_tc_pedit_action(priv, a, MLX5_FLOW_NAMESPACE_FDB,
+                                                   parse_attr);
+                       if (err)
+                               return err;
+
+                       attr->action |= MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
+                       continue;
+               }
+
                if (is_tcf_mirred_egress_redirect(a)) {
                        int ifindex = tcf_mirred_ifindex(a);
                        struct net_device *out_dev;
@@ -1106,14 +1502,16 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv, struct tcf_exts *exts,
                }
 
                if (is_tcf_vlan(a)) {
-                       if (tcf_vlan_action(a) == VLAN_F_POP) {
+                       if (tcf_vlan_action(a) == TCA_VLAN_ACT_POP) {
                                attr->action |= MLX5_FLOW_CONTEXT_ACTION_VLAN_POP;
-                       } else if (tcf_vlan_action(a) == VLAN_F_PUSH) {
+                       } else if (tcf_vlan_action(a) == TCA_VLAN_ACT_PUSH) {
                                if (tcf_vlan_push_proto(a) != htons(ETH_P_8021Q))
                                        return -EOPNOTSUPP;
 
                                attr->action |= MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH;
                                attr->vlan = tcf_vlan_push_vid(a);
+                       } else { /* action is TCA_VLAN_ACT_MODIFY */
+                               return -EOPNOTSUPP;
                        }
                        continue;
                }
@@ -1131,52 +1529,50 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv, struct tcf_exts *exts,
 int mlx5e_configure_flower(struct mlx5e_priv *priv, __be16 protocol,
                           struct tc_cls_flower_offload *f)
 {
+       struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
+       struct mlx5e_tc_flow_parse_attr *parse_attr;
        struct mlx5e_tc_table *tc = &priv->fs.tc;
-       int err = 0;
-       bool fdb_flow = false;
-       u32 flow_tag, action;
        struct mlx5e_tc_flow *flow;
-       struct mlx5_flow_spec *spec;
-       struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
+       int attr_size, err = 0;
+       u8 flow_flags = 0;
 
-       if (esw && esw->mode == SRIOV_OFFLOADS)
-               fdb_flow = true;
-
-       if (fdb_flow)
-               flow = kzalloc(sizeof(*flow) +
-                              sizeof(struct mlx5_esw_flow_attr),
-                              GFP_KERNEL);
-       else
-               flow = kzalloc(sizeof(*flow), GFP_KERNEL);
+       if (esw && esw->mode == SRIOV_OFFLOADS) {
+               flow_flags = MLX5E_TC_FLOW_ESWITCH;
+               attr_size  = sizeof(struct mlx5_esw_flow_attr);
+       } else {
+               flow_flags = MLX5E_TC_FLOW_NIC;
+               attr_size  = sizeof(struct mlx5_nic_flow_attr);
+       }
 
-       spec = mlx5_vzalloc(sizeof(*spec));
-       if (!spec || !flow) {
+       flow = kzalloc(sizeof(*flow) + attr_size, GFP_KERNEL);
+       parse_attr = mlx5_vzalloc(sizeof(*parse_attr));
+       if (!parse_attr || !flow) {
                err = -ENOMEM;
                goto err_free;
        }
 
        flow->cookie = f->cookie;
+       flow->flags = flow_flags;
 
-       err = parse_cls_flower(priv, spec, f);
+       err = parse_cls_flower(priv, flow, &parse_attr->spec, f);
        if (err < 0)
                goto err_free;
 
-       if (fdb_flow) {
-               flow->attr  = (struct mlx5_esw_flow_attr *)(flow + 1);
-               err = parse_tc_fdb_actions(priv, f->exts, flow);
+       if (flow->flags & MLX5E_TC_FLOW_ESWITCH) {
+               err = parse_tc_fdb_actions(priv, f->exts, parse_attr, flow);
                if (err < 0)
                        goto err_free;
-               flow->rule = mlx5e_tc_add_fdb_flow(priv, spec, flow->attr);
+               flow->rule = mlx5e_tc_add_fdb_flow(priv, parse_attr, flow);
        } else {
-               err = parse_tc_nic_actions(priv, f->exts, &action, &flow_tag);
+               err = parse_tc_nic_actions(priv, f->exts, parse_attr, flow);
                if (err < 0)
                        goto err_free;
-               flow->rule = mlx5e_tc_add_nic_flow(priv, spec, action, flow_tag);
+               flow->rule = mlx5e_tc_add_nic_flow(priv, parse_attr, flow);
        }
 
        if (IS_ERR(flow->rule)) {
                err = PTR_ERR(flow->rule);
-               goto err_del_rule;
+               goto err_free;
        }
 
        err = rhashtable_insert_fast(&tc->ht, &flow->node,
@@ -1192,7 +1588,7 @@ err_del_rule:
 err_free:
        kfree(flow);
 out:
-       kvfree(spec);
+       kvfree(parse_attr);
        return err;
 }
 
index f193128bac4b8c18504ec1f5905def3baa5c4633..5bbc313e70c553e51b6e0fe806e82c3a9dba2e44 100644 (file)
 #define MLX5E_SQ_STOP_ROOM (MLX5_SEND_WQE_MAX_WQEBBS +\
                            MLX5E_SQ_NOPS_ROOM)
 
-void mlx5e_send_nop(struct mlx5e_sq *sq, bool notify_hw)
-{
-       struct mlx5_wq_cyc                *wq  = &sq->wq;
-
-       u16 pi = sq->pc & wq->sz_m1;
-       struct mlx5e_tx_wqe              *wqe  = mlx5_wq_cyc_get_wqe(wq, pi);
-
-       struct mlx5_wqe_ctrl_seg         *cseg = &wqe->ctrl;
-
-       memset(cseg, 0, sizeof(*cseg));
-
-       cseg->opmod_idx_opcode = cpu_to_be32((sq->pc << 8) | MLX5_OPCODE_NOP);
-       cseg->qpn_ds           = cpu_to_be32((sq->sqn << 8) | 0x01);
-
-       sq->pc++;
-       sq->stats.nop++;
-
-       if (notify_hw) {
-               cseg->fm_ce_se = MLX5_WQE_CTRL_CQ_UPDATE;
-               mlx5e_tx_notify_hw(sq, &wqe->ctrl, 0);
-       }
-}
-
 static inline void mlx5e_tx_dma_unmap(struct device *pdev,
                                      struct mlx5e_sq_dma *dma)
 {
@@ -76,25 +53,25 @@ static inline void mlx5e_tx_dma_unmap(struct device *pdev,
        }
 }
 
-static inline void mlx5e_dma_push(struct mlx5e_sq *sq,
+static inline void mlx5e_dma_push(struct mlx5e_txqsq *sq,
                                  dma_addr_t addr,
                                  u32 size,
                                  enum mlx5e_dma_map_type map_type)
 {
        u32 i = sq->dma_fifo_pc & sq->dma_fifo_mask;
 
-       sq->db.txq.dma_fifo[i].addr = addr;
-       sq->db.txq.dma_fifo[i].size = size;
-       sq->db.txq.dma_fifo[i].type = map_type;
+       sq->db.dma_fifo[i].addr = addr;
+       sq->db.dma_fifo[i].size = size;
+       sq->db.dma_fifo[i].type = map_type;
        sq->dma_fifo_pc++;
 }
 
-static inline struct mlx5e_sq_dma *mlx5e_dma_get(struct mlx5e_sq *sq, u32 i)
+static inline struct mlx5e_sq_dma *mlx5e_dma_get(struct mlx5e_txqsq *sq, u32 i)
 {
-       return &sq->db.txq.dma_fifo[i & sq->dma_fifo_mask];
+       return &sq->db.dma_fifo[i & sq->dma_fifo_mask];
 }
 
-static void mlx5e_dma_unmap_wqe_err(struct mlx5e_sq *sq, u8 num_dma)
+static void mlx5e_dma_unmap_wqe_err(struct mlx5e_txqsq *sq, u8 num_dma)
 {
        int i;
 
@@ -111,6 +88,7 @@ u16 mlx5e_select_queue(struct net_device *dev, struct sk_buff *skb,
 {
        struct mlx5e_priv *priv = netdev_priv(dev);
        int channel_ix = fallback(dev, skb);
+       u16 num_channels;
        int up = 0;
 
        if (!netdev_get_num_tc(dev))
@@ -122,11 +100,11 @@ u16 mlx5e_select_queue(struct net_device *dev, struct sk_buff *skb,
        /* channel_ix can be larger than num_channels since
         * dev->real_num_tx_queues = num_channels * num_tc
         */
-       if (channel_ix >= priv->params.num_channels)
-               channel_ix = reciprocal_scale(channel_ix,
-                                             priv->params.num_channels);
+       num_channels = priv->channels.params.num_channels;
+       if (channel_ix >= num_channels)
+               channel_ix = reciprocal_scale(channel_ix, num_channels);
 
-       return priv->channeltc_to_txq_map[channel_ix][up];
+       return priv->channel_tc2txq[channel_ix][up];
 }
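
mlx5e_select_queue() folds an out-of-range channel index back into [0, num_channels) with reciprocal_scale(), trading a modulo for a multiply-and-shift. As a hedged aside, the kernel helper can be restated in user space like this:

/* Sketch only: reciprocal_scale() restated outside the kernel. It maps
 * val into [0, ep_ro) as (val * ep_ro) >> 32, with no division.
 */
#include <stdint.h>
#include <stdio.h>

static uint32_t reciprocal_scale(uint32_t val, uint32_t ep_ro)
{
	return (uint32_t)(((uint64_t)val * ep_ro) >> 32);
}

int main(void)
{
	uint32_t num_channels = 8;
	uint32_t channel_ix = 0xdeadbeef;	/* out-of-range index */

	if (channel_ix >= num_channels)
		channel_ix = reciprocal_scale(channel_ix, num_channels);

	printf("channel_ix = %u\n", channel_ix);	/* always < 8 */
	return 0;
}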
 
 static inline int mlx5e_skb_l2_header_offset(struct sk_buff *skb)
@@ -175,25 +153,6 @@ static inline unsigned int mlx5e_calc_min_inline(enum mlx5_inline_modes mode,
        }
 }
 
-static inline u16 mlx5e_get_inline_hdr_size(struct mlx5e_sq *sq,
-                                           struct sk_buff *skb, bool bf)
-{
-       /* Some NIC TX decisions, e.g loopback, are based on the packet
-        * headers and occur before the data gather.
-        * Therefore these headers must be copied into the WQE
-        */
-       if (bf) {
-               u16 ihs = skb_headlen(skb);
-
-               if (skb_vlan_tag_present(skb))
-                       ihs += VLAN_HLEN;
-
-               if (ihs <= sq->max_inline)
-                       return skb_headlen(skb);
-       }
-       return mlx5e_calc_min_inline(sq->min_inline_mode, skb);
-}
-
 static inline void mlx5e_tx_skb_pull_inline(unsigned char **skb_data,
                                            unsigned int *skb_len,
                                            unsigned int len)
@@ -218,13 +177,13 @@ static inline void mlx5e_insert_vlan(void *start, struct sk_buff *skb, u16 ihs,
        mlx5e_tx_skb_pull_inline(skb_data, skb_len, cpy2_sz);
 }
 
-static netdev_tx_t mlx5e_sq_xmit(struct mlx5e_sq *sq, struct sk_buff *skb)
+static netdev_tx_t mlx5e_sq_xmit(struct mlx5e_txqsq *sq, struct sk_buff *skb)
 {
        struct mlx5_wq_cyc       *wq   = &sq->wq;
 
        u16 pi = sq->pc & wq->sz_m1;
        struct mlx5e_tx_wqe      *wqe  = mlx5_wq_cyc_get_wqe(wq, pi);
-       struct mlx5e_tx_wqe_info *wi   = &sq->db.txq.wqe_info[pi];
+       struct mlx5e_tx_wqe_info *wi   = &sq->db.wqe_info[pi];
 
        struct mlx5_wqe_ctrl_seg *cseg = &wqe->ctrl;
        struct mlx5_wqe_eth_seg  *eseg = &wqe->eth;
@@ -235,7 +194,6 @@ static netdev_tx_t mlx5e_sq_xmit(struct mlx5e_sq *sq, struct sk_buff *skb)
        u8  opcode = MLX5_OPCODE_SEND;
        dma_addr_t dma_addr = 0;
        unsigned int num_bytes;
-       bool bf = false;
        u16 headlen;
        u16 ds_cnt;
        u16 ihs;
@@ -255,11 +213,6 @@ static netdev_tx_t mlx5e_sq_xmit(struct mlx5e_sq *sq, struct sk_buff *skb)
        } else
                sq->stats.csum_none++;
 
-       if (sq->cc != sq->prev_cc) {
-               sq->prev_cc = sq->cc;
-               sq->bf_budget = (sq->cc == sq->pc) ? MLX5E_SQ_BF_BUDGET : 0;
-       }
-
        if (skb_is_gso(skb)) {
                eseg->mss    = cpu_to_be16(skb_shinfo(skb)->gso_size);
                opcode       = MLX5_OPCODE_LSO;
@@ -274,15 +227,15 @@ static netdev_tx_t mlx5e_sq_xmit(struct mlx5e_sq *sq, struct sk_buff *skb)
                        sq->stats.tso_bytes += skb->len - ihs;
                }
 
+               sq->stats.packets += skb_shinfo(skb)->gso_segs;
                num_bytes = skb->len + (skb_shinfo(skb)->gso_segs - 1) * ihs;
        } else {
-               bf = sq->bf_budget &&
-                    !skb->xmit_more &&
-                    !skb_shinfo(skb)->nr_frags;
-               ihs = mlx5e_get_inline_hdr_size(sq, skb, bf);
+               ihs = mlx5e_calc_min_inline(sq->min_inline_mode, skb);
+               sq->stats.packets++;
                num_bytes = max_t(unsigned int, skb->len, ETH_ZLEN);
        }
 
+       sq->stats.bytes += num_bytes;
        wi->num_bytes = num_bytes;
 
        ds_cnt = sizeof(*wqe) / MLX5_SEND_WQE_DS;
@@ -346,7 +299,7 @@ static netdev_tx_t mlx5e_sq_xmit(struct mlx5e_sq *sq, struct sk_buff *skb)
        cseg->opmod_idx_opcode = cpu_to_be32((sq->pc << 8) | opcode);
        cseg->qpn_ds           = cpu_to_be32((sq->sqn << 8) | ds_cnt);
 
-       sq->db.txq.skb[pi] = skb;
+       sq->db.skb[pi] = skb;
 
        wi->num_wqebbs = DIV_ROUND_UP(ds_cnt, MLX5_SEND_WQEBB_NUM_DS);
        sq->pc += wi->num_wqebbs;
@@ -356,33 +309,23 @@ static netdev_tx_t mlx5e_sq_xmit(struct mlx5e_sq *sq, struct sk_buff *skb)
        if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP))
                skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
 
-       if (unlikely(!mlx5e_sq_has_room_for(sq, MLX5E_SQ_STOP_ROOM))) {
+       if (unlikely(!mlx5e_wqc_has_room_for(&sq->wq, sq->cc, sq->pc,
+                                            MLX5E_SQ_STOP_ROOM))) {
                netif_tx_stop_queue(sq->txq);
                sq->stats.stopped++;
        }
 
        sq->stats.xmit_more += skb->xmit_more;
-       if (!skb->xmit_more || netif_xmit_stopped(sq->txq)) {
-               int bf_sz = 0;
-
-               if (bf && test_bit(MLX5E_SQ_STATE_BF_ENABLE, &sq->state))
-                       bf_sz = wi->num_wqebbs << 3;
-
-               cseg->fm_ce_se = MLX5_WQE_CTRL_CQ_UPDATE;
-               mlx5e_tx_notify_hw(sq, &wqe->ctrl, bf_sz);
-       }
+       if (!skb->xmit_more || netif_xmit_stopped(sq->txq))
+               mlx5e_notify_hw(wq, sq->pc, sq->uar_map, cseg);
 
        /* fill sq edge with nops to avoid wqe wrap around */
        while ((pi = (sq->pc & wq->sz_m1)) > sq->edge) {
-               sq->db.txq.skb[pi] = NULL;
-               mlx5e_send_nop(sq, false);
+               sq->db.skb[pi] = NULL;
+               mlx5e_post_nop(&sq->wq, sq->sqn, &sq->pc);
+               sq->stats.nop++;
        }
 
-       if (bf)
-               sq->bf_budget--;
-
-       sq->stats.packets++;
-       sq->stats.bytes += num_bytes;
        return NETDEV_TX_OK;
 
 dma_unmap_wqe_err:
@@ -397,21 +340,21 @@ dma_unmap_wqe_err:
 netdev_tx_t mlx5e_xmit(struct sk_buff *skb, struct net_device *dev)
 {
        struct mlx5e_priv *priv = netdev_priv(dev);
-       struct mlx5e_sq *sq = priv->txq_to_sq_map[skb_get_queue_mapping(skb)];
+       struct mlx5e_txqsq *sq = priv->txq2sq[skb_get_queue_mapping(skb)];
 
        return mlx5e_sq_xmit(sq, skb);
 }
 
 bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq, int napi_budget)
 {
-       struct mlx5e_sq *sq;
+       struct mlx5e_txqsq *sq;
        u32 dma_fifo_cc;
        u32 nbytes;
        u16 npkts;
        u16 sqcc;
        int i;
 
-       sq = container_of(cq, struct mlx5e_sq, cq);
+       sq = container_of(cq, struct mlx5e_txqsq, cq);
 
        if (unlikely(!test_bit(MLX5E_SQ_STATE_ENABLED, &sq->state)))
                return false;
@@ -449,8 +392,8 @@ bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq, int napi_budget)
                        last_wqe = (sqcc == wqe_counter);
 
                        ci = sqcc & sq->wq.sz_m1;
-                       skb = sq->db.txq.skb[ci];
-                       wi = &sq->db.txq.wqe_info[ci];
+                       skb = sq->db.skb[ci];
+                       wi = &sq->db.wqe_info[ci];
 
                        if (unlikely(!skb)) { /* nop */
                                sqcc++;
@@ -491,7 +434,7 @@ bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq, int napi_budget)
        netdev_tx_completed_queue(sq->txq, npkts, nbytes);
 
        if (netif_tx_queue_stopped(sq->txq) &&
-           mlx5e_sq_has_room_for(sq, MLX5E_SQ_STOP_ROOM)) {
+           mlx5e_wqc_has_room_for(&sq->wq, sq->cc, sq->pc, MLX5E_SQ_STOP_ROOM)) {
                netif_tx_wake_queue(sq->txq);
                sq->stats.wake++;
        }
@@ -499,7 +442,7 @@ bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq, int napi_budget)
        return (i == MLX5E_TX_CQ_POLL_BUDGET);
 }
 
-static void mlx5e_free_txq_sq_descs(struct mlx5e_sq *sq)
+void mlx5e_free_txqsq_descs(struct mlx5e_txqsq *sq)
 {
        struct mlx5e_tx_wqe_info *wi;
        struct sk_buff *skb;
@@ -508,8 +451,8 @@ static void mlx5e_free_txq_sq_descs(struct mlx5e_sq *sq)
 
        while (sq->cc != sq->pc) {
                ci = sq->cc & sq->wq.sz_m1;
-               skb = sq->db.txq.skb[ci];
-               wi = &sq->db.txq.wqe_info[ci];
+               skb = sq->db.skb[ci];
+               wi = &sq->db.wqe_info[ci];
 
                if (!skb) { /* nop */
                        sq->cc++;
@@ -527,37 +470,3 @@ static void mlx5e_free_txq_sq_descs(struct mlx5e_sq *sq)
                sq->cc += wi->num_wqebbs;
        }
 }
-
-static void mlx5e_free_xdp_sq_descs(struct mlx5e_sq *sq)
-{
-       struct mlx5e_sq_wqe_info *wi;
-       struct mlx5e_dma_info *di;
-       u16 ci;
-
-       while (sq->cc != sq->pc) {
-               ci = sq->cc & sq->wq.sz_m1;
-               di = &sq->db.xdp.di[ci];
-               wi = &sq->db.xdp.wqe_info[ci];
-
-               if (wi->opcode == MLX5_OPCODE_NOP) {
-                       sq->cc++;
-                       continue;
-               }
-
-               sq->cc += wi->num_wqebbs;
-
-               mlx5e_page_release(&sq->channel->rq, di, false);
-       }
-}
-
-void mlx5e_free_sq_descs(struct mlx5e_sq *sq)
-{
-       switch (sq->type) {
-       case MLX5E_SQ_TXQ:
-               mlx5e_free_txq_sq_descs(sq);
-               break;
-       case MLX5E_SQ_XDP:
-               mlx5e_free_xdp_sq_descs(sq);
-               break;
-       }
-}
index e5c12a732aa1212274943183ed83696ce2606639..43729ec35dfca585e4827175051671c2a662abcc 100644 (file)
@@ -44,14 +44,14 @@ struct mlx5_cqe64 *mlx5e_get_cqe(struct mlx5e_cq *cq)
                return NULL;
 
        /* ensure cqe content is read after cqe ownership bit */
-       rmb();
+       dma_rmb();
 
        return cqe;
 }
 
 static void mlx5e_poll_ico_cq(struct mlx5e_cq *cq)
 {
-       struct mlx5e_sq *sq = container_of(cq, struct mlx5e_sq, cq);
+       struct mlx5e_icosq *sq = container_of(cq, struct mlx5e_icosq, cq);
        struct mlx5_wq_cyc *wq;
        struct mlx5_cqe64 *cqe;
        u16 sqcc;
@@ -105,66 +105,6 @@ static void mlx5e_poll_ico_cq(struct mlx5e_cq *cq)
        sq->cc = sqcc;
 }
 
-static inline bool mlx5e_poll_xdp_tx_cq(struct mlx5e_cq *cq)
-{
-       struct mlx5e_sq *sq;
-       u16 sqcc;
-       int i;
-
-       sq = container_of(cq, struct mlx5e_sq, cq);
-
-       if (unlikely(!test_bit(MLX5E_SQ_STATE_ENABLED, &sq->state)))
-               return false;
-
-       /* sq->cc must be updated only after mlx5_cqwq_update_db_record(),
-        * otherwise a cq overrun may occur
-        */
-       sqcc = sq->cc;
-
-       for (i = 0; i < MLX5E_TX_CQ_POLL_BUDGET; i++) {
-               struct mlx5_cqe64 *cqe;
-               u16 wqe_counter;
-               bool last_wqe;
-
-               cqe = mlx5e_get_cqe(cq);
-               if (!cqe)
-                       break;
-
-               mlx5_cqwq_pop(&cq->wq);
-
-               wqe_counter = be16_to_cpu(cqe->wqe_counter);
-
-               do {
-                       struct mlx5e_sq_wqe_info *wi;
-                       struct mlx5e_dma_info *di;
-                       u16 ci;
-
-                       last_wqe = (sqcc == wqe_counter);
-
-                       ci = sqcc & sq->wq.sz_m1;
-                       di = &sq->db.xdp.di[ci];
-                       wi = &sq->db.xdp.wqe_info[ci];
-
-                       if (unlikely(wi->opcode == MLX5_OPCODE_NOP)) {
-                               sqcc++;
-                               continue;
-                       }
-
-                       sqcc += wi->num_wqebbs;
-                       /* Recycle RX page */
-                       mlx5e_page_release(&sq->channel->rq, di, true);
-               } while (!last_wqe);
-       }
-
-       mlx5_cqwq_update_db_record(&cq->wq);
-
-       /* ensure cq space is freed before enabling more cqes */
-       wmb();
-
-       sq->cc = sqcc;
-       return (i == MLX5E_TX_CQ_POLL_BUDGET);
-}
-
 int mlx5e_napi_poll(struct napi_struct *napi, int budget)
 {
        struct mlx5e_channel *c = container_of(napi, struct mlx5e_channel,
@@ -178,12 +118,12 @@ int mlx5e_napi_poll(struct napi_struct *napi, int budget)
        for (i = 0; i < c->num_tc; i++)
                busy |= mlx5e_poll_tx_cq(&c->sq[i].cq, budget);
 
+       if (c->xdp)
+               busy |= mlx5e_poll_xdpsq_cq(&c->rq.xdpsq.cq);
+
        work_done = mlx5e_poll_rx_cq(&c->rq.cq, budget);
        busy |= work_done == budget;
 
-       if (c->xdp)
-               busy |= mlx5e_poll_xdp_tx_cq(&c->xdp_sq.cq);
-
        mlx5e_poll_ico_cq(&c->icosq.cq);
 
        busy |= mlx5e_post_rx_wqes(&c->rq);
@@ -224,8 +164,7 @@ void mlx5e_cq_error_event(struct mlx5_core_cq *mcq, enum mlx5_event event)
 {
        struct mlx5e_cq *cq = container_of(mcq, struct mlx5e_cq, mcq);
        struct mlx5e_channel *c = cq->channel;
-       struct mlx5e_priv *priv = c->priv;
-       struct net_device *netdev = priv->netdev;
+       struct net_device *netdev = c->netdev;
 
        netdev_err(netdev, "%s: cqn=0x%.6x event=0x%.2x\n",
                   __func__, mcq->cqn, event);
index 5b78883d565413ec59a00ecba4ddb483e4eecd3f..1f56ed9f5a6f78e20e76d63d8cba9bef847518a1 100644 (file)
@@ -209,6 +209,7 @@ struct mlx5_esw_offload {
        struct mlx5_eswitch_rep *vport_reps;
        DECLARE_HASHTABLE(encap_tbl, 8);
        u8 inline_mode;
+       u64 num_flows;
 };
 
 struct mlx5_eswitch {
@@ -271,6 +272,11 @@ struct mlx5_flow_handle *
 mlx5_eswitch_add_offloaded_rule(struct mlx5_eswitch *esw,
                                struct mlx5_flow_spec *spec,
                                struct mlx5_esw_flow_attr *attr);
+void
+mlx5_eswitch_del_offloaded_rule(struct mlx5_eswitch *esw,
+                               struct mlx5_flow_handle *rule,
+                               struct mlx5_esw_flow_attr *attr);
+
 struct mlx5_flow_handle *
 mlx5_eswitch_create_vport_rx_rule(struct mlx5_eswitch *esw, int vport, u32 tirn);
 
@@ -279,8 +285,8 @@ enum {
        SET_VLAN_INSERT = BIT(1)
 };
 
-#define MLX5_FLOW_CONTEXT_ACTION_VLAN_POP  0x40
-#define MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH 0x80
+#define MLX5_FLOW_CONTEXT_ACTION_VLAN_POP  0x4000
+#define MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH 0x8000
 
 struct mlx5_encap_entry {
        struct hlist_node encap_hlist;
@@ -302,6 +308,7 @@ struct mlx5_esw_flow_attr {
        u16     vlan;
        bool    vlan_handled;
        struct mlx5_encap_entry *encap;
+       u32     mod_hdr_id;
 };
 
 int mlx5_eswitch_sqs2vport_start(struct mlx5_eswitch *esw,
index 4f5b0d47d5f38237129a7c90a1240b8615615d32..fff962dac8e310fe4f3d9ab8af0412188ed190cd 100644 (file)
@@ -68,8 +68,10 @@ mlx5_eswitch_add_offloaded_rule(struct mlx5_eswitch *esw,
        }
        if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_COUNT) {
                counter = mlx5_fc_create(esw->dev, true);
-               if (IS_ERR(counter))
-                       return ERR_CAST(counter);
+               if (IS_ERR(counter)) {
+                       rule = ERR_CAST(counter);
+                       goto err_counter_alloc;
+               }
                dest[i].type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
                dest[i].counter = counter;
                i++;
@@ -86,15 +88,38 @@ mlx5_eswitch_add_offloaded_rule(struct mlx5_eswitch *esw,
        if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_DECAP)
                spec->match_criteria_enable |= MLX5_MATCH_INNER_HEADERS;
 
-       if (attr->encap)
+       if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)
+               flow_act.modify_id = attr->mod_hdr_id;
+
+       if (attr->action & MLX5_FLOW_CONTEXT_ACTION_ENCAP)
                flow_act.encap_id = attr->encap->encap_id;
 
        rule = mlx5_add_flow_rules((struct mlx5_flow_table *)esw->fdb_table.fdb,
                                   spec, &flow_act, dest, i);
        if (IS_ERR(rule))
-               mlx5_fc_destroy(esw->dev, counter);
+               goto err_add_rule;
+       else
+               esw->offloads.num_flows++;
 
        return rule;
+
+err_add_rule:
+       mlx5_fc_destroy(esw->dev, counter);
+err_counter_alloc:
+       return rule;
+}
+
+void
+mlx5_eswitch_del_offloaded_rule(struct mlx5_eswitch *esw,
+                               struct mlx5_flow_handle *rule,
+                               struct mlx5_esw_flow_attr *attr)
+{
+       struct mlx5_fc *counter = NULL;
+
+       counter = mlx5_flow_rule_counter(rule);
+       mlx5_del_flow_rules(rule);
+       mlx5_fc_destroy(esw->dev, counter);
+       esw->offloads.num_flows--;
 }
 
 static int esw_set_global_vlan_pop(struct mlx5_eswitch *esw, u8 val)
@@ -908,6 +933,11 @@ int mlx5_devlink_eswitch_inline_mode_set(struct devlink *devlink, u8 mode)
            MLX5_CAP_INLINE_MODE_VPORT_CONTEXT)
                return -EOPNOTSUPP;
 
+       if (esw->offloads.num_flows > 0) {
+               esw_warn(dev, "Can't set inline mode when flows are configured\n");
+               return -EOPNOTSUPP;
+       }
+
        err = esw_inline_mode_from_devlink(mode, &mlx5_mode);
        if (err)
                goto out;
index b64a781c7e855fd1d38cb7303d26a27073626435..c6178ea1a46161223ba73598c328cfd7c36ffa06 100644 (file)
@@ -249,6 +249,7 @@ static int mlx5_cmd_set_fte(struct mlx5_core_dev *dev,
        MLX5_SET(flow_context, in_flow_context, flow_tag, fte->flow_tag);
        MLX5_SET(flow_context, in_flow_context, action, fte->action);
        MLX5_SET(flow_context, in_flow_context, encap_id, fte->encap_id);
+       MLX5_SET(flow_context, in_flow_context, modify_header_id, fte->modify_id);
        in_match_value = MLX5_ADDR_OF(flow_context, in_flow_context,
                                      match_value);
        memcpy(in_match_value, &fte->val, MLX5_ST_SZ_BYTES(fte_match_param));
@@ -515,3 +516,69 @@ void mlx5_encap_dealloc(struct mlx5_core_dev *dev, u32 encap_id)
 
        mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
 }
+
+int mlx5_modify_header_alloc(struct mlx5_core_dev *dev,
+                            u8 namespace, u8 num_actions,
+                            void *modify_actions, u32 *modify_header_id)
+{
+       u32 out[MLX5_ST_SZ_DW(alloc_modify_header_context_out)];
+       int max_actions, actions_size, inlen, err;
+       void *actions_in;
+       u8 table_type;
+       u32 *in;
+
+       switch (namespace) {
+       case MLX5_FLOW_NAMESPACE_FDB:
+               max_actions = MLX5_CAP_ESW_FLOWTABLE_FDB(dev, max_modify_header_actions);
+               table_type = FS_FT_FDB;
+               break;
+       case MLX5_FLOW_NAMESPACE_KERNEL:
+               max_actions = MLX5_CAP_FLOWTABLE_NIC_RX(dev, max_modify_header_actions);
+               table_type = FS_FT_NIC_RX;
+               break;
+       default:
+               return -EOPNOTSUPP;
+       }
+
+       if (num_actions > max_actions) {
+               mlx5_core_warn(dev, "too many modify header actions %d, max supported %d\n",
+                              num_actions, max_actions);
+               return -EOPNOTSUPP;
+       }
+
+       actions_size = MLX5_UN_SZ_BYTES(set_action_in_add_action_in_auto) * num_actions;
+       inlen = MLX5_ST_SZ_BYTES(alloc_modify_header_context_in) + actions_size;
+
+       in = kzalloc(inlen, GFP_KERNEL);
+       if (!in)
+               return -ENOMEM;
+
+       MLX5_SET(alloc_modify_header_context_in, in, opcode,
+                MLX5_CMD_OP_ALLOC_MODIFY_HEADER_CONTEXT);
+       MLX5_SET(alloc_modify_header_context_in, in, table_type, table_type);
+       MLX5_SET(alloc_modify_header_context_in, in, num_of_actions, num_actions);
+
+       actions_in = MLX5_ADDR_OF(alloc_modify_header_context_in, in, actions);
+       memcpy(actions_in, modify_actions, actions_size);
+
+       memset(out, 0, sizeof(out));
+       err = mlx5_cmd_exec(dev, in, inlen, out, sizeof(out));
+
+       *modify_header_id = MLX5_GET(alloc_modify_header_context_out, out, modify_header_id);
+       kfree(in);
+       return err;
+}
+
+void mlx5_modify_header_dealloc(struct mlx5_core_dev *dev, u32 modify_header_id)
+{
+       u32 in[MLX5_ST_SZ_DW(dealloc_modify_header_context_in)];
+       u32 out[MLX5_ST_SZ_DW(dealloc_modify_header_context_out)];
+
+       memset(in, 0, sizeof(in));
+       MLX5_SET(dealloc_modify_header_context_in, in, opcode,
+                MLX5_CMD_OP_DEALLOC_MODIFY_HEADER_CONTEXT);
+       MLX5_SET(dealloc_modify_header_context_in, in, modify_header_id,
+                modify_header_id);
+
+       mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
+}
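
The two new firmware commands are meant to be used as a pair: allocate a modify-header context, reference it from a rule via flow_act.modify_id, and free it once the rule is gone. A hedged sketch of that call pattern follows, valid only in kernel context; attach_mod_hdr()/detach_mod_hdr() are invented wrappers and error paths are trimmed.

/* Sketch only: expected pairing of the new alloc/dealloc helpers.
 * Assumes a live mlx5_core_dev and an action buffer produced by the
 * pedit parsing above; not a verbatim caller from this patch.
 */
static int attach_mod_hdr(struct mlx5_core_dev *dev, void *actions,
			  u8 num_actions, u32 *id)
{
	int err;

	err = mlx5_modify_header_alloc(dev, MLX5_FLOW_NAMESPACE_KERNEL,
				       num_actions, actions, id);
	if (err)
		return err;

	/* ... install the rule with flow_act.modify_id = *id ... */
	return 0;
}

static void detach_mod_hdr(struct mlx5_core_dev *dev, u32 id)
{
	/* tear down only after the referencing rule is deleted */
	mlx5_modify_header_dealloc(dev, id);
}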
index 2478516a61e2ea547f5ae8af0c3aae7228e64db9..27ff815600f78dfc0e363fe3852663e1518d09c2 100644 (file)
@@ -476,6 +476,7 @@ static struct fs_fte *alloc_fte(struct mlx5_flow_act *flow_act,
        fte->index = index;
        fte->action = flow_act->action;
        fte->encap_id = flow_act->encap_id;
+       fte->modify_id = flow_act->modify_id;
 
        return fte;
 }
@@ -1136,7 +1137,7 @@ static struct mlx5_flow_group *create_autogroup(struct mlx5_flow_table *ft,
                                                u32 *match_criteria)
 {
        int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
-       struct list_head *prev = ft->node.children.prev;
+       struct list_head *prev = &ft->node.children;
        unsigned int candidate_index = 0;
        struct mlx5_flow_group *fg;
        void *match_criteria_addr;
index 8e668c63f69ec4afefb197f1f4c0a32ca3760179..03af2e7989f375943db11845536f5646a8d46fa3 100644 (file)
@@ -152,6 +152,7 @@ struct fs_fte {
        u32                             index;
        u32                             action;
        u32                             encap_id;
+       u32                             modify_id;
        enum fs_fte_status              status;
        struct mlx5_fc                  *counter;
 };
index 55957246c0e844826a5a7f18c42c4678fb6c5be5..b5d5519542e87380b064de5578e327f0d55ba9cf 100644 (file)
@@ -294,7 +294,7 @@ static int mlx5_handle_changeupper_event(struct mlx5_lag *ldev,
                                         struct netdev_notifier_changeupper_info *info)
 {
        struct net_device *upper = info->upper_dev, *ndev_tmp;
-       struct netdev_lag_upper_info *lag_upper_info;
+       struct netdev_lag_upper_info *lag_upper_info = NULL;
        bool is_bonded;
        int bond_status = 0;
        int num_slaves = 0;
@@ -303,7 +303,8 @@ static int mlx5_handle_changeupper_event(struct mlx5_lag *ldev,
        if (!netif_is_lag_master(upper))
                return 0;
 
-       lag_upper_info = info->upper_info;
+       if (info->linking)
+               lag_upper_info = info->upper_info;
 
        /* The event may still be of interest if the slave does not belong to
         * us, but is enslaved to a master which has one or more of our netdevs
index c4242a4e81309f0d90a0cae8bdfc09fd39da5649..9c2bec732af989f0e0cb9a3104aa9f22b5c7b85b 100644 (file)
@@ -87,7 +87,7 @@ static struct mlx5_profile profile[] = {
        [2] = {
                .mask           = MLX5_PROF_MASK_QP_SIZE |
                                  MLX5_PROF_MASK_MR_CACHE,
-               .log_max_qp     = 17,
+               .log_max_qp     = 18,
                .mr_cache[0]    = {
                        .size   = 500,
                        .limit  = 250
@@ -1352,6 +1352,7 @@ static int init_one(struct pci_dev *pdev,
        if (err)
                goto clean_load;
 
+       pci_save_state(pdev);
        return 0;
 
 clean_load:
@@ -1407,9 +1408,8 @@ static pci_ers_result_t mlx5_pci_err_detected(struct pci_dev *pdev,
 
        mlx5_enter_error_state(dev);
        mlx5_unload_one(dev, priv, false);
-       /* In case of kernel call save the pci state and drain the health wq */
+       /* In case of kernel call drain the health wq */
        if (state) {
-               pci_save_state(pdev);
                mlx5_drain_health_wq(dev);
                mlx5_pci_disable_device(dev);
        }
@@ -1461,6 +1461,7 @@ static pci_ers_result_t mlx5_pci_slot_reset(struct pci_dev *pdev)
 
        pci_set_master(pdev);
        pci_restore_state(pdev);
+       pci_save_state(pdev);
 
        if (wait_vital(pdev)) {
                dev_err(&pdev->dev, "%s: wait_vital timed out\n", __func__);
@@ -1513,8 +1514,10 @@ static const struct pci_device_id mlx5_core_pci_table[] = {
        { PCI_VDEVICE(MELLANOX, 0x1016), MLX5_PCI_DEV_IS_VF},   /* ConnectX-4LX VF */
        { PCI_VDEVICE(MELLANOX, 0x1017) },                      /* ConnectX-5, PCIe 3.0 */
        { PCI_VDEVICE(MELLANOX, 0x1018), MLX5_PCI_DEV_IS_VF},   /* ConnectX-5 VF */
-       { PCI_VDEVICE(MELLANOX, 0x1019) },                      /* ConnectX-5, PCIe 4.0 */
-       { PCI_VDEVICE(MELLANOX, 0x101a), MLX5_PCI_DEV_IS_VF},   /* ConnectX-5, PCIe 4.0 VF */
+       { PCI_VDEVICE(MELLANOX, 0x1019) },                      /* ConnectX-5 Ex */
+       { PCI_VDEVICE(MELLANOX, 0x101a), MLX5_PCI_DEV_IS_VF},   /* ConnectX-5 Ex VF */
+       { PCI_VDEVICE(MELLANOX, 0x101b) },                      /* ConnectX-6 */
+       { PCI_VDEVICE(MELLANOX, 0x101c), MLX5_PCI_DEV_IS_VF},   /* ConnectX-6 VF */
        { 0, }
 };
 
index b3dabe6e88366133fd07dab68f059d4f5d7e5e3a..fbc6e9e9e3053a7527cf49e7d00c54104acf50af 100644 (file)
@@ -141,6 +141,11 @@ int mlx5_encap_alloc(struct mlx5_core_dev *dev,
                     u32 *encap_id);
 void mlx5_encap_dealloc(struct mlx5_core_dev *dev, u32 encap_id);
 
+int mlx5_modify_header_alloc(struct mlx5_core_dev *dev,
+                            u8 namespace, u8 num_actions,
+                            void *modify_actions, u32 *modify_header_id);
+void mlx5_modify_header_dealloc(struct mlx5_core_dev *dev, u32 modify_header_id);
+
 bool mlx5_lag_intf_add(struct mlx5_interface *intf, struct mlx5_priv *priv);
 
 int mlx5_query_mtpps(struct mlx5_core_dev *dev, u32 *mtpps, u32 mtpps_size);
index 6b6c30deee83ca289ba0bfcb924678efe55e65e7..2fb8c6585ac711c748d18355e9d4877f779ccbf5 100644 (file)
@@ -15,7 +15,8 @@ obj-$(CONFIG_MLXSW_SPECTRUM)  += mlxsw_spectrum.o
 mlxsw_spectrum-objs            := spectrum.o spectrum_buffers.o \
                                   spectrum_switchdev.o spectrum_router.o \
                                   spectrum_kvdl.o spectrum_acl_tcam.o \
-                                  spectrum_acl.o spectrum_flower.o
+                                  spectrum_acl.o spectrum_flower.o \
+                                  spectrum_cnt.o spectrum_dpipe.o
 mlxsw_spectrum-$(CONFIG_MLXSW_SPECTRUM_DCB)    += spectrum_dcb.o
 obj-$(CONFIG_MLXSW_MINIMAL)    += mlxsw_minimal.o
 mlxsw_minimal-objs             := minimal.o
index a1b48421648a3c11e25e7a5c148a25bf07d322c0..479511cf79bc1ca3f02ec3dd1b8cb578416f1cc8 100644 (file)
@@ -1043,13 +1043,6 @@ MLXSW_ITEM32(cmd_mbox, sw2hw_cq, cv, 0x00, 28, 4);
  */
 MLXSW_ITEM32(cmd_mbox, sw2hw_cq, c_eqn, 0x00, 24, 1);
 
-/* cmd_mbox_sw2hw_cq_oi
- * When set, overrun ignore is enabled. When set, updates of
- * CQ consumer counter (poll for completion) or Request completion
- * notifications (Arm CQ) DoorBells should not be rung on that CQ.
- */
-MLXSW_ITEM32(cmd_mbox, sw2hw_cq, oi, 0x00, 12, 1);
-
 /* cmd_mbox_sw2hw_cq_st
  * Event delivery state machine
  * 0x0 - FIRED
@@ -1132,11 +1125,6 @@ static inline int mlxsw_cmd_sw2hw_eq(struct mlxsw_core *mlxsw_core,
  */
 MLXSW_ITEM32(cmd_mbox, sw2hw_eq, int_msix, 0x00, 24, 1);
 
-/* cmd_mbox_sw2hw_eq_oi
- * When set, overrun ignore is enabled.
- */
-MLXSW_ITEM32(cmd_mbox, sw2hw_eq, oi, 0x00, 12, 1);
-
 /* cmd_mbox_sw2hw_eq_st
  * Event delivery state machine
  * 0x0 - FIRED
index a4c07841aaf6254c844eb8d8512687b447928ba8..affe84eb4bff5717e5ddba4835395a8a8989f8ca 100644 (file)
@@ -40,9 +40,6 @@
 #include <linux/export.h>
 #include <linux/err.h>
 #include <linux/if_link.h>
-#include <linux/debugfs.h>
-#include <linux/seq_file.h>
-#include <linux/u64_stats_sync.h>
 #include <linux/netdevice.h>
 #include <linux/completion.h>
 #include <linux/skbuff.h>
@@ -74,23 +71,9 @@ static DEFINE_SPINLOCK(mlxsw_core_driver_list_lock);
 
 static const char mlxsw_core_driver_name[] = "mlxsw_core";
 
-static struct dentry *mlxsw_core_dbg_root;
-
 static struct workqueue_struct *mlxsw_wq;
 static struct workqueue_struct *mlxsw_owq;
 
-struct mlxsw_core_pcpu_stats {
-       u64                     trap_rx_packets[MLXSW_TRAP_ID_MAX];
-       u64                     trap_rx_bytes[MLXSW_TRAP_ID_MAX];
-       u64                     port_rx_packets[MLXSW_PORT_MAX_PORTS];
-       u64                     port_rx_bytes[MLXSW_PORT_MAX_PORTS];
-       struct u64_stats_sync   syncp;
-       u32                     trap_rx_dropped[MLXSW_TRAP_ID_MAX];
-       u32                     port_rx_dropped[MLXSW_PORT_MAX_PORTS];
-       u32                     trap_rx_invalid;
-       u32                     port_rx_invalid;
-};
-
 struct mlxsw_core_port {
        struct devlink_port devlink_port;
        void *port_driver_priv;
@@ -121,23 +104,48 @@ struct mlxsw_core {
                spinlock_t trans_list_lock; /* protects trans_list writes */
                bool use_emad;
        } emad;
-       struct mlxsw_core_pcpu_stats __percpu *pcpu_stats;
-       struct dentry *dbg_dir;
-       struct {
-               struct debugfs_blob_wrapper vsd_blob;
-               struct debugfs_blob_wrapper psid_blob;
-       } dbg;
        struct {
                u8 *mapping; /* lag_id+port_index to local_port mapping */
        } lag;
        struct mlxsw_res res;
        struct mlxsw_hwmon *hwmon;
        struct mlxsw_thermal *thermal;
-       struct mlxsw_core_port ports[MLXSW_PORT_MAX_PORTS];
+       struct mlxsw_core_port *ports;
+       unsigned int max_ports;
        unsigned long driver_priv[0];
        /* driver_priv has to be always the last item */
 };
 
+#define MLXSW_PORT_MAX_PORTS_DEFAULT   0x40
+
+static int mlxsw_ports_init(struct mlxsw_core *mlxsw_core)
+{
+       /* Switch ports are numbered from 1 up to the queried value */
+       if (MLXSW_CORE_RES_VALID(mlxsw_core, MAX_SYSTEM_PORT))
+               mlxsw_core->max_ports = MLXSW_CORE_RES_GET(mlxsw_core,
+                                                          MAX_SYSTEM_PORT) + 1;
+       else
+               mlxsw_core->max_ports = MLXSW_PORT_MAX_PORTS_DEFAULT + 1;
+
+       mlxsw_core->ports = kcalloc(mlxsw_core->max_ports,
+                                   sizeof(struct mlxsw_core_port), GFP_KERNEL);
+       if (!mlxsw_core->ports)
+               return -ENOMEM;
+
+       return 0;
+}
+
+static void mlxsw_ports_fini(struct mlxsw_core *mlxsw_core)
+{
+       kfree(mlxsw_core->ports);
+}
+
+unsigned int mlxsw_core_max_ports(const struct mlxsw_core *mlxsw_core)
+{
+       return mlxsw_core->max_ports;
+}
+EXPORT_SYMBOL(mlxsw_core_max_ports);
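
With the fixed MLXSW_PORT_MAX_PORTS array gone, the port array is sized from the queried MAX_SYSTEM_PORT resource, falling back to the old 0x40 default; one extra slot is allocated because ports are numbered from 1. A hedged sketch of just that sizing rule, with the resource query stubbed out:

/* Sketch only: port-array sizing as in mlxsw_ports_init(); res_valid
 * and queried stand in for the MLXSW_CORE_RES_* accessors.
 */
#include <stdio.h>

#define MLXSW_PORT_MAX_PORTS_DEFAULT 0x40

static unsigned int max_ports(int res_valid, unsigned int queried)
{
	/* ports are numbered 1..N, so allocate N + 1 slots */
	return (res_valid ? queried : MLXSW_PORT_MAX_PORTS_DEFAULT) + 1;
}

int main(void)
{
	printf("%u\n", max_ports(1, 52));	/* 53 */
	printf("%u\n", max_ports(0, 0));	/* 65 = 0x40 + 1 */
	return 0;
}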
+
 void *mlxsw_core_driver_priv(struct mlxsw_core *mlxsw_core)
 {
        return mlxsw_core->driver_priv;
@@ -703,91 +711,6 @@ err_out:
  * Core functions
  *****************/
 
-static int mlxsw_core_rx_stats_dbg_read(struct seq_file *file, void *data)
-{
-       struct mlxsw_core *mlxsw_core = file->private;
-       struct mlxsw_core_pcpu_stats *p;
-       u64 rx_packets, rx_bytes;
-       u64 tmp_rx_packets, tmp_rx_bytes;
-       u32 rx_dropped, rx_invalid;
-       unsigned int start;
-       int i;
-       int j;
-       static const char hdr[] =
-               "     NUM   RX_PACKETS     RX_BYTES RX_DROPPED\n";
-
-       seq_printf(file, hdr);
-       for (i = 0; i < MLXSW_TRAP_ID_MAX; i++) {
-               rx_packets = 0;
-               rx_bytes = 0;
-               rx_dropped = 0;
-               for_each_possible_cpu(j) {
-                       p = per_cpu_ptr(mlxsw_core->pcpu_stats, j);
-                       do {
-                               start = u64_stats_fetch_begin(&p->syncp);
-                               tmp_rx_packets = p->trap_rx_packets[i];
-                               tmp_rx_bytes = p->trap_rx_bytes[i];
-                       } while (u64_stats_fetch_retry(&p->syncp, start));
-
-                       rx_packets += tmp_rx_packets;
-                       rx_bytes += tmp_rx_bytes;
-                       rx_dropped += p->trap_rx_dropped[i];
-               }
-               seq_printf(file, "trap %3d %12llu %12llu %10u\n",
-                          i, rx_packets, rx_bytes, rx_dropped);
-       }
-       rx_invalid = 0;
-       for_each_possible_cpu(j) {
-               p = per_cpu_ptr(mlxsw_core->pcpu_stats, j);
-               rx_invalid += p->trap_rx_invalid;
-       }
-       seq_printf(file, "trap INV                           %10u\n",
-                  rx_invalid);
-
-       for (i = 0; i < MLXSW_PORT_MAX_PORTS; i++) {
-               rx_packets = 0;
-               rx_bytes = 0;
-               rx_dropped = 0;
-               for_each_possible_cpu(j) {
-                       p = per_cpu_ptr(mlxsw_core->pcpu_stats, j);
-                       do {
-                               start = u64_stats_fetch_begin(&p->syncp);
-                               tmp_rx_packets = p->port_rx_packets[i];
-                               tmp_rx_bytes = p->port_rx_bytes[i];
-                       } while (u64_stats_fetch_retry(&p->syncp, start));
-
-                       rx_packets += tmp_rx_packets;
-                       rx_bytes += tmp_rx_bytes;
-                       rx_dropped += p->port_rx_dropped[i];
-               }
-               seq_printf(file, "port %3d %12llu %12llu %10u\n",
-                          i, rx_packets, rx_bytes, rx_dropped);
-       }
-       rx_invalid = 0;
-       for_each_possible_cpu(j) {
-               p = per_cpu_ptr(mlxsw_core->pcpu_stats, j);
-               rx_invalid += p->port_rx_invalid;
-       }
-       seq_printf(file, "port INV                           %10u\n",
-                  rx_invalid);
-       return 0;
-}
-
-static int mlxsw_core_rx_stats_dbg_open(struct inode *inode, struct file *f)
-{
-       struct mlxsw_core *mlxsw_core = inode->i_private;
-
-       return single_open(f, mlxsw_core_rx_stats_dbg_read, mlxsw_core);
-}
-
-static const struct file_operations mlxsw_core_rx_stats_dbg_ops = {
-       .owner = THIS_MODULE,
-       .open = mlxsw_core_rx_stats_dbg_open,
-       .release = single_release,
-       .read = seq_read,
-       .llseek = seq_lseek
-};
-
 int mlxsw_core_driver_register(struct mlxsw_driver *mlxsw_driver)
 {
        spin_lock(&mlxsw_core_driver_list_lock);
@@ -835,39 +758,13 @@ static void mlxsw_core_driver_put(const char *kind)
        spin_unlock(&mlxsw_core_driver_list_lock);
 }
 
-static int mlxsw_core_debugfs_init(struct mlxsw_core *mlxsw_core)
-{
-       const struct mlxsw_bus_info *bus_info = mlxsw_core->bus_info;
-
-       mlxsw_core->dbg_dir = debugfs_create_dir(bus_info->device_name,
-                                                mlxsw_core_dbg_root);
-       if (!mlxsw_core->dbg_dir)
-               return -ENOMEM;
-       debugfs_create_file("rx_stats", S_IRUGO, mlxsw_core->dbg_dir,
-                           mlxsw_core, &mlxsw_core_rx_stats_dbg_ops);
-       mlxsw_core->dbg.vsd_blob.data = (void *) &bus_info->vsd;
-       mlxsw_core->dbg.vsd_blob.size = sizeof(bus_info->vsd);
-       debugfs_create_blob("vsd", S_IRUGO, mlxsw_core->dbg_dir,
-                           &mlxsw_core->dbg.vsd_blob);
-       mlxsw_core->dbg.psid_blob.data = (void *) &bus_info->psid;
-       mlxsw_core->dbg.psid_blob.size = sizeof(bus_info->psid);
-       debugfs_create_blob("psid", S_IRUGO, mlxsw_core->dbg_dir,
-                           &mlxsw_core->dbg.psid_blob);
-       return 0;
-}
-
-static void mlxsw_core_debugfs_fini(struct mlxsw_core *mlxsw_core)
-{
-       debugfs_remove_recursive(mlxsw_core->dbg_dir);
-}
-
 static int mlxsw_devlink_port_split(struct devlink *devlink,
                                    unsigned int port_index,
                                    unsigned int count)
 {
        struct mlxsw_core *mlxsw_core = devlink_priv(devlink);
 
-       if (port_index >= MLXSW_PORT_MAX_PORTS)
+       if (port_index >= mlxsw_core->max_ports)
                return -EINVAL;
        if (!mlxsw_core->driver->port_split)
                return -EOPNOTSUPP;
@@ -879,7 +776,7 @@ static int mlxsw_devlink_port_unsplit(struct devlink *devlink,
 {
        struct mlxsw_core *mlxsw_core = devlink_priv(devlink);
 
-       if (port_index >= MLXSW_PORT_MAX_PORTS)
+       if (port_index >= mlxsw_core->max_ports)
                return -EINVAL;
        if (!mlxsw_core->driver->port_unsplit)
                return -EOPNOTSUPP;
@@ -1101,18 +998,15 @@ int mlxsw_core_bus_device_register(const struct mlxsw_bus_info *mlxsw_bus_info,
        mlxsw_core->bus_priv = bus_priv;
        mlxsw_core->bus_info = mlxsw_bus_info;
 
-       mlxsw_core->pcpu_stats =
-               netdev_alloc_pcpu_stats(struct mlxsw_core_pcpu_stats);
-       if (!mlxsw_core->pcpu_stats) {
-               err = -ENOMEM;
-               goto err_alloc_stats;
-       }
-
        err = mlxsw_bus->init(bus_priv, mlxsw_core, mlxsw_driver->profile,
                              &mlxsw_core->res);
        if (err)
                goto err_bus_init;
 
+       err = mlxsw_ports_init(mlxsw_core);
+       if (err)
+               goto err_ports_init;
+
        if (MLXSW_CORE_RES_VALID(mlxsw_core, MAX_LAG) &&
            MLXSW_CORE_RES_VALID(mlxsw_core, MAX_LAG_MEMBERS)) {
                alloc_size = sizeof(u8) *
@@ -1148,15 +1042,8 @@ int mlxsw_core_bus_device_register(const struct mlxsw_bus_info *mlxsw_bus_info,
                        goto err_driver_init;
        }
 
-       err = mlxsw_core_debugfs_init(mlxsw_core);
-       if (err)
-               goto err_debugfs_init;
-
        return 0;
 
-err_debugfs_init:
-       if (mlxsw_core->driver->fini)
-               mlxsw_core->driver->fini(mlxsw_core);
 err_driver_init:
        mlxsw_thermal_fini(mlxsw_core->thermal);
 err_thermal_init:
@@ -1167,10 +1054,10 @@ err_devlink_register:
 err_emad_init:
        kfree(mlxsw_core->lag.mapping);
 err_alloc_lag_mapping:
+       mlxsw_ports_fini(mlxsw_core);
+err_ports_init:
        mlxsw_bus->fini(bus_priv);
 err_bus_init:
-       free_percpu(mlxsw_core->pcpu_stats);
-err_alloc_stats:
        devlink_free(devlink);
 err_devlink_alloc:
        mlxsw_core_driver_put(device_kind);
@@ -1183,15 +1070,14 @@ void mlxsw_core_bus_device_unregister(struct mlxsw_core *mlxsw_core)
        const char *device_kind = mlxsw_core->bus_info->device_kind;
        struct devlink *devlink = priv_to_devlink(mlxsw_core);
 
-       mlxsw_core_debugfs_fini(mlxsw_core);
        if (mlxsw_core->driver->fini)
                mlxsw_core->driver->fini(mlxsw_core);
        mlxsw_thermal_fini(mlxsw_core->thermal);
        devlink_unregister(devlink);
        mlxsw_emad_fini(mlxsw_core);
        kfree(mlxsw_core->lag.mapping);
+       mlxsw_ports_fini(mlxsw_core);
        mlxsw_core->bus->fini(mlxsw_core->bus_priv);
-       free_percpu(mlxsw_core->pcpu_stats);
        devlink_free(devlink);
        mlxsw_core_driver_put(device_kind);
 }
@@ -1639,7 +1525,6 @@ void mlxsw_core_skb_receive(struct mlxsw_core *mlxsw_core, struct sk_buff *skb,
 {
        struct mlxsw_rx_listener_item *rxl_item;
        const struct mlxsw_rx_listener *rxl;
-       struct mlxsw_core_pcpu_stats *pcpu_stats;
        u8 local_port;
        bool found = false;
 
@@ -1661,7 +1546,7 @@ void mlxsw_core_skb_receive(struct mlxsw_core *mlxsw_core, struct sk_buff *skb,
                            __func__, local_port, rx_info->trap_id);
 
        if ((rx_info->trap_id >= MLXSW_TRAP_ID_MAX) ||
-           (local_port >= MLXSW_PORT_MAX_PORTS))
+           (local_port >= mlxsw_core->max_ports))
                goto drop;
 
        rcu_read_lock();
@@ -1678,26 +1563,10 @@ void mlxsw_core_skb_receive(struct mlxsw_core *mlxsw_core, struct sk_buff *skb,
        if (!found)
                goto drop;
 
-       pcpu_stats = this_cpu_ptr(mlxsw_core->pcpu_stats);
-       u64_stats_update_begin(&pcpu_stats->syncp);
-       pcpu_stats->port_rx_packets[local_port]++;
-       pcpu_stats->port_rx_bytes[local_port] += skb->len;
-       pcpu_stats->trap_rx_packets[rx_info->trap_id]++;
-       pcpu_stats->trap_rx_bytes[rx_info->trap_id] += skb->len;
-       u64_stats_update_end(&pcpu_stats->syncp);
-
        rxl->func(skb, local_port, rxl_item->priv);
        return;
 
 drop:
-       if (rx_info->trap_id >= MLXSW_TRAP_ID_MAX)
-               this_cpu_inc(mlxsw_core->pcpu_stats->trap_rx_invalid);
-       else
-               this_cpu_inc(mlxsw_core->pcpu_stats->trap_rx_dropped[rx_info->trap_id]);
-       if (local_port >= MLXSW_PORT_MAX_PORTS)
-               this_cpu_inc(mlxsw_core->pcpu_stats->port_rx_invalid);
-       else
-               this_cpu_inc(mlxsw_core->pcpu_stats->port_rx_dropped[local_port]);
        dev_kfree_skb(skb);
 }
 EXPORT_SYMBOL(mlxsw_core_skb_receive);
@@ -1926,15 +1795,8 @@ static int __init mlxsw_core_module_init(void)
                err = -ENOMEM;
                goto err_alloc_ordered_workqueue;
        }
-       mlxsw_core_dbg_root = debugfs_create_dir(mlxsw_core_driver_name, NULL);
-       if (!mlxsw_core_dbg_root) {
-               err = -ENOMEM;
-               goto err_debugfs_create_dir;
-       }
        return 0;
 
-err_debugfs_create_dir:
-       destroy_workqueue(mlxsw_owq);
 err_alloc_ordered_workqueue:
        destroy_workqueue(mlxsw_wq);
        return err;
@@ -1942,7 +1804,6 @@ err_alloc_ordered_workqueue:
 
 static void __exit mlxsw_core_module_exit(void)
 {
-       debugfs_remove_recursive(mlxsw_core_dbg_root);
        destroy_workqueue(mlxsw_owq);
        destroy_workqueue(mlxsw_wq);
 }
index cf38cf9027f80a95a4f8a744de7551cb0810bf51..7fb35395adf52076ead77cbcbe0381317c197627 100644 (file)
@@ -57,6 +57,8 @@ struct mlxsw_driver;
 struct mlxsw_bus;
 struct mlxsw_bus_info;
 
+unsigned int mlxsw_core_max_ports(const struct mlxsw_core *mlxsw_core);
+
 void *mlxsw_core_driver_priv(struct mlxsw_core *mlxsw_core);
 
 int mlxsw_core_driver_register(struct mlxsw_driver *mlxsw_driver);
index 5f337715a4da64dcd94178bd627189648d6f775b..a984c361926c7841cfcd12c9a7c5a2e17bfdc15c 100644 (file)
@@ -567,6 +567,89 @@ static char *mlxsw_afa_block_append_action(struct mlxsw_afa_block *block,
        return oneact + MLXSW_AFA_PAYLOAD_OFFSET;
 }
 
+/* VLAN Action
+ * -----------
+ * VLAN action is used for manipulating VLANs. It can be used to implement QinQ,
+ * VLAN translation, change of PCP bits of the VLAN tag, push, pop or swap VLANs,
+ * and more.
+ */
+
+#define MLXSW_AFA_VLAN_CODE 0x02
+#define MLXSW_AFA_VLAN_SIZE 1
+
+enum mlxsw_afa_vlan_vlan_tag_cmd {
+       MLXSW_AFA_VLAN_VLAN_TAG_CMD_NOP,
+       MLXSW_AFA_VLAN_VLAN_TAG_CMD_PUSH_TAG,
+       MLXSW_AFA_VLAN_VLAN_TAG_CMD_POP_TAG,
+};
+
+enum mlxsw_afa_vlan_cmd {
+       MLXSW_AFA_VLAN_CMD_NOP,
+       MLXSW_AFA_VLAN_CMD_SET_OUTER,
+       MLXSW_AFA_VLAN_CMD_SET_INNER,
+       MLXSW_AFA_VLAN_CMD_COPY_OUTER_TO_INNER,
+       MLXSW_AFA_VLAN_CMD_COPY_INNER_TO_OUTER,
+       MLXSW_AFA_VLAN_CMD_SWAP,
+};
+
+/* afa_vlan_vlan_tag_cmd
+ * Tag command: push, pop, nop VLAN header.
+ */
+MLXSW_ITEM32(afa, vlan, vlan_tag_cmd, 0x00, 29, 3);
+
+/* afa_vlan_vid_cmd */
+MLXSW_ITEM32(afa, vlan, vid_cmd, 0x04, 29, 3);
+
+/* afa_vlan_vid */
+MLXSW_ITEM32(afa, vlan, vid, 0x04, 0, 12);
+
+/* afa_vlan_ethertype_cmd */
+MLXSW_ITEM32(afa, vlan, ethertype_cmd, 0x08, 29, 3);
+
+/* afa_vlan_ethertype
+ * Index to EtherTypes in Switch VLAN EtherType Register (SVER).
+ */
+MLXSW_ITEM32(afa, vlan, ethertype, 0x08, 24, 3);
+
+/* afa_vlan_pcp_cmd */
+MLXSW_ITEM32(afa, vlan, pcp_cmd, 0x08, 13, 3);
+
+/* afa_vlan_pcp */
+MLXSW_ITEM32(afa, vlan, pcp, 0x08, 8, 3);
+
+static inline void
+mlxsw_afa_vlan_pack(char *payload,
+                   enum mlxsw_afa_vlan_vlan_tag_cmd vlan_tag_cmd,
+                   enum mlxsw_afa_vlan_cmd vid_cmd, u16 vid,
+                   enum mlxsw_afa_vlan_cmd pcp_cmd, u8 pcp,
+                   enum mlxsw_afa_vlan_cmd ethertype_cmd, u8 ethertype)
+{
+       mlxsw_afa_vlan_vlan_tag_cmd_set(payload, vlan_tag_cmd);
+       mlxsw_afa_vlan_vid_cmd_set(payload, vid_cmd);
+       mlxsw_afa_vlan_vid_set(payload, vid);
+       mlxsw_afa_vlan_pcp_cmd_set(payload, pcp_cmd);
+       mlxsw_afa_vlan_pcp_set(payload, pcp);
+       mlxsw_afa_vlan_ethertype_cmd_set(payload, ethertype_cmd);
+       mlxsw_afa_vlan_ethertype_set(payload, ethertype);
+}
+
+int mlxsw_afa_block_append_vlan_modify(struct mlxsw_afa_block *block,
+                                      u16 vid, u8 pcp, u8 et)
+{
+       char *act = mlxsw_afa_block_append_action(block,
+                                                 MLXSW_AFA_VLAN_CODE,
+                                                 MLXSW_AFA_VLAN_SIZE);
+
+       if (!act)
+               return -ENOBUFS;
+       mlxsw_afa_vlan_pack(act, MLXSW_AFA_VLAN_VLAN_TAG_CMD_NOP,
+                           MLXSW_AFA_VLAN_CMD_SET_OUTER, vid,
+                           MLXSW_AFA_VLAN_CMD_SET_OUTER, pcp,
+                           MLXSW_AFA_VLAN_CMD_SET_OUTER, et);
+       return 0;
+}
+EXPORT_SYMBOL(mlxsw_afa_block_append_vlan_modify);
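Aside (editor's sketch, not part of the patch): a caller that already holds a
flex-action block from the driver's usual block setup could rewrite the outer
VLAN of matched packets with a single call to the new helper. The VID, PCP and
EtherType values below are made up for illustration.

static int example_rewrite_outer_vlan(struct mlxsw_afa_block *block)
{
	/* Set outer VID to 100, PCP to 3, SVER EtherType index 0. */
	return mlxsw_afa_block_append_vlan_modify(block, 100, 3, 0);
}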
+
 /* Trap / Discard Action
  * ---------------------
  * The Trap / Discard action enables trapping / mirroring packets to the CPU
@@ -677,3 +760,54 @@ err_append_action:
        return err;
 }
 EXPORT_SYMBOL(mlxsw_afa_block_append_fwd);
+
+/* Policing and Counting Action
+ * ----------------------------
+ * Policing and Counting action is used for binding policer and counter
+ * to ACL rules.
+ */
+
+#define MLXSW_AFA_POLCNT_CODE 0x08
+#define MLXSW_AFA_POLCNT_SIZE 1
+
+enum mlxsw_afa_polcnt_counter_set_type {
+       /* No count */
+       MLXSW_AFA_POLCNT_COUNTER_SET_TYPE_NO_COUNT = 0x00,
+       /* Count packets and bytes */
+       MLXSW_AFA_POLCNT_COUNTER_SET_TYPE_PACKETS_BYTES = 0x03,
+       /* Count only packets */
+       MLXSW_AFA_POLCNT_COUNTER_SET_TYPE_PACKETS = 0x05,
+};
+
+/* afa_polcnt_counter_set_type
+ * Counter set type for flow counters.
+ */
+MLXSW_ITEM32(afa, polcnt, counter_set_type, 0x04, 24, 8);
+
+/* afa_polcnt_counter_index
+ * Counter index for flow counters.
+ */
+MLXSW_ITEM32(afa, polcnt, counter_index, 0x04, 0, 24);
+
+static inline void
+mlxsw_afa_polcnt_pack(char *payload,
+                     enum mlxsw_afa_polcnt_counter_set_type set_type,
+                     u32 counter_index)
+{
+       mlxsw_afa_polcnt_counter_set_type_set(payload, set_type);
+       mlxsw_afa_polcnt_counter_index_set(payload, counter_index);
+}
+
+int mlxsw_afa_block_append_counter(struct mlxsw_afa_block *block,
+                                  u32 counter_index)
+{
+       char *act = mlxsw_afa_block_append_action(block,
+                                                 MLXSW_AFA_POLCNT_CODE,
+                                                 MLXSW_AFA_POLCNT_SIZE);
+       if (!act)
+               return -ENOBUFS;
+       mlxsw_afa_polcnt_pack(act, MLXSW_AFA_POLCNT_COUNTER_SET_TYPE_PACKETS_BYTES,
+                             counter_index);
+       return 0;
+}
+EXPORT_SYMBOL(mlxsw_afa_block_append_counter);
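Aside (editor's sketch, not part of the patch): binding an already-allocated
flow counter to a rule's action block is a single append; as with the other
append helpers in this file, -ENOBUFS means the block has no action slot left.

static int example_bind_flow_counter(struct mlxsw_afa_block *block,
				     u32 counter_index)
{
	/* 'counter_index' is assumed to come from the counter pool. */
	return mlxsw_afa_block_append_counter(block, counter_index);
}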
index 43f78dcfe3942b87c5167054eff93f4f45e19a52..a03362c1ef3245cff5aba5c2dd07232614df094e 100644 (file)
@@ -62,5 +62,9 @@ void mlxsw_afa_block_jump(struct mlxsw_afa_block *block, u16 group_id);
 int mlxsw_afa_block_append_drop(struct mlxsw_afa_block *block);
 int mlxsw_afa_block_append_fwd(struct mlxsw_afa_block *block,
                               u8 local_port, bool in_port);
+int mlxsw_afa_block_append_vlan_modify(struct mlxsw_afa_block *block,
+                                      u16 vid, u8 pcp, u8 et);
+int mlxsw_afa_block_append_counter(struct mlxsw_afa_block *block,
+                                  u32 counter_index);
 
 #endif
index e4fcba7c2af202002e9382cee5a0a83857707f6b..c75e9141e3ec57b9ca47f1b35cc717c4dae14c83 100644 (file)
@@ -54,6 +54,8 @@ enum mlxsw_afk_element {
        MLXSW_AFK_ELEMENT_DST_IP6_LO,
        MLXSW_AFK_ELEMENT_DST_L4_PORT,
        MLXSW_AFK_ELEMENT_SRC_L4_PORT,
+       MLXSW_AFK_ELEMENT_VID,
+       MLXSW_AFK_ELEMENT_PCP,
        MLXSW_AFK_ELEMENT_MAX,
 };
 
@@ -88,7 +90,7 @@ struct mlxsw_afk_element_info {
        MLXSW_AFK_ELEMENT_INFO(MLXSW_AFK_ELEMENT_TYPE_BUF,                      \
                               _element, _offset, 0, _size)
 
-/* For the purpose of the driver, define a internal storage scratchpad
+/* For the purpose of the driver, define an internal storage scratchpad
  * that will be used to store key/mask values. For each defined element type
  * define an internal storage geometry.
  */
@@ -98,6 +100,8 @@ static const struct mlxsw_afk_element_info mlxsw_afk_element_infos[] = {
        MLXSW_AFK_ELEMENT_INFO_BUF(SMAC, 0x0A, 6),
        MLXSW_AFK_ELEMENT_INFO_U32(ETHERTYPE, 0x00, 0, 16),
        MLXSW_AFK_ELEMENT_INFO_U32(IP_PROTO, 0x10, 0, 8),
+       MLXSW_AFK_ELEMENT_INFO_U32(VID, 0x10, 8, 12),
+       MLXSW_AFK_ELEMENT_INFO_U32(PCP, 0x10, 20, 3),
        MLXSW_AFK_ELEMENT_INFO_U32(SRC_IP4, 0x18, 0, 32),
        MLXSW_AFK_ELEMENT_INFO_U32(DST_IP4, 0x1C, 0, 32),
        MLXSW_AFK_ELEMENT_INFO_BUF(SRC_IP6_HI, 0x18, 8),
index a223c85dfde064eee873eb6ffd6aae818a4f46ba..23f7d828cf676244766e582674a2946ea467bb6b 100644 (file)
@@ -44,8 +44,6 @@
 #include <linux/skbuff.h>
 #include <linux/if_vlan.h>
 #include <linux/log2.h>
-#include <linux/debugfs.h>
-#include <linux/seq_file.h>
 #include <linux/string.h>
 
 #include "pci_hw.h"
@@ -57,8 +55,6 @@
 
 static const char mlxsw_pci_driver_name[] = "mlxsw_pci";
 
-static struct dentry *mlxsw_pci_dbg_root;
-
 #define mlxsw_pci_write32(mlxsw_pci, reg, val) \
        iowrite32be(val, (mlxsw_pci)->hw_addr + (MLXSW_PCI_ ## reg))
 #define mlxsw_pci_read32(mlxsw_pci, reg) \
@@ -71,21 +67,6 @@ enum mlxsw_pci_queue_type {
        MLXSW_PCI_QUEUE_TYPE_EQ,
 };
 
-static const char *mlxsw_pci_queue_type_str(enum mlxsw_pci_queue_type q_type)
-{
-       switch (q_type) {
-       case MLXSW_PCI_QUEUE_TYPE_SDQ:
-               return "sdq";
-       case MLXSW_PCI_QUEUE_TYPE_RDQ:
-               return "rdq";
-       case MLXSW_PCI_QUEUE_TYPE_CQ:
-               return "cq";
-       case MLXSW_PCI_QUEUE_TYPE_EQ:
-               return "eq";
-       }
-       BUG();
-}
-
 #define MLXSW_PCI_QUEUE_TYPE_COUNT     4
 
 static const u16 mlxsw_pci_doorbell_type_offset[] = {
@@ -155,7 +136,6 @@ struct mlxsw_pci {
        u8 __iomem *hw_addr;
        struct mlxsw_pci_queue_type_group queues[MLXSW_PCI_QUEUE_TYPE_COUNT];
        u32 doorbell_offset;
-       struct msix_entry msix_entry;
        struct mlxsw_core *core;
        struct {
                struct mlxsw_pci_mem_item *items;
@@ -174,7 +154,6 @@ struct mlxsw_pci {
                } comp;
        } cmd;
        struct mlxsw_bus_info bus_info;
-       struct dentry *dbg_dir;
 };
 
 static void mlxsw_pci_queue_tasklet_schedule(struct mlxsw_pci_queue *q)
@@ -261,21 +240,11 @@ static u8 mlxsw_pci_sdq_count(struct mlxsw_pci *mlxsw_pci)
        return __mlxsw_pci_queue_count(mlxsw_pci, MLXSW_PCI_QUEUE_TYPE_SDQ);
 }
 
-static u8 mlxsw_pci_rdq_count(struct mlxsw_pci *mlxsw_pci)
-{
-       return __mlxsw_pci_queue_count(mlxsw_pci, MLXSW_PCI_QUEUE_TYPE_RDQ);
-}
-
 static u8 mlxsw_pci_cq_count(struct mlxsw_pci *mlxsw_pci)
 {
        return __mlxsw_pci_queue_count(mlxsw_pci, MLXSW_PCI_QUEUE_TYPE_CQ);
 }
 
-static u8 mlxsw_pci_eq_count(struct mlxsw_pci *mlxsw_pci)
-{
-       return __mlxsw_pci_queue_count(mlxsw_pci, MLXSW_PCI_QUEUE_TYPE_EQ);
-}
-
 static struct mlxsw_pci_queue *
 __mlxsw_pci_queue_get(struct mlxsw_pci *mlxsw_pci,
                      enum mlxsw_pci_queue_type q_type, u8 q_num)
@@ -390,26 +359,6 @@ static void mlxsw_pci_sdq_fini(struct mlxsw_pci *mlxsw_pci,
        mlxsw_cmd_hw2sw_sdq(mlxsw_pci->core, q->num);
 }
 
-static int mlxsw_pci_sdq_dbg_read(struct seq_file *file, void *data)
-{
-       struct mlxsw_pci *mlxsw_pci = dev_get_drvdata(file->private);
-       struct mlxsw_pci_queue *q;
-       int i;
-       static const char hdr[] =
-               "NUM PROD_COUNT CONS_COUNT COUNT\n";
-
-       seq_printf(file, hdr);
-       for (i = 0; i < mlxsw_pci_sdq_count(mlxsw_pci); i++) {
-               q = mlxsw_pci_sdq_get(mlxsw_pci, i);
-               spin_lock_bh(&q->lock);
-               seq_printf(file, "%3d %10d %10d %5d\n",
-                          i, q->producer_counter, q->consumer_counter,
-                          q->count);
-               spin_unlock_bh(&q->lock);
-       }
-       return 0;
-}
-
 static int mlxsw_pci_wqe_frag_map(struct mlxsw_pci *mlxsw_pci, char *wqe,
                                  int index, char *frag_data, size_t frag_len,
                                  int direction)
@@ -544,26 +493,6 @@ static void mlxsw_pci_rdq_fini(struct mlxsw_pci *mlxsw_pci,
        }
 }
 
-static int mlxsw_pci_rdq_dbg_read(struct seq_file *file, void *data)
-{
-       struct mlxsw_pci *mlxsw_pci = dev_get_drvdata(file->private);
-       struct mlxsw_pci_queue *q;
-       int i;
-       static const char hdr[] =
-               "NUM PROD_COUNT CONS_COUNT COUNT\n";
-
-       seq_printf(file, hdr);
-       for (i = 0; i < mlxsw_pci_rdq_count(mlxsw_pci); i++) {
-               q = mlxsw_pci_rdq_get(mlxsw_pci, i);
-               spin_lock_bh(&q->lock);
-               seq_printf(file, "%3d %10d %10d %5d\n",
-                          i, q->producer_counter, q->consumer_counter,
-                          q->count);
-               spin_unlock_bh(&q->lock);
-       }
-       return 0;
-}
-
 static int mlxsw_pci_cq_init(struct mlxsw_pci *mlxsw_pci, char *mbox,
                             struct mlxsw_pci_queue *q)
 {
@@ -580,7 +509,6 @@ static int mlxsw_pci_cq_init(struct mlxsw_pci *mlxsw_pci, char *mbox,
 
        mlxsw_cmd_mbox_sw2hw_cq_cv_set(mbox, 0); /* CQE ver 0 */
        mlxsw_cmd_mbox_sw2hw_cq_c_eqn_set(mbox, MLXSW_PCI_EQ_COMP_NUM);
-       mlxsw_cmd_mbox_sw2hw_cq_oi_set(mbox, 0);
        mlxsw_cmd_mbox_sw2hw_cq_st_set(mbox, 0);
        mlxsw_cmd_mbox_sw2hw_cq_log_cq_size_set(mbox, ilog2(q->count));
        for (i = 0; i < MLXSW_PCI_AQ_PAGES; i++) {
@@ -602,27 +530,6 @@ static void mlxsw_pci_cq_fini(struct mlxsw_pci *mlxsw_pci,
        mlxsw_cmd_hw2sw_cq(mlxsw_pci->core, q->num);
 }
 
-static int mlxsw_pci_cq_dbg_read(struct seq_file *file, void *data)
-{
-       struct mlxsw_pci *mlxsw_pci = dev_get_drvdata(file->private);
-
-       struct mlxsw_pci_queue *q;
-       int i;
-       static const char hdr[] =
-               "NUM CONS_INDEX  SDQ_COUNT  RDQ_COUNT COUNT\n";
-
-       seq_printf(file, hdr);
-       for (i = 0; i < mlxsw_pci_cq_count(mlxsw_pci); i++) {
-               q = mlxsw_pci_cq_get(mlxsw_pci, i);
-               spin_lock_bh(&q->lock);
-               seq_printf(file, "%3d %10d %10d %10d %5d\n",
-                          i, q->consumer_counter, q->u.cq.comp_sdq_count,
-                          q->u.cq.comp_rdq_count, q->count);
-               spin_unlock_bh(&q->lock);
-       }
-       return 0;
-}
-
 static void mlxsw_pci_cqe_sdq_handle(struct mlxsw_pci *mlxsw_pci,
                                     struct mlxsw_pci_queue *q,
                                     u16 consumer_counter_limit,
@@ -755,7 +662,6 @@ static int mlxsw_pci_eq_init(struct mlxsw_pci *mlxsw_pci, char *mbox,
        }
 
        mlxsw_cmd_mbox_sw2hw_eq_int_msix_set(mbox, 1); /* MSI-X used */
-       mlxsw_cmd_mbox_sw2hw_eq_oi_set(mbox, 0);
        mlxsw_cmd_mbox_sw2hw_eq_st_set(mbox, 1); /* armed */
        mlxsw_cmd_mbox_sw2hw_eq_log_eq_size_set(mbox, ilog2(q->count));
        for (i = 0; i < MLXSW_PCI_AQ_PAGES; i++) {
@@ -777,27 +683,6 @@ static void mlxsw_pci_eq_fini(struct mlxsw_pci *mlxsw_pci,
        mlxsw_cmd_hw2sw_eq(mlxsw_pci->core, q->num);
 }
 
-static int mlxsw_pci_eq_dbg_read(struct seq_file *file, void *data)
-{
-       struct mlxsw_pci *mlxsw_pci = dev_get_drvdata(file->private);
-       struct mlxsw_pci_queue *q;
-       int i;
-       static const char hdr[] =
-               "NUM CONS_COUNT     EV_CMD    EV_COMP   EV_OTHER COUNT\n";
-
-       seq_printf(file, hdr);
-       for (i = 0; i < mlxsw_pci_eq_count(mlxsw_pci); i++) {
-               q = mlxsw_pci_eq_get(mlxsw_pci, i);
-               spin_lock_bh(&q->lock);
-               seq_printf(file, "%3d %10d %10d %10d %10d %5d\n",
-                          i, q->consumer_counter, q->u.eq.ev_cmd_count,
-                          q->u.eq.ev_comp_count, q->u.eq.ev_other_count,
-                          q->count);
-               spin_unlock_bh(&q->lock);
-       }
-       return 0;
-}
-
 static void mlxsw_pci_eq_cmd_event(struct mlxsw_pci *mlxsw_pci, char *eqe)
 {
        mlxsw_pci->cmd.comp.status = mlxsw_pci_eqe_cmd_status_get(eqe);
@@ -868,7 +753,6 @@ struct mlxsw_pci_queue_ops {
        void (*fini)(struct mlxsw_pci *mlxsw_pci,
                     struct mlxsw_pci_queue *q);
        void (*tasklet)(unsigned long data);
-       int (*dbg_read)(struct seq_file *s, void *data);
        u16 elem_count;
        u8 elem_size;
 };
@@ -877,7 +761,6 @@ static const struct mlxsw_pci_queue_ops mlxsw_pci_sdq_ops = {
        .type           = MLXSW_PCI_QUEUE_TYPE_SDQ,
        .init           = mlxsw_pci_sdq_init,
        .fini           = mlxsw_pci_sdq_fini,
-       .dbg_read       = mlxsw_pci_sdq_dbg_read,
        .elem_count     = MLXSW_PCI_WQE_COUNT,
        .elem_size      = MLXSW_PCI_WQE_SIZE,
 };
@@ -886,7 +769,6 @@ static const struct mlxsw_pci_queue_ops mlxsw_pci_rdq_ops = {
        .type           = MLXSW_PCI_QUEUE_TYPE_RDQ,
        .init           = mlxsw_pci_rdq_init,
        .fini           = mlxsw_pci_rdq_fini,
-       .dbg_read       = mlxsw_pci_rdq_dbg_read,
        .elem_count     = MLXSW_PCI_WQE_COUNT,
        .elem_size      = MLXSW_PCI_WQE_SIZE
 };
@@ -896,7 +778,6 @@ static const struct mlxsw_pci_queue_ops mlxsw_pci_cq_ops = {
        .init           = mlxsw_pci_cq_init,
        .fini           = mlxsw_pci_cq_fini,
        .tasklet        = mlxsw_pci_cq_tasklet,
-       .dbg_read       = mlxsw_pci_cq_dbg_read,
        .elem_count     = MLXSW_PCI_CQE_COUNT,
        .elem_size      = MLXSW_PCI_CQE_SIZE
 };
@@ -906,7 +787,6 @@ static const struct mlxsw_pci_queue_ops mlxsw_pci_eq_ops = {
        .init           = mlxsw_pci_eq_init,
        .fini           = mlxsw_pci_eq_fini,
        .tasklet        = mlxsw_pci_eq_tasklet,
-       .dbg_read       = mlxsw_pci_eq_dbg_read,
        .elem_count     = MLXSW_PCI_EQE_COUNT,
        .elem_size      = MLXSW_PCI_EQE_SIZE
 };
@@ -984,9 +864,7 @@ static int mlxsw_pci_queue_group_init(struct mlxsw_pci *mlxsw_pci, char *mbox,
                                      const struct mlxsw_pci_queue_ops *q_ops,
                                      u8 num_qs)
 {
-       struct pci_dev *pdev = mlxsw_pci->pdev;
        struct mlxsw_pci_queue_type_group *queue_group;
-       char tmp[16];
        int i;
        int err;
 
@@ -1003,10 +881,6 @@ static int mlxsw_pci_queue_group_init(struct mlxsw_pci *mlxsw_pci, char *mbox,
        }
        queue_group->count = num_qs;
 
-       sprintf(tmp, "%s_stats", mlxsw_pci_queue_type_str(q_ops->type));
-       debugfs_create_devm_seqfile(&pdev->dev, tmp, mlxsw_pci->dbg_dir,
-                                   q_ops->dbg_read);
-
        return 0;
 
 err_queue_init:
@@ -1534,7 +1408,7 @@ static int mlxsw_pci_init(void *bus_priv, struct mlxsw_core *mlxsw_core,
        if (err)
                goto err_aqs_init;
 
-       err = request_irq(mlxsw_pci->msix_entry.vector,
+       err = request_irq(pci_irq_vector(pdev, 0),
                          mlxsw_pci_eq_irq_handler, 0,
                          mlxsw_pci->bus_info.device_kind, mlxsw_pci);
        if (err) {
@@ -1567,7 +1441,7 @@ static void mlxsw_pci_fini(void *bus_priv)
 {
        struct mlxsw_pci *mlxsw_pci = bus_priv;
 
-       free_irq(mlxsw_pci->msix_entry.vector, mlxsw_pci);
+       free_irq(pci_irq_vector(mlxsw_pci->pdev, 0), mlxsw_pci);
        mlxsw_pci_aqs_fini(mlxsw_pci);
        mlxsw_pci_fw_area_fini(mlxsw_pci);
        mlxsw_pci_mbox_free(mlxsw_pci, &mlxsw_pci->cmd.out_mbox);
@@ -1842,8 +1716,8 @@ static int mlxsw_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
                goto err_sw_reset;
        }
 
-       err = pci_enable_msix_exact(pdev, &mlxsw_pci->msix_entry, 1);
-       if (err) {
+       err = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_MSIX);
+       if (err < 0) {
                dev_err(&pdev->dev, "MSI-X init failed\n");
                goto err_msix_init;
        }
@@ -1852,14 +1726,6 @@ static int mlxsw_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
        mlxsw_pci->bus_info.device_name = pci_name(mlxsw_pci->pdev);
        mlxsw_pci->bus_info.dev = &pdev->dev;
 
-       mlxsw_pci->dbg_dir = debugfs_create_dir(mlxsw_pci->bus_info.device_name,
-                                               mlxsw_pci_dbg_root);
-       if (!mlxsw_pci->dbg_dir) {
-               dev_err(&pdev->dev, "Failed to create debugfs dir\n");
-               err = -ENOMEM;
-               goto err_dbg_create_dir;
-       }
-
        err = mlxsw_core_bus_device_register(&mlxsw_pci->bus_info,
                                             &mlxsw_pci_bus, mlxsw_pci);
        if (err) {
@@ -1870,9 +1736,7 @@ static int mlxsw_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
        return 0;
 
 err_bus_device_register:
-       debugfs_remove_recursive(mlxsw_pci->dbg_dir);
-err_dbg_create_dir:
-       pci_disable_msix(mlxsw_pci->pdev);
+       pci_free_irq_vectors(mlxsw_pci->pdev);
 err_msix_init:
 err_sw_reset:
        iounmap(mlxsw_pci->hw_addr);
@@ -1892,8 +1756,7 @@ static void mlxsw_pci_remove(struct pci_dev *pdev)
        struct mlxsw_pci *mlxsw_pci = pci_get_drvdata(pdev);
 
        mlxsw_core_bus_device_unregister(mlxsw_pci->core);
-       debugfs_remove_recursive(mlxsw_pci->dbg_dir);
-       pci_disable_msix(mlxsw_pci->pdev);
+       pci_free_irq_vectors(mlxsw_pci->pdev);
        iounmap(mlxsw_pci->hw_addr);
        pci_release_regions(mlxsw_pci->pdev);
        pci_disable_device(mlxsw_pci->pdev);
@@ -1916,15 +1779,11 @@ EXPORT_SYMBOL(mlxsw_pci_driver_unregister);
 
 static int __init mlxsw_pci_module_init(void)
 {
-       mlxsw_pci_dbg_root = debugfs_create_dir(mlxsw_pci_driver_name, NULL);
-       if (!mlxsw_pci_dbg_root)
-               return -ENOMEM;
        return 0;
 }
 
 static void __exit mlxsw_pci_module_exit(void)
 {
-       debugfs_remove_recursive(mlxsw_pci_dbg_root);
 }
 
 module_init(mlxsw_pci_module_init);
index 3d42146473b30a786629ec06091eb4364cdfde37..c580abba8d342b844b8f776e32fd8d683722da18 100644 (file)
 
 #define MLXSW_PORT_MID                 0xd000
 
-#define MLXSW_PORT_MAX_PHY_PORTS       0x40
-#define MLXSW_PORT_MAX_PORTS           (MLXSW_PORT_MAX_PHY_PORTS + 1)
-
 #define MLXSW_PORT_MAX_IB_PHY_PORTS    36
 #define MLXSW_PORT_MAX_IB_PORTS                (MLXSW_PORT_MAX_IB_PHY_PORTS + 1)
 
-#define MLXSW_PORT_DEVID_BITS_OFFSET   10
-#define MLXSW_PORT_PHY_BITS_OFFSET     4
-#define MLXSW_PORT_PHY_BITS_MASK       (MLXSW_PORT_MAX_PHY_PORTS - 1)
-
 #define MLXSW_PORT_CPU_PORT            0x0
-#define MLXSW_PORT_ROUTER_PORT         (MLXSW_PORT_MAX_PHY_PORTS + 2)
 
-#define MLXSW_PORT_DONT_CARE           (MLXSW_PORT_MAX_PORTS)
+#define MLXSW_PORT_DONT_CARE           0xFF
 
 #define MLXSW_PORT_MODULE_MAX_WIDTH    4
 
index 0899e2d310e26269a5c3d025b7afeeb1516bf21e..83b277c8090e3d16a4b32e380eee387aac95a1a0 100644 (file)
@@ -769,7 +769,7 @@ static inline void mlxsw_reg_spvid_pack(char *payload, u8 local_port, u16 pvid)
 #define MLXSW_REG_SPVM_ID 0x200F
 #define MLXSW_REG_SPVM_BASE_LEN 0x04 /* base length, without records */
 #define MLXSW_REG_SPVM_REC_LEN 0x04 /* record length */
-#define MLXSW_REG_SPVM_REC_MAX_COUNT 256
+#define MLXSW_REG_SPVM_REC_MAX_COUNT 255
 #define MLXSW_REG_SPVM_LEN (MLXSW_REG_SPVM_BASE_LEN +  \
                    MLXSW_REG_SPVM_REC_LEN * MLXSW_REG_SPVM_REC_MAX_COUNT)
 
@@ -1702,7 +1702,7 @@ static inline void mlxsw_reg_sfmr_pack(char *payload,
 #define MLXSW_REG_SPVMLR_ID 0x2020
 #define MLXSW_REG_SPVMLR_BASE_LEN 0x04 /* base length, without records */
 #define MLXSW_REG_SPVMLR_REC_LEN 0x04 /* record length */
-#define MLXSW_REG_SPVMLR_REC_MAX_COUNT 256
+#define MLXSW_REG_SPVMLR_REC_MAX_COUNT 255
 #define MLXSW_REG_SPVMLR_LEN (MLXSW_REG_SPVMLR_BASE_LEN + \
                              MLXSW_REG_SPVMLR_REC_LEN * \
                              MLXSW_REG_SPVMLR_REC_MAX_COUNT)
@@ -4125,6 +4125,60 @@ MLXSW_ITEM32(reg, ritr, sp_if_system_port, 0x08, 0, 16);
  */
 MLXSW_ITEM32(reg, ritr, sp_if_vid, 0x18, 0, 12);
 
+/* Shared between ingress/egress */
+enum mlxsw_reg_ritr_counter_set_type {
+       /* No Count. */
+       MLXSW_REG_RITR_COUNTER_SET_TYPE_NO_COUNT = 0x0,
+       /* Basic. Used for router interfaces, counting the following:
+        *      - Error and Discard counters.
+        *      - Unicast, Multicast and Broadcast counters. Sharing the
+        *        same set of counters for the different types of traffic
+        *        (IPv4, IPv6 and MPLS).
+        */
+       MLXSW_REG_RITR_COUNTER_SET_TYPE_BASIC = 0x9,
+};
+
+/* reg_ritr_ingress_counter_index
+ * Counter Index for flow counter.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, ritr, ingress_counter_index, 0x38, 0, 24);
+
+/* reg_ritr_ingress_counter_set_type
+ * Ingress Counter Set Type for router interface counter.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, ritr, ingress_counter_set_type, 0x38, 24, 8);
+
+/* reg_ritr_egress_counter_index
+ * Counter Index for flow counter.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, ritr, egress_counter_index, 0x3C, 0, 24);
+
+/* reg_ritr_egress_counter_set_type
+ * Egress Counter Set Type for router interface counter.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, ritr, egress_counter_set_type, 0x3C, 24, 8);
+
+static inline void mlxsw_reg_ritr_counter_pack(char *payload, u32 index,
+                                              bool enable, bool egress)
+{
+       enum mlxsw_reg_ritr_counter_set_type set_type;
+
+       if (enable)
+               set_type = MLXSW_REG_RITR_COUNTER_SET_TYPE_BASIC;
+       else
+               set_type = MLXSW_REG_RITR_COUNTER_SET_TYPE_NO_COUNT;
+       mlxsw_reg_ritr_egress_counter_set_type_set(payload, set_type);
+
+       if (egress)
+               mlxsw_reg_ritr_egress_counter_index_set(payload, index);
+       else
+               mlxsw_reg_ritr_ingress_counter_index_set(payload, index);
+}
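Aside (editor's sketch, not part of the patch): enabling the basic egress
counter set on a RIF then amounts to packing RITR with the counter fields and
writing the register; a real caller would re-pack the rest of the interface
configuration as well, which is omitted here.

static int example_rif_egress_counter_enable(struct mlxsw_sp *mlxsw_sp,
					     u16 rif, u32 counter_index)
{
	char ritr_pl[MLXSW_REG_RITR_LEN];

	mlxsw_reg_ritr_rif_pack(ritr_pl, rif);
	mlxsw_reg_ritr_counter_pack(ritr_pl, counter_index, true, true);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
}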
+
 static inline void mlxsw_reg_ritr_rif_pack(char *payload, u16 rif)
 {
        MLXSW_REG_ZERO(ritr, payload);
@@ -4141,7 +4195,8 @@ static inline void mlxsw_reg_ritr_sp_if_pack(char *payload, bool lag,
 
 static inline void mlxsw_reg_ritr_pack(char *payload, bool enable,
                                       enum mlxsw_reg_ritr_if_type type,
-                                      u16 rif, u16 mtu, const char *mac)
+                                      u16 rif, u16 vr_id, u16 mtu,
+                                      const char *mac)
 {
        bool op = enable ? MLXSW_REG_RITR_RIF_CREATE : MLXSW_REG_RITR_RIF_DEL;
 
@@ -4153,6 +4208,7 @@ static inline void mlxsw_reg_ritr_pack(char *payload, bool enable,
        mlxsw_reg_ritr_rif_set(payload, rif);
        mlxsw_reg_ritr_ipv4_fe_set(payload, 1);
        mlxsw_reg_ritr_lb_en_set(payload, 1);
+       mlxsw_reg_ritr_virtual_router_set(payload, vr_id);
        mlxsw_reg_ritr_mtu_set(payload, mtu);
        mlxsw_reg_ritr_if_mac_memcpy_to(payload, mac);
 }
@@ -4285,6 +4341,129 @@ static inline void mlxsw_reg_ratr_eth_entry_pack(char *payload,
        mlxsw_reg_ratr_eth_destination_mac_memcpy_to(payload, dest_mac);
 }
 
+/* RICNT - Router Interface Counter Register
+ * -----------------------------------------
+ * The RICNT register retrieves per router interface performance counters.
+ */
+#define MLXSW_REG_RICNT_ID 0x800B
+#define MLXSW_REG_RICNT_LEN 0x100
+
+MLXSW_REG_DEFINE(ricnt, MLXSW_REG_RICNT_ID, MLXSW_REG_RICNT_LEN);
+
+/* reg_ricnt_counter_index
+ * Counter index
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, ricnt, counter_index, 0x04, 0, 24);
+
+enum mlxsw_reg_ricnt_counter_set_type {
+       /* No Count. */
+       MLXSW_REG_RICNT_COUNTER_SET_TYPE_NO_COUNT = 0x00,
+       /* Basic. Used for router interfaces, counting the following:
+        *      - Error and Discard counters.
+        *      - Unicast, Multicast and Broadcast counters. Sharing the
+        *        same set of counters for the different types of traffic
+        *        (IPv4, IPv6 and MPLS).
+        */
+       MLXSW_REG_RICNT_COUNTER_SET_TYPE_BASIC = 0x09,
+};
+
+/* reg_ricnt_counter_set_type
+ * Counter Set Type for router interface counter
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, ricnt, counter_set_type, 0x04, 24, 8);
+
+enum mlxsw_reg_ricnt_opcode {
+       /* Nop. Supported only for read access. */
+       MLXSW_REG_RICNT_OPCODE_NOP = 0x00,
+       /* Clear. Setting the clr bit will reset the counter value for
+        * all counters of the specified Router Interface.
+        */
+       MLXSW_REG_RICNT_OPCODE_CLEAR = 0x08,
+};
+
+/* reg_ricnt_op
+ * Opcode
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, ricnt, op, 0x00, 28, 4);
+
+/* reg_ricnt_good_unicast_packets
+ * good unicast packets.
+ * Access: RW
+ */
+MLXSW_ITEM64(reg, ricnt, good_unicast_packets, 0x08, 0, 64);
+
+/* reg_ricnt_good_multicast_packets
+ * good multicast packets.
+ * Access: RW
+ */
+MLXSW_ITEM64(reg, ricnt, good_multicast_packets, 0x10, 0, 64);
+
+/* reg_ricnt_good_broadcast_packets
+ * good broadcast packets
+ * Access: RW
+ */
+MLXSW_ITEM64(reg, ricnt, good_broadcast_packets, 0x18, 0, 64);
+
+/* reg_ricnt_good_unicast_bytes
+ * A count of L3 data and padding octets not including L2 headers
+ * for good unicast frames.
+ * Access: RW
+ */
+MLXSW_ITEM64(reg, ricnt, good_unicast_bytes, 0x20, 0, 64);
+
+/* reg_ricnt_good_multicast_bytes
+ * A count of L3 data and padding octets not including L2 headers
+ * for good multicast frames.
+ * Access: RW
+ */
+MLXSW_ITEM64(reg, ricnt, good_multicast_bytes, 0x28, 0, 64);
+
+/* reg_ricnt_good_broadcast_bytes
+ * A count of L3 data and padding octets not including L2 headers
+ * for good broadcast frames.
+ * Access: RW
+ */
+MLXSW_ITEM64(reg, ricnt, good_broadcast_bytes, 0x30, 0, 64);
+
+/* reg_ricnt_error_packets
+ * A count of errored frames that do not pass the router checks.
+ * Access: RW
+ */
+MLXSW_ITEM64(reg, ricnt, error_packets, 0x38, 0, 64);
+
+/* reg_ricnt_discard_packets
+ * A count of non-errored frames that do not pass the router checks.
+ * Access: RW
+ */
+MLXSW_ITEM64(reg, ricnt, discard_packets, 0x40, 0, 64);
+
+/* reg_ricnt_error_bytes
+ * A count of L3 data and padding octets not including L2 headers
+ * for errored frames.
+ * Access: RW
+ */
+MLXSW_ITEM64(reg, ricnt, error_bytes, 0x48, 0, 64);
+
+/* reg_ricnt_discard_bytes
+ * A count of L3 data and padding octets not including L2 headers
+ * for non-errored frames that do not pass the router checks.
+ * Access: RW
+ */
+MLXSW_ITEM64(reg, ricnt, discard_bytes, 0x50, 0, 64);
+
+static inline void mlxsw_reg_ricnt_pack(char *payload, u32 index,
+                                       enum mlxsw_reg_ricnt_opcode op)
+{
+       MLXSW_REG_ZERO(ricnt, payload);
+       mlxsw_reg_ricnt_op_set(payload, op);
+       mlxsw_reg_ricnt_counter_index_set(payload, index);
+       mlxsw_reg_ricnt_counter_set_type_set(payload,
+                                            MLXSW_REG_RICNT_COUNTER_SET_TYPE_BASIC);
+}
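Aside (editor's sketch, not part of the patch): a read of the basic set pairs
the pack above with a register query; the good-unicast fields are picked
arbitrarily here, any of the getters defined above would work the same way.

static int example_ricnt_read(struct mlxsw_sp *mlxsw_sp, u32 counter_index,
			      u64 *p_packets, u64 *p_bytes)
{
	char ricnt_pl[MLXSW_REG_RICNT_LEN];
	int err;

	mlxsw_reg_ricnt_pack(ricnt_pl, counter_index,
			     MLXSW_REG_RICNT_OPCODE_NOP);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ricnt), ricnt_pl);
	if (err)
		return err;
	*p_packets = mlxsw_reg_ricnt_good_unicast_packets_get(ricnt_pl);
	*p_bytes = mlxsw_reg_ricnt_good_unicast_bytes_get(ricnt_pl);
	return 0;
}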
+
 /* RALTA - Router Algorithmic LPM Tree Allocation Register
  * -------------------------------------------------------
  * RALTA is used to allocate the LPM trees of the SHSPM method.
@@ -5504,6 +5683,70 @@ static inline void mlxsw_reg_mpsc_pack(char *payload, u8 local_port, bool e,
        mlxsw_reg_mpsc_rate_set(payload, rate);
 }
 
+/* MGPC - Monitoring General Purpose Counter Set Register
+ * ------------------------------------------------------
+ * The MGPC register retrieves and sets the General Purpose Counter Set.
+ */
+#define MLXSW_REG_MGPC_ID 0x9081
+#define MLXSW_REG_MGPC_LEN 0x18
+
+MLXSW_REG_DEFINE(mgpc, MLXSW_REG_MGPC_ID, MLXSW_REG_MGPC_LEN);
+
+enum mlxsw_reg_mgpc_counter_set_type {
+       /* No count */
+       MLXSW_REG_MGPC_COUNTER_SET_TYPE_NO_COUNT = 0x00,
+       /* Count packets and bytes */
+       MLXSW_REG_MGPC_COUNTER_SET_TYPE_PACKETS_BYTES = 0x03,
+       /* Count only packets */
+       MLXSW_REG_MGPC_COUNTER_SET_TYPE_PACKETS = 0x05,
+};
+
+/* reg_mgpc_counter_set_type
+ * Counter set type.
+ * Access: OP
+ */
+MLXSW_ITEM32(reg, mgpc, counter_set_type, 0x00, 24, 8);
+
+/* reg_mgpc_counter_index
+ * Counter index.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, mgpc, counter_index, 0x00, 0, 24);
+
+enum mlxsw_reg_mgpc_opcode {
+       /* Nop */
+       MLXSW_REG_MGPC_OPCODE_NOP = 0x00,
+       /* Clear counters */
+       MLXSW_REG_MGPC_OPCODE_CLEAR = 0x08,
+};
+
+/* reg_mgpc_opcode
+ * Opcode.
+ * Access: OP
+ */
+MLXSW_ITEM32(reg, mgpc, opcode, 0x04, 28, 4);
+
+/* reg_mgpc_byte_counter
+ * Byte counter value.
+ * Access: RW
+ */
+MLXSW_ITEM64(reg, mgpc, byte_counter, 0x08, 0, 64);
+
+/* reg_mgpc_packet_counter
+ * Packet counter value.
+ * Access: RW
+ */
+MLXSW_ITEM64(reg, mgpc, packet_counter, 0x10, 0, 64);
+
+static inline void mlxsw_reg_mgpc_pack(char *payload, u32 counter_index,
+                                      enum mlxsw_reg_mgpc_opcode opcode,
+                                      enum mlxsw_reg_mgpc_counter_set_type set_type)
+{
+       MLXSW_REG_ZERO(mgpc, payload);
+       mlxsw_reg_mgpc_counter_index_set(payload, counter_index);
+       mlxsw_reg_mgpc_counter_set_type_set(payload, set_type);
+       mlxsw_reg_mgpc_opcode_set(payload, opcode);
+}
+
 /* SBPR - Shared Buffer Pools Register
  * -----------------------------------
  * The SBPR configures and retrieves the shared buffer pools and configuration.
@@ -5960,6 +6203,7 @@ static const struct mlxsw_reg_info *mlxsw_reg_infos[] = {
        MLXSW_REG(rgcr),
        MLXSW_REG(ritr),
        MLXSW_REG(ratr),
+       MLXSW_REG(ricnt),
        MLXSW_REG(ralta),
        MLXSW_REG(ralst),
        MLXSW_REG(raltb),
@@ -5977,6 +6221,7 @@ static const struct mlxsw_reg_info *mlxsw_reg_infos[] = {
        MLXSW_REG(mpar),
        MLXSW_REG(mlcr),
        MLXSW_REG(mpsc),
+       MLXSW_REG(mgpc),
        MLXSW_REG(sbpr),
        MLXSW_REG(sbcm),
        MLXSW_REG(sbpm),
index bce8c2e006302db45ce4eaedf8b3a368ec2660c4..9556d934714b0871119d52258813a23f94227d09 100644 (file)
@@ -43,11 +43,15 @@ enum mlxsw_res_id {
        MLXSW_RES_ID_KVD_SINGLE_MIN_SIZE,
        MLXSW_RES_ID_KVD_DOUBLE_MIN_SIZE,
        MLXSW_RES_ID_MAX_TRAP_GROUPS,
+       MLXSW_RES_ID_COUNTER_POOL_SIZE,
        MLXSW_RES_ID_MAX_SPAN,
+       MLXSW_RES_ID_COUNTER_SIZE_PACKETS_BYTES,
+       MLXSW_RES_ID_COUNTER_SIZE_ROUTER_BASIC,
        MLXSW_RES_ID_MAX_SYSTEM_PORT,
        MLXSW_RES_ID_MAX_LAG,
        MLXSW_RES_ID_MAX_LAG_MEMBERS,
        MLXSW_RES_ID_MAX_BUFFER_SIZE,
+       MLXSW_RES_ID_CELL_SIZE,
        MLXSW_RES_ID_ACL_MAX_TCAM_REGIONS,
        MLXSW_RES_ID_ACL_MAX_TCAM_RULES,
        MLXSW_RES_ID_ACL_MAX_REGIONS,
@@ -59,6 +63,7 @@ enum mlxsw_res_id {
        MLXSW_RES_ID_MAX_CPU_POLICERS,
        MLXSW_RES_ID_MAX_VRS,
        MLXSW_RES_ID_MAX_RIFS,
+       MLXSW_RES_ID_MAX_LPM_TREES,
 
        /* Internal resources.
         * Determined by the SW, not queried from the HW.
@@ -75,11 +80,15 @@ static u16 mlxsw_res_ids[] = {
        [MLXSW_RES_ID_KVD_SINGLE_MIN_SIZE] = 0x1002,
        [MLXSW_RES_ID_KVD_DOUBLE_MIN_SIZE] = 0x1003,
        [MLXSW_RES_ID_MAX_TRAP_GROUPS] = 0x2201,
+       [MLXSW_RES_ID_COUNTER_POOL_SIZE] = 0x2410,
        [MLXSW_RES_ID_MAX_SPAN] = 0x2420,
+       [MLXSW_RES_ID_COUNTER_SIZE_PACKETS_BYTES] = 0x2443,
+       [MLXSW_RES_ID_COUNTER_SIZE_ROUTER_BASIC] = 0x2449,
        [MLXSW_RES_ID_MAX_SYSTEM_PORT] = 0x2502,
        [MLXSW_RES_ID_MAX_LAG] = 0x2520,
        [MLXSW_RES_ID_MAX_LAG_MEMBERS] = 0x2521,
        [MLXSW_RES_ID_MAX_BUFFER_SIZE] = 0x2802,        /* Bytes */
+       [MLXSW_RES_ID_CELL_SIZE] = 0x2803,      /* Bytes */
        [MLXSW_RES_ID_ACL_MAX_TCAM_REGIONS] = 0x2901,
        [MLXSW_RES_ID_ACL_MAX_TCAM_RULES] = 0x2902,
        [MLXSW_RES_ID_ACL_MAX_REGIONS] = 0x2903,
@@ -91,6 +100,7 @@ static u16 mlxsw_res_ids[] = {
        [MLXSW_RES_ID_MAX_CPU_POLICERS] = 0x2A13,
        [MLXSW_RES_ID_MAX_VRS] = 0x2C01,
        [MLXSW_RES_ID_MAX_RIFS] = 0x2C02,
+       [MLXSW_RES_ID_MAX_LPM_TREES] = 0x2C30,
 };
 
 struct mlxsw_res {
index 16484f24b7dbbaa2fe10170bd7cb46fee9832938..b031f09bf4e64bc08d99c6fc91104234a4ff93cb 100644 (file)
@@ -66,6 +66,8 @@
 #include "port.h"
 #include "trap.h"
 #include "txheader.h"
+#include "spectrum_cnt.h"
+#include "spectrum_dpipe.h"
 
 static const char mlxsw_sp_driver_name[] = "mlxsw_spectrum";
 static const char mlxsw_sp_driver_version[] = "1.0";
@@ -138,6 +140,60 @@ MLXSW_ITEM32(tx, hdr, fid, 0x08, 0, 16);
  */
 MLXSW_ITEM32(tx, hdr, type, 0x0C, 0, 4);
 
+int mlxsw_sp_flow_counter_get(struct mlxsw_sp *mlxsw_sp,
+                             unsigned int counter_index, u64 *packets,
+                             u64 *bytes)
+{
+       char mgpc_pl[MLXSW_REG_MGPC_LEN];
+       int err;
+
+       mlxsw_reg_mgpc_pack(mgpc_pl, counter_index, MLXSW_REG_MGPC_OPCODE_NOP,
+                           MLXSW_REG_MGPC_COUNTER_SET_TYPE_PACKETS_BYTES);
+       err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(mgpc), mgpc_pl);
+       if (err)
+               return err;
+       *packets = mlxsw_reg_mgpc_packet_counter_get(mgpc_pl);
+       *bytes = mlxsw_reg_mgpc_byte_counter_get(mgpc_pl);
+       return 0;
+}
+
+static int mlxsw_sp_flow_counter_clear(struct mlxsw_sp *mlxsw_sp,
+                                      unsigned int counter_index)
+{
+       char mgpc_pl[MLXSW_REG_MGPC_LEN];
+
+       mlxsw_reg_mgpc_pack(mgpc_pl, counter_index, MLXSW_REG_MGPC_OPCODE_CLEAR,
+                           MLXSW_REG_MGPC_COUNTER_SET_TYPE_PACKETS_BYTES);
+       return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mgpc), mgpc_pl);
+}
+
+int mlxsw_sp_flow_counter_alloc(struct mlxsw_sp *mlxsw_sp,
+                               unsigned int *p_counter_index)
+{
+       int err;
+
+       err = mlxsw_sp_counter_alloc(mlxsw_sp, MLXSW_SP_COUNTER_SUB_POOL_FLOW,
+                                    p_counter_index);
+       if (err)
+               return err;
+       err = mlxsw_sp_flow_counter_clear(mlxsw_sp, *p_counter_index);
+       if (err)
+               goto err_counter_clear;
+       return 0;
+
+err_counter_clear:
+       mlxsw_sp_counter_free(mlxsw_sp, MLXSW_SP_COUNTER_SUB_POOL_FLOW,
+                             *p_counter_index);
+       return err;
+}
+
+void mlxsw_sp_flow_counter_free(struct mlxsw_sp *mlxsw_sp,
+                               unsigned int counter_index)
+{
+       mlxsw_sp_counter_free(mlxsw_sp, MLXSW_SP_COUNTER_SUB_POOL_FLOW,
+                             counter_index);
+}
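Aside (editor's sketch, not part of the patch): taken together, the three
helpers above give the whole flow-counter lifecycle; the flower stats path
added elsewhere in this patch is the real consumer, but a minimal user would
look roughly like this.

static int example_flow_counter_lifecycle(struct mlxsw_sp *mlxsw_sp)
{
	unsigned int counter_index;
	u64 packets, bytes;
	int err;

	err = mlxsw_sp_flow_counter_alloc(mlxsw_sp, &counter_index);
	if (err)
		return err;
	/* ... bind the counter to a rule and let traffic flow ... */
	err = mlxsw_sp_flow_counter_get(mlxsw_sp, counter_index,
					&packets, &bytes);
	mlxsw_sp_flow_counter_free(mlxsw_sp, counter_index);
	return err;
}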
+
 static void mlxsw_sp_txhdr_construct(struct sk_buff *skb,
                                     const struct mlxsw_tx_info *tx_info)
 {
@@ -304,9 +360,10 @@ static bool mlxsw_sp_span_is_egress_mirror(struct mlxsw_sp_port *port)
        return false;
 }
 
-static int mlxsw_sp_span_mtu_to_buffsize(int mtu)
+static int mlxsw_sp_span_mtu_to_buffsize(const struct mlxsw_sp *mlxsw_sp,
+                                        int mtu)
 {
-       return MLXSW_SP_BYTES_TO_CELLS(mtu * 5 / 2) + 1;
+       return mlxsw_sp_bytes_cells(mlxsw_sp, mtu * 5 / 2) + 1;
 }
 
 static int mlxsw_sp_span_port_mtu_update(struct mlxsw_sp_port *port, u16 mtu)
@@ -319,8 +376,9 @@ static int mlxsw_sp_span_port_mtu_update(struct mlxsw_sp_port *port, u16 mtu)
         * updated according to the mtu value
         */
        if (mlxsw_sp_span_is_egress_mirror(port)) {
-               mlxsw_reg_sbib_pack(sbib_pl, port->local_port,
-                                   mlxsw_sp_span_mtu_to_buffsize(mtu));
+               u32 buffsize = mlxsw_sp_span_mtu_to_buffsize(mlxsw_sp, mtu);
+
+               mlxsw_reg_sbib_pack(sbib_pl, port->local_port, buffsize);
                err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sbib), sbib_pl);
                if (err) {
                        netdev_err(port->dev, "Could not update shared buffer for mirroring\n");
@@ -357,8 +415,10 @@ mlxsw_sp_span_inspected_port_bind(struct mlxsw_sp_port *port,
 
        /* if it is an egress SPAN, bind a shared buffer to it */
        if (type == MLXSW_SP_SPAN_EGRESS) {
-               mlxsw_reg_sbib_pack(sbib_pl, port->local_port,
-                                   mlxsw_sp_span_mtu_to_buffsize(port->dev->mtu));
+               u32 buffsize = mlxsw_sp_span_mtu_to_buffsize(mlxsw_sp,
+                                                            port->dev->mtu);
+
+               mlxsw_reg_sbib_pack(sbib_pl, port->local_port, buffsize);
                err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sbib), sbib_pl);
                if (err) {
                        netdev_err(port->dev, "Could not create shared buffer for mirroring\n");
@@ -745,19 +805,47 @@ static int mlxsw_sp_port_set_mac_address(struct net_device *dev, void *p)
        return 0;
 }
 
-static void mlxsw_sp_pg_buf_pack(char *pbmc_pl, int pg_index, int mtu,
-                                bool pause_en, bool pfc_en, u16 delay)
+static u16 mlxsw_sp_pg_buf_threshold_get(const struct mlxsw_sp *mlxsw_sp,
+                                        int mtu)
 {
-       u16 pg_size = 2 * MLXSW_SP_BYTES_TO_CELLS(mtu);
+       return 2 * mlxsw_sp_bytes_cells(mlxsw_sp, mtu);
+}
 
-       delay = pfc_en ? mlxsw_sp_pfc_delay_get(mtu, delay) :
-                        MLXSW_SP_PAUSE_DELAY;
+#define MLXSW_SP_CELL_FACTOR 2 /* 2 * cell_size / (IPG + cell_size + 1) */
+
+static u16 mlxsw_sp_pfc_delay_get(const struct mlxsw_sp *mlxsw_sp, int mtu,
+                                 u16 delay)
+{
+       delay = mlxsw_sp_bytes_cells(mlxsw_sp, DIV_ROUND_UP(delay,
+                                                           BITS_PER_BYTE));
+       return MLXSW_SP_CELL_FACTOR * delay + mlxsw_sp_bytes_cells(mlxsw_sp,
+                                                                  mtu);
+}
 
-       if (pause_en || pfc_en)
-               mlxsw_reg_pbmc_lossless_buffer_pack(pbmc_pl, pg_index,
-                                                   pg_size + delay, pg_size);
+/* Maximum delay buffer needed in case of PAUSE frames, in bytes.
+ * Assumes 100m cable and maximum MTU.
+ */
+#define MLXSW_SP_PAUSE_DELAY 58752
+
+static u16 mlxsw_sp_pg_buf_delay_get(const struct mlxsw_sp *mlxsw_sp, int mtu,
+                                    u16 delay, bool pfc, bool pause)
+{
+       if (pfc)
+               return mlxsw_sp_pfc_delay_get(mlxsw_sp, mtu, delay);
+       else if (pause)
+               return mlxsw_sp_bytes_cells(mlxsw_sp, MLXSW_SP_PAUSE_DELAY);
        else
-               mlxsw_reg_pbmc_lossy_buffer_pack(pbmc_pl, pg_index, pg_size);
+               return 0;
+}
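Aside (editor's worked example, not part of the patch): assuming, purely for
illustration, a 100-byte cell and that mlxsw_sp_bytes_cells() rounds up, a PFC
delay of 8000 bits converts to DIV_ROUND_UP(8000, BITS_PER_BYTE) = 1000 bytes
= 10 cells, so with a 1500-byte MTU (15 cells) mlxsw_sp_pfc_delay_get()
returns MLXSW_SP_CELL_FACTOR * 10 + 15 = 35 cells. In the plain-PAUSE case the
fixed 58752-byte budget would likewise convert to DIV_ROUND_UP(58752, 100) =
588 cells.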
+
+static void mlxsw_sp_pg_buf_pack(char *pbmc_pl, int index, u16 size, u16 thres,
+                                bool lossy)
+{
+       if (lossy)
+               mlxsw_reg_pbmc_lossy_buffer_pack(pbmc_pl, index, size);
+       else
+               mlxsw_reg_pbmc_lossless_buffer_pack(pbmc_pl, index, size,
+                                                   thres);
 }
 
 int __mlxsw_sp_port_headroom_set(struct mlxsw_sp_port *mlxsw_sp_port, int mtu,
@@ -778,6 +866,8 @@ int __mlxsw_sp_port_headroom_set(struct mlxsw_sp_port *mlxsw_sp_port, int mtu,
        for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
                bool configure = false;
                bool pfc = false;
+               bool lossy;
+               u16 thres;
 
                for (j = 0; j < IEEE_8021QAZ_MAX_TCS; j++) {
                        if (prio_tc[j] == i) {
@@ -789,7 +879,12 @@ int __mlxsw_sp_port_headroom_set(struct mlxsw_sp_port *mlxsw_sp_port, int mtu,
 
                if (!configure)
                        continue;
-               mlxsw_sp_pg_buf_pack(pbmc_pl, i, mtu, pause_en, pfc, delay);
+
+               lossy = !(pfc || pause_en);
+               thres = mlxsw_sp_pg_buf_threshold_get(mlxsw_sp, mtu);
+               delay = mlxsw_sp_pg_buf_delay_get(mlxsw_sp, mtu, delay, pfc,
+                                                 pause_en);
+               mlxsw_sp_pg_buf_pack(pbmc_pl, i, thres + delay, thres, lossy);
        }
 
        return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pbmc), pbmc_pl);
@@ -1368,7 +1463,7 @@ static int mlxsw_sp_setup_tc(struct net_device *dev, u32 handle,
                                                       tc->cls_mall);
                        return 0;
                default:
-                       return -EINVAL;
+                       return -EOPNOTSUPP;
                }
        case TC_SETUP_CLSFLOWER:
                switch (tc->cls_flower->command) {
@@ -1379,6 +1474,9 @@ static int mlxsw_sp_setup_tc(struct net_device *dev, u32 handle,
                        mlxsw_sp_flower_destroy(mlxsw_sp_port, ingress,
                                                tc->cls_flower);
                        return 0;
+               case TC_CLSFLOWER_STATS:
+                       return mlxsw_sp_flower_stats(mlxsw_sp_port, ingress,
+                                                    tc->cls_flower);
                default:
                        return -EOPNOTSUPP;
                }
@@ -1492,6 +1590,7 @@ err_port_pause_configure:
 struct mlxsw_sp_port_hw_stats {
        char str[ETH_GSTRING_LEN];
        u64 (*getter)(const char *payload);
+       bool cells_bytes;
 };
 
 static struct mlxsw_sp_port_hw_stats mlxsw_sp_port_hw_stats[] = {
@@ -1612,17 +1711,11 @@ static struct mlxsw_sp_port_hw_stats mlxsw_sp_port_hw_prio_stats[] = {
 
 #define MLXSW_SP_PORT_HW_PRIO_STATS_LEN ARRAY_SIZE(mlxsw_sp_port_hw_prio_stats)
 
-static u64 mlxsw_reg_ppcnt_tc_transmit_queue_bytes_get(const char *ppcnt_pl)
-{
-       u64 transmit_queue = mlxsw_reg_ppcnt_tc_transmit_queue_get(ppcnt_pl);
-
-       return MLXSW_SP_CELLS_TO_BYTES(transmit_queue);
-}
-
 static struct mlxsw_sp_port_hw_stats mlxsw_sp_port_hw_tc_stats[] = {
        {
                .str = "tc_transmit_queue_tc",
-               .getter = mlxsw_reg_ppcnt_tc_transmit_queue_bytes_get,
+               .getter = mlxsw_reg_ppcnt_tc_transmit_queue_get,
+               .cells_bytes = true,
        },
        {
                .str = "tc_no_buffer_discard_uc_tc",
@@ -1734,6 +1827,8 @@ static void __mlxsw_sp_port_get_stats(struct net_device *dev,
                                      enum mlxsw_reg_ppcnt_grp grp, int prio,
                                      u64 *data, int data_index)
 {
+       struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
+       struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
        struct mlxsw_sp_port_hw_stats *hw_stats;
        char ppcnt_pl[MLXSW_REG_PPCNT_LEN];
        int i, len;
@@ -1743,8 +1838,13 @@ static void __mlxsw_sp_port_get_stats(struct net_device *dev,
        if (err)
                return;
        mlxsw_sp_port_get_stats_raw(dev, grp, prio, ppcnt_pl);
-       for (i = 0; i < len; i++)
+       for (i = 0; i < len; i++) {
                data[data_index + i] = hw_stats[i].getter(ppcnt_pl);
+               if (!hw_stats[i].cells_bytes)
+                       continue;
+               data[data_index + i] = mlxsw_sp_cells_bytes(mlxsw_sp,
+                                                           data[data_index + i]);
+       }
 }
 
 static void mlxsw_sp_port_get_stats(struct net_device *dev,
@@ -2537,25 +2637,33 @@ static void mlxsw_sp_ports_remove(struct mlxsw_sp *mlxsw_sp)
 {
        int i;
 
-       for (i = 1; i < MLXSW_PORT_MAX_PORTS; i++)
+       for (i = 1; i < mlxsw_core_max_ports(mlxsw_sp->core); i++)
                if (mlxsw_sp_port_created(mlxsw_sp, i))
                        mlxsw_sp_port_remove(mlxsw_sp, i);
+       kfree(mlxsw_sp->port_to_module);
        kfree(mlxsw_sp->ports);
 }
 
 static int mlxsw_sp_ports_create(struct mlxsw_sp *mlxsw_sp)
 {
+       unsigned int max_ports = mlxsw_core_max_ports(mlxsw_sp->core);
        u8 module, width, lane;
        size_t alloc_size;
        int i;
        int err;
 
-       alloc_size = sizeof(struct mlxsw_sp_port *) * MLXSW_PORT_MAX_PORTS;
+       alloc_size = sizeof(struct mlxsw_sp_port *) * max_ports;
        mlxsw_sp->ports = kzalloc(alloc_size, GFP_KERNEL);
        if (!mlxsw_sp->ports)
                return -ENOMEM;
 
-       for (i = 1; i < MLXSW_PORT_MAX_PORTS; i++) {
+       mlxsw_sp->port_to_module = kcalloc(max_ports, sizeof(u8), GFP_KERNEL);
+       if (!mlxsw_sp->port_to_module) {
+               err = -ENOMEM;
+               goto err_port_to_module_alloc;
+       }
+
+       for (i = 1; i < max_ports; i++) {
                err = mlxsw_sp_port_module_info_get(mlxsw_sp, i, &module,
                                                    &width, &lane);
                if (err)
@@ -2575,6 +2683,8 @@ err_port_module_info_get:
        for (i--; i >= 1; i--)
                if (mlxsw_sp_port_created(mlxsw_sp, i))
                        mlxsw_sp_port_remove(mlxsw_sp, i);
+       kfree(mlxsw_sp->port_to_module);
+err_port_to_module_alloc:
        kfree(mlxsw_sp->ports);
        return err;
 }
@@ -3224,6 +3334,18 @@ static int mlxsw_sp_init(struct mlxsw_core *mlxsw_core,
                goto err_acl_init;
        }
 
+       err = mlxsw_sp_counter_pool_init(mlxsw_sp);
+       if (err) {
+               dev_err(mlxsw_sp->bus_info->dev, "Failed to init counter pool\n");
+               goto err_counter_pool_init;
+       }
+
+       err = mlxsw_sp_dpipe_init(mlxsw_sp);
+       if (err) {
+               dev_err(mlxsw_sp->bus_info->dev, "Failed to init pipeline debug\n");
+               goto err_dpipe_init;
+       }
+
        err = mlxsw_sp_ports_create(mlxsw_sp);
        if (err) {
                dev_err(mlxsw_sp->bus_info->dev, "Failed to create ports\n");
@@ -3233,6 +3355,10 @@ static int mlxsw_sp_init(struct mlxsw_core *mlxsw_core,
        return 0;
 
 err_ports_create:
+       mlxsw_sp_dpipe_fini(mlxsw_sp);
+err_dpipe_init:
+       mlxsw_sp_counter_pool_fini(mlxsw_sp);
+err_counter_pool_init:
        mlxsw_sp_acl_fini(mlxsw_sp);
 err_acl_init:
        mlxsw_sp_span_fini(mlxsw_sp);
@@ -3255,6 +3381,8 @@ static void mlxsw_sp_fini(struct mlxsw_core *mlxsw_core)
        struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
 
        mlxsw_sp_ports_remove(mlxsw_sp);
+       mlxsw_sp_dpipe_fini(mlxsw_sp);
+       mlxsw_sp_counter_pool_fini(mlxsw_sp);
        mlxsw_sp_acl_fini(mlxsw_sp);
        mlxsw_sp_span_fini(mlxsw_sp);
        mlxsw_sp_router_fini(mlxsw_sp);
@@ -3326,13 +3454,13 @@ bool mlxsw_sp_port_dev_check(const struct net_device *dev)
        return dev->netdev_ops == &mlxsw_sp_port_netdev_ops;
 }
 
-static int mlxsw_lower_dev_walk(struct net_device *lower_dev, void *data)
+static int mlxsw_sp_lower_dev_walk(struct net_device *lower_dev, void *data)
 {
-       struct mlxsw_sp_port **port = data;
+       struct mlxsw_sp_port **p_mlxsw_sp_port = data;
        int ret = 0;
 
        if (mlxsw_sp_port_dev_check(lower_dev)) {
-               *port = netdev_priv(lower_dev);
+               *p_mlxsw_sp_port = netdev_priv(lower_dev);
                ret = 1;
        }
 
@@ -3341,18 +3469,18 @@ static int mlxsw_lower_dev_walk(struct net_device *lower_dev, void *data)
 
 static struct mlxsw_sp_port *mlxsw_sp_port_dev_lower_find(struct net_device *dev)
 {
-       struct mlxsw_sp_port *port;
+       struct mlxsw_sp_port *mlxsw_sp_port;
 
        if (mlxsw_sp_port_dev_check(dev))
                return netdev_priv(dev);
 
-       port = NULL;
-       netdev_walk_all_lower_dev(dev, mlxsw_lower_dev_walk, &port);
+       mlxsw_sp_port = NULL;
+       netdev_walk_all_lower_dev(dev, mlxsw_sp_lower_dev_walk, &mlxsw_sp_port);
 
-       return port;
+       return mlxsw_sp_port;
 }
 
-static struct mlxsw_sp *mlxsw_sp_lower_get(struct net_device *dev)
+struct mlxsw_sp *mlxsw_sp_lower_get(struct net_device *dev)
 {
        struct mlxsw_sp_port *mlxsw_sp_port;
 
@@ -3362,15 +3490,16 @@ static struct mlxsw_sp *mlxsw_sp_lower_get(struct net_device *dev)
 
 static struct mlxsw_sp_port *mlxsw_sp_port_dev_lower_find_rcu(struct net_device *dev)
 {
-       struct mlxsw_sp_port *port;
+       struct mlxsw_sp_port *mlxsw_sp_port;
 
        if (mlxsw_sp_port_dev_check(dev))
                return netdev_priv(dev);
 
-       port = NULL;
-       netdev_walk_all_lower_dev_rcu(dev, mlxsw_lower_dev_walk, &port);
+       mlxsw_sp_port = NULL;
+       netdev_walk_all_lower_dev_rcu(dev, mlxsw_sp_lower_dev_walk,
+                                     &mlxsw_sp_port);
 
-       return port;
+       return mlxsw_sp_port;
 }
 
 struct mlxsw_sp_port *mlxsw_sp_port_lower_dev_hold(struct net_device *dev)
@@ -3390,546 +3519,6 @@ void mlxsw_sp_port_dev_put(struct mlxsw_sp_port *mlxsw_sp_port)
        dev_put(mlxsw_sp_port->dev);
 }
 
-static bool mlxsw_sp_rif_should_config(struct mlxsw_sp_rif *r,
-                                      unsigned long event)
-{
-       switch (event) {
-       case NETDEV_UP:
-               if (!r)
-                       return true;
-               r->ref_count++;
-               return false;
-       case NETDEV_DOWN:
-               if (r && --r->ref_count == 0)
-                       return true;
-               /* It is possible we already removed the RIF ourselves
-                * if it was assigned to a netdev that is now a bridge
-                * or LAG slave.
-                */
-               return false;
-       }
-
-       return false;
-}
-
-static int mlxsw_sp_avail_rif_get(struct mlxsw_sp *mlxsw_sp)
-{
-       int i;
-
-       for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS); i++)
-               if (!mlxsw_sp->rifs[i])
-                       return i;
-
-       return MLXSW_SP_INVALID_RIF;
-}
-
-static void mlxsw_sp_vport_rif_sp_attr_get(struct mlxsw_sp_port *mlxsw_sp_vport,
-                                          bool *p_lagged, u16 *p_system_port)
-{
-       u8 local_port = mlxsw_sp_vport->local_port;
-
-       *p_lagged = mlxsw_sp_vport->lagged;
-       *p_system_port = *p_lagged ? mlxsw_sp_vport->lag_id : local_port;
-}
-
-static int mlxsw_sp_vport_rif_sp_op(struct mlxsw_sp_port *mlxsw_sp_vport,
-                                   struct net_device *l3_dev, u16 rif,
-                                   bool create)
-{
-       struct mlxsw_sp *mlxsw_sp = mlxsw_sp_vport->mlxsw_sp;
-       bool lagged = mlxsw_sp_vport->lagged;
-       char ritr_pl[MLXSW_REG_RITR_LEN];
-       u16 system_port;
-
-       mlxsw_reg_ritr_pack(ritr_pl, create, MLXSW_REG_RITR_SP_IF, rif,
-                           l3_dev->mtu, l3_dev->dev_addr);
-
-       mlxsw_sp_vport_rif_sp_attr_get(mlxsw_sp_vport, &lagged, &system_port);
-       mlxsw_reg_ritr_sp_if_pack(ritr_pl, lagged, system_port,
-                                 mlxsw_sp_vport_vid_get(mlxsw_sp_vport));
-
-       return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
-}
-
-static void mlxsw_sp_vport_rif_sp_leave(struct mlxsw_sp_port *mlxsw_sp_vport);
-
-static struct mlxsw_sp_fid *
-mlxsw_sp_rfid_alloc(u16 fid, struct net_device *l3_dev)
-{
-       struct mlxsw_sp_fid *f;
-
-       f = kzalloc(sizeof(*f), GFP_KERNEL);
-       if (!f)
-               return NULL;
-
-       f->leave = mlxsw_sp_vport_rif_sp_leave;
-       f->ref_count = 0;
-       f->dev = l3_dev;
-       f->fid = fid;
-
-       return f;
-}
-
-static struct mlxsw_sp_rif *
-mlxsw_sp_rif_alloc(u16 rif, struct net_device *l3_dev, struct mlxsw_sp_fid *f)
-{
-       struct mlxsw_sp_rif *r;
-
-       r = kzalloc(sizeof(*r), GFP_KERNEL);
-       if (!r)
-               return NULL;
-
-       INIT_LIST_HEAD(&r->nexthop_list);
-       INIT_LIST_HEAD(&r->neigh_list);
-       ether_addr_copy(r->addr, l3_dev->dev_addr);
-       r->mtu = l3_dev->mtu;
-       r->ref_count = 1;
-       r->dev = l3_dev;
-       r->rif = rif;
-       r->f = f;
-
-       return r;
-}
-
-static struct mlxsw_sp_rif *
-mlxsw_sp_vport_rif_sp_create(struct mlxsw_sp_port *mlxsw_sp_vport,
-                            struct net_device *l3_dev)
-{
-       struct mlxsw_sp *mlxsw_sp = mlxsw_sp_vport->mlxsw_sp;
-       struct mlxsw_sp_fid *f;
-       struct mlxsw_sp_rif *r;
-       u16 fid, rif;
-       int err;
-
-       rif = mlxsw_sp_avail_rif_get(mlxsw_sp);
-       if (rif == MLXSW_SP_INVALID_RIF)
-               return ERR_PTR(-ERANGE);
-
-       err = mlxsw_sp_vport_rif_sp_op(mlxsw_sp_vport, l3_dev, rif, true);
-       if (err)
-               return ERR_PTR(err);
-
-       fid = mlxsw_sp_rif_sp_to_fid(rif);
-       err = mlxsw_sp_rif_fdb_op(mlxsw_sp, l3_dev->dev_addr, fid, true);
-       if (err)
-               goto err_rif_fdb_op;
-
-       f = mlxsw_sp_rfid_alloc(fid, l3_dev);
-       if (!f) {
-               err = -ENOMEM;
-               goto err_rfid_alloc;
-       }
-
-       r = mlxsw_sp_rif_alloc(rif, l3_dev, f);
-       if (!r) {
-               err = -ENOMEM;
-               goto err_rif_alloc;
-       }
-
-       f->r = r;
-       mlxsw_sp->rifs[rif] = r;
-
-       return r;
-
-err_rif_alloc:
-       kfree(f);
-err_rfid_alloc:
-       mlxsw_sp_rif_fdb_op(mlxsw_sp, l3_dev->dev_addr, fid, false);
-err_rif_fdb_op:
-       mlxsw_sp_vport_rif_sp_op(mlxsw_sp_vport, l3_dev, rif, false);
-       return ERR_PTR(err);
-}
-
-static void mlxsw_sp_vport_rif_sp_destroy(struct mlxsw_sp_port *mlxsw_sp_vport,
-                                         struct mlxsw_sp_rif *r)
-{
-       struct mlxsw_sp *mlxsw_sp = mlxsw_sp_vport->mlxsw_sp;
-       struct net_device *l3_dev = r->dev;
-       struct mlxsw_sp_fid *f = r->f;
-       u16 fid = f->fid;
-       u16 rif = r->rif;
-
-       mlxsw_sp_router_rif_gone_sync(mlxsw_sp, r);
-
-       mlxsw_sp->rifs[rif] = NULL;
-       f->r = NULL;
-
-       kfree(r);
-
-       kfree(f);
-
-       mlxsw_sp_rif_fdb_op(mlxsw_sp, l3_dev->dev_addr, fid, false);
-
-       mlxsw_sp_vport_rif_sp_op(mlxsw_sp_vport, l3_dev, rif, false);
-}
-
-static int mlxsw_sp_vport_rif_sp_join(struct mlxsw_sp_port *mlxsw_sp_vport,
-                                     struct net_device *l3_dev)
-{
-       struct mlxsw_sp *mlxsw_sp = mlxsw_sp_vport->mlxsw_sp;
-       struct mlxsw_sp_rif *r;
-
-       r = mlxsw_sp_rif_find_by_dev(mlxsw_sp, l3_dev);
-       if (!r) {
-               r = mlxsw_sp_vport_rif_sp_create(mlxsw_sp_vport, l3_dev);
-               if (IS_ERR(r))
-                       return PTR_ERR(r);
-       }
-
-       mlxsw_sp_vport_fid_set(mlxsw_sp_vport, r->f);
-       r->f->ref_count++;
-
-       netdev_dbg(mlxsw_sp_vport->dev, "Joined FID=%d\n", r->f->fid);
-
-       return 0;
-}
-
-static void mlxsw_sp_vport_rif_sp_leave(struct mlxsw_sp_port *mlxsw_sp_vport)
-{
-       struct mlxsw_sp_fid *f = mlxsw_sp_vport_fid_get(mlxsw_sp_vport);
-
-       netdev_dbg(mlxsw_sp_vport->dev, "Left FID=%d\n", f->fid);
-
-       mlxsw_sp_vport_fid_set(mlxsw_sp_vport, NULL);
-       if (--f->ref_count == 0)
-               mlxsw_sp_vport_rif_sp_destroy(mlxsw_sp_vport, f->r);
-}
-
-static int mlxsw_sp_inetaddr_vport_event(struct net_device *l3_dev,
-                                        struct net_device *port_dev,
-                                        unsigned long event, u16 vid)
-{
-       struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(port_dev);
-       struct mlxsw_sp_port *mlxsw_sp_vport;
-
-       mlxsw_sp_vport = mlxsw_sp_port_vport_find(mlxsw_sp_port, vid);
-       if (WARN_ON(!mlxsw_sp_vport))
-               return -EINVAL;
-
-       switch (event) {
-       case NETDEV_UP:
-               return mlxsw_sp_vport_rif_sp_join(mlxsw_sp_vport, l3_dev);
-       case NETDEV_DOWN:
-               mlxsw_sp_vport_rif_sp_leave(mlxsw_sp_vport);
-               break;
-       }
-
-       return 0;
-}
-
-static int mlxsw_sp_inetaddr_port_event(struct net_device *port_dev,
-                                       unsigned long event)
-{
-       if (netif_is_bridge_port(port_dev) || netif_is_lag_port(port_dev))
-               return 0;
-
-       return mlxsw_sp_inetaddr_vport_event(port_dev, port_dev, event, 1);
-}
-
-static int __mlxsw_sp_inetaddr_lag_event(struct net_device *l3_dev,
-                                        struct net_device *lag_dev,
-                                        unsigned long event, u16 vid)
-{
-       struct net_device *port_dev;
-       struct list_head *iter;
-       int err;
-
-       netdev_for_each_lower_dev(lag_dev, port_dev, iter) {
-               if (mlxsw_sp_port_dev_check(port_dev)) {
-                       err = mlxsw_sp_inetaddr_vport_event(l3_dev, port_dev,
-                                                           event, vid);
-                       if (err)
-                               return err;
-               }
-       }
-
-       return 0;
-}
-
-static int mlxsw_sp_inetaddr_lag_event(struct net_device *lag_dev,
-                                      unsigned long event)
-{
-       if (netif_is_bridge_port(lag_dev))
-               return 0;
-
-       return __mlxsw_sp_inetaddr_lag_event(lag_dev, lag_dev, event, 1);
-}
-
-static struct mlxsw_sp_fid *mlxsw_sp_bridge_fid_get(struct mlxsw_sp *mlxsw_sp,
-                                                   struct net_device *l3_dev)
-{
-       u16 fid;
-
-       if (is_vlan_dev(l3_dev))
-               fid = vlan_dev_vlan_id(l3_dev);
-       else if (mlxsw_sp->master_bridge.dev == l3_dev)
-               fid = 1;
-       else
-               return mlxsw_sp_vfid_find(mlxsw_sp, l3_dev);
-
-       return mlxsw_sp_fid_find(mlxsw_sp, fid);
-}
-
-static enum mlxsw_flood_table_type mlxsw_sp_flood_table_type_get(u16 fid)
-{
-       return mlxsw_sp_fid_is_vfid(fid) ? MLXSW_REG_SFGC_TABLE_TYPE_FID :
-              MLXSW_REG_SFGC_TABLE_TYPE_FID_OFFEST;
-}
-
-static u16 mlxsw_sp_flood_table_index_get(u16 fid)
-{
-       return mlxsw_sp_fid_is_vfid(fid) ? mlxsw_sp_fid_to_vfid(fid) : fid;
-}
-
-static int mlxsw_sp_router_port_flood_set(struct mlxsw_sp *mlxsw_sp, u16 fid,
-                                         bool set)
-{
-       enum mlxsw_flood_table_type table_type;
-       char *sftr_pl;
-       u16 index;
-       int err;
-
-       sftr_pl = kmalloc(MLXSW_REG_SFTR_LEN, GFP_KERNEL);
-       if (!sftr_pl)
-               return -ENOMEM;
-
-       table_type = mlxsw_sp_flood_table_type_get(fid);
-       index = mlxsw_sp_flood_table_index_get(fid);
-       mlxsw_reg_sftr_pack(sftr_pl, MLXSW_SP_FLOOD_TABLE_BC, index, table_type,
-                           1, MLXSW_PORT_ROUTER_PORT, set);
-       err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sftr), sftr_pl);
-
-       kfree(sftr_pl);
-       return err;
-}
-
-static enum mlxsw_reg_ritr_if_type mlxsw_sp_rif_type_get(u16 fid)
-{
-       if (mlxsw_sp_fid_is_vfid(fid))
-               return MLXSW_REG_RITR_FID_IF;
-       else
-               return MLXSW_REG_RITR_VLAN_IF;
-}
-
-static int mlxsw_sp_rif_bridge_op(struct mlxsw_sp *mlxsw_sp,
-                                 struct net_device *l3_dev,
-                                 u16 fid, u16 rif,
-                                 bool create)
-{
-       enum mlxsw_reg_ritr_if_type rif_type;
-       char ritr_pl[MLXSW_REG_RITR_LEN];
-
-       rif_type = mlxsw_sp_rif_type_get(fid);
-       mlxsw_reg_ritr_pack(ritr_pl, create, rif_type, rif, l3_dev->mtu,
-                           l3_dev->dev_addr);
-       mlxsw_reg_ritr_fid_set(ritr_pl, rif_type, fid);
-
-       return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
-}
-
-static int mlxsw_sp_rif_bridge_create(struct mlxsw_sp *mlxsw_sp,
-                                     struct net_device *l3_dev,
-                                     struct mlxsw_sp_fid *f)
-{
-       struct mlxsw_sp_rif *r;
-       u16 rif;
-       int err;
-
-       rif = mlxsw_sp_avail_rif_get(mlxsw_sp);
-       if (rif == MLXSW_SP_INVALID_RIF)
-               return -ERANGE;
-
-       err = mlxsw_sp_router_port_flood_set(mlxsw_sp, f->fid, true);
-       if (err)
-               return err;
-
-       err = mlxsw_sp_rif_bridge_op(mlxsw_sp, l3_dev, f->fid, rif, true);
-       if (err)
-               goto err_rif_bridge_op;
-
-       err = mlxsw_sp_rif_fdb_op(mlxsw_sp, l3_dev->dev_addr, f->fid, true);
-       if (err)
-               goto err_rif_fdb_op;
-
-       r = mlxsw_sp_rif_alloc(rif, l3_dev, f);
-       if (!r) {
-               err = -ENOMEM;
-               goto err_rif_alloc;
-       }
-
-       f->r = r;
-       mlxsw_sp->rifs[rif] = r;
-
-       netdev_dbg(l3_dev, "RIF=%d created\n", rif);
-
-       return 0;
-
-err_rif_alloc:
-       mlxsw_sp_rif_fdb_op(mlxsw_sp, l3_dev->dev_addr, f->fid, false);
-err_rif_fdb_op:
-       mlxsw_sp_rif_bridge_op(mlxsw_sp, l3_dev, f->fid, rif, false);
-err_rif_bridge_op:
-       mlxsw_sp_router_port_flood_set(mlxsw_sp, f->fid, false);
-       return err;
-}
-
-void mlxsw_sp_rif_bridge_destroy(struct mlxsw_sp *mlxsw_sp,
-                                struct mlxsw_sp_rif *r)
-{
-       struct net_device *l3_dev = r->dev;
-       struct mlxsw_sp_fid *f = r->f;
-       u16 rif = r->rif;
-
-       mlxsw_sp_router_rif_gone_sync(mlxsw_sp, r);
-
-       mlxsw_sp->rifs[rif] = NULL;
-       f->r = NULL;
-
-       kfree(r);
-
-       mlxsw_sp_rif_fdb_op(mlxsw_sp, l3_dev->dev_addr, f->fid, false);
-
-       mlxsw_sp_rif_bridge_op(mlxsw_sp, l3_dev, f->fid, rif, false);
-
-       mlxsw_sp_router_port_flood_set(mlxsw_sp, f->fid, false);
-
-       netdev_dbg(l3_dev, "RIF=%d destroyed\n", rif);
-}
-
-static int mlxsw_sp_inetaddr_bridge_event(struct net_device *l3_dev,
-                                         struct net_device *br_dev,
-                                         unsigned long event)
-{
-       struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(l3_dev);
-       struct mlxsw_sp_fid *f;
-
-       /* FID can either be an actual FID if the L3 device is the
-        * VLAN-aware bridge or a VLAN device on top. Otherwise, the
-        * L3 device is a VLAN-unaware bridge and we get a vFID.
-        */
-       f = mlxsw_sp_bridge_fid_get(mlxsw_sp, l3_dev);
-       if (WARN_ON(!f))
-               return -EINVAL;
-
-       switch (event) {
-       case NETDEV_UP:
-               return mlxsw_sp_rif_bridge_create(mlxsw_sp, l3_dev, f);
-       case NETDEV_DOWN:
-               mlxsw_sp_rif_bridge_destroy(mlxsw_sp, f->r);
-               break;
-       }
-
-       return 0;
-}
-
-static int mlxsw_sp_inetaddr_vlan_event(struct net_device *vlan_dev,
-                                       unsigned long event)
-{
-       struct net_device *real_dev = vlan_dev_real_dev(vlan_dev);
-       struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(vlan_dev);
-       u16 vid = vlan_dev_vlan_id(vlan_dev);
-
-       if (mlxsw_sp_port_dev_check(real_dev))
-               return mlxsw_sp_inetaddr_vport_event(vlan_dev, real_dev, event,
-                                                    vid);
-       else if (netif_is_lag_master(real_dev))
-               return __mlxsw_sp_inetaddr_lag_event(vlan_dev, real_dev, event,
-                                                    vid);
-       else if (netif_is_bridge_master(real_dev) &&
-                mlxsw_sp->master_bridge.dev == real_dev)
-               return mlxsw_sp_inetaddr_bridge_event(vlan_dev, real_dev,
-                                                     event);
-
-       return 0;
-}
-
-static int mlxsw_sp_inetaddr_event(struct notifier_block *unused,
-                                  unsigned long event, void *ptr)
-{
-       struct in_ifaddr *ifa = (struct in_ifaddr *) ptr;
-       struct net_device *dev = ifa->ifa_dev->dev;
-       struct mlxsw_sp *mlxsw_sp;
-       struct mlxsw_sp_rif *r;
-       int err = 0;
-
-       mlxsw_sp = mlxsw_sp_lower_get(dev);
-       if (!mlxsw_sp)
-               goto out;
-
-       r = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
-       if (!mlxsw_sp_rif_should_config(r, event))
-               goto out;
-
-       if (mlxsw_sp_port_dev_check(dev))
-               err = mlxsw_sp_inetaddr_port_event(dev, event);
-       else if (netif_is_lag_master(dev))
-               err = mlxsw_sp_inetaddr_lag_event(dev, event);
-       else if (netif_is_bridge_master(dev))
-               err = mlxsw_sp_inetaddr_bridge_event(dev, dev, event);
-       else if (is_vlan_dev(dev))
-               err = mlxsw_sp_inetaddr_vlan_event(dev, event);
-
-out:
-       return notifier_from_errno(err);
-}
-
-static int mlxsw_sp_rif_edit(struct mlxsw_sp *mlxsw_sp, u16 rif,
-                            const char *mac, int mtu)
-{
-       char ritr_pl[MLXSW_REG_RITR_LEN];
-       int err;
-
-       mlxsw_reg_ritr_rif_pack(ritr_pl, rif);
-       err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
-       if (err)
-               return err;
-
-       mlxsw_reg_ritr_mtu_set(ritr_pl, mtu);
-       mlxsw_reg_ritr_if_mac_memcpy_to(ritr_pl, mac);
-       mlxsw_reg_ritr_op_set(ritr_pl, MLXSW_REG_RITR_RIF_CREATE);
-       return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
-}
-
-static int mlxsw_sp_netdevice_router_port_event(struct net_device *dev)
-{
-       struct mlxsw_sp *mlxsw_sp;
-       struct mlxsw_sp_rif *r;
-       int err;
-
-       mlxsw_sp = mlxsw_sp_lower_get(dev);
-       if (!mlxsw_sp)
-               return 0;
-
-       r = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
-       if (!r)
-               return 0;
-
-       err = mlxsw_sp_rif_fdb_op(mlxsw_sp, r->addr, r->f->fid, false);
-       if (err)
-               return err;
-
-       err = mlxsw_sp_rif_edit(mlxsw_sp, r->rif, dev->dev_addr, dev->mtu);
-       if (err)
-               goto err_rif_edit;
-
-       err = mlxsw_sp_rif_fdb_op(mlxsw_sp, dev->dev_addr, r->f->fid, true);
-       if (err)
-               goto err_rif_fdb_op;
-
-       ether_addr_copy(r->addr, dev->dev_addr);
-       r->mtu = dev->mtu;
-
-       netdev_dbg(dev, "Updated RIF=%d\n", r->rif);
-
-       return 0;
-
-err_rif_fdb_op:
-       mlxsw_sp_rif_edit(mlxsw_sp, r->rif, r->addr, r->mtu);
-err_rif_edit:
-       mlxsw_sp_rif_fdb_op(mlxsw_sp, r->addr, r->f->fid, true);
-       return err;
-}
-
 static bool mlxsw_sp_lag_port_fid_member(struct mlxsw_sp_port *lag_port,
                                         u16 fid)
 {
@@ -4220,7 +3809,7 @@ static int mlxsw_sp_port_lag_index_get(struct mlxsw_sp *mlxsw_sp,
 
 static void
 mlxsw_sp_port_pvid_vport_lag_join(struct mlxsw_sp_port *mlxsw_sp_port,
-                                 u16 lag_id)
+                                 struct net_device *lag_dev, u16 lag_id)
 {
        struct mlxsw_sp_port *mlxsw_sp_vport;
        struct mlxsw_sp_fid *f;
@@ -4238,6 +3827,7 @@ mlxsw_sp_port_pvid_vport_lag_join(struct mlxsw_sp_port *mlxsw_sp_port,
 
        mlxsw_sp_vport->lag_id = lag_id;
        mlxsw_sp_vport->lagged = 1;
+       mlxsw_sp_vport->dev = lag_dev;
 }
 
 static void
@@ -4254,6 +3844,7 @@ mlxsw_sp_port_pvid_vport_lag_leave(struct mlxsw_sp_port *mlxsw_sp_port)
        if (f)
                f->leave(mlxsw_sp_vport);
 
+       mlxsw_sp_vport->dev = mlxsw_sp_port->dev;
        mlxsw_sp_vport->lagged = 0;
 }
 
@@ -4293,7 +3884,7 @@ static int mlxsw_sp_port_lag_join(struct mlxsw_sp_port *mlxsw_sp_port,
        mlxsw_sp_port->lagged = 1;
        lag->ref_count++;
 
-       mlxsw_sp_port_pvid_vport_lag_join(mlxsw_sp_port, lag_id);
+       mlxsw_sp_port_pvid_vport_lag_join(mlxsw_sp_port, lag_dev, lag_id);
 
        return 0;
 
@@ -4421,7 +4012,8 @@ static int mlxsw_sp_netdevice_port_upper_event(struct net_device *dev,
                upper_dev = info->upper_dev;
                if (!is_vlan_dev(upper_dev) &&
                    !netif_is_lag_master(upper_dev) &&
-                   !netif_is_bridge_master(upper_dev))
+                   !netif_is_bridge_master(upper_dev) &&
+                   !netif_is_l3_master(upper_dev))
                        return -EINVAL;
                if (!info->linking)
                        break;
@@ -4461,6 +4053,11 @@ static int mlxsw_sp_netdevice_port_upper_event(struct net_device *dev,
                        else
                                mlxsw_sp_port_lag_leave(mlxsw_sp_port,
                                                        upper_dev);
+               } else if (netif_is_l3_master(upper_dev)) {
+                       if (info->linking)
+                               err = mlxsw_sp_port_vrf_join(mlxsw_sp_port);
+                       else
+                               mlxsw_sp_port_vrf_leave(mlxsw_sp_port);
                } else {
                        err = -EINVAL;
                        WARN_ON(1);
@@ -4552,8 +4149,8 @@ static void mlxsw_sp_master_bridge_vlan_unlink(struct mlxsw_sp *mlxsw_sp,
        struct mlxsw_sp_fid *f;
 
        f = mlxsw_sp_fid_find(mlxsw_sp, fid);
-       if (f && f->r)
-               mlxsw_sp_rif_bridge_destroy(mlxsw_sp, f->r);
+       if (f && f->rif)
+               mlxsw_sp_rif_bridge_destroy(mlxsw_sp, f->rif);
        if (f && --f->ref_count == 0)
                mlxsw_sp_fid_destroy(mlxsw_sp, f);
 }
@@ -4564,33 +4161,46 @@ static int mlxsw_sp_netdevice_bridge_event(struct net_device *br_dev,
        struct netdev_notifier_changeupper_info *info;
        struct net_device *upper_dev;
        struct mlxsw_sp *mlxsw_sp;
-       int err;
+       int err = 0;
 
        mlxsw_sp = mlxsw_sp_lower_get(br_dev);
        if (!mlxsw_sp)
                return 0;
-       if (br_dev != mlxsw_sp->master_bridge.dev)
-               return 0;
 
        info = ptr;
 
        switch (event) {
+       case NETDEV_PRECHANGEUPPER:
+               upper_dev = info->upper_dev;
+               if (!is_vlan_dev(upper_dev) && !netif_is_l3_master(upper_dev))
+                       return -EINVAL;
+               if (is_vlan_dev(upper_dev) &&
+                   br_dev != mlxsw_sp->master_bridge.dev)
+                       return -EINVAL;
+               break;
        case NETDEV_CHANGEUPPER:
                upper_dev = info->upper_dev;
-               if (!is_vlan_dev(upper_dev))
-                       break;
-               if (info->linking) {
-                       err = mlxsw_sp_master_bridge_vlan_link(mlxsw_sp,
-                                                              upper_dev);
-                       if (err)
-                               return err;
+               if (is_vlan_dev(upper_dev)) {
+                       if (info->linking)
+                               err = mlxsw_sp_master_bridge_vlan_link(mlxsw_sp,
+                                                                      upper_dev);
+                       else
+                               mlxsw_sp_master_bridge_vlan_unlink(mlxsw_sp,
+                                                                  upper_dev);
+               } else if (netif_is_l3_master(upper_dev)) {
+                       if (info->linking)
+                               err = mlxsw_sp_bridge_vrf_join(mlxsw_sp,
+                                                              br_dev);
+                       else
+                               mlxsw_sp_bridge_vrf_leave(mlxsw_sp, br_dev);
                } else {
-                       mlxsw_sp_master_bridge_vlan_unlink(mlxsw_sp, upper_dev);
+                       err = -EINVAL;
+                       WARN_ON(1);
                }
                break;
        }
 
-       return 0;
+       return err;
 }
 
 static u16 mlxsw_sp_avail_vfid_get(const struct mlxsw_sp *mlxsw_sp)
@@ -4657,8 +4267,8 @@ static void mlxsw_sp_vfid_destroy(struct mlxsw_sp *mlxsw_sp,
        clear_bit(vfid, mlxsw_sp->vfids.mapped);
        list_del(&f->list);
 
-       if (f->r)
-               mlxsw_sp_rif_bridge_destroy(mlxsw_sp, f->r);
+       if (f->rif)
+               mlxsw_sp_rif_bridge_destroy(mlxsw_sp, f->rif);
 
        kfree(f);
 
@@ -4810,33 +4420,43 @@ static int mlxsw_sp_netdevice_vport_event(struct net_device *dev,
        int err = 0;
 
        mlxsw_sp_vport = mlxsw_sp_port_vport_find(mlxsw_sp_port, vid);
+       if (!mlxsw_sp_vport)
+               return 0;
 
        switch (event) {
        case NETDEV_PRECHANGEUPPER:
                upper_dev = info->upper_dev;
-               if (!netif_is_bridge_master(upper_dev))
+               if (!netif_is_bridge_master(upper_dev) &&
+                   !netif_is_l3_master(upper_dev))
                        return -EINVAL;
                if (!info->linking)
                        break;
                /* We can't have multiple VLAN interfaces configured on
                 * the same port and being members in the same bridge.
                 */
-               if (!mlxsw_sp_port_master_bridge_check(mlxsw_sp_port,
+               if (netif_is_bridge_master(upper_dev) &&
+                   !mlxsw_sp_port_master_bridge_check(mlxsw_sp_port,
                                                       upper_dev))
                        return -EINVAL;
                break;
        case NETDEV_CHANGEUPPER:
                upper_dev = info->upper_dev;
-               if (info->linking) {
-                       if (WARN_ON(!mlxsw_sp_vport))
-                               return -EINVAL;
-                       err = mlxsw_sp_vport_bridge_join(mlxsw_sp_vport,
-                                                        upper_dev);
+               if (netif_is_bridge_master(upper_dev)) {
+                       if (info->linking)
+                               err = mlxsw_sp_vport_bridge_join(mlxsw_sp_vport,
+                                                                upper_dev);
+                       else
+                               mlxsw_sp_vport_bridge_leave(mlxsw_sp_vport);
+               } else if (netif_is_l3_master(upper_dev)) {
+                       if (info->linking)
+                               err = mlxsw_sp_vport_vrf_join(mlxsw_sp_vport);
+                       else
+                               mlxsw_sp_vport_vrf_leave(mlxsw_sp_vport);
                } else {
-                       if (!mlxsw_sp_vport)
-                               return 0;
-                       mlxsw_sp_vport_bridge_leave(mlxsw_sp_vport);
+                       err = -EINVAL;
+                       WARN_ON(1);
                }
+               break;
        }
 
        return err;
@@ -4862,6 +4482,47 @@ static int mlxsw_sp_netdevice_lag_vport_event(struct net_device *lag_dev,
        return 0;
 }
 
+static int mlxsw_sp_netdevice_bridge_vlan_event(struct net_device *vlan_dev,
+                                               unsigned long event, void *ptr)
+{
+       struct netdev_notifier_changeupper_info *info;
+       struct mlxsw_sp *mlxsw_sp;
+       int err = 0;
+
+       mlxsw_sp = mlxsw_sp_lower_get(vlan_dev);
+       if (!mlxsw_sp)
+               return 0;
+
+       info = ptr;
+
+       switch (event) {
+       case NETDEV_PRECHANGEUPPER:
+               /* VLAN devices are only allowed on top of the
+                * VLAN-aware bridge.
+                */
+               if (WARN_ON(vlan_dev_real_dev(vlan_dev) !=
+                           mlxsw_sp->master_bridge.dev))
+                       return -EINVAL;
+               if (!netif_is_l3_master(info->upper_dev))
+                       return -EINVAL;
+               break;
+       case NETDEV_CHANGEUPPER:
+               if (netif_is_l3_master(info->upper_dev)) {
+                       if (info->linking)
+                               err = mlxsw_sp_bridge_vrf_join(mlxsw_sp,
+                                                              vlan_dev);
+                       else
+                               mlxsw_sp_bridge_vrf_leave(mlxsw_sp, vlan_dev);
+               } else {
+                       err = -EINVAL;
+                       WARN_ON(1);
+               }
+               break;
+       }
+
+       return err;
+}
+
 static int mlxsw_sp_netdevice_vlan_event(struct net_device *vlan_dev,
                                         unsigned long event, void *ptr)
 {
@@ -4874,6 +4535,9 @@ static int mlxsw_sp_netdevice_vlan_event(struct net_device *vlan_dev,
        else if (netif_is_lag_master(real_dev))
                return mlxsw_sp_netdevice_lag_vport_event(real_dev, event, ptr,
                                                          vid);
+       else if (netif_is_bridge_master(real_dev))
+               return mlxsw_sp_netdevice_bridge_vlan_event(vlan_dev, event,
+                                                           ptr);
 
        return 0;
 }
index 13ec85e7c392f8941ecf6441333d416ba4609f3a..c245e4c3d9adc3a36a1a8451f3348eeaa68af5ce 100644
 #define MLXSW_SP_VFID_MAX 1024 /* Bridged VLAN interfaces */
 
 #define MLXSW_SP_RFID_BASE 15360
-#define MLXSW_SP_INVALID_RIF 0xffff
 
 #define MLXSW_SP_MID_MAX 7000
 
 #define MLXSW_SP_PORTS_PER_CLUSTER_MAX 4
 
-#define MLXSW_SP_LPM_TREE_MIN 2 /* trees 0 and 1 are reserved */
-#define MLXSW_SP_LPM_TREE_MAX 22
-#define MLXSW_SP_LPM_TREE_COUNT (MLXSW_SP_LPM_TREE_MAX - MLXSW_SP_LPM_TREE_MIN)
-
 #define MLXSW_SP_PORT_BASE_SPEED 25000 /* Mb/s */
 
-#define MLXSW_SP_BYTES_PER_CELL 96
-
-#define MLXSW_SP_BYTES_TO_CELLS(b) DIV_ROUND_UP(b, MLXSW_SP_BYTES_PER_CELL)
-#define MLXSW_SP_CELLS_TO_BYTES(c) (c * MLXSW_SP_BYTES_PER_CELL)
-
 #define MLXSW_SP_KVD_LINEAR_SIZE 65536 /* entries */
 #define MLXSW_SP_KVD_GRANULARITY 128
 
-/* Maximum delay buffer needed in case of PAUSE frames, in cells.
- * Assumes 100m cable and maximum MTU.
- */
-#define MLXSW_SP_PAUSE_DELAY 612
-
-#define MLXSW_SP_CELL_FACTOR 2 /* 2 * cell_size / (IPG + cell_size + 1) */
-
-static inline u16 mlxsw_sp_pfc_delay_get(int mtu, u16 delay)
-{
-       delay = MLXSW_SP_BYTES_TO_CELLS(DIV_ROUND_UP(delay, BITS_PER_BYTE));
-       return MLXSW_SP_CELL_FACTOR * delay + MLXSW_SP_BYTES_TO_CELLS(mtu);
-}
-
 struct mlxsw_sp_port;
+struct mlxsw_sp_rif;
 
 struct mlxsw_sp_upper {
        struct net_device *dev;
@@ -103,21 +81,10 @@ struct mlxsw_sp_fid {
        struct list_head list;
        unsigned int ref_count;
        struct net_device *dev;
-       struct mlxsw_sp_rif *r;
+       struct mlxsw_sp_rif *rif;
        u16 fid;
 };
 
-struct mlxsw_sp_rif {
-       struct list_head nexthop_list;
-       struct list_head neigh_list;
-       struct net_device *dev;
-       unsigned int ref_count;
-       struct mlxsw_sp_fid *f;
-       unsigned char addr[ETH_ALEN];
-       int mtu;
-       u16 rif;
-};
-
 struct mlxsw_sp_mid {
        struct list_head list;
        unsigned char addr[ETH_ALEN];
@@ -141,16 +108,6 @@ static inline bool mlxsw_sp_fid_is_vfid(u16 fid)
        return fid >= MLXSW_SP_VFID_BASE && fid < MLXSW_SP_RFID_BASE;
 }
 
-static inline bool mlxsw_sp_fid_is_rfid(u16 fid)
-{
-       return fid >= MLXSW_SP_RFID_BASE;
-}
-
-static inline u16 mlxsw_sp_rif_sp_to_fid(u16 rif)
-{
-       return MLXSW_SP_RFID_BASE + rif;
-}
-
 struct mlxsw_sp_sb_pr {
        enum mlxsw_reg_sbpr_mode mode;
        u32 size;
@@ -177,12 +134,15 @@ struct mlxsw_sp_sb_pm {
 #define MLXSW_SP_SB_POOL_COUNT 4
 #define MLXSW_SP_SB_TC_COUNT   8
 
+struct mlxsw_sp_sb_port {
+       struct mlxsw_sp_sb_cm cms[2][MLXSW_SP_SB_TC_COUNT];
+       struct mlxsw_sp_sb_pm pms[2][MLXSW_SP_SB_POOL_COUNT];
+};
+
 struct mlxsw_sp_sb {
        struct mlxsw_sp_sb_pr prs[2][MLXSW_SP_SB_POOL_COUNT];
-       struct {
-               struct mlxsw_sp_sb_cm cms[2][MLXSW_SP_SB_TC_COUNT];
-               struct mlxsw_sp_sb_pm pms[2][MLXSW_SP_SB_POOL_COUNT];
-       } ports[MLXSW_PORT_MAX_PORTS];
+       struct mlxsw_sp_sb_port *ports;
+       u32 cell_size;
 };
 
 #define MLXSW_SP_PREFIX_COUNT (sizeof(struct in6_addr) * BITS_PER_BYTE)
@@ -207,11 +167,9 @@ struct mlxsw_sp_fib;
 
 struct mlxsw_sp_vr {
        u16 id; /* virtual router ID */
-       bool used;
-       enum mlxsw_sp_l3proto proto;
        u32 tb_id; /* kernel fib table id */
-       struct mlxsw_sp_lpm_tree *lpm_tree;
-       struct mlxsw_sp_fib *fib;
+       unsigned int rif_count;
+       struct mlxsw_sp_fib *fib4;
 };
 
 enum mlxsw_sp_span_type {
@@ -253,11 +211,14 @@ struct mlxsw_sp_port_mall_tc_entry {
 };
 
 struct mlxsw_sp_router {
-       struct mlxsw_sp_lpm_tree lpm_trees[MLXSW_SP_LPM_TREE_COUNT];
        struct mlxsw_sp_vr *vrs;
        struct rhashtable neigh_ht;
        struct rhashtable nexthop_group_ht;
        struct rhashtable nexthop_ht;
+       struct {
+               struct mlxsw_sp_lpm_tree *trees;
+               unsigned int tree_count;
+       } lpm;
        struct {
                struct delayed_work dw;
                unsigned long interval; /* ms */
@@ -269,6 +230,7 @@ struct mlxsw_sp_router {
 };
 
 struct mlxsw_sp_acl;
+struct mlxsw_sp_counter_pool;
 
 struct mlxsw_sp {
        struct {
@@ -296,7 +258,7 @@ struct mlxsw_sp {
        u32 ageing_time;
        struct mlxsw_sp_upper master_bridge;
        struct mlxsw_sp_upper *lags;
-       u8 port_to_module[MLXSW_PORT_MAX_PORTS];
+       u8 *port_to_module;
        struct mlxsw_sp_sb sb;
        struct mlxsw_sp_router router;
        struct mlxsw_sp_acl *acl;
@@ -304,6 +266,7 @@ struct mlxsw_sp {
                DECLARE_BITMAP(usage, MLXSW_SP_KVD_LINEAR_SIZE);
        } kvdl;
 
+       struct mlxsw_sp_counter_pool *counter_pool;
        struct {
                struct mlxsw_sp_span_entry *entries;
                int entries_count;
@@ -317,6 +280,18 @@ mlxsw_sp_lag_get(struct mlxsw_sp *mlxsw_sp, u16 lag_id)
        return &mlxsw_sp->lags[lag_id];
 }
 
+static inline u32 mlxsw_sp_cells_bytes(const struct mlxsw_sp *mlxsw_sp,
+                                      u32 cells)
+{
+       return mlxsw_sp->sb.cell_size * cells;
+}
+
+static inline u32 mlxsw_sp_bytes_cells(const struct mlxsw_sp *mlxsw_sp,
+                                      u32 bytes)
+{
+       return DIV_ROUND_UP(bytes, mlxsw_sp->sb.cell_size);
+}
+
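With the compile-time MLXSW_SP_BYTES_TO_CELLS/CELLS_TO_BYTES macros removed above, conversions such as the deleted mlxsw_sp_pfc_delay_get() have to be re-expressed against the per-instance cell size queried from the device. A sketch of that reformulation, assuming the same formula and the old MLXSW_SP_CELL_FACTOR value of 2 (hypothetical placement, not necessarily how the commit relocates it):

    /* Sketch: the old header-only PFC delay helper, rewritten against
     * the runtime cell size. Assumes the cell factor stays 2.
     */
    static u16 example_pfc_delay_get(const struct mlxsw_sp *mlxsw_sp,
                                     int mtu, u16 delay)
    {
            delay = mlxsw_sp_bytes_cells(mlxsw_sp,
                                         DIV_ROUND_UP(delay, BITS_PER_BYTE));
            return 2 * delay + mlxsw_sp_bytes_cells(mlxsw_sp, mtu);
    }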
 struct mlxsw_sp_port_pcpu_stats {
        u64                     rx_packets;
        u64                     rx_bytes;
@@ -386,6 +361,7 @@ struct mlxsw_sp_port {
 };
 
 bool mlxsw_sp_port_dev_check(const struct net_device *dev);
+struct mlxsw_sp *mlxsw_sp_lower_get(struct net_device *dev);
 struct mlxsw_sp_port *mlxsw_sp_port_lower_dev_hold(struct net_device *dev);
 void mlxsw_sp_port_dev_put(struct mlxsw_sp_port *mlxsw_sp_port);
 
@@ -497,19 +473,6 @@ mlxsw_sp_vfid_find(const struct mlxsw_sp *mlxsw_sp,
        return NULL;
 }
 
-static inline struct mlxsw_sp_rif *
-mlxsw_sp_rif_find_by_dev(const struct mlxsw_sp *mlxsw_sp,
-                        const struct net_device *dev)
-{
-       int i;
-
-       for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS); i++)
-               if (mlxsw_sp->rifs[i] && mlxsw_sp->rifs[i]->dev == dev)
-                       return mlxsw_sp->rifs[i];
-
-       return NULL;
-}
-
 enum mlxsw_sp_flood_table {
        MLXSW_SP_FLOOD_TABLE_UC,
        MLXSW_SP_FLOOD_TABLE_BC,
@@ -570,8 +533,6 @@ int mlxsw_sp_rif_fdb_op(struct mlxsw_sp *mlxsw_sp, const char *mac, u16 fid,
                        bool adding);
 struct mlxsw_sp_fid *mlxsw_sp_fid_create(struct mlxsw_sp *mlxsw_sp, u16 fid);
 void mlxsw_sp_fid_destroy(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_fid *f);
-void mlxsw_sp_rif_bridge_destroy(struct mlxsw_sp *mlxsw_sp,
-                                struct mlxsw_sp_rif *r);
 int mlxsw_sp_port_ets_set(struct mlxsw_sp_port *mlxsw_sp_port,
                          enum mlxsw_reg_qeec_hr hr, u8 index, u8 next_index,
                          bool dwrr, u8 dwrr_weight);
@@ -608,10 +569,22 @@ int mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp);
 void mlxsw_sp_router_fini(struct mlxsw_sp *mlxsw_sp);
 int mlxsw_sp_router_netevent_event(struct notifier_block *unused,
                                   unsigned long event, void *ptr);
-void mlxsw_sp_router_rif_gone_sync(struct mlxsw_sp *mlxsw_sp,
-                                  struct mlxsw_sp_rif *r);
-
-int mlxsw_sp_kvdl_alloc(struct mlxsw_sp *mlxsw_sp, unsigned int entry_count);
+int mlxsw_sp_netdevice_router_port_event(struct net_device *dev);
+int mlxsw_sp_inetaddr_event(struct notifier_block *unused,
+                           unsigned long event, void *ptr);
+void mlxsw_sp_rif_bridge_destroy(struct mlxsw_sp *mlxsw_sp,
+                                struct mlxsw_sp_rif *rif);
+int mlxsw_sp_vport_vrf_join(struct mlxsw_sp_port *mlxsw_sp_vport);
+void mlxsw_sp_vport_vrf_leave(struct mlxsw_sp_port *mlxsw_sp_vport);
+int mlxsw_sp_port_vrf_join(struct mlxsw_sp_port *mlxsw_sp_port);
+void mlxsw_sp_port_vrf_leave(struct mlxsw_sp_port *mlxsw_sp_port);
+int mlxsw_sp_bridge_vrf_join(struct mlxsw_sp *mlxsw_sp,
+                            struct net_device *l3_dev);
+void mlxsw_sp_bridge_vrf_leave(struct mlxsw_sp *mlxsw_sp,
+                              struct net_device *l3_dev);
+
+int mlxsw_sp_kvdl_alloc(struct mlxsw_sp *mlxsw_sp, unsigned int entry_count,
+                       u32 *p_entry_index);
 void mlxsw_sp_kvdl_free(struct mlxsw_sp *mlxsw_sp, int entry_index);
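The allocator's signature changes here from returning the index (negative on error) to returning a status code with the index in an out-parameter, which is what the spectrum_acl.c hunks below adapt to. A hypothetical caller, only to show the new convention:

    /* Hypothetical caller of the new KVDL interface: status and index
     * are now separate, so no negative-index overloading.
     */
    static int example_kvdl_user(struct mlxsw_sp *mlxsw_sp)
    {
            u32 kvdl_index;
            int err;

            err = mlxsw_sp_kvdl_alloc(mlxsw_sp, 1, &kvdl_index);
            if (err)
                    return err;
            /* ... program the entry at kvdl_index ... */
            mlxsw_sp_kvdl_free(mlxsw_sp, kvdl_index);
            return 0;
    }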
 
 struct mlxsw_afk *mlxsw_sp_acl_afk(struct mlxsw_sp_acl *acl);
@@ -620,6 +593,8 @@ struct mlxsw_sp_acl_rule_info {
        unsigned int priority;
        struct mlxsw_afk_element_values values;
        struct mlxsw_afa_block *act_block;
+       unsigned int counter_index;
+       bool counter_valid;
 };
 
 enum mlxsw_sp_acl_profile {
@@ -639,6 +614,8 @@ struct mlxsw_sp_acl_profile_ops {
                        void *ruleset_priv, void *rule_priv,
                        struct mlxsw_sp_acl_rule_info *rulei);
        void (*rule_del)(struct mlxsw_sp *mlxsw_sp, void *rule_priv);
+       int (*rule_activity_get)(struct mlxsw_sp *mlxsw_sp, void *rule_priv,
+                                bool *activity);
 };
 
 struct mlxsw_sp_acl_ops {
@@ -679,6 +656,11 @@ int mlxsw_sp_acl_rulei_act_drop(struct mlxsw_sp_acl_rule_info *rulei);
 int mlxsw_sp_acl_rulei_act_fwd(struct mlxsw_sp *mlxsw_sp,
                               struct mlxsw_sp_acl_rule_info *rulei,
                               struct net_device *out_dev);
+int mlxsw_sp_acl_rulei_act_vlan(struct mlxsw_sp *mlxsw_sp,
+                               struct mlxsw_sp_acl_rule_info *rulei,
+                               u32 action, u16 vid, u16 proto, u8 prio);
+int mlxsw_sp_acl_rulei_act_count(struct mlxsw_sp *mlxsw_sp,
+                                struct mlxsw_sp_acl_rule_info *rulei);
 
 struct mlxsw_sp_acl_rule;
 
@@ -698,6 +680,9 @@ mlxsw_sp_acl_rule_lookup(struct mlxsw_sp *mlxsw_sp,
                         unsigned long cookie);
 struct mlxsw_sp_acl_rule_info *
 mlxsw_sp_acl_rule_rulei(struct mlxsw_sp_acl_rule *rule);
+int mlxsw_sp_acl_rule_get_stats(struct mlxsw_sp *mlxsw_sp,
+                               struct mlxsw_sp_acl_rule *rule,
+                               u64 *packets, u64 *bytes, u64 *last_use);
 
 int mlxsw_sp_acl_init(struct mlxsw_sp *mlxsw_sp);
 void mlxsw_sp_acl_fini(struct mlxsw_sp *mlxsw_sp);
@@ -708,5 +693,14 @@ int mlxsw_sp_flower_replace(struct mlxsw_sp_port *mlxsw_sp_port, bool ingress,
                            __be16 protocol, struct tc_cls_flower_offload *f);
 void mlxsw_sp_flower_destroy(struct mlxsw_sp_port *mlxsw_sp_port, bool ingress,
                             struct tc_cls_flower_offload *f);
+int mlxsw_sp_flower_stats(struct mlxsw_sp_port *mlxsw_sp_port, bool ingress,
+                         struct tc_cls_flower_offload *f);
+int mlxsw_sp_flow_counter_get(struct mlxsw_sp *mlxsw_sp,
+                             unsigned int counter_index, u64 *packets,
+                             u64 *bytes);
+int mlxsw_sp_flow_counter_alloc(struct mlxsw_sp *mlxsw_sp,
+                               unsigned int *p_counter_index);
+void mlxsw_sp_flow_counter_free(struct mlxsw_sp *mlxsw_sp,
+                               unsigned int counter_index);
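Taken together, the counter interfaces added above follow an alloc/read/free lifecycle backed by the new counter pool. A hypothetical round trip, illustrating the calling convention only:

    /* Hypothetical lifecycle of a flow counter using the interfaces
     * declared above: allocate, read, free.
     */
    static int example_counter_roundtrip(struct mlxsw_sp *mlxsw_sp)
    {
            unsigned int counter_index;
            u64 packets, bytes;
            int err;

            err = mlxsw_sp_flow_counter_alloc(mlxsw_sp, &counter_index);
            if (err)
                    return err;
            err = mlxsw_sp_flow_counter_get(mlxsw_sp, counter_index,
                                            &packets, &bytes);
            mlxsw_sp_flow_counter_free(mlxsw_sp, counter_index);
            return err;
    }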
 
 #endif
index 8a18b3aa70dc20d7464a14805e60cc838a87ce17..d3b791f69f5bb8b0332a9321cc258a7d7573d3d3 100644
@@ -39,6 +39,7 @@
 #include <linux/string.h>
 #include <linux/rhashtable.h>
 #include <linux/netdevice.h>
+#include <net/tc_act/tc_vlan.h>
 
 #include "reg.h"
 #include "core.h"
 #include "spectrum_acl_flex_keys.h"
 
 struct mlxsw_sp_acl {
+       struct mlxsw_sp *mlxsw_sp;
        struct mlxsw_afk *afk;
        struct mlxsw_afa *afa;
        const struct mlxsw_sp_acl_ops *ops;
        struct rhashtable ruleset_ht;
+       struct list_head rules;
+       struct {
+               struct delayed_work dw;
+               unsigned long interval; /* ms */
+#define MLXSW_SP_ACL_RULE_ACTIVITY_UPDATE_PERIOD_MS 1000
+       } rule_activity_update;
        unsigned long priv[0];
        /* priv has to be always the last item */
 };
@@ -79,9 +87,13 @@ struct mlxsw_sp_acl_ruleset {
 
 struct mlxsw_sp_acl_rule {
        struct rhash_head ht_node; /* Member of rule HT */
+       struct list_head list;
        unsigned long cookie; /* HT key */
        struct mlxsw_sp_acl_ruleset *ruleset;
        struct mlxsw_sp_acl_rule_info *rulei;
+       u64 last_used;
+       u64 last_packets;
+       u64 last_bytes;
        unsigned long priv[0];
        /* priv has to be always the last item */
 };
@@ -237,6 +249,27 @@ void mlxsw_sp_acl_ruleset_put(struct mlxsw_sp *mlxsw_sp,
        mlxsw_sp_acl_ruleset_ref_dec(mlxsw_sp, ruleset);
 }
 
+static int
+mlxsw_sp_acl_rulei_counter_alloc(struct mlxsw_sp *mlxsw_sp,
+                                struct mlxsw_sp_acl_rule_info *rulei)
+{
+       int err;
+
+       err = mlxsw_sp_flow_counter_alloc(mlxsw_sp, &rulei->counter_index);
+       if (err)
+               return err;
+       rulei->counter_valid = true;
+       return 0;
+}
+
+static void
+mlxsw_sp_acl_rulei_counter_free(struct mlxsw_sp *mlxsw_sp,
+                               struct mlxsw_sp_acl_rule_info *rulei)
+{
+       rulei->counter_valid = false;
+       mlxsw_sp_flow_counter_free(mlxsw_sp, rulei->counter_index);
+}
+
 struct mlxsw_sp_acl_rule_info *
 mlxsw_sp_acl_rulei_create(struct mlxsw_sp_acl *acl)
 {
@@ -335,6 +368,41 @@ int mlxsw_sp_acl_rulei_act_fwd(struct mlxsw_sp *mlxsw_sp,
                                          local_port, in_port);
 }
 
+int mlxsw_sp_acl_rulei_act_vlan(struct mlxsw_sp *mlxsw_sp,
+                               struct mlxsw_sp_acl_rule_info *rulei,
+                               u32 action, u16 vid, u16 proto, u8 prio)
+{
+       u8 ethertype;
+
+       if (action == TCA_VLAN_ACT_MODIFY) {
+               switch (proto) {
+               case ETH_P_8021Q:
+                       ethertype = 0;
+                       break;
+               case ETH_P_8021AD:
+                       ethertype = 1;
+                       break;
+               default:
+                       dev_err(mlxsw_sp->bus_info->dev, "Unsupported VLAN protocol %#04x\n",
+                               proto);
+                       return -EINVAL;
+               }
+
+               return mlxsw_afa_block_append_vlan_modify(rulei->act_block,
+                                                         vid, prio, ethertype);
+       } else {
+               dev_err(mlxsw_sp->bus_info->dev, "Unsupported VLAN action\n");
+               return -EINVAL;
+       }
+}
+
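mlxsw_sp_acl_rulei_act_vlan() above maps the kernel's VLAN protocol constants to the device's ethertype encoding (0 for 802.1Q, 1 for 802.1AD) and rejects anything other than rewrite. A hypothetical call appending a "modify to VID 10, PCP 0, 802.1Q" action; TCA_VLAN_ACT_MODIFY comes from the tc vlan action header included in this patch, ETH_P_8021Q from linux/if_ether.h:

    /* Hypothetical usage: append a VLAN-modify action to a rule's
     * action block.
     */
    static int example_act_vlan(struct mlxsw_sp *mlxsw_sp,
                                struct mlxsw_sp_acl_rule_info *rulei)
    {
            return mlxsw_sp_acl_rulei_act_vlan(mlxsw_sp, rulei,
                                               TCA_VLAN_ACT_MODIFY,
                                               10, ETH_P_8021Q, 0);
    }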
+int mlxsw_sp_acl_rulei_act_count(struct mlxsw_sp *mlxsw_sp,
+                                struct mlxsw_sp_acl_rule_info *rulei)
+{
+       return mlxsw_afa_block_append_counter(rulei->act_block,
+                                             rulei->counter_index);
+}
+
 struct mlxsw_sp_acl_rule *
 mlxsw_sp_acl_rule_create(struct mlxsw_sp *mlxsw_sp,
                         struct mlxsw_sp_acl_ruleset *ruleset,
@@ -358,8 +426,14 @@ mlxsw_sp_acl_rule_create(struct mlxsw_sp *mlxsw_sp,
                err = PTR_ERR(rule->rulei);
                goto err_rulei_create;
        }
+
+       err = mlxsw_sp_acl_rulei_counter_alloc(mlxsw_sp, rule->rulei);
+       if (err)
+               goto err_counter_alloc;
        return rule;
 
+err_counter_alloc:
+       mlxsw_sp_acl_rulei_destroy(rule->rulei);
 err_rulei_create:
        kfree(rule);
 err_alloc:
@@ -372,6 +446,7 @@ void mlxsw_sp_acl_rule_destroy(struct mlxsw_sp *mlxsw_sp,
 {
        struct mlxsw_sp_acl_ruleset *ruleset = rule->ruleset;
 
+       mlxsw_sp_acl_rulei_counter_free(mlxsw_sp, rule->rulei);
        mlxsw_sp_acl_rulei_destroy(rule->rulei);
        kfree(rule);
        mlxsw_sp_acl_ruleset_ref_dec(mlxsw_sp, ruleset);
@@ -393,6 +468,7 @@ int mlxsw_sp_acl_rule_add(struct mlxsw_sp *mlxsw_sp,
        if (err)
                goto err_rhashtable_insert;
 
+       list_add_tail(&rule->list, &mlxsw_sp->acl->rules);
        return 0;
 
 err_rhashtable_insert:
@@ -406,6 +482,7 @@ void mlxsw_sp_acl_rule_del(struct mlxsw_sp *mlxsw_sp,
        struct mlxsw_sp_acl_ruleset *ruleset = rule->ruleset;
        const struct mlxsw_sp_acl_profile_ops *ops = ruleset->ht_key.ops;
 
+       list_del(&rule->list);
        rhashtable_remove_fast(&ruleset->rule_ht, &rule->ht_node,
                               mlxsw_sp_acl_rule_ht_params);
        ops->rule_del(mlxsw_sp, rule->priv);
@@ -426,6 +503,90 @@ mlxsw_sp_acl_rule_rulei(struct mlxsw_sp_acl_rule *rule)
        return rule->rulei;
 }
 
+static int mlxsw_sp_acl_rule_activity_update(struct mlxsw_sp *mlxsw_sp,
+                                            struct mlxsw_sp_acl_rule *rule)
+{
+       struct mlxsw_sp_acl_ruleset *ruleset = rule->ruleset;
+       const struct mlxsw_sp_acl_profile_ops *ops = ruleset->ht_key.ops;
+       bool active;
+       int err;
+
+       err = ops->rule_activity_get(mlxsw_sp, rule->priv, &active);
+       if (err)
+               return err;
+       if (active)
+               rule->last_used = jiffies;
+       return 0;
+}
+
+static int mlxsw_sp_acl_rules_activity_update(struct mlxsw_sp_acl *acl)
+{
+       struct mlxsw_sp_acl_rule *rule;
+       int err;
+
+       /* Protect internal structures from changes */
+       rtnl_lock();
+       list_for_each_entry(rule, &acl->rules, list) {
+               err = mlxsw_sp_acl_rule_activity_update(acl->mlxsw_sp,
+                                                       rule);
+               if (err)
+                       goto err_rule_update;
+       }
+       rtnl_unlock();
+       return 0;
+
+err_rule_update:
+       rtnl_unlock();
+       return err;
+}
+
+static void mlxsw_sp_acl_rule_activity_work_schedule(struct mlxsw_sp_acl *acl)
+{
+       unsigned long interval = acl->rule_activity_update.interval;
+
+       mlxsw_core_schedule_dw(&acl->rule_activity_update.dw,
+                              msecs_to_jiffies(interval));
+}
+
+static void mlxsw_sp_acl_rul_activity_update_work(struct work_struct *work)
+{
+       struct mlxsw_sp_acl *acl = container_of(work, struct mlxsw_sp_acl,
+                                               rule_activity_update.dw.work);
+       int err;
+
+       err = mlxsw_sp_acl_rules_activity_update(acl);
+       if (err)
+               dev_err(acl->mlxsw_sp->bus_info->dev, "Could not update acl activity\n");
+
+       mlxsw_sp_acl_rule_activity_work_schedule(acl);
+}
+
+int mlxsw_sp_acl_rule_get_stats(struct mlxsw_sp *mlxsw_sp,
+                               struct mlxsw_sp_acl_rule *rule,
+                               u64 *packets, u64 *bytes, u64 *last_use)
+
+{
+       struct mlxsw_sp_acl_rule_info *rulei;
+       u64 current_packets;
+       u64 current_bytes;
+       int err;
+
+       rulei = mlxsw_sp_acl_rule_rulei(rule);
+       err = mlxsw_sp_flow_counter_get(mlxsw_sp, rulei->counter_index,
+                                       &current_packets, &current_bytes);
+       if (err)
+               return err;
+
+       *packets = current_packets - rule->last_packets;
+       *bytes = current_bytes - rule->last_bytes;
+       *last_use = rule->last_used;
+
+       rule->last_bytes = current_bytes;
+       rule->last_packets = current_packets;
+
+       return 0;
+}
+
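Note that mlxsw_sp_acl_rule_get_stats() returns deltas: it caches the raw hardware counters in the rule and hands back only the increase since the previous call, plus the jiffies timestamp maintained by the activity work above. A hypothetical periodic reader therefore just accumulates:

    /* Hypothetical consumer: since stats come back as deltas, a
     * periodic reader sums them into its own totals.
     */
    static int example_read_rule_stats(struct mlxsw_sp *mlxsw_sp,
                                       struct mlxsw_sp_acl_rule *rule,
                                       u64 *total_packets, u64 *total_bytes)
    {
            u64 packets, bytes, last_use;
            int err;

            err = mlxsw_sp_acl_rule_get_stats(mlxsw_sp, rule, &packets,
                                              &bytes, &last_use);
            if (err)
                    return err;
            *total_packets += packets;      /* delta since previous call */
            *total_bytes += bytes;
            return 0;
    }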
 #define MLXSW_SP_KDVL_ACT_EXT_SIZE 1
 
 static int mlxsw_sp_act_kvdl_set_add(void *priv, u32 *p_kvdl_index,
@@ -434,7 +595,6 @@ static int mlxsw_sp_act_kvdl_set_add(void *priv, u32 *p_kvdl_index,
        struct mlxsw_sp *mlxsw_sp = priv;
        char pefa_pl[MLXSW_REG_PEFA_LEN];
        u32 kvdl_index;
-       int ret;
        int err;
 
        /* The first action set of a TCAM entry is stored directly in TCAM,
@@ -443,10 +603,10 @@ static int mlxsw_sp_act_kvdl_set_add(void *priv, u32 *p_kvdl_index,
        if (is_first)
                return 0;
 
-       ret = mlxsw_sp_kvdl_alloc(mlxsw_sp, MLXSW_SP_KDVL_ACT_EXT_SIZE);
-       if (ret < 0)
-               return ret;
-       kvdl_index = ret;
+       err = mlxsw_sp_kvdl_alloc(mlxsw_sp, MLXSW_SP_KDVL_ACT_EXT_SIZE,
+                                 &kvdl_index);
+       if (err)
+               return err;
        mlxsw_reg_pefa_pack(pefa_pl, kvdl_index, enc_actions);
        err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pefa), pefa_pl);
        if (err)
@@ -475,13 +635,11 @@ static int mlxsw_sp_act_kvdl_fwd_entry_add(void *priv, u32 *p_kvdl_index,
        struct mlxsw_sp *mlxsw_sp = priv;
        char ppbs_pl[MLXSW_REG_PPBS_LEN];
        u32 kvdl_index;
-       int ret;
        int err;
 
-       ret = mlxsw_sp_kvdl_alloc(mlxsw_sp, 1);
-       if (ret < 0)
-               return ret;
-       kvdl_index = ret;
+       err = mlxsw_sp_kvdl_alloc(mlxsw_sp, 1, &kvdl_index);
+       if (err)
+               return err;
        mlxsw_reg_ppbs_pack(ppbs_pl, kvdl_index, local_port);
        err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ppbs), ppbs_pl);
        if (err)
@@ -518,7 +676,7 @@ int mlxsw_sp_acl_init(struct mlxsw_sp *mlxsw_sp)
        if (!acl)
                return -ENOMEM;
        mlxsw_sp->acl = acl;
-
+       acl->mlxsw_sp = mlxsw_sp;
        acl->afk = mlxsw_afk_create(MLXSW_CORE_RES_GET(mlxsw_sp->core,
                                                       ACL_FLEX_KEYS),
                                    mlxsw_sp_afk_blocks,
@@ -541,11 +699,18 @@ int mlxsw_sp_acl_init(struct mlxsw_sp *mlxsw_sp)
        if (err)
                goto err_rhashtable_init;
 
+       INIT_LIST_HEAD(&acl->rules);
        err = acl_ops->init(mlxsw_sp, acl->priv);
        if (err)
                goto err_acl_ops_init;
 
        acl->ops = acl_ops;
+
+       /* Create the delayed work for the rule activity_update */
+       INIT_DELAYED_WORK(&acl->rule_activity_update.dw,
+                         mlxsw_sp_acl_rul_activity_update_work);
+       acl->rule_activity_update.interval = MLXSW_SP_ACL_RULE_ACTIVITY_UPDATE_PERIOD_MS;
+       mlxsw_core_schedule_dw(&acl->rule_activity_update.dw, 0);
        return 0;
 
 err_acl_ops_init:
@@ -564,7 +729,9 @@ void mlxsw_sp_acl_fini(struct mlxsw_sp *mlxsw_sp)
        struct mlxsw_sp_acl *acl = mlxsw_sp->acl;
        const struct mlxsw_sp_acl_ops *acl_ops = acl->ops;
 
+       cancel_delayed_work_sync(&mlxsw_sp->acl->rule_activity_update.dw);
        acl_ops->fini(mlxsw_sp, acl->priv);
+       WARN_ON(!list_empty(&acl->rules));
        rhashtable_destroy(&acl->ruleset_ht);
        mlxsw_afa_destroy(acl->afa);
        mlxsw_afk_destroy(acl->afk);
index 82b81cf7f4a7de875191c1b7dcf69b0fd27854d1..af7b7bad48df7746946c9d0dab44cb8bdf13f6b6 100644
 
 static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_l2_dmac[] = {
        MLXSW_AFK_ELEMENT_INST_BUF(DMAC, 0x00, 6),
+       MLXSW_AFK_ELEMENT_INST_U32(PCP, 0x08, 13, 3),
+       MLXSW_AFK_ELEMENT_INST_U32(VID, 0x08, 0, 12),
        MLXSW_AFK_ELEMENT_INST_U32(SRC_SYS_PORT, 0x0C, 0, 16),
 };
 
 static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_l2_smac[] = {
        MLXSW_AFK_ELEMENT_INST_BUF(SMAC, 0x00, 6),
+       MLXSW_AFK_ELEMENT_INST_U32(PCP, 0x08, 13, 3),
+       MLXSW_AFK_ELEMENT_INST_U32(VID, 0x08, 0, 12),
        MLXSW_AFK_ELEMENT_INST_U32(SRC_SYS_PORT, 0x0C, 0, 16),
 };
 
@@ -65,6 +69,8 @@ static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv4_dip[] = {
 };
 
 static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv4_ex[] = {
+       MLXSW_AFK_ELEMENT_INST_U32(VID, 0x00, 0, 12),
+       MLXSW_AFK_ELEMENT_INST_U32(PCP, 0x08, 29, 3),
        MLXSW_AFK_ELEMENT_INST_U32(SRC_L4_PORT, 0x08, 0, 16),
        MLXSW_AFK_ELEMENT_INST_U32(DST_L4_PORT, 0x0C, 0, 16),
 };
index 7382832215faa0d2211625a53ee6d7f328686ba2..3a24289979d9a0bf41ec87d008e0e55c404bfba6 100644
@@ -561,6 +561,24 @@ mlxsw_sp_acl_tcam_region_entry_remove(struct mlxsw_sp *mlxsw_sp,
        mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ptce2), ptce2_pl);
 }
 
+static int
+mlxsw_sp_acl_tcam_region_entry_activity_get(struct mlxsw_sp *mlxsw_sp,
+                                           struct mlxsw_sp_acl_tcam_region *region,
+                                           unsigned int offset,
+                                           bool *activity)
+{
+       char ptce2_pl[MLXSW_REG_PTCE2_LEN];
+       int err;
+
+       mlxsw_reg_ptce2_pack(ptce2_pl, true, MLXSW_REG_PTCE2_OP_QUERY_CLEAR_ON_READ,
+                            region->tcam_region_info, offset);
+       err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ptce2), ptce2_pl);
+       if (err)
+               return err;
+       *activity = mlxsw_reg_ptce2_a_get(ptce2_pl);
+       return 0;
+}
+
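Because the query is packed with MLXSW_REG_PTCE2_OP_QUERY_CLEAR_ON_READ, the activity bit means "hit since the previous poll": two back-to-back reads with no traffic in between return true and then false. A hypothetical poll step naming that semantics (not part of this commit):

    /* Hypothetical poll: clear-on-read makes each query report activity
     * since the last poll, so stamping on true yields a last-used time,
     * much like the ACL core does with jiffies.
     */
    static int example_poll_entry(struct mlxsw_sp *mlxsw_sp,
                                  struct mlxsw_sp_acl_tcam_region *region,
                                  unsigned int offset, u64 *p_last_hit)
    {
            bool hit;
            int err;

            err = mlxsw_sp_acl_tcam_region_entry_activity_get(mlxsw_sp,
                                                              region,
                                                              offset, &hit);
            if (err)
                    return err;
            if (hit)
                    *p_last_hit = get_jiffies_64();
            return 0;
    }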
 #define MLXSW_SP_ACL_TCAM_CATCHALL_PRIO (~0U)
 
 static int
@@ -940,6 +958,19 @@ static void mlxsw_sp_acl_tcam_entry_del(struct mlxsw_sp *mlxsw_sp,
        mlxsw_sp_acl_tcam_chunk_put(mlxsw_sp, chunk);
 }
 
+static int
+mlxsw_sp_acl_tcam_entry_activity_get(struct mlxsw_sp *mlxsw_sp,
+                                    struct mlxsw_sp_acl_tcam_entry *entry,
+                                    bool *activity)
+{
+       struct mlxsw_sp_acl_tcam_chunk *chunk = entry->chunk;
+       struct mlxsw_sp_acl_tcam_region *region = chunk->region;
+
+       return mlxsw_sp_acl_tcam_region_entry_activity_get(mlxsw_sp, region,
+                                                          entry->parman_item.index,
+                                                          activity);
+}
+
 static const enum mlxsw_afk_element mlxsw_sp_acl_tcam_pattern_ipv4[] = {
        MLXSW_AFK_ELEMENT_SRC_SYS_PORT,
        MLXSW_AFK_ELEMENT_DMAC,
@@ -950,6 +981,8 @@ static const enum mlxsw_afk_element mlxsw_sp_acl_tcam_pattern_ipv4[] = {
        MLXSW_AFK_ELEMENT_DST_IP4,
        MLXSW_AFK_ELEMENT_DST_L4_PORT,
        MLXSW_AFK_ELEMENT_SRC_L4_PORT,
+       MLXSW_AFK_ELEMENT_VID,
+       MLXSW_AFK_ELEMENT_PCP,
 };
 
 static const enum mlxsw_afk_element mlxsw_sp_acl_tcam_pattern_ipv6[] = {
@@ -1046,6 +1079,16 @@ mlxsw_sp_acl_tcam_flower_rule_del(struct mlxsw_sp *mlxsw_sp, void *rule_priv)
        mlxsw_sp_acl_tcam_entry_del(mlxsw_sp, &rule->entry);
 }
 
+static int
+mlxsw_sp_acl_tcam_flower_rule_activity_get(struct mlxsw_sp *mlxsw_sp,
+                                          void *rule_priv, bool *activity)
+{
+       struct mlxsw_sp_acl_tcam_flower_rule *rule = rule_priv;
+
+       return mlxsw_sp_acl_tcam_entry_activity_get(mlxsw_sp, &rule->entry,
+                                                   activity);
+}
+
 static const struct mlxsw_sp_acl_profile_ops mlxsw_sp_acl_tcam_flower_ops = {
        .ruleset_priv_size      = sizeof(struct mlxsw_sp_acl_tcam_flower_ruleset),
        .ruleset_add            = mlxsw_sp_acl_tcam_flower_ruleset_add,
@@ -1055,6 +1098,7 @@ static const struct mlxsw_sp_acl_profile_ops mlxsw_sp_acl_tcam_flower_ops = {
        .rule_priv_size         = sizeof(struct mlxsw_sp_acl_tcam_flower_rule),
        .rule_add               = mlxsw_sp_acl_tcam_flower_rule_add,
        .rule_del               = mlxsw_sp_acl_tcam_flower_rule_del,
+       .rule_activity_get      = mlxsw_sp_acl_tcam_flower_rule_activity_get,
 };
 
 static const struct mlxsw_sp_acl_profile_ops *
index a7468262f118979c8914e8acea042361b838f69e..997189cfe7fd58c7e93419d69cf609420eb1633f 100644
@@ -162,8 +162,8 @@ static int mlxsw_sp_sb_pm_occ_query(struct mlxsw_sp *mlxsw_sp, u8 local_port,
 }
 
 static const u16 mlxsw_sp_pbs[] = {
-       [0] = 2 * MLXSW_SP_BYTES_TO_CELLS(ETH_FRAME_LEN),
-       [9] = 2 * MLXSW_SP_BYTES_TO_CELLS(MLXSW_PORT_MAX_MTU),
+       [0] = 2 * ETH_FRAME_LEN,
+       [9] = 2 * MLXSW_PORT_MAX_MTU,
 };
 
 #define MLXSW_SP_PBS_LEN ARRAY_SIZE(mlxsw_sp_pbs)
@@ -171,20 +171,22 @@ static const u16 mlxsw_sp_pbs[] = {
 
 static int mlxsw_sp_port_pb_init(struct mlxsw_sp_port *mlxsw_sp_port)
 {
+       struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
        char pbmc_pl[MLXSW_REG_PBMC_LEN];
        int i;
 
        mlxsw_reg_pbmc_pack(pbmc_pl, mlxsw_sp_port->local_port,
                            0xffff, 0xffff / 2);
        for (i = 0; i < MLXSW_SP_PBS_LEN; i++) {
+               u16 size = mlxsw_sp_bytes_cells(mlxsw_sp, mlxsw_sp_pbs[i]);
+
                if (i == MLXSW_SP_PB_UNUSED)
                        continue;
-               mlxsw_reg_pbmc_lossy_buffer_pack(pbmc_pl, i, mlxsw_sp_pbs[i]);
+               mlxsw_reg_pbmc_lossy_buffer_pack(pbmc_pl, i, size);
        }
        mlxsw_reg_pbmc_lossy_buffer_pack(pbmc_pl,
                                         MLXSW_REG_PBMC_PORT_SHARED_BUF_IDX, 0);
-       return mlxsw_reg_write(mlxsw_sp_port->mlxsw_sp->core,
-                              MLXSW_REG(pbmc), pbmc_pl);
+       return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pbmc), pbmc_pl);
 }
 
 static int mlxsw_sp_port_pb_prio_init(struct mlxsw_sp_port *mlxsw_sp_port)
@@ -209,11 +211,25 @@ static int mlxsw_sp_port_headroom_init(struct mlxsw_sp_port *mlxsw_sp_port)
        return mlxsw_sp_port_pb_prio_init(mlxsw_sp_port);
 }
 
-#define MLXSW_SP_SB_PR_INGRESS_SIZE                            \
-       (15000000 - (2 * 20000 * MLXSW_PORT_MAX_PORTS))
+static int mlxsw_sp_sb_ports_init(struct mlxsw_sp *mlxsw_sp)
+{
+       unsigned int max_ports = mlxsw_core_max_ports(mlxsw_sp->core);
+
+       mlxsw_sp->sb.ports = kcalloc(max_ports, sizeof(struct mlxsw_sp_sb_port),
+                                    GFP_KERNEL);
+       if (!mlxsw_sp->sb.ports)
+               return -ENOMEM;
+       return 0;
+}
+
+static void mlxsw_sp_sb_ports_fini(struct mlxsw_sp *mlxsw_sp)
+{
+       kfree(mlxsw_sp->sb.ports);
+}
+
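The shared-buffer per-port state moves here from a fixed MLXSW_PORT_MAX_PORTS array to a kcalloc'd array sized from mlxsw_core_max_ports(), mirroring the port_to_module change in spectrum.h. A hypothetical accessor, to show that lookups are unchanged apart from the indirection:

    /* Hypothetical accessor: per-port buffer state now lives behind a
     * pointer sized at init time rather than a fixed-size array.
     */
    static struct mlxsw_sp_sb_port *
    example_sb_port(struct mlxsw_sp *mlxsw_sp, u8 local_port)
    {
            return &mlxsw_sp->sb.ports[local_port];
    }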
+#define MLXSW_SP_SB_PR_INGRESS_SIZE    12440000
 #define MLXSW_SP_SB_PR_INGRESS_MNG_SIZE (200 * 1000)
-#define MLXSW_SP_SB_PR_EGRESS_SIZE                             \
-       (14000000 - (8 * 1500 * MLXSW_PORT_MAX_PORTS))
+#define MLXSW_SP_SB_PR_EGRESS_SIZE     13232000
 
 #define MLXSW_SP_SB_PR(_mode, _size)   \
        {                               \
@@ -223,18 +239,17 @@ static int mlxsw_sp_port_headroom_init(struct mlxsw_sp_port *mlxsw_sp_port)
 
 static const struct mlxsw_sp_sb_pr mlxsw_sp_sb_prs_ingress[] = {
        MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_DYNAMIC,
-                      MLXSW_SP_BYTES_TO_CELLS(MLXSW_SP_SB_PR_INGRESS_SIZE)),
+                      MLXSW_SP_SB_PR_INGRESS_SIZE),
        MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_DYNAMIC, 0),
        MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_DYNAMIC, 0),
        MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_DYNAMIC,
-                      MLXSW_SP_BYTES_TO_CELLS(MLXSW_SP_SB_PR_INGRESS_MNG_SIZE)),
+                      MLXSW_SP_SB_PR_INGRESS_MNG_SIZE),
 };
 
 #define MLXSW_SP_SB_PRS_INGRESS_LEN ARRAY_SIZE(mlxsw_sp_sb_prs_ingress)
 
 static const struct mlxsw_sp_sb_pr mlxsw_sp_sb_prs_egress[] = {
-       MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_DYNAMIC,
-                      MLXSW_SP_BYTES_TO_CELLS(MLXSW_SP_SB_PR_EGRESS_SIZE)),
+       MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_DYNAMIC, MLXSW_SP_SB_PR_EGRESS_SIZE),
        MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_DYNAMIC, 0),
        MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_DYNAMIC, 0),
        MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_DYNAMIC, 0),
@@ -251,11 +266,9 @@ static int __mlxsw_sp_sb_prs_init(struct mlxsw_sp *mlxsw_sp,
        int err;
 
        for (i = 0; i < prs_len; i++) {
-               const struct mlxsw_sp_sb_pr *pr;
+               u32 size = mlxsw_sp_bytes_cells(mlxsw_sp, prs[i].size);
 
-               pr = &prs[i];
-               err = mlxsw_sp_sb_pr_write(mlxsw_sp, i, dir,
-                                          pr->mode, pr->size);
+               err = mlxsw_sp_sb_pr_write(mlxsw_sp, i, dir, prs[i].mode, size);
                if (err)
                        return err;
        }
@@ -284,7 +297,7 @@ static int mlxsw_sp_sb_prs_init(struct mlxsw_sp *mlxsw_sp)
        }
 
 static const struct mlxsw_sp_sb_cm mlxsw_sp_sb_cms_ingress[] = {
-       MLXSW_SP_SB_CM(MLXSW_SP_BYTES_TO_CELLS(10000), 8, 0),
+       MLXSW_SP_SB_CM(10000, 8, 0),
        MLXSW_SP_SB_CM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN, 0),
        MLXSW_SP_SB_CM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN, 0),
        MLXSW_SP_SB_CM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN, 0),
@@ -293,20 +306,20 @@ static const struct mlxsw_sp_sb_cm mlxsw_sp_sb_cms_ingress[] = {
        MLXSW_SP_SB_CM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN, 0),
        MLXSW_SP_SB_CM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN, 0),
        MLXSW_SP_SB_CM(0, 0, 0), /* dummy, this PG does not exist */
-       MLXSW_SP_SB_CM(MLXSW_SP_BYTES_TO_CELLS(20000), 1, 3),
+       MLXSW_SP_SB_CM(20000, 1, 3),
 };
 
 #define MLXSW_SP_SB_CMS_INGRESS_LEN ARRAY_SIZE(mlxsw_sp_sb_cms_ingress)
 
 static const struct mlxsw_sp_sb_cm mlxsw_sp_sb_cms_egress[] = {
-       MLXSW_SP_SB_CM(MLXSW_SP_BYTES_TO_CELLS(1500), 9, 0),
-       MLXSW_SP_SB_CM(MLXSW_SP_BYTES_TO_CELLS(1500), 9, 0),
-       MLXSW_SP_SB_CM(MLXSW_SP_BYTES_TO_CELLS(1500), 9, 0),
-       MLXSW_SP_SB_CM(MLXSW_SP_BYTES_TO_CELLS(1500), 9, 0),
-       MLXSW_SP_SB_CM(MLXSW_SP_BYTES_TO_CELLS(1500), 9, 0),
-       MLXSW_SP_SB_CM(MLXSW_SP_BYTES_TO_CELLS(1500), 9, 0),
-       MLXSW_SP_SB_CM(MLXSW_SP_BYTES_TO_CELLS(1500), 9, 0),
-       MLXSW_SP_SB_CM(MLXSW_SP_BYTES_TO_CELLS(1500), 9, 0),
+       MLXSW_SP_SB_CM(1500, 9, 0),
+       MLXSW_SP_SB_CM(1500, 9, 0),
+       MLXSW_SP_SB_CM(1500, 9, 0),
+       MLXSW_SP_SB_CM(1500, 9, 0),
+       MLXSW_SP_SB_CM(1500, 9, 0),
+       MLXSW_SP_SB_CM(1500, 9, 0),
+       MLXSW_SP_SB_CM(1500, 9, 0),
+       MLXSW_SP_SB_CM(1500, 9, 0),
        MLXSW_SP_SB_CM(0, 0, 0),
        MLXSW_SP_SB_CM(0, 0, 0),
        MLXSW_SP_SB_CM(0, 0, 0),
@@ -330,7 +343,7 @@ static const struct mlxsw_sp_sb_cm mlxsw_sp_cpu_port_sb_cms[] = {
        MLXSW_SP_CPU_PORT_SB_CM,
        MLXSW_SP_CPU_PORT_SB_CM,
        MLXSW_SP_CPU_PORT_SB_CM,
-       MLXSW_SP_SB_CM(MLXSW_SP_BYTES_TO_CELLS(10000), 0, 0),
+       MLXSW_SP_SB_CM(10000, 0, 0),
        MLXSW_SP_CPU_PORT_SB_CM,
        MLXSW_SP_CPU_PORT_SB_CM,
        MLXSW_SP_CPU_PORT_SB_CM,
@@ -370,13 +383,17 @@ static int __mlxsw_sp_sb_cms_init(struct mlxsw_sp *mlxsw_sp, u8 local_port,
 
        for (i = 0; i < cms_len; i++) {
                const struct mlxsw_sp_sb_cm *cm;
+               u32 min_buff;
 
                if (i == 8 && dir == MLXSW_REG_SBXX_DIR_INGRESS)
                        continue; /* PG number 8 does not exist, skip it */
                cm = &cms[i];
+               /* All pools are initialized using dynamic thresholds,
+                * therefore 'max_buff' isn't specified in cells.
+                */
+               min_buff = mlxsw_sp_bytes_cells(mlxsw_sp, cm->min_buff);
                err = mlxsw_sp_sb_cm_write(mlxsw_sp, local_port, i, dir,
-                                          cm->min_buff, cm->max_buff,
-                                          cm->pool);
+                                          min_buff, cm->max_buff, cm->pool);
                if (err)
                        return err;
        }
@@ -484,21 +501,21 @@ struct mlxsw_sp_sb_mm {
        }
 
 static const struct mlxsw_sp_sb_mm mlxsw_sp_sb_mms[] = {
-       MLXSW_SP_SB_MM(MLXSW_SP_BYTES_TO_CELLS(20000), 0xff, 0),
-       MLXSW_SP_SB_MM(MLXSW_SP_BYTES_TO_CELLS(20000), 0xff, 0),
-       MLXSW_SP_SB_MM(MLXSW_SP_BYTES_TO_CELLS(20000), 0xff, 0),
-       MLXSW_SP_SB_MM(MLXSW_SP_BYTES_TO_CELLS(20000), 0xff, 0),
-       MLXSW_SP_SB_MM(MLXSW_SP_BYTES_TO_CELLS(20000), 0xff, 0),
-       MLXSW_SP_SB_MM(MLXSW_SP_BYTES_TO_CELLS(20000), 0xff, 0),
-       MLXSW_SP_SB_MM(MLXSW_SP_BYTES_TO_CELLS(20000), 0xff, 0),
-       MLXSW_SP_SB_MM(MLXSW_SP_BYTES_TO_CELLS(20000), 0xff, 0),
-       MLXSW_SP_SB_MM(MLXSW_SP_BYTES_TO_CELLS(20000), 0xff, 0),
-       MLXSW_SP_SB_MM(MLXSW_SP_BYTES_TO_CELLS(20000), 0xff, 0),
-       MLXSW_SP_SB_MM(MLXSW_SP_BYTES_TO_CELLS(20000), 0xff, 0),
-       MLXSW_SP_SB_MM(MLXSW_SP_BYTES_TO_CELLS(20000), 0xff, 0),
-       MLXSW_SP_SB_MM(MLXSW_SP_BYTES_TO_CELLS(20000), 0xff, 0),
-       MLXSW_SP_SB_MM(MLXSW_SP_BYTES_TO_CELLS(20000), 0xff, 0),
-       MLXSW_SP_SB_MM(MLXSW_SP_BYTES_TO_CELLS(20000), 0xff, 0),
+       MLXSW_SP_SB_MM(20000, 0xff, 0),
+       MLXSW_SP_SB_MM(20000, 0xff, 0),
+       MLXSW_SP_SB_MM(20000, 0xff, 0),
+       MLXSW_SP_SB_MM(20000, 0xff, 0),
+       MLXSW_SP_SB_MM(20000, 0xff, 0),
+       MLXSW_SP_SB_MM(20000, 0xff, 0),
+       MLXSW_SP_SB_MM(20000, 0xff, 0),
+       MLXSW_SP_SB_MM(20000, 0xff, 0),
+       MLXSW_SP_SB_MM(20000, 0xff, 0),
+       MLXSW_SP_SB_MM(20000, 0xff, 0),
+       MLXSW_SP_SB_MM(20000, 0xff, 0),
+       MLXSW_SP_SB_MM(20000, 0xff, 0),
+       MLXSW_SP_SB_MM(20000, 0xff, 0),
+       MLXSW_SP_SB_MM(20000, 0xff, 0),
+       MLXSW_SP_SB_MM(20000, 0xff, 0),
 };
 
 #define MLXSW_SP_SB_MMS_LEN ARRAY_SIZE(mlxsw_sp_sb_mms)
@@ -511,10 +528,15 @@ static int mlxsw_sp_sb_mms_init(struct mlxsw_sp *mlxsw_sp)
 
        for (i = 0; i < MLXSW_SP_SB_MMS_LEN; i++) {
                const struct mlxsw_sp_sb_mm *mc;
+               u32 min_buff;
 
                mc = &mlxsw_sp_sb_mms[i];
-               mlxsw_reg_sbmm_pack(sbmm_pl, i, mc->min_buff,
-                                   mc->max_buff, mc->pool);
+               /* All pools are initialized using dynamic thresholds,
+                * therefore 'max_buff' isn't specified in cells.
+                */
+               min_buff = mlxsw_sp_bytes_cells(mlxsw_sp, mc->min_buff);
+               mlxsw_reg_sbmm_pack(sbmm_pl, i, min_buff, mc->max_buff,
+                                   mc->pool);
                err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sbmm), sbmm_pl);
                if (err)
                        return err;
@@ -522,32 +544,53 @@ static int mlxsw_sp_sb_mms_init(struct mlxsw_sp *mlxsw_sp)
        return 0;
 }
 
-#define MLXSW_SP_SB_SIZE (16 * 1024 * 1024)
-
 int mlxsw_sp_buffers_init(struct mlxsw_sp *mlxsw_sp)
 {
+       u64 sb_size;
        int err;
 
-       err = mlxsw_sp_sb_prs_init(mlxsw_sp);
+       if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, CELL_SIZE))
+               return -EIO;
+       mlxsw_sp->sb.cell_size = MLXSW_CORE_RES_GET(mlxsw_sp->core, CELL_SIZE);
+
+       if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_BUFFER_SIZE))
+               return -EIO;
+       sb_size = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_BUFFER_SIZE);
+
+       err = mlxsw_sp_sb_ports_init(mlxsw_sp);
        if (err)
                return err;
+       err = mlxsw_sp_sb_prs_init(mlxsw_sp);
+       if (err)
+               goto err_sb_prs_init;
        err = mlxsw_sp_cpu_port_sb_cms_init(mlxsw_sp);
        if (err)
-               return err;
+               goto err_sb_cpu_port_sb_cms_init;
        err = mlxsw_sp_sb_mms_init(mlxsw_sp);
        if (err)
-               return err;
-       return devlink_sb_register(priv_to_devlink(mlxsw_sp->core), 0,
-                                  MLXSW_SP_SB_SIZE,
-                                  MLXSW_SP_SB_POOL_COUNT,
-                                  MLXSW_SP_SB_POOL_COUNT,
-                                  MLXSW_SP_SB_TC_COUNT,
-                                  MLXSW_SP_SB_TC_COUNT);
+               goto err_sb_mms_init;
+       err = devlink_sb_register(priv_to_devlink(mlxsw_sp->core), 0, sb_size,
+                                 MLXSW_SP_SB_POOL_COUNT,
+                                 MLXSW_SP_SB_POOL_COUNT,
+                                 MLXSW_SP_SB_TC_COUNT,
+                                 MLXSW_SP_SB_TC_COUNT);
+       if (err)
+               goto err_devlink_sb_register;
+
+       return 0;
+
+err_devlink_sb_register:
+err_sb_mms_init:
+err_sb_cpu_port_sb_cms_init:
+err_sb_prs_init:
+       mlxsw_sp_sb_ports_fini(mlxsw_sp);
+       return err;
 }
 
 void mlxsw_sp_buffers_fini(struct mlxsw_sp *mlxsw_sp)
 {
        devlink_sb_unregister(priv_to_devlink(mlxsw_sp->core), 0);
+       mlxsw_sp_sb_ports_fini(mlxsw_sp);
 }
 
 int mlxsw_sp_port_buffers_init(struct mlxsw_sp_port *mlxsw_sp_port)
@@ -596,7 +639,7 @@ int mlxsw_sp_sb_pool_get(struct mlxsw_core *mlxsw_core,
        struct mlxsw_sp_sb_pr *pr = mlxsw_sp_sb_pr_get(mlxsw_sp, pool, dir);
 
        pool_info->pool_type = (enum devlink_sb_pool_type) dir;
-       pool_info->size = MLXSW_SP_CELLS_TO_BYTES(pr->size);
+       pool_info->size = mlxsw_sp_cells_bytes(mlxsw_sp, pr->size);
        pool_info->threshold_type = (enum devlink_sb_threshold_type) pr->mode;
        return 0;
 }
@@ -606,9 +649,9 @@ int mlxsw_sp_sb_pool_set(struct mlxsw_core *mlxsw_core,
                         enum devlink_sb_threshold_type threshold_type)
 {
        struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
+       u32 pool_size = mlxsw_sp_bytes_cells(mlxsw_sp, size);
        u8 pool = pool_get(pool_index);
        enum mlxsw_reg_sbxx_dir dir = dir_get(pool_index);
-       u32 pool_size = MLXSW_SP_BYTES_TO_CELLS(size);
        enum mlxsw_reg_sbpr_mode mode;
 
        if (size > MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_BUFFER_SIZE))
@@ -627,7 +670,7 @@ static u32 mlxsw_sp_sb_threshold_out(struct mlxsw_sp *mlxsw_sp, u8 pool,
 
        if (pr->mode == MLXSW_REG_SBPR_MODE_DYNAMIC)
                return max_buff - MLXSW_SP_SB_THRESHOLD_TO_ALPHA_OFFSET;
-       return MLXSW_SP_CELLS_TO_BYTES(max_buff);
+       return mlxsw_sp_cells_bytes(mlxsw_sp, max_buff);
 }
 
 static int mlxsw_sp_sb_threshold_in(struct mlxsw_sp *mlxsw_sp, u8 pool,
@@ -645,7 +688,7 @@ static int mlxsw_sp_sb_threshold_in(struct mlxsw_sp *mlxsw_sp, u8 pool,
                        return -EINVAL;
                *p_max_buff = val;
        } else {
-               *p_max_buff = MLXSW_SP_BYTES_TO_CELLS(threshold);
+               *p_max_buff = mlxsw_sp_bytes_cells(mlxsw_sp, threshold);
        }
        return 0;
 }
@@ -761,7 +804,7 @@ static void mlxsw_sp_sb_sr_occ_query_cb(struct mlxsw_core *mlxsw_core,
 
        masked_count = 0;
        for (local_port = cb_ctx.local_port_1;
-            local_port < MLXSW_PORT_MAX_PORTS; local_port++) {
+            local_port < mlxsw_core_max_ports(mlxsw_core); local_port++) {
                if (!mlxsw_sp->ports[local_port])
                        continue;
                for (i = 0; i < MLXSW_SP_SB_TC_COUNT; i++) {
@@ -775,7 +818,7 @@ static void mlxsw_sp_sb_sr_occ_query_cb(struct mlxsw_core *mlxsw_core,
        }
        masked_count = 0;
        for (local_port = cb_ctx.local_port_1;
-            local_port < MLXSW_PORT_MAX_PORTS; local_port++) {
+            local_port < mlxsw_core_max_ports(mlxsw_core); local_port++) {
                if (!mlxsw_sp->ports[local_port])
                        continue;
                for (i = 0; i < MLXSW_SP_SB_TC_COUNT; i++) {
@@ -817,7 +860,7 @@ next_batch:
                mlxsw_reg_sbsr_pg_buff_mask_set(sbsr_pl, i, 1);
                mlxsw_reg_sbsr_tclass_mask_set(sbsr_pl, i, 1);
        }
-       for (; local_port < MLXSW_PORT_MAX_PORTS; local_port++) {
+       for (; local_port < mlxsw_core_max_ports(mlxsw_core); local_port++) {
                if (!mlxsw_sp->ports[local_port])
                        continue;
                mlxsw_reg_sbsr_ingress_port_mask_set(sbsr_pl, local_port, 1);
@@ -847,7 +890,7 @@ do_query:
                                    cb_priv);
        if (err)
                goto out;
-       if (local_port < MLXSW_PORT_MAX_PORTS)
+       if (local_port < mlxsw_core_max_ports(mlxsw_core))
                goto next_batch;
 
 out:
@@ -882,7 +925,7 @@ next_batch:
                mlxsw_reg_sbsr_pg_buff_mask_set(sbsr_pl, i, 1);
                mlxsw_reg_sbsr_tclass_mask_set(sbsr_pl, i, 1);
        }
-       for (; local_port < MLXSW_PORT_MAX_PORTS; local_port++) {
+       for (; local_port < mlxsw_core_max_ports(mlxsw_core); local_port++) {
                if (!mlxsw_sp->ports[local_port])
                        continue;
                mlxsw_reg_sbsr_ingress_port_mask_set(sbsr_pl, local_port, 1);
@@ -908,7 +951,7 @@ do_query:
                                    &bulk_list, NULL, 0);
        if (err)
                goto out;
-       if (local_port < MLXSW_PORT_MAX_PORTS)
+       if (local_port < mlxsw_core_max_ports(mlxsw_core))
                goto next_batch;
 
 out:
@@ -932,8 +975,8 @@ int mlxsw_sp_sb_occ_port_pool_get(struct mlxsw_core_port *mlxsw_core_port,
        struct mlxsw_sp_sb_pm *pm = mlxsw_sp_sb_pm_get(mlxsw_sp, local_port,
                                                       pool, dir);
 
-       *p_cur = MLXSW_SP_CELLS_TO_BYTES(pm->occ.cur);
-       *p_max = MLXSW_SP_CELLS_TO_BYTES(pm->occ.max);
+       *p_cur = mlxsw_sp_cells_bytes(mlxsw_sp, pm->occ.cur);
+       *p_max = mlxsw_sp_cells_bytes(mlxsw_sp, pm->occ.max);
        return 0;
 }
 
@@ -951,7 +994,7 @@ int mlxsw_sp_sb_occ_tc_port_bind_get(struct mlxsw_core_port *mlxsw_core_port,
        struct mlxsw_sp_sb_cm *cm = mlxsw_sp_sb_cm_get(mlxsw_sp, local_port,
                                                       pg_buff, dir);
 
-       *p_cur = MLXSW_SP_CELLS_TO_BYTES(cm->occ.cur);
-       *p_max = MLXSW_SP_CELLS_TO_BYTES(cm->occ.max);
+       *p_cur = mlxsw_sp_cells_bytes(mlxsw_sp, cm->occ.cur);
+       *p_max = mlxsw_sp_cells_bytes(mlxsw_sp, cm->occ.max);
        return 0;
 }
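
The recurring substitution in this file, MLXSW_SP_BYTES_TO_CELLS()/MLXSW_SP_CELLS_TO_BYTES() replaced by mlxsw_sp_bytes_cells()/mlxsw_sp_cells_bytes(), moves the byte/cell conversion from a compile-time constant to the CELL_SIZE resource queried from firmware in mlxsw_sp_buffers_init(). A minimal sketch of what such helpers look like, assuming the mlxsw_sp->sb.cell_size field set above; the actual definitions belong in spectrum.h, which is not part of this hunk:

static inline u32 mlxsw_sp_bytes_cells(const struct mlxsw_sp *mlxsw_sp,
				       u32 bytes)
{
	/* Round up so a partial cell still reserves a whole cell. */
	return DIV_ROUND_UP(bytes, mlxsw_sp->sb.cell_size);
}

static inline u32 mlxsw_sp_cells_bytes(const struct mlxsw_sp *mlxsw_sp,
				       u32 cells)
{
	return cells * mlxsw_sp->sb.cell_size;
}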
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_cnt.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_cnt.c
new file mode 100644 (file)
index 0000000..0f46775
--- /dev/null
@@ -0,0 +1,207 @@
+/*
+ * drivers/net/ethernet/mellanox/mlxsw/spectrum_cnt.c
+ * Copyright (c) 2017 Mellanox Technologies. All rights reserved.
+ * Copyright (c) 2017 Arkadi Sharshevsky <arkadis@mellanox.com>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. Neither the names of the copyright holders nor the names of its
+ *    contributors may be used to endorse or promote products derived from
+ *    this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <linux/kernel.h>
+#include <linux/bitops.h>
+
+#include "spectrum_cnt.h"
+
+#define MLXSW_SP_COUNTER_POOL_BANK_SIZE 4096
+
+struct mlxsw_sp_counter_sub_pool {
+       unsigned int base_index;
+       unsigned int size;
+       unsigned int entry_size;
+       unsigned int bank_count;
+};
+
+struct mlxsw_sp_counter_pool {
+       unsigned int pool_size;
+       unsigned long *usage; /* Usage bitmap */
+       struct mlxsw_sp_counter_sub_pool *sub_pools;
+};
+
+static struct mlxsw_sp_counter_sub_pool mlxsw_sp_counter_sub_pools[] = {
+       [MLXSW_SP_COUNTER_SUB_POOL_FLOW] = {
+               .bank_count = 6,
+       },
+       [MLXSW_SP_COUNTER_SUB_POOL_RIF] = {
+               .bank_count = 2,
+       }
+};
+
+static int mlxsw_sp_counter_pool_validate(struct mlxsw_sp *mlxsw_sp)
+{
+       unsigned int total_bank_config = 0;
+       unsigned int pool_size;
+       int i;
+
+       pool_size = MLXSW_CORE_RES_GET(mlxsw_sp->core, COUNTER_POOL_SIZE);
+       /* Check the config is valid: no bank oversubscription */
+       for (i = 0; i < ARRAY_SIZE(mlxsw_sp_counter_sub_pools); i++)
+               total_bank_config += mlxsw_sp_counter_sub_pools[i].bank_count;
+       if (total_bank_config > pool_size / MLXSW_SP_COUNTER_POOL_BANK_SIZE + 1)
+               return -EINVAL;
+       return 0;
+}
+
+static int mlxsw_sp_counter_sub_pools_prepare(struct mlxsw_sp *mlxsw_sp)
+{
+       struct mlxsw_sp_counter_sub_pool *sub_pool;
+
+       /* Prepare generic flow pool */
+       sub_pool = &mlxsw_sp_counter_sub_pools[MLXSW_SP_COUNTER_SUB_POOL_FLOW];
+       if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, COUNTER_SIZE_PACKETS_BYTES))
+               return -EIO;
+       sub_pool->entry_size = MLXSW_CORE_RES_GET(mlxsw_sp->core,
+                                                 COUNTER_SIZE_PACKETS_BYTES);
+       /* Prepare erif pool */
+       sub_pool = &mlxsw_sp_counter_sub_pools[MLXSW_SP_COUNTER_SUB_POOL_RIF];
+       if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, COUNTER_SIZE_ROUTER_BASIC))
+               return -EIO;
+       sub_pool->entry_size = MLXSW_CORE_RES_GET(mlxsw_sp->core,
+                                                 COUNTER_SIZE_ROUTER_BASIC);
+       return 0;
+}
+
+int mlxsw_sp_counter_pool_init(struct mlxsw_sp *mlxsw_sp)
+{
+       struct mlxsw_sp_counter_sub_pool *sub_pool;
+       struct mlxsw_sp_counter_pool *pool;
+       unsigned int base_index;
+       unsigned int map_size;
+       int i;
+       int err;
+
+       if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, COUNTER_POOL_SIZE))
+               return -EIO;
+
+       err = mlxsw_sp_counter_pool_validate(mlxsw_sp);
+       if (err)
+               return err;
+
+       err = mlxsw_sp_counter_sub_pools_prepare(mlxsw_sp);
+       if (err)
+               return err;
+
+       pool = kzalloc(sizeof(*pool), GFP_KERNEL);
+       if (!pool)
+               return -ENOMEM;
+
+       pool->pool_size = MLXSW_CORE_RES_GET(mlxsw_sp->core, COUNTER_POOL_SIZE);
+       map_size = BITS_TO_LONGS(pool->pool_size) * sizeof(unsigned long);
+
+       pool->usage = kzalloc(map_size, GFP_KERNEL);
+       if (!pool->usage) {
+               err = -ENOMEM;
+               goto err_usage_alloc;
+       }
+
+       pool->sub_pools = mlxsw_sp_counter_sub_pools;
+       /* Allocation is based on the bank count, which must be
+        * specified statically for each sub-pool.
+        */
+       base_index = 0;
+       for (i = 0; i < ARRAY_SIZE(mlxsw_sp_counter_sub_pools); i++) {
+               sub_pool = &pool->sub_pools[i];
+               sub_pool->size = sub_pool->bank_count *
+                                MLXSW_SP_COUNTER_POOL_BANK_SIZE;
+               sub_pool->base_index = base_index;
+               base_index += sub_pool->size;
+               /* The last bank can't be fully used */
+               if (sub_pool->base_index + sub_pool->size > pool->pool_size)
+                       sub_pool->size = pool->pool_size - sub_pool->base_index;
+       }
+
+       mlxsw_sp->counter_pool = pool;
+       return 0;
+
+err_usage_alloc:
+       kfree(pool);
+       return err;
+}
+
+void mlxsw_sp_counter_pool_fini(struct mlxsw_sp *mlxsw_sp)
+{
+       struct mlxsw_sp_counter_pool *pool = mlxsw_sp->counter_pool;
+
+       WARN_ON(find_first_bit(pool->usage, pool->pool_size) !=
+                              pool->pool_size);
+       kfree(pool->usage);
+       kfree(pool);
+}
+
+int mlxsw_sp_counter_alloc(struct mlxsw_sp *mlxsw_sp,
+                          enum mlxsw_sp_counter_sub_pool_id sub_pool_id,
+                          unsigned int *p_counter_index)
+{
+       struct mlxsw_sp_counter_pool *pool = mlxsw_sp->counter_pool;
+       struct mlxsw_sp_counter_sub_pool *sub_pool;
+       unsigned int entry_index;
+       unsigned int stop_index;
+       int i;
+
+       sub_pool = &mlxsw_sp_counter_sub_pools[sub_pool_id];
+       stop_index = sub_pool->base_index + sub_pool->size;
+       entry_index = sub_pool->base_index;
+
+       entry_index = find_next_zero_bit(pool->usage, stop_index, entry_index);
+       if (entry_index == stop_index)
+               return -ENOBUFS;
+       /* A sub-pool may hold a non-integer number of entries, so
+        * check that the allocation does not overflow its range.
+        */
+       if (entry_index + sub_pool->entry_size > stop_index)
+               return -ENOBUFS;
+       for (i = 0; i < sub_pool->entry_size; i++)
+               __set_bit(entry_index + i, pool->usage);
+
+       *p_counter_index = entry_index;
+       return 0;
+}
+
+void mlxsw_sp_counter_free(struct mlxsw_sp *mlxsw_sp,
+                          enum mlxsw_sp_counter_sub_pool_id sub_pool_id,
+                          unsigned int counter_index)
+{
+       struct mlxsw_sp_counter_pool *pool = mlxsw_sp->counter_pool;
+       struct mlxsw_sp_counter_sub_pool *sub_pool;
+       int i;
+
+       if (WARN_ON(counter_index >= pool->pool_size))
+               return;
+       sub_pool = &mlxsw_sp_counter_sub_pools[sub_pool_id];
+       for (i = 0; i < sub_pool->entry_size; i++)
+               __clear_bit(counter_index + i, pool->usage);
+}
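
mlxsw_sp_counter_alloc() reserves entry_size consecutive bits from the shared usage bitmap, confined to the chosen sub-pool's bank range. A hedged usage sketch follows; the wrapper function is hypothetical, only the two pool calls and the sub-pool id come from this file:

/* Illustrative only: allocate a flow-counter entry, then release it. */
static int example_flow_counter_cycle(struct mlxsw_sp *mlxsw_sp)
{
	unsigned int counter_index;
	int err;

	err = mlxsw_sp_counter_alloc(mlxsw_sp, MLXSW_SP_COUNTER_SUB_POOL_FLOW,
				     &counter_index);
	if (err)
		return err; /* -ENOBUFS once the sub-pool is exhausted */

	/* ... bind counter_index to a rule via the relevant register ... */

	mlxsw_sp_counter_free(mlxsw_sp, MLXSW_SP_COUNTER_SUB_POOL_FLOW,
			      counter_index);
	return 0;
}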
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_cnt.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum_cnt.h
new file mode 100644 (file)
index 0000000..fd34d0a
--- /dev/null
@@ -0,0 +1,54 @@
+/*
+ * drivers/net/ethernet/mellanox/mlxsw/spectrum_cnt.h
+ * Copyright (c) 2017 Mellanox Technologies. All rights reserved.
+ * Copyright (c) 2017 Arkadi Sharshevsky <arkadis@mellanox.com>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. Neither the names of the copyright holders nor the names of its
+ *    contributors may be used to endorse or promote products derived from
+ *    this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _MLXSW_SPECTRUM_CNT_H
+#define _MLXSW_SPECTRUM_CNT_H
+
+#include "spectrum.h"
+
+enum mlxsw_sp_counter_sub_pool_id {
+       MLXSW_SP_COUNTER_SUB_POOL_FLOW,
+       MLXSW_SP_COUNTER_SUB_POOL_RIF,
+};
+
+int mlxsw_sp_counter_alloc(struct mlxsw_sp *mlxsw_sp,
+                          enum mlxsw_sp_counter_sub_pool_id sub_pool_id,
+                          unsigned int *p_counter_index);
+void mlxsw_sp_counter_free(struct mlxsw_sp *mlxsw_sp,
+                          enum mlxsw_sp_counter_sub_pool_id sub_pool_id,
+                          unsigned int counter_index);
+int mlxsw_sp_counter_pool_init(struct mlxsw_sp *mlxsw_sp);
+void mlxsw_sp_counter_pool_fini(struct mlxsw_sp *mlxsw_sp);
+
+#endif
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_dpipe.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_dpipe.c
new file mode 100644 (file)
index 0000000..ea56f6a
--- /dev/null
@@ -0,0 +1,351 @@
+/*
+ * drivers/net/ethernet/mellanox/mlxsw/spectrum_dpipe.c
+ * Copyright (c) 2017 Mellanox Technologies. All rights reserved.
+ * Copyright (c) 2017 Arkadi Sharshevsky <arkadis@mellanox.com>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. Neither the names of the copyright holders nor the names of its
+ *    contributors may be used to endorse or promote products derived from
+ *    this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <linux/kernel.h>
+#include <net/devlink.h>
+
+#include "spectrum.h"
+#include "spectrum_dpipe.h"
+#include "spectrum_router.h"
+
+enum mlxsw_sp_field_metadata_id {
+       MLXSW_SP_DPIPE_FIELD_METADATA_ERIF_PORT,
+       MLXSW_SP_DPIPE_FIELD_METADATA_L3_FORWARD,
+       MLXSW_SP_DPIPE_FIELD_METADATA_L3_DROP,
+};
+
+static struct devlink_dpipe_field mlxsw_sp_dpipe_fields_metadata[] = {
+       { .name = "erif_port",
+         .id = MLXSW_SP_DPIPE_FIELD_METADATA_ERIF_PORT,
+         .bitwidth = 32,
+         .mapping_type = DEVLINK_DPIPE_FIELD_MAPPING_TYPE_IFINDEX,
+       },
+       { .name = "l3_forward",
+         .id = MLXSW_SP_DPIPE_FIELD_METADATA_L3_FORWARD,
+         .bitwidth = 1,
+       },
+       { .name = "l3_drop",
+         .id = MLXSW_SP_DPIPE_FIELD_METADATA_L3_DROP,
+         .bitwidth = 1,
+       },
+};
+
+enum mlxsw_sp_dpipe_header_id {
+       MLXSW_SP_DPIPE_HEADER_METADATA,
+};
+
+static struct devlink_dpipe_header mlxsw_sp_dpipe_header_metadata = {
+       .name = "mlxsw_meta",
+       .id = MLXSW_SP_DPIPE_HEADER_METADATA,
+       .fields = mlxsw_sp_dpipe_fields_metadata,
+       .fields_count = ARRAY_SIZE(mlxsw_sp_dpipe_fields_metadata),
+};
+
+static struct devlink_dpipe_header *mlxsw_dpipe_headers[] = {
+       &mlxsw_sp_dpipe_header_metadata,
+};
+
+static struct devlink_dpipe_headers mlxsw_sp_dpipe_headers = {
+       .headers = mlxsw_dpipe_headers,
+       .headers_count = ARRAY_SIZE(mlxsw_dpipe_headers),
+};
+
+static int mlxsw_sp_dpipe_table_erif_actions_dump(void *priv,
+                                                 struct sk_buff *skb)
+{
+       struct devlink_dpipe_action action = {0};
+       int err;
+
+       action.type = DEVLINK_DPIPE_ACTION_TYPE_FIELD_MODIFY;
+       action.header = &mlxsw_sp_dpipe_header_metadata;
+       action.field_id = MLXSW_SP_DPIPE_FIELD_METADATA_L3_FORWARD;
+
+       err = devlink_dpipe_action_put(skb, &action);
+       if (err)
+               return err;
+
+       action.type = DEVLINK_DPIPE_ACTION_TYPE_FIELD_MODIFY;
+       action.header = &mlxsw_sp_dpipe_header_metadata;
+       action.field_id = MLXSW_SP_DPIPE_FIELD_METADATA_L3_DROP;
+
+       return devlink_dpipe_action_put(skb, &action);
+}
+
+static int mlxsw_sp_dpipe_table_erif_matches_dump(void *priv,
+                                                 struct sk_buff *skb)
+{
+       struct devlink_dpipe_match match = {0};
+
+       match.type = DEVLINK_DPIPE_MATCH_TYPE_FIELD_EXACT;
+       match.header = &mlxsw_sp_dpipe_header_metadata;
+       match.field_id = MLXSW_SP_DPIPE_FIELD_METADATA_ERIF_PORT;
+
+       return devlink_dpipe_match_put(skb, &match);
+}
+
+static void mlxsw_sp_erif_entry_clear(struct devlink_dpipe_entry *entry)
+{
+       unsigned int value_count, value_index;
+       struct devlink_dpipe_value *value;
+
+       value = entry->action_values;
+       value_count = entry->action_values_count;
+       for (value_index = 0; value_index < value_count; value_index++) {
+               kfree(value[value_index].value);
+               kfree(value[value_index].mask);
+       }
+
+       value = entry->match_values;
+       value_count = entry->match_values_count;
+       for (value_index = 0; value_index < value_count; value_index++) {
+               kfree(value[value_index].value);
+               kfree(value[value_index].mask);
+       }
+}
+
+static void
+mlxsw_sp_erif_match_action_prepare(struct devlink_dpipe_match *match,
+                                  struct devlink_dpipe_action *action)
+{
+       action->type = DEVLINK_DPIPE_ACTION_TYPE_FIELD_MODIFY;
+       action->header = &mlxsw_sp_dpipe_header_metadata;
+       action->field_id = MLXSW_SP_DPIPE_FIELD_METADATA_L3_FORWARD;
+
+       match->type = DEVLINK_DPIPE_MATCH_TYPE_FIELD_EXACT;
+       match->header = &mlxsw_sp_dpipe_header_metadata;
+       match->field_id = MLXSW_SP_DPIPE_FIELD_METADATA_ERIF_PORT;
+}
+
+static int mlxsw_sp_erif_entry_prepare(struct devlink_dpipe_entry *entry,
+                                      struct devlink_dpipe_value *match_value,
+                                      struct devlink_dpipe_match *match,
+                                      struct devlink_dpipe_value *action_value,
+                                      struct devlink_dpipe_action *action)
+{
+       entry->match_values = match_value;
+       entry->match_values_count = 1;
+
+       entry->action_values = action_value;
+       entry->action_values_count = 1;
+
+       match_value->match = match;
+       match_value->value_size = sizeof(u32);
+       match_value->value = kmalloc(match_value->value_size, GFP_KERNEL);
+       if (!match_value->value)
+               return -ENOMEM;
+
+       action_value->action = action;
+       action_value->value_size = sizeof(u32);
+       action_value->value = kmalloc(action_value->value_size, GFP_KERNEL);
+       if (!action_value->value)
+               goto err_action_alloc;
+       return 0;
+
+err_action_alloc:
+       kfree(match_value->value);
+       return -ENOMEM;
+}
+
+static int mlxsw_sp_erif_entry_get(struct mlxsw_sp *mlxsw_sp,
+                                  struct devlink_dpipe_entry *entry,
+                                  struct mlxsw_sp_rif *rif,
+                                  bool counters_enabled)
+{
+       u32 *action_value;
+       u32 *rif_value;
+       u64 cnt;
+       int err;
+
+       /* Set Match RIF index */
+       rif_value = entry->match_values->value;
+       *rif_value = mlxsw_sp_rif_index(rif);
+       entry->match_values->mapping_value = mlxsw_sp_rif_dev_ifindex(rif);
+       entry->match_values->mapping_valid = true;
+
+       /* Set Action Forwarding */
+       action_value = entry->action_values->value;
+       *action_value = 1;
+
+       entry->counter_valid = false;
+       entry->counter = 0;
+       if (!counters_enabled)
+               return 0;
+
+       entry->index = mlxsw_sp_rif_index(rif);
+       err = mlxsw_sp_rif_counter_value_get(mlxsw_sp, rif,
+                                            MLXSW_SP_RIF_COUNTER_EGRESS,
+                                            &cnt);
+       if (!err) {
+               entry->counter = cnt;
+               entry->counter_valid = true;
+       }
+       return 0;
+}
+
+static int
+mlxsw_sp_table_erif_entries_dump(void *priv, bool counters_enabled,
+                                struct devlink_dpipe_dump_ctx *dump_ctx)
+{
+       struct devlink_dpipe_value match_value = {{0}}, action_value = {{0}};
+       struct devlink_dpipe_action action = {0};
+       struct devlink_dpipe_match match = {0};
+       struct devlink_dpipe_entry entry = {0};
+       struct mlxsw_sp *mlxsw_sp = priv;
+       unsigned int rif_count;
+       int i, j;
+       int err;
+
+       mlxsw_sp_erif_match_action_prepare(&match, &action);
+       err = mlxsw_sp_erif_entry_prepare(&entry, &match_value, &match,
+                                         &action_value, &action);
+       if (err)
+               return err;
+
+       rif_count = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS);
+       rtnl_lock();
+       i = 0;
+start_again:
+       err = devlink_dpipe_entry_ctx_prepare(dump_ctx);
+       if (err)
+               goto err_ctx_prepare;
+       j = 0;
+       for (; i < rif_count; i++) {
+               if (!mlxsw_sp->rifs[i])
+                       continue;
+               err = mlxsw_sp_erif_entry_get(mlxsw_sp, &entry,
+                                             mlxsw_sp->rifs[i],
+                                             counters_enabled);
+               if (err)
+                       goto err_entry_get;
+               err = devlink_dpipe_entry_ctx_append(dump_ctx, &entry);
+               if (err) {
+                       if (err == -EMSGSIZE) {
+                               if (!j)
+                                       goto err_entry_append;
+                               break;
+                       }
+                       goto err_entry_append;
+               }
+               j++;
+       }
+
+       devlink_dpipe_entry_ctx_close(dump_ctx);
+       if (i != rif_count)
+               goto start_again;
+       rtnl_unlock();
+
+       mlxsw_sp_erif_entry_clear(&entry);
+       return 0;
+err_entry_append:
+err_entry_get:
+err_ctx_prepare:
+       rtnl_unlock();
+       mlxsw_sp_erif_entry_clear(&entry);
+       return err;
+}
+
+static int mlxsw_sp_table_erif_counters_update(void *priv, bool enable)
+{
+       struct mlxsw_sp *mlxsw_sp = priv;
+       int i;
+
+       rtnl_lock();
+       for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS); i++) {
+               if (!mlxsw_sp->rifs[i])
+                       continue;
+               if (enable)
+                       mlxsw_sp_rif_counter_alloc(mlxsw_sp,
+                                                  mlxsw_sp->rifs[i],
+                                                  MLXSW_SP_RIF_COUNTER_EGRESS);
+               else
+                       mlxsw_sp_rif_counter_free(mlxsw_sp,
+                                                 mlxsw_sp->rifs[i],
+                                                 MLXSW_SP_RIF_COUNTER_EGRESS);
+       }
+       rtnl_unlock();
+       return 0;
+}
+
+static struct devlink_dpipe_table_ops mlxsw_sp_erif_ops = {
+       .matches_dump = mlxsw_sp_dpipe_table_erif_matches_dump,
+       .actions_dump = mlxsw_sp_dpipe_table_erif_actions_dump,
+       .entries_dump = mlxsw_sp_table_erif_entries_dump,
+       .counters_set_update = mlxsw_sp_table_erif_counters_update,
+};
+
+static int mlxsw_sp_dpipe_erif_table_init(struct mlxsw_sp *mlxsw_sp)
+{
+       struct devlink *devlink = priv_to_devlink(mlxsw_sp->core);
+       u64 table_size;
+
+       table_size = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS);
+       return devlink_dpipe_table_register(devlink,
+                                           MLXSW_SP_DPIPE_TABLE_NAME_ERIF,
+                                           &mlxsw_sp_erif_ops,
+                                           mlxsw_sp, table_size,
+                                           false);
+}
+
+static void mlxsw_sp_dpipe_erif_table_fini(struct mlxsw_sp *mlxsw_sp)
+{
+       struct devlink *devlink = priv_to_devlink(mlxsw_sp->core);
+
+       devlink_dpipe_table_unregister(devlink, MLXSW_SP_DPIPE_TABLE_NAME_ERIF);
+}
+
+int mlxsw_sp_dpipe_init(struct mlxsw_sp *mlxsw_sp)
+{
+       struct devlink *devlink = priv_to_devlink(mlxsw_sp->core);
+       int err;
+
+       err = devlink_dpipe_headers_register(devlink,
+                                            &mlxsw_sp_dpipe_headers);
+       if (err)
+               return err;
+       err = mlxsw_sp_dpipe_erif_table_init(mlxsw_sp);
+       if (err)
+               goto err_erif_register;
+       return 0;
+
+err_erif_register:
+       devlink_dpipe_headers_unregister(priv_to_devlink(mlxsw_sp->core));
+       return err;
+}
+
+void mlxsw_sp_dpipe_fini(struct mlxsw_sp *mlxsw_sp)
+{
+       struct devlink *devlink = priv_to_devlink(mlxsw_sp->core);
+
+       mlxsw_sp_dpipe_erif_table_fini(mlxsw_sp);
+       devlink_dpipe_headers_unregister(devlink);
+}
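
mlxsw_sp_table_erif_entries_dump() implements the generic devlink dpipe batching contract: prepare a dump context, append entries until -EMSGSIZE, close the context to flush the batch, then restart from the first entry that did not fit. Stripped of the eRIF specifics, the pattern looks roughly like this; it is a sketch, and fill_entry is an abstract callback, not a devlink API:

/* Generic shape of a dpipe entries dump under the batching contract. */
static int example_entries_dump(struct devlink_dpipe_dump_ctx *dump_ctx,
				struct devlink_dpipe_entry *entry,
				unsigned int count,
				void (*fill_entry)(struct devlink_dpipe_entry *,
						   unsigned int))
{
	unsigned int i = 0, in_batch;
	int err;

start_again:
	err = devlink_dpipe_entry_ctx_prepare(dump_ctx);
	if (err)
		return err;
	in_batch = 0;
	for (; i < count; i++) {
		fill_entry(entry, i);
		err = devlink_dpipe_entry_ctx_append(dump_ctx, entry);
		if (err == -EMSGSIZE && in_batch) {
			/* Flush what fits, then retry entry i. */
			break;
		}
		if (err)
			return err;
		in_batch++;
	}
	devlink_dpipe_entry_ctx_close(dump_ctx);
	if (i != count)
		goto start_again;
	return 0;
}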
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_dpipe.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum_dpipe.h
new file mode 100644 (file)
index 0000000..d208929
--- /dev/null
@@ -0,0 +1,43 @@
+/*
+ * drivers/net/ethernet/mellanox/mlxsw/spectrum_dpipe.h
+ * Copyright (c) 2017 Mellanox Technologies. All rights reserved.
+ * Copyright (c) 2017 Arkadi Sharshevsky <arkadis@mellanox.com>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. Neither the names of the copyright holders nor the names of its
+ *    contributors may be used to endorse or promote products derived from
+ *    this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _MLXSW_PIPELINE_H_
+#define _MLXSW_PIPELINE_H_
+
+int mlxsw_sp_dpipe_init(struct mlxsw_sp *mlxsw_sp);
+void mlxsw_sp_dpipe_fini(struct mlxsw_sp *mlxsw_sp);
+
+#define MLXSW_SP_DPIPE_TABLE_NAME_ERIF "mlxsw_erif"
+
+#endif /* _MLXSW_PIPELINE_H_ */
index 22ab429253778d2a22e4c59c742f8f6778e57f40..3e7a0bcbba72d56d74eb5f3c7d42056487bbc4cd 100644 (file)
@@ -39,6 +39,7 @@
 #include <net/pkt_cls.h>
 #include <net/tc_act/tc_gact.h>
 #include <net/tc_act/tc_mirred.h>
+#include <net/tc_act/tc_vlan.h>
 
 #include "spectrum.h"
 #include "core_acl_flex_keys.h"
@@ -55,6 +56,11 @@ static int mlxsw_sp_flower_parse_actions(struct mlxsw_sp *mlxsw_sp,
        if (tc_no_actions(exts))
                return 0;
 
+       /* Count action is inserted first */
+       err = mlxsw_sp_acl_rulei_act_count(mlxsw_sp, rulei);
+       if (err)
+               return err;
+
        tcf_exts_to_list(exts, &actions);
        list_for_each_entry(a, &actions, list) {
                if (is_tcf_gact_shot(a)) {
@@ -73,6 +79,15 @@ static int mlxsw_sp_flower_parse_actions(struct mlxsw_sp *mlxsw_sp,
                                                         out_dev);
                        if (err)
                                return err;
+               } else if (is_tcf_vlan(a)) {
+                       u16 proto = be16_to_cpu(tcf_vlan_push_proto(a));
+                       u32 action = tcf_vlan_action(a);
+                       u8 prio = tcf_vlan_push_prio(a);
+                       u16 vid = tcf_vlan_push_vid(a);
+
+                       err = mlxsw_sp_acl_rulei_act_vlan(mlxsw_sp, rulei,
+                                                         action, vid,
+                                                         proto, prio);
+                       if (err)
+                               return err;
                } else {
                        dev_err(mlxsw_sp->bus_info->dev, "Unsupported action\n");
                        return -EOPNOTSUPP;
@@ -173,7 +188,8 @@ static int mlxsw_sp_flower_parse(struct mlxsw_sp *mlxsw_sp,
              BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS) |
              BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS) |
              BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS) |
-             BIT(FLOW_DISSECTOR_KEY_PORTS))) {
+             BIT(FLOW_DISSECTOR_KEY_PORTS) |
+             BIT(FLOW_DISSECTOR_KEY_VLAN))) {
                dev_err(mlxsw_sp->bus_info->dev, "Unsupported key\n");
                return -EOPNOTSUPP;
        }
@@ -234,6 +250,27 @@ static int mlxsw_sp_flower_parse(struct mlxsw_sp *mlxsw_sp,
                                               sizeof(key->src));
        }
 
+       if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_VLAN)) {
+               struct flow_dissector_key_vlan *key =
+                       skb_flow_dissector_target(f->dissector,
+                                                 FLOW_DISSECTOR_KEY_VLAN,
+                                                 f->key);
+               struct flow_dissector_key_vlan *mask =
+                       skb_flow_dissector_target(f->dissector,
+                                                 FLOW_DISSECTOR_KEY_VLAN,
+                                                 f->mask);
+               if (mask->vlan_id != 0)
+                       mlxsw_sp_acl_rulei_keymask_u32(rulei,
+                                                      MLXSW_AFK_ELEMENT_VID,
+                                                      key->vlan_id,
+                                                      mask->vlan_id);
+               if (mask->vlan_priority != 0)
+                       mlxsw_sp_acl_rulei_keymask_u32(rulei,
+                                                      MLXSW_AFK_ELEMENT_PCP,
+                                                      key->vlan_priority,
+                                                      mask->vlan_priority);
+       }
+
        if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS)
                mlxsw_sp_flower_parse_ipv4(rulei, f);
 
@@ -303,14 +340,58 @@ void mlxsw_sp_flower_destroy(struct mlxsw_sp_port *mlxsw_sp_port, bool ingress,
        ruleset = mlxsw_sp_acl_ruleset_get(mlxsw_sp, mlxsw_sp_port->dev,
                                           ingress,
                                           MLXSW_SP_ACL_PROFILE_FLOWER);
-       if (WARN_ON(IS_ERR(ruleset)))
+       if (IS_ERR(ruleset))
                return;
 
        rule = mlxsw_sp_acl_rule_lookup(mlxsw_sp, ruleset, f->cookie);
-       if (!WARN_ON(!rule)) {
+       if (rule) {
                mlxsw_sp_acl_rule_del(mlxsw_sp, rule);
                mlxsw_sp_acl_rule_destroy(mlxsw_sp, rule);
        }
 
        mlxsw_sp_acl_ruleset_put(mlxsw_sp, ruleset);
 }
+
+int mlxsw_sp_flower_stats(struct mlxsw_sp_port *mlxsw_sp_port, bool ingress,
+                         struct tc_cls_flower_offload *f)
+{
+       struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+       struct mlxsw_sp_acl_ruleset *ruleset;
+       struct mlxsw_sp_acl_rule *rule;
+       struct tc_action *a;
+       LIST_HEAD(actions);
+       u64 packets;
+       u64 lastuse;
+       u64 bytes;
+       int err;
+
+       ruleset = mlxsw_sp_acl_ruleset_get(mlxsw_sp, mlxsw_sp_port->dev,
+                                          ingress,
+                                          MLXSW_SP_ACL_PROFILE_FLOWER);
+       if (WARN_ON(IS_ERR(ruleset)))
+               return -EINVAL;
+
+       rule = mlxsw_sp_acl_rule_lookup(mlxsw_sp, ruleset, f->cookie);
+       if (!rule)
+               return -EINVAL;
+
+       err = mlxsw_sp_acl_rule_get_stats(mlxsw_sp, rule, &packets, &bytes,
+                                         &lastuse);
+       if (err)
+               goto err_rule_get_stats;
+
+       preempt_disable();
+
+       tcf_exts_to_list(f->exts, &actions);
+       list_for_each_entry(a, &actions, list)
+               tcf_action_stats_update(a, bytes, packets, lastuse);
+
+       preempt_enable();
+
+       mlxsw_sp_acl_ruleset_put(mlxsw_sp, ruleset);
+       return 0;
+
+err_rule_get_stats:
+       mlxsw_sp_acl_ruleset_put(mlxsw_sp, ruleset);
+       return err;
+}
index ac321e8e5c1ac4cb85ce90b53b7907cddd3e859b..26c26cd30c3d4038948fcd79028a3b853d3651c7 100644 (file)
@@ -45,7 +45,8 @@
        (MLXSW_SP_KVD_LINEAR_SIZE - MLXSW_SP_KVDL_CHUNKS_BASE)
 #define MLXSW_SP_CHUNK_MAX 32
 
-int mlxsw_sp_kvdl_alloc(struct mlxsw_sp *mlxsw_sp, unsigned int entry_count)
+int mlxsw_sp_kvdl_alloc(struct mlxsw_sp *mlxsw_sp, unsigned int entry_count,
+                       u32 *p_entry_index)
 {
        int entry_index;
        int size;
@@ -72,7 +73,8 @@ int mlxsw_sp_kvdl_alloc(struct mlxsw_sp *mlxsw_sp, unsigned int entry_count)
 
                for (i = 0; i < type_entries; i++)
                        set_bit(entry_index + i, mlxsw_sp->kvdl.usage);
-               return entry_index;
+               *p_entry_index = entry_index;
+               return 0;
        }
        return -ENOBUFS;
 }
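
The signature change turns the allocated index into an out-parameter, so the int return now carries only 0 or -errno. An illustrative call site under the new contract; the wrapper is hypothetical, and the pairing mlxsw_sp_kvdl_free() is assumed from the same file:

/* Hypothetical caller showing the new out-parameter contract. */
static int example_kvdl_user(struct mlxsw_sp *mlxsw_sp)
{
	u32 kvdl_index;
	int err;

	err = mlxsw_sp_kvdl_alloc(mlxsw_sp, MLXSW_SP_CHUNK_MAX, &kvdl_index);
	if (err)
		return err; /* -ENOBUFS when no free run of entries exists */

	/* ... program the entries starting at kvdl_index ... */

	mlxsw_sp_kvdl_free(mlxsw_sp, kvdl_index);
	return 0;
}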
index bd8de6b9be718f967ca6967a06c00be21d2e3b6c..c70c59181014d96ce169231000eed109644b0c79 100644 (file)
 #include <linux/in6.h>
 #include <linux/notifier.h>
 #include <linux/inetdevice.h>
+#include <linux/netdevice.h>
 #include <net/netevent.h>
 #include <net/neighbour.h>
 #include <net/arp.h>
 #include <net/ip_fib.h>
+#include <net/fib_rules.h>
+#include <net/l3mdev.h>
 
 #include "spectrum.h"
 #include "core.h"
 #include "reg.h"
+#include "spectrum_cnt.h"
+#include "spectrum_dpipe.h"
+#include "spectrum_router.h"
+
+struct mlxsw_sp_rif {
+       struct list_head nexthop_list;
+       struct list_head neigh_list;
+       struct net_device *dev;
+       struct mlxsw_sp_fid *f;
+       unsigned char addr[ETH_ALEN];
+       int mtu;
+       u16 rif_index;
+       u16 vr_id;
+       unsigned int counter_ingress;
+       bool counter_ingress_valid;
+       unsigned int counter_egress;
+       bool counter_egress_valid;
+};
+
+static unsigned int *
+mlxsw_sp_rif_p_counter_get(struct mlxsw_sp_rif *rif,
+                          enum mlxsw_sp_rif_counter_dir dir)
+{
+       switch (dir) {
+       case MLXSW_SP_RIF_COUNTER_EGRESS:
+               return &rif->counter_egress;
+       case MLXSW_SP_RIF_COUNTER_INGRESS:
+               return &rif->counter_ingress;
+       }
+       return NULL;
+}
+
+static bool
+mlxsw_sp_rif_counter_valid_get(struct mlxsw_sp_rif *rif,
+                              enum mlxsw_sp_rif_counter_dir dir)
+{
+       switch (dir) {
+       case MLXSW_SP_RIF_COUNTER_EGRESS:
+               return rif->counter_egress_valid;
+       case MLXSW_SP_RIF_COUNTER_INGRESS:
+               return rif->counter_ingress_valid;
+       }
+       return false;
+}
+
+static void
+mlxsw_sp_rif_counter_valid_set(struct mlxsw_sp_rif *rif,
+                              enum mlxsw_sp_rif_counter_dir dir,
+                              bool valid)
+{
+       switch (dir) {
+       case MLXSW_SP_RIF_COUNTER_EGRESS:
+               rif->counter_egress_valid = valid;
+               break;
+       case MLXSW_SP_RIF_COUNTER_INGRESS:
+               rif->counter_ingress_valid = valid;
+               break;
+       }
+}
+
+static int mlxsw_sp_rif_counter_edit(struct mlxsw_sp *mlxsw_sp, u16 rif_index,
+                                    unsigned int counter_index, bool enable,
+                                    enum mlxsw_sp_rif_counter_dir dir)
+{
+       char ritr_pl[MLXSW_REG_RITR_LEN];
+       bool is_egress = false;
+       int err;
+
+       if (dir == MLXSW_SP_RIF_COUNTER_EGRESS)
+               is_egress = true;
+       mlxsw_reg_ritr_rif_pack(ritr_pl, rif_index);
+       err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
+       if (err)
+               return err;
+
+       mlxsw_reg_ritr_counter_pack(ritr_pl, counter_index, enable,
+                                   is_egress);
+       return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
+}
+
+int mlxsw_sp_rif_counter_value_get(struct mlxsw_sp *mlxsw_sp,
+                                  struct mlxsw_sp_rif *rif,
+                                  enum mlxsw_sp_rif_counter_dir dir, u64 *cnt)
+{
+       char ricnt_pl[MLXSW_REG_RICNT_LEN];
+       unsigned int *p_counter_index;
+       bool valid;
+       int err;
+
+       valid = mlxsw_sp_rif_counter_valid_get(rif, dir);
+       if (!valid)
+               return -EINVAL;
+
+       p_counter_index = mlxsw_sp_rif_p_counter_get(rif, dir);
+       if (!p_counter_index)
+               return -EINVAL;
+       mlxsw_reg_ricnt_pack(ricnt_pl, *p_counter_index,
+                            MLXSW_REG_RICNT_OPCODE_NOP);
+       err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ricnt), ricnt_pl);
+       if (err)
+               return err;
+       *cnt = mlxsw_reg_ricnt_good_unicast_packets_get(ricnt_pl);
+       return 0;
+}
+
+static int mlxsw_sp_rif_counter_clear(struct mlxsw_sp *mlxsw_sp,
+                                     unsigned int counter_index)
+{
+       char ricnt_pl[MLXSW_REG_RICNT_LEN];
+
+       mlxsw_reg_ricnt_pack(ricnt_pl, counter_index,
+                            MLXSW_REG_RICNT_OPCODE_CLEAR);
+       return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ricnt), ricnt_pl);
+}
+
+int mlxsw_sp_rif_counter_alloc(struct mlxsw_sp *mlxsw_sp,
+                              struct mlxsw_sp_rif *rif,
+                              enum mlxsw_sp_rif_counter_dir dir)
+{
+       unsigned int *p_counter_index;
+       int err;
+
+       p_counter_index = mlxsw_sp_rif_p_counter_get(rif, dir);
+       if (!p_counter_index)
+               return -EINVAL;
+       err = mlxsw_sp_counter_alloc(mlxsw_sp, MLXSW_SP_COUNTER_SUB_POOL_RIF,
+                                    p_counter_index);
+       if (err)
+               return err;
+
+       err = mlxsw_sp_rif_counter_clear(mlxsw_sp, *p_counter_index);
+       if (err)
+               goto err_counter_clear;
+
+       err = mlxsw_sp_rif_counter_edit(mlxsw_sp, rif->rif_index,
+                                       *p_counter_index, true, dir);
+       if (err)
+               goto err_counter_edit;
+       mlxsw_sp_rif_counter_valid_set(rif, dir, true);
+       return 0;
+
+err_counter_edit:
+err_counter_clear:
+       mlxsw_sp_counter_free(mlxsw_sp, MLXSW_SP_COUNTER_SUB_POOL_RIF,
+                             *p_counter_index);
+       return err;
+}
+
+void mlxsw_sp_rif_counter_free(struct mlxsw_sp *mlxsw_sp,
+                              struct mlxsw_sp_rif *rif,
+                              enum mlxsw_sp_rif_counter_dir dir)
+{
+       unsigned int *p_counter_index;
+
+       p_counter_index = mlxsw_sp_rif_p_counter_get(rif, dir);
+       if (WARN_ON(!p_counter_index))
+               return;
+       mlxsw_sp_rif_counter_edit(mlxsw_sp, rif->rif_index,
+                                 *p_counter_index, false, dir);
+       mlxsw_sp_counter_free(mlxsw_sp, MLXSW_SP_COUNTER_SUB_POOL_RIF,
+                             *p_counter_index);
+       mlxsw_sp_rif_counter_valid_set(rif, dir, false);
+}
+
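The routines above form the RIF counter lifecycle later consumed by spectrum_dpipe.c: allocate an entry from the RIF sub-pool, clear and bind it in hardware, read it, and finally unbind and free it. Condensed into one hedged sketch; the wrapper is illustrative, while the three calls are the ones defined above:

/* Illustrative egress-counter round trip for one RIF. */
static int example_rif_egress_count(struct mlxsw_sp *mlxsw_sp,
				    struct mlxsw_sp_rif *rif, u64 *packets)
{
	int err;

	err = mlxsw_sp_rif_counter_alloc(mlxsw_sp, rif,
					 MLXSW_SP_RIF_COUNTER_EGRESS);
	if (err)
		return err;

	/* ... let traffic flow ... */

	err = mlxsw_sp_rif_counter_value_get(mlxsw_sp, rif,
					     MLXSW_SP_RIF_COUNTER_EGRESS,
					     packets);
	mlxsw_sp_rif_counter_free(mlxsw_sp, rif, MLXSW_SP_RIF_COUNTER_EGRESS);
	return err;
}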
+static struct mlxsw_sp_rif *
+mlxsw_sp_rif_find_by_dev(const struct mlxsw_sp *mlxsw_sp,
+                        const struct net_device *dev);
 
 #define mlxsw_sp_prefix_usage_for_each(prefix, prefix_usage) \
        for_each_set_bit(prefix, (prefix_usage)->b, MLXSW_SP_PREFIX_COUNT)
@@ -88,12 +258,6 @@ mlxsw_sp_prefix_usage_cpy(struct mlxsw_sp_prefix_usage *prefix_usage1,
        memcpy(prefix_usage1, prefix_usage2, sizeof(*prefix_usage1));
 }
 
-static void
-mlxsw_sp_prefix_usage_zero(struct mlxsw_sp_prefix_usage *prefix_usage)
-{
-       memset(prefix_usage, 0, sizeof(*prefix_usage));
-}
-
 static void
 mlxsw_sp_prefix_usage_set(struct mlxsw_sp_prefix_usage *prefix_usage,
                          unsigned char prefix_len)
@@ -125,7 +289,7 @@ struct mlxsw_sp_fib_node {
        struct list_head entry_list;
        struct list_head list;
        struct rhash_head ht_node;
-       struct mlxsw_sp_vr *vr;
+       struct mlxsw_sp_fib *fib;
        struct mlxsw_sp_fib_key key;
 };
 
@@ -149,13 +313,17 @@ struct mlxsw_sp_fib_entry {
 struct mlxsw_sp_fib {
        struct rhashtable ht;
        struct list_head node_list;
+       struct mlxsw_sp_vr *vr;
+       struct mlxsw_sp_lpm_tree *lpm_tree;
        unsigned long prefix_ref_count[MLXSW_SP_PREFIX_COUNT];
        struct mlxsw_sp_prefix_usage prefix_usage;
+       enum mlxsw_sp_l3proto proto;
 };
 
 static const struct rhashtable_params mlxsw_sp_fib_ht_params;
 
-static struct mlxsw_sp_fib *mlxsw_sp_fib_create(void)
+static struct mlxsw_sp_fib *mlxsw_sp_fib_create(struct mlxsw_sp_vr *vr,
+                                               enum mlxsw_sp_l3proto proto)
 {
        struct mlxsw_sp_fib *fib;
        int err;
@@ -167,6 +335,8 @@ static struct mlxsw_sp_fib *mlxsw_sp_fib_create(void)
        if (err)
                goto err_rhashtable_init;
        INIT_LIST_HEAD(&fib->node_list);
+       fib->proto = proto;
+       fib->vr = vr;
        return fib;
 
 err_rhashtable_init:
@@ -177,24 +347,21 @@ err_rhashtable_init:
 static void mlxsw_sp_fib_destroy(struct mlxsw_sp_fib *fib)
 {
        WARN_ON(!list_empty(&fib->node_list));
+       WARN_ON(fib->lpm_tree);
        rhashtable_destroy(&fib->ht);
        kfree(fib);
 }
 
 static struct mlxsw_sp_lpm_tree *
-mlxsw_sp_lpm_tree_find_unused(struct mlxsw_sp *mlxsw_sp, bool one_reserved)
+mlxsw_sp_lpm_tree_find_unused(struct mlxsw_sp *mlxsw_sp)
 {
        static struct mlxsw_sp_lpm_tree *lpm_tree;
        int i;
 
-       for (i = 0; i < MLXSW_SP_LPM_TREE_COUNT; i++) {
-               lpm_tree = &mlxsw_sp->router.lpm_trees[i];
-               if (lpm_tree->ref_count == 0) {
-                       if (one_reserved)
-                               one_reserved = false;
-                       else
-                               return lpm_tree;
-               }
+       for (i = 0; i < mlxsw_sp->router.lpm.tree_count; i++) {
+               lpm_tree = &mlxsw_sp->router.lpm.trees[i];
+               if (lpm_tree->ref_count == 0)
+                       return lpm_tree;
        }
        return NULL;
 }
@@ -248,12 +415,12 @@ mlxsw_sp_lpm_tree_left_struct_set(struct mlxsw_sp *mlxsw_sp,
 static struct mlxsw_sp_lpm_tree *
 mlxsw_sp_lpm_tree_create(struct mlxsw_sp *mlxsw_sp,
                         struct mlxsw_sp_prefix_usage *prefix_usage,
-                        enum mlxsw_sp_l3proto proto, bool one_reserved)
+                        enum mlxsw_sp_l3proto proto)
 {
        struct mlxsw_sp_lpm_tree *lpm_tree;
        int err;
 
-       lpm_tree = mlxsw_sp_lpm_tree_find_unused(mlxsw_sp, one_reserved);
+       lpm_tree = mlxsw_sp_lpm_tree_find_unused(mlxsw_sp);
        if (!lpm_tree)
                return ERR_PTR(-EBUSY);
        lpm_tree->proto = proto;
@@ -283,13 +450,13 @@ static int mlxsw_sp_lpm_tree_destroy(struct mlxsw_sp *mlxsw_sp,
 static struct mlxsw_sp_lpm_tree *
 mlxsw_sp_lpm_tree_get(struct mlxsw_sp *mlxsw_sp,
                      struct mlxsw_sp_prefix_usage *prefix_usage,
-                     enum mlxsw_sp_l3proto proto, bool one_reserved)
+                     enum mlxsw_sp_l3proto proto)
 {
        struct mlxsw_sp_lpm_tree *lpm_tree;
        int i;
 
-       for (i = 0; i < MLXSW_SP_LPM_TREE_COUNT; i++) {
-               lpm_tree = &mlxsw_sp->router.lpm_trees[i];
+       for (i = 0; i < mlxsw_sp->router.lpm.tree_count; i++) {
+               lpm_tree = &mlxsw_sp->router.lpm.trees[i];
                if (lpm_tree->ref_count != 0 &&
                    lpm_tree->proto == proto &&
                    mlxsw_sp_prefix_usage_eq(&lpm_tree->prefix_usage,
@@ -297,7 +464,7 @@ mlxsw_sp_lpm_tree_get(struct mlxsw_sp *mlxsw_sp,
                        goto inc_ref_count;
        }
        lpm_tree = mlxsw_sp_lpm_tree_create(mlxsw_sp, prefix_usage,
-                                           proto, one_reserved);
+                                           proto);
        if (IS_ERR(lpm_tree))
                return lpm_tree;
 
@@ -314,15 +481,41 @@ static int mlxsw_sp_lpm_tree_put(struct mlxsw_sp *mlxsw_sp,
        return 0;
 }
 
-static void mlxsw_sp_lpm_init(struct mlxsw_sp *mlxsw_sp)
+#define MLXSW_SP_LPM_TREE_MIN 2 /* trees 0 and 1 are reserved */
+
+static int mlxsw_sp_lpm_init(struct mlxsw_sp *mlxsw_sp)
 {
        struct mlxsw_sp_lpm_tree *lpm_tree;
+       u64 max_trees;
        int i;
 
-       for (i = 0; i < MLXSW_SP_LPM_TREE_COUNT; i++) {
-               lpm_tree = &mlxsw_sp->router.lpm_trees[i];
+       if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_LPM_TREES))
+               return -EIO;
+
+       max_trees = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_LPM_TREES);
+       mlxsw_sp->router.lpm.tree_count = max_trees - MLXSW_SP_LPM_TREE_MIN;
+       mlxsw_sp->router.lpm.trees = kcalloc(mlxsw_sp->router.lpm.tree_count,
+                                            sizeof(struct mlxsw_sp_lpm_tree),
+                                            GFP_KERNEL);
+       if (!mlxsw_sp->router.lpm.trees)
+               return -ENOMEM;
+
+       for (i = 0; i < mlxsw_sp->router.lpm.tree_count; i++) {
+               lpm_tree = &mlxsw_sp->router.lpm.trees[i];
                lpm_tree->id = i + MLXSW_SP_LPM_TREE_MIN;
        }
+
+       return 0;
+}
+
+static void mlxsw_sp_lpm_fini(struct mlxsw_sp *mlxsw_sp)
+{
+       kfree(mlxsw_sp->router.lpm.trees);
+}
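
The new init/fini pair sizes the tree array from the device's advertised MAX_LPM_TREES resource instead of a compile-time MLXSW_SP_LPM_TREE_COUNT, keeping the two reserved trees out of the pool. A minimal userspace C model of the pattern (hypothetical names, not driver code):

#include <stdlib.h>

#define TREE_MIN 2 /* trees 0 and 1 are reserved by the device */

struct lpm_tree {
	unsigned int id;
	unsigned int ref_count;
};

struct lpm {
	struct lpm_tree *trees;
	size_t tree_count;
};

/* Size the tree array from a queried device capability instead of a
 * compile-time constant; usable IDs start past the reserved range.
 */
static int lpm_init(struct lpm *lpm, size_t max_trees)
{
	size_t i;

	lpm->tree_count = max_trees - TREE_MIN;
	lpm->trees = calloc(lpm->tree_count, sizeof(*lpm->trees));
	if (!lpm->trees)
		return -1;
	for (i = 0; i < lpm->tree_count; i++)
		lpm->trees[i].id = i + TREE_MIN;
	return 0;
}

static void lpm_fini(struct lpm *lpm)
{
	free(lpm->trees);
}
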
+
+static bool mlxsw_sp_vr_is_used(const struct mlxsw_sp_vr *vr)
+{
+       return !!vr->fib4;
 }
 
 static struct mlxsw_sp_vr *mlxsw_sp_vr_find_unused(struct mlxsw_sp *mlxsw_sp)
@@ -332,31 +525,31 @@ static struct mlxsw_sp_vr *mlxsw_sp_vr_find_unused(struct mlxsw_sp *mlxsw_sp)
 
        for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_VRS); i++) {
                vr = &mlxsw_sp->router.vrs[i];
-               if (!vr->used)
+               if (!mlxsw_sp_vr_is_used(vr))
                        return vr;
        }
        return NULL;
 }
 
 static int mlxsw_sp_vr_lpm_tree_bind(struct mlxsw_sp *mlxsw_sp,
-                                    struct mlxsw_sp_vr *vr)
+                                    const struct mlxsw_sp_fib *fib)
 {
        char raltb_pl[MLXSW_REG_RALTB_LEN];
 
-       mlxsw_reg_raltb_pack(raltb_pl, vr->id,
-                            (enum mlxsw_reg_ralxx_protocol) vr->proto,
-                            vr->lpm_tree->id);
+       mlxsw_reg_raltb_pack(raltb_pl, fib->vr->id,
+                            (enum mlxsw_reg_ralxx_protocol) fib->proto,
+                            fib->lpm_tree->id);
        return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(raltb), raltb_pl);
 }
 
 static int mlxsw_sp_vr_lpm_tree_unbind(struct mlxsw_sp *mlxsw_sp,
-                                      struct mlxsw_sp_vr *vr)
+                                      const struct mlxsw_sp_fib *fib)
 {
        char raltb_pl[MLXSW_REG_RALTB_LEN];
 
        /* Bind to tree 0, which is the default */
-       mlxsw_reg_raltb_pack(raltb_pl, vr->id,
-                            (enum mlxsw_reg_ralxx_protocol) vr->proto, 0);
+       mlxsw_reg_raltb_pack(raltb_pl, fib->vr->id,
+                            (enum mlxsw_reg_ralxx_protocol) fib->proto, 0);
        return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(raltb), raltb_pl);
 }
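
Note that "unbind" above is really a rebind to tree 0, the device default, so a VR is never left without a tree. A small sketch of that convention (write_raltb() is a hypothetical stand-in for the RALTB register write):

enum { DEFAULT_TREE_ID = 0 };

static int write_raltb(unsigned int vr_id, int proto, unsigned int tree_id)
{
	(void)vr_id; (void)proto; (void)tree_id;
	return 0; /* would program the VR-to-tree binding in hardware */
}

static int vr_tree_bind(unsigned int vr_id, int proto, unsigned int tree_id)
{
	return write_raltb(vr_id, proto, tree_id);
}

static int vr_tree_unbind(unsigned int vr_id, int proto)
{
	/* fall back to the always-present default tree */
	return write_raltb(vr_id, proto, DEFAULT_TREE_ID);
}
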
 
@@ -369,8 +562,7 @@ static u32 mlxsw_sp_fix_tb_id(u32 tb_id)
 }
 
 static struct mlxsw_sp_vr *mlxsw_sp_vr_find(struct mlxsw_sp *mlxsw_sp,
-                                           u32 tb_id,
-                                           enum mlxsw_sp_l3proto proto)
+                                           u32 tb_id)
 {
        struct mlxsw_sp_vr *vr;
        int i;
@@ -379,69 +571,50 @@ static struct mlxsw_sp_vr *mlxsw_sp_vr_find(struct mlxsw_sp *mlxsw_sp,
 
        for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_VRS); i++) {
                vr = &mlxsw_sp->router.vrs[i];
-               if (vr->used && vr->proto == proto && vr->tb_id == tb_id)
+               if (mlxsw_sp_vr_is_used(vr) && vr->tb_id == tb_id)
                        return vr;
        }
        return NULL;
 }
 
+static struct mlxsw_sp_fib *mlxsw_sp_vr_fib(const struct mlxsw_sp_vr *vr,
+                                           enum mlxsw_sp_l3proto proto)
+{
+       switch (proto) {
+       case MLXSW_SP_L3_PROTO_IPV4:
+               return vr->fib4;
+       case MLXSW_SP_L3_PROTO_IPV6:
+               BUG_ON(1);
+       }
+       return NULL;
+}
+
 static struct mlxsw_sp_vr *mlxsw_sp_vr_create(struct mlxsw_sp *mlxsw_sp,
-                                             unsigned char prefix_len,
-                                             u32 tb_id,
-                                             enum mlxsw_sp_l3proto proto)
+                                             u32 tb_id)
 {
-       struct mlxsw_sp_prefix_usage req_prefix_usage;
-       struct mlxsw_sp_lpm_tree *lpm_tree;
        struct mlxsw_sp_vr *vr;
-       int err;
 
        vr = mlxsw_sp_vr_find_unused(mlxsw_sp);
        if (!vr)
                return ERR_PTR(-EBUSY);
-       vr->fib = mlxsw_sp_fib_create();
-       if (IS_ERR(vr->fib))
-               return ERR_CAST(vr->fib);
-
-       vr->proto = proto;
+       vr->fib4 = mlxsw_sp_fib_create(vr, MLXSW_SP_L3_PROTO_IPV4);
+       if (IS_ERR(vr->fib4))
+               return ERR_CAST(vr->fib4);
        vr->tb_id = tb_id;
-       mlxsw_sp_prefix_usage_zero(&req_prefix_usage);
-       mlxsw_sp_prefix_usage_set(&req_prefix_usage, prefix_len);
-       lpm_tree = mlxsw_sp_lpm_tree_get(mlxsw_sp, &req_prefix_usage,
-                                        proto, true);
-       if (IS_ERR(lpm_tree)) {
-               err = PTR_ERR(lpm_tree);
-               goto err_tree_get;
-       }
-       vr->lpm_tree = lpm_tree;
-       err = mlxsw_sp_vr_lpm_tree_bind(mlxsw_sp, vr);
-       if (err)
-               goto err_tree_bind;
-
-       vr->used = true;
        return vr;
-
-err_tree_bind:
-       mlxsw_sp_lpm_tree_put(mlxsw_sp, vr->lpm_tree);
-err_tree_get:
-       mlxsw_sp_fib_destroy(vr->fib);
-
-       return ERR_PTR(err);
 }
 
-static void mlxsw_sp_vr_destroy(struct mlxsw_sp *mlxsw_sp,
-                               struct mlxsw_sp_vr *vr)
+static void mlxsw_sp_vr_destroy(struct mlxsw_sp_vr *vr)
 {
-       mlxsw_sp_vr_lpm_tree_unbind(mlxsw_sp, vr);
-       mlxsw_sp_lpm_tree_put(mlxsw_sp, vr->lpm_tree);
-       mlxsw_sp_fib_destroy(vr->fib);
-       vr->used = false;
+       mlxsw_sp_fib_destroy(vr->fib4);
+       vr->fib4 = NULL;
 }
 
 static int
-mlxsw_sp_vr_lpm_tree_check(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_vr *vr,
+mlxsw_sp_vr_lpm_tree_check(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_fib *fib,
                           struct mlxsw_sp_prefix_usage *req_prefix_usage)
 {
-       struct mlxsw_sp_lpm_tree *lpm_tree = vr->lpm_tree;
+       struct mlxsw_sp_lpm_tree *lpm_tree = fib->lpm_tree;
        struct mlxsw_sp_lpm_tree *new_tree;
        int err;
 
@@ -449,7 +622,7 @@ mlxsw_sp_vr_lpm_tree_check(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_vr *vr,
                return 0;
 
        new_tree = mlxsw_sp_lpm_tree_get(mlxsw_sp, req_prefix_usage,
-                                        vr->proto, false);
+                                        fib->proto);
        if (IS_ERR(new_tree)) {
                /* We failed to get a tree according to the required
                 * prefix usage. However, the current tree might still be good
@@ -463,8 +636,8 @@ mlxsw_sp_vr_lpm_tree_check(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_vr *vr,
        }
 
        /* Prevent packet loss by overwriting existing binding */
-       vr->lpm_tree = new_tree;
-       err = mlxsw_sp_vr_lpm_tree_bind(mlxsw_sp, vr);
+       fib->lpm_tree = new_tree;
+       err = mlxsw_sp_vr_lpm_tree_bind(mlxsw_sp, fib);
        if (err)
                goto err_tree_bind;
        mlxsw_sp_lpm_tree_put(mlxsw_sp, lpm_tree);
@@ -472,53 +645,26 @@ mlxsw_sp_vr_lpm_tree_check(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_vr *vr,
        return 0;
 
 err_tree_bind:
-       vr->lpm_tree = lpm_tree;
+       fib->lpm_tree = lpm_tree;
        mlxsw_sp_lpm_tree_put(mlxsw_sp, new_tree);
        return err;
 }
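
The tree replacement above is make-before-break: the new tree is bound while the old one is still programmed, and the old tree is released only after the bind succeeds. A self-contained model of that rollback flow (hypothetical tree_get/tree_put/tree_bind helpers):

#include <stdio.h>
#include <stdlib.h>

struct tree { int id; int refs; };

static struct tree *tree_get(int id)
{
	struct tree *t = calloc(1, sizeof(*t));

	if (t) {
		t->id = id;
		t->refs = 1;
	}
	return t;
}

static void tree_put(struct tree *t)
{
	if (t && --t->refs == 0)
		free(t);
}

static int tree_bind(struct tree *t)
{
	printf("bound tree %d\n", t->id); /* stands in for the RALTB write */
	return 0;
}

/* Make-before-break: bind the replacement while the old tree is still
 * programmed; release the old tree only once the new binding took, and
 * fall back to the old, still-bound tree on failure.
 */
static int tree_replace(struct tree **cur, int new_id)
{
	struct tree *old = *cur;
	struct tree *new = tree_get(new_id);
	int err;

	if (!new)
		return -1;
	*cur = new;
	err = tree_bind(new);
	if (err) {
		*cur = old;	/* roll back; the old binding is intact */
		tree_put(new);
		return err;
	}
	tree_put(old);
	return 0;
}
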
 
-static struct mlxsw_sp_vr *mlxsw_sp_vr_get(struct mlxsw_sp *mlxsw_sp,
-                                          unsigned char prefix_len,
-                                          u32 tb_id,
-                                          enum mlxsw_sp_l3proto proto)
+static struct mlxsw_sp_vr *mlxsw_sp_vr_get(struct mlxsw_sp *mlxsw_sp, u32 tb_id)
 {
        struct mlxsw_sp_vr *vr;
-       int err;
 
        tb_id = mlxsw_sp_fix_tb_id(tb_id);
-       vr = mlxsw_sp_vr_find(mlxsw_sp, tb_id, proto);
-       if (!vr) {
-               vr = mlxsw_sp_vr_create(mlxsw_sp, prefix_len, tb_id, proto);
-               if (IS_ERR(vr))
-                       return vr;
-       } else {
-               struct mlxsw_sp_prefix_usage req_prefix_usage;
-
-               mlxsw_sp_prefix_usage_cpy(&req_prefix_usage,
-                                         &vr->fib->prefix_usage);
-               mlxsw_sp_prefix_usage_set(&req_prefix_usage, prefix_len);
-               /* Need to replace LPM tree in case new prefix is required. */
-               err = mlxsw_sp_vr_lpm_tree_check(mlxsw_sp, vr,
-                                                &req_prefix_usage);
-               if (err)
-                       return ERR_PTR(err);
-       }
+       vr = mlxsw_sp_vr_find(mlxsw_sp, tb_id);
+       if (!vr)
+               vr = mlxsw_sp_vr_create(mlxsw_sp, tb_id);
        return vr;
 }
 
-static void mlxsw_sp_vr_put(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_vr *vr)
+static void mlxsw_sp_vr_put(struct mlxsw_sp_vr *vr)
 {
-       /* Destroy virtual router entity in case the associated FIB is empty
-        * and allow it to be used for other tables in future. Otherwise,
-        * check if some prefix usage did not disappear and change tree if
-        * that is the case. Note that in case new, smaller tree cannot be
-        * allocated, the original one will be kept being used.
-        */
-       if (mlxsw_sp_prefix_usage_none(&vr->fib->prefix_usage))
-               mlxsw_sp_vr_destroy(mlxsw_sp, vr);
-       else
-               mlxsw_sp_vr_lpm_tree_check(mlxsw_sp, vr,
-                                          &vr->fib->prefix_usage);
+       if (!vr->rif_count && list_empty(&vr->fib4->node_list))
+               mlxsw_sp_vr_destroy(vr);
 }
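
With the explicit "used" flag gone, a VR now lives exactly as long as something references it: either a RIF or a route in its FIB. A compact model of mlxsw_sp_vr_put()'s release rule (hypothetical types):

#include <stdbool.h>
#include <stddef.h>

struct vr {
	unsigned int rif_count;	/* router interfaces using this VR */
	size_t fib_node_count;	/* routes installed in its FIB */
	bool in_use;
};

/* A VR is implicitly refcounted by its users: release it only once no
 * RIF points at it and its FIB holds no nodes.
 */
static void vr_put(struct vr *vr)
{
	if (!vr->rif_count && !vr->fib_node_count)
		vr->in_use = false; /* slot can be reused for another table */
}
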
 
 static int mlxsw_sp_vrs_init(struct mlxsw_sp *mlxsw_sp)
@@ -627,14 +773,14 @@ static struct mlxsw_sp_neigh_entry *
 mlxsw_sp_neigh_entry_create(struct mlxsw_sp *mlxsw_sp, struct neighbour *n)
 {
        struct mlxsw_sp_neigh_entry *neigh_entry;
-       struct mlxsw_sp_rif *r;
+       struct mlxsw_sp_rif *rif;
        int err;
 
-       r = mlxsw_sp_rif_find_by_dev(mlxsw_sp, n->dev);
-       if (!r)
+       rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, n->dev);
+       if (!rif)
                return ERR_PTR(-EINVAL);
 
-       neigh_entry = mlxsw_sp_neigh_entry_alloc(mlxsw_sp, n, r->rif);
+       neigh_entry = mlxsw_sp_neigh_entry_alloc(mlxsw_sp, n, rif->rif_index);
        if (!neigh_entry)
                return ERR_PTR(-ENOMEM);
 
@@ -642,7 +788,7 @@ mlxsw_sp_neigh_entry_create(struct mlxsw_sp *mlxsw_sp, struct neighbour *n)
        if (err)
                goto err_neigh_entry_insert;
 
-       list_add(&neigh_entry->rif_list_node, &r->neigh_list);
+       list_add(&neigh_entry->rif_list_node, &rif->neigh_list);
 
        return neigh_entry;
 
@@ -1050,22 +1196,22 @@ static void mlxsw_sp_neigh_fini(struct mlxsw_sp *mlxsw_sp)
 }
 
 static int mlxsw_sp_neigh_rif_flush(struct mlxsw_sp *mlxsw_sp,
-                                   const struct mlxsw_sp_rif *r)
+                                   const struct mlxsw_sp_rif *rif)
 {
        char rauht_pl[MLXSW_REG_RAUHT_LEN];
 
        mlxsw_reg_rauht_pack(rauht_pl, MLXSW_REG_RAUHT_OP_WRITE_DELETE_ALL,
-                            r->rif, r->addr);
+                            rif->rif_index, rif->addr);
        return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rauht), rauht_pl);
 }
 
 static void mlxsw_sp_neigh_rif_gone_sync(struct mlxsw_sp *mlxsw_sp,
-                                        struct mlxsw_sp_rif *r)
+                                        struct mlxsw_sp_rif *rif)
 {
        struct mlxsw_sp_neigh_entry *neigh_entry, *tmp;
 
-       mlxsw_sp_neigh_rif_flush(mlxsw_sp, r);
-       list_for_each_entry_safe(neigh_entry, tmp, &r->neigh_list,
+       mlxsw_sp_neigh_rif_flush(mlxsw_sp, rif);
+       list_for_each_entry_safe(neigh_entry, tmp, &rif->neigh_list,
                                 rif_list_node)
                mlxsw_sp_neigh_entry_destroy(mlxsw_sp, neigh_entry);
 }
@@ -1082,7 +1228,7 @@ struct mlxsw_sp_nexthop {
                                                */
        struct rhash_head ht_node;
        struct mlxsw_sp_nexthop_key key;
-       struct mlxsw_sp_rif *r;
+       struct mlxsw_sp_rif *rif;
        u8 should_offload:1, /* set indicates this neigh is connected and
                              * should be put to KVD linear area of this group.
                              */
@@ -1109,7 +1255,7 @@ struct mlxsw_sp_nexthop_group {
        u16 ecmp_size;
        u16 count;
        struct mlxsw_sp_nexthop nexthops[0];
-#define nh_rif nexthops[0].r
+#define nh_rif nexthops[0].rif
 };
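
The nexthop group ends in a zero-length array, the pre-C99 spelling of a flexible array member, with nh_rif aliasing the first nexthop's RIF. A sketch of how such a group is allocated in one block (C99 syntax, hypothetical names):

#include <stdlib.h>

struct nexthop { int rif_index; };

struct nexthop_group {
	unsigned int count;
	struct nexthop nexthops[];	/* C99 flexible array member */
};

/* One allocation covers the header plus 'count' trailing nexthops,
 * mirroring how nh_grp and its nexthops[0] array are laid out.
 */
static struct nexthop_group *group_alloc(unsigned int count)
{
	struct nexthop_group *grp;

	grp = calloc(1, sizeof(*grp) + count * sizeof(grp->nexthops[0]));
	if (grp)
		grp->count = count;
	return grp;
}
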
 
 static const struct rhashtable_params mlxsw_sp_nexthop_group_ht_params = {
@@ -1171,7 +1317,7 @@ mlxsw_sp_nexthop_lookup(struct mlxsw_sp *mlxsw_sp,
 }
 
 static int mlxsw_sp_adj_index_mass_update_vr(struct mlxsw_sp *mlxsw_sp,
-                                            struct mlxsw_sp_vr *vr,
+                                            const struct mlxsw_sp_fib *fib,
                                             u32 adj_index, u16 ecmp_size,
                                             u32 new_adj_index,
                                             u16 new_ecmp_size)
@@ -1179,8 +1325,8 @@ static int mlxsw_sp_adj_index_mass_update_vr(struct mlxsw_sp *mlxsw_sp,
        char raleu_pl[MLXSW_REG_RALEU_LEN];
 
        mlxsw_reg_raleu_pack(raleu_pl,
-                            (enum mlxsw_reg_ralxx_protocol) vr->proto, vr->id,
-                            adj_index, ecmp_size, new_adj_index,
+                            (enum mlxsw_reg_ralxx_protocol) fib->proto,
+                            fib->vr->id, adj_index, ecmp_size, new_adj_index,
                             new_ecmp_size);
        return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(raleu), raleu_pl);
 }
@@ -1190,14 +1336,14 @@ static int mlxsw_sp_adj_index_mass_update(struct mlxsw_sp *mlxsw_sp,
                                          u32 old_adj_index, u16 old_ecmp_size)
 {
        struct mlxsw_sp_fib_entry *fib_entry;
-       struct mlxsw_sp_vr *vr = NULL;
+       struct mlxsw_sp_fib *fib = NULL;
        int err;
 
        list_for_each_entry(fib_entry, &nh_grp->fib_list, nexthop_group_node) {
-               if (vr == fib_entry->fib_node->vr)
+               if (fib == fib_entry->fib_node->fib)
                        continue;
-               vr = fib_entry->fib_node->vr;
-               err = mlxsw_sp_adj_index_mass_update_vr(mlxsw_sp, vr,
+               fib = fib_entry->fib_node->fib;
+               err = mlxsw_sp_adj_index_mass_update_vr(mlxsw_sp, fib,
                                                        old_adj_index,
                                                        old_ecmp_size,
                                                        nh_grp->adj_index,
@@ -1280,7 +1426,6 @@ mlxsw_sp_nexthop_group_refresh(struct mlxsw_sp *mlxsw_sp,
        bool old_adj_index_valid;
        u32 old_adj_index;
        u16 old_ecmp_size;
-       int ret;
        int i;
        int err;
 
@@ -1318,15 +1463,14 @@ mlxsw_sp_nexthop_group_refresh(struct mlxsw_sp *mlxsw_sp,
                 */
                goto set_trap;
 
-       ret = mlxsw_sp_kvdl_alloc(mlxsw_sp, ecmp_size);
-       if (ret < 0) {
+       err = mlxsw_sp_kvdl_alloc(mlxsw_sp, ecmp_size, &adj_index);
+       if (err) {
                /* We ran out of KVD linear space, just set the
                 * trap and let everything flow through kernel.
                 */
                dev_warn(mlxsw_sp->bus_info->dev, "Failed to allocate KVD linear area for nexthop group.\n");
                goto set_trap;
        }
-       adj_index = ret;
        old_adj_index_valid = nh_grp->adj_index_valid;
        old_adj_index = nh_grp->adj_index;
        old_ecmp_size = nh_grp->ecmp_size;
@@ -1399,22 +1543,22 @@ mlxsw_sp_nexthop_neigh_update(struct mlxsw_sp *mlxsw_sp,
 }
 
 static void mlxsw_sp_nexthop_rif_init(struct mlxsw_sp_nexthop *nh,
-                                     struct mlxsw_sp_rif *r)
+                                     struct mlxsw_sp_rif *rif)
 {
-       if (nh->r)
+       if (nh->rif)
                return;
 
-       nh->r = r;
-       list_add(&nh->rif_list_node, &r->nexthop_list);
+       nh->rif = rif;
+       list_add(&nh->rif_list_node, &rif->nexthop_list);
 }
 
 static void mlxsw_sp_nexthop_rif_fini(struct mlxsw_sp_nexthop *nh)
 {
-       if (!nh->r)
+       if (!nh->rif)
                return;
 
        list_del(&nh->rif_list_node);
-       nh->r = NULL;
+       nh->rif = NULL;
 }
 
 static int mlxsw_sp_nexthop_neigh_init(struct mlxsw_sp *mlxsw_sp,
@@ -1505,7 +1649,7 @@ static int mlxsw_sp_nexthop_init(struct mlxsw_sp *mlxsw_sp,
 {
        struct net_device *dev = fib_nh->nh_dev;
        struct in_device *in_dev;
-       struct mlxsw_sp_rif *r;
+       struct mlxsw_sp_rif *rif;
        int err;
 
        nh->nh_grp = nh_grp;
@@ -1514,15 +1658,18 @@ static int mlxsw_sp_nexthop_init(struct mlxsw_sp *mlxsw_sp,
        if (err)
                return err;
 
+       if (!dev)
+               return 0;
+
        in_dev = __in_dev_get_rtnl(dev);
        if (in_dev && IN_DEV_IGNORE_ROUTES_WITH_LINKDOWN(in_dev) &&
            fib_nh->nh_flags & RTNH_F_LINKDOWN)
                return 0;
 
-       r = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
-       if (!r)
+       rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
+       if (!rif)
                return 0;
-       mlxsw_sp_nexthop_rif_init(nh, r);
+       mlxsw_sp_nexthop_rif_init(nh, rif);
 
        err = mlxsw_sp_nexthop_neigh_init(mlxsw_sp, nh);
        if (err)
@@ -1548,7 +1695,7 @@ static void mlxsw_sp_nexthop_event(struct mlxsw_sp *mlxsw_sp,
 {
        struct mlxsw_sp_nexthop_key key;
        struct mlxsw_sp_nexthop *nh;
-       struct mlxsw_sp_rif *r;
+       struct mlxsw_sp_rif *rif;
 
        if (mlxsw_sp->router.aborted)
                return;
@@ -1558,13 +1705,13 @@ static void mlxsw_sp_nexthop_event(struct mlxsw_sp *mlxsw_sp,
        if (WARN_ON_ONCE(!nh))
                return;
 
-       r = mlxsw_sp_rif_find_by_dev(mlxsw_sp, fib_nh->nh_dev);
-       if (!r)
+       rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, fib_nh->nh_dev);
+       if (!rif)
                return;
 
        switch (event) {
        case FIB_EVENT_NH_ADD:
-               mlxsw_sp_nexthop_rif_init(nh, r);
+               mlxsw_sp_nexthop_rif_init(nh, rif);
                mlxsw_sp_nexthop_neigh_init(mlxsw_sp, nh);
                break;
        case FIB_EVENT_NH_DEL:
@@ -1577,11 +1724,11 @@ static void mlxsw_sp_nexthop_event(struct mlxsw_sp *mlxsw_sp,
 }
 
 static void mlxsw_sp_nexthop_rif_gone_sync(struct mlxsw_sp *mlxsw_sp,
-                                          struct mlxsw_sp_rif *r)
+                                          struct mlxsw_sp_rif *rif)
 {
        struct mlxsw_sp_nexthop *nh, *tmp;
 
-       list_for_each_entry_safe(nh, tmp, &r->nexthop_list, rif_list_node) {
+       list_for_each_entry_safe(nh, tmp, &rif->nexthop_list, rif_list_node) {
                mlxsw_sp_nexthop_neigh_fini(mlxsw_sp, nh);
                mlxsw_sp_nexthop_rif_fini(nh);
                mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh->nh_grp);
@@ -1699,7 +1846,7 @@ static void mlxsw_sp_fib_entry_offload_set(struct mlxsw_sp_fib_entry *fib_entry)
 {
        fib_entry->offloaded = true;
 
-       switch (fib_entry->fib_node->vr->proto) {
+       switch (fib_entry->fib_node->fib->proto) {
        case MLXSW_SP_L3_PROTO_IPV4:
                fib_info_offload_inc(fib_entry->nh_group->key.fi);
                break;
@@ -1711,7 +1858,7 @@ static void mlxsw_sp_fib_entry_offload_set(struct mlxsw_sp_fib_entry *fib_entry)
 static void
 mlxsw_sp_fib_entry_offload_unset(struct mlxsw_sp_fib_entry *fib_entry)
 {
-       switch (fib_entry->fib_node->vr->proto) {
+       switch (fib_entry->fib_node->fib->proto) {
        case MLXSW_SP_L3_PROTO_IPV4:
                fib_info_offload_dec(fib_entry->nh_group->key.fi);
                break;
@@ -1751,8 +1898,8 @@ static int mlxsw_sp_fib_entry_op4_remote(struct mlxsw_sp *mlxsw_sp,
                                         enum mlxsw_reg_ralue_op op)
 {
        char ralue_pl[MLXSW_REG_RALUE_LEN];
+       struct mlxsw_sp_fib *fib = fib_entry->fib_node->fib;
        u32 *p_dip = (u32 *) fib_entry->fib_node->key.addr;
-       struct mlxsw_sp_vr *vr = fib_entry->fib_node->vr;
        enum mlxsw_reg_ralue_trap_action trap_action;
        u16 trap_id = 0;
        u32 adjacency_index = 0;
@@ -1772,8 +1919,8 @@ static int mlxsw_sp_fib_entry_op4_remote(struct mlxsw_sp *mlxsw_sp,
        }
 
        mlxsw_reg_ralue_pack4(ralue_pl,
-                             (enum mlxsw_reg_ralxx_protocol) vr->proto, op,
-                             vr->id, fib_entry->fib_node->key.prefix_len,
+                             (enum mlxsw_reg_ralxx_protocol) fib->proto, op,
+                             fib->vr->id, fib_entry->fib_node->key.prefix_len,
                              *p_dip);
        mlxsw_reg_ralue_act_remote_pack(ralue_pl, trap_action, trap_id,
                                        adjacency_index, ecmp_size);
@@ -1784,27 +1931,28 @@ static int mlxsw_sp_fib_entry_op4_local(struct mlxsw_sp *mlxsw_sp,
                                        struct mlxsw_sp_fib_entry *fib_entry,
                                        enum mlxsw_reg_ralue_op op)
 {
-       struct mlxsw_sp_rif *r = fib_entry->nh_group->nh_rif;
+       struct mlxsw_sp_rif *rif = fib_entry->nh_group->nh_rif;
+       struct mlxsw_sp_fib *fib = fib_entry->fib_node->fib;
        enum mlxsw_reg_ralue_trap_action trap_action;
        char ralue_pl[MLXSW_REG_RALUE_LEN];
        u32 *p_dip = (u32 *) fib_entry->fib_node->key.addr;
-       struct mlxsw_sp_vr *vr = fib_entry->fib_node->vr;
        u16 trap_id = 0;
-       u16 rif = 0;
+       u16 rif_index = 0;
 
        if (mlxsw_sp_fib_entry_should_offload(fib_entry)) {
                trap_action = MLXSW_REG_RALUE_TRAP_ACTION_NOP;
-               rif = r->rif;
+               rif_index = rif->rif_index;
        } else {
                trap_action = MLXSW_REG_RALUE_TRAP_ACTION_TRAP;
                trap_id = MLXSW_TRAP_ID_RTR_INGRESS0;
        }
 
        mlxsw_reg_ralue_pack4(ralue_pl,
-                             (enum mlxsw_reg_ralxx_protocol) vr->proto, op,
-                             vr->id, fib_entry->fib_node->key.prefix_len,
+                             (enum mlxsw_reg_ralxx_protocol) fib->proto, op,
+                             fib->vr->id, fib_entry->fib_node->key.prefix_len,
                              *p_dip);
-       mlxsw_reg_ralue_act_local_pack(ralue_pl, trap_action, trap_id, rif);
+       mlxsw_reg_ralue_act_local_pack(ralue_pl, trap_action, trap_id,
+                                      rif_index);
        return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralue), ralue_pl);
 }
 
@@ -1812,13 +1960,13 @@ static int mlxsw_sp_fib_entry_op4_trap(struct mlxsw_sp *mlxsw_sp,
                                       struct mlxsw_sp_fib_entry *fib_entry,
                                       enum mlxsw_reg_ralue_op op)
 {
+       struct mlxsw_sp_fib *fib = fib_entry->fib_node->fib;
        char ralue_pl[MLXSW_REG_RALUE_LEN];
        u32 *p_dip = (u32 *) fib_entry->fib_node->key.addr;
-       struct mlxsw_sp_vr *vr = fib_entry->fib_node->vr;
 
        mlxsw_reg_ralue_pack4(ralue_pl,
-                             (enum mlxsw_reg_ralxx_protocol) vr->proto, op,
-                             vr->id, fib_entry->fib_node->key.prefix_len,
+                             (enum mlxsw_reg_ralxx_protocol) fib->proto, op,
+                             fib->vr->id, fib_entry->fib_node->key.prefix_len,
                              *p_dip);
        mlxsw_reg_ralue_act_ip2me_pack(ralue_pl);
        return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralue), ralue_pl);
@@ -1845,7 +1993,7 @@ static int mlxsw_sp_fib_entry_op(struct mlxsw_sp *mlxsw_sp,
 {
        int err = -EINVAL;
 
-       switch (fib_entry->fib_node->vr->proto) {
+       switch (fib_entry->fib_node->fib->proto) {
        case MLXSW_SP_L3_PROTO_IPV4:
                err = mlxsw_sp_fib_entry_op4(mlxsw_sp, fib_entry, op);
                break;
@@ -1877,17 +2025,29 @@ mlxsw_sp_fib4_entry_type_set(struct mlxsw_sp *mlxsw_sp,
 {
        struct fib_info *fi = fen_info->fi;
 
-       if (fen_info->type == RTN_LOCAL || fen_info->type == RTN_BROADCAST) {
+       switch (fen_info->type) {
+       case RTN_BROADCAST: /* fall through */
+       case RTN_LOCAL:
                fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_TRAP;
                return 0;
-       }
-       if (fen_info->type != RTN_UNICAST)
-               return -EINVAL;
-       if (fi->fib_nh->nh_scope != RT_SCOPE_LINK)
+       case RTN_UNREACHABLE: /* fall through */
+       case RTN_BLACKHOLE: /* fall through */
+       case RTN_PROHIBIT:
+               /* Packets hitting these routes need to be trapped, but
+                * can be trapped with a lower priority than packets directed
+                * at the host, so use action type local instead of trap.
+                */
                fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_LOCAL;
-       else
-               fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_REMOTE;
-       return 0;
+               return 0;
+       case RTN_UNICAST:
+               if (fi->fib_nh->nh_scope != RT_SCOPE_LINK)
+                       fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_LOCAL;
+               else
+                       fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_REMOTE;
+               return 0;
+       default:
+               return -EINVAL;
+       }
 }
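
The rewritten classifier maps each kernel route type onto one of three hardware entry types. A standalone model of the mapping (userspace C against <linux/rtnetlink.h>; the enum names are hypothetical):

#include <errno.h>
#include <linux/rtnetlink.h>

enum entry_type { ENTRY_REMOTE, ENTRY_LOCAL, ENTRY_TRAP };

/* Host-bound and broadcast traffic must be trapped; "negative" routes
 * (unreachable, blackhole, prohibit) are trapped too, but at a lower
 * priority via the local action; only link-scoped unicast nexthops can
 * be offloaded as remote (adjacency) entries.
 */
static int entry_type_set(unsigned char rtn_type, int nh_scope_is_link,
			  enum entry_type *type)
{
	switch (rtn_type) {
	case RTN_BROADCAST:
	case RTN_LOCAL:
		*type = ENTRY_TRAP;
		return 0;
	case RTN_UNREACHABLE:
	case RTN_BLACKHOLE:
	case RTN_PROHIBIT:
		*type = ENTRY_LOCAL;
		return 0;
	case RTN_UNICAST:
		*type = nh_scope_is_link ? ENTRY_REMOTE : ENTRY_LOCAL;
		return 0;
	default:
		return -EINVAL;
	}
}
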
 
 static struct mlxsw_sp_fib_entry *
@@ -1996,7 +2156,7 @@ mlxsw_sp_fib_node_lookup(struct mlxsw_sp_fib *fib, const void *addr,
 }
 
 static struct mlxsw_sp_fib_node *
-mlxsw_sp_fib_node_create(struct mlxsw_sp_vr *vr, const void *addr,
+mlxsw_sp_fib_node_create(struct mlxsw_sp_fib *fib, const void *addr,
                         size_t addr_len, unsigned char prefix_len)
 {
        struct mlxsw_sp_fib_node *fib_node;
@@ -2006,18 +2166,15 @@ mlxsw_sp_fib_node_create(struct mlxsw_sp_vr *vr, const void *addr,
                return NULL;
 
        INIT_LIST_HEAD(&fib_node->entry_list);
-       list_add(&fib_node->list, &vr->fib->node_list);
+       list_add(&fib_node->list, &fib->node_list);
        memcpy(fib_node->key.addr, addr, addr_len);
        fib_node->key.prefix_len = prefix_len;
-       mlxsw_sp_fib_node_insert(vr->fib, fib_node);
-       fib_node->vr = vr;
 
        return fib_node;
 }
 
 static void mlxsw_sp_fib_node_destroy(struct mlxsw_sp_fib_node *fib_node)
 {
-       mlxsw_sp_fib_node_remove(fib_node->vr->fib, fib_node);
        list_del(&fib_node->list);
        WARN_ON(!list_empty(&fib_node->entry_list));
        kfree(fib_node);
@@ -2034,7 +2191,7 @@ mlxsw_sp_fib_node_entry_is_first(const struct mlxsw_sp_fib_node *fib_node,
 static void mlxsw_sp_fib_node_prefix_inc(struct mlxsw_sp_fib_node *fib_node)
 {
        unsigned char prefix_len = fib_node->key.prefix_len;
-       struct mlxsw_sp_fib *fib = fib_node->vr->fib;
+       struct mlxsw_sp_fib *fib = fib_node->fib;
 
        if (fib->prefix_ref_count[prefix_len]++ == 0)
                mlxsw_sp_prefix_usage_set(&fib->prefix_usage, prefix_len);
@@ -2043,32 +2200,98 @@ static void mlxsw_sp_fib_node_prefix_inc(struct mlxsw_sp_fib_node *fib_node)
 static void mlxsw_sp_fib_node_prefix_dec(struct mlxsw_sp_fib_node *fib_node)
 {
        unsigned char prefix_len = fib_node->key.prefix_len;
-       struct mlxsw_sp_fib *fib = fib_node->vr->fib;
+       struct mlxsw_sp_fib *fib = fib_node->fib;
 
        if (--fib->prefix_ref_count[prefix_len] == 0)
                mlxsw_sp_prefix_usage_clear(&fib->prefix_usage, prefix_len);
 }
 
+static int mlxsw_sp_fib_node_init(struct mlxsw_sp *mlxsw_sp,
+                                 struct mlxsw_sp_fib_node *fib_node,
+                                 struct mlxsw_sp_fib *fib)
+{
+       struct mlxsw_sp_prefix_usage req_prefix_usage;
+       struct mlxsw_sp_lpm_tree *lpm_tree;
+       int err;
+
+       err = mlxsw_sp_fib_node_insert(fib, fib_node);
+       if (err)
+               return err;
+       fib_node->fib = fib;
+
+       mlxsw_sp_prefix_usage_cpy(&req_prefix_usage, &fib->prefix_usage);
+       mlxsw_sp_prefix_usage_set(&req_prefix_usage, fib_node->key.prefix_len);
+
+       if (!mlxsw_sp_prefix_usage_none(&fib->prefix_usage)) {
+               err = mlxsw_sp_vr_lpm_tree_check(mlxsw_sp, fib,
+                                                &req_prefix_usage);
+               if (err)
+                       goto err_tree_check;
+       } else {
+               lpm_tree = mlxsw_sp_lpm_tree_get(mlxsw_sp, &req_prefix_usage,
+                                                fib->proto);
+               if (IS_ERR(lpm_tree))
+                       return PTR_ERR(lpm_tree);
+               fib->lpm_tree = lpm_tree;
+               err = mlxsw_sp_vr_lpm_tree_bind(mlxsw_sp, fib);
+               if (err)
+                       goto err_tree_bind;
+       }
+
+       mlxsw_sp_fib_node_prefix_inc(fib_node);
+
+       return 0;
+
+err_tree_bind:
+       fib->lpm_tree = NULL;
+       mlxsw_sp_lpm_tree_put(mlxsw_sp, lpm_tree);
+err_tree_check:
+       fib_node->fib = NULL;
+       mlxsw_sp_fib_node_remove(fib, fib_node);
+       return err;
+}
+
+static void mlxsw_sp_fib_node_fini(struct mlxsw_sp *mlxsw_sp,
+                                  struct mlxsw_sp_fib_node *fib_node)
+{
+       struct mlxsw_sp_lpm_tree *lpm_tree = fib_node->fib->lpm_tree;
+       struct mlxsw_sp_fib *fib = fib_node->fib;
+
+       mlxsw_sp_fib_node_prefix_dec(fib_node);
+
+       if (mlxsw_sp_prefix_usage_none(&fib->prefix_usage)) {
+               mlxsw_sp_vr_lpm_tree_unbind(mlxsw_sp, fib);
+               fib->lpm_tree = NULL;
+               mlxsw_sp_lpm_tree_put(mlxsw_sp, lpm_tree);
+       } else {
+               mlxsw_sp_vr_lpm_tree_check(mlxsw_sp, fib, &fib->prefix_usage);
+       }
+
+       fib_node->fib = NULL;
+       mlxsw_sp_fib_node_remove(fib, fib_node);
+}
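
Taken together, init/fini tie the LPM tree's lifetime to the FIB's contents: the first node in acquires and binds a tree, the last node out releases it, and everything in between only re-validates the tree. A high-level model (hypothetical types; the hardware calls are reduced to comments):

#include <stdbool.h>

struct fib {
	unsigned int node_count;
	bool tree_bound;
};

/* First node into an empty FIB acquires and binds a tree; the last node
 * out unbinds and releases it.
 */
static int fib_node_init(struct fib *fib)
{
	if (fib->node_count++ == 0)
		fib->tree_bound = true;	/* tree get + RALTB bind */
	/* else: grow/replace the bound tree if the new prefix needs it */
	return 0;
}

static void fib_node_fini(struct fib *fib)
{
	if (--fib->node_count == 0)
		fib->tree_bound = false; /* RALTB unbind + tree put */
	/* else: opportunistically shrink the tree to current usage */
}
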
+
 static struct mlxsw_sp_fib_node *
 mlxsw_sp_fib4_node_get(struct mlxsw_sp *mlxsw_sp,
                       const struct fib_entry_notifier_info *fen_info)
 {
        struct mlxsw_sp_fib_node *fib_node;
+       struct mlxsw_sp_fib *fib;
        struct mlxsw_sp_vr *vr;
        int err;
 
-       vr = mlxsw_sp_vr_get(mlxsw_sp, fen_info->dst_len, fen_info->tb_id,
-                            MLXSW_SP_L3_PROTO_IPV4);
+       vr = mlxsw_sp_vr_get(mlxsw_sp, fen_info->tb_id);
        if (IS_ERR(vr))
                return ERR_CAST(vr);
+       fib = mlxsw_sp_vr_fib(vr, MLXSW_SP_L3_PROTO_IPV4);
 
-       fib_node = mlxsw_sp_fib_node_lookup(vr->fib, &fen_info->dst,
+       fib_node = mlxsw_sp_fib_node_lookup(fib, &fen_info->dst,
                                            sizeof(fen_info->dst),
                                            fen_info->dst_len);
        if (fib_node)
                return fib_node;
 
-       fib_node = mlxsw_sp_fib_node_create(vr, &fen_info->dst,
+       fib_node = mlxsw_sp_fib_node_create(fib, &fen_info->dst,
                                            sizeof(fen_info->dst),
                                            fen_info->dst_len);
        if (!fib_node) {
@@ -2076,22 +2299,29 @@ mlxsw_sp_fib4_node_get(struct mlxsw_sp *mlxsw_sp,
                goto err_fib_node_create;
        }
 
+       err = mlxsw_sp_fib_node_init(mlxsw_sp, fib_node, fib);
+       if (err)
+               goto err_fib_node_init;
+
        return fib_node;
 
+err_fib_node_init:
+       mlxsw_sp_fib_node_destroy(fib_node);
 err_fib_node_create:
-       mlxsw_sp_vr_put(mlxsw_sp, vr);
+       mlxsw_sp_vr_put(vr);
        return ERR_PTR(err);
 }
 
 static void mlxsw_sp_fib4_node_put(struct mlxsw_sp *mlxsw_sp,
                                   struct mlxsw_sp_fib_node *fib_node)
 {
-       struct mlxsw_sp_vr *vr = fib_node->vr;
+       struct mlxsw_sp_vr *vr = fib_node->fib->vr;
 
        if (!list_empty(&fib_node->entry_list))
                return;
+       mlxsw_sp_fib_node_fini(mlxsw_sp, fib_node);
        mlxsw_sp_fib_node_destroy(fib_node);
-       mlxsw_sp_vr_put(mlxsw_sp, vr);
+       mlxsw_sp_vr_put(vr);
 }
 
 static struct mlxsw_sp_fib_entry *
@@ -2236,8 +2466,6 @@ static int mlxsw_sp_fib4_node_entry_link(struct mlxsw_sp *mlxsw_sp,
        if (err)
                goto err_fib4_node_entry_add;
 
-       mlxsw_sp_fib_node_prefix_inc(fib_node);
-
        return 0;
 
 err_fib4_node_entry_add:
@@ -2251,7 +2479,6 @@ mlxsw_sp_fib4_node_entry_unlink(struct mlxsw_sp *mlxsw_sp,
 {
        struct mlxsw_sp_fib_node *fib_node = fib_entry->fib_node;
 
-       mlxsw_sp_fib_node_prefix_dec(fib_node);
        mlxsw_sp_fib4_node_entry_del(mlxsw_sp, fib_node, fib_entry);
        mlxsw_sp_fib4_node_list_remove(fib_entry);
 }
@@ -2340,9 +2567,7 @@ static int mlxsw_sp_router_set_abort_trap(struct mlxsw_sp *mlxsw_sp)
 {
        char ralta_pl[MLXSW_REG_RALTA_LEN];
        char ralst_pl[MLXSW_REG_RALST_LEN];
-       char raltb_pl[MLXSW_REG_RALTB_LEN];
-       char ralue_pl[MLXSW_REG_RALUE_LEN];
-       int err;
+       int i, err;
 
        mlxsw_reg_ralta_pack(ralta_pl, true, MLXSW_REG_RALXX_PROTOCOL_IPV4,
                             MLXSW_SP_LPM_TREE_MIN);
@@ -2355,16 +2580,33 @@ static int mlxsw_sp_router_set_abort_trap(struct mlxsw_sp *mlxsw_sp)
        if (err)
                return err;
 
-       mlxsw_reg_raltb_pack(raltb_pl, 0, MLXSW_REG_RALXX_PROTOCOL_IPV4,
-                            MLXSW_SP_LPM_TREE_MIN);
-       err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(raltb), raltb_pl);
-       if (err)
-               return err;
+       for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_VRS); i++) {
+               struct mlxsw_sp_vr *vr = &mlxsw_sp->router.vrs[i];
+               char raltb_pl[MLXSW_REG_RALTB_LEN];
+               char ralue_pl[MLXSW_REG_RALUE_LEN];
 
-       mlxsw_reg_ralue_pack4(ralue_pl, MLXSW_SP_L3_PROTO_IPV4,
-                             MLXSW_REG_RALUE_OP_WRITE_WRITE, 0, 0, 0);
-       mlxsw_reg_ralue_act_ip2me_pack(ralue_pl);
-       return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralue), ralue_pl);
+               if (!mlxsw_sp_vr_is_used(vr))
+                       continue;
+
+               mlxsw_reg_raltb_pack(raltb_pl, vr->id,
+                                    MLXSW_REG_RALXX_PROTOCOL_IPV4,
+                                    MLXSW_SP_LPM_TREE_MIN);
+               err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(raltb),
+                                     raltb_pl);
+               if (err)
+                       return err;
+
+               mlxsw_reg_ralue_pack4(ralue_pl, MLXSW_SP_L3_PROTO_IPV4,
+                                     MLXSW_REG_RALUE_OP_WRITE_WRITE, vr->id, 0,
+                                     0);
+               mlxsw_reg_ralue_act_ip2me_pack(ralue_pl);
+               err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralue),
+                                     ralue_pl);
+               if (err)
+                       return err;
+       }
+
+       return 0;
 }
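
In abort mode the driver now walks every in-use VR, binding each to the reserved tree and installing a catch-all 0/0 ip2me route, instead of programming only VR 0. A sketch of that loop (hypothetical helpers standing in for the RALTB/RALUE writes):

#include <stdbool.h>
#include <stddef.h>

struct vr { bool in_use; unsigned int id; };

static int bind_default_tree(struct vr *vr)   { (void)vr; return 0; }
static int install_default_trap(struct vr *vr) { (void)vr; return 0; }

/* Point every active VR at the reserved tree and give it a 0/0 route
 * that punts all traffic to the kernel.
 */
static int set_abort_trap(struct vr *vrs, size_t n)
{
	size_t i;
	int err;

	for (i = 0; i < n; i++) {
		if (!vrs[i].in_use)
			continue;
		err = bind_default_tree(&vrs[i]);
		if (err)
			return err;
		err = install_default_trap(&vrs[i]);
		if (err)
			return err;
	}
	return 0;
}
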
 
 static void mlxsw_sp_fib4_node_flush(struct mlxsw_sp *mlxsw_sp,
@@ -2390,7 +2632,7 @@ static void mlxsw_sp_fib4_node_flush(struct mlxsw_sp *mlxsw_sp,
 static void mlxsw_sp_fib_node_flush(struct mlxsw_sp *mlxsw_sp,
                                    struct mlxsw_sp_fib_node *fib_node)
 {
-       switch (fib_node->vr->proto) {
+       switch (fib_node->fib->proto) {
        case MLXSW_SP_L3_PROTO_IPV4:
                mlxsw_sp_fib4_node_flush(mlxsw_sp, fib_node);
                break;
@@ -2400,26 +2642,32 @@ static void mlxsw_sp_fib_node_flush(struct mlxsw_sp *mlxsw_sp,
        }
 }
 
-static void mlxsw_sp_router_fib_flush(struct mlxsw_sp *mlxsw_sp)
+static void mlxsw_sp_vr_fib_flush(struct mlxsw_sp *mlxsw_sp,
+                                 struct mlxsw_sp_vr *vr,
+                                 enum mlxsw_sp_l3proto proto)
 {
+       struct mlxsw_sp_fib *fib = mlxsw_sp_vr_fib(vr, proto);
        struct mlxsw_sp_fib_node *fib_node, *tmp;
-       struct mlxsw_sp_vr *vr;
+
+       list_for_each_entry_safe(fib_node, tmp, &fib->node_list, list) {
+               bool do_break = &tmp->list == &fib->node_list;
+
+               mlxsw_sp_fib_node_flush(mlxsw_sp, fib_node);
+               if (do_break)
+                       break;
+       }
+}
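
The do_break test guards against use-after-free: flushing the last node can tear down the FIB itself, taking the list head with it, so the loop decides before each flush whether it must stop. A minimal model of the idiom (simplified to a singly linked list):

#include <stdbool.h>
#include <stddef.h>

struct node { struct node *next; };
struct fib { struct node *head; };

static void fib_flush(struct fib *fib, void (*flush)(struct node *))
{
	struct node *n, *next;
	bool last;

	for (n = fib->head; n; n = next) {
		next = n->next;
		last = (next == NULL);	/* decided before the flush */
		flush(n);		/* may free fib and its list head */
		if (last)
			break;		/* never touch the freed list again */
	}
}
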
+
+static void mlxsw_sp_router_fib_flush(struct mlxsw_sp *mlxsw_sp)
+{
        int i;
 
        for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_VRS); i++) {
-               vr = &mlxsw_sp->router.vrs[i];
+               struct mlxsw_sp_vr *vr = &mlxsw_sp->router.vrs[i];
 
-               if (!vr->used)
+               if (!mlxsw_sp_vr_is_used(vr))
                        continue;
-
-               list_for_each_entry_safe(fib_node, tmp, &vr->fib->node_list,
-                                        list) {
-                       bool do_break = &tmp->list == &vr->fib->node_list;
-
-                       mlxsw_sp_fib_node_flush(mlxsw_sp, fib_node);
-                       if (do_break)
-                               break;
-               }
+               mlxsw_sp_vr_fib_flush(mlxsw_sp, vr, MLXSW_SP_L3_PROTO_IPV4);
        }
 }
 
@@ -2437,86 +2685,24 @@ static void mlxsw_sp_router_fib4_abort(struct mlxsw_sp *mlxsw_sp)
                dev_warn(mlxsw_sp->bus_info->dev, "Failed to set abort trap.\n");
 }
 
-static int mlxsw_sp_router_rif_disable(struct mlxsw_sp *mlxsw_sp, u16 rif)
-{
-       char ritr_pl[MLXSW_REG_RITR_LEN];
-       int err;
-
-       mlxsw_reg_ritr_rif_pack(ritr_pl, rif);
-       err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
-       if (WARN_ON_ONCE(err))
-               return err;
-
-       mlxsw_reg_ritr_enable_set(ritr_pl, false);
-       return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
-}
-
-void mlxsw_sp_router_rif_gone_sync(struct mlxsw_sp *mlxsw_sp,
-                                  struct mlxsw_sp_rif *r)
-{
-       mlxsw_sp_router_rif_disable(mlxsw_sp, r->rif);
-       mlxsw_sp_nexthop_rif_gone_sync(mlxsw_sp, r);
-       mlxsw_sp_neigh_rif_gone_sync(mlxsw_sp, r);
-}
+struct mlxsw_sp_fib_event_work {
+       struct work_struct work;
+       union {
+               struct fib_entry_notifier_info fen_info;
+               struct fib_rule_notifier_info fr_info;
+               struct fib_nh_notifier_info fnh_info;
+       };
+       struct mlxsw_sp *mlxsw_sp;
+       unsigned long event;
+};
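
FIB notifiers fire in atomic context, so the handler snapshots the notifier payload into a work item and takes a reference on the pointed-to object (fib_info_hold()/fib_rule_get()) before processing it later under RTNL. A sketch of the hand-off (userspace model; the workqueue hook is reduced to a comment):

#include <stdlib.h>
#include <string.h>

struct fib_event_info { int dst_len; void *fi; };

struct fib_event_work {
	/* struct work_struct work;  (kernel workqueue hook) */
	struct fib_event_info info;	/* copied, not pointed at */
	unsigned long event;
};

/* Snapshot the payload and pin anything it references, then hand off
 * to process context.
 */
static struct fib_event_work *defer_event(const struct fib_event_info *ptr,
					  unsigned long event)
{
	struct fib_event_work *w = malloc(sizeof(*w));

	if (!w)
		return NULL;
	memcpy(&w->info, ptr, sizeof(w->info));
	/* hold_reference(w->info.fi); e.g. fib_info_hold()/fib_rule_get() */
	w->event = event;
	return w;	/* queued via the driver's workqueue in the kernel */
}
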
 
-static int __mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp)
+static void mlxsw_sp_router_fib_event_work(struct work_struct *work)
 {
-       char rgcr_pl[MLXSW_REG_RGCR_LEN];
-       u64 max_rifs;
-       int err;
-
-       if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_RIFS))
-               return -EIO;
-
-       max_rifs = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS);
-       mlxsw_sp->rifs = kcalloc(max_rifs, sizeof(struct mlxsw_sp_rif *),
-                                GFP_KERNEL);
-       if (!mlxsw_sp->rifs)
-               return -ENOMEM;
-
-       mlxsw_reg_rgcr_pack(rgcr_pl, true);
-       mlxsw_reg_rgcr_max_router_interfaces_set(rgcr_pl, max_rifs);
-       err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rgcr), rgcr_pl);
-       if (err)
-               goto err_rgcr_fail;
-
-       return 0;
-
-err_rgcr_fail:
-       kfree(mlxsw_sp->rifs);
-       return err;
-}
-
-static void __mlxsw_sp_router_fini(struct mlxsw_sp *mlxsw_sp)
-{
-       char rgcr_pl[MLXSW_REG_RGCR_LEN];
-       int i;
-
-       mlxsw_reg_rgcr_pack(rgcr_pl, false);
-       mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rgcr), rgcr_pl);
-
-       for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS); i++)
-               WARN_ON_ONCE(mlxsw_sp->rifs[i]);
-
-       kfree(mlxsw_sp->rifs);
-}
-
-struct mlxsw_sp_fib_event_work {
-       struct work_struct work;
-       union {
-               struct fib_entry_notifier_info fen_info;
-               struct fib_nh_notifier_info fnh_info;
-       };
-       struct mlxsw_sp *mlxsw_sp;
-       unsigned long event;
-};
-
-static void mlxsw_sp_router_fib_event_work(struct work_struct *work)
-{
-       struct mlxsw_sp_fib_event_work *fib_work =
-               container_of(work, struct mlxsw_sp_fib_event_work, work);
-       struct mlxsw_sp *mlxsw_sp = fib_work->mlxsw_sp;
-       bool replace, append;
+       struct mlxsw_sp_fib_event_work *fib_work =
+               container_of(work, struct mlxsw_sp_fib_event_work, work);
+       struct mlxsw_sp *mlxsw_sp = fib_work->mlxsw_sp;
+       struct fib_rule *rule;
+       bool replace, append;
        int err;
 
        /* Protect internal structures from changes */
@@ -2539,7 +2725,10 @@ static void mlxsw_sp_router_fib_event_work(struct work_struct *work)
                break;
        case FIB_EVENT_RULE_ADD: /* fall through */
        case FIB_EVENT_RULE_DEL:
-               mlxsw_sp_router_fib4_abort(mlxsw_sp);
+               rule = fib_work->fr_info.rule;
+               if (!fib4_rule_default(rule) && !rule->l3mdev)
+                       mlxsw_sp_router_fib4_abort(mlxsw_sp);
+               fib_rule_put(rule);
                break;
        case FIB_EVENT_NH_ADD: /* fall through */
        case FIB_EVENT_NH_DEL:
@@ -2582,6 +2771,11 @@ static int mlxsw_sp_router_fib_event(struct notifier_block *nb,
                 */
                fib_info_hold(fib_work->fen_info.fi);
                break;
+       case FIB_EVENT_RULE_ADD: /* fall through */
+       case FIB_EVENT_RULE_DEL:
+               memcpy(&fib_work->fr_info, ptr, sizeof(fib_work->fr_info));
+               fib_rule_get(fib_work->fr_info.rule);
+               break;
        case FIB_EVENT_NH_ADD: /* fall through */
        case FIB_EVENT_NH_DEL:
                memcpy(&fib_work->fnh_info, ptr, sizeof(fib_work->fnh_info));
@@ -2594,6 +2788,716 @@ static int mlxsw_sp_router_fib_event(struct notifier_block *nb,
        return NOTIFY_DONE;
 }
 
+static struct mlxsw_sp_rif *
+mlxsw_sp_rif_find_by_dev(const struct mlxsw_sp *mlxsw_sp,
+                        const struct net_device *dev)
+{
+       int i;
+
+       for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS); i++)
+               if (mlxsw_sp->rifs[i] && mlxsw_sp->rifs[i]->dev == dev)
+                       return mlxsw_sp->rifs[i];
+
+       return NULL;
+}
+
+static int mlxsw_sp_router_rif_disable(struct mlxsw_sp *mlxsw_sp, u16 rif)
+{
+       char ritr_pl[MLXSW_REG_RITR_LEN];
+       int err;
+
+       mlxsw_reg_ritr_rif_pack(ritr_pl, rif);
+       err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
+       if (WARN_ON_ONCE(err))
+               return err;
+
+       mlxsw_reg_ritr_enable_set(ritr_pl, false);
+       return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
+}
+
+static void mlxsw_sp_router_rif_gone_sync(struct mlxsw_sp *mlxsw_sp,
+                                         struct mlxsw_sp_rif *rif)
+{
+       mlxsw_sp_router_rif_disable(mlxsw_sp, rif->rif_index);
+       mlxsw_sp_nexthop_rif_gone_sync(mlxsw_sp, rif);
+       mlxsw_sp_neigh_rif_gone_sync(mlxsw_sp, rif);
+}
+
+static bool mlxsw_sp_rif_should_config(struct mlxsw_sp_rif *rif,
+                                      const struct in_device *in_dev,
+                                      unsigned long event)
+{
+       switch (event) {
+       case NETDEV_UP:
+               if (!rif)
+                       return true;
+               return false;
+       case NETDEV_DOWN:
+               if (rif && !in_dev->ifa_list &&
+                   !netif_is_l3_slave(rif->dev))
+                       return true;
+               /* It is possible we already removed the RIF ourselves
+                * if it was assigned to a netdev that is now a bridge
+                * or LAG slave.
+                */
+               return false;
+       }
+
+       return false;
+}
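
mlxsw_sp_rif_should_config() reduces to a small decision rule: create a RIF on NETDEV_UP only if none exists; remove it on NETDEV_DOWN only once no addresses remain and the netdev is not an L3 (VRF) slave, since VRF enslavement tears the RIF down through a different path. As a standalone predicate (hypothetical parameters):

#include <stdbool.h>

static bool rif_should_config(bool have_rif, bool have_addrs,
			      bool is_l3_slave, bool up)
{
	if (up)
		return !have_rif;	/* NETDEV_UP: create if missing */
	/* NETDEV_DOWN: remove only when truly unused and not VRF-owned */
	return have_rif && !have_addrs && !is_l3_slave;
}
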
+
+#define MLXSW_SP_INVALID_INDEX_RIF 0xffff
+static int mlxsw_sp_avail_rif_get(struct mlxsw_sp *mlxsw_sp)
+{
+       int i;
+
+       for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS); i++)
+               if (!mlxsw_sp->rifs[i])
+                       return i;
+
+       return MLXSW_SP_INVALID_INDEX_RIF;
+}
+
+static void mlxsw_sp_vport_rif_sp_attr_get(struct mlxsw_sp_port *mlxsw_sp_vport,
+                                          bool *p_lagged, u16 *p_system_port)
+{
+       u8 local_port = mlxsw_sp_vport->local_port;
+
+       *p_lagged = mlxsw_sp_vport->lagged;
+       *p_system_port = *p_lagged ? mlxsw_sp_vport->lag_id : local_port;
+}
+
+static int mlxsw_sp_vport_rif_sp_op(struct mlxsw_sp_port *mlxsw_sp_vport,
+                                   u16 vr_id, struct net_device *l3_dev,
+                                   u16 rif_index, bool create)
+{
+       struct mlxsw_sp *mlxsw_sp = mlxsw_sp_vport->mlxsw_sp;
+       bool lagged = mlxsw_sp_vport->lagged;
+       char ritr_pl[MLXSW_REG_RITR_LEN];
+       u16 system_port;
+
+       mlxsw_reg_ritr_pack(ritr_pl, create, MLXSW_REG_RITR_SP_IF, rif_index,
+                           vr_id, l3_dev->mtu, l3_dev->dev_addr);
+
+       mlxsw_sp_vport_rif_sp_attr_get(mlxsw_sp_vport, &lagged, &system_port);
+       mlxsw_reg_ritr_sp_if_pack(ritr_pl, lagged, system_port,
+                                 mlxsw_sp_vport_vid_get(mlxsw_sp_vport));
+
+       return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
+}
+
+static void mlxsw_sp_vport_rif_sp_leave(struct mlxsw_sp_port *mlxsw_sp_vport);
+
+static u16 mlxsw_sp_rif_sp_to_fid(u16 rif_index)
+{
+       return MLXSW_SP_RFID_BASE + rif_index;
+}
+
+static struct mlxsw_sp_fid *
+mlxsw_sp_rfid_alloc(u16 fid, struct net_device *l3_dev)
+{
+       struct mlxsw_sp_fid *f;
+
+       f = kzalloc(sizeof(*f), GFP_KERNEL);
+       if (!f)
+               return NULL;
+
+       f->leave = mlxsw_sp_vport_rif_sp_leave;
+       f->ref_count = 0;
+       f->dev = l3_dev;
+       f->fid = fid;
+
+       return f;
+}
+
+static struct mlxsw_sp_rif *
+mlxsw_sp_rif_alloc(u16 rif_index, u16 vr_id, struct net_device *l3_dev,
+                  struct mlxsw_sp_fid *f)
+{
+       struct mlxsw_sp_rif *rif;
+
+       rif = kzalloc(sizeof(*rif), GFP_KERNEL);
+       if (!rif)
+               return NULL;
+
+       INIT_LIST_HEAD(&rif->nexthop_list);
+       INIT_LIST_HEAD(&rif->neigh_list);
+       ether_addr_copy(rif->addr, l3_dev->dev_addr);
+       rif->mtu = l3_dev->mtu;
+       rif->vr_id = vr_id;
+       rif->dev = l3_dev;
+       rif->rif_index = rif_index;
+       rif->f = f;
+
+       return rif;
+}
+
+u16 mlxsw_sp_rif_index(const struct mlxsw_sp_rif *rif)
+{
+       return rif->rif_index;
+}
+
+int mlxsw_sp_rif_dev_ifindex(const struct mlxsw_sp_rif *rif)
+{
+       return rif->dev->ifindex;
+}
+
+static struct mlxsw_sp_rif *
+mlxsw_sp_vport_rif_sp_create(struct mlxsw_sp_port *mlxsw_sp_vport,
+                            struct net_device *l3_dev)
+{
+       struct mlxsw_sp *mlxsw_sp = mlxsw_sp_vport->mlxsw_sp;
+       u32 tb_id = l3mdev_fib_table(l3_dev);
+       struct mlxsw_sp_vr *vr;
+       struct mlxsw_sp_fid *f;
+       struct mlxsw_sp_rif *rif;
+       u16 fid, rif_index;
+       int err;
+
+       rif_index = mlxsw_sp_avail_rif_get(mlxsw_sp);
+       if (rif_index == MLXSW_SP_INVALID_INDEX_RIF)
+               return ERR_PTR(-ERANGE);
+
+       vr = mlxsw_sp_vr_get(mlxsw_sp, tb_id ? : RT_TABLE_MAIN);
+       if (IS_ERR(vr))
+               return ERR_CAST(vr);
+
+       err = mlxsw_sp_vport_rif_sp_op(mlxsw_sp_vport, vr->id, l3_dev,
+                                      rif_index, true);
+       if (err)
+               goto err_vport_rif_sp_op;
+
+       fid = mlxsw_sp_rif_sp_to_fid(rif_index);
+       err = mlxsw_sp_rif_fdb_op(mlxsw_sp, l3_dev->dev_addr, fid, true);
+       if (err)
+               goto err_rif_fdb_op;
+
+       f = mlxsw_sp_rfid_alloc(fid, l3_dev);
+       if (!f) {
+               err = -ENOMEM;
+               goto err_rfid_alloc;
+       }
+
+       rif = mlxsw_sp_rif_alloc(rif_index, vr->id, l3_dev, f);
+       if (!rif) {
+               err = -ENOMEM;
+               goto err_rif_alloc;
+       }
+
+       if (devlink_dpipe_table_counter_enabled(priv_to_devlink(mlxsw_sp->core),
+                                               MLXSW_SP_DPIPE_TABLE_NAME_ERIF)) {
+               err = mlxsw_sp_rif_counter_alloc(mlxsw_sp, rif,
+                                                MLXSW_SP_RIF_COUNTER_EGRESS);
+               if (err)
+                       netdev_dbg(mlxsw_sp_vport->dev,
+                                  "Counter alloc Failed err=%d\n", err);
+       }
+
+       f->rif = rif;
+       mlxsw_sp->rifs[rif_index] = rif;
+       vr->rif_count++;
+
+       return rif;
+
+err_rif_alloc:
+       kfree(f);
+err_rfid_alloc:
+       mlxsw_sp_rif_fdb_op(mlxsw_sp, l3_dev->dev_addr, fid, false);
+err_rif_fdb_op:
+       mlxsw_sp_vport_rif_sp_op(mlxsw_sp_vport, vr->id, l3_dev, rif_index,
+                                false);
+err_vport_rif_sp_op:
+       mlxsw_sp_vr_put(vr);
+       return ERR_PTR(err);
+}
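
The creation path composes several hardware operations and unwinds them in reverse on failure, ending with mlxsw_sp_vr_put() to drop the table reference taken first. The shape of that goto ladder, reduced to stubs:

static int step_a(void) { return 0; }	/* e.g. mlxsw_sp_vr_get() */
static int step_b(void) { return 0; }	/* e.g. the RITR RIF write */
static int step_c(void) { return 0; }	/* e.g. the FDB entry */
static void undo_b(void) { }		/* undo the RITR write */
static void undo_a(void) { }		/* e.g. mlxsw_sp_vr_put() */

/* Each successful step adds one label to the exit path; a failure jumps
 * to the label that undoes everything completed so far, in reverse.
 */
static int create(void)
{
	int err;

	err = step_a();
	if (err)
		return err;
	err = step_b();
	if (err)
		goto err_b;
	err = step_c();
	if (err)
		goto err_c;
	return 0;

err_c:
	undo_b();
err_b:
	undo_a();
	return err;
}
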
+
+static void mlxsw_sp_vport_rif_sp_destroy(struct mlxsw_sp_port *mlxsw_sp_vport,
+                                         struct mlxsw_sp_rif *rif)
+{
+       struct mlxsw_sp *mlxsw_sp = mlxsw_sp_vport->mlxsw_sp;
+       struct mlxsw_sp_vr *vr = &mlxsw_sp->router.vrs[rif->vr_id];
+       struct net_device *l3_dev = rif->dev;
+       struct mlxsw_sp_fid *f = rif->f;
+       u16 rif_index = rif->rif_index;
+       u16 fid = f->fid;
+
+       mlxsw_sp_router_rif_gone_sync(mlxsw_sp, rif);
+
+       mlxsw_sp_rif_counter_free(mlxsw_sp, rif, MLXSW_SP_RIF_COUNTER_EGRESS);
+       mlxsw_sp_rif_counter_free(mlxsw_sp, rif, MLXSW_SP_RIF_COUNTER_INGRESS);
+
+       vr->rif_count--;
+       mlxsw_sp->rifs[rif_index] = NULL;
+       f->rif = NULL;
+
+       kfree(rif);
+
+       kfree(f);
+
+       mlxsw_sp_rif_fdb_op(mlxsw_sp, l3_dev->dev_addr, fid, false);
+
+       mlxsw_sp_vport_rif_sp_op(mlxsw_sp_vport, vr->id, l3_dev, rif_index,
+                                false);
+       mlxsw_sp_vr_put(vr);
+}
+
+static int mlxsw_sp_vport_rif_sp_join(struct mlxsw_sp_port *mlxsw_sp_vport,
+                                     struct net_device *l3_dev)
+{
+       struct mlxsw_sp *mlxsw_sp = mlxsw_sp_vport->mlxsw_sp;
+       struct mlxsw_sp_rif *rif;
+
+       rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, l3_dev);
+       if (!rif) {
+               rif = mlxsw_sp_vport_rif_sp_create(mlxsw_sp_vport, l3_dev);
+               if (IS_ERR(rif))
+                       return PTR_ERR(rif);
+       }
+
+       mlxsw_sp_vport_fid_set(mlxsw_sp_vport, rif->f);
+       rif->f->ref_count++;
+
+       netdev_dbg(mlxsw_sp_vport->dev, "Joined FID=%d\n", rif->f->fid);
+
+       return 0;
+}
+
+static void mlxsw_sp_vport_rif_sp_leave(struct mlxsw_sp_port *mlxsw_sp_vport)
+{
+       struct mlxsw_sp_fid *f = mlxsw_sp_vport_fid_get(mlxsw_sp_vport);
+
+       netdev_dbg(mlxsw_sp_vport->dev, "Left FID=%d\n", f->fid);
+
+       mlxsw_sp_vport_fid_set(mlxsw_sp_vport, NULL);
+       if (--f->ref_count == 0)
+               mlxsw_sp_vport_rif_sp_destroy(mlxsw_sp_vport, f->rif);
+}
+
+static int mlxsw_sp_inetaddr_vport_event(struct net_device *l3_dev,
+                                        struct net_device *port_dev,
+                                        unsigned long event, u16 vid)
+{
+       struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(port_dev);
+       struct mlxsw_sp_port *mlxsw_sp_vport;
+
+       mlxsw_sp_vport = mlxsw_sp_port_vport_find(mlxsw_sp_port, vid);
+       if (WARN_ON(!mlxsw_sp_vport))
+               return -EINVAL;
+
+       switch (event) {
+       case NETDEV_UP:
+               return mlxsw_sp_vport_rif_sp_join(mlxsw_sp_vport, l3_dev);
+       case NETDEV_DOWN:
+               mlxsw_sp_vport_rif_sp_leave(mlxsw_sp_vport);
+               break;
+       }
+
+       return 0;
+}
+
+static int mlxsw_sp_inetaddr_port_event(struct net_device *port_dev,
+                                       unsigned long event)
+{
+       if (netif_is_bridge_port(port_dev) || netif_is_lag_port(port_dev))
+               return 0;
+
+       return mlxsw_sp_inetaddr_vport_event(port_dev, port_dev, event, 1);
+}
+
+static int __mlxsw_sp_inetaddr_lag_event(struct net_device *l3_dev,
+                                        struct net_device *lag_dev,
+                                        unsigned long event, u16 vid)
+{
+       struct net_device *port_dev;
+       struct list_head *iter;
+       int err;
+
+       netdev_for_each_lower_dev(lag_dev, port_dev, iter) {
+               if (mlxsw_sp_port_dev_check(port_dev)) {
+                       err = mlxsw_sp_inetaddr_vport_event(l3_dev, port_dev,
+                                                           event, vid);
+                       if (err)
+                               return err;
+               }
+       }
+
+       return 0;
+}
+
+static int mlxsw_sp_inetaddr_lag_event(struct net_device *lag_dev,
+                                      unsigned long event)
+{
+       if (netif_is_bridge_port(lag_dev))
+               return 0;
+
+       return __mlxsw_sp_inetaddr_lag_event(lag_dev, lag_dev, event, 1);
+}
+
+static struct mlxsw_sp_fid *mlxsw_sp_bridge_fid_get(struct mlxsw_sp *mlxsw_sp,
+                                                   struct net_device *l3_dev)
+{
+       u16 fid;
+
+       if (is_vlan_dev(l3_dev))
+               fid = vlan_dev_vlan_id(l3_dev);
+       else if (mlxsw_sp->master_bridge.dev == l3_dev)
+               fid = 1;
+       else
+               return mlxsw_sp_vfid_find(mlxsw_sp, l3_dev);
+
+       return mlxsw_sp_fid_find(mlxsw_sp, fid);
+}
+
+static u8 mlxsw_sp_router_port(const struct mlxsw_sp *mlxsw_sp)
+{
+       return mlxsw_core_max_ports(mlxsw_sp->core) + 1;
+}
+
+static enum mlxsw_flood_table_type mlxsw_sp_flood_table_type_get(u16 fid)
+{
+       return mlxsw_sp_fid_is_vfid(fid) ? MLXSW_REG_SFGC_TABLE_TYPE_FID :
+              MLXSW_REG_SFGC_TABLE_TYPE_FID_OFFEST;
+}
+
+static u16 mlxsw_sp_flood_table_index_get(u16 fid)
+{
+       return mlxsw_sp_fid_is_vfid(fid) ? mlxsw_sp_fid_to_vfid(fid) : fid;
+}
+
+static int mlxsw_sp_router_port_flood_set(struct mlxsw_sp *mlxsw_sp, u16 fid,
+                                         bool set)
+{
+       u8 router_port = mlxsw_sp_router_port(mlxsw_sp);
+       enum mlxsw_flood_table_type table_type;
+       char *sftr_pl;
+       u16 index;
+       int err;
+
+       sftr_pl = kmalloc(MLXSW_REG_SFTR_LEN, GFP_KERNEL);
+       if (!sftr_pl)
+               return -ENOMEM;
+
+       table_type = mlxsw_sp_flood_table_type_get(fid);
+       index = mlxsw_sp_flood_table_index_get(fid);
+       mlxsw_reg_sftr_pack(sftr_pl, MLXSW_SP_FLOOD_TABLE_BC, index, table_type,
+                           1, router_port, set);
+       err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sftr), sftr_pl);
+
+       kfree(sftr_pl);
+       return err;
+}
+
+static enum mlxsw_reg_ritr_if_type mlxsw_sp_rif_type_get(u16 fid)
+{
+       if (mlxsw_sp_fid_is_vfid(fid))
+               return MLXSW_REG_RITR_FID_IF;
+       else
+               return MLXSW_REG_RITR_VLAN_IF;
+}
+
+static int mlxsw_sp_rif_bridge_op(struct mlxsw_sp *mlxsw_sp, u16 vr_id,
+                                 struct net_device *l3_dev,
+                                 u16 fid, u16 rif,
+                                 bool create)
+{
+       enum mlxsw_reg_ritr_if_type rif_type;
+       char ritr_pl[MLXSW_REG_RITR_LEN];
+
+       rif_type = mlxsw_sp_rif_type_get(fid);
+       mlxsw_reg_ritr_pack(ritr_pl, create, rif_type, rif, vr_id, l3_dev->mtu,
+                           l3_dev->dev_addr);
+       mlxsw_reg_ritr_fid_set(ritr_pl, rif_type, fid);
+
+       return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
+}
+
+static int mlxsw_sp_rif_bridge_create(struct mlxsw_sp *mlxsw_sp,
+                                     struct net_device *l3_dev,
+                                     struct mlxsw_sp_fid *f)
+{
+       u32 tb_id = l3mdev_fib_table(l3_dev);
+       struct mlxsw_sp_rif *rif;
+       struct mlxsw_sp_vr *vr;
+       u16 rif_index;
+       int err;
+
+       rif_index = mlxsw_sp_avail_rif_get(mlxsw_sp);
+       if (rif_index == MLXSW_SP_INVALID_INDEX_RIF)
+               return -ERANGE;
+
+       vr = mlxsw_sp_vr_get(mlxsw_sp, tb_id ? : RT_TABLE_MAIN);
+       if (IS_ERR(vr))
+               return PTR_ERR(vr);
+
+       err = mlxsw_sp_router_port_flood_set(mlxsw_sp, f->fid, true);
+       if (err)
+               goto err_port_flood_set;
+
+       err = mlxsw_sp_rif_bridge_op(mlxsw_sp, vr->id, l3_dev, f->fid,
+                                    rif_index, true);
+       if (err)
+               goto err_rif_bridge_op;
+
+       err = mlxsw_sp_rif_fdb_op(mlxsw_sp, l3_dev->dev_addr, f->fid, true);
+       if (err)
+               goto err_rif_fdb_op;
+
+       rif = mlxsw_sp_rif_alloc(rif_index, vr->id, l3_dev, f);
+       if (!rif) {
+               err = -ENOMEM;
+               goto err_rif_alloc;
+       }
+
+       f->rif = rif;
+       mlxsw_sp->rifs[rif_index] = rif;
+       vr->rif_count++;
+
+       netdev_dbg(l3_dev, "RIF=%d created\n", rif_index);
+
+       return 0;
+
+err_rif_alloc:
+       mlxsw_sp_rif_fdb_op(mlxsw_sp, l3_dev->dev_addr, f->fid, false);
+err_rif_fdb_op:
+       mlxsw_sp_rif_bridge_op(mlxsw_sp, vr->id, l3_dev, f->fid, rif_index,
+                              false);
+err_rif_bridge_op:
+       mlxsw_sp_router_port_flood_set(mlxsw_sp, f->fid, false);
+err_port_flood_set:
+       mlxsw_sp_vr_put(vr);
+       return err;
+}
+
+void mlxsw_sp_rif_bridge_destroy(struct mlxsw_sp *mlxsw_sp,
+                                struct mlxsw_sp_rif *rif)
+{
+       struct mlxsw_sp_vr *vr = &mlxsw_sp->router.vrs[rif->vr_id];
+       struct net_device *l3_dev = rif->dev;
+       struct mlxsw_sp_fid *f = rif->f;
+       u16 rif_index = rif->rif_index;
+
+       mlxsw_sp_router_rif_gone_sync(mlxsw_sp, rif);
+
+       vr->rif_count--;
+       mlxsw_sp->rifs[rif_index] = NULL;
+       f->rif = NULL;
+
+       kfree(rif);
+
+       mlxsw_sp_rif_fdb_op(mlxsw_sp, l3_dev->dev_addr, f->fid, false);
+
+       mlxsw_sp_rif_bridge_op(mlxsw_sp, vr->id, l3_dev, f->fid, rif_index,
+                              false);
+
+       mlxsw_sp_router_port_flood_set(mlxsw_sp, f->fid, false);
+
+       mlxsw_sp_vr_put(vr);
+
+       netdev_dbg(l3_dev, "RIF=%d destroyed\n", rif_index);
+}
+
+static int mlxsw_sp_inetaddr_bridge_event(struct net_device *l3_dev,
+                                         struct net_device *br_dev,
+                                         unsigned long event)
+{
+       struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(l3_dev);
+       struct mlxsw_sp_fid *f;
+
+       /* FID can either be an actual FID if the L3 device is the
+        * VLAN-aware bridge or a VLAN device on top. Otherwise, the
+        * L3 device is a VLAN-unaware bridge and we get a vFID.
+        */
+       f = mlxsw_sp_bridge_fid_get(mlxsw_sp, l3_dev);
+       if (WARN_ON(!f))
+               return -EINVAL;
+
+       switch (event) {
+       case NETDEV_UP:
+               return mlxsw_sp_rif_bridge_create(mlxsw_sp, l3_dev, f);
+       case NETDEV_DOWN:
+               mlxsw_sp_rif_bridge_destroy(mlxsw_sp, f->rif);
+               break;
+       }
+
+       return 0;
+}
+
+static int mlxsw_sp_inetaddr_vlan_event(struct net_device *vlan_dev,
+                                       unsigned long event)
+{
+       struct net_device *real_dev = vlan_dev_real_dev(vlan_dev);
+       struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(vlan_dev);
+       u16 vid = vlan_dev_vlan_id(vlan_dev);
+
+       if (mlxsw_sp_port_dev_check(real_dev))
+               return mlxsw_sp_inetaddr_vport_event(vlan_dev, real_dev, event,
+                                                    vid);
+       else if (netif_is_lag_master(real_dev))
+               return __mlxsw_sp_inetaddr_lag_event(vlan_dev, real_dev, event,
+                                                    vid);
+       else if (netif_is_bridge_master(real_dev) &&
+                mlxsw_sp->master_bridge.dev == real_dev)
+               return mlxsw_sp_inetaddr_bridge_event(vlan_dev, real_dev,
+                                                     event);
+
+       return 0;
+}
+
+int mlxsw_sp_inetaddr_event(struct notifier_block *unused,
+                           unsigned long event, void *ptr)
+{
+       struct in_ifaddr *ifa = (struct in_ifaddr *) ptr;
+       struct net_device *dev = ifa->ifa_dev->dev;
+       struct mlxsw_sp *mlxsw_sp;
+       struct mlxsw_sp_rif *rif;
+       int err = 0;
+
+       mlxsw_sp = mlxsw_sp_lower_get(dev);
+       if (!mlxsw_sp)
+               goto out;
+
+       rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
+       if (!mlxsw_sp_rif_should_config(rif, ifa->ifa_dev, event))
+               goto out;
+
+       if (mlxsw_sp_port_dev_check(dev))
+               err = mlxsw_sp_inetaddr_port_event(dev, event);
+       else if (netif_is_lag_master(dev))
+               err = mlxsw_sp_inetaddr_lag_event(dev, event);
+       else if (netif_is_bridge_master(dev))
+               err = mlxsw_sp_inetaddr_bridge_event(dev, dev, event);
+       else if (is_vlan_dev(dev))
+               err = mlxsw_sp_inetaddr_vlan_event(dev, event);
+
+out:
+       return notifier_from_errno(err);
+}
+
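/* Illustrative sketch, not part of this patch: the handler above is an
 * IPv4 address notifier, so wiring it up would look roughly like this
 * (the notifier_block name here is an assumption):
 */
static struct notifier_block mlxsw_sp_inetaddr_nb = {
	.notifier_call = mlxsw_sp_inetaddr_event,
};
/* ...and during init: register_inetaddr_notifier(&mlxsw_sp_inetaddr_nb); */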
+static int mlxsw_sp_rif_edit(struct mlxsw_sp *mlxsw_sp, u16 rif_index,
+                            const char *mac, int mtu)
+{
+       char ritr_pl[MLXSW_REG_RITR_LEN];
+       int err;
+
+       mlxsw_reg_ritr_rif_pack(ritr_pl, rif_index);
+       err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
+       if (err)
+               return err;
+
+       mlxsw_reg_ritr_mtu_set(ritr_pl, mtu);
+       mlxsw_reg_ritr_if_mac_memcpy_to(ritr_pl, mac);
+       mlxsw_reg_ritr_op_set(ritr_pl, MLXSW_REG_RITR_RIF_CREATE);
+       return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
+}
+
+int mlxsw_sp_netdevice_router_port_event(struct net_device *dev)
+{
+       struct mlxsw_sp *mlxsw_sp;
+       struct mlxsw_sp_rif *rif;
+       int err;
+
+       mlxsw_sp = mlxsw_sp_lower_get(dev);
+       if (!mlxsw_sp)
+               return 0;
+
+       rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
+       if (!rif)
+               return 0;
+
+       err = mlxsw_sp_rif_fdb_op(mlxsw_sp, rif->addr, rif->f->fid, false);
+       if (err)
+               return err;
+
+       err = mlxsw_sp_rif_edit(mlxsw_sp, rif->rif_index, dev->dev_addr,
+                               dev->mtu);
+       if (err)
+               goto err_rif_edit;
+
+       err = mlxsw_sp_rif_fdb_op(mlxsw_sp, dev->dev_addr, rif->f->fid, true);
+       if (err)
+               goto err_rif_fdb_op;
+
+       ether_addr_copy(rif->addr, dev->dev_addr);
+       rif->mtu = dev->mtu;
+
+       netdev_dbg(dev, "Updated RIF=%d\n", rif->rif_index);
+
+       return 0;
+
+err_rif_fdb_op:
+       mlxsw_sp_rif_edit(mlxsw_sp, rif->rif_index, rif->addr, rif->mtu);
+err_rif_edit:
+       mlxsw_sp_rif_fdb_op(mlxsw_sp, rif->addr, rif->f->fid, true);
+       return err;
+}
+
+int mlxsw_sp_vport_vrf_join(struct mlxsw_sp_port *mlxsw_sp_vport)
+{
+       struct mlxsw_sp_fid *f = mlxsw_sp_vport_fid_get(mlxsw_sp_vport);
+       struct net_device *dev = mlxsw_sp_vport->dev;
+
+       /* If the vPort already has a RIF, we need to drop it.
+        * A new one will be created using the VRF's VR.
+        */
+       if (f && f->rif)
+               mlxsw_sp_vport_rif_sp_leave(mlxsw_sp_vport);
+
+       return mlxsw_sp_vport_rif_sp_join(mlxsw_sp_vport, dev);
+}
+
+void mlxsw_sp_vport_vrf_leave(struct mlxsw_sp_port *mlxsw_sp_vport)
+{
+       mlxsw_sp_vport_rif_sp_leave(mlxsw_sp_vport);
+}
+
+int mlxsw_sp_port_vrf_join(struct mlxsw_sp_port *mlxsw_sp_port)
+{
+       struct mlxsw_sp_port *mlxsw_sp_vport;
+
+       mlxsw_sp_vport = mlxsw_sp_port_vport_find(mlxsw_sp_port, 1);
+       if (WARN_ON(!mlxsw_sp_vport))
+               return -EINVAL;
+
+       return mlxsw_sp_vport_vrf_join(mlxsw_sp_vport);
+}
+
+void mlxsw_sp_port_vrf_leave(struct mlxsw_sp_port *mlxsw_sp_port)
+{
+       struct mlxsw_sp_port *mlxsw_sp_vport;
+
+       mlxsw_sp_vport = mlxsw_sp_port_vport_find(mlxsw_sp_port, 1);
+       if (WARN_ON(!mlxsw_sp_vport))
+               return;
+
+       mlxsw_sp_vport_vrf_leave(mlxsw_sp_vport);
+}
+
+int mlxsw_sp_bridge_vrf_join(struct mlxsw_sp *mlxsw_sp,
+                            struct net_device *l3_dev)
+{
+       struct mlxsw_sp_fid *f;
+
+       f = mlxsw_sp_bridge_fid_get(mlxsw_sp, l3_dev);
+       if (WARN_ON(!f))
+               return -EINVAL;
+
+       if (f->rif)
+               mlxsw_sp_rif_bridge_destroy(mlxsw_sp, f->rif);
+
+       return mlxsw_sp_rif_bridge_create(mlxsw_sp, l3_dev, f);
+}
+
+void mlxsw_sp_bridge_vrf_leave(struct mlxsw_sp *mlxsw_sp,
+                              struct net_device *l3_dev)
+{
+       struct mlxsw_sp_fid *f;
+
+       f = mlxsw_sp_bridge_fid_get(mlxsw_sp, l3_dev);
+       if (WARN_ON(!f))
+               return;
+       mlxsw_sp_rif_bridge_destroy(mlxsw_sp, f->rif);
+}
+
 static void mlxsw_sp_router_fib_dump_flush(struct notifier_block *nb)
 {
        struct mlxsw_sp *mlxsw_sp = container_of(nb, struct mlxsw_sp, fib_nb);
@@ -2606,6 +3510,48 @@ static void mlxsw_sp_router_fib_dump_flush(struct notifier_block *nb)
        mlxsw_sp_router_fib_flush(mlxsw_sp);
 }
 
+static int __mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp)
+{
+       char rgcr_pl[MLXSW_REG_RGCR_LEN];
+       u64 max_rifs;
+       int err;
+
+       if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_RIFS))
+               return -EIO;
+
+       max_rifs = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS);
+       mlxsw_sp->rifs = kcalloc(max_rifs, sizeof(struct mlxsw_sp_rif *),
+                                GFP_KERNEL);
+       if (!mlxsw_sp->rifs)
+               return -ENOMEM;
+
+       mlxsw_reg_rgcr_pack(rgcr_pl, true);
+       mlxsw_reg_rgcr_max_router_interfaces_set(rgcr_pl, max_rifs);
+       err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rgcr), rgcr_pl);
+       if (err)
+               goto err_rgcr_fail;
+
+       return 0;
+
+err_rgcr_fail:
+       kfree(mlxsw_sp->rifs);
+       return err;
+}
+
+static void __mlxsw_sp_router_fini(struct mlxsw_sp *mlxsw_sp)
+{
+       char rgcr_pl[MLXSW_REG_RGCR_LEN];
+       int i;
+
+       mlxsw_reg_rgcr_pack(rgcr_pl, false);
+       mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rgcr), rgcr_pl);
+
+       for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS); i++)
+               WARN_ON_ONCE(mlxsw_sp->rifs[i]);
+
+       kfree(mlxsw_sp->rifs);
+}
+
 int mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp)
 {
        int err;
@@ -2625,7 +3571,10 @@ int mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp)
        if (err)
                goto err_nexthop_group_ht_init;
 
-       mlxsw_sp_lpm_init(mlxsw_sp);
+       err = mlxsw_sp_lpm_init(mlxsw_sp);
+       if (err)
+               goto err_lpm_init;
+
        err = mlxsw_sp_vrs_init(mlxsw_sp);
        if (err)
                goto err_vrs_init;
@@ -2647,6 +3596,8 @@ err_register_fib_notifier:
 err_neigh_init:
        mlxsw_sp_vrs_fini(mlxsw_sp);
 err_vrs_init:
+       mlxsw_sp_lpm_fini(mlxsw_sp);
+err_lpm_init:
        rhashtable_destroy(&mlxsw_sp->router.nexthop_group_ht);
 err_nexthop_group_ht_init:
        rhashtable_destroy(&mlxsw_sp->router.nexthop_ht);
@@ -2660,6 +3611,7 @@ void mlxsw_sp_router_fini(struct mlxsw_sp *mlxsw_sp)
        unregister_fib_notifier(&mlxsw_sp->fib_nb);
        mlxsw_sp_neigh_fini(mlxsw_sp);
        mlxsw_sp_vrs_fini(mlxsw_sp);
+       mlxsw_sp_lpm_fini(mlxsw_sp);
        rhashtable_destroy(&mlxsw_sp->router.nexthop_group_ht);
        rhashtable_destroy(&mlxsw_sp->router.nexthop_ht);
        __mlxsw_sp_router_fini(mlxsw_sp);
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.h
new file mode 100644 (file)
index 0000000..c3095fe
--- /dev/null
@@ -0,0 +1,58 @@
+/*
+ * drivers/net/ethernet/mellanox/mlxsw/spectrum_router.h
+ * Copyright (c) 2017 Mellanox Technologies. All rights reserved.
+ * Copyright (c) 2017 Arkadi Sharshevsky <arkadis@mellanox.com>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. Neither the names of the copyright holders nor the names of its
+ *    contributors may be used to endorse or promote products derived from
+ *    this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _MLXSW_ROUTER_H_
+#define _MLXSW_ROUTER_H_
+
+#include "spectrum.h"
+
+enum mlxsw_sp_rif_counter_dir {
+       MLXSW_SP_RIF_COUNTER_INGRESS,
+       MLXSW_SP_RIF_COUNTER_EGRESS,
+};
+
+u16 mlxsw_sp_rif_index(const struct mlxsw_sp_rif *rif);
+int mlxsw_sp_rif_dev_ifindex(const struct mlxsw_sp_rif *rif);
+int mlxsw_sp_rif_counter_value_get(struct mlxsw_sp *mlxsw_sp,
+                                  struct mlxsw_sp_rif *rif,
+                                  enum mlxsw_sp_rif_counter_dir dir,
+                                  u64 *cnt);
+void mlxsw_sp_rif_counter_free(struct mlxsw_sp *mlxsw_sp,
+                              struct mlxsw_sp_rif *rif,
+                              enum mlxsw_sp_rif_counter_dir dir);
+int mlxsw_sp_rif_counter_alloc(struct mlxsw_sp *mlxsw_sp,
+                              struct mlxsw_sp_rif *rif,
+                              enum mlxsw_sp_rif_counter_dir dir);
+
+#endif /* _MLXSW_ROUTER_H_ */
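/* Illustrative sketch, not part of this file: how a caller might use the
 * RIF counter API declared above. The wrapper function name and pr_info()
 * output are assumptions; only the mlxsw_sp_rif_* calls are declared here.
 */
static int example_read_egress_counter(struct mlxsw_sp *mlxsw_sp,
				       struct mlxsw_sp_rif *rif)
{
	u64 cnt;
	int err;

	err = mlxsw_sp_rif_counter_alloc(mlxsw_sp, rif,
					 MLXSW_SP_RIF_COUNTER_EGRESS);
	if (err)
		return err;

	err = mlxsw_sp_rif_counter_value_get(mlxsw_sp, rif,
					     MLXSW_SP_RIF_COUNTER_EGRESS,
					     &cnt);
	if (!err)
		pr_info("RIF %u: %llu egress packets\n",
			mlxsw_sp_rif_index(rif), cnt);

	mlxsw_sp_rif_counter_free(mlxsw_sp, rif, MLXSW_SP_RIF_COUNTER_EGRESS);
	return err;
}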
index 598727d578c16e924ac5b25a98a7d622e02dc06a..05eaa15ad9d5458c9b67c64ad999eac919a4b0d9 100644 (file)
@@ -568,8 +568,8 @@ void mlxsw_sp_fid_destroy(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_fid *f)
 
        list_del(&f->list);
 
-       if (f->r)
-               mlxsw_sp_rif_bridge_destroy(mlxsw_sp, f->r);
+       if (f->rif)
+               mlxsw_sp_rif_bridge_destroy(mlxsw_sp, f->rif);
 
        kfree(f);
 
@@ -1012,7 +1012,7 @@ static int mlxsw_sp_port_smid_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 mid,
 
        mlxsw_reg_smid_pack(smid_pl, mid, mlxsw_sp_port->local_port, add);
        if (clear_all_ports) {
-               for (i = 1; i < MLXSW_PORT_MAX_PORTS; i++)
+               for (i = 1; i < mlxsw_core_max_ports(mlxsw_sp->core); i++)
                        if (mlxsw_sp->ports[i])
                                mlxsw_reg_smid_port_mask_set(smid_pl, i, 1);
        }
index ec1e886d4566fb098aefc6e4d82d6f69ea62173b..3b0f72455681663514d4725b50ffebe696ada240 100644 (file)
@@ -1321,7 +1321,7 @@ static void mlxsw_sx_ports_remove(struct mlxsw_sx *mlxsw_sx)
 {
        int i;
 
-       for (i = 1; i < MLXSW_PORT_MAX_PORTS; i++)
+       for (i = 1; i < mlxsw_core_max_ports(mlxsw_sx->core); i++)
                if (mlxsw_sx_port_created(mlxsw_sx, i))
                        mlxsw_sx_port_remove(mlxsw_sx, i);
        kfree(mlxsw_sx->ports);
@@ -1329,17 +1329,18 @@ static void mlxsw_sx_ports_remove(struct mlxsw_sx *mlxsw_sx)
 
 static int mlxsw_sx_ports_create(struct mlxsw_sx *mlxsw_sx)
 {
+       unsigned int max_ports = mlxsw_core_max_ports(mlxsw_sx->core);
        size_t alloc_size;
        u8 module, width;
        int i;
        int err;
 
-       alloc_size = sizeof(struct mlxsw_sx_port *) * MLXSW_PORT_MAX_PORTS;
+       alloc_size = sizeof(struct mlxsw_sx_port *) * max_ports;
        mlxsw_sx->ports = kzalloc(alloc_size, GFP_KERNEL);
        if (!mlxsw_sx->ports)
                return -ENOMEM;
 
-       for (i = 1; i < MLXSW_PORT_MAX_PORTS; i++) {
+       for (i = 1; i < max_ports; i++) {
                err = mlxsw_sx_port_module_info_get(mlxsw_sx, i, &module,
                                                    &width);
                if (err)
index 279ee4612981b0af8d482674c6f1f5525988a6f7..20358f87de57053b6e8a0cdd5078334260e1be6b 100644 (file)
@@ -211,25 +211,6 @@ static void ks8851_wrreg8(struct ks8851_net *ks, unsigned reg, unsigned val)
                netdev_err(ks->netdev, "spi_sync() failed\n");
 }
 
-/**
- * ks8851_rx_1msg - select whether to use one or two messages for spi read
- * @ks: The device structure
- *
- * Return whether to generate a single message with a tx and rx buffer
- * supplied to spi_sync(), or alternatively send the tx and rx buffers
- * as separate messages.
- *
- * Depending on the hardware in use, a single message may be more efficient
- * on interrupts or work done by the driver.
- *
- * This currently always returns true until we add some per-device data passed
- * from the platform code to specify which mode is better.
- */
-static inline bool ks8851_rx_1msg(struct ks8851_net *ks)
-{
-       return true;
-}
-
 /**
  * ks8851_rdreg - issue read register command and return the data
  * @ks: The device state
@@ -251,14 +232,7 @@ static void ks8851_rdreg(struct ks8851_net *ks, unsigned op,
 
        txb[0] = cpu_to_le16(op | KS_SPIOP_RD);
 
-       if (ks8851_rx_1msg(ks)) {
-               msg = &ks->spi_msg1;
-               xfer = &ks->spi_xfer1;
-
-               xfer->tx_buf = txb;
-               xfer->rx_buf = trx;
-               xfer->len = rxl + 2;
-       } else {
+       if (ks->spidev->master->flags & SPI_MASTER_HALF_DUPLEX) {
                msg = &ks->spi_msg2;
                xfer = ks->spi_xfer2;
 
@@ -270,15 +244,22 @@ static void ks8851_rdreg(struct ks8851_net *ks, unsigned op,
                xfer->tx_buf = NULL;
                xfer->rx_buf = trx;
                xfer->len = rxl;
+       } else {
+               msg = &ks->spi_msg1;
+               xfer = &ks->spi_xfer1;
+
+               xfer->tx_buf = txb;
+               xfer->rx_buf = trx;
+               xfer->len = rxl + 2;
        }
 
        ret = spi_sync(ks->spidev, msg);
        if (ret < 0)
                netdev_err(ks->netdev, "read: spi_sync() failed\n");
-       else if (ks8851_rx_1msg(ks))
-               memcpy(rxb, trx + 2, rxl);
-       else
+       else if (ks->spidev->master->flags & SPI_MASTER_HALF_DUPLEX)
                memcpy(rxb, trx, rxl);
+       else
+               memcpy(rxb, trx + 2, rxl);
 }
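/* Illustrative sketch, not part of this patch: the removed
 * ks8851_rx_1msg() always returned true; after this change the driver
 * effectively derives the answer from the SPI controller instead,
 * equivalent to:
 */
static inline bool ks8851_rx_1msg(struct ks8851_net *ks)
{
	/* a single combined tx+rx transfer needs a full-duplex master */
	return !(ks->spidev->master->flags & SPI_MASTER_HALF_DUPLEX);
}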
 
 /**
index 06c9f4100cb9bd8c0abecada5fa922c7e779fc51..c0d7d5eec7e72d4d7ea3f6b61e14523fed00a1d0 100644 (file)
@@ -25,6 +25,7 @@
 #include <linux/of_irq.h>
 #include <linux/crc32.h>
 #include <linux/crc32c.h>
+#include <linux/circ_buf.h>
 
 #include "moxart_ether.h"
 
@@ -227,8 +228,8 @@ static int moxart_rx_poll(struct napi_struct *napi, int budget)
                if (desc0 & (RX_DESC0_ERR | RX_DESC0_CRC_ERR | RX_DESC0_FTL |
                             RX_DESC0_RUNT | RX_DESC0_ODD_NB)) {
                        net_dbg_ratelimited("packet error\n");
-                       priv->stats.rx_dropped++;
-                       priv->stats.rx_errors++;
+                       ndev->stats.rx_dropped++;
+                       ndev->stats.rx_errors++;
                        goto rx_next;
                }
 
@@ -244,8 +245,8 @@ static int moxart_rx_poll(struct napi_struct *napi, int budget)
 
                if (unlikely(!skb)) {
                        net_dbg_ratelimited("netdev_alloc_skb_ip_align failed\n");
-                       priv->stats.rx_dropped++;
-                       priv->stats.rx_errors++;
+                       ndev->stats.rx_dropped++;
+                       ndev->stats.rx_errors++;
                        goto rx_next;
                }
 
@@ -255,10 +256,10 @@ static int moxart_rx_poll(struct napi_struct *napi, int budget)
                napi_gro_receive(&priv->napi, skb);
                rx++;
 
-               priv->stats.rx_packets++;
-               priv->stats.rx_bytes += len;
+               ndev->stats.rx_packets++;
+               ndev->stats.rx_bytes += len;
                if (desc0 & RX_DESC0_MULTICAST)
-                       priv->stats.multicast++;
+                       ndev->stats.multicast++;
 
 rx_next:
                wmb(); /* prevent setting ownership back too early */
@@ -278,6 +279,13 @@ rx_next:
        return rx;
 }
 
+static int moxart_tx_queue_space(struct net_device *ndev)
+{
+       struct moxart_mac_priv_t *priv = netdev_priv(ndev);
+
+       return CIRC_SPACE(priv->tx_head, priv->tx_tail, TX_DESC_NUM);
+}
+
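/* For reference: CIRC_SPACE(head, tail, size) from <linux/circ_buf.h>
 * expands to ((tail) - ((head) + 1)) & ((size) - 1), which keeps one
 * descriptor unused so a full ring is distinguishable from an empty one.
 * Worked example with hypothetical indices:
 *
 *	CIRC_SPACE(10, 5, 64) == (5 - 11) & 63 == 58 free descriptors
 */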
 static void moxart_tx_finished(struct net_device *ndev)
 {
        struct moxart_mac_priv_t *priv = netdev_priv(ndev);
@@ -288,8 +296,8 @@ static void moxart_tx_finished(struct net_device *ndev)
                dma_unmap_single(&ndev->dev, priv->tx_mapping[tx_tail],
                                 priv->tx_len[tx_tail], DMA_TO_DEVICE);
 
-               priv->stats.tx_packets++;
-               priv->stats.tx_bytes += priv->tx_skb[tx_tail]->len;
+               ndev->stats.tx_packets++;
+               ndev->stats.tx_bytes += priv->tx_skb[tx_tail]->len;
 
                dev_kfree_skb_irq(priv->tx_skb[tx_tail]);
                priv->tx_skb[tx_tail] = NULL;
@@ -297,6 +305,9 @@ static void moxart_tx_finished(struct net_device *ndev)
                tx_tail = TX_NEXT(tx_tail);
        }
        priv->tx_tail = tx_tail;
+       if (netif_queue_stopped(ndev) &&
+           moxart_tx_queue_space(ndev) >= TX_WAKE_THRESHOLD)
+               netif_wake_queue(ndev);
 }
 
 static irqreturn_t moxart_mac_interrupt(int irq, void *dev_id)
@@ -324,16 +335,21 @@ static int moxart_mac_start_xmit(struct sk_buff *skb, struct net_device *ndev)
        struct moxart_mac_priv_t *priv = netdev_priv(ndev);
        void *desc;
        unsigned int len;
-       unsigned int tx_head = priv->tx_head;
+       unsigned int tx_head;
        u32 txdes1;
        int ret = NETDEV_TX_BUSY;
 
+       spin_lock_irq(&priv->txlock);
+
+       tx_head = priv->tx_head;
        desc = priv->tx_desc_base + (TX_REG_DESC_SIZE * tx_head);
 
-       spin_lock_irq(&priv->txlock);
+       if (moxart_tx_queue_space(ndev) == 1)
+               netif_stop_queue(ndev);
+
        if (moxart_desc_read(desc + TX_REG_OFFSET_DESC0) & TX_DESC0_DMA_OWN) {
                net_dbg_ratelimited("no TX space for packet\n");
-               priv->stats.tx_dropped++;
+               ndev->stats.tx_dropped++;
                goto out_unlock;
        }
        rmb(); /* ensure data is only read that had TX_DESC0_DMA_OWN cleared */
@@ -384,13 +400,6 @@ out_unlock:
        return ret;
 }
 
-static struct net_device_stats *moxart_mac_get_stats(struct net_device *ndev)
-{
-       struct moxart_mac_priv_t *priv = netdev_priv(ndev);
-
-       return &priv->stats;
-}
-
 static void moxart_mac_setmulticast(struct net_device *ndev)
 {
        struct moxart_mac_priv_t *priv = netdev_priv(ndev);
@@ -440,7 +449,6 @@ static const struct net_device_ops moxart_netdev_ops = {
        .ndo_open               = moxart_mac_open,
        .ndo_stop               = moxart_mac_stop,
        .ndo_start_xmit         = moxart_mac_start_xmit,
-       .ndo_get_stats          = moxart_mac_get_stats,
        .ndo_set_rx_mode        = moxart_mac_set_rx_mode,
        .ndo_set_mac_address    = moxart_set_mac_address,
        .ndo_validate_addr      = eth_validate_addr,
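/* Illustrative sketch, not part of this patch: dropping .ndo_get_stats
 * is safe because the core falls back to the counters embedded in
 * struct net_device. Simplified from dev_get_stats() in net/core/dev.c:
 */
if (ops->ndo_get_stats64)
	ops->ndo_get_stats64(dev, storage);
else if (ops->ndo_get_stats)
	netdev_stats_to_stats64(storage, ops->ndo_get_stats(dev));
else
	netdev_stats_to_stats64(storage, &dev->stats);	/* moxart now takes this path */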
index 93a9563ac7c6730eec8240ac187a86b9831c9741..686b8957d5cf00edf786663abff93cf31e36d831 100644 (file)
@@ -59,6 +59,7 @@
 #define TX_NEXT(N)             (((N) + 1) & (TX_DESC_NUM_MASK))
 #define TX_BUF_SIZE            1600
 #define TX_BUF_SIZE_MAX                (TX_DESC1_BUF_SIZE_MASK+1)
+#define TX_WAKE_THRESHOLD      16
 
 #define RX_DESC_NUM            64
 #define RX_DESC_NUM_MASK       (RX_DESC_NUM-1)
 
 struct moxart_mac_priv_t {
        void __iomem *base;
-       struct net_device_stats stats;
        unsigned int reg_maccr;
        unsigned int reg_imr;
        struct napi_struct napi;
index 6933afa69df2e28b8ed24ced53d7108ccc308647..4a5d13ef92a4f431eebeaf06bf72134185a736d8 100644 (file)
@@ -6,6 +6,7 @@ nfp-objs := \
            nfpcore/nfp_cpplib.o \
            nfpcore/nfp_hwinfo.o \
            nfpcore/nfp_mip.o \
+           nfpcore/nfp_mutex.o \
            nfpcore/nfp_nffw.o \
            nfpcore/nfp_nsp.o \
            nfpcore/nfp_nsp_eth.o \
index dedac720fb292c289edc8a7db50169852945bb5c..bea2a1a6c21178ecbe2251007cbd7d79100753cc 100644 (file)
@@ -48,7 +48,7 @@
 #include "nfpcore/nfp.h"
 #include "nfpcore/nfp_cpp.h"
 #include "nfpcore/nfp_nffw.h"
-#include "nfpcore/nfp_nsp_eth.h"
+#include "nfpcore/nfp_nsp.h"
 
 #include "nfpcore/nfp6000_pcie.h"
 
@@ -385,8 +385,7 @@ static void nfp_pci_remove(struct pci_dev *pdev)
 {
        struct nfp_pf *pf = pci_get_drvdata(pdev);
 
-       if (!list_empty(&pf->ports))
-               nfp_net_pci_remove(pf);
+       nfp_net_pci_remove(pf);
 
        nfp_pcie_sriov_disable(pdev);
 
index 39105d0435e9861f96a7ec8337a472d974ae69ab..b57de047b0029fa3cad3364f207600d8d5bda063 100644 (file)
@@ -42,7 +42,9 @@
 #include <linux/list.h>
 #include <linux/types.h>
 #include <linux/msi.h>
+#include <linux/mutex.h>
 #include <linux/pci.h>
+#include <linux/workqueue.h>
 
 struct dentry;
 struct pci_dev;
@@ -64,8 +66,11 @@ struct nfp_eth_table;
  * @fw_loaded:         Is the firmware loaded?
  * @eth_tbl:           NSP ETH table
  * @ddir:              Per-device debugfs directory
- * @num_ports:         Number of adapter ports
+ * @num_ports:         Number of adapter ports app firmware supports
+ * @num_netdevs:       Number of netdevs spawned
  * @ports:             Linked list of port structures (struct nfp_net)
+ * @port_lock:         Protects @ports, @num_ports, @num_netdevs
+ * @port_refresh_work: Work entry for taking netdevs out
  */
 struct nfp_pf {
        struct pci_dev *pdev;
@@ -88,7 +93,11 @@ struct nfp_pf {
        struct dentry *ddir;
 
        unsigned int num_ports;
+       unsigned int num_netdevs;
+
        struct list_head ports;
+       struct work_struct port_refresh_work;
+       struct mutex port_lock;
 };
 
 extern struct pci_driver nfp_netvf_pci_driver;
index e614a376b595280148494e8a1029fb2328057ca4..052db9208fbb0d269b2800b615025dde656445fe 100644 (file)
 
 #include "nfp_net_ctrl.h"
 
-#define nn_err(nn, fmt, args...)  netdev_err((nn)->netdev, fmt, ## args)
-#define nn_warn(nn, fmt, args...) netdev_warn((nn)->netdev, fmt, ## args)
-#define nn_info(nn, fmt, args...) netdev_info((nn)->netdev, fmt, ## args)
-#define nn_dbg(nn, fmt, args...)  netdev_dbg((nn)->netdev, fmt, ## args)
-#define nn_warn_ratelimit(nn, fmt, args...)                            \
+#define nn_err(nn, fmt, args...)  netdev_err((nn)->dp.netdev, fmt, ## args)
+#define nn_warn(nn, fmt, args...) netdev_warn((nn)->dp.netdev, fmt, ## args)
+#define nn_info(nn, fmt, args...) netdev_info((nn)->dp.netdev, fmt, ## args)
+#define nn_dbg(nn, fmt, args...)  netdev_dbg((nn)->dp.netdev, fmt, ## args)
+#define nn_dp_warn(dp, fmt, args...)                                   \
        do {                                                            \
                if (unlikely(net_ratelimit()))                          \
-                       netdev_warn((nn)->netdev, fmt, ## args);        \
+                       netdev_warn((dp)->netdev, fmt, ## args);        \
        } while (0)
 
 /* Max time to wait for NFP to respond on updates (in seconds) */
 
 /* Forward declarations */
 struct nfp_cpp;
+struct nfp_eth_table_port;
 struct nfp_net;
 struct nfp_net_r_vector;
 
@@ -306,17 +307,13 @@ struct nfp_net_rx_buf {
  * @rd_p:       FL/RX ring read pointer (free running)
  * @idx:        Ring index from Linux's perspective
  * @fl_qcidx:   Queue Controller Peripheral (QCP) queue index for the freelist
- * @rx_qcidx:   Queue Controller Peripheral (QCP) queue index for the RX queue
  * @qcp_fl:     Pointer to base of the QCP freelist queue
- * @qcp_rx:     Pointer to base of the QCP RX queue
  * @wr_ptr_add: Accumulated number of buffers to add to QCP write pointer
  *              (used for free list batching)
 * @rxbufs:     Array of allocated FL/RX buffers
  * @rxds:       Virtual address of FL/RX ring in host memory
  * @dma:        DMA address of the FL/RX ring
  * @size:       Size, in bytes, of the FL/RX ring (needed to free)
- * @bufsz:     Buffer allocation size for convenience of management routines
- *             (NOTE: this is in second cache line, do not use on fast path!)
  */
 struct nfp_net_rx_ring {
        struct nfp_net_r_vector *r_vec;
@@ -325,20 +322,17 @@ struct nfp_net_rx_ring {
        u32 wr_p;
        u32 rd_p;
 
-       u16 idx;
-       u16 wr_ptr_add;
+       u32 idx;
+       u32 wr_ptr_add;
 
        int fl_qcidx;
-       int rx_qcidx;
        u8 __iomem *qcp_fl;
-       u8 __iomem *qcp_rx;
 
        struct nfp_net_rx_buf *rxbufs;
        struct nfp_net_rx_desc *rxds;
 
        dma_addr_t dma;
        unsigned int size;
-       unsigned int bufsz;
 } ____cacheline_aligned;
 
 /**
@@ -433,19 +427,76 @@ struct nfp_stat_pair {
 };
 
 /**
- * struct nfp_net - NFP network device structure
- * @pdev:               Backpointer to PCI device
- * @netdev:             Backpointer to net_device structure
- * @is_vf:              Is the driver attached to a VF?
+ * struct nfp_net_dp - NFP network device datapath data structure
+ * @dev:               Backpointer to struct device
+ * @netdev:            Backpointer to net_device structure
+ * @is_vf:             Is the driver attached to a VF?
  * @bpf_offload_skip_sw:  Offloaded BPF program will not be rerun by cls_bpf
  * @bpf_offload_xdp:   Offloaded BPF program is XDP
- * @ctrl:               Local copy of the control register/word.
- * @fl_bufsz:           Currently configured size of the freelist buffers
+ * @chained_metadata_format:  Firmware will use new metadata format
+ * @rx_dma_dir:                Mapping direction for RX buffers
+ * @rx_dma_off:                Offset at which packets are DMAed (reserves XDP headroom)
  * @rx_offset:         Offset in the RX buffers where packet data starts
+ * @ctrl:              Local copy of the control register/word.
+ * @fl_bufsz:          Currently configured size of the freelist buffers
  * @xdp_prog:          Installed XDP program
- * @fw_ver:             Firmware version
+ * @tx_rings:          Array of pre-allocated TX ring structures
+ * @rx_rings:          Array of pre-allocated RX ring structures
+ * @ctrl_bar:          Pointer to mapped control BAR
+ *
+ * @txd_cnt:           Size of the TX ring in number of descriptors
+ * @rxd_cnt:           Size of the RX ring in number of descriptors
+ * @num_r_vecs:                Number of used ring vectors
+ * @num_tx_rings:      Currently configured number of TX rings
+ * @num_stack_tx_rings:        Number of TX rings used by the stack (not XDP)
+ * @num_rx_rings:      Currently configured number of RX rings
+ * @mtu:               Device MTU
+ */
+struct nfp_net_dp {
+       struct device *dev;
+       struct net_device *netdev;
+
+       u8 is_vf:1;
+       u8 bpf_offload_skip_sw:1;
+       u8 bpf_offload_xdp:1;
+       u8 chained_metadata_format:1;
+
+       u8 rx_dma_dir;
+       u8 rx_dma_off;
+
+       u8 rx_offset;
+
+       u32 ctrl;
+       u32 fl_bufsz;
+
+       struct bpf_prog *xdp_prog;
+
+       struct nfp_net_tx_ring *tx_rings;
+       struct nfp_net_rx_ring *rx_rings;
+
+       u8 __iomem *ctrl_bar;
+
+       /* Cold data follows */
+
+       unsigned int txd_cnt;
+       unsigned int rxd_cnt;
+
+       unsigned int num_r_vecs;
+
+       unsigned int num_tx_rings;
+       unsigned int num_stack_tx_rings;
+       unsigned int num_rx_rings;
+
+       unsigned int mtu;
+};
+
+/**
+ * struct nfp_net - NFP network device structure
+ * @dp:                        Datapath structure
+ * @fw_ver:            Firmware version
  * @cap:                Capabilities advertised by the Firmware
 * @max_mtu:            Maximum supported MTU advertised by the Firmware
+ * @rss_hfunc:         RSS selected hash function
  * @rss_cfg:            RSS configuration
  * @rss_key:            RSS secret key
  * @rss_itbl:           RSS indirection table
@@ -454,17 +505,9 @@ struct nfp_stat_pair {
  * @rx_filter_change:  Jiffies when statistics last changed
  * @rx_filter_stats_timer:  Timer for polling filter offload statistics
  * @rx_filter_lock:    Lock protecting timer state changes (teardown)
+ * @max_r_vecs:                Number of allocated interrupt vectors for RX/TX
  * @max_tx_rings:       Maximum number of TX rings supported by the Firmware
  * @max_rx_rings:       Maximum number of RX rings supported by the Firmware
- * @num_tx_rings:       Currently configured number of TX rings
- * @num_stack_tx_rings:        Number of TX rings used by the stack (not XDP)
- * @num_rx_rings:       Currently configured number of RX rings
- * @txd_cnt:            Size of the TX ring in number of descriptors
- * @rxd_cnt:            Size of the RX ring in number of descriptors
- * @tx_rings:           Array of pre-allocated TX ring structures
- * @rx_rings:           Array of pre-allocated RX ring structures
- * @max_r_vecs:                Number of allocated interrupt vectors for RX/TX
- * @num_r_vecs:         Number of used ring vectors
  * @r_vecs:             Pre-allocated array of ring vectors
  * @irq_entries:        Pre-allocated array of MSI-X entries
  * @lsc_handler:        Handler for Link State Change interrupt
@@ -480,7 +523,8 @@ struct nfp_stat_pair {
  * @reconfig_sync_present:  Some thread is performing synchronous reconfig
  * @reconfig_timer:    Timer for async reading of reconfig results
  * @link_up:            Is the link up?
- * @link_status_lock:  Protects @link_up and ensures atomicity with BAR reading
+ * @link_changed:      Has link state changed since the last port refresh?
+ * @link_status_lock:  Protects @link_* and ensures atomicity with BAR reading
  * @rx_coalesce_usecs:      RX interrupt moderation usecs delay parameter
  * @rx_coalesce_max_frames: RX interrupt moderation frame count parameter
  * @tx_coalesce_usecs:      TX interrupt moderation usecs delay parameter
@@ -488,36 +532,24 @@ struct nfp_stat_pair {
  * @vxlan_ports:       VXLAN ports for RX inner csum offload communicated to HW
  * @vxlan_usecnt:      IPv4/IPv6 VXLAN port use counts
  * @qcp_cfg:            Pointer to QCP queue used for configuration notification
- * @ctrl_bar:           Pointer to mapped control BAR
  * @tx_bar:             Pointer to mapped TX queues
  * @rx_bar:             Pointer to mapped FL/RX queues
  * @debugfs_dir:       Device directory in debugfs
  * @ethtool_dump_flag: Ethtool dump flag
  * @port_list:         Entry on device port list
+ * @pdev:              Backpointer to PCI device
  * @cpp:               CPP device handle if available
+ * @eth_port:          Translated ETH Table port entry
  */
 struct nfp_net {
-       struct pci_dev *pdev;
-       struct net_device *netdev;
-
-       unsigned is_vf:1;
-       unsigned bpf_offload_skip_sw:1;
-       unsigned bpf_offload_xdp:1;
-
-       u32 ctrl;
-       u32 fl_bufsz;
-
-       u32 rx_offset;
-
-       struct bpf_prog *xdp_prog;
-
-       struct nfp_net_tx_ring *tx_rings;
-       struct nfp_net_rx_ring *rx_rings;
+       struct nfp_net_dp dp;
 
        struct nfp_net_fw_version fw_ver;
+
        u32 cap;
        u32 max_mtu;
 
+       u8 rss_hfunc;
        u32 rss_cfg;
        u8 rss_key[NFP_NET_CFG_RSS_KEY_SZ];
        u8 rss_itbl[NFP_NET_CFG_RSS_ITBL_SZ];
@@ -530,18 +562,10 @@ struct nfp_net {
        unsigned int max_tx_rings;
        unsigned int max_rx_rings;
 
-       unsigned int num_tx_rings;
-       unsigned int num_stack_tx_rings;
-       unsigned int num_rx_rings;
-
        int stride_tx;
        int stride_rx;
 
-       int txd_cnt;
-       int rxd_cnt;
-
        unsigned int max_r_vecs;
-       unsigned int num_r_vecs;
        struct nfp_net_r_vector r_vecs[NFP_NET_MAX_R_VECS];
        struct msix_entry irq_entries[NFP_NET_MAX_IRQS];
 
@@ -557,6 +581,7 @@ struct nfp_net {
        u32 me_freq_mhz;
 
        bool link_up;
+       bool link_changed;
        spinlock_t link_status_lock;
 
        spinlock_t reconfig_lock;
@@ -575,7 +600,6 @@ struct nfp_net {
 
        u8 __iomem *qcp_cfg;
 
-       u8 __iomem *ctrl_bar;
        u8 __iomem *tx_bar;
        u8 __iomem *rx_bar;
 
@@ -584,14 +608,10 @@ struct nfp_net {
 
        struct list_head port_list;
 
+       struct pci_dev *pdev;
        struct nfp_cpp *cpp;
-};
 
-struct nfp_net_ring_set {
-       unsigned int n_rings;
-       unsigned int mtu;
-       unsigned int dcnt;
-       void *rings;
+       struct nfp_eth_table_port *eth_port;
 };
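/* Illustrative sketch, not part of this patch: after the split, hot-path
 * state is reached through the embedded datapath struct, e.g.
 *
 *	struct nfp_net_dp *dp = &nn->dp;
 *
 *	if (dp->xdp_prog)
 *		netdev_info(dp->netdev, "XDP attached\n");
 *
 * where nn->xdp_prog and nn->netdev were used before this patch.
 */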
 
 /* Functions to read/write from/to a BAR
@@ -599,42 +619,42 @@ struct nfp_net_ring_set {
  */
 static inline u16 nn_readb(struct nfp_net *nn, int off)
 {
-       return readb(nn->ctrl_bar + off);
+       return readb(nn->dp.ctrl_bar + off);
 }
 
 static inline void nn_writeb(struct nfp_net *nn, int off, u8 val)
 {
-       writeb(val, nn->ctrl_bar + off);
+       writeb(val, nn->dp.ctrl_bar + off);
 }
 
 static inline u16 nn_readw(struct nfp_net *nn, int off)
 {
-       return readw(nn->ctrl_bar + off);
+       return readw(nn->dp.ctrl_bar + off);
 }
 
 static inline void nn_writew(struct nfp_net *nn, int off, u16 val)
 {
-       writew(val, nn->ctrl_bar + off);
+       writew(val, nn->dp.ctrl_bar + off);
 }
 
 static inline u32 nn_readl(struct nfp_net *nn, int off)
 {
-       return readl(nn->ctrl_bar + off);
+       return readl(nn->dp.ctrl_bar + off);
 }
 
 static inline void nn_writel(struct nfp_net *nn, int off, u32 val)
 {
-       writel(val, nn->ctrl_bar + off);
+       writel(val, nn->dp.ctrl_bar + off);
 }
 
 static inline u64 nn_readq(struct nfp_net *nn, int off)
 {
-       return readq(nn->ctrl_bar + off);
+       return readq(nn->dp.ctrl_bar + off);
 }
 
 static inline void nn_writeq(struct nfp_net *nn, int off, u64 val)
 {
-       writeq(val, nn->ctrl_bar + off);
+       writeq(val, nn->dp.ctrl_bar + off);
 }
 
 /* Flush posted PCI writes by reading something without side effects */
@@ -776,6 +796,7 @@ void nfp_net_netdev_clean(struct net_device *netdev);
 void nfp_net_set_ethtool_ops(struct net_device *netdev);
 void nfp_net_info(struct nfp_net *nn);
 int nfp_net_reconfig(struct nfp_net *nn, u32 update);
+unsigned int nfp_net_rss_key_sz(struct nfp_net *nn);
 void nfp_net_rss_write_itbl(struct nfp_net *nn);
 void nfp_net_rss_write_key(struct nfp_net *nn);
 void nfp_net_coalesce_write_cfg(struct nfp_net *nn);
@@ -787,9 +808,12 @@ void nfp_net_irqs_disable(struct pci_dev *pdev);
 void
 nfp_net_irqs_assign(struct nfp_net *nn, struct msix_entry *irq_entries,
                    unsigned int n);
-int
-nfp_net_ring_reconfig(struct nfp_net *nn, struct bpf_prog **xdp_prog,
-                     struct nfp_net_ring_set *rx, struct nfp_net_ring_set *tx);
+
+struct nfp_net_dp *nfp_net_clone_dp(struct nfp_net *nn);
+int nfp_net_ring_reconfig(struct nfp_net *nn, struct nfp_net_dp *new);
+
+bool nfp_net_link_changed_read_clear(struct nfp_net *nn);
+void nfp_net_refresh_port_config(struct nfp_net *nn);
 
 #ifdef CONFIG_NFP_DEBUG
 void nfp_net_debugfs_create(void);
index 9179a99563afa86f4ed7bbcb41b045c2568243de..e2197160e4dcc70fe626f6f06c4d20d70925ebc7 100644 (file)
@@ -41,6 +41,7 @@
  *          Chris Telfer <chris.telfer@netronome.com>
  */
 
+#include <linux/bitfield.h>
 #include <linux/bpf.h>
 #include <linux/bpf_trace.h>
 #include <linux/module.h>
@@ -66,6 +67,7 @@
 #include <net/pkt_cls.h>
 #include <net/vxlan.h>
 
+#include "nfpcore/nfp_nsp.h"
 #include "nfp_net_ctrl.h"
 #include "nfp_net.h"
 
@@ -83,20 +85,18 @@ void nfp_net_get_fw_version(struct nfp_net_fw_version *fw_ver,
        put_unaligned_le32(reg, fw_ver);
 }
 
-static dma_addr_t
-nfp_net_dma_map_rx(struct nfp_net *nn, void *frag, unsigned int bufsz,
-                  int direction)
+static dma_addr_t nfp_net_dma_map_rx(struct nfp_net_dp *dp, void *frag)
 {
-       return dma_map_single(&nn->pdev->dev, frag + NFP_NET_RX_BUF_HEADROOM,
-                             bufsz - NFP_NET_RX_BUF_NON_DATA, direction);
+       return dma_map_single(dp->dev, frag + NFP_NET_RX_BUF_HEADROOM,
+                             dp->fl_bufsz - NFP_NET_RX_BUF_NON_DATA,
+                             dp->rx_dma_dir);
 }
 
-static void
-nfp_net_dma_unmap_rx(struct nfp_net *nn, dma_addr_t dma_addr,
-                    unsigned int bufsz, int direction)
+static void nfp_net_dma_unmap_rx(struct nfp_net_dp *dp, dma_addr_t dma_addr)
 {
-       dma_unmap_single(&nn->pdev->dev, dma_addr,
-                        bufsz - NFP_NET_RX_BUF_NON_DATA, direction);
+       dma_unmap_single(dp->dev, dma_addr,
+                        dp->fl_bufsz - NFP_NET_RX_BUF_NON_DATA,
+                        dp->rx_dma_dir);
 }
 
 /* Firmware reconfig
@@ -327,19 +327,22 @@ void
 nfp_net_irqs_assign(struct nfp_net *nn, struct msix_entry *irq_entries,
                    unsigned int n)
 {
+       struct nfp_net_dp *dp = &nn->dp;
+
        nn->max_r_vecs = n - NFP_NET_NON_Q_VECTORS;
-       nn->num_r_vecs = nn->max_r_vecs;
+       dp->num_r_vecs = nn->max_r_vecs;
 
        memcpy(nn->irq_entries, irq_entries, sizeof(*irq_entries) * n);
 
-       if (nn->num_rx_rings > nn->num_r_vecs ||
-           nn->num_tx_rings > nn->num_r_vecs)
-               nn_warn(nn, "More rings (%d,%d) than vectors (%d).\n",
-                       nn->num_rx_rings, nn->num_tx_rings, nn->num_r_vecs);
+       if (dp->num_rx_rings > dp->num_r_vecs ||
+           dp->num_tx_rings > dp->num_r_vecs)
+               dev_warn(nn->dp.dev, "More rings (%d,%d) than vectors (%d).\n",
+                        dp->num_rx_rings, dp->num_tx_rings,
+                        dp->num_r_vecs);
 
-       nn->num_rx_rings = min(nn->num_r_vecs, nn->num_rx_rings);
-       nn->num_tx_rings = min(nn->num_r_vecs, nn->num_tx_rings);
-       nn->num_stack_tx_rings = nn->num_tx_rings;
+       dp->num_rx_rings = min(dp->num_r_vecs, dp->num_rx_rings);
+       dp->num_tx_rings = min(dp->num_r_vecs, dp->num_tx_rings);
+       dp->num_stack_tx_rings = dp->num_tx_rings;
 }
 
 /**
@@ -373,6 +376,19 @@ static irqreturn_t nfp_net_irq_rxtx(int irq, void *data)
        return IRQ_HANDLED;
 }
 
+bool nfp_net_link_changed_read_clear(struct nfp_net *nn)
+{
+       unsigned long flags;
+       bool ret;
+
+       spin_lock_irqsave(&nn->link_status_lock, flags);
+       ret = nn->link_changed;
+       nn->link_changed = false;
+       spin_unlock_irqrestore(&nn->link_status_lock, flags);
+
+       return ret;
+}
+
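/* Illustrative sketch, not part of this patch: the expected consumer
 * pattern for the read-and-clear helper above. Pairing it with
 * nfp_net_refresh_port_config() is an assumption based on the
 * declarations added to nfp_net.h:
 *
 *	if (nfp_net_link_changed_read_clear(nn))
 *		nfp_net_refresh_port_config(nn);
 */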
 /**
  * nfp_net_read_link_status() - Reread link status from control BAR
  * @nn:       NFP Network structure
@@ -392,13 +408,14 @@ static void nfp_net_read_link_status(struct nfp_net *nn)
                goto out;
 
        nn->link_up = link_up;
+       nn->link_changed = true;
 
        if (nn->link_up) {
-               netif_carrier_on(nn->netdev);
-               netdev_info(nn->netdev, "NIC Link is Up\n");
+               netif_carrier_on(nn->dp.netdev);
+               netdev_info(nn->dp.netdev, "NIC Link is Up\n");
        } else {
-               netif_carrier_off(nn->netdev);
-               netdev_info(nn->netdev, "NIC Link is Down\n");
+               netif_carrier_off(nn->dp.netdev);
+               netdev_info(nn->dp.netdev, "NIC Link is Down\n");
        }
 out:
        spin_unlock_irqrestore(&nn->link_status_lock, flags);
@@ -476,10 +493,7 @@ nfp_net_rx_ring_init(struct nfp_net_rx_ring *rx_ring,
        rx_ring->r_vec = r_vec;
 
        rx_ring->fl_qcidx = rx_ring->idx * nn->stride_rx;
-       rx_ring->rx_qcidx = rx_ring->fl_qcidx + (nn->stride_rx - 1);
-
        rx_ring->qcp_fl = nn->rx_bar + NFP_QCP_QUEUE_OFF(rx_ring->fl_qcidx);
-       rx_ring->qcp_rx = nn->rx_bar + NFP_QCP_QUEUE_OFF(rx_ring->rx_qcidx);
 }
 
 /**
@@ -530,7 +544,7 @@ nfp_net_aux_irq_request(struct nfp_net *nn, u32 ctrl_offset,
 
        entry = &nn->irq_entries[vector_idx];
 
-       snprintf(name, name_sz, format, netdev_name(nn->netdev));
+       snprintf(name, name_sz, format, netdev_name(nn->dp.netdev));
        err = request_irq(entry->vector, handler, 0, name, nn);
        if (err) {
                nn_err(nn, "Failed to request IRQ %d (err=%d).\n",
@@ -617,7 +631,6 @@ static void nfp_net_tx_ring_stop(struct netdev_queue *nd_q,
 
 /**
  * nfp_net_tx_tso() - Set up Tx descriptor for LSO
- * @nn:  NFP Net device
  * @r_vec: per-ring structure
  * @txbuf: Pointer to driver soft TX descriptor
  * @txd: Pointer to HW TX descriptor
@@ -626,7 +639,7 @@ static void nfp_net_tx_ring_stop(struct netdev_queue *nd_q,
  * Set up Tx descriptor for LSO, do nothing for non-LSO skbs.
  * Return error on packet header greater than maximum supported LSO header size.
  */
-static void nfp_net_tx_tso(struct nfp_net *nn, struct nfp_net_r_vector *r_vec,
+static void nfp_net_tx_tso(struct nfp_net_r_vector *r_vec,
                           struct nfp_net_tx_buf *txbuf,
                           struct nfp_net_tx_desc *txd, struct sk_buff *skb)
 {
@@ -657,7 +670,7 @@ static void nfp_net_tx_tso(struct nfp_net *nn, struct nfp_net_r_vector *r_vec,
 
 /**
  * nfp_net_tx_csum() - Set TX CSUM offload flags in TX descriptor
- * @nn:  NFP Net device
+ * @dp:  NFP Net data path struct
  * @r_vec: per-ring structure
  * @txbuf: Pointer to driver soft TX descriptor
  * @txd: Pointer to TX descriptor
@@ -666,7 +679,8 @@ static void nfp_net_tx_tso(struct nfp_net *nn, struct nfp_net_r_vector *r_vec,
  * This function sets the TX checksum flags in the TX descriptor based
  * on the configuration and the protocol of the packet to be transmitted.
  */
-static void nfp_net_tx_csum(struct nfp_net *nn, struct nfp_net_r_vector *r_vec,
+static void nfp_net_tx_csum(struct nfp_net_dp *dp,
+                           struct nfp_net_r_vector *r_vec,
                            struct nfp_net_tx_buf *txbuf,
                            struct nfp_net_tx_desc *txd, struct sk_buff *skb)
 {
@@ -674,7 +688,7 @@ static void nfp_net_tx_csum(struct nfp_net *nn, struct nfp_net_r_vector *r_vec,
        struct iphdr *iph;
        u8 l4_hdr;
 
-       if (!(nn->ctrl & NFP_NET_CFG_CTRL_TXCSUM))
+       if (!(dp->ctrl & NFP_NET_CFG_CTRL_TXCSUM))
                return;
 
        if (skb->ip_summed != CHECKSUM_PARTIAL)
@@ -693,8 +707,7 @@ static void nfp_net_tx_csum(struct nfp_net *nn, struct nfp_net_r_vector *r_vec,
        } else if (ipv6h->version == 6) {
                l4_hdr = ipv6h->nexthdr;
        } else {
-               nn_warn_ratelimit(nn, "partial checksum but ipv=%x!\n",
-                                 iph->version);
+               nn_dp_warn(dp, "partial checksum but ipv=%x!\n", iph->version);
                return;
        }
 
@@ -706,8 +719,7 @@ static void nfp_net_tx_csum(struct nfp_net *nn, struct nfp_net_r_vector *r_vec,
                txd->flags |= PCIE_DESC_TX_UDP_CSUM;
                break;
        default:
-               nn_warn_ratelimit(nn, "partial checksum but l4 proto=%x!\n",
-                                 l4_hdr);
+               nn_dp_warn(dp, "partial checksum but l4 proto=%x!\n", l4_hdr);
                return;
        }
 
@@ -737,28 +749,31 @@ static int nfp_net_tx(struct sk_buff *skb, struct net_device *netdev)
 {
        struct nfp_net *nn = netdev_priv(netdev);
        const struct skb_frag_struct *frag;
-       struct nfp_net_r_vector *r_vec;
        struct nfp_net_tx_desc *txd, txdg;
-       struct nfp_net_tx_buf *txbuf;
        struct nfp_net_tx_ring *tx_ring;
+       struct nfp_net_r_vector *r_vec;
+       struct nfp_net_tx_buf *txbuf;
        struct netdev_queue *nd_q;
+       struct nfp_net_dp *dp;
        dma_addr_t dma_addr;
        unsigned int fsize;
        int f, nr_frags;
        int wr_idx;
        u16 qidx;
 
+       dp = &nn->dp;
        qidx = skb_get_queue_mapping(skb);
-       tx_ring = &nn->tx_rings[qidx];
+       tx_ring = &dp->tx_rings[qidx];
        r_vec = tx_ring->r_vec;
-       nd_q = netdev_get_tx_queue(nn->netdev, qidx);
+       nd_q = netdev_get_tx_queue(dp->netdev, qidx);
 
        nr_frags = skb_shinfo(skb)->nr_frags;
 
        if (unlikely(nfp_net_tx_full(tx_ring, nr_frags + 1))) {
-               nn_warn_ratelimit(nn, "TX ring %d busy. wrp=%u rdp=%u\n",
-                                 qidx, tx_ring->wr_p, tx_ring->rd_p);
+               nn_dp_warn(dp, "TX ring %d busy. wrp=%u rdp=%u\n",
+                          qidx, tx_ring->wr_p, tx_ring->rd_p);
                netif_tx_stop_queue(nd_q);
+               nfp_net_tx_xmit_more_flush(tx_ring);
                u64_stats_update_begin(&r_vec->tx_sync);
                r_vec->tx_busy++;
                u64_stats_update_end(&r_vec->tx_sync);
@@ -766,9 +781,9 @@ static int nfp_net_tx(struct sk_buff *skb, struct net_device *netdev)
        }
 
        /* Start with the head skbuf */
-       dma_addr = dma_map_single(&nn->pdev->dev, skb->data, skb_headlen(skb),
+       dma_addr = dma_map_single(dp->dev, skb->data, skb_headlen(skb),
                                  DMA_TO_DEVICE);
-       if (dma_mapping_error(&nn->pdev->dev, dma_addr))
+       if (dma_mapping_error(dp->dev, dma_addr))
                goto err_free;
 
        wr_idx = tx_ring->wr_p & (tx_ring->cnt - 1);
@@ -792,11 +807,11 @@ static int nfp_net_tx(struct sk_buff *skb, struct net_device *netdev)
        txd->mss = 0;
        txd->l4_offset = 0;
 
-       nfp_net_tx_tso(nn, r_vec, txbuf, txd, skb);
+       nfp_net_tx_tso(r_vec, txbuf, txd, skb);
 
-       nfp_net_tx_csum(nn, r_vec, txbuf, txd, skb);
+       nfp_net_tx_csum(dp, r_vec, txbuf, txd, skb);
 
-       if (skb_vlan_tag_present(skb) && nn->ctrl & NFP_NET_CFG_CTRL_TXVLAN) {
+       if (skb_vlan_tag_present(skb) && dp->ctrl & NFP_NET_CFG_CTRL_TXVLAN) {
                txd->flags |= PCIE_DESC_TX_VLAN;
                txd->vlan = cpu_to_le16(skb_vlan_tag_get(skb));
        }
@@ -810,9 +825,9 @@ static int nfp_net_tx(struct sk_buff *skb, struct net_device *netdev)
                        frag = &skb_shinfo(skb)->frags[f];
                        fsize = skb_frag_size(frag);
 
-                       dma_addr = skb_frag_dma_map(&nn->pdev->dev, frag, 0,
+                       dma_addr = skb_frag_dma_map(dp->dev, frag, 0,
                                                    fsize, DMA_TO_DEVICE);
-                       if (dma_mapping_error(&nn->pdev->dev, dma_addr))
+                       if (dma_mapping_error(dp->dev, dma_addr))
                                goto err_unmap;
 
                        wr_idx = (wr_idx + 1) & (tx_ring->cnt - 1);
@@ -851,8 +866,7 @@ err_unmap:
        --f;
        while (f >= 0) {
                frag = &skb_shinfo(skb)->frags[f];
-               dma_unmap_page(&nn->pdev->dev,
-                              tx_ring->txbufs[wr_idx].dma_addr,
+               dma_unmap_page(dp->dev, tx_ring->txbufs[wr_idx].dma_addr,
                               skb_frag_size(frag), DMA_TO_DEVICE);
                tx_ring->txbufs[wr_idx].skb = NULL;
                tx_ring->txbufs[wr_idx].dma_addr = 0;
@@ -861,13 +875,14 @@ err_unmap:
                if (wr_idx < 0)
                        wr_idx += tx_ring->cnt;
        }
-       dma_unmap_single(&nn->pdev->dev, tx_ring->txbufs[wr_idx].dma_addr,
+       dma_unmap_single(dp->dev, tx_ring->txbufs[wr_idx].dma_addr,
                         skb_headlen(skb), DMA_TO_DEVICE);
        tx_ring->txbufs[wr_idx].skb = NULL;
        tx_ring->txbufs[wr_idx].dma_addr = 0;
        tx_ring->txbufs[wr_idx].fidx = -2;
 err_free:
-       nn_warn_ratelimit(nn, "Failed to map DMA TX buffer\n");
+       nn_dp_warn(dp, "Failed to map DMA TX buffer\n");
+       nfp_net_tx_xmit_more_flush(tx_ring);
        u64_stats_update_begin(&r_vec->tx_sync);
        r_vec->tx_errors++;
        u64_stats_update_end(&r_vec->tx_sync);
@@ -884,7 +899,7 @@ err_free:
 static void nfp_net_tx_complete(struct nfp_net_tx_ring *tx_ring)
 {
        struct nfp_net_r_vector *r_vec = tx_ring->r_vec;
-       struct nfp_net *nn = r_vec->nfp_net;
+       struct nfp_net_dp *dp = &r_vec->nfp_net->dp;
        const struct skb_frag_struct *frag;
        struct netdev_queue *nd_q;
        u32 done_pkts = 0, done_bytes = 0;
@@ -918,8 +933,7 @@ static void nfp_net_tx_complete(struct nfp_net_tx_ring *tx_ring)
 
                if (fidx == -1) {
                        /* unmap head */
-                       dma_unmap_single(&nn->pdev->dev,
-                                        tx_ring->txbufs[idx].dma_addr,
+                       dma_unmap_single(dp->dev, tx_ring->txbufs[idx].dma_addr,
                                         skb_headlen(skb), DMA_TO_DEVICE);
 
                        done_pkts += tx_ring->txbufs[idx].pkt_cnt;
@@ -927,8 +941,7 @@ static void nfp_net_tx_complete(struct nfp_net_tx_ring *tx_ring)
                } else {
                        /* unmap fragment */
                        frag = &skb_shinfo(skb)->frags[fidx];
-                       dma_unmap_page(&nn->pdev->dev,
-                                      tx_ring->txbufs[idx].dma_addr,
+                       dma_unmap_page(dp->dev, tx_ring->txbufs[idx].dma_addr,
                                       skb_frag_size(frag), DMA_TO_DEVICE);
                }
 
@@ -948,7 +961,7 @@ static void nfp_net_tx_complete(struct nfp_net_tx_ring *tx_ring)
        r_vec->tx_pkts += done_pkts;
        u64_stats_update_end(&r_vec->tx_sync);
 
-       nd_q = netdev_get_tx_queue(nn->netdev, tx_ring->idx);
+       nd_q = netdev_get_tx_queue(dp->netdev, tx_ring->idx);
        netdev_tx_completed_queue(nd_q, done_pkts, done_bytes);
        if (nfp_net_tx_ring_should_wake(tx_ring)) {
                /* Make sure TX thread will see updated tx_ring->rd_p */
@@ -966,7 +979,7 @@ static void nfp_net_tx_complete(struct nfp_net_tx_ring *tx_ring)
 static void nfp_net_xdp_complete(struct nfp_net_tx_ring *tx_ring)
 {
        struct nfp_net_r_vector *r_vec = tx_ring->r_vec;
-       struct nfp_net *nn = r_vec->nfp_net;
+       struct nfp_net_dp *dp = &r_vec->nfp_net->dp;
        u32 done_pkts = 0, done_bytes = 0;
        int idx, todo;
        u32 qcp_rd_p;
@@ -989,8 +1002,7 @@ static void nfp_net_xdp_complete(struct nfp_net_tx_ring *tx_ring)
                if (!tx_ring->txbufs[idx].frag)
                        continue;
 
-               nfp_net_dma_unmap_rx(nn, tx_ring->txbufs[idx].dma_addr,
-                                    nn->fl_bufsz, DMA_BIDIRECTIONAL);
+               nfp_net_dma_unmap_rx(dp, tx_ring->txbufs[idx].dma_addr);
                __free_page(virt_to_page(tx_ring->txbufs[idx].frag));
 
                done_pkts++;
@@ -1015,17 +1027,16 @@ static void nfp_net_xdp_complete(struct nfp_net_tx_ring *tx_ring)
 
 /**
  * nfp_net_tx_ring_reset() - Free any untransmitted buffers and reset pointers
- * @nn:                NFP Net device
+ * @dp:                NFP Net data path struct
  * @tx_ring:   TX ring structure
  *
  * Assumes that the device is stopped
  */
 static void
-nfp_net_tx_ring_reset(struct nfp_net *nn, struct nfp_net_tx_ring *tx_ring)
+nfp_net_tx_ring_reset(struct nfp_net_dp *dp, struct nfp_net_tx_ring *tx_ring)
 {
        struct nfp_net_r_vector *r_vec = tx_ring->r_vec;
        const struct skb_frag_struct *frag;
-       struct pci_dev *pdev = nn->pdev;
        struct netdev_queue *nd_q;
 
        while (tx_ring->rd_p != tx_ring->wr_p) {
@@ -1036,8 +1047,7 @@ nfp_net_tx_ring_reset(struct nfp_net *nn, struct nfp_net_tx_ring *tx_ring)
                tx_buf = &tx_ring->txbufs[idx];
 
                if (tx_ring == r_vec->xdp_ring) {
-                       nfp_net_dma_unmap_rx(nn, tx_buf->dma_addr,
-                                            nn->fl_bufsz, DMA_BIDIRECTIONAL);
+                       nfp_net_dma_unmap_rx(dp, tx_buf->dma_addr);
                        __free_page(virt_to_page(tx_ring->txbufs[idx].frag));
                } else {
                        struct sk_buff *skb = tx_ring->txbufs[idx].skb;
@@ -1045,13 +1055,13 @@ nfp_net_tx_ring_reset(struct nfp_net *nn, struct nfp_net_tx_ring *tx_ring)
 
                        if (tx_buf->fidx == -1) {
                                /* unmap head */
-                               dma_unmap_single(&pdev->dev, tx_buf->dma_addr,
+                               dma_unmap_single(dp->dev, tx_buf->dma_addr,
                                                 skb_headlen(skb),
                                                 DMA_TO_DEVICE);
                        } else {
                                /* unmap fragment */
                                frag = &skb_shinfo(skb)->frags[tx_buf->fidx];
-                               dma_unmap_page(&pdev->dev, tx_buf->dma_addr,
+                               dma_unmap_page(dp->dev, tx_buf->dma_addr,
                                               skb_frag_size(frag),
                                               DMA_TO_DEVICE);
                        }
@@ -1078,7 +1088,7 @@ nfp_net_tx_ring_reset(struct nfp_net *nn, struct nfp_net_tx_ring *tx_ring)
        if (tx_ring == r_vec->xdp_ring)
                return;
 
-       nd_q = netdev_get_tx_queue(nn->netdev, tx_ring->idx);
+       nd_q = netdev_get_tx_queue(dp->netdev, tx_ring->idx);
        netdev_tx_reset_queue(nd_q);
 }
 
@@ -1087,7 +1097,7 @@ static void nfp_net_tx_timeout(struct net_device *netdev)
        struct nfp_net *nn = netdev_priv(netdev);
        int i;
 
-       for (i = 0; i < nn->netdev->real_num_tx_queues; i++) {
+       for (i = 0; i < nn->dp.netdev->real_num_tx_queues; i++) {
                if (!netif_tx_queue_stopped(netdev_get_tx_queue(netdev, i)))
                        continue;
                nn_warn(nn, "TX timeout on ring: %d\n", i);
@@ -1098,16 +1108,17 @@ static void nfp_net_tx_timeout(struct net_device *netdev)
 /* Receive processing
  */
 static unsigned int
-nfp_net_calc_fl_bufsz(struct nfp_net *nn, unsigned int mtu)
+nfp_net_calc_fl_bufsz(struct nfp_net_dp *dp)
 {
        unsigned int fl_bufsz;
 
        fl_bufsz = NFP_NET_RX_BUF_HEADROOM;
-       if (nn->rx_offset == NFP_NET_CFG_RX_OFFSET_DYNAMIC)
+       fl_bufsz += dp->rx_dma_off;
+       if (dp->rx_offset == NFP_NET_CFG_RX_OFFSET_DYNAMIC)
                fl_bufsz += NFP_NET_MAX_PREPEND;
        else
-               fl_bufsz += nn->rx_offset;
-       fl_bufsz += ETH_HLEN + VLAN_HLEN * 2 + mtu;
+               fl_bufsz += dp->rx_offset;
+       fl_bufsz += ETH_HLEN + VLAN_HLEN * 2 + dp->mtu;
 
        fl_bufsz = SKB_DATA_ALIGN(fl_bufsz);
        fl_bufsz += SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
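
For reference, the freelist sizing above can be reproduced in isolation. The
sketch below mirrors the diff's arithmetic in userspace C; every constant is
an illustrative assumption (the real values live in nfp_net.h and skbuff.h),
and a zero rx_offset stands in for NFP_NET_CFG_RX_OFFSET_DYNAMIC:

#include <stdio.h>

#define RX_BUF_HEADROOM 256     /* assumed NFP_NET_RX_BUF_HEADROOM */
#define MAX_PREPEND     64      /* assumed NFP_NET_MAX_PREPEND */
#define ETH_HLEN        14
#define VLAN_HLEN       4
#define SHINFO_SZ       320     /* assumed sizeof(struct skb_shared_info) */
#define ALIGN64(x)      (((x) + 63) & ~63u)     /* stand-in for SKB_DATA_ALIGN */

static unsigned int calc_fl_bufsz(unsigned int rx_dma_off,
                                  unsigned int rx_offset, /* 0 == dynamic */
                                  unsigned int mtu)
{
        unsigned int fl_bufsz = RX_BUF_HEADROOM + rx_dma_off;

        fl_bufsz += rx_offset ? rx_offset : MAX_PREPEND;
        fl_bufsz += ETH_HLEN + VLAN_HLEN * 2 + mtu;
        return ALIGN64(fl_bufsz) + ALIGN64(SHINFO_SZ);
}

int main(void)
{
        /* 256 + 32 + 14 + 8 + 1500 = 1810 -> 1856 aligned, plus 320 */
        printf("fl_bufsz = %u\n", calc_fl_bufsz(0, 32, 1500));
        return 0;
}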
@@ -1126,62 +1137,56 @@ nfp_net_free_frag(void *frag, bool xdp)
 
 /**
  * nfp_net_rx_alloc_one() - Allocate and map page frag for RX
+ * @dp:                NFP Net data path struct
  * @rx_ring:   RX ring the buffer is being allocated for
  * @dma_addr:  Pointer to storage for DMA address (output param)
- * @fl_bufsz:  size of freelist buffers
- * @xdp:       Whether XDP is enabled
  *
  * This function will allocate a new page frag and map it for DMA.
  *
  * Return: allocated page frag or NULL on failure.
  */
 static void *
-nfp_net_rx_alloc_one(struct nfp_net_rx_ring *rx_ring, dma_addr_t *dma_addr,
-                    unsigned int fl_bufsz, bool xdp)
+nfp_net_rx_alloc_one(struct nfp_net_dp *dp, struct nfp_net_rx_ring *rx_ring,
+                    dma_addr_t *dma_addr)
 {
-       struct nfp_net *nn = rx_ring->r_vec->nfp_net;
-       int direction;
        void *frag;
 
-       if (!xdp)
-               frag = netdev_alloc_frag(fl_bufsz);
+       if (!dp->xdp_prog)
+               frag = netdev_alloc_frag(dp->fl_bufsz);
        else
                frag = page_address(alloc_page(GFP_KERNEL | __GFP_COLD));
        if (!frag) {
-               nn_warn_ratelimit(nn, "Failed to alloc receive page frag\n");
+               nn_dp_warn(dp, "Failed to alloc receive page frag\n");
                return NULL;
        }
 
-       direction = xdp ? DMA_BIDIRECTIONAL : DMA_FROM_DEVICE;
-
-       *dma_addr = nfp_net_dma_map_rx(nn, frag, fl_bufsz, direction);
-       if (dma_mapping_error(&nn->pdev->dev, *dma_addr)) {
-               nfp_net_free_frag(frag, xdp);
-               nn_warn_ratelimit(nn, "Failed to map DMA RX buffer\n");
+       *dma_addr = nfp_net_dma_map_rx(dp, frag);
+       if (dma_mapping_error(dp->dev, *dma_addr)) {
+               nfp_net_free_frag(frag, dp->xdp_prog);
+               nn_dp_warn(dp, "Failed to map DMA RX buffer\n");
                return NULL;
        }
 
        return frag;
 }
 
-static void *
-nfp_net_napi_alloc_one(struct nfp_net *nn, int direction, dma_addr_t *dma_addr)
+static void *nfp_net_napi_alloc_one(struct nfp_net_dp *dp, dma_addr_t *dma_addr)
 {
        void *frag;
 
-       if (!nn->xdp_prog)
-               frag = napi_alloc_frag(nn->fl_bufsz);
+       if (!dp->xdp_prog)
+               frag = napi_alloc_frag(dp->fl_bufsz);
        else
                frag = page_address(alloc_page(GFP_ATOMIC | __GFP_COLD));
        if (!frag) {
-               nn_warn_ratelimit(nn, "Failed to alloc receive page frag\n");
+               nn_dp_warn(dp, "Failed to alloc receive page frag\n");
                return NULL;
        }
 
-       *dma_addr = nfp_net_dma_map_rx(nn, frag, nn->fl_bufsz, direction);
-       if (dma_mapping_error(&nn->pdev->dev, *dma_addr)) {
-               nfp_net_free_frag(frag, nn->xdp_prog);
-               nn_warn_ratelimit(nn, "Failed to map DMA RX buffer\n");
+       *dma_addr = nfp_net_dma_map_rx(dp, frag);
+       if (dma_mapping_error(dp->dev, *dma_addr)) {
+               nfp_net_free_frag(frag, dp->xdp_prog);
+               nn_dp_warn(dp, "Failed to map DMA RX buffer\n");
                return NULL;
        }
 
@@ -1190,11 +1195,13 @@ nfp_net_napi_alloc_one(struct nfp_net *nn, int direction, dma_addr_t *dma_addr)
 
 /**
  * nfp_net_rx_give_one() - Put a mapped buffer on the software and hardware rings
+ * @dp:                NFP Net data path struct
  * @rx_ring:   RX ring structure
  * @frag:      page fragment buffer
  * @dma_addr:  DMA address of the buffer mapping
  */
-static void nfp_net_rx_give_one(struct nfp_net_rx_ring *rx_ring,
+static void nfp_net_rx_give_one(const struct nfp_net_dp *dp,
+                               struct nfp_net_rx_ring *rx_ring,
                                void *frag, dma_addr_t dma_addr)
 {
        unsigned int wr_idx;
@@ -1208,7 +1215,8 @@ static void nfp_net_rx_give_one(struct nfp_net_rx_ring *rx_ring,
        /* Fill freelist descriptor */
        rx_ring->rxds[wr_idx].fld.reserved = 0;
        rx_ring->rxds[wr_idx].fld.meta_len_dd = 0;
-       nfp_desc_set_dma_addr(&rx_ring->rxds[wr_idx].fld, dma_addr);
+       nfp_desc_set_dma_addr(&rx_ring->rxds[wr_idx].fld,
+                             dma_addr + dp->rx_dma_off);
 
        rx_ring->wr_p++;
        rx_ring->wr_ptr_add++;
@@ -1249,19 +1257,17 @@ static void nfp_net_rx_ring_reset(struct nfp_net_rx_ring *rx_ring)
 
 /**
  * nfp_net_rx_ring_bufs_free() - Free any buffers currently on the RX ring
- * @nn:                NFP Net device
+ * @dp:                NFP Net data path struct
  * @rx_ring:   RX ring to remove buffers from
- * @xdp:       Whether XDP is enabled
  *
  * Assumes that the device is stopped and that buffers occupy entries
  * [0, ring->cnt - 1).  After the device is disabled,
  * nfp_net_rx_ring_reset() must be called to restore required ring geometry.
  */
 static void
-nfp_net_rx_ring_bufs_free(struct nfp_net *nn, struct nfp_net_rx_ring *rx_ring,
-                         bool xdp)
+nfp_net_rx_ring_bufs_free(struct nfp_net_dp *dp,
+                         struct nfp_net_rx_ring *rx_ring)
 {
-       int direction = xdp ? DMA_BIDIRECTIONAL : DMA_FROM_DEVICE;
        unsigned int i;
 
        for (i = 0; i < rx_ring->cnt - 1; i++) {
@@ -1272,9 +1278,8 @@ nfp_net_rx_ring_bufs_free(struct nfp_net *nn, struct nfp_net_rx_ring *rx_ring,
                if (!rx_ring->rxbufs[i].frag)
                        continue;
 
-               nfp_net_dma_unmap_rx(nn, rx_ring->rxbufs[i].dma_addr,
-                                    rx_ring->bufsz, direction);
-               nfp_net_free_frag(rx_ring->rxbufs[i].frag, xdp);
+               nfp_net_dma_unmap_rx(dp, rx_ring->rxbufs[i].dma_addr);
+               nfp_net_free_frag(rx_ring->rxbufs[i].frag, dp->xdp_prog);
                rx_ring->rxbufs[i].dma_addr = 0;
                rx_ring->rxbufs[i].frag = NULL;
        }
@@ -1282,13 +1287,12 @@ nfp_net_rx_ring_bufs_free(struct nfp_net *nn, struct nfp_net_rx_ring *rx_ring,
 
 /**
  * nfp_net_rx_ring_bufs_alloc() - Fill RX ring with buffers (don't give to FW)
- * @nn:                NFP Net device
+ * @dp:                NFP Net data path struct
  * @rx_ring:  RX ring to allocate buffers for
- * @xdp:       Whether XDP is enabled
  */
 static int
-nfp_net_rx_ring_bufs_alloc(struct nfp_net *nn, struct nfp_net_rx_ring *rx_ring,
-                          bool xdp)
+nfp_net_rx_ring_bufs_alloc(struct nfp_net_dp *dp,
+                          struct nfp_net_rx_ring *rx_ring)
 {
        struct nfp_net_rx_buf *rxbufs;
        unsigned int i;
@@ -1297,10 +1301,9 @@ nfp_net_rx_ring_bufs_alloc(struct nfp_net *nn, struct nfp_net_rx_ring *rx_ring,
 
        for (i = 0; i < rx_ring->cnt - 1; i++) {
                rxbufs[i].frag =
-                       nfp_net_rx_alloc_one(rx_ring, &rxbufs[i].dma_addr,
-                                            rx_ring->bufsz, xdp);
+                       nfp_net_rx_alloc_one(dp, rx_ring, &rxbufs[i].dma_addr);
                if (!rxbufs[i].frag) {
-                       nfp_net_rx_ring_bufs_free(nn, rx_ring, xdp);
+                       nfp_net_rx_ring_bufs_free(dp, rx_ring);
                        return -ENOMEM;
                }
        }
@@ -1310,14 +1313,17 @@ nfp_net_rx_ring_bufs_alloc(struct nfp_net *nn, struct nfp_net_rx_ring *rx_ring,
 
 /**
  * nfp_net_rx_ring_fill_freelist() - Give buffers from the ring to FW
+ * @dp:             NFP Net data path struct
  * @rx_ring: RX ring to fill
  */
-static void nfp_net_rx_ring_fill_freelist(struct nfp_net_rx_ring *rx_ring)
+static void
+nfp_net_rx_ring_fill_freelist(struct nfp_net_dp *dp,
+                             struct nfp_net_rx_ring *rx_ring)
 {
        unsigned int i;
 
        for (i = 0; i < rx_ring->cnt - 1; i++)
-               nfp_net_rx_give_one(rx_ring, rx_ring->rxbufs[i].frag,
+               nfp_net_rx_give_one(dp, rx_ring, rx_ring->rxbufs[i].frag,
                                    rx_ring->rxbufs[i].dma_addr);
 }
 
@@ -1337,17 +1343,18 @@ static int nfp_net_rx_csum_has_errors(u16 flags)
 
 /**
  * nfp_net_rx_csum() - set SKB checksum field based on RX descriptor flags
- * @nn:  NFP Net device
+ * @dp:  NFP Net data path struct
  * @r_vec: per-ring structure
  * @rxd: Pointer to RX descriptor
  * @skb: Pointer to SKB
  */
-static void nfp_net_rx_csum(struct nfp_net *nn, struct nfp_net_r_vector *r_vec,
+static void nfp_net_rx_csum(struct nfp_net_dp *dp,
+                           struct nfp_net_r_vector *r_vec,
                            struct nfp_net_rx_desc *rxd, struct sk_buff *skb)
 {
        skb_checksum_none_assert(skb);
 
-       if (!(nn->netdev->features & NETIF_F_RXCSUM))
+       if (!(dp->netdev->features & NETIF_F_RXCSUM))
                return;
 
        if (nfp_net_rx_csum_has_errors(le16_to_cpu(rxd->rxd.flags))) {
@@ -1398,24 +1405,21 @@ static void nfp_net_set_hash(struct net_device *netdev, struct sk_buff *skb,
 
 static void
 nfp_net_set_hash_desc(struct net_device *netdev, struct sk_buff *skb,
-                     struct nfp_net_rx_desc *rxd)
+                     void *data, struct nfp_net_rx_desc *rxd)
 {
-       struct nfp_net_rx_hash *rx_hash;
+       struct nfp_net_rx_hash *rx_hash = data;
 
        if (!(rxd->rxd.flags & PCIE_DESC_RX_RSS))
                return;
 
-       rx_hash = (struct nfp_net_rx_hash *)(skb->data - sizeof(*rx_hash));
-
        nfp_net_set_hash(netdev, skb, get_unaligned_be32(&rx_hash->hash_type),
                         &rx_hash->hash);
 }
 
 static void *
 nfp_net_parse_meta(struct net_device *netdev, struct sk_buff *skb,
-                  int meta_len)
+                  void *data, int meta_len)
 {
-       u8 *data = skb->data - meta_len;
        u32 meta_info;
 
        meta_info = get_unaligned_be32(data);
@@ -1445,8 +1449,9 @@ nfp_net_parse_meta(struct net_device *netdev, struct sk_buff *skb,
 }
 
 static void
-nfp_net_rx_drop(struct nfp_net_r_vector *r_vec, struct nfp_net_rx_ring *rx_ring,
-               struct nfp_net_rx_buf *rxbuf, struct sk_buff *skb)
+nfp_net_rx_drop(const struct nfp_net_dp *dp, struct nfp_net_r_vector *r_vec,
+               struct nfp_net_rx_ring *rx_ring, struct nfp_net_rx_buf *rxbuf,
+               struct sk_buff *skb)
 {
        u64_stats_update_begin(&r_vec->rx_sync);
        r_vec->rx_drops++;
@@ -1458,15 +1463,15 @@ nfp_net_rx_drop(struct nfp_net_r_vector *r_vec, struct nfp_net_rx_ring *rx_ring,
        if (skb && rxbuf && skb->head == rxbuf->frag)
                page_ref_inc(virt_to_head_page(rxbuf->frag));
        if (rxbuf)
-               nfp_net_rx_give_one(rx_ring, rxbuf->frag, rxbuf->dma_addr);
+               nfp_net_rx_give_one(dp, rx_ring, rxbuf->frag, rxbuf->dma_addr);
        if (skb)
                dev_kfree_skb_any(skb);
 }
 
 static bool
-nfp_net_tx_xdp_buf(struct nfp_net *nn, struct nfp_net_rx_ring *rx_ring,
+nfp_net_tx_xdp_buf(struct nfp_net_dp *dp, struct nfp_net_rx_ring *rx_ring,
                   struct nfp_net_tx_ring *tx_ring,
-                  struct nfp_net_rx_buf *rxbuf, unsigned int pkt_off,
+                  struct nfp_net_rx_buf *rxbuf, unsigned int dma_off,
                   unsigned int pkt_len)
 {
        struct nfp_net_tx_buf *txbuf;
@@ -1476,16 +1481,16 @@ nfp_net_tx_xdp_buf(struct nfp_net *nn, struct nfp_net_rx_ring *rx_ring,
        int wr_idx;
 
        if (unlikely(nfp_net_tx_full(tx_ring, 1))) {
-               nfp_net_rx_drop(rx_ring->r_vec, rx_ring, rxbuf, NULL);
+               nfp_net_rx_drop(dp, rx_ring->r_vec, rx_ring, rxbuf, NULL);
                return false;
        }
 
-       new_frag = nfp_net_napi_alloc_one(nn, DMA_BIDIRECTIONAL, &new_dma_addr);
+       new_frag = nfp_net_napi_alloc_one(dp, &new_dma_addr);
        if (unlikely(!new_frag)) {
-               nfp_net_rx_drop(rx_ring->r_vec, rx_ring, rxbuf, NULL);
+               nfp_net_rx_drop(dp, rx_ring->r_vec, rx_ring, rxbuf, NULL);
                return false;
        }
-       nfp_net_rx_give_one(rx_ring, new_frag, new_dma_addr);
+       nfp_net_rx_give_one(dp, rx_ring, new_frag, new_dma_addr);
 
        wr_idx = tx_ring->wr_p & (tx_ring->cnt - 1);
 
@@ -1497,14 +1502,14 @@ nfp_net_tx_xdp_buf(struct nfp_net *nn, struct nfp_net_rx_ring *rx_ring,
        txbuf->pkt_cnt = 1;
        txbuf->real_len = pkt_len;
 
-       dma_sync_single_for_device(&nn->pdev->dev, rxbuf->dma_addr + pkt_off,
+       dma_sync_single_for_device(dp->dev, rxbuf->dma_addr + dma_off,
                                   pkt_len, DMA_BIDIRECTIONAL);
 
        /* Build TX descriptor */
        txd = &tx_ring->txds[wr_idx];
        txd->offset_eop = PCIE_DESC_TX_EOP;
        txd->dma_len = cpu_to_le16(pkt_len);
-       nfp_desc_set_dma_addr(txd, rxbuf->dma_addr + pkt_off);
+       nfp_desc_set_dma_addr(txd, rxbuf->dma_addr + dma_off);
        txd->data_len = cpu_to_le16(pkt_len);
 
        txd->flags = 0;
@@ -1516,14 +1521,24 @@ nfp_net_tx_xdp_buf(struct nfp_net *nn, struct nfp_net_rx_ring *rx_ring,
        return true;
 }
 
-static int nfp_net_run_xdp(struct bpf_prog *prog, void *data, unsigned int len)
+static int nfp_net_run_xdp(struct bpf_prog *prog, void *data, void *hard_start,
+                          unsigned int *off, unsigned int *len)
 {
        struct xdp_buff xdp;
+       void *orig_data;
+       int ret;
+
+       xdp.data_hard_start = hard_start;
+       xdp.data = data + *off;
+       xdp.data_end = data + *off + *len;
+
+       orig_data = xdp.data;
+       ret = bpf_prog_run_xdp(prog, &xdp);
 
-       xdp.data = data;
-       xdp.data_end = data + len;
+       *len -= xdp.data - orig_data;
+       *off += xdp.data - orig_data;
 
-       return bpf_prog_run_xdp(prog, &xdp);
+       return ret;
 }
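
The rewritten helper lets the BPF program move xdp.data (via
bpf_xdp_adjust_head()) and feeds the move back to the caller through *off and
*len. A self-contained sketch of that accounting, with a stub standing in for
bpf_prog_run_xdp():

#include <stdio.h>

struct fake_xdp {
        unsigned char *data;
        unsigned char *data_end;
        unsigned char *data_hard_start;
};

/* Stand-in for a program that pulls 4 bytes of encap header. */
static int fake_prog_run(struct fake_xdp *xdp)
{
        xdp->data += 4;         /* like bpf_xdp_adjust_head(xdp, 4) */
        return 0;               /* XDP_PASS */
}

int main(void)
{
        unsigned char buf[256];
        unsigned int off = 32, len = 100;
        struct fake_xdp xdp = {
                .data_hard_start = buf,
                .data = buf + off,
                .data_end = buf + off + len,
        };
        unsigned char *orig_data = xdp.data;

        fake_prog_run(&xdp);

        /* Same accounting as nfp_net_run_xdp() above. */
        len -= xdp.data - orig_data;
        off += xdp.data - orig_data;
        printf("off=%u len=%u\n", off, len);    /* off=36 len=96 */
        return 0;
}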
 
 /**
@@ -1540,27 +1555,27 @@ static int nfp_net_run_xdp(struct bpf_prog *prog, void *data, unsigned int len)
 static int nfp_net_rx(struct nfp_net_rx_ring *rx_ring, int budget)
 {
        struct nfp_net_r_vector *r_vec = rx_ring->r_vec;
-       struct nfp_net *nn = r_vec->nfp_net;
+       struct nfp_net_dp *dp = &r_vec->nfp_net->dp;
        struct nfp_net_tx_ring *tx_ring;
        struct bpf_prog *xdp_prog;
        unsigned int true_bufsz;
        struct sk_buff *skb;
        int pkts_polled = 0;
-       int rx_dma_map_dir;
        int idx;
 
        rcu_read_lock();
-       xdp_prog = READ_ONCE(nn->xdp_prog);
-       rx_dma_map_dir = xdp_prog ? DMA_BIDIRECTIONAL : DMA_FROM_DEVICE;
-       true_bufsz = xdp_prog ? PAGE_SIZE : nn->fl_bufsz;
+       xdp_prog = READ_ONCE(dp->xdp_prog);
+       true_bufsz = xdp_prog ? PAGE_SIZE : dp->fl_bufsz;
        tx_ring = r_vec->xdp_ring;
 
        while (pkts_polled < budget) {
-               unsigned int meta_len, data_len, data_off, pkt_len, pkt_off;
+               unsigned int meta_len, data_len, data_off, pkt_len;
+               u8 meta_prepend[NFP_NET_MAX_PREPEND];
                struct nfp_net_rx_buf *rxbuf;
                struct nfp_net_rx_desc *rxd;
                dma_addr_t new_dma_addr;
                void *new_frag;
+               u8 *meta;
 
                idx = rx_ring->rd_p & (rx_ring->cnt - 1);
 
@@ -1593,11 +1608,11 @@ static int nfp_net_rx(struct nfp_net_rx_ring *rx_ring, int budget)
                data_len = le16_to_cpu(rxd->rxd.data_len);
                pkt_len = data_len - meta_len;
 
-               if (nn->rx_offset == NFP_NET_CFG_RX_OFFSET_DYNAMIC)
-                       pkt_off = meta_len;
+               if (dp->rx_offset == NFP_NET_CFG_RX_OFFSET_DYNAMIC)
+                       data_off = NFP_NET_RX_BUF_HEADROOM + meta_len;
                else
-                       pkt_off = nn->rx_offset;
-               data_off = NFP_NET_RX_BUF_HEADROOM + pkt_off;
+                       data_off = NFP_NET_RX_BUF_HEADROOM + dp->rx_offset;
+               data_off += dp->rx_dma_off;
 
                /* Stats update */
                u64_stats_update_begin(&r_vec->rx_sync);
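
The offset arithmetic above pins down where the packet starts inside the
buffer: headroom first, then any extra XDP DMA offset, with the metadata
prepend sitting immediately before the packet. A standalone illustration
(all values assumed; a zero rx_offset again models the dynamic case):

#include <stdio.h>

int main(void)
{
        unsigned int headroom = 256;    /* assumed NFP_NET_RX_BUF_HEADROOM */
        unsigned int rx_dma_off = 0;    /* extra XDP headroom when non-zero */
        unsigned int rx_offset = 32;
        unsigned int meta_len = 8;
        unsigned int data_off;

        if (!rx_offset)                 /* dynamic: prepend precedes data */
                data_off = headroom + meta_len;
        else
                data_off = headroom + rx_offset;
        data_off += rx_dma_off;

        /* Metadata starts meta_len bytes before the packet data. */
        printf("meta at %u, packet at %u\n", data_off - meta_len, data_off);
        return 0;
}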
@@ -1605,30 +1620,55 @@ static int nfp_net_rx(struct nfp_net_rx_ring *rx_ring, int budget)
                r_vec->rx_bytes += pkt_len;
                u64_stats_update_end(&r_vec->rx_sync);
 
+               /* Pointer to start of metadata */
+               meta = rxbuf->frag + data_off - meta_len;
+
+               if (unlikely(meta_len > NFP_NET_MAX_PREPEND ||
+                            (dp->rx_offset && meta_len > dp->rx_offset))) {
+                       nn_dp_warn(dp, "oversized RX packet metadata %u\n",
+                                  meta_len);
+                       nfp_net_rx_drop(dp, r_vec, rx_ring, rxbuf, NULL);
+                       continue;
+               }
+
                if (xdp_prog && !(rxd->rxd.flags & PCIE_DESC_RX_BPF &&
-                                 nn->bpf_offload_xdp)) {
+                                 dp->bpf_offload_xdp)) {
+                       unsigned int dma_off;
+                       void *hard_start;
                        int act;
 
-                       dma_sync_single_for_cpu(&nn->pdev->dev,
-                                               rxbuf->dma_addr + pkt_off,
-                                               pkt_len, DMA_BIDIRECTIONAL);
-                       act = nfp_net_run_xdp(xdp_prog, rxbuf->frag + data_off,
-                                             pkt_len);
+                       hard_start = rxbuf->frag + NFP_NET_RX_BUF_HEADROOM;
+                       dma_off = data_off - NFP_NET_RX_BUF_HEADROOM;
+                       dma_sync_single_for_cpu(dp->dev, rxbuf->dma_addr,
+                                               dma_off + pkt_len,
+                                               DMA_BIDIRECTIONAL);
+
+                       /* Move prepend out of the way */
+                       if (xdp_prog->xdp_adjust_head) {
+                               memcpy(meta_prepend, meta, meta_len);
+                               meta = meta_prepend;
+                       }
+
+                       act = nfp_net_run_xdp(xdp_prog, rxbuf->frag, hard_start,
+                                             &data_off, &pkt_len);
                        switch (act) {
                        case XDP_PASS:
                                break;
                        case XDP_TX:
-                               if (unlikely(!nfp_net_tx_xdp_buf(nn, rx_ring,
+                               dma_off = data_off - NFP_NET_RX_BUF_HEADROOM;
+                               if (unlikely(!nfp_net_tx_xdp_buf(dp, rx_ring,
                                                                 tx_ring, rxbuf,
-                                                                pkt_off, pkt_len)))
-                                       trace_xdp_exception(nn->netdev, xdp_prog, act);
+                                                                dma_off,
+                                                                pkt_len)))
+                                       trace_xdp_exception(dp->netdev,
+                                                           xdp_prog, act);
                                continue;
                        default:
                                bpf_warn_invalid_xdp_action(act);
                        case XDP_ABORTED:
-                               trace_xdp_exception(nn->netdev, xdp_prog, act);
+                               trace_xdp_exception(dp->netdev, xdp_prog, act);
                        case XDP_DROP:
-                               nfp_net_rx_give_one(rx_ring, rxbuf->frag,
+                               nfp_net_rx_give_one(dp, rx_ring, rxbuf->frag,
                                                    rxbuf->dma_addr);
                                continue;
                        }
@@ -1636,41 +1676,40 @@ static int nfp_net_rx(struct nfp_net_rx_ring *rx_ring, int budget)
 
                skb = build_skb(rxbuf->frag, true_bufsz);
                if (unlikely(!skb)) {
-                       nfp_net_rx_drop(r_vec, rx_ring, rxbuf, NULL);
+                       nfp_net_rx_drop(dp, r_vec, rx_ring, rxbuf, NULL);
                        continue;
                }
-               new_frag = nfp_net_napi_alloc_one(nn, rx_dma_map_dir,
-                                                 &new_dma_addr);
+               new_frag = nfp_net_napi_alloc_one(dp, &new_dma_addr);
                if (unlikely(!new_frag)) {
-                       nfp_net_rx_drop(r_vec, rx_ring, rxbuf, skb);
+                       nfp_net_rx_drop(dp, r_vec, rx_ring, rxbuf, skb);
                        continue;
                }
 
-               nfp_net_dma_unmap_rx(nn, rxbuf->dma_addr, nn->fl_bufsz,
-                                    rx_dma_map_dir);
+               nfp_net_dma_unmap_rx(dp, rxbuf->dma_addr);
 
-               nfp_net_rx_give_one(rx_ring, new_frag, new_dma_addr);
+               nfp_net_rx_give_one(dp, rx_ring, new_frag, new_dma_addr);
 
                skb_reserve(skb, data_off);
                skb_put(skb, pkt_len);
 
-               if (nn->fw_ver.major <= 3) {
-                       nfp_net_set_hash_desc(nn->netdev, skb, rxd);
+               if (!dp->chained_metadata_format) {
+                       nfp_net_set_hash_desc(dp->netdev, skb, meta, rxd);
                } else if (meta_len) {
                        void *end;
 
-                       end = nfp_net_parse_meta(nn->netdev, skb, meta_len);
-                       if (unlikely(end != skb->data)) {
-                               nn_warn_ratelimit(nn, "invalid RX packet metadata\n");
-                               nfp_net_rx_drop(r_vec, rx_ring, NULL, skb);
+                       end = nfp_net_parse_meta(dp->netdev, skb, meta,
+                                                meta_len);
+                       if (unlikely(end != meta + meta_len)) {
+                               nn_dp_warn(dp, "invalid RX packet metadata\n");
+                               nfp_net_rx_drop(dp, r_vec, rx_ring, NULL, skb);
                                continue;
                        }
                }
 
                skb_record_rx_queue(skb, rx_ring->idx);
-               skb->protocol = eth_type_trans(skb, nn->netdev);
+               skb->protocol = eth_type_trans(skb, dp->netdev);
 
-               nfp_net_rx_csum(nn, r_vec, rxd, skb);
+               nfp_net_rx_csum(dp, r_vec, rxd, skb);
 
                if (rxd->rxd.flags & PCIE_DESC_RX_VLAN)
                        __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
@@ -1707,10 +1746,9 @@ static int nfp_net_poll(struct napi_struct *napi, int budget)
                        nfp_net_xdp_complete(r_vec->xdp_ring);
        }
 
-       if (pkts_polled < budget) {
-               napi_complete_done(napi, pkts_polled);
-               nfp_net_irq_unmask(r_vec->nfp_net, r_vec->irq_entry);
-       }
+       if (pkts_polled < budget)
+               if (napi_complete_done(napi, pkts_polled))
+                       nfp_net_irq_unmask(r_vec->nfp_net, r_vec->irq_entry);
 
        return pkts_polled;
 }
@@ -1725,13 +1763,12 @@ static int nfp_net_poll(struct napi_struct *napi, int budget)
 static void nfp_net_tx_ring_free(struct nfp_net_tx_ring *tx_ring)
 {
        struct nfp_net_r_vector *r_vec = tx_ring->r_vec;
-       struct nfp_net *nn = r_vec->nfp_net;
-       struct pci_dev *pdev = nn->pdev;
+       struct nfp_net_dp *dp = &r_vec->nfp_net->dp;
 
        kfree(tx_ring->txbufs);
 
        if (tx_ring->txds)
-               dma_free_coherent(&pdev->dev, tx_ring->size,
+               dma_free_coherent(dp->dev, tx_ring->size,
                                  tx_ring->txds, tx_ring->dma);
 
        tx_ring->cnt = 0;
@@ -1743,24 +1780,23 @@ static void nfp_net_tx_ring_free(struct nfp_net_tx_ring *tx_ring)
 
 /**
  * nfp_net_tx_ring_alloc() - Allocate resource for a TX ring
+ * @dp:        NFP Net data path struct
  * @tx_ring:   TX Ring structure to allocate
- * @cnt:       Ring buffer count
  * @is_xdp:    True if ring will be used for XDP
  *
  * Return: 0 on success, negative errno otherwise.
  */
 static int
-nfp_net_tx_ring_alloc(struct nfp_net_tx_ring *tx_ring, u32 cnt, bool is_xdp)
+nfp_net_tx_ring_alloc(struct nfp_net_dp *dp, struct nfp_net_tx_ring *tx_ring,
+                     bool is_xdp)
 {
        struct nfp_net_r_vector *r_vec = tx_ring->r_vec;
-       struct nfp_net *nn = r_vec->nfp_net;
-       struct pci_dev *pdev = nn->pdev;
        int sz;
 
-       tx_ring->cnt = cnt;
+       tx_ring->cnt = dp->txd_cnt;
 
        tx_ring->size = sizeof(*tx_ring->txds) * tx_ring->cnt;
-       tx_ring->txds = dma_zalloc_coherent(&pdev->dev, tx_ring->size,
+       tx_ring->txds = dma_zalloc_coherent(dp->dev, tx_ring->size,
                                            &tx_ring->dma, GFP_KERNEL);
        if (!tx_ring->txds)
                goto err_alloc;
@@ -1771,14 +1807,9 @@ nfp_net_tx_ring_alloc(struct nfp_net_tx_ring *tx_ring, u32 cnt, bool is_xdp)
                goto err_alloc;
 
        if (!is_xdp)
-               netif_set_xps_queue(nn->netdev, &r_vec->affinity_mask,
+               netif_set_xps_queue(dp->netdev, &r_vec->affinity_mask,
                                    tx_ring->idx);
 
-       nn_dbg(nn, "TxQ%02d: QCidx=%02d cnt=%d dma=%#llx host=%p %s\n",
-              tx_ring->idx, tx_ring->qcidx,
-              tx_ring->cnt, (unsigned long long)tx_ring->dma, tx_ring->txds,
-              is_xdp ? "XDP" : "");
-
        return 0;
 
 err_alloc:
@@ -1786,62 +1817,45 @@ err_alloc:
        return -ENOMEM;
 }
 
-static struct nfp_net_tx_ring *
-nfp_net_tx_ring_set_prepare(struct nfp_net *nn, struct nfp_net_ring_set *s,
-                           unsigned int num_stack_tx_rings)
+static int nfp_net_tx_rings_prepare(struct nfp_net *nn, struct nfp_net_dp *dp)
 {
-       struct nfp_net_tx_ring *rings;
        unsigned int r;
 
-       rings = kcalloc(s->n_rings, sizeof(*rings), GFP_KERNEL);
-       if (!rings)
-               return NULL;
+       dp->tx_rings = kcalloc(dp->num_tx_rings, sizeof(*dp->tx_rings),
+                              GFP_KERNEL);
+       if (!dp->tx_rings)
+               return -ENOMEM;
 
-       for (r = 0; r < s->n_rings; r++) {
+       for (r = 0; r < dp->num_tx_rings; r++) {
                int bias = 0;
 
-               if (r >= num_stack_tx_rings)
-                       bias = num_stack_tx_rings;
+               if (r >= dp->num_stack_tx_rings)
+                       bias = dp->num_stack_tx_rings;
 
-               nfp_net_tx_ring_init(&rings[r], &nn->r_vecs[r - bias], r);
+               nfp_net_tx_ring_init(&dp->tx_rings[r], &nn->r_vecs[r - bias],
+                                    r);
 
-               if (nfp_net_tx_ring_alloc(&rings[r], s->dcnt, bias))
+               if (nfp_net_tx_ring_alloc(dp, &dp->tx_rings[r], bias))
                        goto err_free_prev;
        }
 
-       return s->rings = rings;
+       return 0;
 
 err_free_prev:
        while (r--)
-               nfp_net_tx_ring_free(&rings[r]);
-       kfree(rings);
-       return NULL;
-}
-
-static void
-nfp_net_tx_ring_set_swap(struct nfp_net *nn, struct nfp_net_ring_set *s)
-{
-       struct nfp_net_ring_set new = *s;
-
-       s->dcnt = nn->txd_cnt;
-       s->rings = nn->tx_rings;
-       s->n_rings = nn->num_tx_rings;
-
-       nn->txd_cnt = new.dcnt;
-       nn->tx_rings = new.rings;
-       nn->num_tx_rings = new.n_rings;
+               nfp_net_tx_ring_free(&dp->tx_rings[r]);
+       kfree(dp->tx_rings);
+       return -ENOMEM;
 }
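
The bias term encodes the TX ring layout: stack rings occupy the front of the
array and XDP rings the back, with both halves sharing r_vecs by index. A toy
rendering of that mapping (ring counts are assumed):

#include <stdio.h>

int main(void)
{
        unsigned int num_stack_tx_rings = 4;    /* assumed */
        unsigned int num_tx_rings = 8;          /* stack + XDP rings */
        unsigned int r;

        for (r = 0; r < num_tx_rings; r++) {
                unsigned int bias = r >= num_stack_tx_rings ?
                                    num_stack_tx_rings : 0;

                printf("tx_ring[%u] -> r_vec[%u]%s\n",
                       r, r - bias, bias ? " (XDP)" : "");
        }
        return 0;
}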
 
-static void
-nfp_net_tx_ring_set_free(struct nfp_net *nn, struct nfp_net_ring_set *s)
+static void nfp_net_tx_rings_free(struct nfp_net_dp *dp)
 {
-       struct nfp_net_tx_ring *rings = s->rings;
        unsigned int r;
 
-       for (r = 0; r < s->n_rings; r++)
-               nfp_net_tx_ring_free(&rings[r]);
+       for (r = 0; r < dp->num_tx_rings; r++)
+               nfp_net_tx_ring_free(&dp->tx_rings[r]);
 
-       kfree(rings);
+       kfree(dp->tx_rings);
 }
 
 /**
@@ -1851,13 +1865,12 @@ nfp_net_tx_ring_set_free(struct nfp_net *nn, struct nfp_net_ring_set *s)
 static void nfp_net_rx_ring_free(struct nfp_net_rx_ring *rx_ring)
 {
        struct nfp_net_r_vector *r_vec = rx_ring->r_vec;
-       struct nfp_net *nn = r_vec->nfp_net;
-       struct pci_dev *pdev = nn->pdev;
+       struct nfp_net_dp *dp = &r_vec->nfp_net->dp;
 
        kfree(rx_ring->rxbufs);
 
        if (rx_ring->rxds)
-               dma_free_coherent(&pdev->dev, rx_ring->size,
+               dma_free_coherent(dp->dev, rx_ring->size,
                                  rx_ring->rxds, rx_ring->dma);
 
        rx_ring->cnt = 0;
@@ -1869,26 +1882,19 @@ static void nfp_net_rx_ring_free(struct nfp_net_rx_ring *rx_ring)
 
 /**
  * nfp_net_rx_ring_alloc() - Allocate resource for a RX ring
+ * @dp:              NFP Net data path struct
  * @rx_ring:  RX ring to allocate
- * @fl_bufsz: Size of buffers to allocate
- * @cnt:      Ring buffer count
  *
  * Return: 0 on success, negative errno otherwise.
  */
 static int
-nfp_net_rx_ring_alloc(struct nfp_net_rx_ring *rx_ring, unsigned int fl_bufsz,
-                     u32 cnt)
+nfp_net_rx_ring_alloc(struct nfp_net_dp *dp, struct nfp_net_rx_ring *rx_ring)
 {
-       struct nfp_net_r_vector *r_vec = rx_ring->r_vec;
-       struct nfp_net *nn = r_vec->nfp_net;
-       struct pci_dev *pdev = nn->pdev;
        int sz;
 
-       rx_ring->cnt = cnt;
-       rx_ring->bufsz = fl_bufsz;
-
+       rx_ring->cnt = dp->rxd_cnt;
        rx_ring->size = sizeof(*rx_ring->rxds) * rx_ring->cnt;
-       rx_ring->rxds = dma_zalloc_coherent(&pdev->dev, rx_ring->size,
+       rx_ring->rxds = dma_zalloc_coherent(dp->dev, rx_ring->size,
                                            &rx_ring->dma, GFP_KERNEL);
        if (!rx_ring->rxds)
                goto err_alloc;
@@ -1898,10 +1904,6 @@ nfp_net_rx_ring_alloc(struct nfp_net_rx_ring *rx_ring, unsigned int fl_bufsz,
        if (!rx_ring->rxbufs)
                goto err_alloc;
 
-       nn_dbg(nn, "RxQ%02d: FlQCidx=%02d RxQCidx=%02d cnt=%d dma=%#llx host=%p\n",
-              rx_ring->idx, rx_ring->fl_qcidx, rx_ring->rx_qcidx,
-              rx_ring->cnt, (unsigned long long)rx_ring->dma, rx_ring->rxds);
-
        return 0;
 
 err_alloc:
@@ -1909,82 +1911,59 @@ err_alloc:
        return -ENOMEM;
 }
 
-static struct nfp_net_rx_ring *
-nfp_net_rx_ring_set_prepare(struct nfp_net *nn, struct nfp_net_ring_set *s,
-                           bool xdp)
+static int nfp_net_rx_rings_prepare(struct nfp_net *nn, struct nfp_net_dp *dp)
 {
-       unsigned int fl_bufsz = nfp_net_calc_fl_bufsz(nn, s->mtu);
-       struct nfp_net_rx_ring *rings;
        unsigned int r;
 
-       rings = kcalloc(s->n_rings, sizeof(*rings), GFP_KERNEL);
-       if (!rings)
-               return NULL;
+       dp->rx_rings = kcalloc(dp->num_rx_rings, sizeof(*dp->rx_rings),
+                              GFP_KERNEL);
+       if (!dp->rx_rings)
+               return -ENOMEM;
 
-       for (r = 0; r < s->n_rings; r++) {
-               nfp_net_rx_ring_init(&rings[r], &nn->r_vecs[r], r);
+       for (r = 0; r < dp->num_rx_rings; r++) {
+               nfp_net_rx_ring_init(&dp->rx_rings[r], &nn->r_vecs[r], r);
 
-               if (nfp_net_rx_ring_alloc(&rings[r], fl_bufsz, s->dcnt))
+               if (nfp_net_rx_ring_alloc(dp, &dp->rx_rings[r]))
                        goto err_free_prev;
 
-               if (nfp_net_rx_ring_bufs_alloc(nn, &rings[r], xdp))
+               if (nfp_net_rx_ring_bufs_alloc(dp, &dp->rx_rings[r]))
                        goto err_free_ring;
        }
 
-       return s->rings = rings;
+       return 0;
 
 err_free_prev:
        while (r--) {
-               nfp_net_rx_ring_bufs_free(nn, &rings[r], xdp);
+               nfp_net_rx_ring_bufs_free(dp, &dp->rx_rings[r]);
 err_free_ring:
-               nfp_net_rx_ring_free(&rings[r]);
+               nfp_net_rx_ring_free(&dp->rx_rings[r]);
        }
-       kfree(rings);
-       return NULL;
-}
-
-static void
-nfp_net_rx_ring_set_swap(struct nfp_net *nn, struct nfp_net_ring_set *s)
-{
-       struct nfp_net_ring_set new = *s;
-
-       s->mtu = nn->netdev->mtu;
-       s->dcnt = nn->rxd_cnt;
-       s->rings = nn->rx_rings;
-       s->n_rings = nn->num_rx_rings;
-
-       nn->netdev->mtu = new.mtu;
-       nn->fl_bufsz = nfp_net_calc_fl_bufsz(nn, new.mtu);
-       nn->rxd_cnt = new.dcnt;
-       nn->rx_rings = new.rings;
-       nn->num_rx_rings = new.n_rings;
+       kfree(dp->rx_rings);
+       return -ENOMEM;
 }
 
-static void
-nfp_net_rx_ring_set_free(struct nfp_net *nn, struct nfp_net_ring_set *s,
-                        bool xdp)
+static void nfp_net_rx_rings_free(struct nfp_net_dp *dp)
 {
-       struct nfp_net_rx_ring *rings = s->rings;
        unsigned int r;
 
-       for (r = 0; r < s->n_rings; r++) {
-               nfp_net_rx_ring_bufs_free(nn, &rings[r], xdp);
-               nfp_net_rx_ring_free(&rings[r]);
+       for (r = 0; r < dp->num_rx_rings; r++) {
+               nfp_net_rx_ring_bufs_free(dp, &dp->rx_rings[r]);
+               nfp_net_rx_ring_free(&dp->rx_rings[r]);
        }
 
-       kfree(rings);
+       kfree(dp->rx_rings);
 }
 
 static void
-nfp_net_vector_assign_rings(struct nfp_net *nn, struct nfp_net_r_vector *r_vec,
-                           int idx)
+nfp_net_vector_assign_rings(struct nfp_net_dp *dp,
+                           struct nfp_net_r_vector *r_vec, int idx)
 {
-       r_vec->rx_ring = idx < nn->num_rx_rings ? &nn->rx_rings[idx] : NULL;
+       r_vec->rx_ring = idx < dp->num_rx_rings ? &dp->rx_rings[idx] : NULL;
        r_vec->tx_ring =
-               idx < nn->num_stack_tx_rings ? &nn->tx_rings[idx] : NULL;
+               idx < dp->num_stack_tx_rings ? &dp->tx_rings[idx] : NULL;
 
-       r_vec->xdp_ring = idx < nn->num_tx_rings - nn->num_stack_tx_rings ?
-               &nn->tx_rings[nn->num_stack_tx_rings + idx] : NULL;
+       r_vec->xdp_ring = idx < dp->num_tx_rings - dp->num_stack_tx_rings ?
+               &dp->tx_rings[dp->num_stack_tx_rings + idx] : NULL;
 }
 
 static int
@@ -1994,11 +1973,11 @@ nfp_net_prepare_vector(struct nfp_net *nn, struct nfp_net_r_vector *r_vec,
        int err;
 
        /* Setup NAPI */
-       netif_napi_add(nn->netdev, &r_vec->napi,
+       netif_napi_add(nn->dp.netdev, &r_vec->napi,
                       nfp_net_poll, NAPI_POLL_WEIGHT);
 
        snprintf(r_vec->name, sizeof(r_vec->name),
-                "%s-rxtx-%d", nn->netdev->name, idx);
+                "%s-rxtx-%d", nn->dp.netdev->name, idx);
        err = request_irq(r_vec->irq_vector, r_vec->handler, 0, r_vec->name,
                          r_vec);
        if (err) {
@@ -2045,7 +2024,7 @@ void nfp_net_rss_write_key(struct nfp_net *nn)
 {
        int i;
 
-       for (i = 0; i < NFP_NET_CFG_RSS_KEY_SZ; i += 4)
+       for (i = 0; i < nfp_net_rss_key_sz(nn); i += 4)
                nn_writel(nn, NFP_NET_CFG_RSS_KEY + i,
                          get_unaligned_le32(nn->rss_key + i));
 }
@@ -2069,13 +2048,13 @@ void nfp_net_coalesce_write_cfg(struct nfp_net *nn)
        /* copy RX interrupt coalesce parameters */
        value = (nn->rx_coalesce_max_frames << 16) |
                (factor * nn->rx_coalesce_usecs);
-       for (i = 0; i < nn->num_rx_rings; i++)
+       for (i = 0; i < nn->dp.num_rx_rings; i++)
                nn_writel(nn, NFP_NET_CFG_RXR_IRQ_MOD(i), value);
 
        /* copy TX interrupt coalesce parameters */
        value = (nn->tx_coalesce_max_frames << 16) |
                (factor * nn->tx_coalesce_usecs);
-       for (i = 0; i < nn->num_tx_rings; i++)
+       for (i = 0; i < nn->dp.num_tx_rings; i++)
                nn_writel(nn, NFP_NET_CFG_TXR_IRQ_MOD(i), value);
 }
 
@@ -2090,9 +2069,9 @@ void nfp_net_coalesce_write_cfg(struct nfp_net *nn)
 static void nfp_net_write_mac_addr(struct nfp_net *nn)
 {
        nn_writel(nn, NFP_NET_CFG_MACADDR + 0,
-                 get_unaligned_be32(nn->netdev->dev_addr));
+                 get_unaligned_be32(nn->dp.netdev->dev_addr));
        nn_writew(nn, NFP_NET_CFG_MACADDR + 6,
-                 get_unaligned_be16(nn->netdev->dev_addr + 4));
+                 get_unaligned_be16(nn->dp.netdev->dev_addr + 4));
 }
 
 static void nfp_net_vec_clear_ring_data(struct nfp_net *nn, unsigned int idx)
@@ -2116,7 +2095,7 @@ static void nfp_net_clear_config_and_disable(struct nfp_net *nn)
        unsigned int r;
        int err;
 
-       new_ctrl = nn->ctrl;
+       new_ctrl = nn->dp.ctrl;
        new_ctrl &= ~NFP_NET_CFG_CTRL_ENABLE;
        update = NFP_NET_CFG_UPDATE_GEN;
        update |= NFP_NET_CFG_UPDATE_MSIX;
@@ -2133,14 +2112,14 @@ static void nfp_net_clear_config_and_disable(struct nfp_net *nn)
        if (err)
                nn_err(nn, "Could not disable device: %d\n", err);
 
-       for (r = 0; r < nn->num_rx_rings; r++)
-               nfp_net_rx_ring_reset(&nn->rx_rings[r]);
-       for (r = 0; r < nn->num_tx_rings; r++)
-               nfp_net_tx_ring_reset(nn, &nn->tx_rings[r]);
-       for (r = 0; r < nn->num_r_vecs; r++)
+       for (r = 0; r < nn->dp.num_rx_rings; r++)
+               nfp_net_rx_ring_reset(&nn->dp.rx_rings[r]);
+       for (r = 0; r < nn->dp.num_tx_rings; r++)
+               nfp_net_tx_ring_reset(&nn->dp, &nn->dp.tx_rings[r]);
+       for (r = 0; r < nn->dp.num_r_vecs; r++)
                nfp_net_vec_clear_ring_data(nn, r);
 
-       nn->ctrl = new_ctrl;
+       nn->dp.ctrl = new_ctrl;
 }
 
 static void
@@ -2162,13 +2141,17 @@ nfp_net_tx_ring_hw_cfg_write(struct nfp_net *nn,
        nn_writeb(nn, NFP_NET_CFG_TXR_VEC(idx), tx_ring->r_vec->irq_entry);
 }
 
-static int __nfp_net_set_config_and_enable(struct nfp_net *nn)
+/**
+ * nfp_net_set_config_and_enable() - Write control BAR and enable NFP
+ * @nn:      NFP Net device to reconfigure
+ */
+static int nfp_net_set_config_and_enable(struct nfp_net *nn)
 {
        u32 new_ctrl, update = 0;
        unsigned int r;
        int err;
 
-       new_ctrl = nn->ctrl;
+       new_ctrl = nn->dp.ctrl;
 
        if (nn->cap & NFP_NET_CFG_CTRL_RSS) {
                nfp_net_rss_write_key(nn);
@@ -2184,22 +2167,22 @@ static int __nfp_net_set_config_and_enable(struct nfp_net *nn)
                update |= NFP_NET_CFG_UPDATE_IRQMOD;
        }
 
-       for (r = 0; r < nn->num_tx_rings; r++)
-               nfp_net_tx_ring_hw_cfg_write(nn, &nn->tx_rings[r], r);
-       for (r = 0; r < nn->num_rx_rings; r++)
-               nfp_net_rx_ring_hw_cfg_write(nn, &nn->rx_rings[r], r);
+       for (r = 0; r < nn->dp.num_tx_rings; r++)
+               nfp_net_tx_ring_hw_cfg_write(nn, &nn->dp.tx_rings[r], r);
+       for (r = 0; r < nn->dp.num_rx_rings; r++)
+               nfp_net_rx_ring_hw_cfg_write(nn, &nn->dp.rx_rings[r], r);
 
-       nn_writeq(nn, NFP_NET_CFG_TXRS_ENABLE, nn->num_tx_rings == 64 ?
-                 0xffffffffffffffffULL : ((u64)1 << nn->num_tx_rings) - 1);
+       nn_writeq(nn, NFP_NET_CFG_TXRS_ENABLE, nn->dp.num_tx_rings == 64 ?
+                 0xffffffffffffffffULL : ((u64)1 << nn->dp.num_tx_rings) - 1);
 
-       nn_writeq(nn, NFP_NET_CFG_RXRS_ENABLE, nn->num_rx_rings == 64 ?
-                 0xffffffffffffffffULL : ((u64)1 << nn->num_rx_rings) - 1);
+       nn_writeq(nn, NFP_NET_CFG_RXRS_ENABLE, nn->dp.num_rx_rings == 64 ?
+                 0xffffffffffffffffULL : ((u64)1 << nn->dp.num_rx_rings) - 1);
 
        nfp_net_write_mac_addr(nn);
 
-       nn_writel(nn, NFP_NET_CFG_MTU, nn->netdev->mtu);
+       nn_writel(nn, NFP_NET_CFG_MTU, nn->dp.netdev->mtu);
        nn_writel(nn, NFP_NET_CFG_FLBUFSZ,
-                 nn->fl_bufsz - NFP_NET_RX_BUF_NON_DATA);
+                 nn->dp.fl_bufsz - NFP_NET_RX_BUF_NON_DATA);
 
        /* Enable device */
        new_ctrl |= NFP_NET_CFG_CTRL_ENABLE;
@@ -2211,37 +2194,26 @@ static int __nfp_net_set_config_and_enable(struct nfp_net *nn)
 
        nn_writel(nn, NFP_NET_CFG_CTRL, new_ctrl);
        err = nfp_net_reconfig(nn, update);
+       if (err) {
+               nfp_net_clear_config_and_disable(nn);
+               return err;
+       }
 
-       nn->ctrl = new_ctrl;
+       nn->dp.ctrl = new_ctrl;
 
-       for (r = 0; r < nn->num_rx_rings; r++)
-               nfp_net_rx_ring_fill_freelist(&nn->rx_rings[r]);
+       for (r = 0; r < nn->dp.num_rx_rings; r++)
+               nfp_net_rx_ring_fill_freelist(&nn->dp, &nn->dp.rx_rings[r]);
 
        /* Since reconfiguration requests while NFP is down are ignored, we
         * have to wipe the entire VXLAN configuration and reinitialize it.
         */
-       if (nn->ctrl & NFP_NET_CFG_CTRL_VXLAN) {
+       if (nn->dp.ctrl & NFP_NET_CFG_CTRL_VXLAN) {
                memset(&nn->vxlan_ports, 0, sizeof(nn->vxlan_ports));
                memset(&nn->vxlan_usecnt, 0, sizeof(nn->vxlan_usecnt));
-               udp_tunnel_get_rx_info(nn->netdev);
+               udp_tunnel_get_rx_info(nn->dp.netdev);
        }
 
-       return err;
-}
-
-/**
- * nfp_net_set_config_and_enable() - Write control BAR and enable NFP
- * @nn:      NFP Net device to reconfigure
- */
-static int nfp_net_set_config_and_enable(struct nfp_net *nn)
-{
-       int err;
-
-       err = __nfp_net_set_config_and_enable(nn);
-       if (err)
-               nfp_net_clear_config_and_disable(nn);
-
-       return err;
+       return 0;
 }
 
 /**
@@ -2252,12 +2224,12 @@ static void nfp_net_open_stack(struct nfp_net *nn)
 {
        unsigned int r;
 
-       for (r = 0; r < nn->num_r_vecs; r++) {
+       for (r = 0; r < nn->dp.num_r_vecs; r++) {
                napi_enable(&nn->r_vecs[r].napi);
                enable_irq(nn->r_vecs[r].irq_vector);
        }
 
-       netif_tx_wake_all_queues(nn->netdev);
+       netif_tx_wake_all_queues(nn->dp.netdev);
 
        enable_irq(nn->irq_entries[NFP_NET_IRQ_LSC_IDX].vector);
        nfp_net_read_link_status(nn);
@@ -2266,22 +2238,8 @@ static void nfp_net_open_stack(struct nfp_net *nn)
 static int nfp_net_netdev_open(struct net_device *netdev)
 {
        struct nfp_net *nn = netdev_priv(netdev);
-       struct nfp_net_ring_set rx = {
-               .n_rings = nn->num_rx_rings,
-               .mtu = nn->netdev->mtu,
-               .dcnt = nn->rxd_cnt,
-       };
-       struct nfp_net_ring_set tx = {
-               .n_rings = nn->num_tx_rings,
-               .dcnt = nn->txd_cnt,
-       };
        int err, r;
 
-       if (nn->ctrl & NFP_NET_CFG_CTRL_ENABLE) {
-               nn_err(nn, "Dev is already enabled: 0x%08x\n", nn->ctrl);
-               return -EBUSY;
-       }
-
        /* Step 1: Allocate resources for rings and the like
         * - Request interrupts
         * - Allocate RX and TX ring resources
@@ -2299,33 +2257,28 @@ static int nfp_net_netdev_open(struct net_device *netdev)
                goto err_free_exn;
        disable_irq(nn->irq_entries[NFP_NET_IRQ_LSC_IDX].vector);
 
-       for (r = 0; r < nn->num_r_vecs; r++) {
+       for (r = 0; r < nn->dp.num_r_vecs; r++) {
                err = nfp_net_prepare_vector(nn, &nn->r_vecs[r], r);
                if (err)
                        goto err_cleanup_vec_p;
        }
 
-       nn->rx_rings = nfp_net_rx_ring_set_prepare(nn, &rx, nn->xdp_prog);
-       if (!nn->rx_rings) {
-               err = -ENOMEM;
+       err = nfp_net_rx_rings_prepare(nn, &nn->dp);
+       if (err)
                goto err_cleanup_vec;
-       }
 
-       nn->tx_rings = nfp_net_tx_ring_set_prepare(nn, &tx,
-                                                  nn->num_stack_tx_rings);
-       if (!nn->tx_rings) {
-               err = -ENOMEM;
+       err = nfp_net_tx_rings_prepare(nn, &nn->dp);
+       if (err)
                goto err_free_rx_rings;
-       }
 
        for (r = 0; r < nn->max_r_vecs; r++)
-               nfp_net_vector_assign_rings(nn, &nn->r_vecs[r], r);
+               nfp_net_vector_assign_rings(&nn->dp, &nn->r_vecs[r], r);
 
-       err = netif_set_real_num_tx_queues(netdev, nn->num_stack_tx_rings);
+       err = netif_set_real_num_tx_queues(netdev, nn->dp.num_stack_tx_rings);
        if (err)
                goto err_free_rings;
 
-       err = netif_set_real_num_rx_queues(netdev, nn->num_rx_rings);
+       err = netif_set_real_num_rx_queues(netdev, nn->dp.num_rx_rings);
        if (err)
                goto err_free_rings;
 
@@ -2351,11 +2304,11 @@ static int nfp_net_netdev_open(struct net_device *netdev)
        return 0;
 
 err_free_rings:
-       nfp_net_tx_ring_set_free(nn, &tx);
+       nfp_net_tx_rings_free(&nn->dp);
 err_free_rx_rings:
-       nfp_net_rx_ring_set_free(nn, &rx, nn->xdp_prog);
+       nfp_net_rx_rings_free(&nn->dp);
 err_cleanup_vec:
-       r = nn->num_r_vecs;
+       r = nn->dp.num_r_vecs;
 err_cleanup_vec_p:
        while (r--)
                nfp_net_cleanup_vector(nn, &nn->r_vecs[r]);
@@ -2374,15 +2327,15 @@ static void nfp_net_close_stack(struct nfp_net *nn)
        unsigned int r;
 
        disable_irq(nn->irq_entries[NFP_NET_IRQ_LSC_IDX].vector);
-       netif_carrier_off(nn->netdev);
+       netif_carrier_off(nn->dp.netdev);
        nn->link_up = false;
 
-       for (r = 0; r < nn->num_r_vecs; r++) {
+       for (r = 0; r < nn->dp.num_r_vecs; r++) {
                disable_irq(nn->r_vecs[r].irq_vector);
                napi_disable(&nn->r_vecs[r].napi);
        }
 
-       netif_tx_disable(nn->netdev);
+       netif_tx_disable(nn->dp.netdev);
 }
 
 /**
@@ -2393,17 +2346,17 @@ static void nfp_net_close_free_all(struct nfp_net *nn)
 {
        unsigned int r;
 
-       for (r = 0; r < nn->num_rx_rings; r++) {
-               nfp_net_rx_ring_bufs_free(nn, &nn->rx_rings[r], nn->xdp_prog);
-               nfp_net_rx_ring_free(&nn->rx_rings[r]);
+       for (r = 0; r < nn->dp.num_rx_rings; r++) {
+               nfp_net_rx_ring_bufs_free(&nn->dp, &nn->dp.rx_rings[r]);
+               nfp_net_rx_ring_free(&nn->dp.rx_rings[r]);
        }
-       for (r = 0; r < nn->num_tx_rings; r++)
-               nfp_net_tx_ring_free(&nn->tx_rings[r]);
-       for (r = 0; r < nn->num_r_vecs; r++)
+       for (r = 0; r < nn->dp.num_tx_rings; r++)
+               nfp_net_tx_ring_free(&nn->dp.tx_rings[r]);
+       for (r = 0; r < nn->dp.num_r_vecs; r++)
                nfp_net_cleanup_vector(nn, &nn->r_vecs[r]);
 
-       kfree(nn->rx_rings);
-       kfree(nn->tx_rings);
+       kfree(nn->dp.rx_rings);
+       kfree(nn->dp.tx_rings);
 
        nfp_net_aux_irq_free(nn, NFP_NET_CFG_LSC, NFP_NET_IRQ_LSC_IDX);
        nfp_net_aux_irq_free(nn, NFP_NET_CFG_EXN, NFP_NET_IRQ_EXN_IDX);
@@ -2417,11 +2370,6 @@ static int nfp_net_netdev_close(struct net_device *netdev)
 {
        struct nfp_net *nn = netdev_priv(netdev);
 
-       if (!(nn->ctrl & NFP_NET_CFG_CTRL_ENABLE)) {
-               nn_err(nn, "Dev is not up: 0x%08x\n", nn->ctrl);
-               return 0;
-       }
-
        /* Step 1: Disable RX and TX rings from the Linux kernel perspective
         */
        nfp_net_close_stack(nn);
@@ -2443,7 +2391,7 @@ static void nfp_net_set_rx_mode(struct net_device *netdev)
        struct nfp_net *nn = netdev_priv(netdev);
        u32 new_ctrl;
 
-       new_ctrl = nn->ctrl;
+       new_ctrl = nn->dp.ctrl;
 
        if (netdev->flags & IFF_PROMISC) {
                if (nn->cap & NFP_NET_CFG_CTRL_PROMISC)
@@ -2454,13 +2402,13 @@ static void nfp_net_set_rx_mode(struct net_device *netdev)
                new_ctrl &= ~NFP_NET_CFG_CTRL_PROMISC;
        }
 
-       if (new_ctrl == nn->ctrl)
+       if (new_ctrl == nn->dp.ctrl)
                return;
 
        nn_writel(nn, NFP_NET_CFG_CTRL, new_ctrl);
        nfp_net_reconfig_post(nn, NFP_NET_CFG_UPDATE_GEN);
 
-       nn->ctrl = new_ctrl;
+       nn->dp.ctrl = new_ctrl;
 }
 
 static void nfp_net_rss_init_itbl(struct nfp_net *nn)
@@ -2469,61 +2417,76 @@ static void nfp_net_rss_init_itbl(struct nfp_net *nn)
 
        for (i = 0; i < sizeof(nn->rss_itbl); i++)
                nn->rss_itbl[i] =
-                       ethtool_rxfh_indir_default(i, nn->num_rx_rings);
+                       ethtool_rxfh_indir_default(i, nn->dp.num_rx_rings);
 }
 
-static int
-nfp_net_ring_swap_enable(struct nfp_net *nn, unsigned int *num_vecs,
-                        unsigned int *stack_tx_rings,
-                        struct bpf_prog **xdp_prog,
-                        struct nfp_net_ring_set *rx,
-                        struct nfp_net_ring_set *tx)
+static void nfp_net_dp_swap(struct nfp_net *nn, struct nfp_net_dp *dp)
+{
+       struct nfp_net_dp new_dp = *dp;
+
+       *dp = nn->dp;
+       nn->dp = new_dp;
+
+       nn->dp.netdev->mtu = new_dp.mtu;
+
+       if (!netif_is_rxfh_configured(nn->dp.netdev))
+               nfp_net_rss_init_itbl(nn);
+}
+
+static int nfp_net_dp_swap_enable(struct nfp_net *nn, struct nfp_net_dp *dp)
 {
        unsigned int r;
        int err;
 
-       if (rx)
-               nfp_net_rx_ring_set_swap(nn, rx);
-       if (tx)
-               nfp_net_tx_ring_set_swap(nn, tx);
-
-       swap(*num_vecs, nn->num_r_vecs);
-       swap(*stack_tx_rings, nn->num_stack_tx_rings);
-       *xdp_prog = xchg(&nn->xdp_prog, *xdp_prog);
+       nfp_net_dp_swap(nn, dp);
 
        for (r = 0; r < nn->max_r_vecs; r++)
-               nfp_net_vector_assign_rings(nn, &nn->r_vecs[r], r);
-
-       if (!netif_is_rxfh_configured(nn->netdev))
-               nfp_net_rss_init_itbl(nn);
+               nfp_net_vector_assign_rings(&nn->dp, &nn->r_vecs[r], r);
 
-       err = netif_set_real_num_rx_queues(nn->netdev,
-                                          nn->num_rx_rings);
+       err = netif_set_real_num_rx_queues(nn->dp.netdev, nn->dp.num_rx_rings);
        if (err)
                return err;
 
-       if (nn->netdev->real_num_tx_queues != nn->num_stack_tx_rings) {
-               err = netif_set_real_num_tx_queues(nn->netdev,
-                                                  nn->num_stack_tx_rings);
+       if (nn->dp.netdev->real_num_tx_queues != nn->dp.num_stack_tx_rings) {
+               err = netif_set_real_num_tx_queues(nn->dp.netdev,
+                                                  nn->dp.num_stack_tx_rings);
                if (err)
                        return err;
        }
 
-       return __nfp_net_set_config_and_enable(nn);
+       return nfp_net_set_config_and_enable(nn);
 }
 
-static int
-nfp_net_check_config(struct nfp_net *nn, struct bpf_prog *xdp_prog,
-                    struct nfp_net_ring_set *rx, struct nfp_net_ring_set *tx)
+struct nfp_net_dp *nfp_net_clone_dp(struct nfp_net *nn)
+{
+       struct nfp_net_dp *new;
+
+       new = kmalloc(sizeof(*new), GFP_KERNEL);
+       if (!new)
+               return NULL;
+
+       *new = nn->dp;
+
+       /* Clear things which need to be recomputed */
+       new->fl_bufsz = 0;
+       new->tx_rings = NULL;
+       new->rx_rings = NULL;
+       new->num_r_vecs = 0;
+       new->num_stack_tx_rings = 0;
+
+       return new;
+}
+
+static int nfp_net_check_config(struct nfp_net *nn, struct nfp_net_dp *dp)
 {
        /* XDP-enabled tests */
-       if (!xdp_prog)
+       if (!dp->xdp_prog)
                return 0;
-       if (rx && nfp_net_calc_fl_bufsz(nn, rx->mtu) > PAGE_SIZE) {
+       if (dp->fl_bufsz > PAGE_SIZE) {
                nn_warn(nn, "MTU too large w/ XDP enabled\n");
                return -EINVAL;
        }
-       if (tx && tx->n_rings > nn->max_tx_rings) {
+       if (dp->num_tx_rings > nn->max_tx_rings) {
                nn_warn(nn, "Insufficient number of TX rings w/ XDP enabled\n");
                return -EINVAL;
        }
@@ -2531,119 +2494,94 @@ nfp_net_check_config(struct nfp_net *nn, struct bpf_prog *xdp_prog,
        return 0;
 }
 
-static void
-nfp_net_ring_reconfig_down(struct nfp_net *nn, struct bpf_prog **xdp_prog,
-                          struct nfp_net_ring_set *rx,
-                          struct nfp_net_ring_set *tx,
-                          unsigned int stack_tx_rings, unsigned int num_vecs)
-{
-       nn->netdev->mtu = rx ? rx->mtu : nn->netdev->mtu;
-       nn->fl_bufsz = nfp_net_calc_fl_bufsz(nn, nn->netdev->mtu);
-       nn->rxd_cnt = rx ? rx->dcnt : nn->rxd_cnt;
-       nn->txd_cnt = tx ? tx->dcnt : nn->txd_cnt;
-       nn->num_rx_rings = rx ? rx->n_rings : nn->num_rx_rings;
-       nn->num_tx_rings = tx ? tx->n_rings : nn->num_tx_rings;
-       nn->num_stack_tx_rings = stack_tx_rings;
-       nn->num_r_vecs = num_vecs;
-       *xdp_prog = xchg(&nn->xdp_prog, *xdp_prog);
-
-       if (!netif_is_rxfh_configured(nn->netdev))
-               nfp_net_rss_init_itbl(nn);
-}
-
-int
-nfp_net_ring_reconfig(struct nfp_net *nn, struct bpf_prog **xdp_prog,
-                     struct nfp_net_ring_set *rx, struct nfp_net_ring_set *tx)
+int nfp_net_ring_reconfig(struct nfp_net *nn, struct nfp_net_dp *dp)
 {
-       unsigned int stack_tx_rings, num_vecs, r;
-       int err;
+       int r, err;
+
+       dp->fl_bufsz = nfp_net_calc_fl_bufsz(dp);
 
-       stack_tx_rings = tx ? tx->n_rings : nn->num_tx_rings;
-       if (*xdp_prog)
-               stack_tx_rings -= rx ? rx->n_rings : nn->num_rx_rings;
+       dp->num_stack_tx_rings = dp->num_tx_rings;
+       if (dp->xdp_prog)
+               dp->num_stack_tx_rings -= dp->num_rx_rings;
 
-       num_vecs = max(rx ? rx->n_rings : nn->num_rx_rings, stack_tx_rings);
+       dp->num_r_vecs = max(dp->num_rx_rings, dp->num_stack_tx_rings);
 
-       err = nfp_net_check_config(nn, *xdp_prog, rx, tx);
+       err = nfp_net_check_config(nn, dp);
        if (err)
-               return err;
+               goto exit_free_dp;
 
-       if (!netif_running(nn->netdev)) {
-               nfp_net_ring_reconfig_down(nn, xdp_prog, rx, tx,
-                                          stack_tx_rings, num_vecs);
-               return 0;
+       if (!netif_running(dp->netdev)) {
+               nfp_net_dp_swap(nn, dp);
+               err = 0;
+               goto exit_free_dp;
        }
 
        /* Prepare new rings */
-       for (r = nn->num_r_vecs; r < num_vecs; r++) {
+       for (r = nn->dp.num_r_vecs; r < dp->num_r_vecs; r++) {
                err = nfp_net_prepare_vector(nn, &nn->r_vecs[r], r);
                if (err) {
-                       num_vecs = r;
+                       dp->num_r_vecs = r;
                        goto err_cleanup_vecs;
                }
        }
-       if (rx) {
-               if (!nfp_net_rx_ring_set_prepare(nn, rx, *xdp_prog)) {
-                       err = -ENOMEM;
-                       goto err_cleanup_vecs;
-               }
-       }
-       if (tx) {
-               if (!nfp_net_tx_ring_set_prepare(nn, tx, stack_tx_rings)) {
-                       err = -ENOMEM;
-                       goto err_free_rx;
-               }
-       }
+
+       err = nfp_net_rx_rings_prepare(nn, dp);
+       if (err)
+               goto err_cleanup_vecs;
+
+       err = nfp_net_tx_rings_prepare(nn, dp);
+       if (err)
+               goto err_free_rx;
 
        /* Stop device, swap in new rings, try to start the firmware */
        nfp_net_close_stack(nn);
        nfp_net_clear_config_and_disable(nn);
 
-       err = nfp_net_ring_swap_enable(nn, &num_vecs, &stack_tx_rings,
-                                      xdp_prog, rx, tx);
+       err = nfp_net_dp_swap_enable(nn, dp);
        if (err) {
                int err2;
 
                nfp_net_clear_config_and_disable(nn);
 
                /* Try with old configuration and old rings */
-               err2 = nfp_net_ring_swap_enable(nn, &num_vecs, &stack_tx_rings,
-                                               xdp_prog, rx, tx);
+               err2 = nfp_net_dp_swap_enable(nn, dp);
                if (err2)
                        nn_err(nn, "Can't restore ring config - FW communication failed (%d,%d)\n",
                               err, err2);
        }
-       for (r = num_vecs - 1; r >= nn->num_r_vecs; r--)
+       for (r = dp->num_r_vecs - 1; r >= nn->dp.num_r_vecs; r--)
                nfp_net_cleanup_vector(nn, &nn->r_vecs[r]);
 
-       if (rx)
-               nfp_net_rx_ring_set_free(nn, rx, *xdp_prog);
-       if (tx)
-               nfp_net_tx_ring_set_free(nn, tx);
+       nfp_net_rx_rings_free(dp);
+       nfp_net_tx_rings_free(dp);
 
        nfp_net_open_stack(nn);
+exit_free_dp:
+       kfree(dp);
 
        return err;
 
 err_free_rx:
-       if (rx)
-               nfp_net_rx_ring_set_free(nn, rx, *xdp_prog);
+       nfp_net_rx_rings_free(dp);
 err_cleanup_vecs:
-       for (r = num_vecs - 1; r >= nn->num_r_vecs; r--)
+       for (r = dp->num_r_vecs - 1; r >= nn->dp.num_r_vecs; r--)
                nfp_net_cleanup_vector(nn, &nn->r_vecs[r]);
+       kfree(dp);
        return err;
 }
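
Note the ownership rule here: nfp_net_ring_reconfig() kfree()s dp on every path, success or failure, so callers build a throw-away copy and hand it off without touching it again. A hypothetical caller sketch (example_set_rxd_cnt() is not in the driver; it mirrors nfp_net_change_mtu() below and the ethtool paths):

    static int example_set_rxd_cnt(struct nfp_net *nn, u32 rxd_cnt)
    {
            struct nfp_net_dp *dp;

            dp = nfp_net_clone_dp(nn);      /* snapshot the live datapath */
            if (!dp)
                    return -ENOMEM;

            dp->rxd_cnt = rxd_cnt;          /* edit only the copy */

            /* Swaps the copy in (or rolls back) and always frees dp */
            return nfp_net_ring_reconfig(nn, dp);
    }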
 
 static int nfp_net_change_mtu(struct net_device *netdev, int new_mtu)
 {
        struct nfp_net *nn = netdev_priv(netdev);
-       struct nfp_net_ring_set rx = {
-               .n_rings = nn->num_rx_rings,
-               .mtu = new_mtu,
-               .dcnt = nn->rxd_cnt,
-       };
+       struct nfp_net_dp *dp;
+
+       dp = nfp_net_clone_dp(nn);
+       if (!dp)
+               return -ENOMEM;
+
+       dp->mtu = new_mtu;
 
-       return nfp_net_ring_reconfig(nn, &nn->xdp_prog, &rx, NULL);
+       return nfp_net_ring_reconfig(nn, dp);
 }
 
 static void nfp_net_stat64(struct net_device *netdev,
@@ -2652,7 +2590,7 @@ static void nfp_net_stat64(struct net_device *netdev,
        struct nfp_net *nn = netdev_priv(netdev);
        int r;
 
-       for (r = 0; r < nn->num_r_vecs; r++) {
+       for (r = 0; r < nn->dp.num_r_vecs; r++) {
                struct nfp_net_r_vector *r_vec = &nn->r_vecs[r];
                u64 data[3];
                unsigned int start;
@@ -2699,7 +2637,7 @@ nfp_net_setup_tc(struct net_device *netdev, u32 handle, __be16 proto,
                return -ENOTSUPP;
 
        if (tc->type == TC_SETUP_CLSBPF && nfp_net_ebpf_capable(nn)) {
-               if (!nn->bpf_offload_xdp)
+               if (!nn->dp.bpf_offload_xdp)
                        return nfp_net_bpf_offload(nn, tc->cls_bpf);
                else
                        return -EBUSY;
@@ -2718,7 +2656,7 @@ static int nfp_net_set_features(struct net_device *netdev,
 
        /* Assume this is not called with features we have not advertised */
 
-       new_ctrl = nn->ctrl;
+       new_ctrl = nn->dp.ctrl;
 
        if (changed & NETIF_F_RXCSUM) {
                if (features & NETIF_F_RXCSUM)
@@ -2762,7 +2700,7 @@ static int nfp_net_set_features(struct net_device *netdev,
                        new_ctrl &= ~NFP_NET_CFG_CTRL_GATHER;
        }
 
-       if (changed & NETIF_F_HW_TC && nn->ctrl & NFP_NET_CFG_CTRL_BPF) {
+       if (changed & NETIF_F_HW_TC && nn->dp.ctrl & NFP_NET_CFG_CTRL_BPF) {
                nn_err(nn, "Cannot disable HW TC offload while in use\n");
                return -EBUSY;
        }
@@ -2770,16 +2708,16 @@ static int nfp_net_set_features(struct net_device *netdev,
        nn_dbg(nn, "Feature change 0x%llx -> 0x%llx (changed=0x%llx)\n",
               netdev->features, features, changed);
 
-       if (new_ctrl == nn->ctrl)
+       if (new_ctrl == nn->dp.ctrl)
                return 0;
 
-       nn_dbg(nn, "NIC ctrl: 0x%x -> 0x%x\n", nn->ctrl, new_ctrl);
+       nn_dbg(nn, "NIC ctrl: 0x%x -> 0x%x\n", nn->dp.ctrl, new_ctrl);
        nn_writel(nn, NFP_NET_CFG_CTRL, new_ctrl);
        err = nfp_net_reconfig(nn, NFP_NET_CFG_UPDATE_GEN);
        if (err)
                return err;
 
-       nn->ctrl = new_ctrl;
+       nn->dp.ctrl = new_ctrl;
 
        return 0;
 }
@@ -2830,6 +2768,26 @@ nfp_net_features_check(struct sk_buff *skb, struct net_device *dev,
        return features;
 }
 
+static int
+nfp_net_get_phys_port_name(struct net_device *netdev, char *name, size_t len)
+{
+       struct nfp_net *nn = netdev_priv(netdev);
+       int err;
+
+       if (!nn->eth_port)
+               return -EOPNOTSUPP;
+
+       if (!nn->eth_port->is_split)
+               err = snprintf(name, len, "p%d", nn->eth_port->label_port);
+       else
+               err = snprintf(name, len, "p%ds%d", nn->eth_port->label_port,
+                              nn->eth_port->label_subport);
+       if (err >= len)
+               return -EINVAL;
+
+       return 0;
+}
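
The names produced above are "p<port>" for a whole physical port and "p<port>s<subport>" for a split one. A stand-alone demo of the format (userspace, purely illustrative):

    #include <stdio.h>

    int main(void)
    {
            char name[16];

            snprintf(name, sizeof(name), "p%d", 0);         /* "p0"   */
            puts(name);
            snprintf(name, sizeof(name), "p%ds%d", 2, 1);   /* "p2s1" */
            puts(name);
            return 0;
    }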
+
 /**
  * nfp_net_set_vxlan_port() - set vxlan port in SW and reconfigure HW
  * @nn:   NFP Net device to reconfigure
@@ -2842,7 +2800,7 @@ static void nfp_net_set_vxlan_port(struct nfp_net *nn, int idx, __be16 port)
 
        nn->vxlan_ports[idx] = port;
 
-       if (!(nn->ctrl & NFP_NET_CFG_CTRL_VXLAN))
+       if (!(nn->dp.ctrl & NFP_NET_CFG_CTRL_VXLAN))
                return;
 
        BUILD_BUG_ON(NFP_NET_N_VXLAN_PORTS & 1);
@@ -2921,8 +2879,8 @@ static int nfp_net_xdp_offload(struct nfp_net *nn, struct bpf_prog *prog)
        if (!nfp_net_ebpf_capable(nn))
                return -EINVAL;
 
-       if (nn->ctrl & NFP_NET_CFG_CTRL_BPF) {
-               if (!nn->bpf_offload_xdp)
+       if (nn->dp.ctrl & NFP_NET_CFG_CTRL_BPF) {
+               if (!nn->dp.bpf_offload_xdp)
                        return prog ? -EBUSY : 0;
                cmd.command = prog ? TC_CLSBPF_REPLACE : TC_CLSBPF_DESTROY;
        } else {
@@ -2935,48 +2893,47 @@ static int nfp_net_xdp_offload(struct nfp_net *nn, struct bpf_prog *prog)
        /* Stop offload if replace not possible */
        if (ret && cmd.command == TC_CLSBPF_REPLACE)
                nfp_net_xdp_offload(nn, NULL);
-       nn->bpf_offload_xdp = prog && !ret;
+       nn->dp.bpf_offload_xdp = prog && !ret;
        return ret;
 }
 
 static int nfp_net_xdp_setup(struct nfp_net *nn, struct bpf_prog *prog)
 {
-       struct nfp_net_ring_set rx = {
-               .n_rings = nn->num_rx_rings,
-               .mtu = nn->netdev->mtu,
-               .dcnt = nn->rxd_cnt,
-       };
-       struct nfp_net_ring_set tx = {
-               .n_rings = nn->num_tx_rings,
-               .dcnt = nn->txd_cnt,
-       };
+       struct bpf_prog *old_prog = nn->dp.xdp_prog;
+       struct nfp_net_dp *dp;
        int err;
 
-       if (prog && prog->xdp_adjust_head) {
-               nn_err(nn, "Does not support bpf_xdp_adjust_head()\n");
-               return -EOPNOTSUPP;
-       }
-       if (!prog && !nn->xdp_prog)
+       if (!prog && !nn->dp.xdp_prog)
                return 0;
-       if (prog && nn->xdp_prog) {
-               prog = xchg(&nn->xdp_prog, prog);
+       if (prog && nn->dp.xdp_prog) {
+               prog = xchg(&nn->dp.xdp_prog, prog);
                bpf_prog_put(prog);
-               nfp_net_xdp_offload(nn, nn->xdp_prog);
+               nfp_net_xdp_offload(nn, nn->dp.xdp_prog);
                return 0;
        }
 
-       tx.n_rings += prog ? nn->num_rx_rings : -nn->num_rx_rings;
+       dp = nfp_net_clone_dp(nn);
+       if (!dp)
+               return -ENOMEM;
+
+       dp->xdp_prog = prog;
+       dp->num_tx_rings += prog ? nn->dp.num_rx_rings : -nn->dp.num_rx_rings;
+       dp->rx_dma_dir = prog ? DMA_BIDIRECTIONAL : DMA_FROM_DEVICE;
+       if (prog)
+               dp->rx_dma_off = XDP_PACKET_HEADROOM -
+                       (nn->dp.rx_offset ?: NFP_NET_MAX_PREPEND);
+       else
+               dp->rx_dma_off = 0;
 
        /* We need RX reconfig to remap the buffers (BIDIR vs FROM_DEV) */
-       err = nfp_net_ring_reconfig(nn, &prog, &rx, &tx);
+       err = nfp_net_ring_reconfig(nn, dp);
        if (err)
                return err;
 
-       /* @prog got swapped and is now the old one */
-       if (prog)
-               bpf_prog_put(prog);
+       if (old_prog)
+               bpf_prog_put(old_prog);
 
-       nfp_net_xdp_offload(nn, nn->xdp_prog);
+       nfp_net_xdp_offload(nn, nn->dp.xdp_prog);
 
        return 0;
 }
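
With XDP attached, rx_dma_off above grows the DMA mapping by the headroom XDP wants minus the prepend space the NIC already reserves. Stand-alone arithmetic; the rx_offset value is an illustrative assumption, the driver reads the real one from the BAR:

    #include <stdio.h>

    int main(void)
    {
            unsigned int xdp_headroom = 256;        /* XDP_PACKET_HEADROOM */
            unsigned int rx_offset = 64;            /* illustrative prepend */

            printf("rx_dma_off=%u\n", xdp_headroom - rx_offset);    /* 192 */
            return 0;
    }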
@@ -2989,7 +2946,7 @@ static int nfp_net_xdp(struct net_device *netdev, struct netdev_xdp *xdp)
        case XDP_SETUP_PROG:
                return nfp_net_xdp_setup(nn, xdp->prog);
        case XDP_QUERY_PROG:
-               xdp->prog_attached = !!nn->xdp_prog;
+               xdp->prog_attached = !!nn->dp.xdp_prog;
                return 0;
        default:
                return -EINVAL;
@@ -3008,6 +2965,7 @@ static const struct net_device_ops nfp_net_netdev_ops = {
        .ndo_set_mac_address    = eth_mac_addr,
        .ndo_set_features       = nfp_net_set_features,
        .ndo_features_check     = nfp_net_features_check,
+       .ndo_get_phys_port_name = nfp_net_get_phys_port_name,
        .ndo_udp_tunnel_add     = nfp_net_add_vxlan_port,
        .ndo_udp_tunnel_del     = nfp_net_del_vxlan_port,
        .ndo_xdp                = nfp_net_xdp,
@@ -3020,9 +2978,9 @@ static const struct net_device_ops nfp_net_netdev_ops = {
 void nfp_net_info(struct nfp_net *nn)
 {
        nn_info(nn, "Netronome NFP-6xxx %sNetdev: TxQs=%d/%d RxQs=%d/%d\n",
-               nn->is_vf ? "VF " : "",
-               nn->num_tx_rings, nn->max_tx_rings,
-               nn->num_rx_rings, nn->max_rx_rings);
+               nn->dp.is_vf ? "VF " : "",
+               nn->dp.num_tx_rings, nn->max_tx_rings,
+               nn->dp.num_rx_rings, nn->max_rx_rings);
        nn_info(nn, "VER: %d.%d.%d.%d, Maximum supported MTU: %d\n",
                nn->fw_ver.resv, nn->fw_ver.class,
                nn->fw_ver.major, nn->fw_ver.minor,
@@ -3074,21 +3032,24 @@ struct nfp_net *nfp_net_netdev_alloc(struct pci_dev *pdev,
        SET_NETDEV_DEV(netdev, &pdev->dev);
        nn = netdev_priv(netdev);
 
-       nn->netdev = netdev;
+       nn->dp.netdev = netdev;
+       nn->dp.dev = &pdev->dev;
        nn->pdev = pdev;
 
        nn->max_tx_rings = max_tx_rings;
        nn->max_rx_rings = max_rx_rings;
 
-       nn->num_tx_rings = min_t(unsigned int, max_tx_rings, num_online_cpus());
-       nn->num_rx_rings = min_t(unsigned int, max_rx_rings,
+       nn->dp.num_tx_rings = min_t(unsigned int,
+                                   max_tx_rings, num_online_cpus());
+       nn->dp.num_rx_rings = min_t(unsigned int, max_rx_rings,
                                 netif_get_num_default_rss_queues());
 
-       nn->num_r_vecs = max(nn->num_tx_rings, nn->num_rx_rings);
-       nn->num_r_vecs = min_t(unsigned int, nn->num_r_vecs, num_online_cpus());
+       nn->dp.num_r_vecs = max(nn->dp.num_tx_rings, nn->dp.num_rx_rings);
+       nn->dp.num_r_vecs = min_t(unsigned int,
+                                 nn->dp.num_r_vecs, num_online_cpus());
 
-       nn->txd_cnt = NFP_NET_TX_DESCS_DEFAULT;
-       nn->rxd_cnt = NFP_NET_RX_DESCS_DEFAULT;
+       nn->dp.txd_cnt = NFP_NET_TX_DESCS_DEFAULT;
+       nn->dp.rxd_cnt = NFP_NET_RX_DESCS_DEFAULT;
 
        spin_lock_init(&nn->reconfig_lock);
        spin_lock_init(&nn->rx_filter_lock);
@@ -3108,7 +3069,28 @@ struct nfp_net *nfp_net_netdev_alloc(struct pci_dev *pdev,
  */
 void nfp_net_netdev_free(struct nfp_net *nn)
 {
-       free_netdev(nn->netdev);
+       free_netdev(nn->dp.netdev);
+}
+
+/**
+ * nfp_net_rss_key_sz() - Get current size of the RSS key
+ * @nn:        NFP Net device instance
+ *
+ * Return: size of the RSS key for currently selected hash function.
+ */
+unsigned int nfp_net_rss_key_sz(struct nfp_net *nn)
+{
+       switch (nn->rss_hfunc) {
+       case ETH_RSS_HASH_TOP:
+               return NFP_NET_CFG_RSS_KEY_SZ;
+       case ETH_RSS_HASH_XOR:
+               return 0;
+       case ETH_RSS_HASH_CRC32:
+               return 4;
+       }
+
+       nn_warn(nn, "Unknown hash function: %u\n", nn->rss_hfunc);
+       return 0;
 }
 
 /**
@@ -3117,14 +3099,32 @@ void nfp_net_netdev_free(struct nfp_net *nn)
  */
 static void nfp_net_rss_init(struct nfp_net *nn)
 {
-       netdev_rss_key_fill(nn->rss_key, NFP_NET_CFG_RSS_KEY_SZ);
+       unsigned long func_bit, rss_cap_hfunc;
+       u32 reg;
+
+       /* Read the RSS function capability and select first supported func */
+       reg = nn_readl(nn, NFP_NET_CFG_RSS_CAP);
+       rss_cap_hfunc = FIELD_GET(NFP_NET_CFG_RSS_CAP_HFUNC, reg);
+       if (!rss_cap_hfunc)
+               rss_cap_hfunc = FIELD_GET(NFP_NET_CFG_RSS_CAP_HFUNC,
+                                         NFP_NET_CFG_RSS_TOEPLITZ);
+
+       func_bit = find_first_bit(&rss_cap_hfunc, NFP_NET_CFG_RSS_HFUNCS);
+       if (func_bit == NFP_NET_CFG_RSS_HFUNCS) {
+               dev_warn(nn->dp.dev,
+                        "Bad RSS config, defaulting to Toeplitz hash\n");
+               func_bit = ETH_RSS_HASH_TOP_BIT;
+       }
+       nn->rss_hfunc = 1 << func_bit;
+
+       netdev_rss_key_fill(nn->rss_key, nfp_net_rss_key_sz(nn));
 
        nfp_net_rss_init_itbl(nn);
 
        /* Enable IPv4/IPv6 TCP by default */
        nn->rss_cfg = NFP_NET_CFG_RSS_IPV4_TCP |
                      NFP_NET_CFG_RSS_IPV6_TCP |
-                     NFP_NET_CFG_RSS_TOEPLITZ |
+                     FIELD_PREP(NFP_NET_CFG_RSS_HFUNC, nn->rss_hfunc) |
                      NFP_NET_CFG_RSS_MASK;
 }
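
The selection logic above reduces to: take the firmware's capability bitmap, substitute Toeplitz when an old firmware reports nothing, and pick the lowest set bit. A stand-alone sketch of that pick (function name is illustrative):

    /* Bit 0 = Toeplitz, bit 1 = XOR, bit 2 = CRC32, as in the control BAR */
    static unsigned int pick_hfunc(unsigned long cap_bits)
    {
            if (!cap_bits)                  /* old FW: assume Toeplitz */
                    cap_bits = 1;

            return 1u << __builtin_ctzl(cap_bits);  /* lowest set bit */
    }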
 
@@ -3151,6 +3151,17 @@ int nfp_net_netdev_init(struct net_device *netdev)
        struct nfp_net *nn = netdev_priv(netdev);
        int err;
 
+       /* XDP calls for 256 bytes of packet headroom, which wouldn't fit in
+        * a u8.  We, however, reuse the metadata prepend space for XDP
+        * buffers, which is at least 1 byte long.  As long as XDP headroom
+        * doesn't increase above 256, the *extra* XDP headroom will fit in
+        * 8 bits.
+        */
+       BUILD_BUG_ON(XDP_PACKET_HEADROOM > 256);
+
+       nn->dp.chained_metadata_format = nn->fw_ver.major > 3;
+
+       nn->dp.rx_dma_dir = DMA_FROM_DEVICE;
+
        /* Get some of the read-only fields from the BAR */
        nn->cap = nn_readl(nn, NFP_NET_CFG_CAP);
        nn->max_mtu = nn_readl(nn, NFP_NET_CFG_MAX_MTU);
@@ -3158,17 +3169,26 @@ int nfp_net_netdev_init(struct net_device *netdev)
        nfp_net_write_mac_addr(nn);
 
        /* Determine RX packet/metadata boundary offset */
-       if (nn->fw_ver.major >= 2)
-               nn->rx_offset = nn_readl(nn, NFP_NET_CFG_RX_OFFSET);
-       else
-               nn->rx_offset = NFP_NET_RX_OFFSET;
+       if (nn->fw_ver.major >= 2) {
+               u32 reg;
+
+               reg = nn_readl(nn, NFP_NET_CFG_RX_OFFSET);
+               if (reg > NFP_NET_MAX_PREPEND) {
+                       nn_err(nn, "Invalid rx offset: %d\n", reg);
+                       return -EINVAL;
+               }
+               nn->dp.rx_offset = reg;
+       } else {
+               nn->dp.rx_offset = NFP_NET_RX_OFFSET;
+       }
 
        /* Set default MTU and Freelist buffer size */
        if (nn->max_mtu < NFP_NET_DEFAULT_MTU)
                netdev->mtu = nn->max_mtu;
        else
                netdev->mtu = NFP_NET_DEFAULT_MTU;
-       nn->fl_bufsz = nfp_net_calc_fl_bufsz(nn, netdev->mtu);
+       nn->dp.mtu = netdev->mtu;
+       nn->dp.fl_bufsz = nfp_net_calc_fl_bufsz(&nn->dp);
 
        /* Advertise/enable offloads based on capabilities
         *
@@ -3179,31 +3199,31 @@ int nfp_net_netdev_init(struct net_device *netdev)
        netdev->hw_features = NETIF_F_HIGHDMA;
        if (nn->cap & NFP_NET_CFG_CTRL_RXCSUM) {
                netdev->hw_features |= NETIF_F_RXCSUM;
-               nn->ctrl |= NFP_NET_CFG_CTRL_RXCSUM;
+               nn->dp.ctrl |= NFP_NET_CFG_CTRL_RXCSUM;
        }
        if (nn->cap & NFP_NET_CFG_CTRL_TXCSUM) {
                netdev->hw_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
-               nn->ctrl |= NFP_NET_CFG_CTRL_TXCSUM;
+               nn->dp.ctrl |= NFP_NET_CFG_CTRL_TXCSUM;
        }
        if (nn->cap & NFP_NET_CFG_CTRL_GATHER) {
                netdev->hw_features |= NETIF_F_SG;
-               nn->ctrl |= NFP_NET_CFG_CTRL_GATHER;
+               nn->dp.ctrl |= NFP_NET_CFG_CTRL_GATHER;
        }
        if ((nn->cap & NFP_NET_CFG_CTRL_LSO) && nn->fw_ver.major > 2) {
                netdev->hw_features |= NETIF_F_TSO | NETIF_F_TSO6;
-               nn->ctrl |= NFP_NET_CFG_CTRL_LSO;
+               nn->dp.ctrl |= NFP_NET_CFG_CTRL_LSO;
        }
        if (nn->cap & NFP_NET_CFG_CTRL_RSS) {
                netdev->hw_features |= NETIF_F_RXHASH;
                nfp_net_rss_init(nn);
-               nn->ctrl |= NFP_NET_CFG_CTRL_RSS;
+               nn->dp.ctrl |= NFP_NET_CFG_CTRL_RSS;
        }
        if (nn->cap & NFP_NET_CFG_CTRL_VXLAN &&
            nn->cap & NFP_NET_CFG_CTRL_NVGRE) {
                if (nn->cap & NFP_NET_CFG_CTRL_LSO)
                        netdev->hw_features |= NETIF_F_GSO_GRE |
                                               NETIF_F_GSO_UDP_TUNNEL;
-               nn->ctrl |= NFP_NET_CFG_CTRL_VXLAN | NFP_NET_CFG_CTRL_NVGRE;
+               nn->dp.ctrl |= NFP_NET_CFG_CTRL_VXLAN | NFP_NET_CFG_CTRL_NVGRE;
 
                netdev->hw_enc_features = netdev->hw_features;
        }
@@ -3212,11 +3232,11 @@ int nfp_net_netdev_init(struct net_device *netdev)
 
        if (nn->cap & NFP_NET_CFG_CTRL_RXVLAN) {
                netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_RX;
-               nn->ctrl |= NFP_NET_CFG_CTRL_RXVLAN;
+               nn->dp.ctrl |= NFP_NET_CFG_CTRL_RXVLAN;
        }
        if (nn->cap & NFP_NET_CFG_CTRL_TXVLAN) {
                netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_TX;
-               nn->ctrl |= NFP_NET_CFG_CTRL_TXVLAN;
+               nn->dp.ctrl |= NFP_NET_CFG_CTRL_TXVLAN;
        }
 
        netdev->features = netdev->hw_features;
@@ -3229,14 +3249,14 @@ int nfp_net_netdev_init(struct net_device *netdev)
 
        /* Allow L2 Broadcast and Multicast through by default, if supported */
        if (nn->cap & NFP_NET_CFG_CTRL_L2BC)
-               nn->ctrl |= NFP_NET_CFG_CTRL_L2BC;
+               nn->dp.ctrl |= NFP_NET_CFG_CTRL_L2BC;
        if (nn->cap & NFP_NET_CFG_CTRL_L2MC)
-               nn->ctrl |= NFP_NET_CFG_CTRL_L2MC;
+               nn->dp.ctrl |= NFP_NET_CFG_CTRL_L2MC;
 
        /* Allow IRQ moderation, if supported */
        if (nn->cap & NFP_NET_CFG_CTRL_IRQMOD) {
                nfp_net_irqmod_init(nn);
-               nn->ctrl |= NFP_NET_CFG_CTRL_IRQMOD;
+               nn->dp.ctrl |= NFP_NET_CFG_CTRL_IRQMOD;
        }
 
        /* Stash the re-configuration queue away.  First odd queue in TX Bar */
@@ -3275,9 +3295,10 @@ void nfp_net_netdev_clean(struct net_device *netdev)
 {
        struct nfp_net *nn = netdev_priv(netdev);
 
-       if (nn->xdp_prog)
-               bpf_prog_put(nn->xdp_prog);
-       if (nn->bpf_offload_xdp)
+       unregister_netdev(nn->dp.netdev);
+
+       if (nn->dp.xdp_prog)
+               bpf_prog_put(nn->dp.xdp_prog);
+       if (nn->dp.bpf_offload_xdp)
                nfp_net_xdp_offload(nn, NULL);
-       unregister_netdev(nn->netdev);
 }
index 385ba355c965c35cf81ecd09f25e3c70c29b76e7..d04ccc9f61162c73bbe67f68cf5dfaed5ecb4a69 100644 (file)
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2015 Netronome Systems, Inc.
+ * Copyright (C) 2015-2017 Netronome Systems, Inc.
  *
  * This software is dual licensed under the GNU General Public License Version 2,
  * June 1991 as shown in the file COPYING in the top-level directory of this
 #define   NFP_NET_CFG_VERSION_MINOR(x)    (((x) & 0xff) <<  0)
 #define NFP_NET_CFG_STS                 0x0034
 #define   NFP_NET_CFG_STS_LINK            (0x1 << 0) /* Link up or down */
+/* Link rate */
+#define   NFP_NET_CFG_STS_LINK_RATE_SHIFT 1
+#define   NFP_NET_CFG_STS_LINK_RATE_MASK  0xF
+#define   NFP_NET_CFG_STS_LINK_RATE       \
+       (NFP_NET_CFG_STS_LINK_RATE_MASK << NFP_NET_CFG_STS_LINK_RATE_SHIFT)
+#define   NFP_NET_CFG_STS_LINK_RATE_UNSUPPORTED   0
+#define   NFP_NET_CFG_STS_LINK_RATE_UNKNOWN       1
+#define   NFP_NET_CFG_STS_LINK_RATE_1G            2
+#define   NFP_NET_CFG_STS_LINK_RATE_10G           3
+#define   NFP_NET_CFG_STS_LINK_RATE_25G           4
+#define   NFP_NET_CFG_STS_LINK_RATE_40G           5
+#define   NFP_NET_CFG_STS_LINK_RATE_50G           6
+#define   NFP_NET_CFG_STS_LINK_RATE_100G          7
 #define NFP_NET_CFG_CAP                 0x0038
 #define NFP_NET_CFG_MAX_TXRINGS         0x003c
 #define NFP_NET_CFG_MAX_RXRINGS         0x0040
 #define NFP_NET_CFG_RX_OFFSET          0x0050
 #define NFP_NET_CFG_RX_OFFSET_DYNAMIC          0       /* Prepend mode */
 
+/**
+ * RSS capabilities
+ * @NFP_NET_CFG_RSS_CAP_HFUNC: supported hash functions (same bits as
+ *                             @NFP_NET_CFG_RSS_HFUNC)
+ */
+#define NFP_NET_CFG_RSS_CAP            0x0054
+#define   NFP_NET_CFG_RSS_CAP_HFUNC      0xff000000
+
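
The same 0xff000000 field carries the hash-function bits in both this capability word and the RSS control word below, so FIELD_GET()/FIELD_PREP() round-trip values between them. A stand-alone illustration of the field math:

    #include <stdint.h>
    #include <stdio.h>

    #define RSS_HFUNC_MASK 0xff000000u

    static uint32_t hfunc_get(uint32_t reg)  { return (reg & RSS_HFUNC_MASK) >> 24; }
    static uint32_t hfunc_prep(uint32_t val) { return (val << 24) & RSS_HFUNC_MASK; }

    int main(void)
    {
            /* Toeplitz (bit 0) + CRC32 (bit 2) advertised */
            uint32_t cap = hfunc_prep((1 << 0) | (1 << 2));

            printf("funcs=%#x\n", hfunc_get(cap));  /* funcs=0x5 */
            return 0;
    }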
 /**
  * VXLAN/UDP encap configuration
  * @NFP_NET_CFG_VXLAN_PORT:    Base address of table of tunnels' UDP dst ports
 #define   NFP_NET_CFG_RSS_IPV4_UDP        (1 << 11) /* RSS for IPv4/UDP */
 #define   NFP_NET_CFG_RSS_IPV6_TCP        (1 << 12) /* RSS for IPv6/TCP */
 #define   NFP_NET_CFG_RSS_IPV6_UDP        (1 << 13) /* RSS for IPv6/UDP */
+#define   NFP_NET_CFG_RSS_HFUNC           0xff000000
 #define   NFP_NET_CFG_RSS_TOEPLITZ        (1 << 24) /* Use Toeplitz hash */
+#define   NFP_NET_CFG_RSS_XOR             (1 << 25) /* Use XOR as hash */
+#define   NFP_NET_CFG_RSS_CRC32           (1 << 26) /* Use CRC32 as hash */
+#define   NFP_NET_CFG_RSS_HFUNCS          3
 #define NFP_NET_CFG_RSS_KEY             (NFP_NET_CFG_RSS_BASE + 0x4)
 #define NFP_NET_CFG_RSS_KEY_SZ          0x28
 #define NFP_NET_CFG_RSS_ITBL            (NFP_NET_CFG_RSS_BASE + 0x4 + \
index 6e9372a1837579928bb24b5435e2062dc0c534b8..4077c59bf782ea595420c1b72ad86f72ebbac1a4 100644 (file)
@@ -40,9 +40,9 @@ static struct dentry *nfp_dir;
 
 static int nfp_net_debugfs_rx_q_read(struct seq_file *file, void *data)
 {
-       int fl_rd_p, fl_wr_p, rx_rd_p, rx_wr_p, rxd_cnt;
        struct nfp_net_r_vector *r_vec = file->private;
        struct nfp_net_rx_ring *rx_ring;
+       int fl_rd_p, fl_wr_p, rxd_cnt;
        struct nfp_net_rx_desc *rxd;
        struct nfp_net *nn;
        void *frag;
@@ -54,19 +54,18 @@ static int nfp_net_debugfs_rx_q_read(struct seq_file *file, void *data)
                goto out;
        nn = r_vec->nfp_net;
        rx_ring = r_vec->rx_ring;
-       if (!netif_running(nn->netdev))
+       if (!netif_running(nn->dp.netdev))
                goto out;
 
        rxd_cnt = rx_ring->cnt;
 
        fl_rd_p = nfp_qcp_rd_ptr_read(rx_ring->qcp_fl);
        fl_wr_p = nfp_qcp_wr_ptr_read(rx_ring->qcp_fl);
-       rx_rd_p = nfp_qcp_rd_ptr_read(rx_ring->qcp_rx);
-       rx_wr_p = nfp_qcp_wr_ptr_read(rx_ring->qcp_rx);
 
-       seq_printf(file, "RX[%02d]: H_RD=%d H_WR=%d FL_RD=%d FL_WR=%d RX_RD=%d RX_WR=%d\n",
-                  rx_ring->idx, rx_ring->rd_p, rx_ring->wr_p,
-                  fl_rd_p, fl_wr_p, rx_rd_p, rx_wr_p);
+       seq_printf(file, "RX[%02d,%02d]: cnt=%d dma=%pad host=%p   H_RD=%d H_WR=%d FL_RD=%d FL_WR=%d\n",
+                  rx_ring->idx, rx_ring->fl_qcidx,
+                  rx_ring->cnt, &rx_ring->dma, rx_ring->rxds,
+                  rx_ring->rd_p, rx_ring->wr_p, fl_rd_p, fl_wr_p);
 
        for (i = 0; i < rxd_cnt; i++) {
                rxd = &rx_ring->rxds[i];
@@ -89,10 +88,6 @@ static int nfp_net_debugfs_rx_q_read(struct seq_file *file, void *data)
                        seq_puts(file, " FL_RD");
                if (i == fl_wr_p % rxd_cnt)
                        seq_puts(file, " FL_WR");
-               if (i == rx_rd_p % rxd_cnt)
-                       seq_puts(file, " RX_RD");
-               if (i == rx_wr_p % rxd_cnt)
-                       seq_puts(file, " RX_WR");
 
                seq_putc(file, '\n');
        }
@@ -143,7 +138,7 @@ static int nfp_net_debugfs_tx_q_read(struct seq_file *file, void *data)
        if (!r_vec->nfp_net || !tx_ring)
                goto out;
        nn = r_vec->nfp_net;
-       if (!netif_running(nn->netdev))
+       if (!netif_running(nn->dp.netdev))
                goto out;
 
        txd_cnt = tx_ring->cnt;
@@ -151,8 +146,11 @@ static int nfp_net_debugfs_tx_q_read(struct seq_file *file, void *data)
        d_rd_p = nfp_qcp_rd_ptr_read(tx_ring->qcp_q);
        d_wr_p = nfp_qcp_wr_ptr_read(tx_ring->qcp_q);
 
-       seq_printf(file, "TX[%02d]: H_RD=%d H_WR=%d D_RD=%d D_WR=%d\n",
-                  tx_ring->idx, tx_ring->rd_p, tx_ring->wr_p, d_rd_p, d_wr_p);
+       seq_printf(file, "TX[%02d,%02d%s]: cnt=%d dma=%pad host=%p   H_RD=%d H_WR=%d D_RD=%d D_WR=%d\n",
+                  tx_ring->idx, tx_ring->qcidx,
+                  tx_ring == r_vec->tx_ring ? "" : "xdp",
+                  tx_ring->cnt, &tx_ring->dma, tx_ring->txds,
+                  tx_ring->rd_p, tx_ring->wr_p, d_rd_p, d_wr_p);
 
        for (i = 0; i < txd_cnt; i++) {
                txd = &tx_ring->txds[i];
index 2649f7523c81f11ddbb9c0b9bdba78dd220d7c6a..3328041ec290915866160f90a228b9a33dac0605 100644 (file)
@@ -40,6 +40,7 @@
  *          Brad Petrus <brad.petrus@netronome.com>
  */
 
+#include <linux/bitfield.h>
 #include <linux/kernel.h>
 #include <linux/netdevice.h>
 #include <linux/etherdevice.h>
@@ -48,6 +49,7 @@
 #include <linux/ethtool.h>
 
 #include "nfpcore/nfp.h"
+#include "nfpcore/nfp_nsp.h"
 #include "nfp_net_ctrl.h"
 #include "nfp_net.h"
 
@@ -126,9 +128,9 @@ static const struct _nfp_net_et_stats nfp_net_et_stats[] = {
 };
 
 #define NN_ET_GLOBAL_STATS_LEN ARRAY_SIZE(nfp_net_et_stats)
-#define NN_ET_RVEC_STATS_LEN (nn->num_r_vecs * 3)
+#define NN_ET_RVEC_STATS_LEN (nn->dp.num_r_vecs * 3)
 #define NN_ET_RVEC_GATHER_STATS 7
-#define NN_ET_QUEUE_STATS_LEN ((nn->num_tx_rings + nn->num_rx_rings) * 2)
+#define NN_ET_QUEUE_STATS_LEN ((nn->dp.num_tx_rings + nn->dp.num_rx_rings) * 2)
 #define NN_ET_STATS_LEN (NN_ET_GLOBAL_STATS_LEN + NN_ET_RVEC_GATHER_STATS + \
                         NN_ET_RVEC_STATS_LEN + NN_ET_QUEUE_STATS_LEN)
 
@@ -172,6 +174,114 @@ static void nfp_net_get_drvinfo(struct net_device *netdev,
        drvinfo->regdump_len = NFP_NET_CFG_BAR_SZ;
 }
 
+/**
+ * nfp_net_get_link_ksettings - Get Link Speed settings
+ * @netdev:    network interface device structure
+ * @cmd:       ethtool command
+ *
+ * Reports speed settings based on info in the BAR provided by the fw.
+ */
+static int
+nfp_net_get_link_ksettings(struct net_device *netdev,
+                          struct ethtool_link_ksettings *cmd)
+{
+       static const u32 ls_to_ethtool[] = {
+               [NFP_NET_CFG_STS_LINK_RATE_UNSUPPORTED] = 0,
+               [NFP_NET_CFG_STS_LINK_RATE_UNKNOWN]     = SPEED_UNKNOWN,
+               [NFP_NET_CFG_STS_LINK_RATE_1G]          = SPEED_1000,
+               [NFP_NET_CFG_STS_LINK_RATE_10G]         = SPEED_10000,
+               [NFP_NET_CFG_STS_LINK_RATE_25G]         = SPEED_25000,
+               [NFP_NET_CFG_STS_LINK_RATE_40G]         = SPEED_40000,
+               [NFP_NET_CFG_STS_LINK_RATE_50G]         = SPEED_50000,
+               [NFP_NET_CFG_STS_LINK_RATE_100G]        = SPEED_100000,
+       };
+       struct nfp_net *nn = netdev_priv(netdev);
+       u32 sts, ls;
+
+       ethtool_link_ksettings_add_link_mode(cmd, supported, FIBRE);
+       cmd->base.port = PORT_OTHER;
+       cmd->base.speed = SPEED_UNKNOWN;
+       cmd->base.duplex = DUPLEX_UNKNOWN;
+
+       if (nn->eth_port)
+               cmd->base.autoneg = nn->eth_port->aneg != NFP_ANEG_DISABLED ?
+                       AUTONEG_ENABLE : AUTONEG_DISABLE;
+
+       if (!netif_carrier_ok(netdev))
+               return 0;
+
+       /* Use link speed from ETH table if available, otherwise try the BAR */
+       if (nn->eth_port && nfp_net_link_changed_read_clear(nn))
+               nfp_net_refresh_port_config(nn);
+       /* Separate if - on FW error the port could've disappeared from table */
+       if (nn->eth_port) {
+               cmd->base.port = nn->eth_port->port_type;
+               cmd->base.speed = nn->eth_port->speed;
+               cmd->base.duplex = DUPLEX_FULL;
+               return 0;
+       }
+
+       sts = nn_readl(nn, NFP_NET_CFG_STS);
+
+       ls = FIELD_GET(NFP_NET_CFG_STS_LINK_RATE, sts);
+       if (ls == NFP_NET_CFG_STS_LINK_RATE_UNSUPPORTED)
+               return -EOPNOTSUPP;
+
+       if (ls == NFP_NET_CFG_STS_LINK_RATE_UNKNOWN ||
+           ls >= ARRAY_SIZE(ls_to_ethtool))
+               return 0;
+
+       cmd->base.speed = ls_to_ethtool[ls];
+       cmd->base.duplex = DUPLEX_FULL;
+
+       return 0;
+}
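
The rate code is the 4-bit field at bit 1 of the status word and, as fixed above, it is the extracted code ls (never the raw sts) that indexes the speed table. A stand-alone decode:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            static const uint32_t ls_to_speed[] = {
                    0, 0, 1000, 10000, 25000, 40000, 50000, 100000,
            };
            uint32_t sts = (3 << 1) | 1;     /* link up, rate code 3 */
            uint32_t ls = (sts >> 1) & 0xF;  /* NFP_NET_CFG_STS_LINK_RATE */

            if (ls > 1 && ls < sizeof(ls_to_speed) / sizeof(ls_to_speed[0]))
                    printf("%u Mb/s\n", ls_to_speed[ls]);   /* 10000 Mb/s */
            return 0;
    }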
+
+static int
+nfp_net_set_link_ksettings(struct net_device *netdev,
+                          const struct ethtool_link_ksettings *cmd)
+{
+       struct nfp_net *nn = netdev_priv(netdev);
+       struct nfp_nsp *nsp;
+       int err;
+
+       if (!nn->eth_port)
+               return -EOPNOTSUPP;
+
+       if (netif_running(netdev)) {
+               nn_warn(nn, "Changing settings not allowed on an active interface. It may cause the port to be disabled until reboot.\n");
+               return -EBUSY;
+       }
+
+       nsp = nfp_eth_config_start(nn->cpp, nn->eth_port->index);
+       if (IS_ERR(nsp))
+               return PTR_ERR(nsp);
+
+       err = __nfp_eth_set_aneg(nsp, cmd->base.autoneg == AUTONEG_ENABLE ?
+                                NFP_ANEG_AUTO : NFP_ANEG_DISABLED);
+       if (err)
+               goto err_bad_set;
+       if (cmd->base.speed != SPEED_UNKNOWN) {
+               u32 speed = cmd->base.speed / nn->eth_port->lanes;
+
+               err = __nfp_eth_set_speed(nsp, speed);
+               if (err)
+                       goto err_bad_set;
+       }
+
+       err = nfp_eth_config_commit_end(nsp);
+       if (err > 0)
+               return 0; /* no change */
+
+       nfp_net_refresh_port_config(nn);
+
+       return err;
+
+err_bad_set:
+       nfp_eth_config_cleanup_end(nsp);
+       return err;
+}
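
Setting link parameters follows an NSP transaction: start, stage changes, then either commit (a positive return means the staged config already matched) or clean up without committing. A hypothetical helper showing that shape with the functions used above (example_set_aneg() is not in the driver):

    static int example_set_aneg(struct nfp_net *nn, bool enable)
    {
            struct nfp_nsp *nsp;
            int err;

            nsp = nfp_eth_config_start(nn->cpp, nn->eth_port->index);
            if (IS_ERR(nsp))
                    return PTR_ERR(nsp);

            err = __nfp_eth_set_aneg(nsp, enable ? NFP_ANEG_AUTO
                                                 : NFP_ANEG_DISABLED);
            if (err) {
                    /* Abort: release the NSP without committing */
                    nfp_eth_config_cleanup_end(nsp);
                    return err;
            }

            err = nfp_eth_config_commit_end(nsp);
            return err > 0 ? 0 : err;       /* >0 means nothing changed */
    }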
+
 static void nfp_net_get_ringparam(struct net_device *netdev,
                                  struct ethtool_ringparam *ring)
 {
@@ -179,30 +289,22 @@ static void nfp_net_get_ringparam(struct net_device *netdev,
 
        ring->rx_max_pending = NFP_NET_MAX_RX_DESCS;
        ring->tx_max_pending = NFP_NET_MAX_TX_DESCS;
-       ring->rx_pending = nn->rxd_cnt;
-       ring->tx_pending = nn->txd_cnt;
+       ring->rx_pending = nn->dp.rxd_cnt;
+       ring->tx_pending = nn->dp.txd_cnt;
 }
 
 static int nfp_net_set_ring_size(struct nfp_net *nn, u32 rxd_cnt, u32 txd_cnt)
 {
-       struct nfp_net_ring_set *reconfig_rx = NULL, *reconfig_tx = NULL;
-       struct nfp_net_ring_set rx = {
-               .n_rings = nn->num_rx_rings,
-               .mtu = nn->netdev->mtu,
-               .dcnt = rxd_cnt,
-       };
-       struct nfp_net_ring_set tx = {
-               .n_rings = nn->num_tx_rings,
-               .dcnt = txd_cnt,
-       };
+       struct nfp_net_dp *dp;
 
-       if (nn->rxd_cnt != rxd_cnt)
-               reconfig_rx = &rx;
-       if (nn->txd_cnt != txd_cnt)
-               reconfig_tx = &tx;
+       dp = nfp_net_clone_dp(nn);
+       if (!dp)
+               return -ENOMEM;
 
-       return nfp_net_ring_reconfig(nn, &nn->xdp_prog,
-                                    reconfig_rx, reconfig_tx);
+       dp->rxd_cnt = rxd_cnt;
+       dp->txd_cnt = txd_cnt;
+
+       return nfp_net_ring_reconfig(nn, dp);
 }
 
 static int nfp_net_set_ringparam(struct net_device *netdev,
@@ -223,11 +325,11 @@ static int nfp_net_set_ringparam(struct net_device *netdev,
            txd_cnt < NFP_NET_MIN_TX_DESCS || txd_cnt > NFP_NET_MAX_TX_DESCS)
                return -EINVAL;
 
-       if (nn->rxd_cnt == rxd_cnt && nn->txd_cnt == txd_cnt)
+       if (nn->dp.rxd_cnt == rxd_cnt && nn->dp.txd_cnt == txd_cnt)
                return 0;
 
        nn_dbg(nn, "Change ring size: RxQ %u->%u, TxQ %u->%u\n",
-              nn->rxd_cnt, rxd_cnt, nn->txd_cnt, txd_cnt);
+              nn->dp.rxd_cnt, rxd_cnt, nn->dp.txd_cnt, txd_cnt);
 
        return nfp_net_set_ring_size(nn, rxd_cnt, txd_cnt);
 }
@@ -245,7 +347,7 @@ static void nfp_net_get_strings(struct net_device *netdev,
                        memcpy(p, nfp_net_et_stats[i].name, ETH_GSTRING_LEN);
                        p += ETH_GSTRING_LEN;
                }
-               for (i = 0; i < nn->num_r_vecs; i++) {
+               for (i = 0; i < nn->dp.num_r_vecs; i++) {
                        sprintf(p, "rvec_%u_rx_pkts", i);
                        p += ETH_GSTRING_LEN;
                        sprintf(p, "rvec_%u_tx_pkts", i);
@@ -267,13 +369,13 @@ static void nfp_net_get_strings(struct net_device *netdev,
                p += ETH_GSTRING_LEN;
                strncpy(p, "tx_lso", ETH_GSTRING_LEN);
                p += ETH_GSTRING_LEN;
-               for (i = 0; i < nn->num_tx_rings; i++) {
+               for (i = 0; i < nn->dp.num_tx_rings; i++) {
                        sprintf(p, "txq_%u_pkts", i);
                        p += ETH_GSTRING_LEN;
                        sprintf(p, "txq_%u_bytes", i);
                        p += ETH_GSTRING_LEN;
                }
-               for (i = 0; i < nn->num_rx_rings; i++) {
+               for (i = 0; i < nn->dp.num_rx_rings; i++) {
                        sprintf(p, "rxq_%u_pkts", i);
                        p += ETH_GSTRING_LEN;
                        sprintf(p, "rxq_%u_bytes", i);
@@ -306,12 +408,12 @@ static void nfp_net_get_stats(struct net_device *netdev,
                        break;
 
                case NFP_NET_DEV_ET_STATS:
-                       io_p = nn->ctrl_bar + nfp_net_et_stats[i].off;
+                       io_p = nn->dp.ctrl_bar + nfp_net_et_stats[i].off;
                        data[i] = readq(io_p);
                        break;
                }
        }
-       for (j = 0; j < nn->num_r_vecs; j++) {
+       for (j = 0; j < nn->dp.num_r_vecs; j++) {
                unsigned int start;
 
                do {
@@ -337,16 +439,16 @@ static void nfp_net_get_stats(struct net_device *netdev,
        }
        for (j = 0; j < NN_ET_RVEC_GATHER_STATS; j++)
                data[i++] = gathered_stats[j];
-       for (j = 0; j < nn->num_tx_rings; j++) {
-               io_p = nn->ctrl_bar + NFP_NET_CFG_TXR_STATS(j);
+       for (j = 0; j < nn->dp.num_tx_rings; j++) {
+               io_p = nn->dp.ctrl_bar + NFP_NET_CFG_TXR_STATS(j);
                data[i++] = readq(io_p);
-               io_p = nn->ctrl_bar + NFP_NET_CFG_TXR_STATS(j) + 8;
+               io_p = nn->dp.ctrl_bar + NFP_NET_CFG_TXR_STATS(j) + 8;
                data[i++] = readq(io_p);
        }
-       for (j = 0; j < nn->num_rx_rings; j++) {
-               io_p = nn->ctrl_bar + NFP_NET_CFG_RXR_STATS(j);
+       for (j = 0; j < nn->dp.num_rx_rings; j++) {
+               io_p = nn->dp.ctrl_bar + NFP_NET_CFG_RXR_STATS(j);
                data[i++] = readq(io_p);
-               io_p = nn->ctrl_bar + NFP_NET_CFG_RXR_STATS(j) + 8;
+               io_p = nn->dp.ctrl_bar + NFP_NET_CFG_RXR_STATS(j) + 8;
                data[i++] = readq(io_p);
        }
 }
@@ -410,7 +512,7 @@ static int nfp_net_get_rxnfc(struct net_device *netdev,
 
        switch (cmd->cmd) {
        case ETHTOOL_GRXRINGS:
-               cmd->data = nn->num_rx_rings;
+               cmd->data = nn->dp.num_rx_rings;
                return 0;
        case ETHTOOL_GRXFH:
                return nfp_net_get_rss_hash_opts(nn, cmd);
@@ -454,13 +556,13 @@ static int nfp_net_set_rss_hash_opt(struct nfp_net *nn,
                return -EINVAL;
        }
 
-       new_rss_cfg |= NFP_NET_CFG_RSS_TOEPLITZ;
+       new_rss_cfg |= FIELD_PREP(NFP_NET_CFG_RSS_HFUNC, nn->rss_hfunc);
        new_rss_cfg |= NFP_NET_CFG_RSS_MASK;
 
        if (new_rss_cfg == nn->rss_cfg)
                return 0;
 
-       writel(new_rss_cfg, nn->ctrl_bar + NFP_NET_CFG_RSS_CTRL);
+       writel(new_rss_cfg, nn->dp.ctrl_bar + NFP_NET_CFG_RSS_CTRL);
        err = nfp_net_reconfig(nn, NFP_NET_CFG_UPDATE_RSS);
        if (err)
                return err;
@@ -496,7 +598,12 @@ static u32 nfp_net_get_rxfh_indir_size(struct net_device *netdev)
 
 static u32 nfp_net_get_rxfh_key_size(struct net_device *netdev)
 {
-       return NFP_NET_CFG_RSS_KEY_SZ;
+       struct nfp_net *nn = netdev_priv(netdev);
+
+       if (!(nn->cap & NFP_NET_CFG_CTRL_RSS))
+               return -EOPNOTSUPP;
+
+       return nfp_net_rss_key_sz(nn);
 }
 
 static int nfp_net_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key,
@@ -512,9 +619,12 @@ static int nfp_net_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key,
                for (i = 0; i < ARRAY_SIZE(nn->rss_itbl); i++)
                        indir[i] = nn->rss_itbl[i];
        if (key)
-               memcpy(key, nn->rss_key, NFP_NET_CFG_RSS_KEY_SZ);
-       if (hfunc)
-               *hfunc = ETH_RSS_HASH_TOP;
+               memcpy(key, nn->rss_key, nfp_net_rss_key_sz(nn));
+       if (hfunc) {
+               *hfunc = nn->rss_hfunc;
+               if (*hfunc >= 1 << ETH_RSS_HASH_FUNCS_COUNT)
+                       *hfunc = ETH_RSS_HASH_UNKNOWN;
+       }
 
        return 0;
 }
@@ -527,14 +637,14 @@ static int nfp_net_set_rxfh(struct net_device *netdev,
        int i;
 
        if (!(nn->cap & NFP_NET_CFG_CTRL_RSS) ||
-           !(hfunc == ETH_RSS_HASH_NO_CHANGE || hfunc == ETH_RSS_HASH_TOP))
+           !(hfunc == ETH_RSS_HASH_NO_CHANGE || hfunc == nn->rss_hfunc))
                return -EOPNOTSUPP;
 
        if (!key && !indir)
                return 0;
 
        if (key) {
-               memcpy(nn->rss_key, key, NFP_NET_CFG_RSS_KEY_SZ);
+               memcpy(nn->rss_key, key, nfp_net_rss_key_sz(nn));
                nfp_net_rss_write_key(nn);
        }
        if (indir) {
@@ -564,7 +674,7 @@ static void nfp_net_get_regs(struct net_device *netdev,
        regs->version = nn_readl(nn, NFP_NET_CFG_VERSION);
 
        for (i = 0; i < NFP_NET_CFG_BAR_SZ / sizeof(u32); i++)
-               regs_buf[i] = readl(nn->ctrl_bar + (i * sizeof(u32)));
+               regs_buf[i] = readl(nn->dp.ctrl_bar + (i * sizeof(u32)));
 }
 
 static int nfp_net_get_coalesce(struct net_device *netdev,
@@ -736,16 +846,16 @@ static void nfp_net_get_channels(struct net_device *netdev,
        struct nfp_net *nn = netdev_priv(netdev);
        unsigned int num_tx_rings;
 
-       num_tx_rings = nn->num_tx_rings;
-       if (nn->xdp_prog)
-               num_tx_rings -= nn->num_rx_rings;
+       num_tx_rings = nn->dp.num_tx_rings;
+       if (nn->dp.xdp_prog)
+               num_tx_rings -= nn->dp.num_rx_rings;
 
        channel->max_rx = min(nn->max_rx_rings, nn->max_r_vecs);
        channel->max_tx = min(nn->max_tx_rings, nn->max_r_vecs);
        channel->max_combined = min(channel->max_rx, channel->max_tx);
        channel->max_other = NFP_NET_NON_Q_VECTORS;
-       channel->combined_count = min(nn->num_rx_rings, num_tx_rings);
-       channel->rx_count = nn->num_rx_rings - channel->combined_count;
+       channel->combined_count = min(nn->dp.num_rx_rings, num_tx_rings);
+       channel->rx_count = nn->dp.num_rx_rings - channel->combined_count;
        channel->tx_count = num_tx_rings - channel->combined_count;
        channel->other_count = NFP_NET_NON_Q_VECTORS;
 }
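
The counts reported above come from simple set arithmetic: pairs of RX and stack-TX rings are "combined" channels, and the remainder are RX-only or TX-only. A stand-alone example with illustrative ring counts:

    #include <stdio.h>

    int main(void)
    {
            unsigned int num_rx = 4, num_tx = 6;    /* stack TX, XDP excluded */
            unsigned int combined = num_rx < num_tx ? num_rx : num_tx;

            /* 4 combined, 0 RX-only, 2 TX-only */
            printf("combined=%u rx=%u tx=%u\n", combined,
                   num_rx - combined, num_tx - combined);
            return 0;
    }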
@@ -753,29 +863,19 @@ static void nfp_net_get_channels(struct net_device *netdev,
 static int nfp_net_set_num_rings(struct nfp_net *nn, unsigned int total_rx,
                                 unsigned int total_tx)
 {
-       struct nfp_net_ring_set *reconfig_rx = NULL, *reconfig_tx = NULL;
-       struct nfp_net_ring_set rx = {
-               .n_rings = total_rx,
-               .mtu = nn->netdev->mtu,
-               .dcnt = nn->rxd_cnt,
-       };
-       struct nfp_net_ring_set tx = {
-               .n_rings = total_tx,
-               .dcnt = nn->txd_cnt,
-       };
+       struct nfp_net_dp *dp;
 
-       if (nn->num_rx_rings != total_rx)
-               reconfig_rx = &rx;
-       if (nn->num_stack_tx_rings != total_tx ||
-           (nn->xdp_prog && reconfig_rx))
-               reconfig_tx = &tx;
+       dp = nfp_net_clone_dp(nn);
+       if (!dp)
+               return -ENOMEM;
 
-       /* nfp_net_check_config() will catch tx.n_rings > nn->max_tx_rings */
-       if (nn->xdp_prog)
-               tx.n_rings += total_rx;
+       dp->num_rx_rings = total_rx;
+       dp->num_tx_rings = total_tx;
+       /* nfp_net_check_config() will catch num_tx_rings > nn->max_tx_rings */
+       if (dp->xdp_prog)
+               dp->num_tx_rings += total_rx;
 
-       return nfp_net_ring_reconfig(nn, &nn->xdp_prog,
-                                    reconfig_rx, reconfig_tx);
+       return nfp_net_ring_reconfig(nn, dp);
 }
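
When an XDP program is attached, every RX ring needs its own XDP_TX ring on top of what the stack uses, so the requested TX count is bumped by total_rx before reconfiguration. Worked numbers, stand-alone:

    #include <stdio.h>

    int main(void)
    {
            unsigned int total_rx = 4, total_tx = 4, xdp_on = 1;
            unsigned int num_tx = total_tx + (xdp_on ? total_rx : 0);

            /* 8 HW TX rings: 4 for the stack, 4 reserved for XDP_TX */
            printf("hw_tx=%u stack_tx=%u\n", num_tx, total_tx);
            return 0;
    }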
 
 static int nfp_net_set_channels(struct net_device *netdev,
@@ -823,6 +923,8 @@ static const struct ethtool_ops nfp_net_ethtool_ops = {
        .set_coalesce           = nfp_net_set_coalesce,
        .get_channels           = nfp_net_get_channels,
        .set_channels           = nfp_net_set_channels,
+       .get_link_ksettings     = nfp_net_get_link_ksettings,
+       .set_link_ksettings     = nfp_net_set_link_ksettings,
 };
 
 void nfp_net_set_ethtool_ops(struct net_device *netdev)
index 3afcdc11480c82c7d19f2252cae29a40066cfef8..4c6863a072d366052bb194444a1b204cdef524a8 100644 (file)
 #include <linux/pci_regs.h>
 #include <linux/msi.h>
 #include <linux/random.h>
+#include <linux/rtnetlink.h>
 
 #include "nfpcore/nfp.h"
 #include "nfpcore/nfp_cpp.h"
 #include "nfpcore/nfp_nffw.h"
-#include "nfpcore/nfp_nsp_eth.h"
+#include "nfpcore/nfp_nsp.h"
 #include "nfpcore/nfp6000_pcie.h"
 
 #include "nfp_net_ctrl.h"
@@ -129,61 +130,61 @@ err_area:
        return (u8 __iomem *)ERR_PTR(err);
 }
 
+/**
+ * nfp_net_get_mac_addr() - Get the MAC address.
+ * @nn:       NFP Network structure
+ * @cpp:      NFP CPP handle
+ * @id:              NFP port id
+ *
+ * First try to get the MAC address from the NSP ETH table. If that
+ * fails, try HWInfo.  As a last resort, generate a random address.
+ */
 static void
-nfp_net_get_mac_addr_hwinfo(struct nfp_net *nn, struct nfp_cpp *cpp,
-                           unsigned int id)
+nfp_net_get_mac_addr(struct nfp_net *nn, struct nfp_cpp *cpp, unsigned int id)
 {
+       struct nfp_net_dp *dp = &nn->dp;
        u8 mac_addr[ETH_ALEN];
        const char *mac_str;
        char name[32];
 
+       if (nn->eth_port) {
+               ether_addr_copy(dp->netdev->dev_addr, nn->eth_port->mac_addr);
+               ether_addr_copy(dp->netdev->perm_addr, nn->eth_port->mac_addr);
+               return;
+       }
+
        snprintf(name, sizeof(name), "eth%d.mac", id);
 
        mac_str = nfp_hwinfo_lookup(cpp, name);
        if (!mac_str) {
-               dev_warn(&nn->pdev->dev,
-                        "Can't lookup MAC address. Generate\n");
-               eth_hw_addr_random(nn->netdev);
+               dev_warn(dp->dev, "Can't lookup MAC address. Generate\n");
+               eth_hw_addr_random(dp->netdev);
                return;
        }
 
        if (sscanf(mac_str, "%02hhx:%02hhx:%02hhx:%02hhx:%02hhx:%02hhx",
                   &mac_addr[0], &mac_addr[1], &mac_addr[2],
                   &mac_addr[3], &mac_addr[4], &mac_addr[5]) != 6) {
-               dev_warn(&nn->pdev->dev,
+               dev_warn(dp->dev,
                         "Can't parse MAC address (%s). Generate.\n", mac_str);
-               eth_hw_addr_random(nn->netdev);
+               eth_hw_addr_random(dp->netdev);
                return;
        }
 
-       ether_addr_copy(nn->netdev->dev_addr, mac_addr);
-       ether_addr_copy(nn->netdev->perm_addr, mac_addr);
+       ether_addr_copy(dp->netdev->dev_addr, mac_addr);
+       ether_addr_copy(dp->netdev->perm_addr, mac_addr);
 }
 
-/**
- * nfp_net_get_mac_addr() - Get the MAC address.
- * @nn:       NFP Network structure
- * @pf:              NFP PF device structure
- * @id:              NFP port id
- *
- * First try to get the MAC address from NSP ETH table. If that
- * fails try HWInfo.  As a last resort generate a random address.
- */
-static void
-nfp_net_get_mac_addr(struct nfp_net *nn, struct nfp_pf *pf, unsigned int id)
+static struct nfp_eth_table_port *
+nfp_net_find_port(struct nfp_pf *pf, unsigned int id)
 {
        int i;
 
        for (i = 0; pf->eth_tbl && i < pf->eth_tbl->count; i++)
-               if (pf->eth_tbl->ports[i].eth_index == id) {
-                       const u8 *mac_addr = pf->eth_tbl->ports[i].mac_addr;
+               if (pf->eth_tbl->ports[i].eth_index == id)
+                       return &pf->eth_tbl->ports[i];
 
-                       ether_addr_copy(nn->netdev->dev_addr, mac_addr);
-                       ether_addr_copy(nn->netdev->perm_addr, mac_addr);
-                       return;
-               }
-
-       nfp_net_get_mac_addr_hwinfo(nn, pf->cpp, id);
+       return NULL;
 }
 
 static unsigned int nfp_net_pf_get_num_ports(struct nfp_pf *pf)
@@ -282,6 +283,7 @@ static void nfp_net_pf_free_netdevs(struct nfp_pf *pf)
        while (!list_empty(&pf->ports)) {
                nn = list_first_entry(&pf->ports, struct nfp_net, port_list);
                list_del(&nn->port_list);
+               pf->num_netdevs--;
 
                nfp_net_netdev_free(nn);
        }
@@ -290,7 +292,8 @@ static void nfp_net_pf_free_netdevs(struct nfp_pf *pf)
 static struct nfp_net *
 nfp_net_pf_alloc_port_netdev(struct nfp_pf *pf, void __iomem *ctrl_bar,
                             void __iomem *tx_bar, void __iomem *rx_bar,
-                            int stride, struct nfp_net_fw_version *fw_ver)
+                            int stride, struct nfp_net_fw_version *fw_ver,
+                            struct nfp_eth_table_port *eth_port)
 {
        u32 n_tx_rings, n_rx_rings;
        struct nfp_net *nn;
@@ -305,12 +308,13 @@ nfp_net_pf_alloc_port_netdev(struct nfp_pf *pf, void __iomem *ctrl_bar,
 
        nn->cpp = pf->cpp;
        nn->fw_ver = *fw_ver;
-       nn->ctrl_bar = ctrl_bar;
+       nn->dp.ctrl_bar = ctrl_bar;
        nn->tx_bar = tx_bar;
        nn->rx_bar = rx_bar;
-       nn->is_vf = 0;
+       nn->dp.is_vf = 0;
        nn->stride_rx = stride;
        nn->stride_tx = stride;
+       nn->eth_port = eth_port;
 
        return nn;
 }
@@ -322,7 +326,7 @@ nfp_net_pf_init_port_netdev(struct nfp_pf *pf, struct nfp_net *nn,
        int err;
 
        /* Get MAC address */
-       nfp_net_get_mac_addr(nn, pf, id);
+       nfp_net_get_mac_addr(nn, pf->cpp, id);
 
        /* Get ME clock frequency from ctrl BAR
         * XXX for now frequency is hardcoded until we figure out how
@@ -330,7 +334,7 @@ nfp_net_pf_init_port_netdev(struct nfp_pf *pf, struct nfp_net *nn,
         */
        nn->me_freq_mhz = 1200;
 
-       err = nfp_net_netdev_init(nn->netdev);
+       err = nfp_net_netdev_init(nn->dp.netdev);
        if (err)
                return err;
 
@@ -347,6 +351,7 @@ nfp_net_pf_alloc_netdevs(struct nfp_pf *pf, void __iomem *ctrl_bar,
                         int stride, struct nfp_net_fw_version *fw_ver)
 {
        u32 prev_tx_base, prev_rx_base, tgt_tx_base, tgt_rx_base;
+       struct nfp_eth_table_port *eth_port;
        struct nfp_net *nn;
        unsigned int i;
        int err;
@@ -362,17 +367,27 @@ nfp_net_pf_alloc_netdevs(struct nfp_pf *pf, void __iomem *ctrl_bar,
                prev_tx_base = tgt_tx_base;
                prev_rx_base = tgt_rx_base;
 
-               nn = nfp_net_pf_alloc_port_netdev(pf, ctrl_bar, tx_bar, rx_bar,
-                                                 stride, fw_ver);
-               if (IS_ERR(nn)) {
-                       err = PTR_ERR(nn);
-                       goto err_free_prev;
+               eth_port = nfp_net_find_port(pf, i);
+               if (eth_port && eth_port->override_changed) {
+                       nfp_warn(pf->cpp, "Config changed for port #%d, reboot required before port will be operational\n", i);
+               } else {
+                       nn = nfp_net_pf_alloc_port_netdev(pf, ctrl_bar, tx_bar,
+                                                         rx_bar, stride,
+                                                         fw_ver, eth_port);
+                       if (IS_ERR(nn)) {
+                               err = PTR_ERR(nn);
+                               goto err_free_prev;
+                       }
+                       list_add_tail(&nn->port_list, &pf->ports);
+                       pf->num_netdevs++;
                }
-               list_add_tail(&nn->port_list, &pf->ports);
 
                ctrl_bar += NFP_PF_CSR_SLICE_SIZE;
        }
 
+       if (list_empty(&pf->ports))
+               return -ENODEV;
+
        return 0;
 
 err_free_prev:
@@ -399,7 +414,7 @@ nfp_net_pf_spawn_netdevs(struct nfp_pf *pf,
        /* Get MSI-X vectors */
        wanted_irqs = 0;
        list_for_each_entry(nn, &pf->ports, port_list)
-               wanted_irqs += NFP_NET_NON_Q_VECTORS + nn->num_r_vecs;
+               wanted_irqs += NFP_NET_NON_Q_VECTORS + nn->dp.num_r_vecs;
        pf->irq_entries = kcalloc(wanted_irqs, sizeof(*pf->irq_entries),
                                  GFP_KERNEL);
        if (!pf->irq_entries) {
@@ -408,7 +423,7 @@ nfp_net_pf_spawn_netdevs(struct nfp_pf *pf,
        }
 
        num_irqs = nfp_net_irqs_alloc(pf->pdev, pf->irq_entries,
-                                     NFP_NET_MIN_PORT_IRQS * pf->num_ports,
+                                     NFP_NET_MIN_PORT_IRQS * pf->num_netdevs,
                                      wanted_irqs);
        if (!num_irqs) {
                nn_warn(nn, "Unable to allocate MSI-X Vectors. Exiting\n");
@@ -418,7 +433,7 @@ nfp_net_pf_spawn_netdevs(struct nfp_pf *pf,
 
        /* Distribute IRQs to ports */
        irqs_left = num_irqs;
-       ports_left = pf->num_ports;
+       ports_left = pf->num_netdevs;
        list_for_each_entry(nn, &pf->ports, port_list) {
                unsigned int n;
 
@@ -444,7 +459,7 @@ nfp_net_pf_spawn_netdevs(struct nfp_pf *pf,
 err_prev_deinit:
        list_for_each_entry_continue_reverse(nn, &pf->ports, port_list) {
                nfp_net_debugfs_dir_clean(&nn->debugfs_dir);
-               nfp_net_netdev_clean(nn->netdev);
+               nfp_net_netdev_clean(nn->dp.netdev);
        }
        nfp_net_irqs_disable(pf->pdev);
 err_vec_free:
@@ -454,6 +469,81 @@ err_nn_free:
        return err;
 }
 
+static void nfp_net_pci_remove_finish(struct nfp_pf *pf)
+{
+       nfp_net_debugfs_dir_clean(&pf->ddir);
+
+       nfp_net_irqs_disable(pf->pdev);
+       kfree(pf->irq_entries);
+
+       nfp_cpp_area_release_free(pf->rx_area);
+       nfp_cpp_area_release_free(pf->tx_area);
+       nfp_cpp_area_release_free(pf->ctrl_area);
+}
+
+static void nfp_net_refresh_netdevs(struct work_struct *work)
+{
+       struct nfp_pf *pf = container_of(work, struct nfp_pf,
+                                        port_refresh_work);
+       struct nfp_net *nn, *next;
+
+       mutex_lock(&pf->port_lock);
+
+       /* Check for nfp_net_pci_remove() racing against us */
+       if (list_empty(&pf->ports))
+               goto out;
+
+       list_for_each_entry_safe(nn, next, &pf->ports, port_list) {
+               if (!nn->eth_port) {
+                       nfp_warn(pf->cpp, "Warning: port not present after reconfig\n");
+                       continue;
+               }
+               if (!nn->eth_port->override_changed)
+                       continue;
+
+               nn_warn(nn, "Port config changed, unregistering. Reboot required before port will be operational again.\n");
+
+               nfp_net_debugfs_dir_clean(&nn->debugfs_dir);
+               nfp_net_netdev_clean(nn->dp.netdev);
+
+               list_del(&nn->port_list);
+               pf->num_netdevs--;
+               nfp_net_netdev_free(nn);
+       }
+
+       if (list_empty(&pf->ports))
+               nfp_net_pci_remove_finish(pf);
+out:
+       mutex_unlock(&pf->port_lock);
+}
+
+void nfp_net_refresh_port_config(struct nfp_net *nn)
+{
+       struct nfp_pf *pf = pci_get_drvdata(nn->pdev);
+       struct nfp_eth_table *old_table;
+
+       ASSERT_RTNL();
+
+       old_table = pf->eth_tbl;
+
+       list_for_each_entry(nn, &pf->ports, port_list)
+               nfp_net_link_changed_read_clear(nn);
+
+       pf->eth_tbl = nfp_eth_read_ports(pf->cpp);
+       if (!pf->eth_tbl) {
+               pf->eth_tbl = old_table;
+               nfp_err(pf->cpp, "Error refreshing port config!\n");
+               return;
+       }
+
+       list_for_each_entry(nn, &pf->ports, port_list)
+               nn->eth_port = nfp_net_find_port(pf, nn->eth_port->eth_index);
+
+       kfree(old_table);
+
+       schedule_work(&pf->port_refresh_work);
+}
+
 /*
  * PCI device functions
  */
@@ -467,17 +557,23 @@ int nfp_net_pci_probe(struct nfp_pf *pf)
        int stride;
        int err;
 
+       INIT_WORK(&pf->port_refresh_work, nfp_net_refresh_netdevs);
+       mutex_init(&pf->port_lock);
+
        /* Verify that the board has completed initialization */
        if (!nfp_is_ready(pf->cpp)) {
                nfp_err(pf->cpp, "NFP is not ready for NIC operation.\n");
                return -EINVAL;
        }
 
+       mutex_lock(&pf->port_lock);
        pf->num_ports = nfp_net_pf_get_num_ports(pf);
 
        ctrl_bar = nfp_net_pf_map_ctrl_bar(pf);
-       if (!ctrl_bar)
-               return pf->fw_loaded ? -EINVAL : -EPROBE_DEFER;
+       if (!ctrl_bar) {
+               err = pf->fw_loaded ? -EINVAL : -EPROBE_DEFER;
+               goto err_unlock;
+       }
 
        nfp_net_get_fw_version(&fw_ver, ctrl_bar);
        if (fw_ver.resv || fw_ver.class != NFP_NET_CFG_VERSION_CLASS_GENERIC) {
@@ -551,6 +647,8 @@ int nfp_net_pci_probe(struct nfp_pf *pf)
        if (err)
                goto err_clean_ddir;
 
+       mutex_unlock(&pf->port_lock);
+
        return 0;
 
 err_clean_ddir:
@@ -560,6 +658,8 @@ err_unmap_tx:
        nfp_cpp_area_release_free(pf->tx_area);
 err_ctrl_unmap:
        nfp_cpp_area_release_free(pf->ctrl_area);
+err_unlock:
+       mutex_unlock(&pf->port_lock);
        return err;
 }
 
@@ -567,20 +667,21 @@ void nfp_net_pci_remove(struct nfp_pf *pf)
 {
        struct nfp_net *nn;
 
+       mutex_lock(&pf->port_lock);
+       if (list_empty(&pf->ports))
+               goto out;
+
        list_for_each_entry(nn, &pf->ports, port_list) {
                nfp_net_debugfs_dir_clean(&nn->debugfs_dir);
 
-               nfp_net_netdev_clean(nn->netdev);
+               nfp_net_netdev_clean(nn->dp.netdev);
        }
 
        nfp_net_pf_free_netdevs(pf);
 
-       nfp_net_debugfs_dir_clean(&pf->ddir);
+       nfp_net_pci_remove_finish(pf);
+out:
+       mutex_unlock(&pf->port_lock);
 
-       nfp_net_irqs_disable(pf->pdev);
-       kfree(pf->irq_entries);
-
-       nfp_cpp_area_release_free(pf->rx_area);
-       nfp_cpp_area_release_free(pf->tx_area);
-       nfp_cpp_area_release_free(pf->ctrl_area);
+       cancel_work_sync(&pf->port_refresh_work);
 }
index 18a851eb35084397dd6fa003b3def76ed42a6960..b5b6f69d1e0f8489aa91284052dca2eb092d0135 100644 (file)
@@ -58,7 +58,7 @@ void nfp_net_filter_stats_timer(unsigned long data)
 
        spin_lock_bh(&nn->rx_filter_lock);
 
-       if (nn->ctrl & NFP_NET_CFG_CTRL_BPF)
+       if (nn->dp.ctrl & NFP_NET_CFG_CTRL_BPF)
                mod_timer(&nn->rx_filter_stats_timer,
                          jiffies + NFP_NET_STAT_POLL_IVL);
 
@@ -132,7 +132,7 @@ nfp_net_bpf_get_act(struct nfp_net *nn, struct tc_cls_bpf_offload *cls_bpf)
                        return NN_ACT_TC_DROP;
 
                if (is_tcf_mirred_egress_redirect(a) &&
-                   tcf_mirred_ifindex(a) == nn->netdev->ifindex)
+                   tcf_mirred_ifindex(a) == nn->dp.netdev->ifindex)
                        return NN_ACT_TC_REDIR;
        }
 
@@ -160,7 +160,7 @@ nfp_net_bpf_offload_prepare(struct nfp_net *nn,
        act = ret;
 
        max_mtu = nn_readb(nn, NFP_NET_CFG_BPF_INL_MTU) * 64 - 32;
-       if (max_mtu < nn->netdev->mtu) {
+       if (max_mtu < nn->dp.netdev->mtu) {
                nn_info(nn, "BPF offload not supported with MTU larger than HW packet split boundary\n");
                return -ENOTSUPP;
        }
@@ -168,8 +168,7 @@ nfp_net_bpf_offload_prepare(struct nfp_net *nn,
        start_off = nn_readw(nn, NFP_NET_CFG_BPF_START);
        done_off = nn_readw(nn, NFP_NET_CFG_BPF_DONE);
 
-       *code = dma_zalloc_coherent(&nn->pdev->dev, code_sz, dma_addr,
-                                   GFP_KERNEL);
+       *code = dma_zalloc_coherent(nn->dp.dev, code_sz, dma_addr, GFP_KERNEL);
        if (!*code)
                return -ENOMEM;
 
@@ -181,7 +180,7 @@ nfp_net_bpf_offload_prepare(struct nfp_net *nn,
        return 0;
 
 out:
-       dma_free_coherent(&nn->pdev->dev, code_sz, *code, *dma_addr);
+       dma_free_coherent(nn->dp.dev, code_sz, *code, *dma_addr);
        return ret;
 }
 
@@ -194,7 +193,7 @@ nfp_net_bpf_load_and_start(struct nfp_net *nn, u32 tc_flags,
        u64 bpf_addr = dma_addr;
        int err;
 
-       nn->bpf_offload_skip_sw = !!(tc_flags & TCA_CLS_FLAGS_SKIP_SW);
+       nn->dp.bpf_offload_skip_sw = !!(tc_flags & TCA_CLS_FLAGS_SKIP_SW);
 
        if (dense_mode)
                bpf_addr |= NFP_NET_CFG_BPF_CFG_8CTX;
@@ -208,13 +207,13 @@ nfp_net_bpf_load_and_start(struct nfp_net *nn, u32 tc_flags,
                nn_err(nn, "FW command error while loading BPF: %d\n", err);
 
        /* Enable passing packets through BPF function */
-       nn->ctrl |= NFP_NET_CFG_CTRL_BPF;
-       nn_writel(nn, NFP_NET_CFG_CTRL, nn->ctrl);
+       nn->dp.ctrl |= NFP_NET_CFG_CTRL_BPF;
+       nn_writel(nn, NFP_NET_CFG_CTRL, nn->dp.ctrl);
        err = nfp_net_reconfig(nn, NFP_NET_CFG_UPDATE_GEN);
        if (err)
                nn_err(nn, "FW command error while enabling BPF: %d\n", err);
 
-       dma_free_coherent(&nn->pdev->dev, code_sz, code, dma_addr);
+       dma_free_coherent(nn->dp.dev, code_sz, code, dma_addr);
 
        nfp_net_bpf_stats_reset(nn);
        mod_timer(&nn->rx_filter_stats_timer, jiffies + NFP_NET_STAT_POLL_IVL);
@@ -222,16 +221,16 @@ nfp_net_bpf_load_and_start(struct nfp_net *nn, u32 tc_flags,
 
 static int nfp_net_bpf_stop(struct nfp_net *nn)
 {
-       if (!(nn->ctrl & NFP_NET_CFG_CTRL_BPF))
+       if (!(nn->dp.ctrl & NFP_NET_CFG_CTRL_BPF))
                return 0;
 
        spin_lock_bh(&nn->rx_filter_lock);
-       nn->ctrl &= ~NFP_NET_CFG_CTRL_BPF;
+       nn->dp.ctrl &= ~NFP_NET_CFG_CTRL_BPF;
        spin_unlock_bh(&nn->rx_filter_lock);
-       nn_writel(nn, NFP_NET_CFG_CTRL, nn->ctrl);
+       nn_writel(nn, NFP_NET_CFG_CTRL, nn->dp.ctrl);
 
        del_timer_sync(&nn->rx_filter_stats_timer);
-       nn->bpf_offload_skip_sw = 0;
+       nn->dp.bpf_offload_skip_sw = 0;
 
        return nfp_net_reconfig(nn, NFP_NET_CFG_UPDATE_GEN);
 }
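
nfp_net_bpf_stop() above relies on a common timer-teardown ordering: the stats timer rearms itself only while the BPF ctrl bit is set, and it checks that bit under rx_filter_lock, so clearing the bit under the lock and then calling del_timer_sync() guarantees the timer cannot come back. A reduced sketch of the ordering (illustrative types, not driver code):

#include <linux/spinlock.h>
#include <linux/timer.h>
#include <linux/types.h>

struct stats_sketch {
        spinlock_t lock;
        bool running;                   /* plays the role of the ctrl bit */
        struct timer_list timer;
};

static void stop_stats(struct stats_sketch *s)
{
        spin_lock_bh(&s->lock);
        s->running = false;             /* the handler won't rearm past this */
        spin_unlock_bh(&s->lock);

        del_timer_sync(&s->timer);      /* waits out a handler still running */
}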
@@ -255,7 +254,7 @@ int nfp_net_bpf_offload(struct nfp_net *nn, struct tc_cls_bpf_offload *cls_bpf)
                 * frames which didn't have BPF applied in the hardware should
                 * be fine if software fallback is available, though.
                 */
-               if (nn->bpf_offload_skip_sw)
+               if (nn->dp.bpf_offload_skip_sw)
                        return -EBUSY;
 
                err = nfp_net_bpf_offload_prepare(nn, cls_bpf, &res, &code,
@@ -270,7 +269,7 @@ int nfp_net_bpf_offload(struct nfp_net *nn, struct tc_cls_bpf_offload *cls_bpf)
                return 0;
 
        case TC_CLSBPF_ADD:
-               if (nn->ctrl & NFP_NET_CFG_CTRL_BPF)
+               if (nn->dp.ctrl & NFP_NET_CFG_CTRL_BPF)
                        return -EBUSY;
 
                err = nfp_net_bpf_offload_prepare(nn, cls_bpf, &res, &code,
index 39407f7cc586c948319b963aae3fb455a7a6cb8f..86e61be6f35c11f8c7b932844b15c989cd6db079 100644 (file)
@@ -84,12 +84,12 @@ static void nfp_netvf_get_mac_addr(struct nfp_net *nn)
        put_unaligned_be16(nn_readw(nn, NFP_NET_CFG_MACADDR + 6), &mac_addr[4]);
 
        if (!is_valid_ether_addr(mac_addr)) {
-               eth_hw_addr_random(nn->netdev);
+               eth_hw_addr_random(nn->dp.netdev);
                return;
        }
 
-       ether_addr_copy(nn->netdev->dev_addr, mac_addr);
-       ether_addr_copy(nn->netdev->perm_addr, mac_addr);
+       ether_addr_copy(nn->dp.netdev->dev_addr, mac_addr);
+       ether_addr_copy(nn->dp.netdev->perm_addr, mac_addr);
 }
 
 static int nfp_netvf_pci_probe(struct pci_dev *pdev,
@@ -210,8 +210,8 @@ static int nfp_netvf_pci_probe(struct pci_dev *pdev,
        vf->nn = nn;
 
        nn->fw_ver = fw_ver;
-       nn->ctrl_bar = ctrl_bar;
-       nn->is_vf = 1;
+       nn->dp.ctrl_bar = ctrl_bar;
+       nn->dp.is_vf = 1;
        nn->stride_tx = stride;
        nn->stride_rx = stride;
 
@@ -268,7 +268,8 @@ static int nfp_netvf_pci_probe(struct pci_dev *pdev,
 
        num_irqs = nfp_net_irqs_alloc(pdev, vf->irq_entries,
                                      NFP_NET_MIN_PORT_IRQS,
-                                     NFP_NET_NON_Q_VECTORS + nn->num_r_vecs);
+                                     NFP_NET_NON_Q_VECTORS +
+                                     nn->dp.num_r_vecs);
        if (!num_irqs) {
                nn_warn(nn, "Unable to allocate MSI-X Vectors. Exiting\n");
                err = -EIO;
@@ -282,7 +283,7 @@ static int nfp_netvf_pci_probe(struct pci_dev *pdev,
         */
        nn->me_freq_mhz = 1200;
 
-       err = nfp_net_netdev_init(nn->netdev);
+       err = nfp_net_netdev_init(nn->dp.netdev);
        if (err)
                goto err_irqs_disable;
 
@@ -327,7 +328,7 @@ static void nfp_netvf_pci_remove(struct pci_dev *pdev)
        nfp_net_debugfs_dir_clean(&nn->debugfs_dir);
        nfp_net_debugfs_dir_clean(&vf->ddir);
 
-       nfp_net_netdev_clean(nn->netdev);
+       nfp_net_netdev_clean(nn->dp.netdev);
 
        nfp_net_irqs_disable(pdev);
 
@@ -337,7 +338,7 @@ static void nfp_netvf_pci_remove(struct pci_dev *pdev)
        } else {
                iounmap(vf->q_bar);
        }
-       iounmap(nn->ctrl_bar);
+       iounmap(nn->dp.ctrl_bar);
 
        nfp_net_netdev_free(nn);
 
index 42cb720b696d17b6dfb580a145237245a8217412..8afef7593f13d91a026a90e07d9fcecd1648fc7f 100644 (file)
 
 const char *nfp_hwinfo_lookup(struct nfp_cpp *cpp, const char *lookup);
 
-/* Implemented in nfp_nsp.c */
+/* Implemented in nfp_nsp.c, low-level functions */
 
 struct nfp_nsp;
-struct firmware;
-
-struct nfp_nsp *nfp_nsp_open(struct nfp_cpp *cpp);
-void nfp_nsp_close(struct nfp_nsp *state);
-u16 nfp_nsp_get_abi_ver_major(struct nfp_nsp *state);
-u16 nfp_nsp_get_abi_ver_minor(struct nfp_nsp *state);
-int nfp_nsp_wait(struct nfp_nsp *state);
-int nfp_nsp_device_soft_reset(struct nfp_nsp *state);
-int nfp_nsp_load_fw(struct nfp_nsp *state, const struct firmware *fw);
+
+struct nfp_cpp *nfp_nsp_cpp(struct nfp_nsp *state);
+bool nfp_nsp_config_modified(struct nfp_nsp *state);
+void nfp_nsp_config_set_modified(struct nfp_nsp *state, bool modified);
+void *nfp_nsp_config_entries(struct nfp_nsp *state);
+unsigned int nfp_nsp_config_idx(struct nfp_nsp *state);
+void nfp_nsp_config_set_state(struct nfp_nsp *state, void *entries,
+                             unsigned int idx);
+void nfp_nsp_config_clear_state(struct nfp_nsp *state);
 int nfp_nsp_read_eth_table(struct nfp_nsp *state, void *buf, unsigned int size);
 int nfp_nsp_write_eth_table(struct nfp_nsp *state,
                            const void *buf, unsigned int size);
 
 /* Implemented in nfp_resource.c */
 
-#define NFP_RESOURCE_TBL_TARGET                NFP_CPP_TARGET_MU
-#define NFP_RESOURCE_TBL_BASE          0x8100000000ULL
-
-/* NFP Resource Table self-identifier */
-#define NFP_RESOURCE_TBL_NAME          "nfp.res"
-#define NFP_RESOURCE_TBL_KEY           0x00000000 /* Special key for entry 0 */
-
-/* All other keys are CRC32-POSIX of the 8-byte identification string */
+/* All keys are CRC32-POSIX of the 8-byte identification string */
 
 /* ARM/PCI vNIC Interfaces 0..3 */
 #define NFP_RESOURCE_VNIC_PCI_0                "vnic.p0"
index 15cc3e77cf6acddfec0afe8906e2a61f3f958aa5..43dc68e01274225c79fea7760ac303a8a2409a85 100644 (file)
@@ -217,7 +217,7 @@ static resource_size_t nfp_bar_resource_start(struct nfp_bar *bar)
 #define TARGET_WIDTH_64    8
 
 static int
-compute_bar(struct nfp6000_pcie *nfp, struct nfp_bar *bar,
+compute_bar(const struct nfp6000_pcie *nfp, const struct nfp_bar *bar,
            u32 *bar_config, u64 *bar_base,
            int tgt, int act, int tok, u64 offset, size_t size, int width)
 {
@@ -410,35 +410,36 @@ find_matching_bar(struct nfp6000_pcie *nfp,
 
 /* Return EAGAIN if no resource is available */
 static int
-find_unused_bar_noblock(struct nfp6000_pcie *nfp,
+find_unused_bar_noblock(const struct nfp6000_pcie *nfp,
                        int tgt, int act, int tok,
                        u64 offset, size_t size, int width)
 {
-       int n, invalid = 0;
+       int n, busy = 0;
 
        for (n = 0; n < nfp->bars; n++) {
-               struct nfp_bar *bar = &nfp->bar[n];
+               const struct nfp_bar *bar = &nfp->bar[n];
                int err;
 
-               if (bar->bitsize == 0) {
-                       invalid++;
-                       continue;
-               }
-
-               if (atomic_read(&bar->refcnt) != 0)
+               if (!bar->bitsize)
                        continue;
 
                /* Just check to see if we can make it fit... */
                err = compute_bar(nfp, bar, NULL, NULL,
                                  tgt, act, tok, offset, size, width);
+               if (err)
+                       continue;
 
-               if (err < 0)
-                       invalid++;
-               else
+               if (!atomic_read(&bar->refcnt))
                        return n;
+
+               busy++;
        }
 
-       return (n == invalid) ? -EINVAL : -EAGAIN;
+       if (WARN(!busy, "No suitable BAR found for request tgt:0x%x act:0x%x tok:0x%x off:0x%llx size:%zd width:%d\n",
+                tgt, act, tok, offset, size, width))
+               return -EINVAL;
+
+       return -EAGAIN;
 }
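
The rewritten scan above separates two failure modes the old code folded together: a request that no BAR can ever map (a hard -EINVAL, now with a WARN) versus a request that fits some BAR which merely happens to be in use (-EAGAIN, retry later). A reduced form of the same control flow, with slot_fits() standing in for the compute_bar() dry run (illustrative types, not driver code):

#include <linux/atomic.h>
#include <linux/errno.h>
#include <linux/types.h>

struct slot_sketch {
        bool valid;
        atomic_t refcnt;
};

/* Dry-run fit check; a stand-in for compute_bar(..., NULL, NULL, ...). */
static bool slot_fits(const struct slot_sketch *slot);

static int find_free_slot(const struct slot_sketch *slots, int n)
{
        int i, busy = 0;

        for (i = 0; i < n; i++) {
                if (!slots[i].valid)
                        continue;
                if (!slot_fits(&slots[i]))
                        continue;               /* can never map this request */
                if (!atomic_read(&slots[i].refcnt))
                        return i;               /* fits and free: take it */
                busy++;                         /* fits, but currently held */
        }

        return busy ? -EAGAIN : -EINVAL;        /* retryable vs. impossible */
}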
 
 static int
index 40108e66c65480fcf8e002379bf68ec948cd22ee..e2abba4c3a3fd84225f443d82e61454d41093819 100644 (file)
@@ -65,39 +65,49 @@ struct nfp_cpp_resource {
        u64 end;
 };
 
-struct nfp_cpp_mutex {
-       struct list_head list;
-       struct nfp_cpp *cpp;
-       int target;
-       u16 usage;
-       u16 depth;
-       unsigned long long address;
-       u32 key;
-};
-
+/**
+ * struct nfp_cpp - main nfpcore device structure
+ * Following fields are read-only after probe() exits or netdevs are spawned.
+ * @dev:               embedded device structure
+ * @op:                        low-level implementation ops
+ * @priv:              private data of the low-level implementation
+ * @model:             chip model
+ * @interface:         chip interface id we are using to reach it
+ * @serial:            chip serial number
+ * @imb_cat_table:     CPP Mapping Table
+ *
+ * Following fields can be used only in probe() or with rtnl held:
+ * @hwinfo:            HWInfo database fetched from the device
+ * @rtsym:             firmware run time symbols
+ *
+ * Following fields use explicit locking:
+ * @resource_list:     NFP CPP resource list
+ * @resource_lock:     protects @resource_list
+ *
+ * @area_cache_list:   cached areas for cpp/xpb read/write speed up
+ * @area_cache_mutex:  protects @area_cache_list
+ *
+ * @waitq:             area wait queue
+ */
 struct nfp_cpp {
        struct device dev;
 
-       void *priv; /* Private data of the low-level implementation */
+       void *priv;
 
        u32 model;
        u16 interface;
        u8 serial[NFP_SERIAL_LEN];
 
        const struct nfp_cpp_operations *op;
-       struct list_head resource_list; /* NFP CPP resource list */
-       struct list_head mutex_cache;   /* Mutex cache */
+       struct list_head resource_list;
        rwlock_t resource_lock;
        wait_queue_head_t waitq;
 
-       /* NFP6000 CPP Mapping Table */
        u32 imb_cat_table[16];
 
-       /* Cached areas for cpp/xpb readl/writel speedups */
-       struct mutex area_cache_mutex;  /* Lock for the area cache */
+       struct mutex area_cache_mutex;
        struct list_head area_cache_list;
 
-       /* Cached information */
        void *hwinfo;
        void *rtsym;
 };
@@ -187,24 +197,6 @@ void nfp_cpp_free(struct nfp_cpp *cpp)
 {
        struct nfp_cpp_area_cache *cache, *ctmp;
        struct nfp_cpp_resource *res, *rtmp;
-       struct nfp_cpp_mutex *mutex, *mtmp;
-
-       /* There should be no mutexes in the cache at this point. */
-       WARN_ON(!list_empty(&cpp->mutex_cache));
-       /* .. but if there are, unlock them and complain. */
-       list_for_each_entry_safe(mutex, mtmp, &cpp->mutex_cache, list) {
-               dev_err(cpp->dev.parent, "Dangling mutex: @%d::0x%llx, %d locks held by %d owners\n",
-                       mutex->target, (unsigned long long)mutex->address,
-                       mutex->depth, mutex->usage);
-
-               /* Forcing an unlock */
-               mutex->depth = 1;
-               nfp_cpp_mutex_unlock(mutex);
-
-               /* Forcing a free */
-               mutex->usage = 1;
-               nfp_cpp_mutex_free(mutex);
-       }
 
        /* Remove all caches */
        list_for_each_entry_safe(cache, ctmp, &cpp->area_cache_list, entry) {
@@ -419,9 +411,43 @@ nfp_cpp_area_alloc(struct nfp_cpp *cpp, u32 dest,
  */
 void nfp_cpp_area_free(struct nfp_cpp_area *area)
 {
+       if (atomic_read(&area->refcount))
+               nfp_warn(area->cpp, "Warning: freeing busy area\n");
        nfp_cpp_area_put(area);
 }
 
+static bool nfp_cpp_area_acquire_try(struct nfp_cpp_area *area, int *status)
+{
+       *status = area->cpp->op->area_acquire(area);
+
+       return *status != -EAGAIN;
+}
+
+static int __nfp_cpp_area_acquire(struct nfp_cpp_area *area)
+{
+       int err, status;
+
+       if (atomic_inc_return(&area->refcount) > 1)
+               return 0;
+
+       if (!area->cpp->op->area_acquire)
+               return 0;
+
+       err = wait_event_interruptible(area->cpp->waitq,
+                                      nfp_cpp_area_acquire_try(area, &status));
+       if (!err)
+               err = status;
+       if (err) {
+               nfp_warn(area->cpp, "Warning: area wait failed: %d\n", err);
+               atomic_dec(&area->refcount);
+               return err;
+       }
+
+       nfp_cpp_area_get(area);
+
+       return 0;
+}
+
 /**
  * nfp_cpp_area_acquire() - lock down a CPP area for access
  * @area:      CPP area handle
@@ -433,27 +459,13 @@ void nfp_cpp_area_free(struct nfp_cpp_area *area)
  */
 int nfp_cpp_area_acquire(struct nfp_cpp_area *area)
 {
-       mutex_lock(&area->mutex);
-       if (atomic_inc_return(&area->refcount) == 1) {
-               int (*a_a)(struct nfp_cpp_area *);
-
-               a_a = area->cpp->op->area_acquire;
-               if (a_a) {
-                       int err;
+       int ret;
 
-                       wait_event_interruptible(area->cpp->waitq,
-                                                (err = a_a(area)) != -EAGAIN);
-                       if (err < 0) {
-                               atomic_dec(&area->refcount);
-                               mutex_unlock(&area->mutex);
-                               return err;
-                       }
-               }
-       }
+       mutex_lock(&area->mutex);
+       ret = __nfp_cpp_area_acquire(area);
        mutex_unlock(&area->mutex);
 
-       nfp_cpp_area_get(area);
-       return 0;
+       return ret;
 }
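
The helper split above exists because wait_event_interruptible() may evaluate its condition expression many times, and its own return value only reports signals; the actual acquire status has to be captured out-of-band. The shape of the idiom, with a hypothetical try_acquire() that returns -EAGAIN while busy (illustrative types, not driver code):

#include <linux/errno.h>
#include <linux/wait.h>

struct res_sketch {
        wait_queue_head_t waitq;
        /* ... */
};

static int try_acquire(struct res_sketch *r);   /* -EAGAIN while busy */

static bool acquire_try(struct res_sketch *r, int *status)
{
        *status = try_acquire(r);       /* remember the real result */
        return *status != -EAGAIN;      /* true stops the wait */
}

static int acquire_blocking(struct res_sketch *r)
{
        int err, status;

        err = wait_event_interruptible(r->waitq, acquire_try(r, &status));
        if (!err)
                err = status;           /* wait completed: report try result */
        return err;                     /* -ERESTARTSYS on signal */
}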
 
 /**
@@ -829,10 +841,7 @@ area_cache_get(struct nfp_cpp *cpp, u32 id,
         * the need for special case code below when
         * checking against available cache size.
         */
-       if (length == 0)
-               return NULL;
-
-       if (list_empty(&cpp->area_cache_list) || id == 0)
+       if (length == 0 || id == 0)
                return NULL;
 
        /* Remap from cpp_island to cpp_target */
@@ -840,10 +849,15 @@ area_cache_get(struct nfp_cpp *cpp, u32 id,
        if (err < 0)
                return NULL;
 
-       addr += *offset;
-
        mutex_lock(&cpp->area_cache_mutex);
 
+       if (list_empty(&cpp->area_cache_list)) {
+               mutex_unlock(&cpp->area_cache_mutex);
+               return NULL;
+       }
+
+       addr += *offset;
+
        /* See if we have a match */
        list_for_each_entry(cache, &cpp->area_cache_list, entry) {
                if (id == cache->id &&
@@ -937,12 +951,14 @@ int nfp_cpp_read(struct nfp_cpp *cpp, u32 destination,
                        return -ENOMEM;
 
                err = nfp_cpp_area_acquire(area);
-               if (err)
-                       goto out;
+               if (err) {
+                       nfp_cpp_area_free(area);
+                       return err;
+               }
        }
 
        err = nfp_cpp_area_read(area, offset, kernel_vaddr, length);
-out:
+
        if (cache)
                area_cache_put(cpp, cache);
        else
@@ -979,13 +995,14 @@ int nfp_cpp_write(struct nfp_cpp *cpp, u32 destination,
                        return -ENOMEM;
 
                err = nfp_cpp_area_acquire(area);
-               if (err)
-                       goto out;
+               if (err) {
+                       nfp_cpp_area_free(area);
+                       return err;
+               }
        }
 
        err = nfp_cpp_area_write(area, offset, kernel_vaddr, length);
 
-out:
        if (cache)
                area_cache_put(cpp, cache);
        else
@@ -1127,7 +1144,6 @@ nfp_cpp_from_operations(const struct nfp_cpp_operations *ops,
        rwlock_init(&cpp->resource_lock);
        init_waitqueue_head(&cpp->waitq);
        lockdep_set_class(&cpp->resource_lock, &nfp_cpp_resource_lock_key);
-       INIT_LIST_HEAD(&cpp->mutex_cache);
        INIT_LIST_HEAD(&cpp->resource_list);
        INIT_LIST_HEAD(&cpp->area_cache_list);
        mutex_init(&cpp->area_cache_mutex);
@@ -1425,322 +1441,3 @@ void *nfp_cpp_explicit_priv(struct nfp_cpp_explicit *cpp_explicit)
 {
        return &cpp_explicit[1];
 }
-
-/* THIS FUNCTION IS NOT EXPORTED */
-static u32 nfp_mutex_locked(u16 interface)
-{
-       return (u32)interface << 16 | 0x000f;
-}
-
-static u32 nfp_mutex_unlocked(u16 interface)
-{
-       return (u32)interface << 16 | 0x0000;
-}
-
-static bool nfp_mutex_is_locked(u32 val)
-{
-       return (val & 0xffff) == 0x000f;
-}
-
-static bool nfp_mutex_is_unlocked(u32 val)
-{
-       return (val & 0xffff) == 0000;
-}
-
-/* If you need more than 65536 recursive locks, please rethink your code. */
-#define MUTEX_DEPTH_MAX         0xffff
-
-static int
-nfp_cpp_mutex_validate(u16 interface, int *target, unsigned long long address)
-{
-       /* Not permitted on invalid interfaces */
-       if (NFP_CPP_INTERFACE_TYPE_of(interface) ==
-           NFP_CPP_INTERFACE_TYPE_INVALID)
-               return -EINVAL;
-
-       /* Address must be 64-bit aligned */
-       if (address & 7)
-               return -EINVAL;
-
-       if (*target != NFP_CPP_TARGET_MU)
-               return -EINVAL;
-
-       return 0;
-}
-
-/**
- * nfp_cpp_mutex_init() - Initialize a mutex location
- * @cpp:       NFP CPP handle
- * @target:    NFP CPP target ID (ie NFP_CPP_TARGET_CLS or NFP_CPP_TARGET_MU)
- * @address:   Offset into the address space of the NFP CPP target ID
- * @key:       Unique 32-bit value for this mutex
- *
- * The CPP target:address must point to a 64-bit aligned location, and
- * will initialize 64 bits of data at the location.
- *
- * This creates the initial mutex state, as locked by this
- * nfp_cpp_interface().
- *
- * This function should only be called when setting up
- * the initial lock state upon boot-up of the system.
- *
- * Return: 0 on success, or -errno on failure
- */
-int nfp_cpp_mutex_init(struct nfp_cpp *cpp,
-                      int target, unsigned long long address, u32 key)
-{
-       const u32 muw = NFP_CPP_ID(target, 4, 0);    /* atomic_write */
-       u16 interface = nfp_cpp_interface(cpp);
-       int err;
-
-       err = nfp_cpp_mutex_validate(interface, &target, address);
-       if (err)
-               return err;
-
-       err = nfp_cpp_writel(cpp, muw, address + 4, key);
-       if (err)
-               return err;
-
-       err = nfp_cpp_writel(cpp, muw, address, nfp_mutex_locked(interface));
-       if (err)
-               return err;
-
-       return 0;
-}
-
-/**
- * nfp_cpp_mutex_alloc() - Create a mutex handle
- * @cpp:       NFP CPP handle
- * @target:    NFP CPP target ID (ie NFP_CPP_TARGET_CLS or NFP_CPP_TARGET_MU)
- * @address:   Offset into the address space of the NFP CPP target ID
- * @key:       32-bit unique key (must match the key at this location)
- *
- * The CPP target:address must point to a 64-bit aligned location, and
- * reserve 64 bits of data at the location for use by the handle.
- *
- * Only target/address pairs that point to entities that support the
- * MU Atomic Engine's CmpAndSwap32 command are supported.
- *
- * Return:     A non-NULL struct nfp_cpp_mutex * on success, NULL on failure.
- */
-struct nfp_cpp_mutex *nfp_cpp_mutex_alloc(struct nfp_cpp *cpp, int target,
-                                         unsigned long long address, u32 key)
-{
-       const u32 mur = NFP_CPP_ID(target, 3, 0);    /* atomic_read */
-       u16 interface = nfp_cpp_interface(cpp);
-       struct nfp_cpp_mutex *mutex;
-       int err;
-       u32 tmp;
-
-       err = nfp_cpp_mutex_validate(interface, &target, address);
-       if (err)
-               return NULL;
-
-       /* Look for mutex on cache list */
-       list_for_each_entry(mutex, &cpp->mutex_cache, list) {
-               if (mutex->target == target && mutex->address == address) {
-                       mutex->usage++;
-                       return mutex;
-               }
-       }
-
-       err = nfp_cpp_readl(cpp, mur, address + 4, &tmp);
-       if (err < 0)
-               return NULL;
-
-       if (tmp != key)
-               return NULL;
-
-       mutex = kzalloc(sizeof(*mutex), GFP_KERNEL);
-       if (!mutex)
-               return NULL;
-
-       mutex->cpp = cpp;
-       mutex->target = target;
-       mutex->address = address;
-       mutex->key = key;
-       mutex->depth = 0;
-       mutex->usage = 1;
-
-       /* Add mutex to cache list */
-       list_add(&mutex->list, &cpp->mutex_cache);
-
-       return mutex;
-}
-
-/**
- * nfp_cpp_mutex_free() - Free a mutex handle - does not alter the lock state
- * @mutex:     NFP CPP Mutex handle
- */
-void nfp_cpp_mutex_free(struct nfp_cpp_mutex *mutex)
-{
-       if (--mutex->usage)
-               return;
-
-       /* Remove mutex from cache */
-       list_del(&mutex->list);
-       kfree(mutex);
-}
-
-/**
- * nfp_cpp_mutex_lock() - Lock a mutex handle, using the NFP MU Atomic Engine
- * @mutex:     NFP CPP Mutex handle
- *
- * Return: 0 on success, or -errno on failure
- */
-int nfp_cpp_mutex_lock(struct nfp_cpp_mutex *mutex)
-{
-       unsigned long warn_at = jiffies + 15 * HZ;
-       unsigned int timeout_ms = 1;
-       int err;
-
-       /* We can't use a waitqueue here, because the unlocker
-        * might be on a separate CPU.
-        *
-        * So just wait for now.
-        */
-       for (;;) {
-               err = nfp_cpp_mutex_trylock(mutex);
-               if (err != -EBUSY)
-                       break;
-
-               err = msleep_interruptible(timeout_ms);
-               if (err != 0)
-                       return -ERESTARTSYS;
-
-               if (time_is_before_eq_jiffies(warn_at)) {
-                       warn_at = jiffies + 60 * HZ;
-                       dev_warn(mutex->cpp->dev.parent,
-                                "Warning: waiting for NFP mutex [usage:%hd depth:%hd target:%d addr:%llx key:%08x]\n",
-                                mutex->usage, mutex->depth,
-                                mutex->target, mutex->address, mutex->key);
-               }
-       }
-
-       return err;
-}
-
-/**
- * nfp_cpp_mutex_unlock() - Unlock a mutex handle, using the MU Atomic Engine
- * @mutex:     NFP CPP Mutex handle
- *
- * Return: 0 on success, or -errno on failure
- */
-int nfp_cpp_mutex_unlock(struct nfp_cpp_mutex *mutex)
-{
-       const u32 muw = NFP_CPP_ID(mutex->target, 4, 0);    /* atomic_write */
-       const u32 mur = NFP_CPP_ID(mutex->target, 3, 0);    /* atomic_read */
-       struct nfp_cpp *cpp = mutex->cpp;
-       u32 key, value;
-       u16 interface;
-       int err;
-
-       interface = nfp_cpp_interface(cpp);
-
-       if (mutex->depth > 1) {
-               mutex->depth--;
-               return 0;
-       }
-
-       err = nfp_cpp_readl(mutex->cpp, mur, mutex->address + 4, &key);
-       if (err < 0)
-               return err;
-
-       if (key != mutex->key)
-               return -EPERM;
-
-       err = nfp_cpp_readl(mutex->cpp, mur, mutex->address, &value);
-       if (err < 0)
-               return err;
-
-       if (value != nfp_mutex_locked(interface))
-               return -EACCES;
-
-       err = nfp_cpp_writel(cpp, muw, mutex->address,
-                            nfp_mutex_unlocked(interface));
-       if (err < 0)
-               return err;
-
-       mutex->depth = 0;
-       return 0;
-}
-
-/**
- * nfp_cpp_mutex_trylock() - Attempt to lock a mutex handle
- * @mutex:     NFP CPP Mutex handle
- *
- * Return:      0 if the lock succeeded, -errno on failure
- */
-int nfp_cpp_mutex_trylock(struct nfp_cpp_mutex *mutex)
-{
-       const u32 muw = NFP_CPP_ID(mutex->target, 4, 0);    /* atomic_write */
-       const u32 mus = NFP_CPP_ID(mutex->target, 5, 3);    /* test_set_imm */
-       const u32 mur = NFP_CPP_ID(mutex->target, 3, 0);    /* atomic_read */
-       struct nfp_cpp *cpp = mutex->cpp;
-       u32 key, value, tmp;
-       int err;
-
-       if (mutex->depth > 0) {
-               if (mutex->depth == MUTEX_DEPTH_MAX)
-                       return -E2BIG;
-               mutex->depth++;
-               return 0;
-       }
-
-       /* Verify that the lock marker is not damaged */
-       err = nfp_cpp_readl(cpp, mur, mutex->address + 4, &key);
-       if (err < 0)
-               return err;
-
-       if (key != mutex->key)
-               return -EPERM;
-
-       /* Compare against the unlocked state, and if true,
-        * write the interface id into the top 16 bits, and
-        * mark as locked.
-        */
-       value = nfp_mutex_locked(nfp_cpp_interface(cpp));
-
-       /* We use test_set_imm here, as it implies a read
-        * of the current state, and sets the bits in the
-        * bytemask of the command to 1s. Since the mutex
-        * is guaranteed to be 64-bit aligned, the bytemask
-        * of this 32-bit command is ensured to be 8'b00001111,
-        * which implies that the lower 4 bits will be set to
-        * ones regardless of the initial state.
-        *
-        * Since this is a 'Readback' operation, with no Pull
-        * data, we can treat this as a normal Push (read)
-        * atomic, which returns the original value.
-        */
-       err = nfp_cpp_readl(cpp, mus, mutex->address, &tmp);
-       if (err < 0)
-               return err;
-
-       /* Was it unlocked? */
-       if (nfp_mutex_is_unlocked(tmp)) {
-               /* The read value can only be 0x....0000 in the unlocked state.
-                * If there was another contending for this lock, then
-                * the lock state would be 0x....000f
-                */
-
-               /* Write our owner ID into the lock
-                * While not strictly necessary, this helps with
-                * debug and bookkeeping.
-                */
-               err = nfp_cpp_writel(cpp, muw, mutex->address, value);
-               if (err < 0)
-                       return err;
-
-               mutex->depth = 1;
-               return 0;
-       }
-
-       /* Already locked by us? Success! */
-       if (tmp == value) {
-               mutex->depth = 1;
-               return 0;
-       }
-
-       return nfp_mutex_is_locked(tmp) ? -EBUSY : -EINVAL;
-}
diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_mutex.c b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_mutex.c
new file mode 100644 (file)
index 0000000..8a99c18
--- /dev/null
@@ -0,0 +1,345 @@
+/*
+ * Copyright (C) 2015-2017 Netronome Systems, Inc.
+ *
+ * This software is dual licensed under the GNU General Public License Version 2,
+ * June 1991 as shown in the file COPYING in the top-level directory of this
+ * source tree or the BSD 2-Clause License provided below.  You have the
+ * option to license this software under the complete terms of either license.
+ *
+ * The BSD 2-Clause License:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      1. Redistributions of source code must retain the above
+ *         copyright notice, this list of conditions and the following
+ *         disclaimer.
+ *
+ *      2. Redistributions in binary form must reproduce the above
+ *         copyright notice, this list of conditions and the following
+ *         disclaimer in the documentation and/or other materials
+ *         provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/jiffies.h>
+#include <linux/types.h>
+#include <linux/slab.h>
+#include <linux/wait.h>
+
+#include "nfp_cpp.h"
+#include "nfp6000/nfp6000.h"
+
+struct nfp_cpp_mutex {
+       struct nfp_cpp *cpp;
+       int target;
+       u16 depth;
+       unsigned long long address;
+       u32 key;
+};
+
+static u32 nfp_mutex_locked(u16 interface)
+{
+       return (u32)interface << 16 | 0x000f;
+}
+
+static u32 nfp_mutex_unlocked(u16 interface)
+{
+       return (u32)interface << 16 | 0x0000;
+}
+
+static bool nfp_mutex_is_locked(u32 val)
+{
+       return (val & 0xffff) == 0x000f;
+}
+
+static bool nfp_mutex_is_unlocked(u32 val)
+{
+       return (val & 0xffff) == 0x0000;
+}
+
+/* If you need more than 65536 recursive locks, please rethink your code. */
+#define NFP_MUTEX_DEPTH_MAX         0xffff
+
+static int
+nfp_cpp_mutex_validate(u16 interface, int *target, unsigned long long address)
+{
+       /* Not permitted on invalid interfaces */
+       if (NFP_CPP_INTERFACE_TYPE_of(interface) ==
+           NFP_CPP_INTERFACE_TYPE_INVALID)
+               return -EINVAL;
+
+       /* Address must be 64-bit aligned */
+       if (address & 7)
+               return -EINVAL;
+
+       if (*target != NFP_CPP_TARGET_MU)
+               return -EINVAL;
+
+       return 0;
+}
+
+/**
+ * nfp_cpp_mutex_init() - Initialize a mutex location
+ * @cpp:       NFP CPP handle
+ * @target:    NFP CPP target ID (only NFP_CPP_TARGET_MU is supported)
+ * @address:   Offset into the address space of the NFP CPP target ID
+ * @key:       Unique 32-bit value for this mutex
+ *
+ * The CPP target:address must point to a 64-bit aligned location, and
+ * will initialize 64 bits of data at the location.
+ *
+ * This creates the initial mutex state, as locked by this
+ * nfp_cpp_interface().
+ *
+ * This function should only be called when setting up
+ * the initial lock state upon boot-up of the system.
+ *
+ * Return: 0 on success, or -errno on failure
+ */
+int nfp_cpp_mutex_init(struct nfp_cpp *cpp,
+                      int target, unsigned long long address, u32 key)
+{
+       const u32 muw = NFP_CPP_ID(target, 4, 0);    /* atomic_write */
+       u16 interface = nfp_cpp_interface(cpp);
+       int err;
+
+       err = nfp_cpp_mutex_validate(interface, &target, address);
+       if (err)
+               return err;
+
+       err = nfp_cpp_writel(cpp, muw, address + 4, key);
+       if (err)
+               return err;
+
+       err = nfp_cpp_writel(cpp, muw, address, nfp_mutex_locked(interface));
+       if (err)
+               return err;
+
+       return 0;
+}
+
+/**
+ * nfp_cpp_mutex_alloc() - Create a mutex handle
+ * @cpp:       NFP CPP handle
+ * @target:    NFP CPP target ID (only NFP_CPP_TARGET_MU is supported)
+ * @address:   Offset into the address space of the NFP CPP target ID
+ * @key:       32-bit unique key (must match the key at this location)
+ *
+ * The CPP target:address must point to a 64-bit aligned location, and
+ * reserve 64 bits of data at the location for use by the handle.
+ *
+ * Only target/address pairs that point to entities that support the
+ * MU Atomic Engine's CmpAndSwap32 command are supported.
+ *
+ * Return:     A non-NULL struct nfp_cpp_mutex * on success, NULL on failure.
+ */
+struct nfp_cpp_mutex *nfp_cpp_mutex_alloc(struct nfp_cpp *cpp, int target,
+                                         unsigned long long address, u32 key)
+{
+       const u32 mur = NFP_CPP_ID(target, 3, 0);    /* atomic_read */
+       u16 interface = nfp_cpp_interface(cpp);
+       struct nfp_cpp_mutex *mutex;
+       int err;
+       u32 tmp;
+
+       err = nfp_cpp_mutex_validate(interface, &target, address);
+       if (err)
+               return NULL;
+
+       err = nfp_cpp_readl(cpp, mur, address + 4, &tmp);
+       if (err < 0)
+               return NULL;
+
+       if (tmp != key)
+               return NULL;
+
+       mutex = kzalloc(sizeof(*mutex), GFP_KERNEL);
+       if (!mutex)
+               return NULL;
+
+       mutex->cpp = cpp;
+       mutex->target = target;
+       mutex->address = address;
+       mutex->key = key;
+       mutex->depth = 0;
+
+       return mutex;
+}
+
+/**
+ * nfp_cpp_mutex_free() - Free a mutex handle - does not alter the lock state
+ * @mutex:     NFP CPP Mutex handle
+ */
+void nfp_cpp_mutex_free(struct nfp_cpp_mutex *mutex)
+{
+       kfree(mutex);
+}
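+
+/* The handle API above is used as alloc/lock/unlock/free, and freeing never
+ * touches the device-side lock word. A hedged usage sketch follows; the
+ * address and key values are illustrative only.
+ *
+ * static int with_device_mutex(struct nfp_cpp *cpp, u64 addr, u32 key)
+ * {
+ *	struct nfp_cpp_mutex *mtx;
+ *	int err;
+ *
+ *	mtx = nfp_cpp_mutex_alloc(cpp, NFP_CPP_TARGET_MU, addr, key);
+ *	if (!mtx)
+ *		return -EINVAL;
+ *
+ *	err = nfp_cpp_mutex_lock(mtx);		// polls trylock, see below
+ *	if (err)
+ *		goto out_free;
+ *
+ *	// ... exclusive access to the device-side resource ...
+ *
+ *	err = nfp_cpp_mutex_unlock(mtx);
+ * out_free:
+ *	nfp_cpp_mutex_free(mtx);		// leaves the lock word as-is
+ *	return err;
+ * }
+ */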
+
+/**
+ * nfp_cpp_mutex_lock() - Lock a mutex handle, using the NFP MU Atomic Engine
+ * @mutex:     NFP CPP Mutex handle
+ *
+ * Return: 0 on success, or -errno on failure
+ */
+int nfp_cpp_mutex_lock(struct nfp_cpp_mutex *mutex)
+{
+       unsigned long warn_at = jiffies + 15 * HZ;
+       unsigned int timeout_ms = 1;
+       int err;
+
+       /* We can't use a waitqueue here, because the unlocker
+        * might be on a separate CPU.
+        *
+        * So just wait for now.
+        */
+       for (;;) {
+               err = nfp_cpp_mutex_trylock(mutex);
+               if (err != -EBUSY)
+                       break;
+
+               err = msleep_interruptible(timeout_ms);
+               if (err != 0)
+                       return -ERESTARTSYS;
+
+               if (time_is_before_eq_jiffies(warn_at)) {
+                       warn_at = jiffies + 60 * HZ;
+                       nfp_warn(mutex->cpp,
+                                "Warning: waiting for NFP mutex [depth:%hd target:%d addr:%llx key:%08x]\n",
+                                mutex->depth,
+                                mutex->target, mutex->address, mutex->key);
+               }
+       }
+
+       return err;
+}
+
+/**
+ * nfp_cpp_mutex_unlock() - Unlock a mutex handle, using the MU Atomic Engine
+ * @mutex:     NFP CPP Mutex handle
+ *
+ * Return: 0 on success, or -errno on failure
+ */
+int nfp_cpp_mutex_unlock(struct nfp_cpp_mutex *mutex)
+{
+       const u32 muw = NFP_CPP_ID(mutex->target, 4, 0);    /* atomic_write */
+       const u32 mur = NFP_CPP_ID(mutex->target, 3, 0);    /* atomic_read */
+       struct nfp_cpp *cpp = mutex->cpp;
+       u32 key, value;
+       u16 interface;
+       int err;
+
+       interface = nfp_cpp_interface(cpp);
+
+       if (mutex->depth > 1) {
+               mutex->depth--;
+               return 0;
+       }
+
+       err = nfp_cpp_readl(mutex->cpp, mur, mutex->address + 4, &key);
+       if (err < 0)
+               return err;
+
+       if (key != mutex->key)
+               return -EPERM;
+
+       err = nfp_cpp_readl(mutex->cpp, mur, mutex->address, &value);
+       if (err < 0)
+               return err;
+
+       if (value != nfp_mutex_locked(interface))
+               return -EACCES;
+
+       err = nfp_cpp_writel(cpp, muw, mutex->address,
+                            nfp_mutex_unlocked(interface));
+       if (err < 0)
+               return err;
+
+       mutex->depth = 0;
+       return 0;
+}
+
+/**
+ * nfp_cpp_mutex_trylock() - Attempt to lock a mutex handle
+ * @mutex:     NFP CPP Mutex handle
+ *
+ * Return:      0 if the lock succeeded, -errno on failure
+ */
+int nfp_cpp_mutex_trylock(struct nfp_cpp_mutex *mutex)
+{
+       const u32 muw = NFP_CPP_ID(mutex->target, 4, 0);    /* atomic_write */
+       const u32 mus = NFP_CPP_ID(mutex->target, 5, 3);    /* test_set_imm */
+       const u32 mur = NFP_CPP_ID(mutex->target, 3, 0);    /* atomic_read */
+       struct nfp_cpp *cpp = mutex->cpp;
+       u32 key, value, tmp;
+       int err;
+
+       if (mutex->depth > 0) {
+               if (mutex->depth == NFP_MUTEX_DEPTH_MAX)
+                       return -E2BIG;
+               mutex->depth++;
+               return 0;
+       }
+
+       /* Verify that the lock marker is not damaged */
+       err = nfp_cpp_readl(cpp, mur, mutex->address + 4, &key);
+       if (err < 0)
+               return err;
+
+       if (key != mutex->key)
+               return -EPERM;
+
+       /* Compare against the unlocked state, and if true,
+        * write the interface id into the top 16 bits, and
+        * mark as locked.
+        */
+       value = nfp_mutex_locked(nfp_cpp_interface(cpp));
+
+       /* We use test_set_imm here, as it implies a read
+        * of the current state, and sets the bits in the
+        * bytemask of the command to 1s. Since the mutex
+        * is guaranteed to be 64-bit aligned, the bytemask
+        * of this 32-bit command is ensured to be 8'b00001111,
+        * which implies that the lower 4 bits will be set to
+        * ones regardless of the initial state.
+        *
+        * Since this is a 'Readback' operation, with no Pull
+        * data, we can treat this as a normal Push (read)
+        * atomic, which returns the original value.
+        */
+       err = nfp_cpp_readl(cpp, mus, mutex->address, &tmp);
+       if (err < 0)
+               return err;
+
+       /* Was it unlocked? */
+       if (nfp_mutex_is_unlocked(tmp)) {
+               /* The read value can only be 0x....0000 in the unlocked state.
+                * If another interface had been contending for this lock,
+                * the lock state would be 0x....000f.
+                */
+
+               /* Write our owner ID into the lock.
+                * While not strictly necessary, this helps with
+                * debug and bookkeeping.
+                */
+               err = nfp_cpp_writel(cpp, muw, mutex->address, value);
+               if (err < 0)
+                       return err;
+
+               mutex->depth = 1;
+               return 0;
+       }
+
+       return nfp_mutex_is_locked(tmp) ? -EBUSY : -EINVAL;
+}
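
The lock-word layout that trylock decodes can be worked through in plain, standalone C (an illustration of the encoding, not driver code): the owner's interface id lives in the top 16 bits, 0x000f in the low 16 bits means locked, 0x0000 means unlocked, and test_set_imm returns the word as it was before the command ran.

#include <stdint.h>
#include <stdio.h>

static uint32_t mutex_locked(uint16_t interface)
{
        return (uint32_t)interface << 16 | 0x000f;
}

/* Mirrors the decision order in nfp_cpp_mutex_trylock() above. */
static void classify(uint32_t old, uint16_t us)
{
        if ((old & 0xffff) == 0x0000)
                puts("was unlocked: we won, write our owner id, depth = 1");
        else if (old == mutex_locked(us))
                puts("already ours: trylock succeeds, depth = 1");
        else if ((old & 0xffff) == 0x000f)
                puts("held by another interface: -EBUSY, lock() keeps polling");
        else
                puts("damaged marker: -EINVAL");
}

int main(void)
{
        uint16_t us = 0x1234;

        classify(0x00000000, us);               /* freshly unlocked word */
        classify(mutex_locked(us), us);         /* our own lock word */
        classify(mutex_locked(0xbeef), us);     /* someone else's */
        return 0;
}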
index 34c50987c377c6e721d82621b51702457a629781..4635f42e15b005dd66ff5503f7c369160c0827b0 100644 (file)
@@ -49,6 +49,7 @@
 
 #include "nfp.h"
 #include "nfp_cpp.h"
+#include "nfp_nsp.h"
 
 /* Offsets relative to the CSR base */
 #define NSP_STATUS             0x00
@@ -96,6 +97,17 @@ enum nfp_nsp_cmd {
        __MAX_SPCODE,
 };
 
+static const struct {
+       int code;
+       const char *msg;
+} nsp_errors[] = {
+       { 6010, "could not map to phy for port" },
+       { 6011, "not an allowed rate/lanes for port" },
+       { 6012, "not an allowed rate/lanes for port" },
+       { 6013, "high/low error, change other port first" },
+       { 6014, "config not found in flash" },
+};
+
 struct nfp_nsp {
        struct nfp_cpp *cpp;
        struct nfp_resource *res;
@@ -103,8 +115,63 @@ struct nfp_nsp {
                u16 major;
                u16 minor;
        } ver;
+
+       /* Eth table config state */
+       bool modified;
+       unsigned int idx;
+       void *entries;
 };
 
+struct nfp_cpp *nfp_nsp_cpp(struct nfp_nsp *state)
+{
+       return state->cpp;
+}
+
+bool nfp_nsp_config_modified(struct nfp_nsp *state)
+{
+       return state->modified;
+}
+
+void nfp_nsp_config_set_modified(struct nfp_nsp *state, bool modified)
+{
+       state->modified = modified;
+}
+
+void *nfp_nsp_config_entries(struct nfp_nsp *state)
+{
+       return state->entries;
+}
+
+unsigned int nfp_nsp_config_idx(struct nfp_nsp *state)
+{
+       return state->idx;
+}
+
+void
+nfp_nsp_config_set_state(struct nfp_nsp *state, void *entries, unsigned int idx)
+{
+       state->entries = entries;
+       state->idx = idx;
+}
+
+void nfp_nsp_config_clear_state(struct nfp_nsp *state)
+{
+       state->entries = NULL;
+       state->idx = 0;
+}
+
+static void nfp_nsp_print_extended_error(struct nfp_nsp *state, u32 ret_val)
+{
+       int i;
+
+       if (!ret_val)
+               return;
+
+       for (i = 0; i < ARRAY_SIZE(nsp_errors); i++)
+               if (ret_val == nsp_errors[i].code)
+                       nfp_err(state->cpp, "err msg: %s\n", nsp_errors[i].msg);
+}
+
 static int nfp_nsp_check(struct nfp_nsp *state)
 {
        struct nfp_cpp *cpp = state->cpp;
@@ -209,9 +276,8 @@ nfp_nsp_wait_reg(struct nfp_cpp *cpp, u64 *reg,
                if ((*reg & mask) == val)
                        return 0;
 
-               err = msleep_interruptible(100);
-               if (err)
-                       return err;
+               if (msleep_interruptible(25))
+                       return -ERESTARTSYS;
 
                if (time_after(start_time, wait_until))
                        return -ETIMEDOUT;
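
This change (and the matching one in nfp_nsp_wait() below) fixes the error value: msleep_interruptible() does not return an errno, it returns the number of milliseconds left to sleep, which is positive exactly when a signal cut the sleep short. The corrected idiom, with a hypothetical poll_done() predicate and timeout handling trimmed:

#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/types.h>

static bool poll_done(void);    /* hypothetical completion check */

static int poll_until_done(void)
{
        while (!poll_done()) {
                /* Non-zero return == ms left to sleep == signal arrived */
                if (msleep_interruptible(25))
                        return -ERESTARTSYS;
        }
        return 0;
}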
@@ -228,7 +294,7 @@ nfp_nsp_wait_reg(struct nfp_cpp *cpp, u64 *reg,
  *
  * Return: 0 for success with no result
  *
- *      1..255 for NSP completion with a result code
+ *      positive value for NSP completion with a result code
  *
  *     -EAGAIN if the NSP is not yet present
  *     -ENODEV if the NSP is not a supported model
@@ -239,7 +305,7 @@ nfp_nsp_wait_reg(struct nfp_cpp *cpp, u64 *reg,
 static int nfp_nsp_command(struct nfp_nsp *state, u16 code, u32 option,
                           u32 buff_cpp, u64 buff_addr)
 {
-       u64 reg, nsp_base, nsp_buffer, nsp_status, nsp_command;
+       u64 reg, ret_val, nsp_base, nsp_buffer, nsp_status, nsp_command;
        struct nfp_cpp *cpp = state->cpp;
        u32 nsp_cpp;
        int err;
@@ -292,18 +358,20 @@ static int nfp_nsp_command(struct nfp_nsp *state, u16 code, u32 option,
                return err;
        }
 
+       err = nfp_cpp_readq(cpp, nsp_cpp, nsp_command, &ret_val);
+       if (err < 0)
+               return err;
+       ret_val = FIELD_GET(NSP_COMMAND_OPTION, ret_val);
+
        err = FIELD_GET(NSP_STATUS_RESULT, reg);
        if (err) {
-               nfp_warn(cpp, "Result (error) code set: %d command: %d\n",
-                        -err, code);
+               nfp_warn(cpp, "Result (error) code set: %d (%d) command: %d\n",
+                        -err, (int)ret_val, code);
+               nfp_nsp_print_extended_error(state, ret_val);
                return -err;
        }
 
-       err = nfp_cpp_readq(cpp, nsp_cpp, nsp_command, &reg);
-       if (err < 0)
-               return err;
-
-       return FIELD_GET(NSP_COMMAND_OPTION, reg);
+       return ret_val;
 }
 
 static int nfp_nsp_command_buf(struct nfp_nsp *nsp, u16 code, u32 option,
@@ -380,9 +448,10 @@ int nfp_nsp_wait(struct nfp_nsp *state)
                if (err != -EAGAIN)
                        break;
 
-               err = msleep_interruptible(100);
-               if (err)
+               if (msleep_interruptible(25)) {
+                       err = -ERESTARTSYS;
                        break;
+               }
 
                if (time_after(start_time, wait_until)) {
                        err = -ETIMEDOUT;
diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.h b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.h
new file mode 100644 (file)
index 0000000..7d34ff1
--- /dev/null
@@ -0,0 +1,150 @@
+/*
+ * Copyright (C) 2015-2017 Netronome Systems, Inc.
+ *
+ * This software is dual licensed under the GNU General Public License Version 2,
+ * June 1991 as shown in the file COPYING in the top-level directory of this
+ * source tree or the BSD 2-Clause License provided below.  You have the
+ * option to license this software under the complete terms of either license.
+ *
+ * The BSD 2-Clause License:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      1. Redistributions of source code must retain the above
+ *         copyright notice, this list of conditions and the following
+ *         disclaimer.
+ *
+ *      2. Redistributions in binary form must reproduce the above
+ *         copyright notice, this list of conditions and the following
+ *         disclaimer in the documentation and/or other materials
+ *         provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef NSP_NSP_H
+#define NSP_NSP_H 1
+
+#include <linux/types.h>
+#include <linux/if_ether.h>
+
+struct firmware;
+struct nfp_cpp;
+struct nfp_nsp;
+
+struct nfp_nsp *nfp_nsp_open(struct nfp_cpp *cpp);
+void nfp_nsp_close(struct nfp_nsp *state);
+u16 nfp_nsp_get_abi_ver_major(struct nfp_nsp *state);
+u16 nfp_nsp_get_abi_ver_minor(struct nfp_nsp *state);
+int nfp_nsp_wait(struct nfp_nsp *state);
+int nfp_nsp_device_soft_reset(struct nfp_nsp *state);
+int nfp_nsp_load_fw(struct nfp_nsp *state, const struct firmware *fw);
+
+enum nfp_eth_interface {
+       NFP_INTERFACE_NONE      = 0,
+       NFP_INTERFACE_SFP       = 1,
+       NFP_INTERFACE_SFPP      = 10,
+       NFP_INTERFACE_SFP28     = 28,
+       NFP_INTERFACE_QSFP      = 40,
+       NFP_INTERFACE_CXP       = 100,
+       NFP_INTERFACE_QSFP28    = 112,
+};
+
+enum nfp_eth_media {
+       NFP_MEDIA_DAC_PASSIVE = 0,
+       NFP_MEDIA_DAC_ACTIVE,
+       NFP_MEDIA_FIBRE,
+};
+
+enum nfp_eth_aneg {
+       NFP_ANEG_AUTO = 0,
+       NFP_ANEG_SEARCH,
+       NFP_ANEG_25G_CONSORTIUM,
+       NFP_ANEG_25G_IEEE,
+       NFP_ANEG_DISABLED,
+};
+
+/**
+ * struct nfp_eth_table - ETH table information
+ * @count:     number of table entries
+ * @ports:     table of ports
+ *
+ * @eth_index: port index according to legacy ethX numbering
+ * @index:     chip-wide first channel index
+ * @nbi:       NBI index
+ * @base:      first channel index (within NBI)
+ * @lanes:     number of channels
+ * @speed:     interface speed (in Mbps)
+ * @interface: interface (module) plugged in
+ * @media:     media type of the @interface
+ * @aneg:      auto negotiation mode
+ * @mac_addr:  interface MAC address
+ * @label_port:        port id
+ * @label_subport:  id of interface within port (for split ports)
+ * @enabled:   is enabled?
+ * @tx_enabled:        is TX enabled?
+ * @rx_enabled:        is RX enabled?
+ * @override_changed: is media reconfig pending?
+ *
+ * @port_type: one of %PORT_* defines for ethtool
+ * @is_split:  is interface part of a split port
+ */
+struct nfp_eth_table {
+       unsigned int count;
+       struct nfp_eth_table_port {
+               unsigned int eth_index;
+               unsigned int index;
+               unsigned int nbi;
+               unsigned int base;
+               unsigned int lanes;
+               unsigned int speed;
+
+               unsigned int interface;
+               enum nfp_eth_media media;
+
+               enum nfp_eth_aneg aneg;
+
+               u8 mac_addr[ETH_ALEN];
+
+               u8 label_port;
+               u8 label_subport;
+
+               bool enabled;
+               bool tx_enabled;
+               bool rx_enabled;
+
+               bool override_changed;
+
+               /* Computed fields */
+               u8 port_type;
+
+               bool is_split;
+       } ports[0];
+};
+
+struct nfp_eth_table *nfp_eth_read_ports(struct nfp_cpp *cpp);
+struct nfp_eth_table *
+__nfp_eth_read_ports(struct nfp_cpp *cpp, struct nfp_nsp *nsp);
+
+int nfp_eth_set_mod_enable(struct nfp_cpp *cpp, unsigned int idx, bool enable);
+int nfp_eth_set_configured(struct nfp_cpp *cpp, unsigned int idx,
+                          bool configed);
+
+struct nfp_nsp *nfp_eth_config_start(struct nfp_cpp *cpp, unsigned int idx);
+int nfp_eth_config_commit_end(struct nfp_nsp *nsp);
+void nfp_eth_config_cleanup_end(struct nfp_nsp *nsp);
+
+int __nfp_eth_set_aneg(struct nfp_nsp *nsp, enum nfp_eth_aneg mode);
+int __nfp_eth_set_speed(struct nfp_nsp *nsp, unsigned int speed);
+int __nfp_eth_set_split(struct nfp_nsp *nsp, unsigned int lanes);
+
+#endif
index 1ece1f8ae4b30c0c74a7f630487749d91d5b5620..639438d8313afc558d53233b6da7ab20c26417c0 100644 (file)
 #include <linux/module.h>
 
 #include "nfp.h"
-#include "nfp_nsp_eth.h"
+#include "nfp_nsp.h"
 #include "nfp6000/nfp6000.h"
 
 #define NSP_ETH_NBI_PORT_COUNT         24
 #define NSP_ETH_MAX_COUNT              (2 * NSP_ETH_NBI_PORT_COUNT)
 #define NSP_ETH_TABLE_SIZE             (NSP_ETH_MAX_COUNT *            \
-                                        sizeof(struct eth_table_entry))
+                                        sizeof(union eth_table_entry))
 
 #define NSP_ETH_PORT_LANES             GENMASK_ULL(3, 0)
 #define NSP_ETH_PORT_INDEX             GENMASK_ULL(15, 8)
 
 #define NSP_ETH_PORT_LANES_MASK                cpu_to_le64(NSP_ETH_PORT_LANES)
 
+#define NSP_ETH_STATE_CONFIGURED       BIT_ULL(0)
 #define NSP_ETH_STATE_ENABLED          BIT_ULL(1)
 #define NSP_ETH_STATE_TX_ENABLED       BIT_ULL(2)
 #define NSP_ETH_STATE_RX_ENABLED       BIT_ULL(3)
 #define NSP_ETH_STATE_RATE             GENMASK_ULL(11, 8)
+#define NSP_ETH_STATE_INTERFACE                GENMASK_ULL(19, 12)
+#define NSP_ETH_STATE_MEDIA            GENMASK_ULL(21, 20)
+#define NSP_ETH_STATE_OVRD_CHNG                BIT_ULL(22)
+#define NSP_ETH_STATE_ANEG             GENMASK_ULL(25, 23)
 
+#define NSP_ETH_CTRL_CONFIGURED                BIT_ULL(0)
 #define NSP_ETH_CTRL_ENABLED           BIT_ULL(1)
 #define NSP_ETH_CTRL_TX_ENABLED                BIT_ULL(2)
 #define NSP_ETH_CTRL_RX_ENABLED                BIT_ULL(3)
+#define NSP_ETH_CTRL_SET_RATE          BIT_ULL(4)
+#define NSP_ETH_CTRL_SET_LANES         BIT_ULL(5)
+#define NSP_ETH_CTRL_SET_ANEG          BIT_ULL(6)
+
+enum nfp_eth_raw {
+       NSP_ETH_RAW_PORT = 0,
+       NSP_ETH_RAW_STATE,
+       NSP_ETH_RAW_MAC,
+       NSP_ETH_RAW_CONTROL,
+
+       NSP_ETH_NUM_RAW
+};
 
 enum nfp_eth_rate {
        RATE_INVALID = 0,
@@ -76,29 +94,49 @@ enum nfp_eth_rate {
        RATE_25G,
 };
 
-struct eth_table_entry {
-       __le64 port;
-       __le64 state;
-       u8 mac_addr[6];
-       u8 resv[2];
-       __le64 control;
+union eth_table_entry {
+       struct {
+               __le64 port;
+               __le64 state;
+               u8 mac_addr[6];
+               u8 resv[2];
+               __le64 control;
+       };
+       __le64 raw[NSP_ETH_NUM_RAW];
+};
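+
+/* Keeping the raw[] view next to the named fields lets later code
+ * read-modify-write any one 64-bit word of an entry, selected by an
+ * NSP_ETH_RAW_* index, without a bespoke helper per field. A hypothetical
+ * helper in that spirit, building only on the union and enum above (not
+ * part of this patch):
+ *
+ * static void
+ * entry_set_field(union eth_table_entry *entry, enum nfp_eth_raw raw_idx,
+ *		   u64 mask, u64 val)
+ * {
+ *	u64 reg = le64_to_cpu(entry->raw[raw_idx]);
+ *
+ *	reg &= ~mask;
+ *	reg |= val & mask;
+ *	entry->raw[raw_idx] = cpu_to_le64(reg);
+ * }
+ */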
+
+static const struct {
+       enum nfp_eth_rate rate;
+       unsigned int speed;
+} nsp_eth_rate_tbl[] = {
+       { RATE_INVALID, 0, },
+       { RATE_10M,     SPEED_10, },
+       { RATE_100M,    SPEED_100, },
+       { RATE_1G,      SPEED_1000, },
+       { RATE_10G,     SPEED_10000, },
+       { RATE_25G,     SPEED_25000, },
 };
 
-static unsigned int nfp_eth_rate(enum nfp_eth_rate rate)
+static unsigned int nfp_eth_rate2speed(enum nfp_eth_rate rate)
 {
-       unsigned int rate_xlate[] = {
-               [RATE_INVALID]          = 0,
-               [RATE_10M]              = SPEED_10,
-               [RATE_100M]             = SPEED_100,
-               [RATE_1G]               = SPEED_1000,
-               [RATE_10G]              = SPEED_10000,
-               [RATE_25G]              = SPEED_25000,
-       };
+       int i;
 
-       if (rate >= ARRAY_SIZE(rate_xlate))
-               return 0;
+       for (i = 0; i < ARRAY_SIZE(nsp_eth_rate_tbl); i++)
+               if (nsp_eth_rate_tbl[i].rate == rate)
+                       return nsp_eth_rate_tbl[i].speed;
+
+       return 0;
+}
+
+static unsigned int nfp_eth_speed2rate(unsigned int speed)
+{
+       int i;
+
+       for (i = 0; i < ARRAY_SIZE(nsp_eth_rate_tbl); i++)
+               if (nsp_eth_rate_tbl[i].speed == speed)
+                       return nsp_eth_rate_tbl[i].rate;
 
-       return rate_xlate[rate];
+       return RATE_INVALID;
 }
 
 static void nfp_eth_copy_mac_reverse(u8 *dst, const u8 *src)
@@ -110,8 +148,8 @@ static void nfp_eth_copy_mac_reverse(u8 *dst, const u8 *src)
 }
 
 static void
-nfp_eth_port_translate(const struct eth_table_entry *src, unsigned int index,
-                      struct nfp_eth_table_port *dst)
+nfp_eth_port_translate(struct nfp_nsp *nsp, const union eth_table_entry *src,
+                      unsigned int index, struct nfp_eth_table_port *dst)
 {
        unsigned int rate;
        u64 port, state;
@@ -129,14 +167,60 @@ nfp_eth_port_translate(const struct eth_table_entry *src, unsigned int index,
        dst->tx_enabled = FIELD_GET(NSP_ETH_STATE_TX_ENABLED, state);
        dst->rx_enabled = FIELD_GET(NSP_ETH_STATE_RX_ENABLED, state);
 
-       rate = nfp_eth_rate(FIELD_GET(NSP_ETH_STATE_RATE, state));
+       rate = nfp_eth_rate2speed(FIELD_GET(NSP_ETH_STATE_RATE, state));
        dst->speed = dst->lanes * rate;
 
+       dst->interface = FIELD_GET(NSP_ETH_STATE_INTERFACE, state);
+       dst->media = FIELD_GET(NSP_ETH_STATE_MEDIA, state);
+
        nfp_eth_copy_mac_reverse(dst->mac_addr, src->mac_addr);
 
-       snprintf(dst->label, sizeof(dst->label) - 1, "%llu.%llu",
-                FIELD_GET(NSP_ETH_PORT_PHYLABEL, port),
-                FIELD_GET(NSP_ETH_PORT_LABEL, port));
+       dst->label_port = FIELD_GET(NSP_ETH_PORT_PHYLABEL, port);
+       dst->label_subport = FIELD_GET(NSP_ETH_PORT_LABEL, port);
+
+       if (nfp_nsp_get_abi_ver_minor(nsp) < 17)
+               return;
+
+       dst->override_changed = FIELD_GET(NSP_ETH_STATE_OVRD_CHNG, state);
+       dst->aneg = FIELD_GET(NSP_ETH_STATE_ANEG, state);
+}
+
+static void
+nfp_eth_mark_split_ports(struct nfp_cpp *cpp, struct nfp_eth_table *table)
+{
+       unsigned int i, j;
+
+       for (i = 0; i < table->count; i++)
+               for (j = 0; j < table->count; j++) {
+                       if (i == j)
+                               continue;
+                       if (table->ports[i].label_port !=
+                           table->ports[j].label_port)
+                               continue;
+                       if (table->ports[i].label_subport ==
+                           table->ports[j].label_subport)
+                               nfp_warn(cpp,
+                                        "Port %d subport %d is a duplicate\n",
+                                        table->ports[i].label_port,
+                                        table->ports[i].label_subport);
+
+                       table->ports[i].is_split = true;
+                       break;
+               }
+}
+
+static void
+nfp_eth_calc_port_type(struct nfp_cpp *cpp, struct nfp_eth_table_port *entry)
+{
+       if (entry->interface == NFP_INTERFACE_NONE) {
+               entry->port_type = PORT_NONE;
+               return;
+       }
+
+       if (entry->media == NFP_MEDIA_FIBRE)
+               entry->port_type = PORT_FIBRE;
+       else
+               entry->port_type = PORT_DA;
 }
 
 /**
@@ -166,10 +250,9 @@ struct nfp_eth_table *nfp_eth_read_ports(struct nfp_cpp *cpp)
 struct nfp_eth_table *
 __nfp_eth_read_ports(struct nfp_cpp *cpp, struct nfp_nsp *nsp)
 {
-       struct eth_table_entry *entries;
+       union eth_table_entry *entries;
        struct nfp_eth_table *table;
-       unsigned int cnt;
-       int i, j, ret;
+       int i, j, ret, cnt = 0;
 
        entries = kzalloc(NSP_ETH_TABLE_SIZE, GFP_KERNEL);
        if (!entries)
@@ -178,93 +261,288 @@ __nfp_eth_read_ports(struct nfp_cpp *cpp, struct nfp_nsp *nsp)
        ret = nfp_nsp_read_eth_table(nsp, entries, NSP_ETH_TABLE_SIZE);
        if (ret < 0) {
                nfp_err(cpp, "reading port table failed %d\n", ret);
-               kfree(entries);
-               return NULL;
+               goto err;
        }
 
-       /* Some versions of flash will give us 0 instead of port count */
-       cnt = ret;
-       if (!cnt) {
-               for (i = 0; i < NSP_ETH_MAX_COUNT; i++)
-                       if (entries[i].port & NSP_ETH_PORT_LANES_MASK)
-                               cnt++;
+       for (i = 0; i < NSP_ETH_MAX_COUNT; i++)
+               if (entries[i].port & NSP_ETH_PORT_LANES_MASK)
+                       cnt++;
+
+       /* Some versions of flash will give us 0 instead of port count.
+        * For those that give a port count, verify it against the value
+        * calculated above.
+        */
+       if (ret && ret != cnt) {
+               nfp_err(cpp, "table entry count reported (%d) does not match entries present (%d)\n",
+                       ret, cnt);
+               goto err;
        }
 
        table = kzalloc(sizeof(*table) +
                        sizeof(struct nfp_eth_table_port) * cnt, GFP_KERNEL);
-       if (!table) {
-               kfree(entries);
-               return NULL;
-       }
+       if (!table)
+               goto err;
 
        table->count = cnt;
        for (i = 0, j = 0; i < NSP_ETH_MAX_COUNT; i++)
                if (entries[i].port & NSP_ETH_PORT_LANES_MASK)
-                       nfp_eth_port_translate(&entries[i], i,
+                       nfp_eth_port_translate(nsp, &entries[i], i,
                                               &table->ports[j++]);
 
+       nfp_eth_mark_split_ports(cpp, table);
+       for (i = 0; i < table->count; i++)
+               nfp_eth_calc_port_type(cpp, &table->ports[i]);
+
        kfree(entries);
 
        return table;
+
+err:
+       kfree(entries);
+       return NULL;
 }
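
For callers, the usual entry point is the nfp_eth_read_ports() wrapper named in the hunk header above, which opens an NSP handle, invokes __nfp_eth_read_ports() and releases the handle. A hedged usage sketch (error handling trimmed, assuming a valid struct nfp_cpp *cpp):

	struct nfp_eth_table *eth_tbl;
	unsigned int i;

	eth_tbl = nfp_eth_read_ports(cpp);
	if (!eth_tbl)
		return -EIO;

	for (i = 0; i < eth_tbl->count; i++)
		pr_info("port %u.%u: %u Mbps%s\n",
			eth_tbl->ports[i].label_port,
			eth_tbl->ports[i].label_subport,
			eth_tbl->ports[i].speed,
			eth_tbl->ports[i].is_split ? " (split)" : "");

	kfree(eth_tbl);
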
 
-/**
- * nfp_eth_set_mod_enable() - set PHY module enable control bit
- * @cpp:       NFP CPP handle
- * @idx:       NFP chip-wide port index
- * @enable:    Desired state
- *
- * Enable or disable PHY module (this usually means setting the TX lanes
- * disable bits).
- *
- * Return: 0 or -ERRNO.
- */
-int nfp_eth_set_mod_enable(struct nfp_cpp *cpp, unsigned int idx, bool enable)
+struct nfp_nsp *nfp_eth_config_start(struct nfp_cpp *cpp, unsigned int idx)
 {
-       struct eth_table_entry *entries;
+       union eth_table_entry *entries;
        struct nfp_nsp *nsp;
-       u64 reg;
        int ret;
 
        entries = kzalloc(NSP_ETH_TABLE_SIZE, GFP_KERNEL);
        if (!entries)
-               return -ENOMEM;
+               return ERR_PTR(-ENOMEM);
 
        nsp = nfp_nsp_open(cpp);
        if (IS_ERR(nsp)) {
                kfree(entries);
-               return PTR_ERR(nsp);
+               return nsp;
        }
 
        ret = nfp_nsp_read_eth_table(nsp, entries, NSP_ETH_TABLE_SIZE);
        if (ret < 0) {
                nfp_err(cpp, "reading port table failed %d\n", ret);
-               goto exit_close_nsp;
+               goto err;
        }
 
        if (!(entries[idx].port & NSP_ETH_PORT_LANES_MASK)) {
                nfp_warn(cpp, "trying to set port state on disabled port %d\n",
                         idx);
-               ret = -EINVAL;
-               goto exit_close_nsp;
+               goto err;
+       }
+
+       nfp_nsp_config_set_state(nsp, entries, idx);
+       return nsp;
+
+err:
+       nfp_nsp_close(nsp);
+       kfree(entries);
+       return ERR_PTR(-EIO);
+}
+
+void nfp_eth_config_cleanup_end(struct nfp_nsp *nsp)
+{
+       union eth_table_entry *entries = nfp_nsp_config_entries(nsp);
+
+       nfp_nsp_config_set_modified(nsp, false);
+       nfp_nsp_config_clear_state(nsp);
+       nfp_nsp_close(nsp);
+       kfree(entries);
+}
+
+/**
+ * nfp_eth_config_commit_end() - perform recorded configuration changes
+ * @nsp:       NFP NSP handle returned from nfp_eth_config_start()
+ *
+ * Perform the configuration which was requested with the __nfp_eth_set_*()
+ * helpers and recorded in @nsp state.  If the device was already configured
+ * as requested, or no __nfp_eth_set_*() operations were made, no NSP
+ * command will be performed.
+ *
+ * Return:
+ * 0 - configuration successful;
+ * 1 - no changes were needed;
+ * -ERRNO - configuration failed.
+ */
+int nfp_eth_config_commit_end(struct nfp_nsp *nsp)
+{
+       union eth_table_entry *entries = nfp_nsp_config_entries(nsp);
+       int ret = 1;
+
+       if (nfp_nsp_config_modified(nsp)) {
+               ret = nfp_nsp_write_eth_table(nsp, entries, NSP_ETH_TABLE_SIZE);
+               ret = ret < 0 ? ret : 0;
+       }
+
+       nfp_eth_config_cleanup_end(nsp);
+
+       return ret;
+}
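
Taken together, nfp_eth_config_start(), the __nfp_eth_set_*() helpers and nfp_eth_config_commit_end() form a small transaction API: start snapshots the port table and holds the NSP handle, the setters only modify the snapshot and mark it dirty, and commit writes the table back at most once. An illustrative sketch of a full transaction, assuming valid cpp/idx and using NFP_ANEG_AUTO as an example enum nfp_eth_aneg value:

	struct nfp_nsp *nsp;
	int err;

	nsp = nfp_eth_config_start(cpp, idx);
	if (IS_ERR(nsp))
		return PTR_ERR(nsp);

	err = __nfp_eth_set_aneg(nsp, NFP_ANEG_AUTO);
	if (err) {
		nfp_eth_config_cleanup_end(nsp);	/* abort, write nothing */
		return err;
	}

	return nfp_eth_config_commit_end(nsp);	/* 0, 1 (no change) or -ERRNO */
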
+
+/**
+ * nfp_eth_set_mod_enable() - set PHY module enable control bit
+ * @cpp:       NFP CPP handle
+ * @idx:       NFP chip-wide port index
+ * @enable:    Desired state
+ *
+ * Enable or disable PHY module (this usually means setting the TX lanes
+ * disable bits).
+ *
+ * Return: 0 or -ERRNO.
+ */
+int nfp_eth_set_mod_enable(struct nfp_cpp *cpp, unsigned int idx, bool enable)
+{
+       union eth_table_entry *entries;
+       struct nfp_nsp *nsp;
+       u64 reg;
+
+       nsp = nfp_eth_config_start(cpp, idx);
+       if (IS_ERR(nsp))
+               return PTR_ERR(nsp);
+
+       entries = nfp_nsp_config_entries(nsp);
+
+       /* Check if we are already in requested state */
+       reg = le64_to_cpu(entries[idx].state);
+       if (enable != FIELD_GET(NSP_ETH_CTRL_ENABLED, reg)) {
+               reg = le64_to_cpu(entries[idx].control);
+               reg &= ~NSP_ETH_CTRL_ENABLED;
+               reg |= FIELD_PREP(NSP_ETH_CTRL_ENABLED, enable);
+               entries[idx].control = cpu_to_le64(reg);
+
+               nfp_nsp_config_set_modified(nsp, true);
        }
 
+       return nfp_eth_config_commit_end(nsp);
+}
+
+/**
+ * nfp_eth_set_configured() - set PHY module configured control bit
+ * @cpp:       NFP CPP handle
+ * @idx:       NFP chip-wide port index
+ * @configed:  Desired state
+ *
+ * Set the ifup/ifdown state on the PHY.
+ *
+ * Return: 0 or -ERRNO.
+ */
+int nfp_eth_set_configured(struct nfp_cpp *cpp, unsigned int idx, bool configed)
+{
+       union eth_table_entry *entries;
+       struct nfp_nsp *nsp;
+       u64 reg;
+
+       nsp = nfp_eth_config_start(cpp, idx);
+       if (IS_ERR(nsp))
+               return PTR_ERR(nsp);
+
+       entries = nfp_nsp_config_entries(nsp);
+
        /* Check if we are already in requested state */
        reg = le64_to_cpu(entries[idx].state);
-       if (enable == FIELD_GET(NSP_ETH_CTRL_ENABLED, reg)) {
-               ret = 0;
-               goto exit_close_nsp;
+       if (configed != FIELD_GET(NSP_ETH_STATE_CONFIGURED, reg)) {
+               reg = le64_to_cpu(entries[idx].control);
+               reg &= ~NSP_ETH_CTRL_CONFIGURED;
+               reg |= FIELD_PREP(NSP_ETH_CTRL_CONFIGURED, configed);
+               entries[idx].control = cpu_to_le64(reg);
+
+               nfp_nsp_config_set_modified(nsp, true);
        }
 
-       reg = le64_to_cpu(entries[idx].control);
-       reg &= ~NSP_ETH_CTRL_ENABLED;
-       reg |= FIELD_PREP(NSP_ETH_CTRL_ENABLED, enable);
-       entries[idx].control = cpu_to_le64(reg);
+       return nfp_eth_config_commit_end(nsp);
+}
 
-       ret = nfp_nsp_write_eth_table(nsp, entries, NSP_ETH_TABLE_SIZE);
-exit_close_nsp:
-       nfp_nsp_close(nsp);
-       kfree(entries);
+/* Force inline, FIELD_* macros require masks to be compile-time known */
+static __always_inline int
+nfp_eth_set_bit_config(struct nfp_nsp *nsp, unsigned int raw_idx,
+                      const u64 mask, unsigned int val, const u64 ctrl_bit)
+{
+       union eth_table_entry *entries = nfp_nsp_config_entries(nsp);
+       unsigned int idx = nfp_nsp_config_idx(nsp);
+       u64 reg;
+
+       /* Note: set features were added in ABI 0.14 but the error
+        *       codes were initially not populated correctly.
+        */
+       if (nfp_nsp_get_abi_ver_minor(nsp) < 17) {
+               nfp_err(nfp_nsp_cpp(nsp),
+                       "set operations not supported, please update flash\n");
+               return -EOPNOTSUPP;
+       }
+
+       /* Check if we are already in requested state */
+       reg = le64_to_cpu(entries[idx].raw[raw_idx]);
+       if (val == FIELD_GET(mask, reg))
+               return 0;
 
-       return ret < 0 ? ret : 0;
+       reg &= ~mask;
+       reg |= FIELD_PREP(mask, val);
+       entries[idx].raw[raw_idx] = cpu_to_le64(reg);
+
+       entries[idx].control |= cpu_to_le64(ctrl_bit);
+
+       nfp_nsp_config_set_modified(nsp, true);
+
+       return 0;
+}
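
The helper above is a read-modify-write over one little-endian 64-bit word: FIELD_GET() pulls the current value out from under the mask, FIELD_PREP() shifts the new value into position, and the extra control bit tells the NSP which field to apply. A self-contained illustration of the mask arithmetic; the 3-bit field at bits 8..10 is made up for the example:

#include <stdint.h>
#include <stdio.h>

#define EXAMPLE_MASK	0x0000000000000700ULL	/* hypothetical 3-bit field */

static uint64_t field_get(uint64_t mask, uint64_t reg)
{
	return (reg & mask) >> __builtin_ctzll(mask);
}

static uint64_t field_prep(uint64_t mask, uint64_t val)
{
	return (val << __builtin_ctzll(mask)) & mask;
}

int main(void)
{
	uint64_t reg = 0x300;	/* field currently holds 3 */

	printf("old %llu\n", (unsigned long long)field_get(EXAMPLE_MASK, reg));
	reg &= ~EXAMPLE_MASK;			/* clear the field... */
	reg |= field_prep(EXAMPLE_MASK, 5);	/* ...then set it to 5 */
	printf("new %llu\n", (unsigned long long)field_get(EXAMPLE_MASK, reg));
	return 0;
}
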
+
+/**
+ * __nfp_eth_set_aneg() - set PHY autonegotiation control bit
+ * @nsp:       NFP NSP handle returned from nfp_eth_config_start()
+ * @mode:      Desired autonegotiation mode
+ *
+ * Allow/disallow PHY module to advertise/perform autonegotiation.
+ * Will write to hwinfo overrides in the flash (persistent config).
+ *
+ * Return: 0 or -ERRNO.
+ */
+int __nfp_eth_set_aneg(struct nfp_nsp *nsp, enum nfp_eth_aneg mode)
+{
+       return nfp_eth_set_bit_config(nsp, NSP_ETH_RAW_STATE,
+                                     NSP_ETH_STATE_ANEG, mode,
+                                     NSP_ETH_CTRL_SET_ANEG);
+}
+
+/**
+ * __nfp_eth_set_speed() - set interface speed/rate
+ * @nsp:       NFP NSP handle returned from nfp_eth_config_start()
+ * @speed:     Desired speed (per lane)
+ *
+ * Set lane speed.  The provided @speed value should be the subport speed
+ * divided by the number of lanes this subport spans (e.g. 10000 for 40G,
+ * 25000 for 50G).
+ * Will write to hwinfo overrides in the flash (persistent config).
+ *
+ * Return: 0 or -ERRNO.
+ */
+int __nfp_eth_set_speed(struct nfp_nsp *nsp, unsigned int speed)
+{
+       enum nfp_eth_rate rate;
+
+       rate = nfp_eth_speed2rate(speed);
+       if (rate == RATE_INVALID) {
+               nfp_warn(nfp_nsp_cpp(nsp),
+                        "could not find matching lane rate for speed %u\n",
+                        speed);
+               return -EINVAL;
+       }
+
+       return nfp_eth_set_bit_config(nsp, NSP_ETH_RAW_STATE,
+                                     NSP_ETH_STATE_RATE, rate,
+                                     NSP_ETH_CTRL_SET_RATE);
+}
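
The per-lane convention is easy to get backwards, so a worked example: a 40G subport spanning 4 lanes is programmed as 40000 / 4 = 10000 per lane, and a 50G subport on 2 lanes as 50000 / 2 = 25000. A hedged caller-side sketch, taking the lane count from a previously read port table:

	/* Illustrative: program a 40G subport as 10G per lane */
	unsigned int lane_speed = 40000 / eth_tbl->ports[i].lanes;	/* 10000 */
	int err = __nfp_eth_set_speed(nsp, lane_speed);
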
+
+/**
+ * __nfp_eth_set_split() - set interface lane split
+ * @nsp:       NFP NSP handle returned from nfp_eth_config_start()
+ * @lanes:     Desired lanes per port
+ *
+ * Set number of lanes in the port.
+ * Will write to hwinfo overrides in the flash (persistent config).
+ *
+ * Return: 0 or -ERRNO.
+ */
+int __nfp_eth_set_split(struct nfp_nsp *nsp, unsigned int lanes)
+{
+       return nfp_eth_set_bit_config(nsp, NSP_ETH_RAW_PORT, NSP_ETH_PORT_LANES,
+                                     lanes, NSP_ETH_CTRL_SET_LANES);
 }
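
Splitting a port typically pairs the two setters inside one transaction so the table is written back once. A hedged sketch, reusing the transaction pattern from the commit example above, that breaks a 40G port out as 4x10G via the persistent hwinfo overrides:

	struct nfp_nsp *nsp;
	int err;

	nsp = nfp_eth_config_start(cpp, idx);
	if (IS_ERR(nsp))
		return PTR_ERR(nsp);

	err = __nfp_eth_set_split(nsp, 1);		/* 1 lane per subport */
	if (!err)
		err = __nfp_eth_set_speed(nsp, 10000);	/* 10G per lane */
	if (err) {
		nfp_eth_config_cleanup_end(nsp);
		return err;
	}

	return nfp_eth_config_commit_end(nsp);
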
diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp_eth.h b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp_eth.h
deleted file mode 100644 (file)
index edf703d..0000000
+++ /dev/null
@@ -1,81 +0,0 @@
-/*
- * Copyright (C) 2015-2017 Netronome Systems, Inc.
- *
- * This software is dual licensed under the GNU General Public License Version 2,
- * June 1991 as shown in the file COPYING in the top-level directory of this
- * source tree or the BSD 2-Clause License provided below.  You have the
- * option to license this software under the complete terms of either license.
- *
- * The BSD 2-Clause License:
- *
- *     Redistribution and use in source and binary forms, with or
- *     without modification, are permitted provided that the following
- *     conditions are met:
- *
- *      1. Redistributions of source code must retain the above
- *         copyright notice, this list of conditions and the following
- *         disclaimer.
- *
- *      2. Redistributions in binary form must reproduce the above
- *         copyright notice, this list of conditions and the following
- *         disclaimer in the documentation and/or other materials
- *         provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-#ifndef NSP_NSP_ETH_H
-#define NSP_NSP_ETH_H 1
-
-#include <linux/types.h>
-#include <linux/if_ether.h>
-
-/**
- * struct nfp_eth_table - ETH table information
- * @count:     number of table entries
- * @ports:     table of ports
- *
- * @eth_index: port index according to legacy ethX numbering
- * @index:     chip-wide first channel index
- * @nbi:       NBI index
- * @base:      first channel index (within NBI)
- * @lanes:     number of channels
- * @speed:     interface speed (in Mbps)
- * @mac_addr:  interface MAC address
- * @label:     interface id string
- * @enabled:   is enabled?
- * @tx_enabled:        is TX enabled?
- * @rx_enabled:        is RX enabled?
- */
-struct nfp_eth_table {
-       unsigned int count;
-       struct nfp_eth_table_port {
-               unsigned int eth_index;
-               unsigned int index;
-               unsigned int nbi;
-               unsigned int base;
-               unsigned int lanes;
-               unsigned int speed;
-
-               u8 mac_addr[ETH_ALEN];
-               char label[8];
-
-               bool enabled;
-               bool tx_enabled;
-               bool rx_enabled;
-       } ports[0];
-};
-
-struct nfp_eth_table *nfp_eth_read_ports(struct nfp_cpp *cpp);
-struct nfp_eth_table *
-__nfp_eth_read_ports(struct nfp_cpp *cpp, struct nfp_nsp *nsp);
-int nfp_eth_set_mod_enable(struct nfp_cpp *cpp, unsigned int idx, bool enable);
-
-#endif
index a2850344f8b44179dac1501e1f0a5369b6dddc6a..2d15a7c9d0de33d6b3773e3c7da7ed49c694095d 100644 (file)
 #include "nfp_cpp.h"
 #include "nfp6000/nfp6000.h"
 
+#define NFP_RESOURCE_TBL_TARGET                NFP_CPP_TARGET_MU
+#define NFP_RESOURCE_TBL_BASE          0x8100000000ULL
+
+/* NFP Resource Table self-identifier */
+#define NFP_RESOURCE_TBL_NAME          "nfp.res"
+#define NFP_RESOURCE_TBL_KEY           0x00000000 /* Special key for entry 0 */
+
 #define NFP_RESOURCE_ENTRY_NAME_SZ     8
 
 /**
@@ -100,9 +107,11 @@ static int nfp_cpp_resource_find(struct nfp_cpp *cpp, struct nfp_resource *res)
        strncpy(name_pad, res->name, sizeof(name_pad));
 
        /* Search for a matching entry */
-       key = NFP_RESOURCE_TBL_KEY;
-       if (memcmp(name_pad, NFP_RESOURCE_TBL_NAME "\0\0\0\0\0\0\0\0", 8))
-               key = crc32_posix(name_pad, sizeof(name_pad));
+       if (!memcmp(name_pad, NFP_RESOURCE_TBL_NAME "\0\0\0\0\0\0\0\0", 8)) {
+               nfp_err(cpp, "Grabbing device lock not supported\n");
+               return -EOPNOTSUPP;
+       }
+       key = crc32_posix(name_pad, sizeof(name_pad));
 
        for (i = 0; i < NFP_RESOURCE_TBL_ENTRIES; i++) {
                u64 addr = NFP_RESOURCE_TBL_BASE +
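
With the special case removed, every lookup key is derived the same way: the resource name is zero-padded to the entry's 8-byte name field and hashed with crc32_posix(), and only the table's own "nfp.res" self-identifier (key 0) is refused. A sketch of the derivation, assuming the driver's crc32_posix() helper; the resource name shown is illustrative:

	char name_pad[NFP_RESOURCE_ENTRY_NAME_SZ] = {};
	u32 key;

	strncpy(name_pad, "nfp.sp", sizeof(name_pad));	/* illustrative name */
	key = crc32_posix(name_pad, sizeof(name_pad));
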
index 9709c8ca0774dcf70f226d29d63b4eaf70cc578c..159564d8dcdb5cd1ec5d6cd02049e380d7b73e44 100644 (file)
@@ -152,7 +152,6 @@ struct  w90p910_ether {
        struct tran_pdesc *tdesc;
        dma_addr_t rdesc_phys;
        dma_addr_t tdesc_phys;
-       struct net_device_stats stats;
        struct platform_device *pdev;
        struct resource *res;
        struct sk_buff *skb;
@@ -584,15 +583,6 @@ static int w90p910_ether_close(struct net_device *dev)
        return 0;
 }
 
-static struct net_device_stats *w90p910_ether_stats(struct net_device *dev)
-{
-       struct w90p910_ether *ether;
-
-       ether = netdev_priv(dev);
-
-       return &ether->stats;
-}
-
 static int w90p910_send_frame(struct net_device *dev,
                                        unsigned char *data, int length)
 {
@@ -671,10 +661,10 @@ static irqreturn_t w90p910_tx_interrupt(int irq, void *dev_id)
                        ether->finish_tx = 0;
 
                if (txbd->sl & TXDS_TXCP) {
-                       ether->stats.tx_packets++;
-                       ether->stats.tx_bytes += txbd->sl & 0xFFFF;
+                       dev->stats.tx_packets++;
+                       dev->stats.tx_bytes += txbd->sl & 0xFFFF;
                } else {
-                       ether->stats.tx_errors++;
+                       dev->stats.tx_errors++;
                }
 
                txbd->sl = 0x0;
@@ -730,7 +720,7 @@ static void netdev_rx(struct net_device *dev)
                        data = ether->rdesc->recv_buf[ether->cur_rx];
                        skb = netdev_alloc_skb(dev, length + 2);
                        if (!skb) {
-                               ether->stats.rx_dropped++;
+                               dev->stats.rx_dropped++;
                                return;
                        }
 
@@ -738,24 +728,24 @@ static void netdev_rx(struct net_device *dev)
                        skb_put(skb, length);
                        skb_copy_to_linear_data(skb, data, length);
                        skb->protocol = eth_type_trans(skb, dev);
-                       ether->stats.rx_packets++;
-                       ether->stats.rx_bytes += length;
+                       dev->stats.rx_packets++;
+                       dev->stats.rx_bytes += length;
                        netif_rx(skb);
                } else {
-                       ether->stats.rx_errors++;
+                       dev->stats.rx_errors++;
 
                        if (status & RXDS_RP) {
                                dev_err(&pdev->dev, "rx runt err\n");
-                               ether->stats.rx_length_errors++;
+                               dev->stats.rx_length_errors++;
                        } else if (status & RXDS_CRCE) {
                                dev_err(&pdev->dev, "rx crc err\n");
-                               ether->stats.rx_crc_errors++;
+                               dev->stats.rx_crc_errors++;
                        } else if (status & RXDS_ALIE) {
                                dev_err(&pdev->dev, "rx alignment err\n");
-                               ether->stats.rx_frame_errors++;
+                               dev->stats.rx_frame_errors++;
                        } else if (status & RXDS_PTLE) {
                                dev_err(&pdev->dev, "rx longer err\n");
-                               ether->stats.rx_over_errors++;
+                               dev->stats.rx_over_errors++;
                        }
                }
 
@@ -912,7 +902,6 @@ static const struct net_device_ops w90p910_ether_netdev_ops = {
        .ndo_open               = w90p910_ether_open,
        .ndo_stop               = w90p910_ether_close,
        .ndo_start_xmit         = w90p910_ether_start_xmit,
-       .ndo_get_stats          = w90p910_ether_stats,
        .ndo_set_rx_mode        = w90p910_ether_set_multicast_list,
        .ndo_set_mac_address    = w90p910_set_mac_address,
        .ndo_do_ioctl           = w90p910_ether_ioctl,
index 7b43a3b4abdcbc7bc1cdfd4d13c611563e2760a2..3dd973475125c0f856d4eccacba4ce4c0c34cc50 100644 (file)
@@ -1375,13 +1375,8 @@ netxen_receive_peg_ready(struct netxen_adapter *adapter)
 
        } while (--retries);
 
-       if (!retries) {
-               printk(KERN_ERR "Receive Peg initialization not "
-                             "complete, state: 0x%x.\n", val);
-               return -EIO;
-       }
-
-       return 0;
+       pr_err("Receive Peg initialization not complete, state: 0x%x.\n", val);
+       return -EIO;
 }
 
 int netxen_init_firmware(struct netxen_adapter *adapter)
index 00c17fa6545bd5752a427e3660b062dc26ba57db..4896ee0cc458b4e1666ae9bd3c643a4c6d95a7aa 100644 (file)
 #include "qed_hsi.h"
 
 extern const struct qed_common_ops qed_common_ops_pass;
-#define DRV_MODULE_VERSION "8.10.10.20"
+
+#define QED_MAJOR_VERSION               8
+#define QED_MINOR_VERSION               10
+#define QED_REVISION_VERSION            10
+#define QED_ENGINEERING_VERSION         21
+
+#define QED_VERSION                                             \
+       ((QED_MAJOR_VERSION << 24) | (QED_MINOR_VERSION << 16) | \
+        (QED_REVISION_VERSION << 8) | QED_ENGINEERING_VERSION)
+
+#define STORM_FW_VERSION                                      \
+       ((FW_MAJOR_VERSION << 24) | (FW_MINOR_VERSION << 16) | \
+        (FW_REVISION_VERSION << 8) | FW_ENGINEERING_VERSION)
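
QED_VERSION packs the four components into a single 32-bit word, one byte per component, which makes versions directly comparable as integers. For the values above, 8.10.10.21 becomes 0x080A0A15; a standalone check:

#include <inttypes.h>
#include <stdio.h>

int main(void)
{
	/* Mirrors QED_VERSION: major << 24 | minor << 16 | revision << 8 | eng */
	uint32_t v = (8u << 24) | (10u << 16) | (10u << 8) | 21u;

	printf("0x%08" PRIX32 "\n", v);	/* prints 0x080A0A15 */
	return 0;
}
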
 
 #define MAX_HWFNS_PER_DEVICE    (4)
 #define NAME_SIZE 16
@@ -59,8 +71,6 @@ extern const struct qed_common_ops qed_common_ops_pass;
 
 #define QED_WFQ_UNIT   100
 
-#define ISCSI_BDQ_ID(_port_id) (_port_id)
-#define FCOE_BDQ_ID(_port_id) ((_port_id) + 2)
 #define QED_WID_SIZE            (1024)
 #define QED_PF_DEMS_SIZE        (4)
 
@@ -76,6 +86,15 @@ union qed_mcp_protocol_stats;
 enum qed_mcp_protocol_type;
 
 /* helpers */
+#define QED_MFW_GET_FIELD(name, field) \
+       (((name) & (field ## _MASK)) >> (field ## _SHIFT))
+
+#define QED_MFW_SET_FIELD(name, field, value)                                 \
+       do {                                                                   \
+               (name)  &= ~((field ## _MASK) << (field ## _SHIFT));           \
+               (name)  |= (((value) << (field ## _SHIFT)) & (field ## _MASK));\
+       } while (0)
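
These are the token-pasting counterparts of the FIELD_GET()/FIELD_PREP() helpers used in the nfp hunks earlier in this diff: given a FOO_MASK/FOO_SHIFT pair from the management-firmware headers, the ## concatenation builds both names from one argument. A standalone illustration with a made-up field whose mask is defined in place, as the MFW headers do:

#include <stdio.h>

#define DEMO_MAX_TCS_MASK	0x000000f0	/* hypothetical in-place mask */
#define DEMO_MAX_TCS_SHIFT	4

#define DEMO_GET_FIELD(name, field) \
	(((name) & (field ## _MASK)) >> (field ## _SHIFT))

int main(void)
{
	unsigned int flags = 0x00000080;	/* 8 stored in bits 4..7 */

	printf("%u\n", DEMO_GET_FIELD(flags, DEMO_MAX_TCS));	/* prints 8 */
	return 0;
}
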
+
 static inline u32 qed_db_addr(u32 cid, u32 DEMS)
 {
        u32 db_addr = FIELD_VALUE(DB_LEGACY_ADDR_DEMS, DEMS) |
@@ -198,6 +217,7 @@ enum qed_resources {
        QED_LL2_QUEUE,
        QED_CMDQS_CQS,
        QED_RDMA_STATS_QUEUE,
+       QED_BDQ,
        QED_MAX_RESC,
 };
 
@@ -205,8 +225,9 @@ enum QED_FEATURE {
        QED_PF_L2_QUE,
        QED_VF,
        QED_RDMA_CNQ,
-       QED_VF_L2_QUE,
+       QED_ISCSI_CQ,
        QED_FCOE_CQ,
+       QED_VF_L2_QUE,
        QED_MAX_FEATURES,
 };
 
@@ -219,7 +240,9 @@ enum QED_PORT_MODE {
        QED_PORT_MODE_DE_4X20G,
        QED_PORT_MODE_DE_1X40G,
        QED_PORT_MODE_DE_2X25G,
-       QED_PORT_MODE_DE_1X25G
+       QED_PORT_MODE_DE_1X25G,
+       QED_PORT_MODE_DE_4X25G,
+       QED_PORT_MODE_DE_2X10G,
 };
 
 enum qed_dev_cap {
@@ -249,9 +272,14 @@ struct qed_hw_info {
                                 RESC_NUM(_p_hwfn, resc))
 #define FEAT_NUM(_p_hwfn, resc) ((_p_hwfn)->hw_info.feat_num[resc])
 
-       u8                              num_tc;
+       /* Amount of traffic classes HW supports */
+       u8 num_hw_tc;
+
+       /* Amount of TCs which should be active according to DCBx or upper
+        * layer driver configuration.
+        */
+       u8 num_active_tc;
        u8                              offload_tc;
-       u8                              non_offload_tc;
 
        u32                             concrete_fid;
        u16                             opaque_fid;
@@ -314,15 +342,19 @@ struct qed_qm_info {
        struct init_qm_port_params      *qm_port_params;
        u16                             start_pq;
        u8                              start_vport;
-       u8                              pure_lb_pq;
-       u8                              offload_pq;
-       u8                              pure_ack_pq;
-       u8 ooo_pq;
-       u8                              vf_queues_offset;
+       u16                             pure_lb_pq;
+       u16                             offload_pq;
+       u16                             low_latency_pq;
+       u16                             pure_ack_pq;
+       u16                             ooo_pq;
+       u16                             first_vf_pq;
+       u16                             first_mcos_pq;
+       u16                             first_rl_pq;
        u16                             num_pqs;
        u16                             num_vf_pqs;
        u8                              num_vports;
        u8                              max_phys_tcs_per_port;
+       u8                              ooo_tc;
        bool                            pf_rl_en;
        bool                            pf_wfq_en;
        bool                            vport_rl_en;
@@ -353,6 +385,12 @@ struct qed_fw_data {
        u32                     init_ops_size;
 };
 
+#define DRV_MODULE_VERSION                   \
+       __stringify(QED_MAJOR_VERSION) "."    \
+       __stringify(QED_MINOR_VERSION) "."    \
+       __stringify(QED_REVISION_VERSION) "." \
+       __stringify(QED_ENGINEERING_VERSION)
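
DRV_MODULE_VERSION now derives the version string from the same four numbers at preprocessing time. __stringify() is the kernel's two-level stringizing macro: the indirection forces the numeric macros to expand before # turns them into string literals, and the adjacent literals then concatenate. A standalone equivalent:

#include <stdio.h>

#define __stringify_1(x)	#x
#define __stringify(x)		__stringify_1(x)	/* expand, then stringize */

#define DEMO_MAJOR	8
#define DEMO_MINOR	10

#define DEMO_VERSION	__stringify(DEMO_MAJOR) "." __stringify(DEMO_MINOR)

int main(void)
{
	puts(DEMO_VERSION);	/* prints 8.10 */
	return 0;
}
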
+
 struct qed_simd_fp_handler {
        void    *token;
        void    (*func)(void *);
@@ -364,7 +402,8 @@ struct qed_hwfn {
 #define IS_LEAD_HWFN(edev)              (!((edev)->my_id))
        u8                              rel_pf_id;      /* Relative to engine*/
        u8                              abs_pf_id;
-#define QED_PATH_ID(_p_hwfn)           ((_p_hwfn)->abs_pf_id & 1)
+#define QED_PATH_ID(_p_hwfn) \
+       (QED_IS_K2((_p_hwfn)->cdev) ? 0 : ((_p_hwfn)->abs_pf_id & 1))
        u8                              port_id;
        bool                            b_active;
 
@@ -523,9 +562,7 @@ struct qed_dev {
        u8      dp_level;
        char    name[NAME_SIZE];
 
-       u8      type;
-#define QED_DEV_TYPE_BB (0 << 0)
-#define QED_DEV_TYPE_AH BIT(0)
+       enum    qed_dev_type type;
 /* Translate type/revision combo into the proper conditions */
 #define QED_IS_BB(dev)  ((dev)->type == QED_DEV_TYPE_BB)
 #define QED_IS_BB_A0(dev)       (QED_IS_BB(dev) && \
@@ -540,6 +577,9 @@ struct qed_dev {
 
        u16     vendor_id;
        u16     device_id;
+#define QED_DEV_ID_MASK                0xff00
+#define QED_DEV_ID_MASK_BB     0x1600
+#define QED_DEV_ID_MASK_AH     0x8000
 
        u16     chip_num;
 #define CHIP_NUM_MASK                   0xffff
@@ -654,10 +694,16 @@ struct qed_dev {
        u32 rdma_max_srq_sge;
 };
 
-#define NUM_OF_VFS(dev)         MAX_NUM_VFS_BB
-#define NUM_OF_L2_QUEUES(dev)  MAX_NUM_L2_QUEUES_BB
-#define NUM_OF_SBS(dev)         MAX_SB_PER_PATH_BB
-#define NUM_OF_ENG_PFS(dev)     MAX_NUM_PFS_BB
+#define NUM_OF_VFS(dev)         (QED_IS_BB(dev) ? MAX_NUM_VFS_BB \
+                                               : MAX_NUM_VFS_K2)
+#define NUM_OF_L2_QUEUES(dev)   (QED_IS_BB(dev) ? MAX_NUM_L2_QUEUES_BB \
+                                               : MAX_NUM_L2_QUEUES_K2)
+#define NUM_OF_PORTS(dev)       (QED_IS_BB(dev) ? MAX_NUM_PORTS_BB \
+                                               : MAX_NUM_PORTS_K2)
+#define NUM_OF_SBS(dev)         (QED_IS_BB(dev) ? MAX_SB_PER_PATH_BB \
+                                               : MAX_SB_PER_PATH_K2)
+#define NUM_OF_ENG_PFS(dev)     (QED_IS_BB(dev) ? MAX_NUM_PFS_BB \
+                                               : MAX_NUM_PFS_K2)
 
 /**
  * @brief qed_concrete_to_sw_fid - get the sw function id from
@@ -693,6 +739,25 @@ void qed_configure_vp_wfq_on_link_change(struct qed_dev *cdev,
                                         u32 min_pf_rate);
 
 void qed_clean_wfq_db(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt);
+int qed_device_num_engines(struct qed_dev *cdev);
+
+#define QED_LEADING_HWFN(dev)   (&dev->hwfns[0])
+
+/* Flags for indication of required queues */
+#define PQ_FLAGS_RLS    (BIT(0))
+#define PQ_FLAGS_MCOS   (BIT(1))
+#define PQ_FLAGS_LB     (BIT(2))
+#define PQ_FLAGS_OOO    (BIT(3))
+#define PQ_FLAGS_ACK    (BIT(4))
+#define PQ_FLAGS_OFLD   (BIT(5))
+#define PQ_FLAGS_VFS    (BIT(6))
+#define PQ_FLAGS_LLT    (BIT(7))
+
+/* physical queue index for CM context initialization */
+u16 qed_get_cm_pq_idx(struct qed_hwfn *p_hwfn, u32 pq_flags);
+u16 qed_get_cm_pq_idx_mcos(struct qed_hwfn *p_hwfn, u8 tc);
+u16 qed_get_cm_pq_idx_vf(struct qed_hwfn *p_hwfn, u16 vf);
+
 #define QED_LEADING_HWFN(dev)   (&dev->hwfns[0])
 
 /* Other Linux specific common definitions */
@@ -721,5 +786,6 @@ void qed_get_protocol_stats(struct qed_dev *cdev,
                            enum qed_mcp_protocol_type type,
                            union qed_mcp_protocol_stats *stats);
 int qed_slowpath_irq_req(struct qed_hwfn *hwfn);
+void qed_slowpath_irq_sync(struct qed_hwfn *p_hwfn);
 
 #endif /* _QED_H */
index d42d03df751acbb32dd725c769bbf5d56aeb72e4..15ef6ebed6bb62f7b8578c31f4fa94cf03e1f491 100644 (file)
@@ -71,8 +71,7 @@
 #define TM_ALIGN        BIT(TM_SHIFT)
 #define TM_ELEM_SIZE    4
 
-/* For RoCE we configure to 64K to cover for RoCE max tasks 256K purpose. */
-#define ILT_DEFAULT_HW_P_SIZE  (IS_ENABLED(CONFIG_QED_RDMA) ? 4 : 3)
+#define ILT_DEFAULT_HW_P_SIZE  4
 
 #define ILT_PAGE_IN_BYTES(hw_p_size)   (1U << ((hw_p_size) + 12))
 #define ILT_CFG_REG(cli, reg)  PSWRQ2_REG_ ## cli ## _ ## reg ## _RT_OFFSET
@@ -242,8 +241,7 @@ struct qed_cxt_mngr {
 static bool src_proto(enum protocol_type type)
 {
        return type == PROTOCOLID_ISCSI ||
-              type == PROTOCOLID_FCOE ||
-              type == PROTOCOLID_ROCE;
+              type == PROTOCOLID_FCOE;
 }
 
 static bool tm_cid_proto(enum protocol_type type)
@@ -304,16 +302,34 @@ struct qed_tm_iids {
        u32 per_vf_tids;
 };
 
-static void qed_cxt_tm_iids(struct qed_cxt_mngr *p_mngr,
+static void qed_cxt_tm_iids(struct qed_hwfn *p_hwfn,
+                           struct qed_cxt_mngr *p_mngr,
                            struct qed_tm_iids *iids)
 {
-       u32 i, j;
-
-       for (i = 0; i < MAX_CONN_TYPES; i++) {
+       bool tm_vf_required = false;
+       bool tm_required = false;
+       int i, j;
+
+       /* Timers are a special case -> we don't count how many cids require
+        * timers but what's the max cid that will be used by the timer block.
+        * Therefore we traverse in reverse order, and once we hit a protocol
+        * that requires the timers memory, we sum all the protocols up
+        * to that one.
+        */
+       for (i = MAX_CONN_TYPES - 1; i >= 0; i--) {
                struct qed_conn_type_cfg *p_cfg = &p_mngr->conn_cfg[i];
 
-               if (tm_cid_proto(i)) {
+               if (tm_cid_proto(i) || tm_required) {
+                       if (p_cfg->cid_count)
+                               tm_required = true;
+
                        iids->pf_cids += p_cfg->cid_count;
+               }
+
+               if (tm_cid_proto(i) || tm_vf_required) {
+                       if (p_cfg->cids_per_vf)
+                               tm_vf_required = true;
+
                        iids->per_vf_cids += p_cfg->cids_per_vf;
                }
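
A worked example of the reverse scan: take five protocols with cid counts {0, 100, 0, 200, 50}, where only protocols 2 and 3 use timers. Protocol 4 is skipped; protocol 3 has cids, so the flag latches and everything from there down is summed, giving 200 + 0 + 100 + 0 = 300, the highest cid the timer block can see. A toy model with illustrative numbers:

#include <stdbool.h>
#include <stdio.h>

static bool is_tm_proto(int i)
{
	return i == 2 || i == 3;	/* stand-ins for iSCSI/FCoE */
}

int main(void)
{
	unsigned int cids[5] = { 0, 100, 0, 200, 50 };	/* illustrative */
	unsigned int pf_cids = 0;
	bool tm_required = false;
	int i;

	for (i = 4; i >= 0; i--)
		if (is_tm_proto(i) || tm_required) {
			if (cids[i])
				tm_required = true;
			pf_cids += cids[i];
		}

	printf("%u\n", pf_cids);	/* prints 300 */
	return 0;
}
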
 
@@ -422,8 +438,9 @@ static void qed_cxt_set_proto_cid_count(struct qed_hwfn *p_hwfn,
                u32 page_sz = p_mgr->clients[ILT_CLI_CDUC].p_size.val;
                u32 cxt_size = CONN_CXT_SIZE(p_hwfn);
                u32 elems_per_page = ILT_PAGE_IN_BYTES(page_sz) / cxt_size;
+               u32 align = elems_per_page * DQ_RANGE_ALIGN;
 
-               p_conn->cid_count = roundup(p_conn->cid_count, elems_per_page);
+               p_conn->cid_count = roundup(p_conn->cid_count, align);
        }
 }
 
@@ -526,7 +543,22 @@ static u32 qed_ilt_get_dynamic_line_cnt(struct qed_hwfn *p_hwfn,
        return lines_to_skip;
 }
 
-int qed_cxt_cfg_ilt_compute(struct qed_hwfn *p_hwfn)
+static struct qed_ilt_client_cfg *
+qed_cxt_set_cli(struct qed_ilt_client_cfg *p_cli)
+{
+       p_cli->active = false;
+       p_cli->first.val = 0;
+       p_cli->last.val = 0;
+       return p_cli;
+}
+
+static struct qed_ilt_cli_blk *qed_cxt_set_blk(struct qed_ilt_cli_blk *p_blk)
+{
+       p_blk->total_size = 0;
+       return p_blk;
+}
+
+int qed_cxt_cfg_ilt_compute(struct qed_hwfn *p_hwfn, u32 *line_count)
 {
        struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
        u32 curr_line, total, i, task_size, line;
@@ -550,7 +582,8 @@ int qed_cxt_cfg_ilt_compute(struct qed_hwfn *p_hwfn)
                   p_hwfn->my_id, p_hwfn->p_cxt_mngr->pf_start_line);
 
        /* CDUC */
-       p_cli = &p_mngr->clients[ILT_CLI_CDUC];
+       p_cli = qed_cxt_set_cli(&p_mngr->clients[ILT_CLI_CDUC]);
+
        curr_line = p_mngr->pf_start_line;
 
        /* CDUC PF */
@@ -559,7 +592,7 @@ int qed_cxt_cfg_ilt_compute(struct qed_hwfn *p_hwfn)
        /* get the counters for the CDUC and QM clients  */
        qed_cxt_cdu_iids(p_mngr, &cdu_iids);
 
-       p_blk = &p_cli->pf_blks[CDUC_BLK];
+       p_blk = qed_cxt_set_blk(&p_cli->pf_blks[CDUC_BLK]);
 
        total = cdu_iids.pf_cids * CONN_CXT_SIZE(p_hwfn);
 
@@ -573,7 +606,7 @@ int qed_cxt_cfg_ilt_compute(struct qed_hwfn *p_hwfn)
                                                               ILT_CLI_CDUC);
 
        /* CDUC VF */
-       p_blk = &p_cli->vf_blks[CDUC_BLK];
+       p_blk = qed_cxt_set_blk(&p_cli->vf_blks[CDUC_BLK]);
        total = cdu_iids.per_vf_cids * CONN_CXT_SIZE(p_hwfn);
 
        qed_ilt_cli_blk_fill(p_cli, p_blk, curr_line,
@@ -587,7 +620,7 @@ int qed_cxt_cfg_ilt_compute(struct qed_hwfn *p_hwfn)
                                     ILT_CLI_CDUC);
 
        /* CDUT PF */
-       p_cli = &p_mngr->clients[ILT_CLI_CDUT];
+       p_cli = qed_cxt_set_cli(&p_mngr->clients[ILT_CLI_CDUT]);
        p_cli->first.val = curr_line;
 
        /* first the 'working' task memory */
@@ -596,7 +629,7 @@ int qed_cxt_cfg_ilt_compute(struct qed_hwfn *p_hwfn)
                if (!p_seg || p_seg->count == 0)
                        continue;
 
-               p_blk = &p_cli->pf_blks[CDUT_SEG_BLK(i)];
+               p_blk = qed_cxt_set_blk(&p_cli->pf_blks[CDUT_SEG_BLK(i)]);
                total = p_seg->count * p_mngr->task_type_size[p_seg->type];
                qed_ilt_cli_blk_fill(p_cli, p_blk, curr_line, total,
                                     p_mngr->task_type_size[p_seg->type]);
@@ -611,7 +644,8 @@ int qed_cxt_cfg_ilt_compute(struct qed_hwfn *p_hwfn)
                if (!p_seg || p_seg->count == 0)
                        continue;
 
-               p_blk = &p_cli->pf_blks[CDUT_FL_SEG_BLK(i, PF)];
+               p_blk =
+                   qed_cxt_set_blk(&p_cli->pf_blks[CDUT_FL_SEG_BLK(i, PF)]);
 
                if (!p_seg->has_fl_mem) {
+                       /* The segment is active (total size of 'working'
@@ -656,7 +690,7 @@ int qed_cxt_cfg_ilt_compute(struct qed_hwfn *p_hwfn)
                /* 'working' memory */
                total = p_seg->count * p_mngr->task_type_size[p_seg->type];
 
-               p_blk = &p_cli->vf_blks[CDUT_SEG_BLK(0)];
+               p_blk = qed_cxt_set_blk(&p_cli->vf_blks[CDUT_SEG_BLK(0)]);
                qed_ilt_cli_blk_fill(p_cli, p_blk,
                                     curr_line, total,
                                     p_mngr->task_type_size[p_seg->type]);
@@ -665,7 +699,8 @@ int qed_cxt_cfg_ilt_compute(struct qed_hwfn *p_hwfn)
                                     ILT_CLI_CDUT);
 
                /* 'init' memory */
-               p_blk = &p_cli->vf_blks[CDUT_FL_SEG_BLK(0, VF)];
+               p_blk =
+                   qed_cxt_set_blk(&p_cli->vf_blks[CDUT_FL_SEG_BLK(0, VF)]);
                if (!p_seg->has_fl_mem) {
                        /* see comment above */
                        line = p_cli->vf_blks[CDUT_SEG_BLK(0)].start_line;
@@ -693,8 +728,8 @@ int qed_cxt_cfg_ilt_compute(struct qed_hwfn *p_hwfn)
        }
 
        /* QM */
-       p_cli = &p_mngr->clients[ILT_CLI_QM];
-       p_blk = &p_cli->pf_blks[0];
+       p_cli = qed_cxt_set_cli(&p_mngr->clients[ILT_CLI_QM]);
+       p_blk = qed_cxt_set_blk(&p_cli->pf_blks[0]);
 
        qed_cxt_qm_iids(p_hwfn, &qm_iids);
        total = qed_qm_pf_mem_size(p_hwfn->rel_pf_id, qm_iids.cids,
@@ -718,7 +753,7 @@ int qed_cxt_cfg_ilt_compute(struct qed_hwfn *p_hwfn)
        p_cli->pf_total_lines = curr_line - p_blk->start_line;
 
        /* SRC */
-       p_cli = &p_mngr->clients[ILT_CLI_SRC];
+       p_cli = qed_cxt_set_cli(&p_mngr->clients[ILT_CLI_SRC]);
        qed_cxt_src_iids(p_mngr, &src_iids);
 
        /* Both the PF and VFs searcher connections are stored in the per PF
@@ -732,7 +767,7 @@ int qed_cxt_cfg_ilt_compute(struct qed_hwfn *p_hwfn)
 
                total = roundup_pow_of_two(local_max);
 
-               p_blk = &p_cli->pf_blks[0];
+               p_blk = qed_cxt_set_blk(&p_cli->pf_blks[0]);
                qed_ilt_cli_blk_fill(p_cli, p_blk, curr_line,
                                     total * sizeof(struct src_ent),
                                     sizeof(struct src_ent));
@@ -743,11 +778,11 @@ int qed_cxt_cfg_ilt_compute(struct qed_hwfn *p_hwfn)
        }
 
        /* TM PF */
-       p_cli = &p_mngr->clients[ILT_CLI_TM];
-       qed_cxt_tm_iids(p_mngr, &tm_iids);
+       p_cli = qed_cxt_set_cli(&p_mngr->clients[ILT_CLI_TM]);
+       qed_cxt_tm_iids(p_hwfn, p_mngr, &tm_iids);
        total = tm_iids.pf_cids + tm_iids.pf_tids_total;
        if (total) {
-               p_blk = &p_cli->pf_blks[0];
+               p_blk = qed_cxt_set_blk(&p_cli->pf_blks[0]);
                qed_ilt_cli_blk_fill(p_cli, p_blk, curr_line,
                                     total * TM_ELEM_SIZE, TM_ELEM_SIZE);
 
@@ -759,14 +794,14 @@ int qed_cxt_cfg_ilt_compute(struct qed_hwfn *p_hwfn)
        /* TM VF */
        total = tm_iids.per_vf_cids + tm_iids.per_vf_tids;
        if (total) {
-               p_blk = &p_cli->vf_blks[0];
+               p_blk = qed_cxt_set_blk(&p_cli->vf_blks[0]);
                qed_ilt_cli_blk_fill(p_cli, p_blk, curr_line,
                                     total * TM_ELEM_SIZE, TM_ELEM_SIZE);
 
                qed_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line,
                                     ILT_CLI_TM);
-               p_cli->pf_total_lines = curr_line - p_blk->start_line;
 
+               p_cli->vf_total_lines = curr_line - p_blk->start_line;
                for (i = 1; i < p_mngr->vf_count; i++)
                        qed_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line,
                                             ILT_CLI_TM);
@@ -776,8 +811,8 @@ int qed_cxt_cfg_ilt_compute(struct qed_hwfn *p_hwfn)
        total = qed_cxt_get_srq_count(p_hwfn);
 
        if (total) {
-               p_cli = &p_mngr->clients[ILT_CLI_TSDM];
-               p_blk = &p_cli->pf_blks[SRQ_BLK];
+               p_cli = qed_cxt_set_cli(&p_mngr->clients[ILT_CLI_TSDM]);
+               p_blk = qed_cxt_set_blk(&p_cli->pf_blks[SRQ_BLK]);
                qed_ilt_cli_blk_fill(p_cli, p_blk, curr_line,
                                     total * SRQ_CXT_SIZE, SRQ_CXT_SIZE);
 
@@ -786,13 +821,50 @@ int qed_cxt_cfg_ilt_compute(struct qed_hwfn *p_hwfn)
                p_cli->pf_total_lines = curr_line - p_blk->start_line;
        }
 
+       *line_count = curr_line - p_hwfn->p_cxt_mngr->pf_start_line;
+
        if (curr_line - p_hwfn->p_cxt_mngr->pf_start_line >
-           RESC_NUM(p_hwfn, QED_ILT)) {
-               DP_ERR(p_hwfn, "too many ilt lines...#lines=%d\n",
-                      curr_line - p_hwfn->p_cxt_mngr->pf_start_line);
+           RESC_NUM(p_hwfn, QED_ILT))
                return -EINVAL;
+
+       return 0;
+}
+
+u32 qed_cxt_cfg_ilt_compute_excess(struct qed_hwfn *p_hwfn, u32 used_lines)
+{
+       struct qed_ilt_client_cfg *p_cli;
+       u32 excess_lines, available_lines;
+       struct qed_cxt_mngr *p_mngr;
+       u32 ilt_page_size, elem_size;
+       struct qed_tid_seg *p_seg;
+       int i;
+
+       available_lines = RESC_NUM(p_hwfn, QED_ILT);
+       excess_lines = used_lines - available_lines;
+
+       if (!excess_lines)
+               return 0;
+
+       if (p_hwfn->hw_info.personality != QED_PCI_ETH_ROCE)
+               return 0;
+
+       p_mngr = p_hwfn->p_cxt_mngr;
+       p_cli = &p_mngr->clients[ILT_CLI_CDUT];
+       ilt_page_size = ILT_PAGE_IN_BYTES(p_cli->p_size.val);
+
+       for (i = 0; i < NUM_TASK_PF_SEGMENTS; i++) {
+               p_seg = qed_cxt_tid_seg_info(p_hwfn, i);
+               if (!p_seg || p_seg->count == 0)
+                       continue;
+
+               elem_size = p_mngr->task_type_size[p_seg->type];
+               if (!elem_size)
+                       continue;
+
+               return (ilt_page_size / elem_size) * excess_lines;
        }
 
+       DP_NOTICE(p_hwfn, "failed computing excess ILT lines\n");
        return 0;
 }
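
The arithmetic in the excess helper: each surplus ILT line maps one page of task contexts, so the number of task elements to shed is the per-page element density times the line overrun. With the new ILT_DEFAULT_HW_P_SIZE of 4, a page is 1 << (4 + 12) = 64K bytes, matching the updated "64K" comment further down. A standalone check with an illustrative element size:

#include <stdio.h>

#define ILT_PAGE_IN_BYTES(hw_p_size)	(1U << ((hw_p_size) + 12))

int main(void)
{
	unsigned int page = ILT_PAGE_IN_BYTES(4);	/* 65536 */
	unsigned int elem_size = 128;			/* illustrative */
	unsigned int excess_lines = 3;

	/* elements per line times surplus lines = tasks that no longer fit */
	printf("%u\n", (page / elem_size) * excess_lines);	/* prints 1536 */
	return 0;
}
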
 
@@ -1126,7 +1198,7 @@ int qed_cxt_mngr_alloc(struct qed_hwfn *p_hwfn)
        clients[ILT_CLI_TSDM].first.reg = ILT_CFG_REG(TSDM, FIRST_ILT);
        clients[ILT_CLI_TSDM].last.reg = ILT_CFG_REG(TSDM, LAST_ILT);
        clients[ILT_CLI_TSDM].p_size.reg = ILT_CFG_REG(TSDM, P_SIZE);
-       /* default ILT page size for all clients is 32K */
+       /* default ILT page size for all clients is 64K */
        for (i = 0; i < ILT_CLI_MAX; i++)
                p_mngr->clients[i].p_size.val = ILT_DEFAULT_HW_P_SIZE;
 
@@ -1366,7 +1438,7 @@ static void qed_cdu_init_pf(struct qed_hwfn *p_hwfn)
        }
 }
 
-void qed_qm_init_pf(struct qed_hwfn *p_hwfn)
+void qed_qm_init_pf(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
 {
        struct qed_qm_pf_rt_init_params params;
        struct qed_qm_info *qm_info = &p_hwfn->qm_info;
@@ -1392,22 +1464,15 @@ void qed_qm_init_pf(struct qed_hwfn *p_hwfn)
        params.pq_params = qm_info->qm_pq_params;
        params.vport_params = qm_info->qm_vport_params;
 
-       qed_qm_pf_rt_init(p_hwfn, p_hwfn->p_main_ptt, &params);
+       qed_qm_pf_rt_init(p_hwfn, p_ptt, &params);
 }
 
 /* CM PF */
-static int qed_cm_init_pf(struct qed_hwfn *p_hwfn)
+void qed_cm_init_pf(struct qed_hwfn *p_hwfn)
 {
-       union qed_qm_pq_params pq_params;
-       u16 pq;
-
        /* XCM pure-LB queue */
-       memset(&pq_params, 0, sizeof(pq_params));
-       pq_params.core.tc = LB_TC;
-       pq = qed_get_qm_pq(p_hwfn, PROTOCOLID_CORE, &pq_params);
-       STORE_RT_REG(p_hwfn, XCM_REG_CON_PHY_Q3_RT_OFFSET, pq);
-
-       return 0;
+       STORE_RT_REG(p_hwfn, XCM_REG_CON_PHY_Q3_RT_OFFSET,
+                    qed_get_cm_pq_idx(p_hwfn, PQ_FLAGS_LB));
 }
 
 /* DQ PF */
@@ -1639,7 +1704,7 @@ static void qed_tm_init_pf(struct qed_hwfn *p_hwfn)
        u8 i;
 
        memset(&tm_iids, 0, sizeof(tm_iids));
-       qed_cxt_tm_iids(p_mngr, &tm_iids);
+       qed_cxt_tm_iids(p_hwfn, p_mngr, &tm_iids);
 
        /* @@@TBD No pre-scan for now */
 
@@ -1757,9 +1822,9 @@ void qed_cxt_hw_init_common(struct qed_hwfn *p_hwfn)
        qed_prs_init_common(p_hwfn);
 }
 
-void qed_cxt_hw_init_pf(struct qed_hwfn *p_hwfn)
+void qed_cxt_hw_init_pf(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
 {
-       qed_qm_init_pf(p_hwfn);
+       qed_qm_init_pf(p_hwfn, p_ptt);
        qed_cm_init_pf(p_hwfn);
        qed_dq_init_pf(p_hwfn);
        qed_cdu_init_pf(p_hwfn);
@@ -1883,13 +1948,12 @@ int qed_cxt_get_cid_info(struct qed_hwfn *p_hwfn, struct qed_cxt_info *p_info)
 }
 
 static void qed_rdma_set_pf_params(struct qed_hwfn *p_hwfn,
-                                  struct qed_rdma_pf_params *p_params)
+                                  struct qed_rdma_pf_params *p_params,
+                                  u32 num_tasks)
 {
-       u32 num_cons, num_tasks, num_qps, num_mrs, num_srqs;
+       u32 num_cons, num_qps, num_srqs;
        enum protocol_type proto;
 
-       num_mrs = min_t(u32, RDMA_MAX_TIDS, p_params->num_mrs);
-       num_tasks = num_mrs;    /* each mr uses a single task id */
        num_srqs = min_t(u32, 32 * 1024, p_params->num_srqs);
 
        switch (p_hwfn->hw_info.personality) {
@@ -1918,7 +1982,7 @@ static void qed_rdma_set_pf_params(struct qed_hwfn *p_hwfn,
        }
 }
 
-int qed_cxt_set_pf_params(struct qed_hwfn *p_hwfn)
+int qed_cxt_set_pf_params(struct qed_hwfn *p_hwfn, u32 rdma_tasks)
 {
        /* Set the number of required CORE connections */
        u32 core_cids = 1; /* SPQ */
@@ -1930,9 +1994,10 @@ int qed_cxt_set_pf_params(struct qed_hwfn *p_hwfn)
        switch (p_hwfn->hw_info.personality) {
        case QED_PCI_ETH_ROCE:
        {
-               qed_rdma_set_pf_params(p_hwfn,
-                                      &p_hwfn->
-                                      pf_params.rdma_pf_params);
+               qed_rdma_set_pf_params(p_hwfn,
+                                      &p_hwfn->pf_params.rdma_pf_params,
+                                      rdma_tasks);
                /* no need for break since RoCE coexist with Ethernet */
        }
        case QED_PCI_ETH:
index 8b010324268ad2b5b5313f6b8e62db226f7650e5..53ad532dc21223e4a6fa15039e5ab17acb5e6a01 100644 (file)
@@ -105,19 +105,28 @@ u32 qed_cxt_get_proto_cid_count(struct qed_hwfn *p_hwfn,
  * @brief qed_cxt_set_pf_params - Set the PF params for cxt init
  *
  * @param p_hwfn
- *
+ * @param rdma_tasks - requested maximum
  * @return int
  */
-int qed_cxt_set_pf_params(struct qed_hwfn *p_hwfn);
+int qed_cxt_set_pf_params(struct qed_hwfn *p_hwfn, u32 rdma_tasks);
 
 /**
  * @brief qed_cxt_cfg_ilt_compute - compute ILT init parameters
  *
  * @param p_hwfn
+ * @param last_line
  *
  * @return int
  */
-int qed_cxt_cfg_ilt_compute(struct qed_hwfn *p_hwfn);
+int qed_cxt_cfg_ilt_compute(struct qed_hwfn *p_hwfn, u32 *last_line);
+
+/**
+ * @brief qed_cxt_cfg_ilt_compute_excess - how many lines can be decreased
+ *
+ * @param p_hwfn
+ * @param used_lines
+ */
+u32 qed_cxt_cfg_ilt_compute_excess(struct qed_hwfn *p_hwfn, u32 used_lines);
 
 /**
  * @brief qed_cxt_mngr_alloc - Allocate and init the context manager struct
@@ -163,19 +172,18 @@ void qed_cxt_hw_init_common(struct qed_hwfn *p_hwfn);
 /**
 * @brief qed_cxt_hw_init_pf - Initialize ILT and DQ, PF phase, per path.
  *
- *
- *
  * @param p_hwfn
+ * @param p_ptt
  */
-void qed_cxt_hw_init_pf(struct qed_hwfn *p_hwfn);
+void qed_cxt_hw_init_pf(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt);
 
 /**
 * @brief qed_qm_init_pf - Initialize the QM PF phase, per path
  *
  * @param p_hwfn
+ * @param p_ptt
  */
-
-void qed_qm_init_pf(struct qed_hwfn *p_hwfn);
+void qed_qm_init_pf(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt);
 
 /**
  * @brief Reconfigures QM pf on the fly
index 5bd36a4a8fcdfd201b40321c7fefb82776cd347d..2fc1fde824bdbe817b4d6e538b25de33fd8987bc 100644 (file)
@@ -183,7 +183,7 @@ qed_dcbx_dp_protocol(struct qed_hwfn *p_hwfn, struct qed_dcbx_results *p_data)
                           "%s info: update %d, enable %d, prio %d, tc %d, num_tc %d\n",
                           qed_dcbx_app_update[i].name, p_data->arr[id].update,
                           p_data->arr[id].enable, p_data->arr[id].priority,
-                          p_data->arr[id].tc, p_hwfn->hw_info.num_tc);
+                          p_data->arr[id].tc, p_hwfn->hw_info.num_active_tc);
        }
 }
 
@@ -204,12 +204,8 @@ qed_dcbx_set_params(struct qed_dcbx_results *p_data,
        p_data->arr[type].tc = tc;
 
        /* QM reconf data */
-       if (p_info->personality == personality) {
-               if (personality == QED_PCI_ETH)
-                       p_info->non_offload_tc = tc;
-               else
-                       p_info->offload_tc = tc;
-       }
+       if (p_info->personality == personality)
+               p_info->offload_tc = tc;
 }
 
 /* Update app protocol data and hw_info fields with the TLV info */
@@ -376,7 +372,9 @@ static int qed_dcbx_process_mib_info(struct qed_hwfn *p_hwfn)
        if (rc)
                return rc;
 
-       p_info->num_tc = QED_MFW_GET_FIELD(p_ets->flags, DCBX_ETS_MAX_TCS);
+       p_info->num_active_tc = QED_MFW_GET_FIELD(p_ets->flags,
+                                                 DCBX_ETS_MAX_TCS);
+       p_hwfn->qm_info.ooo_tc = QED_MFW_GET_FIELD(p_ets->flags, DCBX_OOO_TC);
        data.pf_id = p_hwfn->rel_pf_id;
        data.dcbx_enabled = !!dcbx_version;
 
index 0fabe97f998d2c8e29b343f47ba90396d61f4398..2eb988fe1298dfd043fbd8c18d49187930a5c970 100644 (file)
@@ -85,9 +85,6 @@ struct qed_dcbx_app_metadata {
        enum qed_pci_personality personality;
 };
 
-#define QED_MFW_GET_FIELD(name, field) \
-       (((name) & (field ## _MASK)) >> (field ## _SHIFT))
-
 struct qed_dcbx_info {
        struct lldp_status_params_s lldp_remote[LLDP_MAX_LLDP_AGENTS];
        struct lldp_config_params_s lldp_local[LLDP_MAX_LLDP_AGENTS];
index 68f19ca57f965b13d6fbf32c85e86d65e500b881..483241b4b05db2add64ff928ccc9419fe733355a 100644 (file)
@@ -17,7 +17,6 @@
 
 /* Chip IDs enum */
 enum chip_ids {
-       CHIP_RESERVED,
        CHIP_BB_B0,
        CHIP_K2,
        MAX_CHIP_IDS
@@ -40,6 +39,7 @@ enum mem_groups {
        MEM_GROUP_BTB_RAM,
        MEM_GROUP_RDIF_CTX,
        MEM_GROUP_TDIF_CTX,
+       MEM_GROUP_CFC_MEM,
        MEM_GROUP_CONN_CFC_MEM,
        MEM_GROUP_TASK_CFC_MEM,
        MEM_GROUP_CAU_PI,
@@ -72,6 +72,7 @@ static const char * const s_mem_group_names[] = {
        "BTB_RAM",
        "RDIF_CTX",
        "TDIF_CTX",
+       "CFC_MEM",
        "CONN_CFC_MEM",
        "TASK_CFC_MEM",
        "CAU_PI",
@@ -185,13 +186,16 @@ struct dbg_array {
        u32 size_in_dwords;
 };
 
+struct chip_platform_defs {
+       u8 num_ports;
+       u8 num_pfs;
+       u8 num_vfs;
+};
+
 /* Chip constant definitions */
 struct chip_defs {
        const char *name;
-       struct {
-               u8 num_ports;
-               u8 num_pfs;
-       } per_platform[MAX_PLATFORM_IDS];
+       struct chip_platform_defs per_platform[MAX_PLATFORM_IDS];
 };
 
 /* Platform constant definitions */
@@ -405,22 +409,23 @@ struct phy_defs {
 /***************************** Constant Arrays *******************************/
 
 /* Debug arrays */
-static struct dbg_array s_dbg_arrays[MAX_BIN_DBG_BUFFER_TYPE] = { {NULL} };
+static struct dbg_array s_dbg_arrays[MAX_BIN_DBG_BUFFER_TYPE] = { {0} };
 
 /* Chip constant definitions array */
 static struct chip_defs s_chip_defs[MAX_CHIP_IDS] = {
-       { "reserved", { {0, 0}, {0, 0}, {0, 0}, {0, 0} } },
        { "bb_b0",
-         { {MAX_NUM_PORTS_BB, MAX_NUM_PFS_BB}, {0, 0}, {0, 0}, {0, 0} } },
-       { "k2", { {MAX_NUM_PORTS_K2, MAX_NUM_PFS_K2}, {0, 0}, {0, 0}, {0, 0} } }
+         { {MAX_NUM_PORTS_BB, MAX_NUM_PFS_BB, MAX_NUM_VFS_BB}, {0, 0, 0},
+           {0, 0, 0}, {0, 0, 0} } },
+       { "k2",
+         { {MAX_NUM_PORTS_K2, MAX_NUM_PFS_K2, MAX_NUM_VFS_K2}, {0, 0, 0},
+           {0, 0, 0}, {0, 0, 0} } }
 };
 
 /* Storm constant definitions array */
 static struct storm_defs s_storm_defs[] = {
        /* Tstorm */
        {'T', BLOCK_TSEM,
-        {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT,
-         DBG_BUS_CLIENT_RBCT}, true,
+        {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT}, true,
         TSEM_REG_FAST_MEMORY,
         TSEM_REG_DBG_FRAME_MODE, TSEM_REG_SLOW_DBG_ACTIVE,
         TSEM_REG_SLOW_DBG_MODE, TSEM_REG_DBG_MODE1_CFG,
@@ -432,8 +437,7 @@ static struct storm_defs s_storm_defs[] = {
         4, TCM_REG_SM_TASK_CTX},
        /* Mstorm */
        {'M', BLOCK_MSEM,
-        {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT,
-         DBG_BUS_CLIENT_RBCM}, false,
+        {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM}, false,
         MSEM_REG_FAST_MEMORY,
         MSEM_REG_DBG_FRAME_MODE, MSEM_REG_SLOW_DBG_ACTIVE,
         MSEM_REG_SLOW_DBG_MODE, MSEM_REG_DBG_MODE1_CFG,
@@ -445,8 +449,7 @@ static struct storm_defs s_storm_defs[] = {
         7, MCM_REG_SM_TASK_CTX},
        /* Ustorm */
        {'U', BLOCK_USEM,
-        {DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU,
-         DBG_BUS_CLIENT_RBCU}, false,
+        {DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU}, false,
         USEM_REG_FAST_MEMORY,
         USEM_REG_DBG_FRAME_MODE, USEM_REG_SLOW_DBG_ACTIVE,
         USEM_REG_SLOW_DBG_MODE, USEM_REG_DBG_MODE1_CFG,
@@ -458,8 +461,7 @@ static struct storm_defs s_storm_defs[] = {
         3, UCM_REG_SM_TASK_CTX},
        /* Xstorm */
        {'X', BLOCK_XSEM,
-        {DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX,
-         DBG_BUS_CLIENT_RBCX}, false,
+        {DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX}, false,
         XSEM_REG_FAST_MEMORY,
         XSEM_REG_DBG_FRAME_MODE, XSEM_REG_SLOW_DBG_ACTIVE,
         XSEM_REG_SLOW_DBG_MODE, XSEM_REG_DBG_MODE1_CFG,
@@ -471,8 +473,7 @@ static struct storm_defs s_storm_defs[] = {
         0, 0},
        /* Ystorm */
        {'Y', BLOCK_YSEM,
-        {DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX,
-         DBG_BUS_CLIENT_RBCY}, false,
+        {DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCY}, false,
         YSEM_REG_FAST_MEMORY,
         YSEM_REG_DBG_FRAME_MODE, YSEM_REG_SLOW_DBG_ACTIVE,
         YSEM_REG_SLOW_DBG_MODE, YSEM_REG_DBG_MODE1_CFG,
@@ -484,8 +485,7 @@ static struct storm_defs s_storm_defs[] = {
         12, YCM_REG_SM_TASK_CTX},
        /* Pstorm */
        {'P', BLOCK_PSEM,
-        {DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS,
-         DBG_BUS_CLIENT_RBCS}, true,
+        {DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS}, true,
         PSEM_REG_FAST_MEMORY,
         PSEM_REG_DBG_FRAME_MODE, PSEM_REG_SLOW_DBG_ACTIVE,
         PSEM_REG_SLOW_DBG_MODE, PSEM_REG_DBG_MODE1_CFG,
@@ -499,8 +499,9 @@ static struct storm_defs s_storm_defs[] = {
 
 /* Block definitions array */
 static struct block_defs block_grc_defs = {
-       "grc", {true, true, true}, false, 0,
-       {DBG_BUS_CLIENT_RBCN, DBG_BUS_CLIENT_RBCN, DBG_BUS_CLIENT_RBCN},
+       "grc",
+       {true, true}, false, 0,
+       {DBG_BUS_CLIENT_RBCN, DBG_BUS_CLIENT_RBCN},
        GRC_REG_DBG_SELECT, GRC_REG_DBG_DWORD_ENABLE,
        GRC_REG_DBG_SHIFT, GRC_REG_DBG_FORCE_VALID,
        GRC_REG_DBG_FORCE_FRAME,
@@ -508,29 +509,30 @@ static struct block_defs block_grc_defs = {
 };
 
 static struct block_defs block_miscs_defs = {
-       "miscs", {false, false, false}, false, 0,
-       {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
+       "miscs", {false, false}, false, 0,
+       {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
        0, 0, 0, 0, 0,
        false, false, MAX_DBG_RESET_REGS, 0
 };
 
 static struct block_defs block_misc_defs = {
-       "misc", {false, false, false}, false, 0,
-       {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
+       "misc", {false, false}, false, 0,
+       {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
        0, 0, 0, 0, 0,
        false, false, MAX_DBG_RESET_REGS, 0
 };
 
 static struct block_defs block_dbu_defs = {
-       "dbu", {false, false, false}, false, 0,
-       {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
+       "dbu", {false, false}, false, 0,
+       {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
        0, 0, 0, 0, 0,
        false, false, MAX_DBG_RESET_REGS, 0
 };
 
 static struct block_defs block_pglue_b_defs = {
-       "pglue_b", {true, true, true}, false, 0,
-       {DBG_BUS_CLIENT_RBCH, DBG_BUS_CLIENT_RBCH, DBG_BUS_CLIENT_RBCH},
+       "pglue_b",
+       {true, true}, false, 0,
+       {DBG_BUS_CLIENT_RBCH, DBG_BUS_CLIENT_RBCH},
        PGLUE_B_REG_DBG_SELECT, PGLUE_B_REG_DBG_DWORD_ENABLE,
        PGLUE_B_REG_DBG_SHIFT, PGLUE_B_REG_DBG_FORCE_VALID,
        PGLUE_B_REG_DBG_FORCE_FRAME,
@@ -538,8 +540,9 @@ static struct block_defs block_pglue_b_defs = {
 };
 
 static struct block_defs block_cnig_defs = {
-       "cnig", {false, false, true}, false, 0,
-       {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCW},
+       "cnig",
+       {false, true}, false, 0,
+       {MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCW},
        CNIG_REG_DBG_SELECT_K2, CNIG_REG_DBG_DWORD_ENABLE_K2,
        CNIG_REG_DBG_SHIFT_K2, CNIG_REG_DBG_FORCE_VALID_K2,
        CNIG_REG_DBG_FORCE_FRAME_K2,
@@ -547,15 +550,16 @@ static struct block_defs block_cnig_defs = {
 };
 
 static struct block_defs block_cpmu_defs = {
-       "cpmu", {false, false, false}, false, 0,
-       {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
+       "cpmu", {false, false}, false, 0,
+       {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
        0, 0, 0, 0, 0,
        true, false, DBG_RESET_REG_MISCS_PL_HV, 8
 };
 
 static struct block_defs block_ncsi_defs = {
-       "ncsi", {true, true, true}, false, 0,
-       {DBG_BUS_CLIENT_RBCZ, DBG_BUS_CLIENT_RBCZ, DBG_BUS_CLIENT_RBCZ},
+       "ncsi",
+       {true, true}, false, 0,
+       {DBG_BUS_CLIENT_RBCZ, DBG_BUS_CLIENT_RBCZ},
        NCSI_REG_DBG_SELECT, NCSI_REG_DBG_DWORD_ENABLE,
        NCSI_REG_DBG_SHIFT, NCSI_REG_DBG_FORCE_VALID,
        NCSI_REG_DBG_FORCE_FRAME,
@@ -563,15 +567,16 @@ static struct block_defs block_ncsi_defs = {
 };
 
 static struct block_defs block_opte_defs = {
-       "opte", {false, false, false}, false, 0,
-       {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
+       "opte", {false, false}, false, 0,
+       {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
        0, 0, 0, 0, 0,
        true, false, DBG_RESET_REG_MISCS_PL_HV, 4
 };
 
 static struct block_defs block_bmb_defs = {
-       "bmb", {true, true, true}, false, 0,
-       {DBG_BUS_CLIENT_RBCZ, DBG_BUS_CLIENT_RBCZ, DBG_BUS_CLIENT_RBCB},
+       "bmb",
+       {true, true}, false, 0,
+       {DBG_BUS_CLIENT_RBCZ, DBG_BUS_CLIENT_RBCB},
        BMB_REG_DBG_SELECT, BMB_REG_DBG_DWORD_ENABLE,
        BMB_REG_DBG_SHIFT, BMB_REG_DBG_FORCE_VALID,
        BMB_REG_DBG_FORCE_FRAME,
@@ -579,8 +584,9 @@ static struct block_defs block_bmb_defs = {
 };
 
 static struct block_defs block_pcie_defs = {
-       "pcie", {false, false, true}, false, 0,
-       {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCH},
+       "pcie",
+       {false, true}, false, 0,
+       {MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCH},
        PCIE_REG_DBG_COMMON_SELECT, PCIE_REG_DBG_COMMON_DWORD_ENABLE,
        PCIE_REG_DBG_COMMON_SHIFT, PCIE_REG_DBG_COMMON_FORCE_VALID,
        PCIE_REG_DBG_COMMON_FORCE_FRAME,
@@ -588,15 +594,16 @@ static struct block_defs block_pcie_defs = {
 };
 
 static struct block_defs block_mcp_defs = {
-       "mcp", {false, false, false}, false, 0,
-       {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
+       "mcp", {false, false}, false, 0,
+       {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
        0, 0, 0, 0, 0,
        false, false, MAX_DBG_RESET_REGS, 0
 };
 
 static struct block_defs block_mcp2_defs = {
-       "mcp2", {true, true, true}, false, 0,
-       {DBG_BUS_CLIENT_RBCZ, DBG_BUS_CLIENT_RBCZ, DBG_BUS_CLIENT_RBCZ},
+       "mcp2",
+       {true, true}, false, 0,
+       {DBG_BUS_CLIENT_RBCZ, DBG_BUS_CLIENT_RBCZ},
        MCP2_REG_DBG_SELECT, MCP2_REG_DBG_DWORD_ENABLE,
        MCP2_REG_DBG_SHIFT, MCP2_REG_DBG_FORCE_VALID,
        MCP2_REG_DBG_FORCE_FRAME,
@@ -604,8 +611,9 @@ static struct block_defs block_mcp2_defs = {
 };
 
 static struct block_defs block_pswhst_defs = {
-       "pswhst", {true, true, true}, false, 0,
-       {DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
+       "pswhst",
+       {true, true}, false, 0,
+       {DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
        PSWHST_REG_DBG_SELECT, PSWHST_REG_DBG_DWORD_ENABLE,
        PSWHST_REG_DBG_SHIFT, PSWHST_REG_DBG_FORCE_VALID,
        PSWHST_REG_DBG_FORCE_FRAME,
@@ -613,8 +621,9 @@ static struct block_defs block_pswhst_defs = {
 };
 
 static struct block_defs block_pswhst2_defs = {
-       "pswhst2", {true, true, true}, false, 0,
-       {DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
+       "pswhst2",
+       {true, true}, false, 0,
+       {DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
        PSWHST2_REG_DBG_SELECT, PSWHST2_REG_DBG_DWORD_ENABLE,
        PSWHST2_REG_DBG_SHIFT, PSWHST2_REG_DBG_FORCE_VALID,
        PSWHST2_REG_DBG_FORCE_FRAME,
@@ -622,8 +631,9 @@ static struct block_defs block_pswhst2_defs = {
 };
 
 static struct block_defs block_pswrd_defs = {
-       "pswrd", {true, true, true}, false, 0,
-       {DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
+       "pswrd",
+       {true, true}, false, 0,
+       {DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
        PSWRD_REG_DBG_SELECT, PSWRD_REG_DBG_DWORD_ENABLE,
        PSWRD_REG_DBG_SHIFT, PSWRD_REG_DBG_FORCE_VALID,
        PSWRD_REG_DBG_FORCE_FRAME,
@@ -631,8 +641,9 @@ static struct block_defs block_pswrd_defs = {
 };
 
 static struct block_defs block_pswrd2_defs = {
-       "pswrd2", {true, true, true}, false, 0,
-       {DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
+       "pswrd2",
+       {true, true}, false, 0,
+       {DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
        PSWRD2_REG_DBG_SELECT, PSWRD2_REG_DBG_DWORD_ENABLE,
        PSWRD2_REG_DBG_SHIFT, PSWRD2_REG_DBG_FORCE_VALID,
        PSWRD2_REG_DBG_FORCE_FRAME,
@@ -640,8 +651,9 @@ static struct block_defs block_pswrd2_defs = {
 };
 
 static struct block_defs block_pswwr_defs = {
-       "pswwr", {true, true, true}, false, 0,
-       {DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
+       "pswwr",
+       {true, true}, false, 0,
+       {DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
        PSWWR_REG_DBG_SELECT, PSWWR_REG_DBG_DWORD_ENABLE,
        PSWWR_REG_DBG_SHIFT, PSWWR_REG_DBG_FORCE_VALID,
        PSWWR_REG_DBG_FORCE_FRAME,
@@ -649,15 +661,16 @@ static struct block_defs block_pswwr_defs = {
 };
 
 static struct block_defs block_pswwr2_defs = {
-       "pswwr2", {false, false, false}, false, 0,
-       {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
+       "pswwr2", {false, false}, false, 0,
+       {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
        0, 0, 0, 0, 0,
        true, false, DBG_RESET_REG_MISC_PL_HV, 3
 };
 
 static struct block_defs block_pswrq_defs = {
-       "pswrq", {true, true, true}, false, 0,
-       {DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
+       "pswrq",
+       {true, true}, false, 0,
+       {DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
        PSWRQ_REG_DBG_SELECT, PSWRQ_REG_DBG_DWORD_ENABLE,
        PSWRQ_REG_DBG_SHIFT, PSWRQ_REG_DBG_FORCE_VALID,
        PSWRQ_REG_DBG_FORCE_FRAME,
@@ -665,8 +678,9 @@ static struct block_defs block_pswrq_defs = {
 };
 
 static struct block_defs block_pswrq2_defs = {
-       "pswrq2", {true, true, true}, false, 0,
-       {DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
+       "pswrq2",
+       {true, true}, false, 0,
+       {DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
        PSWRQ2_REG_DBG_SELECT, PSWRQ2_REG_DBG_DWORD_ENABLE,
        PSWRQ2_REG_DBG_SHIFT, PSWRQ2_REG_DBG_FORCE_VALID,
        PSWRQ2_REG_DBG_FORCE_FRAME,
@@ -674,8 +688,9 @@ static struct block_defs block_pswrq2_defs = {
 };
 
 static struct block_defs block_pglcs_defs = {
-       "pglcs", {false, false, true}, false, 0,
-       {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCH},
+       "pglcs",
+       {false, true}, false, 0,
+       {MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCH},
        PGLCS_REG_DBG_SELECT, PGLCS_REG_DBG_DWORD_ENABLE,
        PGLCS_REG_DBG_SHIFT, PGLCS_REG_DBG_FORCE_VALID,
        PGLCS_REG_DBG_FORCE_FRAME,
@@ -683,8 +698,9 @@ static struct block_defs block_pglcs_defs = {
 };
 
 static struct block_defs block_ptu_defs = {
-       "ptu", {true, true, true}, false, 0,
-       {DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
+       "ptu",
+       {true, true}, false, 0,
+       {DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
        PTU_REG_DBG_SELECT, PTU_REG_DBG_DWORD_ENABLE,
        PTU_REG_DBG_SHIFT, PTU_REG_DBG_FORCE_VALID,
        PTU_REG_DBG_FORCE_FRAME,
@@ -692,8 +708,9 @@ static struct block_defs block_ptu_defs = {
 };
 
 static struct block_defs block_dmae_defs = {
-       "dmae", {true, true, true}, false, 0,
-       {DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
+       "dmae",
+       {true, true}, false, 0,
+       {DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
        DMAE_REG_DBG_SELECT, DMAE_REG_DBG_DWORD_ENABLE,
        DMAE_REG_DBG_SHIFT, DMAE_REG_DBG_FORCE_VALID,
        DMAE_REG_DBG_FORCE_FRAME,
@@ -701,8 +718,9 @@ static struct block_defs block_dmae_defs = {
 };
 
 static struct block_defs block_tcm_defs = {
-       "tcm", {true, true, true}, true, DBG_TSTORM_ID,
-       {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT},
+       "tcm",
+       {true, true}, true, DBG_TSTORM_ID,
+       {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT},
        TCM_REG_DBG_SELECT, TCM_REG_DBG_DWORD_ENABLE,
        TCM_REG_DBG_SHIFT, TCM_REG_DBG_FORCE_VALID,
        TCM_REG_DBG_FORCE_FRAME,
@@ -710,8 +728,9 @@ static struct block_defs block_tcm_defs = {
 };
 
 static struct block_defs block_mcm_defs = {
-       "mcm", {true, true, true}, true, DBG_MSTORM_ID,
-       {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM},
+       "mcm",
+       {true, true}, true, DBG_MSTORM_ID,
+       {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM},
        MCM_REG_DBG_SELECT, MCM_REG_DBG_DWORD_ENABLE,
        MCM_REG_DBG_SHIFT, MCM_REG_DBG_FORCE_VALID,
        MCM_REG_DBG_FORCE_FRAME,
@@ -719,8 +738,9 @@ static struct block_defs block_mcm_defs = {
 };
 
 static struct block_defs block_ucm_defs = {
-       "ucm", {true, true, true}, true, DBG_USTORM_ID,
-       {DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU},
+       "ucm",
+       {true, true}, true, DBG_USTORM_ID,
+       {DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU},
        UCM_REG_DBG_SELECT, UCM_REG_DBG_DWORD_ENABLE,
        UCM_REG_DBG_SHIFT, UCM_REG_DBG_FORCE_VALID,
        UCM_REG_DBG_FORCE_FRAME,
@@ -728,8 +748,9 @@ static struct block_defs block_ucm_defs = {
 };
 
 static struct block_defs block_xcm_defs = {
-       "xcm", {true, true, true}, true, DBG_XSTORM_ID,
-       {DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX},
+       "xcm",
+       {true, true}, true, DBG_XSTORM_ID,
+       {DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX},
        XCM_REG_DBG_SELECT, XCM_REG_DBG_DWORD_ENABLE,
        XCM_REG_DBG_SHIFT, XCM_REG_DBG_FORCE_VALID,
        XCM_REG_DBG_FORCE_FRAME,
@@ -737,8 +758,9 @@ static struct block_defs block_xcm_defs = {
 };
 
 static struct block_defs block_ycm_defs = {
-       "ycm", {true, true, true}, true, DBG_YSTORM_ID,
-       {DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCY},
+       "ycm",
+       {true, true}, true, DBG_YSTORM_ID,
+       {DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCY},
        YCM_REG_DBG_SELECT, YCM_REG_DBG_DWORD_ENABLE,
        YCM_REG_DBG_SHIFT, YCM_REG_DBG_FORCE_VALID,
        YCM_REG_DBG_FORCE_FRAME,
@@ -746,8 +768,9 @@ static struct block_defs block_ycm_defs = {
 };
 
 static struct block_defs block_pcm_defs = {
-       "pcm", {true, true, true}, true, DBG_PSTORM_ID,
-       {DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS},
+       "pcm",
+       {true, true}, true, DBG_PSTORM_ID,
+       {DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS},
        PCM_REG_DBG_SELECT, PCM_REG_DBG_DWORD_ENABLE,
        PCM_REG_DBG_SHIFT, PCM_REG_DBG_FORCE_VALID,
        PCM_REG_DBG_FORCE_FRAME,
@@ -755,8 +778,9 @@ static struct block_defs block_pcm_defs = {
 };
 
 static struct block_defs block_qm_defs = {
-       "qm", {true, true, true}, false, 0,
-       {DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCQ},
+       "qm",
+       {true, true}, false, 0,
+       {DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCQ},
        QM_REG_DBG_SELECT, QM_REG_DBG_DWORD_ENABLE,
        QM_REG_DBG_SHIFT, QM_REG_DBG_FORCE_VALID,
        QM_REG_DBG_FORCE_FRAME,
@@ -764,8 +788,9 @@ static struct block_defs block_qm_defs = {
 };
 
 static struct block_defs block_tm_defs = {
-       "tm", {true, true, true}, false, 0,
-       {DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS},
+       "tm",
+       {true, true}, false, 0,
+       {DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS},
        TM_REG_DBG_SELECT, TM_REG_DBG_DWORD_ENABLE,
        TM_REG_DBG_SHIFT, TM_REG_DBG_FORCE_VALID,
        TM_REG_DBG_FORCE_FRAME,
@@ -773,8 +798,9 @@ static struct block_defs block_tm_defs = {
 };
 
 static struct block_defs block_dorq_defs = {
-       "dorq", {true, true, true}, false, 0,
-       {DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCY},
+       "dorq",
+       {true, true}, false, 0,
+       {DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCY},
        DORQ_REG_DBG_SELECT, DORQ_REG_DBG_DWORD_ENABLE,
        DORQ_REG_DBG_SHIFT, DORQ_REG_DBG_FORCE_VALID,
        DORQ_REG_DBG_FORCE_FRAME,
@@ -782,8 +808,9 @@ static struct block_defs block_dorq_defs = {
 };
 
 static struct block_defs block_brb_defs = {
-       "brb", {true, true, true}, false, 0,
-       {DBG_BUS_CLIENT_RBCR, DBG_BUS_CLIENT_RBCR, DBG_BUS_CLIENT_RBCR},
+       "brb",
+       {true, true}, false, 0,
+       {DBG_BUS_CLIENT_RBCR, DBG_BUS_CLIENT_RBCR},
        BRB_REG_DBG_SELECT, BRB_REG_DBG_DWORD_ENABLE,
        BRB_REG_DBG_SHIFT, BRB_REG_DBG_FORCE_VALID,
        BRB_REG_DBG_FORCE_FRAME,
@@ -791,8 +818,9 @@ static struct block_defs block_brb_defs = {
 };
 
 static struct block_defs block_src_defs = {
-       "src", {true, true, true}, false, 0,
-       {DBG_BUS_CLIENT_RBCF, DBG_BUS_CLIENT_RBCF, DBG_BUS_CLIENT_RBCF},
+       "src",
+       {true, true}, false, 0,
+       {DBG_BUS_CLIENT_RBCF, DBG_BUS_CLIENT_RBCF},
        SRC_REG_DBG_SELECT, SRC_REG_DBG_DWORD_ENABLE,
        SRC_REG_DBG_SHIFT, SRC_REG_DBG_FORCE_VALID,
        SRC_REG_DBG_FORCE_FRAME,
@@ -800,8 +828,9 @@ static struct block_defs block_src_defs = {
 };
 
 static struct block_defs block_prs_defs = {
-       "prs", {true, true, true}, false, 0,
-       {DBG_BUS_CLIENT_RBCR, DBG_BUS_CLIENT_RBCR, DBG_BUS_CLIENT_RBCR},
+       "prs",
+       {true, true}, false, 0,
+       {DBG_BUS_CLIENT_RBCR, DBG_BUS_CLIENT_RBCR},
        PRS_REG_DBG_SELECT, PRS_REG_DBG_DWORD_ENABLE,
        PRS_REG_DBG_SHIFT, PRS_REG_DBG_FORCE_VALID,
        PRS_REG_DBG_FORCE_FRAME,
@@ -809,8 +838,9 @@ static struct block_defs block_prs_defs = {
 };
 
 static struct block_defs block_tsdm_defs = {
-       "tsdm", {true, true, true}, true, DBG_TSTORM_ID,
-       {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT},
+       "tsdm",
+       {true, true}, true, DBG_TSTORM_ID,
+       {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT},
        TSDM_REG_DBG_SELECT, TSDM_REG_DBG_DWORD_ENABLE,
        TSDM_REG_DBG_SHIFT, TSDM_REG_DBG_FORCE_VALID,
        TSDM_REG_DBG_FORCE_FRAME,
@@ -818,8 +848,9 @@ static struct block_defs block_tsdm_defs = {
 };
 
 static struct block_defs block_msdm_defs = {
-       "msdm", {true, true, true}, true, DBG_MSTORM_ID,
-       {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM},
+       "msdm",
+       {true, true}, true, DBG_MSTORM_ID,
+       {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM},
        MSDM_REG_DBG_SELECT, MSDM_REG_DBG_DWORD_ENABLE,
        MSDM_REG_DBG_SHIFT, MSDM_REG_DBG_FORCE_VALID,
        MSDM_REG_DBG_FORCE_FRAME,
@@ -827,8 +858,9 @@ static struct block_defs block_msdm_defs = {
 };
 
 static struct block_defs block_usdm_defs = {
-       "usdm", {true, true, true}, true, DBG_USTORM_ID,
-       {DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU},
+       "usdm",
+       {true, true}, true, DBG_USTORM_ID,
+       {DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU},
        USDM_REG_DBG_SELECT, USDM_REG_DBG_DWORD_ENABLE,
        USDM_REG_DBG_SHIFT, USDM_REG_DBG_FORCE_VALID,
        USDM_REG_DBG_FORCE_FRAME,
@@ -836,8 +868,9 @@ static struct block_defs block_usdm_defs = {
 };
 
 static struct block_defs block_xsdm_defs = {
-       "xsdm", {true, true, true}, true, DBG_XSTORM_ID,
-       {DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX},
+       "xsdm",
+       {true, true}, true, DBG_XSTORM_ID,
+       {DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX},
        XSDM_REG_DBG_SELECT, XSDM_REG_DBG_DWORD_ENABLE,
        XSDM_REG_DBG_SHIFT, XSDM_REG_DBG_FORCE_VALID,
        XSDM_REG_DBG_FORCE_FRAME,
@@ -845,8 +878,9 @@ static struct block_defs block_xsdm_defs = {
 };
 
 static struct block_defs block_ysdm_defs = {
-       "ysdm", {true, true, true}, true, DBG_YSTORM_ID,
-       {DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCY},
+       "ysdm",
+       {true, true}, true, DBG_YSTORM_ID,
+       {DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCY},
        YSDM_REG_DBG_SELECT, YSDM_REG_DBG_DWORD_ENABLE,
        YSDM_REG_DBG_SHIFT, YSDM_REG_DBG_FORCE_VALID,
        YSDM_REG_DBG_FORCE_FRAME,
@@ -854,8 +888,9 @@ static struct block_defs block_ysdm_defs = {
 };
 
 static struct block_defs block_psdm_defs = {
-       "psdm", {true, true, true}, true, DBG_PSTORM_ID,
-       {DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS},
+       "psdm",
+       {true, true}, true, DBG_PSTORM_ID,
+       {DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS},
        PSDM_REG_DBG_SELECT, PSDM_REG_DBG_DWORD_ENABLE,
        PSDM_REG_DBG_SHIFT, PSDM_REG_DBG_FORCE_VALID,
        PSDM_REG_DBG_FORCE_FRAME,
@@ -863,8 +898,9 @@ static struct block_defs block_psdm_defs = {
 };
 
 static struct block_defs block_tsem_defs = {
-       "tsem", {true, true, true}, true, DBG_TSTORM_ID,
-       {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT},
+       "tsem",
+       {true, true}, true, DBG_TSTORM_ID,
+       {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT},
        TSEM_REG_DBG_SELECT, TSEM_REG_DBG_DWORD_ENABLE,
        TSEM_REG_DBG_SHIFT, TSEM_REG_DBG_FORCE_VALID,
        TSEM_REG_DBG_FORCE_FRAME,
@@ -872,8 +908,9 @@ static struct block_defs block_tsem_defs = {
 };
 
 static struct block_defs block_msem_defs = {
-       "msem", {true, true, true}, true, DBG_MSTORM_ID,
-       {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM},
+       "msem",
+       {true, true}, true, DBG_MSTORM_ID,
+       {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM},
        MSEM_REG_DBG_SELECT, MSEM_REG_DBG_DWORD_ENABLE,
        MSEM_REG_DBG_SHIFT, MSEM_REG_DBG_FORCE_VALID,
        MSEM_REG_DBG_FORCE_FRAME,
@@ -881,8 +918,9 @@ static struct block_defs block_msem_defs = {
 };
 
 static struct block_defs block_usem_defs = {
-       "usem", {true, true, true}, true, DBG_USTORM_ID,
-       {DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU},
+       "usem",
+       {true, true}, true, DBG_USTORM_ID,
+       {DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU},
        USEM_REG_DBG_SELECT, USEM_REG_DBG_DWORD_ENABLE,
        USEM_REG_DBG_SHIFT, USEM_REG_DBG_FORCE_VALID,
        USEM_REG_DBG_FORCE_FRAME,
@@ -890,8 +928,9 @@ static struct block_defs block_usem_defs = {
 };
 
 static struct block_defs block_xsem_defs = {
-       "xsem", {true, true, true}, true, DBG_XSTORM_ID,
-       {DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX},
+       "xsem",
+       {true, true}, true, DBG_XSTORM_ID,
+       {DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX},
        XSEM_REG_DBG_SELECT, XSEM_REG_DBG_DWORD_ENABLE,
        XSEM_REG_DBG_SHIFT, XSEM_REG_DBG_FORCE_VALID,
        XSEM_REG_DBG_FORCE_FRAME,
@@ -899,8 +938,9 @@ static struct block_defs block_xsem_defs = {
 };
 
 static struct block_defs block_ysem_defs = {
-       "ysem", {true, true, true}, true, DBG_YSTORM_ID,
-       {DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCY},
+       "ysem",
+       {true, true}, true, DBG_YSTORM_ID,
+       {DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCY},
        YSEM_REG_DBG_SELECT, YSEM_REG_DBG_DWORD_ENABLE,
        YSEM_REG_DBG_SHIFT, YSEM_REG_DBG_FORCE_VALID,
        YSEM_REG_DBG_FORCE_FRAME,
@@ -908,8 +948,9 @@ static struct block_defs block_ysem_defs = {
 };
 
 static struct block_defs block_psem_defs = {
-       "psem", {true, true, true}, true, DBG_PSTORM_ID,
-       {DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS},
+       "psem",
+       {true, true}, true, DBG_PSTORM_ID,
+       {DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS},
        PSEM_REG_DBG_SELECT, PSEM_REG_DBG_DWORD_ENABLE,
        PSEM_REG_DBG_SHIFT, PSEM_REG_DBG_FORCE_VALID,
        PSEM_REG_DBG_FORCE_FRAME,
@@ -917,8 +958,9 @@ static struct block_defs block_psem_defs = {
 };
 
 static struct block_defs block_rss_defs = {
-       "rss", {true, true, true}, false, 0,
-       {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT},
+       "rss",
+       {true, true}, false, 0,
+       {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT},
        RSS_REG_DBG_SELECT, RSS_REG_DBG_DWORD_ENABLE,
        RSS_REG_DBG_SHIFT, RSS_REG_DBG_FORCE_VALID,
        RSS_REG_DBG_FORCE_FRAME,
@@ -926,8 +968,9 @@ static struct block_defs block_rss_defs = {
 };
 
 static struct block_defs block_tmld_defs = {
-       "tmld", {true, true, true}, false, 0,
-       {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM},
+       "tmld",
+       {true, true}, false, 0,
+       {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM},
        TMLD_REG_DBG_SELECT, TMLD_REG_DBG_DWORD_ENABLE,
        TMLD_REG_DBG_SHIFT, TMLD_REG_DBG_FORCE_VALID,
        TMLD_REG_DBG_FORCE_FRAME,
@@ -935,8 +978,9 @@ static struct block_defs block_tmld_defs = {
 };
 
 static struct block_defs block_muld_defs = {
-       "muld", {true, true, true}, false, 0,
-       {DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU},
+       "muld",
+       {true, true}, false, 0,
+       {DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU},
        MULD_REG_DBG_SELECT, MULD_REG_DBG_DWORD_ENABLE,
        MULD_REG_DBG_SHIFT, MULD_REG_DBG_FORCE_VALID,
        MULD_REG_DBG_FORCE_FRAME,
@@ -944,8 +988,9 @@ static struct block_defs block_muld_defs = {
 };
 
 static struct block_defs block_yuld_defs = {
-       "yuld", {true, true, true}, false, 0,
-       {DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU},
+       "yuld",
+       {true, true}, false, 0,
+       {DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU},
        YULD_REG_DBG_SELECT, YULD_REG_DBG_DWORD_ENABLE,
        YULD_REG_DBG_SHIFT, YULD_REG_DBG_FORCE_VALID,
        YULD_REG_DBG_FORCE_FRAME,
@@ -953,8 +998,9 @@ static struct block_defs block_yuld_defs = {
 };
 
 static struct block_defs block_xyld_defs = {
-       "xyld", {true, true, true}, false, 0,
-       {DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX},
+       "xyld",
+       {true, true}, false, 0,
+       {DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX},
        XYLD_REG_DBG_SELECT, XYLD_REG_DBG_DWORD_ENABLE,
        XYLD_REG_DBG_SHIFT, XYLD_REG_DBG_FORCE_VALID,
        XYLD_REG_DBG_FORCE_FRAME,
@@ -962,8 +1008,9 @@ static struct block_defs block_xyld_defs = {
 };
 
 static struct block_defs block_prm_defs = {
-       "prm", {true, true, true}, false, 0,
-       {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM},
+       "prm",
+       {true, true}, false, 0,
+       {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM},
        PRM_REG_DBG_SELECT, PRM_REG_DBG_DWORD_ENABLE,
        PRM_REG_DBG_SHIFT, PRM_REG_DBG_FORCE_VALID,
        PRM_REG_DBG_FORCE_FRAME,
@@ -971,8 +1018,9 @@ static struct block_defs block_prm_defs = {
 };
 
 static struct block_defs block_pbf_pb1_defs = {
-       "pbf_pb1", {true, true, true}, false, 0,
-       {DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCV},
+       "pbf_pb1",
+       {true, true}, false, 0,
+       {DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCV},
        PBF_PB1_REG_DBG_SELECT, PBF_PB1_REG_DBG_DWORD_ENABLE,
        PBF_PB1_REG_DBG_SHIFT, PBF_PB1_REG_DBG_FORCE_VALID,
        PBF_PB1_REG_DBG_FORCE_FRAME,
@@ -981,8 +1029,9 @@ static struct block_defs block_pbf_pb1_defs = {
 };
 
 static struct block_defs block_pbf_pb2_defs = {
-       "pbf_pb2", {true, true, true}, false, 0,
-       {DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCV},
+       "pbf_pb2",
+       {true, true}, false, 0,
+       {DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCV},
        PBF_PB2_REG_DBG_SELECT, PBF_PB2_REG_DBG_DWORD_ENABLE,
        PBF_PB2_REG_DBG_SHIFT, PBF_PB2_REG_DBG_FORCE_VALID,
        PBF_PB2_REG_DBG_FORCE_FRAME,
@@ -991,8 +1040,9 @@ static struct block_defs block_pbf_pb2_defs = {
 };
 
 static struct block_defs block_rpb_defs = {
-       "rpb", {true, true, true}, false, 0,
-       {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM},
+       "rpb",
+       {true, true}, false, 0,
+       {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM},
        RPB_REG_DBG_SELECT, RPB_REG_DBG_DWORD_ENABLE,
        RPB_REG_DBG_SHIFT, RPB_REG_DBG_FORCE_VALID,
        RPB_REG_DBG_FORCE_FRAME,
@@ -1000,8 +1050,9 @@ static struct block_defs block_rpb_defs = {
 };
 
 static struct block_defs block_btb_defs = {
-       "btb", {true, true, true}, false, 0,
-       {DBG_BUS_CLIENT_RBCR, DBG_BUS_CLIENT_RBCR, DBG_BUS_CLIENT_RBCV},
+       "btb",
+       {true, true}, false, 0,
+       {DBG_BUS_CLIENT_RBCR, DBG_BUS_CLIENT_RBCV},
        BTB_REG_DBG_SELECT, BTB_REG_DBG_DWORD_ENABLE,
        BTB_REG_DBG_SHIFT, BTB_REG_DBG_FORCE_VALID,
        BTB_REG_DBG_FORCE_FRAME,
@@ -1009,8 +1060,9 @@ static struct block_defs block_btb_defs = {
 };
 
 static struct block_defs block_pbf_defs = {
-       "pbf", {true, true, true}, false, 0,
-       {DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCV},
+       "pbf",
+       {true, true}, false, 0,
+       {DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCV},
        PBF_REG_DBG_SELECT, PBF_REG_DBG_DWORD_ENABLE,
        PBF_REG_DBG_SHIFT, PBF_REG_DBG_FORCE_VALID,
        PBF_REG_DBG_FORCE_FRAME,
@@ -1018,8 +1070,9 @@ static struct block_defs block_pbf_defs = {
 };
 
 static struct block_defs block_rdif_defs = {
-       "rdif", {true, true, true}, false, 0,
-       {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM},
+       "rdif",
+       {true, true}, false, 0,
+       {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM},
        RDIF_REG_DBG_SELECT, RDIF_REG_DBG_DWORD_ENABLE,
        RDIF_REG_DBG_SHIFT, RDIF_REG_DBG_FORCE_VALID,
        RDIF_REG_DBG_FORCE_FRAME,
@@ -1027,8 +1080,9 @@ static struct block_defs block_rdif_defs = {
 };
 
 static struct block_defs block_tdif_defs = {
-       "tdif", {true, true, true}, false, 0,
-       {DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS},
+       "tdif",
+       {true, true}, false, 0,
+       {DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS},
        TDIF_REG_DBG_SELECT, TDIF_REG_DBG_DWORD_ENABLE,
        TDIF_REG_DBG_SHIFT, TDIF_REG_DBG_FORCE_VALID,
        TDIF_REG_DBG_FORCE_FRAME,
@@ -1036,8 +1090,9 @@ static struct block_defs block_tdif_defs = {
 };
 
 static struct block_defs block_cdu_defs = {
-       "cdu", {true, true, true}, false, 0,
-       {DBG_BUS_CLIENT_RBCF, DBG_BUS_CLIENT_RBCF, DBG_BUS_CLIENT_RBCF},
+       "cdu",
+       {true, true}, false, 0,
+       {DBG_BUS_CLIENT_RBCF, DBG_BUS_CLIENT_RBCF},
        CDU_REG_DBG_SELECT, CDU_REG_DBG_DWORD_ENABLE,
        CDU_REG_DBG_SHIFT, CDU_REG_DBG_FORCE_VALID,
        CDU_REG_DBG_FORCE_FRAME,
@@ -1045,8 +1100,9 @@ static struct block_defs block_cdu_defs = {
 };
 
 static struct block_defs block_ccfc_defs = {
-       "ccfc", {true, true, true}, false, 0,
-       {DBG_BUS_CLIENT_RBCF, DBG_BUS_CLIENT_RBCF, DBG_BUS_CLIENT_RBCF},
+       "ccfc",
+       {true, true}, false, 0,
+       {DBG_BUS_CLIENT_RBCF, DBG_BUS_CLIENT_RBCF},
        CCFC_REG_DBG_SELECT, CCFC_REG_DBG_DWORD_ENABLE,
        CCFC_REG_DBG_SHIFT, CCFC_REG_DBG_FORCE_VALID,
        CCFC_REG_DBG_FORCE_FRAME,
@@ -1054,8 +1110,9 @@ static struct block_defs block_ccfc_defs = {
 };
 
 static struct block_defs block_tcfc_defs = {
-       "tcfc", {true, true, true}, false, 0,
-       {DBG_BUS_CLIENT_RBCF, DBG_BUS_CLIENT_RBCF, DBG_BUS_CLIENT_RBCF},
+       "tcfc",
+       {true, true}, false, 0,
+       {DBG_BUS_CLIENT_RBCF, DBG_BUS_CLIENT_RBCF},
        TCFC_REG_DBG_SELECT, TCFC_REG_DBG_DWORD_ENABLE,
        TCFC_REG_DBG_SHIFT, TCFC_REG_DBG_FORCE_VALID,
        TCFC_REG_DBG_FORCE_FRAME,
@@ -1063,8 +1120,9 @@ static struct block_defs block_tcfc_defs = {
 };
 
 static struct block_defs block_igu_defs = {
-       "igu", {true, true, true}, false, 0,
-       {DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
+       "igu",
+       {true, true}, false, 0,
+       {DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
        IGU_REG_DBG_SELECT, IGU_REG_DBG_DWORD_ENABLE,
        IGU_REG_DBG_SHIFT, IGU_REG_DBG_FORCE_VALID,
        IGU_REG_DBG_FORCE_FRAME,
@@ -1072,8 +1130,9 @@ static struct block_defs block_igu_defs = {
 };
 
 static struct block_defs block_cau_defs = {
-       "cau", {true, true, true}, false, 0,
-       {DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
+       "cau",
+       {true, true}, false, 0,
+       {DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
        CAU_REG_DBG_SELECT, CAU_REG_DBG_DWORD_ENABLE,
        CAU_REG_DBG_SHIFT, CAU_REG_DBG_FORCE_VALID,
        CAU_REG_DBG_FORCE_FRAME,
@@ -1081,8 +1140,9 @@ static struct block_defs block_cau_defs = {
 };
 
 static struct block_defs block_umac_defs = {
-       "umac", {false, false, true}, false, 0,
-       {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCZ},
+       "umac",
+       {false, true}, false, 0,
+       {MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCZ},
        UMAC_REG_DBG_SELECT, UMAC_REG_DBG_DWORD_ENABLE,
        UMAC_REG_DBG_SHIFT, UMAC_REG_DBG_FORCE_VALID,
        UMAC_REG_DBG_FORCE_FRAME,
@@ -1090,22 +1150,23 @@ static struct block_defs block_umac_defs = {
 };
 
 static struct block_defs block_xmac_defs = {
-       "xmac", {false, false, false}, false, 0,
-       {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
+       "xmac", {false, false}, false, 0,
+       {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
        0, 0, 0, 0, 0,
        false, false, MAX_DBG_RESET_REGS, 0
 };
 
 static struct block_defs block_dbg_defs = {
-       "dbg", {false, false, false}, false, 0,
-       {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
+       "dbg", {false, false}, false, 0,
+       {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
        0, 0, 0, 0, 0,
        true, true, DBG_RESET_REG_MISC_PL_PDA_VAUX, 3
 };
 
 static struct block_defs block_nig_defs = {
-       "nig", {true, true, true}, false, 0,
-       {DBG_BUS_CLIENT_RBCN, DBG_BUS_CLIENT_RBCN, DBG_BUS_CLIENT_RBCN},
+       "nig",
+       {true, true}, false, 0,
+       {DBG_BUS_CLIENT_RBCN, DBG_BUS_CLIENT_RBCN},
        NIG_REG_DBG_SELECT, NIG_REG_DBG_DWORD_ENABLE,
        NIG_REG_DBG_SHIFT, NIG_REG_DBG_FORCE_VALID,
        NIG_REG_DBG_FORCE_FRAME,
@@ -1113,8 +1174,9 @@ static struct block_defs block_nig_defs = {
 };
 
 static struct block_defs block_wol_defs = {
-       "wol", {false, false, true}, false, 0,
-       {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCZ},
+       "wol",
+       {false, true}, false, 0,
+       {MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCZ},
        WOL_REG_DBG_SELECT, WOL_REG_DBG_DWORD_ENABLE,
        WOL_REG_DBG_SHIFT, WOL_REG_DBG_FORCE_VALID,
        WOL_REG_DBG_FORCE_FRAME,
@@ -1122,8 +1184,9 @@ static struct block_defs block_wol_defs = {
 };
 
 static struct block_defs block_bmbn_defs = {
-       "bmbn", {false, false, true}, false, 0,
-       {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCB},
+       "bmbn",
+       {false, true}, false, 0,
+       {MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCB},
        BMBN_REG_DBG_SELECT, BMBN_REG_DBG_DWORD_ENABLE,
        BMBN_REG_DBG_SHIFT, BMBN_REG_DBG_FORCE_VALID,
        BMBN_REG_DBG_FORCE_FRAME,
@@ -1131,15 +1194,16 @@ static struct block_defs block_bmbn_defs = {
 };
 
 static struct block_defs block_ipc_defs = {
-       "ipc", {false, false, false}, false, 0,
-       {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
+       "ipc", {false, false}, false, 0,
+       {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
        0, 0, 0, 0, 0,
        true, false, DBG_RESET_REG_MISCS_PL_UA, 8
 };
 
 static struct block_defs block_nwm_defs = {
-       "nwm", {false, false, true}, false, 0,
-       {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCW},
+       "nwm",
+       {false, true}, false, 0,
+       {MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCW},
        NWM_REG_DBG_SELECT, NWM_REG_DBG_DWORD_ENABLE,
        NWM_REG_DBG_SHIFT, NWM_REG_DBG_FORCE_VALID,
        NWM_REG_DBG_FORCE_FRAME,
@@ -1147,22 +1211,29 @@ static struct block_defs block_nwm_defs = {
 };
 
 static struct block_defs block_nws_defs = {
-       "nws", {false, false, false}, false, 0,
-       {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
-       0, 0, 0, 0, 0,
+       "nws",
+       {false, true}, false, 0,
+       {MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCW},
+       NWS_REG_DBG_SELECT, NWS_REG_DBG_DWORD_ENABLE,
+       NWS_REG_DBG_SHIFT, NWS_REG_DBG_FORCE_VALID,
+       NWS_REG_DBG_FORCE_FRAME,
        true, false, DBG_RESET_REG_MISCS_PL_HV, 12
 };
 
 static struct block_defs block_ms_defs = {
-       "ms", {false, false, false}, false, 0,
-       {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
-       0, 0, 0, 0, 0,
+       "ms",
+       {false, true}, false, 0,
+       {MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCZ},
+       MS_REG_DBG_SELECT, MS_REG_DBG_DWORD_ENABLE,
+       MS_REG_DBG_SHIFT, MS_REG_DBG_FORCE_VALID,
+       MS_REG_DBG_FORCE_FRAME,
        true, false, DBG_RESET_REG_MISCS_PL_HV, 13
 };
 
 static struct block_defs block_phy_pcie_defs = {
-       "phy_pcie", {false, false, true}, false, 0,
-       {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCH},
+       "phy_pcie",
+       {false, true}, false, 0,
+       {MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCH},
        PCIE_REG_DBG_COMMON_SELECT, PCIE_REG_DBG_COMMON_DWORD_ENABLE,
        PCIE_REG_DBG_COMMON_SHIFT, PCIE_REG_DBG_COMMON_FORCE_VALID,
        PCIE_REG_DBG_COMMON_FORCE_FRAME,
@@ -1170,22 +1241,57 @@ static struct block_defs block_phy_pcie_defs = {
 };
 
 static struct block_defs block_led_defs = {
-       "led", {false, false, false}, false, 0,
-       {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
+       "led", {false, false}, false, 0,
+       {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
+       0, 0, 0, 0, 0,
+       true, false, DBG_RESET_REG_MISCS_PL_HV, 14
+};
+
+static struct block_defs block_avs_wrap_defs = {
+       "avs_wrap", {false, false}, false, 0,
+       {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
+       0, 0, 0, 0, 0,
+       true, false, DBG_RESET_REG_MISCS_PL_UA, 11
+};
+
+static struct block_defs block_rgfs_defs = {
+       "rgfs", {false, false}, false, 0,
+       {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
        0, 0, 0, 0, 0,
-       true, true, DBG_RESET_REG_MISCS_PL_HV, 14
+       false, false, MAX_DBG_RESET_REGS, 0
+};
+
+static struct block_defs block_tgfs_defs = {
+       "tgfs", {false, false}, false, 0,
+       {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
+       0, 0, 0, 0, 0,
+       false, false, MAX_DBG_RESET_REGS, 0
+};
+
+static struct block_defs block_ptld_defs = {
+       "ptld", {false, false}, false, 0,
+       {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
+       0, 0, 0, 0, 0,
+       false, false, MAX_DBG_RESET_REGS, 0
+};
+
+static struct block_defs block_ypld_defs = {
+       "ypld", {false, false}, false, 0,
+       {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
+       0, 0, 0, 0, 0,
+       false, false, MAX_DBG_RESET_REGS, 0
 };
 
 static struct block_defs block_misc_aeu_defs = {
-       "misc_aeu", {false, false, false}, false, 0,
-       {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
+       "misc_aeu", {false, false}, false, 0,
+       {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
        0, 0, 0, 0, 0,
        false, false, MAX_DBG_RESET_REGS, 0
 };
 
 static struct block_defs block_bar0_map_defs = {
-       "bar0_map", {false, false, false}, false, 0,
-       {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
+       "bar0_map", {false, false}, false, 0,
+       {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
        0, 0, 0, 0, 0,
        false, false, MAX_DBG_RESET_REGS, 0
 };
@@ -1269,6 +1375,11 @@ static struct block_defs *s_block_defs[MAX_BLOCK_ID] = {
        &block_ms_defs,
        &block_phy_pcie_defs,
        &block_led_defs,
+       &block_avs_wrap_defs,
+       &block_rgfs_defs,
+       &block_tgfs_defs,
+       &block_ptld_defs,
+       &block_ypld_defs,
        &block_misc_aeu_defs,
        &block_bar0_map_defs,
 };
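
The pointer table is indexed by enum block_id, so the five new entries keep it in step with a grown enum. A minimal, hypothetical consumer that walks the table and reports which blocks can drive the debug bus on the current chip (names follow the sketch above):

/* Hypothetical walker; chip_id indexes the two-entry per-chip arrays. */
static void dbg_list_bus_blocks(u8 chip_id)
{
	u32 block_id;

	for (block_id = 0; block_id < MAX_BLOCK_ID; block_id++) {
		struct block_defs *block = s_block_defs[block_id];

		if (block->has_dbg_bus[chip_id])
			pr_debug("block %s -> dbg client %d\n",
				 block->name, block->dbg_client_id[chip_id]);
	}
}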
@@ -1281,65 +1392,67 @@ static struct platform_defs s_platform_defs[] = {
 };
 
 static struct grc_param_defs s_grc_param_defs[] = {
-       {{1, 1, 1}, 0, 1, false, 1, 1}, /* DBG_GRC_PARAM_DUMP_TSTORM */
-       {{1, 1, 1}, 0, 1, false, 1, 1}, /* DBG_GRC_PARAM_DUMP_MSTORM */
-       {{1, 1, 1}, 0, 1, false, 1, 1}, /* DBG_GRC_PARAM_DUMP_USTORM */
-       {{1, 1, 1}, 0, 1, false, 1, 1}, /* DBG_GRC_PARAM_DUMP_XSTORM */
-       {{1, 1, 1}, 0, 1, false, 1, 1}, /* DBG_GRC_PARAM_DUMP_YSTORM */
-       {{1, 1, 1}, 0, 1, false, 1, 1}, /* DBG_GRC_PARAM_DUMP_PSTORM */
-       {{1, 1, 1}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_REGS */
-       {{1, 1, 1}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_RAM */
-       {{1, 1, 1}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_PBUF */
-       {{0, 0, 0}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_IOR */
-       {{0, 0, 0}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_VFC */
-       {{1, 1, 1}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_CM_CTX */
-       {{1, 1, 1}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_ILT */
-       {{1, 1, 1}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_RSS */
-       {{1, 1, 1}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_CAU */
-       {{1, 1, 1}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_QM */
-       {{1, 1, 1}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_MCP */
-       {{1, 1, 1}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_RESERVED */
-       {{1, 1, 1}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_CFC */
-       {{1, 1, 1}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_IGU */
-       {{0, 0, 0}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_BRB */
-       {{0, 0, 0}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_BTB */
-       {{0, 0, 0}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_BMB */
-       {{1, 1, 1}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_NIG */
-       {{1, 1, 1}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_MULD */
-       {{1, 1, 1}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_PRS */
-       {{1, 1, 1}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_DMAE */
-       {{1, 1, 1}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_TM */
-       {{1, 1, 1}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_SDM */
-       {{1, 1, 1}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_DIF */
-       {{1, 1, 1}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_STATIC */
-       {{0, 0, 0}, 0, 1, false, 0, 0}, /* DBG_GRC_PARAM_UNSTALL */
-       {{MAX_LCIDS, MAX_LCIDS, MAX_LCIDS}, 1, MAX_LCIDS, false, MAX_LCIDS,
+       {{1, 1}, 0, 1, false, 1, 1},    /* DBG_GRC_PARAM_DUMP_TSTORM */
+       {{1, 1}, 0, 1, false, 1, 1},    /* DBG_GRC_PARAM_DUMP_MSTORM */
+       {{1, 1}, 0, 1, false, 1, 1},    /* DBG_GRC_PARAM_DUMP_USTORM */
+       {{1, 1}, 0, 1, false, 1, 1},    /* DBG_GRC_PARAM_DUMP_XSTORM */
+       {{1, 1}, 0, 1, false, 1, 1},    /* DBG_GRC_PARAM_DUMP_YSTORM */
+       {{1, 1}, 0, 1, false, 1, 1},    /* DBG_GRC_PARAM_DUMP_PSTORM */
+       {{1, 1}, 0, 1, false, 0, 1},    /* DBG_GRC_PARAM_DUMP_REGS */
+       {{1, 1}, 0, 1, false, 0, 1},    /* DBG_GRC_PARAM_DUMP_RAM */
+       {{1, 1}, 0, 1, false, 0, 1},    /* DBG_GRC_PARAM_DUMP_PBUF */
+       {{0, 0}, 0, 1, false, 0, 1},    /* DBG_GRC_PARAM_DUMP_IOR */
+       {{0, 0}, 0, 1, false, 0, 1},    /* DBG_GRC_PARAM_DUMP_VFC */
+       {{1, 1}, 0, 1, false, 0, 1},    /* DBG_GRC_PARAM_DUMP_CM_CTX */
+       {{1, 1}, 0, 1, false, 0, 1},    /* DBG_GRC_PARAM_DUMP_ILT */
+       {{1, 1}, 0, 1, false, 0, 1},    /* DBG_GRC_PARAM_DUMP_RSS */
+       {{1, 1}, 0, 1, false, 0, 1},    /* DBG_GRC_PARAM_DUMP_CAU */
+       {{1, 1}, 0, 1, false, 0, 1},    /* DBG_GRC_PARAM_DUMP_QM */
+       {{1, 1}, 0, 1, false, 0, 1},    /* DBG_GRC_PARAM_DUMP_MCP */
+       {{1, 1}, 0, 1, false, 0, 1},    /* DBG_GRC_PARAM_RESERVED */
+       {{1, 1}, 0, 1, false, 0, 1},    /* DBG_GRC_PARAM_DUMP_CFC */
+       {{1, 1}, 0, 1, false, 0, 1},    /* DBG_GRC_PARAM_DUMP_IGU */
+       {{0, 0}, 0, 1, false, 0, 1},    /* DBG_GRC_PARAM_DUMP_BRB */
+       {{0, 0}, 0, 1, false, 0, 1},    /* DBG_GRC_PARAM_DUMP_BTB */
+       {{0, 0}, 0, 1, false, 0, 1},    /* DBG_GRC_PARAM_DUMP_BMB */
+       {{1, 1}, 0, 1, false, 0, 1},    /* DBG_GRC_PARAM_DUMP_NIG */
+       {{1, 1}, 0, 1, false, 0, 1},    /* DBG_GRC_PARAM_DUMP_MULD */
+       {{1, 1}, 0, 1, false, 0, 1},    /* DBG_GRC_PARAM_DUMP_PRS */
+       {{1, 1}, 0, 1, false, 0, 1},    /* DBG_GRC_PARAM_DUMP_DMAE */
+       {{1, 1}, 0, 1, false, 0, 1},    /* DBG_GRC_PARAM_DUMP_TM */
+       {{1, 1}, 0, 1, false, 0, 1},    /* DBG_GRC_PARAM_DUMP_SDM */
+       {{1, 1}, 0, 1, false, 0, 1},    /* DBG_GRC_PARAM_DUMP_DIF */
+       {{1, 1}, 0, 1, false, 0, 1},    /* DBG_GRC_PARAM_DUMP_STATIC */
+       {{0, 0}, 0, 1, false, 0, 0},    /* DBG_GRC_PARAM_UNSTALL */
+       {{MAX_LCIDS, MAX_LCIDS}, 1, MAX_LCIDS, false, MAX_LCIDS,
         MAX_LCIDS},                    /* DBG_GRC_PARAM_NUM_LCIDS */
-       {{MAX_LTIDS, MAX_LTIDS, MAX_LTIDS}, 1, MAX_LTIDS, false, MAX_LTIDS,
+       {{MAX_LTIDS, MAX_LTIDS}, 1, MAX_LTIDS, false, MAX_LTIDS,
         MAX_LTIDS},                    /* DBG_GRC_PARAM_NUM_LTIDS */
-       {{0, 0, 0}, 0, 1, true, 0, 0},  /* DBG_GRC_PARAM_EXCLUDE_ALL */
-       {{0, 0, 0}, 0, 1, true, 0, 0},  /* DBG_GRC_PARAM_CRASH */
-       {{0, 0, 0}, 0, 1, false, 1, 0}, /* DBG_GRC_PARAM_PARITY_SAFE */
-       {{1, 1, 1}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_CM */
-       {{1, 1, 1}, 0, 1, false, 0, 1}  /* DBG_GRC_PARAM_DUMP_PHY */
+       {{0, 0}, 0, 1, true, 0, 0},     /* DBG_GRC_PARAM_EXCLUDE_ALL */
+       {{0, 0}, 0, 1, true, 0, 0},     /* DBG_GRC_PARAM_CRASH */
+       {{0, 0}, 0, 1, false, 1, 0},    /* DBG_GRC_PARAM_PARITY_SAFE */
+       {{1, 1}, 0, 1, false, 0, 1},    /* DBG_GRC_PARAM_DUMP_CM */
+       {{1, 1}, 0, 1, false, 0, 1},    /* DBG_GRC_PARAM_DUMP_PHY */
+       {{0, 0}, 0, 1, false, 0, 0},    /* DBG_GRC_PARAM_NO_MCP */
+       {{0, 0}, 0, 1, false, 0, 0}     /* DBG_GRC_PARAM_NO_FW_VER */
 };
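
Besides dropping the third per-chip default column, the table gains two parameters at the end, DBG_GRC_PARAM_NO_MCP and DBG_GRC_PARAM_NO_FW_VER, both defaulting to 0; the second is what gates the FW/MFW version reads further down. The field order implied by each initializer row is roughly:

/* Sketch inferred from the rows above, not the verbatim struct. */
struct grc_param_defs {
	u32 default_val[MAX_CHIP_IDS];  /* per-chip default, e.g. {1, 1} */
	u32 min;
	u32 max;
	bool is_preset;                 /* true for EXCLUDE_ALL and CRASH */
	u32 exclude_all_preset_val;     /* value under the exclude-all preset */
	u32 crash_preset_val;           /* value under the crash preset */
};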
 
 static struct rss_mem_defs s_rss_mem_defs[] = {
        { "rss_mem_cid", "rss_cid", 0,
-         {256, 256, 320},
-         {32, 32, 32} },
+         {256, 320},
+         {32, 32} },
        { "rss_mem_key_msb", "rss_key", 1024,
-         {128, 128, 208},
-         {256, 256, 256} },
+         {128, 208},
+         {256, 256} },
        { "rss_mem_key_lsb", "rss_key", 2048,
-         {128, 128, 208},
-         {64, 64, 64} },
+         {128, 208},
+         {64, 64} },
        { "rss_mem_info", "rss_info", 3072,
-         {128, 128, 208},
-         {16, 16, 16} },
+         {128, 208},
+         {16, 16} },
        { "rss_mem_ind", "rss_ind", 4096,
-         {(128 * 128), (128 * 128), (128 * 208)},
-         {16, 16, 16} }
+         {(128 * 128), (128 * 208)},
+         {16, 16} }
 };
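
The RSS memory table follows the same two-column scheme: each row carries a per-chip entry count and a per-chip entry width. A sketch of the row layout (field names are assumptions):

/* Sketch matching the five rows above. */
struct rss_mem_defs {
	const char *mem_name;           /* "rss_mem_cid", ... */
	const char *type_name;          /* "rss_cid", "rss_key", ... */
	u32 addr;                       /* 0, 1024, 2048, 3072, 4096 */
	u32 num_entries[MAX_CHIP_IDS];  /* per-chip, e.g. {256, 320} */
	u32 entry_width[MAX_CHIP_IDS];  /* bits per entry, e.g. {32, 32} */
};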
 
 static struct vfc_ram_defs s_vfc_ram_defs[] = {
@@ -1352,32 +1465,32 @@ static struct vfc_ram_defs s_vfc_ram_defs[] = {
 static struct big_ram_defs s_big_ram_defs[] = {
        { "BRB", MEM_GROUP_BRB_MEM, MEM_GROUP_BRB_RAM, DBG_GRC_PARAM_DUMP_BRB,
          BRB_REG_BIG_RAM_ADDRESS, BRB_REG_BIG_RAM_DATA,
-         {4800, 4800, 5632} },
+         {4800, 5632} },
        { "BTB", MEM_GROUP_BTB_MEM, MEM_GROUP_BTB_RAM, DBG_GRC_PARAM_DUMP_BTB,
          BTB_REG_BIG_RAM_ADDRESS, BTB_REG_BIG_RAM_DATA,
-         {2880, 2880, 3680} },
+         {2880, 3680} },
        { "BMB", MEM_GROUP_BMB_MEM, MEM_GROUP_BMB_RAM, DBG_GRC_PARAM_DUMP_BMB,
          BMB_REG_BIG_RAM_ADDRESS, BMB_REG_BIG_RAM_DATA,
-         {1152, 1152, 1152} }
+         {1152, 1152} }
 };
 
 static struct reset_reg_defs s_reset_regs_defs[] = {
        { MISCS_REG_RESET_PL_UA, 0x0,
-         {true, true, true} },         /* DBG_RESET_REG_MISCS_PL_UA */
+         {true, true} },               /* DBG_RESET_REG_MISCS_PL_UA */
        { MISCS_REG_RESET_PL_HV, 0x0,
-         {true, true, true} },         /* DBG_RESET_REG_MISCS_PL_HV */
+         {true, true} },               /* DBG_RESET_REG_MISCS_PL_HV */
        { MISCS_REG_RESET_PL_HV_2, 0x0,
-         {false, false, true} },       /* DBG_RESET_REG_MISCS_PL_HV_2 */
+         {false, true} },              /* DBG_RESET_REG_MISCS_PL_HV_2 */
        { MISC_REG_RESET_PL_UA, 0x0,
-         {true, true, true} },         /* DBG_RESET_REG_MISC_PL_UA */
+         {true, true} },               /* DBG_RESET_REG_MISC_PL_UA */
        { MISC_REG_RESET_PL_HV, 0x0,
-         {true, true, true} },         /* DBG_RESET_REG_MISC_PL_HV */
+         {true, true} },               /* DBG_RESET_REG_MISC_PL_HV */
        { MISC_REG_RESET_PL_PDA_VMAIN_1, 0x4404040,
-         {true, true, true} },         /* DBG_RESET_REG_MISC_PL_PDA_VMAIN_1 */
+         {true, true} },               /* DBG_RESET_REG_MISC_PL_PDA_VMAIN_1 */
        { MISC_REG_RESET_PL_PDA_VMAIN_2, 0x7c00007,
-         {true, true, true} },         /* DBG_RESET_REG_MISC_PL_PDA_VMAIN_2 */
+         {true, true} },               /* DBG_RESET_REG_MISC_PL_PDA_VMAIN_2 */
        { MISC_REG_RESET_PL_PDA_VAUX, 0x2,
-         {true, true, true} },         /* DBG_RESET_REG_MISC_PL_PDA_VAUX */
+         {true, true} },               /* DBG_RESET_REG_MISC_PL_PDA_VAUX */
 };
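
Each reset-register row pairs an address with a value and a per-chip existence flag; reading the initializers, the non-zero constants (0x4404040, 0x7c00007, 0x2) are plausibly the values written when taking blocks out of reset. A sketch:

/* Sketch; field names assumed from usage (s_reset_regs_defs[i].addr
 * and .exists[chip_id] appear verbatim further down in this diff).
 */
struct reset_reg_defs {
	u32 addr;                  /* e.g. MISCS_REG_RESET_PL_UA */
	u32 unreset_val;           /* e.g. 0x4404040 for PL_PDA_VMAIN_1 */
	bool exists[MAX_CHIP_IDS]; /* register present on {BB, K2} */
};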
 
 static struct phy_defs s_phy_defs[] = {
@@ -1410,6 +1523,26 @@ static u32 qed_read_unaligned_dword(u8 *buf)
        return dword;
 }
 
+/* Returns the value of the specified GRC param */
+static u32 qed_grc_get_param(struct qed_hwfn *p_hwfn,
+                            enum dbg_grc_params grc_param)
+{
+       struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
+
+       return dev_data->grc.param_val[grc_param];
+}
+
+/* Initializes the GRC parameters */
+static void qed_dbg_grc_init_params(struct qed_hwfn *p_hwfn)
+{
+       struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
+
+       if (!dev_data->grc.params_initialized) {
+               qed_dbg_grc_set_params_default(p_hwfn);
+               dev_data->grc.params_initialized = 1;
+       }
+}
+
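Hoisting qed_grc_get_param above its users and adding the lazy qed_dbg_grc_init_params lets qed_dbg_dev_init apply the defaults exactly once; after that, dump helpers can cheaply consult a parameter before doing work, which is the pattern the FW/MFW version dumps adopt below:

/* The gating pattern used by the version-dump helpers (sketch):
 * skip the PRAM/MFW reads entirely when the user asked for no
 * firmware-version output.
 */
if (dump && !qed_grc_get_param(p_hwfn, DBG_GRC_PARAM_NO_FW_VER)) {
	/* ... read and format the FW/MFW version ... */
}
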
 /* Initializes debug data for the specified device */
 static enum dbg_status qed_dbg_dev_init(struct qed_hwfn *p_hwfn,
                                        struct qed_ptt *p_ptt)
@@ -1424,13 +1557,17 @@ static enum dbg_status qed_dbg_dev_init(struct qed_hwfn *p_hwfn,
                dev_data->mode_enable[MODE_K2] = 1;
        } else if (QED_IS_BB_B0(p_hwfn->cdev)) {
                dev_data->chip_id = CHIP_BB_B0;
-               dev_data->mode_enable[MODE_BB_B0] = 1;
+               dev_data->mode_enable[MODE_BB] = 1;
        } else {
                return DBG_STATUS_UNKNOWN_CHIP;
        }
 
        dev_data->platform_id = PLATFORM_ASIC;
        dev_data->mode_enable[MODE_ASIC] = 1;
+
+       /* Initializes the GRC parameters */
+       qed_dbg_grc_init_params(p_hwfn);
+
        dev_data->initialized = true;
        return DBG_STATUS_OK;
 }
@@ -1561,7 +1698,7 @@ static u32 qed_dump_fw_ver_param(struct qed_hwfn *p_hwfn,
        int printed_chars;
        u32 offset = 0;
 
-       if (dump) {
+       if (dump && !qed_grc_get_param(p_hwfn, DBG_GRC_PARAM_NO_FW_VER)) {
                /* Read FW image/version from PRAM in a non-reset SEMI */
                bool found = false;
                u8 storm_id;
@@ -1622,7 +1759,7 @@ static u32 qed_dump_mfw_ver_param(struct qed_hwfn *p_hwfn,
 {
        char mfw_ver_str[16] = EMPTY_FW_VERSION_STR;
 
-       if (dump) {
+       if (dump && !qed_grc_get_param(p_hwfn, DBG_GRC_PARAM_NO_FW_VER)) {
                u32 global_section_offsize, global_section_addr, mfw_ver;
                u32 public_data_addr, global_section_offsize_addr;
                int printed_chars;
@@ -1683,15 +1820,13 @@ static u32 qed_dump_common_global_params(struct qed_hwfn *p_hwfn,
                                         bool dump,
                                         u8 num_specific_global_params)
 {
+       u8 num_params = NUM_COMMON_GLOBAL_PARAMS + num_specific_global_params;
        struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
        u32 offset = 0;
 
        /* Find platform string and dump global params section header */
        offset += qed_dump_section_hdr(dump_buf + offset,
-                                      dump,
-                                      "global_params",
-                                      NUM_COMMON_GLOBAL_PARAMS +
-                                      num_specific_global_params);
+                                      dump, "global_params", num_params);
 
        /* Store params */
        offset += qed_dump_fw_ver_param(p_hwfn, p_ptt, dump_buf + offset, dump);
@@ -1815,37 +1950,6 @@ static bool qed_is_mode_match(struct qed_hwfn *p_hwfn, u16 *modes_buf_offset)
        }
 }
 
-/* Returns the value of the specified GRC param */
-static u32 qed_grc_get_param(struct qed_hwfn *p_hwfn,
-                            enum dbg_grc_params grc_param)
-{
-       struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
-
-       return dev_data->grc.param_val[grc_param];
-}
-
-/* Clear all GRC params */
-static void qed_dbg_grc_clear_params(struct qed_hwfn *p_hwfn)
-{
-       struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
-       u32 i;
-
-       for (i = 0; i < MAX_DBG_GRC_PARAMS; i++)
-               dev_data->grc.param_set_by_user[i] = 0;
-}
-
-/* Assign default GRC param values */
-static void qed_dbg_grc_set_params_default(struct qed_hwfn *p_hwfn)
-{
-       struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
-       u32 i;
-
-       for (i = 0; i < MAX_DBG_GRC_PARAMS; i++)
-               if (!dev_data->grc.param_set_by_user[i])
-                       dev_data->grc.param_val[i] =
-                           s_grc_param_defs[i].default_val[dev_data->chip_id];
-}
-
 /* Returns true if the specified entity (indicated by GRC param) should be
  * included in the dump, false otherwise.
  */
@@ -1971,7 +2075,7 @@ static void qed_grc_unreset_blocks(struct qed_hwfn *p_hwfn,
        }
 }
 
-/* Returns the attention name offsets of the specified block */
+/* Returns the attention block data of the specified block */
 static const struct dbg_attn_block_type_data *
 qed_get_block_attn_data(enum block_id block_id, enum dbg_attn_type attn_type)
 {
@@ -2040,7 +2144,7 @@ static void qed_grc_clear_all_prty(struct qed_hwfn *p_hwfn,
  * The following parameters are dumped:
  * - 'count' = num_dumped_entries
  * - 'split' = split_type
- * - 'id'i = split_id (dumped only if split_id >= 0)
+ * - 'id' = split_id (dumped only if split_id >= 0)
  * - 'param_name' = param_val (user param, dumped only if param_name != NULL and
  *     param_val != NULL)
  */
@@ -2069,21 +2173,81 @@ static u32 qed_grc_dump_regs_hdr(u32 *dump_buf,
        return offset;
 }
 
-/* Dumps GRC register/memory. Returns the dumped size in dwords. */
+/* Dumps the GRC registers in the specified address range.
+ * Returns the dumped size in dwords.
+ */
+static u32 qed_grc_dump_addr_range(struct qed_hwfn *p_hwfn,
+                                  struct qed_ptt *p_ptt, u32 *dump_buf,
+                                  bool dump, u32 addr, u32 len)
+{
+       u32 byte_addr = DWORDS_TO_BYTES(addr), offset = 0, i;
+
+       if (dump)
+               for (i = 0; i < len; i++, byte_addr += BYTES_IN_DWORD, offset++)
+                       *(dump_buf + offset) = qed_rd(p_hwfn, p_ptt, byte_addr);
+       else
+               offset += len;
+       return offset;
+}
+
+/* Dumps GRC registers sequence header. Returns the dumped size in dwords. */
+static u32 qed_grc_dump_reg_entry_hdr(u32 *dump_buf, bool dump, u32 addr,
+                                     u32 len)
+{
+       if (dump)
+               *dump_buf = addr | (len << REG_DUMP_LEN_SHIFT);
+       return 1;
+}
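
The sequence header packs the GRC address into the low bits of one dword and the length above REG_DUMP_LEN_SHIFT; the shift's value is defined elsewhere and not visible in this hunk. A dump reader would invert it roughly as:

/* Decoding a register-sequence header dword (REG_DUMP_LEN_SHIFT's
 * value is assumed to leave room for the address, as the encoder
 * above implies).
 */
u32 hdr  = dump_buf[0];
u32 len  = hdr >> REG_DUMP_LEN_SHIFT;
u32 addr = hdr & (BIT(REG_DUMP_LEN_SHIFT) - 1);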
+
+/* Dumps GRC registers sequence. Returns the dumped size in dwords. */
 static u32 qed_grc_dump_reg_entry(struct qed_hwfn *p_hwfn,
                                  struct qed_ptt *p_ptt, u32 *dump_buf,
                                  bool dump, u32 addr, u32 len)
 {
-       u32 offset = 0, i;
+       u32 offset = 0;
+
+       offset += qed_grc_dump_reg_entry_hdr(dump_buf, dump, addr, len);
+       offset += qed_grc_dump_addr_range(p_hwfn,
+                                         p_ptt,
+                                         dump_buf + offset, dump, addr, len);
+       return offset;
+}
+
+/* Dumps GRC registers sequence with skip cycle.
+ * Returns the dumped size in dwords.
+ */
+static u32 qed_grc_dump_reg_entry_skip(struct qed_hwfn *p_hwfn,
+                                      struct qed_ptt *p_ptt, u32 *dump_buf,
+                                      bool dump, u32 addr, u32 total_len,
+                                      u32 read_len, u32 skip_len)
+{
+       u32 offset = 0, reg_offset = 0;
 
+       offset += qed_grc_dump_reg_entry_hdr(dump_buf, dump, addr, total_len);
        if (dump) {
-               *(dump_buf + offset++) = addr | (len << REG_DUMP_LEN_SHIFT);
-               for (i = 0; i < len; i++, addr++, offset++)
-                       *(dump_buf + offset) = qed_rd(p_hwfn,
-                                                     p_ptt,
-                                                     DWORDS_TO_BYTES(addr));
+               while (reg_offset < total_len) {
+                       u32 curr_len = min_t(u32,
+                                            read_len,
+                                            total_len - reg_offset);
+                       offset += qed_grc_dump_addr_range(p_hwfn,
+                                                         p_ptt,
+                                                         dump_buf + offset,
+                                                         dump, addr, curr_len);
+                       reg_offset += curr_len;
+                       addr += curr_len;
+                       if (reg_offset < total_len) {
+                               curr_len = min_t(u32,
+                                                skip_len,
+                                                total_len - skip_len);
+                               memset(dump_buf + offset, 0,
+                                      DWORDS_TO_BYTES(curr_len));
+                               offset += curr_len;
+                               reg_offset += curr_len;
+                               addr += curr_len;
+                       }
+               }
        } else {
-               offset += len + 1;
+               offset += total_len;
        }
 
        return offset;
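
One detail in the skip path is worth flagging: the second clamp, min_t(u32, skip_len, total_len - skip_len), bounds the zero-filled span against a constant rather than against the dwords actually remaining, so a layout where skip_len exceeds total_len - skip_len would zero a short span and leave the read phase misaligned. The read clamp just above uses total_len - reg_offset; a sketch of the presumably intended symmetric form, offered as a reading of the code rather than as the committed change:

/* Presumed intent: clamp the skipped span to what remains in the
 * entry, mirroring the read-side clamp (sketch, not the commit).
 */
curr_len = min_t(u32, skip_len, total_len - reg_offset);
memset(dump_buf + offset, 0, DWORDS_TO_BYTES(curr_len));
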
@@ -2124,14 +2288,17 @@ static u32 qed_grc_dump_regs_entries(struct qed_hwfn *p_hwfn,
                                const struct dbg_dump_reg *reg =
                                    (const struct dbg_dump_reg *)
                                    &input_regs_arr.ptr[input_offset];
+                               u32 addr, len;
 
+                               addr = GET_FIELD(reg->data,
+                                                DBG_DUMP_REG_ADDRESS);
+                               len = GET_FIELD(reg->data, DBG_DUMP_REG_LENGTH);
                                offset +=
-                                       qed_grc_dump_reg_entry(p_hwfn, p_ptt,
-                                                   dump_buf + offset, dump,
-                                                   GET_FIELD(reg->data,
-                                                       DBG_DUMP_REG_ADDRESS),
-                                                   GET_FIELD(reg->data,
-                                                       DBG_DUMP_REG_LENGTH));
+                                   qed_grc_dump_reg_entry(p_hwfn, p_ptt,
+                                                          dump_buf + offset,
+                                                          dump,
+                                                          addr,
+                                                          len);
                                (*num_dumped_reg_entries)++;
                        }
                } else {
@@ -2194,8 +2361,14 @@ static u32 qed_grc_dump_registers(struct qed_hwfn *p_hwfn,
                                  const char *param_name, const char *param_val)
 {
        struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
+       struct chip_platform_defs *p_platform_defs;
        u32 offset = 0, input_offset = 0;
-       u8 port_id, pf_id;
+       struct chip_defs *p_chip_defs;
+       u8 port_id, pf_id, vf_id;
+       u16 fid;
+
+       p_chip_defs = &s_chip_defs[dev_data->chip_id];
+       p_platform_defs = &p_chip_defs->per_platform[dev_data->platform_id];
 
        if (dump)
                DP_VERBOSE(p_hwfn, QED_MSG_DEBUG, "Dumping registers...\n");
@@ -2214,7 +2387,6 @@ static u32 qed_grc_dump_registers(struct qed_hwfn *p_hwfn,
 
                switch (split_type_id) {
                case SPLIT_TYPE_NONE:
-               case SPLIT_TYPE_VF:
                        offset += qed_grc_dump_split_data(p_hwfn,
                                                          p_ptt,
                                                          curr_input_regs_arr,
@@ -2227,10 +2399,7 @@ static u32 qed_grc_dump_registers(struct qed_hwfn *p_hwfn,
                                                          param_val);
                        break;
                case SPLIT_TYPE_PORT:
-                       for (port_id = 0;
-                            port_id <
-                            s_chip_defs[dev_data->chip_id].
-                            per_platform[dev_data->platform_id].num_ports;
+                       for (port_id = 0; port_id < p_platform_defs->num_ports;
                             port_id++) {
                                if (dump)
                                        qed_port_pretend(p_hwfn, p_ptt,
@@ -2247,20 +2416,48 @@ static u32 qed_grc_dump_registers(struct qed_hwfn *p_hwfn,
                        break;
                case SPLIT_TYPE_PF:
                case SPLIT_TYPE_PORT_PF:
-                       for (pf_id = 0;
-                            pf_id <
-                            s_chip_defs[dev_data->chip_id].
-                            per_platform[dev_data->platform_id].num_pfs;
+                       for (pf_id = 0; pf_id < p_platform_defs->num_pfs;
                             pf_id++) {
-                               if (dump)
-                                       qed_fid_pretend(p_hwfn, p_ptt, pf_id);
-                               offset += qed_grc_dump_split_data(p_hwfn,
-                                                       p_ptt,
-                                                       curr_input_regs_arr,
-                                                       dump_buf + offset,
-                                                       dump, block_enable,
-                                                       "pf", pf_id, param_name,
-                                                       param_val);
+                               u8 pfid_shift =
+                                       PXP_PRETEND_CONCRETE_FID_PFID_SHIFT;
+
+                               if (dump) {
+                                       fid = pf_id << pfid_shift;
+                                       qed_fid_pretend(p_hwfn, p_ptt, fid);
+                               }
+
+                               offset +=
+                                   qed_grc_dump_split_data(p_hwfn, p_ptt,
+                                                           curr_input_regs_arr,
+                                                           dump_buf + offset,
+                                                           dump, block_enable,
+                                                           "pf", pf_id,
+                                                           param_name,
+                                                           param_val);
+                       }
+                       break;
+               case SPLIT_TYPE_VF:
+                       for (vf_id = 0; vf_id < p_platform_defs->num_vfs;
+                            vf_id++) {
+                               u8 vfvalid_shift =
+                                       PXP_PRETEND_CONCRETE_FID_VFVALID_SHIFT;
+                               u8 vfid_shift =
+                                       PXP_PRETEND_CONCRETE_FID_VFID_SHIFT;
+
+                               if (dump) {
+                                       fid = BIT(vfvalid_shift) |
+                                             (vf_id << vfid_shift);
+                                       qed_fid_pretend(p_hwfn, p_ptt, fid);
+                               }
+
+                               offset +=
+                                   qed_grc_dump_split_data(p_hwfn, p_ptt,
+                                                           curr_input_regs_arr,
+                                                           dump_buf + offset,
+                                                           dump, block_enable,
+                                                           "vf", vf_id,
+                                                           param_name,
+                                                           param_val);
                        }
                        break;
                default:
@@ -2271,8 +2468,11 @@ static u32 qed_grc_dump_registers(struct qed_hwfn *p_hwfn,
        }
 
        /* Pretend to original PF */
-       if (dump)
-               qed_fid_pretend(p_hwfn, p_ptt, p_hwfn->rel_pf_id);
+       if (dump) {
+               fid = p_hwfn->rel_pf_id << PXP_PRETEND_CONCRETE_FID_PFID_SHIFT;
+               qed_fid_pretend(p_hwfn, p_ptt, fid);
+       }
+
        return offset;
 }
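
The PF and VF split cases above build a "concrete FID" before calling qed_fid_pretend(). A minimal sketch of that composition, assuming placeholder bit positions; the real ones are the PXP_PRETEND_CONCRETE_FID_*_SHIFT macros from the qed HSI headers, and the ex_* names are hypothetical:

#include <stdint.h>

#define EX_FID_PFID_SHIFT	0	/* assumed PF id field position */
#define EX_FID_VFID_SHIFT	0	/* assumed VF id field position */
#define EX_FID_VFVALID_SHIFT	7	/* assumed vf_valid bit position */

/* PF pretend: only the PF id field is set. */
static uint16_t ex_pf_fid(uint8_t pf_id)
{
	return (uint16_t)(pf_id << EX_FID_PFID_SHIFT);
}

/* VF pretend: the vf_valid bit must accompany the VF id, so the HW
 * interprets the id as a VF rather than a PF.
 */
static uint16_t ex_vf_fid(uint8_t vf_id)
{
	return (uint16_t)((1U << EX_FID_VFVALID_SHIFT) |
			  (vf_id << EX_FID_VFID_SHIFT));
}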
 
@@ -2291,13 +2491,14 @@ static u32 qed_grc_dump_reset_regs(struct qed_hwfn *p_hwfn,
        /* Write reset registers */
        for (i = 0; i < MAX_DBG_RESET_REGS; i++) {
                if (s_reset_regs_defs[i].exists[dev_data->chip_id]) {
+                       u32 addr = BYTES_TO_DWORDS(s_reset_regs_defs[i].addr);
+
                        offset += qed_grc_dump_reg_entry(p_hwfn,
                                                         p_ptt,
                                                         dump_buf + offset,
                                                         dump,
-                                                        BYTES_TO_DWORDS
-                                                        (s_reset_regs_defs
-                                                         [i].addr), 1);
+                                                        addr,
+                                                        1);
                        num_regs++;
                }
        }
@@ -2339,6 +2540,7 @@ static u32 qed_grc_dump_modified_regs(struct qed_hwfn *p_hwfn,
                                &attn_reg_arr[reg_idx];
                        u16 modes_buf_offset;
                        bool eval_mode;
+                       u32 addr;
 
                        /* Check mode */
                        eval_mode = GET_FIELD(reg_data->mode.data,
@@ -2349,19 +2551,23 @@ static u32 qed_grc_dump_modified_regs(struct qed_hwfn *p_hwfn,
                        if (!eval_mode ||
                            qed_is_mode_match(p_hwfn, &modes_buf_offset)) {
                                /* Mode match - read and dump registers */
-                               offset += qed_grc_dump_reg_entry(p_hwfn,
-                                                       p_ptt,
-                                                       dump_buf + offset,
-                                                       dump,
-                                                       reg_data->mask_address,
-                                                       1);
-                               offset += qed_grc_dump_reg_entry(p_hwfn,
-                                               p_ptt,
-                                               dump_buf + offset,
-                                               dump,
-                                               GET_FIELD(reg_data->data,
-                                                   DBG_ATTN_REG_STS_ADDRESS),
-                                               1);
+                               addr = reg_data->mask_address;
+                               offset +=
+                                   qed_grc_dump_reg_entry(p_hwfn,
+                                                          p_ptt,
+                                                          dump_buf + offset,
+                                                          dump,
+                                                          addr,
+                                                          1);
+                               addr = GET_FIELD(reg_data->data,
+                                                DBG_ATTN_REG_STS_ADDRESS);
+                               offset +=
+                                   qed_grc_dump_reg_entry(p_hwfn,
+                                                          p_ptt,
+                                                          dump_buf + offset,
+                                                          dump,
+                                                          addr,
+                                                          1);
                                num_reg_entries += 2;
                        }
                }
@@ -2369,18 +2575,21 @@ static u32 qed_grc_dump_modified_regs(struct qed_hwfn *p_hwfn,
 
        /* Write storm stall status registers */
        for (storm_id = 0; storm_id < MAX_DBG_STORMS; storm_id++) {
+               u32 addr;
+
                if (dev_data->block_in_reset[s_storm_defs[storm_id].block_id] &&
                    dump)
                        continue;
 
+               addr =
+                   BYTES_TO_DWORDS(s_storm_defs[storm_id].sem_fast_mem_addr +
+                                   SEM_FAST_REG_STALLED);
                offset += qed_grc_dump_reg_entry(p_hwfn,
-                                       p_ptt,
-                                       dump_buf + offset,
-                                       dump,
-                                       BYTES_TO_DWORDS(s_storm_defs[storm_id].
-                                                       sem_fast_mem_addr +
-                                                       SEM_FAST_REG_STALLED),
-                                       1);
+                                                p_ptt,
+                                                dump_buf + offset,
+                                                dump,
+                                                addr,
+                                                1);
                num_reg_entries++;
        }
 
@@ -2392,11 +2601,47 @@ static u32 qed_grc_dump_modified_regs(struct qed_hwfn *p_hwfn,
        return offset;
 }
 
+/* Dumps registers that can't be represented in the debug arrays */
+static u32 qed_grc_dump_special_regs(struct qed_hwfn *p_hwfn,
+                                    struct qed_ptt *p_ptt,
+                                    u32 *dump_buf, bool dump)
+{
+       u32 offset = 0, addr;
+
+       offset += qed_grc_dump_regs_hdr(dump_buf,
+                                       dump, 2, "eng", -1, NULL, NULL);
+
+       /* Dump R/TDIF_REG_DEBUG_ERROR_INFO (every 8th register is
+        * skipped).
+        */
+       addr = BYTES_TO_DWORDS(RDIF_REG_DEBUG_ERROR_INFO);
+       offset += qed_grc_dump_reg_entry_skip(p_hwfn,
+                                             p_ptt,
+                                             dump_buf + offset,
+                                             dump,
+                                             addr,
+                                             RDIF_REG_DEBUG_ERROR_INFO_SIZE,
+                                             7,
+                                             1);
+       addr = BYTES_TO_DWORDS(TDIF_REG_DEBUG_ERROR_INFO);
+       offset +=
+           qed_grc_dump_reg_entry_skip(p_hwfn,
+                                       p_ptt,
+                                       dump_buf + offset,
+                                       dump,
+                                       addr,
+                                       TDIF_REG_DEBUG_ERROR_INFO_SIZE,
+                                       7,
+                                       1);
+
+       return offset;
+}
+
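qed_grc_dump_reg_entry_skip() is invoked above with read/skip arguments of 7 and 1, matching the comment: out of every eight consecutive dwords, seven are read and the eighth is left out. A hypothetical helper showing one plausible shape of that loop; ex_rd() stands in for qed_rd(), and the real function's accounting may differ:

#include <stdint.h>

uint32_t ex_rd(uint32_t dword_addr);	/* stand-in for qed_rd() */

static uint32_t ex_dump_skip(uint32_t addr, uint32_t total_dwords,
			     uint32_t read_n, uint32_t skip_n, uint32_t *buf)
{
	uint32_t offset = 0;

	while (total_dwords) {
		uint32_t i;
		uint32_t chunk = read_n < total_dwords ? read_n : total_dwords;

		/* Read the first read_n dwords of each (read_n + skip_n)
		 * stride; the remaining skip_n dwords stay unread.
		 */
		for (i = 0; i < chunk; i++)
			buf[offset++] = ex_rd(addr + i);

		addr += read_n + skip_n;
		total_dwords -= (chunk + skip_n < total_dwords) ?
				(chunk + skip_n) : total_dwords;
	}

	return offset;
}
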
 /* Dumps a GRC memory header (section and params).
  * The following parameters are dumped:
  * name - name is dumped only if it's not NULL.
- * addr - byte_addr is dumped only if name is NULL.
- * len - dword_len is always dumped.
+ * addr - addr is dumped only if name is NULL.
+ * len - len is always dumped.
  * width - bit_width is dumped if it's not zero.
  * packed - packed=1 is dumped only if packed is true.
  * mem_group - mem_group is always dumped.
@@ -2408,8 +2653,8 @@ static u32 qed_grc_dump_mem_hdr(struct qed_hwfn *p_hwfn,
                                u32 *dump_buf,
                                bool dump,
                                const char *name,
-                               u32 byte_addr,
-                               u32 dword_len,
+                               u32 addr,
+                               u32 len,
                                u32 bit_width,
                                bool packed,
                                const char *mem_group,
@@ -2419,7 +2664,7 @@ static u32 qed_grc_dump_mem_hdr(struct qed_hwfn *p_hwfn,
        u32 offset = 0;
        char buf[64];
 
-       if (!dword_len)
+       if (!len)
                DP_NOTICE(p_hwfn,
                          "Unexpected GRC Dump error: dumped memory size must be non-zero\n");
        if (bit_width)
@@ -2446,20 +2691,21 @@ static u32 qed_grc_dump_mem_hdr(struct qed_hwfn *p_hwfn,
                        DP_VERBOSE(p_hwfn,
                                   QED_MSG_DEBUG,
                                   "Dumping %d registers from %s...\n",
-                                  dword_len, buf);
+                                  len, buf);
        } else {
                /* Dump address */
                offset += qed_dump_num_param(dump_buf + offset,
-                                            dump, "addr", byte_addr);
-               if (dump && dword_len > 64)
+                                            dump, "addr",
+                                            DWORDS_TO_BYTES(addr));
+               if (dump && len > 64)
                        DP_VERBOSE(p_hwfn,
                                   QED_MSG_DEBUG,
                                   "Dumping %d registers from address 0x%x...\n",
-                                  dword_len, byte_addr);
+                                  len, (u32)DWORDS_TO_BYTES(addr));
        }
 
        /* Dump len */
-       offset += qed_dump_num_param(dump_buf + offset, dump, "len", dword_len);
+       offset += qed_dump_num_param(dump_buf + offset, dump, "len", len);
 
        /* Dump bit width */
        if (bit_width)
@@ -2492,8 +2738,8 @@ static u32 qed_grc_dump_mem(struct qed_hwfn *p_hwfn,
                            u32 *dump_buf,
                            bool dump,
                            const char *name,
-                           u32 byte_addr,
-                           u32 dword_len,
+                           u32 addr,
+                           u32 len,
                            u32 bit_width,
                            bool packed,
                            const char *mem_group,
@@ -2505,21 +2751,14 @@ static u32 qed_grc_dump_mem(struct qed_hwfn *p_hwfn,
                                       dump_buf + offset,
                                       dump,
                                       name,
-                                      byte_addr,
-                                      dword_len,
+                                      addr,
+                                      len,
                                       bit_width,
                                       packed,
                                       mem_group, is_storm, storm_letter);
-       if (dump) {
-               u32 i;
-
-               for (i = 0; i < dword_len;
-                    i++, byte_addr += BYTES_IN_DWORD, offset++)
-                       *(dump_buf + offset) = qed_rd(p_hwfn, p_ptt, byte_addr);
-       } else {
-               offset += dword_len;
-       }
-
+       offset += qed_grc_dump_addr_range(p_hwfn,
+                                         p_ptt,
+                                         dump_buf + offset, dump, addr, len);
        return offset;
 }
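
The open-coded loop deleted above shows what qed_grc_dump_addr_range() is expected to do. Based on it, a sketch of the helper's behaviour, assuming the qed headers for the types and macros; this is an inference from the replaced code, not the verbatim implementation:

static u32 ex_grc_dump_addr_range(struct qed_hwfn *p_hwfn,
				  struct qed_ptt *p_ptt,
				  u32 *dump_buf, bool dump, u32 addr, u32 len)
{
	u32 byte_addr = DWORDS_TO_BYTES(addr), offset = 0, i;

	if (!dump)
		return len;	/* sizing pass: no register access */

	for (i = 0; i < len; i++, byte_addr += BYTES_IN_DWORD, offset++)
		dump_buf[offset] = qed_rd(p_hwfn, p_ptt, byte_addr);

	return offset;
}

Note that addr is a dword address; callers above convert byte addresses with BYTES_TO_DWORDS() first.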
 
@@ -2575,25 +2814,41 @@ static u32 qed_grc_dump_mem_entries(struct qed_hwfn *p_hwfn,
                        if (qed_grc_is_mem_included(p_hwfn,
                                        (enum block_id)cond_hdr->block_id,
                                        mem_group_id)) {
-                               u32 mem_byte_addr =
-                                       DWORDS_TO_BYTES(GET_FIELD(mem->dword0,
-                                                       DBG_DUMP_MEM_ADDRESS));
+                               u32 mem_addr = GET_FIELD(mem->dword0,
+                                                        DBG_DUMP_MEM_ADDRESS);
                                u32 mem_len = GET_FIELD(mem->dword1,
                                                        DBG_DUMP_MEM_LENGTH);
+                               enum dbg_grc_params grc_param;
                                char storm_letter = 'a';
                                bool is_storm = false;
 
                                /* Update memory length for CCFC/TCFC memories
                                 * according to number of LCIDs/LTIDs.
                                 */
-                               if (mem_group_id == MEM_GROUP_CONN_CFC_MEM)
+                               if (mem_group_id == MEM_GROUP_CONN_CFC_MEM) {
+                                       if (mem_len % MAX_LCIDS != 0) {
+                                               DP_NOTICE(p_hwfn,
+                                                         "Invalid CCFC connection memory size\n");
+                                               return 0;
+                                       }
+
+                                       grc_param = DBG_GRC_PARAM_NUM_LCIDS;
                                        mem_len = qed_grc_get_param(p_hwfn,
-                                                       DBG_GRC_PARAM_NUM_LCIDS)
-                                                       * (mem_len / MAX_LCIDS);
-                               else if (mem_group_id == MEM_GROUP_TASK_CFC_MEM)
+                                                                   grc_param) *
+                                                 (mem_len / MAX_LCIDS);
+                               } else if (mem_group_id ==
+                                          MEM_GROUP_TASK_CFC_MEM) {
+                                       if (mem_len % MAX_LTIDS != 0) {
+                                               DP_NOTICE(p_hwfn,
+                                                         "Invalid TCFC task memory size\n");
+                                               return 0;
+                                       }
+
+                                       grc_param = DBG_GRC_PARAM_NUM_LTIDS;
                                        mem_len = qed_grc_get_param(p_hwfn,
-                                                       DBG_GRC_PARAM_NUM_LTIDS)
-                                                       * (mem_len / MAX_LTIDS);
+                                                                   grc_param) *
+                                                 (mem_len / MAX_LTIDS);
+                               }
 
                                /* If memory is associated with Storm, update
                                 * Storm details.
@@ -2610,7 +2865,7 @@ static u32 qed_grc_dump_mem_entries(struct qed_hwfn *p_hwfn,
                                /* Dump memory */
                                offset += qed_grc_dump_mem(p_hwfn, p_ptt,
                                                dump_buf + offset, dump, NULL,
-                                               mem_byte_addr, mem_len, 0,
+                                               mem_addr, mem_len, 0,
                                                false,
                                                s_mem_group_names[mem_group_id],
                                                is_storm, storm_letter);
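
The CCFC/TCFC branch earlier in this hunk rescales the memory length by the user-configured LCID/LTID count, and the new modulo check guards the division. A worked example with illustrative numbers (MAX_LCIDS and the per-LCID footprint are assumptions):

#include <assert.h>

int main(void)
{
	unsigned int max_lcids = 320;		/* assumed MAX_LCIDS */
	unsigned int mem_len = max_lcids * 4;	/* 4 dwords per LCID, assumed */
	unsigned int num_lcids = 64;		/* DBG_GRC_PARAM_NUM_LCIDS */

	assert(mem_len % max_lcids == 0);	/* the added sanity check */
	mem_len = num_lcids * (mem_len / max_lcids);
	assert(mem_len == 256);			/* only 64 LCIDs are dumped */
	return 0;
}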
@@ -2799,29 +3054,31 @@ static u32 qed_grc_dump_iors(struct qed_hwfn *p_hwfn,
        u32 offset = 0;
 
        for (storm_id = 0; storm_id < MAX_DBG_STORMS; storm_id++) {
-               if (qed_grc_is_storm_included(p_hwfn,
-                                             (enum dbg_storms)storm_id)) {
-                       for (set_id = 0; set_id < NUM_IOR_SETS; set_id++) {
-                               u32 addr =
-                                   s_storm_defs[storm_id].sem_fast_mem_addr +
-                                   SEM_FAST_REG_STORM_REG_FILE +
-                                   DWORDS_TO_BYTES(IOR_SET_OFFSET(set_id));
+               struct storm_defs *storm = &s_storm_defs[storm_id];
 
-                               buf[strlen(buf) - 1] = '0' + set_id;
-                               offset += qed_grc_dump_mem(p_hwfn,
-                                                          p_ptt,
-                                                          dump_buf + offset,
-                                                          dump,
-                                                          buf,
-                                                          addr,
-                                                          IORS_PER_SET,
-                                                          32,
-                                                          false,
-                                                          "ior",
-                                                          true,
-                                                          s_storm_defs
-                                                          [storm_id].letter);
-                       }
+               if (!qed_grc_is_storm_included(p_hwfn,
+                                              (enum dbg_storms)storm_id))
+                       continue;
+
+               for (set_id = 0; set_id < NUM_IOR_SETS; set_id++) {
+                       u32 byte_addr, addr;
+
+                       byte_addr = storm->sem_fast_mem_addr +
+                                   SEM_FAST_REG_STORM_REG_FILE;
+                       addr = BYTES_TO_DWORDS(byte_addr) +
+                              IOR_SET_OFFSET(set_id);
+                       buf[strlen(buf) - 1] = '0' + set_id;
+                       offset += qed_grc_dump_mem(p_hwfn,
+                                                  p_ptt,
+                                                  dump_buf + offset,
+                                                  dump,
+                                                  buf,
+                                                  addr,
+                                                  IORS_PER_SET,
+                                                  32,
+                                                  false,
+                                                  "ior",
+                                                  true,
+                                                  storm->letter);
                }
        }
 
@@ -2990,34 +3247,39 @@ static u32 qed_grc_dump_rss(struct qed_hwfn *p_hwfn,
                struct rss_mem_defs *rss_defs = &s_rss_mem_defs[rss_mem_id];
                u32 num_entries = rss_defs->num_entries[dev_data->chip_id];
                u32 entry_width = rss_defs->entry_width[dev_data->chip_id];
-               u32 total_size = (num_entries * entry_width) / 32;
+               u32 total_dwords = (num_entries * entry_width) / 32;
+               u32 size = RSS_REG_RSS_RAM_DATA_SIZE;
                bool packed = (entry_width == 16);
-               u32 addr = rss_defs->addr;
-               u32 i, j;
+               u32 rss_addr = rss_defs->addr;
+               u32 i, addr;
 
                offset += qed_grc_dump_mem_hdr(p_hwfn,
                                               dump_buf + offset,
                                               dump,
                                               rss_defs->mem_name,
-                                              addr,
-                                              total_size,
+                                              0,
+                                              total_dwords,
                                               entry_width,
                                               packed,
                                               rss_defs->type_name, false, 0);
 
                if (!dump) {
-                       offset += total_size;
+                       offset += total_dwords;
                        continue;
                }
 
                /* Dump RSS data */
-               for (i = 0; i < BYTES_TO_DWORDS(total_size); i++, addr++) {
-                       qed_wr(p_hwfn, p_ptt, RSS_REG_RSS_RAM_ADDR, addr);
-                       for (j = 0; j < BYTES_IN_DWORD; j++, offset++)
-                               *(dump_buf + offset) =
-                                       qed_rd(p_hwfn, p_ptt,
-                                              RSS_REG_RSS_RAM_DATA +
-                                              DWORDS_TO_BYTES(j));
+               for (i = 0; i < total_dwords;
+                    i += RSS_REG_RSS_RAM_DATA_SIZE, rss_addr++) {
+                       addr = BYTES_TO_DWORDS(RSS_REG_RSS_RAM_DATA);
+                       qed_wr(p_hwfn, p_ptt, RSS_REG_RSS_RAM_ADDR, rss_addr);
+                       offset += qed_grc_dump_addr_range(p_hwfn,
+                                                         p_ptt,
+                                                         dump_buf + offset,
+                                                         dump,
+                                                         addr,
+                                                         size);
                }
        }
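
The RSS RAM above (and the Big RAM below) is read through an address/data register pair: a line index is written to the *_ADDR register and the data window is then read back, exactly as the deleted loop did. A hypothetical helper showing the pattern, assuming the qed register definitions:

static void ex_read_indirect_ram(struct qed_hwfn *p_hwfn,
				 struct qed_ptt *p_ptt,
				 u32 lines, u32 line_dwords, u32 *buf)
{
	u32 line, i;

	for (line = 0; line < lines; line++) {
		/* Select the RAM line to expose in the data window */
		qed_wr(p_hwfn, p_ptt, RSS_REG_RSS_RAM_ADDR, line);

		/* Read the window out dword by dword */
		for (i = 0; i < line_dwords; i++)
			buf[line * line_dwords + i] =
			    qed_rd(p_hwfn, p_ptt,
				   RSS_REG_RSS_RAM_DATA + DWORDS_TO_BYTES(i));
	}
}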
 
@@ -3030,19 +3292,19 @@ static u32 qed_grc_dump_big_ram(struct qed_hwfn *p_hwfn,
                                u32 *dump_buf, bool dump, u8 big_ram_id)
 {
        struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
+       u32 total_blocks, ram_size, offset = 0, i;
        char mem_name[12] = "???_BIG_RAM";
        char type_name[8] = "???_RAM";
-       u32 ram_size, total_blocks;
-       u32 offset = 0, i, j;
+       struct big_ram_defs *big_ram;
 
-       total_blocks =
-               s_big_ram_defs[big_ram_id].num_of_blocks[dev_data->chip_id];
+       big_ram = &s_big_ram_defs[big_ram_id];
+       total_blocks = big_ram->num_of_blocks[dev_data->chip_id];
        ram_size = total_blocks * BIG_RAM_BLOCK_SIZE_DWORDS;
 
-       strncpy(type_name, s_big_ram_defs[big_ram_id].instance_name,
-               strlen(s_big_ram_defs[big_ram_id].instance_name));
-       strncpy(mem_name, s_big_ram_defs[big_ram_id].instance_name,
-               strlen(s_big_ram_defs[big_ram_id].instance_name));
+       strncpy(type_name, big_ram->instance_name,
+               strlen(big_ram->instance_name));
+       strncpy(mem_name, big_ram->instance_name,
+               strlen(big_ram->instance_name));
 
        /* Dump memory header */
        offset += qed_grc_dump_mem_hdr(p_hwfn,
@@ -3059,13 +3321,17 @@ static u32 qed_grc_dump_big_ram(struct qed_hwfn *p_hwfn,
 
        /* Read and dump Big RAM data */
        for (i = 0; i < total_blocks / 2; i++) {
-               qed_wr(p_hwfn, p_ptt, s_big_ram_defs[big_ram_id].addr_reg_addr,
-                      i);
-               for (j = 0; j < 2 * BIG_RAM_BLOCK_SIZE_DWORDS; j++, offset++)
-                       *(dump_buf + offset) = qed_rd(p_hwfn, p_ptt,
-                                               s_big_ram_defs[big_ram_id].
-                                                       data_reg_addr +
-                                               DWORDS_TO_BYTES(j));
+               u32 addr, len;
+
+               qed_wr(p_hwfn, p_ptt, big_ram->addr_reg_addr, i);
+               addr = BYTES_TO_DWORDS(big_ram->data_reg_addr);
+               len = 2 * BIG_RAM_BLOCK_SIZE_DWORDS;
+               offset += qed_grc_dump_addr_range(p_hwfn,
+                                                 p_ptt,
+                                                 dump_buf + offset,
+                                                 dump,
+                                                 addr,
+                                                 len);
        }
 
        return offset;
@@ -3075,11 +3341,11 @@ static u32 qed_grc_dump_mcp(struct qed_hwfn *p_hwfn,
                            struct qed_ptt *p_ptt, u32 *dump_buf, bool dump)
 {
        bool block_enable[MAX_BLOCK_ID] = { 0 };
+       u32 offset = 0, addr;
        bool halted = false;
-       u32 offset = 0;
 
        /* Halt MCP */
-       if (dump) {
+       if (dump && !qed_grc_get_param(p_hwfn, DBG_GRC_PARAM_NO_MCP)) {
                halted = !qed_mcp_halt(p_hwfn, p_ptt);
                if (!halted)
                        DP_NOTICE(p_hwfn, "MCP halt failed!\n");
@@ -3091,7 +3357,7 @@ static u32 qed_grc_dump_mcp(struct qed_hwfn *p_hwfn,
                                   dump_buf + offset,
                                   dump,
                                   NULL,
-                                  MCP_REG_SCRATCH,
+                                  BYTES_TO_DWORDS(MCP_REG_SCRATCH),
                                   MCP_REG_SCRATCH_SIZE,
                                   0, false, "MCP", false, 0);
 
@@ -3101,7 +3367,7 @@ static u32 qed_grc_dump_mcp(struct qed_hwfn *p_hwfn,
                                   dump_buf + offset,
                                   dump,
                                   NULL,
-                                  MCP_REG_CPU_REG_FILE,
+                                  BYTES_TO_DWORDS(MCP_REG_CPU_REG_FILE),
                                   MCP_REG_CPU_REG_FILE_SIZE,
                                   0, false, "MCP", false, 0);
 
@@ -3115,12 +3381,13 @@ static u32 qed_grc_dump_mcp(struct qed_hwfn *p_hwfn,
        /* Dump required non-MCP registers */
        offset += qed_grc_dump_regs_hdr(dump_buf + offset,
                                        dump, 1, "eng", -1, "block", "MCP");
+       addr = BYTES_TO_DWORDS(MISC_REG_SHARED_MEM_ADDR);
        offset += qed_grc_dump_reg_entry(p_hwfn,
                                         p_ptt,
                                         dump_buf + offset,
                                         dump,
-                                        BYTES_TO_DWORDS
-                                        (MISC_REG_SHARED_MEM_ADDR), 1);
+                                        addr,
+                                        1);
 
        /* Release MCP */
        if (halted && qed_mcp_resume(p_hwfn, p_ptt))
@@ -3212,7 +3479,7 @@ static u32 qed_grc_dump_static_debug(struct qed_hwfn *p_hwfn,
 {
        u32 block_dwords = NUM_DBG_BUS_LINES * STATIC_DEBUG_LINE_DWORDS;
        struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
-       u32 offset = 0, block_id, line_id, addr, i;
+       u32 offset = 0, block_id, line_id;
        struct block_defs *p_block_defs;
 
        if (dump) {
@@ -3255,6 +3522,8 @@ static u32 qed_grc_dump_static_debug(struct qed_hwfn *p_hwfn,
                if (dump && !dev_data->block_in_reset[block_id]) {
                        u8 dbg_client_id =
                                p_block_defs->dbg_client_id[dev_data->chip_id];
+                       u32 addr = BYTES_TO_DWORDS(DBG_REG_CALENDAR_OUT_DATA);
+                       u32 len = STATIC_DEBUG_LINE_DWORDS;
 
                        /* Enable block's client */
                        qed_bus_enable_clients(p_hwfn, p_ptt,
@@ -3270,11 +3539,13 @@ static u32 qed_grc_dump_static_debug(struct qed_hwfn *p_hwfn,
                                                    0xf, 0, 0, 0);
 
                                /* Read debug line info */
-                               for (i = 0, addr = DBG_REG_CALENDAR_OUT_DATA;
-                                    i < STATIC_DEBUG_LINE_DWORDS;
-                                    i++, offset++, addr += BYTES_IN_DWORD)
-                                       dump_buf[offset] = qed_rd(p_hwfn, p_ptt,
-                                                                 addr);
+                               offset +=
+                                   qed_grc_dump_addr_range(p_hwfn,
+                                                           p_ptt,
+                                                           dump_buf + offset,
+                                                           dump,
+                                                           addr,
+                                                           len);
                        }
 
                        /* Disable block's client and debug output */
@@ -3311,14 +3582,8 @@ static enum dbg_status qed_grc_dump(struct qed_hwfn *p_hwfn,
        u8 i, port_mode = 0;
        u32 offset = 0;
 
-       /* Check if emulation platform */
        *num_dumped_dwords = 0;
 
-       /* Fill GRC parameters that were not set by the user with their default
-        * value.
-        */
-       qed_dbg_grc_set_params_default(p_hwfn);
-
        /* Find port mode */
        if (dump) {
                switch (qed_rd(p_hwfn, p_ptt, MISC_REG_PORT_MODE)) {
@@ -3370,15 +3635,14 @@ static enum dbg_status qed_grc_dump(struct qed_hwfn *p_hwfn,
        }
 
        /* Disable all parities using MFW command */
-       if (dump) {
+       if (dump && !qed_grc_get_param(p_hwfn, DBG_GRC_PARAM_NO_MCP)) {
                parities_masked = !qed_mcp_mask_parities(p_hwfn, p_ptt, 1);
                if (!parities_masked) {
+                       DP_NOTICE(p_hwfn,
+                                 "Failed to mask parities using MFW\n");
                        if (qed_grc_get_param
                            (p_hwfn, DBG_GRC_PARAM_PARITY_SAFE))
                                return DBG_STATUS_MCP_COULD_NOT_MASK_PRTY;
-                       else
-                               DP_NOTICE(p_hwfn,
-                                         "Failed to mask parities using MFW\n");
                }
        }
 
@@ -3409,6 +3673,11 @@ static enum dbg_status qed_grc_dump(struct qed_hwfn *p_hwfn,
                                                 offset,
                                                 dump,
                                                 block_enable, NULL, NULL);
+
+               /* Dump special registers */
+               offset += qed_grc_dump_special_regs(p_hwfn,
+                                                   p_ptt,
+                                                   dump_buf + offset, dump);
        }
 
        /* Dump memories */
@@ -3583,9 +3852,9 @@ static u32 qed_idle_chk_dump_failure(struct qed_hwfn *p_hwfn,
                        }
 
                        if (mode_match) {
-                               u32 grc_addr =
-                                       DWORDS_TO_BYTES(GET_FIELD(reg->data,
-                                               DBG_IDLE_CHK_INFO_REG_ADDRESS));
+                               u32 addr =
+                                   GET_FIELD(reg->data,
+                                             DBG_IDLE_CHK_INFO_REG_ADDRESS);
 
                                /* Write register header */
                                struct dbg_idle_chk_result_reg_hdr *reg_hdr =
@@ -3597,16 +3866,19 @@ static u32 qed_idle_chk_dump_failure(struct qed_hwfn *p_hwfn,
                                memset(reg_hdr, 0, sizeof(*reg_hdr));
                                reg_hdr->size = reg->size;
                                SET_FIELD(reg_hdr->data,
-                                       DBG_IDLE_CHK_RESULT_REG_HDR_REG_ID,
-                                       rule->num_cond_regs + reg_id);
+                                         DBG_IDLE_CHK_RESULT_REG_HDR_REG_ID,
+                                         rule->num_cond_regs + reg_id);
 
                                /* Write register values */
-                               for (i = 0; i < reg->size;
-                                    i++, offset++, grc_addr += 4)
-                                       dump_buf[offset] =
-                                               qed_rd(p_hwfn, p_ptt, grc_addr);
-                               }
+                               offset +=
+                                   qed_grc_dump_addr_range(p_hwfn,
+                                                           p_ptt,
+                                                           dump_buf + offset,
+                                                           dump,
+                                                           addr,
+                                                           reg->size);
                        }
+               }
        }
 
        return offset;
@@ -3621,7 +3893,7 @@ qed_idle_chk_dump_rule_entries(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
 {
        struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
        u32 cond_reg_values[IDLE_CHK_MAX_ENTRIES_SIZE];
-       u32 i, j, offset = 0;
+       u32 i, offset = 0;
        u16 entry_id;
        u8 reg_id;
 
@@ -3664,73 +3936,83 @@ qed_idle_chk_dump_rule_entries(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
                if (!check_rule && dump)
                        continue;
 
+               if (!dump) {
+                       u32 entry_dump_size =
+                               qed_idle_chk_dump_failure(p_hwfn,
+                                                         p_ptt,
+                                                         dump_buf + offset,
+                                                         false,
+                                                         rule->rule_id,
+                                                         rule,
+                                                         0,
+                                                         NULL);
+
+                       offset += num_reg_entries * entry_dump_size;
+                       (*num_failing_rules) += num_reg_entries;
+                       continue;
+               }
+
                /* Go over all register entries (number of entries is the same
                 * for all condition registers).
                 */
                for (entry_id = 0; entry_id < num_reg_entries; entry_id++) {
                        /* Read current entry of all condition registers */
-                       if (dump) {
-                               u32 next_reg_offset = 0;
-
-                               for (reg_id = 0;
-                                    reg_id < rule->num_cond_regs;
-                                    reg_id++) {
-                                       const struct dbg_idle_chk_cond_reg
-                                               *reg = &cond_regs[reg_id];
-
-                                       /* Find GRC address (if it's a memory,
-                                        * the address of the specific entry is
-                                        * calculated).
-                                        */
-                                       u32 grc_addr =
-                                          DWORDS_TO_BYTES(
-                                               GET_FIELD(reg->data,
-                                                   DBG_IDLE_CHK_COND_REG_ADDRESS));
-
-                                       if (reg->num_entries > 1 ||
-                                           reg->start_entry > 0) {
-                                               u32 padded_entry_size =
-                                                       reg->entry_size > 1 ?
-                                                       roundup_pow_of_two
-                                                       (reg->entry_size) : 1;
-
-                                               grc_addr +=
-                                                       DWORDS_TO_BYTES(
-                                                               (reg->start_entry +
-                                                               entry_id)
-                                                               * padded_entry_size);
-                                       }
+                       u32 next_reg_offset = 0;
 
-                                       /* Read registers */
-                                       if (next_reg_offset + reg->entry_size >=
-                                           IDLE_CHK_MAX_ENTRIES_SIZE) {
-                                               DP_NOTICE(p_hwfn,
-                                                         "idle check registers entry is too large\n");
-                                               return 0;
-                                       }
+                       for (reg_id = 0; reg_id < rule->num_cond_regs;
+                            reg_id++) {
+                               const struct dbg_idle_chk_cond_reg *reg =
+                                       &cond_regs[reg_id];
 
-                                       for (j = 0; j < reg->entry_size;
-                                            j++, next_reg_offset++,
-                                            grc_addr += 4)
-                                            cond_reg_values[next_reg_offset] =
-                                               qed_rd(p_hwfn, p_ptt, grc_addr);
+                               /* Find GRC address (if it's a memory, the
+                                * address of the specific entry is calculated).
+                                */
+                               u32 addr =
+                                   GET_FIELD(reg->data,
+                                             DBG_IDLE_CHK_COND_REG_ADDRESS);
+
+                               if (reg->num_entries > 1 ||
+                                   reg->start_entry > 0) {
+                                       u32 padded_entry_size =
+                                          reg->entry_size > 1 ?
+                                          roundup_pow_of_two(reg->entry_size) :
+                                          1;
+
+                                       addr += (reg->start_entry + entry_id) *
+                                               padded_entry_size;
                                }
+
+                               /* Read registers */
+                               if (next_reg_offset + reg->entry_size >=
+                                   IDLE_CHK_MAX_ENTRIES_SIZE) {
+                                       DP_NOTICE(p_hwfn,
+                                                 "idle check registers entry is too large\n");
+                                       return 0;
+                               }
+
+                               next_reg_offset +=
+                                   qed_grc_dump_addr_range(p_hwfn,
+                                                           p_ptt,
+                                                           cond_reg_values +
+                                                           next_reg_offset,
+                                                           dump, addr,
+                                                           reg->entry_size);
                        }
 
                        /* Call rule's condition function - a return value of
                         * true indicates failure.
                         */
                        if ((*cond_arr[rule->cond_id])(cond_reg_values,
-                                                      imm_values) || !dump) {
+                                                      imm_values)) {
                                offset +=
-                                       qed_idle_chk_dump_failure(p_hwfn,
-                                                       p_ptt,
-                                                       dump_buf + offset,
-                                                       dump,
-                                                       rule->rule_id,
-                                                       rule,
-                                                       entry_id,
-                                                       cond_reg_values);
+                                   qed_idle_chk_dump_failure(p_hwfn,
+                                                             p_ptt,
+                                                             dump_buf + offset,
+                                                             dump,
+                                                             rule->rule_id,
+                                                             rule,
+                                                             entry_id,
+                                                             cond_reg_values);
                                (*num_failing_rules)++;
                                break;
                        }
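
The !dump branch near the top of this hunk sizes the idle-check buffer pessimistically: every entry of every rule is counted as a failing rule. A tiny helper making the arithmetic explicit; the numbers in the comment are purely illustrative:

#include <stdint.h>

/* Worst-case dwords reserved for one rule during the sizing pass,
 * e.g. 3 entries x 5 dwords per failure = 15 dwords.
 */
static uint32_t ex_rule_worst_case(uint32_t num_reg_entries,
				   uint32_t entry_dump_size)
{
	return num_reg_entries * entry_dump_size;
}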
@@ -3818,13 +4100,18 @@ static enum dbg_status qed_find_nvram_image(struct qed_hwfn *p_hwfn,
        struct mcp_file_att file_att;
 
        /* Call NVRAM get file command */
-       if (qed_mcp_nvm_rd_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_NVM_GET_FILE_ATT,
-                              image_type, &ret_mcp_resp, &ret_mcp_param,
-                              &ret_txn_size, (u32 *)&file_att) != 0)
-               return DBG_STATUS_NVRAM_GET_IMAGE_FAILED;
+       int nvm_result = qed_mcp_nvm_rd_cmd(p_hwfn,
+                                           p_ptt,
+                                           DRV_MSG_CODE_NVM_GET_FILE_ATT,
+                                           image_type,
+                                           &ret_mcp_resp,
+                                           &ret_mcp_param,
+                                           &ret_txn_size,
+                                           (u32 *)&file_att);
 
        /* Check response */
-       if ((ret_mcp_resp & FW_MSG_CODE_MASK) != FW_MSG_CODE_NVM_OK)
+       if (nvm_result ||
+           (ret_mcp_resp & FW_MSG_CODE_MASK) != FW_MSG_CODE_NVM_OK)
                return DBG_STATUS_NVRAM_GET_IMAGE_FAILED;
 
        /* Update return values */
@@ -3944,7 +4231,6 @@ static enum dbg_status qed_mcp_trace_get_meta_info(struct qed_hwfn *p_hwfn,
        u32 running_mfw_addr =
                MCP_REG_SCRATCH + SECTION_OFFSET(spad_trace_offsize) +
                QED_SECTION_SIZE(spad_trace_offsize) + trace_data_size_bytes;
-       enum dbg_status status;
        u32 nvram_image_type;
 
        *running_bundle_id = qed_rd(p_hwfn, p_ptt, running_mfw_addr);
@@ -3955,30 +4241,12 @@ static enum dbg_status qed_mcp_trace_get_meta_info(struct qed_hwfn *p_hwfn,
        nvram_image_type =
            (*running_bundle_id ==
             DIR_ID_1) ? NVM_TYPE_MFW_TRACE1 : NVM_TYPE_MFW_TRACE2;
-       status = qed_find_nvram_image(p_hwfn,
-                                     p_ptt,
-                                     nvram_image_type,
-                                     trace_meta_offset_bytes,
-                                     trace_meta_size_bytes);
-
-       return status;
-}
-
-/* Reads the MCP Trace data from the specified GRC address into the specified
- * buffer.
- */
-static void qed_mcp_trace_read_data(struct qed_hwfn *p_hwfn,
-                                   struct qed_ptt *p_ptt,
-                                   u32 grc_addr, u32 size_in_dwords, u32 *buf)
-{
-       u32 i;
 
-       DP_VERBOSE(p_hwfn,
-                  QED_MSG_DEBUG,
-                  "mcp_trace_read_data: reading trace data of size %d dwords from GRC address 0x%x\n",
-                  size_in_dwords, grc_addr);
-       for (i = 0; i < size_in_dwords; i++, grc_addr += BYTES_IN_DWORD)
-               buf[i] = qed_rd(p_hwfn, p_ptt, grc_addr);
+       return qed_find_nvram_image(p_hwfn,
+                                   p_ptt,
+                                   nvram_image_type,
+                                   trace_meta_offset_bytes,
+                                   trace_meta_size_bytes);
 }
 
 /* Reads the MCP Trace meta data (from NVRAM or buffer) into the specified
@@ -4034,11 +4302,14 @@ static enum dbg_status qed_mcp_trace_dump(struct qed_hwfn *p_hwfn,
                                          bool dump, u32 *num_dumped_dwords)
 {
        u32 trace_data_grc_addr, trace_data_size_bytes, trace_data_size_dwords;
-       u32 trace_meta_size_dwords, running_bundle_id, offset = 0;
-       u32 trace_meta_offset_bytes, trace_meta_size_bytes;
+       u32 trace_meta_size_dwords = 0, running_bundle_id, offset = 0;
+       u32 trace_meta_offset_bytes = 0, trace_meta_size_bytes = 0;
        enum dbg_status status = DBG_STATUS_OK;
+       bool mcp_access;
        int halted = 0;
 
+       mcp_access = !qed_grc_get_param(p_hwfn, DBG_GRC_PARAM_NO_MCP);
+
        *num_dumped_dwords = 0;
 
        /* Get trace data info */
@@ -4060,7 +4331,7 @@ static enum dbg_status qed_mcp_trace_dump(struct qed_hwfn *p_hwfn,
         * consistent if halt fails, MCP trace is taken anyway, with a small
         * risk that it may be corrupt.
         */
-       if (dump) {
+       if (dump && mcp_access) {
                halted = !qed_mcp_halt(p_hwfn, p_ptt);
                if (!halted)
                        DP_NOTICE(p_hwfn, "MCP halt failed!\n");
@@ -4078,13 +4349,12 @@ static enum dbg_status qed_mcp_trace_dump(struct qed_hwfn *p_hwfn,
                                     dump, "size", trace_data_size_dwords);
 
        /* Read trace data from scratchpad into dump buffer */
-       if (dump)
-               qed_mcp_trace_read_data(p_hwfn,
-                                       p_ptt,
-                                       trace_data_grc_addr,
-                                       trace_data_size_dwords,
-                                       dump_buf + offset);
-       offset += trace_data_size_dwords;
+       offset += qed_grc_dump_addr_range(p_hwfn,
+                                         p_ptt,
+                                         dump_buf + offset,
+                                         dump,
+                                         BYTES_TO_DWORDS(trace_data_grc_addr),
+                                         trace_data_size_dwords);
 
        /* Resume MCP (only if halt succeeded) */
        if (halted && qed_mcp_resume(p_hwfn, p_ptt) != 0)
@@ -4095,38 +4365,38 @@ static enum dbg_status qed_mcp_trace_dump(struct qed_hwfn *p_hwfn,
                                       dump, "mcp_trace_meta", 1);
 
        /* Read trace meta info */
-       status = qed_mcp_trace_get_meta_info(p_hwfn,
-                                            p_ptt,
-                                            trace_data_size_bytes,
-                                            &running_bundle_id,
-                                            &trace_meta_offset_bytes,
-                                            &trace_meta_size_bytes);
-       if (status != DBG_STATUS_OK)
-               return status;
+       if (mcp_access) {
+               status = qed_mcp_trace_get_meta_info(p_hwfn,
+                                                    p_ptt,
+                                                    trace_data_size_bytes,
+                                                    &running_bundle_id,
+                                                    &trace_meta_offset_bytes,
+                                                    &trace_meta_size_bytes);
+               if (status == DBG_STATUS_OK)
+                       trace_meta_size_dwords =
+                               BYTES_TO_DWORDS(trace_meta_size_bytes);
+       }
 
-       /* Dump trace meta size param (trace_meta_size_bytes is always
-        * dword-aligned).
-        */
-       trace_meta_size_dwords = BYTES_TO_DWORDS(trace_meta_size_bytes);
-       offset += qed_dump_num_param(dump_buf + offset, dump, "size",
-                                    trace_meta_size_dwords);
+       /* Dump trace meta size param */
+       offset += qed_dump_num_param(dump_buf + offset,
+                                    dump, "size", trace_meta_size_dwords);
 
        /* Read trace meta image into dump buffer */
-       if (dump) {
+       if (dump && trace_meta_size_dwords)
                status = qed_mcp_trace_read_meta(p_hwfn,
-                                               p_ptt,
-                                               trace_meta_offset_bytes,
-                                               trace_meta_size_bytes,
-                                               dump_buf + offset);
-               if (status != DBG_STATUS_OK)
-                       return status;
-       }
-
-       offset += trace_meta_size_dwords;
+                                                p_ptt,
+                                                trace_meta_offset_bytes,
+                                                trace_meta_size_bytes,
+                                                dump_buf + offset);
+       if (status == DBG_STATUS_OK)
+               offset += trace_meta_size_dwords;
 
        *num_dumped_dwords = offset;
 
-       return DBG_STATUS_OK;
+       /* If there was no MCP access, indicate that the dump doesn't contain
+        * the meta data from NVRAM.
+        */
+       return mcp_access ? status : DBG_STATUS_NVRAM_GET_IMAGE_FAILED;
 }
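
With DBG_GRC_PARAM_NO_MCP set, the function above still dumps the trace-data section but skips the MCP halt and the NVRAM meta image, returning DBG_STATUS_NVRAM_GET_IMAGE_FAILED. A fragment sketching how a caller might treat that status as non-fatal; the policy shown is an assumption, not something this patch establishes:

u32 dumped;
enum dbg_status rc;

rc = qed_mcp_trace_dump(p_hwfn, p_ptt, dump_buf, true, &dumped);

/* Assumed caller policy: the trace data itself was dumped; only the
 * meta image from NVRAM is absent.
 */
if (rc == DBG_STATUS_NVRAM_GET_IMAGE_FAILED)
	rc = DBG_STATUS_OK;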
 
 /* Dump GRC FIFO */
@@ -4311,9 +4581,10 @@ static u32 qed_fw_asserts_dump(struct qed_hwfn *p_hwfn,
                               struct qed_ptt *p_ptt, u32 *dump_buf, bool dump)
 {
        struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
+       struct fw_asserts_ram_section *asserts;
        char storm_letter_str[2] = "?";
        struct fw_info fw_info;
-       u32 offset = 0, i;
+       u32 offset = 0;
        u8 storm_id;
 
        /* Dump global params */
@@ -4323,8 +4594,8 @@ static u32 qed_fw_asserts_dump(struct qed_hwfn *p_hwfn,
        offset += qed_dump_str_param(dump_buf + offset,
                                     dump, "dump-type", "fw-asserts");
        for (storm_id = 0; storm_id < MAX_DBG_STORMS; storm_id++) {
-               u32 fw_asserts_section_addr, next_list_idx_addr, next_list_idx,
-                       last_list_idx, element_addr;
+               u32 fw_asserts_section_addr, next_list_idx_addr, next_list_idx;
+               u32 last_list_idx, addr;
 
                if (dev_data->block_in_reset[s_storm_defs[storm_id].block_id])
                        continue;
@@ -4332,6 +4603,8 @@ static u32 qed_fw_asserts_dump(struct qed_hwfn *p_hwfn,
                /* Read FW info for the current Storm */
                qed_read_fw_info(p_hwfn, p_ptt, storm_id, &fw_info);
 
+               asserts = &fw_info.fw_asserts_section;
+
                /* Dump FW Asserts section header and params */
                storm_letter_str[0] = s_storm_defs[storm_id].letter;
                offset += qed_dump_section_hdr(dump_buf + offset, dump,
@@ -4339,12 +4612,10 @@ static u32 qed_fw_asserts_dump(struct qed_hwfn *p_hwfn,
                offset += qed_dump_str_param(dump_buf + offset, dump, "storm",
                                             storm_letter_str);
                offset += qed_dump_num_param(dump_buf + offset, dump, "size",
-                                            fw_info.fw_asserts_section.
-                                            list_element_dword_size);
+                                            asserts->list_element_dword_size);
 
                if (!dump) {
-                       offset += fw_info.fw_asserts_section.
-                                 list_element_dword_size;
+                       offset += asserts->list_element_dword_size;
                        continue;
                }
 
@@ -4352,28 +4623,22 @@ static u32 qed_fw_asserts_dump(struct qed_hwfn *p_hwfn,
                fw_asserts_section_addr =
                        s_storm_defs[storm_id].sem_fast_mem_addr +
                        SEM_FAST_REG_INT_RAM +
-                       RAM_LINES_TO_BYTES(fw_info.fw_asserts_section.
-                                          section_ram_line_offset);
+                       RAM_LINES_TO_BYTES(asserts->section_ram_line_offset);
                next_list_idx_addr =
                        fw_asserts_section_addr +
-                       DWORDS_TO_BYTES(fw_info.fw_asserts_section.
-                                       list_next_index_dword_offset);
+                       DWORDS_TO_BYTES(asserts->list_next_index_dword_offset);
                next_list_idx = qed_rd(p_hwfn, p_ptt, next_list_idx_addr);
                last_list_idx = (next_list_idx > 0
                                 ? next_list_idx
-                                : fw_info.fw_asserts_section.list_num_elements)
-                               - 1;
-               element_addr =
-                       fw_asserts_section_addr +
-                       DWORDS_TO_BYTES(fw_info.fw_asserts_section.
-                                       list_dword_offset) +
-                       last_list_idx *
-                       DWORDS_TO_BYTES(fw_info.fw_asserts_section.
-                                       list_element_dword_size);
-               for (i = 0;
-                    i < fw_info.fw_asserts_section.list_element_dword_size;
-                    i++, offset++, element_addr += BYTES_IN_DWORD)
-                       dump_buf[offset] = qed_rd(p_hwfn, p_ptt, element_addr);
+                                : asserts->list_num_elements) - 1;
+               addr = BYTES_TO_DWORDS(fw_asserts_section_addr) +
+                      asserts->list_dword_offset +
+                      last_list_idx * asserts->list_element_dword_size;
+               offset +=
+                   qed_grc_dump_addr_range(p_hwfn, p_ptt,
+                                           dump_buf + offset,
+                                           dump, addr,
+                                           asserts->list_element_dword_size);
        }
 
        /* Dump last section */
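
The element address above is computed in dword units: the section base (a byte address) is converted once with BYTES_TO_DWORDS(), after which the dword-granular list offsets are added directly. The equivalent byte-domain computation, which yields the same dword address provided the section base is dword-aligned:

u32 byte_addr = fw_asserts_section_addr +
		DWORDS_TO_BYTES(asserts->list_dword_offset +
				last_list_idx *
				asserts->list_element_dword_size);
u32 addr = BYTES_TO_DWORDS(byte_addr);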
@@ -4386,13 +4651,10 @@ static u32 qed_fw_asserts_dump(struct qed_hwfn *p_hwfn,
 enum dbg_status qed_dbg_set_bin_ptr(const u8 * const bin_ptr)
 {
        /* Convert binary data to debug arrays */
-       u32 num_of_buffers = *(u32 *)bin_ptr;
-       struct bin_buffer_hdr *buf_array;
+       struct bin_buffer_hdr *buf_array = (struct bin_buffer_hdr *)bin_ptr;
        u8 buf_id;
 
-       buf_array = (struct bin_buffer_hdr *)((u32 *)bin_ptr + 1);
-
-       for (buf_id = 0; buf_id < num_of_buffers; buf_id++) {
+       for (buf_id = 0; buf_id < MAX_BIN_DBG_BUFFER_TYPE; buf_id++) {
                s_dbg_arrays[buf_id].ptr =
                    (u32 *)(bin_ptr + buf_array[buf_id].offset);
                s_dbg_arrays[buf_id].size_in_dwords =
@@ -4402,6 +4664,17 @@ enum dbg_status qed_dbg_set_bin_ptr(const u8 * const bin_ptr)
        return DBG_STATUS_OK;
 }
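
The rewritten loop above iterates over a fixed number of headers (MAX_BIN_DBG_BUFFER_TYPE) instead of trusting a count word at the start of the blob. A sketch of the layout those accesses imply; the field names mirror the accesses above, while the struct name and exact widths are assumptions:

/* One header per binary debug buffer type, packed at the start of the
 * blob; each payload is located by byte offset and length.
 */
struct ex_bin_buffer_hdr {
	u32 offset;	/* payload start, in bytes from bin_ptr */
	u32 length;	/* payload size, in bytes */
};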
 
+/* Assign default GRC param values */
+void qed_dbg_grc_set_params_default(struct qed_hwfn *p_hwfn)
+{
+       struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
+       u32 i;
+
+       for (i = 0; i < MAX_DBG_GRC_PARAMS; i++)
+               dev_data->grc.param_val[i] =
+                   s_grc_param_defs[i].default_val[dev_data->chip_id];
+}
+
 enum dbg_status qed_dbg_grc_get_dump_buf_size(struct qed_hwfn *p_hwfn,
                                              struct qed_ptt *p_ptt,
                                              u32 *buf_size)
@@ -4441,8 +4714,9 @@ enum dbg_status qed_dbg_grc_dump(struct qed_hwfn *p_hwfn,
        /* GRC Dump */
        status = qed_grc_dump(p_hwfn, p_ptt, dump_buf, true, num_dumped_dwords);
 
-       /* Clear all GRC params */
-       qed_dbg_grc_clear_params(p_hwfn);
+       /* Revert GRC params to their default */
+       qed_dbg_grc_set_params_default(p_hwfn);
+
        return status;
 }
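Every dump entry point in this patch now ends the same way: run the dump, then unconditionally restore the per-chip GRC parameter defaults before returning, so one feature's parameter overrides cannot leak into the next invocation. A standalone sketch of that control flow (stub names are illustrative):

#include <stdio.h>

enum dbg_status { DBG_STATUS_OK, DBG_STATUS_DUMP_BUF_TOO_SMALL };

static enum dbg_status do_feature_dump(void)	/* stand-in for a dump */
{
	return DBG_STATUS_OK;
}

static void grc_set_params_default(void)	/* stand-in for the revert */
{
	puts("GRC params restored to defaults");
}

static enum dbg_status dump_feature(void)
{
	enum dbg_status status = do_feature_dump();

	/* The revert runs on success and failure alike; only then return. */
	grc_set_params_default();
	return status;
}

int main(void)
{
	return dump_feature() != DBG_STATUS_OK;
}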
 
@@ -4495,6 +4769,10 @@ enum dbg_status qed_dbg_idle_chk_dump(struct qed_hwfn *p_hwfn,
 
        /* Idle Check Dump */
        *num_dumped_dwords = qed_idle_chk_dump(p_hwfn, p_ptt, dump_buf, true);
+
+       /* Revert GRC params to their default */
+       qed_dbg_grc_set_params_default(p_hwfn);
+
        return DBG_STATUS_OK;
 }
 
@@ -4519,11 +4797,15 @@ enum dbg_status qed_dbg_mcp_trace_dump(struct qed_hwfn *p_hwfn,
        u32 needed_buf_size_in_dwords;
        enum dbg_status status;
 
-       status = qed_dbg_mcp_trace_get_dump_buf_size(p_hwfn, p_ptt,
+       /* Validate buffer size */
+       status =
+           qed_dbg_mcp_trace_get_dump_buf_size(p_hwfn, p_ptt,
                                                &needed_buf_size_in_dwords);
 
-       if (status != DBG_STATUS_OK)
+       if (status != DBG_STATUS_OK &&
+           status != DBG_STATUS_NVRAM_GET_IMAGE_FAILED)
                return status;
+
        if (buf_size_in_dwords < needed_buf_size_in_dwords)
                return DBG_STATUS_DUMP_BUF_TOO_SMALL;
 
@@ -4531,8 +4813,13 @@ enum dbg_status qed_dbg_mcp_trace_dump(struct qed_hwfn *p_hwfn,
        qed_update_blocks_reset_state(p_hwfn, p_ptt);
 
        /* Perform dump */
-       return qed_mcp_trace_dump(p_hwfn,
-                                 p_ptt, dump_buf, true, num_dumped_dwords);
+       status = qed_mcp_trace_dump(p_hwfn,
+                                   p_ptt, dump_buf, true, num_dumped_dwords);
+
+       /* Revert GRC params to their default */
+       qed_dbg_grc_set_params_default(p_hwfn);
+
+       return status;
 }
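The size query for the MCP trace can fail to pull the trace meta image from NVRAM yet still report a usable buffer size, so that one status code is now tolerated and the dump proceeds (the trace will simply lack parsed metadata). A sketch of the relaxed check:

#include <stdbool.h>
#include <stdio.h>

enum dbg_status {
	DBG_STATUS_OK,
	DBG_STATUS_NVRAM_GET_IMAGE_FAILED,
	DBG_STATUS_DUMP_BUF_TOO_SMALL,
};

/* Only statuses other than OK and the NVRAM-image failure abort. */
static bool size_query_fatal(enum dbg_status status)
{
	return status != DBG_STATUS_OK &&
	       status != DBG_STATUS_NVRAM_GET_IMAGE_FAILED;
}

int main(void)
{
	printf("%d %d\n",
	       size_query_fatal(DBG_STATUS_NVRAM_GET_IMAGE_FAILED),	/* 0 */
	       size_query_fatal(DBG_STATUS_DUMP_BUF_TOO_SMALL));	/* 1 */
	return 0;
}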
 
 enum dbg_status qed_dbg_reg_fifo_get_dump_buf_size(struct qed_hwfn *p_hwfn,
@@ -4567,8 +4854,14 @@ enum dbg_status qed_dbg_reg_fifo_dump(struct qed_hwfn *p_hwfn,
 
        /* Update reset state */
        qed_update_blocks_reset_state(p_hwfn, p_ptt);
-       return qed_reg_fifo_dump(p_hwfn,
-                                p_ptt, dump_buf, true, num_dumped_dwords);
+
+       status = qed_reg_fifo_dump(p_hwfn,
+                                  p_ptt, dump_buf, true, num_dumped_dwords);
+
+       /* Revert GRC params to their default */
+       qed_dbg_grc_set_params_default(p_hwfn);
+
+       return status;
 }
 
 enum dbg_status qed_dbg_igu_fifo_get_dump_buf_size(struct qed_hwfn *p_hwfn,
@@ -4603,8 +4896,13 @@ enum dbg_status qed_dbg_igu_fifo_dump(struct qed_hwfn *p_hwfn,
 
        /* Update reset state */
        qed_update_blocks_reset_state(p_hwfn, p_ptt);
-       return qed_igu_fifo_dump(p_hwfn,
-                                p_ptt, dump_buf, true, num_dumped_dwords);
+
+       status = qed_igu_fifo_dump(p_hwfn,
+                                  p_ptt, dump_buf, true, num_dumped_dwords);
+       /* Revert GRC params to their default */
+       qed_dbg_grc_set_params_default(p_hwfn);
+
+       return status;
 }
 
 enum dbg_status
@@ -4641,9 +4939,16 @@ enum dbg_status qed_dbg_protection_override_dump(struct qed_hwfn *p_hwfn,
 
        /* Update reset state */
        qed_update_blocks_reset_state(p_hwfn, p_ptt);
-       return qed_protection_override_dump(p_hwfn,
-                                           p_ptt,
-                                           dump_buf, true, num_dumped_dwords);
+
+       status = qed_protection_override_dump(p_hwfn,
+                                             p_ptt,
+                                             dump_buf,
+                                             true, num_dumped_dwords);
+
+       /* Revert GRC params to their default */
+       qed_dbg_grc_set_params_default(p_hwfn);
+
+       return status;
 }
 
 enum dbg_status qed_dbg_fw_asserts_get_dump_buf_size(struct qed_hwfn *p_hwfn,
@@ -5045,13 +5350,10 @@ static char s_temp_buf[MAX_MSG_LEN];
 enum dbg_status qed_dbg_user_set_bin_ptr(const u8 * const bin_ptr)
 {
        /* Convert binary data to debug arrays */
-       u32 num_of_buffers = *(u32 *)bin_ptr;
-       struct bin_buffer_hdr *buf_array;
+       struct bin_buffer_hdr *buf_array = (struct bin_buffer_hdr *)bin_ptr;
        u8 buf_id;
 
-       buf_array = (struct bin_buffer_hdr *)((u32 *)bin_ptr + 1);
-
-       for (buf_id = 0; buf_id < num_of_buffers; buf_id++) {
+       for (buf_id = 0; buf_id < MAX_BIN_DBG_BUFFER_TYPE; buf_id++) {
                s_dbg_arrays[buf_id].ptr =
                    (u32 *)(bin_ptr + buf_array[buf_id].offset);
                s_dbg_arrays[buf_id].size_in_dwords =
@@ -5874,16 +6176,16 @@ static enum dbg_status qed_parse_reg_fifo_dump(struct qed_hwfn *p_hwfn,
                results_offset +=
                    sprintf(qed_get_buf_ptr(results_buf,
                                            results_offset),
-                           "raw: 0x%016llx, address: 0x%07llx, access: %-5s, pf: %2lld, vf: %s, port: %lld, privilege: %-3s, protection: %-12s, master: %-4s, errors: ",
+                           "raw: 0x%016llx, address: 0x%07x, access: %-5s, pf: %2d, vf: %s, port: %d, privilege: %-3s, protection: %-12s, master: %-4s, errors: ",
                            elements[i].data,
-                           GET_FIELD(elements[i].data,
+                           (u32)GET_FIELD(elements[i].data,
                                      REG_FIFO_ELEMENT_ADDRESS) *
                                      REG_FIFO_ELEMENT_ADDR_FACTOR,
                                      s_access_strs[GET_FIELD(elements[i].data,
                                                    REG_FIFO_ELEMENT_ACCESS)],
-                           GET_FIELD(elements[i].data,
-                                     REG_FIFO_ELEMENT_PF), vf_str,
-                           GET_FIELD(elements[i].data,
+                           (u32)GET_FIELD(elements[i].data,
+                                          REG_FIFO_ELEMENT_PF), vf_str,
+                           (u32)GET_FIELD(elements[i].data,
                                      REG_FIFO_ELEMENT_PORT),
                                      s_privilege_strs[GET_FIELD(elements[i].
                                      data,
@@ -6189,13 +6491,13 @@ qed_parse_protection_override_dump(struct qed_hwfn *p_hwfn,
                results_offset +=
                    sprintf(qed_get_buf_ptr(results_buf,
                                            results_offset),
-                           "window %2d, address: 0x%07x, size: %7lld regs, read: %lld, write: %lld, read protection: %-12s, write protection: %-12s\n",
+                           "window %2d, address: 0x%07x, size: %7d regs, read: %d, write: %d, read protection: %-12s, write protection: %-12s\n",
                            i, address,
-                           GET_FIELD(elements[i].data,
+                           (u32)GET_FIELD(elements[i].data,
                                      PROTECTION_OVERRIDE_ELEMENT_WINDOW_SIZE),
-                           GET_FIELD(elements[i].data,
+                           (u32)GET_FIELD(elements[i].data,
                                      PROTECTION_OVERRIDE_ELEMENT_READ),
-                           GET_FIELD(elements[i].data,
+                           (u32)GET_FIELD(elements[i].data,
                                      PROTECTION_OVERRIDE_ELEMENT_WRITE),
                            s_protection_strs[GET_FIELD(elements[i].data,
                                PROTECTION_OVERRIDE_ELEMENT_READ_PROTECTION)],
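Both parser hunks above fix the same class of bug: GET_FIELD() on a 64-bit FIFO element yields a 64-bit value, and passing it to a %d/%x specifier (or, before the change, passing 32-bit-sized fields to %lld) mismatches the format string. The fix truncates explicitly and uses 32-bit specifiers. A compact illustration with an assumed field layout:

#include <stdint.h>
#include <stdio.h>

/* Minimal stand-in for the driver's GET_FIELD(); mask/shift assumed. */
#define ADDR_SHIFT 0
#define ADDR_MASK  0x7fffffULL
#define GET_ADDR(elem) (((elem) >> ADDR_SHIFT) & ADDR_MASK)

#define ADDR_FACTOR 4	/* stand-in for REG_FIFO_ELEMENT_ADDR_FACTOR */

int main(void)
{
	uint64_t element = 0x123456789abcdefULL;

	/* The field is known to fit in 32 bits, so cast it and match a
	 * 32-bit conversion specifier instead of passing a u64 to %x. */
	printf("address: 0x%07x\n",
	       (uint32_t)GET_ADDR(element) * ADDR_FACTOR);
	return 0;
}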
@@ -6508,7 +6810,7 @@ static enum dbg_status qed_dbg_dump(struct qed_hwfn *p_hwfn,
         */
        rc = qed_features_lookup[feature_idx].get_size(p_hwfn, p_ptt,
                                                       &buf_size_dwords);
-       if (rc != DBG_STATUS_OK)
+       if (rc != DBG_STATUS_OK && rc != DBG_STATUS_NVRAM_GET_IMAGE_FAILED)
                return rc;
        feature->buf_size = buf_size_dwords * sizeof(u32);
        feature->dump_buf = vmalloc(feature->buf_size);
index e2a081ceaf520c429b90e1fcc1e2b6cb7d3b10aa..fad73195010d01b4d50ad80ff5c7e399787daa94 100644 (file)
@@ -75,7 +75,8 @@ enum BAR_ID {
        BAR_ID_1        /* Used for doorbells */
 };
 
-static u32 qed_hw_bar_size(struct qed_hwfn *p_hwfn, enum BAR_ID bar_id)
+static u32 qed_hw_bar_size(struct qed_hwfn *p_hwfn,
+                          struct qed_ptt *p_ptt, enum BAR_ID bar_id)
 {
        u32 bar_reg = (bar_id == BAR_ID_0 ?
                       PGLUE_B_REG_PF_BAR0_SIZE : PGLUE_B_REG_PF_BAR1_SIZE);
@@ -84,7 +85,7 @@ static u32 qed_hw_bar_size(struct qed_hwfn *p_hwfn, enum BAR_ID bar_id)
        if (IS_VF(p_hwfn->cdev))
                return 1 << 17;
 
-       val = qed_rd(p_hwfn, p_hwfn->p_main_ptt, bar_reg);
+       val = qed_rd(p_hwfn, p_ptt, bar_reg);
        if (val)
                return 1 << (val + 15);
 
@@ -186,195 +187,569 @@ void qed_resc_free(struct qed_dev *cdev)
        }
 }
 
-static int qed_init_qm_info(struct qed_hwfn *p_hwfn, bool b_sleepable)
+/******************** QM initialization *******************/
+#define ACTIVE_TCS_BMAP 0x9f
+#define ACTIVE_TCS_BMAP_4PORT_K2 0xf
+
+/* determines the physical queue flags for a given PF. */
+static u32 qed_get_pq_flags(struct qed_hwfn *p_hwfn)
 {
-       u8 num_vports, vf_offset = 0, i, vport_id, num_ports, curr_queue = 0;
-       struct qed_qm_info *qm_info = &p_hwfn->qm_info;
-       struct init_qm_port_params *p_qm_port;
-       bool init_rdma_offload_pq = false;
-       bool init_pure_ack_pq = false;
-       bool init_ooo_pq = false;
-       u16 num_pqs, multi_cos_tcs = 1;
-       u8 pf_wfq = qm_info->pf_wfq;
-       u32 pf_rl = qm_info->pf_rl;
-       u16 num_pf_rls = 0;
-       u16 num_vfs = 0;
-
-#ifdef CONFIG_QED_SRIOV
-       if (p_hwfn->cdev->p_iov_info)
-               num_vfs = p_hwfn->cdev->p_iov_info->total_vfs;
-#endif
-       memset(qm_info, 0, sizeof(*qm_info));
+       u32 flags;
 
-       num_pqs = multi_cos_tcs + num_vfs + 1;  /* The '1' is for pure-LB */
-       num_vports = (u8)RESC_NUM(p_hwfn, QED_VPORT);
+       /* common flags */
+       flags = PQ_FLAGS_LB;
 
-       if (p_hwfn->hw_info.personality == QED_PCI_ETH_ROCE) {
-               num_pqs++;      /* for RoCE queue */
-               init_rdma_offload_pq = true;
-               /* we subtract num_vfs because each require a rate limiter,
-                * and one default rate limiter
-                */
-               if (p_hwfn->pf_params.rdma_pf_params.enable_dcqcn)
-                       num_pf_rls = RESC_NUM(p_hwfn, QED_RL) - num_vfs - 1;
+       /* feature flags */
+       if (IS_QED_SRIOV(p_hwfn->cdev))
+               flags |= PQ_FLAGS_VFS;
 
-               num_pqs += num_pf_rls;
-               qm_info->num_pf_rls = (u8) num_pf_rls;
+       /* protocol flags */
+       switch (p_hwfn->hw_info.personality) {
+       case QED_PCI_ETH:
+               flags |= PQ_FLAGS_MCOS;
+               break;
+       case QED_PCI_FCOE:
+               flags |= PQ_FLAGS_OFLD;
+               break;
+       case QED_PCI_ISCSI:
+               flags |= PQ_FLAGS_ACK | PQ_FLAGS_OOO | PQ_FLAGS_OFLD;
+               break;
+       case QED_PCI_ETH_ROCE:
+               flags |= PQ_FLAGS_MCOS | PQ_FLAGS_OFLD | PQ_FLAGS_LLT;
+               break;
+       default:
+               DP_ERR(p_hwfn,
+                      "unknown personality %d\n", p_hwfn->hw_info.personality);
+               return 0;
        }
 
-       if (p_hwfn->hw_info.personality == QED_PCI_ISCSI) {
-               num_pqs += 2;   /* for iSCSI pure-ACK / OOO queue */
-               init_pure_ack_pq = true;
-               init_ooo_pq = true;
-       }
+       return flags;
+}
 
-       /* Sanity checking that setup requires legal number of resources */
-       if (num_pqs > RESC_NUM(p_hwfn, QED_PQ)) {
-               DP_ERR(p_hwfn,
-                      "Need too many Physical queues - 0x%04x when only %04x are available\n",
-                      num_pqs, RESC_NUM(p_hwfn, QED_PQ));
-               return -EINVAL;
-       }
+/* Getters for resource amounts necessary for qm initialization */
+u8 qed_init_qm_get_num_tcs(struct qed_hwfn *p_hwfn)
+{
+       return p_hwfn->hw_info.num_hw_tc;
+}
 
-       /* PQs will be arranged as follows: First per-TC PQ then pure-LB quete.
-        */
-       qm_info->qm_pq_params = kcalloc(num_pqs,
-                                       sizeof(struct init_qm_pq_params),
-                                       b_sleepable ? GFP_KERNEL : GFP_ATOMIC);
-       if (!qm_info->qm_pq_params)
-               goto alloc_err;
+u16 qed_init_qm_get_num_vfs(struct qed_hwfn *p_hwfn)
+{
+       return IS_QED_SRIOV(p_hwfn->cdev) ?
+              p_hwfn->cdev->p_iov_info->total_vfs : 0;
+}
 
-       qm_info->qm_vport_params = kcalloc(num_vports,
-                                          sizeof(struct init_qm_vport_params),
-                                          b_sleepable ? GFP_KERNEL
-                                                      : GFP_ATOMIC);
-       if (!qm_info->qm_vport_params)
-               goto alloc_err;
+#define NUM_DEFAULT_RLS 1
 
-       qm_info->qm_port_params = kcalloc(MAX_NUM_PORTS,
-                                         sizeof(struct init_qm_port_params),
-                                         b_sleepable ? GFP_KERNEL
-                                                     : GFP_ATOMIC);
-       if (!qm_info->qm_port_params)
-               goto alloc_err;
+u16 qed_init_qm_get_num_pf_rls(struct qed_hwfn *p_hwfn)
+{
+       u16 num_pf_rls, num_vfs = qed_init_qm_get_num_vfs(p_hwfn);
 
-       qm_info->wfq_data = kcalloc(num_vports, sizeof(struct qed_wfq_data),
-                                   b_sleepable ? GFP_KERNEL : GFP_ATOMIC);
-       if (!qm_info->wfq_data)
-               goto alloc_err;
+       /* The number of RLs can't exceed the RL or vport resources */
+       num_pf_rls = (u16) min_t(u32, RESC_NUM(p_hwfn, QED_RL),
+                                RESC_NUM(p_hwfn, QED_VPORT));
 
-       vport_id = (u8)RESC_START(p_hwfn, QED_VPORT);
+       /* Make sure after we reserve there's something left */
+       if (num_pf_rls < num_vfs + NUM_DEFAULT_RLS)
+               return 0;
 
-       /* First init rate limited queues */
-       for (curr_queue = 0; curr_queue < num_pf_rls; curr_queue++) {
-               qm_info->qm_pq_params[curr_queue].vport_id = vport_id++;
-               qm_info->qm_pq_params[curr_queue].tc_id =
-                   p_hwfn->hw_info.non_offload_tc;
-               qm_info->qm_pq_params[curr_queue].wrr_group = 1;
-               qm_info->qm_pq_params[curr_queue].rl_valid = 1;
-       }
+       /* Subtract RLs needed by the VFs and one default RL for the PF */
+       num_pf_rls -= num_vfs + NUM_DEFAULT_RLS;
 
-       /* First init per-TC PQs */
-       for (i = 0; i < multi_cos_tcs; i++) {
-               struct init_qm_pq_params *params =
-                   &qm_info->qm_pq_params[curr_queue++];
+       return num_pf_rls;
+}
 
-               if (p_hwfn->hw_info.personality == QED_PCI_ETH_ROCE ||
-                   p_hwfn->hw_info.personality == QED_PCI_ETH) {
-                       params->vport_id = vport_id;
-                       params->tc_id = p_hwfn->hw_info.non_offload_tc;
-                       params->wrr_group = 1;
-               } else {
-                       params->vport_id = vport_id;
-                       params->tc_id = p_hwfn->hw_info.offload_tc;
-                       params->wrr_group = 1;
-               }
-       }
+u16 qed_init_qm_get_num_vports(struct qed_hwfn *p_hwfn)
+{
+       u32 pq_flags = qed_get_pq_flags(p_hwfn);
+
+       /* all pqs share the same vport, except for vfs and pf_rl pqs */
+       return (!!(PQ_FLAGS_RLS & pq_flags)) *
+              qed_init_qm_get_num_pf_rls(p_hwfn) +
+              (!!(PQ_FLAGS_VFS & pq_flags)) *
+              qed_init_qm_get_num_vfs(p_hwfn) + 1;
+}
 
-       /* Then init pure-LB PQ */
-       qm_info->pure_lb_pq = curr_queue;
-       qm_info->qm_pq_params[curr_queue].vport_id =
-           (u8) RESC_START(p_hwfn, QED_VPORT);
-       qm_info->qm_pq_params[curr_queue].tc_id = PURE_LB_TC;
-       qm_info->qm_pq_params[curr_queue].wrr_group = 1;
-       curr_queue++;
-
-       qm_info->offload_pq = 0;
-       if (init_rdma_offload_pq) {
-               qm_info->offload_pq = curr_queue;
-               qm_info->qm_pq_params[curr_queue].vport_id = vport_id;
-               qm_info->qm_pq_params[curr_queue].tc_id =
-                   p_hwfn->hw_info.offload_tc;
-               qm_info->qm_pq_params[curr_queue].wrr_group = 1;
-               curr_queue++;
-       }
-
-       if (init_pure_ack_pq) {
-               qm_info->pure_ack_pq = curr_queue;
-               qm_info->qm_pq_params[curr_queue].vport_id = vport_id;
-               qm_info->qm_pq_params[curr_queue].tc_id =
-                   p_hwfn->hw_info.offload_tc;
-               qm_info->qm_pq_params[curr_queue].wrr_group = 1;
-               curr_queue++;
-       }
-
-       if (init_ooo_pq) {
-               qm_info->ooo_pq = curr_queue;
-               qm_info->qm_pq_params[curr_queue].vport_id = vport_id;
-               qm_info->qm_pq_params[curr_queue].tc_id = DCBX_ISCSI_OOO_TC;
-               qm_info->qm_pq_params[curr_queue].wrr_group = 1;
-               curr_queue++;
-       }
-
-       /* Then init per-VF PQs */
-       vf_offset = curr_queue;
-       for (i = 0; i < num_vfs; i++) {
-               /* First vport is used by the PF */
-               qm_info->qm_pq_params[curr_queue].vport_id = vport_id + i + 1;
-               qm_info->qm_pq_params[curr_queue].tc_id =
-                   p_hwfn->hw_info.non_offload_tc;
-               qm_info->qm_pq_params[curr_queue].wrr_group = 1;
-               qm_info->qm_pq_params[curr_queue].rl_valid = 1;
-               curr_queue++;
-       }
-
-       qm_info->vf_queues_offset = vf_offset;
-       qm_info->num_pqs = num_pqs;
-       qm_info->num_vports = num_vports;
+/* Calculate the number of PQs according to the requested flags */
+u16 qed_init_qm_get_num_pqs(struct qed_hwfn *p_hwfn)
+{
+       u32 pq_flags = qed_get_pq_flags(p_hwfn);
+
+       return (!!(PQ_FLAGS_RLS & pq_flags)) *
+              qed_init_qm_get_num_pf_rls(p_hwfn) +
+              (!!(PQ_FLAGS_MCOS & pq_flags)) *
+              qed_init_qm_get_num_tcs(p_hwfn) +
+              (!!(PQ_FLAGS_LB & pq_flags)) + (!!(PQ_FLAGS_OOO & pq_flags)) +
+              (!!(PQ_FLAGS_ACK & pq_flags)) + (!!(PQ_FLAGS_OFLD & pq_flags)) +
+              (!!(PQ_FLAGS_LLT & pq_flags)) +
+              (!!(PQ_FLAGS_VFS & pq_flags)) * qed_init_qm_get_num_vfs(p_hwfn);
+}
+
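The getter relies on !! collapsing each flag test to 0 or 1, so every term contributes its resource count only when the corresponding feature is active. A standalone version with assumed flag values, evaluated for a hypothetical iSCSI-style PF:

#include <stdio.h>

/* Flag bits assumed for the sketch; the driver's values may differ. */
#define PQ_FLAGS_RLS  (1 << 0)
#define PQ_FLAGS_MCOS (1 << 1)
#define PQ_FLAGS_LB   (1 << 2)
#define PQ_FLAGS_OOO  (1 << 3)
#define PQ_FLAGS_ACK  (1 << 4)
#define PQ_FLAGS_OFLD (1 << 5)
#define PQ_FLAGS_LLT  (1 << 6)
#define PQ_FLAGS_VFS  (1 << 7)

/* Each !! term is 0 or 1, gating the multiplied resource count. */
static unsigned int num_pqs(unsigned int flags, unsigned int num_tcs,
			    unsigned int num_vfs, unsigned int num_pf_rls)
{
	return !!(flags & PQ_FLAGS_RLS) * num_pf_rls +
	       !!(flags & PQ_FLAGS_MCOS) * num_tcs +
	       !!(flags & PQ_FLAGS_LB) + !!(flags & PQ_FLAGS_OOO) +
	       !!(flags & PQ_FLAGS_ACK) + !!(flags & PQ_FLAGS_OFLD) +
	       !!(flags & PQ_FLAGS_LLT) +
	       !!(flags & PQ_FLAGS_VFS) * num_vfs;
}

int main(void)
{
	/* e.g. an iSCSI PF: LB + ACK + OOO + OFLD -> 4 PQs, no VFs */
	unsigned int f = PQ_FLAGS_LB | PQ_FLAGS_ACK | PQ_FLAGS_OOO |
			 PQ_FLAGS_OFLD;

	printf("%u\n", num_pqs(f, 4, 0, 0));	/* prints 4 */
	return 0;
}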
+/* initialize the top level QM params */
+static void qed_init_qm_params(struct qed_hwfn *p_hwfn)
+{
+       struct qed_qm_info *qm_info = &p_hwfn->qm_info;
+       bool four_port;
+
+       /* pq and vport bases for this PF */
+       qm_info->start_pq = (u16) RESC_START(p_hwfn, QED_PQ);
+       qm_info->start_vport = (u8) RESC_START(p_hwfn, QED_VPORT);
 
+       /* rate limiting and weighted fair queueing are always enabled */
+       qm_info->vport_rl_en = 1;
+       qm_info->vport_wfq_en = 1;
+
+       /* TC config is different for AH 4 port */
+       four_port = p_hwfn->cdev->num_ports_in_engines == MAX_NUM_PORTS_K2;
+
+       /* in AH 4 port we have fewer TCs per port */
+       qm_info->max_phys_tcs_per_port = four_port ? NUM_PHYS_TCS_4PORT_K2 :
+                                                    NUM_OF_PHYS_TCS;
+
+       /* unless MFW indicated otherwise, ooo_tc == 3 for
+        * AH 4-port and 4 otherwise.
+        */
+       if (!qm_info->ooo_tc)
+               qm_info->ooo_tc = four_port ? DCBX_TCP_OOO_K2_4PORT_TC :
+                                             DCBX_TCP_OOO_TC;
+}
+
+/* initialize qm vport params */
+static void qed_init_qm_vport_params(struct qed_hwfn *p_hwfn)
+{
+       struct qed_qm_info *qm_info = &p_hwfn->qm_info;
+       u8 i;
+
+       /* all vports participate in weighted fair queueing */
+       for (i = 0; i < qed_init_qm_get_num_vports(p_hwfn); i++)
+               qm_info->qm_vport_params[i].vport_wfq = 1;
+}
+
+/* initialize qm port params */
+static void qed_init_qm_port_params(struct qed_hwfn *p_hwfn)
+{
        /* Initialize qm port parameters */
-       num_ports = p_hwfn->cdev->num_ports_in_engines;
+       u8 i, active_phys_tcs, num_ports = p_hwfn->cdev->num_ports_in_engines;
+
+       /* Indicate how OOO and high-priority traffic are handled */
+       active_phys_tcs = num_ports == MAX_NUM_PORTS_K2 ?
+                         ACTIVE_TCS_BMAP_4PORT_K2 :
+                         ACTIVE_TCS_BMAP;
+
        for (i = 0; i < num_ports; i++) {
-               p_qm_port = &qm_info->qm_port_params[i];
+               struct init_qm_port_params *p_qm_port =
+                   &p_hwfn->qm_info.qm_port_params[i];
+
                p_qm_port->active = 1;
-               if (num_ports == 4)
-                       p_qm_port->active_phys_tcs = 0x7;
-               else
-                       p_qm_port->active_phys_tcs = 0x9f;
+               p_qm_port->active_phys_tcs = active_phys_tcs;
                p_qm_port->num_pbf_cmd_lines = PBF_MAX_CMD_LINES / num_ports;
                p_qm_port->num_btb_blocks = BTB_MAX_BLOCKS / num_ports;
        }
+}
+
+/* Reset the params which must be reset for qm init. QM init may be called as
+ * a result of flows other than driver load (e.g. dcbx renegotiation). Other
+ * params may be affected by the init but would simply recalculate to the same
+ * values. The allocations made for QM init, ports, vports, pqs and vfqs are not
+ * affected as these amounts stay the same.
+ */
+static void qed_init_qm_reset_params(struct qed_hwfn *p_hwfn)
+{
+       struct qed_qm_info *qm_info = &p_hwfn->qm_info;
+
+       qm_info->num_pqs = 0;
+       qm_info->num_vports = 0;
+       qm_info->num_pf_rls = 0;
+       qm_info->num_vf_pqs = 0;
+       qm_info->first_vf_pq = 0;
+       qm_info->first_mcos_pq = 0;
+       qm_info->first_rl_pq = 0;
+}
+
+static void qed_init_qm_advance_vport(struct qed_hwfn *p_hwfn)
+{
+       struct qed_qm_info *qm_info = &p_hwfn->qm_info;
+
+       qm_info->num_vports++;
+
+       if (qm_info->num_vports > qed_init_qm_get_num_vports(p_hwfn))
+               DP_ERR(p_hwfn,
+                      "vport overflow! qm_info->num_vports %d, qm_init_get_num_vports() %d\n",
+                      qm_info->num_vports, qed_init_qm_get_num_vports(p_hwfn));
+}
+
+/* initialize a single pq and manage qm_info resources accounting.
+ * The pq_init_flags param determines whether the PQ is rate limited
+ * (for VF or PF) and whether a new vport is allocated to the pq or not
+ * (i.e. vport will be shared).
+ */
+
+/* flags for pq init */
+#define PQ_INIT_SHARE_VPORT     (1 << 0)
+#define PQ_INIT_PF_RL           (1 << 1)
+#define PQ_INIT_VF_RL           (1 << 2)
+
+/* defines for pq init */
+#define PQ_INIT_DEFAULT_WRR_GROUP       1
+#define PQ_INIT_DEFAULT_TC              0
+#define PQ_INIT_OFLD_TC                 (p_hwfn->hw_info.offload_tc)
+
+static void qed_init_qm_pq(struct qed_hwfn *p_hwfn,
+                          struct qed_qm_info *qm_info,
+                          u8 tc, u32 pq_init_flags)
+{
+       u16 pq_idx = qm_info->num_pqs, max_pq = qed_init_qm_get_num_pqs(p_hwfn);
+
+       if (pq_idx > max_pq)
+               DP_ERR(p_hwfn,
+                      "pq overflow! pq %d, max pq %d\n", pq_idx, max_pq);
+
+       /* init pq params */
+       qm_info->qm_pq_params[pq_idx].vport_id = qm_info->start_vport +
+           qm_info->num_vports;
+       qm_info->qm_pq_params[pq_idx].tc_id = tc;
+       qm_info->qm_pq_params[pq_idx].wrr_group = PQ_INIT_DEFAULT_WRR_GROUP;
+       qm_info->qm_pq_params[pq_idx].rl_valid =
+           (pq_init_flags & PQ_INIT_PF_RL || pq_init_flags & PQ_INIT_VF_RL);
+
+       /* qm params accounting */
+       qm_info->num_pqs++;
+       if (!(pq_init_flags & PQ_INIT_SHARE_VPORT))
+               qm_info->num_vports++;
+
+       if (pq_init_flags & PQ_INIT_PF_RL)
+               qm_info->num_pf_rls++;
+
+       if (qm_info->num_vports > qed_init_qm_get_num_vports(p_hwfn))
+               DP_ERR(p_hwfn,
+                      "vport overflow! qm_info->num_vports %d, qm_init_get_num_vports() %d\n",
+                      qm_info->num_vports, qed_init_qm_get_num_vports(p_hwfn));
+
+       if (qm_info->num_pf_rls > qed_init_qm_get_num_pf_rls(p_hwfn))
+               DP_ERR(p_hwfn,
+                      "rl overflow! qm_info->num_pf_rls %d, qm_init_get_num_pf_rls() %d\n",
+                      qm_info->num_pf_rls, qed_init_qm_get_num_pf_rls(p_hwfn));
+}
+
+/* get pq index according to PQ_FLAGS */
+static u16 *qed_init_qm_get_idx_from_flags(struct qed_hwfn *p_hwfn,
+                                          u32 pq_flags)
+{
+       struct qed_qm_info *qm_info = &p_hwfn->qm_info;
+
+       /* Can't have multiple flags set here */
+       if (bitmap_weight((unsigned long *)&pq_flags, sizeof(pq_flags)) > 1)
+               goto err;
+
+       switch (pq_flags) {
+       case PQ_FLAGS_RLS:
+               return &qm_info->first_rl_pq;
+       case PQ_FLAGS_MCOS:
+               return &qm_info->first_mcos_pq;
+       case PQ_FLAGS_LB:
+               return &qm_info->pure_lb_pq;
+       case PQ_FLAGS_OOO:
+               return &qm_info->ooo_pq;
+       case PQ_FLAGS_ACK:
+               return &qm_info->pure_ack_pq;
+       case PQ_FLAGS_OFLD:
+               return &qm_info->offload_pq;
+       case PQ_FLAGS_LLT:
+               return &qm_info->low_latency_pq;
+       case PQ_FLAGS_VFS:
+               return &qm_info->first_vf_pq;
+       default:
+               goto err;
+       }
+
+err:
+       DP_ERR(p_hwfn, "BAD pq flags %d\n", pq_flags);
+       return NULL;
+}
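The lookup is only meaningful for exactly one flag bit, hence the weight check ahead of the switch. A standalone equivalent of that guard using a popcount over the full 32-bit mask (GCC/Clang builtin used for brevity):

#include <stdint.h>
#include <stdio.h>

/* Reject composite masks before dispatching on a single flag. */
static int single_flag(uint32_t pq_flags)
{
	return __builtin_popcount(pq_flags) == 1;
}

int main(void)
{
	printf("%d %d %d\n", single_flag(0x8), single_flag(0x9),
	       single_flag(0));	/* prints 1 0 0 */
	return 0;
}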
 
-       qm_info->max_phys_tcs_per_port = NUM_OF_PHYS_TCS;
+/* save pq index in qm info */
+static void qed_init_qm_set_idx(struct qed_hwfn *p_hwfn,
+                               u32 pq_flags, u16 pq_val)
+{
+       u16 *base_pq_idx = qed_init_qm_get_idx_from_flags(p_hwfn, pq_flags);
+
+       *base_pq_idx = p_hwfn->qm_info.start_pq + pq_val;
+}
+
+/* get tx pq index, with the PQ TX base already set (ready for context init) */
+u16 qed_get_cm_pq_idx(struct qed_hwfn *p_hwfn, u32 pq_flags)
+{
+       u16 *base_pq_idx = qed_init_qm_get_idx_from_flags(p_hwfn, pq_flags);
+
+       return *base_pq_idx + CM_TX_PQ_BASE;
+}
+
+u16 qed_get_cm_pq_idx_mcos(struct qed_hwfn *p_hwfn, u8 tc)
+{
+       u8 max_tc = qed_init_qm_get_num_tcs(p_hwfn);
+
+       if (tc > max_tc)
+               DP_ERR(p_hwfn, "tc %d must be smaller than %d\n", tc, max_tc);
+
+       return qed_get_cm_pq_idx(p_hwfn, PQ_FLAGS_MCOS) + tc;
+}
 
-       qm_info->start_pq = (u16)RESC_START(p_hwfn, QED_PQ);
+u16 qed_get_cm_pq_idx_vf(struct qed_hwfn *p_hwfn, u16 vf)
+{
+       u16 max_vf = qed_init_qm_get_num_vfs(p_hwfn);
+
+       if (vf > max_vf)
+               DP_ERR(p_hwfn, "vf %d must be smaller than %d\n", vf, max_vf);
+
+       return qed_get_cm_pq_idx(p_hwfn, PQ_FLAGS_VFS) + vf;
+}
+
+u16 qed_get_cm_pq_idx_rl(struct qed_hwfn *p_hwfn, u8 rl)
+{
+       u16 max_rl = qed_init_qm_get_num_pf_rls(p_hwfn);
+
+       if (rl > max_rl)
+               DP_ERR(p_hwfn, "rl %d must be smaller than %d\n", rl, max_rl);
+
+       return qed_get_cm_pq_idx(p_hwfn, PQ_FLAGS_RLS) + rl;
+}
+
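All the per-type helpers above share one indexing scheme: qm_info stores the first PQ of each group (already offset by start_pq), the caller adds its offset within the group (TC, VF index or RL index), and the result is shifted into the connection manager's TX PQ space by CM_TX_PQ_BASE. A sketch with a placeholder base constant:

#include <stdio.h>

#define CM_TX_PQ_BASE 0x200	/* placeholder; the real base may differ */

/* Group base + offset within the group, moved into CM TX PQ space. */
static unsigned int cm_pq_idx(unsigned int first_group_pq,
			      unsigned int offset_in_group)
{
	return first_group_pq + offset_in_group + CM_TX_PQ_BASE;
}

int main(void)
{
	/* VF 3's PQ when the VF group starts at PQ 10 */
	printf("0x%x\n", cm_pq_idx(10, 3));
	return 0;
}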
+/* Functions for creating specific types of pqs */
+static void qed_init_qm_lb_pq(struct qed_hwfn *p_hwfn)
+{
+       struct qed_qm_info *qm_info = &p_hwfn->qm_info;
+
+       if (!(qed_get_pq_flags(p_hwfn) & PQ_FLAGS_LB))
+               return;
+
+       qed_init_qm_set_idx(p_hwfn, PQ_FLAGS_LB, qm_info->num_pqs);
+       qed_init_qm_pq(p_hwfn, qm_info, PURE_LB_TC, PQ_INIT_SHARE_VPORT);
+}
+
+static void qed_init_qm_ooo_pq(struct qed_hwfn *p_hwfn)
+{
+       struct qed_qm_info *qm_info = &p_hwfn->qm_info;
+
+       if (!(qed_get_pq_flags(p_hwfn) & PQ_FLAGS_OOO))
+               return;
+
+       qed_init_qm_set_idx(p_hwfn, PQ_FLAGS_OOO, qm_info->num_pqs);
+       qed_init_qm_pq(p_hwfn, qm_info, qm_info->ooo_tc, PQ_INIT_SHARE_VPORT);
+}
 
+static void qed_init_qm_pure_ack_pq(struct qed_hwfn *p_hwfn)
+{
+       struct qed_qm_info *qm_info = &p_hwfn->qm_info;
+
+       if (!(qed_get_pq_flags(p_hwfn) & PQ_FLAGS_ACK))
+               return;
+
+       qed_init_qm_set_idx(p_hwfn, PQ_FLAGS_ACK, qm_info->num_pqs);
+       qed_init_qm_pq(p_hwfn, qm_info, PQ_INIT_OFLD_TC, PQ_INIT_SHARE_VPORT);
+}
+
+static void qed_init_qm_offload_pq(struct qed_hwfn *p_hwfn)
+{
+       struct qed_qm_info *qm_info = &p_hwfn->qm_info;
+
+       if (!(qed_get_pq_flags(p_hwfn) & PQ_FLAGS_OFLD))
+               return;
+
+       qed_init_qm_set_idx(p_hwfn, PQ_FLAGS_OFLD, qm_info->num_pqs);
+       qed_init_qm_pq(p_hwfn, qm_info, PQ_INIT_OFLD_TC, PQ_INIT_SHARE_VPORT);
+}
+
+static void qed_init_qm_low_latency_pq(struct qed_hwfn *p_hwfn)
+{
+       struct qed_qm_info *qm_info = &p_hwfn->qm_info;
+
+       if (!(qed_get_pq_flags(p_hwfn) & PQ_FLAGS_LLT))
+               return;
+
+       qed_init_qm_set_idx(p_hwfn, PQ_FLAGS_LLT, qm_info->num_pqs);
+       qed_init_qm_pq(p_hwfn, qm_info, PQ_INIT_OFLD_TC, PQ_INIT_SHARE_VPORT);
+}
+
+static void qed_init_qm_mcos_pqs(struct qed_hwfn *p_hwfn)
+{
+       struct qed_qm_info *qm_info = &p_hwfn->qm_info;
+       u8 tc_idx;
+
+       if (!(qed_get_pq_flags(p_hwfn) & PQ_FLAGS_MCOS))
+               return;
+
+       qed_init_qm_set_idx(p_hwfn, PQ_FLAGS_MCOS, qm_info->num_pqs);
+       for (tc_idx = 0; tc_idx < qed_init_qm_get_num_tcs(p_hwfn); tc_idx++)
+               qed_init_qm_pq(p_hwfn, qm_info, tc_idx, PQ_INIT_SHARE_VPORT);
+}
+
+static void qed_init_qm_vf_pqs(struct qed_hwfn *p_hwfn)
+{
+       struct qed_qm_info *qm_info = &p_hwfn->qm_info;
+       u16 vf_idx, num_vfs = qed_init_qm_get_num_vfs(p_hwfn);
+
+       if (!(qed_get_pq_flags(p_hwfn) & PQ_FLAGS_VFS))
+               return;
+
+       qed_init_qm_set_idx(p_hwfn, PQ_FLAGS_VFS, qm_info->num_pqs);
        qm_info->num_vf_pqs = num_vfs;
-       qm_info->start_vport = (u8) RESC_START(p_hwfn, QED_VPORT);
+       for (vf_idx = 0; vf_idx < num_vfs; vf_idx++)
+               qed_init_qm_pq(p_hwfn,
+                              qm_info, PQ_INIT_DEFAULT_TC, PQ_INIT_VF_RL);
+}
 
-       for (i = 0; i < qm_info->num_vports; i++)
-               qm_info->qm_vport_params[i].vport_wfq = 1;
+static void qed_init_qm_rl_pqs(struct qed_hwfn *p_hwfn)
+{
+       u16 pf_rls_idx, num_pf_rls = qed_init_qm_get_num_pf_rls(p_hwfn);
+       struct qed_qm_info *qm_info = &p_hwfn->qm_info;
 
-       qm_info->vport_rl_en = 1;
-       qm_info->vport_wfq_en = 1;
-       qm_info->pf_rl = pf_rl;
-       qm_info->pf_wfq = pf_wfq;
+       if (!(qed_get_pq_flags(p_hwfn) & PQ_FLAGS_RLS))
+               return;
+
+       qed_init_qm_set_idx(p_hwfn, PQ_FLAGS_RLS, qm_info->num_pqs);
+       for (pf_rls_idx = 0; pf_rls_idx < num_pf_rls; pf_rls_idx++)
+               qed_init_qm_pq(p_hwfn, qm_info, PQ_INIT_OFLD_TC, PQ_INIT_PF_RL);
+}
+
+static void qed_init_qm_pq_params(struct qed_hwfn *p_hwfn)
+{
+       /* rate limited pqs, must come first (FW assumption) */
+       qed_init_qm_rl_pqs(p_hwfn);
+
+       /* pqs for multi cos */
+       qed_init_qm_mcos_pqs(p_hwfn);
+
+       /* pure loopback pq */
+       qed_init_qm_lb_pq(p_hwfn);
+
+       /* out of order pq */
+       qed_init_qm_ooo_pq(p_hwfn);
+
+       /* pure ack pq */
+       qed_init_qm_pure_ack_pq(p_hwfn);
+
+       /* pq for offloaded protocol */
+       qed_init_qm_offload_pq(p_hwfn);
+
+       /* low latency pq */
+       qed_init_qm_low_latency_pq(p_hwfn);
+
+       /* done sharing vports */
+       qed_init_qm_advance_vport(p_hwfn);
+
+       /* pqs for vfs */
+       qed_init_qm_vf_pqs(p_hwfn);
+}
+
+/* compare values of getters against resources amounts */
+static int qed_init_qm_sanity(struct qed_hwfn *p_hwfn)
+{
+       if (qed_init_qm_get_num_vports(p_hwfn) > RESC_NUM(p_hwfn, QED_VPORT)) {
+               DP_ERR(p_hwfn, "requested amount of vports exceeds resource\n");
+               return -EINVAL;
+       }
+
+       if (qed_init_qm_get_num_pqs(p_hwfn) > RESC_NUM(p_hwfn, QED_PQ)) {
+               DP_ERR(p_hwfn, "requested amount of pqs exceeds resource\n");
+               return -EINVAL;
+       }
 
        return 0;
+}
 
-alloc_err:
-       qed_qm_info_free(p_hwfn);
-       return -ENOMEM;
+static void qed_dp_init_qm_params(struct qed_hwfn *p_hwfn)
+{
+       struct qed_qm_info *qm_info = &p_hwfn->qm_info;
+       struct init_qm_vport_params *vport;
+       struct init_qm_port_params *port;
+       struct init_qm_pq_params *pq;
+       int i, tc;
+
+       /* top level params */
+       DP_VERBOSE(p_hwfn,
+                  NETIF_MSG_HW,
+                  "qm init top level params: start_pq %d, start_vport %d, pure_lb_pq %d, offload_pq %d, pure_ack_pq %d\n",
+                  qm_info->start_pq,
+                  qm_info->start_vport,
+                  qm_info->pure_lb_pq,
+                  qm_info->offload_pq, qm_info->pure_ack_pq);
+       DP_VERBOSE(p_hwfn,
+                  NETIF_MSG_HW,
+                  "ooo_pq %d, first_vf_pq %d, num_pqs %d, num_vf_pqs %d, num_vports %d, max_phys_tcs_per_port %d\n",
+                  qm_info->ooo_pq,
+                  qm_info->first_vf_pq,
+                  qm_info->num_pqs,
+                  qm_info->num_vf_pqs,
+                  qm_info->num_vports, qm_info->max_phys_tcs_per_port);
+       DP_VERBOSE(p_hwfn,
+                  NETIF_MSG_HW,
+                  "pf_rl_en %d, pf_wfq_en %d, vport_rl_en %d, vport_wfq_en %d, pf_wfq %d, pf_rl %d, num_pf_rls %d, pq_flags %x\n",
+                  qm_info->pf_rl_en,
+                  qm_info->pf_wfq_en,
+                  qm_info->vport_rl_en,
+                  qm_info->vport_wfq_en,
+                  qm_info->pf_wfq,
+                  qm_info->pf_rl,
+                  qm_info->num_pf_rls, qed_get_pq_flags(p_hwfn));
+
+       /* port table */
+       for (i = 0; i < p_hwfn->cdev->num_ports_in_engines; i++) {
+               port = &(qm_info->qm_port_params[i]);
+               DP_VERBOSE(p_hwfn,
+                          NETIF_MSG_HW,
+                          "port idx %d, active %d, active_phys_tcs %d, num_pbf_cmd_lines %d, num_btb_blocks %d, reserved %d\n",
+                          i,
+                          port->active,
+                          port->active_phys_tcs,
+                          port->num_pbf_cmd_lines,
+                          port->num_btb_blocks, port->reserved);
+       }
+
+       /* vport table */
+       for (i = 0; i < qm_info->num_vports; i++) {
+               vport = &(qm_info->qm_vport_params[i]);
+               DP_VERBOSE(p_hwfn,
+                          NETIF_MSG_HW,
+                          "vport idx %d, vport_rl %d, wfq %d, first_tx_pq_id [ ",
+                          qm_info->start_vport + i,
+                          vport->vport_rl, vport->vport_wfq);
+               for (tc = 0; tc < NUM_OF_TCS; tc++)
+                       DP_VERBOSE(p_hwfn,
+                                  NETIF_MSG_HW,
+                                  "%d ", vport->first_tx_pq_id[tc]);
+               DP_VERBOSE(p_hwfn, NETIF_MSG_HW, "]\n");
+       }
+
+       /* pq table */
+       for (i = 0; i < qm_info->num_pqs; i++) {
+               pq = &(qm_info->qm_pq_params[i]);
+               DP_VERBOSE(p_hwfn,
+                          NETIF_MSG_HW,
+                          "pq idx %d, vport_id %d, tc %d, wrr_grp %d, rl_valid %d\n",
+                          qm_info->start_pq + i,
+                          pq->vport_id,
+                          pq->tc_id, pq->wrr_group, pq->rl_valid);
+       }
+}
+
+static void qed_init_qm_info(struct qed_hwfn *p_hwfn)
+{
+       /* reset params required for init run */
+       qed_init_qm_reset_params(p_hwfn);
+
+       /* init QM top level params */
+       qed_init_qm_params(p_hwfn);
+
+       /* init QM port params */
+       qed_init_qm_port_params(p_hwfn);
+
+       /* init QM vport params */
+       qed_init_qm_vport_params(p_hwfn);
+
+       /* init QM physical queue params */
+       qed_init_qm_pq_params(p_hwfn);
+
+       /* display all that init */
+       qed_dp_init_qm_params(p_hwfn);
 }
 
 /* This function reconfigures the QM pf on the fly.
@@ -391,17 +766,8 @@ int qed_qm_reconf(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
        bool b_rc;
        int rc;
 
-       /* qm_info is allocated in qed_init_qm_info() which is already called
-        * from qed_resc_alloc() or previous call of qed_qm_reconf().
-        * The allocated size may change each init, so we free it before next
-        * allocation.
-        */
-       qed_qm_info_free(p_hwfn);
-
        /* initialize qed's qm data structure */
-       rc = qed_init_qm_info(p_hwfn, false);
-       if (rc)
-               return rc;
+       qed_init_qm_info(p_hwfn);
 
        /* stop PF's qm queues */
        spin_lock_bh(&qm_lock);
@@ -415,7 +781,7 @@ int qed_qm_reconf(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
        qed_init_clear_rt_data(p_hwfn);
 
        /* prepare QM portion of runtime array */
-       qed_qm_init_pf(p_hwfn);
+       qed_qm_init_pf(p_hwfn, p_ptt);
 
        /* activate init tool on runtime array */
        rc = qed_init_run(p_hwfn, p_ptt, PHASE_QM_PF, p_hwfn->rel_pf_id,
@@ -434,6 +800,47 @@ int qed_qm_reconf(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
        return 0;
 }
 
+static int qed_alloc_qm_data(struct qed_hwfn *p_hwfn)
+{
+       struct qed_qm_info *qm_info = &p_hwfn->qm_info;
+       int rc;
+
+       rc = qed_init_qm_sanity(p_hwfn);
+       if (rc)
+               goto alloc_err;
+
+       qm_info->qm_pq_params = kzalloc(sizeof(*qm_info->qm_pq_params) *
+                                       qed_init_qm_get_num_pqs(p_hwfn),
+                                       GFP_KERNEL);
+       if (!qm_info->qm_pq_params)
+               goto alloc_err;
+
+       qm_info->qm_vport_params = kzalloc(sizeof(*qm_info->qm_vport_params) *
+                                          qed_init_qm_get_num_vports(p_hwfn),
+                                          GFP_KERNEL);
+       if (!qm_info->qm_vport_params)
+               goto alloc_err;
+
+       qm_info->qm_port_params = kzalloc(sizeof(*qm_info->qm_port_params) *
+                                         p_hwfn->cdev->num_ports_in_engines,
+                                         GFP_KERNEL);
+       if (!qm_info->qm_port_params)
+               goto alloc_err;
+
+       qm_info->wfq_data = kzalloc(sizeof(*qm_info->wfq_data) *
+                                   qed_init_qm_get_num_vports(p_hwfn),
+                                   GFP_KERNEL);
+       if (!qm_info->wfq_data)
+               goto alloc_err;
+
+       return 0;
+
+alloc_err:
+       DP_NOTICE(p_hwfn, "Failed to allocate memory for QM params\n");
+       qed_qm_info_free(p_hwfn);
+       return -ENOMEM;
+}
+
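Each QM table is now sized once, at allocation time, by the matching qed_init_qm_get_num_*() getter; later re-inits (e.g. a dcbx renegotiation calling qed_init_qm_info()) only recompute contents, so nothing is freed and reallocated on the fly. A userspace sketch of the same sizing pattern; calloc() additionally zero-fills and guards the count * size multiplication against overflow:

#include <stdlib.h>

struct pq_params { unsigned int vport_id, tc_id, wrr_group, rl_valid; };

/* Size the table from the getter's count, exactly once. */
static struct pq_params *alloc_pq_table(size_t num_pqs)
{
	return calloc(num_pqs, sizeof(struct pq_params));
}

int main(void)
{
	struct pq_params *t = alloc_pq_table(16);

	free(t);
	return 0;
}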
 int qed_resc_alloc(struct qed_dev *cdev)
 {
        struct qed_iscsi_info *p_iscsi_info;
@@ -442,8 +849,10 @@ int qed_resc_alloc(struct qed_dev *cdev)
 #ifdef CONFIG_QED_LL2
        struct qed_ll2_info *p_ll2_info;
 #endif
+       u32 rdma_tasks, excess_tasks;
        struct qed_consq *p_consq;
        struct qed_eq *p_eq;
+       u32 line_count;
        int i, rc = 0;
 
        if (IS_VF(cdev))
@@ -465,19 +874,44 @@ int qed_resc_alloc(struct qed_dev *cdev)
                /* Set the HW cid/tid numbers (in the context manager)
                 * Must be done prior to any further computations.
                 */
-               rc = qed_cxt_set_pf_params(p_hwfn);
+               rc = qed_cxt_set_pf_params(p_hwfn, RDMA_MAX_TIDS);
                if (rc)
                        goto alloc_err;
 
-               /* Prepare and process QM requirements */
-               rc = qed_init_qm_info(p_hwfn, true);
+               rc = qed_alloc_qm_data(p_hwfn);
                if (rc)
                        goto alloc_err;
 
+               /* init qm info */
+               qed_init_qm_info(p_hwfn);
+
                /* Compute the ILT client partition */
-               rc = qed_cxt_cfg_ilt_compute(p_hwfn);
-               if (rc)
-                       goto alloc_err;
+               rc = qed_cxt_cfg_ilt_compute(p_hwfn, &line_count);
+               if (rc) {
+                       DP_NOTICE(p_hwfn,
+                                 "too many ILT lines; re-computing with less lines\n");
+                       /* In case there are not enough ILT lines we reduce the
+                        * number of RDMA tasks and re-compute.
+                        */
+                       excess_tasks =
+                           qed_cxt_cfg_ilt_compute_excess(p_hwfn, line_count);
+                       if (!excess_tasks)
+                               goto alloc_err;
+
+                       rdma_tasks = RDMA_MAX_TIDS - excess_tasks;
+                       rc = qed_cxt_set_pf_params(p_hwfn, rdma_tasks);
+                       if (rc)
+                               goto alloc_err;
+
+                       rc = qed_cxt_cfg_ilt_compute(p_hwfn, &line_count);
+                       if (rc) {
+                               DP_ERR(p_hwfn,
+                                      "failed ILT compute. Requested too many lines: %u\n",
+                                      line_count);
+
+                               goto alloc_err;
+                       }
+               }
 
                /* CID map / ILT shadow table / T2
                 * The table sizes are determined by the computations above
@@ -674,11 +1108,19 @@ int qed_final_cleanup(struct qed_hwfn *p_hwfn,
        return rc;
 }
 
-static void qed_calc_hw_mode(struct qed_hwfn *p_hwfn)
+static int qed_calc_hw_mode(struct qed_hwfn *p_hwfn)
 {
        int hw_mode = 0;
 
-       hw_mode = (1 << MODE_BB_B0);
+       if (QED_IS_BB_B0(p_hwfn->cdev)) {
+               hw_mode |= 1 << MODE_BB;
+       } else if (QED_IS_AH(p_hwfn->cdev)) {
+               hw_mode |= 1 << MODE_K2;
+       } else {
+               DP_NOTICE(p_hwfn, "Unknown chip type %#x\n",
+                         p_hwfn->cdev->type);
+               return -EINVAL;
+       }
 
        switch (p_hwfn->cdev->num_ports_in_engines) {
        case 1:
@@ -693,7 +1135,7 @@ static void qed_calc_hw_mode(struct qed_hwfn *p_hwfn)
        default:
                DP_NOTICE(p_hwfn, "num_ports_in_engine = %d not supported\n",
                          p_hwfn->cdev->num_ports_in_engines);
-               return;
+               return -EINVAL;
        }
 
        switch (p_hwfn->cdev->mf_mode) {
@@ -719,6 +1161,8 @@ static void qed_calc_hw_mode(struct qed_hwfn *p_hwfn)
        DP_VERBOSE(p_hwfn, (NETIF_MSG_PROBE | NETIF_MSG_IFUP),
                   "Configuring function for hw_mode: 0x%08x\n",
                   p_hwfn->hw_info.hw_mode);
+
+       return 0;
 }
 
 /* Init run time data for all PFs on an engine. */
@@ -748,16 +1192,67 @@ static void qed_init_cau_rt_data(struct qed_dev *cdev)
        }
 }
 
+static void qed_init_cache_line_size(struct qed_hwfn *p_hwfn,
+                                    struct qed_ptt *p_ptt)
+{
+       u32 val, wr_mbs, cache_line_size;
+
+       val = qed_rd(p_hwfn, p_ptt, PSWRQ2_REG_WR_MBS0);
+       switch (val) {
+       case 0:
+               wr_mbs = 128;
+               break;
+       case 1:
+               wr_mbs = 256;
+               break;
+       case 2:
+               wr_mbs = 512;
+               break;
+       default:
+               DP_INFO(p_hwfn,
+                       "Unexpected value of PSWRQ2_REG_WR_MBS0 [0x%x]. Avoid configuring PGLUE_B_REG_CACHE_LINE_SIZE.\n",
+                       val);
+               return;
+       }
+
+       cache_line_size = min_t(u32, L1_CACHE_BYTES, wr_mbs);
+       switch (cache_line_size) {
+       case 32:
+               val = 0;
+               break;
+       case 64:
+               val = 1;
+               break;
+       case 128:
+               val = 2;
+               break;
+       case 256:
+               val = 3;
+               break;
+       default:
+               DP_INFO(p_hwfn,
+                       "Unexpected value of cache line size [0x%x]. Avoid configuring PGLUE_B_REG_CACHE_LINE_SIZE.\n",
+                       cache_line_size);
+       }
+
+       if (L1_CACHE_BYTES > wr_mbs)
+               DP_INFO(p_hwfn,
+                       "The cache line size for padding is suboptimal for performance [OS cache line size 0x%x, wr mbs 0x%x]\n",
+                       L1_CACHE_BYTES, wr_mbs);
+
+       STORE_RT_REG(p_hwfn, PGLUE_REG_B_CACHE_LINE_SIZE_RT_OFFSET, val);
+}
+
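The function above derives the PGLUE cache-line setting from two inputs: the PSWRQ2 write-MBS encoding (0/1/2 for 128/256/512 bytes) and the host cache line size, clamped to the smaller of the two and re-encoded as 32/64/128/256 -> 0/1/2/3. A worked standalone version of that mapping:

#include <stdio.h>

/* Clamp to min(L1 line, wr_mbs), then encode for the register. */
static int cache_line_encoding(unsigned int l1_bytes, unsigned int wr_mbs)
{
	unsigned int cl = l1_bytes < wr_mbs ? l1_bytes : wr_mbs;

	switch (cl) {
	case 32: return 0;
	case 64: return 1;
	case 128: return 2;
	case 256: return 3;
	default: return -1;	/* unexpected; leave register untouched */
	}
}

int main(void)
{
	/* x86-64 typical: 64-byte cache lines, wr_mbs 512 -> encoding 1 */
	printf("%d\n", cache_line_encoding(64, 512));
	return 0;
}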
 static int qed_hw_init_common(struct qed_hwfn *p_hwfn,
                              struct qed_ptt *p_ptt, int hw_mode)
 {
        struct qed_qm_info *qm_info = &p_hwfn->qm_info;
        struct qed_qm_common_rt_init_params params;
        struct qed_dev *cdev = p_hwfn->cdev;
+       u8 vf_id, max_num_vfs;
        u16 num_pfs, pf_id;
        u32 concrete_fid;
        int rc = 0;
-       u8 vf_id;
 
        qed_init_cau_rt_data(cdev);
 
@@ -784,17 +1279,7 @@ static int qed_hw_init_common(struct qed_hwfn *p_hwfn,
 
        qed_cxt_hw_init_common(p_hwfn);
 
-       /* Close gate from NIG to BRB/Storm; By default they are open, but
-        * we close them to prevent NIG from passing data to reset blocks.
-        * Should have been done in the ENGINE phase, but init-tool lacks
-        * proper port-pretend capabilities.
-        */
-       qed_wr(p_hwfn, p_ptt, NIG_REG_RX_BRB_OUT_EN, 0);
-       qed_wr(p_hwfn, p_ptt, NIG_REG_STORM_OUT_EN, 0);
-       qed_port_pretend(p_hwfn, p_ptt, p_hwfn->port_id ^ 1);
-       qed_wr(p_hwfn, p_ptt, NIG_REG_RX_BRB_OUT_EN, 0);
-       qed_wr(p_hwfn, p_ptt, NIG_REG_STORM_OUT_EN, 0);
-       qed_port_unpretend(p_hwfn, p_ptt);
+       qed_init_cache_line_size(p_hwfn, p_ptt);
 
        rc = qed_init_run(p_hwfn, p_ptt, PHASE_ENGINE, ANY_PHASE_ID, hw_mode);
        if (rc)
@@ -814,7 +1299,8 @@ static int qed_hw_init_common(struct qed_hwfn *p_hwfn,
                qed_fid_pretend(p_hwfn, p_ptt, p_hwfn->rel_pf_id);
        }
 
-       for (vf_id = 0; vf_id < MAX_NUM_VFS_BB; vf_id++) {
+       max_num_vfs = QED_IS_AH(cdev) ? MAX_NUM_VFS_K2 : MAX_NUM_VFS_BB;
+       for (vf_id = 0; vf_id < max_num_vfs; vf_id++) {
                concrete_fid = qed_vfid_to_concrete(p_hwfn, vf_id);
                qed_fid_pretend(p_hwfn, p_ptt, (u16) concrete_fid);
                qed_wr(p_hwfn, p_ptt, CCFC_REG_STRONG_ENABLE_VF, 0x1);
@@ -876,7 +1362,7 @@ qed_hw_init_pf_doorbell_bar(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
        int rc = 0;
        u8 cond;
 
-       db_bar_size = qed_hw_bar_size(p_hwfn, BAR_ID_1);
+       db_bar_size = qed_hw_bar_size(p_hwfn, p_ptt, BAR_ID_1);
        if (p_hwfn->cdev->num_hwfns > 1)
                db_bar_size /= 2;
 
@@ -987,7 +1473,7 @@ static int qed_hw_init_pf(struct qed_hwfn *p_hwfn,
                p_hwfn->qm_info.pf_rl = 100000;
        }
 
-       qed_cxt_hw_init_pf(p_hwfn);
+       qed_cxt_hw_init_pf(p_hwfn, p_ptt);
 
        qed_int_igu_init_rt(p_hwfn);
 
@@ -1095,25 +1581,34 @@ static void qed_reset_mb_shadow(struct qed_hwfn *p_hwfn,
               p_hwfn->mcp_info->mfw_mb_cur, p_hwfn->mcp_info->mfw_mb_length);
 }
 
-int qed_hw_init(struct qed_dev *cdev,
-               struct qed_tunn_start_params *p_tunn,
-               bool b_hw_start,
-               enum qed_int_mode int_mode,
-               bool allow_npar_tx_switch,
-               const u8 *bin_fw_data)
+static void
+qed_fill_load_req_params(struct qed_load_req_params *p_load_req,
+                        struct qed_drv_load_params *p_drv_load)
+{
+       memset(p_load_req, 0, sizeof(*p_load_req));
+
+       p_load_req->drv_role = p_drv_load->is_crash_kernel ?
+                              QED_DRV_ROLE_KDUMP : QED_DRV_ROLE_OS;
+       p_load_req->timeout_val = p_drv_load->mfw_timeout_val;
+       p_load_req->avoid_eng_reset = p_drv_load->avoid_eng_reset;
+       p_load_req->override_force_load = p_drv_load->override_force_load;
+}
+
+int qed_hw_init(struct qed_dev *cdev, struct qed_hw_init_params *p_params)
 {
+       struct qed_load_req_params load_req_params;
        u32 load_code, param, drv_mb_param;
        bool b_default_mtu = true;
        struct qed_hwfn *p_hwfn;
        int rc = 0, mfw_rc, i;
 
-       if ((int_mode == QED_INT_MODE_MSI) && (cdev->num_hwfns > 1)) {
+       if ((p_params->int_mode == QED_INT_MODE_MSI) && (cdev->num_hwfns > 1)) {
                DP_NOTICE(cdev, "MSI mode is not supported for CMT devices\n");
                return -EINVAL;
        }
 
        if (IS_PF(cdev)) {
-               rc = qed_init_fw_data(cdev, bin_fw_data);
+               rc = qed_init_fw_data(cdev, p_params->bin_fw_data);
                if (rc)
                        return rc;
        }
@@ -1135,19 +1630,25 @@ int qed_hw_init(struct qed_dev *cdev,
                /* Enable DMAE in PXP */
                rc = qed_change_pci_hwfn(p_hwfn, p_hwfn->p_main_ptt, true);
 
-               qed_calc_hw_mode(p_hwfn);
+               rc = qed_calc_hw_mode(p_hwfn);
+               if (rc)
+                       return rc;
 
-               rc = qed_mcp_load_req(p_hwfn, p_hwfn->p_main_ptt, &load_code);
+               qed_fill_load_req_params(&load_req_params,
+                                        p_params->p_drv_load_params);
+               rc = qed_mcp_load_req(p_hwfn, p_hwfn->p_main_ptt,
+                                     &load_req_params);
                if (rc) {
-                       DP_NOTICE(p_hwfn, "Failed sending LOAD_REQ command\n");
+                       DP_NOTICE(p_hwfn, "Failed sending LOAD_REQ command\n");
                        return rc;
                }
 
-               qed_reset_mb_shadow(p_hwfn, p_hwfn->p_main_ptt);
-
+               load_code = load_req_params.load_code;
                DP_VERBOSE(p_hwfn, QED_MSG_SP,
-                          "Load request was sent. Resp:0x%x, Load code: 0x%x\n",
-                          rc, load_code);
+                          "Load request was sent. Load code: 0x%x\n",
+                          load_code);
+
+               qed_reset_mb_shadow(p_hwfn, p_hwfn->p_main_ptt);
 
                p_hwfn->first_on_engine = (load_code ==
                                           FW_MSG_CODE_DRV_LOAD_ENGINE);
@@ -1168,11 +1669,15 @@ int qed_hw_init(struct qed_dev *cdev,
                /* Fall into */
                case FW_MSG_CODE_DRV_LOAD_FUNCTION:
                        rc = qed_hw_init_pf(p_hwfn, p_hwfn->p_main_ptt,
-                                           p_tunn, p_hwfn->hw_info.hw_mode,
-                                           b_hw_start, int_mode,
-                                           allow_npar_tx_switch);
+                                           p_params->p_tunn,
+                                           p_hwfn->hw_info.hw_mode,
+                                           p_params->b_hw_start,
+                                           p_params->int_mode,
+                                           p_params->allow_npar_tx_switch);
                        break;
                default:
+                       DP_NOTICE(p_hwfn,
+                                 "Unexpected load code [0x%08x]", load_code);
                        rc = -EINVAL;
                        break;
                }
@@ -1212,10 +1717,7 @@ int qed_hw_init(struct qed_dev *cdev,
 
        if (IS_PF(cdev)) {
                p_hwfn = QED_LEADING_HWFN(cdev);
-               drv_mb_param = (FW_MAJOR_VERSION << 24) |
-                              (FW_MINOR_VERSION << 16) |
-                              (FW_REVISION_VERSION << 8) |
-                              (FW_ENGINEERING_VERSION);
+               drv_mb_param = STORM_FW_VERSION;
                rc = qed_mcp_cmd(p_hwfn, p_hwfn->p_main_ptt,
                                 DRV_MSG_CODE_OV_UPDATE_STORM_FW_VER,
                                 drv_mb_param, &load_code, &param);
@@ -1290,27 +1792,53 @@ void qed_hw_timers_stop_all(struct qed_dev *cdev)
 
 int qed_hw_stop(struct qed_dev *cdev)
 {
-       int rc = 0, t_rc;
+       struct qed_hwfn *p_hwfn;
+       struct qed_ptt *p_ptt;
+       int rc, rc2 = 0;
        int j;
 
        for_each_hwfn(cdev, j) {
-               struct qed_hwfn *p_hwfn = &cdev->hwfns[j];
-               struct qed_ptt *p_ptt = p_hwfn->p_main_ptt;
+               p_hwfn = &cdev->hwfns[j];
+               p_ptt = p_hwfn->p_main_ptt;
 
                DP_VERBOSE(p_hwfn, NETIF_MSG_IFDOWN, "Stopping hw/fw\n");
 
                if (IS_VF(cdev)) {
                        qed_vf_pf_int_cleanup(p_hwfn);
+                       rc = qed_vf_pf_reset(p_hwfn);
+                       if (rc) {
+                               DP_NOTICE(p_hwfn,
+                                         "qed_vf_pf_reset failed. rc = %d.\n",
+                                         rc);
+                               rc2 = -EINVAL;
+                       }
                        continue;
                }
 
                /* mark the hw as uninitialized... */
                p_hwfn->hw_init_done = false;
 
+               /* Send unload command to MCP */
+               rc = qed_mcp_unload_req(p_hwfn, p_ptt);
+               if (rc) {
+                       DP_NOTICE(p_hwfn,
+                                 "Failed sending a UNLOAD_REQ command. rc = %d.\n",
+                                 rc);
+                       rc2 = -EINVAL;
+               }
+
+               qed_slowpath_irq_sync(p_hwfn);
+
+               /* After this point no MFW attentions are expected, e.g. prevent
+                * race between pf stop and dcbx pf update.
+                */
                rc = qed_sp_pf_stop(p_hwfn);
-               if (rc)
+               if (rc) {
                        DP_NOTICE(p_hwfn,
-                                 "Failed to close PF against FW. Continue to stop HW to prevent illegal host access by the device\n");
+                                 "Failed to close PF against FW [rc = %d]. Continue to stop HW to prevent illegal host access by the device.\n",
+                                 rc);
+                       rc2 = -EINVAL;
+               }
 
                qed_wr(p_hwfn, p_ptt,
                       NIG_REG_RX_LLH_BRB_GATE_DNTFWD_PERPF, 0x1);
@@ -1333,34 +1861,54 @@ int qed_hw_stop(struct qed_dev *cdev)
 
                /* Need to wait 1ms to guarantee SBs are cleared */
                usleep_range(1000, 2000);
+
+               /* Disable PF in HW blocks */
+               qed_wr(p_hwfn, p_ptt, DORQ_REG_PF_DB_ENABLE, 0);
+               qed_wr(p_hwfn, p_ptt, QM_REG_PF_EN, 0);
+
+               rc = qed_mcp_unload_done(p_hwfn, p_ptt);
+               if (rc) {
+                       DP_NOTICE(p_hwfn,
+                                 "Failed sending a UNLOAD_DONE command. rc = %d.\n",
+                                 rc);
+                       rc2 = -EINVAL;
+               }
        }
 
        if (IS_PF(cdev)) {
+               p_hwfn = QED_LEADING_HWFN(cdev);
+               p_ptt = QED_LEADING_HWFN(cdev)->p_main_ptt;
+
                /* Disable DMAE in PXP - in CMT, this should only be done for
                 * first hw-function, and only after all transactions have
                 * stopped for all active hw-functions.
                 */
-               t_rc = qed_change_pci_hwfn(&cdev->hwfns[0],
-                                          cdev->hwfns[0].p_main_ptt, false);
-               if (t_rc != 0)
-                       rc = t_rc;
+               rc = qed_change_pci_hwfn(p_hwfn, p_ptt, false);
+               if (rc) {
+                       DP_NOTICE(p_hwfn,
+                                 "qed_change_pci_hwfn failed. rc = %d.\n", rc);
+                       rc2 = -EINVAL;
+               }
        }
 
-       return rc;
+       return rc2;
 }
 
-void qed_hw_stop_fastpath(struct qed_dev *cdev)
+int qed_hw_stop_fastpath(struct qed_dev *cdev)
 {
        int j;
 
        for_each_hwfn(cdev, j) {
                struct qed_hwfn *p_hwfn = &cdev->hwfns[j];
-               struct qed_ptt *p_ptt = p_hwfn->p_main_ptt;
+               struct qed_ptt *p_ptt;
 
                if (IS_VF(cdev)) {
                        qed_vf_pf_int_cleanup(p_hwfn);
                        continue;
                }
+               p_ptt = qed_ptt_acquire(p_hwfn);
+               if (!p_ptt)
+                       return -EAGAIN;
 
                DP_VERBOSE(p_hwfn,
                           NETIF_MSG_IFDOWN, "Shutting down the fastpath\n");
@@ -1378,100 +1926,28 @@ void qed_hw_stop_fastpath(struct qed_dev *cdev)
 
                /* Need to wait 1ms to guarantee SBs are cleared */
                usleep_range(1000, 2000);
-       }
-}
-
-void qed_hw_start_fastpath(struct qed_hwfn *p_hwfn)
-{
-       if (IS_VF(p_hwfn->cdev))
-               return;
-
-       /* Re-open incoming traffic */
-       qed_wr(p_hwfn, p_hwfn->p_main_ptt,
-              NIG_REG_RX_LLH_BRB_GATE_DNTFWD_PERPF, 0x0);
-}
-
-static int qed_reg_assert(struct qed_hwfn *p_hwfn,
-                         struct qed_ptt *p_ptt, u32 reg, bool expected)
-{
-       u32 assert_val = qed_rd(p_hwfn, p_ptt, reg);
-
-       if (assert_val != expected) {
-               DP_NOTICE(p_hwfn, "Value at address 0x%08x != 0x%08x\n",
-                         reg, expected);
-               return -EINVAL;
+               qed_ptt_release(p_hwfn, p_ptt);
        }
 
        return 0;
 }
 
-int qed_hw_reset(struct qed_dev *cdev)
+int qed_hw_start_fastpath(struct qed_hwfn *p_hwfn)
 {
-       int rc = 0;
-       u32 unload_resp, unload_param;
-       u32 wol_param;
-       int i;
-
-       switch (cdev->wol_config) {
-       case QED_OV_WOL_DISABLED:
-               wol_param = DRV_MB_PARAM_UNLOAD_WOL_DISABLED;
-               break;
-       case QED_OV_WOL_ENABLED:
-               wol_param = DRV_MB_PARAM_UNLOAD_WOL_ENABLED;
-               break;
-       default:
-               DP_NOTICE(cdev,
-                         "Unknown WoL configuration %02x\n", cdev->wol_config);
-               /* Fallthrough */
-       case QED_OV_WOL_DEFAULT:
-               wol_param = DRV_MB_PARAM_UNLOAD_WOL_MCP;
-       }
-
-       for_each_hwfn(cdev, i) {
-               struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
-
-               if (IS_VF(cdev)) {
-                       rc = qed_vf_pf_reset(p_hwfn);
-                       if (rc)
-                               return rc;
-                       continue;
-               }
-
-               DP_VERBOSE(p_hwfn, NETIF_MSG_IFDOWN, "Resetting hw/fw\n");
-
-               /* Check for incorrect states */
-               qed_reg_assert(p_hwfn, p_hwfn->p_main_ptt,
-                              QM_REG_USG_CNT_PF_TX, 0);
-               qed_reg_assert(p_hwfn, p_hwfn->p_main_ptt,
-                              QM_REG_USG_CNT_PF_OTHER, 0);
+       struct qed_ptt *p_ptt;
 
-               /* Disable PF in HW blocks */
-               qed_wr(p_hwfn, p_hwfn->p_main_ptt, DORQ_REG_PF_DB_ENABLE, 0);
-               qed_wr(p_hwfn, p_hwfn->p_main_ptt, QM_REG_PF_EN, 0);
-               qed_wr(p_hwfn, p_hwfn->p_main_ptt,
-                      TCFC_REG_STRONG_ENABLE_PF, 0);
-               qed_wr(p_hwfn, p_hwfn->p_main_ptt,
-                      CCFC_REG_STRONG_ENABLE_PF, 0);
+       if (IS_VF(p_hwfn->cdev))
+               return 0;
 
-               /* Send unload command to MCP */
-               rc = qed_mcp_cmd(p_hwfn, p_hwfn->p_main_ptt,
-                                DRV_MSG_CODE_UNLOAD_REQ, wol_param,
-                                &unload_resp, &unload_param);
-               if (rc) {
-                       DP_NOTICE(p_hwfn, "qed_hw_reset: UNLOAD_REQ failed\n");
-                       unload_resp = FW_MSG_CODE_DRV_UNLOAD_ENGINE;
-               }
+       p_ptt = qed_ptt_acquire(p_hwfn);
+       if (!p_ptt)
+               return -EAGAIN;
 
-               rc = qed_mcp_cmd(p_hwfn, p_hwfn->p_main_ptt,
-                                DRV_MSG_CODE_UNLOAD_DONE,
-                                0, &unload_resp, &unload_param);
-               if (rc) {
-                       DP_NOTICE(p_hwfn, "qed_hw_reset: UNLOAD_DONE failed\n");
-                       return rc;
-               }
-       }
+       /* Re-open incoming traffic */
+       qed_wr(p_hwfn, p_ptt, NIG_REG_RX_LLH_BRB_GATE_DNTFWD_PERPF, 0x0);
+       qed_ptt_release(p_hwfn, p_ptt);
 
-       return rc;
+       return 0;
 }
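
For illustration, the acquire/use/release discipline that qed_hw_stop_fastpath() and qed_hw_start_fastpath() now share, as a minimal standalone sketch (assuming only the qed helpers shown in this diff; qed_ptt_acquire() returns NULL when no PTT window is free):

        static int sketch_touch_reg(struct qed_hwfn *p_hwfn, u32 addr, u32 val)
        {
                struct qed_ptt *p_ptt = qed_ptt_acquire(p_hwfn);

                if (!p_ptt)
                        return -EAGAIN; /* no free PTT window; caller may retry */

                qed_wr(p_hwfn, p_ptt, addr, val);       /* the guarded access */
                qed_ptt_release(p_hwfn, p_ptt);

                return 0;
        }
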
 
 /* Free hwfn memory and resources acquired in hw_hwfn_prepare */
@@ -1485,10 +1961,25 @@ static void qed_hw_hwfn_free(struct qed_hwfn *p_hwfn)
 static void qed_hw_hwfn_prepare(struct qed_hwfn *p_hwfn)
 {
        /* clear indirect access */
-       qed_wr(p_hwfn, p_hwfn->p_main_ptt, PGLUE_B_REG_PGL_ADDR_88_F0, 0);
-       qed_wr(p_hwfn, p_hwfn->p_main_ptt, PGLUE_B_REG_PGL_ADDR_8C_F0, 0);
-       qed_wr(p_hwfn, p_hwfn->p_main_ptt, PGLUE_B_REG_PGL_ADDR_90_F0, 0);
-       qed_wr(p_hwfn, p_hwfn->p_main_ptt, PGLUE_B_REG_PGL_ADDR_94_F0, 0);
+       if (QED_IS_AH(p_hwfn->cdev)) {
+               qed_wr(p_hwfn, p_hwfn->p_main_ptt,
+                      PGLUE_B_REG_PGL_ADDR_E8_F0_K2, 0);
+               qed_wr(p_hwfn, p_hwfn->p_main_ptt,
+                      PGLUE_B_REG_PGL_ADDR_EC_F0_K2, 0);
+               qed_wr(p_hwfn, p_hwfn->p_main_ptt,
+                      PGLUE_B_REG_PGL_ADDR_F0_F0_K2, 0);
+               qed_wr(p_hwfn, p_hwfn->p_main_ptt,
+                      PGLUE_B_REG_PGL_ADDR_F4_F0_K2, 0);
+       } else {
+               qed_wr(p_hwfn, p_hwfn->p_main_ptt,
+                      PGLUE_B_REG_PGL_ADDR_88_F0_BB, 0);
+               qed_wr(p_hwfn, p_hwfn->p_main_ptt,
+                      PGLUE_B_REG_PGL_ADDR_8C_F0_BB, 0);
+               qed_wr(p_hwfn, p_hwfn->p_main_ptt,
+                      PGLUE_B_REG_PGL_ADDR_90_F0_BB, 0);
+               qed_wr(p_hwfn, p_hwfn->p_main_ptt,
+                      PGLUE_B_REG_PGL_ADDR_94_F0_BB, 0);
+       }
 
        /* Clean Previous errors if such exist */
        qed_wr(p_hwfn, p_hwfn->p_main_ptt,
@@ -1522,7 +2013,7 @@ static void qed_hw_set_feat(struct qed_hwfn *p_hwfn)
 {
        u32 *feat_num = p_hwfn->hw_info.feat_num;
        struct qed_sb_cnt_info sb_cnt_info;
-       int num_features = 1;
+       u32 non_l2_sbs = 0;
 
        if (IS_ENABLED(CONFIG_QED_RDMA) &&
            p_hwfn->hw_info.personality == QED_PCI_ETH_ROCE) {
@@ -1530,204 +2021,260 @@ static void qed_hw_set_feat(struct qed_hwfn *p_hwfn)
                 * the status blocks equally between L2 / RoCE but with
                 * consideration as to how many l2 queues / cnqs we have.
                 */
-               num_features++;
-
                feat_num[QED_RDMA_CNQ] =
-                       min_t(u32, RESC_NUM(p_hwfn, QED_SB) / num_features,
+                       min_t(u32, RESC_NUM(p_hwfn, QED_SB) / 2,
                              RESC_NUM(p_hwfn, QED_RDMA_CNQ_RAM));
-       }
 
-       feat_num[QED_PF_L2_QUE] = min_t(u32, RESC_NUM(p_hwfn, QED_SB) /
-                                               num_features,
-                                       RESC_NUM(p_hwfn, QED_L2_QUEUE));
-
-       memset(&sb_cnt_info, 0, sizeof(sb_cnt_info));
-       qed_int_get_num_sbs(p_hwfn, &sb_cnt_info);
-       feat_num[QED_VF_L2_QUE] =
-           min_t(u32,
-                 RESC_NUM(p_hwfn, QED_L2_QUEUE) -
-                 FEAT_NUM(p_hwfn, QED_PF_L2_QUE), sb_cnt_info.sb_iov_cnt);
+               non_l2_sbs = feat_num[QED_RDMA_CNQ];
+       }
 
+       if (p_hwfn->hw_info.personality == QED_PCI_ETH_ROCE ||
+           p_hwfn->hw_info.personality == QED_PCI_ETH) {
+               /* Start by allocating VF queues, then PF's */
+               memset(&sb_cnt_info, 0, sizeof(sb_cnt_info));
+               qed_int_get_num_sbs(p_hwfn, &sb_cnt_info);
+               feat_num[QED_VF_L2_QUE] = min_t(u32,
+                                               RESC_NUM(p_hwfn, QED_L2_QUEUE),
+                                               sb_cnt_info.sb_iov_cnt);
+               feat_num[QED_PF_L2_QUE] = min_t(u32,
+                                               RESC_NUM(p_hwfn, QED_SB) -
+                                               non_l2_sbs,
+                                               RESC_NUM(p_hwfn,
+                                                        QED_L2_QUEUE) -
+                                               FEAT_NUM(p_hwfn,
+                                                        QED_VF_L2_QUE));
+       }
+
+       if (p_hwfn->hw_info.personality == QED_PCI_ISCSI)
+               feat_num[QED_ISCSI_CQ] = min_t(u32, RESC_NUM(p_hwfn, QED_SB),
+                                              RESC_NUM(p_hwfn,
+                                                       QED_CMDQS_CQS));
        DP_VERBOSE(p_hwfn,
                   NETIF_MSG_PROBE,
-                  "#PF_L2_QUEUES=%d VF_L2_QUEUES=%d #ROCE_CNQ=%d #SBS=%d num_features=%d\n",
+                  "#PF_L2_QUEUES=%d VF_L2_QUEUES=%d #ROCE_CNQ=%d ISCSI_CQ=%d #SBS=%d\n",
                   (int)FEAT_NUM(p_hwfn, QED_PF_L2_QUE),
                   (int)FEAT_NUM(p_hwfn, QED_VF_L2_QUE),
                   (int)FEAT_NUM(p_hwfn, QED_RDMA_CNQ),
-                  RESC_NUM(p_hwfn, QED_SB), num_features);
+                  (int)FEAT_NUM(p_hwfn, QED_ISCSI_CQ),
+                  RESC_NUM(p_hwfn, QED_SB));
 }
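
The carving order above is fixed: RoCE CNQs may take up to half the status blocks, VF L2 queues are allocated next, and the PF keeps what remains. A worked example with hypothetical counts (plain C, runnable outside the kernel):

        #include <stdio.h>

        static unsigned int min_u32(unsigned int a, unsigned int b)
        {
                return a < b ? a : b;
        }

        int main(void)
        {
                /* hypothetical budget: 64 SBs, 32 L2 queues, 16 IOV SBs, 8 CNQs */
                unsigned int sb = 64, l2 = 32, sb_iov = 16, cnq_ram = 8;
                unsigned int cnq, vf_l2, pf_l2;

                cnq = min_u32(sb / 2, cnq_ram);                 /* 8  */
                vf_l2 = min_u32(l2, sb_iov);                    /* 16 */
                pf_l2 = min_u32(sb - cnq, l2 - vf_l2);          /* 16 */

                printf("CNQ=%u VF_L2=%u PF_L2=%u\n", cnq, vf_l2, pf_l2);
                return 0;
        }
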
 
-static enum resource_id_enum qed_hw_get_mfw_res_id(enum qed_resources res_id)
+const char *qed_hw_get_resc_name(enum qed_resources res_id)
 {
-       enum resource_id_enum mfw_res_id = RESOURCE_NUM_INVALID;
-
        switch (res_id) {
-       case QED_SB:
-               mfw_res_id = RESOURCE_NUM_SB_E;
-               break;
        case QED_L2_QUEUE:
-               mfw_res_id = RESOURCE_NUM_L2_QUEUE_E;
-               break;
+               return "L2_QUEUE";
        case QED_VPORT:
-               mfw_res_id = RESOURCE_NUM_VPORT_E;
-               break;
+               return "VPORT";
        case QED_RSS_ENG:
-               mfw_res_id = RESOURCE_NUM_RSS_ENGINES_E;
-               break;
+               return "RSS_ENG";
        case QED_PQ:
-               mfw_res_id = RESOURCE_NUM_PQ_E;
-               break;
+               return "PQ";
        case QED_RL:
-               mfw_res_id = RESOURCE_NUM_RL_E;
-               break;
+               return "RL";
        case QED_MAC:
+               return "MAC";
        case QED_VLAN:
-               /* Each VFC resource can accommodate both a MAC and a VLAN */
-               mfw_res_id = RESOURCE_VFC_FILTER_E;
-               break;
+               return "VLAN";
+       case QED_RDMA_CNQ_RAM:
+               return "RDMA_CNQ_RAM";
        case QED_ILT:
-               mfw_res_id = RESOURCE_ILT_E;
-               break;
+               return "ILT";
        case QED_LL2_QUEUE:
-               mfw_res_id = RESOURCE_LL2_QUEUE_E;
-               break;
-       case QED_RDMA_CNQ_RAM:
+               return "LL2_QUEUE";
        case QED_CMDQS_CQS:
-               /* CNQ/CMDQS are the same resource */
-               mfw_res_id = RESOURCE_CQS_E;
-               break;
+               return "CMDQS_CQS";
        case QED_RDMA_STATS_QUEUE:
-               mfw_res_id = RESOURCE_RDMA_STATS_QUEUE_E;
-               break;
+               return "RDMA_STATS_QUEUE";
+       case QED_BDQ:
+               return "BDQ";
+       case QED_SB:
+               return "SB";
        default:
-               break;
+               return "UNKNOWN_RESOURCE";
+       }
+}
+
+static int
+__qed_hw_set_soft_resc_size(struct qed_hwfn *p_hwfn,
+                           struct qed_ptt *p_ptt,
+                           enum qed_resources res_id,
+                           u32 resc_max_val, u32 *p_mcp_resp)
+{
+       int rc;
+
+       rc = qed_mcp_set_resc_max_val(p_hwfn, p_ptt, res_id,
+                                     resc_max_val, p_mcp_resp);
+       if (rc) {
+               DP_NOTICE(p_hwfn,
+                         "MFW response failure for a max value setting of resource %d [%s]\n",
+                         res_id, qed_hw_get_resc_name(res_id));
+               return rc;
+       }
+
+       if (*p_mcp_resp != FW_MSG_CODE_RESOURCE_ALLOC_OK)
+               DP_INFO(p_hwfn,
+                       "Failed to set the max value of resource %d [%s]. mcp_resp = 0x%08x.\n",
+                       res_id, qed_hw_get_resc_name(res_id), *p_mcp_resp);
+
+       return 0;
+}
+
+static int
+qed_hw_set_soft_resc_size(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
+{
+       bool b_ah = QED_IS_AH(p_hwfn->cdev);
+       u32 resc_max_val, mcp_resp;
+       u8 res_id;
+       int rc;
+
+       for (res_id = 0; res_id < QED_MAX_RESC; res_id++) {
+               switch (res_id) {
+               case QED_LL2_QUEUE:
+                       resc_max_val = MAX_NUM_LL2_RX_QUEUES;
+                       break;
+               case QED_RDMA_CNQ_RAM:
+                       /* No need for a case for QED_CMDQS_CQS since
+                        * CNQ/CMDQS are the same resource.
+                        */
+                       resc_max_val = NUM_OF_CMDQS_CQS;
+                       break;
+               case QED_RDMA_STATS_QUEUE:
+                       resc_max_val = b_ah ? RDMA_NUM_STATISTIC_COUNTERS_K2
+                           : RDMA_NUM_STATISTIC_COUNTERS_BB;
+                       break;
+               case QED_BDQ:
+                       resc_max_val = BDQ_NUM_RESOURCES;
+                       break;
+               default:
+                       continue;
+               }
+
+               rc = __qed_hw_set_soft_resc_size(p_hwfn, p_ptt, res_id,
+                                                resc_max_val, &mcp_resp);
+               if (rc)
+                       return rc;
+
+               /* There's no point in continuing to the next resource if the
+                * command is not supported by the MFW.
+                * We do continue if the command is supported but the resource
+                * is unknown to the MFW. Such a resource will later be
+                * configured with the default allocation values.
+                */
+               if (mcp_resp == FW_MSG_CODE_UNSUPPORTED)
+                       return -EINVAL;
        }
 
-       return mfw_res_id;
+       return 0;
 }
 
-static u32 qed_hw_get_dflt_resc_num(struct qed_hwfn *p_hwfn,
-                                   enum qed_resources res_id)
+static
+int qed_hw_get_dflt_resc(struct qed_hwfn *p_hwfn,
+                        enum qed_resources res_id,
+                        u32 *p_resc_num, u32 *p_resc_start)
 {
        u8 num_funcs = p_hwfn->num_funcs_on_engine;
+       bool b_ah = QED_IS_AH(p_hwfn->cdev);
        struct qed_sb_cnt_info sb_cnt_info;
-       u32 dflt_resc_num = 0;
 
        switch (res_id) {
-       case QED_SB:
-               memset(&sb_cnt_info, 0, sizeof(sb_cnt_info));
-               qed_int_get_num_sbs(p_hwfn, &sb_cnt_info);
-               dflt_resc_num = sb_cnt_info.sb_cnt;
-               break;
        case QED_L2_QUEUE:
-               dflt_resc_num = MAX_NUM_L2_QUEUES_BB / num_funcs;
+               *p_resc_num = (b_ah ? MAX_NUM_L2_QUEUES_K2 :
+                              MAX_NUM_L2_QUEUES_BB) / num_funcs;
                break;
        case QED_VPORT:
-               dflt_resc_num = MAX_NUM_VPORTS_BB / num_funcs;
+               *p_resc_num = (b_ah ? MAX_NUM_VPORTS_K2 :
+                              MAX_NUM_VPORTS_BB) / num_funcs;
                break;
        case QED_RSS_ENG:
-               dflt_resc_num = ETH_RSS_ENGINE_NUM_BB / num_funcs;
+               *p_resc_num = (b_ah ? ETH_RSS_ENGINE_NUM_K2 :
+                              ETH_RSS_ENGINE_NUM_BB) / num_funcs;
                break;
        case QED_PQ:
-               /* The granularity of the PQs is 8 */
-               dflt_resc_num = MAX_QM_TX_QUEUES_BB / num_funcs;
-               dflt_resc_num &= ~0x7;
+               *p_resc_num = (b_ah ? MAX_QM_TX_QUEUES_K2 :
+                              MAX_QM_TX_QUEUES_BB) / num_funcs;
+               *p_resc_num &= ~0x7;    /* The granularity of the PQs is 8 */
                break;
        case QED_RL:
-               dflt_resc_num = MAX_QM_GLOBAL_RLS / num_funcs;
+               *p_resc_num = MAX_QM_GLOBAL_RLS / num_funcs;
                break;
        case QED_MAC:
        case QED_VLAN:
                /* Each VFC resource can accommodate both a MAC and a VLAN */
-               dflt_resc_num = ETH_NUM_MAC_FILTERS / num_funcs;
+               *p_resc_num = ETH_NUM_MAC_FILTERS / num_funcs;
                break;
        case QED_ILT:
-               dflt_resc_num = PXP_NUM_ILT_RECORDS_BB / num_funcs;
+               *p_resc_num = (b_ah ? PXP_NUM_ILT_RECORDS_K2 :
+                              PXP_NUM_ILT_RECORDS_BB) / num_funcs;
                break;
        case QED_LL2_QUEUE:
-               dflt_resc_num = MAX_NUM_LL2_RX_QUEUES / num_funcs;
+               *p_resc_num = MAX_NUM_LL2_RX_QUEUES / num_funcs;
                break;
        case QED_RDMA_CNQ_RAM:
        case QED_CMDQS_CQS:
                /* CNQ/CMDQS are the same resource */
-               dflt_resc_num = NUM_OF_CMDQS_CQS / num_funcs;
+               *p_resc_num = NUM_OF_CMDQS_CQS / num_funcs;
                break;
        case QED_RDMA_STATS_QUEUE:
-               dflt_resc_num = RDMA_NUM_STATISTIC_COUNTERS_BB / num_funcs;
+               *p_resc_num = (b_ah ? RDMA_NUM_STATISTIC_COUNTERS_K2 :
+                              RDMA_NUM_STATISTIC_COUNTERS_BB) / num_funcs;
                break;
-       default:
+       case QED_BDQ:
+               if (p_hwfn->hw_info.personality != QED_PCI_ISCSI &&
+                   p_hwfn->hw_info.personality != QED_PCI_FCOE)
+                       *p_resc_num = 0;
+               else
+                       *p_resc_num = 1;
+               break;
+       case QED_SB:
+               memset(&sb_cnt_info, 0, sizeof(sb_cnt_info));
+               qed_int_get_num_sbs(p_hwfn, &sb_cnt_info);
+               *p_resc_num = sb_cnt_info.sb_cnt;
                break;
+       default:
+               return -EINVAL;
        }
 
-       return dflt_resc_num;
-}
-
-static const char *qed_hw_get_resc_name(enum qed_resources res_id)
-{
        switch (res_id) {
-       case QED_SB:
-               return "SB";
-       case QED_L2_QUEUE:
-               return "L2_QUEUE";
-       case QED_VPORT:
-               return "VPORT";
-       case QED_RSS_ENG:
-               return "RSS_ENG";
-       case QED_PQ:
-               return "PQ";
-       case QED_RL:
-               return "RL";
-       case QED_MAC:
-               return "MAC";
-       case QED_VLAN:
-               return "VLAN";
-       case QED_RDMA_CNQ_RAM:
-               return "RDMA_CNQ_RAM";
-       case QED_ILT:
-               return "ILT";
-       case QED_LL2_QUEUE:
-               return "LL2_QUEUE";
-       case QED_CMDQS_CQS:
-               return "CMDQS_CQS";
-       case QED_RDMA_STATS_QUEUE:
-               return "RDMA_STATS_QUEUE";
+       case QED_BDQ:
+               if (!*p_resc_num)
+                       *p_resc_start = 0;
+               else if (p_hwfn->cdev->num_ports_in_engines == 4)
+                       *p_resc_start = p_hwfn->port_id;
+               else if (p_hwfn->hw_info.personality == QED_PCI_ISCSI)
+                       *p_resc_start = p_hwfn->port_id;
+               else if (p_hwfn->hw_info.personality == QED_PCI_FCOE)
+                       *p_resc_start = p_hwfn->port_id + 2;
+               break;
        default:
-               return "UNKNOWN_RESOURCE";
+               *p_resc_start = *p_resc_num * p_hwfn->enabled_func_idx;
+               break;
        }
+
+       return 0;
 }
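
For the common resources the default above is an equal split: each function receives max/num_funcs items, offset by its own index; only BDQ is placed by port instead. With hypothetical numbers:

        /* illustrative values, not taken from any real part */
        unsigned int max_l2 = 128, num_funcs = 4, enabled_func_idx = 2;
        unsigned int resc_num = max_l2 / num_funcs;             /* 32 per function */
        unsigned int resc_start = resc_num * enabled_func_idx;  /* this PF owns 64..95 */
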
 
-static int qed_hw_set_resc_info(struct qed_hwfn *p_hwfn,
-                               enum qed_resources res_id)
+static int __qed_hw_set_resc_info(struct qed_hwfn *p_hwfn,
+                                 enum qed_resources res_id)
 {
-       u32 dflt_resc_num = 0, dflt_resc_start = 0, mcp_resp, mcp_param;
-       u32 *p_resc_num, *p_resc_start;
-       struct resource_info resc_info;
+       u32 dflt_resc_num = 0, dflt_resc_start = 0;
+       u32 mcp_resp, *p_resc_num, *p_resc_start;
        int rc;
 
        p_resc_num = &RESC_NUM(p_hwfn, res_id);
        p_resc_start = &RESC_START(p_hwfn, res_id);
 
-       /* Default values assumes that each function received equal share */
-       dflt_resc_num = qed_hw_get_dflt_resc_num(p_hwfn, res_id);
-       if (!dflt_resc_num) {
+       rc = qed_hw_get_dflt_resc(p_hwfn, res_id, &dflt_resc_num,
+                                 &dflt_resc_start);
+       if (rc) {
                DP_ERR(p_hwfn,
                       "Failed to get default amount for resource %d [%s]\n",
                       res_id, qed_hw_get_resc_name(res_id));
-               return -EINVAL;
-       }
-       dflt_resc_start = dflt_resc_num * p_hwfn->enabled_func_idx;
-
-       memset(&resc_info, 0, sizeof(resc_info));
-       resc_info.res_id = qed_hw_get_mfw_res_id(res_id);
-       if (resc_info.res_id == RESOURCE_NUM_INVALID) {
-               DP_ERR(p_hwfn,
-                      "Failed to match resource %d [%s] with the MFW resources\n",
-                      res_id, qed_hw_get_resc_name(res_id));
-               return -EINVAL;
+               return rc;
        }
 
-       rc = qed_mcp_get_resc_info(p_hwfn, p_hwfn->p_main_ptt, &resc_info,
-                                  &mcp_resp, &mcp_param);
+       rc = qed_mcp_get_resc_info(p_hwfn, p_hwfn->p_main_ptt, res_id,
+                                  &mcp_resp, p_resc_num, p_resc_start);
        if (rc) {
                DP_NOTICE(p_hwfn,
                          "MFW response failure for an allocation request for resource %d [%s]\n",
@@ -1740,13 +2287,12 @@ static int qed_hw_set_resc_info(struct qed_hwfn *p_hwfn,
         * - There is an internal error in the MFW while processing the request
         * - The resource ID is unknown to the MFW
         */
-       if (mcp_resp != FW_MSG_CODE_RESOURCE_ALLOC_OK &&
-           mcp_resp != FW_MSG_CODE_RESOURCE_ALLOC_DEPRECATED) {
-               DP_NOTICE(p_hwfn,
-                         "Resource %d [%s]: No allocation info was received [mcp_resp 0x%x]. Applying default values [num %d, start %d].\n",
-                         res_id,
-                         qed_hw_get_resc_name(res_id),
-                         mcp_resp, dflt_resc_num, dflt_resc_start);
+       if (mcp_resp != FW_MSG_CODE_RESOURCE_ALLOC_OK) {
+               DP_INFO(p_hwfn,
+                       "Failed to receive allocation info for resource %d [%s]. mcp_resp = 0x%x. Applying default values [%d,%d].\n",
+                       res_id,
+                       qed_hw_get_resc_name(res_id),
+                       mcp_resp, dflt_resc_num, dflt_resc_start);
                *p_resc_num = dflt_resc_num;
                *p_resc_start = dflt_resc_start;
                goto out;
@@ -1754,13 +2300,9 @@ static int qed_hw_set_resc_info(struct qed_hwfn *p_hwfn,
 
        /* Special handling for status blocks; Would be revised in future */
        if (res_id == QED_SB) {
-               resc_info.size -= 1;
-               resc_info.offset -= p_hwfn->enabled_func_idx;
+               *p_resc_num -= 1;
+               *p_resc_start -= p_hwfn->enabled_func_idx;
        }
-
-       *p_resc_num = resc_info.size;
-       *p_resc_start = resc_info.offset;
-
 out:
        /* PQs have to divide by 8 [that's the HW granularity].
         * Reduce number so it would fit.
@@ -1778,19 +2320,88 @@ out:
        return 0;
 }
 
-static int qed_hw_get_resc(struct qed_hwfn *p_hwfn)
+static int qed_hw_set_resc_info(struct qed_hwfn *p_hwfn)
 {
-       u8 res_id;
        int rc;
+       u8 res_id;
 
        for (res_id = 0; res_id < QED_MAX_RESC; res_id++) {
-               rc = qed_hw_set_resc_info(p_hwfn, res_id);
+               rc = __qed_hw_set_resc_info(p_hwfn, res_id);
                if (rc)
                        return rc;
        }
 
+       return 0;
+}
+
+#define QED_RESC_ALLOC_LOCK_RETRY_CNT           10
+#define QED_RESC_ALLOC_LOCK_RETRY_INTVL_US      10000  /* 10 msec */
+
+static int qed_hw_get_resc(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
+{
+       struct qed_resc_unlock_params resc_unlock_params;
+       struct qed_resc_lock_params resc_lock_params;
+       bool b_ah = QED_IS_AH(p_hwfn->cdev);
+       u8 res_id;
+       int rc;
+
+       /* Setting the max values of the soft resources and the following
+        * resource allocation queries should be atomic. Since several PFs can
+        * run in parallel - a resource lock is needed.
+        * If either the resource lock or resource set value commands are not
+        * supported - skip the max values setting, release the lock if
+        * needed, and proceed to the queries. Other failures, including a
+        * failure to acquire the lock, will cause this function to fail.
+        */
+       memset(&resc_lock_params, 0, sizeof(resc_lock_params));
+       resc_lock_params.resource = QED_RESC_LOCK_RESC_ALLOC;
+       resc_lock_params.retry_num = QED_RESC_ALLOC_LOCK_RETRY_CNT;
+       resc_lock_params.retry_interval = QED_RESC_ALLOC_LOCK_RETRY_INTVL_US;
+       resc_lock_params.sleep_b4_retry = true;
+       memset(&resc_unlock_params, 0, sizeof(resc_unlock_params));
+       resc_unlock_params.resource = QED_RESC_LOCK_RESC_ALLOC;
+
+       rc = qed_mcp_resc_lock(p_hwfn, p_ptt, &resc_lock_params);
+       if (rc && rc != -EINVAL) {
+               return rc;
+       } else if (rc == -EINVAL) {
+               DP_INFO(p_hwfn,
+                       "Skip the max values setting of the soft resources since the resource lock is not supported by the MFW\n");
+       } else if (!rc && !resc_lock_params.b_granted) {
+               DP_NOTICE(p_hwfn,
+                         "Failed to acquire the resource lock for the resource allocation commands\n");
+               return -EBUSY;
+       } else {
+               rc = qed_hw_set_soft_resc_size(p_hwfn, p_ptt);
+               if (rc && rc != -EINVAL) {
+                       DP_NOTICE(p_hwfn,
+                                 "Failed to set the max values of the soft resources\n");
+                       goto unlock_and_exit;
+               } else if (rc == -EINVAL) {
+                       DP_INFO(p_hwfn,
+                               "Skip the max values setting of the soft resources since it is not supported by the MFW\n");
+                       rc = qed_mcp_resc_unlock(p_hwfn, p_ptt,
+                                                &resc_unlock_params);
+                       if (rc)
+                               DP_INFO(p_hwfn,
+                                       "Failed to release the resource lock for the resource allocation commands\n");
+               }
+       }
+
+       rc = qed_hw_set_resc_info(p_hwfn);
+       if (rc)
+               goto unlock_and_exit;
+
+       if (resc_lock_params.b_granted && !resc_unlock_params.b_released) {
+               rc = qed_mcp_resc_unlock(p_hwfn, p_ptt, &resc_unlock_params);
+               if (rc)
+                       DP_INFO(p_hwfn,
+                               "Failed to release the resource lock for the resource allocation commands\n");
+       }
+
        /* Sanity for ILT */
-       if ((RESC_END(p_hwfn, QED_ILT) > PXP_NUM_ILT_RECORDS_BB)) {
+       if ((b_ah && (RESC_END(p_hwfn, QED_ILT) > PXP_NUM_ILT_RECORDS_K2)) ||
+           (!b_ah && (RESC_END(p_hwfn, QED_ILT) > PXP_NUM_ILT_RECORDS_BB))) {
                DP_NOTICE(p_hwfn, "Can't assign ILT pages [%08x,...,%08x]\n",
                          RESC_START(p_hwfn, QED_ILT),
                          RESC_END(p_hwfn, QED_ILT) - 1);
@@ -1799,8 +2410,6 @@ static int qed_hw_get_resc(struct qed_hwfn *p_hwfn)
 
        qed_hw_set_feat(p_hwfn);
 
-       DP_VERBOSE(p_hwfn, NETIF_MSG_PROBE,
-                  "The numbers for each resource are:\n");
        for (res_id = 0; res_id < QED_MAX_RESC; res_id++)
                DP_VERBOSE(p_hwfn, NETIF_MSG_PROBE, "%s = %d start = %d\n",
                           qed_hw_get_resc_name(res_id),
@@ -1808,6 +2417,11 @@ static int qed_hw_get_resc(struct qed_hwfn *p_hwfn)
                           RESC_START(p_hwfn, res_id));
 
        return 0;
+
+unlock_and_exit:
+       if (resc_lock_params.b_granted && !resc_unlock_params.b_released)
+               qed_mcp_resc_unlock(p_hwfn, p_ptt, &resc_unlock_params);
+       return rc;
 }
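
Condensed, the lock handling above tolerates exactly one failure mode, -EINVAL, which means the MFW predates the resource-lock command; with the retry budget configured here (10 attempts, 10000 us apart) a denied grant is reported after roughly 100 ms. A sketch of that tri-state contract, using the same names as the diff with the error paths simplified:

        rc = qed_mcp_resc_lock(p_hwfn, p_ptt, &resc_lock_params);
        if (rc == -EINVAL) {
                /* old MFW without lock support: query the defaults unlocked */
        } else if (rc) {
                return rc;              /* real mailbox failure */
        } else if (!resc_lock_params.b_granted) {
                return -EBUSY;          /* lock still held elsewhere after
                                         * 10 retries x 10 ms */
        }
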
 
 static int qed_hw_get_nvm_info(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
@@ -1860,9 +2474,15 @@ static int qed_hw_get_nvm_info(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
        case NVM_CFG1_GLOB_NETWORK_PORT_MODE_2X25G:
                p_hwfn->hw_info.port_mode = QED_PORT_MODE_DE_2X25G;
                break;
+       case NVM_CFG1_GLOB_NETWORK_PORT_MODE_2X10G:
+               p_hwfn->hw_info.port_mode = QED_PORT_MODE_DE_2X10G;
+               break;
        case NVM_CFG1_GLOB_NETWORK_PORT_MODE_1X25G:
                p_hwfn->hw_info.port_mode = QED_PORT_MODE_DE_1X25G;
                break;
+       case NVM_CFG1_GLOB_NETWORK_PORT_MODE_4X25G:
+               p_hwfn->hw_info.port_mode = QED_PORT_MODE_DE_4X25G;
+               break;
        default:
                DP_NOTICE(p_hwfn, "Unknown port mode in 0x%08x\n", core_cfg);
                break;
@@ -1976,8 +2596,9 @@ static void qed_get_num_funcs(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
 {
        u8 num_funcs, enabled_func_idx = p_hwfn->rel_pf_id;
        u32 reg_function_hide, tmp, eng_mask, low_pfs_mask;
+       struct qed_dev *cdev = p_hwfn->cdev;
 
-       num_funcs = MAX_NUM_PFS_BB;
+       num_funcs = QED_IS_AH(cdev) ? MAX_NUM_PFS_K2 : MAX_NUM_PFS_BB;
 
        /* Bit 0 of MISCS_REG_FUNCTION_HIDE indicates whether the bypass values
         * in the other bits are selected.
@@ -1990,12 +2611,17 @@ static void qed_get_num_funcs(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
        reg_function_hide = qed_rd(p_hwfn, p_ptt, MISCS_REG_FUNCTION_HIDE);
 
        if (reg_function_hide & 0x1) {
-               if (QED_PATH_ID(p_hwfn) && p_hwfn->cdev->num_hwfns == 1) {
-                       num_funcs = 0;
-                       eng_mask = 0xaaaa;
+               if (QED_IS_BB(cdev)) {
+                       if (QED_PATH_ID(p_hwfn) && cdev->num_hwfns == 1) {
+                               num_funcs = 0;
+                               eng_mask = 0xaaaa;
+                       } else {
+                               num_funcs = 1;
+                               eng_mask = 0x5554;
+                       }
                } else {
                        num_funcs = 1;
-                       eng_mask = 0x5554;
+                       eng_mask = 0xfffe;
                }
 
                /* Get the number of the enabled functions on the engine */
@@ -2027,24 +2653,12 @@ static void qed_get_num_funcs(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
                   p_hwfn->enabled_func_idx, p_hwfn->num_funcs_on_engine);
 }
 
-static int
-qed_get_hw_info(struct qed_hwfn *p_hwfn,
-               struct qed_ptt *p_ptt,
-               enum qed_pci_personality personality)
+static void qed_hw_info_port_num_bb(struct qed_hwfn *p_hwfn,
+                                   struct qed_ptt *p_ptt)
 {
        u32 port_mode;
-       int rc;
-
-       /* Since all information is common, only first hwfns should do this */
-       if (IS_LEAD_HWFN(p_hwfn)) {
-               rc = qed_iov_hw_info(p_hwfn);
-               if (rc)
-                       return rc;
-       }
 
-       /* Read the port mode */
-       port_mode = qed_rd(p_hwfn, p_ptt,
-                          CNIG_REG_NW_PORT_MODE_BB_B0);
+       port_mode = qed_rd(p_hwfn, p_ptt, CNIG_REG_NW_PORT_MODE_BB_B0);
 
        if (port_mode < 3) {
                p_hwfn->cdev->num_ports_in_engines = 1;
@@ -2057,6 +2671,54 @@ qed_get_hw_info(struct qed_hwfn *p_hwfn,
                /* Default num_ports_in_engines to something */
                p_hwfn->cdev->num_ports_in_engines = 1;
        }
+}
+
+static void qed_hw_info_port_num_ah(struct qed_hwfn *p_hwfn,
+                                   struct qed_ptt *p_ptt)
+{
+       u32 port;
+       int i;
+
+       p_hwfn->cdev->num_ports_in_engines = 0;
+
+       for (i = 0; i < MAX_NUM_PORTS_K2; i++) {
+               port = qed_rd(p_hwfn, p_ptt,
+                             CNIG_REG_NIG_PORT0_CONF_K2 + (i * 4));
+               if (port & 1)
+                       p_hwfn->cdev->num_ports_in_engines++;
+       }
+
+       if (!p_hwfn->cdev->num_ports_in_engines) {
+               DP_NOTICE(p_hwfn, "All NIG ports are inactive\n");
+
+               /* Default num_ports_in_engines to something */
+               p_hwfn->cdev->num_ports_in_engines = 1;
+       }
+}
+
+static void qed_hw_info_port_num(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
+{
+       if (QED_IS_BB(p_hwfn->cdev))
+               qed_hw_info_port_num_bb(p_hwfn, p_ptt);
+       else
+               qed_hw_info_port_num_ah(p_hwfn, p_ptt);
+}
+
+static int
+qed_get_hw_info(struct qed_hwfn *p_hwfn,
+               struct qed_ptt *p_ptt,
+               enum qed_pci_personality personality)
+{
+       int rc;
+
+       /* Since all information is common, only first hwfns should do this */
+       if (IS_LEAD_HWFN(p_hwfn)) {
+               rc = qed_iov_hw_info(p_hwfn);
+               if (rc)
+                       return rc;
+       }
+
+       qed_hw_info_port_num(p_hwfn, p_ptt);
 
        qed_hw_get_nvm_info(p_hwfn, p_ptt);
 
@@ -2085,33 +2747,48 @@ qed_get_hw_info(struct qed_hwfn *p_hwfn,
                p_hwfn->hw_info.personality = protocol;
        }
 
+       p_hwfn->hw_info.num_hw_tc = NUM_PHYS_TCS_4PORT_K2;
+       p_hwfn->hw_info.num_active_tc = 1;
+
        qed_get_num_funcs(p_hwfn, p_ptt);
 
        if (qed_mcp_is_init(p_hwfn))
                p_hwfn->hw_info.mtu = p_hwfn->mcp_info->func_info.mtu;
 
-       return qed_hw_get_resc(p_hwfn);
+       return qed_hw_get_resc(p_hwfn, p_ptt);
 }
 
-static int qed_get_dev_info(struct qed_dev *cdev)
+static int qed_get_dev_info(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
 {
-       struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev);
+       struct qed_dev *cdev = p_hwfn->cdev;
+       u16 device_id_mask;
        u32 tmp;
 
        /* Read Vendor Id / Device Id */
        pci_read_config_word(cdev->pdev, PCI_VENDOR_ID, &cdev->vendor_id);
        pci_read_config_word(cdev->pdev, PCI_DEVICE_ID, &cdev->device_id);
 
-       cdev->chip_num = (u16)qed_rd(p_hwfn, p_hwfn->p_main_ptt,
-                                    MISCS_REG_CHIP_NUM);
-       cdev->chip_rev = (u16)qed_rd(p_hwfn, p_hwfn->p_main_ptt,
-                                    MISCS_REG_CHIP_REV);
+       /* Determine type */
+       device_id_mask = cdev->device_id & QED_DEV_ID_MASK;
+       switch (device_id_mask) {
+       case QED_DEV_ID_MASK_BB:
+               cdev->type = QED_DEV_TYPE_BB;
+               break;
+       case QED_DEV_ID_MASK_AH:
+               cdev->type = QED_DEV_TYPE_AH;
+               break;
+       default:
+               DP_NOTICE(p_hwfn, "Unknown device id 0x%x\n", cdev->device_id);
+               return -EBUSY;
+       }
+
+       cdev->chip_num = (u16)qed_rd(p_hwfn, p_ptt, MISCS_REG_CHIP_NUM);
+       cdev->chip_rev = (u16)qed_rd(p_hwfn, p_ptt, MISCS_REG_CHIP_REV);
+
        MASK_FIELD(CHIP_REV, cdev->chip_rev);
 
-       cdev->type = QED_DEV_TYPE_BB;
        /* Learn number of HW-functions */
-       tmp = qed_rd(p_hwfn, p_hwfn->p_main_ptt,
-                    MISCS_REG_CMT_ENABLED_FOR_PAIR);
+       tmp = qed_rd(p_hwfn, p_ptt, MISCS_REG_CMT_ENABLED_FOR_PAIR);
 
        if (tmp & (1 << p_hwfn->rel_pf_id)) {
                DP_NOTICE(cdev->hwfns, "device in CMT mode\n");
@@ -2120,15 +2797,17 @@ static int qed_get_dev_info(struct qed_dev *cdev)
                cdev->num_hwfns = 1;
        }
 
-       cdev->chip_bond_id = qed_rd(p_hwfn, p_hwfn->p_main_ptt,
+       cdev->chip_bond_id = qed_rd(p_hwfn, p_ptt,
                                    MISCS_REG_CHIP_TEST_REG) >> 4;
        MASK_FIELD(CHIP_BOND_ID, cdev->chip_bond_id);
-       cdev->chip_metal = (u16)qed_rd(p_hwfn, p_hwfn->p_main_ptt,
-                                      MISCS_REG_CHIP_METAL);
+       cdev->chip_metal = (u16)qed_rd(p_hwfn, p_ptt, MISCS_REG_CHIP_METAL);
        MASK_FIELD(CHIP_METAL, cdev->chip_metal);
 
        DP_INFO(cdev->hwfns,
-               "Chip details - Num: %04x Rev: %04x Bond id: %04x Metal: %04x\n",
+               "Chip details - %s %c%d, Num: %04x Rev: %04x Bond id: %04x Metal: %04x\n",
+               QED_IS_BB(cdev) ? "BB" : "AH",
+               'A' + cdev->chip_rev,
+               (int)cdev->chip_metal,
                cdev->chip_num, cdev->chip_rev,
                cdev->chip_bond_id, cdev->chip_metal);
 
@@ -2174,7 +2853,7 @@ static int qed_hw_prepare_single(struct qed_hwfn *p_hwfn,
 
        /* First hwfn learns basic information, e.g., number of hwfns */
        if (!p_hwfn->my_id) {
-               rc = qed_get_dev_info(p_hwfn->cdev);
+               rc = qed_get_dev_info(p_hwfn, p_hwfn->p_main_ptt);
                if (rc)
                        goto err1;
        }
@@ -2195,6 +2874,15 @@ static int qed_hw_prepare_single(struct qed_hwfn *p_hwfn,
                goto err2;
        }
 
+       /* Sending a mailbox to the MFW should be done after qed_get_hw_info()
+        * is called as it sets the ports number in an engine.
+        */
+       if (IS_LEAD_HWFN(p_hwfn)) {
+               rc = qed_mcp_initiate_pf_flr(p_hwfn, p_hwfn->p_main_ptt);
+               if (rc)
+                       DP_NOTICE(p_hwfn, "Failed to initiate PF FLR\n");
+       }
+
        /* Allocate the init RT array and initialize the init-ops engine */
        rc = qed_init_alloc(p_hwfn);
        if (rc)
@@ -2236,11 +2924,14 @@ int qed_hw_prepare(struct qed_dev *cdev,
                u8 __iomem *addr;
 
                /* adjust bar offset for second engine */
-               addr = cdev->regview + qed_hw_bar_size(p_hwfn, BAR_ID_0) / 2;
+               addr = cdev->regview +
+                      qed_hw_bar_size(p_hwfn, p_hwfn->p_main_ptt,
+                                      BAR_ID_0) / 2;
                p_regview = addr;
 
-               /* adjust doorbell bar offset for second engine */
-               addr = cdev->doorbells + qed_hw_bar_size(p_hwfn, BAR_ID_1) / 2;
+               addr = cdev->doorbells +
+                      qed_hw_bar_size(p_hwfn, p_hwfn->p_main_ptt,
+                                      BAR_ID_1) / 2;
                p_doorbell = addr;
 
                /* prepare second hw function */
@@ -2389,9 +3080,8 @@ qed_chain_alloc_sanity_check(struct qed_dev *cdev,
         * size/capacity fields are of a u32 type.
         */
        if ((cnt_type == QED_CHAIN_CNT_TYPE_U16 &&
-            chain_size > 0x10000) ||
-           (cnt_type == QED_CHAIN_CNT_TYPE_U32 &&
-            chain_size > 0x100000000ULL)) {
+            chain_size > ((u32)U16_MAX + 1)) ||
+           (cnt_type == QED_CHAIN_CNT_TYPE_U32 && chain_size > U32_MAX)) {
                DP_NOTICE(cdev,
                          "The actual chain size (0x%llx) is larger than the maximal possible value\n",
                          chain_size);
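
The rewritten bounds come straight from the counter widths: a u16 producer/consumer can index at most (u32)U16_MAX + 1 = 0x10000 elements, exactly the old literal, while the u32 comparison now also rejects 0x100000000, consistent with the size/capacity fields being of a u32 type. A runnable check of the u16 identity:

        #include <assert.h>
        #include <stdint.h>

        int main(void)
        {
                /* the u16 chain bound is unchanged by the rewrite */
                assert((uint32_t)UINT16_MAX + 1 == 0x10000);
                return 0;
        }
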
@@ -3364,3 +4054,8 @@ void qed_clean_wfq_db(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
        memset(p_hwfn->qm_info.wfq_data, 0,
               sizeof(*p_hwfn->qm_info.wfq_data) * p_hwfn->qm_info.num_vports);
 }
+
+int qed_device_num_engines(struct qed_dev *cdev)
+{
+       return QED_IS_BB(cdev) ? 2 : 1;
+}
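
Per the helper above, BB silicon exposes two engines and AH (K2) one, so per-engine iteration can be written as below (illustrative only):

        int eng;

        for (eng = 0; eng < qed_device_num_engines(cdev); eng++)
                ; /* per-engine work would go here */
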
index 6812003411cdc9870a6508de198a962e4bd1e68e..341636da9964b2801009215bca8dba88433ef64a 100644 (file)
@@ -82,26 +82,63 @@ int qed_resc_alloc(struct qed_dev *cdev);
  */
 void qed_resc_setup(struct qed_dev *cdev);
 
+enum qed_override_force_load {
+       QED_OVERRIDE_FORCE_LOAD_NONE,
+       QED_OVERRIDE_FORCE_LOAD_ALWAYS,
+       QED_OVERRIDE_FORCE_LOAD_NEVER,
+};
+
+struct qed_drv_load_params {
+       /* Indicates whether the driver is running over a crash kernel.
+        * As part of the load request, this will be used for providing the
+        * driver role to the MFW.
+        * In case of a crash kernel over PDA - this should be set to false.
+        */
+       bool is_crash_kernel;
+
+       /* The timeout value that the MFW should use when locking the engine for
+        * the driver load process.
+        * A value of '0' means the default value, and '255' means no timeout.
+        */
+       u8 mfw_timeout_val;
+#define QED_LOAD_REQ_LOCK_TO_DEFAULT    0
+#define QED_LOAD_REQ_LOCK_TO_NONE       255
+
+       /* Avoid engine reset when first PF loads on it */
+       bool avoid_eng_reset;
+
+       /* Allow overriding the default force load behavior */
+       enum qed_override_force_load override_force_load;
+};
+
+struct qed_hw_init_params {
+       /* Tunneling parameters */
+       struct qed_tunn_start_params *p_tunn;
+
+       bool b_hw_start;
+
+       /* Interrupt mode [msix, inta, etc.] to use */
+       enum qed_int_mode int_mode;
+
+       /* NPAR tx switching to be used for vports configured for tx-switching */
+       bool allow_npar_tx_switch;
+
+       /* Binary fw data pointer in binary fw file */
+       const u8 *bin_fw_data;
+
+       /* Driver load parameters */
+       struct qed_drv_load_params *p_drv_load_params;
+};
+
 /**
  * @brief qed_hw_init -
  *
  * @param cdev
- * @param p_tunn
- * @param b_hw_start
- * @param int_mode - interrupt mode [msix, inta, etc.] to use.
- * @param allow_npar_tx_switch - npar tx switching to be used
- *       for vports configured for tx-switching.
- * @param bin_fw_data - binary fw data pointer in binary fw file.
- *                     Pass NULL if not using binary fw file.
+ * @param p_params
  *
  * @return int
  */
-int qed_hw_init(struct qed_dev *cdev,
-               struct qed_tunn_start_params *p_tunn,
-               bool b_hw_start,
-               enum qed_int_mode int_mode,
-               bool allow_npar_tx_switch,
-               const u8 *bin_fw_data);
+int qed_hw_init(struct qed_dev *cdev, struct qed_hw_init_params *p_params);
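
Callers now fill a single parameter block instead of passing six arguments. A hypothetical caller, assuming only the structs declared above (cdev, rc and fw_data stand in for real driver state, and QED_INT_MODE_MSIX for the driver's interrupt-mode enum):

        struct qed_drv_load_params load_params = {
                .mfw_timeout_val = QED_LOAD_REQ_LOCK_TO_DEFAULT,
                .override_force_load = QED_OVERRIDE_FORCE_LOAD_NONE,
        };
        struct qed_hw_init_params params = {
                .b_hw_start = true,
                .int_mode = QED_INT_MODE_MSIX,
                .allow_npar_tx_switch = true,
                .bin_fw_data = fw_data, /* NULL if not using a binary fw file */
                .p_drv_load_params = &load_params,
        };

        rc = qed_hw_init(cdev, &params);
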
 
 /**
  * @brief qed_hw_timers_stop_all - stop the timers HW block
@@ -128,26 +165,20 @@ int qed_hw_stop(struct qed_dev *cdev);
  *
  * @param cdev
  *
+ * @return int
  */
-void qed_hw_stop_fastpath(struct qed_dev *cdev);
+int qed_hw_stop_fastpath(struct qed_dev *cdev);
 
 /**
  * @brief qed_hw_start_fastpath - restart fastpath traffic,
  *             only if hw_stop_fastpath was called
  *
- * @param cdev
- *
- */
-void qed_hw_start_fastpath(struct qed_hwfn *p_hwfn);
-
-/**
- * @brief qed_hw_reset -
- *
- * @param cdev
+ * @param p_hwfn
  *
  * @return int
  */
-int qed_hw_reset(struct qed_dev *cdev);
+int qed_hw_start_fastpath(struct qed_hwfn *p_hwfn);
 
 /**
  * @brief qed_hw_prepare -
@@ -441,4 +472,6 @@ int qed_set_rxq_coalesce(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
  */
 int qed_set_txq_coalesce(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
                         u16 coalesce, u8 qid, u16 sb_id);
+
+const char *qed_hw_get_resc_name(enum qed_resources res_id);
 #endif
index cbc81412174f9e1ea07a763a90072348e883699f..21a58fffd02bed96ba07580446c72e2280da47eb 100644 (file)
@@ -191,7 +191,7 @@ qed_sp_fcoe_func_start(struct qed_hwfn *p_hwfn,
        p_data->q_params.cq_sb_pi = fcoe_pf_params->gl_rq_pi;
        p_data->q_params.cmdq_sb_pi = fcoe_pf_params->gl_cmd_pi;
 
-       p_data->q_params.bdq_resource_id = FCOE_BDQ_ID(p_hwfn->port_id);
+       p_data->q_params.bdq_resource_id = (u8)RESC_START(p_hwfn, QED_BDQ);
 
        DMA_REGPAIR_LE(p_data->q_params.bdq_pbl_base_address[BDQ_ID_RQ],
                       fcoe_pf_params->bdq_pbl_base_addr[BDQ_ID_RQ]);
@@ -241,7 +241,7 @@ qed_sp_fcoe_conn_offload(struct qed_hwfn *p_hwfn,
        struct fcoe_conn_offload_ramrod_data *p_data;
        struct qed_spq_entry *p_ent = NULL;
        struct qed_sp_init_data init_data;
-       u16 pq_id = 0, tmp;
+       u16 physical_q0, tmp;
        int rc;
 
        /* Get SPQ entry */
@@ -261,9 +261,9 @@ qed_sp_fcoe_conn_offload(struct qed_hwfn *p_hwfn,
        p_data = &p_ramrod->offload_ramrod_data;
 
        /* Transmission PQ is the first of the PF */
-       pq_id = qed_get_qm_pq(p_hwfn, PROTOCOLID_FCOE, NULL);
-       p_conn->physical_q0 = cpu_to_le16(pq_id);
-       p_data->physical_q0 = cpu_to_le16(pq_id);
+       physical_q0 = qed_get_cm_pq_idx(p_hwfn, PQ_FLAGS_OFLD);
+       p_conn->physical_q0 = cpu_to_le16(physical_q0);
+       p_data->physical_q0 = cpu_to_le16(physical_q0);
 
        p_data->conn_id = cpu_to_le16(p_conn->conn_id);
        DMA_REGPAIR_LE(p_data->sq_pbl_addr, p_conn->sq_pbl_addr);
@@ -340,10 +340,10 @@ qed_sp_fcoe_conn_destroy(struct qed_hwfn *p_hwfn,
 
 static int
 qed_sp_fcoe_func_stop(struct qed_hwfn *p_hwfn,
+                     struct qed_ptt *p_ptt,
                      enum spq_mode comp_mode,
                      struct qed_spq_comp_cb *p_comp_addr)
 {
-       struct qed_ptt *p_ptt = p_hwfn->p_main_ptt;
        struct qed_spq_entry *p_ent = NULL;
        struct qed_sp_init_data init_data;
        u32 active_segs = 0;
@@ -512,19 +512,31 @@ static void __iomem *qed_fcoe_get_db_addr(struct qed_hwfn *p_hwfn, u32 cid)
 static void __iomem *qed_fcoe_get_primary_bdq_prod(struct qed_hwfn *p_hwfn,
                                                   u8 bdq_id)
 {
-       u8 bdq_function_id = FCOE_BDQ_ID(p_hwfn->port_id);
-
-       return (u8 __iomem *)p_hwfn->regview + GTT_BAR0_MAP_REG_MSDM_RAM +
-              MSTORM_SCSI_BDQ_EXT_PROD_OFFSET(bdq_function_id, bdq_id);
+       if (RESC_NUM(p_hwfn, QED_BDQ)) {
+               return (u8 __iomem *)p_hwfn->regview +
+                      GTT_BAR0_MAP_REG_MSDM_RAM +
+                      MSTORM_SCSI_BDQ_EXT_PROD_OFFSET(RESC_START(p_hwfn,
+                                                                 QED_BDQ),
+                                                      bdq_id);
+       } else {
+               DP_NOTICE(p_hwfn, "BDQ is not allocated!\n");
+               return NULL;
+       }
 }
 
 static void __iomem *qed_fcoe_get_secondary_bdq_prod(struct qed_hwfn *p_hwfn,
                                                     u8 bdq_id)
 {
-       u8 bdq_function_id = FCOE_BDQ_ID(p_hwfn->port_id);
-
-       return (u8 __iomem *)p_hwfn->regview + GTT_BAR0_MAP_REG_TSDM_RAM +
-              TSTORM_SCSI_BDQ_EXT_PROD_OFFSET(bdq_function_id, bdq_id);
+       if (RESC_NUM(p_hwfn, QED_BDQ)) {
+               return (u8 __iomem *)p_hwfn->regview +
+                      GTT_BAR0_MAP_REG_TSDM_RAM +
+                      TSTORM_SCSI_BDQ_EXT_PROD_OFFSET(RESC_START(p_hwfn,
+                                                                 QED_BDQ),
+                                                      bdq_id);
+       } else {
+               DP_NOTICE(p_hwfn, "BDQ is not allocated!\n");
+               return NULL;
+       }
 }
 
 struct qed_fcoe_info *qed_fcoe_alloc(struct qed_hwfn *p_hwfn)
@@ -753,6 +765,7 @@ static struct qed_hash_fcoe_con *qed_fcoe_get_hash(struct qed_dev *cdev,
 
 static int qed_fcoe_stop(struct qed_dev *cdev)
 {
+       struct qed_ptt *p_ptt;
        int rc;
 
        if (!(cdev->flags & QED_FLAG_STORAGE_STARTED)) {
@@ -766,10 +779,15 @@ static int qed_fcoe_stop(struct qed_dev *cdev)
                return -EINVAL;
        }
 
+       p_ptt = qed_ptt_acquire(QED_LEADING_HWFN(cdev));
+       if (!p_ptt)
+               return -EAGAIN;
+
        /* Stop the fcoe */
-       rc = qed_sp_fcoe_func_stop(QED_LEADING_HWFN(cdev),
+       rc = qed_sp_fcoe_func_stop(QED_LEADING_HWFN(cdev), p_ptt,
                                   QED_SPQ_MODE_EBLOCK, NULL);
        cdev->flags &= ~QED_FLAG_STORAGE_STARTED;
+       qed_ptt_release(QED_LEADING_HWFN(cdev), p_ptt);
 
        return rc;
 }
index 37c2bfb663bb481c3d7241e8ccfc614d389d7535..815c4ec5b458c1f03e29f4cd68b0d55f29502887 100644 (file)
@@ -574,6 +574,7 @@ enum core_event_opcode {
        CORE_EVENT_TX_QUEUE_STOP,
        CORE_EVENT_RX_QUEUE_START,
        CORE_EVENT_RX_QUEUE_STOP,
+       CORE_EVENT_RX_QUEUE_FLUSH,
        MAX_CORE_EVENT_OPCODE
 };
 
@@ -625,6 +626,7 @@ enum core_ramrod_cmd_id {
        CORE_RAMROD_TX_QUEUE_START,
        CORE_RAMROD_RX_QUEUE_STOP,
        CORE_RAMROD_TX_QUEUE_STOP,
+       CORE_RAMROD_RX_QUEUE_FLUSH,
        MAX_CORE_RAMROD_CMD_ID
 };
 
@@ -698,7 +700,8 @@ struct core_rx_slow_path_cqe {
        u8 type;
        u8 ramrod_cmd_id;
        __le16 echo;
-       __le32 reserved1[7];
+       struct core_rx_cqe_opaque_data opaque_data;
+       __le32 reserved1[5];
 };
 
 union core_rx_cqe_union {
@@ -735,45 +738,46 @@ struct core_rx_stop_ramrod_data {
        __le16 reserved2[2];
 };
 
-struct core_tx_bd_flags {
-       u8 as_bitfield;
-#define CORE_TX_BD_FLAGS_FORCE_VLAN_MODE_MASK  0x1
-#define CORE_TX_BD_FLAGS_FORCE_VLAN_MODE_SHIFT 0
-#define CORE_TX_BD_FLAGS_VLAN_INSERTION_MASK   0x1
-#define CORE_TX_BD_FLAGS_VLAN_INSERTION_SHIFT  1
-#define CORE_TX_BD_FLAGS_START_BD_MASK 0x1
-#define CORE_TX_BD_FLAGS_START_BD_SHIFT        2
-#define CORE_TX_BD_FLAGS_IP_CSUM_MASK  0x1
-#define CORE_TX_BD_FLAGS_IP_CSUM_SHIFT 3
-#define CORE_TX_BD_FLAGS_L4_CSUM_MASK  0x1
-#define CORE_TX_BD_FLAGS_L4_CSUM_SHIFT 4
-#define CORE_TX_BD_FLAGS_IPV6_EXT_MASK 0x1
-#define CORE_TX_BD_FLAGS_IPV6_EXT_SHIFT        5
-#define CORE_TX_BD_FLAGS_L4_PROTOCOL_MASK      0x1
-#define CORE_TX_BD_FLAGS_L4_PROTOCOL_SHIFT     6
-#define CORE_TX_BD_FLAGS_L4_PSEUDO_CSUM_MODE_MASK      0x1
-#define CORE_TX_BD_FLAGS_L4_PSEUDO_CSUM_MODE_SHIFT 7
+struct core_tx_bd_data {
+       __le16 as_bitfield;
+#define CORE_TX_BD_DATA_FORCE_VLAN_MODE_MASK   0x1
+#define CORE_TX_BD_DATA_FORCE_VLAN_MODE_SHIFT     0
+#define CORE_TX_BD_DATA_VLAN_INSERTION_MASK    0x1
+#define CORE_TX_BD_DATA_VLAN_INSERTION_SHIFT      1
+#define CORE_TX_BD_DATA_START_BD_MASK  0x1
+#define CORE_TX_BD_DATA_START_BD_SHIFT            2
+#define CORE_TX_BD_DATA_IP_CSUM_MASK   0x1
+#define CORE_TX_BD_DATA_IP_CSUM_SHIFT             3
+#define CORE_TX_BD_DATA_L4_CSUM_MASK   0x1
+#define CORE_TX_BD_DATA_L4_CSUM_SHIFT             4
+#define CORE_TX_BD_DATA_IPV6_EXT_MASK  0x1
+#define CORE_TX_BD_DATA_IPV6_EXT_SHIFT            5
+#define CORE_TX_BD_DATA_L4_PROTOCOL_MASK       0x1
+#define CORE_TX_BD_DATA_L4_PROTOCOL_SHIFT         6
+#define CORE_TX_BD_DATA_L4_PSEUDO_CSUM_MODE_MASK       0x1
+#define CORE_TX_BD_DATA_L4_PSEUDO_CSUM_MODE_SHIFT 7
+#define CORE_TX_BD_DATA_NBDS_MASK      0xF
+#define CORE_TX_BD_DATA_NBDS_SHIFT                8
+#define CORE_TX_BD_DATA_ROCE_FLAV_MASK 0x1
+#define CORE_TX_BD_DATA_ROCE_FLAV_SHIFT           12
+#define CORE_TX_BD_DATA_IP_LEN_MASK    0x1
+#define CORE_TX_BD_DATA_IP_LEN_SHIFT              13
+#define CORE_TX_BD_DATA_RESERVED0_MASK            0x3
+#define CORE_TX_BD_DATA_RESERVED0_SHIFT           14
 };
 
 struct core_tx_bd {
        struct regpair addr;
        __le16 nbytes;
        __le16 nw_vlan_or_lb_echo;
-       u8 bitfield0;
-#define CORE_TX_BD_NBDS_MASK   0xF
-#define CORE_TX_BD_NBDS_SHIFT  0
-#define CORE_TX_BD_ROCE_FLAV_MASK      0x1
-#define CORE_TX_BD_ROCE_FLAV_SHIFT     4
-#define CORE_TX_BD_RESERVED0_MASK      0x7
-#define CORE_TX_BD_RESERVED0_SHIFT     5
-       struct core_tx_bd_flags bd_flags;
+       struct core_tx_bd_data bd_data;
        __le16 bitfield1;
 #define CORE_TX_BD_L4_HDR_OFFSET_W_MASK        0x3FFF
 #define CORE_TX_BD_L4_HDR_OFFSET_W_SHIFT 0
 #define CORE_TX_BD_TX_DST_MASK 0x1
 #define CORE_TX_BD_TX_DST_SHIFT        14
-#define CORE_TX_BD_RESERVED1_MASK      0x1
-#define CORE_TX_BD_RESERVED1_SHIFT     15
+#define CORE_TX_BD_RESERVED_MASK         0x1
+#define CORE_TX_BD_RESERVED_SHIFT        15
 };
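
Each field in the widened bd_data word is packed through its _MASK/_SHIFT pair; the driver does this with its own SET_FIELD()/GET_FIELD() helpers, but an equivalent open-coded sketch (the helper name here is ours) looks like:

        static void set_bd_field(__le16 *w, u16 mask, u8 shift, u16 val)
        {
                u16 v = le16_to_cpu(*w);

                v &= ~(mask << shift);          /* clear the field     */
                v |= (val & mask) << shift;     /* write the new value */
                *w = cpu_to_le16(v);
        }

        /* e.g. mark a start BD that carries 3 BDs in total */
        set_bd_field(&bd->bd_data.as_bitfield, CORE_TX_BD_DATA_START_BD_MASK,
                     CORE_TX_BD_DATA_START_BD_SHIFT, 1);
        set_bd_field(&bd->bd_data.as_bitfield, CORE_TX_BD_DATA_NBDS_MASK,
                     CORE_TX_BD_DATA_NBDS_SHIFT, 3);
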
 
 enum core_tx_dest {
@@ -800,6 +804,14 @@ struct core_tx_stop_ramrod_data {
        __le32 reserved0[2];
 };
 
+enum dcb_dhcp_update_flag {
+       DONT_UPDATE_DCB_DHCP,
+       UPDATE_DCB,
+       UPDATE_DSCP,
+       UPDATE_DCB_DSCP,
+       MAX_DCB_DHCP_UPDATE_FLAG
+};
+
 struct eth_mstorm_per_pf_stat {
        struct regpair gre_discard_pkts;
        struct regpair vxlan_discard_pkts;
@@ -893,6 +905,12 @@ union event_ring_element {
        struct event_ring_next_addr next_addr;
 };
 
+enum fw_flow_ctrl_mode {
+       flow_ctrl_pause,
+       flow_ctrl_pfc,
+       MAX_FW_FLOW_CTRL_MODE
+};
+
 /* Major and Minor hsi Versions */
 struct hsi_fp_ver_struct {
        u8 minor_ver_arr[2];
@@ -921,6 +939,7 @@ enum malicious_vf_error_id {
        ETH_EDPM_OUT_OF_SYNC,
        ETH_TUNN_IPV6_EXT_NBD_ERR,
        ETH_CONTROL_PACKET_VIOLATION,
+       ETH_ANTI_SPOOFING_ERR,
        MAX_MALICIOUS_VF_ERROR_ID
 };
 
@@ -1106,8 +1125,9 @@ struct tstorm_per_port_stat {
        struct regpair ll2_mac_filter_discard;
        struct regpair ll2_conn_disabled_discard;
        struct regpair iscsi_irregular_pkt;
-       struct regpair reserved;
+       struct regpair fcoe_irregular_pkt;
        struct regpair roce_irregular_pkt;
+       struct regpair reserved;
        struct regpair eth_irregular_pkt;
        struct regpair reserved1;
        struct regpair preroce_irregular_pkt;
@@ -1648,6 +1668,11 @@ enum block_addr {
        GRCBASE_MS = 0x6a0000,
        GRCBASE_PHY_PCIE = 0x620000,
        GRCBASE_LED = 0x6b8000,
+       GRCBASE_AVS_WRAP = 0x6b0000,
+       GRCBASE_RGFS = 0x19d0000,
+       GRCBASE_TGFS = 0x19e0000,
+       GRCBASE_PTLD = 0x19f0000,
+       GRCBASE_YPLD = 0x1a10000,
        GRCBASE_MISC_AEU = 0x8000,
        GRCBASE_BAR0_MAP = 0x1c00000,
        MAX_BLOCK_ADDR
@@ -1732,6 +1757,11 @@ enum block_id {
        BLOCK_MS,
        BLOCK_PHY_PCIE,
        BLOCK_LED,
+       BLOCK_AVS_WRAP,
+       BLOCK_RGFS,
+       BLOCK_TGFS,
+       BLOCK_PTLD,
+       BLOCK_YPLD,
        BLOCK_MISC_AEU,
        BLOCK_BAR0_MAP,
        MAX_BLOCK_ID
@@ -1783,9 +1813,9 @@ struct dbg_attn_reg_result {
        __le32 data;
 #define DBG_ATTN_REG_RESULT_STS_ADDRESS_MASK   0xFFFFFF
 #define DBG_ATTN_REG_RESULT_STS_ADDRESS_SHIFT  0
-#define DBG_ATTN_REG_RESULT_NUM_ATTN_IDX_MASK  0xFF
-#define DBG_ATTN_REG_RESULT_NUM_ATTN_IDX_SHIFT 24
-       __le16 attn_idx_offset;
+#define DBG_ATTN_REG_RESULT_NUM_REG_ATTN_MASK  0xFF
+#define DBG_ATTN_REG_RESULT_NUM_REG_ATTN_SHIFT 24
+       __le16 block_attn_offset;
        __le16 reserved;
        __le32 sts_val;
        __le32 mask_val;
@@ -1815,12 +1845,12 @@ struct dbg_mode_hdr {
 /* Attention register */
 struct dbg_attn_reg {
        struct dbg_mode_hdr mode;
-       __le16 attn_idx_offset;
+       __le16 block_attn_offset;
        __le32 data;
 #define DBG_ATTN_REG_STS_ADDRESS_MASK  0xFFFFFF
 #define DBG_ATTN_REG_STS_ADDRESS_SHIFT 0
-#define DBG_ATTN_REG_NUM_ATTN_IDX_MASK 0xFF
-#define DBG_ATTN_REG_NUM_ATTN_IDX_SHIFT        24
+#define DBG_ATTN_REG_NUM_REG_ATTN_MASK 0xFF
+#define DBG_ATTN_REG_NUM_REG_ATTN_SHIFT 24
        __le32 sts_clr_address;
        __le32 mask_address;
 };
@@ -2001,6 +2031,20 @@ enum dbg_bus_clients {
        MAX_DBG_BUS_CLIENTS
 };
 
+enum dbg_bus_constraint_ops {
+       DBG_BUS_CONSTRAINT_OP_EQ,
+       DBG_BUS_CONSTRAINT_OP_NE,
+       DBG_BUS_CONSTRAINT_OP_LT,
+       DBG_BUS_CONSTRAINT_OP_LTC,
+       DBG_BUS_CONSTRAINT_OP_LE,
+       DBG_BUS_CONSTRAINT_OP_LEC,
+       DBG_BUS_CONSTRAINT_OP_GT,
+       DBG_BUS_CONSTRAINT_OP_GTC,
+       DBG_BUS_CONSTRAINT_OP_GE,
+       DBG_BUS_CONSTRAINT_OP_GEC,
+       MAX_DBG_BUS_CONSTRAINT_OPS
+};
+
 /* Debug Bus memory address */
 struct dbg_bus_mem_addr {
        __le32 lo;
@@ -2092,10 +2136,18 @@ struct dbg_bus_data {
                                              * DBG_BUS_TARGET_ID_PCI.
                                              */
        __le16 reserved;
-       struct dbg_bus_block_data blocks[80];/* Debug Bus data for each block */
+       struct dbg_bus_block_data blocks[88];/* Debug Bus data for each block */
        struct dbg_bus_storm_data storms[6]; /* Debug Bus data for each storm */
 };
 
+enum dbg_bus_filter_types {
+       DBG_BUS_FILTER_TYPE_OFF,
+       DBG_BUS_FILTER_TYPE_PRE,
+       DBG_BUS_FILTER_TYPE_POST,
+       DBG_BUS_FILTER_TYPE_ON,
+       MAX_DBG_BUS_FILTER_TYPES
+};
+
 /* Debug bus frame modes */
 enum dbg_bus_frame_modes {
        DBG_BUS_FRAME_MODE_0HW_4ST = 0, /* 0 HW dwords, 4 Storm dwords */
@@ -2104,6 +2156,40 @@ enum dbg_bus_frame_modes {
        MAX_DBG_BUS_FRAME_MODES
 };
 
+enum dbg_bus_input_types {
+       DBG_BUS_INPUT_TYPE_STORM,
+       DBG_BUS_INPUT_TYPE_BLOCK,
+       MAX_DBG_BUS_INPUT_TYPES
+};
+
+enum dbg_bus_other_engine_modes {
+       DBG_BUS_OTHER_ENGINE_MODE_NONE,
+       DBG_BUS_OTHER_ENGINE_MODE_DOUBLE_BW_TX,
+       DBG_BUS_OTHER_ENGINE_MODE_DOUBLE_BW_RX,
+       DBG_BUS_OTHER_ENGINE_MODE_CROSS_ENGINE_TX,
+       DBG_BUS_OTHER_ENGINE_MODE_CROSS_ENGINE_RX,
+       MAX_DBG_BUS_OTHER_ENGINE_MODES
+};
+
+enum dbg_bus_post_trigger_types {
+       DBG_BUS_POST_TRIGGER_RECORD,
+       DBG_BUS_POST_TRIGGER_DROP,
+       MAX_DBG_BUS_POST_TRIGGER_TYPES
+};
+
+enum dbg_bus_pre_trigger_types {
+       DBG_BUS_PRE_TRIGGER_START_FROM_ZERO,
+       DBG_BUS_PRE_TRIGGER_NUM_CHUNKS,
+       DBG_BUS_PRE_TRIGGER_DROP,
+       MAX_DBG_BUS_PRE_TRIGGER_TYPES
+};
+
+enum dbg_bus_semi_frame_modes {
+       DBG_BUS_SEMI_FRAME_MODE_0SLOW_4FAST = 0,
+       DBG_BUS_SEMI_FRAME_MODE_4SLOW_0FAST = 3,
+       MAX_DBG_BUS_SEMI_FRAME_MODES
+};
+
 /* Debug bus states */
 enum dbg_bus_states {
        DBG_BUS_STATE_IDLE, /* debug bus idle state (not recording) */
@@ -2115,6 +2201,19 @@ enum dbg_bus_states {
        MAX_DBG_BUS_STATES
 };
 
+enum dbg_bus_storm_modes {
+       DBG_BUS_STORM_MODE_PRINTF,
+       DBG_BUS_STORM_MODE_PRAM_ADDR,
+       DBG_BUS_STORM_MODE_DRA_RW,
+       DBG_BUS_STORM_MODE_DRA_W,
+       DBG_BUS_STORM_MODE_LD_ST_ADDR,
+       DBG_BUS_STORM_MODE_DRA_FSM,
+       DBG_BUS_STORM_MODE_RH,
+       DBG_BUS_STORM_MODE_FOC,
+       DBG_BUS_STORM_MODE_EXT_STORE,
+       MAX_DBG_BUS_STORM_MODES
+};
+
 /* Debug bus target IDs */
 enum dbg_bus_targets {
        /* records debug bus to DBG block internal buffer */
@@ -2128,13 +2227,10 @@ enum dbg_bus_targets {
 
 /* GRC Dump data */
 struct dbg_grc_data {
-       __le32 param_val[40]; /* Value of each GRC parameter. Array size must
-                              * match the enum dbg_grc_params.
-                              */
-       u8 param_set_by_user[40]; /* Indicates for each GRC parameter if it was
-                                  * set by the user (0/1). Array size must
-                                  * match the enum dbg_grc_params.
-                                  */
+       u8 params_initialized;
+       u8 reserved1;
+       __le16 reserved2;
+       __le32 param_val[48];
 };
 
 /* Debug GRC params */
@@ -2181,6 +2277,8 @@ enum dbg_grc_params {
        DBG_GRC_PARAM_PARITY_SAFE,
        DBG_GRC_PARAM_DUMP_CM, /* dump CM memories (0/1) */
        DBG_GRC_PARAM_DUMP_PHY, /* dump PHY memories (0/1) */
+       DBG_GRC_PARAM_NO_MCP,
+       DBG_GRC_PARAM_NO_FW_VER,
        MAX_DBG_GRC_PARAMS
 };
 
@@ -2280,7 +2378,7 @@ struct dbg_tools_data {
        struct dbg_bus_data bus; /* Debug Bus data */
        struct idle_chk_data idle_chk; /* Idle Check data */
        u8 mode_enable[40]; /* Indicates if a mode is enabled (0/1) */
-       u8 block_in_reset[80]; /* Indicates if a block is in reset state (0/1).
+       u8 block_in_reset[88]; /* Indicates if a block is in reset state (0/1).
                                */
        u8 chip_id; /* Chip ID (from enum chip_ids) */
        u8 platform_id; /* Platform ID (from enum platform_ids) */
@@ -2404,7 +2502,7 @@ struct fw_info_location {
 
 enum init_modes {
        MODE_RESERVED,
-       MODE_BB_B0,
+       MODE_BB,
        MODE_K2,
        MODE_ASIC,
        MODE_RESERVED2,
@@ -2418,7 +2516,6 @@ enum init_modes {
        MODE_PORTS_PER_ENG_2,
        MODE_PORTS_PER_ENG_4,
        MODE_100G,
-       MODE_40G,
        MODE_RESERVED6,
        MAX_INIT_MODES
 };
@@ -2685,6 +2782,13 @@ struct iro {
  * @param bin_ptr - a pointer to the binary data with debug arrays.
  */
 enum dbg_status qed_dbg_set_bin_ptr(const u8 * const bin_ptr);
+/**
+ * @brief qed_dbg_grc_set_params_default - Reverts all GRC parameters to their
+ *     default value.
+ *
+ * @param p_hwfn               - HW device data
+ */
+void qed_dbg_grc_set_params_default(struct qed_hwfn *p_hwfn);
 /**
  * @brief qed_dbg_grc_get_dump_buf_size - Returns the required buffer size for
  *     GRC Dump.
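
For context: the export added above reverts any user overrides of the GRC dump parameters. A minimal caller sketch follows (the helper name is hypothetical, and the buf-size signature is assumed from the kernel-doc that the next hunk begins; this is not the driver's actual dump path):

	static enum dbg_status qed_grc_prepare_dump(struct qed_hwfn *p_hwfn,
						    struct qed_ptt *p_ptt,
						    u32 *dump_dwords)
	{
		/* Drop any user overrides before sizing the dump buffer */
		qed_dbg_grc_set_params_default(p_hwfn);

		/* Signature assumed from the kernel-doc above */
		return qed_dbg_grc_get_dump_buf_size(p_hwfn, p_ptt,
						     dump_dwords);
	}
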
@@ -3418,7 +3522,7 @@ void qed_set_geneve_enable(struct qed_hwfn *p_hwfn,
 #define        MSTORM_TPA_TIMEOUT_US_SIZE                      (IRO[21].size)
 #define        MSTORM_ETH_PF_STAT_OFFSET(pf_id) \
        (IRO[22].base + ((pf_id) * IRO[22].m1))
-#define        MSTORM_ETH_PF_STAT_SIZE                         (IRO[21].size)
+#define        MSTORM_ETH_PF_STAT_SIZE                         (IRO[22].size)
 #define        USTORM_QUEUE_STAT_OFFSET(stat_counter_id) \
        (IRO[23].base + ((stat_counter_id) * IRO[23].m1))
 #define        USTORM_QUEUE_STAT_SIZE                          (IRO[23].size)
@@ -3482,7 +3586,7 @@ void qed_set_geneve_enable(struct qed_hwfn *p_hwfn,
 
 static const struct iro iro_arr[47] = {
        {0x0, 0x0, 0x0, 0x0, 0x8},
-       {0x4cb0, 0x78, 0x0, 0x0, 0x78},
+       {0x4cb0, 0x80, 0x0, 0x0, 0x80},
        {0x6318, 0x20, 0x0, 0x0, 0x20},
        {0xb00, 0x8, 0x0, 0x0, 0x4},
        {0xa80, 0x8, 0x0, 0x0, 0x4},
@@ -3521,13 +3625,13 @@ static const struct iro iro_arr[47] = {
        {0xd888, 0x38, 0x0, 0x0, 0x24},
        {0x12c38, 0x10, 0x0, 0x0, 0x8},
        {0x11aa0, 0x38, 0x0, 0x0, 0x18},
-       {0xa8c0, 0x30, 0x0, 0x0, 0x10},
-       {0x86f8, 0x28, 0x0, 0x0, 0x18},
+       {0xa8c0, 0x38, 0x0, 0x0, 0x10},
+       {0x86f8, 0x30, 0x0, 0x0, 0x18},
        {0x101f8, 0x10, 0x0, 0x0, 0x10},
        {0xdd08, 0x48, 0x0, 0x0, 0x38},
        {0x10660, 0x20, 0x0, 0x0, 0x20},
        {0x2b80, 0x80, 0x0, 0x0, 0x10},
-       {0x5000, 0x10, 0x0, 0x0, 0x10},
+       {0x5020, 0x10, 0x0, 0x0, 0x10},
 };
 
 /* Runtime array offsets */
@@ -4595,6 +4699,12 @@ enum eth_ipv4_frag_type {
        MAX_ETH_IPV4_FRAG_TYPE
 };
 
+enum eth_ip_type {
+       ETH_IPV4,
+       ETH_IPV6,
+       MAX_ETH_IP_TYPE
+};
+
 enum eth_ramrod_cmd_id {
        ETH_RAMROD_UNUSED,
        ETH_RAMROD_VPORT_START,
@@ -4944,7 +5054,10 @@ struct vport_update_ramrod_data_cmn {
        u8 update_mtu_flg;
 
        __le16 mtu;
-       u8 reserved[2];
+       u8 update_ctl_frame_checks_en_flg;
+       u8 ctl_frame_mac_check_en;
+       u8 ctl_frame_ethtype_check_en;
+       u8 reserved[15];
 };
 
 struct vport_update_ramrod_mcast {
@@ -4962,6 +5075,492 @@ struct vport_update_ramrod_data {
        struct eth_vport_rss_config rss_config;
 };
 
+struct mstorm_eth_conn_ag_ctx {
+       u8 byte0;
+       u8 byte1;
+       u8 flags0;
+#define MSTORM_ETH_CONN_AG_CTX_EXIST_IN_QM0_MASK       0x1
+#define MSTORM_ETH_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0
+#define MSTORM_ETH_CONN_AG_CTX_BIT1_MASK       0x1
+#define MSTORM_ETH_CONN_AG_CTX_BIT1_SHIFT         1
+#define MSTORM_ETH_CONN_AG_CTX_CF0_MASK        0x3
+#define MSTORM_ETH_CONN_AG_CTX_CF0_SHIFT          2
+#define MSTORM_ETH_CONN_AG_CTX_CF1_MASK        0x3
+#define MSTORM_ETH_CONN_AG_CTX_CF1_SHIFT          4
+#define MSTORM_ETH_CONN_AG_CTX_CF2_MASK        0x3
+#define MSTORM_ETH_CONN_AG_CTX_CF2_SHIFT          6
+       u8 flags1;
+#define MSTORM_ETH_CONN_AG_CTX_CF0EN_MASK      0x1
+#define MSTORM_ETH_CONN_AG_CTX_CF0EN_SHIFT        0
+#define MSTORM_ETH_CONN_AG_CTX_CF1EN_MASK      0x1
+#define MSTORM_ETH_CONN_AG_CTX_CF1EN_SHIFT        1
+#define MSTORM_ETH_CONN_AG_CTX_CF2EN_MASK      0x1
+#define MSTORM_ETH_CONN_AG_CTX_CF2EN_SHIFT        2
+#define MSTORM_ETH_CONN_AG_CTX_RULE0EN_MASK    0x1
+#define MSTORM_ETH_CONN_AG_CTX_RULE0EN_SHIFT      3
+#define MSTORM_ETH_CONN_AG_CTX_RULE1EN_MASK    0x1
+#define MSTORM_ETH_CONN_AG_CTX_RULE1EN_SHIFT      4
+#define MSTORM_ETH_CONN_AG_CTX_RULE2EN_MASK    0x1
+#define MSTORM_ETH_CONN_AG_CTX_RULE2EN_SHIFT      5
+#define MSTORM_ETH_CONN_AG_CTX_RULE3EN_MASK    0x1
+#define MSTORM_ETH_CONN_AG_CTX_RULE3EN_SHIFT      6
+#define MSTORM_ETH_CONN_AG_CTX_RULE4EN_MASK    0x1
+#define MSTORM_ETH_CONN_AG_CTX_RULE4EN_SHIFT      7
+       __le16 word0;
+       __le16 word1;
+       __le32 reg0;
+       __le32 reg1;
+};
+
+struct xstorm_eth_conn_agctxdq_ext_ldpart {
+       u8 reserved0;
+       u8 eth_state;
+       u8 flags0;
+#define XSTORMETHCONNAGCTXDQEXTLDPART_EXIST_IN_QM0_MASK        0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_EXIST_IN_QM0_SHIFT           0
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED1_MASK   0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED1_SHIFT              1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED2_MASK   0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED2_SHIFT              2
+#define XSTORMETHCONNAGCTXDQEXTLDPART_EXIST_IN_QM3_MASK        0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_EXIST_IN_QM3_SHIFT           3
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED3_MASK   0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED3_SHIFT              4
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED4_MASK   0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED4_SHIFT              5
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED5_MASK   0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED5_SHIFT              6
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED6_MASK   0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED6_SHIFT              7
+       u8 flags1;
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED7_MASK   0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED7_SHIFT              0
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED8_MASK   0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED8_SHIFT              1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED9_MASK   0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED9_SHIFT              2
+#define XSTORMETHCONNAGCTXDQEXTLDPART_BIT11_MASK       0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_BIT11_SHIFT                  3
+#define XSTORMETHCONNAGCTXDQEXTLDPART_BIT12_MASK       0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_BIT12_SHIFT                  4
+#define XSTORMETHCONNAGCTXDQEXTLDPART_BIT13_MASK       0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_BIT13_SHIFT                  5
+#define XSTORMETHCONNAGCTXDQEXTLDPART_TX_RULE_ACTIVE_MASK      0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_TX_RULE_ACTIVE_SHIFT         6
+#define XSTORMETHCONNAGCTXDQEXTLDPART_DQ_CF_ACTIVE_MASK        0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_DQ_CF_ACTIVE_SHIFT           7
+       u8 flags2;
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF0_MASK 0x3
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF0_SHIFT                    0
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF1_MASK 0x3
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF1_SHIFT                    2
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF2_MASK 0x3
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF2_SHIFT                    4
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF3_MASK 0x3
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF3_SHIFT                    6
+       u8 flags3;
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF4_MASK 0x3
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF4_SHIFT                    0
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF5_MASK 0x3
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF5_SHIFT                    2
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF6_MASK 0x3
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF6_SHIFT                    4
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF7_MASK 0x3
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF7_SHIFT                    6
+       u8 flags4;
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF8_MASK 0x3
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF8_SHIFT                    0
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF9_MASK 0x3
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF9_SHIFT                    2
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF10_MASK        0x3
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF10_SHIFT                   4
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF11_MASK        0x3
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF11_SHIFT                   6
+       u8 flags5;
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF12_MASK        0x3
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF12_SHIFT                   0
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF13_MASK        0x3
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF13_SHIFT                   2
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF14_MASK        0x3
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF14_SHIFT                   4
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF15_MASK        0x3
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF15_SHIFT                   6
+       u8 flags6;
+#define XSTORMETHCONNAGCTXDQEXTLDPART_GO_TO_BD_CONS_CF_MASK    0x3
+#define XSTORMETHCONNAGCTXDQEXTLDPART_GO_TO_BD_CONS_CF_SHIFT       0
+#define XSTORMETHCONNAGCTXDQEXTLDPART_MULTI_UNICAST_CF_MASK    0x3
+#define XSTORMETHCONNAGCTXDQEXTLDPART_MULTI_UNICAST_CF_SHIFT       2
+#define XSTORMETHCONNAGCTXDQEXTLDPART_DQ_CF_MASK       0x3
+#define XSTORMETHCONNAGCTXDQEXTLDPART_DQ_CF_SHIFT                  4
+#define XSTORMETHCONNAGCTXDQEXTLDPART_TERMINATE_CF_MASK        0x3
+#define XSTORMETHCONNAGCTXDQEXTLDPART_TERMINATE_CF_SHIFT           6
+       u8 flags7;
+#define XSTORMETHCONNAGCTXDQEXTLDPART_FLUSH_Q0_MASK    0x3
+#define XSTORMETHCONNAGCTXDQEXTLDPART_FLUSH_Q0_SHIFT               0
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED10_MASK  0x3
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED10_SHIFT             2
+#define XSTORMETHCONNAGCTXDQEXTLDPART_SLOW_PATH_MASK   0x3
+#define XSTORMETHCONNAGCTXDQEXTLDPART_SLOW_PATH_SHIFT              4
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF0EN_MASK       0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF0EN_SHIFT                  6
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF1EN_MASK       0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF1EN_SHIFT                  7
+       u8 flags8;
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF2EN_MASK       0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF2EN_SHIFT                  0
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF3EN_MASK       0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF3EN_SHIFT                  1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF4EN_MASK       0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF4EN_SHIFT                  2
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF5EN_MASK       0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF5EN_SHIFT                  3
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF6EN_MASK       0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF6EN_SHIFT                  4
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF7EN_MASK       0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF7EN_SHIFT                  5
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF8EN_MASK       0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF8EN_SHIFT                  6
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF9EN_MASK       0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF9EN_SHIFT                  7
+       u8 flags9;
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF10EN_MASK      0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF10EN_SHIFT                 0
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF11EN_MASK      0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF11EN_SHIFT                 1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF12EN_MASK      0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF12EN_SHIFT                 2
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF13EN_MASK      0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF13EN_SHIFT                 3
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF14EN_MASK      0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF14EN_SHIFT                 4
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF15EN_MASK      0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF15EN_SHIFT                 5
+#define XSTORMETHCONNAGCTXDQEXTLDPART_GO_TO_BD_CONS_CF_EN_MASK 0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_GO_TO_BD_CONS_CF_EN_SHIFT    6
+#define XSTORMETHCONNAGCTXDQEXTLDPART_MULTI_UNICAST_CF_EN_MASK 0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_MULTI_UNICAST_CF_EN_SHIFT    7
+       u8 flags10;
+#define XSTORMETHCONNAGCTXDQEXTLDPART_DQ_CF_EN_MASK    0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_DQ_CF_EN_SHIFT               0
+#define XSTORMETHCONNAGCTXDQEXTLDPART_TERMINATE_CF_EN_MASK     0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_TERMINATE_CF_EN_SHIFT        1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_FLUSH_Q0_EN_MASK 0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_FLUSH_Q0_EN_SHIFT            2
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED11_MASK  0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED11_SHIFT             3
+#define XSTORMETHCONNAGCTXDQEXTLDPART_SLOW_PATH_EN_MASK        0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_SLOW_PATH_EN_SHIFT           4
+#define XSTORMETHCONNAGCTXDQEXTLDPART_TPH_ENABLE_EN_RESERVED_MASK      0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_TPH_ENABLE_EN_RESERVED_SHIFT 5
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED12_MASK  0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED12_SHIFT             6
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED13_MASK  0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED13_SHIFT             7
+       u8 flags11;
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED14_MASK  0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED14_SHIFT             0
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED15_MASK  0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED15_SHIFT             1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_TX_DEC_RULE_EN_MASK      0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_TX_DEC_RULE_EN_SHIFT         2
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE5EN_MASK     0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE5EN_SHIFT                3
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE6EN_MASK     0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE6EN_SHIFT                4
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE7EN_MASK     0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE7EN_SHIFT                5
+#define XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED1_MASK        0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED1_SHIFT           6
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE9EN_MASK     0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE9EN_SHIFT                7
+       u8 flags12;
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE10EN_MASK    0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE10EN_SHIFT               0
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE11EN_MASK    0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE11EN_SHIFT               1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED2_MASK        0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED2_SHIFT           2
+#define XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED3_MASK        0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED3_SHIFT           3
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE14EN_MASK    0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE14EN_SHIFT               4
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE15EN_MASK    0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE15EN_SHIFT               5
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE16EN_MASK    0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE16EN_SHIFT               6
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE17EN_MASK    0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE17EN_SHIFT               7
+       u8 flags13;
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE18EN_MASK    0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE18EN_SHIFT               0
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE19EN_MASK    0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE19EN_SHIFT               1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED4_MASK        0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED4_SHIFT           2
+#define XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED5_MASK        0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED5_SHIFT           3
+#define XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED6_MASK        0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED6_SHIFT           4
+#define XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED7_MASK        0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED7_SHIFT           5
+#define XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED8_MASK        0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED8_SHIFT           6
+#define XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED9_MASK        0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED9_SHIFT           7
+       u8 flags14;
+#define XSTORMETHCONNAGCTXDQEXTLDPART_EDPM_USE_EXT_HDR_MASK    0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_EDPM_USE_EXT_HDR_SHIFT       0
+#define XSTORMETHCONNAGCTXDQEXTLDPART_EDPM_SEND_RAW_L3L4_MASK  0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_EDPM_SEND_RAW_L3L4_SHIFT     1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_EDPM_INBAND_PROP_HDR_MASK        0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_EDPM_INBAND_PROP_HDR_SHIFT   2
+#define XSTORMETHCONNAGCTXDQEXTLDPART_EDPM_SEND_EXT_TUNNEL_MASK        0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_EDPM_SEND_EXT_TUNNEL_SHIFT   3
+#define XSTORMETHCONNAGCTXDQEXTLDPART_L2_EDPM_ENABLE_MASK      0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_L2_EDPM_ENABLE_SHIFT         4
+#define XSTORMETHCONNAGCTXDQEXTLDPART_ROCE_EDPM_ENABLE_MASK    0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_ROCE_EDPM_ENABLE_SHIFT       5
+#define XSTORMETHCONNAGCTXDQEXTLDPART_TPH_ENABLE_MASK  0x3
+#define XSTORMETHCONNAGCTXDQEXTLDPART_TPH_ENABLE_SHIFT             6
+       u8 edpm_event_id;
+       __le16 physical_q0;
+       __le16 quota;
+       __le16 edpm_num_bds;
+       __le16 tx_bd_cons;
+       __le16 tx_bd_prod;
+       __le16 tx_class;
+       __le16 conn_dpi;
+       u8 byte3;
+       u8 byte4;
+       u8 byte5;
+       u8 byte6;
+       __le32 reg0;
+       __le32 reg1;
+       __le32 reg2;
+       __le32 reg3;
+       __le32 reg4;
+};
+
+struct xstorm_eth_hw_conn_ag_ctx {
+       u8 reserved0;
+       u8 eth_state;
+       u8 flags0;
+#define XSTORM_ETH_HW_CONN_AG_CTX_EXIST_IN_QM0_MASK    0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_EXIST_IN_QM0_SHIFT           0
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED1_MASK       0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED1_SHIFT              1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED2_MASK       0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED2_SHIFT              2
+#define XSTORM_ETH_HW_CONN_AG_CTX_EXIST_IN_QM3_MASK    0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_EXIST_IN_QM3_SHIFT           3
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED3_MASK       0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED3_SHIFT              4
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED4_MASK       0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED4_SHIFT              5
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED5_MASK       0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED5_SHIFT              6
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED6_MASK       0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED6_SHIFT              7
+       u8 flags1;
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED7_MASK       0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED7_SHIFT              0
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED8_MASK       0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED8_SHIFT              1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED9_MASK       0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED9_SHIFT              2
+#define XSTORM_ETH_HW_CONN_AG_CTX_BIT11_MASK   0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_BIT11_SHIFT                  3
+#define XSTORM_ETH_HW_CONN_AG_CTX_BIT12_MASK   0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_BIT12_SHIFT                  4
+#define XSTORM_ETH_HW_CONN_AG_CTX_BIT13_MASK   0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_BIT13_SHIFT                  5
+#define XSTORM_ETH_HW_CONN_AG_CTX_TX_RULE_ACTIVE_MASK  0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_TX_RULE_ACTIVE_SHIFT         6
+#define XSTORM_ETH_HW_CONN_AG_CTX_DQ_CF_ACTIVE_MASK    0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_DQ_CF_ACTIVE_SHIFT           7
+       u8 flags2;
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF0_MASK     0x3
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF0_SHIFT                    0
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF1_MASK     0x3
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF1_SHIFT                    2
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF2_MASK     0x3
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF2_SHIFT                    4
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF3_MASK     0x3
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF3_SHIFT                    6
+       u8 flags3;
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF4_MASK     0x3
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF4_SHIFT                    0
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF5_MASK     0x3
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF5_SHIFT                    2
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF6_MASK     0x3
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF6_SHIFT                    4
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF7_MASK     0x3
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF7_SHIFT                    6
+       u8 flags4;
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF8_MASK     0x3
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF8_SHIFT                    0
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF9_MASK     0x3
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF9_SHIFT                    2
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF10_MASK    0x3
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF10_SHIFT                   4
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF11_MASK    0x3
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF11_SHIFT                   6
+       u8 flags5;
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF12_MASK    0x3
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF12_SHIFT                   0
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF13_MASK    0x3
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF13_SHIFT                   2
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF14_MASK    0x3
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF14_SHIFT                   4
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF15_MASK    0x3
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF15_SHIFT                   6
+       u8 flags6;
+#define XSTORM_ETH_HW_CONN_AG_CTX_GO_TO_BD_CONS_CF_MASK        0x3
+#define XSTORM_ETH_HW_CONN_AG_CTX_GO_TO_BD_CONS_CF_SHIFT       0
+#define XSTORM_ETH_HW_CONN_AG_CTX_MULTI_UNICAST_CF_MASK        0x3
+#define XSTORM_ETH_HW_CONN_AG_CTX_MULTI_UNICAST_CF_SHIFT       2
+#define XSTORM_ETH_HW_CONN_AG_CTX_DQ_CF_MASK   0x3
+#define XSTORM_ETH_HW_CONN_AG_CTX_DQ_CF_SHIFT                  4
+#define XSTORM_ETH_HW_CONN_AG_CTX_TERMINATE_CF_MASK    0x3
+#define XSTORM_ETH_HW_CONN_AG_CTX_TERMINATE_CF_SHIFT           6
+       u8 flags7;
+#define XSTORM_ETH_HW_CONN_AG_CTX_FLUSH_Q0_MASK        0x3
+#define XSTORM_ETH_HW_CONN_AG_CTX_FLUSH_Q0_SHIFT               0
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED10_MASK      0x3
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED10_SHIFT             2
+#define XSTORM_ETH_HW_CONN_AG_CTX_SLOW_PATH_MASK       0x3
+#define XSTORM_ETH_HW_CONN_AG_CTX_SLOW_PATH_SHIFT              4
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF0EN_MASK   0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF0EN_SHIFT                  6
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF1EN_MASK   0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF1EN_SHIFT                  7
+       u8 flags8;
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF2EN_MASK   0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF2EN_SHIFT                  0
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF3EN_MASK   0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF3EN_SHIFT                  1
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF4EN_MASK   0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF4EN_SHIFT                  2
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF5EN_MASK   0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF5EN_SHIFT                  3
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF6EN_MASK   0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF6EN_SHIFT                  4
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF7EN_MASK   0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF7EN_SHIFT                  5
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF8EN_MASK   0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF8EN_SHIFT                  6
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF9EN_MASK   0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF9EN_SHIFT                  7
+       u8 flags9;
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF10EN_MASK  0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF10EN_SHIFT                 0
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF11EN_MASK  0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF11EN_SHIFT                 1
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF12EN_MASK  0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF12EN_SHIFT                 2
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF13EN_MASK  0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF13EN_SHIFT                 3
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF14EN_MASK  0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF14EN_SHIFT                 4
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF15EN_MASK  0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF15EN_SHIFT                 5
+#define XSTORM_ETH_HW_CONN_AG_CTX_GO_TO_BD_CONS_CF_EN_MASK     0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_GO_TO_BD_CONS_CF_EN_SHIFT    6
+#define XSTORM_ETH_HW_CONN_AG_CTX_MULTI_UNICAST_CF_EN_MASK     0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_MULTI_UNICAST_CF_EN_SHIFT    7
+       u8 flags10;
+#define XSTORM_ETH_HW_CONN_AG_CTX_DQ_CF_EN_MASK        0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_DQ_CF_EN_SHIFT               0
+#define XSTORM_ETH_HW_CONN_AG_CTX_TERMINATE_CF_EN_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_TERMINATE_CF_EN_SHIFT        1
+#define XSTORM_ETH_HW_CONN_AG_CTX_FLUSH_Q0_EN_MASK     0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_FLUSH_Q0_EN_SHIFT            2
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED11_MASK      0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED11_SHIFT             3
+#define XSTORM_ETH_HW_CONN_AG_CTX_SLOW_PATH_EN_MASK    0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_SLOW_PATH_EN_SHIFT           4
+#define XSTORM_ETH_HW_CONN_AG_CTX_TPH_ENABLE_EN_RESERVED_MASK  0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_TPH_ENABLE_EN_RESERVED_SHIFT 5
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED12_MASK      0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED12_SHIFT             6
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED13_MASK      0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED13_SHIFT             7
+       u8 flags11;
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED14_MASK      0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED14_SHIFT             0
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED15_MASK      0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED15_SHIFT             1
+#define XSTORM_ETH_HW_CONN_AG_CTX_TX_DEC_RULE_EN_MASK  0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_TX_DEC_RULE_EN_SHIFT         2
+#define XSTORM_ETH_HW_CONN_AG_CTX_RULE5EN_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RULE5EN_SHIFT                3
+#define XSTORM_ETH_HW_CONN_AG_CTX_RULE6EN_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RULE6EN_SHIFT                4
+#define XSTORM_ETH_HW_CONN_AG_CTX_RULE7EN_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RULE7EN_SHIFT                5
+#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED1_MASK    0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED1_SHIFT           6
+#define XSTORM_ETH_HW_CONN_AG_CTX_RULE9EN_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RULE9EN_SHIFT                7
+       u8 flags12;
+#define XSTORM_ETH_HW_CONN_AG_CTX_RULE10EN_MASK        0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RULE10EN_SHIFT               0
+#define XSTORM_ETH_HW_CONN_AG_CTX_RULE11EN_MASK        0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RULE11EN_SHIFT               1
+#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED2_MASK    0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED2_SHIFT           2
+#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED3_MASK    0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED3_SHIFT           3
+#define XSTORM_ETH_HW_CONN_AG_CTX_RULE14EN_MASK        0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RULE14EN_SHIFT               4
+#define XSTORM_ETH_HW_CONN_AG_CTX_RULE15EN_MASK        0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RULE15EN_SHIFT               5
+#define XSTORM_ETH_HW_CONN_AG_CTX_RULE16EN_MASK        0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RULE16EN_SHIFT               6
+#define XSTORM_ETH_HW_CONN_AG_CTX_RULE17EN_MASK        0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RULE17EN_SHIFT               7
+       u8 flags13;
+#define XSTORM_ETH_HW_CONN_AG_CTX_RULE18EN_MASK        0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RULE18EN_SHIFT               0
+#define XSTORM_ETH_HW_CONN_AG_CTX_RULE19EN_MASK        0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RULE19EN_SHIFT               1
+#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED4_MASK    0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED4_SHIFT           2
+#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED5_MASK    0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED5_SHIFT           3
+#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED6_MASK    0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED6_SHIFT           4
+#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED7_MASK    0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED7_SHIFT           5
+#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED8_MASK    0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED8_SHIFT           6
+#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED9_MASK    0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED9_SHIFT           7
+       u8 flags14;
+#define XSTORM_ETH_HW_CONN_AG_CTX_EDPM_USE_EXT_HDR_MASK        0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_EDPM_USE_EXT_HDR_SHIFT       0
+#define XSTORM_ETH_HW_CONN_AG_CTX_EDPM_SEND_RAW_L3L4_MASK      0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_EDPM_SEND_RAW_L3L4_SHIFT     1
+#define XSTORM_ETH_HW_CONN_AG_CTX_EDPM_INBAND_PROP_HDR_MASK    0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_EDPM_INBAND_PROP_HDR_SHIFT   2
+#define XSTORM_ETH_HW_CONN_AG_CTX_EDPM_SEND_EXT_TUNNEL_MASK    0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_EDPM_SEND_EXT_TUNNEL_SHIFT   3
+#define XSTORM_ETH_HW_CONN_AG_CTX_L2_EDPM_ENABLE_MASK  0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_L2_EDPM_ENABLE_SHIFT         4
+#define XSTORM_ETH_HW_CONN_AG_CTX_ROCE_EDPM_ENABLE_MASK        0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_ROCE_EDPM_ENABLE_SHIFT       5
+#define XSTORM_ETH_HW_CONN_AG_CTX_TPH_ENABLE_MASK      0x3
+#define XSTORM_ETH_HW_CONN_AG_CTX_TPH_ENABLE_SHIFT             6
+       u8 edpm_event_id;
+       __le16 physical_q0;
+       __le16 quota;
+       __le16 edpm_num_bds;
+       __le16 tx_bd_cons;
+       __le16 tx_bd_prod;
+       __le16 tx_class;
+       __le16 conn_dpi;
+};
+
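
The three context structures added above pack per-connection state into u8 flags words through paired _MASK/_SHIFT macros. A short usage sketch with the driver's SET_FIELD() helper, which is visible later in this patch and is assumed to token-paste the _MASK/_SHIFT suffixes:

	u8 flags0 = 0;

	/* CF0 is a 2-bit field at bits 2..3; EXIST_IN_QM0 is bit 0 */
	SET_FIELD(flags0, MSTORM_ETH_CONN_AG_CTX_CF0, 2);
	SET_FIELD(flags0, MSTORM_ETH_CONN_AG_CTX_EXIST_IN_QM0, 1);
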
 struct mstorm_rdma_task_st_ctx {
        struct regpair temp[4];
 };
@@ -6165,7 +6764,7 @@ struct ystorm_roce_conn_st_ctx {
 };
 
 struct xstorm_roce_conn_st_ctx {
-       struct regpair temp[22];
+       struct regpair temp[24];
 };
 
 struct tstorm_roce_conn_st_ctx {
@@ -6220,7 +6819,7 @@ struct roce_create_qp_req_ramrod_data {
        __le16 mtu;
        __le16 pd;
        __le16 sq_num_pages;
-       __le16 reseved2;
+       __le16 low_latency_phy_queue;
        struct regpair sq_pbl_addr;
        struct regpair orq_pbl_addr;
        __le16 local_mac_addr[3];
@@ -6234,7 +6833,7 @@ struct roce_create_qp_req_ramrod_data {
        u8 stats_counter_id;
        u8 reserved3[7];
        __le32 cq_cid;
-       __le16 physical_queue0;
+       __le16 regular_latency_phy_queue;
        __le16 dpi;
 };
 
@@ -6282,15 +6881,16 @@ struct roce_create_qp_resp_ramrod_data {
        __le32 dst_gid[4];
        struct regpair qp_handle_for_cqe;
        struct regpair qp_handle_for_async;
-       __le32 reserved2[2];
+       __le16 low_latency_phy_queue;
+       u8 reserved2[6];
        __le32 cq_cid;
-       __le16 physical_queue0;
+       __le16 regular_latency_phy_queue;
        __le16 dpi;
 };
 
 struct roce_destroy_qp_req_output_params {
        __le32 num_bound_mw;
-       __le32 reserved;
+       __le32 cq_prod;
 };
 
 struct roce_destroy_qp_req_ramrod_data {
@@ -6299,7 +6899,7 @@ struct roce_destroy_qp_req_ramrod_data {
 
 struct roce_destroy_qp_resp_output_params {
        __le32 num_invalidated_mw;
-       __le32 reserved;
+       __le32 cq_prod;
 };
 
 struct roce_destroy_qp_resp_ramrod_data {
@@ -7426,6 +8026,7 @@ struct ystorm_fcoe_conn_st_ctx {
        u8 fcp_rsp_size;
        __le16 mss;
        struct regpair reserved;
+       __le16 min_frame_size;
        u8 protection_info_flags;
 #define YSTORM_FCOE_CONN_ST_CTX_SUPPORT_PROTECTION_MASK  0x1
 #define YSTORM_FCOE_CONN_ST_CTX_SUPPORT_PROTECTION_SHIFT 0
@@ -7444,7 +8045,6 @@ struct ystorm_fcoe_conn_st_ctx {
 #define YSTORM_FCOE_CONN_ST_CTX_RSRV_MASK                0x3F
 #define YSTORM_FCOE_CONN_ST_CTX_RSRV_SHIFT               2
        u8 fcp_xfer_size;
-       u8 reserved3[2];
 };
 
 struct fcoe_vlan_fields {
@@ -8273,10 +8873,10 @@ struct xstorm_iscsi_conn_ag_ctx {
 #define XSTORM_ISCSI_CONN_AG_CTX_DQ_FLUSH_MASK                    0x3
 #define XSTORM_ISCSI_CONN_AG_CTX_DQ_FLUSH_SHIFT                   6
        u8 flags7;
-#define XSTORM_ISCSI_CONN_AG_CTX_FLUSH_Q0_MASK                    0x3
-#define XSTORM_ISCSI_CONN_AG_CTX_FLUSH_Q0_SHIFT                   0
-#define XSTORM_ISCSI_CONN_AG_CTX_FLUSH_Q1_MASK                    0x3
-#define XSTORM_ISCSI_CONN_AG_CTX_FLUSH_Q1_SHIFT                   2
+#define XSTORM_ISCSI_CONN_AG_CTX_MST_XCM_Q0_FLUSH_CF_MASK      0x3
+#define XSTORM_ISCSI_CONN_AG_CTX_MST_XCM_Q0_FLUSH_CF_SHIFT        0
+#define XSTORM_ISCSI_CONN_AG_CTX_UST_XCM_Q1_FLUSH_CF_MASK      0x3
+#define XSTORM_ISCSI_CONN_AG_CTX_UST_XCM_Q1_FLUSH_CF_SHIFT        2
 #define XSTORM_ISCSI_CONN_AG_CTX_SLOW_PATH_MASK                   0x3
 #define XSTORM_ISCSI_CONN_AG_CTX_SLOW_PATH_SHIFT                  4
 #define XSTORM_ISCSI_CONN_AG_CTX_CF0EN_MASK                       0x1
@@ -8322,10 +8922,10 @@ struct xstorm_iscsi_conn_ag_ctx {
 #define XSTORM_ISCSI_CONN_AG_CTX_CF18EN_SHIFT                     0
 #define XSTORM_ISCSI_CONN_AG_CTX_DQ_FLUSH_EN_MASK                 0x1
 #define XSTORM_ISCSI_CONN_AG_CTX_DQ_FLUSH_EN_SHIFT                1
-#define XSTORM_ISCSI_CONN_AG_CTX_FLUSH_Q0_EN_MASK                 0x1
-#define XSTORM_ISCSI_CONN_AG_CTX_FLUSH_Q0_EN_SHIFT                2
-#define XSTORM_ISCSI_CONN_AG_CTX_FLUSH_Q1_EN_MASK                 0x1
-#define XSTORM_ISCSI_CONN_AG_CTX_FLUSH_Q1_EN_SHIFT                3
+#define XSTORM_ISCSI_CONN_AG_CTX_MST_XCM_Q0_FLUSH_CF_EN_MASK   0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_MST_XCM_Q0_FLUSH_CF_EN_SHIFT     2
+#define XSTORM_ISCSI_CONN_AG_CTX_UST_XCM_Q1_FLUSH_CF_EN_MASK   0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_UST_XCM_Q1_FLUSH_CF_EN_SHIFT     3
 #define XSTORM_ISCSI_CONN_AG_CTX_SLOW_PATH_EN_MASK                0x1
 #define XSTORM_ISCSI_CONN_AG_CTX_SLOW_PATH_EN_SHIFT               4
 #define XSTORM_ISCSI_CONN_AG_CTX_PROC_ONLY_CLEANUP_EN_MASK        0x1
@@ -8335,8 +8935,8 @@ struct xstorm_iscsi_conn_ag_ctx {
 #define XSTORM_ISCSI_CONN_AG_CTX_MORE_TO_SEND_DEC_RULE_EN_MASK    0x1
 #define XSTORM_ISCSI_CONN_AG_CTX_MORE_TO_SEND_DEC_RULE_EN_SHIFT   7
        u8 flags11;
-#define XSTORM_ISCSI_CONN_AG_CTX_RULE2EN_MASK                     0x1
-#define XSTORM_ISCSI_CONN_AG_CTX_RULE2EN_SHIFT                    0
+#define XSTORM_ISCSI_CONN_AG_CTX_TX_BLOCKED_EN_MASK    0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_TX_BLOCKED_EN_SHIFT              0
 #define XSTORM_ISCSI_CONN_AG_CTX_RULE3EN_MASK                     0x1
 #define XSTORM_ISCSI_CONN_AG_CTX_RULE3EN_SHIFT                    1
 #define XSTORM_ISCSI_CONN_AG_CTX_RESERVED3_MASK                   0x1
@@ -8440,7 +9040,7 @@ struct xstorm_iscsi_conn_ag_ctx {
        __le32 reg10;
        __le32 reg11;
        __le32 exp_stat_sn;
-       __le32 reg13;
+       __le32 ongoing_fast_rxmit_seq;
        __le32 reg14;
        __le32 reg15;
        __le32 reg16;
@@ -8466,10 +9066,10 @@ struct tstorm_iscsi_conn_ag_ctx {
 #define TSTORM_ISCSI_CONN_AG_CTX_CF0_MASK                0x3
 #define TSTORM_ISCSI_CONN_AG_CTX_CF0_SHIFT               6
        u8 flags1;
-#define TSTORM_ISCSI_CONN_AG_CTX_CF1_MASK                0x3
-#define TSTORM_ISCSI_CONN_AG_CTX_CF1_SHIFT               0
-#define TSTORM_ISCSI_CONN_AG_CTX_CF2_MASK                0x3
-#define TSTORM_ISCSI_CONN_AG_CTX_CF2_SHIFT               2
+#define TSTORM_ISCSI_CONN_AG_CTX_P2T_FLUSH_CF_MASK     0x3
+#define TSTORM_ISCSI_CONN_AG_CTX_P2T_FLUSH_CF_SHIFT      0
+#define TSTORM_ISCSI_CONN_AG_CTX_M2T_FLUSH_CF_MASK     0x3
+#define TSTORM_ISCSI_CONN_AG_CTX_M2T_FLUSH_CF_SHIFT      2
 #define TSTORM_ISCSI_CONN_AG_CTX_TIMER_STOP_ALL_MASK     0x3
 #define TSTORM_ISCSI_CONN_AG_CTX_TIMER_STOP_ALL_SHIFT    4
 #define TSTORM_ISCSI_CONN_AG_CTX_CF4_MASK                0x3
@@ -8490,10 +9090,10 @@ struct tstorm_iscsi_conn_ag_ctx {
 #define TSTORM_ISCSI_CONN_AG_CTX_CF10_SHIFT              2
 #define TSTORM_ISCSI_CONN_AG_CTX_CF0EN_MASK              0x1
 #define TSTORM_ISCSI_CONN_AG_CTX_CF0EN_SHIFT             4
-#define TSTORM_ISCSI_CONN_AG_CTX_CF1EN_MASK              0x1
-#define TSTORM_ISCSI_CONN_AG_CTX_CF1EN_SHIFT             5
-#define TSTORM_ISCSI_CONN_AG_CTX_CF2EN_MASK              0x1
-#define TSTORM_ISCSI_CONN_AG_CTX_CF2EN_SHIFT             6
+#define TSTORM_ISCSI_CONN_AG_CTX_P2T_FLUSH_CF_EN_MASK  0x1
+#define TSTORM_ISCSI_CONN_AG_CTX_P2T_FLUSH_CF_EN_SHIFT   5
+#define TSTORM_ISCSI_CONN_AG_CTX_M2T_FLUSH_CF_EN_MASK  0x1
+#define TSTORM_ISCSI_CONN_AG_CTX_M2T_FLUSH_CF_EN_SHIFT   6
 #define TSTORM_ISCSI_CONN_AG_CTX_TIMER_STOP_ALL_EN_MASK  0x1
 #define TSTORM_ISCSI_CONN_AG_CTX_TIMER_STOP_ALL_EN_SHIFT 7
        u8 flags4;
@@ -8539,7 +9139,7 @@ struct tstorm_iscsi_conn_ag_ctx {
        __le32 reg6;
        __le32 reg7;
        __le32 reg8;
-       u8 byte2;
+       u8 cid_offload_cnt;
        u8 byte3;
        __le16 word0;
 };
@@ -8831,11 +9431,24 @@ struct eth_stats {
        u64 r511;
        u64 r1023;
        u64 r1518;
-       u64 r1522;
-       u64 r2047;
-       u64 r4095;
-       u64 r9216;
-       u64 r16383;
+
+       union {
+               struct {
+                       u64 r1522;
+                       u64 r2047;
+                       u64 r4095;
+                       u64 r9216;
+                       u64 r16383;
+               } bb0;
+               struct {
+                       u64 unused1;
+                       u64 r1519_to_max;
+                       u64 unused2;
+                       u64 unused3;
+                       u64 unused4;
+               } ah0;
+       } u0;
+
        u64 rfcs;
        u64 rxcf;
        u64 rxpf;
@@ -8852,14 +9465,36 @@ struct eth_stats {
        u64 t511;
        u64 t1023;
        u64 t1518;
-       u64 t2047;
-       u64 t4095;
-       u64 t9216;
-       u64 t16383;
+
+       union {
+               struct {
+                       u64 t2047;
+                       u64 t4095;
+                       u64 t9216;
+                       u64 t16383;
+               } bb1;
+               struct {
+                       u64 t1519_to_max;
+                       u64 unused6;
+                       u64 unused7;
+                       u64 unused8;
+               } ah1;
+       } u1;
+
        u64 txpf;
        u64 txpp;
-       u64 tlpiec;
-       u64 tncl;
+
+       union {
+               struct {
+                       u64 tlpiec;
+                       u64 tncl;
+               } bb2;
+               struct {
+                       u64 unused9;
+                       u64 unused10;
+               } ah2;
+       } u2;
+
        u64 rbyte;
        u64 rxuca;
        u64 rxmca;
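
The unions introduced above overlay the BB frame-size counters with their AH equivalents, so a single eth_stats layout serves both chip families. A hedged accessor sketch (QED_IS_BB() is an assumed chip-family predicate; the patch context only shows the QED_IS_BB_A0/QED_IS_BB_B0 variants):

	static u64 qed_rx_frames_1519_plus(struct qed_dev *cdev,
					   struct eth_stats *stats)
	{
		if (QED_IS_BB(cdev))	/* assumed predicate, see lead-in */
			return stats->u0.bb0.r1522 + stats->u0.bb0.r2047 +
			       stats->u0.bb0.r4095 + stats->u0.bb0.r9216 +
			       stats->u0.bb0.r16383;

		/* AH exposes a single aggregate counter instead */
		return stats->u0.ah0.r1519_to_max;
	}
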
@@ -8943,12 +9578,12 @@ struct dcbx_ets_feature {
 #define DCBX_ETS_CBS_SHIFT     3
 #define DCBX_ETS_MAX_TCS_MASK  0x000000f0
 #define DCBX_ETS_MAX_TCS_SHIFT 4
-#define DCBX_ISCSI_OOO_TC_MASK 0x00000f00
-#define DCBX_ISCSI_OOO_TC_SHIFT        8
+#define DCBX_OOO_TC_MASK       0x00000f00
+#define DCBX_OOO_TC_SHIFT      8
        u32 pri_tc_tbl[1];
-#define DCBX_ISCSI_OOO_TC      (4)
+#define DCBX_TCP_OOO_TC                (4)
 
-#define NIG_ETS_ISCSI_OOO_CLIENT_OFFSET        (DCBX_ISCSI_OOO_TC + 1)
+#define NIG_ETS_ISCSI_OOO_CLIENT_OFFSET        (DCBX_TCP_OOO_TC + 1)
 #define DCBX_CEE_STRICT_PRIORITY       0xf
        u32 tc_bw_tbl[2];
        u32 tc_tsa_tbl[2];
@@ -8957,6 +9592,9 @@ struct dcbx_ets_feature {
 #define DCBX_ETS_TSA_ETS       2
 };
 
+#define DCBX_TCP_OOO_TC                        (4)
+#define DCBX_TCP_OOO_K2_4PORT_TC       (3)
+
 struct dcbx_app_priority_entry {
        u32 entry;
 #define DCBX_APP_PRI_MAP_MASK          0x000000ff
@@ -9067,6 +9705,10 @@ struct dcb_dscp_map {
 struct public_global {
        u32 max_path;
        u32 max_ports;
+#define MODE_1P 1
+#define MODE_2P 2
+#define MODE_3P 3
+#define MODE_4P 4
        u32 debug_mb_offset;
        u32 phymod_dbg_mb_offset;
        struct couple_mode_teaming cmt;
@@ -9248,9 +9890,11 @@ struct public_func {
 #define DRV_ID_PDA_COMP_VER_MASK       0x0000ffff
 #define DRV_ID_PDA_COMP_VER_SHIFT      0
 
+#define LOAD_REQ_HSI_VERSION           2
 #define DRV_ID_MCP_HSI_VER_MASK                0x00ff0000
 #define DRV_ID_MCP_HSI_VER_SHIFT       16
-#define DRV_ID_MCP_HSI_VER_CURRENT     (1 << DRV_ID_MCP_HSI_VER_SHIFT)
+#define DRV_ID_MCP_HSI_VER_CURRENT     (LOAD_REQ_HSI_VERSION << \
+                                        DRV_ID_MCP_HSI_VER_SHIFT)
 
 #define DRV_ID_DRV_TYPE_MASK           0x7f000000
 #define DRV_ID_DRV_TYPE_SHIFT          24
@@ -9345,6 +9989,7 @@ enum resource_id_enum {
        RESOURCE_NUM_RSS_ENGINES_E = 14,
        RESOURCE_LL2_QUEUE_E = 15,
        RESOURCE_RDMA_STATS_QUEUE_E = 16,
+       RESOURCE_BDQ_E = 17,
        RESOURCE_MAX_NUM,
        RESOURCE_NUM_INVALID = 0xFFFFFFFF
 };
@@ -9362,6 +10007,46 @@ struct resource_info {
 #define RESOURCE_ELEMENT_STRICT (1 << 0)
 };
 
+#define DRV_ROLE_NONE           0
+#define DRV_ROLE_PREBOOT        1
+#define DRV_ROLE_OS             2
+#define DRV_ROLE_KDUMP          3
+
+struct load_req_stc {
+       u32 drv_ver_0;
+       u32 drv_ver_1;
+       u32 fw_ver;
+       u32 misc0;
+#define LOAD_REQ_ROLE_MASK              0x000000FF
+#define LOAD_REQ_ROLE_SHIFT             0
+#define LOAD_REQ_LOCK_TO_MASK           0x0000FF00
+#define LOAD_REQ_LOCK_TO_SHIFT          8
+#define LOAD_REQ_LOCK_TO_DEFAULT        0
+#define LOAD_REQ_LOCK_TO_NONE           255
+#define LOAD_REQ_FORCE_MASK             0x000F0000
+#define LOAD_REQ_FORCE_SHIFT            16
+#define LOAD_REQ_FORCE_NONE             0
+#define LOAD_REQ_FORCE_PF               1
+#define LOAD_REQ_FORCE_ALL              2
+#define LOAD_REQ_FLAGS0_MASK            0x00F00000
+#define LOAD_REQ_FLAGS0_SHIFT           20
+#define LOAD_REQ_FLAGS0_AVOID_RESET     (0x1 << 0)
+};
+
+struct load_rsp_stc {
+       u32 drv_ver_0;
+       u32 drv_ver_1;
+       u32 fw_ver;
+       u32 misc0;
+#define LOAD_RSP_ROLE_MASK              0x000000FF
+#define LOAD_RSP_ROLE_SHIFT             0
+#define LOAD_RSP_HSI_MASK               0x0000FF00
+#define LOAD_RSP_HSI_SHIFT              8
+#define LOAD_RSP_FLAGS0_MASK            0x000F0000
+#define LOAD_RSP_FLAGS0_SHIFT           16
+#define LOAD_RSP_FLAGS0_DRV_EXISTS      (0x1 << 0)
+};
+
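
load_req_stc.misc0 packs the driver role, lock timeout and force policy through the masks just defined. A standalone sketch composing it for a regular OS driver (hypothetical helper name; the real request path also fills drv_ver_0/1 and fw_ver):

	static u32 qed_mcp_compose_load_misc0(void)
	{
		u32 misc0 = 0;

		misc0 |= (DRV_ROLE_OS << LOAD_REQ_ROLE_SHIFT) &
			 LOAD_REQ_ROLE_MASK;
		misc0 |= (LOAD_REQ_LOCK_TO_DEFAULT << LOAD_REQ_LOCK_TO_SHIFT) &
			 LOAD_REQ_LOCK_TO_MASK;
		misc0 |= (LOAD_REQ_FORCE_NONE << LOAD_REQ_FORCE_SHIFT) &
			 LOAD_REQ_FORCE_MASK;

		return misc0;
	}
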
 union drv_union_data {
        u32 ver_str[MCP_DRV_VER_STR_SIZE_DWORD];
        struct mcp_mac wol_mac;
@@ -9393,6 +10078,7 @@ struct public_drv_mb {
 #define DRV_MSG_CODE_LOAD_REQ                  0x10000000
 #define DRV_MSG_CODE_LOAD_DONE                 0x11000000
 #define DRV_MSG_CODE_INIT_HW                   0x12000000
+#define DRV_MSG_CODE_CANCEL_LOAD_REQ            0x13000000
 #define DRV_MSG_CODE_UNLOAD_REQ                        0x20000000
 #define DRV_MSG_CODE_UNLOAD_DONE               0x21000000
 #define DRV_MSG_CODE_INIT_PHY                  0x22000000
@@ -9405,12 +10091,14 @@ struct public_drv_mb {
 #define DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE     0x31000000
 #define DRV_MSG_CODE_BW_UPDATE_ACK              0x32000000
 #define DRV_MSG_CODE_OV_UPDATE_MTU              0x33000000
+#define DRV_MSG_GET_RESOURCE_ALLOC_MSG         0x34000000
+#define DRV_MSG_SET_RESOURCE_VALUE_MSG         0x35000000
 #define DRV_MSG_CODE_OV_UPDATE_WOL              0x38000000
 #define DRV_MSG_CODE_OV_UPDATE_ESWITCH_MODE     0x39000000
 
 #define DRV_MSG_CODE_BW_UPDATE_ACK             0x32000000
 #define DRV_MSG_CODE_NIG_DRAIN                 0x30000000
-#define DRV_MSG_GET_RESOURCE_ALLOC_MSG          0x34000000
+#define DRV_MSG_CODE_INITIATE_PF_FLR            0x02010000
 #define DRV_MSG_CODE_VF_DISABLED_DONE          0xc0000000
 #define DRV_MSG_CODE_CFG_VF_MSIX               0xc0010000
 #define DRV_MSG_CODE_NVM_GET_FILE_ATT          0x00030000
@@ -9436,6 +10124,33 @@ struct public_drv_mb {
 
 #define DRV_MSG_CODE_BIST_TEST                 0x001e0000
 #define DRV_MSG_CODE_SET_LED_MODE              0x00200000
+#define DRV_MSG_CODE_RESOURCE_CMD      0x00230000
+
+#define RESOURCE_CMD_REQ_RESC_MASK             0x0000001F
+#define RESOURCE_CMD_REQ_RESC_SHIFT            0
+#define RESOURCE_CMD_REQ_OPCODE_MASK           0x000000E0
+#define RESOURCE_CMD_REQ_OPCODE_SHIFT          5
+#define RESOURCE_OPCODE_REQ                    1
+#define RESOURCE_OPCODE_REQ_WO_AGING           2
+#define RESOURCE_OPCODE_REQ_W_AGING            3
+#define RESOURCE_OPCODE_RELEASE                        4
+#define RESOURCE_OPCODE_FORCE_RELEASE          5
+#define RESOURCE_CMD_REQ_AGE_MASK              0x0000FF00
+#define RESOURCE_CMD_REQ_AGE_SHIFT             8
+
+#define RESOURCE_CMD_RSP_OWNER_MASK            0x000000FF
+#define RESOURCE_CMD_RSP_OWNER_SHIFT           0
+#define RESOURCE_CMD_RSP_OPCODE_MASK           0x00000700
+#define RESOURCE_CMD_RSP_OPCODE_SHIFT          8
+#define RESOURCE_OPCODE_GNT                    1
+#define RESOURCE_OPCODE_BUSY                   2
+#define RESOURCE_OPCODE_RELEASED               3
+#define RESOURCE_OPCODE_RELEASED_PREVIOUS      4
+#define RESOURCE_OPCODE_WRONG_OWNER            5
+#define RESOURCE_OPCODE_UNKNOWN_CMD            255
+
+#define RESOURCE_DUMP                          0
+
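
The new RESOURCE_CMD word carries a resource id, an opcode and an optional age in one mailbox param. A sketch building an aging acquire of RESOURCE_DUMP from the REQ masks above (hypothetical helper; the real driver plumbs this through its mailbox command path):

	static u32 qed_resc_lock_param(u8 resc, u8 age_sec)
	{
		u32 param = 0;

		param |= (resc << RESOURCE_CMD_REQ_RESC_SHIFT) &
			 RESOURCE_CMD_REQ_RESC_MASK;
		param |= (RESOURCE_OPCODE_REQ_W_AGING <<
			  RESOURCE_CMD_REQ_OPCODE_SHIFT) &
			 RESOURCE_CMD_REQ_OPCODE_MASK;
		param |= (age_sec << RESOURCE_CMD_REQ_AGE_SHIFT) &
			 RESOURCE_CMD_REQ_AGE_MASK;

		return param;
	}
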
 #define DRV_MSG_CODE_GET_PF_RDMA_PROTOCOL      0x002b0000
 #define DRV_MSG_CODE_OS_WOL                    0x002e0000
 
@@ -9524,12 +10239,16 @@ struct public_drv_mb {
 
        u32 fw_mb_header;
 #define FW_MSG_CODE_MASK                       0xffff0000
+#define FW_MSG_CODE_UNSUPPORTED                 0x00000000
 #define FW_MSG_CODE_DRV_LOAD_ENGINE            0x10100000
 #define FW_MSG_CODE_DRV_LOAD_PORT              0x10110000
 #define FW_MSG_CODE_DRV_LOAD_FUNCTION          0x10120000
 #define FW_MSG_CODE_DRV_LOAD_REFUSED_PDA       0x10200000
-#define FW_MSG_CODE_DRV_LOAD_REFUSED_HSI       0x10210000
+#define FW_MSG_CODE_DRV_LOAD_REFUSED_HSI_1     0x10210000
 #define FW_MSG_CODE_DRV_LOAD_REFUSED_DIAG      0x10220000
+#define FW_MSG_CODE_DRV_LOAD_REFUSED_HSI        0x10230000
+#define FW_MSG_CODE_DRV_LOAD_REFUSED_REQUIRES_FORCE 0x10300000
+#define FW_MSG_CODE_DRV_LOAD_REFUSED_REJECT     0x10310000
 #define FW_MSG_CODE_DRV_LOAD_DONE              0x11100000
 #define FW_MSG_CODE_DRV_UNLOAD_ENGINE          0x20110000
 #define FW_MSG_CODE_DRV_UNLOAD_PORT            0x20120000
@@ -9549,6 +10268,10 @@ struct public_drv_mb {
 #define FW_MSG_SEQ_NUMBER_MASK                 0x0000ffff
 
        u32 fw_mb_param;
+#define FW_MB_PARAM_RESOURCE_ALLOC_VERSION_MAJOR_MASK  0xFFFF0000
+#define FW_MB_PARAM_RESOURCE_ALLOC_VERSION_MAJOR_SHIFT 16
+#define FW_MB_PARAM_RESOURCE_ALLOC_VERSION_MINOR_MASK  0x0000FFFF
+#define FW_MB_PARAM_RESOURCE_ALLOC_VERSION_MINOR_SHIFT 0
 
        /* get pf rdma protocol command response */
 #define FW_MB_PARAM_GET_PF_RDMA_NONE           0x0
@@ -9659,6 +10382,8 @@ struct nvm_cfg1_glob {
 #define NVM_CFG1_GLOB_NETWORK_PORT_MODE_2X25G          0xC
 #define NVM_CFG1_GLOB_NETWORK_PORT_MODE_1X25G          0xD
 #define NVM_CFG1_GLOB_NETWORK_PORT_MODE_4X25G          0xE
+#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_2X10G          0xF
+
        u32 e_lane_cfg1;
        u32 e_lane_cfg2;
        u32 f_lane_cfg1;
index 899cad7f97ea41a5b8dc5e1aaccd68b068593cae..a05feb38c6eebc778f1471a950e4c59d1ddba092 100644 (file)
@@ -58,6 +58,7 @@ struct qed_ptt {
        struct list_head        list_entry;
        unsigned int            idx;
        struct pxp_ptt_entry    pxp;
+       u8                      hwfn_id;
 };
 
 struct qed_ptt_pool {
@@ -79,6 +80,7 @@ int qed_ptt_pool_alloc(struct qed_hwfn *p_hwfn)
                p_pool->ptts[i].idx = i;
                p_pool->ptts[i].pxp.offset = QED_BAR_INVALID_OFFSET;
                p_pool->ptts[i].pxp.pretend.control = 0;
+               p_pool->ptts[i].hwfn_id = p_hwfn->my_id;
                if (i >= RESERVED_PTT_MAX)
                        list_add(&p_pool->ptts[i].list_entry,
                                 &p_pool->free_list);
@@ -193,6 +195,11 @@ static u32 qed_set_ptt(struct qed_hwfn *p_hwfn,
 
        offset = hw_addr - win_hw_addr;
 
+       if (p_ptt->hwfn_id != p_hwfn->my_id)
+               DP_NOTICE(p_hwfn,
+                         "ptt[%d] of hwfn[%02x] is used by hwfn[%02x]!\n",
+                         p_ptt->idx, p_ptt->hwfn_id, p_hwfn->my_id);
+
        /* Verify the address is within the window */
        if (hw_addr < win_hw_addr ||
            offset >= PXP_EXTERNAL_BAR_PF_WINDOW_SINGLE_SIZE) {
@@ -800,55 +807,3 @@ int qed_dmae_host2host(struct qed_hwfn *p_hwfn,
        return rc;
 }
 
-u16 qed_get_qm_pq(struct qed_hwfn *p_hwfn,
-                 enum protocol_type proto, union qed_qm_pq_params *p_params)
-{
-       u16 pq_id = 0;
-
-       if ((proto == PROTOCOLID_CORE ||
-            proto == PROTOCOLID_ETH ||
-            proto == PROTOCOLID_ISCSI ||
-            proto == PROTOCOLID_ROCE) && !p_params) {
-               DP_NOTICE(p_hwfn,
-                         "Protocol %d received NULL PQ params\n", proto);
-               return 0;
-       }
-
-       switch (proto) {
-       case PROTOCOLID_CORE:
-               if (p_params->core.tc == LB_TC)
-                       pq_id = p_hwfn->qm_info.pure_lb_pq;
-               else if (p_params->core.tc == OOO_LB_TC)
-                       pq_id = p_hwfn->qm_info.ooo_pq;
-               else
-                       pq_id = p_hwfn->qm_info.offload_pq;
-               break;
-       case PROTOCOLID_ETH:
-               pq_id = p_params->eth.tc;
-               if (p_params->eth.is_vf)
-                       pq_id += p_hwfn->qm_info.vf_queues_offset +
-                                p_params->eth.vf_id;
-               break;
-       case PROTOCOLID_ISCSI:
-               if (p_params->iscsi.q_idx == 1)
-                       pq_id = p_hwfn->qm_info.pure_ack_pq;
-               break;
-       case PROTOCOLID_ROCE:
-               if (p_params->roce.dcqcn)
-                       pq_id = p_params->roce.qpid;
-               else
-                       pq_id = p_hwfn->qm_info.offload_pq;
-               if (pq_id > p_hwfn->qm_info.num_pf_rls)
-                       pq_id = p_hwfn->qm_info.offload_pq;
-               break;
-       case PROTOCOLID_FCOE:
-               pq_id = p_hwfn->qm_info.offload_pq;
-               break;
-       default:
-               pq_id = 0;
-       }
-
-       pq_id = CM_TX_PQ_BASE + pq_id + RESC_START(p_hwfn, QED_PQ);
-
-       return pq_id;
-}
index 9277264d2e6552a92a9ca88853501b80763a5dbb..f2505c691c264198e73ce946cfa5933b63292e86 100644 (file)
@@ -297,9 +297,6 @@ union qed_qm_pq_params {
        } roce;
 };
 
-u16 qed_get_qm_pq(struct qed_hwfn *p_hwfn,
-                 enum protocol_type proto, union qed_qm_pq_params *params);
-
 int qed_init_fw_data(struct qed_dev *cdev,
                     const u8 *fw_data);
 #endif
index d891a68526950609f9efbe75ecacfb40ce49b97a..2a50e2b7568f5aff16f3d6afa70c630a6166201a 100644 (file)
@@ -215,13 +215,6 @@ static void qed_cmdq_lines_voq_rt_init(struct qed_hwfn *p_hwfn,
 {
        u32 qm_line_crd;
 
-       /* In A0 - Limit the size of pbf queue so that only 511 commands with
-        * the minimum size of 4 (FCoE minimum size)
-        */
-       bool is_bb_a0 = QED_IS_BB_A0(p_hwfn->cdev);
-
-       if (is_bb_a0)
-               cmdq_lines = min_t(u32, cmdq_lines, 1022);
        qm_line_crd = QM_VOQ_LINE_CRD(cmdq_lines);
        OVERWRITE_RT_REG(p_hwfn, PBF_CMDQ_LINES_RT_OFFSET(voq),
                         (u32)cmdq_lines);
@@ -343,13 +336,11 @@ static void qed_tx_pq_map_rt_init(
        u16 first_pq_group = p_params->start_pq / QM_PF_QUEUE_GROUP_SIZE;
        u16 last_pq_group = (p_params->start_pq + num_pqs - 1) /
                            QM_PF_QUEUE_GROUP_SIZE;
-       bool is_bb_a0 = QED_IS_BB_A0(p_hwfn->cdev);
        u16 i, pq_id, pq_group;
 
        /* a bit per Tx PQ indicating if the PQ is associated with a VF */
        u32 tx_pq_vf_mask[MAX_QM_TX_QUEUES / QM_PF_QUEUE_GROUP_SIZE] = { 0 };
-       u32 tx_pq_vf_mask_width = is_bb_a0 ? 32 : QM_PF_QUEUE_GROUP_SIZE;
-       u32 num_tx_pq_vf_masks = MAX_QM_TX_QUEUES / tx_pq_vf_mask_width;
+       u32 num_tx_pq_vf_masks = MAX_QM_TX_QUEUES / QM_PF_QUEUE_GROUP_SIZE;
        u32 pq_mem_4kb = QM_PQ_MEM_4KB(p_params->num_pf_cids);
        u32 vport_pq_mem_4kb = QM_PQ_MEM_4KB(p_params->num_vf_cids);
        u32 mem_addr_4kb = base_mem_addr_4kb;
@@ -371,6 +362,10 @@ static void qed_tx_pq_map_rt_init(
                bool is_vf_pq = (i >= p_params->num_pf_pqs);
                struct qm_rf_pq_map tx_pq_map;
 
+               bool rl_valid = p_params->pq_params[i].rl_valid &&
+                               (p_params->pq_params[i].vport_id <
+                                MAX_QM_GLOBAL_RLS);
+
                /* update first Tx PQ of VPORT/TC */
                u8 vport_id_in_pf = p_params->pq_params[i].vport_id -
                                    p_params->start_vport;
@@ -389,14 +384,18 @@ static void qed_tx_pq_map_rt_init(
                                     (p_params->pf_id <<
                                      QM_WFQ_VP_PQ_PF_SHIFT));
                }
+
+               if (p_params->pq_params[i].rl_valid && !rl_valid)
+                       DP_NOTICE(p_hwfn,
+                                 "Invalid VPORT ID for rate limiter configuration");
                /* fill PQ map entry */
                memset(&tx_pq_map, 0, sizeof(tx_pq_map));
                SET_FIELD(tx_pq_map.reg, QM_RF_PQ_MAP_PQ_VALID, 1);
-               SET_FIELD(tx_pq_map.reg, QM_RF_PQ_MAP_RL_VALID,
-                         p_params->pq_params[i].rl_valid ? 1 : 0);
+               SET_FIELD(tx_pq_map.reg,
+                         QM_RF_PQ_MAP_RL_VALID, rl_valid ? 1 : 0);
                SET_FIELD(tx_pq_map.reg, QM_RF_PQ_MAP_VP_PQ_ID, first_tx_pq_id);
                SET_FIELD(tx_pq_map.reg, QM_RF_PQ_MAP_RL_ID,
-                         p_params->pq_params[i].rl_valid ?
+                         rl_valid ?
                          p_params->pq_params[i].vport_id : 0);
                SET_FIELD(tx_pq_map.reg, QM_RF_PQ_MAP_VOQ, voq);
                SET_FIELD(tx_pq_map.reg, QM_RF_PQ_MAP_WRR_WEIGHT_GROUP,
@@ -413,8 +412,9 @@ static void qed_tx_pq_map_rt_init(
                        /* if PQ is associated with a VF, add indication
                         * to PQ VF mask
                         */
-                       tx_pq_vf_mask[pq_id / tx_pq_vf_mask_width] |=
-                               (1 << (pq_id % tx_pq_vf_mask_width));
+                       tx_pq_vf_mask[pq_id /
+                                     QM_PF_QUEUE_GROUP_SIZE] |=
+                           BIT((pq_id % QM_PF_QUEUE_GROUP_SIZE));
                        mem_addr_4kb += vport_pq_mem_4kb;
                } else {
                        mem_addr_4kb += pq_mem_4kb;
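
For the VF-mask update above, each PQ maps to one bit of a u32 word indexed by QM_PF_QUEUE_GROUP_SIZE. A stand-in demonstration (the group size of 16 is assumed purely for illustration; the real constant is defined in qed_hsi.h and is not part of this diff):

	#define DEMO_QM_PF_QUEUE_GROUP_SIZE 16	/* stand-in value */

	static void demo_mark_vf_pq(u32 *mask, unsigned int pq_id)
	{
		/* e.g. pq_id 37 -> mask[2] |= 1u << 5 */
		mask[pq_id / DEMO_QM_PF_QUEUE_GROUP_SIZE] |=
			1u << (pq_id % DEMO_QM_PF_QUEUE_GROUP_SIZE);
	}
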
@@ -480,8 +480,8 @@ static int qed_pf_wfq_rt_init(struct qed_hwfn *p_hwfn,
        if (p_params->pf_id < MAX_NUM_PFS_BB)
                crd_reg_offset = QM_REG_WFQPFCRD_RT_OFFSET;
        else
-               crd_reg_offset = QM_REG_WFQPFCRD_MSB_RT_OFFSET +
-                                (p_params->pf_id % MAX_NUM_PFS_BB);
+               crd_reg_offset = QM_REG_WFQPFCRD_MSB_RT_OFFSET;
+       crd_reg_offset += p_params->pf_id % MAX_NUM_PFS_BB;
 
        inc_val = QM_WFQ_INC_VAL(p_params->pf_wfq);
        if (!inc_val || inc_val > QM_WFQ_MAX_INC_VAL) {
@@ -498,11 +498,11 @@ static int qed_pf_wfq_rt_init(struct qed_hwfn *p_hwfn,
                                 QM_WFQ_CRD_REG_SIGN_BIT);
        }
 
-       STORE_RT_REG(p_hwfn, QM_REG_WFQPFWEIGHT_RT_OFFSET + p_params->pf_id,
-                    inc_val);
        STORE_RT_REG(p_hwfn,
                     QM_REG_WFQPFUPPERBOUND_RT_OFFSET + p_params->pf_id,
                     QM_WFQ_UPPER_BOUND | QM_WFQ_CRD_REG_SIGN_BIT);
+       STORE_RT_REG(p_hwfn, QM_REG_WFQPFWEIGHT_RT_OFFSET + p_params->pf_id,
+                    inc_val);
        return 0;
 }
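
The hunk above is a behavioral fix, not just a reflow: the per-PF term pf_id % MAX_NUM_PFS_BB now applies to both register banks, where previously the first bank received the base offset alone. The resulting selection logic in isolation, with invented offset values standing in for the runtime-array constants:

#include <stdint.h>
#include <stdio.h>

#define WFQPFCRD_RT_OFFSET      1000    /* invented base of the first bank */
#define WFQPFCRD_MSB_RT_OFFSET  2000    /* invented base of the MSB bank */
#define MAX_NUM_PFS_BB          8

static uint32_t crd_offset(uint8_t pf_id)
{
        uint32_t off;

        /* pick the register bank, then add the per-PF term in both cases */
        if (pf_id < MAX_NUM_PFS_BB)
                off = WFQPFCRD_RT_OFFSET;
        else
                off = WFQPFCRD_MSB_RT_OFFSET;
        return off + pf_id % MAX_NUM_PFS_BB;
}

int main(void)
{
        printf("pf 3 -> %u, pf 11 -> %u\n", crd_offset(3), crd_offset(11));
        return 0;
}
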
 
@@ -576,6 +576,12 @@ static int qed_vport_rl_rt_init(struct qed_hwfn *p_hwfn,
 {
        u8 i, vport_id;
 
+       if (start_vport + num_vports >= MAX_QM_GLOBAL_RLS) {
+               DP_NOTICE(p_hwfn,
+                         "Invalid VPORT ID for rate limiter configuration\n");
+               return -1;
+       }
+
        /* go over all PF VPORTs */
        for (i = 0, vport_id = start_vport; i < num_vports; i++, vport_id++) {
                u32 inc_val = QM_RL_INC_VAL(vport_params[i].vport_rl);
@@ -785,6 +791,12 @@ int qed_init_vport_rl(struct qed_hwfn *p_hwfn,
 {
        u32 inc_val = QM_RL_INC_VAL(vport_rl);
 
+       if (vport_id >= MAX_QM_GLOBAL_RLS) {
+               DP_NOTICE(p_hwfn,
+                         "Invalid VPORT ID for rate limiter configuration\n");
+               return -1;
+       }
+
        if (inc_val > QM_RL_MAX_INC_VAL) {
                DP_NOTICE(p_hwfn, "Invalid VPORT rate-limit configuration");
                return -1;
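
Both guards added above reject a rate-limiter index range up front, before any per-ID register is programmed. The same bounds check as a standalone sketch; the MAX_QM_GLOBAL_RLS value is assumed:

#include <stdio.h>

#define MAX_QM_GLOBAL_RLS 256   /* assumed size of the global RL array */

/* Reject a configuration whose VPORT IDs would index past the RL array */
static int check_rl_range(unsigned int start_vport, unsigned int num_vports)
{
        if (start_vport + num_vports >= MAX_QM_GLOBAL_RLS) {
                fprintf(stderr,
                        "Invalid VPORT ID for rate limiter configuration\n");
                return -1;
        }
        return 0;
}

int main(void)
{
        printf("in range: %d, out of range: %d\n",
               check_rl_range(0, 16), check_rl_range(250, 10));
        return 0;
}
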
@@ -940,12 +952,6 @@ void qed_set_geneve_enable(struct qed_hwfn *p_hwfn,
               eth_geneve_enable ? 1 : 0);
        qed_wr(p_hwfn, p_ptt, NIG_REG_NGE_IP_ENABLE, ip_geneve_enable ? 1 : 0);
 
-       /* comp ver */
-       reg_val = (ip_geneve_enable || eth_geneve_enable) ? 1 : 0;
-       qed_wr(p_hwfn, p_ptt, NIG_REG_NGE_COMP_VER, reg_val);
-       qed_wr(p_hwfn, p_ptt, PBF_REG_NGE_COMP_VER, reg_val);
-       qed_wr(p_hwfn, p_ptt, PRS_REG_NGE_COMP_VER, reg_val);
-
        /* EDPM with geneve tunnel not supported in BB_B0 */
        if (QED_IS_BB_B0(p_hwfn->cdev))
                return;
index 243b64e0d4dc3ed36f92e570022aa68c7af58901..4a2e7be5bf7210acc93f3ded8d20e1240e3aa6ef 100644 (file)
@@ -554,7 +554,7 @@ int qed_init_fw_data(struct qed_dev *cdev, const u8 *data)
        }
 
-       /* First Dword contains metadata and should be skipped */
-       buf_hdr = (struct bin_buffer_hdr *)(data + sizeof(u32));
+       /* The binary buffer header table sits at the start of the data;
+        * the section offsets below are relative to the buffer start.
+        */
+       buf_hdr = (struct bin_buffer_hdr *)data;
 
        offset = buf_hdr[BIN_BUF_INIT_FW_VER_INFO].offset;
        fw->fw_ver_info = (struct fw_ver_info *)(data + offset);
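
With buf_hdr taken from the start of the image, every section is located purely by its blob-relative offset, as the two lines above show. A hedged sketch of that lookup, using a simplified two-field header in place of the real struct bin_buffer_hdr:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* simplified stand-in for struct bin_buffer_hdr: offset + length per section */
struct bin_hdr {
        uint32_t offset;
        uint32_t length;
};

/* Return a pointer to section `idx`, validating that it fits in the blob */
static const uint8_t *get_section(const uint8_t *data, size_t data_len,
                                  unsigned int idx, uint32_t *len)
{
        const struct bin_hdr *hdr = (const struct bin_hdr *)data;

        if (hdr[idx].offset + hdr[idx].length > data_len)
                return NULL;
        *len = hdr[idx].length;
        return data + hdr[idx].offset;  /* offsets are blob-relative */
}

int main(void)
{
        uint8_t blob[32] = { 0 };
        struct bin_hdr h = { .offset = 16, .length = 4 };
        const uint8_t *sec;
        uint32_t len = 0;

        memcpy(blob, &h, sizeof(h));
        memcpy(blob + 16, "qed", 4);
        sec = get_section(blob, sizeof(blob), 0, &len);
        if (sec)
                printf("section 0: \"%s\", %u bytes\n",
                       (const char *)sec, len);
        return 0;
}
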
index 84310b60849b4881557cfd62761549a1a182f2d9..0ed24d6e6c6520450ed05b50e8486e54b712a354 100644 (file)
@@ -2500,8 +2500,9 @@ void qed_int_cau_conf_sb(struct qed_hwfn *p_hwfn,
 
        /* Configure pi coalescing if set */
        if (p_hwfn->cdev->int_coalescing_mode == QED_COAL_MODE_ENABLE) {
+               u8 num_tc = p_hwfn->hw_info.num_hw_tc;
                u8 timeset, timer_res;
-               u8 num_tc = 1, i;
+               u8 i;
 
                /* timeset = (coalesce >> timer-res), timeset is 7bit wide */
                if (p_hwfn->cdev->rx_coalesce_usecs <= 0x7F)
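
The context comment spells out the constraint: timeset = (coalesce >> timer_res) must fit in 7 bits, so the driver picks the smallest resolution that makes it fit. A standalone sketch of that selection; the 0..2 resolution range is assumed for illustration:

#include <stdint.h>
#include <stdio.h>

/* timeset must fit in 7 bits; pick the smallest resolution that fits */
static int coalesce_to_timeset(uint32_t usecs, uint8_t *timer_res,
                               uint8_t *timeset)
{
        uint8_t res;

        for (res = 0; res <= 2; res++) {        /* resolutions assumed: 0..2 */
                if ((usecs >> res) <= 0x7F) {
                        *timer_res = res;
                        *timeset = usecs >> res;
                        return 0;
                }
        }
        return -1;      /* value cannot be represented */
}

int main(void)
{
        uint8_t res, set;

        if (!coalesce_to_timeset(200, &res, &set))
                printf("timer_res=%u timeset=0x%02x\n", res, set);
        return 0;
}
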
index 3a44d6b395fac9500841f5ac1bc73b4c11d4d188..339c91dfa658e700a570ba55408c9c1a4a445fb6 100644 (file)
@@ -181,6 +181,15 @@ qed_sp_iscsi_func_start(struct qed_hwfn *p_hwfn,
        p_params = &p_hwfn->pf_params.iscsi_pf_params;
        p_queue = &p_init->q_params;
 
+       /* Sanity check - the requested CQ count must not exceed the CQs
+        * available to this function.
+        */
+       if (p_params->num_queues > p_hwfn->hw_info.feat_num[QED_ISCSI_CQ]) {
+               DP_ERR(p_hwfn,
+                      "Cannot satisfy CQ amount. Queues requested %d, CQs available %d. Aborting function start\n",
+                      p_params->num_queues,
+                      p_hwfn->hw_info.feat_num[QED_ISCSI_CQ]);
+               return -EINVAL;
+       }
+
        SET_FIELD(p_init->hdr.flags,
                  ISCSI_SLOW_PATH_HDR_LAYER_CODE, ISCSI_SLOW_PATH_LAYER_CODE);
        p_init->hdr.op_code = ISCSI_RAMROD_CMD_ID_INIT_FUNC;
@@ -190,6 +199,9 @@ qed_sp_iscsi_func_start(struct qed_hwfn *p_hwfn,
        p_init->num_sq_pages_in_ring = p_params->num_sq_pages_in_ring;
        p_init->num_r2tq_pages_in_ring = p_params->num_r2tq_pages_in_ring;
        p_init->num_uhq_pages_in_ring = p_params->num_uhq_pages_in_ring;
+       p_init->ooo_enable = p_params->ooo_enable;
+       p_init->ll2_rx_queue_id = p_hwfn->hw_info.resc_start[QED_LL2_QUEUE] +
+                                 p_params->ll2_ooo_queue_id;
        p_init->func_params.log_page_size = p_params->log_page_size;
        val = p_params->num_tasks;
        p_init->func_params.num_tasks = cpu_to_le16(val);
@@ -213,7 +225,7 @@ qed_sp_iscsi_func_start(struct qed_hwfn *p_hwfn,
                p_queue->cq_cmdq_sb_num_arr[i] = cpu_to_le16(val);
        }
 
-       p_queue->bdq_resource_id = ISCSI_BDQ_ID(p_hwfn->port_id);
+       p_queue->bdq_resource_id = (u8)RESC_START(p_hwfn, QED_BDQ);
 
        DMA_REGPAIR_LE(p_queue->bdq_pbl_base_address[BDQ_ID_RQ],
                       p_params->bdq_pbl_base_addr[BDQ_ID_RQ]);
@@ -267,11 +279,10 @@ static int qed_sp_iscsi_conn_offload(struct qed_hwfn *p_hwfn,
        struct tcp_offload_params *p_tcp = NULL;
        struct qed_spq_entry *p_ent = NULL;
        struct qed_sp_init_data init_data;
-       union qed_qm_pq_params pq_params;
-       u16 pq0_id = 0, pq1_id = 0;
        dma_addr_t r2tq_pbl_addr;
        dma_addr_t xhq_pbl_addr;
        dma_addr_t uhq_pbl_addr;
+       u16 physical_q;
        int rc = 0;
        u32 dval;
        u16 wval;
@@ -294,16 +305,14 @@ static int qed_sp_iscsi_conn_offload(struct qed_hwfn *p_hwfn,
        p_ramrod = &p_ent->ramrod.iscsi_conn_offload;
 
        /* Transmission PQ is the first of the PF */
-       memset(&pq_params, 0, sizeof(pq_params));
-       pq0_id = qed_get_qm_pq(p_hwfn, PROTOCOLID_ISCSI, &pq_params);
-       p_conn->physical_q0 = cpu_to_le16(pq0_id);
-       p_ramrod->iscsi.physical_q0 = cpu_to_le16(pq0_id);
+       physical_q = qed_get_cm_pq_idx(p_hwfn, PQ_FLAGS_OFLD);
+       p_conn->physical_q0 = cpu_to_le16(physical_q);
+       p_ramrod->iscsi.physical_q0 = cpu_to_le16(physical_q);
 
        /* iSCSI Pure-ACK PQ */
-       pq_params.iscsi.q_idx = 1;
-       pq1_id = qed_get_qm_pq(p_hwfn, PROTOCOLID_ISCSI, &pq_params);
-       p_conn->physical_q1 = cpu_to_le16(pq1_id);
-       p_ramrod->iscsi.physical_q1 = cpu_to_le16(pq1_id);
+       physical_q = qed_get_cm_pq_idx(p_hwfn, PQ_FLAGS_ACK);
+       p_conn->physical_q1 = cpu_to_le16(physical_q);
+       p_ramrod->iscsi.physical_q1 = cpu_to_le16(physical_q);
 
        p_ramrod->hdr.op_code = ISCSI_RAMROD_CMD_ID_OFFLOAD_CONN;
        SET_FIELD(p_ramrod->hdr.flags, ISCSI_SLOW_PATH_HDR_LAYER_CODE,
@@ -590,21 +599,31 @@ static void __iomem *qed_iscsi_get_db_addr(struct qed_hwfn *p_hwfn, u32 cid)
 static void __iomem *qed_iscsi_get_primary_bdq_prod(struct qed_hwfn *p_hwfn,
                                                    u8 bdq_id)
 {
-       u8 bdq_function_id = ISCSI_BDQ_ID(p_hwfn->port_id);
-
-       return (u8 __iomem *)p_hwfn->regview + GTT_BAR0_MAP_REG_MSDM_RAM +
-                            MSTORM_SCSI_BDQ_EXT_PROD_OFFSET(bdq_function_id,
-                                                            bdq_id);
+       if (RESC_NUM(p_hwfn, QED_BDQ)) {
+               return (u8 __iomem *)p_hwfn->regview +
+                      GTT_BAR0_MAP_REG_MSDM_RAM +
+                      MSTORM_SCSI_BDQ_EXT_PROD_OFFSET(RESC_START(p_hwfn,
+                                                                 QED_BDQ),
+                                                      bdq_id);
+       } else {
+               DP_NOTICE(p_hwfn, "BDQ is not allocated!\n");
+               return NULL;
+       }
 }
 
 static void __iomem *qed_iscsi_get_secondary_bdq_prod(struct qed_hwfn *p_hwfn,
                                                      u8 bdq_id)
 {
-       u8 bdq_function_id = ISCSI_BDQ_ID(p_hwfn->port_id);
-
-       return (u8 __iomem *)p_hwfn->regview + GTT_BAR0_MAP_REG_TSDM_RAM +
-                            TSTORM_SCSI_BDQ_EXT_PROD_OFFSET(bdq_function_id,
-                                                            bdq_id);
+       if (RESC_NUM(p_hwfn, QED_BDQ)) {
+               return (u8 __iomem *)p_hwfn->regview +
+                      GTT_BAR0_MAP_REG_TSDM_RAM +
+                      TSTORM_SCSI_BDQ_EXT_PROD_OFFSET(RESC_START(p_hwfn,
+                                                                 QED_BDQ),
+                                                      bdq_id);
+       } else {
+               DP_NOTICE(p_hwfn, "BDQ is not allocated!\n");
+               return NULL;
+       }
 }
 
 static int qed_iscsi_setup_connection(struct qed_hwfn *p_hwfn,
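
After this change the producer-address getters return NULL whenever no BDQ resource was assigned to the function, so callers must check the result before using it. A toy sketch of the new caller contract; all names here are hypothetical:

#include <stdint.h>
#include <stdio.h>

static volatile uint32_t fake_prod;

/* stand-in for the producer lookup, which may now legitimately fail */
static volatile uint32_t *get_bdq_prod(int bdq_allocated)
{
        if (!bdq_allocated)
                return NULL;    /* the "BDQ is not allocated!" case */
        return &fake_prod;
}

int main(void)
{
        volatile uint32_t *prod = get_bdq_prod(0);

        /* callers must now tolerate a NULL producer address */
        if (!prod)
                fprintf(stderr, "no BDQ resource; skipping producer setup\n");
        else
                *prod = 1;
        return 0;
}
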
@@ -786,6 +805,23 @@ static void qed_iscsi_release_connection(struct qed_hwfn *p_hwfn,
        spin_unlock_bh(&p_hwfn->p_iscsi_info->lock);
 }
 
+static void qed_iscsi_free_connection(struct qed_hwfn *p_hwfn,
+                                     struct qed_iscsi_conn *p_conn)
+{
+       qed_chain_free(p_hwfn->cdev, &p_conn->xhq);
+       qed_chain_free(p_hwfn->cdev, &p_conn->uhq);
+       qed_chain_free(p_hwfn->cdev, &p_conn->r2tq);
+       dma_free_coherent(&p_hwfn->cdev->pdev->dev,
+                         sizeof(struct tcp_upload_params),
+                         p_conn->tcp_upload_params_virt_addr,
+                         p_conn->tcp_upload_params_phys_addr);
+       dma_free_coherent(&p_hwfn->cdev->pdev->dev,
+                         sizeof(struct scsi_terminate_extra_params),
+                         p_conn->queue_cnts_virt_addr,
+                         p_conn->queue_cnts_phys_addr);
+       kfree(p_conn);
+}
+
 struct qed_iscsi_info *qed_iscsi_alloc(struct qed_hwfn *p_hwfn)
 {
        struct qed_iscsi_info *p_iscsi_info;
@@ -807,6 +843,17 @@ void qed_iscsi_setup(struct qed_hwfn *p_hwfn,
 void qed_iscsi_free(struct qed_hwfn *p_hwfn,
                    struct qed_iscsi_info *p_iscsi_info)
 {
+       struct qed_iscsi_conn *p_conn = NULL;
+
+       while (!list_empty(&p_hwfn->p_iscsi_info->free_list)) {
+               p_conn = list_first_entry(&p_hwfn->p_iscsi_info->free_list,
+                                         struct qed_iscsi_conn, list_entry);
+               /* list_first_entry() on a non-empty list never returns NULL */
+               list_del(&p_conn->list_entry);
+               qed_iscsi_free_connection(p_hwfn, p_conn);
+       }
+
        kfree(p_iscsi_info);
 }
 
@@ -826,6 +873,8 @@ static void _qed_iscsi_get_tstats(struct qed_hwfn *p_hwfn,
            HILO_64_REGPAIR(tstats.iscsi_rx_bytes_cnt);
        p_stats->iscsi_rx_packet_cnt =
            HILO_64_REGPAIR(tstats.iscsi_rx_packet_cnt);
+       p_stats->iscsi_rx_new_ooo_isle_events_cnt =
+           HILO_64_REGPAIR(tstats.iscsi_rx_new_ooo_isle_events_cnt);
        p_stats->iscsi_cmdq_threshold_cnt =
            le32_to_cpu(tstats.iscsi_cmdq_threshold_cnt);
        p_stats->iscsi_rq_threshold_cnt =
@@ -972,6 +1021,8 @@ static int qed_fill_iscsi_dev_info(struct qed_dev *cdev,
        info->secondary_bdq_rq_addr =
            qed_iscsi_get_secondary_bdq_prod(hwfn, BDQ_ID_RQ);
 
+       info->num_cqs = FEAT_NUM(hwfn, QED_ISCSI_CQ);
+
        return rc;
 }
 
@@ -1273,6 +1324,26 @@ static int qed_iscsi_stats(struct qed_dev *cdev, struct qed_iscsi_stats *stats)
        return qed_iscsi_get_stats(QED_LEADING_HWFN(cdev), stats);
 }
 
+void qed_get_protocol_stats_iscsi(struct qed_dev *cdev,
+                                 struct qed_mcp_iscsi_stats *stats)
+{
+       struct qed_iscsi_stats proto_stats;
+
+       /* Retrieve FW statistics */
+       memset(&proto_stats, 0, sizeof(proto_stats));
+       if (qed_iscsi_stats(cdev, &proto_stats)) {
+               DP_VERBOSE(cdev, QED_MSG_STORAGE,
+                          "Failed to collect ISCSI statistics\n");
+               return;
+       }
+
+       /* Translate FW statistics into struct */
+       stats->rx_pdus = proto_stats.iscsi_rx_total_pdu_cnt;
+       stats->tx_pdus = proto_stats.iscsi_tx_total_pdu_cnt;
+       stats->rx_bytes = proto_stats.iscsi_rx_bytes_cnt;
+       stats->tx_bytes = proto_stats.iscsi_tx_bytes_cnt;
+}
+
 static const struct qed_iscsi_ops qed_iscsi_ops_pass = {
        .common = &qed_common_ops_pass,
        .ll2 = &qed_ll2_ops_pass,
index 20c187f4ed0b8aa8ff2125633f81aa19a1ca546b..ae98f772cbc0e0b3542d693252aaa0c066cde864 100644 (file)
@@ -64,13 +64,25 @@ void qed_iscsi_setup(struct qed_hwfn *p_hwfn,
 
 void qed_iscsi_free(struct qed_hwfn *p_hwfn,
                    struct qed_iscsi_info *p_iscsi_info);
+
+/**
+ * @brief - Fills the provided struct with iSCSI protocol statistics.
+ *
+ * @param cdev - qed device
+ * @param stats - points to the struct to be filled with the statistics.
+ */
+void qed_get_protocol_stats_iscsi(struct qed_dev *cdev,
+                                 struct qed_mcp_iscsi_stats *stats);
 #else /* IS_ENABLED(CONFIG_QED_ISCSI) */
 static inline struct qed_iscsi_info *qed_iscsi_alloc(
                struct qed_hwfn *p_hwfn) { return NULL; }
 static inline void qed_iscsi_setup(struct qed_hwfn *p_hwfn,
                                   struct qed_iscsi_info *p_iscsi_info) {}
 static inline void qed_iscsi_free(struct qed_hwfn *p_hwfn,
-                                 struct qed_iscsi_info *p_iscsi_info) {}
+                                struct qed_iscsi_info *p_iscsi_info) {}
+static inline void
+qed_get_protocol_stats_iscsi(struct qed_dev *cdev,
+                            struct qed_mcp_iscsi_stats *stats) {}
 #endif /* IS_ENABLED(CONFIG_QED_ISCSI) */
 
 #endif
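
The header above follows the usual compile-out pattern: real prototypes when CONFIG_QED_ISCSI is enabled, empty static inline stubs otherwise, so call sites build unchanged either way. The pattern in isolation, with a hypothetical CONFIG_FEATURE flag:

/* feature.h - the compile-out pattern with a hypothetical CONFIG flag */
#ifndef FEATURE_H
#define FEATURE_H

#if defined(CONFIG_FEATURE)
void feature_get_stats(int dev, int *stats);
#else
/* stub: same signature, no code, no cost at the call site */
static inline void feature_get_stats(int dev, int *stats) {}
#endif

#endif /* FEATURE_H */

Call sites invoke feature_get_stats() unconditionally; when the feature is compiled out, the empty inline compiles to nothing.
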
index df932be5a4e5aa1e47b16530d51a5dcf78f15706..d56441da87c52e0d0b38ed7d85d61a2e543e865b 100644 (file)
@@ -938,15 +938,12 @@ qed_eth_pf_tx_queue_start(struct qed_hwfn *p_hwfn,
                          dma_addr_t pbl_addr,
                          u16 pbl_size, void __iomem **pp_doorbell)
 {
-       union qed_qm_pq_params pq_params;
        int rc;
 
-       memset(&pq_params, 0, sizeof(pq_params));
-
        rc = qed_eth_txq_start_ramrod(p_hwfn, p_cid,
                                      pbl_addr, pbl_size,
-                                     qed_get_qm_pq(p_hwfn, PROTOCOLID_ETH,
-                                                   &pq_params));
+                                     qed_get_cm_pq_idx_mcos(p_hwfn, tc));
        if (rc)
                return rc;
 
@@ -1470,13 +1467,20 @@ static void __qed_get_vport_pstats(struct qed_hwfn *p_hwfn,
        memset(&pstats, 0, sizeof(pstats));
        qed_memcpy_from(p_hwfn, p_ptt, &pstats, pstats_addr, pstats_len);
 
-       p_stats->tx_ucast_bytes += HILO_64_REGPAIR(pstats.sent_ucast_bytes);
-       p_stats->tx_mcast_bytes += HILO_64_REGPAIR(pstats.sent_mcast_bytes);
-       p_stats->tx_bcast_bytes += HILO_64_REGPAIR(pstats.sent_bcast_bytes);
-       p_stats->tx_ucast_pkts += HILO_64_REGPAIR(pstats.sent_ucast_pkts);
-       p_stats->tx_mcast_pkts += HILO_64_REGPAIR(pstats.sent_mcast_pkts);
-       p_stats->tx_bcast_pkts += HILO_64_REGPAIR(pstats.sent_bcast_pkts);
-       p_stats->tx_err_drop_pkts += HILO_64_REGPAIR(pstats.error_drop_pkts);
+       p_stats->common.tx_ucast_bytes +=
+           HILO_64_REGPAIR(pstats.sent_ucast_bytes);
+       p_stats->common.tx_mcast_bytes +=
+           HILO_64_REGPAIR(pstats.sent_mcast_bytes);
+       p_stats->common.tx_bcast_bytes +=
+           HILO_64_REGPAIR(pstats.sent_bcast_bytes);
+       p_stats->common.tx_ucast_pkts +=
+           HILO_64_REGPAIR(pstats.sent_ucast_pkts);
+       p_stats->common.tx_mcast_pkts +=
+           HILO_64_REGPAIR(pstats.sent_mcast_pkts);
+       p_stats->common.tx_bcast_pkts +=
+           HILO_64_REGPAIR(pstats.sent_bcast_pkts);
+       p_stats->common.tx_err_drop_pkts +=
+           HILO_64_REGPAIR(pstats.error_drop_pkts);
 }
 
 static void __qed_get_vport_tstats(struct qed_hwfn *p_hwfn,
@@ -1502,10 +1506,10 @@ static void __qed_get_vport_tstats(struct qed_hwfn *p_hwfn,
        memset(&tstats, 0, sizeof(tstats));
        qed_memcpy_from(p_hwfn, p_ptt, &tstats, tstats_addr, tstats_len);
 
-       p_stats->mftag_filter_discards +=
-               HILO_64_REGPAIR(tstats.mftag_filter_discard);
-       p_stats->mac_filter_discards +=
-               HILO_64_REGPAIR(tstats.eth_mac_filter_discard);
+       p_stats->common.mftag_filter_discards +=
+           HILO_64_REGPAIR(tstats.mftag_filter_discard);
+       p_stats->common.mac_filter_discards +=
+           HILO_64_REGPAIR(tstats.eth_mac_filter_discard);
 }
 
 static void __qed_get_vport_ustats_addrlen(struct qed_hwfn *p_hwfn,
@@ -1539,12 +1543,15 @@ static void __qed_get_vport_ustats(struct qed_hwfn *p_hwfn,
        memset(&ustats, 0, sizeof(ustats));
        qed_memcpy_from(p_hwfn, p_ptt, &ustats, ustats_addr, ustats_len);
 
-       p_stats->rx_ucast_bytes += HILO_64_REGPAIR(ustats.rcv_ucast_bytes);
-       p_stats->rx_mcast_bytes += HILO_64_REGPAIR(ustats.rcv_mcast_bytes);
-       p_stats->rx_bcast_bytes += HILO_64_REGPAIR(ustats.rcv_bcast_bytes);
-       p_stats->rx_ucast_pkts += HILO_64_REGPAIR(ustats.rcv_ucast_pkts);
-       p_stats->rx_mcast_pkts += HILO_64_REGPAIR(ustats.rcv_mcast_pkts);
-       p_stats->rx_bcast_pkts += HILO_64_REGPAIR(ustats.rcv_bcast_pkts);
+       p_stats->common.rx_ucast_bytes +=
+           HILO_64_REGPAIR(ustats.rcv_ucast_bytes);
+       p_stats->common.rx_mcast_bytes +=
+           HILO_64_REGPAIR(ustats.rcv_mcast_bytes);
+       p_stats->common.rx_bcast_bytes +=
+           HILO_64_REGPAIR(ustats.rcv_bcast_bytes);
+       p_stats->common.rx_ucast_pkts += HILO_64_REGPAIR(ustats.rcv_ucast_pkts);
+       p_stats->common.rx_mcast_pkts += HILO_64_REGPAIR(ustats.rcv_mcast_pkts);
+       p_stats->common.rx_bcast_pkts += HILO_64_REGPAIR(ustats.rcv_bcast_pkts);
 }
 
 static void __qed_get_vport_mstats_addrlen(struct qed_hwfn *p_hwfn,
@@ -1578,23 +1585,26 @@ static void __qed_get_vport_mstats(struct qed_hwfn *p_hwfn,
        memset(&mstats, 0, sizeof(mstats));
        qed_memcpy_from(p_hwfn, p_ptt, &mstats, mstats_addr, mstats_len);
 
-       p_stats->no_buff_discards += HILO_64_REGPAIR(mstats.no_buff_discard);
-       p_stats->packet_too_big_discard +=
-               HILO_64_REGPAIR(mstats.packet_too_big_discard);
-       p_stats->ttl0_discard += HILO_64_REGPAIR(mstats.ttl0_discard);
-       p_stats->tpa_coalesced_pkts +=
-               HILO_64_REGPAIR(mstats.tpa_coalesced_pkts);
-       p_stats->tpa_coalesced_events +=
-               HILO_64_REGPAIR(mstats.tpa_coalesced_events);
-       p_stats->tpa_aborts_num += HILO_64_REGPAIR(mstats.tpa_aborts_num);
-       p_stats->tpa_coalesced_bytes +=
-               HILO_64_REGPAIR(mstats.tpa_coalesced_bytes);
+       p_stats->common.no_buff_discards +=
+           HILO_64_REGPAIR(mstats.no_buff_discard);
+       p_stats->common.packet_too_big_discard +=
+           HILO_64_REGPAIR(mstats.packet_too_big_discard);
+       p_stats->common.ttl0_discard += HILO_64_REGPAIR(mstats.ttl0_discard);
+       p_stats->common.tpa_coalesced_pkts +=
+           HILO_64_REGPAIR(mstats.tpa_coalesced_pkts);
+       p_stats->common.tpa_coalesced_events +=
+           HILO_64_REGPAIR(mstats.tpa_coalesced_events);
+       p_stats->common.tpa_aborts_num +=
+           HILO_64_REGPAIR(mstats.tpa_aborts_num);
+       p_stats->common.tpa_coalesced_bytes +=
+           HILO_64_REGPAIR(mstats.tpa_coalesced_bytes);
 }
 
 static void __qed_get_vport_port_stats(struct qed_hwfn *p_hwfn,
                                       struct qed_ptt *p_ptt,
                                       struct qed_eth_stats *p_stats)
 {
+       struct qed_eth_stats_common *p_common = &p_stats->common;
        struct port_stats port_stats;
        int j;
 
@@ -1605,54 +1615,75 @@ static void __qed_get_vport_port_stats(struct qed_hwfn *p_hwfn,
                        offsetof(struct public_port, stats),
                        sizeof(port_stats));
 
-       p_stats->rx_64_byte_packets             += port_stats.eth.r64;
-       p_stats->rx_65_to_127_byte_packets      += port_stats.eth.r127;
-       p_stats->rx_128_to_255_byte_packets     += port_stats.eth.r255;
-       p_stats->rx_256_to_511_byte_packets     += port_stats.eth.r511;
-       p_stats->rx_512_to_1023_byte_packets    += port_stats.eth.r1023;
-       p_stats->rx_1024_to_1518_byte_packets   += port_stats.eth.r1518;
-       p_stats->rx_1519_to_1522_byte_packets   += port_stats.eth.r1522;
-       p_stats->rx_1519_to_2047_byte_packets   += port_stats.eth.r2047;
-       p_stats->rx_2048_to_4095_byte_packets   += port_stats.eth.r4095;
-       p_stats->rx_4096_to_9216_byte_packets   += port_stats.eth.r9216;
-       p_stats->rx_9217_to_16383_byte_packets  += port_stats.eth.r16383;
-       p_stats->rx_crc_errors                  += port_stats.eth.rfcs;
-       p_stats->rx_mac_crtl_frames             += port_stats.eth.rxcf;
-       p_stats->rx_pause_frames                += port_stats.eth.rxpf;
-       p_stats->rx_pfc_frames                  += port_stats.eth.rxpp;
-       p_stats->rx_align_errors                += port_stats.eth.raln;
-       p_stats->rx_carrier_errors              += port_stats.eth.rfcr;
-       p_stats->rx_oversize_packets            += port_stats.eth.rovr;
-       p_stats->rx_jabbers                     += port_stats.eth.rjbr;
-       p_stats->rx_undersize_packets           += port_stats.eth.rund;
-       p_stats->rx_fragments                   += port_stats.eth.rfrg;
-       p_stats->tx_64_byte_packets             += port_stats.eth.t64;
-       p_stats->tx_65_to_127_byte_packets      += port_stats.eth.t127;
-       p_stats->tx_128_to_255_byte_packets     += port_stats.eth.t255;
-       p_stats->tx_256_to_511_byte_packets     += port_stats.eth.t511;
-       p_stats->tx_512_to_1023_byte_packets    += port_stats.eth.t1023;
-       p_stats->tx_1024_to_1518_byte_packets   += port_stats.eth.t1518;
-       p_stats->tx_1519_to_2047_byte_packets   += port_stats.eth.t2047;
-       p_stats->tx_2048_to_4095_byte_packets   += port_stats.eth.t4095;
-       p_stats->tx_4096_to_9216_byte_packets   += port_stats.eth.t9216;
-       p_stats->tx_9217_to_16383_byte_packets  += port_stats.eth.t16383;
-       p_stats->tx_pause_frames                += port_stats.eth.txpf;
-       p_stats->tx_pfc_frames                  += port_stats.eth.txpp;
-       p_stats->tx_lpi_entry_count             += port_stats.eth.tlpiec;
-       p_stats->tx_total_collisions            += port_stats.eth.tncl;
-       p_stats->rx_mac_bytes                   += port_stats.eth.rbyte;
-       p_stats->rx_mac_uc_packets              += port_stats.eth.rxuca;
-       p_stats->rx_mac_mc_packets              += port_stats.eth.rxmca;
-       p_stats->rx_mac_bc_packets              += port_stats.eth.rxbca;
-       p_stats->rx_mac_frames_ok               += port_stats.eth.rxpok;
-       p_stats->tx_mac_bytes                   += port_stats.eth.tbyte;
-       p_stats->tx_mac_uc_packets              += port_stats.eth.txuca;
-       p_stats->tx_mac_mc_packets              += port_stats.eth.txmca;
-       p_stats->tx_mac_bc_packets              += port_stats.eth.txbca;
-       p_stats->tx_mac_ctrl_frames             += port_stats.eth.txcf;
+       p_common->rx_64_byte_packets += port_stats.eth.r64;
+       p_common->rx_65_to_127_byte_packets += port_stats.eth.r127;
+       p_common->rx_128_to_255_byte_packets += port_stats.eth.r255;
+       p_common->rx_256_to_511_byte_packets += port_stats.eth.r511;
+       p_common->rx_512_to_1023_byte_packets += port_stats.eth.r1023;
+       p_common->rx_1024_to_1518_byte_packets += port_stats.eth.r1518;
+       p_common->rx_crc_errors += port_stats.eth.rfcs;
+       p_common->rx_mac_crtl_frames += port_stats.eth.rxcf;
+       p_common->rx_pause_frames += port_stats.eth.rxpf;
+       p_common->rx_pfc_frames += port_stats.eth.rxpp;
+       p_common->rx_align_errors += port_stats.eth.raln;
+       p_common->rx_carrier_errors += port_stats.eth.rfcr;
+       p_common->rx_oversize_packets += port_stats.eth.rovr;
+       p_common->rx_jabbers += port_stats.eth.rjbr;
+       p_common->rx_undersize_packets += port_stats.eth.rund;
+       p_common->rx_fragments += port_stats.eth.rfrg;
+       p_common->tx_64_byte_packets += port_stats.eth.t64;
+       p_common->tx_65_to_127_byte_packets += port_stats.eth.t127;
+       p_common->tx_128_to_255_byte_packets += port_stats.eth.t255;
+       p_common->tx_256_to_511_byte_packets += port_stats.eth.t511;
+       p_common->tx_512_to_1023_byte_packets += port_stats.eth.t1023;
+       p_common->tx_1024_to_1518_byte_packets += port_stats.eth.t1518;
+       p_common->tx_pause_frames += port_stats.eth.txpf;
+       p_common->tx_pfc_frames += port_stats.eth.txpp;
+       p_common->rx_mac_bytes += port_stats.eth.rbyte;
+       p_common->rx_mac_uc_packets += port_stats.eth.rxuca;
+       p_common->rx_mac_mc_packets += port_stats.eth.rxmca;
+       p_common->rx_mac_bc_packets += port_stats.eth.rxbca;
+       p_common->rx_mac_frames_ok += port_stats.eth.rxpok;
+       p_common->tx_mac_bytes += port_stats.eth.tbyte;
+       p_common->tx_mac_uc_packets += port_stats.eth.txuca;
+       p_common->tx_mac_mc_packets += port_stats.eth.txmca;
+       p_common->tx_mac_bc_packets += port_stats.eth.txbca;
+       p_common->tx_mac_ctrl_frames += port_stats.eth.txcf;
        for (j = 0; j < 8; j++) {
-               p_stats->brb_truncates  += port_stats.brb.brb_truncate[j];
-               p_stats->brb_discards   += port_stats.brb.brb_discard[j];
+               p_common->brb_truncates += port_stats.brb.brb_truncate[j];
+               p_common->brb_discards += port_stats.brb.brb_discard[j];
+       }
+
+       if (QED_IS_BB(p_hwfn->cdev)) {
+               struct qed_eth_stats_bb *p_bb = &p_stats->bb;
+
+               p_bb->rx_1519_to_1522_byte_packets +=
+                   port_stats.eth.u0.bb0.r1522;
+               p_bb->rx_1519_to_2047_byte_packets +=
+                   port_stats.eth.u0.bb0.r2047;
+               p_bb->rx_2048_to_4095_byte_packets +=
+                   port_stats.eth.u0.bb0.r4095;
+               p_bb->rx_4096_to_9216_byte_packets +=
+                   port_stats.eth.u0.bb0.r9216;
+               p_bb->rx_9217_to_16383_byte_packets +=
+                   port_stats.eth.u0.bb0.r16383;
+               p_bb->tx_1519_to_2047_byte_packets +=
+                   port_stats.eth.u1.bb1.t2047;
+               p_bb->tx_2048_to_4095_byte_packets +=
+                   port_stats.eth.u1.bb1.t4095;
+               p_bb->tx_4096_to_9216_byte_packets +=
+                   port_stats.eth.u1.bb1.t9216;
+               p_bb->tx_9217_to_16383_byte_packets +=
+                   port_stats.eth.u1.bb1.t16383;
+               p_bb->tx_lpi_entry_count += port_stats.eth.u2.bb2.tlpiec;
+               p_bb->tx_total_collisions += port_stats.eth.u2.bb2.tncl;
+       } else {
+               struct qed_eth_stats_ah *p_ah = &p_stats->ah;
+
+               p_ah->rx_1519_to_max_byte_packets +=
+                   port_stats.eth.u0.ah0.r1519_to_max;
+               p_ah->tx_1519_to_max_byte_packets +=
+                   port_stats.eth.u1.ah1.t1519_to_max;
        }
 }
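
The accumulation above now separates chip-independent counters from BB-only and AH-only ones. A sketch of the presumed overall shape - the field lists are illustrative and the real definitions live in the qed headers, so treat this as an assumption:

#include <stdint.h>
#include <stdio.h>

struct eth_stats_common {       /* counters valid on every chip */
        uint64_t rx_ucast_pkts;
        uint64_t tx_ucast_pkts;
};

struct eth_stats_bb {           /* BB-only counters */
        uint64_t tx_lpi_entry_count;
        uint64_t tx_total_collisions;
};

struct eth_stats_ah {           /* AH-only counters */
        uint64_t rx_1519_to_max_byte_packets;
};

struct eth_stats {
        struct eth_stats_common common;
        union {                 /* chip-specific block, one or the other */
                struct eth_stats_bb bb;
                struct eth_stats_ah ah;
        };
};

int main(void)
{
        struct eth_stats s = { 0 };

        s.common.rx_ucast_pkts++;
        s.bb.tx_total_collisions++;     /* interpret the union as BB */
        printf("%llu %llu\n",
               (unsigned long long)s.common.rx_ucast_pkts,
               (unsigned long long)s.bb.tx_total_collisions);
        return 0;
}
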
 
@@ -1898,7 +1929,11 @@ static int qed_start_vport(struct qed_dev *cdev,
                        return rc;
                }
 
-               qed_hw_start_fastpath(p_hwfn);
+               rc = qed_hw_start_fastpath(p_hwfn);
+               if (rc) {
+                       DP_ERR(cdev, "Failed to start VPORT fastpath\n");
+                       return rc;
+               }
 
                DP_VERBOSE(cdev, (QED_MSG_SPQ | NETIF_MSG_IFUP),
                           "Started V-PORT %d with MTU %d\n",
@@ -2141,7 +2176,13 @@ static int qed_start_txq(struct qed_dev *cdev,
 #define QED_HW_STOP_RETRY_LIMIT (10)
 static int qed_fastpath_stop(struct qed_dev *cdev)
 {
-       qed_hw_stop_fastpath(cdev);
+       int rc;
+
+       rc = qed_hw_stop_fastpath(cdev);
+       if (rc) {
+               DP_ERR(cdev, "Failed to stop Fastpath\n");
+               return rc;
+       }
 
        return 0;
 }
index 9a0b9af10a572f3e3c2a5d086e9e731b040e4d6b..09c86411918c1ea9dae48d64d5376a7157605426 100644 (file)
@@ -211,6 +211,8 @@ static void qed_ll2b_complete_rx_packet(struct qed_hwfn *p_hwfn,
        /* If we need to reuse, or there's no replacement buffer, repost it */
        if (rc)
                goto out_post;
+       dma_unmap_single(&cdev->pdev->dev, buffer->phys_addr,
+                        cdev->ll2->rx_size, DMA_FROM_DEVICE);
 
        skb = build_skb(buffer->data, 0);
        if (!skb) {
@@ -474,7 +476,7 @@ qed_ll2_rxq_completion_gsi(struct qed_hwfn *p_hwfn,
 static int qed_ll2_rxq_completion_reg(struct qed_hwfn *p_hwfn,
                                      struct qed_ll2_info *p_ll2_conn,
                                      union core_rx_cqe_union *p_cqe,
-                                     unsigned long lock_flags,
+                                     unsigned long *p_lock_flags,
                                      bool b_last_cqe)
 {
        struct qed_ll2_rx_queue *p_rx = &p_ll2_conn->rx_queue;
@@ -495,10 +497,10 @@ static int qed_ll2_rxq_completion_reg(struct qed_hwfn *p_hwfn,
                          "Mismatch between active_descq and the LL2 Rx chain\n");
        list_add_tail(&p_pkt->list_entry, &p_rx->free_descq);
 
-       spin_unlock_irqrestore(&p_rx->lock, lock_flags);
+       spin_unlock_irqrestore(&p_rx->lock, *p_lock_flags);
        qed_ll2b_complete_rx_packet(p_hwfn, p_ll2_conn->my_id,
                                    p_pkt, &p_cqe->rx_cqe_fp, b_last_cqe);
-       spin_lock_irqsave(&p_rx->lock, lock_flags);
+       spin_lock_irqsave(&p_rx->lock, *p_lock_flags);
 
        return 0;
 }
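
Passing the irqsave flags by pointer is the point of this fix: the callee drops and re-takes the lock, and the re-acquire produces a fresh saved state that the caller must be the one to restore. A toy model of that contract; the save/restore helpers are stand-ins, not kernel APIs:

#include <stdio.h>

static unsigned long irq_enabled = 1;   /* pretend IRQ-enable state */

static void lock_irqsave(unsigned long *flags)
{
        *flags = irq_enabled;   /* save the current state */
        irq_enabled = 0;        /* "disable interrupts" */
}

static void unlock_irqrestore(unsigned long flags)
{
        irq_enabled = flags;    /* restore the saved state */
}

/* The callee drops and re-takes the lock, so it must receive the flags by
 * pointer: the re-acquire writes a fresh saved state for the caller to
 * restore.  Pass-by-value would leave the caller restoring a stale copy.
 */
static void completion_handler(unsigned long *p_flags)
{
        unlock_irqrestore(*p_flags);    /* run the callback unlocked */
        /* ... hand the packet to the upper layer ... */
        lock_irqsave(p_flags);          /* re-take; update caller's flags */
}

int main(void)
{
        unsigned long flags;

        lock_irqsave(&flags);
        completion_handler(&flags);
        unlock_irqrestore(flags);
        printf("irq_enabled=%lu\n", irq_enabled);       /* 1 again */
        return 0;
}
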
@@ -538,7 +540,8 @@ static int qed_ll2_rxq_completion(struct qed_hwfn *p_hwfn, void *cookie)
                        break;
                case CORE_RX_CQE_TYPE_REGULAR:
                        rc = qed_ll2_rxq_completion_reg(p_hwfn, p_ll2_conn,
-                                                       cqe, flags, b_last_cqe);
+                                                       cqe, &flags,
+                                                       b_last_cqe);
                        break;
                default:
                        rc = -EIO;
@@ -594,7 +597,7 @@ static u8 qed_ll2_convert_rx_parse_to_tx_flags(u16 parse_flags)
        u8 bd_flags = 0;
 
        if (GET_FIELD(parse_flags, PARSING_AND_ERR_FLAGS_TAG8021QEXIST))
-               SET_FIELD(bd_flags, CORE_TX_BD_FLAGS_VLAN_INSERTION, 1);
+               SET_FIELD(bd_flags, CORE_TX_BD_DATA_VLAN_INSERTION, 1);
 
        return bd_flags;
 }
@@ -755,8 +758,8 @@ qed_ooo_submit_tx_buffers(struct qed_hwfn *p_hwfn,
                             p_buffer->placement_offset;
                parse_flags = p_buffer->parse_flags;
                bd_flags = qed_ll2_convert_rx_parse_to_tx_flags(parse_flags);
-               SET_FIELD(bd_flags, CORE_TX_BD_FLAGS_FORCE_VLAN_MODE, 1);
-               SET_FIELD(bd_flags, CORE_TX_BD_FLAGS_L4_PROTOCOL, 1);
+               SET_FIELD(bd_flags, CORE_TX_BD_DATA_FORCE_VLAN_MODE, 1);
+               SET_FIELD(bd_flags, CORE_TX_BD_DATA_L4_PROTOCOL, 1);
 
                rc = qed_ll2_prepare_tx_packet(p_hwfn, p_ll2_conn->my_id, 1,
                                               p_buffer->vlan, bd_flags,
@@ -968,7 +971,7 @@ static int qed_ll2_start_ooo(struct qed_dev *cdev,
 {
        struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
        u8 *handle = &hwfn->pf_params.iscsi_pf_params.ll2_ooo_queue_id;
-       struct qed_ll2_conn ll2_info;
+       struct qed_ll2_conn ll2_info = { 0 };
        int rc;
 
        ll2_info.conn_type = QED_LL2_TYPE_ISCSI_OOO;
@@ -1087,7 +1090,6 @@ static int qed_sp_ll2_tx_queue_start(struct qed_hwfn *p_hwfn,
        struct core_tx_start_ramrod_data *p_ramrod = NULL;
        struct qed_spq_entry *p_ent = NULL;
        struct qed_sp_init_data init_data;
-       union qed_qm_pq_params pq_params;
        u16 pq_id = 0, pbl_size;
        int rc = -EINVAL;
 
@@ -1124,9 +1126,18 @@ static int qed_sp_ll2_tx_queue_start(struct qed_hwfn *p_hwfn,
        pbl_size = qed_chain_get_page_cnt(&p_tx->txq_chain);
        p_ramrod->pbl_size = cpu_to_le16(pbl_size);
 
-       memset(&pq_params, 0, sizeof(pq_params));
-       pq_params.core.tc = p_ll2_conn->conn.tx_tc;
-       pq_id = qed_get_qm_pq(p_hwfn, PROTOCOLID_CORE, &pq_params);
+       switch (p_ll2_conn->conn.tx_tc) {
+       case LB_TC:
+               pq_id = qed_get_cm_pq_idx(p_hwfn, PQ_FLAGS_LB);
+               break;
+       case OOO_LB_TC:
+               pq_id = qed_get_cm_pq_idx(p_hwfn, PQ_FLAGS_OOO);
+               break;
+       default:
+               pq_id = qed_get_cm_pq_idx(p_hwfn, PQ_FLAGS_OFLD);
+               break;
+       }
+
        p_ramrod->qm_pq_id = cpu_to_le16(pq_id);
 
        switch (conn_type) {
@@ -1397,13 +1408,21 @@ int qed_ll2_establish_connection(struct qed_hwfn *p_hwfn, u8 connection_handle)
        struct qed_ll2_info *p_ll2_conn;
        struct qed_ll2_rx_queue *p_rx;
        struct qed_ll2_tx_queue *p_tx;
+       struct qed_ptt *p_ptt;
        int rc = -EINVAL;
        u32 i, capacity;
        u8 qid;
 
+       p_ptt = qed_ptt_acquire(p_hwfn);
+       if (!p_ptt)
+               return -EAGAIN;
+
        p_ll2_conn = qed_ll2_handle_sanity_lock(p_hwfn, connection_handle);
-       if (!p_ll2_conn)
-               return -EINVAL;
+       if (!p_ll2_conn) {
+               rc = -EINVAL;
+               goto out;
+       }
+
        p_rx = &p_ll2_conn->rx_queue;
        p_tx = &p_ll2_conn->tx_queue;
 
@@ -1436,7 +1455,9 @@ int qed_ll2_establish_connection(struct qed_hwfn *p_hwfn, u8 connection_handle)
        p_tx->cur_completing_frag_num = 0;
        *p_tx->p_fw_cons = 0;
 
-       qed_cxt_acquire_cid(p_hwfn, PROTOCOLID_CORE, &p_ll2_conn->cid);
+       rc = qed_cxt_acquire_cid(p_hwfn, PROTOCOLID_CORE, &p_ll2_conn->cid);
+       if (rc)
+               goto out;
 
        qid = p_hwfn->hw_info.resc_start[QED_LL2_QUEUE] + connection_handle;
        p_ll2_conn->queue_id = qid;
@@ -1450,26 +1471,28 @@ int qed_ll2_establish_connection(struct qed_hwfn *p_hwfn, u8 connection_handle)
 
        rc = qed_ll2_establish_connection_rx(p_hwfn, p_ll2_conn);
        if (rc)
-               return rc;
+               goto out;
 
        rc = qed_sp_ll2_tx_queue_start(p_hwfn, p_ll2_conn);
        if (rc)
-               return rc;
+               goto out;
 
        if (p_hwfn->hw_info.personality != QED_PCI_ETH_ROCE)
-               qed_wr(p_hwfn, p_hwfn->p_main_ptt, PRS_REG_USE_LIGHT_L2, 1);
+               qed_wr(p_hwfn, p_ptt, PRS_REG_USE_LIGHT_L2, 1);
 
        qed_ll2_establish_connection_ooo(p_hwfn, p_ll2_conn);
 
        if (p_ll2_conn->conn.conn_type == QED_LL2_TYPE_FCOE) {
-               qed_llh_add_protocol_filter(p_hwfn, p_hwfn->p_main_ptt,
+               qed_llh_add_protocol_filter(p_hwfn, p_ptt,
                                            0x8906, 0,
                                            QED_LLH_FILTER_ETHERTYPE);
-               qed_llh_add_protocol_filter(p_hwfn, p_hwfn->p_main_ptt,
+               qed_llh_add_protocol_filter(p_hwfn, p_ptt,
                                            0x8914, 0,
                                            QED_LLH_FILTER_ETHERTYPE);
        }
 
+out:
+       qed_ptt_release(p_hwfn, p_ptt);
        return rc;
 }
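
The establish flow now acquires a PTT window up front and funnels every subsequent failure through a single out: label, so the window is released on all paths. The idiom in isolation, with hypothetical acquire/release helpers:

#include <stdio.h>

struct ptt { int bar_window; }; /* stand-in for the qed PTT register window */

static struct ptt *ptt_acquire(void) { static struct ptt p; return &p; }
static void ptt_release(struct ptt *p) { (void)p; }
static int do_step(int fail) { return fail ? -1 : 0; }

/* single-exit pattern: every failure after the acquire goes through out: */
static int establish(int fail_step)
{
        struct ptt *ptt = ptt_acquire();
        int rc;

        if (!ptt)
                return -11;     /* nothing acquired yet; -EAGAIN in qed */

        rc = do_step(fail_step == 1);
        if (rc)
                goto out;
        rc = do_step(fail_step == 2);
out:
        ptt_release(ptt);       /* released on success and failure alike */
        return rc;
}

int main(void)
{
        printf("ok=%d fail=%d\n", establish(0), establish(2));
        return 0;
}
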
 
@@ -1588,33 +1611,34 @@ static void qed_ll2_prepare_tx_packet_set(struct qed_hwfn *p_hwfn,
        p_tx->cur_send_frag_num++;
 }
 
-static void qed_ll2_prepare_tx_packet_set_bd(struct qed_hwfn *p_hwfn,
-                                            struct qed_ll2_info *p_ll2,
-                                            struct qed_ll2_tx_packet *p_curp,
-                                            u8 num_of_bds,
-                                            enum core_tx_dest tx_dest,
-                                            u16 vlan,
-                                            u8 bd_flags,
-                                            u16 l4_hdr_offset_w,
-                                            enum core_roce_flavor_type type,
-                                            dma_addr_t first_frag,
-                                            u16 first_frag_len)
+static void
+qed_ll2_prepare_tx_packet_set_bd(struct qed_hwfn *p_hwfn,
+                                struct qed_ll2_info *p_ll2,
+                                struct qed_ll2_tx_packet *p_curp,
+                                u8 num_of_bds,
+                                enum core_tx_dest tx_dest,
+                                u16 vlan,
+                                u8 bd_flags,
+                                u16 l4_hdr_offset_w,
+                                enum core_roce_flavor_type roce_flavor,
+                                dma_addr_t first_frag,
+                                u16 first_frag_len)
 {
        struct qed_chain *p_tx_chain = &p_ll2->tx_queue.txq_chain;
        u16 prod_idx = qed_chain_get_prod_idx(p_tx_chain);
        struct core_tx_bd *start_bd = NULL;
-       u16 frag_idx;
+       u16 bd_data = 0, frag_idx;
 
        start_bd = (struct core_tx_bd *)qed_chain_produce(p_tx_chain);
        start_bd->nw_vlan_or_lb_echo = cpu_to_le16(vlan);
        SET_FIELD(start_bd->bitfield1, CORE_TX_BD_L4_HDR_OFFSET_W,
                  cpu_to_le16(l4_hdr_offset_w));
        SET_FIELD(start_bd->bitfield1, CORE_TX_BD_TX_DST, tx_dest);
-       start_bd->bd_flags.as_bitfield = bd_flags;
-       start_bd->bd_flags.as_bitfield |= CORE_TX_BD_FLAGS_START_BD_MASK <<
-           CORE_TX_BD_FLAGS_START_BD_SHIFT;
-       SET_FIELD(start_bd->bitfield0, CORE_TX_BD_NBDS, num_of_bds);
-       SET_FIELD(start_bd->bitfield0, CORE_TX_BD_ROCE_FLAV, type);
+       bd_data |= bd_flags;
+       SET_FIELD(bd_data, CORE_TX_BD_DATA_START_BD, 0x1);
+       SET_FIELD(bd_data, CORE_TX_BD_DATA_NBDS, num_of_bds);
+       SET_FIELD(bd_data, CORE_TX_BD_DATA_ROCE_FLAV, roce_flavor);
+       start_bd->bd_data.as_bitfield = cpu_to_le16(bd_data);
        DMA_REGPAIR_LE(start_bd->addr, first_frag);
        start_bd->nbytes = cpu_to_le16(first_frag_len);
 
@@ -1639,9 +1663,8 @@ static void qed_ll2_prepare_tx_packet_set_bd(struct qed_hwfn *p_hwfn,
                struct core_tx_bd **p_bd = &p_curp->bds_set[frag_idx].txq_bd;
 
                *p_bd = (struct core_tx_bd *)qed_chain_produce(p_tx_chain);
-               (*p_bd)->bd_flags.as_bitfield = 0;
+               (*p_bd)->bd_data.as_bitfield = 0;
                (*p_bd)->bitfield1 = 0;
-               (*p_bd)->bitfield0 = 0;
                p_curp->bds_set[frag_idx].tx_frag = 0;
                p_curp->bds_set[frag_idx].frag_len = 0;
        }
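
The new descriptor layout composes all per-BD flags in a single 16-bit bd_data word on the CPU and converts it to little-endian once, at the final store. A sketch of that packing; the shift/mask slots are invented for illustration, the real ones come from the FW headers:

#include <stdint.h>
#include <stdio.h>

/* minimal SET_FIELD in the kernel's spirit: value shifted into a slot */
#define FIELD_SET(word, shift, mask, val) \
        ((word) |= (uint16_t)(((val) & (mask)) << (shift)))

/* invented slots for illustration only */
#define START_BD_SHIFT  0
#define START_BD_MASK   0x1
#define NBDS_SHIFT      1
#define NBDS_MASK       0xF
#define ROCE_FLAV_SHIFT 5
#define ROCE_FLAV_MASK  0x3

int main(void)
{
        uint16_t bd_data = 0;

        FIELD_SET(bd_data, START_BD_SHIFT, START_BD_MASK, 1);
        FIELD_SET(bd_data, NBDS_SHIFT, NBDS_MASK, 3);
        FIELD_SET(bd_data, ROCE_FLAV_SHIFT, ROCE_FLAV_MASK, 0);
        /* a single cpu_to_le16() happens at the final descriptor store */
        printf("bd_data = 0x%04x\n", bd_data);  /* 0x0007 */
        return 0;
}
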
@@ -1820,23 +1843,30 @@ int qed_ll2_terminate_connection(struct qed_hwfn *p_hwfn, u8 connection_handle)
 {
        struct qed_ll2_info *p_ll2_conn = NULL;
        int rc = -EINVAL;
+       struct qed_ptt *p_ptt;
+
+       p_ptt = qed_ptt_acquire(p_hwfn);
+       if (!p_ptt)
+               return -EAGAIN;
 
        p_ll2_conn = qed_ll2_handle_sanity_lock(p_hwfn, connection_handle);
-       if (!p_ll2_conn)
-               return -EINVAL;
+       if (!p_ll2_conn) {
+               rc = -EINVAL;
+               goto out;
+       }
 
        /* Stop Tx & Rx of connection, if needed */
        if (QED_LL2_TX_REGISTERED(p_ll2_conn)) {
                rc = qed_sp_ll2_tx_queue_stop(p_hwfn, p_ll2_conn);
                if (rc)
-                       return rc;
+                       goto out;
                qed_ll2_txq_flush(p_hwfn, connection_handle);
        }
 
        if (QED_LL2_RX_REGISTERED(p_ll2_conn)) {
                rc = qed_sp_ll2_rx_queue_stop(p_hwfn, p_ll2_conn);
                if (rc)
-                       return rc;
+                       goto out;
                qed_ll2_rxq_flush(p_hwfn, connection_handle);
        }
 
@@ -1844,14 +1874,16 @@ int qed_ll2_terminate_connection(struct qed_hwfn *p_hwfn, u8 connection_handle)
                qed_ooo_release_all_isles(p_hwfn, p_hwfn->p_ooo_info);
 
        if (p_ll2_conn->conn.conn_type == QED_LL2_TYPE_FCOE) {
-               qed_llh_remove_protocol_filter(p_hwfn, p_hwfn->p_main_ptt,
+               qed_llh_remove_protocol_filter(p_hwfn, p_ptt,
                                               0x8906, 0,
                                               QED_LLH_FILTER_ETHERTYPE);
-               qed_llh_remove_protocol_filter(p_hwfn, p_hwfn->p_main_ptt,
+               qed_llh_remove_protocol_filter(p_hwfn, p_ptt,
                                               0x8914, 0,
                                               QED_LLH_FILTER_ETHERTYPE);
        }
 
+out:
+       qed_ptt_release(p_hwfn, p_ptt);
        return rc;
 }
 
@@ -2238,11 +2270,11 @@ static int qed_ll2_start_xmit(struct qed_dev *cdev, struct sk_buff *skb)
        /* Request HW to calculate IP csum */
        if (!((vlan_get_protocol(skb) == htons(ETH_P_IPV6)) &&
              ipv6_hdr(skb)->nexthdr == NEXTHDR_IPV6))
-               flags |= BIT(CORE_TX_BD_FLAGS_IP_CSUM_SHIFT);
+               flags |= BIT(CORE_TX_BD_DATA_IP_CSUM_SHIFT);
 
        if (skb_vlan_tag_present(skb)) {
                vlan = skb_vlan_tag_get(skb);
-               flags |= BIT(CORE_TX_BD_FLAGS_VLAN_INSERTION_SHIFT);
+               flags |= BIT(CORE_TX_BD_DATA_VLAN_INSERTION_SHIFT);
        }
 
        rc = qed_ll2_prepare_tx_packet(QED_LEADING_HWFN(cdev),
index eef30a598b408e5ded4109860a0d4a37362fbaee..029f431e89ecaa01343c8b4ae523f64ec38fffde 100644 (file)
@@ -45,6 +45,7 @@
 #include <linux/ethtool.h>
 #include <linux/etherdevice.h>
 #include <linux/vmalloc.h>
+#include <linux/crash_dump.h>
 #include <linux/qed/qed_if.h>
 #include <linux/qed/qed_ll2_if.h>
 
@@ -54,6 +55,8 @@
 #include "qed_dev_api.h"
 #include "qed_ll2.h"
 #include "qed_fcoe.h"
+#include "qed_iscsi.h"
 #include "qed_mcp.h"
 #include "qed_hw.h"
 #include "qed_selftest.h"
@@ -238,6 +241,7 @@ int qed_fill_dev_info(struct qed_dev *cdev,
        dev_info->rdma_supported = (cdev->hwfns[0].hw_info.personality ==
                                    QED_PCI_ETH_ROCE);
        dev_info->is_mf_default = IS_MF_DEFAULT(&cdev->hwfns[0]);
+       dev_info->dev_type = cdev->type;
        ether_addr_copy(dev_info->hw_mac, cdev->hwfns[0].hw_info.hw_mac_addr);
 
        if (IS_PF(cdev)) {
@@ -588,6 +592,19 @@ int qed_slowpath_irq_req(struct qed_hwfn *hwfn)
        return rc;
 }
 
+void qed_slowpath_irq_sync(struct qed_hwfn *p_hwfn)
+{
+       struct qed_dev *cdev = p_hwfn->cdev;
+       u8 id = p_hwfn->my_id;
+       u32 int_mode;
+
+       int_mode = cdev->int_params.out.int_mode;
+       if (int_mode == QED_INT_MODE_MSIX)
+               synchronize_irq(cdev->int_params.msix_table[id].vector);
+       else
+               synchronize_irq(cdev->pdev->irq);
+}
+
 static void qed_slowpath_irq_free(struct qed_dev *cdev)
 {
        int i;
@@ -630,19 +647,6 @@ static int qed_nic_stop(struct qed_dev *cdev)
        return rc;
 }
 
-static int qed_nic_reset(struct qed_dev *cdev)
-{
-       int rc;
-
-       rc = qed_hw_reset(cdev);
-       if (rc)
-               return rc;
-
-       qed_resc_free(cdev);
-
-       return 0;
-}
-
 static int qed_nic_setup(struct qed_dev *cdev)
 {
        int rc, i;
@@ -743,7 +747,8 @@ static int qed_slowpath_setup_int(struct qed_dev *cdev,
        cdev->int_params.fp_msix_cnt = cdev->int_params.out.num_vectors -
                                       cdev->num_hwfns;
 
-       if (!IS_ENABLED(CONFIG_QED_RDMA))
+       if (!IS_ENABLED(CONFIG_QED_RDMA) ||
+           QED_LEADING_HWFN(cdev)->hw_info.personality != QED_PCI_ETH_ROCE)
                return 0;
 
        for_each_hwfn(cdev, i)
@@ -875,7 +880,6 @@ static void qed_update_pf_params(struct qed_dev *cdev,
                params->rdma_pf_params.num_qps = QED_ROCE_QPS;
                params->rdma_pf_params.min_dpis = QED_ROCE_DPIS;
-               /* divide by 3 the MRs to avoid MF ILT overflow */
-               params->rdma_pf_params.num_mrs = RDMA_MAX_TIDS;
                params->rdma_pf_params.gl_pi = QED_ROCE_PROTOCOL_INDEX;
        }
 
@@ -900,6 +904,8 @@ static void qed_update_pf_params(struct qed_dev *cdev,
 static int qed_slowpath_start(struct qed_dev *cdev,
                              struct qed_slowpath_params *params)
 {
+       struct qed_drv_load_params drv_load_params;
+       struct qed_hw_init_params hw_init_params;
        struct qed_tunn_start_params tunn_info;
        struct qed_mcp_drv_version drv_version;
        const u8 *data = NULL;
@@ -965,9 +971,21 @@ static int qed_slowpath_start(struct qed_dev *cdev,
        tunn_info.tunn_clss_ipgre = QED_TUNN_CLSS_MAC_VLAN;
 
        /* Start the slowpath */
-       rc = qed_hw_init(cdev, &tunn_info, true,
-                        cdev->int_params.out.int_mode,
-                        true, data);
+       memset(&hw_init_params, 0, sizeof(hw_init_params));
+       hw_init_params.p_tunn = &tunn_info;
+       hw_init_params.b_hw_start = true;
+       hw_init_params.int_mode = cdev->int_params.out.int_mode;
+       hw_init_params.allow_npar_tx_switch = true;
+       hw_init_params.bin_fw_data = data;
+
+       memset(&drv_load_params, 0, sizeof(drv_load_params));
+       drv_load_params.is_crash_kernel = is_kdump_kernel();
+       drv_load_params.mfw_timeout_val = QED_LOAD_REQ_LOCK_TO_DEFAULT;
+       drv_load_params.avoid_eng_reset = false;
+       drv_load_params.override_force_load = QED_OVERRIDE_FORCE_LOAD_NONE;
+       hw_init_params.p_drv_load_params = &drv_load_params;
+
+       rc = qed_hw_init(cdev, &hw_init_params);
        if (rc)
                goto err2;
 
@@ -1042,7 +1060,8 @@ static int qed_slowpath_stop(struct qed_dev *cdev)
        }
 
        qed_disable_msix(cdev);
-       qed_nic_reset(cdev);
+
+       qed_resc_free(cdev);
 
        qed_iov_wq_stop(cdev, true);
 
@@ -1653,13 +1672,18 @@ void qed_get_protocol_stats(struct qed_dev *cdev,
        switch (type) {
        case QED_MCP_LAN_STATS:
                qed_get_vport_stats(cdev, &eth_stats);
-               stats->lan_stats.ucast_rx_pkts = eth_stats.rx_ucast_pkts;
-               stats->lan_stats.ucast_tx_pkts = eth_stats.tx_ucast_pkts;
+               stats->lan_stats.ucast_rx_pkts =
+                                       eth_stats.common.rx_ucast_pkts;
+               stats->lan_stats.ucast_tx_pkts =
+                                       eth_stats.common.tx_ucast_pkts;
                stats->lan_stats.fcs_err = -1;
                break;
        case QED_MCP_FCOE_STATS:
                qed_get_protocol_stats_fcoe(cdev, &stats->fcoe_stats);
                break;
+       case QED_MCP_ISCSI_STATS:
+               qed_get_protocol_stats_iscsi(cdev, &stats->iscsi_stats);
+               break;
        default:
                DP_ERR(cdev, "Invalid protocol type = %d\n", type);
                return;
index 87fde205149fdbf3181befd79ca62508b2daa388..ff6080df2246d3957bf80f5c0a708dbae4e53141 100644 (file)
@@ -111,12 +111,71 @@ void qed_mcp_read_mb(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
        }
 }
 
+struct qed_mcp_cmd_elem {
+       struct list_head list;
+       struct qed_mcp_mb_params *p_mb_params;
+       u16 expected_seq_num;
+       bool b_is_completed;
+};
+
+/* Must be called while cmd_lock is acquired */
+static struct qed_mcp_cmd_elem *
+qed_mcp_cmd_add_elem(struct qed_hwfn *p_hwfn,
+                    struct qed_mcp_mb_params *p_mb_params,
+                    u16 expected_seq_num)
+{
+       struct qed_mcp_cmd_elem *p_cmd_elem = NULL;
+
+       p_cmd_elem = kzalloc(sizeof(*p_cmd_elem), GFP_ATOMIC);
+       if (!p_cmd_elem)
+               goto out;
+
+       p_cmd_elem->p_mb_params = p_mb_params;
+       p_cmd_elem->expected_seq_num = expected_seq_num;
+       list_add(&p_cmd_elem->list, &p_hwfn->mcp_info->cmd_list);
+out:
+       return p_cmd_elem;
+}
+
+/* Must be called while cmd_lock is acquired */
+static void qed_mcp_cmd_del_elem(struct qed_hwfn *p_hwfn,
+                                struct qed_mcp_cmd_elem *p_cmd_elem)
+{
+       list_del(&p_cmd_elem->list);
+       kfree(p_cmd_elem);
+}
+
+/* Must be called while cmd_lock is acquired */
+static struct qed_mcp_cmd_elem *qed_mcp_cmd_get_elem(struct qed_hwfn *p_hwfn,
+                                                    u16 seq_num)
+{
+       struct qed_mcp_cmd_elem *p_cmd_elem = NULL;
+
+       list_for_each_entry(p_cmd_elem, &p_hwfn->mcp_info->cmd_list, list) {
+               if (p_cmd_elem->expected_seq_num == seq_num)
+                       return p_cmd_elem;
+       }
+
+       return NULL;
+}
+
 int qed_mcp_free(struct qed_hwfn *p_hwfn)
 {
        if (p_hwfn->mcp_info) {
+               struct qed_mcp_cmd_elem *p_cmd_elem, *p_tmp;
+
                kfree(p_hwfn->mcp_info->mfw_mb_cur);
                kfree(p_hwfn->mcp_info->mfw_mb_shadow);
+
+               spin_lock_bh(&p_hwfn->mcp_info->cmd_lock);
+               list_for_each_entry_safe(p_cmd_elem,
+                                        p_tmp,
+                                        &p_hwfn->mcp_info->cmd_list, list) {
+                       qed_mcp_cmd_del_elem(p_hwfn, p_cmd_elem);
+               }
+               spin_unlock_bh(&p_hwfn->mcp_info->cmd_lock);
        }
+
        kfree(p_hwfn->mcp_info);
 
        return 0;
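
The reworked mailbox keeps each outstanding command on a list keyed by its sequence number, so a response read back from the shared mailbox can be matched to the command that produced it. A minimal single-threaded sketch of the add/find/delete trio - the real driver performs all three under cmd_lock:

#include <stdio.h>
#include <stdlib.h>

/* simplified stand-in for the cmd_list element keyed by sequence number */
struct cmd_elem {
        struct cmd_elem *next;
        unsigned short seq;
        int completed;
};

static struct cmd_elem *head;

static struct cmd_elem *cmd_add(unsigned short seq)
{
        struct cmd_elem *e = calloc(1, sizeof(*e));

        if (!e)
                return NULL;
        e->seq = seq;
        e->next = head;
        head = e;               /* newest first, like list_add() */
        return e;
}

static struct cmd_elem *cmd_find(unsigned short seq)
{
        struct cmd_elem *e;

        for (e = head; e; e = e->next)
                if (e->seq == seq)
                        return e;
        return NULL;
}

static void cmd_del(struct cmd_elem *e)
{
        struct cmd_elem **pp;

        for (pp = &head; *pp; pp = &(*pp)->next)
                if (*pp == e) {
                        *pp = e->next;
                        free(e);
                        return;
                }
}

int main(void)
{
        struct cmd_elem *e;

        cmd_add(1);
        cmd_add(2);
        e = cmd_find(1);
        printf("found seq 1: %s\n", e ? "yes" : "no");
        cmd_del(e);
        return 0;
}
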
@@ -160,7 +219,7 @@ static int qed_load_mcp_offsets(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
        p_info->drv_pulse_seq = DRV_MB_RD(p_hwfn, p_ptt, drv_pulse_mb) &
                                DRV_PULSE_SEQ_MASK;
 
-       p_info->mcp_hist = (u16)qed_rd(p_hwfn, p_ptt, MISCS_REG_GENERIC_POR_0);
+       p_info->mcp_hist = qed_rd(p_hwfn, p_ptt, MISCS_REG_GENERIC_POR_0);
 
        return 0;
 }
@@ -176,6 +235,12 @@ int qed_mcp_cmd_init(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
                goto err;
        p_info = p_hwfn->mcp_info;
 
+       /* Initialize the MFW spinlocks */
+       spin_lock_init(&p_info->cmd_lock);
+       spin_lock_init(&p_info->link_lock);
+
+       INIT_LIST_HEAD(&p_info->cmd_list);
+
        if (qed_load_mcp_offsets(p_hwfn, p_ptt) != 0) {
                DP_NOTICE(p_hwfn, "MCP is not initialized\n");
                /* Do not free mcp_info here, since public_base indicate that
@@ -190,10 +255,6 @@ int qed_mcp_cmd_init(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
        if (!p_info->mfw_mb_shadow || !p_info->mfw_mb_addr)
                goto err;
 
-       /* Initialize the MFW spinlock */
-       spin_lock_init(&p_info->lock);
-       spin_lock_init(&p_info->link_lock);
-
        return 0;
 
 err:
@@ -201,68 +262,39 @@ err:
        return -ENOMEM;
 }
 
-/* Locks the MFW mailbox of a PF to ensure a single access.
- * The lock is achieved in most cases by holding a spinlock, causing other
- * threads to wait till a previous access is done.
- * In some cases (currently when a [UN]LOAD_REQ commands are sent), the single
- * access is achieved by setting a blocking flag, which will fail other
- * competing contexts to send their mailboxes.
- */
-static int qed_mcp_mb_lock(struct qed_hwfn *p_hwfn, u32 cmd)
+static void qed_mcp_reread_offsets(struct qed_hwfn *p_hwfn,
+                                  struct qed_ptt *p_ptt)
 {
-       spin_lock_bh(&p_hwfn->mcp_info->lock);
+       u32 generic_por_0 = qed_rd(p_hwfn, p_ptt, MISCS_REG_GENERIC_POR_0);
 
-       /* The spinlock shouldn't be acquired when the mailbox command is
-        * [UN]LOAD_REQ, since the engine is locked by the MFW, and a parallel
-        * pending [UN]LOAD_REQ command of another PF together with a spinlock
-        * (i.e. interrupts are disabled) - can lead to a deadlock.
-        * It is assumed that for a single PF, no other mailbox commands can be
-        * sent from another context while sending LOAD_REQ, and that any
-        * parallel commands to UNLOAD_REQ can be cancelled.
+       /* Use MCP history register to check if MCP reset occurred between init
+        * time and now.
         */
-       if (cmd == DRV_MSG_CODE_LOAD_DONE || cmd == DRV_MSG_CODE_UNLOAD_DONE)
-               p_hwfn->mcp_info->block_mb_sending = false;
-
-       if (p_hwfn->mcp_info->block_mb_sending) {
-               DP_NOTICE(p_hwfn,
-                         "Trying to send a MFW mailbox command [0x%x] in parallel to [UN]LOAD_REQ. Aborting.\n",
-                         cmd);
-               spin_unlock_bh(&p_hwfn->mcp_info->lock);
-               return -EBUSY;
-       }
+       if (p_hwfn->mcp_info->mcp_hist != generic_por_0) {
+               DP_VERBOSE(p_hwfn,
+                          QED_MSG_SP,
+                          "Rereading MCP offsets [mcp_hist 0x%08x, generic_por_0 0x%08x]\n",
+                          p_hwfn->mcp_info->mcp_hist, generic_por_0);
 
-       if (cmd == DRV_MSG_CODE_LOAD_REQ || cmd == DRV_MSG_CODE_UNLOAD_REQ) {
-               p_hwfn->mcp_info->block_mb_sending = true;
-               spin_unlock_bh(&p_hwfn->mcp_info->lock);
+               qed_load_mcp_offsets(p_hwfn, p_ptt);
+               qed_mcp_cmd_port_init(p_hwfn, p_ptt);
        }
-
-       return 0;
-}
-
-static void qed_mcp_mb_unlock(struct qed_hwfn *p_hwfn, u32 cmd)
-{
-       if (cmd != DRV_MSG_CODE_LOAD_REQ && cmd != DRV_MSG_CODE_UNLOAD_REQ)
-               spin_unlock_bh(&p_hwfn->mcp_info->lock);
 }
 
 int qed_mcp_reset(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
 {
-       u32 seq = ++p_hwfn->mcp_info->drv_mb_seq;
-       u8 delay = CHIP_MCP_RESP_ITER_US;
-       u32 org_mcp_reset_seq, cnt = 0;
+       u32 org_mcp_reset_seq, seq, delay = CHIP_MCP_RESP_ITER_US, cnt = 0;
        int rc = 0;
 
-       /* Ensure that only a single thread is accessing the mailbox at a
-        * certain time.
-        */
-       rc = qed_mcp_mb_lock(p_hwfn, DRV_MSG_CODE_MCP_RESET);
-       if (rc != 0)
-               return rc;
+       /* Ensure that only a single thread is accessing the mailbox */
+       spin_lock_bh(&p_hwfn->mcp_info->cmd_lock);
 
-       /* Set drv command along with the updated sequence */
        org_mcp_reset_seq = qed_rd(p_hwfn, p_ptt, MISCS_REG_GENERIC_POR_0);
-       DRV_MB_WR(p_hwfn, p_ptt, drv_mb_header,
-                 (DRV_MSG_CODE_MCP_RESET | seq));
+
+       /* Set drv command along with the updated sequence */
+       qed_mcp_reread_offsets(p_hwfn, p_ptt);
+       seq = ++p_hwfn->mcp_info->drv_mb_seq;
+       DRV_MB_WR(p_hwfn, p_ptt, drv_mb_header, (DRV_MSG_CODE_MCP_RESET | seq));
 
        do {
                /* Wait for MFW response */
@@ -281,72 +313,207 @@ int qed_mcp_reset(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
                rc = -EAGAIN;
        }
 
-       qed_mcp_mb_unlock(p_hwfn, DRV_MSG_CODE_MCP_RESET);
+       spin_unlock_bh(&p_hwfn->mcp_info->cmd_lock);
 
        return rc;
 }
 
-static int qed_do_mcp_cmd(struct qed_hwfn *p_hwfn,
-                         struct qed_ptt *p_ptt,
-                         u32 cmd,
-                         u32 param,
-                         u32 *o_mcp_resp,
-                         u32 *o_mcp_param)
+/* Must be called while cmd_lock is acquired */
+static bool qed_mcp_has_pending_cmd(struct qed_hwfn *p_hwfn)
 {
-       u8 delay = CHIP_MCP_RESP_ITER_US;
-       u32 seq, cnt = 1, actual_mb_seq;
-       int rc = 0;
+       struct qed_mcp_cmd_elem *p_cmd_elem;
 
-       /* Get actual driver mailbox sequence */
-       actual_mb_seq = DRV_MB_RD(p_hwfn, p_ptt, drv_mb_header) &
-                       DRV_MSG_SEQ_NUMBER_MASK;
-
-       /* Use MCP history register to check if MCP reset occurred between
-        * init time and now.
+       /* There is at most one pending command at any given time, and if one
+        * exists, it is placed at the head of the list.
+        */
-       if (p_hwfn->mcp_info->mcp_hist !=
-           qed_rd(p_hwfn, p_ptt, MISCS_REG_GENERIC_POR_0)) {
-               DP_VERBOSE(p_hwfn, QED_MSG_SP, "Rereading MCP offsets\n");
-               qed_load_mcp_offsets(p_hwfn, p_ptt);
-               qed_mcp_cmd_port_init(p_hwfn, p_ptt);
+       if (!list_empty(&p_hwfn->mcp_info->cmd_list)) {
+               p_cmd_elem = list_first_entry(&p_hwfn->mcp_info->cmd_list,
+                                             struct qed_mcp_cmd_elem, list);
+               return !p_cmd_elem->b_is_completed;
        }
-       seq = ++p_hwfn->mcp_info->drv_mb_seq;
 
-       /* Set drv param */
-       DRV_MB_WR(p_hwfn, p_ptt, drv_mb_param, param);
+       return false;
+}
 
-       /* Set drv command along with the updated sequence */
-       DRV_MB_WR(p_hwfn, p_ptt, drv_mb_header, (cmd | seq));
+/* Must be called while cmd_lock is acquired */
+static int
+qed_mcp_update_pending_cmd(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
+{
+       struct qed_mcp_mb_params *p_mb_params;
+       struct qed_mcp_cmd_elem *p_cmd_elem;
+       u32 mcp_resp;
+       u16 seq_num;
+
+       mcp_resp = DRV_MB_RD(p_hwfn, p_ptt, fw_mb_header);
+       seq_num = (u16)(mcp_resp & FW_MSG_SEQ_NUMBER_MASK);
+
+       /* Return if no new unhandled response has been received */
+       if (seq_num != p_hwfn->mcp_info->drv_mb_seq)
+               return -EAGAIN;
+
+       p_cmd_elem = qed_mcp_cmd_get_elem(p_hwfn, seq_num);
+       if (!p_cmd_elem) {
+               DP_ERR(p_hwfn,
+                      "Failed to find a pending mailbox cmd that expects sequence number %d\n",
+                      seq_num);
+               return -EINVAL;
+       }
+
+       p_mb_params = p_cmd_elem->p_mb_params;
+
+       /* Get the MFW response along with the sequence number */
+       p_mb_params->mcp_resp = mcp_resp;
+
+       /* Get the MFW param */
+       p_mb_params->mcp_param = DRV_MB_RD(p_hwfn, p_ptt, fw_mb_param);
+
+       /* Get the union data */
+       if (p_mb_params->p_data_dst && p_mb_params->data_dst_size) {
+               u32 union_data_addr = p_hwfn->mcp_info->drv_mb_addr +
+                                     offsetof(struct public_drv_mb,
+                                              union_data);
+               qed_memcpy_from(p_hwfn, p_ptt, p_mb_params->p_data_dst,
+                               union_data_addr, p_mb_params->data_dst_size);
+       }
+
+       p_cmd_elem->b_is_completed = true;
+
+       return 0;
+}
+
+/* Must be called with cmd_lock held */
+static void __qed_mcp_cmd_and_union(struct qed_hwfn *p_hwfn,
+                                   struct qed_ptt *p_ptt,
+                                   struct qed_mcp_mb_params *p_mb_params,
+                                   u16 seq_num)
+{
+       union drv_union_data union_data;
+       u32 union_data_addr;
+
+       /* Set the union data */
+       union_data_addr = p_hwfn->mcp_info->drv_mb_addr +
+                         offsetof(struct public_drv_mb, union_data);
+       memset(&union_data, 0, sizeof(union_data));
+       if (p_mb_params->p_data_src && p_mb_params->data_src_size)
+               memcpy(&union_data, p_mb_params->p_data_src,
+                      p_mb_params->data_src_size);
+       qed_memcpy_to(p_hwfn, p_ptt, union_data_addr, &union_data,
+                     sizeof(union_data));
+
+       /* Set the drv param */
+       DRV_MB_WR(p_hwfn, p_ptt, drv_mb_param, p_mb_params->param);
+
+       /* Set the drv command along with the sequence number */
+       DRV_MB_WR(p_hwfn, p_ptt, drv_mb_header, (p_mb_params->cmd | seq_num));
 
        DP_VERBOSE(p_hwfn, QED_MSG_SP,
-                  "wrote command (%x) to MFW MB param 0x%08x\n",
-                  (cmd | seq), param);
+                  "MFW mailbox: command 0x%08x param 0x%08x\n",
+                  (p_mb_params->cmd | seq_num), p_mb_params->param);
+}
+
+static int
+_qed_mcp_cmd_and_union(struct qed_hwfn *p_hwfn,
+                      struct qed_ptt *p_ptt,
+                      struct qed_mcp_mb_params *p_mb_params,
+                      u32 max_retries, u32 delay)
+{
+       struct qed_mcp_cmd_elem *p_cmd_elem;
+       u32 cnt = 0;
+       u16 seq_num;
+       int rc = 0;
 
+       /* Wait until the mailbox is free */
        do {
-               /* Wait for MFW response */
+               /* Exit the loop if there is no pending command, or if the
+                * pending command is completed during this iteration.
+                * The spinlock stays locked until the command is sent.
+                */
+
+               spin_lock_bh(&p_hwfn->mcp_info->cmd_lock);
+
+               if (!qed_mcp_has_pending_cmd(p_hwfn))
+                       break;
+
+               rc = qed_mcp_update_pending_cmd(p_hwfn, p_ptt);
+               if (!rc)
+                       break;
+               else if (rc != -EAGAIN)
+                       goto err;
+
+               spin_unlock_bh(&p_hwfn->mcp_info->cmd_lock);
                udelay(delay);
-               *o_mcp_resp = DRV_MB_RD(p_hwfn, p_ptt, fw_mb_header);
+       } while (++cnt < max_retries);
 
-               /* Give the FW up to 5 second (500*10ms) */
-       } while ((seq != (*o_mcp_resp & FW_MSG_SEQ_NUMBER_MASK)) &&
-                (cnt++ < QED_DRV_MB_MAX_RETRIES));
+       if (cnt >= max_retries) {
+               DP_NOTICE(p_hwfn,
+                         "The MFW mailbox is occupied by an uncompleted command. Failed to send command 0x%08x [param 0x%08x].\n",
+                         p_mb_params->cmd, p_mb_params->param);
+               return -EAGAIN;
+       }
 
-       DP_VERBOSE(p_hwfn, QED_MSG_SP,
-                  "[after %d ms] read (%x) seq is (%x) from FW MB\n",
-                  cnt * delay, *o_mcp_resp, seq);
-
-       /* Is this a reply to our command? */
-       if (seq == (*o_mcp_resp & FW_MSG_SEQ_NUMBER_MASK)) {
-               *o_mcp_resp &= FW_MSG_CODE_MASK;
-               /* Get the MCP param */
-               *o_mcp_param = DRV_MB_RD(p_hwfn, p_ptt, fw_mb_param);
-       } else {
-               /* FW BUG! */
-               DP_ERR(p_hwfn, "MFW failed to respond [cmd 0x%x param 0x%x]\n",
-                      cmd, param);
-               *o_mcp_resp = 0;
-               rc = -EAGAIN;
+       /* Send the mailbox command */
+       qed_mcp_reread_offsets(p_hwfn, p_ptt);
+       seq_num = ++p_hwfn->mcp_info->drv_mb_seq;
+       p_cmd_elem = qed_mcp_cmd_add_elem(p_hwfn, p_mb_params, seq_num);
+       if (!p_cmd_elem) {
+               rc = -ENOMEM;
+               goto err;
        }
+
+       __qed_mcp_cmd_and_union(p_hwfn, p_ptt, p_mb_params, seq_num);
+       spin_unlock_bh(&p_hwfn->mcp_info->cmd_lock);
+
+       /* Wait for the MFW response */
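+       /* Note that cnt is not reset here, so this wait and the occupancy
+        * wait above share the same max_retries budget.
+        */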
+       do {
+               /* Exit the loop if the command is already completed, or if the
+                * command is completed during this iteration.
+                * The spinlock stays locked until the list element is removed.
+                */
+
+               udelay(delay);
+               spin_lock_bh(&p_hwfn->mcp_info->cmd_lock);
+
+               if (p_cmd_elem->b_is_completed)
+                       break;
+
+               rc = qed_mcp_update_pending_cmd(p_hwfn, p_ptt);
+               if (!rc)
+                       break;
+               else if (rc != -EAGAIN)
+                       goto err;
+
+               spin_unlock_bh(&p_hwfn->mcp_info->cmd_lock);
+       } while (++cnt < max_retries);
+
+       if (cnt >= max_retries) {
+               DP_NOTICE(p_hwfn,
+                         "The MFW failed to respond to command 0x%08x [param 0x%08x].\n",
+                         p_mb_params->cmd, p_mb_params->param);
+
+               spin_lock_bh(&p_hwfn->mcp_info->cmd_lock);
+               qed_mcp_cmd_del_elem(p_hwfn, p_cmd_elem);
+               spin_unlock_bh(&p_hwfn->mcp_info->cmd_lock);
+
+               return -EAGAIN;
+       }
+
+       qed_mcp_cmd_del_elem(p_hwfn, p_cmd_elem);
+       spin_unlock_bh(&p_hwfn->mcp_info->cmd_lock);
+
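+       /* cnt * delay is the total wait time in usec; log it as msec */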
+       DP_VERBOSE(p_hwfn,
+                  QED_MSG_SP,
+                  "MFW mailbox: response 0x%08x param 0x%08x [after %d.%03d ms]\n",
+                  p_mb_params->mcp_resp,
+                  p_mb_params->mcp_param,
+                  (cnt * delay) / 1000, (cnt * delay) % 1000);
+
+       /* Clear the sequence number from the MFW response */
+       p_mb_params->mcp_resp &= FW_MSG_CODE_MASK;
+
+       return 0;
+
+err:
+       spin_unlock_bh(&p_hwfn->mcp_info->cmd_lock);
        return rc;
 }
 
@@ -354,9 +521,9 @@ static int qed_mcp_cmd_and_union(struct qed_hwfn *p_hwfn,
                                 struct qed_ptt *p_ptt,
                                 struct qed_mcp_mb_params *p_mb_params)
 {
-       u32 union_data_addr;
-
-       int rc;
+       size_t union_data_size = sizeof(union drv_union_data);
+       u32 max_retries = QED_DRV_MB_MAX_RETRIES;
+       u32 delay = CHIP_MCP_RESP_ITER_US;
 
        /* MCP not initialized */
        if (!qed_mcp_is_init(p_hwfn)) {
@@ -364,33 +531,17 @@ static int qed_mcp_cmd_and_union(struct qed_hwfn *p_hwfn,
                return -EBUSY;
        }
 
-       union_data_addr = p_hwfn->mcp_info->drv_mb_addr +
-                         offsetof(struct public_drv_mb, union_data);
-
-       /* Ensure that only a single thread is accessing the mailbox at a
-        * certain time.
-        */
-       rc = qed_mcp_mb_lock(p_hwfn, p_mb_params->cmd);
-       if (rc)
-               return rc;
-
-       if (p_mb_params->p_data_src != NULL)
-               qed_memcpy_to(p_hwfn, p_ptt, union_data_addr,
-                             p_mb_params->p_data_src,
-                             sizeof(*p_mb_params->p_data_src));
-
-       rc = qed_do_mcp_cmd(p_hwfn, p_ptt, p_mb_params->cmd,
-                           p_mb_params->param, &p_mb_params->mcp_resp,
-                           &p_mb_params->mcp_param);
-
-       if (p_mb_params->p_data_dst != NULL)
-               qed_memcpy_from(p_hwfn, p_ptt, p_mb_params->p_data_dst,
-                               union_data_addr,
-                               sizeof(*p_mb_params->p_data_dst));
-
-       qed_mcp_mb_unlock(p_hwfn, p_mb_params->cmd);
+       if (p_mb_params->data_src_size > union_data_size ||
+           p_mb_params->data_dst_size > union_data_size) {
+               DP_ERR(p_hwfn,
+                      "The provided size is larger than the union data size [src_size %u, dst_size %u, union_data_size %zu]\n",
+                      p_mb_params->data_src_size,
+                      p_mb_params->data_dst_size, union_data_size);
+               return -EINVAL;
+       }
 
-       return rc;
+       return _qed_mcp_cmd_and_union(p_hwfn, p_ptt, p_mb_params, max_retries,
+                                     delay);
 }
 
 int qed_mcp_cmd(struct qed_hwfn *p_hwfn,
@@ -401,32 +552,12 @@ int qed_mcp_cmd(struct qed_hwfn *p_hwfn,
                u32 *o_mcp_param)
 {
        struct qed_mcp_mb_params mb_params;
-       union drv_union_data data_src;
        int rc;
 
        memset(&mb_params, 0, sizeof(mb_params));
-       memset(&data_src, 0, sizeof(data_src));
        mb_params.cmd = cmd;
        mb_params.param = param;
 
-       /* In case of UNLOAD_DONE, set the primary MAC */
-       if ((cmd == DRV_MSG_CODE_UNLOAD_DONE) &&
-           (p_hwfn->cdev->wol_config == QED_OV_WOL_ENABLED)) {
-               u8 *p_mac = p_hwfn->cdev->wol_mac;
-
-               data_src.wol_mac.mac_upper = p_mac[0] << 8 | p_mac[1];
-               data_src.wol_mac.mac_lower = p_mac[2] << 24 | p_mac[3] << 16 |
-                                            p_mac[4] << 8 | p_mac[5];
-
-               DP_VERBOSE(p_hwfn,
-                          (QED_MSG_SP | NETIF_MSG_IFDOWN),
-                          "Setting WoL MAC: %pM --> [%08x,%08x]\n",
-                          p_mac, data_src.wol_mac.mac_upper,
-                          data_src.wol_mac.mac_lower);
-
-               mb_params.p_data_src = &data_src;
-       }
-
        rc = qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
        if (rc)
                return rc;
@@ -445,13 +576,17 @@ int qed_mcp_nvm_rd_cmd(struct qed_hwfn *p_hwfn,
                       u32 *o_mcp_param, u32 *o_txn_size, u32 *o_buf)
 {
        struct qed_mcp_mb_params mb_params;
-       union drv_union_data union_data;
+       u8 raw_data[MCP_DRV_NVM_BUF_LEN];
        int rc;
 
        memset(&mb_params, 0, sizeof(mb_params));
        mb_params.cmd = cmd;
        mb_params.param = param;
-       mb_params.p_data_dst = &union_data;
+       mb_params.p_data_dst = raw_data;
+
+       /* Use the maximal size, since the actual size is part of the response */
+       mb_params.data_dst_size = MCP_DRV_NVM_BUF_LEN;
+
        rc = qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
        if (rc)
                return rc;
@@ -460,55 +595,413 @@ int qed_mcp_nvm_rd_cmd(struct qed_hwfn *p_hwfn,
        *o_mcp_param = mb_params.mcp_param;
 
        *o_txn_size = *o_mcp_param;
-       memcpy(o_buf, &union_data.raw_data, *o_txn_size);
+       memcpy(o_buf, raw_data, *o_txn_size);
 
        return 0;
 }
 
-int qed_mcp_load_req(struct qed_hwfn *p_hwfn,
-                    struct qed_ptt *p_ptt, u32 *p_load_code)
+static bool
+qed_mcp_can_force_load(u8 drv_role,
+                      u8 exist_drv_role,
+                      enum qed_override_force_load override_force_load)
+{
+       bool can_force_load = false;
+
+       switch (override_force_load) {
+       case QED_OVERRIDE_FORCE_LOAD_ALWAYS:
+               can_force_load = true;
+               break;
+       case QED_OVERRIDE_FORCE_LOAD_NEVER:
+               can_force_load = false;
+               break;
+       default:
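+               /* By default, a force load is allowed only when an OS driver
+                * replaces a preboot driver, or a kdump driver replaces an
+                * OS driver.
+                */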
+               can_force_load = (drv_role == DRV_ROLE_OS &&
+                                 exist_drv_role == DRV_ROLE_PREBOOT) ||
+                                (drv_role == DRV_ROLE_KDUMP &&
+                                 exist_drv_role == DRV_ROLE_OS);
+               break;
+       }
+
+       return can_force_load;
+}
+
+static int qed_mcp_cancel_load_req(struct qed_hwfn *p_hwfn,
+                                  struct qed_ptt *p_ptt)
+{
+       u32 resp = 0, param = 0;
+       int rc;
+
+       rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_CANCEL_LOAD_REQ, 0,
+                        &resp, &param);
+       if (rc)
+               DP_NOTICE(p_hwfn,
+                         "Failed to send cancel load request, rc = %d\n", rc);
+
+       return rc;
+}
+
+#define CONFIG_QEDE_BITMAP_IDX         BIT(0)
+#define CONFIG_QED_SRIOV_BITMAP_IDX    BIT(1)
+#define CONFIG_QEDR_BITMAP_IDX         BIT(2)
+#define CONFIG_QEDF_BITMAP_IDX         BIT(4)
+#define CONFIG_QEDI_BITMAP_IDX         BIT(5)
+#define CONFIG_QED_LL2_BITMAP_IDX      BIT(6)
+
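+/* Build a bitmap of the qed-related kernel configs; it is reported to the
+ * MFW as drv_ver_1 in the load request.
+ */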
+static u32 qed_get_config_bitmap(void)
+{
+       u32 config_bitmap = 0x0;
+
+       if (IS_ENABLED(CONFIG_QEDE))
+               config_bitmap |= CONFIG_QEDE_BITMAP_IDX;
+
+       if (IS_ENABLED(CONFIG_QED_SRIOV))
+               config_bitmap |= CONFIG_QED_SRIOV_BITMAP_IDX;
+
+       if (IS_ENABLED(CONFIG_QED_RDMA))
+               config_bitmap |= CONFIG_QEDR_BITMAP_IDX;
+
+       if (IS_ENABLED(CONFIG_QED_FCOE))
+               config_bitmap |= CONFIG_QEDF_BITMAP_IDX;
+
+       if (IS_ENABLED(CONFIG_QED_ISCSI))
+               config_bitmap |= CONFIG_QEDI_BITMAP_IDX;
+
+       if (IS_ENABLED(CONFIG_QED_LL2))
+               config_bitmap |= CONFIG_QED_LL2_BITMAP_IDX;
+
+       return config_bitmap;
+}
+
+struct qed_load_req_in_params {
+       u8 hsi_ver;
+#define QED_LOAD_REQ_HSI_VER_DEFAULT   0
+#define QED_LOAD_REQ_HSI_VER_1         1
+       u32 drv_ver_0;
+       u32 drv_ver_1;
+       u32 fw_ver;
+       u8 drv_role;
+       u8 timeout_val;
+       u8 force_cmd;
+       bool avoid_eng_reset;
+};
+
+struct qed_load_req_out_params {
+       u32 load_code;
+       u32 exist_drv_ver_0;
+       u32 exist_drv_ver_1;
+       u32 exist_fw_ver;
+       u8 exist_drv_role;
+       u8 mfw_hsi_ver;
+       bool drv_exists;
+};
+
+static int
+__qed_mcp_load_req(struct qed_hwfn *p_hwfn,
+                  struct qed_ptt *p_ptt,
+                  struct qed_load_req_in_params *p_in_params,
+                  struct qed_load_req_out_params *p_out_params)
 {
-       struct qed_dev *cdev = p_hwfn->cdev;
        struct qed_mcp_mb_params mb_params;
-       union drv_union_data union_data;
+       struct load_req_stc load_req;
+       struct load_rsp_stc load_rsp;
+       u32 hsi_ver;
        int rc;
 
+       memset(&load_req, 0, sizeof(load_req));
+       load_req.drv_ver_0 = p_in_params->drv_ver_0;
+       load_req.drv_ver_1 = p_in_params->drv_ver_1;
+       load_req.fw_ver = p_in_params->fw_ver;
+       QED_MFW_SET_FIELD(load_req.misc0, LOAD_REQ_ROLE, p_in_params->drv_role);
+       QED_MFW_SET_FIELD(load_req.misc0, LOAD_REQ_LOCK_TO,
+                         p_in_params->timeout_val);
+       QED_MFW_SET_FIELD(load_req.misc0, LOAD_REQ_FORCE,
+                         p_in_params->force_cmd);
+       QED_MFW_SET_FIELD(load_req.misc0, LOAD_REQ_FLAGS0,
+                         p_in_params->avoid_eng_reset);
+
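+       /* DEFAULT means the HSI version currently supported by the driver;
+        * any other value is placed verbatim in the DRV_ID_MCP_HSI_VER field.
+        */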
+       hsi_ver = (p_in_params->hsi_ver == QED_LOAD_REQ_HSI_VER_DEFAULT) ?
+                 DRV_ID_MCP_HSI_VER_CURRENT :
+                 (p_in_params->hsi_ver << DRV_ID_MCP_HSI_VER_SHIFT);
+
        memset(&mb_params, 0, sizeof(mb_params));
-       /* Load Request */
        mb_params.cmd = DRV_MSG_CODE_LOAD_REQ;
-       mb_params.param = PDA_COMP | DRV_ID_MCP_HSI_VER_CURRENT |
-                         cdev->drv_type;
-       memcpy(&union_data.ver_str, cdev->ver_str, MCP_DRV_VER_STR_SIZE);
-       mb_params.p_data_src = &union_data;
-       rc = qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
+       mb_params.param = PDA_COMP | hsi_ver | p_hwfn->cdev->drv_type;
+       mb_params.p_data_src = &load_req;
+       mb_params.data_src_size = sizeof(load_req);
+       mb_params.p_data_dst = &load_rsp;
+       mb_params.data_dst_size = sizeof(load_rsp);
 
-       /* if mcp fails to respond we must abort */
+       DP_VERBOSE(p_hwfn, QED_MSG_SP,
+                  "Load Request: param 0x%08x [init_hw %d, drv_type %d, hsi_ver %d, pda 0x%04x]\n",
+                  mb_params.param,
+                  QED_MFW_GET_FIELD(mb_params.param, DRV_ID_DRV_INIT_HW),
+                  QED_MFW_GET_FIELD(mb_params.param, DRV_ID_DRV_TYPE),
+                  QED_MFW_GET_FIELD(mb_params.param, DRV_ID_MCP_HSI_VER),
+                  QED_MFW_GET_FIELD(mb_params.param, DRV_ID_PDA_COMP_VER));
+
+       if (p_in_params->hsi_ver != QED_LOAD_REQ_HSI_VER_1) {
+               DP_VERBOSE(p_hwfn, QED_MSG_SP,
+                          "Load Request: drv_ver 0x%08x_0x%08x, fw_ver 0x%08x, misc0 0x%08x [role %d, timeout %d, force %d, flags0 0x%x]\n",
+                          load_req.drv_ver_0,
+                          load_req.drv_ver_1,
+                          load_req.fw_ver,
+                          load_req.misc0,
+                          QED_MFW_GET_FIELD(load_req.misc0, LOAD_REQ_ROLE),
+                          QED_MFW_GET_FIELD(load_req.misc0,
+                                            LOAD_REQ_LOCK_TO),
+                          QED_MFW_GET_FIELD(load_req.misc0, LOAD_REQ_FORCE),
+                          QED_MFW_GET_FIELD(load_req.misc0, LOAD_REQ_FLAGS0));
+       }
+
+       rc = qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
        if (rc) {
-               DP_ERR(p_hwfn, "MCP response failure, aborting\n");
+               DP_NOTICE(p_hwfn, "Failed to send load request, rc = %d\n", rc);
                return rc;
        }
 
-       *p_load_code = mb_params.mcp_resp;
+       DP_VERBOSE(p_hwfn, QED_MSG_SP,
+                  "Load Response: resp 0x%08x\n", mb_params.mcp_resp);
+       p_out_params->load_code = mb_params.mcp_resp;
+
+       if (p_in_params->hsi_ver != QED_LOAD_REQ_HSI_VER_1 &&
+           p_out_params->load_code != FW_MSG_CODE_DRV_LOAD_REFUSED_HSI_1) {
+               DP_VERBOSE(p_hwfn,
+                          QED_MSG_SP,
+                          "Load Response: exist_drv_ver 0x%08x_0x%08x, exist_fw_ver 0x%08x, misc0 0x%08x [exist_role %d, mfw_hsi %d, flags0 0x%x]\n",
+                          load_rsp.drv_ver_0,
+                          load_rsp.drv_ver_1,
+                          load_rsp.fw_ver,
+                          load_rsp.misc0,
+                          QED_MFW_GET_FIELD(load_rsp.misc0, LOAD_RSP_ROLE),
+                          QED_MFW_GET_FIELD(load_rsp.misc0, LOAD_RSP_HSI),
+                          QED_MFW_GET_FIELD(load_rsp.misc0, LOAD_RSP_FLAGS0));
+
+               p_out_params->exist_drv_ver_0 = load_rsp.drv_ver_0;
+               p_out_params->exist_drv_ver_1 = load_rsp.drv_ver_1;
+               p_out_params->exist_fw_ver = load_rsp.fw_ver;
+               p_out_params->exist_drv_role =
+                   QED_MFW_GET_FIELD(load_rsp.misc0, LOAD_RSP_ROLE);
+               p_out_params->mfw_hsi_ver =
+                   QED_MFW_GET_FIELD(load_rsp.misc0, LOAD_RSP_HSI);
+               p_out_params->drv_exists =
+                   QED_MFW_GET_FIELD(load_rsp.misc0, LOAD_RSP_FLAGS0) &
+                   LOAD_RSP_FLAGS0_DRV_EXISTS;
+       }
+
+       return 0;
+}
+
+static int qed_get_mfw_drv_role(struct qed_hwfn *p_hwfn,
+                                enum qed_drv_role drv_role,
+                                u8 *p_mfw_drv_role)
+{
+       switch (drv_role) {
+       case QED_DRV_ROLE_OS:
+               *p_mfw_drv_role = DRV_ROLE_OS;
+               break;
+       case QED_DRV_ROLE_KDUMP:
+               *p_mfw_drv_role = DRV_ROLE_KDUMP;
+               break;
+       default:
+               DP_ERR(p_hwfn, "Unexpected driver role %d\n", drv_role);
+               return -EINVAL;
+       }
+
+       return 0;
+}
+
+enum qed_load_req_force {
+       QED_LOAD_REQ_FORCE_NONE,
+       QED_LOAD_REQ_FORCE_PF,
+       QED_LOAD_REQ_FORCE_ALL,
+};
+
+static void qed_get_mfw_force_cmd(struct qed_hwfn *p_hwfn,
 
-       /* If MFW refused (e.g. other port is in diagnostic mode) we
-        * must abort. This can happen in the following cases:
-        * - Other port is in diagnostic mode
-        * - Previously loaded function on the engine is not compliant with
-        *   the requester.
-        * - MFW cannot cope with the requester's DRV_MFW_HSI_VERSION.
-        *      -
+                                 enum qed_load_req_force force_cmd,
+                                 u8 *p_mfw_force_cmd)
+{
+       switch (force_cmd) {
+       case QED_LOAD_REQ_FORCE_NONE:
+               *p_mfw_force_cmd = LOAD_REQ_FORCE_NONE;
+               break;
+       case QED_LOAD_REQ_FORCE_PF:
+               *p_mfw_force_cmd = LOAD_REQ_FORCE_PF;
+               break;
+       case QED_LOAD_REQ_FORCE_ALL:
+               *p_mfw_force_cmd = LOAD_REQ_FORCE_ALL;
+               break;
+       }
+}
+
+int qed_mcp_load_req(struct qed_hwfn *p_hwfn,
+                    struct qed_ptt *p_ptt,
+                    struct qed_load_req_params *p_params)
+{
+       struct qed_load_req_out_params out_params;
+       struct qed_load_req_in_params in_params;
+       u8 mfw_drv_role, mfw_force_cmd;
+       int rc;
+
+       memset(&in_params, 0, sizeof(in_params));
+       in_params.hsi_ver = QED_LOAD_REQ_HSI_VER_DEFAULT;
+       in_params.drv_ver_0 = QED_VERSION;
+       in_params.drv_ver_1 = qed_get_config_bitmap();
+       in_params.fw_ver = STORM_FW_VERSION;
+       rc = qed_get_mfw_drv_role(p_hwfn, p_params->drv_role, &mfw_drv_role);
+       if (rc)
+               return rc;
+
+       in_params.drv_role = mfw_drv_role;
+       in_params.timeout_val = p_params->timeout_val;
+       qed_get_mfw_force_cmd(p_hwfn,
+                             QED_LOAD_REQ_FORCE_NONE, &mfw_force_cmd);
+
+       in_params.force_cmd = mfw_force_cmd;
+       in_params.avoid_eng_reset = p_params->avoid_eng_reset;
+
+       memset(&out_params, 0, sizeof(out_params));
+       rc = __qed_mcp_load_req(p_hwfn, p_ptt, &in_params, &out_params);
+       if (rc)
+               return rc;
+
+       /* First handle cases where another load request should/might be sent:
+        * - MFW expects the old interface [HSI version = 1]
+        * - MFW responds that a force load request is required
         */
-       if (!(*p_load_code) ||
-           ((*p_load_code) == FW_MSG_CODE_DRV_LOAD_REFUSED_HSI) ||
-           ((*p_load_code) == FW_MSG_CODE_DRV_LOAD_REFUSED_PDA) ||
-           ((*p_load_code) == FW_MSG_CODE_DRV_LOAD_REFUSED_DIAG)) {
-               DP_ERR(p_hwfn, "MCP refused load request, aborting\n");
+       if (out_params.load_code == FW_MSG_CODE_DRV_LOAD_REFUSED_HSI_1) {
+               DP_INFO(p_hwfn,
+                       "MFW refused a load request due to HSI > 1. Resending with HSI = 1\n");
+
+               in_params.hsi_ver = QED_LOAD_REQ_HSI_VER_1;
+               memset(&out_params, 0, sizeof(out_params));
+               rc = __qed_mcp_load_req(p_hwfn, p_ptt, &in_params, &out_params);
+               if (rc)
+                       return rc;
+       } else if (out_params.load_code ==
+                  FW_MSG_CODE_DRV_LOAD_REFUSED_REQUIRES_FORCE) {
+               if (qed_mcp_can_force_load(in_params.drv_role,
+                                          out_params.exist_drv_role,
+                                          p_params->override_force_load)) {
+                       DP_INFO(p_hwfn,
+                               "A force load is required [{role, fw_ver, drv_ver}: loading={%d, 0x%08x, 0x%08x_0x%08x}, existing={%d, 0x%08x, 0x%08x_0x%08x}]\n",
+                               in_params.drv_role, in_params.fw_ver,
+                               in_params.drv_ver_0, in_params.drv_ver_1,
+                               out_params.exist_drv_role,
+                               out_params.exist_fw_ver,
+                               out_params.exist_drv_ver_0,
+                               out_params.exist_drv_ver_1);
+
+                       qed_get_mfw_force_cmd(p_hwfn,
+                                             QED_LOAD_REQ_FORCE_ALL,
+                                             &mfw_force_cmd);
+
+                       in_params.force_cmd = mfw_force_cmd;
+                       memset(&out_params, 0, sizeof(out_params));
+                       rc = __qed_mcp_load_req(p_hwfn, p_ptt, &in_params,
+                                               &out_params);
+                       if (rc)
+                               return rc;
+               } else {
+                       DP_NOTICE(p_hwfn,
+                                 "A force load is required [{role, fw_ver, drv_ver}: loading={%d, 0x%08x, 0x%08x_0x%08x}, existing={%d, 0x%08x, 0x%08x_0x%08x}]\n",
+                                 in_params.drv_role, in_params.fw_ver,
+                                 in_params.drv_ver_0, in_params.drv_ver_1,
+                                 out_params.exist_drv_role,
+                                 out_params.exist_fw_ver,
+                                 out_params.exist_drv_ver_0,
+                                 out_params.exist_drv_ver_1);
+                       DP_NOTICE(p_hwfn,
+                                 "Avoid sending a force load request to prevent disruption of active PFs\n");
+
+                       qed_mcp_cancel_load_req(p_hwfn, p_ptt);
+                       return -EBUSY;
+               }
+       }
+
+       /* Now handle the other types of responses.
+        * The "REFUSED_HSI_1" and "REFUSED_REQUIRES_FORCE" responses are not
+        * expected here after the additional revised load requests were sent.
+        */
+       switch (out_params.load_code) {
+       case FW_MSG_CODE_DRV_LOAD_ENGINE:
+       case FW_MSG_CODE_DRV_LOAD_PORT:
+       case FW_MSG_CODE_DRV_LOAD_FUNCTION:
+               if (out_params.mfw_hsi_ver != QED_LOAD_REQ_HSI_VER_1 &&
+                   out_params.drv_exists) {
+                       /* The role and fw/driver version match, but the PF is
+                        * already loaded and has not been unloaded gracefully.
+                        */
+                       DP_NOTICE(p_hwfn, "PF is already loaded\n");
+                       return -EINVAL;
+               }
+               break;
+       default:
+               DP_NOTICE(p_hwfn,
+                         "Unexpected response to a load request [resp 0x%08x]. Aborting.\n",
+                         out_params.load_code);
                return -EBUSY;
        }
 
+       p_params->load_code = out_params.load_code;
+
        return 0;
 }
 
+int qed_mcp_unload_req(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
+{
+       u32 wol_param, mcp_resp, mcp_param;
+
+       switch (p_hwfn->cdev->wol_config) {
+       case QED_OV_WOL_DISABLED:
+               wol_param = DRV_MB_PARAM_UNLOAD_WOL_DISABLED;
+               break;
+       case QED_OV_WOL_ENABLED:
+               wol_param = DRV_MB_PARAM_UNLOAD_WOL_ENABLED;
+               break;
+       default:
+               DP_NOTICE(p_hwfn,
+                         "Unknown WoL configuration %02x\n",
+                         p_hwfn->cdev->wol_config);
+               /* Fallthrough */
+       case QED_OV_WOL_DEFAULT:
+               wol_param = DRV_MB_PARAM_UNLOAD_WOL_MCP;
+       }
+
+       return qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_UNLOAD_REQ, wol_param,
+                          &mcp_resp, &mcp_param);
+}
+
+int qed_mcp_unload_done(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
+{
+       struct qed_mcp_mb_params mb_params;
+       struct mcp_mac wol_mac;
+
+       memset(&mb_params, 0, sizeof(mb_params));
+       mb_params.cmd = DRV_MSG_CODE_UNLOAD_DONE;
+
+       /* Set the primary MAC if WoL is enabled */
+       if (p_hwfn->cdev->wol_config == QED_OV_WOL_ENABLED) {
+               u8 *p_mac = p_hwfn->cdev->wol_mac;
+
+               memset(&wol_mac, 0, sizeof(wol_mac));
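+               /* Bytes 0-1 of the MAC go in mac_upper and bytes 2-5 in
+                * mac_lower, most significant byte first.
+                */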
+               wol_mac.mac_upper = p_mac[0] << 8 | p_mac[1];
+               wol_mac.mac_lower = p_mac[2] << 24 | p_mac[3] << 16 |
+                                   p_mac[4] << 8 | p_mac[5];
+
+               DP_VERBOSE(p_hwfn,
+                          (QED_MSG_SP | NETIF_MSG_IFDOWN),
+                          "Setting WoL MAC: %pM --> [%08x,%08x]\n",
+                          p_mac, wol_mac.mac_upper, wol_mac.mac_lower);
+
+               mb_params.p_data_src = &wol_mac;
+               mb_params.data_src_size = sizeof(wol_mac);
+       }
+
+       return qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
+}
+
 static void qed_mcp_handle_vf_flr(struct qed_hwfn *p_hwfn,
                                  struct qed_ptt *p_ptt)
 {
@@ -549,7 +1042,6 @@ int qed_mcp_ack_vf_flr(struct qed_hwfn *p_hwfn,
        u32 func_addr = SECTION_ADDR(mfw_func_offsize,
                                     MCP_PF_ID(p_hwfn));
        struct qed_mcp_mb_params mb_params;
-       union drv_union_data union_data;
        int rc;
        int i;
 
@@ -560,8 +1052,8 @@ int qed_mcp_ack_vf_flr(struct qed_hwfn *p_hwfn,
 
        memset(&mb_params, 0, sizeof(mb_params));
        mb_params.cmd = DRV_MSG_CODE_VF_DISABLED_DONE;
-       memcpy(&union_data.ack_vf_disabled, vfs_to_ack, VF_MAX_STATIC / 8);
-       mb_params.p_data_src = &union_data;
+       mb_params.p_data_src = vfs_to_ack;
+       mb_params.data_src_size = VF_MAX_STATIC / 8;
        rc = qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
        if (rc) {
                DP_NOTICE(p_hwfn, "Failed to pass ACK for VF flr to MFW\n");
@@ -744,33 +1236,31 @@ int qed_mcp_set_link(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, bool b_up)
 {
        struct qed_mcp_link_params *params = &p_hwfn->mcp_info->link_input;
        struct qed_mcp_mb_params mb_params;
-       union drv_union_data union_data;
-       struct eth_phy_cfg *phy_cfg;
+       struct eth_phy_cfg phy_cfg;
        int rc = 0;
        u32 cmd;
 
        /* Set the shmem configuration according to params */
-       phy_cfg = &union_data.drv_phy_cfg;
-       memset(phy_cfg, 0, sizeof(*phy_cfg));
+       memset(&phy_cfg, 0, sizeof(phy_cfg));
        cmd = b_up ? DRV_MSG_CODE_INIT_PHY : DRV_MSG_CODE_LINK_RESET;
        if (!params->speed.autoneg)
-               phy_cfg->speed = params->speed.forced_speed;
-       phy_cfg->pause |= (params->pause.autoneg) ? ETH_PAUSE_AUTONEG : 0;
-       phy_cfg->pause |= (params->pause.forced_rx) ? ETH_PAUSE_RX : 0;
-       phy_cfg->pause |= (params->pause.forced_tx) ? ETH_PAUSE_TX : 0;
-       phy_cfg->adv_speed = params->speed.advertised_speeds;
-       phy_cfg->loopback_mode = params->loopback_mode;
+               phy_cfg.speed = params->speed.forced_speed;
+       phy_cfg.pause |= (params->pause.autoneg) ? ETH_PAUSE_AUTONEG : 0;
+       phy_cfg.pause |= (params->pause.forced_rx) ? ETH_PAUSE_RX : 0;
+       phy_cfg.pause |= (params->pause.forced_tx) ? ETH_PAUSE_TX : 0;
+       phy_cfg.adv_speed = params->speed.advertised_speeds;
+       phy_cfg.loopback_mode = params->loopback_mode;
 
        p_hwfn->b_drv_link_init = b_up;
 
        if (b_up) {
                DP_VERBOSE(p_hwfn, NETIF_MSG_LINK,
                           "Configuring Link: Speed 0x%08x, Pause 0x%08x, adv_speed 0x%08x, loopback 0x%08x, features 0x%08x\n",
-                          phy_cfg->speed,
-                          phy_cfg->pause,
-                          phy_cfg->adv_speed,
-                          phy_cfg->loopback_mode,
-                          phy_cfg->feature_config_flags);
+                          phy_cfg.speed,
+                          phy_cfg.pause,
+                          phy_cfg.adv_speed,
+                          phy_cfg.loopback_mode,
+                          phy_cfg.feature_config_flags);
        } else {
                DP_VERBOSE(p_hwfn, NETIF_MSG_LINK,
                           "Resetting link\n");
@@ -778,7 +1268,8 @@ int qed_mcp_set_link(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, bool b_up)
 
        memset(&mb_params, 0, sizeof(mb_params));
        mb_params.cmd = cmd;
-       mb_params.p_data_src = &union_data;
+       mb_params.p_data_src = &phy_cfg;
+       mb_params.data_src_size = sizeof(phy_cfg);
        rc = qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
 
        /* if mcp fails to respond we must abort */
@@ -805,7 +1296,6 @@ static void qed_mcp_send_protocol_stats(struct qed_hwfn *p_hwfn,
        enum qed_mcp_protocol_type stats_type;
        union qed_mcp_protocol_stats stats;
        struct qed_mcp_mb_params mb_params;
-       union drv_union_data union_data;
        u32 hsi_param;
 
        switch (type) {
@@ -835,8 +1325,8 @@ static void qed_mcp_send_protocol_stats(struct qed_hwfn *p_hwfn,
        memset(&mb_params, 0, sizeof(mb_params));
        mb_params.cmd = DRV_MSG_CODE_GET_STATS;
        mb_params.param = hsi_param;
-       memcpy(&union_data, &stats, sizeof(stats));
-       mb_params.p_data_src = &union_data;
+       mb_params.p_data_src = &stats;
+       mb_params.data_src_size = sizeof(stats);
        qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
 }
 
@@ -963,7 +1453,7 @@ int qed_mcp_handle_events(struct qed_hwfn *p_hwfn,
                        qed_mcp_update_bw(p_hwfn, p_ptt);
                        break;
                default:
-                       DP_NOTICE(p_hwfn, "Unimplemented MFW message %d\n", i);
+                       DP_INFO(p_hwfn, "Unimplemented MFW message %d\n", i);
                        rc = -EINVAL;
                }
        }
@@ -1316,24 +1806,23 @@ qed_mcp_send_drv_version(struct qed_hwfn *p_hwfn,
                         struct qed_ptt *p_ptt,
                         struct qed_mcp_drv_version *p_ver)
 {
-       struct drv_version_stc *p_drv_version;
        struct qed_mcp_mb_params mb_params;
-       union drv_union_data union_data;
+       struct drv_version_stc drv_version;
        __be32 val;
        u32 i;
        int rc;
 
-       p_drv_version = &union_data.drv_version;
-       p_drv_version->version = p_ver->version;
-
+       memset(&drv_version, 0, sizeof(drv_version));
+       drv_version.version = p_ver->version;
        for (i = 0; i < (MCP_DRV_VER_STR_SIZE - 4) / sizeof(u32); i++) {
                val = cpu_to_be32(*((u32 *)&p_ver->name[i * sizeof(u32)]));
-               *(__be32 *)&p_drv_version->name[i * sizeof(u32)] = val;
+               *(__be32 *)&drv_version.name[i * sizeof(u32)] = val;
        }
 
        memset(&mb_params, 0, sizeof(mb_params));
        mb_params.cmd = DRV_MSG_CODE_SET_VERSION;
-       mb_params.p_data_src = &union_data;
+       mb_params.p_data_src = &drv_version;
+       mb_params.data_src_size = sizeof(drv_version);
        rc = qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
        if (rc)
                DP_ERR(p_hwfn, "MCP response failure, aborting\n");
@@ -1450,7 +1939,7 @@ int qed_mcp_ov_update_mac(struct qed_hwfn *p_hwfn,
                          struct qed_ptt *p_ptt, u8 *mac)
 {
        struct qed_mcp_mb_params mb_params;
-       union drv_union_data union_data;
+       u32 mfw_mac[2];
        int rc;
 
        memset(&mb_params, 0, sizeof(mb_params));
@@ -1458,8 +1947,17 @@ int qed_mcp_ov_update_mac(struct qed_hwfn *p_hwfn,
        mb_params.param = DRV_MSG_CODE_VMAC_TYPE_MAC <<
                          DRV_MSG_CODE_VMAC_TYPE_SHIFT;
        mb_params.param |= MCP_PF_ID(p_hwfn);
-       ether_addr_copy(&union_data.raw_data[0], mac);
-       mb_params.p_data_src = &union_data;
+
+       /* MCP is BE, and on LE platforms PCI would swap access to SHMEM
+        * in 32-bit granularity.
+        * So the MAC has to be set in native order [and not byte order],
+        * otherwise it would be read incorrectly by MFW after swap.
+        */
+       mfw_mac[0] = mac[0] << 24 | mac[1] << 16 | mac[2] << 8 | mac[3];
+       mfw_mac[1] = mac[4] << 24 | mac[5] << 16;
+
+       mb_params.p_data_src = (u8 *)mfw_mac;
+       mb_params.data_src_size = sizeof(mfw_mac);
        rc = qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
        if (rc)
                DP_ERR(p_hwfn, "Failed to send mac address, rc = %d\n", rc);
@@ -1724,52 +2222,396 @@ int qed_mcp_bist_nvm_test_get_image_att(struct qed_hwfn *p_hwfn,
        return rc;
 }
 
-#define QED_RESC_ALLOC_VERSION_MAJOR    1
+static enum resource_id_enum qed_mcp_get_mfw_res_id(enum qed_resources res_id)
+{
+       enum resource_id_enum mfw_res_id = RESOURCE_NUM_INVALID;
+
+       switch (res_id) {
+       case QED_SB:
+               mfw_res_id = RESOURCE_NUM_SB_E;
+               break;
+       case QED_L2_QUEUE:
+               mfw_res_id = RESOURCE_NUM_L2_QUEUE_E;
+               break;
+       case QED_VPORT:
+               mfw_res_id = RESOURCE_NUM_VPORT_E;
+               break;
+       case QED_RSS_ENG:
+               mfw_res_id = RESOURCE_NUM_RSS_ENGINES_E;
+               break;
+       case QED_PQ:
+               mfw_res_id = RESOURCE_NUM_PQ_E;
+               break;
+       case QED_RL:
+               mfw_res_id = RESOURCE_NUM_RL_E;
+               break;
+       case QED_MAC:
+       case QED_VLAN:
+               /* Each VFC resource can accommodate both a MAC and a VLAN */
+               mfw_res_id = RESOURCE_VFC_FILTER_E;
+               break;
+       case QED_ILT:
+               mfw_res_id = RESOURCE_ILT_E;
+               break;
+       case QED_LL2_QUEUE:
+               mfw_res_id = RESOURCE_LL2_QUEUE_E;
+               break;
+       case QED_RDMA_CNQ_RAM:
+       case QED_CMDQS_CQS:
+               /* CNQ/CMDQS are the same resource */
+               mfw_res_id = RESOURCE_CQS_E;
+               break;
+       case QED_RDMA_STATS_QUEUE:
+               mfw_res_id = RESOURCE_RDMA_STATS_QUEUE_E;
+               break;
+       case QED_BDQ:
+               mfw_res_id = RESOURCE_BDQ_E;
+               break;
+       default:
+               break;
+       }
+
+       return mfw_res_id;
+}
+
+#define QED_RESC_ALLOC_VERSION_MAJOR    2
 #define QED_RESC_ALLOC_VERSION_MINOR    0
 #define QED_RESC_ALLOC_VERSION                              \
        ((QED_RESC_ALLOC_VERSION_MAJOR <<                    \
          DRV_MB_PARAM_RESOURCE_ALLOC_VERSION_MAJOR_SHIFT) | \
         (QED_RESC_ALLOC_VERSION_MINOR <<                    \
          DRV_MB_PARAM_RESOURCE_ALLOC_VERSION_MINOR_SHIFT))
-int qed_mcp_get_resc_info(struct qed_hwfn *p_hwfn,
-                         struct qed_ptt *p_ptt,
-                         struct resource_info *p_resc_info,
-                         u32 *p_mcp_resp, u32 *p_mcp_param)
+
+struct qed_resc_alloc_in_params {
+       u32 cmd;
+       enum qed_resources res_id;
+       u32 resc_max_val;
+};
+
+struct qed_resc_alloc_out_params {
+       u32 mcp_resp;
+       u32 mcp_param;
+       u32 resc_num;
+       u32 resc_start;
+       u32 vf_resc_num;
+       u32 vf_resc_start;
+       u32 flags;
+};
+
+static int
+qed_mcp_resc_allocation_msg(struct qed_hwfn *p_hwfn,
+                           struct qed_ptt *p_ptt,
+                           struct qed_resc_alloc_in_params *p_in_params,
+                           struct qed_resc_alloc_out_params *p_out_params)
 {
        struct qed_mcp_mb_params mb_params;
-       union drv_union_data union_data;
+       struct resource_info mfw_resc_info;
        int rc;
 
+       memset(&mfw_resc_info, 0, sizeof(mfw_resc_info));
+
+       mfw_resc_info.res_id = qed_mcp_get_mfw_res_id(p_in_params->res_id);
+       if (mfw_resc_info.res_id == RESOURCE_NUM_INVALID) {
+               DP_ERR(p_hwfn,
+                      "Failed to match resource %d [%s] with the MFW resources\n",
+                      p_in_params->res_id,
+                      qed_hw_get_resc_name(p_in_params->res_id));
+               return -EINVAL;
+       }
+
+       switch (p_in_params->cmd) {
+       case DRV_MSG_SET_RESOURCE_VALUE_MSG:
+               mfw_resc_info.size = p_in_params->resc_max_val;
+               /* Fallthrough */
+       case DRV_MSG_GET_RESOURCE_ALLOC_MSG:
+               break;
+       default:
+               DP_ERR(p_hwfn, "Unexpected resource alloc command [0x%08x]\n",
+                      p_in_params->cmd);
+               return -EINVAL;
+       }
+
        memset(&mb_params, 0, sizeof(mb_params));
-       memset(&union_data, 0, sizeof(union_data));
-       mb_params.cmd = DRV_MSG_GET_RESOURCE_ALLOC_MSG;
+       mb_params.cmd = p_in_params->cmd;
        mb_params.param = QED_RESC_ALLOC_VERSION;
+       mb_params.p_data_src = &mfw_resc_info;
+       mb_params.data_src_size = sizeof(mfw_resc_info);
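+       /* The same buffer is used for the request and the response; the MFW
+        * overwrites it with the updated resource info.
+        */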
+       mb_params.p_data_dst = mb_params.p_data_src;
+       mb_params.data_dst_size = mb_params.data_src_size;
 
-       /* Need to have a sufficient large struct, as the cmd_and_union
-        * is going to do memcpy from and to it.
-        */
-       memcpy(&union_data.resource, p_resc_info, sizeof(*p_resc_info));
+       DP_VERBOSE(p_hwfn,
+                  QED_MSG_SP,
+                  "Resource message request: cmd 0x%08x, res_id %d [%s], hsi_version %d.%d, val 0x%x\n",
+                  p_in_params->cmd,
+                  p_in_params->res_id,
+                  qed_hw_get_resc_name(p_in_params->res_id),
+                  QED_MFW_GET_FIELD(mb_params.param,
+                                    DRV_MB_PARAM_RESOURCE_ALLOC_VERSION_MAJOR),
+                  QED_MFW_GET_FIELD(mb_params.param,
+                                    DRV_MB_PARAM_RESOURCE_ALLOC_VERSION_MINOR),
+                  p_in_params->resc_max_val);
 
-       mb_params.p_data_src = &union_data;
-       mb_params.p_data_dst = &union_data;
        rc = qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
        if (rc)
                return rc;
 
-       /* Copy the data back */
-       memcpy(p_resc_info, &union_data.resource, sizeof(*p_resc_info));
-       *p_mcp_resp = mb_params.mcp_resp;
-       *p_mcp_param = mb_params.mcp_param;
+       p_out_params->mcp_resp = mb_params.mcp_resp;
+       p_out_params->mcp_param = mb_params.mcp_param;
+       p_out_params->resc_num = mfw_resc_info.size;
+       p_out_params->resc_start = mfw_resc_info.offset;
+       p_out_params->vf_resc_num = mfw_resc_info.vf_size;
+       p_out_params->vf_resc_start = mfw_resc_info.vf_offset;
+       p_out_params->flags = mfw_resc_info.flags;
+
+       DP_VERBOSE(p_hwfn,
+                  QED_MSG_SP,
+                  "Resource message response: mfw_hsi_version %d.%d, num 0x%x, start 0x%x, vf_num 0x%x, vf_start 0x%x, flags 0x%08x\n",
+                  QED_MFW_GET_FIELD(p_out_params->mcp_param,
+                                    FW_MB_PARAM_RESOURCE_ALLOC_VERSION_MAJOR),
+                  QED_MFW_GET_FIELD(p_out_params->mcp_param,
+                                    FW_MB_PARAM_RESOURCE_ALLOC_VERSION_MINOR),
+                  p_out_params->resc_num,
+                  p_out_params->resc_start,
+                  p_out_params->vf_resc_num,
+                  p_out_params->vf_resc_start, p_out_params->flags);
+
+       return 0;
+}
+
+int
+qed_mcp_set_resc_max_val(struct qed_hwfn *p_hwfn,
+                        struct qed_ptt *p_ptt,
+                        enum qed_resources res_id,
+                        u32 resc_max_val, u32 *p_mcp_resp)
+{
+       struct qed_resc_alloc_out_params out_params;
+       struct qed_resc_alloc_in_params in_params;
+       int rc;
+
+       memset(&in_params, 0, sizeof(in_params));
+       in_params.cmd = DRV_MSG_SET_RESOURCE_VALUE_MSG;
+       in_params.res_id = res_id;
+       in_params.resc_max_val = resc_max_val;
+       memset(&out_params, 0, sizeof(out_params));
+       rc = qed_mcp_resc_allocation_msg(p_hwfn, p_ptt, &in_params,
+                                        &out_params);
+       if (rc)
+               return rc;
+
+       *p_mcp_resp = out_params.mcp_resp;
+
+       return 0;
+}
+
+int
+qed_mcp_get_resc_info(struct qed_hwfn *p_hwfn,
+                     struct qed_ptt *p_ptt,
+                     enum qed_resources res_id,
+                     u32 *p_mcp_resp, u32 *p_resc_num, u32 *p_resc_start)
+{
+       struct qed_resc_alloc_out_params out_params;
+       struct qed_resc_alloc_in_params in_params;
+       int rc;
+
+       memset(&in_params, 0, sizeof(in_params));
+       in_params.cmd = DRV_MSG_GET_RESOURCE_ALLOC_MSG;
+       in_params.res_id = res_id;
+       memset(&out_params, 0, sizeof(out_params));
+       rc = qed_mcp_resc_allocation_msg(p_hwfn, p_ptt, &in_params,
+                                        &out_params);
+       if (rc)
+               return rc;
+
+       *p_mcp_resp = out_params.mcp_resp;
+
+       if (*p_mcp_resp == FW_MSG_CODE_RESOURCE_ALLOC_OK) {
+               *p_resc_num = out_params.resc_num;
+               *p_resc_start = out_params.resc_start;
+       }
+
+       return 0;
+}
+
+int qed_mcp_initiate_pf_flr(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
+{
+       u32 mcp_resp, mcp_param;
+
+       return qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_INITIATE_PF_FLR, 0,
+                          &mcp_resp, &mcp_param);
+}
+
+static int qed_mcp_resource_cmd(struct qed_hwfn *p_hwfn,
+                               struct qed_ptt *p_ptt,
+                               u32 param, u32 *p_mcp_resp, u32 *p_mcp_param)
+{
+       int rc;
+
+       rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_RESOURCE_CMD, param,
+                        p_mcp_resp, p_mcp_param);
+       if (rc)
+               return rc;
+
+       if (*p_mcp_resp == FW_MSG_CODE_UNSUPPORTED) {
+               DP_INFO(p_hwfn,
+                       "The resource command is unsupported by the MFW\n");
+               return -EINVAL;
+       }
+
+       if (*p_mcp_param == RESOURCE_OPCODE_UNKNOWN_CMD) {
+               u8 opcode = QED_MFW_GET_FIELD(param, RESOURCE_CMD_REQ_OPCODE);
+
+               DP_NOTICE(p_hwfn,
+                         "The resource command is unknown to the MFW [param 0x%08x, opcode %d]\n",
+                         param, opcode);
+               return -EINVAL;
+       }
+
+       return rc;
+}
+
+int
+__qed_mcp_resc_lock(struct qed_hwfn *p_hwfn,
+                   struct qed_ptt *p_ptt,
+                   struct qed_resc_lock_params *p_params)
+{
+       u32 param = 0, mcp_resp, mcp_param;
+       u8 opcode;
+       int rc;
+
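+       /* Map the timeout value to a request opcode: the MFW default aging,
+        * no aging at all, or an explicit aging period.
+        */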
+       switch (p_params->timeout) {
+       case QED_MCP_RESC_LOCK_TO_DEFAULT:
+               opcode = RESOURCE_OPCODE_REQ;
+               p_params->timeout = 0;
+               break;
+       case QED_MCP_RESC_LOCK_TO_NONE:
+               opcode = RESOURCE_OPCODE_REQ_WO_AGING;
+               p_params->timeout = 0;
+               break;
+       default:
+               opcode = RESOURCE_OPCODE_REQ_W_AGING;
+               break;
+       }
+
+       QED_MFW_SET_FIELD(param, RESOURCE_CMD_REQ_RESC, p_params->resource);
+       QED_MFW_SET_FIELD(param, RESOURCE_CMD_REQ_OPCODE, opcode);
+       QED_MFW_SET_FIELD(param, RESOURCE_CMD_REQ_AGE, p_params->timeout);
+
+       DP_VERBOSE(p_hwfn,
+                  QED_MSG_SP,
+                  "Resource lock request: param 0x%08x [age %d, opcode %d, resource %d]\n",
+                  param, p_params->timeout, opcode, p_params->resource);
+
+       /* Attempt to acquire the resource */
+       rc = qed_mcp_resource_cmd(p_hwfn, p_ptt, param, &mcp_resp, &mcp_param);
+       if (rc)
+               return rc;
+
+       /* Analyze the response */
+       p_params->owner = QED_MFW_GET_FIELD(mcp_param, RESOURCE_CMD_RSP_OWNER);
+       opcode = QED_MFW_GET_FIELD(mcp_param, RESOURCE_CMD_RSP_OPCODE);
 
        DP_VERBOSE(p_hwfn,
                   QED_MSG_SP,
-                  "MFW resource_info: version 0x%x, res_id 0x%x, size 0x%x, offset 0x%x, vf_size 0x%x, vf_offset 0x%x, flags 0x%x\n",
-                  *p_mcp_param,
-                  p_resc_info->res_id,
-                  p_resc_info->size,
-                  p_resc_info->offset,
-                  p_resc_info->vf_size,
-                  p_resc_info->vf_offset, p_resc_info->flags);
+                  "Resource lock response: mcp_param 0x%08x [opcode %d, owner %d]\n",
+                  mcp_param, opcode, p_params->owner);
+
+       switch (opcode) {
+       case RESOURCE_OPCODE_GNT:
+               p_params->b_granted = true;
+               break;
+       case RESOURCE_OPCODE_BUSY:
+               p_params->b_granted = false;
+               break;
+       default:
+               DP_NOTICE(p_hwfn,
+                         "Unexpected opcode in resource lock response [mcp_param 0x%08x, opcode %d]\n",
+                         mcp_param, opcode);
+               return -EINVAL;
+       }
+
+       return 0;
+}
+
+int
+qed_mcp_resc_lock(struct qed_hwfn *p_hwfn,
+                 struct qed_ptt *p_ptt, struct qed_resc_lock_params *p_params)
+{
+       u32 retry_cnt = 0;
+       int rc;
+
+       do {
+               /* No need for an interval before the first iteration */
+               if (retry_cnt) {
+                       if (p_params->sleep_b4_retry) {
+                               u16 retry_interval_in_ms =
+                                   DIV_ROUND_UP(p_params->retry_interval,
+                                                1000);
+
+                               msleep(retry_interval_in_ms);
+                       } else {
+                               udelay(p_params->retry_interval);
+                       }
+               }
+
+               rc = __qed_mcp_resc_lock(p_hwfn, p_ptt, p_params);
+               if (rc)
+                       return rc;
+
+               if (p_params->b_granted)
+                       break;
+       } while (retry_cnt++ < p_params->retry_num);
+
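+       /* Failing to be granted within the retries budget is not an error;
+        * the caller is expected to check b_granted.
+        */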
+       return 0;
+}
+
+int
+qed_mcp_resc_unlock(struct qed_hwfn *p_hwfn,
+                   struct qed_ptt *p_ptt,
+                   struct qed_resc_unlock_params *p_params)
+{
+       u32 param = 0, mcp_resp, mcp_param;
+       u8 opcode;
+       int rc;
+
+       opcode = p_params->b_force ? RESOURCE_OPCODE_FORCE_RELEASE
+                                  : RESOURCE_OPCODE_RELEASE;
+       QED_MFW_SET_FIELD(param, RESOURCE_CMD_REQ_RESC, p_params->resource);
+       QED_MFW_SET_FIELD(param, RESOURCE_CMD_REQ_OPCODE, opcode);
+
+       DP_VERBOSE(p_hwfn, QED_MSG_SP,
+                  "Resource unlock request: param 0x%08x [opcode %d, resource %d]\n",
+                  param, opcode, p_params->resource);
+
+       /* Attempt to release the resource */
+       rc = qed_mcp_resource_cmd(p_hwfn, p_ptt, param, &mcp_resp, &mcp_param);
+       if (rc)
+               return rc;
+
+       /* Analyze the response */
+       opcode = QED_MFW_GET_FIELD(mcp_param, RESOURCE_CMD_RSP_OPCODE);
+
+       DP_VERBOSE(p_hwfn, QED_MSG_SP,
+                  "Resource unlock response: mcp_param 0x%08x [opcode %d]\n",
+                  mcp_param, opcode);
+
+       switch (opcode) {
+       case RESOURCE_OPCODE_RELEASED_PREVIOUS:
+               DP_INFO(p_hwfn,
+                       "Resource unlock request for an already released resource [%d]\n",
+                       p_params->resource);
+               /* Fallthrough */
+       case RESOURCE_OPCODE_RELEASED:
+               p_params->b_released = true;
+               break;
+       case RESOURCE_OPCODE_WRONG_OWNER:
+               p_params->b_released = false;
+               break;
+       default:
+               DP_NOTICE(p_hwfn,
+                         "Unexpected opcode in resource unlock response [mcp_param 0x%08x, opcode %d]\n",
+                         mcp_param, opcode);
+               return -EINVAL;
+       }
 
        return 0;
 }
index 368e88de146cdbc7a7a66210c6ff9b1302867a8f..ac7d406be1edeba4c2206f654739d25e62dec18f 100644
@@ -39,6 +39,7 @@
 #include <linux/spinlock.h>
 #include <linux/qed/qed_fcoe_if.h>
 #include "qed_hsi.h"
+#include "qed_dev_api.h"
 
 struct qed_mcp_link_speed_params {
        bool    autoneg;
@@ -479,14 +480,18 @@ int qed_mcp_bist_nvm_test_get_image_att(struct qed_hwfn *p_hwfn,
                                            rel_pfid)
 #define MCP_PF_ID(p_hwfn) MCP_PF_ID_BY_REL(p_hwfn, (p_hwfn)->rel_pf_id)
 
-/* TODO - this is only correct as long as only BB is supported, and
- * no port-swapping is implemented; Afterwards we'll need to fix it.
- */
-#define MFW_PORT(_p_hwfn)       ((_p_hwfn)->abs_pf_id %        \
-                                ((_p_hwfn)->cdev->num_ports_in_engines * 2))
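+/* The MFW port index is the PF's absolute id modulo the total number of
+ * ports in the device (ports per engine * number of engines).
+ */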
+#define MFW_PORT(_p_hwfn)       ((_p_hwfn)->abs_pf_id %                          \
+                                ((_p_hwfn)->cdev->num_ports_in_engines * \
+                                 qed_device_num_engines((_p_hwfn)->cdev)))
+
 struct qed_mcp_info {
-       /* Spinlock used for protecting the access to the MFW mailbox */
-       spinlock_t                              lock;
+       /* List of sent mailbox commands awaiting an MFW response */
+       struct list_head                        cmd_list;
+
+       /* Spinlock protecting the mailbox command list and the sending of
+        * commands.
+        */
+       spinlock_t                              cmd_lock;
 
        /* Spinlock used for syncing SW link-changes and link-changes
         * originating from attention context.
@@ -506,14 +511,16 @@ struct qed_mcp_info {
        u8                                      *mfw_mb_cur;
        u8                                      *mfw_mb_shadow;
        u16                                     mfw_mb_length;
-       u16                                     mcp_hist;
+       u32                                     mcp_hist;
 };
 
 struct qed_mcp_mb_params {
        u32                     cmd;
        u32                     param;
-       union drv_union_data    *p_data_src;
-       union drv_union_data    *p_data_dst;
+       void                    *p_data_src;
+       u8                      data_src_size;
+       void                    *p_data_dst;
+       u8                      data_dst_size;
        u32                     mcp_resp;
        u32                     mcp_param;
 };
@@ -564,27 +571,55 @@ int qed_mcp_free(struct qed_hwfn *p_hwfn);
 int qed_mcp_handle_events(struct qed_hwfn *p_hwfn,
                          struct qed_ptt *p_ptt);
 
+enum qed_drv_role {
+       QED_DRV_ROLE_OS,
+       QED_DRV_ROLE_KDUMP,
+};
+
+struct qed_load_req_params {
+       /* Input params */
+       enum qed_drv_role drv_role;
+       u8 timeout_val;
+       bool avoid_eng_reset;
+       enum qed_override_force_load override_force_load;
+
+       /* Output params */
+       u32 load_code;
+};
+
 /**
- * @brief Sends a LOAD_REQ to the MFW, and in case operation
- *        succeed, returns whether this PF is the first on the
- *        chip/engine/port or function. This function should be
- *        called when driver is ready to accept MFW events after
- *        Storms initializations are done.
+ * @brief Sends a LOAD_REQ to the MFW, and in case the operation succeeds,
+ *        indicates whether this PF is the first on the engine, the first on
+ *        the port, or a function-level load.
  *
- * @param p_hwfn       - hw function
- * @param p_ptt        - PTT required for register access
- * @param p_load_code  - The MCP response param containing one
- *      of the following:
- *      FW_MSG_CODE_DRV_LOAD_ENGINE
- *      FW_MSG_CODE_DRV_LOAD_PORT
- *      FW_MSG_CODE_DRV_LOAD_FUNCTION
- * @return int -
- *      0 - Operation was successul.
- *      -EBUSY - Operation failed
+ * @param p_hwfn
+ * @param p_ptt
+ * @param p_params
+ *
+ * @return int - 0 - Operation was successful.
  */
 int qed_mcp_load_req(struct qed_hwfn *p_hwfn,
                     struct qed_ptt *p_ptt,
-                    u32 *p_load_code);
+                    struct qed_load_req_params *p_params);
+
+/**
+ * @brief Sends a UNLOAD_REQ message to the MFW
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ *
+ * @return int - 0 - Operation was successful.
+ */
+int qed_mcp_unload_req(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt);
+
+/**
+ * @brief Sends a UNLOAD_DONE message to the MFW
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ *
+ * @return int - 0 - Operation was successful.
+ */
+int qed_mcp_unload_done(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt);
 
 /**
  * @brief Read the MFW mailbox into Current buffer.
@@ -707,6 +742,41 @@ int __qed_configure_pf_min_bandwidth(struct qed_hwfn *p_hwfn,
 int qed_mcp_mask_parities(struct qed_hwfn *p_hwfn,
                          struct qed_ptt *p_ptt, u32 mask_parities);
 
+/**
+ * @brief - Sets the MFW's max value for the given resource
+ *
+ *  @param p_hwfn
+ *  @param p_ptt
+ *  @param res_id
+ *  @param resc_max_val
+ *  @param p_mcp_resp
+ *
+ * @return int - 0 - operation was successful.
+ */
+int
+qed_mcp_set_resc_max_val(struct qed_hwfn *p_hwfn,
+                        struct qed_ptt *p_ptt,
+                        enum qed_resources res_id,
+                        u32 resc_max_val, u32 *p_mcp_resp);
+
+/**
+ * @brief - Gets the MFW allocation info for the given resource
+ *
+ *  @param p_hwfn
+ *  @param p_ptt
+ *  @param res_id
+ *  @param p_mcp_resp
+ *  @param p_resc_num
+ *  @param p_resc_start
+ *
+ * @return int - 0 - operation was successful.
+ */
+int
+qed_mcp_get_resc_info(struct qed_hwfn *p_hwfn,
+                     struct qed_ptt *p_ptt,
+                     enum qed_resources res_id,
+                     u32 *p_mcp_resp, u32 *p_resc_num, u32 *p_resc_start);
+
 /**
  * @brief Send eswitch mode to MFW
  *
@@ -720,19 +790,86 @@ int qed_mcp_ov_update_eswitch(struct qed_hwfn *p_hwfn,
                              struct qed_ptt *p_ptt,
                              enum qed_ov_eswitch eswitch);
 
+#define QED_MCP_RESC_LOCK_MIN_VAL       RESOURCE_DUMP
+#define QED_MCP_RESC_LOCK_MAX_VAL       31
+
+enum qed_resc_lock {
+       QED_RESC_LOCK_DBG_DUMP = QED_MCP_RESC_LOCK_MIN_VAL,
+       QED_RESC_LOCK_RESC_ALLOC = QED_MCP_RESC_LOCK_MAX_VAL
+};
+
 /**
- * @brief - Gets the MFW allocation info for the given resource
+ * @brief - Initiates PF FLR
  *
  *  @param p_hwfn
  *  @param p_ptt
- *  @param p_resc_info - descriptor of requested resource
- *  @param p_mcp_resp
- *  @param p_mcp_param
  *
  * @return int - 0 - operation was successful.
  */
-int qed_mcp_get_resc_info(struct qed_hwfn *p_hwfn,
-                         struct qed_ptt *p_ptt,
-                         struct resource_info *p_resc_info,
-                         u32 *p_mcp_resp, u32 *p_mcp_param);
+int qed_mcp_initiate_pf_flr(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt);
+
+struct qed_resc_lock_params {
+       /* Resource number [valid values are 0..31] */
+       u8 resource;
+
+       /* Lock timeout value in seconds [default, none or 1..254] */
+       u8 timeout;
+#define QED_MCP_RESC_LOCK_TO_DEFAULT    0
+#define QED_MCP_RESC_LOCK_TO_NONE       255
+
+       /* Number of times to retry locking */
+       u8 retry_num;
+
+       /* The interval in usec between retries */
+       u16 retry_interval;
+
+       /* Use sleep or delay between retries */
+       bool sleep_b4_retry;
+
+       /* Set to true if the resource was free and has been granted */
+       bool b_granted;
+
+       /* Will be filled with the resource owner.
+        * [0..15 = PF0-15, 16 = MFW]
+        */
+       u8 owner;
+};
+
+/**
+ * @brief Acquires MFW generic resource lock
+ *
+ *  @param p_hwfn
+ *  @param p_ptt
+ *  @param p_params
+ *
+ * @return int - 0 - operation was successful.
+ */
+int
+qed_mcp_resc_lock(struct qed_hwfn *p_hwfn,
+                 struct qed_ptt *p_ptt, struct qed_resc_lock_params *p_params);
+
+struct qed_resc_unlock_params {
+       /* Resource number [valid values are 0..31] */
+       u8 resource;
+
+       /* Allow releasing a resource even if it belongs to another PF */
+       bool b_force;
+
+       /* Set to true if the resource has been released */
+       bool b_released;
+};
+
+/**
+ * @brief Releases MFW generic resource lock
+ *
+ *  @param p_hwfn
+ *  @param p_ptt
+ *  @param p_params
+ *
+ * @return int - 0 - operation was successful.
+ */
+int
+qed_mcp_resc_unlock(struct qed_hwfn *p_hwfn,
+                   struct qed_ptt *p_ptt,
+                   struct qed_resc_unlock_params *p_params);
+
 #endif
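A hedged sketch of how the new lock/unlock pair composes; the retry_num and retry_interval values are illustrative, not mandated by the interface:

        struct qed_resc_lock_params lock_params;
        struct qed_resc_unlock_params unlock_params;
        int rc;

        memset(&lock_params, 0, sizeof(lock_params));
        lock_params.resource = QED_RESC_LOCK_RESC_ALLOC;
        lock_params.timeout = QED_MCP_RESC_LOCK_TO_DEFAULT;
        lock_params.retry_num = 10;             /* illustrative */
        lock_params.retry_interval = 10000;     /* usec between retries */
        lock_params.sleep_b4_retry = true;

        rc = qed_mcp_resc_lock(p_hwfn, p_ptt, &lock_params);
        if (rc || !lock_params.b_granted)
                return rc ? rc : -EBUSY;

        /* ... touch the MFW-arbitrated resource ... */

        memset(&unlock_params, 0, sizeof(unlock_params));
        unlock_params.resource = QED_RESC_LOCK_RESC_ALLOC;
        rc = qed_mcp_resc_unlock(p_hwfn, p_ptt, &unlock_params);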
index 7d731c6cb8923dd927a7bbaafa3a3a97237ba652..db96670192c7563e0d0384370e92845ff17d8e48 100644 (file)
@@ -41,6 +41,7 @@
 #include "qed_iscsi.h"
 #include "qed_ll2.h"
 #include "qed_ooo.h"
+#include "qed_cxt.h"
 
 static struct qed_ooo_archipelago
 *qed_ooo_seek_archipelago(struct qed_hwfn *p_hwfn,
@@ -48,15 +49,18 @@ static struct qed_ooo_archipelago
                          *p_ooo_info,
                          u32 cid)
 {
-       struct qed_ooo_archipelago *p_archipelago = NULL;
+       u32 idx = (cid & 0xffff) - p_ooo_info->cid_base;
+       struct qed_ooo_archipelago *p_archipelago;
 
-       list_for_each_entry(p_archipelago,
-                           &p_ooo_info->archipelagos_list, list_entry) {
-               if (p_archipelago->cid == cid)
-                       return p_archipelago;
-       }
+       if (idx >= p_ooo_info->max_num_archipelagos)
+               return NULL;
 
-       return NULL;
+       p_archipelago = &p_ooo_info->p_archipelagos_mem[idx];
+
+       if (list_empty(&p_archipelago->isles_list))
+               return NULL;
+
+       return p_archipelago;
 }
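The list walk is gone in favor of direct indexing off cid_base. A standalone illustration of the arithmetic (cid_base and the CID value below are hypothetical):

        #include <stdint.h>
        #include <stdio.h>

        int main(void)
        {
                uint16_t cid_base = 0x0100;     /* hypothetical iSCSI CID base */
                uint16_t max_num_archipelagos = 64;
                uint32_t cid = 0x00050112;      /* only the low 16 bits matter */
                uint32_t idx = (cid & 0xffff) - cid_base;

                if (idx < max_num_archipelagos)
                        printf("archipelago index = %u\n", idx);        /* 18 */
                return 0;
        }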
 
 static struct qed_ooo_isle *qed_ooo_seek_isle(struct qed_hwfn *p_hwfn,
@@ -97,8 +101,8 @@ void qed_ooo_save_history_entry(struct qed_hwfn *p_hwfn,
 
 struct qed_ooo_info *qed_ooo_alloc(struct qed_hwfn *p_hwfn)
 {
+       u16 max_num_archipelagos = 0, cid_base;
        struct qed_ooo_info *p_ooo_info;
-       u16 max_num_archipelagos = 0;
        u16 max_num_isles = 0;
        u32 i;
 
@@ -110,6 +114,7 @@ struct qed_ooo_info *qed_ooo_alloc(struct qed_hwfn *p_hwfn)
 
        max_num_archipelagos = p_hwfn->pf_params.iscsi_pf_params.num_cons;
        max_num_isles = QED_MAX_NUM_ISLES + max_num_archipelagos;
+       cid_base = (u16)qed_cxt_get_proto_cid_start(p_hwfn, PROTOCOLID_ISCSI);
 
        if (!max_num_archipelagos) {
                DP_NOTICE(p_hwfn,
@@ -121,11 +126,12 @@ struct qed_ooo_info *qed_ooo_alloc(struct qed_hwfn *p_hwfn)
        if (!p_ooo_info)
                return NULL;
 
+       p_ooo_info->cid_base = cid_base;
+       p_ooo_info->max_num_archipelagos = max_num_archipelagos;
+
        INIT_LIST_HEAD(&p_ooo_info->free_buffers_list);
        INIT_LIST_HEAD(&p_ooo_info->ready_buffers_list);
        INIT_LIST_HEAD(&p_ooo_info->free_isles_list);
-       INIT_LIST_HEAD(&p_ooo_info->free_archipelagos_list);
-       INIT_LIST_HEAD(&p_ooo_info->archipelagos_list);
 
        p_ooo_info->p_isles_mem = kcalloc(max_num_isles,
                                          sizeof(struct qed_ooo_isle),
@@ -146,11 +152,8 @@ struct qed_ooo_info *qed_ooo_alloc(struct qed_hwfn *p_hwfn)
        if (!p_ooo_info->p_archipelagos_mem)
                goto no_archipelagos_mem;
 
-       for (i = 0; i < max_num_archipelagos; i++) {
+       for (i = 0; i < max_num_archipelagos; i++)
                INIT_LIST_HEAD(&p_ooo_info->p_archipelagos_mem[i].isles_list);
-               list_add_tail(&p_ooo_info->p_archipelagos_mem[i].list_entry,
-                             &p_ooo_info->free_archipelagos_list);
-       }
 
        p_ooo_info->ooo_history.p_cqes =
                                kcalloc(QED_MAX_NUM_OOO_HISTORY_ENTRIES,
@@ -159,6 +162,8 @@ struct qed_ooo_info *qed_ooo_alloc(struct qed_hwfn *p_hwfn)
        if (!p_ooo_info->ooo_history.p_cqes)
                goto no_history_mem;
 
+       p_ooo_info->ooo_history.num_of_cqes = QED_MAX_NUM_OOO_HISTORY_ENTRIES;
+
        return p_ooo_info;
 
 no_history_mem:
@@ -176,21 +181,9 @@ void qed_ooo_release_connection_isles(struct qed_hwfn *p_hwfn,
        struct qed_ooo_archipelago *p_archipelago;
        struct qed_ooo_buffer *p_buffer;
        struct qed_ooo_isle *p_isle;
-       bool b_found = false;
-
-       if (list_empty(&p_ooo_info->archipelagos_list))
-               return;
-
-       list_for_each_entry(p_archipelago,
-                           &p_ooo_info->archipelagos_list, list_entry) {
-               if (p_archipelago->cid == cid) {
-                       list_del(&p_archipelago->list_entry);
-                       b_found = true;
-                       break;
-               }
-       }
 
-       if (!b_found)
+       p_archipelago = qed_ooo_seek_archipelago(p_hwfn, p_ooo_info, cid);
+       if (!p_archipelago)
                return;
 
        while (!list_empty(&p_archipelago->isles_list)) {
@@ -214,27 +207,21 @@ void qed_ooo_release_connection_isles(struct qed_hwfn *p_hwfn,
                list_add_tail(&p_isle->list_entry,
                              &p_ooo_info->free_isles_list);
        }
-
-       list_add_tail(&p_archipelago->list_entry,
-                     &p_ooo_info->free_archipelagos_list);
 }
 
 void qed_ooo_release_all_isles(struct qed_hwfn *p_hwfn,
                               struct qed_ooo_info *p_ooo_info)
 {
-       struct qed_ooo_archipelago *p_arch;
+       struct qed_ooo_archipelago *p_archipelago;
        struct qed_ooo_buffer *p_buffer;
        struct qed_ooo_isle *p_isle;
+       u32 i;
 
-       while (!list_empty(&p_ooo_info->archipelagos_list)) {
-               p_arch = list_first_entry(&p_ooo_info->archipelagos_list,
-                                         struct qed_ooo_archipelago,
-                                         list_entry);
-
-               list_del(&p_arch->list_entry);
+       for (i = 0; i < p_ooo_info->max_num_archipelagos; i++) {
+               p_archipelago = &(p_ooo_info->p_archipelagos_mem[i]);
 
-               while (!list_empty(&p_arch->isles_list)) {
-                       p_isle = list_first_entry(&p_arch->isles_list,
+               while (!list_empty(&p_archipelago->isles_list)) {
+                       p_isle = list_first_entry(&p_archipelago->isles_list,
                                                  struct qed_ooo_isle,
                                                  list_entry);
 
@@ -256,8 +243,6 @@ void qed_ooo_release_all_isles(struct qed_hwfn *p_hwfn,
                        list_add_tail(&p_isle->list_entry,
                                      &p_ooo_info->free_isles_list);
                }
-               list_add_tail(&p_arch->list_entry,
-                             &p_ooo_info->free_archipelagos_list);
        }
        if (!list_empty(&p_ooo_info->ready_buffers_list))
                list_splice_tail_init(&p_ooo_info->ready_buffers_list,
@@ -376,12 +361,6 @@ void qed_ooo_delete_isles(struct qed_hwfn *p_hwfn,
                p_ooo_info->cur_isles_number--;
                list_add(&p_isle->list_entry, &p_ooo_info->free_isles_list);
        }
-
-       if (list_empty(&p_archipelago->isles_list)) {
-               list_del(&p_archipelago->list_entry);
-               list_add(&p_archipelago->list_entry,
-                        &p_ooo_info->free_archipelagos_list);
-       }
 }
 
 void qed_ooo_add_new_isle(struct qed_hwfn *p_hwfn,
@@ -424,28 +403,10 @@ void qed_ooo_add_new_isle(struct qed_hwfn *p_hwfn,
                return;
        }
 
-       if (!p_archipelago &&
-           !list_empty(&p_ooo_info->free_archipelagos_list)) {
-               p_archipelago =
-                   list_first_entry(&p_ooo_info->free_archipelagos_list,
-                                    struct qed_ooo_archipelago, list_entry);
+       if (!p_archipelago) {
+               u32 idx = (cid & 0xffff) - p_ooo_info->cid_base;
 
-               list_del(&p_archipelago->list_entry);
-               if (!list_empty(&p_archipelago->isles_list)) {
-                       DP_NOTICE(p_hwfn,
-                                 "Free OOO connection is not empty\n");
-                       INIT_LIST_HEAD(&p_archipelago->isles_list);
-               }
-               p_archipelago->cid = cid;
-               list_add(&p_archipelago->list_entry,
-                        &p_ooo_info->archipelagos_list);
-       } else if (!p_archipelago) {
-               DP_NOTICE(p_hwfn, "No more free OOO connections\n");
-               list_add(&p_isle->list_entry,
-                        &p_ooo_info->free_isles_list);
-               list_add(&p_buffer->list_entry,
-                        &p_ooo_info->free_buffers_list);
-               return;
+               p_archipelago = &p_ooo_info->p_archipelagos_mem[idx];
        }
 
        list_add(&p_buffer->list_entry, &p_isle->buffers_list);
@@ -515,11 +476,6 @@ void qed_ooo_join_isles(struct qed_hwfn *p_hwfn,
        } else {
                list_splice_tail_init(&p_right_isle->buffers_list,
                                      &p_ooo_info->ready_buffers_list);
-               if (list_empty(&p_archipelago->isles_list)) {
-                       list_del(&p_archipelago->list_entry);
-                       list_add(&p_archipelago->list_entry,
-                                &p_ooo_info->free_archipelagos_list);
-               }
        }
        list_add_tail(&p_right_isle->list_entry, &p_ooo_info->free_isles_list);
 }
index 4f138fb5f533e0ac68cbdf9acab9a41aa4d33ff7..791ad0f8b7595d00bcad4b61afba26ac882fbcc9 100644 (file)
@@ -60,9 +60,7 @@ struct qed_ooo_isle {
 };
 
 struct qed_ooo_archipelago {
-       struct list_head list_entry;
        struct list_head isles_list;
-       u32 cid;
 };
 
 struct qed_ooo_history {
@@ -75,14 +73,14 @@ struct qed_ooo_info {
        struct list_head free_buffers_list;
        struct list_head ready_buffers_list;
        struct list_head free_isles_list;
-       struct list_head free_archipelagos_list;
-       struct list_head archipelagos_list;
        struct qed_ooo_archipelago *p_archipelagos_mem;
        struct qed_ooo_isle *p_isles_mem;
        struct qed_ooo_history ooo_history;
        u32 cur_isles_number;
        u32 max_isles_number;
        u32 gen_isles_number;
+       u16 max_num_archipelagos;
+       u16 cid_base;
 };
 
 #if IS_ENABLED(CONFIG_QED_ISCSI)
index d27aa85da23cb526ca9319a152107a64c6df0e4a..80c9c0b172dd0d7b7f59a58349699178ec36d438 100644 (file)
@@ -262,12 +262,20 @@ static int qed_ptp_hw_enable(struct qed_dev *cdev)
        qed_wr(p_hwfn, p_ptt, NIG_REG_TS_OUTPUT_ENABLE_PDA, 0x1);
 
        /* Pause free running counter */
-       qed_wr(p_hwfn, p_ptt, NIG_REG_TIMESYNC_GEN_REG_BB, 2);
+       if (QED_IS_BB_B0(p_hwfn->cdev))
+               qed_wr(p_hwfn, p_ptt, NIG_REG_TIMESYNC_GEN_REG_BB, 2);
+       if (QED_IS_AH(p_hwfn->cdev))
+               qed_wr(p_hwfn, p_ptt, NIG_REG_TSGEN_FREECNT_UPDATE_K2, 2);
 
        qed_wr(p_hwfn, p_ptt, NIG_REG_TSGEN_FREE_CNT_VALUE_LSB, 0);
        qed_wr(p_hwfn, p_ptt, NIG_REG_TSGEN_FREE_CNT_VALUE_MSB, 0);
        /* Resume free running counter */
-       qed_wr(p_hwfn, p_ptt, NIG_REG_TIMESYNC_GEN_REG_BB, 4);
+       if (QED_IS_BB_B0(p_hwfn->cdev))
+               qed_wr(p_hwfn, p_ptt, NIG_REG_TIMESYNC_GEN_REG_BB, 4);
+       if (QED_IS_AH(p_hwfn->cdev)) {
+               qed_wr(p_hwfn, p_ptt, NIG_REG_TSGEN_FREECNT_UPDATE_K2, 4);
+               qed_wr(p_hwfn, p_ptt, NIG_REG_PTP_LATCH_OSTS_PKT_TIME, 1);
+       }
 
        /* Disable drift register */
        qed_wr(p_hwfn, p_ptt, NIG_REG_TSGEN_DRIFT_CNTR_CONF, 0x0);
index d59d9df60cd24c20f031e66e7f034975d5244cba..e65397360ab42ceae14130a6c1be80c7e31b243f 100644 (file)
        0x2e0704UL
 #define  CCFC_REG_STRONG_ENABLE_PF \
        0x2e0708UL
-#define  PGLUE_B_REG_PGL_ADDR_88_F0 \
+#define  PGLUE_B_REG_PGL_ADDR_88_F0_BB \
        0x2aa404UL
-#define  PGLUE_B_REG_PGL_ADDR_8C_F0 \
+#define  PGLUE_B_REG_PGL_ADDR_8C_F0_BB \
        0x2aa408UL
-#define  PGLUE_B_REG_PGL_ADDR_90_F0 \
+#define  PGLUE_B_REG_PGL_ADDR_90_F0_BB \
        0x2aa40cUL
-#define  PGLUE_B_REG_PGL_ADDR_94_F0 \
+#define  PGLUE_B_REG_PGL_ADDR_94_F0_BB \
        0x2aa410UL
 #define  PGLUE_B_REG_WAS_ERROR_PF_31_0_CLR \
        0x2aa138UL
        0x238804UL
 #define  RDIF_REG_STOP_ON_ERROR \
        0x300040UL
+#define RDIF_REG_DEBUG_ERROR_INFO \
+       0x300400UL
+#define RDIF_REG_DEBUG_ERROR_INFO_SIZE \
+       64
 #define  SRC_REG_SOFT_RST \
        0x23874cUL
 #define  TCFC_REG_ACTIVITY_COUNTER \
        0x1700004UL
 #define  TDIF_REG_STOP_ON_ERROR \
        0x310040UL
+#define TDIF_REG_DEBUG_ERROR_INFO \
+       0x310400UL
+#define TDIF_REG_DEBUG_ERROR_INFO_SIZE \
+       64
 #define  UCM_REG_INIT \
        0x1280000UL
 #define  UMAC_REG_IPG_HD_BKP_CNTL_BB_B0 \
        0x1901534UL
 #define USEM_REG_DBG_FORCE_FRAME \
        0x1901538UL
+#define NWS_REG_DBG_SELECT \
+       0x700128UL
+#define NWS_REG_DBG_DWORD_ENABLE \
+       0x70012cUL
+#define NWS_REG_DBG_SHIFT \
+       0x700130UL
+#define NWS_REG_DBG_FORCE_VALID        \
+       0x700134UL
+#define NWS_REG_DBG_FORCE_FRAME        \
+       0x700138UL
+#define MS_REG_DBG_SELECT \
+       0x6a0228UL
+#define MS_REG_DBG_DWORD_ENABLE \
+       0x6a022cUL
+#define MS_REG_DBG_SHIFT \
+       0x6a0230UL
+#define MS_REG_DBG_FORCE_VALID \
+       0x6a0234UL
+#define MS_REG_DBG_FORCE_FRAME \
+       0x6a0238UL
 #define PCIE_REG_DBG_COMMON_SELECT \
        0x054398UL
 #define PCIE_REG_DBG_COMMON_DWORD_ENABLE \
        0x000b48UL
 #define RSS_REG_RSS_RAM_DATA \
        0x238c20UL
+#define RSS_REG_RSS_RAM_DATA_SIZE \
+       4
 #define MISC_REG_BLOCK_256B_EN \
        0x008c14UL
 #define NWS_REG_NWS_CMU        \
 #define NIG_REG_TIMESYNC_GEN_REG_BB 0x500d00UL
 #define NIG_REG_TSGEN_FREE_CNT_VALUE_LSB 0x5088a8UL
 #define NIG_REG_TSGEN_FREE_CNT_VALUE_MSB 0x5088acUL
+#define NIG_REG_PTP_LATCH_OSTS_PKT_TIME 0x509040UL
+#define PSWRQ2_REG_WR_MBS0 0x240400UL
+
+#define PGLUE_B_REG_PGL_ADDR_E8_F0_K2 0x2aaf98UL
+#define PGLUE_B_REG_PGL_ADDR_EC_F0_K2 0x2aaf9cUL
+#define PGLUE_B_REG_PGL_ADDR_F0_F0_K2 0x2aafa0UL
+#define PGLUE_B_REG_PGL_ADDR_F4_F0_K2 0x2aafa4UL
+#define NIG_REG_TSGEN_FREECNT_UPDATE_K2 0x509008UL
+#define CNIG_REG_NIG_PORT0_CONF_K2 0x218200UL
+
 #endif
index d9ff6b28591c19faf288a130e28c19445fd3ba9b..b8c811f9520541318e75dd2627a2afbd68ccf7ae 100644 (file)
 #include "qed_roce.h"
 #include "qed_ll2.h"
 
-void qed_async_roce_event(struct qed_hwfn *p_hwfn,
-                         struct event_ring_entry *p_eqe)
+static void qed_roce_free_real_icid(struct qed_hwfn *p_hwfn, u16 icid);
+
+void qed_roce_async_event(struct qed_hwfn *p_hwfn,
+                         u8 fw_event_code, union rdma_eqe_data *rdma_data)
 {
-       struct qed_rdma_info *p_rdma_info = p_hwfn->p_rdma_info;
+       if (fw_event_code == ROCE_ASYNC_EVENT_DESTROY_QP_DONE) {
+               u16 icid =
+                   (u16)le32_to_cpu(rdma_data->rdma_destroy_qp_data.cid);
+
+               /* icid release in this async event can occur only if the icid
+                * was offloaded to the FW. In case it wasn't offloaded, this is
+                * handled in qed_roce_sp_destroy_qp.
+                */
+               qed_roce_free_real_icid(p_hwfn, icid);
+       } else {
+               struct qed_rdma_events *events = &p_hwfn->p_rdma_info->events;
 
-       p_rdma_info->events.affiliated_event(p_rdma_info->events.context,
-                                            p_eqe->opcode, &p_eqe->data);
+               events->affiliated_event(p_hwfn->p_rdma_info->events.context,
+                                        fw_event_code,
+                                        &rdma_data->async_handle);
+       }
 }
 
 static int qed_rdma_bmap_alloc(struct qed_hwfn *p_hwfn,
@@ -113,6 +127,15 @@ static int qed_rdma_bmap_alloc_id(struct qed_hwfn *p_hwfn,
        return 0;
 }
 
+static void qed_bmap_set_id(struct qed_hwfn *p_hwfn,
+                           struct qed_bmap *bmap, u32 id_num)
+{
+       if (id_num >= bmap->max_count)
+               return;
+
+       __set_bit(id_num, bmap->bitmap);
+}
+
 static void qed_bmap_release_id(struct qed_hwfn *p_hwfn,
                                struct qed_bmap *bmap, u32 id_num)
 {
@@ -129,6 +152,15 @@ static void qed_bmap_release_id(struct qed_hwfn *p_hwfn,
        }
 }
 
+static int qed_bmap_test_id(struct qed_hwfn *p_hwfn,
+                           struct qed_bmap *bmap, u32 id_num)
+{
+       if (id_num >= bmap->max_count)
+               return -1;
+
+       return test_bit(id_num, bmap->bitmap);
+}
+
 static u32 qed_rdma_get_sb_id(void *p_hwfn, u32 rel_sb_id)
 {
        /* First sb id for RoCE is after all the l2 sb */
@@ -170,7 +202,8 @@ static int qed_rdma_alloc(struct qed_hwfn *p_hwfn,
        /* Queue zone lines are shared between RoCE and L2 in such a way that
         * they can be used by each without obstructing the other.
         */
-       p_rdma_info->queue_zone_base = (u16)FEAT_NUM(p_hwfn, QED_L2_QUEUE);
+       p_rdma_info->queue_zone_base = (u16)RESC_START(p_hwfn, QED_L2_QUEUE);
+       p_rdma_info->max_queue_zones = (u16)RESC_NUM(p_hwfn, QED_L2_QUEUE);
 
        /* Allocate a struct with device params and fill it */
        p_rdma_info->dev = kzalloc(sizeof(*p_rdma_info->dev), GFP_KERNEL);
@@ -248,9 +281,18 @@ static int qed_rdma_alloc(struct qed_hwfn *p_hwfn,
                goto free_tid_map;
        }
 
+       /* Allocate bitmap for cids used for responders/requesters. */
+       rc = qed_rdma_bmap_alloc(p_hwfn, &p_rdma_info->real_cid_map, num_cons);
+       if (rc) {
+               DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
+                          "Failed to allocate real cid bitmap, rc = %d\n", rc);
+               goto free_cid_map;
+       }
        DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Allocation successful\n");
        return 0;
 
+free_cid_map:
+       kfree(p_rdma_info->cid_map.bitmap);
 free_tid_map:
        kfree(p_rdma_info->tid_map.bitmap);
 free_toggle_map:
@@ -273,7 +315,22 @@ free_rdma_info:
 
 static void qed_rdma_resc_free(struct qed_hwfn *p_hwfn)
 {
+       struct qed_bmap *rcid_map = &p_hwfn->p_rdma_info->real_cid_map;
        struct qed_rdma_info *p_rdma_info = p_hwfn->p_rdma_info;
+       int wait_count = 0;
+
+       /* When destroying a RoCE QP, control is returned to the user after
+        * the synchronous part. The asynchronous part may take a little
+        * longer. We delay for a short while if an async destroy QP is still
+        * expected. Beyond the added delay we clear the bitmap anyway.
+        */
+       while (bitmap_weight(rcid_map->bitmap, rcid_map->max_count)) {
+               msleep(100);
+               if (wait_count++ > 20) {
+                       DP_NOTICE(p_hwfn, "cid bitmap wait timed out\n");
+                       break;
+               }
+       }
 
        kfree(p_rdma_info->cid_map.bitmap);
        kfree(p_rdma_info->tid_map.bitmap);
@@ -724,6 +781,14 @@ static void qed_rdma_cnq_prod_update(void *rdma_cxt, u8 qz_offset, u16 prod)
        u32 addr;
 
        p_hwfn = (struct qed_hwfn *)rdma_cxt;
+
+       if (qz_offset > p_hwfn->p_rdma_info->max_queue_zones) {
+               DP_NOTICE(p_hwfn,
+                         "queue zone offset %d is too large (max is %d)\n",
+                         qz_offset, p_hwfn->p_rdma_info->max_queue_zones);
+               return;
+       }
+
        qz_num = p_hwfn->p_rdma_info->queue_zone_base + qz_offset;
        addr = GTT_BAR0_MAP_REG_USDM_RAM +
               USTORM_COMMON_QUEUE_CONS_OFFSET(qz_num);
@@ -1080,6 +1145,14 @@ static enum roce_flavor qed_roce_mode_to_flavor(enum roce_mode roce_mode)
        return flavor;
 }
 
+void qed_roce_free_cid_pair(struct qed_hwfn *p_hwfn, u16 cid)
+{
+       spin_lock_bh(&p_hwfn->p_rdma_info->lock);
+       qed_bmap_release_id(p_hwfn, &p_hwfn->p_rdma_info->cid_map, cid);
+       qed_bmap_release_id(p_hwfn, &p_hwfn->p_rdma_info->cid_map, cid + 1);
+       spin_unlock_bh(&p_hwfn->p_rdma_info->lock);
+}
+
 static int qed_roce_alloc_cid(struct qed_hwfn *p_hwfn, u16 *cid)
 {
        struct qed_rdma_info *p_rdma_info = p_hwfn->p_rdma_info;
@@ -1139,15 +1212,22 @@ err:
        return rc;
 }
 
+static void qed_roce_set_real_cid(struct qed_hwfn *p_hwfn, u32 cid)
+{
+       spin_lock_bh(&p_hwfn->p_rdma_info->lock);
+       qed_bmap_set_id(p_hwfn, &p_hwfn->p_rdma_info->real_cid_map, cid);
+       spin_unlock_bh(&p_hwfn->p_rdma_info->lock);
+}
+
 static int qed_roce_sp_create_responder(struct qed_hwfn *p_hwfn,
                                        struct qed_rdma_qp *qp)
 {
        struct roce_create_qp_resp_ramrod_data *p_ramrod;
        struct qed_sp_init_data init_data;
-       union qed_qm_pq_params qm_params;
        enum roce_flavor roce_flavor;
        struct qed_spq_entry *p_ent;
-       u16 physical_queue0 = 0;
+       u16 regular_latency_queue;
+       enum protocol_type proto;
        int rc;
 
        DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "icid = %08x\n", qp->icid);
@@ -1229,15 +1309,16 @@ static int qed_roce_sp_create_responder(struct qed_hwfn *p_hwfn,
        p_ramrod->qp_handle_for_async.lo = cpu_to_le32(qp->qp_handle_async.lo);
        p_ramrod->qp_handle_for_cqe.hi = cpu_to_le32(qp->qp_handle.hi);
        p_ramrod->qp_handle_for_cqe.lo = cpu_to_le32(qp->qp_handle.lo);
-       p_ramrod->stats_counter_id = p_hwfn->rel_pf_id;
        p_ramrod->cq_cid = cpu_to_le32((p_hwfn->hw_info.opaque_fid << 16) |
                                       qp->rq_cq_id);
 
-       memset(&qm_params, 0, sizeof(qm_params));
-       qm_params.roce.qpid = qp->icid >> 1;
-       physical_queue0 = qed_get_qm_pq(p_hwfn, PROTOCOLID_ROCE, &qm_params);
+       regular_latency_queue = qed_get_cm_pq_idx(p_hwfn, PQ_FLAGS_OFLD);
+
+       p_ramrod->regular_latency_phy_queue =
+           cpu_to_le16(regular_latency_queue);
+       p_ramrod->low_latency_phy_queue =
+           cpu_to_le16(regular_latency_queue);
 
-       p_ramrod->physical_queue0 = cpu_to_le16(physical_queue0);
        p_ramrod->dpi = cpu_to_le16(qp->dpi);
 
        qed_rdma_set_fw_mac(p_ramrod->remote_mac_addr, qp->remote_mac_addr);
@@ -1253,13 +1334,19 @@ static int qed_roce_sp_create_responder(struct qed_hwfn *p_hwfn,
 
        rc = qed_spq_post(p_hwfn, p_ent, NULL);
 
-       DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "rc = %d physical_queue0 = 0x%x\n",
-                  rc, physical_queue0);
+       DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
+                  "rc = %d regular physical queue = 0x%x\n", rc,
+                  regular_latency_queue);
 
        if (rc)
                goto err;
 
        qp->resp_offloaded = true;
+       qp->cq_prod = 0;
+
+       proto = p_hwfn->p_rdma_info->proto;
+       qed_roce_set_real_cid(p_hwfn, qp->icid -
+                             qed_cxt_get_proto_cid_start(p_hwfn, proto));
 
        return rc;
 
@@ -1277,10 +1364,10 @@ static int qed_roce_sp_create_requester(struct qed_hwfn *p_hwfn,
 {
        struct roce_create_qp_req_ramrod_data *p_ramrod;
        struct qed_sp_init_data init_data;
-       union qed_qm_pq_params qm_params;
        enum roce_flavor roce_flavor;
        struct qed_spq_entry *p_ent;
-       u16 physical_queue0 = 0;
+       u16 regular_latency_queue;
+       enum protocol_type proto;
        int rc;
 
        DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "icid = %08x\n", qp->icid);
@@ -1351,15 +1438,16 @@ static int qed_roce_sp_create_requester(struct qed_hwfn *p_hwfn,
        p_ramrod->qp_handle_for_async.lo = cpu_to_le32(qp->qp_handle_async.lo);
        p_ramrod->qp_handle_for_cqe.hi = cpu_to_le32(qp->qp_handle.hi);
        p_ramrod->qp_handle_for_cqe.lo = cpu_to_le32(qp->qp_handle.lo);
-       p_ramrod->stats_counter_id = p_hwfn->rel_pf_id;
-       p_ramrod->cq_cid = cpu_to_le32((p_hwfn->hw_info.opaque_fid << 16) |
-                                      qp->sq_cq_id);
+       p_ramrod->cq_cid =
+           cpu_to_le32((p_hwfn->hw_info.opaque_fid << 16) | qp->sq_cq_id);
+
+       regular_latency_queue = qed_get_cm_pq_idx(p_hwfn, PQ_FLAGS_OFLD);
 
-       memset(&qm_params, 0, sizeof(qm_params));
-       qm_params.roce.qpid = qp->icid >> 1;
-       physical_queue0 = qed_get_qm_pq(p_hwfn, PROTOCOLID_ROCE, &qm_params);
+       p_ramrod->regular_latency_phy_queue =
+           cpu_to_le16(regular_latency_queue);
+       p_ramrod->low_latency_phy_queue =
+           cpu_to_le16(regular_latency_queue);
 
-       p_ramrod->physical_queue0 = cpu_to_le16(physical_queue0);
        p_ramrod->dpi = cpu_to_le16(qp->dpi);
 
        qed_rdma_set_fw_mac(p_ramrod->remote_mac_addr, qp->remote_mac_addr);
@@ -1378,6 +1466,10 @@ static int qed_roce_sp_create_requester(struct qed_hwfn *p_hwfn,
                goto err;
 
        qp->req_offloaded = true;
+       proto = p_hwfn->p_rdma_info->proto;
+       qed_roce_set_real_cid(p_hwfn,
+                             qp->icid + 1 -
+                             qed_cxt_get_proto_cid_start(p_hwfn, proto));
 
        return rc;
 
@@ -1577,7 +1669,8 @@ static int qed_roce_sp_modify_requester(struct qed_hwfn *p_hwfn,
 
 static int qed_roce_sp_destroy_qp_responder(struct qed_hwfn *p_hwfn,
                                            struct qed_rdma_qp *qp,
-                                           u32 *num_invalidated_mw)
+                                           u32 *num_invalidated_mw,
+                                           u32 *cq_prod)
 {
        struct roce_destroy_qp_resp_output_params *p_ramrod_res;
        struct roce_destroy_qp_resp_ramrod_data *p_ramrod;
@@ -1588,8 +1681,22 @@ static int qed_roce_sp_destroy_qp_responder(struct qed_hwfn *p_hwfn,
 
        DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "icid = %08x\n", qp->icid);
 
-       if (!qp->resp_offloaded)
+       *num_invalidated_mw = 0;
+       *cq_prod = qp->cq_prod;
+
+       if (!qp->resp_offloaded) {
+               /* If a responder was never offloaded, we need to free the cids
+                * allocated in create_qp, as a FW async event will never arrive
+                */
+               u32 cid;
+
+               cid = qp->icid -
+                     qed_cxt_get_proto_cid_start(p_hwfn,
+                                                 p_hwfn->p_rdma_info->proto);
+               qed_roce_free_cid_pair(p_hwfn, (u16)cid);
+
                return 0;
+       }
 
        /* Get SPQ entry */
        memset(&init_data, 0, sizeof(init_data));
@@ -1624,6 +1731,8 @@ static int qed_roce_sp_destroy_qp_responder(struct qed_hwfn *p_hwfn,
                goto err;
 
        *num_invalidated_mw = le32_to_cpu(p_ramrod_res->num_invalidated_mw);
+       *cq_prod = le32_to_cpu(p_ramrod_res->cq_prod);
+       qp->cq_prod = *cq_prod;
 
        /* Free IRQ - only if ramrod succeeded, in case FW is still using it */
        dma_free_coherent(&p_hwfn->cdev->pdev->dev,
@@ -1827,10 +1936,8 @@ static int qed_roce_query_qp(struct qed_hwfn *p_hwfn,
 
        out_params->draining = false;
 
-       if (rq_err_state)
+       if (rq_err_state || sq_err_state)
                qp->cur_state = QED_ROCE_QP_STATE_ERR;
-       else if (sq_err_state)
-               qp->cur_state = QED_ROCE_QP_STATE_SQE;
        else if (sq_draining)
                out_params->draining = true;
        out_params->state = qp->cur_state;
@@ -1849,10 +1956,9 @@ err_resp:
 
 static int qed_roce_destroy_qp(struct qed_hwfn *p_hwfn, struct qed_rdma_qp *qp)
 {
-       struct qed_rdma_info *p_rdma_info = p_hwfn->p_rdma_info;
        u32 num_invalidated_mw = 0;
        u32 num_bound_mw = 0;
-       u32 start_cid;
+       u32 cq_prod;
        int rc;
 
        /* Destroys the specified QP */
@@ -1866,7 +1972,8 @@ static int qed_roce_destroy_qp(struct qed_hwfn *p_hwfn, struct qed_rdma_qp *qp)
 
        if (qp->cur_state != QED_ROCE_QP_STATE_RESET) {
                rc = qed_roce_sp_destroy_qp_responder(p_hwfn, qp,
-                                                     &num_invalidated_mw);
+                                                     &num_invalidated_mw,
+                                                     &cq_prod);
                if (rc)
                        return rc;
 
@@ -1881,21 +1988,6 @@ static int qed_roce_destroy_qp(struct qed_hwfn *p_hwfn, struct qed_rdma_qp *qp)
                                  "number of invalidate memory windows is different from bounded ones\n");
                        return -EINVAL;
                }
-
-               spin_lock_bh(&p_rdma_info->lock);
-
-               start_cid = qed_cxt_get_proto_cid_start(p_hwfn,
-                                                       p_rdma_info->proto);
-
-               /* Release responder's icid */
-               qed_bmap_release_id(p_hwfn, &p_rdma_info->cid_map,
-                                   qp->icid - start_cid);
-
-               /* Release requester's icid */
-               qed_bmap_release_id(p_hwfn, &p_rdma_info->cid_map,
-                                   qp->icid + 1 - start_cid);
-
-               spin_unlock_bh(&p_rdma_info->lock);
        }
 
        return 0;
@@ -2110,12 +2202,19 @@ static int qed_roce_modify_qp(struct qed_hwfn *p_hwfn,
                return rc;
        } else if (qp->cur_state == QED_ROCE_QP_STATE_RESET) {
                /* Any state -> RESET */
+               u32 cq_prod;
+
+               /* Send destroy responder ramrod */
+               rc = qed_roce_sp_destroy_qp_responder(p_hwfn,
+                                                     qp,
+                                                     &num_invalidated_mw,
+                                                     &cq_prod);
 
-               rc = qed_roce_sp_destroy_qp_responder(p_hwfn, qp,
-                                                     &num_invalidated_mw);
                if (rc)
                        return rc;
 
+               qp->cq_prod = cq_prod;
+
                rc = qed_roce_sp_destroy_qp_requester(p_hwfn, qp,
                                                      &num_bound_mw);
 
@@ -2454,6 +2553,31 @@ static int qed_rdma_deregister_tid(void *rdma_cxt, u32 itid)
        return rc;
 }
 
+static void qed_roce_free_real_icid(struct qed_hwfn *p_hwfn, u16 icid)
+{
+       struct qed_rdma_info *p_rdma_info = p_hwfn->p_rdma_info;
+       u32 start_cid, cid, xcid;
+
+       /* An even icid belongs to a responder while an odd icid belongs to a
+        * requester. The 'cid' received as an input can be either. We calculate
+        * the "partner" icid and call it xcid. Only when both are free can the
+        * "cid" map be cleared.
+        */
+       start_cid = qed_cxt_get_proto_cid_start(p_hwfn, p_rdma_info->proto);
+       cid = icid - start_cid;
+       xcid = cid ^ 1;
+
+       spin_lock_bh(&p_rdma_info->lock);
+
+       qed_bmap_release_id(p_hwfn, &p_rdma_info->real_cid_map, cid);
+       if (qed_bmap_test_id(p_hwfn, &p_rdma_info->real_cid_map, xcid) == 0) {
+               qed_bmap_release_id(p_hwfn, &p_rdma_info->cid_map, cid);
+               qed_bmap_release_id(p_hwfn, &p_rdma_info->cid_map, xcid);
+       }
+
+       spin_unlock_bh(&p_hwfn->p_rdma_info->lock);
+}
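A standalone restatement of the even/odd pairing the comment describes (CID values arbitrary):

        #include <stdint.h>
        #include <stdio.h>

        int main(void)
        {
                for (uint32_t cid = 4; cid < 8; cid++)
                        printf("cid %u is a %s; partner xcid = %u\n",
                               cid, (cid & 1) ? "requester" : "responder",
                               cid ^ 1);
                return 0;
        }

cid 4 pairs with 5, 5 with 4, and so on; the underlying cid_map pair is released only once both halves of the pair are free.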
+
 static void *qed_rdma_get_rdma_ctx(struct qed_dev *cdev)
 {
        return QED_LEADING_HWFN(cdev);
@@ -2773,7 +2897,7 @@ static int qed_roce_ll2_tx(struct qed_dev *cdev,
                                                      : QED_LL2_RROCE;
 
        if (pkt->roce_mode == ROCE_V2_IPV4)
-               flags |= BIT(CORE_TX_BD_FLAGS_IP_CSUM_SHIFT);
+               flags |= BIT(CORE_TX_BD_DATA_IP_CSUM_SHIFT);
 
        /* Tx header */
        rc = qed_ll2_prepare_tx_packet(QED_LEADING_HWFN(cdev), roce_ll2->handle,
index 36cf4b2ab7faf0afcd17ec3347bf0d9d05567901..3ccc08a7c9959108382cbbb9f8fd8c592441bd28 100644 (file)
@@ -82,6 +82,7 @@ struct qed_rdma_info {
        struct qed_bmap qp_map;
        struct qed_bmap srq_map;
        struct qed_bmap cid_map;
+       struct qed_bmap real_cid_map;
        struct qed_bmap dpi_map;
        struct qed_bmap toggle_bits;
        struct qed_rdma_events events;
@@ -92,6 +93,7 @@ struct qed_rdma_info {
        u32 num_qps;
        u32 num_mrs;
        u16 queue_zone_base;
+       u16 max_queue_zones;
        enum protocol_type proto;
 };
 
@@ -153,6 +155,7 @@ struct qed_rdma_qp {
        dma_addr_t irq_phys_addr;
        u8 irq_num_pages;
        bool resp_offloaded;
+       u32 cq_prod;
 
        u8 remote_mac_addr[6];
        u8 local_mac_addr[6];
@@ -163,8 +166,8 @@ struct qed_rdma_qp {
 
 #if IS_ENABLED(CONFIG_QED_RDMA)
 void qed_rdma_dpm_bar(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt);
-void qed_async_roce_event(struct qed_hwfn *p_hwfn,
-                         struct event_ring_entry *p_eqe);
+void qed_roce_async_event(struct qed_hwfn *p_hwfn,
+                         u8 fw_event_code, union rdma_eqe_data *rdma_data);
 void qed_ll2b_complete_tx_gsi_packet(struct qed_hwfn *p_hwfn,
                                     u8 connection_handle,
                                     void *cookie,
@@ -187,7 +190,9 @@ void qed_ll2b_complete_rx_gsi_packet(struct qed_hwfn *p_hwfn,
                                     u16 src_mac_addr_lo, bool b_last_packet);
 #else
 static inline void qed_rdma_dpm_bar(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) {}
-static inline void qed_async_roce_event(struct qed_hwfn *p_hwfn, struct event_ring_entry *p_eqe) {}
+static inline void qed_roce_async_event(struct qed_hwfn *p_hwfn,
+                                       u8 fw_event_code,
+                                       union rdma_eqe_data *rdma_data) {}
 static inline void qed_ll2b_complete_tx_gsi_packet(struct qed_hwfn *p_hwfn,
                                                   u8 connection_handle,
                                                   void *cookie,
index 645328a9f0cfb6b4040c8d6402ad5684d79adab9..f6423a139ca074a60909db7d369c42eb34394c27 100644 (file)
@@ -119,6 +119,7 @@ static int qed_spq_block(struct qed_hwfn *p_hwfn,
                         u8 *p_fw_ret, bool skip_quick_poll)
 {
        struct qed_spq_comp_done *comp_done;
+       struct qed_ptt *p_ptt;
        int rc;
 
        /* A relatively short polling period w/o sleeping, to allow the FW to
@@ -135,8 +136,14 @@ static int qed_spq_block(struct qed_hwfn *p_hwfn,
        if (!rc)
                return 0;
 
+       p_ptt = qed_ptt_acquire(p_hwfn);
+       if (!p_ptt) {
+               DP_NOTICE(p_hwfn, "ptt, failed to acquire\n");
+               return -EAGAIN;
+       }
+
        DP_INFO(p_hwfn, "Ramrod is stuck, requesting MCP drain\n");
-       rc = qed_mcp_drain(p_hwfn, p_hwfn->p_main_ptt);
+       rc = qed_mcp_drain(p_hwfn, p_ptt);
        if (rc) {
                DP_NOTICE(p_hwfn, "MCP drain failed\n");
                goto err;
@@ -145,15 +152,18 @@ static int qed_spq_block(struct qed_hwfn *p_hwfn,
        /* Retry after drain */
        rc = __qed_spq_block(p_hwfn, p_ent, p_fw_ret, true);
        if (!rc)
-               return 0;
+               goto out;
 
        comp_done = (struct qed_spq_comp_done *)p_ent->comp_cb.cookie;
-       if (comp_done->done == 1) {
+       if (comp_done->done == 1)
                if (p_fw_ret)
                        *p_fw_ret = comp_done->fw_return_code;
-               return 0;
-       }
+out:
+       qed_ptt_release(p_hwfn, p_ptt);
+       return 0;
+
 err:
+       qed_ptt_release(p_hwfn, p_ptt);
        DP_NOTICE(p_hwfn,
                  "Ramrod is stuck [CID %08x cmd %02x protocol %02x echo %04x]\n",
                  le32_to_cpu(p_ent->elem.hdr.cid),
@@ -205,11 +215,10 @@ static int qed_spq_fill_entry(struct qed_hwfn *p_hwfn,
 static void qed_spq_hw_initialize(struct qed_hwfn *p_hwfn,
                                  struct qed_spq *p_spq)
 {
-       u16                             pq;
-       struct qed_cxt_info             cxt_info;
-       struct core_conn_context        *p_cxt;
-       union qed_qm_pq_params          pq_params;
-       int                             rc;
+       struct core_conn_context *p_cxt;
+       struct qed_cxt_info cxt_info;
+       u16 physical_q;
+       int rc;
 
        cxt_info.iid = p_spq->cid;
 
@@ -231,10 +240,8 @@ static void qed_spq_hw_initialize(struct qed_hwfn *p_hwfn,
                  XSTORM_CORE_CONN_AG_CTX_CONSOLID_PROD_CF_EN, 1);
 
        /* QM physical queue */
-       memset(&pq_params, 0, sizeof(pq_params));
-       pq_params.core.tc = LB_TC;
-       pq = qed_get_qm_pq(p_hwfn, PROTOCOLID_CORE, &pq_params);
-       p_cxt->xstorm_ag_context.physical_q0 = cpu_to_le16(pq);
+       physical_q = qed_get_cm_pq_idx(p_hwfn, PQ_FLAGS_LB);
+       p_cxt->xstorm_ag_context.physical_q0 = cpu_to_le16(physical_q);
 
        p_cxt->xstorm_st_context.spq_base_lo =
                DMA_LO_LE(p_spq->chain.p_phys_addr);
@@ -296,9 +303,12 @@ qed_async_event_completion(struct qed_hwfn *p_hwfn,
                           struct event_ring_entry *p_eqe)
 {
        switch (p_eqe->protocol_id) {
+#if IS_ENABLED(CONFIG_QED_RDMA)
        case PROTOCOLID_ROCE:
-               qed_async_roce_event(p_hwfn, p_eqe);
+               qed_roce_async_event(p_hwfn, p_eqe->opcode,
+                                    &p_eqe->data.rdma_data);
                return 0;
+#endif
        case PROTOCOLID_COMMON:
                return qed_sriov_eqe_event(p_hwfn,
                                           p_eqe->opcode,
@@ -306,14 +316,6 @@ qed_async_event_completion(struct qed_hwfn *p_hwfn,
        case PROTOCOLID_ISCSI:
                if (!IS_ENABLED(CONFIG_QED_ISCSI))
                        return -EINVAL;
-               if (p_eqe->opcode == ISCSI_EVENT_TYPE_ASYN_DELETE_OOO_ISLES) {
-                       u32 cid = le32_to_cpu(p_eqe->data.iscsi_info.cid);
-
-                       qed_ooo_release_connection_isles(p_hwfn,
-                                                        p_hwfn->p_ooo_info,
-                                                        cid);
-                       return 0;
-               }
 
                if (p_hwfn->p_iscsi_info->event_cb) {
                        struct qed_iscsi_info *p_iscsi = p_hwfn->p_iscsi_info;
index 253c2bbe1e4e1a705e52054b4d3faa199fd2ca93..92a3ee1715d9b9f0ccf62467f1212938c7a2627d 100644 (file)
@@ -178,26 +178,59 @@ static struct qed_vf_info *qed_iov_get_vf_info(struct qed_hwfn *p_hwfn,
        return vf;
 }
 
+enum qed_iov_validate_q_mode {
+       QED_IOV_VALIDATE_Q_NA,
+       QED_IOV_VALIDATE_Q_ENABLE,
+       QED_IOV_VALIDATE_Q_DISABLE,
+};
+
+static bool qed_iov_validate_queue_mode(struct qed_hwfn *p_hwfn,
+                                       struct qed_vf_info *p_vf,
+                                       u16 qid,
+                                       enum qed_iov_validate_q_mode mode,
+                                       bool b_is_tx)
+{
+       if (mode == QED_IOV_VALIDATE_Q_NA)
+               return true;
+
+       if ((b_is_tx && p_vf->vf_queues[qid].p_tx_cid) ||
+           (!b_is_tx && p_vf->vf_queues[qid].p_rx_cid))
+               return mode == QED_IOV_VALIDATE_Q_ENABLE;
+
+       /* In case we haven't found any valid cid, the queue is disabled */
+       return mode == QED_IOV_VALIDATE_Q_DISABLE;
+}
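A self-contained restatement of the validation rule (has_cid stands in for the p_rx_cid/p_tx_cid check; the enum mirrors the one above):

        #include <stdbool.h>
        #include <stdio.h>

        enum q_mode { Q_NA, Q_ENABLE, Q_DISABLE };

        static bool validate(enum q_mode mode, bool has_cid)
        {
                if (mode == Q_NA)
                        return true;            /* caller doesn't care */
                if (has_cid)
                        return mode == Q_ENABLE;
                return mode == Q_DISABLE;       /* no cid means disabled */
        }

        int main(void)
        {
                printf("%d %d %d %d\n",
                       validate(Q_NA, false),           /* 1 */
                       validate(Q_ENABLE, true),        /* 1 */
                       validate(Q_DISABLE, true),       /* 0 */
                       validate(Q_DISABLE, false));     /* 1 */
                return 0;
        }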
+
 static bool qed_iov_validate_rxq(struct qed_hwfn *p_hwfn,
-                                struct qed_vf_info *p_vf, u16 rx_qid)
+                                struct qed_vf_info *p_vf,
+                                u16 rx_qid,
+                                enum qed_iov_validate_q_mode mode)
 {
-       if (rx_qid >= p_vf->num_rxqs)
+       if (rx_qid >= p_vf->num_rxqs) {
                DP_VERBOSE(p_hwfn,
                           QED_MSG_IOV,
                           "VF[0x%02x] - can't touch Rx queue[%04x]; Only 0x%04x are allocated\n",
                           p_vf->abs_vf_id, rx_qid, p_vf->num_rxqs);
-       return rx_qid < p_vf->num_rxqs;
+               return false;
+       }
+
+       return qed_iov_validate_queue_mode(p_hwfn, p_vf, rx_qid, mode, false);
 }
 
 static bool qed_iov_validate_txq(struct qed_hwfn *p_hwfn,
-                                struct qed_vf_info *p_vf, u16 tx_qid)
+                                struct qed_vf_info *p_vf,
+                                u16 tx_qid,
+                                enum qed_iov_validate_q_mode mode)
 {
-       if (tx_qid >= p_vf->num_txqs)
+       if (tx_qid >= p_vf->num_txqs) {
                DP_VERBOSE(p_hwfn,
                           QED_MSG_IOV,
                           "VF[0x%02x] - can't touch Tx queue[%04x]; Only 0x%04x are allocated\n",
                           p_vf->abs_vf_id, tx_qid, p_vf->num_txqs);
-       return tx_qid < p_vf->num_txqs;
+               return false;
+       }
+
+       return qed_iov_validate_queue_mode(p_hwfn, p_vf, tx_qid, mode, true);
 }
 
 static bool qed_iov_validate_sb(struct qed_hwfn *p_hwfn,
@@ -217,6 +250,34 @@ static bool qed_iov_validate_sb(struct qed_hwfn *p_hwfn,
        return false;
 }
 
+static bool qed_iov_validate_active_rxq(struct qed_hwfn *p_hwfn,
+                                       struct qed_vf_info *p_vf)
+{
+       u8 i;
+
+       for (i = 0; i < p_vf->num_rxqs; i++)
+               if (qed_iov_validate_queue_mode(p_hwfn, p_vf, i,
+                                               QED_IOV_VALIDATE_Q_ENABLE,
+                                               false))
+                       return true;
+
+       return false;
+}
+
+static bool qed_iov_validate_active_txq(struct qed_hwfn *p_hwfn,
+                                       struct qed_vf_info *p_vf)
+{
+       u8 i;
+
+       for (i = 0; i < p_vf->num_txqs; i++)
+               if (qed_iov_validate_queue_mode(p_hwfn, p_vf, i,
+                                               QED_IOV_VALIDATE_Q_ENABLE,
+                                               true))
+                       return true;
+
+       return false;
+}
+
 static int qed_iov_post_vf_bulletin(struct qed_hwfn *p_hwfn,
                                    int vfid, struct qed_ptt *p_ptt)
 {
@@ -557,14 +618,30 @@ int qed_iov_hw_info(struct qed_hwfn *p_hwfn)
                return 0;
        }
 
-       /* Calculate the first VF index - this is a bit tricky; Basically,
-        * VFs start at offset 16 relative to PF0, and 2nd engine VFs begin
-        * after the first engine's VFs.
+       /* First VF index based on offset is tricky:
+        *  - If ARI is supported [likely], offset - (16 - pf_id) would
+        *    provide the number for eng0. 2nd engine VFs would begin
+        *    after the first engine's VFs.
+        *  - If !ARI, VFs would start on the next device,
+        *    so offset - (256 - pf_id) would provide the number.
+        * Utilize the fact that (256 - pf_id) is reached only by the latter
+        * to differentiate between the two.
         */
-       cdev->p_iov_info->first_vf_in_pf = p_hwfn->cdev->p_iov_info->offset +
-                                          p_hwfn->abs_pf_id - 16;
-       if (QED_PATH_ID(p_hwfn))
-               cdev->p_iov_info->first_vf_in_pf -= MAX_NUM_VFS_BB;
+
+       if (p_hwfn->cdev->p_iov_info->offset < (256 - p_hwfn->abs_pf_id)) {
+               u32 first = p_hwfn->cdev->p_iov_info->offset +
+                           p_hwfn->abs_pf_id - 16;
+
+               cdev->p_iov_info->first_vf_in_pf = first;
+
+               if (QED_PATH_ID(p_hwfn))
+                       cdev->p_iov_info->first_vf_in_pf -= MAX_NUM_VFS_BB;
+       } else {
+               u32 first = p_hwfn->cdev->p_iov_info->offset +
+                           p_hwfn->abs_pf_id - 256;
+
+               cdev->p_iov_info->first_vf_in_pf = first;
+       }
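A standalone check of both branches (the offsets are hypothetical SR-IOV capability values; the QED_PATH_ID adjustment for 2nd-engine VFs is omitted):

        #include <stdint.h>
        #include <stdio.h>

        static uint32_t first_vf_in_pf(uint32_t offset, uint32_t abs_pf_id)
        {
                if (offset < 256 - abs_pf_id)           /* ARI: same device */
                        return offset + abs_pf_id - 16;
                return offset + abs_pf_id - 256;        /* !ARI: next device */
        }

        int main(void)
        {
                printf("%u\n", first_vf_in_pf(16, 0));  /* ARI -> VF 0 */
                printf("%u\n", first_vf_in_pf(256, 0)); /* !ARI -> VF 0 */
                return 0;
        }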
 
        DP_VERBOSE(p_hwfn, QED_MSG_IOV,
                   "First VF in hwfn 0x%08x\n",
@@ -677,6 +754,11 @@ static int qed_iov_enable_vf_access(struct qed_hwfn *p_hwfn,
        u32 igu_vf_conf = IGU_VF_CONF_FUNC_EN;
        int rc;
 
+       /* It's possible VF was previously considered malicious -
+        * clear the indication even if we're only going to disable VF.
+        */
+       vf->b_malicious = false;
+
        if (vf->to_disable)
                return 0;
 
@@ -689,9 +771,6 @@ static int qed_iov_enable_vf_access(struct qed_hwfn *p_hwfn,
 
        qed_iov_vf_igu_reset(p_hwfn, p_ptt, vf);
 
-       /* It's possible VF was previously considered malicious */
-       vf->b_malicious = false;
-
        rc = qed_mcp_config_vf_msix(p_hwfn, p_ptt, vf->abs_vf_id, vf->num_sbs);
        if (rc)
                return rc;
@@ -1118,13 +1197,17 @@ static void qed_iov_send_response(struct qed_hwfn *p_hwfn,
                           (sizeof(union pfvf_tlvs) - sizeof(u64)) / 4,
                           &params);
 
-       qed_dmae_host2host(p_hwfn, p_ptt, mbx->reply_phys,
-                          mbx->req_virt->first_tlv.reply_address,
-                          sizeof(u64) / 4, &params);
-
+       /* Once the PF copies the rc to the VF, the latter can continue
+        * and send an additional message, so we have to make sure the
+        * channel is re-set to ready prior to that.
+        */
        REG_WR(p_hwfn,
               GTT_BAR0_MAP_REG_USDM_RAM +
               USTORM_VF_PF_CHANNEL_READY_OFFSET(eng_vf_id), 1);
+
+       qed_dmae_host2host(p_hwfn, p_ptt, mbx->reply_phys,
+                          mbx->req_virt->first_tlv.reply_address,
+                          sizeof(u64) / 4, &params);
 }
 
 static u16 qed_iov_vport_to_tlv(struct qed_hwfn *p_hwfn,
@@ -1733,6 +1816,8 @@ static void qed_iov_vf_mbx_start_vport(struct qed_hwfn *p_hwfn,
        vf->state = VF_ENABLED;
        start = &mbx->req_virt->start_vport;
 
+       qed_iov_enable_vf_traffic(p_hwfn, p_ptt, vf);
+
        /* Initialize Status block in CAU */
        for (sb_id = 0; sb_id < vf->num_sbs; sb_id++) {
                if (!start->sb_addr[sb_id]) {
@@ -1746,7 +1831,6 @@ static void qed_iov_vf_mbx_start_vport(struct qed_hwfn *p_hwfn,
                                    start->sb_addr[sb_id],
                                    vf->igu_sbs[sb_id], vf->abs_vf_id, 1);
        }
-       qed_iov_enable_vf_traffic(p_hwfn, p_ptt, vf);
 
        vf->mtu = start->mtu;
        vf->shadow_config.inner_vlan_removal = start->inner_vlan_removal;
@@ -1803,6 +1887,16 @@ static void qed_iov_vf_mbx_stop_vport(struct qed_hwfn *p_hwfn,
        vf->vport_instance--;
        vf->spoof_chk = false;
 
+       if ((qed_iov_validate_active_rxq(p_hwfn, vf)) ||
+           (qed_iov_validate_active_txq(p_hwfn, vf))) {
+               vf->b_malicious = true;
+               DP_NOTICE(p_hwfn,
+                         "VF [%02x] - considered malicious; Unable to stop RX/TX queuess\n",
+                         vf->abs_vf_id);
+               status = PFVF_STATUS_MALICIOUS;
+               goto out;
+       }
+
        rc = qed_sp_vport_stop(p_hwfn, vf->opaque_fid, vf->vport_id);
        if (rc) {
                DP_ERR(p_hwfn, "qed_iov_vf_mbx_stop_vport returned error %d\n",
@@ -1814,6 +1908,7 @@ static void qed_iov_vf_mbx_stop_vport(struct qed_hwfn *p_hwfn,
        vf->configured_features = 0;
        memset(&vf->shadow_config, 0, sizeof(vf->shadow_config));
 
+out:
        qed_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_VPORT_TEARDOWN,
                             sizeof(struct pfvf_def_resp_tlv), status);
 }
@@ -1870,7 +1965,8 @@ static void qed_iov_vf_mbx_start_rxq(struct qed_hwfn *p_hwfn,
 
        req = &mbx->req_virt->start_rxq;
 
-       if (!qed_iov_validate_rxq(p_hwfn, vf, req->rx_qid) ||
+       if (!qed_iov_validate_rxq(p_hwfn, vf, req->rx_qid,
+                                 QED_IOV_VALIDATE_Q_DISABLE) ||
            !qed_iov_validate_sb(p_hwfn, vf, req->hw_sb))
                goto out;
 
@@ -1970,21 +2066,16 @@ static void qed_iov_vf_mbx_start_txq(struct qed_hwfn *p_hwfn,
        struct qed_queue_start_common_params params;
        struct qed_iov_vf_mbx *mbx = &vf->vf_mbx;
        u8 status = PFVF_STATUS_NO_RESOURCE;
-       union qed_qm_pq_params pq_params;
        struct vfpf_start_txq_tlv *req;
        struct qed_vf_q_info *p_queue;
        int rc;
        u16 pq;
 
-       /* Prepare the parameters which would choose the right PQ */
-       memset(&pq_params, 0, sizeof(pq_params));
-       pq_params.eth.is_vf = 1;
-       pq_params.eth.vf_id = vf->relative_vf_id;
-
        memset(&params, 0, sizeof(params));
        req = &mbx->req_virt->start_txq;
 
-       if (!qed_iov_validate_txq(p_hwfn, vf, req->tx_qid) ||
+       if (!qed_iov_validate_txq(p_hwfn, vf, req->tx_qid,
+                                 QED_IOV_VALIDATE_Q_DISABLE) ||
            !qed_iov_validate_sb(p_hwfn, vf, req->hw_sb))
                goto out;
 
@@ -2004,7 +2095,7 @@ static void qed_iov_vf_mbx_start_txq(struct qed_hwfn *p_hwfn,
        if (!p_queue->p_tx_cid)
                goto out;
 
-       pq = qed_get_qm_pq(p_hwfn, PROTOCOLID_ETH, &pq_params);
+       pq = qed_get_cm_pq_idx_vf(p_hwfn, vf->relative_vf_id);
        rc = qed_eth_txq_start_ramrod(p_hwfn, p_queue->p_tx_cid,
                                      req->pbl_addr, req->pbl_size, pq);
        if (rc) {
@@ -2021,57 +2112,53 @@ out:
 
 static int qed_iov_vf_stop_rxqs(struct qed_hwfn *p_hwfn,
                                struct qed_vf_info *vf,
-                               u16 rxq_id, u8 num_rxqs, bool cqe_completion)
+                               u16 rxq_id, bool cqe_completion)
 {
        struct qed_vf_q_info *p_queue;
        int rc = 0;
-       int qid;
 
-       if (rxq_id + num_rxqs > ARRAY_SIZE(vf->vf_queues))
+       if (!qed_iov_validate_rxq(p_hwfn, vf, rxq_id,
+                                 QED_IOV_VALIDATE_Q_ENABLE)) {
+               DP_VERBOSE(p_hwfn,
+                          QED_MSG_IOV,
+                          "VF[%d] Tried Closing Rx 0x%04x which is inactive\n",
+                          vf->relative_vf_id, rxq_id);
                return -EINVAL;
+       }
 
-       for (qid = rxq_id; qid < rxq_id + num_rxqs; qid++) {
-               p_queue = &vf->vf_queues[qid];
-
-               if (!p_queue->p_rx_cid)
-                       continue;
+       p_queue = &vf->vf_queues[rxq_id];
 
-               rc = qed_eth_rx_queue_stop(p_hwfn,
-                                          p_queue->p_rx_cid,
-                                          false, cqe_completion);
-               if (rc)
-                       return rc;
+       rc = qed_eth_rx_queue_stop(p_hwfn,
+                                  p_queue->p_rx_cid,
+                                  false, cqe_completion);
+       if (rc)
+               return rc;
 
-               vf->vf_queues[qid].p_rx_cid = NULL;
-               vf->num_active_rxqs--;
-       }
+       p_queue->p_rx_cid = NULL;
+       vf->num_active_rxqs--;
 
-       return rc;
+       return 0;
 }
 
 static int qed_iov_vf_stop_txqs(struct qed_hwfn *p_hwfn,
-                               struct qed_vf_info *vf, u16 txq_id, u8 num_txqs)
+                               struct qed_vf_info *vf, u16 txq_id)
 {
-       int rc = 0;
        struct qed_vf_q_info *p_queue;
-       int qid;
+       int rc = 0;
 
-       if (txq_id + num_txqs > ARRAY_SIZE(vf->vf_queues))
+       if (!qed_iov_validate_txq(p_hwfn, vf, txq_id,
+                                 QED_IOV_VALIDATE_Q_ENABLE))
                return -EINVAL;
 
-       for (qid = txq_id; qid < txq_id + num_txqs; qid++) {
-               p_queue = &vf->vf_queues[qid];
-               if (!p_queue->p_tx_cid)
-                       continue;
+       p_queue = &vf->vf_queues[txq_id];
 
-               rc = qed_eth_tx_queue_stop(p_hwfn, p_queue->p_tx_cid);
-               if (rc)
-                       return rc;
+       rc = qed_eth_tx_queue_stop(p_hwfn, p_queue->p_tx_cid);
+       if (rc)
+               return rc;
 
-               p_queue->p_tx_cid = NULL;
-       }
+       p_queue->p_tx_cid = NULL;
 
-       return rc;
+       return 0;
 }
 
 static void qed_iov_vf_mbx_stop_rxqs(struct qed_hwfn *p_hwfn,
@@ -2080,20 +2167,28 @@ static void qed_iov_vf_mbx_stop_rxqs(struct qed_hwfn *p_hwfn,
 {
        u16 length = sizeof(struct pfvf_def_resp_tlv);
        struct qed_iov_vf_mbx *mbx = &vf->vf_mbx;
-       u8 status = PFVF_STATUS_SUCCESS;
+       u8 status = PFVF_STATUS_FAILURE;
        struct vfpf_stop_rxqs_tlv *req;
        int rc;
 
-       /* We give the option of starting from qid != 0, in this case we
-        * need to make sure that qid + num_qs doesn't exceed the actual
-        * amount of queues that exist.
+       /* There has never been an official driver that used this interface
+        * for stopping multiple queues, and it is now considered deprecated.
+        * Validate this isn't used here.
         */
        req = &mbx->req_virt->stop_rxqs;
-       rc = qed_iov_vf_stop_rxqs(p_hwfn, vf, req->rx_qid,
-                                 req->num_rxqs, req->cqe_completion);
-       if (rc)
-               status = PFVF_STATUS_FAILURE;
+       if (req->num_rxqs != 1) {
+               DP_VERBOSE(p_hwfn, QED_MSG_IOV,
+                          "Odd; VF[%d] tried stopping multiple Rx queues\n",
+                          vf->relative_vf_id);
+               status = PFVF_STATUS_NOT_SUPPORTED;
+               goto out;
+       }
 
+       rc = qed_iov_vf_stop_rxqs(p_hwfn, vf, req->rx_qid,
+                                 req->cqe_completion);
+       if (!rc)
+               status = PFVF_STATUS_SUCCESS;
+out:
        qed_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_STOP_RXQS,
                             length, status);
 }
@@ -2104,19 +2199,27 @@ static void qed_iov_vf_mbx_stop_txqs(struct qed_hwfn *p_hwfn,
 {
        u16 length = sizeof(struct pfvf_def_resp_tlv);
        struct qed_iov_vf_mbx *mbx = &vf->vf_mbx;
-       u8 status = PFVF_STATUS_SUCCESS;
+       u8 status = PFVF_STATUS_FAILURE;
        struct vfpf_stop_txqs_tlv *req;
        int rc;
 
-       /* We give the option of starting from qid != 0, in this case we
-        * need to make sure that qid + num_qs doesn't exceed the actual
-        * amount of queues that exist.
+       /* There has never been an official driver that used this interface
+        * for stopping multiple queues, and it is now considered deprecated.
+        * Validate this isn't used here.
         */
        req = &mbx->req_virt->stop_txqs;
-       rc = qed_iov_vf_stop_txqs(p_hwfn, vf, req->tx_qid, req->num_txqs);
-       if (rc)
-               status = PFVF_STATUS_FAILURE;
+       if (req->num_txqs != 1) {
+               DP_VERBOSE(p_hwfn, QED_MSG_IOV,
+                          "Odd; VF[%d] tried stopping multiple Tx queues\n",
+                          vf->relative_vf_id);
+               status = PFVF_STATUS_NOT_SUPPORTED;
+               goto out;
+       }
+       rc = qed_iov_vf_stop_txqs(p_hwfn, vf, req->tx_qid);
+       if (!rc)
+               status = PFVF_STATUS_SUCCESS;
 
+out:
        qed_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_STOP_TXQS,
                             length, status);
 }
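
Taken together, the two handlers above now reject any request with
num_rxqs/num_txqs != 1 up front with PFVF_STATUS_NOT_SUPPORTED and only then
attempt the single-queue stop. A minimal, self-contained userspace sketch of
that validate-then-act pattern (the status values, queue-array size and
function names are illustrative, not the driver's):

    #include <stdint.h>
    #include <stdio.h>

    #define STATUS_SUCCESS        1
    #define STATUS_NOT_SUPPORTED  2
    #define STATUS_FAILURE        3
    #define NUM_VF_QUEUES        16

    static int stop_one_queue(uint16_t qid, int enabled[])
    {
        if (qid >= NUM_VF_QUEUES || !enabled[qid])
            return -1;          /* mirrors the -EINVAL path */
        enabled[qid] = 0;       /* mirrors p_tx_cid = NULL */
        return 0;
    }

    static int handle_stop_request(uint16_t qid, uint8_t num, int enabled[])
    {
        if (num != 1)           /* the deprecated multi-queue form */
            return STATUS_NOT_SUPPORTED;
        return stop_one_queue(qid, enabled) ? STATUS_FAILURE : STATUS_SUCCESS;
    }

    int main(void)
    {
        int enabled[NUM_VF_QUEUES] = { [3] = 1 };

        printf("%d\n", handle_stop_request(3, 1, enabled)); /* 1: success */
        printf("%d\n", handle_stop_request(3, 4, enabled)); /* 2: rejected */
        return 0;
    }
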
@@ -2141,22 +2244,17 @@ static void qed_iov_vf_mbx_update_rxqs(struct qed_hwfn *p_hwfn,
        complete_event_flg = !!(req->flags & VFPF_RXQ_UPD_COMPLETE_EVENT_FLAG);
 
        /* Validate inputs */
-       if (req->num_rxqs + req->rx_qid > QED_MAX_VF_CHAINS_PER_PF ||
-           !qed_iov_validate_rxq(p_hwfn, vf, req->rx_qid)) {
-               DP_INFO(p_hwfn, "VF[%d]: Incorrect Rxqs [%04x, %02x]\n",
-                       vf->relative_vf_id, req->rx_qid, req->num_rxqs);
-               goto out;
-       }
-
-       for (i = 0; i < req->num_rxqs; i++) {
-               qid = req->rx_qid + i;
-               if (!vf->vf_queues[qid].p_rx_cid) {
-                       DP_INFO(p_hwfn,
-                               "VF[%d] rx_qid = %d isn`t active!\n",
-                               vf->relative_vf_id, qid);
+       for (i = req->rx_qid; i < req->rx_qid + req->num_rxqs; i++)
+               if (!qed_iov_validate_rxq(p_hwfn, vf, i,
+                                         QED_IOV_VALIDATE_Q_ENABLE)) {
+                       DP_INFO(p_hwfn, "VF[%d]: Incorrect Rxqs [%04x, %02x]\n",
+                               vf->relative_vf_id, req->rx_qid, req->num_rxqs);
                        goto out;
                }
 
+       /* Prepare the handlers */
+       for (i = 0; i < req->num_rxqs; i++) {
+               qid = req->rx_qid + i;
                handlers[i] = vf->vf_queues[qid].p_rx_cid;
        }
 
@@ -2372,7 +2470,8 @@ qed_iov_vp_update_rss_param(struct qed_hwfn *p_hwfn,
 
        for (i = 0; i < table_size; i++) {
                q_idx = p_rss_tlv->rss_ind_table[i];
-               if (!qed_iov_validate_rxq(p_hwfn, vf, q_idx)) {
+               if (!qed_iov_validate_rxq(p_hwfn, vf, q_idx,
+                                         QED_IOV_VALIDATE_Q_ENABLE)) {
                        DP_VERBOSE(p_hwfn,
                                   QED_MSG_IOV,
                                   "VF[%d]: Omitting RSS due to wrong queue %04x\n",
@@ -2381,15 +2480,6 @@ qed_iov_vp_update_rss_param(struct qed_hwfn *p_hwfn,
                        goto out;
                }
 
-               if (!vf->vf_queues[q_idx].p_rx_cid) {
-                       DP_VERBOSE(p_hwfn,
-                                  QED_MSG_IOV,
-                                  "VF[%d]: Omitting RSS due to inactive queue %08x\n",
-                                  vf->relative_vf_id, q_idx);
-                       b_reject = true;
-                       goto out;
-               }
-
                p_rss->rss_ind_table[i] = vf->vf_queues[q_idx].p_rx_cid;
        }
 
@@ -3042,9 +3132,10 @@ qed_iov_vf_flr_cleanup(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
        return rc;
 }
 
-int qed_iov_mark_vf_flr(struct qed_hwfn *p_hwfn, u32 *p_disabled_vfs)
+bool qed_iov_mark_vf_flr(struct qed_hwfn *p_hwfn, u32 *p_disabled_vfs)
 {
-       u16 i, found = 0;
+       bool found = false;
+       u16 i;
 
        DP_VERBOSE(p_hwfn, QED_MSG_IOV, "Marking FLR-ed VFs\n");
        for (i = 0; i < (VF_MAX_STATIC / 32); i++)
@@ -3054,7 +3145,7 @@ int qed_iov_mark_vf_flr(struct qed_hwfn *p_hwfn, u32 *p_disabled_vfs)
 
        if (!p_hwfn->cdev->p_iov_info) {
                DP_NOTICE(p_hwfn, "VF flr but no IOV\n");
-               return 0;
+               return false;
        }
 
        /* Mark VFs */
@@ -3083,7 +3174,7 @@ int qed_iov_mark_vf_flr(struct qed_hwfn *p_hwfn, u32 *p_disabled_vfs)
                         * VF flr until ACKs, we're safe.
                         */
                        p_flr[rel_vf_id / 64] |= 1ULL << (rel_vf_id % 64);
-                       found = 1;
+                       found = true;
                }
        }
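
The FLR hunks also tighten qed_iov_mark_vf_flr() to return bool and mark each
reset VF in a u64 bitmap. A standalone sketch of the same bitmap arithmetic
(MAX_VFS and the helper name are invented for the example):

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    #define MAX_VFS 192

    static bool mark_vf(uint64_t *flr, unsigned int rel_vf_id)
    {
        if (rel_vf_id >= MAX_VFS)
            return false;
        flr[rel_vf_id / 64] |= 1ULL << (rel_vf_id % 64);  /* as above */
        return true;
    }

    int main(void)
    {
        uint64_t flr[MAX_VFS / 64] = { 0 };
        bool found = mark_vf(flr, 70);

        /* VF 70 lands on bit 6 of word 1 */
        printf("found=%d word1=%#llx\n", found, (unsigned long long)flr[1]);
        return 0;
    }
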
 
@@ -3289,11 +3380,17 @@ static void qed_sriov_vfpf_malicious(struct qed_hwfn *p_hwfn,
        if (!p_vf)
                return;
 
-       DP_INFO(p_hwfn,
-               "VF [%d] - Malicious behavior [%02x]\n",
-               p_vf->abs_vf_id, p_data->err_id);
+       if (!p_vf->b_malicious) {
+               DP_NOTICE(p_hwfn,
+                         "VF [%d] - Malicious behavior [%02x]\n",
+                         p_vf->abs_vf_id, p_data->err_id);
 
-       p_vf->b_malicious = true;
+               p_vf->b_malicious = true;
+       } else {
+               DP_INFO(p_hwfn,
+                       "VF [%d] - Malicious behavior [%02x]\n",
+                       p_vf->abs_vf_id, p_data->err_id);
+       }
 }
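
The change above makes the first malicious-behavior report loud (NOTICE) and
demotes repeats from the same VF to INFO, so a misbehaving VF cannot flood the
log. The same log-once-then-demote pattern as a userspace sketch, with stderr
standing in for the kernel log:

    #include <stdbool.h>
    #include <stdio.h>

    struct vf { int id; bool malicious; };

    static void report_malicious(struct vf *vf, int err_id)
    {
        if (!vf->malicious) {
            fprintf(stderr, "NOTICE: VF [%d] - malicious [%02x]\n",
                    vf->id, err_id);
            vf->malicious = true;
        } else {
            fprintf(stderr, "INFO: VF [%d] - malicious [%02x]\n",
                    vf->id, err_id);
        }
    }

    int main(void)
    {
        struct vf vf = { .id = 3 };

        report_malicious(&vf, 0x1a);  /* NOTICE on first report */
        report_malicious(&vf, 0x1a);  /* INFO on repeats */
        return 0;
    }
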
 
 int qed_sriov_eqe_event(struct qed_hwfn *p_hwfn,
@@ -3842,6 +3939,7 @@ static int qed_get_vf_config(struct qed_dev *cdev,
 
 void qed_inform_vf_link_state(struct qed_hwfn *hwfn)
 {
+       struct qed_hwfn *lead_hwfn = QED_LEADING_HWFN(hwfn->cdev);
        struct qed_mcp_link_capabilities caps;
        struct qed_mcp_link_params params;
        struct qed_mcp_link_state link;
@@ -3858,9 +3956,15 @@ void qed_inform_vf_link_state(struct qed_hwfn *hwfn)
                if (!vf_info)
                        continue;
 
-               memcpy(&params, qed_mcp_get_link_params(hwfn), sizeof(params));
-               memcpy(&link, qed_mcp_get_link_state(hwfn), sizeof(link));
-               memcpy(&caps, qed_mcp_get_link_capabilities(hwfn),
+               /* Only hwfn0 is actually interested in the link speed.
+                * But since only it receives an MFW link indication, the
+                * configuration has to be taken from it - otherwise things
+                * like rate limiting for hwfn1 VFs would not work.
+                */
+               memcpy(&params, qed_mcp_get_link_params(lead_hwfn),
+                      sizeof(params));
+               memcpy(&link, qed_mcp_get_link_state(lead_hwfn), sizeof(link));
+               memcpy(&caps, qed_mcp_get_link_capabilities(lead_hwfn),
                       sizeof(caps));
 
                /* Modify link according to the VF's configured link state */
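
Since only the leading hwfn receives MFW link indications, every function now
copies its link view from hwfn0 before applying per-VF overrides. A toy model
of that single-source-of-truth copy (the struct layout and the 25000 Mb/s
value are invented):

    #include <stdio.h>
    #include <string.h>

    struct link_state { int speed; };
    struct hwfn { int id; struct link_state link; };
    struct dev { struct hwfn hwfns[2]; };

    #define LEADING_HWFN(cdev) (&(cdev)->hwfns[0])

    int main(void)
    {
        struct dev cdev = { .hwfns = { { 0, { 25000 } }, { 1, { 0 } } } };
        struct hwfn *hwfn = &cdev.hwfns[1];   /* the non-leading engine */
        struct link_state link;

        /* take the configuration from the leading function, as above */
        memcpy(&link, &LEADING_HWFN(&cdev)->link, sizeof(link));
        printf("hwfn%d sees %d Mb/s\n", hwfn->id, link.speed);
        return 0;
    }
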
index a89605821522d528411f711bbb0755c0ae003e5a..8e96b1d1930847fc3b03e1518da1a205f09b6291 100644
@@ -348,9 +348,9 @@ int qed_sriov_eqe_event(struct qed_hwfn *p_hwfn,
  * @param p_hwfn
  * @param disabled_vfs - bitmask of all VFs on path that were FLRed
  *
- * @return 1 iff one of the PF's vfs got FLRed. 0 otherwise.
+ * @return true iff one of the PF's VFs got FLRed, false otherwise.
  */
-int qed_iov_mark_vf_flr(struct qed_hwfn *p_hwfn, u32 *disabled_vfs);
+bool qed_iov_mark_vf_flr(struct qed_hwfn *p_hwfn, u32 *disabled_vfs);
 
 /**
  * @brief Search extended TLVs in request/reply buffer.
@@ -407,10 +407,10 @@ static inline int qed_sriov_eqe_event(struct qed_hwfn *p_hwfn,
        return -EINVAL;
 }
 
-static inline int qed_iov_mark_vf_flr(struct qed_hwfn *p_hwfn,
-                                     u32 *disabled_vfs)
+static inline bool qed_iov_mark_vf_flr(struct qed_hwfn *p_hwfn,
+                                      u32 *disabled_vfs)
 {
-       return 0;
+       return false;
 }
 
 static inline void qed_iov_wq_stop(struct qed_dev *cdev, bool schedule_first)
index 15d2855ec56352f861b0915823992c505b9c1b9d..798786562b1bbc266f9da05132cd37f39a5b2cdf 100644
@@ -134,14 +134,20 @@ static int qed_send_msg2pf(struct qed_hwfn *p_hwfn, u8 *done, u32 resp_size)
        }
 
        if (!*done) {
-               DP_VERBOSE(p_hwfn, QED_MSG_IOV,
-                          "VF <-- PF Timeout [Type %d]\n",
-                          p_req->first_tlv.tl.type);
+               DP_NOTICE(p_hwfn,
+                         "VF <-- PF Timeout [Type %d]\n",
+                         p_req->first_tlv.tl.type);
                rc = -EBUSY;
        } else {
-               DP_VERBOSE(p_hwfn, QED_MSG_IOV,
-                          "PF response: %d [Type %d]\n",
-                          *done, p_req->first_tlv.tl.type);
+               if ((*done != PFVF_STATUS_SUCCESS) &&
+                   (*done != PFVF_STATUS_NO_RESOURCE))
+                       DP_NOTICE(p_hwfn,
+                                 "PF response: %d [Type %d]\n",
+                                 *done, p_req->first_tlv.tl.type);
+               else
+                       DP_VERBOSE(p_hwfn, QED_MSG_IOV,
+                                  "PF response: %d [Type %d]\n",
+                                  *done, p_req->first_tlv.tl.type);
        }
 
        return rc;
index 7da0b165d8bc2718d28fbbd108040cc4d948a03a..105c0edd2a01eec6a2f6af0547551265838bd53f 100644
@@ -275,6 +275,8 @@ struct vfpf_stop_rxqs_tlv {
        struct vfpf_first_tlv first_tlv;
 
        u16 rx_qid;
+
+       /* this field is deprecated and should *always* be set to '1' */
        u8 num_rxqs;
        u8 cqe_completion;
        u8 padding[4];
@@ -285,6 +287,8 @@ struct vfpf_stop_txqs_tlv {
        struct vfpf_first_tlv first_tlv;
 
        u16 tx_qid;
+
+       /* this field is deprecated and should *always* be set to '1' */
        u8 num_txqs;
        u8 padding[5];
 };
index f2aaef2cfb86d7a31c5fdc6f5d5940e1c9459a70..5e7ad25db8adb9d25606bc61742298af8360bcb8 100644
@@ -50,7 +50,7 @@
 #define QEDE_MAJOR_VERSION             8
 #define QEDE_MINOR_VERSION             10
 #define QEDE_REVISION_VERSION          10
-#define QEDE_ENGINEERING_VERSION       20
+#define QEDE_ENGINEERING_VERSION       21
 #define DRV_MODULE_VERSION __stringify(QEDE_MAJOR_VERSION) "." \
                __stringify(QEDE_MINOR_VERSION) "."             \
                __stringify(QEDE_REVISION_VERSION) "."          \
@@ -58,7 +58,7 @@
 
 #define DRV_MODULE_SYM         qede
 
-struct qede_stats {
+struct qede_stats_common {
        u64 no_buff_discards;
        u64 packet_too_big_discard;
        u64 ttl0_discard;
@@ -90,11 +90,6 @@ struct qede_stats {
        u64 rx_256_to_511_byte_packets;
        u64 rx_512_to_1023_byte_packets;
        u64 rx_1024_to_1518_byte_packets;
-       u64 rx_1519_to_1522_byte_packets;
-       u64 rx_1519_to_2047_byte_packets;
-       u64 rx_2048_to_4095_byte_packets;
-       u64 rx_4096_to_9216_byte_packets;
-       u64 rx_9217_to_16383_byte_packets;
        u64 rx_crc_errors;
        u64 rx_mac_crtl_frames;
        u64 rx_pause_frames;
@@ -111,17 +106,39 @@ struct qede_stats {
        u64 tx_256_to_511_byte_packets;
        u64 tx_512_to_1023_byte_packets;
        u64 tx_1024_to_1518_byte_packets;
+       u64 tx_pause_frames;
+       u64 tx_pfc_frames;
+       u64 brb_truncates;
+       u64 brb_discards;
+       u64 tx_mac_ctrl_frames;
+};
+
+struct qede_stats_bb {
+       u64 rx_1519_to_1522_byte_packets;
+       u64 rx_1519_to_2047_byte_packets;
+       u64 rx_2048_to_4095_byte_packets;
+       u64 rx_4096_to_9216_byte_packets;
+       u64 rx_9217_to_16383_byte_packets;
        u64 tx_1519_to_2047_byte_packets;
        u64 tx_2048_to_4095_byte_packets;
        u64 tx_4096_to_9216_byte_packets;
        u64 tx_9217_to_16383_byte_packets;
-       u64 tx_pause_frames;
-       u64 tx_pfc_frames;
        u64 tx_lpi_entry_count;
        u64 tx_total_collisions;
-       u64 brb_truncates;
-       u64 brb_discards;
-       u64 tx_mac_ctrl_frames;
+};
+
+struct qede_stats_ah {
+       u64 rx_1519_to_max_byte_packets;
+       u64 tx_1519_to_max_byte_packets;
+};
+
+struct qede_stats {
+       struct qede_stats_common common;
+
+       union {
+               struct qede_stats_bb bb;
+               struct qede_stats_ah ah;
+       };
 };
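
The restructuring above keeps counters common to both ASIC generations in
qede_stats_common and overlays the BB-only and AH-only blocks in an anonymous
union, since any given device belongs to exactly one family. A compilable
miniature of the layout, showing that the union costs max(bb, ah) rather than
bb + ah:

    #include <stdint.h>
    #include <stdio.h>

    struct stats_common { uint64_t rx_pkts, tx_pkts; };
    struct stats_bb     { uint64_t tx_lpi_entry_count, tx_total_collisions; };
    struct stats_ah     { uint64_t rx_1519_to_max_byte_packets; };

    struct stats {
        struct stats_common common;
        union {                 /* only one family is active at a time */
            struct stats_bb bb;
            struct stats_ah ah;
        };
    };

    int main(void)
    {
        printf("%zu bytes\n", sizeof(struct stats));  /* 32, not 40 */
        return 0;
    }
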
 
 struct qede_vlan {
@@ -158,6 +175,10 @@ struct qede_dev {
        struct qed_dev_eth_info dev_info;
 #define QEDE_MAX_RSS_CNT(edev) ((edev)->dev_info.num_queues)
 #define QEDE_MAX_TSS_CNT(edev) ((edev)->dev_info.num_queues)
+#define QEDE_IS_BB(edev) \
+       ((edev)->dev_info.common.dev_type == QED_DEV_TYPE_BB)
+#define QEDE_IS_AH(edev) \
+       ((edev)->dev_info.common.dev_type == QED_DEV_TYPE_AH)
 
        struct qede_fastpath            *fp_array;
        u8                              req_num_tx;
@@ -292,21 +313,24 @@ struct qede_rx_queue {
        u8 data_direction;
        u8 rxq_id;
 
+       /* Used once per NAPI run */
+       u16 num_rx_buffers;
+
+       u16 rx_headroom;
+
        u32 rx_buf_size;
        u32 rx_buf_seg_size;
 
-       u64 rcv_pkts;
-
        struct sw_rx_data *sw_rx_ring;
        struct qed_chain rx_bd_ring;
        struct qed_chain rx_comp_ring ____cacheline_aligned;
 
-       /* Used once per each NAPI run */
-       u16 num_rx_buffers;
-
        /* GRO */
        struct qede_agg_info tpa_info[ETH_TPA_MAX_AGGS_NUM];
 
+       /* Used once per NAPI run */
+       u64 rcv_pkts;
+
        u64 rx_hw_errors;
        u64 rx_alloc_errors;
        u64 rx_ip_frags;
@@ -328,6 +352,11 @@ struct sw_tx_bd {
 #define QEDE_TSO_SPLIT_BD              BIT(0)
 };
 
+struct sw_tx_xdp {
+       struct page *page;
+       dma_addr_t mapping;
+};
+
 struct qede_tx_queue {
        u8 is_xdp;
        bool is_legacy;
@@ -351,11 +380,11 @@ struct qede_tx_queue {
 #define QEDE_TXQ_IDX_TO_XDP(edev, idx) ((idx) + QEDE_MAX_TSS_CNT(edev))
 
        /* Regular Tx requires skb + metadata for release purposes,
-        * while XDP requires only the pages themselves.
+        * while XDP requires the pages and the mapped address.
         */
        union {
                struct sw_tx_bd *skbs;
-               struct page **pages;
+               struct sw_tx_xdp *xdp;
        } sw_tx_ring;
 
        struct qed_chain tx_pbl;
index 8979531332455453f4fcb54b4a1a16017005c61c..4dcfe9614731db70d673cc15aec6da26ff673743 100644
@@ -75,16 +75,33 @@ static const struct {
        QEDE_TQSTAT(stopped_cnt),
 };
 
-#define QEDE_STAT_OFFSET(stat_name) (offsetof(struct qede_stats, stat_name))
-#define QEDE_STAT_STRING(stat_name) (#stat_name)
-#define _QEDE_STAT(stat_name, pf_only) \
-        {QEDE_STAT_OFFSET(stat_name), QEDE_STAT_STRING(stat_name), pf_only}
-#define QEDE_PF_STAT(stat_name)        _QEDE_STAT(stat_name, true)
-#define QEDE_STAT(stat_name)   _QEDE_STAT(stat_name, false)
+#define QEDE_STAT_OFFSET(stat_name, type, base) \
+       (offsetof(type, stat_name) + (base))
+#define QEDE_STAT_STRING(stat_name)    (#stat_name)
+#define _QEDE_STAT(stat_name, type, base, attr) \
+       {QEDE_STAT_OFFSET(stat_name, type, base), \
+        QEDE_STAT_STRING(stat_name), \
+        attr}
+#define QEDE_STAT(stat_name) \
+       _QEDE_STAT(stat_name, struct qede_stats_common, 0, 0x0)
+#define QEDE_PF_STAT(stat_name) \
+       _QEDE_STAT(stat_name, struct qede_stats_common, 0, \
+                  BIT(QEDE_STAT_PF_ONLY))
+#define QEDE_PF_BB_STAT(stat_name) \
+       _QEDE_STAT(stat_name, struct qede_stats_bb, \
+                  offsetof(struct qede_stats, bb), \
+                  BIT(QEDE_STAT_PF_ONLY) | BIT(QEDE_STAT_BB_ONLY))
+#define QEDE_PF_AH_STAT(stat_name) \
+       _QEDE_STAT(stat_name, struct qede_stats_ah, \
+                  offsetof(struct qede_stats, ah), \
+                  BIT(QEDE_STAT_PF_ONLY) | BIT(QEDE_STAT_AH_ONLY))
 static const struct {
        u64 offset;
        char string[ETH_GSTRING_LEN];
-       bool pf_only;
+       unsigned long attr;
+#define QEDE_STAT_PF_ONLY      0
+#define QEDE_STAT_BB_ONLY      1
+#define QEDE_STAT_AH_ONLY      2
 } qede_stats_arr[] = {
        QEDE_STAT(rx_ucast_bytes),
        QEDE_STAT(rx_mcast_bytes),
@@ -106,22 +123,23 @@ static const struct {
        QEDE_PF_STAT(rx_256_to_511_byte_packets),
        QEDE_PF_STAT(rx_512_to_1023_byte_packets),
        QEDE_PF_STAT(rx_1024_to_1518_byte_packets),
-       QEDE_PF_STAT(rx_1519_to_1522_byte_packets),
-       QEDE_PF_STAT(rx_1519_to_2047_byte_packets),
-       QEDE_PF_STAT(rx_2048_to_4095_byte_packets),
-       QEDE_PF_STAT(rx_4096_to_9216_byte_packets),
-       QEDE_PF_STAT(rx_9217_to_16383_byte_packets),
+       QEDE_PF_BB_STAT(rx_1519_to_1522_byte_packets),
+       QEDE_PF_BB_STAT(rx_1519_to_2047_byte_packets),
+       QEDE_PF_BB_STAT(rx_2048_to_4095_byte_packets),
+       QEDE_PF_BB_STAT(rx_4096_to_9216_byte_packets),
+       QEDE_PF_BB_STAT(rx_9217_to_16383_byte_packets),
+       QEDE_PF_AH_STAT(rx_1519_to_max_byte_packets),
        QEDE_PF_STAT(tx_64_byte_packets),
        QEDE_PF_STAT(tx_65_to_127_byte_packets),
        QEDE_PF_STAT(tx_128_to_255_byte_packets),
        QEDE_PF_STAT(tx_256_to_511_byte_packets),
        QEDE_PF_STAT(tx_512_to_1023_byte_packets),
        QEDE_PF_STAT(tx_1024_to_1518_byte_packets),
-       QEDE_PF_STAT(tx_1519_to_2047_byte_packets),
-       QEDE_PF_STAT(tx_2048_to_4095_byte_packets),
-       QEDE_PF_STAT(tx_4096_to_9216_byte_packets),
-       QEDE_PF_STAT(tx_9217_to_16383_byte_packets),
-
+       QEDE_PF_BB_STAT(tx_1519_to_2047_byte_packets),
+       QEDE_PF_BB_STAT(tx_2048_to_4095_byte_packets),
+       QEDE_PF_BB_STAT(tx_4096_to_9216_byte_packets),
+       QEDE_PF_BB_STAT(tx_9217_to_16383_byte_packets),
+       QEDE_PF_AH_STAT(tx_1519_to_max_byte_packets),
        QEDE_PF_STAT(rx_mac_crtl_frames),
        QEDE_PF_STAT(tx_mac_ctrl_frames),
        QEDE_PF_STAT(rx_pause_frames),
@@ -136,8 +154,8 @@ static const struct {
        QEDE_PF_STAT(rx_jabbers),
        QEDE_PF_STAT(rx_undersize_packets),
        QEDE_PF_STAT(rx_fragments),
-       QEDE_PF_STAT(tx_lpi_entry_count),
-       QEDE_PF_STAT(tx_total_collisions),
+       QEDE_PF_BB_STAT(tx_lpi_entry_count),
+       QEDE_PF_BB_STAT(tx_total_collisions),
        QEDE_PF_STAT(brb_truncates),
        QEDE_PF_STAT(brb_discards),
        QEDE_STAT(no_buff_discards),
@@ -155,6 +173,12 @@ static const struct {
 };
 
 #define QEDE_NUM_STATS ARRAY_SIZE(qede_stats_arr)
+#define QEDE_STAT_IS_PF_ONLY(i) \
+       test_bit(QEDE_STAT_PF_ONLY, &qede_stats_arr[i].attr)
+#define QEDE_STAT_IS_BB_ONLY(i) \
+       test_bit(QEDE_STAT_BB_ONLY, &qede_stats_arr[i].attr)
+#define QEDE_STAT_IS_AH_ONLY(i) \
+       test_bit(QEDE_STAT_AH_ONLY, &qede_stats_arr[i].attr)
 
 enum {
        QEDE_PRI_FLAG_CMT,
@@ -213,6 +237,13 @@ static void qede_get_strings_stats_rxq(struct qede_dev *edev,
        }
 }
 
+static bool qede_is_irrelevant_stat(struct qede_dev *edev, int stat_index)
+{
+       return (IS_VF(edev) && QEDE_STAT_IS_PF_ONLY(stat_index)) ||
+              (QEDE_IS_BB(edev) && QEDE_STAT_IS_AH_ONLY(stat_index)) ||
+              (QEDE_IS_AH(edev) && QEDE_STAT_IS_BB_ONLY(stat_index));
+}
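
qede_is_irrelevant_stat() folds the three filters (PF-only stats on a VF,
AH-only stats on BB, BB-only stats on AH) into one predicate over the
attribute bits declared in the stats array. A standalone sketch of the same
bit-test logic (for brevity the example treats every non-BB device as AH):

    #include <stdbool.h>
    #include <stddef.h>
    #include <stdio.h>

    enum { ATTR_PF_ONLY, ATTR_BB_ONLY, ATTR_AH_ONLY };

    struct stat_def {
        size_t offset;
        const char *name;
        unsigned long attr;   /* mask of the ATTR_* bits */
    };

    struct dev { bool is_vf, is_bb; };

    static bool irrelevant(const struct dev *d, const struct stat_def *s)
    {
        return (d->is_vf  && (s->attr & (1UL << ATTR_PF_ONLY))) ||
               (d->is_bb  && (s->attr & (1UL << ATTR_AH_ONLY))) ||
               (!d->is_bb && (s->attr & (1UL << ATTR_BB_ONLY)));
    }

    int main(void)
    {
        struct stat_def s = { 0, "tx_lpi_entry_count",
                              (1UL << ATTR_PF_ONLY) | (1UL << ATTR_BB_ONLY) };
        struct dev ah_pf = { .is_vf = false, .is_bb = false };

        printf("%s on an AH PF is %s\n", s.name,
               irrelevant(&ah_pf, &s) ? "skipped" : "exported");
        return 0;
    }
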
+
 static void qede_get_strings_stats(struct qede_dev *edev, u8 *buf)
 {
        struct qede_fastpath *fp;
@@ -234,7 +265,7 @@ static void qede_get_strings_stats(struct qede_dev *edev, u8 *buf)
 
        /* Account for non-queue statistics */
        for (i = 0; i < QEDE_NUM_STATS; i++) {
-               if (IS_VF(edev) && qede_stats_arr[i].pf_only)
+               if (qede_is_irrelevant_stat(edev, i))
                        continue;
                strcpy(buf, qede_stats_arr[i].string);
                buf += ETH_GSTRING_LEN;
@@ -309,7 +340,7 @@ static void qede_get_ethtool_stats(struct net_device *dev,
        }
 
        for (i = 0; i < QEDE_NUM_STATS; i++) {
-               if (IS_VF(edev) && qede_stats_arr[i].pf_only)
+               if (qede_is_irrelevant_stat(edev, i))
                        continue;
                *buf = *((u64 *)(((void *)&edev->stats) +
                                 qede_stats_arr[i].offset));
@@ -323,17 +354,13 @@ static void qede_get_ethtool_stats(struct net_device *dev,
 static int qede_get_sset_count(struct net_device *dev, int stringset)
 {
        struct qede_dev *edev = netdev_priv(dev);
-       int num_stats = QEDE_NUM_STATS;
+       int num_stats = QEDE_NUM_STATS, i;
 
        switch (stringset) {
        case ETH_SS_STATS:
-               if (IS_VF(edev)) {
-                       int i;
-
-                       for (i = 0; i < QEDE_NUM_STATS; i++)
-                               if (qede_stats_arr[i].pf_only)
-                                       num_stats--;
-               }
+               for (i = 0; i < QEDE_NUM_STATS; i++)
+                       if (qede_is_irrelevant_stat(edev, i))
+                               num_stats--;
 
                /* Account for the Regular Tx statistics */
                num_stats += QEDE_TSS_COUNT(edev) * QEDE_NUM_TQSTATS;
index 107c3fda4792c3b7f2fb83168f192b1aef35fc70..b00a4fce44b7b2388e6930d0c00c2e7d3e991531 100644
@@ -520,11 +520,6 @@ static int qede_xdp_set(struct qede_dev *edev, struct bpf_prog *prog)
 {
        struct qede_reload_args args;
 
-       if (prog && prog->xdp_adjust_head) {
-               DP_ERR(edev, "Does not support bpf_xdp_adjust_head()\n");
-               return -EOPNOTSUPP;
-       }
-
        /* If we're called, there was already a bpf reference increment */
        args.func = &qede_xdp_reload_func;
        args.u.new_prog = prog;
@@ -537,6 +532,11 @@ int qede_xdp(struct net_device *dev, struct netdev_xdp *xdp)
 {
        struct qede_dev *edev = netdev_priv(dev);
 
+       if (IS_VF(edev)) {
+               DP_NOTICE(edev, "VFs don't support XDP\n");
+               return -EOPNOTSUPP;
+       }
+
        switch (xdp->command) {
        case XDP_SETUP_PROG:
                return qede_xdp_set(edev, xdp->prog);
index 1e65038c8fc06cb62172f0a144729e51ecf8faaf..961b1d36b9eb8e9a6ff0aaa5b0060f9970ef957f 100644
@@ -87,7 +87,8 @@ int qede_alloc_rx_buffer(struct qede_rx_queue *rxq, bool allow_lazy)
        rx_bd = (struct eth_rx_bd *)qed_chain_produce(&rxq->rx_bd_ring);
        WARN_ON(!rx_bd);
        rx_bd->addr.hi = cpu_to_le32(upper_32_bits(mapping));
-       rx_bd->addr.lo = cpu_to_le32(lower_32_bits(mapping));
+       rx_bd->addr.lo = cpu_to_le32(lower_32_bits(mapping) +
+                                    rxq->rx_headroom);
 
        rxq->sw_rx_prod++;
        rxq->filled_buffers++;
@@ -360,7 +361,8 @@ static int qede_xdp_xmit(struct qede_dev *edev, struct qede_fastpath *fp,
                                   metadata->mapping + padding,
                                   length, PCI_DMA_TODEVICE);
 
-       txq->sw_tx_ring.pages[idx] = metadata->data;
+       txq->sw_tx_ring.xdp[idx].page = metadata->data;
+       txq->sw_tx_ring.xdp[idx].mapping = metadata->mapping;
        txq->sw_tx_prod++;
 
        /* Mark the fastpath for future XDP doorbell */
@@ -384,19 +386,19 @@ int qede_txq_has_work(struct qede_tx_queue *txq)
 
 static void qede_xdp_tx_int(struct qede_dev *edev, struct qede_tx_queue *txq)
 {
-       struct eth_tx_1st_bd *bd;
-       u16 hw_bd_cons;
+       u16 hw_bd_cons, idx;
 
        hw_bd_cons = le16_to_cpu(*txq->hw_cons_ptr);
        barrier();
 
        while (hw_bd_cons != qed_chain_get_cons_idx(&txq->tx_pbl)) {
-               bd = (struct eth_tx_1st_bd *)qed_chain_consume(&txq->tx_pbl);
+               qed_chain_consume(&txq->tx_pbl);
+               idx = txq->sw_tx_cons & NUM_TX_BDS_MAX;
 
-               dma_unmap_single(&edev->pdev->dev, BD_UNMAP_ADDR(bd),
-                                PAGE_SIZE, DMA_BIDIRECTIONAL);
-               __free_page(txq->sw_tx_ring.pages[txq->sw_tx_cons &
-                                                 NUM_TX_BDS_MAX]);
+               dma_unmap_page(&edev->pdev->dev,
+                              txq->sw_tx_ring.xdp[idx].mapping,
+                              PAGE_SIZE, DMA_BIDIRECTIONAL);
+               __free_page(txq->sw_tx_ring.xdp[idx].page);
 
                txq->sw_tx_cons++;
                txq->xmit_pkts++;
@@ -508,7 +510,8 @@ static inline void qede_reuse_page(struct qede_rx_queue *rxq,
        new_mapping = curr_prod->mapping + curr_prod->page_offset;
 
        rx_bd_prod->addr.hi = cpu_to_le32(upper_32_bits(new_mapping));
-       rx_bd_prod->addr.lo = cpu_to_le32(lower_32_bits(new_mapping));
+       rx_bd_prod->addr.lo = cpu_to_le32(lower_32_bits(new_mapping) +
+                                         rxq->rx_headroom);
 
        rxq->sw_rx_prod++;
        curr_cons->data = NULL;
@@ -624,7 +627,6 @@ static inline void qede_skb_receive(struct qede_dev *edev,
                __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);
 
        napi_gro_receive(&fp->napi, skb);
-       rxq->rcv_pkts++;
 }
 
 static void qede_set_gro_params(struct qede_dev *edev,
@@ -884,9 +886,9 @@ static inline void qede_tpa_cont(struct qede_dev *edev,
                       "Strange - TPA cont with more than a single len_list entry\n");
 }
 
-static void qede_tpa_end(struct qede_dev *edev,
-                        struct qede_fastpath *fp,
-                        struct eth_fast_path_rx_tpa_end_cqe *cqe)
+static int qede_tpa_end(struct qede_dev *edev,
+                       struct qede_fastpath *fp,
+                       struct eth_fast_path_rx_tpa_end_cqe *cqe)
 {
        struct qede_rx_queue *rxq = fp->rxq;
        struct qede_agg_info *tpa_info;
@@ -934,11 +936,12 @@ static void qede_tpa_end(struct qede_dev *edev,
 
        tpa_info->state = QEDE_AGG_STATE_NONE;
 
-       return;
+       return 1;
 err:
        tpa_info->state = QEDE_AGG_STATE_NONE;
        dev_kfree_skb_any(tpa_info->skb);
        tpa_info->skb = NULL;
+       return 0;
 }
 
 static u8 qede_check_notunn_csum(u16 flag)
@@ -990,14 +993,15 @@ static bool qede_rx_xdp(struct qede_dev *edev,
                        struct qede_rx_queue *rxq,
                        struct bpf_prog *prog,
                        struct sw_rx_data *bd,
-                       struct eth_fast_path_rx_reg_cqe *cqe)
+                       struct eth_fast_path_rx_reg_cqe *cqe,
+                       u16 *data_offset, u16 *len)
 {
-       u16 len = le16_to_cpu(cqe->len_on_first_bd);
        struct xdp_buff xdp;
        enum xdp_action act;
 
-       xdp.data = page_address(bd->data) + cqe->placement_offset;
-       xdp.data_end = xdp.data + len;
+       xdp.data_hard_start = page_address(bd->data);
+       xdp.data = xdp.data_hard_start + *data_offset;
+       xdp.data_end = xdp.data + *len;
 
        /* Queues always have a full reset currently, so for the time
         * being until there's atomic program replace just mark read
@@ -1007,6 +1011,10 @@ static bool qede_rx_xdp(struct qede_dev *edev,
        act = bpf_prog_run_xdp(prog, &xdp);
        rcu_read_unlock();
 
+       /* Recalculate, as XDP might have changed the headers */
+       *data_offset = xdp.data - xdp.data_hard_start;
+       *len = xdp.data_end - xdp.data;
+
        if (act == XDP_PASS)
                return true;
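
With data_hard_start published, an XDP program may move xdp.data via
bpf_xdp_adjust_head(), so the driver re-derives the buffer offset and length
after the program runs instead of trusting the CQE values. A userspace model
of that pointer arithmetic (the 256-byte headroom, 1000-byte frame and
14-byte header push are arbitrary):

    #include <stdint.h>
    #include <stdio.h>

    struct xdp_buff { void *data_hard_start, *data, *data_end; };

    /* stands in for a program that calls bpf_xdp_adjust_head(-14) */
    static void fake_prog(struct xdp_buff *x)
    {
        x->data = (uint8_t *)x->data - 14;
    }

    int main(void)
    {
        uint8_t page[4096];
        int offset = 256, len = 1000;
        struct xdp_buff x = {
            .data_hard_start = page,
            .data            = page + offset,
            .data_end        = page + offset + len,
        };

        fake_prog(&x);

        /* recalculate, as XDP might have changed the headers */
        offset = (uint8_t *)x.data - (uint8_t *)x.data_hard_start;
        len    = (uint8_t *)x.data_end - (uint8_t *)x.data;
        printf("offset=%d len=%d\n", offset, len);  /* 242 1014 */
        return 0;
    }
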
 
@@ -1025,7 +1033,7 @@ static bool qede_rx_xdp(struct qede_dev *edev,
                /* Now if there's a transmission problem, we'd still have to
                 * throw current buffer, as replacement was already allocated.
                 */
-               if (qede_xdp_xmit(edev, fp, bd, cqe->placement_offset, len)) {
+               if (qede_xdp_xmit(edev, fp, bd, *data_offset, *len)) {
                        dma_unmap_page(rxq->dev, bd->mapping,
                                       PAGE_SIZE, DMA_BIDIRECTIONAL);
                        __free_page(bd->data);
@@ -1052,7 +1060,7 @@ static struct sk_buff *qede_rx_allocate_skb(struct qede_dev *edev,
                                            struct sw_rx_data *bd, u16 len,
                                            u16 pad)
 {
-       unsigned int offset = bd->page_offset;
+       unsigned int offset = bd->page_offset + pad;
        struct skb_frag_struct *frag;
        struct page *page = bd->data;
        unsigned int pull_len;
@@ -1069,7 +1077,7 @@ static struct sk_buff *qede_rx_allocate_skb(struct qede_dev *edev,
         */
        if (len + pad <= edev->rx_copybreak) {
                memcpy(skb_put(skb, len),
-                      page_address(page) + pad + offset, len);
+                      page_address(page) + offset, len);
                qede_reuse_page(rxq, bd);
                goto out;
        }
@@ -1077,7 +1085,7 @@ static struct sk_buff *qede_rx_allocate_skb(struct qede_dev *edev,
        frag = &skb_shinfo(skb)->frags[0];
 
        skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
-                       page, pad + offset, len, rxq->rx_buf_seg_size);
+                       page, offset, len, rxq->rx_buf_seg_size);
 
        va = skb_frag_address(frag);
        pull_len = eth_get_headlen(va, QEDE_RX_HDR_SIZE);
@@ -1178,8 +1186,7 @@ static int qede_rx_process_tpa_cqe(struct qede_dev *edev,
                qede_tpa_cont(edev, rxq, &cqe->fast_path_tpa_cont);
                return 0;
        case ETH_RX_CQE_TYPE_TPA_END:
-               qede_tpa_end(edev, fp, &cqe->fast_path_tpa_end);
-               return 1;
+               return qede_tpa_end(edev, fp, &cqe->fast_path_tpa_end);
        default:
                return 0;
        }
@@ -1224,12 +1231,13 @@ static int qede_rx_process_cqe(struct qede_dev *edev,
 
        fp_cqe = &cqe->fast_path_regular;
        len = le16_to_cpu(fp_cqe->len_on_first_bd);
-       pad = fp_cqe->placement_offset;
+       pad = fp_cqe->placement_offset + rxq->rx_headroom;
 
        /* Run eBPF program if one is attached */
        if (xdp_prog)
-               if (!qede_rx_xdp(edev, fp, rxq, xdp_prog, bd, fp_cqe))
-                       return 1;
+               if (!qede_rx_xdp(edev, fp, rxq, xdp_prog, bd, fp_cqe,
+                                &pad, &len))
+                       return 0;
 
        /* If this is an error packet then drop it */
        flags = cqe->fast_path_regular.pars_flags.flags;
@@ -1290,8 +1298,8 @@ static int qede_rx_int(struct qede_fastpath *fp, int budget)
 {
        struct qede_rx_queue *rxq = fp->rxq;
        struct qede_dev *edev = fp->edev;
+       int work_done = 0, rcv_pkts = 0;
        u16 hw_comp_cons, sw_comp_cons;
-       int work_done = 0;
 
        hw_comp_cons = le16_to_cpu(*rxq->hw_cons_ptr);
        sw_comp_cons = qed_chain_get_cons_idx(&rxq->rx_comp_ring);
@@ -1305,12 +1313,14 @@ static int qede_rx_int(struct qede_fastpath *fp, int budget)
 
        /* Loop to complete all indicated BDs */
        while ((sw_comp_cons != hw_comp_cons) && (work_done < budget)) {
-               qede_rx_process_cqe(edev, fp, rxq);
+               rcv_pkts += qede_rx_process_cqe(edev, fp, rxq);
                qed_chain_recycle_consumed(&rxq->rx_comp_ring);
                sw_comp_cons = qed_chain_get_cons_idx(&rxq->rx_comp_ring);
                work_done++;
        }
 
+       rxq->rcv_pkts += rcv_pkts;
+
        /* Allocate replacement buffers */
        while (rxq->num_rx_buffers - rxq->filled_buffers)
                if (qede_alloc_rx_buffer(rxq, false))
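
qede_rx_int() now accumulates delivered packets in a local and folds the sum
into rxq->rcv_pkts once per poll, rather than incrementing the queue counter
for every packet inside qede_skb_receive(). The batching in miniature:

    #include <stdio.h>

    struct rxq { unsigned long rcv_pkts; };

    static int process_cqe(void) { return 1; }  /* one packet delivered */

    static int rx_poll(struct rxq *rxq, int budget)
    {
        int work_done = 0, rcv_pkts = 0;

        while (work_done < budget) {
            rcv_pkts += process_cqe();
            work_done++;
        }
        rxq->rcv_pkts += rcv_pkts;  /* single write to the hot counter */
        return work_done;
    }

    int main(void)
    {
        struct rxq rxq = { 0 };
        int done = rx_poll(&rxq, 16);

        printf("done=%d pkts=%lu\n", done, rxq.rcv_pkts);
        return 0;
    }
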
index 3a78c3f2515748ca882f89d7da2c3b4e5cfad79e..8c2baf8b2a08759268943fe2b5f3c1cd99459f01 100644
@@ -84,6 +84,8 @@ static const struct qed_eth_ops *qed_ops;
 #define CHIP_NUM_57980S_50             0x1654
 #define CHIP_NUM_57980S_25             0x1656
 #define CHIP_NUM_57980S_IOV            0x1664
+#define CHIP_NUM_AH                    0x8070
+#define CHIP_NUM_AH_IOV                        0x8090
 
 #ifndef PCI_DEVICE_ID_NX2_57980E
 #define PCI_DEVICE_ID_57980S_40                CHIP_NUM_57980S_40
@@ -93,6 +95,9 @@ static const struct qed_eth_ops *qed_ops;
 #define PCI_DEVICE_ID_57980S_50                CHIP_NUM_57980S_50
 #define PCI_DEVICE_ID_57980S_25                CHIP_NUM_57980S_25
 #define PCI_DEVICE_ID_57980S_IOV       CHIP_NUM_57980S_IOV
+#define PCI_DEVICE_ID_AH               CHIP_NUM_AH
+#define PCI_DEVICE_ID_AH_IOV           CHIP_NUM_AH_IOV
+
 #endif
 
 enum qede_pci_private {
@@ -109,6 +114,10 @@ static const struct pci_device_id qede_pci_tbl[] = {
        {PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_25), QEDE_PRIVATE_PF},
 #ifdef CONFIG_QED_SRIOV
        {PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_IOV), QEDE_PRIVATE_VF},
+#endif
+       {PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_AH), QEDE_PRIVATE_PF},
+#ifdef CONFIG_QED_SRIOV
+       {PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_AH_IOV), QEDE_PRIVATE_VF},
 #endif
        { 0 }
 };
@@ -314,122 +323,135 @@ static int qede_close(struct net_device *ndev);
 
 void qede_fill_by_demand_stats(struct qede_dev *edev)
 {
+       struct qede_stats_common *p_common = &edev->stats.common;
        struct qed_eth_stats stats;
 
        edev->ops->get_vport_stats(edev->cdev, &stats);
-       edev->stats.no_buff_discards = stats.no_buff_discards;
-       edev->stats.packet_too_big_discard = stats.packet_too_big_discard;
-       edev->stats.ttl0_discard = stats.ttl0_discard;
-       edev->stats.rx_ucast_bytes = stats.rx_ucast_bytes;
-       edev->stats.rx_mcast_bytes = stats.rx_mcast_bytes;
-       edev->stats.rx_bcast_bytes = stats.rx_bcast_bytes;
-       edev->stats.rx_ucast_pkts = stats.rx_ucast_pkts;
-       edev->stats.rx_mcast_pkts = stats.rx_mcast_pkts;
-       edev->stats.rx_bcast_pkts = stats.rx_bcast_pkts;
-       edev->stats.mftag_filter_discards = stats.mftag_filter_discards;
-       edev->stats.mac_filter_discards = stats.mac_filter_discards;
-
-       edev->stats.tx_ucast_bytes = stats.tx_ucast_bytes;
-       edev->stats.tx_mcast_bytes = stats.tx_mcast_bytes;
-       edev->stats.tx_bcast_bytes = stats.tx_bcast_bytes;
-       edev->stats.tx_ucast_pkts = stats.tx_ucast_pkts;
-       edev->stats.tx_mcast_pkts = stats.tx_mcast_pkts;
-       edev->stats.tx_bcast_pkts = stats.tx_bcast_pkts;
-       edev->stats.tx_err_drop_pkts = stats.tx_err_drop_pkts;
-       edev->stats.coalesced_pkts = stats.tpa_coalesced_pkts;
-       edev->stats.coalesced_events = stats.tpa_coalesced_events;
-       edev->stats.coalesced_aborts_num = stats.tpa_aborts_num;
-       edev->stats.non_coalesced_pkts = stats.tpa_not_coalesced_pkts;
-       edev->stats.coalesced_bytes = stats.tpa_coalesced_bytes;
-
-       edev->stats.rx_64_byte_packets = stats.rx_64_byte_packets;
-       edev->stats.rx_65_to_127_byte_packets = stats.rx_65_to_127_byte_packets;
-       edev->stats.rx_128_to_255_byte_packets =
-                               stats.rx_128_to_255_byte_packets;
-       edev->stats.rx_256_to_511_byte_packets =
-                               stats.rx_256_to_511_byte_packets;
-       edev->stats.rx_512_to_1023_byte_packets =
-                               stats.rx_512_to_1023_byte_packets;
-       edev->stats.rx_1024_to_1518_byte_packets =
-                               stats.rx_1024_to_1518_byte_packets;
-       edev->stats.rx_1519_to_1522_byte_packets =
-                               stats.rx_1519_to_1522_byte_packets;
-       edev->stats.rx_1519_to_2047_byte_packets =
-                               stats.rx_1519_to_2047_byte_packets;
-       edev->stats.rx_2048_to_4095_byte_packets =
-                               stats.rx_2048_to_4095_byte_packets;
-       edev->stats.rx_4096_to_9216_byte_packets =
-                               stats.rx_4096_to_9216_byte_packets;
-       edev->stats.rx_9217_to_16383_byte_packets =
-                               stats.rx_9217_to_16383_byte_packets;
-       edev->stats.rx_crc_errors = stats.rx_crc_errors;
-       edev->stats.rx_mac_crtl_frames = stats.rx_mac_crtl_frames;
-       edev->stats.rx_pause_frames = stats.rx_pause_frames;
-       edev->stats.rx_pfc_frames = stats.rx_pfc_frames;
-       edev->stats.rx_align_errors = stats.rx_align_errors;
-       edev->stats.rx_carrier_errors = stats.rx_carrier_errors;
-       edev->stats.rx_oversize_packets = stats.rx_oversize_packets;
-       edev->stats.rx_jabbers = stats.rx_jabbers;
-       edev->stats.rx_undersize_packets = stats.rx_undersize_packets;
-       edev->stats.rx_fragments = stats.rx_fragments;
-       edev->stats.tx_64_byte_packets = stats.tx_64_byte_packets;
-       edev->stats.tx_65_to_127_byte_packets = stats.tx_65_to_127_byte_packets;
-       edev->stats.tx_128_to_255_byte_packets =
-                               stats.tx_128_to_255_byte_packets;
-       edev->stats.tx_256_to_511_byte_packets =
-                               stats.tx_256_to_511_byte_packets;
-       edev->stats.tx_512_to_1023_byte_packets =
-                               stats.tx_512_to_1023_byte_packets;
-       edev->stats.tx_1024_to_1518_byte_packets =
-                               stats.tx_1024_to_1518_byte_packets;
-       edev->stats.tx_1519_to_2047_byte_packets =
-                               stats.tx_1519_to_2047_byte_packets;
-       edev->stats.tx_2048_to_4095_byte_packets =
-                               stats.tx_2048_to_4095_byte_packets;
-       edev->stats.tx_4096_to_9216_byte_packets =
-                               stats.tx_4096_to_9216_byte_packets;
-       edev->stats.tx_9217_to_16383_byte_packets =
-                               stats.tx_9217_to_16383_byte_packets;
-       edev->stats.tx_pause_frames = stats.tx_pause_frames;
-       edev->stats.tx_pfc_frames = stats.tx_pfc_frames;
-       edev->stats.tx_lpi_entry_count = stats.tx_lpi_entry_count;
-       edev->stats.tx_total_collisions = stats.tx_total_collisions;
-       edev->stats.brb_truncates = stats.brb_truncates;
-       edev->stats.brb_discards = stats.brb_discards;
-       edev->stats.tx_mac_ctrl_frames = stats.tx_mac_ctrl_frames;
+
+       p_common->no_buff_discards = stats.common.no_buff_discards;
+       p_common->packet_too_big_discard = stats.common.packet_too_big_discard;
+       p_common->ttl0_discard = stats.common.ttl0_discard;
+       p_common->rx_ucast_bytes = stats.common.rx_ucast_bytes;
+       p_common->rx_mcast_bytes = stats.common.rx_mcast_bytes;
+       p_common->rx_bcast_bytes = stats.common.rx_bcast_bytes;
+       p_common->rx_ucast_pkts = stats.common.rx_ucast_pkts;
+       p_common->rx_mcast_pkts = stats.common.rx_mcast_pkts;
+       p_common->rx_bcast_pkts = stats.common.rx_bcast_pkts;
+       p_common->mftag_filter_discards = stats.common.mftag_filter_discards;
+       p_common->mac_filter_discards = stats.common.mac_filter_discards;
+
+       p_common->tx_ucast_bytes = stats.common.tx_ucast_bytes;
+       p_common->tx_mcast_bytes = stats.common.tx_mcast_bytes;
+       p_common->tx_bcast_bytes = stats.common.tx_bcast_bytes;
+       p_common->tx_ucast_pkts = stats.common.tx_ucast_pkts;
+       p_common->tx_mcast_pkts = stats.common.tx_mcast_pkts;
+       p_common->tx_bcast_pkts = stats.common.tx_bcast_pkts;
+       p_common->tx_err_drop_pkts = stats.common.tx_err_drop_pkts;
+       p_common->coalesced_pkts = stats.common.tpa_coalesced_pkts;
+       p_common->coalesced_events = stats.common.tpa_coalesced_events;
+       p_common->coalesced_aborts_num = stats.common.tpa_aborts_num;
+       p_common->non_coalesced_pkts = stats.common.tpa_not_coalesced_pkts;
+       p_common->coalesced_bytes = stats.common.tpa_coalesced_bytes;
+
+       p_common->rx_64_byte_packets = stats.common.rx_64_byte_packets;
+       p_common->rx_65_to_127_byte_packets =
+           stats.common.rx_65_to_127_byte_packets;
+       p_common->rx_128_to_255_byte_packets =
+           stats.common.rx_128_to_255_byte_packets;
+       p_common->rx_256_to_511_byte_packets =
+           stats.common.rx_256_to_511_byte_packets;
+       p_common->rx_512_to_1023_byte_packets =
+           stats.common.rx_512_to_1023_byte_packets;
+       p_common->rx_1024_to_1518_byte_packets =
+           stats.common.rx_1024_to_1518_byte_packets;
+       p_common->rx_crc_errors = stats.common.rx_crc_errors;
+       p_common->rx_mac_crtl_frames = stats.common.rx_mac_crtl_frames;
+       p_common->rx_pause_frames = stats.common.rx_pause_frames;
+       p_common->rx_pfc_frames = stats.common.rx_pfc_frames;
+       p_common->rx_align_errors = stats.common.rx_align_errors;
+       p_common->rx_carrier_errors = stats.common.rx_carrier_errors;
+       p_common->rx_oversize_packets = stats.common.rx_oversize_packets;
+       p_common->rx_jabbers = stats.common.rx_jabbers;
+       p_common->rx_undersize_packets = stats.common.rx_undersize_packets;
+       p_common->rx_fragments = stats.common.rx_fragments;
+       p_common->tx_64_byte_packets = stats.common.tx_64_byte_packets;
+       p_common->tx_65_to_127_byte_packets =
+           stats.common.tx_65_to_127_byte_packets;
+       p_common->tx_128_to_255_byte_packets =
+           stats.common.tx_128_to_255_byte_packets;
+       p_common->tx_256_to_511_byte_packets =
+           stats.common.tx_256_to_511_byte_packets;
+       p_common->tx_512_to_1023_byte_packets =
+           stats.common.tx_512_to_1023_byte_packets;
+       p_common->tx_1024_to_1518_byte_packets =
+           stats.common.tx_1024_to_1518_byte_packets;
+       p_common->tx_pause_frames = stats.common.tx_pause_frames;
+       p_common->tx_pfc_frames = stats.common.tx_pfc_frames;
+       p_common->brb_truncates = stats.common.brb_truncates;
+       p_common->brb_discards = stats.common.brb_discards;
+       p_common->tx_mac_ctrl_frames = stats.common.tx_mac_ctrl_frames;
+
+       if (QEDE_IS_BB(edev)) {
+               struct qede_stats_bb *p_bb = &edev->stats.bb;
+
+               p_bb->rx_1519_to_1522_byte_packets =
+                   stats.bb.rx_1519_to_1522_byte_packets;
+               p_bb->rx_1519_to_2047_byte_packets =
+                   stats.bb.rx_1519_to_2047_byte_packets;
+               p_bb->rx_2048_to_4095_byte_packets =
+                   stats.bb.rx_2048_to_4095_byte_packets;
+               p_bb->rx_4096_to_9216_byte_packets =
+                   stats.bb.rx_4096_to_9216_byte_packets;
+               p_bb->rx_9217_to_16383_byte_packets =
+                   stats.bb.rx_9217_to_16383_byte_packets;
+               p_bb->tx_1519_to_2047_byte_packets =
+                   stats.bb.tx_1519_to_2047_byte_packets;
+               p_bb->tx_2048_to_4095_byte_packets =
+                   stats.bb.tx_2048_to_4095_byte_packets;
+               p_bb->tx_4096_to_9216_byte_packets =
+                   stats.bb.tx_4096_to_9216_byte_packets;
+               p_bb->tx_9217_to_16383_byte_packets =
+                   stats.bb.tx_9217_to_16383_byte_packets;
+               p_bb->tx_lpi_entry_count = stats.bb.tx_lpi_entry_count;
+               p_bb->tx_total_collisions = stats.bb.tx_total_collisions;
+       } else {
+               struct qede_stats_ah *p_ah = &edev->stats.ah;
+
+               p_ah->rx_1519_to_max_byte_packets =
+                   stats.ah.rx_1519_to_max_byte_packets;
+               p_ah->tx_1519_to_max_byte_packets =
+                   stats.ah.tx_1519_to_max_byte_packets;
+       }
 }
 
 static void qede_get_stats64(struct net_device *dev,
                             struct rtnl_link_stats64 *stats)
 {
        struct qede_dev *edev = netdev_priv(dev);
+       struct qede_stats_common *p_common;
 
        qede_fill_by_demand_stats(edev);
+       p_common = &edev->stats.common;
 
-       stats->rx_packets = edev->stats.rx_ucast_pkts +
-                           edev->stats.rx_mcast_pkts +
-                           edev->stats.rx_bcast_pkts;
-       stats->tx_packets = edev->stats.tx_ucast_pkts +
-                           edev->stats.tx_mcast_pkts +
-                           edev->stats.tx_bcast_pkts;
-
-       stats->rx_bytes = edev->stats.rx_ucast_bytes +
-                         edev->stats.rx_mcast_bytes +
-                         edev->stats.rx_bcast_bytes;
+       stats->rx_packets = p_common->rx_ucast_pkts + p_common->rx_mcast_pkts +
+                           p_common->rx_bcast_pkts;
+       stats->tx_packets = p_common->tx_ucast_pkts + p_common->tx_mcast_pkts +
+                           p_common->tx_bcast_pkts;
 
-       stats->tx_bytes = edev->stats.tx_ucast_bytes +
-                         edev->stats.tx_mcast_bytes +
-                         edev->stats.tx_bcast_bytes;
+       stats->rx_bytes = p_common->rx_ucast_bytes + p_common->rx_mcast_bytes +
+                         p_common->rx_bcast_bytes;
+       stats->tx_bytes = p_common->tx_ucast_bytes + p_common->tx_mcast_bytes +
+                         p_common->tx_bcast_bytes;
 
-       stats->tx_errors = edev->stats.tx_err_drop_pkts;
-       stats->multicast = edev->stats.rx_mcast_pkts +
-                          edev->stats.rx_bcast_pkts;
+       stats->tx_errors = p_common->tx_err_drop_pkts;
+       stats->multicast = p_common->rx_mcast_pkts + p_common->rx_bcast_pkts;
 
-       stats->rx_fifo_errors = edev->stats.no_buff_discards;
+       stats->rx_fifo_errors = p_common->no_buff_discards;
 
-       stats->collisions = edev->stats.tx_total_collisions;
-       stats->rx_crc_errors = edev->stats.rx_crc_errors;
-       stats->rx_frame_errors = edev->stats.rx_align_errors;
+       if (QEDE_IS_BB(edev))
+               stats->collisions = edev->stats.bb.tx_total_collisions;
+       stats->rx_crc_errors = p_common->rx_crc_errors;
+       stats->rx_frame_errors = p_common->rx_align_errors;
 }
 
 #ifdef CONFIG_QED_SRIOV
@@ -1165,9 +1187,11 @@ static int qede_alloc_mem_rxq(struct qede_dev *edev, struct qede_rx_queue *rxq)
        rxq->num_rx_buffers = edev->q_num_rx_buffers;
 
        rxq->rx_buf_size = NET_IP_ALIGN + ETH_OVERHEAD + edev->ndev->mtu;
+       rxq->rx_headroom = edev->xdp_prog ? XDP_PACKET_HEADROOM : 0;
 
-       if (rxq->rx_buf_size > PAGE_SIZE)
-               rxq->rx_buf_size = PAGE_SIZE;
+       /* Make sure that the headroom and payload fit in a single page */
+       if (rxq->rx_buf_size + rxq->rx_headroom > PAGE_SIZE)
+               rxq->rx_buf_size = PAGE_SIZE - rxq->rx_headroom;
 
        /* Segment size to split a page into multiple equal parts,
         * unless XDP is used in which case we'd use the entire page.
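
When an XDP program is attached, the driver now reserves headroom in front of
each Rx buffer and shrinks the buffer so headroom plus payload still fit in
one page. A standalone sketch of the sizing (a PAGE_SIZE of 4096 and a
50-byte overhead are assumed for the example; XDP_PACKET_HEADROOM is 256 in
mainline):

    #include <stdio.h>

    #define PAGE_SIZE            4096
    #define XDP_PACKET_HEADROOM   256

    static int rx_buf_size(int mtu, int overhead, int headroom)
    {
        int size = overhead + mtu;

        if (size + headroom > PAGE_SIZE)  /* clamp, as the hunk above does */
            size = PAGE_SIZE - headroom;
        return size;
    }

    int main(void)
    {
        printf("no xdp: %d\n", rx_buf_size(9000, 50, 0));                   /* 4096 */
        printf("xdp:    %d\n", rx_buf_size(9000, 50, XDP_PACKET_HEADROOM)); /* 3840 */
        return 0;
    }
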
@@ -1229,7 +1253,7 @@ static void qede_free_mem_txq(struct qede_dev *edev, struct qede_tx_queue *txq)
 {
        /* Free the parallel SW ring */
        if (txq->is_xdp)
-               kfree(txq->sw_tx_ring.pages);
+               kfree(txq->sw_tx_ring.xdp);
        else
                kfree(txq->sw_tx_ring.skbs);
 
@@ -1247,9 +1271,9 @@ static int qede_alloc_mem_txq(struct qede_dev *edev, struct qede_tx_queue *txq)
 
        /* Allocate the parallel driver ring for Tx buffers */
        if (txq->is_xdp) {
-               size = sizeof(*txq->sw_tx_ring.pages) * TX_RING_SIZE;
-               txq->sw_tx_ring.pages = kzalloc(size, GFP_KERNEL);
-               if (!txq->sw_tx_ring.pages)
+               size = sizeof(*txq->sw_tx_ring.xdp) * TX_RING_SIZE;
+               txq->sw_tx_ring.xdp = kzalloc(size, GFP_KERNEL);
+               if (!txq->sw_tx_ring.xdp)
                        goto err;
        } else {
                size = sizeof(*txq->sw_tx_ring.skbs) * TX_RING_SIZE;
index 6d31f92ef2b6340642eca02039266aace70aefdd..84ac50f92c9c5167adfc5e295139a7a2d42a1eb3 100644
@@ -1162,8 +1162,8 @@ struct ob_mac_tso_iocb_rsp {
 struct ib_mac_iocb_rsp {
        u8 opcode;              /* 0x20 */
        u8 flags1;
-#define IB_MAC_IOCB_RSP_OI     0x01    /* Overide intr delay */
-#define IB_MAC_IOCB_RSP_I      0x02    /* Disble Intr Generation */
+#define IB_MAC_IOCB_RSP_OI     0x01    /* Override intr delay */
+#define IB_MAC_IOCB_RSP_I      0x02    /* Disable Intr Generation */
 #define IB_MAC_CSUM_ERR_MASK 0x1c      /* A mask to use for csum errs */
 #define IB_MAC_IOCB_RSP_TE     0x04    /* Checksum error */
 #define IB_MAC_IOCB_RSP_NU     0x08    /* No checksum rcvd */
index e9e647072596d5c6e3c6206fb003b66bfe3cdb81..1188d420fe539912e6b2eb8656124625f1ae6f34 100644
@@ -4686,7 +4686,8 @@ static int ql_init_device(struct pci_dev *pdev, struct net_device *ndev,
        /*
         * Set up the operating parameters.
         */
-       qdev->workqueue = alloc_ordered_workqueue(ndev->name, WQ_MEM_RECLAIM);
+       qdev->workqueue = alloc_ordered_workqueue("%s", WQ_MEM_RECLAIM,
+                                                 ndev->name);
        INIT_DELAYED_WORK(&qdev->asic_reset_work, ql_asic_reset_work);
        INIT_DELAYED_WORK(&qdev->mpi_reset_work, ql_mpi_reset_work);
        INIT_DELAYED_WORK(&qdev->mpi_work, ql_mpi_work);
index 040b28977ee74c8cbd9a3f83c73a14597677ddbd..18c184ee1f3c0ee9e7ec66d2fafba4da22b9d676 100644
@@ -13,6 +13,7 @@
 /* Qualcomm Technologies, Inc. EMAC SGMII Controller driver.
  */
 
+#include <linux/interrupt.h>
 #include <linux/iopoll.h>
 #include <linux/acpi.h>
 #include <linux/of_device.h>
index 24b045b777b601c2c8d8d048f9cdb744a589bf29..0a8f2817ea60f2172eb28177473a4879f85bd18a 100644
@@ -8453,9 +8453,7 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
        tp->opts1_mask = (tp->mac_version != RTL_GIGA_MAC_VER_01) ?
                ~(RxBOVF | RxFOVF) : ~0;
 
-       init_timer(&tp->timer);
-       tp->timer.data = (unsigned long) dev;
-       tp->timer.function = rtl8169_phy_timer;
+       setup_timer(&tp->timer, rtl8169_phy_timer, (unsigned long)dev);
 
        tp->rtl_fw = RTL_FIRMWARE_UNKNOWN;
 
index b712ec23075b139e3b504fe76099511bf88296af..bab13613b138cc15c734d9e9fff5f465ef480a44 100644
@@ -33,6 +33,7 @@
 #include <net/rtnetlink.h>
 #include <net/netevent.h>
 #include <net/arp.h>
+#include <net/fib_rules.h>
 #include <linux/io-64-nonatomic-lo-hi.h>
 #include <generated/utsrelease.h>
 
@@ -2175,7 +2176,10 @@ static const struct switchdev_ops rocker_port_switchdev_ops = {
 
 struct rocker_fib_event_work {
        struct work_struct work;
-       struct fib_entry_notifier_info fen_info;
+       union {
+               struct fib_entry_notifier_info fen_info;
+               struct fib_rule_notifier_info fr_info;
+       };
        struct rocker *rocker;
        unsigned long event;
 };
@@ -2185,6 +2189,7 @@ static void rocker_router_fib_event_work(struct work_struct *work)
        struct rocker_fib_event_work *fib_work =
                container_of(work, struct rocker_fib_event_work, work);
        struct rocker *rocker = fib_work->rocker;
+       struct fib_rule *rule;
        int err;
 
        /* Protect internal structures from changes */
@@ -2202,7 +2207,10 @@ static void rocker_router_fib_event_work(struct work_struct *work)
                break;
        case FIB_EVENT_RULE_ADD: /* fall through */
        case FIB_EVENT_RULE_DEL:
-               rocker_world_fib4_abort(rocker);
+               rule = fib_work->fr_info.rule;
+               if (!fib4_rule_default(rule))
+                       rocker_world_fib4_abort(rocker);
+               fib_rule_put(rule);
                break;
        }
        rtnl_unlock();
@@ -2233,6 +2241,11 @@ static int rocker_router_fib_event(struct notifier_block *nb,
                 */
                fib_info_hold(fib_work->fen_info.fi);
                break;
+       case FIB_EVENT_RULE_ADD: /* fall through */
+       case FIB_EVENT_RULE_DEL:
+               memcpy(&fib_work->fr_info, ptr, sizeof(fib_work->fr_info));
+               fib_rule_get(fib_work->fr_info.rule);
+               break;
        }
 
        queue_work(rocker->rocker_owq, &fib_work->work);
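
Because the rule is handed to a workqueue and processed later, the notifier
must take a reference before queueing and the worker must drop it once done,
or the rule could be freed underneath the deferred work. A toy refcount model
of that hand-off (the struct and helper names are invented):

    #include <stdio.h>

    struct rule { int refcnt; };

    static void rule_get(struct rule *r) { r->refcnt++; }
    static void rule_put(struct rule *r)
    {
        if (--r->refcnt == 0)
            puts("rule freed");
    }

    /* the deferred worker drops the reference it was handed */
    static void worker(struct rule *r)
    {
        puts("processing rule");
        rule_put(r);
    }

    int main(void)
    {
        struct rule r = { .refcnt = 1 };

        rule_get(&r);  /* pin before deferring, as the hunk above does */
        worker(&r);    /* pretend the workqueue ran */
        rule_put(&r);  /* original owner drops its reference */
        return 0;
    }
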
index 7cd76b6b5cb9f6c1c05f09b509be7e11a79b0478..2ae85245478087d2d640617bd79bfbfabd5f0763 100644
@@ -2216,18 +2216,15 @@ static int ofdpa_port_stp_update(struct ofdpa_port *ofdpa_port,
 {
        bool want[OFDPA_CTRL_MAX] = { 0, };
        bool prev_ctrls[OFDPA_CTRL_MAX];
-       u8 uninitialized_var(prev_state);
+       u8 prev_state;
        int err;
        int i;
 
-       if (switchdev_trans_ph_prepare(trans)) {
-               memcpy(prev_ctrls, ofdpa_port->ctrls, sizeof(prev_ctrls));
-               prev_state = ofdpa_port->stp_state;
-       }
-
-       if (ofdpa_port->stp_state == state)
+       prev_state = ofdpa_port->stp_state;
+       if (prev_state == state)
                return 0;
 
+       memcpy(prev_ctrls, ofdpa_port->ctrls, sizeof(prev_ctrls));
        ofdpa_port->stp_state = state;
 
        switch (state) {
index c60c2d4c646a89610edd35ef40e6ef337d045d79..78efb2822b8648c6e6f02ffa764aad89570ffbd0 100644
@@ -119,6 +119,7 @@ struct efx_ef10_filter_table {
        bool mc_promisc;
 /* Whether in multicast promiscuous mode when last changed */
        bool mc_promisc_last;
+       bool mc_overflow; /* Too many MC addrs; should always imply mc_promisc */
        bool vlan_filter;
        struct list_head vlan_list;
 };
@@ -5058,6 +5059,7 @@ static void efx_ef10_filter_mc_addr_list(struct efx_nic *efx)
        struct netdev_hw_addr *mc;
        unsigned int i, addr_count;
 
+       table->mc_overflow = false;
        table->mc_promisc = !!(net_dev->flags & (IFF_PROMISC | IFF_ALLMULTI));
 
        addr_count = netdev_mc_count(net_dev);
@@ -5065,6 +5067,7 @@ static void efx_ef10_filter_mc_addr_list(struct efx_nic *efx)
        netdev_for_each_mc_addr(mc, net_dev) {
                if (i >= EFX_EF10_FILTER_DEV_MC_MAX) {
                        table->mc_promisc = true;
+                       table->mc_overflow = true;
                        break;
                }
                ether_addr_copy(table->dev_mc_list[i].addr, mc->addr);
@@ -5469,12 +5472,15 @@ static void efx_ef10_filter_vlan_sync_rx_mode(struct efx_nic *efx,
                        }
                } else {
                        /* If we failed to insert promiscuous filters, don't
-                        * rollback.  Regardless, also insert the mc_list
+                        * rollback.  Regardless, also insert the mc_list,
+                        * unless it's incomplete due to overflow
                         */
                        efx_ef10_filter_insert_def(efx, vlan,
                                                   EFX_ENCAP_TYPE_NONE,
                                                   true, false);
-                       efx_ef10_filter_insert_addr_list(efx, vlan, true, false);
+                       if (!table->mc_overflow)
+                               efx_ef10_filter_insert_addr_list(efx, vlan,
+                                                                true, false);
                }
        } else {
                /* If any filters failed to insert, rollback and fall back to
index 334bcc6df6b2ba90a43da4baf7b44cc5ebfa1bac..50d28261b6b9ea22f42c26be0e9f0e0bed194109 100644
@@ -2404,7 +2404,7 @@ static void efx_udp_tunnel_del(struct net_device *dev, struct udp_tunnel_info *t
        tnl.type = (u16)efx_tunnel_type;
        tnl.port = ti->port;
 
-       if (efx->type->udp_tnl_add_port)
+       if (efx->type->udp_tnl_del_port)
                (void)efx->type->udp_tnl_del_port(efx, tnl);
 }
 
index 104fb15a73f2074c145878f0466d8ff1fc650167..f6daf09b86272397d35bc72d59c5269b4644db1c 100644
@@ -437,11 +437,13 @@ int ef4_setup_tc(struct net_device *net_dev, u32 handle, __be16 proto,
        if (ntc->type != TC_SETUP_MQPRIO)
                return -EINVAL;
 
-       num_tc = ntc->tc;
+       num_tc = ntc->mqprio->num_tc;
 
        if (ef4_nic_rev(efx) < EF4_REV_FALCON_B0 || num_tc > EF4_MAX_TX_TC)
                return -EINVAL;
 
+       ntc->mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS;
+
        if (num_tc == net_dev->num_tc)
                return 0;
 
index ff88d60aa6d5650d04f46938eaf6abc63c4ff568..3bdf87f310877a31fee219afa3de3dea91e40521 100644
@@ -665,11 +665,13 @@ int efx_setup_tc(struct net_device *net_dev, u32 handle, __be16 proto,
        if (ntc->type != TC_SETUP_MQPRIO)
                return -EINVAL;
 
-       num_tc = ntc->tc;
+       num_tc = ntc->mqprio->num_tc;
 
        if (num_tc > EFX_MAX_TX_TC)
                return -EINVAL;
 
+       ntc->mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS;
+
        if (num_tc == net_dev->num_tc)
                return 0;
 
index 4f19c6166182e780b77222c566390c5acdf2ad3e..36307d34f64181d03e744352d56681b33b111b1f 100644
@@ -1446,40 +1446,40 @@ static int smc911x_close(struct net_device *dev)
  * Ethtool support
  */
 static int
-smc911x_ethtool_getsettings(struct net_device *dev, struct ethtool_cmd *cmd)
+smc911x_ethtool_get_link_ksettings(struct net_device *dev,
+                                  struct ethtool_link_ksettings *cmd)
 {
        struct smc911x_local *lp = netdev_priv(dev);
        int ret, status;
        unsigned long flags;
+       u32 supported;
 
        DBG(SMC_DEBUG_FUNC, dev, "--> %s\n", __func__);
-       cmd->maxtxpkt = 1;
-       cmd->maxrxpkt = 1;
 
        if (lp->phy_type != 0) {
                spin_lock_irqsave(&lp->lock, flags);
-               ret = mii_ethtool_gset(&lp->mii, cmd);
+               ret = mii_ethtool_get_link_ksettings(&lp->mii, cmd);
                spin_unlock_irqrestore(&lp->lock, flags);
        } else {
-               cmd->supported = SUPPORTED_10baseT_Half |
+               supported = SUPPORTED_10baseT_Half |
                                SUPPORTED_10baseT_Full |
                                SUPPORTED_TP | SUPPORTED_AUI;
 
                if (lp->ctl_rspeed == 10)
-                       ethtool_cmd_speed_set(cmd, SPEED_10);
+                       cmd->base.speed = SPEED_10;
                else if (lp->ctl_rspeed == 100)
-                       ethtool_cmd_speed_set(cmd, SPEED_100);
-
-               cmd->autoneg = AUTONEG_DISABLE;
-               if (lp->mii.phy_id==1)
-                       cmd->transceiver = XCVR_INTERNAL;
-               else
-                       cmd->transceiver = XCVR_EXTERNAL;
-               cmd->port = 0;
+                       cmd->base.speed = SPEED_100;
+
+               cmd->base.autoneg = AUTONEG_DISABLE;
+               cmd->base.port = 0;
                SMC_GET_PHY_SPECIAL(lp, lp->mii.phy_id, status);
-               cmd->duplex =
+               cmd->base.duplex =
                        (status & (PHY_SPECIAL_SPD_10FULL_ | PHY_SPECIAL_SPD_100FULL_)) ?
                                DUPLEX_FULL : DUPLEX_HALF;
+
+               ethtool_convert_legacy_u32_to_link_mode(
+                       cmd->link_modes.supported, supported);
+
                ret = 0;
        }
 
@@ -1487,7 +1487,8 @@ smc911x_ethtool_getsettings(struct net_device *dev, struct ethtool_cmd *cmd)
 }
 
 static int
-smc911x_ethtool_setsettings(struct net_device *dev, struct ethtool_cmd *cmd)
+smc911x_ethtool_set_link_ksettings(struct net_device *dev,
+                                  const struct ethtool_link_ksettings *cmd)
 {
        struct smc911x_local *lp = netdev_priv(dev);
        int ret;
@@ -1495,16 +1496,18 @@ smc911x_ethtool_setsettings(struct net_device *dev, struct ethtool_cmd *cmd)
 
        if (lp->phy_type != 0) {
                spin_lock_irqsave(&lp->lock, flags);
-               ret = mii_ethtool_sset(&lp->mii, cmd);
+               ret = mii_ethtool_set_link_ksettings(&lp->mii, cmd);
                spin_unlock_irqrestore(&lp->lock, flags);
        } else {
-               if (cmd->autoneg != AUTONEG_DISABLE ||
-                       cmd->speed != SPEED_10 ||
-                       (cmd->duplex != DUPLEX_HALF && cmd->duplex != DUPLEX_FULL) ||
-                       (cmd->port != PORT_TP && cmd->port != PORT_AUI))
+               if (cmd->base.autoneg != AUTONEG_DISABLE ||
+                   cmd->base.speed != SPEED_10 ||
+                   (cmd->base.duplex != DUPLEX_HALF &&
+                    cmd->base.duplex != DUPLEX_FULL) ||
+                   (cmd->base.port != PORT_TP &&
+                    cmd->base.port != PORT_AUI))
                        return -EINVAL;
 
-               lp->ctl_rfduplx = cmd->duplex == DUPLEX_FULL;
+               lp->ctl_rfduplx = cmd->base.duplex == DUPLEX_FULL;
 
                ret = 0;
        }
@@ -1686,8 +1689,6 @@ static int smc911x_ethtool_geteeprom_len(struct net_device *dev)
 }
 
 static const struct ethtool_ops smc911x_ethtool_ops = {
-       .get_settings    = smc911x_ethtool_getsettings,
-       .set_settings    = smc911x_ethtool_setsettings,
        .get_drvinfo     = smc911x_ethtool_getdrvinfo,
        .get_msglevel    = smc911x_ethtool_getmsglevel,
        .set_msglevel    = smc911x_ethtool_setmsglevel,
@@ -1698,6 +1699,8 @@ static const struct ethtool_ops smc911x_ethtool_ops = {
        .get_eeprom_len = smc911x_ethtool_geteeprom_len,
        .get_eeprom = smc911x_ethtool_geteeprom,
        .set_eeprom = smc911x_ethtool_seteeprom,
+       .get_link_ksettings      = smc911x_ethtool_get_link_ksettings,
+       .set_link_ksettings      = smc911x_ethtool_set_link_ksettings,
 };
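
The smc911x and smc91x conversions follow the standard recipe for retiring get_settings/set_settings: speed, duplex, autoneg and port move under cmd->base, the legacy u32 capability mask is expanded with ethtool_convert_legacy_u32_to_link_mode(), and the obsolete transceiver, maxtxpkt and maxrxpkt fields are dropped. A sketch of the mask expansion, with stand-in types in place of the definitions from linux/ethtool.h:

    #include <stdint.h>
    #include <string.h>

    #define SUPPORTED_10baseT_Half  (1u << 0)       /* stand-in bit values */
    #define SUPPORTED_10baseT_Full  (1u << 1)

    struct link_modes_sketch { uint32_t supported[2]; };

    /* Expand a legacy u32 capability mask into the wider link-mode
     * bitmap, the job ethtool_convert_legacy_u32_to_link_mode() does. */
    static void legacy_to_link_mode(struct link_modes_sketch *lm,
                                    uint32_t legacy)
    {
            memset(lm->supported, 0, sizeof(lm->supported));
            lm->supported[0] = legacy;      /* legacy bits are the low word */
    }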
 
 /*
index 65077c77082a2f042117a0889c2b15099c58eae5..91e9bd7159ab37cab5731fef122345cb59341ba2 100644 (file)
@@ -1535,32 +1535,33 @@ static int smc_close(struct net_device *dev)
  * Ethtool support
  */
 static int
-smc_ethtool_getsettings(struct net_device *dev, struct ethtool_cmd *cmd)
+smc_ethtool_get_link_ksettings(struct net_device *dev,
+                              struct ethtool_link_ksettings *cmd)
 {
        struct smc_local *lp = netdev_priv(dev);
        int ret;
 
-       cmd->maxtxpkt = 1;
-       cmd->maxrxpkt = 1;
-
        if (lp->phy_type != 0) {
                spin_lock_irq(&lp->lock);
-               ret = mii_ethtool_gset(&lp->mii, cmd);
+               ret = mii_ethtool_get_link_ksettings(&lp->mii, cmd);
                spin_unlock_irq(&lp->lock);
        } else {
-               cmd->supported = SUPPORTED_10baseT_Half |
+               u32 supported = SUPPORTED_10baseT_Half |
                                 SUPPORTED_10baseT_Full |
                                 SUPPORTED_TP | SUPPORTED_AUI;
 
                if (lp->ctl_rspeed == 10)
-                       ethtool_cmd_speed_set(cmd, SPEED_10);
+                       cmd->base.speed = SPEED_10;
                else if (lp->ctl_rspeed == 100)
-                       ethtool_cmd_speed_set(cmd, SPEED_100);
+                       cmd->base.speed = SPEED_100;
+
+               cmd->base.autoneg = AUTONEG_DISABLE;
+               cmd->base.port = 0;
+               cmd->base.duplex = lp->tcr_cur_mode & TCR_SWFDUP ?
+                       DUPLEX_FULL : DUPLEX_HALF;
 
-               cmd->autoneg = AUTONEG_DISABLE;
-               cmd->transceiver = XCVR_INTERNAL;
-               cmd->port = 0;
-               cmd->duplex = lp->tcr_cur_mode & TCR_SWFDUP ? DUPLEX_FULL : DUPLEX_HALF;
+               ethtool_convert_legacy_u32_to_link_mode(
+                       cmd->link_modes.supported, supported);
 
                ret = 0;
        }
@@ -1569,24 +1570,26 @@ smc_ethtool_getsettings(struct net_device *dev, struct ethtool_cmd *cmd)
 }
 
 static int
-smc_ethtool_setsettings(struct net_device *dev, struct ethtool_cmd *cmd)
+smc_ethtool_set_link_ksettings(struct net_device *dev,
+                              const struct ethtool_link_ksettings *cmd)
 {
        struct smc_local *lp = netdev_priv(dev);
        int ret;
 
        if (lp->phy_type != 0) {
                spin_lock_irq(&lp->lock);
-               ret = mii_ethtool_sset(&lp->mii, cmd);
+               ret = mii_ethtool_set_link_ksettings(&lp->mii, cmd);
                spin_unlock_irq(&lp->lock);
        } else {
-               if (cmd->autoneg != AUTONEG_DISABLE ||
-                   cmd->speed != SPEED_10 ||
-                   (cmd->duplex != DUPLEX_HALF && cmd->duplex != DUPLEX_FULL) ||
-                   (cmd->port != PORT_TP && cmd->port != PORT_AUI))
+               if (cmd->base.autoneg != AUTONEG_DISABLE ||
+                   cmd->base.speed != SPEED_10 ||
+                   (cmd->base.duplex != DUPLEX_HALF &&
+                    cmd->base.duplex != DUPLEX_FULL) ||
+                   (cmd->base.port != PORT_TP && cmd->base.port != PORT_AUI))
                        return -EINVAL;
 
-//             lp->port = cmd->port;
-               lp->ctl_rfduplx = cmd->duplex == DUPLEX_FULL;
+//             lp->port = cmd->base.port;
+               lp->ctl_rfduplx = cmd->base.duplex == DUPLEX_FULL;
 
 //             if (netif_running(dev))
 //                     smc_set_port(dev);
@@ -1744,8 +1747,6 @@ static int smc_ethtool_seteeprom(struct net_device *dev,
 
 
 static const struct ethtool_ops smc_ethtool_ops = {
-       .get_settings   = smc_ethtool_getsettings,
-       .set_settings   = smc_ethtool_setsettings,
        .get_drvinfo    = smc_ethtool_getdrvinfo,
 
        .get_msglevel   = smc_ethtool_getmsglevel,
@@ -1755,6 +1756,8 @@ static const struct ethtool_ops smc_ethtool_ops = {
        .get_eeprom_len = smc_ethtool_geteeprom_len,
        .get_eeprom     = smc_ethtool_geteeprom,
        .set_eeprom     = smc_ethtool_seteeprom,
+       .get_link_ksettings     = smc_ethtool_get_link_ksettings,
+       .set_link_ksettings     = smc_ethtool_set_link_ksettings,
 };
 
 static const struct net_device_ops smc_netdev_ops = {
index 01a8c020d6db193a67c2a74bdb522b245ea5ac99..37881f81319e760df60a47944f56bab3658142b4 100644 (file)
 
 static int stmmac_jumbo_frm(void *p, struct sk_buff *skb, int csum)
 {
-       struct stmmac_priv *priv = (struct stmmac_priv *)p;
-       unsigned int entry = priv->cur_tx;
-       struct dma_desc *desc = priv->dma_tx + entry;
+       struct stmmac_tx_queue *tx_q = (struct stmmac_tx_queue *)p;
        unsigned int nopaged_len = skb_headlen(skb);
+       struct stmmac_priv *priv = tx_q->priv_data;
+       unsigned int entry = tx_q->cur_tx;
        unsigned int bmax, des2;
        unsigned int i = 1, len;
+       struct dma_desc *desc;
+
+       desc = tx_q->dma_tx + entry;
 
        if (priv->plat->enh_desc)
                bmax = BUF_SIZE_8KiB;
@@ -45,16 +48,16 @@ static int stmmac_jumbo_frm(void *p, struct sk_buff *skb, int csum)
        desc->des2 = cpu_to_le32(des2);
        if (dma_mapping_error(priv->device, des2))
                return -1;
-       priv->tx_skbuff_dma[entry].buf = des2;
-       priv->tx_skbuff_dma[entry].len = bmax;
+       tx_q->tx_skbuff_dma[entry].buf = des2;
+       tx_q->tx_skbuff_dma[entry].len = bmax;
        /* do not close the descriptor and do not set own bit */
        priv->hw->desc->prepare_tx_desc(desc, 1, bmax, csum, STMMAC_CHAIN_MODE,
                                        0, false);
 
        while (len != 0) {
-               priv->tx_skbuff[entry] = NULL;
+               tx_q->tx_skbuff[entry] = NULL;
                entry = STMMAC_GET_ENTRY(entry, DMA_TX_SIZE);
-               desc = priv->dma_tx + entry;
+               desc = tx_q->dma_tx + entry;
 
                if (len > bmax) {
                        des2 = dma_map_single(priv->device,
@@ -63,8 +66,8 @@ static int stmmac_jumbo_frm(void *p, struct sk_buff *skb, int csum)
                        desc->des2 = cpu_to_le32(des2);
                        if (dma_mapping_error(priv->device, des2))
                                return -1;
-                       priv->tx_skbuff_dma[entry].buf = des2;
-                       priv->tx_skbuff_dma[entry].len = bmax;
+                       tx_q->tx_skbuff_dma[entry].buf = des2;
+                       tx_q->tx_skbuff_dma[entry].len = bmax;
                        priv->hw->desc->prepare_tx_desc(desc, 0, bmax, csum,
                                                        STMMAC_CHAIN_MODE, 1,
                                                        false);
@@ -77,8 +80,8 @@ static int stmmac_jumbo_frm(void *p, struct sk_buff *skb, int csum)
                        desc->des2 = cpu_to_le32(des2);
                        if (dma_mapping_error(priv->device, des2))
                                return -1;
-                       priv->tx_skbuff_dma[entry].buf = des2;
-                       priv->tx_skbuff_dma[entry].len = len;
+                       tx_q->tx_skbuff_dma[entry].buf = des2;
+                       tx_q->tx_skbuff_dma[entry].len = len;
                        /* last descriptor can be set now */
                        priv->hw->desc->prepare_tx_desc(desc, 0, len, csum,
                                                        STMMAC_CHAIN_MODE, 1,
@@ -87,7 +90,7 @@ static int stmmac_jumbo_frm(void *p, struct sk_buff *skb, int csum)
                }
        }
 
-       priv->cur_tx = entry;
+       tx_q->cur_tx = entry;
 
        return entry;
 }
@@ -136,32 +139,34 @@ static void stmmac_init_dma_chain(void *des, dma_addr_t phy_addr,
 
 static void stmmac_refill_desc3(void *priv_ptr, struct dma_desc *p)
 {
-       struct stmmac_priv *priv = (struct stmmac_priv *)priv_ptr;
+       struct stmmac_rx_queue *rx_q = (struct stmmac_rx_queue *)priv_ptr;
+       struct stmmac_priv *priv = rx_q->priv_data;
 
        if (priv->hwts_rx_en && !priv->extend_desc)
                /* NOTE: Device will overwrite des3 with timestamp value if
                 * 1588-2002 time stamping is enabled, hence reinitialize it
                 * to keep explicit chaining in the descriptor.
                 */
-               p->des3 = cpu_to_le32((unsigned int)(priv->dma_rx_phy +
-                                     (((priv->dirty_rx) + 1) %
+               p->des3 = cpu_to_le32((unsigned int)(rx_q->dma_rx_phy +
+                                     (((rx_q->dirty_rx) + 1) %
                                       DMA_RX_SIZE) *
                                      sizeof(struct dma_desc)));
 }
 
 static void stmmac_clean_desc3(void *priv_ptr, struct dma_desc *p)
 {
-       struct stmmac_priv *priv = (struct stmmac_priv *)priv_ptr;
-       unsigned int entry = priv->dirty_tx;
+       struct stmmac_tx_queue *tx_q = (struct stmmac_tx_queue *)priv_ptr;
+       struct stmmac_priv *priv = tx_q->priv_data;
+       unsigned int entry = tx_q->dirty_tx;
 
-       if (priv->tx_skbuff_dma[entry].last_segment && !priv->extend_desc &&
+       if (tx_q->tx_skbuff_dma[entry].last_segment && !priv->extend_desc &&
            priv->hwts_tx_en)
                /* NOTE: Device will overwrite des3 with timestamp value if
                 * 1588-2002 time stamping is enabled, hence reinitialize it
                 * to keep explicit chaining in the descriptor.
                 */
-               p->des3 = cpu_to_le32((unsigned int)((priv->dma_tx_phy +
-                                     ((priv->dirty_tx + 1) % DMA_TX_SIZE))
+               p->des3 = cpu_to_le32((unsigned int)((tx_q->dma_tx_phy +
+                                     ((tx_q->dirty_tx + 1) % DMA_TX_SIZE))
                                      * sizeof(struct dma_desc)));
 }
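
These chain-mode hunks are one slice of the stmmac multi-queue rework: callbacks that used to receive the global stmmac_priv now receive a per-queue context owning its own ring indices and DMA arrays, with a priv_data back-pointer for shared state. A trimmed sketch of the pattern (names mirror the diff, structs simplified):

    struct sketch_priv { unsigned int tx_queue_count; };

    struct sketch_tx_queue {
            struct sketch_priv *priv_data;  /* back-pointer to shared state */
            unsigned int cur_tx;            /* per-queue ring indices */
            unsigned int dirty_tx;
    };

    #define RING_SIZE 256

    /* Callbacks keep a void * signature so ring and chain modes can
     * share one ops table. */
    static void advance_tx(void *p)
    {
            struct sketch_tx_queue *tx_q = p;

            tx_q->cur_tx = (tx_q->cur_tx + 1) % RING_SIZE;
    }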
 
index 04d9245b7149ce663b0827a158901a10b58ea701..90d28bcad8804f9a60c5c84a994ead8fca3cee7c 100644 (file)
@@ -246,6 +246,15 @@ struct stmmac_extra_stats {
 #define STMMAC_TX_MAX_FRAMES   256
 #define STMMAC_TX_FRAMES       64
 
+/* Packet types */
+enum packets_types {
+       PACKET_AVCPQ = 0x1, /* AV Untagged Control packets */
+       PACKET_PTPQ = 0x2, /* PTP Packets */
+       PACKET_DCBCPQ = 0x3, /* DCB Control Packets */
+       PACKET_UPQ = 0x4, /* Untagged Packets */
+       PACKET_MCBCQ = 0x5, /* Multicast & Broadcast Packets */
+};
+
 /* Rx IPC status */
 enum rx_frame_status {
        good_frame = 0x0,
@@ -324,6 +333,9 @@ struct dma_features {
        unsigned int number_tx_queues;
        /* Alternate (enhanced) DESC mode */
        unsigned int enh_desc;
+       /* TX and RX FIFO sizes */
+       unsigned int tx_fifo_size;
+       unsigned int rx_fifo_size;
 };
 
 /* GMAC TX FIFO is 8K, Rx FIFO is 16K */
@@ -413,6 +425,14 @@ struct stmmac_dma_ops {
        int (*reset)(void __iomem *ioaddr);
        void (*init)(void __iomem *ioaddr, struct stmmac_dma_cfg *dma_cfg,
                     u32 dma_tx, u32 dma_rx, int atds);
+       void (*init_chan)(void __iomem *ioaddr,
+                         struct stmmac_dma_cfg *dma_cfg, u32 chan);
+       void (*init_rx_chan)(void __iomem *ioaddr,
+                            struct stmmac_dma_cfg *dma_cfg,
+                            u32 dma_rx_phy, u32 chan);
+       void (*init_tx_chan)(void __iomem *ioaddr,
+                            struct stmmac_dma_cfg *dma_cfg,
+                            u32 dma_tx_phy, u32 chan);
        /* Configure the AXI Bus Mode Register */
        void (*axi)(void __iomem *ioaddr, struct stmmac_axi *axi);
        /* Dump DMA registers */
@@ -421,25 +441,28 @@ struct stmmac_dma_ops {
         * An invalid value enables the store-and-forward mode */
        void (*dma_mode)(void __iomem *ioaddr, int txmode, int rxmode,
                         int rxfifosz);
+       void (*dma_rx_mode)(void __iomem *ioaddr, int mode, u32 channel,
+                           int fifosz);
+       void (*dma_tx_mode)(void __iomem *ioaddr, int mode, u32 channel);
        /* To track extra statistic (if supported) */
        void (*dma_diagnostic_fr) (void *data, struct stmmac_extra_stats *x,
                                   void __iomem *ioaddr);
        void (*enable_dma_transmission) (void __iomem *ioaddr);
-       void (*enable_dma_irq) (void __iomem *ioaddr);
-       void (*disable_dma_irq) (void __iomem *ioaddr);
-       void (*start_tx) (void __iomem *ioaddr);
-       void (*stop_tx) (void __iomem *ioaddr);
-       void (*start_rx) (void __iomem *ioaddr);
-       void (*stop_rx) (void __iomem *ioaddr);
+       void (*enable_dma_irq)(void __iomem *ioaddr, u32 chan);
+       void (*disable_dma_irq)(void __iomem *ioaddr, u32 chan);
+       void (*start_tx)(void __iomem *ioaddr, u32 chan);
+       void (*stop_tx)(void __iomem *ioaddr, u32 chan);
+       void (*start_rx)(void __iomem *ioaddr, u32 chan);
+       void (*stop_rx)(void __iomem *ioaddr, u32 chan);
        int (*dma_interrupt) (void __iomem *ioaddr,
-                             struct stmmac_extra_stats *x);
+                             struct stmmac_extra_stats *x, u32 chan);
        /* If supported then get the optional core features */
        void (*get_hw_feature)(void __iomem *ioaddr,
                               struct dma_features *dma_cap);
        /* Program the HW RX Watchdog */
-       void (*rx_watchdog) (void __iomem *ioaddr, u32 riwt);
-       void (*set_tx_ring_len)(void __iomem *ioaddr, u32 len);
-       void (*set_rx_ring_len)(void __iomem *ioaddr, u32 len);
+       void (*rx_watchdog)(void __iomem *ioaddr, u32 riwt, u32 number_chan);
+       void (*set_tx_ring_len)(void __iomem *ioaddr, u32 len, u32 chan);
+       void (*set_rx_ring_len)(void __iomem *ioaddr, u32 len, u32 chan);
        void (*set_rx_tail_ptr)(void __iomem *ioaddr, u32 tail_ptr, u32 chan);
        void (*set_tx_tail_ptr)(void __iomem *ioaddr, u32 tail_ptr, u32 chan);
        void (*enable_tso)(void __iomem *ioaddr, bool en, u32 chan);
@@ -451,20 +474,44 @@ struct mac_device_info;
 struct stmmac_ops {
        /* MAC core initialization */
        void (*core_init)(struct mac_device_info *hw, int mtu);
+       /* Enable the MAC RX/TX */
+       void (*set_mac)(void __iomem *ioaddr, bool enable);
        /* Enable and verify that the IPC module is supported */
        int (*rx_ipc)(struct mac_device_info *hw);
        /* Enable RX Queues */
-       void (*rx_queue_enable)(struct mac_device_info *hw, u32 queue);
+       void (*rx_queue_enable)(struct mac_device_info *hw, u8 mode, u32 queue);
+       /* RX Queues Priority */
+       void (*rx_queue_prio)(struct mac_device_info *hw, u32 prio, u32 queue);
+       /* TX Queues Priority */
+       void (*tx_queue_prio)(struct mac_device_info *hw, u32 prio, u32 queue);
+       /* RX Queues Routing */
+       void (*rx_queue_routing)(struct mac_device_info *hw, u8 packet,
+                                u32 queue);
+       /* Program RX Algorithms */
+       void (*prog_mtl_rx_algorithms)(struct mac_device_info *hw, u32 rx_alg);
+       /* Program TX Algorithms */
+       void (*prog_mtl_tx_algorithms)(struct mac_device_info *hw, u32 tx_alg);
+       /* Set MTL TX queues weight */
+       void (*set_mtl_tx_queue_weight)(struct mac_device_info *hw,
+                                       u32 weight, u32 queue);
+       /* RX MTL queue to RX dma mapping */
+       void (*map_mtl_to_dma)(struct mac_device_info *hw, u32 queue, u32 chan);
+       /* Configure AV Algorithm */
+       void (*config_cbs)(struct mac_device_info *hw, u32 send_slope,
+                          u32 idle_slope, u32 high_credit, u32 low_credit,
+                          u32 queue);
        /* Dump MAC registers */
        void (*dump_regs)(struct mac_device_info *hw, u32 *reg_space);
        /* Handle extra events on specific interrupts hw dependent */
        int (*host_irq_status)(struct mac_device_info *hw,
                               struct stmmac_extra_stats *x);
+       /* Handle MTL interrupts */
+       int (*host_mtl_irq_status)(struct mac_device_info *hw, u32 chan);
        /* Multicast filter setting */
        void (*set_filter)(struct mac_device_info *hw, struct net_device *dev);
        /* Flow control setting */
        void (*flow_ctrl)(struct mac_device_info *hw, unsigned int duplex,
-                         unsigned int fc, unsigned int pause_time);
+                         unsigned int fc, unsigned int pause_time, u32 tx_cnt);
        /* Set power management mode (e.g. magic frame) */
        void (*pmt)(struct mac_device_info *hw, unsigned long mode);
        /* Set/Get Unicast MAC addresses */
@@ -477,7 +524,8 @@ struct stmmac_ops {
        void (*reset_eee_mode)(struct mac_device_info *hw);
        void (*set_eee_timer)(struct mac_device_info *hw, int ls, int tw);
        void (*set_eee_pls)(struct mac_device_info *hw, int link);
-       void (*debug)(void __iomem *ioaddr, struct stmmac_extra_stats *x);
+       void (*debug)(void __iomem *ioaddr, struct stmmac_extra_stats *x,
+                     u32 rx_queues, u32 tx_queues);
        /* PCS calls */
        void (*pcs_ctrl_ane)(void __iomem *ioaddr, bool ane, bool srgmi_ral,
                             bool loopback);
@@ -547,6 +595,11 @@ struct mac_device_info {
        unsigned int ps;
 };
 
+struct stmmac_rx_routing {
+       u32 reg_mask;
+       u32 reg_shift;
+};
+
 struct mac_device_info *dwmac1000_setup(void __iomem *ioaddr, int mcbins,
                                        int perfect_uc_entries,
                                        int *synopsys_id);
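
The common.h changes widen both vtables for multi-queue operation: stmmac_dma_ops callbacks gain a chan argument so each DMA channel can be started, stopped and serviced on its own, and stmmac_ops grows MTL scheduling, priority, routing and CBS hooks. A sketch of the widening step, with a hypothetical trimmed ops table:

    /* Before: one global control. After: per-channel control selected
     * by an extra chan argument, as in the dma_ops diff above. */
    struct dma_ops_sketch {
            void (*start_tx)(void *ioaddr, unsigned int chan);
            void (*stop_tx)(void *ioaddr, unsigned int chan);
            int  (*dma_interrupt)(void *ioaddr, unsigned int chan);
    };

    static void stop_all_tx(const struct dma_ops_sketch *ops, void *ioaddr,
                            unsigned int chan_count)
    {
            unsigned int chan;

            for (chan = 0; chan < chan_count; chan++)
                    ops->stop_tx(ioaddr, chan);
    }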
index 1a3fa3d9f85549c9b5cc10064c6aaac799184adc..dd6a2f9791cc11a390d71bcb5a1b071cd1bca068 100644 (file)
 #include <linux/clk.h>
 #include <linux/clk-provider.h>
 #include <linux/device.h>
+#include <linux/gpio/consumer.h>
 #include <linux/ethtool.h>
 #include <linux/io.h>
+#include <linux/iopoll.h>
 #include <linux/ioport.h>
 #include <linux/module.h>
+#include <linux/of_device.h>
 #include <linux/of_net.h>
 #include <linux/mfd/syscon.h>
 #include <linux/platform_device.h>
+#include <linux/reset.h>
 #include <linux/stmmac.h>
 
 #include "stmmac_platform.h"
+#include "dwmac4.h"
+
+struct tegra_eqos {
+       struct device *dev;
+       void __iomem *regs;
+
+       struct reset_control *rst;
+       struct clk *clk_master;
+       struct clk *clk_slave;
+       struct clk *clk_tx;
+       struct clk *clk_rx;
+
+       struct gpio_desc *reset;
+};
 
 static int dwc_eth_dwmac_config_dt(struct platform_device *pdev,
                                   struct plat_stmmacenet_data *plat_dat)
@@ -106,13 +124,309 @@ static int dwc_eth_dwmac_config_dt(struct platform_device *pdev,
        return 0;
 }
 
+static void *dwc_qos_probe(struct platform_device *pdev,
+                          struct plat_stmmacenet_data *plat_dat,
+                          struct stmmac_resources *stmmac_res)
+{
+       int err;
+
+       plat_dat->stmmac_clk = devm_clk_get(&pdev->dev, "apb_pclk");
+       if (IS_ERR(plat_dat->stmmac_clk)) {
+               dev_err(&pdev->dev, "apb_pclk clock not found.\n");
+               return ERR_CAST(plat_dat->stmmac_clk);
+       }
+
+       err = clk_prepare_enable(plat_dat->stmmac_clk);
+       if (err < 0) {
+               dev_err(&pdev->dev, "failed to enable apb_pclk clock: %d\n",
+                       err);
+               return ERR_PTR(err);
+       }
+
+       plat_dat->pclk = devm_clk_get(&pdev->dev, "phy_ref_clk");
+       if (IS_ERR(plat_dat->pclk)) {
+               dev_err(&pdev->dev, "phy_ref_clk clock not found.\n");
+               err = PTR_ERR(plat_dat->pclk);
+               goto disable;
+       }
+
+       err = clk_prepare_enable(plat_dat->pclk);
+       if (err < 0) {
+               dev_err(&pdev->dev, "failed to enable phy_ref clock: %d\n",
+                       err);
+               goto disable;
+       }
+
+       return NULL;
+
+disable:
+       clk_disable_unprepare(plat_dat->stmmac_clk);
+       return ERR_PTR(err);
+}
+
+static int dwc_qos_remove(struct platform_device *pdev)
+{
+       struct net_device *ndev = platform_get_drvdata(pdev);
+       struct stmmac_priv *priv = netdev_priv(ndev);
+
+       clk_disable_unprepare(priv->plat->pclk);
+       clk_disable_unprepare(priv->plat->stmmac_clk);
+
+       return 0;
+}
+
+#define SDMEMCOMPPADCTRL 0x8800
+#define  SDMEMCOMPPADCTRL_PAD_E_INPUT_OR_E_PWRD BIT(31)
+
+#define AUTO_CAL_CONFIG 0x8804
+#define  AUTO_CAL_CONFIG_START BIT(31)
+#define  AUTO_CAL_CONFIG_ENABLE BIT(29)
+
+#define AUTO_CAL_STATUS 0x880c
+#define  AUTO_CAL_STATUS_ACTIVE BIT(31)
+
+static void tegra_eqos_fix_speed(void *priv, unsigned int speed)
+{
+       struct tegra_eqos *eqos = priv;
+       unsigned long rate = 125000000;
+       bool needs_calibration = false;
+       u32 value;
+       int err;
+
+       switch (speed) {
+       case SPEED_1000:
+               needs_calibration = true;
+               rate = 125000000;
+               break;
+
+       case SPEED_100:
+               needs_calibration = true;
+               rate = 25000000;
+               break;
+
+       case SPEED_10:
+               rate = 2500000;
+               break;
+
+       default:
+               dev_err(eqos->dev, "invalid speed %u\n", speed);
+               break;
+       }
+
+       if (needs_calibration) {
+               /* calibrate */
+               value = readl(eqos->regs + SDMEMCOMPPADCTRL);
+               value |= SDMEMCOMPPADCTRL_PAD_E_INPUT_OR_E_PWRD;
+               writel(value, eqos->regs + SDMEMCOMPPADCTRL);
+
+               udelay(1);
+
+               value = readl(eqos->regs + AUTO_CAL_CONFIG);
+               value |= AUTO_CAL_CONFIG_START | AUTO_CAL_CONFIG_ENABLE;
+               writel(value, eqos->regs + AUTO_CAL_CONFIG);
+
+               err = readl_poll_timeout_atomic(eqos->regs + AUTO_CAL_STATUS,
+                                               value,
+                                               value & AUTO_CAL_STATUS_ACTIVE,
+                                               1, 10);
+               if (err < 0) {
+                       dev_err(eqos->dev, "calibration did not start\n");
+                       goto failed;
+               }
+
+               err = readl_poll_timeout_atomic(eqos->regs + AUTO_CAL_STATUS,
+                                               value,
+                                               (value & AUTO_CAL_STATUS_ACTIVE) == 0,
+                                               20, 200);
+               if (err < 0) {
+                       dev_err(eqos->dev, "calibration didn't finish\n");
+                       goto failed;
+               }
+
+       failed:
+               value = readl(eqos->regs + SDMEMCOMPPADCTRL);
+               value &= ~SDMEMCOMPPADCTRL_PAD_E_INPUT_OR_E_PWRD;
+               writel(value, eqos->regs + SDMEMCOMPPADCTRL);
+       } else {
+               value = readl(eqos->regs + AUTO_CAL_CONFIG);
+               value &= ~AUTO_CAL_CONFIG_ENABLE;
+               writel(value, eqos->regs + AUTO_CAL_CONFIG);
+       }
+
+       err = clk_set_rate(eqos->clk_tx, rate);
+       if (err < 0)
+               dev_err(eqos->dev, "failed to set TX rate: %d\n", err);
+}
+
+static int tegra_eqos_init(struct platform_device *pdev, void *priv)
+{
+       struct tegra_eqos *eqos = priv;
+       unsigned long rate;
+       u32 value;
+
+       rate = clk_get_rate(eqos->clk_slave);
+
+       value = (rate / 1000000) - 1;
+       writel(value, eqos->regs + GMAC_1US_TIC_COUNTER);
+
+       return 0;
+}
+
+static void *tegra_eqos_probe(struct platform_device *pdev,
+                             struct plat_stmmacenet_data *data,
+                             struct stmmac_resources *res)
+{
+       struct tegra_eqos *eqos;
+       int err;
+
+       eqos = devm_kzalloc(&pdev->dev, sizeof(*eqos), GFP_KERNEL);
+       if (!eqos) {
+               err = -ENOMEM;
+               goto error;
+       }
+
+       eqos->dev = &pdev->dev;
+       eqos->regs = res->addr;
+
+       eqos->clk_master = devm_clk_get(&pdev->dev, "master_bus");
+       if (IS_ERR(eqos->clk_master)) {
+               err = PTR_ERR(eqos->clk_master);
+               goto error;
+       }
+
+       err = clk_prepare_enable(eqos->clk_master);
+       if (err < 0)
+               goto error;
+
+       eqos->clk_slave = devm_clk_get(&pdev->dev, "slave_bus");
+       if (IS_ERR(eqos->clk_slave)) {
+               err = PTR_ERR(eqos->clk_slave);
+               goto disable_master;
+       }
+
+       data->stmmac_clk = eqos->clk_slave;
+
+       err = clk_prepare_enable(eqos->clk_slave);
+       if (err < 0)
+               goto disable_master;
+
+       eqos->clk_rx = devm_clk_get(&pdev->dev, "rx");
+       if (IS_ERR(eqos->clk_rx)) {
+               err = PTR_ERR(eqos->clk_rx);
+               goto disable_slave;
+       }
+
+       err = clk_prepare_enable(eqos->clk_rx);
+       if (err < 0)
+               goto disable_slave;
+
+       eqos->clk_tx = devm_clk_get(&pdev->dev, "tx");
+       if (IS_ERR(eqos->clk_tx)) {
+               err = PTR_ERR(eqos->clk_tx);
+               goto disable_rx;
+       }
+
+       err = clk_prepare_enable(eqos->clk_tx);
+       if (err < 0)
+               goto disable_rx;
+
+       eqos->reset = devm_gpiod_get(&pdev->dev, "phy-reset", GPIOD_OUT_HIGH);
+       if (IS_ERR(eqos->reset)) {
+               err = PTR_ERR(eqos->reset);
+               goto disable_tx;
+       }
+
+       usleep_range(2000, 4000);
+       gpiod_set_value(eqos->reset, 0);
+
+       eqos->rst = devm_reset_control_get(&pdev->dev, "eqos");
+       if (IS_ERR(eqos->rst)) {
+               err = PTR_ERR(eqos->rst);
+               goto reset_phy;
+       }
+
+       err = reset_control_assert(eqos->rst);
+       if (err < 0)
+               goto reset_phy;
+
+       usleep_range(2000, 4000);
+
+       err = reset_control_deassert(eqos->rst);
+       if (err < 0)
+               goto reset_phy;
+
+       usleep_range(2000, 4000);
+
+       data->fix_mac_speed = tegra_eqos_fix_speed;
+       data->init = tegra_eqos_init;
+       data->bsp_priv = eqos;
+
+       err = tegra_eqos_init(pdev, eqos);
+       if (err < 0)
+               goto reset;
+
+out:
+       return eqos;
+
+reset:
+       reset_control_assert(eqos->rst);
+reset_phy:
+       gpiod_set_value(eqos->reset, 1);
+disable_tx:
+       clk_disable_unprepare(eqos->clk_tx);
+disable_rx:
+       clk_disable_unprepare(eqos->clk_rx);
+disable_slave:
+       clk_disable_unprepare(eqos->clk_slave);
+disable_master:
+       clk_disable_unprepare(eqos->clk_master);
+error:
+       eqos = ERR_PTR(err);
+       goto out;
+}
+
+static int tegra_eqos_remove(struct platform_device *pdev)
+{
+       struct tegra_eqos *eqos = get_stmmac_bsp_priv(&pdev->dev);
+
+       reset_control_assert(eqos->rst);
+       gpiod_set_value(eqos->reset, 1);
+       clk_disable_unprepare(eqos->clk_tx);
+       clk_disable_unprepare(eqos->clk_rx);
+       clk_disable_unprepare(eqos->clk_slave);
+       clk_disable_unprepare(eqos->clk_master);
+
+       return 0;
+}
+
+struct dwc_eth_dwmac_data {
+       void *(*probe)(struct platform_device *pdev,
+                      struct plat_stmmacenet_data *data,
+                      struct stmmac_resources *res);
+       int (*remove)(struct platform_device *pdev);
+};
+
+static const struct dwc_eth_dwmac_data dwc_qos_data = {
+       .probe = dwc_qos_probe,
+       .remove = dwc_qos_remove,
+};
+
+static const struct dwc_eth_dwmac_data tegra_eqos_data = {
+       .probe = tegra_eqos_probe,
+       .remove = tegra_eqos_remove,
+};
+
 static int dwc_eth_dwmac_probe(struct platform_device *pdev)
 {
+       const struct dwc_eth_dwmac_data *data;
        struct plat_stmmacenet_data *plat_dat;
        struct stmmac_resources stmmac_res;
        struct resource *res;
+       void *priv;
        int ret;
 
+       data = of_device_get_match_data(&pdev->dev);
+
        memset(&stmmac_res, 0, sizeof(struct stmmac_resources));
 
        /**
@@ -138,39 +452,26 @@ static int dwc_eth_dwmac_probe(struct platform_device *pdev)
        if (IS_ERR(plat_dat))
                return PTR_ERR(plat_dat);
 
-       plat_dat->stmmac_clk = devm_clk_get(&pdev->dev, "apb_pclk");
-       if (IS_ERR(plat_dat->stmmac_clk)) {
-               dev_err(&pdev->dev, "apb_pclk clock not found.\n");
-               ret = PTR_ERR(plat_dat->stmmac_clk);
-               plat_dat->stmmac_clk = NULL;
-               goto err_remove_config_dt;
-       }
-       clk_prepare_enable(plat_dat->stmmac_clk);
-
-       plat_dat->pclk = devm_clk_get(&pdev->dev, "phy_ref_clk");
-       if (IS_ERR(plat_dat->pclk)) {
-               dev_err(&pdev->dev, "phy_ref_clk clock not found.\n");
-               ret = PTR_ERR(plat_dat->pclk);
-               plat_dat->pclk = NULL;
-               goto err_out_clk_dis_phy;
+       priv = data->probe(pdev, plat_dat, &stmmac_res);
+       if (IS_ERR(priv)) {
+               ret = PTR_ERR(priv);
+               dev_err(&pdev->dev, "failed to probe subdriver: %d\n", ret);
+               goto remove_config;
        }
-       clk_prepare_enable(plat_dat->pclk);
 
        ret = dwc_eth_dwmac_config_dt(pdev, plat_dat);
        if (ret)
-               goto err_out_clk_dis_aper;
+               goto remove;
 
        ret = stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res);
        if (ret)
-               goto err_out_clk_dis_aper;
+               goto remove;
 
-       return 0;
+       return ret;
 
-err_out_clk_dis_aper:
-       clk_disable_unprepare(plat_dat->pclk);
-err_out_clk_dis_phy:
-       clk_disable_unprepare(plat_dat->stmmac_clk);
-err_remove_config_dt:
+remove:
+       data->remove(pdev);
+remove_config:
        stmmac_remove_config_dt(pdev, plat_dat);
 
        return ret;
@@ -178,11 +479,29 @@ err_remove_config_dt:
 
 static int dwc_eth_dwmac_remove(struct platform_device *pdev)
 {
-       return stmmac_pltfr_remove(pdev);
+       struct net_device *ndev = platform_get_drvdata(pdev);
+       struct stmmac_priv *priv = netdev_priv(ndev);
+       const struct dwc_eth_dwmac_data *data;
+       int err;
+
+       data = of_device_get_match_data(&pdev->dev);
+
+       err = stmmac_dvr_remove(&pdev->dev);
+       if (err < 0)
+               dev_err(&pdev->dev, "failed to remove platform: %d\n", err);
+
+       err = data->remove(pdev);
+       if (err < 0)
+               dev_err(&pdev->dev, "failed to remove subdriver: %d\n", err);
+
+       stmmac_remove_config_dt(pdev, priv->plat);
+
+       return err;
 }
 
 static const struct of_device_id dwc_eth_dwmac_match[] = {
-       { .compatible = "snps,dwc-qos-ethernet-4.10", },
+       { .compatible = "snps,dwc-qos-ethernet-4.10", .data = &dwc_qos_data },
+       { .compatible = "nvidia,tegra186-eqos", .data = &tegra_eqos_data },
        { }
 };
 MODULE_DEVICE_TABLE(of, dwc_eth_dwmac_match);
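
The glue-layer rework above replaces a hard-wired probe with match-data dispatch: each compatible string carries a dwc_eth_dwmac_data pointer naming the variant's probe/remove pair, fetched at runtime via of_device_get_match_data(). A minimal userspace sketch of the mechanism, with a string lookup standing in for the OF helper:

    #include <stddef.h>
    #include <string.h>

    struct variant_data {
            const char *compatible;
            int (*probe)(void);
            int (*remove)(void);
    };

    /* Stand-in for of_device_get_match_data(): resolve the variant by
     * its compatible string instead of by device node. */
    static const struct variant_data *
    get_match_data(const struct variant_data *table, size_t n,
                   const char *compatible)
    {
            size_t i;

            for (i = 0; i < n; i++)
                    if (strcmp(table[i].compatible, compatible) == 0)
                            return &table[i];
            return NULL;
    }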
index e5db6ac362354317bff2ecdf0d0344a26831752a..f0df5193f047ba534ace8e012df673f5c6cad83f 100644 (file)
@@ -74,6 +74,10 @@ struct rk_priv_data {
 #define GRF_BIT(nr)    (BIT(nr) | BIT(nr+16))
 #define GRF_CLR_BIT(nr)        (BIT(nr+16))
 
+#define DELAY_ENABLE(soc, tx, rx) \
+       (((tx) ? soc##_GMAC_TXCLK_DLY_ENABLE : soc##_GMAC_TXCLK_DLY_DISABLE) | \
+        ((rx) ? soc##_GMAC_RXCLK_DLY_ENABLE : soc##_GMAC_RXCLK_DLY_DISABLE))
+
 #define RK3228_GRF_MAC_CON0    0x0900
 #define RK3228_GRF_MAC_CON1    0x0904
 
@@ -115,8 +119,7 @@ static void rk3228_set_to_rgmii(struct rk_priv_data *bsp_priv,
        regmap_write(bsp_priv->grf, RK3228_GRF_MAC_CON1,
                     RK3228_GMAC_PHY_INTF_SEL_RGMII |
                     RK3228_GMAC_RMII_MODE_CLR |
-                    RK3228_GMAC_RXCLK_DLY_ENABLE |
-                    RK3228_GMAC_TXCLK_DLY_ENABLE);
+                    DELAY_ENABLE(RK3228, tx_delay, rx_delay));
 
        regmap_write(bsp_priv->grf, RK3228_GRF_MAC_CON0,
                     RK3228_GMAC_CLK_RX_DL_CFG(rx_delay) |
@@ -232,8 +235,7 @@ static void rk3288_set_to_rgmii(struct rk_priv_data *bsp_priv,
                     RK3288_GMAC_PHY_INTF_SEL_RGMII |
                     RK3288_GMAC_RMII_MODE_CLR);
        regmap_write(bsp_priv->grf, RK3288_GRF_SOC_CON3,
-                    RK3288_GMAC_RXCLK_DLY_ENABLE |
-                    RK3288_GMAC_TXCLK_DLY_ENABLE |
+                    DELAY_ENABLE(RK3288, tx_delay, rx_delay) |
                     RK3288_GMAC_CLK_RX_DL_CFG(rx_delay) |
                     RK3288_GMAC_CLK_TX_DL_CFG(tx_delay));
 }
@@ -460,8 +462,7 @@ static void rk3366_set_to_rgmii(struct rk_priv_data *bsp_priv,
                     RK3366_GMAC_PHY_INTF_SEL_RGMII |
                     RK3366_GMAC_RMII_MODE_CLR);
        regmap_write(bsp_priv->grf, RK3366_GRF_SOC_CON7,
-                    RK3366_GMAC_RXCLK_DLY_ENABLE |
-                    RK3366_GMAC_TXCLK_DLY_ENABLE |
+                    DELAY_ENABLE(RK3366, tx_delay, rx_delay) |
                     RK3366_GMAC_CLK_RX_DL_CFG(rx_delay) |
                     RK3366_GMAC_CLK_TX_DL_CFG(tx_delay));
 }
@@ -572,8 +573,7 @@ static void rk3368_set_to_rgmii(struct rk_priv_data *bsp_priv,
                     RK3368_GMAC_PHY_INTF_SEL_RGMII |
                     RK3368_GMAC_RMII_MODE_CLR);
        regmap_write(bsp_priv->grf, RK3368_GRF_SOC_CON16,
-                    RK3368_GMAC_RXCLK_DLY_ENABLE |
-                    RK3368_GMAC_TXCLK_DLY_ENABLE |
+                    DELAY_ENABLE(RK3368, tx_delay, rx_delay) |
                     RK3368_GMAC_CLK_RX_DL_CFG(rx_delay) |
                     RK3368_GMAC_CLK_TX_DL_CFG(tx_delay));
 }
@@ -684,8 +684,7 @@ static void rk3399_set_to_rgmii(struct rk_priv_data *bsp_priv,
                     RK3399_GMAC_PHY_INTF_SEL_RGMII |
                     RK3399_GMAC_RMII_MODE_CLR);
        regmap_write(bsp_priv->grf, RK3399_GRF_SOC_CON6,
-                    RK3399_GMAC_RXCLK_DLY_ENABLE |
-                    RK3399_GMAC_TXCLK_DLY_ENABLE |
+                    DELAY_ENABLE(RK3399, tx_delay, rx_delay) |
                     RK3399_GMAC_CLK_RX_DL_CFG(rx_delay) |
                     RK3399_GMAC_CLK_TX_DL_CFG(tx_delay));
 }
@@ -985,14 +984,29 @@ static int rk_gmac_powerup(struct rk_priv_data *bsp_priv)
                return ret;
 
        /*rmii or rgmii*/
-       if (bsp_priv->phy_iface == PHY_INTERFACE_MODE_RGMII) {
+       switch (bsp_priv->phy_iface) {
+       case PHY_INTERFACE_MODE_RGMII:
                dev_info(dev, "init for RGMII\n");
                bsp_priv->ops->set_to_rgmii(bsp_priv, bsp_priv->tx_delay,
                                            bsp_priv->rx_delay);
-       } else if (bsp_priv->phy_iface == PHY_INTERFACE_MODE_RMII) {
+               break;
+       case PHY_INTERFACE_MODE_RGMII_ID:
+               dev_info(dev, "init for RGMII_ID\n");
+               bsp_priv->ops->set_to_rgmii(bsp_priv, 0, 0);
+               break;
+       case PHY_INTERFACE_MODE_RGMII_RXID:
+               dev_info(dev, "init for RGMII_RXID\n");
+               bsp_priv->ops->set_to_rgmii(bsp_priv, bsp_priv->tx_delay, 0);
+               break;
+       case PHY_INTERFACE_MODE_RGMII_TXID:
+               dev_info(dev, "init for RGMII_TXID\n");
+               bsp_priv->ops->set_to_rgmii(bsp_priv, 0, bsp_priv->rx_delay);
+               break;
+       case PHY_INTERFACE_MODE_RMII:
                dev_info(dev, "init for RMII\n");
                bsp_priv->ops->set_to_rmii(bsp_priv);
-       } else {
+               break;
+       default:
                dev_err(dev, "NO interface defined!\n");
        }
 
@@ -1022,12 +1036,19 @@ static void rk_fix_speed(void *priv, unsigned int speed)
        struct rk_priv_data *bsp_priv = priv;
        struct device *dev = &bsp_priv->pdev->dev;
 
-       if (bsp_priv->phy_iface == PHY_INTERFACE_MODE_RGMII)
+       switch (bsp_priv->phy_iface) {
+       case PHY_INTERFACE_MODE_RGMII:
+       case PHY_INTERFACE_MODE_RGMII_ID:
+       case PHY_INTERFACE_MODE_RGMII_RXID:
+       case PHY_INTERFACE_MODE_RGMII_TXID:
                bsp_priv->ops->set_rgmii_speed(bsp_priv, speed);
-       else if (bsp_priv->phy_iface == PHY_INTERFACE_MODE_RMII)
+               break;
+       case PHY_INTERFACE_MODE_RMII:
                bsp_priv->ops->set_rmii_speed(bsp_priv, speed);
-       else
+               break;
+       default:
                dev_err(dev, "unsupported interface %d", bsp_priv->phy_iface);
+       }
 }
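
The rockchip hunks add the RGMII internal-delay variants: with RGMII_ID the PHY inserts both clock delays so the MAC programs none, while _RXID and _TXID split the work; the new DELAY_ENABLE() macro likewise enables a delay line only when the MAC still owns that direction. A compilable sketch of the mapping, using a stand-in enum rather than the kernel's phy_interface_t:

    enum rgmii_mode { RGMII, RGMII_ID, RGMII_RXID, RGMII_TXID };

    /* Which delays the MAC must still insert; the PHY supplies the rest. */
    static void mac_delays(enum rgmii_mode mode, int tx, int rx,
                           int *mac_tx, int *mac_rx)
    {
            switch (mode) {
            case RGMII:      *mac_tx = tx; *mac_rx = rx; break; /* MAC adds both */
            case RGMII_ID:   *mac_tx = 0;  *mac_rx = 0;  break; /* PHY adds both */
            case RGMII_RXID: *mac_tx = tx; *mac_rx = 0;  break; /* PHY adds RX  */
            case RGMII_TXID: *mac_tx = 0;  *mac_rx = rx; break; /* PHY adds TX  */
            }
    }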
 
 static int rk_gmac_probe(struct platform_device *pdev)
index 19b9b308709953cc9327961d3eb3bea527848bb7..f3d9305e5f706bc407c51a5d98af6a28b6404f42 100644 (file)
@@ -216,7 +216,8 @@ static void dwmac1000_set_filter(struct mac_device_info *hw,
 
 
 static void dwmac1000_flow_ctrl(struct mac_device_info *hw, unsigned int duplex,
-                               unsigned int fc, unsigned int pause_time)
+                               unsigned int fc, unsigned int pause_time,
+                               u32 tx_cnt)
 {
        void __iomem *ioaddr = hw->pcsr;
        /* Set flow such that DZPQ in Mac Register 6 is 0,
@@ -412,7 +413,8 @@ static void dwmac1000_get_adv_lp(void __iomem *ioaddr, struct rgmii_adv *adv)
        dwmac_get_adv_lp(ioaddr, GMAC_PCS_BASE, adv);
 }
 
-static void dwmac1000_debug(void __iomem *ioaddr, struct stmmac_extra_stats *x)
+static void dwmac1000_debug(void __iomem *ioaddr, struct stmmac_extra_stats *x,
+                           u32 rx_queues, u32 tx_queues)
 {
        u32 value = readl(ioaddr + GMAC_DEBUG);
 
@@ -488,6 +490,7 @@ static void dwmac1000_debug(void __iomem *ioaddr, struct stmmac_extra_stats *x)
 
 static const struct stmmac_ops dwmac1000_ops = {
        .core_init = dwmac1000_core_init,
+       .set_mac = stmmac_set_mac,
        .rx_ipc = dwmac1000_rx_ipc_enable,
        .dump_regs = dwmac1000_dump_regs,
        .host_irq_status = dwmac1000_irq_status,
index d3654a4470461e1f44282fac163dbcd9b6827df6..471a9aa6ac94c14d46d4dcf2d956965948193c56 100644 (file)
@@ -247,7 +247,8 @@ static void dwmac1000_get_hw_feature(void __iomem *ioaddr,
        dma_cap->enh_desc = (hw_cap & DMA_HW_FEAT_ENHDESSEL) >> 24;
 }
 
-static void dwmac1000_rx_watchdog(void __iomem *ioaddr, u32 riwt)
+static void dwmac1000_rx_watchdog(void __iomem *ioaddr, u32 riwt,
+                                 u32 number_chan)
 {
        writel(riwt, ioaddr + DMA_RX_WATCHDOG);
 }
index e370ccec6176671d1717d24d88917b88f69b1bd2..1b360910548473a486372835b3e167456528d508 100644 (file)
@@ -131,7 +131,8 @@ static void dwmac100_set_filter(struct mac_device_info *hw,
 }
 
 static void dwmac100_flow_ctrl(struct mac_device_info *hw, unsigned int duplex,
-                              unsigned int fc, unsigned int pause_time)
+                              unsigned int fc, unsigned int pause_time,
+                              u32 tx_cnt)
 {
        void __iomem *ioaddr = hw->pcsr;
        unsigned int flow = MAC_FLOW_CTRL_ENABLE;
@@ -149,6 +150,7 @@ static void dwmac100_pmt(struct mac_device_info *hw, unsigned long mode)
 
 static const struct stmmac_ops dwmac100_ops = {
        .core_init = dwmac100_core_init,
+       .set_mac = stmmac_set_mac,
        .rx_ipc = dwmac100_rx_ipc_enable,
        .dump_regs = dwmac100_dump_mac_regs,
        .host_irq_status = dwmac100_irq_status,
index db45134fddf04e50c703254b8570b744004123f8..d74cedf2a397580aeb6c62737a35030e65053367 100644 (file)
 #define GMAC_HASH_TAB_32_63            0x00000014
 #define GMAC_RX_FLOW_CTRL              0x00000090
 #define GMAC_QX_TX_FLOW_CTRL(x)                (0x70 + x * 4)
+#define GMAC_TXQ_PRTY_MAP0             0x98
+#define GMAC_TXQ_PRTY_MAP1             0x9C
 #define GMAC_RXQ_CTRL0                 0x000000a0
+#define GMAC_RXQ_CTRL1                 0x000000a4
+#define GMAC_RXQ_CTRL2                 0x000000a8
+#define GMAC_RXQ_CTRL3                 0x000000ac
 #define GMAC_INT_STATUS                        0x000000b0
 #define GMAC_INT_EN                    0x000000b4
+#define GMAC_1US_TIC_COUNTER           0x000000dc
 #define GMAC_PCS_BASE                  0x000000e0
 #define GMAC_PHYIF_CONTROL_STATUS      0x000000f8
 #define GMAC_PMT                       0x000000c0
 #define GMAC_ADDR_HIGH(reg)            (0x300 + reg * 8)
 #define GMAC_ADDR_LOW(reg)             (0x304 + reg * 8)
 
+/* RX Queues Routing */
+#define GMAC_RXQCTRL_AVCPQ_MASK                GENMASK(2, 0)
+#define GMAC_RXQCTRL_AVCPQ_SHIFT       0
+#define GMAC_RXQCTRL_PTPQ_MASK         GENMASK(6, 4)
+#define GMAC_RXQCTRL_PTPQ_SHIFT                4
+#define GMAC_RXQCTRL_DCBCPQ_MASK       GENMASK(10, 8)
+#define GMAC_RXQCTRL_DCBCPQ_SHIFT      8
+#define GMAC_RXQCTRL_UPQ_MASK          GENMASK(14, 12)
+#define GMAC_RXQCTRL_UPQ_SHIFT         12
+#define GMAC_RXQCTRL_MCBCQ_MASK                GENMASK(18, 16)
+#define GMAC_RXQCTRL_MCBCQ_SHIFT       16
+#define GMAC_RXQCTRL_MCBCQEN           BIT(20)
+#define GMAC_RXQCTRL_MCBCQEN_SHIFT     20
+#define GMAC_RXQCTRL_TACPQE            BIT(21)
+#define GMAC_RXQCTRL_TACPQE_SHIFT      21
+
 /* MAC Packet Filtering */
 #define GMAC_PACKET_FILTER_PR          BIT(0)
 #define GMAC_PACKET_FILTER_HMC         BIT(2)
 /* MAC Flow Control RX */
 #define GMAC_RX_FLOW_CTRL_RFE          BIT(0)
 
+/* RX Queues Priorities */
+#define GMAC_RXQCTRL_PSRQX_MASK(x)     GENMASK(7 + ((x) * 8), 0 + ((x) * 8))
+#define GMAC_RXQCTRL_PSRQX_SHIFT(x)    ((x) * 8)
+
+/* TX Queues Priorities */
+#define GMAC_TXQCTRL_PSTQX_MASK(x)     GENMASK(7 + ((x) * 8), 0 + ((x) * 8))
+#define GMAC_TXQCTRL_PSTQX_SHIFT(x)    ((x) * 8)
+
 /* MAC Flow Control TX */
 #define GMAC_TX_FLOW_CTRL_TFE          BIT(1)
 #define GMAC_TX_FLOW_CTRL_PT_SHIFT     16
@@ -148,6 +178,8 @@ enum power_event {
 /* MAC HW features1 bitmap */
 #define GMAC_HW_FEAT_AVSEL             BIT(20)
 #define GMAC_HW_TSOEN                  BIT(18)
+#define GMAC_HW_TXFIFOSIZE             GENMASK(10, 6)
+#define GMAC_HW_RXFIFOSIZE             GENMASK(4, 0)
 
 /* MAC HW features2 bitmap */
 #define GMAC_HW_FEAT_TXCHCNT           GENMASK(21, 18)
@@ -161,8 +193,25 @@ enum power_event {
 #define GMAC_HI_REG_AE                 BIT(31)
 
 /*  MTL registers */
+#define MTL_OPERATION_MODE             0x00000c00
+#define MTL_OPERATION_SCHALG_MASK      GENMASK(6, 5)
+#define MTL_OPERATION_SCHALG_WRR       (0x0 << 5)
+#define MTL_OPERATION_SCHALG_WFQ       (0x1 << 5)
+#define MTL_OPERATION_SCHALG_DWRR      (0x2 << 5)
+#define MTL_OPERATION_SCHALG_SP                (0x3 << 5)
+#define MTL_OPERATION_RAA              BIT(2)
+#define MTL_OPERATION_RAA_SP           (0x0 << 2)
+#define MTL_OPERATION_RAA_WSP          (0x1 << 2)
+
 #define MTL_INT_STATUS                 0x00000c20
-#define MTL_INT_Q0                     BIT(0)
+#define MTL_INT_QX(x)                  BIT(x)
+
+#define MTL_RXQ_DMA_MAP0               0x00000c30 /* queue 0 to 3 */
+#define MTL_RXQ_DMA_MAP1               0x00000c34 /* queue 4 to 7 */
+#define MTL_RXQ_DMA_Q04MDMACH_MASK     GENMASK(3, 0)
+#define MTL_RXQ_DMA_Q04MDMACH(x)       ((x) << 0)
+#define MTL_RXQ_DMA_QXMDMACH_MASK(x)   GENMASK(11 + (8 * ((x) - 1)), 8 * (x))
+#define MTL_RXQ_DMA_QXMDMACH(chan, q)  ((chan) << (8 * (q)))
 
 #define MTL_CHAN_BASE_ADDR             0x00000d00
 #define MTL_CHAN_BASE_OFFSET           0x40
@@ -180,6 +229,7 @@ enum power_event {
 #define MTL_OP_MODE_TSF                        BIT(1)
 
 #define MTL_OP_MODE_TQS_MASK           GENMASK(24, 16)
+#define MTL_OP_MODE_TQS_SHIFT          16
 
 #define MTL_OP_MODE_TTC_MASK           0x70
 #define MTL_OP_MODE_TTC_SHIFT          4
@@ -193,6 +243,17 @@ enum power_event {
 #define MTL_OP_MODE_TTC_384            (6 << MTL_OP_MODE_TTC_SHIFT)
 #define MTL_OP_MODE_TTC_512            (7 << MTL_OP_MODE_TTC_SHIFT)
 
+#define MTL_OP_MODE_RQS_MASK           GENMASK(29, 20)
+#define MTL_OP_MODE_RQS_SHIFT          20
+
+#define MTL_OP_MODE_RFD_MASK           GENMASK(19, 14)
+#define MTL_OP_MODE_RFD_SHIFT          14
+
+#define MTL_OP_MODE_RFA_MASK           GENMASK(13, 8)
+#define MTL_OP_MODE_RFA_SHIFT          8
+
+#define MTL_OP_MODE_EHFC               BIT(7)
+
 #define MTL_OP_MODE_RTC_MASK           0x18
 #define MTL_OP_MODE_RTC_SHIFT          3
 
@@ -201,6 +262,46 @@ enum power_event {
 #define MTL_OP_MODE_RTC_96             (2 << MTL_OP_MODE_RTC_SHIFT)
 #define MTL_OP_MODE_RTC_128            (3 << MTL_OP_MODE_RTC_SHIFT)
 
+/* MTL ETS Control register */
+#define MTL_ETS_CTRL_BASE_ADDR         0x00000d10
+#define MTL_ETS_CTRL_BASE_OFFSET       0x40
+#define MTL_ETSX_CTRL_BASE_ADDR(x)     (MTL_ETS_CTRL_BASE_ADDR + \
+                                       ((x) * MTL_ETS_CTRL_BASE_OFFSET))
+
+#define MTL_ETS_CTRL_CC                        BIT(3)
+#define MTL_ETS_CTRL_AVALG             BIT(2)
+
+/* MTL Queue Quantum Weight */
+#define MTL_TXQ_WEIGHT_BASE_ADDR       0x00000d18
+#define MTL_TXQ_WEIGHT_BASE_OFFSET     0x40
+#define MTL_TXQX_WEIGHT_BASE_ADDR(x)   (MTL_TXQ_WEIGHT_BASE_ADDR + \
+                                       ((x) * MTL_TXQ_WEIGHT_BASE_OFFSET))
+#define MTL_TXQ_WEIGHT_ISCQW_MASK      GENMASK(20, 0)
+
+/* MTL sendSlopeCredit register */
+#define MTL_SEND_SLP_CRED_BASE_ADDR    0x00000d1c
+#define MTL_SEND_SLP_CRED_OFFSET       0x40
+#define MTL_SEND_SLP_CREDX_BASE_ADDR(x)        (MTL_SEND_SLP_CRED_BASE_ADDR + \
+                                       ((x) * MTL_SEND_SLP_CRED_OFFSET))
+
+#define MTL_SEND_SLP_CRED_SSC_MASK     GENMASK(13, 0)
+
+/* MTL hiCredit register */
+#define MTL_HIGH_CRED_BASE_ADDR                0x00000d20
+#define MTL_HIGH_CRED_OFFSET           0x40
+#define MTL_HIGH_CREDX_BASE_ADDR(x)    (MTL_HIGH_CRED_BASE_ADDR + \
+                                       ((x) * MTL_HIGH_CRED_OFFSET))
+
+#define MTL_HIGH_CRED_HC_MASK          GENMASK(28, 0)
+
+/* MTL loCredit register */
+#define MTL_LOW_CRED_BASE_ADDR         0x00000d24
+#define MTL_LOW_CRED_OFFSET            0x40
+#define MTL_LOW_CREDX_BASE_ADDR(x)     (MTL_LOW_CRED_BASE_ADDR + \
+                                       ((x) * MTL_LOW_CRED_OFFSET))
+
+#define MTL_HIGH_CRED_LC_MASK          GENMASK(28, 0)
+
 /*  MTL debug */
 #define MTL_DEBUG_TXSTSFSTS            BIT(5)
 #define MTL_DEBUG_TXFSTS               BIT(4)
index 1e79e6529c4a79a805663e2d65f2cec558f362e3..48793f2e93075a9cabd57b3a4e0ee86765368bae 100644 (file)
@@ -59,17 +59,211 @@ static void dwmac4_core_init(struct mac_device_info *hw, int mtu)
        writel(value, ioaddr + GMAC_INT_EN);
 }
 
-static void dwmac4_rx_queue_enable(struct mac_device_info *hw, u32 queue)
+static void dwmac4_rx_queue_enable(struct mac_device_info *hw,
+                                  u8 mode, u32 queue)
 {
        void __iomem *ioaddr = hw->pcsr;
        u32 value = readl(ioaddr + GMAC_RXQ_CTRL0);
 
        value &= GMAC_RX_QUEUE_CLEAR(queue);
-       value |= GMAC_RX_AV_QUEUE_ENABLE(queue);
+       if (mode == MTL_QUEUE_AVB)
+               value |= GMAC_RX_AV_QUEUE_ENABLE(queue);
+       else if (mode == MTL_QUEUE_DCB)
+               value |= GMAC_RX_DCB_QUEUE_ENABLE(queue);
 
        writel(value, ioaddr + GMAC_RXQ_CTRL0);
 }
 
+static void dwmac4_rx_queue_priority(struct mac_device_info *hw,
+                                    u32 prio, u32 queue)
+{
+       void __iomem *ioaddr = hw->pcsr;
+       u32 base_register;
+       u32 value;
+
+       base_register = (queue < 4) ? GMAC_RXQ_CTRL2 : GMAC_RXQ_CTRL3;
+
+       value = readl(ioaddr + base_register);
+
+       value &= ~GMAC_RXQCTRL_PSRQX_MASK(queue);
+       value |= (prio << GMAC_RXQCTRL_PSRQX_SHIFT(queue)) &
+                                               GMAC_RXQCTRL_PSRQX_MASK(queue);
+       writel(value, ioaddr + base_register);
+}
+
+static void dwmac4_tx_queue_priority(struct mac_device_info *hw,
+                                    u32 prio, u32 queue)
+{
+       void __iomem *ioaddr = hw->pcsr;
+       u32 base_register;
+       u32 value;
+
+       base_register = (queue < 4) ? GMAC_TXQ_PRTY_MAP0 : GMAC_TXQ_PRTY_MAP1;
+
+       value = readl(ioaddr + base_register);
+
+       value &= ~GMAC_TXQCTRL_PSTQX_MASK(queue);
+       value |= (prio << GMAC_TXQCTRL_PSTQX_SHIFT(queue)) &
+                                               GMAC_TXQCTRL_PSTQX_MASK(queue);
+
+       writel(value, ioaddr + base_register);
+}
+
+static void dwmac4_tx_queue_routing(struct mac_device_info *hw,
+                                   u8 packet, u32 queue)
+{
+       void __iomem *ioaddr = hw->pcsr;
+       u32 value;
+
+       const struct stmmac_rx_routing route_possibilities[] = {
+               { GMAC_RXQCTRL_AVCPQ_MASK, GMAC_RXQCTRL_AVCPQ_SHIFT },
+               { GMAC_RXQCTRL_PTPQ_MASK, GMAC_RXQCTRL_PTPQ_SHIFT },
+               { GMAC_RXQCTRL_DCBCPQ_MASK, GMAC_RXQCTRL_DCBCPQ_SHIFT },
+               { GMAC_RXQCTRL_UPQ_MASK, GMAC_RXQCTRL_UPQ_SHIFT },
+               { GMAC_RXQCTRL_MCBCQ_MASK, GMAC_RXQCTRL_MCBCQ_SHIFT },
+       };
+
+       value = readl(ioaddr + GMAC_RXQ_CTRL1);
+
+       /* routing configuration */
+       value &= ~route_possibilities[packet - 1].reg_mask;
+       value |= (queue << route_possibilities[packet - 1].reg_shift) &
+                route_possibilities[packet - 1].reg_mask;
+
+       /* some packets require extra ops */
+       if (packet == PACKET_AVCPQ) {
+               value &= ~GMAC_RXQCTRL_TACPQE;
+               value |= 0x1 << GMAC_RXQCTRL_TACPQE_SHIFT;
+       } else if (packet == PACKET_MCBCQ) {
+               value &= ~GMAC_RXQCTRL_MCBCQEN;
+               value |= 0x1 << GMAC_RXQCTRL_MCBCQEN_SHIFT;
+       }
+
+       writel(value, ioaddr + GMAC_RXQ_CTRL1);
+}
+
+static void dwmac4_prog_mtl_rx_algorithms(struct mac_device_info *hw,
+                                         u32 rx_alg)
+{
+       void __iomem *ioaddr = hw->pcsr;
+       u32 value = readl(ioaddr + MTL_OPERATION_MODE);
+
+       value &= ~MTL_OPERATION_RAA;
+       switch (rx_alg) {
+       case MTL_RX_ALGORITHM_SP:
+               value |= MTL_OPERATION_RAA_SP;
+               break;
+       case MTL_RX_ALGORITHM_WSP:
+               value |= MTL_OPERATION_RAA_WSP;
+               break;
+       default:
+               break;
+       }
+
+       writel(value, ioaddr + MTL_OPERATION_MODE);
+}
+
+static void dwmac4_prog_mtl_tx_algorithms(struct mac_device_info *hw,
+                                         u32 tx_alg)
+{
+       void __iomem *ioaddr = hw->pcsr;
+       u32 value = readl(ioaddr + MTL_OPERATION_MODE);
+
+       value &= ~MTL_OPERATION_SCHALG_MASK;
+       switch (tx_alg) {
+       case MTL_TX_ALGORITHM_WRR:
+               value |= MTL_OPERATION_SCHALG_WRR;
+               break;
+       case MTL_TX_ALGORITHM_WFQ:
+               value |= MTL_OPERATION_SCHALG_WFQ;
+               break;
+       case MTL_TX_ALGORITHM_DWRR:
+               value |= MTL_OPERATION_SCHALG_DWRR;
+               break;
+       case MTL_TX_ALGORITHM_SP:
+               value |= MTL_OPERATION_SCHALG_SP;
+               break;
+       default:
+               break;
+       }
+
+       writel(value, ioaddr + MTL_OPERATION_MODE);
+}
+
+static void dwmac4_set_mtl_tx_queue_weight(struct mac_device_info *hw,
+                                          u32 weight, u32 queue)
+{
+       void __iomem *ioaddr = hw->pcsr;
+       u32 value = readl(ioaddr + MTL_TXQX_WEIGHT_BASE_ADDR(queue));
+
+       value &= ~MTL_TXQ_WEIGHT_ISCQW_MASK;
+       value |= weight & MTL_TXQ_WEIGHT_ISCQW_MASK;
+       writel(value, ioaddr + MTL_TXQX_WEIGHT_BASE_ADDR(queue));
+}
+
+static void dwmac4_map_mtl_dma(struct mac_device_info *hw, u32 queue, u32 chan)
+{
+       void __iomem *ioaddr = hw->pcsr;
+       u32 value;
+
+       if (queue < 4)
+               value = readl(ioaddr + MTL_RXQ_DMA_MAP0);
+       else
+               value = readl(ioaddr + MTL_RXQ_DMA_MAP1);
+
+       if (queue == 0 || queue == 4) {
+               value &= ~MTL_RXQ_DMA_Q04MDMACH_MASK;
+               value |= MTL_RXQ_DMA_Q04MDMACH(chan);
+       } else {
+               value &= ~MTL_RXQ_DMA_QXMDMACH_MASK(queue);
+               value |= MTL_RXQ_DMA_QXMDMACH(chan, queue);
+       }
+
+       if (queue < 4)
+               writel(value, ioaddr + MTL_RXQ_DMA_MAP0);
+       else
+               writel(value, ioaddr + MTL_RXQ_DMA_MAP1);
+}
+
+static void dwmac4_config_cbs(struct mac_device_info *hw,
+                             u32 send_slope, u32 idle_slope,
+                             u32 high_credit, u32 low_credit, u32 queue)
+{
+       void __iomem *ioaddr = hw->pcsr;
+       u32 value;
+
+       pr_debug("Queue %d configured as AVB. Parameters:\n", queue);
+       pr_debug("\tsend_slope: 0x%08x\n", send_slope);
+       pr_debug("\tidle_slope: 0x%08x\n", idle_slope);
+       pr_debug("\thigh_credit: 0x%08x\n", high_credit);
+       pr_debug("\tlow_credit: 0x%08x\n", low_credit);
+
+       /* enable AV algorithm */
+       value = readl(ioaddr + MTL_ETSX_CTRL_BASE_ADDR(queue));
+       value |= MTL_ETS_CTRL_AVALG;
+       value |= MTL_ETS_CTRL_CC;
+       writel(value, ioaddr + MTL_ETSX_CTRL_BASE_ADDR(queue));
+
+       /* configure send slope */
+       value = readl(ioaddr + MTL_SEND_SLP_CREDX_BASE_ADDR(queue));
+       value &= ~MTL_SEND_SLP_CRED_SSC_MASK;
+       value |= send_slope & MTL_SEND_SLP_CRED_SSC_MASK;
+       writel(value, ioaddr + MTL_SEND_SLP_CREDX_BASE_ADDR(queue));
+
+       /* configure idle slope (same register as tx weight) */
+       dwmac4_set_mtl_tx_queue_weight(hw, idle_slope, queue);
+
+       /* configure high credit */
+       value = readl(ioaddr + MTL_HIGH_CREDX_BASE_ADDR(queue));
+       value &= ~MTL_HIGH_CRED_HC_MASK;
+       value |= high_credit & MTL_HIGH_CRED_HC_MASK;
+       writel(value, ioaddr + MTL_HIGH_CREDX_BASE_ADDR(queue));
+
+       /* configure low credit */
+       value = readl(ioaddr + MTL_LOW_CREDX_BASE_ADDR(queue));
+       value &= ~MTL_HIGH_CRED_LC_MASK;
+       value |= low_credit & MTL_HIGH_CRED_LC_MASK;
+       writel(value, ioaddr + MTL_LOW_CREDX_BASE_ADDR(queue));
+}
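+
dwmac4_config_cbs() programs the four 802.1Qav shaper values for an AVB queue. The register encodings are hardware specific, but the two slopes follow the standard credit-based-shaper relationships; a sketch under that assumption:

    /* 802.1Qav: idleSlope is the reserved rate at which credit accrues
     * while the queue waits; sendSlope = idleSlope - portTransmitRate
     * is the (negative) rate at which credit drains while sending. */
    static void cbs_slopes(long port_rate, long reserved_rate,
                           long *idle_slope, long *send_slope)
    {
            *idle_slope = reserved_rate;
            *send_slope = reserved_rate - port_rate;
    }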
+
 static void dwmac4_dump_regs(struct mac_device_info *hw, u32 *reg_space)
 {
        void __iomem *ioaddr = hw->pcsr;
@@ -251,11 +445,12 @@ static void dwmac4_set_filter(struct mac_device_info *hw,
 }
 
 static void dwmac4_flow_ctrl(struct mac_device_info *hw, unsigned int duplex,
-                            unsigned int fc, unsigned int pause_time)
+                            unsigned int fc, unsigned int pause_time,
+                            u32 tx_cnt)
 {
        void __iomem *ioaddr = hw->pcsr;
-       u32 channel = STMMAC_CHAN0;     /* FIXME */
        unsigned int flow = 0;
+       u32 queue = 0;
 
        pr_debug("GMAC Flow-Control:\n");
        if (fc & FLOW_RX) {
@@ -265,13 +460,18 @@ static void dwmac4_flow_ctrl(struct mac_device_info *hw, unsigned int duplex,
        }
        if (fc & FLOW_TX) {
                pr_debug("\tTransmit Flow-Control ON\n");
-               flow |= GMAC_TX_FLOW_CTRL_TFE;
-               writel(flow, ioaddr + GMAC_QX_TX_FLOW_CTRL(channel));
 
-               if (duplex) {
+               if (duplex)
                        pr_debug("\tduplex mode: PAUSE %d\n", pause_time);
-                       flow |= (pause_time << GMAC_TX_FLOW_CTRL_PT_SHIFT);
-                       writel(flow, ioaddr + GMAC_QX_TX_FLOW_CTRL(channel));
+
+               for (queue = 0; queue < tx_cnt; queue++) {
+                       flow |= GMAC_TX_FLOW_CTRL_TFE;
+
+                       if (duplex)
+                               flow |=
+                               (pause_time << GMAC_TX_FLOW_CTRL_PT_SHIFT);
+
+                       writel(flow, ioaddr + GMAC_QX_TX_FLOW_CTRL(queue));
                }
        }
 }
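
With this change the TFE bit and the pause time are written once per TX queue instead of only for channel 0. A standalone sketch of how the per-queue flow-control word is composed; the bit position and shift are placeholders for GMAC_TX_FLOW_CTRL_TFE and GMAC_TX_FLOW_CTRL_PT_SHIFT:

#include <stdio.h>
#include <stdint.h>

#define TFE      (1u << 1)      /* placeholder for GMAC_TX_FLOW_CTRL_TFE */
#define PT_SHIFT 16             /* placeholder for GMAC_TX_FLOW_CTRL_PT_SHIFT */

int main(void)
{
        unsigned int queue, tx_cnt = 2, duplex = 1, pause_time = 0xffff;

        for (queue = 0; queue < tx_cnt; queue++) {
                uint32_t flow = TFE;    /* enable TX flow control */

                if (duplex)             /* pause time only in full duplex */
                        flow |= (uint32_t)pause_time << PT_SHIFT;
                printf("queue %u: 0x%08x\n", queue, flow);
        }
        return 0;
}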
@@ -325,11 +525,34 @@ static void dwmac4_phystatus(void __iomem *ioaddr, struct stmmac_extra_stats *x)
        }
 }
 
+static int dwmac4_irq_mtl_status(struct mac_device_info *hw, u32 chan)
+{
+       void __iomem *ioaddr = hw->pcsr;
+       u32 mtl_int_qx_status;
+       int ret = 0;
+
+       mtl_int_qx_status = readl(ioaddr + MTL_INT_STATUS);
+
+       /* Check MTL Interrupt */
+       if (mtl_int_qx_status & MTL_INT_QX(chan)) {
+               /* read Queue x Interrupt status */
+               u32 status = readl(ioaddr + MTL_CHAN_INT_CTRL(chan));
+
+               if (status & MTL_RX_OVERFLOW_INT) {
+                       /*  clear Interrupt */
+                       writel(status | MTL_RX_OVERFLOW_INT,
+                              ioaddr + MTL_CHAN_INT_CTRL(chan));
+                       ret = CORE_IRQ_MTL_RX_OVERFLOW;
+               }
+       }
+
+       return ret;
+}
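
The MTL interrupt check is now parameterized by channel, so the core can scan every queue instead of hard-coding Q0. A standalone model of that dispatch loop, assuming one status bit per queue (consistent with the MTL_INT_QX(chan) macro name, but an assumption here):

#include <stdio.h>
#include <stdint.h>

#define MTL_INT_QX(chan) (1u << (chan))         /* assumed: one bit per queue */

static int mtl_irq_status(uint32_t int_status, unsigned int chan)
{
        return (int_status & MTL_INT_QX(chan)) != 0;    /* 1 = queue raised an event */
}

int main(void)
{
        uint32_t int_status = MTL_INT_QX(0) | MTL_INT_QX(3);    /* sample status word */
        unsigned int chan, channels = 4;

        for (chan = 0; chan < channels; chan++)
                if (mtl_irq_status(int_status, chan))
                        printf("channel %u: MTL RX overflow\n", chan);
        return 0;
}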
+
 static int dwmac4_irq_status(struct mac_device_info *hw,
                             struct stmmac_extra_stats *x)
 {
        void __iomem *ioaddr = hw->pcsr;
-       u32 mtl_int_qx_status;
        u32 intr_status;
        int ret = 0;
 
@@ -348,20 +571,6 @@ static int dwmac4_irq_status(struct mac_device_info *hw,
                x->irq_receive_pmt_irq_n++;
        }
 
-       mtl_int_qx_status = readl(ioaddr + MTL_INT_STATUS);
-       /* Check MTL Interrupt: Currently only one queue is used: Q0. */
-       if (mtl_int_qx_status & MTL_INT_Q0) {
-               /* read Queue 0 Interrupt status */
-               u32 status = readl(ioaddr + MTL_CHAN_INT_CTRL(STMMAC_CHAN0));
-
-               if (status & MTL_RX_OVERFLOW_INT) {
-                       /*  clear Interrupt */
-                       writel(status | MTL_RX_OVERFLOW_INT,
-                              ioaddr + MTL_CHAN_INT_CTRL(STMMAC_CHAN0));
-                       ret = CORE_IRQ_MTL_RX_OVERFLOW;
-               }
-       }
-
        dwmac_pcs_isr(ioaddr, GMAC_PCS_BASE, intr_status, x);
        if (intr_status & PCS_RGSMIIIS_IRQ)
                dwmac4_phystatus(ioaddr, x);
@@ -369,64 +578,69 @@ static int dwmac4_irq_status(struct mac_device_info *hw,
        return ret;
 }
 
-static void dwmac4_debug(void __iomem *ioaddr, struct stmmac_extra_stats *x)
+static void dwmac4_debug(void __iomem *ioaddr, struct stmmac_extra_stats *x,
+                        u32 rx_queues, u32 tx_queues)
 {
        u32 value;
-
-       /*  Currently only channel 0 is supported */
-       value = readl(ioaddr + MTL_CHAN_TX_DEBUG(STMMAC_CHAN0));
-
-       if (value & MTL_DEBUG_TXSTSFSTS)
-               x->mtl_tx_status_fifo_full++;
-       if (value & MTL_DEBUG_TXFSTS)
-               x->mtl_tx_fifo_not_empty++;
-       if (value & MTL_DEBUG_TWCSTS)
-               x->mmtl_fifo_ctrl++;
-       if (value & MTL_DEBUG_TRCSTS_MASK) {
-               u32 trcsts = (value & MTL_DEBUG_TRCSTS_MASK)
-                            >> MTL_DEBUG_TRCSTS_SHIFT;
-               if (trcsts == MTL_DEBUG_TRCSTS_WRITE)
-                       x->mtl_tx_fifo_read_ctrl_write++;
-               else if (trcsts == MTL_DEBUG_TRCSTS_TXW)
-                       x->mtl_tx_fifo_read_ctrl_wait++;
-               else if (trcsts == MTL_DEBUG_TRCSTS_READ)
-                       x->mtl_tx_fifo_read_ctrl_read++;
-               else
-                       x->mtl_tx_fifo_read_ctrl_idle++;
+       u32 queue;
+
+       for (queue = 0; queue < tx_queues; queue++) {
+               value = readl(ioaddr + MTL_CHAN_TX_DEBUG(queue));
+
+               if (value & MTL_DEBUG_TXSTSFSTS)
+                       x->mtl_tx_status_fifo_full++;
+               if (value & MTL_DEBUG_TXFSTS)
+                       x->mtl_tx_fifo_not_empty++;
+               if (value & MTL_DEBUG_TWCSTS)
+                       x->mmtl_fifo_ctrl++;
+               if (value & MTL_DEBUG_TRCSTS_MASK) {
+                       u32 trcsts = (value & MTL_DEBUG_TRCSTS_MASK)
+                                    >> MTL_DEBUG_TRCSTS_SHIFT;
+                       if (trcsts == MTL_DEBUG_TRCSTS_WRITE)
+                               x->mtl_tx_fifo_read_ctrl_write++;
+                       else if (trcsts == MTL_DEBUG_TRCSTS_TXW)
+                               x->mtl_tx_fifo_read_ctrl_wait++;
+                       else if (trcsts == MTL_DEBUG_TRCSTS_READ)
+                               x->mtl_tx_fifo_read_ctrl_read++;
+                       else
+                               x->mtl_tx_fifo_read_ctrl_idle++;
+               }
+               if (value & MTL_DEBUG_TXPAUSED)
+                       x->mac_tx_in_pause++;
        }
-       if (value & MTL_DEBUG_TXPAUSED)
-               x->mac_tx_in_pause++;
 
-       value = readl(ioaddr + MTL_CHAN_RX_DEBUG(STMMAC_CHAN0));
+       for (queue = 0; queue < rx_queues; queue++) {
+               value = readl(ioaddr + MTL_CHAN_RX_DEBUG(queue));
 
-       if (value & MTL_DEBUG_RXFSTS_MASK) {
-               u32 rxfsts = (value & MTL_DEBUG_RXFSTS_MASK)
-                            >> MTL_DEBUG_RRCSTS_SHIFT;
+               if (value & MTL_DEBUG_RXFSTS_MASK) {
+                       u32 rxfsts = (value & MTL_DEBUG_RXFSTS_MASK)
+                                    >> MTL_DEBUG_RRCSTS_SHIFT;
 
-               if (rxfsts == MTL_DEBUG_RXFSTS_FULL)
-                       x->mtl_rx_fifo_fill_level_full++;
-               else if (rxfsts == MTL_DEBUG_RXFSTS_AT)
-                       x->mtl_rx_fifo_fill_above_thresh++;
-               else if (rxfsts == MTL_DEBUG_RXFSTS_BT)
-                       x->mtl_rx_fifo_fill_below_thresh++;
-               else
-                       x->mtl_rx_fifo_fill_level_empty++;
-       }
-       if (value & MTL_DEBUG_RRCSTS_MASK) {
-               u32 rrcsts = (value & MTL_DEBUG_RRCSTS_MASK) >>
-                            MTL_DEBUG_RRCSTS_SHIFT;
-
-               if (rrcsts == MTL_DEBUG_RRCSTS_FLUSH)
-                       x->mtl_rx_fifo_read_ctrl_flush++;
-               else if (rrcsts == MTL_DEBUG_RRCSTS_RSTAT)
-                       x->mtl_rx_fifo_read_ctrl_read_data++;
-               else if (rrcsts == MTL_DEBUG_RRCSTS_RDATA)
-                       x->mtl_rx_fifo_read_ctrl_status++;
-               else
-                       x->mtl_rx_fifo_read_ctrl_idle++;
+                       if (rxfsts == MTL_DEBUG_RXFSTS_FULL)
+                               x->mtl_rx_fifo_fill_level_full++;
+                       else if (rxfsts == MTL_DEBUG_RXFSTS_AT)
+                               x->mtl_rx_fifo_fill_above_thresh++;
+                       else if (rxfsts == MTL_DEBUG_RXFSTS_BT)
+                               x->mtl_rx_fifo_fill_below_thresh++;
+                       else
+                               x->mtl_rx_fifo_fill_level_empty++;
+               }
+               if (value & MTL_DEBUG_RRCSTS_MASK) {
+                       u32 rrcsts = (value & MTL_DEBUG_RRCSTS_MASK) >>
+                                    MTL_DEBUG_RRCSTS_SHIFT;
+
+                       if (rrcsts == MTL_DEBUG_RRCSTS_FLUSH)
+                               x->mtl_rx_fifo_read_ctrl_flush++;
+                       else if (rrcsts == MTL_DEBUG_RRCSTS_RSTAT)
+                               x->mtl_rx_fifo_read_ctrl_read_data++;
+                       else if (rrcsts == MTL_DEBUG_RRCSTS_RDATA)
+                               x->mtl_rx_fifo_read_ctrl_status++;
+                       else
+                               x->mtl_rx_fifo_read_ctrl_idle++;
+               }
+               if (value & MTL_DEBUG_RWCSTS)
+                       x->mtl_rx_fifo_ctrl_active++;
        }
-       if (value & MTL_DEBUG_RWCSTS)
-               x->mtl_rx_fifo_ctrl_active++;
 
        /* GMAC debug */
        value = readl(ioaddr + GMAC_DEBUG);
@@ -455,10 +669,51 @@ static void dwmac4_debug(void __iomem *ioaddr, struct stmmac_extra_stats *x)
 
 static const struct stmmac_ops dwmac4_ops = {
        .core_init = dwmac4_core_init,
+       .set_mac = stmmac_set_mac,
        .rx_ipc = dwmac4_rx_ipc_enable,
        .rx_queue_enable = dwmac4_rx_queue_enable,
+       .rx_queue_prio = dwmac4_rx_queue_priority,
+       .tx_queue_prio = dwmac4_tx_queue_priority,
+       .rx_queue_routing = dwmac4_tx_queue_routing,
+       .prog_mtl_rx_algorithms = dwmac4_prog_mtl_rx_algorithms,
+       .prog_mtl_tx_algorithms = dwmac4_prog_mtl_tx_algorithms,
+       .set_mtl_tx_queue_weight = dwmac4_set_mtl_tx_queue_weight,
+       .map_mtl_to_dma = dwmac4_map_mtl_dma,
+       .config_cbs = dwmac4_config_cbs,
        .dump_regs = dwmac4_dump_regs,
        .host_irq_status = dwmac4_irq_status,
+       .host_mtl_irq_status = dwmac4_irq_mtl_status,
+       .flow_ctrl = dwmac4_flow_ctrl,
+       .pmt = dwmac4_pmt,
+       .set_umac_addr = dwmac4_set_umac_addr,
+       .get_umac_addr = dwmac4_get_umac_addr,
+       .set_eee_mode = dwmac4_set_eee_mode,
+       .reset_eee_mode = dwmac4_reset_eee_mode,
+       .set_eee_timer = dwmac4_set_eee_timer,
+       .set_eee_pls = dwmac4_set_eee_pls,
+       .pcs_ctrl_ane = dwmac4_ctrl_ane,
+       .pcs_rane = dwmac4_rane,
+       .pcs_get_adv_lp = dwmac4_get_adv_lp,
+       .debug = dwmac4_debug,
+       .set_filter = dwmac4_set_filter,
+};
+
+static const struct stmmac_ops dwmac410_ops = {
+       .core_init = dwmac4_core_init,
+       .set_mac = stmmac_dwmac4_set_mac,
+       .rx_ipc = dwmac4_rx_ipc_enable,
+       .rx_queue_enable = dwmac4_rx_queue_enable,
+       .rx_queue_prio = dwmac4_rx_queue_priority,
+       .tx_queue_prio = dwmac4_tx_queue_priority,
+       .rx_queue_routing = dwmac4_tx_queue_routing,
+       .prog_mtl_rx_algorithms = dwmac4_prog_mtl_rx_algorithms,
+       .prog_mtl_tx_algorithms = dwmac4_prog_mtl_tx_algorithms,
+       .set_mtl_tx_queue_weight = dwmac4_set_mtl_tx_queue_weight,
+       .map_mtl_to_dma = dwmac4_map_mtl_dma,
+       .config_cbs = dwmac4_config_cbs,
+       .dump_regs = dwmac4_dump_regs,
+       .host_irq_status = dwmac4_irq_status,
+       .host_mtl_irq_status = dwmac4_irq_mtl_status,
        .flow_ctrl = dwmac4_flow_ctrl,
        .pmt = dwmac4_pmt,
        .set_umac_addr = dwmac4_set_umac_addr,
@@ -492,8 +747,6 @@ struct mac_device_info *dwmac4_setup(void __iomem *ioaddr, int mcbins,
        if (mac->multicast_filter_bins)
                mac->mcast_bits_log2 = ilog2(mac->multicast_filter_bins);
 
-       mac->mac = &dwmac4_ops;
-
        mac->link.port = GMAC_CONFIG_PS;
        mac->link.duplex = GMAC_CONFIG_DM;
        mac->link.speed = GMAC_CONFIG_FES;
@@ -514,5 +767,10 @@ struct mac_device_info *dwmac4_setup(void __iomem *ioaddr, int mcbins,
        else
                mac->dma = &dwmac4_dma_ops;
 
+       if (*synopsys_id >= DWMAC_CORE_4_00)
+               mac->mac = &dwmac410_ops;
+       else
+               mac->mac = &dwmac4_ops;
+
        return mac;
 }
index f97b0d5d998742efcad71972bd74ce40cc02afad..eec8463057fd7573b54019298dfa894d27fc33e4 100644 (file)
@@ -71,36 +71,48 @@ static void dwmac4_dma_axi(void __iomem *ioaddr, struct stmmac_axi *axi)
        writel(value, ioaddr + DMA_SYS_BUS_MODE);
 }
 
-static void dwmac4_dma_init_channel(void __iomem *ioaddr,
-                                   struct stmmac_dma_cfg *dma_cfg,
-                                   u32 dma_tx_phy, u32 dma_rx_phy,
-                                   u32 channel)
+void dwmac4_dma_init_rx_chan(void __iomem *ioaddr,
+                            struct stmmac_dma_cfg *dma_cfg,
+                            u32 dma_rx_phy, u32 chan)
 {
        u32 value;
-       int txpbl = dma_cfg->txpbl ?: dma_cfg->pbl;
-       int rxpbl = dma_cfg->rxpbl ?: dma_cfg->pbl;
+       u32 rxpbl = dma_cfg->rxpbl ?: dma_cfg->pbl;
 
-       /* set PBL for each channels. Currently we affect same configuration
-        * on each channel
-        */
-       value = readl(ioaddr + DMA_CHAN_CONTROL(channel));
-       if (dma_cfg->pblx8)
-               value = value | DMA_BUS_MODE_PBL;
-       writel(value, ioaddr + DMA_CHAN_CONTROL(channel));
+       value = readl(ioaddr + DMA_CHAN_RX_CONTROL(chan));
+       value = value | (rxpbl << DMA_BUS_MODE_RPBL_SHIFT);
+       writel(value, ioaddr + DMA_CHAN_RX_CONTROL(chan));
+
+       writel(dma_rx_phy, ioaddr + DMA_CHAN_RX_BASE_ADDR(chan));
+}
 
-       value = readl(ioaddr + DMA_CHAN_TX_CONTROL(channel));
+void dwmac4_dma_init_tx_chan(void __iomem *ioaddr,
+                            struct stmmac_dma_cfg *dma_cfg,
+                            u32 dma_tx_phy, u32 chan)
+{
+       u32 value;
+       u32 txpbl = dma_cfg->txpbl ?: dma_cfg->pbl;
+
+       value = readl(ioaddr + DMA_CHAN_TX_CONTROL(chan));
        value = value | (txpbl << DMA_BUS_MODE_PBL_SHIFT);
-       writel(value, ioaddr + DMA_CHAN_TX_CONTROL(channel));
+       writel(value, ioaddr + DMA_CHAN_TX_CONTROL(chan));
 
-       value = readl(ioaddr + DMA_CHAN_RX_CONTROL(channel));
-       value = value | (rxpbl << DMA_BUS_MODE_RPBL_SHIFT);
-       writel(value, ioaddr + DMA_CHAN_RX_CONTROL(channel));
+       writel(dma_tx_phy, ioaddr + DMA_CHAN_TX_BASE_ADDR(chan));
+}
 
-       /* Mask interrupts by writing to CSR7 */
-       writel(DMA_CHAN_INTR_DEFAULT_MASK, ioaddr + DMA_CHAN_INTR_ENA(channel));
+void dwmac4_dma_init_channel(void __iomem *ioaddr,
+                            struct stmmac_dma_cfg *dma_cfg, u32 chan)
+{
+       u32 value;
+
+       /* common channel control register config */
+       value = readl(ioaddr + DMA_CHAN_CONTROL(chan));
+       if (dma_cfg->pblx8)
+               value = value | DMA_BUS_MODE_PBL;
+       writel(value, ioaddr + DMA_CHAN_CONTROL(chan));
 
-       writel(dma_tx_phy, ioaddr + DMA_CHAN_TX_BASE_ADDR(channel));
-       writel(dma_rx_phy, ioaddr + DMA_CHAN_RX_BASE_ADDR(channel));
+       /* Mask interrupts by writing to CSR7 */
+       writel(DMA_CHAN_INTR_DEFAULT_MASK,
+              ioaddr + DMA_CHAN_INTR_ENA(chan));
 }
 
 static void dwmac4_dma_init(void __iomem *ioaddr,
@@ -108,7 +120,6 @@ static void dwmac4_dma_init(void __iomem *ioaddr,
                            u32 dma_tx, u32 dma_rx, int atds)
 {
        u32 value = readl(ioaddr + DMA_SYS_BUS_MODE);
-       int i;
 
        /* Set the Fixed burst mode */
        if (dma_cfg->fixed_burst)
@@ -122,9 +133,6 @@ static void dwmac4_dma_init(void __iomem *ioaddr,
                value |= DMA_SYS_BUS_AAL;
 
        writel(value, ioaddr + DMA_SYS_BUS_MODE);
-
-       for (i = 0; i < DMA_CHANNEL_NB_MAX; i++)
-               dwmac4_dma_init_channel(ioaddr, dma_cfg, dma_tx, dma_rx, i);
 }
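
dwmac4_dma_init() no longer walks the channels itself; the per-channel work moves to the three init helpers above, and the caller (in the stmmac core, outside this hunk) is expected to do the loop. A standalone stub sketch of the implied call order, with printfs standing in for the real register writes:

#include <stdio.h>

static void dma_init(void)              { printf("common bus mode setup\n"); }
static void dma_init_channel(int ch)    { printf("chan %d: control reg + irq mask\n", ch); }
static void dma_init_rx_chan(int ch)    { printf("chan %d: rx pbl + ring base\n", ch); }
static void dma_init_tx_chan(int ch)    { printf("chan %d: tx pbl + ring base\n", ch); }

int main(void)
{
        int ch, channels = 2;   /* hypothetical channel count */

        dma_init();             /* once for the whole controller */
        for (ch = 0; ch < channels; ch++) {
                dma_init_channel(ch);
                dma_init_rx_chan(ch);
                dma_init_tx_chan(ch);
        }
        return 0;
}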
 
 static void _dwmac4_dump_dma_regs(void __iomem *ioaddr, u32 channel,
@@ -174,46 +182,121 @@ static void dwmac4_dump_dma_regs(void __iomem *ioaddr, u32 *reg_space)
                _dwmac4_dump_dma_regs(ioaddr, i, reg_space);
 }
 
-static void dwmac4_rx_watchdog(void __iomem *ioaddr, u32 riwt)
+static void dwmac4_rx_watchdog(void __iomem *ioaddr, u32 riwt, u32 number_chan)
 {
-       int i;
+       u32 chan;
 
-       for (i = 0; i < DMA_CHANNEL_NB_MAX; i++)
-               writel(riwt, ioaddr + DMA_CHAN_RX_WATCHDOG(i));
+       for (chan = 0; chan < number_chan; chan++)
+               writel(riwt, ioaddr + DMA_CHAN_RX_WATCHDOG(chan));
 }
 
-static void dwmac4_dma_chan_op_mode(void __iomem *ioaddr, int txmode,
-                                   int rxmode, u32 channel)
+static void dwmac4_dma_rx_chan_op_mode(void __iomem *ioaddr, int mode,
+                                      u32 channel, int fifosz)
 {
-       u32 mtl_tx_op, mtl_rx_op, mtl_rx_int;
+       unsigned int rqs = fifosz / 256 - 1;
+       u32 mtl_rx_op, mtl_rx_int;
 
-       /* Following code only done for channel 0, other channels not yet
-        * supported.
-        */
-       mtl_tx_op = readl(ioaddr + MTL_CHAN_TX_OP_MODE(channel));
+       mtl_rx_op = readl(ioaddr + MTL_CHAN_RX_OP_MODE(channel));
+
+       if (mode == SF_DMA_MODE) {
+               pr_debug("GMAC: enable RX store and forward mode\n");
+               mtl_rx_op |= MTL_OP_MODE_RSF;
+       } else {
+               pr_debug("GMAC: disable RX SF mode (threshold %d)\n", mode);
+               mtl_rx_op &= ~MTL_OP_MODE_RSF;
+               mtl_rx_op &= MTL_OP_MODE_RTC_MASK;
+               if (mode <= 32)
+                       mtl_rx_op |= MTL_OP_MODE_RTC_32;
+               else if (mode <= 64)
+                       mtl_rx_op |= MTL_OP_MODE_RTC_64;
+               else if (mode <= 96)
+                       mtl_rx_op |= MTL_OP_MODE_RTC_96;
+               else
+                       mtl_rx_op |= MTL_OP_MODE_RTC_128;
+       }
+
+       mtl_rx_op &= ~MTL_OP_MODE_RQS_MASK;
+       mtl_rx_op |= rqs << MTL_OP_MODE_RQS_SHIFT;
+
+       /* enable flow control only if each channel gets 4 KiB or more FIFO */
+       if (fifosz >= 4096) {
+               unsigned int rfd, rfa;
+
+               mtl_rx_op |= MTL_OP_MODE_EHFC;
+
+               /* Set Threshold for Activating Flow Control to min 2 frames,
+                * i.e. 1500 * 2 = 3000 bytes.
+                *
+                * Set Threshold for Deactivating Flow Control to min 1 frame,
+                * i.e. 1500 bytes.
+                */
+               switch (fifosz) {
+               case 4096:
+                       /* This violates the above formula because of the
+                        * FIFO size limit, so overflow may still occur.
+                        */
+                       rfd = 0x03; /* Full-2.5K */
+                       rfa = 0x01; /* Full-1.5K */
+                       break;
+
+               case 8192:
+                       rfd = 0x06; /* Full-4K */
+                       rfa = 0x0a; /* Full-6K */
+                       break;
+
+               case 16384:
+                       rfd = 0x06; /* Full-4K */
+                       rfa = 0x12; /* Full-10K */
+                       break;
+
+               default:
+                       rfd = 0x06; /* Full-4K */
+                       rfa = 0x1e; /* Full-16K */
+                       break;
+               }
+
+               mtl_rx_op &= ~MTL_OP_MODE_RFD_MASK;
+               mtl_rx_op |= rfd << MTL_OP_MODE_RFD_SHIFT;
 
-       if (txmode == SF_DMA_MODE) {
+               mtl_rx_op &= ~MTL_OP_MODE_RFA_MASK;
+               mtl_rx_op |= rfa << MTL_OP_MODE_RFA_SHIFT;
+       }
+
+       writel(mtl_rx_op, ioaddr + MTL_CHAN_RX_OP_MODE(channel));
+
+       /* Enable the MTL RX overflow interrupt */
+       mtl_rx_int = readl(ioaddr + MTL_CHAN_INT_CTRL(channel));
+       writel(mtl_rx_int | MTL_RX_OVERFLOW_INT_EN,
+              ioaddr + MTL_CHAN_INT_CTRL(channel));
+}
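
A quick standalone check of the arithmetic above: RQS is the FIFO size in 256-byte blocks minus one, and the RFD/RFA thresholds are only programmed once a queue has at least 4 KiB of FIFO:

#include <stdio.h>

int main(void)
{
        int sizes[] = { 2048, 4096, 8192, 16384 };
        int i;

        for (i = 0; i < 4; i++) {
                int fifosz = sizes[i];
                int rqs = fifosz / 256 - 1;     /* 256-byte blocks, minus one */

                printf("fifo %5d B: rqs=%2d, flow control %s\n",
                       fifosz, rqs, fifosz >= 4096 ? "on" : "off");
        }
        return 0;
}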
+
+static void dwmac4_dma_tx_chan_op_mode(void __iomem *ioaddr, int mode,
+                                      u32 channel)
+{
+       u32 mtl_tx_op = readl(ioaddr + MTL_CHAN_TX_OP_MODE(channel));
+
+       if (mode == SF_DMA_MODE) {
                pr_debug("GMAC: enable TX store and forward mode\n");
                /* Transmit COE type 2 cannot be done in cut-through mode. */
                mtl_tx_op |= MTL_OP_MODE_TSF;
        } else {
-               pr_debug("GMAC: disabling TX SF (threshold %d)\n", txmode);
+               pr_debug("GMAC: disabling TX SF (threshold %d)\n", mode);
                mtl_tx_op &= ~MTL_OP_MODE_TSF;
                mtl_tx_op &= MTL_OP_MODE_TTC_MASK;
                /* Set the transmit threshold */
-               if (txmode <= 32)
+               if (mode <= 32)
                        mtl_tx_op |= MTL_OP_MODE_TTC_32;
-               else if (txmode <= 64)
+               else if (mode <= 64)
                        mtl_tx_op |= MTL_OP_MODE_TTC_64;
-               else if (txmode <= 96)
+               else if (mode <= 96)
                        mtl_tx_op |= MTL_OP_MODE_TTC_96;
-               else if (txmode <= 128)
+               else if (mode <= 128)
                        mtl_tx_op |= MTL_OP_MODE_TTC_128;
-               else if (txmode <= 192)
+               else if (mode <= 192)
                        mtl_tx_op |= MTL_OP_MODE_TTC_192;
-               else if (txmode <= 256)
+               else if (mode <= 256)
                        mtl_tx_op |= MTL_OP_MODE_TTC_256;
-               else if (txmode <= 384)
+               else if (mode <= 384)
                        mtl_tx_op |= MTL_OP_MODE_TTC_384;
                else
                        mtl_tx_op |= MTL_OP_MODE_TTC_512;
@@ -230,39 +313,6 @@ static void dwmac4_dma_chan_op_mode(void __iomem *ioaddr, int txmode,
         */
        mtl_tx_op |= MTL_OP_MODE_TXQEN | MTL_OP_MODE_TQS_MASK;
        writel(mtl_tx_op, ioaddr +  MTL_CHAN_TX_OP_MODE(channel));
-
-       mtl_rx_op = readl(ioaddr + MTL_CHAN_RX_OP_MODE(channel));
-
-       if (rxmode == SF_DMA_MODE) {
-               pr_debug("GMAC: enable RX store and forward mode\n");
-               mtl_rx_op |= MTL_OP_MODE_RSF;
-       } else {
-               pr_debug("GMAC: disable RX SF mode (threshold %d)\n", rxmode);
-               mtl_rx_op &= ~MTL_OP_MODE_RSF;
-               mtl_rx_op &= MTL_OP_MODE_RTC_MASK;
-               if (rxmode <= 32)
-                       mtl_rx_op |= MTL_OP_MODE_RTC_32;
-               else if (rxmode <= 64)
-                       mtl_rx_op |= MTL_OP_MODE_RTC_64;
-               else if (rxmode <= 96)
-                       mtl_rx_op |= MTL_OP_MODE_RTC_96;
-               else
-                       mtl_rx_op |= MTL_OP_MODE_RTC_128;
-       }
-
-       writel(mtl_rx_op, ioaddr + MTL_CHAN_RX_OP_MODE(channel));
-
-       /* Enable MTL RX overflow */
-       mtl_rx_int = readl(ioaddr + MTL_CHAN_INT_CTRL(channel));
-       writel(mtl_rx_int | MTL_RX_OVERFLOW_INT_EN,
-              ioaddr + MTL_CHAN_INT_CTRL(channel));
-}
-
-static void dwmac4_dma_operation_mode(void __iomem *ioaddr, int txmode,
-                                     int rxmode, int rxfifosz)
-{
-       /* Only Channel 0 is actually configured and used */
-       dwmac4_dma_chan_op_mode(ioaddr, txmode, rxmode, 0);
 }
 
 static void dwmac4_get_hw_feature(void __iomem *ioaddr,
@@ -294,6 +344,11 @@ static void dwmac4_get_hw_feature(void __iomem *ioaddr,
        hw_cap = readl(ioaddr + GMAC_HW_FEATURE1);
        dma_cap->av = (hw_cap & GMAC_HW_FEAT_AVSEL) >> 20;
        dma_cap->tsoen = (hw_cap & GMAC_HW_TSOEN) >> 18;
+       /* RX and TX FIFO sizes are encoded as log2(n / 128). Undo that by
+        * shifting and storing the sizes in bytes.
+        */
+       dma_cap->tx_fifo_size = 128 << ((hw_cap & GMAC_HW_TXFIFOSIZE) >> 6);
+       dma_cap->rx_fifo_size = 128 << ((hw_cap & GMAC_HW_RXFIFOSIZE) >> 0);
        /* MAC HW feature2 */
        hw_cap = readl(ioaddr + GMAC_HW_FEATURE2);
        /* TX and RX number of channels */
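
A standalone check of that decode: the hardware field is log2(size / 128), so size = 128 << field, and a field of 7 corresponds to 16 KiB:

#include <stdio.h>

int main(void)
{
        unsigned int field;

        for (field = 0; field <= 7; field++)
                printf("field %u -> %u bytes\n", field, 128u << field);
        return 0;
}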
@@ -332,9 +387,13 @@ static void dwmac4_enable_tso(void __iomem *ioaddr, bool en, u32 chan)
 const struct stmmac_dma_ops dwmac4_dma_ops = {
        .reset = dwmac4_dma_reset,
        .init = dwmac4_dma_init,
+       .init_chan = dwmac4_dma_init_channel,
+       .init_rx_chan = dwmac4_dma_init_rx_chan,
+       .init_tx_chan = dwmac4_dma_init_tx_chan,
        .axi = dwmac4_dma_axi,
        .dump_regs = dwmac4_dump_dma_regs,
-       .dma_mode = dwmac4_dma_operation_mode,
+       .dma_rx_mode = dwmac4_dma_rx_chan_op_mode,
+       .dma_tx_mode = dwmac4_dma_tx_chan_op_mode,
        .enable_dma_irq = dwmac4_enable_dma_irq,
        .disable_dma_irq = dwmac4_disable_dma_irq,
        .start_tx = dwmac4_dma_start_tx,
@@ -354,9 +413,13 @@ const struct stmmac_dma_ops dwmac4_dma_ops = {
 const struct stmmac_dma_ops dwmac410_dma_ops = {
        .reset = dwmac4_dma_reset,
        .init = dwmac4_dma_init,
+       .init_chan = dwmac4_dma_init_channel,
+       .init_rx_chan = dwmac4_dma_init_rx_chan,
+       .init_tx_chan = dwmac4_dma_init_tx_chan,
        .axi = dwmac4_dma_axi,
        .dump_regs = dwmac4_dump_dma_regs,
-       .dma_mode = dwmac4_dma_operation_mode,
+       .dma_rx_mode = dwmac4_dma_rx_chan_op_mode,
+       .dma_tx_mode = dwmac4_dma_tx_chan_op_mode,
        .enable_dma_irq = dwmac410_enable_dma_irq,
        .disable_dma_irq = dwmac4_disable_dma_irq,
        .start_tx = dwmac4_dma_start_tx,
index 1b06df749e2bbab63c25dbbc9b4ef814fc9d5d46..8474bf961dd0c60a409ba4c1201557cdef7580dc 100644 (file)
 
 int dwmac4_dma_reset(void __iomem *ioaddr);
 void dwmac4_enable_dma_transmission(void __iomem *ioaddr, u32 tail_ptr);
-void dwmac4_enable_dma_irq(void __iomem *ioaddr);
-void dwmac410_enable_dma_irq(void __iomem *ioaddr);
-void dwmac4_disable_dma_irq(void __iomem *ioaddr);
-void dwmac4_dma_start_tx(void __iomem *ioaddr);
-void dwmac4_dma_stop_tx(void __iomem *ioaddr);
-void dwmac4_dma_start_rx(void __iomem *ioaddr);
-void dwmac4_dma_stop_rx(void __iomem *ioaddr);
+void dwmac4_enable_dma_irq(void __iomem *ioaddr, u32 chan);
+void dwmac410_enable_dma_irq(void __iomem *ioaddr, u32 chan);
+void dwmac4_disable_dma_irq(void __iomem *ioaddr, u32 chan);
+void dwmac4_dma_start_tx(void __iomem *ioaddr, u32 chan);
+void dwmac4_dma_stop_tx(void __iomem *ioaddr, u32 chan);
+void dwmac4_dma_start_rx(void __iomem *ioaddr, u32 chan);
+void dwmac4_dma_stop_rx(void __iomem *ioaddr, u32 chan);
 int dwmac4_dma_interrupt(void __iomem *ioaddr,
-                        struct stmmac_extra_stats *x);
-void dwmac4_set_rx_ring_len(void __iomem *ioaddr, u32 len);
-void dwmac4_set_tx_ring_len(void __iomem *ioaddr, u32 len);
+                        struct stmmac_extra_stats *x, u32 chan);
+void dwmac4_set_rx_ring_len(void __iomem *ioaddr, u32 len, u32 chan);
+void dwmac4_set_tx_ring_len(void __iomem *ioaddr, u32 len, u32 chan);
 void dwmac4_set_rx_tail_ptr(void __iomem *ioaddr, u32 tail_ptr, u32 chan);
 void dwmac4_set_tx_tail_ptr(void __iomem *ioaddr, u32 tail_ptr, u32 chan);
 
index c7326d5b2f432b00ea35042097adfe6d92063441..49f5687879df241f01ccf7d7befa3ac7dfd8f1dd 100644 (file)
@@ -37,96 +37,96 @@ int dwmac4_dma_reset(void __iomem *ioaddr)
 
 void dwmac4_set_rx_tail_ptr(void __iomem *ioaddr, u32 tail_ptr, u32 chan)
 {
-       writel(tail_ptr, ioaddr + DMA_CHAN_RX_END_ADDR(0));
+       writel(tail_ptr, ioaddr + DMA_CHAN_RX_END_ADDR(chan));
 }
 
 void dwmac4_set_tx_tail_ptr(void __iomem *ioaddr, u32 tail_ptr, u32 chan)
 {
-       writel(tail_ptr, ioaddr + DMA_CHAN_TX_END_ADDR(0));
+       writel(tail_ptr, ioaddr + DMA_CHAN_TX_END_ADDR(chan));
 }
 
-void dwmac4_dma_start_tx(void __iomem *ioaddr)
+void dwmac4_dma_start_tx(void __iomem *ioaddr, u32 chan)
 {
-       u32 value = readl(ioaddr + DMA_CHAN_TX_CONTROL(STMMAC_CHAN0));
+       u32 value = readl(ioaddr + DMA_CHAN_TX_CONTROL(chan));
 
        value |= DMA_CONTROL_ST;
-       writel(value, ioaddr + DMA_CHAN_TX_CONTROL(STMMAC_CHAN0));
+       writel(value, ioaddr + DMA_CHAN_TX_CONTROL(chan));
 
        value = readl(ioaddr + GMAC_CONFIG);
        value |= GMAC_CONFIG_TE;
        writel(value, ioaddr + GMAC_CONFIG);
 }
 
-void dwmac4_dma_stop_tx(void __iomem *ioaddr)
+void dwmac4_dma_stop_tx(void __iomem *ioaddr, u32 chan)
 {
-       u32 value = readl(ioaddr + DMA_CHAN_TX_CONTROL(STMMAC_CHAN0));
+       u32 value = readl(ioaddr + DMA_CHAN_TX_CONTROL(chan));
 
        value &= ~DMA_CONTROL_ST;
-       writel(value, ioaddr + DMA_CHAN_TX_CONTROL(STMMAC_CHAN0));
+       writel(value, ioaddr + DMA_CHAN_TX_CONTROL(chan));
 
        value = readl(ioaddr + GMAC_CONFIG);
        value &= ~GMAC_CONFIG_TE;
        writel(value, ioaddr + GMAC_CONFIG);
 }
 
-void dwmac4_dma_start_rx(void __iomem *ioaddr)
+void dwmac4_dma_start_rx(void __iomem *ioaddr, u32 chan)
 {
-       u32 value = readl(ioaddr + DMA_CHAN_RX_CONTROL(STMMAC_CHAN0));
+       u32 value = readl(ioaddr + DMA_CHAN_RX_CONTROL(chan));
 
        value |= DMA_CONTROL_SR;
 
-       writel(value, ioaddr + DMA_CHAN_RX_CONTROL(STMMAC_CHAN0));
+       writel(value, ioaddr + DMA_CHAN_RX_CONTROL(chan));
 
        value = readl(ioaddr + GMAC_CONFIG);
        value |= GMAC_CONFIG_RE;
        writel(value, ioaddr + GMAC_CONFIG);
 }
 
-void dwmac4_dma_stop_rx(void __iomem *ioaddr)
+void dwmac4_dma_stop_rx(void __iomem *ioaddr, u32 chan)
 {
-       u32 value = readl(ioaddr + DMA_CHAN_RX_CONTROL(STMMAC_CHAN0));
+       u32 value = readl(ioaddr + DMA_CHAN_RX_CONTROL(chan));
 
        value &= ~DMA_CONTROL_SR;
-       writel(value, ioaddr + DMA_CHAN_RX_CONTROL(STMMAC_CHAN0));
+       writel(value, ioaddr + DMA_CHAN_RX_CONTROL(chan));
 
        value = readl(ioaddr + GMAC_CONFIG);
        value &= ~GMAC_CONFIG_RE;
        writel(value, ioaddr + GMAC_CONFIG);
 }
 
-void dwmac4_set_tx_ring_len(void __iomem *ioaddr, u32 len)
+void dwmac4_set_tx_ring_len(void __iomem *ioaddr, u32 len, u32 chan)
 {
-       writel(len, ioaddr + DMA_CHAN_TX_RING_LEN(STMMAC_CHAN0));
+       writel(len, ioaddr + DMA_CHAN_TX_RING_LEN(chan));
 }
 
-void dwmac4_set_rx_ring_len(void __iomem *ioaddr, u32 len)
+void dwmac4_set_rx_ring_len(void __iomem *ioaddr, u32 len, u32 chan)
 {
-       writel(len, ioaddr + DMA_CHAN_RX_RING_LEN(STMMAC_CHAN0));
+       writel(len, ioaddr + DMA_CHAN_RX_RING_LEN(chan));
 }
 
-void dwmac4_enable_dma_irq(void __iomem *ioaddr)
+void dwmac4_enable_dma_irq(void __iomem *ioaddr, u32 chan)
 {
        writel(DMA_CHAN_INTR_DEFAULT_MASK, ioaddr +
-              DMA_CHAN_INTR_ENA(STMMAC_CHAN0));
+              DMA_CHAN_INTR_ENA(chan));
 }
 
-void dwmac410_enable_dma_irq(void __iomem *ioaddr)
+void dwmac410_enable_dma_irq(void __iomem *ioaddr, u32 chan)
 {
        writel(DMA_CHAN_INTR_DEFAULT_MASK_4_10,
-              ioaddr + DMA_CHAN_INTR_ENA(STMMAC_CHAN0));
+              ioaddr + DMA_CHAN_INTR_ENA(chan));
 }
 
-void dwmac4_disable_dma_irq(void __iomem *ioaddr)
+void dwmac4_disable_dma_irq(void __iomem *ioaddr, u32 chan)
 {
-       writel(0, ioaddr + DMA_CHAN_INTR_ENA(STMMAC_CHAN0));
+       writel(0, ioaddr + DMA_CHAN_INTR_ENA(chan));
 }
 
 int dwmac4_dma_interrupt(void __iomem *ioaddr,
-                        struct stmmac_extra_stats *x)
+                        struct stmmac_extra_stats *x, u32 chan)
 {
        int ret = 0;
 
-       u32 intr_status = readl(ioaddr + DMA_CHAN_STATUS(0));
+       u32 intr_status = readl(ioaddr + DMA_CHAN_STATUS(chan));
 
        /* ABNORMAL interrupts */
        if (unlikely(intr_status & DMA_CHAN_STATUS_AIS)) {
@@ -153,7 +153,7 @@ int dwmac4_dma_interrupt(void __iomem *ioaddr,
                if (likely(intr_status & DMA_CHAN_STATUS_RI)) {
                        u32 value;
 
-                       value = readl(ioaddr + DMA_CHAN_INTR_ENA(STMMAC_CHAN0));
+                       value = readl(ioaddr + DMA_CHAN_INTR_ENA(chan));
                        /* to schedule NAPI on real RIE event. */
                        if (likely(value & DMA_CHAN_INTR_ENA_RIE)) {
                                x->rx_normal_irq_n++;
@@ -172,7 +172,7 @@ int dwmac4_dma_interrupt(void __iomem *ioaddr,
         * status [21-0] except reserved bits [5-3]
         */
        writel((intr_status & 0x3fffc7),
-              ioaddr + DMA_CHAN_STATUS(STMMAC_CHAN0));
+              ioaddr + DMA_CHAN_STATUS(chan));
 
        return ret;
 }
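
A standalone check of the status-clear mask used in this function: 0x3fffc7 is bits [21:0] with the reserved bits [5:3] removed, matching the comment above:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
        uint32_t bits21_0 = (1u << 22) - 1;     /* 0x3fffff */
        uint32_t reserved = 0x7u << 3;          /* bits 5..3 */
        uint32_t mask = bits21_0 & ~reserved;

        printf("mask = 0x%x (expect 0x3fffc7)\n", mask);
        return (mask == 0x3fffc7) ? 0 : 1;
}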
index 56e485f79077374a9e19859a7953b3f18f5c42f3..9091df86723a3988075cbda535d5d6ba21826b7b 100644 (file)
 #define DMA_CONTROL_FTF                0x00100000      /* Flush transmit FIFO */
 
 void dwmac_enable_dma_transmission(void __iomem *ioaddr);
-void dwmac_enable_dma_irq(void __iomem *ioaddr);
-void dwmac_disable_dma_irq(void __iomem *ioaddr);
-void dwmac_dma_start_tx(void __iomem *ioaddr);
-void dwmac_dma_stop_tx(void __iomem *ioaddr);
-void dwmac_dma_start_rx(void __iomem *ioaddr);
-void dwmac_dma_stop_rx(void __iomem *ioaddr);
-int dwmac_dma_interrupt(void __iomem *ioaddr, struct stmmac_extra_stats *x);
+void dwmac_enable_dma_irq(void __iomem *ioaddr, u32 chan);
+void dwmac_disable_dma_irq(void __iomem *ioaddr, u32 chan);
+void dwmac_dma_start_tx(void __iomem *ioaddr, u32 chan);
+void dwmac_dma_stop_tx(void __iomem *ioaddr, u32 chan);
+void dwmac_dma_start_rx(void __iomem *ioaddr, u32 chan);
+void dwmac_dma_stop_rx(void __iomem *ioaddr, u32 chan);
+int dwmac_dma_interrupt(void __iomem *ioaddr, struct stmmac_extra_stats *x,
+                       u32 chan);
 int dwmac_dma_reset(void __iomem *ioaddr);
 
 #endif /* __DWMAC_DMA_H__ */
index e60bfca2a763325880215bab4592d9dbe5056fbb..38f94305aab53116a74d533728d25cb750da66a1 100644 (file)
@@ -47,38 +47,38 @@ void dwmac_enable_dma_transmission(void __iomem *ioaddr)
        writel(1, ioaddr + DMA_XMT_POLL_DEMAND);
 }
 
-void dwmac_enable_dma_irq(void __iomem *ioaddr)
+void dwmac_enable_dma_irq(void __iomem *ioaddr, u32 chan)
 {
        writel(DMA_INTR_DEFAULT_MASK, ioaddr + DMA_INTR_ENA);
 }
 
-void dwmac_disable_dma_irq(void __iomem *ioaddr)
+void dwmac_disable_dma_irq(void __iomem *ioaddr, u32 chan)
 {
        writel(0, ioaddr + DMA_INTR_ENA);
 }
 
-void dwmac_dma_start_tx(void __iomem *ioaddr)
+void dwmac_dma_start_tx(void __iomem *ioaddr, u32 chan)
 {
        u32 value = readl(ioaddr + DMA_CONTROL);
        value |= DMA_CONTROL_ST;
        writel(value, ioaddr + DMA_CONTROL);
 }
 
-void dwmac_dma_stop_tx(void __iomem *ioaddr)
+void dwmac_dma_stop_tx(void __iomem *ioaddr, u32 chan)
 {
        u32 value = readl(ioaddr + DMA_CONTROL);
        value &= ~DMA_CONTROL_ST;
        writel(value, ioaddr + DMA_CONTROL);
 }
 
-void dwmac_dma_start_rx(void __iomem *ioaddr)
+void dwmac_dma_start_rx(void __iomem *ioaddr, u32 chan)
 {
        u32 value = readl(ioaddr + DMA_CONTROL);
        value |= DMA_CONTROL_SR;
        writel(value, ioaddr + DMA_CONTROL);
 }
 
-void dwmac_dma_stop_rx(void __iomem *ioaddr)
+void dwmac_dma_stop_rx(void __iomem *ioaddr, u32 chan)
 {
        u32 value = readl(ioaddr + DMA_CONTROL);
        value &= ~DMA_CONTROL_SR;
@@ -156,7 +156,7 @@ static void show_rx_process_state(unsigned int status)
 #endif
 
 int dwmac_dma_interrupt(void __iomem *ioaddr,
-                       struct stmmac_extra_stats *x)
+                       struct stmmac_extra_stats *x, u32 chan)
 {
        int ret = 0;
        /* read the status register (CSR5) */
index 452f256ff03f04bb6ee846966eb8961ebbc40c05..31213e64513dba604eb9ead1f052a1d7289cb223 100644 (file)
 
 static int stmmac_jumbo_frm(void *p, struct sk_buff *skb, int csum)
 {
-       struct stmmac_priv *priv = (struct stmmac_priv *)p;
-       unsigned int entry = priv->cur_tx;
-       struct dma_desc *desc;
+       struct stmmac_tx_queue *tx_q = (struct stmmac_tx_queue *)p;
        unsigned int nopaged_len = skb_headlen(skb);
+       struct stmmac_priv *priv = tx_q->priv_data;
+       unsigned int entry = tx_q->cur_tx;
        unsigned int bmax, len, des2;
+       struct dma_desc *desc;
 
        if (priv->extend_desc)
-               desc = (struct dma_desc *)(priv->dma_etx + entry);
+               desc = (struct dma_desc *)(tx_q->dma_etx + entry);
        else
-               desc = priv->dma_tx + entry;
+               desc = tx_q->dma_tx + entry;
 
        if (priv->plat->enh_desc)
                bmax = BUF_SIZE_8KiB;
@@ -52,29 +53,29 @@ static int stmmac_jumbo_frm(void *p, struct sk_buff *skb, int csum)
                if (dma_mapping_error(priv->device, des2))
                        return -1;
 
-               priv->tx_skbuff_dma[entry].buf = des2;
-               priv->tx_skbuff_dma[entry].len = bmax;
-               priv->tx_skbuff_dma[entry].is_jumbo = true;
+               tx_q->tx_skbuff_dma[entry].buf = des2;
+               tx_q->tx_skbuff_dma[entry].len = bmax;
+               tx_q->tx_skbuff_dma[entry].is_jumbo = true;
 
                desc->des3 = cpu_to_le32(des2 + BUF_SIZE_4KiB);
                priv->hw->desc->prepare_tx_desc(desc, 1, bmax, csum,
                                                STMMAC_RING_MODE, 0, false);
-               priv->tx_skbuff[entry] = NULL;
+               tx_q->tx_skbuff[entry] = NULL;
                entry = STMMAC_GET_ENTRY(entry, DMA_TX_SIZE);
 
                if (priv->extend_desc)
-                       desc = (struct dma_desc *)(priv->dma_etx + entry);
+                       desc = (struct dma_desc *)(tx_q->dma_etx + entry);
                else
-                       desc = priv->dma_tx + entry;
+                       desc = tx_q->dma_tx + entry;
 
                des2 = dma_map_single(priv->device, skb->data + bmax, len,
                                      DMA_TO_DEVICE);
                desc->des2 = cpu_to_le32(des2);
                if (dma_mapping_error(priv->device, des2))
                        return -1;
-               priv->tx_skbuff_dma[entry].buf = des2;
-               priv->tx_skbuff_dma[entry].len = len;
-               priv->tx_skbuff_dma[entry].is_jumbo = true;
+               tx_q->tx_skbuff_dma[entry].buf = des2;
+               tx_q->tx_skbuff_dma[entry].len = len;
+               tx_q->tx_skbuff_dma[entry].is_jumbo = true;
 
                desc->des3 = cpu_to_le32(des2 + BUF_SIZE_4KiB);
                priv->hw->desc->prepare_tx_desc(desc, 0, len, csum,
@@ -85,15 +86,15 @@ static int stmmac_jumbo_frm(void *p, struct sk_buff *skb, int csum)
                desc->des2 = cpu_to_le32(des2);
                if (dma_mapping_error(priv->device, des2))
                        return -1;
-               priv->tx_skbuff_dma[entry].buf = des2;
-               priv->tx_skbuff_dma[entry].len = nopaged_len;
-               priv->tx_skbuff_dma[entry].is_jumbo = true;
+               tx_q->tx_skbuff_dma[entry].buf = des2;
+               tx_q->tx_skbuff_dma[entry].len = nopaged_len;
+               tx_q->tx_skbuff_dma[entry].is_jumbo = true;
                desc->des3 = cpu_to_le32(des2 + BUF_SIZE_4KiB);
                priv->hw->desc->prepare_tx_desc(desc, 1, nopaged_len, csum,
                                                STMMAC_RING_MODE, 0, true);
        }
 
-       priv->cur_tx = entry;
+       tx_q->cur_tx = entry;
 
        return entry;
 }
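
A standalone sketch of the split arithmetic in stmmac_jumbo_frm(): with enhanced descriptors one descriptor covers up to bmax = 8 KiB through the des2/des3 buffer pair (des3 = des2 + 4 KiB), and anything beyond bmax spills into a second descriptor. The 9000-byte head length is a hypothetical example:

#include <stdio.h>

int main(void)
{
        unsigned int bmax = 8192;       /* enhanced descriptors */
        unsigned int head_len = 9000;   /* hypothetical jumbo head length */
        unsigned int first = head_len > bmax ? bmax : head_len;
        unsigned int second = head_len - first;

        printf("desc0: %u bytes, desc1: %u bytes\n", first, second);
        return 0;
}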
@@ -125,12 +126,13 @@ static void stmmac_init_desc3(struct dma_desc *p)
 
 static void stmmac_clean_desc3(void *priv_ptr, struct dma_desc *p)
 {
-       struct stmmac_priv *priv = (struct stmmac_priv *)priv_ptr;
-       unsigned int entry = priv->dirty_tx;
+       struct stmmac_tx_queue *tx_q = (struct stmmac_tx_queue *)priv_ptr;
+       struct stmmac_priv *priv = tx_q->priv_data;
+       unsigned int entry = tx_q->dirty_tx;
 
        /* des3 is only used for jumbo frames tx or time stamping */
-       if (unlikely(priv->tx_skbuff_dma[entry].is_jumbo ||
-                    (priv->tx_skbuff_dma[entry].last_segment &&
+       if (unlikely(tx_q->tx_skbuff_dma[entry].is_jumbo ||
+                    (tx_q->tx_skbuff_dma[entry].last_segment &&
                      !priv->extend_desc && priv->hwts_tx_en)))
                p->des3 = 0;
 }
index cd8fb619b1e977cd2b51aa3cfbb9b242fe94510d..33efe7038cabf7740c359b6633bc963032f02165 100644 (file)
@@ -46,38 +46,51 @@ struct stmmac_tx_info {
        bool is_jumbo;
 };
 
-struct stmmac_priv {
-       /* Frequently used values are kept adjacent for cache effect */
+/* Frequently used values are kept adjacent for cache effect */
+struct stmmac_tx_queue {
+       u32 queue_index;
+       struct stmmac_priv *priv_data;
        struct dma_extended_desc *dma_etx ____cacheline_aligned_in_smp;
        struct dma_desc *dma_tx;
        struct sk_buff **tx_skbuff;
+       struct stmmac_tx_info *tx_skbuff_dma;
        unsigned int cur_tx;
        unsigned int dirty_tx;
+       dma_addr_t dma_tx_phy;
+       u32 tx_tail_addr;
+};
+
+struct stmmac_rx_queue {
+       u32 queue_index;
+       struct stmmac_priv *priv_data;
+       struct dma_extended_desc *dma_erx;
+       struct dma_desc *dma_rx ____cacheline_aligned_in_smp;
+       struct sk_buff **rx_skbuff;
+       dma_addr_t *rx_skbuff_dma;
+       unsigned int cur_rx;
+       unsigned int dirty_rx;
+       u32 rx_zeroc_thresh;
+       dma_addr_t dma_rx_phy;
+       u32 rx_tail_addr;
+       struct napi_struct napi ____cacheline_aligned_in_smp;
+};
+
+struct stmmac_priv {
+       /* Frequently used values are kept adjacent for cache effect */
        u32 tx_count_frames;
        u32 tx_coal_frames;
        u32 tx_coal_timer;
-       struct stmmac_tx_info *tx_skbuff_dma;
-       dma_addr_t dma_tx_phy;
+
        int tx_coalesce;
        int hwts_tx_en;
        bool tx_path_in_lpi_mode;
        struct timer_list txtimer;
        bool tso;
 
-       struct dma_desc *dma_rx ____cacheline_aligned_in_smp;
-       struct dma_extended_desc *dma_erx;
-       struct sk_buff **rx_skbuff;
-       unsigned int cur_rx;
-       unsigned int dirty_rx;
        unsigned int dma_buf_sz;
        unsigned int rx_copybreak;
-       unsigned int rx_zeroc_thresh;
        u32 rx_riwt;
        int hwts_rx_en;
-       dma_addr_t *rx_skbuff_dma;
-       dma_addr_t dma_rx_phy;
-
-       struct napi_struct napi ____cacheline_aligned_in_smp;
 
        void __iomem *ioaddr;
        struct net_device *dev;
@@ -85,6 +98,12 @@ struct stmmac_priv {
        struct mac_device_info *hw;
        spinlock_t lock;
 
+       /* RX Queue */
+       struct stmmac_rx_queue rx_queue[MTL_MAX_RX_QUEUES];
+
+       /* TX Queue */
+       struct stmmac_tx_queue tx_queue[MTL_MAX_TX_QUEUES];
+
        int oldlink;
        int speed;
        int oldduplex;
@@ -119,8 +138,6 @@ struct stmmac_priv {
        spinlock_t ptp_lock;
        void __iomem *mmcaddr;
        void __iomem *ptpaddr;
-       u32 rx_tail_addr;
-       u32 tx_tail_addr;
        u32 mss;
 
 #ifdef CONFIG_DEBUG_FS
index 85d64114e159e6d76a03fe5cca839fb246a6e0e6..16808e48ca1cf7bbd4237967ce0497caffc52808 100644 (file)
@@ -481,6 +481,7 @@ stmmac_set_pauseparam(struct net_device *netdev,
                      struct ethtool_pauseparam *pause)
 {
        struct stmmac_priv *priv = netdev_priv(netdev);
+       u32 tx_cnt = priv->plat->tx_queues_to_use;
        struct phy_device *phy = netdev->phydev;
        int new_pause = FLOW_OFF;
 
@@ -511,7 +512,7 @@ stmmac_set_pauseparam(struct net_device *netdev,
        }
 
        priv->hw->mac->flow_ctrl(priv->hw, phy->duplex, priv->flow_ctrl,
-                                priv->pause);
+                                priv->pause, tx_cnt);
        return 0;
 }
 
@@ -519,6 +520,8 @@ static void stmmac_get_ethtool_stats(struct net_device *dev,
                                 struct ethtool_stats *dummy, u64 *data)
 {
        struct stmmac_priv *priv = netdev_priv(dev);
+       u32 rx_queues_count = priv->plat->rx_queues_to_use;
+       u32 tx_queues_count = priv->plat->tx_queues_to_use;
        int i, j = 0;
 
        /* Update the DMA HW counters for dwmac10/100 */
@@ -549,7 +552,8 @@ static void stmmac_get_ethtool_stats(struct net_device *dev,
                if ((priv->hw->mac->debug) &&
                    (priv->synopsys_id >= DWMAC_CORE_3_50))
                        priv->hw->mac->debug(priv->ioaddr,
-                                            (void *)&priv->xstats);
+                                            (void *)&priv->xstats,
+                                            rx_queues_count, tx_queues_count);
        }
        for (i = 0; i < STMMAC_STATS_LEN; i++) {
                char *p = (char *)priv + stmmac_gstrings_stats[i].stat_offset;
@@ -726,6 +730,7 @@ static int stmmac_set_coalesce(struct net_device *dev,
                               struct ethtool_coalesce *ec)
 {
        struct stmmac_priv *priv = netdev_priv(dev);
+       u32 rx_cnt = priv->plat->rx_queues_to_use;
        unsigned int rx_riwt;
 
        /* Check not supported parameters  */
@@ -764,7 +769,7 @@ static int stmmac_set_coalesce(struct net_device *dev,
        priv->tx_coal_frames = ec->tx_max_coalesced_frames;
        priv->tx_coal_timer = ec->tx_coalesce_usecs;
        priv->rx_riwt = rx_riwt;
-       priv->hw->dma->rx_watchdog(priv->ioaddr, priv->rx_riwt);
+       priv->hw->dma->rx_watchdog(priv->ioaddr, priv->rx_riwt, rx_cnt);
 
        return 0;
 }
index 4498a3861aa3ad09460e922bd7f38e3506889dcb..85f315e01c1d8f6b7c524edf9aa40e7c54a4dc83 100644 (file)
@@ -138,6 +138,64 @@ static void stmmac_verify_args(void)
                eee_timer = STMMAC_DEFAULT_LPI_TIMER;
 }
 
+/**
+ * stmmac_disable_all_queues - Disable all queues
+ * @priv: driver private structure
+ */
+static void stmmac_disable_all_queues(struct stmmac_priv *priv)
+{
+       u32 rx_queues_cnt = priv->plat->rx_queues_to_use;
+       u32 queue;
+
+       for (queue = 0; queue < rx_queues_cnt; queue++) {
+               struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
+
+               napi_disable(&rx_q->napi);
+       }
+}
+
+/**
+ * stmmac_enable_all_queues - Enable all queues
+ * @priv: driver private structure
+ */
+static void stmmac_enable_all_queues(struct stmmac_priv *priv)
+{
+       u32 rx_queues_cnt = priv->plat->rx_queues_to_use;
+       u32 queue;
+
+       for (queue = 0; queue < rx_queues_cnt; queue++) {
+               struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
+
+               napi_enable(&rx_q->napi);
+       }
+}
+
+/**
+ * stmmac_stop_all_queues - Stop all queues
+ * @priv: driver private structure
+ */
+static void stmmac_stop_all_queues(struct stmmac_priv *priv)
+{
+       u32 tx_queues_cnt = priv->plat->tx_queues_to_use;
+       u32 queue;
+
+       for (queue = 0; queue < tx_queues_cnt; queue++)
+               netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, queue));
+}
+
+/**
+ * stmmac_start_all_queues - Start all queues
+ * @priv: driver private structure
+ */
+static void stmmac_start_all_queues(struct stmmac_priv *priv)
+{
+       u32 tx_queues_cnt = priv->plat->tx_queues_to_use;
+       u32 queue;
+
+       for (queue = 0; queue < tx_queues_cnt; queue++)
+               netif_tx_start_queue(netdev_get_tx_queue(priv->dev, queue));
+}
+
 /**
  * stmmac_clk_csr_set - dynamically set the MDC clock
  * @priv: driver private structure
@@ -185,26 +243,33 @@ static void print_pkt(unsigned char *buf, int len)
        print_hex_dump_bytes("", DUMP_PREFIX_OFFSET, buf, len);
 }
 
-static inline u32 stmmac_tx_avail(struct stmmac_priv *priv)
+static inline u32 stmmac_tx_avail(struct stmmac_priv *priv, u32 queue)
 {
+       struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
        u32 avail;
 
-       if (priv->dirty_tx > priv->cur_tx)
-               avail = priv->dirty_tx - priv->cur_tx - 1;
+       if (tx_q->dirty_tx > tx_q->cur_tx)
+               avail = tx_q->dirty_tx - tx_q->cur_tx - 1;
        else
-               avail = DMA_TX_SIZE - priv->cur_tx + priv->dirty_tx - 1;
+               avail = DMA_TX_SIZE - tx_q->cur_tx + tx_q->dirty_tx - 1;
 
        return avail;
 }
 
-static inline u32 stmmac_rx_dirty(struct stmmac_priv *priv)
+/**
+ * stmmac_rx_dirty - Get the number of dirty RX descriptors
+ * @priv: driver private structure
+ * @queue: RX queue index
+ */
+static inline u32 stmmac_rx_dirty(struct stmmac_priv *priv, u32 queue)
 {
+       struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
        u32 dirty;
 
-       if (priv->dirty_rx <= priv->cur_rx)
-               dirty = priv->cur_rx - priv->dirty_rx;
+       if (rx_q->dirty_rx <= rx_q->cur_rx)
+               dirty = rx_q->cur_rx - rx_q->dirty_rx;
        else
-               dirty = DMA_RX_SIZE - priv->dirty_rx + priv->cur_rx;
+               dirty = DMA_RX_SIZE - rx_q->dirty_rx + rx_q->cur_rx;
 
        return dirty;
 }
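
A standalone check of the per-queue ring accounting above, using a hypothetical ring size of 256 entries in place of DMA_TX_SIZE/DMA_RX_SIZE:

#include <stdio.h>

#define RING_SIZE 256u  /* hypothetical stand-in for DMA_TX_SIZE/DMA_RX_SIZE */

static unsigned int tx_avail(unsigned int dirty, unsigned int cur)
{
        /* free TX slots, leaving one slot as a guard */
        return dirty > cur ? dirty - cur - 1
                           : RING_SIZE - cur + dirty - 1;
}

static unsigned int rx_dirty(unsigned int dirty, unsigned int cur)
{
        /* RX descriptors consumed but not yet refilled */
        return dirty <= cur ? cur - dirty
                            : RING_SIZE - dirty + cur;
}

int main(void)
{
        printf("tx_avail(dirty=2, cur=5)   = %u\n", tx_avail(2, 5));     /* 252 */
        printf("rx_dirty(dirty=250, cur=3) = %u\n", rx_dirty(250, 3));   /* 9 */
        return 0;
}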
@@ -232,9 +297,19 @@ static inline void stmmac_hw_fix_mac_speed(struct stmmac_priv *priv)
  */
 static void stmmac_enable_eee_mode(struct stmmac_priv *priv)
 {
+       u32 tx_cnt = priv->plat->tx_queues_to_use;
+       u32 queue;
+
+       /* check if all TX queues have finished their work */
+       for (queue = 0; queue < tx_cnt; queue++) {
+               struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
+
+               if (tx_q->dirty_tx != tx_q->cur_tx)
+                       return; /* still unfinished work */
+       }
+
        /* Check and enter in LPI mode */
-       if ((priv->dirty_tx == priv->cur_tx) &&
-           (priv->tx_path_in_lpi_mode == false))
+       if (!priv->tx_path_in_lpi_mode)
                priv->hw->mac->set_eee_mode(priv->hw,
                                            priv->plat->en_tx_lpi_clockgating);
 }
@@ -672,6 +747,19 @@ static void stmmac_release_ptp(struct stmmac_priv *priv)
        stmmac_ptp_unregister(priv);
 }
 
+/**
+ *  stmmac_mac_flow_ctrl - Configure flow control in all queues
+ *  @priv: driver private structure
+ *  Description: It is used for configuring the flow control in all queues
+ */
+static void stmmac_mac_flow_ctrl(struct stmmac_priv *priv, u32 duplex)
+{
+       u32 tx_cnt = priv->plat->tx_queues_to_use;
+
+       priv->hw->mac->flow_ctrl(priv->hw, duplex, priv->flow_ctrl,
+                                priv->pause, tx_cnt);
+}
+
 /**
  * stmmac_adjust_link - adjusts the link parameters
  * @dev: net device structure
@@ -687,7 +775,6 @@ static void stmmac_adjust_link(struct net_device *dev)
        struct phy_device *phydev = dev->phydev;
        unsigned long flags;
        int new_state = 0;
-       unsigned int fc = priv->flow_ctrl, pause_time = priv->pause;
 
        if (!phydev)
                return;
@@ -709,8 +796,7 @@ static void stmmac_adjust_link(struct net_device *dev)
                }
                /* Flow Control operation */
                if (phydev->pause)
-                       priv->hw->mac->flow_ctrl(priv->hw, phydev->duplex,
-                                                fc, pause_time);
+                       stmmac_mac_flow_ctrl(priv, phydev->duplex);
 
                if (phydev->speed != priv->speed) {
                        new_state = 1;
@@ -878,22 +964,56 @@ static int stmmac_init_phy(struct net_device *dev)
        return 0;
 }
 
-static void stmmac_display_rings(struct stmmac_priv *priv)
+static void stmmac_display_rx_rings(struct stmmac_priv *priv)
 {
-       void *head_rx, *head_tx;
+       u32 rx_cnt = priv->plat->rx_queues_to_use;
+       void *head_rx;
+       u32 queue;
 
-       if (priv->extend_desc) {
-               head_rx = (void *)priv->dma_erx;
-               head_tx = (void *)priv->dma_etx;
-       } else {
-               head_rx = (void *)priv->dma_rx;
-               head_tx = (void *)priv->dma_tx;
+       /* Display RX rings */
+       for (queue = 0; queue < rx_cnt; queue++) {
+               struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
+
+               pr_info("\tRX Queue %u rings\n", queue);
+
+               if (priv->extend_desc)
+                       head_rx = (void *)rx_q->dma_erx;
+               else
+                       head_rx = (void *)rx_q->dma_rx;
+
+               /* Display RX ring */
+               priv->hw->desc->display_ring(head_rx, DMA_RX_SIZE, true);
        }
+}
 
-       /* Display Rx ring */
-       priv->hw->desc->display_ring(head_rx, DMA_RX_SIZE, true);
-       /* Display Tx ring */
-       priv->hw->desc->display_ring(head_tx, DMA_TX_SIZE, false);
+static void stmmac_display_tx_rings(struct stmmac_priv *priv)
+{
+       u32 tx_cnt = priv->plat->tx_queues_to_use;
+       void *head_tx;
+       u32 queue;
+
+       /* Display TX rings */
+       for (queue = 0; queue < tx_cnt; queue++) {
+               struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
+
+               pr_info("\tTX Queue %d rings\n", queue);
+
+               if (priv->extend_desc)
+                       head_tx = (void *)tx_q->dma_etx;
+               else
+                       head_tx = (void *)tx_q->dma_tx;
+
+               priv->hw->desc->display_ring(head_tx, DMA_TX_SIZE, false);
+       }
+}
+
+static void stmmac_display_rings(struct stmmac_priv *priv)
+{
+       /* Display RX ring */
+       stmmac_display_rx_rings(priv);
+
+       /* Display TX ring */
+       stmmac_display_tx_rings(priv);
 }
 
 static int stmmac_set_bfsize(int mtu, int bufsize)
@@ -913,48 +1033,88 @@ static int stmmac_set_bfsize(int mtu, int bufsize)
 }
 
 /**
- * stmmac_clear_descriptors - clear descriptors
+ * stmmac_clear_rx_descriptors - clear RX descriptors
  * @priv: driver private structure
- * Description: this function is called to clear the tx and rx descriptors
+ * @queue: RX queue index
+ * Description: this function is called to clear the RX descriptors
  * whether basic or extended descriptors are in use.
  */
-static void stmmac_clear_descriptors(struct stmmac_priv *priv)
+static void stmmac_clear_rx_descriptors(struct stmmac_priv *priv, u32 queue)
 {
+       struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
        int i;
 
-       /* Clear the Rx/Tx descriptors */
+       /* Clear the RX descriptors */
        for (i = 0; i < DMA_RX_SIZE; i++)
                if (priv->extend_desc)
-                       priv->hw->desc->init_rx_desc(&priv->dma_erx[i].basic,
+                       priv->hw->desc->init_rx_desc(&rx_q->dma_erx[i].basic,
                                                     priv->use_riwt, priv->mode,
                                                     (i == DMA_RX_SIZE - 1));
                else
-                       priv->hw->desc->init_rx_desc(&priv->dma_rx[i],
+                       priv->hw->desc->init_rx_desc(&rx_q->dma_rx[i],
                                                     priv->use_riwt, priv->mode,
                                                     (i == DMA_RX_SIZE - 1));
+}
+
+/**
+ * stmmac_clear_tx_descriptors - clear TX descriptors
+ * @priv: driver private structure
+ * @queue: TX queue index.
+ * Description: this function is called to clear the TX descriptors
+ * whether basic or extended descriptors are in use.
+ */
+static void stmmac_clear_tx_descriptors(struct stmmac_priv *priv, u32 queue)
+{
+       struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
+       int i;
+
+       /* Clear the TX descriptors */
        for (i = 0; i < DMA_TX_SIZE; i++)
                if (priv->extend_desc)
-                       priv->hw->desc->init_tx_desc(&priv->dma_etx[i].basic,
+                       priv->hw->desc->init_tx_desc(&tx_q->dma_etx[i].basic,
                                                     priv->mode,
                                                     (i == DMA_TX_SIZE - 1));
                else
-                       priv->hw->desc->init_tx_desc(&priv->dma_tx[i],
+                       priv->hw->desc->init_tx_desc(&tx_q->dma_tx[i],
                                                     priv->mode,
                                                     (i == DMA_TX_SIZE - 1));
 }
 
+/**
+ * stmmac_clear_descriptors - clear descriptors
+ * @priv: driver private structure
+ * Description: this function is called to clear the TX and RX descriptors
+ * whether basic or extended descriptors are in use.
+ */
+static void stmmac_clear_descriptors(struct stmmac_priv *priv)
+{
+       u32 rx_queue_cnt = priv->plat->rx_queues_to_use;
+       u32 tx_queue_cnt = priv->plat->tx_queues_to_use;
+       u32 queue;
+
+       /* Clear the RX descriptors */
+       for (queue = 0; queue < rx_queue_cnt; queue++)
+               stmmac_clear_rx_descriptors(priv, queue);
+
+       /* Clear the TX descriptors */
+       for (queue = 0; queue < tx_queue_cnt; queue++)
+               stmmac_clear_tx_descriptors(priv, queue);
+}
+
 /**
  * stmmac_init_rx_buffers - init the RX descriptor buffer.
  * @priv: driver private structure
  * @p: descriptor pointer
  * @i: descriptor index
- * @flags: gfp flag.
+ * @flags: gfp flag
+ * @queue: RX queue index
  * Description: this function is called to allocate a receive buffer, perform
  * the DMA mapping and init the descriptor.
  */
 static int stmmac_init_rx_buffers(struct stmmac_priv *priv, struct dma_desc *p,
-                                 int i, gfp_t flags)
+                                 int i, gfp_t flags, u32 queue)
 {
+       struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
        struct sk_buff *skb;
 
        skb = __netdev_alloc_skb_ip_align(priv->dev, priv->dma_buf_sz, flags);
@@ -963,20 +1123,20 @@ static int stmmac_init_rx_buffers(struct stmmac_priv *priv, struct dma_desc *p,
                           "%s: Rx init fails; skb is NULL\n", __func__);
                return -ENOMEM;
        }
-       priv->rx_skbuff[i] = skb;
-       priv->rx_skbuff_dma[i] = dma_map_single(priv->device, skb->data,
+       rx_q->rx_skbuff[i] = skb;
+       rx_q->rx_skbuff_dma[i] = dma_map_single(priv->device, skb->data,
                                                priv->dma_buf_sz,
                                                DMA_FROM_DEVICE);
-       if (dma_mapping_error(priv->device, priv->rx_skbuff_dma[i])) {
+       if (dma_mapping_error(priv->device, rx_q->rx_skbuff_dma[i])) {
                netdev_err(priv->dev, "%s: DMA mapping error\n", __func__);
                dev_kfree_skb_any(skb);
                return -EINVAL;
        }
 
        if (priv->synopsys_id >= DWMAC_CORE_4_00)
-               p->des0 = cpu_to_le32(priv->rx_skbuff_dma[i]);
+               p->des0 = cpu_to_le32(rx_q->rx_skbuff_dma[i]);
        else
-               p->des2 = cpu_to_le32(priv->rx_skbuff_dma[i]);
+               p->des2 = cpu_to_le32(rx_q->rx_skbuff_dma[i]);
 
        if ((priv->hw->mode->init_desc3) &&
            (priv->dma_buf_sz == BUF_SIZE_16KiB))
@@ -985,30 +1145,71 @@ static int stmmac_init_rx_buffers(struct stmmac_priv *priv, struct dma_desc *p,
        return 0;
 }
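
The buffer-init path above follows a strict allocate, map, check-the-mapping, publish order, releasing the skb if the DMA mapping fails. A hedged userspace sketch of that discipline (buf_alloc/buf_map and the error codes are stand-ins, not kernel APIs):

    #include <stdint.h>
    #include <stdlib.h>

    #define MAP_ERROR ((uintptr_t)0)

    static void *buf_alloc(size_t len) { return malloc(len); }
    static uintptr_t buf_map(void *p) { return (uintptr_t)p; /* pretend mapping */ }

    static int rx_buffer_init(void **skb_slot, uintptr_t *dma_slot, size_t len)
    {
    	void *skb = buf_alloc(len);

    	if (!skb)
    		return -1;               /* -ENOMEM in the driver */

    	uintptr_t dma = buf_map(skb);

    	if (dma == MAP_ERROR) {
    		free(skb);               /* dev_kfree_skb_any() in the driver */
    		return -2;               /* -EINVAL in the driver */
    	}

    	*skb_slot = skb;                 /* rx_q->rx_skbuff[i] */
    	*dma_slot = dma;                 /* rx_q->rx_skbuff_dma[i] */
    	return 0;
    }

    int main(void)
    {
    	void *skb;
    	uintptr_t dma;

    	return rx_buffer_init(&skb, &dma, 2048) ? 1 : 0;
    }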
 
-static void stmmac_free_rx_buffers(struct stmmac_priv *priv, int i)
+/**
+ * stmmac_free_rx_buffer - free an RX DMA buffer
+ * @priv: private structure
+ * @queue: RX queue index
+ * @i: buffer index.
+ */
+static void stmmac_free_rx_buffer(struct stmmac_priv *priv, u32 queue, int i)
 {
-       if (priv->rx_skbuff[i]) {
-               dma_unmap_single(priv->device, priv->rx_skbuff_dma[i],
+       struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
+
+       if (rx_q->rx_skbuff[i]) {
+               dma_unmap_single(priv->device, rx_q->rx_skbuff_dma[i],
                                 priv->dma_buf_sz, DMA_FROM_DEVICE);
-               dev_kfree_skb_any(priv->rx_skbuff[i]);
+               dev_kfree_skb_any(rx_q->rx_skbuff[i]);
        }
-       priv->rx_skbuff[i] = NULL;
+       rx_q->rx_skbuff[i] = NULL;
 }
 
 /**
- * init_dma_desc_rings - init the RX/TX descriptor rings
+ * stmmac_free_tx_buffer - free a TX DMA buffer
+ * @priv: private structure
+ * @queue: TX queue index
+ * @i: buffer index.
+ */
+static void stmmac_free_tx_buffer(struct stmmac_priv *priv, u32 queue, int i)
+{
+       struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
+
+       if (tx_q->tx_skbuff_dma[i].buf) {
+               if (tx_q->tx_skbuff_dma[i].map_as_page)
+                       dma_unmap_page(priv->device,
+                                      tx_q->tx_skbuff_dma[i].buf,
+                                      tx_q->tx_skbuff_dma[i].len,
+                                      DMA_TO_DEVICE);
+               else
+                       dma_unmap_single(priv->device,
+                                        tx_q->tx_skbuff_dma[i].buf,
+                                        tx_q->tx_skbuff_dma[i].len,
+                                        DMA_TO_DEVICE);
+       }
+
+       if (tx_q->tx_skbuff[i]) {
+               dev_kfree_skb_any(tx_q->tx_skbuff[i]);
+               tx_q->tx_skbuff[i] = NULL;
+               tx_q->tx_skbuff_dma[i].buf = 0;
+               tx_q->tx_skbuff_dma[i].map_as_page = false;
+       }
+}
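
stmmac_free_tx_buffer() can only undo a mapping because every TX slot carries its own metadata (address, length, mapping kind). A small sketch of that bookkeeping; the field names mirror tx_q->tx_skbuff_dma[], but the struct itself is illustrative:

    #include <stdbool.h>
    #include <stdint.h>

    struct tx_meta {
    	uintptr_t buf;     /* DMA address handed to the engine */
    	unsigned int len;  /* mapped length, needed to unmap later */
    	bool map_as_page;  /* picks dma_unmap_page() vs dma_unmap_single() */
    	bool last_segment; /* last fragment of a multi-descriptor frame */
    };

    static void tx_meta_release(struct tx_meta *m)
    {
    	if (m->buf) {
    		/* driver: dma_unmap_page() or dma_unmap_single(), chosen by
    		 * map_as_page -- both need the same (addr, len) pair */
    		m->buf = 0;
    		m->len = 0;
    		m->map_as_page = false;
    	}
    }

    int main(void)
    {
    	struct tx_meta m = { .buf = 0x1000, .len = 64, .map_as_page = true };

    	tx_meta_release(&m);
    	return m.buf == 0 ? 0 : 1;
    }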
+
+/**
+ * init_dma_rx_desc_rings - init the RX descriptor rings
  * @dev: net device structure
  * @flags: gfp flag.
- * Description: this function initializes the DMA RX/TX descriptors
+ * Description: this function initializes the DMA RX descriptors
  * and allocates the socket buffers. It supports the chained and ring
  * modes.
  */
-static int init_dma_desc_rings(struct net_device *dev, gfp_t flags)
+static int init_dma_rx_desc_rings(struct net_device *dev, gfp_t flags)
 {
-       int i;
        struct stmmac_priv *priv = netdev_priv(dev);
+       u32 rx_count = priv->plat->rx_queues_to_use;
        unsigned int bfsize = 0;
        int ret = -ENOMEM;
+       u32 queue;
+       int i;
 
        if (priv->hw->mode->set_16kib_bfsize)
                bfsize = priv->hw->mode->set_16kib_bfsize(dev->mtu);
@@ -1018,257 +1219,516 @@ static int init_dma_desc_rings(struct net_device *dev, gfp_t flags)
 
        priv->dma_buf_sz = bfsize;
 
-       netif_dbg(priv, probe, priv->dev,
-                 "(%s) dma_rx_phy=0x%08x dma_tx_phy=0x%08x\n",
-                 __func__, (u32)priv->dma_rx_phy, (u32)priv->dma_tx_phy);
-
        /* RX INITIALIZATION */
        netif_dbg(priv, probe, priv->dev,
                  "SKB addresses:\nskb\t\tskb data\tdma data\n");
 
-       for (i = 0; i < DMA_RX_SIZE; i++) {
-               struct dma_desc *p;
-               if (priv->extend_desc)
-                       p = &((priv->dma_erx + i)->basic);
-               else
-                       p = priv->dma_rx + i;
+       for (queue = 0; queue < rx_count; queue++) {
+               struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
+
+               netif_dbg(priv, probe, priv->dev,
+                         "(%s) dma_rx_phy=0x%08x\n", __func__,
+                         (u32)rx_q->dma_rx_phy);
 
-               ret = stmmac_init_rx_buffers(priv, p, i, flags);
-               if (ret)
-                       goto err_init_rx_buffers;
+               for (i = 0; i < DMA_RX_SIZE; i++) {
+                       struct dma_desc *p;
 
-               netif_dbg(priv, probe, priv->dev, "[%p]\t[%p]\t[%x]\n",
-                         priv->rx_skbuff[i], priv->rx_skbuff[i]->data,
-                         (unsigned int)priv->rx_skbuff_dma[i]);
+                       if (priv->extend_desc)
+                               p = &((rx_q->dma_erx + i)->basic);
+                       else
+                               p = rx_q->dma_rx + i;
+
+                       ret = stmmac_init_rx_buffers(priv, p, i, flags,
+                                                    queue);
+                       if (ret)
+                               goto err_init_rx_buffers;
+
+                       netif_dbg(priv, probe, priv->dev, "[%p]\t[%p]\t[%x]\n",
+                                 rx_q->rx_skbuff[i], rx_q->rx_skbuff[i]->data,
+                                 (unsigned int)rx_q->rx_skbuff_dma[i]);
+               }
+
+               rx_q->cur_rx = 0;
+               rx_q->dirty_rx = (unsigned int)(i - DMA_RX_SIZE);
+
+               stmmac_clear_rx_descriptors(priv, queue);
+
+               /* Setup the chained descriptor addresses */
+               if (priv->mode == STMMAC_CHAIN_MODE) {
+                       if (priv->extend_desc)
+                               priv->hw->mode->init(rx_q->dma_erx,
+                                                    rx_q->dma_rx_phy,
+                                                    DMA_RX_SIZE, 1);
+                       else
+                               priv->hw->mode->init(rx_q->dma_rx,
+                                                    rx_q->dma_rx_phy,
+                                                    DMA_RX_SIZE, 0);
+               }
        }
-       priv->cur_rx = 0;
-       priv->dirty_rx = (unsigned int)(i - DMA_RX_SIZE);
+
        buf_sz = bfsize;
 
-       /* Setup the chained descriptor addresses */
-       if (priv->mode == STMMAC_CHAIN_MODE) {
-               if (priv->extend_desc) {
-                       priv->hw->mode->init(priv->dma_erx, priv->dma_rx_phy,
-                                            DMA_RX_SIZE, 1);
-                       priv->hw->mode->init(priv->dma_etx, priv->dma_tx_phy,
-                                            DMA_TX_SIZE, 1);
-               } else {
-                       priv->hw->mode->init(priv->dma_rx, priv->dma_rx_phy,
-                                            DMA_RX_SIZE, 0);
-                       priv->hw->mode->init(priv->dma_tx, priv->dma_tx_phy,
-                                            DMA_TX_SIZE, 0);
-               }
+       return 0;
+
+err_init_rx_buffers:
+       while (1) { /* queue is u32; the break below terminates the loop */
+               while (--i >= 0)
+                       stmmac_free_rx_buffer(priv, queue, i);
+
+               if (queue == 0)
+                       break;
+
+               i = DMA_RX_SIZE;
+               queue--;
        }
 
-       /* TX INITIALIZATION */
-       for (i = 0; i < DMA_TX_SIZE; i++) {
-               struct dma_desc *p;
-               if (priv->extend_desc)
-                       p = &((priv->dma_etx + i)->basic);
-               else
-                       p = priv->dma_tx + i;
+       return ret;
+}
 
-               if (priv->synopsys_id >= DWMAC_CORE_4_00) {
-                       p->des0 = 0;
-                       p->des1 = 0;
-                       p->des2 = 0;
-                       p->des3 = 0;
-               } else {
-                       p->des2 = 0;
+/**
+ * init_dma_tx_desc_rings - init the TX descriptor rings
+ * @dev: net device structure.
+ * Description: this function initializes the DMA TX descriptors.
+ * It supports the chained and ring modes.
+ */
+static int init_dma_tx_desc_rings(struct net_device *dev)
+{
+       struct stmmac_priv *priv = netdev_priv(dev);
+       u32 tx_queue_cnt = priv->plat->tx_queues_to_use;
+       u32 queue;
+       int i;
+
+       for (queue = 0; queue < tx_queue_cnt; queue++) {
+               struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
+
+               netif_dbg(priv, probe, priv->dev,
+                         "(%s) dma_tx_phy=0x%08x\n", __func__,
+                         (u32)tx_q->dma_tx_phy);
+
+               /* Setup the chained descriptor addresses */
+               if (priv->mode == STMMAC_CHAIN_MODE) {
+                       if (priv->extend_desc)
+                               priv->hw->mode->init(tx_q->dma_etx,
+                                                    tx_q->dma_tx_phy,
+                                                    DMA_TX_SIZE, 1);
+                       else
+                               priv->hw->mode->init(tx_q->dma_tx,
+                                                    tx_q->dma_tx_phy,
+                                                    DMA_TX_SIZE, 0);
                }
 
-               priv->tx_skbuff_dma[i].buf = 0;
-               priv->tx_skbuff_dma[i].map_as_page = false;
-               priv->tx_skbuff_dma[i].len = 0;
-               priv->tx_skbuff_dma[i].last_segment = false;
-               priv->tx_skbuff[i] = NULL;
+               for (i = 0; i < DMA_TX_SIZE; i++) {
+                       struct dma_desc *p;
+                       if (priv->extend_desc)
+                               p = &((tx_q->dma_etx + i)->basic);
+                       else
+                               p = tx_q->dma_tx + i;
+
+                       if (priv->synopsys_id >= DWMAC_CORE_4_00) {
+                               p->des0 = 0;
+                               p->des1 = 0;
+                               p->des2 = 0;
+                               p->des3 = 0;
+                       } else {
+                               p->des2 = 0;
+                       }
+
+                       tx_q->tx_skbuff_dma[i].buf = 0;
+                       tx_q->tx_skbuff_dma[i].map_as_page = false;
+                       tx_q->tx_skbuff_dma[i].len = 0;
+                       tx_q->tx_skbuff_dma[i].last_segment = false;
+                       tx_q->tx_skbuff[i] = NULL;
+               }
+
+               tx_q->dirty_tx = 0;
+               tx_q->cur_tx = 0;
+
+               netdev_tx_reset_queue(netdev_get_tx_queue(priv->dev, queue));
        }
 
-       priv->dirty_tx = 0;
-       priv->cur_tx = 0;
-       netdev_reset_queue(priv->dev);
+       return 0;
+}
+
+/**
+ * init_dma_desc_rings - init the RX/TX descriptor rings
+ * @dev: net device structure
+ * @flags: gfp flag.
+ * Description: this function initializes the DMA RX/TX descriptors
+ * and allocates the socket buffers. It supports the chained and ring
+ * modes.
+ */
+static int init_dma_desc_rings(struct net_device *dev, gfp_t flags)
+{
+       struct stmmac_priv *priv = netdev_priv(dev);
+       int ret;
+
+       ret = init_dma_rx_desc_rings(dev, flags);
+       if (ret)
+               return ret;
+
+       ret = init_dma_tx_desc_rings(dev);
 
        stmmac_clear_descriptors(priv);
 
        if (netif_msg_hw(priv))
                stmmac_display_rings(priv);
 
-       return 0;
-err_init_rx_buffers:
-       while (--i >= 0)
-               stmmac_free_rx_buffers(priv, i);
        return ret;
 }
 
-static void dma_free_rx_skbufs(struct stmmac_priv *priv)
+/**
+ * dma_free_rx_skbufs - free RX dma buffers
+ * @priv: private structure
+ * @queue: RX queue index
+ */
+static void dma_free_rx_skbufs(struct stmmac_priv *priv, u32 queue)
 {
        int i;
 
        for (i = 0; i < DMA_RX_SIZE; i++)
-               stmmac_free_rx_buffers(priv, i);
+               stmmac_free_rx_buffer(priv, queue, i);
 }
 
-static void dma_free_tx_skbufs(struct stmmac_priv *priv)
+/**
+ * dma_free_tx_skbufs - free TX dma buffers
+ * @priv: private structure
+ * @queue: TX queue index
+ */
+static void dma_free_tx_skbufs(struct stmmac_priv *priv, u32 queue)
 {
        int i;
 
-       for (i = 0; i < DMA_TX_SIZE; i++) {
-               if (priv->tx_skbuff_dma[i].buf) {
-                       if (priv->tx_skbuff_dma[i].map_as_page)
-                               dma_unmap_page(priv->device,
-                                              priv->tx_skbuff_dma[i].buf,
-                                              priv->tx_skbuff_dma[i].len,
-                                              DMA_TO_DEVICE);
-                       else
-                               dma_unmap_single(priv->device,
-                                                priv->tx_skbuff_dma[i].buf,
-                                                priv->tx_skbuff_dma[i].len,
-                                                DMA_TO_DEVICE);
+       for (i = 0; i < DMA_TX_SIZE; i++)
+               stmmac_free_tx_buffer(priv, queue, i);
+}
+
+/**
+ * free_dma_rx_desc_resources - free RX dma desc resources
+ * @priv: private structure
+ */
+static void free_dma_rx_desc_resources(struct stmmac_priv *priv)
+{
+       u32 rx_count = priv->plat->rx_queues_to_use;
+       u32 queue;
+
+       /* Free RX queue resources */
+       for (queue = 0; queue < rx_count; queue++) {
+               struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
+
+               /* Release the DMA RX socket buffers */
+               dma_free_rx_skbufs(priv, queue);
+
+               /* Free DMA regions of consistent memory previously allocated */
+               if (!priv->extend_desc)
+                       dma_free_coherent(priv->device,
+                                         DMA_RX_SIZE * sizeof(struct dma_desc),
+                                         rx_q->dma_rx, rx_q->dma_rx_phy);
+               else
+                       dma_free_coherent(priv->device, DMA_RX_SIZE *
+                                         sizeof(struct dma_extended_desc),
+                                         rx_q->dma_erx, rx_q->dma_rx_phy);
+
+               kfree(rx_q->rx_skbuff_dma);
+               kfree(rx_q->rx_skbuff);
+       }
+}
+
+/**
+ * free_dma_tx_desc_resources - free TX dma desc resources
+ * @priv: private structure
+ */
+static void free_dma_tx_desc_resources(struct stmmac_priv *priv)
+{
+       u32 tx_count = priv->plat->tx_queues_to_use;
+       u32 queue = 0;
+
+       /* Free TX queue resources */
+       for (queue = 0; queue < tx_count; queue++) {
+               struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
+
+               /* Release the DMA TX socket buffers */
+               dma_free_tx_skbufs(priv, queue);
+
+               /* Free DMA regions of consistent memory previously allocated */
+               if (!priv->extend_desc)
+                       dma_free_coherent(priv->device,
+                                         DMA_TX_SIZE * sizeof(struct dma_desc),
+                                         tx_q->dma_tx, tx_q->dma_tx_phy);
+               else
+                       dma_free_coherent(priv->device, DMA_TX_SIZE *
+                                         sizeof(struct dma_extended_desc),
+                                         tx_q->dma_etx, tx_q->dma_tx_phy);
+
+               kfree(tx_q->tx_skbuff_dma);
+               kfree(tx_q->tx_skbuff);
+       }
+}
+
+/**
+ * alloc_dma_rx_desc_resources - alloc RX resources.
+ * @priv: private structure
+ * Description: according to which descriptor can be used (extend or basic)
+ * this function allocates the resources for the RX path. It pre-allocates
+ * the RX socket buffers in order to allow the zero-copy mechanism.
+ */
+static int alloc_dma_rx_desc_resources(struct stmmac_priv *priv)
+{
+       u32 rx_count = priv->plat->rx_queues_to_use;
+       int ret = -ENOMEM;
+       u32 queue;
+
+       /* RX queues buffers and DMA */
+       for (queue = 0; queue < rx_count; queue++) {
+               struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
+
+               rx_q->queue_index = queue;
+               rx_q->priv_data = priv;
+
+               rx_q->rx_skbuff_dma = kmalloc_array(DMA_RX_SIZE,
+                                                   sizeof(dma_addr_t),
+                                                   GFP_KERNEL);
+               if (!rx_q->rx_skbuff_dma)
+                       return -ENOMEM;
+
+               rx_q->rx_skbuff = kmalloc_array(DMA_RX_SIZE,
+                                               sizeof(struct sk_buff *),
+                                               GFP_KERNEL);
+               if (!rx_q->rx_skbuff)
+                       goto err_dma;
+
+               if (priv->extend_desc) {
+                       rx_q->dma_erx = dma_zalloc_coherent(priv->device,
+                                                           DMA_RX_SIZE *
+                                                           sizeof(struct
+                                                           dma_extended_desc),
+                                                           &rx_q->dma_rx_phy,
+                                                           GFP_KERNEL);
+                       if (!rx_q->dma_erx)
+                               goto err_dma;
+
+               } else {
+                       rx_q->dma_rx = dma_zalloc_coherent(priv->device,
+                                                          DMA_RX_SIZE *
+                                                          sizeof(struct
+                                                          dma_desc),
+                                                          &rx_q->dma_rx_phy,
+                                                          GFP_KERNEL);
+                       if (!rx_q->dma_rx)
+                               goto err_dma;
                }
+       }
+
+       return 0;
+
+err_dma:
+       free_dma_rx_desc_resources(priv);
+
+       return ret;
+}
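
Failures after the first allocation in each queue funnel into one teardown that sweeps every queue, which is safe because kfree(NULL) is a no-op. A userspace sketch of that all-or-nothing shape (note the patch itself returns -ENOMEM directly if the very first array allocation fails; the sketch routes every failure through the teardown for uniformity):

    #include <stdlib.h>

    #define NQUEUES 4

    struct rxq { void *skbuff_dma; void *skbuff; };

    static struct rxq queues[NQUEUES];

    static void free_all(void)
    {
    	for (int q = 0; q < NQUEUES; q++) {
    		free(queues[q].skbuff_dma); /* free(NULL) is harmless */
    		free(queues[q].skbuff);
    		queues[q].skbuff_dma = queues[q].skbuff = NULL;
    	}
    }

    static int alloc_all(void)
    {
    	for (int q = 0; q < NQUEUES; q++) {
    		queues[q].skbuff_dma = calloc(64, sizeof(void *));
    		if (!queues[q].skbuff_dma)
    			goto err;
    		queues[q].skbuff = calloc(64, sizeof(void *));
    		if (!queues[q].skbuff)
    			goto err;
    	}
    	return 0;
    err:
    	free_all();
    	return -1;
    }

    int main(void) { return alloc_all() ? 1 : (free_all(), 0); }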
+
+/**
+ * alloc_dma_tx_desc_resources - alloc TX resources.
+ * @priv: private structure
+ * Description: according to which descriptor can be used (extend or basic)
+ * this function allocates the resources for the TX path (descriptors and
+ * the per-entry buffer bookkeeping).
+ */
+static int alloc_dma_tx_desc_resources(struct stmmac_priv *priv)
+{
+       u32 tx_count = priv->plat->tx_queues_to_use;
+       int ret = -ENOMEM;
+       u32 queue;
+
+       /* TX queues buffers and DMA */
+       for (queue = 0; queue < tx_count; queue++) {
+               struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
+
+               tx_q->queue_index = queue;
+               tx_q->priv_data = priv;
+
+               tx_q->tx_skbuff_dma = kmalloc_array(DMA_TX_SIZE,
+                                                   sizeof(*tx_q->tx_skbuff_dma),
+                                                   GFP_KERNEL);
+               if (!tx_q->tx_skbuff_dma)
+                       return -ENOMEM;
+
+               tx_q->tx_skbuff = kmalloc_array(DMA_TX_SIZE,
+                                               sizeof(struct sk_buff *),
+                                               GFP_KERNEL);
+               if (!tx_q->tx_skbuff)
+                       goto err_dma_buffers;
+
+               if (priv->extend_desc) {
+                       tx_q->dma_etx = dma_zalloc_coherent(priv->device,
+                                                           DMA_TX_SIZE *
+                                                           sizeof(struct
+                                                           dma_extended_desc),
+                                                           &tx_q->dma_tx_phy,
+                                                           GFP_KERNEL);
+                       if (!tx_q->dma_etx)
+                               goto err_dma_buffers;
+               } else {
+                       tx_q->dma_tx = dma_zalloc_coherent(priv->device,
+                                                          DMA_TX_SIZE *
+                                                          sizeof(struct
+                                                                 dma_desc),
+                                                          &tx_q->dma_tx_phy,
+                                                          GFP_KERNEL);
+                       if (!tx_q->dma_tx)
+                               goto err_dma_buffers;
+               }
+       }
+
+       return 0;
+
+err_dma_buffers:
+       free_dma_tx_desc_resources(priv);
+
+       return ret;
+}
+
+/**
+ * alloc_dma_desc_resources - alloc TX/RX resources.
+ * @priv: private structure
+ * Description: according to which descriptor can be used (extend or basic)
+ * this function allocates the resources for TX and RX paths. In case of
+ * reception, for example, it pre-allocates the RX socket buffers in order to
+ * allow the zero-copy mechanism.
+ */
+static int alloc_dma_desc_resources(struct stmmac_priv *priv)
+{
+       /* RX Allocation */
+       int ret = alloc_dma_rx_desc_resources(priv);
+
+       if (ret)
+               return ret;
+
+       ret = alloc_dma_tx_desc_resources(priv);
+
+       return ret;
+}
+
+/**
+ * free_dma_desc_resources - free dma desc resources
+ * @priv: private structure
+ */
+static void free_dma_desc_resources(struct stmmac_priv *priv)
+{
+       /* Release the DMA RX socket buffers */
+       free_dma_rx_desc_resources(priv);
+
+       /* Release the DMA TX socket buffers */
+       free_dma_tx_desc_resources(priv);
+}
+
+/**
+ *  stmmac_mac_enable_rx_queues - Enable MAC rx queues
+ *  @priv: driver private structure
+ *  Description: It is used for enabling the rx queues in the MAC
+ */
+static void stmmac_mac_enable_rx_queues(struct stmmac_priv *priv)
+{
+       u32 rx_queues_count = priv->plat->rx_queues_to_use;
+       int queue;
+       u8 mode;
 
-               if (priv->tx_skbuff[i]) {
-                       dev_kfree_skb_any(priv->tx_skbuff[i]);
-                       priv->tx_skbuff[i] = NULL;
-                       priv->tx_skbuff_dma[i].buf = 0;
-                       priv->tx_skbuff_dma[i].map_as_page = false;
-               }
+       for (queue = 0; queue < rx_queues_count; queue++) {
+               mode = priv->plat->rx_queues_cfg[queue].mode_to_use;
+               priv->hw->mac->rx_queue_enable(priv->hw, mode, queue);
        }
 }
 
 /**
- * alloc_dma_desc_resources - alloc TX/RX resources.
- * @priv: private structure
- * Description: according to which descriptor can be used (extend or basic)
- * this function allocates the resources for TX and RX paths. In case of
- * reception, for example, it pre-allocated the RX socket buffer in order to
- * allow zero-copy mechanism.
+ * stmmac_start_rx_dma - start RX DMA channel
+ * @priv: driver private structure
+ * @chan: RX channel index
+ * Description:
+ * This starts an RX DMA channel
  */
-static int alloc_dma_desc_resources(struct stmmac_priv *priv)
+static void stmmac_start_rx_dma(struct stmmac_priv *priv, u32 chan)
 {
-       int ret = -ENOMEM;
-
-       priv->rx_skbuff_dma = kmalloc_array(DMA_RX_SIZE, sizeof(dma_addr_t),
-                                           GFP_KERNEL);
-       if (!priv->rx_skbuff_dma)
-               return -ENOMEM;
-
-       priv->rx_skbuff = kmalloc_array(DMA_RX_SIZE, sizeof(struct sk_buff *),
-                                       GFP_KERNEL);
-       if (!priv->rx_skbuff)
-               goto err_rx_skbuff;
-
-       priv->tx_skbuff_dma = kmalloc_array(DMA_TX_SIZE,
-                                           sizeof(*priv->tx_skbuff_dma),
-                                           GFP_KERNEL);
-       if (!priv->tx_skbuff_dma)
-               goto err_tx_skbuff_dma;
-
-       priv->tx_skbuff = kmalloc_array(DMA_TX_SIZE, sizeof(struct sk_buff *),
-                                       GFP_KERNEL);
-       if (!priv->tx_skbuff)
-               goto err_tx_skbuff;
-
-       if (priv->extend_desc) {
-               priv->dma_erx = dma_zalloc_coherent(priv->device, DMA_RX_SIZE *
-                                                   sizeof(struct
-                                                          dma_extended_desc),
-                                                   &priv->dma_rx_phy,
-                                                   GFP_KERNEL);
-               if (!priv->dma_erx)
-                       goto err_dma;
-
-               priv->dma_etx = dma_zalloc_coherent(priv->device, DMA_TX_SIZE *
-                                                   sizeof(struct
-                                                          dma_extended_desc),
-                                                   &priv->dma_tx_phy,
-                                                   GFP_KERNEL);
-               if (!priv->dma_etx) {
-                       dma_free_coherent(priv->device, DMA_RX_SIZE *
-                                         sizeof(struct dma_extended_desc),
-                                         priv->dma_erx, priv->dma_rx_phy);
-                       goto err_dma;
-               }
-       } else {
-               priv->dma_rx = dma_zalloc_coherent(priv->device, DMA_RX_SIZE *
-                                                  sizeof(struct dma_desc),
-                                                  &priv->dma_rx_phy,
-                                                  GFP_KERNEL);
-               if (!priv->dma_rx)
-                       goto err_dma;
+       netdev_dbg(priv->dev, "DMA RX processes started in channel %d\n", chan);
+       priv->hw->dma->start_rx(priv->ioaddr, chan);
+}
 
-               priv->dma_tx = dma_zalloc_coherent(priv->device, DMA_TX_SIZE *
-                                                  sizeof(struct dma_desc),
-                                                  &priv->dma_tx_phy,
-                                                  GFP_KERNEL);
-               if (!priv->dma_tx) {
-                       dma_free_coherent(priv->device, DMA_RX_SIZE *
-                                         sizeof(struct dma_desc),
-                                         priv->dma_rx, priv->dma_rx_phy);
-                       goto err_dma;
-               }
-       }
+/**
+ * stmmac_start_tx_dma - start TX DMA channel
+ * @priv: driver private structure
+ * @chan: TX channel index
+ * Description:
+ * This starts a TX DMA channel
+ */
+static void stmmac_start_tx_dma(struct stmmac_priv *priv, u32 chan)
+{
+       netdev_dbg(priv->dev, "DMA TX processes started in channel %d\n", chan);
+       priv->hw->dma->start_tx(priv->ioaddr, chan);
+}
 
-       return 0;
+/**
+ * stmmac_stop_rx_dma - stop RX DMA channel
+ * @priv: driver private structure
+ * @chan: RX channel index
+ * Description:
+ * This stops an RX DMA channel
+ */
+static void stmmac_stop_rx_dma(struct stmmac_priv *priv, u32 chan)
+{
+       netdev_dbg(priv->dev, "DMA RX processes stopped in channel %d\n", chan);
+       priv->hw->dma->stop_rx(priv->ioaddr, chan);
+}
 
-err_dma:
-       kfree(priv->tx_skbuff);
-err_tx_skbuff:
-       kfree(priv->tx_skbuff_dma);
-err_tx_skbuff_dma:
-       kfree(priv->rx_skbuff);
-err_rx_skbuff:
-       kfree(priv->rx_skbuff_dma);
-       return ret;
+/**
+ * stmmac_stop_tx_dma - stop TX DMA channel
+ * @priv: driver private structure
+ * @chan: TX channel index
+ * Description:
+ * This stops a TX DMA channel
+ */
+static void stmmac_stop_tx_dma(struct stmmac_priv *priv, u32 chan)
+{
+       netdev_dbg(priv->dev, "DMA TX processes stopped in channel %d\n", chan);
+       priv->hw->dma->stop_tx(priv->ioaddr, chan);
 }
 
-static void free_dma_desc_resources(struct stmmac_priv *priv)
+/**
+ * stmmac_start_all_dma - start all RX and TX DMA channels
+ * @priv: driver private structure
+ * Description:
+ * This starts all the RX and TX DMA channels
+ */
+static void stmmac_start_all_dma(struct stmmac_priv *priv)
 {
-       /* Release the DMA TX/RX socket buffers */
-       dma_free_rx_skbufs(priv);
-       dma_free_tx_skbufs(priv);
-
-       /* Free DMA regions of consistent memory previously allocated */
-       if (!priv->extend_desc) {
-               dma_free_coherent(priv->device,
-                                 DMA_TX_SIZE * sizeof(struct dma_desc),
-                                 priv->dma_tx, priv->dma_tx_phy);
-               dma_free_coherent(priv->device,
-                                 DMA_RX_SIZE * sizeof(struct dma_desc),
-                                 priv->dma_rx, priv->dma_rx_phy);
-       } else {
-               dma_free_coherent(priv->device, DMA_TX_SIZE *
-                                 sizeof(struct dma_extended_desc),
-                                 priv->dma_etx, priv->dma_tx_phy);
-               dma_free_coherent(priv->device, DMA_RX_SIZE *
-                                 sizeof(struct dma_extended_desc),
-                                 priv->dma_erx, priv->dma_rx_phy);
-       }
-       kfree(priv->rx_skbuff_dma);
-       kfree(priv->rx_skbuff);
-       kfree(priv->tx_skbuff_dma);
-       kfree(priv->tx_skbuff);
+       u32 rx_channels_count = priv->plat->rx_queues_to_use;
+       u32 tx_channels_count = priv->plat->tx_queues_to_use;
+       u32 chan = 0;
+
+       for (chan = 0; chan < rx_channels_count; chan++)
+               stmmac_start_rx_dma(priv, chan);
+
+       for (chan = 0; chan < tx_channels_count; chan++)
+               stmmac_start_tx_dma(priv, chan);
 }
 
 /**
- *  stmmac_mac_enable_rx_queues - Enable MAC rx queues
- *  @priv: driver private structure
- *  Description: It is used for enabling the rx queues in the MAC
+ * stmmac_stop_all_dma - stop all RX and TX DMA channels
+ * @priv: driver private structure
+ * Description:
+ * This stops the RX and TX DMA channels
  */
-static void stmmac_mac_enable_rx_queues(struct stmmac_priv *priv)
+static void stmmac_stop_all_dma(struct stmmac_priv *priv)
 {
-       int rx_count = priv->dma_cap.number_rx_queues;
-       int queue = 0;
+       u32 rx_channels_count = priv->plat->rx_queues_to_use;
+       u32 tx_channels_count = priv->plat->tx_queues_to_use;
+       u32 chan = 0;
 
-       /* If GMAC does not have multiple queues, then this is not necessary*/
-       if (rx_count == 1)
-               return;
+       for (chan = 0; chan < rx_channels_count; chan++)
+               stmmac_stop_rx_dma(priv, chan);
 
-       /**
-        *  If the core is synthesized with multiple rx queues / multiple
-        *  dma channels, then rx queues will be disabled by default.
-        *  For now only rx queue 0 is enabled.
-        */
-       priv->hw->mac->rx_queue_enable(priv->hw, queue);
+       for (chan = 0; chan < tx_channels_count; chan++)
+               stmmac_stop_tx_dma(priv, chan);
 }
 
 /**
@@ -1279,11 +1739,20 @@ static void stmmac_mac_enable_rx_queues(struct stmmac_priv *priv)
  */
 static void stmmac_dma_operation_mode(struct stmmac_priv *priv)
 {
+       u32 rx_channels_count = priv->plat->rx_queues_to_use;
+       u32 tx_channels_count = priv->plat->tx_queues_to_use;
        int rxfifosz = priv->plat->rx_fifo_size;
+       u32 txmode = 0;
+       u32 rxmode = 0;
+       u32 chan = 0;
+
+       if (rxfifosz == 0)
+               rxfifosz = priv->dma_cap.rx_fifo_size;
 
-       if (priv->plat->force_thresh_dma_mode)
-               priv->hw->dma->dma_mode(priv->ioaddr, tc, tc, rxfifosz);
-       else if (priv->plat->force_sf_dma_mode || priv->plat->tx_coe) {
+       if (priv->plat->force_thresh_dma_mode) {
+               txmode = tc;
+               rxmode = tc;
+       } else if (priv->plat->force_sf_dma_mode || priv->plat->tx_coe) {
                /*
                 * In case of GMAC, SF mode can be enabled
                 * to perform the TX COE in HW. This depends on:
@@ -1291,37 +1760,53 @@ static void stmmac_dma_operation_mode(struct stmmac_priv *priv)
                 * 2) There is no bugged Jumbo frame support
                 *    that needs to not insert csum in the TDES.
                 */
-               priv->hw->dma->dma_mode(priv->ioaddr, SF_DMA_MODE, SF_DMA_MODE,
-                                       rxfifosz);
+               txmode = SF_DMA_MODE;
+               rxmode = SF_DMA_MODE;
                priv->xstats.threshold = SF_DMA_MODE;
-       } else
-               priv->hw->dma->dma_mode(priv->ioaddr, tc, SF_DMA_MODE,
+       } else {
+               txmode = tc;
+               rxmode = SF_DMA_MODE;
+       }
+
+       /* configure all channels */
+       if (priv->synopsys_id >= DWMAC_CORE_4_00) {
+               for (chan = 0; chan < rx_channels_count; chan++)
+                       priv->hw->dma->dma_rx_mode(priv->ioaddr, rxmode, chan,
+                                                  rxfifosz);
+
+               for (chan = 0; chan < tx_channels_count; chan++)
+                       priv->hw->dma->dma_tx_mode(priv->ioaddr, txmode, chan);
+       } else {
+               priv->hw->dma->dma_mode(priv->ioaddr, txmode, rxmode,
                                        rxfifosz);
+       }
 }
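
The mode choice above reduces to three cases. A condensed, self-contained C restatement (SF_DMA_MODE's value here is an illustrative sentinel, not the kernel's constant):

    #include <stdio.h>

    #define SF_DMA_MODE (-1) /* illustrative sentinel for store-and-forward */

    static void pick_modes(int force_thresh, int force_sf_or_txcoe, int tc,
    		       int *txmode, int *rxmode)
    {
    	if (force_thresh) {             /* thresholds in both directions */
    		*txmode = tc;
    		*rxmode = tc;
    	} else if (force_sf_or_txcoe) { /* SF lets HW compute TX checksums */
    		*txmode = SF_DMA_MODE;
    		*rxmode = SF_DMA_MODE;
    	} else {                        /* default: threshold TX, SF RX */
    		*txmode = tc;
    		*rxmode = SF_DMA_MODE;
    	}
    }

    int main(void)
    {
    	int tx, rx;

    	pick_modes(0, 0, 64, &tx, &rx);
    	printf("txmode=%d rxmode=%d\n", tx, rx);
    	return 0;
    }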
 
 /**
  * stmmac_tx_clean - to manage the transmission completion
  * @priv: driver private structure
+ * @queue: TX queue index
  * Description: it reclaims the transmit resources after transmission completes.
  */
-static void stmmac_tx_clean(struct stmmac_priv *priv)
+static void stmmac_tx_clean(struct stmmac_priv *priv, u32 queue)
 {
+       struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
        unsigned int bytes_compl = 0, pkts_compl = 0;
-       unsigned int entry = priv->dirty_tx;
+       unsigned int entry = tx_q->dirty_tx;
 
        netif_tx_lock(priv->dev);
 
        priv->xstats.tx_clean++;
 
-       while (entry != priv->cur_tx) {
-               struct sk_buff *skb = priv->tx_skbuff[entry];
+       while (entry != tx_q->cur_tx) {
+               struct sk_buff *skb = tx_q->tx_skbuff[entry];
                struct dma_desc *p;
                int status;
 
                if (priv->extend_desc)
-                       p = (struct dma_desc *)(priv->dma_etx + entry);
+                       p = (struct dma_desc *)(tx_q->dma_etx + entry);
                else
-                       p = priv->dma_tx + entry;
+                       p = tx_q->dma_tx + entry;
 
                status = priv->hw->desc->tx_status(&priv->dev->stats,
                                                      &priv->xstats, p,
@@ -1342,48 +1827,51 @@ static void stmmac_tx_clean(struct stmmac_priv *priv)
                        stmmac_get_tx_hwtstamp(priv, p, skb);
                }
 
-               if (likely(priv->tx_skbuff_dma[entry].buf)) {
-                       if (priv->tx_skbuff_dma[entry].map_as_page)
+               if (likely(tx_q->tx_skbuff_dma[entry].buf)) {
+                       if (tx_q->tx_skbuff_dma[entry].map_as_page)
                                dma_unmap_page(priv->device,
-                                              priv->tx_skbuff_dma[entry].buf,
-                                              priv->tx_skbuff_dma[entry].len,
+                                              tx_q->tx_skbuff_dma[entry].buf,
+                                              tx_q->tx_skbuff_dma[entry].len,
                                               DMA_TO_DEVICE);
                        else
                                dma_unmap_single(priv->device,
-                                                priv->tx_skbuff_dma[entry].buf,
-                                                priv->tx_skbuff_dma[entry].len,
+                                                tx_q->tx_skbuff_dma[entry].buf,
+                                                tx_q->tx_skbuff_dma[entry].len,
                                                 DMA_TO_DEVICE);
-                       priv->tx_skbuff_dma[entry].buf = 0;
-                       priv->tx_skbuff_dma[entry].len = 0;
-                       priv->tx_skbuff_dma[entry].map_as_page = false;
+                       tx_q->tx_skbuff_dma[entry].buf = 0;
+                       tx_q->tx_skbuff_dma[entry].len = 0;
+                       tx_q->tx_skbuff_dma[entry].map_as_page = false;
                }
 
                if (priv->hw->mode->clean_desc3)
-                       priv->hw->mode->clean_desc3(priv, p);
+                       priv->hw->mode->clean_desc3(tx_q, p);
 
-               priv->tx_skbuff_dma[entry].last_segment = false;
-               priv->tx_skbuff_dma[entry].is_jumbo = false;
+               tx_q->tx_skbuff_dma[entry].last_segment = false;
+               tx_q->tx_skbuff_dma[entry].is_jumbo = false;
 
                if (likely(skb != NULL)) {
                        pkts_compl++;
                        bytes_compl += skb->len;
                        dev_consume_skb_any(skb);
-                       priv->tx_skbuff[entry] = NULL;
+                       tx_q->tx_skbuff[entry] = NULL;
                }
 
                priv->hw->desc->release_tx_desc(p, priv->mode);
 
                entry = STMMAC_GET_ENTRY(entry, DMA_TX_SIZE);
        }
-       priv->dirty_tx = entry;
+       tx_q->dirty_tx = entry;
+
+       netdev_tx_completed_queue(netdev_get_tx_queue(priv->dev, queue),
+                                 pkts_compl, bytes_compl);
 
-       netdev_completed_queue(priv->dev, pkts_compl, bytes_compl);
+       if (unlikely(netif_tx_queue_stopped(netdev_get_tx_queue(priv->dev,
+                                                               queue))) &&
+           stmmac_tx_avail(priv, queue) > STMMAC_TX_THRESH) {
 
-       if (unlikely(netif_queue_stopped(priv->dev) &&
-           stmmac_tx_avail(priv) > STMMAC_TX_THRESH)) {
                netif_dbg(priv, tx_done, priv->dev,
                          "%s: restart transmit\n", __func__);
-               netif_wake_queue(priv->dev);
+               netif_tx_wake_queue(netdev_get_tx_queue(priv->dev, queue));
        }
 
        if ((priv->eee_enabled) && (!priv->tx_path_in_lpi_mode)) {
@@ -1393,45 +1881,76 @@ static void stmmac_tx_clean(struct stmmac_priv *priv)
        netif_tx_unlock(priv->dev);
 }
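
The reclaim loop is a classic single-producer/single-consumer ring walk: it advances from dirty_tx toward cur_tx, wrapping via STMMAC_GET_ENTRY. Assuming a power-of-two ring size, the index arithmetic looks like this sketch:

    #include <stdio.h>

    #define RING 8 /* power of two, like DMA_TX_SIZE */

    static unsigned int ring_next(unsigned int e) { return (e + 1) & (RING - 1); }

    int main(void)
    {
    	unsigned int cur = 2;   /* producer: next slot the xmit path fills */
    	unsigned int dirty = 6; /* consumer: oldest unreclaimed slot */

    	/* wrapped case: reclaims 6, 7, 0, 1 */
    	for (unsigned int e = dirty; e != cur; e = ring_next(e))
    		printf("reclaim entry %u\n", e);
    	return 0;
    }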
 
-static inline void stmmac_enable_dma_irq(struct stmmac_priv *priv)
+static inline void stmmac_enable_dma_irq(struct stmmac_priv *priv, u32 chan)
 {
-       priv->hw->dma->enable_dma_irq(priv->ioaddr);
+       priv->hw->dma->enable_dma_irq(priv->ioaddr, chan);
 }
 
-static inline void stmmac_disable_dma_irq(struct stmmac_priv *priv)
+static inline void stmmac_disable_dma_irq(struct stmmac_priv *priv, u32 chan)
 {
-       priv->hw->dma->disable_dma_irq(priv->ioaddr);
+       priv->hw->dma->disable_dma_irq(priv->ioaddr, chan);
 }
 
 /**
  * stmmac_tx_err - to manage the tx error
  * @priv: driver private structure
+ * @chan: channel index
  * Description: it cleans the descriptors and restarts the transmission
  * in case of transmission errors.
  */
-static void stmmac_tx_err(struct stmmac_priv *priv)
+static void stmmac_tx_err(struct stmmac_priv *priv, u32 chan)
 {
+       struct stmmac_tx_queue *tx_q = &priv->tx_queue[chan];
        int i;
-       netif_stop_queue(priv->dev);
 
-       priv->hw->dma->stop_tx(priv->ioaddr);
-       dma_free_tx_skbufs(priv);
+       netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, chan));
+
+       stmmac_stop_tx_dma(priv, chan);
+       dma_free_tx_skbufs(priv, chan);
        for (i = 0; i < DMA_TX_SIZE; i++)
                if (priv->extend_desc)
-                       priv->hw->desc->init_tx_desc(&priv->dma_etx[i].basic,
+                       priv->hw->desc->init_tx_desc(&tx_q->dma_etx[i].basic,
                                                     priv->mode,
                                                     (i == DMA_TX_SIZE - 1));
                else
-                       priv->hw->desc->init_tx_desc(&priv->dma_tx[i],
+                       priv->hw->desc->init_tx_desc(&tx_q->dma_tx[i],
                                                     priv->mode,
                                                     (i == DMA_TX_SIZE - 1));
-       priv->dirty_tx = 0;
-       priv->cur_tx = 0;
-       netdev_reset_queue(priv->dev);
-       priv->hw->dma->start_tx(priv->ioaddr);
+       tx_q->dirty_tx = 0;
+       tx_q->cur_tx = 0;
+       netdev_tx_reset_queue(netdev_get_tx_queue(priv->dev, chan));
+       stmmac_start_tx_dma(priv, chan);
 
        priv->dev->stats.tx_errors++;
-       netif_wake_queue(priv->dev);
+       netif_tx_wake_queue(netdev_get_tx_queue(priv->dev, chan));
+}
+
+/**
+ *  stmmac_set_dma_operation_mode - Set DMA operation mode by channel
+ *  @priv: driver private structure
+ *  @txmode: TX operating mode
+ *  @rxmode: RX operating mode
+ *  @chan: channel index
+ *  Description: it is used for configuring the DMA operation mode at
+ *  runtime in order to program the tx/rx DMA thresholds or Store-And-Forward
+ *  mode.
+ */
+static void stmmac_set_dma_operation_mode(struct stmmac_priv *priv, u32 txmode,
+                                         u32 rxmode, u32 chan)
+{
+       int rxfifosz = priv->plat->rx_fifo_size;
+
+       if (rxfifosz == 0)
+               rxfifosz = priv->dma_cap.rx_fifo_size;
+
+       if (priv->synopsys_id >= DWMAC_CORE_4_00) {
+               priv->hw->dma->dma_rx_mode(priv->ioaddr, rxmode, chan,
+                                          rxfifosz);
+               priv->hw->dma->dma_tx_mode(priv->ioaddr, txmode, chan);
+       } else {
+               priv->hw->dma->dma_mode(priv->ioaddr, txmode, rxmode,
+                                       rxfifosz);
+       }
 }
 
 /**
@@ -1443,31 +1962,43 @@ static void stmmac_tx_err(struct stmmac_priv *priv)
  */
 static void stmmac_dma_interrupt(struct stmmac_priv *priv)
 {
+       u32 tx_channel_count = priv->plat->tx_queues_to_use;
        int status;
-       int rxfifosz = priv->plat->rx_fifo_size;
+       u32 chan;
+
+       for (chan = 0; chan < tx_channel_count; chan++) {
+               struct stmmac_rx_queue *rx_q = &priv->rx_queue[chan];
 
-       status = priv->hw->dma->dma_interrupt(priv->ioaddr, &priv->xstats);
-       if (likely((status & handle_rx)) || (status & handle_tx)) {
-               if (likely(napi_schedule_prep(&priv->napi))) {
-                       stmmac_disable_dma_irq(priv);
-                       __napi_schedule(&priv->napi);
+               status = priv->hw->dma->dma_interrupt(priv->ioaddr,
+                                                     &priv->xstats, chan);
+               if (likely((status & handle_rx)) || (status & handle_tx)) {
+                       if (likely(napi_schedule_prep(&rx_q->napi))) {
+                               stmmac_disable_dma_irq(priv, chan);
+                               __napi_schedule(&rx_q->napi);
+                       }
                }
-       }
-       if (unlikely(status & tx_hard_error_bump_tc)) {
-               /* Try to bump up the dma threshold on this failure */
-               if (unlikely(priv->xstats.threshold != SF_DMA_MODE) &&
-                   (tc <= 256)) {
-                       tc += 64;
-                       if (priv->plat->force_thresh_dma_mode)
-                               priv->hw->dma->dma_mode(priv->ioaddr, tc, tc,
-                                                       rxfifosz);
-                       else
-                               priv->hw->dma->dma_mode(priv->ioaddr, tc,
-                                                       SF_DMA_MODE, rxfifosz);
-                       priv->xstats.threshold = tc;
+
+               if (unlikely(status & tx_hard_error_bump_tc)) {
+                       /* Try to bump up the dma threshold on this failure */
+                       if (unlikely(priv->xstats.threshold != SF_DMA_MODE) &&
+                           (tc <= 256)) {
+                               tc += 64;
+                               if (priv->plat->force_thresh_dma_mode)
+                                       stmmac_set_dma_operation_mode(priv,
+                                                                     tc,
+                                                                     tc,
+                                                                     chan);
+                               else
+                                       stmmac_set_dma_operation_mode(priv,
+                                                                   tc,
+                                                                   SF_DMA_MODE,
+                                                                   chan);
+                               priv->xstats.threshold = tc;
+                       }
+               } else if (unlikely(status == tx_hard_error)) {
+                       stmmac_tx_err(priv, chan);
                }
-       } else if (unlikely(status == tx_hard_error))
-               stmmac_tx_err(priv);
+       }
 }
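
On tx_hard_error_bump_tc the handler retries with a deeper TX threshold, stepping tc by 64 until it passes 256 (and never once store-and-forward is in effect). The same logic in isolation:

    #include <stdio.h>

    static int bump_threshold(int tc, int in_sf_mode)
    {
    	if (!in_sf_mode && tc <= 256)
    		tc += 64; /* retry transmission with a deeper threshold */
    	return tc;
    }

    int main(void)
    {
    	int tc = 64;

    	for (int k = 0; k < 6; k++) {
    		tc = bump_threshold(tc, 0);
    		printf("tc=%d\n", tc); /* 128, 192, 256, 320, then stable */
    	}
    	return 0;
    }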
 
 /**
@@ -1574,6 +2105,13 @@ static void stmmac_check_ether_addr(struct stmmac_priv *priv)
  */
 static int stmmac_init_dma_engine(struct stmmac_priv *priv)
 {
+       u32 rx_channels_count = priv->plat->rx_queues_to_use;
+       u32 tx_channels_count = priv->plat->tx_queues_to_use;
+       struct stmmac_rx_queue *rx_q;
+       struct stmmac_tx_queue *tx_q;
+       u32 dummy_dma_rx_phy = 0;
+       u32 dummy_dma_tx_phy = 0;
+       u32 chan = 0;
        int atds = 0;
        int ret = 0;
 
@@ -1591,19 +2129,49 @@ static int stmmac_init_dma_engine(struct stmmac_priv *priv)
                return ret;
        }
 
-       priv->hw->dma->init(priv->ioaddr, priv->plat->dma_cfg,
-                           priv->dma_tx_phy, priv->dma_rx_phy, atds);
-
        if (priv->synopsys_id >= DWMAC_CORE_4_00) {
-               priv->rx_tail_addr = priv->dma_rx_phy +
-                           (DMA_RX_SIZE * sizeof(struct dma_desc));
-               priv->hw->dma->set_rx_tail_ptr(priv->ioaddr, priv->rx_tail_addr,
-                                              STMMAC_CHAN0);
+               /* DMA Configuration */
+               priv->hw->dma->init(priv->ioaddr, priv->plat->dma_cfg,
+                                   dummy_dma_tx_phy, dummy_dma_rx_phy, atds);
+
+               /* DMA RX Channel Configuration */
+               for (chan = 0; chan < rx_channels_count; chan++) {
+                       rx_q = &priv->rx_queue[chan];
+
+                       priv->hw->dma->init_rx_chan(priv->ioaddr,
+                                                   priv->plat->dma_cfg,
+                                                   rx_q->dma_rx_phy, chan);
+
+                       rx_q->rx_tail_addr = rx_q->dma_rx_phy +
+                                   (DMA_RX_SIZE * sizeof(struct dma_desc));
+                       priv->hw->dma->set_rx_tail_ptr(priv->ioaddr,
+                                                      rx_q->rx_tail_addr,
+                                                      chan);
+               }
+
+               /* DMA TX Channel Configuration */
+               for (chan = 0; chan < tx_channels_count; chan++) {
+                       tx_q = &priv->tx_queue[chan];
+
+                       priv->hw->dma->init_chan(priv->ioaddr,
+                                                priv->plat->dma_cfg,
+                                                chan);
 
-               priv->tx_tail_addr = priv->dma_tx_phy +
-                           (DMA_TX_SIZE * sizeof(struct dma_desc));
-               priv->hw->dma->set_tx_tail_ptr(priv->ioaddr, priv->tx_tail_addr,
-                                              STMMAC_CHAN0);
+                       priv->hw->dma->init_tx_chan(priv->ioaddr,
+                                                   priv->plat->dma_cfg,
+                                                   tx_q->dma_tx_phy, chan);
+
+                       tx_q->tx_tail_addr = tx_q->dma_tx_phy +
+                                   (DMA_TX_SIZE * sizeof(struct dma_desc));
+                       priv->hw->dma->set_tx_tail_ptr(priv->ioaddr,
+                                                      tx_q->tx_tail_addr,
+                                                      chan);
+               }
+       } else {
+               rx_q = &priv->rx_queue[chan];
+               tx_q = &priv->tx_queue[chan];
+               priv->hw->dma->init(priv->ioaddr, priv->plat->dma_cfg,
+                                   tx_q->dma_tx_phy, rx_q->dma_rx_phy, atds);
        }
 
        if (priv->plat->axi && priv->hw->dma->axi)
@@ -1621,8 +2189,12 @@ static int stmmac_init_dma_engine(struct stmmac_priv *priv)
 static void stmmac_tx_timer(unsigned long data)
 {
        struct stmmac_priv *priv = (struct stmmac_priv *)data;
+       u32 tx_queues_count = priv->plat->tx_queues_to_use;
+       u32 queue;
 
-       stmmac_tx_clean(priv);
+       /* let's scan all the tx queues */
+       for (queue = 0; queue < tx_queues_count; queue++)
+               stmmac_tx_clean(priv, queue);
 }
 
 /**
@@ -1644,6 +2216,196 @@ static void stmmac_init_tx_coalesce(struct stmmac_priv *priv)
        add_timer(&priv->txtimer);
 }
 
+static void stmmac_set_rings_length(struct stmmac_priv *priv)
+{
+       u32 rx_channels_count = priv->plat->rx_queues_to_use;
+       u32 tx_channels_count = priv->plat->tx_queues_to_use;
+       u32 chan;
+
+       /* set TX ring length */
+       if (priv->hw->dma->set_tx_ring_len) {
+               for (chan = 0; chan < tx_channels_count; chan++)
+                       priv->hw->dma->set_tx_ring_len(priv->ioaddr,
+                                                      (DMA_TX_SIZE - 1), chan);
+       }
+
+       /* set RX ring length */
+       if (priv->hw->dma->set_rx_ring_len) {
+               for (chan = 0; chan < rx_channels_count; chan++)
+                       priv->hw->dma->set_rx_ring_len(priv->ioaddr,
+                                                      (DMA_RX_SIZE - 1), chan);
+       }
+}
+
+/**
+ *  stmmac_set_tx_queue_weight - Set TX queue weight
+ *  @priv: driver private structure
+ *  Description: It is used for setting the TX queue weights
+ */
+static void stmmac_set_tx_queue_weight(struct stmmac_priv *priv)
+{
+       u32 tx_queues_count = priv->plat->tx_queues_to_use;
+       u32 weight;
+       u32 queue;
+
+       for (queue = 0; queue < tx_queues_count; queue++) {
+               weight = priv->plat->tx_queues_cfg[queue].weight;
+               priv->hw->mac->set_mtl_tx_queue_weight(priv->hw, weight, queue);
+       }
+}
+
+/**
+ *  stmmac_configure_cbs - Configure CBS in TX queue
+ *  @priv: driver private structure
+ *  Description: It is used for configuring CBS in AVB TX queues
+ */
+static void stmmac_configure_cbs(struct stmmac_priv *priv)
+{
+       u32 tx_queues_count = priv->plat->tx_queues_to_use;
+       u32 mode_to_use;
+       u32 queue;
+
+       /* queue 0 is reserved for legacy traffic */
+       for (queue = 1; queue < tx_queues_count; queue++) {
+               mode_to_use = priv->plat->tx_queues_cfg[queue].mode_to_use;
+               if (mode_to_use == MTL_QUEUE_DCB)
+                       continue;
+
+               priv->hw->mac->config_cbs(priv->hw,
+                               priv->plat->tx_queues_cfg[queue].send_slope,
+                               priv->plat->tx_queues_cfg[queue].idle_slope,
+                               priv->plat->tx_queues_cfg[queue].high_credit,
+                               priv->plat->tx_queues_cfg[queue].low_credit,
+                               queue);
+       }
+}
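
The CBS parameters passed here follow the IEEE 802.1Qav relations: a queue reserving a fraction of the port rate gets idleSlope = fraction * rate and sendSlope = idleSlope - rate. How those values are scaled into the send_slope/idle_slope/high_credit/low_credit register fields is hardware-specific; the arithmetic itself:

    #include <stdio.h>

    int main(void)
    {
    	double port_rate = 1000.0; /* Mb/s */
    	double frac = 0.25;        /* reserve 25% for the AVB queue */

    	double idle_slope = frac * port_rate;       /*  250 Mb/s */
    	double send_slope = idle_slope - port_rate; /* -750 Mb/s */

    	printf("idleSlope=%.0f Mb/s sendSlope=%.0f Mb/s\n",
    	       idle_slope, send_slope);
    	return 0;
    }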
+
+/**
+ *  stmmac_rx_queue_dma_chan_map - Map RX queue to RX dma channel
+ *  @priv: driver private structure
+ *  Description: It is used for mapping RX queues to RX dma channels
+ */
+static void stmmac_rx_queue_dma_chan_map(struct stmmac_priv *priv)
+{
+       u32 rx_queues_count = priv->plat->rx_queues_to_use;
+       u32 queue;
+       u32 chan;
+
+       for (queue = 0; queue < rx_queues_count; queue++) {
+               chan = priv->plat->rx_queues_cfg[queue].chan;
+               priv->hw->mac->map_mtl_to_dma(priv->hw, queue, chan);
+       }
+}
+
+/**
+ *  stmmac_mac_config_rx_queues_prio - Configure RX Queue priority
+ *  @priv: driver private structure
+ *  Description: It is used for configuring the RX Queue Priority
+ */
+static void stmmac_mac_config_rx_queues_prio(struct stmmac_priv *priv)
+{
+       u32 rx_queues_count = priv->plat->rx_queues_to_use;
+       u32 queue;
+       u32 prio;
+
+       for (queue = 0; queue < rx_queues_count; queue++) {
+               if (!priv->plat->rx_queues_cfg[queue].use_prio)
+                       continue;
+
+               prio = priv->plat->rx_queues_cfg[queue].prio;
+               priv->hw->mac->rx_queue_prio(priv->hw, prio, queue);
+       }
+}
+
+/**
+ *  stmmac_mac_config_tx_queues_prio - Configure TX Queue priority
+ *  @priv: driver private structure
+ *  Description: It is used for configuring the TX Queue Priority
+ */
+static void stmmac_mac_config_tx_queues_prio(struct stmmac_priv *priv)
+{
+       u32 tx_queues_count = priv->plat->tx_queues_to_use;
+       u32 queue;
+       u32 prio;
+
+       for (queue = 0; queue < tx_queues_count; queue++) {
+               if (!priv->plat->tx_queues_cfg[queue].use_prio)
+                       continue;
+
+               prio = priv->plat->tx_queues_cfg[queue].prio;
+               priv->hw->mac->tx_queue_prio(priv->hw, prio, queue);
+       }
+}
+
+/**
+ *  stmmac_mac_config_rx_queues_routing - Configure RX Queue Routing
+ *  @priv: driver private structure
+ *  Description: It is used for configuring the RX queue routing
+ */
+static void stmmac_mac_config_rx_queues_routing(struct stmmac_priv *priv)
+{
+       u32 rx_queues_count = priv->plat->rx_queues_to_use;
+       u32 queue;
+       u8 packet;
+
+       for (queue = 0; queue < rx_queues_count; queue++) {
+               /* no specific packet type routing specified for the queue */
+               if (priv->plat->rx_queues_cfg[queue].pkt_route == 0x0)
+                       continue;
+
+               packet = priv->plat->rx_queues_cfg[queue].pkt_route;
+               priv->hw->mac->rx_queue_routing(priv->hw, packet, queue);
+       }
+}
+
+/**
+ *  stmmac_mtl_configuration - Configure MTL
+ *  @priv: driver private structure
+ *  Description: It is used for configuring the MTL
+ */
+static void stmmac_mtl_configuration(struct stmmac_priv *priv)
+{
+       u32 rx_queues_count = priv->plat->rx_queues_to_use;
+       u32 tx_queues_count = priv->plat->tx_queues_to_use;
+
+       if (tx_queues_count > 1 && priv->hw->mac->set_mtl_tx_queue_weight)
+               stmmac_set_tx_queue_weight(priv);
+
+       /* Configure MTL RX algorithms */
+       if (rx_queues_count > 1 && priv->hw->mac->prog_mtl_rx_algorithms)
+               priv->hw->mac->prog_mtl_rx_algorithms(priv->hw,
+                                               priv->plat->rx_sched_algorithm);
+
+       /* Configure MTL TX algorithms */
+       if (tx_queues_count > 1 && priv->hw->mac->prog_mtl_tx_algorithms)
+               priv->hw->mac->prog_mtl_tx_algorithms(priv->hw,
+                                               priv->plat->tx_sched_algorithm);
+
+       /* Configure CBS in AVB TX queues */
+       if (tx_queues_count > 1 && priv->hw->mac->config_cbs)
+               stmmac_configure_cbs(priv);
+
+       /* Map RX MTL to DMA channels */
+       if (priv->hw->mac->map_mtl_to_dma)
+               stmmac_rx_queue_dma_chan_map(priv);
+
+       /* Enable MAC RX Queues */
+       if (priv->hw->mac->rx_queue_enable)
+               stmmac_mac_enable_rx_queues(priv);
+
+       /* Set RX priorities */
+       if (rx_queues_count > 1 && priv->hw->mac->rx_queue_prio)
+               stmmac_mac_config_rx_queues_prio(priv);
+
+       /* Set TX priorities */
+       if (tx_queues_count > 1 && priv->hw->mac->tx_queue_prio)
+               stmmac_mac_config_tx_queues_prio(priv);
+
+       /* Set RX routing */
+       if (rx_queues_count > 1 && priv->hw->mac->rx_queue_routing)
+               stmmac_mac_config_rx_queues_routing(priv);
+}
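
Every step in stmmac_mtl_configuration() is gated on the corresponding ops-table callback, so cores synthesized without a feature are skipped cleanly. A generic sketch of that capability-gated dispatch (mac_ops and both callbacks are illustrative):

    #include <stdio.h>

    struct mac_ops {
    	void (*config_cbs)(int queue); /* NULL when the core lacks CBS */
    	void (*map_mtl_to_dma)(int q, int chan);
    };

    static void setup(const struct mac_ops *ops, int tx_queues)
    {
    	if (tx_queues > 1 && ops->config_cbs)
    		for (int q = 1; q < tx_queues; q++) /* queue 0 stays legacy */
    			ops->config_cbs(q);

    	if (ops->map_mtl_to_dma)
    		ops->map_mtl_to_dma(0, 0);
    }

    static void cbs(int q)          { printf("CBS on queue %d\n", q); }
    static void map_q(int q, int c) { printf("queue %d -> chan %d\n", q, c); }

    int main(void)
    {
    	struct mac_ops ops = { .config_cbs = cbs, .map_mtl_to_dma = map_q };

    	setup(&ops, 3);
    	return 0;
    }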
+
 /**
  * stmmac_hw_setup - setup mac in a usable state.
  *  @dev : pointer to the device structure.
@@ -1659,6 +2421,9 @@ static void stmmac_init_tx_coalesce(struct stmmac_priv *priv)
 static int stmmac_hw_setup(struct net_device *dev, bool init_ptp)
 {
        struct stmmac_priv *priv = netdev_priv(dev);
+       u32 rx_cnt = priv->plat->rx_queues_to_use;
+       u32 tx_cnt = priv->plat->tx_queues_to_use;
+       u32 chan;
        int ret;
 
        /* DMA initialization and SW reset */
@@ -1688,9 +2453,9 @@ static int stmmac_hw_setup(struct net_device *dev, bool init_ptp)
        /* Initialize the MAC Core */
        priv->hw->mac->core_init(priv->hw, dev->mtu);
 
-       /* Initialize MAC RX Queues */
-       if (priv->hw->mac->rx_queue_enable)
-               stmmac_mac_enable_rx_queues(priv);
+       /* Initialize MTL */
+       if (priv->synopsys_id >= DWMAC_CORE_4_00)
+               stmmac_mtl_configuration(priv);
 
        ret = priv->hw->mac->rx_ipc(priv->hw);
        if (!ret) {
@@ -1700,10 +2465,7 @@ static int stmmac_hw_setup(struct net_device *dev, bool init_ptp)
        }
 
        /* Enable the MAC Rx/Tx */
-       if (priv->synopsys_id >= DWMAC_CORE_4_00)
-               stmmac_dwmac4_set_mac(priv->ioaddr, true);
-       else
-               stmmac_set_mac(priv->ioaddr, true);
+       priv->hw->mac->set_mac(priv->ioaddr, true);
 
        /* Set the HW DMA mode and the COE */
        stmmac_dma_operation_mode(priv);
@@ -1711,6 +2473,10 @@ static int stmmac_hw_setup(struct net_device *dev, bool init_ptp)
        stmmac_mmc_setup(priv);
 
        if (init_ptp) {
+               ret = clk_prepare_enable(priv->plat->clk_ptp_ref);
+               if (ret < 0)
+                       netdev_warn(priv->dev, "failed to enable PTP reference clock: %d\n", ret);
+
                ret = stmmac_init_ptp(priv);
                if (ret == -EOPNOTSUPP)
                        netdev_warn(priv->dev, "PTP not supported by HW\n");
@@ -1725,35 +2491,37 @@ static int stmmac_hw_setup(struct net_device *dev, bool init_ptp)
                            __func__);
 #endif
        /* Start the ball rolling... */
-       netdev_dbg(priv->dev, "DMA RX/TX processes started...\n");
-       priv->hw->dma->start_tx(priv->ioaddr);
-       priv->hw->dma->start_rx(priv->ioaddr);
+       stmmac_start_all_dma(priv);
 
        priv->tx_lpi_timer = STMMAC_DEFAULT_TWT_LS;
 
        if ((priv->use_riwt) && (priv->hw->dma->rx_watchdog)) {
                priv->rx_riwt = MAX_DMA_RIWT;
-               priv->hw->dma->rx_watchdog(priv->ioaddr, MAX_DMA_RIWT);
+               priv->hw->dma->rx_watchdog(priv->ioaddr, MAX_DMA_RIWT, rx_cnt);
        }
 
        if (priv->hw->pcs && priv->hw->mac->pcs_ctrl_ane)
                priv->hw->mac->pcs_ctrl_ane(priv->hw, 1, priv->hw->ps, 0);
 
-       /*  set TX ring length */
-       if (priv->hw->dma->set_tx_ring_len)
-               priv->hw->dma->set_tx_ring_len(priv->ioaddr,
-                                              (DMA_TX_SIZE - 1));
-       /*  set RX ring length */
-       if (priv->hw->dma->set_rx_ring_len)
-               priv->hw->dma->set_rx_ring_len(priv->ioaddr,
-                                              (DMA_RX_SIZE - 1));
+       /* set TX and RX ring lengths */
+       stmmac_set_rings_length(priv);
+
        /* Enable TSO */
-       if (priv->tso)
-               priv->hw->dma->enable_tso(priv->ioaddr, 1, STMMAC_CHAN0);
+       if (priv->tso) {
+               for (chan = 0; chan < tx_cnt; chan++)
+                       priv->hw->dma->enable_tso(priv->ioaddr, 1, chan);
+       }
 
        return 0;
 }
 
+static void stmmac_hw_teardown(struct net_device *dev)
+{
+       struct stmmac_priv *priv = netdev_priv(dev);
+
+       clk_disable_unprepare(priv->plat->clk_ptp_ref);
+}
+
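stmmac_hw_teardown() lets the open() error path undo the clk_prepare_enable() now done for the PTP reference clock in stmmac_hw_setup(); the two calls must stay balanced. A minimal sketch of the pairing using the real clk API (the surrounding driver context is hypothetical):

	#include <linux/clk.h>

	static int ex_hw_setup(struct clk *ptp_ref)
	{
		return clk_prepare_enable(ptp_ref);	/* ref-counted enable */
	}

	static void ex_hw_teardown(struct clk *ptp_ref)
	{
		clk_disable_unprepare(ptp_ref);		/* balances the enable */
	}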
 /**
  *  stmmac_open - open entry point of the driver
  *  @dev : pointer to the device structure.
@@ -1821,7 +2589,7 @@ static int stmmac_open(struct net_device *dev)
                netdev_err(priv->dev,
                           "%s: ERROR: allocating the IRQ %d (error: %d)\n",
                           __func__, dev->irq, ret);
-               goto init_error;
+               goto irq_error;
        }
 
        /* Request the Wake IRQ in case of another line is used for WoL */
@@ -1848,8 +2616,8 @@ static int stmmac_open(struct net_device *dev)
                }
        }
 
-       napi_enable(&priv->napi);
-       netif_start_queue(dev);
+       stmmac_enable_all_queues(priv);
+       stmmac_start_all_queues(priv);
 
        return 0;
 
@@ -1858,7 +2626,12 @@ lpiirq_error:
                free_irq(priv->wol_irq, dev);
 wolirq_error:
        free_irq(dev->irq, dev);
+irq_error:
+       if (dev->phydev)
+               phy_stop(dev->phydev);
 
+       del_timer_sync(&priv->txtimer);
+       stmmac_hw_teardown(dev);
 init_error:
        free_dma_desc_resources(priv);
 dma_desc_error:
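The new irq_error label extends the usual unwind ladder: each label releases only what was successfully acquired before the failing step, in reverse order. A self-contained sketch of the idiom (all names hypothetical):

	static int acquire_a(void) { return 0; }
	static int acquire_b(void) { return -1; }	/* pretend this fails */
	static void release_a(void) { }

	static int ex_open(void)
	{
		int ret;

		ret = acquire_a();
		if (ret)
			goto a_error;

		ret = acquire_b();
		if (ret)
			goto b_error;

		return 0;

	b_error:
		release_a();	/* undo only what succeeded */
	a_error:
		return ret;
	}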
@@ -1887,9 +2660,9 @@ static int stmmac_release(struct net_device *dev)
                phy_disconnect(dev->phydev);
        }
 
-       netif_stop_queue(dev);
+       stmmac_stop_all_queues(priv);
 
-       napi_disable(&priv->napi);
+       stmmac_disable_all_queues(priv);
 
        del_timer_sync(&priv->txtimer);
 
@@ -1901,14 +2674,13 @@ static int stmmac_release(struct net_device *dev)
                free_irq(priv->lpi_irq, dev);
 
        /* Stop TX/RX DMA and clear the descriptors */
-       priv->hw->dma->stop_tx(priv->ioaddr);
-       priv->hw->dma->stop_rx(priv->ioaddr);
+       stmmac_stop_all_dma(priv);
 
        /* Release and free the Rx/Tx resources */
        free_dma_desc_resources(priv);
 
        /* Disable the MAC Rx/Tx */
-       stmmac_set_mac(priv->ioaddr, false);
+       priv->hw->mac->set_mac(priv->ioaddr, false);
 
        netif_carrier_off(dev);
 
@@ -1927,22 +2699,24 @@ static int stmmac_release(struct net_device *dev)
  *  @des: buffer start address
  *  @total_len: total length to fill in descriptors
 *  @last_segment: condition for the last descriptor
+ *  @queue: TX queue index
  *  Description:
 *  This function fills the descriptors and requests new descriptors
 *  according to the buffer length to fill.
  */
 static void stmmac_tso_allocator(struct stmmac_priv *priv, unsigned int des,
-                                int total_len, bool last_segment)
+                                int total_len, bool last_segment, u32 queue)
 {
+       struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
        struct dma_desc *desc;
-       int tmp_len;
        u32 buff_size;
+       int tmp_len;
 
        tmp_len = total_len;
 
        while (tmp_len > 0) {
-               priv->cur_tx = STMMAC_GET_ENTRY(priv->cur_tx, DMA_TX_SIZE);
-               desc = priv->dma_tx + priv->cur_tx;
+               tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, DMA_TX_SIZE);
+               desc = tx_q->dma_tx + tx_q->cur_tx;
 
                desc->des0 = cpu_to_le32(des + (total_len - tmp_len));
                buff_size = tmp_len >= TSO_MAX_BUFF_SIZE ?
@@ -1986,23 +2760,28 @@ static void stmmac_tso_allocator(struct stmmac_priv *priv, unsigned int des,
  */
 static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)
 {
-       u32 pay_len, mss;
-       int tmp_pay_len = 0;
+       struct dma_desc *desc, *first, *mss_desc = NULL;
        struct stmmac_priv *priv = netdev_priv(dev);
        int nfrags = skb_shinfo(skb)->nr_frags;
+       u32 queue = skb_get_queue_mapping(skb);
        unsigned int first_entry, des;
-       struct dma_desc *desc, *first, *mss_desc = NULL;
+       struct stmmac_tx_queue *tx_q;
+       int tmp_pay_len = 0;
+       u32 pay_len, mss;
        u8 proto_hdr_len;
        int i;
 
+       tx_q = &priv->tx_queue[queue];
+
        /* Compute header lengths */
        proto_hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
 
        /* Desc availability based on threshold should be safe enough */
-       if (unlikely(stmmac_tx_avail(priv) <
+       if (unlikely(stmmac_tx_avail(priv, queue) <
                (((skb->len - proto_hdr_len) / TSO_MAX_BUFF_SIZE + 1)))) {
-               if (!netif_queue_stopped(dev)) {
-                       netif_stop_queue(dev);
+               if (!netif_tx_queue_stopped(netdev_get_tx_queue(dev, queue))) {
+                       netif_tx_stop_queue(netdev_get_tx_queue(priv->dev,
+                                                               queue));
                        /* This is a hard error, log it. */
                        netdev_err(priv->dev,
                                   "%s: Tx Ring full when queue awake\n",
@@ -2017,10 +2796,10 @@ static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)
 
        /* set new MSS value if needed */
        if (mss != priv->mss) {
-               mss_desc = priv->dma_tx + priv->cur_tx;
+               mss_desc = tx_q->dma_tx + tx_q->cur_tx;
                priv->hw->desc->set_mss(mss_desc, mss);
                priv->mss = mss;
-               priv->cur_tx = STMMAC_GET_ENTRY(priv->cur_tx, DMA_TX_SIZE);
+               tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, DMA_TX_SIZE);
        }
 
        if (netif_msg_tx_queued(priv)) {
@@ -2030,9 +2809,9 @@ static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)
                        skb->data_len);
        }
 
-       first_entry = priv->cur_tx;
+       first_entry = tx_q->cur_tx;
 
-       desc = priv->dma_tx + first_entry;
+       desc = tx_q->dma_tx + first_entry;
        first = desc;
 
        /* first descriptor: fill Headers on Buf1 */
@@ -2041,9 +2820,9 @@ static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)
        if (dma_mapping_error(priv->device, des))
                goto dma_map_err;
 
-       priv->tx_skbuff_dma[first_entry].buf = des;
-       priv->tx_skbuff_dma[first_entry].len = skb_headlen(skb);
-       priv->tx_skbuff[first_entry] = skb;
+       tx_q->tx_skbuff_dma[first_entry].buf = des;
+       tx_q->tx_skbuff_dma[first_entry].len = skb_headlen(skb);
+       tx_q->tx_skbuff[first_entry] = skb;
 
        first->des0 = cpu_to_le32(des);
 
@@ -2054,7 +2833,7 @@ static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)
        /* If needed take extra descriptors to fill the remaining payload */
        tmp_pay_len = pay_len - TSO_MAX_BUFF_SIZE;
 
-       stmmac_tso_allocator(priv, des, tmp_pay_len, (nfrags == 0));
+       stmmac_tso_allocator(priv, des, tmp_pay_len, (nfrags == 0), queue);
 
        /* Prepare fragments */
        for (i = 0; i < nfrags; i++) {
@@ -2063,24 +2842,26 @@ static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)
                des = skb_frag_dma_map(priv->device, frag, 0,
                                       skb_frag_size(frag),
                                       DMA_TO_DEVICE);
+               if (dma_mapping_error(priv->device, des))
+                       goto dma_map_err;
 
                stmmac_tso_allocator(priv, des, skb_frag_size(frag),
-                                    (i == nfrags - 1));
+                                    (i == nfrags - 1), queue);
 
-               priv->tx_skbuff_dma[priv->cur_tx].buf = des;
-               priv->tx_skbuff_dma[priv->cur_tx].len = skb_frag_size(frag);
-               priv->tx_skbuff[priv->cur_tx] = NULL;
-               priv->tx_skbuff_dma[priv->cur_tx].map_as_page = true;
+               tx_q->tx_skbuff_dma[tx_q->cur_tx].buf = des;
+               tx_q->tx_skbuff_dma[tx_q->cur_tx].len = skb_frag_size(frag);
+               tx_q->tx_skbuff[tx_q->cur_tx] = NULL;
+               tx_q->tx_skbuff_dma[tx_q->cur_tx].map_as_page = true;
        }
 
-       priv->tx_skbuff_dma[priv->cur_tx].last_segment = true;
+       tx_q->tx_skbuff_dma[tx_q->cur_tx].last_segment = true;
 
-       priv->cur_tx = STMMAC_GET_ENTRY(priv->cur_tx, DMA_TX_SIZE);
+       tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, DMA_TX_SIZE);
 
-       if (unlikely(stmmac_tx_avail(priv) <= (MAX_SKB_FRAGS + 1))) {
+       if (unlikely(stmmac_tx_avail(priv, queue) <= (MAX_SKB_FRAGS + 1))) {
                netif_dbg(priv, hw, priv->dev, "%s: stop transmitted packets\n",
                          __func__);
-               netif_stop_queue(dev);
+               netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, queue));
        }
 
        dev->stats.tx_bytes += skb->len;
@@ -2112,7 +2893,7 @@ static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)
        priv->hw->desc->prepare_tso_tx_desc(first, 1,
                        proto_hdr_len,
                        pay_len,
-                       1, priv->tx_skbuff_dma[first_entry].last_segment,
+                       1, tx_q->tx_skbuff_dma[first_entry].last_segment,
                        tcp_hdrlen(skb) / 4, (skb->len - proto_hdr_len));
 
        /* If context desc is used to change MSS */
@@ -2127,20 +2908,20 @@ static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)
 
        if (netif_msg_pktdata(priv)) {
                pr_info("%s: curr=%d dirty=%d f=%d, e=%d, f_p=%p, nfrags %d\n",
-                       __func__, priv->cur_tx, priv->dirty_tx, first_entry,
-                       priv->cur_tx, first, nfrags);
+                       __func__, tx_q->cur_tx, tx_q->dirty_tx, first_entry,
+                       tx_q->cur_tx, first, nfrags);
 
-               priv->hw->desc->display_ring((void *)priv->dma_tx, DMA_TX_SIZE,
+               priv->hw->desc->display_ring((void *)tx_q->dma_tx, DMA_TX_SIZE,
                                             0);
 
                pr_info(">>> frame to be transmitted: ");
                print_pkt(skb->data, skb_headlen(skb));
        }
 
-       netdev_sent_queue(dev, skb->len);
+       netdev_tx_sent_queue(netdev_get_tx_queue(dev, queue), skb->len);
 
-       priv->hw->dma->set_tx_tail_ptr(priv->ioaddr, priv->tx_tail_addr,
-                                      STMMAC_CHAN0);
+       priv->hw->dma->set_tx_tail_ptr(priv->ioaddr, tx_q->tx_tail_addr,
+                                      queue);
 
        return NETDEV_TX_OK;
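For reference, the chunking performed by stmmac_tso_allocator() amounts to slicing the remaining payload into TSO_MAX_BUFF_SIZE pieces, one descriptor per piece. A runnable userspace sketch of the same arithmetic (chunk limit, address, and length are made-up values):

	#include <stdio.h>

	#define EX_TSO_MAX_BUFF_SIZE 16368	/* illustrative chunk limit */

	int main(void)
	{
		unsigned int des = 0x1000;	/* hypothetical DMA address */
		int total_len = 40000;		/* hypothetical payload */
		int tmp_len = total_len;

		while (tmp_len > 0) {
			int buff_size = tmp_len >= EX_TSO_MAX_BUFF_SIZE ?
					EX_TSO_MAX_BUFF_SIZE : tmp_len;

			printf("desc: addr=0x%x len=%d\n",
			       des + (total_len - tmp_len), buff_size);
			tmp_len -= EX_TSO_MAX_BUFF_SIZE;
		}
		return 0;
	}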
 
@@ -2164,21 +2945,26 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
        struct stmmac_priv *priv = netdev_priv(dev);
        unsigned int nopaged_len = skb_headlen(skb);
        int i, csum_insertion = 0, is_jumbo = 0;
+       u32 queue = skb_get_queue_mapping(skb);
        int nfrags = skb_shinfo(skb)->nr_frags;
        unsigned int entry, first_entry;
        struct dma_desc *desc, *first;
+       struct stmmac_tx_queue *tx_q;
        unsigned int enh_desc;
        unsigned int des;
 
+       tx_q = &priv->tx_queue[queue];
+
        /* Manage oversized TCP frames for GMAC4 device */
        if (skb_is_gso(skb) && priv->tso) {
                if (ip_hdr(skb)->protocol == IPPROTO_TCP)
                        return stmmac_tso_xmit(skb, dev);
        }
 
-       if (unlikely(stmmac_tx_avail(priv) < nfrags + 1)) {
-               if (!netif_queue_stopped(dev)) {
-                       netif_stop_queue(dev);
+       if (unlikely(stmmac_tx_avail(priv, queue) < nfrags + 1)) {
+               if (!netif_tx_queue_stopped(netdev_get_tx_queue(dev, queue))) {
+                       netif_tx_stop_queue(netdev_get_tx_queue(priv->dev,
+                                                               queue));
                        /* This is a hard error, log it. */
                        netdev_err(priv->dev,
                                   "%s: Tx Ring full when queue awake\n",
@@ -2190,19 +2976,19 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
        if (priv->tx_path_in_lpi_mode)
                stmmac_disable_eee_mode(priv);
 
-       entry = priv->cur_tx;
+       entry = tx_q->cur_tx;
        first_entry = entry;
 
        csum_insertion = (skb->ip_summed == CHECKSUM_PARTIAL);
 
        if (likely(priv->extend_desc))
-               desc = (struct dma_desc *)(priv->dma_etx + entry);
+               desc = (struct dma_desc *)(tx_q->dma_etx + entry);
        else
-               desc = priv->dma_tx + entry;
+               desc = tx_q->dma_tx + entry;
 
        first = desc;
 
-       priv->tx_skbuff[first_entry] = skb;
+       tx_q->tx_skbuff[first_entry] = skb;
 
        enh_desc = priv->plat->enh_desc;
        /* To program the descriptors according to the size of the frame */
@@ -2211,7 +2997,7 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
 
        if (unlikely(is_jumbo) && likely(priv->synopsys_id <
                                         DWMAC_CORE_4_00)) {
-               entry = priv->hw->mode->jumbo_frm(priv, skb, csum_insertion);
+               entry = priv->hw->mode->jumbo_frm(tx_q, skb, csum_insertion);
                if (unlikely(entry < 0))
                        goto dma_map_err;
        }
@@ -2224,26 +3010,26 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
                entry = STMMAC_GET_ENTRY(entry, DMA_TX_SIZE);
 
                if (likely(priv->extend_desc))
-                       desc = (struct dma_desc *)(priv->dma_etx + entry);
+                       desc = (struct dma_desc *)(tx_q->dma_etx + entry);
                else
-                       desc = priv->dma_tx + entry;
+                       desc = tx_q->dma_tx + entry;
 
                des = skb_frag_dma_map(priv->device, frag, 0, len,
                                       DMA_TO_DEVICE);
                if (dma_mapping_error(priv->device, des))
                        goto dma_map_err; /* should reuse desc w/o issues */
 
-               priv->tx_skbuff[entry] = NULL;
+               tx_q->tx_skbuff[entry] = NULL;
 
-               priv->tx_skbuff_dma[entry].buf = des;
+               tx_q->tx_skbuff_dma[entry].buf = des;
                if (unlikely(priv->synopsys_id >= DWMAC_CORE_4_00))
                        desc->des0 = cpu_to_le32(des);
                else
                        desc->des2 = cpu_to_le32(des);
 
-               priv->tx_skbuff_dma[entry].map_as_page = true;
-               priv->tx_skbuff_dma[entry].len = len;
-               priv->tx_skbuff_dma[entry].last_segment = last_segment;
+               tx_q->tx_skbuff_dma[entry].map_as_page = true;
+               tx_q->tx_skbuff_dma[entry].len = len;
+               tx_q->tx_skbuff_dma[entry].last_segment = last_segment;
 
                /* Prepare the descriptor and set the own bit too */
                priv->hw->desc->prepare_tx_desc(desc, 0, len, csum_insertion,
@@ -2252,20 +3038,20 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
 
        entry = STMMAC_GET_ENTRY(entry, DMA_TX_SIZE);
 
-       priv->cur_tx = entry;
+       tx_q->cur_tx = entry;
 
        if (netif_msg_pktdata(priv)) {
                void *tx_head;
 
                netdev_dbg(priv->dev,
                           "%s: curr=%d dirty=%d f=%d, e=%d, first=%p, nfrags=%d",
-                          __func__, priv->cur_tx, priv->dirty_tx, first_entry,
+                          __func__, tx_q->cur_tx, tx_q->dirty_tx, first_entry,
                           entry, first, nfrags);
 
                if (priv->extend_desc)
-                       tx_head = (void *)priv->dma_etx;
+                       tx_head = (void *)tx_q->dma_etx;
                else
-                       tx_head = (void *)priv->dma_tx;
+                       tx_head = (void *)tx_q->dma_tx;
 
                priv->hw->desc->display_ring(tx_head, DMA_TX_SIZE, false);
 
@@ -2273,10 +3059,10 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
                print_pkt(skb->data, skb->len);
        }
 
-       if (unlikely(stmmac_tx_avail(priv) <= (MAX_SKB_FRAGS + 1))) {
+       if (unlikely(stmmac_tx_avail(priv, queue) <= (MAX_SKB_FRAGS + 1))) {
                netif_dbg(priv, hw, priv->dev, "%s: stop transmitted packets\n",
                          __func__);
-               netif_stop_queue(dev);
+               netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, queue));
        }
 
        dev->stats.tx_bytes += skb->len;
@@ -2311,14 +3097,14 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
                if (dma_mapping_error(priv->device, des))
                        goto dma_map_err;
 
-               priv->tx_skbuff_dma[first_entry].buf = des;
+               tx_q->tx_skbuff_dma[first_entry].buf = des;
                if (unlikely(priv->synopsys_id >= DWMAC_CORE_4_00))
                        first->des0 = cpu_to_le32(des);
                else
                        first->des2 = cpu_to_le32(des);
 
-               priv->tx_skbuff_dma[first_entry].len = nopaged_len;
-               priv->tx_skbuff_dma[first_entry].last_segment = last_segment;
+               tx_q->tx_skbuff_dma[first_entry].len = nopaged_len;
+               tx_q->tx_skbuff_dma[first_entry].last_segment = last_segment;
 
                if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
                             priv->hwts_tx_en)) {
@@ -2339,13 +3125,13 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
                dma_wmb();
        }
 
-       netdev_sent_queue(dev, skb->len);
+       netdev_tx_sent_queue(netdev_get_tx_queue(dev, queue), skb->len);
 
        if (priv->synopsys_id < DWMAC_CORE_4_00)
                priv->hw->dma->enable_dma_transmission(priv->ioaddr);
        else
-               priv->hw->dma->set_tx_tail_ptr(priv->ioaddr, priv->tx_tail_addr,
-                                              STMMAC_CHAN0);
+               priv->hw->dma->set_tx_tail_ptr(priv->ioaddr, tx_q->tx_tail_addr,
+                                              queue);
 
        return NETDEV_TX_OK;
 
@@ -2373,9 +3159,9 @@ static void stmmac_rx_vlan(struct net_device *dev, struct sk_buff *skb)
 }
 
 
-static inline int stmmac_rx_threshold_count(struct stmmac_priv *priv)
+static inline int stmmac_rx_threshold_count(struct stmmac_rx_queue *rx_q)
 {
-       if (priv->rx_zeroc_thresh < STMMAC_RX_THRESH)
+       if (rx_q->rx_zeroc_thresh < STMMAC_RX_THRESH)
                return 0;
 
        return 1;
@@ -2384,30 +3170,33 @@ static inline int stmmac_rx_threshold_count(struct stmmac_priv *priv)
 /**
  * stmmac_rx_refill - refill used skb preallocated buffers
  * @priv: driver private structure
+ * @queue: RX queue index
  * Description : this is to reallocate the skb for the reception process
  * that is based on zero-copy.
  */
-static inline void stmmac_rx_refill(struct stmmac_priv *priv)
+static inline void stmmac_rx_refill(struct stmmac_priv *priv, u32 queue)
 {
+       struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
+       int dirty = stmmac_rx_dirty(priv, queue);
+       unsigned int entry = rx_q->dirty_rx;
+
        int bfsize = priv->dma_buf_sz;
-       unsigned int entry = priv->dirty_rx;
-       int dirty = stmmac_rx_dirty(priv);
 
        while (dirty-- > 0) {
                struct dma_desc *p;
 
                if (priv->extend_desc)
-                       p = (struct dma_desc *)(priv->dma_erx + entry);
+                       p = (struct dma_desc *)(rx_q->dma_erx + entry);
                else
-                       p = priv->dma_rx + entry;
+                       p = rx_q->dma_rx + entry;
 
-               if (likely(priv->rx_skbuff[entry] == NULL)) {
+               if (likely(!rx_q->rx_skbuff[entry])) {
                        struct sk_buff *skb;
 
                        skb = netdev_alloc_skb_ip_align(priv->dev, bfsize);
                        if (unlikely(!skb)) {
                                /* so for a while no zero-copy! */
-                               priv->rx_zeroc_thresh = STMMAC_RX_THRESH;
+                               rx_q->rx_zeroc_thresh = STMMAC_RX_THRESH;
                                if (unlikely(net_ratelimit()))
                                        dev_err(priv->device,
                                                "fail to alloc skb entry %d\n",
@@ -2415,28 +3204,28 @@ static inline void stmmac_rx_refill(struct stmmac_priv *priv)
                                break;
                        }
 
-                       priv->rx_skbuff[entry] = skb;
-                       priv->rx_skbuff_dma[entry] =
+                       rx_q->rx_skbuff[entry] = skb;
+                       rx_q->rx_skbuff_dma[entry] =
                            dma_map_single(priv->device, skb->data, bfsize,
                                           DMA_FROM_DEVICE);
                        if (dma_mapping_error(priv->device,
-                                             priv->rx_skbuff_dma[entry])) {
+                                             rx_q->rx_skbuff_dma[entry])) {
                                netdev_err(priv->dev, "Rx DMA map failed\n");
                                dev_kfree_skb(skb);
                                break;
                        }
 
                        if (unlikely(priv->synopsys_id >= DWMAC_CORE_4_00)) {
-                               p->des0 = cpu_to_le32(priv->rx_skbuff_dma[entry]);
+                               p->des0 = cpu_to_le32(rx_q->rx_skbuff_dma[entry]);
                                p->des1 = 0;
                        } else {
-                               p->des2 = cpu_to_le32(priv->rx_skbuff_dma[entry]);
+                               p->des2 = cpu_to_le32(rx_q->rx_skbuff_dma[entry]);
                        }
                        if (priv->hw->mode->refill_desc3)
-                               priv->hw->mode->refill_desc3(priv, p);
+                               priv->hw->mode->refill_desc3(rx_q, p);
 
-                       if (priv->rx_zeroc_thresh > 0)
-                               priv->rx_zeroc_thresh--;
+                       if (rx_q->rx_zeroc_thresh > 0)
+                               rx_q->rx_zeroc_thresh--;
 
                        netif_dbg(priv, rx_status, priv->dev,
                                  "refill entry #%d\n", entry);
@@ -2452,31 +3241,33 @@ static inline void stmmac_rx_refill(struct stmmac_priv *priv)
 
                entry = STMMAC_GET_ENTRY(entry, DMA_RX_SIZE);
        }
-       priv->dirty_rx = entry;
+       rx_q->dirty_rx = entry;
 }
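The refill loop above walks from dirty_rx towards cur_rx; the gap between the two indices, modulo the ring size, is the number of descriptors needing a fresh skb. A standalone sketch of that ring arithmetic (assuming, as the masking in STMMAC_GET_ENTRY suggests, a power-of-two ring; constants are illustrative):

	#define EX_RX_SIZE 512	/* power-of-two ring, like DMA_RX_SIZE */

	/* advance an index with wrap-around */
	static unsigned int ex_next(unsigned int idx)
	{
		return (idx + 1) & (EX_RX_SIZE - 1);
	}

	/* descriptors awaiting refill between dirty and cur */
	static unsigned int ex_rx_dirty(unsigned int cur, unsigned int dirty)
	{
		return (dirty <= cur) ? cur - dirty : EX_RX_SIZE - dirty + cur;
	}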
 
 /**
  * stmmac_rx - manage the receive process
  * @priv: driver private structure
- * @limit: napi bugget.
+ * @limit: napi budget
+ * @queue: RX queue index.
 * Description: this is the function called by the napi poll method.
  * It gets all the frames inside the ring.
  */
-static int stmmac_rx(struct stmmac_priv *priv, int limit)
+static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue)
 {
-       unsigned int entry = priv->cur_rx;
+       struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
+       unsigned int entry = rx_q->cur_rx;
+       int coe = priv->hw->rx_csum;
        unsigned int next_entry;
        unsigned int count = 0;
-       int coe = priv->hw->rx_csum;
 
        if (netif_msg_rx_status(priv)) {
                void *rx_head;
 
                netdev_dbg(priv->dev, "%s: descriptor ring:\n", __func__);
                if (priv->extend_desc)
-                       rx_head = (void *)priv->dma_erx;
+                       rx_head = (void *)rx_q->dma_erx;
                else
-                       rx_head = (void *)priv->dma_rx;
+                       rx_head = (void *)rx_q->dma_rx;
 
                priv->hw->desc->display_ring(rx_head, DMA_RX_SIZE, true);
        }
@@ -2486,9 +3277,9 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit)
                struct dma_desc *np;
 
                if (priv->extend_desc)
-                       p = (struct dma_desc *)(priv->dma_erx + entry);
+                       p = (struct dma_desc *)(rx_q->dma_erx + entry);
                else
-                       p = priv->dma_rx + entry;
+                       p = rx_q->dma_rx + entry;
 
                /* read the status of the incoming frame */
                status = priv->hw->desc->rx_status(&priv->dev->stats,
@@ -2499,20 +3290,20 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit)
 
                count++;
 
-               priv->cur_rx = STMMAC_GET_ENTRY(priv->cur_rx, DMA_RX_SIZE);
-               next_entry = priv->cur_rx;
+               rx_q->cur_rx = STMMAC_GET_ENTRY(rx_q->cur_rx, DMA_RX_SIZE);
+               next_entry = rx_q->cur_rx;
 
                if (priv->extend_desc)
-                       np = (struct dma_desc *)(priv->dma_erx + next_entry);
+                       np = (struct dma_desc *)(rx_q->dma_erx + next_entry);
                else
-                       np = priv->dma_rx + next_entry;
+                       np = rx_q->dma_rx + next_entry;
 
                prefetch(np);
 
                if ((priv->extend_desc) && (priv->hw->desc->rx_extended_status))
                        priv->hw->desc->rx_extended_status(&priv->dev->stats,
                                                           &priv->xstats,
-                                                          priv->dma_erx +
+                                                          rx_q->dma_erx +
                                                           entry);
                if (unlikely(status == discard_frame)) {
                        priv->dev->stats.rx_errors++;
@@ -2522,9 +3313,9 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit)
                                 * them in stmmac_rx_refill() function so that
                                 * device can reuse it.
                                 */
-                               priv->rx_skbuff[entry] = NULL;
+                               rx_q->rx_skbuff[entry] = NULL;
                                dma_unmap_single(priv->device,
-                                                priv->rx_skbuff_dma[entry],
+                                                rx_q->rx_skbuff_dma[entry],
                                                 priv->dma_buf_sz,
                                                 DMA_FROM_DEVICE);
                        }
@@ -2572,7 +3363,7 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit)
                         */
                        if (unlikely(!priv->plat->has_gmac4 &&
                                     ((frame_len < priv->rx_copybreak) ||
-                                    stmmac_rx_threshold_count(priv)))) {
+                                    stmmac_rx_threshold_count(rx_q)))) {
                                skb = netdev_alloc_skb_ip_align(priv->dev,
                                                                frame_len);
                                if (unlikely(!skb)) {
@@ -2584,21 +3375,21 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit)
                                }
 
                                dma_sync_single_for_cpu(priv->device,
-                                                       priv->rx_skbuff_dma
+                                                       rx_q->rx_skbuff_dma
                                                        [entry], frame_len,
                                                        DMA_FROM_DEVICE);
                                skb_copy_to_linear_data(skb,
-                                                       priv->
+                                                       rx_q->
                                                        rx_skbuff[entry]->data,
                                                        frame_len);
 
                                skb_put(skb, frame_len);
                                dma_sync_single_for_device(priv->device,
-                                                          priv->rx_skbuff_dma
+                                                          rx_q->rx_skbuff_dma
                                                           [entry], frame_len,
                                                           DMA_FROM_DEVICE);
                        } else {
-                               skb = priv->rx_skbuff[entry];
+                               skb = rx_q->rx_skbuff[entry];
                                if (unlikely(!skb)) {
                                        netdev_err(priv->dev,
                                                   "%s: Inconsistent Rx chain\n",
@@ -2607,12 +3398,12 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit)
                                        break;
                                }
                                prefetch(skb->data - NET_IP_ALIGN);
-                               priv->rx_skbuff[entry] = NULL;
-                               priv->rx_zeroc_thresh++;
+                               rx_q->rx_skbuff[entry] = NULL;
+                               rx_q->rx_zeroc_thresh++;
 
                                skb_put(skb, frame_len);
                                dma_unmap_single(priv->device,
-                                                priv->rx_skbuff_dma[entry],
+                                                rx_q->rx_skbuff_dma[entry],
                                                 priv->dma_buf_sz,
                                                 DMA_FROM_DEVICE);
                        }
@@ -2634,7 +3425,7 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit)
                        else
                                skb->ip_summed = CHECKSUM_UNNECESSARY;
 
-                       napi_gro_receive(&priv->napi, skb);
+                       napi_gro_receive(&rx_q->napi, skb);
 
                        priv->dev->stats.rx_packets++;
                        priv->dev->stats.rx_bytes += frame_len;
@@ -2642,7 +3433,7 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit)
                entry = next_entry;
        }
 
-       stmmac_rx_refill(priv);
+       stmmac_rx_refill(priv, queue);
 
        priv->xstats.rx_pkt_n += count;
 
@@ -2659,16 +3450,24 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit)
  */
 static int stmmac_poll(struct napi_struct *napi, int budget)
 {
-       struct stmmac_priv *priv = container_of(napi, struct stmmac_priv, napi);
+       struct stmmac_rx_queue *rx_q =
+               container_of(napi, struct stmmac_rx_queue, napi);
+       struct stmmac_priv *priv = rx_q->priv_data;
+       u32 tx_count = priv->plat->tx_queues_to_use;
+       u32 chan = rx_q->queue_index;
        int work_done = 0;
+       u32 queue;
 
        priv->xstats.napi_poll++;
-       stmmac_tx_clean(priv);
 
-       work_done = stmmac_rx(priv, budget);
+       /* check all the queues */
+       for (queue = 0; queue < tx_count; queue++)
+               stmmac_tx_clean(priv, queue);
+
+       work_done = stmmac_rx(priv, budget, rx_q->queue_index);
        if (work_done < budget) {
                napi_complete_done(napi, work_done);
-               stmmac_enable_dma_irq(priv);
+               stmmac_enable_dma_irq(priv, chan);
        }
        return work_done;
 }
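stmmac_poll() now recovers its RX queue from the NAPI context with container_of(), since every RX queue embeds its own napi struct. A freestanding illustration of that recovery (struct names invented for the example):

	#include <stddef.h>
	#include <stdio.h>

	#define container_of(ptr, type, member) \
		((type *)((char *)(ptr) - offsetof(type, member)))

	struct ex_napi { int weight; };

	struct ex_rx_queue {
		unsigned int queue_index;
		struct ex_napi napi;	/* embedded, one per RX queue */
	};

	int main(void)
	{
		struct ex_rx_queue rxq = { .queue_index = 3 };
		struct ex_napi *napi = &rxq.napi;
		struct ex_rx_queue *q =
			container_of(napi, struct ex_rx_queue, napi);

		printf("polling queue %u\n", q->queue_index);	/* prints 3 */
		return 0;
	}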
@@ -2684,9 +3483,12 @@ static int stmmac_poll(struct napi_struct *napi, int budget)
 static void stmmac_tx_timeout(struct net_device *dev)
 {
        struct stmmac_priv *priv = netdev_priv(dev);
+       u32 tx_count = priv->plat->tx_queues_to_use;
+       u32 chan;
 
        /* Clear Tx resources and restart transmitting again */
-       stmmac_tx_err(priv);
+       for (chan = 0; chan < tx_count; chan++)
+               stmmac_tx_err(priv, chan);
 }
 
 /**
@@ -2795,6 +3597,12 @@ static irqreturn_t stmmac_interrupt(int irq, void *dev_id)
 {
        struct net_device *dev = (struct net_device *)dev_id;
        struct stmmac_priv *priv = netdev_priv(dev);
+       u32 rx_cnt = priv->plat->rx_queues_to_use;
+       u32 tx_cnt = priv->plat->tx_queues_to_use;
+       u32 queues_count;
+       u32 queue;
+
+       queues_count = (rx_cnt > tx_cnt) ? rx_cnt : tx_cnt;
 
        if (priv->irq_wake)
                pm_wakeup_event(priv->device, 0);
@@ -2808,16 +3616,30 @@ static irqreturn_t stmmac_interrupt(int irq, void *dev_id)
        if ((priv->plat->has_gmac) || (priv->plat->has_gmac4)) {
                int status = priv->hw->mac->host_irq_status(priv->hw,
                                                            &priv->xstats);
+
                if (unlikely(status)) {
                        /* For LPI we need to save the tx status */
                        if (status & CORE_IRQ_TX_PATH_IN_LPI_MODE)
                                priv->tx_path_in_lpi_mode = true;
                        if (status & CORE_IRQ_TX_PATH_EXIT_LPI_MODE)
                                priv->tx_path_in_lpi_mode = false;
-                       if (status & CORE_IRQ_MTL_RX_OVERFLOW && priv->hw->dma->set_rx_tail_ptr)
-                               priv->hw->dma->set_rx_tail_ptr(priv->ioaddr,
-                                                       priv->rx_tail_addr,
-                                                       STMMAC_CHAN0);
+               }
+
+               if (priv->synopsys_id >= DWMAC_CORE_4_00) {
+                       for (queue = 0; queue < queues_count; queue++) {
+                               struct stmmac_rx_queue *rx_q =
+                               &priv->rx_queue[queue];
+
+                               status |=
+                               priv->hw->mac->host_mtl_irq_status(priv->hw,
+                                                                  queue);
+
+                               if (status & CORE_IRQ_MTL_RX_OVERFLOW &&
+                                   priv->hw->dma->set_rx_tail_ptr)
+                                       priv->hw->dma->set_rx_tail_ptr(priv->ioaddr,
+                                                               rx_q->rx_tail_addr,
+                                                               queue);
+                       }
                }
 
                /* PCS link status */
@@ -2915,17 +3737,40 @@ static int stmmac_sysfs_ring_read(struct seq_file *seq, void *v)
 {
        struct net_device *dev = seq->private;
        struct stmmac_priv *priv = netdev_priv(dev);
+       u32 rx_count = priv->plat->rx_queues_to_use;
+       u32 tx_count = priv->plat->tx_queues_to_use;
+       u32 queue;
 
-       if (priv->extend_desc) {
-               seq_printf(seq, "Extended RX descriptor ring:\n");
-               sysfs_display_ring((void *)priv->dma_erx, DMA_RX_SIZE, 1, seq);
-               seq_printf(seq, "Extended TX descriptor ring:\n");
-               sysfs_display_ring((void *)priv->dma_etx, DMA_TX_SIZE, 1, seq);
-       } else {
-               seq_printf(seq, "RX descriptor ring:\n");
-               sysfs_display_ring((void *)priv->dma_rx, DMA_RX_SIZE, 0, seq);
-               seq_printf(seq, "TX descriptor ring:\n");
-               sysfs_display_ring((void *)priv->dma_tx, DMA_TX_SIZE, 0, seq);
+       for (queue = 0; queue < rx_count; queue++) {
+               struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
+
+               seq_printf(seq, "RX Queue %d:\n", queue);
+
+               if (priv->extend_desc) {
+                       seq_printf(seq, "Extended descriptor ring:\n");
+                       sysfs_display_ring((void *)rx_q->dma_erx,
+                                          DMA_RX_SIZE, 1, seq);
+               } else {
+                       seq_printf(seq, "Descriptor ring:\n");
+                       sysfs_display_ring((void *)rx_q->dma_rx,
+                                          DMA_RX_SIZE, 0, seq);
+               }
+       }
+
+       for (queue = 0; queue < tx_count; queue++) {
+               struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
+
+               seq_printf(seq, "TX Queue %d:\n", queue);
+
+               if (priv->extend_desc) {
+                       seq_printf(seq, "Extended descriptor ring:\n");
+                       sysfs_display_ring((void *)tx_q->dma_etx,
+                                          DMA_TX_SIZE, 1, seq);
+               } else {
+                       seq_printf(seq, "Descriptor ring:\n");
+                       sysfs_display_ring((void *)tx_q->dma_tx,
+                                          DMA_TX_SIZE, 0, seq);
+               }
        }
 
        return 0;
@@ -3208,11 +4053,14 @@ int stmmac_dvr_probe(struct device *device,
                     struct plat_stmmacenet_data *plat_dat,
                     struct stmmac_resources *res)
 {
-       int ret = 0;
        struct net_device *ndev = NULL;
        struct stmmac_priv *priv;
+       int ret = 0;
+       u32 queue;
 
-       ndev = alloc_etherdev(sizeof(struct stmmac_priv));
+       ndev = alloc_etherdev_mqs(sizeof(struct stmmac_priv),
+                                 MTL_MAX_TX_QUEUES,
+                                 MTL_MAX_RX_QUEUES);
        if (!ndev)
                return -ENOMEM;
 
@@ -3254,6 +4102,10 @@ int stmmac_dvr_probe(struct device *device,
        if (ret)
                goto error_hw_init;
 
+       /* Configure real RX and TX queues */
+       netif_set_real_num_rx_queues(ndev, priv->plat->rx_queues_to_use);
+       netif_set_real_num_tx_queues(ndev, priv->plat->tx_queues_to_use);
+
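The probe path sizes the netdev for the MTL maximum and then trims the visible queue count to what the platform configured. A sketch of that allocate-max/trim-to-used pattern with the real netdev API (the wrapper itself is hypothetical):

	#include <linux/etherdevice.h>
	#include <linux/netdevice.h>

	static struct net_device *ex_alloc(int priv_sz, unsigned int max_q,
					   unsigned int used_q)
	{
		struct net_device *ndev;

		/* size for the hardware maximum... */
		ndev = alloc_etherdev_mqs(priv_sz, max_q, max_q);
		if (!ndev)
			return NULL;

		/* ...then expose only the queues this platform uses */
		netif_set_real_num_tx_queues(ndev, used_q);
		netif_set_real_num_rx_queues(ndev, used_q);

		return ndev;
	}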
        ndev->netdev_ops = &stmmac_netdev_ops;
 
        ndev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
@@ -3303,7 +4155,12 @@ int stmmac_dvr_probe(struct device *device,
                         "Enable RX Mitigation via HW Watchdog Timer\n");
        }
 
-       netif_napi_add(ndev, &priv->napi, stmmac_poll, 64);
+       for (queue = 0; queue < priv->plat->rx_queues_to_use; queue++) {
+               struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
+
+               netif_napi_add(ndev, &rx_q->napi, stmmac_poll,
+                              (8 * priv->plat->rx_queues_to_use));
+       }
 
        spin_lock_init(&priv->lock);
 
@@ -3348,7 +4205,11 @@ error_netdev_register:
            priv->hw->pcs != STMMAC_PCS_RTBI)
                stmmac_mdio_unregister(ndev);
 error_mdio_register:
-       netif_napi_del(&priv->napi);
+       for (queue = 0; queue < priv->plat->rx_queues_to_use; queue++) {
+               struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
+
+               netif_napi_del(&rx_q->napi);
+       }
 error_hw_init:
        free_netdev(ndev);
 
@@ -3369,10 +4230,9 @@ int stmmac_dvr_remove(struct device *dev)
 
        netdev_info(priv->dev, "%s: removing driver", __func__);
 
-       priv->hw->dma->stop_rx(priv->ioaddr);
-       priv->hw->dma->stop_tx(priv->ioaddr);
+       stmmac_stop_all_dma(priv);
 
-       stmmac_set_mac(priv->ioaddr, false);
+       priv->hw->mac->set_mac(priv->ioaddr, false);
        netif_carrier_off(ndev);
        unregister_netdev(ndev);
        if (priv->plat->stmmac_rst)
@@ -3411,20 +4271,19 @@ int stmmac_suspend(struct device *dev)
        spin_lock_irqsave(&priv->lock, flags);
 
        netif_device_detach(ndev);
-       netif_stop_queue(ndev);
+       stmmac_stop_all_queues(priv);
 
-       napi_disable(&priv->napi);
+       stmmac_disable_all_queues(priv);
 
        /* Stop TX/RX DMA */
-       priv->hw->dma->stop_tx(priv->ioaddr);
-       priv->hw->dma->stop_rx(priv->ioaddr);
+       stmmac_stop_all_dma(priv);
 
        /* Enable Power down mode by programming the PMT regs */
        if (device_may_wakeup(priv->device)) {
                priv->hw->mac->pmt(priv->hw, priv->wolopts);
                priv->irq_wake = 1;
        } else {
-               stmmac_set_mac(priv->ioaddr, false);
+               priv->hw->mac->set_mac(priv->ioaddr, false);
                pinctrl_pm_select_sleep_state(priv->device);
                /* Disable clock in case of PWM is off */
                clk_disable(priv->plat->pclk);
@@ -3439,6 +4298,31 @@ int stmmac_suspend(struct device *dev)
 }
 EXPORT_SYMBOL_GPL(stmmac_suspend);
 
+/**
+ * stmmac_reset_queues_param - reset queue parameters
+ * @priv: driver private structure
+ */
+static void stmmac_reset_queues_param(struct stmmac_priv *priv)
+{
+       u32 rx_cnt = priv->plat->rx_queues_to_use;
+       u32 tx_cnt = priv->plat->tx_queues_to_use;
+       u32 queue;
+
+       for (queue = 0; queue < rx_cnt; queue++) {
+               struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
+
+               rx_q->cur_rx = 0;
+               rx_q->dirty_rx = 0;
+       }
+
+       for (queue = 0; queue < tx_cnt; queue++) {
+               struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
+
+               tx_q->cur_tx = 0;
+               tx_q->dirty_tx = 0;
+       }
+}
+
 /**
  * stmmac_resume - resume callback
  * @dev: device pointer
@@ -3479,10 +4363,8 @@ int stmmac_resume(struct device *dev)
 
        spin_lock_irqsave(&priv->lock, flags);
 
-       priv->cur_rx = 0;
-       priv->dirty_rx = 0;
-       priv->dirty_tx = 0;
-       priv->cur_tx = 0;
+       stmmac_reset_queues_param(priv);
+
        /* reset private mss value to force mss context settings at
         * next tso xmit (only used for gmac4).
         */
@@ -3494,9 +4376,9 @@ int stmmac_resume(struct device *dev)
        stmmac_init_tx_coalesce(priv);
        stmmac_set_rx_mode(ndev);
 
-       napi_enable(&priv->napi);
+       stmmac_enable_all_queues(priv);
 
-       netif_start_queue(ndev);
+       stmmac_start_all_queues(priv);
 
        spin_unlock_irqrestore(&priv->lock, flags);
 
index 5c9e462276b9cbc25a8b5c2748988286fe17884f..a224d7bf1c1beea57fe976cbd4f94f63603a480a 100644 (file)
@@ -88,6 +88,17 @@ static void stmmac_default_data(struct plat_stmmacenet_data *plat)
 
        /* Set the maxmtu to a default of JUMBO_LEN */
        plat->maxmtu = JUMBO_LEN;
+
+       /* Set default number of RX and TX queues to use */
+       plat->tx_queues_to_use = 1;
+       plat->rx_queues_to_use = 1;
+
+       /* Disable Priority config by default */
+       plat->tx_queues_cfg[0].use_prio = false;
+       plat->rx_queues_cfg[0].use_prio = false;
+
+       /* Disable RX queues routing by default */
+       plat->rx_queues_cfg[0].pkt_route = 0x0;
 }
 
 static int quark_default_data(struct plat_stmmacenet_data *plat,
index 433a84239a687bab4ff0572978d7c0eaf849cb46..7fc3a1ef395ab2e99060355d1c47e5b5f1f9d9f1 100644 (file)
@@ -108,7 +108,7 @@ static struct stmmac_axi *stmmac_axi_setup(struct platform_device *pdev)
        if (!np)
                return NULL;
 
-       axi = kzalloc(sizeof(*axi), GFP_KERNEL);
+       axi = devm_kzalloc(&pdev->dev, sizeof(*axi), GFP_KERNEL);
        if (!axi) {
                of_node_put(np);
                return ERR_PTR(-ENOMEM);
@@ -131,6 +131,155 @@ static struct stmmac_axi *stmmac_axi_setup(struct platform_device *pdev)
        return axi;
 }
 
+/**
+ * stmmac_mtl_setup - parse DT parameters for multiple queues configuration
+ * @pdev: platform device
+ * @plat: driver data platform structure
+ */
+static void stmmac_mtl_setup(struct platform_device *pdev,
+                            struct plat_stmmacenet_data *plat)
+{
+       struct device_node *q_node;
+       struct device_node *rx_node;
+       struct device_node *tx_node;
+       u8 queue = 0;
+
+       /* For backwards-compatibility with device trees that don't have any
+        * snps,mtl-rx-config or snps,mtl-tx-config properties, we fall back
+        * to one RX and TX queues each.
+        */
+       plat->rx_queues_to_use = 1;
+       plat->tx_queues_to_use = 1;
+
+       rx_node = of_parse_phandle(pdev->dev.of_node, "snps,mtl-rx-config", 0);
+       if (!rx_node)
+               return;
+
+       tx_node = of_parse_phandle(pdev->dev.of_node, "snps,mtl-tx-config", 0);
+       if (!tx_node) {
+               of_node_put(rx_node);
+               return;
+       }
+
+       /* Processing RX queues common config */
+       if (of_property_read_u8(rx_node, "snps,rx-queues-to-use",
+                               &plat->rx_queues_to_use))
+               plat->rx_queues_to_use = 1;
+
+       if (of_property_read_bool(rx_node, "snps,rx-sched-sp"))
+               plat->rx_sched_algorithm = MTL_RX_ALGORITHM_SP;
+       else if (of_property_read_bool(rx_node, "snps,rx-sched-wsp"))
+               plat->rx_sched_algorithm = MTL_RX_ALGORITHM_WSP;
+       else
+               plat->rx_sched_algorithm = MTL_RX_ALGORITHM_SP;
+
+       /* Processing individual RX queue config */
+       for_each_child_of_node(rx_node, q_node) {
+               if (queue >= plat->rx_queues_to_use)
+                       break;
+
+               if (of_property_read_bool(q_node, "snps,dcb-algorithm"))
+                       plat->rx_queues_cfg[queue].mode_to_use = MTL_QUEUE_DCB;
+               else if (of_property_read_bool(q_node, "snps,avb-algorithm"))
+                       plat->rx_queues_cfg[queue].mode_to_use = MTL_QUEUE_AVB;
+               else
+                       plat->rx_queues_cfg[queue].mode_to_use = MTL_QUEUE_DCB;
+
+               if (of_property_read_u8(q_node, "snps,map-to-dma-channel",
+                                       &plat->rx_queues_cfg[queue].chan))
+                       plat->rx_queues_cfg[queue].chan = queue;
+               /* TODO: Dynamic mapping to be included in the future */
+
+               if (of_property_read_u32(q_node, "snps,priority",
+                                       &plat->rx_queues_cfg[queue].prio)) {
+                       plat->rx_queues_cfg[queue].prio = 0;
+                       plat->rx_queues_cfg[queue].use_prio = false;
+               } else {
+                       plat->rx_queues_cfg[queue].use_prio = true;
+               }
+
+               /* RX queue specific packet type routing */
+               if (of_property_read_bool(q_node, "snps,route-avcp"))
+                       plat->rx_queues_cfg[queue].pkt_route = PACKET_AVCPQ;
+               else if (of_property_read_bool(q_node, "snps,route-ptp"))
+                       plat->rx_queues_cfg[queue].pkt_route = PACKET_PTPQ;
+               else if (of_property_read_bool(q_node, "snps,route-dcbcp"))
+                       plat->rx_queues_cfg[queue].pkt_route = PACKET_DCBCPQ;
+               else if (of_property_read_bool(q_node, "snps,route-up"))
+                       plat->rx_queues_cfg[queue].pkt_route = PACKET_UPQ;
+               else if (of_property_read_bool(q_node, "snps,route-multi-broad"))
+                       plat->rx_queues_cfg[queue].pkt_route = PACKET_MCBCQ;
+               else
+                       plat->rx_queues_cfg[queue].pkt_route = 0x0;
+
+               queue++;
+       }
+
+       /* Processing TX queues common config */
+       if (of_property_read_u8(tx_node, "snps,tx-queues-to-use",
+                               &plat->tx_queues_to_use))
+               plat->tx_queues_to_use = 1;
+
+       if (of_property_read_bool(tx_node, "snps,tx-sched-wrr"))
+               plat->tx_sched_algorithm = MTL_TX_ALGORITHM_WRR;
+       else if (of_property_read_bool(tx_node, "snps,tx-sched-wfq"))
+               plat->tx_sched_algorithm = MTL_TX_ALGORITHM_WFQ;
+       else if (of_property_read_bool(tx_node, "snps,tx-sched-dwrr"))
+               plat->tx_sched_algorithm = MTL_TX_ALGORITHM_DWRR;
+       else if (of_property_read_bool(tx_node, "snps,tx-sched-sp"))
+               plat->tx_sched_algorithm = MTL_TX_ALGORITHM_SP;
+       else
+               plat->tx_sched_algorithm = MTL_TX_ALGORITHM_SP;
+
+       queue = 0;
+
+       /* Processing individual TX queue config */
+       for_each_child_of_node(tx_node, q_node) {
+               if (queue >= plat->tx_queues_to_use)
+                       break;
+
+               if (of_property_read_u8(q_node, "snps,weight",
+                                       &plat->tx_queues_cfg[queue].weight))
+                       plat->tx_queues_cfg[queue].weight = 0x10 + queue;
+
+               if (of_property_read_bool(q_node, "snps,dcb-algorithm")) {
+                       plat->tx_queues_cfg[queue].mode_to_use = MTL_QUEUE_DCB;
+               } else if (of_property_read_bool(q_node,
+                                                "snps,avb-algorithm")) {
+                       plat->tx_queues_cfg[queue].mode_to_use = MTL_QUEUE_AVB;
+
+                       /* Credit Base Shaper parameters used by AVB */
+                       if (of_property_read_u32(q_node, "snps,send_slope",
+                               &plat->tx_queues_cfg[queue].send_slope))
+                               plat->tx_queues_cfg[queue].send_slope = 0x0;
+                       if (of_property_read_u32(q_node, "snps,idle_slope",
+                               &plat->tx_queues_cfg[queue].idle_slope))
+                               plat->tx_queues_cfg[queue].idle_slope = 0x0;
+                       if (of_property_read_u32(q_node, "snps,high_credit",
+                               &plat->tx_queues_cfg[queue].high_credit))
+                               plat->tx_queues_cfg[queue].high_credit = 0x0;
+                       if (of_property_read_u32(q_node, "snps,low_credit",
+                               &plat->tx_queues_cfg[queue].low_credit))
+                               plat->tx_queues_cfg[queue].low_credit = 0x0;
+               } else {
+                       plat->tx_queues_cfg[queue].mode_to_use = MTL_QUEUE_DCB;
+               }
+
+               if (of_property_read_u32(q_node, "snps,priority",
+                                       &plat->tx_queues_cfg[queue].prio)) {
+                       plat->tx_queues_cfg[queue].prio = 0;
+                       plat->tx_queues_cfg[queue].use_prio = false;
+               } else {
+                       plat->tx_queues_cfg[queue].use_prio = true;
+               }
+
+               queue++;
+       }
+
+       of_node_put(rx_node);
+       of_node_put(tx_node);
+       of_node_put(q_node);
+}
+
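Put together, the parser above accepts device-tree fragments along these lines; this example is assembled only from the properties read by this function, and the node names, labels, and numeric values are illustrative:

	gmac: ethernet@40000000 {
		/* usual stmmac properties omitted */
		snps,mtl-rx-config = <&mtl_rx_setup>;
		snps,mtl-tx-config = <&mtl_tx_setup>;
	};

	mtl_rx_setup: rx-queues-config {
		snps,rx-queues-to-use = <2>;
		snps,rx-sched-sp;
		queue0 {
			snps,dcb-algorithm;
			snps,map-to-dma-channel = <0x0>;
			snps,priority = <0x0>;
		};
		queue1 {
			snps,dcb-algorithm;
			snps,map-to-dma-channel = <0x1>;
			snps,route-ptp;
		};
	};

	mtl_tx_setup: tx-queues-config {
		snps,tx-queues-to-use = <2>;
		snps,tx-sched-wrr;
		queue0 {
			snps,weight = <0x10>;
			snps,dcb-algorithm;
		};
		queue1 {
			snps,avb-algorithm;
			snps,send_slope = <0x1000>;
			snps,idle_slope = <0x1000>;
			snps,high_credit = <0x3e800>;
			snps,low_credit = <0xffc18000>;
		};
	};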
 /**
  * stmmac_dt_phy - parse device-tree driver parameters to allocate PHY resources
  * @plat: driver data platform structure
@@ -340,6 +489,8 @@ stmmac_probe_config_dt(struct platform_device *pdev, const char **mac)
 
        plat->axi = stmmac_axi_setup(pdev);
 
+       stmmac_mtl_setup(pdev, plat);
+
        /* clock setup */
        plat->stmmac_clk = devm_clk_get(&pdev->dev,
                                        STMMAC_RESOURCE_NAME);
@@ -359,13 +510,12 @@ stmmac_probe_config_dt(struct platform_device *pdev, const char **mac)
        clk_prepare_enable(plat->pclk);
 
        /* Fall back to the main clock if no PTP reference clock is provided */
-       plat->clk_ptp_ref = devm_clk_get(&pdev->dev, "clk_ptp_ref");
+       plat->clk_ptp_ref = devm_clk_get(&pdev->dev, "ptp_ref");
        if (IS_ERR(plat->clk_ptp_ref)) {
                plat->clk_ptp_rate = clk_get_rate(plat->stmmac_clk);
                plat->clk_ptp_ref = NULL;
                dev_warn(&pdev->dev, "PTP uses main clock\n");
        } else {
-               clk_prepare_enable(plat->clk_ptp_ref);
                plat->clk_ptp_rate = clk_get_rate(plat->clk_ptp_ref);
                dev_dbg(&pdev->dev, "PTP rate %d\n", plat->clk_ptp_rate);
        }
index 0e8e89f17dbb1128c6b562b17ae736622b6cf45b..382993c1561c5c9b071138d0f8ca0def10743e83 100644 (file)
@@ -691,7 +691,8 @@ static void cas_mif_poll(struct cas *cp, const int enable)
 }
 
 /* Must be invoked under cp->lock */
-static void cas_begin_auto_negotiation(struct cas *cp, struct ethtool_cmd *ep)
+static void cas_begin_auto_negotiation(struct cas *cp,
+                                      const struct ethtool_link_ksettings *ep)
 {
        u16 ctl;
 #if 1
@@ -704,16 +705,16 @@ static void cas_begin_auto_negotiation(struct cas *cp, struct ethtool_cmd *ep)
        if (!ep)
                goto start_aneg;
        lcntl = cp->link_cntl;
-       if (ep->autoneg == AUTONEG_ENABLE)
+       if (ep->base.autoneg == AUTONEG_ENABLE)
                cp->link_cntl = BMCR_ANENABLE;
-       else {
-               u32 speed = ethtool_cmd_speed(ep);
+       else {
+               u32 speed = ep->base.speed;
                cp->link_cntl = 0;
                if (speed == SPEED_100)
                        cp->link_cntl |= BMCR_SPEED100;
                else if (speed == SPEED_1000)
                        cp->link_cntl |= CAS_BMCR_SPEED1000;
-               if (ep->duplex == DUPLEX_FULL)
+               if (ep->base.duplex == DUPLEX_FULL)
                        cp->link_cntl |= BMCR_FULLDPLX;
        }
 #if 1
@@ -4528,19 +4529,21 @@ static void cas_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info
        strlcpy(info->bus_info, pci_name(cp->pdev), sizeof(info->bus_info));
 }
 
-static int cas_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+static int cas_get_link_ksettings(struct net_device *dev,
+                                 struct ethtool_link_ksettings *cmd)
 {
        struct cas *cp = netdev_priv(dev);
        u16 bmcr;
        int full_duplex, speed, pause;
        unsigned long flags;
        enum link_state linkstate = link_up;
+       u32 supported, advertising;
 
-       cmd->advertising = 0;
-       cmd->supported = SUPPORTED_Autoneg;
+       advertising = 0;
+       supported = SUPPORTED_Autoneg;
        if (cp->cas_flags & CAS_FLAG_1000MB_CAP) {
-               cmd->supported |= SUPPORTED_1000baseT_Full;
-               cmd->advertising |= ADVERTISED_1000baseT_Full;
+               supported |= SUPPORTED_1000baseT_Full;
+               advertising |= ADVERTISED_1000baseT_Full;
        }
 
        /* Record PHY settings if HW is on. */
@@ -4548,17 +4551,15 @@ static int cas_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
        bmcr = 0;
        linkstate = cp->lstate;
        if (CAS_PHY_MII(cp->phy_type)) {
-               cmd->port = PORT_MII;
-               cmd->transceiver = (cp->cas_flags & CAS_FLAG_SATURN) ?
-                       XCVR_INTERNAL : XCVR_EXTERNAL;
-               cmd->phy_address = cp->phy_addr;
-               cmd->advertising |= ADVERTISED_TP | ADVERTISED_MII |
+               cmd->base.port = PORT_MII;
+               cmd->base.phy_address = cp->phy_addr;
+               advertising |= ADVERTISED_TP | ADVERTISED_MII |
                        ADVERTISED_10baseT_Half |
                        ADVERTISED_10baseT_Full |
                        ADVERTISED_100baseT_Half |
                        ADVERTISED_100baseT_Full;
 
-               cmd->supported |=
+               supported |=
                        (SUPPORTED_10baseT_Half |
                         SUPPORTED_10baseT_Full |
                         SUPPORTED_100baseT_Half |
@@ -4574,11 +4575,10 @@ static int cas_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
                }
 
        } else {
-               cmd->port = PORT_FIBRE;
-               cmd->transceiver = XCVR_INTERNAL;
-               cmd->phy_address = 0;
-               cmd->supported   |= SUPPORTED_FIBRE;
-               cmd->advertising |= ADVERTISED_FIBRE;
+               cmd->base.port = PORT_FIBRE;
+               cmd->base.phy_address = 0;
+               supported   |= SUPPORTED_FIBRE;
+               advertising |= ADVERTISED_FIBRE;
 
                if (cp->hw_running) {
                        /* pcs uses the same bits as mii */
@@ -4590,21 +4590,20 @@ static int cas_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
        spin_unlock_irqrestore(&cp->lock, flags);
 
        if (bmcr & BMCR_ANENABLE) {
-               cmd->advertising |= ADVERTISED_Autoneg;
-               cmd->autoneg = AUTONEG_ENABLE;
-               ethtool_cmd_speed_set(cmd, ((speed == 10) ?
+               advertising |= ADVERTISED_Autoneg;
+               cmd->base.autoneg = AUTONEG_ENABLE;
+               cmd->base.speed = ((speed == 10) ?
                                            SPEED_10 :
                                            ((speed == 1000) ?
-                                            SPEED_1000 : SPEED_100)));
-               cmd->duplex = full_duplex ? DUPLEX_FULL : DUPLEX_HALF;
+                                            SPEED_1000 : SPEED_100));
+               cmd->base.duplex = full_duplex ? DUPLEX_FULL : DUPLEX_HALF;
        } else {
-               cmd->autoneg = AUTONEG_DISABLE;
-               ethtool_cmd_speed_set(cmd, ((bmcr & CAS_BMCR_SPEED1000) ?
+               cmd->base.autoneg = AUTONEG_DISABLE;
+               cmd->base.speed = ((bmcr & CAS_BMCR_SPEED1000) ?
                                            SPEED_1000 :
                                            ((bmcr & BMCR_SPEED100) ?
-                                            SPEED_100 : SPEED_10)));
-               cmd->duplex =
-                       (bmcr & BMCR_FULLDPLX) ?
+                                            SPEED_100 : SPEED_10));
+               cmd->base.duplex = (bmcr & BMCR_FULLDPLX) ?
                        DUPLEX_FULL : DUPLEX_HALF;
        }
        if (linkstate != link_up) {
@@ -4619,39 +4618,46 @@ static int cas_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
                 * settings that we configured.
                 */
                if (cp->link_cntl & BMCR_ANENABLE) {
-                       ethtool_cmd_speed_set(cmd, 0);
-                       cmd->duplex = 0xff;
+                       cmd->base.speed = 0;
+                       cmd->base.duplex = 0xff;
                } else {
-                       ethtool_cmd_speed_set(cmd, SPEED_10);
+                       cmd->base.speed = SPEED_10;
                        if (cp->link_cntl & BMCR_SPEED100) {
-                               ethtool_cmd_speed_set(cmd, SPEED_100);
+                               cmd->base.speed = SPEED_100;
                        } else if (cp->link_cntl & CAS_BMCR_SPEED1000) {
-                               ethtool_cmd_speed_set(cmd, SPEED_1000);
+                               cmd->base.speed = SPEED_1000;
                        }
-                       cmd->duplex = (cp->link_cntl & BMCR_FULLDPLX)?
+                       cmd->base.duplex = (cp->link_cntl & BMCR_FULLDPLX) ?
                                DUPLEX_FULL : DUPLEX_HALF;
                }
        }
+
+       ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
+                                               supported);
+       ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
+                                               advertising);
+
        return 0;
 }
 
-static int cas_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+static int cas_set_link_ksettings(struct net_device *dev,
+                                 const struct ethtool_link_ksettings *cmd)
 {
        struct cas *cp = netdev_priv(dev);
        unsigned long flags;
-       u32 speed = ethtool_cmd_speed(cmd);
+       u32 speed = cmd->base.speed;
 
        /* Verify the settings we care about. */
-       if (cmd->autoneg != AUTONEG_ENABLE &&
-           cmd->autoneg != AUTONEG_DISABLE)
+       if (cmd->base.autoneg != AUTONEG_ENABLE &&
+           cmd->base.autoneg != AUTONEG_DISABLE)
                return -EINVAL;
 
-       if (cmd->autoneg == AUTONEG_DISABLE &&
+       if (cmd->base.autoneg == AUTONEG_DISABLE &&
            ((speed != SPEED_1000 &&
              speed != SPEED_100 &&
              speed != SPEED_10) ||
-            (cmd->duplex != DUPLEX_HALF &&
-             cmd->duplex != DUPLEX_FULL)))
+            (cmd->base.duplex != DUPLEX_HALF &&
+             cmd->base.duplex != DUPLEX_FULL)))
                return -EINVAL;
 
        /* Apply settings and restart link process. */
@@ -4753,8 +4759,6 @@ static void cas_get_ethtool_stats(struct net_device *dev,
 
 static const struct ethtool_ops cas_ethtool_ops = {
        .get_drvinfo            = cas_get_drvinfo,
-       .get_settings           = cas_get_settings,
-       .set_settings           = cas_set_settings,
        .nway_reset             = cas_nway_reset,
        .get_link               = cas_get_link,
        .get_msglevel           = cas_get_msglevel,
@@ -4764,6 +4768,8 @@ static const struct ethtool_ops cas_ethtool_ops = {
        .get_sset_count         = cas_get_sset_count,
        .get_strings            = cas_get_strings,
        .get_ethtool_stats      = cas_get_ethtool_stats,
+       .get_link_ksettings     = cas_get_link_ksettings,
+       .set_link_ksettings     = cas_set_link_ksettings,
 };
 
 static int cas_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
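
This cassini conversion is the standard ethtool ksettings migration: get_settings/set_settings give way to get_link_ksettings/set_link_ksettings, the legacy SUPPORTED_*/ADVERTISED_* masks are accumulated in local u32 variables, the deprecated transceiver/maxtxpkt/maxrxpkt fields disappear, and the masks are converted into the new link-mode bitmaps at the end. A minimal sketch of the get path, with hypothetical foo_ names and fixed example values:

    #include <linux/ethtool.h>
    #include <linux/netdevice.h>

    static int foo_get_link_ksettings(struct net_device *dev,
                                      struct ethtool_link_ksettings *cmd)
    {
            u32 supported = SUPPORTED_100baseT_Full | SUPPORTED_Autoneg;
            u32 advertising = ADVERTISED_100baseT_Full | ADVERTISED_Autoneg;

            cmd->base.speed = SPEED_100;
            cmd->base.duplex = DUPLEX_FULL;
            cmd->base.autoneg = AUTONEG_ENABLE;

            /* legacy u32 masks -> ETHTOOL_LINK_MODE_* bitmaps */
            ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
                                                    supported);
            ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
                                                    advertising);
            return 0;
    }
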
index 89952deae47fc813b66c6d856f16dd648a803a0c..5a90fed0626065613ba59fa7c5f8ca3c2dff6ef3 100644 (file)
@@ -1,6 +1,6 @@
 /* ldmvsw.c: Sun4v LDOM Virtual Switch Driver.
  *
- * Copyright (C) 2016 Oracle. All rights reserved.
+ * Copyright (C) 2016-2017 Oracle. All rights reserved.
  */
 
 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
@@ -41,8 +41,8 @@
 static u8 vsw_port_hwaddr[ETH_ALEN] = {0xFE, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF};
 
 #define DRV_MODULE_NAME                "ldmvsw"
-#define DRV_MODULE_VERSION     "1.1"
-#define DRV_MODULE_RELDATE     "February 3, 2017"
+#define DRV_MODULE_VERSION     "1.2"
+#define DRV_MODULE_RELDATE     "March 4, 2017"
 
 static char version[] =
        DRV_MODULE_NAME " " DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")";
@@ -123,6 +123,20 @@ static void vsw_set_rx_mode(struct net_device *dev)
        return sunvnet_set_rx_mode_common(dev, port->vp);
 }
 
+int ldmvsw_open(struct net_device *dev)
+{
+       struct vnet_port *port = netdev_priv(dev);
+       struct vio_driver_state *vio = &port->vio;
+
+       /* reset the channel */
+       vio_link_state_change(vio, LDC_EVENT_RESET);
+       vnet_port_reset(port);
+       vio_port_up(vio);
+
+       return 0;
+}
+EXPORT_SYMBOL_GPL(ldmvsw_open);
+
 #ifdef CONFIG_NET_POLL_CONTROLLER
 static void vsw_poll_controller(struct net_device *dev)
 {
@@ -133,7 +147,7 @@ static void vsw_poll_controller(struct net_device *dev)
 #endif
 
 static const struct net_device_ops vsw_ops = {
-       .ndo_open               = sunvnet_open_common,
+       .ndo_open               = ldmvsw_open,
        .ndo_stop               = sunvnet_close_common,
        .ndo_set_rx_mode        = vsw_set_rx_mode,
        .ndo_set_mac_address    = sunvnet_set_mac_addr_common,
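
ldmvsw now installs its own ndo_open instead of the shared sunvnet_open_common(): as the new ldmvsw_open() above shows, opening a vsw port deliberately resets the LDC channel first and only then brings the VIO port up, so the peer renegotiates from a clean state. The shape of that pattern, reduced to a compilable sketch (the foo_* names and stub helpers are hypothetical):

    #include <linux/netdevice.h>

    struct foo_port { int unused; };                        /* hypothetical state */
    static void foo_channel_reset(struct foo_port *p) { }   /* stand-in */
    static void foo_port_up(struct foo_port *p) { }         /* stand-in */

    static int foo_open(struct net_device *dev)
    {
            struct foo_port *port = netdev_priv(dev);

            foo_channel_reset(port);    /* drop stale channel state first */
            foo_port_up(port);          /* then restart the handshake */
            return 0;
    }
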
@@ -365,6 +379,11 @@ static int vsw_port_probe(struct vio_dev *vdev, const struct vio_device_id *id)
        napi_enable(&port->napi);
        vio_port_up(&port->vio);
 
+       /* assure no carrier until we receive an LDC_EVENT_UP,
+        * even if the vsw config script tries to force us up
+        */
+       netif_carrier_off(dev);
+
        netdev_info(dev, "LDOM vsw-port %pM\n", dev->dev_addr);
 
        pr_info("%s: PORT ( remote-mac %pM%s )\n", dev->name,
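
vsw_port_probe() now forces carrier off right after bringing the VIO port up: the netdev must not report carrier until a real LDC_EVENT_UP arrives, even if a vsw config script tries to force the link up. The matching carrier and queue toggling on UP/RESET events appears in the sunvnet_common.c hunks further down; the split looks roughly like this (hypothetical foo_ names):

    #include <linux/netdevice.h>

    static void foo_probe_done(struct net_device *dev)
    {
            netif_carrier_off(dev);     /* no carrier until the first UP event */
    }

    static void foo_link_event(struct net_device *dev, bool link_up)
    {
            if (link_up) {
                    netif_carrier_on(dev);
                    netif_tx_start_all_queues(dev);
            } else {
                    netif_tx_stop_all_queues(dev);
                    netif_carrier_off(dev);
            }
    }
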
index 57978056b3366f0ecb0410f96dbfbde228ea44f1..2dcca249eb9c732e48a563f22d4e98bc79429e24 100644 (file)
@@ -6813,7 +6813,8 @@ static void niu_get_drvinfo(struct net_device *dev,
                        sizeof(info->bus_info));
 }
 
-static int niu_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+static int niu_get_link_ksettings(struct net_device *dev,
+                                 struct ethtool_link_ksettings *cmd)
 {
        struct niu *np = netdev_priv(dev);
        struct niu_link_config *lp;
@@ -6821,28 +6822,30 @@ static int niu_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
        lp = &np->link_config;
 
        memset(cmd, 0, sizeof(*cmd));
-       cmd->phy_address = np->phy_addr;
-       cmd->supported = lp->supported;
-       cmd->advertising = lp->active_advertising;
-       cmd->autoneg = lp->active_autoneg;
-       ethtool_cmd_speed_set(cmd, lp->active_speed);
-       cmd->duplex = lp->active_duplex;
-       cmd->port = (np->flags & NIU_FLAGS_FIBER) ? PORT_FIBRE : PORT_TP;
-       cmd->transceiver = (np->flags & NIU_FLAGS_XCVR_SERDES) ?
-               XCVR_EXTERNAL : XCVR_INTERNAL;
+       cmd->base.phy_address = np->phy_addr;
+       ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
+                                               lp->supported);
+       ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
+                                               lp->active_advertising);
+       cmd->base.autoneg = lp->active_autoneg;
+       cmd->base.speed = lp->active_speed;
+       cmd->base.duplex = lp->active_duplex;
+       cmd->base.port = (np->flags & NIU_FLAGS_FIBER) ? PORT_FIBRE : PORT_TP;
 
        return 0;
 }
 
-static int niu_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+static int niu_set_link_ksettings(struct net_device *dev,
+                                 const struct ethtool_link_ksettings *cmd)
 {
        struct niu *np = netdev_priv(dev);
        struct niu_link_config *lp = &np->link_config;
 
-       lp->advertising = cmd->advertising;
-       lp->speed = ethtool_cmd_speed(cmd);
-       lp->duplex = cmd->duplex;
-       lp->autoneg = cmd->autoneg;
+       ethtool_convert_link_mode_to_legacy_u32(&lp->advertising,
+                                               cmd->link_modes.advertising);
+       lp->speed = cmd->base.speed;
+       lp->duplex = cmd->base.duplex;
+       lp->autoneg = cmd->base.autoneg;
        return niu_init_link(np);
 }
 
@@ -7902,14 +7905,14 @@ static const struct ethtool_ops niu_ethtool_ops = {
        .nway_reset             = niu_nway_reset,
        .get_eeprom_len         = niu_get_eeprom_len,
        .get_eeprom             = niu_get_eeprom,
-       .get_settings           = niu_get_settings,
-       .set_settings           = niu_set_settings,
        .get_strings            = niu_get_strings,
        .get_sset_count         = niu_get_sset_count,
        .get_ethtool_stats      = niu_get_ethtool_stats,
        .set_phys_id            = niu_set_phys_id,
        .get_rxnfc              = niu_get_nfc,
        .set_rxnfc              = niu_set_nfc,
+       .get_link_ksettings     = niu_get_link_ksettings,
+       .set_link_ksettings     = niu_set_link_ksettings,
 };
 
 static int niu_ldg_assign_ldn(struct niu *np, struct niu_parent *parent,
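
The niu set path shows the inverse helper: ethtool_convert_link_mode_to_legacy_u32() collapses the new link-mode bitmap back into a legacy u32 advertising mask before the driver reprograms the link. The helper returns false when the bitmap carries modes the legacy mask cannot express; niu ignores that here, but a driver may reject such requests, as in this hedged sketch (foo_ names hypothetical):

    #include <linux/ethtool.h>
    #include <linux/netdevice.h>

    static int foo_set_link_ksettings(struct net_device *dev,
                                      const struct ethtool_link_ksettings *cmd)
    {
            u32 advertising;

            /* bitmap -> legacy u32; false if modes beyond the u32 range */
            if (!ethtool_convert_link_mode_to_legacy_u32(
                            &advertising, cmd->link_modes.advertising))
                    return -EINVAL;

            if (cmd->base.autoneg == AUTONEG_ENABLE && advertising == 0)
                    return -EINVAL;

            /* ... program speed/duplex/autoneg from cmd->base ... */
            return 0;
    }
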
index c4caf486cbeffee85ca7ca829ae3fef0d6ad9fe4..3189722110c262fc4a59c1fb67aef953500e0ff6 100644 (file)
@@ -169,7 +169,7 @@ static void bigmac_stop(struct bigmac *bp)
 
 static void bigmac_get_counters(struct bigmac *bp, void __iomem *bregs)
 {
-       struct net_device_stats *stats = &bp->enet_stats;
+       struct net_device_stats *stats = &bp->dev->stats;
 
        stats->rx_crc_errors += sbus_readl(bregs + BMAC_RCRCECTR);
        sbus_writel(0, bregs + BMAC_RCRCECTR);
@@ -774,8 +774,8 @@ static void bigmac_tx(struct bigmac *bp)
                if (this->tx_flags & TXD_OWN)
                        break;
                skb = bp->tx_skbs[elem];
-               bp->enet_stats.tx_packets++;
-               bp->enet_stats.tx_bytes += skb->len;
+               dev->stats.tx_packets++;
+               dev->stats.tx_bytes += skb->len;
                dma_unmap_single(&bp->bigmac_op->dev,
                                 this->tx_addr, skb->len,
                                 DMA_TO_DEVICE);
@@ -811,12 +811,12 @@ static void bigmac_rx(struct bigmac *bp)
 
                /* Check for errors. */
                if (len < ETH_ZLEN) {
-                       bp->enet_stats.rx_errors++;
-                       bp->enet_stats.rx_length_errors++;
+                       bp->dev->stats.rx_errors++;
+                       bp->dev->stats.rx_length_errors++;
 
        drop_it:
                        /* Return it to the BigMAC. */
-                       bp->enet_stats.rx_dropped++;
+                       bp->dev->stats.rx_dropped++;
                        this->rx_flags =
                                (RXD_OWN | ((RX_BUF_ALLOC_SIZE - 34) & RXD_LENGTH));
                        goto next;
@@ -875,8 +875,8 @@ static void bigmac_rx(struct bigmac *bp)
                /* No checksums done by the BigMAC ;-( */
                skb->protocol = eth_type_trans(skb, bp->dev);
                netif_rx(skb);
-               bp->enet_stats.rx_packets++;
-               bp->enet_stats.rx_bytes += len;
+               bp->dev->stats.rx_packets++;
+               bp->dev->stats.rx_bytes += len;
        next:
                elem = NEXT_RX(elem);
                this = &rxbase[elem];
@@ -987,7 +987,7 @@ static struct net_device_stats *bigmac_get_stats(struct net_device *dev)
        struct bigmac *bp = netdev_priv(dev);
 
        bigmac_get_counters(bp, bp->bregs);
-       return &bp->enet_stats;
+       return &dev->stats;
 }
 
 static void bigmac_set_multicast(struct net_device *dev)
index 532fc56830cf319b3067b3caa6c8149f1ac115d6..ee56930475a8e5941c468b09d84da8dbf5314a94 100644 (file)
@@ -311,7 +311,6 @@ struct bigmac {
        enum bigmac_timer_state timer_state;
        unsigned int            timer_ticks;
 
-       struct net_device_stats enet_stats;
        struct platform_device  *qec_op;
        struct platform_device  *bigmac_op;
        struct net_device       *dev;
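
sunbmac (and sunhme below) drop their driver-private struct net_device_stats in favor of the stats block already embedded in every struct net_device, so nothing needs copying and the .ndo_get_stats hook can simply hand back &dev->stats. In sketch form (hypothetical driver):

    #include <linux/netdevice.h>

    static void foo_rx_complete(struct net_device *dev, unsigned int len)
    {
            dev->stats.rx_packets++;        /* count straight into the netdev */
            dev->stats.rx_bytes += len;
    }

    static struct net_device_stats *foo_get_stats(struct net_device *dev)
    {
            return &dev->stats;             /* nothing to copy or merge */
    }
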
index 5c5952e782cd223c0aee844d414ee6c749c35804..fa607d062cb3130eff15295f61a8efce7c6a969c 100644 (file)
@@ -1250,12 +1250,18 @@ static void gem_stop_dma(struct gem *gp)
 
 
 // XXX dbl check what that function should do when called on PCS PHY
-static void gem_begin_auto_negotiation(struct gem *gp, struct ethtool_cmd *ep)
+static void gem_begin_auto_negotiation(struct gem *gp,
+                                      const struct ethtool_link_ksettings *ep)
 {
        u32 advertise, features;
        int autoneg;
        int speed;
        int duplex;
+       u32 advertising;
+
+       if (ep)
+               ethtool_convert_link_mode_to_legacy_u32(
+                       &advertising, ep->link_modes.advertising);
 
        if (gp->phy_type != phy_mii_mdio0 &&
            gp->phy_type != phy_mii_mdio1)
@@ -1278,13 +1284,13 @@ static void gem_begin_auto_negotiation(struct gem *gp, struct ethtool_cmd *ep)
        /* Setup link parameters */
        if (!ep)
                goto start_aneg;
-       if (ep->autoneg == AUTONEG_ENABLE) {
-               advertise = ep->advertising;
+       if (ep->base.autoneg == AUTONEG_ENABLE) {
+               advertise = advertising;
                autoneg = 1;
        } else {
                autoneg = 0;
-               speed = ethtool_cmd_speed(ep);
-               duplex = ep->duplex;
+               speed = ep->base.speed;
+               duplex = ep->base.duplex;
        }
 
 start_aneg:
@@ -2515,85 +2521,96 @@ static void gem_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info
        strlcpy(info->bus_info, pci_name(gp->pdev), sizeof(info->bus_info));
 }
 
-static int gem_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+static int gem_get_link_ksettings(struct net_device *dev,
+                                 struct ethtool_link_ksettings *cmd)
 {
        struct gem *gp = netdev_priv(dev);
+       u32 supported, advertising;
 
        if (gp->phy_type == phy_mii_mdio0 ||
            gp->phy_type == phy_mii_mdio1) {
                if (gp->phy_mii.def)
-                       cmd->supported = gp->phy_mii.def->features;
+                       supported = gp->phy_mii.def->features;
                else
-                       cmd->supported = (SUPPORTED_10baseT_Half |
+                       supported = (SUPPORTED_10baseT_Half |
                                          SUPPORTED_10baseT_Full);
 
                /* XXX hardcoded stuff for now */
-               cmd->port = PORT_MII;
-               cmd->transceiver = XCVR_EXTERNAL;
-               cmd->phy_address = 0; /* XXX fixed PHYAD */
+               cmd->base.port = PORT_MII;
+               cmd->base.phy_address = 0; /* XXX fixed PHYAD */
 
                /* Return current PHY settings */
-               cmd->autoneg = gp->want_autoneg;
-               ethtool_cmd_speed_set(cmd, gp->phy_mii.speed);
-               cmd->duplex = gp->phy_mii.duplex;
-               cmd->advertising = gp->phy_mii.advertising;
+               cmd->base.autoneg = gp->want_autoneg;
+               cmd->base.speed = gp->phy_mii.speed;
+               cmd->base.duplex = gp->phy_mii.duplex;
+               advertising = gp->phy_mii.advertising;
 
                /* If we started with a forced mode, we don't have a default
                 * advertise set, we need to return something sensible so
                 * userland can re-enable autoneg properly.
                 */
-               if (cmd->advertising == 0)
-                       cmd->advertising = cmd->supported;
+               if (advertising == 0)
+                       advertising = supported;
        } else { // XXX PCS ?
-               cmd->supported =
+               supported =
                        (SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full |
                         SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full |
                         SUPPORTED_Autoneg);
-               cmd->advertising = cmd->supported;
-               ethtool_cmd_speed_set(cmd, 0);
-               cmd->duplex = cmd->port = cmd->phy_address =
-                       cmd->transceiver = cmd->autoneg = 0;
+               advertising = supported;
+               cmd->base.speed = 0;
+               cmd->base.duplex = 0;
+               cmd->base.port = 0;
+               cmd->base.phy_address = 0;
+               cmd->base.autoneg = 0;
 
                /* serdes means usually a Fibre connector, with most fixed */
                if (gp->phy_type == phy_serdes) {
-                       cmd->port = PORT_FIBRE;
-                       cmd->supported = (SUPPORTED_1000baseT_Half |
+                       cmd->base.port = PORT_FIBRE;
+                       supported = (SUPPORTED_1000baseT_Half |
                                SUPPORTED_1000baseT_Full |
                                SUPPORTED_FIBRE | SUPPORTED_Autoneg |
                                SUPPORTED_Pause | SUPPORTED_Asym_Pause);
-                       cmd->advertising = cmd->supported;
-                       cmd->transceiver = XCVR_INTERNAL;
+                       advertising = supported;
                        if (gp->lstate == link_up)
-                               ethtool_cmd_speed_set(cmd, SPEED_1000);
-                       cmd->duplex = DUPLEX_FULL;
-                       cmd->autoneg = 1;
+                               cmd->base.speed = SPEED_1000;
+                       cmd->base.duplex = DUPLEX_FULL;
+                       cmd->base.autoneg = 1;
                }
        }
-       cmd->maxtxpkt = cmd->maxrxpkt = 0;
+
+       ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
+                                               supported);
+       ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
+                                               advertising);
 
        return 0;
 }
 
-static int gem_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+static int gem_set_link_ksettings(struct net_device *dev,
+                                 const struct ethtool_link_ksettings *cmd)
 {
        struct gem *gp = netdev_priv(dev);
-       u32 speed = ethtool_cmd_speed(cmd);
+       u32 speed = cmd->base.speed;
+       u32 advertising;
+
+       ethtool_convert_link_mode_to_legacy_u32(&advertising,
+                                               cmd->link_modes.advertising);
 
        /* Verify the settings we care about. */
-       if (cmd->autoneg != AUTONEG_ENABLE &&
-           cmd->autoneg != AUTONEG_DISABLE)
+       if (cmd->base.autoneg != AUTONEG_ENABLE &&
+           cmd->base.autoneg != AUTONEG_DISABLE)
                return -EINVAL;
 
-       if (cmd->autoneg == AUTONEG_ENABLE &&
-           cmd->advertising == 0)
+       if (cmd->base.autoneg == AUTONEG_ENABLE &&
+           advertising == 0)
                return -EINVAL;
 
-       if (cmd->autoneg == AUTONEG_DISABLE &&
+       if (cmd->base.autoneg == AUTONEG_DISABLE &&
            ((speed != SPEED_1000 &&
              speed != SPEED_100 &&
              speed != SPEED_10) ||
-            (cmd->duplex != DUPLEX_HALF &&
-             cmd->duplex != DUPLEX_FULL)))
+            (cmd->base.duplex != DUPLEX_HALF &&
+             cmd->base.duplex != DUPLEX_FULL)))
                return -EINVAL;
 
        /* Apply settings and restart link process. */
@@ -2666,13 +2683,13 @@ static int gem_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
 static const struct ethtool_ops gem_ethtool_ops = {
        .get_drvinfo            = gem_get_drvinfo,
        .get_link               = ethtool_op_get_link,
-       .get_settings           = gem_get_settings,
-       .set_settings           = gem_set_settings,
        .nway_reset             = gem_nway_reset,
        .get_msglevel           = gem_get_msglevel,
        .set_msglevel           = gem_set_msglevel,
        .get_wol                = gem_get_wol,
        .set_wol                = gem_set_wol,
+       .get_link_ksettings     = gem_get_link_ksettings,
+       .set_link_ksettings     = gem_set_link_ksettings,
 };
 
 static int gem_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
index 72ff05cd3ed80c883d2bc34a969e8bdb1b61992a..a6cc9a2d41c1992891cf6678d82cf1015372454f 100644 (file)
@@ -933,7 +933,7 @@ static void happy_meal_stop(struct happy_meal *hp, void __iomem *gregs)
 /* hp->happy_lock must be held */
 static void happy_meal_get_counters(struct happy_meal *hp, void __iomem *bregs)
 {
-       struct net_device_stats *stats = &hp->net_stats;
+       struct net_device_stats *stats = &hp->dev->stats;
 
        stats->rx_crc_errors += hme_read32(hp, bregs + BMAC_RCRCECTR);
        hme_write32(hp, bregs + BMAC_RCRCECTR, 0);
@@ -1294,9 +1294,10 @@ static void happy_meal_init_rings(struct happy_meal *hp)
 }
 
 /* hp->happy_lock must be held */
-static void happy_meal_begin_auto_negotiation(struct happy_meal *hp,
-                                             void __iomem *tregs,
-                                             struct ethtool_cmd *ep)
+static void
+happy_meal_begin_auto_negotiation(struct happy_meal *hp,
+                                 void __iomem *tregs,
+                                 const struct ethtool_link_ksettings *ep)
 {
        int timeout;
 
@@ -1309,7 +1310,7 @@ static void happy_meal_begin_auto_negotiation(struct happy_meal *hp,
        /* XXX Check BMSR_ANEGCAPABLE, should not be necessary though. */
 
        hp->sw_advertise = happy_meal_tcvr_read(hp, tregs, MII_ADVERTISE);
-       if (ep == NULL || ep->autoneg == AUTONEG_ENABLE) {
+       if (!ep || ep->base.autoneg == AUTONEG_ENABLE) {
                /* Advertise everything we can support. */
                if (hp->sw_bmsr & BMSR_10HALF)
                        hp->sw_advertise |= (ADVERTISE_10HALF);
@@ -1384,14 +1385,14 @@ force_link:
                /* Disable auto-negotiation in BMCR, enable the duplex and
                 * speed setting, init the timer state machine, and fire it off.
                 */
-               if (ep == NULL || ep->autoneg == AUTONEG_ENABLE) {
+               if (!ep || ep->base.autoneg == AUTONEG_ENABLE) {
                        hp->sw_bmcr = BMCR_SPEED100;
                } else {
-                       if (ethtool_cmd_speed(ep) == SPEED_100)
+                       if (ep->base.speed == SPEED_100)
                                hp->sw_bmcr = BMCR_SPEED100;
                        else
                                hp->sw_bmcr = 0;
-                       if (ep->duplex == DUPLEX_FULL)
+                       if (ep->base.duplex == DUPLEX_FULL)
                                hp->sw_bmcr |= BMCR_FULLDPLX;
                }
                happy_meal_tcvr_write(hp, tregs, MII_BMCR, hp->sw_bmcr);
@@ -1946,7 +1947,7 @@ static void happy_meal_tx(struct happy_meal *hp)
                                break;
                }
                hp->tx_skbs[elem] = NULL;
-               hp->net_stats.tx_bytes += skb->len;
+               dev->stats.tx_bytes += skb->len;
 
                for (frag = 0; frag <= skb_shinfo(skb)->nr_frags; frag++) {
                        dma_addr = hme_read_desc32(hp, &this->tx_addr);
@@ -1963,7 +1964,7 @@ static void happy_meal_tx(struct happy_meal *hp)
                }
 
                dev_kfree_skb_irq(skb);
-               hp->net_stats.tx_packets++;
+               dev->stats.tx_packets++;
        }
        hp->tx_old = elem;
        TXD((">"));
@@ -2008,17 +2009,17 @@ static void happy_meal_rx(struct happy_meal *hp, struct net_device *dev)
                /* Check for errors. */
                if ((len < ETH_ZLEN) || (flags & RXFLAG_OVERFLOW)) {
                        RXD(("ERR(%08x)]", flags));
-                       hp->net_stats.rx_errors++;
+                       dev->stats.rx_errors++;
                        if (len < ETH_ZLEN)
-                               hp->net_stats.rx_length_errors++;
+                               dev->stats.rx_length_errors++;
                        if (len & (RXFLAG_OVERFLOW >> 16)) {
-                               hp->net_stats.rx_over_errors++;
-                               hp->net_stats.rx_fifo_errors++;
+                               dev->stats.rx_over_errors++;
+                               dev->stats.rx_fifo_errors++;
                        }
 
                        /* Return it to the Happy meal. */
        drop_it:
-                       hp->net_stats.rx_dropped++;
+                       dev->stats.rx_dropped++;
                        hme_write_rxd(hp, this,
                                      (RXFLAG_OWN|((RX_BUF_ALLOC_SIZE-RX_OFFSET)<<16)),
                                      dma_addr);
@@ -2083,8 +2084,8 @@ static void happy_meal_rx(struct happy_meal *hp, struct net_device *dev)
                skb->protocol = eth_type_trans(skb, dev);
                netif_rx(skb);
 
-               hp->net_stats.rx_packets++;
-               hp->net_stats.rx_bytes += len;
+               dev->stats.rx_packets++;
+               dev->stats.rx_bytes += len;
        next:
                elem = NEXT_RX(elem);
                this = &rxbase[elem];
@@ -2395,7 +2396,7 @@ static struct net_device_stats *happy_meal_get_stats(struct net_device *dev)
        happy_meal_get_counters(hp, hp->bigmacregs);
        spin_unlock_irq(&hp->happy_lock);
 
-       return &hp->net_stats;
+       return &dev->stats;
 }
 
 static void happy_meal_set_multicast(struct net_device *dev)
@@ -2434,20 +2435,21 @@ static void happy_meal_set_multicast(struct net_device *dev)
 }
 
 /* Ethtool support... */
-static int hme_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+static int hme_get_link_ksettings(struct net_device *dev,
+                                 struct ethtool_link_ksettings *cmd)
 {
        struct happy_meal *hp = netdev_priv(dev);
        u32 speed;
+       u32 supported;
 
-       cmd->supported =
+       supported =
                (SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full |
                 SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full |
                 SUPPORTED_Autoneg | SUPPORTED_TP | SUPPORTED_MII);
 
        /* XXX hardcoded stuff for now */
-       cmd->port = PORT_TP; /* XXX no MII support */
-       cmd->transceiver = XCVR_INTERNAL; /* XXX no external xcvr support */
-       cmd->phy_address = 0; /* XXX fixed PHYAD */
+       cmd->base.port = PORT_TP; /* XXX no MII support */
+       cmd->base.phy_address = 0; /* XXX fixed PHYAD */
 
        /* Record PHY settings. */
        spin_lock_irq(&hp->happy_lock);
@@ -2456,41 +2458,45 @@ static int hme_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
        spin_unlock_irq(&hp->happy_lock);
 
        if (hp->sw_bmcr & BMCR_ANENABLE) {
-               cmd->autoneg = AUTONEG_ENABLE;
+               cmd->base.autoneg = AUTONEG_ENABLE;
                speed = ((hp->sw_lpa & (LPA_100HALF | LPA_100FULL)) ?
                         SPEED_100 : SPEED_10);
                if (speed == SPEED_100)
-                       cmd->duplex =
+                       cmd->base.duplex =
                                (hp->sw_lpa & (LPA_100FULL)) ?
                                DUPLEX_FULL : DUPLEX_HALF;
                else
-                       cmd->duplex =
+                       cmd->base.duplex =
                                (hp->sw_lpa & (LPA_10FULL)) ?
                                DUPLEX_FULL : DUPLEX_HALF;
        } else {
-               cmd->autoneg = AUTONEG_DISABLE;
+               cmd->base.autoneg = AUTONEG_DISABLE;
                speed = (hp->sw_bmcr & BMCR_SPEED100) ? SPEED_100 : SPEED_10;
-               cmd->duplex =
+               cmd->base.duplex =
                        (hp->sw_bmcr & BMCR_FULLDPLX) ?
                        DUPLEX_FULL : DUPLEX_HALF;
        }
-       ethtool_cmd_speed_set(cmd, speed);
+       cmd->base.speed = speed;
+       ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
+                                               supported);
+
        return 0;
 }
 
-static int hme_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+static int hme_set_link_ksettings(struct net_device *dev,
+                                 const struct ethtool_link_ksettings *cmd)
 {
        struct happy_meal *hp = netdev_priv(dev);
 
        /* Verify the settings we care about. */
-       if (cmd->autoneg != AUTONEG_ENABLE &&
-           cmd->autoneg != AUTONEG_DISABLE)
+       if (cmd->base.autoneg != AUTONEG_ENABLE &&
+           cmd->base.autoneg != AUTONEG_DISABLE)
                return -EINVAL;
-       if (cmd->autoneg == AUTONEG_DISABLE &&
-           ((ethtool_cmd_speed(cmd) != SPEED_100 &&
-             ethtool_cmd_speed(cmd) != SPEED_10) ||
-            (cmd->duplex != DUPLEX_HALF &&
-             cmd->duplex != DUPLEX_FULL)))
+       if (cmd->base.autoneg == AUTONEG_DISABLE &&
+           ((cmd->base.speed != SPEED_100 &&
+             cmd->base.speed != SPEED_10) ||
+            (cmd->base.duplex != DUPLEX_HALF &&
+             cmd->base.duplex != DUPLEX_FULL)))
                return -EINVAL;
 
        /* Ok, do it to it. */
@@ -2537,10 +2543,10 @@ static u32 hme_get_link(struct net_device *dev)
 }
 
 static const struct ethtool_ops hme_ethtool_ops = {
-       .get_settings           = hme_get_settings,
-       .set_settings           = hme_set_settings,
        .get_drvinfo            = hme_get_drvinfo,
        .get_link               = hme_get_link,
+       .get_link_ksettings     = hme_get_link_ksettings,
+       .set_link_ksettings     = hme_set_link_ksettings,
 };
 
 static int hme_version_printed;
index 4a8d5b18dfd5738d9254706b84174eb801f6c562..3af540adb3c58e83b4265c77fa436d7d44da06c7 100644 (file)
@@ -418,8 +418,6 @@ struct happy_meal {
 
        int rx_new, tx_new, rx_old, tx_old;
 
-       struct net_device_stats   net_stats;      /* Statistical counters              */
-
 #if defined(CONFIG_SBUS) && defined(CONFIG_PCI)
        u32 (*read32)(void __iomem *);
        void (*write32)(void __iomem *, u32);
index 4cc2571f71c6b65a076071789cef36f537bc0ddd..0b95105f706007ae9e0ff4325ea9983f9d514ee4 100644 (file)
@@ -1,7 +1,7 @@
 /* sunvnet.c: Sun LDOM Virtual Network Driver.
  *
  * Copyright (C) 2007, 2008 David S. Miller <davem@davemloft.net>
- * Copyright (C) 2016 Oracle. All rights reserved.
+ * Copyright (C) 2016-2017 Oracle. All rights reserved.
  */
 
 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
@@ -77,11 +77,125 @@ static void vnet_set_msglevel(struct net_device *dev, u32 value)
        vp->msg_enable = value;
 }
 
+static const struct {
+       const char string[ETH_GSTRING_LEN];
+} ethtool_stats_keys[] = {
+       { "rx_packets" },
+       { "tx_packets" },
+       { "rx_bytes" },
+       { "tx_bytes" },
+       { "rx_errors" },
+       { "tx_errors" },
+       { "rx_dropped" },
+       { "tx_dropped" },
+       { "multicast" },
+       { "rx_length_errors" },
+       { "rx_frame_errors" },
+       { "rx_missed_errors" },
+       { "tx_carrier_errors" },
+       { "nports" },
+};
+
+static int vnet_get_sset_count(struct net_device *dev, int sset)
+{
+       struct vnet *vp = (struct vnet *)netdev_priv(dev);
+
+       switch (sset) {
+       case ETH_SS_STATS:
+               return ARRAY_SIZE(ethtool_stats_keys)
+                       + (NUM_VNET_PORT_STATS * vp->nports);
+       default:
+               return -EOPNOTSUPP;
+       }
+}
+
+static void vnet_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
+{
+       struct vnet *vp = (struct vnet *)netdev_priv(dev);
+       struct vnet_port *port;
+       char *p = (char *)buf;
+
+       switch (stringset) {
+       case ETH_SS_STATS:
+               memcpy(buf, &ethtool_stats_keys, sizeof(ethtool_stats_keys));
+               p += sizeof(ethtool_stats_keys);
+
+               rcu_read_lock();
+               list_for_each_entry_rcu(port, &vp->port_list, list) {
+                       snprintf(p, ETH_GSTRING_LEN, "p%u.%s-%pM",
+                                port->q_index, port->switch_port ? "s" : "q",
+                                port->raddr);
+                       p += ETH_GSTRING_LEN;
+                       snprintf(p, ETH_GSTRING_LEN, "p%u.rx_packets",
+                                port->q_index);
+                       p += ETH_GSTRING_LEN;
+                       snprintf(p, ETH_GSTRING_LEN, "p%u.tx_packets",
+                                port->q_index);
+                       p += ETH_GSTRING_LEN;
+                       snprintf(p, ETH_GSTRING_LEN, "p%u.rx_bytes",
+                                port->q_index);
+                       p += ETH_GSTRING_LEN;
+                       snprintf(p, ETH_GSTRING_LEN, "p%u.tx_bytes",
+                                port->q_index);
+                       p += ETH_GSTRING_LEN;
+                       snprintf(p, ETH_GSTRING_LEN, "p%u.event_up",
+                                port->q_index);
+                       p += ETH_GSTRING_LEN;
+                       snprintf(p, ETH_GSTRING_LEN, "p%u.event_reset",
+                                port->q_index);
+                       p += ETH_GSTRING_LEN;
+               }
+               rcu_read_unlock();
+               break;
+       default:
+               WARN_ON(1);
+               break;
+       }
+}
+
+static void vnet_get_ethtool_stats(struct net_device *dev,
+                                  struct ethtool_stats *estats, u64 *data)
+{
+       struct vnet *vp = (struct vnet *)netdev_priv(dev);
+       struct vnet_port *port;
+       int i = 0;
+
+       data[i++] = dev->stats.rx_packets;
+       data[i++] = dev->stats.tx_packets;
+       data[i++] = dev->stats.rx_bytes;
+       data[i++] = dev->stats.tx_bytes;
+       data[i++] = dev->stats.rx_errors;
+       data[i++] = dev->stats.tx_errors;
+       data[i++] = dev->stats.rx_dropped;
+       data[i++] = dev->stats.tx_dropped;
+       data[i++] = dev->stats.multicast;
+       data[i++] = dev->stats.rx_length_errors;
+       data[i++] = dev->stats.rx_frame_errors;
+       data[i++] = dev->stats.rx_missed_errors;
+       data[i++] = dev->stats.tx_carrier_errors;
+       data[i++] = vp->nports;
+
+       rcu_read_lock();
+       list_for_each_entry_rcu(port, &vp->port_list, list) {
+               data[i++] = port->q_index;
+               data[i++] = port->stats.rx_packets;
+               data[i++] = port->stats.tx_packets;
+               data[i++] = port->stats.rx_bytes;
+               data[i++] = port->stats.tx_bytes;
+               data[i++] = port->stats.event_up;
+               data[i++] = port->stats.event_reset;
+       }
+       rcu_read_unlock();
+}
+
 static const struct ethtool_ops vnet_ethtool_ops = {
        .get_drvinfo            = vnet_get_drvinfo,
        .get_msglevel           = vnet_get_msglevel,
        .set_msglevel           = vnet_set_msglevel,
        .get_link               = ethtool_op_get_link,
+       .get_sset_count         = vnet_get_sset_count,
+       .get_strings            = vnet_get_strings,
+       .get_ethtool_stats      = vnet_get_ethtool_stats,
 };
 
 static LIST_HEAD(vnet_list);
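
These three callbacks form a single contract: get_sset_count() returns N, get_strings() must emit exactly N labels of ETH_GSTRING_LEN bytes, and get_ethtool_stats() must fill exactly N u64 values in the same order, because ethtool -S pairs names and values purely by index. Here N is the 14 global keys plus NUM_VNET_PORT_STATS per port. A minimal fixed-size sketch of the contract (hypothetical foo_ names):

    #include <linux/ethtool.h>
    #include <linux/kernel.h>
    #include <linux/netdevice.h>
    #include <linux/string.h>

    static const char foo_stat_names[][ETH_GSTRING_LEN] = {
            "rx_packets", "tx_packets",
    };

    static int foo_get_sset_count(struct net_device *dev, int sset)
    {
            return (sset == ETH_SS_STATS) ? ARRAY_SIZE(foo_stat_names)
                                          : -EOPNOTSUPP;
    }

    static void foo_get_strings(struct net_device *dev, u32 sset, u8 *buf)
    {
            if (sset == ETH_SS_STATS)
                    memcpy(buf, foo_stat_names, sizeof(foo_stat_names));
    }

    static void foo_get_ethtool_stats(struct net_device *dev,
                                      struct ethtool_stats *stats, u64 *data)
    {
            data[0] = dev->stats.rx_packets;    /* same order as the names */
            data[1] = dev->stats.tx_packets;
    }
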
index fa2d11ca9b81e49d84a0b66dbe262cd3977c0560..9e86833249d48beed95fe8c4d19774582f29dc75 100644 (file)
@@ -1,7 +1,7 @@
 /* sunvnet.c: Sun LDOM Virtual Network Driver.
  *
  * Copyright (C) 2007, 2008 David S. Miller <davem@davemloft.net>
- * Copyright (C) 2016 Oracle. All rights reserved.
+ * Copyright (C) 2016-2017 Oracle. All rights reserved.
  */
 
 #include <linux/module.h>
@@ -43,7 +43,6 @@ MODULE_LICENSE("GPL");
 MODULE_VERSION("1.1");
 
 static int __vnet_tx_trigger(struct vnet_port *port, u32 start);
-static void vnet_port_reset(struct vnet_port *port);
 
 static inline u32 vnet_tx_dring_avail(struct vio_dring_state *dr)
 {
@@ -410,8 +409,12 @@ static int vnet_rx_one(struct vnet_port *port, struct vio_net_desc *desc)
 
        skb->ip_summed = port->switch_port ? CHECKSUM_NONE : CHECKSUM_PARTIAL;
 
+       if (unlikely(is_multicast_ether_addr(eth_hdr(skb)->h_dest)))
+               dev->stats.multicast++;
        dev->stats.rx_packets++;
        dev->stats.rx_bytes += len;
+       port->stats.rx_packets++;
+       port->stats.rx_bytes += len;
        napi_gro_receive(&port->napi, skb);
        return 0;
 
@@ -747,6 +750,13 @@ static int vnet_event_napi(struct vnet_port *port, int budget)
 
        /* RESET takes precedent over any other event */
        if (port->rx_event & LDC_EVENT_RESET) {
+               /* a link went down */
+
+               if (port->vsw == 1) {
+                       netif_tx_stop_all_queues(dev);
+                       netif_carrier_off(dev);
+               }
+
                vio_link_state_change(vio, LDC_EVENT_RESET);
                vnet_port_reset(port);
                vio_port_up(vio);
@@ -762,12 +772,21 @@ static int vnet_event_napi(struct vnet_port *port, int budget)
                        maybe_tx_wakeup(port);
 
                port->rx_event = 0;
+               port->stats.event_reset++;
                return 0;
        }
 
        if (port->rx_event & LDC_EVENT_UP) {
+               /* a link came up */
+
+               if (port->vsw == 1) {
+                       netif_carrier_on(port->dev);
+                       netif_tx_start_all_queues(port->dev);
+               }
+
                vio_link_state_change(vio, LDC_EVENT_UP);
                port->rx_event = 0;
+               port->stats.event_up++;
                return 0;
        }
 
@@ -1417,6 +1436,8 @@ ldc_start_done:
 
        dev->stats.tx_packets++;
        dev->stats.tx_bytes += port->tx_bufs[txi].skb->len;
+       port->stats.tx_packets++;
+       port->stats.tx_bytes += port->tx_bufs[txi].skb->len;
 
        dr->prod = (dr->prod + 1) & (VNET_TX_RING_SIZE - 1);
        if (unlikely(vnet_tx_dring_avail(dr) < 1)) {
@@ -1631,7 +1652,7 @@ void sunvnet_port_free_tx_bufs_common(struct vnet_port *port)
 }
 EXPORT_SYMBOL_GPL(sunvnet_port_free_tx_bufs_common);
 
-static void vnet_port_reset(struct vnet_port *port)
+void vnet_port_reset(struct vnet_port *port)
 {
        del_timer(&port->clean_timer);
        sunvnet_port_free_tx_bufs_common(port);
@@ -1639,6 +1660,7 @@ static void vnet_port_reset(struct vnet_port *port)
        port->tso = (port->vsw == 0);  /* no tso in vsw, misbehaves in bridge */
        port->tsolen = 0;
 }
+EXPORT_SYMBOL_GPL(vnet_port_reset);
 
 static int vnet_port_alloc_tx_ring(struct vnet_port *port)
 {
@@ -1708,20 +1730,32 @@ EXPORT_SYMBOL_GPL(sunvnet_poll_controller_common);
 void sunvnet_port_add_txq_common(struct vnet_port *port)
 {
        struct vnet *vp = port->vp;
-       int n;
+       int smallest = 0;
+       int i;
+
+       /* find the first least-used q
+        * When there are more ldoms than q's, we start to
+        * double up on ports per queue.
+        */
+       for (i = 0; i < VNET_MAX_TXQS; i++) {
+               if (vp->q_used[i] == 0) {
+                       smallest = i;
+                       break;
+               }
+               if (vp->q_used[i] < vp->q_used[smallest])
+                       smallest = i;
+       }
 
-       n = vp->nports++;
-       n = n & (VNET_MAX_TXQS - 1);
-       port->q_index = n;
-       netif_tx_wake_queue(netdev_get_tx_queue(VNET_PORT_TO_NET_DEVICE(port),
-                                               port->q_index));
+       vp->nports++;
+       vp->q_used[smallest]++;
+       port->q_index = smallest;
 }
 EXPORT_SYMBOL_GPL(sunvnet_port_add_txq_common);
 
 void sunvnet_port_rm_txq_common(struct vnet_port *port)
 {
        port->vp->nports--;
-       netif_tx_stop_queue(netdev_get_tx_queue(VNET_PORT_TO_NET_DEVICE(port),
-                                               port->q_index));
+       port->vp->q_used[port->q_index]--;
+       port->q_index = 0;
 }
 EXPORT_SYMBOL_GPL(sunvnet_port_rm_txq_common);
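
Queue assignment changes from round-robin to least-used: the old code derived q_index from an ever-growing nports counter masked by VNET_MAX_TXQS - 1, which drifts as ports come and go; the new code tracks live usage in q_used[] and only starts doubling up ports per queue once every queue is taken. The selection loop in isolation:

    #include <linux/types.h>

    /* First completely free queue wins immediately; otherwise take
     * the least-loaded one. Mirrors the loop in the hunk above.
     */
    static int foo_pick_txq(const u8 *q_used, int nqueues)
    {
            int smallest = 0;
            int i;

            for (i = 0; i < nqueues; i++) {
                    if (q_used[i] == 0)
                            return i;
                    if (q_used[i] < q_used[smallest])
                            smallest = i;
            }
            return smallest;
    }
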
index ce5c824128a3698acb31ddbac42555657863b10f..b20d6fa7ef25b798401e24a59398c893a026c462 100644 (file)
@@ -35,6 +35,19 @@ struct vnet_tx_entry {
 
 struct vnet;
 
+struct vnet_port_stats {
+       /* keep them all the same size */
+       u32 rx_bytes;
+       u32 tx_bytes;
+       u32 rx_packets;
+       u32 tx_packets;
+       u32 event_up;
+       u32 event_reset;
+       u32 q_placeholder;
+};
+
+#define NUM_VNET_PORT_STATS  (sizeof(struct vnet_port_stats) / sizeof(u32))
+
 /* Structure to describe a vnet-port or vsw-port in the MD.
  * If the vsw bit is set, this structure represents a vswitch
  * port, and the net_device can be found from ->dev. If the
@@ -44,6 +57,8 @@ struct vnet;
 struct vnet_port {
        struct vio_driver_state vio;
 
+       struct vnet_port_stats stats;
+
        struct hlist_node       hash;
        u8                      raddr[ETH_ALEN];
        unsigned                switch_port:1;
@@ -97,22 +112,15 @@ struct vnet_mcast_entry {
 };
 
 struct vnet {
-       /* Protects port_list and port_hash.  */
-       spinlock_t              lock;
-
+       spinlock_t              lock; /* Protects port_list and port_hash.  */
        struct net_device       *dev;
-
        u32                     msg_enable;
-
+       u8                      q_used[VNET_MAX_TXQS];
        struct list_head        port_list;
-
        struct hlist_head       port_hash[VNET_PORT_HASH_SIZE];
-
        struct vnet_mcast_entry *mcast_list;
-
        struct list_head        list;
        u64                     local_mac;
-
        int                     nports;
 };
 
@@ -139,6 +147,7 @@ int sunvnet_handle_attr_common(struct vio_driver_state *vio, void *arg);
 void sunvnet_handshake_complete_common(struct vio_driver_state *vio);
 int sunvnet_poll_common(struct napi_struct *napi, int budget);
 void sunvnet_port_free_tx_bufs_common(struct vnet_port *port);
+void vnet_port_reset(struct vnet_port *port);
 bool sunvnet_port_is_up_common(struct vnet_port *vnet);
 void sunvnet_port_add_txq_common(struct vnet_port *port);
 void sunvnet_port_rm_txq_common(struct vnet_port *port);
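
In the header, the "keep them all the same size" comment is load-bearing: NUM_VNET_PORT_STATS is derived as sizeof(struct vnet_port_stats) / sizeof(u32), so any u32 counter added to the struct automatically grows the per-port count that vnet_get_sset_count() reports, and q_placeholder reserves the slot that the q_index value fills at dump time. The same trick in isolation:

    #include <linux/types.h>

    struct foo_port_stats {         /* every member must stay a u32 */
            u32 rx_bytes;
            u32 tx_bytes;
            u32 placeholder;        /* slot filled by the port id when dumping */
    };

    #define NUM_FOO_PORT_STATS (sizeof(struct foo_port_stats) / sizeof(u32))
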
diff --git a/drivers/net/ethernet/synopsys/Kconfig b/drivers/net/ethernet/synopsys/Kconfig
new file mode 100644 (file)
index 0000000..a950388
--- /dev/null
@@ -0,0 +1,41 @@
+#
+# Synopsys network device configuration
+#
+
+config NET_VENDOR_SYNOPSYS
+       bool "Synopsys devices"
+       default y
+       ---help---
+         If you have a network (Ethernet) device belonging to this class, say Y.
+
+         Note that the answer to this question doesn't directly affect the
+         kernel: saying N will just cause the configurator to skip all
+         the questions about Synopsys devices. If you say Y, you will be asked
+         for your specific device in the following questions.
+
+if NET_VENDOR_SYNOPSYS
+
+config DWC_XLGMAC
+       tristate "Synopsys DWC Enterprise Ethernet (XLGMAC) driver support"
+       depends on HAS_IOMEM && HAS_DMA
+       select BITREVERSE
+       select CRC32
+       ---help---
+         This driver supports the Synopsys DesignWare Cores Enterprise
+         Ethernet (dwc-xlgmac).
+
+if DWC_XLGMAC
+
+config DWC_XLGMAC_PCI
+       tristate "XLGMAC PCI bus support"
+       depends on DWC_XLGMAC && PCI
+       ---help---
+         This selects the pci bus support for the dwc-xlgmac driver.
+         This driver was tested on Synopsys XLGMAC IP Prototyping Kit.
+
+         If you have a controller with this interface, say Y or M here.
+         If unsure, say N.
+
+endif # DWC_XLGMAC
+
+endif # NET_VENDOR_SYNOPSYS
diff --git a/drivers/net/ethernet/synopsys/Makefile b/drivers/net/ethernet/synopsys/Makefile
new file mode 100644 (file)
index 0000000..c06e2eb
--- /dev/null
@@ -0,0 +1,9 @@
+#
+# Makefile for the Synopsys network device drivers.
+#
+
+obj-$(CONFIG_DWC_XLGMAC) += dwc-xlgmac.o
+dwc-xlgmac-objs := dwc-xlgmac-net.o dwc-xlgmac-desc.o \
+                  dwc-xlgmac-hw.o dwc-xlgmac-common.o
+
+dwc-xlgmac-$(CONFIG_DWC_XLGMAC_PCI) += dwc-xlgmac-pci.o
diff --git a/drivers/net/ethernet/synopsys/dwc-xlgmac-common.c b/drivers/net/ethernet/synopsys/dwc-xlgmac-common.c
new file mode 100644 (file)
index 0000000..07def2b
--- /dev/null
@@ -0,0 +1,736 @@
+/* Synopsys DesignWare Core Enterprise Ethernet (XLGMAC) Driver
+ *
+ * Copyright (c) 2017 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is dual-licensed; you may select either version 2 of
+ * the GNU General Public License ("GPL") or BSD license ("BSD").
+ *
+ * This Synopsys DWC XLGMAC software driver and associated documentation
+ * (hereinafter the "Software") is an unsupported proprietary work of
+ * Synopsys, Inc. unless otherwise expressly agreed to in writing between
+ * Synopsys and you. The Software IS NOT an item of Licensed Software or a
+ * Licensed Product under any End User Software License Agreement or
+ * Agreement for Licensed Products with Synopsys or any supplement thereto.
+ * Synopsys is a registered trademark of Synopsys, Inc. Other names included
+ * in the SOFTWARE may be the trademarks of their respective owners.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+
+#include "dwc-xlgmac.h"
+#include "dwc-xlgmac-reg.h"
+
+MODULE_LICENSE("Dual BSD/GPL");
+
+static int debug = -1;
+module_param(debug, int, 0644);
+MODULE_PARM_DESC(debug, "DWC ethernet debug level (0=none,...,16=all)");
+static const u32 default_msg_level = (NETIF_MSG_LINK | NETIF_MSG_IFDOWN |
+                                     NETIF_MSG_IFUP);
+
+static unsigned char dev_addr[6] = {0, 0x55, 0x7b, 0xb5, 0x7d, 0xf7};
+
+static void xlgmac_read_mac_addr(struct xlgmac_pdata *pdata)
+{
+       struct net_device *netdev = pdata->netdev;
+
+       /* Currently it uses a static MAC address for testing */
+       memcpy(pdata->mac_addr, dev_addr, netdev->addr_len);
+}
+
+static void xlgmac_default_config(struct xlgmac_pdata *pdata)
+{
+       pdata->tx_osp_mode = DMA_OSP_ENABLE;
+       pdata->tx_sf_mode = MTL_TSF_ENABLE;
+       pdata->rx_sf_mode = MTL_RSF_DISABLE;
+       pdata->pblx8 = DMA_PBL_X8_ENABLE;
+       pdata->tx_pbl = DMA_PBL_32;
+       pdata->rx_pbl = DMA_PBL_32;
+       pdata->tx_threshold = MTL_TX_THRESHOLD_128;
+       pdata->rx_threshold = MTL_RX_THRESHOLD_128;
+       pdata->tx_pause = 1;
+       pdata->rx_pause = 1;
+       pdata->phy_speed = SPEED_25000;
+       pdata->sysclk_rate = XLGMAC_SYSCLOCK;
+
+       strlcpy(pdata->drv_name, XLGMAC_DRV_NAME, sizeof(pdata->drv_name));
+       strlcpy(pdata->drv_ver, XLGMAC_DRV_VERSION, sizeof(pdata->drv_ver));
+}
+
+static void xlgmac_init_all_ops(struct xlgmac_pdata *pdata)
+{
+       xlgmac_init_desc_ops(&pdata->desc_ops);
+       xlgmac_init_hw_ops(&pdata->hw_ops);
+}
+
+static int xlgmac_init(struct xlgmac_pdata *pdata)
+{
+       struct xlgmac_hw_ops *hw_ops = &pdata->hw_ops;
+       struct net_device *netdev = pdata->netdev;
+       unsigned int i;
+       int ret;
+
+       /* Set default configuration data */
+       xlgmac_default_config(pdata);
+
+       /* Set irq, base_addr, MAC address */
+       netdev->irq = pdata->dev_irq;
+       netdev->base_addr = (unsigned long)pdata->mac_regs;
+       xlgmac_read_mac_addr(pdata);
+       memcpy(netdev->dev_addr, pdata->mac_addr, netdev->addr_len);
+
+       /* Set all the function pointers */
+       xlgmac_init_all_ops(pdata);
+
+       /* Issue software reset to device */
+       hw_ops->exit(pdata);
+
+       /* Populate the hardware features */
+       xlgmac_get_all_hw_features(pdata);
+       xlgmac_print_all_hw_features(pdata);
+
+       /* TODO: Set the PHY mode to XLGMII */
+
+       /* Set the DMA mask */
+       ret = dma_set_mask_and_coherent(pdata->dev,
+                                       DMA_BIT_MASK(pdata->hw_feat.dma_width));
+       if (ret) {
+               dev_err(pdata->dev, "dma_set_mask_and_coherent failed\n");
+               return ret;
+       }
+
+       /* Channel and ring params initialization
+        *  pdata->channel_count;
+        *  pdata->tx_ring_count;
+        *  pdata->rx_ring_count;
+        *  pdata->tx_desc_count;
+        *  pdata->rx_desc_count;
+        */
+       BUILD_BUG_ON_NOT_POWER_OF_2(XLGMAC_TX_DESC_CNT);
+       pdata->tx_desc_count = XLGMAC_TX_DESC_CNT;
+       if (pdata->tx_desc_count & (pdata->tx_desc_count - 1)) {
+               dev_err(pdata->dev, "tx descriptor count (%d) is not valid\n",
+                       pdata->tx_desc_count);
+               ret = -EINVAL;
+               return ret;
+       }
+       BUILD_BUG_ON_NOT_POWER_OF_2(XLGMAC_RX_DESC_CNT);
+       pdata->rx_desc_count = XLGMAC_RX_DESC_CNT;
+       if (pdata->rx_desc_count & (pdata->rx_desc_count - 1)) {
+               dev_err(pdata->dev, "rx descriptor count (%d) is not valid\n",
+                       pdata->rx_desc_count);
+               ret = -EINVAL;
+               return ret;
+       }
+
+       pdata->tx_ring_count = min_t(unsigned int, num_online_cpus(),
+                                    pdata->hw_feat.tx_ch_cnt);
+       pdata->tx_ring_count = min_t(unsigned int, pdata->tx_ring_count,
+                                    pdata->hw_feat.tx_q_cnt);
+       pdata->tx_q_count = pdata->tx_ring_count;
+       ret = netif_set_real_num_tx_queues(netdev, pdata->tx_q_count);
+       if (ret) {
+               dev_err(pdata->dev, "error setting real tx queue count\n");
+               return ret;
+       }
+
+       pdata->rx_ring_count = min_t(unsigned int,
+                                    netif_get_num_default_rss_queues(),
+                                    pdata->hw_feat.rx_ch_cnt);
+       pdata->rx_ring_count = min_t(unsigned int, pdata->rx_ring_count,
+                                    pdata->hw_feat.rx_q_cnt);
+       pdata->rx_q_count = pdata->rx_ring_count;
+       ret = netif_set_real_num_rx_queues(netdev, pdata->rx_q_count);
+       if (ret) {
+               dev_err(pdata->dev, "error setting real rx queue count\n");
+               return ret;
+       }
+
+       pdata->channel_count =
+               max_t(unsigned int, pdata->tx_ring_count, pdata->rx_ring_count);
+
+       /* Initialize RSS hash key and lookup table */
+       netdev_rss_key_fill(pdata->rss_key, sizeof(pdata->rss_key));
+
+       for (i = 0; i < XLGMAC_RSS_MAX_TABLE_SIZE; i++)
+               pdata->rss_table[i] = XLGMAC_SET_REG_BITS(
+                                       pdata->rss_table[i],
+                                       MAC_RSSDR_DMCH_POS,
+                                       MAC_RSSDR_DMCH_LEN,
+                                       i % pdata->rx_ring_count);
+
+       pdata->rss_options = XLGMAC_SET_REG_BITS(
+                               pdata->rss_options,
+                               MAC_RSSCR_IP2TE_POS,
+                               MAC_RSSCR_IP2TE_LEN, 1);
+       pdata->rss_options = XLGMAC_SET_REG_BITS(
+                               pdata->rss_options,
+                               MAC_RSSCR_TCP4TE_POS,
+                               MAC_RSSCR_TCP4TE_LEN, 1);
+       pdata->rss_options = XLGMAC_SET_REG_BITS(
+                               pdata->rss_options,
+                               MAC_RSSCR_UDP4TE_POS,
+                               MAC_RSSCR_UDP4TE_LEN, 1);
+
+       /* Set device operations */
+       netdev->netdev_ops = xlgmac_get_netdev_ops();
+
+       /* Set device features */
+       if (pdata->hw_feat.tso) {
+               netdev->hw_features = NETIF_F_TSO;
+               netdev->hw_features |= NETIF_F_TSO6;
+               netdev->hw_features |= NETIF_F_SG;
+               netdev->hw_features |= NETIF_F_IP_CSUM;
+               netdev->hw_features |= NETIF_F_IPV6_CSUM;
+       } else if (pdata->hw_feat.tx_coe) {
+               netdev->hw_features = NETIF_F_IP_CSUM;
+               netdev->hw_features |= NETIF_F_IPV6_CSUM;
+       }
+
+       if (pdata->hw_feat.rx_coe) {
+               netdev->hw_features |= NETIF_F_RXCSUM;
+               netdev->hw_features |= NETIF_F_GRO;
+       }
+
+       if (pdata->hw_feat.rss)
+               netdev->hw_features |= NETIF_F_RXHASH;
+
+       netdev->vlan_features |= netdev->hw_features;
+
+       netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_RX;
+       if (pdata->hw_feat.sa_vlan_ins)
+               netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_TX;
+       if (pdata->hw_feat.vlhash)
+               netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_FILTER;
+
+       netdev->features |= netdev->hw_features;
+       pdata->netdev_features = netdev->features;
+
+       netdev->priv_flags |= IFF_UNICAST_FLT;
+
+       /* Use default watchdog timeout */
+       netdev->watchdog_timeo = 0;
+
+       /* Tx coalesce parameters initialization */
+       pdata->tx_usecs = XLGMAC_INIT_DMA_TX_USECS;
+       pdata->tx_frames = XLGMAC_INIT_DMA_TX_FRAMES;
+
+       /* Rx coalesce parameters initialization */
+       pdata->rx_riwt = hw_ops->usec_to_riwt(pdata, XLGMAC_INIT_DMA_RX_USECS);
+       pdata->rx_usecs = XLGMAC_INIT_DMA_RX_USECS;
+       pdata->rx_frames = XLGMAC_INIT_DMA_RX_FRAMES;
+
+       return 0;
+}
+
+int xlgmac_drv_probe(struct device *dev, struct xlgmac_resources *res)
+{
+       struct xlgmac_pdata *pdata;
+       struct net_device *netdev;
+       int ret;
+
+       netdev = alloc_etherdev_mq(sizeof(struct xlgmac_pdata),
+                                  XLGMAC_MAX_DMA_CHANNELS);
+
+       if (!netdev) {
+               dev_err(dev, "alloc_etherdev failed\n");
+               return -ENOMEM;
+       }
+
+       SET_NETDEV_DEV(netdev, dev);
+       dev_set_drvdata(dev, netdev);
+       pdata = netdev_priv(netdev);
+       pdata->dev = dev;
+       pdata->netdev = netdev;
+
+       pdata->dev_irq = res->irq;
+       pdata->mac_regs = res->addr;
+
+       mutex_init(&pdata->rss_mutex);
+       pdata->msg_enable = netif_msg_init(debug, default_msg_level);
+
+       ret = xlgmac_init(pdata);
+       if (ret) {
+               dev_err(dev, "xlgmac init failed\n");
+               goto err_free_netdev;
+       }
+
+       ret = register_netdev(netdev);
+       if (ret) {
+               dev_err(dev, "net device registration failed\n");
+               goto err_free_netdev;
+       }
+
+       return 0;
+
+err_free_netdev:
+       free_netdev(netdev);
+
+       return ret;
+}
+
+int xlgmac_drv_remove(struct device *dev)
+{
+       struct net_device *netdev = dev_get_drvdata(dev);
+
+       unregister_netdev(netdev);
+       free_netdev(netdev);
+
+       return 0;
+}
+
+void xlgmac_dump_tx_desc(struct xlgmac_pdata *pdata,
+                        struct xlgmac_ring *ring,
+                        unsigned int idx,
+                        unsigned int count,
+                        unsigned int flag)
+{
+       struct xlgmac_desc_data *desc_data;
+       struct xlgmac_dma_desc *dma_desc;
+
+       while (count--) {
+               desc_data = XLGMAC_GET_DESC_DATA(ring, idx);
+               dma_desc = desc_data->dma_desc;
+
+               netdev_dbg(pdata->netdev, "TX: dma_desc=%p, dma_desc_addr=%pad\n",
+                          desc_data->dma_desc, &desc_data->dma_desc_addr);
+               netdev_dbg(pdata->netdev,
+                          "TX_NORMAL_DESC[%d %s] = %08x:%08x:%08x:%08x\n", idx,
+                          (flag == 1) ? "QUEUED FOR TX" : "TX BY DEVICE",
+                          le32_to_cpu(dma_desc->desc0),
+                          le32_to_cpu(dma_desc->desc1),
+                          le32_to_cpu(dma_desc->desc2),
+                          le32_to_cpu(dma_desc->desc3));
+
+               idx++;
+       }
+}
+
+void xlgmac_dump_rx_desc(struct xlgmac_pdata *pdata,
+                        struct xlgmac_ring *ring,
+                        unsigned int idx)
+{
+       struct xlgmac_desc_data *desc_data;
+       struct xlgmac_dma_desc *dma_desc;
+
+       desc_data = XLGMAC_GET_DESC_DATA(ring, idx);
+       dma_desc = desc_data->dma_desc;
+
+       netdev_dbg(pdata->netdev, "RX: dma_desc=%p, dma_desc_addr=%pad\n",
+                  desc_data->dma_desc, &desc_data->dma_desc_addr);
+       netdev_dbg(pdata->netdev,
+                  "RX_NORMAL_DESC[%d RX BY DEVICE] = %08x:%08x:%08x:%08x\n",
+                  idx,
+                  le32_to_cpu(dma_desc->desc0),
+                  le32_to_cpu(dma_desc->desc1),
+                  le32_to_cpu(dma_desc->desc2),
+                  le32_to_cpu(dma_desc->desc3));
+}
+
+void xlgmac_print_pkt(struct net_device *netdev,
+                     struct sk_buff *skb, bool tx_rx)
+{
+       struct ethhdr *eth = (struct ethhdr *)skb->data;
+       unsigned char *buf = skb->data;
+       unsigned char buffer[128];
+       unsigned int i, j;
+
+       netdev_dbg(netdev, "\n************** SKB dump ****************\n");
+
+       netdev_dbg(netdev, "%s packet of %d bytes\n",
+                  (tx_rx ? "TX" : "RX"), skb->len);
+
+       netdev_dbg(netdev, "Dst MAC addr: %pM\n", eth->h_dest);
+       netdev_dbg(netdev, "Src MAC addr: %pM\n", eth->h_source);
+       netdev_dbg(netdev, "Protocol: %#06hx\n", ntohs(eth->h_proto));
+
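+       /* Hex-dump the payload: bytes are emitted in groups of four, with a
+        * wider gap every 16 bytes and a new debug line every 32 bytes
+        */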
+       for (i = 0, j = 0; i < skb->len;) {
+               j += snprintf(buffer + j, sizeof(buffer) - j, "%02hhx",
+                             buf[i++]);
+
+               if ((i % 32) == 0) {
+                       netdev_dbg(netdev, "  %#06x: %s\n", i - 32, buffer);
+                       j = 0;
+               } else if ((i % 16) == 0) {
+                       buffer[j++] = ' ';
+                       buffer[j++] = ' ';
+               } else if ((i % 4) == 0) {
+                       buffer[j++] = ' ';
+               }
+       }
+       if (i % 32)
+               netdev_dbg(netdev, "  %#06x: %s\n", i - (i % 32), buffer);
+
+       netdev_dbg(netdev, "\n************** SKB dump ****************\n");
+}
+
+void xlgmac_get_all_hw_features(struct xlgmac_pdata *pdata)
+{
+       struct xlgmac_hw_features *hw_feat = &pdata->hw_feat;
+       unsigned int mac_hfr0, mac_hfr1, mac_hfr2;
+
+       mac_hfr0 = readl(pdata->mac_regs + MAC_HWF0R);
+       mac_hfr1 = readl(pdata->mac_regs + MAC_HWF1R);
+       mac_hfr2 = readl(pdata->mac_regs + MAC_HWF2R);
+
+       memset(hw_feat, 0, sizeof(*hw_feat));
+
+       hw_feat->version = readl(pdata->mac_regs + MAC_VR);
+
+       /* Hardware feature register 0 */
+       hw_feat->phyifsel    = XLGMAC_GET_REG_BITS(mac_hfr0,
+                                               MAC_HWF0R_PHYIFSEL_POS,
+                                               MAC_HWF0R_PHYIFSEL_LEN);
+       hw_feat->vlhash      = XLGMAC_GET_REG_BITS(mac_hfr0,
+                                               MAC_HWF0R_VLHASH_POS,
+                                               MAC_HWF0R_VLHASH_LEN);
+       hw_feat->sma         = XLGMAC_GET_REG_BITS(mac_hfr0,
+                                               MAC_HWF0R_SMASEL_POS,
+                                               MAC_HWF0R_SMASEL_LEN);
+       hw_feat->rwk         = XLGMAC_GET_REG_BITS(mac_hfr0,
+                                               MAC_HWF0R_RWKSEL_POS,
+                                               MAC_HWF0R_RWKSEL_LEN);
+       hw_feat->mgk         = XLGMAC_GET_REG_BITS(mac_hfr0,
+                                               MAC_HWF0R_MGKSEL_POS,
+                                               MAC_HWF0R_MGKSEL_LEN);
+       hw_feat->mmc         = XLGMAC_GET_REG_BITS(mac_hfr0,
+                                               MAC_HWF0R_MMCSEL_POS,
+                                               MAC_HWF0R_MMCSEL_LEN);
+       hw_feat->aoe         = XLGMAC_GET_REG_BITS(mac_hfr0,
+                                               MAC_HWF0R_ARPOFFSEL_POS,
+                                               MAC_HWF0R_ARPOFFSEL_LEN);
+       hw_feat->ts          = XLGMAC_GET_REG_BITS(mac_hfr0,
+                                               MAC_HWF0R_TSSEL_POS,
+                                               MAC_HWF0R_TSSEL_LEN);
+       hw_feat->eee         = XLGMAC_GET_REG_BITS(mac_hfr0,
+                                               MAC_HWF0R_EEESEL_POS,
+                                               MAC_HWF0R_EEESEL_LEN);
+       hw_feat->tx_coe      = XLGMAC_GET_REG_BITS(mac_hfr0,
+                                               MAC_HWF0R_TXCOESEL_POS,
+                                               MAC_HWF0R_TXCOESEL_LEN);
+       hw_feat->rx_coe      = XLGMAC_GET_REG_BITS(mac_hfr0,
+                                               MAC_HWF0R_RXCOESEL_POS,
+                                               MAC_HWF0R_RXCOESEL_LEN);
+       hw_feat->addn_mac    = XLGMAC_GET_REG_BITS(mac_hfr0,
+                                               MAC_HWF0R_ADDMACADRSEL_POS,
+                                               MAC_HWF0R_ADDMACADRSEL_LEN);
+       hw_feat->ts_src      = XLGMAC_GET_REG_BITS(mac_hfr0,
+                                               MAC_HWF0R_TSSTSSEL_POS,
+                                               MAC_HWF0R_TSSTSSEL_LEN);
+       hw_feat->sa_vlan_ins = XLGMAC_GET_REG_BITS(mac_hfr0,
+                                               MAC_HWF0R_SAVLANINS_POS,
+                                               MAC_HWF0R_SAVLANINS_LEN);
+
+       /* Hardware feature register 1 */
+       hw_feat->rx_fifo_size  = XLGMAC_GET_REG_BITS(mac_hfr1,
+                                               MAC_HWF1R_RXFIFOSIZE_POS,
+                                               MAC_HWF1R_RXFIFOSIZE_LEN);
+       hw_feat->tx_fifo_size  = XLGMAC_GET_REG_BITS(mac_hfr1,
+                                               MAC_HWF1R_TXFIFOSIZE_POS,
+                                               MAC_HWF1R_TXFIFOSIZE_LEN);
+       hw_feat->adv_ts_hi     = XLGMAC_GET_REG_BITS(mac_hfr1,
+                                               MAC_HWF1R_ADVTHWORD_POS,
+                                               MAC_HWF1R_ADVTHWORD_LEN);
+       hw_feat->dma_width     = XLGMAC_GET_REG_BITS(mac_hfr1,
+                                               MAC_HWF1R_ADDR64_POS,
+                                               MAC_HWF1R_ADDR64_LEN);
+       hw_feat->dcb           = XLGMAC_GET_REG_BITS(mac_hfr1,
+                                               MAC_HWF1R_DCBEN_POS,
+                                               MAC_HWF1R_DCBEN_LEN);
+       hw_feat->sph           = XLGMAC_GET_REG_BITS(mac_hfr1,
+                                               MAC_HWF1R_SPHEN_POS,
+                                               MAC_HWF1R_SPHEN_LEN);
+       hw_feat->tso           = XLGMAC_GET_REG_BITS(mac_hfr1,
+                                               MAC_HWF1R_TSOEN_POS,
+                                               MAC_HWF1R_TSOEN_LEN);
+       hw_feat->dma_debug     = XLGMAC_GET_REG_BITS(mac_hfr1,
+                                               MAC_HWF1R_DBGMEMA_POS,
+                                               MAC_HWF1R_DBGMEMA_LEN);
+       hw_feat->rss           = XLGMAC_GET_REG_BITS(mac_hfr1,
+                                               MAC_HWF1R_RSSEN_POS,
+                                               MAC_HWF1R_RSSEN_LEN);
+       hw_feat->tc_cnt        = XLGMAC_GET_REG_BITS(mac_hfr1,
+                                               MAC_HWF1R_NUMTC_POS,
+                                               MAC_HWF1R_NUMTC_LEN);
+       hw_feat->hash_table_size = XLGMAC_GET_REG_BITS(mac_hfr1,
+                                               MAC_HWF1R_HASHTBLSZ_POS,
+                                               MAC_HWF1R_HASHTBLSZ_LEN);
+       hw_feat->l3l4_filter_num = XLGMAC_GET_REG_BITS(mac_hfr1,
+                                               MAC_HWF1R_L3L4FNUM_POS,
+                                               MAC_HWF1R_L3L4FNUM_LEN);
+
+       /* Hardware feature register 2 */
+       hw_feat->rx_q_cnt     = XLGMAC_GET_REG_BITS(mac_hfr2,
+                                               MAC_HWF2R_RXQCNT_POS,
+                                               MAC_HWF2R_RXQCNT_LEN);
+       hw_feat->tx_q_cnt     = XLGMAC_GET_REG_BITS(mac_hfr2,
+                                               MAC_HWF2R_TXQCNT_POS,
+                                               MAC_HWF2R_TXQCNT_LEN);
+       hw_feat->rx_ch_cnt    = XLGMAC_GET_REG_BITS(mac_hfr2,
+                                               MAC_HWF2R_RXCHCNT_POS,
+                                               MAC_HWF2R_RXCHCNT_LEN);
+       hw_feat->tx_ch_cnt    = XLGMAC_GET_REG_BITS(mac_hfr2,
+                                               MAC_HWF2R_TXCHCNT_POS,
+                                               MAC_HWF2R_TXCHCNT_LEN);
+       hw_feat->pps_out_num  = XLGMAC_GET_REG_BITS(mac_hfr2,
+                                               MAC_HWF2R_PPSOUTNUM_POS,
+                                               MAC_HWF2R_PPSOUTNUM_LEN);
+       hw_feat->aux_snap_num = XLGMAC_GET_REG_BITS(mac_hfr2,
+                                               MAC_HWF2R_AUXSNAPNUM_POS,
+                                               MAC_HWF2R_AUXSNAPNUM_LEN);
+
+       /* Translate the Hash Table size into actual number */
+       switch (hw_feat->hash_table_size) {
+       case 0:
+               break;
+       case 1:
+               hw_feat->hash_table_size = 64;
+               break;
+       case 2:
+               hw_feat->hash_table_size = 128;
+               break;
+       case 3:
+               hw_feat->hash_table_size = 256;
+               break;
+       }
+
+       /* Translate the address width setting into actual number */
+       switch (hw_feat->dma_width) {
+       case 0:
+               hw_feat->dma_width = 32;
+               break;
+       case 1:
+               hw_feat->dma_width = 40;
+               break;
+       case 2:
+               hw_feat->dma_width = 48;
+               break;
+       default:
+               hw_feat->dma_width = 32;
+       }
+
+       /* The Queue, Channel and TC counts are zero based so increment them
+        * to get the actual number
+        */
+       hw_feat->rx_q_cnt++;
+       hw_feat->tx_q_cnt++;
+       hw_feat->rx_ch_cnt++;
+       hw_feat->tx_ch_cnt++;
+       hw_feat->tc_cnt++;
+}
+
+void xlgmac_print_all_hw_features(struct xlgmac_pdata *pdata)
+{
+       char *str = NULL;
+
+       XLGMAC_PR("\n");
+       XLGMAC_PR("=====================================================\n");
+       XLGMAC_PR("\n");
+       XLGMAC_PR("HW support following features\n");
+       XLGMAC_PR("\n");
+       /* HW Feature Register0 */
+       XLGMAC_PR("VLAN Hash Filter Selected                   : %s\n",
+                 pdata->hw_feat.vlhash ? "YES" : "NO");
+       XLGMAC_PR("SMA (MDIO) Interface                        : %s\n",
+                 pdata->hw_feat.sma ? "YES" : "NO");
+       XLGMAC_PR("PMT Remote Wake-up Packet Enable            : %s\n",
+                 pdata->hw_feat.rwk ? "YES" : "NO");
+       XLGMAC_PR("PMT Magic Packet Enable                     : %s\n",
+                 pdata->hw_feat.mgk ? "YES" : "NO");
+       XLGMAC_PR("RMON/MMC Module Enable                      : %s\n",
+                 pdata->hw_feat.mmc ? "YES" : "NO");
+       XLGMAC_PR("ARP Offload Enabled                         : %s\n",
+                 pdata->hw_feat.aoe ? "YES" : "NO");
+       XLGMAC_PR("IEEE 1588-2008 Timestamp Enabled            : %s\n",
+                 pdata->hw_feat.ts ? "YES" : "NO");
+       XLGMAC_PR("Energy Efficient Ethernet Enabled           : %s\n",
+                 pdata->hw_feat.eee ? "YES" : "NO");
+       XLGMAC_PR("Transmit Checksum Offload Enabled           : %s\n",
+                 pdata->hw_feat.tx_coe ? "YES" : "NO");
+       XLGMAC_PR("Receive Checksum Offload Enabled            : %s\n",
+                 pdata->hw_feat.rx_coe ? "YES" : "NO");
+       XLGMAC_PR("Additional MAC Addresses 1-31 Selected      : %s\n",
+                 pdata->hw_feat.addn_mac ? "YES" : "NO");
+
+       switch (pdata->hw_feat.ts_src) {
+       case 0:
+               str = "RESERVED";
+               break;
+       case 1:
+               str = "INTERNAL";
+               break;
+       case 2:
+               str = "EXTERNAL";
+               break;
+       case 3:
+               str = "BOTH";
+               break;
+       }
+       XLGMAC_PR("Timestamp System Time Source                : %s\n", str);
+
+       XLGMAC_PR("Source Address or VLAN Insertion Enable     : %s\n",
+                 pdata->hw_feat.sa_vlan_ins ? "YES" : "NO");
+
+       /* HW Feature Register1 */
+       switch (pdata->hw_feat.rx_fifo_size) {
+       case 0:
+               str = "128 bytes";
+               break;
+       case 1:
+               str = "256 bytes";
+               break;
+       case 2:
+               str = "512 bytes";
+               break;
+       case 3:
+               str = "1 KBytes";
+               break;
+       case 4:
+               str = "2 KBytes";
+               break;
+       case 5:
+               str = "4 KBytes";
+               break;
+       case 6:
+               str = "8 KBytes";
+               break;
+       case 7:
+               str = "16 KBytes";
+               break;
+       case 8:
+               str = "32 kBytes";
+               break;
+       case 9:
+               str = "64 KBytes";
+               break;
+       case 10:
+               str = "128 KBytes";
+               break;
+       case 11:
+               str = "256 KBytes";
+               break;
+       default:
+               str = "RESERVED";
+       }
+       XLGMAC_PR("MTL Receive FIFO Size                       : %s\n", str);
+
+       switch (pdata->hw_feat.tx_fifo_size) {
+       case 0:
+               str = "128 bytes";
+               break;
+       case 1:
+               str = "256 bytes";
+               break;
+       case 2:
+               str = "512 bytes";
+               break;
+       case 3:
+               str = "1 KBytes";
+               break;
+       case 4:
+               str = "2 KBytes";
+               break;
+       case 5:
+               str = "4 KBytes";
+               break;
+       case 6:
+               str = "8 KBytes";
+               break;
+       case 7:
+               str = "16 KBytes";
+               break;
+       case 8:
+               str = "32 kBytes";
+               break;
+       case 9:
+               str = "64 KBytes";
+               break;
+       case 10:
+               str = "128 KBytes";
+               break;
+       case 11:
+               str = "256 KBytes";
+               break;
+       default:
+               str = "RESERVED";
+       }
+       XLGMAC_PR("MTL Transmit FIFO Size                      : %s\n", str);
+
+       XLGMAC_PR("IEEE 1588 High Word Register Enable         : %s\n",
+                 pdata->hw_feat.adv_ts_hi ? "YES" : "NO");
+       XLGMAC_PR("Address width                               : %u\n",
+                 pdata->hw_feat.dma_width);
+       XLGMAC_PR("DCB Feature Enable                          : %s\n",
+                 pdata->hw_feat.dcb ? "YES" : "NO");
+       XLGMAC_PR("Split Header Feature Enable                 : %s\n",
+                 pdata->hw_feat.sph ? "YES" : "NO");
+       XLGMAC_PR("TCP Segmentation Offload Enable             : %s\n",
+                 pdata->hw_feat.tso ? "YES" : "NO");
+       XLGMAC_PR("DMA Debug Registers Enabled                 : %s\n",
+                 pdata->hw_feat.dma_debug ? "YES" : "NO");
+       XLGMAC_PR("RSS Feature Enabled                         : %s\n",
+                 pdata->hw_feat.rss ? "YES" : "NO");
+       XLGMAC_PR("Number of Traffic classes                   : %u\n",
+                 (pdata->hw_feat.tc_cnt));
+       XLGMAC_PR("Hash Table Size                             : %u\n",
+                 pdata->hw_feat.hash_table_size);
+       XLGMAC_PR("Total number of L3 or L4 Filters            : %u\n",
+                 pdata->hw_feat.l3l4_filter_num);
+
+       /* HW Feature Register2 */
+       XLGMAC_PR("Number of MTL Receive Queues                : %u\n",
+                 pdata->hw_feat.rx_q_cnt);
+       XLGMAC_PR("Number of MTL Transmit Queues               : %u\n",
+                 pdata->hw_feat.tx_q_cnt);
+       XLGMAC_PR("Number of DMA Receive Channels              : %u\n",
+                 pdata->hw_feat.rx_ch_cnt);
+       XLGMAC_PR("Number of DMA Transmit Channels             : %u\n",
+                 pdata->hw_feat.tx_ch_cnt);
+
+       switch (pdata->hw_feat.pps_out_num) {
+       case 0:
+               str = "No PPS output";
+               break;
+       case 1:
+               str = "1 PPS output";
+               break;
+       case 2:
+               str = "2 PPS outputs";
+               break;
+       case 3:
+               str = "3 PPS outputs";
+               break;
+       case 4:
+               str = "4 PPS outputs";
+               break;
+       default:
+               str = "RESERVED";
+       }
+       XLGMAC_PR("Number of PPS Outputs                       : %s\n", str);
+
+       switch (pdata->hw_feat.aux_snap_num) {
+       case 0:
+               str = "No auxiliary input";
+               break;
+       case 1:
+               str = "1 auxiliary input";
+               break;
+       case 2:
+               str = "2 auxiliary inputs";
+               break;
+       case 3:
+               str = "3 auxiliary inputs";
+               break;
+       case 4:
+               str = "4 auxiliary inputs";
+               break;
+       default:
+               str = "RESERVED";
+       }
+       XLGMAC_PR("Number of Auxiliary Snapshot Inputs         : %s", str);
+
+       XLGMAC_PR("\n");
+       XLGMAC_PR("=====================================================\n");
+       XLGMAC_PR("\n");
+}
diff --git a/drivers/net/ethernet/synopsys/dwc-xlgmac-desc.c b/drivers/net/ethernet/synopsys/dwc-xlgmac-desc.c
new file mode 100644 (file)
index 0000000..e9672b1
--- /dev/null
@@ -0,0 +1,644 @@
+/* Synopsys DesignWare Core Enterprise Ethernet (XLGMAC) Driver
+ *
+ * Copyright (c) 2017 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is dual-licensed; you may select either version 2 of
+ * the GNU General Public License ("GPL") or BSD license ("BSD").
+ *
+ * This Synopsys DWC XLGMAC software driver and associated documentation
+ * (hereinafter the "Software") is an unsupported proprietary work of
+ * Synopsys, Inc. unless otherwise expressly agreed to in writing between
+ * Synopsys and you. The Software IS NOT an item of Licensed Software or a
+ * Licensed Product under any End User Software License Agreement or
+ * Agreement for Licensed Products with Synopsys or any supplement thereto.
+ * Synopsys is a registered trademark of Synopsys, Inc. Other names included
+ * in the SOFTWARE may be the trademarks of their respective owners.
+ */
+
+#include "dwc-xlgmac.h"
+#include "dwc-xlgmac-reg.h"
+
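+/* Release everything a descriptor-data slot may hold: the Tx DMA mapping,
+ * the attached skb and any Rx page references, then clear the slot's
+ * bookkeeping so it can be reused.
+ */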
+static void xlgmac_unmap_desc_data(struct xlgmac_pdata *pdata,
+                                  struct xlgmac_desc_data *desc_data)
+{
+       if (desc_data->skb_dma) {
+               if (desc_data->mapped_as_page) {
+                       dma_unmap_page(pdata->dev, desc_data->skb_dma,
+                                      desc_data->skb_dma_len, DMA_TO_DEVICE);
+               } else {
+                       dma_unmap_single(pdata->dev, desc_data->skb_dma,
+                                        desc_data->skb_dma_len, DMA_TO_DEVICE);
+               }
+               desc_data->skb_dma = 0;
+               desc_data->skb_dma_len = 0;
+       }
+
+       if (desc_data->skb) {
+               dev_kfree_skb_any(desc_data->skb);
+               desc_data->skb = NULL;
+       }
+
+       if (desc_data->rx.hdr.pa.pages)
+               put_page(desc_data->rx.hdr.pa.pages);
+
+       if (desc_data->rx.hdr.pa_unmap.pages) {
+               dma_unmap_page(pdata->dev, desc_data->rx.hdr.pa_unmap.pages_dma,
+                              desc_data->rx.hdr.pa_unmap.pages_len,
+                              DMA_FROM_DEVICE);
+               put_page(desc_data->rx.hdr.pa_unmap.pages);
+       }
+
+       if (desc_data->rx.buf.pa.pages)
+               put_page(desc_data->rx.buf.pa.pages);
+
+       if (desc_data->rx.buf.pa_unmap.pages) {
+               dma_unmap_page(pdata->dev, desc_data->rx.buf.pa_unmap.pages_dma,
+                              desc_data->rx.buf.pa_unmap.pages_len,
+                              DMA_FROM_DEVICE);
+               put_page(desc_data->rx.buf.pa_unmap.pages);
+       }
+
+       memset(&desc_data->tx, 0, sizeof(desc_data->tx));
+       memset(&desc_data->rx, 0, sizeof(desc_data->rx));
+
+       desc_data->mapped_as_page = 0;
+
+       if (desc_data->state_saved) {
+               desc_data->state_saved = 0;
+               desc_data->state.skb = NULL;
+               desc_data->state.len = 0;
+               desc_data->state.error = 0;
+       }
+}
+
+static void xlgmac_free_ring(struct xlgmac_pdata *pdata,
+                            struct xlgmac_ring *ring)
+{
+       struct xlgmac_desc_data *desc_data;
+       unsigned int i;
+
+       if (!ring)
+               return;
+
+       if (ring->desc_data_head) {
+               for (i = 0; i < ring->dma_desc_count; i++) {
+                       desc_data = XLGMAC_GET_DESC_DATA(ring, i);
+                       xlgmac_unmap_desc_data(pdata, desc_data);
+               }
+
+               kfree(ring->desc_data_head);
+               ring->desc_data_head = NULL;
+       }
+
+       if (ring->rx_hdr_pa.pages) {
+               dma_unmap_page(pdata->dev, ring->rx_hdr_pa.pages_dma,
+                              ring->rx_hdr_pa.pages_len, DMA_FROM_DEVICE);
+               put_page(ring->rx_hdr_pa.pages);
+
+               ring->rx_hdr_pa.pages = NULL;
+               ring->rx_hdr_pa.pages_len = 0;
+               ring->rx_hdr_pa.pages_offset = 0;
+               ring->rx_hdr_pa.pages_dma = 0;
+       }
+
+       if (ring->rx_buf_pa.pages) {
+               dma_unmap_page(pdata->dev, ring->rx_buf_pa.pages_dma,
+                              ring->rx_buf_pa.pages_len, DMA_FROM_DEVICE);
+               put_page(ring->rx_buf_pa.pages);
+
+               ring->rx_buf_pa.pages = NULL;
+               ring->rx_buf_pa.pages_len = 0;
+               ring->rx_buf_pa.pages_offset = 0;
+               ring->rx_buf_pa.pages_dma = 0;
+       }
+
+       if (ring->dma_desc_head) {
+               dma_free_coherent(pdata->dev,
+                                 (sizeof(struct xlgmac_dma_desc) *
+                                 ring->dma_desc_count),
+                                 ring->dma_desc_head,
+                                 ring->dma_desc_head_addr);
+               ring->dma_desc_head = NULL;
+       }
+}
+
+static int xlgmac_init_ring(struct xlgmac_pdata *pdata,
+                           struct xlgmac_ring *ring,
+                           unsigned int dma_desc_count)
+{
+       if (!ring)
+               return 0;
+
+       /* Descriptors */
+       ring->dma_desc_count = dma_desc_count;
+       ring->dma_desc_head = dma_alloc_coherent(pdata->dev,
+                                       (sizeof(struct xlgmac_dma_desc) *
+                                        dma_desc_count),
+                                       &ring->dma_desc_head_addr,
+                                       GFP_KERNEL);
+       if (!ring->dma_desc_head)
+               return -ENOMEM;
+
+       /* Array of descriptor data */
+       ring->desc_data_head = kcalloc(dma_desc_count,
+                                       sizeof(struct xlgmac_desc_data),
+                                       GFP_KERNEL);
+       if (!ring->desc_data_head)
+               return -ENOMEM;
+
+       netif_dbg(pdata, drv, pdata->netdev,
+                 "dma_desc_head=%p, dma_desc_head_addr=%pad, desc_data_head=%p\n",
+                 ring->dma_desc_head,
+                 &ring->dma_desc_head_addr,
+                 ring->desc_data_head);
+
+       return 0;
+}
+
+static void xlgmac_free_rings(struct xlgmac_pdata *pdata)
+{
+       struct xlgmac_channel *channel;
+       unsigned int i;
+
+       if (!pdata->channel_head)
+               return;
+
+       channel = pdata->channel_head;
+       for (i = 0; i < pdata->channel_count; i++, channel++) {
+               xlgmac_free_ring(pdata, channel->tx_ring);
+               xlgmac_free_ring(pdata, channel->rx_ring);
+       }
+}
+
+static int xlgmac_alloc_rings(struct xlgmac_pdata *pdata)
+{
+       struct xlgmac_channel *channel;
+       unsigned int i;
+       int ret;
+
+       channel = pdata->channel_head;
+       for (i = 0; i < pdata->channel_count; i++, channel++) {
+               netif_dbg(pdata, drv, pdata->netdev, "%s - Tx ring:\n",
+                         channel->name);
+
+               ret = xlgmac_init_ring(pdata, channel->tx_ring,
+                                      pdata->tx_desc_count);
+               if (ret) {
+                       netdev_alert(pdata->netdev,
+                                    "error initializing Tx ring\n");
+                       goto err_init_ring;
+               }
+
+               netif_dbg(pdata, drv, pdata->netdev, "%s - Rx ring:\n",
+                         channel->name);
+
+               ret = xlgmac_init_ring(pdata, channel->rx_ring,
+                                      pdata->rx_desc_count);
+               if (ret) {
+                       netdev_alert(pdata->netdev,
+                                    "error initializing Rx ring\n");
+                       goto err_init_ring;
+               }
+       }
+
+       return 0;
+
+err_init_ring:
+       xlgmac_free_rings(pdata);
+
+       return ret;
+}
+
+static void xlgmac_free_channels(struct xlgmac_pdata *pdata)
+{
+       if (!pdata->channel_head)
+               return;
+
+       kfree(pdata->channel_head->tx_ring);
+       pdata->channel_head->tx_ring = NULL;
+
+       kfree(pdata->channel_head->rx_ring);
+       pdata->channel_head->rx_ring = NULL;
+
+       kfree(pdata->channel_head);
+
+       pdata->channel_head = NULL;
+       pdata->channel_count = 0;
+}
+
+static int xlgmac_alloc_channels(struct xlgmac_pdata *pdata)
+{
+       struct xlgmac_channel *channel_head, *channel;
+       struct xlgmac_ring *tx_ring, *rx_ring;
+       int ret = -ENOMEM;
+       unsigned int i;
+
+       channel_head = kcalloc(pdata->channel_count,
+                              sizeof(struct xlgmac_channel), GFP_KERNEL);
+       if (!channel_head)
+               return ret;
+
+       netif_dbg(pdata, drv, pdata->netdev,
+                 "channel_head=%p\n", channel_head);
+
+       tx_ring = kcalloc(pdata->tx_ring_count, sizeof(struct xlgmac_ring),
+                         GFP_KERNEL);
+       if (!tx_ring)
+               goto err_tx_ring;
+
+       rx_ring = kcalloc(pdata->rx_ring_count, sizeof(struct xlgmac_ring),
+                         GFP_KERNEL);
+       if (!rx_ring)
+               goto err_rx_ring;
+
+       for (i = 0, channel = channel_head; i < pdata->channel_count;
+               i++, channel++) {
+               snprintf(channel->name, sizeof(channel->name), "channel-%u", i);
+               channel->pdata = pdata;
+               channel->queue_index = i;
+               channel->dma_regs = pdata->mac_regs + DMA_CH_BASE +
+                                   (DMA_CH_INC * i);
+
+               if (pdata->per_channel_irq) {
+                       /* Get the per DMA interrupt */
+                       ret = pdata->channel_irq[i];
+                       if (ret < 0) {
+                               netdev_err(pdata->netdev,
+                                          "get_irq %u failed\n",
+                                          i + 1);
+                               goto err_irq;
+                       }
+                       channel->dma_irq = ret;
+               }
+
+               if (i < pdata->tx_ring_count)
+                       channel->tx_ring = tx_ring++;
+
+               if (i < pdata->rx_ring_count)
+                       channel->rx_ring = rx_ring++;
+
+               netif_dbg(pdata, drv, pdata->netdev,
+                         "%s: dma_regs=%p, tx_ring=%p, rx_ring=%p\n",
+                         channel->name, channel->dma_regs,
+                         channel->tx_ring, channel->rx_ring);
+       }
+
+       pdata->channel_head = channel_head;
+
+       return 0;
+
+err_irq:
+       kfree(rx_ring);
+
+err_rx_ring:
+       kfree(tx_ring);
+
+err_tx_ring:
+       kfree(channel_head);
+
+       return ret;
+}
+
+static void xlgmac_free_channels_and_rings(struct xlgmac_pdata *pdata)
+{
+       xlgmac_free_rings(pdata);
+
+       xlgmac_free_channels(pdata);
+}
+
+static int xlgmac_alloc_channels_and_rings(struct xlgmac_pdata *pdata)
+{
+       int ret;
+
+       ret = xlgmac_alloc_channels(pdata);
+       if (ret)
+               goto err_alloc;
+
+       ret = xlgmac_alloc_rings(pdata);
+       if (ret)
+               goto err_alloc;
+
+       return 0;
+
+err_alloc:
+       xlgmac_free_channels_and_rings(pdata);
+
+       return ret;
+}
+
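+/* Allocate and DMA-map a group of pages for Rx buffers, retrying with
+ * progressively smaller allocation orders under memory pressure.
+ */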
+static int xlgmac_alloc_pages(struct xlgmac_pdata *pdata,
+                             struct xlgmac_page_alloc *pa,
+                             gfp_t gfp, int order)
+{
+       struct page *pages = NULL;
+       dma_addr_t pages_dma;
+
+       /* Try to obtain pages, decreasing order if necessary */
+       gfp |= __GFP_COLD | __GFP_COMP | __GFP_NOWARN;
+       while (order >= 0) {
+               pages = alloc_pages(gfp, order);
+               if (pages)
+                       break;
+
+               order--;
+       }
+       if (!pages)
+               return -ENOMEM;
+
+       /* Map the pages */
+       pages_dma = dma_map_page(pdata->dev, pages, 0,
+                                PAGE_SIZE << order, DMA_FROM_DEVICE);
+       if (dma_mapping_error(pdata->dev, pages_dma)) {
+               put_page(pages);
+               return -ENOMEM;
+       }
+
+       pa->pages = pages;
+       pa->pages_len = PAGE_SIZE << order;
+       pa->pages_offset = 0;
+       pa->pages_dma = pages_dma;
+
+       return 0;
+}
+
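+/* Carve a buffer of 'len' bytes out of the current page allocation. Each
+ * buffer takes an extra page reference; the descriptor that consumes the
+ * last slice inherits the unmap responsibility (via pa_unmap) and forces
+ * a fresh allocation on the next call.
+ */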
+static void xlgmac_set_buffer_data(struct xlgmac_buffer_data *bd,
+                                  struct xlgmac_page_alloc *pa,
+                                  unsigned int len)
+{
+       get_page(pa->pages);
+       bd->pa = *pa;
+
+       bd->dma_base = pa->pages_dma;
+       bd->dma_off = pa->pages_offset;
+       bd->dma_len = len;
+
+       pa->pages_offset += len;
+       if ((pa->pages_offset + len) > pa->pages_len) {
+               /* This data descriptor is responsible for unmapping page(s) */
+               bd->pa_unmap = *pa;
+
+               /* Get a new allocation next time */
+               pa->pages = NULL;
+               pa->pages_len = 0;
+               pa->pages_offset = 0;
+               pa->pages_dma = 0;
+       }
+}
+
+static int xlgmac_map_rx_buffer(struct xlgmac_pdata *pdata,
+                               struct xlgmac_ring *ring,
+                               struct xlgmac_desc_data *desc_data)
+{
+       int order, ret;
+
+       if (!ring->rx_hdr_pa.pages) {
+               ret = xlgmac_alloc_pages(pdata, &ring->rx_hdr_pa,
+                                        GFP_ATOMIC, 0);
+               if (ret)
+                       return ret;
+       }
+
+       if (!ring->rx_buf_pa.pages) {
+               order = max_t(int, PAGE_ALLOC_COSTLY_ORDER - 1, 0);
+               ret = xlgmac_alloc_pages(pdata, &ring->rx_buf_pa,
+                                        GFP_ATOMIC, order);
+               if (ret)
+                       return ret;
+       }
+
+       /* Set up the header page info */
+       xlgmac_set_buffer_data(&desc_data->rx.hdr, &ring->rx_hdr_pa,
+                              XLGMAC_SKB_ALLOC_SIZE);
+
+       /* Set up the buffer page info */
+       xlgmac_set_buffer_data(&desc_data->rx.buf, &ring->rx_buf_pa,
+                              pdata->rx_buf_size);
+
+       return 0;
+}
+
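+/* Point each descriptor-data slot of every Tx ring at its coherent DMA
+ * descriptor, reset the ring state and let the hardware ops program the
+ * channel's descriptor registers.
+ */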
+static void xlgmac_tx_desc_init(struct xlgmac_pdata *pdata)
+{
+       struct xlgmac_hw_ops *hw_ops = &pdata->hw_ops;
+       struct xlgmac_desc_data *desc_data;
+       struct xlgmac_dma_desc *dma_desc;
+       struct xlgmac_channel *channel;
+       struct xlgmac_ring *ring;
+       dma_addr_t dma_desc_addr;
+       unsigned int i, j;
+
+       channel = pdata->channel_head;
+       for (i = 0; i < pdata->channel_count; i++, channel++) {
+               ring = channel->tx_ring;
+               if (!ring)
+                       break;
+
+               dma_desc = ring->dma_desc_head;
+               dma_desc_addr = ring->dma_desc_head_addr;
+
+               for (j = 0; j < ring->dma_desc_count; j++) {
+                       desc_data = XLGMAC_GET_DESC_DATA(ring, j);
+
+                       desc_data->dma_desc = dma_desc;
+                       desc_data->dma_desc_addr = dma_desc_addr;
+
+                       dma_desc++;
+                       dma_desc_addr += sizeof(struct xlgmac_dma_desc);
+               }
+
+               ring->cur = 0;
+               ring->dirty = 0;
+               memset(&ring->tx, 0, sizeof(ring->tx));
+
+               hw_ops->tx_desc_init(channel);
+       }
+}
+
+static void xlgmac_rx_desc_init(struct xlgmac_pdata *pdata)
+{
+       struct xlgmac_hw_ops *hw_ops = &pdata->hw_ops;
+       struct xlgmac_desc_data *desc_data;
+       struct xlgmac_dma_desc *dma_desc;
+       struct xlgmac_channel *channel;
+       struct xlgmac_ring *ring;
+       dma_addr_t dma_desc_addr;
+       unsigned int i, j;
+
+       channel = pdata->channel_head;
+       for (i = 0; i < pdata->channel_count; i++, channel++) {
+               ring = channel->rx_ring;
+               if (!ring)
+                       break;
+
+               dma_desc = ring->dma_desc_head;
+               dma_desc_addr = ring->dma_desc_head_addr;
+
+               for (j = 0; j < ring->dma_desc_count; j++) {
+                       desc_data = XLGMAC_GET_DESC_DATA(ring, j);
+
+                       desc_data->dma_desc = dma_desc;
+                       desc_data->dma_desc_addr = dma_desc_addr;
+
+                       if (xlgmac_map_rx_buffer(pdata, ring, desc_data))
+                               break;
+
+                       dma_desc++;
+                       dma_desc_addr += sizeof(struct xlgmac_dma_desc);
+               }
+
+               ring->cur = 0;
+               ring->dirty = 0;
+
+               hw_ops->rx_desc_init(channel);
+       }
+}
+
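+/* DMA-map an outgoing skb across descriptor slots: an optional slot is
+ * reserved for a context descriptor (MSS or VLAN tag change), then the
+ * TSO header, the linear data and each page fragment are mapped, split
+ * into pieces of at most XLGMAC_TX_MAX_BUF_SIZE bytes. Returns the number
+ * of descriptors used, or 0 on failure after unwinding the mappings.
+ */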
+static int xlgmac_map_tx_skb(struct xlgmac_channel *channel,
+                            struct sk_buff *skb)
+{
+       struct xlgmac_pdata *pdata = channel->pdata;
+       struct xlgmac_ring *ring = channel->tx_ring;
+       unsigned int start_index, cur_index;
+       struct xlgmac_desc_data *desc_data;
+       unsigned int offset, datalen, len;
+       struct xlgmac_pkt_info *pkt_info;
+       struct skb_frag_struct *frag;
+       unsigned int tso, vlan;
+       dma_addr_t skb_dma;
+       unsigned int i;
+
+       offset = 0;
+       start_index = ring->cur;
+       cur_index = ring->cur;
+
+       pkt_info = &ring->pkt_info;
+       pkt_info->desc_count = 0;
+       pkt_info->length = 0;
+
+       tso = XLGMAC_GET_REG_BITS(pkt_info->attributes,
+                                 TX_PACKET_ATTRIBUTES_TSO_ENABLE_POS,
+                                 TX_PACKET_ATTRIBUTES_TSO_ENABLE_LEN);
+       vlan = XLGMAC_GET_REG_BITS(pkt_info->attributes,
+                                  TX_PACKET_ATTRIBUTES_VLAN_CTAG_POS,
+                                  TX_PACKET_ATTRIBUTES_VLAN_CTAG_LEN);
+
+       /* Save space for a context descriptor if needed */
+       if ((tso && (pkt_info->mss != ring->tx.cur_mss)) ||
+           (vlan && (pkt_info->vlan_ctag != ring->tx.cur_vlan_ctag)))
+               cur_index++;
+       desc_data = XLGMAC_GET_DESC_DATA(ring, cur_index);
+
+       if (tso) {
+               /* Map the TSO header */
+               skb_dma = dma_map_single(pdata->dev, skb->data,
+                                        pkt_info->header_len, DMA_TO_DEVICE);
+               if (dma_mapping_error(pdata->dev, skb_dma)) {
+                       netdev_alert(pdata->netdev, "dma_map_single failed\n");
+                       goto err_out;
+               }
+               desc_data->skb_dma = skb_dma;
+               desc_data->skb_dma_len = pkt_info->header_len;
+               netif_dbg(pdata, tx_queued, pdata->netdev,
+                         "skb header: index=%u, dma=%pad, len=%u\n",
+                         cur_index, &skb_dma, pkt_info->header_len);
+
+               offset = pkt_info->header_len;
+
+               pkt_info->length += pkt_info->header_len;
+
+               cur_index++;
+               desc_data = XLGMAC_GET_DESC_DATA(ring, cur_index);
+       }
+
+       /* Map the (remainder of the) packet */
+       for (datalen = skb_headlen(skb) - offset; datalen; ) {
+               len = min_t(unsigned int, datalen, XLGMAC_TX_MAX_BUF_SIZE);
+
+               skb_dma = dma_map_single(pdata->dev, skb->data + offset, len,
+                                        DMA_TO_DEVICE);
+               if (dma_mapping_error(pdata->dev, skb_dma)) {
+                       netdev_alert(pdata->netdev, "dma_map_single failed\n");
+                       goto err_out;
+               }
+               desc_data->skb_dma = skb_dma;
+               desc_data->skb_dma_len = len;
+               netif_dbg(pdata, tx_queued, pdata->netdev,
+                         "skb data: index=%u, dma=%pad, len=%u\n",
+                         cur_index, &skb_dma, len);
+
+               datalen -= len;
+               offset += len;
+
+               pkt_info->length += len;
+
+               cur_index++;
+               desc_data = XLGMAC_GET_DESC_DATA(ring, cur_index);
+       }
+
+       for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+               netif_dbg(pdata, tx_queued, pdata->netdev,
+                         "mapping frag %u\n", i);
+
+               frag = &skb_shinfo(skb)->frags[i];
+               offset = 0;
+
+               for (datalen = skb_frag_size(frag); datalen; ) {
+                       len = min_t(unsigned int, datalen,
+                                   XLGMAC_TX_MAX_BUF_SIZE);
+
+                       skb_dma = skb_frag_dma_map(pdata->dev, frag, offset,
+                                                  len, DMA_TO_DEVICE);
+                       if (dma_mapping_error(pdata->dev, skb_dma)) {
+                               netdev_alert(pdata->netdev,
+                                            "skb_frag_dma_map failed\n");
+                               goto err_out;
+                       }
+                       desc_data->skb_dma = skb_dma;
+                       desc_data->skb_dma_len = len;
+                       desc_data->mapped_as_page = 1;
+                       netif_dbg(pdata, tx_queued, pdata->netdev,
+                                 "skb frag: index=%u, dma=%pad, len=%u\n",
+                                 cur_index, &skb_dma, len);
+
+                       datalen -= len;
+                       offset += len;
+
+                       pkt_info->length += len;
+
+                       cur_index++;
+                       desc_data = XLGMAC_GET_DESC_DATA(ring, cur_index);
+               }
+       }
+
+       /* Save the skb address in the last entry. We always have some data
+        * that has been mapped so desc_data is always advanced past the last
+        * piece of mapped data - use the entry pointed to by cur_index - 1.
+        */
+       desc_data = XLGMAC_GET_DESC_DATA(ring, cur_index - 1);
+       desc_data->skb = skb;
+
+       /* Save the number of descriptor entries used */
+       pkt_info->desc_count = cur_index - start_index;
+
+       return pkt_info->desc_count;
+
+err_out:
+       while (start_index < cur_index) {
+               desc_data = XLGMAC_GET_DESC_DATA(ring, start_index++);
+               xlgmac_unmap_desc_data(pdata, desc_data);
+       }
+
+       return 0;
+}
+
+void xlgmac_init_desc_ops(struct xlgmac_desc_ops *desc_ops)
+{
+       desc_ops->alloc_channles_and_rings = xlgmac_alloc_channels_and_rings;
+       desc_ops->free_channels_and_rings = xlgmac_free_channels_and_rings;
+       desc_ops->map_tx_skb = xlgmac_map_tx_skb;
+       desc_ops->map_rx_buffer = xlgmac_map_rx_buffer;
+       desc_ops->unmap_desc_data = xlgmac_unmap_desc_data;
+       desc_ops->tx_desc_init = xlgmac_tx_desc_init;
+       desc_ops->rx_desc_init = xlgmac_rx_desc_init;
+}
diff --git a/drivers/net/ethernet/synopsys/dwc-xlgmac-hw.c b/drivers/net/ethernet/synopsys/dwc-xlgmac-hw.c
new file mode 100644 (file)
index 0000000..0dec1dc
--- /dev/null
@@ -0,0 +1,3145 @@
+/* Synopsys DesignWare Core Enterprise Ethernet (XLGMAC) Driver
+ *
+ * Copyright (c) 2017 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is dual-licensed; you may select either version 2 of
+ * the GNU General Public License ("GPL") or BSD license ("BSD").
+ *
+ * This Synopsys DWC XLGMAC software driver and associated documentation
+ * (hereinafter the "Software") is an unsupported proprietary work of
+ * Synopsys, Inc. unless otherwise expressly agreed to in writing between
+ * Synopsys and you. The Software IS NOT an item of Licensed Software or a
+ * Licensed Product under any End User Software License Agreement or
+ * Agreement for Licensed Products with Synopsys or any supplement thereto.
+ * Synopsys is a registered trademark of Synopsys, Inc. Other names included
+ * in the SOFTWARE may be the trademarks of their respective owners.
+ */
+
+#include <linux/phy.h>
+#include <linux/mdio.h>
+#include <linux/clk.h>
+#include <linux/bitrev.h>
+#include <linux/crc32.h>
+#include <linux/dcbnl.h>
+
+#include "dwc-xlgmac.h"
+#include "dwc-xlgmac-reg.h"
+
+static int xlgmac_tx_complete(struct xlgmac_dma_desc *dma_desc)
+{
+       return !XLGMAC_GET_REG_BITS_LE(dma_desc->desc3,
+                               TX_NORMAL_DESC3_OWN_POS,
+                               TX_NORMAL_DESC3_OWN_LEN);
+}
+
+static int xlgmac_disable_rx_csum(struct xlgmac_pdata *pdata)
+{
+       u32 regval;
+
+       regval = readl(pdata->mac_regs + MAC_RCR);
+       regval = XLGMAC_SET_REG_BITS(regval, MAC_RCR_IPC_POS,
+                                    MAC_RCR_IPC_LEN, 0);
+       writel(regval, pdata->mac_regs + MAC_RCR);
+
+       return 0;
+}
+
+static int xlgmac_enable_rx_csum(struct xlgmac_pdata *pdata)
+{
+       u32 regval;
+
+       regval = readl(pdata->mac_regs + MAC_RCR);
+       regval = XLGMAC_SET_REG_BITS(regval, MAC_RCR_IPC_POS,
+                                    MAC_RCR_IPC_LEN, 1);
+       writel(regval, pdata->mac_regs + MAC_RCR);
+
+       return 0;
+}
+
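+/* Program MAC address 0; the low register holds address bytes 0-3 and the
+ * low half of the high register holds bytes 4-5.
+ */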
+static int xlgmac_set_mac_address(struct xlgmac_pdata *pdata, u8 *addr)
+{
+       unsigned int mac_addr_hi, mac_addr_lo;
+
+       mac_addr_hi = (addr[5] <<  8) | (addr[4] <<  0);
+       mac_addr_lo = (addr[3] << 24) | (addr[2] << 16) |
+                     (addr[1] <<  8) | (addr[0] <<  0);
+
+       writel(mac_addr_hi, pdata->mac_regs + MAC_MACA0HR);
+       writel(mac_addr_lo, pdata->mac_regs + MAC_MACA0LR);
+
+       return 0;
+}
+
+static void xlgmac_set_mac_reg(struct xlgmac_pdata *pdata,
+                              struct netdev_hw_addr *ha,
+                              unsigned int *mac_reg)
+{
+       unsigned int mac_addr_hi, mac_addr_lo;
+       u8 *mac_addr;
+
+       mac_addr_lo = 0;
+       mac_addr_hi = 0;
+
+       if (ha) {
+               mac_addr = (u8 *)&mac_addr_lo;
+               mac_addr[0] = ha->addr[0];
+               mac_addr[1] = ha->addr[1];
+               mac_addr[2] = ha->addr[2];
+               mac_addr[3] = ha->addr[3];
+               mac_addr = (u8 *)&mac_addr_hi;
+               mac_addr[0] = ha->addr[4];
+               mac_addr[1] = ha->addr[5];
+
+               netif_dbg(pdata, drv, pdata->netdev,
+                         "adding mac address %pM at %#x\n",
+                         ha->addr, *mac_reg);
+
+               mac_addr_hi = XLGMAC_SET_REG_BITS(mac_addr_hi,
+                                                 MAC_MACA1HR_AE_POS,
+                                                 MAC_MACA1HR_AE_LEN, 1);
+       }
+
+       writel(mac_addr_hi, pdata->mac_regs + *mac_reg);
+       *mac_reg += MAC_MACA_INC;
+       writel(mac_addr_lo, pdata->mac_regs + *mac_reg);
+       *mac_reg += MAC_MACA_INC;
+}
+
+static int xlgmac_enable_rx_vlan_stripping(struct xlgmac_pdata *pdata)
+{
+       u32 regval;
+
+       regval = readl(pdata->mac_regs + MAC_VLANTR);
+       /* Put the VLAN tag in the Rx descriptor */
+       regval = XLGMAC_SET_REG_BITS(regval, MAC_VLANTR_EVLRXS_POS,
+                                    MAC_VLANTR_EVLRXS_LEN, 1);
+       /* Don't check the VLAN type */
+       regval = XLGMAC_SET_REG_BITS(regval, MAC_VLANTR_DOVLTC_POS,
+                                    MAC_VLANTR_DOVLTC_LEN, 1);
+       /* Check only C-TAG (0x8100) packets */
+       regval = XLGMAC_SET_REG_BITS(regval, MAC_VLANTR_ERSVLM_POS,
+                                    MAC_VLANTR_ERSVLM_LEN, 0);
+       /* Don't consider an S-TAG (0x88A8) packet as a VLAN packet */
+       regval = XLGMAC_SET_REG_BITS(regval, MAC_VLANTR_ESVL_POS,
+                                    MAC_VLANTR_ESVL_LEN, 0);
+       /* Enable VLAN tag stripping */
+       regval = XLGMAC_SET_REG_BITS(regval, MAC_VLANTR_EVLS_POS,
+                                    MAC_VLANTR_EVLS_LEN, 0x3);
+       writel(regval, pdata->mac_regs + MAC_VLANTR);
+
+       return 0;
+}
+
+static int xlgmac_disable_rx_vlan_stripping(struct xlgmac_pdata *pdata)
+{
+       u32 regval;
+
+       regval = readl(pdata->mac_regs + MAC_VLANTR);
+       regval = XLGMAC_SET_REG_BITS(regval, MAC_VLANTR_EVLS_POS,
+                                    MAC_VLANTR_EVLS_LEN, 0);
+       writel(regval, pdata->mac_regs + MAC_VLANTR);
+
+       return 0;
+}
+
+static int xlgmac_enable_rx_vlan_filtering(struct xlgmac_pdata *pdata)
+{
+       u32 regval;
+
+       regval = readl(pdata->mac_regs + MAC_PFR);
+       /* Enable VLAN filtering */
+       regval = XLGMAC_SET_REG_BITS(regval, MAC_PFR_VTFE_POS,
+                                    MAC_PFR_VTFE_LEN, 1);
+       writel(regval, pdata->mac_regs + MAC_PFR);
+
+       regval = readl(pdata->mac_regs + MAC_VLANTR);
+       /* Enable VLAN Hash Table filtering */
+       regval = XLGMAC_SET_REG_BITS(regval, MAC_VLANTR_VTHM_POS,
+                                    MAC_VLANTR_VTHM_LEN, 1);
+       /* Disable VLAN tag inverse matching */
+       regval = XLGMAC_SET_REG_BITS(regval, MAC_VLANTR_VTIM_POS,
+                                    MAC_VLANTR_VTIM_LEN, 0);
+       /* Only filter on the lower 12-bits of the VLAN tag */
+       regval = XLGMAC_SET_REG_BITS(regval, MAC_VLANTR_ETV_POS,
+                                    MAC_VLANTR_ETV_LEN, 1);
+       /* In order for the VLAN Hash Table filtering to be effective,
+        * the VLAN tag identifier in the VLAN Tag Register must not
+        * be zero.  Set the VLAN tag identifier to "1" to enable the
+        * VLAN Hash Table filtering.  This implies that a VLAN tag of
+        * 1 will always pass filtering.
+        */
+       regval = XLGMAC_SET_REG_BITS(regval, MAC_VLANTR_VL_POS,
+                                    MAC_VLANTR_VL_LEN, 1);
+       writel(regval, pdata->mac_regs + MAC_VLANTR);
+
+       return 0;
+}
+
+static int xlgmac_disable_rx_vlan_filtering(struct xlgmac_pdata *pdata)
+{
+       u32 regval;
+
+       regval = readl(pdata->mac_regs + MAC_PFR);
+       /* Disable VLAN filtering */
+       regval = XLGMAC_SET_REG_BITS(regval, MAC_PFR_VTFE_POS,
+                                    MAC_PFR_VTFE_LEN, 0);
+       writel(regval, pdata->mac_regs + MAC_PFR);
+
+       return 0;
+}
+
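+/* Bitwise little-endian CRC-32 (polynomial 0xedb88320) over the valid
+ * VLAN ID bits; callers use the top four bits of the bit-reversed result
+ * to index the 16-bin VLAN hash table.
+ */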
+static u32 xlgmac_vid_crc32_le(__le16 vid_le)
+{
+       unsigned char *data = (unsigned char *)&vid_le;
+       unsigned char data_byte = 0;
+       u32 poly = 0xedb88320;
+       u32 crc = ~0;
+       u32 temp = 0;
+       int i, bits;
+
+       bits = get_bitmask_order(VLAN_VID_MASK);
+       for (i = 0; i < bits; i++) {
+               if ((i % 8) == 0)
+                       data_byte = data[i / 8];
+
+               temp = ((crc & 1) ^ data_byte) & 1;
+               crc >>= 1;
+               data_byte >>= 1;
+
+               if (temp)
+                       crc ^= poly;
+       }
+
+       return crc;
+}
+
+static int xlgmac_update_vlan_hash_table(struct xlgmac_pdata *pdata)
+{
+       u16 vlan_hash_table = 0;
+       __le16 vid_le;
+       u32 regval;
+       u32 crc;
+       u16 vid;
+
+       /* Generate the VLAN Hash Table value */
+       for_each_set_bit(vid, pdata->active_vlans, VLAN_N_VID) {
+               /* Get the CRC32 value of the VLAN ID */
+               vid_le = cpu_to_le16(vid);
+               crc = bitrev32(~xlgmac_vid_crc32_le(vid_le)) >> 28;
+
+               vlan_hash_table |= (1 << crc);
+       }
+
+       regval = readl(pdata->mac_regs + MAC_VLANHTR);
+       /* Set the VLAN Hash Table filtering register */
+       regval = XLGMAC_SET_REG_BITS(regval, MAC_VLANHTR_VLHT_POS,
+                                    MAC_VLANHTR_VLHT_LEN, vlan_hash_table);
+       writel(regval, pdata->mac_regs + MAC_VLANHTR);
+
+       return 0;
+}
+
+static int xlgmac_set_promiscuous_mode(struct xlgmac_pdata *pdata,
+                                      unsigned int enable)
+{
+       unsigned int val = enable ? 1 : 0;
+       u32 regval;
+
+       regval = XLGMAC_GET_REG_BITS(readl(pdata->mac_regs + MAC_PFR),
+                                    MAC_PFR_PR_POS, MAC_PFR_PR_LEN);
+       if (regval == val)
+               return 0;
+
+       netif_dbg(pdata, drv, pdata->netdev, "%s promiscuous mode\n",
+                 enable ? "entering" : "leaving");
+
+       regval = readl(pdata->mac_regs + MAC_PFR);
+       regval = XLGMAC_SET_REG_BITS(regval, MAC_PFR_PR_POS,
+                                    MAC_PFR_PR_LEN, val);
+       writel(regval, pdata->mac_regs + MAC_PFR);
+
+       /* Hardware will still perform VLAN filtering in promiscuous mode */
+       if (enable) {
+               xlgmac_disable_rx_vlan_filtering(pdata);
+       } else {
+               if (pdata->netdev->features & NETIF_F_HW_VLAN_CTAG_FILTER)
+                       xlgmac_enable_rx_vlan_filtering(pdata);
+       }
+
+       return 0;
+}
+
+static int xlgmac_set_all_multicast_mode(struct xlgmac_pdata *pdata,
+                                        unsigned int enable)
+{
+       unsigned int val = enable ? 1 : 0;
+       u32 regval;
+
+       regval = XLGMAC_GET_REG_BITS(readl(pdata->mac_regs + MAC_PFR),
+                                    MAC_PFR_PM_POS, MAC_PFR_PM_LEN);
+       if (regval == val)
+               return 0;
+
+       netif_dbg(pdata, drv, pdata->netdev, "%s allmulti mode\n",
+                 enable ? "entering" : "leaving");
+
+       regval = readl(pdata->mac_regs + MAC_PFR);
+       regval = XLGMAC_SET_REG_BITS(regval, MAC_PFR_PM_POS,
+                                    MAC_PFR_PM_LEN, val);
+       writel(regval, pdata->mac_regs + MAC_PFR);
+
+       return 0;
+}
+
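+/* Write unicast and multicast addresses into the additional MAC address
+ * registers, falling back to promiscuous or all-multicast mode when there
+ * are more addresses than filter entries, and clear any leftover entries.
+ */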
+static void xlgmac_set_mac_addn_addrs(struct xlgmac_pdata *pdata)
+{
+       struct net_device *netdev = pdata->netdev;
+       struct netdev_hw_addr *ha;
+       unsigned int addn_macs;
+       unsigned int mac_reg;
+
+       mac_reg = MAC_MACA1HR;
+       addn_macs = pdata->hw_feat.addn_mac;
+
+       if (netdev_uc_count(netdev) > addn_macs) {
+               xlgmac_set_promiscuous_mode(pdata, 1);
+       } else {
+               netdev_for_each_uc_addr(ha, netdev) {
+                       xlgmac_set_mac_reg(pdata, ha, &mac_reg);
+                       addn_macs--;
+               }
+
+               if (netdev_mc_count(netdev) > addn_macs) {
+                       xlgmac_set_all_multicast_mode(pdata, 1);
+               } else {
+                       netdev_for_each_mc_addr(ha, netdev) {
+                               xlgmac_set_mac_reg(pdata, ha, &mac_reg);
+                               addn_macs--;
+                       }
+               }
+       }
+
+       /* Clear remaining additional MAC address entries */
+       while (addn_macs--)
+               xlgmac_set_mac_reg(pdata, NULL, &mac_reg);
+}
+
+static void xlgmac_set_mac_hash_table(struct xlgmac_pdata *pdata)
+{
+       unsigned int hash_table_shift, hash_table_count;
+       u32 hash_table[XLGMAC_MAC_HASH_TABLE_SIZE];
+       struct net_device *netdev = pdata->netdev;
+       struct netdev_hw_addr *ha;
+       unsigned int hash_reg;
+       unsigned int i;
+       u32 crc;
+
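+       /* hash_table_size is 64, 128 or 256, so the shift keeps the top 6,
+        * 7 or 8 bits of the CRC as the bin index; each 32-bit Hash Table
+        * register covers 32 bins
+        */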
+       hash_table_shift = 26 - (pdata->hw_feat.hash_table_size >> 7);
+       hash_table_count = pdata->hw_feat.hash_table_size / 32;
+       memset(hash_table, 0, sizeof(hash_table));
+
+       /* Build the MAC Hash Table register values */
+       netdev_for_each_uc_addr(ha, netdev) {
+               crc = bitrev32(~crc32_le(~0, ha->addr, ETH_ALEN));
+               crc >>= hash_table_shift;
+               hash_table[crc >> 5] |= (1 << (crc & 0x1f));
+       }
+
+       netdev_for_each_mc_addr(ha, netdev) {
+               crc = bitrev32(~crc32_le(~0, ha->addr, ETH_ALEN));
+               crc >>= hash_table_shift;
+               hash_table[crc >> 5] |= (1 << (crc & 0x1f));
+       }
+
+       /* Set the MAC Hash Table registers */
+       hash_reg = MAC_HTR0;
+       for (i = 0; i < hash_table_count; i++) {
+               writel(hash_table[i], pdata->mac_regs + hash_reg);
+               hash_reg += MAC_HTR_INC;
+       }
+}
+
+static int xlgmac_add_mac_addresses(struct xlgmac_pdata *pdata)
+{
+       if (pdata->hw_feat.hash_table_size)
+               xlgmac_set_mac_hash_table(pdata);
+       else
+               xlgmac_set_mac_addn_addrs(pdata);
+
+       return 0;
+}
+
+static void xlgmac_config_mac_address(struct xlgmac_pdata *pdata)
+{
+       u32 regval;
+
+       xlgmac_set_mac_address(pdata, pdata->netdev->dev_addr);
+
+       /* Filtering is done using perfect filtering and hash filtering */
+       if (pdata->hw_feat.hash_table_size) {
+               regval = readl(pdata->mac_regs + MAC_PFR);
+               regval = XLGMAC_SET_REG_BITS(regval, MAC_PFR_HPF_POS,
+                                            MAC_PFR_HPF_LEN, 1);
+               regval = XLGMAC_SET_REG_BITS(regval, MAC_PFR_HUC_POS,
+                                            MAC_PFR_HUC_LEN, 1);
+               regval = XLGMAC_SET_REG_BITS(regval, MAC_PFR_HMC_POS,
+                                            MAC_PFR_HMC_LEN, 1);
+               writel(regval, pdata->mac_regs + MAC_PFR);
+       }
+}
+
+static void xlgmac_config_jumbo_enable(struct xlgmac_pdata *pdata)
+{
+       unsigned int val;
+       u32 regval;
+
+       val = (pdata->netdev->mtu > XLGMAC_STD_PACKET_MTU) ? 1 : 0;
+
+       regval = readl(pdata->mac_regs + MAC_RCR);
+       regval = XLGMAC_SET_REG_BITS(regval, MAC_RCR_JE_POS,
+                                    MAC_RCR_JE_LEN, val);
+       writel(regval, pdata->mac_regs + MAC_RCR);
+}
+
+static void xlgmac_config_checksum_offload(struct xlgmac_pdata *pdata)
+{
+       if (pdata->netdev->features & NETIF_F_RXCSUM)
+               xlgmac_enable_rx_csum(pdata);
+       else
+               xlgmac_disable_rx_csum(pdata);
+}
+
+static void xlgmac_config_vlan_support(struct xlgmac_pdata *pdata)
+{
+       u32 regval;
+
+       regval = readl(pdata->mac_regs + MAC_VLANIR);
+       /* Indicate that VLAN Tx CTAGs come from context descriptors */
+       regval = XLGMAC_SET_REG_BITS(regval, MAC_VLANIR_CSVL_POS,
+                                    MAC_VLANIR_CSVL_LEN, 0);
+       regval = XLGMAC_SET_REG_BITS(regval, MAC_VLANIR_VLTI_POS,
+                                    MAC_VLANIR_VLTI_LEN, 1);
+       writel(regval, pdata->mac_regs + MAC_VLANIR);
+
+       /* Set the current VLAN Hash Table register value */
+       xlgmac_update_vlan_hash_table(pdata);
+
+       if (pdata->netdev->features & NETIF_F_HW_VLAN_CTAG_FILTER)
+               xlgmac_enable_rx_vlan_filtering(pdata);
+       else
+               xlgmac_disable_rx_vlan_filtering(pdata);
+
+       if (pdata->netdev->features & NETIF_F_HW_VLAN_CTAG_RX)
+               xlgmac_enable_rx_vlan_stripping(pdata);
+       else
+               xlgmac_disable_rx_vlan_stripping(pdata);
+}
+
+static int xlgmac_config_rx_mode(struct xlgmac_pdata *pdata)
+{
+       struct net_device *netdev = pdata->netdev;
+       unsigned int pr_mode, am_mode;
+
+       pr_mode = ((netdev->flags & IFF_PROMISC) != 0);
+       am_mode = ((netdev->flags & IFF_ALLMULTI) != 0);
+
+       xlgmac_set_promiscuous_mode(pdata, pr_mode);
+       xlgmac_set_all_multicast_mode(pdata, am_mode);
+
+       xlgmac_add_mac_addresses(pdata);
+
+       return 0;
+}
+
+static void xlgmac_prepare_tx_stop(struct xlgmac_pdata *pdata,
+                                  struct xlgmac_channel *channel)
+{
+       unsigned int tx_dsr, tx_pos, tx_qidx;
+       unsigned long tx_timeout;
+       unsigned int tx_status;
+
+       /* Calculate the status register to read and the position within */
+       if (channel->queue_index < DMA_DSRX_FIRST_QUEUE) {
+               tx_dsr = DMA_DSR0;
+               tx_pos = (channel->queue_index * DMA_DSR_Q_LEN) +
+                        DMA_DSR0_TPS_START;
+       } else {
+               tx_qidx = channel->queue_index - DMA_DSRX_FIRST_QUEUE;
+
+               tx_dsr = DMA_DSR1 + ((tx_qidx / DMA_DSRX_QPR) * DMA_DSRX_INC);
+               tx_pos = ((tx_qidx % DMA_DSRX_QPR) * DMA_DSR_Q_LEN) +
+                        DMA_DSRX_TPS_START;
+       }
+
+       /* The Tx engine cannot be stopped if it is actively processing
+        * descriptors. Wait for the Tx engine to enter the stopped or
+        * suspended state.  Don't wait forever though...
+        */
+       tx_timeout = jiffies + (XLGMAC_DMA_STOP_TIMEOUT * HZ);
+       while (time_before(jiffies, tx_timeout)) {
+               tx_status = readl(pdata->mac_regs + tx_dsr);
+               tx_status = XLGMAC_GET_REG_BITS(tx_status, tx_pos,
+                                               DMA_DSR_TPS_LEN);
+               if ((tx_status == DMA_TPS_STOPPED) ||
+                   (tx_status == DMA_TPS_SUSPENDED))
+                       break;
+
+               usleep_range(500, 1000);
+       }
+
+       if (!time_before(jiffies, tx_timeout))
+               netdev_info(pdata->netdev,
+                           "timed out waiting for Tx DMA channel %u to stop\n",
+                           channel->queue_index);
+}
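+
+/* Example of the status register math (illustrative, assuming
+ * DMA_DSRX_FIRST_QUEUE = 3 and DMA_DSRX_QPR = 4 as in related DWC
+ * Ethernet MACs): queue 6 gives tx_qidx = 3, so tx_dsr = DMA_DSR1 +
+ * (3 / 4) * DMA_DSRX_INC = DMA_DSR1 and tx_pos = (3 % 4) *
+ * DMA_DSR_Q_LEN + DMA_DSRX_TPS_START.
+ */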
+
+static void xlgmac_enable_tx(struct xlgmac_pdata *pdata)
+{
+       struct xlgmac_channel *channel;
+       unsigned int i;
+       u32 regval;
+
+       /* Enable each Tx DMA channel */
+       channel = pdata->channel_head;
+       for (i = 0; i < pdata->channel_count; i++, channel++) {
+               if (!channel->tx_ring)
+                       break;
+
+               regval = readl(XLGMAC_DMA_REG(channel, DMA_CH_TCR));
+               regval = XLGMAC_SET_REG_BITS(regval, DMA_CH_TCR_ST_POS,
+                                            DMA_CH_TCR_ST_LEN, 1);
+               writel(regval, XLGMAC_DMA_REG(channel, DMA_CH_TCR));
+       }
+
+       /* Enable each Tx queue */
+       for (i = 0; i < pdata->tx_q_count; i++) {
+               regval = readl(XLGMAC_MTL_REG(pdata, i, MTL_Q_TQOMR));
+               regval = XLGMAC_SET_REG_BITS(regval, MTL_Q_TQOMR_TXQEN_POS,
+                                            MTL_Q_TQOMR_TXQEN_LEN,
+                                       MTL_Q_ENABLED);
+               writel(regval, XLGMAC_MTL_REG(pdata, i, MTL_Q_TQOMR));
+       }
+
+       /* Enable MAC Tx */
+       regval = readl(pdata->mac_regs + MAC_TCR);
+       regval = XLGMAC_SET_REG_BITS(regval, MAC_TCR_TE_POS,
+                                    MAC_TCR_TE_LEN, 1);
+       writel(regval, pdata->mac_regs + MAC_TCR);
+}
+
+static void xlgmac_disable_tx(struct xlgmac_pdata *pdata)
+{
+       struct xlgmac_channel *channel;
+       unsigned int i;
+       u32 regval;
+
+       /* Prepare for Tx DMA channel stop */
+       channel = pdata->channel_head;
+       for (i = 0; i < pdata->channel_count; i++, channel++) {
+               if (!channel->tx_ring)
+                       break;
+
+               xlgmac_prepare_tx_stop(pdata, channel);
+       }
+
+       /* Disable MAC Tx */
+       regval = readl(pdata->mac_regs + MAC_TCR);
+       regval = XLGMAC_SET_REG_BITS(regval, MAC_TCR_TE_POS,
+                                    MAC_TCR_TE_LEN, 0);
+       writel(regval, pdata->mac_regs + MAC_TCR);
+
+       /* Disable each Tx queue */
+       for (i = 0; i < pdata->tx_q_count; i++) {
+               regval = readl(XLGMAC_MTL_REG(pdata, i, MTL_Q_TQOMR));
+               regval = XLGMAC_SET_REG_BITS(regval, MTL_Q_TQOMR_TXQEN_POS,
+                                            MTL_Q_TQOMR_TXQEN_LEN, 0);
+               writel(regval, XLGMAC_MTL_REG(pdata, i, MTL_Q_TQOMR));
+       }
+
+       /* Disable each Tx DMA channel */
+       channel = pdata->channel_head;
+       for (i = 0; i < pdata->channel_count; i++, channel++) {
+               if (!channel->tx_ring)
+                       break;
+
+               regval = readl(XLGMAC_DMA_REG(channel, DMA_CH_TCR));
+               regval = XLGMAC_SET_REG_BITS(regval, DMA_CH_TCR_ST_POS,
+                                            DMA_CH_TCR_ST_LEN, 0);
+               writel(regval, XLGMAC_DMA_REG(channel, DMA_CH_TCR));
+       }
+}
+
+static void xlgmac_prepare_rx_stop(struct xlgmac_pdata *pdata,
+                                  unsigned int queue)
+{
+       unsigned int rx_status, prxq, rxqsts;
+       unsigned long rx_timeout;
+
+       /* The Rx engine cannot be stopped if it is actively processing
+        * packets. Wait for the Rx queue to empty the Rx fifo.  Don't
+        * wait forever though...
+        */
+       rx_timeout = jiffies + (XLGMAC_DMA_STOP_TIMEOUT * HZ);
+       while (time_before(jiffies, rx_timeout)) {
+               rx_status = readl(XLGMAC_MTL_REG(pdata, queue, MTL_Q_RQDR));
+               prxq = XLGMAC_GET_REG_BITS(rx_status, MTL_Q_RQDR_PRXQ_POS,
+                                          MTL_Q_RQDR_PRXQ_LEN);
+               rxqsts = XLGMAC_GET_REG_BITS(rx_status, MTL_Q_RQDR_RXQSTS_POS,
+                                            MTL_Q_RQDR_RXQSTS_LEN);
+               if ((prxq == 0) && (rxqsts == 0))
+                       break;
+
+               usleep_range(500, 1000);
+       }
+
+       if (!time_before(jiffies, rx_timeout))
+               netdev_info(pdata->netdev,
+                           "timed out waiting for Rx queue %u to empty\n",
+                           queue);
+}
+
+static void xlgmac_enable_rx(struct xlgmac_pdata *pdata)
+{
+       struct xlgmac_channel *channel;
+       unsigned int i;
+       u32 regval;
+
+       /* Enable each Rx DMA channel */
+       channel = pdata->channel_head;
+       for (i = 0; i < pdata->channel_count; i++, channel++) {
+               if (!channel->rx_ring)
+                       break;
+
+               regval = readl(XLGMAC_DMA_REG(channel, DMA_CH_RCR));
+               regval = XLGMAC_SET_REG_BITS(regval, DMA_CH_RCR_SR_POS,
+                                            DMA_CH_RCR_SR_LEN, 1);
+               writel(regval, XLGMAC_DMA_REG(channel, DMA_CH_RCR));
+       }
+
+       /* Enable each Rx queue */
+       regval = 0;
+       for (i = 0; i < pdata->rx_q_count; i++)
+               regval |= (0x02 << (i << 1));
+       writel(regval, pdata->mac_regs + MAC_RQC0R);
+
+       /* Enable MAC Rx */
+       regval = readl(pdata->mac_regs + MAC_RCR);
+       regval = XLGMAC_SET_REG_BITS(regval, MAC_RCR_DCRCC_POS,
+                                    MAC_RCR_DCRCC_LEN, 1);
+       regval = XLGMAC_SET_REG_BITS(regval, MAC_RCR_CST_POS,
+                                    MAC_RCR_CST_LEN, 1);
+       regval = XLGMAC_SET_REG_BITS(regval, MAC_RCR_ACS_POS,
+                                    MAC_RCR_ACS_LEN, 1);
+       regval = XLGMAC_SET_REG_BITS(regval, MAC_RCR_RE_POS,
+                                    MAC_RCR_RE_LEN, 1);
+       writel(regval, pdata->mac_regs + MAC_RCR);
+}
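+
+/* Worked example for the MAC_RQC0R write: each Rx queue owns a 2-bit
+ * field and the value 0x02 written here enables it. With rx_q_count = 4
+ * the loop builds 0x02 | 0x08 | 0x20 | 0x80 = 0xaa.
+ */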
+
+static void xlgmac_disable_rx(struct xlgmac_pdata *pdata)
+{
+       struct xlgmac_channel *channel;
+       unsigned int i;
+       u32 regval;
+
+       /* Disable MAC Rx */
+       regval = readl(pdata->mac_regs + MAC_RCR);
+       regval = XLGMAC_SET_REG_BITS(regval, MAC_RCR_DCRCC_POS,
+                                    MAC_RCR_DCRCC_LEN, 0);
+       regval = XLGMAC_SET_REG_BITS(regval, MAC_RCR_CST_POS,
+                                    MAC_RCR_CST_LEN, 0);
+       regval = XLGMAC_SET_REG_BITS(regval, MAC_RCR_ACS_POS,
+                                    MAC_RCR_ACS_LEN, 0);
+       regval = XLGMAC_SET_REG_BITS(regval, MAC_RCR_RE_POS,
+                                    MAC_RCR_RE_LEN, 0);
+       writel(regval, pdata->mac_regs + MAC_RCR);
+
+       /* Prepare for Rx DMA channel stop */
+       for (i = 0; i < pdata->rx_q_count; i++)
+               xlgmac_prepare_rx_stop(pdata, i);
+
+       /* Disable each Rx queue */
+       writel(0, pdata->mac_regs + MAC_RQC0R);
+
+       /* Disable each Rx DMA channel */
+       channel = pdata->channel_head;
+       for (i = 0; i < pdata->channel_count; i++, channel++) {
+               if (!channel->rx_ring)
+                       break;
+
+               regval = readl(XLGMAC_DMA_REG(channel, DMA_CH_RCR));
+               regval = XLGMAC_SET_REG_BITS(regval, DMA_CH_RCR_SR_POS,
+                                            DMA_CH_RCR_SR_LEN, 0);
+               writel(regval, XLGMAC_DMA_REG(channel, DMA_CH_RCR));
+       }
+}
+
+static void xlgmac_tx_start_xmit(struct xlgmac_channel *channel,
+                                struct xlgmac_ring *ring)
+{
+       struct xlgmac_pdata *pdata = channel->pdata;
+       struct xlgmac_desc_data *desc_data;
+
+       /* Make sure everything is written before the register write */
+       wmb();
+
+       /* Issue a poll command to the Tx DMA by writing the address
+        * of the next free descriptor
+        */
+       desc_data = XLGMAC_GET_DESC_DATA(ring, ring->cur);
+       writel(lower_32_bits(desc_data->dma_desc_addr),
+              XLGMAC_DMA_REG(channel, DMA_CH_TDTR_LO));
+
+       /* Start the Tx timer */
+       if (pdata->tx_usecs && !channel->tx_timer_active) {
+               channel->tx_timer_active = 1;
+               mod_timer(&channel->tx_timer,
+                         jiffies + usecs_to_jiffies(pdata->tx_usecs));
+       }
+
+       ring->tx.xmit_more = 0;
+}
+
+static void xlgmac_dev_xmit(struct xlgmac_channel *channel)
+{
+       struct xlgmac_pdata *pdata = channel->pdata;
+       struct xlgmac_ring *ring = channel->tx_ring;
+       unsigned int tso_context, vlan_context;
+       struct xlgmac_desc_data *desc_data;
+       struct xlgmac_dma_desc *dma_desc;
+       struct xlgmac_pkt_info *pkt_info;
+       unsigned int csum, tso, vlan;
+       int start_index = ring->cur;
+       int cur_index = ring->cur;
+       unsigned int tx_set_ic;
+       int i;
+
+       pkt_info = &ring->pkt_info;
+       csum = XLGMAC_GET_REG_BITS(pkt_info->attributes,
+                                  TX_PACKET_ATTRIBUTES_CSUM_ENABLE_POS,
+                               TX_PACKET_ATTRIBUTES_CSUM_ENABLE_LEN);
+       tso = XLGMAC_GET_REG_BITS(pkt_info->attributes,
+                                 TX_PACKET_ATTRIBUTES_TSO_ENABLE_POS,
+                               TX_PACKET_ATTRIBUTES_TSO_ENABLE_LEN);
+       vlan = XLGMAC_GET_REG_BITS(pkt_info->attributes,
+                                  TX_PACKET_ATTRIBUTES_VLAN_CTAG_POS,
+                               TX_PACKET_ATTRIBUTES_VLAN_CTAG_LEN);
+
+       if (tso && (pkt_info->mss != ring->tx.cur_mss))
+               tso_context = 1;
+       else
+               tso_context = 0;
+
+       if (vlan && (pkt_info->vlan_ctag != ring->tx.cur_vlan_ctag))
+               vlan_context = 1;
+       else
+               vlan_context = 0;
+
+       /* Determine if an interrupt should be generated for this Tx:
+        *   Interrupt:
+        *     - Tx frame count exceeds the frame count setting
+        *     - Adding this Tx frame count to the frame count since the
+        *       last interrupt crosses the frame count setting
+        *   No interrupt:
+        *     - No frame count setting specified (ethtool -C ethX tx-frames 0)
+        *     - Adding this Tx frame count to the frame count since the
+        *       last interrupt does not cross the frame count setting
+        */
+       ring->coalesce_count += pkt_info->tx_packets;
+       if (!pdata->tx_frames)
+               tx_set_ic = 0;
+       else if (pkt_info->tx_packets > pdata->tx_frames)
+               tx_set_ic = 1;
+       else if ((ring->coalesce_count % pdata->tx_frames) <
+                pkt_info->tx_packets)
+               tx_set_ic = 1;
+       else
+               tx_set_ic = 0;
+
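+       /* Example with tx-frames = 16: a 10-packet burst that moves
+        * coalesce_count from 30 to 40 crosses a multiple of 16
+        * (40 % 16 = 8 < 10), so tx_set_ic is set; a 4-packet burst
+        * moving it from 40 to 44 does not (44 % 16 = 12 >= 4).
+        */
+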
+       desc_data = XLGMAC_GET_DESC_DATA(ring, cur_index);
+       dma_desc = desc_data->dma_desc;
+
+       /* Create a context descriptor if this is a TSO or VLAN packet */
+       if (tso_context || vlan_context) {
+               if (tso_context) {
+                       netif_dbg(pdata, tx_queued, pdata->netdev,
+                                 "TSO context descriptor, mss=%u\n",
+                                 pkt_info->mss);
+
+                       /* Set the MSS size */
+                       dma_desc->desc2 = XLGMAC_SET_REG_BITS_LE(
+                                               dma_desc->desc2,
+                                               TX_CONTEXT_DESC2_MSS_POS,
+                                               TX_CONTEXT_DESC2_MSS_LEN,
+                                               pkt_info->mss);
+
+                       /* Mark it as a CONTEXT descriptor */
+                       dma_desc->desc3 = XLGMAC_SET_REG_BITS_LE(
+                                               dma_desc->desc3,
+                                               TX_CONTEXT_DESC3_CTXT_POS,
+                                               TX_CONTEXT_DESC3_CTXT_LEN,
+                                               1);
+
+                       /* Indicate this descriptor contains the MSS */
+                       dma_desc->desc3 = XLGMAC_SET_REG_BITS_LE(
+                                               dma_desc->desc3,
+                                               TX_CONTEXT_DESC3_TCMSSV_POS,
+                                               TX_CONTEXT_DESC3_TCMSSV_LEN,
+                                               1);
+
+                       ring->tx.cur_mss = pkt_info->mss;
+               }
+
+               if (vlan_context) {
+                       netif_dbg(pdata, tx_queued, pdata->netdev,
+                                 "VLAN context descriptor, ctag=%u\n",
+                                 pkt_info->vlan_ctag);
+
+                       /* Mark it as a CONTEXT descriptor */
+                       dma_desc->desc3 = XLGMAC_SET_REG_BITS_LE(
+                                               dma_desc->desc3,
+                                               TX_CONTEXT_DESC3_CTXT_POS,
+                                               TX_CONTEXT_DESC3_CTXT_LEN,
+                                               1);
+
+                       /* Set the VLAN tag */
+                       dma_desc->desc3 = XLGMAC_SET_REG_BITS_LE(
+                                               dma_desc->desc3,
+                                               TX_CONTEXT_DESC3_VT_POS,
+                                               TX_CONTEXT_DESC3_VT_LEN,
+                                               pkt_info->vlan_ctag);
+
+                       /* Indicate this descriptor contains the VLAN tag */
+                       dma_desc->desc3 = XLGMAC_SET_REG_BITS_LE(
+                                               dma_desc->desc3,
+                                               TX_CONTEXT_DESC3_VLTV_POS,
+                                               TX_CONTEXT_DESC3_VLTV_LEN,
+                                               1);
+
+                       ring->tx.cur_vlan_ctag = pkt_info->vlan_ctag;
+               }
+
+               cur_index++;
+               desc_data = XLGMAC_GET_DESC_DATA(ring, cur_index);
+               dma_desc = desc_data->dma_desc;
+       }
+
+       /* Update buffer address (for TSO this is the header) */
+       dma_desc->desc0 = cpu_to_le32(lower_32_bits(desc_data->skb_dma));
+       dma_desc->desc1 = cpu_to_le32(upper_32_bits(desc_data->skb_dma));
+
+       /* Update the buffer length */
+       dma_desc->desc2 = XLGMAC_SET_REG_BITS_LE(
+                               dma_desc->desc2,
+                               TX_NORMAL_DESC2_HL_B1L_POS,
+                               TX_NORMAL_DESC2_HL_B1L_LEN,
+                               desc_data->skb_dma_len);
+
+       /* VLAN tag insertion check */
+       if (vlan)
+               dma_desc->desc2 = XLGMAC_SET_REG_BITS_LE(
+                                       dma_desc->desc2,
+                                       TX_NORMAL_DESC2_VTIR_POS,
+                                       TX_NORMAL_DESC2_VTIR_LEN,
+                                       TX_NORMAL_DESC2_VLAN_INSERT);
+
+       /* Timestamp enablement check */
+       if (XLGMAC_GET_REG_BITS(pkt_info->attributes,
+                               TX_PACKET_ATTRIBUTES_PTP_POS,
+                               TX_PACKET_ATTRIBUTES_PTP_LEN))
+               dma_desc->desc2 = XLGMAC_SET_REG_BITS_LE(
+                                       dma_desc->desc2,
+                                       TX_NORMAL_DESC2_TTSE_POS,
+                                       TX_NORMAL_DESC2_TTSE_LEN,
+                                       1);
+
+       /* Mark it as First Descriptor */
+       dma_desc->desc3 = XLGMAC_SET_REG_BITS_LE(
+                               dma_desc->desc3,
+                               TX_NORMAL_DESC3_FD_POS,
+                               TX_NORMAL_DESC3_FD_LEN,
+                               1);
+
+       /* Mark it as a NORMAL descriptor */
+       dma_desc->desc3 = XLGMAC_SET_REG_BITS_LE(
+                               dma_desc->desc3,
+                               TX_NORMAL_DESC3_CTXT_POS,
+                               TX_NORMAL_DESC3_CTXT_LEN,
+                               0);
+
+       /* Set OWN bit if not the first descriptor */
+       if (cur_index != start_index)
+               dma_desc->desc3 = XLGMAC_SET_REG_BITS_LE(
+                                       dma_desc->desc3,
+                                       TX_NORMAL_DESC3_OWN_POS,
+                                       TX_NORMAL_DESC3_OWN_LEN,
+                                       1);
+
+       if (tso) {
+               /* Enable TSO */
+               dma_desc->desc3 = XLGMAC_SET_REG_BITS_LE(
+                                       dma_desc->desc3,
+                                       TX_NORMAL_DESC3_TSE_POS,
+                                       TX_NORMAL_DESC3_TSE_LEN, 1);
+               dma_desc->desc3 = XLGMAC_SET_REG_BITS_LE(
+                                       dma_desc->desc3,
+                                       TX_NORMAL_DESC3_TCPPL_POS,
+                                       TX_NORMAL_DESC3_TCPPL_LEN,
+                                       pkt_info->tcp_payload_len);
+               dma_desc->desc3 = XLGMAC_SET_REG_BITS_LE(
+                                       dma_desc->desc3,
+                                       TX_NORMAL_DESC3_TCPHDRLEN_POS,
+                                       TX_NORMAL_DESC3_TCPHDRLEN_LEN,
+                                       pkt_info->tcp_header_len / 4);
+
+               pdata->stats.tx_tso_packets++;
+       } else {
+               /* Enable CRC and Pad Insertion */
+               dma_desc->desc3 = XLGMAC_SET_REG_BITS_LE(
+                                       dma_desc->desc3,
+                                       TX_NORMAL_DESC3_CPC_POS,
+                                       TX_NORMAL_DESC3_CPC_LEN, 0);
+
+               /* Enable HW CSUM */
+               if (csum)
+                       dma_desc->desc3 = XLGMAC_SET_REG_BITS_LE(
+                                               dma_desc->desc3,
+                                               TX_NORMAL_DESC3_CIC_POS,
+                                               TX_NORMAL_DESC3_CIC_LEN,
+                                               0x3);
+
+               /* Set the total length to be transmitted */
+               dma_desc->desc3 = XLGMAC_SET_REG_BITS_LE(
+                                       dma_desc->desc3,
+                                       TX_NORMAL_DESC3_FL_POS,
+                                       TX_NORMAL_DESC3_FL_LEN,
+                                       pkt_info->length);
+       }
+
+       for (i = cur_index - start_index + 1; i < pkt_info->desc_count; i++) {
+               cur_index++;
+               desc_data = XLGMAC_GET_DESC_DATA(ring, cur_index);
+               dma_desc = desc_data->dma_desc;
+
+               /* Update buffer address */
+               dma_desc->desc0 =
+                       cpu_to_le32(lower_32_bits(desc_data->skb_dma));
+               dma_desc->desc1 =
+                       cpu_to_le32(upper_32_bits(desc_data->skb_dma));
+
+               /* Update the buffer length */
+               dma_desc->desc2 = XLGMAC_SET_REG_BITS_LE(
+                                       dma_desc->desc2,
+                                       TX_NORMAL_DESC2_HL_B1L_POS,
+                                       TX_NORMAL_DESC2_HL_B1L_LEN,
+                                       desc_data->skb_dma_len);
+
+               /* Set OWN bit */
+               dma_desc->desc3 = XLGMAC_SET_REG_BITS_LE(
+                                       dma_desc->desc3,
+                                       TX_NORMAL_DESC3_OWN_POS,
+                                       TX_NORMAL_DESC3_OWN_LEN, 1);
+
+               /* Mark it as NORMAL descriptor */
+               dma_desc->desc3 = XLGMAC_SET_REG_BITS_LE(
+                                       dma_desc->desc3,
+                                       TX_NORMAL_DESC3_CTXT_POS,
+                                       TX_NORMAL_DESC3_CTXT_LEN, 0);
+
+               /* Enable HW CSUM */
+               if (csum)
+                       dma_desc->desc3 = XLGMAC_SET_REG_BITS_LE(
+                                               dma_desc->desc3,
+                                               TX_NORMAL_DESC3_CIC_POS,
+                                               TX_NORMAL_DESC3_CIC_LEN,
+                                               0x3);
+       }
+
+       /* Set LAST bit for the last descriptor */
+       dma_desc->desc3 = XLGMAC_SET_REG_BITS_LE(
+                               dma_desc->desc3,
+                               TX_NORMAL_DESC3_LD_POS,
+                               TX_NORMAL_DESC3_LD_LEN, 1);
+
+       /* Set IC bit based on Tx coalescing settings */
+       if (tx_set_ic)
+               dma_desc->desc2 = XLGMAC_SET_REG_BITS_LE(
+                                       dma_desc->desc2,
+                                       TX_NORMAL_DESC2_IC_POS,
+                                       TX_NORMAL_DESC2_IC_LEN, 1);
+
+       /* Save the Tx info to report back during cleanup */
+       desc_data->tx.packets = pkt_info->tx_packets;
+       desc_data->tx.bytes = pkt_info->tx_bytes;
+
+       /* In case the Tx DMA engine is running, make sure everything
+        * is written to the descriptor(s) before setting the OWN bit
+        * for the first descriptor
+        */
+       dma_wmb();
+
+       /* Set OWN bit for the first descriptor */
+       desc_data = XLGMAC_GET_DESC_DATA(ring, start_index);
+       dma_desc = desc_data->dma_desc;
+       dma_desc->desc3 = XLGMAC_SET_REG_BITS_LE(
+                               dma_desc->desc3,
+                               TX_NORMAL_DESC3_OWN_POS,
+                               TX_NORMAL_DESC3_OWN_LEN, 1);
+
+       if (netif_msg_tx_queued(pdata))
+               xlgmac_dump_tx_desc(pdata, ring, start_index,
+                                   pkt_info->desc_count, 1);
+
+       /* Make sure ownership is written to the descriptor */
+       smp_wmb();
+
+       ring->cur = cur_index + 1;
+       if (!pkt_info->skb->xmit_more ||
+           netif_xmit_stopped(netdev_get_tx_queue(pdata->netdev,
+                                                  channel->queue_index)))
+               xlgmac_tx_start_xmit(channel, ring);
+       else
+               ring->tx.xmit_more = 1;
+
+       XLGMAC_PR("%s: descriptors %u to %u written\n",
+                 channel->name, start_index & (ring->dma_desc_count - 1),
+                 (ring->cur - 1) & (ring->dma_desc_count - 1));
+}
+
+static void xlgmac_get_rx_tstamp(struct xlgmac_pkt_info *pkt_info,
+                                struct xlgmac_dma_desc *dma_desc)
+{
+       u32 tsa, tsd;
+       u64 nsec;
+
+       tsa = XLGMAC_GET_REG_BITS_LE(dma_desc->desc3,
+                                    RX_CONTEXT_DESC3_TSA_POS,
+                               RX_CONTEXT_DESC3_TSA_LEN);
+       tsd = XLGMAC_GET_REG_BITS_LE(dma_desc->desc3,
+                                    RX_CONTEXT_DESC3_TSD_POS,
+                               RX_CONTEXT_DESC3_TSD_LEN);
+       if (tsa && !tsd) {
+               nsec = le32_to_cpu(dma_desc->desc1);
+               nsec <<= 32;
+               nsec |= le32_to_cpu(dma_desc->desc0);
+               if (nsec != 0xffffffffffffffffULL) {
+                       pkt_info->rx_tstamp = nsec;
+                       pkt_info->attributes = XLGMAC_SET_REG_BITS(
+                                       pkt_info->attributes,
+                                       RX_PACKET_ATTRIBUTES_RX_TSTAMP_POS,
+                                       RX_PACKET_ATTRIBUTES_RX_TSTAMP_LEN,
+                                       1);
+               }
+       }
+}
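+
+/* When the context descriptor reports a valid capture (TSA set, TSD
+ * clear), the 64-bit timestamp is assembled as (desc1 << 32) | desc0;
+ * an all-ones value marks an invalid capture and is ignored.
+ */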
+
+static void xlgmac_tx_desc_reset(struct xlgmac_desc_data *desc_data)
+{
+       struct xlgmac_dma_desc *dma_desc = desc_data->dma_desc;
+
+       /* Reset the Tx descriptor
+        *   Set buffer 1 (lo) address to zero
+        *   Set buffer 1 (hi) address to zero
+        *   Reset desc2 control bits (IC, TTSE, B2L & B1L)
+        *   Reset desc3 control bits (OWN, CTXT, FD, LD, CPC, CIC, etc)
+        */
+       dma_desc->desc0 = 0;
+       dma_desc->desc1 = 0;
+       dma_desc->desc2 = 0;
+       dma_desc->desc3 = 0;
+
+       /* Make sure the cleared descriptor (OWN bit included) is visible
+        * to the DMA engine before the descriptor is reused
+        */
+       dma_wmb();
+}
+
+static void xlgmac_tx_desc_init(struct xlgmac_channel *channel)
+{
+       struct xlgmac_ring *ring = channel->tx_ring;
+       struct xlgmac_desc_data *desc_data;
+       int start_index = ring->cur;
+       int i;
+
+       /* Initialize all descriptors */
+       for (i = 0; i < ring->dma_desc_count; i++) {
+               desc_data = XLGMAC_GET_DESC_DATA(ring, i);
+
+               /* Initialize Tx descriptor */
+               xlgmac_tx_desc_reset(desc_data);
+       }
+
+       /* Update the total number of Tx descriptors */
+       writel(ring->dma_desc_count - 1, XLGMAC_DMA_REG(channel, DMA_CH_TDRLR));
+
+       /* Update the starting address of descriptor ring */
+       desc_data = XLGMAC_GET_DESC_DATA(ring, start_index);
+       writel(upper_32_bits(desc_data->dma_desc_addr),
+              XLGMAC_DMA_REG(channel, DMA_CH_TDLR_HI));
+       writel(lower_32_bits(desc_data->dma_desc_addr),
+              XLGMAC_DMA_REG(channel, DMA_CH_TDLR_LO));
+}
+
+static void xlgmac_rx_desc_reset(struct xlgmac_pdata *pdata,
+                                struct xlgmac_desc_data *desc_data,
+                                unsigned int index)
+{
+       struct xlgmac_dma_desc *dma_desc = desc_data->dma_desc;
+       unsigned int rx_frames = pdata->rx_frames;
+       unsigned int rx_usecs = pdata->rx_usecs;
+       dma_addr_t hdr_dma, buf_dma;
+       unsigned int inte;
+
+       if (!rx_usecs && !rx_frames) {
+               /* No coalescing, interrupt for every descriptor */
+               inte = 1;
+       } else {
+               /* Set interrupt based on Rx frame coalescing setting */
+               if (rx_frames && !((index + 1) % rx_frames))
+                       inte = 1;
+               else
+                       inte = 0;
+       }
+
+       /* Reset the Rx descriptor
+        *   Set buffer 1 (lo) address to header dma address (lo)
+        *   Set buffer 1 (hi) address to header dma address (hi)
+        *   Set buffer 2 (lo) address to buffer dma address (lo)
+        *   Set buffer 2 (hi) address to buffer dma address (hi) and
+        *     set control bits OWN and INTE
+        */
+       hdr_dma = desc_data->rx.hdr.dma_base + desc_data->rx.hdr.dma_off;
+       buf_dma = desc_data->rx.buf.dma_base + desc_data->rx.buf.dma_off;
+       dma_desc->desc0 = cpu_to_le32(lower_32_bits(hdr_dma));
+       dma_desc->desc1 = cpu_to_le32(upper_32_bits(hdr_dma));
+       dma_desc->desc2 = cpu_to_le32(lower_32_bits(buf_dma));
+       dma_desc->desc3 = cpu_to_le32(upper_32_bits(buf_dma));
+
+       dma_desc->desc3 = XLGMAC_SET_REG_BITS_LE(
+                               dma_desc->desc3,
+                               RX_NORMAL_DESC3_INTE_POS,
+                               RX_NORMAL_DESC3_INTE_LEN,
+                               inte);
+
+       /* Since the Rx DMA engine is likely running, make sure everything
+        * is written to the descriptor(s) before setting the OWN bit
+        * for the descriptor
+        */
+       dma_wmb();
+
+       dma_desc->desc3 = XLGMAC_SET_REG_BITS_LE(
+                               dma_desc->desc3,
+                               RX_NORMAL_DESC3_OWN_POS,
+                               RX_NORMAL_DESC3_OWN_LEN,
+                               1);
+
+       /* Make sure ownership is written to the descriptor */
+       dma_wmb();
+}
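+
+/* Rx interrupt coalescing example: with rx_frames = 8 and rx_usecs
+ * non-zero, only descriptors at index 7, 15, 23, ... get INTE set, so
+ * the Rx watchdog (RIWT) covers the gaps between frame-count interrupts.
+ */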
+
+static void xlgmac_rx_desc_init(struct xlgmac_channel *channel)
+{
+       struct xlgmac_pdata *pdata = channel->pdata;
+       struct xlgmac_ring *ring = channel->rx_ring;
+       unsigned int start_index = ring->cur;
+       struct xlgmac_desc_data *desc_data;
+       unsigned int i;
+
+       /* Initialize all descriptors */
+       for (i = 0; i < ring->dma_desc_count; i++) {
+               desc_data = XLGMAC_GET_DESC_DATA(ring, i);
+
+               /* Initialize Rx descriptor */
+               xlgmac_rx_desc_reset(pdata, desc_data, i);
+       }
+
+       /* Update the total number of Rx descriptors */
+       writel(ring->dma_desc_count - 1, XLGMAC_DMA_REG(channel, DMA_CH_RDRLR));
+
+       /* Update the starting address of descriptor ring */
+       desc_data = XLGMAC_GET_DESC_DATA(ring, start_index);
+       writel(upper_32_bits(desc_data->dma_desc_addr),
+              XLGMAC_DMA_REG(channel, DMA_CH_RDLR_HI));
+       writel(lower_32_bits(desc_data->dma_desc_addr),
+              XLGMAC_DMA_REG(channel, DMA_CH_RDLR_LO));
+
+       /* Update the Rx Descriptor Tail Pointer */
+       desc_data = XLGMAC_GET_DESC_DATA(ring, start_index +
+                                         ring->dma_desc_count - 1);
+       writel(lower_32_bits(desc_data->dma_desc_addr),
+              XLGMAC_DMA_REG(channel, DMA_CH_RDTR_LO));
+}
+
+static int xlgmac_is_context_desc(struct xlgmac_dma_desc *dma_desc)
+{
+       /* Rx and Tx share CTXT bit, so check TDES3.CTXT bit */
+       return XLGMAC_GET_REG_BITS_LE(dma_desc->desc3,
+                               TX_NORMAL_DESC3_CTXT_POS,
+                               TX_NORMAL_DESC3_CTXT_LEN);
+}
+
+static int xlgmac_is_last_desc(struct xlgmac_dma_desc *dma_desc)
+{
+       /* Rx and Tx share LD bit, so check TDES3.LD bit */
+       return XLGMAC_GET_REG_BITS_LE(dma_desc->desc3,
+                               TX_NORMAL_DESC3_LD_POS,
+                               TX_NORMAL_DESC3_LD_LEN);
+}
+
+static int xlgmac_disable_tx_flow_control(struct xlgmac_pdata *pdata)
+{
+       unsigned int max_q_count, q_count;
+       unsigned int reg, regval;
+       unsigned int i;
+
+       /* Clear MTL flow control */
+       for (i = 0; i < pdata->rx_q_count; i++) {
+               regval = readl(XLGMAC_MTL_REG(pdata, i, MTL_Q_RQOMR));
+               regval = XLGMAC_SET_REG_BITS(regval, MTL_Q_RQOMR_EHFC_POS,
+                                            MTL_Q_RQOMR_EHFC_LEN, 0);
+               writel(regval, XLGMAC_MTL_REG(pdata, i, MTL_Q_RQOMR));
+       }
+
+       /* Clear MAC flow control */
+       max_q_count = XLGMAC_MAX_FLOW_CONTROL_QUEUES;
+       q_count = min_t(unsigned int, pdata->tx_q_count, max_q_count);
+       reg = MAC_Q0TFCR;
+       for (i = 0; i < q_count; i++) {
+               regval = readl(pdata->mac_regs + reg);
+               regval = XLGMAC_SET_REG_BITS(regval,
+                                            MAC_Q0TFCR_TFE_POS,
+                                       MAC_Q0TFCR_TFE_LEN,
+                                       0);
+               writel(regval, pdata->mac_regs + reg);
+
+               reg += MAC_QTFCR_INC;
+       }
+
+       return 0;
+}
+
+static int xlgmac_enable_tx_flow_control(struct xlgmac_pdata *pdata)
+{
+       unsigned int max_q_count, q_count;
+       unsigned int reg, regval;
+       unsigned int i;
+
+       /* Set MTL flow control */
+       for (i = 0; i < pdata->rx_q_count; i++) {
+               regval = readl(XLGMAC_MTL_REG(pdata, i, MTL_Q_RQOMR));
+               regval = XLGMAC_SET_REG_BITS(regval, MTL_Q_RQOMR_EHFC_POS,
+                                            MTL_Q_RQOMR_EHFC_LEN, 1);
+               writel(regval, XLGMAC_MTL_REG(pdata, i, MTL_Q_RQOMR));
+       }
+
+       /* Set MAC flow control */
+       max_q_count = XLGMAC_MAX_FLOW_CONTROL_QUEUES;
+       q_count = min_t(unsigned int, pdata->tx_q_count, max_q_count);
+       reg = MAC_Q0TFCR;
+       for (i = 0; i < q_count; i++) {
+               regval = readl(pdata->mac_regs + reg);
+
+               /* Enable transmit flow control */
+               regval = XLGMAC_SET_REG_BITS(regval, MAC_Q0TFCR_TFE_POS,
+                                            MAC_Q0TFCR_TFE_LEN, 1);
+               /* Set pause time */
+               regval = XLGMAC_SET_REG_BITS(regval, MAC_Q0TFCR_PT_POS,
+                                            MAC_Q0TFCR_PT_LEN, 0xffff);
+
+               writel(regval, pdata->mac_regs + reg);
+
+               reg += MAC_QTFCR_INC;
+       }
+
+       return 0;
+}
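+
+/* The pause time written to MAC_Q0TFCR_PT is in 512-bit-time quanta per
+ * IEEE 802.3, so 0xffff requests the longest possible pause; at 25 Gb/s
+ * one quantum is 512 / 25e9 s = 20.48 ns.
+ */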
+
+static int xlgmac_disable_rx_flow_control(struct xlgmac_pdata *pdata)
+{
+       u32 regval;
+
+       regval = readl(pdata->mac_regs + MAC_RFCR);
+       regval = XLGMAC_SET_REG_BITS(regval, MAC_RFCR_RFE_POS,
+                                    MAC_RFCR_RFE_LEN, 0);
+       writel(regval, pdata->mac_regs + MAC_RFCR);
+
+       return 0;
+}
+
+static int xlgmac_enable_rx_flow_control(struct xlgmac_pdata *pdata)
+{
+       u32 regval;
+
+       regval = readl(pdata->mac_regs + MAC_RFCR);
+       regval = XLGMAC_SET_REG_BITS(regval, MAC_RFCR_RFE_POS,
+                                    MAC_RFCR_RFE_LEN, 1);
+       writel(regval, pdata->mac_regs + MAC_RFCR);
+
+       return 0;
+}
+
+static int xlgmac_config_tx_flow_control(struct xlgmac_pdata *pdata)
+{
+       if (pdata->tx_pause)
+               xlgmac_enable_tx_flow_control(pdata);
+       else
+               xlgmac_disable_tx_flow_control(pdata);
+
+       return 0;
+}
+
+static int xlgmac_config_rx_flow_control(struct xlgmac_pdata *pdata)
+{
+       if (pdata->rx_pause)
+               xlgmac_enable_rx_flow_control(pdata);
+       else
+               xlgmac_disable_rx_flow_control(pdata);
+
+       return 0;
+}
+
+static int xlgmac_config_rx_coalesce(struct xlgmac_pdata *pdata)
+{
+       struct xlgmac_channel *channel;
+       unsigned int i;
+       u32 regval;
+
+       channel = pdata->channel_head;
+       for (i = 0; i < pdata->channel_count; i++, channel++) {
+               if (!channel->rx_ring)
+                       break;
+
+               regval = readl(XLGMAC_DMA_REG(channel, DMA_CH_RIWT));
+               regval = XLGMAC_SET_REG_BITS(regval, DMA_CH_RIWT_RWT_POS,
+                                            DMA_CH_RIWT_RWT_LEN,
+                                            pdata->rx_riwt);
+               writel(regval, XLGMAC_DMA_REG(channel, DMA_CH_RIWT));
+       }
+
+       return 0;
+}
+
+static void xlgmac_config_flow_control(struct xlgmac_pdata *pdata)
+{
+       xlgmac_config_tx_flow_control(pdata);
+       xlgmac_config_rx_flow_control(pdata);
+}
+
+static void xlgmac_config_rx_fep_enable(struct xlgmac_pdata *pdata)
+{
+       unsigned int i;
+       u32 regval;
+
+       for (i = 0; i < pdata->rx_q_count; i++) {
+               regval = readl(XLGMAC_MTL_REG(pdata, i, MTL_Q_RQOMR));
+               regval = XLGMAC_SET_REG_BITS(regval, MTL_Q_RQOMR_FEP_POS,
+                                            MTL_Q_RQOMR_FEP_LEN, 1);
+               writel(regval, XLGMAC_MTL_REG(pdata, i, MTL_Q_RQOMR));
+       }
+}
+
+static void xlgmac_config_rx_fup_enable(struct xlgmac_pdata *pdata)
+{
+       unsigned int i;
+       u32 regval;
+
+       for (i = 0; i < pdata->rx_q_count; i++) {
+               regval = readl(XLGMAC_MTL_REG(pdata, i, MTL_Q_RQOMR));
+               regval = XLGMAC_SET_REG_BITS(regval, MTL_Q_RQOMR_FUP_POS,
+                                            MTL_Q_RQOMR_FUP_LEN, 1);
+               writel(regval, XLGMAC_MTL_REG(pdata, i, MTL_Q_RQOMR));
+       }
+}
+
+static int xlgmac_config_tx_coalesce(struct xlgmac_pdata *pdata)
+{
+       return 0;
+}
+
+static void xlgmac_config_rx_buffer_size(struct xlgmac_pdata *pdata)
+{
+       struct xlgmac_channel *channel;
+       unsigned int i;
+       u32 regval;
+
+       channel = pdata->channel_head;
+       for (i = 0; i < pdata->channel_count; i++, channel++) {
+               if (!channel->rx_ring)
+                       break;
+
+               regval = readl(XLGMAC_DMA_REG(channel, DMA_CH_RCR));
+               regval = XLGMAC_SET_REG_BITS(regval, DMA_CH_RCR_RBSZ_POS,
+                                            DMA_CH_RCR_RBSZ_LEN,
+                                       pdata->rx_buf_size);
+               writel(regval, XLGMAC_DMA_REG(channel, DMA_CH_RCR));
+       }
+}
+
+static void xlgmac_config_tso_mode(struct xlgmac_pdata *pdata)
+{
+       struct xlgmac_channel *channel;
+       unsigned int i;
+       u32 regval;
+
+       channel = pdata->channel_head;
+       for (i = 0; i < pdata->channel_count; i++, channel++) {
+               if (!channel->tx_ring)
+                       break;
+
+               if (pdata->hw_feat.tso) {
+                       regval = readl(XLGMAC_DMA_REG(channel, DMA_CH_TCR));
+                       regval = XLGMAC_SET_REG_BITS(regval, DMA_CH_TCR_TSE_POS,
+                                                    DMA_CH_TCR_TSE_LEN, 1);
+                       writel(regval, XLGMAC_DMA_REG(channel, DMA_CH_TCR));
+               }
+       }
+}
+
+static void xlgmac_config_sph_mode(struct xlgmac_pdata *pdata)
+{
+       struct xlgmac_channel *channel;
+       unsigned int i;
+       u32 regval;
+
+       channel = pdata->channel_head;
+       for (i = 0; i < pdata->channel_count; i++, channel++) {
+               if (!channel->rx_ring)
+                       break;
+
+               regval = readl(XLGMAC_DMA_REG(channel, DMA_CH_CR));
+               regval = XLGMAC_SET_REG_BITS(regval, DMA_CH_CR_SPH_POS,
+                                            DMA_CH_CR_SPH_LEN, 1);
+               writel(regval, XLGMAC_DMA_REG(channel, DMA_CH_CR));
+       }
+
+       regval = readl(pdata->mac_regs + MAC_RCR);
+       regval = XLGMAC_SET_REG_BITS(regval, MAC_RCR_HDSMS_POS,
+                                    MAC_RCR_HDSMS_LEN,
+                               XLGMAC_SPH_HDSMS_SIZE);
+       writel(regval, pdata->mac_regs + MAC_RCR);
+}
+
+static unsigned int xlgmac_usec_to_riwt(struct xlgmac_pdata *pdata,
+                                       unsigned int usec)
+{
+       unsigned long rate;
+       unsigned int ret;
+
+       rate = pdata->sysclk_rate;
+
+       /* Convert the input usec value to the watchdog timer value. Each
+        * watchdog timer value is equivalent to 256 clock cycles.
+        * Calculate the required value as:
+        *   ( usec * ( system_clock_hz / 10^6 ) ) / 256
+        */
+       ret = (usec * (rate / 1000000)) / 256;
+
+       return ret;
+}
+
+static unsigned int xlgmac_riwt_to_usec(struct xlgmac_pdata *pdata,
+                                       unsigned int riwt)
+{
+       unsigned long rate;
+       unsigned int ret;
+
+       rate = pdata->sysclk_rate;
+
+       /* Convert the input watchdog timer value to the usec value. Each
+        * watchdog timer value is equivalent to 256 clock cycles.
+        * Calculate the required value as:
+        *   ( riwt * 256 ) / ( system_clock_hz / 10^6 )
+        */
+       ret = (riwt * 256) / (rate / 1000000);
+
+       return ret;
+}
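+
+/* Worked round trip (assuming pdata->sysclk_rate = 125000000, i.e.
+ * 125 MHz): xlgmac_usec_to_riwt(pdata, 200) = (200 * 125) / 256 = 97,
+ * and xlgmac_riwt_to_usec(pdata, 97) = (97 * 256) / 125 = 198, so the
+ * integer division makes the conversion lossy by up to one riwt unit.
+ */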
+
+static int xlgmac_config_rx_threshold(struct xlgmac_pdata *pdata,
+                                     unsigned int val)
+{
+       unsigned int i;
+       u32 regval;
+
+       for (i = 0; i < pdata->rx_q_count; i++) {
+               regval = readl(XLGMAC_MTL_REG(pdata, i, MTL_Q_RQOMR));
+               regval = XLGMAC_SET_REG_BITS(regval, MTL_Q_RQOMR_RTC_POS,
+                                            MTL_Q_RQOMR_RTC_LEN, val);
+               writel(regval, XLGMAC_MTL_REG(pdata, i, MTL_Q_RQOMR));
+       }
+
+       return 0;
+}
+
+static void xlgmac_config_mtl_mode(struct xlgmac_pdata *pdata)
+{
+       unsigned int i;
+       u32 regval;
+
+       /* Set Tx to weighted round robin scheduling algorithm */
+       regval = readl(pdata->mac_regs + MTL_OMR);
+       regval = XLGMAC_SET_REG_BITS(regval, MTL_OMR_ETSALG_POS,
+                                    MTL_OMR_ETSALG_LEN, MTL_ETSALG_WRR);
+       writel(regval, pdata->mac_regs + MTL_OMR);
+
+       /* Set Tx traffic classes to use WRR algorithm with equal weights */
+       for (i = 0; i < pdata->hw_feat.tc_cnt; i++) {
+               regval = readl(XLGMAC_MTL_REG(pdata, i, MTL_TC_ETSCR));
+               regval = XLGMAC_SET_REG_BITS(regval, MTL_TC_ETSCR_TSA_POS,
+                                            MTL_TC_ETSCR_TSA_LEN, MTL_TSA_ETS);
+               writel(regval, XLGMAC_MTL_REG(pdata, i, MTL_TC_ETSCR));
+
+               regval = readl(XLGMAC_MTL_REG(pdata, i, MTL_TC_QWR));
+               regval = XLGMAC_SET_REG_BITS(regval, MTL_TC_QWR_QW_POS,
+                                            MTL_TC_QWR_QW_LEN, 1);
+               writel(regval, XLGMAC_MTL_REG(pdata, i, MTL_TC_QWR));
+       }
+
+       /* Set Rx to strict priority algorithm */
+       regval = readl(pdata->mac_regs + MTL_OMR);
+       regval = XLGMAC_SET_REG_BITS(regval, MTL_OMR_RAA_POS,
+                                    MTL_OMR_RAA_LEN, MTL_RAA_SP);
+       writel(regval, pdata->mac_regs + MTL_OMR);
+}
+
+static void xlgmac_config_queue_mapping(struct xlgmac_pdata *pdata)
+{
+       unsigned int ppq, ppq_extra, prio, prio_queues;
+       unsigned int qptc, qptc_extra, queue;
+       unsigned int reg, regval;
+       unsigned int mask;
+       unsigned int i, j;
+
+       /* Map the MTL Tx Queues to Traffic Classes
+        *   Note: Tx Queues >= Traffic Classes
+        */
+       qptc = pdata->tx_q_count / pdata->hw_feat.tc_cnt;
+       qptc_extra = pdata->tx_q_count % pdata->hw_feat.tc_cnt;
+
+       for (i = 0, queue = 0; i < pdata->hw_feat.tc_cnt; i++) {
+               for (j = 0; j < qptc; j++) {
+                       netif_dbg(pdata, drv, pdata->netdev,
+                                 "TXq%u mapped to TC%u\n", queue, i);
+                       regval = readl(XLGMAC_MTL_REG(pdata, queue,
+                                                     MTL_Q_TQOMR));
+                       regval = XLGMAC_SET_REG_BITS(regval,
+                                                    MTL_Q_TQOMR_Q2TCMAP_POS,
+                                                    MTL_Q_TQOMR_Q2TCMAP_LEN,
+                                                    i);
+                       writel(regval, XLGMAC_MTL_REG(pdata, queue,
+                                                     MTL_Q_TQOMR));
+                       queue++;
+               }
+
+               if (i < qptc_extra) {
+                       netif_dbg(pdata, drv, pdata->netdev,
+                                 "TXq%u mapped to TC%u\n", queue, i);
+                       regval = readl(XLGMAC_MTL_REG(pdata, queue,
+                                                     MTL_Q_TQOMR));
+                       regval = XLGMAC_SET_REG_BITS(regval,
+                                                    MTL_Q_TQOMR_Q2TCMAP_POS,
+                                                    MTL_Q_TQOMR_Q2TCMAP_LEN,
+                                                    i);
+                       writel(regval, XLGMAC_MTL_REG(pdata, queue,
+                                                     MTL_Q_TQOMR));
+                       queue++;
+               }
+       }
+
+       /* Map the 8 VLAN priority values to available MTL Rx queues */
+       prio_queues = min_t(unsigned int, IEEE_8021QAZ_MAX_TCS,
+                           pdata->rx_q_count);
+       ppq = IEEE_8021QAZ_MAX_TCS / prio_queues;
+       ppq_extra = IEEE_8021QAZ_MAX_TCS % prio_queues;
+
+       reg = MAC_RQC2R;
+       regval = 0;
+       for (i = 0, prio = 0; i < prio_queues;) {
+               mask = 0;
+               for (j = 0; j < ppq; j++) {
+                       netif_dbg(pdata, drv, pdata->netdev,
+                                 "PRIO%u mapped to RXq%u\n", prio, i);
+                       mask |= (1 << prio);
+                       prio++;
+               }
+
+               if (i < ppq_extra) {
+                       netif_dbg(pdata, drv, pdata->netdev,
+                                 "PRIO%u mapped to RXq%u\n", prio, i);
+                       mask |= (1 << prio);
+                       prio++;
+               }
+
+               regval |= (mask << ((i++ % MAC_RQC2_Q_PER_REG) << 3));
+
+               if ((i % MAC_RQC2_Q_PER_REG) && (i != prio_queues))
+                       continue;
+
+               writel(regval, pdata->mac_regs + reg);
+               reg += MAC_RQC2_INC;
+               regval = 0;
+       }
+
+       /* Configure one to one, MTL Rx queue to DMA Rx channel mapping
+        *  ie Q0 <--> CH0, Q1 <--> CH1 ... Q11 <--> CH11
+        */
+       reg = MTL_RQDCM0R;
+       regval = readl(pdata->mac_regs + reg);
+       regval |= (MTL_RQDCM0R_Q0MDMACH | MTL_RQDCM0R_Q1MDMACH |
+                   MTL_RQDCM0R_Q2MDMACH | MTL_RQDCM0R_Q3MDMACH);
+       writel(regval, pdata->mac_regs + reg);
+
+       reg += MTL_RQDCM_INC;
+       regval = readl(pdata->mac_regs + reg);
+       regval |= (MTL_RQDCM1R_Q4MDMACH | MTL_RQDCM1R_Q5MDMACH |
+                   MTL_RQDCM1R_Q6MDMACH | MTL_RQDCM1R_Q7MDMACH);
+       writel(regval, pdata->mac_regs + reg);
+
+       reg += MTL_RQDCM_INC;
+       regval = readl(pdata->mac_regs + reg);
+       regval |= (MTL_RQDCM2R_Q8MDMACH | MTL_RQDCM2R_Q9MDMACH |
+                   MTL_RQDCM2R_Q10MDMACH | MTL_RQDCM2R_Q11MDMACH);
+       writel(regval, pdata->mac_regs + reg);
+}
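+
+/* Priority mapping example: with rx_q_count = 4, prio_queues = 4, so
+ * ppq = 8 / 4 = 2 and ppq_extra = 0; priorities {0,1} land in RXq0,
+ * {2,3} in RXq1, and so on, each 8-bit mask packed into MAC_RQC2R at
+ * bit offset (i % MAC_RQC2_Q_PER_REG) * 8.
+ */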
+
+static unsigned int xlgmac_calculate_per_queue_fifo(
+                                       unsigned int fifo_size,
+                                       unsigned int queue_count)
+{
+       unsigned int q_fifo_size;
+       unsigned int p_fifo;
+
+       /* Calculate the configured fifo size */
+       q_fifo_size = 1 << (fifo_size + 7);
+
+       /* The configured value may not be the actual amount of fifo RAM */
+       q_fifo_size = min_t(unsigned int, XLGMAC_MAX_FIFO, q_fifo_size);
+
+       q_fifo_size = q_fifo_size / queue_count;
+
+       /* Each increment in the queue fifo size represents 256 bytes of
+        * fifo, with 0 representing 256 bytes. Distribute the fifo equally
+        * between the queues.
+        */
+       p_fifo = q_fifo_size / 256;
+       if (p_fifo)
+               p_fifo--;
+
+       return p_fifo;
+}
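+
+/* Worked example (illustrative, assuming 64 KB does not exceed
+ * XLGMAC_MAX_FIFO): a hardware fifo_size field of 9 encodes
+ * 1 << (9 + 7) = 65536 bytes of fifo RAM; split across 8 queues that
+ * is 8192 bytes each, and p_fifo = 8192 / 256 - 1 = 31 is the
+ * zero-based value the TQS/RQS fields expect.
+ */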
+
+static void xlgmac_config_tx_fifo_size(struct xlgmac_pdata *pdata)
+{
+       unsigned int fifo_size;
+       unsigned int i;
+       u32 regval;
+
+       fifo_size = xlgmac_calculate_per_queue_fifo(
+                               pdata->hw_feat.tx_fifo_size,
+                               pdata->tx_q_count);
+
+       for (i = 0; i < pdata->tx_q_count; i++) {
+               regval = readl(XLGMAC_MTL_REG(pdata, i, MTL_Q_TQOMR));
+               regval = XLGMAC_SET_REG_BITS(regval, MTL_Q_TQOMR_TQS_POS,
+                                            MTL_Q_TQOMR_TQS_LEN, fifo_size);
+               writel(regval, XLGMAC_MTL_REG(pdata, i, MTL_Q_TQOMR));
+       }
+
+       netif_info(pdata, drv, pdata->netdev,
+                  "%d Tx hardware queues, %d byte fifo per queue\n",
+                  pdata->tx_q_count, ((fifo_size + 1) * 256));
+}
+
+static void xlgmac_config_rx_fifo_size(struct xlgmac_pdata *pdata)
+{
+       unsigned int fifo_size;
+       unsigned int i;
+       u32 regval;
+
+       fifo_size = xlgmac_calculate_per_queue_fifo(
+                                       pdata->hw_feat.rx_fifo_size,
+                                       pdata->rx_q_count);
+
+       for (i = 0; i < pdata->rx_q_count; i++) {
+               regval = readl(XLGMAC_MTL_REG(pdata, i, MTL_Q_RQOMR));
+               regval = XLGMAC_SET_REG_BITS(regval, MTL_Q_RQOMR_RQS_POS,
+                                            MTL_Q_RQOMR_RQS_LEN, fifo_size);
+               writel(regval, XLGMAC_MTL_REG(pdata, i, MTL_Q_RQOMR));
+       }
+
+       netif_info(pdata, drv, pdata->netdev,
+                  "%d Rx hardware queues, %d byte fifo per queue\n",
+                  pdata->rx_q_count, ((fifo_size + 1) * 256));
+}
+
+static void xlgmac_config_flow_control_threshold(struct xlgmac_pdata *pdata)
+{
+       unsigned int i;
+       u32 regval;
+
+       for (i = 0; i < pdata->rx_q_count; i++) {
+               regval = readl(XLGMAC_MTL_REG(pdata, i, MTL_Q_RQFCR));
+               /* Activate flow control when less than 4k left in fifo */
+               regval = XLGMAC_SET_REG_BITS(regval, MTL_Q_RQFCR_RFA_POS,
+                                            MTL_Q_RQFCR_RFA_LEN, 2);
+               /* De-activate flow control when more than 6k left in fifo */
+               regval = XLGMAC_SET_REG_BITS(regval, MTL_Q_RQFCR_RFD_POS,
+                                            MTL_Q_RQFCR_RFD_LEN, 4);
+               writel(regval, XLGMAC_MTL_REG(pdata, i, MTL_Q_RQFCR));
+       }
+}
+
+static int xlgmac_config_tx_threshold(struct xlgmac_pdata *pdata,
+                                     unsigned int val)
+{
+       unsigned int i;
+       u32 regval;
+
+       for (i = 0; i < pdata->tx_q_count; i++) {
+               regval = readl(XLGMAC_MTL_REG(pdata, i, MTL_Q_TQOMR));
+               regval = XLGMAC_SET_REG_BITS(regval, MTL_Q_TQOMR_TTC_POS,
+                                            MTL_Q_TQOMR_TTC_LEN, val);
+               writel(regval, XLGMAC_MTL_REG(pdata, i, MTL_Q_TQOMR));
+       }
+
+       return 0;
+}
+
+static int xlgmac_config_rsf_mode(struct xlgmac_pdata *pdata,
+                                 unsigned int val)
+{
+       unsigned int i;
+       u32 regval;
+
+       for (i = 0; i < pdata->rx_q_count; i++) {
+               regval = readl(XLGMAC_MTL_REG(pdata, i, MTL_Q_RQOMR));
+               regval = XLGMAC_SET_REG_BITS(regval, MTL_Q_RQOMR_RSF_POS,
+                                            MTL_Q_RQOMR_RSF_LEN, val);
+               writel(regval, XLGMAC_MTL_REG(pdata, i, MTL_Q_RQOMR));
+       }
+
+       return 0;
+}
+
+static int xlgmac_config_tsf_mode(struct xlgmac_pdata *pdata,
+                                 unsigned int val)
+{
+       unsigned int i;
+       u32 regval;
+
+       for (i = 0; i < pdata->tx_q_count; i++) {
+               regval = readl(XLGMAC_MTL_REG(pdata, i, MTL_Q_TQOMR));
+               regval = XLGMAC_SET_REG_BITS(regval, MTL_Q_TQOMR_TSF_POS,
+                                            MTL_Q_TQOMR_TSF_LEN, val);
+               writel(regval, XLGMAC_MTL_REG(pdata, i, MTL_Q_TQOMR));
+       }
+
+       return 0;
+}
+
+static int xlgmac_config_osp_mode(struct xlgmac_pdata *pdata)
+{
+       struct xlgmac_channel *channel;
+       unsigned int i;
+       u32 regval;
+
+       channel = pdata->channel_head;
+       for (i = 0; i < pdata->channel_count; i++, channel++) {
+               if (!channel->tx_ring)
+                       break;
+
+               regval = readl(XLGMAC_DMA_REG(channel, DMA_CH_TCR));
+               regval = XLGMAC_SET_REG_BITS(regval, DMA_CH_TCR_OSP_POS,
+                                            DMA_CH_TCR_OSP_LEN,
+                                       pdata->tx_osp_mode);
+               writel(regval, XLGMAC_DMA_REG(channel, DMA_CH_TCR));
+       }
+
+       return 0;
+}
+
+static int xlgmac_config_pblx8(struct xlgmac_pdata *pdata)
+{
+       struct xlgmac_channel *channel;
+       unsigned int i;
+       u32 regval;
+
+       channel = pdata->channel_head;
+       for (i = 0; i < pdata->channel_count; i++, channel++) {
+               regval = readl(XLGMAC_DMA_REG(channel, DMA_CH_CR));
+               regval = XLGMAC_SET_REG_BITS(regval, DMA_CH_CR_PBLX8_POS,
+                                            DMA_CH_CR_PBLX8_LEN,
+                                       pdata->pblx8);
+               writel(regval, XLGMAC_DMA_REG(channel, DMA_CH_CR));
+       }
+
+       return 0;
+}
+
+static int xlgmac_get_tx_pbl_val(struct xlgmac_pdata *pdata)
+{
+       u32 regval;
+
+       regval = readl(XLGMAC_DMA_REG(pdata->channel_head, DMA_CH_TCR));
+       regval = XLGMAC_GET_REG_BITS(regval, DMA_CH_TCR_PBL_POS,
+                                    DMA_CH_TCR_PBL_LEN);
+       return regval;
+}
+
+static int xlgmac_config_tx_pbl_val(struct xlgmac_pdata *pdata)
+{
+       struct xlgmac_channel *channel;
+       unsigned int i;
+       u32 regval;
+
+       channel = pdata->channel_head;
+       for (i = 0; i < pdata->channel_count; i++, channel++) {
+               if (!channel->tx_ring)
+                       break;
+
+               regval = readl(XLGMAC_DMA_REG(channel, DMA_CH_TCR));
+               regval = XLGMAC_SET_REG_BITS(regval, DMA_CH_TCR_PBL_POS,
+                                            DMA_CH_TCR_PBL_LEN,
+                                       pdata->tx_pbl);
+               writel(regval, XLGMAC_DMA_REG(channel, DMA_CH_TCR));
+       }
+
+       return 0;
+}
+
+static int xlgmac_get_rx_pbl_val(struct xlgmac_pdata *pdata)
+{
+       u32 regval;
+
+       regval = readl(XLGMAC_DMA_REG(pdata->channel_head, DMA_CH_RCR));
+       regval = XLGMAC_GET_REG_BITS(regval, DMA_CH_RCR_PBL_POS,
+                                    DMA_CH_RCR_PBL_LEN);
+       return regval;
+}
+
+static int xlgmac_config_rx_pbl_val(struct xlgmac_pdata *pdata)
+{
+       struct xlgmac_channel *channel;
+       unsigned int i;
+       u32 regval;
+
+       channel = pdata->channel_head;
+       for (i = 0; i < pdata->channel_count; i++, channel++) {
+               if (!channel->rx_ring)
+                       break;
+
+               regval = readl(XLGMAC_DMA_REG(channel, DMA_CH_RCR));
+               regval = XLGMAC_SET_REG_BITS(regval, DMA_CH_RCR_PBL_POS,
+                                            DMA_CH_RCR_PBL_LEN,
+                                       pdata->rx_pbl);
+               writel(regval, XLGMAC_DMA_REG(channel, DMA_CH_RCR));
+       }
+
+       return 0;
+}
+
+static u64 xlgmac_mmc_read(struct xlgmac_pdata *pdata, unsigned int reg_lo)
+{
+       bool read_hi;
+       u64 val;
+
+       switch (reg_lo) {
+       /* These registers are always 64 bit */
+       case MMC_TXOCTETCOUNT_GB_LO:
+       case MMC_TXOCTETCOUNT_G_LO:
+       case MMC_RXOCTETCOUNT_GB_LO:
+       case MMC_RXOCTETCOUNT_G_LO:
+               read_hi = true;
+               break;
+
+       default:
+               read_hi = false;
+       }
+
+       val = (u64)readl(pdata->mac_regs + reg_lo);
+
+       if (read_hi)
+               val |= ((u64)readl(pdata->mac_regs + reg_lo + 4) << 32);
+
+       return val;
+}
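For the four 64-bit counters, xlgmac_mmc_read() composes the value from two adjacent 32-bit registers: the low word at reg_lo and the high word at reg_lo + 4. The composition in isolation, with illustrative values:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        /* Illustrative register contents, not real counter values */
        uint32_t lo = 0x89abcdef, hi = 0x01234567;
        uint64_t val = (uint64_t)lo | ((uint64_t)hi << 32);

        printf("val=%#llx\n", (unsigned long long)val);
        /* prints: val=0x123456789abcdef */
        return 0;
}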
+
+static void xlgmac_tx_mmc_int(struct xlgmac_pdata *pdata)
+{
+       unsigned int mmc_isr = readl(pdata->mac_regs + MMC_TISR);
+       struct xlgmac_stats *stats = &pdata->stats;
+
+       if (XLGMAC_GET_REG_BITS(mmc_isr,
+                               MMC_TISR_TXOCTETCOUNT_GB_POS,
+                               MMC_TISR_TXOCTETCOUNT_GB_LEN))
+               stats->txoctetcount_gb +=
+                       xlgmac_mmc_read(pdata, MMC_TXOCTETCOUNT_GB_LO);
+
+       if (XLGMAC_GET_REG_BITS(mmc_isr,
+                               MMC_TISR_TXFRAMECOUNT_GB_POS,
+                               MMC_TISR_TXFRAMECOUNT_GB_LEN))
+               stats->txframecount_gb +=
+                       xlgmac_mmc_read(pdata, MMC_TXFRAMECOUNT_GB_LO);
+
+       if (XLGMAC_GET_REG_BITS(mmc_isr,
+                               MMC_TISR_TXBROADCASTFRAMES_G_POS,
+                               MMC_TISR_TXBROADCASTFRAMES_G_LEN))
+               stats->txbroadcastframes_g +=
+                       xlgmac_mmc_read(pdata, MMC_TXBROADCASTFRAMES_G_LO);
+
+       if (XLGMAC_GET_REG_BITS(mmc_isr,
+                               MMC_TISR_TXMULTICASTFRAMES_G_POS,
+                               MMC_TISR_TXMULTICASTFRAMES_G_LEN))
+               stats->txmulticastframes_g +=
+                       xlgmac_mmc_read(pdata, MMC_TXMULTICASTFRAMES_G_LO);
+
+       if (XLGMAC_GET_REG_BITS(mmc_isr,
+                               MMC_TISR_TX64OCTETS_GB_POS,
+                               MMC_TISR_TX64OCTETS_GB_LEN))
+               stats->tx64octets_gb +=
+                       xlgmac_mmc_read(pdata, MMC_TX64OCTETS_GB_LO);
+
+       if (XLGMAC_GET_REG_BITS(mmc_isr,
+                               MMC_TISR_TX65TO127OCTETS_GB_POS,
+                               MMC_TISR_TX65TO127OCTETS_GB_LEN))
+               stats->tx65to127octets_gb +=
+                       xlgmac_mmc_read(pdata, MMC_TX65TO127OCTETS_GB_LO);
+
+       if (XLGMAC_GET_REG_BITS(mmc_isr,
+                               MMC_TISR_TX128TO255OCTETS_GB_POS,
+                               MMC_TISR_TX128TO255OCTETS_GB_LEN))
+               stats->tx128to255octets_gb +=
+                       xlgmac_mmc_read(pdata, MMC_TX128TO255OCTETS_GB_LO);
+
+       if (XLGMAC_GET_REG_BITS(mmc_isr,
+                               MMC_TISR_TX256TO511OCTETS_GB_POS,
+                               MMC_TISR_TX256TO511OCTETS_GB_LEN))
+               stats->tx256to511octets_gb +=
+                       xlgmac_mmc_read(pdata, MMC_TX256TO511OCTETS_GB_LO);
+
+       if (XLGMAC_GET_REG_BITS(mmc_isr,
+                               MMC_TISR_TX512TO1023OCTETS_GB_POS,
+                               MMC_TISR_TX512TO1023OCTETS_GB_LEN))
+               stats->tx512to1023octets_gb +=
+                       xlgmac_mmc_read(pdata, MMC_TX512TO1023OCTETS_GB_LO);
+
+       if (XLGMAC_GET_REG_BITS(mmc_isr,
+                               MMC_TISR_TX1024TOMAXOCTETS_GB_POS,
+                               MMC_TISR_TX1024TOMAXOCTETS_GB_LEN))
+               stats->tx1024tomaxoctets_gb +=
+                       xlgmac_mmc_read(pdata, MMC_TX1024TOMAXOCTETS_GB_LO);
+
+       if (XLGMAC_GET_REG_BITS(mmc_isr,
+                               MMC_TISR_TXUNICASTFRAMES_GB_POS,
+                               MMC_TISR_TXUNICASTFRAMES_GB_LEN))
+               stats->txunicastframes_gb +=
+                       xlgmac_mmc_read(pdata, MMC_TXUNICASTFRAMES_GB_LO);
+
+       if (XLGMAC_GET_REG_BITS(mmc_isr,
+                               MMC_TISR_TXMULTICASTFRAMES_GB_POS,
+                               MMC_TISR_TXMULTICASTFRAMES_GB_LEN))
+               stats->txmulticastframes_gb +=
+                       xlgmac_mmc_read(pdata, MMC_TXMULTICASTFRAMES_GB_LO);
+
+       if (XLGMAC_GET_REG_BITS(mmc_isr,
+                               MMC_TISR_TXBROADCASTFRAMES_GB_POS,
+                               MMC_TISR_TXBROADCASTFRAMES_GB_LEN))
+               stats->txbroadcastframes_g +=
+                       xlgmac_mmc_read(pdata, MMC_TXBROADCASTFRAMES_GB_LO);
+
+       if (XLGMAC_GET_REG_BITS(mmc_isr,
+                               MMC_TISR_TXUNDERFLOWERROR_POS,
+                               MMC_TISR_TXUNDERFLOWERROR_LEN))
+               stats->txunderflowerror +=
+                       xlgmac_mmc_read(pdata, MMC_TXUNDERFLOWERROR_LO);
+
+       if (XLGMAC_GET_REG_BITS(mmc_isr,
+                               MMC_TISR_TXOCTETCOUNT_G_POS,
+                               MMC_TISR_TXOCTETCOUNT_G_LEN))
+               stats->txoctetcount_g +=
+                       xlgmac_mmc_read(pdata, MMC_TXOCTETCOUNT_G_LO);
+
+       if (XLGMAC_GET_REG_BITS(mmc_isr,
+                               MMC_TISR_TXFRAMECOUNT_G_POS,
+                               MMC_TISR_TXFRAMECOUNT_G_LEN))
+               stats->txframecount_g +=
+                       xlgmac_mmc_read(pdata, MMC_TXFRAMECOUNT_G_LO);
+
+       if (XLGMAC_GET_REG_BITS(mmc_isr,
+                               MMC_TISR_TXPAUSEFRAMES_POS,
+                               MMC_TISR_TXPAUSEFRAMES_LEN))
+               stats->txpauseframes +=
+                       xlgmac_mmc_read(pdata, MMC_TXPAUSEFRAMES_LO);
+
+       if (XLGMAC_GET_REG_BITS(mmc_isr,
+                               MMC_TISR_TXVLANFRAMES_G_POS,
+                               MMC_TISR_TXVLANFRAMES_G_LEN))
+               stats->txvlanframes_g +=
+                       xlgmac_mmc_read(pdata, MMC_TXVLANFRAMES_G_LO);
+}
+
+static void xlgmac_rx_mmc_int(struct xlgmac_pdata *pdata)
+{
+       unsigned int mmc_isr = readl(pdata->mac_regs + MMC_RISR);
+       struct xlgmac_stats *stats = &pdata->stats;
+
+       if (XLGMAC_GET_REG_BITS(mmc_isr,
+                               MMC_RISR_RXFRAMECOUNT_GB_POS,
+                               MMC_RISR_RXFRAMECOUNT_GB_LEN))
+               stats->rxframecount_gb +=
+                       xlgmac_mmc_read(pdata, MMC_RXFRAMECOUNT_GB_LO);
+
+       if (XLGMAC_GET_REG_BITS(mmc_isr,
+                               MMC_RISR_RXOCTETCOUNT_GB_POS,
+                               MMC_RISR_RXOCTETCOUNT_GB_LEN))
+               stats->rxoctetcount_gb +=
+                       xlgmac_mmc_read(pdata, MMC_RXOCTETCOUNT_GB_LO);
+
+       if (XLGMAC_GET_REG_BITS(mmc_isr,
+                               MMC_RISR_RXOCTETCOUNT_G_POS,
+                               MMC_RISR_RXOCTETCOUNT_G_LEN))
+               stats->rxoctetcount_g +=
+                       xlgmac_mmc_read(pdata, MMC_RXOCTETCOUNT_G_LO);
+
+       if (XLGMAC_GET_REG_BITS(mmc_isr,
+                               MMC_RISR_RXBROADCASTFRAMES_G_POS,
+                               MMC_RISR_RXBROADCASTFRAMES_G_LEN))
+               stats->rxbroadcastframes_g +=
+                       xlgmac_mmc_read(pdata, MMC_RXBROADCASTFRAMES_G_LO);
+
+       if (XLGMAC_GET_REG_BITS(mmc_isr,
+                               MMC_RISR_RXMULTICASTFRAMES_G_POS,
+                               MMC_RISR_RXMULTICASTFRAMES_G_LEN))
+               stats->rxmulticastframes_g +=
+                       xlgmac_mmc_read(pdata, MMC_RXMULTICASTFRAMES_G_LO);
+
+       if (XLGMAC_GET_REG_BITS(mmc_isr,
+                               MMC_RISR_RXCRCERROR_POS,
+                               MMC_RISR_RXCRCERROR_LEN))
+               stats->rxcrcerror +=
+                       xlgmac_mmc_read(pdata, MMC_RXCRCERROR_LO);
+
+       if (XLGMAC_GET_REG_BITS(mmc_isr,
+                               MMC_RISR_RXRUNTERROR_POS,
+                               MMC_RISR_RXRUNTERROR_LEN))
+               stats->rxrunterror +=
+                       xlgmac_mmc_read(pdata, MMC_RXRUNTERROR);
+
+       if (XLGMAC_GET_REG_BITS(mmc_isr,
+                               MMC_RISR_RXJABBERERROR_POS,
+                               MMC_RISR_RXJABBERERROR_LEN))
+               stats->rxjabbererror +=
+                       xlgmac_mmc_read(pdata, MMC_RXJABBERERROR);
+
+       if (XLGMAC_GET_REG_BITS(mmc_isr,
+                               MMC_RISR_RXUNDERSIZE_G_POS,
+                               MMC_RISR_RXUNDERSIZE_G_LEN))
+               stats->rxundersize_g +=
+                       xlgmac_mmc_read(pdata, MMC_RXUNDERSIZE_G);
+
+       if (XLGMAC_GET_REG_BITS(mmc_isr,
+                               MMC_RISR_RXOVERSIZE_G_POS,
+                               MMC_RISR_RXOVERSIZE_G_LEN))
+               stats->rxoversize_g +=
+                       xlgmac_mmc_read(pdata, MMC_RXOVERSIZE_G);
+
+       if (XLGMAC_GET_REG_BITS(mmc_isr,
+                               MMC_RISR_RX64OCTETS_GB_POS,
+                               MMC_RISR_RX64OCTETS_GB_LEN))
+               stats->rx64octets_gb +=
+                       xlgmac_mmc_read(pdata, MMC_RX64OCTETS_GB_LO);
+
+       if (XLGMAC_GET_REG_BITS(mmc_isr,
+                               MMC_RISR_RX65TO127OCTETS_GB_POS,
+                               MMC_RISR_RX65TO127OCTETS_GB_LEN))
+               stats->rx65to127octets_gb +=
+                       xlgmac_mmc_read(pdata, MMC_RX65TO127OCTETS_GB_LO);
+
+       if (XLGMAC_GET_REG_BITS(mmc_isr,
+                               MMC_RISR_RX128TO255OCTETS_GB_POS,
+                               MMC_RISR_RX128TO255OCTETS_GB_LEN))
+               stats->rx128to255octets_gb +=
+                       xlgmac_mmc_read(pdata, MMC_RX128TO255OCTETS_GB_LO);
+
+       if (XLGMAC_GET_REG_BITS(mmc_isr,
+                               MMC_RISR_RX256TO511OCTETS_GB_POS,
+                               MMC_RISR_RX256TO511OCTETS_GB_LEN))
+               stats->rx256to511octets_gb +=
+                       xlgmac_mmc_read(pdata, MMC_RX256TO511OCTETS_GB_LO);
+
+       if (XLGMAC_GET_REG_BITS(mmc_isr,
+                               MMC_RISR_RX512TO1023OCTETS_GB_POS,
+                               MMC_RISR_RX512TO1023OCTETS_GB_LEN))
+               stats->rx512to1023octets_gb +=
+                       xlgmac_mmc_read(pdata, MMC_RX512TO1023OCTETS_GB_LO);
+
+       if (XLGMAC_GET_REG_BITS(mmc_isr,
+                               MMC_RISR_RX1024TOMAXOCTETS_GB_POS,
+                               MMC_RISR_RX1024TOMAXOCTETS_GB_LEN))
+               stats->rx1024tomaxoctets_gb +=
+                       xlgmac_mmc_read(pdata, MMC_RX1024TOMAXOCTETS_GB_LO);
+
+       if (XLGMAC_GET_REG_BITS(mmc_isr,
+                               MMC_RISR_RXUNICASTFRAMES_G_POS,
+                               MMC_RISR_RXUNICASTFRAMES_G_LEN))
+               stats->rxunicastframes_g +=
+                       xlgmac_mmc_read(pdata, MMC_RXUNICASTFRAMES_G_LO);
+
+       if (XLGMAC_GET_REG_BITS(mmc_isr,
+                               MMC_RISR_RXLENGTHERROR_POS,
+                               MMC_RISR_RXLENGTHERROR_LEN))
+               stats->rxlengtherror +=
+                       xlgmac_mmc_read(pdata, MMC_RXLENGTHERROR_LO);
+
+       if (XLGMAC_GET_REG_BITS(mmc_isr,
+                               MMC_RISR_RXOUTOFRANGETYPE_POS,
+                               MMC_RISR_RXOUTOFRANGETYPE_LEN))
+               stats->rxoutofrangetype +=
+                       xlgmac_mmc_read(pdata, MMC_RXOUTOFRANGETYPE_LO);
+
+       if (XLGMAC_GET_REG_BITS(mmc_isr,
+                               MMC_RISR_RXPAUSEFRAMES_POS,
+                               MMC_RISR_RXPAUSEFRAMES_LEN))
+               stats->rxpauseframes +=
+                       xlgmac_mmc_read(pdata, MMC_RXPAUSEFRAMES_LO);
+
+       if (XLGMAC_GET_REG_BITS(mmc_isr,
+                               MMC_RISR_RXFIFOOVERFLOW_POS,
+                               MMC_RISR_RXFIFOOVERFLOW_LEN))
+               stats->rxfifooverflow +=
+                       xlgmac_mmc_read(pdata, MMC_RXFIFOOVERFLOW_LO);
+
+       if (XLGMAC_GET_REG_BITS(mmc_isr,
+                               MMC_RISR_RXVLANFRAMES_GB_POS,
+                               MMC_RISR_RXVLANFRAMES_GB_LEN))
+               stats->rxvlanframes_gb +=
+                       xlgmac_mmc_read(pdata, MMC_RXVLANFRAMES_GB_LO);
+
+       if (XLGMAC_GET_REG_BITS(mmc_isr,
+                               MMC_RISR_RXWATCHDOGERROR_POS,
+                               MMC_RISR_RXWATCHDOGERROR_LEN))
+               stats->rxwatchdogerror +=
+                       xlgmac_mmc_read(pdata, MMC_RXWATCHDOGERROR);
+}
+
+static void xlgmac_read_mmc_stats(struct xlgmac_pdata *pdata)
+{
+       struct xlgmac_stats *stats = &pdata->stats;
+       u32 regval;
+
+       /* Freeze counters */
+       regval = readl(pdata->mac_regs + MMC_CR);
+       regval = XLGMAC_SET_REG_BITS(regval, MMC_CR_MCF_POS,
+                                    MMC_CR_MCF_LEN, 1);
+       writel(regval, pdata->mac_regs + MMC_CR);
+
+       stats->txoctetcount_gb +=
+               xlgmac_mmc_read(pdata, MMC_TXOCTETCOUNT_GB_LO);
+
+       stats->txframecount_gb +=
+               xlgmac_mmc_read(pdata, MMC_TXFRAMECOUNT_GB_LO);
+
+       stats->txbroadcastframes_g +=
+               xlgmac_mmc_read(pdata, MMC_TXBROADCASTFRAMES_G_LO);
+
+       stats->txmulticastframes_g +=
+               xlgmac_mmc_read(pdata, MMC_TXMULTICASTFRAMES_G_LO);
+
+       stats->tx64octets_gb +=
+               xlgmac_mmc_read(pdata, MMC_TX64OCTETS_GB_LO);
+
+       stats->tx65to127octets_gb +=
+               xlgmac_mmc_read(pdata, MMC_TX65TO127OCTETS_GB_LO);
+
+       stats->tx128to255octets_gb +=
+               xlgmac_mmc_read(pdata, MMC_TX128TO255OCTETS_GB_LO);
+
+       stats->tx256to511octets_gb +=
+               xlgmac_mmc_read(pdata, MMC_TX256TO511OCTETS_GB_LO);
+
+       stats->tx512to1023octets_gb +=
+               xlgmac_mmc_read(pdata, MMC_TX512TO1023OCTETS_GB_LO);
+
+       stats->tx1024tomaxoctets_gb +=
+               xlgmac_mmc_read(pdata, MMC_TX1024TOMAXOCTETS_GB_LO);
+
+       stats->txunicastframes_gb +=
+               xlgmac_mmc_read(pdata, MMC_TXUNICASTFRAMES_GB_LO);
+
+       stats->txmulticastframes_gb +=
+               xlgmac_mmc_read(pdata, MMC_TXMULTICASTFRAMES_GB_LO);
+
+       stats->txbroadcastframes_g +=
+               xlgmac_mmc_read(pdata, MMC_TXBROADCASTFRAMES_GB_LO);
+
+       stats->txunderflowerror +=
+               xlgmac_mmc_read(pdata, MMC_TXUNDERFLOWERROR_LO);
+
+       stats->txoctetcount_g +=
+               xlgmac_mmc_read(pdata, MMC_TXOCTETCOUNT_G_LO);
+
+       stats->txframecount_g +=
+               xlgmac_mmc_read(pdata, MMC_TXFRAMECOUNT_G_LO);
+
+       stats->txpauseframes +=
+               xlgmac_mmc_read(pdata, MMC_TXPAUSEFRAMES_LO);
+
+       stats->txvlanframes_g +=
+               xlgmac_mmc_read(pdata, MMC_TXVLANFRAMES_G_LO);
+
+       stats->rxframecount_gb +=
+               xlgmac_mmc_read(pdata, MMC_RXFRAMECOUNT_GB_LO);
+
+       stats->rxoctetcount_gb +=
+               xlgmac_mmc_read(pdata, MMC_RXOCTETCOUNT_GB_LO);
+
+       stats->rxoctetcount_g +=
+               xlgmac_mmc_read(pdata, MMC_RXOCTETCOUNT_G_LO);
+
+       stats->rxbroadcastframes_g +=
+               xlgmac_mmc_read(pdata, MMC_RXBROADCASTFRAMES_G_LO);
+
+       stats->rxmulticastframes_g +=
+               xlgmac_mmc_read(pdata, MMC_RXMULTICASTFRAMES_G_LO);
+
+       stats->rxcrcerror +=
+               xlgmac_mmc_read(pdata, MMC_RXCRCERROR_LO);
+
+       stats->rxrunterror +=
+               xlgmac_mmc_read(pdata, MMC_RXRUNTERROR);
+
+       stats->rxjabbererror +=
+               xlgmac_mmc_read(pdata, MMC_RXJABBERERROR);
+
+       stats->rxundersize_g +=
+               xlgmac_mmc_read(pdata, MMC_RXUNDERSIZE_G);
+
+       stats->rxoversize_g +=
+               xlgmac_mmc_read(pdata, MMC_RXOVERSIZE_G);
+
+       stats->rx64octets_gb +=
+               xlgmac_mmc_read(pdata, MMC_RX64OCTETS_GB_LO);
+
+       stats->rx65to127octets_gb +=
+               xlgmac_mmc_read(pdata, MMC_RX65TO127OCTETS_GB_LO);
+
+       stats->rx128to255octets_gb +=
+               xlgmac_mmc_read(pdata, MMC_RX128TO255OCTETS_GB_LO);
+
+       stats->rx256to511octets_gb +=
+               xlgmac_mmc_read(pdata, MMC_RX256TO511OCTETS_GB_LO);
+
+       stats->rx512to1023octets_gb +=
+               xlgmac_mmc_read(pdata, MMC_RX512TO1023OCTETS_GB_LO);
+
+       stats->rx1024tomaxoctets_gb +=
+               xlgmac_mmc_read(pdata, MMC_RX1024TOMAXOCTETS_GB_LO);
+
+       stats->rxunicastframes_g +=
+               xlgmac_mmc_read(pdata, MMC_RXUNICASTFRAMES_G_LO);
+
+       stats->rxlengtherror +=
+               xlgmac_mmc_read(pdata, MMC_RXLENGTHERROR_LO);
+
+       stats->rxoutofrangetype +=
+               xlgmac_mmc_read(pdata, MMC_RXOUTOFRANGETYPE_LO);
+
+       stats->rxpauseframes +=
+               xlgmac_mmc_read(pdata, MMC_RXPAUSEFRAMES_LO);
+
+       stats->rxfifooverflow +=
+               xlgmac_mmc_read(pdata, MMC_RXFIFOOVERFLOW_LO);
+
+       stats->rxvlanframes_gb +=
+               xlgmac_mmc_read(pdata, MMC_RXVLANFRAMES_GB_LO);
+
+       stats->rxwatchdogerror +=
+               xlgmac_mmc_read(pdata, MMC_RXWATCHDOGERROR);
+
+       /* Un-freeze counters */
+       regval = readl(pdata->mac_regs + MMC_CR);
+       regval = XLGMAC_SET_REG_BITS(regval, MMC_CR_MCF_POS,
+                                    MMC_CR_MCF_LEN, 0);
+       writel(regval, pdata->mac_regs + MMC_CR);
+}
+
+static void xlgmac_config_mmc(struct xlgmac_pdata *pdata)
+{
+       u32 regval;
+
+       regval = readl(pdata->mac_regs + MMC_CR);
+       /* Set counters to reset on read */
+       regval = XLGMAC_SET_REG_BITS(regval, MMC_CR_ROR_POS,
+                                    MMC_CR_ROR_LEN, 1);
+       /* Reset the counters */
+       regval = XLGMAC_SET_REG_BITS(regval, MMC_CR_CR_POS,
+                                    MMC_CR_CR_LEN, 1);
+       writel(regval, pdata->mac_regs + MMC_CR);
+}
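With MMC_CR_ROR set, each hardware counter clears itself on every read; that is why the interrupt handlers and xlgmac_read_mmc_stats() above accumulate with "+=" into 64-bit software totals rather than overwriting them. A toy standalone model of that contract (hypothetical names, plain C):

#include <stdint.h>
#include <stdio.h>

static uint32_t hw_counter;     /* stand-in for one MMC register */

static uint32_t counter_read(void)
{
        uint32_t val = hw_counter;

        hw_counter = 0;         /* reset-on-read, as MMC_CR_ROR configures */
        return val;
}

int main(void)
{
        uint64_t total = 0;     /* stand-in for a driver stats field */

        hw_counter = 100;       /* traffic since the last read */
        total += counter_read();
        hw_counter = 50;        /* more traffic */
        total += counter_read();
        printf("total=%llu\n", (unsigned long long)total);      /* 150 */
        return 0;
}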
+
+static int xlgmac_write_rss_reg(struct xlgmac_pdata *pdata, unsigned int type,
+                               unsigned int index, unsigned int val)
+{
+       unsigned int wait;
+       int ret = 0;
+       u32 regval;
+
+       mutex_lock(&pdata->rss_mutex);
+
+       regval = XLGMAC_GET_REG_BITS(readl(pdata->mac_regs + MAC_RSSAR),
+                                    MAC_RSSAR_OB_POS, MAC_RSSAR_OB_LEN);
+       if (regval) {
+               ret = -EBUSY;
+               goto unlock;
+       }
+
+       writel(val, pdata->mac_regs + MAC_RSSDR);
+
+       regval = readl(pdata->mac_regs + MAC_RSSAR);
+       regval = XLGMAC_SET_REG_BITS(regval, MAC_RSSAR_RSSIA_POS,
+                                    MAC_RSSAR_RSSIA_LEN, index);
+       regval = XLGMAC_SET_REG_BITS(regval, MAC_RSSAR_ADDRT_POS,
+                                    MAC_RSSAR_ADDRT_LEN, type);
+       regval = XLGMAC_SET_REG_BITS(regval, MAC_RSSAR_CT_POS,
+                                    MAC_RSSAR_CT_LEN, 0);
+       regval = XLGMAC_SET_REG_BITS(regval, MAC_RSSAR_OB_POS,
+                                    MAC_RSSAR_OB_LEN, 1);
+       writel(regval, pdata->mac_regs + MAC_RSSAR);
+
+       wait = 1000;
+       while (wait--) {
+               regval = XLGMAC_GET_REG_BITS(readl(pdata->mac_regs + MAC_RSSAR),
+                                            MAC_RSSAR_OB_POS,
+                                            MAC_RSSAR_OB_LEN);
+               if (!regval)
+                       goto unlock;
+
+               usleep_range(1000, 1500);
+       }
+
+       ret = -EBUSY;
+
+unlock:
+       mutex_unlock(&pdata->rss_mutex);
+
+       return ret;
+}
+
+static int xlgmac_write_rss_hash_key(struct xlgmac_pdata *pdata)
+{
+       unsigned int key_regs = sizeof(pdata->rss_key) / sizeof(u32);
+       unsigned int *key = (unsigned int *)&pdata->rss_key;
+       int ret;
+
+       while (key_regs--) {
+               ret = xlgmac_write_rss_reg(pdata, XLGMAC_RSS_HASH_KEY_TYPE,
+                                          key_regs, *key++);
+               if (ret)
+                       return ret;
+       }
+
+       return 0;
+}
+
+static int xlgmac_write_rss_lookup_table(struct xlgmac_pdata *pdata)
+{
+       unsigned int i;
+       int ret;
+
+       for (i = 0; i < ARRAY_SIZE(pdata->rss_table); i++) {
+               ret = xlgmac_write_rss_reg(pdata,
+                                          XLGMAC_RSS_LOOKUP_TABLE_TYPE, i,
+                                          pdata->rss_table[i]);
+               if (ret)
+                       return ret;
+       }
+
+       return 0;
+}
+
+static int xlgmac_set_rss_hash_key(struct xlgmac_pdata *pdata, const u8 *key)
+{
+       memcpy(pdata->rss_key, key, sizeof(pdata->rss_key));
+
+       return xlgmac_write_rss_hash_key(pdata);
+}
+
+static int xlgmac_set_rss_lookup_table(struct xlgmac_pdata *pdata,
+                                      const u32 *table)
+{
+       unsigned int i;
+       u32 tval;
+
+       for (i = 0; i < ARRAY_SIZE(pdata->rss_table); i++) {
+               tval = table[i];
+               pdata->rss_table[i] = XLGMAC_SET_REG_BITS(
+                                               pdata->rss_table[i],
+                                               MAC_RSSDR_DMCH_POS,
+                                               MAC_RSSDR_DMCH_LEN,
+                                               tval);
+       }
+
+       return xlgmac_write_rss_lookup_table(pdata);
+}
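xlgmac_set_rss_lookup_table() only masks each entry into the DMCH field, so the caller supplies a flat array of Rx channel numbers. A hedged sketch of the usual default fill — round-robin over the active channels; the helper name is hypothetical and kernel types are assumed, it is not part of this patch:

/* Hypothetical helper: build a round-robin default indirection table
 * before handing it to xlgmac_set_rss_lookup_table(). */
static void xlgmac_fill_default_rss_table(u32 *table, unsigned int entries,
                                          unsigned int rx_ch_count)
{
        unsigned int i;

        for (i = 0; i < entries; i++)
                table[i] = i % rx_ch_count;     /* spread flows evenly */
}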
+
+static int xlgmac_enable_rss(struct xlgmac_pdata *pdata)
+{
+       u32 regval;
+       int ret;
+
+       if (!pdata->hw_feat.rss)
+               return -EOPNOTSUPP;
+
+       /* Program the hash key */
+       ret = xlgmac_write_rss_hash_key(pdata);
+       if (ret)
+               return ret;
+
+       /* Program the lookup table */
+       ret = xlgmac_write_rss_lookup_table(pdata);
+       if (ret)
+               return ret;
+
+       /* Set the RSS options */
+       writel(pdata->rss_options, pdata->mac_regs + MAC_RSSCR);
+
+       /* Enable RSS */
+       regval = readl(pdata->mac_regs + MAC_RSSCR);
+       regval = XLGMAC_SET_REG_BITS(regval, MAC_RSSCR_RSSE_POS,
+                                    MAC_RSSCR_RSSE_LEN, 1);
+       writel(regval, pdata->mac_regs + MAC_RSSCR);
+
+       return 0;
+}
+
+static int xlgmac_disable_rss(struct xlgmac_pdata *pdata)
+{
+       u32 regval;
+
+       if (!pdata->hw_feat.rss)
+               return -EOPNOTSUPP;
+
+       regval = readl(pdata->mac_regs + MAC_RSSCR);
+       regval = XLGMAC_SET_REG_BITS(regval, MAC_RSSCR_RSSE_POS,
+                                    MAC_RSSCR_RSSE_LEN, 0);
+       writel(regval, pdata->mac_regs + MAC_RSSCR);
+
+       return 0;
+}
+
+static void xlgmac_config_rss(struct xlgmac_pdata *pdata)
+{
+       int ret;
+
+       if (!pdata->hw_feat.rss)
+               return;
+
+       if (pdata->netdev->features & NETIF_F_RXHASH)
+               ret = xlgmac_enable_rss(pdata);
+       else
+               ret = xlgmac_disable_rss(pdata);
+
+       if (ret)
+               netdev_err(pdata->netdev,
+                          "error configuring RSS, RSS disabled\n");
+}
+
+static void xlgmac_enable_dma_interrupts(struct xlgmac_pdata *pdata)
+{
+       unsigned int dma_ch_isr, dma_ch_ier;
+       struct xlgmac_channel *channel;
+       unsigned int i;
+
+       channel = pdata->channel_head;
+       for (i = 0; i < pdata->channel_count; i++, channel++) {
+               /* Clear all the interrupts which are set */
+               dma_ch_isr = readl(XLGMAC_DMA_REG(channel, DMA_CH_SR));
+               writel(dma_ch_isr, XLGMAC_DMA_REG(channel, DMA_CH_SR));
+
+               /* Clear all interrupt enable bits */
+               dma_ch_ier = 0;
+
+               /* Enable the following interrupts
+                *   NIE  - Normal Interrupt Summary Enable
+                *   AIE  - Abnormal Interrupt Summary Enable
+                *   FBEE - Fatal Bus Error Enable
+                */
+               dma_ch_ier = XLGMAC_SET_REG_BITS(dma_ch_ier,
+                                                DMA_CH_IER_NIE_POS,
+                                                DMA_CH_IER_NIE_LEN, 1);
+               dma_ch_ier = XLGMAC_SET_REG_BITS(dma_ch_ier,
+                                                DMA_CH_IER_AIE_POS,
+                                                DMA_CH_IER_AIE_LEN, 1);
+               dma_ch_ier = XLGMAC_SET_REG_BITS(dma_ch_ier,
+                                                DMA_CH_IER_FBEE_POS,
+                                                DMA_CH_IER_FBEE_LEN, 1);
+
+               if (channel->tx_ring) {
+                       /* Enable the following Tx interrupts
+                        *   TIE  - Transmit Interrupt Enable (unless using
+                        *          per channel interrupts)
+                        */
+                       if (!pdata->per_channel_irq)
+                               dma_ch_ier = XLGMAC_SET_REG_BITS(
+                                               dma_ch_ier,
+                                               DMA_CH_IER_TIE_POS,
+                                               DMA_CH_IER_TIE_LEN,
+                                               1);
+               }
+               if (channel->rx_ring) {
+                       /* Enable the following Rx interrupts
+                        *   RBUE - Receive Buffer Unavailable Enable
+                        *   RIE  - Receive Interrupt Enable (unless using
+                        *          per channel interrupts)
+                        */
+                       dma_ch_ier = XLGMAC_SET_REG_BITS(
+                                       dma_ch_ier,
+                                       DMA_CH_IER_RBUE_POS,
+                                       DMA_CH_IER_RBUE_LEN,
+                                       1);
+                       if (!pdata->per_channel_irq)
+                               dma_ch_ier = XLGMAC_SET_REG_BITS(
+                                               dma_ch_ier,
+                                               DMA_CH_IER_RIE_POS,
+                                               DMA_CH_IER_RIE_LEN,
+                                               1);
+               }
+
+               writel(dma_ch_ier, XLGMAC_DMA_REG(channel, DMA_CH_IER));
+       }
+}
+
+static void xlgmac_enable_mtl_interrupts(struct xlgmac_pdata *pdata)
+{
+       unsigned int q_count, i;
+       unsigned int mtl_q_isr;
+
+       q_count = max(pdata->hw_feat.tx_q_cnt, pdata->hw_feat.rx_q_cnt);
+       for (i = 0; i < q_count; i++) {
+               /* Clear all the interrupts which are set */
+               mtl_q_isr = readl(XLGMAC_MTL_REG(pdata, i, MTL_Q_ISR));
+               writel(mtl_q_isr, XLGMAC_MTL_REG(pdata, i, MTL_Q_ISR));
+
+               /* No MTL interrupts to be enabled */
+               writel(0, XLGMAC_MTL_REG(pdata, i, MTL_Q_IER));
+       }
+}
+
+static void xlgmac_enable_mac_interrupts(struct xlgmac_pdata *pdata)
+{
+       unsigned int mac_ier = 0;
+       u32 regval;
+
+       /* Enable Timestamp interrupt */
+       mac_ier = XLGMAC_SET_REG_BITS(mac_ier, MAC_IER_TSIE_POS,
+                                     MAC_IER_TSIE_LEN, 1);
+
+       writel(mac_ier, pdata->mac_regs + MAC_IER);
+
+       /* Enable all counter interrupts */
+       regval = readl(pdata->mac_regs + MMC_RIER);
+       regval = XLGMAC_SET_REG_BITS(regval, MMC_RIER_ALL_INTERRUPTS_POS,
+                                    MMC_RIER_ALL_INTERRUPTS_LEN, 0xffffffff);
+       writel(regval, pdata->mac_regs + MMC_RIER);
+       regval = readl(pdata->mac_regs + MMC_TIER);
+       regval = XLGMAC_SET_REG_BITS(regval, MMC_TIER_ALL_INTERRUPTS_POS,
+                                    MMC_TIER_ALL_INTERRUPTS_LEN, 0xffffffff);
+       writel(regval, pdata->mac_regs + MMC_TIER);
+}
+
+static int xlgmac_set_xlgmii_25000_speed(struct xlgmac_pdata *pdata)
+{
+       u32 regval;
+
+       regval = XLGMAC_GET_REG_BITS(readl(pdata->mac_regs + MAC_TCR),
+                                    MAC_TCR_SS_POS, MAC_TCR_SS_LEN);
+       if (regval == 0x1)
+               return 0;
+
+       regval = readl(pdata->mac_regs + MAC_TCR);
+       regval = XLGMAC_SET_REG_BITS(regval, MAC_TCR_SS_POS,
+                                    MAC_TCR_SS_LEN, 0x1);
+       writel(regval, pdata->mac_regs + MAC_TCR);
+
+       return 0;
+}
+
+static int xlgmac_set_xlgmii_40000_speed(struct xlgmac_pdata *pdata)
+{
+       u32 regval;
+
+       regval = XLGMAC_GET_REG_BITS(readl(pdata->mac_regs + MAC_TCR),
+                                    MAC_TCR_SS_POS, MAC_TCR_SS_LEN);
+       if (regval == 0)
+               return 0;
+
+       regval = readl(pdata->mac_regs + MAC_TCR);
+       regval = XLGMAC_SET_REG_BITS(regval, MAC_TCR_SS_POS,
+                                    MAC_TCR_SS_LEN, 0);
+       writel(regval, pdata->mac_regs + MAC_TCR);
+
+       return 0;
+}
+
+static int xlgmac_set_xlgmii_50000_speed(struct xlgmac_pdata *pdata)
+{
+       u32 regval;
+
+       regval = XLGMAC_GET_REG_BITS(readl(pdata->mac_regs + MAC_TCR),
+                                    MAC_TCR_SS_POS, MAC_TCR_SS_LEN);
+       if (regval == 0x2)
+               return 0;
+
+       regval = readl(pdata->mac_regs + MAC_TCR);
+       regval = XLGMAC_SET_REG_BITS(regval, MAC_TCR_SS_POS,
+                                    MAC_TCR_SS_LEN, 0x2);
+       writel(regval, pdata->mac_regs + MAC_TCR);
+
+       return 0;
+}
+
+static int xlgmac_set_xlgmii_100000_speed(struct xlgmac_pdata *pdata)
+{
+       u32 regval;
+
+       regval = XLGMAC_GET_REG_BITS(readl(pdata->mac_regs + MAC_TCR),
+                                    MAC_TCR_SS_POS, MAC_TCR_SS_LEN);
+       if (regval == 0x3)
+               return 0;
+
+       regval = readl(pdata->mac_regs + MAC_TCR);
+       regval = XLGMAC_SET_REG_BITS(regval, MAC_TCR_SS_POS,
+                                    MAC_TCR_SS_LEN, 0x3);
+       writel(regval, pdata->mac_regs + MAC_TCR);
+
+       return 0;
+}
+
+static void xlgmac_config_mac_speed(struct xlgmac_pdata *pdata)
+{
+       switch (pdata->phy_speed) {
+       case SPEED_100000:
+               xlgmac_set_xlgmii_100000_speed(pdata);
+               break;
+
+       case SPEED_50000:
+               xlgmac_set_xlgmii_50000_speed(pdata);
+               break;
+
+       case SPEED_40000:
+               xlgmac_set_xlgmii_40000_speed(pdata);
+               break;
+
+       case SPEED_25000:
+               xlgmac_set_xlgmii_25000_speed(pdata);
+               break;
+       }
+}
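The four speed helpers above differ only in the value written to the SS field of MAC_TCR; from the code, the encoding is 40G = 0x0, 25G = 0x1, 50G = 0x2, 100G = 0x3. An equivalent table-driven form, shown only to make the mapping explicit (SPEED_* constants from the ethtool UAPI header; this helper is not part of the patch):

/* SS field encodings as used by the xlgmac_set_xlgmii_*_speed() helpers */
static int xlgmac_speed_to_ss(int speed)
{
        switch (speed) {
        case SPEED_40000:
                return 0x0;
        case SPEED_25000:
                return 0x1;
        case SPEED_50000:
                return 0x2;
        case SPEED_100000:
                return 0x3;
        default:
                return -1;      /* unsupported speed */
        }
}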
+
+static int xlgmac_dev_read(struct xlgmac_channel *channel)
+{
+       struct xlgmac_pdata *pdata = channel->pdata;
+       struct xlgmac_ring *ring = channel->rx_ring;
+       struct net_device *netdev = pdata->netdev;
+       struct xlgmac_desc_data *desc_data;
+       struct xlgmac_dma_desc *dma_desc;
+       struct xlgmac_pkt_info *pkt_info;
+       unsigned int err, etlt, l34t;
+
+       desc_data = XLGMAC_GET_DESC_DATA(ring, ring->cur);
+       dma_desc = desc_data->dma_desc;
+       pkt_info = &ring->pkt_info;
+
+       /* Check for data availability */
+       if (XLGMAC_GET_REG_BITS_LE(dma_desc->desc3,
+                                  RX_NORMAL_DESC3_OWN_POS,
+                                  RX_NORMAL_DESC3_OWN_LEN))
+               return 1;
+
+       /* Make sure descriptor fields are read after reading the OWN bit */
+       dma_rmb();
+
+       if (netif_msg_rx_status(pdata))
+               xlgmac_dump_rx_desc(pdata, ring, ring->cur);
+
+       if (XLGMAC_GET_REG_BITS_LE(dma_desc->desc3,
+                                  RX_NORMAL_DESC3_CTXT_POS,
+                                  RX_NORMAL_DESC3_CTXT_LEN)) {
+               /* Timestamp Context Descriptor */
+               xlgmac_get_rx_tstamp(pkt_info, dma_desc);
+
+               pkt_info->attributes = XLGMAC_SET_REG_BITS(
+                                       pkt_info->attributes,
+                                       RX_PACKET_ATTRIBUTES_CONTEXT_POS,
+                                       RX_PACKET_ATTRIBUTES_CONTEXT_LEN,
+                                       1);
+               pkt_info->attributes = XLGMAC_SET_REG_BITS(
+                               pkt_info->attributes,
+                               RX_PACKET_ATTRIBUTES_CONTEXT_NEXT_POS,
+                               RX_PACKET_ATTRIBUTES_CONTEXT_NEXT_LEN,
+                               0);
+               return 0;
+       }
+
+       /* Normal Descriptor, be sure Context Descriptor bit is off */
+       pkt_info->attributes = XLGMAC_SET_REG_BITS(
+                               pkt_info->attributes,
+                               RX_PACKET_ATTRIBUTES_CONTEXT_POS,
+                               RX_PACKET_ATTRIBUTES_CONTEXT_LEN,
+                               0);
+
+       /* Indicate if a Context Descriptor is next */
+       if (XLGMAC_GET_REG_BITS_LE(dma_desc->desc3,
+                                  RX_NORMAL_DESC3_CDA_POS,
+                                  RX_NORMAL_DESC3_CDA_LEN))
+               pkt_info->attributes = XLGMAC_SET_REG_BITS(
+                               pkt_info->attributes,
+                               RX_PACKET_ATTRIBUTES_CONTEXT_NEXT_POS,
+                               RX_PACKET_ATTRIBUTES_CONTEXT_NEXT_LEN,
+                               1);
+
+       /* Get the header length */
+       if (XLGMAC_GET_REG_BITS_LE(dma_desc->desc3,
+                                  RX_NORMAL_DESC3_FD_POS,
+                                  RX_NORMAL_DESC3_FD_LEN)) {
+               desc_data->rx.hdr_len = XLGMAC_GET_REG_BITS_LE(dma_desc->desc2,
+                                                       RX_NORMAL_DESC2_HL_POS,
+                                                       RX_NORMAL_DESC2_HL_LEN);
+               if (desc_data->rx.hdr_len)
+                       pdata->stats.rx_split_header_packets++;
+       }
+
+       /* Get the RSS hash */
+       if (XLGMAC_GET_REG_BITS_LE(dma_desc->desc3,
+                                  RX_NORMAL_DESC3_RSV_POS,
+                                  RX_NORMAL_DESC3_RSV_LEN)) {
+               pkt_info->attributes = XLGMAC_SET_REG_BITS(
+                               pkt_info->attributes,
+                               RX_PACKET_ATTRIBUTES_RSS_HASH_POS,
+                               RX_PACKET_ATTRIBUTES_RSS_HASH_LEN,
+                               1);
+
+               pkt_info->rss_hash = le32_to_cpu(dma_desc->desc1);
+
+               l34t = XLGMAC_GET_REG_BITS_LE(dma_desc->desc3,
+                                             RX_NORMAL_DESC3_L34T_POS,
+                                             RX_NORMAL_DESC3_L34T_LEN);
+               switch (l34t) {
+               case RX_DESC3_L34T_IPV4_TCP:
+               case RX_DESC3_L34T_IPV4_UDP:
+               case RX_DESC3_L34T_IPV6_TCP:
+               case RX_DESC3_L34T_IPV6_UDP:
+                       pkt_info->rss_hash_type = PKT_HASH_TYPE_L4;
+                       break;
+               default:
+                       pkt_info->rss_hash_type = PKT_HASH_TYPE_L3;
+               }
+       }
+
+       /* Get the packet length */
+       desc_data->rx.len = XLGMAC_GET_REG_BITS_LE(dma_desc->desc3,
+                                       RX_NORMAL_DESC3_PL_POS,
+                                       RX_NORMAL_DESC3_PL_LEN);
+
+       if (!XLGMAC_GET_REG_BITS_LE(dma_desc->desc3,
+                                   RX_NORMAL_DESC3_LD_POS,
+                                   RX_NORMAL_DESC3_LD_LEN)) {
+               /* Not all the data has been transferred for this packet */
+               pkt_info->attributes = XLGMAC_SET_REG_BITS(
+                               pkt_info->attributes,
+                               RX_PACKET_ATTRIBUTES_INCOMPLETE_POS,
+                               RX_PACKET_ATTRIBUTES_INCOMPLETE_LEN,
+                               1);
+               return 0;
+       }
+
+       /* This is the last of the data for this packet */
+       pkt_info->attributes = XLGMAC_SET_REG_BITS(
+                       pkt_info->attributes,
+                       RX_PACKET_ATTRIBUTES_INCOMPLETE_POS,
+                       RX_PACKET_ATTRIBUTES_INCOMPLETE_LEN,
+                       0);
+
+       /* Set checksum done indicator as appropriate */
+       if (netdev->features & NETIF_F_RXCSUM)
+               pkt_info->attributes = XLGMAC_SET_REG_BITS(
+                               pkt_info->attributes,
+                               RX_PACKET_ATTRIBUTES_CSUM_DONE_POS,
+                               RX_PACKET_ATTRIBUTES_CSUM_DONE_LEN,
+                               1);
+
+       /* Check for errors (only valid in last descriptor) */
+       err = XLGMAC_GET_REG_BITS_LE(dma_desc->desc3,
+                                    RX_NORMAL_DESC3_ES_POS,
+                                    RX_NORMAL_DESC3_ES_LEN);
+       etlt = XLGMAC_GET_REG_BITS_LE(dma_desc->desc3,
+                                     RX_NORMAL_DESC3_ETLT_POS,
+                                     RX_NORMAL_DESC3_ETLT_LEN);
+       netif_dbg(pdata, rx_status, netdev, "err=%u, etlt=%#x\n", err, etlt);
+
+       if (!err || !etlt) {
+               /* No error if err is 0 or etlt is 0 */
+               if ((etlt == 0x09) &&
+                   (netdev->features & NETIF_F_HW_VLAN_CTAG_RX)) {
+                       pkt_info->attributes = XLGMAC_SET_REG_BITS(
+                                       pkt_info->attributes,
+                                       RX_PACKET_ATTRIBUTES_VLAN_CTAG_POS,
+                                       RX_PACKET_ATTRIBUTES_VLAN_CTAG_LEN,
+                                       1);
+                       pkt_info->vlan_ctag =
+                               XLGMAC_GET_REG_BITS_LE(dma_desc->desc0,
+                                                      RX_NORMAL_DESC0_OVT_POS,
+                                                      RX_NORMAL_DESC0_OVT_LEN);
+                       netif_dbg(pdata, rx_status, netdev, "vlan-ctag=%#06x\n",
+                                 pkt_info->vlan_ctag);
+               }
+       } else {
+               if ((etlt == 0x05) || (etlt == 0x06))
+                       pkt_info->attributes = XLGMAC_SET_REG_BITS(
+                                       pkt_info->attributes,
+                                       RX_PACKET_ATTRIBUTES_CSUM_DONE_POS,
+                                       RX_PACKET_ATTRIBUTES_CSUM_DONE_LEN,
+                                       0);
+               else
+                       pkt_info->errors = XLGMAC_SET_REG_BITS(
+                                       pkt_info->errors,
+                                       RX_PACKET_ERRORS_FRAME_POS,
+                                       RX_PACKET_ERRORS_FRAME_LEN,
+                                       1);
+       }
+
+       XLGMAC_PR("%s - descriptor=%u (cur=%d)\n", channel->name,
+                 ring->cur & (ring->dma_desc_count - 1), ring->cur);
+
+       return 0;
+}
+
+static int xlgmac_enable_int(struct xlgmac_channel *channel,
+                            enum xlgmac_int int_id)
+{
+       unsigned int dma_ch_ier;
+
+       dma_ch_ier = readl(XLGMAC_DMA_REG(channel, DMA_CH_IER));
+
+       switch (int_id) {
+       case XLGMAC_INT_DMA_CH_SR_TI:
+               dma_ch_ier = XLGMAC_SET_REG_BITS(
+                               dma_ch_ier, DMA_CH_IER_TIE_POS,
+                               DMA_CH_IER_TIE_LEN, 1);
+               break;
+       case XLGMAC_INT_DMA_CH_SR_TPS:
+               dma_ch_ier = XLGMAC_SET_REG_BITS(
+                               dma_ch_ier, DMA_CH_IER_TXSE_POS,
+                               DMA_CH_IER_TXSE_LEN, 1);
+               break;
+       case XLGMAC_INT_DMA_CH_SR_TBU:
+               dma_ch_ier = XLGMAC_SET_REG_BITS(
+                               dma_ch_ier, DMA_CH_IER_TBUE_POS,
+                               DMA_CH_IER_TBUE_LEN, 1);
+               break;
+       case XLGMAC_INT_DMA_CH_SR_RI:
+               dma_ch_ier = XLGMAC_SET_REG_BITS(
+                               dma_ch_ier, DMA_CH_IER_RIE_POS,
+                               DMA_CH_IER_RIE_LEN, 1);
+               break;
+       case XLGMAC_INT_DMA_CH_SR_RBU:
+               dma_ch_ier = XLGMAC_SET_REG_BITS(
+                               dma_ch_ier, DMA_CH_IER_RBUE_POS,
+                               DMA_CH_IER_RBUE_LEN, 1);
+               break;
+       case XLGMAC_INT_DMA_CH_SR_RPS:
+               dma_ch_ier = XLGMAC_SET_REG_BITS(
+                               dma_ch_ier, DMA_CH_IER_RSE_POS,
+                               DMA_CH_IER_RSE_LEN, 1);
+               break;
+       case XLGMAC_INT_DMA_CH_SR_TI_RI:
+               dma_ch_ier = XLGMAC_SET_REG_BITS(
+                               dma_ch_ier, DMA_CH_IER_TIE_POS,
+                               DMA_CH_IER_TIE_LEN, 1);
+               dma_ch_ier = XLGMAC_SET_REG_BITS(
+                               dma_ch_ier, DMA_CH_IER_RIE_POS,
+                               DMA_CH_IER_RIE_LEN, 1);
+               break;
+       case XLGMAC_INT_DMA_CH_SR_FBE:
+               dma_ch_ier = XLGMAC_SET_REG_BITS(
+                               dma_ch_ier, DMA_CH_IER_FBEE_POS,
+                               DMA_CH_IER_FBEE_LEN, 1);
+               break;
+       case XLGMAC_INT_DMA_ALL:
+               dma_ch_ier |= channel->saved_ier;
+               break;
+       default:
+               return -1;
+       }
+
+       writel(dma_ch_ier, XLGMAC_DMA_REG(channel, DMA_CH_IER));
+
+       return 0;
+}
+
+static int xlgmac_disable_int(struct xlgmac_channel *channel,
+                             enum xlgmac_int int_id)
+{
+       unsigned int dma_ch_ier;
+
+       dma_ch_ier = readl(XLGMAC_DMA_REG(channel, DMA_CH_IER));
+
+       switch (int_id) {
+       case XLGMAC_INT_DMA_CH_SR_TI:
+               dma_ch_ier = XLGMAC_SET_REG_BITS(
+                               dma_ch_ier, DMA_CH_IER_TIE_POS,
+                               DMA_CH_IER_TIE_LEN, 0);
+               break;
+       case XLGMAC_INT_DMA_CH_SR_TPS:
+               dma_ch_ier = XLGMAC_SET_REG_BITS(
+                               dma_ch_ier, DMA_CH_IER_TXSE_POS,
+                               DMA_CH_IER_TXSE_LEN, 0);
+               break;
+       case XLGMAC_INT_DMA_CH_SR_TBU:
+               dma_ch_ier = XLGMAC_SET_REG_BITS(
+                               dma_ch_ier, DMA_CH_IER_TBUE_POS,
+                               DMA_CH_IER_TBUE_LEN, 0);
+               break;
+       case XLGMAC_INT_DMA_CH_SR_RI:
+               dma_ch_ier = XLGMAC_SET_REG_BITS(
+                               dma_ch_ier, DMA_CH_IER_RIE_POS,
+                               DMA_CH_IER_RIE_LEN, 0);
+               break;
+       case XLGMAC_INT_DMA_CH_SR_RBU:
+               dma_ch_ier = XLGMAC_SET_REG_BITS(
+                               dma_ch_ier, DMA_CH_IER_RBUE_POS,
+                               DMA_CH_IER_RBUE_LEN, 0);
+               break;
+       case XLGMAC_INT_DMA_CH_SR_RPS:
+               dma_ch_ier = XLGMAC_SET_REG_BITS(
+                               dma_ch_ier, DMA_CH_IER_RSE_POS,
+                               DMA_CH_IER_RSE_LEN, 0);
+               break;
+       case XLGMAC_INT_DMA_CH_SR_TI_RI:
+               dma_ch_ier = XLGMAC_SET_REG_BITS(
+                               dma_ch_ier, DMA_CH_IER_TIE_POS,
+                               DMA_CH_IER_TIE_LEN, 0);
+               dma_ch_ier = XLGMAC_SET_REG_BITS(
+                               dma_ch_ier, DMA_CH_IER_RIE_POS,
+                               DMA_CH_IER_RIE_LEN, 0);
+               break;
+       case XLGMAC_INT_DMA_CH_SR_FBE:
+               dma_ch_ier = XLGMAC_SET_REG_BITS(
+                               dma_ch_ier, DMA_CH_IER_FBEE_POS,
+                               DMA_CH_IER_FBEE_LEN, 0);
+               break;
+       case XLGMAC_INT_DMA_ALL:
+               channel->saved_ier = dma_ch_ier & XLGMAC_DMA_INTERRUPT_MASK;
+               dma_ch_ier &= ~XLGMAC_DMA_INTERRUPT_MASK;
+               break;
+       default:
+               return -1;
+       }
+
+       writel(dma_ch_ier, XLGMAC_DMA_REG(channel, DMA_CH_IER));
+
+       return 0;
+}
+
+static int xlgmac_flush_tx_queues(struct xlgmac_pdata *pdata)
+{
+       unsigned int i, count;
+       u32 regval;
+
+       for (i = 0; i < pdata->tx_q_count; i++) {
+               regval = readl(XLGMAC_MTL_REG(pdata, i, MTL_Q_TQOMR));
+               regval = XLGMAC_SET_REG_BITS(regval, MTL_Q_TQOMR_FTQ_POS,
+                                            MTL_Q_TQOMR_FTQ_LEN, 1);
+               writel(regval, XLGMAC_MTL_REG(pdata, i, MTL_Q_TQOMR));
+       }
+
+       /* Poll until the Tx queue flushes complete */
+       for (i = 0; i < pdata->tx_q_count; i++) {
+               count = 2000;
+               do {
+                       usleep_range(500, 600);
+                       regval = readl(XLGMAC_MTL_REG(pdata, i, MTL_Q_TQOMR));
+                       regval = XLGMAC_GET_REG_BITS(regval,
+                                                    MTL_Q_TQOMR_FTQ_POS,
+                                                    MTL_Q_TQOMR_FTQ_LEN);
+               } while (--count && regval);
+
+               if (!count)
+                       return -EBUSY;
+       }
+
+       return 0;
+}
+
+static void xlgmac_config_dma_bus(struct xlgmac_pdata *pdata)
+{
+       u32 regval;
+
+       regval = readl(pdata->mac_regs + DMA_SBMR);
+       /* Set enhanced addressing mode */
+       regval = XLGMAC_SET_REG_BITS(regval, DMA_SBMR_EAME_POS,
+                                    DMA_SBMR_EAME_LEN, 1);
+       /* Set the System Bus mode */
+       regval = XLGMAC_SET_REG_BITS(regval, DMA_SBMR_UNDEF_POS,
+                                    DMA_SBMR_UNDEF_LEN, 1);
+       regval = XLGMAC_SET_REG_BITS(regval, DMA_SBMR_BLEN_256_POS,
+                                    DMA_SBMR_BLEN_256_LEN, 1);
+       writel(regval, pdata->mac_regs + DMA_SBMR);
+}
+
+static int xlgmac_hw_init(struct xlgmac_pdata *pdata)
+{
+       struct xlgmac_desc_ops *desc_ops = &pdata->desc_ops;
+       int ret;
+
+       /* Flush Tx queues */
+       ret = xlgmac_flush_tx_queues(pdata);
+       if (ret)
+               return ret;
+
+       /* Initialize DMA related features */
+       xlgmac_config_dma_bus(pdata);
+       xlgmac_config_osp_mode(pdata);
+       xlgmac_config_pblx8(pdata);
+       xlgmac_config_tx_pbl_val(pdata);
+       xlgmac_config_rx_pbl_val(pdata);
+       xlgmac_config_rx_coalesce(pdata);
+       xlgmac_config_tx_coalesce(pdata);
+       xlgmac_config_rx_buffer_size(pdata);
+       xlgmac_config_tso_mode(pdata);
+       xlgmac_config_sph_mode(pdata);
+       xlgmac_config_rss(pdata);
+       desc_ops->tx_desc_init(pdata);
+       desc_ops->rx_desc_init(pdata);
+       xlgmac_enable_dma_interrupts(pdata);
+
+       /* Initialize MTL related features */
+       xlgmac_config_mtl_mode(pdata);
+       xlgmac_config_queue_mapping(pdata);
+       xlgmac_config_tsf_mode(pdata, pdata->tx_sf_mode);
+       xlgmac_config_rsf_mode(pdata, pdata->rx_sf_mode);
+       xlgmac_config_tx_threshold(pdata, pdata->tx_threshold);
+       xlgmac_config_rx_threshold(pdata, pdata->rx_threshold);
+       xlgmac_config_tx_fifo_size(pdata);
+       xlgmac_config_rx_fifo_size(pdata);
+       xlgmac_config_flow_control_threshold(pdata);
+       xlgmac_config_rx_fep_enable(pdata);
+       xlgmac_config_rx_fup_enable(pdata);
+       xlgmac_enable_mtl_interrupts(pdata);
+
+       /* Initialize MAC related features */
+       xlgmac_config_mac_address(pdata);
+       xlgmac_config_rx_mode(pdata);
+       xlgmac_config_jumbo_enable(pdata);
+       xlgmac_config_flow_control(pdata);
+       xlgmac_config_mac_speed(pdata);
+       xlgmac_config_checksum_offload(pdata);
+       xlgmac_config_vlan_support(pdata);
+       xlgmac_config_mmc(pdata);
+       xlgmac_enable_mac_interrupts(pdata);
+
+       return 0;
+}
+
+static int xlgmac_hw_exit(struct xlgmac_pdata *pdata)
+{
+       unsigned int count = 2000;
+       u32 regval;
+
+       /* Issue a software reset */
+       regval = readl(pdata->mac_regs + DMA_MR);
+       regval = XLGMAC_SET_REG_BITS(regval, DMA_MR_SWR_POS,
+                                    DMA_MR_SWR_LEN, 1);
+       writel(regval, pdata->mac_regs + DMA_MR);
+       usleep_range(10, 15);
+
+       /* Poll until the software reset completes */
+       while (--count &&
+              XLGMAC_GET_REG_BITS(readl(pdata->mac_regs + DMA_MR),
+                                  DMA_MR_SWR_POS, DMA_MR_SWR_LEN))
+               usleep_range(500, 600);
+
+       if (!count)
+               return -EBUSY;
+
+       return 0;
+}
+
+void xlgmac_init_hw_ops(struct xlgmac_hw_ops *hw_ops)
+{
+       hw_ops->init = xlgmac_hw_init;
+       hw_ops->exit = xlgmac_hw_exit;
+
+       hw_ops->tx_complete = xlgmac_tx_complete;
+
+       hw_ops->enable_tx = xlgmac_enable_tx;
+       hw_ops->disable_tx = xlgmac_disable_tx;
+       hw_ops->enable_rx = xlgmac_enable_rx;
+       hw_ops->disable_rx = xlgmac_disable_rx;
+
+       hw_ops->dev_xmit = xlgmac_dev_xmit;
+       hw_ops->dev_read = xlgmac_dev_read;
+       hw_ops->enable_int = xlgmac_enable_int;
+       hw_ops->disable_int = xlgmac_disable_int;
+
+       hw_ops->set_mac_address = xlgmac_set_mac_address;
+       hw_ops->config_rx_mode = xlgmac_config_rx_mode;
+       hw_ops->enable_rx_csum = xlgmac_enable_rx_csum;
+       hw_ops->disable_rx_csum = xlgmac_disable_rx_csum;
+
+       /* For MII speed configuration */
+       hw_ops->set_xlgmii_25000_speed = xlgmac_set_xlgmii_25000_speed;
+       hw_ops->set_xlgmii_40000_speed = xlgmac_set_xlgmii_40000_speed;
+       hw_ops->set_xlgmii_50000_speed = xlgmac_set_xlgmii_50000_speed;
+       hw_ops->set_xlgmii_100000_speed = xlgmac_set_xlgmii_100000_speed;
+
+       /* For descriptor related operation */
+       hw_ops->tx_desc_init = xlgmac_tx_desc_init;
+       hw_ops->rx_desc_init = xlgmac_rx_desc_init;
+       hw_ops->tx_desc_reset = xlgmac_tx_desc_reset;
+       hw_ops->rx_desc_reset = xlgmac_rx_desc_reset;
+       hw_ops->is_last_desc = xlgmac_is_last_desc;
+       hw_ops->is_context_desc = xlgmac_is_context_desc;
+       hw_ops->tx_start_xmit = xlgmac_tx_start_xmit;
+
+       /* For Flow Control */
+       hw_ops->config_tx_flow_control = xlgmac_config_tx_flow_control;
+       hw_ops->config_rx_flow_control = xlgmac_config_rx_flow_control;
+
+       /* For Vlan related config */
+       hw_ops->enable_rx_vlan_stripping = xlgmac_enable_rx_vlan_stripping;
+       hw_ops->disable_rx_vlan_stripping = xlgmac_disable_rx_vlan_stripping;
+       hw_ops->enable_rx_vlan_filtering = xlgmac_enable_rx_vlan_filtering;
+       hw_ops->disable_rx_vlan_filtering = xlgmac_disable_rx_vlan_filtering;
+       hw_ops->update_vlan_hash_table = xlgmac_update_vlan_hash_table;
+
+       /* For RX coalescing */
+       hw_ops->config_rx_coalesce = xlgmac_config_rx_coalesce;
+       hw_ops->config_tx_coalesce = xlgmac_config_tx_coalesce;
+       hw_ops->usec_to_riwt = xlgmac_usec_to_riwt;
+       hw_ops->riwt_to_usec = xlgmac_riwt_to_usec;
+
+       /* For RX and TX threshold config */
+       hw_ops->config_rx_threshold = xlgmac_config_rx_threshold;
+       hw_ops->config_tx_threshold = xlgmac_config_tx_threshold;
+
+       /* For RX and TX Store and Forward Mode config */
+       hw_ops->config_rsf_mode = xlgmac_config_rsf_mode;
+       hw_ops->config_tsf_mode = xlgmac_config_tsf_mode;
+
+       /* For TX DMA Operating on Second Frame config */
+       hw_ops->config_osp_mode = xlgmac_config_osp_mode;
+
+       /* For RX and TX PBL config */
+       hw_ops->config_rx_pbl_val = xlgmac_config_rx_pbl_val;
+       hw_ops->get_rx_pbl_val = xlgmac_get_rx_pbl_val;
+       hw_ops->config_tx_pbl_val = xlgmac_config_tx_pbl_val;
+       hw_ops->get_tx_pbl_val = xlgmac_get_tx_pbl_val;
+       hw_ops->config_pblx8 = xlgmac_config_pblx8;
+
+       /* For MMC statistics support */
+       hw_ops->tx_mmc_int = xlgmac_tx_mmc_int;
+       hw_ops->rx_mmc_int = xlgmac_rx_mmc_int;
+       hw_ops->read_mmc_stats = xlgmac_read_mmc_stats;
+
+       /* For Receive Side Scaling */
+       hw_ops->enable_rss = xlgmac_enable_rss;
+       hw_ops->disable_rss = xlgmac_disable_rss;
+       hw_ops->set_rss_hash_key = xlgmac_set_rss_hash_key;
+       hw_ops->set_rss_lookup_table = xlgmac_set_rss_lookup_table;
+}
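Nothing outside this file calls the static helpers directly; consumers go through the ops table. A minimal usage sketch — the function name here is hypothetical, and the real call sites presumably live in the net and common parts of this patch set:

/* Hypothetical caller: populate the ops table, then drive the
 * hardware through it. */
static int example_bring_up(struct xlgmac_pdata *pdata)
{
        xlgmac_init_hw_ops(&pdata->hw_ops);

        return pdata->hw_ops.init(pdata);       /* -> xlgmac_hw_init() */
}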
diff --git a/drivers/net/ethernet/synopsys/dwc-xlgmac-net.c b/drivers/net/ethernet/synopsys/dwc-xlgmac-net.c
new file mode 100644 (file)
index 0000000..6acf86c
--- /dev/null
@@ -0,0 +1,1332 @@
+/* Synopsys DesignWare Core Enterprise Ethernet (XLGMAC) Driver
+ *
+ * Copyright (c) 2017 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is dual-licensed; you may select either version 2 of
+ * the GNU General Public License ("GPL") or BSD license ("BSD").
+ *
+ * This Synopsys DWC XLGMAC software driver and associated documentation
+ * (hereinafter the "Software") is an unsupported proprietary work of
+ * Synopsys, Inc. unless otherwise expressly agreed to in writing between
+ * Synopsys and you. The Software IS NOT an item of Licensed Software or a
+ * Licensed Product under any End User Software License Agreement or
+ * Agreement for Licensed Products with Synopsys or any supplement thereto.
+ * Synopsys is a registered trademark of Synopsys, Inc. Other names included
+ * in the SOFTWARE may be the trademarks of their respective owners.
+ */
+
+#include <linux/netdevice.h>
+#include <linux/tcp.h>
+
+#include "dwc-xlgmac.h"
+#include "dwc-xlgmac-reg.h"
+
+static int xlgmac_one_poll(struct napi_struct *, int);
+static int xlgmac_all_poll(struct napi_struct *, int);
+
+static inline unsigned int xlgmac_tx_avail_desc(struct xlgmac_ring *ring)
+{
+       return (ring->dma_desc_count - (ring->cur - ring->dirty));
+}
+
+static inline unsigned int xlgmac_rx_dirty_desc(struct xlgmac_ring *ring)
+{
+       return (ring->cur - ring->dirty);
+}
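Both ring helpers rely on cur and dirty being free-running unsigned counters: the subtraction is done modulo 2^32, so the result stays correct even after cur wraps past UINT_MAX. A standalone check of that wrap behavior:

#include <stdio.h>

int main(void)
{
        unsigned int dma_desc_count = 512;
        unsigned int dirty = 0xfffffff0u;       /* cleanup index, not yet wrapped */
        unsigned int cur = 0x00000010u;         /* submit index, already wrapped */

        unsigned int in_flight = cur - dirty;   /* 32, despite the wrap */
        unsigned int avail = dma_desc_count - in_flight;

        printf("in_flight=%u avail=%u\n", in_flight, avail);    /* 32 480 */
        return 0;
}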
+
+static int xlgmac_maybe_stop_tx_queue(
+                       struct xlgmac_channel *channel,
+                       struct xlgmac_ring *ring,
+                       unsigned int count)
+{
+       struct xlgmac_pdata *pdata = channel->pdata;
+
+       if (count > xlgmac_tx_avail_desc(ring)) {
+               netif_info(pdata, drv, pdata->netdev,
+                          "Tx queue stopped, not enough descriptors available\n");
+               netif_stop_subqueue(pdata->netdev, channel->queue_index);
+               ring->tx.queue_stopped = 1;
+
+               /* If we haven't notified the hardware because of xmit_more
+                * support, tell it now
+                */
+               if (ring->tx.xmit_more)
+                       pdata->hw_ops.tx_start_xmit(channel, ring);
+
+               return NETDEV_TX_BUSY;
+       }
+
+       return 0;
+}
+
+static void xlgmac_prep_vlan(struct sk_buff *skb,
+                            struct xlgmac_pkt_info *pkt_info)
+{
+       if (skb_vlan_tag_present(skb))
+               pkt_info->vlan_ctag = skb_vlan_tag_get(skb);
+}
+
+static int xlgmac_prep_tso(struct sk_buff *skb,
+                          struct xlgmac_pkt_info *pkt_info)
+{
+       int ret;
+
+       if (!XLGMAC_GET_REG_BITS(pkt_info->attributes,
+                                TX_PACKET_ATTRIBUTES_TSO_ENABLE_POS,
+                                TX_PACKET_ATTRIBUTES_TSO_ENABLE_LEN))
+               return 0;
+
+       ret = skb_cow_head(skb, 0);
+       if (ret)
+               return ret;
+
+       pkt_info->header_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
+       pkt_info->tcp_header_len = tcp_hdrlen(skb);
+       pkt_info->tcp_payload_len = skb->len - pkt_info->header_len;
+       pkt_info->mss = skb_shinfo(skb)->gso_size;
+
+       XLGMAC_PR("header_len=%u\n", pkt_info->header_len);
+       XLGMAC_PR("tcp_header_len=%u, tcp_payload_len=%u\n",
+                 pkt_info->tcp_header_len, pkt_info->tcp_payload_len);
+       XLGMAC_PR("mss=%u\n", pkt_info->mss);
+
+       /* Update the number of packets that will ultimately be transmitted
+        * along with the extra bytes for each extra packet
+        */
+       pkt_info->tx_packets = skb_shinfo(skb)->gso_segs;
+       pkt_info->tx_bytes += (pkt_info->tx_packets - 1) * pkt_info->header_len;
+
+       return 0;
+}
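A worked instance of the accounting in xlgmac_prep_tso(), with illustrative numbers: a 9066-byte skb with a 66-byte header and mss 1500 splits into 6 segments, and the header is re-sent with each of the 5 extra segments:

#include <stdio.h>

int main(void)
{
        /* Illustrative numbers, not taken from the driver */
        unsigned int skb_len = 9066, header_len = 66, mss = 1500;
        unsigned int payload = skb_len - header_len;            /* 9000 */
        unsigned int tx_packets = (payload + mss - 1) / mss;    /* 6, i.e. gso_segs */
        unsigned int extra_bytes = (tx_packets - 1) * header_len;      /* 330 */

        printf("tx_packets=%u extra_header_bytes=%u\n",
               tx_packets, extra_bytes);
        return 0;
}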
+
+static int xlgmac_is_tso(struct sk_buff *skb)
+{
+       if (skb->ip_summed != CHECKSUM_PARTIAL)
+               return 0;
+
+       if (!skb_is_gso(skb))
+               return 0;
+
+       return 1;
+}
+
+static void xlgmac_prep_tx_pkt(struct xlgmac_pdata *pdata,
+                              struct xlgmac_ring *ring,
+                              struct sk_buff *skb,
+                              struct xlgmac_pkt_info *pkt_info)
+{
+       struct skb_frag_struct *frag;
+       unsigned int context_desc;
+       unsigned int len;
+       unsigned int i;
+
+       pkt_info->skb = skb;
+
+       context_desc = 0;
+       pkt_info->desc_count = 0;
+
+       pkt_info->tx_packets = 1;
+       pkt_info->tx_bytes = skb->len;
+
+       if (xlgmac_is_tso(skb)) {
+               /* TSO requires an extra descriptor if mss is different */
+               if (skb_shinfo(skb)->gso_size != ring->tx.cur_mss) {
+                       context_desc = 1;
+                       pkt_info->desc_count++;
+               }
+
+               /* TSO requires an extra descriptor for TSO header */
+               pkt_info->desc_count++;
+
+               pkt_info->attributes = XLGMAC_SET_REG_BITS(
+                                       pkt_info->attributes,
+                                       TX_PACKET_ATTRIBUTES_TSO_ENABLE_POS,
+                                       TX_PACKET_ATTRIBUTES_TSO_ENABLE_LEN,
+                                       1);
+               pkt_info->attributes = XLGMAC_SET_REG_BITS(
+                                       pkt_info->attributes,
+                                       TX_PACKET_ATTRIBUTES_CSUM_ENABLE_POS,
+                                       TX_PACKET_ATTRIBUTES_CSUM_ENABLE_LEN,
+                                       1);
+       } else if (skb->ip_summed == CHECKSUM_PARTIAL)
+               pkt_info->attributes = XLGMAC_SET_REG_BITS(
+                                       pkt_info->attributes,
+                                       TX_PACKET_ATTRIBUTES_CSUM_ENABLE_POS,
+                                       TX_PACKET_ATTRIBUTES_CSUM_ENABLE_LEN,
+                                       1);
+
+       if (skb_vlan_tag_present(skb)) {
+               /* VLAN requires an extra descriptor if tag is different */
+               if (skb_vlan_tag_get(skb) != ring->tx.cur_vlan_ctag) {
+                       /* We can share with the TSO context descriptor */
+                       if (!context_desc) {
+                               context_desc = 1;
+                               pkt_info->desc_count++;
+                       }
+               }
+
+               pkt_info->attributes = XLGMAC_SET_REG_BITS(
+                                       pkt_info->attributes,
+                                       TX_PACKET_ATTRIBUTES_VLAN_CTAG_POS,
+                                       TX_PACKET_ATTRIBUTES_VLAN_CTAG_LEN,
+                                       1);
+       }
+
+       for (len = skb_headlen(skb); len;) {
+               pkt_info->desc_count++;
+               len -= min_t(unsigned int, len, XLGMAC_TX_MAX_BUF_SIZE);
+       }
+
+       for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+               frag = &skb_shinfo(skb)->frags[i];
+               for (len = skb_frag_size(frag); len; ) {
+                       pkt_info->desc_count++;
+                       len -= min_t(unsigned int, len, XLGMAC_TX_MAX_BUF_SIZE);
+               }
+       }
+}
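
xlgmac_prep_tx_pkt() sizes the descriptor chain before any mapping happens:
one optional context descriptor, one for the TSO header, then one per
XLGMAC_TX_MAX_BUF_SIZE chunk of the linear area and of each page fragment. A
standalone count with illustrative lengths; the chunk limit value is assumed
here, the real one lives in dwc-xlgmac.h:

    #include <stdio.h>

    #define TX_MAX_BUF_SIZE 16368U          /* assumed per-descriptor limit */

    static unsigned int descs_for(unsigned int len)
    {
            unsigned int n = 0;

            while (len) {                   /* same chunking as the driver */
                    n++;
                    len -= len < TX_MAX_BUF_SIZE ? len : TX_MAX_BUF_SIZE;
            }
            return n;
    }

    int main(void)
    {
            unsigned int count = 1 + 1;     /* context desc + TSO header desc */

            count += descs_for(242);        /* linear (header) area */
            count += descs_for(32768);      /* first page fragment */
            count += descs_for(4096);       /* second page fragment */

            printf("desc_count=%u\n", count);   /* 2 + 1 + 3 + 1 = 7 */
            return 0;
    }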
+
+static int xlgmac_calc_rx_buf_size(struct net_device *netdev, unsigned int mtu)
+{
+       unsigned int rx_buf_size;
+
+       if (mtu > XLGMAC_JUMBO_PACKET_MTU) {
+               netdev_alert(netdev, "MTU exceeds maximum supported value\n");
+               return -EINVAL;
+       }
+
+       rx_buf_size = mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
+       rx_buf_size = clamp_val(rx_buf_size, XLGMAC_RX_MIN_BUF_SIZE, PAGE_SIZE);
+
+       rx_buf_size = (rx_buf_size + XLGMAC_RX_BUF_ALIGN - 1) &
+                     ~(XLGMAC_RX_BUF_ALIGN - 1);
+
+       return rx_buf_size;
+}
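
Worked through for the common case (MTU 1500), with the minimum size and
alignment values assumed purely for illustration: 1500 + 14 + 4 + 4 = 1522
bytes, clamped up to the ring minimum and rounded to the buffer alignment:

    #include <stdio.h>

    #define ETH_HLEN         14
    #define ETH_FCS_LEN      4
    #define VLAN_HLEN        4
    #define RX_MIN_BUF_SIZE  1536U   /* assumed; real value in dwc-xlgmac.h */
    #define RX_BUF_ALIGN     64U     /* assumed cache-line alignment */

    int main(void)
    {
            unsigned int mtu = 1500;
            unsigned int size = mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;

            if (size < RX_MIN_BUF_SIZE)      /* clamp_val(), low end */
                    size = RX_MIN_BUF_SIZE;
            size = (size + RX_BUF_ALIGN - 1) & ~(RX_BUF_ALIGN - 1);

            printf("rx_buf_size=%u\n", size);   /* prints 1536 */
            return 0;
    }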
+
+static void xlgmac_enable_rx_tx_ints(struct xlgmac_pdata *pdata)
+{
+       struct xlgmac_hw_ops *hw_ops = &pdata->hw_ops;
+       struct xlgmac_channel *channel;
+       enum xlgmac_int int_id;
+       unsigned int i;
+
+       channel = pdata->channel_head;
+       for (i = 0; i < pdata->channel_count; i++, channel++) {
+               if (channel->tx_ring && channel->rx_ring)
+                       int_id = XLGMAC_INT_DMA_CH_SR_TI_RI;
+               else if (channel->tx_ring)
+                       int_id = XLGMAC_INT_DMA_CH_SR_TI;
+               else if (channel->rx_ring)
+                       int_id = XLGMAC_INT_DMA_CH_SR_RI;
+               else
+                       continue;
+
+               hw_ops->enable_int(channel, int_id);
+       }
+}
+
+static void xlgmac_disable_rx_tx_ints(struct xlgmac_pdata *pdata)
+{
+       struct xlgmac_hw_ops *hw_ops = &pdata->hw_ops;
+       struct xlgmac_channel *channel;
+       enum xlgmac_int int_id;
+       unsigned int i;
+
+       channel = pdata->channel_head;
+       for (i = 0; i < pdata->channel_count; i++, channel++) {
+               if (channel->tx_ring && channel->rx_ring)
+                       int_id = XLGMAC_INT_DMA_CH_SR_TI_RI;
+               else if (channel->tx_ring)
+                       int_id = XLGMAC_INT_DMA_CH_SR_TI;
+               else if (channel->rx_ring)
+                       int_id = XLGMAC_INT_DMA_CH_SR_RI;
+               else
+                       continue;
+
+               hw_ops->disable_int(channel, int_id);
+       }
+}
+
+static irqreturn_t xlgmac_isr(int irq, void *data)
+{
+       unsigned int dma_isr, dma_ch_isr, mac_isr;
+       struct xlgmac_pdata *pdata = data;
+       struct xlgmac_channel *channel;
+       struct xlgmac_hw_ops *hw_ops;
+       unsigned int i, ti, ri;
+
+       hw_ops = &pdata->hw_ops;
+
+       /* The DMA interrupt status register also reports MAC and MTL
+        * interrupts, so in polling mode it is enough to check whether
+        * this register is non-zero.
+        */
+       dma_isr = readl(pdata->mac_regs + DMA_ISR);
+       if (!dma_isr)
+               return IRQ_HANDLED;
+
+       netif_dbg(pdata, intr, pdata->netdev, "DMA_ISR=%#010x\n", dma_isr);
+
+       for (i = 0; i < pdata->channel_count; i++) {
+               if (!(dma_isr & (1 << i)))
+                       continue;
+
+               channel = pdata->channel_head + i;
+
+               dma_ch_isr = readl(XLGMAC_DMA_REG(channel, DMA_CH_SR));
+               netif_dbg(pdata, intr, pdata->netdev, "DMA_CH%u_ISR=%#010x\n",
+                         i, dma_ch_isr);
+
+               /* The TI or RI interrupt bits may still be set even if
+                * using per channel DMA interrupts. Only schedule the
+                * private data napi structure when per channel DMA
+                * interrupts are not in use.
+                */
+               ti = XLGMAC_GET_REG_BITS(dma_ch_isr, DMA_CH_SR_TI_POS,
+                                        DMA_CH_SR_TI_LEN);
+               ri = XLGMAC_GET_REG_BITS(dma_ch_isr, DMA_CH_SR_RI_POS,
+                                        DMA_CH_SR_RI_LEN);
+               if (!pdata->per_channel_irq && (ti || ri)) {
+                       if (napi_schedule_prep(&pdata->napi)) {
+                               /* Disable Tx and Rx interrupts */
+                               xlgmac_disable_rx_tx_ints(pdata);
+
+                               /* Turn on polling */
+                               __napi_schedule_irqoff(&pdata->napi);
+                       }
+               }
+
+               if (XLGMAC_GET_REG_BITS(dma_ch_isr, DMA_CH_SR_RBU_POS,
+                                       DMA_CH_SR_RBU_LEN))
+                       pdata->stats.rx_buffer_unavailable++;
+
+               /* Restart the device on a Fatal Bus Error */
+               if (XLGMAC_GET_REG_BITS(dma_ch_isr, DMA_CH_SR_FBE_POS,
+                                       DMA_CH_SR_FBE_LEN))
+                       schedule_work(&pdata->restart_work);
+
+               /* Clear all interrupt signals */
+               writel(dma_ch_isr, XLGMAC_DMA_REG(channel, DMA_CH_SR));
+       }
+
+       if (XLGMAC_GET_REG_BITS(dma_isr, DMA_ISR_MACIS_POS,
+                               DMA_ISR_MACIS_LEN)) {
+               mac_isr = readl(pdata->mac_regs + MAC_ISR);
+
+               if (XLGMAC_GET_REG_BITS(mac_isr, MAC_ISR_MMCTXIS_POS,
+                                       MAC_ISR_MMCTXIS_LEN))
+                       hw_ops->tx_mmc_int(pdata);
+
+               if (XLGMAC_GET_REG_BITS(mac_isr, MAC_ISR_MMCRXIS_POS,
+                                       MAC_ISR_MMCRXIS_LEN))
+                       hw_ops->rx_mmc_int(pdata);
+       }
+
+       return IRQ_HANDLED;
+}
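
The napi_schedule_prep()/__napi_schedule_irqoff() pair above is the standard
interrupt-to-polling handoff. A minimal sketch of the idiom, not compilable on
its own; the demo_* names are hypothetical stand-ins for this driver's types
and hardware ops:

    static irqreturn_t demo_isr(int irq, void *data)
    {
            struct demo_dev *dd = data;          /* hypothetical device */

            /* napi_schedule_prep() returns false if a poll is already
             * scheduled, so interrupts are masked at most once per cycle.
             */
            if (napi_schedule_prep(&dd->napi)) {
                    demo_mask_irqs(dd);          /* stand-in for the hw op */
                    __napi_schedule_irqoff(&dd->napi);
            }

            /* The poll routine later calls napi_complete_done() and
             * unmasks the interrupt, closing the loop.
             */
            return IRQ_HANDLED;
    }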
+
+static irqreturn_t xlgmac_dma_isr(int irq, void *data)
+{
+       struct xlgmac_channel *channel = data;
+
+       /* Per channel DMA interrupts are enabled, so we use the per
+        * channel napi structure and not the private data napi structure
+        */
+       if (napi_schedule_prep(&channel->napi)) {
+               /* Disable Tx and Rx interrupts */
+               disable_irq_nosync(channel->dma_irq);
+
+               /* Turn on polling */
+               __napi_schedule_irqoff(&channel->napi);
+       }
+
+       return IRQ_HANDLED;
+}
+
+static void xlgmac_tx_timer(unsigned long data)
+{
+       struct xlgmac_channel *channel = (struct xlgmac_channel *)data;
+       struct xlgmac_pdata *pdata = channel->pdata;
+       struct napi_struct *napi;
+
+       napi = (pdata->per_channel_irq) ? &channel->napi : &pdata->napi;
+
+       if (napi_schedule_prep(napi)) {
+               /* Disable Tx and Rx interrupts */
+               if (pdata->per_channel_irq)
+                       disable_irq_nosync(channel->dma_irq);
+               else
+                       xlgmac_disable_rx_tx_ints(pdata);
+
+               /* Turn on polling */
+               __napi_schedule(napi);
+       }
+
+       channel->tx_timer_active = 0;
+}
+
+static void xlgmac_init_timers(struct xlgmac_pdata *pdata)
+{
+       struct xlgmac_channel *channel;
+       unsigned int i;
+
+       channel = pdata->channel_head;
+       for (i = 0; i < pdata->channel_count; i++, channel++) {
+               if (!channel->tx_ring)
+                       break;
+
+               setup_timer(&channel->tx_timer, xlgmac_tx_timer,
+                           (unsigned long)channel);
+       }
+}
+
+static void xlgmac_stop_timers(struct xlgmac_pdata *pdata)
+{
+       struct xlgmac_channel *channel;
+       unsigned int i;
+
+       channel = pdata->channel_head;
+       for (i = 0; i < pdata->channel_count; i++, channel++) {
+               if (!channel->tx_ring)
+                       break;
+
+               del_timer_sync(&channel->tx_timer);
+       }
+}
+
+static void xlgmac_napi_enable(struct xlgmac_pdata *pdata, unsigned int add)
+{
+       struct xlgmac_channel *channel;
+       unsigned int i;
+
+       if (pdata->per_channel_irq) {
+               channel = pdata->channel_head;
+               for (i = 0; i < pdata->channel_count; i++, channel++) {
+                       if (add)
+                               netif_napi_add(pdata->netdev, &channel->napi,
+                                              xlgmac_one_poll,
+                                              NAPI_POLL_WEIGHT);
+
+                       napi_enable(&channel->napi);
+               }
+       } else {
+               if (add)
+                       netif_napi_add(pdata->netdev, &pdata->napi,
+                                      xlgmac_all_poll, NAPI_POLL_WEIGHT);
+
+               napi_enable(&pdata->napi);
+       }
+}
+
+static void xlgmac_napi_disable(struct xlgmac_pdata *pdata, unsigned int del)
+{
+       struct xlgmac_channel *channel;
+       unsigned int i;
+
+       if (pdata->per_channel_irq) {
+               channel = pdata->channel_head;
+               for (i = 0; i < pdata->channel_count; i++, channel++) {
+                       napi_disable(&channel->napi);
+
+                       if (del)
+                               netif_napi_del(&channel->napi);
+               }
+       } else {
+               napi_disable(&pdata->napi);
+
+               if (del)
+                       netif_napi_del(&pdata->napi);
+       }
+}
+
+static int xlgmac_request_irqs(struct xlgmac_pdata *pdata)
+{
+       struct net_device *netdev = pdata->netdev;
+       struct xlgmac_channel *channel;
+       unsigned int i;
+       int ret;
+
+       ret = devm_request_irq(pdata->dev, pdata->dev_irq, xlgmac_isr,
+                              IRQF_SHARED, netdev->name, pdata);
+       if (ret) {
+               netdev_alert(netdev, "error requesting irq %d\n",
+                            pdata->dev_irq);
+               return ret;
+       }
+
+       if (!pdata->per_channel_irq)
+               return 0;
+
+       channel = pdata->channel_head;
+       for (i = 0; i < pdata->channel_count; i++, channel++) {
+               snprintf(channel->dma_irq_name,
+                        sizeof(channel->dma_irq_name) - 1,
+                        "%s-TxRx-%u", netdev_name(netdev),
+                        channel->queue_index);
+
+               ret = devm_request_irq(pdata->dev, channel->dma_irq,
+                                      xlgmac_dma_isr, 0,
+                                      channel->dma_irq_name, channel);
+               if (ret) {
+                       netdev_alert(netdev, "error requesting irq %d\n",
+                                    channel->dma_irq);
+                       goto err_irq;
+               }
+       }
+
+       return 0;
+
+err_irq:
+       /* 'i' is unsigned, so the final decrement wraps to UINT_MAX,
+        * which fails the 'i < channel_count' test and ends the loop
+        */
+       for (i--, channel--; i < pdata->channel_count; i--, channel--)
+               devm_free_irq(pdata->dev, channel->dma_irq, channel);
+
+       devm_free_irq(pdata->dev, pdata->dev_irq, pdata);
+
+       return ret;
+}
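
The unwind loop above relies on unsigned wraparound to terminate. An
equivalent shape for the same cleanup that makes the termination explicit,
shown only as an alternative sketch:

    err_irq:
            while (i--) {           /* free IRQs 0 .. i-1, in reverse */
                    channel--;
                    devm_free_irq(pdata->dev, channel->dma_irq, channel);
            }

            devm_free_irq(pdata->dev, pdata->dev_irq, pdata);

            return ret;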
+
+static void xlgmac_free_irqs(struct xlgmac_pdata *pdata)
+{
+       struct xlgmac_channel *channel;
+       unsigned int i;
+
+       devm_free_irq(pdata->dev, pdata->dev_irq, pdata);
+
+       if (!pdata->per_channel_irq)
+               return;
+
+       channel = pdata->channel_head;
+       for (i = 0; i < pdata->channel_count; i++, channel++)
+               devm_free_irq(pdata->dev, channel->dma_irq, channel);
+}
+
+static void xlgmac_free_tx_data(struct xlgmac_pdata *pdata)
+{
+       struct xlgmac_desc_ops *desc_ops = &pdata->desc_ops;
+       struct xlgmac_desc_data *desc_data;
+       struct xlgmac_channel *channel;
+       struct xlgmac_ring *ring;
+       unsigned int i, j;
+
+       channel = pdata->channel_head;
+       for (i = 0; i < pdata->channel_count; i++, channel++) {
+               ring = channel->tx_ring;
+               if (!ring)
+                       break;
+
+               for (j = 0; j < ring->dma_desc_count; j++) {
+                       desc_data = XLGMAC_GET_DESC_DATA(ring, j);
+                       desc_ops->unmap_desc_data(pdata, desc_data);
+               }
+       }
+}
+
+static void xlgmac_free_rx_data(struct xlgmac_pdata *pdata)
+{
+       struct xlgmac_desc_ops *desc_ops = &pdata->desc_ops;
+       struct xlgmac_desc_data *desc_data;
+       struct xlgmac_channel *channel;
+       struct xlgmac_ring *ring;
+       unsigned int i, j;
+
+       channel = pdata->channel_head;
+       for (i = 0; i < pdata->channel_count; i++, channel++) {
+               ring = channel->rx_ring;
+               if (!ring)
+                       break;
+
+               for (j = 0; j < ring->dma_desc_count; j++) {
+                       desc_data = XLGMAC_GET_DESC_DATA(ring, j);
+                       desc_ops->unmap_desc_data(pdata, desc_data);
+               }
+       }
+}
+
+static int xlgmac_start(struct xlgmac_pdata *pdata)
+{
+       struct xlgmac_hw_ops *hw_ops = &pdata->hw_ops;
+       struct net_device *netdev = pdata->netdev;
+       int ret;
+
+       hw_ops->init(pdata);
+       xlgmac_napi_enable(pdata, 1);
+
+       ret = xlgmac_request_irqs(pdata);
+       if (ret)
+               goto err_napi;
+
+       hw_ops->enable_tx(pdata);
+       hw_ops->enable_rx(pdata);
+       netif_tx_start_all_queues(netdev);
+
+       return 0;
+
+err_napi:
+       xlgmac_napi_disable(pdata, 1);
+       hw_ops->exit(pdata);
+
+       return ret;
+}
+
+static void xlgmac_stop(struct xlgmac_pdata *pdata)
+{
+       struct xlgmac_hw_ops *hw_ops = &pdata->hw_ops;
+       struct net_device *netdev = pdata->netdev;
+       struct xlgmac_channel *channel;
+       struct netdev_queue *txq;
+       unsigned int i;
+
+       netif_tx_stop_all_queues(netdev);
+       xlgmac_stop_timers(pdata);
+       hw_ops->disable_tx(pdata);
+       hw_ops->disable_rx(pdata);
+       xlgmac_free_irqs(pdata);
+       xlgmac_napi_disable(pdata, 1);
+       hw_ops->exit(pdata);
+
+       channel = pdata->channel_head;
+       for (i = 0; i < pdata->channel_count; i++, channel++) {
+               if (!channel->tx_ring)
+                       continue;
+
+               txq = netdev_get_tx_queue(netdev, channel->queue_index);
+               netdev_tx_reset_queue(txq);
+       }
+}
+
+static void xlgmac_restart_dev(struct xlgmac_pdata *pdata)
+{
+       /* If not running, "restart" will happen on open */
+       if (!netif_running(pdata->netdev))
+               return;
+
+       xlgmac_stop(pdata);
+
+       xlgmac_free_tx_data(pdata);
+       xlgmac_free_rx_data(pdata);
+
+       xlgmac_start(pdata);
+}
+
+static void xlgmac_restart(struct work_struct *work)
+{
+       struct xlgmac_pdata *pdata = container_of(work,
+                                                  struct xlgmac_pdata,
+                                                  restart_work);
+
+       rtnl_lock();
+
+       xlgmac_restart_dev(pdata);
+
+       rtnl_unlock();
+}
+
+static int xlgmac_open(struct net_device *netdev)
+{
+       struct xlgmac_pdata *pdata = netdev_priv(netdev);
+       struct xlgmac_desc_ops *desc_ops;
+       int ret;
+
+       desc_ops = &pdata->desc_ops;
+
+       /* TODO: Initialize the phy */
+
+       /* Calculate the Rx buffer size before allocating rings */
+       ret = xlgmac_calc_rx_buf_size(netdev, netdev->mtu);
+       if (ret < 0)
+               return ret;
+       pdata->rx_buf_size = ret;
+
+       /* Allocate the channels and rings */
+       ret = desc_ops->alloc_channles_and_rings(pdata);
+       if (ret)
+               return ret;
+
+       INIT_WORK(&pdata->restart_work, xlgmac_restart);
+       xlgmac_init_timers(pdata);
+
+       ret = xlgmac_start(pdata);
+       if (ret)
+               goto err_channels_and_rings;
+
+       return 0;
+
+err_channels_and_rings:
+       desc_ops->free_channels_and_rings(pdata);
+
+       return ret;
+}
+
+static int xlgmac_close(struct net_device *netdev)
+{
+       struct xlgmac_pdata *pdata = netdev_priv(netdev);
+       struct xlgmac_desc_ops *desc_ops;
+
+       desc_ops = &pdata->desc_ops;
+
+       /* Stop the device */
+       xlgmac_stop(pdata);
+
+       /* Free the channels and rings */
+       desc_ops->free_channels_and_rings(pdata);
+
+       return 0;
+}
+
+static void xlgmac_tx_timeout(struct net_device *netdev)
+{
+       struct xlgmac_pdata *pdata = netdev_priv(netdev);
+
+       netdev_warn(netdev, "tx timeout, device restarting\n");
+       schedule_work(&pdata->restart_work);
+}
+
+static int xlgmac_xmit(struct sk_buff *skb, struct net_device *netdev)
+{
+       struct xlgmac_pdata *pdata = netdev_priv(netdev);
+       struct xlgmac_pkt_info *tx_pkt_info;
+       struct xlgmac_desc_ops *desc_ops;
+       struct xlgmac_channel *channel;
+       struct xlgmac_hw_ops *hw_ops;
+       struct netdev_queue *txq;
+       struct xlgmac_ring *ring;
+       int ret;
+
+       desc_ops = &pdata->desc_ops;
+       hw_ops = &pdata->hw_ops;
+
+       XLGMAC_PR("skb->len = %d\n", skb->len);
+
+       channel = pdata->channel_head + skb->queue_mapping;
+       txq = netdev_get_tx_queue(netdev, channel->queue_index);
+       ring = channel->tx_ring;
+       tx_pkt_info = &ring->pkt_info;
+
+       if (skb->len == 0) {
+               netif_err(pdata, tx_err, netdev,
+                         "empty skb received from stack\n");
+               dev_kfree_skb_any(skb);
+               return NETDEV_TX_OK;
+       }
+
+       /* Prepare preliminary packet info for TX */
+       memset(tx_pkt_info, 0, sizeof(*tx_pkt_info));
+       xlgmac_prep_tx_pkt(pdata, ring, skb, tx_pkt_info);
+
+       /* Check that there are enough descriptors available */
+       ret = xlgmac_maybe_stop_tx_queue(channel, ring,
+                                        tx_pkt_info->desc_count);
+       if (ret)
+               return ret;
+
+       ret = xlgmac_prep_tso(skb, tx_pkt_info);
+       if (ret) {
+               netif_err(pdata, tx_err, netdev,
+                         "error processing TSO packet\n");
+               dev_kfree_skb_any(skb);
+               return ret;
+       }
+       xlgmac_prep_vlan(skb, tx_pkt_info);
+
+       if (!desc_ops->map_tx_skb(channel, skb)) {
+               dev_kfree_skb_any(skb);
+               return NETDEV_TX_OK;
+       }
+
+       /* Report on the actual number of bytes (to be) sent */
+       netdev_tx_sent_queue(txq, tx_pkt_info->tx_bytes);
+
+       /* Configure required descriptor fields for transmission */
+       hw_ops->dev_xmit(channel);
+
+       if (netif_msg_pktdata(pdata))
+               xlgmac_print_pkt(netdev, skb, true);
+
+       /* Stop the queue in advance if there may not be enough descriptors */
+       xlgmac_maybe_stop_tx_queue(channel, ring, XLGMAC_TX_MAX_DESC_NR);
+
+       return NETDEV_TX_OK;
+}
+
+static void xlgmac_get_stats64(struct net_device *netdev,
+                              struct rtnl_link_stats64 *s)
+{
+       struct xlgmac_pdata *pdata = netdev_priv(netdev);
+       struct xlgmac_stats *pstats = &pdata->stats;
+
+       pdata->hw_ops.read_mmc_stats(pdata);
+
+       s->rx_packets = pstats->rxframecount_gb;
+       s->rx_bytes = pstats->rxoctetcount_gb;
+       s->rx_errors = pstats->rxframecount_gb -
+                      pstats->rxbroadcastframes_g -
+                      pstats->rxmulticastframes_g -
+                      pstats->rxunicastframes_g;
+       s->multicast = pstats->rxmulticastframes_g;
+       s->rx_length_errors = pstats->rxlengtherror;
+       s->rx_crc_errors = pstats->rxcrcerror;
+       s->rx_fifo_errors = pstats->rxfifooverflow;
+
+       s->tx_packets = pstats->txframecount_gb;
+       s->tx_bytes = pstats->txoctetcount_gb;
+       s->tx_errors = pstats->txframecount_gb - pstats->txframecount_g;
+       s->tx_dropped = netdev->stats.tx_dropped;
+}
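
The rx_errors derivation above is simply "all frames counted minus the good
ones": the *_gb counters include bad frames, the *_g counters do not. A
standalone check with illustrative counter values:

    #include <stdio.h>

    int main(void)
    {
            unsigned long rx_gb = 1000;     /* good + bad, illustrative */
            unsigned long bcast_g = 10, mcast_g = 40, ucast_g = 930;

            /* whatever is left over failed reception */
            printf("rx_errors=%lu\n", rx_gb - bcast_g - mcast_g - ucast_g);
            return 0;                       /* prints 20 */
    }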
+
+static int xlgmac_set_mac_address(struct net_device *netdev, void *addr)
+{
+       struct xlgmac_pdata *pdata = netdev_priv(netdev);
+       struct xlgmac_hw_ops *hw_ops = &pdata->hw_ops;
+       struct sockaddr *saddr = addr;
+
+       if (!is_valid_ether_addr(saddr->sa_data))
+               return -EADDRNOTAVAIL;
+
+       memcpy(netdev->dev_addr, saddr->sa_data, netdev->addr_len);
+
+       hw_ops->set_mac_address(pdata, netdev->dev_addr);
+
+       return 0;
+}
+
+static int xlgmac_ioctl(struct net_device *netdev,
+                       struct ifreq *ifreq, int cmd)
+{
+       if (!netif_running(netdev))
+               return -ENODEV;
+
+       return 0;
+}
+
+static int xlgmac_change_mtu(struct net_device *netdev, int mtu)
+{
+       struct xlgmac_pdata *pdata = netdev_priv(netdev);
+       int ret;
+
+       ret = xlgmac_calc_rx_buf_size(netdev, mtu);
+       if (ret < 0)
+               return ret;
+
+       pdata->rx_buf_size = ret;
+       netdev->mtu = mtu;
+
+       xlgmac_restart_dev(pdata);
+
+       return 0;
+}
+
+static int xlgmac_vlan_rx_add_vid(struct net_device *netdev,
+                                 __be16 proto,
+                                 u16 vid)
+{
+       struct xlgmac_pdata *pdata = netdev_priv(netdev);
+       struct xlgmac_hw_ops *hw_ops = &pdata->hw_ops;
+
+       set_bit(vid, pdata->active_vlans);
+       hw_ops->update_vlan_hash_table(pdata);
+
+       return 0;
+}
+
+static int xlgmac_vlan_rx_kill_vid(struct net_device *netdev,
+                                  __be16 proto,
+                                  u16 vid)
+{
+       struct xlgmac_pdata *pdata = netdev_priv(netdev);
+       struct xlgmac_hw_ops *hw_ops = &pdata->hw_ops;
+
+       clear_bit(vid, pdata->active_vlans);
+       hw_ops->update_vlan_hash_table(pdata);
+
+       return 0;
+}
+
+#ifdef CONFIG_NET_POLL_CONTROLLER
+static void xlgmac_poll_controller(struct net_device *netdev)
+{
+       struct xlgmac_pdata *pdata = netdev_priv(netdev);
+       struct xlgmac_channel *channel;
+       unsigned int i;
+
+       if (pdata->per_channel_irq) {
+               channel = pdata->channel_head;
+               for (i = 0; i < pdata->channel_count; i++, channel++)
+                       xlgmac_dma_isr(channel->dma_irq, channel);
+       } else {
+               disable_irq(pdata->dev_irq);
+               xlgmac_isr(pdata->dev_irq, pdata);
+               enable_irq(pdata->dev_irq);
+       }
+}
+#endif /* CONFIG_NET_POLL_CONTROLLER */
+
+static int xlgmac_set_features(struct net_device *netdev,
+                              netdev_features_t features)
+{
+       netdev_features_t rxhash, rxcsum, rxvlan, rxvlan_filter;
+       struct xlgmac_pdata *pdata = netdev_priv(netdev);
+       struct xlgmac_hw_ops *hw_ops = &pdata->hw_ops;
+       int ret = 0;
+
+       rxhash = pdata->netdev_features & NETIF_F_RXHASH;
+       rxcsum = pdata->netdev_features & NETIF_F_RXCSUM;
+       rxvlan = pdata->netdev_features & NETIF_F_HW_VLAN_CTAG_RX;
+       rxvlan_filter = pdata->netdev_features & NETIF_F_HW_VLAN_CTAG_FILTER;
+
+       if ((features & NETIF_F_RXHASH) && !rxhash)
+               ret = hw_ops->enable_rss(pdata);
+       else if (!(features & NETIF_F_RXHASH) && rxhash)
+               ret = hw_ops->disable_rss(pdata);
+       if (ret)
+               return ret;
+
+       if ((features & NETIF_F_RXCSUM) && !rxcsum)
+               hw_ops->enable_rx_csum(pdata);
+       else if (!(features & NETIF_F_RXCSUM) && rxcsum)
+               hw_ops->disable_rx_csum(pdata);
+
+       if ((features & NETIF_F_HW_VLAN_CTAG_RX) && !rxvlan)
+               hw_ops->enable_rx_vlan_stripping(pdata);
+       else if (!(features & NETIF_F_HW_VLAN_CTAG_RX) && rxvlan)
+               hw_ops->disable_rx_vlan_stripping(pdata);
+
+       if ((features & NETIF_F_HW_VLAN_CTAG_FILTER) && !rxvlan_filter)
+               hw_ops->enable_rx_vlan_filtering(pdata);
+       else if (!(features & NETIF_F_HW_VLAN_CTAG_FILTER) && rxvlan_filter)
+               hw_ops->disable_rx_vlan_filtering(pdata);
+
+       pdata->netdev_features = features;
+
+       return 0;
+}
+
+static void xlgmac_set_rx_mode(struct net_device *netdev)
+{
+       struct xlgmac_pdata *pdata = netdev_priv(netdev);
+       struct xlgmac_hw_ops *hw_ops = &pdata->hw_ops;
+
+       hw_ops->config_rx_mode(pdata);
+}
+
+static const struct net_device_ops xlgmac_netdev_ops = {
+       .ndo_open               = xlgmac_open,
+       .ndo_stop               = xlgmac_close,
+       .ndo_start_xmit         = xlgmac_xmit,
+       .ndo_tx_timeout         = xlgmac_tx_timeout,
+       .ndo_get_stats64        = xlgmac_get_stats64,
+       .ndo_change_mtu         = xlgmac_change_mtu,
+       .ndo_set_mac_address    = xlgmac_set_mac_address,
+       .ndo_validate_addr      = eth_validate_addr,
+       .ndo_do_ioctl           = xlgmac_ioctl,
+       .ndo_vlan_rx_add_vid    = xlgmac_vlan_rx_add_vid,
+       .ndo_vlan_rx_kill_vid   = xlgmac_vlan_rx_kill_vid,
+#ifdef CONFIG_NET_POLL_CONTROLLER
+       .ndo_poll_controller    = xlgmac_poll_controller,
+#endif
+       .ndo_set_features       = xlgmac_set_features,
+       .ndo_set_rx_mode        = xlgmac_set_rx_mode,
+};
+
+const struct net_device_ops *xlgmac_get_netdev_ops(void)
+{
+       return &xlgmac_netdev_ops;
+}
+
+static void xlgmac_rx_refresh(struct xlgmac_channel *channel)
+{
+       struct xlgmac_pdata *pdata = channel->pdata;
+       struct xlgmac_ring *ring = channel->rx_ring;
+       struct xlgmac_desc_data *desc_data;
+       struct xlgmac_desc_ops *desc_ops;
+       struct xlgmac_hw_ops *hw_ops;
+
+       desc_ops = &pdata->desc_ops;
+       hw_ops = &pdata->hw_ops;
+
+       while (ring->dirty != ring->cur) {
+               desc_data = XLGMAC_GET_DESC_DATA(ring, ring->dirty);
+
+               /* Reset desc_data values */
+               desc_ops->unmap_desc_data(pdata, desc_data);
+
+               if (desc_ops->map_rx_buffer(pdata, ring, desc_data))
+                       break;
+
+               hw_ops->rx_desc_reset(pdata, desc_data, ring->dirty);
+
+               ring->dirty++;
+       }
+
+       /* Make sure everything is written before the register write */
+       wmb();
+
+       /* Update the Rx Tail Pointer Register with address of
+        * the last cleaned entry
+        */
+       desc_data = XLGMAC_GET_DESC_DATA(ring, ring->dirty - 1);
+       writel(lower_32_bits(desc_data->dma_desc_addr),
+              XLGMAC_DMA_REG(channel, DMA_CH_RDTR_LO));
+}
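
ring->dirty and ring->cur are free-running counters; XLGMAC_GET_DESC_DATA() is
assumed to mask them into the ring, which is why "ring->dirty - 1" safely names
the last refreshed slot even right after a wrap. A standalone illustration with
an assumed power-of-two ring size:

    #include <stdio.h>

    int main(void)
    {
            unsigned int desc_count = 8;    /* assumed power-of-two ring */
            unsigned int dirty = 0;         /* counter just wrapped */

            /* (0 - 1) wraps to UINT_MAX; the mask lands on the last slot */
            printf("last slot = %u\n", (dirty - 1) & (desc_count - 1));  /* 7 */
            return 0;
    }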
+
+static struct sk_buff *xlgmac_create_skb(struct xlgmac_pdata *pdata,
+                                        struct napi_struct *napi,
+                                        struct xlgmac_desc_data *desc_data,
+                                        unsigned int len)
+{
+       unsigned int copy_len;
+       struct sk_buff *skb;
+       u8 *packet;
+
+       skb = napi_alloc_skb(napi, desc_data->rx.hdr.dma_len);
+       if (!skb)
+               return NULL;
+
+       /* Start with the header buffer which may contain just the header
+        * or the header plus data
+        */
+       dma_sync_single_range_for_cpu(pdata->dev, desc_data->rx.hdr.dma_base,
+                                     desc_data->rx.hdr.dma_off,
+                                     desc_data->rx.hdr.dma_len,
+                                     DMA_FROM_DEVICE);
+
+       packet = page_address(desc_data->rx.hdr.pa.pages) +
+                desc_data->rx.hdr.pa.pages_offset;
+       copy_len = (desc_data->rx.hdr_len) ? desc_data->rx.hdr_len : len;
+       copy_len = min(desc_data->rx.hdr.dma_len, copy_len);
+       skb_copy_to_linear_data(skb, packet, copy_len);
+       skb_put(skb, copy_len);
+
+       len -= copy_len;
+       if (len) {
+               /* Add the remaining data as a frag */
+               dma_sync_single_range_for_cpu(pdata->dev,
+                                             desc_data->rx.buf.dma_base,
+                                             desc_data->rx.buf.dma_off,
+                                             desc_data->rx.buf.dma_len,
+                                             DMA_FROM_DEVICE);
+
+               skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
+                               desc_data->rx.buf.pa.pages,
+                               desc_data->rx.buf.pa.pages_offset,
+                               len, desc_data->rx.buf.dma_len);
+               desc_data->rx.buf.pa.pages = NULL;
+       }
+
+       return skb;
+}
+
+static int xlgmac_tx_poll(struct xlgmac_channel *channel)
+{
+       struct xlgmac_pdata *pdata = channel->pdata;
+       struct xlgmac_ring *ring = channel->tx_ring;
+       struct net_device *netdev = pdata->netdev;
+       unsigned int tx_packets = 0, tx_bytes = 0;
+       struct xlgmac_desc_data *desc_data;
+       struct xlgmac_dma_desc *dma_desc;
+       struct xlgmac_desc_ops *desc_ops;
+       struct xlgmac_hw_ops *hw_ops;
+       struct netdev_queue *txq;
+       int processed = 0;
+       unsigned int cur;
+
+       desc_ops = &pdata->desc_ops;
+       hw_ops = &pdata->hw_ops;
+
+       /* Nothing to do if there isn't a Tx ring for this channel */
+       if (!ring)
+               return 0;
+
+       cur = ring->cur;
+
+       /* Be sure we get ring->cur before accessing descriptor data */
+       smp_rmb();
+
+       txq = netdev_get_tx_queue(netdev, channel->queue_index);
+
+       while ((processed < XLGMAC_TX_DESC_MAX_PROC) &&
+              (ring->dirty != cur)) {
+               desc_data = XLGMAC_GET_DESC_DATA(ring, ring->dirty);
+               dma_desc = desc_data->dma_desc;
+
+               if (!hw_ops->tx_complete(dma_desc))
+                       break;
+
+               /* Make sure descriptor fields are read after reading
+                * the OWN bit
+                */
+               dma_rmb();
+
+               if (netif_msg_tx_done(pdata))
+                       xlgmac_dump_tx_desc(pdata, ring, ring->dirty, 1, 0);
+
+               if (hw_ops->is_last_desc(dma_desc)) {
+                       tx_packets += desc_data->tx.packets;
+                       tx_bytes += desc_data->tx.bytes;
+               }
+
+               /* Free the SKB and reset the descriptor for re-use */
+               desc_ops->unmap_desc_data(pdata, desc_data);
+               hw_ops->tx_desc_reset(desc_data);
+
+               processed++;
+               ring->dirty++;
+       }
+
+       if (!processed)
+               return 0;
+
+       netdev_tx_completed_queue(txq, tx_packets, tx_bytes);
+
+       if ((ring->tx.queue_stopped == 1) &&
+           (xlgmac_tx_avail_desc(ring) > XLGMAC_TX_DESC_MIN_FREE)) {
+               ring->tx.queue_stopped = 0;
+               netif_tx_wake_queue(txq);
+       }
+
+       XLGMAC_PR("processed=%d\n", processed);
+
+       return processed;
+}
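
netdev_tx_completed_queue() above is the completion half of byte queue limits;
bounding the bytes in flight per queue depends on its three call sites staying
in sync. The pairing in this driver, summarized from the patch itself:

    /* BQL call sites (summary of existing code, nothing new):
     *   xlgmac_xmit():     netdev_tx_sent_queue(txq, tx_bytes)
     *   xlgmac_tx_poll():  netdev_tx_completed_queue(txq, pkts, bytes)
     *   xlgmac_stop():     netdev_tx_reset_queue(txq)
     */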
+
+static int xlgmac_rx_poll(struct xlgmac_channel *channel, int budget)
+{
+       struct xlgmac_pdata *pdata = channel->pdata;
+       struct xlgmac_ring *ring = channel->rx_ring;
+       struct net_device *netdev = pdata->netdev;
+       unsigned int len, dma_desc_len, max_len;
+       unsigned int context_next, context;
+       struct xlgmac_desc_data *desc_data;
+       struct xlgmac_pkt_info *pkt_info;
+       unsigned int incomplete, error;
+       struct xlgmac_hw_ops *hw_ops;
+       unsigned int received = 0;
+       struct napi_struct *napi;
+       struct sk_buff *skb;
+       int packet_count = 0;
+
+       hw_ops = &pdata->hw_ops;
+
+       /* Nothing to do if there isn't a Rx ring for this channel */
+       if (!ring)
+               return 0;
+
+       incomplete = 0;
+       context_next = 0;
+
+       napi = (pdata->per_channel_irq) ? &channel->napi : &pdata->napi;
+
+       desc_data = XLGMAC_GET_DESC_DATA(ring, ring->cur);
+       pkt_info = &ring->pkt_info;
+       while (packet_count < budget) {
+               /* On the first pass, see if we need to restore saved state */
+               if (!received && desc_data->state_saved) {
+                       skb = desc_data->state.skb;
+                       error = desc_data->state.error;
+                       len = desc_data->state.len;
+               } else {
+                       memset(pkt_info, 0, sizeof(*pkt_info));
+                       skb = NULL;
+                       error = 0;
+                       len = 0;
+               }
+
+read_again:
+               desc_data = XLGMAC_GET_DESC_DATA(ring, ring->cur);
+
+               if (xlgmac_rx_dirty_desc(ring) > XLGMAC_RX_DESC_MAX_DIRTY)
+                       xlgmac_rx_refresh(channel);
+
+               if (hw_ops->dev_read(channel))
+                       break;
+
+               received++;
+               ring->cur++;
+
+               incomplete = XLGMAC_GET_REG_BITS(
+                                       pkt_info->attributes,
+                                       RX_PACKET_ATTRIBUTES_INCOMPLETE_POS,
+                                       RX_PACKET_ATTRIBUTES_INCOMPLETE_LEN);
+               context_next = XLGMAC_GET_REG_BITS(
+                                       pkt_info->attributes,
+                                       RX_PACKET_ATTRIBUTES_CONTEXT_NEXT_POS,
+                                       RX_PACKET_ATTRIBUTES_CONTEXT_NEXT_LEN);
+               context = XLGMAC_GET_REG_BITS(
+                                       pkt_info->attributes,
+                                       RX_PACKET_ATTRIBUTES_CONTEXT_POS,
+                                       RX_PACKET_ATTRIBUTES_CONTEXT_LEN);
+
+               /* Earlier error, just drain the remaining data */
+               if ((incomplete || context_next) && error)
+                       goto read_again;
+
+               if (error || pkt_info->errors) {
+                       if (pkt_info->errors)
+                               netif_err(pdata, rx_err, netdev,
+                                         "error in received packet\n");
+                       dev_kfree_skb(skb);
+                       goto next_packet;
+               }
+
+               if (!context) {
+                       /* Length is cumulative, get this descriptor's length */
+                       dma_desc_len = desc_data->rx.len - len;
+                       len += dma_desc_len;
+
+                       if (dma_desc_len && !skb) {
+                               skb = xlgmac_create_skb(pdata, napi, desc_data,
+                                                       dma_desc_len);
+                               if (!skb)
+                                       error = 1;
+                       } else if (dma_desc_len) {
+                               dma_sync_single_range_for_cpu(
+                                               pdata->dev,
+                                               desc_data->rx.buf.dma_base,
+                                               desc_data->rx.buf.dma_off,
+                                               desc_data->rx.buf.dma_len,
+                                               DMA_FROM_DEVICE);
+
+                               skb_add_rx_frag(
+                                       skb, skb_shinfo(skb)->nr_frags,
+                                       desc_data->rx.buf.pa.pages,
+                                       desc_data->rx.buf.pa.pages_offset,
+                                       dma_desc_len,
+                                       desc_data->rx.buf.dma_len);
+                               desc_data->rx.buf.pa.pages = NULL;
+                       }
+               }
+
+               if (incomplete || context_next)
+                       goto read_again;
+
+               if (!skb)
+                       goto next_packet;
+
+               /* Be sure we don't exceed the configured MTU */
+               max_len = netdev->mtu + ETH_HLEN;
+               if (!(netdev->features & NETIF_F_HW_VLAN_CTAG_RX) &&
+                   (skb->protocol == htons(ETH_P_8021Q)))
+                       max_len += VLAN_HLEN;
+
+               if (skb->len > max_len) {
+                       netif_err(pdata, rx_err, netdev,
+                                 "packet length exceeds configured MTU\n");
+                       dev_kfree_skb(skb);
+                       goto next_packet;
+               }
+
+               if (netif_msg_pktdata(pdata))
+                       xlgmac_print_pkt(netdev, skb, false);
+
+               skb_checksum_none_assert(skb);
+               if (XLGMAC_GET_REG_BITS(pkt_info->attributes,
+                                       RX_PACKET_ATTRIBUTES_CSUM_DONE_POS,
+                                   RX_PACKET_ATTRIBUTES_CSUM_DONE_LEN))
+                       skb->ip_summed = CHECKSUM_UNNECESSARY;
+
+               if (XLGMAC_GET_REG_BITS(pkt_info->attributes,
+                                       RX_PACKET_ATTRIBUTES_VLAN_CTAG_POS,
+                                   RX_PACKET_ATTRIBUTES_VLAN_CTAG_LEN))
+                       __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
+                                              pkt_info->vlan_ctag);
+
+               if (XLGMAC_GET_REG_BITS(pkt_info->attributes,
+                                       RX_PACKET_ATTRIBUTES_RSS_HASH_POS,
+                                   RX_PACKET_ATTRIBUTES_RSS_HASH_LEN))
+                       skb_set_hash(skb, pkt_info->rss_hash,
+                                    pkt_info->rss_hash_type);
+
+               skb->dev = netdev;
+               skb->protocol = eth_type_trans(skb, netdev);
+               skb_record_rx_queue(skb, channel->queue_index);
+
+               napi_gro_receive(napi, skb);
+
+next_packet:
+               packet_count++;
+       }
+
+       /* Check if we need to save state before leaving */
+       if (received && (incomplete || context_next)) {
+               desc_data = XLGMAC_GET_DESC_DATA(ring, ring->cur);
+               desc_data->state_saved = 1;
+               desc_data->state.skb = skb;
+               desc_data->state.len = len;
+               desc_data->state.error = error;
+       }
+
+       XLGMAC_PR("packet_count = %d\n", packet_count);
+
+       return packet_count;
+}
+
+static int xlgmac_one_poll(struct napi_struct *napi, int budget)
+{
+       struct xlgmac_channel *channel = container_of(napi,
+                                               struct xlgmac_channel,
+                                               napi);
+       int processed = 0;
+
+       XLGMAC_PR("budget=%d\n", budget);
+
+       /* Cleanup Tx ring first */
+       xlgmac_tx_poll(channel);
+
+       /* Process Rx ring next */
+       processed = xlgmac_rx_poll(channel, budget);
+
+       /* If we processed everything, we are done */
+       if (processed < budget) {
+               /* Turn off polling */
+               napi_complete_done(napi, processed);
+
+               /* Enable Tx and Rx interrupts */
+               enable_irq(channel->dma_irq);
+       }
+
+       XLGMAC_PR("received = %d\n", processed);
+
+       return processed;
+}
+
+static int xlgmac_all_poll(struct napi_struct *napi, int budget)
+{
+       struct xlgmac_pdata *pdata = container_of(napi,
+                                                  struct xlgmac_pdata,
+                                                  napi);
+       struct xlgmac_channel *channel;
+       int processed, last_processed;
+       int ring_budget;
+       unsigned int i;
+
+       XLGMAC_PR("budget=%d\n", budget);
+
+       processed = 0;
+       ring_budget = budget / pdata->rx_ring_count;
+       do {
+               last_processed = processed;
+
+               channel = pdata->channel_head;
+               for (i = 0; i < pdata->channel_count; i++, channel++) {
+                       /* Cleanup Tx ring first */
+                       xlgmac_tx_poll(channel);
+
+                       /* Process Rx ring next */
+                       if (ring_budget > (budget - processed))
+                               ring_budget = budget - processed;
+                       processed += xlgmac_rx_poll(channel, ring_budget);
+               }
+       } while ((processed < budget) && (processed != last_processed));
+
+       /* If we processed everything, we are done */
+       if (processed < budget) {
+               /* Turn off polling */
+               napi_complete_done(napi, processed);
+
+               /* Enable Tx and Rx interrupts */
+               xlgmac_enable_rx_tx_ints(pdata);
+       }
+
+       XLGMAC_PR("received = %d\n", processed);
+
+       return processed;
+}
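
The budget split above gives every Rx ring an equal share per pass, and the
do/while loop lets a busy ring consume budget that idle rings left unused.
With illustrative numbers (budget 64, four Rx rings, 52 packets already
processed this pass):

    #include <stdio.h>

    int main(void)
    {
            int budget = 64, rx_ring_count = 4;       /* illustrative */
            int processed = 52;                       /* used so far */
            int ring_budget = budget / rx_ring_count; /* 16 per ring */

            /* same cap as the driver: never poll past the total budget */
            if (ring_budget > budget - processed)
                    ring_budget = budget - processed;

            printf("ring_budget=%d\n", ring_budget);  /* prints 12 */
            return 0;
    }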
diff --git a/drivers/net/ethernet/synopsys/dwc-xlgmac-pci.c b/drivers/net/ethernet/synopsys/dwc-xlgmac-pci.c
new file mode 100644 (file)
index 0000000..386bafe
--- /dev/null
@@ -0,0 +1,78 @@
+/* Synopsys DesignWare Core Enterprise Ethernet (XLGMAC) Driver
+ *
+ * Copyright (c) 2017 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is dual-licensed; you may select either version 2 of
+ * the GNU General Public License ("GPL") or BSD license ("BSD").
+ *
+ * This Synopsys DWC XLGMAC software driver and associated documentation
+ * (hereinafter the "Software") is an unsupported proprietary work of
+ * Synopsys, Inc. unless otherwise expressly agreed to in writing between
+ * Synopsys and you. The Software IS NOT an item of Licensed Software or a
+ * Licensed Product under any End User Software License Agreement or
+ * Agreement for Licensed Products with Synopsys or any supplement thereto.
+ * Synopsys is a registered trademark of Synopsys, Inc. Other names included
+ * in the SOFTWARE may be the trademarks of their respective owners.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+
+#include "dwc-xlgmac.h"
+#include "dwc-xlgmac-reg.h"
+
+static int xlgmac_probe(struct pci_dev *pcidev, const struct pci_device_id *id)
+{
+       struct device *dev = &pcidev->dev;
+       struct xlgmac_resources res;
+       int i, ret;
+
+       ret = pcim_enable_device(pcidev);
+       if (ret) {
+               dev_err(dev, "ERROR: failed to enable device\n");
+               return ret;
+       }
+
+       for (i = 0; i <= PCI_STD_RESOURCE_END; i++) {
+               if (pci_resource_len(pcidev, i) == 0)
+                       continue;
+               ret = pcim_iomap_regions(pcidev, BIT(i), XLGMAC_DRV_NAME);
+               if (ret)
+                       return ret;
+               break;
+       }
+
+       /* Fail cleanly if no usable BAR was found; otherwise 'i' would
+        * index past the iomap table below.
+        */
+       if (i > PCI_STD_RESOURCE_END)
+               return -ENODEV;
+
+       pci_set_master(pcidev);
+
+       memset(&res, 0, sizeof(res));
+       res.irq = pcidev->irq;
+       res.addr = pcim_iomap_table(pcidev)[i];
+
+       return xlgmac_drv_probe(&pcidev->dev, &res);
+}
+
+static void xlgmac_remove(struct pci_dev *pcidev)
+{
+       xlgmac_drv_remove(&pcidev->dev);
+}
+
+static const struct pci_device_id xlgmac_pci_tbl[] = {
+       { PCI_DEVICE(PCI_VENDOR_ID_SYNOPSYS, 0x7302) },
+       { 0 }
+};
+MODULE_DEVICE_TABLE(pci, xlgmac_pci_tbl);
+
+static struct pci_driver xlgmac_pci_driver = {
+       .name           = XLGMAC_DRV_NAME,
+       .id_table       = xlgmac_pci_tbl,
+       .probe          = xlgmac_probe,
+       .remove         = xlgmac_remove,
+};
+
+module_pci_driver(xlgmac_pci_driver);
+
+MODULE_DESCRIPTION(XLGMAC_DRV_DESC);
+MODULE_VERSION(XLGMAC_DRV_VERSION);
+MODULE_AUTHOR("Jie Deng <jiedeng@synopsys.com>");
+MODULE_LICENSE("Dual BSD/GPL");
diff --git a/drivers/net/ethernet/synopsys/dwc-xlgmac-reg.h b/drivers/net/ethernet/synopsys/dwc-xlgmac-reg.h
new file mode 100644 (file)
index 0000000..3754f22
--- /dev/null
@@ -0,0 +1,744 @@
+/* Synopsys DesignWare Core Enterprise Ethernet (XLGMAC) Driver
+ *
+ * Copyright (c) 2017 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is dual-licensed; you may select either version 2 of
+ * the GNU General Public License ("GPL") or BSD license ("BSD").
+ *
+ * This Synopsys DWC XLGMAC software driver and associated documentation
+ * (hereinafter the "Software") is an unsupported proprietary work of
+ * Synopsys, Inc. unless otherwise expressly agreed to in writing between
+ * Synopsys and you. The Software IS NOT an item of Licensed Software or a
+ * Licensed Product under any End User Software License Agreement or
+ * Agreement for Licensed Products with Synopsys or any supplement thereto.
+ * Synopsys is a registered trademark of Synopsys, Inc. Other names included
+ * in the SOFTWARE may be the trademarks of their respective owners.
+ */
+
+#ifndef __DWC_XLGMAC_REG_H__
+#define __DWC_XLGMAC_REG_H__
+
+/* MAC register offsets */
+#define MAC_TCR                                0x0000
+#define MAC_RCR                                0x0004
+#define MAC_PFR                                0x0008
+#define MAC_HTR0                       0x0010
+#define MAC_VLANTR                     0x0050
+#define MAC_VLANHTR                    0x0058
+#define MAC_VLANIR                     0x0060
+#define MAC_Q0TFCR                     0x0070
+#define MAC_RFCR                       0x0090
+#define MAC_RQC0R                      0x00a0
+#define MAC_RQC1R                      0x00a4
+#define MAC_RQC2R                      0x00a8
+#define MAC_RQC3R                      0x00ac
+#define MAC_ISR                                0x00b0
+#define MAC_IER                                0x00b4
+#define MAC_VR                         0x0110
+#define MAC_HWF0R                      0x011c
+#define MAC_HWF1R                      0x0120
+#define MAC_HWF2R                      0x0124
+#define MAC_MACA0HR                    0x0300
+#define MAC_MACA0LR                    0x0304
+#define MAC_MACA1HR                    0x0308
+#define MAC_MACA1LR                    0x030c
+#define MAC_RSSCR                      0x0c80
+#define MAC_RSSAR                      0x0c88
+#define MAC_RSSDR                      0x0c8c
+
+#define MAC_QTFCR_INC                  4
+#define MAC_MACA_INC                   4
+#define MAC_HTR_INC                    4
+#define MAC_RQC2_INC                   4
+#define MAC_RQC2_Q_PER_REG             4
+
+/* MAC register entry bit positions and sizes */
+#define MAC_HWF0R_ADDMACADRSEL_POS     18
+#define MAC_HWF0R_ADDMACADRSEL_LEN     5
+#define MAC_HWF0R_ARPOFFSEL_POS                9
+#define MAC_HWF0R_ARPOFFSEL_LEN                1
+#define MAC_HWF0R_EEESEL_POS           13
+#define MAC_HWF0R_EEESEL_LEN           1
+#define MAC_HWF0R_PHYIFSEL_POS         1
+#define MAC_HWF0R_PHYIFSEL_LEN         2
+#define MAC_HWF0R_MGKSEL_POS           7
+#define MAC_HWF0R_MGKSEL_LEN           1
+#define MAC_HWF0R_MMCSEL_POS           8
+#define MAC_HWF0R_MMCSEL_LEN           1
+#define MAC_HWF0R_RWKSEL_POS           6
+#define MAC_HWF0R_RWKSEL_LEN           1
+#define MAC_HWF0R_RXCOESEL_POS         16
+#define MAC_HWF0R_RXCOESEL_LEN         1
+#define MAC_HWF0R_SAVLANINS_POS                27
+#define MAC_HWF0R_SAVLANINS_LEN                1
+#define MAC_HWF0R_SMASEL_POS           5
+#define MAC_HWF0R_SMASEL_LEN           1
+#define MAC_HWF0R_TSSEL_POS            12
+#define MAC_HWF0R_TSSEL_LEN            1
+#define MAC_HWF0R_TSSTSSEL_POS         25
+#define MAC_HWF0R_TSSTSSEL_LEN         2
+#define MAC_HWF0R_TXCOESEL_POS         14
+#define MAC_HWF0R_TXCOESEL_LEN         1
+#define MAC_HWF0R_VLHASH_POS           4
+#define MAC_HWF0R_VLHASH_LEN           1
+#define MAC_HWF1R_ADDR64_POS           14
+#define MAC_HWF1R_ADDR64_LEN           2
+#define MAC_HWF1R_ADVTHWORD_POS                13
+#define MAC_HWF1R_ADVTHWORD_LEN                1
+#define MAC_HWF1R_DBGMEMA_POS          19
+#define MAC_HWF1R_DBGMEMA_LEN          1
+#define MAC_HWF1R_DCBEN_POS            16
+#define MAC_HWF1R_DCBEN_LEN            1
+#define MAC_HWF1R_HASHTBLSZ_POS                24
+#define MAC_HWF1R_HASHTBLSZ_LEN                3
+#define MAC_HWF1R_L3L4FNUM_POS         27
+#define MAC_HWF1R_L3L4FNUM_LEN         4
+#define MAC_HWF1R_NUMTC_POS            21
+#define MAC_HWF1R_NUMTC_LEN            3
+#define MAC_HWF1R_RSSEN_POS            20
+#define MAC_HWF1R_RSSEN_LEN            1
+#define MAC_HWF1R_RXFIFOSIZE_POS       0
+#define MAC_HWF1R_RXFIFOSIZE_LEN       5
+#define MAC_HWF1R_SPHEN_POS            17
+#define MAC_HWF1R_SPHEN_LEN            1
+#define MAC_HWF1R_TSOEN_POS            18
+#define MAC_HWF1R_TSOEN_LEN            1
+#define MAC_HWF1R_TXFIFOSIZE_POS       6
+#define MAC_HWF1R_TXFIFOSIZE_LEN       5
+#define MAC_HWF2R_AUXSNAPNUM_POS       28
+#define MAC_HWF2R_AUXSNAPNUM_LEN       3
+#define MAC_HWF2R_PPSOUTNUM_POS                24
+#define MAC_HWF2R_PPSOUTNUM_LEN                3
+#define MAC_HWF2R_RXCHCNT_POS          12
+#define MAC_HWF2R_RXCHCNT_LEN          4
+#define MAC_HWF2R_RXQCNT_POS           0
+#define MAC_HWF2R_RXQCNT_LEN           4
+#define MAC_HWF2R_TXCHCNT_POS          18
+#define MAC_HWF2R_TXCHCNT_LEN          4
+#define MAC_HWF2R_TXQCNT_POS           6
+#define MAC_HWF2R_TXQCNT_LEN           4
+#define MAC_IER_TSIE_POS               12
+#define MAC_IER_TSIE_LEN               1
+#define MAC_ISR_MMCRXIS_POS            9
+#define MAC_ISR_MMCRXIS_LEN            1
+#define MAC_ISR_MMCTXIS_POS            10
+#define MAC_ISR_MMCTXIS_LEN            1
+#define MAC_ISR_PMTIS_POS              4
+#define MAC_ISR_PMTIS_LEN              1
+#define MAC_ISR_TSIS_POS               12
+#define MAC_ISR_TSIS_LEN               1
+#define MAC_MACA1HR_AE_POS             31
+#define MAC_MACA1HR_AE_LEN             1
+#define MAC_PFR_HMC_POS                        2
+#define MAC_PFR_HMC_LEN                        1
+#define MAC_PFR_HPF_POS                        10
+#define MAC_PFR_HPF_LEN                        1
+#define MAC_PFR_HUC_POS                        1
+#define MAC_PFR_HUC_LEN                        1
+#define MAC_PFR_PM_POS                 4
+#define MAC_PFR_PM_LEN                 1
+#define MAC_PFR_PR_POS                 0
+#define MAC_PFR_PR_LEN                 1
+#define MAC_PFR_VTFE_POS               16
+#define MAC_PFR_VTFE_LEN               1
+#define MAC_Q0TFCR_PT_POS              16
+#define MAC_Q0TFCR_PT_LEN              16
+#define MAC_Q0TFCR_TFE_POS             1
+#define MAC_Q0TFCR_TFE_LEN             1
+#define MAC_RCR_ACS_POS                        1
+#define MAC_RCR_ACS_LEN                        1
+#define MAC_RCR_CST_POS                        2
+#define MAC_RCR_CST_LEN                        1
+#define MAC_RCR_DCRCC_POS              3
+#define MAC_RCR_DCRCC_LEN              1
+#define MAC_RCR_HDSMS_POS              12
+#define MAC_RCR_HDSMS_LEN              3
+#define MAC_RCR_IPC_POS                        9
+#define MAC_RCR_IPC_LEN                        1
+#define MAC_RCR_JE_POS                 8
+#define MAC_RCR_JE_LEN                 1
+#define MAC_RCR_LM_POS                 10
+#define MAC_RCR_LM_LEN                 1
+#define MAC_RCR_RE_POS                 0
+#define MAC_RCR_RE_LEN                 1
+#define MAC_RFCR_PFCE_POS              8
+#define MAC_RFCR_PFCE_LEN              1
+#define MAC_RFCR_RFE_POS               0
+#define MAC_RFCR_RFE_LEN               1
+#define MAC_RFCR_UP_POS                        1
+#define MAC_RFCR_UP_LEN                        1
+#define MAC_RQC0R_RXQ0EN_POS           0
+#define MAC_RQC0R_RXQ0EN_LEN           2
+#define MAC_RSSAR_ADDRT_POS            2
+#define MAC_RSSAR_ADDRT_LEN            1
+#define MAC_RSSAR_CT_POS               1
+#define MAC_RSSAR_CT_LEN               1
+#define MAC_RSSAR_OB_POS               0
+#define MAC_RSSAR_OB_LEN               1
+#define MAC_RSSAR_RSSIA_POS            8
+#define MAC_RSSAR_RSSIA_LEN            8
+#define MAC_RSSCR_IP2TE_POS            1
+#define MAC_RSSCR_IP2TE_LEN            1
+#define MAC_RSSCR_RSSE_POS             0
+#define MAC_RSSCR_RSSE_LEN             1
+#define MAC_RSSCR_TCP4TE_POS           2
+#define MAC_RSSCR_TCP4TE_LEN           1
+#define MAC_RSSCR_UDP4TE_POS           3
+#define MAC_RSSCR_UDP4TE_LEN           1
+#define MAC_RSSDR_DMCH_POS             0
+#define MAC_RSSDR_DMCH_LEN             4
+#define MAC_TCR_SS_POS                 28
+#define MAC_TCR_SS_LEN                 3
+#define MAC_TCR_TE_POS                 0
+#define MAC_TCR_TE_LEN                 1
+#define MAC_VLANHTR_VLHT_POS           0
+#define MAC_VLANHTR_VLHT_LEN           16
+#define MAC_VLANIR_VLTI_POS            20
+#define MAC_VLANIR_VLTI_LEN            1
+#define MAC_VLANIR_CSVL_POS            19
+#define MAC_VLANIR_CSVL_LEN            1
+#define MAC_VLANTR_DOVLTC_POS          20
+#define MAC_VLANTR_DOVLTC_LEN          1
+#define MAC_VLANTR_ERSVLM_POS          19
+#define MAC_VLANTR_ERSVLM_LEN          1
+#define MAC_VLANTR_ESVL_POS            18
+#define MAC_VLANTR_ESVL_LEN            1
+#define MAC_VLANTR_ETV_POS             16
+#define MAC_VLANTR_ETV_LEN             1
+#define MAC_VLANTR_EVLS_POS            21
+#define MAC_VLANTR_EVLS_LEN            2
+#define MAC_VLANTR_EVLRXS_POS          24
+#define MAC_VLANTR_EVLRXS_LEN          1
+#define MAC_VLANTR_VL_POS              0
+#define MAC_VLANTR_VL_LEN              16
+#define MAC_VLANTR_VTHM_POS            25
+#define MAC_VLANTR_VTHM_LEN            1
+#define MAC_VLANTR_VTIM_POS            17
+#define MAC_VLANTR_VTIM_LEN            1
+#define MAC_VR_DEVID_POS               8
+#define MAC_VR_DEVID_LEN               8
+#define MAC_VR_SNPSVER_POS             0
+#define MAC_VR_SNPSVER_LEN             8
+#define MAC_VR_USERVER_POS             16
+#define MAC_VR_USERVER_LEN             8
+
+/* MMC register offsets */
+#define MMC_CR                         0x0800
+#define MMC_RISR                       0x0804
+#define MMC_TISR                       0x0808
+#define MMC_RIER                       0x080c
+#define MMC_TIER                       0x0810
+#define MMC_TXOCTETCOUNT_GB_LO         0x0814
+#define MMC_TXFRAMECOUNT_GB_LO         0x081c
+#define MMC_TXBROADCASTFRAMES_G_LO     0x0824
+#define MMC_TXMULTICASTFRAMES_G_LO     0x082c
+#define MMC_TX64OCTETS_GB_LO           0x0834
+#define MMC_TX65TO127OCTETS_GB_LO      0x083c
+#define MMC_TX128TO255OCTETS_GB_LO     0x0844
+#define MMC_TX256TO511OCTETS_GB_LO     0x084c
+#define MMC_TX512TO1023OCTETS_GB_LO    0x0854
+#define MMC_TX1024TOMAXOCTETS_GB_LO    0x085c
+#define MMC_TXUNICASTFRAMES_GB_LO      0x0864
+#define MMC_TXMULTICASTFRAMES_GB_LO    0x086c
+#define MMC_TXBROADCASTFRAMES_GB_LO    0x0874
+#define MMC_TXUNDERFLOWERROR_LO                0x087c
+#define MMC_TXOCTETCOUNT_G_LO          0x0884
+#define MMC_TXFRAMECOUNT_G_LO          0x088c
+#define MMC_TXPAUSEFRAMES_LO           0x0894
+#define MMC_TXVLANFRAMES_G_LO          0x089c
+#define MMC_RXFRAMECOUNT_GB_LO         0x0900
+#define MMC_RXOCTETCOUNT_GB_LO         0x0908
+#define MMC_RXOCTETCOUNT_G_LO          0x0910
+#define MMC_RXBROADCASTFRAMES_G_LO     0x0918
+#define MMC_RXMULTICASTFRAMES_G_LO     0x0920
+#define MMC_RXCRCERROR_LO              0x0928
+#define MMC_RXRUNTERROR                        0x0930
+#define MMC_RXJABBERERROR              0x0934
+#define MMC_RXUNDERSIZE_G              0x0938
+#define MMC_RXOVERSIZE_G               0x093c
+#define MMC_RX64OCTETS_GB_LO           0x0940
+#define MMC_RX65TO127OCTETS_GB_LO      0x0948
+#define MMC_RX128TO255OCTETS_GB_LO     0x0950
+#define MMC_RX256TO511OCTETS_GB_LO     0x0958
+#define MMC_RX512TO1023OCTETS_GB_LO    0x0960
+#define MMC_RX1024TOMAXOCTETS_GB_LO    0x0968
+#define MMC_RXUNICASTFRAMES_G_LO       0x0970
+#define MMC_RXLENGTHERROR_LO           0x0978
+#define MMC_RXOUTOFRANGETYPE_LO                0x0980
+#define MMC_RXPAUSEFRAMES_LO           0x0988
+#define MMC_RXFIFOOVERFLOW_LO          0x0990
+#define MMC_RXVLANFRAMES_GB_LO         0x0998
+#define MMC_RXWATCHDOGERROR            0x09a0
+
+/* MMC register entry bit positions and sizes */
+#define MMC_CR_CR_POS                          0
+#define MMC_CR_CR_LEN                          1
+#define MMC_CR_CSR_POS                         1
+#define MMC_CR_CSR_LEN                         1
+#define MMC_CR_ROR_POS                         2
+#define MMC_CR_ROR_LEN                         1
+#define MMC_CR_MCF_POS                         3
+#define MMC_CR_MCF_LEN                         1
+#define MMC_CR_MCT_POS                         4
+#define MMC_CR_MCT_LEN                         2
+#define MMC_RIER_ALL_INTERRUPTS_POS            0
+#define MMC_RIER_ALL_INTERRUPTS_LEN            23
+#define MMC_RISR_RXFRAMECOUNT_GB_POS           0
+#define MMC_RISR_RXFRAMECOUNT_GB_LEN           1
+#define MMC_RISR_RXOCTETCOUNT_GB_POS           1
+#define MMC_RISR_RXOCTETCOUNT_GB_LEN           1
+#define MMC_RISR_RXOCTETCOUNT_G_POS            2
+#define MMC_RISR_RXOCTETCOUNT_G_LEN            1
+#define MMC_RISR_RXBROADCASTFRAMES_G_POS       3
+#define MMC_RISR_RXBROADCASTFRAMES_G_LEN       1
+#define MMC_RISR_RXMULTICASTFRAMES_G_POS       4
+#define MMC_RISR_RXMULTICASTFRAMES_G_LEN       1
+#define MMC_RISR_RXCRCERROR_POS                        5
+#define MMC_RISR_RXCRCERROR_LEN                        1
+#define MMC_RISR_RXRUNTERROR_POS               6
+#define MMC_RISR_RXRUNTERROR_LEN               1
+#define MMC_RISR_RXJABBERERROR_POS             7
+#define MMC_RISR_RXJABBERERROR_LEN             1
+#define MMC_RISR_RXUNDERSIZE_G_POS             8
+#define MMC_RISR_RXUNDERSIZE_G_LEN             1
+#define MMC_RISR_RXOVERSIZE_G_POS              9
+#define MMC_RISR_RXOVERSIZE_G_LEN              1
+#define MMC_RISR_RX64OCTETS_GB_POS             10
+#define MMC_RISR_RX64OCTETS_GB_LEN             1
+#define MMC_RISR_RX65TO127OCTETS_GB_POS                11
+#define MMC_RISR_RX65TO127OCTETS_GB_LEN                1
+#define MMC_RISR_RX128TO255OCTETS_GB_POS       12
+#define MMC_RISR_RX128TO255OCTETS_GB_LEN       1
+#define MMC_RISR_RX256TO511OCTETS_GB_POS       13
+#define MMC_RISR_RX256TO511OCTETS_GB_LEN       1
+#define MMC_RISR_RX512TO1023OCTETS_GB_POS      14
+#define MMC_RISR_RX512TO1023OCTETS_GB_LEN      1
+#define MMC_RISR_RX1024TOMAXOCTETS_GB_POS      15
+#define MMC_RISR_RX1024TOMAXOCTETS_GB_LEN      1
+#define MMC_RISR_RXUNICASTFRAMES_G_POS         16
+#define MMC_RISR_RXUNICASTFRAMES_G_LEN         1
+#define MMC_RISR_RXLENGTHERROR_POS             17
+#define MMC_RISR_RXLENGTHERROR_LEN             1
+#define MMC_RISR_RXOUTOFRANGETYPE_POS          18
+#define MMC_RISR_RXOUTOFRANGETYPE_LEN          1
+#define MMC_RISR_RXPAUSEFRAMES_POS             19
+#define MMC_RISR_RXPAUSEFRAMES_LEN             1
+#define MMC_RISR_RXFIFOOVERFLOW_POS            20
+#define MMC_RISR_RXFIFOOVERFLOW_LEN            1
+#define MMC_RISR_RXVLANFRAMES_GB_POS           21
+#define MMC_RISR_RXVLANFRAMES_GB_LEN           1
+#define MMC_RISR_RXWATCHDOGERROR_POS           22
+#define MMC_RISR_RXWATCHDOGERROR_LEN           1
+#define MMC_TIER_ALL_INTERRUPTS_POS            0
+#define MMC_TIER_ALL_INTERRUPTS_LEN            18
+#define MMC_TISR_TXOCTETCOUNT_GB_POS           0
+#define MMC_TISR_TXOCTETCOUNT_GB_LEN           1
+#define MMC_TISR_TXFRAMECOUNT_GB_POS           1
+#define MMC_TISR_TXFRAMECOUNT_GB_LEN           1
+#define MMC_TISR_TXBROADCASTFRAMES_G_POS       2
+#define MMC_TISR_TXBROADCASTFRAMES_G_LEN       1
+#define MMC_TISR_TXMULTICASTFRAMES_G_POS       3
+#define MMC_TISR_TXMULTICASTFRAMES_G_LEN       1
+#define MMC_TISR_TX64OCTETS_GB_POS             4
+#define MMC_TISR_TX64OCTETS_GB_LEN             1
+#define MMC_TISR_TX65TO127OCTETS_GB_POS                5
+#define MMC_TISR_TX65TO127OCTETS_GB_LEN                1
+#define MMC_TISR_TX128TO255OCTETS_GB_POS       6
+#define MMC_TISR_TX128TO255OCTETS_GB_LEN       1
+#define MMC_TISR_TX256TO511OCTETS_GB_POS       7
+#define MMC_TISR_TX256TO511OCTETS_GB_LEN       1
+#define MMC_TISR_TX512TO1023OCTETS_GB_POS      8
+#define MMC_TISR_TX512TO1023OCTETS_GB_LEN      1
+#define MMC_TISR_TX1024TOMAXOCTETS_GB_POS      9
+#define MMC_TISR_TX1024TOMAXOCTETS_GB_LEN      1
+#define MMC_TISR_TXUNICASTFRAMES_GB_POS                10
+#define MMC_TISR_TXUNICASTFRAMES_GB_LEN                1
+#define MMC_TISR_TXMULTICASTFRAMES_GB_POS      11
+#define MMC_TISR_TXMULTICASTFRAMES_GB_LEN      1
+#define MMC_TISR_TXBROADCASTFRAMES_GB_POS      12
+#define MMC_TISR_TXBROADCASTFRAMES_GB_LEN      1
+#define MMC_TISR_TXUNDERFLOWERROR_POS          13
+#define MMC_TISR_TXUNDERFLOWERROR_LEN          1
+#define MMC_TISR_TXOCTETCOUNT_G_POS            14
+#define MMC_TISR_TXOCTETCOUNT_G_LEN            1
+#define MMC_TISR_TXFRAMECOUNT_G_POS            15
+#define MMC_TISR_TXFRAMECOUNT_G_LEN            1
+#define MMC_TISR_TXPAUSEFRAMES_POS             16
+#define MMC_TISR_TXPAUSEFRAMES_LEN             1
+#define MMC_TISR_TXVLANFRAMES_G_POS            17
+#define MMC_TISR_TXVLANFRAMES_G_LEN            1
+
+/* MTL register offsets */
+#define MTL_OMR                                0x1000
+#define MTL_FDDR                       0x1010
+#define MTL_RQDCM0R                    0x1030
+
+#define MTL_RQDCM_INC                  4
+#define MTL_RQDCM_Q_PER_REG            4
+
+/* MTL register entry bit positions and sizes */
+#define MTL_OMR_ETSALG_POS             5
+#define MTL_OMR_ETSALG_LEN             2
+#define MTL_OMR_RAA_POS                        2
+#define MTL_OMR_RAA_LEN                        1
+
+/* MTL queue register offsets
+ *   Multiple queues can be active.  The first queue has registers
+ *   that begin at 0x1100.  Each subsequent queue has registers that
+ *   are accessed using an offset of 0x80 from the previous queue.
+ */
+#define MTL_Q_BASE                     0x1100
+#define MTL_Q_INC                      0x80
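+
+/* Illustrative arithmetic: a queue register address is base + queue *
+ * increment + register offset, which is what the XLGMAC_MTL_REG() helper
+ * defined later in this header encodes; e.g. queue 2's RQOMR register
+ * lives at 0x1100 + 2 * 0x80 + 0x40 = 0x1240.
+ */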
+
+#define MTL_Q_TQOMR                    0x00
+#define MTL_Q_RQOMR                    0x40
+#define MTL_Q_RQDR                     0x48
+#define MTL_Q_RQFCR                    0x50
+#define MTL_Q_IER                      0x70
+#define MTL_Q_ISR                      0x74
+
+/* MTL queue register entry bit positions and sizes */
+#define MTL_Q_RQDR_PRXQ_POS            16
+#define MTL_Q_RQDR_PRXQ_LEN            14
+#define MTL_Q_RQDR_RXQSTS_POS          4
+#define MTL_Q_RQDR_RXQSTS_LEN          2
+#define MTL_Q_RQFCR_RFA_POS            1
+#define MTL_Q_RQFCR_RFA_LEN            6
+#define MTL_Q_RQFCR_RFD_POS            17
+#define MTL_Q_RQFCR_RFD_LEN            6
+#define MTL_Q_RQOMR_EHFC_POS           7
+#define MTL_Q_RQOMR_EHFC_LEN           1
+#define MTL_Q_RQOMR_RQS_POS            16
+#define MTL_Q_RQOMR_RQS_LEN            9
+#define MTL_Q_RQOMR_RSF_POS            5
+#define MTL_Q_RQOMR_RSF_LEN            1
+#define MTL_Q_RQOMR_FEP_POS            4
+#define MTL_Q_RQOMR_FEP_LEN            1
+#define MTL_Q_RQOMR_FUP_POS            3
+#define MTL_Q_RQOMR_FUP_LEN            1
+#define MTL_Q_RQOMR_RTC_POS            0
+#define MTL_Q_RQOMR_RTC_LEN            2
+#define MTL_Q_TQOMR_FTQ_POS            0
+#define MTL_Q_TQOMR_FTQ_LEN            1
+#define MTL_Q_TQOMR_Q2TCMAP_POS                8
+#define MTL_Q_TQOMR_Q2TCMAP_LEN                3
+#define MTL_Q_TQOMR_TQS_POS            16
+#define MTL_Q_TQOMR_TQS_LEN            10
+#define MTL_Q_TQOMR_TSF_POS            1
+#define MTL_Q_TQOMR_TSF_LEN            1
+#define MTL_Q_TQOMR_TTC_POS            4
+#define MTL_Q_TQOMR_TTC_LEN            3
+#define MTL_Q_TQOMR_TXQEN_POS          2
+#define MTL_Q_TQOMR_TXQEN_LEN          2
+
+/* MTL queue register value */
+#define MTL_RSF_DISABLE                        0x00
+#define MTL_RSF_ENABLE                 0x01
+#define MTL_TSF_DISABLE                        0x00
+#define MTL_TSF_ENABLE                 0x01
+
+#define MTL_RX_THRESHOLD_64            0x00
+#define MTL_RX_THRESHOLD_96            0x02
+#define MTL_RX_THRESHOLD_128           0x03
+#define MTL_TX_THRESHOLD_64            0x00
+#define MTL_TX_THRESHOLD_96            0x02
+#define MTL_TX_THRESHOLD_128           0x03
+#define MTL_TX_THRESHOLD_192           0x04
+#define MTL_TX_THRESHOLD_256           0x05
+#define MTL_TX_THRESHOLD_384           0x06
+#define MTL_TX_THRESHOLD_512           0x07
+
+#define MTL_ETSALG_WRR                 0x00
+#define MTL_ETSALG_WFQ                 0x01
+#define MTL_ETSALG_DWRR                        0x02
+#define MTL_RAA_SP                     0x00
+#define MTL_RAA_WSP                    0x01
+
+#define MTL_Q_DISABLED                 0x00
+#define MTL_Q_ENABLED                  0x02
+
+#define MTL_RQDCM0R_Q0MDMACH           0x0
+#define MTL_RQDCM0R_Q1MDMACH           0x00000100
+#define MTL_RQDCM0R_Q2MDMACH           0x00020000
+#define MTL_RQDCM0R_Q3MDMACH           0x03000000
+#define MTL_RQDCM1R_Q4MDMACH           0x00000004
+#define MTL_RQDCM1R_Q5MDMACH           0x00000500
+#define MTL_RQDCM1R_Q6MDMACH           0x00060000
+#define MTL_RQDCM1R_Q7MDMACH           0x07000000
+#define MTL_RQDCM2R_Q8MDMACH           0x00000008
+#define MTL_RQDCM2R_Q9MDMACH           0x00000900
+#define MTL_RQDCM2R_Q10MDMACH          0x000A0000
+#define MTL_RQDCM2R_Q11MDMACH          0x0B000000
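+
+/* Each MTL_RQDCMxR register maps four receive queues to DMA channels, one
+ * byte per queue (MTL_RQDCM_Q_PER_REG), with consecutive registers 4 bytes
+ * apart (MTL_RQDCM_INC).  The values above give a one-to-one mapping, e.g.
+ * 0x03000000 places queue 3 on DMA channel 3 via the register's top byte.
+ */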
+
+/* MTL traffic class register offsets
+ *   Multiple traffic classes can be active.  The first class has registers
+ *   that begin at 0x1100.  Each subsequent class has registers that
+ *   are accessed using an offset of 0x80 from the previous class.
+ */
+#define MTL_TC_BASE                    MTL_Q_BASE
+#define MTL_TC_INC                     MTL_Q_INC
+
+#define MTL_TC_ETSCR                   0x10
+#define MTL_TC_ETSSR                   0x14
+#define MTL_TC_QWR                     0x18
+
+/* MTL traffic class register entry bit positions and sizes */
+#define MTL_TC_ETSCR_TSA_POS           0
+#define MTL_TC_ETSCR_TSA_LEN           2
+#define MTL_TC_QWR_QW_POS              0
+#define MTL_TC_QWR_QW_LEN              21
+
+/* MTL traffic class register value */
+#define MTL_TSA_SP                     0x00
+#define MTL_TSA_ETS                    0x02
+
+/* DMA register offsets */
+#define DMA_MR                         0x3000
+#define DMA_SBMR                       0x3004
+#define DMA_ISR                                0x3008
+#define DMA_DSR0                       0x3020
+#define DMA_DSR1                       0x3024
+
+/* DMA register entry bit positions and sizes */
+#define DMA_ISR_MACIS_POS              17
+#define DMA_ISR_MACIS_LEN              1
+#define DMA_ISR_MTLIS_POS              16
+#define DMA_ISR_MTLIS_LEN              1
+#define DMA_MR_SWR_POS                 0
+#define DMA_MR_SWR_LEN                 1
+#define DMA_SBMR_EAME_POS              11
+#define DMA_SBMR_EAME_LEN              1
+#define DMA_SBMR_BLEN_64_POS           5
+#define DMA_SBMR_BLEN_64_LEN           1
+#define DMA_SBMR_BLEN_128_POS          6
+#define DMA_SBMR_BLEN_128_LEN          1
+#define DMA_SBMR_BLEN_256_POS          7
+#define DMA_SBMR_BLEN_256_LEN          1
+#define DMA_SBMR_UNDEF_POS             0
+#define DMA_SBMR_UNDEF_LEN             1
+
+/* DMA register values */
+#define DMA_DSR_RPS_LEN                        4
+#define DMA_DSR_TPS_LEN                        4
+#define DMA_DSR_Q_LEN                  (DMA_DSR_RPS_LEN + DMA_DSR_TPS_LEN)
+#define DMA_DSR0_TPS_START             12
+#define DMA_DSRX_FIRST_QUEUE           3
+#define DMA_DSRX_INC                   4
+#define DMA_DSRX_QPR                   4
+#define DMA_DSRX_TPS_START             4
+#define DMA_TPS_STOPPED                        0x00
+#define DMA_TPS_SUSPENDED              0x06
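+
+/* Worked example (layout inferred from the constants above, so treat it
+ * as a sketch): DSR0 holds channels 0-2 with channel 0's TPS field
+ * starting at bit 12; channels 3 and up live in DSR1 onward, four per
+ * register (DMA_DSRX_QPR) with 8 status bits each (DMA_DSR_Q_LEN), so
+ * channel 5's TPS field starts at bit 4 + 2 * 8 = 20 of DSR1.
+ */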
+
+/* DMA channel register offsets
+ *   Multiple channels can be active.  The first channel has registers
+ *   that begin at 0x3100.  Each subsequent channel has registers that
+ *   are accessed using an offset of 0x80 from the previous channel.
+ */
+#define DMA_CH_BASE                    0x3100
+#define DMA_CH_INC                     0x80
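+
+/* Illustrative arithmetic: channel 1's status register therefore sits at
+ * 0x3100 + 1 * 0x80 + 0x60 = 0x31e0.  Note that XLGMAC_DMA_REG() below
+ * resolves against a channel's dma_regs pointer, which presumably already
+ * points at that channel's base.
+ */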
+
+#define DMA_CH_CR                      0x00
+#define DMA_CH_TCR                     0x04
+#define DMA_CH_RCR                     0x08
+#define DMA_CH_TDLR_HI                 0x10
+#define DMA_CH_TDLR_LO                 0x14
+#define DMA_CH_RDLR_HI                 0x18
+#define DMA_CH_RDLR_LO                 0x1c
+#define DMA_CH_TDTR_LO                 0x24
+#define DMA_CH_RDTR_LO                 0x2c
+#define DMA_CH_TDRLR                   0x30
+#define DMA_CH_RDRLR                   0x34
+#define DMA_CH_IER                     0x38
+#define DMA_CH_RIWT                    0x3c
+#define DMA_CH_SR                      0x60
+
+/* DMA channel register entry bit positions and sizes */
+#define DMA_CH_CR_PBLX8_POS            16
+#define DMA_CH_CR_PBLX8_LEN            1
+#define DMA_CH_CR_SPH_POS              24
+#define DMA_CH_CR_SPH_LEN              1
+#define DMA_CH_IER_AIE_POS             15
+#define DMA_CH_IER_AIE_LEN             1
+#define DMA_CH_IER_FBEE_POS            12
+#define DMA_CH_IER_FBEE_LEN            1
+#define DMA_CH_IER_NIE_POS             16
+#define DMA_CH_IER_NIE_LEN             1
+#define DMA_CH_IER_RBUE_POS            7
+#define DMA_CH_IER_RBUE_LEN            1
+#define DMA_CH_IER_RIE_POS             6
+#define DMA_CH_IER_RIE_LEN             1
+#define DMA_CH_IER_RSE_POS             8
+#define DMA_CH_IER_RSE_LEN             1
+#define DMA_CH_IER_TBUE_POS            2
+#define DMA_CH_IER_TBUE_LEN            1
+#define DMA_CH_IER_TIE_POS             0
+#define DMA_CH_IER_TIE_LEN             1
+#define DMA_CH_IER_TXSE_POS            1
+#define DMA_CH_IER_TXSE_LEN            1
+#define DMA_CH_RCR_PBL_POS             16
+#define DMA_CH_RCR_PBL_LEN             6
+#define DMA_CH_RCR_RBSZ_POS            1
+#define DMA_CH_RCR_RBSZ_LEN            14
+#define DMA_CH_RCR_SR_POS              0
+#define DMA_CH_RCR_SR_LEN              1
+#define DMA_CH_RIWT_RWT_POS            0
+#define DMA_CH_RIWT_RWT_LEN            8
+#define DMA_CH_SR_FBE_POS              12
+#define DMA_CH_SR_FBE_LEN              1
+#define DMA_CH_SR_RBU_POS              7
+#define DMA_CH_SR_RBU_LEN              1
+#define DMA_CH_SR_RI_POS               6
+#define DMA_CH_SR_RI_LEN               1
+#define DMA_CH_SR_RPS_POS              8
+#define DMA_CH_SR_RPS_LEN              1
+#define DMA_CH_SR_TBU_POS              2
+#define DMA_CH_SR_TBU_LEN              1
+#define DMA_CH_SR_TI_POS               0
+#define DMA_CH_SR_TI_LEN               1
+#define DMA_CH_SR_TPS_POS              1
+#define DMA_CH_SR_TPS_LEN              1
+#define DMA_CH_TCR_OSP_POS             4
+#define DMA_CH_TCR_OSP_LEN             1
+#define DMA_CH_TCR_PBL_POS             16
+#define DMA_CH_TCR_PBL_LEN             6
+#define DMA_CH_TCR_ST_POS              0
+#define DMA_CH_TCR_ST_LEN              1
+#define DMA_CH_TCR_TSE_POS             12
+#define DMA_CH_TCR_TSE_LEN             1
+
+/* DMA channel register values */
+#define DMA_OSP_DISABLE                        0x00
+#define DMA_OSP_ENABLE                 0x01
+#define DMA_PBL_1                      1
+#define DMA_PBL_2                      2
+#define DMA_PBL_4                      4
+#define DMA_PBL_8                      8
+#define DMA_PBL_16                     16
+#define DMA_PBL_32                     32
+#define DMA_PBL_64                     64
+#define DMA_PBL_128                    128
+#define DMA_PBL_256                    256
+#define DMA_PBL_X8_DISABLE             0x00
+#define DMA_PBL_X8_ENABLE              0x01
+
+/* Descriptor/Packet entry bit positions and sizes */
+#define RX_PACKET_ERRORS_CRC_POS               2
+#define RX_PACKET_ERRORS_CRC_LEN               1
+#define RX_PACKET_ERRORS_FRAME_POS             3
+#define RX_PACKET_ERRORS_FRAME_LEN             1
+#define RX_PACKET_ERRORS_LENGTH_POS            0
+#define RX_PACKET_ERRORS_LENGTH_LEN            1
+#define RX_PACKET_ERRORS_OVERRUN_POS           1
+#define RX_PACKET_ERRORS_OVERRUN_LEN           1
+
+#define RX_PACKET_ATTRIBUTES_CSUM_DONE_POS     0
+#define RX_PACKET_ATTRIBUTES_CSUM_DONE_LEN     1
+#define RX_PACKET_ATTRIBUTES_VLAN_CTAG_POS     1
+#define RX_PACKET_ATTRIBUTES_VLAN_CTAG_LEN     1
+#define RX_PACKET_ATTRIBUTES_INCOMPLETE_POS    2
+#define RX_PACKET_ATTRIBUTES_INCOMPLETE_LEN    1
+#define RX_PACKET_ATTRIBUTES_CONTEXT_NEXT_POS  3
+#define RX_PACKET_ATTRIBUTES_CONTEXT_NEXT_LEN  1
+#define RX_PACKET_ATTRIBUTES_CONTEXT_POS       4
+#define RX_PACKET_ATTRIBUTES_CONTEXT_LEN       1
+#define RX_PACKET_ATTRIBUTES_RX_TSTAMP_POS     5
+#define RX_PACKET_ATTRIBUTES_RX_TSTAMP_LEN     1
+#define RX_PACKET_ATTRIBUTES_RSS_HASH_POS      6
+#define RX_PACKET_ATTRIBUTES_RSS_HASH_LEN      1
+
+#define RX_NORMAL_DESC0_OVT_POS                        0
+#define RX_NORMAL_DESC0_OVT_LEN                        16
+#define RX_NORMAL_DESC2_HL_POS                 0
+#define RX_NORMAL_DESC2_HL_LEN                 10
+#define RX_NORMAL_DESC3_CDA_POS                        27
+#define RX_NORMAL_DESC3_CDA_LEN                        1
+#define RX_NORMAL_DESC3_CTXT_POS               30
+#define RX_NORMAL_DESC3_CTXT_LEN               1
+#define RX_NORMAL_DESC3_ES_POS                 15
+#define RX_NORMAL_DESC3_ES_LEN                 1
+#define RX_NORMAL_DESC3_ETLT_POS               16
+#define RX_NORMAL_DESC3_ETLT_LEN               4
+#define RX_NORMAL_DESC3_FD_POS                 29
+#define RX_NORMAL_DESC3_FD_LEN                 1
+#define RX_NORMAL_DESC3_INTE_POS               30
+#define RX_NORMAL_DESC3_INTE_LEN               1
+#define RX_NORMAL_DESC3_L34T_POS               20
+#define RX_NORMAL_DESC3_L34T_LEN               4
+#define RX_NORMAL_DESC3_LD_POS                 28
+#define RX_NORMAL_DESC3_LD_LEN                 1
+#define RX_NORMAL_DESC3_OWN_POS                        31
+#define RX_NORMAL_DESC3_OWN_LEN                        1
+#define RX_NORMAL_DESC3_PL_POS                 0
+#define RX_NORMAL_DESC3_PL_LEN                 14
+#define RX_NORMAL_DESC3_RSV_POS                        26
+#define RX_NORMAL_DESC3_RSV_LEN                        1
+
+#define RX_DESC3_L34T_IPV4_TCP                 1
+#define RX_DESC3_L34T_IPV4_UDP                 2
+#define RX_DESC3_L34T_IPV4_ICMP                        3
+#define RX_DESC3_L34T_IPV6_TCP                 9
+#define RX_DESC3_L34T_IPV6_UDP                 10
+#define RX_DESC3_L34T_IPV6_ICMP                        11
+
+#define RX_CONTEXT_DESC3_TSA_POS               4
+#define RX_CONTEXT_DESC3_TSA_LEN               1
+#define RX_CONTEXT_DESC3_TSD_POS               6
+#define RX_CONTEXT_DESC3_TSD_LEN               1
+
+#define TX_PACKET_ATTRIBUTES_CSUM_ENABLE_POS   0
+#define TX_PACKET_ATTRIBUTES_CSUM_ENABLE_LEN   1
+#define TX_PACKET_ATTRIBUTES_TSO_ENABLE_POS    1
+#define TX_PACKET_ATTRIBUTES_TSO_ENABLE_LEN    1
+#define TX_PACKET_ATTRIBUTES_VLAN_CTAG_POS     2
+#define TX_PACKET_ATTRIBUTES_VLAN_CTAG_LEN     1
+#define TX_PACKET_ATTRIBUTES_PTP_POS           3
+#define TX_PACKET_ATTRIBUTES_PTP_LEN           1
+
+#define TX_CONTEXT_DESC2_MSS_POS               0
+#define TX_CONTEXT_DESC2_MSS_LEN               15
+#define TX_CONTEXT_DESC3_CTXT_POS              30
+#define TX_CONTEXT_DESC3_CTXT_LEN              1
+#define TX_CONTEXT_DESC3_TCMSSV_POS            26
+#define TX_CONTEXT_DESC3_TCMSSV_LEN            1
+#define TX_CONTEXT_DESC3_VLTV_POS              16
+#define TX_CONTEXT_DESC3_VLTV_LEN              1
+#define TX_CONTEXT_DESC3_VT_POS                        0
+#define TX_CONTEXT_DESC3_VT_LEN                        16
+
+#define TX_NORMAL_DESC2_HL_B1L_POS             0
+#define TX_NORMAL_DESC2_HL_B1L_LEN             14
+#define TX_NORMAL_DESC2_IC_POS                 31
+#define TX_NORMAL_DESC2_IC_LEN                 1
+#define TX_NORMAL_DESC2_TTSE_POS               30
+#define TX_NORMAL_DESC2_TTSE_LEN               1
+#define TX_NORMAL_DESC2_VTIR_POS               14
+#define TX_NORMAL_DESC2_VTIR_LEN               2
+#define TX_NORMAL_DESC3_CIC_POS                        16
+#define TX_NORMAL_DESC3_CIC_LEN                        2
+#define TX_NORMAL_DESC3_CPC_POS                        26
+#define TX_NORMAL_DESC3_CPC_LEN                        2
+#define TX_NORMAL_DESC3_CTXT_POS               30
+#define TX_NORMAL_DESC3_CTXT_LEN               1
+#define TX_NORMAL_DESC3_FD_POS                 29
+#define TX_NORMAL_DESC3_FD_LEN                 1
+#define TX_NORMAL_DESC3_FL_POS                 0
+#define TX_NORMAL_DESC3_FL_LEN                 15
+#define TX_NORMAL_DESC3_LD_POS                 28
+#define TX_NORMAL_DESC3_LD_LEN                 1
+#define TX_NORMAL_DESC3_OWN_POS                        31
+#define TX_NORMAL_DESC3_OWN_LEN                        1
+#define TX_NORMAL_DESC3_TCPHDRLEN_POS          19
+#define TX_NORMAL_DESC3_TCPHDRLEN_LEN          4
+#define TX_NORMAL_DESC3_TCPPL_POS              0
+#define TX_NORMAL_DESC3_TCPPL_LEN              18
+#define TX_NORMAL_DESC3_TSE_POS                        18
+#define TX_NORMAL_DESC3_TSE_LEN                        1
+
+#define TX_NORMAL_DESC2_VLAN_INSERT            0x2
+
+#define XLGMAC_MTL_REG(pdata, n, reg)                                  \
+       ((pdata)->mac_regs + MTL_Q_BASE + ((n) * MTL_Q_INC) + (reg))
+
+#define XLGMAC_DMA_REG(channel, reg)   ((channel)->dma_regs + (reg))
+
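+/* A minimal usage sketch (an illustration, not code quoted from this
+ * driver): the address helper above combines with the bit-field accessors
+ * declared in dwc-xlgmac.h, e.g. to read a queue's TQS field:
+ *
+ *   u32 regval = readl(XLGMAC_MTL_REG(pdata, queue, MTL_Q_TQOMR));
+ *   u32 tqs = XLGMAC_GET_REG_BITS(regval, MTL_Q_TQOMR_TQS_POS,
+ *                                 MTL_Q_TQOMR_TQS_LEN);
+ */
+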
+#endif /* __DWC_XLGMAC_REG_H__ */
diff --git a/drivers/net/ethernet/synopsys/dwc-xlgmac.h b/drivers/net/ethernet/synopsys/dwc-xlgmac.h
new file mode 100644 (file)
index 0000000..676b2fb
--- /dev/null
@@ -0,0 +1,649 @@
+/* Synopsys DesignWare Core Enterprise Ethernet (XLGMAC) Driver
+ *
+ * Copyright (c) 2017 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is dual-licensed; you may select either version 2 of
+ * the GNU General Public License ("GPL") or BSD license ("BSD").
+ *
+ * This Synopsys DWC XLGMAC software driver and associated documentation
+ * (hereinafter the "Software") is an unsupported proprietary work of
+ * Synopsys, Inc. unless otherwise expressly agreed to in writing between
+ * Synopsys and you. The Software IS NOT an item of Licensed Software or a
+ * Licensed Product under any End User Software License Agreement or
+ * Agreement for Licensed Products with Synopsys or any supplement thereto.
+ * Synopsys is a registered trademark of Synopsys, Inc. Other names included
+ * in the SOFTWARE may be the trademarks of their respective owners.
+ */
+
+#ifndef __DWC_XLGMAC_H__
+#define __DWC_XLGMAC_H__
+
+#include <linux/dma-mapping.h>
+#include <linux/netdevice.h>
+#include <linux/workqueue.h>
+#include <linux/phy.h>
+#include <linux/if_vlan.h>
+#include <linux/bitops.h>
+#include <linux/timecounter.h>
+
+#define XLGMAC_DRV_NAME                        "dwc-xlgmac"
+#define XLGMAC_DRV_VERSION             "1.0.0"
+#define XLGMAC_DRV_DESC                        "Synopsys DWC XLGMAC Driver"
+
+/* Descriptor related parameters */
+#define XLGMAC_TX_DESC_CNT             1024
+#define XLGMAC_TX_DESC_MIN_FREE                (XLGMAC_TX_DESC_CNT >> 3)
+#define XLGMAC_TX_DESC_MAX_PROC                (XLGMAC_TX_DESC_CNT >> 1)
+#define XLGMAC_RX_DESC_CNT             1024
+#define XLGMAC_RX_DESC_MAX_DIRTY       (XLGMAC_RX_DESC_CNT >> 3)
+
+/* Descriptors required for maximum contiguous TSO/GSO packet */
+#define XLGMAC_TX_MAX_SPLIT    ((GSO_MAX_SIZE / XLGMAC_TX_MAX_BUF_SIZE) + 1)
+
+/* Maximum possible descriptors needed for a SKB */
+#define XLGMAC_TX_MAX_DESC_NR  (MAX_SKB_FRAGS + XLGMAC_TX_MAX_SPLIT + 2)
+
+#define XLGMAC_TX_MAX_BUF_SIZE (0x3fff & ~(64 - 1))
+#define XLGMAC_RX_MIN_BUF_SIZE (ETH_FRAME_LEN + ETH_FCS_LEN + VLAN_HLEN)
+#define XLGMAC_RX_BUF_ALIGN    64
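+
+/* Worked example (the exact figures depend on the kernel configuration,
+ * so treat them as assumptions): with GSO_MAX_SIZE = 65536 the buffer
+ * limit above evaluates to 0x3fff & ~63 = 16320 bytes, giving
+ * XLGMAC_TX_MAX_SPLIT = 65536 / 16320 + 1 = 5, and with MAX_SKB_FRAGS = 17
+ * a worst-case skb needs 17 + 5 + 2 = 24 descriptors.
+ */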
+
+/* Maximum Size for Splitting the Header Data
+ * Keep in sync with SKB_ALLOC_SIZE
+ * 3'b000: 64 bytes, 3'b001: 128 bytes
+ * 3'b010: 256 bytes, 3'b011: 512 bytes
+ * 3'b100: 1023 bytes, 3'b101-3'b111: Reserved
+ */
+#define XLGMAC_SPH_HDSMS_SIZE          3
+#define XLGMAC_SKB_ALLOC_SIZE          512
+
+#define XLGMAC_MAX_FIFO                        81920
+
+#define XLGMAC_MAX_DMA_CHANNELS                16
+#define XLGMAC_DMA_STOP_TIMEOUT                5
+#define XLGMAC_DMA_INTERRUPT_MASK      0x31c7
+
+/* Default coalescing parameters */
+#define XLGMAC_INIT_DMA_TX_USECS       1000
+#define XLGMAC_INIT_DMA_TX_FRAMES      25
+#define XLGMAC_INIT_DMA_RX_USECS       30
+#define XLGMAC_INIT_DMA_RX_FRAMES      25
+
+/* Flow control queue count */
+#define XLGMAC_MAX_FLOW_CONTROL_QUEUES 8
+
+/* System clock is 125 MHz */
+#define XLGMAC_SYSCLOCK                        125000000
+
+/* Maximum MAC address hash table size (256 bits = eight 32-bit registers) */
+#define XLGMAC_MAC_HASH_TABLE_SIZE     8
+
+/* Receive Side Scaling */
+#define XLGMAC_RSS_HASH_KEY_SIZE       40
+#define XLGMAC_RSS_MAX_TABLE_SIZE      256
+#define XLGMAC_RSS_LOOKUP_TABLE_TYPE   0
+#define XLGMAC_RSS_HASH_KEY_TYPE       1
+
+#define XLGMAC_STD_PACKET_MTU          1500
+#define XLGMAC_JUMBO_PACKET_MTU                9000
+
+/* Helper macro for descriptor handling
+ *  Always use XLGMAC_GET_DESC_DATA to access the descriptor data
+ */
+#define XLGMAC_GET_DESC_DATA(ring, idx) ({                             \
+       typeof(ring) _ring = (ring);                                    \
+       ((_ring)->desc_data_head +                                      \
+        ((idx) & ((_ring)->dma_desc_count - 1)));                      \
+})
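+
+/* The index is wrapped by masking with (dma_desc_count - 1), which assumes
+ * the ring size is a power of two (it is: 1024 above); e.g. idx 1025 on a
+ * 1024-entry ring resolves to entry 1.
+ */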
+
+#define XLGMAC_GET_REG_BITS(var, pos, len) ({                          \
+       typeof(pos) _pos = (pos);                                       \
+       typeof(len) _len = (len);                                       \
+       ((var) & GENMASK(_pos + _len - 1, _pos)) >> (_pos);             \
+})
+
+#define XLGMAC_GET_REG_BITS_LE(var, pos, len) ({                       \
+       typeof(pos) _pos = (pos);                                       \
+       typeof(len) _len = (len);                                       \
+       typeof(var) _var = le32_to_cpu((var));                          \
+       ((_var) & GENMASK(_pos + _len - 1, _pos)) >> (_pos);            \
+})
+
+#define XLGMAC_SET_REG_BITS(var, pos, len, val) ({                     \
+       typeof(var) _var = (var);                                       \
+       typeof(pos) _pos = (pos);                                       \
+       typeof(len) _len = (len);                                       \
+       typeof(val) _val = (val);                                       \
+       _val = (_val << _pos) & GENMASK(_pos + _len - 1, _pos);         \
+       _var = (_var & ~GENMASK(_pos + _len - 1, _pos)) | _val;         \
+})
+
+#define XLGMAC_SET_REG_BITS_LE(var, pos, len, val) ({                  \
+       typeof(var) _var = (var);                                       \
+       typeof(pos) _pos = (pos);                                       \
+       typeof(len) _len = (len);                                       \
+       typeof(val) _val = (val);                                       \
+       _val = (_val << _pos) & GENMASK(_pos + _len - 1, _pos);         \
+       _var = (_var & ~GENMASK(_pos + _len - 1, _pos)) | _val;         \
+       cpu_to_le32(_var);                                              \
+})
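+
+/* A minimal usage sketch (illustrative, not taken from the driver
+ * sources): the helpers pair with the _POS/_LEN constants from
+ * dwc-xlgmac-reg.h, and XLGMAC_SET_REG_BITS() evaluates to the updated
+ * value, e.g.:
+ *
+ *   regval = XLGMAC_SET_REG_BITS(regval, MTL_Q_TQOMR_TSF_POS,
+ *                                MTL_Q_TQOMR_TSF_LEN, MTL_TSF_ENABLE);
+ */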
+
+struct xlgmac_pdata;
+
+enum xlgmac_int {
+       XLGMAC_INT_DMA_CH_SR_TI,
+       XLGMAC_INT_DMA_CH_SR_TPS,
+       XLGMAC_INT_DMA_CH_SR_TBU,
+       XLGMAC_INT_DMA_CH_SR_RI,
+       XLGMAC_INT_DMA_CH_SR_RBU,
+       XLGMAC_INT_DMA_CH_SR_RPS,
+       XLGMAC_INT_DMA_CH_SR_TI_RI,
+       XLGMAC_INT_DMA_CH_SR_FBE,
+       XLGMAC_INT_DMA_ALL,
+};
+
+struct xlgmac_stats {
+       /* MMC TX counters */
+       u64 txoctetcount_gb;
+       u64 txframecount_gb;
+       u64 txbroadcastframes_g;
+       u64 txmulticastframes_g;
+       u64 tx64octets_gb;
+       u64 tx65to127octets_gb;
+       u64 tx128to255octets_gb;
+       u64 tx256to511octets_gb;
+       u64 tx512to1023octets_gb;
+       u64 tx1024tomaxoctets_gb;
+       u64 txunicastframes_gb;
+       u64 txmulticastframes_gb;
+       u64 txbroadcastframes_gb;
+       u64 txunderflowerror;
+       u64 txoctetcount_g;
+       u64 txframecount_g;
+       u64 txpauseframes;
+       u64 txvlanframes_g;
+
+       /* MMC RX counters */
+       u64 rxframecount_gb;
+       u64 rxoctetcount_gb;
+       u64 rxoctetcount_g;
+       u64 rxbroadcastframes_g;
+       u64 rxmulticastframes_g;
+       u64 rxcrcerror;
+       u64 rxrunterror;
+       u64 rxjabbererror;
+       u64 rxundersize_g;
+       u64 rxoversize_g;
+       u64 rx64octets_gb;
+       u64 rx65to127octets_gb;
+       u64 rx128to255octets_gb;
+       u64 rx256to511octets_gb;
+       u64 rx512to1023octets_gb;
+       u64 rx1024tomaxoctets_gb;
+       u64 rxunicastframes_g;
+       u64 rxlengtherror;
+       u64 rxoutofrangetype;
+       u64 rxpauseframes;
+       u64 rxfifooverflow;
+       u64 rxvlanframes_gb;
+       u64 rxwatchdogerror;
+
+       /* Extra counters */
+       u64 tx_tso_packets;
+       u64 rx_split_header_packets;
+       u64 rx_buffer_unavailable;
+};
+
+struct xlgmac_ring_buf {
+       struct sk_buff *skb;
+       dma_addr_t skb_dma;
+       unsigned int skb_len;
+};
+
+/* Common Tx and Rx DMA hardware descriptor */
+struct xlgmac_dma_desc {
+       __le32 desc0;
+       __le32 desc1;
+       __le32 desc2;
+       __le32 desc3;
+};
+
+/* Page allocation related values */
+struct xlgmac_page_alloc {
+       struct page *pages;
+       unsigned int pages_len;
+       unsigned int pages_offset;
+
+       dma_addr_t pages_dma;
+};
+
+/* Ring entry buffer data */
+struct xlgmac_buffer_data {
+       struct xlgmac_page_alloc pa;
+       struct xlgmac_page_alloc pa_unmap;
+
+       dma_addr_t dma_base;
+       unsigned long dma_off;
+       unsigned int dma_len;
+};
+
+/* Tx-related desc data */
+struct xlgmac_tx_desc_data {
+       unsigned int packets;           /* BQL packet count */
+       unsigned int bytes;             /* BQL byte count */
+};
+
+/* Rx-related desc data */
+struct xlgmac_rx_desc_data {
+       struct xlgmac_buffer_data hdr;  /* Header locations */
+       struct xlgmac_buffer_data buf;  /* Payload locations */
+
+       unsigned short hdr_len;         /* Length of received header */
+       unsigned short len;             /* Length of received packet */
+};
+
+struct xlgmac_pkt_info {
+       struct sk_buff *skb;
+
+       unsigned int attributes;
+
+       unsigned int errors;
+
+       /* descriptors needed for this packet */
+       unsigned int desc_count;
+       unsigned int length;
+
+       unsigned int tx_packets;
+       unsigned int tx_bytes;
+
+       unsigned int header_len;
+       unsigned int tcp_header_len;
+       unsigned int tcp_payload_len;
+       unsigned short mss;
+
+       unsigned short vlan_ctag;
+
+       u64 rx_tstamp;
+
+       u32 rss_hash;
+       enum pkt_hash_types rss_hash_type;
+};
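+
+/* A hedged usage sketch (the pattern is common in this driver family; the
+ * exact call sites are not shown in this hunk): the attributes and errors
+ * words are packed with the generic bit-field helpers, e.g.
+ *
+ *   pkt_info->attributes = XLGMAC_SET_REG_BITS(pkt_info->attributes,
+ *                             TX_PACKET_ATTRIBUTES_CSUM_ENABLE_POS,
+ *                             TX_PACKET_ATTRIBUTES_CSUM_ENABLE_LEN, 1);
+ */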
+
+struct xlgmac_desc_data {
+       /* dma_desc: Virtual address of descriptor
+        *  dma_desc_addr: DMA address of descriptor
+        */
+       struct xlgmac_dma_desc *dma_desc;
+       dma_addr_t dma_desc_addr;
+
+       /* skb: Virtual address of SKB
+        *  skb_dma: DMA address of SKB data
+        *  skb_dma_len: Length of SKB DMA area
+        */
+       struct sk_buff *skb;
+       dma_addr_t skb_dma;
+       unsigned int skb_dma_len;
+
+       /* Tx/Rx-related data */
+       struct xlgmac_tx_desc_data tx;
+       struct xlgmac_rx_desc_data rx;
+
+       unsigned int mapped_as_page;
+
+       /* Incomplete receive save location.  If the budget is exhausted
+        * or the last descriptor (last normal descriptor or a following
+        * context descriptor) has not been DMA'd yet, the current state
+        * of the receive processing needs to be saved.
+        */
+       unsigned int state_saved;
+       struct {
+               struct sk_buff *skb;
+               unsigned int len;
+               unsigned int error;
+       } state;
+};
+
+struct xlgmac_ring {
+       /* Per packet related information */
+       struct xlgmac_pkt_info pkt_info;
+
+       /* Virtual/DMA addresses of DMA descriptor list and the total count */
+       struct xlgmac_dma_desc *dma_desc_head;
+       dma_addr_t dma_desc_head_addr;
+       unsigned int dma_desc_count;
+
+       /* Array of descriptor data corresponding to the DMA descriptor
+        * (always use the XLGMAC_GET_DESC_DATA macro to access this data)
+        */
+       struct xlgmac_desc_data *desc_data_head;
+
+       /* Page allocation for RX buffers */
+       struct xlgmac_page_alloc rx_hdr_pa;
+       struct xlgmac_page_alloc rx_buf_pa;
+
+       /* Ring index values
+        *  cur   - Tx: index of descriptor to be used for current transfer
+        *          Rx: index of descriptor to check for packet availability
+        *  dirty - Tx: index of descriptor to check for transfer complete
+        *          Rx: index of descriptor to check for buffer reallocation
+        */
+       unsigned int cur;
+       unsigned int dirty;
+
+       /* Coalesce frame count used for interrupt bit setting */
+       unsigned int coalesce_count;
+
+       union {
+               struct {
+                       unsigned int xmit_more;
+                       unsigned int queue_stopped;
+                       unsigned short cur_mss;
+                       unsigned short cur_vlan_ctag;
+               } tx;
+       };
+} ____cacheline_aligned;
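+
+/* A note on the index arithmetic (an assumption about the .c files, which
+ * are not part of this hunk): cur and dirty increment monotonically and
+ * are only wrapped inside XLGMAC_GET_DESC_DATA(), so the number of
+ * outstanding descriptors is simply the unsigned difference
+ * ring->cur - ring->dirty.
+ */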
+
+struct xlgmac_channel {
+       char name[16];
+
+       /* Address of private data area for device */
+       struct xlgmac_pdata *pdata;
+
+       /* Queue index and base address of queue's DMA registers */
+       unsigned int queue_index;
+       void __iomem *dma_regs;
+
+       /* Per-channel IRQ number */
+       int dma_irq;
+       char dma_irq_name[IFNAMSIZ + 32];
+
+       /* Netdev related settings */
+       struct napi_struct napi;
+
+       unsigned int saved_ier;
+
+       unsigned int tx_timer_active;
+       struct timer_list tx_timer;
+
+       struct xlgmac_ring *tx_ring;
+       struct xlgmac_ring *rx_ring;
+} ____cacheline_aligned;
+
+struct xlgmac_desc_ops {
+       int (*alloc_channels_and_rings)(struct xlgmac_pdata *pdata);
+       void (*free_channels_and_rings)(struct xlgmac_pdata *pdata);
+       int (*map_tx_skb)(struct xlgmac_channel *channel,
+                         struct sk_buff *skb);
+       int (*map_rx_buffer)(struct xlgmac_pdata *pdata,
+                            struct xlgmac_ring *ring,
+                            struct xlgmac_desc_data *desc_data);
+       void (*unmap_desc_data)(struct xlgmac_pdata *pdata,
+                               struct xlgmac_desc_data *desc_data);
+       void (*tx_desc_init)(struct xlgmac_pdata *pdata);
+       void (*rx_desc_init)(struct xlgmac_pdata *pdata);
+};
+
+struct xlgmac_hw_ops {
+       int (*init)(struct xlgmac_pdata *pdata);
+       int (*exit)(struct xlgmac_pdata *pdata);
+
+       int (*tx_complete)(struct xlgmac_dma_desc *dma_desc);
+
+       void (*enable_tx)(struct xlgmac_pdata *pdata);
+       void (*disable_tx)(struct xlgmac_pdata *pdata);
+       void (*enable_rx)(struct xlgmac_pdata *pdata);
+       void (*disable_rx)(struct xlgmac_pdata *pdata);
+
+       int (*enable_int)(struct xlgmac_channel *channel,
+                         enum xlgmac_int int_id);
+       int (*disable_int)(struct xlgmac_channel *channel,
+                          enum xlgmac_int int_id);
+       void (*dev_xmit)(struct xlgmac_channel *channel);
+       int (*dev_read)(struct xlgmac_channel *channel);
+
+       int (*set_mac_address)(struct xlgmac_pdata *pdata, u8 *addr);
+       int (*config_rx_mode)(struct xlgmac_pdata *pdata);
+       int (*enable_rx_csum)(struct xlgmac_pdata *pdata);
+       int (*disable_rx_csum)(struct xlgmac_pdata *pdata);
+
+       /* For XLGMII speed configuration */
+       int (*set_xlgmii_25000_speed)(struct xlgmac_pdata *pdata);
+       int (*set_xlgmii_40000_speed)(struct xlgmac_pdata *pdata);
+       int (*set_xlgmii_50000_speed)(struct xlgmac_pdata *pdata);
+       int (*set_xlgmii_100000_speed)(struct xlgmac_pdata *pdata);
+
+       /* For descriptor related operation */
+       void (*tx_desc_init)(struct xlgmac_channel *channel);
+       void (*rx_desc_init)(struct xlgmac_channel *channel);
+       void (*tx_desc_reset)(struct xlgmac_desc_data *desc_data);
+       void (*rx_desc_reset)(struct xlgmac_pdata *pdata,
+                             struct xlgmac_desc_data *desc_data,
+                             unsigned int index);
+       int (*is_last_desc)(struct xlgmac_dma_desc *dma_desc);
+       int (*is_context_desc)(struct xlgmac_dma_desc *dma_desc);
+       void (*tx_start_xmit)(struct xlgmac_channel *channel,
+                             struct xlgmac_ring *ring);
+
+       /* For Flow Control */
+       int (*config_tx_flow_control)(struct xlgmac_pdata *pdata);
+       int (*config_rx_flow_control)(struct xlgmac_pdata *pdata);
+
+       /* For VLAN-related config */
+       int (*enable_rx_vlan_stripping)(struct xlgmac_pdata *pdata);
+       int (*disable_rx_vlan_stripping)(struct xlgmac_pdata *pdata);
+       int (*enable_rx_vlan_filtering)(struct xlgmac_pdata *pdata);
+       int (*disable_rx_vlan_filtering)(struct xlgmac_pdata *pdata);
+       int (*update_vlan_hash_table)(struct xlgmac_pdata *pdata);
+
+       /* For RX coalescing */
+       int (*config_rx_coalesce)(struct xlgmac_pdata *pdata);
+       int (*config_tx_coalesce)(struct xlgmac_pdata *pdata);
+       unsigned int (*usec_to_riwt)(struct xlgmac_pdata *pdata,
+                                    unsigned int usec);
+       unsigned int (*riwt_to_usec)(struct xlgmac_pdata *pdata,
+                                    unsigned int riwt);
+
+       /* For RX and TX threshold config */
+       int (*config_rx_threshold)(struct xlgmac_pdata *pdata,
+                                  unsigned int val);
+       int (*config_tx_threshold)(struct xlgmac_pdata *pdata,
+                                  unsigned int val);
+
+       /* For RX and TX Store and Forward Mode config */
+       int (*config_rsf_mode)(struct xlgmac_pdata *pdata,
+                              unsigned int val);
+       int (*config_tsf_mode)(struct xlgmac_pdata *pdata,
+                              unsigned int val);
+
+       /* For TX DMA Operate on Second Frame config */
+       int (*config_osp_mode)(struct xlgmac_pdata *pdata);
+
+       /* For RX and TX PBL config */
+       int (*config_rx_pbl_val)(struct xlgmac_pdata *pdata);
+       int (*get_rx_pbl_val)(struct xlgmac_pdata *pdata);
+       int (*config_tx_pbl_val)(struct xlgmac_pdata *pdata);
+       int (*get_tx_pbl_val)(struct xlgmac_pdata *pdata);
+       int (*config_pblx8)(struct xlgmac_pdata *pdata);
+
+       /* For MMC statistics */
+       void (*rx_mmc_int)(struct xlgmac_pdata *pdata);
+       void (*tx_mmc_int)(struct xlgmac_pdata *pdata);
+       void (*read_mmc_stats)(struct xlgmac_pdata *pdata);
+
+       /* For Receive Side Scaling */
+       int (*enable_rss)(struct xlgmac_pdata *pdata);
+       int (*disable_rss)(struct xlgmac_pdata *pdata);
+       int (*set_rss_hash_key)(struct xlgmac_pdata *pdata,
+                               const u8 *key);
+       int (*set_rss_lookup_table)(struct xlgmac_pdata *pdata,
+                                   const u32 *table);
+};
+
+/* This structure contains flags that indicate what hardware features
+ * or configurations are present in the device.
+ */
+struct xlgmac_hw_features {
+       /* HW Version */
+       unsigned int version;
+
+       /* HW Feature Register0 */
+       unsigned int phyifsel;          /* PHY interface support */
+       unsigned int vlhash;            /* VLAN Hash Filter */
+       unsigned int sma;               /* SMA (MDIO) Interface */
+       unsigned int rwk;               /* PMT remote wake-up packet */
+       unsigned int mgk;               /* PMT magic packet */
+       unsigned int mmc;               /* RMON module */
+       unsigned int aoe;               /* ARP Offload */
+       unsigned int ts;                /* IEEE 1588-2008 Advanced Timestamp */
+       unsigned int eee;               /* Energy Efficient Ethernet */
+       unsigned int tx_coe;            /* Tx Checksum Offload */
+       unsigned int rx_coe;            /* Rx Checksum Offload */
+       unsigned int addn_mac;          /* Additional MAC Addresses */
+       unsigned int ts_src;            /* Timestamp Source */
+       unsigned int sa_vlan_ins;       /* Source Address or VLAN Insertion */
+
+       /* HW Feature Register1 */
+       unsigned int rx_fifo_size;      /* MTL Receive FIFO Size */
+       unsigned int tx_fifo_size;      /* MTL Transmit FIFO Size */
+       unsigned int adv_ts_hi;         /* Advanced Timestamping High Word */
+       unsigned int dma_width;         /* DMA width */
+       unsigned int dcb;               /* DCB Feature */
+       unsigned int sph;               /* Split Header Feature */
+       unsigned int tso;               /* TCP Segmentation Offload */
+       unsigned int dma_debug;         /* DMA Debug Registers */
+       unsigned int rss;               /* Receive Side Scaling */
+       unsigned int tc_cnt;            /* Number of Traffic Classes */
+       unsigned int hash_table_size;   /* Hash Table Size */
+       unsigned int l3l4_filter_num;   /* Number of L3-L4 Filters */
+
+       /* HW Feature Register2 */
+       unsigned int rx_q_cnt;          /* Number of MTL Receive Queues */
+       unsigned int tx_q_cnt;          /* Number of MTL Transmit Queues */
+       unsigned int rx_ch_cnt;         /* Number of DMA Receive Channels */
+       unsigned int tx_ch_cnt;         /* Number of DMA Transmit Channels */
+       unsigned int pps_out_num;       /* Number of PPS outputs */
+       unsigned int aux_snap_num;      /* Number of Aux snapshot inputs */
+};
+
+struct xlgmac_resources {
+       void __iomem *addr;
+       int irq;
+};
+
+struct xlgmac_pdata {
+       struct net_device *netdev;
+       struct device *dev;
+
+       struct xlgmac_hw_ops hw_ops;
+       struct xlgmac_desc_ops desc_ops;
+
+       /* Device statistics */
+       struct xlgmac_stats stats;
+
+       u32 msg_enable;
+
+       /* MAC registers base */
+       void __iomem *mac_regs;
+
+       /* Hardware features of the device */
+       struct xlgmac_hw_features hw_feat;
+
+       struct work_struct restart_work;
+
+       /* Rings for Tx/Rx on a DMA channel */
+       struct xlgmac_channel *channel_head;
+       unsigned int channel_count;
+       unsigned int tx_ring_count;
+       unsigned int rx_ring_count;
+       unsigned int tx_desc_count;
+       unsigned int rx_desc_count;
+       unsigned int tx_q_count;
+       unsigned int rx_q_count;
+
+       /* Tx/Rx common settings */
+       unsigned int pblx8;
+
+       /* Tx settings */
+       unsigned int tx_sf_mode;
+       unsigned int tx_threshold;
+       unsigned int tx_pbl;
+       unsigned int tx_osp_mode;
+
+       /* Rx settings */
+       unsigned int rx_sf_mode;
+       unsigned int rx_threshold;
+       unsigned int rx_pbl;
+
+       /* Tx coalescing settings */
+       unsigned int tx_usecs;
+       unsigned int tx_frames;
+
+       /* Rx coalescing settings */
+       unsigned int rx_riwt;
+       unsigned int rx_usecs;
+       unsigned int rx_frames;
+
+       /* Current Rx buffer size */
+       unsigned int rx_buf_size;
+
+       /* Flow control settings */
+       unsigned int tx_pause;
+       unsigned int rx_pause;
+
+       /* Device interrupt number */
+       int dev_irq;
+       unsigned int per_channel_irq;
+       int channel_irq[XLGMAC_MAX_DMA_CHANNELS];
+
+       /* Netdev related settings */
+       unsigned char mac_addr[ETH_ALEN];
+       netdev_features_t netdev_features;
+       struct napi_struct napi;
+
+       /* Filtering support */
+       unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)];
+
+       /* Device clocks */
+       unsigned long sysclk_rate;
+
+       /* RSS addressing mutex */
+       struct mutex rss_mutex;
+
+       /* Receive Side Scaling settings */
+       u8 rss_key[XLGMAC_RSS_HASH_KEY_SIZE];
+       u32 rss_table[XLGMAC_RSS_MAX_TABLE_SIZE];
+       u32 rss_options;
+
+       int phy_speed;
+
+       char drv_name[32];
+       char drv_ver[32];
+};
+
+void xlgmac_init_desc_ops(struct xlgmac_desc_ops *desc_ops);
+void xlgmac_init_hw_ops(struct xlgmac_hw_ops *hw_ops);
+const struct net_device_ops *xlgmac_get_netdev_ops(void);
+void xlgmac_dump_tx_desc(struct xlgmac_pdata *pdata,
+                        struct xlgmac_ring *ring,
+                        unsigned int idx,
+                        unsigned int count,
+                        unsigned int flag);
+void xlgmac_dump_rx_desc(struct xlgmac_pdata *pdata,
+                        struct xlgmac_ring *ring,
+                        unsigned int idx);
+void xlgmac_print_pkt(struct net_device *netdev,
+                     struct sk_buff *skb, bool tx_rx);
+void xlgmac_get_all_hw_features(struct xlgmac_pdata *pdata);
+void xlgmac_print_all_hw_features(struct xlgmac_pdata *pdata);
+int xlgmac_drv_probe(struct device *dev,
+                    struct xlgmac_resources *res);
+int xlgmac_drv_remove(struct device *dev);
+
+/* For debug prints */
+#ifdef XLGMAC_DEBUG
+#define XLGMAC_PR(fmt, args...) \
+       pr_alert("[%s,%d]:" fmt, __func__, __LINE__, ## args)
+#else
+#define XLGMAC_PR(x...)                do { } while (0)
+#endif
+
+#endif /* __DWC_XLGMAC_H__ */
index f864fd0663dbf830bd94b1db4daa7a7b5ad3ce1d..711fbbbc4b1f724fcebdaec80eda2eec1eef8551 100644 (file)
@@ -2124,33 +2124,26 @@ static const char
 };
 
 /*
- * bdx_get_settings - get device-specific settings
+ * bdx_get_link_ksettings - get device-specific settings
  * @netdev
  * @ecmd
  */
-static int bdx_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
-{
-       u32 rdintcm;
-       u32 tdintcm;
-       struct bdx_priv *priv = netdev_priv(netdev);
-
-       rdintcm = priv->rdintcm;
-       tdintcm = priv->tdintcm;
-
-       ecmd->supported = (SUPPORTED_10000baseT_Full | SUPPORTED_FIBRE);
-       ecmd->advertising = (ADVERTISED_10000baseT_Full | ADVERTISED_FIBRE);
-       ethtool_cmd_speed_set(ecmd, SPEED_10000);
-       ecmd->duplex = DUPLEX_FULL;
-       ecmd->port = PORT_FIBRE;
-       ecmd->transceiver = XCVR_EXTERNAL;      /* what does it mean? */
-       ecmd->autoneg = AUTONEG_DISABLE;
-
-       /* PCK_TH measures in multiples of FIFO bytes
-          We translate to packets */
-       ecmd->maxtxpkt =
-           ((GET_PCK_TH(tdintcm) * PCK_TH_MULT) / BDX_TXF_DESC_SZ);
-       ecmd->maxrxpkt =
-           ((GET_PCK_TH(rdintcm) * PCK_TH_MULT) / sizeof(struct rxf_desc));
+static int bdx_get_link_ksettings(struct net_device *netdev,
+                                 struct ethtool_link_ksettings *ecmd)
+{
+       ethtool_link_ksettings_zero_link_mode(ecmd, supported);
+       ethtool_link_ksettings_add_link_mode(ecmd, supported,
+                                            10000baseT_Full);
+       ethtool_link_ksettings_add_link_mode(ecmd, supported, FIBRE);
+       ethtool_link_ksettings_zero_link_mode(ecmd, advertising);
+       ethtool_link_ksettings_add_link_mode(ecmd, advertising,
+                                            10000baseT_Full);
+       ethtool_link_ksettings_add_link_mode(ecmd, advertising, FIBRE);
+
+       ecmd->base.speed = SPEED_10000;
+       ecmd->base.duplex = DUPLEX_FULL;
+       ecmd->base.port = PORT_FIBRE;
+       ecmd->base.autoneg = AUTONEG_DISABLE;
 
        return 0;
 }
@@ -2384,7 +2377,6 @@ static void bdx_get_ethtool_stats(struct net_device *netdev,
 static void bdx_set_ethtool_ops(struct net_device *netdev)
 {
        static const struct ethtool_ops bdx_ethtool_ops = {
-               .get_settings = bdx_get_settings,
                .get_drvinfo = bdx_get_drvinfo,
                .get_link = ethtool_op_get_link,
                .get_coalesce = bdx_get_coalesce,
@@ -2394,6 +2386,7 @@ static void bdx_set_ethtool_ops(struct net_device *netdev)
                .get_strings = bdx_get_strings,
                .get_sset_count = bdx_get_sset_count,
                .get_ethtool_stats = bdx_get_ethtool_stats,
+               .get_link_ksettings = bdx_get_link_ksettings,
        };
 
        netdev->ethtool_ops = &bdx_ethtool_ops;
index 296c8efd0038c8f66f41e9a58e30920462272192..9e631952b86f3d4ddd9108e1fe0db7c96d8363d2 100644 (file)
@@ -74,15 +74,21 @@ config TI_CPSW
          will be called cpsw.
 
 config TI_CPTS
-       tristate "TI Common Platform Time Sync (CPTS) Support"
+       bool "TI Common Platform Time Sync (CPTS) Support"
        depends on TI_CPSW || TI_KEYSTONE_NETCP
-       imply PTP_1588_CLOCK
+       depends on PTP_1588_CLOCK
        ---help---
          This driver supports the Common Platform Time Sync unit of
          the CPSW Ethernet Switch and Keystone 2 1g/10g Switch Subsystem.
          The unit can time stamp PTP UDP/IPv4 and Layer 2 packets, and the
          driver offers a PTP Hardware Clock.
 
+config TI_CPTS_MOD
+       tristate
+       depends on TI_CPTS
+       default y if TI_CPSW=y || TI_KEYSTONE_NETCP=y
+       default m
+
 config TI_KEYSTONE_NETCP
        tristate "TI Keystone NETCP Core Support"
        select TI_CPSW_ALE
index 1e7c10bf87132cda8e9c7ef2afc118ba1725388e..10e6b0ce51baf3115b8c72d40e933f10873186f0 100644 (file)
@@ -12,7 +12,7 @@ obj-$(CONFIG_TI_DAVINCI_MDIO) += davinci_mdio.o
 obj-$(CONFIG_TI_DAVINCI_CPDMA) += davinci_cpdma.o
 obj-$(CONFIG_TI_CPSW_PHY_SEL) += cpsw-phy-sel.o
 obj-$(CONFIG_TI_CPSW_ALE) += cpsw_ale.o
-obj-$(CONFIG_TI_CPTS) += cpts.o
+obj-$(CONFIG_TI_CPTS_MOD) += cpts.o
 obj-$(CONFIG_TI_CPSW) += ti_cpsw.o
 ti_cpsw-y := cpsw.o
 
index 9f3d9c67e3fe0f50b2d1119e74b7eac4b93e8bae..fa674a8bda0c8ff43d19699fefdd0ba718e75c90 100644 (file)
@@ -1267,6 +1267,7 @@ static void soft_reset_slave(struct cpsw_slave *slave)
 static void cpsw_slave_open(struct cpsw_slave *slave, struct cpsw_priv *priv)
 {
        u32 slave_port;
+       struct phy_device *phy;
        struct cpsw_common *cpsw = priv->cpsw;
 
        soft_reset_slave(slave);
@@ -1300,27 +1301,28 @@ static void cpsw_slave_open(struct cpsw_slave *slave, struct cpsw_priv *priv)
                                   1 << slave_port, 0, 0, ALE_MCAST_FWD_2);
 
        if (slave->data->phy_node) {
-               slave->phy = of_phy_connect(priv->ndev, slave->data->phy_node,
+               phy = of_phy_connect(priv->ndev, slave->data->phy_node,
                                 &cpsw_adjust_link, 0, slave->data->phy_if);
-               if (!slave->phy) {
+               if (!phy) {
                        dev_err(priv->dev, "phy \"%s\" not found on slave %d\n",
                                slave->data->phy_node->full_name,
                                slave->slave_num);
                        return;
                }
        } else {
-               slave->phy = phy_connect(priv->ndev, slave->data->phy_id,
+               phy = phy_connect(priv->ndev, slave->data->phy_id,
                                 &cpsw_adjust_link, slave->data->phy_if);
-               if (IS_ERR(slave->phy)) {
+               if (IS_ERR(phy)) {
                        dev_err(priv->dev,
                                "phy \"%s\" not found on slave %d, err %ld\n",
                                slave->data->phy_id, slave->slave_num,
-                               PTR_ERR(slave->phy));
-                       slave->phy = NULL;
+                               PTR_ERR(phy));
                        return;
                }
        }
 
+       slave->phy = phy;
+
        phy_attached_info(slave->phy);
 
        phy_start(slave->phy);
@@ -1817,6 +1819,8 @@ static void cpsw_ndo_tx_timeout(struct net_device *ndev)
        }
 
        cpsw_intr_enable(cpsw);
+       netif_trans_update(ndev);
+       netif_tx_wake_all_queues(ndev);
 }
 
 static int cpsw_ndo_set_mac_address(struct net_device *ndev, void *p)
index 7c7ae0890e90c450c2228e44cf618cdfdedcceec..9027c9c509b581cda23c00006124ff3839a6ae2c 100644 (file)
@@ -1882,6 +1882,7 @@ static u16 netcp_select_queue(struct net_device *dev, struct sk_buff *skb,
 static int netcp_setup_tc(struct net_device *dev, u32 handle, __be16 proto,
                          struct tc_to_netdev *tc)
 {
+       u8 num_tc;
        int i;
 
        /* setup tc must be called under rtnl lock */
@@ -1890,15 +1891,18 @@ static int netcp_setup_tc(struct net_device *dev, u32 handle, __be16 proto,
        if (tc->type != TC_SETUP_MQPRIO)
                return -EINVAL;
 
+       tc->mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS;
+       num_tc = tc->mqprio->num_tc;
+
        /* Sanity-check the number of traffic classes requested */
        if ((dev->real_num_tx_queues <= 1) ||
-           (dev->real_num_tx_queues < tc->tc))
+           (dev->real_num_tx_queues < num_tc))
                return -EINVAL;
 
        /* Configure traffic class to queue mappings */
-       if (tc->tc) {
-               netdev_set_num_tc(dev, tc->tc);
-               for (i = 0; i < tc->tc; i++)
+       if (num_tc) {
+               netdev_set_num_tc(dev, num_tc);
+               for (i = 0; i < num_tc; i++)
                        netdev_set_tc_queue(dev, i, 1, i);
        } else {
                netdev_reset_tc(dev);
index 72013314bba81fbbbb0f7d37da6143a9b0754b0c..fa6a06571187ed0d4f30a7001983b7f5fcbea018 100644 (file)
@@ -1206,61 +1206,68 @@ void gelic_net_get_drvinfo(struct net_device *netdev,
        strlcpy(info->version, DRV_VERSION, sizeof(info->version));
 }
 
-static int gelic_ether_get_settings(struct net_device *netdev,
-                                   struct ethtool_cmd *cmd)
+static int gelic_ether_get_link_ksettings(struct net_device *netdev,
+                                         struct ethtool_link_ksettings *cmd)
 {
        struct gelic_card *card = netdev_card(netdev);
+       u32 supported, advertising;
 
        gelic_card_get_ether_port_status(card, 0);
 
        if (card->ether_port_status & GELIC_LV1_ETHER_FULL_DUPLEX)
-               cmd->duplex = DUPLEX_FULL;
+               cmd->base.duplex = DUPLEX_FULL;
        else
-               cmd->duplex = DUPLEX_HALF;
+               cmd->base.duplex = DUPLEX_HALF;
 
        switch (card->ether_port_status & GELIC_LV1_ETHER_SPEED_MASK) {
        case GELIC_LV1_ETHER_SPEED_10:
-               ethtool_cmd_speed_set(cmd, SPEED_10);
+               cmd->base.speed = SPEED_10;
                break;
        case GELIC_LV1_ETHER_SPEED_100:
-               ethtool_cmd_speed_set(cmd, SPEED_100);
+               cmd->base.speed = SPEED_100;
                break;
        case GELIC_LV1_ETHER_SPEED_1000:
-               ethtool_cmd_speed_set(cmd, SPEED_1000);
+               cmd->base.speed = SPEED_1000;
                break;
        default:
                pr_info("%s: speed unknown\n", __func__);
-               ethtool_cmd_speed_set(cmd, SPEED_10);
+               cmd->base.speed = SPEED_10;
                break;
        }
 
-       cmd->supported = SUPPORTED_TP | SUPPORTED_Autoneg |
+       supported = SUPPORTED_TP | SUPPORTED_Autoneg |
                        SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full |
                        SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full |
                        SUPPORTED_1000baseT_Full;
-       cmd->advertising = cmd->supported;
+       advertising = supported;
        if (card->link_mode & GELIC_LV1_ETHER_AUTO_NEG) {
-               cmd->autoneg = AUTONEG_ENABLE;
+               cmd->base.autoneg = AUTONEG_ENABLE;
        } else {
-               cmd->autoneg = AUTONEG_DISABLE;
-               cmd->advertising &= ~ADVERTISED_Autoneg;
+               cmd->base.autoneg = AUTONEG_DISABLE;
+               advertising &= ~ADVERTISED_Autoneg;
        }
-       cmd->port = PORT_TP;
+       cmd->base.port = PORT_TP;
+
+       ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
+                                               supported);
+       ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
+                                               advertising);
 
        return 0;
 }
 
-static int gelic_ether_set_settings(struct net_device *netdev,
-                                   struct ethtool_cmd *cmd)
+static int
+gelic_ether_set_link_ksettings(struct net_device *netdev,
+                              const struct ethtool_link_ksettings *cmd)
 {
        struct gelic_card *card = netdev_card(netdev);
        u64 mode;
        int ret;
 
-       if (cmd->autoneg == AUTONEG_ENABLE) {
+       if (cmd->base.autoneg == AUTONEG_ENABLE) {
                mode = GELIC_LV1_ETHER_AUTO_NEG;
        } else {
-               switch (cmd->speed) {
+               switch (cmd->base.speed) {
                case SPEED_10:
                        mode = GELIC_LV1_ETHER_SPEED_10;
                        break;
@@ -1273,9 +1280,9 @@ static int gelic_ether_set_settings(struct net_device *netdev,
                default:
                        return -EINVAL;
                }
-               if (cmd->duplex == DUPLEX_FULL)
+               if (cmd->base.duplex == DUPLEX_FULL) {
                        mode |= GELIC_LV1_ETHER_FULL_DUPLEX;
-               else if (cmd->speed == SPEED_1000) {
+               } else if (cmd->base.speed == SPEED_1000) {
                        pr_info("1000 half duplex is not supported.\n");
                        return -EINVAL;
                }
@@ -1370,11 +1377,11 @@ done:
 
 static const struct ethtool_ops gelic_ether_ethtool_ops = {
        .get_drvinfo    = gelic_net_get_drvinfo,
-       .get_settings   = gelic_ether_get_settings,
-       .set_settings   = gelic_ether_set_settings,
        .get_link       = ethtool_op_get_link,
        .get_wol        = gelic_net_get_wol,
        .set_wol        = gelic_net_set_wol,
+       .get_link_ksettings = gelic_ether_get_link_ksettings,
+       .set_link_ksettings = gelic_ether_set_link_ksettings,
 };
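
The gelic hunks above show the recipe the drivers in this series follow when moving from the legacy {get,set}_settings ethtool hooks to the link_ksettings API: speed, duplex, autoneg and port become fields of cmd->base, while the old u32 supported/advertising masks are converted into link-mode bitmaps at the end. A minimal sketch of that pattern; the driver and function names are hypothetical and the fixed link values are for brevity only:

    #include <linux/ethtool.h>
    #include <linux/netdevice.h>

    /* Hypothetical driver reporting a fixed 100 Mb/s full-duplex TP link. */
    static int example_get_link_ksettings(struct net_device *netdev,
                                          struct ethtool_link_ksettings *cmd)
    {
            u32 supported = SUPPORTED_TP | SUPPORTED_Autoneg |
                            SUPPORTED_100baseT_Full;
            u32 advertising = supported;

            cmd->base.speed = SPEED_100;
            cmd->base.duplex = DUPLEX_FULL;
            cmd->base.autoneg = AUTONEG_ENABLE;
            cmd->base.port = PORT_TP;

            /* Translate the legacy u32 masks into link-mode bitmaps. */
            ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
                                                    supported);
            ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
                                                    advertising);
            return 0;
    }

Drivers whose mode sets fit in the legacy u32 masks keep building them the old way and convert once at the end, as gelic and via-velocity do; the spider_net hunks below show the alternative of setting bitmap bits directly with the ethtool_link_ksettings_*_link_mode() helpers.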
 
 /**
index ffe519382e111a04dd37c904a6080cee561e2d47..16bd036d06820e9139d04f05355a5a4e5127509c 100644
@@ -47,19 +47,23 @@ static struct {
 };
 
 static int
-spider_net_ethtool_get_settings(struct net_device *netdev,
-                              struct ethtool_cmd *cmd)
+spider_net_ethtool_get_link_ksettings(struct net_device *netdev,
+                                     struct ethtool_link_ksettings *cmd)
 {
        struct spider_net_card *card;
        card = netdev_priv(netdev);
 
-       cmd->supported   = (SUPPORTED_1000baseT_Full |
-                            SUPPORTED_FIBRE);
-       cmd->advertising = (ADVERTISED_1000baseT_Full |
-                            ADVERTISED_FIBRE);
-       cmd->port = PORT_FIBRE;
-       ethtool_cmd_speed_set(cmd, card->phy.speed);
-       cmd->duplex = DUPLEX_FULL;
+       ethtool_link_ksettings_zero_link_mode(cmd, supported);
+       ethtool_link_ksettings_add_link_mode(cmd, supported, 1000baseT_Full);
+       ethtool_link_ksettings_add_link_mode(cmd, supported, FIBRE);
+
+       ethtool_link_ksettings_zero_link_mode(cmd, advertising);
+       ethtool_link_ksettings_add_link_mode(cmd, advertising, 1000baseT_Full);
+       ethtool_link_ksettings_add_link_mode(cmd, advertising, FIBRE);
+
+       cmd->base.port = PORT_FIBRE;
+       cmd->base.speed = card->phy.speed;
+       cmd->base.duplex = DUPLEX_FULL;
 
        return 0;
 }
@@ -166,7 +170,6 @@ static void spider_net_get_strings(struct net_device *netdev, u32 stringset,
 }
 
 const struct ethtool_ops spider_net_ethtool_ops = {
-       .get_settings           = spider_net_ethtool_get_settings,
        .get_drvinfo            = spider_net_ethtool_get_drvinfo,
        .get_wol                = spider_net_ethtool_get_wol,
        .get_msglevel           = spider_net_ethtool_get_msglevel,
@@ -177,5 +180,6 @@ const struct ethtool_ops spider_net_ethtool_ops = {
        .get_strings            = spider_net_get_strings,
        .get_sset_count         = spider_net_get_sset_count,
        .get_ethtool_stats      = spider_net_get_ethtool_stats,
+       .get_link_ksettings     = spider_net_ethtool_get_link_ksettings,
 };
 
index c5583991da4aa462652b5236992c7731529422db..5ac6eaa9e78510a2c28cf2506dd791ece82b3009 100644
@@ -1499,27 +1499,29 @@ static void tsi108_init_mac(struct net_device *dev)
        TSI_WRITE(TSI108_EC_INTMASK, ~0);
 }
 
-static int tsi108_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+static int tsi108_get_link_ksettings(struct net_device *dev,
+                                    struct ethtool_link_ksettings *cmd)
 {
        struct tsi108_prv_data *data = netdev_priv(dev);
        unsigned long flags;
        int rc;
 
        spin_lock_irqsave(&data->txlock, flags);
-       rc = mii_ethtool_gset(&data->mii_if, cmd);
+       rc = mii_ethtool_get_link_ksettings(&data->mii_if, cmd);
        spin_unlock_irqrestore(&data->txlock, flags);
 
        return rc;
 }
 
-static int tsi108_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+static int tsi108_set_link_ksettings(struct net_device *dev,
+                                    const struct ethtool_link_ksettings *cmd)
 {
        struct tsi108_prv_data *data = netdev_priv(dev);
        unsigned long flags;
        int rc;
 
        spin_lock_irqsave(&data->txlock, flags);
-       rc = mii_ethtool_sset(&data->mii_if, cmd);
+       rc = mii_ethtool_set_link_ksettings(&data->mii_if, cmd);
        spin_unlock_irqrestore(&data->txlock, flags);
 
        return rc;
@@ -1535,8 +1537,8 @@ static int tsi108_do_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
 
 static const struct ethtool_ops tsi108_ethtool_ops = {
        .get_link       = ethtool_op_get_link,
-       .get_settings   = tsi108_get_settings,
-       .set_settings   = tsi108_set_settings,
+       .get_link_ksettings     = tsi108_get_link_ksettings,
+       .set_link_ksettings     = tsi108_set_link_ksettings,
 };
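
Drivers that delegate PHY handling to the generic MII library (tsi108 here, via-rhine below) convert almost mechanically: mii_ethtool_gset()/mii_ethtool_sset() are swapped for mii_ethtool_get_link_ksettings()/mii_ethtool_set_link_ksettings(), and the driver's existing locking stays wrapped around the call. A hedged sketch of that shape, with illustrative names:

    #include <linux/ethtool.h>
    #include <linux/mii.h>
    #include <linux/netdevice.h>

    /* Illustrative private data; real drivers already embed mii_if_info. */
    struct foo_priv {
            struct mii_if_info mii_if;
            spinlock_t lock;
    };

    static int foo_get_link_ksettings(struct net_device *dev,
                                      struct ethtool_link_ksettings *cmd)
    {
            struct foo_priv *priv = netdev_priv(dev);
            int rc;

            spin_lock_irq(&priv->lock);     /* same lock the old gset path took */
            rc = mii_ethtool_get_link_ksettings(&priv->mii_if, cmd);
            spin_unlock_irq(&priv->lock);

            return rc;
    }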
 
 static const struct net_device_ops tsi108_netdev_ops = {
index c068c58428f7611ddcd526010cb07f8ada61b760..4cf41f779d0ef4ef8d6cf2fa688abd97b3e8264d 100644
@@ -2303,25 +2303,27 @@ static void netdev_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *i
        strlcpy(info->bus_info, dev_name(hwdev), sizeof(info->bus_info));
 }
 
-static int netdev_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+static int netdev_get_link_ksettings(struct net_device *dev,
+                                    struct ethtool_link_ksettings *cmd)
 {
        struct rhine_private *rp = netdev_priv(dev);
        int rc;
 
        mutex_lock(&rp->task_lock);
-       rc = mii_ethtool_gset(&rp->mii_if, cmd);
+       rc = mii_ethtool_get_link_ksettings(&rp->mii_if, cmd);
        mutex_unlock(&rp->task_lock);
 
        return rc;
 }
 
-static int netdev_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+static int netdev_set_link_ksettings(struct net_device *dev,
+                                    const struct ethtool_link_ksettings *cmd)
 {
        struct rhine_private *rp = netdev_priv(dev);
        int rc;
 
        mutex_lock(&rp->task_lock);
-       rc = mii_ethtool_sset(&rp->mii_if, cmd);
+       rc = mii_ethtool_set_link_ksettings(&rp->mii_if, cmd);
        rhine_set_carrier(&rp->mii_if);
        mutex_unlock(&rp->task_lock);
 
@@ -2391,14 +2393,14 @@ static int rhine_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
 
 static const struct ethtool_ops netdev_ethtool_ops = {
        .get_drvinfo            = netdev_get_drvinfo,
-       .get_settings           = netdev_get_settings,
-       .set_settings           = netdev_set_settings,
        .nway_reset             = netdev_nway_reset,
        .get_link               = netdev_get_link,
        .get_msglevel           = netdev_get_msglevel,
        .set_msglevel           = netdev_set_msglevel,
        .get_wol                = rhine_get_wol,
        .set_wol                = rhine_set_wol,
+       .get_link_ksettings     = netdev_get_link_ksettings,
+       .set_link_ksettings     = netdev_set_link_ksettings,
 };
 
 static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
index d088788b27a751286f7556b7f478b210c0ab68a5..ef9538ee53d0db7f43eae4298dd39258b4c39122 100644
@@ -3291,15 +3291,17 @@ static void velocity_ethtool_down(struct net_device *dev)
                velocity_set_power_state(vptr, PCI_D3hot);
 }
 
-static int velocity_get_settings(struct net_device *dev,
-                                struct ethtool_cmd *cmd)
+static int velocity_get_link_ksettings(struct net_device *dev,
+                                      struct ethtool_link_ksettings *cmd)
 {
        struct velocity_info *vptr = netdev_priv(dev);
        struct mac_regs __iomem *regs = vptr->mac_regs;
        u32 status;
+       u32 supported, advertising;
+
        status = check_connection_type(vptr->mac_regs);
 
-       cmd->supported = SUPPORTED_TP |
+       supported = SUPPORTED_TP |
                        SUPPORTED_Autoneg |
                        SUPPORTED_10baseT_Half |
                        SUPPORTED_10baseT_Full |
@@ -3308,9 +3310,9 @@ static int velocity_get_settings(struct net_device *dev,
                        SUPPORTED_1000baseT_Half |
                        SUPPORTED_1000baseT_Full;
 
-       cmd->advertising = ADVERTISED_TP | ADVERTISED_Autoneg;
+       advertising = ADVERTISED_TP | ADVERTISED_Autoneg;
        if (vptr->options.spd_dpx == SPD_DPX_AUTO) {
-               cmd->advertising |=
+               advertising |=
                        ADVERTISED_10baseT_Half |
                        ADVERTISED_10baseT_Full |
                        ADVERTISED_100baseT_Half |
@@ -3320,19 +3322,19 @@ static int velocity_get_settings(struct net_device *dev,
        } else {
                switch (vptr->options.spd_dpx) {
                case SPD_DPX_1000_FULL:
-                       cmd->advertising |= ADVERTISED_1000baseT_Full;
+                       advertising |= ADVERTISED_1000baseT_Full;
                        break;
                case SPD_DPX_100_HALF:
-                       cmd->advertising |= ADVERTISED_100baseT_Half;
+                       advertising |= ADVERTISED_100baseT_Half;
                        break;
                case SPD_DPX_100_FULL:
-                       cmd->advertising |= ADVERTISED_100baseT_Full;
+                       advertising |= ADVERTISED_100baseT_Full;
                        break;
                case SPD_DPX_10_HALF:
-                       cmd->advertising |= ADVERTISED_10baseT_Half;
+                       advertising |= ADVERTISED_10baseT_Half;
                        break;
                case SPD_DPX_10_FULL:
-                       cmd->advertising |= ADVERTISED_10baseT_Full;
+                       advertising |= ADVERTISED_10baseT_Full;
                        break;
                default:
                        break;
@@ -3340,30 +3342,35 @@ static int velocity_get_settings(struct net_device *dev,
        }
 
        if (status & VELOCITY_SPEED_1000)
-               ethtool_cmd_speed_set(cmd, SPEED_1000);
+               cmd->base.speed = SPEED_1000;
        else if (status & VELOCITY_SPEED_100)
-               ethtool_cmd_speed_set(cmd, SPEED_100);
+               cmd->base.speed = SPEED_100;
        else
-               ethtool_cmd_speed_set(cmd, SPEED_10);
+               cmd->base.speed = SPEED_10;
 
-       cmd->autoneg = (status & VELOCITY_AUTONEG_ENABLE) ? AUTONEG_ENABLE : AUTONEG_DISABLE;
-       cmd->port = PORT_TP;
-       cmd->transceiver = XCVR_INTERNAL;
-       cmd->phy_address = readb(&regs->MIIADR) & 0x1F;
+       cmd->base.autoneg = (status & VELOCITY_AUTONEG_ENABLE) ?
+               AUTONEG_ENABLE : AUTONEG_DISABLE;
+       cmd->base.port = PORT_TP;
+       cmd->base.phy_address = readb(&regs->MIIADR) & 0x1F;
 
        if (status & VELOCITY_DUPLEX_FULL)
-               cmd->duplex = DUPLEX_FULL;
+               cmd->base.duplex = DUPLEX_FULL;
        else
-               cmd->duplex = DUPLEX_HALF;
+               cmd->base.duplex = DUPLEX_HALF;
+
+       ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
+                                               supported);
+       ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
+                                               advertising);
 
        return 0;
 }
 
-static int velocity_set_settings(struct net_device *dev,
-                                struct ethtool_cmd *cmd)
+static int velocity_set_link_ksettings(struct net_device *dev,
+                                      const struct ethtool_link_ksettings *cmd)
 {
        struct velocity_info *vptr = netdev_priv(dev);
-       u32 speed = ethtool_cmd_speed(cmd);
+       u32 speed = cmd->base.speed;
        u32 curr_status;
        u32 new_status = 0;
        int ret = 0;
@@ -3371,11 +3378,12 @@ static int velocity_set_settings(struct net_device *dev,
        curr_status = check_connection_type(vptr->mac_regs);
        curr_status &= (~VELOCITY_LINK_FAIL);
 
-       new_status |= ((cmd->autoneg) ? VELOCITY_AUTONEG_ENABLE : 0);
+       new_status |= ((cmd->base.autoneg) ? VELOCITY_AUTONEG_ENABLE : 0);
        new_status |= ((speed == SPEED_1000) ? VELOCITY_SPEED_1000 : 0);
        new_status |= ((speed == SPEED_100) ? VELOCITY_SPEED_100 : 0);
        new_status |= ((speed == SPEED_10) ? VELOCITY_SPEED_10 : 0);
-       new_status |= ((cmd->duplex == DUPLEX_FULL) ? VELOCITY_DUPLEX_FULL : 0);
+       new_status |= ((cmd->base.duplex == DUPLEX_FULL) ?
+                      VELOCITY_DUPLEX_FULL : 0);
 
        if ((new_status & VELOCITY_AUTONEG_ENABLE) &&
            (new_status != (curr_status | VELOCITY_AUTONEG_ENABLE))) {
@@ -3644,8 +3652,6 @@ static void velocity_get_ethtool_stats(struct net_device *dev,
 }
 
 static const struct ethtool_ops velocity_ethtool_ops = {
-       .get_settings           = velocity_get_settings,
-       .set_settings           = velocity_set_settings,
        .get_drvinfo            = velocity_get_drvinfo,
        .get_wol                = velocity_ethtool_get_wol,
        .set_wol                = velocity_ethtool_set_wol,
@@ -3658,7 +3664,9 @@ static const struct ethtool_ops velocity_ethtool_ops = {
        .get_coalesce           = velocity_get_coalesce,
        .set_coalesce           = velocity_set_coalesce,
        .begin                  = velocity_ethtool_up,
-       .complete               = velocity_ethtool_down
+       .complete               = velocity_ethtool_down,
+       .get_link_ksettings     = velocity_get_link_ksettings,
+       .set_link_ksettings     = velocity_set_link_ksettings,
 };
 
 #if defined(CONFIG_PM) && defined(CONFIG_INET)
index f90267f0519feebb7c9d944535cb81f53c6bb9fe..2bdfb39215e9ce749ecc5fac8f4a22226c081bc4 100644
@@ -1152,7 +1152,8 @@ int w5100_probe(struct device *dev, const struct w5100_ops *ops,
        if (err < 0)
                goto err_register;
 
-       priv->xfer_wq = alloc_workqueue(netdev_name(ndev), WQ_MEM_RECLAIM, 0);
+       priv->xfer_wq = alloc_workqueue("%s", WQ_MEM_RECLAIM, 0,
+                                       netdev_name(ndev));
        if (!priv->xfer_wq) {
                err = -ENOMEM;
                goto err_wq;
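
This w5100 change closes a format-string hole: the old call passed netdev_name(ndev) directly as alloc_workqueue()'s printf-style fmt argument, so a '%' in an interface name would be parsed as a conversion specifier. The fix uses a constant "%s" format and supplies the name as an argument; side by side, for illustration:

    /* Before: device name interpreted as a format string (unsafe). */
    wq = alloc_workqueue(netdev_name(ndev), WQ_MEM_RECLAIM, 0);

    /* After: constant format, name passed as an argument (safe). */
    wq = alloc_workqueue("%s", WQ_MEM_RECLAIM, 0, netdev_name(ndev));
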
index 6575f880f1be52fb9daa91ea7f88eb3199b614f5..7d101714c2ef4cc38201971d18b4b8f8bac1b8cd 100644
@@ -175,16 +175,15 @@ static void fjes_get_drvinfo(struct net_device *netdev,
                 "platform:%s", plat_dev->name);
 }
 
-static int fjes_get_settings(struct net_device *netdev,
-                            struct ethtool_cmd *ecmd)
+static int fjes_get_link_ksettings(struct net_device *netdev,
+                                  struct ethtool_link_ksettings *ecmd)
 {
-       ecmd->supported = 0;
-       ecmd->advertising = 0;
-       ecmd->duplex = DUPLEX_FULL;
-       ecmd->autoneg = AUTONEG_DISABLE;
-       ecmd->transceiver = XCVR_DUMMY1;
-       ecmd->port = PORT_NONE;
-       ethtool_cmd_speed_set(ecmd, 20000);     /* 20Gb/s */
+       ethtool_link_ksettings_zero_link_mode(ecmd, supported);
+       ethtool_link_ksettings_zero_link_mode(ecmd, advertising);
+       ecmd->base.duplex = DUPLEX_FULL;
+       ecmd->base.autoneg = AUTONEG_DISABLE;
+       ecmd->base.port = PORT_NONE;
+       ecmd->base.speed = 20000;       /* 20Gb/s */
 
        return 0;
 }
@@ -296,7 +295,6 @@ static int fjes_get_dump_data(struct net_device *netdev,
 }
 
 static const struct ethtool_ops fjes_ethtool_ops = {
-               .get_settings           = fjes_get_settings,
                .get_drvinfo            = fjes_get_drvinfo,
                .get_ethtool_stats = fjes_get_ethtool_stats,
                .get_strings      = fjes_get_strings,
@@ -306,6 +304,7 @@ static const struct ethtool_ops fjes_ethtool_ops = {
                .set_dump               = fjes_set_dump,
                .get_dump_flag          = fjes_get_dump_flag,
                .get_dump_data          = fjes_get_dump_data,
+               .get_link_ksettings     = fjes_get_link_ksettings,
 };
 
 void fjes_set_ethtool_ops(struct net_device *netdev)
index b75d9cdcfb0c415c7abeaa97cebeacbc917e1abd..ae48c809bac9fe13b0a92e086f0a1c6a4cf6feaf 100644
@@ -45,6 +45,8 @@ MODULE_DESCRIPTION("FUJITSU Extended Socket Network Device Driver");
 MODULE_LICENSE("GPL");
 MODULE_VERSION(DRV_VERSION);
 
+#define ACPI_MOTHERBOARD_RESOURCE_HID "PNP0C02"
+
 static int fjes_request_irq(struct fjes_adapter *);
 static void fjes_free_irq(struct fjes_adapter *);
 
@@ -78,7 +80,7 @@ static void fjes_rx_irq(struct fjes_adapter *, int);
 static int fjes_poll(struct napi_struct *, int);
 
 static const struct acpi_device_id fjes_acpi_ids[] = {
-       {"PNP0C02", 0},
+       {ACPI_MOTHERBOARD_RESOURCE_HID, 0},
        {"", 0},
 };
 MODULE_DEVICE_TABLE(acpi, fjes_acpi_ids);
@@ -115,18 +117,17 @@ static struct resource fjes_resource[] = {
        },
 };
 
-static int fjes_acpi_add(struct acpi_device *device)
+static bool is_extended_socket_device(struct acpi_device *device)
 {
        struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL};
        char str_buf[sizeof(FJES_ACPI_SYMBOL) + 1];
-       struct platform_device *plat_dev;
        union acpi_object *str;
        acpi_status status;
        int result;
 
        status = acpi_evaluate_object(device->handle, "_STR", NULL, &buffer);
        if (ACPI_FAILURE(status))
-               return -ENODEV;
+               return false;
 
        str = buffer.pointer;
        result = utf16s_to_utf8s((wchar_t *)str->string.pointer,
@@ -136,10 +137,42 @@ static int fjes_acpi_add(struct acpi_device *device)
 
        if (strncmp(FJES_ACPI_SYMBOL, str_buf, strlen(FJES_ACPI_SYMBOL)) != 0) {
                kfree(buffer.pointer);
-               return -ENODEV;
+               return false;
        }
        kfree(buffer.pointer);
 
+       return true;
+}
+
+static int acpi_check_extended_socket_status(struct acpi_device *device)
+{
+       unsigned long long sta;
+       acpi_status status;
+
+       status = acpi_evaluate_integer(device->handle, "_STA", NULL, &sta);
+       if (ACPI_FAILURE(status))
+               return -ENODEV;
+
+       if (!((sta & ACPI_STA_DEVICE_PRESENT) &&
+             (sta & ACPI_STA_DEVICE_ENABLED) &&
+             (sta & ACPI_STA_DEVICE_UI) &&
+             (sta & ACPI_STA_DEVICE_FUNCTIONING)))
+               return -ENODEV;
+
+       return 0;
+}
+
+static int fjes_acpi_add(struct acpi_device *device)
+{
+       struct platform_device *plat_dev;
+       acpi_status status;
+
+       if (!is_extended_socket_device(device))
+               return -ENODEV;
+
+       if (acpi_check_extended_socket_status(device))
+               return -ENODEV;
+
        status = acpi_walk_resources(device->handle, METHOD_NAME__CRS,
                                     fjes_get_acpi_resource, fjes_resource);
        if (ACPI_FAILURE(status))
@@ -1316,7 +1349,7 @@ static void fjes_netdev_setup(struct net_device *netdev)
        netdev->min_mtu = fjes_support_mtu[0];
        netdev->max_mtu = fjes_support_mtu[3];
        netdev->flags |= IFF_BROADCAST;
-       netdev->features |= NETIF_F_HW_CSUM | NETIF_F_HW_VLAN_CTAG_FILTER;
+       netdev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
 }
 
 static void fjes_irq_watch_task(struct work_struct *work)
@@ -1473,11 +1506,44 @@ static void fjes_watch_unshare_task(struct work_struct *work)
        }
 }
 
+static acpi_status
+acpi_find_extended_socket_device(acpi_handle obj_handle, u32 level,
+                                void *context, void **return_value)
+{
+       struct acpi_device *device;
+       bool *found = context;
+       int result;
+
+       result = acpi_bus_get_device(obj_handle, &device);
+       if (result)
+               return AE_OK;
+
+       if (strcmp(acpi_device_hid(device), ACPI_MOTHERBOARD_RESOURCE_HID))
+               return AE_OK;
+
+       if (!is_extended_socket_device(device))
+               return AE_OK;
+
+       if (acpi_check_extended_socket_status(device))
+               return AE_OK;
+
+       *found = true;
+       return AE_CTRL_TERMINATE;
+}
+
 /* fjes_init_module - Driver Registration Routine */
 static int __init fjes_init_module(void)
 {
+       bool found = false;
        int result;
 
+       acpi_walk_namespace(ACPI_TYPE_DEVICE, ACPI_ROOT_OBJECT, ACPI_UINT32_MAX,
+                           acpi_find_extended_socket_device, NULL, &found,
+                           NULL);
+
+       if (!found)
+               return -ENODEV;
+
        pr_info("%s - version %s - %s\n",
                fjes_driver_string, fjes_driver_version, fjes_copyright);
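
fjes_init_module() now probes the ACPI namespace first and returns -ENODEV unless an extended-socket device is actually present and usable, instead of registering unconditionally. The _STA test added in acpi_check_extended_socket_status() above demands four status bits at once; it is equivalent to masking against a composite constant, e.g. (macro name is illustrative):

    #define EXT_SOCK_STA_REQUIRED (ACPI_STA_DEVICE_PRESENT |     \
                                   ACPI_STA_DEVICE_ENABLED |     \
                                   ACPI_STA_DEVICE_UI |          \
                                   ACPI_STA_DEVICE_FUNCTIONING)

    if ((sta & EXT_SOCK_STA_REQUIRED) != EXT_SOCK_STA_REQUIRED)
            return -ENODEV;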
 
index 89698741682f41e2ea70868715ffcb4f4784e258..4fea1b3dfbb4457247c4bc98dc9378591c36ecd3 100644
@@ -56,7 +56,10 @@ struct pdp_ctx {
        u16                     af;
 
        struct in_addr          ms_addr_ip4;
-       struct in_addr          sgsn_addr_ip4;
+       struct in_addr          peer_addr_ip4;
+
+       struct sock             *sk;
+       struct net_device       *dev;
 
        atomic_t                tx_seq;
        struct rcu_head         rcu_head;
@@ -66,11 +69,12 @@ struct pdp_ctx {
 struct gtp_dev {
        struct list_head        list;
 
-       struct socket           *sock0;
-       struct socket           *sock1u;
+       struct sock             *sk0;
+       struct sock             *sk1u;
 
        struct net_device       *dev;
 
+       unsigned int            role;
        unsigned int            hash_size;
        struct hlist_head       *tid_hash;
        struct hlist_head       *addr_hash;
@@ -84,6 +88,8 @@ struct gtp_net {
 
 static u32 gtp_h_initval;
 
+static void pdp_context_delete(struct pdp_ctx *pctx);
+
 static inline u32 gtp0_hashfn(u64 tid)
 {
        u32 *tid32 = (u32 *) &tid;
@@ -149,8 +155,8 @@ static struct pdp_ctx *ipv4_pdp_find(struct gtp_dev *gtp, __be32 ms_addr)
        return NULL;
 }
 
-static bool gtp_check_src_ms_ipv4(struct sk_buff *skb, struct pdp_ctx *pctx,
-                                 unsigned int hdrlen)
+static bool gtp_check_ms_ipv4(struct sk_buff *skb, struct pdp_ctx *pctx,
+                                 unsigned int hdrlen, unsigned int role)
 {
        struct iphdr *iph;
 
@@ -159,25 +165,62 @@ static bool gtp_check_src_ms_ipv4(struct sk_buff *skb, struct pdp_ctx *pctx,
 
        iph = (struct iphdr *)(skb->data + hdrlen);
 
-       return iph->saddr == pctx->ms_addr_ip4.s_addr;
+       if (role == GTP_ROLE_SGSN)
+               return iph->daddr == pctx->ms_addr_ip4.s_addr;
+       else
+               return iph->saddr == pctx->ms_addr_ip4.s_addr;
 }
 
-/* Check if the inner IP source address in this packet is assigned to any
+/* Check if the inner IP address in this packet is assigned to any
  * existing mobile subscriber.
  */
-static bool gtp_check_src_ms(struct sk_buff *skb, struct pdp_ctx *pctx,
-                            unsigned int hdrlen)
+static bool gtp_check_ms(struct sk_buff *skb, struct pdp_ctx *pctx,
+                            unsigned int hdrlen, unsigned int role)
 {
        switch (ntohs(skb->protocol)) {
        case ETH_P_IP:
-               return gtp_check_src_ms_ipv4(skb, pctx, hdrlen);
+               return gtp_check_ms_ipv4(skb, pctx, hdrlen, role);
        }
        return false;
 }
 
+static int gtp_rx(struct pdp_ctx *pctx, struct sk_buff *skb,
+                       unsigned int hdrlen, unsigned int role)
+{
+       struct pcpu_sw_netstats *stats;
+
+       if (!gtp_check_ms(skb, pctx, hdrlen, role)) {
+               netdev_dbg(pctx->dev, "No PDP ctx for this MS\n");
+               return 1;
+       }
+
+       /* Get rid of the GTP + UDP headers. */
+       if (iptunnel_pull_header(skb, hdrlen, skb->protocol,
+                                !net_eq(sock_net(pctx->sk), dev_net(pctx->dev))))
+               return -1;
+
+       netdev_dbg(pctx->dev, "forwarding packet from GGSN to uplink\n");
+
+       /* Now that the UDP and the GTP header have been removed, set up the
+        * new network header. This is required by the upper layer to
+        * calculate the transport header.
+        */
+       skb_reset_network_header(skb);
+
+       skb->dev = pctx->dev;
+
+       stats = this_cpu_ptr(pctx->dev->tstats);
+       u64_stats_update_begin(&stats->syncp);
+       stats->rx_packets++;
+       stats->rx_bytes += skb->len;
+       u64_stats_update_end(&stats->syncp);
+
+       netif_rx(skb);
+       return 0;
+}
+
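gtp_rx() now derives the cross-namespace flag itself instead of having the encap handler pass it down. The expression is worth unpacking: it is true exactly when the tunnel socket and the gtp netdevice live in different network namespaces, which tells iptunnel_pull_header() to scrub namespace-local skb state during decapsulation:

    /* true iff decapsulation crosses a netns boundary; the helper then
     * scrubs skb state that must not leak between namespaces. */
    bool xnet = !net_eq(sock_net(pctx->sk), dev_net(pctx->dev));
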
 /* 1 means pass up to the stack, -1 means drop and 0 means decapsulated. */
-static int gtp0_udp_encap_recv(struct gtp_dev *gtp, struct sk_buff *skb,
-                              bool xnet)
+static int gtp0_udp_encap_recv(struct gtp_dev *gtp, struct sk_buff *skb)
 {
        unsigned int hdrlen = sizeof(struct udphdr) +
                              sizeof(struct gtp0_header);
@@ -201,17 +244,10 @@ static int gtp0_udp_encap_recv(struct gtp_dev *gtp, struct sk_buff *skb,
                return 1;
        }
 
-       if (!gtp_check_src_ms(skb, pctx, hdrlen)) {
-               netdev_dbg(gtp->dev, "No PDP ctx for this MS\n");
-               return 1;
-       }
-
-       /* Get rid of the GTP + UDP headers. */
-       return iptunnel_pull_header(skb, hdrlen, skb->protocol, xnet);
+       return gtp_rx(pctx, skb, hdrlen, gtp->role);
 }
 
-static int gtp1u_udp_encap_recv(struct gtp_dev *gtp, struct sk_buff *skb,
-                               bool xnet)
+static int gtp1u_udp_encap_recv(struct gtp_dev *gtp, struct sk_buff *skb)
 {
        unsigned int hdrlen = sizeof(struct udphdr) +
                              sizeof(struct gtp1_header);
@@ -250,37 +286,33 @@ static int gtp1u_udp_encap_recv(struct gtp_dev *gtp, struct sk_buff *skb,
                return 1;
        }
 
-       if (!gtp_check_src_ms(skb, pctx, hdrlen)) {
-               netdev_dbg(gtp->dev, "No PDP ctx for this MS\n");
-               return 1;
-       }
-
-       /* Get rid of the GTP + UDP headers. */
-       return iptunnel_pull_header(skb, hdrlen, skb->protocol, xnet);
+       return gtp_rx(pctx, skb, hdrlen, gtp->role);
 }
 
-static void gtp_encap_disable(struct gtp_dev *gtp)
+static void gtp_encap_destroy(struct sock *sk)
 {
-       if (gtp->sock0 && gtp->sock0->sk) {
-               udp_sk(gtp->sock0->sk)->encap_type = 0;
-               rcu_assign_sk_user_data(gtp->sock0->sk, NULL);
-       }
-       if (gtp->sock1u && gtp->sock1u->sk) {
-               udp_sk(gtp->sock1u->sk)->encap_type = 0;
-               rcu_assign_sk_user_data(gtp->sock1u->sk, NULL);
-       }
+       struct gtp_dev *gtp;
 
-       gtp->sock0 = NULL;
-       gtp->sock1u = NULL;
+       gtp = rcu_dereference_sk_user_data(sk);
+       if (gtp) {
+               udp_sk(sk)->encap_type = 0;
+               rcu_assign_sk_user_data(sk, NULL);
+               sock_put(sk);
+       }
 }
 
-static void gtp_encap_destroy(struct sock *sk)
+static void gtp_encap_disable_sock(struct sock *sk)
 {
-       struct gtp_dev *gtp;
+       if (!sk)
+               return;
 
-       gtp = rcu_dereference_sk_user_data(sk);
-       if (gtp)
-               gtp_encap_disable(gtp);
+       gtp_encap_destroy(sk);
+}
+
+static void gtp_encap_disable(struct gtp_dev *gtp)
+{
+       gtp_encap_disable_sock(gtp->sk0);
+       gtp_encap_disable_sock(gtp->sk1u);
 }
 
 /* UDP encapsulation receive handler. See net/ipv4/udp.c.
@@ -288,10 +320,8 @@ static void gtp_encap_destroy(struct sock *sk)
  */
 static int gtp_encap_recv(struct sock *sk, struct sk_buff *skb)
 {
-       struct pcpu_sw_netstats *stats;
        struct gtp_dev *gtp;
-       bool xnet;
-       int ret;
+       int ret = 0;
 
        gtp = rcu_dereference_sk_user_data(sk);
        if (!gtp)
@@ -299,16 +329,14 @@ static int gtp_encap_recv(struct sock *sk, struct sk_buff *skb)
 
        netdev_dbg(gtp->dev, "encap_recv sk=%p\n", sk);
 
-       xnet = !net_eq(sock_net(sk), dev_net(gtp->dev));
-
        switch (udp_sk(sk)->encap_type) {
        case UDP_ENCAP_GTP0:
                netdev_dbg(gtp->dev, "received GTP0 packet\n");
-               ret = gtp0_udp_encap_recv(gtp, skb, xnet);
+               ret = gtp0_udp_encap_recv(gtp, skb);
                break;
        case UDP_ENCAP_GTP1U:
                netdev_dbg(gtp->dev, "received GTP1U packet\n");
-               ret = gtp1u_udp_encap_recv(gtp, skb, xnet);
+               ret = gtp1u_udp_encap_recv(gtp, skb);
                break;
        default:
                ret = -1; /* Shouldn't happen. */
@@ -317,33 +345,17 @@ static int gtp_encap_recv(struct sock *sk, struct sk_buff *skb)
        switch (ret) {
        case 1:
                netdev_dbg(gtp->dev, "pass up to the process\n");
-               return 1;
+               break;
        case 0:
-               netdev_dbg(gtp->dev, "forwarding packet from GGSN to uplink\n");
                break;
        case -1:
                netdev_dbg(gtp->dev, "GTP packet has been dropped\n");
                kfree_skb(skb);
-               return 0;
+               ret = 0;
+               break;
        }
 
-       /* Now that the UDP and the GTP header have been removed, set up the
-        * new network header. This is required by the upper layer to
-        * calculate the transport header.
-        */
-       skb_reset_network_header(skb);
-
-       skb->dev = gtp->dev;
-
-       stats = this_cpu_ptr(gtp->dev->tstats);
-       u64_stats_update_begin(&stats->syncp);
-       stats->rx_packets++;
-       stats->rx_bytes += skb->len;
-       u64_stats_update_end(&stats->syncp);
-
-       netif_rx(skb);
-
-       return 0;
+       return ret;
 }
 
 static int gtp_dev_init(struct net_device *dev)
@@ -367,8 +379,9 @@ static void gtp_dev_uninit(struct net_device *dev)
        free_percpu(dev->tstats);
 }
 
-static struct rtable *ip4_route_output_gtp(struct net *net, struct flowi4 *fl4,
-                                          const struct sock *sk, __be32 daddr)
+static struct rtable *ip4_route_output_gtp(struct flowi4 *fl4,
+                                          const struct sock *sk,
+                                          __be32 daddr)
 {
        memset(fl4, 0, sizeof(*fl4));
        fl4->flowi4_oif         = sk->sk_bound_dev_if;
@@ -377,7 +390,7 @@ static struct rtable *ip4_route_output_gtp(struct net *net, struct flowi4 *fl4,
        fl4->flowi4_tos         = RT_CONN_FLAGS(sk);
        fl4->flowi4_proto       = sk->sk_protocol;
 
-       return ip_route_output_key(net, fl4);
+       return ip_route_output_key(sock_net(sk), fl4);
 }
 
 static inline void gtp0_push_header(struct sk_buff *skb, struct pdp_ctx *pctx)
@@ -466,7 +479,6 @@ static int gtp_build_skb_ip4(struct sk_buff *skb, struct net_device *dev,
        struct rtable *rt;
        struct flowi4 fl4;
        struct iphdr *iph;
-       struct sock *sk;
        __be16 df;
        int mtu;
 
@@ -474,7 +486,11 @@ static int gtp_build_skb_ip4(struct sk_buff *skb, struct net_device *dev,
         * Prepend PDP header with TEI/TID from PDP ctx.
         */
        iph = ip_hdr(skb);
-       pctx = ipv4_pdp_find(gtp, iph->daddr);
+       if (gtp->role == GTP_ROLE_SGSN)
+               pctx = ipv4_pdp_find(gtp, iph->saddr);
+       else
+               pctx = ipv4_pdp_find(gtp, iph->daddr);
+
        if (!pctx) {
                netdev_dbg(dev, "no PDP ctx found for %pI4, skip\n",
                           &iph->daddr);
@@ -482,40 +498,17 @@ static int gtp_build_skb_ip4(struct sk_buff *skb, struct net_device *dev,
        }
        netdev_dbg(dev, "found PDP context %p\n", pctx);
 
-       switch (pctx->gtp_version) {
-       case GTP_V0:
-               if (gtp->sock0)
-                       sk = gtp->sock0->sk;
-               else
-                       sk = NULL;
-               break;
-       case GTP_V1:
-               if (gtp->sock1u)
-                       sk = gtp->sock1u->sk;
-               else
-                       sk = NULL;
-               break;
-       default:
-               return -ENOENT;
-       }
-
-       if (!sk) {
-               netdev_dbg(dev, "no userspace socket is available, skip\n");
-               return -ENOENT;
-       }
-
-       rt = ip4_route_output_gtp(sock_net(sk), &fl4, gtp->sock0->sk,
-                                 pctx->sgsn_addr_ip4.s_addr);
+       rt = ip4_route_output_gtp(&fl4, pctx->sk, pctx->peer_addr_ip4.s_addr);
        if (IS_ERR(rt)) {
                netdev_dbg(dev, "no route to SSGN %pI4\n",
-                          &pctx->sgsn_addr_ip4.s_addr);
+                          &pctx->peer_addr_ip4.s_addr);
                dev->stats.tx_carrier_errors++;
                goto err;
        }
 
        if (rt->dst.dev == dev) {
                netdev_dbg(dev, "circular route to SSGN %pI4\n",
-                          &pctx->sgsn_addr_ip4.s_addr);
+                          &pctx->peer_addr_ip4.s_addr);
                dev->stats.collisions++;
                goto err_rt;
        }
@@ -550,7 +543,7 @@ static int gtp_build_skb_ip4(struct sk_buff *skb, struct net_device *dev,
                goto err_rt;
        }
 
-       gtp_set_pktinfo_ipv4(pktinfo, sk, iph, pctx, rt, &fl4, dev);
+       gtp_set_pktinfo_ipv4(pktinfo, pctx->sk, iph, pctx, rt, &fl4, dev);
        gtp_push_header(skb, pktinfo);
 
        return 0;
@@ -640,27 +633,23 @@ static void gtp_link_setup(struct net_device *dev)
 
 static int gtp_hashtable_new(struct gtp_dev *gtp, int hsize);
 static void gtp_hashtable_free(struct gtp_dev *gtp);
-static int gtp_encap_enable(struct net_device *dev, struct gtp_dev *gtp,
-                           int fd_gtp0, int fd_gtp1);
+static int gtp_encap_enable(struct gtp_dev *gtp, struct nlattr *data[]);
 
 static int gtp_newlink(struct net *src_net, struct net_device *dev,
                        struct nlattr *tb[], struct nlattr *data[])
 {
-       int hashsize, err, fd0, fd1;
        struct gtp_dev *gtp;
        struct gtp_net *gn;
+       int hashsize, err;
 
-       if (!data[IFLA_GTP_FD0] || !data[IFLA_GTP_FD1])
+       if (!data[IFLA_GTP_FD0] && !data[IFLA_GTP_FD1])
                return -EINVAL;
 
        gtp = netdev_priv(dev);
 
-       fd0 = nla_get_u32(data[IFLA_GTP_FD0]);
-       fd1 = nla_get_u32(data[IFLA_GTP_FD1]);
-
-       err = gtp_encap_enable(dev, gtp, fd0, fd1);
+       err = gtp_encap_enable(gtp, data);
        if (err < 0)
-               goto out_err;
+               return err;
 
        if (!data[IFLA_GTP_PDP_HASHSIZE])
                hashsize = 1024;
@@ -688,7 +677,6 @@ out_hashtable:
        gtp_hashtable_free(gtp);
 out_encap:
        gtp_encap_disable(gtp);
-out_err:
        return err;
 }
 
@@ -706,6 +694,7 @@ static const struct nla_policy gtp_policy[IFLA_GTP_MAX + 1] = {
        [IFLA_GTP_FD0]                  = { .type = NLA_U32 },
        [IFLA_GTP_FD1]                  = { .type = NLA_U32 },
        [IFLA_GTP_PDP_HASHSIZE]         = { .type = NLA_U32 },
+       [IFLA_GTP_ROLE]                 = { .type = NLA_U32 },
 };
 
 static int gtp_validate(struct nlattr *tb[], struct nlattr *data[])
@@ -747,21 +736,6 @@ static struct rtnl_link_ops gtp_link_ops __read_mostly = {
        .fill_info      = gtp_fill_info,
 };
 
-static struct net *gtp_genl_get_net(struct net *src_net, struct nlattr *tb[])
-{
-       struct net *net;
-
-       /* Examine the link attributes and figure out which network namespace
-        * we are talking about.
-        */
-       if (tb[GTPA_NET_NS_FD])
-               net = get_net_ns_by_fd(nla_get_u32(tb[GTPA_NET_NS_FD]));
-       else
-               net = get_net(src_net);
-
-       return net;
-}
-
 static int gtp_hashtable_new(struct gtp_dev *gtp, int hsize)
 {
        int i;
@@ -791,93 +765,127 @@ static void gtp_hashtable_free(struct gtp_dev *gtp)
        struct pdp_ctx *pctx;
        int i;
 
-       for (i = 0; i < gtp->hash_size; i++) {
-               hlist_for_each_entry_rcu(pctx, &gtp->tid_hash[i], hlist_tid) {
-                       hlist_del_rcu(&pctx->hlist_tid);
-                       hlist_del_rcu(&pctx->hlist_addr);
-                       kfree_rcu(pctx, rcu_head);
-               }
-       }
+       for (i = 0; i < gtp->hash_size; i++)
+               hlist_for_each_entry_rcu(pctx, &gtp->tid_hash[i], hlist_tid)
+                       pdp_context_delete(pctx);
+
        synchronize_rcu();
        kfree(gtp->addr_hash);
        kfree(gtp->tid_hash);
 }
 
-static int gtp_encap_enable(struct net_device *dev, struct gtp_dev *gtp,
-                           int fd_gtp0, int fd_gtp1)
+static struct sock *gtp_encap_enable_socket(int fd, int type,
+                                           struct gtp_dev *gtp)
 {
        struct udp_tunnel_sock_cfg tuncfg = {NULL};
-       struct socket *sock0, *sock1u;
+       struct socket *sock;
+       struct sock *sk;
        int err;
 
-       netdev_dbg(dev, "enable gtp on %d, %d\n", fd_gtp0, fd_gtp1);
-
-       sock0 = sockfd_lookup(fd_gtp0, &err);
-       if (sock0 == NULL) {
-               netdev_dbg(dev, "socket fd=%d not found (gtp0)\n", fd_gtp0);
-               return -ENOENT;
-       }
+       pr_debug("enable gtp on %d, %d\n", fd, type);
 
-       if (sock0->sk->sk_protocol != IPPROTO_UDP) {
-               netdev_dbg(dev, "socket fd=%d not UDP\n", fd_gtp0);
-               err = -EINVAL;
-               goto err1;
+       sock = sockfd_lookup(fd, &err);
+       if (!sock) {
+               pr_debug("gtp socket fd=%d not found\n", fd);
+               return NULL;
        }
 
-       sock1u = sockfd_lookup(fd_gtp1, &err);
-       if (sock1u == NULL) {
-               netdev_dbg(dev, "socket fd=%d not found (gtp1u)\n", fd_gtp1);
-               err = -ENOENT;
-               goto err1;
+       if (sock->sk->sk_protocol != IPPROTO_UDP) {
+               pr_debug("socket fd=%d not UDP\n", fd);
+               sk = ERR_PTR(-EINVAL);
+               goto out_sock;
        }
 
-       if (sock1u->sk->sk_protocol != IPPROTO_UDP) {
-               netdev_dbg(dev, "socket fd=%d not UDP\n", fd_gtp1);
-               err = -EINVAL;
-               goto err2;
+       if (rcu_dereference_sk_user_data(sock->sk)) {
+               sk = ERR_PTR(-EBUSY);
+               goto out_sock;
        }
 
-       netdev_dbg(dev, "enable gtp on %p, %p\n", sock0, sock1u);
-
-       gtp->sock0 = sock0;
-       gtp->sock1u = sock1u;
+       sk = sock->sk;
+       sock_hold(sk);
 
        tuncfg.sk_user_data = gtp;
+       tuncfg.encap_type = type;
        tuncfg.encap_rcv = gtp_encap_recv;
        tuncfg.encap_destroy = gtp_encap_destroy;
 
-       tuncfg.encap_type = UDP_ENCAP_GTP0;
-       setup_udp_tunnel_sock(sock_net(gtp->sock0->sk), gtp->sock0, &tuncfg);
-
-       tuncfg.encap_type = UDP_ENCAP_GTP1U;
-       setup_udp_tunnel_sock(sock_net(gtp->sock1u->sk), gtp->sock1u, &tuncfg);
+       setup_udp_tunnel_sock(sock_net(sock->sk), sock, &tuncfg);
 
-       err = 0;
-err2:
-       sockfd_put(sock1u);
-err1:
-       sockfd_put(sock0);
-       return err;
+out_sock:
+       sockfd_put(sock);
+       return sk;
 }
 
-static struct net_device *gtp_find_dev(struct net *net, int ifindex)
+static int gtp_encap_enable(struct gtp_dev *gtp, struct nlattr *data[])
 {
-       struct gtp_net *gn = net_generic(net, gtp_net_id);
-       struct gtp_dev *gtp;
+       struct sock *sk1u = NULL;
+       struct sock *sk0 = NULL;
+       unsigned int role = GTP_ROLE_GGSN;
 
-       list_for_each_entry_rcu(gtp, &gn->gtp_dev_list, list) {
-               if (ifindex == gtp->dev->ifindex)
-                       return gtp->dev;
+       if (data[IFLA_GTP_FD0]) {
+               u32 fd0 = nla_get_u32(data[IFLA_GTP_FD0]);
+
+               sk0 = gtp_encap_enable_socket(fd0, UDP_ENCAP_GTP0, gtp);
+               if (IS_ERR(sk0))
+                       return PTR_ERR(sk0);
        }
-       return NULL;
+
+       if (data[IFLA_GTP_FD1]) {
+               u32 fd1 = nla_get_u32(data[IFLA_GTP_FD1]);
+
+               sk1u = gtp_encap_enable_socket(fd1, UDP_ENCAP_GTP1U, gtp);
+               if (IS_ERR(sk1u)) {
+                       if (sk0)
+                               gtp_encap_disable_sock(sk0);
+                       return PTR_ERR(sk1u);
+               }
+       }
+
+       if (data[IFLA_GTP_ROLE]) {
+               role = nla_get_u32(data[IFLA_GTP_ROLE]);
+               if (role > GTP_ROLE_SGSN)
+                       return -EINVAL;
+       }
+
+       gtp->sk0 = sk0;
+       gtp->sk1u = sk1u;
+       gtp->role = role;
+
+       return 0;
+}
+
+static struct gtp_dev *gtp_find_dev(struct net *src_net, struct nlattr *nla[])
+{
+       struct gtp_dev *gtp = NULL;
+       struct net_device *dev;
+       struct net *net;
+
+       /* Examine the link attributes and figure out which network namespace
+        * we are talking about.
+        */
+       if (nla[GTPA_NET_NS_FD])
+               net = get_net_ns_by_fd(nla_get_u32(nla[GTPA_NET_NS_FD]));
+       else
+               net = get_net(src_net);
+
+       if (IS_ERR(net))
+               return NULL;
+
+       /* Check if there's an existing gtpX device to configure */
+       dev = dev_get_by_index_rcu(net, nla_get_u32(nla[GTPA_LINK]));
+       if (dev && dev->netdev_ops == &gtp_netdev_ops)
+               gtp = netdev_priv(dev);
+
+       put_net(net);
+       return gtp;
 }
 
 static void ipv4_pdp_fill(struct pdp_ctx *pctx, struct genl_info *info)
 {
        pctx->gtp_version = nla_get_u32(info->attrs[GTPA_VERSION]);
        pctx->af = AF_INET;
-       pctx->sgsn_addr_ip4.s_addr =
-               nla_get_be32(info->attrs[GTPA_SGSN_ADDRESS]);
+       pctx->peer_addr_ip4.s_addr =
+               nla_get_be32(info->attrs[GTPA_PEER_ADDRESS]);
        pctx->ms_addr_ip4.s_addr =
                nla_get_be32(info->attrs[GTPA_MS_ADDRESS]);
 
@@ -899,9 +907,10 @@ static void ipv4_pdp_fill(struct pdp_ctx *pctx, struct genl_info *info)
        }
 }
 
-static int ipv4_pdp_add(struct net_device *dev, struct genl_info *info)
+static int ipv4_pdp_add(struct gtp_dev *gtp, struct sock *sk,
+                       struct genl_info *info)
 {
-       struct gtp_dev *gtp = netdev_priv(dev);
+       struct net_device *dev = gtp->dev;
        u32 hash_ms, hash_tid = 0;
        struct pdp_ctx *pctx;
        bool found = false;
@@ -940,6 +949,9 @@ static int ipv4_pdp_add(struct net_device *dev, struct genl_info *info)
        if (pctx == NULL)
                return -ENOMEM;
 
+       sock_hold(sk);
+       pctx->sk = sk;
+       pctx->dev = gtp->dev;
        ipv4_pdp_fill(pctx, info);
        atomic_set(&pctx->tx_seq, 0);
 
@@ -963,31 +975,50 @@ static int ipv4_pdp_add(struct net_device *dev, struct genl_info *info)
        switch (pctx->gtp_version) {
        case GTP_V0:
                netdev_dbg(dev, "GTPv0-U: new PDP ctx id=%llx ssgn=%pI4 ms=%pI4 (pdp=%p)\n",
-                          pctx->u.v0.tid, &pctx->sgsn_addr_ip4,
+                          pctx->u.v0.tid, &pctx->peer_addr_ip4,
                           &pctx->ms_addr_ip4, pctx);
                break;
        case GTP_V1:
                netdev_dbg(dev, "GTPv1-U: new PDP ctx id=%x/%x ssgn=%pI4 ms=%pI4 (pdp=%p)\n",
                           pctx->u.v1.i_tei, pctx->u.v1.o_tei,
-                          &pctx->sgsn_addr_ip4, &pctx->ms_addr_ip4, pctx);
+                          &pctx->peer_addr_ip4, &pctx->ms_addr_ip4, pctx);
                break;
        }
 
        return 0;
 }
 
+static void pdp_context_free(struct rcu_head *head)
+{
+       struct pdp_ctx *pctx = container_of(head, struct pdp_ctx, rcu_head);
+
+       sock_put(pctx->sk);
+       kfree(pctx);
+}
+
+static void pdp_context_delete(struct pdp_ctx *pctx)
+{
+       hlist_del_rcu(&pctx->hlist_tid);
+       hlist_del_rcu(&pctx->hlist_addr);
+       call_rcu(&pctx->rcu_head, pdp_context_free);
+}
+
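A PDP context now pins its GTP socket: ipv4_pdp_add() takes a reference with sock_hold(), and pdp_context_delete() defers the matching sock_put() past an RCU grace period via call_rcu(), so the receive path can keep dereferencing pctx->sk under rcu_read_lock() while a context is being torn down. The ownership pattern, reduced to a sketch (the struct is hypothetical; the gtp code applies the same shape to struct pdp_ctx):

    #include <linux/rcupdate.h>
    #include <linux/slab.h>
    #include <net/sock.h>

    struct obj {
            struct sock *sk;        /* ref taken with sock_hold() at creation */
            struct rcu_head rcu_head;
    };

    static void obj_free(struct rcu_head *head)
    {
            struct obj *o = container_of(head, struct obj, rcu_head);

            sock_put(o->sk);        /* drop the creation-time reference */
            kfree(o);
    }

    static void obj_delete(struct obj *o)
    {
            /* Unlink from RCU-protected lists first (not shown), then defer
             * the free so in-flight readers can finish safely. */
            call_rcu(&o->rcu_head, obj_free);
    }
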
 static int gtp_genl_new_pdp(struct sk_buff *skb, struct genl_info *info)
 {
-       struct net_device *dev;
-       struct net *net;
+       unsigned int version;
+       struct gtp_dev *gtp;
+       struct sock *sk;
+       int err;
 
        if (!info->attrs[GTPA_VERSION] ||
            !info->attrs[GTPA_LINK] ||
-           !info->attrs[GTPA_SGSN_ADDRESS] ||
+           !info->attrs[GTPA_PEER_ADDRESS] ||
            !info->attrs[GTPA_MS_ADDRESS])
                return -EINVAL;
 
-       switch (nla_get_u32(info->attrs[GTPA_VERSION])) {
+       version = nla_get_u32(info->attrs[GTPA_VERSION]);
+
+       switch (version) {
        case GTP_V0:
                if (!info->attrs[GTPA_TID] ||
                    !info->attrs[GTPA_FLOW])
@@ -1003,77 +1034,101 @@ static int gtp_genl_new_pdp(struct sk_buff *skb, struct genl_info *info)
                return -EINVAL;
        }
 
-       net = gtp_genl_get_net(sock_net(skb->sk), info->attrs);
-       if (IS_ERR(net))
-               return PTR_ERR(net);
+       rcu_read_lock();
 
-       /* Check if there's an existing gtpX device to configure */
-       dev = gtp_find_dev(net, nla_get_u32(info->attrs[GTPA_LINK]));
-       if (dev == NULL) {
-               put_net(net);
-               return -ENODEV;
+       gtp = gtp_find_dev(sock_net(skb->sk), info->attrs);
+       if (!gtp) {
+               err = -ENODEV;
+               goto out_unlock;
        }
-       put_net(net);
 
-       return ipv4_pdp_add(dev, info);
+       if (version == GTP_V0)
+               sk = gtp->sk0;
+       else if (version == GTP_V1)
+               sk = gtp->sk1u;
+       else
+               sk = NULL;
+
+       if (!sk) {
+               err = -ENODEV;
+               goto out_unlock;
+       }
+
+       err = ipv4_pdp_add(gtp, sk, info);
+
+out_unlock:
+       rcu_read_unlock();
+       return err;
 }
 
-static int gtp_genl_del_pdp(struct sk_buff *skb, struct genl_info *info)
+static struct pdp_ctx *gtp_find_pdp_by_link(struct net *net,
+                                           struct nlattr *nla[])
 {
-       struct net_device *dev;
-       struct pdp_ctx *pctx;
        struct gtp_dev *gtp;
-       struct net *net;
 
-       if (!info->attrs[GTPA_VERSION] ||
-           !info->attrs[GTPA_LINK])
-               return -EINVAL;
+       gtp = gtp_find_dev(net, nla);
+       if (!gtp)
+               return ERR_PTR(-ENODEV);
 
-       net = gtp_genl_get_net(sock_net(skb->sk), info->attrs);
-       if (IS_ERR(net))
-               return PTR_ERR(net);
+       if (nla[GTPA_MS_ADDRESS]) {
+               __be32 ip = nla_get_be32(nla[GTPA_MS_ADDRESS]);
 
-       /* Check if there's an existing gtpX device to configure */
-       dev = gtp_find_dev(net, nla_get_u32(info->attrs[GTPA_LINK]));
-       if (dev == NULL) {
-               put_net(net);
-               return -ENODEV;
+               return ipv4_pdp_find(gtp, ip);
+       } else if (nla[GTPA_VERSION]) {
+               u32 gtp_version = nla_get_u32(nla[GTPA_VERSION]);
+
+               if (gtp_version == GTP_V0 && nla[GTPA_TID])
+                       return gtp0_pdp_find(gtp, nla_get_u64(nla[GTPA_TID]));
+               else if (gtp_version == GTP_V1 && nla[GTPA_I_TEI])
+                       return gtp1_pdp_find(gtp, nla_get_u32(nla[GTPA_I_TEI]));
        }
-       put_net(net);
 
-       gtp = netdev_priv(dev);
+       return ERR_PTR(-EINVAL);
+}
 
-       switch (nla_get_u32(info->attrs[GTPA_VERSION])) {
-       case GTP_V0:
-               if (!info->attrs[GTPA_TID])
-                       return -EINVAL;
-               pctx = gtp0_pdp_find(gtp, nla_get_u64(info->attrs[GTPA_TID]));
-               break;
-       case GTP_V1:
-               if (!info->attrs[GTPA_I_TEI])
-                       return -EINVAL;
-               pctx = gtp1_pdp_find(gtp, nla_get_u64(info->attrs[GTPA_I_TEI]));
-               break;
+static struct pdp_ctx *gtp_find_pdp(struct net *net, struct nlattr *nla[])
+{
+       struct pdp_ctx *pctx;
 
-       default:
+       if (nla[GTPA_LINK])
+               pctx = gtp_find_pdp_by_link(net, nla);
+       else
+               pctx = ERR_PTR(-EINVAL);
+
+       if (!pctx)
+               pctx = ERR_PTR(-ENOENT);
+
+       return pctx;
+}
+
+static int gtp_genl_del_pdp(struct sk_buff *skb, struct genl_info *info)
+{
+       struct pdp_ctx *pctx;
+       int err = 0;
+
+       if (!info->attrs[GTPA_VERSION])
                return -EINVAL;
-       }
 
-       if (pctx == NULL)
-               return -ENOENT;
+       rcu_read_lock();
+
+       pctx = gtp_find_pdp(sock_net(skb->sk), info->attrs);
+       if (IS_ERR(pctx)) {
+               err = PTR_ERR(pctx);
+               goto out_unlock;
+       }
 
        if (pctx->gtp_version == GTP_V0)
-               netdev_dbg(dev, "GTPv0-U: deleting tunnel id = %llx (pdp %p)\n",
+               netdev_dbg(pctx->dev, "GTPv0-U: deleting tunnel id = %llx (pdp %p)\n",
                           pctx->u.v0.tid, pctx);
        else if (pctx->gtp_version == GTP_V1)
-               netdev_dbg(dev, "GTPv1-U: deleting tunnel id = %x/%x (pdp %p)\n",
+               netdev_dbg(pctx->dev, "GTPv1-U: deleting tunnel id = %x/%x (pdp %p)\n",
                           pctx->u.v1.i_tei, pctx->u.v1.o_tei, pctx);
 
-       hlist_del_rcu(&pctx->hlist_tid);
-       hlist_del_rcu(&pctx->hlist_addr);
-       kfree_rcu(pctx, rcu_head);
+       pdp_context_delete(pctx);
 
-       return 0;
+out_unlock:
+       rcu_read_unlock();
+       return err;
 }
 
 static struct genl_family gtp_genl_family;
@@ -1089,7 +1144,7 @@ static int gtp_genl_fill_info(struct sk_buff *skb, u32 snd_portid, u32 snd_seq,
                goto nlmsg_failure;
 
        if (nla_put_u32(skb, GTPA_VERSION, pctx->gtp_version) ||
-           nla_put_be32(skb, GTPA_SGSN_ADDRESS, pctx->sgsn_addr_ip4.s_addr) ||
+           nla_put_be32(skb, GTPA_PEER_ADDRESS, pctx->peer_addr_ip4.s_addr) ||
            nla_put_be32(skb, GTPA_MS_ADDRESS, pctx->ms_addr_ip4.s_addr))
                goto nla_put_failure;
 
@@ -1117,59 +1172,17 @@ nla_put_failure:
 static int gtp_genl_get_pdp(struct sk_buff *skb, struct genl_info *info)
 {
        struct pdp_ctx *pctx = NULL;
-       struct net_device *dev;
        struct sk_buff *skb2;
-       struct gtp_dev *gtp;
-       u32 gtp_version;
-       struct net *net;
        int err;
 
-       if (!info->attrs[GTPA_VERSION] ||
-           !info->attrs[GTPA_LINK])
-               return -EINVAL;
-
-       gtp_version = nla_get_u32(info->attrs[GTPA_VERSION]);
-       switch (gtp_version) {
-       case GTP_V0:
-       case GTP_V1:
-               break;
-       default:
+       if (!info->attrs[GTPA_VERSION])
                return -EINVAL;
-       }
-
-       net = gtp_genl_get_net(sock_net(skb->sk), info->attrs);
-       if (IS_ERR(net))
-               return PTR_ERR(net);
-
-       /* Check if there's an existing gtpX device to configure */
-       dev = gtp_find_dev(net, nla_get_u32(info->attrs[GTPA_LINK]));
-       if (dev == NULL) {
-               put_net(net);
-               return -ENODEV;
-       }
-       put_net(net);
-
-       gtp = netdev_priv(dev);
 
        rcu_read_lock();
-       if (gtp_version == GTP_V0 &&
-           info->attrs[GTPA_TID]) {
-               u64 tid = nla_get_u64(info->attrs[GTPA_TID]);
-
-               pctx = gtp0_pdp_find(gtp, tid);
-       } else if (gtp_version == GTP_V1 &&
-                info->attrs[GTPA_I_TEI]) {
-               u32 tid = nla_get_u32(info->attrs[GTPA_I_TEI]);
-
-               pctx = gtp1_pdp_find(gtp, tid);
-       } else if (info->attrs[GTPA_MS_ADDRESS]) {
-               __be32 ip = nla_get_be32(info->attrs[GTPA_MS_ADDRESS]);
-
-               pctx = ipv4_pdp_find(gtp, ip);
-       }
 
-       if (pctx == NULL) {
-               err = -ENOENT;
+       pctx = gtp_find_pdp(sock_net(skb->sk), info->attrs);
+       if (IS_ERR(pctx)) {
+               err = PTR_ERR(pctx);
                goto err_unlock;
        }
 
@@ -1242,7 +1255,7 @@ static struct nla_policy gtp_genl_policy[GTPA_MAX + 1] = {
        [GTPA_LINK]             = { .type = NLA_U32, },
        [GTPA_VERSION]          = { .type = NLA_U32, },
        [GTPA_TID]              = { .type = NLA_U64, },
-       [GTPA_SGSN_ADDRESS]     = { .type = NLA_U32, },
+       [GTPA_PEER_ADDRESS]     = { .type = NLA_U32, },
        [GTPA_MS_ADDRESS]       = { .type = NLA_U32, },
        [GTPA_FLOW]             = { .type = NLA_U16, },
        [GTPA_NET_NS_FD]        = { .type = NLA_U32, },
index 7433b164e51356d86f27a2605911eb90f1d7c18c..4747ad48b3cc5278b510306449a2fea7e52115c5 100644
@@ -686,7 +686,7 @@ struct net_device_context {
        /* point back to our device context */
        struct hv_device *device_ctx;
        /* netvsc_device */
-       struct netvsc_device *nvdev;
+       struct netvsc_device __rcu *nvdev;
        /* reconfigure work */
        struct delayed_work dwork;
        /* last reconfig time */
@@ -701,14 +701,13 @@ struct net_device_context {
 
        u32 tx_checksum_mask;
 
+       u32 tx_send_table[VRSS_SEND_TAB_SIZE];
+
        /* Ethtool settings */
        u8 duplex;
        u32 speed;
        struct netvsc_ethtool_stats eth_stats;
 
-       /* the device is going away */
-       bool start_remove;
-
        /* State to manage the associated VF interface. */
        struct net_device __rcu *vf_netdev;
 
@@ -721,6 +720,7 @@ struct net_device_context {
 /* Per channel data */
 struct netvsc_channel {
        struct vmbus_channel *channel;
+       const struct vmpacket_descriptor *desc;
        struct napi_struct napi;
        struct multi_send_data msd;
        struct multi_recv_comp mrc;
@@ -759,11 +759,10 @@ struct netvsc_device {
 
        struct nvsp_message revoke_packet;
 
-       u32 send_table[VRSS_SEND_TAB_SIZE];
        u32 max_chn;
        u32 num_chn;
-       spinlock_t sc_lock; /* Protects num_sc_offered variable */
-       u32 num_sc_offered;
+
+       refcount_t sc_offered;
 
        /* Holds rndis device info */
        void *extension;
@@ -778,6 +777,8 @@ struct netvsc_device {
        atomic_t open_cnt;
 
        struct netvsc_channel chan_table[VRSS_CHANNEL_MAX];
+
+       struct rcu_head rcu;
 };
 
 static inline struct netvsc_device *
@@ -1426,9 +1427,6 @@ struct rndis_message {
        ((void *) rndis_msg)
 
 
-#define __struct_bcount(x)
-
-
 
 #define RNDIS_HEADER_SIZE      (sizeof(struct rndis_message) - \
                                 sizeof(union rndis_message_container))
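
In hyperv_net.h the cached netvsc_device pointer becomes __rcu-annotated, which moves every access over to the RCU accessors: writers publish with rcu_assign_pointer() (or RCU_INIT_POINTER() for NULL, as netvsc_device_remove() does below), and readers must use rcu_dereference() under rcu_read_lock(); sparse flags direct dereferences. A sketch of the reader side (the helper name is illustrative):

    /* Caller must hold rcu_read_lock(); sparse rejects a plain
     * ndc->nvdev dereference once the member is marked __rcu. */
    static struct netvsc_device *example_get_nvdev(struct net_device *ndev)
    {
            struct net_device_context *ndc = netdev_priv(ndev);

            return rcu_dereference(ndc->nvdev);
    }
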
index b1328cef9d5a0064473d4a4496b09b4cfc865ae3..fd21d5aab580072c74c4e1efc219ebfcd98706a6 100644
@@ -80,8 +80,10 @@ static struct netvsc_device *alloc_net_device(void)
        return net_device;
 }
 
-static void free_netvsc_device(struct netvsc_device *nvdev)
+static void free_netvsc_device(struct rcu_head *head)
 {
+       struct netvsc_device *nvdev
+               = container_of(head, struct netvsc_device, rcu);
        int i;
 
        for (i = 0; i < VRSS_CHANNEL_MAX; i++)
@@ -90,14 +92,9 @@ static void free_netvsc_device(struct netvsc_device *nvdev)
        kfree(nvdev);
 }
 
-
-static inline bool netvsc_channel_idle(const struct netvsc_device *net_device,
-                                      u16 q_idx)
+static void free_netvsc_device_rcu(struct netvsc_device *nvdev)
 {
-       const struct netvsc_channel *nvchan = &net_device->chan_table[q_idx];
-
-       return atomic_read(&net_device->num_outstanding_recvs) == 0 &&
-               atomic_read(&nvchan->queue_sends) == 0;
+       call_rcu(&nvdev->rcu, free_netvsc_device);
 }
 
 static struct netvsc_device *get_outbound_net_device(struct hv_device *device)
@@ -560,7 +557,7 @@ void netvsc_device_remove(struct hv_device *device)
 
        netvsc_disconnect_vsp(device);
 
-       net_device_ctx->nvdev = NULL;
+       RCU_INIT_POINTER(net_device_ctx->nvdev, NULL);
 
        /*
         * At this point, no one should be accessing net_device
@@ -571,11 +568,11 @@ void netvsc_device_remove(struct hv_device *device)
        /* Now, we can close the channel safely */
        vmbus_close(device->channel);
 
-       for (i = 0; i < VRSS_CHANNEL_MAX; i++)
-               napi_disable(&net_device->chan_table[0].napi);
+       for (i = 0; i < net_device->num_chn; i++)
+               napi_disable(&net_device->chan_table[i].napi);
 
        /* Release all resources */
-       free_netvsc_device(net_device);
+       free_netvsc_device_rcu(net_device);
 }
 
 #define RING_AVAIL_PERCENT_HIWATER 20
@@ -604,11 +601,11 @@ static inline void netvsc_free_send_slot(struct netvsc_device *net_device,
 static void netvsc_send_tx_complete(struct netvsc_device *net_device,
                                    struct vmbus_channel *incoming_channel,
                                    struct hv_device *device,
-                                   const struct vmpacket_descriptor *desc)
+                                   const struct vmpacket_descriptor *desc,
+                                   int budget)
 {
        struct sk_buff *skb = (struct sk_buff *)(unsigned long)desc->trans_id;
        struct net_device *ndev = hv_get_drvdata(device);
-       struct net_device_context *net_device_ctx = netdev_priv(ndev);
        struct vmbus_channel *channel = device->channel;
        u16 q_idx = 0;
        int queue_sends;
@@ -632,7 +629,7 @@ static void netvsc_send_tx_complete(struct netvsc_device *net_device,
                tx_stats->bytes += packet->total_bytes;
                u64_stats_update_end(&tx_stats->syncp);
 
-               dev_consume_skb_any(skb);
+               napi_consume_skb(skb, budget);
        }
 
        queue_sends =
@@ -642,7 +639,6 @@ static void netvsc_send_tx_complete(struct netvsc_device *net_device,
                wake_up(&net_device->wait_drain);
 
        if (netif_tx_queue_stopped(netdev_get_tx_queue(ndev, q_idx)) &&
-           !net_device_ctx->start_remove &&
            (hv_ringbuf_avail_percent(&channel->outbound) > RING_AVAIL_PERCENT_HIWATER ||
             queue_sends < 1))
                netif_tx_wake_queue(netdev_get_tx_queue(ndev, q_idx));
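
netvsc_send_tx_complete() now frees completed skbs with napi_consume_skb(), with the NAPI budget threaded down from the poll path. With a non-zero budget the skb can be recycled through the per-CPU NAPI cache (cheap bulk freeing); a budget of 0 signals non-NAPI context, where the call degrades to the equivalent of dev_consume_skb_any(). Roughly:

    /* Inside a NAPI poll handler: may batch via the NAPI skb cache. */
    napi_consume_skb(skb, budget);

    /* Outside NAPI (budget 0): behaves like dev_consume_skb_any(). */
    napi_consume_skb(skb, 0);
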
@@ -651,7 +647,8 @@ static void netvsc_send_tx_complete(struct netvsc_device *net_device,
 static void netvsc_send_completion(struct netvsc_device *net_device,
                                   struct vmbus_channel *incoming_channel,
                                   struct hv_device *device,
-                                  const struct vmpacket_descriptor *desc)
+                                  const struct vmpacket_descriptor *desc,
+                                  int budget)
 {
        struct nvsp_message *nvsp_packet = hv_pkt_data(desc);
        struct net_device *ndev = hv_get_drvdata(device);
@@ -669,7 +666,7 @@ static void netvsc_send_completion(struct netvsc_device *net_device,
 
        case NVSP_MSG1_TYPE_SEND_RNDIS_PKT_COMPLETE:
                netvsc_send_tx_complete(net_device, incoming_channel,
-                                       device, desc);
+                                       device, desc, budget);
                break;
 
        default:
@@ -711,8 +708,7 @@ static u32 netvsc_copy_to_send_buf(struct netvsc_device *net_device,
                packet->page_buf_cnt;
 
        /* Add padding */
-       if (skb && skb->xmit_more && remain &&
-           !packet->cp_partial) {
+       if (skb->xmit_more && remain && !packet->cp_partial) {
                padding = net_device->pkt_align - remain;
                rndis_msg->msg_len += padding;
                packet->total_data_buflen += padding;
@@ -870,9 +866,7 @@ int netvsc_send(struct hv_device *device,
        if (msdp->pkt)
                msd_len = msdp->pkt->total_data_buflen;
 
-       try_batch = (skb != NULL) && msd_len > 0 && msdp->count <
-                   net_device->max_pkt;
-
+       try_batch = msd_len > 0 && msdp->count < net_device->max_pkt;
        if (try_batch && msd_len + pktlen + net_device->pkt_align <
            net_device->send_section_size) {
                section_index = msdp->pkt->send_buf_index;
@@ -882,7 +876,7 @@ int netvsc_send(struct hv_device *device,
                section_index = msdp->pkt->send_buf_index;
                packet->cp_partial = true;
 
-       } else if ((skb != NULL) && pktlen + net_device->pkt_align <
+       } else if (pktlen + net_device->pkt_align <
                   net_device->send_section_size) {
                section_index = netvsc_get_next_send_section(net_device);
                if (section_index != NETVSC_INVALID_INDEX) {
@@ -1138,15 +1132,11 @@ static int netvsc_receive(struct net_device *ndev,
 static void netvsc_send_table(struct hv_device *hdev,
                              struct nvsp_message *nvmsg)
 {
-       struct netvsc_device *nvscdev;
        struct net_device *ndev = hv_get_drvdata(hdev);
+       struct net_device_context *net_device_ctx = netdev_priv(ndev);
        int i;
        u32 count, *tab;
 
-       nvscdev = get_outbound_net_device(hdev);
-       if (!nvscdev)
-               return;
-
        count = nvmsg->msg.v5_msg.send_table.count;
        if (count != VRSS_SEND_TAB_SIZE) {
                netdev_err(ndev, "Received wrong send-table size:%u\n", count);
@@ -1157,7 +1147,7 @@ static void netvsc_send_table(struct hv_device *hdev,
                      nvmsg->msg.v5_msg.send_table.offset);
 
        for (i = 0; i < count; i++)
-               nvscdev->send_table[i] = tab[i];
+               net_device_ctx->tx_send_table[i] = tab[i];
 }
 
 static void netvsc_send_vf(struct net_device_context *net_device_ctx,
@@ -1186,15 +1176,16 @@ static int netvsc_process_raw_pkt(struct hv_device *device,
                                  struct vmbus_channel *channel,
                                  struct netvsc_device *net_device,
                                  struct net_device *ndev,
-                                 u64 request_id,
-                                 const struct vmpacket_descriptor *desc)
+                                 const struct vmpacket_descriptor *desc,
+                                 int budget)
 {
        struct net_device_context *net_device_ctx = netdev_priv(ndev);
        struct nvsp_message *nvmsg = hv_pkt_data(desc);
 
        switch (desc->type) {
        case VM_PKT_COMP:
-               netvsc_send_completion(net_device, channel, device, desc);
+               netvsc_send_completion(net_device, channel, device,
+                                      desc, budget);
                break;
 
        case VM_PKT_DATA_USING_XFER_PAGES:
@@ -1208,7 +1199,7 @@ static int netvsc_process_raw_pkt(struct hv_device *device,
 
        default:
                netdev_err(ndev, "unhandled packet type %d, tid %llx\n",
-                          desc->type, request_id);
+                          desc->type, desc->trans_id);
                break;
        }
 
@@ -1222,6 +1213,10 @@ static struct hv_device *netvsc_channel_to_device(struct vmbus_channel *channel)
        return primary ? primary->device_obj : channel->device_obj;
 }
 
+/* Network processing softirq:
+ * process data in the incoming ring buffer from the host.
+ * Stops when the ring is empty or the budget is exhausted.
+ */
 int netvsc_poll(struct napi_struct *napi, int budget)
 {
        struct netvsc_channel *nvchan
@@ -1231,56 +1226,47 @@ int netvsc_poll(struct napi_struct *napi, int budget)
        u16 q_idx = channel->offermsg.offer.sub_channel_index;
        struct net_device *ndev = hv_get_drvdata(device);
        struct netvsc_device *net_device = net_device_to_netvsc_device(ndev);
-       const struct vmpacket_descriptor *desc;
        int work_done = 0;
 
-       desc = hv_pkt_iter_first(channel);
-       while (desc) {
-               int count;
-
-               count = netvsc_process_raw_pkt(device, channel, net_device,
-                                              ndev, desc->trans_id, desc);
-               work_done += count;
-               desc = __hv_pkt_iter_next(channel, desc);
+       /* If starting a new interval */
+       if (!nvchan->desc)
+               nvchan->desc = hv_pkt_iter_first(channel);
 
-               /* If receive packet budget is exhausted, reschedule */
-               if (work_done >= budget) {
-                       work_done = budget;
-                       break;
-               }
+       while (nvchan->desc && work_done < budget) {
+               work_done += netvsc_process_raw_pkt(device, channel, net_device,
+                                                   ndev, nvchan->desc, budget);
+               nvchan->desc = hv_pkt_iter_next(channel, nvchan->desc);
        }
-       hv_pkt_iter_close(channel);
 
-       /* If ring is empty and NAPI is not doing polling */
+       /* If the receive ring was exhausted
+        * and we are not doing busy polling,
+        * then re-enable host interrupts
+        * and reschedule if the ring is not empty.
+        */
        if (work_done < budget &&
            napi_complete_done(napi, work_done) &&
            hv_end_read(&channel->inbound) != 0)
                napi_reschedule(napi);
 
        netvsc_chk_recv_comp(net_device, channel, q_idx);
-       return work_done;
+
+       /* Driver may overshoot since there can be multiple packets per descriptor */
+       return min(work_done, budget);
 }
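The rewritten poll routine follows the canonical NAPI shape: do at most budget units of work, and only complete and re-arm device interrupts when under budget. A minimal sketch, assuming hypothetical my_process_ring() and my_enable_irq() helpers:

    static int my_poll(struct napi_struct *napi, int budget)
    {
            int work_done = my_process_ring(napi, budget);

            /* complete and re-enable interrupts only when under budget */
            if (work_done < budget && napi_complete_done(napi, work_done))
                    my_enable_irq(napi);

            /* never report more work than the budget allows */
            return min(work_done, budget);
    }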
 
+/* Call back when data is available in the host ring buffer.
+ * Processing is deferred until the network softirq (NAPI).
+ */
 void netvsc_channel_cb(void *context)
 {
-       struct vmbus_channel *channel = context;
-       struct hv_device *device = netvsc_channel_to_device(channel);
-       u16 q_idx = channel->offermsg.offer.sub_channel_index;
-       struct netvsc_device *net_device;
-       struct net_device *ndev;
+       struct netvsc_channel *nvchan = context;
 
-       ndev = hv_get_drvdata(device);
-       if (unlikely(!ndev))
-               return;
-
-       net_device = net_device_to_netvsc_device(ndev);
-       if (unlikely(net_device->destroy) &&
-           netvsc_channel_idle(net_device, q_idx))
-               return;
+       if (napi_schedule_prep(&nvchan->napi)) {
+               /* disable interrupts from host */
+               hv_begin_read(&nvchan->channel->inbound);
 
-       /* disable interupts from host */
-       hv_begin_read(&channel->inbound);
-       napi_schedule(&net_device->chan_table[q_idx].napi);
+               __napi_schedule(&nvchan->napi);
+       }
 }
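The callback relies on napi_schedule_prep() returning false while a poll is already pending, so host interrupts get masked at most once per poll cycle. The same shape as a generic interrupt handler (my_chan and my_mask_irq() are hypothetical):

    static irqreturn_t my_irq(int irq, void *data)
    {
            struct my_chan *chan = data;    /* per-queue state */

            if (napi_schedule_prep(&chan->napi)) {
                    my_mask_irq(chan);              /* stop further interrupts */
                    __napi_schedule(&chan->napi);   /* run the poll in softirq */
            }
            return IRQ_HANDLED;
    }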
 
 /*
@@ -1307,23 +1293,13 @@ int netvsc_device_add(struct hv_device *device,
         */
        set_channel_read_mode(device->channel, HV_CALL_ISR);
 
-       /* Open the channel */
-       ret = vmbus_open(device->channel, ring_size * PAGE_SIZE,
-                        ring_size * PAGE_SIZE, NULL, 0,
-                        netvsc_channel_cb, device->channel);
-
-       if (ret != 0) {
-               netdev_err(ndev, "unable to open channel: %d\n", ret);
-               goto cleanup;
-       }
-
-       /* Channel is opened */
-       netdev_dbg(ndev, "hv_netvsc channel opened successfully\n");
-
        /* If we're reopening the device we may have multiple queues, fill the
         * chn_table with the default channel to use it before subchannels are
         * opened.
+        * Initialize the channel state before we open;
+        * we can be interrupted as soon as we open the channel.
         */
+
        for (i = 0; i < VRSS_CHANNEL_MAX; i++) {
                struct netvsc_channel *nvchan = &net_device->chan_table[i];
 
@@ -1332,15 +1308,27 @@ int netvsc_device_add(struct hv_device *device,
                               netvsc_poll, NAPI_POLL_WEIGHT);
        }
 
+       /* Open the channel */
+       ret = vmbus_open(device->channel, ring_size * PAGE_SIZE,
+                        ring_size * PAGE_SIZE, NULL, 0,
+                        netvsc_channel_cb,
+                        net_device->chan_table);
+
+       if (ret != 0) {
+               netdev_err(ndev, "unable to open channel: %d\n", ret);
+               goto cleanup;
+       }
+
+       /* Channel is opened */
+       netdev_dbg(ndev, "hv_netvsc channel opened successfully\n");
+
        /* Enable NAPI handler for init callbacks */
        napi_enable(&net_device->chan_table[0].napi);
 
        /* Writing nvdev pointer unlocks netvsc_send(), make sure chn_table is
         * populated.
         */
-       wmb();
-
-       net_device_ctx->nvdev = net_device;
+       rcu_assign_pointer(net_device_ctx->nvdev, net_device);
 
        /* Connect with the NetVsp */
        ret = netvsc_connect_vsp(device);
@@ -1359,7 +1347,7 @@ close:
        vmbus_close(device->channel);
 
 cleanup:
-       free_netvsc_device(net_device);
+       free_netvsc_device(&net_device->rcu);
 
        return ret;
 }
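Replacing the open-coded wmb() with rcu_assign_pointer() works because the assignment already carries release semantics: every store made to the object before publication is visible to any reader that later dereferences the pointer. In sketch form, with hypothetical my_ctx/my_obj types:

    static int my_publish(struct my_ctx *ctx)
    {
            struct my_obj *obj = kzalloc(sizeof(*obj), GFP_KERNEL);

            if (!obj)
                    return -ENOMEM;

            my_init(obj);                           /* fully initialize first */
            rcu_assign_pointer(ctx->obj, obj);      /* then publish to readers */
            return 0;
    }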
index 617dd90803c9a92c5cb0237c19f13d544506bc85..f24c2891dd0cf3e9f65af49c83fd723ccbcbc5ee 100644 (file)
@@ -62,7 +62,7 @@ static void do_set_multicast(struct work_struct *w)
                container_of(w, struct net_device_context, work);
        struct hv_device *device_obj = ndevctx->device_ctx;
        struct net_device *ndev = hv_get_drvdata(device_obj);
-       struct netvsc_device *nvdev = ndevctx->nvdev;
+       struct netvsc_device *nvdev = rcu_dereference(ndevctx->nvdev);
        struct rndis_device *rdev;
 
        if (!nvdev)
@@ -116,7 +116,7 @@ static int netvsc_open(struct net_device *net)
 static int netvsc_close(struct net_device *net)
 {
        struct net_device_context *net_device_ctx = netdev_priv(net);
-       struct netvsc_device *nvdev = net_device_ctx->nvdev;
+       struct netvsc_device *nvdev = rtnl_dereference(net_device_ctx->nvdev);
        int ret;
        u32 aread, awrite, i, msec = 10, retry = 0, retry_max = 20;
        struct vmbus_channel *chn;
@@ -206,17 +206,15 @@ static u16 netvsc_select_queue(struct net_device *ndev, struct sk_buff *skb,
                        void *accel_priv, select_queue_fallback_t fallback)
 {
        struct net_device_context *net_device_ctx = netdev_priv(ndev);
-       struct netvsc_device *nvsc_dev = net_device_ctx->nvdev;
+       unsigned int num_tx_queues = ndev->real_num_tx_queues;
        struct sock *sk = skb->sk;
        int q_idx = sk_tx_queue_get(sk);
 
-       if (q_idx < 0 || skb->ooo_okay ||
-           q_idx >= ndev->real_num_tx_queues) {
+       if (q_idx < 0 || skb->ooo_okay || q_idx >= num_tx_queues) {
                u16 hash = __skb_tx_hash(ndev, skb, VRSS_SEND_TAB_SIZE);
                int new_idx;
 
-               new_idx = nvsc_dev->send_table[hash]
-                       % nvsc_dev->num_chn;
+               new_idx = net_device_ctx->tx_send_table[hash] % num_tx_queues;
 
                if (q_idx != new_idx && sk &&
                    sk_fullsock(sk) && rcu_access_pointer(sk->sk_dst_cache))
@@ -225,9 +223,6 @@ static u16 netvsc_select_queue(struct net_device *ndev, struct sk_buff *skb,
                q_idx = new_idx;
        }
 
-       if (unlikely(!nvsc_dev->chan_table[q_idx].channel))
-               q_idx = 0;
-
        return q_idx;
 }
 
@@ -642,9 +637,9 @@ int netvsc_recv_callback(struct net_device *net,
                         const struct ndis_pkt_8021q_info *vlan)
 {
        struct net_device_context *net_device_ctx = netdev_priv(net);
-       struct netvsc_device *net_device = net_device_ctx->nvdev;
+       struct netvsc_device *net_device;
        u16 q_idx = channel->offermsg.offer.sub_channel_index;
-       struct netvsc_channel *nvchan = &net_device->chan_table[q_idx];
+       struct netvsc_channel *nvchan;
        struct net_device *vf_netdev;
        struct sk_buff *skb;
        struct netvsc_stats *rx_stats;
@@ -660,6 +655,11 @@ int netvsc_recv_callback(struct net_device *net,
         * interface in the guest.
         */
        rcu_read_lock();
+       net_device = rcu_dereference(net_device_ctx->nvdev);
+       if (unlikely(!net_device))
+               goto drop;
+
+       nvchan = &net_device->chan_table[q_idx];
        vf_netdev = rcu_dereference(net_device_ctx->vf_netdev);
        if (vf_netdev && (vf_netdev->flags & IFF_UP))
                net = vf_netdev;
@@ -668,6 +668,7 @@ int netvsc_recv_callback(struct net_device *net,
        skb = netvsc_alloc_recv_skb(net, &nvchan->napi,
                                    csum_info, vlan, data, len);
        if (unlikely(!skb)) {
+drop:
                ++net->stats.rx_dropped;
                rcu_read_unlock();
                return NVSP_STAT_FAIL;
@@ -709,7 +710,7 @@ static void netvsc_get_channels(struct net_device *net,
                                struct ethtool_channels *channel)
 {
        struct net_device_context *net_device_ctx = netdev_priv(net);
-       struct netvsc_device *nvdev = net_device_ctx->nvdev;
+       struct netvsc_device *nvdev = rtnl_dereference(net_device_ctx->nvdev);
 
        if (nvdev) {
                channel->max_combined   = nvdev->max_chn;
@@ -746,8 +747,9 @@ static int netvsc_set_channels(struct net_device *net,
 {
        struct net_device_context *net_device_ctx = netdev_priv(net);
        struct hv_device *dev = net_device_ctx->device_ctx;
-       struct netvsc_device *nvdev = net_device_ctx->nvdev;
+       struct netvsc_device *nvdev = rtnl_dereference(net_device_ctx->nvdev);
        unsigned int count = channels->combined_count;
+       bool was_running;
        int ret;
 
        /* We do not support separate count for rx, tx, or other */
@@ -758,7 +760,7 @@ static int netvsc_set_channels(struct net_device *net,
        if (count > net->num_tx_queues || count > net->num_rx_queues)
                return -EINVAL;
 
-       if (net_device_ctx->start_remove || !nvdev || nvdev->destroy)
+       if (!nvdev || nvdev->destroy)
                return -ENODEV;
 
        if (nvdev->nvsp_version < NVSP_PROTOCOL_VERSION_5)
@@ -767,11 +769,13 @@ static int netvsc_set_channels(struct net_device *net,
        if (count > nvdev->max_chn)
                return -EINVAL;
 
-       ret = netvsc_close(net);
-       if (ret)
-               return ret;
+       was_running = netif_running(net);
+       if (was_running) {
+               ret = netvsc_close(net);
+               if (ret)
+                       return ret;
+       }
 
-       net_device_ctx->start_remove = true;
        rndis_filter_device_remove(dev, nvdev);
 
        ret = netvsc_set_queues(net, dev, count);
@@ -780,8 +784,8 @@ static int netvsc_set_channels(struct net_device *net,
        else
                netvsc_set_queues(net, dev, nvdev->num_chn);
 
-       netvsc_open(net);
-       net_device_ctx->start_remove = false;
+       if (was_running)
+               ret = netvsc_open(net);
 
        /* We may have missed link change notifications */
        schedule_delayed_work(&net_device_ctx->dwork, 0);
@@ -789,18 +793,19 @@ static int netvsc_set_channels(struct net_device *net,
        return ret;
 }
 
-static bool netvsc_validate_ethtool_ss_cmd(const struct ethtool_cmd *cmd)
+static bool
+netvsc_validate_ethtool_ss_cmd(const struct ethtool_link_ksettings *cmd)
 {
-       struct ethtool_cmd diff1 = *cmd;
-       struct ethtool_cmd diff2 = {};
+       struct ethtool_link_ksettings diff1 = *cmd;
+       struct ethtool_link_ksettings diff2 = {};
 
-       ethtool_cmd_speed_set(&diff1, 0);
-       diff1.duplex = 0;
+       diff1.base.speed = 0;
+       diff1.base.duplex = 0;
        /* advertising and cmd are usually set */
-       diff1.advertising = 0;
-       diff1.cmd = 0;
+       ethtool_link_ksettings_zero_link_mode(&diff1, advertising);
+       diff1.base.cmd = 0;
        /* We set port to PORT_OTHER */
-       diff2.port = PORT_OTHER;
+       diff2.base.port = PORT_OTHER;
 
        return !memcmp(&diff1, &diff2, sizeof(diff1));
 }
@@ -813,30 +818,32 @@ static void netvsc_init_settings(struct net_device *dev)
        ndc->duplex = DUPLEX_UNKNOWN;
 }
 
-static int netvsc_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+static int netvsc_get_link_ksettings(struct net_device *dev,
+                                    struct ethtool_link_ksettings *cmd)
 {
        struct net_device_context *ndc = netdev_priv(dev);
 
-       ethtool_cmd_speed_set(cmd, ndc->speed);
-       cmd->duplex = ndc->duplex;
-       cmd->port = PORT_OTHER;
+       cmd->base.speed = ndc->speed;
+       cmd->base.duplex = ndc->duplex;
+       cmd->base.port = PORT_OTHER;
 
        return 0;
 }
 
-static int netvsc_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+static int netvsc_set_link_ksettings(struct net_device *dev,
+                                    const struct ethtool_link_ksettings *cmd)
 {
        struct net_device_context *ndc = netdev_priv(dev);
        u32 speed;
 
-       speed = ethtool_cmd_speed(cmd);
+       speed = cmd->base.speed;
        if (!ethtool_validate_speed(speed) ||
-           !ethtool_validate_duplex(cmd->duplex) ||
+           !ethtool_validate_duplex(cmd->base.duplex) ||
            !netvsc_validate_ethtool_ss_cmd(cmd))
                return -EINVAL;
 
        ndc->speed = speed;
-       ndc->duplex = cmd->duplex;
+       ndc->duplex = cmd->base.duplex;
 
        return 0;
 }
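The conversion above retires the legacy ethtool_cmd get_settings/set_settings pair in favor of the ksettings API, where everything lives under cmd->base and link modes become bitmaps. The skeleton of such an op (my_priv and its fields are hypothetical):

    static int my_get_link_ksettings(struct net_device *dev,
                                     struct ethtool_link_ksettings *cmd)
    {
            struct my_priv *p = netdev_priv(dev);

            cmd->base.speed  = p->speed;    /* plain fields now; no more */
            cmd->base.duplex = p->duplex;   /* ethtool_cmd_speed_set()   */
            cmd->base.port   = PORT_OTHER;
            return 0;
    }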
@@ -844,24 +851,27 @@ static int netvsc_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
 static int netvsc_change_mtu(struct net_device *ndev, int mtu)
 {
        struct net_device_context *ndevctx = netdev_priv(ndev);
-       struct netvsc_device *nvdev = ndevctx->nvdev;
+       struct netvsc_device *nvdev = rtnl_dereference(ndevctx->nvdev);
        struct hv_device *hdev = ndevctx->device_ctx;
        struct netvsc_device_info device_info;
-       int ret;
+       bool was_running;
+       int ret = 0;
 
-       if (ndevctx->start_remove || !nvdev || nvdev->destroy)
+       if (!nvdev || nvdev->destroy)
                return -ENODEV;
 
-       ret = netvsc_close(ndev);
-       if (ret)
-               goto out;
+       was_running = netif_running(ndev);
+       if (was_running) {
+               ret = netvsc_close(ndev);
+               if (ret)
+                       return ret;
+       }
 
        memset(&device_info, 0, sizeof(device_info));
        device_info.ring_size = ring_size;
        device_info.num_chn = nvdev->num_chn;
        device_info.max_num_vrss_chns = nvdev->num_chn;
 
-       ndevctx->start_remove = true;
        rndis_filter_device_remove(hdev, nvdev);
 
        /* 'nvdev' has been freed in rndis_filter_device_remove() ->
@@ -874,9 +884,8 @@ static int netvsc_change_mtu(struct net_device *ndev, int mtu)
 
        rndis_filter_device_add(hdev, &device_info);
 
-out:
-       netvsc_open(ndev);
-       ndevctx->start_remove = false;
+       if (was_running)
+               ret = netvsc_open(ndev);
 
        /* We may have missed link change notifications */
        schedule_delayed_work(&ndevctx->dwork, 0);
@@ -888,7 +897,7 @@ static void netvsc_get_stats64(struct net_device *net,
                               struct rtnl_link_stats64 *t)
 {
        struct net_device_context *ndev_ctx = netdev_priv(net);
-       struct netvsc_device *nvdev = ndev_ctx->nvdev;
+       struct netvsc_device *nvdev = rcu_dereference(ndev_ctx->nvdev);
        int i;
 
        if (!nvdev)
@@ -973,7 +982,10 @@ static const struct {
 static int netvsc_get_sset_count(struct net_device *dev, int string_set)
 {
        struct net_device_context *ndc = netdev_priv(dev);
-       struct netvsc_device *nvdev = ndc->nvdev;
+       struct netvsc_device *nvdev = rcu_dereference(ndc->nvdev);
+
+       if (!nvdev)
+               return -ENODEV;
 
        switch (string_set) {
        case ETH_SS_STATS:
@@ -987,13 +999,16 @@ static void netvsc_get_ethtool_stats(struct net_device *dev,
                                     struct ethtool_stats *stats, u64 *data)
 {
        struct net_device_context *ndc = netdev_priv(dev);
-       struct netvsc_device *nvdev = ndc->nvdev;
+       struct netvsc_device *nvdev = rcu_dereference(ndc->nvdev);
        const void *nds = &ndc->eth_stats;
        const struct netvsc_stats *qstats;
        unsigned int start;
        u64 packets, bytes;
        int i, j;
 
+       if (!nvdev)
+               return;
+
        for (i = 0; i < NETVSC_GLOBAL_STATS_LEN; i++)
                data[i] = *(unsigned long *)(nds + netvsc_stats[i].offset);
 
@@ -1022,10 +1037,13 @@ static void netvsc_get_ethtool_stats(struct net_device *dev,
 static void netvsc_get_strings(struct net_device *dev, u32 stringset, u8 *data)
 {
        struct net_device_context *ndc = netdev_priv(dev);
-       struct netvsc_device *nvdev = ndc->nvdev;
+       struct netvsc_device *nvdev = rcu_dereference(ndc->nvdev);
        u8 *p = data;
        int i;
 
+       if (!nvdev)
+               return;
+
        switch (stringset) {
        case ETH_SS_STATS:
                for (i = 0; i < ARRAY_SIZE(netvsc_stats); i++)
@@ -1077,7 +1095,10 @@ netvsc_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info,
                 u32 *rules)
 {
        struct net_device_context *ndc = netdev_priv(dev);
-       struct netvsc_device *nvdev = ndc->nvdev;
+       struct netvsc_device *nvdev = rcu_dereference(ndc->nvdev);
+
+       if (!nvdev)
+               return -ENODEV;
 
        switch (info->cmd) {
        case ETHTOOL_GRXRINGS:
@@ -1113,13 +1134,17 @@ static int netvsc_get_rxfh(struct net_device *dev, u32 *indir, u8 *key,
                           u8 *hfunc)
 {
        struct net_device_context *ndc = netdev_priv(dev);
-       struct netvsc_device *ndev = ndc->nvdev;
-       struct rndis_device *rndis_dev = ndev->extension;
+       struct netvsc_device *ndev = rcu_dereference(ndc->nvdev);
+       struct rndis_device *rndis_dev;
        int i;
 
+       if (!ndev)
+               return -ENODEV;
+
        if (hfunc)
                *hfunc = ETH_RSS_HASH_TOP;      /* Toeplitz */
 
+       rndis_dev = ndev->extension;
        if (indir) {
                for (i = 0; i < ITAB_NUM; i++)
                        indir[i] = rndis_dev->ind_table[i];
@@ -1135,13 +1160,17 @@ static int netvsc_set_rxfh(struct net_device *dev, const u32 *indir,
                           const u8 *key, const u8 hfunc)
 {
        struct net_device_context *ndc = netdev_priv(dev);
-       struct netvsc_device *ndev = ndc->nvdev;
-       struct rndis_device *rndis_dev = ndev->extension;
+       struct netvsc_device *ndev = rtnl_dereference(ndc->nvdev);
+       struct rndis_device *rndis_dev;
        int i;
 
+       if (!ndev)
+               return -ENODEV;
+
        if (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP)
                return -EOPNOTSUPP;
 
+       rndis_dev = ndev->extension;
        if (indir) {
                for (i = 0; i < ITAB_NUM; i++)
                        if (indir[i] >= dev->num_rx_queues)
@@ -1170,13 +1199,13 @@ static const struct ethtool_ops ethtool_ops = {
        .get_channels   = netvsc_get_channels,
        .set_channels   = netvsc_set_channels,
        .get_ts_info    = ethtool_op_get_ts_info,
-       .get_settings   = netvsc_get_settings,
-       .set_settings   = netvsc_set_settings,
        .get_rxnfc      = netvsc_get_rxnfc,
        .get_rxfh_key_size = netvsc_get_rxfh_key_size,
        .get_rxfh_indir_size = netvsc_rss_indir_size,
        .get_rxfh       = netvsc_get_rxfh,
        .set_rxfh       = netvsc_set_rxfh,
+       .get_link_ksettings = netvsc_get_link_ksettings,
+       .set_link_ksettings = netvsc_set_link_ksettings,
 };
 
 static const struct net_device_ops device_ops = {
@@ -1212,10 +1241,10 @@ static void netvsc_link_change(struct work_struct *w)
        unsigned long flags, next_reconfig, delay;
 
        rtnl_lock();
-       if (ndev_ctx->start_remove)
+       net_device = rtnl_dereference(ndev_ctx->nvdev);
+       if (!net_device)
                goto out_unlock;
 
-       net_device = ndev_ctx->nvdev;
        rdev = net_device->extension;
 
        next_reconfig = ndev_ctx->last_reconfig + LINKCHANGE_INT;
@@ -1356,7 +1385,7 @@ static int netvsc_register_vf(struct net_device *vf_netdev)
                return NOTIFY_DONE;
 
        net_device_ctx = netdev_priv(ndev);
-       netvsc_dev = net_device_ctx->nvdev;
+       netvsc_dev = rtnl_dereference(net_device_ctx->nvdev);
        if (!netvsc_dev || rtnl_dereference(net_device_ctx->vf_netdev))
                return NOTIFY_DONE;
 
@@ -1382,7 +1411,7 @@ static int netvsc_vf_up(struct net_device *vf_netdev)
                return NOTIFY_DONE;
 
        net_device_ctx = netdev_priv(ndev);
-       netvsc_dev = net_device_ctx->nvdev;
+       netvsc_dev = rtnl_dereference(net_device_ctx->nvdev);
 
        netdev_info(ndev, "VF up: %s\n", vf_netdev->name);
 
@@ -1416,7 +1445,7 @@ static int netvsc_vf_down(struct net_device *vf_netdev)
                return NOTIFY_DONE;
 
        net_device_ctx = netdev_priv(ndev);
-       netvsc_dev = net_device_ctx->nvdev;
+       netvsc_dev = rtnl_dereference(net_device_ctx->nvdev);
 
        netdev_info(ndev, "VF down: %s\n", vf_netdev->name);
        netvsc_switch_datapath(ndev, false);
@@ -1476,8 +1505,6 @@ static int netvsc_probe(struct hv_device *dev,
 
        hv_set_drvdata(dev, net);
 
-       net_device_ctx->start_remove = false;
-
        INIT_DELAYED_WORK(&net_device_ctx->dwork, netvsc_link_change);
        INIT_WORK(&net_device_ctx->work, do_set_multicast);
 
@@ -1494,8 +1521,7 @@ static int netvsc_probe(struct hv_device *dev,
        /* Notify the netvsc driver of the new device */
        memset(&device_info, 0, sizeof(device_info));
        device_info.ring_size = ring_size;
-       device_info.max_num_vrss_chns = min_t(u32, VRSS_CHANNEL_DEFAULT,
-                                             num_online_cpus());
+       device_info.num_chn = VRSS_CHANNEL_DEFAULT;
        ret = rndis_filter_device_add(dev, &device_info);
        if (ret != 0) {
                netdev_err(net, "unable to add netvsc device (ret %d)\n", ret);
@@ -1511,6 +1537,7 @@ static int netvsc_probe(struct hv_device *dev,
                NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX;
        net->vlan_features = net->features;
 
+       /* RCU not necessary here, device not registered */
        nvdev = net_device_ctx->nvdev;
        netif_set_real_num_tx_queues(net, nvdev->num_chn);
        netif_set_real_num_rx_queues(net, nvdev->num_chn);
@@ -1546,26 +1573,20 @@ static int netvsc_remove(struct hv_device *dev)
 
        ndev_ctx = netdev_priv(net);
 
-       /* Avoid racing with netvsc_change_mtu()/netvsc_set_channels()
-        * removing the device.
-        */
-       rtnl_lock();
-       ndev_ctx->start_remove = true;
-       rtnl_unlock();
+       netif_device_detach(net);
 
        cancel_delayed_work_sync(&ndev_ctx->dwork);
        cancel_work_sync(&ndev_ctx->work);
 
-       /* Stop outbound asap */
-       netif_tx_disable(net);
-
-       unregister_netdev(net);
-
        /*
         * Call to the vsc driver to let it know that the device is being
-        * removed
+        * removed. Also blocks mtu and channel changes.
         */
+       rtnl_lock();
        rndis_filter_device_remove(dev, ndev_ctx->nvdev);
+       rtnl_unlock();
+
+       unregister_netdev(net);
 
        hv_set_drvdata(dev, NULL);
 
index d7b6311e6c195d7815106518c5b5774f05e86648..1e9445bc45391195a9f15fbda7bf56fa2d3b762e 100644 (file)
@@ -819,16 +819,14 @@ int rndis_filter_set_packet_filter(struct rndis_device *dev, u32 new_filter)
 {
        struct rndis_request *request;
        struct rndis_set_request *set;
-       struct rndis_set_complete *set_complete;
        int ret;
 
        request = get_rndis_request(dev, RNDIS_MSG_SET,
                        RNDIS_MESSAGE_SIZE(struct rndis_set_request) +
                        sizeof(u32));
-       if (!request) {
-               ret = -ENOMEM;
-               goto cleanup;
-       }
+       if (!request)
+               return -ENOMEM;
+
 
        /* Setup the rndis set */
        set = &request->request_msg.msg.set_req;
@@ -840,15 +838,11 @@ int rndis_filter_set_packet_filter(struct rndis_device *dev, u32 new_filter)
               &new_filter, sizeof(u32));
 
        ret = rndis_filter_send_request(dev, request);
-       if (ret != 0)
-               goto cleanup;
+       if (ret == 0)
+               wait_for_completion(&request->wait_event);
 
-       wait_for_completion(&request->wait_event);
+       put_rndis_request(dev, request);
 
-       set_complete = &request->response_msg.msg.set_complete;
-cleanup:
-       if (request)
-               put_rndis_request(dev, request);
        return ret;
 }
 
@@ -926,8 +920,6 @@ static void rndis_filter_halt_device(struct rndis_device *dev)
        struct rndis_halt_request *halt;
        struct net_device_context *net_device_ctx = netdev_priv(dev->ndev);
        struct netvsc_device *nvdev = net_device_ctx->nvdev;
-       struct hv_device *hdev = net_device_ctx->device_ctx;
-       ulong flags;
 
        /* Attempt to do a rndis device halt */
        request = get_rndis_request(dev, RNDIS_MSG_HALT,
@@ -945,9 +937,10 @@ static void rndis_filter_halt_device(struct rndis_device *dev)
        dev->state = RNDIS_DEV_UNINITIALIZED;
 
 cleanup:
-       spin_lock_irqsave(&hdev->channel->inbound_lock, flags);
        nvdev->destroy = true;
-       spin_unlock_irqrestore(&hdev->channel->inbound_lock, flags);
+
+       /* Force flag to be ordered before waiting */
+       wmb();
 
        /* Wait for all send completions */
        wait_event(nvdev->wait_drain, netvsc_device_idle(nvdev));
@@ -996,28 +989,35 @@ static void netvsc_sc_open(struct vmbus_channel *new_sc)
                hv_get_drvdata(new_sc->primary_channel->device_obj);
        struct netvsc_device *nvscdev = net_device_to_netvsc_device(ndev);
        u16 chn_index = new_sc->offermsg.offer.sub_channel_index;
+       struct netvsc_channel *nvchan;
        int ret;
-       unsigned long flags;
 
        if (chn_index >= nvscdev->num_chn)
                return;
 
-       nvscdev->chan_table[chn_index].mrc.buf
+       nvchan = nvscdev->chan_table + chn_index;
+       nvchan->mrc.buf
                = vzalloc(NETVSC_RECVSLOT_MAX * sizeof(struct recv_comp_data));
 
+       if (!nvchan->mrc.buf)
+               return;
+
+       /* Because the device uses NAPI, all the interrupt batching and
+        * control is done via the net softirq, not the channel handling.
+        */
+       set_channel_read_mode(new_sc, HV_CALL_ISR);
+
+       /* Set the channel before opening. */
+       nvchan->channel = new_sc;
+
        ret = vmbus_open(new_sc, nvscdev->ring_size * PAGE_SIZE,
                         nvscdev->ring_size * PAGE_SIZE, NULL, 0,
-                        netvsc_channel_cb, new_sc);
+                        netvsc_channel_cb, nvchan);
 
-       if (ret == 0)
-               nvscdev->chan_table[chn_index].channel = new_sc;
 
-       napi_enable(&nvscdev->chan_table[chn_index].napi);
+       napi_enable(&nvchan->napi);
 
-       spin_lock_irqsave(&nvscdev->sc_lock, flags);
-       nvscdev->num_sc_offered--;
-       spin_unlock_irqrestore(&nvscdev->sc_lock, flags);
-       if (nvscdev->num_sc_offered == 0)
+       if (refcount_dec_and_test(&nvscdev->sc_offered))
                complete(&nvscdev->channel_init_wait);
 }
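Above, a spinlock-protected integer becomes a refcount_t, and the "last sub-channel completes the waiter" logic collapses into refcount_dec_and_test(). The pattern in isolation, assuming a hypothetical my_dev with a refcount_t sc_offered and a struct completion; note the counter must be armed to a nonzero value before the first decrement:

    static void my_offer_subchannels(struct my_dev *dev, unsigned int n)
    {
            refcount_set(&dev->sc_offered, n);      /* arm before offering */
    }

    static void my_subchannel_open(struct my_dev *dev)
    {
            /* the final decrement wakes the waiter; no spinlock needed */
            if (refcount_dec_and_test(&dev->sc_offered))
                    complete(&dev->channel_init_wait);
    }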
 
@@ -1034,12 +1034,9 @@ int rndis_filter_device_add(struct hv_device *dev,
        struct ndis_recv_scale_cap rsscap;
        u32 rsscap_size = sizeof(struct ndis_recv_scale_cap);
        unsigned int gso_max_size = GSO_MAX_SIZE;
-       u32 mtu, size;
-       u32 num_rss_qs;
-       u32 sc_delta;
+       u32 mtu, size, num_rss_qs;
        const struct cpumask *node_cpu_mask;
        u32 num_possible_rss_qs;
-       unsigned long flags;
        int i, ret;
 
        rndis_device = get_rndis_device();
@@ -1062,7 +1059,7 @@ int rndis_filter_device_add(struct hv_device *dev,
        net_device->max_chn = 1;
        net_device->num_chn = 1;
 
-       spin_lock_init(&net_device->sc_lock);
+       refcount_set(&net_device->sc_offered, 0);
 
        net_device->extension = rndis_device;
        rndis_device->ndev = net;
@@ -1176,34 +1173,30 @@ int rndis_filter_device_add(struct hv_device *dev,
        if (ret || rsscap.num_recv_que < 2)
                goto out;
 
-       net_device->max_chn = min_t(u32, VRSS_CHANNEL_MAX, rsscap.num_recv_que);
-
-       num_rss_qs = min(device_info->max_num_vrss_chns, net_device->max_chn);
-
        /*
         * We will limit the VRSS channels to the number of CPUs in the NUMA node
         * the primary channel is currently bound to.
+        *
+        * This also guarantees that num_possible_rss_qs <= num_online_cpus
         */
        node_cpu_mask = cpumask_of_node(cpu_to_node(dev->channel->target_cpu));
-       num_possible_rss_qs = cpumask_weight(node_cpu_mask);
+       num_possible_rss_qs = min_t(u32, cpumask_weight(node_cpu_mask),
+                                   rsscap.num_recv_que);
 
-       /* We will use the given number of channels if available. */
-       if (device_info->num_chn && device_info->num_chn < net_device->max_chn)
-               net_device->num_chn = device_info->num_chn;
-       else
-               net_device->num_chn = min(num_possible_rss_qs, num_rss_qs);
+       net_device->max_chn = min_t(u32, VRSS_CHANNEL_MAX, num_possible_rss_qs);
 
-       num_rss_qs = net_device->num_chn - 1;
+       /* We will use the given number of channels if available. */
+       net_device->num_chn = min(net_device->max_chn, device_info->num_chn);
 
        for (i = 0; i < ITAB_NUM; i++)
                rndis_device->ind_table[i] = ethtool_rxfh_indir_default(i,
                                                        net_device->num_chn);
 
-       net_device->num_sc_offered = num_rss_qs;
-
-       if (net_device->num_chn == 1)
-               goto out;
+       num_rss_qs = net_device->num_chn - 1;
+       if (num_rss_qs == 0)
+               return 0;
 
+       refcount_set(&net_device->sc_offered, num_rss_qs);
        vmbus_set_sc_create_callback(dev->channel, netvsc_sc_open);
 
        init_packet = &net_device->channel_init_pkt;
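The queue-sizing logic above clamps the channel count to the CPUs of the NUMA node that owns the primary channel, which in turn bounds it by the online CPU count. Reduced to its core (primary_cpu and hw_queues are hypothetical inputs):

    static u32 my_channel_count(int primary_cpu, u32 hw_queues)
    {
            const struct cpumask *mask =
                    cpumask_of_node(cpu_to_node(primary_cpu));
            u32 node_cpus = cpumask_weight(mask);   /* CPUs in the local node */

            return min3((u32)VRSS_CHANNEL_MAX, node_cpus, hw_queues);
    }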
@@ -1219,32 +1212,23 @@ int rndis_filter_device_add(struct hv_device *dev,
                               VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
        if (ret)
                goto out;
-       wait_for_completion(&net_device->channel_init_wait);
 
-       if (init_packet->msg.v5_msg.subchn_comp.status !=
-           NVSP_STAT_SUCCESS) {
+       if (init_packet->msg.v5_msg.subchn_comp.status != NVSP_STAT_SUCCESS) {
                ret = -ENODEV;
                goto out;
        }
+       wait_for_completion(&net_device->channel_init_wait);
+
        net_device->num_chn = 1 +
                init_packet->msg.v5_msg.subchn_comp.num_subchannels;
 
-       ret = rndis_filter_set_rss_param(rndis_device, netvsc_hash_key,
-                                        net_device->num_chn);
-
-       /*
-        * Set the number of sub-channels to be received.
-        */
-       spin_lock_irqsave(&net_device->sc_lock, flags);
-       sc_delta = num_rss_qs - (net_device->num_chn - 1);
-       net_device->num_sc_offered -= sc_delta;
-       spin_unlock_irqrestore(&net_device->sc_lock, flags);
-
+       /* ignore failures from setting rss parameters, still have channels */
+       rndis_filter_set_rss_param(rndis_device, netvsc_hash_key,
+                                  net_device->num_chn);
 out:
        if (ret) {
                net_device->max_chn = 1;
                net_device->num_chn = 1;
-               net_device->num_sc_offered = 0;
        }
 
        return 0; /* return 0 because primary channel can be used alone */
@@ -1259,12 +1243,6 @@ void rndis_filter_device_remove(struct hv_device *dev,
 {
        struct rndis_device *rndis_dev = net_dev->extension;
 
-       /* If not all subchannel offers are complete, wait for them until
-        * completion to avoid race.
-        */
-       if (net_dev->num_sc_offered > 0)
-               wait_for_completion(&net_dev->channel_init_wait);
-
        /* Halt and release the rndis device */
        rndis_filter_halt_device(rndis_dev);
 
index 7b131f8e40937000a273cfabbbf04a03a4af2f17..bd63289c55e8fcb68e9e371712ad1fdfc3521a43 100644 (file)
@@ -18,6 +18,7 @@
 #include <linux/spi/spi.h>
 #include <linux/interrupt.h>
 #include <linux/module.h>
+#include <linux/of.h>
 #include <linux/regmap.h>
 #include <linux/ieee802154.h>
 #include <linux/irq.h>
index ffedad2a360afb8154f0e1e8b9be0c1d388fa5d9..15b920086251633e2ee0ee0b26ba0046859e5299 100644 (file)
@@ -418,8 +418,9 @@ static struct vlsi_ring *vlsi_alloc_ring(struct pci_dev *pdev, struct ring_descr
                memset(rd, 0, sizeof(*rd));
                rd->hw = hwmap + i;
                rd->buf = kmalloc(len, GFP_KERNEL|GFP_DMA);
-               if (rd->buf == NULL ||
-                   !(busaddr = pci_map_single(pdev, rd->buf, len, dir))) {
+               if (rd->buf)
+                       busaddr = pci_map_single(pdev, rd->buf, len, dir);
+               if (rd->buf == NULL || pci_dma_mapping_error(pdev, busaddr)) {
                        if (rd->buf) {
                                net_err_ratelimited("%s: failed to create PCI-MAP for %p\n",
                                                    __func__, rd->buf);
@@ -430,8 +431,7 @@ static struct vlsi_ring *vlsi_alloc_ring(struct pci_dev *pdev, struct ring_descr
                                rd = r->rd + j;
                                busaddr = rd_get_addr(rd);
                                rd_set_addr_status(rd, 0, 0);
-                               if (busaddr)
-                                       pci_unmap_single(pdev, busaddr, len, dir);
+                               pci_unmap_single(pdev, busaddr, len, dir);
                                kfree(rd->buf);
                                rd->buf = NULL;
                        }
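The vlsi_ir fix above stops treating a zero bus address as failure: 0 can be a valid DMA address, so mapping errors must be detected with pci_dma_mapping_error(). The corrected idiom in general form (a sketch, not this driver's exact code):

    static int my_map_buffer(struct pci_dev *pdev, void *buf, size_t len,
                             int dir, dma_addr_t *busaddr)
    {
            *busaddr = pci_map_single(pdev, buf, len, dir);
            if (pci_dma_mapping_error(pdev, *busaddr))
                    return -ENOMEM;         /* 0 may be a valid bus address */

            return 0;
    }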
index b23b71981fd55689daa817d41191063300bdaaf5..224f65cb576bbf106a4779ef5f60b75f34903b1c 100644 (file)
@@ -13,7 +13,7 @@
  *
  *             Alan Cox        :       Fixed oddments for NET3.014
  *             Alan Cox        :       Rejig for NET3.029 snap #3
- *             Alan Cox        :       Fixed NET3.029 bugs and sped up
+ *             Alan Cox        :       Fixed NET3.029 bugs and sped up
  *             Larry McVoy     :       Tiny tweak to double performance
  *             Alan Cox        :       Backed out LMV's tweak - the linux mm
  *                                     can't take it...
@@ -41,7 +41,7 @@
 #include <linux/in.h>
 
 #include <linux/uaccess.h>
-#include <asm/io.h>
+#include <linux/io.h>
 
 #include <linux/inet.h>
 #include <linux/netdevice.h>
@@ -55,6 +55,7 @@
 #include <linux/ip.h>
 #include <linux/tcp.h>
 #include <linux/percpu.h>
+#include <linux/net_tstamp.h>
 #include <net/net_namespace.h>
 #include <linux/u64_stats_sync.h>
 
@@ -64,8 +65,7 @@ struct pcpu_lstats {
        struct u64_stats_sync   syncp;
 };
 
-/*
- * The higher levels take care of making this non-reentrant (it's
+/* The higher levels take care of making this non-reentrant (it's
  * called with bh's disabled).
  */
 static netdev_tx_t loopback_xmit(struct sk_buff *skb,
@@ -74,6 +74,7 @@ static netdev_tx_t loopback_xmit(struct sk_buff *skb,
        struct pcpu_lstats *lb_stats;
        int len;
 
+       skb_tx_timestamp(skb);
        skb_orphan(skb);
 
        /* Before queueing this packet to netif_rx(),
@@ -129,8 +130,21 @@ static u32 always_on(struct net_device *dev)
        return 1;
 }
 
+static int loopback_get_ts_info(struct net_device *netdev,
+                               struct ethtool_ts_info *ts_info)
+{
+       ts_info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
+                                  SOF_TIMESTAMPING_RX_SOFTWARE |
+                                  SOF_TIMESTAMPING_SOFTWARE;
+
+       ts_info->phc_index = -1;
+
+       return 0;
+}
+
 static const struct ethtool_ops loopback_ethtool_ops = {
        .get_link               = always_on,
+       .get_ts_info            = loopback_get_ts_info,
 };
 
 static int loopback_dev_init(struct net_device *dev)
@@ -149,14 +163,13 @@ static void loopback_dev_free(struct net_device *dev)
 }
 
 static const struct net_device_ops loopback_ops = {
-       .ndo_init      = loopback_dev_init,
-       .ndo_start_xmit= loopback_xmit,
+       .ndo_init        = loopback_dev_init,
+       .ndo_start_xmit  = loopback_xmit,
        .ndo_get_stats64 = loopback_get_stats64,
        .ndo_set_mac_address = eth_mac_addr,
 };
 
-/*
- * The loopback device is special. There is only one instance
+/* The loopback device is special. There is only one instance
  * per network namespace.
  */
 static void loopback_setup(struct net_device *dev)
@@ -170,7 +183,7 @@ static void loopback_setup(struct net_device *dev)
        dev->priv_flags         |= IFF_LIVE_ADDR_CHANGE | IFF_NO_QUEUE;
        netif_keep_dst(dev);
        dev->hw_features        = NETIF_F_GSO_SOFTWARE;
-       dev->features           = NETIF_F_SG | NETIF_F_FRAGLIST
+       dev->features           = NETIF_F_SG | NETIF_F_FRAGLIST
                | NETIF_F_GSO_SOFTWARE
                | NETIF_F_HW_CSUM
                | NETIF_F_RXCSUM
@@ -206,7 +219,6 @@ static __net_init int loopback_net_init(struct net *net)
        net->loopback_dev = dev;
        return 0;
 
-
 out_free_netdev:
        free_netdev(dev);
 out:
@@ -217,5 +229,5 @@ out:
 
 /* Registered in net/core/dev.c */
 struct pernet_operations __net_initdata loopback_net_ops = {
-       .init = loopback_net_init,
+       .init = loopback_net_init,
 };
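With skb_tx_timestamp() on the transmit path and the get_ts_info hook above, loopback traffic can now carry software timestamps. From userspace a socket would opt in roughly like this (a sketch, not part of this patch):

    #include <sys/socket.h>
    #include <linux/net_tstamp.h>

    static int enable_sw_timestamps(int fd)
    {
            int val = SOF_TIMESTAMPING_TX_SOFTWARE |
                      SOF_TIMESTAMPING_RX_SOFTWARE |
                      SOF_TIMESTAMPING_SOFTWARE;

            return setsockopt(fd, SOL_SOCKET, SO_TIMESTAMPING,
                              &val, sizeof(val));
    }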
index 36877ba6551646bf1308066026b2f9f9dd12a5d3..4daf3d0926a82cfb52fd6cf774f0a467230c3246 100644 (file)
@@ -372,18 +372,19 @@ static void ntb_get_drvinfo(struct net_device *ndev,
        strlcpy(info->bus_info, pci_name(dev->pdev), sizeof(info->bus_info));
 }
 
-static int ntb_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+static int ntb_get_link_ksettings(struct net_device *dev,
+                                 struct ethtool_link_ksettings *cmd)
 {
-       cmd->supported = SUPPORTED_Backplane;
-       cmd->advertising = ADVERTISED_Backplane;
-       ethtool_cmd_speed_set(cmd, SPEED_UNKNOWN);
-       cmd->duplex = DUPLEX_FULL;
-       cmd->port = PORT_OTHER;
-       cmd->phy_address = 0;
-       cmd->transceiver = XCVR_DUMMY1;
-       cmd->autoneg = AUTONEG_ENABLE;
-       cmd->maxtxpkt = 0;
-       cmd->maxrxpkt = 0;
+       ethtool_link_ksettings_zero_link_mode(cmd, supported);
+       ethtool_link_ksettings_add_link_mode(cmd, supported, Backplane);
+       ethtool_link_ksettings_zero_link_mode(cmd, advertising);
+       ethtool_link_ksettings_add_link_mode(cmd, advertising, Backplane);
+
+       cmd->base.speed = SPEED_UNKNOWN;
+       cmd->base.duplex = DUPLEX_FULL;
+       cmd->base.port = PORT_OTHER;
+       cmd->base.phy_address = 0;
+       cmd->base.autoneg = AUTONEG_ENABLE;
 
        return 0;
 }
@@ -391,7 +392,7 @@ static int ntb_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
 static const struct ethtool_ops ntb_ethtool_ops = {
        .get_drvinfo = ntb_get_drvinfo,
        .get_link = ethtool_op_get_link,
-       .get_settings = ntb_get_settings,
+       .get_link_ksettings = ntb_get_link_ksettings,
 };
 
 static const struct ntb_queue_handlers ntb_netdev_handlers = {
index 8dbd59baa34d5ed9eda97396f38b5ab38e96416a..60ffc9da6a286272d84e8dc3f4326eb908e7961a 100644 (file)
@@ -2,33 +2,12 @@
 # PHY Layer Configuration
 #
 
-menuconfig PHYLIB
-       tristate "PHY Device support and infrastructure"
-       depends on NETDEVICES
+menuconfig MDIO_DEVICE
+       tristate "MDIO bus device drivers"
        help
-         Ethernet controllers are usually attached to PHY
-         devices.  This option provides infrastructure for
-         managing PHY devices.
-
-if PHYLIB
+          MDIO devices and driver infrastructure code.
 
-config SWPHY
-       bool
-
-config LED_TRIGGER_PHY
-       bool "Support LED triggers for tracking link state"
-       depends on LEDS_TRIGGERS
-       ---help---
-         Adds support for a set of LED trigger events per-PHY.  Link
-         state change will trigger the events, for consumption by an
-         LED class driver.  There are triggers for each link speed currently
-         supported by the phy, and are of the form:
-              <mii bus id>:<phy>:<speed>
-
-         Where speed is in the form:
-               <Speed in megabits>Mbps or <Speed in gigabits>Gbps
-
-comment "MDIO bus device drivers"
+if MDIO_DEVICE
 
 config MDIO_BCM_IPROC
        tristate "Broadcom iProc MDIO bus controller"
@@ -40,7 +19,7 @@ config MDIO_BCM_IPROC
 
 config MDIO_BCM_UNIMAC
        tristate "Broadcom UniMAC MDIO bus controller"
-       depends on HAS_IOMEM
+       depends on HAS_IOMEM && OF_MDIO
        help
          This module provides a driver for the Broadcom UniMAC MDIO busses.
          This hardware can be found in the Broadcom GENET Ethernet MAC
@@ -49,6 +28,7 @@ config MDIO_BCM_UNIMAC
 
 config MDIO_BITBANG
        tristate "Bitbanged MDIO buses"
+       depends on !(MDIO_DEVICE=y && PHYLIB=m)
        help
          This module implements the MDIO bus protocol in software,
          for use by low level drivers that export the ability to
@@ -160,6 +140,36 @@ config MDIO_XGENE
          This module provides a driver for the MDIO busses found in the
          APM X-Gene SoCs.
 
+endif
+
+menuconfig PHYLIB
+       tristate "PHY Device support and infrastructure"
+       depends on NETDEVICES
+       select MDIO_DEVICE
+       help
+         Ethernet controllers are usually attached to PHY
+         devices.  This option provides infrastructure for
+         managing PHY devices.
+
+if PHYLIB
+
+config SWPHY
+       bool
+
+config LED_TRIGGER_PHY
+       bool "Support LED triggers for tracking link state"
+       depends on LEDS_TRIGGERS
+       ---help---
+         Adds support for a set of LED trigger events per-PHY.  Link
+         state change will trigger the events, for consumption by an
+         LED class driver.  There are triggers for each link speed currently
+         supported by the phy, and are of the form:
+              <mii bus id>:<phy>:<speed>
+
+         Where speed is in the form:
+               <Speed in megabits>Mbps or <Speed in gigabits>Gbps
+
+
 comment "MII PHY device drivers"
 
 config AMD_PHY
index 407b0b601ea8264b0ac8bf32a609271d43be995a..e36db9a2ba3814330277ea21980689c2d88c4018 100644 (file)
@@ -1,7 +1,20 @@
 # Makefile for Linux PHY drivers and MDIO bus drivers
 
-libphy-y                       := phy.o phy_device.o mdio_bus.o mdio_device.o \
-                                  mdio-boardinfo.o
+libphy-y                       := phy.o phy-core.o phy_device.o
+mdio-bus-y                     += mdio_bus.o mdio_device.o
+
+ifdef CONFIG_MDIO_DEVICE
+obj-y                          += mdio-boardinfo.o
+endif
+
+# PHYLIB implies MDIO_DEVICE; in that case we have a bunch of circular
+# dependencies that do not make it possible to split mdio-bus objects into a
+# dedicated loadable module, so we bundle them all together into libphy.ko
+ifdef CONFIG_PHYLIB
+libphy-y                       += $(mdio-bus-y)
+else
+obj-$(CONFIG_MDIO_DEVICE)      += mdio-bus.o
+endif
 libphy-$(CONFIG_SWPHY)         += swphy.o
 libphy-$(CONFIG_LED_TRIGGER_PHY)       += phy_led_triggers.o
 
index ab9ad689617c78d19b21ad8f35b5f1887c483b1c..171010eb4d9c5c36da0be9888fb75cc54e136768 100644 (file)
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2015 Broadcom Corporation
+ * Copyright (C) 2015-2017 Broadcom
  *
  * This program is free software; you can redistribute it and/or
  * modify it under the terms of the GNU General Public License as
@@ -201,8 +201,7 @@ int bcm_phy_set_eee(struct phy_device *phydev, bool enable)
        int val;
 
        /* Enable EEE at PHY level */
-       val = phy_read_mmd_indirect(phydev, BRCM_CL45VEN_EEE_CONTROL,
-                                   MDIO_MMD_AN);
+       val = phy_read_mmd(phydev, MDIO_MMD_AN, BRCM_CL45VEN_EEE_CONTROL);
        if (val < 0)
                return val;
 
@@ -211,22 +210,19 @@ int bcm_phy_set_eee(struct phy_device *phydev, bool enable)
        else
                val &= ~(LPI_FEATURE_EN | LPI_FEATURE_EN_DIG1000X);
 
-       phy_write_mmd_indirect(phydev, BRCM_CL45VEN_EEE_CONTROL,
-                              MDIO_MMD_AN, (u32)val);
+       phy_write_mmd(phydev, MDIO_MMD_AN, BRCM_CL45VEN_EEE_CONTROL, (u32)val);
 
        /* Advertise EEE */
-       val = phy_read_mmd_indirect(phydev, BCM_CL45VEN_EEE_ADV,
-                                   MDIO_MMD_AN);
+       val = phy_read_mmd(phydev, MDIO_MMD_AN, BCM_CL45VEN_EEE_ADV);
        if (val < 0)
                return val;
 
        if (enable)
-               val |= (MDIO_AN_EEE_ADV_100TX | MDIO_AN_EEE_ADV_1000T);
+               val |= (MDIO_EEE_100TX | MDIO_EEE_1000T);
        else
-               val &= ~(MDIO_AN_EEE_ADV_100TX | MDIO_AN_EEE_ADV_1000T);
+               val &= ~(MDIO_EEE_100TX | MDIO_EEE_1000T);
 
-       phy_write_mmd_indirect(phydev, BCM_CL45VEN_EEE_ADV,
-                              MDIO_MMD_AN, (u32)val);
+       phy_write_mmd(phydev, MDIO_MMD_AN, BCM_CL45VEN_EEE_ADV, (u32)val);
 
        return 0;
 }
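The bcm-phy-lib changes swap the phy_*_mmd_indirect() helpers for the first-class Clause 45 accessors; note the argument order now puts the MMD device address before the register. The resulting read-modify-write idiom (MY_REG and MY_BIT are hypothetical):

    static int my_set_bit_mmd(struct phy_device *phydev)
    {
            int val = phy_read_mmd(phydev, MDIO_MMD_AN, MY_REG);

            if (val < 0)
                    return val;             /* bus errors propagate as-is */

            val |= MY_BIT;
            return phy_write_mmd(phydev, MDIO_MMD_AN, MY_REG, (u32)val);
    }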
index d1c2614dad3a60860f33c0ff1851f18e0d1a4253..caa9f6e17f34cb54277a941a6ca81854ac0cf1ad 100644 (file)
@@ -1,7 +1,7 @@
 /*
  * Broadcom BCM7xxx internal transceivers support.
  *
- * Copyright (C) 2014, Broadcom Corporation
+ * Copyright (C) 2014-2017 Broadcom
  *
  * This program is free software; you can redistribute it and/or
  * modify it under the terms of the GNU General Public License
@@ -19,7 +19,7 @@
 
 /* Broadcom BCM7xxx internal PHY registers */
 
-/* 40nm only register definitions */
+/* EPHY only register definitions */
 #define MII_BCM7XXX_100TX_AUX_CTL      0x10
 #define MII_BCM7XXX_100TX_FALSE_CAR    0x13
 #define MII_BCM7XXX_100TX_DISC         0x14
 #define  MII_BCM7XXX_64CLK_MDIO                BIT(12)
 #define MII_BCM7XXX_TEST               0x1f
 #define  MII_BCM7XXX_SHD_MODE_2                BIT(2)
+#define MII_BCM7XXX_SHD_2_ADDR_CTRL    0xe
+#define MII_BCM7XXX_SHD_2_CTRL_STAT    0xf
+#define MII_BCM7XXX_SHD_2_BIAS_TRIM    0x1a
+#define MII_BCM7XXX_SHD_3_AN_EEE_ADV   0x3
+#define MII_BCM7XXX_SHD_3_PCS_CTRL_2   0x6
+#define  MII_BCM7XXX_PCS_CTRL_2_DEF    0x4400
+#define MII_BCM7XXX_SHD_3_AN_STAT      0xb
+#define  MII_BCM7XXX_AN_NULL_MSG_EN    BIT(0)
+#define  MII_BCM7XXX_AN_EEE_EN         BIT(1)
+#define MII_BCM7XXX_SHD_3_EEE_THRESH   0xe
+#define  MII_BCM7XXX_EEE_THRESH_DEF    0x50
+#define MII_BCM7XXX_SHD_3_TL4          0x23
+#define  MII_BCM7XXX_TL4_RST_MSK       (BIT(2) | BIT(1))
 
 /* 28nm only register definitions */
 #define MISC_ADDR(base, channel)       base, channel
@@ -286,6 +299,181 @@ static int phy_set_clr_bits(struct phy_device *dev, int location,
        return v;
 }
 
+static int bcm7xxx_28nm_ephy_01_afe_config_init(struct phy_device *phydev)
+{
+       int ret;
+
+       /* set shadow mode 2 */
+       ret = phy_set_clr_bits(phydev, MII_BCM7XXX_TEST,
+                              MII_BCM7XXX_SHD_MODE_2, 0);
+       if (ret < 0)
+               return ret;
+
+       /* Set current trim values INT_trim = -1, Ext_trim = 0 */
+       ret = phy_write(phydev, MII_BCM7XXX_SHD_2_BIAS_TRIM, 0x3BE0);
+       if (ret < 0)
+               goto reset_shadow_mode;
+
+       /* Cal reset */
+       ret = phy_write(phydev, MII_BCM7XXX_SHD_2_ADDR_CTRL,
+                       MII_BCM7XXX_SHD_3_TL4);
+       if (ret < 0)
+               goto reset_shadow_mode;
+       ret = phy_set_clr_bits(phydev, MII_BCM7XXX_SHD_2_CTRL_STAT,
+                              MII_BCM7XXX_TL4_RST_MSK, 0);
+       if (ret < 0)
+               goto reset_shadow_mode;
+
+       /* Cal reset disable */
+       ret = phy_write(phydev, MII_BCM7XXX_SHD_2_ADDR_CTRL,
+                       MII_BCM7XXX_SHD_3_TL4);
+       if (ret < 0)
+               goto reset_shadow_mode;
+       ret = phy_set_clr_bits(phydev, MII_BCM7XXX_SHD_2_CTRL_STAT,
+                              0, MII_BCM7XXX_TL4_RST_MSK);
+       if (ret < 0)
+               goto reset_shadow_mode;
+
+reset_shadow_mode:
+       /* reset shadow mode 2 */
+       ret = phy_set_clr_bits(phydev, MII_BCM7XXX_TEST, 0,
+                              MII_BCM7XXX_SHD_MODE_2);
+       if (ret < 0)
+               return ret;
+
+       return 0;
+}
+
+/* The 28nm EPHY does not support Clause 45 (MMD) used by bcm-phy-lib */
+static int bcm7xxx_28nm_ephy_apd_enable(struct phy_device *phydev)
+{
+       int ret;
+
+       /* set shadow mode 1 */
+       ret = phy_set_clr_bits(phydev, MII_BRCM_FET_BRCMTEST,
+                              MII_BRCM_FET_BT_SRE, 0);
+       if (ret < 0)
+               return ret;
+
+       /* Enable auto-power down */
+       ret = phy_set_clr_bits(phydev, MII_BRCM_FET_SHDW_AUXSTAT2,
+                              MII_BRCM_FET_SHDW_AS2_APDE, 0);
+       if (ret < 0)
+               return ret;
+
+       /* reset shadow mode 1 */
+       ret = phy_set_clr_bits(phydev, MII_BRCM_FET_BRCMTEST, 0,
+                              MII_BRCM_FET_BT_SRE);
+       if (ret < 0)
+               return ret;
+
+       return 0;
+}
+
+static int bcm7xxx_28nm_ephy_eee_enable(struct phy_device *phydev)
+{
+       int ret;
+
+       /* set shadow mode 2 */
+       ret = phy_set_clr_bits(phydev, MII_BCM7XXX_TEST,
+                              MII_BCM7XXX_SHD_MODE_2, 0);
+       if (ret < 0)
+               return ret;
+
+       /* Advertise supported modes */
+       ret = phy_write(phydev, MII_BCM7XXX_SHD_2_ADDR_CTRL,
+                       MII_BCM7XXX_SHD_3_AN_EEE_ADV);
+       if (ret < 0)
+               goto reset_shadow_mode;
+       ret = phy_write(phydev, MII_BCM7XXX_SHD_2_CTRL_STAT,
+                       MDIO_EEE_100TX);
+       if (ret < 0)
+               goto reset_shadow_mode;
+
+       /* Restore Defaults */
+       ret = phy_write(phydev, MII_BCM7XXX_SHD_2_ADDR_CTRL,
+                       MII_BCM7XXX_SHD_3_PCS_CTRL_2);
+       if (ret < 0)
+               goto reset_shadow_mode;
+       ret = phy_write(phydev, MII_BCM7XXX_SHD_2_CTRL_STAT,
+                       MII_BCM7XXX_PCS_CTRL_2_DEF);
+       if (ret < 0)
+               goto reset_shadow_mode;
+
+       ret = phy_write(phydev, MII_BCM7XXX_SHD_2_ADDR_CTRL,
+                       MII_BCM7XXX_SHD_3_EEE_THRESH);
+       if (ret < 0)
+               goto reset_shadow_mode;
+       ret = phy_write(phydev, MII_BCM7XXX_SHD_2_CTRL_STAT,
+                       MII_BCM7XXX_EEE_THRESH_DEF);
+       if (ret < 0)
+               goto reset_shadow_mode;
+
+       /* Enable EEE autonegotiation */
+       ret = phy_write(phydev, MII_BCM7XXX_SHD_2_ADDR_CTRL,
+                       MII_BCM7XXX_SHD_3_AN_STAT);
+       if (ret < 0)
+               goto reset_shadow_mode;
+       ret = phy_write(phydev, MII_BCM7XXX_SHD_2_CTRL_STAT,
+                       (MII_BCM7XXX_AN_NULL_MSG_EN | MII_BCM7XXX_AN_EEE_EN));
+       if (ret < 0)
+               goto reset_shadow_mode;
+
+reset_shadow_mode:
+       /* reset shadow mode 2 */
+       ret = phy_set_clr_bits(phydev, MII_BCM7XXX_TEST, 0,
+                              MII_BCM7XXX_SHD_MODE_2);
+       if (ret < 0)
+               return ret;
+
+       /* Restart autoneg */
+       phy_write(phydev, MII_BMCR,
+                 (BMCR_SPEED100 | BMCR_ANENABLE | BMCR_ANRESTART));
+
+       return 0;
+}
+
+static int bcm7xxx_28nm_ephy_config_init(struct phy_device *phydev)
+{
+       u8 rev = phydev->phy_id & ~phydev->drv->phy_id_mask;
+       int ret = 0;
+
+       pr_info_once("%s: %s PHY revision: 0x%02x\n",
+                    phydev_name(phydev), phydev->drv->name, rev);
+
+       /* Dummy read to a register to work around a possible issue upon reset
+        * where the internal inverter may not allow the first MDIO transaction
+        * to pass the MDIO management controller and make us return 0xffff for
+        * such reads.
+        */
+       phy_read(phydev, MII_BMSR);
+
+       /* Apply AFE software work-around if necessary */
+       if (rev == 0x01) {
+               ret = bcm7xxx_28nm_ephy_01_afe_config_init(phydev);
+               if (ret)
+                       return ret;
+       }
+
+       ret = bcm7xxx_28nm_ephy_eee_enable(phydev);
+       if (ret)
+               return ret;
+
+       return bcm7xxx_28nm_ephy_apd_enable(phydev);
+}
+
+static int bcm7xxx_28nm_ephy_resume(struct phy_device *phydev)
+{
+       int ret;
+
+       /* Re-apply workarounds coming out of suspend/resume */
+       ret = bcm7xxx_28nm_ephy_config_init(phydev);
+       if (ret)
+               return ret;
+
+       return genphy_config_aneg(phydev);
+}
+
 static int bcm7xxx_config_init(struct phy_device *phydev)
 {
        int ret;
@@ -434,6 +622,23 @@ static int bcm7xxx_28nm_probe(struct phy_device *phydev)
        .probe          = bcm7xxx_28nm_probe,                           \
 }
 
+#define BCM7XXX_28NM_EPHY(_oui, _name)                                 \
+{                                                                      \
+       .phy_id         = (_oui),                                       \
+       .phy_id_mask    = 0xfffffff0,                                   \
+       .name           = _name,                                        \
+       .features       = PHY_BASIC_FEATURES,                           \
+       .flags          = PHY_IS_INTERNAL,                              \
+       .config_init    = bcm7xxx_28nm_ephy_config_init,                \
+       .config_aneg    = genphy_config_aneg,                           \
+       .read_status    = genphy_read_status,                           \
+       .resume         = bcm7xxx_28nm_ephy_resume,                     \
+       .get_sset_count = bcm_phy_get_sset_count,                       \
+       .get_strings    = bcm_phy_get_strings,                          \
+       .get_stats      = bcm7xxx_28nm_get_phy_stats,                   \
+       .probe          = bcm7xxx_28nm_probe,                           \
+}
+
 #define BCM7XXX_40NM_EPHY(_oui, _name)                                 \
 {                                                                      \
        .phy_id         = (_oui),                                       \
@@ -450,6 +655,9 @@ static int bcm7xxx_28nm_probe(struct phy_device *phydev)
 
 static struct phy_driver bcm7xxx_driver[] = {
        BCM7XXX_28NM_GPHY(PHY_ID_BCM7250, "Broadcom BCM7250"),
+       BCM7XXX_28NM_EPHY(PHY_ID_BCM7260, "Broadcom BCM7260"),
+       BCM7XXX_28NM_EPHY(PHY_ID_BCM7268, "Broadcom BCM7268"),
+       BCM7XXX_28NM_EPHY(PHY_ID_BCM7271, "Broadcom BCM7271"),
        BCM7XXX_28NM_GPHY(PHY_ID_BCM7278, "Broadcom BCM7278"),
        BCM7XXX_28NM_GPHY(PHY_ID_BCM7364, "Broadcom BCM7364"),
        BCM7XXX_28NM_GPHY(PHY_ID_BCM7366, "Broadcom BCM7366"),
@@ -466,6 +674,9 @@ static struct phy_driver bcm7xxx_driver[] = {
 
 static struct mdio_device_id __maybe_unused bcm7xxx_tbl[] = {
        { PHY_ID_BCM7250, 0xfffffff0, },
+       { PHY_ID_BCM7260, 0xfffffff0, },
+       { PHY_ID_BCM7268, 0xfffffff0, },
+       { PHY_ID_BCM7271, 0xfffffff0, },
        { PHY_ID_BCM7278, 0xfffffff0, },
        { PHY_ID_BCM7364, 0xfffffff0, },
        { PHY_ID_BCM7366, 0xfffffff0, },
index 19865530e0b13c24e5d74def6cbd55e77b0ad378..b57f20e552ba83d0a0e39ef0593229b7ccd874d5 100644 (file)
@@ -133,14 +133,14 @@ static int dp83867_config_port_mirroring(struct phy_device *phydev)
                (struct dp83867_private *)phydev->priv;
        u16 val;
 
-       val = phy_read_mmd_indirect(phydev, DP83867_CFG4, DP83867_DEVADDR);
+       val = phy_read_mmd(phydev, DP83867_DEVADDR, DP83867_CFG4);
 
        if (dp83867->port_mirroring == DP83867_PORT_MIRROING_EN)
                val |= DP83867_CFG4_PORT_MIRROR_EN;
        else
                val &= ~DP83867_CFG4_PORT_MIRROR_EN;
 
-       phy_write_mmd_indirect(phydev, DP83867_CFG4, DP83867_DEVADDR, val);
+       phy_write_mmd(phydev, DP83867_DEVADDR, DP83867_CFG4, val);
 
        return 0;
 }
@@ -231,8 +231,7 @@ static int dp83867_config_init(struct phy_device *phydev)
                 * register's bit 11 (marked as RESERVED).
                 */
 
-               bs = phy_read_mmd_indirect(phydev, DP83867_STRAP_STS1,
-                                          DP83867_DEVADDR);
+               bs = phy_read_mmd(phydev, DP83867_DEVADDR, DP83867_STRAP_STS1);
                if (bs & DP83867_STRAP_STS1_RESERVED)
                        val &= ~DP83867_PHYCR_RESERVED_MASK;
 
@@ -243,8 +242,7 @@ static int dp83867_config_init(struct phy_device *phydev)
 
        if ((phydev->interface >= PHY_INTERFACE_MODE_RGMII_ID) &&
            (phydev->interface <= PHY_INTERFACE_MODE_RGMII_RXID)) {
-               val = phy_read_mmd_indirect(phydev, DP83867_RGMIICTL,
-                                           DP83867_DEVADDR);
+               val = phy_read_mmd(phydev, DP83867_DEVADDR, DP83867_RGMIICTL);
 
                if (phydev->interface == PHY_INTERFACE_MODE_RGMII_ID)
                        val |= (DP83867_RGMII_TX_CLK_DELAY_EN | DP83867_RGMII_RX_CLK_DELAY_EN);
@@ -255,25 +253,24 @@ static int dp83867_config_init(struct phy_device *phydev)
                if (phydev->interface == PHY_INTERFACE_MODE_RGMII_RXID)
                        val |= DP83867_RGMII_RX_CLK_DELAY_EN;
 
-               phy_write_mmd_indirect(phydev, DP83867_RGMIICTL,
-                                      DP83867_DEVADDR, val);
+               phy_write_mmd(phydev, DP83867_DEVADDR, DP83867_RGMIICTL, val);
 
                delay = (dp83867->rx_id_delay |
                        (dp83867->tx_id_delay << DP83867_RGMII_TX_CLK_DELAY_SHIFT));
 
-               phy_write_mmd_indirect(phydev, DP83867_RGMIIDCTL,
-                                      DP83867_DEVADDR, delay);
+               phy_write_mmd(phydev, DP83867_DEVADDR, DP83867_RGMIIDCTL,
+                             delay);
 
                if (dp83867->io_impedance >= 0) {
-                       val = phy_read_mmd_indirect(phydev, DP83867_IO_MUX_CFG,
-                                                   DP83867_DEVADDR);
+                       val = phy_read_mmd(phydev, DP83867_DEVADDR,
+                                          DP83867_IO_MUX_CFG);
 
                        val &= ~DP83867_IO_MUX_CFG_IO_IMPEDANCE_CTRL;
                        val |= dp83867->io_impedance &
                               DP83867_IO_MUX_CFG_IO_IMPEDANCE_CTRL;
 
-                       phy_write_mmd_indirect(phydev, DP83867_IO_MUX_CFG,
-                                              DP83867_DEVADDR, val);
+                       phy_write_mmd(phydev, DP83867_DEVADDR,
+                                     DP83867_IO_MUX_CFG, val);
                }
        }
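The mechanical pattern in this hunk, and in the driver conversions that follow, is that the new helpers reverse the argument order of the old indirect calls: the MMD device address now comes before the register number. A side-by-side sketch using identifiers from the surrounding code:

/* old: register first, MMD device address second */
val = phy_read_mmd_indirect(phydev, DP83867_CFG4, DP83867_DEVADDR);
phy_write_mmd_indirect(phydev, DP83867_CFG4, DP83867_DEVADDR, val);

/* new: MMD device address first, register second */
val = phy_read_mmd(phydev, DP83867_DEVADDR, DP83867_CFG4);
phy_write_mmd(phydev, DP83867_DEVADDR, DP83867_CFG4, val);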
 
index b1fd7bb0e4dbebe2da6f7f1b13857b1b64fa76f9..55f8c52dd2f1b719b98950125c901eadc8383777 100644 (file)
@@ -166,13 +166,13 @@ static int xway_gphy_config_init(struct phy_device *phydev)
        /* Clear all pending interrupts */
        phy_read(phydev, XWAY_MDIO_ISTAT);
 
-       phy_write_mmd_indirect(phydev, XWAY_MMD_LEDCH, MDIO_MMD_VEND2,
-                              XWAY_MMD_LEDCH_NACS_NONE |
-                              XWAY_MMD_LEDCH_SBF_F02HZ |
-                              XWAY_MMD_LEDCH_FBF_F16HZ);
-       phy_write_mmd_indirect(phydev, XWAY_MMD_LEDCL, MDIO_MMD_VEND2,
-                              XWAY_MMD_LEDCH_CBLINK_NONE |
-                              XWAY_MMD_LEDCH_SCAN_NONE);
+       phy_write_mmd(phydev, MDIO_MMD_VEND2, XWAY_MMD_LEDCH,
+                     XWAY_MMD_LEDCH_NACS_NONE |
+                     XWAY_MMD_LEDCH_SBF_F02HZ |
+                     XWAY_MMD_LEDCH_FBF_F16HZ);
+       phy_write_mmd(phydev, MDIO_MMD_VEND2, XWAY_MMD_LEDCL,
+                     XWAY_MMD_LEDCH_CBLINK_NONE |
+                     XWAY_MMD_LEDCH_SCAN_NONE);
 
        /**
         * In most cases only one LED is connected to this phy, so
@@ -183,12 +183,12 @@ static int xway_gphy_config_init(struct phy_device *phydev)
        ledxh = XWAY_MMD_LEDxH_BLINKF_NONE | XWAY_MMD_LEDxH_CON_LINK10XX;
        ledxl = XWAY_MMD_LEDxL_PULSE_TXACT | XWAY_MMD_LEDxL_PULSE_RXACT |
                XWAY_MMD_LEDxL_BLINKS_NONE;
-       phy_write_mmd_indirect(phydev, XWAY_MMD_LED0H, MDIO_MMD_VEND2, ledxh);
-       phy_write_mmd_indirect(phydev, XWAY_MMD_LED0L, MDIO_MMD_VEND2, ledxl);
-       phy_write_mmd_indirect(phydev, XWAY_MMD_LED1H, MDIO_MMD_VEND2, ledxh);
-       phy_write_mmd_indirect(phydev, XWAY_MMD_LED1L, MDIO_MMD_VEND2, ledxl);
-       phy_write_mmd_indirect(phydev, XWAY_MMD_LED2H, MDIO_MMD_VEND2, ledxh);
-       phy_write_mmd_indirect(phydev, XWAY_MMD_LED2L, MDIO_MMD_VEND2, ledxl);
+       phy_write_mmd(phydev, MDIO_MMD_VEND2, XWAY_MMD_LED0H, ledxh);
+       phy_write_mmd(phydev, MDIO_MMD_VEND2, XWAY_MMD_LED0L, ledxl);
+       phy_write_mmd(phydev, MDIO_MMD_VEND2, XWAY_MMD_LED1H, ledxh);
+       phy_write_mmd(phydev, MDIO_MMD_VEND2, XWAY_MMD_LED1L, ledxl);
+       phy_write_mmd(phydev, MDIO_MMD_VEND2, XWAY_MMD_LED2H, ledxh);
+       phy_write_mmd(phydev, MDIO_MMD_VEND2, XWAY_MMD_LED2L, ledxl);
 
        return 0;
 }
index f9d0fa315a47624409cb054e762a6c8b6537a7b6..272b051a019975110aa1d117da993cf18cb98816 100644 (file)
@@ -1883,17 +1883,6 @@ static int m88e1510_probe(struct phy_device *phydev)
        return m88e1510_hwmon_probe(phydev);
 }
 
-static void marvell_remove(struct phy_device *phydev)
-{
-#ifdef CONFIG_HWMON
-
-       struct marvell_priv *priv = phydev->priv;
-
-       if (priv && priv->hwmon_dev)
-               hwmon_device_unregister(priv->hwmon_dev);
-#endif
-}
-
 static struct phy_driver marvell_drivers[] = {
        {
                .phy_id = MARVELL_PHY_ID_88E1101,
@@ -1974,7 +1963,6 @@ static struct phy_driver marvell_drivers[] = {
                .features = PHY_GBIT_FEATURES,
                .flags = PHY_HAS_INTERRUPT,
                .probe = &m88e1121_probe,
-               .remove = &marvell_remove,
                .config_init = &m88e1121_config_init,
                .config_aneg = &m88e1121_config_aneg,
                .read_status = &marvell_read_status,
@@ -2087,7 +2075,6 @@ static struct phy_driver marvell_drivers[] = {
                .features = PHY_GBIT_FEATURES | SUPPORTED_FIBRE,
                .flags = PHY_HAS_INTERRUPT,
                .probe = &m88e1510_probe,
-               .remove = &marvell_remove,
                .config_init = &m88e1510_config_init,
                .config_aneg = &m88e1510_config_aneg,
                .read_status = &marvell_read_status,
@@ -2109,7 +2096,6 @@ static struct phy_driver marvell_drivers[] = {
                .features = PHY_GBIT_FEATURES,
                .flags = PHY_HAS_INTERRUPT,
                .probe = m88e1510_probe,
-               .remove = &marvell_remove,
                .config_init = &marvell_config_init,
                .config_aneg = &m88e1510_config_aneg,
                .read_status = &marvell_read_status,
@@ -2127,7 +2113,6 @@ static struct phy_driver marvell_drivers[] = {
                .phy_id_mask = MARVELL_PHY_ID_MASK,
                .name = "Marvell 88E1545",
                .probe = m88e1510_probe,
-               .remove = &marvell_remove,
                .features = PHY_GBIT_FEATURES,
                .flags = PHY_HAS_INTERRUPT,
                .config_init = &marvell_config_init,
index 8c73b2e771ddd7c865900ceed1d9abdcefff8a21..34395230ce709bd68ba93ca46eff089bb576668f 100644 (file)
@@ -1,7 +1,7 @@
 /*
  * Broadcom UniMAC MDIO bus controller driver
  *
- * Copyright (C) 2014, Broadcom Corporation
+ * Copyright (C) 2014-2017 Broadcom
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
@@ -228,6 +228,7 @@ static int unimac_mdio_remove(struct platform_device *pdev)
 }
 
 static const struct of_device_id unimac_mdio_ids[] = {
+       { .compatible = "brcm,genet-mdio-v5", },
        { .compatible = "brcm,genet-mdio-v4", },
        { .compatible = "brcm,genet-mdio-v3", },
        { .compatible = "brcm,genet-mdio-v2", },
index 6b988f77da08fca5ba9e7efec8c4af354ad51ecc..1861f387820d61d527ed552e5ddb24c0009234b3 100644 (file)
@@ -24,10 +24,12 @@ static DEFINE_MUTEX(mdio_board_lock);
  * @bus: MDIO bus whose id is matched against registered board entries
  * @cb: callback used to create an MDIO device for each matching entry
  * Context: can sleep
  */
-void mdiobus_setup_mdiodev_from_board_info(struct mii_bus *bus)
+void mdiobus_setup_mdiodev_from_board_info(struct mii_bus *bus,
+                                          int (*cb)
+                                          (struct mii_bus *bus,
+                                           struct mdio_board_info *bi))
 {
        struct mdio_board_entry *be;
-       struct mdio_device *mdiodev;
        struct mdio_board_info *bi;
        int ret;
 
@@ -38,23 +40,14 @@ void mdiobus_setup_mdiodev_from_board_info(struct mii_bus *bus)
                if (strcmp(bus->id, bi->bus_id))
                        continue;
 
-               mdiodev = mdio_device_create(bus, bi->mdio_addr);
-               if (IS_ERR(mdiodev))
+               ret = cb(bus, bi);
+               if (ret)
                        continue;
 
-               strncpy(mdiodev->modalias, bi->modalias,
-                       sizeof(mdiodev->modalias));
-               mdiodev->bus_match = mdio_device_bus_match;
-               mdiodev->dev.platform_data = (void *)bi->platform_data;
-
-               ret = mdio_device_register(mdiodev);
-               if (ret) {
-                       mdio_device_free(mdiodev);
-                       continue;
-               }
        }
        mutex_unlock(&mdio_board_lock);
 }
+EXPORT_SYMBOL(mdiobus_setup_mdiodev_from_board_info);
 
 /**
  * mdio_register_board_info - register MDIO devices for a given board
@@ -84,3 +77,4 @@ int mdiobus_register_board_info(const struct mdio_board_info *info,
 
        return 0;
 }
+EXPORT_SYMBOL(mdiobus_register_board_info);
index 00f98163e90eff985e24bf95e8fd7293d08b0763..3a7f143904e8c5948f94b1f570b9310368855ac8 100644 (file)
@@ -14,6 +14,9 @@ struct mdio_board_entry {
        struct mdio_board_info  board_info;
 };
 
-void mdiobus_setup_mdiodev_from_board_info(struct mii_bus *bus);
+void mdiobus_setup_mdiodev_from_board_info(struct mii_bus *bus,
+                                          int (*cb)
+                                          (struct mii_bus *bus,
+                                           struct mdio_board_info *bi));
 
 #endif /* __MDIO_BOARD_INFO_H */
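For context, a board feeds this machinery by registering an mdio_board_info entry early at boot; the callback installed via mdiobus_setup_mdiodev_from_board_info() then instantiates the device when a bus with a matching id registers. A hedged sketch; the bus id, modalias, and address below are made-up illustrations, with declarations assumed to come from <linux/mdio.h>:

#include <linux/mdio.h>

static const struct mdio_board_info example_mdio_bdinfo = {
	.bus_id    = "unimac-mdio-0",	/* must match bus->id exactly */
	.modalias  = "example-switch",	/* drives mdio_device_bus_match() */
	.mdio_addr = 30,
};

static int __init example_register_mdio_board_info(void)
{
	return mdiobus_register_board_info(&example_mdio_bdinfo, 1);
}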
index f095051beb549133db52cf3acd41ad9e3dbed29c..3e2ac07b6e372322c53125014cbac8b40ea1434f 100644 (file)
@@ -229,7 +229,7 @@ static int xgene_xfi_mdio_write(struct mii_bus *bus, int phy_id,
 
        val = SET_VAL(HSTPHYADX, phy_id) | SET_VAL(HSTREGADX, reg) |
              SET_VAL(HSTMIIMWRDAT, data);
-       xgene_enet_wr_mdio_csr(addr, MIIM_FIELD_ADDR, data);
+       xgene_enet_wr_mdio_csr(addr, MIIM_FIELD_ADDR, val);
 
        val = HSTLDCMD | SET_VAL(HSTMIIMCMD, MIIM_CMD_LEGACY_WRITE);
        xgene_enet_wr_mdio_csr(addr, MIIM_COMMAND_ADDR, val);
index fa7d51f14869efa8b94ce161e5bd4cb96b4951d3..5a214f3b867184e9b016b4802850393516d2cba3 100644 (file)
@@ -289,6 +289,36 @@ static inline void of_mdiobus_link_mdiodev(struct mii_bus *mdio,
 }
 #endif
 
+/**
+ * mdiobus_create_device - create a full MDIO device given a
+ * mdio_board_info structure
+ * @bus: MDIO bus to create the devices on
+ * @bi: mdio_board_info structure describing the devices
+ *
+ * Returns 0 on success or < 0 on error.
+ */
+static int mdiobus_create_device(struct mii_bus *bus,
+                                struct mdio_board_info *bi)
+{
+       struct mdio_device *mdiodev;
+       int ret = 0;
+
+       mdiodev = mdio_device_create(bus, bi->mdio_addr);
+       if (IS_ERR(mdiodev))
+               return -ENODEV;
+
+       strncpy(mdiodev->modalias, bi->modalias,
+               sizeof(mdiodev->modalias));
+       mdiodev->bus_match = mdio_device_bus_match;
+       mdiodev->dev.platform_data = (void *)bi->platform_data;
+
+       ret = mdio_device_register(mdiodev);
+       if (ret)
+               mdio_device_free(mdiodev);
+
+       return ret;
+}
+
 /**
  * __mdiobus_register - bring up all the PHYs on a given bus and attach them to bus
  * @bus: target mii_bus
@@ -345,7 +375,7 @@ int __mdiobus_register(struct mii_bus *bus, struct module *owner)
                }
        }
 
-       mdiobus_setup_mdiodev_from_board_info(bus);
+       mdiobus_setup_mdiodev_from_board_info(bus, mdiobus_create_device);
 
        bus->state = MDIOBUS_REGISTERED;
        pr_info("%s: probed\n", bus->name);
@@ -648,9 +678,18 @@ int __init mdio_bus_init(void)
 
        return ret;
 }
+EXPORT_SYMBOL_GPL(mdio_bus_init);
 
+#if IS_ENABLED(CONFIG_PHYLIB)
 void mdio_bus_exit(void)
 {
        class_unregister(&mdio_bus_class);
        bus_unregister(&mdio_bus_type);
 }
+EXPORT_SYMBOL_GPL(mdio_bus_exit);
+#else
+module_init(mdio_bus_init);
+/* no module_exit, intentional */
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("MDIO bus/device layer");
+#endif
index 6742070ca676f57694a9a6cb11364941deb520a0..b847184de6fc918ab9ff34db5a7d2cbbf690e721 100644 (file)
@@ -637,8 +637,7 @@ static int ksz8873mll_config_aneg(struct phy_device *phydev)
  * MMD extended PHY registers.
  */
 static int
-ksz9021_rd_mmd_phyreg(struct phy_device *phydev, int ptrad, int devnum,
-                     int regnum)
+ksz9021_rd_mmd_phyreg(struct phy_device *phydev, int devad, u16 regnum)
 {
        return -1;
 }
@@ -646,10 +645,10 @@ ksz9021_rd_mmd_phyreg(struct phy_device *phydev, int ptrad, int devnum,
 /* This routine does nothing since the Micrel ksz9021 does not support
  * standard IEEE MMD extended PHY registers.
  */
-static void
-ksz9021_wr_mmd_phyreg(struct phy_device *phydev, int ptrad, int devnum,
-                     int regnum, u32 val)
+static int
+ksz9021_wr_mmd_phyreg(struct phy_device *phydev, int devad, u16 regnum, u16 val)
 {
+       return -1;
 }
 
 static int kszphy_get_sset_count(struct phy_device *phydev)
@@ -962,8 +961,8 @@ static struct phy_driver ksphy_driver[] = {
        .get_stats      = kszphy_get_stats,
        .suspend        = genphy_suspend,
        .resume         = genphy_resume,
-       .read_mmd_indirect = ksz9021_rd_mmd_phyreg,
-       .write_mmd_indirect = ksz9021_wr_mmd_phyreg,
+       .read_mmd       = ksz9021_rd_mmd_phyreg,
+       .write_mmd      = ksz9021_wr_mmd_phyreg,
 }, {
        .phy_id         = PHY_ID_KSZ9031,
        .phy_id_mask    = MICREL_PHY_ID_MASK,
index 324fbf6ad8ff8fcc51e92cd2f4d26399b00dfc41..2b2f543cf9f030dbea5b2c478f6b8930f5fa520e 100644 (file)
@@ -78,9 +78,8 @@ static int lan88xx_probe(struct phy_device *phydev)
        priv->wolopts = 0;
 
        /* these values can be used to identify internal PHY */
-       priv->chip_id = phy_read_mmd_indirect(phydev, LAN88XX_MMD3_CHIP_ID, 3);
-       priv->chip_rev = phy_read_mmd_indirect(phydev, LAN88XX_MMD3_CHIP_REV,
-                                              3);
+       priv->chip_id = phy_read_mmd(phydev, 3, LAN88XX_MMD3_CHIP_ID);
+       priv->chip_rev = phy_read_mmd(phydev, 3, LAN88XX_MMD3_CHIP_REV);
 
        phydev->priv = priv;
 
diff --git a/drivers/net/phy/phy-core.c b/drivers/net/phy/phy-core.c
new file mode 100644 (file)
index 0000000..357a4d0
--- /dev/null
@@ -0,0 +1,101 @@
+/*
+ * Core PHY library, taken from phy.c
+ *
+ * This program is free software; you can redistribute  it and/or modify it
+ * under  the terms of  the GNU General  Public License as published by the
+ * Free Software Foundation;  either version 2 of the  License, or (at your
+ * option) any later version.
+ */
+#include <linux/export.h>
+#include <linux/phy.h>
+
+static void mmd_phy_indirect(struct mii_bus *bus, int phy_addr, int devad,
+                            u16 regnum)
+{
+       /* Write the desired MMD Devad */
+       bus->write(bus, phy_addr, MII_MMD_CTRL, devad);
+
+       /* Write the desired MMD register address */
+       bus->write(bus, phy_addr, MII_MMD_DATA, regnum);
+
+       /* Select the Function : DATA with no post increment */
+       bus->write(bus, phy_addr, MII_MMD_CTRL, devad | MII_MMD_CTRL_NOINCR);
+}
+
+/**
+ * phy_read_mmd - Convenience function for reading a register
+ * from an MMD on a given PHY.
+ * @phydev: The phy_device struct
+ * @devad: The MMD to read from (0..31)
+ * @regnum: The register on the MMD to read (0..65535)
+ *
+ * Same rules as for phy_read();
+ */
+int phy_read_mmd(struct phy_device *phydev, int devad, u32 regnum)
+{
+       int val;
+
+       if (regnum > (u16)~0 || devad >= 32)
+               return -EINVAL;
+
+       if (phydev->drv->read_mmd) {
+               val = phydev->drv->read_mmd(phydev, devad, regnum);
+       } else if (phydev->is_c45) {
+               u32 addr = MII_ADDR_C45 | (devad << 16) | (regnum & 0xffff);
+
+               val = mdiobus_read(phydev->mdio.bus, phydev->mdio.addr, addr);
+       } else {
+               struct mii_bus *bus = phydev->mdio.bus;
+               int phy_addr = phydev->mdio.addr;
+
+               mutex_lock(&bus->mdio_lock);
+               mmd_phy_indirect(bus, phy_addr, devad, regnum);
+
+               /* Read the content of the MMD's selected register */
+               val = bus->read(bus, phy_addr, MII_MMD_DATA);
+               mutex_unlock(&bus->mdio_lock);
+       }
+       return val;
+}
+EXPORT_SYMBOL(phy_read_mmd);
+
+/**
+ * phy_write_mmd - Convenience function for writing a register
+ * on an MMD on a given PHY.
+ * @phydev: The phy_device struct
+ * @devad: The MMD to write to
+ * @regnum: The register on the MMD to write
+ * @val: value to write to @regnum
+ *
+ * Same rules as for phy_write();
+ */
+int phy_write_mmd(struct phy_device *phydev, int devad, u32 regnum, u16 val)
+{
+       int ret;
+
+       if (regnum > (u16)~0 || devad >= 32)
+               return -EINVAL;
+
+       if (phydev->drv->write_mmd) {
+               ret = phydev->drv->write_mmd(phydev, devad, regnum, val);
+       } else if (phydev->is_c45) {
+               u32 addr = MII_ADDR_C45 | (devad << 16) | (regnum & 0xffff);
+
+               ret = mdiobus_write(phydev->mdio.bus, phydev->mdio.addr,
+                                   addr, val);
+       } else {
+               struct mii_bus *bus = phydev->mdio.bus;
+               int phy_addr = phydev->mdio.addr;
+
+               mutex_lock(&bus->mdio_lock);
+               mmd_phy_indirect(bus, phy_addr, devad, regnum);
+
+               /* Write the data into MMD's selected register */
+               bus->write(bus, phy_addr, MII_MMD_DATA, val);
+               mutex_unlock(&bus->mdio_lock);
+
+               ret = 0;
+       }
+       return ret;
+}
+EXPORT_SYMBOL(phy_write_mmd);
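A short usage sketch of the consolidated helpers, mirroring the EEE accesses converted later in phy.c; the function name is hypothetical, and the MDIO_* constants come from the existing <linux/mdio.h> UAPI:

static int example_advertise_100tx_eee(struct phy_device *phydev)
{
	int cap;

	/* what the PHY can do, per the PCS EEE ability register */
	cap = phy_read_mmd(phydev, MDIO_MMD_PCS, MDIO_PCS_EEE_ABLE);
	if (cap < 0)
		return cap;

	/* advertise 100BASE-TX EEE only if the PHY supports it */
	return phy_write_mmd(phydev, MDIO_MMD_AN, MDIO_AN_EEE_ADV,
			     cap & MDIO_EEE_100TX);
}

Whether the access goes through a driver override, a native Clause 45 transaction, or the Clause 22 indirect sequence is now decided in one place rather than in each caller.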
index 1be69d8bc90948e82f92736b8f7ee9d274b9bd2b..bf7d614ff18f838d7fb059dee50fa1264fcee0ca 100644 (file)
@@ -50,8 +50,22 @@ static const char *phy_speed_to_str(int speed)
                return "1Gbps";
        case SPEED_2500:
                return "2.5Gbps";
+       case SPEED_5000:
+               return "5Gbps";
        case SPEED_10000:
                return "10Gbps";
+       case SPEED_20000:
+               return "20Gbps";
+       case SPEED_25000:
+               return "25Gbps";
+       case SPEED_40000:
+               return "40Gbps";
+       case SPEED_50000:
+               return "50Gbps";
+       case SPEED_56000:
+               return "56Gbps";
+       case SPEED_100000:
+               return "100Gbps";
        case SPEED_UNKNOWN:
                return "Unknown";
        default:
@@ -681,7 +695,7 @@ void phy_stop_machine(struct phy_device *phydev)
        cancel_delayed_work_sync(&phydev->state_queue);
 
        mutex_lock(&phydev->lock);
-       if (phydev->state > PHY_UP)
+       if (phydev->state > PHY_UP && phydev->state != PHY_HALTED)
                phydev->state = PHY_UP;
        mutex_unlock(&phydev->lock);
 }
@@ -1192,91 +1206,6 @@ void phy_mac_interrupt(struct phy_device *phydev, int new_link)
 }
 EXPORT_SYMBOL(phy_mac_interrupt);
 
-static inline void mmd_phy_indirect(struct mii_bus *bus, int prtad, int devad,
-                                   int addr)
-{
-       /* Write the desired MMD Devad */
-       bus->write(bus, addr, MII_MMD_CTRL, devad);
-
-       /* Write the desired MMD register address */
-       bus->write(bus, addr, MII_MMD_DATA, prtad);
-
-       /* Select the Function : DATA with no post increment */
-       bus->write(bus, addr, MII_MMD_CTRL, (devad | MII_MMD_CTRL_NOINCR));
-}
-
-/**
- * phy_read_mmd_indirect - reads data from the MMD registers
- * @phydev: The PHY device bus
- * @prtad: MMD Address
- * @devad: MMD DEVAD
- *
- * Description: it reads data from the MMD registers (clause 22 to access to
- * clause 45) of the specified phy address.
- * To read these register we have:
- * 1) Write reg 13 // DEVAD
- * 2) Write reg 14 // MMD Address
- * 3) Write reg 13 // MMD Data Command for MMD DEVAD
- * 3) Read  reg 14 // Read MMD data
- */
-int phy_read_mmd_indirect(struct phy_device *phydev, int prtad, int devad)
-{
-       struct phy_driver *phydrv = phydev->drv;
-       int addr = phydev->mdio.addr;
-       int value = -1;
-
-       if (!phydrv->read_mmd_indirect) {
-               struct mii_bus *bus = phydev->mdio.bus;
-
-               mutex_lock(&bus->mdio_lock);
-               mmd_phy_indirect(bus, prtad, devad, addr);
-
-               /* Read the content of the MMD's selected register */
-               value = bus->read(bus, addr, MII_MMD_DATA);
-               mutex_unlock(&bus->mdio_lock);
-       } else {
-               value = phydrv->read_mmd_indirect(phydev, prtad, devad, addr);
-       }
-       return value;
-}
-EXPORT_SYMBOL(phy_read_mmd_indirect);
-
-/**
- * phy_write_mmd_indirect - writes data to the MMD registers
- * @phydev: The PHY device
- * @prtad: MMD Address
- * @devad: MMD DEVAD
- * @data: data to write in the MMD register
- *
- * Description: Write data from the MMD registers of the specified
- * phy address.
- * To write these register we have:
- * 1) Write reg 13 // DEVAD
- * 2) Write reg 14 // MMD Address
- * 3) Write reg 13 // MMD Data Command for MMD DEVAD
- * 3) Write reg 14 // Write MMD data
- */
-void phy_write_mmd_indirect(struct phy_device *phydev, int prtad,
-                                  int devad, u32 data)
-{
-       struct phy_driver *phydrv = phydev->drv;
-       int addr = phydev->mdio.addr;
-
-       if (!phydrv->write_mmd_indirect) {
-               struct mii_bus *bus = phydev->mdio.bus;
-
-               mutex_lock(&bus->mdio_lock);
-               mmd_phy_indirect(bus, prtad, devad, addr);
-
-               /* Write the data into MMD's selected register */
-               bus->write(bus, addr, MII_MMD_DATA, data);
-               mutex_unlock(&bus->mdio_lock);
-       } else {
-               phydrv->write_mmd_indirect(phydev, prtad, devad, addr, data);
-       }
-}
-EXPORT_SYMBOL(phy_write_mmd_indirect);
-
 /**
  * phy_init_eee - init and check the EEE feature
  * @phydev: target phy_device struct
@@ -1293,15 +1222,8 @@ int phy_init_eee(struct phy_device *phydev, bool clk_stop_enable)
                return -EIO;
 
         * According to 802.3az, EEE is supported only in full-duplex mode.
-        * Also EEE feature is active when core is operating with MII, GMII
-        * or RGMII (all kinds). Internal PHYs are also allowed to proceed and
-        * should return an error if they do not support EEE.
         */
-       if ((phydev->duplex == DUPLEX_FULL) &&
-           ((phydev->interface == PHY_INTERFACE_MODE_MII) ||
-           (phydev->interface == PHY_INTERFACE_MODE_GMII) ||
-            phy_interface_is_rgmii(phydev) ||
-            phy_is_internal(phydev))) {
+       if (phydev->duplex == DUPLEX_FULL) {
                int eee_lp, eee_cap, eee_adv;
                u32 lp, cap, adv;
                int status;
@@ -1312,8 +1234,7 @@ int phy_init_eee(struct phy_device *phydev, bool clk_stop_enable)
                        return status;
 
                /* First check if the EEE ability is supported */
-               eee_cap = phy_read_mmd_indirect(phydev, MDIO_PCS_EEE_ABLE,
-                                               MDIO_MMD_PCS);
+               eee_cap = phy_read_mmd(phydev, MDIO_MMD_PCS, MDIO_PCS_EEE_ABLE);
                if (eee_cap <= 0)
                        goto eee_exit_err;
 
@@ -1324,13 +1245,11 @@ int phy_init_eee(struct phy_device *phydev, bool clk_stop_enable)
                /* Check which link settings negotiated and verify it in
                 * the EEE advertising registers.
                 */
-               eee_lp = phy_read_mmd_indirect(phydev, MDIO_AN_EEE_LPABLE,
-                                              MDIO_MMD_AN);
+               eee_lp = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_EEE_LPABLE);
                if (eee_lp <= 0)
                        goto eee_exit_err;
 
-               eee_adv = phy_read_mmd_indirect(phydev, MDIO_AN_EEE_ADV,
-                                               MDIO_MMD_AN);
+               eee_adv = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_EEE_ADV);
                if (eee_adv <= 0)
                        goto eee_exit_err;
 
@@ -1343,14 +1262,12 @@ int phy_init_eee(struct phy_device *phydev, bool clk_stop_enable)
                        /* Configure the PHY to stop receiving xMII
                         * clock while it is signaling LPI.
                         */
-                       int val = phy_read_mmd_indirect(phydev, MDIO_CTRL1,
-                                                       MDIO_MMD_PCS);
+                       int val = phy_read_mmd(phydev, MDIO_MMD_PCS, MDIO_CTRL1);
                        if (val < 0)
                                return val;
 
                        val |= MDIO_PCS_CTRL1_CLKSTOP_EN;
-                       phy_write_mmd_indirect(phydev, MDIO_CTRL1,
-                                              MDIO_MMD_PCS, val);
+                       phy_write_mmd(phydev, MDIO_MMD_PCS, MDIO_CTRL1, val);
                }
 
                return 0; /* EEE supported */
@@ -1372,7 +1289,7 @@ int phy_get_eee_err(struct phy_device *phydev)
        if (!phydev->drv)
                return -EIO;
 
-       return phy_read_mmd_indirect(phydev, MDIO_PCS_EEE_WK_ERR, MDIO_MMD_PCS);
+       return phy_read_mmd(phydev, MDIO_MMD_PCS, MDIO_PCS_EEE_WK_ERR);
 }
 EXPORT_SYMBOL(phy_get_eee_err);
 
@@ -1392,19 +1309,19 @@ int phy_ethtool_get_eee(struct phy_device *phydev, struct ethtool_eee *data)
                return -EIO;
 
        /* Get Supported EEE */
-       val = phy_read_mmd_indirect(phydev, MDIO_PCS_EEE_ABLE, MDIO_MMD_PCS);
+       val = phy_read_mmd(phydev, MDIO_MMD_PCS, MDIO_PCS_EEE_ABLE);
        if (val < 0)
                return val;
        data->supported = mmd_eee_cap_to_ethtool_sup_t(val);
 
        /* Get advertisement EEE */
-       val = phy_read_mmd_indirect(phydev, MDIO_AN_EEE_ADV, MDIO_MMD_AN);
+       val = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_EEE_ADV);
        if (val < 0)
                return val;
        data->advertised = mmd_eee_adv_to_ethtool_adv_t(val);
 
        /* Get LP advertisement EEE */
-       val = phy_read_mmd_indirect(phydev, MDIO_AN_EEE_LPABLE, MDIO_MMD_AN);
+       val = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_EEE_LPABLE);
        if (val < 0)
                return val;
        data->lp_advertised = mmd_eee_adv_to_ethtool_adv_t(val);
@@ -1422,15 +1339,37 @@ EXPORT_SYMBOL(phy_ethtool_get_eee);
  */
 int phy_ethtool_set_eee(struct phy_device *phydev, struct ethtool_eee *data)
 {
-       int val = ethtool_adv_to_mmd_eee_adv_t(data->advertised);
+       int cap, old_adv, adv, ret;
 
        if (!phydev->drv)
                return -EIO;
 
+       /* Get Supported EEE */
+       cap = phy_read_mmd(phydev, MDIO_MMD_PCS, MDIO_PCS_EEE_ABLE);
+       if (cap < 0)
+               return cap;
+
+       old_adv = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_EEE_ADV);
+       if (old_adv < 0)
+               return old_adv;
+
+       adv = ethtool_adv_to_mmd_eee_adv_t(data->advertised) & cap;
+
        /* Mask prohibited EEE modes */
-       val &= ~phydev->eee_broken_modes;
+       adv &= ~phydev->eee_broken_modes;
+
+       if (old_adv != adv) {
+               ret = phy_write_mmd(phydev, MDIO_MMD_AN, MDIO_AN_EEE_ADV, adv);
+               if (ret < 0)
+                       return ret;
 
-       phy_write_mmd_indirect(phydev, MDIO_AN_EEE_ADV, MDIO_MMD_AN, val);
+               /* Restart autonegotiation so the new modes get sent to the
+                * link partner.
+                */
+               ret = genphy_restart_aneg(phydev);
+               if (ret < 0)
+                       return ret;
+       }
 
        return 0;
 }
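A MAC driver typically exposes these helpers directly from its ethtool_ops; a hypothetical sketch (the foo_ names are placeholders, and the PHY is assumed to be attached via ndev->phydev):

static int foo_get_eee(struct net_device *ndev, struct ethtool_eee *eee)
{
	if (!ndev->phydev)
		return -ENODEV;

	return phy_ethtool_get_eee(ndev->phydev, eee);
}

static int foo_set_eee(struct net_device *ndev, struct ethtool_eee *eee)
{
	if (!ndev->phydev)
		return -ENODEV;

	/* phylib clamps the request to the PHY's EEE ability, masks
	 * broken modes, and restarts autoneg only on a real change
	 */
	return phy_ethtool_set_eee(ndev->phydev, eee);
}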
index 5198ccfa347f8b4bfb5ee5e0c69ee12fb44ec681..1219eeab69d1ff4b94fceb942e38c9b9e989ae40 100644 (file)
@@ -1217,7 +1217,7 @@ static int genphy_config_eee_advert(struct phy_device *phydev)
         * supported by the phy. If we read 0, EEE is not advertised.
         * In either case, we don't need to continue.
         */
-       adv = phy_read_mmd_indirect(phydev, MDIO_AN_EEE_ADV, MDIO_MMD_AN);
+       adv = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_EEE_ADV);
        if (adv <= 0)
                return 0;
 
@@ -1228,7 +1228,7 @@ static int genphy_config_eee_advert(struct phy_device *phydev)
        if (old_adv == adv)
                return 0;
 
-       phy_write_mmd_indirect(phydev, MDIO_AN_EEE_ADV, MDIO_MMD_AN, adv);
+       phy_write_mmd(phydev, MDIO_MMD_AN, MDIO_AN_EEE_ADV, adv);
 
        return 1;
 }
index fb32eaf2255d84a7de2842f60f91b52cf17c1717..cef6967b039617fdd909e783ca479a8241eebd88 100644 (file)
@@ -20,6 +20,7 @@
 #include <linux/module.h>
 #include <linux/mii.h>
 #include <linux/ethtool.h>
+#include <linux/of.h>
 #include <linux/phy.h>
 #include <linux/netdevice.h>
 #include <linux/smscphy.h>
index 93ffedfa299412f78af2c72fedc991101a52a451..1e2d4f1179da31ed1e458af5e29cc77314f875ad 100644 (file)
@@ -491,13 +491,14 @@ static int ks8995_probe(struct spi_device *spi)
        if (err)
                return err;
 
-       ks->regs_attr.size = ks->chip->regs_size;
        memcpy(&ks->regs_attr, &ks8995_registers_attr, sizeof(ks->regs_attr));
+       ks->regs_attr.size = ks->chip->regs_size;
 
        err = ks8995_reset(ks);
        if (err)
                return err;
 
+       sysfs_attr_init(&ks->regs_attr.attr);
        err = sysfs_create_bin_file(&spi->dev.kobj, &ks->regs_attr);
        if (err) {
                dev_err(&spi->dev, "unable to create sysfs file, err=%d\n",
index 4a24b5d15f5a5dfe770d184533f70f7140d9e145..1b52520715aec6f972626361a6aa93accf809301 100644 (file)
@@ -2072,6 +2072,7 @@ static int team_dev_type_check_change(struct net_device *dev,
 static void team_setup(struct net_device *dev)
 {
        ether_setup(dev);
+       dev->max_mtu = ETH_MAX_MTU;
 
        dev->netdev_ops = &team_netdev_ops;
        dev->ethtool_ops = &team_ethtool_ops;
index dc1b1dd9157c16d1bbd3505751a8782e020ab71a..bbd707b9ef7a6a305804ed0d56c3fc0e1db7d565 100644 (file)
@@ -822,7 +822,18 @@ static void tun_net_uninit(struct net_device *dev)
 /* Net device open. */
 static int tun_net_open(struct net_device *dev)
 {
+       struct tun_struct *tun = netdev_priv(dev);
+       int i;
+
        netif_tx_start_all_queues(dev);
+
+       for (i = 0; i < tun->numqueues; i++) {
+               struct tun_file *tfile;
+
+               tfile = rtnl_dereference(tun->tfiles[i]);
+               tfile->socket.sk->sk_write_space(tfile->socket.sk);
+       }
+
        return 0;
 }
 
@@ -1103,9 +1114,10 @@ static unsigned int tun_chr_poll(struct file *file, poll_table *wait)
        if (!skb_array_empty(&tfile->tx_array))
                mask |= POLLIN | POLLRDNORM;
 
-       if (sock_writeable(sk) ||
-           (!test_and_set_bit(SOCKWQ_ASYNC_NOSPACE, &sk->sk_socket->flags) &&
-            sock_writeable(sk)))
+       if (tun->dev->flags & IFF_UP &&
+           (sock_writeable(sk) ||
+            (!test_and_set_bit(SOCKWQ_ASYNC_NOSPACE, &sk->sk_socket->flags) &&
+             sock_writeable(sk))))
                mask |= POLLOUT | POLLWRNORM;
 
        if (tun->dev->reg_state != NETREG_REGISTERED)
@@ -1919,6 +1931,8 @@ static int set_offload(struct tun_struct *tun, unsigned long arg)
                return -EINVAL;
 
        tun->set_features = features;
+       tun->dev->wanted_features &= ~TUN_USER_FEATURES;
+       tun->dev->wanted_features |= features;
        netdev_update_features(tun->dev);
 
        return 0;
@@ -2430,18 +2444,16 @@ static struct miscdevice tun_miscdev = {
 
 /* ethtool interface */
 
-static int tun_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
-{
-       cmd->supported          = 0;
-       cmd->advertising        = 0;
-       ethtool_cmd_speed_set(cmd, SPEED_10);
-       cmd->duplex             = DUPLEX_FULL;
-       cmd->port               = PORT_TP;
-       cmd->phy_address        = 0;
-       cmd->transceiver        = XCVR_INTERNAL;
-       cmd->autoneg            = AUTONEG_DISABLE;
-       cmd->maxtxpkt           = 0;
-       cmd->maxrxpkt           = 0;
+static int tun_get_link_ksettings(struct net_device *dev,
+                                 struct ethtool_link_ksettings *cmd)
+{
+       ethtool_link_ksettings_zero_link_mode(cmd, supported);
+       ethtool_link_ksettings_zero_link_mode(cmd, advertising);
+       cmd->base.speed         = SPEED_10;
+       cmd->base.duplex        = DUPLEX_FULL;
+       cmd->base.port          = PORT_TP;
+       cmd->base.phy_address   = 0;
+       cmd->base.autoneg       = AUTONEG_DISABLE;
        return 0;
 }
 
@@ -2504,7 +2516,6 @@ static int tun_set_coalesce(struct net_device *dev,
 }
 
 static const struct ethtool_ops tun_ethtool_ops = {
-       .get_settings   = tun_get_settings,
        .get_drvinfo    = tun_get_drvinfo,
        .get_msglevel   = tun_get_msglevel,
        .set_msglevel   = tun_set_msglevel,
@@ -2512,6 +2523,7 @@ static const struct ethtool_ops tun_ethtool_ops = {
        .get_ts_info    = ethtool_op_get_ts_info,
        .get_coalesce   = tun_get_coalesce,
        .set_coalesce   = tun_set_coalesce,
+       .get_link_ksettings = tun_get_link_ksettings,
 };
 
 static int tun_queue_resize(struct tun_struct *tun)
@@ -2570,7 +2582,6 @@ static int __init tun_init(void)
        int ret = 0;
 
        pr_info("%s, %s\n", DRV_DESCRIPTION, DRV_VERSION);
-       pr_info("%s\n", DRV_COPYRIGHT);
 
        ret = rtnl_link_register(&tun_link_ops);
        if (ret) {
index 0dd510604118bc8c26c5ec9a84410edbe16a4d8d..a3aa0a27dfe56b22121a0571cc4eaca1b2bbee03 100644 (file)
@@ -136,9 +136,9 @@ static const struct ethtool_ops ax88172_ethtool_ops = {
        .get_eeprom_len         = asix_get_eeprom_len,
        .get_eeprom             = asix_get_eeprom,
        .set_eeprom             = asix_set_eeprom,
-       .get_settings           = usbnet_get_settings,
-       .set_settings           = usbnet_set_settings,
        .nway_reset             = usbnet_nway_reset,
+       .get_link_ksettings     = usbnet_get_link_ksettings,
+       .set_link_ksettings     = usbnet_set_link_ksettings,
 };
 
 static void ax88172_set_multicast(struct net_device *net)
@@ -206,6 +206,7 @@ static const struct net_device_ops ax88172_netdev_ops = {
        .ndo_start_xmit         = usbnet_start_xmit,
        .ndo_tx_timeout         = usbnet_tx_timeout,
        .ndo_change_mtu         = usbnet_change_mtu,
+       .ndo_get_stats64        = usbnet_get_stats64,
        .ndo_set_mac_address    = eth_mac_addr,
        .ndo_validate_addr      = eth_validate_addr,
        .ndo_do_ioctl           = asix_ioctl,
@@ -301,9 +302,9 @@ static const struct ethtool_ops ax88772_ethtool_ops = {
        .get_eeprom_len         = asix_get_eeprom_len,
        .get_eeprom             = asix_get_eeprom,
        .set_eeprom             = asix_set_eeprom,
-       .get_settings           = usbnet_get_settings,
-       .set_settings           = usbnet_set_settings,
        .nway_reset             = usbnet_nway_reset,
+       .get_link_ksettings     = usbnet_get_link_ksettings,
+       .set_link_ksettings     = usbnet_set_link_ksettings,
 };
 
 static int ax88772_link_reset(struct usbnet *dev)
@@ -591,6 +592,7 @@ static const struct net_device_ops ax88772_netdev_ops = {
        .ndo_start_xmit         = usbnet_start_xmit,
        .ndo_tx_timeout         = usbnet_tx_timeout,
        .ndo_change_mtu         = usbnet_change_mtu,
+       .ndo_get_stats64        = usbnet_get_stats64,
        .ndo_set_mac_address    = asix_set_mac_address,
        .ndo_validate_addr      = eth_validate_addr,
        .ndo_do_ioctl           = asix_ioctl,
@@ -775,9 +777,9 @@ static const struct ethtool_ops ax88178_ethtool_ops = {
        .get_eeprom_len         = asix_get_eeprom_len,
        .get_eeprom             = asix_get_eeprom,
        .set_eeprom             = asix_set_eeprom,
-       .get_settings           = usbnet_get_settings,
-       .set_settings           = usbnet_set_settings,
        .nway_reset             = usbnet_nway_reset,
+       .get_link_ksettings     = usbnet_get_link_ksettings,
+       .set_link_ksettings     = usbnet_set_link_ksettings,
 };
 
 static int marvell_phy_init(struct usbnet *dev)
@@ -1044,6 +1046,7 @@ static const struct net_device_ops ax88178_netdev_ops = {
        .ndo_stop               = usbnet_stop,
        .ndo_start_xmit         = usbnet_start_xmit,
        .ndo_tx_timeout         = usbnet_tx_timeout,
+       .ndo_get_stats64        = usbnet_get_stats64,
        .ndo_set_mac_address    = asix_set_mac_address,
        .ndo_validate_addr      = eth_validate_addr,
        .ndo_set_rx_mode        = asix_set_multicast,
index 6308386b09dfeafcb12c7912b06e8acf6aaff69b..501576f538546392381471da43f0d2897df243bd 100644 (file)
@@ -143,6 +143,7 @@ static const struct net_device_ops ax88172a_netdev_ops = {
        .ndo_start_xmit         = usbnet_start_xmit,
        .ndo_tx_timeout         = usbnet_tx_timeout,
        .ndo_change_mtu         = usbnet_change_mtu,
+       .ndo_get_stats64        = usbnet_get_stats64,
        .ndo_set_mac_address    = asix_set_mac_address,
        .ndo_validate_addr      = eth_validate_addr,
        .ndo_do_ioctl           = ax88172a_ioctl,
index a3a7db0702d8d7ece5d4999ce2426bef316f0e06..51cf60092a18e33924f52c568d91a33f30828b21 100644 (file)
@@ -620,16 +620,18 @@ ax88179_get_eeprom(struct net_device *net, struct ethtool_eeprom *eeprom,
        return 0;
 }
 
-static int ax88179_get_settings(struct net_device *net, struct ethtool_cmd *cmd)
+static int ax88179_get_link_ksettings(struct net_device *net,
+                                     struct ethtool_link_ksettings *cmd)
 {
        struct usbnet *dev = netdev_priv(net);
-       return mii_ethtool_gset(&dev->mii, cmd);
+       return mii_ethtool_get_link_ksettings(&dev->mii, cmd);
 }
 
-static int ax88179_set_settings(struct net_device *net, struct ethtool_cmd *cmd)
+static int ax88179_set_link_ksettings(struct net_device *net,
+                                     const struct ethtool_link_ksettings *cmd)
 {
        struct usbnet *dev = netdev_priv(net);
-       return mii_ethtool_sset(&dev->mii, cmd);
+       return mii_ethtool_set_link_ksettings(&dev->mii, cmd);
 }
 
 static int
@@ -826,11 +828,11 @@ static const struct ethtool_ops ax88179_ethtool_ops = {
        .set_wol                = ax88179_set_wol,
        .get_eeprom_len         = ax88179_get_eeprom_len,
        .get_eeprom             = ax88179_get_eeprom,
-       .get_settings           = ax88179_get_settings,
-       .set_settings           = ax88179_set_settings,
        .get_eee                = ax88179_get_eee,
        .set_eee                = ax88179_set_eee,
        .nway_reset             = usbnet_nway_reset,
+       .get_link_ksettings     = ax88179_get_link_ksettings,
+       .set_link_ksettings     = ax88179_set_link_ksettings,
 };
 
 static void ax88179_set_multicast(struct net_device *net)
@@ -957,6 +959,7 @@ static const struct net_device_ops ax88179_netdev_ops = {
        .ndo_stop               = usbnet_stop,
        .ndo_start_xmit         = usbnet_start_xmit,
        .ndo_tx_timeout         = usbnet_tx_timeout,
+       .ndo_get_stats64        = usbnet_get_stats64,
        .ndo_change_mtu         = ax88179_change_mtu,
        .ndo_set_mac_address    = ax88179_set_mac_addr,
        .ndo_validate_addr      = eth_validate_addr,
index 0acc9b640419a2e94bc9a2d3d43a5a2a65800c8b..fce92f0e5abd56ba863a297ebb9eb6ea25ff37b1 100644 (file)
@@ -688,29 +688,34 @@ static void catc_get_drvinfo(struct net_device *dev,
        usb_make_path(catc->usbdev, info->bus_info, sizeof(info->bus_info));
 }
 
-static int catc_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+static int catc_get_link_ksettings(struct net_device *dev,
+                                  struct ethtool_link_ksettings *cmd)
 {
        struct catc *catc = netdev_priv(dev);
        if (!catc->is_f5u011)
                return -EOPNOTSUPP;
 
-       cmd->supported = SUPPORTED_10baseT_Half | SUPPORTED_TP;
-       cmd->advertising = ADVERTISED_10baseT_Half | ADVERTISED_TP;
-       ethtool_cmd_speed_set(cmd, SPEED_10);
-       cmd->duplex = DUPLEX_HALF;
-       cmd->port = PORT_TP; 
-       cmd->phy_address = 0;
-       cmd->transceiver = XCVR_INTERNAL;
-       cmd->autoneg = AUTONEG_DISABLE;
-       cmd->maxtxpkt = 1;
-       cmd->maxrxpkt = 1;
+       ethtool_link_ksettings_zero_link_mode(cmd, supported);
+       ethtool_link_ksettings_add_link_mode(cmd, supported, 10baseT_Half);
+       ethtool_link_ksettings_add_link_mode(cmd, supported, TP);
+
+       ethtool_link_ksettings_zero_link_mode(cmd, advertising);
+       ethtool_link_ksettings_add_link_mode(cmd, advertising, 10baseT_Half);
+       ethtool_link_ksettings_add_link_mode(cmd, advertising, TP);
+
+       cmd->base.speed = SPEED_10;
+       cmd->base.duplex = DUPLEX_HALF;
+       cmd->base.port = PORT_TP;
+       cmd->base.phy_address = 0;
+       cmd->base.autoneg = AUTONEG_DISABLE;
+
        return 0;
 }
 
 static const struct ethtool_ops ops = {
        .get_drvinfo = catc_get_drvinfo,
-       .get_settings = catc_get_settings,
-       .get_link = ethtool_op_get_link
+       .get_link = ethtool_op_get_link,
+       .get_link_ksettings = catc_get_link_ksettings,
 };
 
 /*
index f5552aaaa77a59bf558da6c22218a919bf99ec94..f3ae88fdf332e890ac8273e3df1ca7dd53092c07 100644 (file)
@@ -532,6 +532,7 @@ static const struct driver_info wwan_info = {
 #define LENOVO_VENDOR_ID       0x17ef
 #define NVIDIA_VENDOR_ID       0x0955
 #define HP_VENDOR_ID           0x03f0
+#define MICROSOFT_VENDOR_ID    0x045e
 
 static const struct usb_device_id      products[] = {
 /* BLACKLIST !!
@@ -761,6 +762,20 @@ static const struct usb_device_id  products[] = {
        .driver_info = 0,
 },
 
+/* Microsoft Surface 2 dock (based on Realtek RTL8152) */
+{
+       USB_DEVICE_AND_INTERFACE_INFO(MICROSOFT_VENDOR_ID, 0x07ab, USB_CLASS_COMM,
+                       USB_CDC_SUBCLASS_ETHERNET, USB_CDC_PROTO_NONE),
+       .driver_info = 0,
+},
+
+/* Microsoft Surface 3 dock (based on Realtek RTL8153) */
+{
+       USB_DEVICE_AND_INTERFACE_INFO(MICROSOFT_VENDOR_ID, 0x07c6, USB_CLASS_COMM,
+                       USB_CDC_SUBCLASS_ETHERNET, USB_CDC_PROTO_NONE),
+       .driver_info = 0,
+},
+
 /* WHITELIST!!!
  *
  * CDC Ether uses two interfaces, not necessarily consecutive.
index 3a98f3762a4c81debc023d04b9c01876c6cbdc5f..a6b997cffd3b6e4c4f039a3d3dee5f386bb17e48 100644 (file)
@@ -100,6 +100,7 @@ static const struct net_device_ops cdc_mbim_netdev_ops = {
        .ndo_stop             = usbnet_stop,
        .ndo_start_xmit       = usbnet_start_xmit,
        .ndo_tx_timeout       = usbnet_tx_timeout,
+       .ndo_get_stats64      = usbnet_get_stats64,
        .ndo_change_mtu       = cdc_ncm_change_mtu,
        .ndo_set_mac_address  = eth_mac_addr,
        .ndo_validate_addr    = eth_validate_addr,
index f317984f75360141ee606b8ca31b78baa853a1d5..bb3f71f9fbde06ef1a8d514dbb856c8aa7ff5130 100644 (file)
@@ -131,8 +131,6 @@ static void cdc_ncm_get_strings(struct net_device __always_unused *netdev, u32 s
 static void cdc_ncm_update_rxtx_max(struct usbnet *dev, u32 new_rx, u32 new_tx);
 
 static const struct ethtool_ops cdc_ncm_ethtool_ops = {
-       .get_settings      = usbnet_get_settings,
-       .set_settings      = usbnet_set_settings,
        .get_link          = usbnet_get_link,
        .nway_reset        = usbnet_nway_reset,
        .get_drvinfo       = usbnet_get_drvinfo,
@@ -142,6 +140,8 @@ static const struct ethtool_ops cdc_ncm_ethtool_ops = {
        .get_sset_count    = cdc_ncm_get_sset_count,
        .get_strings       = cdc_ncm_get_strings,
        .get_ethtool_stats = cdc_ncm_get_ethtool_stats,
+       .get_link_ksettings      = usbnet_get_link_ksettings,
+       .set_link_ksettings      = usbnet_set_link_ksettings,
 };
 
 static u32 cdc_ncm_check_rx_max(struct usbnet *dev, u32 new_rx)
@@ -753,6 +753,7 @@ static const struct net_device_ops cdc_ncm_netdev_ops = {
        .ndo_stop            = usbnet_stop,
        .ndo_start_xmit      = usbnet_start_xmit,
        .ndo_tx_timeout      = usbnet_tx_timeout,
+       .ndo_get_stats64     = usbnet_get_stats64,
        .ndo_change_mtu      = cdc_ncm_change_mtu,
        .ndo_set_mac_address = eth_mac_addr,
        .ndo_validate_addr   = eth_validate_addr,
index 0b4bdd39106b0a73e954070a42ac87be22ac1821..b91f92e4e5f22d659d89c35d8accc6ef03191b74 100644 (file)
@@ -281,9 +281,9 @@ static const struct ethtool_ops dm9601_ethtool_ops = {
        .set_msglevel   = usbnet_set_msglevel,
        .get_eeprom_len = dm9601_get_eeprom_len,
        .get_eeprom     = dm9601_get_eeprom,
-       .get_settings   = usbnet_get_settings,
-       .set_settings   = usbnet_set_settings,
        .nway_reset     = usbnet_nway_reset,
+       .get_link_ksettings     = usbnet_get_link_ksettings,
+       .set_link_ksettings     = usbnet_set_link_ksettings,
 };
 
 static void dm9601_set_multicast(struct net_device *net)
@@ -343,6 +343,7 @@ static const struct net_device_ops dm9601_netdev_ops = {
        .ndo_start_xmit         = usbnet_start_xmit,
        .ndo_tx_timeout         = usbnet_tx_timeout,
        .ndo_change_mtu         = usbnet_change_mtu,
+       .ndo_get_stats64        = usbnet_get_stats64,
        .ndo_validate_addr      = eth_validate_addr,
        .ndo_do_ioctl           = dm9601_ioctl,
        .ndo_set_rx_mode        = dm9601_set_multicast,
index 4ff70b22c6eec0e0516373b02b66c71fae158028..5a43b77a6b9c60ef4f2b667ae0ffaa86aa65d4bd 100644 (file)
@@ -144,6 +144,7 @@ static const struct net_device_ops int51x1_netdev_ops = {
        .ndo_start_xmit         = usbnet_start_xmit,
        .ndo_tx_timeout         = usbnet_tx_timeout,
        .ndo_change_mtu         = usbnet_change_mtu,
+       .ndo_get_stats64        = usbnet_get_stats64,
        .ndo_set_mac_address    = eth_mac_addr,
        .ndo_validate_addr      = eth_validate_addr,
        .ndo_set_rx_mode        = int51x1_set_multicast,
index 876f02f4945eafdc2fb5cfa0f9dcb54d9b498af4..3d8ea18df69608942b03e0dc5292d50ace557799 100644 (file)
@@ -245,8 +245,6 @@ struct kaweth_device
        __u16 packet_filter_bitmap;
 
        struct kaweth_ethernet_configuration configuration;
-
-       struct net_device_stats stats;
 };
 
 /****************************************************************
@@ -598,7 +596,7 @@ static void kaweth_usb_receive(struct urb *urb)
        struct sk_buff *skb;
 
        if (unlikely(status == -EPIPE)) {
-               kaweth->stats.rx_errors++;
+               net->stats.rx_errors++;
                kaweth->end = 1;
                wake_up(&kaweth->term_wait);
                dev_dbg(dev, "Status was -EPIPE.\n");
@@ -613,12 +611,12 @@ static void kaweth_usb_receive(struct urb *urb)
        }
        if (unlikely(status == -EPROTO || status == -ETIME ||
                     status == -EILSEQ)) {
-               kaweth->stats.rx_errors++;
+               net->stats.rx_errors++;
                dev_dbg(dev, "Status was -EPROTO, -ETIME, or -EILSEQ.\n");
                return;
        }
        if (unlikely(status == -EOVERFLOW)) {
-               kaweth->stats.rx_errors++;
+               net->stats.rx_errors++;
                dev_dbg(dev, "Status was -EOVERFLOW.\n");
        }
        spin_lock(&kaweth->device_lock);
@@ -663,8 +661,8 @@ static void kaweth_usb_receive(struct urb *urb)
 
                netif_rx(skb);
 
-               kaweth->stats.rx_packets++;
-               kaweth->stats.rx_bytes += pkt_len;
+               net->stats.rx_packets++;
+               net->stats.rx_bytes += pkt_len;
        }
 
        kaweth_resubmit_rx_urb(kaweth, GFP_ATOMIC);
@@ -810,7 +808,7 @@ static netdev_tx_t kaweth_start_xmit(struct sk_buff *skb,
                dev_kfree_skb_irq(skb);
                skb = copied_skb;
                if (!copied_skb) {
-                       kaweth->stats.tx_errors++;
+                       net->stats.tx_errors++;
                        netif_start_queue(net);
                        spin_unlock_irq(&kaweth->device_lock);
                        return NETDEV_TX_OK;
@@ -834,15 +832,15 @@ static netdev_tx_t kaweth_start_xmit(struct sk_buff *skb,
        {
                dev_warn(&net->dev, "kaweth failed tx_urb %d\n", res);
 skip:
-               kaweth->stats.tx_errors++;
+               net->stats.tx_errors++;
 
                netif_start_queue(net);
                dev_kfree_skb_irq(skb);
        }
        else
        {
-               kaweth->stats.tx_packets++;
-               kaweth->stats.tx_bytes += skb->len;
+               net->stats.tx_packets++;
+               net->stats.tx_bytes += skb->len;
        }
 
        spin_unlock_irq(&kaweth->device_lock);
@@ -911,15 +909,6 @@ static void kaweth_async_set_rx_mode(struct kaweth_device *kaweth)
        }
 }
 
-/****************************************************************
- *     kaweth_netdev_stats
- ****************************************************************/
-static struct net_device_stats *kaweth_netdev_stats(struct net_device *dev)
-{
-       struct kaweth_device *kaweth = netdev_priv(dev);
-       return &kaweth->stats;
-}
-
 /****************************************************************
  *     kaweth_tx_timeout
  ****************************************************************/
@@ -928,7 +917,7 @@ static void kaweth_tx_timeout(struct net_device *net)
        struct kaweth_device *kaweth = netdev_priv(net);
 
        dev_warn(&net->dev, "%s: Tx timed out. Resetting.\n", net->name);
-       kaweth->stats.tx_errors++;
+       net->stats.tx_errors++;
        netif_trans_update(net);
 
        usb_unlink_urb(kaweth->tx_urb);
@@ -981,7 +970,6 @@ static const struct net_device_ops kaweth_netdev_ops = {
        .ndo_start_xmit =               kaweth_start_xmit,
        .ndo_tx_timeout =               kaweth_tx_timeout,
        .ndo_set_rx_mode =              kaweth_set_rx_mode,
-       .ndo_get_stats =                kaweth_netdev_stats,
        .ndo_set_mac_address =          eth_mac_addr,
        .ndo_validate_addr =            eth_validate_addr,
 };
index 9889a70ff4f6fece5bfabbfb45a3470f721a5a32..a17e32bf5f924c11d18db475ae144aaafa3566ab 100644 (file)
@@ -29,6 +29,7 @@
 #include <linux/ip.h>
 #include <linux/ipv6.h>
 #include <linux/mdio.h>
+#include <linux/phy.h>
 #include <net/ip6_checksum.h>
 #include <linux/interrupt.h>
 #include <linux/irqdomain.h>
@@ -1952,10 +1953,10 @@ static int lan8835_fixup(struct phy_device *phydev)
        struct lan78xx_net *dev = netdev_priv(phydev->attached_dev);
 
        /* LED2/PME_N/IRQ_N/RGMII_ID pin to IRQ_N mode */
-       buf = phy_read_mmd_indirect(phydev, 0x8010, 3);
+       buf = phy_read_mmd(phydev, MDIO_MMD_PCS, 0x8010);
        buf &= ~0x1800;
        buf |= 0x0800;
-       phy_write_mmd_indirect(phydev, 0x8010, 3, buf);
+       phy_write_mmd(phydev, MDIO_MMD_PCS, 0x8010, buf);
 
        /* RGMII MAC TXC Delay Enable */
        ret = lan78xx_write_reg(dev, MAC_RGMII_ID,
@@ -1975,11 +1976,11 @@ static int ksz9031rnx_fixup(struct phy_device *phydev)
 
        /* Micrel9301RNX PHY configuration */
        /* RGMII Control Signal Pad Skew */
-       phy_write_mmd_indirect(phydev, 4, 2, 0x0077);
+       phy_write_mmd(phydev, MDIO_MMD_WIS, 4, 0x0077);
        /* RGMII RX Data Pad Skew */
-       phy_write_mmd_indirect(phydev, 5, 2, 0x7777);
+       phy_write_mmd(phydev, MDIO_MMD_WIS, 5, 0x7777);
        /* RGMII RX Clock Pad Skew */
-       phy_write_mmd_indirect(phydev, 8, 2, 0x1FF);
+       phy_write_mmd(phydev, MDIO_MMD_WIS, 8, 0x1FF);
 
        dev->interface = PHY_INTERFACE_MODE_RGMII_RXID;
 
index 4f345bd4e6e29558daf29c3d472d2c0768c3202f..5a47e5510ca8243eed8f12cc86a4ec70ac42658e 100644 (file)
@@ -464,9 +464,9 @@ static const struct ethtool_ops mcs7830_ethtool_ops = {
        .get_link               = usbnet_get_link,
        .get_msglevel           = usbnet_get_msglevel,
        .set_msglevel           = usbnet_set_msglevel,
-       .get_settings           = usbnet_get_settings,
-       .set_settings           = usbnet_set_settings,
        .nway_reset             = usbnet_nway_reset,
+       .get_link_ksettings     = usbnet_get_link_ksettings,
+       .set_link_ksettings     = usbnet_set_link_ksettings,
 };
 
 static const struct net_device_ops mcs7830_netdev_ops = {
@@ -475,6 +475,7 @@ static const struct net_device_ops mcs7830_netdev_ops = {
        .ndo_start_xmit         = usbnet_start_xmit,
        .ndo_tx_timeout         = usbnet_tx_timeout,
        .ndo_change_mtu         = usbnet_change_mtu,
+       .ndo_get_stats64        = usbnet_get_stats64,
        .ndo_validate_addr      = eth_validate_addr,
        .ndo_do_ioctl           = mcs7830_ioctl,
        .ndo_set_rx_mode        = mcs7830_set_multicast,
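
The mcs7830 hookups above are the first of several identical conversions in this merge: each usbnet minidriver delegates link settings to the new usbnet_{get,set}_link_ksettings() helpers and wires ndo_get_stats64 to usbnet_get_stats64(), all introduced in the usbnet.c hunks further down. The recurring shape, as a sketch:

        #include <linux/ethtool.h>
        #include <linux/usb/usbnet.h>

        /* Per-driver wiring repeated across this series; the usbnet_*
         * helpers are defined in the usbnet.c diff below. */
        static const struct ethtool_ops example_ethtool_ops = {
                .get_link               = usbnet_get_link,
                .nway_reset             = usbnet_nway_reset,
                .get_link_ksettings     = usbnet_get_link_ksettings,
                .set_link_ksettings     = usbnet_set_link_ksettings,
        };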
index 36674484c6fb9b73011619824f7bc60c50b9c1ad..6514c86f043eeb8777354094c4d2fd9f2ffd9c9d 100644 (file)
@@ -501,13 +501,13 @@ static void read_bulk_callback(struct urb *urb)
        if (rx_status & 0x1e) {
                netif_dbg(pegasus, rx_err, net,
                          "RX packet error %x\n", rx_status);
-               pegasus->stats.rx_errors++;
+               net->stats.rx_errors++;
                if (rx_status & 0x06)   /* long or runt */
-                       pegasus->stats.rx_length_errors++;
+                       net->stats.rx_length_errors++;
                if (rx_status & 0x08)
-                       pegasus->stats.rx_crc_errors++;
+                       net->stats.rx_crc_errors++;
                if (rx_status & 0x10)   /* extra bits   */
-                       pegasus->stats.rx_frame_errors++;
+                       net->stats.rx_frame_errors++;
                goto goon;
        }
        if (pegasus->chip == 0x8513) {
@@ -535,8 +535,8 @@ static void read_bulk_callback(struct urb *urb)
        skb_put(pegasus->rx_skb, pkt_len);
        pegasus->rx_skb->protocol = eth_type_trans(pegasus->rx_skb, net);
        netif_rx(pegasus->rx_skb);
-       pegasus->stats.rx_packets++;
-       pegasus->stats.rx_bytes += pkt_len;
+       net->stats.rx_packets++;
+       net->stats.rx_bytes += pkt_len;
 
        if (pegasus->flags & PEGASUS_UNPLUG)
                return;
@@ -670,13 +670,13 @@ static void intr_callback(struct urb *urb)
                /* byte 0 == tx_status1, reg 2B */
                if (d[0] & (TX_UNDERRUN|EXCESSIVE_COL
                                        |LATE_COL|JABBER_TIMEOUT)) {
-                       pegasus->stats.tx_errors++;
+                       net->stats.tx_errors++;
                        if (d[0] & TX_UNDERRUN)
-                               pegasus->stats.tx_fifo_errors++;
+                               net->stats.tx_fifo_errors++;
                        if (d[0] & (EXCESSIVE_COL | JABBER_TIMEOUT))
-                               pegasus->stats.tx_aborted_errors++;
+                               net->stats.tx_aborted_errors++;
                        if (d[0] & LATE_COL)
-                               pegasus->stats.tx_window_errors++;
+                               net->stats.tx_window_errors++;
                }
 
                /* d[5].LINK_STATUS lies on some adapters.
@@ -685,7 +685,7 @@ static void intr_callback(struct urb *urb)
                 */
 
                /* bytes 3-4 == rx_lostpkt, reg 2E/2F */
-               pegasus->stats.rx_missed_errors += ((d[3] & 0x7f) << 8) | d[4];
+               net->stats.rx_missed_errors += ((d[3] & 0x7f) << 8) | d[4];
        }
 
        res = usb_submit_urb(urb, GFP_ATOMIC);
@@ -701,7 +701,7 @@ static void pegasus_tx_timeout(struct net_device *net)
        pegasus_t *pegasus = netdev_priv(net);
        netif_warn(pegasus, timer, net, "tx timeout\n");
        usb_unlink_urb(pegasus->tx_urb);
-       pegasus->stats.tx_errors++;
+       net->stats.tx_errors++;
 }
 
 static netdev_tx_t pegasus_start_xmit(struct sk_buff *skb,
@@ -731,23 +731,18 @@ static netdev_tx_t pegasus_start_xmit(struct sk_buff *skb,
                        netif_device_detach(pegasus->net);
                        break;
                default:
-                       pegasus->stats.tx_errors++;
+                       net->stats.tx_errors++;
                        netif_start_queue(net);
                }
        } else {
-               pegasus->stats.tx_packets++;
-               pegasus->stats.tx_bytes += skb->len;
+               net->stats.tx_packets++;
+               net->stats.tx_bytes += skb->len;
        }
        dev_kfree_skb(skb);
 
        return NETDEV_TX_OK;
 }
 
-static struct net_device_stats *pegasus_netdev_stats(struct net_device *dev)
-{
-       return &((pegasus_t *) netdev_priv(dev))->stats;
-}
-
 static inline void disable_net_traffic(pegasus_t *pegasus)
 {
        __le16 tmp = cpu_to_le16(0);
@@ -953,20 +948,22 @@ static inline void pegasus_reset_wol(struct net_device *dev)
 }
 
 static int
-pegasus_get_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
+pegasus_get_link_ksettings(struct net_device *dev,
+                          struct ethtool_link_ksettings *ecmd)
 {
        pegasus_t *pegasus;
 
        pegasus = netdev_priv(dev);
-       mii_ethtool_gset(&pegasus->mii, ecmd);
+       mii_ethtool_get_link_ksettings(&pegasus->mii, ecmd);
        return 0;
 }
 
 static int
-pegasus_set_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
+pegasus_set_link_ksettings(struct net_device *dev,
+                          const struct ethtool_link_ksettings *ecmd)
 {
        pegasus_t *pegasus = netdev_priv(dev);
-       return mii_ethtool_sset(&pegasus->mii, ecmd);
+       return mii_ethtool_set_link_ksettings(&pegasus->mii, ecmd);
 }
 
 static int pegasus_nway_reset(struct net_device *dev)
@@ -995,14 +992,14 @@ static void pegasus_set_msglevel(struct net_device *dev, u32 v)
 
 static const struct ethtool_ops ops = {
        .get_drvinfo = pegasus_get_drvinfo,
-       .get_settings = pegasus_get_settings,
-       .set_settings = pegasus_set_settings,
        .nway_reset = pegasus_nway_reset,
        .get_link = pegasus_get_link,
        .get_msglevel = pegasus_get_msglevel,
        .set_msglevel = pegasus_set_msglevel,
        .get_wol = pegasus_get_wol,
        .set_wol = pegasus_set_wol,
+       .get_link_ksettings = pegasus_get_link_ksettings,
+       .set_link_ksettings = pegasus_set_link_ksettings,
 };
 
 static int pegasus_ioctl(struct net_device *net, struct ifreq *rq, int cmd)
@@ -1292,7 +1289,6 @@ static const struct net_device_ops pegasus_netdev_ops = {
        .ndo_do_ioctl =                 pegasus_ioctl,
        .ndo_start_xmit =               pegasus_start_xmit,
        .ndo_set_rx_mode =              pegasus_set_multicast,
-       .ndo_get_stats =                pegasus_netdev_stats,
        .ndo_tx_timeout =               pegasus_tx_timeout,
        .ndo_set_mac_address =          eth_mac_addr,
        .ndo_validate_addr =            eth_validate_addr,
index d15646244fdf06b7b474bff96922696e79caf230..9b7ea9c9167d6074abf5b883cfce955075d4c90b 100644 (file)
@@ -83,7 +83,6 @@ typedef struct pegasus {
        struct usb_device       *usb;
        struct usb_interface    *intf;
        struct net_device       *net;
-       struct net_device_stats stats;
        struct mii_if_info      mii;
        unsigned                flags;
        unsigned                features;
index 8056745506832867165f03ae0b24c2f1a578d849..adbed261cc8aed15138fb6090258988c6c112a85 100644 (file)
@@ -58,12 +58,198 @@ struct qmi_wwan_state {
 
 enum qmi_wwan_flags {
        QMI_WWAN_FLAG_RAWIP = 1 << 0,
+       QMI_WWAN_FLAG_MUX = 1 << 1,
 };
 
 enum qmi_wwan_quirks {
        QMI_WWAN_QUIRK_DTR = 1 << 0,    /* needs "set DTR" request */
 };
 
+struct qmimux_hdr {
+       u8 pad;
+       u8 mux_id;
+       __be16 pkt_len;
+};
+
+struct qmimux_priv {
+       struct net_device *real_dev;
+       u8 mux_id;
+};
+
+static int qmimux_open(struct net_device *dev)
+{
+       struct qmimux_priv *priv = netdev_priv(dev);
+       struct net_device *real_dev = priv->real_dev;
+
+       if (!(priv->real_dev->flags & IFF_UP))
+               return -ENETDOWN;
+
+       if (netif_carrier_ok(real_dev))
+               netif_carrier_on(dev);
+       return 0;
+}
+
+static int qmimux_stop(struct net_device *dev)
+{
+       netif_carrier_off(dev);
+       return 0;
+}
+
+static netdev_tx_t qmimux_start_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+       struct qmimux_priv *priv = netdev_priv(dev);
+       unsigned int len = skb->len;
+       struct qmimux_hdr *hdr;
+
+       hdr = (struct qmimux_hdr *)skb_push(skb, sizeof(struct qmimux_hdr));
+       hdr->pad = 0;
+       hdr->mux_id = priv->mux_id;
+       hdr->pkt_len = cpu_to_be16(len);
+       skb->dev = priv->real_dev;
+       return dev_queue_xmit(skb);
+}
+
+static const struct net_device_ops qmimux_netdev_ops = {
+       .ndo_open       = qmimux_open,
+       .ndo_stop       = qmimux_stop,
+       .ndo_start_xmit = qmimux_start_xmit,
+};
+
+static void qmimux_setup(struct net_device *dev)
+{
+       dev->header_ops      = NULL;  /* No header */
+       dev->type            = ARPHRD_NONE;
+       dev->hard_header_len = 0;
+       dev->addr_len        = 0;
+       dev->flags           = IFF_POINTOPOINT | IFF_NOARP | IFF_MULTICAST;
+       dev->netdev_ops      = &qmimux_netdev_ops;
+       dev->destructor      = free_netdev;
+}
+
+static struct net_device *qmimux_find_dev(struct usbnet *dev, u8 mux_id)
+{
+       struct qmimux_priv *priv;
+       struct list_head *iter;
+       struct net_device *ldev;
+
+       rcu_read_lock();
+       netdev_for_each_upper_dev_rcu(dev->net, ldev, iter) {
+               priv = netdev_priv(ldev);
+               if (priv->mux_id == mux_id) {
+                       rcu_read_unlock();
+                       return ldev;
+               }
+       }
+       rcu_read_unlock();
+       return NULL;
+}
+
+static bool qmimux_has_slaves(struct usbnet *dev)
+{
+       return !list_empty(&dev->net->adj_list.upper);
+}
+
+static int qmimux_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
+{
+       unsigned int len, offset = sizeof(struct qmimux_hdr);
+       struct qmimux_hdr *hdr;
+       struct net_device *net;
+       struct sk_buff *skbn;
+
+       while (offset < skb->len) {
+               hdr = (struct qmimux_hdr *)skb->data;
+               len = be16_to_cpu(hdr->pkt_len);
+
+               /* drop the packet, bogus length */
+               if (offset + len > skb->len)
+                       return 0;
+
+               /* control packet, we do not know what to do */
+               if (hdr->pad & 0x80)
+                       goto skip;
+
+               net = qmimux_find_dev(dev, hdr->mux_id);
+               if (!net)
+                       goto skip;
+               skbn = netdev_alloc_skb(net, len);
+               if (!skbn)
+                       return 0;
+               skbn->dev = net;
+
+               switch (skb->data[offset] & 0xf0) {
+               case 0x40:
+                       skbn->protocol = htons(ETH_P_IP);
+                       break;
+               case 0x60:
+                       skbn->protocol = htons(ETH_P_IPV6);
+                       break;
+               default:
+                       /* not ip - do not know what to do */
+                       goto skip;
+               }
+
+               memcpy(skb_put(skbn, len), skb->data + offset, len);
+               if (netif_rx(skbn) != NET_RX_SUCCESS)
+                       return 0;
+
+skip:
+               offset += len + sizeof(struct qmimux_hdr);
+       }
+       return 1;
+}
+
+static int qmimux_register_device(struct net_device *real_dev, u8 mux_id)
+{
+       struct net_device *new_dev;
+       struct qmimux_priv *priv;
+       int err;
+
+       new_dev = alloc_netdev(sizeof(struct qmimux_priv),
+                              "qmimux%d", NET_NAME_UNKNOWN, qmimux_setup);
+       if (!new_dev)
+               return -ENOBUFS;
+
+       dev_net_set(new_dev, dev_net(real_dev));
+       priv = netdev_priv(new_dev);
+       priv->mux_id = mux_id;
+       priv->real_dev = real_dev;
+
+       err = register_netdevice(new_dev);
+       if (err < 0)
+               goto out_free_newdev;
+
+       /* Account for reference in struct qmimux_priv */
+       dev_hold(real_dev);
+
+       err = netdev_upper_dev_link(real_dev, new_dev);
+       if (err)
+               goto out_unregister_netdev;
+
+       netif_stacked_transfer_operstate(real_dev, new_dev);
+
+       return 0;
+
+out_unregister_netdev:
+       unregister_netdevice(new_dev);
+       dev_put(real_dev);
+
+out_free_newdev:
+       free_netdev(new_dev);
+       return err;
+}
+
+static void qmimux_unregister_device(struct net_device *dev)
+{
+       struct qmimux_priv *priv = netdev_priv(dev);
+       struct net_device *real_dev = priv->real_dev;
+
+       netdev_upper_dev_unlink(real_dev, dev);
+       unregister_netdevice(dev);
+
+       /* Get rid of the reference to real_dev */
+       dev_put(real_dev);
+}
+
 static void qmi_wwan_netdev_setup(struct net_device *net)
 {
        struct usbnet *dev = netdev_priv(net);
@@ -137,10 +323,114 @@ err:
        return ret;
 }
 
+static ssize_t add_mux_show(struct device *d, struct device_attribute *attr, char *buf)
+{
+       struct net_device *dev = to_net_dev(d);
+       struct qmimux_priv *priv;
+       struct list_head *iter;
+       struct net_device *ldev;
+       ssize_t count = 0;
+
+       rcu_read_lock();
+       netdev_for_each_upper_dev_rcu(dev, ldev, iter) {
+               priv = netdev_priv(ldev);
+               count += scnprintf(&buf[count], PAGE_SIZE - count,
+                                  "0x%02x\n", priv->mux_id);
+       }
+       rcu_read_unlock();
+       return count;
+}
+
+static ssize_t add_mux_store(struct device *d,  struct device_attribute *attr, const char *buf, size_t len)
+{
+       struct usbnet *dev = netdev_priv(to_net_dev(d));
+       struct qmi_wwan_state *info = (void *)&dev->data;
+       u8 mux_id;
+       int ret;
+
+       if (kstrtou8(buf, 0, &mux_id))
+               return -EINVAL;
+
+       /* mux_id [1 - 0x7f] range empirically found */
+       if (mux_id < 1 || mux_id > 0x7f)
+               return -EINVAL;
+
+       if (!rtnl_trylock())
+               return restart_syscall();
+
+       if (qmimux_find_dev(dev, mux_id)) {
+               netdev_err(dev->net, "mux_id already present\n");
+               ret = -EINVAL;
+               goto err;
+       }
+
+       /* we don't want to modify a running netdev */
+       if (netif_running(dev->net)) {
+               netdev_err(dev->net, "Cannot change a running device\n");
+               ret = -EBUSY;
+               goto err;
+       }
+
+       ret = qmimux_register_device(dev->net, mux_id);
+       if (!ret) {
+               info->flags |= QMI_WWAN_FLAG_MUX;
+               ret = len;
+       }
+err:
+       rtnl_unlock();
+       return ret;
+}
+
+static ssize_t del_mux_show(struct device *d, struct device_attribute *attr, char *buf)
+{
+       return add_mux_show(d, attr, buf);
+}
+
+static ssize_t del_mux_store(struct device *d,  struct device_attribute *attr, const char *buf, size_t len)
+{
+       struct usbnet *dev = netdev_priv(to_net_dev(d));
+       struct qmi_wwan_state *info = (void *)&dev->data;
+       struct net_device *del_dev;
+       u8 mux_id;
+       int ret = 0;
+
+       if (kstrtou8(buf, 0, &mux_id))
+               return -EINVAL;
+
+       if (!rtnl_trylock())
+               return restart_syscall();
+
+       /* we don't want to modify a running netdev */
+       if (netif_running(dev->net)) {
+               netdev_err(dev->net, "Cannot change a running device\n");
+               ret = -EBUSY;
+               goto err;
+       }
+
+       del_dev = qmimux_find_dev(dev, mux_id);
+       if (!del_dev) {
+               netdev_err(dev->net, "mux_id not present\n");
+               ret = -EINVAL;
+               goto err;
+       }
+       qmimux_unregister_device(del_dev);
+
+       if (!qmimux_has_slaves(dev))
+               info->flags &= ~QMI_WWAN_FLAG_MUX;
+       ret = len;
+err:
+       rtnl_unlock();
+       return ret;
+}
+
 static DEVICE_ATTR_RW(raw_ip);
+static DEVICE_ATTR_RW(add_mux);
+static DEVICE_ATTR_RW(del_mux);
 
 static struct attribute *qmi_wwan_sysfs_attrs[] = {
        &dev_attr_raw_ip.attr,
+       &dev_attr_add_mux.attr,
+       &dev_attr_del_mux.attr,
        NULL,
 };
 
@@ -184,6 +474,9 @@ static int qmi_wwan_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
        if (skb->len < dev->net->hard_header_len)
                return 0;
 
+       if (info->flags & QMI_WWAN_FLAG_MUX)
+               return qmimux_rx_fixup(dev, skb);
+
        switch (skb->data[0] & 0xf0) {
        case 0x40:
                proto = htons(ETH_P_IP);
@@ -249,6 +542,7 @@ static const struct net_device_ops qmi_wwan_netdev_ops = {
        .ndo_start_xmit         = usbnet_start_xmit,
        .ndo_tx_timeout         = usbnet_tx_timeout,
        .ndo_change_mtu         = usbnet_change_mtu,
+       .ndo_get_stats64        = usbnet_get_stats64,
        .ndo_set_mac_address    = qmi_wwan_mac_addr,
        .ndo_validate_addr      = eth_validate_addr,
 };
@@ -580,6 +874,10 @@ static const struct usb_device_id products[] = {
                USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, USB_CLASS_VENDOR_SPEC, 0x01, 0x69),
                .driver_info        = (unsigned long)&qmi_wwan_info,
        },
+       {       /* Motorola Mapphone devices with MDM6600 */
+               USB_VENDOR_AND_INTERFACE_INFO(0x22b8, USB_CLASS_VENDOR_SPEC, 0xfb, 0xff),
+               .driver_info        = (unsigned long)&qmi_wwan_info,
+       },
 
        /* 2. Combined interface devices matching on class+protocol */
        {       /* Huawei E367 and possibly others in "Windows mode" */
@@ -925,6 +1223,8 @@ static const struct usb_device_id products[] = {
        {QMI_FIXED_INTF(0x413c, 0x81a9, 8)},    /* Dell Wireless 5808e Gobi(TM) 4G LTE Mobile Broadband Card */
        {QMI_FIXED_INTF(0x413c, 0x81b1, 8)},    /* Dell Wireless 5809e Gobi(TM) 4G LTE Mobile Broadband Card */
        {QMI_FIXED_INTF(0x413c, 0x81b3, 8)},    /* Dell Wireless 5809e Gobi(TM) 4G LTE Mobile Broadband Card (rev3) */
+       {QMI_FIXED_INTF(0x413c, 0x81b6, 8)},    /* Dell Wireless 5811e */
+       {QMI_FIXED_INTF(0x413c, 0x81b6, 10)},   /* Dell Wireless 5811e */
        {QMI_FIXED_INTF(0x03f0, 0x4e1d, 8)},    /* HP lt4111 LTE/EV-DO/HSPA+ Gobi 4G Module */
        {QMI_FIXED_INTF(0x22de, 0x9061, 3)},    /* WeTelecom WPD-600N */
        {QMI_FIXED_INTF(0x1e0e, 0x9001, 5)},    /* SIMCom 7230E */
@@ -1030,11 +1330,33 @@ static int qmi_wwan_probe(struct usb_interface *intf,
        return usbnet_probe(intf, id);
 }
 
+static void qmi_wwan_disconnect(struct usb_interface *intf)
+{
+       struct usbnet *dev = usb_get_intfdata(intf);
+       struct qmi_wwan_state *info = (void *)&dev->data;
+       struct list_head *iter;
+       struct net_device *ldev;
+
+       if (info->flags & QMI_WWAN_FLAG_MUX) {
+               if (!rtnl_trylock()) {
+                       restart_syscall();
+                       return;
+               }
+               rcu_read_lock();
+               netdev_for_each_upper_dev_rcu(dev->net, ldev, iter)
+                       qmimux_unregister_device(ldev);
+               rcu_read_unlock();
+               rtnl_unlock();
+               info->flags &= ~QMI_WWAN_FLAG_MUX;
+       }
+       usbnet_disconnect(intf);
+}
+
 static struct usb_driver qmi_wwan_driver = {
        .name                 = "qmi_wwan",
        .id_table             = products,
        .probe                = qmi_wwan_probe,
-       .disconnect           = usbnet_disconnect,
+       .disconnect           = qmi_wwan_disconnect,
        .suspend              = qmi_wwan_suspend,
        .resume               = qmi_wwan_resume,
        .reset_resume         = qmi_wwan_resume,
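
With the MUX support added above, every data session on the shared QMI interface rides behind a four-byte qmimux header (pad/flags byte, mux ID, big-endian payload length), and sessions are created or torn down through the new add_mux/del_mux attributes in the device's qmi sysfs group. A hypothetical userspace sketch (interface name assumed for illustration):

        #include <fcntl.h>
        #include <string.h>
        #include <unistd.h>

        /* Write a mux ID (0x01..0x7f) to add_mux; the kernel then
         * registers a qmimuxN netdev for that session. */
        static int example_add_mux(void)
        {
                const char *path = "/sys/class/net/wwan0/qmi/add_mux";
                const char *id = "0x11\n";
                int fd = open(path, O_WRONLY);

                if (fd < 0)
                        return -1;
                if (write(fd, id, strlen(id)) < 0) {
                        close(fd);
                        return -1;
                }
                return close(fd);
        }

Writing the same ID to del_mux removes the slave again; as the store handlers above show, both operations are refused while the underlying netdev is running.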
index 986243c932ccd6fe19c592805c1c63274f5e5555..ddc62cb69be828a730e6ed32ecc9ee951fef8d3b 100644 (file)
@@ -32,7 +32,7 @@
 #define NETNEXT_VERSION                "08"
 
 /* Information for net */
-#define NET_VERSION            "8"
+#define NET_VERSION            "9"
 
 #define DRIVER_VERSION         "v1." NETNEXT_VERSION "." NET_VERSION
 #define DRIVER_AUTHOR "Realtek linux nic maintainers <nic_swsd@realtek.com>"
@@ -501,6 +501,8 @@ enum rtl_register_content {
 #define RTL8153_RMS            RTL8153_MAX_PACKET
 #define RTL8152_TX_TIMEOUT     (5 * HZ)
 #define RTL8152_NAPI_WEIGHT    64
+#define rx_reserved_size(x)    ((x) + VLAN_ETH_HLEN + CRC_SIZE + \
+                                sizeof(struct rx_desc) + RX_ALIGN)
 
 /* rtl8152 flags */
 enum rtl8152_flags {
@@ -515,6 +517,7 @@ enum rtl8152_flags {
 
 /* Define these values to match your device */
 #define VENDOR_ID_REALTEK              0x0bda
+#define VENDOR_ID_MICROSOFT            0x045e
 #define VENDOR_ID_SAMSUNG              0x04e8
 #define VENDOR_ID_LENOVO               0x17ef
 #define VENDOR_ID_NVIDIA               0x0955
@@ -1292,6 +1295,7 @@ static void intr_callback(struct urb *urb)
                }
        } else {
                if (netif_carrier_ok(tp->netdev)) {
+                       netif_stop_queue(tp->netdev);
                        set_bit(RTL8152_LINK_CHG, &tp->flags);
                        schedule_delayed_work(&tp->schedule, 0);
                }
@@ -1362,6 +1366,7 @@ static int alloc_all_mem(struct r8152 *tp)
        spin_lock_init(&tp->rx_lock);
        spin_lock_init(&tp->tx_lock);
        INIT_LIST_HEAD(&tp->tx_free);
+       INIT_LIST_HEAD(&tp->rx_done);
        skb_queue_head_init(&tp->tx_queue);
        skb_queue_head_init(&tp->rx_queue);
 
@@ -1761,6 +1766,7 @@ static int rx_bottom(struct r8152 *tp, int budget)
        unsigned long flags;
        struct list_head *cursor, *next, rx_queue;
        int ret = 0, work_done = 0;
+       struct napi_struct *napi = &tp->napi;
 
        if (!skb_queue_empty(&tp->rx_queue)) {
                while (work_done < budget) {
@@ -1773,7 +1779,7 @@ static int rx_bottom(struct r8152 *tp, int budget)
                                break;
 
                        pkt_len = skb->len;
-                       napi_gro_receive(&tp->napi, skb);
+                       napi_gro_receive(napi, skb);
                        work_done++;
                        stats->rx_packets++;
                        stats->rx_bytes += pkt_len;
@@ -1823,7 +1829,7 @@ static int rx_bottom(struct r8152 *tp, int budget)
                        pkt_len -= CRC_SIZE;
                        rx_data += sizeof(struct rx_desc);
 
-                       skb = napi_alloc_skb(&tp->napi, pkt_len);
+                       skb = napi_alloc_skb(napi, pkt_len);
                        if (!skb) {
                                stats->rx_dropped++;
                                goto find_next_rx;
@@ -1835,7 +1841,7 @@ static int rx_bottom(struct r8152 *tp, int budget)
                        skb->protocol = eth_type_trans(skb, netdev);
                        rtl_rx_vlan_tag(rx_desc, skb);
                        if (work_done < budget) {
-                               napi_gro_receive(&tp->napi, skb);
+                               napi_gro_receive(napi, skb);
                                work_done++;
                                stats->rx_packets++;
                                stats->rx_bytes += pkt_len;
@@ -2252,8 +2258,7 @@ static void r8153_set_rx_early_timeout(struct r8152 *tp)
 
 static void r8153_set_rx_early_size(struct r8152 *tp)
 {
-       u32 mtu = tp->netdev->mtu;
-       u32 ocp_data = (agg_buf_sz - mtu - VLAN_ETH_HLEN - VLAN_HLEN) / 8;
+       u32 ocp_data = (agg_buf_sz - rx_reserved_size(tp->netdev->mtu)) / 4;
 
        ocp_write_word(tp, MCU_TYPE_USB, USB_RX_EARLY_SIZE, ocp_data);
 }
@@ -2898,7 +2903,8 @@ static void r8153_first_init(struct r8152 *tp)
 
        rtl_rx_vlan_en(tp, tp->netdev->features & NETIF_F_HW_VLAN_CTAG_RX);
 
-       ocp_write_word(tp, MCU_TYPE_PLA, PLA_RMS, RTL8153_RMS);
+       ocp_data = tp->netdev->mtu + VLAN_ETH_HLEN + CRC_SIZE;
+       ocp_write_word(tp, MCU_TYPE_PLA, PLA_RMS, ocp_data);
        ocp_write_byte(tp, MCU_TYPE_PLA, PLA_MTPS, MTPS_JUMBO);
 
        ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_TCR0);
@@ -2950,7 +2956,8 @@ static void r8153_enter_oob(struct r8152 *tp)
                usleep_range(1000, 2000);
        }
 
-       ocp_write_word(tp, MCU_TYPE_PLA, PLA_RMS, RTL8153_RMS);
+       ocp_data = tp->netdev->mtu + VLAN_ETH_HLEN + CRC_SIZE;
+       ocp_write_word(tp, MCU_TYPE_PLA, PLA_RMS, ocp_data);
 
        ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_TEREDO_CFG);
        ocp_data &= ~TEREDO_WAKE_MASK;
@@ -3150,6 +3157,7 @@ static bool rtl8153_in_nway(struct r8152 *tp)
 static void set_carrier(struct r8152 *tp)
 {
        struct net_device *netdev = tp->netdev;
+       struct napi_struct *napi = &tp->napi;
        u8 speed;
 
        speed = rtl8152_get_speed(tp);
@@ -3159,19 +3167,22 @@ static void set_carrier(struct r8152 *tp)
                        tp->rtl_ops.enable(tp);
                        set_bit(RTL8152_SET_RX_MODE, &tp->flags);
                        netif_stop_queue(netdev);
-                       napi_disable(&tp->napi);
+                       napi_disable(napi);
                        netif_carrier_on(netdev);
                        rtl_start_rx(tp);
                        napi_enable(&tp->napi);
                        netif_wake_queue(netdev);
                        netif_info(tp, link, netdev, "carrier on\n");
+               } else if (netif_queue_stopped(netdev) &&
+                          skb_queue_len(&tp->tx_queue) < tp->tx_qlen) {
+                       netif_wake_queue(netdev);
                }
        } else {
                if (netif_carrier_ok(netdev)) {
                        netif_carrier_off(netdev);
-                       napi_disable(&tp->napi);
+                       napi_disable(napi);
                        tp->rtl_ops.disable(tp);
-                       napi_enable(&tp->napi);
+                       napi_enable(napi);
                        netif_info(tp, link, netdev, "carrier off\n");
                }
        }
@@ -3633,11 +3644,13 @@ static int rtl8152_runtime_suspend(struct r8152 *tp)
                tp->rtl_ops.autosuspend_en(tp, true);
 
                if (netif_carrier_ok(netdev)) {
-                       napi_disable(&tp->napi);
+                       struct napi_struct *napi = &tp->napi;
+
+                       napi_disable(napi);
                        rtl_stop_rx(tp);
                        rxdy_gated_en(tp, false);
                        ocp_write_dword(tp, MCU_TYPE_PLA, PLA_RCR, rcr);
-                       napi_enable(&tp->napi);
+                       napi_enable(napi);
                }
        }
 
@@ -3653,12 +3666,14 @@ static int rtl8152_system_suspend(struct r8152 *tp)
        netif_device_detach(netdev);
 
        if (netif_running(netdev) && test_bit(WORK_ENABLE, &tp->flags)) {
+               struct napi_struct *napi = &tp->napi;
+
                clear_bit(WORK_ENABLE, &tp->flags);
                usb_kill_urb(tp->intr_urb);
-               napi_disable(&tp->napi);
+               napi_disable(napi);
                cancel_delayed_work_sync(&tp->schedule);
                tp->rtl_ops.down(tp);
-               napi_enable(&tp->napi);
+               napi_enable(napi);
        }
 
        return ret;
@@ -3684,35 +3699,46 @@ static int rtl8152_suspend(struct usb_interface *intf, pm_message_t message)
 static int rtl8152_resume(struct usb_interface *intf)
 {
        struct r8152 *tp = usb_get_intfdata(intf);
+       struct net_device *netdev = tp->netdev;
 
        mutex_lock(&tp->control);
 
        if (!test_bit(SELECTIVE_SUSPEND, &tp->flags)) {
                tp->rtl_ops.init(tp);
                queue_delayed_work(system_long_wq, &tp->hw_phy_work, 0);
-               netif_device_attach(tp->netdev);
+               netif_device_attach(netdev);
        }
 
-       if (netif_running(tp->netdev) && tp->netdev->flags & IFF_UP) {
+       if (netif_running(netdev) && netdev->flags & IFF_UP) {
                if (test_bit(SELECTIVE_SUSPEND, &tp->flags)) {
+                       struct napi_struct *napi = &tp->napi;
+
                        tp->rtl_ops.autosuspend_en(tp, false);
-                       napi_disable(&tp->napi);
+                       napi_disable(napi);
                        set_bit(WORK_ENABLE, &tp->flags);
-                       if (netif_carrier_ok(tp->netdev))
-                               rtl_start_rx(tp);
-                       napi_enable(&tp->napi);
+                       if (netif_carrier_ok(netdev)) {
+                               if (rtl8152_get_speed(tp) & LINK_STATUS) {
+                                       rtl_start_rx(tp);
+                               } else {
+                                       netif_carrier_off(netdev);
+                                       tp->rtl_ops.disable(tp);
+                                       netif_info(tp, link, netdev,
+                                                  "linking down\n");
+                               }
+                       }
+                       napi_enable(napi);
                        clear_bit(SELECTIVE_SUSPEND, &tp->flags);
                        smp_mb__after_atomic();
                        if (!list_empty(&tp->rx_done))
                                napi_schedule(&tp->napi);
                } else {
                        tp->rtl_ops.up(tp);
-                       netif_carrier_off(tp->netdev);
+                       netif_carrier_off(netdev);
                        set_bit(WORK_ENABLE, &tp->flags);
                }
                usb_submit_urb(tp->intr_urb, GFP_KERNEL);
        } else if (test_bit(SELECTIVE_SUSPEND, &tp->flags)) {
-               if (tp->netdev->flags & IFF_UP)
+               if (netdev->flags & IFF_UP)
                        tp->rtl_ops.autosuspend_en(tp, false);
                clear_bit(SELECTIVE_SUSPEND, &tp->flags);
        }
@@ -3800,7 +3826,8 @@ static void rtl8152_get_drvinfo(struct net_device *netdev,
 }
 
 static
-int rtl8152_get_settings(struct net_device *netdev, struct ethtool_cmd *cmd)
+int rtl8152_get_link_ksettings(struct net_device *netdev,
+                              struct ethtool_link_ksettings *cmd)
 {
        struct r8152 *tp = netdev_priv(netdev);
        int ret;
@@ -3814,7 +3841,7 @@ int rtl8152_get_settings(struct net_device *netdev, struct ethtool_cmd *cmd)
 
        mutex_lock(&tp->control);
 
-       ret = mii_ethtool_gset(&tp->mii, cmd);
+       ret = mii_ethtool_get_link_ksettings(&tp->mii, cmd);
 
        mutex_unlock(&tp->control);
 
@@ -3824,7 +3851,8 @@ out:
        return ret;
 }
 
-static int rtl8152_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+static int rtl8152_set_link_ksettings(struct net_device *dev,
+                                     const struct ethtool_link_ksettings *cmd)
 {
        struct r8152 *tp = netdev_priv(dev);
        int ret;
@@ -3835,11 +3863,12 @@ static int rtl8152_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
 
        mutex_lock(&tp->control);
 
-       ret = rtl8152_set_speed(tp, cmd->autoneg, cmd->speed, cmd->duplex);
+       ret = rtl8152_set_speed(tp, cmd->base.autoneg, cmd->base.speed,
+                               cmd->base.duplex);
        if (!ret) {
-               tp->autoneg = cmd->autoneg;
-               tp->speed = cmd->speed;
-               tp->duplex = cmd->duplex;
+               tp->autoneg = cmd->base.autoneg;
+               tp->speed = cmd->base.speed;
+               tp->duplex = cmd->base.duplex;
        }
 
        mutex_unlock(&tp->control);
@@ -4117,8 +4146,6 @@ static int rtl8152_set_coalesce(struct net_device *netdev,
 
 static const struct ethtool_ops ops = {
        .get_drvinfo = rtl8152_get_drvinfo,
-       .get_settings = rtl8152_get_settings,
-       .set_settings = rtl8152_set_settings,
        .get_link = ethtool_op_get_link,
        .nway_reset = rtl8152_nway_reset,
        .get_msglevel = rtl8152_get_msglevel,
@@ -4132,6 +4159,8 @@ static const struct ethtool_ops ops = {
        .set_coalesce = rtl8152_set_coalesce,
        .get_eee = rtl_ethtool_get_eee,
        .set_eee = rtl_ethtool_set_eee,
+       .get_link_ksettings = rtl8152_get_link_ksettings,
+       .set_link_ksettings = rtl8152_set_link_ksettings,
 };
 
 static int rtl8152_ioctl(struct net_device *netdev, struct ifreq *rq, int cmd)
@@ -4200,8 +4229,14 @@ static int rtl8152_change_mtu(struct net_device *dev, int new_mtu)
 
        dev->mtu = new_mtu;
 
-       if (netif_running(dev) && netif_carrier_ok(dev))
-               r8153_set_rx_early_size(tp);
+       if (netif_running(dev)) {
+               u32 rms = new_mtu + VLAN_ETH_HLEN + CRC_SIZE;
+
+               ocp_write_word(tp, MCU_TYPE_PLA, PLA_RMS, rms);
+
+               if (netif_carrier_ok(dev))
+                       r8153_set_rx_early_size(tp);
+       }
 
        mutex_unlock(&tp->control);
 
@@ -4224,44 +4259,6 @@ static const struct net_device_ops rtl8152_netdev_ops = {
        .ndo_features_check     = rtl8152_features_check,
 };
 
-static void r8152b_get_version(struct r8152 *tp)
-{
-       u32     ocp_data;
-       u16     version;
-
-       ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_TCR1);
-       version = (u16)(ocp_data & VERSION_MASK);
-
-       switch (version) {
-       case 0x4c00:
-               tp->version = RTL_VER_01;
-               break;
-       case 0x4c10:
-               tp->version = RTL_VER_02;
-               break;
-       case 0x5c00:
-               tp->version = RTL_VER_03;
-               tp->mii.supports_gmii = 1;
-               break;
-       case 0x5c10:
-               tp->version = RTL_VER_04;
-               tp->mii.supports_gmii = 1;
-               break;
-       case 0x5c20:
-               tp->version = RTL_VER_05;
-               tp->mii.supports_gmii = 1;
-               break;
-       case 0x5c30:
-               tp->version = RTL_VER_06;
-               tp->mii.supports_gmii = 1;
-               break;
-       default:
-               netif_info(tp, probe, tp->netdev,
-                          "Unknown version 0x%04x\n", version);
-               break;
-       }
-}
-
 static void rtl8152_unload(struct r8152 *tp)
 {
        if (test_bit(RTL8152_UNPLUG, &tp->flags))
@@ -4326,14 +4323,66 @@ static int rtl_ops_init(struct r8152 *tp)
        return ret;
 }
 
+static u8 rtl_get_version(struct usb_interface *intf)
+{
+       struct usb_device *udev = interface_to_usbdev(intf);
+       u32 ocp_data = 0;
+       __le32 *tmp;
+       u8 version;
+       int ret;
+
+       tmp = kmalloc(sizeof(*tmp), GFP_KERNEL);
+       if (!tmp)
+               return 0;
+
+       ret = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0),
+                             RTL8152_REQ_GET_REGS, RTL8152_REQT_READ,
+                             PLA_TCR0, MCU_TYPE_PLA, tmp, sizeof(*tmp), 500);
+       if (ret > 0)
+               ocp_data = (__le32_to_cpu(*tmp) >> 16) & VERSION_MASK;
+
+       kfree(tmp);
+
+       switch (ocp_data) {
+       case 0x4c00:
+               version = RTL_VER_01;
+               break;
+       case 0x4c10:
+               version = RTL_VER_02;
+               break;
+       case 0x5c00:
+               version = RTL_VER_03;
+               break;
+       case 0x5c10:
+               version = RTL_VER_04;
+               break;
+       case 0x5c20:
+               version = RTL_VER_05;
+               break;
+       case 0x5c30:
+               version = RTL_VER_06;
+               break;
+       default:
+               version = RTL_VER_UNKNOWN;
+               dev_info(&intf->dev, "Unknown version 0x%04x\n", ocp_data);
+               break;
+       }
+
+       return version;
+}
+
 static int rtl8152_probe(struct usb_interface *intf,
                         const struct usb_device_id *id)
 {
        struct usb_device *udev = interface_to_usbdev(intf);
+       u8 version = rtl_get_version(intf);
        struct r8152 *tp;
        struct net_device *netdev;
        int ret;
 
+       if (version == RTL_VER_UNKNOWN)
+               return -ENODEV;
+
        if (udev->actconfig->desc.bConfigurationValue != 1) {
                usb_driver_set_configuration(udev, 1);
                return -ENODEV;
@@ -4353,8 +4402,18 @@ static int rtl8152_probe(struct usb_interface *intf,
        tp->udev = udev;
        tp->netdev = netdev;
        tp->intf = intf;
+       tp->version = version;
+
+       switch (version) {
+       case RTL_VER_01:
+       case RTL_VER_02:
+               tp->mii.supports_gmii = 0;
+               break;
+       default:
+               tp->mii.supports_gmii = 1;
+               break;
+       }
 
-       r8152b_get_version(tp);
        ret = rtl_ops_init(tp);
        if (ret)
                goto out;
@@ -4497,6 +4556,8 @@ static void rtl8152_disconnect(struct usb_interface *intf)
 static struct usb_device_id rtl8152_table[] = {
        {REALTEK_USB_DEVICE(VENDOR_ID_REALTEK, 0x8152)},
        {REALTEK_USB_DEVICE(VENDOR_ID_REALTEK, 0x8153)},
+       {REALTEK_USB_DEVICE(VENDOR_ID_MICROSOFT, 0x07ab)},
+       {REALTEK_USB_DEVICE(VENDOR_ID_MICROSOFT, 0x07c6)},
        {REALTEK_USB_DEVICE(VENDOR_ID_SAMSUNG, 0xa101)},
        {REALTEK_USB_DEVICE(VENDOR_ID_LENOVO,  0x304f)},
        {REALTEK_USB_DEVICE(VENDOR_ID_LENOVO,  0x3062)},
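
Several of the r8152 hunks above revolve around the new rx_reserved_size() macro: the receive early size is now derived from the MTU with headroom for the VLAN header, CRC, RX descriptor and alignment, and is expressed in 4-byte units instead of the previous 8-byte units. A sketch of the arithmetic, using the driver-internal constants named in the macro:

        /* Mirrors rx_reserved_size() and r8153_set_rx_early_size() above;
         * VLAN_ETH_HLEN, CRC_SIZE, struct rx_desc and RX_ALIGN are the
         * driver's own definitions. */
        static u32 example_rx_early_size(u32 agg_buf_sz, u32 mtu)
        {
                u32 reserved = mtu + VLAN_ETH_HLEN + CRC_SIZE +
                               sizeof(struct rx_desc) + RX_ALIGN;

                return (agg_buf_sz - reserved) / 4;     /* 4-byte units */
        }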
index c5b21138b7eb1723579528532deb1b7ea9c2aa72..e96e2e5673d724ea5e009b686ca32afaf921190f 100644 (file)
@@ -291,6 +291,7 @@ static const struct net_device_ops rndis_netdev_ops = {
        .ndo_stop               = usbnet_stop,
        .ndo_start_xmit         = usbnet_start_xmit,
        .ndo_tx_timeout         = usbnet_tx_timeout,
+       .ndo_get_stats64        = usbnet_get_stats64,
        .ndo_set_mac_address    = eth_mac_addr,
        .ndo_validate_addr      = eth_validate_addr,
 };
index c81c79110cefca9443d614679d8e7cdd4b3295c3..daaa88a66f401fbe834b126d54125b79bbcfbad4 100644 (file)
@@ -791,47 +791,52 @@ static void rtl8150_get_drvinfo(struct net_device *netdev, struct ethtool_drvinf
        usb_make_path(dev->udev, info->bus_info, sizeof(info->bus_info));
 }
 
-static int rtl8150_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
+static int rtl8150_get_link_ksettings(struct net_device *netdev,
+                                     struct ethtool_link_ksettings *ecmd)
 {
        rtl8150_t *dev = netdev_priv(netdev);
        short lpa, bmcr;
+       u32 supported;
 
-       ecmd->supported = (SUPPORTED_10baseT_Half |
+       supported = (SUPPORTED_10baseT_Half |
                          SUPPORTED_10baseT_Full |
                          SUPPORTED_100baseT_Half |
                          SUPPORTED_100baseT_Full |
                          SUPPORTED_Autoneg |
                          SUPPORTED_TP | SUPPORTED_MII);
-       ecmd->port = PORT_TP;
-       ecmd->transceiver = XCVR_INTERNAL;
-       ecmd->phy_address = dev->phy;
+       ecmd->base.port = PORT_TP;
+       ecmd->base.phy_address = dev->phy;
        get_registers(dev, BMCR, 2, &bmcr);
        get_registers(dev, ANLP, 2, &lpa);
        if (bmcr & BMCR_ANENABLE) {
                u32 speed = ((lpa & (LPA_100HALF | LPA_100FULL)) ?
                             SPEED_100 : SPEED_10);
-               ethtool_cmd_speed_set(ecmd, speed);
-               ecmd->autoneg = AUTONEG_ENABLE;
+               ecmd->base.speed = speed;
+               ecmd->base.autoneg = AUTONEG_ENABLE;
                if (speed == SPEED_100)
-                       ecmd->duplex = (lpa & LPA_100FULL) ?
+                       ecmd->base.duplex = (lpa & LPA_100FULL) ?
                            DUPLEX_FULL : DUPLEX_HALF;
                else
-                       ecmd->duplex = (lpa & LPA_10FULL) ?
+                       ecmd->base.duplex = (lpa & LPA_10FULL) ?
                            DUPLEX_FULL : DUPLEX_HALF;
        } else {
-               ecmd->autoneg = AUTONEG_DISABLE;
-               ethtool_cmd_speed_set(ecmd, ((bmcr & BMCR_SPEED100) ?
-                                            SPEED_100 : SPEED_10));
-               ecmd->duplex = (bmcr & BMCR_FULLDPLX) ?
+               ecmd->base.autoneg = AUTONEG_DISABLE;
+               ecmd->base.speed = ((bmcr & BMCR_SPEED100) ?
+                                            SPEED_100 : SPEED_10);
+               ecmd->base.duplex = (bmcr & BMCR_FULLDPLX) ?
                    DUPLEX_FULL : DUPLEX_HALF;
        }
+
+       ethtool_convert_legacy_u32_to_link_mode(ecmd->link_modes.supported,
+                                               supported);
+
        return 0;
 }
 
 static const struct ethtool_ops ops = {
        .get_drvinfo = rtl8150_get_drvinfo,
-       .get_settings = rtl8150_get_settings,
-       .get_link = ethtool_op_get_link
+       .get_link = ethtool_op_get_link,
+       .get_link_ksettings = rtl8150_get_link_ksettings,
 };
 
 static int rtl8150_ioctl(struct net_device *netdev, struct ifreq *rq, int cmd)
index ac69f28d92d2360ddf3fc4ace031046db8d7d39d..2110ab3513f0446a9a090a83de66b0044b0791c9 100644 (file)
@@ -199,6 +199,7 @@ static const struct net_device_ops sierra_net_device_ops = {
        .ndo_start_xmit         = usbnet_start_xmit,
        .ndo_tx_timeout         = usbnet_tx_timeout,
        .ndo_change_mtu         = usbnet_change_mtu,
+       .ndo_get_stats64        = usbnet_get_stats64,
        .ndo_set_mac_address    = eth_mac_addr,
        .ndo_validate_addr      = eth_validate_addr,
 };
@@ -648,9 +649,9 @@ static const struct ethtool_ops sierra_net_ethtool_ops = {
        .get_link = sierra_net_get_link,
        .get_msglevel = usbnet_get_msglevel,
        .set_msglevel = usbnet_set_msglevel,
-       .get_settings = usbnet_get_settings,
-       .set_settings = usbnet_set_settings,
        .nway_reset = usbnet_nway_reset,
+       .get_link_ksettings = usbnet_get_link_ksettings,
+       .set_link_ksettings = usbnet_set_link_ksettings,
 };
 
 static int sierra_net_get_fw_attr(struct usbnet *dev, u16 *datap)
index 0b17b40d7a4fa2653caf21406c4a6b3b45d868b0..1ce01dbd494f3581236a3afcd71e145a5eb6cb52 100644 (file)
@@ -743,13 +743,13 @@ static const struct ethtool_ops smsc75xx_ethtool_ops = {
        .get_drvinfo    = usbnet_get_drvinfo,
        .get_msglevel   = usbnet_get_msglevel,
        .set_msglevel   = usbnet_set_msglevel,
-       .get_settings   = usbnet_get_settings,
-       .set_settings   = usbnet_set_settings,
        .get_eeprom_len = smsc75xx_ethtool_get_eeprom_len,
        .get_eeprom     = smsc75xx_ethtool_get_eeprom,
        .set_eeprom     = smsc75xx_ethtool_set_eeprom,
        .get_wol        = smsc75xx_ethtool_get_wol,
        .set_wol        = smsc75xx_ethtool_set_wol,
+       .get_link_ksettings     = usbnet_get_link_ksettings,
+       .set_link_ksettings     = usbnet_set_link_ksettings,
 };
 
 static int smsc75xx_ioctl(struct net_device *netdev, struct ifreq *rq, int cmd)
@@ -1381,6 +1381,7 @@ static const struct net_device_ops smsc75xx_netdev_ops = {
        .ndo_stop               = usbnet_stop,
        .ndo_start_xmit         = usbnet_start_xmit,
        .ndo_tx_timeout         = usbnet_tx_timeout,
+       .ndo_get_stats64        = usbnet_get_stats64,
        .ndo_change_mtu         = smsc75xx_change_mtu,
        .ndo_set_mac_address    = eth_mac_addr,
        .ndo_validate_addr      = eth_validate_addr,
index 831aa33d078ae7d2dd57fdded5de71d1eb915f99..c2f67cecdf5bfcd4d7b07566c8aaaf7ec14269e2 100644 (file)
@@ -853,32 +853,32 @@ static void set_mdix_status(struct net_device *net, __u8 mdix_ctrl)
        pdata->mdix_ctrl = mdix_ctrl;
 }
 
-static int smsc95xx_get_settings(struct net_device *net,
-                                struct ethtool_cmd *cmd)
+static int smsc95xx_get_link_ksettings(struct net_device *net,
+                                      struct ethtool_link_ksettings *cmd)
 {
        struct usbnet *dev = netdev_priv(net);
        struct smsc95xx_priv *pdata = (struct smsc95xx_priv *)(dev->data[0]);
        int retval;
 
-       retval = usbnet_get_settings(net, cmd);
+       retval = usbnet_get_link_ksettings(net, cmd);
 
-       cmd->eth_tp_mdix = pdata->mdix_ctrl;
-       cmd->eth_tp_mdix_ctrl = pdata->mdix_ctrl;
+       cmd->base.eth_tp_mdix = pdata->mdix_ctrl;
+       cmd->base.eth_tp_mdix_ctrl = pdata->mdix_ctrl;
 
        return retval;
 }
 
-static int smsc95xx_set_settings(struct net_device *net,
-                                struct ethtool_cmd *cmd)
+static int smsc95xx_set_link_ksettings(struct net_device *net,
+                                      const struct ethtool_link_ksettings *cmd)
 {
        struct usbnet *dev = netdev_priv(net);
        struct smsc95xx_priv *pdata = (struct smsc95xx_priv *)(dev->data[0]);
        int retval;
 
-       if (pdata->mdix_ctrl != cmd->eth_tp_mdix_ctrl)
-               set_mdix_status(net, cmd->eth_tp_mdix_ctrl);
+       if (pdata->mdix_ctrl != cmd->base.eth_tp_mdix_ctrl)
+               set_mdix_status(net, cmd->base.eth_tp_mdix_ctrl);
 
-       retval = usbnet_set_settings(net, cmd);
+       retval = usbnet_set_link_ksettings(net, cmd);
 
        return retval;
 }
@@ -889,8 +889,6 @@ static const struct ethtool_ops smsc95xx_ethtool_ops = {
        .get_drvinfo    = usbnet_get_drvinfo,
        .get_msglevel   = usbnet_get_msglevel,
        .set_msglevel   = usbnet_set_msglevel,
-       .get_settings   = smsc95xx_get_settings,
-       .set_settings   = smsc95xx_set_settings,
        .get_eeprom_len = smsc95xx_ethtool_get_eeprom_len,
        .get_eeprom     = smsc95xx_ethtool_get_eeprom,
        .set_eeprom     = smsc95xx_ethtool_set_eeprom,
@@ -898,6 +896,8 @@ static const struct ethtool_ops smsc95xx_ethtool_ops = {
        .get_regs       = smsc95xx_ethtool_getregs,
        .get_wol        = smsc95xx_ethtool_get_wol,
        .set_wol        = smsc95xx_ethtool_set_wol,
+       .get_link_ksettings     = smsc95xx_get_link_ksettings,
+       .set_link_ksettings     = smsc95xx_set_link_ksettings,
 };
 
 static int smsc95xx_ioctl(struct net_device *netdev, struct ifreq *rq, int cmd)
@@ -1248,6 +1248,7 @@ static const struct net_device_ops smsc95xx_netdev_ops = {
        .ndo_start_xmit         = usbnet_start_xmit,
        .ndo_tx_timeout         = usbnet_tx_timeout,
        .ndo_change_mtu         = usbnet_change_mtu,
+       .ndo_get_stats64        = usbnet_get_stats64,
        .ndo_set_mac_address    = eth_mac_addr,
        .ndo_validate_addr      = eth_validate_addr,
        .ndo_do_ioctl           = smsc95xx_ioctl,
index 4a1e9c489f1f455388ffee289d65e1d6b36cba42..317287f4409c840638f2ef30df36754805fb2410 100644 (file)
@@ -249,9 +249,9 @@ static const struct ethtool_ops sr9700_ethtool_ops = {
        .set_msglevel   = usbnet_set_msglevel,
        .get_eeprom_len = sr9700_get_eeprom_len,
        .get_eeprom     = sr9700_get_eeprom,
-       .get_settings   = usbnet_get_settings,
-       .set_settings   = usbnet_set_settings,
        .nway_reset     = usbnet_nway_reset,
+       .get_link_ksettings     = usbnet_get_link_ksettings,
+       .set_link_ksettings     = usbnet_set_link_ksettings,
 };
 
 static void sr9700_set_multicast(struct net_device *netdev)
@@ -308,6 +308,7 @@ static const struct net_device_ops sr9700_netdev_ops = {
        .ndo_start_xmit         = usbnet_start_xmit,
        .ndo_tx_timeout         = usbnet_tx_timeout,
        .ndo_change_mtu         = usbnet_change_mtu,
+       .ndo_get_stats64        = usbnet_get_stats64,
        .ndo_validate_addr      = eth_validate_addr,
        .ndo_do_ioctl           = sr9700_ioctl,
        .ndo_set_rx_mode        = sr9700_set_multicast,
index a50df0d8fb9abbd548ad6646e4a066a1211363c5..9277a0f228dfa6de355c74d2652edcf2fb1d2f4b 100644 (file)
@@ -524,9 +524,9 @@ static const struct ethtool_ops sr9800_ethtool_ops = {
        .set_wol        = sr_set_wol,
        .get_eeprom_len = sr_get_eeprom_len,
        .get_eeprom     = sr_get_eeprom,
-       .get_settings   = usbnet_get_settings,
-       .set_settings   = usbnet_set_settings,
        .nway_reset     = usbnet_nway_reset,
+       .get_link_ksettings     = usbnet_get_link_ksettings,
+       .set_link_ksettings     = usbnet_set_link_ksettings,
 };
 
 static int sr9800_link_reset(struct usbnet *dev)
@@ -679,6 +679,7 @@ static const struct net_device_ops sr9800_netdev_ops = {
        .ndo_start_xmit         = usbnet_start_xmit,
        .ndo_tx_timeout         = usbnet_tx_timeout,
        .ndo_change_mtu         = usbnet_change_mtu,
+       .ndo_get_stats64        = usbnet_get_stats64,
        .ndo_set_mac_address    = sr_set_mac_address,
        .ndo_validate_addr      = eth_validate_addr,
        .ndo_do_ioctl           = sr_ioctl,
index 3de65ea6531a8add927c0a2d7c74e8923c0f3274..1cc945cbeaa3c0458ef9a729a847fd088d2f1c71 100644 (file)
@@ -82,8 +82,6 @@
 // randomly generated ethernet address
 static u8      node_id [ETH_ALEN];
 
-static const char driver_name [] = "usbnet";
-
 /* use ethtool to change the level for any given device */
 static int msg_level = -1;
 module_param (msg_level, int, 0);
@@ -316,6 +314,7 @@ static void __usbnet_status_stop_force(struct usbnet *dev)
  */
 void usbnet_skb_return (struct usbnet *dev, struct sk_buff *skb)
 {
+       struct pcpu_sw_netstats *stats64 = this_cpu_ptr(dev->stats64);
        int     status;
 
        if (test_bit(EVENT_RX_PAUSED, &dev->flags)) {
@@ -327,8 +326,10 @@ void usbnet_skb_return (struct usbnet *dev, struct sk_buff *skb)
        if (skb->protocol == 0)
                skb->protocol = eth_type_trans (skb, dev->net);
 
-       dev->net->stats.rx_packets++;
-       dev->net->stats.rx_bytes += skb->len;
+       u64_stats_update_begin(&stats64->syncp);
+       stats64->rx_packets++;
+       stats64->rx_bytes += skb->len;
+       u64_stats_update_end(&stats64->syncp);
 
        netif_dbg(dev, rx_status, dev->net, "< rx, len %zu, type 0x%x\n",
                  skb->len + sizeof (struct ethhdr), skb->protocol);
@@ -947,18 +948,20 @@ EXPORT_SYMBOL_GPL(usbnet_open);
  * they'll probably want to use this base set.
  */
 
-int usbnet_get_settings (struct net_device *net, struct ethtool_cmd *cmd)
+int usbnet_get_link_ksettings(struct net_device *net,
+                             struct ethtool_link_ksettings *cmd)
 {
        struct usbnet *dev = netdev_priv(net);
 
        if (!dev->mii.mdio_read)
                return -EOPNOTSUPP;
 
-       return mii_ethtool_gset(&dev->mii, cmd);
+       return mii_ethtool_get_link_ksettings(&dev->mii, cmd);
 }
-EXPORT_SYMBOL_GPL(usbnet_get_settings);
+EXPORT_SYMBOL_GPL(usbnet_get_link_ksettings);
 
-int usbnet_set_settings (struct net_device *net, struct ethtool_cmd *cmd)
+int usbnet_set_link_ksettings(struct net_device *net,
+                             const struct ethtool_link_ksettings *cmd)
 {
        struct usbnet *dev = netdev_priv(net);
        int retval;
@@ -966,7 +969,7 @@ int usbnet_set_settings (struct net_device *net, struct ethtool_cmd *cmd)
        if (!dev->mii.mdio_write)
                return -EOPNOTSUPP;
 
-       retval = mii_ethtool_sset(&dev->mii, cmd);
+       retval = mii_ethtool_set_link_ksettings(&dev->mii, cmd);
 
        /* link speed/duplex might have changed */
        if (dev->driver_info->link_reset)
@@ -976,9 +979,39 @@ int usbnet_set_settings (struct net_device *net, struct ethtool_cmd *cmd)
        usbnet_update_max_qlen(dev);
 
        return retval;
+}
+EXPORT_SYMBOL_GPL(usbnet_set_link_ksettings);
 
+void usbnet_get_stats64(struct net_device *net, struct rtnl_link_stats64 *stats)
+{
+       struct usbnet *dev = netdev_priv(net);
+       unsigned int start;
+       int cpu;
+
+       netdev_stats_to_stats64(stats, &net->stats);
+
+       for_each_possible_cpu(cpu) {
+               struct pcpu_sw_netstats *stats64;
+               u64 rx_packets, rx_bytes;
+               u64 tx_packets, tx_bytes;
+
+               stats64 = per_cpu_ptr(dev->stats64, cpu);
+
+               do {
+                       start = u64_stats_fetch_begin_irq(&stats64->syncp);
+                       rx_packets = stats64->rx_packets;
+                       rx_bytes = stats64->rx_bytes;
+                       tx_packets = stats64->tx_packets;
+                       tx_bytes = stats64->tx_bytes;
+               } while (u64_stats_fetch_retry_irq(&stats64->syncp, start));
+
+               stats->rx_packets += rx_packets;
+               stats->rx_bytes += rx_bytes;
+               stats->tx_packets += tx_packets;
+               stats->tx_bytes += tx_bytes;
+       }
 }
-EXPORT_SYMBOL_GPL(usbnet_set_settings);
+EXPORT_SYMBOL_GPL(usbnet_get_stats64);
 
 u32 usbnet_get_link (struct net_device *net)
 {
@@ -1038,14 +1071,14 @@ EXPORT_SYMBOL_GPL(usbnet_set_msglevel);
 
 /* drivers may override default ethtool_ops in their bind() routine */
 static const struct ethtool_ops usbnet_ethtool_ops = {
-       .get_settings           = usbnet_get_settings,
-       .set_settings           = usbnet_set_settings,
        .get_link               = usbnet_get_link,
        .nway_reset             = usbnet_nway_reset,
        .get_drvinfo            = usbnet_get_drvinfo,
        .get_msglevel           = usbnet_get_msglevel,
        .set_msglevel           = usbnet_set_msglevel,
        .get_ts_info            = ethtool_op_get_ts_info,
+       .get_link_ksettings     = usbnet_get_link_ksettings,
+       .set_link_ksettings     = usbnet_set_link_ksettings,
 };
 
 /*-------------------------------------------------------------------------*/
@@ -1211,8 +1244,12 @@ static void tx_complete (struct urb *urb)
        struct usbnet           *dev = entry->dev;
 
        if (urb->status == 0) {
-               dev->net->stats.tx_packets += entry->packets;
-               dev->net->stats.tx_bytes += entry->length;
+               struct pcpu_sw_netstats *stats64 = this_cpu_ptr(dev->stats64);
+
+               u64_stats_update_begin(&stats64->syncp);
+               stats64->tx_packets += entry->packets;
+               stats64->tx_bytes += entry->length;
+               u64_stats_update_end(&stats64->syncp);
        } else {
                dev->net->stats.tx_errors++;
 
@@ -1569,6 +1606,7 @@ void usbnet_disconnect (struct usb_interface *intf)
        usb_free_urb(dev->interrupt);
        kfree(dev->padding_pkt);
 
+       free_percpu(dev->stats64);
        free_netdev(net);
 }
 EXPORT_SYMBOL_GPL(usbnet_disconnect);
@@ -1580,6 +1618,7 @@ static const struct net_device_ops usbnet_netdev_ops = {
        .ndo_tx_timeout         = usbnet_tx_timeout,
        .ndo_set_rx_mode        = usbnet_set_rx_mode,
        .ndo_change_mtu         = usbnet_change_mtu,
+       .ndo_get_stats64        = usbnet_get_stats64,
        .ndo_set_mac_address    = eth_mac_addr,
        .ndo_validate_addr      = eth_validate_addr,
 };
@@ -1641,6 +1680,11 @@ usbnet_probe (struct usb_interface *udev, const struct usb_device_id *prod)
        dev->intf = udev;
        dev->driver_info = info;
        dev->driver_name = name;
+
+       dev->stats64 = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
+       if (!dev->stats64)
+               goto out0;
+
        dev->msg_enable = netif_msg_init (msg_level, NETIF_MSG_DRV
                                | NETIF_MSG_PROBE | NETIF_MSG_LINK);
        init_waitqueue_head(&dev->wait);
@@ -1780,6 +1824,8 @@ out1:
         */
        cancel_work_sync(&dev->kevent);
        del_timer_sync(&dev->delay);
+       free_percpu(dev->stats64);
+out0:
        free_netdev(net);
 out:
        return status;
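
The usbnet changes above move packet and byte counting onto per-CPU pcpu_sw_netstats so the hot paths only touch local counters, while error counters remain in the plain net_device stats that netdev_stats_to_stats64() folds in. The writer half of the pattern, as a minimal sketch:

        #include <linux/netdevice.h>
        #include <linux/u64_stats_sync.h>

        /* Lock-free per-CPU counting on the RX path; readers pair this
         * with u64_stats_fetch_begin_irq()/_retry_irq() as in
         * usbnet_get_stats64() above. */
        static void example_count_rx(struct pcpu_sw_netstats __percpu *stats64,
                                     unsigned int len)
        {
                struct pcpu_sw_netstats *s = this_cpu_ptr(stats64);

                u64_stats_update_begin(&s->syncp);
                s->rx_packets++;
                s->rx_bytes += len;
                u64_stats_update_end(&s->syncp);
        }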
index 8c39d6d690e5e7f8ea6e522b8eb01a2db550c61a..317103680675f97eb056e650d5adc07fe51f67e2 100644 (file)
@@ -45,18 +45,13 @@ static struct {
        { "peer_ifindex" },
 };
 
-static int veth_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+static int veth_get_link_ksettings(struct net_device *dev,
+                                  struct ethtool_link_ksettings *cmd)
 {
-       cmd->supported          = 0;
-       cmd->advertising        = 0;
-       ethtool_cmd_speed_set(cmd, SPEED_10000);
-       cmd->duplex             = DUPLEX_FULL;
-       cmd->port               = PORT_TP;
-       cmd->phy_address        = 0;
-       cmd->transceiver        = XCVR_INTERNAL;
-       cmd->autoneg            = AUTONEG_DISABLE;
-       cmd->maxtxpkt           = 0;
-       cmd->maxrxpkt           = 0;
+       cmd->base.speed         = SPEED_10000;
+       cmd->base.duplex        = DUPLEX_FULL;
+       cmd->base.port          = PORT_TP;
+       cmd->base.autoneg       = AUTONEG_DISABLE;
        return 0;
 }
 
@@ -95,12 +90,12 @@ static void veth_get_ethtool_stats(struct net_device *dev,
 }
 
 static const struct ethtool_ops veth_ethtool_ops = {
-       .get_settings           = veth_get_settings,
        .get_drvinfo            = veth_get_drvinfo,
        .get_link               = ethtool_op_get_link,
        .get_strings            = veth_get_strings,
        .get_sset_count         = veth_get_sset_count,
        .get_ethtool_stats      = veth_get_ethtool_stats,
+       .get_link_ksettings     = veth_get_link_ksettings,
 };
 
 static netdev_tx_t veth_xmit(struct sk_buff *skb, struct net_device *dev)
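
A side note on why the converted code can assign cmd->base.speed directly: the legacy struct ethtool_cmd split the speed into 16-bit halves for ABI reasons, which ethtool_cmd_speed_set() papered over. A hedged reconstruction of that legacy helper, based on the old UAPI layout rather than on anything in this diff:

/* Legacy sketch: speeds above 65535 Mb/s did not fit the original
 * __u16 speed field, so a second field carried the high half.
 * ethtool_link_ksettings.base.speed is a single u32, so drivers now
 * just assign it.
 */
static inline void legacy_cmd_speed_set(struct ethtool_cmd *ep, u32 speed)
{
	ep->speed = (__u16)(speed & 0xFFFF);
	ep->speed_hi = (__u16)(speed >> 16);
}
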
index ea9890d619670e1abfba75fe608c2925d824cb1c..b0d241d110ec608848ed65054dfd54014e0afb86 100644 (file)
@@ -1636,47 +1636,57 @@ static void virtnet_get_channels(struct net_device *dev,
 }
 
 /* Check if the user is trying to change anything besides speed/duplex */
-static bool virtnet_validate_ethtool_cmd(const struct ethtool_cmd *cmd)
+static bool
+virtnet_validate_ethtool_cmd(const struct ethtool_link_ksettings *cmd)
 {
-       struct ethtool_cmd diff1 = *cmd;
-       struct ethtool_cmd diff2 = {};
+       struct ethtool_link_ksettings diff1 = *cmd;
+       struct ethtool_link_ksettings diff2 = {};
 
        /* cmd is always set so we need to clear it, validate the port type
         * and also without autonegotiation we can ignore advertising
         */
-       ethtool_cmd_speed_set(&diff1, 0);
-       diff2.port = PORT_OTHER;
-       diff1.advertising = 0;
-       diff1.duplex = 0;
-       diff1.cmd = 0;
+       diff1.base.speed = 0;
+       diff2.base.port = PORT_OTHER;
+       ethtool_link_ksettings_zero_link_mode(&diff1, advertising);
+       diff1.base.duplex = 0;
+       diff1.base.cmd = 0;
+       diff1.base.link_mode_masks_nwords = 0;
 
-       return !memcmp(&diff1, &diff2, sizeof(diff1));
+       return !memcmp(&diff1.base, &diff2.base, sizeof(diff1.base)) &&
+               bitmap_empty(diff1.link_modes.supported,
+                            __ETHTOOL_LINK_MODE_MASK_NBITS) &&
+               bitmap_empty(diff1.link_modes.advertising,
+                            __ETHTOOL_LINK_MODE_MASK_NBITS) &&
+               bitmap_empty(diff1.link_modes.lp_advertising,
+                            __ETHTOOL_LINK_MODE_MASK_NBITS);
 }
 
-static int virtnet_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+static int virtnet_set_link_ksettings(struct net_device *dev,
+                                     const struct ethtool_link_ksettings *cmd)
 {
        struct virtnet_info *vi = netdev_priv(dev);
        u32 speed;
 
-       speed = ethtool_cmd_speed(cmd);
+       speed = cmd->base.speed;
        /* don't allow custom speed and duplex */
        if (!ethtool_validate_speed(speed) ||
-           !ethtool_validate_duplex(cmd->duplex) ||
+           !ethtool_validate_duplex(cmd->base.duplex) ||
            !virtnet_validate_ethtool_cmd(cmd))
                return -EINVAL;
        vi->speed = speed;
-       vi->duplex = cmd->duplex;
+       vi->duplex = cmd->base.duplex;
 
        return 0;
 }
 
-static int virtnet_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+static int virtnet_get_link_ksettings(struct net_device *dev,
+                                     struct ethtool_link_ksettings *cmd)
 {
        struct virtnet_info *vi = netdev_priv(dev);
 
-       ethtool_cmd_speed_set(cmd, vi->speed);
-       cmd->duplex = vi->duplex;
-       cmd->port = PORT_OTHER;
+       cmd->base.speed = vi->speed;
+       cmd->base.duplex = vi->duplex;
+       cmd->base.port = PORT_OTHER;
 
        return 0;
 }
@@ -1696,8 +1706,8 @@ static const struct ethtool_ops virtnet_ethtool_ops = {
        .set_channels = virtnet_set_channels,
        .get_channels = virtnet_get_channels,
        .get_ts_info = ethtool_op_get_ts_info,
-       .get_settings = virtnet_get_settings,
-       .set_settings = virtnet_set_settings,
+       .get_link_ksettings = virtnet_get_link_ksettings,
+       .set_link_ksettings = virtnet_set_link_ksettings,
 };
 
 static void virtnet_freeze_down(struct virtio_device *vdev)
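
The diff1/diff2 idiom above is a general whitelist-by-zeroing pattern: copy the request, blank every field the driver is willing to accept, then require the remainder to equal a template holding the only legal fixed values. A self-contained illustration with a hypothetical struct (not a kernel API):

#include <string.h>
#include <stdbool.h>

struct knobs {
	unsigned int speed;   /* caller may change */
	unsigned int duplex;  /* caller may change */
	unsigned int port;    /* must keep one fixed value */
	unsigned int flags;   /* must stay zero */
};

#define KNOBS_PORT_OTHER 0xffu

static bool only_speed_duplex_changed(const struct knobs *req)
{
	struct knobs diff1 = *req;
	struct knobs diff2 = { .port = KNOBS_PORT_OTHER };

	diff1.speed = 0;	/* erase the whitelisted fields... */
	diff1.duplex = 0;

	/* ...anything else the caller touched survives and fails here */
	return memcmp(&diff1, &diff2, sizeof(diff1)) == 0;
}
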
index f88ffafebfbfd40192fd5919ce970f1bf15b73fc..2ff27314e04739034cef408b59aed6a77cd98911 100644 (file)
@@ -471,22 +471,25 @@ vmxnet3_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
 
 
 static int
-vmxnet3_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
+vmxnet3_get_link_ksettings(struct net_device *netdev,
+                          struct ethtool_link_ksettings *ecmd)
 {
        struct vmxnet3_adapter *adapter = netdev_priv(netdev);
 
-       ecmd->supported = SUPPORTED_10000baseT_Full | SUPPORTED_1000baseT_Full |
-                         SUPPORTED_TP;
-       ecmd->advertising = ADVERTISED_TP;
-       ecmd->port = PORT_TP;
-       ecmd->transceiver = XCVR_INTERNAL;
+       ethtool_link_ksettings_zero_link_mode(ecmd, supported);
+       ethtool_link_ksettings_add_link_mode(ecmd, supported, 10000baseT_Full);
+       ethtool_link_ksettings_add_link_mode(ecmd, supported, 1000baseT_Full);
+       ethtool_link_ksettings_add_link_mode(ecmd, supported, TP);
+       ethtool_link_ksettings_zero_link_mode(ecmd, advertising);
+       ethtool_link_ksettings_add_link_mode(ecmd, advertising, TP);
+       ecmd->base.port = PORT_TP;
 
        if (adapter->link_speed) {
-               ethtool_cmd_speed_set(ecmd, adapter->link_speed);
-               ecmd->duplex = DUPLEX_FULL;
+               ecmd->base.speed = adapter->link_speed;
+               ecmd->base.duplex = DUPLEX_FULL;
        } else {
-               ethtool_cmd_speed_set(ecmd, SPEED_UNKNOWN);
-               ecmd->duplex = DUPLEX_UNKNOWN;
+               ecmd->base.speed = SPEED_UNKNOWN;
+               ecmd->base.duplex = DUPLEX_UNKNOWN;
        }
        return 0;
 }
@@ -880,7 +883,6 @@ done:
 }
 
 static const struct ethtool_ops vmxnet3_ethtool_ops = {
-       .get_settings      = vmxnet3_get_settings,
        .get_drvinfo       = vmxnet3_get_drvinfo,
        .get_regs_len      = vmxnet3_get_regs_len,
        .get_regs          = vmxnet3_get_regs,
@@ -900,6 +902,7 @@ static const struct ethtool_ops vmxnet3_ethtool_ops = {
        .get_rxfh          = vmxnet3_get_rss,
        .set_rxfh          = vmxnet3_set_rss,
 #endif
+       .get_link_ksettings = vmxnet3_get_link_ksettings,
 };
 
 void vmxnet3_set_ethtool_ops(struct net_device *netdev)
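
For readers new to the ksettings API: the zero/add link-mode helpers used above operate on fixed-size bitmaps indexed by ETHTOOL_LINK_MODE_*_BIT constants. Roughly, paraphrasing the ethtool header (details may differ between kernel versions):

/* ethtool_link_ksettings_zero_link_mode(ecmd, supported)
 *   ~ bitmap_zero(ecmd->link_modes.supported,
 *                 __ETHTOOL_LINK_MODE_MASK_NBITS);
 *
 * ethtool_link_ksettings_add_link_mode(ecmd, supported, 10000baseT_Full)
 *   ~ __set_bit(ETHTOOL_LINK_MODE_10000baseT_Full_BIT,
 *               ecmd->link_modes.supported);
 */
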
index 22379da63400776ff70994097de6d472232ca908..eb5493e835569f8d95118a5a7f73fa9adac05843 100644 (file)
@@ -104,6 +104,23 @@ static void vrf_get_stats64(struct net_device *dev,
        }
 }
 
+/* by default VRF devices do not have a qdisc and are expected
+ * to be created with only a single queue.
+ */
+static bool qdisc_tx_is_default(const struct net_device *dev)
+{
+       struct netdev_queue *txq;
+       struct Qdisc *qdisc;
+
+       if (dev->num_tx_queues > 1)
+               return false;
+
+       txq = netdev_get_tx_queue(dev, 0);
+       qdisc = rcu_access_pointer(txq->qdisc);
+
+       return !qdisc->enqueue;
+}
+
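/* Background for the check above (from the noqueue qdisc design, not
 * from this diff): VRF devices are created with IFF_NO_QUEUE, which
 * gives them the "noqueue" qdisc, and noqueue identifies itself by a
 * NULL ->enqueue hook -- the core xmit path then transmits directly
 * instead of queueing. A NULL enqueue on the single TX queue therefore
 * marks an unmodified, default-configured VRF device.
 */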
 /* Local traffic destined to local address. Reinsert the packet to rx
  * path, similar to loopback handling.
  */
@@ -340,6 +357,7 @@ static netdev_tx_t is_ip_tx_frame(struct sk_buff *skb, struct net_device *dev)
 
 static netdev_tx_t vrf_xmit(struct sk_buff *skb, struct net_device *dev)
 {
+       int len = skb->len;
        netdev_tx_t ret = is_ip_tx_frame(skb, dev);
 
        if (likely(ret == NET_XMIT_SUCCESS || ret == NET_XMIT_CN)) {
@@ -347,7 +365,7 @@ static netdev_tx_t vrf_xmit(struct sk_buff *skb, struct net_device *dev)
 
                u64_stats_update_begin(&dstats->syncp);
                dstats->tx_pkts++;
-               dstats->tx_bytes += skb->len;
+               dstats->tx_bytes += len;
                u64_stats_update_end(&dstats->syncp);
        } else {
                this_cpu_inc(dev->dstats->tx_drps);
@@ -356,6 +374,29 @@ static netdev_tx_t vrf_xmit(struct sk_buff *skb, struct net_device *dev)
        return ret;
 }
 
+static int vrf_finish_direct(struct net *net, struct sock *sk,
+                            struct sk_buff *skb)
+{
+       struct net_device *vrf_dev = skb->dev;
+
+       if (!list_empty(&vrf_dev->ptype_all) &&
+           likely(skb_headroom(skb) >= ETH_HLEN)) {
+               struct ethhdr *eth = (struct ethhdr *)skb_push(skb, ETH_HLEN);
+
+               ether_addr_copy(eth->h_source, vrf_dev->dev_addr);
+               eth_zero_addr(eth->h_dest);
+               eth->h_proto = skb->protocol;
+
+               rcu_read_lock_bh();
+               dev_queue_xmit_nit(skb, vrf_dev);
+               rcu_read_unlock_bh();
+
+               skb_pull(skb, ETH_HLEN);
+       }
+
+       return 1;
+}
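/* What the new helper does: dev_queue_xmit_nit() hands the frame to
 * packet taps (AF_PACKET sockets such as tcpdump) attached to the VRF
 * device. The Ethernet header is pushed first so taps see a complete
 * frame with the VRF MAC as source, then pulled again so the output
 * path continues with a plain L3 packet; the ptype_all check skips
 * all of this when nothing is listening.
 */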
+
 #if IS_ENABLED(CONFIG_IPV6)
 /* modelled after ip6_finish_output2 */
 static int vrf_finish_output6(struct net *net, struct sock *sk,
@@ -404,18 +445,13 @@ static int vrf_output6(struct net *net, struct sock *sk, struct sk_buff *skb)
  * packet to go through device based features such as qdisc, netfilter
  * hooks and packet sockets with skb->dev set to vrf device.
  */
-static struct sk_buff *vrf_ip6_out(struct net_device *vrf_dev,
-                                  struct sock *sk,
-                                  struct sk_buff *skb)
+static struct sk_buff *vrf_ip6_out_redirect(struct net_device *vrf_dev,
+                                           struct sk_buff *skb)
 {
        struct net_vrf *vrf = netdev_priv(vrf_dev);
        struct dst_entry *dst = NULL;
        struct rt6_info *rt6;
 
-       /* don't divert link scope packets */
-       if (rt6_need_strict(&ipv6_hdr(skb)->daddr))
-               return skb;
-
        rcu_read_lock();
 
        rt6 = rcu_dereference(vrf->rt6);
@@ -437,6 +473,55 @@ static struct sk_buff *vrf_ip6_out(struct net_device *vrf_dev,
        return skb;
 }
 
+static int vrf_output6_direct(struct net *net, struct sock *sk,
+                             struct sk_buff *skb)
+{
+       skb->protocol = htons(ETH_P_IPV6);
+
+       return NF_HOOK_COND(NFPROTO_IPV6, NF_INET_POST_ROUTING,
+                           net, sk, skb, NULL, skb->dev,
+                           vrf_finish_direct,
+                           !(IPCB(skb)->flags & IPSKB_REROUTED));
+}
+
+static struct sk_buff *vrf_ip6_out_direct(struct net_device *vrf_dev,
+                                         struct sock *sk,
+                                         struct sk_buff *skb)
+{
+       struct net *net = dev_net(vrf_dev);
+       int err;
+
+       skb->dev = vrf_dev;
+
+       err = nf_hook(NFPROTO_IPV6, NF_INET_LOCAL_OUT, net, sk,
+                     skb, NULL, vrf_dev, vrf_output6_direct);
+
+       if (likely(err == 1))
+               err = vrf_output6_direct(net, sk, skb);
+
+       /* reset skb device */
+       if (likely(err == 1))
+               nf_reset(skb);
+       else
+               skb = NULL;
+
+       return skb;
+}
+
+static struct sk_buff *vrf_ip6_out(struct net_device *vrf_dev,
+                                  struct sock *sk,
+                                  struct sk_buff *skb)
+{
+       /* don't divert link scope packets */
+       if (rt6_need_strict(&ipv6_hdr(skb)->daddr))
+               return skb;
+
+       if (qdisc_tx_is_default(vrf_dev))
+               return vrf_ip6_out_direct(vrf_dev, sk, skb);
+
+       return vrf_ip6_out_redirect(vrf_dev, skb);
+}
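/* Note the ordering: link-scope packets are never diverted, on either
 * path. After that, a default (noqueue, single-queue) VRF device takes
 * the new direct path -- which still traverses the LOCAL_OUT and
 * POST_ROUTING hooks and the taps via vrf_finish_direct() but skips
 * the qdisc -- while a device that has been given a real qdisc keeps
 * the dst-rewrite path so queueing disciplines still apply.
 */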
+
 /* holding rtnl */
 static void vrf_rt6_release(struct net_device *dev, struct net_vrf *vrf)
 {
@@ -461,8 +546,10 @@ static void vrf_rt6_release(struct net_device *dev, struct net_vrf *vrf)
        }
 
        if (rt6_local) {
-               if (rt6_local->rt6i_idev)
+               if (rt6_local->rt6i_idev) {
                        in6_dev_put(rt6_local->rt6i_idev);
+                       rt6_local->rt6i_idev = NULL;
+               }
 
                dst = &rt6_local->dst;
                dev_put(dst->dev);
@@ -606,18 +693,13 @@ static int vrf_output(struct net *net, struct sock *sk, struct sk_buff *skb)
  * packet to go through device based features such as qdisc, netfilter
  * hooks and packet sockets with skb->dev set to vrf device.
  */
-static struct sk_buff *vrf_ip_out(struct net_device *vrf_dev,
-                                 struct sock *sk,
-                                 struct sk_buff *skb)
+static struct sk_buff *vrf_ip_out_redirect(struct net_device *vrf_dev,
+                                          struct sk_buff *skb)
 {
        struct net_vrf *vrf = netdev_priv(vrf_dev);
        struct dst_entry *dst = NULL;
        struct rtable *rth;
 
-       /* don't divert multicast */
-       if (ipv4_is_multicast(ip_hdr(skb)->daddr))
-               return skb;
-
        rcu_read_lock();
 
        rth = rcu_dereference(vrf->rth);
@@ -639,6 +721,55 @@ static struct sk_buff *vrf_ip_out(struct net_device *vrf_dev,
        return skb;
 }
 
+static int vrf_output_direct(struct net *net, struct sock *sk,
+                            struct sk_buff *skb)
+{
+       skb->protocol = htons(ETH_P_IP);
+
+       return NF_HOOK_COND(NFPROTO_IPV4, NF_INET_POST_ROUTING,
+                           net, sk, skb, NULL, skb->dev,
+                           vrf_finish_direct,
+                           !(IPCB(skb)->flags & IPSKB_REROUTED));
+}
+
+static struct sk_buff *vrf_ip_out_direct(struct net_device *vrf_dev,
+                                        struct sock *sk,
+                                        struct sk_buff *skb)
+{
+       struct net *net = dev_net(vrf_dev);
+       int err;
+
+       skb->dev = vrf_dev;
+
+       err = nf_hook(NFPROTO_IPV4, NF_INET_LOCAL_OUT, net, sk,
+                     skb, NULL, vrf_dev, vrf_output_direct);
+
+       if (likely(err == 1))
+               err = vrf_output_direct(net, sk, skb);
+
+       /* reset skb device */
+       if (likely(err == 1))
+               nf_reset(skb);
+       else
+               skb = NULL;
+
+       return skb;
+}
+
+static struct sk_buff *vrf_ip_out(struct net_device *vrf_dev,
+                                 struct sock *sk,
+                                 struct sk_buff *skb)
+{
+       /* don't divert multicast */
+       if (ipv4_is_multicast(ip_hdr(skb)->daddr))
+               return skb;
+
+       if (qdisc_tx_is_default(vrf_dev))
+               return vrf_ip_out_direct(vrf_dev, sk, skb);
+
+       return vrf_ip_out_redirect(vrf_dev, skb);
+}
+
 /* called with rcu lock held */
 static struct sk_buff *vrf_l3_out(struct net_device *vrf_dev,
                                  struct sock *sk,
@@ -746,14 +877,18 @@ static int do_vrf_add_slave(struct net_device *dev, struct net_device *port_dev)
 {
        int ret;
 
+       port_dev->priv_flags |= IFF_L3MDEV_SLAVE;
        ret = netdev_master_upper_dev_link(port_dev, dev, NULL, NULL);
        if (ret < 0)
-               return ret;
+               goto err;
 
-       port_dev->priv_flags |= IFF_L3MDEV_SLAVE;
        cycle_netdev(port_dev);
 
        return 0;
+
+err:
+       port_dev->priv_flags &= ~IFF_L3MDEV_SLAVE;
+       return ret;
 }
 
 static int vrf_add_slave(struct net_device *dev, struct net_device *port_dev)
@@ -975,9 +1110,11 @@ static struct sk_buff *vrf_ip6_rcv(struct net_device *vrf_dev,
                skb->dev = vrf_dev;
                skb->skb_iif = vrf_dev->ifindex;
 
-               skb_push(skb, skb->mac_len);
-               dev_queue_xmit_nit(skb, vrf_dev);
-               skb_pull(skb, skb->mac_len);
+               if (!list_empty(&vrf_dev->ptype_all)) {
+                       skb_push(skb, skb->mac_len);
+                       dev_queue_xmit_nit(skb, vrf_dev);
+                       skb_pull(skb, skb->mac_len);
+               }
 
                IP6CB(skb)->flags |= IP6SKB_L3SLAVE;
        }
@@ -1018,9 +1155,11 @@ static struct sk_buff *vrf_ip_rcv(struct net_device *vrf_dev,
 
        vrf_rx_stats(vrf_dev, skb->len);
 
-       skb_push(skb, skb->mac_len);
-       dev_queue_xmit_nit(skb, vrf_dev);
-       skb_pull(skb, skb->mac_len);
+       if (!list_empty(&vrf_dev->ptype_all)) {
+               skb_push(skb, skb->mac_len);
+               dev_queue_xmit_nit(skb, vrf_dev);
+               skb_pull(skb, skb->mac_len);
+       }
 
        skb = vrf_rcv_nfhook(NFPROTO_IPV4, NF_INET_PRE_ROUTING, skb, vrf_dev);
 out:
index e375560cc74e5ffc09553ddab5c6b657fe1cb6f0..ebc98bb17a51088acecc1174b6ea3e39256f6bed 100644 (file)
@@ -276,9 +276,9 @@ static int vxlan_fdb_info(struct sk_buff *skb, struct vxlan_dev *vxlan,
        send_eth = send_ip = true;
 
        if (type == RTM_GETNEIGH) {
-               ndm->ndm_family = AF_INET;
                send_ip = !vxlan_addr_any(&rdst->remote_ip);
                send_eth = !is_zero_ether_addr(fdb->eth_addr);
+               ndm->ndm_family = send_ip ? rdst->remote_ip.sa.sa_family : AF_INET;
        } else
                ndm->ndm_family = AF_BRIDGE;
        ndm->ndm_state = fdb->state;
@@ -1515,7 +1515,7 @@ static struct sk_buff *vxlan_na_create(struct sk_buff *request,
        int ns_olen;
        int i, len;
 
-       if (dev == NULL)
+       if (dev == NULL || !pskb_may_pull(request, request->len))
                return NULL;
 
        len = LL_RESERVED_SPACE(dev) + sizeof(struct ipv6hdr) +
@@ -1530,10 +1530,11 @@ static struct sk_buff *vxlan_na_create(struct sk_buff *request,
        skb_push(reply, sizeof(struct ethhdr));
        skb_reset_mac_header(reply);
 
-       ns = (struct nd_msg *)skb_transport_header(request);
+       ns = (struct nd_msg *)(ipv6_hdr(request) + 1);
 
        daddr = eth_hdr(request)->h_source;
-       ns_olen = request->len - skb_transport_offset(request) - sizeof(*ns);
+       ns_olen = request->len - skb_network_offset(request) -
+               sizeof(struct ipv6hdr) - sizeof(*ns);
        for (i = 0; i < ns_olen-1; i += (ns->opt[i+1]<<3)) {
                if (ns->opt[i] == ND_OPT_SOURCE_LL_ADDR) {
                        daddr = ns->opt + i + sizeof(struct nd_opt_hdr);
@@ -1604,10 +1605,13 @@ static int neigh_reduce(struct net_device *dev, struct sk_buff *skb, __be32 vni)
        if (!in6_dev)
                goto out;
 
+       if (!pskb_may_pull(skb, sizeof(struct ipv6hdr) + sizeof(struct nd_msg)))
+               goto out;
+
        iphdr = ipv6_hdr(skb);
        daddr = &iphdr->daddr;
 
-       msg = (struct nd_msg *)skb_transport_header(skb);
+       msg = (struct nd_msg *)(iphdr + 1);
        if (msg->icmph.icmp6_code != 0 ||
            msg->icmph.icmp6_type != NDISC_NEIGHBOUR_SOLICITATION)
                goto out;
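/* Why the fixed offset: on this path the skb's transport header is not
 * guaranteed to have been set, so the ND message is now taken from
 * immediately after the fixed-size IPv6 header instead, and the
 * pskb_may_pull() added above guarantees that much linear data before
 * the struct is dereferenced. The type/code checks move here from
 * vxlan_xmit() -- see the later hunk touching that function.
 */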
@@ -2242,16 +2246,13 @@ static netdev_tx_t vxlan_xmit(struct sk_buff *skb, struct net_device *dev)
                if (ntohs(eth->h_proto) == ETH_P_ARP)
                        return arp_reduce(dev, skb, vni);
 #if IS_ENABLED(CONFIG_IPV6)
-               else if (ntohs(eth->h_proto) == ETH_P_IPV6 &&
-                        pskb_may_pull(skb, sizeof(struct ipv6hdr)
-                                      + sizeof(struct nd_msg)) &&
-                        ipv6_hdr(skb)->nexthdr == IPPROTO_ICMPV6) {
-                               struct nd_msg *msg;
-
-                               msg = (struct nd_msg *)skb_transport_header(skb);
-                               if (msg->icmph.icmp6_code == 0 &&
-                                   msg->icmph.icmp6_type == NDISC_NEIGHBOUR_SOLICITATION)
-                                       return neigh_reduce(dev, skb, vni);
+               else if (ntohs(eth->h_proto) == ETH_P_IPV6) {
+                       struct ipv6hdr *hdr, _hdr;
+                       if ((hdr = skb_header_pointer(skb,
+                                                     skb_network_offset(skb),
+                                                     sizeof(_hdr), &_hdr)) &&
+                           hdr->nexthdr == IPPROTO_ICMPV6)
+                               return neigh_reduce(dev, skb, vni);
                }
 #endif
        }
@@ -2322,6 +2323,9 @@ static void vxlan_cleanup(unsigned long arg)
                        if (f->state & (NUD_PERMANENT | NUD_NOARP))
                                continue;
 
+                       if (f->flags & NTF_EXT_LEARNED)
+                               continue;
+
                        timeout = f->used + vxlan->cfg.age_interval * HZ;
                        if (time_before_eq(timeout, jiffies)) {
                                netdev_dbg(vxlan->dev,
@@ -2923,6 +2927,11 @@ static int vxlan_dev_configure(struct net *src_net, struct net_device *dev,
                return -EINVAL;
        }
 
+       if (lowerdev) {
+               dev->gso_max_size = lowerdev->gso_max_size;
+               dev->gso_max_segs = lowerdev->gso_max_segs;
+       }
+
        if (conf->mtu) {
                int max_mtu = ETH_MAX_MTU;
 
@@ -2976,6 +2985,44 @@ static int vxlan_dev_configure(struct net *src_net, struct net_device *dev,
        return 0;
 }
 
+static int __vxlan_dev_create(struct net *net, struct net_device *dev,
+                             struct vxlan_config *conf)
+{
+       struct vxlan_net *vn = net_generic(net, vxlan_net_id);
+       struct vxlan_dev *vxlan = netdev_priv(dev);
+       int err;
+
+       err = vxlan_dev_configure(net, dev, conf, false);
+       if (err)
+               return err;
+
+       dev->ethtool_ops = &vxlan_ethtool_ops;
+
+       /* create an fdb entry for a valid default destination */
+       if (!vxlan_addr_any(&vxlan->default_dst.remote_ip)) {
+               err = vxlan_fdb_create(vxlan, all_zeros_mac,
+                                      &vxlan->default_dst.remote_ip,
+                                      NUD_REACHABLE | NUD_PERMANENT,
+                                      NLM_F_EXCL | NLM_F_CREATE,
+                                      vxlan->cfg.dst_port,
+                                      vxlan->default_dst.remote_vni,
+                                      vxlan->default_dst.remote_vni,
+                                      vxlan->default_dst.remote_ifindex,
+                                      NTF_SELF);
+               if (err)
+                       return err;
+       }
+
+       err = register_netdevice(dev);
+       if (err) {
+               vxlan_fdb_delete_default(vxlan, vxlan->default_dst.remote_vni);
+               return err;
+       }
+
+       list_add(&vxlan->next, &vn->vxlan_list);
+       return 0;
+}
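/* The refactor: __vxlan_dev_create() now owns the whole creation
 * sequence -- configure, default-destination FDB entry, register, and
 * vxlan_list insertion -- behind a single error unwind. As the later
 * hunks show, both vxlan_newlink() (netlink) and vxlan_dev_create()
 * (in-kernel callers) go through it, so the two paths cannot drift
 * apart.
 */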
+
 static int vxlan_nl2conf(struct nlattr *tb[], struct nlattr *data[],
                         struct net_device *dev, struct vxlan_config *conf,
                         bool changelink)
@@ -3172,8 +3219,6 @@ static int vxlan_nl2conf(struct nlattr *tb[], struct nlattr *data[],
 static int vxlan_newlink(struct net *src_net, struct net_device *dev,
                         struct nlattr *tb[], struct nlattr *data[])
 {
-       struct vxlan_net *vn = net_generic(src_net, vxlan_net_id);
-       struct vxlan_dev *vxlan = netdev_priv(dev);
        struct vxlan_config conf;
        int err;
 
@@ -3181,36 +3226,7 @@ static int vxlan_newlink(struct net *src_net, struct net_device *dev,
        if (err)
                return err;
 
-       err = vxlan_dev_configure(src_net, dev, &conf, false);
-       if (err)
-               return err;
-
-       dev->ethtool_ops = &vxlan_ethtool_ops;
-
-       /* create an fdb entry for a valid default destination */
-       if (!vxlan_addr_any(&vxlan->default_dst.remote_ip)) {
-               err = vxlan_fdb_create(vxlan, all_zeros_mac,
-                                      &vxlan->default_dst.remote_ip,
-                                      NUD_REACHABLE | NUD_PERMANENT,
-                                      NLM_F_EXCL | NLM_F_CREATE,
-                                      vxlan->cfg.dst_port,
-                                      vxlan->default_dst.remote_vni,
-                                      vxlan->default_dst.remote_vni,
-                                      vxlan->default_dst.remote_ifindex,
-                                      NTF_SELF);
-               if (err)
-                       return err;
-       }
-
-       err = register_netdevice(dev);
-       if (err) {
-               vxlan_fdb_delete_default(vxlan, vxlan->default_dst.remote_vni);
-               return err;
-       }
-
-       list_add(&vxlan->next, &vn->vxlan_list);
-
-       return 0;
+       return __vxlan_dev_create(src_net, dev, &conf);
 }
 
 static int vxlan_changelink(struct net_device *dev, struct nlattr *tb[],
@@ -3440,7 +3456,7 @@ struct net_device *vxlan_dev_create(struct net *net, const char *name,
        if (IS_ERR(dev))
                return dev;
 
-       err = vxlan_dev_configure(net, dev, conf, false);
+       err = __vxlan_dev_create(net, dev, conf);
        if (err < 0) {
                free_netdev(dev);
                return ERR_PTR(err);
index a5045b5279d70a92c827424be3ff7869c6193dc8..6742ae605660454e19406d11f5aff7e84a2527f3 100644 (file)
@@ -381,8 +381,8 @@ static netdev_tx_t ucc_hdlc_tx(struct sk_buff *skb, struct net_device *dev)
        /* set bd status and length */
        bd_status = (bd_status & T_W_S) | T_R_S | T_I_S | T_L_S | T_TC_S;
 
-       iowrite16be(bd_status, &bd->status);
        iowrite16be(skb->len, &bd->length);
+       iowrite16be(bd_status, &bd->status);
 
        /* Move to next BD in the ring */
        if (!(bd_status & T_W_S))
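/* The swap above is a DMA-descriptor ordering fix: writing bd->status
 * sets the ready bit (T_R_S) and hands the descriptor to the QE
 * controller, so every other field -- here the length -- must be
 * written first, or the hardware can fetch a descriptor whose length
 * is stale.
 */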
@@ -457,7 +457,7 @@ static int hdlc_rx_done(struct ucc_hdlc_private *priv, int rx_work_limit)
        struct sk_buff *skb;
        hdlc_device *hdlc = dev_to_hdlc(dev);
        struct qe_bd *bd;
-       u32 bd_status;
+       u16 bd_status;
        u16 length, howmany = 0;
        u8 *bdbuffer;
        int i;
index e7f5910a65191f4f013ae53db73d2a77510ae9e8..f8eb66ef2944ea9630237455cae4faf483b4f609 100644 (file)
@@ -467,6 +467,9 @@ int i2400mu_probe(struct usb_interface *iface,
        struct i2400mu *i2400mu;
        struct usb_device *usb_dev = interface_to_usbdev(iface);
 
+       if (iface->cur_altsetting->desc.bNumEndpoints < 4)
+               return -ENODEV;
+
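/* A probe-time sanity check of the kind added to many USB drivers in
 * this period: the interface descriptor comes from the (possibly
 * buggy or malicious) device, so the endpoint count must be validated
 * before the driver indexes into the endpoint array; a crafted device
 * would otherwise trigger an out-of-bounds access.
 */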
        if (usb_dev->speed != USB_SPEED_HIGH)
                dev_err(dev, "device not connected as high speed\n");
 
index 85582bdd7524ea544a938f994950b10e70b3d6a4..c866ab524571ef1fc03844d832dea5b2a340c240 100644 (file)
@@ -52,7 +52,7 @@ const struct ath10k_hw_regs qca6174_regs = {
        .rtc_soc_base_address                   = 0x00000800,
        .rtc_wmac_base_address                  = 0x00001000,
        .soc_core_base_address                  = 0x0003a000,
-       .wlan_mac_base_address                  = 0x00020000,
+       .wlan_mac_base_address                  = 0x00010000,
        .ce_wrapper_base_address                = 0x00034000,
        .ce0_base_address                       = 0x00034400,
        .ce1_base_address                       = 0x00034800,
index 4b83e87f0b9450bf431b48a168901371f1cab33c..20bf967a70b969d6be6e593e7ae04330d1d85daa 100644 (file)
@@ -2,7 +2,7 @@ config WCN36XX
        tristate "Qualcomm Atheros WCN3660/3680 support"
        depends on MAC80211 && HAS_DMA
        depends on QCOM_WCNSS_CTRL || QCOM_WCNSS_CTRL=n
-       depends on QCOM_SMD || QCOM_SMD=n
+       depends on RPMSG || RPMSG=n
        ---help---
          This module adds support for wireless adapters based on
          Qualcomm Atheros WCN3660 and WCN3680 mobile chipsets.
index 2b05154d05a47fd3b5b2180e87fa6a686dc5429d..ac919e425a4f4f420830ba7ddd4ce01c6fe97727 100644 (file)
@@ -22,7 +22,7 @@
 #include <linux/of_address.h>
 #include <linux/of_device.h>
 #include <linux/of_irq.h>
-#include <linux/soc/qcom/smd.h>
+#include <linux/rpmsg.h>
 #include <linux/soc/qcom/smem_state.h>
 #include <linux/soc/qcom/wcnss_ctrl.h>
 #include "wcn36xx.h"
@@ -1218,15 +1218,13 @@ static int wcn36xx_probe(struct platform_device *pdev)
 
        INIT_WORK(&wcn->scan_work, wcn36xx_hw_scan_worker);
 
-       wcn->smd_channel = qcom_wcnss_open_channel(wcnss, "WLAN_CTRL", wcn36xx_smd_rsp_process);
+       wcn->smd_channel = qcom_wcnss_open_channel(wcnss, "WLAN_CTRL", wcn36xx_smd_rsp_process, hw);
        if (IS_ERR(wcn->smd_channel)) {
                wcn36xx_err("failed to open WLAN_CTRL channel\n");
                ret = PTR_ERR(wcn->smd_channel);
                goto out_wq;
        }
 
-       qcom_smd_set_drvdata(wcn->smd_channel, hw);
-
        addr = of_get_property(pdev->dev.of_node, "local-mac-address", &ret);
        if (addr && ret != ETH_ALEN) {
                wcn36xx_err("invalid local-mac-address\n");
index 1c2966f7db7a3f7c41660027444e17f4a908c54d..9c6590d5348ad53f64d130287f51b9fe7ccf95d8 100644 (file)
@@ -19,7 +19,7 @@
 #include <linux/etherdevice.h>
 #include <linux/firmware.h>
 #include <linux/bitops.h>
-#include <linux/soc/qcom/smd.h>
+#include <linux/rpmsg.h>
 #include "smd.h"
 
 struct wcn36xx_cfg_val {
@@ -254,7 +254,7 @@ static int wcn36xx_smd_send_and_wait(struct wcn36xx *wcn, size_t len)
 
        init_completion(&wcn->hal_rsp_compl);
        start = jiffies;
-       ret = qcom_smd_send(wcn->smd_channel, wcn->hal_buf, len);
+       ret = rpmsg_send(wcn->smd_channel, wcn->hal_buf, len);
        if (ret) {
                wcn36xx_err("HAL TX failed\n");
                goto out;
@@ -2205,11 +2205,11 @@ out:
        return ret;
 }
 
-int wcn36xx_smd_rsp_process(struct qcom_smd_channel *channel,
-                           const void *buf, size_t len)
+int wcn36xx_smd_rsp_process(struct rpmsg_device *rpdev,
+                           void *buf, int len, void *priv, u32 addr)
 {
        const struct wcn36xx_hal_msg_header *msg_header = buf;
-       struct ieee80211_hw *hw = qcom_smd_get_drvdata(channel);
+       struct ieee80211_hw *hw = priv;
        struct wcn36xx *wcn = hw->priv;
        struct wcn36xx_hal_ind_msg *msg_ind;
        wcn36xx_dbg_dump(WCN36XX_DBG_SMD_DUMP, "SMD <<< ", buf, len);
index 8892ccd67b144903ae25cde3287e79f951e5a8c6..013fc9546f56d4b48d60a3452b4a72d3204cdaa6 100644 (file)
@@ -51,7 +51,7 @@ struct wcn36xx_hal_ind_msg {
 };
 
 struct wcn36xx;
-struct qcom_smd_channel;
+struct rpmsg_device;
 
 int wcn36xx_smd_open(struct wcn36xx *wcn);
 void wcn36xx_smd_close(struct wcn36xx *wcn);
@@ -129,8 +129,8 @@ int wcn36xx_smd_trigger_ba(struct wcn36xx *wcn, u8 sta_index);
 
 int wcn36xx_smd_update_cfg(struct wcn36xx *wcn, u32 cfg_id, u32 value);
 
-int wcn36xx_smd_rsp_process(struct qcom_smd_channel *channel,
-                           const void *buf, size_t len);
+int wcn36xx_smd_rsp_process(struct rpmsg_device *rpdev,
+                           void *buf, int len, void *priv, u32 addr);
 
 int wcn36xx_smd_set_mc_list(struct wcn36xx *wcn,
                            struct ieee80211_vif *vif,
index 7423998ddeb492cab624134fcb5d363db42093d9..b52b4da9a967bab2ad6b5298f76797f3557e8395 100644 (file)
@@ -195,7 +195,7 @@ struct wcn36xx {
        void __iomem            *ccu_base;
        void __iomem            *dxe_base;
 
-       struct qcom_smd_channel *smd_channel;
+       struct rpmsg_endpoint   *smd_channel;
 
        struct qcom_smem_state  *tx_enable_state;
        unsigned                tx_enable_state_bit;
index e12f62356fd14791299160c2f63d8a0d281ad206..27b110dc8cc622c5f17f184263ff28406c5154b8 100644 (file)
@@ -513,7 +513,7 @@ struct atmel_private {
        } station_state;
 
        int operating_mode, power_mode;
-       time_t last_qual;
+       unsigned long last_qual;
        int beacons_this_sec;
        int channel;
        int reg_domain, config_reg_domain;
index ab42b1fea03c170e6728f5b71fe720aa61174053..9d99eb42d9176f0f833048b3f87a906542c9e90c 100644 (file)
@@ -18,14 +18,14 @@ config BRCMSMAC
          module, the driver will be called brcmsmac.ko.
 
 config BRCMFMAC
-       tristate "Broadcom IEEE802.11n embedded FullMAC WLAN driver"
+       tristate "Broadcom FullMAC WLAN driver"
        depends on CFG80211
        select BRCMUTIL
        ---help---
-         This module adds support for embedded wireless adapters based on
-         Broadcom IEEE802.11n FullMAC chipsets. It has to work with at least
-         one of the bus interface support. If you choose to build a module,
-         it'll be called brcmfmac.ko.
+         This module adds support for wireless adapters based on Broadcom
+         FullMAC chipsets. It requires support for at least one of the bus
+         interfaces. If you choose to build a module, it'll be called
+         brcmfmac.ko.
 
 config BRCMFMAC_PROTO_BCDC
        bool
index 0383ba559edccaaad719ff2ec08bec1606a56108..1f5a9b948abf49a2ce4baddf476c67bf6288ec5a 100644 (file)
@@ -25,7 +25,6 @@ brcmfmac-objs += \
                chip.o \
                fwil.o \
                fweh.o \
-               fwsignal.o \
                p2p.o \
                proto.o \
                common.o \
@@ -36,7 +35,8 @@ brcmfmac-objs += \
                vendor.o \
                pno.o
 brcmfmac-$(CONFIG_BRCMFMAC_PROTO_BCDC) += \
-               bcdc.o
+               bcdc.o \
+               fwsignal.o
 brcmfmac-$(CONFIG_BRCMFMAC_PROTO_MSGBUF) += \
                commonring.o \
                flowring.o \
index 384b1873e7e38dfbeac989ce7f46138548f8d750..9f2d0b0cf6e5c452ad85a3caef58cf16a8cdad46 100644 (file)
@@ -103,9 +103,17 @@ struct brcmf_bcdc {
        u8 bus_header[BUS_HEADER_LEN];
        struct brcmf_proto_bcdc_dcmd msg;
        unsigned char buf[BRCMF_DCMD_MAXLEN];
+       struct brcmf_fws_info *fws;
 };
 
 
+struct brcmf_fws_info *drvr_to_fws(struct brcmf_pub *drvr)
+{
+       struct brcmf_bcdc *bcdc = drvr->proto->pd;
+
+       return bcdc->fws;
+}
+
 static int
 brcmf_proto_bcdc_msg(struct brcmf_pub *drvr, int ifidx, uint cmd, void *buf,
                     uint len, bool set)
@@ -330,8 +338,9 @@ static int brcmf_proto_bcdc_tx_queue_data(struct brcmf_pub *drvr, int ifidx,
                                          struct sk_buff *skb)
 {
        struct brcmf_if *ifp = brcmf_get_ifp(drvr, ifidx);
+       struct brcmf_bcdc *bcdc = drvr->proto->pd;
 
-       if (!brcmf_fws_queue_skbs(drvr->fws))
+       if (!brcmf_fws_queue_skbs(bcdc->fws))
                return brcmf_proto_txdata(drvr, ifidx, 0, skb);
 
        return brcmf_fws_process_skb(ifp, skb);
@@ -345,6 +354,36 @@ brcmf_proto_bcdc_txdata(struct brcmf_pub *drvr, int ifidx, u8 offset,
        return brcmf_bus_txdata(drvr->bus_if, pktbuf);
 }
 
+void brcmf_proto_bcdc_txflowblock(struct device *dev, bool state)
+{
+       struct brcmf_bus *bus_if = dev_get_drvdata(dev);
+       struct brcmf_pub *drvr = bus_if->drvr;
+
+       brcmf_dbg(TRACE, "Enter\n");
+
+       brcmf_fws_bus_blocked(drvr, state);
+}
+
+void
+brcmf_proto_bcdc_txcomplete(struct device *dev, struct sk_buff *txp,
+                           bool success)
+{
+       struct brcmf_bus *bus_if = dev_get_drvdata(dev);
+       struct brcmf_bcdc *bcdc = bus_if->drvr->proto->pd;
+       struct brcmf_if *ifp;
+
+       /* await txstatus signal for firmware if active */
+       if (brcmf_fws_fc_active(bcdc->fws)) {
+               if (!success)
+                       brcmf_fws_bustxfail(bcdc->fws, txp);
+       } else {
+               if (brcmf_proto_bcdc_hdrpull(bus_if->drvr, false, txp, &ifp))
+                       brcmu_pkt_buf_free_skb(txp);
+               else
+                       brcmf_txfinalize(ifp, txp, success);
+       }
+}
+
 static void
 brcmf_proto_bcdc_configure_addr_mode(struct brcmf_pub *drvr, int ifidx,
                                     enum proto_addr_mode addr_mode)
@@ -369,6 +408,38 @@ static void brcmf_proto_bcdc_rxreorder(struct brcmf_if *ifp,
        brcmf_fws_rxreorder(ifp, skb);
 }
 
+static void
+brcmf_proto_bcdc_add_if(struct brcmf_if *ifp)
+{
+       brcmf_fws_add_interface(ifp);
+}
+
+static void
+brcmf_proto_bcdc_del_if(struct brcmf_if *ifp)
+{
+       brcmf_fws_del_interface(ifp);
+}
+
+static void
+brcmf_proto_bcdc_reset_if(struct brcmf_if *ifp)
+{
+       brcmf_fws_reset_interface(ifp);
+}
+
+static int
+brcmf_proto_bcdc_init_done(struct brcmf_pub *drvr)
+{
+       struct brcmf_bcdc *bcdc = drvr->proto->pd;
+       struct brcmf_fws_info *fws;
+
+       fws = brcmf_fws_attach(drvr);
+       if (IS_ERR(fws))
+               return PTR_ERR(fws);
+
+       bcdc->fws = fws;
+       return 0;
+}
+
 int brcmf_proto_bcdc_attach(struct brcmf_pub *drvr)
 {
        struct brcmf_bcdc *bcdc;
@@ -392,6 +463,10 @@ int brcmf_proto_bcdc_attach(struct brcmf_pub *drvr)
        drvr->proto->delete_peer = brcmf_proto_bcdc_delete_peer;
        drvr->proto->add_tdls_peer = brcmf_proto_bcdc_add_tdls_peer;
        drvr->proto->rxreorder = brcmf_proto_bcdc_rxreorder;
+       drvr->proto->add_if = brcmf_proto_bcdc_add_if;
+       drvr->proto->del_if = brcmf_proto_bcdc_del_if;
+       drvr->proto->reset_if = brcmf_proto_bcdc_reset_if;
+       drvr->proto->init_done = brcmf_proto_bcdc_init_done;
        drvr->proto->pd = bcdc;
 
        drvr->hdrlen += BCDC_HEADER_LEN + BRCMF_PROT_FW_SIGNAL_MAX_TXBYTES;
@@ -406,6 +481,9 @@ fail:
 
 void brcmf_proto_bcdc_detach(struct brcmf_pub *drvr)
 {
-       kfree(drvr->proto->pd);
+       struct brcmf_bcdc *bcdc = drvr->proto->pd;
+
        drvr->proto->pd = NULL;
+       brcmf_fws_detach(bcdc->fws);
+       kfree(bcdc);
 }
index 6003179c0ceb286ce2a396d96eb1f436d727b80f..3b0e9eff21b5826883bd2a0377c3baeb023490c5 100644 (file)
 #ifdef CONFIG_BRCMFMAC_PROTO_BCDC
 int brcmf_proto_bcdc_attach(struct brcmf_pub *drvr);
 void brcmf_proto_bcdc_detach(struct brcmf_pub *drvr);
+void brcmf_proto_bcdc_txflowblock(struct device *dev, bool state);
+void brcmf_proto_bcdc_txcomplete(struct device *dev, struct sk_buff *txp,
+                                bool success);
+struct brcmf_fws_info *drvr_to_fws(struct brcmf_pub *drvr);
 #else
 static inline int brcmf_proto_bcdc_attach(struct brcmf_pub *drvr) { return 0; }
 static inline void brcmf_proto_bcdc_detach(struct brcmf_pub *drvr) {}
index 5bc2ba214735af2a8f44394834e1c40c45820487..9b970dc2b922a8dd0e4c4199442f5976f7649798 100644 (file)
@@ -21,6 +21,7 @@
 #include <linux/pci_ids.h>
 #include <linux/sched.h>
 #include <linux/completion.h>
+#include <linux/interrupt.h>
 #include <linux/scatterlist.h>
 #include <linux/mmc/sdio.h>
 #include <linux/mmc/core.h>
index 76693df347425951397a211c863f19046dbf07f5..b55c3293c4b48054f803382f2f98c1bcf95527c4 100644 (file)
@@ -229,11 +229,6 @@ int brcmf_attach(struct device *dev, struct brcmf_mp_device *settings);
 void brcmf_detach(struct device *dev);
 /* Indication from bus module that dongle should be reset */
 void brcmf_dev_reset(struct device *dev);
-/* Indication from bus module to change flow-control state */
-void brcmf_txflowblock(struct device *dev, bool state);
-
-/* Notify the bus has transferred the tx packet to firmware */
-void brcmf_txcomplete(struct device *dev, struct sk_buff *txp, bool success);
 
 /* Configure the "global" bus state used by upper layers */
 void brcmf_bus_change_state(struct brcmf_bus *bus, enum brcmf_bus_state state);
index 944b83cfc51978d06ccc82a9f23ce64472af3fc8..0a067e59f90b5418d9f730067925be15660a1119 100644 (file)
@@ -3097,6 +3097,9 @@ brcmf_cfg80211_escan_handler(struct brcmf_if *ifp,
 
        status = e->status;
 
+       if (status == BRCMF_E_STATUS_ABORT)
+               goto exit;
+
        if (!test_bit(BRCMF_SCAN_STATUS_BUSY, &cfg->scan_status)) {
                brcmf_err("scan not ready, bsscfgidx=%d\n", ifp->bsscfgidx);
                return -EPERM;
@@ -3213,7 +3216,7 @@ static int brcmf_internal_escan_add_info(struct cfg80211_scan_request *req,
 {
        struct ieee80211_channel *chan;
        enum nl80211_band band;
-       int freq;
+       int freq, i;
 
        if (channel <= CH_MAX_2G_CHANNEL)
                band = NL80211_BAND_2GHZ;
@@ -3228,10 +3231,22 @@ static int brcmf_internal_escan_add_info(struct cfg80211_scan_request *req,
        if (!chan)
                return -EINVAL;
 
-       req->channels[req->n_channels++] = chan;
-       memcpy(req->ssids[req->n_ssids].ssid, ssid, ssid_len);
-       req->ssids[req->n_ssids++].ssid_len = ssid_len;
+       for (i = 0; i < req->n_channels; i++) {
+               if (req->channels[i] == chan)
+                       break;
+       }
+       if (i == req->n_channels)
+               req->channels[req->n_channels++] = chan;
 
+       for (i = 0; i < req->n_ssids; i++) {
+               if (req->ssids[i].ssid_len == ssid_len &&
+                   !memcmp(req->ssids[i].ssid, ssid, ssid_len))
+                       break;
+       }
+       if (i == req->n_ssids) {
+               memcpy(req->ssids[req->n_ssids].ssid, ssid, ssid_len);
+               req->ssids[req->n_ssids++].ssid_len = ssid_len;
+       }
        return 0;
 }
 
@@ -3297,6 +3312,7 @@ brcmf_notify_sched_scan_results(struct brcmf_if *ifp,
        struct brcmf_pno_scanresults_le *pfn_result;
        u32 result_count;
        u32 status;
+       u32 datalen;
 
        brcmf_dbg(SCAN, "Enter\n");
 
@@ -3323,6 +3339,14 @@ brcmf_notify_sched_scan_results(struct brcmf_if *ifp,
                brcmf_err("FALSE PNO Event. (pfn_count == 0)\n");
                goto out_err;
        }
+
+       netinfo_start = brcmf_get_netinfo_array(pfn_result);
+       datalen = e->datalen - ((void *)netinfo_start - (void *)pfn_result);
+       if (datalen < result_count * sizeof(*netinfo)) {
+               brcmf_err("insufficient event data\n");
+               goto out_err;
+       }
+
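/* Hardening against firmware input: e->datalen is what the firmware
 * actually sent, so checking it against result_count * sizeof(*netinfo)
 * keeps a bogus pfn_count from walking past the end of the event
 * buffer in the loop below; the SSID_len clamp added further down is
 * the same kind of fix.
 */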
        request = brcmf_alloc_internal_escan_request(wiphy,
                                                     result_count);
        if (!request) {
@@ -3330,17 +3354,11 @@ brcmf_notify_sched_scan_results(struct brcmf_if *ifp,
                goto out_err;
        }
 
-       netinfo_start = brcmf_get_netinfo_array(pfn_result);
-
        for (i = 0; i < result_count; i++) {
                netinfo = &netinfo_start[i];
-               if (!netinfo) {
-                       brcmf_err("Invalid netinfo ptr. index: %d\n",
-                                 i);
-                       err = -EINVAL;
-                       goto out_err;
-               }
 
+               if (netinfo->SSID_len > IEEE80211_MAX_SSID_LEN)
+                       netinfo->SSID_len = IEEE80211_MAX_SSID_LEN;
                brcmf_dbg(SCAN, "SSID:%.32s Channel:%d\n",
                          netinfo->SSID, netinfo->channel);
                err = brcmf_internal_escan_add_info(request,
@@ -6450,7 +6468,8 @@ static int brcmf_setup_wiphy(struct wiphy *wiphy, struct brcmf_if *ifp)
                                    BIT(NL80211_BSS_SELECT_ATTR_BAND_PREF) |
                                    BIT(NL80211_BSS_SELECT_ATTR_RSSI_ADJUST);
 
-       wiphy->flags |= WIPHY_FLAG_PS_ON_BY_DEFAULT |
+       wiphy->flags |= WIPHY_FLAG_NETNS_OK |
+                       WIPHY_FLAG_PS_ON_BY_DEFAULT |
                        WIPHY_FLAG_OFFCHAN_TX |
                        WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL;
        if (brcmf_feat_is_enabled(ifp, BRCMF_FEAT_TDLS))
@@ -6736,6 +6755,10 @@ static void brcmf_cfg80211_reg_notifier(struct wiphy *wiphy,
        s32 err;
        int i;
 
+       /* The country code gets set to "00" by default at boot, ignore */
+       if (req->alpha2[0] == '0' && req->alpha2[1] == '0')
+               return;
+
        /* ignore non-ISO3166 country codes */
        for (i = 0; i < sizeof(req->alpha2); i++)
                if (req->alpha2[i] < 'A' || req->alpha2[i] > 'Z') {
index 33b133f7e63aad3b5a6bb14018fd461ec4fb90c6..7a2b49587b4d32dde1af56a2979d4e1818500f84 100644 (file)
@@ -161,7 +161,7 @@ int brcmf_c_preinit_dcmds(struct brcmf_if *ifp)
        strsep(&ptr, "\n");
 
        /* Print fw version info */
-       brcmf_err("Firmware version = %s\n", buf);
+       brcmf_info("Firmware version = %s\n", buf);
 
        /* locate firmware version number for ethtool */
        ptr = strrchr(buf, ' ') + 1;
index 60da86a8d95b0190b22c76e99cfc88543939e1a1..24118ce72b4fe99c426081979f080363edded58d 100644 (file)
@@ -32,7 +32,6 @@
 #include "p2p.h"
 #include "cfg80211.h"
 #include "fwil.h"
-#include "fwsignal.h"
 #include "feature.h"
 #include "proto.h"
 #include "pcie.h"
@@ -283,16 +282,6 @@ void brcmf_txflowblock_if(struct brcmf_if *ifp,
        spin_unlock_irqrestore(&ifp->netif_stop_lock, flags);
 }
 
-void brcmf_txflowblock(struct device *dev, bool state)
-{
-       struct brcmf_bus *bus_if = dev_get_drvdata(dev);
-       struct brcmf_pub *drvr = bus_if->drvr;
-
-       brcmf_dbg(TRACE, "Enter\n");
-
-       brcmf_fws_bus_blocked(drvr, state);
-}
-
 void brcmf_netif_rx(struct brcmf_if *ifp, struct sk_buff *skb)
 {
        if (skb->pkt_type == PACKET_MULTICAST)
@@ -393,24 +382,6 @@ void brcmf_txfinalize(struct brcmf_if *ifp, struct sk_buff *txp, bool success)
        brcmu_pkt_buf_free_skb(txp);
 }
 
-void brcmf_txcomplete(struct device *dev, struct sk_buff *txp, bool success)
-{
-       struct brcmf_bus *bus_if = dev_get_drvdata(dev);
-       struct brcmf_pub *drvr = bus_if->drvr;
-       struct brcmf_if *ifp;
-
-       /* await txstatus signal for firmware if active */
-       if (brcmf_fws_fc_active(drvr->fws)) {
-               if (!success)
-                       brcmf_fws_bustxfail(drvr->fws, txp);
-       } else {
-               if (brcmf_proto_hdrpull(drvr, false, txp, &ifp))
-                       brcmu_pkt_buf_free_skb(txp);
-               else
-                       brcmf_txfinalize(ifp, txp, success);
-       }
-}
-
 static void brcmf_ethtool_get_drvinfo(struct net_device *ndev,
                                    struct ethtool_drvinfo *info)
 {
@@ -504,8 +475,9 @@ int brcmf_net_attach(struct brcmf_if *ifp, bool rtnl_locked)
        ndev->needed_headroom += drvr->hdrlen;
        ndev->ethtool_ops = &brcmf_ethtool_ops;
 
-       /* set the mac address */
+       /* set the mac address & netns */
        memcpy(ndev->dev_addr, ifp->mac_addr, ETH_ALEN);
+       dev_net_set(ndev, wiphy_net(cfg_to_wiphy(drvr->config)));
 
        INIT_WORK(&ifp->multicast_work, _brcmf_set_multicast_list);
        INIT_WORK(&ifp->ndoffload_work, _brcmf_update_ndtable);
@@ -734,10 +706,28 @@ void brcmf_remove_interface(struct brcmf_if *ifp, bool rtnl_locked)
                return;
        brcmf_dbg(TRACE, "Enter, bsscfgidx=%d, ifidx=%d\n", ifp->bsscfgidx,
                  ifp->ifidx);
-       brcmf_fws_del_interface(ifp);
+       brcmf_proto_del_if(ifp->drvr, ifp);
        brcmf_del_if(ifp->drvr, ifp->bsscfgidx, rtnl_locked);
 }
 
+static int brcmf_psm_watchdog_notify(struct brcmf_if *ifp,
+                                    const struct brcmf_event_msg *evtmsg,
+                                    void *data)
+{
+       int err;
+
+       brcmf_dbg(TRACE, "enter: bsscfgidx=%d\n", ifp->bsscfgidx);
+
+       brcmf_err("PSM's watchdog has fired!\n");
+
+       err = brcmf_debug_create_memdump(ifp->drvr->bus_if, data,
+                                        evtmsg->datalen);
+       if (err)
+               brcmf_err("Failed to get memory dump, %d\n", err);
+
+       return err;
+}
+
 #ifdef CONFIG_INET
 #define ARPOL_MAX_ENTRIES      8
 static int brcmf_inetaddr_changed(struct notifier_block *nb,
@@ -917,6 +907,10 @@ int brcmf_attach(struct device *dev, struct brcmf_mp_device *settings)
                goto fail;
        }
 
+       /* Attach to events important for core code */
+       brcmf_fweh_register(drvr, BRCMF_E_PSM_WATCHDOG,
+                           brcmf_psm_watchdog_notify);
+
        /* attach firmware event handler */
        brcmf_fweh_attach(drvr);
 
@@ -992,11 +986,11 @@ int brcmf_bus_started(struct device *dev)
        }
        brcmf_feat_attach(drvr);
 
-       ret = brcmf_fws_init(drvr);
+       ret = brcmf_proto_init_done(drvr);
        if (ret < 0)
                goto fail;
 
-       brcmf_fws_add_interface(ifp);
+       brcmf_proto_add_if(drvr, ifp);
 
        drvr->config = brcmf_cfg80211_attach(drvr, bus_if->dev,
                                             drvr->settings->p2p_enable);
@@ -1040,10 +1034,6 @@ fail:
                brcmf_cfg80211_detach(drvr->config);
                drvr->config = NULL;
        }
-       if (drvr->fws) {
-               brcmf_fws_del_interface(ifp);
-               brcmf_fws_deinit(drvr);
-       }
        brcmf_net_detach(ifp->ndev, false);
        if (p2p_ifp)
                brcmf_net_detach(p2p_ifp->ndev, false);
@@ -1109,8 +1099,6 @@ void brcmf_detach(struct device *dev)
 
        brcmf_cfg80211_detach(drvr->config);
 
-       brcmf_fws_deinit(drvr);
-
        brcmf_bus_stop(drvr->bus_if);
 
        brcmf_proto_detach(drvr);
index 6aecd8dfd824d9656890b96359734aa80a03ca35..a4dd313140f37dc1fbb6955e1148f48bccf22f44 100644 (file)
@@ -127,8 +127,6 @@ struct brcmf_pub {
 
        struct brcmf_fweh_info fweh;
 
-       struct brcmf_fws_info *fws;
-
        struct brcmf_ampdu_rx_reorder
                *reorder_flows[BRCMF_AMPDU_RX_REORDER_MAXFLOWS];
 
index f4644cf371c7e058401fcba2797fb9bd11f63993..1447a8352383707f35ab04a67c32c7c3bb0d97bc 100644 (file)
@@ -27,8 +27,8 @@
 
 static struct dentry *root_folder;
 
-static int brcmf_debug_create_memdump(struct brcmf_bus *bus, const void *data,
-                                     size_t len)
+int brcmf_debug_create_memdump(struct brcmf_bus *bus, const void *data,
+                              size_t len)
 {
        void *dump;
        size_t ramsize;
@@ -54,24 +54,6 @@ static int brcmf_debug_create_memdump(struct brcmf_bus *bus, const void *data,
        return 0;
 }
 
-static int brcmf_debug_psm_watchdog_notify(struct brcmf_if *ifp,
-                                          const struct brcmf_event_msg *evtmsg,
-                                          void *data)
-{
-       int err;
-
-       brcmf_dbg(TRACE, "enter: bsscfgidx=%d\n", ifp->bsscfgidx);
-
-       brcmf_err("PSM's watchdog has fired!\n");
-
-       err = brcmf_debug_create_memdump(ifp->drvr->bus_if, data,
-                                        evtmsg->datalen);
-       if (err)
-               brcmf_err("Failed to get memory dump, %d\n", err);
-
-       return err;
-}
-
 void brcmf_debugfs_init(void)
 {
        root_folder = debugfs_create_dir(KBUILD_MODNAME, NULL);
@@ -99,9 +81,7 @@ int brcmf_debug_attach(struct brcmf_pub *drvr)
        if (IS_ERR(drvr->dbgfs_dir))
                return PTR_ERR(drvr->dbgfs_dir);
 
-
-       return brcmf_fweh_register(drvr, BRCMF_E_PSM_WATCHDOG,
-                                  brcmf_debug_psm_watchdog_notify);
+       return 0;
 }
 
 void brcmf_debug_detach(struct brcmf_pub *drvr)
index 066126123e9663e89277a28d0dd2fa48ad9f0a72..fe264a5798f15f55e03741554a5dfe58e37b4892 100644 (file)
@@ -59,6 +59,10 @@ void __brcmf_err(const char *func, const char *fmt, ...);
        } while (0)
 
 #if defined(DEBUG) || defined(CONFIG_BRCM_TRACING)
+
+/* For debug/tracing purposes treat info messages as errors */
+#define brcmf_info brcmf_err
+
 __printf(3, 4)
 void __brcmf_dbg(u32 level, const char *func, const char *fmt, ...);
 #define brcmf_dbg(level, fmt, ...)                             \
@@ -77,6 +81,11 @@ do {                                                         \
 
 #else /* defined(DEBUG) || defined(CONFIG_BRCM_TRACING) */
 
+#define brcmf_info(fmt, ...)                                           \
+       do {                                                            \
+               pr_info("%s: " fmt, __func__, ##__VA_ARGS__);           \
+       } while (0)
+
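The non-debug brcmf_info() above logs at KERN_INFO; with DEBUG or CONFIG_BRCM_TRACING the first hunk of this file instead aliases it to brcmf_err, so informational messages also reach the error/trace stream. Typical use, as converted earlier in this series:

	brcmf_info("Firmware version = %s\n", buf);
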
 #define brcmf_dbg(level, fmt, ...) no_printk(fmt, ##__VA_ARGS__)
 
 #define BRCMF_DATA_ON()                0
@@ -99,6 +108,7 @@ do {                                                                 \
 
 extern int brcmf_msg_level;
 
+struct brcmf_bus;
 struct brcmf_pub;
 #ifdef DEBUG
 void brcmf_debugfs_init(void);
@@ -108,6 +118,8 @@ void brcmf_debug_detach(struct brcmf_pub *drvr);
 struct dentry *brcmf_debugfs_get_devdir(struct brcmf_pub *drvr);
 int brcmf_debugfs_add_entry(struct brcmf_pub *drvr, const char *fn,
                            int (*read_fn)(struct seq_file *seq, void *data));
+int brcmf_debug_create_memdump(struct brcmf_bus *bus, const void *data,
+                              size_t len);
 #else
 static inline void brcmf_debugfs_init(void)
 {
@@ -128,6 +140,12 @@ int brcmf_debugfs_add_entry(struct brcmf_pub *drvr, const char *fn,
 {
        return 0;
 }
+static inline
+int brcmf_debug_create_memdump(struct brcmf_bus *bus, const void *data,
+                              size_t len)
+{
+       return 0;
+}
 #endif
 
 #endif /* BRCMFMAC_DEBUG_H */
index c79306b575324f05a63b38a80068179db316f6a3..4eb1e1ce9aceccc9214657fa6081ba5327c021ca 100644 (file)
@@ -22,9 +22,9 @@
 #include "core.h"
 #include "debug.h"
 #include "tracepoint.h"
-#include "fwsignal.h"
 #include "fweh.h"
 #include "fwil.h"
+#include "proto.h"
 
 /**
  * struct brcmf_fweh_queue_item - event item on event queue.
@@ -172,14 +172,14 @@ static void brcmf_fweh_handle_if_event(struct brcmf_pub *drvr,
                if (IS_ERR(ifp))
                        return;
                if (!is_p2pdev)
-                       brcmf_fws_add_interface(ifp);
+                       brcmf_proto_add_if(drvr, ifp);
                if (!drvr->fweh.evt_handler[BRCMF_E_IF])
                        if (brcmf_net_attach(ifp, false) < 0)
                                return;
        }
 
        if (ifp && ifevent->action == BRCMF_E_IF_CHANGE)
-               brcmf_fws_reset_interface(ifp);
+               brcmf_proto_reset_if(drvr, ifp);
 
        err = brcmf_fweh_call_event_handler(ifp, emsg->event_code, emsg, data);
 
index 5f1a5929cb307e8c8e9353e264b1c313210cdd88..72373e59308e8fe54cf14090efd135a045273f4a 100644 (file)
@@ -36,6 +36,7 @@
 #include "p2p.h"
 #include "cfg80211.h"
 #include "proto.h"
+#include "bcdc.h"
 #include "common.h"
 
 /**
@@ -1586,7 +1587,7 @@ static int brcmf_fws_notify_credit_map(struct brcmf_if *ifp,
                                       const struct brcmf_event_msg *e,
                                       void *data)
 {
-       struct brcmf_fws_info *fws = ifp->drvr->fws;
+       struct brcmf_fws_info *fws = drvr_to_fws(ifp->drvr);
        int i;
        u8 *credits = data;
 
@@ -1617,7 +1618,7 @@ static int brcmf_fws_notify_bcmc_credit_support(struct brcmf_if *ifp,
                                                const struct brcmf_event_msg *e,
                                                void *data)
 {
-       struct brcmf_fws_info *fws = ifp->drvr->fws;
+       struct brcmf_fws_info *fws = drvr_to_fws(ifp->drvr);
 
        if (fws) {
                brcmf_fws_lock(fws);
@@ -1826,7 +1827,7 @@ netif_rx:
 void brcmf_fws_hdrpull(struct brcmf_if *ifp, s16 siglen, struct sk_buff *skb)
 {
        struct brcmf_skb_reorder_data *rd;
-       struct brcmf_fws_info *fws = ifp->drvr->fws;
+       struct brcmf_fws_info *fws = drvr_to_fws(ifp->drvr);
        u8 *signal_data;
        s16 data_len;
        u8 type;
@@ -2091,8 +2092,7 @@ static int brcmf_fws_assign_htod(struct brcmf_fws_info *fws, struct sk_buff *p,
 
 int brcmf_fws_process_skb(struct brcmf_if *ifp, struct sk_buff *skb)
 {
-       struct brcmf_pub *drvr = ifp->drvr;
-       struct brcmf_fws_info *fws = drvr->fws;
+       struct brcmf_fws_info *fws = drvr_to_fws(ifp->drvr);
        struct brcmf_skbuff_cb *skcb = brcmf_skbcb(skb);
        struct ethhdr *eh = (struct ethhdr *)(skb->data);
        int fifo = BRCMF_FWS_FIFO_BCMC;
@@ -2142,10 +2142,10 @@ void brcmf_fws_reset_interface(struct brcmf_if *ifp)
 
 void brcmf_fws_add_interface(struct brcmf_if *ifp)
 {
-       struct brcmf_fws_info *fws = ifp->drvr->fws;
+       struct brcmf_fws_info *fws = drvr_to_fws(ifp->drvr);
        struct brcmf_fws_mac_descriptor *entry;
 
-       if (!ifp->ndev)
+       if (!ifp->ndev || fws->fcmode == BRCMF_FWS_FCMODE_NONE)
                return;
 
        entry = &fws->desc.iface[ifp->ifidx];
@@ -2160,16 +2160,17 @@ void brcmf_fws_add_interface(struct brcmf_if *ifp)
 void brcmf_fws_del_interface(struct brcmf_if *ifp)
 {
        struct brcmf_fws_mac_descriptor *entry = ifp->fws_desc;
+       struct brcmf_fws_info *fws = drvr_to_fws(ifp->drvr);
 
        if (!entry)
                return;
 
-       brcmf_fws_lock(ifp->drvr->fws);
+       brcmf_fws_lock(fws);
        ifp->fws_desc = NULL;
        brcmf_dbg(TRACE, "deleting %s\n", entry->name);
        brcmf_fws_macdesc_deinit(entry);
-       brcmf_fws_cleanup(ifp->drvr->fws, ifp->ifidx);
-       brcmf_fws_unlock(ifp->drvr->fws);
+       brcmf_fws_cleanup(fws, ifp->ifidx);
+       brcmf_fws_unlock(fws);
 }
 
 static void brcmf_fws_dequeue_worker(struct work_struct *worker)
@@ -2243,7 +2244,7 @@ static void brcmf_fws_dequeue_worker(struct work_struct *worker)
 static int brcmf_debugfs_fws_stats_read(struct seq_file *seq, void *data)
 {
        struct brcmf_bus *bus_if = dev_get_drvdata(seq->private);
-       struct brcmf_fws_stats *fwstats = &bus_if->drvr->fws->stats;
+       struct brcmf_fws_stats *fwstats = &(drvr_to_fws(bus_if->drvr)->stats);
 
        seq_printf(seq,
                   "header_pulls:      %u\n"
@@ -2308,7 +2309,7 @@ static int brcmf_debugfs_fws_stats_read(struct seq_file *seq, void *data)
 }
 #endif
 
-int brcmf_fws_init(struct brcmf_pub *drvr)
+struct brcmf_fws_info *brcmf_fws_attach(struct brcmf_pub *drvr)
 {
        struct brcmf_fws_info *fws;
        struct brcmf_if *ifp;
@@ -2316,17 +2317,15 @@ int brcmf_fws_init(struct brcmf_pub *drvr)
        int rc;
        u32 mode;
 
-       drvr->fws = kzalloc(sizeof(*(drvr->fws)), GFP_KERNEL);
-       if (!drvr->fws) {
+       fws = kzalloc(sizeof(*fws), GFP_KERNEL);
+       if (!fws) {
                rc = -ENOMEM;
                goto fail;
        }
 
-       fws = drvr->fws;
-
        spin_lock_init(&fws->spinlock);
 
-       /* set linkage back */
+       /* store drvr reference */
        fws->drvr = drvr;
        fws->fcmode = drvr->settings->fcmode;
 
@@ -2334,7 +2333,7 @@ int brcmf_fws_init(struct brcmf_pub *drvr)
            (fws->fcmode == BRCMF_FWS_FCMODE_NONE)) {
                fws->avoid_queueing = true;
                brcmf_dbg(INFO, "FWS queueing will be avoided\n");
-               return 0;
+               return fws;
        }
 
        fws->fws_wq = create_singlethread_workqueue("brcmf_fws_wq");
@@ -2396,6 +2395,7 @@ int brcmf_fws_init(struct brcmf_pub *drvr)
        brcmf_fws_hanger_init(&fws->hanger);
        brcmf_fws_macdesc_init(&fws->desc.other, NULL, 0);
        brcmf_fws_macdesc_set_name(fws, &fws->desc.other);
+       brcmf_dbg(INFO, "added %s\n", fws->desc.other.name);
        brcmu_pktq_init(&fws->desc.other.psq, BRCMF_FWS_PSQ_PREC_COUNT,
                        BRCMF_FWS_PSQ_LEN);
 
@@ -2405,27 +2405,24 @@ int brcmf_fws_init(struct brcmf_pub *drvr)
 
        brcmf_dbg(INFO, "%s bdcv2 tlv signaling [%x]\n",
                  fws->fw_signals ? "enabled" : "disabled", tlv);
-       return 0;
+       return fws;
 
 fail:
-       brcmf_fws_deinit(drvr);
-       return rc;
+       brcmf_fws_detach(fws);
+       return ERR_PTR(rc);
 }
 
-void brcmf_fws_deinit(struct brcmf_pub *drvr)
+void brcmf_fws_detach(struct brcmf_fws_info *fws)
 {
-       struct brcmf_fws_info *fws = drvr->fws;
-
        if (!fws)
                return;
 
-       if (drvr->fws->fws_wq)
-               destroy_workqueue(drvr->fws->fws_wq);
+       if (fws->fws_wq)
+               destroy_workqueue(fws->fws_wq);
 
        /* cleanup */
        brcmf_fws_lock(fws);
        brcmf_fws_cleanup(fws, -1);
-       drvr->fws = NULL;
        brcmf_fws_unlock(fws);
 
        /* free top structure */
@@ -2461,7 +2458,7 @@ void brcmf_fws_bustxfail(struct brcmf_fws_info *fws, struct sk_buff *skb)
 
 void brcmf_fws_bus_blocked(struct brcmf_pub *drvr, bool flow_blocked)
 {
-       struct brcmf_fws_info *fws = drvr->fws;
+       struct brcmf_fws_info *fws = drvr_to_fws(drvr);
        struct brcmf_if *ifp;
        int i;
 
index 96df66073b2a182d050dd777893ded4b4da13473..ba07bd972002025081cc1b2264054d93150bcc34 100644 (file)
@@ -18,8 +18,8 @@
 #ifndef FWSIGNAL_H_
 #define FWSIGNAL_H_
 
-int brcmf_fws_init(struct brcmf_pub *drvr);
-void brcmf_fws_deinit(struct brcmf_pub *drvr);
+struct brcmf_fws_info *brcmf_fws_attach(struct brcmf_pub *drvr);
+void brcmf_fws_detach(struct brcmf_fws_info *fws);
 bool brcmf_fws_queue_skbs(struct brcmf_fws_info *fws);
 bool brcmf_fws_fc_active(struct brcmf_fws_info *fws);
 void brcmf_fws_hdrpull(struct brcmf_if *ifp, s16 siglen, struct sk_buff *skb);
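
With init/deinit renamed to attach/detach, the constructor now returns the state object or an ERR_PTR() instead of an int. A hedged caller sketch (the surrounding error handling is illustrative, not taken from this patch):

        struct brcmf_fws_info *fws;

        fws = brcmf_fws_attach(drvr);
        if (IS_ERR(fws))
                return PTR_ERR(fws);
        /* ... later, on teardown ... */
        brcmf_fws_detach(fws);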
index de19c7c92bc6c095b3b111abfc280fe228e588ac..85d949e03f79f7c9566c7b00a9bbe99853df7f16 100644 (file)
@@ -2238,14 +2238,16 @@ int brcmf_p2p_del_vif(struct wiphy *wiphy, struct wireless_dev *wdev)
        struct brcmf_cfg80211_info *cfg = wiphy_priv(wiphy);
        struct brcmf_p2p_info *p2p = &cfg->p2p;
        struct brcmf_cfg80211_vif *vif;
+       enum nl80211_iftype iftype;
        bool wait_for_disable = false;
        int err;
 
        brcmf_dbg(TRACE, "delete P2P vif\n");
        vif = container_of(wdev, struct brcmf_cfg80211_vif, wdev);
 
+       iftype = vif->wdev.iftype;
        brcmf_cfg80211_arm_vif_event(cfg, vif);
-       switch (vif->wdev.iftype) {
+       switch (iftype) {
        case NL80211_IFTYPE_P2P_CLIENT:
                if (test_bit(BRCMF_VIF_STATUS_DISCONNECTING, &vif->sme_state))
                        wait_for_disable = true;
@@ -2275,7 +2277,7 @@ int brcmf_p2p_del_vif(struct wiphy *wiphy, struct wireless_dev *wdev)
                                            BRCMF_P2P_DISABLE_TIMEOUT);
 
        err = 0;
-       if (vif->wdev.iftype != NL80211_IFTYPE_P2P_DEVICE) {
+       if (iftype != NL80211_IFTYPE_P2P_DEVICE) {
                brcmf_vif_clear_mgmt_ies(vif);
                err = brcmf_p2p_release_p2p_if(vif);
        }
@@ -2291,7 +2293,7 @@ int brcmf_p2p_del_vif(struct wiphy *wiphy, struct wireless_dev *wdev)
        brcmf_remove_interface(vif->ifp, true);
 
        brcmf_cfg80211_arm_vif_event(cfg, NULL);
-       if (vif->wdev.iftype != NL80211_IFTYPE_P2P_DEVICE)
+       if (iftype != NL80211_IFTYPE_P2P_DEVICE)
                p2p->bss_idx[P2PAPI_BSSCFG_CONNECTION].vif = NULL;
 
        return err;
index 6fae4cf3f6ab2876eb6ae2c3b5b07d02bcb0d3ca..f36b96dc6acdfc2160ba35e187a46356a3b41c28 100644 (file)
@@ -1877,6 +1877,7 @@ static int brcmf_pcie_pm_enter_D3(struct device *dev)
                           BRCMF_PCIE_MBDATA_TIMEOUT);
        if (!devinfo->mbdata_completed) {
                brcmf_err("Timeout on response for entering D3 substate\n");
+               brcmf_bus_change_state(bus, BRCMF_BUS_UP);
                return -EIO;
        }
 
index 9a25e79a46cf2d256c70c905bf9d03ebbe19afe8..6c3bde83d07085e46a5418673f09df5a5790a1aa 100644 (file)
@@ -182,7 +182,6 @@ int brcmf_pno_clean(struct brcmf_if *ifp)
 int brcmf_pno_start_sched_scan(struct brcmf_if *ifp,
                               struct cfg80211_sched_scan_request *req)
 {
-       struct brcmu_d11inf *d11inf;
        struct brcmf_pno_config_le pno_cfg;
        struct cfg80211_ssid *ssid;
        u16 chan;
@@ -209,7 +208,6 @@ int brcmf_pno_start_sched_scan(struct brcmf_if *ifp,
        }
 
        /* configure channels to use */
-       d11inf = &ifp->drvr->config->d11inf;
        for (i = 0; i < req->n_channels; i++) {
                chan = req->channels[i]->hw_value;
                pno_cfg.channel_list[i] = cpu_to_le16(chan);
index 34b59feedeba86b592dfb08c0d737b340b57c798..2404f8a7c31c9901b77e8929e9e2012fd2f55b2a 100644 (file)
@@ -44,6 +44,10 @@ struct brcmf_proto {
        void (*add_tdls_peer)(struct brcmf_pub *drvr, int ifidx,
                              u8 peer[ETH_ALEN]);
        void (*rxreorder)(struct brcmf_if *ifp, struct sk_buff *skb);
+       void (*add_if)(struct brcmf_if *ifp);
+       void (*del_if)(struct brcmf_if *ifp);
+       void (*reset_if)(struct brcmf_if *ifp);
+       int (*init_done)(struct brcmf_pub *drvr);
        void *pd;
 };
 
@@ -118,4 +122,36 @@ brcmf_proto_rxreorder(struct brcmf_if *ifp, struct sk_buff *skb)
        ifp->drvr->proto->rxreorder(ifp, skb);
 }
 
+static inline void
+brcmf_proto_add_if(struct brcmf_pub *drvr, struct brcmf_if *ifp)
+{
+       if (!drvr->proto->add_if)
+               return;
+       drvr->proto->add_if(ifp);
+}
+
+static inline void
+brcmf_proto_del_if(struct brcmf_pub *drvr, struct brcmf_if *ifp)
+{
+       if (!drvr->proto->del_if)
+               return;
+       drvr->proto->del_if(ifp);
+}
+
+static inline void
+brcmf_proto_reset_if(struct brcmf_pub *drvr, struct brcmf_if *ifp)
+{
+       if (!drvr->proto->reset_if)
+               return;
+       drvr->proto->reset_if(ifp);
+}
+
+static inline int
+brcmf_proto_init_done(struct brcmf_pub *drvr)
+{
+       if (!drvr->proto->init_done)
+               return 0;
+       return drvr->proto->init_done(drvr);
+}
+
 #endif /* BRCMFMAC_PROTO_H */
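
All four new hooks are optional: each inline wrapper bails out when the callback is NULL, so only a protocol that needs per-interface bookkeeping registers them. A hypothetical registration at protocol attach time (the brcmf_proto_bcdc_* names are assumptions, not shown in this excerpt):

        /* sketch: wire the optional hooks when the protocol layer attaches */
        drvr->proto->add_if = brcmf_proto_bcdc_add_if;
        drvr->proto->del_if = brcmf_proto_bcdc_del_if;
        drvr->proto->reset_if = brcmf_proto_bcdc_reset_if;
        drvr->proto->init_done = brcmf_proto_bcdc_init_done;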
index 65689469c5a12e2fcfd6123ca584944da79ec184..fc64b8913aa6a11c0111fec3b9d900174dc250c3 100644 (file)
@@ -44,6 +44,7 @@
 #include "firmware.h"
 #include "core.h"
 #include "common.h"
+#include "bcdc.h"
 
 #define DCMD_RESP_TIMEOUT      msecs_to_jiffies(2500)
 #define CTL_DONE_TIMEOUT       msecs_to_jiffies(2500)
@@ -539,7 +540,11 @@ static int qcount[NUMPRIO];
 /* Limit on rounding up frames */
 static const uint max_roundup = 512;
 
+#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
+#define ALIGNMENT  8
+#else
 #define ALIGNMENT  4
+#endif
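
On configurations with 64-bit DMA addresses the SDIO alignment doubles from 4 to 8 bytes. The constant feeds ordinary ALIGN() arithmetic; an illustrative use (not from this patch):

        /* round a frame length up to the bus DMA alignment */
        u32 pad = ALIGN(skb->len, ALIGNMENT) - skb->len;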
 
 enum brcmf_sdio_frmtype {
        BRCMF_SDIO_FT_NORMAL,
@@ -2265,7 +2270,8 @@ done:
                bus->tx_seq = (bus->tx_seq + pktq->qlen) % SDPCM_SEQ_WRAP;
        skb_queue_walk_safe(pktq, pkt_next, tmp) {
                __skb_unlink(pkt_next, pktq);
-               brcmf_txcomplete(bus->sdiodev->dev, pkt_next, ret == 0);
+               brcmf_proto_bcdc_txcomplete(bus->sdiodev->dev, pkt_next,
+                                           ret == 0);
        }
        return ret;
 }
@@ -2328,7 +2334,7 @@ static uint brcmf_sdio_sendfromq(struct brcmf_sdio *bus, uint maxframes)
        if ((bus->sdiodev->state == BRCMF_SDIOD_DATA) &&
            bus->txoff && (pktq_len(&bus->txq) < TXLOW)) {
                bus->txoff = false;
-               brcmf_txflowblock(bus->sdiodev->dev, false);
+               brcmf_proto_bcdc_txflowblock(bus->sdiodev->dev, false);
        }
 
        return cnt;
@@ -2753,7 +2759,7 @@ static int brcmf_sdio_bus_txdata(struct device *dev, struct sk_buff *pkt)
 
        if (pktq_len(&bus->txq) >= TXHI) {
                bus->txoff = true;
-               brcmf_txflowblock(dev, true);
+               brcmf_proto_bcdc_txflowblock(dev, true);
        }
        spin_unlock_bh(&bus->txq_lock);
 
index d93ebbdc773757adda218b16b816c30202792973..e4d545f9edeef6f119a0b67f50db679bedfcd91f 100644 (file)
@@ -29,6 +29,7 @@
 #include "usb.h"
 #include "core.h"
 #include "common.h"
+#include "bcdc.h"
 
 
 #define IOCTL_RESP_TIMEOUT             msecs_to_jiffies(2000)
@@ -482,13 +483,13 @@ static void brcmf_usb_tx_complete(struct urb *urb)
                  req->skb);
        brcmf_usb_del_fromq(devinfo, req);
 
-       brcmf_txcomplete(devinfo->dev, req->skb, urb->status == 0);
+       brcmf_proto_bcdc_txcomplete(devinfo->dev, req->skb, urb->status == 0);
        req->skb = NULL;
        brcmf_usb_enq(devinfo, &devinfo->tx_freeq, req, &devinfo->tx_freecount);
        spin_lock_irqsave(&devinfo->tx_flowblock_lock, flags);
        if (devinfo->tx_freecount > devinfo->tx_high_watermark &&
                devinfo->tx_flowblock) {
-               brcmf_txflowblock(devinfo->dev, false);
+               brcmf_proto_bcdc_txflowblock(devinfo->dev, false);
                devinfo->tx_flowblock = false;
        }
        spin_unlock_irqrestore(&devinfo->tx_flowblock_lock, flags);
@@ -635,7 +636,7 @@ static int brcmf_usb_tx(struct device *dev, struct sk_buff *skb)
        spin_lock_irqsave(&devinfo->tx_flowblock_lock, flags);
        if (devinfo->tx_freecount < devinfo->tx_low_watermark &&
            !devinfo->tx_flowblock) {
-               brcmf_txflowblock(dev, true);
+               brcmf_proto_bcdc_txflowblock(dev, true);
                devinfo->tx_flowblock = true;
        }
        spin_unlock_irqrestore(&devinfo->tx_flowblock_lock, flags);
index 5ef3c5cc47c5f75e4aee7c727f4ae08e7225c98f..bbc579b647b61b6aa15b8ed3b7f4455cb8dd404c 100644 (file)
@@ -3539,9 +3539,6 @@ static int ipw_load(struct ipw_priv *priv)
        fw_img = &fw->data[le32_to_cpu(fw->boot_size) +
                           le32_to_cpu(fw->ucode_size)];
 
-       if (rc < 0)
-               goto error;
-
        if (!priv->rxq)
                priv->rxq = ipw_rx_queue_alloc(priv);
        else
index 92e611841200d586c65202b336d3cfe5725e06c4..411cb91c102f98a0dd1c45d1792b0fcd2588e5c8 100644 (file)
@@ -7,6 +7,7 @@ iwlwifi-objs            += iwl-notif-wait.o
 iwlwifi-objs           += iwl-eeprom-read.o iwl-eeprom-parse.o
 iwlwifi-objs           += iwl-phy-db.o iwl-nvm-parse.o
 iwlwifi-objs           += pcie/drv.o pcie/rx.o pcie/tx.o pcie/trans.o
+iwlwifi-objs           += pcie/ctxt-info.o pcie/trans-gen2.o pcie/tx-gen2.o
 iwlwifi-$(CONFIG_IWLDVM) += iwl-1000.o iwl-2000.o iwl-5000.o iwl-6000.o
 iwlwifi-$(CONFIG_IWLMVM) += iwl-7000.o iwl-8000.o iwl-9000.o iwl-a000.o
 iwlwifi-objs           += iwl-trans.o
index a72e58623d3ab013137f7fdf4a469de3d2d28a40..aeefd42d23ad4d99689dbc308ca1d4af84745912 100644 (file)
@@ -73,8 +73,8 @@
 /* Highest firmware API version supported */
 #define IWL7260_UCODE_API_MAX  17
 #define IWL7265_UCODE_API_MAX  17
-#define IWL7265D_UCODE_API_MAX 28
-#define IWL3168_UCODE_API_MAX  28
+#define IWL7265D_UCODE_API_MAX 30
+#define IWL3168_UCODE_API_MAX  30
 
 /* Lowest firmware API version supported */
 #define IWL7260_UCODE_API_MIN  17
index b7953bf55f6fea72b45e7a44685ec421cca2cc94..b9718c0cf17480dc4c1ab212fdbfabecb3125775 100644 (file)
@@ -70,8 +70,8 @@
 #include "iwl-agn-hw.h"
 
 /* Highest firmware API version supported */
-#define IWL8000_UCODE_API_MAX  28
-#define IWL8265_UCODE_API_MAX  28
+#define IWL8000_UCODE_API_MAX  30
+#define IWL8265_UCODE_API_MAX  30
 
 /* Lowest firmware API version supported */
 #define IWL8000_UCODE_API_MIN  17
index a5f0c0bf85ec8dd7f271a8954cd6d0823acbc9b1..110ceefccc15387e0fa068a46fedf69ab6ab9a14 100644 (file)
@@ -5,7 +5,7 @@
  *
  * GPL LICENSE SUMMARY
  *
- * Copyright(c) 2015-2016 Intel Deutschland GmbH
+ * Copyright(c) 2015-2017 Intel Deutschland GmbH
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of version 2 of the GNU General Public License as
@@ -18,7 +18,7 @@
  *
  * BSD LICENSE
  *
- * Copyright(c) 2015-2016 Intel Deutschland GmbH
+ * Copyright(c) 2015-2017 Intel Deutschland GmbH
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
 #include "iwl-agn-hw.h"
 
 /* Highest firmware API version supported */
-#define IWL9000_UCODE_API_MAX  28
+#define IWL9000_UCODE_API_MAX  30
 
 /* Lowest firmware API version supported */
-#define IWL9000_UCODE_API_MIN  17
+#define IWL9000_UCODE_API_MIN  30
 
 /* NVM versions */
 #define IWL9000_NVM_VERSION            0x0a1d
 #define IWL9000_SMEM_LEN               0x68000
 
 #define  IWL9000_FW_PRE "iwlwifi-9000-pu-a0-jf-a0-"
-#define  IWL9260_FW_PRE "iwlwifi-9260-th-a0-jf-a0-"
-#define  IWL9000LC_FW_PRE "iwlwifi-9000-pu-a0-lc-a0-"
+#define  IWL9260A_FW_PRE "iwlwifi-9260-th-a0-jf-a0-"
+#define  IWL9260B_FW_PRE "iwlwifi-9260-th-b0-jf-b0-"
 #define IWL9000_MODULE_FIRMWARE(api) \
        IWL9000_FW_PRE "-" __stringify(api) ".ucode"
-#define IWL9260_MODULE_FIRMWARE(api) \
-       IWL9260_FW_PRE "-" __stringify(api) ".ucode"
-#define IWL9000LC_MODULE_FIRMWARE(api) \
-       IWL9000LC_FW_PRE "-" __stringify(api) ".ucode"
+#define IWL9260A_MODULE_FIRMWARE(api) \
+       IWL9260A_FW_PRE "-" __stringify(api) ".ucode"
+#define IWL9260B_MODULE_FIRMWARE(api) \
+       IWL9260B_FW_PRE "-" __stringify(api) ".ucode"
 
 #define NVM_HW_SECTION_NUM_FAMILY_9000         10
 
@@ -148,7 +148,8 @@ static const struct iwl_tt_params iwl9000_tt_params = {
 
 const struct iwl_cfg iwl9160_2ac_cfg = {
        .name = "Intel(R) Dual Band Wireless AC 9160",
-       .fw_name_pre = IWL9260_FW_PRE,
+       .fw_name_pre = IWL9260A_FW_PRE,
+       .fw_name_pre_next_step = IWL9260B_FW_PRE,
        IWL_DEVICE_9000,
        .ht_params = &iwl9000_ht_params,
        .nvm_ver = IWL9000_NVM_VERSION,
@@ -158,7 +159,8 @@ const struct iwl_cfg iwl9160_2ac_cfg = {
 
 const struct iwl_cfg iwl9260_2ac_cfg = {
        .name = "Intel(R) Dual Band Wireless AC 9260",
-       .fw_name_pre = IWL9260_FW_PRE,
+       .fw_name_pre = IWL9260A_FW_PRE,
+       .fw_name_pre_next_step = IWL9260B_FW_PRE,
        IWL_DEVICE_9000,
        .ht_params = &iwl9000_ht_params,
        .nvm_ver = IWL9000_NVM_VERSION,
@@ -168,7 +170,8 @@ const struct iwl_cfg iwl9260_2ac_cfg = {
 
 const struct iwl_cfg iwl9270_2ac_cfg = {
        .name = "Intel(R) Dual Band Wireless AC 9270",
-       .fw_name_pre = IWL9260_FW_PRE,
+       .fw_name_pre = IWL9260A_FW_PRE,
+       .fw_name_pre_next_step = IWL9260B_FW_PRE,
        IWL_DEVICE_9000,
        .ht_params = &iwl9000_ht_params,
        .nvm_ver = IWL9000_NVM_VERSION,
@@ -198,21 +201,6 @@ const struct iwl_cfg iwl9560_2ac_cfg = {
        .integrated = true,
 };
 
-/*
- * TODO the struct below is for internal testing only this should be
- * removed by EO 2016~
- */
-const struct iwl_cfg iwl9000lc_2ac_cfg = {
-       .name = "Intel(R) Dual Band Wireless AC 9000",
-       .fw_name_pre = IWL9000LC_FW_PRE,
-       IWL_DEVICE_9000,
-       .ht_params = &iwl9000_ht_params,
-       .nvm_ver = IWL9000_NVM_VERSION,
-       .nvm_calib_ver = IWL9000_TX_POWER_VERSION,
-       .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K,
-       .integrated = true,
-};
-
 MODULE_FIRMWARE(IWL9000_MODULE_FIRMWARE(IWL9000_UCODE_API_MAX));
-MODULE_FIRMWARE(IWL9260_MODULE_FIRMWARE(IWL9000_UCODE_API_MAX));
-MODULE_FIRMWARE(IWL9000LC_MODULE_FIRMWARE(IWL9000_UCODE_API_MAX));
+MODULE_FIRMWARE(IWL9260A_MODULE_FIRMWARE(IWL9000_UCODE_API_MAX));
+MODULE_FIRMWARE(IWL9260B_MODULE_FIRMWARE(IWL9000_UCODE_API_MAX));
index 15dd7f6137c8fff1bae35a9b8eb165ab8ce7d3b6..097cb45c8ad92a9172fefb7c317a151d645624c0 100644 (file)
@@ -5,7 +5,7 @@
  *
  * GPL LICENSE SUMMARY
  *
- * Copyright(c) 2015-2016 Intel Deutschland GmbH
+ * Copyright(c) 2015-2017 Intel Deutschland GmbH
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of version 2 of the GNU General Public License as
@@ -18,7 +18,7 @@
  *
  * BSD LICENSE
  *
- * Copyright(c) 2015-2016 Intel Deutschland GmbH
+ * Copyright(c) 2015-2017 Intel Deutschland GmbH
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -55,7 +55,7 @@
 #include "iwl-agn-hw.h"
 
 /* Highest firmware API version supported */
-#define IWL_A000_UCODE_API_MAX 28
+#define IWL_A000_UCODE_API_MAX 30
 
 /* Lowest firmware API version supported */
 #define IWL_A000_UCODE_API_MIN 24
 #define IWL_A000_TX_POWER_VERSION      0xffff /* meaningless */
 
 /* Memory offsets and lengths */
-#define IWL_A000_DCCM_OFFSET           0x800000
-#define IWL_A000_DCCM_LEN              0x18000
+#define IWL_A000_DCCM_OFFSET           0x800000 /* LMAC1 */
+#define IWL_A000_DCCM_LEN              0x10000 /* LMAC1 */
 #define IWL_A000_DCCM2_OFFSET          0x880000
 #define IWL_A000_DCCM2_LEN             0x8000
 #define IWL_A000_SMEM_OFFSET           0x400000
-#define IWL_A000_SMEM_LEN              0x68000
+#define IWL_A000_SMEM_LEN              0xD0000
 
-#define IWL_A000_JF_FW_PRE "iwlwifi-Qu-a0-jf-b0-"
-#define IWL_A000_HR_FW_PRE "iwlwifi-Qu-a0-hr-a0-"
+#define IWL_A000_JF_FW_PRE     "iwlwifi-Qu-a0-jf-b0-"
+#define IWL_A000_HR_FW_PRE     "iwlwifi-Qu-a0-hr-a0-"
+#define IWL_A000_HR_CDB_FW_PRE "iwlwifi-QuIcp-a0-hrcdb-a0-"
 
 #define IWL_A000_HR_MODULE_FIRMWARE(api) \
        IWL_A000_HR_FW_PRE "-" __stringify(api) ".ucode"
@@ -121,7 +122,8 @@ static const struct iwl_ht_params iwl_a000_ht_params = {
        .vht_mu_mimo_supported = true,                                  \
        .mac_addr_from_csr = true,                                      \
        .use_tfh = true,                                                \
-       .rf_id = true
+       .rf_id = true,                                                  \
+       .gen2 = true
 
 const struct iwl_cfg iwla000_2ac_cfg_hr = {
                .name = "Intel(R) Dual Band Wireless AC a000",
@@ -133,6 +135,17 @@ const struct iwl_cfg iwla000_2ac_cfg_hr = {
                .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K,
 };
 
+const struct iwl_cfg iwla000_2ac_cfg_hr_cdb = {
+               .name = "Intel(R) Dual Band Wireless AC a000",
+               .fw_name_pre = IWL_A000_HR_CDB_FW_PRE,
+               IWL_DEVICE_A000,
+               .ht_params = &iwl_a000_ht_params,
+               .nvm_ver = IWL_A000_NVM_VERSION,
+               .nvm_calib_ver = IWL_A000_TX_POWER_VERSION,
+               .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K,
+               .cdb = true,
+};
+
 const struct iwl_cfg iwla000_2ac_cfg_jf = {
                .name = "Intel(R) Dual Band Wireless AC a000",
                .fw_name_pre = IWL_A000_JF_FW_PRE,
index 94f8a51b633eb25e7c5731944c5e5f00b5673743..4af1267181a96b1c6a33d155b7f05e53c175234f 100644 (file)
@@ -6,7 +6,7 @@
  * GPL LICENSE SUMMARY
  *
  * Copyright(c) 2007 - 2014 Intel Corporation. All rights reserved.
- * Copyright (C) 2016 Intel Deutschland GmbH
+ * Copyright (C) 2016 - 2017 Intel Deutschland GmbH
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of version 2 of the GNU General Public License as
@@ -32,7 +32,7 @@
  * BSD LICENSE
  *
  * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved.
- * Copyright (C) 2016 Intel Deutschland GmbH
+ * Copyright (C) 2016 - 2017 Intel Deutschland GmbH
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -90,16 +90,6 @@ enum iwl_device_family {
        IWL_DEVICE_FAMILY_8000,
 };
 
-static inline bool iwl_has_secure_boot(u32 hw_rev,
-                                      enum iwl_device_family family)
-{
-       /* return 1 only for family 8000 B0 */
-       if ((family == IWL_DEVICE_FAMILY_8000) && (hw_rev & 0xC))
-               return true;
-
-       return false;
-}
-
 /*
  * LED mode
  *    IWL_LED_DEFAULT:  use device default
@@ -283,6 +273,8 @@ struct iwl_pwr_tx_backoff {
  * @fw_name_pre: Firmware filename prefix. The api version and extension
  *     (.ucode) will be added to filename before loading from disk. The
  *     filename is constructed as fw_name_pre<api>.ucode.
+ * @fw_name_pre_next_step: same as @fw_name_pre, only for next step
+ *     (if supported)
  * @ucode_api_max: Highest version of uCode API supported by driver.
  * @ucode_api_min: Lowest version of uCode API supported by driver.
  * @max_inst_size: The maximal length of the fw inst section
@@ -321,6 +313,8 @@ struct iwl_pwr_tx_backoff {
  * @vht_mu_mimo_supported: VHT MU-MIMO support
  * @rf_id: need to read rf_id to determine the firmware image
  * @integrated: discrete or integrated
+ * @gen2: a000-and-later transport operation
+ * @cdb: CDB support
  *
  * We enable the driver to be backward compatible wrt. hardware features.
  * API differences in uCode shouldn't be handled here but through TLVs
@@ -330,6 +324,7 @@ struct iwl_cfg {
        /* params specific to an individual device within a device family */
        const char *name;
        const char *fw_name_pre;
+       const char *fw_name_pre_next_step;
        /* params not likely to change within a device family */
        const struct iwl_base_params *base_params;
        /* params likely to change within a device family */
@@ -365,7 +360,9 @@ struct iwl_cfg {
            vht_mu_mimo_supported:1,
            rf_id:1,
            integrated:1,
-           use_tfh:1;
+           use_tfh:1,
+           gen2:1,
+           cdb:1;
        u8 valid_tx_ant;
        u8 valid_rx_ant;
        u8 non_shared_ant;
@@ -449,13 +446,13 @@ extern const struct iwl_cfg iwl4165_2ac_cfg;
 extern const struct iwl_cfg iwl8260_2ac_sdio_cfg;
 extern const struct iwl_cfg iwl8265_2ac_sdio_cfg;
 extern const struct iwl_cfg iwl4165_2ac_sdio_cfg;
-extern const struct iwl_cfg iwl9000lc_2ac_cfg;
 extern const struct iwl_cfg iwl9160_2ac_cfg;
 extern const struct iwl_cfg iwl9260_2ac_cfg;
 extern const struct iwl_cfg iwl9270_2ac_cfg;
 extern const struct iwl_cfg iwl9460_2ac_cfg;
 extern const struct iwl_cfg iwl9560_2ac_cfg;
 extern const struct iwl_cfg iwla000_2ac_cfg_hr;
+extern const struct iwl_cfg iwla000_2ac_cfg_hr_cdb;
 extern const struct iwl_cfg iwla000_2ac_cfg_jf;
 #endif /* CONFIG_IWLMVM */
 
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-context-info.h b/drivers/net/wireless/intel/iwlwifi/iwl-context-info.h
new file mode 100644 (file)
index 0000000..b870c09
--- /dev/null
@@ -0,0 +1,203 @@
+/******************************************************************************
+ *
+ * This file is provided under a dual BSD/GPLv2 license.  When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2017 Intel Deutschland GmbH
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2017 Intel Deutschland GmbH
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ *  * Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ *  * Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in
+ *    the documentation and/or other materials provided with the
+ *    distribution.
+ *  * Neither the name Intel Corporation nor the names of its
+ *    contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *****************************************************************************/
+
+#ifndef __iwl_context_info_file_h__
+#define __iwl_context_info_file_h__
+
+/* maximum number of DRAM map entries supported by FW */
+#define IWL_MAX_DRAM_ENTRY     64
+#define CSR_CTXT_INFO_BA       0x40
+
+/**
+ * enum iwl_context_info_flags - Context information control flags
+ * @IWL_CTXT_INFO_AUTO_FUNC_INIT: If set, FW will not wait for the driver's
+ *     init-done command (which configures several system modes) before
+ *     raising the init interrupt
+ * @IWL_CTXT_INFO_EARLY_DEBUG: enable early debug
+ * @IWL_CTXT_INFO_ENABLE_CDMP: enable core dump
+ * @IWL_CTXT_INFO_RB_SIZE_4K: Use 4K RB size (the default is 2K)
+ * @IWL_CTXT_INFO_RB_CB_SIZE_POS: position of the RBD Cyclic Buffer Size
+ *     exponent, the actual size is 2**value, valid sizes are 8-2048.
+ *     The value is four bits long. Maximum valid exponent is 12
+ * @IWL_CTXT_INFO_TFD_FORMAT_LONG: use long TFD Format (the
+ *     default is short format - not supported by the driver)
+ */
+enum iwl_context_info_flags {
+       IWL_CTXT_INFO_AUTO_FUNC_INIT    = BIT(0),
+       IWL_CTXT_INFO_EARLY_DEBUG       = BIT(1),
+       IWL_CTXT_INFO_ENABLE_CDMP       = BIT(2),
+       IWL_CTXT_INFO_RB_SIZE_4K        = BIT(3),
+       IWL_CTXT_INFO_RB_CB_SIZE_POS    = 4,
+       IWL_CTXT_INFO_TFD_FORMAT_LONG   = BIT(8),
+};
+
+/*
+ * struct iwl_context_info_version - version structure
+ * @mac_id: SKU and revision id
+ * @version: context information version id
+ * @size: the size of the context information in DWs
+ */
+struct iwl_context_info_version {
+       __le16 mac_id;
+       __le16 version;
+       __le16 size;
+       __le16 reserved;
+} __packed;
+
+/*
+ * struct iwl_context_info_control - control structure
+ * @control_flags: context information flags see &enum iwl_context_info_flags
+ */
+struct iwl_context_info_control {
+       __le32 control_flags;
+       __le32 reserved;
+} __packed;
+
+/*
+ * struct iwl_context_info_dram - images DRAM map
+ * each entry in the map represents a DRAM chunk of up to 32 KB
+ * @umac_img: UMAC image DRAM map
+ * @lmac_img: LMAC image DRAM map
+ * @virtual_img: paged image DRAM map
+ */
+struct iwl_context_info_dram {
+       __le64 umac_img[IWL_MAX_DRAM_ENTRY];
+       __le64 lmac_img[IWL_MAX_DRAM_ENTRY];
+       __le64 virtual_img[IWL_MAX_DRAM_ENTRY];
+} __packed;
+
+/*
+ * struct iwl_context_info_rbd_cfg - RBDs configuration
+ * @free_rbd_addr: default queue free RB CB base address
+ * @used_rbd_addr: default queue used RB CB base address
+ * @status_wr_ptr: default queue used RB status write pointer
+ */
+struct iwl_context_info_rbd_cfg {
+       __le64 free_rbd_addr;
+       __le64 used_rbd_addr;
+       __le64 status_wr_ptr;
+} __packed;
+
+/*
+ * struct iwl_context_info_hcmd_cfg  - command queue configuration
+ * @cmd_queue_addr: address of command queue
+ * @cmd_queue_size: number of entries
+ */
+struct iwl_context_info_hcmd_cfg {
+       __le64 cmd_queue_addr;
+       u8 cmd_queue_size;
+       u8 reserved[7];
+} __packed;
+
+/*
+ * struct iwl_context_info_dump_cfg - Core Dump configuration
+ * @core_dump_addr: core dump (debug DRAM address) start address
+ * @core_dump_size: size, in DWs
+ */
+struct iwl_context_info_dump_cfg {
+       __le64 core_dump_addr;
+       __le32 core_dump_size;
+       __le32 reserved;
+} __packed;
+
+/*
+ * struct iwl_context_info_pnvm_cfg - platform NVM data configuration
+ * @platform_nvm_addr: Platform NVM data start address
+ * @platform_nvm_size: size in DWs
+ */
+struct iwl_context_info_pnvm_cfg {
+       __le64 platform_nvm_addr;
+       __le32 platform_nvm_size;
+       __le32 reserved;
+} __packed;
+
+/*
+ * struct iwl_context_info_early_dbg_cfg - early debug configuration for
+ *     dumping DRAM addresses
+ * @early_debug_addr: early debug start address
+ * @early_debug_size: size in DWs
+ */
+struct iwl_context_info_early_dbg_cfg {
+       __le64 early_debug_addr;
+       __le32 early_debug_size;
+       __le32 reserved;
+} __packed;
+
+/*
+ * struct iwl_context_info - device INIT configuration
+ * @version: version information of context info and HW
+ * @control: control flags of FH configurations
+ * @rbd_cfg: default RX queue configuration
+ * @hcmd_cfg: command queue configuration
+ * @dump_cfg: core dump data
+ * @edbg_cfg: early debug configuration
+ * @pnvm_cfg: platform nvm configuration
+ * @dram: firmware image addresses in DRAM
+ */
+struct iwl_context_info {
+       struct iwl_context_info_version version;
+       struct iwl_context_info_control control;
+       __le64 reserved0;
+       struct iwl_context_info_rbd_cfg rbd_cfg;
+       struct iwl_context_info_hcmd_cfg hcmd_cfg;
+       __le32 reserved1[4];
+       struct iwl_context_info_dump_cfg dump_cfg;
+       struct iwl_context_info_early_dbg_cfg edbg_cfg;
+       struct iwl_context_info_pnvm_cfg pnvm_cfg;
+       __le32 reserved2[16];
+       struct iwl_context_info_dram dram;
+       __le32 reserved3[16];
+} __packed;
+
+int iwl_pcie_ctxt_info_init(struct iwl_trans *trans, const struct fw_img *fw);
+void iwl_pcie_ctxt_info_free(struct iwl_trans *trans);
+void iwl_pcie_ctxt_info_free_paging(struct iwl_trans *trans);
+
+#endif /* __iwl_context_info_file_h__ */
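
Tying the header together: the control flags and the RBD ring exponent are OR'ed into control_flags. A sketch under assumed values (2^9 = 512 RBDs, within the valid 8-2048 range, plus long TFD format; ctxt_info is assumed to point at DMA-coherent memory):

        ctxt_info->control.control_flags =
                cpu_to_le32(IWL_CTXT_INFO_TFD_FORMAT_LONG |
                            (9 << IWL_CTXT_INFO_RB_CB_SIZE_POS));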
index 4ee3b621ec27ab58c899e91c6552f15fc0d5a57a..fa120fb553737b8b5bdd96100ffb69a3679b86c4 100644 (file)
@@ -348,7 +348,6 @@ enum {
 
 /* RF_ID value */
 #define CSR_HW_RF_ID_TYPE_JF           (0x00105000)
-#define CSR_HW_RF_ID_TYPE_LC           (0x00101000)
 #define CSR_HW_RF_ID_TYPE_HR           (0x00109000)
 
 /* EEPROM REG */
index be466a074c1df8ad351c2b1ce9142a13955fae47..212fb8d5c0642456643c18a79ba60f3b394adfbb 100644 (file)
@@ -7,7 +7,7 @@
  *
  * Copyright(c) 2007 - 2014 Intel Corporation. All rights reserved.
  * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
- * Copyright(c) 2016        Intel Deutschland GmbH
+ * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of version 2 of the GNU General Public License as
@@ -34,7 +34,7 @@
  *
  * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved.
  * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
- * Copyright(c) 2016        Intel Deutschland GmbH
+ * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -211,24 +211,46 @@ static void iwl_req_fw_callback(const struct firmware *ucode_raw,
 
 static int iwl_request_firmware(struct iwl_drv *drv, bool first)
 {
-       const char *name_pre = drv->trans->cfg->fw_name_pre;
+       const struct iwl_cfg *cfg = drv->trans->cfg;
        char tag[8];
+       const char *fw_pre_name;
+
+       if (drv->trans->cfg->device_family == IWL_DEVICE_FAMILY_8000 &&
+           CSR_HW_REV_STEP(drv->trans->hw_rev) == SILICON_B_STEP)
+               fw_pre_name = cfg->fw_name_pre_next_step;
+       else
+               fw_pre_name = cfg->fw_name_pre;
 
        if (first) {
-               drv->fw_index = drv->trans->cfg->ucode_api_max;
+               drv->fw_index = cfg->ucode_api_max;
                sprintf(tag, "%d", drv->fw_index);
        } else {
                drv->fw_index--;
                sprintf(tag, "%d", drv->fw_index);
        }
 
-       if (drv->fw_index < drv->trans->cfg->ucode_api_min) {
+       if (drv->fw_index < cfg->ucode_api_min) {
                IWL_ERR(drv, "no suitable firmware found!\n");
+
+               if (cfg->ucode_api_min == cfg->ucode_api_max) {
+                       IWL_ERR(drv, "%s%d is required\n", fw_pre_name,
+                               cfg->ucode_api_max);
+               } else {
+                       IWL_ERR(drv, "minimum version required: %s%d\n",
+                               fw_pre_name,
+                               cfg->ucode_api_min);
+                       IWL_ERR(drv, "maximum version supported: %s%d\n",
+                               fw_pre_name,
+                               cfg->ucode_api_max);
+               }
+
+               IWL_ERR(drv,
+                       "check git://git.kernel.org/pub/scm/linux/kernel/git/firmware/linux-firmware.git\n");
                return -ENOENT;
        }
 
        snprintf(drv->firmware_name, sizeof(drv->firmware_name), "%s%s.ucode",
-                name_pre, tag);
+                fw_pre_name, tag);
 
        IWL_DEBUG_INFO(drv, "attempting to load firmware '%s'\n",
                       drv->firmware_name);
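
The loop starts at ucode_api_max and counts down to ucode_api_min, concatenating the prefix, the API tag and ".ucode". Worked example with an assumed prefix "iwlwifi-8265-" and api_max 30:

        char name[64];

        snprintf(name, sizeof(name), "%s%s.ucode", "iwlwifi-8265-", "30");
        /* -> "iwlwifi-8265-30.ucode"; if request_firmware() misses, retry
         * with 29, 28, ... down to ucode_api_min, then print the hints above */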
index 33ef5372d1951b2b58618a5acb9c4b89657252eb..62f9fe926d781ee3dc87fff75b4fd270e5ab97f0 100644 (file)
@@ -614,6 +614,8 @@ static inline unsigned int FH_MEM_CBBC_QUEUE(struct iwl_trans *trans,
 #define RX_POOL_SIZE           (MQ_RX_NUM_RBDS +       \
                                 IWL_MAX_RX_HW_QUEUES * \
                                 (RX_CLAIM_REQ_ALLOC - RX_POST_REQ_ALLOC))
+/* cb size is the exponent */
+#define RX_QUEUE_CB_SIZE(x)    ilog2(x)
 
 #define RX_QUEUE_SIZE                         256
 #define RX_QUEUE_MASK                         255
@@ -639,6 +641,8 @@ struct iwl_rb_status {
 
 
 #define TFD_QUEUE_SIZE_MAX      (256)
+/* cb size is the exponent - 3 */
+#define TFD_QUEUE_CB_SIZE(x)   (ilog2(x) - 3)
 #define TFD_QUEUE_SIZE_BC_DUP  (64)
 #define TFD_QUEUE_BC_SIZE      (TFD_QUEUE_SIZE_MAX + TFD_QUEUE_SIZE_BC_DUP)
 #define IWL_TX_DMA_MASK        DMA_BIT_MASK(36)
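
Both helpers encode a power-of-two ring size as the exponent the hardware expects; worked examples:

        u32 rx_cb  = RX_QUEUE_CB_SIZE(512);  /* ilog2(512)     == 9 */
        u32 tfd_cb = TFD_QUEUE_CB_SIZE(256); /* ilog2(256) - 3 == 5 */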
@@ -647,7 +651,7 @@ struct iwl_rb_status {
 
 static inline u8 iwl_get_dma_hi_addr(dma_addr_t addr)
 {
-       return (sizeof(addr) > sizeof(u32) ? (addr >> 16) >> 16 : 0) & 0xF;
+       return (sizeof(addr) > sizeof(u32) ? upper_32_bits(addr) : 0) & 0xF;
 }
 /**
  * struct iwl_tfd_tb transmit buffer descriptor within transmit frame descriptor
index d01701ee477702272d4597a33d7c5710510f4fda..287e83eb30d9827067654552a28b88e4726f6ac8 100644 (file)
@@ -241,6 +241,8 @@ typedef unsigned int __bitwise iwl_ucode_tlv_api_t;
  *     iteration complete notification, and the timestamp reported for RX
  *     received during scan, are reported in TSF of the mac specified in the
  *     scan request.
+ * @IWL_UCODE_TLV_API_TKIP_MIC_KEYS: This ucode supports version 2 of
+ *     ADD_MODIFY_STA_KEY_API_S_VER_2.
  *
  * @NUM_IWL_UCODE_TLV_API: number of bits used
  */
@@ -250,6 +252,7 @@ enum iwl_ucode_tlv_api {
        IWL_UCODE_TLV_API_LQ_SS_PARAMS          = (__force iwl_ucode_tlv_api_t)18,
        IWL_UCODE_TLV_API_NEW_VERSION           = (__force iwl_ucode_tlv_api_t)20,
        IWL_UCODE_TLV_API_SCAN_TSF_REPORT       = (__force iwl_ucode_tlv_api_t)28,
+       IWL_UCODE_TLV_API_TKIP_MIC_KEYS         = (__force iwl_ucode_tlv_api_t)29,
 
        NUM_IWL_UCODE_TLV_API
 #ifdef __CHECKER__
@@ -344,6 +347,8 @@ enum iwl_ucode_tlv_capa {
        IWL_UCODE_TLV_CAPA_BT_COEX_RRC                  = (__force iwl_ucode_tlv_capa_t)30,
        IWL_UCODE_TLV_CAPA_GSCAN_SUPPORT                = (__force iwl_ucode_tlv_capa_t)31,
        IWL_UCODE_TLV_CAPA_STA_PM_NOTIF                 = (__force iwl_ucode_tlv_capa_t)38,
+       IWL_UCODE_TLV_CAPA_BINDING_CDB_SUPPORT          = (__force iwl_ucode_tlv_capa_t)39,
+       IWL_UCODE_TLV_CAPA_CDB_SUPPORT                  = (__force iwl_ucode_tlv_capa_t)40,
        IWL_UCODE_TLV_CAPA_EXTENDED_DTS_MEASURE         = (__force iwl_ucode_tlv_capa_t)64,
        IWL_UCODE_TLV_CAPA_SHORT_PM_TIMEOUTS            = (__force iwl_ucode_tlv_capa_t)65,
        IWL_UCODE_TLV_CAPA_BT_MPLUT_SUPPORT             = (__force iwl_ucode_tlv_capa_t)67,
index a9f69fdd170b0fd0fb039ffb949a657c7dcefefd..0f893ae6e715b52c96a1886cca445a3612afd434 100644 (file)
@@ -54,8 +54,8 @@ IWL_EXPORT_SYMBOL(iwl_write32);
 void iwl_write64(struct iwl_trans *trans, u64 ofs, u64 val)
 {
        trace_iwlwifi_dev_iowrite64(trans->dev, ofs, val);
-       iwl_trans_write32(trans, ofs, val & 0xffffffff);
-       iwl_trans_write32(trans, ofs + 4, val >> 32);
+       iwl_trans_write32(trans, ofs, lower_32_bits(val));
+       iwl_trans_write32(trans, ofs + 4, upper_32_bits(val));
 }
 IWL_EXPORT_SYMBOL(iwl_write64);
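
The open-coded shifts give way to the standard kernel helpers; for a 64-bit value the split written to the two 32-bit registers is:

        u64 val = 0x1122334455667788ULL;
        /* lower_32_bits(val) == 0x55667788, written at ofs     */
        /* upper_32_bits(val) == 0x11223344, written at ofs + 4 */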
 
index 88f260db3744006244509a71b938d0d2d26b4082..68412ff2112e34ec4cba0bc00a5814baec966798 100644 (file)
@@ -76,8 +76,8 @@ void iwl_notification_wait_init(struct iwl_notif_wait_data *notif_wait)
 }
 IWL_EXPORT_SYMBOL(iwl_notification_wait_init);
 
-void iwl_notification_wait_notify(struct iwl_notif_wait_data *notif_wait,
-                                 struct iwl_rx_packet *pkt)
+bool iwl_notification_wait(struct iwl_notif_wait_data *notif_wait,
+                          struct iwl_rx_packet *pkt)
 {
        bool triggered = false;
 
@@ -118,13 +118,11 @@ void iwl_notification_wait_notify(struct iwl_notif_wait_data *notif_wait,
                        }
                }
                spin_unlock(&notif_wait->notif_wait_lock);
-
        }
 
-       if (triggered)
-               wake_up_all(&notif_wait->notif_waitq);
+       return triggered;
 }
-IWL_EXPORT_SYMBOL(iwl_notification_wait_notify);
+IWL_EXPORT_SYMBOL(iwl_notification_wait);
 
 void iwl_abort_notification_waits(struct iwl_notif_wait_data *notif_wait)
 {
index 0f9995ed71cdef320a518cea5c2178dce4ec635a..368884be4e7c9641f80e402bb94b9e4a92d114fe 100644 (file)
@@ -6,7 +6,7 @@
  * GPL LICENSE SUMMARY
  *
  * Copyright(c) 2007 - 2014 Intel Corporation. All rights reserved.
- * Copyright(c) 2015 Intel Deutschland GmbH
+ * Copyright(c) 2015 - 2017 Intel Deutschland GmbH
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of version 2 of the GNU General Public License as
@@ -32,6 +32,7 @@
  * BSD LICENSE
  *
  * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved.
+ * Copyright(c) 2015 - 2017 Intel Deutschland GmbH
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -89,10 +90,10 @@ struct iwl_notif_wait_data {
  *
  * This structure is not used directly, to wait for a
  * notification declare it on the stack, and call
- * iwlagn_init_notification_wait() with appropriate
+ * iwl_init_notification_wait() with appropriate
  * parameters. Then do whatever will cause the ucode
  * to notify the driver, and to wait for that then
- * call iwlagn_wait_notification().
+ * call iwl_wait_notification().
  *
  * Each notification is one-shot. If at some point we
  * need to support multi-shot notifications (which
@@ -114,10 +115,24 @@ struct iwl_notification_wait {
 
 /* caller functions */
 void iwl_notification_wait_init(struct iwl_notif_wait_data *notif_data);
-void iwl_notification_wait_notify(struct iwl_notif_wait_data *notif_data,
-                                 struct iwl_rx_packet *pkt);
+bool iwl_notification_wait(struct iwl_notif_wait_data *notif_data,
+                          struct iwl_rx_packet *pkt);
 void iwl_abort_notification_waits(struct iwl_notif_wait_data *notif_data);
 
+static inline void
+iwl_notification_notify(struct iwl_notif_wait_data *notif_data)
+{
+       wake_up_all(&notif_data->notif_waitq);
+}
+
+static inline void
+iwl_notification_wait_notify(struct iwl_notif_wait_data *notif_data,
+                            struct iwl_rx_packet *pkt)
+{
+       if (iwl_notification_wait(notif_data, pkt))
+               iwl_notification_notify(notif_data);
+}
+
 /* user functions */
 void __acquires(wait_entry)
 iwl_init_notification_wait(struct iwl_notif_wait_data *notif_data,
index 406ef301b8ab8aedd8a1e8e93e8800d53872a233..f832e58e0ef99fc24e529ff4d93d5ca2b4b3c085 100644 (file)
 
 /*********************** END TX SCHEDULER *************************************/
 
-/* tcp checksum offload */
-#define RX_EN_CSUM             (0x00a00d88)
-
 /* Oscillator clock */
 #define OSC_CLK                                (0xa04068)
 #define OSC_CLK_FORCE_CONTROL          (0x8)
  * Note this address is cleared after MAC reset.
  */
 #define UREG_UCODE_LOAD_STATUS         (0xa05c40)
+#define UREG_CPU_INIT_RUN              (0xa05c44)
 
 #define LMPM_SECURE_UCODE_LOAD_CPU1_HDR_ADDR   (0x1E78)
 #define LMPM_SECURE_UCODE_LOAD_CPU2_HDR_ADDR   (0x1E7C)
 #define LMPM_SECURE_CPU1_HDR_MEM_SPACE         (0x420000)
 #define LMPM_SECURE_CPU2_HDR_MEM_SPACE         (0x420400)
 
+#define LMAC2_PRPH_OFFSET              (0x100000)
+
 /* Rx FIFO */
 #define RXF_SIZE_ADDR                  (0xa00c88)
 #define RXF_RD_D_SPACE                 (0xa00c40)
 #define RADIO_REG_SYS_MANUAL_DFT_0     0xAD4078
 #define RFIC_REG_RD                    0xAD0470
 #define WFPM_CTRL_REG                  0xA03030
+#define WFPM_GP2                       0xA030B4
 enum {
        ENABLE_WFPM = BIT(31),
        WFPM_AUX_CTL_AUX_IF_MAC_OWNER_MSK       = 0x80000000,
@@ -398,6 +399,8 @@ enum aux_misc_master1_en {
 #define PREG_AUX_BUS_WPROT_0           0xA04CC0
 #define SB_CPU_1_STATUS                        0xA01E30
 #define SB_CPU_2_STATUS                        0xA01E34
+#define UMAG_SB_CPU_1_STATUS           0xA038C0
+#define UMAG_SB_CPU_2_STATUS           0xA038C4
 
 /* FW chicken bits */
 #define LMPM_CHICK                     0xA01FF8
index d42cab2910257d816c2281fdeb1aae1ae3dda0fc..0bde26bab15df35c2d43cd38252975cc59ebb675 100644 (file)
@@ -70,8 +70,7 @@
 struct iwl_trans *iwl_trans_alloc(unsigned int priv_size,
                                  struct device *dev,
                                  const struct iwl_cfg *cfg,
-                                 const struct iwl_trans_ops *ops,
-                                 size_t dev_cmd_headroom)
+                                 const struct iwl_trans_ops *ops)
 {
        struct iwl_trans *trans;
 #ifdef CONFIG_LOCKDEP
@@ -90,15 +89,13 @@ struct iwl_trans *iwl_trans_alloc(unsigned int priv_size,
        trans->dev = dev;
        trans->cfg = cfg;
        trans->ops = ops;
-       trans->dev_cmd_headroom = dev_cmd_headroom;
        trans->num_rx_queues = 1;
 
        snprintf(trans->dev_cmd_pool_name, sizeof(trans->dev_cmd_pool_name),
                 "iwl_cmd_pool:%s", dev_name(trans->dev));
        trans->dev_cmd_pool =
                kmem_cache_create(trans->dev_cmd_pool_name,
-                                 sizeof(struct iwl_device_cmd)
-                                 + trans->dev_cmd_headroom,
+                                 sizeof(struct iwl_device_cmd),
                                  sizeof(void *),
                                  SLAB_HWCACHE_ALIGN,
                                  NULL);
index 0296124a7f9cf013c01f46583d11d5c110185707..626e2703a57f172b415b2ccdf9b106345a00b6f8 100644 (file)
@@ -7,7 +7,7 @@
  *
  * Copyright(c) 2007 - 2014 Intel Corporation. All rights reserved.
  * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
- * Copyright(c) 2016        Intel Deutschland GmbH
+ * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of version 2 of the GNU General Public License as
@@ -34,7 +34,7 @@
  *
  * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved.
  * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
- * Copyright(c) 2016        Intel Deutschland GmbH
+ * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -397,6 +397,7 @@ static inline void iwl_free_rxb(struct iwl_rx_cmd_buffer *r)
  */
 #define IWL_MAX_HW_QUEUES              32
 #define IWL_MAX_TID_COUNT      8
+#define IWL_MGMT_TID           15
 #define IWL_FRAME_LIMIT        64
 #define IWL_MAX_RX_HW_QUEUES   16
 
@@ -530,6 +531,44 @@ struct iwl_trans_txq_scd_cfg {
        int frame_limit;
 };
 
+/* Available options for &struct iwl_tx_queue_cfg_cmd */
+enum iwl_tx_queue_cfg_actions {
+       TX_QUEUE_CFG_ENABLE_QUEUE               = BIT(0),
+       TX_QUEUE_CFG_TFD_SHORT_FORMAT           = BIT(1),
+};
+
+/**
+ * struct iwl_tx_queue_cfg_cmd - txq hw scheduler config command
+ * @sta_id: station id
+ * @tid: tid of the queue
+ * @flags: Bit 0 - on: enable, off: disable; Bit 1 - short TFD format
+ * @cb_size: size of TFD cyclic buffer. Value is exponent - 3.
+ *     Minimum value 0 (8 TFDs), maximum value 5 (256 TFDs)
+ * @byte_cnt_addr: address of byte count table
+ * @tfdq_addr: address of TFD circular buffer
+ */
+struct iwl_tx_queue_cfg_cmd {
+       u8 sta_id;
+       u8 tid;
+       __le16 flags;
+       __le32 cb_size;
+       __le64 byte_cnt_addr;
+       __le64 tfdq_addr;
+} __packed; /* TX_QUEUE_CFG_CMD_API_S_VER_2 */
+
+/**
+ * struct iwl_tx_queue_cfg_rsp - response to txq hw scheduler config
+ * @queue_number: queue number assigned to this RA/TID
+ * @flags: set on failure
+ * @write_pointer: initial value for write pointer
+ */
+struct iwl_tx_queue_cfg_rsp {
+       __le16 queue_number;
+       __le16 flags;
+       __le16 write_pointer;
+       __le16 reserved;
+} __packed; /* TX_QUEUE_CFG_RSP_API_S_VER_2 */
+
 /**
  * struct iwl_trans_ops - transport specific operations
  *
@@ -640,12 +679,16 @@ struct iwl_trans_ops {
                           unsigned int queue_wdg_timeout);
        void (*txq_disable)(struct iwl_trans *trans, int queue,
                            bool configure_scd);
+       /* a000 functions */
+       int (*txq_alloc)(struct iwl_trans *trans,
+                        struct iwl_tx_queue_cfg_cmd *cmd,
+                        int cmd_id,
+                        unsigned int queue_wdg_timeout);
+       void (*txq_free)(struct iwl_trans *trans, int queue);
 
        void (*txq_set_shared_mode)(struct iwl_trans *trans, u32 txq_id,
                                    bool shared);
 
-       dma_addr_t (*get_txq_byte_table)(struct iwl_trans *trans, int txq_id);
-
        int (*wait_tx_queue_empty)(struct iwl_trans *trans, u32 txq_bm);
        void (*freeze_txq_timer)(struct iwl_trans *trans, unsigned long txqs,
                                 bool freeze);
@@ -774,9 +817,6 @@ enum iwl_plat_pm_mode {
  *     the transport must set this before calling iwl_drv_start()
  * @dev_cmd_pool: pool for Tx cmd allocation - for internal use only.
  *     The user should use iwl_trans_{alloc,free}_tx_cmd.
- * @dev_cmd_headroom: room needed for the transport's private use before the
- *     device_cmd for Tx - for internal use only
- *     The user should use iwl_trans_{alloc,free}_tx_cmd.
  * @rx_mpdu_cmd: MPDU RX command ID, must be assigned by opmode before
  *     starting the firmware, used for tracing
  * @rx_mpdu_cmd_hdr_size: used for tracing, amount of data before the
@@ -827,7 +867,6 @@ struct iwl_trans {
 
        /* The following fields are internal only */
        struct kmem_cache *dev_cmd_pool;
-       size_t dev_cmd_headroom;
        char dev_cmd_pool_name[50];
 
        struct dentry *dbgfs_dir;
@@ -1000,13 +1039,13 @@ iwl_trans_dump_data(struct iwl_trans *trans,
 static inline struct iwl_device_cmd *
 iwl_trans_alloc_tx_cmd(struct iwl_trans *trans)
 {
-       u8 *dev_cmd_ptr = kmem_cache_alloc(trans->dev_cmd_pool, GFP_ATOMIC);
+       struct iwl_device_cmd *dev_cmd_ptr =
+               kmem_cache_alloc(trans->dev_cmd_pool, GFP_ATOMIC);
 
        if (unlikely(dev_cmd_ptr == NULL))
                return NULL;
 
-       return (struct iwl_device_cmd *)
-                       (dev_cmd_ptr + trans->dev_cmd_headroom);
+       return dev_cmd_ptr;
 }
 
 int iwl_trans_send_cmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd);
@@ -1014,9 +1053,7 @@ int iwl_trans_send_cmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd);
 static inline void iwl_trans_free_tx_cmd(struct iwl_trans *trans,
                                         struct iwl_device_cmd *dev_cmd)
 {
-       u8 *dev_cmd_ptr = (u8 *)dev_cmd - trans->dev_cmd_headroom;
-
-       kmem_cache_free(trans->dev_cmd_pool, dev_cmd_ptr);
+       kmem_cache_free(trans->dev_cmd_pool, dev_cmd);
 }
 
 static inline int iwl_trans_tx(struct iwl_trans *trans, struct sk_buff *skb,
@@ -1065,20 +1102,39 @@ iwl_trans_txq_enable_cfg(struct iwl_trans *trans, int queue, u16 ssn,
        trans->ops->txq_enable(trans, queue, ssn, cfg, queue_wdg_timeout);
 }
 
-static inline void iwl_trans_txq_set_shared_mode(struct iwl_trans *trans,
-                                                int queue, bool shared_mode)
+static inline void
+iwl_trans_txq_free(struct iwl_trans *trans, int queue)
 {
-       if (trans->ops->txq_set_shared_mode)
-               trans->ops->txq_set_shared_mode(trans, queue, shared_mode);
+       if (WARN_ON_ONCE(!trans->ops->txq_free))
+               return;
+
+       trans->ops->txq_free(trans, queue);
 }
 
-static inline dma_addr_t iwl_trans_get_txq_byte_table(struct iwl_trans *trans,
-                                                     int queue)
+static inline int
+iwl_trans_txq_alloc(struct iwl_trans *trans,
+                   struct iwl_tx_queue_cfg_cmd *cmd,
+                   int cmd_id,
+                   unsigned int queue_wdg_timeout)
 {
-       /* we should never be called if the trans doesn't support it */
-       BUG_ON(!trans->ops->get_txq_byte_table);
+       might_sleep();
+
+       if (WARN_ON_ONCE(!trans->ops->txq_alloc))
+               return -ENOTSUPP;
 
-       return trans->ops->get_txq_byte_table(trans, queue);
+       if (WARN_ON_ONCE(trans->state != IWL_TRANS_FW_ALIVE)) {
+               IWL_ERR(trans, "%s bad state = %d\n", __func__, trans->state);
+               return -EIO;
+       }
+
+       return trans->ops->txq_alloc(trans, cmd, cmd_id, queue_wdg_timeout);
+}
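
A hypothetical caller of the new a000 allocation path fills the config command and lets the transport choose the queue (sta_id, cmd_id and wdg_timeout are placeholders the real code supplies):

        struct iwl_tx_queue_cfg_cmd cmd = {
                .sta_id  = sta_id,
                .tid     = IWL_MGMT_TID,
                .flags   = cpu_to_le16(TX_QUEUE_CFG_ENABLE_QUEUE),
                .cb_size = cpu_to_le32(TFD_QUEUE_CB_SIZE(256)), /* 256 TFDs */
                /* byte_cnt_addr/tfdq_addr are filled in by the transport */
        };
        int queue = iwl_trans_txq_alloc(trans, &cmd, cmd_id, wdg_timeout);

        if (queue < 0)
                return queue;   /* no txq_alloc op, or fw not alive */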
+
+static inline void iwl_trans_txq_set_shared_mode(struct iwl_trans *trans,
+                                                int queue, bool shared_mode)
+{
+       if (trans->ops->txq_set_shared_mode)
+               trans->ops->txq_set_shared_mode(trans, queue, shared_mode);
 }
 
 static inline void iwl_trans_txq_enable(struct iwl_trans *trans, int queue,
@@ -1248,8 +1304,7 @@ static inline void iwl_trans_fw_error(struct iwl_trans *trans)
 struct iwl_trans *iwl_trans_alloc(unsigned int priv_size,
                                  struct device *dev,
                                  const struct iwl_cfg *cfg,
-                                 const struct iwl_trans_ops *ops,
-                                 size_t dev_cmd_headroom);
+                                 const struct iwl_trans_ops *ops);
 void iwl_trans_free(struct iwl_trans *trans);
 
 /*****************************************************
index 7cb68f6ed1b0d49f58c152d796cd750d13833ff8..2e0ed080457f8470f3d98fe16c0ab86b5a233f3c 100644 (file)
@@ -6,6 +6,7 @@
  * GPL LICENSE SUMMARY
  *
  * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
+ * Copyright(c) 2016 Intel Deutschland GmbH
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of version 2 of the GNU General Public License as
@@ -31,6 +32,7 @@
  * BSD LICENSE
  *
  * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
+ * Copyright(c) 2016 Intel Deutschland GmbH
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -82,6 +84,19 @@ static int iwl_mvm_binding_cmd(struct iwl_mvm *mvm, u32 action,
        struct iwl_mvm_phy_ctxt *phyctxt = data->phyctxt;
        int i, ret;
        u32 status;
+       int size;
+
+       if (fw_has_capa(&mvm->fw->ucode_capa,
+                       IWL_UCODE_TLV_CAPA_BINDING_CDB_SUPPORT)) {
+               size = sizeof(cmd);
+               if (phyctxt->channel->band == NL80211_BAND_2GHZ ||
+                   !iwl_mvm_is_cdb_supported(mvm))
+                       cmd.lmac_id = cpu_to_le32(IWL_LMAC_24G_INDEX);
+               else
+                       cmd.lmac_id = cpu_to_le32(IWL_LMAC_5G_INDEX);
+       } else {
+               size = IWL_BINDING_CMD_SIZE_V1;
+       }
 
        memset(&cmd, 0, sizeof(cmd));
 
@@ -99,7 +114,7 @@ static int iwl_mvm_binding_cmd(struct iwl_mvm *mvm, u32 action,
 
        status = 0;
        ret = iwl_mvm_send_cmd_pdu_status(mvm, BINDING_CONTEXT_CMD,
-                                         sizeof(cmd), &cmd, &status);
+                                         size, &cmd, &status);
        if (ret) {
                IWL_ERR(mvm, "Failed to send binding (action:%d): %d\n",
                        action, ret);
index 5bdb6c2c8390b65149c07f61f986cc8f577a43ff..49b4418e6c35dac968a57fc8a9232af4db3af090 100644 (file)
@@ -756,7 +756,7 @@ void iwl_mvm_bt_rssi_event(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
         * Rssi update while not associated - can happen since the statistics
         * are handled asynchronously
         */
-       if (mvmvif->ap_sta_id == IWL_MVM_STATION_COUNT)
+       if (mvmvif->ap_sta_id == IWL_MVM_INVALID_STA)
                return;
 
        /* No BT - reports should be disabled */
index c7eb1983c4f9a191fc20d3cec151f069e7704b5d..119a3bd92c50f72c7c47756928734b99b7397f90 100644 (file)
@@ -665,6 +665,19 @@ static int iwl_mvm_d3_reprogram(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
        struct iwl_binding_cmd binding_cmd = {};
        struct iwl_time_quota_cmd quota_cmd = {};
        u32 status;
+       int size;
+
+       if (fw_has_capa(&mvm->fw->ucode_capa,
+                       IWL_UCODE_TLV_CAPA_BINDING_CDB_SUPPORT)) {
+               size = sizeof(binding_cmd);
+               if (mvmvif->phy_ctxt->channel->band == NL80211_BAND_2GHZ ||
+                   !iwl_mvm_is_cdb_supported(mvm))
+                       binding_cmd.lmac_id = cpu_to_le32(IWL_LMAC_24G_INDEX);
+               else
+                       binding_cmd.lmac_id = cpu_to_le32(IWL_LMAC_5G_INDEX);
+       } else {
+               size = IWL_BINDING_CMD_SIZE_V1;
+       }
 
        /* add back the PHY */
        if (WARN_ON(!mvmvif->phy_ctxt))
@@ -711,8 +724,7 @@ static int iwl_mvm_d3_reprogram(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
 
        status = 0;
        ret = iwl_mvm_send_cmd_pdu_status(mvm, BINDING_CONTEXT_CMD,
-                                         sizeof(binding_cmd), &binding_cmd,
-                                         &status);
+                                         size, &binding_cmd, &status);
        if (ret) {
                IWL_ERR(mvm, "Failed to add binding: %d\n", ret);
                return ret;
@@ -986,7 +998,9 @@ int iwl_mvm_wowlan_config_key_params(struct iwl_mvm *mvm,
                        goto out;
        }
 
-       if (key_data.use_tkip) {
+       if (key_data.use_tkip &&
+           !fw_has_api(&mvm->fw->ucode_capa,
+                       IWL_UCODE_TLV_API_TKIP_MIC_KEYS)) {
                ret = iwl_mvm_send_cmd_pdu(mvm,
                                           WOWLAN_TKIP_PARAM,
                                           cmd_flags, sizeof(tkip_cmd),
@@ -1194,7 +1208,7 @@ static int __iwl_mvm_suspend(struct ieee80211_hw *hw,
 
        mvmvif = iwl_mvm_vif_from_mac80211(vif);
 
-       if (mvmvif->ap_sta_id == IWL_MVM_STATION_COUNT) {
+       if (mvmvif->ap_sta_id == IWL_MVM_INVALID_STA) {
                /* if we're not associated, this must be netdetect */
                if (!wowlan->nd_config) {
                        ret = 1;
@@ -2102,6 +2116,10 @@ static int __iwl_mvm_resume(struct iwl_mvm *mvm, bool test)
         */
        iwl_mvm_update_changed_regdom(mvm);
 
+       if (!unified_image)
+               /*  Re-configure default SAR profile */
+               iwl_mvm_sar_select_profile(mvm, 1, 1);
+
        if (mvm->net_detect) {
                /* If this is a non-unified image, we restart the FW,
                 * so no need to stop the netdetect scan.  If that
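
The pattern above, picking the payload size at runtime from a firmware capability bit, recurs throughout this series: new firmware gets the full structure, old firmware gets a truncated V1 prefix. A minimal userspace sketch of the idea; struct iwl_binding_cmd and IWL_BINDING_CMD_SIZE_V1 are the real driver names, while the simplified layout and helper below are illustrative only:

    #include <stdbool.h>
    #include <stddef.h>
    #include <stdint.h>

    /* Simplified stand-in for the driver's binding command layout. */
    struct binding_cmd {
            uint32_t id_and_color;
            uint32_t action;
            uint32_t macs[3];
            uint32_t phy;
            uint32_t lmac_id;       /* new in BINDING_CMD_API_S_VER_2 */
    };

    /* V1 firmware expects the command without the trailing lmac_id. */
    #define BINDING_CMD_SIZE_V1 offsetof(struct binding_cmd, lmac_id)

    static size_t binding_cmd_size(bool fw_has_cdb_binding)
    {
            /* Old firmware must get the shorter V1 payload; sending
             * sizeof(struct binding_cmd) would append bytes it does
             * not understand. */
            return fw_has_cdb_binding ? sizeof(struct binding_cmd)
                                      : BINDING_CMD_SIZE_V1;
    }
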
index f4d75ffe3d8a8cfae24d1cea61bf918622564ed4..5d475b4850ae331002cf35c1d42a5d61dc5e7c84 100644 (file)
@@ -280,7 +280,7 @@ static ssize_t iwl_dbgfs_mac_params_read(struct file *file,
                                 mvmvif->queue_params[i].uapsd);
 
        if (vif->type == NL80211_IFTYPE_STATION &&
-           ap_sta_id != IWL_MVM_STATION_COUNT) {
+           ap_sta_id != IWL_MVM_INVALID_STA) {
                struct iwl_mvm_sta *mvm_sta;
 
                mvm_sta = iwl_mvm_sta_from_staid_protected(mvm, ap_sta_id);
index a260cd5032005bcbf520e98f8be188750c987439..402846650cbec93a3a871f64381cde6b7021220d 100644 (file)
@@ -330,7 +330,7 @@ static ssize_t iwl_dbgfs_stations_read(struct file *file, char __user *user_buf,
 
        mutex_lock(&mvm->mutex);
 
-       for (i = 0; i < IWL_MVM_STATION_COUNT; i++) {
+       for (i = 0; i < ARRAY_SIZE(mvm->fw_id_to_mac_id); i++) {
                pos += scnprintf(buf + pos, bufsz - pos, "%.2d: ", i);
                sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[i],
                                                lockdep_is_held(&mvm->mutex));
@@ -1056,6 +1056,8 @@ static ssize_t iwl_dbgfs_fw_dbg_collect_write(struct iwl_mvm *mvm,
 
        if (ret)
                return ret;
+       if (count == 0)
+               return 0;
 
        iwl_mvm_fw_dbg_collect(mvm, FW_DBG_TRIGGER_USER, buf,
                               (count - 1), NULL);
index 480a54af453477ac7194766d9a8ada9b3f10c923..d3cdd889c85c11c75ce584df8341423c90f0cb11 100644 (file)
@@ -73,7 +73,9 @@
 #define NUM_MAC_INDEX          (NUM_MAC_INDEX_DRIVER + 1)
 #define NUM_MAC_INDEX_CDB      (NUM_MAC_INDEX_DRIVER + 2)
 
-#define IWL_MVM_STATION_COUNT  16
+#define IWL_MVM_STATION_COUNT          16
+#define IWL_MVM_INVALID_STA            0xFF
+
 #define IWL_MVM_TDLS_STA_COUNT 4
 
 enum iwl_ac {
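
The replacement of IWL_MVM_STATION_COUNT comparisons with IWL_MVM_INVALID_STA, seen in several hunks above, separates two roles that previously shared one constant: the size of the station table and the "no station" marker. A small standalone sketch; only the two macros are taken from the hunk:

    #include <stdint.h>
    #include <stdio.h>

    #define IWL_MVM_STATION_COUNT   16      /* size of the station table */
    #define IWL_MVM_INVALID_STA     0xFF    /* "no station" sentinel */

    int main(void)
    {
            uint8_t ap_sta_id = IWL_MVM_INVALID_STA;

            /* Iteration is bounded by the table size... */
            for (int i = 0; i < IWL_MVM_STATION_COUNT; i++)
                    ;       /* walk fw_id_to_mac_id[i] in the real driver */

            /* ...while "unset" is a value no valid index can take.
             * When both roles shared STATION_COUNT, growing the table
             * would have silently invalidated the sentinel checks. */
            if (ap_sta_id == IWL_MVM_INVALID_STA)
                    printf("not associated\n");
            return 0;
    }
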
index 3fa43d1348a2231ed3b5ac583a69bffcf1241582..750510aff70b0e8195015c282224d2700a5c496d 100644 (file)
@@ -7,7 +7,7 @@
  *
  * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
  * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
- * Copyright(c) 2015 - 2016 Intel Deutschland GmbH
+ * Copyright(c) 2015 - 2017 Intel Deutschland GmbH
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of version 2 of the GNU General Public License as
@@ -34,7 +34,7 @@
  *
  * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
  * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
- * Copyright(c) 2015 - 2016 Intel Deutschland GmbH
+ * Copyright(c) 2015 - 2017 Intel Deutschland GmbH
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -351,6 +351,45 @@ struct iwl_dev_tx_power_cmd {
        u8 reserved[3];
 } __packed; /* TX_REDUCED_POWER_API_S_VER_4 */
 
+#define IWL_NUM_GEO_PROFILES   3
+
+/**
+ * enum iwl_geo_per_chain_offset_operation - type of operation
+ * @IWL_PER_CHAIN_OFFSET_SET_TABLES: send the tables from the host to the FW.
+ * @IWL_PER_CHAIN_OFFSET_GET_CURRENT_TABLE: retrieve the last configured table.
+ */
+enum iwl_geo_per_chain_offset_operation {
+       IWL_PER_CHAIN_OFFSET_SET_TABLES,
+       IWL_PER_CHAIN_OFFSET_GET_CURRENT_TABLE,
+};  /* GEO_TX_POWER_LIMIT FLAGS TYPE */
+
+/**
+ * struct iwl_per_chain_offset - embedded struct for GEO_TX_POWER_LIMIT.
+ * @max_tx_power: maximum allowed tx power.
+ * @chain_a: tx power offset for chain a.
+ * @chain_b: tx power offset for chain b.
+ */
+struct iwl_per_chain_offset {
+       __le16 max_tx_power;
+       u8 chain_a;
+       u8 chain_b;
+} __packed; /* PER_CHAIN_LIMIT_OFFSET_PER_CHAIN_S_VER_1 */
+
+struct iwl_per_chain_offset_group {
+       struct iwl_per_chain_offset lb;
+       struct iwl_per_chain_offset hb;
+} __packed; /* PER_CHAIN_LIMIT_OFFSET_GROUP_S_VER_1 */
+
+/**
+ * struct iwl_geo_tx_power_profiles_cmd - struct for the GEO_TX_POWER_LIMIT cmd.
+ * @ops: operations, value from &enum iwl_geo_per_chain_offset_operation
+ * @table: offset profile per band.
+ */
+struct iwl_geo_tx_power_profiles_cmd {
+       __le32 ops;
+       struct iwl_per_chain_offset_group table[IWL_NUM_GEO_PROFILES];
+} __packed; /* GEO_TX_POWER_LIMIT */
+
 /**
  * struct iwl_beacon_filter_cmd
  * REPLY_BEACON_FILTERING_CMD = 0xd2 (command)
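
The GEO_TX_POWER_LIMIT structures above are plain nested arrays: one per-chain offset pair per band, times three geographic profiles. A sketch of marshalling a flat host-side table into the command; the field names mirror the hunk, while the flat values[] layout and the helper are assumptions for illustration, and cpu_to_le*() conversion is elided:

    #include <stdint.h>

    #define IWL_NUM_GEO_PROFILES 3

    struct per_chain_offset {       /* mirrors iwl_per_chain_offset */
            uint16_t max_tx_power;
            uint8_t chain_a;
            uint8_t chain_b;
    };

    struct per_chain_offset_group { /* low band + high band */
            struct per_chain_offset lb;
            struct per_chain_offset hb;
    };

    struct geo_tx_power_profiles_cmd {
            uint32_t ops;
            struct per_chain_offset_group table[IWL_NUM_GEO_PROFILES];
    };

    /* Fill all profiles from a flat table: 3 bytes per band (max power,
     * chain A offset, chain B offset), 2 bands per profile. */
    static void fill_geo_cmd(struct geo_tx_power_profiles_cmd *cmd,
                             const uint8_t *values)
    {
            for (int i = 0; i < IWL_NUM_GEO_PROFILES; i++) {
                    struct per_chain_offset_group *g = &cmd->table[i];

                    g->lb.max_tx_power = values[0];
                    g->lb.chain_a = values[1];
                    g->lb.chain_b = values[2];
                    g->hb.max_tx_power = values[3];
                    g->hb.chain_a = values[4];
                    g->hb.chain_b = values[5];
                    values += 6;
            }
    }
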
index c78a0c49945981de464cda0d1d863a95a0173178..3178eb96e395098515784888259fb5c1f670ed79 100644 (file)
@@ -516,7 +516,7 @@ struct iwl_scan_dwell {
  *                             scan_config_channel_flag
  * @channel_array:             default supported channels
  */
-struct iwl_scan_config {
+struct iwl_scan_config_v1 {
        __le32 flags;
        __le32 tx_chains;
        __le32 rx_chains;
@@ -532,7 +532,7 @@ struct iwl_scan_config {
 
 #define SCAN_TWO_LMACS 2
 
-struct iwl_scan_config_cdb {
+struct iwl_scan_config {
        __le32 flags;
        __le32 tx_chains;
        __le32 rx_chains;
@@ -669,7 +669,7 @@ struct iwl_scan_req_umac {
                        u8 n_channels;
                        __le16 reserved;
                        u8 data[];
-               } no_cdb; /* SCAN_REQUEST_CMD_UMAC_API_S_VER_1 */
+               } v1; /* SCAN_REQUEST_CMD_UMAC_API_S_VER_1 */
                struct {
                        __le32 max_out_time[SCAN_TWO_LMACS];
                        __le32 suspend_time[SCAN_TWO_LMACS];
@@ -679,13 +679,13 @@ struct iwl_scan_req_umac {
                        u8 n_channels;
                        __le16 reserved;
                        u8 data[];
-               } cdb; /* SCAN_REQUEST_CMD_UMAC_API_S_VER_5 */
+               } v6; /* SCAN_REQUEST_CMD_UMAC_API_S_VER_6 */
        };
 } __packed;
 
-#define IWL_SCAN_REQ_UMAC_SIZE_CDB sizeof(struct iwl_scan_req_umac)
-#define IWL_SCAN_REQ_UMAC_SIZE (sizeof(struct iwl_scan_req_umac) - \
-                               2 * sizeof(__le32))
+#define IWL_SCAN_REQ_UMAC_SIZE sizeof(struct iwl_scan_req_umac)
+#define IWL_SCAN_REQ_UMAC_SIZE_V1 (sizeof(struct iwl_scan_req_umac) - \
+                                  2 * sizeof(__le32))
 
 /**
  * struct iwl_umac_scan_abort
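
Renaming no_cdb/cdb to v1/v6 reframes the two scan-request layouts as API versions, and the size macros flip accordingly: the full struct is now the default and the V1 size is derived by subtraction. A trimmed sketch of the size selection; the api_ver parameter is an illustrative stand-in for the driver's TLV-based version check:

    #include <stddef.h>
    #include <stdint.h>

    #define SCAN_TWO_LMACS 2

    struct scan_req_umac {          /* trimmed to the size-relevant tail */
            uint32_t flags;
            uint32_t uid;
            union {
                    struct {        /* SCAN_REQUEST_CMD_UMAC_API_S_VER_1 */
                            uint32_t max_out_time;
                            uint32_t suspend_time;
                    } v1;
                    struct {        /* SCAN_REQUEST_CMD_UMAC_API_S_VER_6 */
                            uint32_t max_out_time[SCAN_TWO_LMACS];
                            uint32_t suspend_time[SCAN_TWO_LMACS];
                    } v6;
            };
    };

    #define SCAN_REQ_UMAC_SIZE      sizeof(struct scan_req_umac)
    /* V1 lacks the second LMAC's dwell fields: two 32-bit words less. */
    #define SCAN_REQ_UMAC_SIZE_V1   (sizeof(struct scan_req_umac) - \
                                     2 * sizeof(uint32_t))

    static size_t scan_cmd_size(int api_ver)
    {
            return api_ver >= 6 ? SCAN_REQ_UMAC_SIZE
                                : SCAN_REQ_UMAC_SIZE_V1;
    }
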
index 3b5150e9975d6e49e3e191b67020d5656d0cd0b4..e79df1c53d68316b42ee58d10d554df597fd9551 100644 (file)
@@ -7,7 +7,7 @@
  *
  * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
  * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
- * Copyright(c) 2016 Intel Deutschland GmbH
+ * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of version 2 of the GNU General Public License as
@@ -34,7 +34,7 @@
  *
  * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
  * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
- * Copyright(c) 2016 Intel Deutschland GmbH
+ * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -179,7 +179,7 @@ enum iwl_sta_key_flag {
  * enum iwl_sta_modify_flag - indicate to the fw what flag are being changed
  * @STA_MODIFY_QUEUE_REMOVAL: this command removes a queue
  * @STA_MODIFY_TID_DISABLE_TX: this command modifies %tid_disable_tx
- * @STA_MODIFY_UAPSD_ACS: this command modifies %uapsd_trigger_acs
+ * @STA_MODIFY_UAPSD_ACS: this command modifies %uapsd_acs
  * @STA_MODIFY_ADD_BA_TID: this command modifies %add_immediate_ba_tid
  * @STA_MODIFY_REMOVE_BA_TID: this command modifies %remove_immediate_ba_tid
  * @STA_MODIFY_SLEEPING_STA_TX_COUNT: this command modifies %sleep_tx_count
@@ -351,10 +351,12 @@ struct iwl_mvm_add_sta_cmd_v7 {
  * @assoc_id: assoc_id to be sent in VHT PLCP (9-bit), for grp use 0, for AP
  *     mac-addr.
  * @beamform_flags: beam forming controls
- * @tfd_queue_msk: tfd queues used by this station
+ * @tfd_queue_msk: tfd queues used by this station.
+ *     Obsolete for the new TX API (9 and above).
  * @rx_ba_window: aggregation window size
- * @scd_queue_bank: queue bank in used. Each bank contains 32 queues. 0 means
- *     that the queues used by this station are in the first 32.
+ * @sp_length: the size of the SP as it appears in the WME IE
+ * @uapsd_acs: 4 LS bits are the trigger-enabled ACs, 4 MS bits are the
+ *     delivery-enabled ACs.
  *
  * The device contains an internal table of per-station information, with info
  * on security keys, aggregation parameters, and Tx rates for initial Tx
@@ -384,32 +386,54 @@ struct iwl_mvm_add_sta_cmd {
        __le16 beamform_flags;
        __le32 tfd_queue_msk;
        __le16 rx_ba_window;
-       u8 scd_queue_bank;
-       u8 uapsd_trigger_acs;
-} __packed; /* ADD_STA_CMD_API_S_VER_8 */
+       u8 sp_length;
+       u8 uapsd_acs;
+} __packed; /* ADD_STA_CMD_API_S_VER_9 */
 
 /**
- * struct iwl_mvm_add_sta_key_cmd - add/modify sta key
+ * struct iwl_mvm_add_sta_key_common - add/modify sta key common part
  * ( REPLY_ADD_STA_KEY = 0x17 )
  * @sta_id: index of station in uCode's station table
  * @key_offset: key offset in key storage
  * @key_flags: type %iwl_sta_key_flag
  * @key: key material data
  * @rx_secur_seq_cnt: RX security sequence counter for the key
- * @tkip_rx_tsc_byte2: TSC[2] for key mix ph1 detection
- * @tkip_rx_ttak: 10-byte unicast TKIP TTAK for Rx
  */
-struct iwl_mvm_add_sta_key_cmd {
+struct iwl_mvm_add_sta_key_common {
        u8 sta_id;
        u8 key_offset;
        __le16 key_flags;
        u8 key[32];
        u8 rx_secur_seq_cnt[16];
+} __packed;
+
+/**
+ * struct iwl_mvm_add_sta_key_cmd_v1 - add/modify sta key
+ * @common: see &struct iwl_mvm_add_sta_key_common
+ * @tkip_rx_tsc_byte2: TSC[2] for key mix ph1 detection
+ * @tkip_rx_ttak: 10-byte unicast TKIP TTAK for Rx
+ */
+struct iwl_mvm_add_sta_key_cmd_v1 {
+       struct iwl_mvm_add_sta_key_common common;
        u8 tkip_rx_tsc_byte2;
        u8 reserved;
        __le16 tkip_rx_ttak[5];
 } __packed; /* ADD_MODIFY_STA_KEY_API_S_VER_1 */
 
+/**
+ * struct iwl_mvm_add_sta_key_cmd - add/modify sta key
+ * @common: see &struct iwl_mvm_add_sta_key_common
+ * @rx_mic_key: TKIP RX unicast or multicast key
+ * @tx_mic_key: TKIP TX key
+ * @transmit_seq_cnt: TSC, transmit packet number
+ */
+struct iwl_mvm_add_sta_key_cmd {
+       struct iwl_mvm_add_sta_key_common common;
+       __le64 rx_mic_key;
+       __le64 tx_mic_key;
+       __le64 transmit_seq_cnt;
+} __packed; /* ADD_MODIFY_STA_KEY_API_S_VER_2 */
+
 /**
  * enum iwl_mvm_add_sta_rsp_status - status in the response to ADD_STA command
  * @ADD_STA_SUCCESS: operation was executed successfully
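
Splitting the key command into a shared iwl_mvm_add_sta_key_common prefix plus per-version tails is a standard forward-compatibility trick: one helper can fill either version through a pointer to the common part. A simplified sketch; the field set is trimmed and fill_common() is illustrative:

    #include <string.h>
    #include <stdint.h>

    struct sta_key_common {         /* shared prefix of both versions */
            uint8_t sta_id;
            uint8_t key_offset;
            uint16_t key_flags;
            uint8_t key[32];
    };

    struct sta_key_cmd_v1 {
            struct sta_key_common common;
            uint8_t tkip_rx_tsc_byte2;
            uint8_t reserved;
            uint16_t tkip_rx_ttak[5];
    };

    struct sta_key_cmd_v2 {
            struct sta_key_common common;
            uint64_t rx_mic_key;
            uint64_t tx_mic_key;
            uint64_t transmit_seq_cnt;
    };

    /* With the prefix as a named struct rather than copy-pasted fields,
     * one helper populates either version via the common pointer. */
    static void fill_common(struct sta_key_common *c, uint8_t sta_id,
                            uint8_t offset, const uint8_t *key, size_t len)
    {
            c->sta_id = sta_id;
            c->key_offset = offset;
            memcpy(c->key, key, len < sizeof(c->key) ? len : sizeof(c->key));
    }
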
index b38cc073adcc7b0eb876bb4773dc2ad22762a8f9..81b98915b1a42e21a318d293c459015688489c7f 100644 (file)
@@ -6,7 +6,7 @@
  * GPL LICENSE SUMMARY
  *
  * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
- * Copyright(c) 2016 Intel Deutschland GmbH
+ * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of version 2 of the GNU General Public License as
@@ -32,6 +32,7 @@
  * BSD LICENSE
  *
  * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
+ * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -123,6 +124,20 @@ enum iwl_tx_flags {
        TX_CMD_FLG_HCCA_CHUNK           = BIT(31)
 }; /* TX_FLAGS_BITS_API_S_VER_1 */
 
+/**
+ * enum iwl_tx_cmd_flags - bitmasks for tx_flags in TX command for a000
+ * @IWL_TX_FLAGS_CMD_RATE: use rate from the TX command
+ * @IWL_TX_FLAGS_ENCRYPT_DIS: frame should not be encrypted, even if it belongs
+ *     to a secured STA
+ * @IWL_TX_FLAGS_HIGH_PRI: high priority frame (like EAPOL) - can affect rate
+ *     selection, retry limits and BT kill
+ */
+enum iwl_tx_cmd_flags {
+       IWL_TX_FLAGS_CMD_RATE           = BIT(0),
+       IWL_TX_FLAGS_ENCRYPT_DIS        = BIT(1),
+       IWL_TX_FLAGS_HIGH_PRI           = BIT(2),
+}; /* TX_FLAGS_BITS_API_S_VER_3 */
+
 /**
  * enum iwl_tx_pm_timeouts - pm timeout values in TX command
  * @PM_FRAME_NONE: no need to suspend sleep mode
@@ -159,7 +174,7 @@ enum iwl_tx_cmd_sec_ctrl {
        TX_CMD_SEC_EXT                  = 0x04,
        TX_CMD_SEC_GCMP                 = 0x05,
        TX_CMD_SEC_KEY128               = 0x08,
-       TX_CMD_SEC_KEY_FROM_TABLE       = 0x08,
+       TX_CMD_SEC_KEY_FROM_TABLE       = 0x10,
 };
 
 /* TODO: how are these values OK with only a 16-bit variable??? */
@@ -301,6 +316,31 @@ struct iwl_tx_cmd {
        struct ieee80211_hdr hdr[0];
 } __packed; /* TX_CMD_API_S_VER_6 */
 
+struct iwl_dram_sec_info {
+       __le32 pn_low;
+       __le16 pn_high;
+       __le16 aux_info;
+} __packed; /* DRAM_SEC_INFO_API_S_VER_1 */
+
+/**
+ * struct iwl_tx_cmd_gen2 - TX command struct to FW for a000 devices
+ * ( TX_CMD = 0x1c )
+ * @len: length in bytes of the payload, see below for details
+ * @offload_assist: TX offload configuration
+ * @flags: combination of &enum iwl_tx_cmd_flags
+ * @dram_info: FW internal DRAM storage
+ * @rate_n_flags: rate for *all* Tx attempts, if TX_CMD_FLG_STA_RATE_MSK is
+ *     cleared. Combination of RATE_MCS_*
+ */
+struct iwl_tx_cmd_gen2 {
+       __le16 len;
+       __le16 offload_assist;
+       __le32 flags;
+       struct iwl_dram_sec_info dram_info;
+       __le32 rate_n_flags;
+       struct ieee80211_hdr hdr[0];
+} __packed; /* TX_CMD_API_S_VER_7 */
+
 /*
  * TX response related data
  */
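
The a000 TX command replaces the legacy command's many per-field controls with one compact flags word, using the three bits defined earlier in this file. A sketch of composing the flags; the predicate arguments are illustrative, the bit semantics come from the enum's kernel-doc:

    #include <stdint.h>

    enum tx_cmd_flags {             /* mirrors enum iwl_tx_cmd_flags */
            TX_FLAGS_CMD_RATE       = 1u << 0,
            TX_FLAGS_ENCRYPT_DIS    = 1u << 1,
            TX_FLAGS_HIGH_PRI       = 1u << 2,
    };

    static uint32_t gen2_tx_flags(int fixed_rate, int no_crypt, int is_eapol)
    {
            uint32_t flags = 0;

            if (fixed_rate)
                    flags |= TX_FLAGS_CMD_RATE;     /* honor rate_n_flags */
            if (no_crypt)
                    flags |= TX_FLAGS_ENCRYPT_DIS;  /* skip HW encryption */
            if (is_eapol)
                    flags |= TX_FLAGS_HIGH_PRI;     /* rate/retry/BT-kill */
            return flags;
    }
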
@@ -508,9 +548,11 @@ struct agg_tx_status {
  * @tlc_info: TLC rate info
  * @ra_tid: bits [3:0] = ra, bits [7:4] = tid
  * @frame_ctrl: frame control
+ * @tx_queue: TX queue for this response
  * @status: for non-agg:  frame status TX_STATUS_*
  *     for agg: status of 1st frame, AGG_TX_STATE_*; other frame status fields
  *     follow this one, up to frame_count.
+ *     For version 6, a TX response isn't received for aggregation at all.
  *
  * After the array of statuses comes the SSN of the SCD. Look at
  * %iwl_mvm_get_scd_ssn for more details.
@@ -537,9 +579,17 @@ struct iwl_mvm_tx_resp {
        u8 tlc_info;
        u8 ra_tid;
        __le16 frame_ctrl;
-
-       struct agg_tx_status status;
-} __packed; /* TX_RSP_API_S_VER_3 */
+       union {
+               struct {
+                       struct agg_tx_status status;
+               } v3;/* TX_RSP_API_S_VER_3 */
+               struct {
+                       __le16 tx_queue;
+                       __le16 reserved2;
+                       struct agg_tx_status status;
+               } v6;
+       };
+} __packed; /* TX_RSP_API_S_VER_6 */
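
With the status block behind a union, consumers of the TX response must select the member by firmware API version instead of assuming a fixed offset. A trimmed sketch of that selection; the api_ver parameter stands in for the driver's version lookup:

    #include <stdint.h>

    struct agg_tx_status {
            uint16_t status;
            uint16_t sequence;
    };

    struct tx_resp {                /* trimmed to the tail of the struct */
            uint16_t frame_ctrl;
            union {
                    struct {
                            struct agg_tx_status status;
                    } v3;
                    struct {
                            uint16_t tx_queue;
                            uint16_t reserved2;
                            struct agg_tx_status status;
                    } v6;
            };
    };

    /* Same logical field, different offset depending on the API version
     * the firmware advertised. */
    static struct agg_tx_status *tx_resp_status(struct tx_resp *r,
                                                int api_ver)
    {
            return api_ver >= 6 ? &r->v6.status : &r->v3.status;
    }
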
 
 /**
  * struct iwl_mvm_ba_notif - notifies about reception of BA
@@ -579,11 +629,14 @@ struct iwl_mvm_ba_notif {
  * struct iwl_mvm_compressed_ba_tfd - progress of a TFD queue
  * @q_num: TFD queue number
  * @tfd_index: index of the first un-acked frame in the TFD queue
+ * @scd_queue: For debug only - the physical queue the TFD queue is bound to
  */
 struct iwl_mvm_compressed_ba_tfd {
-       u8 q_num;
-       u8 reserved;
+       __le16 q_num;
        __le16 tfd_index;
+       u8 scd_queue;
+       u8 reserved;
+       __le16 reserved2;
 } __packed; /* COMPRESSED_BA_TFD_API_S_VER_1 */
 
 /**
@@ -635,6 +688,10 @@ enum iwl_mvm_ba_resp_flags {
  * @tx_rate: the rate the aggregation was sent at
  * @tfd_cnt: number of TFD-Q elements
  * @ra_tid_cnt: number of RATID-Q elements
+ * @ba_tfd: array of TFD queue status updates. See &iwl_mvm_compressed_ba_tfd
+ *     for details.
+ * @ra_tid: array of RA-TID queue status updates. For debug purposes only. See
+ *     &iwl_mvm_compressed_ba_ratid for more details.
  */
 struct iwl_mvm_compressed_ba_notif {
        __le32 flags;
@@ -646,6 +703,7 @@ struct iwl_mvm_compressed_ba_notif {
        __le16 query_frame_cnt;
        __le16 txed;
        __le16 done;
+       __le16 reserved;
        __le32 wireless_time;
        __le32 tx_rate;
        __le16 tfd_cnt;
@@ -754,25 +812,6 @@ struct iwl_tx_path_flush_cmd {
        __le16 reserved;
 } __packed; /* TX_PATH_FLUSH_CMD_API_S_VER_1 */
 
-/**
- * iwl_mvm_get_scd_ssn - returns the SSN of the SCD
- * @tx_resp: the Tx response from the fw (agg or non-agg)
- *
- * When the fw sends an AMPDU, it fetches the MPDUs one after the other. Since
- * it can't know that everything will go well until the end of the AMPDU, it
- * can't know in advance the number of MPDUs that will be sent in the current
- * batch. This is why it writes the agg Tx response while it fetches the MPDUs.
- * Hence, it can't know in advance what the SSN of the SCD will be at the end
- * of the batch. This is why the SSN of the SCD is written at the end of the
- * whole struct at a variable offset. This function knows how to cope with the
- * variable offset and returns the SSN of the SCD.
- */
-static inline u32 iwl_mvm_get_scd_ssn(struct iwl_mvm_tx_resp *tx_resp)
-{
-       return le32_to_cpup((__le32 *)&tx_resp->status +
-                           tx_resp->frame_count) & 0xfff;
-}
-
 /* Available options for the SCD_QUEUE_CFG HCMD */
 enum iwl_scd_cfg_actions {
        SCD_CFG_DISABLE_QUEUE           = 0x0,
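
The iwl_mvm_get_scd_ssn() helper removed above is worth a second look, since its kernel-doc explains a subtle layout: the firmware appends one agg_tx_status per MPDU, so the SCD SSN trails the struct at a frame_count-dependent offset. A userspace sketch of the same pointer arithmetic, with le32 handling reduced to a plain load and the response assumed to be fully buffered:

    #include <stdint.h>

    struct agg_tx_status {          /* 4 bytes per entry */
            uint16_t status;
            uint16_t sequence;
    };

    struct tx_resp_v3 {             /* heavily trimmed */
            uint8_t frame_count;    /* MPDUs in this batch */
            uint8_t reserved[3];
            struct agg_tx_status status;    /* first of frame_count entries */
    };

    /* The SSN is a 32-bit word the firmware writes right after the last
     * status entry, so its offset depends on how many MPDUs were
     * actually fetched.  The caller must hand in a response buffer that
     * really contains frame_count entries plus the SSN. */
    static uint32_t get_scd_ssn(const struct tx_resp_v3 *r)
    {
            const uint32_t *p = (const uint32_t *)&r->status;

            return p[r->frame_count] & 0xfff;
    }
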
index cf2b836f38881364c440be63cc7feb76ae738bdc..f545c5f9e4e38e8c1d565885b4b60222589b6230 100644 (file)
@@ -7,7 +7,7 @@
  *
  * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
  * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
- * Copyright(c) 2016        Intel Deutschland GmbH
+ * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of version 2 of the GNU General Public License as
@@ -34,7 +34,7 @@
  *
  * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
  * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
- * Copyright(c) 2016        Intel Deutschland GmbH
+ * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -320,12 +320,14 @@ enum iwl_phy_ops_subcmd_ids {
        CMD_DTS_MEASUREMENT_TRIGGER_WIDE = 0x0,
        CTDP_CONFIG_CMD = 0x03,
        TEMP_REPORTING_THRESHOLDS_CMD = 0x04,
+       GEO_TX_POWER_LIMIT = 0x05,
        CT_KILL_NOTIFICATION = 0xFE,
        DTS_MEASUREMENT_NOTIF_WIDE = 0xFF,
 };
 
 enum iwl_system_subcmd_ids {
        SHARED_MEM_CFG_CMD = 0x0,
+       INIT_EXTENDED_CFG_CMD = 0x03,
 };
 
 enum iwl_data_path_subcmd_ids {
@@ -345,9 +347,10 @@ enum iwl_regulatory_and_nvm_subcmd_ids {
        NVM_ACCESS_COMPLETE = 0x0,
 };
 
-enum iwl_fmac_debug_cmds {
+enum iwl_debug_cmds {
        LMAC_RD_WR = 0x0,
        UMAC_RD_WR = 0x1,
+       MFU_ASSERT_DUMP_NTF = 0xFE,
 };
 
 /* command groups */
@@ -673,10 +676,8 @@ struct iwl_error_resp {
 
 
 /* Common PHY, MAC and Bindings definitions */
-
 #define MAX_MACS_IN_BINDING    (3)
 #define MAX_BINDINGS           (4)
-#define AUX_BINDING_INDEX      (3)
 
 /* Used to extract ID and color from the context dword */
 #define FW_CTXT_ID_POS   (0)
@@ -689,7 +690,7 @@ struct iwl_error_resp {
                                          (_color << FW_CTXT_COLOR_POS))
 
 /* Possible actions on PHYs, MACs and Bindings */
-enum {
+enum iwl_phy_ctxt_action {
        FW_CTXT_ACTION_STUB = 0,
        FW_CTXT_ACTION_ADD,
        FW_CTXT_ACTION_MODIFY,
@@ -960,6 +961,7 @@ struct iwl_time_event_notif {
  * @action: action to perform, one of FW_CTXT_ACTION_*
  * @macs: array of MAC id and colors which belong to the binding
  * @phy: PHY id and color which belongs to the binding
+ * @lmac_id: the lmac id the binding belongs to
  */
 struct iwl_binding_cmd {
        /* COMMON_INDEX_HDR_API_S_VER_1 */
@@ -968,7 +970,13 @@ struct iwl_binding_cmd {
        /* BINDING_DATA_API_S_VER_1 */
        __le32 macs[MAX_MACS_IN_BINDING];
        __le32 phy;
-} __packed; /* BINDING_CMD_API_S_VER_1 */
+       /* BINDING_CMD_API_S_VER_1 */
+       __le32 lmac_id;
+} __packed; /* BINDING_CMD_API_S_VER_2 */
+
+#define IWL_BINDING_CMD_SIZE_V1        offsetof(struct iwl_binding_cmd, lmac_id)
+#define IWL_LMAC_24G_INDEX             0
+#define IWL_LMAC_5G_INDEX              1
 
 /* The maximal number of fragments in the FW's schedule session */
 #define IWL_MVM_MAX_QUOTA 128
@@ -990,6 +998,9 @@ struct iwl_time_quota_data {
  * struct iwl_time_quota_cmd - configuration of time quota between bindings
  * ( TIME_QUOTA_CMD = 0x2c )
  * @quotas: allocations per binding
+ * Note: on non-CDB the fourth one is the auxiliary MAC and is
+ *     essentially zero.
+ *     On CDB the fourth one is a regular binding.
  */
 struct iwl_time_quota_cmd {
        struct iwl_time_quota_data quotas[MAX_BINDINGS];
@@ -1230,6 +1241,25 @@ struct iwl_mfuart_load_notif {
        __le32 image_size;
 } __packed; /*MFU_LOADER_NTFY_API_S_VER_2*/
 
+/**
+ * struct iwl_mfu_assert_dump_notif - mfuart dump logs
+ * ( MFU_ASSERT_DUMP_NTF = 0xfe )
+ * @assert_id: mfuart assert id that caused the notif
+ * @curr_reset_num: number of asserts since uptime
+ * @index_num: current chunk id
+ * @parts_num: total number of chunks
+ * @data_size: number of data bytes sent
+ * @data: data buffer
+ */
+struct iwl_mfu_assert_dump_notif {
+       __le32   assert_id;
+       __le32   curr_reset_num;
+       __le16   index_num;
+       __le16   parts_num;
+       __le32   data_size;
+       __le32   data[0];
+} __packed; /*MFU_DUMP_ASSERT_API_S_VER_1*/
+
 /**
  * struct iwl_set_calib_default_cmd - set default value for calibration.
  * ( SET_CALIB_DEFAULT_CMD = 0x8e )
@@ -1998,19 +2028,48 @@ struct iwl_shared_mem_cfg_v1 {
        __le32 internal_txfifo_size[TX_FIFO_INTERNAL_MAX_NUM];
 } __packed; /* SHARED_MEM_ALLOC_API_S_VER_2 */
 
+/**
+ * struct iwl_shared_mem_lmac_cfg - LMAC shared memory configuration
+ *
+ * @txfifo_addr: start addr of TXF0 (excluding the context table 0.5KB)
+ * @txfifo_size: size of TX FIFOs
+ * @rxfifo1_addr: RXF1 addr
+ * @rxfifo1_size: RXF1 size
+ */
+struct iwl_shared_mem_lmac_cfg {
+       __le32 txfifo_addr;
+       __le32 txfifo_size[TX_FIFO_MAX_NUM];
+       __le32 rxfifo1_addr;
+       __le32 rxfifo1_size;
+
+} __packed; /* SHARED_MEM_ALLOC_LMAC_API_S_VER_1 */
+
+/**
+ * struct iwl_shared_mem_cfg - shared memory configuration from the FW
+ *
+ * @shared_mem_addr: shared memory address
+ * @shared_mem_size: shared memory size
+ * @sample_buff_addr: internal sample (mon/adc) buff addr
+ * @sample_buff_size: internal sample buff size
+ * @rxfifo2_addr: start addr of RXF2
+ * @rxfifo2_size: size of RXF2
+ * @page_buff_addr: used by UMAC and performance debug (page miss analysis),
+ *     when paging is not supported this should be 0
+ * @page_buff_size: size of %page_buff_addr
+ * @lmac_num: number of LMACs (1 or 2)
+ * @lmac_smem: per-LMAC smem data
+ */
 struct iwl_shared_mem_cfg {
        __le32 shared_mem_addr;
        __le32 shared_mem_size;
        __le32 sample_buff_addr;
        __le32 sample_buff_size;
-       __le32 txfifo_addr;
-       __le32 txfifo_size[TX_FIFO_MAX_NUM];
-       __le32 rxfifo_size[RX_FIFO_MAX_NUM];
+       __le32 rxfifo2_addr;
+       __le32 rxfifo2_size;
        __le32 page_buff_addr;
        __le32 page_buff_size;
-       __le32 rxfifo_addr;
-       __le32 internal_txfifo_addr;
-       __le32 internal_txfifo_size[TX_FIFO_INTERNAL_MAX_NUM];
+       __le32 lmac_num;
+       struct iwl_shared_mem_lmac_cfg lmac_smem[2];
 } __packed; /* SHARED_MEM_ALLOC_API_S_VER_3 */
 
 /**
@@ -2178,4 +2237,26 @@ struct iwl_nvm_access_complete_cmd {
        __le32 reserved;
 } __packed; /* NVM_ACCESS_COMPLETE_CMD_API_S_VER_1 */
 
+/**
+ * enum iwl_extended_cfg_flag - commands driver may send before
+ *     finishing init flow
+ * @IWL_INIT_DEBUG_CFG: driver is going to send debug config command
+ * @IWL_INIT_NVM: driver is going to send NVM_ACCESS commands
+ * @IWL_INIT_PHY: driver is going to send PHY_DB commands
+ */
+enum iwl_extended_cfg_flags {
+       IWL_INIT_DEBUG_CFG,
+       IWL_INIT_NVM,
+       IWL_INIT_PHY,
+};
+
+/**
+ * struct iwl_init_extended_cfg_cmd - mark what commands ucode should wait
+ *     for before finishing the init flow
+ * @init_flags: BIT() mask built from &enum iwl_extended_cfg_flags
+ */
+struct iwl_init_extended_cfg_cmd {
+       __le32 init_flags;
+} __packed; /* INIT_EXTENDED_CFG_CMD_API_S_VER_1 */
+
 #endif /* __fw_api_h__ */
index a027b11bbdb38b5040d679982762959c03df97d2..7b86a4f1b574c6f507fbde87c1276241fd534a4a 100644 (file)
@@ -7,7 +7,7 @@
  *
  * Copyright(c) 2008 - 2014 Intel Corporation. All rights reserved.
  * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
- * Copyright(c) 2015 - 2016 Intel Deutschland GmbH
+ * Copyright(c) 2015 - 2017 Intel Deutschland GmbH
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of version 2 of the GNU General Public License as
@@ -32,7 +32,7 @@
  *
  * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved.
  * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
- * Copyright(c) 2015 - 2016 Intel Deutschland GmbH
+ * Copyright(c) 2015 - 2017 Intel Deutschland GmbH
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -99,10 +99,120 @@ static void iwl_mvm_read_radio_reg(struct iwl_mvm *mvm,
        iwl_trans_release_nic_access(mvm->trans, &flags);
 }
 
+static void iwl_mvm_dump_rxf(struct iwl_mvm *mvm,
+                            struct iwl_fw_error_dump_data **dump_data,
+                            int size, u32 offset, int fifo_num)
+{
+       struct iwl_fw_error_dump_fifo *fifo_hdr;
+       u32 *fifo_data;
+       u32 fifo_len;
+       int i;
+
+       fifo_hdr = (void *)(*dump_data)->data;
+       fifo_data = (void *)fifo_hdr->data;
+       fifo_len = size;
+
+       /* No need to try to read the data if the length is 0 */
+       if (fifo_len == 0)
+               return;
+
+       /* Add a TLV for the RXF */
+       (*dump_data)->type = cpu_to_le32(IWL_FW_ERROR_DUMP_RXF);
+       (*dump_data)->len = cpu_to_le32(fifo_len + sizeof(*fifo_hdr));
+
+       fifo_hdr->fifo_num = cpu_to_le32(fifo_num);
+       fifo_hdr->available_bytes =
+               cpu_to_le32(iwl_trans_read_prph(mvm->trans,
+                                               RXF_RD_D_SPACE + offset));
+       fifo_hdr->wr_ptr =
+               cpu_to_le32(iwl_trans_read_prph(mvm->trans,
+                                               RXF_RD_WR_PTR + offset));
+       fifo_hdr->rd_ptr =
+               cpu_to_le32(iwl_trans_read_prph(mvm->trans,
+                                               RXF_RD_RD_PTR + offset));
+       fifo_hdr->fence_ptr =
+               cpu_to_le32(iwl_trans_read_prph(mvm->trans,
+                                               RXF_RD_FENCE_PTR + offset));
+       fifo_hdr->fence_mode =
+               cpu_to_le32(iwl_trans_read_prph(mvm->trans,
+                                               RXF_SET_FENCE_MODE + offset));
+
+       /* Lock fence */
+       iwl_trans_write_prph(mvm->trans, RXF_SET_FENCE_MODE + offset, 0x1);
+       /* Set fence pointer to the same place like WR pointer */
+       iwl_trans_write_prph(mvm->trans, RXF_LD_WR2FENCE + offset, 0x1);
+       /* Set fence offset */
+       iwl_trans_write_prph(mvm->trans,
+                            RXF_LD_FENCE_OFFSET_ADDR + offset, 0x0);
+
+       /* Read FIFO */
+       fifo_len /= sizeof(u32); /* Size in DWORDS */
+       for (i = 0; i < fifo_len; i++)
+               fifo_data[i] = iwl_trans_read_prph(mvm->trans,
+                                                RXF_FIFO_RD_FENCE_INC +
+                                                offset);
+       *dump_data = iwl_fw_error_next_data(*dump_data);
+}
+
+static void iwl_mvm_dump_txf(struct iwl_mvm *mvm,
+                            struct iwl_fw_error_dump_data **dump_data,
+                            int size, u32 offset, int fifo_num)
+{
+       struct iwl_fw_error_dump_fifo *fifo_hdr;
+       u32 *fifo_data;
+       u32 fifo_len;
+       int i;
+
+       fifo_hdr = (void *)(*dump_data)->data;
+       fifo_data = (void *)fifo_hdr->data;
+       fifo_len = size;
+
+       /* No need to try to read the data if the length is 0 */
+       if (fifo_len == 0)
+               return;
+
+       /* Add a TLV for the FIFO */
+       (*dump_data)->type = cpu_to_le32(IWL_FW_ERROR_DUMP_TXF);
+       (*dump_data)->len = cpu_to_le32(fifo_len + sizeof(*fifo_hdr));
+
+       fifo_hdr->fifo_num = cpu_to_le32(fifo_num);
+       fifo_hdr->available_bytes =
+               cpu_to_le32(iwl_trans_read_prph(mvm->trans,
+                                               TXF_FIFO_ITEM_CNT + offset));
+       fifo_hdr->wr_ptr =
+               cpu_to_le32(iwl_trans_read_prph(mvm->trans,
+                                               TXF_WR_PTR + offset));
+       fifo_hdr->rd_ptr =
+               cpu_to_le32(iwl_trans_read_prph(mvm->trans,
+                                               TXF_RD_PTR + offset));
+       fifo_hdr->fence_ptr =
+               cpu_to_le32(iwl_trans_read_prph(mvm->trans,
+                                               TXF_FENCE_PTR + offset));
+       fifo_hdr->fence_mode =
+               cpu_to_le32(iwl_trans_read_prph(mvm->trans,
+                                               TXF_LOCK_FENCE + offset));
+
+       /* Set the TXF_READ_MODIFY_ADDR to TXF_WR_PTR */
+       iwl_trans_write_prph(mvm->trans, TXF_READ_MODIFY_ADDR + offset,
+                            TXF_WR_PTR + offset);
+
+       /* Dummy-read to advance the read pointer to the head */
+       iwl_trans_read_prph(mvm->trans, TXF_READ_MODIFY_DATA + offset);
+
+       /* Read FIFO */
+       fifo_len /= sizeof(u32); /* Size in DWORDS */
+       for (i = 0; i < fifo_len; i++)
+               fifo_data[i] = iwl_trans_read_prph(mvm->trans,
+                                                 TXF_READ_MODIFY_DATA +
+                                                 offset);
+       *dump_data = iwl_fw_error_next_data(*dump_data);
+}
+
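
Both helpers above follow the same recipe: snapshot the FIFO pointer registers into the dump header, program a fence, then drain the FIFO through a read-auto-increment window register. A sketch of that window-read loop with the hardware reduced to a callback; the register names in the hunk are real, this harness is not:

    #include <stdint.h>

    typedef uint32_t (*prph_read_fn)(uint32_t addr);

    /* Drain a FIFO through a read-with-auto-increment window register:
     * each read of 'window' returns the next dword and advances the
     * hardware pointer, so the loop body is a single call. */
    static void read_fifo(prph_read_fn read_prph, uint32_t window,
                          uint32_t *out, uint32_t fifo_len_bytes)
    {
            uint32_t n = fifo_len_bytes / sizeof(uint32_t);

            for (uint32_t i = 0; i < n; i++)
                    out[i] = read_prph(window);
    }
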
 static void iwl_mvm_dump_fifos(struct iwl_mvm *mvm,
                               struct iwl_fw_error_dump_data **dump_data)
 {
        struct iwl_fw_error_dump_fifo *fifo_hdr;
+       struct iwl_mvm_shared_mem_cfg *cfg = &mvm->smem_cfg;
        u32 *fifo_data;
        u32 fifo_len;
        unsigned long flags;
@@ -111,126 +221,47 @@ static void iwl_mvm_dump_fifos(struct iwl_mvm *mvm,
        if (!iwl_trans_grab_nic_access(mvm->trans, &flags))
                return;
 
-       /* Pull RXF data from all RXFs */
-       for (i = 0; i < ARRAY_SIZE(mvm->shared_mem_cfg.rxfifo_size); i++) {
-               /*
-                * Keep aside the additional offset that might be needed for
-                * next RXF
-                */
-               u32 offset_diff = RXF_DIFF_FROM_PREV * i;
-
-               fifo_hdr = (void *)(*dump_data)->data;
-               fifo_data = (void *)fifo_hdr->data;
-               fifo_len = mvm->shared_mem_cfg.rxfifo_size[i];
-
-               /* No need to try to read the data if the length is 0 */
-               if (fifo_len == 0)
-                       continue;
-
-               /* Add a TLV for the RXF */
-               (*dump_data)->type = cpu_to_le32(IWL_FW_ERROR_DUMP_RXF);
-               (*dump_data)->len = cpu_to_le32(fifo_len + sizeof(*fifo_hdr));
-
-               fifo_hdr->fifo_num = cpu_to_le32(i);
-               fifo_hdr->available_bytes =
-                       cpu_to_le32(iwl_trans_read_prph(mvm->trans,
-                                                       RXF_RD_D_SPACE +
-                                                       offset_diff));
-               fifo_hdr->wr_ptr =
-                       cpu_to_le32(iwl_trans_read_prph(mvm->trans,
-                                                       RXF_RD_WR_PTR +
-                                                       offset_diff));
-               fifo_hdr->rd_ptr =
-                       cpu_to_le32(iwl_trans_read_prph(mvm->trans,
-                                                       RXF_RD_RD_PTR +
-                                                       offset_diff));
-               fifo_hdr->fence_ptr =
-                       cpu_to_le32(iwl_trans_read_prph(mvm->trans,
-                                                       RXF_RD_FENCE_PTR +
-                                                       offset_diff));
-               fifo_hdr->fence_mode =
-                       cpu_to_le32(iwl_trans_read_prph(mvm->trans,
-                                                       RXF_SET_FENCE_MODE +
-                                                       offset_diff));
-
-               /* Lock fence */
-               iwl_trans_write_prph(mvm->trans,
-                                    RXF_SET_FENCE_MODE + offset_diff, 0x1);
-               /* Set fence pointer to the same place like WR pointer */
-               iwl_trans_write_prph(mvm->trans,
-                                    RXF_LD_WR2FENCE + offset_diff, 0x1);
-               /* Set fence offset */
-               iwl_trans_write_prph(mvm->trans,
-                                    RXF_LD_FENCE_OFFSET_ADDR + offset_diff,
-                                    0x0);
-
-               /* Read FIFO */
-               fifo_len /= sizeof(u32); /* Size in DWORDS */
-               for (j = 0; j < fifo_len; j++)
-                       fifo_data[j] = iwl_trans_read_prph(mvm->trans,
-                                                        RXF_FIFO_RD_FENCE_INC +
-                                                        offset_diff);
-               *dump_data = iwl_fw_error_next_data(*dump_data);
-       }
-
-       /* Pull TXF data from all TXFs */
-       for (i = 0; i < ARRAY_SIZE(mvm->shared_mem_cfg.txfifo_size); i++) {
+       /* Pull RXF1 */
+       iwl_mvm_dump_rxf(mvm, dump_data, cfg->lmac[0].rxfifo1_size, 0, 0);
+       /* Pull RXF2 */
+       iwl_mvm_dump_rxf(mvm, dump_data, cfg->rxfifo2_size,
+                        RXF_DIFF_FROM_PREV, 1);
+       /* Pull LMAC2 RXF1 */
+       if (mvm->smem_cfg.num_lmacs > 1)
+               iwl_mvm_dump_rxf(mvm, dump_data, cfg->lmac[1].rxfifo1_size,
+                                LMAC2_PRPH_OFFSET, 2);
+
+       /* Pull TXF data from LMAC1 */
+       for (i = 0; i < mvm->smem_cfg.num_txfifo_entries; i++) {
                /* Mark the number of TXF we're pulling now */
                iwl_trans_write_prph(mvm->trans, TXF_LARC_NUM, i);
+               iwl_mvm_dump_txf(mvm, dump_data, cfg->lmac[0].txfifo_size[i],
+                                0, i);
+       }
 
-               fifo_hdr = (void *)(*dump_data)->data;
-               fifo_data = (void *)fifo_hdr->data;
-               fifo_len = mvm->shared_mem_cfg.txfifo_size[i];
-
-               /* No need to try to read the data if the length is 0 */
-               if (fifo_len == 0)
-                       continue;
-
-               /* Add a TLV for the FIFO */
-               (*dump_data)->type = cpu_to_le32(IWL_FW_ERROR_DUMP_TXF);
-               (*dump_data)->len = cpu_to_le32(fifo_len + sizeof(*fifo_hdr));
-
-               fifo_hdr->fifo_num = cpu_to_le32(i);
-               fifo_hdr->available_bytes =
-                       cpu_to_le32(iwl_trans_read_prph(mvm->trans,
-                                                       TXF_FIFO_ITEM_CNT));
-               fifo_hdr->wr_ptr =
-                       cpu_to_le32(iwl_trans_read_prph(mvm->trans,
-                                                       TXF_WR_PTR));
-               fifo_hdr->rd_ptr =
-                       cpu_to_le32(iwl_trans_read_prph(mvm->trans,
-                                                       TXF_RD_PTR));
-               fifo_hdr->fence_ptr =
-                       cpu_to_le32(iwl_trans_read_prph(mvm->trans,
-                                                       TXF_FENCE_PTR));
-               fifo_hdr->fence_mode =
-                       cpu_to_le32(iwl_trans_read_prph(mvm->trans,
-                                                       TXF_LOCK_FENCE));
-
-               /* Set the TXF_READ_MODIFY_ADDR to TXF_WR_PTR */
-               iwl_trans_write_prph(mvm->trans, TXF_READ_MODIFY_ADDR,
-                                    TXF_WR_PTR);
-
-               /* Dummy-read to advance the read pointer to the head */
-               iwl_trans_read_prph(mvm->trans, TXF_READ_MODIFY_DATA);
-
-               /* Read FIFO */
-               fifo_len /= sizeof(u32); /* Size in DWORDS */
-               for (j = 0; j < fifo_len; j++)
-                       fifo_data[j] = iwl_trans_read_prph(mvm->trans,
-                                                         TXF_READ_MODIFY_DATA);
-               *dump_data = iwl_fw_error_next_data(*dump_data);
+       /* Pull TXF data from LMAC2 */
+       if (mvm->smem_cfg.num_lmacs > 1) {
+               for (i = 0; i < mvm->smem_cfg.num_txfifo_entries; i++) {
+                       /* Mark the number of TXF we're pulling now */
+                       iwl_trans_write_prph(mvm->trans,
+                                            TXF_LARC_NUM + LMAC2_PRPH_OFFSET,
+                                            i);
+                       iwl_mvm_dump_txf(mvm, dump_data,
+                                        cfg->lmac[1].txfifo_size[i],
+                                        LMAC2_PRPH_OFFSET,
+                                        i + cfg->num_txfifo_entries);
+               }
        }
 
        if (fw_has_capa(&mvm->fw->ucode_capa,
                        IWL_UCODE_TLV_CAPA_EXTEND_SHARED_MEM_CFG)) {
                /* Pull UMAC internal TXF data from all TXFs */
                for (i = 0;
-                    i < ARRAY_SIZE(mvm->shared_mem_cfg.internal_txfifo_size);
+                    i < ARRAY_SIZE(mvm->smem_cfg.internal_txfifo_size);
                     i++) {
                        fifo_hdr = (void *)(*dump_data)->data;
                        fifo_data = (void *)fifo_hdr->data;
-                       fifo_len = mvm->shared_mem_cfg.internal_txfifo_size[i];
+                       fifo_len = mvm->smem_cfg.internal_txfifo_size[i];
 
                        /* No need to try to read the data if the length is 0 */
                        if (fifo_len == 0)
@@ -246,7 +277,7 @@ static void iwl_mvm_dump_fifos(struct iwl_mvm *mvm,
 
                        /* Mark the number of TXF we're pulling now */
                        iwl_trans_write_prph(mvm->trans, TXF_CPU2_NUM, i +
-                               ARRAY_SIZE(mvm->shared_mem_cfg.txfifo_size));
+                               mvm->smem_cfg.num_txfifo_entries);
 
                        fifo_hdr->available_bytes =
                                cpu_to_le32(iwl_trans_read_prph(mvm->trans,
@@ -553,31 +584,45 @@ void iwl_mvm_fw_error_dump(struct iwl_mvm *mvm)
 
        /* reading RXF/TXF sizes */
        if (test_bit(STATUS_FW_ERROR, &mvm->trans->status)) {
-               struct iwl_mvm_shared_mem_cfg *mem_cfg = &mvm->shared_mem_cfg;
+               struct iwl_mvm_shared_mem_cfg *mem_cfg = &mvm->smem_cfg;
 
                fifo_data_len = 0;
 
-               /* Count RXF size */
-               for (i = 0; i < ARRAY_SIZE(mem_cfg->rxfifo_size); i++) {
-                       if (!mem_cfg->rxfifo_size[i])
-                               continue;
-
+               /* Count RXF2 size */
+               if (mem_cfg->rxfifo2_size) {
                        /* Add header info */
-                       fifo_data_len += mem_cfg->rxfifo_size[i] +
+                       fifo_data_len += mem_cfg->rxfifo2_size +
                                         sizeof(*dump_data) +
                                         sizeof(struct iwl_fw_error_dump_fifo);
                }
 
-               for (i = 0; i < mem_cfg->num_txfifo_entries; i++) {
-                       if (!mem_cfg->txfifo_size[i])
+               /* Count RXF1 sizes */
+               for (i = 0; i < mem_cfg->num_lmacs; i++) {
+                       if (!mem_cfg->lmac[i].rxfifo1_size)
                                continue;
 
                        /* Add header info */
-                       fifo_data_len += mem_cfg->txfifo_size[i] +
+                       fifo_data_len += mem_cfg->lmac[i].rxfifo1_size +
                                         sizeof(*dump_data) +
                                         sizeof(struct iwl_fw_error_dump_fifo);
                }
 
+               /* Count TXF sizes */
+               for (i = 0; i < mem_cfg->num_lmacs; i++) {
+                       int j;
+
+                       for (j = 0; j < mem_cfg->num_txfifo_entries; j++) {
+                               if (!mem_cfg->lmac[i].txfifo_size[j])
+                                       continue;
+
+                               /* Add header info */
+                               fifo_data_len +=
+                                       mem_cfg->lmac[i].txfifo_size[j] +
+                                       sizeof(*dump_data) +
+                                       sizeof(struct iwl_fw_error_dump_fifo);
+                       }
+               }
+
                if (fw_has_capa(&mvm->fw->ucode_capa,
                                IWL_UCODE_TLV_CAPA_EXTEND_SHARED_MEM_CFG)) {
                        for (i = 0;
index 45cb4f476e761b18dd7a2326ff01cc571e9b3df9..900f1e25b9dac7b9c0f1098d35702eac36c523b2 100644 (file)
@@ -7,7 +7,7 @@
  *
  * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
  * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
- * Copyright(c) 2016 Intel Deutschland GmbH
+ * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of version 2 of the GNU General Public License as
@@ -34,6 +34,7 @@
  *
  * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
  * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
+ * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -271,6 +272,27 @@ static int iwl_fill_paging_mem(struct iwl_mvm *mvm, const struct fw_img *image)
        return 0;
 }
 
+void iwl_mvm_mfu_assert_dump_notif(struct iwl_mvm *mvm,
+                                  struct iwl_rx_cmd_buffer *rxb)
+{
+       struct iwl_rx_packet *pkt = rxb_addr(rxb);
+       struct iwl_mfu_assert_dump_notif *mfu_dump_notif = (void *)pkt->data;
+       __le32 *dump_data = mfu_dump_notif->data;
+       int n_words = le32_to_cpu(mfu_dump_notif->data_size) / sizeof(__le32);
+       int i;
+
+       if (mfu_dump_notif->index_num == 0)
+               IWL_INFO(mvm, "MFUART assert id 0x%x occurred\n",
+                        le32_to_cpu(mfu_dump_notif->assert_id));
+
+       for (i = 0; i < n_words; i++)
+               IWL_DEBUG_INFO(mvm,
+                              "MFUART assert dump, dword %u: 0x%08x\n",
+                              le16_to_cpu(mfu_dump_notif->index_num) *
+                              n_words + i,
+                              le32_to_cpu(dump_data[i]));
+}
+
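
The handler above logs a firmware dump that arrives in parts_num chunks, indexing each dword as index_num * n_words + i. A sketch of the same bookkeeping applied to reassembly into a buffer, which the real handler does not do; the buffering here only makes the indexing concrete:

    #include <stdint.h>
    #include <string.h>

    #define DUMP_MAX_WORDS 1024

    struct mfu_chunk {
            uint16_t index_num;     /* which chunk this is */
            uint16_t parts_num;     /* total chunks in the dump */
            uint32_t data_size;     /* bytes in data[] */
            uint32_t data[];
    };

    /* Place one chunk at its slot; every chunk of a given dump carries
     * the same per-chunk word count, so the base is index * n_words. */
    static void stash_chunk(uint32_t *dump, const struct mfu_chunk *c)
    {
            uint32_t n_words = c->data_size / sizeof(uint32_t);
            uint32_t base = c->index_num * n_words;

            if (base + n_words <= DUMP_MAX_WORDS)
                    memcpy(&dump[base], c->data, c->data_size);
    }
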
 static int iwl_alloc_fw_paging_mem(struct iwl_mvm *mvm,
                                   const struct fw_img *image)
 {
@@ -617,11 +639,18 @@ static int iwl_mvm_load_ucode_wait_alive(struct iwl_mvm *mvm,
        ret = iwl_wait_notification(&mvm->notif_wait, &alive_wait,
                                    MVM_UCODE_ALIVE_TIMEOUT);
        if (ret) {
-               if (mvm->trans->cfg->device_family == IWL_DEVICE_FAMILY_8000)
+               struct iwl_trans *trans = mvm->trans;
+
+               if (trans->cfg->gen2)
+                       IWL_ERR(mvm,
+                               "SecBoot CPU1 Status: 0x%x, CPU2 Status: 0x%x\n",
+                               iwl_read_prph(trans, UMAG_SB_CPU_1_STATUS),
+                               iwl_read_prph(trans, UMAG_SB_CPU_2_STATUS));
+               else if (trans->cfg->device_family == IWL_DEVICE_FAMILY_8000)
                        IWL_ERR(mvm,
                                "SecBoot CPU1 Status: 0x%x, CPU2 Status: 0x%x\n",
-                               iwl_read_prph(mvm->trans, SB_CPU_1_STATUS),
-                               iwl_read_prph(mvm->trans, SB_CPU_2_STATUS));
+                               iwl_read_prph(trans, SB_CPU_1_STATUS),
+                               iwl_read_prph(trans, SB_CPU_2_STATUS));
                mvm->cur_ucode = old_type;
                return ret;
        }
@@ -807,6 +836,9 @@ int iwl_run_unified_mvm_ucode(struct iwl_mvm *mvm, bool read_nvm)
 {
        struct iwl_notification_wait init_wait;
        struct iwl_nvm_access_complete_cmd nvm_complete = {};
+       struct iwl_init_extended_cfg_cmd init_cfg = {
+               .init_flags = cpu_to_le32(BIT(IWL_INIT_NVM)),
+       };
        static const u16 init_complete[] = {
                INIT_COMPLETE_NOTIF,
        };
@@ -828,10 +860,14 @@ int iwl_run_unified_mvm_ucode(struct iwl_mvm *mvm, bool read_nvm)
                goto error;
        }
 
-       /* TODO: remove when integrating context info */
-       ret = iwl_mvm_init_paging(mvm);
+       /* Send init config command to mark that we are sending NVM access
+        * commands
+        */
+       ret = iwl_mvm_send_cmd_pdu(mvm, WIDE_ID(SYSTEM_GROUP,
+                                               INIT_EXTENDED_CFG_CMD), 0,
+                                  sizeof(init_cfg), &init_cfg);
        if (ret) {
-               IWL_ERR(mvm, "Failed to init paging: %d\n",
+               IWL_ERR(mvm, "Failed to run init config command: %d\n",
                        ret);
                goto error;
        }
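
Note that IWL_INIT_NVM and friends are bit positions, not masks, hence the BIT() wrapping when init_flags is filled above. A sketch of composing the flags word; the enum values match the hunk, the helper is illustrative:

    #include <stdint.h>

    #define BIT(n) (1u << (n))

    enum init_cfg_flags {           /* bit positions, not masks */
            INIT_DEBUG_CFG,
            INIT_NVM,
            INIT_PHY,
    };

    /* Tell the firmware which command classes to expect before the init
     * flow is declared complete. */
    static uint32_t init_flags(int will_send_nvm, int will_send_phy)
    {
            uint32_t flags = 0;

            if (will_send_nvm)
                    flags |= BIT(INIT_NVM);
            if (will_send_phy)
                    flags |= BIT(INIT_PHY);
            return flags;
    }
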
@@ -876,24 +912,27 @@ static void iwl_mvm_parse_shared_mem_a000(struct iwl_mvm *mvm,
                                          struct iwl_rx_packet *pkt)
 {
        struct iwl_shared_mem_cfg *mem_cfg = (void *)pkt->data;
-       int i;
+       int i, lmac;
+       int lmac_num = le32_to_cpu(mem_cfg->lmac_num);
 
-       mvm->shared_mem_cfg.num_txfifo_entries =
-               ARRAY_SIZE(mvm->shared_mem_cfg.txfifo_size);
-       for (i = 0; i < ARRAY_SIZE(mem_cfg->txfifo_size); i++)
-               mvm->shared_mem_cfg.txfifo_size[i] =
-                       le32_to_cpu(mem_cfg->txfifo_size[i]);
-       for (i = 0; i < ARRAY_SIZE(mvm->shared_mem_cfg.rxfifo_size); i++)
-               mvm->shared_mem_cfg.rxfifo_size[i] =
-                       le32_to_cpu(mem_cfg->rxfifo_size[i]);
+       if (WARN_ON(lmac_num > ARRAY_SIZE(mem_cfg->lmac_smem)))
+               return;
 
-       BUILD_BUG_ON(sizeof(mvm->shared_mem_cfg.internal_txfifo_size) !=
-                    sizeof(mem_cfg->internal_txfifo_size));
+       mvm->smem_cfg.num_lmacs = lmac_num;
+       mvm->smem_cfg.num_txfifo_entries =
+               ARRAY_SIZE(mem_cfg->lmac_smem[0].txfifo_size);
+       mvm->smem_cfg.rxfifo2_size = le32_to_cpu(mem_cfg->rxfifo2_size);
 
-       for (i = 0; i < ARRAY_SIZE(mvm->shared_mem_cfg.internal_txfifo_size);
-            i++)
-               mvm->shared_mem_cfg.internal_txfifo_size[i] =
-                       le32_to_cpu(mem_cfg->internal_txfifo_size[i]);
+       for (lmac = 0; lmac < lmac_num; lmac++) {
+               struct iwl_shared_mem_lmac_cfg *lmac_cfg =
+                       &mem_cfg->lmac_smem[lmac];
+
+               for (i = 0; i < ARRAY_SIZE(lmac_cfg->txfifo_size); i++)
+                       mvm->smem_cfg.lmac[lmac].txfifo_size[i] =
+                               le32_to_cpu(lmac_cfg->txfifo_size[i]);
+               mvm->smem_cfg.lmac[lmac].rxfifo1_size =
+                       le32_to_cpu(lmac_cfg->rxfifo1_size);
+       }
 }
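
The v3 parser above only trusts lmac_num from the firmware after clamping it against the array it will index; the WARN_ON() is what keeps a bad value from walking off lmac_smem[2]. The same defensive pattern in a standalone sketch:

    #include <stdint.h>
    #include <stdio.h>

    #define ARRAY_SIZE(a)   (sizeof(a) / sizeof((a)[0]))
    #define TXFIFO_MAX      8

    struct lmac_cfg {
            uint32_t txfifo_size[TXFIFO_MAX];
            uint32_t rxfifo1_size;
    };

    struct shared_mem_cfg {
            uint32_t lmac_num;
            struct lmac_cfg lmac_smem[2];
    };

    static void parse(const struct shared_mem_cfg *fw)
    {
            uint32_t n = fw->lmac_num;

            /* Firmware-controlled count: validate it before using it as
             * an array bound. */
            if (n > ARRAY_SIZE(fw->lmac_smem)) {
                    fprintf(stderr, "bogus lmac_num %u\n", (unsigned)n);
                    return;
            }
            for (uint32_t i = 0; i < n; i++)
                    printf("lmac %u: rxfifo1 %u bytes\n", (unsigned)i,
                           (unsigned)fw->lmac_smem[i].rxfifo1_size);
    }
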
 
 static void iwl_mvm_parse_shared_mem(struct iwl_mvm *mvm,
@@ -902,25 +941,27 @@ static void iwl_mvm_parse_shared_mem(struct iwl_mvm *mvm,
        struct iwl_shared_mem_cfg_v1 *mem_cfg = (void *)pkt->data;
        int i;
 
-       mvm->shared_mem_cfg.num_txfifo_entries =
-               ARRAY_SIZE(mvm->shared_mem_cfg.txfifo_size);
+       mvm->smem_cfg.num_lmacs = 1;
+
+       mvm->smem_cfg.num_txfifo_entries = ARRAY_SIZE(mem_cfg->txfifo_size);
        for (i = 0; i < ARRAY_SIZE(mem_cfg->txfifo_size); i++)
-               mvm->shared_mem_cfg.txfifo_size[i] =
+               mvm->smem_cfg.lmac[0].txfifo_size[i] =
                        le32_to_cpu(mem_cfg->txfifo_size[i]);
-       for (i = 0; i < ARRAY_SIZE(mvm->shared_mem_cfg.rxfifo_size); i++)
-               mvm->shared_mem_cfg.rxfifo_size[i] =
-                       le32_to_cpu(mem_cfg->rxfifo_size[i]);
+
+       mvm->smem_cfg.lmac[0].rxfifo1_size =
+               le32_to_cpu(mem_cfg->rxfifo_size[0]);
+       mvm->smem_cfg.rxfifo2_size = le32_to_cpu(mem_cfg->rxfifo_size[1]);
 
        /* new API has more data, from rxfifo_addr field and on */
        if (fw_has_capa(&mvm->fw->ucode_capa,
                        IWL_UCODE_TLV_CAPA_EXTEND_SHARED_MEM_CFG)) {
-               BUILD_BUG_ON(sizeof(mvm->shared_mem_cfg.internal_txfifo_size) !=
+               BUILD_BUG_ON(sizeof(mvm->smem_cfg.internal_txfifo_size) !=
                             sizeof(mem_cfg->internal_txfifo_size));
 
                for (i = 0;
-                    i < ARRAY_SIZE(mvm->shared_mem_cfg.internal_txfifo_size);
+                    i < ARRAY_SIZE(mvm->smem_cfg.internal_txfifo_size);
                     i++)
-                       mvm->shared_mem_cfg.internal_txfifo_size[i] =
+                       mvm->smem_cfg.internal_txfifo_size[i] =
                                le32_to_cpu(mem_cfg->internal_txfifo_size[i]);
        }
 }
@@ -969,85 +1010,94 @@ static int iwl_mvm_config_ltr(struct iwl_mvm *mvm)
                                    sizeof(cmd), &cmd);
 }
 
-#define ACPI_WRDS_METHOD       "WRDS"
-#define ACPI_WRDS_WIFI         (0x07)
-#define ACPI_WRDS_TABLE_SIZE   10
+#ifdef CONFIG_ACPI
+#define ACPI_WRDS_METHOD               "WRDS"
+#define ACPI_EWRD_METHOD               "EWRD"
+#define ACPI_WGDS_METHOD               "WGDS"
+#define ACPI_WIFI_DOMAIN               (0x07)
+#define ACPI_WRDS_WIFI_DATA_SIZE       (IWL_MVM_SAR_TABLE_SIZE + 2)
+#define ACPI_EWRD_WIFI_DATA_SIZE       ((IWL_MVM_SAR_PROFILE_NUM - 1) * \
+                                        IWL_MVM_SAR_TABLE_SIZE + 3)
+#define ACPI_WGDS_WIFI_DATA_SIZE       18
+#define ACPI_WGDS_NUM_BANDS            2
+#define ACPI_WGDS_TABLE_SIZE           3
+
+static int iwl_mvm_sar_set_profile(struct iwl_mvm *mvm,
+                                  union acpi_object *table,
+                                  struct iwl_mvm_sar_profile *profile,
+                                  bool enabled)
+{
+       int i;
 
-struct iwl_mvm_sar_table {
-       bool enabled;
-       u8 values[ACPI_WRDS_TABLE_SIZE];
-};
+       profile->enabled = enabled;
 
-#ifdef CONFIG_ACPI
-static int iwl_mvm_sar_get_wrds(struct iwl_mvm *mvm, union acpi_object *wrds,
-                               struct iwl_mvm_sar_table *sar_table)
+       for (i = 0; i < IWL_MVM_SAR_TABLE_SIZE; i++) {
+               if ((table[i].type != ACPI_TYPE_INTEGER) ||
+                   (table[i].integer.value > U8_MAX))
+                       return -EINVAL;
+
+               profile->table[i] = table[i].integer.value;
+       }
+
+       return 0;
+}
+
+static union acpi_object *iwl_mvm_sar_find_wifi_pkg(struct iwl_mvm *mvm,
+                                                   union acpi_object *data,
+                                                   int data_size)
 {
-       union acpi_object *data_pkg;
-       u32 i;
+       int i;
+       union acpi_object *wifi_pkg;
 
-       /* We need at least two packages, one for the revision and one
+       /*
+        * We need at least two packages, one for the revision and one
         * for the data itself.  Also check that the revision is valid
         * (i.e. it is an integer set to 0).
-       */
-       if (wrds->type != ACPI_TYPE_PACKAGE ||
-           wrds->package.count < 2 ||
-           wrds->package.elements[0].type != ACPI_TYPE_INTEGER ||
-           wrds->package.elements[0].integer.value != 0) {
-               IWL_DEBUG_RADIO(mvm, "Unsupported wrds structure\n");
-               return -EINVAL;
+        */
+       if (data->type != ACPI_TYPE_PACKAGE ||
+           data->package.count < 2 ||
+           data->package.elements[0].type != ACPI_TYPE_INTEGER ||
+           data->package.elements[0].integer.value != 0) {
+               IWL_DEBUG_RADIO(mvm, "Unsupported packages structure\n");
+               return ERR_PTR(-EINVAL);
        }
 
        /* loop through all the packages to find the one for WiFi */
-       for (i = 1; i < wrds->package.count; i++) {
+       for (i = 1; i < data->package.count; i++) {
                union acpi_object *domain;
 
-               data_pkg = &wrds->package.elements[i];
+               wifi_pkg = &data->package.elements[i];
 
                /* Skip anything that is not a package with the right
                 * amount of elements (i.e. domain_type,
-                * enabled/disabled plus the sar table size.
+                * enabled/disabled plus the actual data size.
                 */
-               if (data_pkg->type != ACPI_TYPE_PACKAGE ||
-                   data_pkg->package.count != ACPI_WRDS_TABLE_SIZE + 2)
+               if (wifi_pkg->type != ACPI_TYPE_PACKAGE ||
+                   wifi_pkg->package.count != data_size)
                        continue;
 
-               domain = &data_pkg->package.elements[0];
+               domain = &wifi_pkg->package.elements[0];
                if (domain->type == ACPI_TYPE_INTEGER &&
-                   domain->integer.value == ACPI_WRDS_WIFI)
+                   domain->integer.value == ACPI_WIFI_DOMAIN)
                        break;
 
-               data_pkg = NULL;
+               wifi_pkg = NULL;
        }
 
-       if (!data_pkg)
-               return -ENOENT;
+       if (!wifi_pkg)
+               return ERR_PTR(-ENOENT);
 
-       if (data_pkg->package.elements[1].type != ACPI_TYPE_INTEGER)
-               return -EINVAL;
-
-       sar_table->enabled = !!(data_pkg->package.elements[1].integer.value);
-
-       for (i = 0; i < ACPI_WRDS_TABLE_SIZE; i++) {
-               union acpi_object *entry;
-
-               entry = &data_pkg->package.elements[i + 2];
-               if ((entry->type != ACPI_TYPE_INTEGER) ||
-                   (entry->integer.value > U8_MAX))
-                       return -EINVAL;
-
-               sar_table->values[i] = entry->integer.value;
-       }
-
-       return 0;
+       return wifi_pkg;
 }
 
-static int iwl_mvm_sar_get_table(struct iwl_mvm *mvm,
-                                struct iwl_mvm_sar_table *sar_table)
+static int iwl_mvm_sar_get_wrds_table(struct iwl_mvm *mvm)
 {
+       union acpi_object *wifi_pkg, *table;
        acpi_handle root_handle;
        acpi_handle handle;
        struct acpi_buffer wrds = {ACPI_ALLOCATE_BUFFER, NULL};
        acpi_status status;
+       bool enabled;
        int ret;
 
        root_handle = ACPI_HANDLE(mvm->dev);
@@ -1072,62 +1122,301 @@ static int iwl_mvm_sar_get_table(struct iwl_mvm *mvm,
                return -ENOENT;
        }
 
-       ret = iwl_mvm_sar_get_wrds(mvm, wrds.pointer, sar_table);
+       wifi_pkg = iwl_mvm_sar_find_wifi_pkg(mvm, wrds.pointer,
+                                            ACPI_WRDS_WIFI_DATA_SIZE);
+       if (IS_ERR(wifi_pkg)) {
+               ret = PTR_ERR(wifi_pkg);
+               goto out_free;
+       }
+
+       if (wifi_pkg->package.elements[1].type != ACPI_TYPE_INTEGER) {
+               ret = -EINVAL;
+               goto out_free;
+       }
+
+       enabled = !!(wifi_pkg->package.elements[1].integer.value);
+
+       /* position of the actual table */
+       table = &wifi_pkg->package.elements[2];
+
+       /* The profile from WRDS is officially profile 1, but goes
+        * into sar_profiles[0] (because we don't have a profile 0).
+        */
+       ret = iwl_mvm_sar_set_profile(mvm, table, &mvm->sar_profiles[0],
+                                     enabled);
+
+out_free:
        kfree(wrds.pointer);
+       return ret;
+}
+
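
The comment above encodes an off-by-one worth making explicit: ACPI numbers the SAR profiles from 1 (WRDS carries profile 1; the EWRD function below carries profiles 2 through 4), while the driver array is 0-based. As a one-line sketch:

    /* ACPI SAR profiles are 1-based (WRDS = 1, EWRD = 2..4); the driver
     * array is 0-based, hence the shift. */
    static inline int sar_profile_index(int acpi_profile)
    {
            return acpi_profile - 1;        /* profile 1 -> sar_profiles[0] */
    }
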
+static int iwl_mvm_sar_get_ewrd_table(struct iwl_mvm *mvm)
+{
+       union acpi_object *wifi_pkg;
+       acpi_handle root_handle;
+       acpi_handle handle;
+       struct acpi_buffer ewrd = {ACPI_ALLOCATE_BUFFER, NULL};
+       acpi_status status;
+       bool enabled;
+       int i, n_profiles, pos, ret = 0;
 
+       root_handle = ACPI_HANDLE(mvm->dev);
+       if (!root_handle) {
+               IWL_DEBUG_RADIO(mvm,
+                               "Could not retrieve root port ACPI handle\n");
+               return -ENOENT;
+       }
+
+       /* Get the method's handle */
+       status = acpi_get_handle(root_handle, (acpi_string)ACPI_EWRD_METHOD,
+                                &handle);
+       if (ACPI_FAILURE(status)) {
+               IWL_DEBUG_RADIO(mvm, "EWRD method not found\n");
+               return -ENOENT;
+       }
+
+       /* Call EWRD with no arguments */
+       status = acpi_evaluate_object(handle, NULL, NULL, &ewrd);
+       if (ACPI_FAILURE(status)) {
+               IWL_DEBUG_RADIO(mvm, "EWRD invocation failed (0x%x)\n", status);
+               return -ENOENT;
+       }
+
+       wifi_pkg = iwl_mvm_sar_find_wifi_pkg(mvm, ewrd.pointer,
+                                            ACPI_EWRD_WIFI_DATA_SIZE);
+       if (IS_ERR(wifi_pkg)) {
+               ret = PTR_ERR(wifi_pkg);
+               goto out_free;
+       }
+
+       if ((wifi_pkg->package.elements[1].type != ACPI_TYPE_INTEGER) ||
+           (wifi_pkg->package.elements[2].type != ACPI_TYPE_INTEGER)) {
+               ret = -EINVAL;
+               goto out_free;
+       }
+
+       enabled = !!(wifi_pkg->package.elements[1].integer.value);
+       n_profiles = wifi_pkg->package.elements[2].integer.value;
+
+       /* the tables start at element 3 */
+       pos = 3;
+
+       for (i = 0; i < n_profiles; i++) {
+
+               /* The EWRD profiles officially go from 2 to 4, but we
+                * save them in sar_profiles[1-3] (because we don't
+                * have profile 0).  So in the array we start from 1.
+                */
+               ret = iwl_mvm_sar_set_profile(mvm,
+                                             &wifi_pkg->package.elements[pos],
+                                             &mvm->sar_profiles[i + 1],
+                                             enabled);
+               if (ret < 0)
+                       break;
+
+               /* go to the next table */
+               pos += IWL_MVM_SAR_TABLE_SIZE;
+       }
+
+out_free:
+       kfree(ewrd.pointer);
        return ret;
 }
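
In the wifi package parsed above, element 1 is the enabled flag,
element 2 the profile count, and the per-profile tables sit back to
back starting at element 3. Under those assumptions the first element
of the k-th EWRD table (k counted from 0) is simply:

        /* sketch: start element of the k-th EWRD table */
        pos = 3 + k * IWL_MVM_SAR_TABLE_SIZE;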
-#else /* CONFIG_ACPI */
-static int iwl_mvm_sar_get_table(struct iwl_mvm *mvm,
-                                struct iwl_mvm_sar_table *sar_table)
+
+static int iwl_mvm_sar_get_wgds_table(struct iwl_mvm *mvm,
+                                     struct iwl_mvm_geo_table *geo_table)
 {
-       return -ENOENT;
+       union acpi_object *wifi_pkg;
+       acpi_handle root_handle;
+       acpi_handle handle;
+       struct acpi_buffer wgds = {ACPI_ALLOCATE_BUFFER, NULL};
+       acpi_status status;
+       int i, ret;
+
+       root_handle = ACPI_HANDLE(mvm->dev);
+       if (!root_handle) {
+               IWL_DEBUG_RADIO(mvm,
+                               "Could not retrieve root port ACPI handle\n");
+               return -ENOENT;
+       }
+
+       /* Get the method's handle */
+       status = acpi_get_handle(root_handle, (acpi_string)ACPI_WGDS_METHOD,
+                                &handle);
+       if (ACPI_FAILURE(status)) {
+               IWL_DEBUG_RADIO(mvm, "WGDS method not found\n");
+               return -ENOENT;
+       }
+
+       /* Call WGDS with no arguments */
+       status = acpi_evaluate_object(handle, NULL, NULL, &wgds);
+       if (ACPI_FAILURE(status)) {
+               IWL_DEBUG_RADIO(mvm, "WGDS invocation failed (0x%x)\n", status);
+               return -ENOENT;
+       }
+
+       wifi_pkg = iwl_mvm_sar_find_wifi_pkg(mvm, wgds.pointer,
+                                            ACPI_WGDS_WIFI_DATA_SIZE);
+       if (IS_ERR(wifi_pkg)) {
+               ret = PTR_ERR(wifi_pkg);
+               goto out_free;
+       }
+
+       for (i = 0; i < ACPI_WGDS_WIFI_DATA_SIZE; i++) {
+               union acpi_object *entry;
+
+               entry = &wifi_pkg->package.elements[i + 1];
+               if ((entry->type != ACPI_TYPE_INTEGER) ||
+                   (entry->integer.value > U8_MAX)) {
+                       ret = -EINVAL;
+                       goto out_free;
+               }
+
+               geo_table->values[i] = entry->integer.value;
+       }
+       ret = 0;
+out_free:
+       kfree(wgds.pointer);
+       return ret;
 }
-#endif /* CONFIG_ACPI */
 
-static int iwl_mvm_sar_init(struct iwl_mvm *mvm)
+int iwl_mvm_sar_select_profile(struct iwl_mvm *mvm, int prof_a, int prof_b)
 {
-       struct iwl_mvm_sar_table sar_table;
        struct iwl_dev_tx_power_cmd cmd = {
                .v3.set_mode = cpu_to_le32(IWL_TX_POWER_MODE_SET_CHAINS),
        };
-       int ret, i, j, idx;
+       int i, j, idx;
+       int profs[IWL_NUM_CHAIN_LIMITS] = { prof_a, prof_b };
        int len = sizeof(cmd);
 
+       BUILD_BUG_ON(IWL_NUM_CHAIN_LIMITS < 2);
+       BUILD_BUG_ON(IWL_NUM_CHAIN_LIMITS * IWL_NUM_SUB_BANDS !=
+                    IWL_MVM_SAR_TABLE_SIZE);
+
        if (!fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_TX_POWER_ACK))
                len = sizeof(cmd.v3);
 
-       ret = iwl_mvm_sar_get_table(mvm, &sar_table);
+       for (i = 0; i < IWL_NUM_CHAIN_LIMITS; i++) {
+               struct iwl_mvm_sar_profile *prof;
+
+               /* don't allow SAR to be disabled (profile 0 means disable) */
+               if (profs[i] == 0)
+                       return -EPERM;
+
+               /* we are off by one, so allow up to IWL_MVM_SAR_PROFILE_NUM */
+               if (profs[i] > IWL_MVM_SAR_PROFILE_NUM)
+                       return -EINVAL;
+
+               /* profiles go from 1 to 4, so decrement to access the array */
+               prof = &mvm->sar_profiles[profs[i] - 1];
+
+               /* if the profile is disabled, do nothing */
+               if (!prof->enabled) {
+                       IWL_DEBUG_RADIO(mvm, "SAR profile %d is disabled.\n",
+                                       profs[i]);
+                       /* if one of the profiles is disabled, we fail all */
+                       return -ENOENT;
+               }
+
+               IWL_DEBUG_RADIO(mvm, "  Chain[%d]:\n", i);
+               for (j = 0; j < IWL_NUM_SUB_BANDS; j++) {
+                       idx = (i * IWL_NUM_SUB_BANDS) + j;
+                       cmd.v3.per_chain_restriction[i][j] =
+                               cpu_to_le16(prof->table[idx]);
+                       IWL_DEBUG_RADIO(mvm, "    Band[%d] = %d * .125dBm\n",
+                                       j, prof->table[idx]);
+               }
+       }
+
+       IWL_DEBUG_RADIO(mvm, "Sending REDUCE_TX_POWER_CMD per chain\n");
+
+       return iwl_mvm_send_cmd_pdu(mvm, REDUCE_TX_POWER_CMD, 0, len, &cmd);
+}
+
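The loop above flattens (chain, sub-band) into one table index. With
IWL_NUM_CHAIN_LIMITS == 2 and IWL_NUM_SUB_BANDS == 5 (an assumption
consistent with the BUILD_BUG_ON against IWL_MVM_SAR_TABLE_SIZE == 10),
the layout is chain-major:

        /* sketch, values assumed: chain 0 -> idx 0..4, chain 1 -> idx 5..9 */
        idx = (i * IWL_NUM_SUB_BANDS) + j;      /* i=1, j=2 -> idx 7 */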
+static int iwl_mvm_sar_geo_init(struct iwl_mvm *mvm)
+{
+       struct iwl_mvm_geo_table geo_table;
+       struct iwl_geo_tx_power_profiles_cmd cmd = {
+               .ops = cpu_to_le32(IWL_PER_CHAIN_OFFSET_SET_TABLES),
+       };
+       int ret, i, j, idx;
+       u16 cmd_wide_id = WIDE_ID(PHY_OPS_GROUP, GEO_TX_POWER_LIMIT);
+
+       ret = iwl_mvm_sar_get_wgds_table(mvm, &geo_table);
        if (ret < 0) {
                IWL_DEBUG_RADIO(mvm,
-                               "SAR BIOS table invalid or unavailable. (%d)\n",
+                               "Geo SAR BIOS table invalid or unavailable. (%d)\n",
                                ret);
                /* we don't fail if the table is not available */
                return 0;
        }
 
-       if (!sar_table.enabled)
-               return 0;
+       IWL_DEBUG_RADIO(mvm, "Sending GEO_TX_POWER_LIMIT\n");
 
-       IWL_DEBUG_RADIO(mvm, "Sending REDUCE_TX_POWER_CMD per chain\n");
+       BUILD_BUG_ON(IWL_NUM_GEO_PROFILES * ACPI_WGDS_NUM_BANDS *
+                    ACPI_WGDS_TABLE_SIZE != ACPI_WGDS_WIFI_DATA_SIZE);
 
-       BUILD_BUG_ON(IWL_NUM_CHAIN_LIMITS * IWL_NUM_SUB_BANDS !=
-                    ACPI_WRDS_TABLE_SIZE);
+       for (i = 0; i < IWL_NUM_GEO_PROFILES; i++) {
+               struct iwl_per_chain_offset *chain =
+                       (struct iwl_per_chain_offset *)&cmd.table[i];
 
-       for (i = 0; i < IWL_NUM_CHAIN_LIMITS; i++) {
-               IWL_DEBUG_RADIO(mvm, "  Chain[%d]:\n", i);
-               for (j = 0; j < IWL_NUM_SUB_BANDS; j++) {
-                       idx = (i * IWL_NUM_SUB_BANDS) + j;
-                       cmd.v3.per_chain_restriction[i][j] =
-                               cpu_to_le16(sar_table.values[idx]);
-                       IWL_DEBUG_RADIO(mvm, "    Band[%d] = %d * .125dBm\n",
-                                       j, sar_table.values[idx]);
+               for (j = 0; j < ACPI_WGDS_NUM_BANDS; j++) {
+                       u8 *value;
+
+                       idx = i * ACPI_WGDS_NUM_BANDS * ACPI_WGDS_TABLE_SIZE +
+                               j * ACPI_WGDS_TABLE_SIZE;
+                       value = &geo_table.values[idx];
+                       chain[j].max_tx_power = cpu_to_le16(value[0]);
+                       chain[j].chain_a = value[1];
+                       chain[j].chain_b = value[2];
+                       IWL_DEBUG_RADIO(mvm,
+                                       "SAR geographic profile[%d] Band[%d]: chain A = %d chain B = %d max_tx_power = %d\n",
+                                       i, j, value[1], value[2], value[0]);
                }
        }
+       return iwl_mvm_send_cmd_pdu(mvm, cmd_wide_id, 0, sizeof(cmd), &cmd);
+}
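
The geo table uses the same flattening one level deeper: profile-major,
then band, with three values (max_tx_power, chain_a, chain_b) per band
entry. A standalone user-space sketch of the index math; the constants
are assumptions chosen to satisfy the BUILD_BUG_ON above for a data
size of 18 (IWL_MVM_GEO_TABLE_SIZE):

        #include <stdio.h>

        #define N_PROFILES 3    /* IWL_NUM_GEO_PROFILES, assumed */
        #define N_BANDS    2    /* ACPI_WGDS_NUM_BANDS, assumed */
        #define TBL_SIZE   3    /* max_tx_power, chain_a, chain_b */

        int main(void)
        {
                int i, j;

                for (i = 0; i < N_PROFILES; i++)
                        for (j = 0; j < N_BANDS; j++) {
                                int idx = i * N_BANDS * TBL_SIZE +
                                          j * TBL_SIZE;

                                printf("profile %d band %d -> values[%d..%d]\n",
                                       i, j, idx, idx + TBL_SIZE - 1);
                        }
                return 0;
        }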
 
-       ret = iwl_mvm_send_cmd_pdu(mvm, REDUCE_TX_POWER_CMD, 0, len, &cmd);
-       if (ret)
-               IWL_ERR(mvm, "failed to set per-chain TX power: %d\n", ret);
+#else /* CONFIG_ACPI */
+static int iwl_mvm_sar_get_wrds_table(struct iwl_mvm *mvm)
+{
+       return -ENOENT;
+}
+
+static int iwl_mvm_sar_get_ewrd_table(struct iwl_mvm *mvm)
+{
+       return -ENOENT;
+}
+
+static int iwl_mvm_sar_geo_init(struct iwl_mvm *mvm)
+{
+       return 0;
+}
+#endif /* CONFIG_ACPI */
+
+static int iwl_mvm_sar_init(struct iwl_mvm *mvm)
+{
+       int ret;
+
+       ret = iwl_mvm_sar_get_wrds_table(mvm);
+       if (ret < 0) {
+               IWL_DEBUG_RADIO(mvm,
+                               "WRDS SAR BIOS table invalid or unavailable. (%d)\n",
+                               ret);
+               /* if not available, don't fail and don't bother with EWRD */
+               return 0;
+       }
+
+       ret = iwl_mvm_sar_get_ewrd_table(mvm);
+       /* if EWRD is not available, we can still use WRDS, so don't fail */
+       if (ret < 0)
+               IWL_DEBUG_RADIO(mvm,
+                               "EWRD SAR BIOS table invalid or unavailable. (%d)\n",
+                               ret);
+
+       /* choose profile 1 (WRDS) as default for both chains */
+       ret = iwl_mvm_sar_select_profile(mvm, 1, 1);
+
+       /* if we don't have profile 1 (WRDS) from the BIOS, just skip it */
+       if (ret == -ENOENT)
+               return 0;
 
        return ret;
 }
@@ -1219,7 +1508,8 @@ int iwl_mvm_up(struct iwl_mvm *mvm)
        }
 
        /* Init RSS configuration */
-       if (iwl_mvm_has_new_rx_api(mvm)) {
+       /* TODO - remove a000 disablement when we have RXQ config API */
+       if (iwl_mvm_has_new_rx_api(mvm) && !iwl_mvm_has_new_tx_api(mvm)) {
                ret = iwl_send_rss_cfg_cmd(mvm);
                if (ret) {
                        IWL_ERR(mvm, "Failed to configure RSS queues: %d\n",
@@ -1229,10 +1519,10 @@ int iwl_mvm_up(struct iwl_mvm *mvm)
        }
 
        /* init the fw <-> mac80211 STA mapping */
-       for (i = 0; i < IWL_MVM_STATION_COUNT; i++)
+       for (i = 0; i < ARRAY_SIZE(mvm->fw_id_to_mac_id); i++)
                RCU_INIT_POINTER(mvm->fw_id_to_mac_id[i], NULL);
 
-       mvm->tdls_cs.peer.sta_id = IWL_MVM_STATION_COUNT;
+       mvm->tdls_cs.peer.sta_id = IWL_MVM_INVALID_STA;
 
        /* reset quota debouncing buffer - 0xff will yield invalid data */
        memset(&mvm->last_quota_cmd, 0xff, sizeof(mvm->last_quota_cmd));
@@ -1313,10 +1603,6 @@ int iwl_mvm_up(struct iwl_mvm *mvm)
                        goto error;
        }
 
-       if (iwl_mvm_is_csum_supported(mvm) &&
-           mvm->cfg->features & NETIF_F_RXCSUM)
-               iwl_trans_write_prph(mvm->trans, RX_EN_CSUM, 0x3);
-
        /* allow FW/transport low power modes if not during restart */
        if (!test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status))
                iwl_mvm_unref(mvm, IWL_MVM_REF_UCODE_DOWN);
@@ -1325,6 +1611,10 @@ int iwl_mvm_up(struct iwl_mvm *mvm)
        if (ret)
                goto error;
 
+       ret = iwl_mvm_sar_geo_init(mvm);
+       if (ret)
+               goto error;
+
        IWL_DEBUG_INFO(mvm, "RT uCode started.\n");
        return 0;
  error:
@@ -1362,7 +1652,7 @@ int iwl_mvm_load_d3_fw(struct iwl_mvm *mvm)
                goto error;
 
        /* init the fw <-> mac80211 STA mapping */
-       for (i = 0; i < IWL_MVM_STATION_COUNT; i++)
+       for (i = 0; i < ARRAY_SIZE(mvm->fw_id_to_mac_id); i++)
                RCU_INIT_POINTER(mvm->fw_id_to_mac_id[i], NULL);
 
        /* Add auxiliary station for scanning */
index 99132ea16ede08e0e7ebd5f8734eeb0ab204e0fa..9e69b9d2012cca424abb5fffd4763670e09d90ce 100644 (file)
@@ -216,7 +216,8 @@ u32 iwl_mvm_mac_get_queues_mask(struct ieee80211_vif *vif)
                        qmask |= BIT(vif->hw_queue[ac]);
        }
 
-       if (vif->type == NL80211_IFTYPE_AP)
+       if (vif->type == NL80211_IFTYPE_AP ||
+           vif->type == NL80211_IFTYPE_ADHOC)
                qmask |= BIT(vif->cab_queue);
 
        return qmask;
@@ -471,8 +472,9 @@ static int iwl_mvm_mac_ctxt_allocate_resources(struct iwl_mvm *mvm,
                vif->cab_queue = IEEE80211_INVAL_HW_QUEUE;
        }
 
-       mvmvif->bcast_sta.sta_id = IWL_MVM_STATION_COUNT;
-       mvmvif->ap_sta_id = IWL_MVM_STATION_COUNT;
+       mvmvif->bcast_sta.sta_id = IWL_MVM_INVALID_STA;
+       mvmvif->mcast_sta.sta_id = IWL_MVM_INVALID_STA;
+       mvmvif->ap_sta_id = IWL_MVM_INVALID_STA;
 
        for (i = 0; i < NUM_IWL_MVM_SMPS_REQ; i++)
                mvmvif->smps_requests[i] = IEEE80211_SMPS_AUTOMATIC;
@@ -1441,6 +1443,7 @@ void iwl_mvm_rx_beacon_notif(struct iwl_mvm *mvm,
        struct iwl_mvm_tx_resp *beacon_notify_hdr;
        struct ieee80211_vif *csa_vif;
        struct ieee80211_vif *tx_blocked_vif;
+       struct agg_tx_status *agg_status;
        u16 status;
 
        lockdep_assert_held(&mvm->mutex);
@@ -1448,7 +1451,8 @@ void iwl_mvm_rx_beacon_notif(struct iwl_mvm *mvm,
        beacon_notify_hdr = &beacon->beacon_notify_hdr;
        mvm->ap_last_beacon_gp2 = le32_to_cpu(beacon->gp2);
 
-       status = le16_to_cpu(beacon_notify_hdr->status.status) & TX_STATUS_MSK;
+       agg_status = iwl_mvm_get_agg_status(mvm, beacon_notify_hdr);
+       status = le16_to_cpu(agg_status->status) & TX_STATUS_MSK;
        IWL_DEBUG_RX(mvm,
                     "beacon status %#x retries:%d tsf:0x%16llX gp2:0x%X rate:%d\n",
                     status, beacon_notify_hdr->failure_frame,
index d37b1695c64eac9096cfe7c11b3ef06764d4626a..f35f295d0c819ab4d62f805cf8fadf83fce04be5 100644 (file)
@@ -6,8 +6,8 @@
  * GPL LICENSE SUMMARY
  *
  * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
- * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
- * Copyright(c) 2016 Intel Deutschland GmbH
+ * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
+ * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of version 2 of the GNU General Public License as
@@ -33,7 +33,8 @@
  * BSD LICENSE
  *
  * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
- * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
+ * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
+ * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -766,7 +767,7 @@ static bool iwl_mvm_defer_tx(struct iwl_mvm *mvm,
                goto out;
 
        mvmsta = iwl_mvm_sta_from_mac80211(sta);
-       if (mvmsta->sta_id == IWL_MVM_STATION_COUNT ||
+       if (mvmsta->sta_id == IWL_MVM_INVALID_STA ||
            mvmsta->sta_id != mvm->d0i3_ap_sta_id)
                goto out;
 
@@ -1010,7 +1011,7 @@ static void iwl_mvm_cleanup_iterator(void *data, u8 *mac,
        struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
 
        mvmvif->uploaded = false;
-       mvmvif->ap_sta_id = IWL_MVM_STATION_COUNT;
+       mvmvif->ap_sta_id = IWL_MVM_INVALID_STA;
 
        spin_lock_bh(&mvm->time_event_lock);
        iwl_mvm_te_clear_data(mvm, &mvmvif->time_event_data);
@@ -1053,7 +1054,7 @@ static void iwl_mvm_restart_cleanup(struct iwl_mvm *mvm)
        ieee80211_iterate_interfaces(mvm->hw, 0, iwl_mvm_cleanup_iterator, mvm);
 
        mvm->p2p_device_vif = NULL;
-       mvm->d0i3_ap_sta_id = IWL_MVM_STATION_COUNT;
+       mvm->d0i3_ap_sta_id = IWL_MVM_INVALID_STA;
 
        iwl_mvm_reset_phy_ctxts(mvm);
        memset(mvm->fw_key_table, 0, sizeof(mvm->fw_key_table));
@@ -1351,6 +1352,17 @@ static int iwl_mvm_mac_add_interface(struct ieee80211_hw *hw,
                        goto out_release;
                }
 
+               if (iwl_mvm_is_dqa_supported(mvm)) {
+                       /*
+                        * The only queue for this station is the mcast
+                        * queue, which shouldn't be in the TFD mask anyway
+                        */
+                       ret = iwl_mvm_allocate_int_sta(mvm, &mvmvif->mcast_sta,
+                                                      0, vif->type);
+                       if (ret)
+                               goto out_release;
+               }
+
                iwl_mvm_vif_dbgfs_register(mvm, vif);
                goto out_unlock;
        }
@@ -1516,6 +1528,7 @@ static void iwl_mvm_mac_remove_interface(struct ieee80211_hw *hw,
                        mvm->noa_duration = 0;
                }
 #endif
+               iwl_mvm_dealloc_int_sta(mvm, &mvmvif->mcast_sta);
                iwl_mvm_dealloc_bcast_sta(mvm, vif);
                goto out_release;
        }
@@ -1952,7 +1965,7 @@ static void iwl_mvm_bss_info_changed_station(struct iwl_mvm *mvm,
                                                    IWL_MVM_SMPS_REQ_PROT,
                                                    IEEE80211_SMPS_DYNAMIC);
                        }
-               } else if (mvmvif->ap_sta_id != IWL_MVM_STATION_COUNT) {
+               } else if (mvmvif->ap_sta_id != IWL_MVM_INVALID_STA) {
                        /*
                         * If update fails - SF might be running in associated
                         * mode while disassociated - which is forbidden.
@@ -1966,8 +1979,8 @@ static void iwl_mvm_bss_info_changed_station(struct iwl_mvm *mvm,
                                IWL_ERR(mvm, "failed to remove AP station\n");
 
                        if (mvm->d0i3_ap_sta_id == mvmvif->ap_sta_id)
-                               mvm->d0i3_ap_sta_id = IWL_MVM_STATION_COUNT;
-                       mvmvif->ap_sta_id = IWL_MVM_STATION_COUNT;
+                               mvm->d0i3_ap_sta_id = IWL_MVM_INVALID_STA;
+                       mvmvif->ap_sta_id = IWL_MVM_INVALID_STA;
                        /* remove quota for this interface */
                        ret = iwl_mvm_update_quotas(mvm, false, NULL);
                        if (ret)
@@ -2104,6 +2117,10 @@ static int iwl_mvm_start_ap_ibss(struct ieee80211_hw *hw,
        if (ret)
                goto out_unbind;
 
+       ret = iwl_mvm_add_mcast_sta(mvm, vif);
+       if (ret)
+               goto out_rm_bcast;
+
        /* must be set before quota calculations */
        mvmvif->ap_ibss_active = true;
 
@@ -2131,6 +2148,8 @@ static int iwl_mvm_start_ap_ibss(struct ieee80211_hw *hw,
 out_quota_failed:
        iwl_mvm_power_update_mac(mvm);
        mvmvif->ap_ibss_active = false;
+       iwl_mvm_rm_mcast_sta(mvm, vif);
+out_rm_bcast:
        iwl_mvm_send_rm_bcast_sta(mvm, vif);
 out_unbind:
        iwl_mvm_binding_remove_vif(mvm, vif);
@@ -2177,6 +2196,7 @@ static void iwl_mvm_stop_ap_ibss(struct ieee80211_hw *hw,
                iwl_mvm_mac_ctxt_changed(mvm, mvm->p2p_device_vif, false, NULL);
 
        iwl_mvm_update_quotas(mvm, false, NULL);
+       iwl_mvm_rm_mcast_sta(mvm, vif);
        iwl_mvm_send_rm_bcast_sta(mvm, vif);
        iwl_mvm_binding_remove_vif(mvm, vif);
 
@@ -2319,7 +2339,7 @@ iwl_mvm_mac_release_buffered_frames(struct ieee80211_hw *hw,
 {
        struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
 
-       /* Called when we need to transmit (a) frame(s) from agg queue */
+       /* Called when we need to transmit (a) frame(s) from agg or dqa queue */
 
        iwl_mvm_sta_modify_sleep_tx_count(mvm, sta, reason, num_frames,
                                          tids, more_data, true);
@@ -2338,10 +2358,14 @@ static void __iwl_mvm_mac_sta_notify(struct ieee80211_hw *hw,
        for (tid = 0; tid < IWL_MAX_TID_COUNT; tid++) {
                struct iwl_mvm_tid_data *tid_data = &mvmsta->tid_data[tid];
 
-               if (tid_data->state != IWL_AGG_ON &&
+               if (!iwl_mvm_is_dqa_supported(mvm) &&
+                   tid_data->state != IWL_AGG_ON &&
                    tid_data->state != IWL_EMPTYING_HW_QUEUE_DELBA)
                        continue;
 
+               if (tid_data->txq_id == IEEE80211_INVAL_HW_QUEUE)
+                       continue;
+
                __set_bit(tid_data->txq_id, &txqs);
 
                if (iwl_mvm_tid_queued(tid_data) == 0)
@@ -2367,7 +2391,7 @@ static void __iwl_mvm_mac_sta_notify(struct ieee80211_hw *hw,
                 */
                break;
        case STA_NOTIFY_AWAKE:
-               if (WARN_ON(mvmsta->sta_id == IWL_MVM_STATION_COUNT))
+               if (WARN_ON(mvmsta->sta_id == IWL_MVM_INVALID_STA))
                        break;
 
                if (txqs)
@@ -2400,7 +2424,7 @@ void iwl_mvm_sta_pm_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb)
                return;
 
        rcu_read_lock();
-       sta = mvm->fw_id_to_mac_id[notif->sta_id];
+       sta = rcu_dereference(mvm->fw_id_to_mac_id[notif->sta_id]);
        if (WARN_ON(IS_ERR_OR_NULL(sta))) {
                rcu_read_unlock();
                return;
@@ -3937,7 +3961,7 @@ static void iwl_mvm_mac_flush(struct ieee80211_hw *hw,
        mvmvif = iwl_mvm_vif_from_mac80211(vif);
 
        /* flush the AP-station and all TDLS peers */
-       for (i = 0; i < IWL_MVM_STATION_COUNT; i++) {
+       for (i = 0; i < ARRAY_SIZE(mvm->fw_id_to_mac_id); i++) {
                sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[i],
                                                lockdep_is_held(&mvm->mutex));
                if (IS_ERR_OR_NULL(sta))
@@ -4194,7 +4218,8 @@ void iwl_mvm_sync_rx_queues_internal(struct iwl_mvm *mvm,
 
        lockdep_assert_held(&mvm->mutex);
 
-       if (!iwl_mvm_has_new_rx_api(mvm))
+       /* TODO - remove a000 disablement when we have RXQ config API */
+       if (!iwl_mvm_has_new_rx_api(mvm) || iwl_mvm_has_new_tx_api(mvm))
                return;
 
        notif->cookie = mvm->queue_sync_cookie;
index 73a216524af2984a07aa321f152df12013314e9c..1938dfb44152531d05aaeee32c3eafbad30d92ab 100644 (file)
@@ -7,7 +7,7 @@
  *
  * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
  * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
- * Copyright(c) 2016        Intel Deutschland GmbH
+ * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of version 2 of the GNU General Public License as
@@ -34,7 +34,7 @@
  *
  * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
  * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
- * Copyright(c) 2016        Intel Deutschland GmbH
+ * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -407,6 +407,7 @@ struct iwl_mvm_vif {
        struct iwl_mvm_time_event_data hs_time_event_data;
 
        struct iwl_mvm_int_sta bcast_sta;
+       struct iwl_mvm_int_sta mcast_sta;
 
        /*
         * Assigned while mac80211 has the interface in a channel context,
@@ -603,10 +604,15 @@ enum iwl_mvm_tdls_cs_state {
        IWL_MVM_TDLS_SW_ACTIVE,
 };
 
+#define MAX_NUM_LMAC 2
 struct iwl_mvm_shared_mem_cfg {
+       int num_lmacs;
        int num_txfifo_entries;
-       u32 txfifo_size[TX_FIFO_MAX_NUM];
-       u32 rxfifo_size[RX_FIFO_MAX_NUM];
+       struct {
+               u32 txfifo_size[TX_FIFO_MAX_NUM];
+               u32 rxfifo1_size;
+       } lmac[MAX_NUM_LMAC];
+       u32 rxfifo2_size;
        u32 internal_txfifo_addr;
        u32 internal_txfifo_size[TX_FIFO_INTERNAL_MAX_NUM];
 };
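
With the FIFO sizes now grouped per LMAC, consumers iterate over
num_lmacs instead of assuming a single FIFO set; RX FIFO 2 stays
outside the array because it is shared. An illustrative fragment
(assumes an initialized smem_cfg, not taken from the patch):

        u32 rxfifo1_total = 0;
        int i;

        for (i = 0; i < mvm->smem_cfg.num_lmacs; i++)
                rxfifo1_total += mvm->smem_cfg.lmac[i].rxfifo1_size;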
@@ -625,6 +631,7 @@ struct iwl_mvm_shared_mem_cfg {
  * @reorder_timer: timer for frames that are in the reorder buffer. For AMSDU
  *     it is the time of last received sub-frame
  * @removed: prevent timer re-arming
+ * @valid: reordering is valid for this queue
  * @lock: protect reorder buffer internal state
  * @mvm: mvm pointer, needed for frame timer context
  */
@@ -640,6 +647,7 @@ struct iwl_mvm_reorder_buffer {
        unsigned long reorder_time[IEEE80211_MAX_AMPDU_BUF];
        struct timer_list reorder_timer;
        bool removed;
+       bool valid;
        spinlock_t lock;
        struct iwl_mvm *mvm;
 } ____cacheline_aligned_in_smp;
@@ -709,6 +717,21 @@ enum iwl_mvm_queue_status {
 #define IWL_MVM_DQA_QUEUE_TIMEOUT      (5 * HZ)
 #define IWL_MVM_NUM_CIPHERS             10
 
+#ifdef CONFIG_ACPI
+#define IWL_MVM_SAR_TABLE_SIZE         10
+#define IWL_MVM_SAR_PROFILE_NUM                4
+#define IWL_MVM_GEO_TABLE_SIZE         18
+
+struct iwl_mvm_sar_profile {
+       bool enabled;
+       u8 table[IWL_MVM_SAR_TABLE_SIZE];
+};
+
+struct iwl_mvm_geo_table {
+       u8 values[IWL_MVM_GEO_TABLE_SIZE];
+};
+#endif
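
The two sizes follow from the command layouts: the SAR table is
per-profile (chains times sub-bands), the geo table covers all geo
profiles at once. A sketch of the arithmetic, with the factorizations
assumed (the BUILD_BUG_ONs in fw.c check them against the real
constants):

        /* assumed factorizations:
         *   IWL_MVM_SAR_TABLE_SIZE = 2 chains * 5 sub-bands   = 10
         *   IWL_MVM_GEO_TABLE_SIZE = 3 profiles * 2 bands * 3 = 18
         */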
+
 struct iwl_mvm {
        /* for logger access */
        struct device *dev;
@@ -975,7 +998,10 @@ struct iwl_mvm {
 #endif
 
        /* Tx queues */
-       u8 aux_queue;
+       u16 aux_queue;
+       u16 probe_queue;
+       u16 p2p_dev_queue;
+
        u8 first_agg_queue;
        u8 last_agg_queue;
 
@@ -1018,7 +1044,7 @@ struct iwl_mvm {
                } peer;
        } tdls_cs;
 
-       struct iwl_mvm_shared_mem_cfg shared_mem_cfg;
+       struct iwl_mvm_shared_mem_cfg smem_cfg;
 
        u32 ciphers[IWL_MVM_NUM_CIPHERS];
        struct ieee80211_cipher_scheme cs[IWL_UCODE_MAX_CS];
@@ -1035,6 +1061,9 @@ struct iwl_mvm {
        bool drop_bcn_ap_mode;
 
        struct delayed_work cs_tx_unblock_dwork;
+#ifdef CONFIG_ACPI
+       struct iwl_mvm_sar_profile sar_profiles[IWL_MVM_SAR_PROFILE_NUM];
+#endif
 };
 
 /* Extract MVM priv from op_mode and _hw */
@@ -1222,13 +1251,25 @@ static inline bool iwl_mvm_is_cdb_supported(struct iwl_mvm *mvm)
 {
        /*
         * TODO:
-        * The issue of how to determine CDB support is still not well defined.
-        * It may be that it will be for all next HW devices and it may be per
-        * FW compilation and it may also differ between different devices.
-        * For now take a ride on the new TX API and get back to it when
-        * it is well defined.
+        * The issue of how to determine CDB APIs and usage is still not
+        * fully defined.
+        * There are separate compilations for CDB and non-CDB firmware,
+        * but there may also be a runtime check.
+        * For now there is a TLV for checking the compilation mode, and a
+        * runtime check will also have to be added here once it is defined.
         */
-       return iwl_mvm_has_new_tx_api(mvm);
+       return fw_has_capa(&mvm->fw->ucode_capa,
+                          IWL_UCODE_TLV_CAPA_CDB_SUPPORT);
+}
+
+static inline struct agg_tx_status*
+iwl_mvm_get_agg_status(struct iwl_mvm *mvm,
+                      struct iwl_mvm_tx_resp *tx_resp)
+{
+       if (iwl_mvm_has_new_tx_api(mvm))
+               return &tx_resp->v6.status;
+       else
+               return &tx_resp->v3.status;
 }
 
 static inline bool iwl_mvm_is_tt_in_fw(struct iwl_mvm *mvm)
@@ -1389,6 +1430,8 @@ int iwl_mvm_notify_rx_queue(struct iwl_mvm *mvm, u32 rxq_mask,
 void iwl_mvm_rx_queue_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
                            int queue);
 void iwl_mvm_rx_tx_cmd(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb);
+void iwl_mvm_mfu_assert_dump_notif(struct iwl_mvm *mvm,
+                                  struct iwl_rx_cmd_buffer *rxb);
 void iwl_mvm_rx_ba_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb);
 void iwl_mvm_rx_ant_coupling_notif(struct iwl_mvm *mvm,
                                   struct iwl_rx_cmd_buffer *rxb);
@@ -1668,6 +1711,9 @@ static inline bool iwl_mvm_vif_low_latency(struct iwl_mvm_vif *mvmvif)
 void iwl_mvm_enable_txq(struct iwl_mvm *mvm, int queue, int mac80211_queue,
                        u16 ssn, const struct iwl_trans_txq_scd_cfg *cfg,
                        unsigned int wdg_timeout);
+int iwl_mvm_tvqm_enable_txq(struct iwl_mvm *mvm, int mac80211_queue,
+                           u8 sta_id, u8 tid, unsigned int timeout);
+
 /*
  * Disable a TXQ.
  * Note that in non-DQA mode the %mac80211_queue and %tid params are ignored.
@@ -1701,7 +1747,8 @@ void iwl_mvm_enable_ac_txq(struct iwl_mvm *mvm, int queue, int mac80211_queue,
 
 static inline void iwl_mvm_stop_device(struct iwl_mvm *mvm)
 {
-       iwl_free_fw_paging(mvm);
+       if (!iwl_mvm_has_new_tx_api(mvm))
+               iwl_free_fw_paging(mvm);
        mvm->ucode_loaded = false;
        iwl_trans_stop_device(mvm->trans);
 }
@@ -1797,4 +1844,14 @@ int iwl_mvm_send_lqm_cmd(struct ieee80211_vif *vif,
                         u32 duration, u32 timeout);
 bool iwl_mvm_lqm_active(struct iwl_mvm *mvm);
 
+#ifdef CONFIG_ACPI
+int iwl_mvm_sar_select_profile(struct iwl_mvm *mvm, int prof_a, int prof_b);
+#else
+static inline
+int iwl_mvm_sar_select_profile(struct iwl_mvm *mvm, int prof_a, int prof_b)
+{
+       return -ENOENT;
+}
+#endif /* CONFIG_ACPI */
+
 #endif /* __IWL_MVM_H__ */
index 4cd72d4cdc47c4de8fac948d22c59a6848e381d0..888053323c925511e7102242174aa980a80b5e5c 100644 (file)
@@ -302,6 +302,8 @@ static const struct iwl_rx_handlers iwl_mvm_rx_handlers[] = {
                   RX_HANDLER_SYNC),
        RX_HANDLER(TOF_NOTIFICATION, iwl_mvm_tof_resp_handler,
                   RX_HANDLER_ASYNC_LOCKED),
+       RX_HANDLER_GRP(DEBUG_GROUP, MFU_ASSERT_DUMP_NTF,
+                      iwl_mvm_mfu_assert_dump_notif, RX_HANDLER_SYNC),
        RX_HANDLER_GRP(PROT_OFFLOAD_GROUP, STORED_BEACON_NTF,
                       iwl_mvm_rx_stored_beacon_notif, RX_HANDLER_SYNC),
        RX_HANDLER_GRP(DATA_PATH_GROUP, MU_GROUP_MGMT_NOTIF,
@@ -426,6 +428,7 @@ static const struct iwl_hcmd_names iwl_mvm_legacy_names[] = {
  */
 static const struct iwl_hcmd_names iwl_mvm_system_names[] = {
        HCMD_NAME(SHARED_MEM_CFG_CMD),
+       HCMD_NAME(INIT_EXTENDED_CFG_CMD),
 };
 
 /* Please keep this array *SORTED* by hex value.
@@ -444,6 +447,7 @@ static const struct iwl_hcmd_names iwl_mvm_phy_names[] = {
        HCMD_NAME(CMD_DTS_MEASUREMENT_TRIGGER_WIDE),
        HCMD_NAME(CTDP_CONFIG_CMD),
        HCMD_NAME(TEMP_REPORTING_THRESHOLDS_CMD),
+       HCMD_NAME(GEO_TX_POWER_LIMIT),
        HCMD_NAME(CT_KILL_NOTIFICATION),
        HCMD_NAME(DTS_MEASUREMENT_NOTIF_WIDE),
 };
@@ -452,6 +456,7 @@ static const struct iwl_hcmd_names iwl_mvm_phy_names[] = {
  * Access is done through binary search
  */
 static const struct iwl_hcmd_names iwl_mvm_data_path_names[] = {
+       HCMD_NAME(DQA_ENABLE_CMD),
        HCMD_NAME(UPDATE_MU_GROUPS_CMD),
        HCMD_NAME(TRIGGER_RX_QUEUES_NOTIF_CMD),
        HCMD_NAME(STA_PM_NOTIF),
@@ -459,6 +464,13 @@ static const struct iwl_hcmd_names iwl_mvm_data_path_names[] = {
        HCMD_NAME(RX_QUEUES_NOTIFICATION),
 };
 
+/* Please keep this array *SORTED* by hex value.
+ * Access is done through binary search
+ */
+static const struct iwl_hcmd_names iwl_mvm_debug_names[] = {
+       HCMD_NAME(MFU_ASSERT_DUMP_NTF),
+};
+
 /* Please keep this array *SORTED* by hex value.
  * Access is done through binary search
  */
@@ -602,6 +614,8 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
                }
        } else {
                mvm->aux_queue = IWL_MVM_DQA_AUX_QUEUE;
+               mvm->probe_queue = IWL_MVM_DQA_AP_PROBE_RESP_QUEUE;
+               mvm->p2p_dev_queue = IWL_MVM_DQA_P2P_DEVICE_QUEUE;
                mvm->first_agg_queue = IWL_MVM_DQA_MIN_DATA_QUEUE;
                mvm->last_agg_queue = IWL_MVM_DQA_MAX_DATA_QUEUE;
        }
@@ -1256,7 +1270,7 @@ static bool iwl_mvm_disallow_offloading(struct iwl_mvm *mvm,
        u8 tid;
 
        if (WARN_ON(vif->type != NL80211_IFTYPE_STATION ||
-                   mvmvif->ap_sta_id == IWL_MVM_STATION_COUNT))
+                   mvmvif->ap_sta_id == IWL_MVM_INVALID_STA))
                return false;
 
        mvmsta = iwl_mvm_sta_from_staid_rcu(mvm, mvmvif->ap_sta_id);
@@ -1344,7 +1358,7 @@ static void iwl_mvm_set_wowlan_data(struct iwl_mvm *mvm,
        struct ieee80211_sta *ap_sta;
        struct iwl_mvm_sta *mvm_ap_sta;
 
-       if (iter_data->ap_sta_id == IWL_MVM_STATION_COUNT)
+       if (iter_data->ap_sta_id == IWL_MVM_INVALID_STA)
                return;
 
        rcu_read_lock();
@@ -1414,7 +1428,7 @@ int iwl_mvm_enter_d0i3(struct iwl_op_mode *op_mode)
                mvm->d0i3_offloading = !d0i3_iter_data.disable_offloading;
        } else {
                WARN_ON_ONCE(d0i3_iter_data.vif_count > 1);
-               mvm->d0i3_ap_sta_id = IWL_MVM_STATION_COUNT;
+               mvm->d0i3_ap_sta_id = IWL_MVM_INVALID_STA;
                mvm->d0i3_offloading = false;
        }
 
@@ -1427,7 +1441,7 @@ int iwl_mvm_enter_d0i3(struct iwl_op_mode *op_mode)
                return ret;
 
        /* configure wowlan configuration only if needed */
-       if (mvm->d0i3_ap_sta_id != IWL_MVM_STATION_COUNT) {
+       if (mvm->d0i3_ap_sta_id != IWL_MVM_INVALID_STA) {
                /* wake on beacons only if beacon storing isn't supported */
                if (!fw_has_capa(&mvm->fw->ucode_capa,
                                 IWL_UCODE_TLV_CAPA_BEACON_STORING))
@@ -1504,7 +1518,7 @@ void iwl_mvm_d0i3_enable_tx(struct iwl_mvm *mvm, __le16 *qos_seq)
 
        spin_lock_bh(&mvm->d0i3_tx_lock);
 
-       if (mvm->d0i3_ap_sta_id == IWL_MVM_STATION_COUNT)
+       if (mvm->d0i3_ap_sta_id == IWL_MVM_INVALID_STA)
                goto out;
 
        IWL_DEBUG_RPM(mvm, "re-enqueue packets\n");
@@ -1542,7 +1556,7 @@ out:
        }
        clear_bit(IWL_MVM_STATUS_IN_D0I3, &mvm->status);
        wake_up(&mvm->d0i3_exit_waitq);
-       mvm->d0i3_ap_sta_id = IWL_MVM_STATION_COUNT;
+       mvm->d0i3_ap_sta_id = IWL_MVM_INVALID_STA;
        if (wake_queues)
                ieee80211_wake_queues(mvm->hw);
 
index 95138830b9f8e4ee8ba83b68278f0478e313f47d..d59efe804356168c0ab87fbb7a3a0633b33666ef 100644 (file)
@@ -7,6 +7,7 @@
  *
  * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
  * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
+ * Copyright(c) 2017           Intel Deutschland GmbH
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of version 2 of the GNU General Public License as
@@ -250,12 +251,30 @@ int iwl_mvm_phy_ctxt_changed(struct iwl_mvm *mvm, struct iwl_mvm_phy_ctxt *ctxt,
                             struct cfg80211_chan_def *chandef,
                             u8 chains_static, u8 chains_dynamic)
 {
+       enum iwl_phy_ctxt_action action = FW_CTXT_ACTION_MODIFY;
+
        lockdep_assert_held(&mvm->mutex);
 
+       /* In CDB mode we cannot modify PHY context between bands so... */
+       if (iwl_mvm_has_new_tx_api(mvm) &&
+           ctxt->channel->band != chandef->chan->band) {
+               int ret;
+
+               /* ... remove it here ...*/
+               ret = iwl_mvm_phy_ctxt_apply(mvm, ctxt, chandef,
+                                            chains_static, chains_dynamic,
+                                            FW_CTXT_ACTION_REMOVE, 0);
+               if (ret)
+                       return ret;
+
+               /* ... and proceed to add it again */
+               action = FW_CTXT_ACTION_ADD;
+       }
+
        ctxt->channel = chandef->chan;
        return iwl_mvm_phy_ctxt_apply(mvm, ctxt, chandef,
                                      chains_static, chains_dynamic,
-                                     FW_CTXT_ACTION_MODIFY, 0);
+                                     action, 0);
 }
 
 void iwl_mvm_phy_ctxt_unref(struct iwl_mvm *mvm, struct iwl_mvm_phy_ctxt *ctxt)
index 20473df79c945f8f48024785d9cac5c00508f9cc..d4c0ca7ccb342736a8d7cf23b5200d6adc25bcda 100644 (file)
@@ -7,7 +7,7 @@
  *
  * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
  * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
- * Copyright(c) 2016 Intel Deutschland GmbH
+ * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of version 2 of the GNU General Public License as
@@ -104,7 +104,20 @@ static void iwl_mvm_pass_packet_to_mac80211(struct iwl_mvm *mvm,
                                            u8 crypt_len,
                                            struct iwl_rx_cmd_buffer *rxb)
 {
-       unsigned int hdrlen, fraglen;
+       unsigned int hdrlen = ieee80211_hdrlen(hdr->frame_control);
+       unsigned int fraglen;
+
+       /*
+        * The 'hdrlen' (plus the 8 bytes for the SNAP and the crypt_len,
+        * but those are all multiples of 4 long) all goes away, but we
+        * want the *end* of it, which is going to be the start of the IP
+        * header, to be aligned when it gets pulled in.
+        * The beginning of the skb->data is aligned on at least a 4-byte
+        * boundary after allocation. Everything here is aligned at least
+        * on a 2-byte boundary so we can just take hdrlen & 3 and pad by
+        * the result.
+        */
+       skb_reserve(skb, hdrlen & 3);
 
        /* If frame is small enough to fit in skb->head, pull it completely.
         * If not, only pull ieee80211_hdr (including crypto if present, and
@@ -118,8 +131,7 @@ static void iwl_mvm_pass_packet_to_mac80211(struct iwl_mvm *mvm,
         * If the latter changes (there are efforts in the standards group
         * to do so) we should revisit this and ieee80211_data_to_8023().
         */
-       hdrlen = (len <= skb_tailroom(skb)) ? len :
-                                             sizeof(*hdr) + crypt_len + 8;
+       hdrlen = (len <= skb_tailroom(skb)) ? len : hdrlen + crypt_len + 8;
 
        memcpy(skb_put(skb, hdrlen), hdr, hdrlen);
        fraglen = len - hdrlen;
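
A concrete instance of the 'hdrlen & 3' padding described in the
comment: a QoS data header is 26 bytes, so reserving 26 & 3 == 2 bytes
up front makes pad + header end on a 4-byte boundary, which is where
the IP header starts. A standalone sketch of the arithmetic (header
length assumed):

        #include <assert.h>

        int main(void)
        {
                unsigned int hdrlen = 26; /* QoS data hdr, no crypto, assumed */
                unsigned int pad = hdrlen & 3;  /* 2 */

                /* the IP header after pad + MAC header is 4-byte aligned */
                assert((pad + hdrlen) % 4 == 0);
                return 0;
        }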
@@ -339,7 +351,7 @@ void iwl_mvm_rx_rx_mpdu(struct iwl_mvm *mvm, struct napi_struct *napi,
 
                id >>= RX_MDPU_RES_STATUS_STA_ID_SHIFT;
 
-               if (!WARN_ON_ONCE(id >= IWL_MVM_STATION_COUNT)) {
+               if (!WARN_ON_ONCE(id >= ARRAY_SIZE(mvm->fw_id_to_mac_id))) {
                        sta = rcu_dereference(mvm->fw_id_to_mac_id[id]);
                        if (IS_ERR(sta))
                                sta = NULL;
@@ -448,9 +460,16 @@ void iwl_mvm_rx_rx_mpdu(struct iwl_mvm *mvm, struct napi_struct *napi,
                if (rate_n_flags & RATE_MCS_BF_MSK)
                        rx_status->vht_flag |= RX_VHT_FLAG_BF;
        } else {
-               rx_status->rate_idx =
-                       iwl_mvm_legacy_rate_to_mac80211_idx(rate_n_flags,
-                                                           rx_status->band);
+               int rate = iwl_mvm_legacy_rate_to_mac80211_idx(rate_n_flags,
+                                                              rx_status->band);
+
+               if (WARN(rate < 0 || rate > 0xFF,
+                        "Invalid rate flags 0x%x, band %d,\n",
+                        rate_n_flags, rx_status->band)) {
+                       kfree_skb(skb);
+                       return;
+               }
+               rx_status->rate_idx = rate;
        }
 
 #ifdef CONFIG_IWLWIFI_DEBUGFS
@@ -637,6 +656,9 @@ void iwl_mvm_handle_rx_statistics(struct iwl_mvm *mvm,
                .mvm = mvm,
        };
        int expected_size;
+       int i;
+       u8 *energy;
+       __le32 *bytes, *air_time;
 
        if (iwl_mvm_is_cdb_supported(mvm))
                expected_size = sizeof(*stats);
@@ -645,8 +667,11 @@ void iwl_mvm_handle_rx_statistics(struct iwl_mvm *mvm,
        else
                expected_size = sizeof(struct iwl_notif_statistics_v10);
 
-       if (iwl_rx_packet_payload_len(pkt) != expected_size)
-               goto invalid;
+       if (iwl_rx_packet_payload_len(pkt) != expected_size) {
+               IWL_ERR(mvm, "received invalid statistics size (%d)!\n",
+                       iwl_rx_packet_payload_len(pkt));
+               return;
+       }
 
        data.mac_id = stats->rx.general.mac_id;
        data.beacon_filter_average_energy =
@@ -662,38 +687,6 @@ void iwl_mvm_handle_rx_statistics(struct iwl_mvm *mvm,
                le64_to_cpu(stats->general.common.on_time_scan);
 
        data.general = &stats->general;
-       if (iwl_mvm_has_new_rx_api(mvm)) {
-               int i;
-               u8 *energy;
-               __le32 *bytes, *air_time;
-
-               if (!iwl_mvm_is_cdb_supported(mvm)) {
-                       struct iwl_notif_statistics_v11 *v11 =
-                               (void *)&pkt->data;
-
-                       energy = (void *)&v11->load_stats.avg_energy;
-                       bytes = (void *)&v11->load_stats.byte_count;
-                       air_time = (void *)&v11->load_stats.air_time;
-               } else {
-                       energy = (void *)&stats->load_stats.avg_energy;
-                       bytes = (void *)&stats->load_stats.byte_count;
-                       air_time = (void *)&stats->load_stats.air_time;
-               }
-
-               rcu_read_lock();
-               for (i = 0; i < IWL_MVM_STATION_COUNT; i++) {
-                       struct iwl_mvm_sta *sta;
-
-                       if (!energy[i])
-                               continue;
-
-                       sta = iwl_mvm_sta_from_staid_rcu(mvm, i);
-                       if (!sta)
-                               continue;
-                       sta->avg_energy = energy[i];
-               }
-               rcu_read_unlock();
-       }
 
        iwl_mvm_rx_stats_check_trigger(mvm, pkt);
 
@@ -701,10 +694,36 @@ void iwl_mvm_handle_rx_statistics(struct iwl_mvm *mvm,
                                            IEEE80211_IFACE_ITER_NORMAL,
                                            iwl_mvm_stat_iterator,
                                            &data);
-       return;
- invalid:
-       IWL_ERR(mvm, "received invalid statistics size (%d)!\n",
-               iwl_rx_packet_payload_len(pkt));
+
+       if (!iwl_mvm_has_new_rx_api(mvm))
+               return;
+
+       if (!iwl_mvm_is_cdb_supported(mvm)) {
+               struct iwl_notif_statistics_v11 *v11 =
+                       (void *)&pkt->data;
+
+               energy = (void *)&v11->load_stats.avg_energy;
+               bytes = (void *)&v11->load_stats.byte_count;
+               air_time = (void *)&v11->load_stats.air_time;
+       } else {
+               energy = (void *)&stats->load_stats.avg_energy;
+               bytes = (void *)&stats->load_stats.byte_count;
+               air_time = (void *)&stats->load_stats.air_time;
+       }
+
+       rcu_read_lock();
+       for (i = 0; i < ARRAY_SIZE(mvm->fw_id_to_mac_id); i++) {
+               struct iwl_mvm_sta *sta;
+
+               if (!energy[i])
+                       continue;
+
+               sta = iwl_mvm_sta_from_staid_rcu(mvm, i);
+               if (!sta)
+                       continue;
+               sta->avg_energy = energy[i];
+       }
+       rcu_read_unlock();
 }
 
 void iwl_mvm_rx_statistics(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb)
index d79e9c2a2654aed22fd8c359cb27a923cdcc5e50..24c4fbe139a36f80de0ce2387ea28eb132981dda 100644 (file)
@@ -7,7 +7,7 @@
  *
  * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
  * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
- * Copyright(c) 2015 - 2016 Intel Deutschland GmbH
+ * Copyright(c) 2015 - 2017 Intel Deutschland GmbH
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of version 2 of the GNU General Public License as
@@ -29,7 +29,7 @@
  *
  * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
  * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
- * Copyright(c) 2015 - 2016 Intel Deutschland GmbH
+ * Copyright(c) 2015 - 2017 Intel Deutschland GmbH
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -462,6 +462,7 @@ void iwl_mvm_reorder_timer_expired(unsigned long data)
        int i;
        u16 sn = 0, index = 0;
        bool expired = false;
+       bool cont = false;
 
        spin_lock(&buf->lock);
 
@@ -473,12 +474,21 @@ void iwl_mvm_reorder_timer_expired(unsigned long data)
        for (i = 0; i < buf->buf_size ; i++) {
                index = (buf->head_sn + i) % buf->buf_size;
 
-               if (skb_queue_empty(&buf->entries[index]))
+               if (skb_queue_empty(&buf->entries[index])) {
+                       /*
+                        * If there is a hole and the next frame didn't expire
+                        * we want to break and not advance SN
+                        */
+                       cont = false;
                        continue;
-               if (!time_after(jiffies, buf->reorder_time[index] +
-                               RX_REORDER_BUF_TIMEOUT_MQ))
+               }
+               if (!cont && !time_after(jiffies, buf->reorder_time[index] +
+                                        RX_REORDER_BUF_TIMEOUT_MQ))
                        break;
+
                expired = true;
+               /* continue until the next hole after these expired frames */
+               cont = true;
                sn = ieee80211_sn_add(buf->head_sn, i + 1);
        }
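
A worked trace of the scan above (buffer contents assumed for
illustration): the cont flag lets a run of expired frames keep
advancing the release SN, while a hole resets it, so a fresh frame
sitting behind a hole stops the scan instead of being released early.

        /* assumed buf_size == 4, head_sn == 100:
         *   i=0: expired frame -> expired = true, cont = true, sn = 101
         *   i=1: hole (empty)  -> cont = false, continue
         *   i=2: fresh frame   -> !cont and not timed out -> break
         * frames up to sn 101 are released; the fresh frame behind the
         * hole keeps its chance to be reordered.
         */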
 
@@ -626,9 +636,13 @@ static bool iwl_mvm_reorder(struct iwl_mvm *mvm,
                return false;
 
        baid_data = rcu_dereference(mvm->baid_map[baid]);
-       if (WARN(!baid_data,
-                "Received baid %d, but no data exists for this BAID\n", baid))
+       if (!baid_data) {
+               WARN(!(reorder & IWL_RX_MPDU_REORDER_BA_OLD_SN),
+                    "Received baid %d, but no data exists for this BAID\n",
+                    baid);
                return false;
+       }
+
        if (WARN(tid != baid_data->tid || mvm_sta->sta_id != baid_data->sta_id,
                 "baid 0x%x is mapped to sta:%d tid:%d, but was received for sta:%d tid:%d\n",
                 baid, baid_data->sta_id, baid_data->tid, mvm_sta->sta_id,
@@ -643,6 +657,14 @@ static bool iwl_mvm_reorder(struct iwl_mvm *mvm,
 
        spin_lock_bh(&buffer->lock);
 
+       if (!buffer->valid) {
+               if (reorder & IWL_RX_MPDU_REORDER_BA_OLD_SN) {
+                       spin_unlock_bh(&buffer->lock);
+                       return false;
+               }
+               buffer->valid = true;
+       }
+
        if (ieee80211_is_back_req(hdr->frame_control)) {
                iwl_mvm_release_frames(mvm, sta, napi, buffer, nssn);
                goto drop;
@@ -727,7 +749,8 @@ drop:
        return true;
 }
 
-static void iwl_mvm_agg_rx_received(struct iwl_mvm *mvm, u8 baid)
+static void iwl_mvm_agg_rx_received(struct iwl_mvm *mvm,
+                                   u32 reorder_data, u8 baid)
 {
        unsigned long now = jiffies;
        unsigned long timeout;
@@ -736,8 +759,10 @@ static void iwl_mvm_agg_rx_received(struct iwl_mvm *mvm, u8 baid)
        rcu_read_lock();
 
        data = rcu_dereference(mvm->baid_map[baid]);
-       if (WARN_ON(!data))
+       if (!data) {
+               WARN_ON(!(reorder_data & IWL_RX_MPDU_REORDER_BA_OLD_SN));
                goto out;
+       }
 
        if (!data->timeout)
                goto out;
@@ -831,7 +856,7 @@ void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi,
        if (le16_to_cpu(desc->status) & IWL_RX_MPDU_STATUS_SRC_STA_FOUND) {
                u8 id = desc->sta_id_flags & IWL_RX_MPDU_SIF_STA_ID_MASK;
 
-               if (!WARN_ON_ONCE(id >= IWL_MVM_STATION_COUNT)) {
+               if (!WARN_ON_ONCE(id >= ARRAY_SIZE(mvm->fw_id_to_mac_id))) {
                        sta = rcu_dereference(mvm->fw_id_to_mac_id[id]);
                        if (IS_ERR(sta))
                                sta = NULL;
@@ -893,26 +918,39 @@ void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi,
 
                if (iwl_mvm_is_nonagg_dup(sta, queue, rx_status, hdr, desc)) {
                        kfree_skb(skb);
-                       rcu_read_unlock();
-                       return;
+                       goto out;
                }
 
                /*
                 * Our hardware de-aggregates AMSDUs but copies the mac header
                 * as is to the de-aggregated MPDUs. We need to turn off the
                 * AMSDU bit in the QoS control ourselves.
+                * In addition, HW reverses addr3 and addr4 - reverse them back.
                 */
                if ((desc->mac_flags2 & IWL_RX_MPDU_MFLG2_AMSDU) &&
                    !WARN_ON(!ieee80211_is_data_qos(hdr->frame_control))) {
+                       int i;
                        u8 *qc = ieee80211_get_qos_ctl(hdr);
+                       u8 mac_addr[ETH_ALEN];
 
                        *qc &= ~IEEE80211_QOS_CTL_A_MSDU_PRESENT;
-                       if (!(desc->amsdu_info &
-                             IWL_RX_MPDU_AMSDU_LAST_SUBFRAME))
-                               rx_status->flag |= RX_FLAG_AMSDU_MORE;
+
+                       for (i = 0; i < ETH_ALEN; i++)
+                               mac_addr[i] = hdr->addr3[ETH_ALEN - i - 1];
+                       ether_addr_copy(hdr->addr3, mac_addr);
+
+                       if (ieee80211_has_a4(hdr->frame_control)) {
+                               for (i = 0; i < ETH_ALEN; i++)
+                                       mac_addr[i] =
+                                               hdr->addr4[ETH_ALEN - i - 1];
+                               ether_addr_copy(hdr->addr4, mac_addr);
+                       }
+               }
+               if (baid != IWL_RX_REORDER_DATA_INVALID_BAID) {
+                       u32 reorder_data = le32_to_cpu(desc->reorder_data);
+
+                       iwl_mvm_agg_rx_received(mvm, reorder_data, baid);
                }
-               if (baid != IWL_RX_REORDER_DATA_INVALID_BAID)
-                       iwl_mvm_agg_rx_received(mvm, baid);
        }
 
        /* Set up the HT phy flags */
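
The un-reversal above copies addr3 (and addr4, if present) into a
scratch buffer backwards and copies it back with ether_addr_copy(). A
standalone user-space equivalent of that byte swap:

        #include <stdio.h>
        #include <string.h>

        #define ETH_ALEN 6

        /* reverse a MAC address in place, as done for addr3/addr4 above */
        static void reverse_mac(unsigned char *addr)
        {
                unsigned char tmp[ETH_ALEN];
                int i;

                for (i = 0; i < ETH_ALEN; i++)
                        tmp[i] = addr[ETH_ALEN - i - 1];
                memcpy(addr, tmp, ETH_ALEN);
        }

        int main(void)
        {
                unsigned char mac[ETH_ALEN] = { 0x00, 0x11, 0x22,
                                                0x33, 0x44, 0x55 };

                reverse_mac(mac);
                printf("%02x:%02x:%02x:%02x:%02x:%02x\n",
                       mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
                return 0;
        }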
@@ -953,9 +991,17 @@ void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi,
                if (rate_n_flags & RATE_MCS_BF_MSK)
                        rx_status->vht_flag |= RX_VHT_FLAG_BF;
        } else {
-               rx_status->rate_idx =
-                       iwl_mvm_legacy_rate_to_mac80211_idx(rate_n_flags,
-                                                           rx_status->band);
+               int rate = iwl_mvm_legacy_rate_to_mac80211_idx(rate_n_flags,
+                                                              rx_status->band);
+
+               if (WARN(rate < 0 || rate > 0xFF,
+                        "Invalid rate flags 0x%x, band %d,\n",
+                        rate_n_flags, rx_status->band)) {
+                       kfree_skb(skb);
+                       goto out;
+               }
+               rx_status->rate_idx = rate;
        }
 
        /* management stuff on default queue */
@@ -974,6 +1020,7 @@ void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi,
        iwl_mvm_create_skb(skb, hdr, len, crypt_len, rxb);
        if (!iwl_mvm_reorder(mvm, napi, queue, sta, skb, desc))
                iwl_mvm_pass_packet_to_mac80211(mvm, napi, skb, queue, sta);
+out:
        rcu_read_unlock();
 }
 
index 0a64efa844b7565bba9ba83666fca2316096aedd..9668f945b4e6fc4301dc28b74492e7e82c97413a 100644 (file)
@@ -7,7 +7,7 @@
  *
  * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
  * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
- * Copyright(c) 2016 Intel Deutschland GmbH
+ * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of version 2 of the GNU General Public License as
@@ -34,7 +34,7 @@
  *
  * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
  * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
- * Copyright(c) 2016 Intel Deutschland GmbH
+ * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -966,11 +966,11 @@ static void iwl_mvm_fill_channels(struct iwl_mvm *mvm, u8 *channels)
                channels[j] = band->channels[i].hw_value;
 }
 
-static void iwl_mvm_fill_scan_config(struct iwl_mvm *mvm, void *config,
-                                    u32 flags, u8 channel_flags)
+static void iwl_mvm_fill_scan_config_v1(struct iwl_mvm *mvm, void *config,
+                                       u32 flags, u8 channel_flags)
 {
        enum iwl_mvm_scan_type type = iwl_mvm_get_scan_type(mvm, false);
-       struct iwl_scan_config *cfg = config;
+       struct iwl_scan_config_v1 *cfg = config;
 
        cfg->flags = cpu_to_le32(flags);
        cfg->tx_chains = cpu_to_le32(iwl_mvm_get_valid_tx_ant(mvm));
@@ -989,11 +989,11 @@ static void iwl_mvm_fill_scan_config(struct iwl_mvm *mvm, void *config,
        iwl_mvm_fill_channels(mvm, cfg->channel_array);
 }
 
-static void iwl_mvm_fill_scan_config_cdb(struct iwl_mvm *mvm, void *config,
-                                        u32 flags, u8 channel_flags)
+static void iwl_mvm_fill_scan_config(struct iwl_mvm *mvm, void *config,
+                                    u32 flags, u8 channel_flags)
 {
        enum iwl_mvm_scan_type type = iwl_mvm_get_scan_type(mvm, false);
-       struct iwl_scan_config_cdb *cfg = config;
+       struct iwl_scan_config *cfg = config;
 
        cfg->flags = cpu_to_le32(flags);
        cfg->tx_chains = cpu_to_le32(iwl_mvm_get_valid_tx_ant(mvm));
@@ -1001,10 +1001,14 @@ static void iwl_mvm_fill_scan_config_cdb(struct iwl_mvm *mvm, void *config,
        cfg->legacy_rates = iwl_mvm_scan_config_rates(mvm);
        cfg->out_of_channel_time[0] =
                cpu_to_le32(scan_timing[type].max_out_time);
-       cfg->out_of_channel_time[1] =
-               cpu_to_le32(scan_timing[type].max_out_time);
        cfg->suspend_time[0] = cpu_to_le32(scan_timing[type].suspend_time);
-       cfg->suspend_time[1] = cpu_to_le32(scan_timing[type].suspend_time);
+
+       if (iwl_mvm_is_cdb_supported(mvm)) {
+               cfg->suspend_time[1] =
+                       cpu_to_le32(scan_timing[type].suspend_time);
+               cfg->out_of_channel_time[1] =
+                       cpu_to_le32(scan_timing[type].max_out_time);
+       }
 
        iwl_mvm_fill_scan_dwell(mvm, &cfg->dwell, &scan_timing[type]);
 
@@ -1033,16 +1037,13 @@ int iwl_mvm_config_scan(struct iwl_mvm *mvm)
        if (WARN_ON(num_channels > mvm->fw->ucode_capa.n_scan_channels))
                return -ENOBUFS;
 
-       if (type == mvm->scan_type) {
-               IWL_DEBUG_SCAN(mvm,
-                              "Ignoring UMAC scan config of the same type\n");
+       if (type == mvm->scan_type)
                return 0;
-       }
 
-       if (iwl_mvm_is_cdb_supported(mvm))
-               cmd_size = sizeof(struct iwl_scan_config_cdb);
-       else
+       if (iwl_mvm_has_new_tx_api(mvm))
                cmd_size = sizeof(struct iwl_scan_config);
+       else
+               cmd_size = sizeof(struct iwl_scan_config_v1);
        cmd_size += mvm->fw->ucode_capa.n_scan_channels;
 
        cfg = kzalloc(cmd_size, GFP_KERNEL);
@@ -1068,13 +1069,13 @@ int iwl_mvm_config_scan(struct iwl_mvm *mvm)
                        IWL_CHANNEL_FLAG_EBS_ADD |
                        IWL_CHANNEL_FLAG_PRE_SCAN_PASSIVE2ACTIVE;
 
-       if (iwl_mvm_is_cdb_supported(mvm)) {
+       if (iwl_mvm_has_new_tx_api(mvm)) {
                flags |= (type == IWL_SCAN_TYPE_FRAGMENTED) ?
                         SCAN_CONFIG_FLAG_SET_LMAC2_FRAGMENTED :
                         SCAN_CONFIG_FLAG_CLEAR_LMAC2_FRAGMENTED;
-               iwl_mvm_fill_scan_config_cdb(mvm, cfg, flags, channel_flags);
-       } else {
                iwl_mvm_fill_scan_config(mvm, cfg, flags, channel_flags);
+       } else {
+               iwl_mvm_fill_scan_config_v1(mvm, cfg, flags, channel_flags);
        }
 
        cmd.data[0] = cfg;
@@ -1119,16 +1120,20 @@ static void iwl_mvm_scan_umac_dwell(struct iwl_mvm *mvm,
        }
        cmd->fragmented_dwell = timing->dwell_fragmented;
 
-       if (iwl_mvm_is_cdb_supported(mvm)) {
-               cmd->cdb.max_out_time[0] = cpu_to_le32(timing->max_out_time);
-               cmd->cdb.suspend_time[0] = cpu_to_le32(timing->suspend_time);
-               cmd->cdb.max_out_time[1] = cpu_to_le32(timing->max_out_time);
-               cmd->cdb.suspend_time[1] = cpu_to_le32(timing->suspend_time);
-               cmd->cdb.scan_priority = cpu_to_le32(IWL_SCAN_PRIORITY_EXT_6);
+       if (iwl_mvm_has_new_tx_api(mvm)) {
+               cmd->v6.scan_priority = cpu_to_le32(IWL_SCAN_PRIORITY_EXT_6);
+               cmd->v6.max_out_time[0] = cpu_to_le32(timing->max_out_time);
+               cmd->v6.suspend_time[0] = cpu_to_le32(timing->suspend_time);
+               if (iwl_mvm_is_cdb_supported(mvm)) {
+                       cmd->v6.max_out_time[1] =
+                               cpu_to_le32(timing->max_out_time);
+                       cmd->v6.suspend_time[1] =
+                               cpu_to_le32(timing->suspend_time);
+               }
        } else {
-               cmd->no_cdb.max_out_time = cpu_to_le32(timing->max_out_time);
-               cmd->no_cdb.suspend_time = cpu_to_le32(timing->suspend_time);
-               cmd->no_cdb.scan_priority =
+               cmd->v1.max_out_time = cpu_to_le32(timing->max_out_time);
+               cmd->v1.suspend_time = cpu_to_le32(timing->suspend_time);
+               cmd->v1.scan_priority =
                        cpu_to_le32(IWL_SCAN_PRIORITY_EXT_6);
        }
 
@@ -1207,8 +1212,8 @@ static int iwl_mvm_scan_umac(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
                             int type)
 {
        struct iwl_scan_req_umac *cmd = mvm->scan_cmd;
-       void *cmd_data = iwl_mvm_is_cdb_supported(mvm) ?
-                        (void *)&cmd->cdb.data : (void *)&cmd->no_cdb.data;
+       void *cmd_data = iwl_mvm_has_new_tx_api(mvm) ?
+                        (void *)&cmd->v6.data : (void *)&cmd->v1.data;
        struct iwl_scan_req_umac_tail *sec_part = cmd_data +
                sizeof(struct iwl_scan_channel_cfg_umac) *
                        mvm->fw->ucode_capa.n_scan_channels;
@@ -1245,12 +1250,12 @@ static int iwl_mvm_scan_umac(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
                                IWL_SCAN_CHANNEL_FLAG_EBS_ACCURATE |
                                IWL_SCAN_CHANNEL_FLAG_CACHE_ADD;
 
-       if (iwl_mvm_is_cdb_supported(mvm)) {
-               cmd->cdb.channel_flags = channel_flags;
-               cmd->cdb.n_channels = params->n_channels;
+       if (iwl_mvm_has_new_tx_api(mvm)) {
+               cmd->v6.channel_flags = channel_flags;
+               cmd->v6.n_channels = params->n_channels;
        } else {
-               cmd->no_cdb.channel_flags = channel_flags;
-               cmd->no_cdb.n_channels = params->n_channels;
+               cmd->v1.channel_flags = channel_flags;
+               cmd->v1.n_channels = params->n_channels;
        }
 
        iwl_scan_build_ssids(params, sec_part->direct_scan, &ssid_bitmap);
@@ -1692,10 +1697,10 @@ static int iwl_mvm_scan_stop_wait(struct iwl_mvm *mvm, int type)
 
 int iwl_mvm_scan_size(struct iwl_mvm *mvm)
 {
-       int base_size = IWL_SCAN_REQ_UMAC_SIZE;
+       int base_size = IWL_SCAN_REQ_UMAC_SIZE_V1;
 
-       if (iwl_mvm_is_cdb_supported(mvm))
-               base_size = IWL_SCAN_REQ_UMAC_SIZE_CDB;
+       if (iwl_mvm_has_new_tx_api(mvm))
+               base_size = IWL_SCAN_REQ_UMAC_SIZE;
 
        if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_UMAC_SCAN))
                return base_size +
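For illustration, the sizing logic above reduces to: pick the versioned request header, then append one per-channel config plus the tail section. A minimal standalone sketch of that computation - all sizes and names below are invented placeholders, not the driver's real values:

    #include <stdio.h>

    /* placeholder sizes standing in for the driver's real layouts */
    #define SCAN_REQ_SIZE_V1   48   /* assumed legacy request header size */
    #define SCAN_REQ_SIZE_V6   64   /* assumed extended request header size */
    #define CHANNEL_CFG_SIZE    8   /* assumed per-channel config entry */
    #define TAIL_SIZE          24   /* assumed trailing SSID/probe section */

    static int scan_req_size(int has_new_tx_api, int n_channels)
    {
            int base = has_new_tx_api ? SCAN_REQ_SIZE_V6 : SCAN_REQ_SIZE_V1;

            /* versioned header + one config per channel + tail */
            return base + CHANNEL_CFG_SIZE * n_channels + TAIL_SIZE;
    }

    int main(void)
    {
            printf("v1, 4 channels: %d bytes\n", scan_req_size(0, 4));
            printf("v6, 4 channels: %d bytes\n", scan_req_size(1, 4));
            return 0;
    }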
index 101fb04a8573492c51a343c3d41b0eb1df12cbbe..539b06bf08031b321eed18ab6b51614d812e783c 100644 (file)
@@ -235,7 +235,7 @@ static int iwl_mvm_sf_config(struct iwl_mvm *mvm, u8 sta_id,
                iwl_mvm_fill_sf_command(mvm, &sf_cmd, NULL);
                break;
        case SF_FULL_ON:
-               if (sta_id == IWL_MVM_STATION_COUNT) {
+               if (sta_id == IWL_MVM_INVALID_STA) {
                        IWL_ERR(mvm,
                                "No station: Cannot switch SF to FULL_ON\n");
                        return -EINVAL;
@@ -276,12 +276,12 @@ int iwl_mvm_sf_update(struct iwl_mvm *mvm, struct ieee80211_vif *changed_vif,
                      bool remove_vif)
 {
        enum iwl_sf_state new_state;
-       u8 sta_id = IWL_MVM_STATION_COUNT;
+       u8 sta_id = IWL_MVM_INVALID_STA;
        struct iwl_mvm_vif *mvmvif = NULL;
        struct iwl_mvm_active_iface_iterator_data data = {
                .ignore_vif = changed_vif,
                .sta_vif_state = SF_UNINIT,
-               .sta_vif_ap_sta_id = IWL_MVM_STATION_COUNT,
+               .sta_vif_ap_sta_id = IWL_MVM_INVALID_STA,
        };
 
        /*
index bd1dcc863d8f338df994a9b177d498df2eef49cb..a2a1fa06b78146344e6b723124acba53db955eb9 100644 (file)
@@ -7,7 +7,7 @@
  *
  * Copyright(c) 2012 - 2015 Intel Corporation. All rights reserved.
  * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
- * Copyright(c) 2016 Intel Deutschland GmbH
+ * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of version 2 of the GNU General Public License as
@@ -34,7 +34,7 @@
  *
  * Copyright(c) 2012 - 2015 Intel Corporation. All rights reserved.
  * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
- * Copyright(c) 2016 Intel Deutschland GmbH
+ * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -98,7 +98,7 @@ static int iwl_mvm_find_free_sta_id(struct iwl_mvm *mvm,
                reserved_ids = BIT(0);
 
        /* Don't take rcu_read_lock() since we are protected by mvm->mutex */
-       for (sta_id = 0; sta_id < IWL_MVM_STATION_COUNT; sta_id++) {
+       for (sta_id = 0; sta_id < ARRAY_SIZE(mvm->fw_id_to_mac_id); sta_id++) {
                if (BIT(sta_id) & reserved_ids)
                        continue;
 
@@ -106,7 +106,7 @@ static int iwl_mvm_find_free_sta_id(struct iwl_mvm *mvm,
                                               lockdep_is_held(&mvm->mutex)))
                        return sta_id;
        }
-       return IWL_MVM_STATION_COUNT;
+       return IWL_MVM_INVALID_STA;
 }
 
 /* send station add/update command to firmware */
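The search above now walks the whole fw_id_to_mac_id table via ARRAY_SIZE() and signals failure with the dedicated IWL_MVM_INVALID_STA sentinel instead of overloading the table-size constant. A standalone sketch of the same free-slot scan, where the table size, sentinel value and reserved mask are illustrative assumptions:

    #include <stdint.h>
    #include <stdio.h>

    #define STA_TABLE_SIZE 16       /* assumed firmware station table size */
    #define INVALID_STA    0xff     /* assumed sentinel; never a valid index */

    static void *sta_table[STA_TABLE_SIZE]; /* NULL marks a free slot */

    static uint8_t find_free_sta_id(uint32_t reserved_ids)
    {
            for (uint8_t sta_id = 0; sta_id < STA_TABLE_SIZE; sta_id++) {
                    if ((1u << sta_id) & reserved_ids)
                            continue;       /* skip caller-reserved IDs */
                    if (!sta_table[sta_id])
                            return sta_id;  /* first free, unreserved slot */
            }
            return INVALID_STA;             /* table is full */
    }

    int main(void)
    {
            printf("first free id: %u\n", find_free_sta_id(1u << 0));
            return 0;
    }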
@@ -127,11 +127,17 @@ int iwl_mvm_sta_send_to_fw(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
        u32 agg_size = 0, mpdu_dens = 0;
 
        if (!update || (flags & STA_MODIFY_QUEUES)) {
-               add_sta_cmd.tfd_queue_msk = cpu_to_le32(mvm_sta->tfd_queue_msk);
                memcpy(&add_sta_cmd.addr, sta->addr, ETH_ALEN);
 
-               if (flags & STA_MODIFY_QUEUES)
-                       add_sta_cmd.modify_mask |= STA_MODIFY_QUEUES;
+               if (!iwl_mvm_has_new_tx_api(mvm)) {
+                       add_sta_cmd.tfd_queue_msk =
+                               cpu_to_le32(mvm_sta->tfd_queue_msk);
+
+                       if (flags & STA_MODIFY_QUEUES)
+                               add_sta_cmd.modify_mask |= STA_MODIFY_QUEUES;
+               } else {
+                       WARN_ON(flags & STA_MODIFY_QUEUES);
+               }
        }
 
        switch (sta->bandwidth) {
@@ -209,13 +215,15 @@ int iwl_mvm_sta_send_to_fw(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
                add_sta_cmd.modify_mask |= STA_MODIFY_UAPSD_ACS;
 
                if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_BK)
-                       add_sta_cmd.uapsd_trigger_acs |= BIT(AC_BK);
+                       add_sta_cmd.uapsd_acs |= BIT(AC_BK);
                if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_BE)
-                       add_sta_cmd.uapsd_trigger_acs |= BIT(AC_BE);
+                       add_sta_cmd.uapsd_acs |= BIT(AC_BE);
                if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_VI)
-                       add_sta_cmd.uapsd_trigger_acs |= BIT(AC_VI);
+                       add_sta_cmd.uapsd_acs |= BIT(AC_VI);
                if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_VO)
-                       add_sta_cmd.uapsd_trigger_acs |= BIT(AC_VO);
+                       add_sta_cmd.uapsd_acs |= BIT(AC_VO);
+               add_sta_cmd.uapsd_acs |= add_sta_cmd.uapsd_acs << 4;
+               add_sta_cmd.sp_length = sta->max_sp ? sta->max_sp * 2 : 128;
        }
 
        status = ADD_STA_SUCCESS;
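The U-APSD block above collects the enabled ACs in the low nibble, mirrors them into the high nibble, and encodes the service period as max_sp * 2 frames with 128 apparently standing for "no limit". A toy computation of those two fields - the nibble interpretation is an assumption, not something the patch states:

    #include <stdint.h>
    #include <stdio.h>

    enum { AC_BK, AC_BE, AC_VI, AC_VO };    /* access categories as bits */

    int main(void)
    {
            uint8_t uapsd_acs = 0;

            /* suppose the station negotiated U-APSD on VI and VO */
            uapsd_acs |= 1u << AC_VI;
            uapsd_acs |= 1u << AC_VO;

            /* mirror the low nibble into the high nibble as the driver
             * does; presumably trigger- and delivery-enabled ACs share
             * one byte */
            uapsd_acs |= uapsd_acs << 4;
            printf("uapsd_acs = 0x%02x\n", uapsd_acs);      /* 0xcc */

            /* service period in frames: max_sp counts pairs; zero seems
             * to mean "release everything", encoded as 128 */
            int max_sp = 2;
            int sp_length = max_sp ? max_sp * 2 : 128;
            printf("sp_length = %d\n", sp_length);
            return 0;
    }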
@@ -337,6 +345,9 @@ static int iwl_mvm_invalidate_sta_queue(struct iwl_mvm *mvm, int queue,
        u8 sta_id;
        int ret;
 
+       if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
+               return -EINVAL;
+
        spin_lock_bh(&mvm->queue_info_lock);
        sta_id = mvm->queue_info[queue].ra_sta_id;
        spin_unlock_bh(&mvm->queue_info_lock);
@@ -387,6 +398,9 @@ static int iwl_mvm_get_queue_agg_tids(struct iwl_mvm *mvm, int queue)
 
        lockdep_assert_held(&mvm->mutex);
 
+       if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
+               return -EINVAL;
+
        spin_lock_bh(&mvm->queue_info_lock);
        sta_id = mvm->queue_info[queue].ra_sta_id;
        tid_bitmap = mvm->queue_info[queue].tid_bitmap;
@@ -426,6 +440,9 @@ static int iwl_mvm_remove_sta_queue_marking(struct iwl_mvm *mvm, int queue)
 
        lockdep_assert_held(&mvm->mutex);
 
+       if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
+               return -EINVAL;
+
        spin_lock_bh(&mvm->queue_info_lock);
        sta_id = mvm->queue_info[queue].ra_sta_id;
        tid_bitmap = mvm->queue_info[queue].tid_bitmap;
@@ -468,6 +485,9 @@ static int iwl_mvm_free_inactive_queue(struct iwl_mvm *mvm, int queue,
 
        lockdep_assert_held(&mvm->mutex);
 
+       if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
+               return -EINVAL;
+
        spin_lock_bh(&mvm->queue_info_lock);
        txq_curr_ac = mvm->queue_info[queue].mac80211_ac;
        sta_id = mvm->queue_info[queue].ra_sta_id;
@@ -512,6 +532,8 @@ static int iwl_mvm_get_shared_queue(struct iwl_mvm *mvm,
        int i;
 
        lockdep_assert_held(&mvm->queue_info_lock);
+       if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
+               return -EINVAL;
 
        memset(&ac_to_queue, IEEE80211_INVAL_HW_QUEUE, sizeof(ac_to_queue));
 
@@ -596,6 +618,9 @@ int iwl_mvm_scd_queue_redirect(struct iwl_mvm *mvm, int queue, int tid,
        unsigned long mq;
        int ret;
 
+       if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
+               return -EINVAL;
+
        /*
         * If the AC is lower than current one - FIFO needs to be redirected to
         * the lowest one of the streams in the queue. Check if this is needed
@@ -677,6 +702,41 @@ out:
        return ret;
 }
 
+static int iwl_mvm_sta_alloc_queue_tvqm(struct iwl_mvm *mvm,
+                                       struct ieee80211_sta *sta, u8 ac,
+                                       int tid)
+{
+       struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
+       unsigned int wdg_timeout =
+               iwl_mvm_get_wd_timeout(mvm, mvmsta->vif, false, false);
+       u8 mac_queue = mvmsta->vif->hw_queue[ac];
+       int queue = -1;
+
+       lockdep_assert_held(&mvm->mutex);
+
+       IWL_DEBUG_TX_QUEUES(mvm,
+                           "Allocating queue for sta %d on tid %d\n",
+                           mvmsta->sta_id, tid);
+       queue = iwl_mvm_tvqm_enable_txq(mvm, mac_queue, mvmsta->sta_id, tid,
+                                       wdg_timeout);
+       if (queue < 0)
+               return queue;
+
+       IWL_DEBUG_TX_QUEUES(mvm, "Allocated queue is %d\n", queue);
+
+       spin_lock_bh(&mvmsta->lock);
+       mvmsta->tid_data[tid].txq_id = queue;
+       mvmsta->tid_data[tid].is_tid_active = true;
+       mvmsta->tfd_queue_msk |= BIT(queue);
+       spin_unlock_bh(&mvmsta->lock);
+
+       spin_lock_bh(&mvm->queue_info_lock);
+       mvm->queue_info[queue].status = IWL_MVM_QUEUE_READY;
+       spin_unlock_bh(&mvm->queue_info_lock);
+
+       return 0;
+}
+
 static int iwl_mvm_sta_alloc_queue(struct iwl_mvm *mvm,
                                   struct ieee80211_sta *sta, u8 ac, int tid,
                                   struct ieee80211_hdr *hdr)
@@ -702,6 +762,9 @@ static int iwl_mvm_sta_alloc_queue(struct iwl_mvm *mvm,
 
        lockdep_assert_held(&mvm->mutex);
 
+       if (iwl_mvm_has_new_tx_api(mvm))
+               return iwl_mvm_sta_alloc_queue_tvqm(mvm, sta, ac, tid);
+
        spin_lock_bh(&mvmsta->lock);
        tfd_queue_mask = mvmsta->tfd_queue_msk;
        spin_unlock_bh(&mvmsta->lock);
@@ -880,6 +943,9 @@ static void iwl_mvm_change_queue_owner(struct iwl_mvm *mvm, int queue)
 
        lockdep_assert_held(&mvm->mutex);
 
+       if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
+               return;
+
        spin_lock_bh(&mvm->queue_info_lock);
        tid_bitmap = mvm->queue_info[queue].tid_bitmap;
        spin_unlock_bh(&mvm->queue_info_lock);
@@ -917,6 +983,10 @@ static void iwl_mvm_unshare_queue(struct iwl_mvm *mvm, int queue)
        int ssn;
        int ret = true;
 
+       /* queue sharing is disabled on new TX path */
+       if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
+               return;
+
        lockdep_assert_held(&mvm->mutex);
 
        spin_lock_bh(&mvm->queue_info_lock);
@@ -1199,18 +1269,30 @@ static void iwl_mvm_realloc_queues_after_restart(struct iwl_mvm *mvm,
                ac = tid_to_mac80211_ac[i];
                mac_queue = mvm_sta->vif->hw_queue[ac];
 
-               cfg.tid = i;
-               cfg.fifo = iwl_mvm_ac_to_tx_fifo[ac];
-               cfg.aggregate = (txq_id >= IWL_MVM_DQA_MIN_DATA_QUEUE ||
-                                txq_id == IWL_MVM_DQA_BSS_CLIENT_QUEUE);
+               if (iwl_mvm_has_new_tx_api(mvm)) {
+                       IWL_DEBUG_TX_QUEUES(mvm,
+                                           "Re-mapping sta %d tid %d\n",
+                                           mvm_sta->sta_id, i);
+                       txq_id = iwl_mvm_tvqm_enable_txq(mvm, mac_queue,
+                                                        mvm_sta->sta_id,
+                                                        i, wdg_timeout);
+                       tid_data->txq_id = txq_id;
+               } else {
+                       u16 seq = IEEE80211_SEQ_TO_SN(tid_data->seq_number);
 
-               IWL_DEBUG_TX_QUEUES(mvm,
-                                   "Re-mapping sta %d tid %d to queue %d\n",
-                                   mvm_sta->sta_id, i, txq_id);
+                       cfg.tid = i;
+                       cfg.fifo = iwl_mvm_ac_to_tx_fifo[ac];
+                       cfg.aggregate = (txq_id >= IWL_MVM_DQA_MIN_DATA_QUEUE ||
+                                        txq_id ==
+                                        IWL_MVM_DQA_BSS_CLIENT_QUEUE);
 
-               iwl_mvm_enable_txq(mvm, txq_id, mac_queue,
-                                  IEEE80211_SEQ_TO_SN(tid_data->seq_number),
-                                  &cfg, wdg_timeout);
+                       IWL_DEBUG_TX_QUEUES(mvm,
+                                           "Re-mapping sta %d tid %d to queue %d\n",
+                                           mvm_sta->sta_id, i, txq_id);
+
+                       iwl_mvm_enable_txq(mvm, txq_id, mac_queue, seq, &cfg,
+                                          wdg_timeout);
+               }
 
                mvm->queue_info[txq_id].status = IWL_MVM_QUEUE_READY;
        }
@@ -1235,7 +1317,7 @@ int iwl_mvm_add_sta(struct iwl_mvm *mvm,
        else
                sta_id = mvm_sta->sta_id;
 
-       if (sta_id == IWL_MVM_STATION_COUNT)
+       if (sta_id == IWL_MVM_INVALID_STA)
                return -ENOSPC;
 
        spin_lock_init(&mvm_sta->lock);
@@ -1317,10 +1399,10 @@ update_fw:
 
        if (vif->type == NL80211_IFTYPE_STATION) {
                if (!sta->tdls) {
-                       WARN_ON(mvmvif->ap_sta_id != IWL_MVM_STATION_COUNT);
+                       WARN_ON(mvmvif->ap_sta_id != IWL_MVM_INVALID_STA);
                        mvmvif->ap_sta_id = sta_id;
                } else {
-                       WARN_ON(mvmvif->ap_sta_id == IWL_MVM_STATION_COUNT);
+                       WARN_ON(mvmvif->ap_sta_id == IWL_MVM_INVALID_STA);
                }
        }
 
@@ -1571,11 +1653,11 @@ int iwl_mvm_rm_sta(struct iwl_mvm *mvm,
                                return ret;
 
                        /* unassoc - go ahead - remove the AP STA now */
-                       mvmvif->ap_sta_id = IWL_MVM_STATION_COUNT;
+                       mvmvif->ap_sta_id = IWL_MVM_INVALID_STA;
 
                        /* clear d0i3_ap_sta_id if no longer relevant */
                        if (mvm->d0i3_ap_sta_id == sta_id)
-                               mvm->d0i3_ap_sta_id = IWL_MVM_STATION_COUNT;
+                               mvm->d0i3_ap_sta_id = IWL_MVM_INVALID_STA;
                }
        }
 
@@ -1584,7 +1666,7 @@ int iwl_mvm_rm_sta(struct iwl_mvm *mvm,
         * before the STA is removed.
         */
        if (WARN_ON_ONCE(mvm->tdls_cs.peer.sta_id == sta_id)) {
-               mvm->tdls_cs.peer.sta_id = IWL_MVM_STATION_COUNT;
+               mvm->tdls_cs.peer.sta_id = IWL_MVM_INVALID_STA;
                cancel_delayed_work(&mvm->tdls_cs.dwork);
        }
 
@@ -1641,7 +1723,7 @@ int iwl_mvm_allocate_int_sta(struct iwl_mvm *mvm,
 {
        if (!test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) {
                sta->sta_id = iwl_mvm_find_free_sta_id(mvm, iftype);
-               if (WARN_ON_ONCE(sta->sta_id == IWL_MVM_STATION_COUNT))
+               if (WARN_ON_ONCE(sta->sta_id == IWL_MVM_INVALID_STA))
                        return -ENOSPC;
        }
 
@@ -1652,12 +1734,11 @@ int iwl_mvm_allocate_int_sta(struct iwl_mvm *mvm,
        return 0;
 }
 
-static void iwl_mvm_dealloc_int_sta(struct iwl_mvm *mvm,
-                                   struct iwl_mvm_int_sta *sta)
+void iwl_mvm_dealloc_int_sta(struct iwl_mvm *mvm, struct iwl_mvm_int_sta *sta)
 {
        RCU_INIT_POINTER(mvm->fw_id_to_mac_id[sta->sta_id], NULL);
        memset(sta, 0, sizeof(struct iwl_mvm_int_sta));
-       sta->sta_id = IWL_MVM_STATION_COUNT;
+       sta->sta_id = IWL_MVM_INVALID_STA;
 }
 
 static int iwl_mvm_add_int_sta_common(struct iwl_mvm *mvm,
@@ -1676,7 +1757,8 @@ static int iwl_mvm_add_int_sta_common(struct iwl_mvm *mvm,
        cmd.mac_id_n_color = cpu_to_le32(FW_CMD_ID_AND_COLOR(mac_id,
                                                             color));
 
-       cmd.tfd_queue_msk = cpu_to_le32(sta->tfd_queue_msk);
+       if (!iwl_mvm_has_new_tx_api(mvm))
+               cmd.tfd_queue_msk = cpu_to_le32(sta->tfd_queue_msk);
        cmd.tid_disable_tx = cpu_to_le16(0xffff);
 
        if (addr)
@@ -1701,27 +1783,19 @@ static int iwl_mvm_add_int_sta_common(struct iwl_mvm *mvm,
        return ret;
 }
 
-int iwl_mvm_add_aux_sta(struct iwl_mvm *mvm)
+static void iwl_mvm_enable_aux_queue(struct iwl_mvm *mvm)
 {
        unsigned int wdg_timeout = iwlmvm_mod_params.tfd_q_hang_detect ?
                                        mvm->cfg->base_params->wd_timeout :
                                        IWL_WATCHDOG_DISABLED;
-       int ret;
-
-       lockdep_assert_held(&mvm->mutex);
-
-       /* Map Aux queue to fifo - needs to happen before adding Aux station */
-       if (!iwl_mvm_is_dqa_supported(mvm))
-               iwl_mvm_enable_ac_txq(mvm, mvm->aux_queue, mvm->aux_queue,
-                                     IWL_MVM_TX_FIFO_MCAST, 0, wdg_timeout);
-
-       /* Allocate aux station and assign to it the aux queue */
-       ret = iwl_mvm_allocate_int_sta(mvm, &mvm->aux_sta, BIT(mvm->aux_queue),
-                                      NL80211_IFTYPE_UNSPECIFIED);
-       if (ret)
-               return ret;
 
-       if (iwl_mvm_is_dqa_supported(mvm)) {
+       if (iwl_mvm_has_new_tx_api(mvm)) {
+               int queue = iwl_mvm_tvqm_enable_txq(mvm, mvm->aux_queue,
+                                                   mvm->aux_sta.sta_id,
+                                                   IWL_MAX_TID_COUNT,
+                                                   wdg_timeout);
+               mvm->aux_queue = queue;
+       } else if (iwl_mvm_is_dqa_supported(mvm)) {
                struct iwl_trans_txq_scd_cfg cfg = {
                        .fifo = IWL_MVM_TX_FIFO_MCAST,
                        .sta_id = mvm->aux_sta.sta_id,
@@ -1732,14 +1806,43 @@ int iwl_mvm_add_aux_sta(struct iwl_mvm *mvm)
 
                iwl_mvm_enable_txq(mvm, mvm->aux_queue, mvm->aux_queue, 0, &cfg,
                                   wdg_timeout);
+       } else {
+               iwl_mvm_enable_ac_txq(mvm, mvm->aux_queue, mvm->aux_queue,
+                                     IWL_MVM_TX_FIFO_MCAST, 0, wdg_timeout);
        }
+}
 
-       ret = iwl_mvm_add_int_sta_common(mvm, &mvm->aux_sta, NULL,
-                                        MAC_INDEX_AUX, 0);
+int iwl_mvm_add_aux_sta(struct iwl_mvm *mvm)
+{
+       int ret;
 
+       lockdep_assert_held(&mvm->mutex);
+
+       /* Allocate aux station and assign to it the aux queue */
+       ret = iwl_mvm_allocate_int_sta(mvm, &mvm->aux_sta, BIT(mvm->aux_queue),
+                                      NL80211_IFTYPE_UNSPECIFIED);
        if (ret)
+               return ret;
+
+       /* Map Aux queue to fifo - needs to happen before adding Aux station */
+       if (!iwl_mvm_has_new_tx_api(mvm))
+               iwl_mvm_enable_aux_queue(mvm);
+
+       ret = iwl_mvm_add_int_sta_common(mvm, &mvm->aux_sta, NULL,
+                                        MAC_INDEX_AUX, 0);
+       if (ret) {
                iwl_mvm_dealloc_int_sta(mvm, &mvm->aux_sta);
-       return ret;
+               return ret;
+       }
+
+       /*
+        * For a000 firmware and onward we cannot add a queue to a station
+        * unknown to the firmware, so enable the queue only after ADD_STA
+        */
+       if (iwl_mvm_has_new_tx_api(mvm))
+               iwl_mvm_enable_aux_queue(mvm);
+
+       return 0;
 }
 
 int iwl_mvm_add_snif_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
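The restructuring above encodes a firmware ordering constraint: on the new TX API (a000) path a queue can only be bound to a station the firmware already knows, so ADD_STA must precede queue allocation, while older firmware keeps the opposite order. A compact sketch of that dispatch, with stand-in helper names rather than the driver's:

    #include <stdio.h>

    /* stand-in helpers; in the driver these send real firmware commands */
    static int fw_add_station(void)   { puts("ADD_STA");      return 0; }
    static void fw_enable_queue(void) { puts("ENABLE_QUEUE"); }

    static int add_aux_sta(int has_new_tx_api)
    {
            if (!has_new_tx_api)
                    fw_enable_queue();      /* legacy: map the queue first */

            if (fw_add_station())
                    return -1;

            if (has_new_tx_api)
                    fw_enable_queue();      /* a000+: only after ADD_STA */

            return 0;
    }

    int main(void)
    {
            puts("legacy order:"); add_aux_sta(0);
            puts("a000 order:");   add_aux_sta(1);
            return 0;
    }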
@@ -1790,38 +1893,39 @@ int iwl_mvm_send_add_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
        struct iwl_mvm_int_sta *bsta = &mvmvif->bcast_sta;
        static const u8 _baddr[] = {0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF};
        const u8 *baddr = _baddr;
+       int queue = 0;
        int ret;
+       unsigned int wdg_timeout =
+               iwl_mvm_get_wd_timeout(mvm, vif, false, false);
+       struct iwl_trans_txq_scd_cfg cfg = {
+               .fifo = IWL_MVM_TX_FIFO_VO,
+               .sta_id = mvmvif->bcast_sta.sta_id,
+               .tid = IWL_MAX_TID_COUNT,
+               .aggregate = false,
+               .frame_limit = IWL_FRAME_LIMIT,
+       };
 
        lockdep_assert_held(&mvm->mutex);
 
-       if (iwl_mvm_is_dqa_supported(mvm)) {
-               struct iwl_trans_txq_scd_cfg cfg = {
-                       .fifo = IWL_MVM_TX_FIFO_VO,
-                       .sta_id = mvmvif->bcast_sta.sta_id,
-                       .tid = IWL_MAX_TID_COUNT,
-                       .aggregate = false,
-                       .frame_limit = IWL_FRAME_LIMIT,
-               };
-               unsigned int wdg_timeout =
-                       iwl_mvm_get_wd_timeout(mvm, vif, false, false);
-               int queue;
-
-               if (vif->type == NL80211_IFTYPE_AP)
-                       queue = IWL_MVM_DQA_AP_PROBE_RESP_QUEUE;
+       if (iwl_mvm_is_dqa_supported(mvm) && !iwl_mvm_has_new_tx_api(mvm)) {
+               if (vif->type == NL80211_IFTYPE_AP ||
+                   vif->type == NL80211_IFTYPE_ADHOC)
+                       queue = mvm->probe_queue;
                else if (vif->type == NL80211_IFTYPE_P2P_DEVICE)
-                       queue = IWL_MVM_DQA_P2P_DEVICE_QUEUE;
+                       queue = mvm->p2p_dev_queue;
                else if (WARN(1, "Missing required TXQ for adding bcast STA\n"))
                        return -EINVAL;
 
-               iwl_mvm_enable_txq(mvm, queue, vif->hw_queue[0], 0, &cfg,
-                                  wdg_timeout);
                bsta->tfd_queue_msk |= BIT(queue);
+
+               iwl_mvm_enable_txq(mvm, queue, vif->hw_queue[0], 0,
+                                  &cfg, wdg_timeout);
        }
 
        if (vif->type == NL80211_IFTYPE_ADHOC)
                baddr = vif->bss_conf.bssid;
 
-       if (WARN_ON_ONCE(bsta->sta_id == IWL_MVM_STATION_COUNT))
+       if (WARN_ON_ONCE(bsta->sta_id == IWL_MVM_INVALID_STA))
                return -ENOSPC;
 
        ret = iwl_mvm_add_int_sta_common(mvm, bsta, baddr,
@@ -1830,26 +1934,20 @@ int iwl_mvm_send_add_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
                return ret;
 
        /*
-        * In AP vif type, we also need to enable the cab_queue. However, we
-        * have to enable it after the ADD_STA command is sent, otherwise the
-        * FW will throw an assert once we send the ADD_STA command (it'll
-        * detect a mismatch in the tfd_queue_msk, as we can't add the
-        * enabled-cab_queue to the mask)
+        * For a000 firmware and onward we cannot add a queue to a station
+        * unknown to the firmware, so enable the queue only after ADD_STA
         */
-       if (iwl_mvm_is_dqa_supported(mvm) &&
-           vif->type == NL80211_IFTYPE_AP) {
-               struct iwl_trans_txq_scd_cfg cfg = {
-                       .fifo = IWL_MVM_TX_FIFO_MCAST,
-                       .sta_id = mvmvif->bcast_sta.sta_id,
-                       .tid = IWL_MAX_TID_COUNT,
-                       .aggregate = false,
-                       .frame_limit = IWL_FRAME_LIMIT,
-               };
-               unsigned int wdg_timeout =
-                       iwl_mvm_get_wd_timeout(mvm, vif, false, false);
+       if (iwl_mvm_has_new_tx_api(mvm)) {
+               int queue = iwl_mvm_tvqm_enable_txq(mvm, vif->hw_queue[0],
+                                                   bsta->sta_id,
+                                                   IWL_MAX_TID_COUNT,
+                                                   wdg_timeout);
+               if (vif->type == NL80211_IFTYPE_AP)
+                       mvm->probe_queue = queue;
+               else if (vif->type == NL80211_IFTYPE_P2P_DEVICE)
+                       mvm->p2p_dev_queue = queue;
 
-               iwl_mvm_enable_txq(mvm, vif->cab_queue, vif->cab_queue,
-                                  0, &cfg, wdg_timeout);
+               bsta->tfd_queue_msk |= BIT(queue);
        }
 
        return 0;
@@ -1862,28 +1960,23 @@ static void iwl_mvm_free_bcast_sta_queues(struct iwl_mvm *mvm,
 
        lockdep_assert_held(&mvm->mutex);
 
-       if (vif->type == NL80211_IFTYPE_AP)
+       if (vif->type == NL80211_IFTYPE_AP ||
+           vif->type == NL80211_IFTYPE_ADHOC)
                iwl_mvm_disable_txq(mvm, vif->cab_queue, vif->cab_queue,
                                    IWL_MAX_TID_COUNT, 0);
 
-       if (mvmvif->bcast_sta.tfd_queue_msk &
-           BIT(IWL_MVM_DQA_AP_PROBE_RESP_QUEUE)) {
-               iwl_mvm_disable_txq(mvm,
-                                   IWL_MVM_DQA_AP_PROBE_RESP_QUEUE,
+       if (mvmvif->bcast_sta.tfd_queue_msk & BIT(mvm->probe_queue)) {
+               iwl_mvm_disable_txq(mvm, mvm->probe_queue,
                                    vif->hw_queue[0], IWL_MAX_TID_COUNT,
                                    0);
-               mvmvif->bcast_sta.tfd_queue_msk &=
-                       ~BIT(IWL_MVM_DQA_AP_PROBE_RESP_QUEUE);
+               mvmvif->bcast_sta.tfd_queue_msk &= ~BIT(mvm->probe_queue);
        }
 
-       if (mvmvif->bcast_sta.tfd_queue_msk &
-           BIT(IWL_MVM_DQA_P2P_DEVICE_QUEUE)) {
-               iwl_mvm_disable_txq(mvm,
-                                   IWL_MVM_DQA_P2P_DEVICE_QUEUE,
+       if (mvmvif->bcast_sta.tfd_queue_msk & BIT(mvm->p2p_dev_queue)) {
+               iwl_mvm_disable_txq(mvm, mvm->p2p_dev_queue,
                                    vif->hw_queue[0], IWL_MAX_TID_COUNT,
                                    0);
-               mvmvif->bcast_sta.tfd_queue_msk &=
-                       ~BIT(IWL_MVM_DQA_P2P_DEVICE_QUEUE);
+               mvmvif->bcast_sta.tfd_queue_msk &= ~BIT(mvm->p2p_dev_queue);
        }
 }
 
@@ -1979,6 +2072,88 @@ int iwl_mvm_rm_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
        return ret;
 }
 
+/*
+ * Allocate a new station entry for the multicast station attached to the
+ * given vif, and send it to the FW.
+ * Note that each AP/GO mac should have its own multicast station.
+ *
+ * @mvm: the mvm component
+ * @vif: the interface to which the multicast station is added
+ */
+int iwl_mvm_add_mcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
+{
+       struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+       struct iwl_mvm_int_sta *msta = &mvmvif->mcast_sta;
+       static const u8 _maddr[] = {0x03, 0x00, 0x00, 0x00, 0x00, 0x00};
+       const u8 *maddr = _maddr;
+       struct iwl_trans_txq_scd_cfg cfg = {
+               .fifo = IWL_MVM_TX_FIFO_MCAST,
+               .sta_id = msta->sta_id,
+               .tid = IWL_MAX_TID_COUNT,
+               .aggregate = false,
+               .frame_limit = IWL_FRAME_LIMIT,
+       };
+       unsigned int timeout = iwl_mvm_get_wd_timeout(mvm, vif, false, false);
+       int ret;
+
+       lockdep_assert_held(&mvm->mutex);
+
+       if (!iwl_mvm_is_dqa_supported(mvm))
+               return 0;
+
+       if (WARN_ON(vif->type != NL80211_IFTYPE_AP))
+               return -ENOTSUPP;
+
+       ret = iwl_mvm_add_int_sta_common(mvm, msta, maddr,
+                                        mvmvif->id, mvmvif->color);
+       if (ret) {
+               iwl_mvm_dealloc_int_sta(mvm, msta);
+               return ret;
+       }
+
+       /*
+        * Enable cab queue after the ADD_STA command is sent.
+        * This is needed for a000 firmware, which won't accept an
+        * SCD_QUEUE_CFG command with an unknown station id.
+        */
+       if (iwl_mvm_has_new_tx_api(mvm)) {
+               int queue = iwl_mvm_tvqm_enable_txq(mvm, vif->cab_queue,
+                                                   msta->sta_id,
+                                                   IWL_MAX_TID_COUNT,
+                                                   timeout);
+               vif->cab_queue = queue;
+       } else {
+               iwl_mvm_enable_txq(mvm, vif->cab_queue, vif->cab_queue, 0,
+                                  &cfg, timeout);
+       }
+
+       return 0;
+}
+
+/*
+ * Send the FW a request to remove the station from its internal data
+ * structures, and in addition remove it from the local data structure.
+ */
+int iwl_mvm_rm_mcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
+{
+       struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+       int ret;
+
+       lockdep_assert_held(&mvm->mutex);
+
+       if (!iwl_mvm_is_dqa_supported(mvm))
+               return 0;
+
+       iwl_mvm_disable_txq(mvm, vif->cab_queue, vif->cab_queue,
+                           IWL_MAX_TID_COUNT, 0);
+
+       ret = iwl_mvm_rm_sta_common(mvm, mvmvif->mcast_sta.sta_id);
+       if (ret)
+               IWL_WARN(mvm, "Failed sending remove station\n");
+
+       return ret;
+}
+
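Removal runs in the reverse order of setup: the cab queue is disabled first, then the multicast station is removed from the firmware. A minimal sketch of that pairing, again with invented helper names:

    #include <stdio.h>

    static void fw_disable_queue(void)  { puts("disable cab queue"); }
    static int  fw_remove_station(void) { puts("REMOVE_STA"); return 0; }

    /* teardown mirrors setup in reverse: queue first, then the station */
    static int rm_mcast_sta(void)
    {
            fw_disable_queue();
            return fw_remove_station();
    }

    int main(void)
    {
            return rm_mcast_sta();
    }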
 #define IWL_MAX_RX_BA_SESSIONS 16
 
 static void iwl_mvm_sync_rxq_del_ba(struct iwl_mvm *mvm, u8 baid)
@@ -2056,6 +2231,7 @@ static void iwl_mvm_init_reorder_buffer(struct iwl_mvm *mvm,
                reorder_buf->mvm = mvm;
                reorder_buf->queue = i;
                reorder_buf->sta_id = sta_id;
+               reorder_buf->valid = false;
                for (j = 0; j < reorder_buf->buf_size; j++)
                        __skb_queue_head_init(&reorder_buf->entries[j]);
        }
@@ -2223,7 +2399,9 @@ int iwl_mvm_sta_tx_agg(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
        cmd.mac_id_n_color = cpu_to_le32(mvm_sta->mac_id_n_color);
        cmd.sta_id = mvm_sta->sta_id;
        cmd.add_modify = STA_MODE_MODIFY;
-       cmd.modify_mask = STA_MODIFY_QUEUES | STA_MODIFY_TID_DISABLE_TX;
+       if (!iwl_mvm_has_new_tx_api(mvm))
+               cmd.modify_mask = STA_MODIFY_QUEUES;
+       cmd.modify_mask |= STA_MODIFY_TID_DISABLE_TX;
        cmd.tfd_queue_msk = cpu_to_le32(mvm_sta->tfd_queue_msk);
        cmd.tid_disable_tx = cpu_to_le16(mvm_sta->tid_disable_agg);
 
@@ -2423,6 +2601,13 @@ int iwl_mvm_sta_tx_agg_oper(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
                 * changed from current (become smaller)
                 */
                if (!alloc_queue && buf_size < mvmsta->max_agg_bufsize) {
+                       /*
+                        * On the new TX API, rate scaling and the BA
+                        * manager are offloaded to firmware; for now,
+                        * don't support reconfiguring an existing queue
+                        */
+                       if (iwl_mvm_has_new_tx_api(mvm))
+                               return -ENOTSUPP;
+
                        /*
                         * If reconfiguring an existing queue, it first must be
                         * drained
@@ -2672,7 +2857,7 @@ static struct iwl_mvm_sta *iwl_mvm_get_key_sta(struct iwl_mvm *mvm,
         * station ID, then use AP's station ID.
         */
        if (vif->type == NL80211_IFTYPE_STATION &&
-           mvmvif->ap_sta_id != IWL_MVM_STATION_COUNT) {
+           mvmvif->ap_sta_id != IWL_MVM_INVALID_STA) {
                u8 sta_id = mvmvif->ap_sta_id;
 
                sta = rcu_dereference_check(mvm->fw_id_to_mac_id[sta_id],
@@ -2694,68 +2879,97 @@ static struct iwl_mvm_sta *iwl_mvm_get_key_sta(struct iwl_mvm *mvm,
 
 static int iwl_mvm_send_sta_key(struct iwl_mvm *mvm,
                                struct iwl_mvm_sta *mvm_sta,
-                               struct ieee80211_key_conf *keyconf, bool mcast,
+                               struct ieee80211_key_conf *key, bool mcast,
                                u32 tkip_iv32, u16 *tkip_p1k, u32 cmd_flags,
                                u8 key_offset)
 {
-       struct iwl_mvm_add_sta_key_cmd cmd = {};
+       union {
+               struct iwl_mvm_add_sta_key_cmd_v1 cmd_v1;
+               struct iwl_mvm_add_sta_key_cmd cmd;
+       } u = {};
        __le16 key_flags;
        int ret;
        u32 status;
        u16 keyidx;
-       int i;
-       u8 sta_id = mvm_sta->sta_id;
+       u64 pn = 0;
+       int i, size;
+       bool new_api = fw_has_api(&mvm->fw->ucode_capa,
+                                 IWL_UCODE_TLV_API_TKIP_MIC_KEYS);
 
-       keyidx = (keyconf->keyidx << STA_KEY_FLG_KEYID_POS) &
+       keyidx = (key->keyidx << STA_KEY_FLG_KEYID_POS) &
                 STA_KEY_FLG_KEYID_MSK;
        key_flags = cpu_to_le16(keyidx);
        key_flags |= cpu_to_le16(STA_KEY_FLG_WEP_KEY_MAP);
 
-       switch (keyconf->cipher) {
+       switch (key->cipher) {
        case WLAN_CIPHER_SUITE_TKIP:
                key_flags |= cpu_to_le16(STA_KEY_FLG_TKIP);
-               cmd.tkip_rx_tsc_byte2 = tkip_iv32;
-               for (i = 0; i < 5; i++)
-                       cmd.tkip_rx_ttak[i] = cpu_to_le16(tkip_p1k[i]);
-               memcpy(cmd.key, keyconf->key, keyconf->keylen);
+               if (new_api) {
+                       memcpy((void *)&u.cmd.tx_mic_key,
+                              &key->key[NL80211_TKIP_DATA_OFFSET_TX_MIC_KEY],
+                              IWL_MIC_KEY_SIZE);
+
+                       memcpy((void *)&u.cmd.rx_mic_key,
+                              &key->key[NL80211_TKIP_DATA_OFFSET_RX_MIC_KEY],
+                              IWL_MIC_KEY_SIZE);
+                       pn = atomic64_read(&key->tx_pn);
+
+               } else {
+                       u.cmd_v1.tkip_rx_tsc_byte2 = tkip_iv32;
+                       for (i = 0; i < 5; i++)
+                               u.cmd_v1.tkip_rx_ttak[i] =
+                                       cpu_to_le16(tkip_p1k[i]);
+               }
+               memcpy(u.cmd.common.key, key->key, key->keylen);
                break;
        case WLAN_CIPHER_SUITE_CCMP:
                key_flags |= cpu_to_le16(STA_KEY_FLG_CCM);
-               memcpy(cmd.key, keyconf->key, keyconf->keylen);
+               memcpy(u.cmd.common.key, key->key, key->keylen);
+               if (new_api)
+                       pn = atomic64_read(&key->tx_pn);
                break;
        case WLAN_CIPHER_SUITE_WEP104:
                key_flags |= cpu_to_le16(STA_KEY_FLG_WEP_13BYTES);
                /* fall through */
        case WLAN_CIPHER_SUITE_WEP40:
                key_flags |= cpu_to_le16(STA_KEY_FLG_WEP);
-               memcpy(cmd.key + 3, keyconf->key, keyconf->keylen);
+               memcpy(u.cmd.common.key + 3, key->key, key->keylen);
                break;
        case WLAN_CIPHER_SUITE_GCMP_256:
                key_flags |= cpu_to_le16(STA_KEY_FLG_KEY_32BYTES);
                /* fall through */
        case WLAN_CIPHER_SUITE_GCMP:
                key_flags |= cpu_to_le16(STA_KEY_FLG_GCMP);
-               memcpy(cmd.key, keyconf->key, keyconf->keylen);
+               memcpy(u.cmd.common.key, key->key, key->keylen);
+               if (new_api)
+                       pn = atomic64_read(&key->tx_pn);
                break;
        default:
                key_flags |= cpu_to_le16(STA_KEY_FLG_EXT);
-               memcpy(cmd.key, keyconf->key, keyconf->keylen);
+               memcpy(u.cmd.common.key, key->key, key->keylen);
        }
 
        if (mcast)
                key_flags |= cpu_to_le16(STA_KEY_MULTICAST);
 
-       cmd.key_offset = key_offset;
-       cmd.key_flags = key_flags;
-       cmd.sta_id = sta_id;
+       u.cmd.common.key_offset = key_offset;
+       u.cmd.common.key_flags = key_flags;
+       u.cmd.common.sta_id = mvm_sta->sta_id;
+
+       if (new_api) {
+               u.cmd.transmit_seq_cnt = cpu_to_le64(pn);
+               size = sizeof(u.cmd);
+       } else {
+               size = sizeof(u.cmd_v1);
+       }
 
        status = ADD_STA_SUCCESS;
        if (cmd_flags & CMD_ASYNC)
-               ret =  iwl_mvm_send_cmd_pdu(mvm, ADD_STA_KEY, CMD_ASYNC,
-                                           sizeof(cmd), &cmd);
+               ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA_KEY, CMD_ASYNC, size,
+                                          &u.cmd);
        else
-               ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA_KEY, sizeof(cmd),
-                                                 &cmd, &status);
+               ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA_KEY, size,
+                                                 &u.cmd, &status);
 
        switch (status) {
        case ADD_STA_SUCCESS:
@@ -2855,7 +3069,7 @@ static inline u8 *iwl_mvm_get_mac_addr(struct iwl_mvm *mvm,
                return sta->addr;
 
        if (vif->type == NL80211_IFTYPE_STATION &&
-           mvmvif->ap_sta_id != IWL_MVM_STATION_COUNT) {
+           mvmvif->ap_sta_id != IWL_MVM_INVALID_STA) {
                u8 sta_id = mvmvif->ap_sta_id;
                sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
                                                lockdep_is_held(&mvm->mutex));
@@ -2908,9 +3122,14 @@ static int __iwl_mvm_remove_sta_key(struct iwl_mvm *mvm, u8 sta_id,
                                    struct ieee80211_key_conf *keyconf,
                                    bool mcast)
 {
-       struct iwl_mvm_add_sta_key_cmd cmd = {};
+       union {
+               struct iwl_mvm_add_sta_key_cmd_v1 cmd_v1;
+               struct iwl_mvm_add_sta_key_cmd cmd;
+       } u = {};
+       bool new_api = fw_has_api(&mvm->fw->ucode_capa,
+                                 IWL_UCODE_TLV_API_TKIP_MIC_KEYS);
        __le16 key_flags;
-       int ret;
+       int ret, size;
        u32 status;
 
        key_flags = cpu_to_le16((keyconf->keyidx << STA_KEY_FLG_KEYID_POS) &
@@ -2921,13 +3140,19 @@ static int __iwl_mvm_remove_sta_key(struct iwl_mvm *mvm, u8 sta_id,
        if (mcast)
                key_flags |= cpu_to_le16(STA_KEY_MULTICAST);
 
-       cmd.key_flags = key_flags;
-       cmd.key_offset = keyconf->hw_key_idx;
-       cmd.sta_id = sta_id;
+       /*
+        * The fields assigned here are in the same location at the start
+        * of the command, so we can do this union trick.
+        */
+       u.cmd.common.key_flags = key_flags;
+       u.cmd.common.key_offset = keyconf->hw_key_idx;
+       u.cmd.common.sta_id = sta_id;
+
+       size = new_api ? sizeof(u.cmd) : sizeof(u.cmd_v1);
 
        status = ADD_STA_SUCCESS;
-       ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA_KEY, sizeof(cmd),
-                                         &cmd, &status);
+       ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA_KEY, size, &u.cmd,
+                                         &status);
 
        switch (status) {
        case ADD_STA_SUCCESS:
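The union trick noted in the comment works because both command versions share an identical prefix, so the code fills the common fields through either member and only decides how many bytes to send at the end. A self-contained sketch of the layout - the struct and field names here are invented for illustration:

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    struct key_cmd_common {             /* shared prefix of both versions */
            uint8_t  sta_id;
            uint8_t  key_offset;
            uint16_t key_flags;
    };

    struct key_cmd_v1 {                 /* legacy layout */
            struct key_cmd_common common;
            uint8_t  rx_ttak[10];
    };

    struct key_cmd {                    /* current layout */
            struct key_cmd_common common;
            uint64_t transmit_seq_cnt;
    };

    int main(void)
    {
            union {
                    struct key_cmd_v1 cmd_v1;
                    struct key_cmd    cmd;
            } u;
            int new_api = 1;

            memset(&u, 0, sizeof(u));
            /* the prefix sits at the same offset in both versions */
            u.cmd.common.sta_id = 3;
            u.cmd.common.key_offset = 1;

            size_t size = new_api ? sizeof(u.cmd) : sizeof(u.cmd_v1);
            printf("sending %zu bytes\n", size);
            return 0;
    }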
@@ -3041,7 +3266,7 @@ int iwl_mvm_remove_sta_key(struct iwl_mvm *mvm,
 {
        bool mcast = !(keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE);
        struct iwl_mvm_sta *mvm_sta;
-       u8 sta_id = IWL_MVM_STATION_COUNT;
+       u8 sta_id = IWL_MVM_INVALID_STA;
        int ret, i;
 
        lockdep_assert_held(&mvm->mutex);
@@ -3135,7 +3360,7 @@ void iwl_mvm_sta_modify_sleep_tx_count(struct iwl_mvm *mvm,
                                       struct ieee80211_sta *sta,
                                       enum ieee80211_frame_release_type reason,
                                       u16 cnt, u16 tids, bool more_data,
-                                      bool agg)
+                                      bool single_sta_queue)
 {
        struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
        struct iwl_mvm_add_sta_cmd cmd = {
@@ -3155,14 +3380,14 @@ void iwl_mvm_sta_modify_sleep_tx_count(struct iwl_mvm *mvm,
        for_each_set_bit(tid, &_tids, IWL_MAX_TID_COUNT)
                cmd.awake_acs |= BIT(tid_to_ucode_ac[tid]);
 
-       /* If we're releasing frames from aggregation queues then check if the
-        * all queues combined that we're releasing frames from have
+       /* If we're releasing frames from aggregation or dqa queues then check
+        * if all the queues that we're releasing frames from, combined, have:
         *  - more frames than the service period, in which case more_data
         *    needs to be set
         *  - fewer than 'cnt' frames, in which case we need to adjust the
         *    firmware command (but do that unconditionally)
         */
-       if (agg) {
+       if (single_sta_queue) {
                int remaining = cnt;
                int sleep_tx_count;
 
@@ -3172,7 +3397,8 @@ void iwl_mvm_sta_modify_sleep_tx_count(struct iwl_mvm *mvm,
                        u16 n_queued;
 
                        tid_data = &mvmsta->tid_data[tid];
-                       if (WARN(tid_data->state != IWL_AGG_ON &&
+                       if (WARN(!iwl_mvm_is_dqa_supported(mvm) &&
+                                tid_data->state != IWL_AGG_ON &&
                                 tid_data->state != IWL_EMPTYING_HW_QUEUE_DELBA,
                                 "TID %d state is %d\n",
                                 tid, tid_data->state)) {
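The release accounting in this function caps sleep_tx_count at what is actually queued and raises more_data when frames would remain after the service period. A minimal model of that bookkeeping, using made-up queue depths:

    #include <stdio.h>

    int main(void)
    {
            int queued[3] = { 4, 1, 0 };    /* example per-TID depths */
            int cnt = 3;                    /* frames the peer asked for */
            int total = 0;

            for (int tid = 0; tid < 3; tid++)
                    total += queued[tid];

            /* never tell the fw to release more than is actually queued */
            int sleep_tx_count = total < cnt ? total : cnt;
            /* if more is queued than this service period covers, say so */
            int more_data = total > cnt;

            printf("sleep_tx_count=%d more_data=%d\n",
                   sleep_tx_count, more_data);
            return 0;
    }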
@@ -3297,7 +3523,7 @@ void iwl_mvm_modify_all_sta_disable_tx(struct iwl_mvm *mvm,
        lockdep_assert_held(&mvm->mutex);
 
        /* Block/unblock all the stations of the given mvmvif */
-       for (i = 0; i < IWL_MVM_STATION_COUNT; i++) {
+       for (i = 0; i < ARRAY_SIZE(mvm->fw_id_to_mac_id); i++) {
                sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[i],
                                                lockdep_is_held(&mvm->mutex));
                if (IS_ERR_OR_NULL(sta))
index 4be34f902278c8bb36521346dfcd7980976e8786..a143a8757e278e4535897d2deb587870c78b314e 100644 (file)
@@ -532,10 +532,13 @@ int iwl_mvm_send_add_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif);
 int iwl_mvm_add_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif);
 int iwl_mvm_send_rm_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif);
 int iwl_mvm_rm_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif);
+int iwl_mvm_add_mcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif);
+int iwl_mvm_rm_mcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif);
 int iwl_mvm_allocate_int_sta(struct iwl_mvm *mvm,
                             struct iwl_mvm_int_sta *sta,
                                    u32 qmask, enum nl80211_iftype iftype);
 void iwl_mvm_dealloc_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif);
+void iwl_mvm_dealloc_int_sta(struct iwl_mvm *mvm, struct iwl_mvm_int_sta *sta);
 int iwl_mvm_add_snif_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif);
 int iwl_mvm_rm_snif_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif);
 void iwl_mvm_dealloc_snif_sta(struct iwl_mvm *mvm);
@@ -547,7 +550,7 @@ void iwl_mvm_sta_modify_sleep_tx_count(struct iwl_mvm *mvm,
                                       struct ieee80211_sta *sta,
                                       enum ieee80211_frame_release_type reason,
                                       u16 cnt, u16 tids, bool more_data,
-                                      bool agg);
+                                      bool single_sta_queue);
 int iwl_mvm_drain_sta(struct iwl_mvm *mvm, struct iwl_mvm_sta *mvmsta,
                      bool drain);
 void iwl_mvm_sta_modify_disable_tx(struct iwl_mvm *mvm,
index 9f160fc58cd079395cafeff5995574d22225a161..df7cd87199ea298565729a79c561117cbcc5edc1 100644 (file)
@@ -6,6 +6,7 @@
  * GPL LICENSE SUMMARY
  *
  * Copyright(c) 2014 Intel Mobile Communications GmbH
+ * Copyright(c) 2017 Intel Deutschland GmbH
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of version 2 of the GNU General Public License as
@@ -31,6 +32,7 @@
  * BSD LICENSE
  *
  * Copyright(c) 2014 Intel Mobile Communications GmbH
+ * Copyright(c) 2017 Intel Deutschland GmbH
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -78,7 +80,7 @@ void iwl_mvm_teardown_tdls_peers(struct iwl_mvm *mvm)
 
        lockdep_assert_held(&mvm->mutex);
 
-       for (i = 0; i < IWL_MVM_STATION_COUNT; i++) {
+       for (i = 0; i < ARRAY_SIZE(mvm->fw_id_to_mac_id); i++) {
                sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[i],
                                                lockdep_is_held(&mvm->mutex));
                if (!sta || IS_ERR(sta) || !sta->tdls)
@@ -101,7 +103,7 @@ int iwl_mvm_tdls_sta_count(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
 
        lockdep_assert_held(&mvm->mutex);
 
-       for (i = 0; i < IWL_MVM_STATION_COUNT; i++) {
+       for (i = 0; i < ARRAY_SIZE(mvm->fw_id_to_mac_id); i++) {
                sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[i],
                                                lockdep_is_held(&mvm->mutex));
                if (!sta || IS_ERR(sta) || !sta->tdls)
@@ -145,7 +147,7 @@ static void iwl_mvm_tdls_config(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
 
        /* populate TDLS peer data */
        cnt = 0;
-       for (i = 0; i < IWL_MVM_STATION_COUNT; i++) {
+       for (i = 0; i < ARRAY_SIZE(mvm->fw_id_to_mac_id); i++) {
                sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[i],
                                                lockdep_is_held(&mvm->mutex));
                if (IS_ERR_OR_NULL(sta) || !sta->tdls)
@@ -251,7 +253,7 @@ static void iwl_mvm_tdls_update_cs_state(struct iwl_mvm *mvm,
                        iwl_read_prph(mvm->trans, DEVICE_SYSTEM_TIME_REG);
 
        if (state == IWL_MVM_TDLS_SW_IDLE)
-               mvm->tdls_cs.cur_sta_id = IWL_MVM_STATION_COUNT;
+               mvm->tdls_cs.cur_sta_id = IWL_MVM_INVALID_STA;
 }
 
 void iwl_mvm_rx_tdls_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb)
@@ -305,7 +307,7 @@ iwl_mvm_tdls_check_action(struct iwl_mvm *mvm,
 
        /* get the existing peer if it's there */
        if (mvm->tdls_cs.state != IWL_MVM_TDLS_SW_IDLE &&
-           mvm->tdls_cs.cur_sta_id != IWL_MVM_STATION_COUNT) {
+           mvm->tdls_cs.cur_sta_id != IWL_MVM_INVALID_STA) {
                struct ieee80211_sta *sta = rcu_dereference_protected(
                                mvm->fw_id_to_mac_id[mvm->tdls_cs.cur_sta_id],
                                lockdep_is_held(&mvm->mutex));
@@ -523,7 +525,7 @@ void iwl_mvm_tdls_ch_switch_work(struct work_struct *work)
        iwl_mvm_tdls_update_cs_state(mvm, IWL_MVM_TDLS_SW_IDLE);
 
        /* station might be gone, in that case do nothing */
-       if (mvm->tdls_cs.peer.sta_id == IWL_MVM_STATION_COUNT)
+       if (mvm->tdls_cs.peer.sta_id == IWL_MVM_INVALID_STA)
                goto out;
 
        sta = rcu_dereference_protected(
@@ -573,7 +575,7 @@ iwl_mvm_tdls_channel_switch(struct ieee80211_hw *hw,
                       sta->addr, chandef->chan->center_freq, chandef->width);
 
        /* we only support a single peer for channel switching */
-       if (mvm->tdls_cs.peer.sta_id != IWL_MVM_STATION_COUNT) {
+       if (mvm->tdls_cs.peer.sta_id != IWL_MVM_INVALID_STA) {
                IWL_DEBUG_TDLS(mvm,
                               "Existing peer. Can't start switch with %pM\n",
                               sta->addr);
@@ -633,7 +635,7 @@ void iwl_mvm_tdls_cancel_channel_switch(struct ieee80211_hw *hw,
        IWL_DEBUG_TDLS(mvm, "TDLS cancel channel switch with %pM\n", sta->addr);
 
        /* we only support a single peer for channel switching */
-       if (mvm->tdls_cs.peer.sta_id == IWL_MVM_STATION_COUNT) {
+       if (mvm->tdls_cs.peer.sta_id == IWL_MVM_INVALID_STA) {
                IWL_DEBUG_TDLS(mvm, "No ch switch peer - %pM\n", sta->addr);
                goto out;
        }
@@ -654,7 +656,7 @@ void iwl_mvm_tdls_cancel_channel_switch(struct ieee80211_hw *hw,
            mvm->tdls_cs.state != IWL_MVM_TDLS_SW_IDLE)
                wait_for_phy = true;
 
-       mvm->tdls_cs.peer.sta_id = IWL_MVM_STATION_COUNT;
+       mvm->tdls_cs.peer.sta_id = IWL_MVM_INVALID_STA;
        dev_kfree_skb(mvm->tdls_cs.peer.skb);
        mvm->tdls_cs.peer.skb = NULL;
 
@@ -697,7 +699,7 @@ iwl_mvm_tdls_recv_channel_switch(struct ieee80211_hw *hw,
        if (params->action_code == WLAN_TDLS_CHANNEL_SWITCH_RESPONSE &&
            params->status != 0 &&
            mvm->tdls_cs.state == IWL_MVM_TDLS_SW_REQ_SENT &&
-           mvm->tdls_cs.cur_sta_id != IWL_MVM_STATION_COUNT) {
+           mvm->tdls_cs.cur_sta_id != IWL_MVM_INVALID_STA) {
                struct ieee80211_sta *cur_sta;
 
                /* make sure it's the same peer */
index a1947d6f3a2cc1e676663885d65bde5b31c11773..16ce8a56b5b94d9b5356011a98e19888be8faef9 100644 (file)
@@ -80,7 +80,7 @@ void iwl_mvm_tof_init(struct iwl_mvm *mvm)
        if (IWL_MVM_TOF_IS_RESPONDER) {
                tof_data->responder_cfg.sub_grp_cmd_id =
                        cpu_to_le32(TOF_RESPONDER_CONFIG_CMD);
-               tof_data->responder_cfg.sta_id = IWL_MVM_STATION_COUNT;
+               tof_data->responder_cfg.sta_id = IWL_MVM_INVALID_STA;
        }
 #endif
 
index bec7d9c46087d3c8fed48d5858f4f116797eceed..f9cbd197246f7ba6ba9e5e0af816cbec54eb9790 100644 (file)
@@ -356,7 +356,7 @@ static void iwl_mvm_tt_tx_protection(struct iwl_mvm *mvm, bool enable)
        struct iwl_mvm_sta *mvmsta;
        int i, err;
 
-       for (i = 0; i < IWL_MVM_STATION_COUNT; i++) {
+       for (i = 0; i < ARRAY_SIZE(mvm->fw_id_to_mac_id); i++) {
                mvmsta = iwl_mvm_sta_from_staid_protected(mvm, i);
                if (!mvmsta)
                        continue;
index dd2b4a30081993823634e18752187c576a656b79..8f737f6cdd80b8458b66b8aace13ab3e33a51d47 100644 (file)
@@ -7,7 +7,7 @@
  *
  * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
  * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
- * Copyright(c) 2016        Intel Deutschland GmbH
+ * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of version 2 of the GNU General Public License as
@@ -34,6 +34,7 @@
  *
  * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
  * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
+ * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -474,6 +475,39 @@ iwl_mvm_set_tx_params(struct iwl_mvm *mvm, struct sk_buff *skb,
 
        memset(dev_cmd, 0, sizeof(*dev_cmd));
        dev_cmd->hdr.cmd = TX_CMD;
+
+       if (iwl_mvm_has_new_tx_api(mvm)) {
+               struct iwl_tx_cmd_gen2 *cmd = (void *)dev_cmd->payload;
+               u16 offload_assist = iwl_mvm_tx_csum(mvm, skb, hdr, info);
+
+               /* padding is inserted later in transport */
+               /* FIXME - the AMSDU check may need to be removed */
+               if (ieee80211_hdrlen(hdr->frame_control) % 4 &&
+                   !(offload_assist & BIT(TX_CMD_OFFLD_AMSDU)))
+                       offload_assist |= BIT(TX_CMD_OFFLD_PAD);
+
+               cmd->offload_assist |= cpu_to_le16(offload_assist);
+
+               /* Total # bytes to be transmitted */
+               cmd->len = cpu_to_le16((u16)skb->len);
+
+               /* Copy MAC header from skb into command buffer */
+               memcpy(cmd->hdr, hdr, hdrlen);
+
+               if (!info->control.hw_key)
+                       cmd->flags |= cpu_to_le32(IWL_TX_FLAGS_ENCRYPT_DIS);
+
+               /* For data packets rate info comes from the fw */
+               if (ieee80211_is_data(hdr->frame_control) && sta)
+                       goto out;
+
+               cmd->flags |= cpu_to_le32(IWL_TX_FLAGS_CMD_RATE);
+               cmd->rate_n_flags =
+                       cpu_to_le32(iwl_mvm_get_tx_rate(mvm, info, sta));
+
+               goto out;
+       }
+
        tx_cmd = (struct iwl_tx_cmd *)dev_cmd->payload;
 
        if (info->control.hw_key)
@@ -483,6 +517,10 @@ iwl_mvm_set_tx_params(struct iwl_mvm *mvm, struct sk_buff *skb,
 
        iwl_mvm_set_tx_cmd_rate(mvm, tx_cmd, info, sta, hdr->frame_control);
 
+       /* Copy MAC header from skb into command buffer */
+       memcpy(tx_cmd->hdr, hdr, hdrlen);
+
+out:
        return dev_cmd;
 }
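On the gen2 path the padding decision above reduces to a single predicate: request padding only when the 802.11 header length is not a multiple of four and the frame is not an A-MSDU. A tiny sketch of that check - the flag bit positions are placeholders:

    #include <stdio.h>

    #define OFFLD_AMSDU (1u << 6)   /* placeholder bit positions */
    #define OFFLD_PAD   (1u << 13)

    static unsigned int offload_assist(int hdrlen, unsigned int flags)
    {
            /* pad e.g. 26-byte QoS headers up to a 4-byte boundary,
             * unless the A-MSDU machinery already handles alignment */
            if ((hdrlen % 4) && !(flags & OFFLD_AMSDU))
                    flags |= OFFLD_PAD;
            return flags;
    }

    int main(void)
    {
            printf("qos hdr (26B):  0x%x\n", offload_assist(26, 0));
            printf("data hdr (24B): 0x%x\n", offload_assist(24, 0));
            return 0;
    }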
 
@@ -505,6 +543,7 @@ static int iwl_mvm_get_ctrl_vif_queue(struct iwl_mvm *mvm,
 
        switch (info->control.vif->type) {
        case NL80211_IFTYPE_AP:
+       case NL80211_IFTYPE_ADHOC:
                /*
                 * Handle legacy hostapd as well, where station may be added
                 * only after assoc. Take care of the case where we send a
@@ -512,20 +551,21 @@ static int iwl_mvm_get_ctrl_vif_queue(struct iwl_mvm *mvm,
                 */
                if (ieee80211_is_probe_resp(fc) || ieee80211_is_auth(fc) ||
                    ieee80211_is_deauth(fc))
-                       return IWL_MVM_DQA_AP_PROBE_RESP_QUEUE;
+                       return mvm->probe_queue;
                if (info->hw_queue == info->control.vif->cab_queue)
                        return info->hw_queue;
 
-               WARN_ONCE(1, "fc=0x%02x", le16_to_cpu(fc));
-               return IWL_MVM_DQA_AP_PROBE_RESP_QUEUE;
+               WARN_ONCE(info->control.vif->type != NL80211_IFTYPE_ADHOC,
+                         "fc=0x%02x", le16_to_cpu(fc));
+               return mvm->probe_queue;
        case NL80211_IFTYPE_P2P_DEVICE:
                if (ieee80211_is_mgmt(fc))
-                       return IWL_MVM_DQA_P2P_DEVICE_QUEUE;
+                       return mvm->p2p_dev_queue;
                if (info->hw_queue == info->control.vif->cab_queue)
                        return info->hw_queue;
 
                WARN_ON_ONCE(1);
-               return IWL_MVM_DQA_P2P_DEVICE_QUEUE;
+               return mvm->p2p_dev_queue;
        default:
                WARN_ONCE(1, "Not a ctrl vif, no available queue\n");
                return -1;
@@ -538,7 +578,6 @@ int iwl_mvm_tx_skb_non_sta(struct iwl_mvm *mvm, struct sk_buff *skb)
        struct ieee80211_tx_info *skb_info = IEEE80211_SKB_CB(skb);
        struct ieee80211_tx_info info;
        struct iwl_device_cmd *dev_cmd;
-       struct iwl_tx_cmd *tx_cmd;
        u8 sta_id;
        int hdrlen = ieee80211_hdrlen(hdr->frame_control);
        int queue;
@@ -583,7 +622,8 @@ int iwl_mvm_tx_skb_non_sta(struct iwl_mvm *mvm, struct sk_buff *skb)
                        iwl_mvm_vif_from_mac80211(info.control.vif);
 
                if (info.control.vif->type == NL80211_IFTYPE_P2P_DEVICE ||
-                   info.control.vif->type == NL80211_IFTYPE_AP) {
+                   info.control.vif->type == NL80211_IFTYPE_AP ||
+                   info.control.vif->type == NL80211_IFTYPE_ADHOC) {
                        sta_id = mvmvif->bcast_sta.sta_id;
                        queue = iwl_mvm_get_ctrl_vif_queue(mvm, &info,
                                                           hdr->frame_control);
@@ -594,7 +634,7 @@ int iwl_mvm_tx_skb_non_sta(struct iwl_mvm *mvm, struct sk_buff *skb)
                           is_multicast_ether_addr(hdr->addr1)) {
                        u8 ap_sta_id = ACCESS_ONCE(mvmvif->ap_sta_id);
 
-                       if (ap_sta_id != IWL_MVM_STATION_COUNT)
+                       if (ap_sta_id != IWL_MVM_INVALID_STA)
                                sta_id = ap_sta_id;
                } else if (iwl_mvm_is_dqa_supported(mvm) &&
                           info.control.vif->type == NL80211_IFTYPE_STATION &&
@@ -612,11 +652,6 @@ int iwl_mvm_tx_skb_non_sta(struct iwl_mvm *mvm, struct sk_buff *skb)
        /* From now on, we cannot access info->control */
        iwl_mvm_skb_prepare_status(skb, dev_cmd);
 
-       tx_cmd = (struct iwl_tx_cmd *)dev_cmd->payload;
-
-       /* Copy MAC header from skb into command buffer */
-       memcpy(tx_cmd->hdr, hdr, hdrlen);
-
        if (iwl_trans_tx(mvm->trans, skb, dev_cmd, queue)) {
                iwl_trans_free_tx_cmd(mvm->trans, dev_cmd);
                return -1;
@@ -628,8 +663,10 @@ int iwl_mvm_tx_skb_non_sta(struct iwl_mvm *mvm, struct sk_buff *skb)
         * values.
         * Note that we don't need to make sure it isn't agg'd, since we're
         * TXing non-sta.
+        * For DQA mode, though, we shouldn't increase it.
         */
-       atomic_inc(&mvm->pending_frames[sta_id]);
+       if (!iwl_mvm_is_dqa_supported(mvm))
+               atomic_inc(&mvm->pending_frames[sta_id]);
 
        return 0;
 }
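
The counter incremented above is drained in the Tx-response path further down in this diff via atomic_sub_and_test() on mvm->pending_frames. A minimal standalone sketch of that inc/dec pairing, with a toy counter standing in for the driver state:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_int pending_frames;	/* stand-in for mvm->pending_frames[sta_id] */

static void toy_tx(bool dqa_supported)
{
	if (!dqa_supported)	/* mirrors the check above: non-DQA only */
		atomic_fetch_add(&pending_frames, 1);
}

static bool toy_reclaim(int skb_freed)
{
	/* true when the counter reaches zero, like atomic_sub_and_test() */
	return atomic_fetch_sub(&pending_frames, skb_freed) == skb_freed;
}

int main(void)
{
	toy_tx(false);
	printf("station drained: %d\n", toy_reclaim(1));	/* -> 1 */
	return 0;
}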
@@ -707,7 +744,7 @@ static int iwl_mvm_tx_tso(struct iwl_mvm *mvm, struct sk_buff *skb,
         * fifo to be able to send bursts.
         */
        max_amsdu_len = min_t(unsigned int, max_amsdu_len,
-                             mvm->shared_mem_cfg.txfifo_size[txf] - 256);
+                             mvm->smem_cfg.lmac[0].txfifo_size[txf] - 256);
 
        if (unlikely(dbg_max_amsdu_len))
                max_amsdu_len = min_t(unsigned int, max_amsdu_len,
@@ -856,6 +893,9 @@ static bool iwl_mvm_txq_should_update(struct iwl_mvm *mvm, int txq_id)
        unsigned long now = jiffies;
        int tid;
 
+       if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
+               return false;
+
        for_each_set_bit(tid, &queue_tid_bitmap, IWL_MAX_TID_COUNT + 1) {
                if (time_before(mvm->queue_info[txq_id].last_frame_time[tid] +
                                IWL_MVM_DQA_QUEUE_TIMEOUT, now))
@@ -875,7 +915,6 @@ static int iwl_mvm_tx_mpdu(struct iwl_mvm *mvm, struct sk_buff *skb,
        struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
        struct iwl_mvm_sta *mvmsta;
        struct iwl_device_cmd *dev_cmd;
-       struct iwl_tx_cmd *tx_cmd;
        __le16 fc;
        u16 seq_number = 0;
        u8 tid = IWL_MAX_TID_COUNT;
@@ -890,7 +929,7 @@ static int iwl_mvm_tx_mpdu(struct iwl_mvm *mvm, struct sk_buff *skb,
        if (WARN_ON_ONCE(!mvmsta))
                return -1;
 
-       if (WARN_ON_ONCE(mvmsta->sta_id == IWL_MVM_STATION_COUNT))
+       if (WARN_ON_ONCE(mvmsta->sta_id == IWL_MVM_INVALID_STA))
                return -1;
 
        dev_cmd = iwl_mvm_set_tx_params(mvm, skb, info, hdrlen,
@@ -898,8 +937,6 @@ static int iwl_mvm_tx_mpdu(struct iwl_mvm *mvm, struct sk_buff *skb,
        if (!dev_cmd)
                goto drop;
 
-       tx_cmd = (struct iwl_tx_cmd *)dev_cmd->payload;
-
        /*
         * we handle that entirely ourselves -- for uAPSD the firmware
         * will always send a notification, and for PS-Poll responses
@@ -920,18 +957,27 @@ static int iwl_mvm_tx_mpdu(struct iwl_mvm *mvm, struct sk_buff *skb,
                if (WARN_ON_ONCE(tid >= IWL_MAX_TID_COUNT))
                        goto drop_unlock_sta;
 
-               seq_number = mvmsta->tid_data[tid].seq_number;
-               seq_number &= IEEE80211_SCTL_SEQ;
-               hdr->seq_ctrl &= cpu_to_le16(IEEE80211_SCTL_FRAG);
-               hdr->seq_ctrl |= cpu_to_le16(seq_number);
                is_ampdu = info->flags & IEEE80211_TX_CTL_AMPDU;
                if (WARN_ON_ONCE(is_ampdu &&
                                 mvmsta->tid_data[tid].state != IWL_AGG_ON))
                        goto drop_unlock_sta;
+
+               seq_number = mvmsta->tid_data[tid].seq_number;
+               seq_number &= IEEE80211_SCTL_SEQ;
+
+               if (!iwl_mvm_has_new_tx_api(mvm)) {
+                       struct iwl_tx_cmd *tx_cmd = (void *)dev_cmd->payload;
+
+                       hdr->seq_ctrl &= cpu_to_le16(IEEE80211_SCTL_FRAG);
+                       hdr->seq_ctrl |= cpu_to_le16(seq_number);
+                       /* update the tx_cmd hdr as it was already copied */
+                       tx_cmd->hdr->seq_ctrl = hdr->seq_ctrl;
+               }
        }
 
        if (iwl_mvm_is_dqa_supported(mvm) || is_ampdu)
                txq_id = mvmsta->tid_data[tid].txq_id;
+
        if (sta->tdls && !iwl_mvm_is_dqa_supported(mvm)) {
                /* default to TID 0 for non-QoS packets */
                u8 tdls_tid = tid == IWL_MAX_TID_COUNT ? 0 : tid;
@@ -939,9 +985,6 @@ static int iwl_mvm_tx_mpdu(struct iwl_mvm *mvm, struct sk_buff *skb,
                txq_id = mvmsta->hw_queue[tid_to_mac80211_ac[tdls_tid]];
        }
 
-       /* Copy MAC header from skb into command buffer */
-       memcpy(tx_cmd->hdr, hdr, hdrlen);
-
        WARN_ON_ONCE(info->flags & IEEE80211_TX_CTL_SEND_AFTER_DTIM);
 
        /* Check if TXQ needs to be allocated or re-activated */
@@ -1005,11 +1048,8 @@ static int iwl_mvm_tx_mpdu(struct iwl_mvm *mvm, struct sk_buff *skb,
 
        spin_unlock(&mvmsta->lock);
 
-       /* Increase pending frames count if this isn't AMPDU */
-       if ((iwl_mvm_is_dqa_supported(mvm) &&
-            mvmsta->tid_data[tx_cmd->tid_tspec].state != IWL_AGG_ON &&
-            mvmsta->tid_data[tx_cmd->tid_tspec].state != IWL_AGG_STARTING) ||
-           (!iwl_mvm_is_dqa_supported(mvm) && !is_ampdu))
+       /* Increase pending frames count if this isn't AMPDU or DQA queue */
+       if (!iwl_mvm_is_dqa_supported(mvm) && !is_ampdu)
                atomic_inc(&mvm->pending_frames[mvmsta->sta_id]);
 
        return 0;
@@ -1033,7 +1073,7 @@ int iwl_mvm_tx_skb(struct iwl_mvm *mvm, struct sk_buff *skb,
        if (WARN_ON_ONCE(!mvmsta))
                return -1;
 
-       if (WARN_ON_ONCE(mvmsta->sta_id == IWL_MVM_STATION_COUNT))
+       if (WARN_ON_ONCE(mvmsta->sta_id == IWL_MVM_INVALID_STA))
                return -1;
 
        memcpy(&info, skb->cb, sizeof(info));
@@ -1079,12 +1119,13 @@ static void iwl_mvm_check_ratid_empty(struct iwl_mvm *mvm,
        lockdep_assert_held(&mvmsta->lock);
 
        if ((tid_data->state == IWL_AGG_ON ||
-            tid_data->state == IWL_EMPTYING_HW_QUEUE_DELBA) &&
+            tid_data->state == IWL_EMPTYING_HW_QUEUE_DELBA ||
+            iwl_mvm_is_dqa_supported(mvm)) &&
            iwl_mvm_tid_queued(tid_data) == 0) {
                /*
-                * Now that this aggregation queue is empty tell mac80211 so it
-                * knows we no longer have frames buffered for the station on
-                * this TID (for the TIM bitmap calculation.)
+                * Now that this aggregation or DQA queue is empty tell
+                * mac80211 so it knows we no longer have frames buffered for
+                * the station on this TID (for the TIM bitmap calculation.)
                 */
                ieee80211_sta_set_buffered(sta, tid, false);
        }
@@ -1241,6 +1282,26 @@ static void iwl_mvm_tx_status_check_trigger(struct iwl_mvm *mvm,
        }
 }
 
+/**
+ * iwl_mvm_get_scd_ssn - returns the SSN of the SCD
+ * @mvm: the mvm object
+ * @tx_resp: the Tx response from the fw (agg or non-agg)
+ *
+ * When the fw sends an AMPDU, it fetches the MPDUs one after the other. Since
+ * it can't know until the end of the AMPDU whether everything went well, it
+ * can't know in advance how many MPDUs will be sent in the current batch.
+ * This is why it writes the agg Tx response while it fetches the MPDUs.
+ * Hence, it also can't know in advance what the SSN of the SCD will be at the
+ * end of the batch, so the SSN of the SCD is written at the end of the whole
+ * struct at a variable offset. This function knows how to cope with that
+ * variable offset and returns the SSN of the SCD.
+ */
+static inline u32 iwl_mvm_get_scd_ssn(struct iwl_mvm *mvm,
+                                     struct iwl_mvm_tx_resp *tx_resp)
+{
+       return le32_to_cpup((__le32 *)iwl_mvm_get_agg_status(mvm, tx_resp) +
+                           tx_resp->frame_count) & 0xfff;
+}
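
The variable-offset read above can be exercised on its own. A minimal standalone sketch, assuming 4-byte status entries followed by a 32-bit SSN word as the kernel-doc describes (toy types and values, not the driver's structs):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct toy_agg_status { uint16_t status; uint16_t sequence; };	/* 4 bytes */

int main(void)
{
	uint8_t frame_count = 3;
	uint8_t buf[3 * sizeof(struct toy_agg_status) + sizeof(uint32_t)];
	uint32_t ssn_word = 0x11234;	/* pretend the fw wrote this */

	memset(buf, 0, sizeof(buf));
	memcpy(buf + frame_count * sizeof(struct toy_agg_status),
	       &ssn_word, sizeof(ssn_word));

	/* same arithmetic as above: skip frame_count status entries,
	 * read the 32-bit word, keep the low 12 bits */
	uint32_t ssn;
	memcpy(&ssn, buf + frame_count * sizeof(struct toy_agg_status),
	       sizeof(ssn));
	printf("scd ssn = 0x%x\n", ssn & 0xfff);	/* -> 0x234 */
	return 0;
}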
+
 static void iwl_mvm_rx_tx_cmd_single(struct iwl_mvm *mvm,
                                     struct iwl_rx_packet *pkt)
 {
@@ -1250,17 +1311,21 @@ static void iwl_mvm_rx_tx_cmd_single(struct iwl_mvm *mvm,
        struct iwl_mvm_tx_resp *tx_resp = (void *)pkt->data;
        int sta_id = IWL_MVM_TX_RES_GET_RA(tx_resp->ra_tid);
        int tid = IWL_MVM_TX_RES_GET_TID(tx_resp->ra_tid);
-       u32 status = le16_to_cpu(tx_resp->status.status);
-       u16 ssn = iwl_mvm_get_scd_ssn(tx_resp);
+       struct agg_tx_status *agg_status =
+               iwl_mvm_get_agg_status(mvm, tx_resp);
+       u32 status = le16_to_cpu(agg_status->status);
+       u16 ssn = iwl_mvm_get_scd_ssn(mvm, tx_resp);
        struct iwl_mvm_sta *mvmsta;
        struct sk_buff_head skbs;
        u8 skb_freed = 0;
        u16 next_reclaimed, seq_ctl;
        bool is_ndp = false;
-       bool txq_agg = false; /* Is this TXQ aggregated */
 
        __skb_queue_head_init(&skbs);
 
+       if (iwl_mvm_has_new_tx_api(mvm))
+               txq_id = le16_to_cpu(tx_resp->v6.tx_queue);
+
        seq_ctl = le16_to_cpu(tx_resp->seq_ctl);
 
        /* we can free until ssn % q.n_bd not inclusive */
@@ -1283,6 +1348,10 @@ static void iwl_mvm_rx_tx_cmd_single(struct iwl_mvm *mvm,
                        info->flags |= IEEE80211_TX_STAT_ACK;
                        break;
                case TX_STATUS_FAIL_DEST_PS:
+                       /* In DQA, the FW should have stopped the queue
+                        * instead of returning this status
+                        */
+                       WARN_ON(iwl_mvm_is_dqa_supported(mvm));
                        info->flags |= IEEE80211_TX_STAT_TX_FILTERED;
                        break;
                default:
@@ -1381,21 +1450,12 @@ static void iwl_mvm_rx_tx_cmd_single(struct iwl_mvm *mvm,
        if (!IS_ERR(sta)) {
                mvmsta = iwl_mvm_sta_from_mac80211(sta);
 
-               if (tid != IWL_TID_NON_QOS) {
+               if (tid != IWL_TID_NON_QOS && tid != IWL_MGMT_TID) {
                        struct iwl_mvm_tid_data *tid_data =
                                &mvmsta->tid_data[tid];
                        bool send_eosp_ndp = false;
 
                        spin_lock_bh(&mvmsta->lock);
-                       if (iwl_mvm_is_dqa_supported(mvm)) {
-                               enum iwl_mvm_agg_state state;
-
-                               state = mvmsta->tid_data[tid].state;
-                               txq_agg = (state == IWL_AGG_ON ||
-                                       state == IWL_EMPTYING_HW_QUEUE_DELBA);
-                       } else {
-                               txq_agg = txq_id >= mvm->first_agg_queue;
-                       }
 
                        if (!is_ndp) {
                                tid_data->next_reclaimed = next_reclaimed;
@@ -1452,11 +1512,11 @@ static void iwl_mvm_rx_tx_cmd_single(struct iwl_mvm *mvm,
         * If the txq is not an AMPDU queue, there is no chance we freed
         * several skbs. Check that out...
         */
-       if (txq_agg)
+       if (iwl_mvm_is_dqa_supported(mvm) || txq_id >= mvm->first_agg_queue)
                goto out;
 
        /* We can't free more than one frame at once on a shared queue */
-       WARN_ON(!iwl_mvm_is_dqa_supported(mvm) && (skb_freed > 1));
+       WARN_ON(skb_freed > 1);
 
        /* If we have still frames for this STA nothing to do here */
        if (!atomic_sub_and_test(skb_freed, &mvm->pending_frames[sta_id]))
@@ -1522,7 +1582,8 @@ static void iwl_mvm_rx_tx_cmd_agg_dbg(struct iwl_mvm *mvm,
                                      struct iwl_rx_packet *pkt)
 {
        struct iwl_mvm_tx_resp *tx_resp = (void *)pkt->data;
-       struct agg_tx_status *frame_status = &tx_resp->status;
+       struct agg_tx_status *frame_status =
+               iwl_mvm_get_agg_status(mvm, tx_resp);
        int i;
 
        for (i = 0; i < tx_resp->frame_count; i++) {
@@ -1724,6 +1785,9 @@ void iwl_mvm_rx_ba_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb)
                ba_info.status.status_driver_data[0] =
                        (void *)(uintptr_t)ba_res->reduced_txp;
 
+               if (!le16_to_cpu(ba_res->tfd_cnt))
+                       goto out;
+
                /*
                 * TODO:
                 * When supporting multi TID aggregations - we need to move
@@ -1732,12 +1796,16 @@ void iwl_mvm_rx_ba_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb)
                 * This will go together with SN and AddBA offload and cannot
                 * be handled properly for now.
                 */
-               WARN_ON(le16_to_cpu(ba_res->tfd_cnt) != 1);
-               iwl_mvm_tx_reclaim(mvm, sta_id, ba_res->ra_tid[0].tid,
-                                  (int)ba_res->tfd[0].q_num,
+               WARN_ON(le16_to_cpu(ba_res->ra_tid_cnt) != 1);
+               tid = ba_res->ra_tid[0].tid;
+               if (tid == IWL_MGMT_TID)
+                       tid = IWL_MAX_TID_COUNT;
+               iwl_mvm_tx_reclaim(mvm, sta_id, tid,
+                                  (int)(le16_to_cpu(ba_res->tfd[0].q_num)),
                                   le16_to_cpu(ba_res->tfd[0].tfd_index),
                                   &ba_info, le32_to_cpu(ba_res->tx_rate));
 
+out:
                IWL_DEBUG_TX_REPLY(mvm,
                                   "BA_NOTIFICATION Received from sta_id = %d, flags %x, sent:%d, acked:%d\n",
                                   sta_id, le32_to_cpu(ba_res->flags),
index dedea96a8e0ff7cd3c609d9e5a45976705b54451..1dde05697c29ec0d957640026c15d33971f154b8 100644 (file)
@@ -7,7 +7,7 @@
  *
  * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
  * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
- * Copyright (C) 2015 Intel Deutschland GmbH
+ * Copyright (C) 2015 - 2017 Intel Deutschland GmbH
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of version 2 of the GNU General Public License as
@@ -34,6 +34,7 @@
  *
  * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
  * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
+ * Copyright (C) 2015 - 2017 Intel Deutschland GmbH
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -597,6 +598,9 @@ int iwl_mvm_find_free_queue(struct iwl_mvm *mvm, u8 sta_id, u8 minq, u8 maxq)
                    mvm->queue_info[i].status == IWL_MVM_QUEUE_FREE)
                        return i;
 
+       if (iwl_mvm_has_new_tx_api(mvm))
+               return -ENOSPC;
+
        /*
         * If no free queue found - settle for an inactive one to reconfigure
         * Make sure that the inactive queue either already belongs to this STA,
@@ -627,6 +631,9 @@ int iwl_mvm_reconfig_scd(struct iwl_mvm *mvm, int queue, int fifo, int sta_id,
        };
        int ret;
 
+       if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
+               return -EINVAL;
+
        spin_lock_bh(&mvm->queue_info_lock);
        if (WARN(mvm->queue_info[queue].hw_queue_refcount == 0,
                 "Trying to reconfig unallocated queue %d\n", queue)) {
@@ -644,20 +651,19 @@ int iwl_mvm_reconfig_scd(struct iwl_mvm *mvm, int queue, int fifo, int sta_id,
        return ret;
 }
 
-void iwl_mvm_enable_txq(struct iwl_mvm *mvm, int queue, int mac80211_queue,
-                       u16 ssn, const struct iwl_trans_txq_scd_cfg *cfg,
-                       unsigned int wdg_timeout)
+static bool iwl_mvm_update_txq_mapping(struct iwl_mvm *mvm, int queue,
+                                      int mac80211_queue, u8 sta_id, u8 tid)
 {
        bool enable_queue = true;
 
        spin_lock_bh(&mvm->queue_info_lock);
 
        /* Make sure this TID isn't already enabled */
-       if (mvm->queue_info[queue].tid_bitmap & BIT(cfg->tid)) {
+       if (mvm->queue_info[queue].tid_bitmap & BIT(tid)) {
                spin_unlock_bh(&mvm->queue_info_lock);
                IWL_ERR(mvm, "Trying to enable TXQ %d with existing TID %d\n",
-                       queue, cfg->tid);
-               return;
+                       queue, tid);
+               return false;
        }
 
        /* Update mappings and refcounts */
@@ -666,17 +672,17 @@ void iwl_mvm_enable_txq(struct iwl_mvm *mvm, int queue, int mac80211_queue,
 
        mvm->queue_info[queue].hw_queue_to_mac80211 |= BIT(mac80211_queue);
        mvm->queue_info[queue].hw_queue_refcount++;
-       mvm->queue_info[queue].tid_bitmap |= BIT(cfg->tid);
-       mvm->queue_info[queue].ra_sta_id = cfg->sta_id;
+       mvm->queue_info[queue].tid_bitmap |= BIT(tid);
+       mvm->queue_info[queue].ra_sta_id = sta_id;
 
        if (enable_queue) {
-               if (cfg->tid != IWL_MAX_TID_COUNT)
+               if (tid != IWL_MAX_TID_COUNT)
                        mvm->queue_info[queue].mac80211_ac =
-                               tid_to_mac80211_ac[cfg->tid];
+                               tid_to_mac80211_ac[tid];
                else
                        mvm->queue_info[queue].mac80211_ac = IEEE80211_AC_VO;
 
-               mvm->queue_info[queue].txq_tid = cfg->tid;
+               mvm->queue_info[queue].txq_tid = tid;
        }
 
        IWL_DEBUG_TX_QUEUES(mvm,
@@ -686,8 +692,49 @@ void iwl_mvm_enable_txq(struct iwl_mvm *mvm, int queue, int mac80211_queue,
 
        spin_unlock_bh(&mvm->queue_info_lock);
 
+       return enable_queue;
+}
+
+int iwl_mvm_tvqm_enable_txq(struct iwl_mvm *mvm, int mac80211_queue,
+                           u8 sta_id, u8 tid, unsigned int timeout)
+{
+       struct iwl_tx_queue_cfg_cmd cmd = {
+               .flags = cpu_to_le16(TX_QUEUE_CFG_ENABLE_QUEUE),
+               .sta_id = sta_id,
+               .tid = tid,
+       };
+       int queue;
+
+       if (cmd.tid == IWL_MAX_TID_COUNT)
+               cmd.tid = IWL_MGMT_TID;
+       queue = iwl_trans_txq_alloc(mvm->trans, (void *)&cmd,
+                                   SCD_QUEUE_CFG, timeout);
+
+       if (queue < 0) {
+               IWL_DEBUG_TX_QUEUES(mvm,
+                                   "Failed allocating TXQ for sta %d tid %d, ret: %d\n",
+                                   sta_id, tid, queue);
+               return queue;
+       }
+
+       IWL_DEBUG_TX_QUEUES(mvm, "Enabling TXQ #%d for sta %d tid %d\n",
+                           queue, sta_id, tid);
+
+       iwl_mvm_update_txq_mapping(mvm, queue, mac80211_queue, sta_id, tid);
+
+       return queue;
+}
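
The management-TID remapping above is the inverse of the one iwl_mvm_rx_ba_notif() applies earlier in this diff (IWL_MGMT_TID back to IWL_MAX_TID_COUNT). A toy round-trip check, with stand-in constants rather than the driver's values:

#include <assert.h>
#include <stdio.h>

#define TOY_MAX_TID_COUNT	8	/* stand-ins, not the iwlwifi values */
#define TOY_MGMT_TID		15

static int to_fw_tid(int tid)	/* direction used when enabling the queue */
{
	return tid == TOY_MAX_TID_COUNT ? TOY_MGMT_TID : tid;
}

static int from_fw_tid(int tid)	/* direction used in the BA-notif path */
{
	return tid == TOY_MGMT_TID ? TOY_MAX_TID_COUNT : tid;
}

int main(void)
{
	assert(from_fw_tid(to_fw_tid(TOY_MAX_TID_COUNT)) == TOY_MAX_TID_COUNT);
	printf("mgmt TID round-trips cleanly\n");
	return 0;
}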
+
+void iwl_mvm_enable_txq(struct iwl_mvm *mvm, int queue, int mac80211_queue,
+                       u16 ssn, const struct iwl_trans_txq_scd_cfg *cfg,
+                       unsigned int wdg_timeout)
+{
+       if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
+               return;
+
        /* Send the enabling command if we need to */
-       if (enable_queue) {
+       if (iwl_mvm_update_txq_mapping(mvm, queue, mac80211_queue,
+                                      cfg->sta_id, cfg->tid)) {
                struct iwl_scd_txq_cfg_cmd cmd = {
                        .scd_queue = queue,
                        .action = SCD_CFG_ENABLE_QUEUE,
@@ -701,7 +748,8 @@ void iwl_mvm_enable_txq(struct iwl_mvm *mvm, int queue, int mac80211_queue,
 
                iwl_trans_txq_enable_cfg(mvm->trans, queue, ssn, NULL,
                                         wdg_timeout);
-               WARN(iwl_mvm_send_cmd_pdu(mvm, SCD_QUEUE_CFG, 0, sizeof(cmd),
+               WARN(iwl_mvm_send_cmd_pdu(mvm, SCD_QUEUE_CFG, 0,
+                                         sizeof(struct iwl_scd_txq_cfg_cmd),
                                          &cmd),
                     "Failed to configure queue %d on FIFO %d\n", queue,
                     cfg->fifo);
@@ -716,7 +764,6 @@ int iwl_mvm_disable_txq(struct iwl_mvm *mvm, int queue, int mac80211_queue,
                .action = SCD_CFG_DISABLE_QUEUE,
        };
        bool remove_mac_queue = true;
-       int ret;
 
        spin_lock_bh(&mvm->queue_info_lock);
 
@@ -787,14 +834,23 @@ int iwl_mvm_disable_txq(struct iwl_mvm *mvm, int queue, int mac80211_queue,
 
        spin_unlock_bh(&mvm->queue_info_lock);
 
-       iwl_trans_txq_disable(mvm->trans, queue, false);
-       ret = iwl_mvm_send_cmd_pdu(mvm, SCD_QUEUE_CFG, flags,
-                                  sizeof(cmd), &cmd);
-       if (ret)
-               IWL_ERR(mvm, "Failed to disable queue %d (ret=%d)\n",
-                       queue, ret);
+       if (iwl_mvm_has_new_tx_api(mvm)) {
+               iwl_trans_txq_free(mvm->trans, queue);
+       } else {
+               int ret;
 
-       return ret;
+               iwl_trans_txq_disable(mvm->trans, queue, false);
+               ret = iwl_mvm_send_cmd_pdu(mvm, SCD_QUEUE_CFG, flags,
+                                          sizeof(struct iwl_scd_txq_cfg_cmd),
+                                          &cmd);
+
+               if (ret)
+                       IWL_ERR(mvm, "Failed to disable queue %d (ret=%d)\n",
+                               queue, ret);
+               return ret;
+       }
+
+       return 0;
 }
 
 /**
@@ -816,7 +872,7 @@ int iwl_mvm_send_lq_cmd(struct iwl_mvm *mvm, struct iwl_lq_cmd *lq, bool init)
                .data = { lq, },
        };
 
-       if (WARN_ON(lq->sta_id == IWL_MVM_STATION_COUNT))
+       if (WARN_ON(lq->sta_id == IWL_MVM_INVALID_STA))
                return -EINVAL;
 
        return iwl_mvm_send_cmd(mvm, &cmd);
@@ -1088,6 +1144,9 @@ static void iwl_mvm_remove_inactive_tids(struct iwl_mvm *mvm,
        lockdep_assert_held(&mvmsta->lock);
        lockdep_assert_held(&mvm->queue_info_lock);
 
+       if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
+               return;
+
        /* Go over all non-active TIDs, incl. IWL_MAX_TID_COUNT (for mgmt) */
        for_each_set_bit(tid, &tid_bitmap, IWL_MAX_TID_COUNT + 1) {
                /* If some TFDs are still queued - don't mark TID as inactive */
@@ -1154,6 +1213,9 @@ void iwl_mvm_inactivity_check(struct iwl_mvm *mvm)
        unsigned long now = jiffies;
        int i;
 
+       if (iwl_mvm_has_new_tx_api(mvm))
+               return;
+
        spin_lock_bh(&mvm->queue_info_lock);
        for (i = 0; i < IWL_MAX_HW_QUEUES; i++)
                if (mvm->queue_info[i].hw_queue_refcount > 0)
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info.c b/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info.c
new file mode 100644 (file)
index 0000000..1d95512
--- /dev/null
@@ -0,0 +1,277 @@
+/******************************************************************************
+ *
+ * This file is provided under a dual BSD/GPLv2 license.  When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2017 Intel Deutschland GmbH
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2017 Intel Deutschland GmbH
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ *  * Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ *  * Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in
+ *    the documentation and/or other materials provided with the
+ *    distribution.
+ *  * Neither the name Intel Corporation nor the names of its
+ *    contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *****************************************************************************/
+
+#include "iwl-trans.h"
+#include "iwl-fh.h"
+#include "iwl-context-info.h"
+#include "internal.h"
+#include "iwl-prph.h"
+
+static int iwl_pcie_get_num_sections(const struct fw_img *fw,
+                                    int start)
+{
+       int i = 0;
+
+       while (start < fw->num_sec &&
+              fw->sec[start].offset != CPU1_CPU2_SEPARATOR_SECTION &&
+              fw->sec[start].offset != PAGING_SEPARATOR_SECTION) {
+               start++;
+               i++;
+       }
+
+       return i;
+}
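
iwl_pcie_get_num_sections() just counts entries up to the next separator sentinel. A standalone toy of the same loop, using a made-up sentinel value:

#include <stdio.h>

#define TOY_SEPARATOR 0xFFFFFFFFu	/* made-up sentinel */

static int count_until_sep(const unsigned int *offs, int num_sec, int start)
{
	int i = 0;

	while (start < num_sec && offs[start] != TOY_SEPARATOR) {
		start++;
		i++;
	}
	return i;
}

int main(void)
{
	unsigned int offs[] = { 0x0, 0x100, TOY_SEPARATOR, 0x200 };

	/* counts the two entries before the separator */
	printf("count = %d\n", count_until_sep(offs, 4, 0));	/* -> 2 */
	return 0;
}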
+
+static int iwl_pcie_ctxt_info_alloc_dma(struct iwl_trans *trans,
+                                       const struct fw_desc *sec,
+                                       struct iwl_dram_data *dram)
+{
+       dram->block = dma_alloc_coherent(trans->dev, sec->len,
+                                        &dram->physical,
+                                        GFP_KERNEL);
+       if (!dram->block)
+               return -ENOMEM;
+
+       dram->size = sec->len;
+       memcpy(dram->block, sec->data, sec->len);
+
+       return 0;
+}
+
+static void iwl_pcie_ctxt_info_free_fw_img(struct iwl_trans *trans)
+{
+       struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+       struct iwl_self_init_dram *dram = &trans_pcie->init_dram;
+       int i;
+
+       if (!dram->fw) {
+               WARN_ON(dram->fw_cnt);
+               return;
+       }
+
+       for (i = 0; i < dram->fw_cnt; i++)
+               dma_free_coherent(trans->dev, dram->fw[i].size,
+                                 dram->fw[i].block, dram->fw[i].physical);
+
+       kfree(dram->fw);
+       dram->fw_cnt = 0;
+}
+
+void iwl_pcie_ctxt_info_free_paging(struct iwl_trans *trans)
+{
+       struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+       struct iwl_self_init_dram *dram = &trans_pcie->init_dram;
+       int i;
+
+       if (!dram->paging) {
+               WARN_ON(dram->paging_cnt);
+               return;
+       }
+
+       /* free paging */
+       for (i = 0; i < dram->paging_cnt; i++)
+               dma_free_coherent(trans->dev, dram->paging[i].size,
+                                 dram->paging[i].block,
+                                 dram->paging[i].physical);
+
+       kfree(dram->paging);
+       dram->paging_cnt = 0;
+}
+
+static int iwl_pcie_ctxt_info_init_fw_sec(struct iwl_trans *trans,
+                                         const struct fw_img *fw,
+                                         struct iwl_context_info *ctxt_info)
+{
+       struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+       struct iwl_self_init_dram *dram = &trans_pcie->init_dram;
+       struct iwl_context_info_dram *ctxt_dram = &ctxt_info->dram;
+       int i, ret, lmac_cnt, umac_cnt, paging_cnt;
+
+       lmac_cnt = iwl_pcie_get_num_sections(fw, 0);
+       /* add 1 due to separator */
+       umac_cnt = iwl_pcie_get_num_sections(fw, lmac_cnt + 1);
+       /* add 2 due to separators */
+       paging_cnt = iwl_pcie_get_num_sections(fw, lmac_cnt + umac_cnt + 2);
+
+       dram->fw = kcalloc(umac_cnt + lmac_cnt, sizeof(*dram->fw), GFP_KERNEL);
+       if (!dram->fw)
+               return -ENOMEM;
+       dram->paging = kcalloc(paging_cnt, sizeof(*dram->paging), GFP_KERNEL);
+       if (!dram->paging)
+               return -ENOMEM;
+
+       /* initialize lmac sections */
+       for (i = 0; i < lmac_cnt; i++) {
+               ret = iwl_pcie_ctxt_info_alloc_dma(trans, &fw->sec[i],
+                                                  &dram->fw[dram->fw_cnt]);
+               if (ret)
+                       return ret;
+               ctxt_dram->lmac_img[i] =
+                       cpu_to_le64(dram->fw[dram->fw_cnt].physical);
+               dram->fw_cnt++;
+       }
+
+       /* initialize umac sections */
+       for (i = 0; i < umac_cnt; i++) {
+               /* access FW with +1 to make up for lmac separator */
+               ret = iwl_pcie_ctxt_info_alloc_dma(trans,
+                                                  &fw->sec[dram->fw_cnt + 1],
+                                                  &dram->fw[dram->fw_cnt]);
+               if (ret)
+                       return ret;
+               ctxt_dram->umac_img[i] =
+                       cpu_to_le64(dram->fw[dram->fw_cnt].physical);
+               dram->fw_cnt++;
+       }
+
+       /*
+        * Initialize paging.
+        * Paging memory isn't stored in dram->fw like the umac and lmac
+        * sections - it is stored separately.
+        * This is because the timing of its release is different -
+        * while fw memory can be released on alive, paging memory can be
+        * freed only when the device goes down.
+        * Given that, the logic here for accessing the fw image is a bit
+        * different - fw_cnt doesn't change, so the loop counter is added
+        * to it.
+        */
+       for (i = 0; i < paging_cnt; i++) {
+               /* access FW with +2 to make up for lmac & umac separators */
+               int fw_idx = dram->fw_cnt + i + 2;
+
+               ret = iwl_pcie_ctxt_info_alloc_dma(trans, &fw->sec[fw_idx],
+                                                  &dram->paging[i]);
+               if (ret)
+                       return ret;
+
+               ctxt_dram->virtual_img[i] =
+                       cpu_to_le64(dram->paging[i].physical);
+               dram->paging_cnt++;
+       }
+
+       return 0;
+}
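
Putting the +1/+2 adjustments above together, the fw->sec layout this function assumes can be sketched as below (inferred from the code, not from firmware documentation):

/*
 *   sec[0 .. lmac_cnt-1]                    lmac sections
 *   sec[lmac_cnt]                           CPU1_CPU2 separator
 *   sec[lmac_cnt+1 .. lmac_cnt+umac_cnt]    umac sections
 *   sec[lmac_cnt+umac_cnt+1]                paging separator
 *   sec[lmac_cnt+umac_cnt+2 .. ]            paging sections
 */
#include <stdio.h>

int main(void)
{
	int lmac_cnt = 2, umac_cnt = 3;	/* example counts */

	printf("first umac section:   sec[%d]\n", lmac_cnt + 1);
	printf("first paging section: sec[%d]\n", lmac_cnt + umac_cnt + 2);
	return 0;
}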
+
+int iwl_pcie_ctxt_info_init(struct iwl_trans *trans,
+                           const struct fw_img *fw)
+{
+       struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+       struct iwl_context_info *ctxt_info;
+       struct iwl_context_info_rbd_cfg *rx_cfg;
+       u32 control_flags = 0;
+       int ret;
+
+       ctxt_info = dma_alloc_coherent(trans->dev, sizeof(*ctxt_info),
+                                      &trans_pcie->ctxt_info_dma_addr,
+                                      GFP_KERNEL);
+       if (!ctxt_info)
+               return -ENOMEM;
+
+       ctxt_info->version.version = 0;
+       ctxt_info->version.mac_id =
+               cpu_to_le16((u16)iwl_read32(trans, CSR_HW_REV));
+       /* size is in DWs */
+       ctxt_info->version.size = cpu_to_le16(sizeof(*ctxt_info) / 4);
+
+       BUILD_BUG_ON(RX_QUEUE_CB_SIZE(MQ_RX_TABLE_SIZE) > 0xF);
+       control_flags = IWL_CTXT_INFO_RB_SIZE_4K |
+                       IWL_CTXT_INFO_TFD_FORMAT_LONG |
+                       RX_QUEUE_CB_SIZE(MQ_RX_TABLE_SIZE) <<
+                       IWL_CTXT_INFO_RB_CB_SIZE_POS;
+       ctxt_info->control.control_flags = cpu_to_le32(control_flags);
+
+       /* initialize RX default queue */
+       rx_cfg = &ctxt_info->rbd_cfg;
+       rx_cfg->free_rbd_addr = cpu_to_le64(trans_pcie->rxq->bd_dma);
+       rx_cfg->used_rbd_addr = cpu_to_le64(trans_pcie->rxq->used_bd_dma);
+       rx_cfg->status_wr_ptr = cpu_to_le64(trans_pcie->rxq->rb_stts_dma);
+
+       /* initialize TX command queue */
+       ctxt_info->hcmd_cfg.cmd_queue_addr =
+               cpu_to_le64(trans_pcie->txq[trans_pcie->cmd_queue]->dma_addr);
+       ctxt_info->hcmd_cfg.cmd_queue_size =
+               TFD_QUEUE_CB_SIZE(TFD_QUEUE_SIZE_MAX);
+
+       /* allocate ucode sections in dram and set addresses */
+       ret = iwl_pcie_ctxt_info_init_fw_sec(trans, fw, ctxt_info);
+       if (ret) {
+               dma_free_coherent(trans->dev, sizeof(*trans_pcie->ctxt_info),
+                                 ctxt_info, trans_pcie->ctxt_info_dma_addr);
+               return ret;
+       }
+
+       trans_pcie->ctxt_info = ctxt_info;
+
+       iwl_enable_interrupts(trans);
+
+       /* kick FW self load */
+       iwl_write64(trans, CSR_CTXT_INFO_BA, trans_pcie->ctxt_info_dma_addr);
+       iwl_write_prph(trans, UREG_CPU_INIT_RUN, 1);
+
+       /* Context info will be released upon alive or failure to get one */
+
+       return 0;
+}
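
The context info allocated here only lives until the firmware reports alive. A small sketch of the lifetime as wired up elsewhere in this diff (the stage strings are illustrative, not driver output):

#include <stdio.h>

int main(void)
{
	const char *stages[] = {
		"init:  write CSR_CTXT_INFO_BA, kick UREG_CPU_INIT_RUN",
		"alive: iwl_trans_pcie_gen2_fw_alive() frees the ctxt info (paging kept, FW still uses it)",
		"stop:  _iwl_trans_pcie_gen2_stop_device() frees the paging memory too",
	};

	for (int i = 0; i < 3; i++)
		printf("%d. %s\n", i + 1, stages[i]);
	return 0;
}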
+
+void iwl_pcie_ctxt_info_free(struct iwl_trans *trans)
+{
+       struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+
+       if (!trans_pcie->ctxt_info)
+               return;
+
+       dma_free_coherent(trans->dev, sizeof(*trans_pcie->ctxt_info),
+                         trans_pcie->ctxt_info,
+                         trans_pcie->ctxt_info_dma_addr);
+       trans_pcie->ctxt_info_dma_addr = 0;
+       trans_pcie->ctxt_info = NULL;
+
+       iwl_pcie_ctxt_info_free_fw_img(trans);
+}
index ba8a81cb0e2b7118ae761f9be68e4157477c26e5..e51760e752d48cf8e559bc918d71f928b46917b3 100644 (file)
@@ -501,6 +501,10 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
        {IWL_PCI_DEVICE(0x24FD, 0x0930, iwl8265_2ac_cfg)},
        {IWL_PCI_DEVICE(0x24FD, 0x0950, iwl8265_2ac_cfg)},
        {IWL_PCI_DEVICE(0x24FD, 0x0850, iwl8265_2ac_cfg)},
+       {IWL_PCI_DEVICE(0x24FD, 0x1014, iwl8265_2ac_cfg)},
+       {IWL_PCI_DEVICE(0x24FD, 0x3E02, iwl8275_2ac_cfg)},
+       {IWL_PCI_DEVICE(0x24FD, 0x3E01, iwl8275_2ac_cfg)},
+       {IWL_PCI_DEVICE(0x24FD, 0x1012, iwl8275_2ac_cfg)},
        {IWL_PCI_DEVICE(0x24FD, 0x0012, iwl8275_2ac_cfg)},
 
 /* 9000 Series */
@@ -533,7 +537,8 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
        {IWL_PCI_DEVICE(0xA370, 0x1030, iwl9560_2ac_cfg)},
 
 /* a000 Series */
-       {IWL_PCI_DEVICE(0x2720, 0x0A10, iwla000_2ac_cfg_hr)},
+       {IWL_PCI_DEVICE(0x2720, 0x0A10, iwla000_2ac_cfg_hr_cdb)},
+       {IWL_PCI_DEVICE(0x2722, 0x0A10, iwla000_2ac_cfg_hr)},
 #endif /* CONFIG_IWLMVM */
 
        {0}
@@ -667,18 +672,11 @@ static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
                iwl_trans->cfg = cfg_7265d;
        }
 
-       if (iwl_trans->cfg->rf_id) {
-               if (cfg == &iwl9460_2ac_cfg &&
-                   iwl_trans->hw_rf_id == CSR_HW_RF_ID_TYPE_LC) {
-                       cfg = &iwl9000lc_2ac_cfg;
-                       iwl_trans->cfg = cfg;
-               }
-
-               if (cfg == &iwla000_2ac_cfg_hr &&
-                   iwl_trans->hw_rf_id == CSR_HW_RF_ID_TYPE_JF) {
-                       cfg = &iwla000_2ac_cfg_jf;
-                       iwl_trans->cfg = cfg;
-               }
+       if (iwl_trans->cfg->rf_id &&
+           (cfg == &iwla000_2ac_cfg_hr || cfg == &iwla000_2ac_cfg_hr_cdb) &&
+            iwl_trans->hw_rf_id == CSR_HW_RF_ID_TYPE_JF) {
+               cfg = &iwla000_2ac_cfg_jf;
+               iwl_trans->cfg = cfg;
        }
 #endif
 
index 10937309641a5f097e5232acffca07665adf1316..b9e9e10c32fad3bac06d77653757df16eef9762d 100644 (file)
@@ -2,7 +2,7 @@
  *
  * Copyright(c) 2003 - 2015 Intel Corporation. All rights reserved.
  * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
- * Copyright(c) 2016 Intel Deutschland GmbH
+ * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
  *
  * Portions of this file are derived from the ipw3945 project, as well
  * as portions of the ieee80211 subsystem header files.
@@ -205,11 +205,11 @@ struct iwl_cmd_meta {
  * into the buffer regardless of whether it should be mapped or not.
  * This indicates how big the first TB must be to include the scratch buffer
  * and the assigned PN.
- * Since PN location is 16 bytes at offset 24, it's 40 now.
+ * Since the 8-byte PN sits at offset 12, it's 12 + 8 = 20 now.
  * If we make it bigger then allocations will be bigger and copy slower, so
  * that's probably not useful.
  */
-#define IWL_FIRST_TB_SIZE      40
+#define IWL_FIRST_TB_SIZE      20
 #define IWL_FIRST_TB_SIZE_ALIGN ALIGN(IWL_FIRST_TB_SIZE, 64)
 
 struct iwl_pcie_txq_entry {
@@ -237,11 +237,11 @@ struct iwl_pcie_first_tb_buf {
  * @stuck_timer: timer that fires if queue gets stuck
  * @trans_pcie: pointer back to transport (for timer)
  * @need_update: indicates need to update read/write index
- * @active: stores if queue is active
  * @ampdu: true if this queue is an ampdu queue for a specific RA/TID
  * @wd_timeout: queue watchdog timeout (jiffies) - per queue
  * @frozen: tx stuck queue timer is frozen
  * @frozen_expiry_remainder: remember how long until the timer fires
+ * @bc_tbl: byte count table of the queue (relevant only for gen2 transport)
  * @write_ptr: 1-st empty entry (index) host_w
  * @read_ptr: last used entry (index) host_r
  * @dma_addr:  physical addr for BD's
@@ -277,11 +277,11 @@ struct iwl_txq {
        struct iwl_trans_pcie *trans_pcie;
        bool need_update;
        bool frozen;
-       u8 active;
        bool ampdu;
        int block;
        unsigned long wd_timeout;
        struct sk_buff_head overflow_q;
+       struct iwl_dma_ptr bc_tbl;
 
        int write_ptr;
        int read_ptr;
@@ -314,12 +314,44 @@ enum iwl_shared_irq_flags {
        IWL_SHARED_IRQ_FIRST_RSS        = BIT(1),
 };
 
+/**
+ * struct iwl_dram_data
+ * @physical: page phy pointer
+ * @block: pointer to the allocated block/page
+ * @size: size of the block/page
+ */
+struct iwl_dram_data {
+       dma_addr_t physical;
+       void *block;
+       int size;
+};
+
+/**
+ * struct iwl_self_init_dram - dram data used by self init process
+ * @fw: lmac and umac dram data
+ * @fw_cnt: total number of items in array
+ * @paging: paging dram data
+ * @paging_cnt: total number of items in array
+ */
+struct iwl_self_init_dram {
+       struct iwl_dram_data *fw;
+       int fw_cnt;
+       struct iwl_dram_data *paging;
+       int paging_cnt;
+};
+
 /**
  * struct iwl_trans_pcie - PCIe transport specific data
  * @rxq: all the RX queue data
  * @rx_pool: initial pool of iwl_rx_mem_buffer for all the queues
  * @global_table: table mapping received VID from hw to rxb
  * @rba: allocator for RX replenishing
+ * @ctxt_info: context information for FW self init
+ * @ctxt_info_dma_addr: dma addr of context information
+ * @init_dram: DRAM data of firmware image (including paging).
+ *     Context information addresses will be taken from here.
+ *     This is the driver's local copy, used to keep track of the size
+ *     and count when allocating and freeing the memory.
  * @trans: pointer to the generic transport area
  * @scd_base_addr: scheduler sram base address in SRAM
  * @scd_bc_tbls: pointer to the byte count table of the scheduler
@@ -357,6 +389,9 @@ struct iwl_trans_pcie {
        struct iwl_rx_mem_buffer rx_pool[RX_POOL_SIZE];
        struct iwl_rx_mem_buffer *global_table[RX_POOL_SIZE];
        struct iwl_rb_allocator rba;
+       struct iwl_context_info *ctxt_info;
+       dma_addr_t ctxt_info_dma_addr;
+       struct iwl_self_init_dram init_dram;
        struct iwl_trans *trans;
 
        struct net_device napi_dev;
@@ -378,7 +413,8 @@ struct iwl_trans_pcie {
        struct iwl_dma_ptr scd_bc_tbls;
        struct iwl_dma_ptr kw;
 
-       struct iwl_txq *txq;
+       struct iwl_txq *txq_memory;
+       struct iwl_txq *txq[IWL_MAX_HW_QUEUES];
        unsigned long queue_used[BITS_TO_LONGS(IWL_MAX_HW_QUEUES)];
        unsigned long queue_stopped[BITS_TO_LONGS(IWL_MAX_HW_QUEUES)];
 
@@ -454,6 +490,7 @@ void iwl_trans_pcie_free(struct iwl_trans *trans);
 * RX
 ******************************************************/
 int iwl_pcie_rx_init(struct iwl_trans *trans);
+int iwl_pcie_gen2_rx_init(struct iwl_trans *trans);
 irqreturn_t iwl_pcie_msix_isr(int irq, void *data);
 irqreturn_t iwl_pcie_irq_handler(int irq, void *dev_id);
 irqreturn_t iwl_pcie_irq_msix_handler(int irq, void *dev_id);
@@ -474,6 +511,7 @@ void iwl_pcie_disable_ict(struct iwl_trans *trans);
 * TX / HCMD
 ******************************************************/
 int iwl_pcie_tx_init(struct iwl_trans *trans);
+int iwl_pcie_gen2_tx_init(struct iwl_trans *trans);
 void iwl_pcie_tx_start(struct iwl_trans *trans, u32 scd_base_addr);
 int iwl_pcie_tx_stop(struct iwl_trans *trans);
 void iwl_pcie_tx_free(struct iwl_trans *trans);
@@ -484,7 +522,6 @@ void iwl_trans_pcie_txq_disable(struct iwl_trans *trans, int queue,
                                bool configure_scd);
 void iwl_trans_pcie_txq_set_shared_mode(struct iwl_trans *trans, u32 txq_id,
                                        bool shared_mode);
-dma_addr_t iwl_trans_pcie_get_txq_byte_table(struct iwl_trans *trans, int txq);
 void iwl_trans_pcie_log_scd_error(struct iwl_trans *trans,
                                  struct iwl_txq *txq);
 int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
@@ -616,6 +653,12 @@ static inline void iwl_enable_fw_load_int(struct iwl_trans *trans)
        }
 }
 
+static inline void *iwl_pcie_get_tfd(struct iwl_trans_pcie *trans_pcie,
+                                    struct iwl_txq *txq, int idx)
+{
+       return txq->tfds + trans_pcie->tfd_size * idx;
+}
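
Because tfd_size differs between transport generations, TFDs are addressed by byte offset rather than typed array indexing. A minimal standalone sketch of the same pattern, with example sizes rather than driver values:

#include <stdio.h>
#include <stdlib.h>

static void *get_entry(void *base, size_t entry_size, int idx)
{
	return (char *)base + entry_size * idx;	/* same idea as above */
}

int main(void)
{
	size_t tfd_size = 128;	/* example per-TFD size */
	char *tfds = calloc(16, tfd_size);

	printf("tfd[3] sits at byte offset %td\n",
	       (char *)get_entry(tfds, tfd_size, 3) - tfds);
	free(tfds);
	return 0;
}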
+
 static inline void iwl_enable_rfkill_int(struct iwl_trans *trans)
 {
        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
@@ -719,4 +762,40 @@ int iwl_pci_fw_enter_d0i3(struct iwl_trans *trans);
 
 void iwl_pcie_enable_rx_wake(struct iwl_trans *trans, bool enable);
 
+/* common functions that are used by gen2 transport */
+void iwl_pcie_apm_config(struct iwl_trans *trans);
+int iwl_pcie_prepare_card_hw(struct iwl_trans *trans);
+void iwl_pcie_synchronize_irqs(struct iwl_trans *trans);
+bool iwl_trans_check_hw_rf_kill(struct iwl_trans *trans);
+void iwl_pcie_txq_free_tfd(struct iwl_trans *trans, struct iwl_txq *txq);
+int iwl_queue_space(const struct iwl_txq *q);
+int iwl_pcie_apm_stop_master(struct iwl_trans *trans);
+void iwl_pcie_conf_msix_hw(struct iwl_trans_pcie *trans_pcie);
+int iwl_pcie_txq_init(struct iwl_trans *trans, struct iwl_txq *txq,
+                     int slots_num, bool cmd_queue);
+int iwl_pcie_txq_alloc(struct iwl_trans *trans,
+                      struct iwl_txq *txq, int slots_num,  bool cmd_queue);
+int iwl_pcie_alloc_dma_ptr(struct iwl_trans *trans,
+                          struct iwl_dma_ptr *ptr, size_t size);
+void iwl_pcie_free_dma_ptr(struct iwl_trans *trans, struct iwl_dma_ptr *ptr);
+
+/* transport gen 2 exported functions */
+int iwl_trans_pcie_gen2_start_fw(struct iwl_trans *trans,
+                                const struct fw_img *fw, bool run_in_rfkill);
+void iwl_trans_pcie_gen2_fw_alive(struct iwl_trans *trans, u32 scd_addr);
+int iwl_trans_pcie_dyn_txq_alloc(struct iwl_trans *trans,
+                                struct iwl_tx_queue_cfg_cmd *cmd,
+                                int cmd_id,
+                                unsigned int timeout);
+void iwl_trans_pcie_dyn_txq_free(struct iwl_trans *trans, int queue);
+int iwl_trans_pcie_gen2_tx(struct iwl_trans *trans, struct sk_buff *skb,
+                          struct iwl_device_cmd *dev_cmd, int txq_id);
+int iwl_trans_pcie_gen2_send_hcmd(struct iwl_trans *trans,
+                                 struct iwl_host_cmd *cmd);
+void iwl_trans_pcie_gen2_stop_device(struct iwl_trans *trans,
+                                    bool low_power);
+void _iwl_trans_pcie_gen2_stop_device(struct iwl_trans *trans, bool low_power);
+void iwl_pcie_gen2_txq_unmap(struct iwl_trans *trans, int txq_id);
+void iwl_pcie_gen2_tx_free(struct iwl_trans *trans);
+void iwl_pcie_gen2_tx_stop(struct iwl_trans *trans);
 #endif /* __iwl_trans_int_pcie_h__ */
index de94dfdf2ec9972ee7c78e695b26f04f3a022154..f98f2d2b8a1b69946c1f7d4ee9692c81986db4af 100644 (file)
@@ -2,7 +2,7 @@
  *
  * Copyright(c) 2003 - 2014 Intel Corporation. All rights reserved.
  * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
- * Copyright(c) 2016 Intel Deutschland GmbH
+ * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
  *
  * Portions of this file are derived from the ipw3945 project, as well
  * as portions of the ieee80211 subsystem header files.
@@ -880,7 +880,7 @@ static int iwl_pcie_dummy_napi_poll(struct napi_struct *napi, int budget)
        return 0;
 }
 
-int iwl_pcie_rx_init(struct iwl_trans *trans)
+static int _iwl_pcie_rx_init(struct iwl_trans *trans)
 {
        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
        struct iwl_rxq *def_rxq;
@@ -958,20 +958,40 @@ int iwl_pcie_rx_init(struct iwl_trans *trans)
 
        iwl_pcie_rxq_alloc_rbs(trans, GFP_KERNEL, def_rxq);
 
+       return 0;
+}
+
+int iwl_pcie_rx_init(struct iwl_trans *trans)
+{
+       struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+       int ret = _iwl_pcie_rx_init(trans);
+
+       if (ret)
+               return ret;
+
        if (trans->cfg->mq_rx_supported)
                iwl_pcie_rx_mq_hw_init(trans);
        else
-               iwl_pcie_rx_hw_init(trans, def_rxq);
+               iwl_pcie_rx_hw_init(trans, trans_pcie->rxq);
 
-       iwl_pcie_rxq_restock(trans, def_rxq);
+       iwl_pcie_rxq_restock(trans, trans_pcie->rxq);
 
-       spin_lock(&def_rxq->lock);
-       iwl_pcie_rxq_inc_wr_ptr(trans, def_rxq);
-       spin_unlock(&def_rxq->lock);
+       spin_lock(&trans_pcie->rxq->lock);
+       iwl_pcie_rxq_inc_wr_ptr(trans, trans_pcie->rxq);
+       spin_unlock(&trans_pcie->rxq->lock);
 
        return 0;
 }
 
+int iwl_pcie_gen2_rx_init(struct iwl_trans *trans)
+{
+       /*
+        * We don't configure the RFH.
+        * Restocking will be done at alive, after the firmware has
+        * configured the RFH.
+        */
+       return _iwl_pcie_rx_init(trans);
+}
+
 void iwl_pcie_rx_free(struct iwl_trans *trans)
 {
        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
@@ -1074,7 +1094,7 @@ static void iwl_pcie_rx_handle_rb(struct iwl_trans *trans,
                                bool emergency)
 {
        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
-       struct iwl_txq *txq = &trans_pcie->txq[trans_pcie->cmd_queue];
+       struct iwl_txq *txq = trans_pcie->txq[trans_pcie->cmd_queue];
        bool page_stolen = false;
        int max_len = PAGE_SIZE << trans_pcie->rx_page_order;
        u32 offset = 0;
@@ -1393,17 +1413,17 @@ static void iwl_pcie_irq_handle_error(struct iwl_trans *trans)
                return;
        }
 
-       iwl_pcie_dump_csr(trans);
-       iwl_dump_fh(trans, NULL);
-
        local_bh_disable();
        /* The STATUS_FW_ERROR bit is set in this function. This must happen
         * before we wake up the command caller, to ensure a proper cleanup. */
        iwl_trans_fw_error(trans);
        local_bh_enable();
 
-       for (i = 0; i < trans->cfg->base_params->num_of_queues; i++)
-               del_timer(&trans_pcie->txq[i].stuck_timer);
+       for (i = 0; i < trans->cfg->base_params->num_of_queues; i++) {
+               if (!trans_pcie->txq[i])
+                       continue;
+               del_timer(&trans_pcie->txq[i]->stuck_timer);
+       }
 
        clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status);
        wake_up(&trans_pcie->wait_command_queue);
@@ -1597,6 +1617,13 @@ irqreturn_t iwl_pcie_irq_handler(int irq, void *dev_id)
                if (inta & CSR_INT_BIT_ALIVE) {
                        IWL_DEBUG_ISR(trans, "Alive interrupt\n");
                        isr_stats->alive++;
+                       if (trans->cfg->gen2) {
+                               /*
+                                * We can restock, since firmware configured
+                                * the RFH
+                                */
+                               iwl_pcie_rxmq_restock(trans, trans_pcie->rxq);
+                       }
                }
        }
 
@@ -1933,6 +1960,10 @@ irqreturn_t iwl_pcie_irq_msix_handler(int irq, void *dev_id)
        if (inta_hw & MSIX_HW_INT_CAUSES_REG_ALIVE) {
                IWL_DEBUG_ISR(trans, "Alive interrupt\n");
                isr_stats->alive++;
+               if (trans->cfg->gen2) {
+                       /* We can restock, since firmware configured the RFH */
+                       iwl_pcie_rxmq_restock(trans, trans_pcie->rxq);
+               }
        }
 
        /* uCode wakes up after power-down sleep */
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c b/drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c
new file mode 100644 (file)
index 0000000..ac60a28
--- /dev/null
@@ -0,0 +1,374 @@
+/******************************************************************************
+ *
+ * This file is provided under a dual BSD/GPLv2 license.  When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2017 Intel Deutschland GmbH
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2017 Intel Deutschland GmbH
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ *  * Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ *  * Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in
+ *    the documentation and/or other materials provided with the
+ *    distribution.
+ *  * Neither the name Intel Corporation nor the names of its
+ *    contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *****************************************************************************/
+#include "iwl-trans.h"
+#include "iwl-context-info.h"
+#include "internal.h"
+
+/*
+ * Start up NIC's basic functionality after it has been reset
+ * (e.g. after platform boot, or shutdown via iwl_pcie_apm_stop())
+ * NOTE:  This does not load uCode nor start the embedded processor
+ */
+static int iwl_pcie_gen2_apm_init(struct iwl_trans *trans)
+{
+       int ret = 0;
+
+       IWL_DEBUG_INFO(trans, "Init card's basic functions\n");
+
+       /*
+        * Use "set_bit" below rather than "write", to preserve any hardware
+        * bits already set by default after reset.
+        */
+
+       /*
+        * Disable L0s without affecting L1;
+        * don't wait for ICH L0s (ICH bug W/A)
+        */
+       iwl_set_bit(trans, CSR_GIO_CHICKEN_BITS,
+                   CSR_GIO_CHICKEN_BITS_REG_BIT_L1A_NO_L0S_RX);
+
+       /* Set FH wait threshold to maximum (HW error during stress W/A) */
+       iwl_set_bit(trans, CSR_DBG_HPET_MEM_REG, CSR_DBG_HPET_MEM_REG_VAL);
+
+       /*
+        * Enable HAP INTA (interrupt from management bus) to
+        * wake device's PCI Express link L1a -> L0s
+        */
+       iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG,
+                   CSR_HW_IF_CONFIG_REG_BIT_HAP_WAKE_L1A);
+
+       iwl_pcie_apm_config(trans);
+
+       /*
+        * Set "initialization complete" bit to move adapter from
+        * D0U* --> D0A* (powered-up active) state.
+        */
+       iwl_set_bit(trans, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
+
+       /*
+        * Wait for clock stabilization; once stabilized, access to
+        * device-internal resources is supported, e.g. iwl_write_prph()
+        * and accesses to uCode SRAM.
+        */
+       ret = iwl_poll_bit(trans, CSR_GP_CNTRL,
+                          CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
+                          CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY, 25000);
+       if (ret < 0) {
+               IWL_DEBUG_INFO(trans, "Failed to init the card\n");
+               return ret;
+       }
+
+       set_bit(STATUS_DEVICE_ENABLED, &trans->status);
+
+       return 0;
+}
+
+static void iwl_pcie_gen2_apm_stop(struct iwl_trans *trans, bool op_mode_leave)
+{
+       IWL_DEBUG_INFO(trans, "Stop card, put in low power state\n");
+
+       if (op_mode_leave) {
+               if (!test_bit(STATUS_DEVICE_ENABLED, &trans->status))
+                       iwl_pcie_gen2_apm_init(trans);
+
+               /* inform ME that we are leaving */
+               iwl_set_bit(trans, CSR_DBG_LINK_PWR_MGMT_REG,
+                           CSR_RESET_LINK_PWR_MGMT_DISABLED);
+               iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG,
+                           CSR_HW_IF_CONFIG_REG_PREPARE |
+                           CSR_HW_IF_CONFIG_REG_ENABLE_PME);
+               mdelay(1);
+               iwl_clear_bit(trans, CSR_DBG_LINK_PWR_MGMT_REG,
+                             CSR_RESET_LINK_PWR_MGMT_DISABLED);
+               mdelay(5);
+       }
+
+       clear_bit(STATUS_DEVICE_ENABLED, &trans->status);
+
+       /* Stop device's DMA activity */
+       iwl_pcie_apm_stop_master(trans);
+
+       /* Reset the entire device */
+       iwl_set_bit(trans, CSR_RESET, CSR_RESET_REG_FLAG_SW_RESET);
+       usleep_range(1000, 2000);
+
+       /*
+        * Clear "initialization complete" bit to move adapter from
+        * D0A* (powered-up Active) --> D0U* (Uninitialized) state.
+        */
+       iwl_clear_bit(trans, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
+}
+
+void _iwl_trans_pcie_gen2_stop_device(struct iwl_trans *trans, bool low_power)
+{
+       struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+       bool hw_rfkill, was_hw_rfkill;
+
+       lockdep_assert_held(&trans_pcie->mutex);
+
+       if (trans_pcie->is_down)
+               return;
+
+       trans_pcie->is_down = true;
+
+       was_hw_rfkill = iwl_is_rfkill_set(trans);
+
+       /* tell the device to stop sending interrupts */
+       iwl_disable_interrupts(trans);
+
+       /* device going down, Stop using ICT table */
+       iwl_pcie_disable_ict(trans);
+
+       /*
+        * If a HW restart happens during firmware loading,
+        * then the firmware loading might call this function
+        * and later it might be called again due to the
+        * restart. So don't process again if the device is
+        * already dead.
+        */
+       if (test_and_clear_bit(STATUS_DEVICE_ENABLED, &trans->status)) {
+               IWL_DEBUG_INFO(trans,
+                              "DEVICE_ENABLED bit was set and is now cleared\n");
+               iwl_pcie_gen2_tx_stop(trans);
+               iwl_pcie_rx_stop(trans);
+       }
+
+       iwl_pcie_ctxt_info_free_paging(trans);
+       iwl_pcie_ctxt_info_free(trans);
+
+       /* Make sure (redundant) we've released our request to stay awake */
+       iwl_clear_bit(trans, CSR_GP_CNTRL,
+                     CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
+
+       /* Stop the device, and put it in low power state */
+       iwl_pcie_gen2_apm_stop(trans, false);
+
+       /* stop and reset the on-board processor */
+       iwl_write32(trans, CSR_RESET, CSR_RESET_REG_FLAG_SW_RESET);
+       usleep_range(1000, 2000);
+
+       /*
+        * Upon stop, the IVAR table gets erased, so msi-x won't
+        * work. This causes a bug in RF-KILL flows, since the interrupt
+        * that enables radio won't fire on the correct irq, and the
+        * driver won't be able to handle the interrupt.
+        * Configure the IVAR table again after reset.
+        */
+       iwl_pcie_conf_msix_hw(trans_pcie);
+
+       /*
+        * Upon stop, the APM issues an interrupt if HW RF kill is set.
+        * This is a bug in certain versions of the hardware.
+        * Certain devices also keep sending the HW RF kill interrupt all
+        * the time unless it is ACKed, even when the interrupt should be
+        * masked. Re-ACK all the interrupts here.
+        */
+       iwl_disable_interrupts(trans);
+
+       /* clear all status bits */
+       clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status);
+       clear_bit(STATUS_INT_ENABLED, &trans->status);
+       clear_bit(STATUS_TPOWER_PMI, &trans->status);
+       clear_bit(STATUS_RFKILL, &trans->status);
+
+       /*
+        * Even if we stop the HW, we still want the RF kill
+        * interrupt
+        */
+       iwl_enable_rfkill_int(trans);
+
+       /*
+        * Check again since the RF kill state may have changed while
+        * all the interrupts were disabled, in this case we couldn't
+        * receive the RF kill interrupt and update the state in the
+        * op_mode.
+        * Don't call the op_mode if the rfkill state hasn't changed.
+        * This allows the op_mode to call stop_device from the rfkill
+        * notification without endless recursion. Under very rare
+        * circumstances, we might have a small recursion if the rfkill
+        * state changed exactly now while we were called from stop_device.
+        * This is very unlikely but can happen and is supported.
+        */
+       hw_rfkill = iwl_is_rfkill_set(trans);
+       if (hw_rfkill)
+               set_bit(STATUS_RFKILL, &trans->status);
+       else
+               clear_bit(STATUS_RFKILL, &trans->status);
+       if (hw_rfkill != was_hw_rfkill)
+               iwl_trans_pcie_rf_kill(trans, hw_rfkill);
+
+       /* re-take ownership to prevent other users from stealing the device */
+       iwl_pcie_prepare_card_hw(trans);
+}
+
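+/*
+ * Locking: _iwl_trans_pcie_gen2_stop_device() requires trans_pcie->mutex
+ * to be held (it asserts this via lockdep), while the wrapper below takes
+ * the mutex itself. Callers that already hold the mutex, such as the
+ * rfkill path in iwl_trans_pcie_rf_kill(), use the underscore variant
+ * directly.
+ */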
+void iwl_trans_pcie_gen2_stop_device(struct iwl_trans *trans, bool low_power)
+{
+       struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+
+       mutex_lock(&trans_pcie->mutex);
+       _iwl_trans_pcie_gen2_stop_device(trans, low_power);
+       mutex_unlock(&trans_pcie->mutex);
+}
+
+static int iwl_pcie_gen2_nic_init(struct iwl_trans *trans)
+{
+       struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+
+       /* TODO: most of the logic can be removed in A0 - but not in Z0 */
+       spin_lock(&trans_pcie->irq_lock);
+       iwl_pcie_gen2_apm_init(trans);
+       spin_unlock(&trans_pcie->irq_lock);
+
+       iwl_op_mode_nic_config(trans->op_mode);
+
+       /* Allocate the RX queue, or reset if it is already allocated */
+       if (iwl_pcie_gen2_rx_init(trans))
+               return -ENOMEM;
+
+       /* Allocate or reset and init all Tx and Command queues */
+       if (iwl_pcie_gen2_tx_init(trans))
+               return -ENOMEM;
+
+       /* enable shadow regs in HW */
+       iwl_set_bit(trans, CSR_MAC_SHADOW_REG_CTRL, 0x800FFFFF);
+       IWL_DEBUG_INFO(trans, "Enabling shadow registers in device\n");
+
+       return 0;
+}
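+
+/*
+ * A note on the ordering above (as implemented here, not mandated
+ * elsewhere): the APM is brought up under irq_lock first, then the
+ * op_mode configures the NIC, and only then are the RX and TX/command
+ * queues allocated or reset, so that iwl_pcie_ctxt_info_init() - called
+ * next in iwl_trans_pcie_gen2_start_fw() - runs against fully
+ * initialized queues.
+ */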
+
+void iwl_trans_pcie_gen2_fw_alive(struct iwl_trans *trans, u32 scd_addr)
+{
+       struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+
+       iwl_pcie_reset_ict(trans);
+
+       /* make sure all queues are neither stopped nor used */
+       memset(trans_pcie->queue_stopped, 0, sizeof(trans_pcie->queue_stopped));
+       memset(trans_pcie->queue_used, 0, sizeof(trans_pcie->queue_used));
+
+       /* now that we got the ALIVE notification we can free the fw image
+        * and the context info; paging memory cannot be freed yet since
+        * the FW will still use it
+        */
+       iwl_pcie_ctxt_info_free(trans);
+}
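+
+/*
+ * Note: only the context info itself is freed here; the paging memory it
+ * references is released later in the stop path (see
+ * iwl_pcie_ctxt_info_free_paging() in _iwl_trans_pcie_gen2_stop_device()),
+ * since the firmware keeps using it after ALIVE.
+ */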
+
+int iwl_trans_pcie_gen2_start_fw(struct iwl_trans *trans,
+                                const struct fw_img *fw, bool run_in_rfkill)
+{
+       struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+       bool hw_rfkill;
+       int ret;
+
+       /* This may fail if AMT took ownership of the device */
+       if (iwl_pcie_prepare_card_hw(trans)) {
+               IWL_WARN(trans, "Exit HW not ready\n");
+               ret = -EIO;
+               goto out;
+       }
+
+       iwl_enable_rfkill_int(trans);
+
+       iwl_write32(trans, CSR_INT, 0xFFFFFFFF);
+
+       /*
+        * We enabled the RF-Kill interrupt and the handler may very
+        * well be running. Disable the interrupts to make sure no other
+        * interrupt can be fired.
+        */
+       iwl_disable_interrupts(trans);
+
+       /* Make sure it finished running */
+       iwl_pcie_synchronize_irqs(trans);
+
+       mutex_lock(&trans_pcie->mutex);
+
+       /* If platform's RF_KILL switch is NOT set to KILL */
+       hw_rfkill = iwl_trans_check_hw_rf_kill(trans);
+       if (hw_rfkill && !run_in_rfkill) {
+               ret = -ERFKILL;
+               goto out;
+       }
+
+       /* Someone called stop_device, don't try to start_fw */
+       if (trans_pcie->is_down) {
+               IWL_WARN(trans,
+                        "Can't start_fw since the HW hasn't been started\n");
+               ret = -EIO;
+               goto out;
+       }
+
+       /* make sure rfkill handshake bits are cleared */
+       iwl_write32(trans, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
+       iwl_write32(trans, CSR_UCODE_DRV_GP1_CLR,
+                   CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);
+
+       /* clear (again), then enable host interrupts */
+       iwl_write32(trans, CSR_INT, 0xFFFFFFFF);
+
+       ret = iwl_pcie_gen2_nic_init(trans);
+       if (ret) {
+               IWL_ERR(trans, "Unable to init nic\n");
+               goto out;
+       }
+
+       ret = iwl_pcie_ctxt_info_init(trans, fw);
+       if (ret)
+               goto out;
+
+       /* re-check RF-Kill state since we may have missed the interrupt */
+       hw_rfkill = iwl_trans_check_hw_rf_kill(trans);
+       if (hw_rfkill && !run_in_rfkill)
+               ret = -ERFKILL;
+
+out:
+       mutex_unlock(&trans_pcie->mutex);
+       return ret;
+}
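+
+/*
+ * Summary of the gen2 start_fw flow above: take ownership of the device
+ * (this may fail if AMT holds it), quiesce interrupts, check the RF kill
+ * switch, init the NIC (APM plus RX/TX queues), then hand the firmware
+ * image to the device through the context info. RF kill is re-checked at
+ * the end because its interrupt may have been missed while interrupts
+ * were disabled.
+ */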
index 7f05fc56587add6336fc4b453b2282e6ff8c4730..91f6030529b32ef2848e3d94f09922b627d6df3b 100644 (file)
@@ -7,7 +7,7 @@
  *
  * Copyright(c) 2007 - 2015 Intel Corporation. All rights reserved.
  * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
- * Copyright(c) 2016 Intel Deutschland GmbH
+ * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of version 2 of the GNU General Public License as
@@ -34,7 +34,7 @@
  *
  * Copyright(c) 2005 - 2015 Intel Corporation. All rights reserved.
  * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
- * Copyright(c) 2016 Intel Deutschland GmbH
+ * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -201,7 +201,7 @@ static void iwl_pcie_set_pwr(struct iwl_trans *trans, bool vaux)
 /* PCI registers */
 #define PCI_CFG_RETRY_TIMEOUT  0x041
 
-static void iwl_pcie_apm_config(struct iwl_trans *trans)
+void iwl_pcie_apm_config(struct iwl_trans *trans)
 {
        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
        u16 lctl;
@@ -448,7 +448,7 @@ static void iwl_pcie_apm_lp_xtal_enable(struct iwl_trans *trans)
                                 ~SHR_APMG_XTAL_CFG_XTAL_ON_REQ);
 }
 
-static int iwl_pcie_apm_stop_master(struct iwl_trans *trans)
+int iwl_pcie_apm_stop_master(struct iwl_trans *trans)
 {
        int ret = 0;
 
@@ -567,7 +567,7 @@ static int iwl_pcie_set_hw_ready(struct iwl_trans *trans)
 }
 
 /* Note: returns standard 0/-ERROR code */
-static int iwl_pcie_prepare_card_hw(struct iwl_trans *trans)
+int iwl_pcie_prepare_card_hw(struct iwl_trans *trans)
 {
        int ret;
        int t = 0;
@@ -636,29 +636,6 @@ static void iwl_pcie_load_firmware_chunk_fh(struct iwl_trans *trans,
                    FH_TCSR_TX_CONFIG_REG_VAL_CIRQ_HOST_ENDTFD);
 }
 
-static void iwl_pcie_load_firmware_chunk_tfh(struct iwl_trans *trans,
-                                            u32 dst_addr, dma_addr_t phy_addr,
-                                            u32 byte_cnt)
-{
-       /* Stop DMA channel */
-       iwl_write32(trans, TFH_SRV_DMA_CHNL0_CTRL, 0);
-
-       /* Configure SRAM address */
-       iwl_write32(trans, TFH_SRV_DMA_CHNL0_SRAM_ADDR,
-                   dst_addr);
-
-       /* Configure DRAM address - 64 bit */
-       iwl_write64(trans, TFH_SRV_DMA_CHNL0_DRAM_ADDR, phy_addr);
-
-       /* Configure byte count to transfer */
-       iwl_write32(trans, TFH_SRV_DMA_CHNL0_BC, byte_cnt);
-
-       /* Enable the DRAM2SRAM to start */
-       iwl_write32(trans, TFH_SRV_DMA_CHNL0_CTRL, TFH_SRV_DMA_SNOOP |
-                                                  TFH_SRV_DMA_TO_DRIVER |
-                                                  TFH_SRV_DMA_START);
-}
-
 static int iwl_pcie_load_firmware_chunk(struct iwl_trans *trans,
                                        u32 dst_addr, dma_addr_t phy_addr,
                                        u32 byte_cnt)
@@ -672,12 +649,8 @@ static int iwl_pcie_load_firmware_chunk(struct iwl_trans *trans,
        if (!iwl_trans_grab_nic_access(trans, &flags))
                return -EIO;
 
-       if (trans->cfg->use_tfh)
-               iwl_pcie_load_firmware_chunk_tfh(trans, dst_addr, phy_addr,
-                                                byte_cnt);
-       else
-               iwl_pcie_load_firmware_chunk_fh(trans, dst_addr, phy_addr,
-                                               byte_cnt);
+       iwl_pcie_load_firmware_chunk_fh(trans, dst_addr, phy_addr,
+                                       byte_cnt);
        iwl_trans_release_nic_access(trans, &flags);
 
        ret = wait_event_timeout(trans_pcie->ucode_write_waitq,
@@ -747,47 +720,6 @@ static int iwl_pcie_load_section(struct iwl_trans *trans, u8 section_num,
        return ret;
 }
 
-/*
- * Driver Takes the ownership on secure machine before FW load
- * and prevent race with the BT load.
- * W/A for ROM bug. (should be remove in the next Si step)
- */
-static int iwl_pcie_rsa_race_bug_wa(struct iwl_trans *trans)
-{
-       u32 val, loop = 1000;
-
-       /*
-        * Check the RSA semaphore is accessible.
-        * If the HW isn't locked and the rsa semaphore isn't accessible,
-        * we are in trouble.
-        */
-       val = iwl_read_prph(trans, PREG_AUX_BUS_WPROT_0);
-       if (val & (BIT(1) | BIT(17))) {
-               IWL_DEBUG_INFO(trans,
-                              "can't access the RSA semaphore it is write protected\n");
-               return 0;
-       }
-
-       /* take ownership on the AUX IF */
-       iwl_write_prph(trans, WFPM_CTRL_REG, WFPM_AUX_CTL_AUX_IF_MAC_OWNER_MSK);
-       iwl_write_prph(trans, AUX_MISC_MASTER1_EN, AUX_MISC_MASTER1_EN_SBE_MSK);
-
-       do {
-               iwl_write_prph(trans, AUX_MISC_MASTER1_SMPHR_STATUS, 0x1);
-               val = iwl_read_prph(trans, AUX_MISC_MASTER1_SMPHR_STATUS);
-               if (val == 0x1) {
-                       iwl_write_prph(trans, RSA_ENABLE, 0);
-                       return 0;
-               }
-
-               udelay(10);
-               loop--;
-       } while (loop > 0);
-
-       IWL_ERR(trans, "Failed to take ownership on secure machine\n");
-       return -EIO;
-}
-
 static int iwl_pcie_load_cpu_sections_8000(struct iwl_trans *trans,
                                           const struct fw_img *image,
                                           int cpu,
@@ -828,15 +760,10 @@ static int iwl_pcie_load_cpu_sections_8000(struct iwl_trans *trans,
                        return ret;
 
                /* Notify ucode of loaded section number and status */
-               if (trans->cfg->use_tfh) {
-                       val = iwl_read_prph(trans, UREG_UCODE_LOAD_STATUS);
-                       val = val | (sec_num << shift_param);
-                       iwl_write_prph(trans, UREG_UCODE_LOAD_STATUS, val);
-               } else {
-                       val = iwl_read_direct32(trans, FH_UCODE_LOAD_STATUS);
-                       val = val | (sec_num << shift_param);
-                       iwl_write_direct32(trans, FH_UCODE_LOAD_STATUS, val);
-               }
+               val = iwl_read_direct32(trans, FH_UCODE_LOAD_STATUS);
+               val = val | (sec_num << shift_param);
+               iwl_write_direct32(trans, FH_UCODE_LOAD_STATUS, val);
+
                sec_num = (sec_num << 1) | 0x1;
        }
 
@@ -1042,10 +969,15 @@ static int iwl_pcie_load_given_ucode_8000(struct iwl_trans *trans,
        if (trans->dbg_dest_tlv)
                iwl_pcie_apply_destination(trans);
 
-       /* TODO: remove in the next Si step */
-       ret = iwl_pcie_rsa_race_bug_wa(trans);
-       if (ret)
-               return ret;
+       IWL_DEBUG_POWER(trans, "Original WFPM value = 0x%08X\n",
+                       iwl_read_prph(trans, WFPM_GP2));
+
+       /*
+        * Set a default value. On resume, reading the values that were
+        * zeroed can provide debug data on the resume flow.
+        * This is for debugging only and has no functional impact.
+        */
+       iwl_write_prph(trans, WFPM_GP2, 0x01010101);
 
        /* configure the ucode to be ready to get the secured image */
        /* release CPU reset */
@@ -1062,7 +994,7 @@ static int iwl_pcie_load_given_ucode_8000(struct iwl_trans *trans,
                                               &first_ucode_section);
 }
 
-static bool iwl_trans_check_hw_rf_kill(struct iwl_trans *trans)
+bool iwl_trans_check_hw_rf_kill(struct iwl_trans *trans)
 {
        bool hw_rfkill = iwl_is_rfkill_set(trans);
 
@@ -1147,7 +1079,7 @@ static void iwl_pcie_map_rx_causes(struct iwl_trans *trans)
                iwl_write8(trans, CSR_MSIX_RX_IVAR(1), val);
 }
 
-static void iwl_pcie_conf_msix_hw(struct iwl_trans_pcie *trans_pcie)
+void iwl_pcie_conf_msix_hw(struct iwl_trans_pcie *trans_pcie)
 {
        struct iwl_trans *trans = trans_pcie->trans;
 
@@ -1299,7 +1231,7 @@ static void _iwl_trans_pcie_stop_device(struct iwl_trans *trans, bool low_power)
        iwl_pcie_prepare_card_hw(trans);
 }
 
-static void iwl_pcie_synchronize_irqs(struct iwl_trans *trans)
+void iwl_pcie_synchronize_irqs(struct iwl_trans *trans)
 {
        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
 
@@ -1423,8 +1355,12 @@ void iwl_trans_pcie_rf_kill(struct iwl_trans *trans, bool state)
 
        lockdep_assert_held(&trans_pcie->mutex);
 
-       if (iwl_op_mode_hw_rf_kill(trans->op_mode, state))
-               _iwl_trans_pcie_stop_device(trans, true);
+       if (iwl_op_mode_hw_rf_kill(trans->op_mode, state)) {
+               if (trans->cfg->gen2)
+                       _iwl_trans_pcie_gen2_stop_device(trans, true);
+               else
+                       _iwl_trans_pcie_stop_device(trans, true);
+       }
 }
 
 static void iwl_trans_pcie_d3_suspend(struct iwl_trans *trans, bool test,
@@ -1527,6 +1463,9 @@ static int iwl_trans_pcie_d3_resume(struct iwl_trans *trans,
                }
        }
 
+       IWL_DEBUG_POWER(trans, "WFPM value upon resume = 0x%08X\n",
+                       iwl_read_prph(trans, WFPM_GP2));
+
        val = iwl_read32(trans, CSR_RESET);
        if (val & CSR_RESET_REG_FLAG_NEVO_RESET)
                *status = IWL_D3_STATUS_RESET;
@@ -1828,7 +1767,10 @@ void iwl_trans_pcie_free(struct iwl_trans *trans)
 
        iwl_pcie_synchronize_irqs(trans);
 
-       iwl_pcie_tx_free(trans);
+       if (trans->cfg->gen2)
+               iwl_pcie_gen2_tx_free(trans);
+       else
+               iwl_pcie_tx_free(trans);
        iwl_pcie_rx_free(trans);
 
        if (trans_pcie->msix_enabled) {
@@ -1998,7 +1940,7 @@ static void iwl_trans_pcie_freeze_txq_timer(struct iwl_trans *trans,
        int queue;
 
        for_each_set_bit(queue, &txqs, BITS_PER_LONG) {
-               struct iwl_txq *txq = &trans_pcie->txq[queue];
+               struct iwl_txq *txq = trans_pcie->txq[queue];
                unsigned long now;
 
                spin_lock_bh(&txq->lock);
@@ -2050,7 +1992,7 @@ static void iwl_trans_pcie_block_txq_ptrs(struct iwl_trans *trans, bool block)
        int i;
 
        for (i = 0; i < trans->cfg->base_params->num_of_queues; i++) {
-               struct iwl_txq *txq = &trans_pcie->txq[i];
+               struct iwl_txq *txq = trans_pcie->txq[i];
 
                if (i == trans_pcie->cmd_queue)
                        continue;
@@ -2075,48 +2017,32 @@ static void iwl_trans_pcie_block_txq_ptrs(struct iwl_trans *trans, bool block)
 
 void iwl_trans_pcie_log_scd_error(struct iwl_trans *trans, struct iwl_txq *txq)
 {
-       struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
-       u32 scd_sram_addr;
-       u8 buf[16];
-       int cnt;
+       u32 txq_id = txq->id;
+       u32 status;
+       bool active;
+       u8 fifo;
 
-       IWL_ERR(trans, "Current SW read_ptr %d write_ptr %d\n",
-               txq->read_ptr, txq->write_ptr);
-
-       if (trans->cfg->use_tfh)
+       if (trans->cfg->use_tfh) {
+               IWL_ERR(trans, "Queue %d is stuck %d %d\n", txq_id,
+                       txq->read_ptr, txq->write_ptr);
                /* TODO: access new SCD registers and dump them */
                return;
+       }
 
-       scd_sram_addr = trans_pcie->scd_base_addr +
-                       SCD_TX_STTS_QUEUE_OFFSET(txq->id);
-       iwl_trans_read_mem_bytes(trans, scd_sram_addr, buf, sizeof(buf));
-
-       iwl_print_hex_error(trans, buf, sizeof(buf));
-
-       for (cnt = 0; cnt < FH_TCSR_CHNL_NUM; cnt++)
-               IWL_ERR(trans, "FH TRBs(%d) = 0x%08x\n", cnt,
-                       iwl_read_direct32(trans, FH_TX_TRB_REG(cnt)));
-
-       for (cnt = 0; cnt < trans->cfg->base_params->num_of_queues; cnt++) {
-               u32 status = iwl_read_prph(trans, SCD_QUEUE_STATUS_BITS(cnt));
-               u8 fifo = (status >> SCD_QUEUE_STTS_REG_POS_TXF) & 0x7;
-               bool active = !!(status & BIT(SCD_QUEUE_STTS_REG_POS_ACTIVE));
-               u32 tbl_dw =
-                       iwl_trans_read_mem32(trans, trans_pcie->scd_base_addr +
-                                            SCD_TRANS_TBL_OFFSET_QUEUE(cnt));
-
-               if (cnt & 0x1)
-                       tbl_dw = (tbl_dw & 0xFFFF0000) >> 16;
-               else
-                       tbl_dw = tbl_dw & 0x0000FFFF;
+       status = iwl_read_prph(trans, SCD_QUEUE_STATUS_BITS(txq_id));
+       fifo = (status >> SCD_QUEUE_STTS_REG_POS_TXF) & 0x7;
+       active = !!(status & BIT(SCD_QUEUE_STTS_REG_POS_ACTIVE));
 
-               IWL_ERR(trans,
-                       "Q %d is %sactive and mapped to fifo %d ra_tid 0x%04x [%d,%d]\n",
-                       cnt, active ? "" : "in", fifo, tbl_dw,
-                       iwl_read_prph(trans, SCD_QUEUE_RDPTR(cnt)) &
-                               (TFD_QUEUE_SIZE_MAX - 1),
-                       iwl_read_prph(trans, SCD_QUEUE_WRPTR(cnt)));
-       }
+       IWL_ERR(trans,
+               "Queue %d is %sactive on fifo %d and stuck for %u ms. SW [%d, %d] HW [%d, %d] FH TRB=0x%x\n",
+               txq_id, active ? "" : "in", fifo,
+               jiffies_to_msecs(txq->wd_timeout),
+               txq->read_ptr, txq->write_ptr,
+               iwl_read_prph(trans, SCD_QUEUE_RDPTR(txq_id)) &
+                       (TFD_QUEUE_SIZE_MAX - 1),
+               iwl_read_prph(trans, SCD_QUEUE_WRPTR(txq_id)) &
+                       (TFD_QUEUE_SIZE_MAX - 1),
+               iwl_read_direct32(trans, FH_TX_TRB_REG(fifo)));
 }
 
 static int iwl_trans_pcie_wait_txq_empty(struct iwl_trans *trans, u32 txq_bm)
@@ -2139,7 +2065,7 @@ static int iwl_trans_pcie_wait_txq_empty(struct iwl_trans *trans, u32 txq_bm)
                        continue;
 
                IWL_DEBUG_TX_QUEUES(trans, "Emptying queue %d...\n", cnt);
-               txq = &trans_pcie->txq[cnt];
+               txq = trans_pcie->txq[cnt];
                wr_ptr = ACCESS_ONCE(txq->write_ptr);
 
                while (txq->read_ptr != ACCESS_ONCE(txq->write_ptr) &&
@@ -2330,7 +2256,7 @@ static ssize_t iwl_dbgfs_tx_queue_read(struct file *file,
 
        bufsz = sizeof(char) * 75 * trans->cfg->base_params->num_of_queues;
 
-       if (!trans_pcie->txq)
+       if (!trans_pcie->txq_memory)
                return -EAGAIN;
 
        buf = kzalloc(bufsz, GFP_KERNEL);
@@ -2338,7 +2264,7 @@ static ssize_t iwl_dbgfs_tx_queue_read(struct file *file,
                return -ENOMEM;
 
        for (cnt = 0; cnt < trans->cfg->base_params->num_of_queues; cnt++) {
-               txq = &trans_pcie->txq[cnt];
+               txq = trans_pcie->txq[cnt];
                pos += scnprintf(buf + pos, bufsz - pos,
                                "hwq %.2d: read=%u write=%u use=%d stop=%d need_update=%d frozen=%d%s\n",
                                cnt, txq->read_ptr, txq->write_ptr,
@@ -2755,7 +2681,7 @@ static struct iwl_trans_dump_data
 {
        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
        struct iwl_fw_error_dump_data *data;
-       struct iwl_txq *cmdq = &trans_pcie->txq[trans_pcie->cmd_queue];
+       struct iwl_txq *cmdq = trans_pcie->txq[trans_pcie->cmd_queue];
        struct iwl_fw_error_dump_txcmd *txcmd;
        struct iwl_trans_dump_data *dump_data;
        u32 len, num_rbs;
@@ -2890,21 +2816,43 @@ static void iwl_trans_pcie_resume(struct iwl_trans *trans)
 }
 #endif /* CONFIG_PM_SLEEP */
 
+#define IWL_TRANS_COMMON_OPS                                           \
+       .op_mode_leave = iwl_trans_pcie_op_mode_leave,                  \
+       .write8 = iwl_trans_pcie_write8,                                \
+       .write32 = iwl_trans_pcie_write32,                              \
+       .read32 = iwl_trans_pcie_read32,                                \
+       .read_prph = iwl_trans_pcie_read_prph,                          \
+       .write_prph = iwl_trans_pcie_write_prph,                        \
+       .read_mem = iwl_trans_pcie_read_mem,                            \
+       .write_mem = iwl_trans_pcie_write_mem,                          \
+       .configure = iwl_trans_pcie_configure,                          \
+       .set_pmi = iwl_trans_pcie_set_pmi,                              \
+       .grab_nic_access = iwl_trans_pcie_grab_nic_access,              \
+       .release_nic_access = iwl_trans_pcie_release_nic_access,        \
+       .set_bits_mask = iwl_trans_pcie_set_bits_mask,                  \
+       .ref = iwl_trans_pcie_ref,                                      \
+       .unref = iwl_trans_pcie_unref,                                  \
+       .dump_data = iwl_trans_pcie_dump_data,                          \
+       .wait_tx_queue_empty = iwl_trans_pcie_wait_txq_empty,           \
+       .d3_suspend = iwl_trans_pcie_d3_suspend,                        \
+       .d3_resume = iwl_trans_pcie_d3_resume
+
+#ifdef CONFIG_PM_SLEEP
+#define IWL_TRANS_PM_OPS                                               \
+       .suspend = iwl_trans_pcie_suspend,                              \
+       .resume = iwl_trans_pcie_resume,
+#else
+#define IWL_TRANS_PM_OPS
+#endif /* CONFIG_PM_SLEEP */
+
 static const struct iwl_trans_ops trans_ops_pcie = {
+       IWL_TRANS_COMMON_OPS,
+       IWL_TRANS_PM_OPS
        .start_hw = iwl_trans_pcie_start_hw,
-       .op_mode_leave = iwl_trans_pcie_op_mode_leave,
        .fw_alive = iwl_trans_pcie_fw_alive,
        .start_fw = iwl_trans_pcie_start_fw,
        .stop_device = iwl_trans_pcie_stop_device,
 
-       .d3_suspend = iwl_trans_pcie_d3_suspend,
-       .d3_resume = iwl_trans_pcie_d3_resume,
-
-#ifdef CONFIG_PM_SLEEP
-       .suspend = iwl_trans_pcie_suspend,
-       .resume = iwl_trans_pcie_resume,
-#endif /* CONFIG_PM_SLEEP */
-
        .send_cmd = iwl_trans_pcie_send_hcmd,
 
        .tx = iwl_trans_pcie_tx,
@@ -2913,31 +2861,27 @@ static const struct iwl_trans_ops trans_ops_pcie = {
        .txq_disable = iwl_trans_pcie_txq_disable,
        .txq_enable = iwl_trans_pcie_txq_enable,
 
-       .get_txq_byte_table = iwl_trans_pcie_get_txq_byte_table,
-
        .txq_set_shared_mode = iwl_trans_pcie_txq_set_shared_mode,
 
-       .wait_tx_queue_empty = iwl_trans_pcie_wait_txq_empty,
        .freeze_txq_timer = iwl_trans_pcie_freeze_txq_timer,
        .block_txq_ptrs = iwl_trans_pcie_block_txq_ptrs,
+};
+
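+/*
+ * The gen2 ops table below shares all register-access and PM entry points
+ * with the non-gen2 table via IWL_TRANS_COMMON_OPS / IWL_TRANS_PM_OPS; it
+ * differs only in the firmware start/stop hooks, the TX and host-command
+ * paths, and the dynamic TX queue alloc/free ops.
+ */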
+static const struct iwl_trans_ops trans_ops_pcie_gen2 = {
+       IWL_TRANS_COMMON_OPS,
+       IWL_TRANS_PM_OPS
+       .start_hw = iwl_trans_pcie_start_hw,
+       .fw_alive = iwl_trans_pcie_gen2_fw_alive,
+       .start_fw = iwl_trans_pcie_gen2_start_fw,
+       .stop_device = iwl_trans_pcie_gen2_stop_device,
 
-       .write8 = iwl_trans_pcie_write8,
-       .write32 = iwl_trans_pcie_write32,
-       .read32 = iwl_trans_pcie_read32,
-       .read_prph = iwl_trans_pcie_read_prph,
-       .write_prph = iwl_trans_pcie_write_prph,
-       .read_mem = iwl_trans_pcie_read_mem,
-       .write_mem = iwl_trans_pcie_write_mem,
-       .configure = iwl_trans_pcie_configure,
-       .set_pmi = iwl_trans_pcie_set_pmi,
-       .grab_nic_access = iwl_trans_pcie_grab_nic_access,
-       .release_nic_access = iwl_trans_pcie_release_nic_access,
-       .set_bits_mask = iwl_trans_pcie_set_bits_mask,
-
-       .ref = iwl_trans_pcie_ref,
-       .unref = iwl_trans_pcie_unref,
-
-       .dump_data = iwl_trans_pcie_dump_data,
+       .send_cmd = iwl_trans_pcie_gen2_send_hcmd,
+
+       .tx = iwl_trans_pcie_gen2_tx,
+       .reclaim = iwl_trans_pcie_reclaim,
+
+       .txq_alloc = iwl_trans_pcie_dyn_txq_alloc,
+       .txq_free = iwl_trans_pcie_dyn_txq_free,
 };
 
 struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
@@ -2952,8 +2896,12 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
        if (ret)
                return ERR_PTR(ret);
 
-       trans = iwl_trans_alloc(sizeof(struct iwl_trans_pcie),
-                               &pdev->dev, cfg, &trans_ops_pcie, 0);
+       if (cfg->gen2)
+               trans = iwl_trans_alloc(sizeof(struct iwl_trans_pcie),
+                                       &pdev->dev, cfg, &trans_ops_pcie_gen2);
+       else
+               trans = iwl_trans_alloc(sizeof(struct iwl_trans_pcie),
+                                       &pdev->dev, cfg, &trans_ops_pcie);
        if (!trans)
                return ERR_PTR(-ENOMEM);
 
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c b/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c
new file mode 100644 (file)
index 0000000..9fb46a6
--- /dev/null
@@ -0,0 +1,1018 @@
+/******************************************************************************
+ *
+ * This file is provided under a dual BSD/GPLv2 license.  When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2017 Intel Deutschland GmbH
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2017 Intel Deutschland GmbH
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ *  * Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ *  * Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in
+ *    the documentation and/or other materials provided with the
+ *    distribution.
+ *  * Neither the name Intel Corporation nor the names of its
+ *    contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *****************************************************************************/
+#include <linux/pm_runtime.h>
+
+#include "iwl-debug.h"
+#include "iwl-csr.h"
+#include "iwl-io.h"
+#include "internal.h"
+#include "mvm/fw-api.h"
+
+/*
+ * iwl_pcie_gen2_tx_stop - Stop all Tx DMA channels
+ */
+void iwl_pcie_gen2_tx_stop(struct iwl_trans *trans)
+{
+       struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+       int txq_id;
+
+       /*
+        * This function can be called before the op_mode has disabled the
+        * queues. This happens when we have an rfkill interrupt.
+        * Since we stop Tx altogether - mark the queues as stopped.
+        */
+       memset(trans_pcie->queue_stopped, 0, sizeof(trans_pcie->queue_stopped));
+       memset(trans_pcie->queue_used, 0, sizeof(trans_pcie->queue_used));
+
+       /* Unmap DMA from host system and free skb's */
+       for (txq_id = 0; txq_id < ARRAY_SIZE(trans_pcie->txq); txq_id++) {
+               if (!trans_pcie->txq[txq_id])
+                       continue;
+               iwl_pcie_gen2_txq_unmap(trans, txq_id);
+       }
+}
+
+/*
+ * iwl_pcie_gen2_update_byte_tbl - Set up entry in Tx byte-count array
+ */
+static void iwl_pcie_gen2_update_byte_tbl(struct iwl_txq *txq, u16 byte_cnt,
+                                         int num_tbs)
+{
+       struct iwlagn_scd_bc_tbl *scd_bc_tbl = txq->bc_tbl.addr;
+       int write_ptr = txq->write_ptr;
+       u8 filled_tfd_size, num_fetch_chunks;
+       u16 len = byte_cnt;
+       __le16 bc_ent;
+
+       len = DIV_ROUND_UP(len, 4);
+
+       if (WARN_ON(len > 0xFFF || write_ptr >= TFD_QUEUE_SIZE_MAX))
+               return;
+
+       filled_tfd_size = offsetof(struct iwl_tfh_tfd, tbs) +
+                                  num_tbs * sizeof(struct iwl_tfh_tb);
+       /*
+        * filled_tfd_size contains the number of filled bytes in the TFD.
+        * Dividing it by 64 will give the number of chunks to fetch
+        * to SRAM: 0 for one chunk, 1 for two, and so on.
+        * If, for example, TFD contains only 3 TBs then 32 bytes
+        * of the TFD are used, and only one chunk of 64 bytes should
+        * be fetched
+        */
+       num_fetch_chunks = DIV_ROUND_UP(filled_tfd_size, 64) - 1;
+
+       bc_ent = cpu_to_le16(len | (num_fetch_chunks << 12));
+       scd_bc_tbl->tfd_offset[write_ptr] = bc_ent;
+}
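+
+/*
+ * Illustration, derived from the code above rather than from external
+ * documentation: bc_ent packs the length in dwords into bits 0..11
+ * (hence the WARN_ON(len > 0xFFF)) and num_fetch_chunks above bit 12.
+ * E.g. byte_cnt = 200 gives len = DIV_ROUND_UP(200, 4) = 50 dwords; a
+ * TFD with 3 TBs uses 32 bytes, so num_fetch_chunks =
+ * DIV_ROUND_UP(32, 64) - 1 = 0 and bc_ent = cpu_to_le16(50 | (0 << 12)).
+ */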
+
+/*
+ * iwl_pcie_gen2_txq_inc_wr_ptr - Send new write index to hardware
+ */
+static void iwl_pcie_gen2_txq_inc_wr_ptr(struct iwl_trans *trans,
+                                        struct iwl_txq *txq)
+{
+       lockdep_assert_held(&txq->lock);
+
+       IWL_DEBUG_TX(trans, "Q:%d WR: 0x%x\n", txq->id, txq->write_ptr);
+
+       /*
+        * if not in power-save mode, uCode will never sleep when we're
+        * trying to tx (during RFKILL, we're not trying to tx).
+        */
+       iwl_write32(trans, HBUS_TARG_WRPTR, txq->write_ptr | (txq->id << 16));
+}
+
+static u8 iwl_pcie_gen2_get_num_tbs(struct iwl_trans *trans,
+                                   struct iwl_tfh_tfd *tfd)
+{
+       return le16_to_cpu(tfd->num_tbs) & 0x1f;
+}
+
+static void iwl_pcie_gen2_tfd_unmap(struct iwl_trans *trans,
+                                   struct iwl_cmd_meta *meta,
+                                   struct iwl_tfh_tfd *tfd)
+{
+       struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+       int i, num_tbs;
+
+       /* Sanity check on number of chunks */
+       num_tbs = iwl_pcie_gen2_get_num_tbs(trans, tfd);
+
+       if (num_tbs >= trans_pcie->max_tbs) {
+               IWL_ERR(trans, "Too many chunks: %i\n", num_tbs);
+               return;
+       }
+
+       /* first TB is never freed - it's the bidirectional DMA data */
+       for (i = 1; i < num_tbs; i++) {
+               if (meta->tbs & BIT(i))
+                       dma_unmap_page(trans->dev,
+                                      le64_to_cpu(tfd->tbs[i].addr),
+                                      le16_to_cpu(tfd->tbs[i].tb_len),
+                                      DMA_TO_DEVICE);
+               else
+                       dma_unmap_single(trans->dev,
+                                        le64_to_cpu(tfd->tbs[i].addr),
+                                        le16_to_cpu(tfd->tbs[i].tb_len),
+                                        DMA_TO_DEVICE);
+       }
+
+       tfd->num_tbs = 0;
+}
+
+static void iwl_pcie_gen2_free_tfd(struct iwl_trans *trans, struct iwl_txq *txq)
+{
+       struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+
+       /* rd_ptr is bounded by TFD_QUEUE_SIZE_MAX and
+        * idx is bounded by n_window
+        */
+       int rd_ptr = txq->read_ptr;
+       int idx = get_cmd_index(txq, rd_ptr);
+
+       lockdep_assert_held(&txq->lock);
+
+       /* We have only q->n_window txq->entries, but we use
+        * TFD_QUEUE_SIZE_MAX tfds
+        */
+       iwl_pcie_gen2_tfd_unmap(trans, &txq->entries[idx].meta,
+                               iwl_pcie_get_tfd(trans_pcie, txq, rd_ptr));
+
+       /* free SKB */
+       if (txq->entries) {
+               struct sk_buff *skb;
+
+               skb = txq->entries[idx].skb;
+
+               /* Can be called from an irqs-disabled context.
+                * If skb is not NULL, it means the whole queue is being
+                * freed while not yet empty - free the skb
+                */
+               if (skb) {
+                       iwl_op_mode_free_skb(trans->op_mode, skb);
+                       txq->entries[idx].skb = NULL;
+               }
+       }
+}
+
+static int iwl_pcie_gen2_set_tb(struct iwl_trans *trans,
+                               struct iwl_tfh_tfd *tfd, dma_addr_t addr,
+                               u16 len)
+{
+       struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+       int idx = iwl_pcie_gen2_get_num_tbs(trans, tfd);
+       struct iwl_tfh_tb *tb = &tfd->tbs[idx];
+
+       /* Each TFD can point to a maximum of max_tbs Tx buffers */
+       if (le16_to_cpu(tfd->num_tbs) >= trans_pcie->max_tbs) {
+               IWL_ERR(trans, "Error can not send more than %d chunks\n",
+                       trans_pcie->max_tbs);
+               return -EINVAL;
+       }
+
+       put_unaligned_le64(addr, &tb->addr);
+       tb->tb_len = cpu_to_le16(len);
+
+       tfd->num_tbs = cpu_to_le16(idx + 1);
+
+       return idx;
+}
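+
+/*
+ * iwl_pcie_gen2_set_tb() returns the index of the TB it just filled.
+ * Callers that map skb fragments record that index in out_meta->tbs
+ * (see iwl_pcie_gen2_build_tfd() below), so that iwl_pcie_gen2_tfd_unmap()
+ * knows which entries to unmap as pages and which as single buffers.
+ */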
+
+static
+struct iwl_tfh_tfd *iwl_pcie_gen2_build_tfd(struct iwl_trans *trans,
+                                           struct iwl_txq *txq,
+                                           struct iwl_device_cmd *dev_cmd,
+                                           struct sk_buff *skb,
+                                           struct iwl_cmd_meta *out_meta)
+{
+       struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+       struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
+       struct iwl_tfh_tfd *tfd =
+               iwl_pcie_get_tfd(trans_pcie, txq, txq->write_ptr);
+       dma_addr_t tb_phys;
+       int i, len, tb1_len, tb2_len, hdr_len;
+       void *tb1_addr;
+
+       memset(tfd, 0, sizeof(*tfd));
+
+       tb_phys = iwl_pcie_get_first_tb_dma(txq, txq->write_ptr);
+       /* The first TB points to bi-directional DMA data */
+       memcpy(&txq->first_tb_bufs[txq->write_ptr], &dev_cmd->hdr,
+              IWL_FIRST_TB_SIZE);
+
+       iwl_pcie_gen2_set_tb(trans, tfd, tb_phys, IWL_FIRST_TB_SIZE);
+
+       /* there must be data left over for TB1 or this code must be changed */
+       BUILD_BUG_ON(sizeof(struct iwl_tx_cmd_gen2) < IWL_FIRST_TB_SIZE);
+
+       /*
+        * The second TB (tb1) points to the remainder of the TX command
+        * and the 802.11 header - dword aligned size
+        * (This calculation modifies the TX command, so do it before the
+        * setup of the first TB)
+        */
+       len = sizeof(struct iwl_tx_cmd_gen2) + sizeof(struct iwl_cmd_header) +
+             ieee80211_hdrlen(hdr->frame_control) - IWL_FIRST_TB_SIZE;
+
+       tb1_len = ALIGN(len, 4);
+
+       /* map the data for TB1 */
+       tb1_addr = ((u8 *)&dev_cmd->hdr) + IWL_FIRST_TB_SIZE;
+       tb_phys = dma_map_single(trans->dev, tb1_addr, tb1_len, DMA_TO_DEVICE);
+       if (unlikely(dma_mapping_error(trans->dev, tb_phys)))
+               goto out_err;
+       iwl_pcie_gen2_set_tb(trans, tfd, tb_phys, tb1_len);
+
+       /* set up TFD's third entry to point to remainder of skb's head */
+       hdr_len = ieee80211_hdrlen(hdr->frame_control);
+       tb2_len = skb_headlen(skb) - hdr_len;
+
+       if (tb2_len > 0) {
+               tb_phys = dma_map_single(trans->dev, skb->data + hdr_len,
+                                        tb2_len, DMA_TO_DEVICE);
+               if (unlikely(dma_mapping_error(trans->dev, tb_phys)))
+                       goto out_err;
+               iwl_pcie_gen2_set_tb(trans, tfd, tb_phys, tb2_len);
+       }
+
+       /* set up the remaining entries to point to the data */
+       for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+               const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
+               int tb_idx;
+
+               if (!skb_frag_size(frag))
+                       continue;
+
+               tb_phys = skb_frag_dma_map(trans->dev, frag, 0,
+                                          skb_frag_size(frag), DMA_TO_DEVICE);
+
+               if (unlikely(dma_mapping_error(trans->dev, tb_phys)))
+                       goto out_err;
+               tb_idx = iwl_pcie_gen2_set_tb(trans, tfd, tb_phys,
+                                             skb_frag_size(frag));
+
+               out_meta->tbs |= BIT(tb_idx);
+       }
+
+       trace_iwlwifi_dev_tx(trans->dev, skb, tfd, sizeof(*tfd), &dev_cmd->hdr,
+                            IWL_FIRST_TB_SIZE + tb1_len,
+                            skb->data + hdr_len, tb2_len);
+       trace_iwlwifi_dev_tx_data(trans->dev, skb, hdr_len,
+                                 skb->len - hdr_len);
+
+       return tfd;
+
+out_err:
+       iwl_pcie_gen2_tfd_unmap(trans, out_meta, tfd);
+       return NULL;
+}
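+
+/*
+ * Resulting TFD layout, as built above: TB0 holds the first
+ * IWL_FIRST_TB_SIZE bytes of the command (bi-directional DMA data), TB1
+ * holds the rest of the TX command plus the 802.11 header (dword
+ * aligned), TB2 holds any remainder of the skb head, and each paged
+ * fragment gets a TB of its own.
+ */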
+
+int iwl_trans_pcie_gen2_tx(struct iwl_trans *trans, struct sk_buff *skb,
+                          struct iwl_device_cmd *dev_cmd, int txq_id)
+{
+       struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+       struct iwl_tx_cmd_gen2 *tx_cmd = (void *)dev_cmd->payload;
+       struct iwl_cmd_meta *out_meta;
+       struct iwl_txq *txq = trans_pcie->txq[txq_id];
+       void *tfd;
+
+       if (WARN_ONCE(!test_bit(txq_id, trans_pcie->queue_used),
+                     "TX on unused queue %d\n", txq_id))
+               return -EINVAL;
+
+       if (skb_is_nonlinear(skb) &&
+           skb_shinfo(skb)->nr_frags > IWL_PCIE_MAX_FRAGS(trans_pcie) &&
+           __skb_linearize(skb))
+               return -ENOMEM;
+
+       spin_lock(&txq->lock);
+
+       /* Set up driver data for this TFD */
+       txq->entries[txq->write_ptr].skb = skb;
+       txq->entries[txq->write_ptr].cmd = dev_cmd;
+
+       dev_cmd->hdr.sequence =
+               cpu_to_le16((u16)(QUEUE_TO_SEQ(txq_id) |
+                           INDEX_TO_SEQ(txq->write_ptr)));
+
+       /* Set up first empty entry in queue's array of Tx/cmd buffers */
+       out_meta = &txq->entries[txq->write_ptr].meta;
+       out_meta->flags = 0;
+
+       tfd = iwl_pcie_gen2_build_tfd(trans, txq, dev_cmd, skb, out_meta);
+       if (!tfd) {
+               spin_unlock(&txq->lock);
+               return -1;
+       }
+
+       /* Set up entry for this TFD in Tx byte-count array */
+       iwl_pcie_gen2_update_byte_tbl(txq, le16_to_cpu(tx_cmd->len),
+                                     iwl_pcie_gen2_get_num_tbs(trans, tfd));
+
+       /* start timer if queue currently empty */
+       if (txq->read_ptr == txq->write_ptr) {
+               if (txq->wd_timeout)
+                       mod_timer(&txq->stuck_timer, jiffies + txq->wd_timeout);
+               IWL_DEBUG_RPM(trans, "Q: %d first tx - take ref\n", txq->id);
+               iwl_trans_ref(trans);
+       }
+
+       /* Tell device the write index *just past* this latest filled TFD */
+       txq->write_ptr = iwl_queue_inc_wrap(txq->write_ptr);
+       iwl_pcie_gen2_txq_inc_wr_ptr(trans, txq);
+       if (iwl_queue_space(txq) < txq->high_mark)
+               iwl_stop_queue(trans, txq);
+
+       /*
+        * At this point the frame is "transmitted" successfully
+        * and we will get a TX status notification eventually.
+        */
+       spin_unlock(&txq->lock);
+       return 0;
+}
+
+/*************** HOST COMMAND QUEUE FUNCTIONS *****/
+
+/*
+ * iwl_pcie_gen2_enqueue_hcmd - enqueue a uCode command
+ * @trans: transport private data
+ * @cmd: a pointer to the ucode command structure
+ *
+ * The function returns < 0 values to indicate the operation
+ * failed. On success, it returns the index (>= 0) of the command in the
+ * command queue.
+ */
+static int iwl_pcie_gen2_enqueue_hcmd(struct iwl_trans *trans,
+                                     struct iwl_host_cmd *cmd)
+{
+       struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+       struct iwl_txq *txq = trans_pcie->txq[trans_pcie->cmd_queue];
+       struct iwl_device_cmd *out_cmd;
+       struct iwl_cmd_meta *out_meta;
+       unsigned long flags;
+       void *dup_buf = NULL;
+       dma_addr_t phys_addr;
+       int idx, i, cmd_pos;
+       u16 copy_size, cmd_size, tb0_size;
+       bool had_nocopy = false;
+       u8 group_id = iwl_cmd_groupid(cmd->id);
+       const u8 *cmddata[IWL_MAX_CMD_TBS_PER_TFD];
+       u16 cmdlen[IWL_MAX_CMD_TBS_PER_TFD];
+       struct iwl_tfh_tfd *tfd =
+               iwl_pcie_get_tfd(trans_pcie, txq, txq->write_ptr);
+
+       memset(tfd, 0, sizeof(*tfd));
+
+       copy_size = sizeof(struct iwl_cmd_header_wide);
+       cmd_size = sizeof(struct iwl_cmd_header_wide);
+
+       for (i = 0; i < IWL_MAX_CMD_TBS_PER_TFD; i++) {
+               cmddata[i] = cmd->data[i];
+               cmdlen[i] = cmd->len[i];
+
+               if (!cmd->len[i])
+                       continue;
+
+               /* need at least IWL_FIRST_TB_SIZE copied */
+               if (copy_size < IWL_FIRST_TB_SIZE) {
+                       int copy = IWL_FIRST_TB_SIZE - copy_size;
+
+                       if (copy > cmdlen[i])
+                               copy = cmdlen[i];
+                       cmdlen[i] -= copy;
+                       cmddata[i] += copy;
+                       copy_size += copy;
+               }
+
+               if (cmd->dataflags[i] & IWL_HCMD_DFL_NOCOPY) {
+                       had_nocopy = true;
+                       if (WARN_ON(cmd->dataflags[i] & IWL_HCMD_DFL_DUP)) {
+                               idx = -EINVAL;
+                               goto free_dup_buf;
+                       }
+               } else if (cmd->dataflags[i] & IWL_HCMD_DFL_DUP) {
+                       /*
+                        * This is also a chunk that isn't copied
+                        * to the static buffer so set had_nocopy.
+                        */
+                       had_nocopy = true;
+
+                       /* only allowed once */
+                       if (WARN_ON(dup_buf)) {
+                               idx = -EINVAL;
+                               goto free_dup_buf;
+                       }
+
+                       dup_buf = kmemdup(cmddata[i], cmdlen[i],
+                                         GFP_ATOMIC);
+                       if (!dup_buf)
+                               return -ENOMEM;
+               } else {
+                       /* NOCOPY must not be followed by normal! */
+                       if (WARN_ON(had_nocopy)) {
+                               idx = -EINVAL;
+                               goto free_dup_buf;
+                       }
+                       copy_size += cmdlen[i];
+               }
+               cmd_size += cmd->len[i];
+       }
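+
+       /*
+        * At this point copy_size counts the bytes that will be copied
+        * into the command buffer itself, while cmd_size is the full
+        * logical command length, including NOCOPY/DUP chunks that stay
+        * in their own buffers and are mapped separately below.
+        */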
+
+       /*
+        * If any of the command structures end up being larger than the
+        * TFD_MAX_PAYLOAD_SIZE and they aren't dynamically allocated into
+        * separate TFDs, then we will need to increase the size of the buffers
+        */
+       if (WARN(copy_size > TFD_MAX_PAYLOAD_SIZE,
+                "Command %s (%#x) is too large (%d bytes)\n",
+                iwl_get_cmd_string(trans, cmd->id), cmd->id, copy_size)) {
+               idx = -EINVAL;
+               goto free_dup_buf;
+       }
+
+       spin_lock_bh(&txq->lock);
+
+       if (iwl_queue_space(txq) < ((cmd->flags & CMD_ASYNC) ? 2 : 1)) {
+               spin_unlock_bh(&txq->lock);
+
+               IWL_ERR(trans, "No space in command queue\n");
+               iwl_op_mode_cmd_queue_full(trans->op_mode);
+               idx = -ENOSPC;
+               goto free_dup_buf;
+       }
+
+       idx = get_cmd_index(txq, txq->write_ptr);
+       out_cmd = txq->entries[idx].cmd;
+       out_meta = &txq->entries[idx].meta;
+
+       /* re-initialize to NULL */
+       memset(out_meta, 0, sizeof(*out_meta));
+       if (cmd->flags & CMD_WANT_SKB)
+               out_meta->source = cmd;
+
+       /* set up the header */
+       out_cmd->hdr_wide.cmd = iwl_cmd_opcode(cmd->id);
+       out_cmd->hdr_wide.group_id = group_id;
+       out_cmd->hdr_wide.version = iwl_cmd_version(cmd->id);
+       out_cmd->hdr_wide.length =
+               cpu_to_le16(cmd_size - sizeof(struct iwl_cmd_header_wide));
+       out_cmd->hdr_wide.reserved = 0;
+       out_cmd->hdr_wide.sequence =
+               cpu_to_le16(QUEUE_TO_SEQ(trans_pcie->cmd_queue) |
+                                        INDEX_TO_SEQ(txq->write_ptr));
+
+       cmd_pos = sizeof(struct iwl_cmd_header_wide);
+       copy_size = sizeof(struct iwl_cmd_header_wide);
+
+       /* and copy the data that needs to be copied */
+       for (i = 0; i < IWL_MAX_CMD_TBS_PER_TFD; i++) {
+               int copy;
+
+               if (!cmd->len[i])
+                       continue;
+
+               /* copy everything if not nocopy/dup */
+               if (!(cmd->dataflags[i] & (IWL_HCMD_DFL_NOCOPY |
+                                          IWL_HCMD_DFL_DUP))) {
+                       copy = cmd->len[i];
+
+                       memcpy((u8 *)out_cmd + cmd_pos, cmd->data[i], copy);
+                       cmd_pos += copy;
+                       copy_size += copy;
+                       continue;
+               }
+
+               /*
+                * Otherwise we need at least IWL_FIRST_TB_SIZE copied
+                * in total (for bi-directional DMA), but copy up to what
+                * we can fit into the payload for debug dump purposes.
+                */
+               copy = min_t(int, TFD_MAX_PAYLOAD_SIZE - cmd_pos, cmd->len[i]);
+
+               memcpy((u8 *)out_cmd + cmd_pos, cmd->data[i], copy);
+               cmd_pos += copy;
+
+               /* However, keep copy_size accurate - it is needed below */
+               if (copy_size < IWL_FIRST_TB_SIZE) {
+                       copy = IWL_FIRST_TB_SIZE - copy_size;
+
+                       if (copy > cmd->len[i])
+                               copy = cmd->len[i];
+                       copy_size += copy;
+               }
+       }
+
+       IWL_DEBUG_HC(trans,
+                    "Sending command %s (%.2x.%.2x), seq: 0x%04X, %d bytes at %d[%d]:%d\n",
+                    iwl_get_cmd_string(trans, cmd->id), group_id,
+                    out_cmd->hdr.cmd, le16_to_cpu(out_cmd->hdr.sequence),
+                    cmd_size, txq->write_ptr, idx, trans_pcie->cmd_queue);
+
+       /* start the TFD with the minimum copy bytes */
+       tb0_size = min_t(int, copy_size, IWL_FIRST_TB_SIZE);
+       memcpy(&txq->first_tb_bufs[idx], &out_cmd->hdr, tb0_size);
+       iwl_pcie_gen2_set_tb(trans, tfd, iwl_pcie_get_first_tb_dma(txq, idx),
+                            tb0_size);
+
+       /* map first command fragment, if any remains */
+       if (copy_size > tb0_size) {
+               phys_addr = dma_map_single(trans->dev,
+                                          ((u8 *)&out_cmd->hdr) + tb0_size,
+                                          copy_size - tb0_size,
+                                          DMA_TO_DEVICE);
+               if (dma_mapping_error(trans->dev, phys_addr)) {
+                       idx = -ENOMEM;
+                       iwl_pcie_gen2_tfd_unmap(trans, out_meta, tfd);
+                       goto out;
+               }
+               iwl_pcie_gen2_set_tb(trans, tfd, phys_addr,
+                                    copy_size - tb0_size);
+       }
+
+       /* map the remaining (adjusted) nocopy/dup fragments */
+       for (i = 0; i < IWL_MAX_CMD_TBS_PER_TFD; i++) {
+               const void *data = cmddata[i];
+
+               if (!cmdlen[i])
+                       continue;
+               if (!(cmd->dataflags[i] & (IWL_HCMD_DFL_NOCOPY |
+                                          IWL_HCMD_DFL_DUP)))
+                       continue;
+               if (cmd->dataflags[i] & IWL_HCMD_DFL_DUP)
+                       data = dup_buf;
+               phys_addr = dma_map_single(trans->dev, (void *)data,
+                                          cmdlen[i], DMA_TO_DEVICE);
+               if (dma_mapping_error(trans->dev, phys_addr)) {
+                       idx = -ENOMEM;
+                       iwl_pcie_gen2_tfd_unmap(trans, out_meta, tfd);
+                       goto out;
+               }
+               iwl_pcie_gen2_set_tb(trans, tfd, phys_addr, cmdlen[i]);
+       }
+
+       BUILD_BUG_ON(IWL_TFH_NUM_TBS > sizeof(out_meta->tbs) * BITS_PER_BYTE);
+       out_meta->flags = cmd->flags;
+       if (WARN_ON_ONCE(txq->entries[idx].free_buf))
+               kzfree(txq->entries[idx].free_buf);
+       txq->entries[idx].free_buf = dup_buf;
+
+       trace_iwlwifi_dev_hcmd(trans->dev, cmd, cmd_size, &out_cmd->hdr_wide);
+
+       /* start timer if queue currently empty */
+       if (txq->read_ptr == txq->write_ptr && txq->wd_timeout)
+               mod_timer(&txq->stuck_timer, jiffies + txq->wd_timeout);
+
+       spin_lock_irqsave(&trans_pcie->reg_lock, flags);
+       if (!(cmd->flags & CMD_SEND_IN_IDLE) &&
+           !trans_pcie->ref_cmd_in_flight) {
+               trans_pcie->ref_cmd_in_flight = true;
+               IWL_DEBUG_RPM(trans, "set ref_cmd_in_flight - ref\n");
+               iwl_trans_ref(trans);
+       }
+       /* Increment and update queue's write index */
+       txq->write_ptr = iwl_queue_inc_wrap(txq->write_ptr);
+       iwl_pcie_gen2_txq_inc_wr_ptr(trans, txq);
+       spin_unlock_irqrestore(&trans_pcie->reg_lock, flags);
+
+out:
+       spin_unlock_bh(&txq->lock);
+free_dup_buf:
+       if (idx < 0)
+               kfree(dup_buf);
+       return idx;
+}
+
+#define HOST_COMPLETE_TIMEOUT  (2 * HZ)
+
+static int iwl_pcie_gen2_send_hcmd_sync(struct iwl_trans *trans,
+                                       struct iwl_host_cmd *cmd)
+{
+       struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+       const char *cmd_str = iwl_get_cmd_string(trans, cmd->id);
+       struct iwl_txq *txq = trans_pcie->txq[trans_pcie->cmd_queue];
+       int cmd_idx;
+       int ret;
+
+       IWL_DEBUG_INFO(trans, "Attempting to send sync command %s\n", cmd_str);
+
+       if (WARN(test_and_set_bit(STATUS_SYNC_HCMD_ACTIVE,
+                                 &trans->status),
+                "Command %s: a command is already active!\n", cmd_str))
+               return -EIO;
+
+       IWL_DEBUG_INFO(trans, "Setting HCMD_ACTIVE for command %s\n", cmd_str);
+
+       if (pm_runtime_suspended(&trans_pcie->pci_dev->dev)) {
+               ret = wait_event_timeout(trans_pcie->d0i3_waitq,
+                                pm_runtime_active(&trans_pcie->pci_dev->dev),
+                                msecs_to_jiffies(IWL_TRANS_IDLE_TIMEOUT));
+               if (!ret) {
+                       IWL_ERR(trans, "Timeout exiting D0i3 before hcmd\n");
+                       return -ETIMEDOUT;
+               }
+       }
+
+       cmd_idx = iwl_pcie_gen2_enqueue_hcmd(trans, cmd);
+       if (cmd_idx < 0) {
+               ret = cmd_idx;
+               clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status);
+               IWL_ERR(trans, "Error sending %s: enqueue_hcmd failed: %d\n",
+                       cmd_str, ret);
+               return ret;
+       }
+
+       ret = wait_event_timeout(trans_pcie->wait_command_queue,
+                                !test_bit(STATUS_SYNC_HCMD_ACTIVE,
+                                          &trans->status),
+                                HOST_COMPLETE_TIMEOUT);
+       if (!ret) {
+               IWL_ERR(trans, "Error sending %s: time out after %dms.\n",
+                       cmd_str, jiffies_to_msecs(HOST_COMPLETE_TIMEOUT));
+
+               IWL_ERR(trans, "Current CMD queue read_ptr %d write_ptr %d\n",
+                       txq->read_ptr, txq->write_ptr);
+
+               clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status);
+               IWL_DEBUG_INFO(trans, "Clearing HCMD_ACTIVE for command %s\n",
+                              cmd_str);
+               ret = -ETIMEDOUT;
+
+               iwl_force_nmi(trans);
+               iwl_trans_fw_error(trans);
+
+               goto cancel;
+       }
+
+       if (test_bit(STATUS_FW_ERROR, &trans->status)) {
+               IWL_ERR(trans, "FW error in SYNC CMD %s\n", cmd_str);
+               dump_stack();
+               ret = -EIO;
+               goto cancel;
+       }
+
+       if (!(cmd->flags & CMD_SEND_IN_RFKILL) &&
+           test_bit(STATUS_RFKILL, &trans->status)) {
+               IWL_DEBUG_RF_KILL(trans, "RFKILL in SYNC CMD... no rsp\n");
+               ret = -ERFKILL;
+               goto cancel;
+       }
+
+       if ((cmd->flags & CMD_WANT_SKB) && !cmd->resp_pkt) {
+               IWL_ERR(trans, "Error: Response NULL in '%s'\n", cmd_str);
+               ret = -EIO;
+               goto cancel;
+       }
+
+       return 0;
+
+cancel:
+       if (cmd->flags & CMD_WANT_SKB) {
+               /*
+                * Cancel the CMD_WANT_SKB flag for the cmd in the
+                * TX cmd queue. Otherwise, if the response comes
+                * in later, it could set an invalid
+                * address (cmd->meta.source).
+                */
+               txq->entries[cmd_idx].meta.flags &= ~CMD_WANT_SKB;
+       }
+
+       if (cmd->resp_pkt) {
+               iwl_free_resp(cmd);
+               cmd->resp_pkt = NULL;
+       }
+
+       return ret;
+}
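+
+/*
+ * Failure handling in the sync path above: a timeout fires an NMI into
+ * the firmware and reports a transport error (iwl_force_nmi() plus
+ * iwl_trans_fw_error()), while the cancel path strips CMD_WANT_SKB so
+ * that a late response cannot write through a stale cmd->meta.source
+ * pointer.
+ */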
+
+int iwl_trans_pcie_gen2_send_hcmd(struct iwl_trans *trans,
+                                 struct iwl_host_cmd *cmd)
+{
+       if (!(cmd->flags & CMD_SEND_IN_RFKILL) &&
+           test_bit(STATUS_RFKILL, &trans->status)) {
+               IWL_DEBUG_RF_KILL(trans, "Dropping CMD 0x%x: RF KILL\n",
+                                 cmd->id);
+               return -ERFKILL;
+       }
+
+       if (cmd->flags & CMD_ASYNC) {
+               int ret;
+
+               /* An asynchronous command cannot expect an SKB to be set. */
+               if (WARN_ON(cmd->flags & CMD_WANT_SKB))
+                       return -EINVAL;
+
+               ret = iwl_pcie_gen2_enqueue_hcmd(trans, cmd);
+               if (ret < 0) {
+                       IWL_ERR(trans,
+                               "Error sending %s: enqueue_hcmd failed: %d\n",
+                               iwl_get_cmd_string(trans, cmd->id), ret);
+                       return ret;
+               }
+               return 0;
+       }
+
+       return iwl_pcie_gen2_send_hcmd_sync(trans, cmd);
+}
+
+/*
+ * iwl_pcie_gen2_txq_unmap -  Unmap any remaining DMA mappings and free skb's
+ */
+void iwl_pcie_gen2_txq_unmap(struct iwl_trans *trans, int txq_id)
+{
+       struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+       struct iwl_txq *txq = trans_pcie->txq[txq_id];
+
+       spin_lock_bh(&txq->lock);
+       while (txq->write_ptr != txq->read_ptr) {
+               IWL_DEBUG_TX_REPLY(trans, "Q %d Free %d\n",
+                                  txq_id, txq->read_ptr);
+
+               iwl_pcie_gen2_free_tfd(trans, txq);
+               txq->read_ptr = iwl_queue_inc_wrap(txq->read_ptr);
+
+               if (txq->read_ptr == txq->write_ptr) {
+                       unsigned long flags;
+
+                       spin_lock_irqsave(&trans_pcie->reg_lock, flags);
+                       if (txq_id != trans_pcie->cmd_queue) {
+                               IWL_DEBUG_RPM(trans, "Q %d - last tx freed\n",
+                                             txq->id);
+                               iwl_trans_unref(trans);
+                       } else if (trans_pcie->ref_cmd_in_flight) {
+                               trans_pcie->ref_cmd_in_flight = false;
+                               IWL_DEBUG_RPM(trans,
+                                             "clear ref_cmd_in_flight\n");
+                               iwl_trans_unref(trans);
+                       }
+                       spin_unlock_irqrestore(&trans_pcie->reg_lock, flags);
+               }
+       }
+       spin_unlock_bh(&txq->lock);
+
+       /* just in case - this queue may have been stopped */
+       iwl_wake_queue(trans, txq);
+}
+
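The unmap loop above drains the ring by advancing read_ptr one TFD at a time until it meets write_ptr; iwl_queue_inc_wrap depends on the queue size being a power of two. A standalone sketch of that wrap-around drain (QUEUE_SIZE and free_entry() are stand-ins, not driver symbols):

#include <stdio.h>

#define QUEUE_SIZE 256  /* must be a power of two for the mask trick */

static int inc_wrap(int idx)
{
	return (idx + 1) & (QUEUE_SIZE - 1);
}

static void free_entry(int idx)
{
	printf("freeing TFD at index %d\n", idx);
}

int main(void)
{
	int read_ptr = 253, write_ptr = 2;  /* wrapped: 5 entries pending */

	while (write_ptr != read_ptr) {
		free_entry(read_ptr);
		read_ptr = inc_wrap(read_ptr);
	}
	return 0;
}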
+static void iwl_pcie_gen2_txq_free_memory(struct iwl_trans *trans,
+                                         struct iwl_txq *txq)
+{
+       struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+       struct device *dev = trans->dev;
+
+       /* De-alloc circular buffer of TFDs */
+       if (txq->tfds) {
+               dma_free_coherent(dev,
+                                 trans_pcie->tfd_size * TFD_QUEUE_SIZE_MAX,
+                                 txq->tfds, txq->dma_addr);
+               dma_free_coherent(dev,
+                                 sizeof(*txq->first_tb_bufs) * txq->n_window,
+                                 txq->first_tb_bufs, txq->first_tb_dma);
+       }
+
+       kfree(txq->entries);
+       iwl_pcie_free_dma_ptr(trans, &txq->bc_tbl);
+       kfree(txq);
+}
+
+/*
+ * iwl_pcie_gen2_txq_free - Deallocate DMA queue.
+ * @txq: Transmit queue to deallocate.
+ *
+ * Empty queue by removing and destroying all BDs.
+ * Free all buffers and the queue descriptor structure itself.
+ */
+static void iwl_pcie_gen2_txq_free(struct iwl_trans *trans, int txq_id)
+{
+       struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+       struct iwl_txq *txq = trans_pcie->txq[txq_id];
+       int i;
+
+       if (WARN_ON(!txq))
+               return;
+
+       iwl_pcie_gen2_txq_unmap(trans, txq_id);
+
+       /* De-alloc array of command/tx buffers */
+       if (txq_id == trans_pcie->cmd_queue)
+               for (i = 0; i < txq->n_window; i++) {
+                       kzfree(txq->entries[i].cmd);
+                       kzfree(txq->entries[i].free_buf);
+               }
+       del_timer_sync(&txq->stuck_timer);
+
+       iwl_pcie_gen2_txq_free_memory(trans, txq);
+
+       trans_pcie->txq[txq_id] = NULL;
+
+       clear_bit(txq_id, trans_pcie->queue_used);
+}
+
+int iwl_trans_pcie_dyn_txq_alloc(struct iwl_trans *trans,
+                                struct iwl_tx_queue_cfg_cmd *cmd,
+                                int cmd_id,
+                                unsigned int timeout)
+{
+       struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+       struct iwl_tx_queue_cfg_rsp *rsp;
+       struct iwl_txq *txq;
+       struct iwl_host_cmd hcmd = {
+               .id = cmd_id,
+               .len = { sizeof(*cmd) },
+               .data = { cmd, },
+               .flags = CMD_WANT_SKB,
+       };
+       int ret, qid;
+
+       txq = kzalloc(sizeof(*txq), GFP_KERNEL);
+       if (!txq)
+               return -ENOMEM;
+       ret = iwl_pcie_alloc_dma_ptr(trans, &txq->bc_tbl,
+                                    sizeof(struct iwlagn_scd_bc_tbl));
+       if (ret) {
+               IWL_ERR(trans, "Scheduler BC Table allocation failed\n");
+               kfree(txq);
+               return -ENOMEM;
+       }
+
+       ret = iwl_pcie_txq_alloc(trans, txq, TFD_TX_CMD_SLOTS, false);
+       if (ret) {
+               IWL_ERR(trans, "Tx queue alloc failed\n");
+               goto error;
+       }
+       ret = iwl_pcie_txq_init(trans, txq, TFD_TX_CMD_SLOTS, false);
+       if (ret) {
+               IWL_ERR(trans, "Tx queue init failed\n");
+               goto error;
+       }
+
+       txq->wd_timeout = msecs_to_jiffies(timeout);
+
+       cmd->tfdq_addr = cpu_to_le64(txq->dma_addr);
+       cmd->byte_cnt_addr = cpu_to_le64(txq->bc_tbl.dma);
+       cmd->cb_size = cpu_to_le32(TFD_QUEUE_CB_SIZE(TFD_QUEUE_SIZE_MAX));
+
+       ret = iwl_trans_send_cmd(trans, &hcmd);
+       if (ret)
+               goto error;
+
+       if (WARN_ON(iwl_rx_packet_payload_len(hcmd.resp_pkt) != sizeof(*rsp))) {
+               ret = -EINVAL;
+               goto error;
+       }
+
+       rsp = (void *)hcmd.resp_pkt->data;
+       qid = le16_to_cpu(rsp->queue_number);
+
+       if (qid >= ARRAY_SIZE(trans_pcie->txq)) {
+               WARN_ONCE(1, "queue index %d unsupported", qid);
+               ret = -EIO;
+               goto error;
+       }
+
+       if (test_and_set_bit(qid, trans_pcie->queue_used)) {
+               WARN_ONCE(1, "queue %d already used", qid);
+               ret = -EIO;
+               goto error;
+       }
+
+       txq->id = qid;
+       trans_pcie->txq[qid] = txq;
+
+       /* Place first TFD at index corresponding to start sequence number */
+       txq->read_ptr = le16_to_cpu(rsp->write_pointer);
+       txq->write_ptr = le16_to_cpu(rsp->write_pointer);
+       iwl_write_direct32(trans, HBUS_TARG_WRPTR,
+                          (txq->write_ptr) | (qid << 16));
+       IWL_DEBUG_TX_QUEUES(trans, "Activate queue %d\n", qid);
+
+       return qid;
+
+error:
+       iwl_pcie_gen2_txq_free_memory(trans, txq);
+       return ret;
+}
+
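The allocation path validates the firmware-assigned queue id against the txq array bound and then claims it in the queue_used bitmap. A simplified model of that bookkeeping (a plain bool array stands in for test_and_set_bit(); MAX_QUEUES is assumed):

#include <stdbool.h>
#include <stdio.h>

#define MAX_QUEUES 32

static bool queue_used[MAX_QUEUES];

static int claim_queue(int qid)
{
	if (qid < 0 || qid >= MAX_QUEUES)  /* note >=: qid == size is out of range */
		return -1;
	if (queue_used[qid])               /* already claimed */
		return -1;
	queue_used[qid] = true;
	return qid;
}

int main(void)
{
	printf("%d\n", claim_queue(5));    /* 5  */
	printf("%d\n", claim_queue(5));    /* -1: duplicate */
	printf("%d\n", claim_queue(32));   /* -1: out of range */
	return 0;
}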
+void iwl_trans_pcie_dyn_txq_free(struct iwl_trans *trans, int queue)
+{
+       struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+
+       /*
+        * Upon HW Rfkill - we stop the device, and then stop the queues
+        * in the op_mode. For the op_mode's simplicity, allow it to call
+        * txq_disable after it has already called stop_device.
+        */
+       if (!test_and_clear_bit(queue, trans_pcie->queue_used)) {
+               WARN_ONCE(test_bit(STATUS_DEVICE_ENABLED, &trans->status),
+                         "queue %d not used", queue);
+               return;
+       }
+
+       iwl_pcie_gen2_txq_unmap(trans, queue);
+
+       IWL_DEBUG_TX_QUEUES(trans, "Deactivate queue %d\n", queue);
+}
+
+void iwl_pcie_gen2_tx_free(struct iwl_trans *trans)
+{
+       struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+       int i;
+
+       memset(trans_pcie->queue_used, 0, sizeof(trans_pcie->queue_used));
+
+       /* Free all TX queues */
+       for (i = 0; i < ARRAY_SIZE(trans_pcie->txq); i++) {
+               if (!trans_pcie->txq[i])
+                       continue;
+
+               iwl_pcie_gen2_txq_free(trans, i);
+       }
+}
+
+int iwl_pcie_gen2_tx_init(struct iwl_trans *trans)
+{
+       struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+       struct iwl_txq *cmd_queue;
+       int txq_id = trans_pcie->cmd_queue, ret;
+
+       /* alloc and init the command queue */
+       if (!trans_pcie->txq[txq_id]) {
+               cmd_queue = kzalloc(sizeof(*cmd_queue), GFP_KERNEL);
+               if (!cmd_queue) {
+                       IWL_ERR(trans, "Not enough memory for command queue\n");
+                       return -ENOMEM;
+               }
+               trans_pcie->txq[txq_id] = cmd_queue;
+               ret = iwl_pcie_txq_alloc(trans, cmd_queue, TFD_CMD_SLOTS, true);
+               if (ret) {
+                       IWL_ERR(trans, "Tx %d queue alloc failed\n", txq_id);
+                       goto error;
+               }
+       } else {
+               cmd_queue = trans_pcie->txq[txq_id];
+       }
+
+       ret = iwl_pcie_txq_init(trans, cmd_queue, TFD_CMD_SLOTS, true);
+       if (ret) {
+               IWL_ERR(trans, "Tx %d queue init failed\n", txq_id);
+               goto error;
+       }
+       trans_pcie->txq[txq_id]->id = txq_id;
+       set_bit(txq_id, trans_pcie->queue_used);
+
+       return 0;
+
+error:
+       iwl_pcie_gen2_tx_free(trans);
+       return ret;
+}
+
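Note that iwl_pcie_gen2_tx_init above allocates the command queue only when it does not already exist, so a re-init after a restart reuses the earlier allocation. A minimal model of that allocate-once pattern:

#include <stdio.h>
#include <stdlib.h>

struct txq { int id; };

static struct txq *cmd_queue;  /* persists across init/free cycles */

static struct txq *get_cmd_queue(int id)
{
	if (!cmd_queue) {
		cmd_queue = calloc(1, sizeof(*cmd_queue));
		if (!cmd_queue)
			return NULL;
		cmd_queue->id = id;
		printf("allocated cmd queue %d\n", id);
	}
	return cmd_queue;  /* second and later calls reuse the allocation */
}

int main(void)
{
	get_cmd_queue(0);
	get_cmd_queue(0);
	free(cmd_queue);
	return 0;
}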
index 911cf98681074725b449f2b281379994269312f7..386950a2d6162845ccf3aa2ca24c968750d5d2cd 100644 (file)
@@ -2,7 +2,7 @@
  *
  * Copyright(c) 2003 - 2014 Intel Corporation. All rights reserved.
  * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
- * Copyright(c) 2016 Intel Deutschland GmbH
+ * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
  *
  * Portions of this file are derived from the ipw3945 project, as well
  * as portions of the ieee80211 subsystem header files.
@@ -71,7 +71,7 @@
  *
  ***************************************************/
 
-static int iwl_queue_space(const struct iwl_txq *q)
+int iwl_queue_space(const struct iwl_txq *q)
 {
        unsigned int max;
        unsigned int used;
@@ -102,10 +102,9 @@ static int iwl_queue_space(const struct iwl_txq *q)
 /*
  * iwl_queue_init - Initialize queue's high/low-water and read/write indexes
  */
-static int iwl_queue_init(struct iwl_txq *q, int slots_num, u32 id)
+static int iwl_queue_init(struct iwl_txq *q, int slots_num)
 {
        q->n_window = slots_num;
-       q->id = id;
 
        /* slots_num must be power-of-two size, otherwise
         * get_cmd_index is broken. */
@@ -126,8 +125,8 @@ static int iwl_queue_init(struct iwl_txq *q, int slots_num, u32 id)
        return 0;
 }
 
-static int iwl_pcie_alloc_dma_ptr(struct iwl_trans *trans,
-                                 struct iwl_dma_ptr *ptr, size_t size)
+int iwl_pcie_alloc_dma_ptr(struct iwl_trans *trans,
+                          struct iwl_dma_ptr *ptr, size_t size)
 {
        if (WARN_ON(ptr->addr))
                return -EINVAL;
@@ -140,8 +139,7 @@ static int iwl_pcie_alloc_dma_ptr(struct iwl_trans *trans,
        return 0;
 }
 
-static void iwl_pcie_free_dma_ptr(struct iwl_trans *trans,
-                                 struct iwl_dma_ptr *ptr)
+void iwl_pcie_free_dma_ptr(struct iwl_trans *trans, struct iwl_dma_ptr *ptr)
 {
        if (unlikely(!ptr->addr))
                return;
@@ -164,9 +162,6 @@ static void iwl_pcie_txq_stuck_timer(unsigned long data)
        }
        spin_unlock(&txq->lock);
 
-       IWL_ERR(trans, "Queue %d stuck for %u ms.\n", txq->id,
-               jiffies_to_msecs(txq->wd_timeout));
-
        iwl_trans_pcie_log_scd_error(trans, txq);
 
        iwl_force_nmi(trans);
@@ -188,6 +183,7 @@ static void iwl_pcie_txq_update_byte_cnt_tbl(struct iwl_trans *trans,
        __le16 bc_ent;
        struct iwl_tx_cmd *tx_cmd =
                (void *)txq->entries[txq->write_ptr].cmd->payload;
+       u8 sta_id = tx_cmd->sta_id;
 
        scd_bc_tbl = trans_pcie->scd_bc_tbls.addr;
 
@@ -210,26 +206,7 @@ static void iwl_pcie_txq_update_byte_cnt_tbl(struct iwl_trans *trans,
        if (WARN_ON(len > 0xFFF || write_ptr >= TFD_QUEUE_SIZE_MAX))
                return;
 
-       if (trans->cfg->use_tfh) {
-               u8 filled_tfd_size = offsetof(struct iwl_tfh_tfd, tbs) +
-                                    num_tbs * sizeof(struct iwl_tfh_tb);
-               /*
-                * filled_tfd_size contains the number of filled bytes in the
-                * TFD.
-                * Dividing it by 64 will give the number of chunks to fetch
-                * to SRAM- 0 for one chunk, 1 for 2 and so on.
-                * If, for example, TFD contains only 3 TBs then 32 bytes
-                * of the TFD are used, and only one chunk of 64 bytes should
-                * be fetched
-                */
-               u8 num_fetch_chunks = DIV_ROUND_UP(filled_tfd_size, 64) - 1;
-
-               bc_ent = cpu_to_le16(len | (num_fetch_chunks << 12));
-       } else {
-               u8 sta_id = tx_cmd->sta_id;
-
-               bc_ent = cpu_to_le16(len | (sta_id << 12));
-       }
+       bc_ent = cpu_to_le16(len | (sta_id << 12));
 
        scd_bc_tbl[txq_id].tfd_offset[write_ptr] = bc_ent;
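With the use_tfh branch dropped, the byte-count entry is always a 12-bit length with the station id in the top four bits, which is what the len > 0xFFF guard above protects. A quick standalone check of the packing (endian conversion omitted):

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

static uint16_t pack_bc_ent(uint16_t len, uint8_t sta_id)
{
	assert(len <= 0xFFF);   /* length must fit in 12 bits */
	assert(sta_id <= 0xF);  /* station id must fit in 4 bits */
	return (uint16_t)(len | (sta_id << 12));
}

int main(void)
{
	uint16_t ent = pack_bc_ent(0x234, 5);

	printf("bc_ent = 0x%04x (len=0x%03x sta=%u)\n",
	       ent, ent & 0xFFF, ent >> 12);
	return 0;
}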
 
@@ -319,23 +296,17 @@ void iwl_pcie_txq_check_wrptrs(struct iwl_trans *trans)
        int i;
 
        for (i = 0; i < trans->cfg->base_params->num_of_queues; i++) {
-               struct iwl_txq *txq = &trans_pcie->txq[i];
+               struct iwl_txq *txq = trans_pcie->txq[i];
 
                spin_lock_bh(&txq->lock);
-               if (trans_pcie->txq[i].need_update) {
+               if (txq->need_update) {
                        iwl_pcie_txq_inc_wr_ptr(trans, txq);
-                       trans_pcie->txq[i].need_update = false;
+                       txq->need_update = false;
                }
                spin_unlock_bh(&txq->lock);
        }
 }
 
-static inline void *iwl_pcie_get_tfd(struct iwl_trans_pcie *trans_pcie,
-                                    struct iwl_txq *txq, int idx)
-{
-       return txq->tfds + trans_pcie->tfd_size * idx;
-}
-
 static inline dma_addr_t iwl_pcie_tfd_tb_get_addr(struct iwl_trans *trans,
                                                  void *_tfd, u8 idx)
 {
@@ -368,28 +339,17 @@ static inline dma_addr_t iwl_pcie_tfd_tb_get_addr(struct iwl_trans *trans,
 static inline void iwl_pcie_tfd_set_tb(struct iwl_trans *trans, void *tfd,
                                       u8 idx, dma_addr_t addr, u16 len)
 {
-       if (trans->cfg->use_tfh) {
-               struct iwl_tfh_tfd *tfd_fh = (void *)tfd;
-               struct iwl_tfh_tb *tb = &tfd_fh->tbs[idx];
+       struct iwl_tfd *tfd_fh = (void *)tfd;
+       struct iwl_tfd_tb *tb = &tfd_fh->tbs[idx];
 
-               put_unaligned_le64(addr, &tb->addr);
-               tb->tb_len = cpu_to_le16(len);
+       u16 hi_n_len = len << 4;
 
-               tfd_fh->num_tbs = cpu_to_le16(idx + 1);
-       } else {
-               struct iwl_tfd *tfd_fh = (void *)tfd;
-               struct iwl_tfd_tb *tb = &tfd_fh->tbs[idx];
-
-               u16 hi_n_len = len << 4;
-
-               put_unaligned_le32(addr, &tb->lo);
-               if (sizeof(dma_addr_t) > sizeof(u32))
-                       hi_n_len |= ((addr >> 16) >> 16) & 0xF;
+       put_unaligned_le32(addr, &tb->lo);
+       hi_n_len |= iwl_get_dma_hi_addr(addr);
 
-               tb->hi_n_len = cpu_to_le16(hi_n_len);
+       tb->hi_n_len = cpu_to_le16(hi_n_len);
 
-               tfd_fh->num_tbs = idx + 1;
-       }
+       tfd_fh->num_tbs = idx + 1;
 }
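The restored legacy-TFD path packs a 36-bit DMA address and a 12-bit length into 48 bits: address bits 0..31 in tb->lo, address bits 32..35 in the low nibble of hi_n_len, and the length in its upper 12 bits. A sketch of the arithmetic (byte order omitted):

#include <stdint.h>
#include <stdio.h>

struct tb {
	uint32_t lo;        /* DMA address bits 0..31 */
	uint16_t hi_n_len;  /* bits 0..3: addr 32..35; bits 4..15: length */
};

static void set_tb(struct tb *tb, uint64_t addr, uint16_t len)
{
	tb->lo = (uint32_t)addr;
	tb->hi_n_len = (uint16_t)((len << 4) | ((addr >> 32) & 0xF));
}

int main(void)
{
	struct tb tb;

	set_tb(&tb, 0x9ABCD1234ULL, 100);
	printf("lo=0x%08x hi_n_len=0x%04x (len=%u addr_hi=0x%x)\n",
	       tb.lo, tb.hi_n_len, tb.hi_n_len >> 4, tb.hi_n_len & 0xF);
	return 0;
}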
 
 static inline u8 iwl_pcie_tfd_get_num_tbs(struct iwl_trans *trans, void *_tfd)
@@ -460,7 +420,7 @@ static void iwl_pcie_tfd_unmap(struct iwl_trans *trans,
  * Does NOT advance any TFD circular buffer read/write indexes
  * Does NOT free the TFD itself (which is within circular buffer)
  */
-static void iwl_pcie_txq_free_tfd(struct iwl_trans *trans, struct iwl_txq *txq)
+void iwl_pcie_txq_free_tfd(struct iwl_trans *trans, struct iwl_txq *txq)
 {
        /* rd_ptr is bounded by TFD_QUEUE_SIZE_MAX and
         * idx is bounded by n_window
@@ -522,9 +482,8 @@ static int iwl_pcie_txq_build_tfd(struct iwl_trans *trans, struct iwl_txq *txq,
        return num_tbs;
 }
 
-static int iwl_pcie_txq_alloc(struct iwl_trans *trans,
-                              struct iwl_txq *txq, int slots_num,
-                              u32 txq_id)
+int iwl_pcie_txq_alloc(struct iwl_trans *trans, struct iwl_txq *txq,
+                      int slots_num, bool cmd_queue)
 {
        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
        size_t tfd_sz = trans_pcie->tfd_size * TFD_QUEUE_SIZE_MAX;
@@ -547,7 +506,7 @@ static int iwl_pcie_txq_alloc(struct iwl_trans *trans,
        if (!txq->entries)
                goto error;
 
-       if (txq_id == trans_pcie->cmd_queue)
+       if (cmd_queue)
                for (i = 0; i < slots_num; i++) {
                        txq->entries[i].cmd =
                                kmalloc(sizeof(struct iwl_device_cmd),
@@ -573,13 +532,11 @@ static int iwl_pcie_txq_alloc(struct iwl_trans *trans,
        if (!txq->first_tb_bufs)
                goto err_free_tfds;
 
-       txq->id = txq_id;
-
        return 0;
 err_free_tfds:
        dma_free_coherent(trans->dev, tfd_sz, txq->tfds, txq->dma_addr);
 error:
-       if (txq->entries && txq_id == trans_pcie->cmd_queue)
+       if (txq->entries && cmd_queue)
                for (i = 0; i < slots_num; i++)
                        kfree(txq->entries[i].cmd);
        kfree(txq->entries);
@@ -589,10 +546,9 @@ error:
 
 }
 
-static int iwl_pcie_txq_init(struct iwl_trans *trans, struct iwl_txq *txq,
-                             int slots_num, u32 txq_id)
+int iwl_pcie_txq_init(struct iwl_trans *trans, struct iwl_txq *txq,
+                     int slots_num, bool cmd_queue)
 {
-       struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
        int ret;
 
        txq->need_update = false;
@@ -602,13 +558,13 @@ static int iwl_pcie_txq_init(struct iwl_trans *trans, struct iwl_txq *txq,
        BUILD_BUG_ON(TFD_QUEUE_SIZE_MAX & (TFD_QUEUE_SIZE_MAX - 1));
 
        /* Initialize queue's high/low-water marks, and head/tail indexes */
-       ret = iwl_queue_init(txq, slots_num, txq_id);
+       ret = iwl_queue_init(txq, slots_num);
        if (ret)
                return ret;
 
        spin_lock_init(&txq->lock);
 
-       if (txq_id == trans_pcie->cmd_queue) {
+       if (cmd_queue) {
                static struct lock_class_key iwl_pcie_cmd_queue_lock_class;
 
                lockdep_set_class(&txq->lock, &iwl_pcie_cmd_queue_lock_class);
@@ -616,18 +572,6 @@ static int iwl_pcie_txq_init(struct iwl_trans *trans, struct iwl_txq *txq,
 
        __skb_queue_head_init(&txq->overflow_q);
 
-       /*
-        * Tell nic where to find circular buffer of Tx Frame Descriptors for
-        * given Tx queue, and enable the DMA channel used for that queue.
-        * Circular buffer (TFD queue in DRAM) physical base address */
-       if (trans->cfg->use_tfh)
-               iwl_write_direct64(trans,
-                                  FH_MEM_CBBC_QUEUE(trans, txq_id),
-                                  txq->dma_addr);
-       else
-               iwl_write_direct32(trans, FH_MEM_CBBC_QUEUE(trans, txq_id),
-                                  txq->dma_addr >> 8);
-
        return 0;
 }
 
@@ -672,7 +616,7 @@ static void iwl_pcie_clear_cmd_in_flight(struct iwl_trans *trans)
 static void iwl_pcie_txq_unmap(struct iwl_trans *trans, int txq_id)
 {
        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
-       struct iwl_txq *txq = &trans_pcie->txq[txq_id];
+       struct iwl_txq *txq = trans_pcie->txq[txq_id];
 
        spin_lock_bh(&txq->lock);
        while (txq->write_ptr != txq->read_ptr) {
@@ -704,7 +648,6 @@ static void iwl_pcie_txq_unmap(struct iwl_trans *trans, int txq_id)
                        spin_unlock_irqrestore(&trans_pcie->reg_lock, flags);
                }
        }
-       txq->active = false;
 
        while (!skb_queue_empty(&txq->overflow_q)) {
                struct sk_buff *skb = __skb_dequeue(&txq->overflow_q);
@@ -729,7 +672,7 @@ static void iwl_pcie_txq_unmap(struct iwl_trans *trans, int txq_id)
 static void iwl_pcie_txq_free(struct iwl_trans *trans, int txq_id)
 {
        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
-       struct iwl_txq *txq = &trans_pcie->txq[txq_id];
+       struct iwl_txq *txq = trans_pcie->txq[txq_id];
        struct device *dev = trans->dev;
        int i;
 
@@ -780,9 +723,6 @@ void iwl_pcie_tx_start(struct iwl_trans *trans, u32 scd_base_addr)
        memset(trans_pcie->queue_stopped, 0, sizeof(trans_pcie->queue_stopped));
        memset(trans_pcie->queue_used, 0, sizeof(trans_pcie->queue_used));
 
-       if (trans->cfg->use_tfh)
-               return;
-
        trans_pcie->scd_base_addr =
                iwl_read_prph(trans, SCD_SRAM_BASE_ADDR);
 
@@ -832,9 +772,16 @@ void iwl_trans_pcie_tx_reset(struct iwl_trans *trans)
        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
        int txq_id;
 
+       /*
+        * We should never get here in gen2 trans mode; return early to
+        * avoid invalid accesses.
+        */
+       if (WARN_ON_ONCE(trans->cfg->gen2))
+               return;
+
        for (txq_id = 0; txq_id < trans->cfg->base_params->num_of_queues;
             txq_id++) {
-               struct iwl_txq *txq = &trans_pcie->txq[txq_id];
+               struct iwl_txq *txq = trans_pcie->txq[txq_id];
                if (trans->cfg->use_tfh)
                        iwl_write_direct64(trans,
                                           FH_MEM_CBBC_QUEUE(trans, txq_id),
@@ -914,7 +861,7 @@ int iwl_pcie_tx_stop(struct iwl_trans *trans)
        memset(trans_pcie->queue_used, 0, sizeof(trans_pcie->queue_used));
 
        /* This can happen: start_hw, stop_device */
-       if (!trans_pcie->txq)
+       if (!trans_pcie->txq_memory)
                return 0;
 
        /* Unmap DMA from host system and free skb's */
@@ -935,15 +882,20 @@ void iwl_pcie_tx_free(struct iwl_trans *trans)
        int txq_id;
        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
 
+       memset(trans_pcie->queue_used, 0, sizeof(trans_pcie->queue_used));
+
        /* Tx queues */
-       if (trans_pcie->txq) {
+       if (trans_pcie->txq_memory) {
                for (txq_id = 0;
-                    txq_id < trans->cfg->base_params->num_of_queues; txq_id++)
+                    txq_id < trans->cfg->base_params->num_of_queues;
+                    txq_id++) {
                        iwl_pcie_txq_free(trans, txq_id);
+                       trans_pcie->txq[txq_id] = NULL;
+               }
        }
 
-       kfree(trans_pcie->txq);
-       trans_pcie->txq = NULL;
+       kfree(trans_pcie->txq_memory);
+       trans_pcie->txq_memory = NULL;
 
        iwl_pcie_free_dma_ptr(trans, &trans_pcie->kw);
 
@@ -965,7 +917,7 @@ static int iwl_pcie_tx_alloc(struct iwl_trans *trans)
 
        /* It is not allowed to alloc twice, so warn when this happens.
         * We cannot rely on the previous allocation, so free and fail */
-       if (WARN_ON(trans_pcie->txq)) {
+       if (WARN_ON(trans_pcie->txq_memory)) {
                ret = -EINVAL;
                goto error;
        }
@@ -984,9 +936,9 @@ static int iwl_pcie_tx_alloc(struct iwl_trans *trans)
                goto error;
        }
 
-       trans_pcie->txq = kcalloc(trans->cfg->base_params->num_of_queues,
-                                 sizeof(struct iwl_txq), GFP_KERNEL);
-       if (!trans_pcie->txq) {
+       trans_pcie->txq_memory = kcalloc(trans->cfg->base_params->num_of_queues,
+                                        sizeof(struct iwl_txq), GFP_KERNEL);
+       if (!trans_pcie->txq_memory) {
                IWL_ERR(trans, "Not enough memory for txq\n");
                ret = -ENOMEM;
                goto error;
@@ -995,14 +947,17 @@ static int iwl_pcie_tx_alloc(struct iwl_trans *trans)
        /* Alloc and init all Tx queues, including the command queue (#4/#9) */
        for (txq_id = 0; txq_id < trans->cfg->base_params->num_of_queues;
             txq_id++) {
-               slots_num = (txq_id == trans_pcie->cmd_queue) ?
-                                       TFD_CMD_SLOTS : TFD_TX_CMD_SLOTS;
-               ret = iwl_pcie_txq_alloc(trans, &trans_pcie->txq[txq_id],
-                                         slots_num, txq_id);
+               bool cmd_queue = (txq_id == trans_pcie->cmd_queue);
+
+               slots_num = cmd_queue ? TFD_CMD_SLOTS : TFD_TX_CMD_SLOTS;
+               trans_pcie->txq[txq_id] = &trans_pcie->txq_memory[txq_id];
+               ret = iwl_pcie_txq_alloc(trans, trans_pcie->txq[txq_id],
+                                        slots_num, cmd_queue);
                if (ret) {
                        IWL_ERR(trans, "Tx %d queue alloc failed\n", txq_id);
                        goto error;
                }
+               trans_pcie->txq[txq_id]->id = txq_id;
        }
 
        return 0;
@@ -1012,6 +967,7 @@ error:
 
        return ret;
 }
+
 int iwl_pcie_tx_init(struct iwl_trans *trans)
 {
        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
@@ -1019,7 +975,7 @@ int iwl_pcie_tx_init(struct iwl_trans *trans)
        int txq_id, slots_num;
        bool alloc = false;
 
-       if (!trans_pcie->txq) {
+       if (!trans_pcie->txq_memory) {
                ret = iwl_pcie_tx_alloc(trans);
                if (ret)
                        goto error;
@@ -1040,22 +996,24 @@ int iwl_pcie_tx_init(struct iwl_trans *trans)
        /* Alloc and init all Tx queues, including the command queue (#4/#9) */
        for (txq_id = 0; txq_id < trans->cfg->base_params->num_of_queues;
             txq_id++) {
-               slots_num = (txq_id == trans_pcie->cmd_queue) ?
-                                       TFD_CMD_SLOTS : TFD_TX_CMD_SLOTS;
-               ret = iwl_pcie_txq_init(trans, &trans_pcie->txq[txq_id],
-                                        slots_num, txq_id);
+               bool cmd_queue = (txq_id == trans_pcie->cmd_queue);
+
+               slots_num = cmd_queue ? TFD_CMD_SLOTS : TFD_TX_CMD_SLOTS;
+               ret = iwl_pcie_txq_init(trans, trans_pcie->txq[txq_id],
+                                       slots_num, cmd_queue);
                if (ret) {
                        IWL_ERR(trans, "Tx %d queue init failed\n", txq_id);
                        goto error;
                }
-       }
 
-       if (trans->cfg->use_tfh) {
-               iwl_write_direct32(trans, TFH_TRANSFER_MODE,
-                                  TFH_TRANSFER_MAX_PENDING_REQ |
-                                  TFH_CHUNK_SIZE_128 |
-                                  TFH_CHUNK_SPLIT_MODE);
-               return 0;
+               /*
+                * Tell the NIC where to find the circular buffer of TFDs for a
+                * given Tx queue, and enable the DMA channel used for that
+                * queue.
+                * Circular buffer (TFD queue in DRAM) physical base address
+                */
+               iwl_write_direct32(trans, FH_MEM_CBBC_QUEUE(trans, txq_id),
+                                  trans_pcie->txq[txq_id]->dma_addr >> 8);
        }
 
        iwl_set_bits_prph(trans, SCD_GP_CTRL, SCD_GP_CTRL_AUTO_ACTIVE_MODE);
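The FH_MEM_CBBC_QUEUE write above stores the ring's physical base address shifted right by 8; the register evidently counts in 256-byte units, which presumes a 256-byte-aligned ring base (an inference from the encoding, not stated in the patch):

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t dma_addr = 0x12345600;  /* assumed 256-byte aligned */
	uint32_t reg;

	assert((dma_addr & 0xFF) == 0);  /* low 8 bits are discarded by >> 8 */
	reg = (uint32_t)(dma_addr >> 8);
	printf("CBBC register value: 0x%08x\n", reg);
	return 0;
}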
@@ -1100,7 +1058,7 @@ void iwl_trans_pcie_reclaim(struct iwl_trans *trans, int txq_id, int ssn,
                            struct sk_buff_head *skbs)
 {
        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
-       struct iwl_txq *txq = &trans_pcie->txq[txq_id];
+       struct iwl_txq *txq = trans_pcie->txq[txq_id];
        int tfd_num = ssn & (TFD_QUEUE_SIZE_MAX - 1);
        int last_to_free;
 
@@ -1110,7 +1068,7 @@ void iwl_trans_pcie_reclaim(struct iwl_trans *trans, int txq_id, int ssn,
 
        spin_lock_bh(&txq->lock);
 
-       if (!txq->active) {
+       if (!test_bit(txq_id, trans_pcie->queue_used)) {
                IWL_DEBUG_TX_QUEUES(trans, "Q %d inactive - ignoring idx %d\n",
                                    txq_id, ssn);
                goto out;
@@ -1257,7 +1215,7 @@ static int iwl_pcie_set_cmd_in_flight(struct iwl_trans *trans,
 static void iwl_pcie_cmdq_reclaim(struct iwl_trans *trans, int txq_id, int idx)
 {
        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
-       struct iwl_txq *txq = &trans_pcie->txq[txq_id];
+       struct iwl_txq *txq = trans_pcie->txq[txq_id];
        unsigned long flags;
        int nfreed = 0;
 
@@ -1324,15 +1282,12 @@ void iwl_trans_pcie_txq_enable(struct iwl_trans *trans, int txq_id, u16 ssn,
                               unsigned int wdg_timeout)
 {
        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
-       struct iwl_txq *txq = &trans_pcie->txq[txq_id];
+       struct iwl_txq *txq = trans_pcie->txq[txq_id];
        int fifo = -1;
 
        if (test_and_set_bit(txq_id, trans_pcie->queue_used))
                WARN_ONCE(1, "queue %d already used - expect issues", txq_id);
 
-       if (cfg && trans->cfg->use_tfh)
-               WARN_ONCE(1, "Expected no calls to SCD configuration");
-
        txq->wd_timeout = msecs_to_jiffies(wdg_timeout);
 
        if (cfg) {
@@ -1414,27 +1369,17 @@ void iwl_trans_pcie_txq_enable(struct iwl_trans *trans, int txq_id, u16 ssn,
                                    "Activate queue %d WrPtr: %d\n",
                                    txq_id, ssn & 0xff);
        }
-
-       txq->active = true;
 }
 
 void iwl_trans_pcie_txq_set_shared_mode(struct iwl_trans *trans, u32 txq_id,
                                        bool shared_mode)
 {
        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
-       struct iwl_txq *txq = &trans_pcie->txq[txq_id];
+       struct iwl_txq *txq = trans_pcie->txq[txq_id];
 
        txq->ampdu = !shared_mode;
 }
 
-dma_addr_t iwl_trans_pcie_get_txq_byte_table(struct iwl_trans *trans, int txq)
-{
-       struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
-
-       return trans_pcie->scd_bc_tbls.dma +
-              txq * sizeof(struct iwlagn_scd_bc_tbl);
-}
-
 void iwl_trans_pcie_txq_disable(struct iwl_trans *trans, int txq_id,
                                bool configure_scd)
 {
@@ -1443,8 +1388,8 @@ void iwl_trans_pcie_txq_disable(struct iwl_trans *trans, int txq_id,
                        SCD_TX_STTS_QUEUE_OFFSET(txq_id);
        static const u32 zero_val[4] = {};
 
-       trans_pcie->txq[txq_id].frozen_expiry_remainder = 0;
-       trans_pcie->txq[txq_id].frozen = false;
+       trans_pcie->txq[txq_id]->frozen_expiry_remainder = 0;
+       trans_pcie->txq[txq_id]->frozen = false;
 
        /*
         * Upon HW Rfkill - we stop the device, and then stop the queues
@@ -1458,9 +1403,6 @@ void iwl_trans_pcie_txq_disable(struct iwl_trans *trans, int txq_id,
                return;
        }
 
-       if (configure_scd && trans->cfg->use_tfh)
-               WARN_ONCE(1, "Expected no calls to SCD configuration");
-
        if (configure_scd) {
                iwl_scd_txq_set_inactive(trans, txq_id);
 
@@ -1469,7 +1411,7 @@ void iwl_trans_pcie_txq_disable(struct iwl_trans *trans, int txq_id,
        }
 
        iwl_pcie_txq_unmap(trans, txq_id);
-       trans_pcie->txq[txq_id].ampdu = false;
+       trans_pcie->txq[txq_id]->ampdu = false;
 
        IWL_DEBUG_TX_QUEUES(trans, "Deactivate queue %d\n", txq_id);
 }
@@ -1489,7 +1431,7 @@ static int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans,
                                 struct iwl_host_cmd *cmd)
 {
        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
-       struct iwl_txq *txq = &trans_pcie->txq[trans_pcie->cmd_queue];
+       struct iwl_txq *txq = trans_pcie->txq[trans_pcie->cmd_queue];
        struct iwl_device_cmd *out_cmd;
        struct iwl_cmd_meta *out_meta;
        unsigned long flags;
@@ -1774,16 +1716,15 @@ void iwl_pcie_hcmd_complete(struct iwl_trans *trans,
        struct iwl_device_cmd *cmd;
        struct iwl_cmd_meta *meta;
        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
-       struct iwl_txq *txq = &trans_pcie->txq[trans_pcie->cmd_queue];
+       struct iwl_txq *txq = trans_pcie->txq[trans_pcie->cmd_queue];
 
        /* If a Tx command is being handled and it isn't in the actual
         * command queue then there a command routing bug has been introduced
         * in the queue management code. */
        if (WARN(txq_id != trans_pcie->cmd_queue,
                 "wrong command queue %d (should be %d), sequence 0x%X readp=%d writep=%d\n",
-                txq_id, trans_pcie->cmd_queue, sequence,
-                trans_pcie->txq[trans_pcie->cmd_queue].read_ptr,
-                trans_pcie->txq[trans_pcie->cmd_queue].write_ptr)) {
+                txq_id, trans_pcie->cmd_queue, sequence, txq->read_ptr,
+                txq->write_ptr)) {
                iwl_print_hex_error(trans, pkt, 32);
                return;
        }
@@ -1867,6 +1808,7 @@ static int iwl_pcie_send_hcmd_sync(struct iwl_trans *trans,
                                   struct iwl_host_cmd *cmd)
 {
        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+       struct iwl_txq *txq = trans_pcie->txq[trans_pcie->cmd_queue];
        int cmd_idx;
        int ret;
 
@@ -1907,8 +1849,6 @@ static int iwl_pcie_send_hcmd_sync(struct iwl_trans *trans,
                                           &trans->status),
                                 HOST_COMPLETE_TIMEOUT);
        if (!ret) {
-               struct iwl_txq *txq = &trans_pcie->txq[trans_pcie->cmd_queue];
-
                IWL_ERR(trans, "Error sending %s: time out after %dms.\n",
                        iwl_get_cmd_string(trans, cmd->id),
                        jiffies_to_msecs(HOST_COMPLETE_TIMEOUT));
@@ -1959,8 +1899,7 @@ cancel:
                 * in later, it will possibly set an invalid
                 * address (cmd->meta.source).
                 */
-               trans_pcie->txq[trans_pcie->cmd_queue].
-                       entries[cmd_idx].meta.flags &= ~CMD_WANT_SKB;
+               txq->entries[cmd_idx].meta.flags &= ~CMD_WANT_SKB;
        }
 
        if (cmd->resp_pkt) {
@@ -2314,7 +2253,7 @@ int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
        u16 wifi_seq;
        bool amsdu;
 
-       txq = &trans_pcie->txq[txq_id];
+       txq = trans_pcie->txq[txq_id];
 
        if (WARN_ONCE(!test_bit(txq_id, trans_pcie->queue_used),
                      "TX on unused queue %d\n", txq_id))
index 43dccd5b0291f98fcf184822d8a50374c142fc9c..366eb4991a7d88ffbd2965a43449f74e1ab7e551 100644 (file)
@@ -153,7 +153,8 @@ int mwifiex_cmd_issue_chan_report_request(struct mwifiex_private *priv,
 
        cmd->command = cpu_to_le16(HostCmd_CMD_CHAN_REPORT_REQUEST);
        cmd->size = cpu_to_le16(S_DS_GEN);
-       le16_add_cpu(&cmd->size, sizeof(struct host_cmd_ds_chan_rpt_req));
+       le16_unaligned_add_cpu(&cmd->size,
+                              sizeof(struct host_cmd_ds_chan_rpt_req));
 
        cr_req->chan_desc.start_freq = cpu_to_le16(MWIFIEX_A_BAND_START_FREQ);
        cr_req->chan_desc.chan_num = radar_params->chandef->chan->hw_value;
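le16_unaligned_add_cpu() replaces le16_add_cpu() here for little-endian fields that may sit unaligned inside packed structures. A plausible portable equivalent, sketched in userspace (not the driver's actual definition):

#include <stdint.h>
#include <stdio.h>

static uint16_t rd_le16(const void *p)
{
	const uint8_t *b = p;
	return (uint16_t)(b[0] | (b[1] << 8));  /* byte loads: any alignment */
}

static void wr_le16(uint16_t v, void *p)
{
	uint8_t *b = p;
	b[0] = v & 0xFF;
	b[1] = v >> 8;
}

static void le16_unaligned_add(void *field, uint16_t val)
{
	wr_le16(rd_le16(field) + val, field);
}

int main(void)
{
	uint8_t buf[3] = { 0x00, 0x10, 0x00 };  /* le16 lives at odd offset 1 */

	le16_unaligned_add(buf + 1, 0x0204);
	printf("0x%04x\n", rd_le16(buf + 1));   /* 0x0214 */
	return 0;
}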
index 1e3bd435a694534f4a450b32e8b0bd69702e5269..4107663130657e358aa4f86fbaa18ca5cff62e20 100644 (file)
@@ -594,6 +594,24 @@ int mwifiex_send_domain_info_cmd_fw(struct wiphy *wiphy)
        return 0;
 }
 
+static void mwifiex_reg_apply_radar_flags(struct wiphy *wiphy)
+{
+       struct ieee80211_supported_band *sband;
+       struct ieee80211_channel *chan;
+       unsigned int i;
+
+       if (!wiphy->bands[NL80211_BAND_5GHZ])
+               return;
+       sband = wiphy->bands[NL80211_BAND_5GHZ];
+
+       for (i = 0; i < sband->n_channels; i++) {
+               chan = &sband->channels[i];
+               if ((!(chan->flags & IEEE80211_CHAN_DISABLED)) &&
+                   (chan->flags & IEEE80211_CHAN_RADAR))
+                       chan->flags |= IEEE80211_CHAN_NO_IR;
+       }
+}
+
 /*
  * CFG802.11 regulatory domain callback function.
  *
@@ -613,6 +631,7 @@ static void mwifiex_reg_notifier(struct wiphy *wiphy,
        mwifiex_dbg(adapter, INFO,
                    "info: cfg80211 regulatory domain callback for %c%c\n",
                    request->alpha2[0], request->alpha2[1]);
+       mwifiex_reg_apply_radar_flags(wiphy);
 
        switch (request->initiator) {
        case NL80211_REGDOM_SET_BY_DRIVER:
@@ -2528,9 +2547,11 @@ mwifiex_cfg80211_scan(struct wiphy *wiphy,
                        priv->random_mac[i] |= get_random_int() &
                                               ~(request->mac_addr_mask[i]);
                }
+               ether_addr_copy(user_scan_cfg->random_mac, priv->random_mac);
+       } else {
+               eth_zero_addr(priv->random_mac);
        }
 
-       ether_addr_copy(user_scan_cfg->random_mac, priv->random_mac);
        user_scan_cfg->num_ssids = request->n_ssids;
        user_scan_cfg->ssid_list = request->ssids;
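The scan hunk above keeps the bits covered by mac_addr_mask from the template address, randomizes the rest, and now copies the result into the scan config only when randomization was requested (zeroing random_mac otherwise). A standalone model of the masking (rand8() stands in for get_random_int()):

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define ETH_ALEN 6

static uint8_t rand8(void)
{
	return (uint8_t)rand();
}

static void randomize_mac(uint8_t out[ETH_ALEN], const uint8_t addr[ETH_ALEN],
			  const uint8_t mask[ETH_ALEN])
{
	for (int i = 0; i < ETH_ALEN; i++) {
		out[i] = addr[i] & mask[i];    /* bits fixed by the mask */
		out[i] |= rand8() & ~mask[i];  /* remaining bits randomized */
	}
}

int main(void)
{
	const uint8_t addr[ETH_ALEN] = { 0x02, 0x11, 0x22, 0x00, 0x00, 0x00 };
	const uint8_t mask[ETH_ALEN] = { 0xFF, 0xFF, 0xFF, 0x00, 0x00, 0x00 };
	uint8_t mac[ETH_ALEN];

	randomize_mac(mac, addr, mask);
	printf("%02x:%02x:%02x:%02x:%02x:%02x\n",
	       mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
	return 0;
}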
 
index 25a7475702f7fa9166098e9ca97e9cdd268682a8..0c3b217247b145a9f823f9f73d1233070e87f542 100644 (file)
@@ -242,7 +242,7 @@ static int mwifiex_dnld_cmd_to_fw(struct mwifiex_private *priv,
        mwifiex_dbg(adapter, CMD,
                    "cmd: DNLD_CMD: %#x, act %#x, len %d, seqno %#x\n",
                    cmd_code,
-                   le16_to_cpu(*(__le16 *)((u8 *)host_cmd + S_DS_GEN)),
+                   get_unaligned_le16((u8 *)host_cmd + S_DS_GEN),
                    cmd_size, le16_to_cpu(host_cmd->seq_num));
        mwifiex_dbg_dump(adapter, CMD_D, "cmd buffer:", host_cmd, cmd_size);
 
@@ -286,7 +286,7 @@ static int mwifiex_dnld_cmd_to_fw(struct mwifiex_private *priv,
                        (adapter->dbg.last_cmd_index + 1) % DBG_CMD_NUM;
        adapter->dbg.last_cmd_id[adapter->dbg.last_cmd_index] = cmd_code;
        adapter->dbg.last_cmd_act[adapter->dbg.last_cmd_index] =
-                       le16_to_cpu(*(__le16 *) ((u8 *) host_cmd + S_DS_GEN));
+                       get_unaligned_le16((u8 *)host_cmd + S_DS_GEN);
 
        /* Clear BSS_NO_BITS from HostCmd */
        cmd_code &= HostCmd_CMD_ID_MASK;
index cb6a1a81d44e213c7315c4e64a7e2051b4919a62..6cf9ab9133ea37145c7ccb59d527b1e41b5cf2ea 100644 (file)
@@ -31,17 +31,35 @@ struct rfc_1042_hdr {
        u8 llc_ctrl;
        u8 snap_oui[3];
        __be16 snap_type;
-};
+} __packed;
 
 struct rx_packet_hdr {
        struct ethhdr eth803_hdr;
        struct rfc_1042_hdr rfc1042_hdr;
-};
+} __packed;
 
 struct tx_packet_hdr {
        struct ethhdr eth803_hdr;
        struct rfc_1042_hdr rfc1042_hdr;
-};
+} __packed;
+
+struct mwifiex_fw_header {
+       __le32 dnld_cmd;
+       __le32 base_addr;
+       __le32 data_length;
+       __le32 crc;
+} __packed;
+
+struct mwifiex_fw_data {
+       struct mwifiex_fw_header header;
+       __le32 seq_num;
+       u8 data[1];
+} __packed;
+
+#define MWIFIEX_FW_DNLD_CMD_1 0x1
+#define MWIFIEX_FW_DNLD_CMD_5 0x5
+#define MWIFIEX_FW_DNLD_CMD_6 0x6
+#define MWIFIEX_FW_DNLD_CMD_7 0x7
 
 #define B_SUPPORTED_RATES               5
 #define G_SUPPORTED_RATES               9
@@ -707,7 +725,7 @@ struct uap_txpd {
        u8 reserved1[2];
        u8 tx_token_id;
        u8 reserved[2];
-};
+} __packed;
 
 struct uap_rxpd {
        u8 bss_type;
@@ -723,7 +741,7 @@ struct uap_rxpd {
        u8 ht_info;
        u8 reserved[3];
        u8 flags;
-};
+} __packed;
 
 struct mwifiex_fw_chan_stats {
        u8 chan_num;
@@ -987,7 +1005,7 @@ struct mwifiex_ps_param {
        __le16 adhoc_wake_period;
        __le16 mode;
        __le16 delay_to_ps;
-};
+} __packed;
 
 #define HS_DEF_WAKE_INTERVAL          100
 #define HS_DEF_INACTIVITY_TIMEOUT      50
@@ -996,7 +1014,7 @@ struct mwifiex_ps_param_in_hs {
        struct mwifiex_ie_types_header header;
        __le32 hs_wake_int;
        __le32 hs_inact_timeout;
-};
+} __packed;
 
 #define BITMAP_AUTO_DS         0x01
 #define BITMAP_STA_PS          0x10
@@ -1062,7 +1080,7 @@ struct host_cmd_ds_802_11_rssi_info {
        __le16 nbcn;
        __le16 reserved[9];
        long long reserved_1;
-};
+} __packed;
 
 struct host_cmd_ds_802_11_rssi_info_rsp {
        __le16 action;
@@ -1077,12 +1095,12 @@ struct host_cmd_ds_802_11_rssi_info_rsp {
        __le16 bcn_rssi_avg;
        __le16 bcn_nf_avg;
        long long tsf_bcn;
-};
+} __packed;
 
 struct host_cmd_ds_802_11_mac_address {
        __le16 action;
        u8 mac_addr[ETH_ALEN];
-};
+} __packed;
 
 struct host_cmd_ds_mac_control {
        __le32 action;
@@ -1230,7 +1248,7 @@ struct host_cmd_ds_802_11_get_log {
        __le32 wep_icv_err_cnt[4];
        __le32 bcn_rcv_cnt;
        __le32 bcn_miss_cnt;
-};
+} __packed;
 
 /* Enumeration for rate format */
 enum _mwifiex_rate_format {
@@ -1368,12 +1386,12 @@ struct host_cmd_ds_rf_ant_mimo {
        __le16 tx_ant_mode;
        __le16 action_rx;
        __le16 rx_ant_mode;
-};
+} __packed;
 
 struct host_cmd_ds_rf_ant_siso {
        __le16 action;
        __le16 ant_mode;
-};
+} __packed;
 
 struct host_cmd_ds_tdls_oper {
        __le16 tdls_action;
@@ -1383,13 +1401,13 @@ struct host_cmd_ds_tdls_oper {
 
 struct mwifiex_tdls_config {
        __le16 enable;
-};
+} __packed;
 
 struct mwifiex_tdls_config_cs_params {
        u8 unit_time;
        u8 thr_otherlink;
        u8 thr_directlink;
-};
+} __packed;
 
 struct mwifiex_tdls_init_cs_params {
        u8 peer_mac[ETH_ALEN];
@@ -1404,7 +1422,7 @@ struct mwifiex_tdls_init_cs_params {
 
 struct mwifiex_tdls_stop_cs_params {
        u8 peer_mac[ETH_ALEN];
-};
+} __packed;
 
 struct host_cmd_ds_tdls_config {
        __le16 tdls_action;
@@ -1709,7 +1727,7 @@ struct mwifiex_ie_types_local_pwr_constraint {
 struct mwifiex_ie_types_wmm_param_set {
        struct mwifiex_ie_types_header header;
        u8 wmm_ie[1];
-};
+} __packed;
 
 struct mwifiex_ie_types_mgmt_frame {
        struct mwifiex_ie_types_header header;
@@ -1834,7 +1852,7 @@ struct host_cmd_ds_mem_access {
        __le16 reserved;
        __le32 addr;
        __le32 value;
-};
+} __packed;
 
 struct mwifiex_ie_types_qos_info {
        struct mwifiex_ie_types_header header;
index c488c3068abc53e8991720405057634772e4afdd..922e3d69fd84748d72f39c0ba4b2592b468a8101 100644 (file)
@@ -131,9 +131,10 @@ mwifiex_update_autoindex_ies(struct mwifiex_private *priv,
                               sizeof(struct mwifiex_ie));
                }
 
-               le16_add_cpu(&ie_list->len,
-                            le16_to_cpu(priv->mgmt_ie[index].ie_length) +
-                            MWIFIEX_IE_HDR_SIZE);
+               le16_unaligned_add_cpu(&ie_list->len,
+                                      le16_to_cpu(
+                                           priv->mgmt_ie[index].ie_length) +
+                                      MWIFIEX_IE_HDR_SIZE);
                input_len -= tlv_len + MWIFIEX_IE_HDR_SIZE;
        }
 
@@ -172,21 +173,21 @@ mwifiex_update_uap_custom_ie(struct mwifiex_private *priv,
                      le16_to_cpu(beacon_ie->ie_length);
                memcpy(pos, beacon_ie, len);
                pos += len;
-               le16_add_cpu(&ap_custom_ie->len, len);
+               le16_unaligned_add_cpu(&ap_custom_ie->len, len);
        }
        if (pr_ie) {
                len = sizeof(struct mwifiex_ie) - IEEE_MAX_IE_SIZE +
                      le16_to_cpu(pr_ie->ie_length);
                memcpy(pos, pr_ie, len);
                pos += len;
-               le16_add_cpu(&ap_custom_ie->len, len);
+               le16_unaligned_add_cpu(&ap_custom_ie->len, len);
        }
        if (ar_ie) {
                len = sizeof(struct mwifiex_ie) - IEEE_MAX_IE_SIZE +
                      le16_to_cpu(ar_ie->ie_length);
                memcpy(pos, ar_ie, len);
                pos += len;
-               le16_add_cpu(&ap_custom_ie->len, len);
+               le16_unaligned_add_cpu(&ap_custom_ie->len, len);
        }
 
        ret = mwifiex_update_autoindex_ies(priv, ap_custom_ie);
@@ -242,7 +243,7 @@ static int mwifiex_update_vs_ie(const u8 *ies, int ies_len,
                vs_ie = (struct ieee_types_header *)vendor_ie;
                memcpy(ie->ie_buffer + le16_to_cpu(ie->ie_length),
                       vs_ie, vs_ie->len + 2);
-               le16_add_cpu(&ie->ie_length, vs_ie->len + 2);
+               le16_unaligned_add_cpu(&ie->ie_length, vs_ie->len + 2);
                ie->mgmt_subtype_mask = cpu_to_le16(mask);
                ie->ie_index = cpu_to_le16(MWIFIEX_AUTO_IDX_MASK);
        }
index 536ab834b12625f8ac7ccce1b309b465f82db31b..48e154e1865df321e12f4f160d51085d599d8721 100644 (file)
@@ -91,6 +91,8 @@ struct wep_key {
 #define MWIFIEX_TDLS_DEF_QOS_CAPAB             0xf
 #define MWIFIEX_PRIO_BK                                2
 #define MWIFIEX_PRIO_VI                                5
+#define MWIFIEX_SUPPORTED_CHANNELS             2
+#define MWIFIEX_OPERATING_CLASSES              16
 
 struct mwifiex_uap_bss_param {
        u8 channel;
index 5ebca1d0cfc750969793c26ac5f37e858e897fc4..739d654bc9a6140b9db2c103b2678a1685f9a451 100644 (file)
@@ -17,6 +17,8 @@
  * this warranty disclaimer.
  */
 
+#include <linux/suspend.h>
+
 #include "main.h"
 #include "wmm.h"
 #include "cfg80211.h"
@@ -57,8 +59,8 @@ MODULE_PARM_DESC(mfg_mode, "manufacturing mode enable:1, disable:0");
  * In case of any errors during initialization, this function also ensures
  * proper cleanup before exiting.
  */
-static int mwifiex_register(void *card, struct mwifiex_if_ops *if_ops,
-                           void **padapter)
+static int mwifiex_register(void *card, struct device *dev,
+                           struct mwifiex_if_ops *if_ops, void **padapter)
 {
        struct mwifiex_adapter *adapter;
        int i;
@@ -68,6 +70,7 @@ static int mwifiex_register(void *card, struct mwifiex_if_ops *if_ops,
                return -ENOMEM;
 
        *padapter = adapter;
+       adapter->dev = dev;
        adapter->card = card;
 
        /* Save interface specific operations in adapter */
@@ -146,7 +149,6 @@ static int mwifiex_unregister(struct mwifiex_adapter *adapter)
 
        kfree(adapter->regd);
 
-       vfree(adapter->chan_stats);
        kfree(adapter);
        return 0;
 }
@@ -510,7 +512,7 @@ static void mwifiex_terminate_workqueue(struct mwifiex_adapter *adapter)
  *      - Download the correct firmware to card
  *      - Issue the init commands to firmware
  */
-static void mwifiex_fw_dpc(const struct firmware *firmware, void *context)
+static int _mwifiex_fw_dpc(const struct firmware *firmware, void *context)
 {
        int ret;
        char fmt[64];
@@ -630,6 +632,7 @@ static void mwifiex_fw_dpc(const struct firmware *firmware, void *context)
        goto done;
 
 err_add_intf:
+       vfree(adapter->chan_stats);
        wiphy_unregister(adapter->wiphy);
        wiphy_free(adapter->wiphy);
 err_init_fw:
@@ -663,11 +666,18 @@ done:
                mwifiex_free_adapter(adapter);
        /* Tell all current and future waiters we're finished */
        complete_all(fw_done);
-       return;
+
+       return init_failed ? -EIO : 0;
+}
+
+static void mwifiex_fw_dpc(const struct firmware *firmware, void *context)
+{
+       _mwifiex_fw_dpc(firmware, context);
 }
 
 /*
- * This function initializes the hardware and gets firmware.
+ * This function gets the firmware and (if called asynchronously) kicks off the
+ * HW init when done.
  */
 static int mwifiex_init_hw_fw(struct mwifiex_adapter *adapter,
                              bool req_fw_nowait)
@@ -690,20 +700,15 @@ static int mwifiex_init_hw_fw(struct mwifiex_adapter *adapter,
                ret = request_firmware_nowait(THIS_MODULE, 1, adapter->fw_name,
                                              adapter->dev, GFP_KERNEL, adapter,
                                              mwifiex_fw_dpc);
-               if (ret < 0)
-                       mwifiex_dbg(adapter, ERROR,
-                                   "request_firmware_nowait error %d\n", ret);
        } else {
                ret = request_firmware(&adapter->firmware,
                                       adapter->fw_name,
                                       adapter->dev);
-               if (ret < 0)
-                       mwifiex_dbg(adapter, ERROR,
-                                   "request_firmware error %d\n", ret);
-               else
-                       mwifiex_fw_dpc(adapter->firmware, (void *)adapter);
        }
 
+       if (ret < 0)
+               mwifiex_dbg(adapter, ERROR, "request_firmware%s error %d\n",
+                           req_fw_nowait ? "_nowait" : "", ret);
        return ret;
 }
 
@@ -1412,6 +1417,7 @@ mwifiex_shutdown_sw(struct mwifiex_adapter *adapter)
                        mwifiex_del_virtual_intf(adapter->wiphy, &priv->wdev);
                rtnl_unlock();
        }
+       vfree(adapter->chan_stats);
 
        mwifiex_dbg(adapter, INFO, "%s, successful\n", __func__);
 exit_return:
@@ -1425,6 +1431,8 @@ EXPORT_SYMBOL_GPL(mwifiex_shutdown_sw);
 int
 mwifiex_reinit_sw(struct mwifiex_adapter *adapter)
 {
+       int ret;
+
        mwifiex_init_lock_list(adapter);
        if (adapter->if_ops.up_dev)
                adapter->if_ops.up_dev(adapter);
@@ -1434,6 +1442,7 @@ mwifiex_reinit_sw(struct mwifiex_adapter *adapter)
        init_waitqueue_head(&adapter->init_wait_q);
        adapter->is_suspended = false;
        adapter->hs_activated = false;
+       adapter->is_cmd_timedout = 0;
        init_waitqueue_head(&adapter->hs_activate_wait_q);
        init_waitqueue_head(&adapter->cmd_wait_q.wait);
        adapter->cmd_wait_q.status = 0;
@@ -1471,9 +1480,15 @@ mwifiex_reinit_sw(struct mwifiex_adapter *adapter)
                            "%s: firmware init failed\n", __func__);
                goto err_init_fw;
        }
+
+       /* _mwifiex_fw_dpc() does its own cleanup */
+       ret = _mwifiex_fw_dpc(adapter->firmware, adapter);
+       if (ret) {
+               pr_err("Failed to bring up adapter: %d\n", ret);
+               return ret;
+       }
        mwifiex_dbg(adapter, INFO, "%s, successful\n", __func__);
 
-       complete_all(adapter->fw_done);
        return 0;
 
 err_init_fw:
@@ -1501,14 +1516,13 @@ static irqreturn_t mwifiex_irq_wakeup_handler(int irq, void *priv)
 {
        struct mwifiex_adapter *adapter = priv;
 
-       if (adapter->irq_wakeup >= 0) {
-               dev_dbg(adapter->dev, "%s: wake by wifi", __func__);
-               adapter->wake_by_wifi = true;
-               disable_irq_nosync(irq);
-       }
+       dev_dbg(adapter->dev, "%s: wake by wifi", __func__);
+       adapter->wake_by_wifi = true;
+       disable_irq_nosync(irq);
 
        /* Notify PM core we are wakeup source */
        pm_wakeup_event(adapter->dev, 0);
+       pm_system_wakeup();
 
        return IRQ_HANDLED;
 }
@@ -1568,12 +1582,11 @@ mwifiex_add_card(void *card, struct completion *fw_done,
 {
        struct mwifiex_adapter *adapter;
 
-       if (mwifiex_register(card, if_ops, (void **)&adapter)) {
+       if (mwifiex_register(card, dev, if_ops, (void **)&adapter)) {
                pr_err("%s: software init failed\n", __func__);
                goto err_init_sw;
        }
 
-       adapter->dev = dev;
        mwifiex_probe_of(adapter);
 
        adapter->iface_type = iface_type;
@@ -1714,10 +1727,14 @@ int mwifiex_remove_card(struct mwifiex_adapter *adapter)
                        mwifiex_del_virtual_intf(adapter->wiphy, &priv->wdev);
                rtnl_unlock();
        }
+       vfree(adapter->chan_stats);
 
        wiphy_unregister(adapter->wiphy);
        wiphy_free(adapter->wiphy);
 
+       if (adapter->irq_wakeup >= 0)
+               device_init_wakeup(adapter->dev, false);
+
        /* Unregister device */
        mwifiex_dbg(adapter, INFO,
                    "info: unregister device\n");
@@ -1739,7 +1756,7 @@ void _mwifiex_dbg(const struct mwifiex_adapter *adapter, int mask,
        struct va_format vaf;
        va_list args;
 
-       if (!adapter->dev || !(adapter->debug_mask & mask))
+       if (!(adapter->debug_mask & mask))
                return;
 
        va_start(args, fmt);
@@ -1747,7 +1764,10 @@ void _mwifiex_dbg(const struct mwifiex_adapter *adapter, int mask,
        vaf.fmt = fmt;
        vaf.va = &args;
 
-       dev_info(adapter->dev, "%pV", &vaf);
+       if (adapter->dev)
+               dev_info(adapter->dev, "%pV", &vaf);
+       else
+               pr_info("%pV", &vaf);
 
        va_end(args);
 }
index 5c8297207f339559461f43cee1dc9b0585a23042..f1cb8753dc0214bd775502aaf9d96e309115e021 100644 (file)
@@ -1359,7 +1359,7 @@ mwifiex_netdev_get_priv(struct net_device *dev)
  */
 static inline bool mwifiex_is_skb_mgmt_frame(struct sk_buff *skb)
 {
-       return (le32_to_cpu(*(__le32 *)skb->data) == PKT_TYPE_MGMT);
+       return (get_unaligned_le32(skb->data) == PKT_TYPE_MGMT);
 }
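Several hunks in this file swap casts such as *(__le32 *)p for get_unaligned_le32(p): byte-wise access is legal at any alignment, while a dereferencing cast may fault or be miscompiled on strict-alignment architectures. A portable userspace model of the helper:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

static uint32_t rd_le32(const void *p)
{
	uint8_t b[4];

	memcpy(b, p, 4);  /* memcpy is alignment-safe by definition */
	return (uint32_t)b[0] | ((uint32_t)b[1] << 8) |
	       ((uint32_t)b[2] << 16) | ((uint32_t)b[3] << 24);
}

int main(void)
{
	uint8_t buf[8] = { 0xAA, 0x0D, 0x00, 0x00, 0x00 };

	/* the value starts at offset 1, where a u32 cast would be misaligned */
	printf("0x%08x\n", rd_le32(buf + 1));  /* 0x0000000d */
	return 0;
}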
 
 /* This function retrieves channel closed for operation by Channel
index a0d918094889df6cd9de14046b773d6112b2006b..ac62bce50e964b900135cdc600dcdb9032928a3f 100644 (file)
@@ -119,7 +119,7 @@ static int mwifiex_read_reg_byte(struct mwifiex_adapter *adapter,
  */
 static bool mwifiex_pcie_ok_to_access_hw(struct mwifiex_adapter *adapter)
 {
-       u32 *cookie_addr;
+       u32 cookie_value;
        struct pcie_service_card *card = adapter->card;
        const struct mwifiex_pcie_card_reg *reg = card->pcie.reg;
 
@@ -127,11 +127,11 @@ static bool mwifiex_pcie_ok_to_access_hw(struct mwifiex_adapter *adapter)
                return true;
 
        if (card->sleep_cookie_vbase) {
-               cookie_addr = (u32 *)card->sleep_cookie_vbase;
+               cookie_value = get_unaligned_le32(card->sleep_cookie_vbase);
                mwifiex_dbg(adapter, INFO,
                            "info: ACCESS_HW: sleep cookie=0x%x\n",
-                           *cookie_addr);
-               if (*cookie_addr == FW_AWAKE_COOKIE)
+                           cookie_value);
+               if (cookie_value == FW_AWAKE_COOKIE)
                        return true;
        }
 
@@ -294,8 +294,6 @@ static void mwifiex_pcie_remove(struct pci_dev *pdev)
        if (!adapter || !adapter->priv_num)
                return;
 
-       cancel_work_sync(&card->work);
-
        reg = card->pcie.reg;
        if (reg)
                ret = mwifiex_read_reg(adapter, reg->fw_status, &fw_status);
@@ -350,22 +348,16 @@ MODULE_DEVICE_TABLE(pci, mwifiex_ids);
 
 static void mwifiex_pcie_reset_notify(struct pci_dev *pdev, bool prepare)
 {
-       struct mwifiex_adapter *adapter;
-       struct pcie_service_card *card;
-
-       if (!pdev) {
-               pr_err("%s: PCIe device is not specified\n", __func__);
-               return;
-       }
+       struct pcie_service_card *card = pci_get_drvdata(pdev);
+       struct mwifiex_adapter *adapter = card->adapter;
+       int ret;
 
-       card = (struct pcie_service_card *)pci_get_drvdata(pdev);
-       if (!card || !card->adapter) {
-               pr_err("%s: Card or adapter structure is not valid (%ld)\n",
-                      __func__, (long)card);
+       if (!adapter) {
+               dev_err(&pdev->dev, "%s: adapter structure is not valid\n",
+                       __func__);
                return;
        }
 
-       adapter = card->adapter;
        mwifiex_dbg(adapter, INFO,
                    "%s: vendor=0x%4.04x device=0x%4.04x rev=%d %s\n",
                    __func__, pdev->vendor, pdev->device,
@@ -379,13 +371,19 @@ static void mwifiex_pcie_reset_notify(struct pci_dev *pdev, bool prepare)
                 */
                mwifiex_shutdown_sw(adapter);
                adapter->surprise_removed = true;
+               clear_bit(MWIFIEX_IFACE_WORK_DEVICE_DUMP, &card->work_flags);
+               clear_bit(MWIFIEX_IFACE_WORK_CARD_RESET, &card->work_flags);
        } else {
                /* Kernel stores and restores PCIe function context before and
                 * after performing FLR respectively. Reconfigure the software
                 * and firmware including firmware redownload
                 */
                adapter->surprise_removed = false;
-               mwifiex_reinit_sw(adapter);
+               ret = mwifiex_reinit_sw(adapter);
+               if (ret) {
+                       dev_err(&pdev->dev, "reinit failed: %d\n", ret);
+                       return;
+               }
        }
        mwifiex_dbg(adapter, INFO, "%s, successful\n", __func__);
 }
@@ -447,7 +445,7 @@ static void mwifiex_delay_for_sleep_cookie(struct mwifiex_adapter *adapter,
                                            sizeof(sleep_cookie),
                                            PCI_DMA_FROMDEVICE);
                buffer = cmdrsp->data;
-               sleep_cookie = READ_ONCE(*(u32 *)buffer);
+               sleep_cookie = get_unaligned_le32(buffer);
 
                if (sleep_cookie == MWIFIEX_DEF_SLEEP_COOKIE) {
                        mwifiex_dbg(adapter, INFO,
@@ -1039,6 +1037,7 @@ static int mwifiex_pcie_delete_cmdrsp_buf(struct mwifiex_adapter *adapter)
        if (card && card->cmd_buf) {
                mwifiex_unmap_pci_memory(adapter, card->cmd_buf,
                                         PCI_DMA_TODEVICE);
+               dev_kfree_skb_any(card->cmd_buf);
        }
        return 0;
 }
@@ -1049,6 +1048,7 @@ static int mwifiex_pcie_delete_cmdrsp_buf(struct mwifiex_adapter *adapter)
 static int mwifiex_pcie_alloc_sleep_cookie_buf(struct mwifiex_adapter *adapter)
 {
        struct pcie_service_card *card = adapter->card;
+       u32 tmp;
 
        card->sleep_cookie_vbase = pci_alloc_consistent(card->dev, sizeof(u32),
                                                     &card->sleep_cookie_pbase);
@@ -1058,11 +1058,12 @@ static int mwifiex_pcie_alloc_sleep_cookie_buf(struct mwifiex_adapter *adapter)
                return -ENOMEM;
        }
        /* Init val of Sleep Cookie */
-       *(u32 *)card->sleep_cookie_vbase = FW_AWAKE_COOKIE;
+       tmp = FW_AWAKE_COOKIE;
+       put_unaligned(tmp, card->sleep_cookie_vbase);
 
        mwifiex_dbg(adapter, INFO,
                    "alloc_scook: sleep cookie=0x%x\n",
-                   *((u32 *)card->sleep_cookie_vbase));
+                   get_unaligned(card->sleep_cookie_vbase));
 
        return 0;
 }
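
The conversions above from `*(u32 *)buf`-style dereferences to get_unaligned_le32()/put_unaligned() matter on architectures that fault on unaligned loads and on big-endian hosts, since these buffers carry little-endian device data at arbitrary byte offsets. An illustrative, self-contained equivalent of the LE32 accessors using explicit byte accesses (not the kernel implementation):

    #include <stdint.h>
    #include <stdio.h>

    /* Illustrative equivalents of get/put_unaligned_le32() */
    static uint32_t demo_get_unaligned_le32(const void *p)
    {
            const uint8_t *b = p;

            return (uint32_t)b[0] | (uint32_t)b[1] << 8 |
                   (uint32_t)b[2] << 16 | (uint32_t)b[3] << 24;
    }

    static void demo_put_unaligned_le32(uint32_t v, void *p)
    {
            uint8_t *b = p;

            b[0] = v; b[1] = v >> 8; b[2] = v >> 16; b[3] = v >> 24;
    }

    int main(void)
    {
            uint8_t buf[8];

            /* offset 1 is deliberately misaligned for a u32 */
            demo_put_unaligned_le32(0xFEDCBABAu, buf + 1);
            printf("0x%08X\n", (unsigned)demo_get_unaligned_le32(buf + 1));
            return 0;
    }
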
@@ -1223,7 +1224,6 @@ mwifiex_pcie_send_data(struct mwifiex_adapter *adapter, struct sk_buff *skb,
        dma_addr_t buf_pa;
        struct mwifiex_pcie_buf_desc *desc = NULL;
        struct mwifiex_pfu_buf_desc *desc2 = NULL;
-       __le16 *tmp;
 
        if (!(skb->data && skb->len)) {
                mwifiex_dbg(adapter, ERROR,
@@ -1244,10 +1244,8 @@ mwifiex_pcie_send_data(struct mwifiex_adapter *adapter, struct sk_buff *skb,
 
                adapter->data_sent = true;
                payload = skb->data;
-               tmp = (__le16 *)&payload[0];
-               *tmp = cpu_to_le16((u16)skb->len);
-               tmp = (__le16 *)&payload[2];
-               *tmp = cpu_to_le16(MWIFIEX_TYPE_DATA);
+               put_unaligned_le16((u16)skb->len, payload + 0);
+               put_unaligned_le16(MWIFIEX_TYPE_DATA, payload + 2);
 
                if (mwifiex_map_pci_memory(adapter, skb, skb->len,
                                           PCI_DMA_TODEVICE))
@@ -1376,7 +1374,6 @@ static int mwifiex_pcie_process_recv_data(struct mwifiex_adapter *adapter)
                (card->rxbd_rdptr & reg->rx_rollover_ind))) {
                struct sk_buff *skb_data;
                u16 rx_len;
-               __le16 pkt_len;
 
                rd_index = card->rxbd_rdptr & reg->rx_mask;
                skb_data = card->rx_buf_list[rd_index];
@@ -1393,8 +1390,7 @@ static int mwifiex_pcie_process_recv_data(struct mwifiex_adapter *adapter)
                /* Get data length from interface header -
                 * first 2 bytes for len, next 2 bytes is for type
                 */
-               pkt_len = *((__le16 *)skb_data->data);
-               rx_len = le16_to_cpu(pkt_len);
+               rx_len = get_unaligned_le16(skb_data->data);
                if (WARN_ON(rx_len <= INTF_HEADER_LEN ||
                            rx_len > MWIFIEX_RX_DATA_BUF_SIZE)) {
                        mwifiex_dbg(adapter, ERROR,
@@ -1601,13 +1597,18 @@ mwifiex_pcie_send_cmd(struct mwifiex_adapter *adapter, struct sk_buff *skb)
 
        adapter->cmd_sent = true;
 
-       *(__le16 *)&payload[0] = cpu_to_le16((u16)skb->len);
-       *(__le16 *)&payload[2] = cpu_to_le16(MWIFIEX_TYPE_CMD);
+       put_unaligned_le16((u16)skb->len, &payload[0]);
+       put_unaligned_le16(MWIFIEX_TYPE_CMD, &payload[2]);
 
        if (mwifiex_map_pci_memory(adapter, skb, skb->len, PCI_DMA_TODEVICE))
                return -1;
 
        card->cmd_buf = skb;
+       /*
+        * Need to keep a reference, since core driver might free up this
+        * buffer before we've unmapped it.
+        */
+       skb_get(skb);
 
        /* To send a command, the driver will:
                1. Write the 64bit physical address of the data buffer to
@@ -1694,7 +1695,6 @@ static int mwifiex_pcie_process_cmd_complete(struct mwifiex_adapter *adapter)
        struct sk_buff *skb = card->cmdrsp_buf;
        int count = 0;
        u16 rx_len;
-       __le16 pkt_len;
 
        mwifiex_dbg(adapter, CMD,
                    "info: Rx CMD Response\n");
@@ -1711,11 +1711,11 @@ static int mwifiex_pcie_process_cmd_complete(struct mwifiex_adapter *adapter)
        if (card->cmd_buf) {
                mwifiex_unmap_pci_memory(adapter, card->cmd_buf,
                                         PCI_DMA_TODEVICE);
+               dev_kfree_skb_any(card->cmd_buf);
                card->cmd_buf = NULL;
        }
 
-       pkt_len = *((__le16 *)skb->data);
-       rx_len = le16_to_cpu(pkt_len);
+       rx_len = get_unaligned_le16(skb->data);
        skb_put(skb, MWIFIEX_UPLD_SIZE - skb->len);
        skb_trim(skb, rx_len);
 
@@ -1856,7 +1856,7 @@ static int mwifiex_pcie_process_event_ready(struct mwifiex_adapter *adapter)
                desc = card->evtbd_ring[rdptr];
                memset(desc, 0, sizeof(*desc));
 
-               event = *(u32 *) &skb_cmd->data[INTF_HEADER_LEN];
+               event = get_unaligned_le32(&skb_cmd->data[INTF_HEADER_LEN]);
                adapter->event_cause = event;
                /* The first 4 bytes will be the event transfer header:
                   len is 2 bytes followed by type which is 2 bytes */
@@ -1965,6 +1965,94 @@ static int mwifiex_pcie_event_complete(struct mwifiex_adapter *adapter,
        return ret;
 }
 
+/* A combo firmware image is a combination of
+ * (1) a combo CRC header, starting with CMD5,
+ * (2) a bluetooth image, starting with CMD7 and ending with CMD6, with
+ *     data wrapped in CMD1 blocks, and
+ * (3) the wifi image.
+ *
+ * This function skips the header and the bluetooth part and returns the
+ * offset of the trailing wifi-only part.
+ */
+
+static int mwifiex_extract_wifi_fw(struct mwifiex_adapter *adapter,
+                                  const void *firmware, u32 firmware_len)
+{
+       const struct mwifiex_fw_data *fwdata;
+       u32 offset = 0, data_len, dnld_cmd;
+       int ret = 0;
+       bool cmd7_before = false;
+
+       while (1) {
+               /* Check for integer and buffer overflow */
+               if (offset + sizeof(fwdata->header) < sizeof(fwdata->header) ||
+                   offset + sizeof(fwdata->header) >= firmware_len) {
+                       mwifiex_dbg(adapter, ERROR,
+                                   "extract wifi-only fw failure!\n");
+                       ret = -1;
+                       goto done;
+               }
+
+               fwdata = firmware + offset;
+               dnld_cmd = le32_to_cpu(fwdata->header.dnld_cmd);
+               data_len = le32_to_cpu(fwdata->header.data_length);
+
+               /* Skip past header */
+               offset += sizeof(fwdata->header);
+
+               switch (dnld_cmd) {
+               case MWIFIEX_FW_DNLD_CMD_1:
+                       if (!cmd7_before) {
+                               mwifiex_dbg(adapter, ERROR,
+                                           "no cmd7 before cmd1!\n");
+                               ret = -1;
+                               goto done;
+                       }
+                       if (offset + data_len < data_len) {
+                               mwifiex_dbg(adapter, ERROR, "bad FW parse\n");
+                               ret = -1;
+                               goto done;
+                       }
+                       offset += data_len;
+                       break;
+               case MWIFIEX_FW_DNLD_CMD_5:
+                       /* Check for integer overflow */
+                       if (offset + data_len < data_len) {
+                               mwifiex_dbg(adapter, ERROR, "bad FW parse\n");
+                               ret = -1;
+                               goto done;
+                       }
+                       offset += data_len;
+                       break;
+               case MWIFIEX_FW_DNLD_CMD_6:
+                       /* Check for integer overflow */
+                       if (offset + data_len < data_len) {
+                               mwifiex_dbg(adapter, ERROR, "bad FW parse\n");
+                               ret = -1;
+                               goto done;
+                       }
+                       offset += data_len;
+                       if (offset >= firmware_len) {
+                               mwifiex_dbg(adapter, ERROR,
+                                           "extract wifi-only fw failure!\n");
+                               ret = -1;
+                       } else {
+                               ret = offset;
+                       }
+                       goto done;
+               case MWIFIEX_FW_DNLD_CMD_7:
+                       cmd7_before = true;
+                       break;
+               default:
+                       mwifiex_dbg(adapter, ERROR, "unknown dnld_cmd %d\n",
+                                   dnld_cmd);
+                       ret = -1;
+                       goto done;
+               }
+       }
+
+done:
+       return ret;
+}
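
The `offset + data_len < data_len` tests in the parser above are the classic unsigned-wraparound guard: if adding a device-supplied length wraps past UINT32_MAX, the modular sum lands below either operand. A small demonstration of the check:

    #include <stdint.h>
    #include <stdio.h>

    /* Returns 1 when offset + len would wrap a u32, mirroring the checks above */
    static int would_wrap(uint32_t offset, uint32_t len)
    {
            return offset + len < len;
    }

    int main(void)
    {
            printf("%d\n", would_wrap(16, 4096));         /* 0: fine */
            printf("%d\n", would_wrap(0xFFFFFFF0u, 4096)); /* 1: wraps */
            return 0;
    }
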
+
 /*
  * This function downloads the firmware to the card.
  *
@@ -1980,7 +2068,7 @@ static int mwifiex_prog_fw_w_helper(struct mwifiex_adapter *adapter,
        u32 firmware_len = fw->fw_len;
        u32 offset = 0;
        struct sk_buff *skb;
-       u32 txlen, tx_blocks = 0, tries, len;
+       u32 txlen, tx_blocks = 0, tries, len, val;
        u32 block_retry_cnt = 0;
        struct pcie_service_card *card = adapter->card;
        const struct mwifiex_pcie_card_reg *reg = card->pcie.reg;
@@ -2007,6 +2095,24 @@ static int mwifiex_prog_fw_w_helper(struct mwifiex_adapter *adapter,
                goto done;
        }
 
+       ret = mwifiex_read_reg(adapter, PCIE_SCRATCH_13_REG, &val);
+       if (ret) {
+               mwifiex_dbg(adapter, FATAL, "Failed to read scratch register 13\n");
+               goto done;
+       }
+
+       /* PCIe FLR case: extract the wifi part from the combo firmware */
+       if (val == MWIFIEX_PCIE_FLR_HAPPENS) {
+               ret = mwifiex_extract_wifi_fw(adapter, firmware, firmware_len);
+               if (ret < 0) {
+                       mwifiex_dbg(adapter, ERROR, "Failed to extract wifi fw\n");
+                       goto done;
+               }
+               offset = ret;
+               mwifiex_dbg(adapter, MSG,
+                           "info: dnld wifi firmware from %d bytes\n", offset);
+       }
+
        /* Perform firmware data transfer */
        do {
                u32 ireg_intr = 0;
@@ -2503,8 +2609,8 @@ mwifiex_pcie_reg_dump(struct mwifiex_adapter *adapter, char *drv_buf)
        struct pcie_service_card *card = adapter->card;
        const struct mwifiex_pcie_card_reg *reg = card->pcie.reg;
        int pcie_scratch_reg[] = {PCIE_SCRATCH_12_REG,
-                                 PCIE_SCRATCH_13_REG,
-                                 PCIE_SCRATCH_14_REG};
+                                 PCIE_SCRATCH_14_REG,
+                                 PCIE_SCRATCH_15_REG};
 
        if (!p)
                return 0;
@@ -2739,6 +2845,21 @@ static void mwifiex_pcie_device_dump(struct mwifiex_adapter *adapter)
        schedule_work(&card->work);
 }
 
+static void mwifiex_pcie_free_buffers(struct mwifiex_adapter *adapter)
+{
+       struct pcie_service_card *card = adapter->card;
+       const struct mwifiex_pcie_card_reg *reg = card->pcie.reg;
+
+       if (reg->sleep_cookie)
+               mwifiex_pcie_delete_sleep_cookie_buf(adapter);
+
+       mwifiex_pcie_delete_cmdrsp_buf(adapter);
+       mwifiex_pcie_delete_evtbd_ring(adapter);
+       mwifiex_pcie_delete_rxbd_ring(adapter);
+       mwifiex_pcie_delete_txbd_ring(adapter);
+       card->cmdrsp_buf = NULL;
+}
+
 /*
  * This function initializes the PCI-E host memory space, WCB rings, etc.
  *
@@ -2850,13 +2971,6 @@ err_enable_dev:
 
 /*
  * This function cleans up the allocated card buffers.
- *
- * The following are freed by this function -
- *      - TXBD ring buffers
- *      - RXBD ring buffers
- *      - Event BD ring buffers
- *      - Command response ring buffer
- *      - Sleep cookie buffer
  */
 static void mwifiex_cleanup_pcie(struct mwifiex_adapter *adapter)
 {
@@ -2866,6 +2980,8 @@ static void mwifiex_cleanup_pcie(struct mwifiex_adapter *adapter)
        int ret;
        u32 fw_status;
 
+       cancel_work_sync(&card->work);
+
        ret = mwifiex_read_reg(adapter, reg->fw_status, &fw_status);
        if (fw_status == FIRMWARE_READY_PCIE) {
                mwifiex_dbg(adapter, INFO,
@@ -2875,6 +2991,8 @@ static void mwifiex_cleanup_pcie(struct mwifiex_adapter *adapter)
                                    "Failed to write driver not-ready signature\n");
        }
 
+       mwifiex_pcie_free_buffers(adapter);
+
        if (pdev) {
                pci_iounmap(pdev, card->pci_mmap);
                pci_iounmap(pdev, card->pci_mmap1);
@@ -3067,12 +3185,6 @@ static void mwifiex_pcie_up_dev(struct mwifiex_adapter *adapter)
        struct pci_dev *pdev = card->dev;
        const struct mwifiex_pcie_card_reg *reg = card->pcie.reg;
 
-       /* Bluetooth is not on pcie interface. Download Wifi only firmware
-        * during pcie FLR, so that bluetooth part of firmware which is
-        * already running doesn't get affected.
-        */
-       strcpy(adapter->fw_name, PCIE8997_DEFAULT_WIFIFW_NAME);
-
        /* tx_buf_size might be changed to 3584 by firmware during
         * data transfer, we should reset it to default size.
         */
@@ -3126,10 +3238,7 @@ err_cre_txbd:
        pci_iounmap(pdev, card->pci_mmap1);
 }
 
-/* This function cleans up the PCI-E host memory space.
- * Some code is extracted from mwifiex_unregister_dev()
- *
- */
+/* This function cleans up the PCI-E host memory space. */
 static void mwifiex_pcie_down_dev(struct mwifiex_adapter *adapter)
 {
        struct pcie_service_card *card = adapter->card;
@@ -3140,14 +3249,7 @@ static void mwifiex_pcie_down_dev(struct mwifiex_adapter *adapter)
 
        adapter->seq_num = 0;
 
-       if (reg->sleep_cookie)
-               mwifiex_pcie_delete_sleep_cookie_buf(adapter);
-
-       mwifiex_pcie_delete_cmdrsp_buf(adapter);
-       mwifiex_pcie_delete_evtbd_ring(adapter);
-       mwifiex_pcie_delete_rxbd_ring(adapter);
-       mwifiex_pcie_delete_txbd_ring(adapter);
-       card->cmdrsp_buf = NULL;
+       mwifiex_pcie_free_buffers(adapter);
 }
 
 static struct mwifiex_if_ops pcie_ops = {
index 00e8ee5ad4a834ed1905c5b98acee85be3ca1a32..f7ce9b6db6b41a9c4ff2493f0b11c2a67de0f6a3 100644 (file)
@@ -35,7 +35,6 @@
 #define PCIE8897_B0_FW_NAME "mrvl/pcie8897_uapsta.bin"
 #define PCIEUART8997_FW_NAME_V4 "mrvl/pcieuart8997_combo_v4.bin"
 #define PCIEUSB8997_FW_NAME_V4 "mrvl/pcieusb8997_combo_v4.bin"
-#define PCIE8997_DEFAULT_WIFIFW_NAME "mrvl/pcie8997_wlan_v4.bin"
 
 #define PCIE_VENDOR_ID_MARVELL              (0x11ab)
 #define PCIE_VENDOR_ID_V2_MARVELL           (0x1b4b)
@@ -77,8 +76,9 @@
 #define PCIE_SCRATCH_10_REG                            0xCE8
 #define PCIE_SCRATCH_11_REG                            0xCEC
 #define PCIE_SCRATCH_12_REG                            0xCF0
-#define PCIE_SCRATCH_13_REG                            0xCF8
-#define PCIE_SCRATCH_14_REG                            0xCFC
+#define PCIE_SCRATCH_13_REG                            0xCF4
+#define PCIE_SCRATCH_14_REG                            0xCF8
+#define PCIE_SCRATCH_15_REG                            0xCFC
 #define PCIE_RD_DATA_PTR_Q0_Q1                          0xC08C
 #define PCIE_WR_DATA_PTR_Q0_Q1                          0xC05C
 
 #define MWIFIEX_SLEEP_COOKIE_SIZE                      4
 #define MWIFIEX_MAX_DELAY_COUNT                                100
 
+#define MWIFIEX_PCIE_FLR_HAPPENS 0xFEDCBABA
+
 struct mwifiex_pcie_card_reg {
        u16 cmd_addr_lo;
        u16 cmd_addr_hi;
@@ -217,8 +219,8 @@ static const struct mwifiex_pcie_card_reg mwifiex_reg_8897 = {
        .ring_tx_start_ptr = MWIFIEX_BD_FLAG_TX_START_PTR,
        .pfu_enabled = 1,
        .sleep_cookie = 0,
-       .fw_dump_ctrl = 0xcf4,
-       .fw_dump_start = 0xcf8,
+       .fw_dump_ctrl = PCIE_SCRATCH_13_REG,
+       .fw_dump_start = PCIE_SCRATCH_14_REG,
        .fw_dump_end = 0xcff,
        .fw_dump_host_ready = 0xee,
        .fw_dump_read_done = 0xfe,
@@ -254,8 +256,8 @@ static const struct mwifiex_pcie_card_reg mwifiex_reg_8997 = {
        .ring_tx_start_ptr = MWIFIEX_BD_FLAG_TX_START_PTR,
        .pfu_enabled = 1,
        .sleep_cookie = 0,
-       .fw_dump_ctrl = 0xcf4,
-       .fw_dump_start = 0xcf8,
+       .fw_dump_ctrl = PCIE_SCRATCH_13_REG,
+       .fw_dump_start = PCIE_SCRATCH_14_REG,
        .fw_dump_end = 0xcff,
        .fw_dump_host_ready = 0xcc,
        .fw_dump_read_done = 0xdd,
index 181691684a08f0a1bfc6eca17292ba5fe3f1a778..ce6936d0c5c02ee4a64e5bcb5a13e11188adf07f 100644 (file)
@@ -691,8 +691,9 @@ mwifiex_scan_channel_list(struct mwifiex_private *priv,
 
                        /* Increment the TLV header length by the size
                           appended */
-                       le16_add_cpu(&chan_tlv_out->header.len,
-                                    sizeof(chan_tlv_out->chan_scan_param));
+                       le16_unaligned_add_cpu(&chan_tlv_out->header.len,
+                                              sizeof(
+                                               chan_tlv_out->chan_scan_param));
 
                        /*
                         * The tlv buffer length is set to the number of bytes
@@ -859,6 +860,7 @@ mwifiex_config_scan(struct mwifiex_private *priv,
        *scan_current_only = false;
 
        if (user_scan_in) {
+               u8 tmpaddr[ETH_ALEN];
 
                /* Default the ssid_filter flag to TRUE, set false under
                   certain wildcard conditions and qualified by the existence
@@ -883,8 +885,10 @@ mwifiex_config_scan(struct mwifiex_private *priv,
                       user_scan_in->specific_bssid,
                       sizeof(scan_cfg_out->specific_bssid));
 
+               memcpy(tmpaddr, scan_cfg_out->specific_bssid, ETH_ALEN);
+
                if (adapter->ext_scan &&
-                   !is_zero_ether_addr(scan_cfg_out->specific_bssid)) {
+                   !is_zero_ether_addr(tmpaddr)) {
                        bssid_tlv =
                                (struct mwifiex_ie_types_bssid_list *)tlv_pos;
                        bssid_tlv->header.type = cpu_to_le16(TLV_TYPE_BSSID);
@@ -947,8 +951,9 @@ mwifiex_config_scan(struct mwifiex_private *priv,
                 *  truncate scan results.  That is not an issue with an SSID
                 *  or BSSID filter applied to the scan results in the firmware.
                 */
+               memcpy(tmpaddr, scan_cfg_out->specific_bssid, ETH_ALEN);
                if ((i && ssid_filter) ||
-                   !is_zero_ether_addr(scan_cfg_out->specific_bssid))
+                   !is_zero_ether_addr(tmpaddr))
                        *filtered_scan = true;
 
                if (user_scan_in->scan_chan_gap) {
@@ -989,10 +994,15 @@ mwifiex_config_scan(struct mwifiex_private *priv,
         *  If a specific BSSID or SSID is used, the number of channels in the
         *  scan command will be increased to the absolute maximum.
         */
-       if (*filtered_scan)
+       if (*filtered_scan) {
                *max_chan_per_scan = MWIFIEX_MAX_CHANNELS_PER_SPECIFIC_SCAN;
-       else
-               *max_chan_per_scan = MWIFIEX_DEF_CHANNELS_PER_SCAN_CMD;
+       } else {
+               if (!priv->media_connected)
+                       *max_chan_per_scan = MWIFIEX_DEF_CHANNELS_PER_SCAN_CMD;
+               else
+                       *max_chan_per_scan =
+                                       MWIFIEX_DEF_CHANNELS_PER_SCAN_CMD / 2;
+       }
 
        if (adapter->ext_scan) {
                bss_mode = (struct mwifiex_ie_types_bss_mode *)tlv_pos;
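
The hunk above splits the non-filtered case on association state: when the interface is already connected, the per-command channel count is halved, presumably so that long scans leave air time for the live connection. A sketch of the resulting policy (the constants here are illustrative placeholders, not the driver's values):

    #include <stdio.h>
    #include <stdbool.h>

    /* Illustrative placeholders for the driver's channel-count limits */
    #define SPECIFIC_SCAN_CHANS 14
    #define DEF_SCAN_CHANS       4

    static int max_chan_per_scan(bool filtered_scan, bool media_connected)
    {
            if (filtered_scan)
                    return SPECIFIC_SCAN_CHANS;
            return media_connected ? DEF_SCAN_CHANS / 2 : DEF_SCAN_CHANS;
    }

    int main(void)
    {
            printf("idle: %d, connected: %d, filtered: %d\n",
                   max_chan_per_scan(false, false),
                   max_chan_per_scan(false, true),
                   max_chan_per_scan(true, true));
            return 0;
    }
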
@@ -1742,7 +1752,7 @@ mwifiex_parse_single_response_buf(struct mwifiex_private *priv, u8 **bss_info,
 
        if (*bytes_left >= sizeof(beacon_size)) {
                /* Extract & convert beacon size from command buffer */
-               beacon_size = le16_to_cpu(*(__le16 *)(*bss_info));
+               beacon_size = get_unaligned_le16(*bss_info);
                *bytes_left -= sizeof(beacon_size);
                *bss_info += sizeof(beacon_size);
        }
@@ -2369,8 +2379,9 @@ int mwifiex_cmd_802_11_bg_scan_config(struct mwifiex_private *priv,
                        temp_chan = chan_list_tlv->chan_scan_param + chan_idx;
 
                        /* Increment the TLV header length by size appended */
-                       le16_add_cpu(&chan_list_tlv->header.len,
-                                    sizeof(chan_list_tlv->chan_scan_param));
+                       le16_unaligned_add_cpu(&chan_list_tlv->header.len,
+                                              sizeof(
+                                              chan_list_tlv->chan_scan_param));
 
                        temp_chan->chan_number =
                                bgscan_cfg_in->chan_list[chan_idx].chan_number;
@@ -2407,8 +2418,8 @@ int mwifiex_cmd_802_11_bg_scan_config(struct mwifiex_private *priv,
                        mwifiex_bgscan_create_channel_list(priv, bgscan_cfg_in,
                                                           chan_list_tlv->
                                                           chan_scan_param);
-               le16_add_cpu(&chan_list_tlv->header.len,
-                            chan_num *
+               le16_unaligned_add_cpu(&chan_list_tlv->header.len,
+                                      chan_num *
                             sizeof(chan_list_tlv->chan_scan_param[0]));
        }
 
@@ -2432,7 +2443,7 @@ int mwifiex_cmd_802_11_bg_scan_config(struct mwifiex_private *priv,
        /* Append vendor specific IE TLV */
        mwifiex_cmd_append_vsie_tlv(priv, MWIFIEX_VSIE_MASK_BGSCAN, &tlv_pos);
 
-       le16_add_cpu(&cmd->size, tlv_pos - bgscan_config->tlv);
+       le16_unaligned_add_cpu(&cmd->size, tlv_pos - bgscan_config->tlv);
 
        return 0;
 }
index a4b356d267f982b2646dc90f6eb1a2c3deac1425..0af1c6733c9257e64fbecd81e11fb76ec871452a 100644 (file)
@@ -387,8 +387,6 @@ mwifiex_sdio_remove(struct sdio_func *func)
        if (!adapter || !adapter->priv_num)
                return;
 
-       cancel_work_sync(&card->work);
-
        mwifiex_dbg(adapter, INFO, "info: SDIO func num=%d\n", func->num);
 
        ret = mwifiex_sdio_read_fw_status(adapter, &firmware_stat);
@@ -943,7 +941,7 @@ static int mwifiex_sdio_card_to_host(struct mwifiex_adapter *adapter,
                return -1;
        }
 
-       nb = le16_to_cpu(*(__le16 *) (buffer));
+       nb = get_unaligned_le16(buffer);
        if (nb > npayload) {
                mwifiex_dbg(adapter, ERROR,
                            "%s: invalid packet, nb=%d npayload=%d\n",
@@ -951,7 +949,7 @@ static int mwifiex_sdio_card_to_host(struct mwifiex_adapter *adapter,
                return -1;
        }
 
-       *type = le16_to_cpu(*(__le16 *) (buffer + 2));
+       *type = get_unaligned_le16(buffer + 2);
 
        return ret;
 }
@@ -1139,7 +1137,8 @@ static void mwifiex_deaggr_sdio_pkt(struct mwifiex_adapter *adapter,
                                    __func__, blk_num, blk_size, total_pkt_len);
                        break;
                }
-               pkt_len = le16_to_cpu(*(__le16 *)(data + SDIO_HEADER_OFFSET));
+               pkt_len = get_unaligned_le16(data + SDIO_HEADER_OFFSET);
                if ((pkt_len + SDIO_HEADER_OFFSET) > blk_size) {
                        mwifiex_dbg(adapter, ERROR,
                                    "%s: error in pkt_len,\t"
@@ -1172,10 +1171,11 @@ static int mwifiex_decode_rx_packet(struct mwifiex_adapter *adapter,
                                    struct sk_buff *skb, u32 upld_typ)
 {
        u8 *cmd_buf;
-       __le16 *curr_ptr = (__le16 *)skb->data;
-       u16 pkt_len = le16_to_cpu(*curr_ptr);
+       u16 pkt_len;
        struct mwifiex_rxinfo *rx_info;
 
+       pkt_len = get_unaligned_le16(skb->data);
+
        if (upld_typ != MWIFIEX_TYPE_AGGR_DATA) {
                skb_trim(skb, pkt_len);
                skb_pull(skb, INTF_HEADER_LEN);
@@ -1235,7 +1235,7 @@ static int mwifiex_decode_rx_packet(struct mwifiex_adapter *adapter,
        case MWIFIEX_TYPE_EVENT:
                mwifiex_dbg(adapter, EVENT,
                            "info: --- Rx: Event ---\n");
-               adapter->event_cause = le32_to_cpu(*(__le32 *) skb->data);
+               adapter->event_cause = get_unaligned_le32(skb->data);
 
                if ((skb->len > 0) && (skb->len  < MAX_EVENT_SIZE))
                        memcpy(adapter->event_body,
@@ -1380,7 +1380,7 @@ static int mwifiex_sdio_card_to_host_mp_aggr(struct mwifiex_adapter *adapter,
                }
 
                if (card->mpa_rx.pkt_cnt == 1)
-                       mport = adapter->ioport + port;
+                       mport = adapter->ioport + card->mpa_rx.start_port;
 
                if (mwifiex_read_data_sync(adapter, card->mpa_rx.buf,
                                           card->mpa_rx.buf_len, mport, 1))
@@ -1392,8 +1392,8 @@ static int mwifiex_sdio_card_to_host_mp_aggr(struct mwifiex_adapter *adapter,
                        u32 *len_arr = card->mpa_rx.len_arr;
 
                        /* get curr PKT len & type */
-                       pkt_len = le16_to_cpu(*(__le16 *) &curr_ptr[0]);
-                       pkt_type = le16_to_cpu(*(__le16 *) &curr_ptr[2]);
+                       pkt_len = get_unaligned_le16(&curr_ptr[0]);
+                       pkt_type = get_unaligned_le16(&curr_ptr[2]);
 
                        /* copy pkt to deaggr buf */
                        skb_deaggr = mwifiex_alloc_dma_align_buf(len_arr[pind],
@@ -1813,7 +1813,7 @@ static int mwifiex_host_to_card_mp_aggr(struct mwifiex_adapter *adapter,
                }
 
                if (card->mpa_tx.pkt_cnt == 1)
-                       mport = adapter->ioport + port;
+                       mport = adapter->ioport + card->mpa_tx.start_port;
 
                ret = mwifiex_write_data_to_card(adapter, card->mpa_tx.buf,
                                                 card->mpa_tx.buf_len, mport);
@@ -1874,8 +1874,9 @@ static int mwifiex_sdio_host_to_card(struct mwifiex_adapter *adapter,
        /* Allocate buffer and copy payload */
        blk_size = MWIFIEX_SDIO_BLOCK_SIZE;
        buf_block_len = (pkt_len + blk_size - 1) / blk_size;
-       *(__le16 *)&payload[0] = cpu_to_le16((u16)pkt_len);
-       *(__le16 *)&payload[2] = cpu_to_le16(type);
+       put_unaligned_le16((u16)pkt_len, payload + 0);
+       put_unaligned_le16((u16)type, payload + 2);
 
        /*
         * This is SDIO specific header
@@ -2155,6 +2156,8 @@ static void mwifiex_cleanup_sdio(struct mwifiex_adapter *adapter)
 {
        struct sdio_mmc_card *card = adapter->card;
 
+       cancel_work_sync(&card->work);
+
        kfree(card->mp_regs);
        kfree(card->mpa_rx.skb_arr);
        kfree(card->mpa_rx.len_arr);
@@ -2193,6 +2196,7 @@ static void mwifiex_sdio_card_reset_work(struct mwifiex_adapter *adapter)
 {
        struct sdio_mmc_card *card = adapter->card;
        struct sdio_func *func = card->func;
+       int ret;
 
        mwifiex_shutdown_sw(adapter);
 
@@ -2207,7 +2211,9 @@ static void mwifiex_sdio_card_reset_work(struct mwifiex_adapter *adapter)
        clear_bit(MWIFIEX_IFACE_WORK_DEVICE_DUMP, &card->work_flags);
        clear_bit(MWIFIEX_IFACE_WORK_CARD_RESET, &card->work_flags);
 
-       mwifiex_reinit_sw(adapter);
+       ret = mwifiex_reinit_sw(adapter);
+       if (ret)
+               dev_err(&func->dev, "reinit failed: %d\n", ret);
 }
 
 /* This function read/write firmware */
index 2f1f4d190b28429c43dc61770f53f81a09353616..83916c1439af767ca4d03c2fab046170e44492d0 100644 (file)
@@ -126,19 +126,19 @@ static int mwifiex_cmd_802_11_snmp_mib(struct mwifiex_private *priv,
        if (cmd_action == HostCmd_ACT_GEN_GET) {
                snmp_mib->query_type = cpu_to_le16(HostCmd_ACT_GEN_GET);
                snmp_mib->buf_size = cpu_to_le16(MAX_SNMP_BUF_SIZE);
-               le16_add_cpu(&cmd->size, MAX_SNMP_BUF_SIZE);
+               le16_unaligned_add_cpu(&cmd->size, MAX_SNMP_BUF_SIZE);
        } else if (cmd_action == HostCmd_ACT_GEN_SET) {
                snmp_mib->query_type = cpu_to_le16(HostCmd_ACT_GEN_SET);
                snmp_mib->buf_size = cpu_to_le16(sizeof(u16));
-               *((__le16 *) (snmp_mib->value)) = cpu_to_le16(*ul_temp);
-               le16_add_cpu(&cmd->size, sizeof(u16));
+               put_unaligned_le16(*ul_temp, snmp_mib->value);
+               le16_unaligned_add_cpu(&cmd->size, sizeof(u16));
        }
 
        mwifiex_dbg(priv->adapter, CMD,
                    "cmd: SNMP_CMD: Action=0x%x, OID=0x%x,\t"
                    "OIDSize=0x%x, Value=0x%x\n",
                    cmd_action, cmd_oid, le16_to_cpu(snmp_mib->buf_size),
-                   le16_to_cpu(*(__le16 *)snmp_mib->value));
+                   get_unaligned_le16(snmp_mib->value));
        return 0;
 }
 
@@ -1357,8 +1357,9 @@ mwifiex_cmd_802_11_subsc_evt(struct mwifiex_private *priv,
                            subsc_evt_cfg->bcn_l_rssi_cfg.evt_freq);
 
                pos += sizeof(struct mwifiex_ie_types_rssi_threshold);
-               le16_add_cpu(&cmd->size,
-                            sizeof(struct mwifiex_ie_types_rssi_threshold));
+               le16_unaligned_add_cpu(&cmd->size,
+                                      sizeof(
+                                      struct mwifiex_ie_types_rssi_threshold));
        }
 
        if (event_bitmap & BITMASK_BCN_RSSI_HIGH) {
@@ -1378,8 +1379,9 @@ mwifiex_cmd_802_11_subsc_evt(struct mwifiex_private *priv,
                            subsc_evt_cfg->bcn_h_rssi_cfg.evt_freq);
 
                pos += sizeof(struct mwifiex_ie_types_rssi_threshold);
-               le16_add_cpu(&cmd->size,
-                            sizeof(struct mwifiex_ie_types_rssi_threshold));
+               le16_unaligned_add_cpu(&cmd->size,
+                                      sizeof(
+                                      struct mwifiex_ie_types_rssi_threshold));
        }
 
        return 0;
@@ -1398,7 +1400,7 @@ mwifiex_cmd_append_rpn_expression(struct mwifiex_private *priv,
                filter = &mef_entry->filter[i];
                if (!filter->filt_type)
                        break;
-               *(__le32 *)stack_ptr = cpu_to_le32((u32)filter->repeat);
+               put_unaligned_le32((u32)filter->repeat, stack_ptr);
                stack_ptr += 4;
                *stack_ptr = TYPE_DNUM;
                stack_ptr += 1;
@@ -1410,8 +1412,7 @@ mwifiex_cmd_append_rpn_expression(struct mwifiex_private *priv,
                stack_ptr += 1;
                *stack_ptr = TYPE_BYTESEQ;
                stack_ptr += 1;
-
-               *(__le32 *)stack_ptr = cpu_to_le32((u32)filter->offset);
+               put_unaligned_le32((u32)filter->offset, stack_ptr);
                stack_ptr += 4;
                *stack_ptr = TYPE_DNUM;
                stack_ptr += 1;
@@ -1683,14 +1684,15 @@ mwifiex_cmd_coalesce_cfg(struct mwifiex_private *priv,
                                               sizeof(u8) + sizeof(u8));
 
                /* Add the rule length to the command size*/
-               le16_add_cpu(&cmd->size, le16_to_cpu(rule->header.len) +
-                            sizeof(struct mwifiex_ie_types_header));
+               le16_unaligned_add_cpu(&cmd->size,
+                                      le16_to_cpu(rule->header.len) +
+                                      sizeof(struct mwifiex_ie_types_header));
 
                rule = (void *)((u8 *)rule->params + length);
        }
 
        /* Add sizeof action, num_of_rules to total command length */
-       le16_add_cpu(&cmd->size, sizeof(u16) + sizeof(u16));
+       le16_unaligned_add_cpu(&cmd->size, sizeof(u16) + sizeof(u16));
 
        return 0;
 }
@@ -1708,7 +1710,7 @@ mwifiex_cmd_tdls_config(struct mwifiex_private *priv,
        cmd->command = cpu_to_le16(HostCmd_CMD_TDLS_CONFIG);
        cmd->size = cpu_to_le16(S_DS_GEN);
        tdls_config->tdls_action = cpu_to_le16(cmd_action);
-       le16_add_cpu(&cmd->size, sizeof(tdls_config->tdls_action));
+       le16_unaligned_add_cpu(&cmd->size, sizeof(tdls_config->tdls_action));
 
        switch (cmd_action) {
        case ACT_TDLS_CS_ENABLE_CONFIG:
@@ -1735,7 +1737,7 @@ mwifiex_cmd_tdls_config(struct mwifiex_private *priv,
                return -ENOTSUPP;
        }
 
-       le16_add_cpu(&cmd->size, len);
+       le16_unaligned_add_cpu(&cmd->size, len);
        return 0;
 }
 
@@ -1759,7 +1761,8 @@ mwifiex_cmd_tdls_oper(struct mwifiex_private *priv,
 
        cmd->command = cpu_to_le16(HostCmd_CMD_TDLS_OPER);
        cmd->size = cpu_to_le16(S_DS_GEN);
-       le16_add_cpu(&cmd->size, sizeof(struct host_cmd_ds_tdls_oper));
+       le16_unaligned_add_cpu(&cmd->size,
+                              sizeof(struct host_cmd_ds_tdls_oper));
 
        tdls_oper->reason = 0;
        memcpy(tdls_oper->peer_mac, oper->peer_mac, ETH_ALEN);
@@ -1783,7 +1786,7 @@ mwifiex_cmd_tdls_oper(struct mwifiex_private *priv,
                        return -ENODATA;
                }
 
-               *(__le16 *)pos = cpu_to_le16(params->capability);
+               put_unaligned_le16(params->capability, pos);
                config_len += sizeof(params->capability);
 
                qos_info = params->uapsd_queues | (params->max_sp << 5);
@@ -1861,7 +1864,7 @@ mwifiex_cmd_tdls_oper(struct mwifiex_private *priv,
                return -ENOTSUPP;
        }
 
-       le16_add_cpu(&cmd->size, config_len);
+       le16_unaligned_add_cpu(&cmd->size, config_len);
 
        return 0;
 }
@@ -2032,7 +2035,7 @@ int mwifiex_sta_prepare_cmd(struct mwifiex_private *priv, uint16_t cmd_no,
        case HostCmd_CMD_VERSION_EXT:
                cmd_ptr->command = cpu_to_le16(cmd_no);
                cmd_ptr->params.verext.version_str_sel =
-                       (u8) (*((u32 *) data_buf));
+                       (u8)(get_unaligned((u32 *)data_buf));
                memcpy(&cmd_ptr->params, data_buf,
                       sizeof(struct host_cmd_ds_version_ext));
                cmd_ptr->size =
@@ -2043,7 +2046,8 @@ int mwifiex_sta_prepare_cmd(struct mwifiex_private *priv, uint16_t cmd_no,
        case HostCmd_CMD_MGMT_FRAME_REG:
                cmd_ptr->command = cpu_to_le16(cmd_no);
                cmd_ptr->params.reg_mask.action = cpu_to_le16(cmd_action);
-               cmd_ptr->params.reg_mask.mask = cpu_to_le32(*(u32 *)data_buf);
+               cmd_ptr->params.reg_mask.mask = cpu_to_le32(
+                                               get_unaligned((u32 *)data_buf));
                cmd_ptr->size =
                        cpu_to_le16(sizeof(struct host_cmd_ds_mgmt_frame_reg) +
                                    S_DS_GEN);
@@ -2063,7 +2067,8 @@ int mwifiex_sta_prepare_cmd(struct mwifiex_private *priv, uint16_t cmd_no,
        case HostCmd_CMD_P2P_MODE_CFG:
                cmd_ptr->command = cpu_to_le16(cmd_no);
                cmd_ptr->params.mode_cfg.action = cpu_to_le16(cmd_action);
-               cmd_ptr->params.mode_cfg.mode = cpu_to_le16(*(u16 *)data_buf);
+               cmd_ptr->params.mode_cfg.mode = cpu_to_le16(
+                                               get_unaligned((u16 *)data_buf));
                cmd_ptr->size =
                        cpu_to_le16(sizeof(struct host_cmd_ds_p2p_mode_cfg) +
                                    S_DS_GEN);
@@ -2359,8 +2364,7 @@ int mwifiex_sta_init_cmd(struct mwifiex_private *priv, u8 first_sta, bool init)
        if (ret)
                return -1;
 
-       if (!disable_auto_ds &&
-           first_sta && priv->adapter->iface_type != MWIFIEX_USB &&
+       if (!disable_auto_ds && first_sta &&
            priv->bss_type != MWIFIEX_BSS_TYPE_UAP) {
                /* Enable auto deep sleep */
                auto_ds.auto_ds = DEEP_SLEEP_ON;
index 8548027abf71bbe01d2f115e938d3cc79f2da086..ab75da3e0c2b7c1d6d0b6206886bf838a7c4c1bf 100644 (file)
@@ -183,7 +183,7 @@ static int mwifiex_ret_802_11_snmp_mib(struct mwifiex_private *priv,
                    "query_type = %#x, buf size = %#x\n",
                    oid, query_type, le16_to_cpu(smib->buf_size));
        if (query_type == HostCmd_ACT_GEN_GET) {
-               ul_temp = le16_to_cpu(*((__le16 *) (smib->value)));
+               ul_temp = get_unaligned_le16(smib->value);
                if (data_buf)
                        *data_buf = ul_temp;
                switch (oid) {
@@ -741,7 +741,7 @@ mwifiex_ret_p2p_mode_cfg(struct mwifiex_private *priv,
        struct host_cmd_ds_p2p_mode_cfg *mode_cfg = &resp->params.mode_cfg;
 
        if (data_buf)
-               *((u16 *)data_buf) = le16_to_cpu(mode_cfg->mode);
+               put_unaligned_le16(le16_to_cpu(mode_cfg->mode), data_buf);
 
        return 0;
 }
index d63d163eb1ecaaa441d4da1861fc9038f7cd326e..b5b7664507eb51c1d93b6d26b8d44007aeed88fb 100644 (file)
@@ -670,7 +670,7 @@ int mwifiex_process_sta_event(struct mwifiex_private *priv)
                adapter->dbg.num_event_deauth++;
                if (priv->media_connected) {
                        reason_code =
-                               le16_to_cpu(*(__le16 *)adapter->event_body);
+                               get_unaligned_le16(adapter->event_body);
                        mwifiex_reset_connect_state(priv, reason_code, true);
                }
                break;
@@ -685,7 +685,7 @@ int mwifiex_process_sta_event(struct mwifiex_private *priv)
                adapter->dbg.num_event_disassoc++;
                if (priv->media_connected) {
                        reason_code =
-                               le16_to_cpu(*(__le16 *)adapter->event_body);
+                               get_unaligned_le16(adapter->event_body);
                        mwifiex_reset_connect_state(priv, reason_code, true);
                }
                break;
@@ -695,7 +695,7 @@ int mwifiex_process_sta_event(struct mwifiex_private *priv)
                adapter->dbg.num_event_link_lost++;
                if (priv->media_connected) {
                        reason_code =
-                               le16_to_cpu(*(__le16 *)adapter->event_body);
+                               get_unaligned_le16(adapter->event_body);
                        mwifiex_reset_connect_state(priv, reason_code, true);
                }
                break;
@@ -923,7 +923,7 @@ int mwifiex_process_sta_event(struct mwifiex_private *priv)
                                              adapter->event_body);
                break;
        case EVENT_AMSDU_AGGR_CTRL:
-               ctrl = le16_to_cpu(*(__le16 *)adapter->event_body);
+               ctrl = get_unaligned_le16(adapter->event_body);
                mwifiex_dbg(adapter, EVENT,
                            "event: AMSDU_AGGR_CTRL %d\n", ctrl);
 
index df9704de07150d82887de1db36c99fd15d3933f7..6e507c99e7927c7ae3792421f0105b5927e838d3 100644 (file)
@@ -431,6 +431,41 @@ mwifiex_add_wmm_info_ie(struct mwifiex_private *priv, struct sk_buff *skb,
        *buf++ = qosinfo; /* U-APSD not in use */
 }
 
+static void mwifiex_tdls_add_bss_co_2040(struct sk_buff *skb)
+{
+       struct ieee_types_bss_co_2040 *bssco;
+
+       bssco = (void *)skb_put(skb, sizeof(struct ieee_types_bss_co_2040));
+       bssco->ieee_hdr.element_id = WLAN_EID_BSS_COEX_2040;
+       bssco->ieee_hdr.len = sizeof(struct ieee_types_bss_co_2040) -
+                             sizeof(struct ieee_types_header);
+       bssco->bss_2040co = 0x01;
+}
+
+static void mwifiex_tdls_add_supported_chan(struct sk_buff *skb)
+{
+       struct ieee_types_generic *supp_chan;
+       u8 chan_supp[] = {1, 11};
+
+       supp_chan = (void *)skb_put(skb, (sizeof(struct ieee_types_header) +
+                                         sizeof(chan_supp)));
+       supp_chan->ieee_hdr.element_id = WLAN_EID_SUPPORTED_CHANNELS;
+       supp_chan->ieee_hdr.len = sizeof(chan_supp);
+       memcpy(supp_chan->data, chan_supp, sizeof(chan_supp));
+}
+
+static void mwifiex_tdls_add_oper_class(struct sk_buff *skb)
+{
+       struct ieee_types_generic *reg_class;
+       u8 rc_list[] = {1, 1, 2, 3, 4, 12, 22, 23, 24, 25, 27, 28, 29, 30,
+                       32, 33};
+
+       reg_class = (void *)skb_put(skb, (sizeof(struct ieee_types_header) +
+                                         sizeof(rc_list)));
+       reg_class->ieee_hdr.element_id = WLAN_EID_SUPPORTED_REGULATORY_CLASSES;
+       reg_class->ieee_hdr.len = sizeof(rc_list);
+       memcpy(reg_class->data, rc_list, sizeof(rc_list));
+}
+
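
The three helpers added above all emit the same 802.11 information-element layout: one byte of element ID, one byte of payload length, then the payload. A generic, buffer-based sketch of that encoding (the skb-based kernel helpers do the same thing via skb_put):

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    /* Append one 802.11 IE (id, len, payload) to buf; returns bytes written */
    static size_t put_ie(uint8_t *buf, uint8_t id, const uint8_t *data,
                         uint8_t len)
    {
            buf[0] = id;
            buf[1] = len;
            memcpy(buf + 2, data, len);
            return 2u + len;
    }

    int main(void)
    {
            uint8_t frame[64];
            /* same payload as the supported-channels helper above */
            const uint8_t chan_supp[] = {1, 11};
            size_t off = 0;

            off += put_ie(frame + off, 36 /* WLAN_EID_SUPPORTED_CHANNELS */,
                          chan_supp, sizeof(chan_supp));
            printf("encoded %zu bytes, id=%u len=%u\n", off, frame[0], frame[1]);
            return 0;
    }
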
 static int mwifiex_prep_tdls_encap_data(struct mwifiex_private *priv,
                                        const u8 *peer, u8 action_code,
                                        u8 dialog_token,
@@ -484,7 +519,9 @@ static int mwifiex_prep_tdls_encap_data(struct mwifiex_private *priv,
                }
 
                mwifiex_tdls_add_ext_capab(priv, skb);
-               mwifiex_tdls_add_qos_capab(skb);
+               mwifiex_tdls_add_bss_co_2040(skb);
+               mwifiex_tdls_add_supported_chan(skb);
+               mwifiex_tdls_add_oper_class(skb);
                mwifiex_add_wmm_info_ie(priv, skb, 0);
                break;
 
@@ -522,7 +559,9 @@ static int mwifiex_prep_tdls_encap_data(struct mwifiex_private *priv,
                }
 
                mwifiex_tdls_add_ext_capab(priv, skb);
-               mwifiex_tdls_add_qos_capab(skb);
+               mwifiex_tdls_add_bss_co_2040(skb);
+               mwifiex_tdls_add_supported_chan(skb);
+               mwifiex_tdls_add_oper_class(skb);
                mwifiex_add_wmm_info_ie(priv, skb, 0);
                break;
 
@@ -612,6 +651,9 @@ int mwifiex_send_tdls_data_frame(struct mwifiex_private *priv, const u8 *peer,
                  sizeof(struct ieee_types_bss_co_2040) +
                  sizeof(struct ieee80211_ht_operation) +
                  sizeof(struct ieee80211_tdls_lnkie) +
                  (2 * (sizeof(struct ieee_types_header))) +
                  MWIFIEX_SUPPORTED_CHANNELS +
                  MWIFIEX_OPERATING_CLASSES +
                  sizeof(struct ieee80211_wmm_param_ie) +
                  extra_ies_len;
 
@@ -760,7 +802,10 @@ mwifiex_construct_tdls_action_frame(struct mwifiex_private *priv,
                }
 
                mwifiex_tdls_add_ext_capab(priv, skb);
+               mwifiex_tdls_add_bss_co_2040(skb);
+               mwifiex_tdls_add_supported_chan(skb);
                mwifiex_tdls_add_qos_capab(skb);
+               mwifiex_tdls_add_oper_class(skb);
                break;
        default:
                mwifiex_dbg(priv->adapter, ERROR, "Unknown TDLS action frame type\n");
@@ -857,7 +902,7 @@ void mwifiex_process_tdls_action_frame(struct mwifiex_private *priv,
        struct mwifiex_sta_node *sta_ptr;
        u8 *peer, *pos, *end;
        u8 i, action, basic;
-       __le16 cap = 0;
+       u16 cap = 0;
        int ie_len = 0;
 
        if (len < (sizeof(struct ethhdr) + 3))
@@ -879,7 +924,7 @@ void mwifiex_process_tdls_action_frame(struct mwifiex_private *priv,
 
                pos = buf + sizeof(struct ethhdr) + 4;
                /* payload 1+ category 1 + action 1 + dialog 1 */
-               cap = cpu_to_le16(*(u16 *)pos);
+               cap = get_unaligned_le16(pos);
                ie_len = len - sizeof(struct ethhdr) - TDLS_REQ_FIX_LEN;
                pos += 2;
                break;
@@ -889,7 +934,7 @@ void mwifiex_process_tdls_action_frame(struct mwifiex_private *priv,
                        return;
                /* payload 1+ category 1 + action 1 + dialog 1 + status code 2*/
                pos = buf + sizeof(struct ethhdr) + 6;
-               cap = cpu_to_le16(*(u16 *)pos);
+               cap = get_unaligned_le16(pos);
                ie_len = len - sizeof(struct ethhdr) - TDLS_RESP_FIX_LEN;
                pos += 2;
                break;
@@ -909,7 +954,7 @@ void mwifiex_process_tdls_action_frame(struct mwifiex_private *priv,
        if (!sta_ptr)
                return;
 
-       sta_ptr->tdls_cap.capab = cap;
+       sta_ptr->tdls_cap.capab = cpu_to_le16(cap);
 
        for (end = pos + ie_len; pos + 1 < end; pos += 2 + pos[1]) {
                if (pos + 2 + pos[1] > end)
@@ -969,7 +1014,7 @@ void mwifiex_process_tdls_action_frame(struct mwifiex_private *priv,
                case WLAN_EID_AID:
                        if (priv->adapter->is_hw_11ac_capable)
                                sta_ptr->tdls_cap.aid =
-                                             le16_to_cpu(*(__le16 *)(pos + 2));
+                                       get_unaligned_le16(pos + 2);
                default:
                        break;
                }
index d24eca34ac119d6a917ac92640a41de620a53eac..e10b2a52e78fe7c04b24a62d63af1e6b7bcec682 100644 (file)
@@ -202,7 +202,7 @@ int mwifiex_process_uap_event(struct mwifiex_private *priv)
                            "AP EVENT: event id: %#x\n", eventcause);
                break;
        case EVENT_AMSDU_AGGR_CTRL:
-               ctrl = le16_to_cpu(*(__le16 *)adapter->event_body);
+               ctrl = get_unaligned_le16(adapter->event_body);
                mwifiex_dbg(adapter, EVENT,
                            "event: AMSDU_AGGR_CTRL %d\n", ctrl);
 
index 9cf3334adf4d56e2b3dcbc77bf847c028001a443..2f7705c50161404825695846bd94e8c95bfd5a7c 100644 (file)
@@ -306,9 +306,17 @@ static int mwifiex_usb_submit_rx_urb(struct urb_context *ctx, int size)
                }
        }
 
-       usb_fill_bulk_urb(ctx->urb, card->udev,
-                         usb_rcvbulkpipe(card->udev, ctx->ep), ctx->skb->data,
-                         size, mwifiex_usb_rx_complete, (void *)ctx);
+       if (card->rx_cmd_ep == ctx->ep &&
+           card->rx_cmd_ep_type == USB_ENDPOINT_XFER_INT)
+               usb_fill_int_urb(ctx->urb, card->udev,
+                                usb_rcvintpipe(card->udev, ctx->ep),
+                                ctx->skb->data, size, mwifiex_usb_rx_complete,
+                                (void *)ctx, card->rx_cmd_interval);
+       else
+               usb_fill_bulk_urb(ctx->urb, card->udev,
+                                 usb_rcvbulkpipe(card->udev, ctx->ep),
+                                 ctx->skb->data, size, mwifiex_usb_rx_complete,
+                                 (void *)ctx);
 
        if (card->rx_cmd_ep == ctx->ep)
                atomic_inc(&card->rx_cmd_urb_pending);
@@ -424,10 +432,13 @@ static int mwifiex_usb_probe(struct usb_interface *intf,
                epd = &iface_desc->endpoint[i].desc;
                if (usb_endpoint_dir_in(epd) &&
                    usb_endpoint_num(epd) == MWIFIEX_USB_EP_CMD_EVENT &&
-                   usb_endpoint_xfer_bulk(epd)) {
-                       pr_debug("info: bulk IN: max pkt size: %d, addr: %d\n",
+                   (usb_endpoint_xfer_bulk(epd) ||
+                    usb_endpoint_xfer_int(epd))) {
+                       card->rx_cmd_ep_type = usb_endpoint_type(epd);
+                       card->rx_cmd_interval = epd->bInterval;
+                       pr_debug("info: Rx CMD/EVT:: max pkt size: %d, addr: %d, ep_type: %d\n",
                                 le16_to_cpu(epd->wMaxPacketSize),
-                                epd->bEndpointAddress);
+                                epd->bEndpointAddress, card->rx_cmd_ep_type);
                        card->rx_cmd_ep = usb_endpoint_num(epd);
                        atomic_set(&card->rx_cmd_urb_pending, 0);
                }
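
The probe and URB-submission changes above key off the endpoint transfer type so that interrupt command endpoints get usb_fill_int_urb() instead of the bulk variant. In a USB endpoint descriptor, the type lives in the low two bits of bmAttributes (0 control, 1 isochronous, 2 bulk, 3 interrupt); a tiny stand-alone classifier:

    #include <stdint.h>
    #include <stdio.h>

    /* bmAttributes bits 1:0 of a USB endpoint descriptor select the type */
    static const char *xfer_type(uint8_t bmAttributes)
    {
            switch (bmAttributes & 0x3) {
            case 0:  return "control";
            case 1:  return "isochronous";
            case 2:  return "bulk";
            default: return "interrupt";
            }
    }

    int main(void)
    {
            printf("%s %s\n", xfer_type(0x02), xfer_type(0x03));
            return 0;
    }
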
@@ -461,10 +472,16 @@ static int mwifiex_usb_probe(struct usb_interface *intf,
                }
                if (usb_endpoint_dir_out(epd) &&
                    usb_endpoint_num(epd) == MWIFIEX_USB_EP_CMD_EVENT &&
-                   usb_endpoint_xfer_bulk(epd)) {
+                   (usb_endpoint_xfer_bulk(epd) ||
+                    usb_endpoint_xfer_int(epd))) {
+                       card->tx_cmd_ep_type = usb_endpoint_type(epd);
+                       card->tx_cmd_interval = epd->bInterval;
                        pr_debug("info: bulk OUT: max pkt size: %d, addr: %d\n",
                                 le16_to_cpu(epd->wMaxPacketSize),
                                 epd->bEndpointAddress);
+                       pr_debug("info: Tx CMD:: max pkt size: %d, addr: %d, ep_type: %d\n",
+                                le16_to_cpu(epd->wMaxPacketSize),
+                                epd->bEndpointAddress, card->tx_cmd_ep_type);
                        card->tx_cmd_ep = usb_endpoint_num(epd);
                        atomic_set(&card->tx_cmd_urb_pending, 0);
                        card->bulk_out_maxpktsize =
@@ -884,9 +901,17 @@ static int mwifiex_usb_host_to_card(struct mwifiex_adapter *adapter, u8 ep,
        context->skb = skb;
        tx_urb = context->urb;
 
-       usb_fill_bulk_urb(tx_urb, card->udev, usb_sndbulkpipe(card->udev, ep),
-                         data, skb->len, mwifiex_usb_tx_complete,
-                         (void *)context);
+       if (ep == card->tx_cmd_ep &&
+           card->tx_cmd_ep_type == USB_ENDPOINT_XFER_INT)
+               usb_fill_int_urb(tx_urb, card->udev,
+                                usb_sndintpipe(card->udev, ep), data,
+                                skb->len, mwifiex_usb_tx_complete,
+                                (void *)context, card->tx_cmd_interval);
+       else
+               usb_fill_bulk_urb(tx_urb, card->udev,
+                                 usb_sndbulkpipe(card->udev, ep), data,
+                                 skb->len, mwifiex_usb_tx_complete,
+                                 (void *)context);
 
        tx_urb->transfer_flags |= URB_ZERO_PACKET;
 
index e5f204ea018bd8022b5da4f71f8416fadff6d410..e36bd63172ff9606ee61979fdb51a96330b996ef 100644 (file)
@@ -90,6 +90,10 @@ struct usb_card_rec {
        struct urb_context tx_cmd;
        u8 mc_resync_flag;
        struct usb_tx_data_port port[MWIFIEX_TX_DATA_PORT];
+       int rx_cmd_ep_type;
+       u8 rx_cmd_interval;
+       int tx_cmd_ep_type;
+       u8 tx_cmd_interval;
 };
 
 struct fw_header {
@@ -102,12 +106,12 @@ struct fw_header {
 struct fw_sync_header {
        __le32 cmd;
        __le32 seq_num;
-};
+} __packed;
 
 struct fw_data {
        struct fw_header fw_hdr;
        __le32 seq_num;
        u8 data[1];
-};
+} __packed;
 
 #endif /*_MWIFIEX_USB_H */
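
Marking fw_sync_header and fw_data as __packed matters because these structs are overlaid on byte streams exchanged with the device: any compiler-inserted padding would shift the on-wire offsets. A self-contained illustration with a hypothetical wire header where packing visibly changes the layout (GCC/Clang attribute syntax):

    #include <stdint.h>
    #include <stdio.h>

    /* Hypothetical wire header: 1-byte type followed by a 4-byte sequence */
    struct hdr_padded {
            uint8_t  type;
            uint32_t seq;
    };

    struct hdr_packed {
            uint8_t  type;
            uint32_t seq;
    } __attribute__((packed));

    int main(void)
    {
            /* Typically prints 8 vs 5: padding would corrupt wire offsets */
            printf("padded=%zu packed=%zu\n",
                   sizeof(struct hdr_padded), sizeof(struct hdr_packed));
            return 0;
    }
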
index b1ab8da121dd8a9e015fb0a82bf7e0c5ab031749..0cd68ffc2c74dc1c849e8011b84208ebf74a6730 100644 (file)
@@ -274,13 +274,13 @@ int mwifiex_debug_info_to_buffer(struct mwifiex_private *priv, char *buf,
                                val = *((u8 *)addr);
                                break;
                        case 2:
-                               val = *((u16 *)addr);
+                               val = get_unaligned((u16 *)addr);
                                break;
                        case 4:
-                               val = *((u32 *)addr);
+                               val = get_unaligned((u32 *)addr);
                                break;
                        case 8:
-                               val = *((long long *)addr);
+                               val = get_unaligned((long long *)addr);
                                break;
                        default:
                                val = -1;
index b541d66c01ebf47362a53573296ac34868c0fe54..c386992abcdb870eb3fef637f627f4abab33de44 100644 (file)
@@ -93,4 +93,9 @@ static inline dma_addr_t MWIFIEX_SKB_DMA_ADDR(struct sk_buff *skb)
 int mwifiex_debug_info_to_buffer(struct mwifiex_private *priv, char *buf,
                                 struct mwifiex_debug_info *info);
 
+static inline void le16_unaligned_add_cpu(__le16 *var, u16 val)
+{
+       put_unaligned_le16(get_unaligned_le16(var) + val, var);
+}
+
 #endif /* !_MWIFIEX_UTIL_H_ */
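
The new le16_unaligned_add_cpu() helper, used throughout the command-preparation hunks above, is a read-modify-write on a little-endian u16 that may sit at any byte offset (TLV length fields inside packed command buffers). An illustrative portable equivalent of what it does:

    #include <stdint.h>
    #include <stdio.h>

    /* Illustrative equivalent of le16_unaligned_add_cpu(): LE16 += val */
    static void demo_le16_unaligned_add(void *var, uint16_t val)
    {
            uint8_t *b = var;
            uint16_t v = (uint16_t)(b[0] | b[1] << 8);

            v = (uint16_t)(v + val);
            b[0] = (uint8_t)v;
            b[1] = (uint8_t)(v >> 8);
    }

    int main(void)
    {
            /* type=1, len=0x0010, one payload byte */
            uint8_t tlv[5] = {0x01, 0x00, 0x10, 0x00, 0xAA};

            demo_le16_unaligned_add(tlv + 2, 4); /* grow the TLV length */
            printf("len=0x%02X%02X\n", tlv[3], tlv[2]); /* len=0x0014 */
            return 0;
    }
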
index de62f5dcb62f7971fbc21adcc1212a1ed9ac68aa..a1d1cfe214d2e4cef2e6f2ebb3873b1c8a9a2034 100644 (file)
@@ -201,7 +201,7 @@ endif
 
 config RT2800SOC
        tristate "Ralink WiSoC support"
-       depends on SOC_RT288X || SOC_RT305X
+       depends on SOC_RT288X || SOC_RT305X || SOC_MT7620
        select RT2X00_LIB_SOC
        select RT2X00_LIB_MMIO
        select RT2X00_LIB_CRYPTO
index 256496bfbafb2ef29b4560c9d93753f731035656..6a8c93fb6a43587614d27a734306e350ef509efe 100644 (file)
@@ -79,6 +79,7 @@
 #define RF5372                         0x5372
 #define RF5390                         0x5390
 #define RF5392                         0x5392
+#define RF7620                         0x7620
 
 /*
  * Chipset revisions.
 #define RF_CSR_CFG_WRITE               FIELD32(0x00010000)
 #define RF_CSR_CFG_BUSY                        FIELD32(0x00020000)
 
+/*
+ * MT7620 RF registers (reversed order)
+ */
+#define RF_CSR_CFG_DATA_MT7620         FIELD32(0x0000ff00)
+#define RF_CSR_CFG_REGNUM_MT7620       FIELD32(0x03ff0000)
+#define RF_CSR_CFG_WRITE_MT7620                FIELD32(0x00000010)
+#define RF_CSR_CFG_BUSY_MT7620         FIELD32(0x00000001)
+
+/* undocumented registers for calibration of new MAC */
+#define RF_CONTROL0                    0x0518
+#define RF_BYPASS0                     0x051c
+#define RF_CONTROL1                    0x0520
+#define RF_BYPASS1                     0x0524
+#define RF_CONTROL2                    0x0528
+#define RF_BYPASS2                     0x052c
+#define RF_CONTROL3                    0x0530
+#define RF_BYPASS3                     0x0534
+
 /*
  * EFUSE_CSR: RT30x0 EEPROM
  */
 #define AUTOWAKEUP_CFG_TBCN_BEFORE_WAKE        FIELD32(0x00007f00)
 #define AUTOWAKEUP_CFG_AUTOWAKE                FIELD32(0x00008000)
 
+/*
+ * MIMO_PS_CFG: MIMO Power-save Configuration
+ */
+#define MIMO_PS_CFG                    0x1210
+#define MIMO_PS_CFG_MMPS_BB_EN         FIELD32(0x00000001)
+#define MIMO_PS_CFG_MMPS_RX_ANT_NUM    FIELD32(0x00000006)
+#define MIMO_PS_CFG_MMPS_RF_EN         FIELD32(0x00000008)
+#define MIMO_PS_CFG_RX_STBY_POL                FIELD32(0x00000010)
+#define MIMO_PS_CFG_RX_RX_STBY0                FIELD32(0x00000020)
+
 /*
  * EDCA_AC0_CFG:
  */
 #define TX_PWR_CFG_0_OFDM6_CH1         FIELD32(0x00f00000)
 #define TX_PWR_CFG_0_OFDM12_CH0                FIELD32(0x0f000000)
 #define TX_PWR_CFG_0_OFDM12_CH1                FIELD32(0xf0000000)
+/* bits for new 2T devices */
+#define TX_PWR_CFG_0B_1MBS_2MBS                FIELD32(0x000000ff)
+#define TX_PWR_CFG_0B_5MBS_11MBS               FIELD32(0x0000ff00)
+#define TX_PWR_CFG_0B_6MBS_9MBS                FIELD32(0x00ff0000)
+#define TX_PWR_CFG_0B_12MBS_18MBS      FIELD32(0xff000000)
 
 /*
  * TX_PWR_CFG_1:
 #define TX_PWR_CFG_1_MCS0_CH1          FIELD32(0x00f00000)
 #define TX_PWR_CFG_1_MCS2_CH0          FIELD32(0x0f000000)
 #define TX_PWR_CFG_1_MCS2_CH1          FIELD32(0xf0000000)
+/* bits for new 2T devices */
+#define TX_PWR_CFG_1B_24MBS_36MBS      FIELD32(0x000000ff)
+#define TX_PWR_CFG_1B_48MBS            FIELD32(0x0000ff00)
+#define TX_PWR_CFG_1B_MCS0_MCS1                FIELD32(0x00ff0000)
+#define TX_PWR_CFG_1B_MCS2_MCS3                FIELD32(0xff000000)
 
 /*
  * TX_PWR_CFG_2:
 #define TX_PWR_CFG_2_MCS8_CH1          FIELD32(0x00f00000)
 #define TX_PWR_CFG_2_MCS10_CH0         FIELD32(0x0f000000)
 #define TX_PWR_CFG_2_MCS10_CH1         FIELD32(0xf0000000)
+/* bits for new 2T devices */
+#define TX_PWR_CFG_2B_MCS4_MCS5                FIELD32(0x000000ff)
+#define TX_PWR_CFG_2B_MCS6_MCS7                FIELD32(0x0000ff00)
+#define TX_PWR_CFG_2B_MCS8_MCS9                FIELD32(0x00ff0000)
+#define TX_PWR_CFG_2B_MCS10_MCS11      FIELD32(0xff000000)
 
 /*
  * TX_PWR_CFG_3:
 #define TX_PWR_CFG_3_STBC0_CH1         FIELD32(0x00f00000)
 #define TX_PWR_CFG_3_STBC2_CH0         FIELD32(0x0f000000)
 #define TX_PWR_CFG_3_STBC2_CH1         FIELD32(0xf0000000)
+/* bits for new 2T devices */
+#define TX_PWR_CFG_3B_MCS12_MCS13      FIELD32(0x000000ff)
+#define TX_PWR_CFG_3B_MCS14            FIELD32(0x0000ff00)
+#define TX_PWR_CFG_3B_STBC_MCS0_MCS1   FIELD32(0x00ff0000)
+#define TX_PWR_CFG_3B_STBC_MCS2_MSC3   FIELD32(0xff000000)
 
 /*
  * TX_PWR_CFG_4:
 #define TX_PWR_CFG_4_UKNOWN7           FIELD32(0x00000f00)
 #define TX_PWR_CFG_4_UKNOWN8           FIELD32(0x0000f000)
 /* bits for 3T devices */
-#define TX_PWR_CFG_3_STBC4_CH0         FIELD32(0x0000000f)
-#define TX_PWR_CFG_3_STBC4_CH1         FIELD32(0x000000f0)
-#define TX_PWR_CFG_3_STBC6_CH0         FIELD32(0x00000f00)
-#define TX_PWR_CFG_3_STBC6_CH1         FIELD32(0x0000f000)
+#define TX_PWR_CFG_4_STBC4_CH0         FIELD32(0x0000000f)
+#define TX_PWR_CFG_4_STBC4_CH1         FIELD32(0x000000f0)
+#define TX_PWR_CFG_4_STBC6_CH0         FIELD32(0x00000f00)
+#define TX_PWR_CFG_4_STBC6_CH1         FIELD32(0x0000f000)
+/* bits for new 2T devices */
+#define TX_PWR_CFG_4B_STBC_MCS4_MCS5   FIELD32(0x000000ff)
+#define TX_PWR_CFG_4B_STBC_MCS6                FIELD32(0x0000ff00)
 
 /*
  * TX_PIN_CFG:
 #define TX_PIN_CFG_RFTR_POL            FIELD32(0x00020000)
 #define TX_PIN_CFG_TRSW_EN             FIELD32(0x00040000)
 #define TX_PIN_CFG_TRSW_POL            FIELD32(0x00080000)
+#define TX_PIN_CFG_RFRX_EN             FIELD32(0x00100000)
+#define TX_PIN_CFG_RFRX_POL            FIELD32(0x00200000)
 #define TX_PIN_CFG_PA_PE_A2_EN         FIELD32(0x01000000)
 #define TX_PIN_CFG_PA_PE_G2_EN         FIELD32(0x02000000)
 #define TX_PIN_CFG_PA_PE_A2_POL                FIELD32(0x04000000)
 #define TX_PWR_CFG_4_EXT_STBC4_CH2     FIELD32(0x0000000f)
 #define TX_PWR_CFG_4_EXT_STBC6_CH2     FIELD32(0x00000f00)
 
+/* TXn_RF_GAIN_CORRECT: RF Gain Correction for each RF_ALC[3:2]
+ * Unit: 0.1 dB, Range: -3.2 dB to 3.1 dB
+ */
+#define TX0_RF_GAIN_CORRECT            0x13a0
+#define TX0_RF_GAIN_CORRECT_GAIN_CORR_0        FIELD32(0x0000003f)
+#define TX0_RF_GAIN_CORRECT_GAIN_CORR_1        FIELD32(0x00003f00)
+#define TX0_RF_GAIN_CORRECT_GAIN_CORR_2        FIELD32(0x003f0000)
+#define TX0_RF_GAIN_CORRECT_GAIN_CORR_3        FIELD32(0x3f000000)
+
+#define TX1_RF_GAIN_CORRECT            0x13a4
+#define TX1_RF_GAIN_CORRECT_GAIN_CORR_0        FIELD32(0x0000003f)
+#define TX1_RF_GAIN_CORRECT_GAIN_CORR_1        FIELD32(0x00003f00)
+#define TX1_RF_GAIN_CORRECT_GAIN_CORR_2        FIELD32(0x003f0000)
+#define TX1_RF_GAIN_CORRECT_GAIN_CORR_3        FIELD32(0x3f000000)
+
+/* TXn_RF_GAIN_ATTEN: TXn RF Gain Attenuation Level
+ * Format: 7-bit, signed value
+ * Unit: 0.5 dB, Range: -20 dB to -5 dB
+ */
+#define TX0_RF_GAIN_ATTEN              0x13a8
+#define TX0_RF_GAIN_ATTEN_LEVEL_0      FIELD32(0x0000007f)
+#define TX0_RF_GAIN_ATTEN_LEVEL_1      FIELD32(0x00007f00)
+#define TX0_RF_GAIN_ATTEN_LEVEL_2      FIELD32(0x007f0000)
+#define TX0_RF_GAIN_ATTEN_LEVEL_3      FIELD32(0x7f000000)
+#define TX1_RF_GAIN_ATTEN              0x13ac
+#define TX1_RF_GAIN_ATTEN_LEVEL_0      FIELD32(0x0000007f)
+#define TX1_RF_GAIN_ATTEN_LEVEL_1      FIELD32(0x00007f00)
+#define TX1_RF_GAIN_ATTEN_LEVEL_2      FIELD32(0x007f0000)
+#define TX1_RF_GAIN_ATTEN_LEVEL_3      FIELD32(0x7f000000)
+
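The gain-correction and attenuation fields above hold narrow two's-complement values (6 bits in 0.1 dB units for GAIN_CORR, 7 bits in 0.5 dB units for GAIN_ATTEN). A standalone sketch of sign-extending such an n-bit field to interpret the documented dB ranges (sext is a hypothetical helper):

#include <stdint.h>
#include <stdio.h>

/* Hypothetical helper: sign-extend an n-bit two's-complement field */
static int32_t sext(uint32_t raw, unsigned int bits)
{
	uint32_t sign = 1u << (bits - 1);

	return (int32_t)((raw ^ sign) - sign);
}

int main(void)
{
	/* 6-bit GAIN_CORR, unit 0.1 dB: 0x1f -> +3.1 dB, 0x3f -> -0.1 dB,
	 * consistent with the -3.2 dB to 3.1 dB range above
	 */
	printf("%.1f dB\n", sext(0x1f, 6) * 0.1);
	printf("%.1f dB\n", sext(0x3f, 6) * 0.1);
	/* 7-bit GAIN_ATTEN, unit 0.5 dB: 0x58 -> -40 -> -20.0 dB */
	printf("%.1f dB\n", sext(0x58, 7) * 0.5);
	return 0;
}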
+/* TX_ALC_CFG_0: TX Automatic Level Control Configuration 0
+ * TX_ALC_LIMIT_n: TXn upper limit
+ * TX_ALC_CH_INIT_n: TXn channel initial transmission gain
+ * Unit: 0.5 dB, Range: 0 to 23.5 dB
+ */
+#define TX_ALC_CFG_0                   0x13b0
+#define TX_ALC_CFG_0_CH_INIT_0         FIELD32(0x0000003f)
+#define TX_ALC_CFG_0_CH_INIT_1         FIELD32(0x00003f00)
+#define TX_ALC_CFG_0_LIMIT_0           FIELD32(0x003f0000)
+#define TX_ALC_CFG_0_LIMIT_1           FIELD32(0x3f000000)
+
+/* TX_ALC_CFG_1: TX Automatic Level Control Configuration 1
+ * TX_TEMP_COMP:      TX Power Temperature Compensation
+ *                    Unit: 0.5 dB, Range: -10 dB to 10 dB
+ * TXn_GAIN_FINE:     TXn Gain Fine Adjustment
+ *                    Unit: 0.1 dB, Range: -0.8 dB to 0.7 dB
+ * RF_TOS_DLY:        Sets the RF_TOS_EN assertion delay after
+ *                    deassertion of PA_PE.
+ *                    Unit: 0.25 usec
+ * TXn_RF_GAIN_ATTEN: TXn RF gain attenuation selector
+ * RF_TOS_TIMEOUT:    time-out value for RF_TOS_ENABLE
+ *                    deassertion if RF_TOS_DONE is missing.
+ *                    Unit: 0.25 usec
+ * RF_TOS_ENABLE:     TX offset calibration enable
+ * ROS_BUSY_EN:       RX offset calibration busy enable
+ */
+#define TX_ALC_CFG_1                   0x13b4
+#define TX_ALC_CFG_1_TX_TEMP_COMP      FIELD32(0x0000003f)
+#define TX_ALC_CFG_1_TX0_GAIN_FINE     FIELD32(0x00000f00)
+#define TX_ALC_CFG_1_TX1_GAIN_FINE     FIELD32(0x0000f000)
+#define TX_ALC_CFG_1_RF_TOS_DLY                FIELD32(0x00070000)
+#define TX_ALC_CFG_1_TX0_RF_GAIN_ATTEN FIELD32(0x00300000)
+#define TX_ALC_CFG_1_TX1_RF_GAIN_ATTEN FIELD32(0x00c00000)
+#define TX_ALC_CFG_1_RF_TOS_TIMEOUT    FIELD32(0x3f000000)
+#define TX_ALC_CFG_1_RF_TOS_ENABLE     FIELD32(0x40000000)
+#define TX_ALC_CFG_1_ROS_BUSY_EN       FIELD32(0x80000000)
+
+/* TXn_BB_GAIN_ATTEN: TXn BB Gain Attenuation Level
+ * Format: 5-bit signed values
+ * Unit: 0.5 dB, Range: -8 dB to 7 dB
+ */
+#define TX0_BB_GAIN_ATTEN              0x13c0
+#define TX0_BB_GAIN_ATTEN_LEVEL_0      FIELD32(0x0000001f)
+#define TX0_BB_GAIN_ATTEN_LEVEL_1      FIELD32(0x00001f00)
+#define TX0_BB_GAIN_ATTEN_LEVEL_2      FIELD32(0x001f0000)
+#define TX0_BB_GAIN_ATTEN_LEVEL_3      FIELD32(0x1f000000)
+#define TX1_BB_GAIN_ATTEN              0x13c4
+#define TX1_BB_GAIN_ATTEN_LEVEL_0      FIELD32(0x0000001f)
+#define TX1_BB_GAIN_ATTEN_LEVEL_1      FIELD32(0x00001f00)
+#define TX1_BB_GAIN_ATTEN_LEVEL_2      FIELD32(0x001f0000)
+#define TX1_BB_GAIN_ATTEN_LEVEL_3      FIELD32(0x1f000000)
+
+/* TX_ALC_VGA3: TX Automatic Level Correction Variable Gain Amplifier 3 */
+#define TX_ALC_VGA3                    0x13c8
+#define TX_ALC_VGA3_TX0_ALC_VGA3       FIELD32(0x0000001f)
+#define TX_ALC_VGA3_TX1_ALC_VGA3       FIELD32(0x00001f00)
+#define TX_ALC_VGA3_TX0_ALC_VGA2       FIELD32(0x001f0000)
+#define TX_ALC_VGA3_TX1_ALC_VGA2       FIELD32(0x1f000000)
+
 /* TX_PWR_CFG_7 */
 #define TX_PWR_CFG_7                   0x13d4
 #define TX_PWR_CFG_7_OFDM54_CH0                FIELD32(0x0000000f)
 #define TX_PWR_CFG_7_MCS7_CH0          FIELD32(0x000f0000)
 #define TX_PWR_CFG_7_MCS7_CH1          FIELD32(0x00f00000)
 #define TX_PWR_CFG_7_MCS7_CH2          FIELD32(0x0f000000)
+/* bits for new 2T devices */
+#define TX_PWR_CFG_7B_54MBS            FIELD32(0x000000ff)
+#define TX_PWR_CFG_7B_MCS7             FIELD32(0x00ff0000)
+
 
 /* TX_PWR_CFG_8 */
 #define TX_PWR_CFG_8                   0x13d8
 #define TX_PWR_CFG_8_MCS23_CH0         FIELD32(0x000f0000)
 #define TX_PWR_CFG_8_MCS23_CH1         FIELD32(0x00f00000)
 #define TX_PWR_CFG_8_MCS23_CH2         FIELD32(0x0f000000)
+/* bits for new 2T devices */
+#define TX_PWR_CFG_8B_MCS15            FIELD32(0x000000ff)
+
 
 /* TX_PWR_CFG_9 */
 #define TX_PWR_CFG_9                   0x13dc
 #define TX_PWR_CFG_9_STBC7_CH0         FIELD32(0x0000000f)
 #define TX_PWR_CFG_9_STBC7_CH1         FIELD32(0x000000f0)
 #define TX_PWR_CFG_9_STBC7_CH2         FIELD32(0x00000f00)
+/* bits for new 2T devices */
+#define TX_PWR_CFG_9B_STBC_MCS7                FIELD32(0x000000ff)
 
 /*
  * RX_FILTER_CFG: RX configuration register.
 #define TX_STA_FIFO_WCID               FIELD32(0x0000ff00)
 #define TX_STA_FIFO_SUCCESS_RATE       FIELD32(0xffff0000)
 #define TX_STA_FIFO_MCS                        FIELD32(0x007f0000)
+#define TX_STA_FIFO_BW                 FIELD32(0x00800000)
+#define TX_STA_FIFO_SGI                        FIELD32(0x01000000)
 #define TX_STA_FIFO_PHYMODE            FIELD32(0xc0000000)
 
 /*
@@ -2135,11 +2290,14 @@ struct mac_iveiv_entry {
 #define RFCSR1_TX1_PD                  FIELD8(0x20)
 #define RFCSR1_RX2_PD                  FIELD8(0x40)
 #define RFCSR1_TX2_PD                  FIELD8(0x80)
+#define RFCSR1_TX2_EN_MT7620           FIELD8(0x02)
 
 /*
  * RFCSR 2:
  */
 #define RFCSR2_RESCAL_EN               FIELD8(0x80)
+#define RFCSR2_RX2_EN_MT7620           FIELD8(0x02)
+#define RFCSR2_TX2_EN_MT7620           FIELD8(0x20)
 
 /*
  * RFCSR 3:
@@ -2157,6 +2315,12 @@ struct mac_iveiv_entry {
 #define RFCSR3_BIT4                    FIELD8(0x10)
 #define RFCSR3_BIT5                    FIELD8(0x20)
 
+/*
+ * RFCSR 4:
+ * VCOCAL_EN used by MT7620
+ */
+#define RFCSR4_VCOCAL_EN               FIELD8(0x80)
+
 /*
  * RFCSR 5:
  */
@@ -2212,6 +2376,7 @@ struct mac_iveiv_entry {
  */
 #define RFCSR13_TX_POWER               FIELD8(0x1f)
 #define RFCSR13_DR0                    FIELD8(0xe0)
+#define RFCSR13_RDIV_MT7620            FIELD8(0x03)
 
 /*
  * RFCSR 15:
@@ -2222,6 +2387,8 @@ struct mac_iveiv_entry {
  * RFCSR 16:
  */
 #define RFCSR16_TXMIXER_GAIN           FIELD8(0x07)
+#define RFCSR16_RF_PLL_FREQ_SEL_MT7620 FIELD8(0x0F)
+#define RFCSR16_SDM_MODE_MT7620                FIELD8(0xE0)
 
 /*
  * RFCSR 17:
@@ -2234,6 +2401,8 @@ struct mac_iveiv_entry {
 /* RFCSR 18 */
 #define RFCSR18_XO_TUNE_BYPASS         FIELD8(0x40)
 
+/* RFCSR 19 */
+#define RFCSR19_K                      FIELD8(0x03)
 
 /*
  * RFCSR 20:
@@ -2244,11 +2413,14 @@ struct mac_iveiv_entry {
  * RFCSR 21:
  */
 #define RFCSR21_RX_LO2_EN              FIELD8(0x08)
+#define RFCSR21_BIT1                   FIELD8(0x01)
+#define RFCSR21_BIT8                   FIELD8(0x80)
 
 /*
  * RFCSR 22:
  */
 #define RFCSR22_BASEBAND_LOOPBACK      FIELD8(0x01)
+#define RFCSR22_FREQPLAN_D_MT7620      FIELD8(0x07)
 
 /*
  * RFCSR 23:
@@ -2270,6 +2442,11 @@ struct mac_iveiv_entry {
 #define RFCSR27_R3                     FIELD8(0x30)
 #define RFCSR27_R4                     FIELD8(0x40)
 
+/*
+ * RFCSR 28:
+ */
+#define RFCSR28_CH11_HT40              FIELD8(0x04)
+
 /*
  * RFCSR 29:
  */
@@ -2331,6 +2508,7 @@ struct mac_iveiv_entry {
  */
 #define RFCSR42_BIT1                   FIELD8(0x01)
 #define RFCSR42_BIT4                   FIELD8(0x08)
+#define RFCSR42_TX2_EN_MT7620          FIELD8(0x40)
 
 /*
  * RFCSR 49:
@@ -2433,6 +2611,7 @@ enum rt2800_eeprom_word {
        EEPROM_TSSI_BOUND_BG5,
        EEPROM_TXPOWER_A1,
        EEPROM_TXPOWER_A2,
+       EEPROM_TXPOWER_INIT,
        EEPROM_TSSI_BOUND_A1,
        EEPROM_TSSI_BOUND_A2,
        EEPROM_TSSI_BOUND_A3,
@@ -2987,29 +3166,4 @@ enum rt2800_eeprom_word {
  */
 #define BCN_TBTT_OFFSET 64
 
-/*
- * Hardware has 255 WCID table entries. First 32 entries are reserved for
- * shared keys. Since parts of the pairwise key table might be shared with
- * the beacon frame buffers 6 & 7 we could only use the first 222 entries.
- */
-#define WCID_START     33
-#define WCID_END       222
-#define STA_IDS_SIZE   (WCID_END - WCID_START + 2)
-
-/*
- * RT2800 driver data structure
- */
-struct rt2800_drv_data {
-       u8 calibration_bw20;
-       u8 calibration_bw40;
-       u8 bbp25;
-       u8 bbp26;
-       u8 txmixer_gain_24g;
-       u8 txmixer_gain_5g;
-       u8 max_psdu;
-       unsigned int tbtt_tick;
-       unsigned int ampdu_factor_cnt[4];
-       DECLARE_BITMAP(sta_ids, STA_IDS_SIZE);
-};
-
 #endif /* RT2800_H */
index 8223a15203165d32f09c03fff245162d04968b60..ba06ac2d876da2f5ddcfd9cb7bb9da73ac11a1a8 100644 (file)
@@ -59,6 +59,9 @@
        rt2800_regbusy_read((__dev), BBP_CSR_CFG, BBP_CSR_CFG_BUSY, (__reg))
 #define WAIT_FOR_RFCSR(__dev, __reg) \
        rt2800_regbusy_read((__dev), RF_CSR_CFG, RF_CSR_CFG_BUSY, (__reg))
+#define WAIT_FOR_RFCSR_MT7620(__dev, __reg) \
+       rt2800_regbusy_read((__dev), RF_CSR_CFG, RF_CSR_CFG_BUSY_MT7620, \
+                           (__reg))
 #define WAIT_FOR_RF(__dev, __reg) \
        rt2800_regbusy_read((__dev), RF_CSR_CFG0, RF_CSR_CFG0_BUSY, (__reg))
 #define WAIT_FOR_MCU(__dev, __reg) \
@@ -150,19 +153,56 @@ static void rt2800_rfcsr_write(struct rt2x00_dev *rt2x00dev,
         * Wait until the RFCSR becomes available, afterwards we
         * can safely write the new data into the register.
         */
-       if (WAIT_FOR_RFCSR(rt2x00dev, &reg)) {
-               reg = 0;
-               rt2x00_set_field32(&reg, RF_CSR_CFG_DATA, value);
-               rt2x00_set_field32(&reg, RF_CSR_CFG_REGNUM, word);
-               rt2x00_set_field32(&reg, RF_CSR_CFG_WRITE, 1);
-               rt2x00_set_field32(&reg, RF_CSR_CFG_BUSY, 1);
+       switch (rt2x00dev->chip.rt) {
+       case RT6352:
+               if (WAIT_FOR_RFCSR_MT7620(rt2x00dev, &reg)) {
+                       reg = 0;
+                       rt2x00_set_field32(&reg, RF_CSR_CFG_DATA_MT7620, value);
+                       rt2x00_set_field32(&reg, RF_CSR_CFG_REGNUM_MT7620,
+                                          word);
+                       rt2x00_set_field32(&reg, RF_CSR_CFG_WRITE_MT7620, 1);
+                       rt2x00_set_field32(&reg, RF_CSR_CFG_BUSY_MT7620, 1);
+
+                       rt2800_register_write_lock(rt2x00dev, RF_CSR_CFG, reg);
+               }
+               break;
 
-               rt2800_register_write_lock(rt2x00dev, RF_CSR_CFG, reg);
+       default:
+               if (WAIT_FOR_RFCSR(rt2x00dev, &reg)) {
+                       reg = 0;
+                       rt2x00_set_field32(&reg, RF_CSR_CFG_DATA, value);
+                       rt2x00_set_field32(&reg, RF_CSR_CFG_REGNUM, word);
+                       rt2x00_set_field32(&reg, RF_CSR_CFG_WRITE, 1);
+                       rt2x00_set_field32(&reg, RF_CSR_CFG_BUSY, 1);
+
+                       rt2800_register_write_lock(rt2x00dev, RF_CSR_CFG, reg);
+               }
+               break;
        }
 
        mutex_unlock(&rt2x00dev->csr_mutex);
 }
 
+static void rt2800_rfcsr_write_bank(struct rt2x00_dev *rt2x00dev, const u8 bank,
+                                   const unsigned int reg, const u8 value)
+{
+       rt2800_rfcsr_write(rt2x00dev, (reg | (bank << 6)), value);
+}
+
+static void rt2800_rfcsr_write_chanreg(struct rt2x00_dev *rt2x00dev,
+                                      const unsigned int reg, const u8 value)
+{
+       rt2800_rfcsr_write_bank(rt2x00dev, 4, reg, value);
+       rt2800_rfcsr_write_bank(rt2x00dev, 6, reg, value);
+}
+
+static void rt2800_rfcsr_write_dccal(struct rt2x00_dev *rt2x00dev,
+                                    const unsigned int reg, const u8 value)
+{
+       rt2800_rfcsr_write_bank(rt2x00dev, 5, reg, value);
+       rt2800_rfcsr_write_bank(rt2x00dev, 7, reg, value);
+}
+
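rt2800_rfcsr_write_bank() folds the bank number into the upper bits of the 6-bit register offset (reg | (bank << 6)); the chanreg and dccal variants then mirror one value into the channel banks (4 and 6) and the DC-calibration banks (5 and 7). A standalone sketch of the encoding and its inverse:

#include <stdio.h>

/* Bank encoding as used by rt2800_rfcsr_write_bank(): the register
 * offset lives in bits 5:0, the bank number above it.
 */
static unsigned int bank_encode(unsigned int bank, unsigned int reg)
{
	return reg | (bank << 6);
}

int main(void)
{
	unsigned int word = bank_encode(5, 58);	/* dccal bank 5, reg 58 */

	printf("word=0x%03x bank=%u reg=%u\n",
	       word, word >> 6, word & 0x3f);
	return 0;
}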
 static void rt2800_rfcsr_read(struct rt2x00_dev *rt2x00dev,
                              const unsigned int word, u8 *value)
 {
@@ -178,22 +218,48 @@ static void rt2800_rfcsr_read(struct rt2x00_dev *rt2x00dev,
         * doesn't become available in time, reg will be 0xffffffff
         * which means we return 0xff to the caller.
         */
-       if (WAIT_FOR_RFCSR(rt2x00dev, &reg)) {
-               reg = 0;
-               rt2x00_set_field32(&reg, RF_CSR_CFG_REGNUM, word);
-               rt2x00_set_field32(&reg, RF_CSR_CFG_WRITE, 0);
-               rt2x00_set_field32(&reg, RF_CSR_CFG_BUSY, 1);
+       switch (rt2x00dev->chip.rt) {
+       case RT6352:
+               if (WAIT_FOR_RFCSR_MT7620(rt2x00dev, &reg)) {
+                       reg = 0;
+                       rt2x00_set_field32(&reg, RF_CSR_CFG_REGNUM_MT7620,
+                                          word);
+                       rt2x00_set_field32(&reg, RF_CSR_CFG_WRITE_MT7620, 0);
+                       rt2x00_set_field32(&reg, RF_CSR_CFG_BUSY_MT7620, 1);
 
-               rt2800_register_write_lock(rt2x00dev, RF_CSR_CFG, reg);
+                       rt2800_register_write_lock(rt2x00dev, RF_CSR_CFG, reg);
 
-               WAIT_FOR_RFCSR(rt2x00dev, &reg);
-       }
+                       WAIT_FOR_RFCSR_MT7620(rt2x00dev, &reg);
+               }
+
+               *value = rt2x00_get_field32(reg, RF_CSR_CFG_DATA_MT7620);
+               break;
+
+       default:
+               if (WAIT_FOR_RFCSR(rt2x00dev, &reg)) {
+                       reg = 0;
+                       rt2x00_set_field32(&reg, RF_CSR_CFG_REGNUM, word);
+                       rt2x00_set_field32(&reg, RF_CSR_CFG_WRITE, 0);
+                       rt2x00_set_field32(&reg, RF_CSR_CFG_BUSY, 1);
 
-       *value = rt2x00_get_field32(reg, RF_CSR_CFG_DATA);
+                       rt2800_register_write_lock(rt2x00dev, RF_CSR_CFG, reg);
+
+                       WAIT_FOR_RFCSR(rt2x00dev, &reg);
+               }
+
+               *value = rt2x00_get_field32(reg, RF_CSR_CFG_DATA);
+               break;
+       }
 
        mutex_unlock(&rt2x00dev->csr_mutex);
 }
 
+static void rt2800_rfcsr_read_bank(struct rt2x00_dev *rt2x00dev, const u8 bank,
+                                  const unsigned int reg, u8 *value)
+{
+       rt2800_rfcsr_read(rt2x00dev, (reg | (bank << 6)), value);
+}
+
 static void rt2800_rf_write(struct rt2x00_dev *rt2x00dev,
                            const unsigned int word, const u32 value)
 {
@@ -250,6 +316,7 @@ static const unsigned int rt2800_eeprom_map[EEPROM_WORD_COUNT] = {
        [EEPROM_TSSI_BOUND_BG5]         = 0x003b,
        [EEPROM_TXPOWER_A1]             = 0x003c,
        [EEPROM_TXPOWER_A2]             = 0x0053,
+       [EEPROM_TXPOWER_INIT]           = 0x0068,
        [EEPROM_TSSI_BOUND_A1]          = 0x006a,
        [EEPROM_TSSI_BOUND_A2]          = 0x006b,
        [EEPROM_TSSI_BOUND_A3]          = 0x006c,
@@ -524,6 +591,7 @@ void rt2800_get_txwi_rxwi_size(struct rt2x00_dev *rt2x00dev,
                break;
 
        case RT5592:
+       case RT6352:
                *txwi_size = TXWI_DESC_SIZE_5WORDS;
                *rxwi_size = RXWI_DESC_SIZE_6WORDS;
                break;
@@ -852,14 +920,49 @@ void rt2800_process_rxwi(struct queue_entry *entry,
 }
 EXPORT_SYMBOL_GPL(rt2800_process_rxwi);
 
-void rt2800_txdone_entry(struct queue_entry *entry, u32 status, __le32 *txwi)
+static void rt2800_rate_from_status(struct skb_frame_desc *skbdesc,
+                                   u32 status, enum nl80211_band band)
+{
+       u8 flags = 0;
+       u8 idx = rt2x00_get_field32(status, TX_STA_FIFO_MCS);
+
+       switch (rt2x00_get_field32(status, TX_STA_FIFO_PHYMODE)) {
+       case RATE_MODE_HT_GREENFIELD:
+               flags |= IEEE80211_TX_RC_GREEN_FIELD;
+               /* fall through */
+       case RATE_MODE_HT_MIX:
+               flags |= IEEE80211_TX_RC_MCS;
+               break;
+       case RATE_MODE_OFDM:
+               if (band == NL80211_BAND_2GHZ)
+                       idx += 4;
+               break;
+       case RATE_MODE_CCK:
+               if (idx >= 8)
+                       idx -= 8;
+               break;
+       }
+
+       if (rt2x00_get_field32(status, TX_STA_FIFO_BW))
+               flags |= IEEE80211_TX_RC_40_MHZ_WIDTH;
+
+       if (rt2x00_get_field32(status, TX_STA_FIFO_SGI))
+               flags |= IEEE80211_TX_RC_SHORT_GI;
+
+       skbdesc->tx_rate_idx = idx;
+       skbdesc->tx_rate_flags = flags;
+}
+
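rt2800_rate_from_status() maps the raw TX_STA_FIFO word to a mac80211 rate index plus flags: HT modes report an MCS index (greenfield additionally sets the GF flag), OFDM indices are shifted past the four CCK rates on 2 GHz, CCK retry indices of 8 and above wrap back into range, and the BW/SGI bits become the 40 MHz and short-GI flags. A standalone sketch of the same decode on a synthetic status word, using the TX_STA_FIFO field layout defined above (the PHYMODE encoding shown is an assumption of this sketch):

#include <stdint.h>
#include <stdio.h>

/* Assumed PHYMODE encoding (CCK=0, OFDM=1, HT mixed=2, HT greenfield=3) */
enum { MODE_CCK, MODE_OFDM, MODE_HT_MIX, MODE_HT_GF };

int main(void)
{
	/* synthetic status word: PHYMODE=HT_MIX, SGI=1, BW=1, MCS=7 */
	uint32_t status = (2u << 30) | (1u << 24) | (1u << 23) | (7u << 16);
	unsigned int idx = (status >> 16) & 0x7f;  /* TX_STA_FIFO_MCS */
	unsigned int mode = status >> 30;          /* TX_STA_FIFO_PHYMODE */
	unsigned int bw = (status >> 23) & 1;      /* TX_STA_FIFO_BW */
	unsigned int sgi = (status >> 24) & 1;     /* TX_STA_FIFO_SGI */

	if (mode == MODE_OFDM)
		idx += 4;		/* on 2 GHz, skip the four CCK rates */
	else if (mode == MODE_CCK && idx >= 8)
		idx -= 8;		/* CCK retry indices wrap back */

	printf("idx=%u mcs_rate=%d 40mhz=%u sgi=%u\n",
	       idx, mode >= MODE_HT_MIX, bw, sgi);
	return 0;
}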
+void rt2800_txdone_entry(struct queue_entry *entry, u32 status, __le32 *txwi,
+                        bool match)
 {
        struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;
+       struct rt2800_drv_data *drv_data = rt2x00dev->drv_data;
        struct skb_frame_desc *skbdesc = get_skb_frame_desc(entry->skb);
        struct txdone_entry_desc txdesc;
        u32 word;
        u16 mcs, real_mcs;
-       int aggr, ampdu;
+       int aggr, ampdu, wcid, ack_req;
 
        /*
         * Obtain the status about this packet.
@@ -872,6 +975,8 @@ void rt2800_txdone_entry(struct queue_entry *entry, u32 status, __le32 *txwi)
 
        real_mcs = rt2x00_get_field32(status, TX_STA_FIFO_MCS);
        aggr = rt2x00_get_field32(status, TX_STA_FIFO_TX_AGGRE);
+       wcid = rt2x00_get_field32(status, TX_STA_FIFO_WCID);
+       ack_req = rt2x00_get_field32(status, TX_STA_FIFO_TX_ACK_REQUIRED);
 
        /*
         * If a frame was meant to be sent as a single non-aggregated MPDU
@@ -888,15 +993,22 @@ void rt2800_txdone_entry(struct queue_entry *entry, u32 status, __le32 *txwi)
         * Hence, replace the requested rate with the real tx rate to not
         * confuse the rate control algorithm by providing clearly wrong
         * data.
-        */
-       if (unlikely(aggr == 1 && ampdu == 0 && real_mcs != mcs)) {
-               skbdesc->tx_rate_idx = real_mcs;
+        *
+        * FIXME: if we do not find a matching entry, we report that the frame
+        * was sent without any retries. We need to find a way to fix that and
+        * provide the retry count.
+        */
+       if (unlikely((aggr == 1 && ampdu == 0 && real_mcs != mcs)) || !match) {
+               rt2800_rate_from_status(skbdesc, status, rt2x00dev->curr_band);
                mcs = real_mcs;
        }
 
        if (aggr == 1 || ampdu == 1)
                __set_bit(TXDONE_AMPDU, &txdesc.flags);
 
+       if (!ack_req)
+               __set_bit(TXDONE_NO_ACK_REQ, &txdesc.flags);
+
        /*
         * Ralink has a retry mechanism using a global fallback
         * table. We setup this fallback table to try the immediate
@@ -928,7 +1040,18 @@ void rt2800_txdone_entry(struct queue_entry *entry, u32 status, __le32 *txwi)
        if (txdesc.retry)
                __set_bit(TXDONE_FALLBACK, &txdesc.flags);
 
-       rt2x00lib_txdone(entry, &txdesc);
+       if (!match) {
+               /* RCU ensures that a non-NULL sta will not be freed by mac80211. */
+               rcu_read_lock();
+               if (likely(wcid >= WCID_START && wcid <= WCID_END))
+                       skbdesc->sta = drv_data->wcid_to_sta[wcid - WCID_START];
+               else
+                       skbdesc->sta = NULL;
+               rt2x00lib_txdone_nomatch(entry, &txdesc);
+               rcu_read_unlock();
+       } else {
+               rt2x00lib_txdone(entry, &txdesc);
+       }
 }
 EXPORT_SYMBOL_GPL(rt2800_txdone_entry);
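In the nomatch path above, the station is recovered from the hardware-reported WCID through the new wcid_to_sta array, under rcu_read_lock() because mac80211 frees station entries only after an RCU grace period. A standalone sketch of the bounded lookup, using the WCID_START/WCID_END values from the block removed from rt2800.h above; types and names are illustrative stand-ins:

#include <stdio.h>

#define WCID_START	33
#define WCID_END	222

struct sta { int id; };			/* stand-in for ieee80211_sta */

static struct sta *wcid_to_sta[WCID_END - WCID_START + 1];

/* Bounded lookup as in the !match path; in the driver this runs
 * under rcu_read_lock().
 */
static struct sta *sta_from_wcid(unsigned int wcid)
{
	if (wcid < WCID_START || wcid > WCID_END)
		return NULL;		/* out-of-range WCIDs carry no station */
	return wcid_to_sta[wcid - WCID_START];
}

int main(void)
{
	static struct sta s = { .id = 1 };

	wcid_to_sta[40 - WCID_START] = &s;	/* what rt2800_sta_add() does */
	printf("%p %p\n", (void *)sta_from_wcid(40), (void *)sta_from_wcid(7));
	return 0;
}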
 
@@ -1468,6 +1591,7 @@ int rt2800_sta_add(struct rt2x00_dev *rt2x00dev, struct ieee80211_vif *vif,
                return 0;
 
        __set_bit(wcid - WCID_START, drv_data->sta_ids);
+       drv_data->wcid_to_sta[wcid - WCID_START] = sta;
 
        /*
         * Clean up WCID attributes and write STA address to the device.
@@ -1498,6 +1622,7 @@ int rt2800_sta_remove(struct rt2x00_dev *rt2x00dev, struct ieee80211_sta *sta)
         * get renewed when the WCID is reused.
         */
        rt2800_config_wcid(rt2x00dev, NULL, wcid);
+       drv_data->wcid_to_sta[wcid - WCID_START] = NULL;
        __clear_bit(wcid - WCID_START, drv_data->sta_ids);
 
        return 0;
@@ -2753,7 +2878,8 @@ static void rt2800_config_channel_rf53xx(struct rt2x00_dev *rt2x00dev,
                                rt2800_rfcsr_write(rt2x00dev, 59,
                                                   r59_nonbt_rev[idx]);
                        } else if (rt2x00_rt(rt2x00dev, RT5390) ||
-                                  rt2x00_rt(rt2x00dev, RT5392)) {
+                                  rt2x00_rt(rt2x00dev, RT5392) ||
+                                  rt2x00_rt(rt2x00dev, RT6352)) {
                                static const char r59_non_bt[] = {0x8f, 0x8f,
                                        0x8f, 0x8f, 0x8f, 0x8f, 0x8f, 0x8d,
                                        0x8a, 0x88, 0x88, 0x87, 0x87, 0x86};
@@ -3047,6 +3173,242 @@ static void rt2800_config_channel_rf55xx(struct rt2x00_dev *rt2x00dev,
        rt2800_bbp_write(rt2x00dev, 196, (rf->channel <= 14) ? 0x19 : 0x7F);
 }
 
+static void rt2800_config_channel_rf7620(struct rt2x00_dev *rt2x00dev,
+                                        struct ieee80211_conf *conf,
+                                        struct rf_channel *rf,
+                                        struct channel_info *info)
+{
+       struct rt2800_drv_data *drv_data = rt2x00dev->drv_data;
+       u8 rx_agc_fc, tx_agc_fc;
+       u8 rfcsr;
+
+       /* Frequency plan setting */
+       /* Rdiv setting (set 0x03 if Xtal == 20 MHz)
+        * R13[1:0]
+        */
+       rt2800_rfcsr_read(rt2x00dev, 13, &rfcsr);
+       rt2x00_set_field8(&rfcsr, RFCSR13_RDIV_MT7620,
+                         rt2800_clk_is_20mhz(rt2x00dev) ? 3 : 0);
+       rt2800_rfcsr_write(rt2x00dev, 13, rfcsr);
+
+       /* N setting
+        * R20[7:0] in rf->rf1
+        * R21[0] always 0
+        */
+       rt2800_rfcsr_read(rt2x00dev, 20, &rfcsr);
+       rfcsr = (rf->rf1 & 0x00ff);
+       rt2800_rfcsr_write(rt2x00dev, 20, rfcsr);
+
+       rt2800_rfcsr_read(rt2x00dev, 21, &rfcsr);
+       rt2x00_set_field8(&rfcsr, RFCSR21_BIT1, 0);
+       rt2800_rfcsr_write(rt2x00dev, 21, rfcsr);
+
+       /* K setting (always 0)
+        * R16[3:0] (RF PLL freq selection)
+        */
+       rt2800_rfcsr_read(rt2x00dev, 16, &rfcsr);
+       rt2x00_set_field8(&rfcsr, RFCSR16_RF_PLL_FREQ_SEL_MT7620, 0);
+       rt2800_rfcsr_write(rt2x00dev, 16, rfcsr);
+
+       /* D setting (always 0)
+        * R22[2:0] (D=15, R22[2:0]=<111>)
+        */
+       rt2800_rfcsr_read(rt2x00dev, 22, &rfcsr);
+       rt2x00_set_field8(&rfcsr, RFCSR22_FREQPLAN_D_MT7620, 0);
+       rt2800_rfcsr_write(rt2x00dev, 22, rfcsr);
+
+       /* Ksd setting
+        * Ksd: R17<7:0> in rf->rf2
+        *      R18<7:0> in rf->rf3
+        *      R19<1:0> in rf->rf4
+        */
+       rt2800_rfcsr_read(rt2x00dev, 17, &rfcsr);
+       rfcsr = rf->rf2;
+       rt2800_rfcsr_write(rt2x00dev, 17, rfcsr);
+
+       rt2800_rfcsr_read(rt2x00dev, 18, &rfcsr);
+       rfcsr = rf->rf3;
+       rt2800_rfcsr_write(rt2x00dev, 18, rfcsr);
+
+       rt2800_rfcsr_read(rt2x00dev, 19, &rfcsr);
+       rt2x00_set_field8(&rfcsr, RFCSR19_K, rf->rf4);
+       rt2800_rfcsr_write(rt2x00dev, 19, rfcsr);
+
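The Ksd fraction is split across three registers exactly as the comment above describes: bits 7:0 go to R17 (rf->rf2), bits 15:8 to R18 (rf->rf3), and the top two bits to R19[1:0] (rf->rf4, masked by RFCSR19_K). A standalone sketch of that packing (the example Ksd value is hypothetical):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t ksd = 0x2cccc;			/* hypothetical 18-bit fraction */
	uint8_t rf2 = ksd & 0xff;		/* -> RFCSR 17, R17<7:0> */
	uint8_t rf3 = (ksd >> 8) & 0xff;	/* -> RFCSR 18, R18<7:0> */
	uint8_t rf4 = (ksd >> 16) & 0x03;	/* -> RFCSR 19, RFCSR19_K */

	printf("R17=0x%02x R18=0x%02x R19[1:0]=0x%x\n", rf2, rf3, rf4);
	return 0;
}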
+       /* Default: XO = 20 MHz, SDM mode */
+       rt2800_rfcsr_read(rt2x00dev, 16, &rfcsr);
+       rt2x00_set_field8(&rfcsr, RFCSR16_SDM_MODE_MT7620, 0x80);
+       rt2800_rfcsr_write(rt2x00dev, 16, rfcsr);
+
+       rt2800_rfcsr_read(rt2x00dev, 21, &rfcsr);
+       rt2x00_set_field8(&rfcsr, RFCSR21_BIT8, 1);
+       rt2800_rfcsr_write(rt2x00dev, 21, rfcsr);
+
+       rt2800_rfcsr_read(rt2x00dev, 1, &rfcsr);
+       rt2x00_set_field8(&rfcsr, RFCSR1_TX2_EN_MT7620,
+                         rt2x00dev->default_ant.tx_chain_num != 1);
+       rt2800_rfcsr_write(rt2x00dev, 1, rfcsr);
+
+       rt2800_rfcsr_read(rt2x00dev, 2, &rfcsr);
+       rt2x00_set_field8(&rfcsr, RFCSR2_TX2_EN_MT7620,
+                         rt2x00dev->default_ant.tx_chain_num != 1);
+       rt2x00_set_field8(&rfcsr, RFCSR2_RX2_EN_MT7620,
+                         rt2x00dev->default_ant.rx_chain_num != 1);
+       rt2800_rfcsr_write(rt2x00dev, 2, rfcsr);
+
+       rt2800_rfcsr_read(rt2x00dev, 42, &rfcsr);
+       rt2x00_set_field8(&rfcsr, RFCSR42_TX2_EN_MT7620,
+                         rt2x00dev->default_ant.tx_chain_num != 1);
+       rt2800_rfcsr_write(rt2x00dev, 42, rfcsr);
+
+       /* RF for DC Cal BW */
+       if (conf_is_ht40(conf)) {
+               rt2800_rfcsr_write_dccal(rt2x00dev, 6, 0x10);
+               rt2800_rfcsr_write_dccal(rt2x00dev, 7, 0x10);
+               rt2800_rfcsr_write_dccal(rt2x00dev, 8, 0x04);
+               rt2800_rfcsr_write_dccal(rt2x00dev, 58, 0x10);
+               rt2800_rfcsr_write_dccal(rt2x00dev, 59, 0x10);
+       } else {
+               rt2800_rfcsr_write_dccal(rt2x00dev, 6, 0x20);
+               rt2800_rfcsr_write_dccal(rt2x00dev, 7, 0x20);
+               rt2800_rfcsr_write_dccal(rt2x00dev, 8, 0x00);
+               rt2800_rfcsr_write_dccal(rt2x00dev, 58, 0x20);
+               rt2800_rfcsr_write_dccal(rt2x00dev, 59, 0x20);
+       }
+
+       if (conf_is_ht40(conf)) {
+               rt2800_rfcsr_write_dccal(rt2x00dev, 58, 0x08);
+               rt2800_rfcsr_write_dccal(rt2x00dev, 59, 0x08);
+       } else {
+               rt2800_rfcsr_write_dccal(rt2x00dev, 58, 0x28);
+               rt2800_rfcsr_write_dccal(rt2x00dev, 59, 0x28);
+       }
+
+       rt2800_rfcsr_read(rt2x00dev, 28, &rfcsr);
+       rt2x00_set_field8(&rfcsr, RFCSR28_CH11_HT40,
+                         conf_is_ht40(conf) && (rf->channel == 11));
+       rt2800_rfcsr_write(rt2x00dev, 28, rfcsr);
+
+       if (!test_bit(DEVICE_STATE_SCANNING, &rt2x00dev->flags)) {
+               if (conf_is_ht40(conf)) {
+                       rx_agc_fc = drv_data->rx_calibration_bw40;
+                       tx_agc_fc = drv_data->tx_calibration_bw40;
+               } else {
+                       rx_agc_fc = drv_data->rx_calibration_bw20;
+                       tx_agc_fc = drv_data->tx_calibration_bw20;
+               }
+               rt2800_rfcsr_read_bank(rt2x00dev, 5, 6, &rfcsr);
+               rfcsr &= (~0x3F);
+               rfcsr |= rx_agc_fc;
+               rt2800_rfcsr_write_bank(rt2x00dev, 5, 6, rfcsr);
+               rt2800_rfcsr_read_bank(rt2x00dev, 5, 7, &rfcsr);
+               rfcsr &= (~0x3F);
+               rfcsr |= rx_agc_fc;
+               rt2800_rfcsr_write_bank(rt2x00dev, 5, 7, rfcsr);
+               rt2800_rfcsr_read_bank(rt2x00dev, 7, 6, &rfcsr);
+               rfcsr &= (~0x3F);
+               rfcsr |= rx_agc_fc;
+               rt2800_rfcsr_write_bank(rt2x00dev, 7, 6, rfcsr);
+               rt2800_rfcsr_read_bank(rt2x00dev, 7, 7, &rfcsr);
+               rfcsr &= (~0x3F);
+               rfcsr |= rx_agc_fc;
+               rt2800_rfcsr_write_bank(rt2x00dev, 7, 7, rfcsr);
+
+               rt2800_rfcsr_read_bank(rt2x00dev, 5, 58, &rfcsr);
+               rfcsr &= (~0x3F);
+               rfcsr |= tx_agc_fc;
+               rt2800_rfcsr_write_bank(rt2x00dev, 5, 58, rfcsr);
+               rt2800_rfcsr_read_bank(rt2x00dev, 5, 59, &rfcsr);
+               rfcsr &= (~0x3F);
+               rfcsr |= tx_agc_fc;
+               rt2800_rfcsr_write_bank(rt2x00dev, 5, 59, rfcsr);
+               rt2800_rfcsr_read_bank(rt2x00dev, 7, 58, &rfcsr);
+               rfcsr &= (~0x3F);
+               rfcsr |= tx_agc_fc;
+               rt2800_rfcsr_write_bank(rt2x00dev, 7, 58, rfcsr);
+               rt2800_rfcsr_read_bank(rt2x00dev, 7, 59, &rfcsr);
+               rfcsr &= (~0x3F);
+               rfcsr |= tx_agc_fc;
+               rt2800_rfcsr_write_bank(rt2x00dev, 7, 59, rfcsr);
+       }
+}
+
+static void rt2800_config_alc(struct rt2x00_dev *rt2x00dev,
+                             struct ieee80211_channel *chan,
+                             int power_level)
+{
+       u16 eeprom, target_power, max_power;
+       u32 mac_sys_ctrl, mac_status;
+       u32 reg;
+       u8 bbp;
+       int i;
+
+       /* hardware unit is 0.5 dBm, limited to 23.5 dBm */
+       power_level *= 2;
+       if (power_level > 0x2f)
+               power_level = 0x2f;
+
+       max_power = chan->max_power * 2;
+       if (max_power > 0x2f)
+               max_power = 0x2f;
+
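rt2800_config_alc() converts the requested power from dBm into the hardware's 0.5 dB units and clamps at 0x2f, matching the 0 to 23.5 dB range documented for TX_ALC_CFG_0 above. The same conversion as a standalone check:

#include <stdio.h>

/* dBm -> 0.5 dB hardware units, clamped at 0x2f (23.5 dBm) */
static int alc_units(int dbm)
{
	int v = dbm * 2;

	return v > 0x2f ? 0x2f : v;
}

int main(void)
{
	printf("17 dBm -> 0x%02x\n", alc_units(17));	/* 0x22 */
	printf("30 dBm -> 0x%02x\n", alc_units(30));	/* clamped to 0x2f */
	return 0;
}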
+       rt2800_register_read(rt2x00dev, TX_ALC_CFG_0, &reg);
+       rt2x00_set_field32(&reg, TX_ALC_CFG_0_CH_INIT_0, power_level);
+       rt2x00_set_field32(&reg, TX_ALC_CFG_0_CH_INIT_1, power_level);
+       rt2x00_set_field32(&reg, TX_ALC_CFG_0_LIMIT_0, max_power);
+       rt2x00_set_field32(&reg, TX_ALC_CFG_0_LIMIT_1, max_power);
+
+       rt2800_eeprom_read(rt2x00dev, EEPROM_NIC_CONF1, &eeprom);
+       if (rt2x00_get_field16(eeprom, EEPROM_NIC_CONF1_INTERNAL_TX_ALC)) {
+               /* init base power by eeprom target power */
+               rt2800_eeprom_read(rt2x00dev, EEPROM_TXPOWER_INIT,
+                                  &target_power);
+               rt2x00_set_field32(&reg, TX_ALC_CFG_0_CH_INIT_0, target_power);
+               rt2x00_set_field32(&reg, TX_ALC_CFG_0_CH_INIT_1, target_power);
+       }
+       rt2800_register_write(rt2x00dev, TX_ALC_CFG_0, reg);
+
+       rt2800_register_read(rt2x00dev, TX_ALC_CFG_1, &reg);
+       rt2x00_set_field32(&reg, TX_ALC_CFG_1_TX_TEMP_COMP, 0);
+       rt2800_register_write(rt2x00dev, TX_ALC_CFG_1, reg);
+
+       /* Save MAC SYS CTRL registers */
+       rt2800_register_read(rt2x00dev, MAC_SYS_CTRL, &mac_sys_ctrl);
+       /* Disable Tx/Rx */
+       rt2800_register_write(rt2x00dev, MAC_SYS_CTRL, 0);
+       /* Check MAC Tx/Rx idle */
+       for (i = 0; i < 10000; i++) {
+               rt2800_register_read(rt2x00dev, MAC_STATUS_CFG,
+                                    &mac_status);
+               if (mac_status & 0x3)
+                       usleep_range(50, 200);
+               else
+                       break;
+       }
+
+       if (i == 10000)
+               rt2x00_warn(rt2x00dev, "Wait MAC Status to MAX !!!\n");
+
+       if (chan->center_freq > 2457) {
+               rt2800_bbp_read(rt2x00dev, 30, &bbp);
+               bbp = 0x40;
+               rt2800_bbp_write(rt2x00dev, 30, bbp);
+               rt2800_rfcsr_write(rt2x00dev, 39, 0);
+               if (rt2x00_has_cap_external_lna_bg(rt2x00dev))
+                       rt2800_rfcsr_write(rt2x00dev, 42, 0xfb);
+               else
+                       rt2800_rfcsr_write(rt2x00dev, 42, 0x7b);
+       } else {
+               rt2800_bbp_read(rt2x00dev, 30, &bbp);
+               bbp = 0x1f;
+               rt2800_bbp_write(rt2x00dev, 30, bbp);
+               rt2800_rfcsr_write(rt2x00dev, 39, 0x80);
+               if (rt2x00_has_cap_external_lna_bg(rt2x00dev))
+                       rt2800_rfcsr_write(rt2x00dev, 42, 0xdb);
+               else
+                       rt2800_rfcsr_write(rt2x00dev, 42, 0x5b);
+       }
+       rt2800_register_write(rt2x00dev, MAC_SYS_CTRL, mac_sys_ctrl);
+}
+
 static void rt2800_bbp_write_with_rx_chain(struct rt2x00_dev *rt2x00dev,
                                           const unsigned int word,
                                           const u8 value)
@@ -3171,7 +3533,7 @@ static void rt2800_config_channel(struct rt2x00_dev *rt2x00dev,
                                  struct channel_info *info)
 {
        u32 reg;
-       unsigned int tx_pin;
+       u32 tx_pin;
        u8 bbp, rfcsr;
 
        info->default_power1 = rt2800_txpower_to_dev(rt2x00dev, rf->channel,
@@ -3216,6 +3578,9 @@ static void rt2800_config_channel(struct rt2x00_dev *rt2x00dev,
        case RF5592:
                rt2800_config_channel_rf55xx(rt2x00dev, conf, rf, info);
                break;
+       case RF7620:
+               rt2800_config_channel_rf7620(rt2x00dev, conf, rf, info);
+               break;
        default:
                rt2800_config_channel_rf2xxx(rt2x00dev, conf, rf, info);
        }
@@ -3290,7 +3655,8 @@ static void rt2800_config_channel(struct rt2x00_dev *rt2x00dev,
 
        if (rf->channel <= 14) {
                if (!rt2x00_rt(rt2x00dev, RT5390) &&
-                   !rt2x00_rt(rt2x00dev, RT5392)) {
+                   !rt2x00_rt(rt2x00dev, RT5392) &&
+                   !rt2x00_rt(rt2x00dev, RT6352)) {
                        if (rt2x00_has_cap_external_lna_bg(rt2x00dev)) {
                                rt2800_bbp_write(rt2x00dev, 82, 0x62);
                                rt2800_bbp_write(rt2x00dev, 75, 0x46);
@@ -3310,7 +3676,7 @@ static void rt2800_config_channel(struct rt2x00_dev *rt2x00dev,
                        rt2800_bbp_write(rt2x00dev, 82, 0x94);
                else if (rt2x00_rt(rt2x00dev, RT3593))
                        rt2800_bbp_write(rt2x00dev, 82, 0x82);
-               else
+               else if (!rt2x00_rt(rt2x00dev, RT6352))
                        rt2800_bbp_write(rt2x00dev, 82, 0xf2);
 
                if (rt2x00_rt(rt2x00dev, RT3593))
@@ -3331,7 +3697,7 @@ static void rt2800_config_channel(struct rt2x00_dev *rt2x00dev,
        if (rt2x00_rt(rt2x00dev, RT3572))
                rt2800_rfcsr_write(rt2x00dev, 8, 0);
 
-       tx_pin = 0;
+       rt2800_register_read(rt2x00dev, TX_PIN_CFG, &tx_pin);
 
        switch (rt2x00dev->default_ant.tx_chain_num) {
        case 3:
@@ -3380,6 +3746,7 @@ static void rt2800_config_channel(struct rt2x00_dev *rt2x00dev,
 
        rt2x00_set_field32(&tx_pin, TX_PIN_CFG_RFTR_EN, 1);
        rt2x00_set_field32(&tx_pin, TX_PIN_CFG_TRSW_EN, 1);
+       rt2x00_set_field32(&tx_pin, TX_PIN_CFG_RFRX_EN, 1); /* mt7620 */
 
        rt2800_register_write(rt2x00dev, TX_PIN_CFG, tx_pin);
 
@@ -3438,7 +3805,7 @@ static void rt2800_config_channel(struct rt2x00_dev *rt2x00dev,
                usleep_range(1000, 1500);
        }
 
-       if (rt2x00_rt(rt2x00dev, RT5592)) {
+       if (rt2x00_rt(rt2x00dev, RT5592) || rt2x00_rt(rt2x00dev, RT6352)) {
                rt2800_bbp_write(rt2x00dev, 195, 141);
                rt2800_bbp_write(rt2x00dev, 196, conf_is_ht40(conf) ? 0x10 : 0x1a);
 
@@ -4125,6 +4492,128 @@ static void rt2800_config_txpower_rt3593(struct rt2x00_dev *rt2x00dev,
                           (unsigned long) regs[i]);
 }
 
+static void rt2800_config_txpower_rt6352(struct rt2x00_dev *rt2x00dev,
+                                        struct ieee80211_channel *chan,
+                                        int power_level)
+{
+       u32 reg, pwreg;
+       u16 eeprom;
+       u32 data, gdata;
+       u8 t, i;
+       enum nl80211_band band = chan->band;
+       int delta;
+
+       /* Warn user if bw_comp is set in EEPROM */
+       delta = rt2800_get_txpower_bw_comp(rt2x00dev, band);
+
+       if (delta)
+               rt2x00_warn(rt2x00dev, "ignoring EEPROM HT40 power delta: %d\n",
+                           delta);
+
+       /* Populate TX_PWR_CFG_0 up to TX_PWR_CFG_4 from the EEPROM for HT20,
+        * limit each value to 0x3f and replace 0x20 by 0x21, as the vendor
+        * driver does the same even though it looks wrong.
+        * Maybe this stems from a misunderstanding of what a signed 8-bit
+        * value is, or maybe the hardware has a problem handling 0x20: as the
+        * code initially used a fixed offset between HT20 and HT40 rates,
+        * they may have worked around such an issue and simply forgotten
+        * about it later on.
+        * We could use rt2800_get_txpower_bw_comp() here as well; however,
+        * the corresponding EEPROM value is not respected by the vendor
+        * driver, so this is probably taken care of by the TX ALC and the
+        * driver doesn't need to handle it.
+        * Awkward as all of this is, just do as the vendor driver did, since
+        * that is what board vendors expected when they populated the EEPROM.
+        */
+       for (i = 0; i < 5; i++) {
+               rt2800_eeprom_read_from_array(rt2x00dev, EEPROM_TXPOWER_BYRATE,
+                                             i * 2, &eeprom);
+
+               data = eeprom;
+
+               t = eeprom & 0x3f;
+               if (t == 32)
+                       t++;
+
+               gdata = t;
+
+               t = (eeprom & 0x3f00) >> 8;
+               if (t == 32)
+                       t++;
+
+               gdata |= (t << 8);
+
+               rt2800_eeprom_read_from_array(rt2x00dev, EEPROM_TXPOWER_BYRATE,
+                                             (i * 2) + 1, &eeprom);
+
+               t = eeprom & 0x3f;
+               if (t == 32)
+                       t++;
+
+               gdata |= (t << 16);
+
+               t = (eeprom & 0x3f00) >> 8;
+               if (t == 32)
+                       t++;
+
+               gdata |= (t << 24);
+               data |= (eeprom << 16);
+
+               if (!test_bit(CONFIG_CHANNEL_HT40, &rt2x00dev->flags)) {
+                       /* HT20 */
+                       if (data != 0xffffffff)
+                               rt2800_register_write(rt2x00dev,
+                                                     TX_PWR_CFG_0 + (i * 4),
+                                                     data);
+               } else {
+                       /* HT40 */
+                       if (gdata != 0xffffffff)
+                               rt2800_register_write(rt2x00dev,
+                                                     TX_PWR_CFG_0 + (i * 4),
+                                                     gdata);
+               }
+       }
+
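Each pass of the loop above rebuilds one TX_PWR_CFG_x image from two consecutive 16-bit EEPROM words: the HT20 image keeps the raw words, while the HT40 image masks each byte to 6 bits and applies the 0x20-to-0x21 quirk. A standalone sketch of the transformation (the EEPROM words are hypothetical):

#include <stdint.h>
#include <stdio.h>

/* 6-bit power value with the vendor's 0x20 -> 0x21 quirk */
static uint32_t fixup(uint16_t word, unsigned int shift)
{
	uint32_t t = (word >> shift) & 0x3f;

	return t == 32 ? t + 1 : t;
}

int main(void)
{
	uint16_t lo = 0x2026, hi = 0x1320;	/* hypothetical EEPROM words */
	uint32_t data = lo | ((uint32_t)hi << 16);	/* HT20: raw words */
	uint32_t gdata = fixup(lo, 0) | (fixup(lo, 8) << 8) |
			 (fixup(hi, 0) << 16) | (fixup(hi, 8) << 24);

	printf("HT20=0x%08x HT40=0x%08x\n", (unsigned)data, (unsigned)gdata);
	return 0;
}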
+       /* Apparently Ralink ran out of space in the BYRATE calibration
+        * section of the EEPROM, which is copied to the corresponding
+        * TX_PWR_CFG_x registers. As recent 2T chips use 8-bit instead of
+        * 4-bit values for power offsets, more space would be needed. Ralink
+        * decided to keep the EEPROM layout untouched and instead have some
+        * shared values covering multiple bitrates.
+        * Populate the registers not covered by the EEPROM in the same way
+        * the vendor driver does.
+        */
+
+       /* For OFDM 54MBS use value from OFDM 48MBS */
+       pwreg = 0;
+       rt2800_register_read(rt2x00dev, TX_PWR_CFG_1, &reg);
+       t = rt2x00_get_field32(reg, TX_PWR_CFG_1B_48MBS);
+       rt2x00_set_field32(&pwreg, TX_PWR_CFG_7B_54MBS, t);
+
+       /* For MCS 7 use value from MCS 6 */
+       rt2800_register_read(rt2x00dev, TX_PWR_CFG_2, &reg);
+       t = rt2x00_get_field32(reg, TX_PWR_CFG_2B_MCS6_MCS7);
+       rt2x00_set_field32(&pwreg, TX_PWR_CFG_7B_MCS7, t);
+       rt2800_register_write(rt2x00dev, TX_PWR_CFG_7, pwreg);
+
+       /* For MCS 15 use value from MCS 14 */
+       pwreg = 0;
+       rt2800_register_read(rt2x00dev, TX_PWR_CFG_3, &reg);
+       t = rt2x00_get_field32(reg, TX_PWR_CFG_3B_MCS14);
+       rt2x00_set_field32(&pwreg, TX_PWR_CFG_8B_MCS15, t);
+       rt2800_register_write(rt2x00dev, TX_PWR_CFG_8, pwreg);
+
+       /* For STBC MCS 7 use value from STBC MCS 6 */
+       pwreg = 0;
+       rt2800_register_read(rt2x00dev, TX_PWR_CFG_4, &reg);
+       t = rt2x00_get_field32(reg, TX_PWR_CFG_4B_STBC_MCS6);
+       rt2x00_set_field32(&pwreg, TX_PWR_CFG_9B_STBC_MCS7, t);
+       rt2800_register_write(rt2x00dev, TX_PWR_CFG_9, pwreg);
+
+       rt2800_config_alc(rt2x00dev, chan, power_level);
+
+       /* TODO: temperature compensation code! */
+}
+
 /*
  * We configure transmit power using MAC TX_PWR_CFG_{0,...,N} registers and
  * BBP R1 register. TX_PWR_CFG_X allow to configure per rate TX power values,
@@ -4321,6 +4810,8 @@ static void rt2800_config_txpower(struct rt2x00_dev *rt2x00dev,
 {
        if (rt2x00_rt(rt2x00dev, RT3593))
                rt2800_config_txpower_rt3593(rt2x00dev, chan, power_level);
+       else if (rt2x00_rt(rt2x00dev, RT6352))
+               rt2800_config_txpower_rt6352(rt2x00dev, chan, power_level);
        else
                rt2800_config_txpower_rt28xx(rt2x00dev, chan, power_level);
 }
@@ -4336,6 +4827,7 @@ void rt2800_vco_calibration(struct rt2x00_dev *rt2x00dev)
 {
        u32     tx_pin;
        u8      rfcsr;
+       unsigned long min_sleep = 0;
 
        /*
         * A voltage-controlled oscillator(VCO) is an electronic oscillator
@@ -4374,6 +4866,15 @@ void rt2800_vco_calibration(struct rt2x00_dev *rt2x00dev)
                rt2800_rfcsr_read(rt2x00dev, 3, &rfcsr);
                rt2x00_set_field8(&rfcsr, RFCSR3_VCOCAL_EN, 1);
                rt2800_rfcsr_write(rt2x00dev, 3, rfcsr);
+               min_sleep = 1000;
+               break;
+       case RF7620:
+               rt2800_rfcsr_write(rt2x00dev, 5, 0x40);
+               rt2800_rfcsr_write(rt2x00dev, 4, 0x0C);
+               rt2800_rfcsr_read(rt2x00dev, 4, &rfcsr);
+               rt2x00_set_field8(&rfcsr, RFCSR4_VCOCAL_EN, 1);
+               rt2800_rfcsr_write(rt2x00dev, 4, rfcsr);
+               min_sleep = 2000;
                break;
        default:
                WARN_ONCE(1, "Not supported RF chipet %x for VCO recalibration",
@@ -4381,7 +4882,8 @@ void rt2800_vco_calibration(struct rt2x00_dev *rt2x00dev)
                return;
        }
 
-       usleep_range(1000, 1500);
+       if (min_sleep > 0)
+               usleep_range(min_sleep, min_sleep * 2);
 
        rt2800_register_read(rt2x00dev, TX_PIN_CFG, &tx_pin);
        if (rt2x00dev->rf_channel <= 14) {
@@ -4413,6 +4915,42 @@ void rt2800_vco_calibration(struct rt2x00_dev *rt2x00dev)
        }
        rt2800_register_write(rt2x00dev, TX_PIN_CFG, tx_pin);
 
+       if (rt2x00_rt(rt2x00dev, RT6352)) {
+               if (rt2x00dev->default_ant.tx_chain_num == 1) {
+                       rt2800_bbp_write(rt2x00dev, 91, 0x07);
+                       rt2800_bbp_write(rt2x00dev, 95, 0x1A);
+                       rt2800_bbp_write(rt2x00dev, 195, 128);
+                       rt2800_bbp_write(rt2x00dev, 196, 0xA0);
+                       rt2800_bbp_write(rt2x00dev, 195, 170);
+                       rt2800_bbp_write(rt2x00dev, 196, 0x12);
+                       rt2800_bbp_write(rt2x00dev, 195, 171);
+                       rt2800_bbp_write(rt2x00dev, 196, 0x10);
+               } else {
+                       rt2800_bbp_write(rt2x00dev, 91, 0x06);
+                       rt2800_bbp_write(rt2x00dev, 95, 0x9A);
+                       rt2800_bbp_write(rt2x00dev, 195, 128);
+                       rt2800_bbp_write(rt2x00dev, 196, 0xE0);
+                       rt2800_bbp_write(rt2x00dev, 195, 170);
+                       rt2800_bbp_write(rt2x00dev, 196, 0x30);
+                       rt2800_bbp_write(rt2x00dev, 195, 171);
+                       rt2800_bbp_write(rt2x00dev, 196, 0x30);
+               }
+
+               if (rt2x00_has_cap_external_lna_bg(rt2x00dev)) {
+                       rt2800_bbp_write(rt2x00dev, 75, 0x60);
+                       rt2800_bbp_write(rt2x00dev, 76, 0x44);
+                       rt2800_bbp_write(rt2x00dev, 79, 0x1C);
+                       rt2800_bbp_write(rt2x00dev, 80, 0x0C);
+                       rt2800_bbp_write(rt2x00dev, 82, 0xB6);
+               }
+
+               /* On 11A we should delay and wait for the RF/BBP to become
+                * stable, and the appropriate time is 1000 microseconds.
+                * 2005/06/05 - on 11G we also need this delay time,
+                * otherwise it is difficult to pass WHQL.
+                */
+               usleep_range(1000, 1500);
+       }
 }
 EXPORT_SYMBOL_GPL(rt2800_vco_calibration);
 
@@ -4511,7 +5049,8 @@ static u8 rt2800_get_default_vgc(struct rt2x00_dev *rt2x00dev)
                    rt2x00_rt(rt2x00dev, RT3593) ||
                    rt2x00_rt(rt2x00dev, RT5390) ||
                    rt2x00_rt(rt2x00dev, RT5392) ||
-                   rt2x00_rt(rt2x00dev, RT5592))
+                   rt2x00_rt(rt2x00dev, RT5592) ||
+                   rt2x00_rt(rt2x00dev, RT6352))
                        vgc = 0x1c + (2 * rt2x00dev->lna_gain);
                else
                        vgc = 0x2e + rt2x00dev->lna_gain;
@@ -4738,7 +5277,8 @@ static int rt2800_init_registers(struct rt2x00_dev *rt2x00dev)
                                              0x00000000);
                }
        } else if (rt2x00_rt(rt2x00dev, RT5390) ||
-                  rt2x00_rt(rt2x00dev, RT5392)) {
+                  rt2x00_rt(rt2x00dev, RT5392) ||
+                  rt2x00_rt(rt2x00dev, RT6352)) {
                rt2800_register_write(rt2x00dev, TX_SW_CFG0, 0x00000404);
                rt2800_register_write(rt2x00dev, TX_SW_CFG1, 0x00080606);
                rt2800_register_write(rt2x00dev, TX_SW_CFG2, 0x00000000);
@@ -4748,6 +5288,24 @@ static int rt2800_init_registers(struct rt2x00_dev *rt2x00dev)
                rt2800_register_write(rt2x00dev, TX_SW_CFG2, 0x00000000);
        } else if (rt2x00_rt(rt2x00dev, RT5350)) {
                rt2800_register_write(rt2x00dev, TX_SW_CFG0, 0x00000404);
+       } else if (rt2x00_rt(rt2x00dev, RT6352)) {
+               rt2800_register_write(rt2x00dev, TX_SW_CFG0, 0x00000401);
+               rt2800_register_write(rt2x00dev, TX_SW_CFG1, 0x000C0000);
+               rt2800_register_write(rt2x00dev, TX_SW_CFG2, 0x00000000);
+               rt2800_register_write(rt2x00dev, MIMO_PS_CFG, 0x00000002);
+               rt2800_register_write(rt2x00dev, TX_PIN_CFG, 0x00150F0F);
+               rt2800_register_write(rt2x00dev, TX_ALC_VGA3, 0x06060606);
+               rt2800_register_write(rt2x00dev, TX0_BB_GAIN_ATTEN, 0x0);
+               rt2800_register_write(rt2x00dev, TX1_BB_GAIN_ATTEN, 0x0);
+               rt2800_register_write(rt2x00dev, TX0_RF_GAIN_ATTEN, 0x6C6C666C);
+               rt2800_register_write(rt2x00dev, TX1_RF_GAIN_ATTEN, 0x6C6C666C);
+               rt2800_register_write(rt2x00dev, TX0_RF_GAIN_CORRECT,
+                                     0x3630363A);
+               rt2800_register_write(rt2x00dev, TX1_RF_GAIN_CORRECT,
+                                     0x3630363A);
+               rt2800_register_read(rt2x00dev, TX_ALC_CFG_1, &reg);
+               rt2x00_set_field32(&reg, TX_ALC_CFG_1_ROS_BUSY_EN, 0);
+               rt2800_register_write(rt2x00dev, TX_ALC_CFG_1, reg);
        } else {
                rt2800_register_write(rt2x00dev, TX_SW_CFG0, 0x00000000);
                rt2800_register_write(rt2x00dev, TX_SW_CFG1, 0x00080606);
@@ -5729,6 +6287,231 @@ static void rt2800_init_bbp_5592(struct rt2x00_dev *rt2x00dev)
                rt2800_bbp_write(rt2x00dev, 103, 0xc0);
 }
 
+static void rt2800_bbp_glrt_write(struct rt2x00_dev *rt2x00dev,
+                                 const u8 reg, const u8 value)
+{
+       rt2800_bbp_write(rt2x00dev, 195, reg);
+       rt2800_bbp_write(rt2x00dev, 196, value);
+}
+
+static void rt2800_bbp_dcoc_write(struct rt2x00_dev *rt2x00dev,
+                                 const u8 reg, const u8 value)
+{
+       rt2800_bbp_write(rt2x00dev, 158, reg);
+       rt2800_bbp_write(rt2x00dev, 159, value);
+}
+
+static void rt2800_bbp_dcoc_read(struct rt2x00_dev *rt2x00dev,
+                                const u8 reg, u8 *value)
+{
+       rt2800_bbp_write(rt2x00dev, 158, reg);
+       rt2800_bbp_read(rt2x00dev, 159, value);
+}
+
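The GLRT and DCOC tables are reached through register-pair windows: BBP 195/196 latch an address and then write data for GLRT, and BBP 158/159 do the same for DCOC (with a read of 159 returning the windowed value). A standalone sketch of this indirect access pattern:

#include <stdint.h>
#include <stdio.h>

static uint8_t bbp[256];	/* stand-in for the BBP register file */
static uint8_t glrt[256];	/* table behind the 195/196 window */

/* Writing the data register commits to the address most recently
 * latched in the address register.
 */
static void bbp_write(unsigned int reg, uint8_t val)
{
	bbp[reg] = val;
	if (reg == 196)
		glrt[bbp[195]] = val;
}

int main(void)
{
	bbp_write(195, 128);	/* latch GLRT address 128 */
	bbp_write(196, 0xE0);	/* write GLRT[128], as in the table above */
	printf("GLRT[128]=0x%02x\n", glrt[128]);
	return 0;
}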
+static void rt2800_init_bbp_6352(struct rt2x00_dev *rt2x00dev)
+{
+       u8 bbp;
+
+       /* Apply Maximum Likelihood Detection (MLD) for 2 stream case */
+       rt2800_bbp_read(rt2x00dev, 105, &bbp);
+       rt2x00_set_field8(&bbp, BBP105_MLD,
+                         rt2x00dev->default_ant.rx_chain_num == 2);
+       rt2800_bbp_write(rt2x00dev, 105, bbp);
+
+       /* Avoid data loss and CRC errors */
+       rt2800_bbp4_mac_if_ctrl(rt2x00dev);
+
+       /* Fix I/Q swap issue */
+       rt2800_bbp_read(rt2x00dev, 1, &bbp);
+       bbp |= 0x04;
+       rt2800_bbp_write(rt2x00dev, 1, bbp);
+
+       /* BBP for G band */
+       rt2800_bbp_write(rt2x00dev, 3, 0x08);
+       rt2800_bbp_write(rt2x00dev, 4, 0x00); /* rt2800_bbp4_mac_if_ctrl? */
+       rt2800_bbp_write(rt2x00dev, 6, 0x08);
+       rt2800_bbp_write(rt2x00dev, 14, 0x09);
+       rt2800_bbp_write(rt2x00dev, 15, 0xFF);
+       rt2800_bbp_write(rt2x00dev, 16, 0x01);
+       rt2800_bbp_write(rt2x00dev, 20, 0x06);
+       rt2800_bbp_write(rt2x00dev, 21, 0x00);
+       rt2800_bbp_write(rt2x00dev, 22, 0x00);
+       rt2800_bbp_write(rt2x00dev, 27, 0x00);
+       rt2800_bbp_write(rt2x00dev, 28, 0x00);
+       rt2800_bbp_write(rt2x00dev, 30, 0x00);
+       rt2800_bbp_write(rt2x00dev, 31, 0x48);
+       rt2800_bbp_write(rt2x00dev, 47, 0x40);
+       rt2800_bbp_write(rt2x00dev, 62, 0x00);
+       rt2800_bbp_write(rt2x00dev, 63, 0x00);
+       rt2800_bbp_write(rt2x00dev, 64, 0x00);
+       rt2800_bbp_write(rt2x00dev, 65, 0x2C);
+       rt2800_bbp_write(rt2x00dev, 66, 0x1C);
+       rt2800_bbp_write(rt2x00dev, 67, 0x20);
+       rt2800_bbp_write(rt2x00dev, 68, 0xDD);
+       rt2800_bbp_write(rt2x00dev, 69, 0x10);
+       rt2800_bbp_write(rt2x00dev, 70, 0x05);
+       rt2800_bbp_write(rt2x00dev, 73, 0x18);
+       rt2800_bbp_write(rt2x00dev, 74, 0x0F);
+       rt2800_bbp_write(rt2x00dev, 75, 0x60);
+       rt2800_bbp_write(rt2x00dev, 76, 0x44);
+       rt2800_bbp_write(rt2x00dev, 77, 0x59);
+       rt2800_bbp_write(rt2x00dev, 78, 0x1E);
+       rt2800_bbp_write(rt2x00dev, 79, 0x1C);
+       rt2800_bbp_write(rt2x00dev, 80, 0x0C);
+       rt2800_bbp_write(rt2x00dev, 81, 0x3A);
+       rt2800_bbp_write(rt2x00dev, 82, 0xB6);
+       rt2800_bbp_write(rt2x00dev, 83, 0x9A);
+       rt2800_bbp_write(rt2x00dev, 84, 0x9A);
+       rt2800_bbp_write(rt2x00dev, 86, 0x38);
+       rt2800_bbp_write(rt2x00dev, 88, 0x90);
+       rt2800_bbp_write(rt2x00dev, 91, 0x04);
+       rt2800_bbp_write(rt2x00dev, 92, 0x02);
+       rt2800_bbp_write(rt2x00dev, 95, 0x9A);
+       rt2800_bbp_write(rt2x00dev, 96, 0x00);
+       rt2800_bbp_write(rt2x00dev, 103, 0xC0);
+       rt2800_bbp_write(rt2x00dev, 104, 0x92);
+       /* FIXME: BBP105 overwrite */
+       rt2800_bbp_write(rt2x00dev, 105, 0x3C);
+       rt2800_bbp_write(rt2x00dev, 106, 0x12);
+       rt2800_bbp_write(rt2x00dev, 109, 0x00);
+       rt2800_bbp_write(rt2x00dev, 134, 0x10);
+       rt2800_bbp_write(rt2x00dev, 135, 0xA6);
+       rt2800_bbp_write(rt2x00dev, 137, 0x04);
+       rt2800_bbp_write(rt2x00dev, 142, 0x30);
+       rt2800_bbp_write(rt2x00dev, 143, 0xF7);
+       rt2800_bbp_write(rt2x00dev, 160, 0xEC);
+       rt2800_bbp_write(rt2x00dev, 161, 0xC4);
+       rt2800_bbp_write(rt2x00dev, 162, 0x77);
+       rt2800_bbp_write(rt2x00dev, 163, 0xF9);
+       rt2800_bbp_write(rt2x00dev, 164, 0x00);
+       rt2800_bbp_write(rt2x00dev, 165, 0x00);
+       rt2800_bbp_write(rt2x00dev, 186, 0x00);
+       rt2800_bbp_write(rt2x00dev, 187, 0x00);
+       rt2800_bbp_write(rt2x00dev, 188, 0x00);
+       rt2800_bbp_write(rt2x00dev, 186, 0x00);
+       rt2800_bbp_write(rt2x00dev, 187, 0x01);
+       rt2800_bbp_write(rt2x00dev, 188, 0x00);
+       rt2800_bbp_write(rt2x00dev, 189, 0x00);
+
+       rt2800_bbp_write(rt2x00dev, 91, 0x06);
+       rt2800_bbp_write(rt2x00dev, 92, 0x04);
+       rt2800_bbp_write(rt2x00dev, 93, 0x54);
+       rt2800_bbp_write(rt2x00dev, 99, 0x50);
+       rt2800_bbp_write(rt2x00dev, 148, 0x84);
+       rt2800_bbp_write(rt2x00dev, 167, 0x80);
+       rt2800_bbp_write(rt2x00dev, 178, 0xFF);
+       rt2800_bbp_write(rt2x00dev, 106, 0x13);
+
+       /* BBP for G band GLRT function (BBP_128 ~ BBP_221) */
+       rt2800_bbp_glrt_write(rt2x00dev, 0, 0x00);
+       rt2800_bbp_glrt_write(rt2x00dev, 1, 0x14);
+       rt2800_bbp_glrt_write(rt2x00dev, 2, 0x20);
+       rt2800_bbp_glrt_write(rt2x00dev, 3, 0x0A);
+       rt2800_bbp_glrt_write(rt2x00dev, 10, 0x16);
+       rt2800_bbp_glrt_write(rt2x00dev, 11, 0x06);
+       rt2800_bbp_glrt_write(rt2x00dev, 12, 0x02);
+       rt2800_bbp_glrt_write(rt2x00dev, 13, 0x07);
+       rt2800_bbp_glrt_write(rt2x00dev, 14, 0x05);
+       rt2800_bbp_glrt_write(rt2x00dev, 15, 0x09);
+       rt2800_bbp_glrt_write(rt2x00dev, 16, 0x20);
+       rt2800_bbp_glrt_write(rt2x00dev, 17, 0x08);
+       rt2800_bbp_glrt_write(rt2x00dev, 18, 0x4A);
+       rt2800_bbp_glrt_write(rt2x00dev, 19, 0x00);
+       rt2800_bbp_glrt_write(rt2x00dev, 20, 0x00);
+       rt2800_bbp_glrt_write(rt2x00dev, 128, 0xE0);
+       rt2800_bbp_glrt_write(rt2x00dev, 129, 0x1F);
+       rt2800_bbp_glrt_write(rt2x00dev, 130, 0x4F);
+       rt2800_bbp_glrt_write(rt2x00dev, 131, 0x32);
+       rt2800_bbp_glrt_write(rt2x00dev, 132, 0x08);
+       rt2800_bbp_glrt_write(rt2x00dev, 133, 0x28);
+       rt2800_bbp_glrt_write(rt2x00dev, 134, 0x19);
+       rt2800_bbp_glrt_write(rt2x00dev, 135, 0x0A);
+       rt2800_bbp_glrt_write(rt2x00dev, 138, 0x16);
+       rt2800_bbp_glrt_write(rt2x00dev, 139, 0x10);
+       rt2800_bbp_glrt_write(rt2x00dev, 140, 0x10);
+       rt2800_bbp_glrt_write(rt2x00dev, 141, 0x1A);
+       rt2800_bbp_glrt_write(rt2x00dev, 142, 0x36);
+       rt2800_bbp_glrt_write(rt2x00dev, 143, 0x2C);
+       rt2800_bbp_glrt_write(rt2x00dev, 144, 0x26);
+       rt2800_bbp_glrt_write(rt2x00dev, 145, 0x24);
+       rt2800_bbp_glrt_write(rt2x00dev, 146, 0x42);
+       rt2800_bbp_glrt_write(rt2x00dev, 147, 0x40);
+       rt2800_bbp_glrt_write(rt2x00dev, 148, 0x30);
+       rt2800_bbp_glrt_write(rt2x00dev, 149, 0x29);
+       rt2800_bbp_glrt_write(rt2x00dev, 150, 0x4C);
+       rt2800_bbp_glrt_write(rt2x00dev, 151, 0x46);
+       rt2800_bbp_glrt_write(rt2x00dev, 152, 0x3D);
+       rt2800_bbp_glrt_write(rt2x00dev, 153, 0x40);
+       rt2800_bbp_glrt_write(rt2x00dev, 154, 0x3E);
+       rt2800_bbp_glrt_write(rt2x00dev, 155, 0x38);
+       rt2800_bbp_glrt_write(rt2x00dev, 156, 0x3D);
+       rt2800_bbp_glrt_write(rt2x00dev, 157, 0x2F);
+       rt2800_bbp_glrt_write(rt2x00dev, 158, 0x3C);
+       rt2800_bbp_glrt_write(rt2x00dev, 159, 0x34);
+       rt2800_bbp_glrt_write(rt2x00dev, 160, 0x2C);
+       rt2800_bbp_glrt_write(rt2x00dev, 161, 0x2F);
+       rt2800_bbp_glrt_write(rt2x00dev, 162, 0x3C);
+       rt2800_bbp_glrt_write(rt2x00dev, 163, 0x35);
+       rt2800_bbp_glrt_write(rt2x00dev, 164, 0x2E);
+       rt2800_bbp_glrt_write(rt2x00dev, 165, 0x2F);
+       rt2800_bbp_glrt_write(rt2x00dev, 166, 0x49);
+       rt2800_bbp_glrt_write(rt2x00dev, 167, 0x41);
+       rt2800_bbp_glrt_write(rt2x00dev, 168, 0x36);
+       rt2800_bbp_glrt_write(rt2x00dev, 169, 0x39);
+       rt2800_bbp_glrt_write(rt2x00dev, 170, 0x30);
+       rt2800_bbp_glrt_write(rt2x00dev, 171, 0x30);
+       rt2800_bbp_glrt_write(rt2x00dev, 172, 0x0E);
+       rt2800_bbp_glrt_write(rt2x00dev, 173, 0x0D);
+       rt2800_bbp_glrt_write(rt2x00dev, 174, 0x28);
+       rt2800_bbp_glrt_write(rt2x00dev, 175, 0x21);
+       rt2800_bbp_glrt_write(rt2x00dev, 176, 0x1C);
+       rt2800_bbp_glrt_write(rt2x00dev, 177, 0x16);
+       rt2800_bbp_glrt_write(rt2x00dev, 178, 0x50);
+       rt2800_bbp_glrt_write(rt2x00dev, 179, 0x4A);
+       rt2800_bbp_glrt_write(rt2x00dev, 180, 0x43);
+       rt2800_bbp_glrt_write(rt2x00dev, 181, 0x50);
+       rt2800_bbp_glrt_write(rt2x00dev, 182, 0x10);
+       rt2800_bbp_glrt_write(rt2x00dev, 183, 0x10);
+       rt2800_bbp_glrt_write(rt2x00dev, 184, 0x10);
+       rt2800_bbp_glrt_write(rt2x00dev, 185, 0x10);
+       rt2800_bbp_glrt_write(rt2x00dev, 200, 0x7D);
+       rt2800_bbp_glrt_write(rt2x00dev, 201, 0x14);
+       rt2800_bbp_glrt_write(rt2x00dev, 202, 0x32);
+       rt2800_bbp_glrt_write(rt2x00dev, 203, 0x2C);
+       rt2800_bbp_glrt_write(rt2x00dev, 204, 0x36);
+       rt2800_bbp_glrt_write(rt2x00dev, 205, 0x4C);
+       rt2800_bbp_glrt_write(rt2x00dev, 206, 0x43);
+       rt2800_bbp_glrt_write(rt2x00dev, 207, 0x2C);
+       rt2800_bbp_glrt_write(rt2x00dev, 208, 0x2E);
+       rt2800_bbp_glrt_write(rt2x00dev, 209, 0x36);
+       rt2800_bbp_glrt_write(rt2x00dev, 210, 0x30);
+       rt2800_bbp_glrt_write(rt2x00dev, 211, 0x6E);
+
+       /* BBP for G band DCOC function */
+       rt2800_bbp_dcoc_write(rt2x00dev, 140, 0x0C);
+       rt2800_bbp_dcoc_write(rt2x00dev, 141, 0x00);
+       rt2800_bbp_dcoc_write(rt2x00dev, 142, 0x10);
+       rt2800_bbp_dcoc_write(rt2x00dev, 143, 0x10);
+       rt2800_bbp_dcoc_write(rt2x00dev, 144, 0x10);
+       rt2800_bbp_dcoc_write(rt2x00dev, 145, 0x10);
+       rt2800_bbp_dcoc_write(rt2x00dev, 146, 0x08);
+       rt2800_bbp_dcoc_write(rt2x00dev, 147, 0x40);
+       rt2800_bbp_dcoc_write(rt2x00dev, 148, 0x04);
+       rt2800_bbp_dcoc_write(rt2x00dev, 149, 0x04);
+       rt2800_bbp_dcoc_write(rt2x00dev, 150, 0x08);
+       rt2800_bbp_dcoc_write(rt2x00dev, 151, 0x08);
+       rt2800_bbp_dcoc_write(rt2x00dev, 152, 0x03);
+       rt2800_bbp_dcoc_write(rt2x00dev, 153, 0x03);
+       rt2800_bbp_dcoc_write(rt2x00dev, 154, 0x03);
+       rt2800_bbp_dcoc_write(rt2x00dev, 155, 0x02);
+       rt2800_bbp_dcoc_write(rt2x00dev, 156, 0x40);
+       rt2800_bbp_dcoc_write(rt2x00dev, 157, 0x40);
+       rt2800_bbp_dcoc_write(rt2x00dev, 158, 0x64);
+       rt2800_bbp_dcoc_write(rt2x00dev, 159, 0x64);
+
+       rt2800_bbp4_mac_if_ctrl(rt2x00dev);
+}
+
 static void rt2800_init_bbp(struct rt2x00_dev *rt2x00dev)
 {
        unsigned int i;
@@ -5773,6 +6556,9 @@ static void rt2800_init_bbp(struct rt2x00_dev *rt2x00dev)
        case RT5592:
                rt2800_init_bbp_5592(rt2x00dev);
                return;
+       case RT6352:
+               rt2800_init_bbp_6352(rt2x00dev);
+               break;
        }
 
        for (i = 0; i < EEPROM_BBP_SIZE; i++) {
@@ -6228,9 +7014,9 @@ static void rt2800_init_rfcsr_3290(struct rt2x00_dev *rt2x00dev)
 
 static void rt2800_init_rfcsr_3352(struct rt2x00_dev *rt2x00dev)
 {
-       int tx0_int_pa = test_bit(CAPABILITY_INTERNAL_PA_TX0,
+       int tx0_ext_pa = test_bit(CAPABILITY_EXTERNAL_PA_TX0,
                                  &rt2x00dev->cap_flags);
-       int tx1_int_pa = test_bit(CAPABILITY_INTERNAL_PA_TX1,
+       int tx1_ext_pa = test_bit(CAPABILITY_EXTERNAL_PA_TX1,
                                  &rt2x00dev->cap_flags);
        u8 rfcsr;
 
@@ -6270,9 +7056,9 @@ static void rt2800_init_rfcsr_3352(struct rt2x00_dev *rt2x00dev)
        rt2800_rfcsr_write(rt2x00dev, 32, 0x80);
        rt2800_rfcsr_write(rt2x00dev, 33, 0x00);
        rfcsr = 0x01;
-       if (!tx0_int_pa)
+       if (tx0_ext_pa)
                rt2x00_set_field8(&rfcsr, RFCSR34_TX0_EXT_PA, 1);
-       if (!tx1_int_pa)
+       if (tx1_ext_pa)
                rt2x00_set_field8(&rfcsr, RFCSR34_TX1_EXT_PA, 1);
        rt2800_rfcsr_write(rt2x00dev, 34, rfcsr);
        rt2800_rfcsr_write(rt2x00dev, 35, 0x03);
@@ -6282,13 +7068,13 @@ static void rt2800_init_rfcsr_3352(struct rt2x00_dev *rt2x00dev)
        rt2800_rfcsr_write(rt2x00dev, 39, 0xc5);
        rt2800_rfcsr_write(rt2x00dev, 40, 0x33);
        rfcsr = 0x52;
-       if (tx0_int_pa) {
+       if (!tx0_ext_pa) {
                rt2x00_set_field8(&rfcsr, RFCSR41_BIT1, 1);
                rt2x00_set_field8(&rfcsr, RFCSR41_BIT4, 1);
        }
        rt2800_rfcsr_write(rt2x00dev, 41, rfcsr);
        rfcsr = 0x52;
-       if (tx1_int_pa) {
+       if (!tx1_ext_pa) {
                rt2x00_set_field8(&rfcsr, RFCSR42_BIT1, 1);
                rt2x00_set_field8(&rfcsr, RFCSR42_BIT4, 1);
        }
@@ -6301,19 +7087,19 @@ static void rt2800_init_rfcsr_3352(struct rt2x00_dev *rt2x00dev)
        rt2800_rfcsr_write(rt2x00dev, 48, 0x14);
        rt2800_rfcsr_write(rt2x00dev, 49, 0x00);
        rfcsr = 0x2d;
-       if (!tx0_int_pa)
+       if (tx0_ext_pa)
                rt2x00_set_field8(&rfcsr, RFCSR50_TX0_EXT_PA, 1);
-       if (!tx1_int_pa)
+       if (tx1_ext_pa)
                rt2x00_set_field8(&rfcsr, RFCSR50_TX1_EXT_PA, 1);
        rt2800_rfcsr_write(rt2x00dev, 50, rfcsr);
-       rt2800_rfcsr_write(rt2x00dev, 51, (tx0_int_pa ? 0x7f : 0x52));
-       rt2800_rfcsr_write(rt2x00dev, 52, (tx0_int_pa ? 0x00 : 0xc0));
-       rt2800_rfcsr_write(rt2x00dev, 53, (tx0_int_pa ? 0x52 : 0xd2));
-       rt2800_rfcsr_write(rt2x00dev, 54, (tx0_int_pa ? 0x1b : 0xc0));
-       rt2800_rfcsr_write(rt2x00dev, 55, (tx1_int_pa ? 0x7f : 0x52));
-       rt2800_rfcsr_write(rt2x00dev, 56, (tx1_int_pa ? 0x00 : 0xc0));
-       rt2800_rfcsr_write(rt2x00dev, 57, (tx0_int_pa ? 0x52 : 0x49));
-       rt2800_rfcsr_write(rt2x00dev, 58, (tx1_int_pa ? 0x1b : 0xc0));
+       rt2800_rfcsr_write(rt2x00dev, 51, (tx0_ext_pa ? 0x52 : 0x7f));
+       rt2800_rfcsr_write(rt2x00dev, 52, (tx0_ext_pa ? 0xc0 : 0x00));
+       rt2800_rfcsr_write(rt2x00dev, 53, (tx0_ext_pa ? 0xd2 : 0x52));
+       rt2800_rfcsr_write(rt2x00dev, 54, (tx0_ext_pa ? 0xc0 : 0x1b));
+       rt2800_rfcsr_write(rt2x00dev, 55, (tx1_ext_pa ? 0x52 : 0x7f));
+       rt2800_rfcsr_write(rt2x00dev, 56, (tx1_ext_pa ? 0xc0 : 0x00));
+       rt2800_rfcsr_write(rt2x00dev, 57, (tx0_ext_pa ? 0x49 : 0x52));
+       rt2800_rfcsr_write(rt2x00dev, 58, (tx1_ext_pa ? 0xc0 : 0x1b));
        rt2800_rfcsr_write(rt2x00dev, 59, 0x00);
        rt2800_rfcsr_write(rt2x00dev, 60, 0x00);
        rt2800_rfcsr_write(rt2x00dev, 61, 0x00);
@@ -6844,6 +7630,615 @@ static void rt2800_init_rfcsr_5592(struct rt2x00_dev *rt2x00dev)
        rt2800_led_open_drain_enable(rt2x00dev);
 }
 
+static void rt2800_bbp_core_soft_reset(struct rt2x00_dev *rt2x00dev,
+                                      bool set_bw, bool is_ht40)
+{
+       u8 bbp_val;
+
+       rt2800_bbp_read(rt2x00dev, 21, &bbp_val);
+       bbp_val |= 0x1;
+       rt2800_bbp_write(rt2x00dev, 21, bbp_val);
+       usleep_range(100, 200);
+
+       if (set_bw) {
+               rt2800_bbp_read(rt2x00dev, 4, &bbp_val);
+               rt2x00_set_field8(&bbp_val, BBP4_BANDWIDTH, 2 * is_ht40);
+               rt2800_bbp_write(rt2x00dev, 4, bbp_val);
+               usleep_range(100, 200);
+       }
+
+       rt2800_bbp_read(rt2x00dev, 21, &bbp_val);
+       bbp_val &= (~0x1);
+       rt2800_bbp_write(rt2x00dev, 21, bbp_val);
+       usleep_range(100, 200);
+}
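
BBP register 21 bit 0 gates the soft reset above, while the BBP 4 bandwidth field takes 0 for 20 MHz or 2 for 40 MHz (hence the 2 * is_ht40). A minimal usage sketch, assuming the caller already knows the channel width:

/* Sketch: soft reset and latch 40 MHz mode in the same pass. */
rt2800_bbp_core_soft_reset(rt2x00dev, true, true);

/* Sketch: plain soft reset, bandwidth field left untouched. */
rt2800_bbp_core_soft_reset(rt2x00dev, false, false);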
+
+static int rt2800_rf_lp_config(struct rt2x00_dev *rt2x00dev, bool btxcal)
+{
+       u8 rf_val;
+
+       if (btxcal)
+               rt2800_register_write(rt2x00dev, RF_CONTROL0, 0x04);
+       else
+               rt2800_register_write(rt2x00dev, RF_CONTROL0, 0x02);
+
+       rt2800_register_write(rt2x00dev, RF_BYPASS0, 0x06);
+
+       rt2800_rfcsr_read_bank(rt2x00dev, 5, 17, &rf_val);
+       rf_val |= 0x80;
+       rt2800_rfcsr_write_bank(rt2x00dev, 5, 17, rf_val);
+
+       if (btxcal) {
+               rt2800_rfcsr_write_bank(rt2x00dev, 5, 18, 0xC1);
+               rt2800_rfcsr_write_bank(rt2x00dev, 5, 19, 0x20);
+               rt2800_rfcsr_write_bank(rt2x00dev, 5, 20, 0x02);
+               rt2800_rfcsr_read_bank(rt2x00dev, 5, 3, &rf_val);
+               rf_val &= (~0x3F);
+               rf_val |= 0x3F;
+               rt2800_rfcsr_write_bank(rt2x00dev, 5, 3, rf_val);
+               rt2800_rfcsr_read_bank(rt2x00dev, 5, 4, &rf_val);
+               rf_val &= (~0x3F);
+               rf_val |= 0x3F;
+               rt2800_rfcsr_write_bank(rt2x00dev, 5, 4, rf_val);
+               rt2800_rfcsr_write_bank(rt2x00dev, 5, 5, 0x31);
+       } else {
+               rt2800_rfcsr_write_bank(rt2x00dev, 5, 18, 0xF1);
+               rt2800_rfcsr_write_bank(rt2x00dev, 5, 19, 0x18);
+               rt2800_rfcsr_write_bank(rt2x00dev, 5, 20, 0x02);
+               rt2800_rfcsr_read_bank(rt2x00dev, 5, 3, &rf_val);
+               rf_val &= (~0x3F);
+               rf_val |= 0x34;
+               rt2800_rfcsr_write_bank(rt2x00dev, 5, 3, rf_val);
+               rt2800_rfcsr_read_bank(rt2x00dev, 5, 4, &rf_val);
+               rf_val &= (~0x3F);
+               rf_val |= 0x34;
+               rt2800_rfcsr_write_bank(rt2x00dev, 5, 4, rf_val);
+       }
+
+       return 0;
+}
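
The clear-then-set sequences above are the read-modify-write idiom used throughout this patch; a generic sketch of the same pattern (the helper name is hypothetical, not part of the driver):

/* Hypothetical helper: read-modify-write a masked field in an RFCSR bank. */
static void rfcsr_bank_rmw(struct rt2x00_dev *rt2x00dev, u8 bank, u8 reg,
                           u8 mask, u8 value)
{
        u8 rf_val;

        rt2800_rfcsr_read_bank(rt2x00dev, bank, reg, &rf_val);
        rf_val &= ~mask;                /* clear the target field */
        rf_val |= (value & mask);       /* install the new bits */
        rt2800_rfcsr_write_bank(rt2x00dev, bank, reg, rf_val);
}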
+
+static char rt2800_lp_tx_filter_bw_cal(struct rt2x00_dev *rt2x00dev)
+{
+       unsigned int cnt;
+       u8 bbp_val;
+       char cal_val;
+
+       rt2800_bbp_dcoc_write(rt2x00dev, 0, 0x82);
+
+       cnt = 0;
+       do {
+               usleep_range(500, 2000);
+               rt2800_bbp_read(rt2x00dev, 159, &bbp_val);
+               if (bbp_val == 0x02 || cnt == 20)
+                       break;
+
+               cnt++;
+       } while (cnt < 20);
+
+       rt2800_bbp_dcoc_read(rt2x00dev, 0x39, &bbp_val);
+       cal_val = bbp_val & 0x7F;
+       if (cal_val >= 0x40)
+               cal_val -= 128;
+
+       return cal_val;
+}
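
The masking at the end of this helper decodes the 7-bit readback from DCOC register 0x39 as two's complement; a self-contained worked example of the same conversion (the raw value is illustrative only):

/* Illustration: 7-bit two's-complement decode, e.g. raw 0x45 -> -59. */
u8 raw = 0x45;                  /* assumed example register value */
char cal = raw & 0x7F;          /* keep the low 7 bits */

if (cal >= 0x40)                /* bit 6 set means negative */
        cal -= 128;             /* 0x45: 69 - 128 = -59 */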
+
+static void rt2800_bw_filter_calibration(struct rt2x00_dev *rt2x00dev,
+                                        bool btxcal)
+{
+       struct rt2800_drv_data *drv_data = rt2x00dev->drv_data;
+       u8 tx_agc_fc = 0, rx_agc_fc = 0, cmm_agc_fc;
+       u8 filter_target;
+       u8 tx_filter_target_20m = 0x09, tx_filter_target_40m = 0x02;
+       u8 rx_filter_target_20m = 0x27, rx_filter_target_40m = 0x31;
+       int loop = 0, is_ht40, cnt;
+       u8 bbp_val, rf_val;
+       char cal_r32_init, cal_r32_val, cal_diff;
+       u8 saverfb5r00, saverfb5r01, saverfb5r03, saverfb5r04, saverfb5r05;
+       u8 saverfb5r06, saverfb5r07;
+       u8 saverfb5r08, saverfb5r17, saverfb5r18, saverfb5r19, saverfb5r20;
+       u8 saverfb5r37, saverfb5r38, saverfb5r39, saverfb5r40, saverfb5r41;
+       u8 saverfb5r42, saverfb5r43, saverfb5r44, saverfb5r45, saverfb5r46;
+       u8 saverfb5r58, saverfb5r59;
+       u8 savebbp159r0, savebbp159r2, savebbpr23;
+       u32 MAC_RF_CONTROL0, MAC_RF_BYPASS0;
+
+       /* Save MAC registers */
+       rt2800_register_read(rt2x00dev, RF_CONTROL0, &MAC_RF_CONTROL0);
+       rt2800_register_read(rt2x00dev, RF_BYPASS0, &MAC_RF_BYPASS0);
+
+       /* save BBP registers */
+       rt2800_bbp_read(rt2x00dev, 23, &savebbpr23);
+
+       rt2800_bbp_dcoc_read(rt2x00dev, 0, &savebbp159r0);
+       rt2800_bbp_dcoc_read(rt2x00dev, 2, &savebbp159r2);
+
+       /* Save RF registers */
+       rt2800_rfcsr_read_bank(rt2x00dev, 5, 0, &saverfb5r00);
+       rt2800_rfcsr_read_bank(rt2x00dev, 5, 1, &saverfb5r01);
+       rt2800_rfcsr_read_bank(rt2x00dev, 5, 3, &saverfb5r03);
+       rt2800_rfcsr_read_bank(rt2x00dev, 5, 4, &saverfb5r04);
+       rt2800_rfcsr_read_bank(rt2x00dev, 5, 5, &saverfb5r05);
+       rt2800_rfcsr_read_bank(rt2x00dev, 5, 6, &saverfb5r06);
+       rt2800_rfcsr_read_bank(rt2x00dev, 5, 7, &saverfb5r07);
+       rt2800_rfcsr_read_bank(rt2x00dev, 5, 8, &saverfb5r08);
+       rt2800_rfcsr_read_bank(rt2x00dev, 5, 17, &saverfb5r17);
+       rt2800_rfcsr_read_bank(rt2x00dev, 5, 18, &saverfb5r18);
+       rt2800_rfcsr_read_bank(rt2x00dev, 5, 19, &saverfb5r19);
+       rt2800_rfcsr_read_bank(rt2x00dev, 5, 20, &saverfb5r20);
+
+       rt2800_rfcsr_read_bank(rt2x00dev, 5, 37, &saverfb5r37);
+       rt2800_rfcsr_read_bank(rt2x00dev, 5, 38, &saverfb5r38);
+       rt2800_rfcsr_read_bank(rt2x00dev, 5, 39, &saverfb5r39);
+       rt2800_rfcsr_read_bank(rt2x00dev, 5, 40, &saverfb5r40);
+       rt2800_rfcsr_read_bank(rt2x00dev, 5, 41, &saverfb5r41);
+       rt2800_rfcsr_read_bank(rt2x00dev, 5, 42, &saverfb5r42);
+       rt2800_rfcsr_read_bank(rt2x00dev, 5, 43, &saverfb5r43);
+       rt2800_rfcsr_read_bank(rt2x00dev, 5, 44, &saverfb5r44);
+       rt2800_rfcsr_read_bank(rt2x00dev, 5, 45, &saverfb5r45);
+       rt2800_rfcsr_read_bank(rt2x00dev, 5, 46, &saverfb5r46);
+
+       rt2800_rfcsr_read_bank(rt2x00dev, 5, 58, &saverfb5r58);
+       rt2800_rfcsr_read_bank(rt2x00dev, 5, 59, &saverfb5r59);
+
+       rt2800_rfcsr_read_bank(rt2x00dev, 5, 0, &rf_val);
+       rf_val |= 0x3;
+       rt2800_rfcsr_write_bank(rt2x00dev, 5, 0, rf_val);
+
+       rt2800_rfcsr_read_bank(rt2x00dev, 5, 1, &rf_val);
+       rf_val |= 0x1;
+       rt2800_rfcsr_write_bank(rt2x00dev, 5, 1, rf_val);
+
+       cnt = 0;
+       do {
+               usleep_range(500, 2000);
+               rt2800_rfcsr_read_bank(rt2x00dev, 5, 1, &rf_val);
+               if (((rf_val & 0x1) == 0x00) || (cnt == 40))
+                       break;
+               cnt++;
+       } while (cnt < 40);
+
+       rt2800_rfcsr_read_bank(rt2x00dev, 5, 0, &rf_val);
+       rf_val &= (~0x3);
+       rf_val |= 0x1;
+       rt2800_rfcsr_write_bank(rt2x00dev, 5, 0, rf_val);
+
+       /* I-3 */
+       rt2800_bbp_read(rt2x00dev, 23, &bbp_val);
+       bbp_val &= (~0x1F);
+       bbp_val |= 0x10;
+       rt2800_bbp_write(rt2x00dev, 23, bbp_val);
+
+       do {
+               /* I-4,5,6,7,8,9 */
+               if (loop == 0) {
+                       is_ht40 = false;
+
+                       if (btxcal)
+                               filter_target = tx_filter_target_20m;
+                       else
+                               filter_target = rx_filter_target_20m;
+               } else {
+                       is_ht40 = true;
+
+                       if (btxcal)
+                               filter_target = tx_filter_target_40m;
+                       else
+                               filter_target = rx_filter_target_40m;
+               }
+
+               rt2800_rfcsr_read_bank(rt2x00dev, 5, 8, &rf_val);
+               rf_val &= (~0x04);
+               if (loop == 1)
+                       rf_val |= 0x4;
+
+               rt2800_rfcsr_write_bank(rt2x00dev, 5, 8, rf_val);
+
+               rt2800_bbp_core_soft_reset(rt2x00dev, true, is_ht40);
+
+               rt2800_rf_lp_config(rt2x00dev, btxcal);
+               if (btxcal) {
+                       tx_agc_fc = 0;
+                       rt2800_rfcsr_read_bank(rt2x00dev, 5, 58, &rf_val);
+                       rf_val &= (~0x7F);
+                       rt2800_rfcsr_write_bank(rt2x00dev, 5, 58, rf_val);
+                       rt2800_rfcsr_read_bank(rt2x00dev, 5, 59, &rf_val);
+                       rf_val &= (~0x7F);
+                       rt2800_rfcsr_write_bank(rt2x00dev, 5, 59, rf_val);
+               } else {
+                       rx_agc_fc = 0;
+                       rt2800_rfcsr_read_bank(rt2x00dev, 5, 6, &rf_val);
+                       rf_val &= (~0x7F);
+                       rt2800_rfcsr_write_bank(rt2x00dev, 5, 6, rf_val);
+                       rt2800_rfcsr_read_bank(rt2x00dev, 5, 7, &rf_val);
+                       rf_val &= (~0x7F);
+                       rt2800_rfcsr_write_bank(rt2x00dev, 5, 7, rf_val);
+               }
+
+               usleep_range(1000, 2000);
+
+               rt2800_bbp_dcoc_read(rt2x00dev, 2, &bbp_val);
+               bbp_val &= (~0x6);
+               rt2800_bbp_dcoc_write(rt2x00dev, 2, bbp_val);
+
+               rt2800_bbp_core_soft_reset(rt2x00dev, false, is_ht40);
+
+               cal_r32_init = rt2800_lp_tx_filter_bw_cal(rt2x00dev);
+
+               rt2800_bbp_dcoc_read(rt2x00dev, 2, &bbp_val);
+               bbp_val |= 0x6;
+               rt2800_bbp_dcoc_write(rt2x00dev, 2, bbp_val);
+do_cal:
+               if (btxcal) {
+                       rt2800_rfcsr_read_bank(rt2x00dev, 5, 58, &rf_val);
+                       rf_val &= (~0x7F);
+                       rf_val |= tx_agc_fc;
+                       rt2800_rfcsr_write_bank(rt2x00dev, 5, 58, rf_val);
+                       rt2800_rfcsr_read_bank(rt2x00dev, 5, 59, &rf_val);
+                       rf_val &= (~0x7F);
+                       rf_val |= tx_agc_fc;
+                       rt2800_rfcsr_write_bank(rt2x00dev, 5, 59, rf_val);
+               } else {
+                       rt2800_rfcsr_read_bank(rt2x00dev, 5, 6, &rf_val);
+                       rf_val &= (~0x7F);
+                       rf_val |= rx_agc_fc;
+                       rt2800_rfcsr_write_bank(rt2x00dev, 5, 6, rf_val);
+                       rt2800_rfcsr_read_bank(rt2x00dev, 5, 7, &rf_val);
+                       rf_val &= (~0x7F);
+                       rf_val |= rx_agc_fc;
+                       rt2800_rfcsr_write_bank(rt2x00dev, 5, 7, rf_val);
+               }
+
+               usleep_range(500, 1000);
+
+               rt2800_bbp_core_soft_reset(rt2x00dev, false, is_ht40);
+
+               cal_r32_val = rt2800_lp_tx_filter_bw_cal(rt2x00dev);
+
+               cal_diff = cal_r32_init - cal_r32_val;
+
+               if (btxcal)
+                       cmm_agc_fc = tx_agc_fc;
+               else
+                       cmm_agc_fc = rx_agc_fc;
+
+               if (((cal_diff > filter_target) && (cmm_agc_fc == 0)) ||
+                   ((cal_diff < filter_target) && (cmm_agc_fc == 0x3f))) {
+                       if (btxcal)
+                               tx_agc_fc = 0;
+                       else
+                               rx_agc_fc = 0;
+               } else if ((cal_diff <= filter_target) && (cmm_agc_fc < 0x3f)) {
+                       if (btxcal)
+                               tx_agc_fc++;
+                       else
+                               rx_agc_fc++;
+                       goto do_cal;
+               }
+
+               if (btxcal) {
+                       if (loop == 0)
+                               drv_data->tx_calibration_bw20 = tx_agc_fc;
+                       else
+                               drv_data->tx_calibration_bw40 = tx_agc_fc;
+               } else {
+                       if (loop == 0)
+                               drv_data->rx_calibration_bw20 = rx_agc_fc;
+                       else
+                               drv_data->rx_calibration_bw40 = rx_agc_fc;
+               }
+
+               loop++;
+       } while (loop <= 1);
+
+       rt2800_rfcsr_write_bank(rt2x00dev, 5, 0, saverfb5r00);
+       rt2800_rfcsr_write_bank(rt2x00dev, 5, 1, saverfb5r01);
+       rt2800_rfcsr_write_bank(rt2x00dev, 5, 3, saverfb5r03);
+       rt2800_rfcsr_write_bank(rt2x00dev, 5, 4, saverfb5r04);
+       rt2800_rfcsr_write_bank(rt2x00dev, 5, 5, saverfb5r05);
+       rt2800_rfcsr_write_bank(rt2x00dev, 5, 6, saverfb5r06);
+       rt2800_rfcsr_write_bank(rt2x00dev, 5, 7, saverfb5r07);
+       rt2800_rfcsr_write_bank(rt2x00dev, 5, 8, saverfb5r08);
+       rt2800_rfcsr_write_bank(rt2x00dev, 5, 17, saverfb5r17);
+       rt2800_rfcsr_write_bank(rt2x00dev, 5, 18, saverfb5r18);
+       rt2800_rfcsr_write_bank(rt2x00dev, 5, 19, saverfb5r19);
+       rt2800_rfcsr_write_bank(rt2x00dev, 5, 20, saverfb5r20);
+
+       rt2800_rfcsr_write_bank(rt2x00dev, 5, 37, saverfb5r37);
+       rt2800_rfcsr_write_bank(rt2x00dev, 5, 38, saverfb5r38);
+       rt2800_rfcsr_write_bank(rt2x00dev, 5, 39, saverfb5r39);
+       rt2800_rfcsr_write_bank(rt2x00dev, 5, 40, saverfb5r40);
+       rt2800_rfcsr_write_bank(rt2x00dev, 5, 41, saverfb5r41);
+       rt2800_rfcsr_write_bank(rt2x00dev, 5, 42, saverfb5r42);
+       rt2800_rfcsr_write_bank(rt2x00dev, 5, 43, saverfb5r43);
+       rt2800_rfcsr_write_bank(rt2x00dev, 5, 44, saverfb5r44);
+       rt2800_rfcsr_write_bank(rt2x00dev, 5, 45, saverfb5r45);
+       rt2800_rfcsr_write_bank(rt2x00dev, 5, 46, saverfb5r46);
+
+       rt2800_rfcsr_write_bank(rt2x00dev, 5, 58, saverfb5r58);
+       rt2800_rfcsr_write_bank(rt2x00dev, 5, 59, saverfb5r59);
+
+       rt2800_bbp_write(rt2x00dev, 23, savebbpr23);
+
+       rt2800_bbp_dcoc_write(rt2x00dev, 0, savebbp159r0);
+       rt2800_bbp_dcoc_write(rt2x00dev, 2, savebbp159r2);
+
+       rt2800_bbp_read(rt2x00dev, 4, &bbp_val);
+       rt2x00_set_field8(&bbp_val, BBP4_BANDWIDTH,
+                         2 * test_bit(CONFIG_CHANNEL_HT40, &rt2x00dev->flags));
+       rt2800_bbp_write(rt2x00dev, 4, bbp_val);
+
+       rt2800_register_write(rt2x00dev, RF_CONTROL0, MAC_RF_CONTROL0);
+       rt2800_register_write(rt2x00dev, RF_BYPASS0, MAC_RF_BYPASS0);
+}
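
In the loop above, the filter corner code (tx_agc_fc or rx_agc_fc) is swept up from 0 until the delta between the initial and current filter readbacks exceeds filter_target, or the 6-bit field saturates at 0x3f; pass 0 calibrates 20 MHz, pass 1 calibrates 40 MHz. A condensed sketch of that search, with the boundary fallbacks and register details elided (read_cal() and apply_fc() are hypothetical stand-ins):

/* Sketch: smallest corner code whose response delta crosses the target. */
u8 fc = 0;
char init = read_cal();                 /* hypothetical readback */

while (fc < 0x3f) {
        apply_fc(fc);                   /* hypothetical register write */
        if ((char)(init - read_cal()) > filter_target)
                break;                  /* corner found */
        fc++;
}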
+
+static void rt2800_init_rfcsr_6352(struct rt2x00_dev *rt2x00dev)
+{
+       /* Initialize RF central register to default value */
+       rt2800_rfcsr_write(rt2x00dev, 0, 0x02);
+       rt2800_rfcsr_write(rt2x00dev, 1, 0x03);
+       rt2800_rfcsr_write(rt2x00dev, 2, 0x33);
+       rt2800_rfcsr_write(rt2x00dev, 3, 0xFF);
+       rt2800_rfcsr_write(rt2x00dev, 4, 0x0C);
+       rt2800_rfcsr_write(rt2x00dev, 5, 0x40);
+       rt2800_rfcsr_write(rt2x00dev, 6, 0x00);
+       rt2800_rfcsr_write(rt2x00dev, 7, 0x00);
+       rt2800_rfcsr_write(rt2x00dev, 8, 0x00);
+       rt2800_rfcsr_write(rt2x00dev, 9, 0x00);
+       rt2800_rfcsr_write(rt2x00dev, 10, 0x00);
+       rt2800_rfcsr_write(rt2x00dev, 11, 0x00);
+       rt2800_rfcsr_write(rt2x00dev, 12, rt2x00dev->freq_offset);
+       rt2800_rfcsr_write(rt2x00dev, 13, 0x00);
+       rt2800_rfcsr_write(rt2x00dev, 14, 0x40);
+       rt2800_rfcsr_write(rt2x00dev, 15, 0x22);
+       rt2800_rfcsr_write(rt2x00dev, 16, 0x4C);
+       rt2800_rfcsr_write(rt2x00dev, 17, 0x00);
+       rt2800_rfcsr_write(rt2x00dev, 18, 0x00);
+       rt2800_rfcsr_write(rt2x00dev, 19, 0x00);
+       rt2800_rfcsr_write(rt2x00dev, 20, 0xA0);
+       rt2800_rfcsr_write(rt2x00dev, 21, 0x12);
+       rt2800_rfcsr_write(rt2x00dev, 22, 0x07);
+       rt2800_rfcsr_write(rt2x00dev, 23, 0x13);
+       rt2800_rfcsr_write(rt2x00dev, 24, 0xFE);
+       rt2800_rfcsr_write(rt2x00dev, 25, 0x24);
+       rt2800_rfcsr_write(rt2x00dev, 26, 0x7A);
+       rt2800_rfcsr_write(rt2x00dev, 27, 0x00);
+       rt2800_rfcsr_write(rt2x00dev, 28, 0x00);
+       rt2800_rfcsr_write(rt2x00dev, 29, 0x05);
+       rt2800_rfcsr_write(rt2x00dev, 30, 0x00);
+       rt2800_rfcsr_write(rt2x00dev, 31, 0x00);
+       rt2800_rfcsr_write(rt2x00dev, 32, 0x00);
+       rt2800_rfcsr_write(rt2x00dev, 33, 0x00);
+       rt2800_rfcsr_write(rt2x00dev, 34, 0x00);
+       rt2800_rfcsr_write(rt2x00dev, 35, 0x00);
+       rt2800_rfcsr_write(rt2x00dev, 36, 0x00);
+       rt2800_rfcsr_write(rt2x00dev, 37, 0x00);
+       rt2800_rfcsr_write(rt2x00dev, 38, 0x00);
+       rt2800_rfcsr_write(rt2x00dev, 39, 0x00);
+       rt2800_rfcsr_write(rt2x00dev, 40, 0x00);
+       rt2800_rfcsr_write(rt2x00dev, 41, 0xD0);
+       rt2800_rfcsr_write(rt2x00dev, 42, 0x5B);
+       rt2800_rfcsr_write(rt2x00dev, 43, 0x00);
+
+       rt2800_rfcsr_write(rt2x00dev, 11, 0x21);
+       if (rt2800_clk_is_20mhz(rt2x00dev))
+               rt2800_rfcsr_write(rt2x00dev, 13, 0x03);
+       else
+               rt2800_rfcsr_write(rt2x00dev, 13, 0x00);
+       rt2800_rfcsr_write(rt2x00dev, 14, 0x7C);
+       rt2800_rfcsr_write(rt2x00dev, 16, 0x80);
+       rt2800_rfcsr_write(rt2x00dev, 17, 0x99);
+       rt2800_rfcsr_write(rt2x00dev, 18, 0x99);
+       rt2800_rfcsr_write(rt2x00dev, 19, 0x09);
+       rt2800_rfcsr_write(rt2x00dev, 20, 0x50);
+       rt2800_rfcsr_write(rt2x00dev, 21, 0xB0);
+       rt2800_rfcsr_write(rt2x00dev, 22, 0x00);
+       rt2800_rfcsr_write(rt2x00dev, 23, 0x06);
+       rt2800_rfcsr_write(rt2x00dev, 24, 0x00);
+       rt2800_rfcsr_write(rt2x00dev, 25, 0x00);
+       rt2800_rfcsr_write(rt2x00dev, 26, 0x5D);
+       rt2800_rfcsr_write(rt2x00dev, 27, 0x00);
+       rt2800_rfcsr_write(rt2x00dev, 28, 0x61);
+       rt2800_rfcsr_write(rt2x00dev, 29, 0xB5);
+       rt2800_rfcsr_write(rt2x00dev, 43, 0x02);
+
+       rt2800_rfcsr_write(rt2x00dev, 28, 0x62);
+       rt2800_rfcsr_write(rt2x00dev, 29, 0xAD);
+       rt2800_rfcsr_write(rt2x00dev, 39, 0x80);
+
+       /* Initialize RF channel register to default value */
+       rt2800_rfcsr_write_chanreg(rt2x00dev, 0, 0x03);
+       rt2800_rfcsr_write_chanreg(rt2x00dev, 1, 0x00);
+       rt2800_rfcsr_write_chanreg(rt2x00dev, 2, 0x00);
+       rt2800_rfcsr_write_chanreg(rt2x00dev, 3, 0x00);
+       rt2800_rfcsr_write_chanreg(rt2x00dev, 4, 0x00);
+       rt2800_rfcsr_write_chanreg(rt2x00dev, 5, 0x08);
+       rt2800_rfcsr_write_chanreg(rt2x00dev, 6, 0x00);
+       rt2800_rfcsr_write_chanreg(rt2x00dev, 7, 0x51);
+       rt2800_rfcsr_write_chanreg(rt2x00dev, 8, 0x53);
+       rt2800_rfcsr_write_chanreg(rt2x00dev, 9, 0x16);
+       rt2800_rfcsr_write_chanreg(rt2x00dev, 10, 0x61);
+       rt2800_rfcsr_write_chanreg(rt2x00dev, 11, 0x53);
+       rt2800_rfcsr_write_chanreg(rt2x00dev, 12, 0x22);
+       rt2800_rfcsr_write_chanreg(rt2x00dev, 13, 0x3D);
+       rt2800_rfcsr_write_chanreg(rt2x00dev, 14, 0x06);
+       rt2800_rfcsr_write_chanreg(rt2x00dev, 15, 0x13);
+       rt2800_rfcsr_write_chanreg(rt2x00dev, 16, 0x22);
+       rt2800_rfcsr_write_chanreg(rt2x00dev, 17, 0x27);
+       rt2800_rfcsr_write_chanreg(rt2x00dev, 18, 0x02);
+       rt2800_rfcsr_write_chanreg(rt2x00dev, 19, 0xA7);
+       rt2800_rfcsr_write_chanreg(rt2x00dev, 20, 0x01);
+       rt2800_rfcsr_write_chanreg(rt2x00dev, 21, 0x52);
+       rt2800_rfcsr_write_chanreg(rt2x00dev, 22, 0x80);
+       rt2800_rfcsr_write_chanreg(rt2x00dev, 23, 0xB3);
+       rt2800_rfcsr_write_chanreg(rt2x00dev, 24, 0x00);
+       rt2800_rfcsr_write_chanreg(rt2x00dev, 25, 0x00);
+       rt2800_rfcsr_write_chanreg(rt2x00dev, 26, 0x00);
+       rt2800_rfcsr_write_chanreg(rt2x00dev, 27, 0x00);
+       rt2800_rfcsr_write_chanreg(rt2x00dev, 28, 0x5C);
+       rt2800_rfcsr_write_chanreg(rt2x00dev, 29, 0x6B);
+       rt2800_rfcsr_write_chanreg(rt2x00dev, 30, 0x6B);
+       rt2800_rfcsr_write_chanreg(rt2x00dev, 31, 0x31);
+       rt2800_rfcsr_write_chanreg(rt2x00dev, 32, 0x5D);
+       rt2800_rfcsr_write_chanreg(rt2x00dev, 33, 0x00);
+       rt2800_rfcsr_write_chanreg(rt2x00dev, 34, 0xE6);
+       rt2800_rfcsr_write_chanreg(rt2x00dev, 35, 0x55);
+       rt2800_rfcsr_write_chanreg(rt2x00dev, 36, 0x00);
+       rt2800_rfcsr_write_chanreg(rt2x00dev, 37, 0xBB);
+       rt2800_rfcsr_write_chanreg(rt2x00dev, 38, 0xB3);
+       rt2800_rfcsr_write_chanreg(rt2x00dev, 39, 0xB3);
+       rt2800_rfcsr_write_chanreg(rt2x00dev, 40, 0x03);
+       rt2800_rfcsr_write_chanreg(rt2x00dev, 41, 0x00);
+       rt2800_rfcsr_write_chanreg(rt2x00dev, 42, 0x00);
+       rt2800_rfcsr_write_chanreg(rt2x00dev, 43, 0xB3);
+       rt2800_rfcsr_write_chanreg(rt2x00dev, 44, 0xD3);
+       rt2800_rfcsr_write_chanreg(rt2x00dev, 45, 0xD5);
+       rt2800_rfcsr_write_chanreg(rt2x00dev, 46, 0x07);
+       rt2800_rfcsr_write_chanreg(rt2x00dev, 47, 0x68);
+       rt2800_rfcsr_write_chanreg(rt2x00dev, 48, 0xEF);
+       rt2800_rfcsr_write_chanreg(rt2x00dev, 49, 0x1C);
+       rt2800_rfcsr_write_chanreg(rt2x00dev, 54, 0x07);
+       rt2800_rfcsr_write_chanreg(rt2x00dev, 55, 0xA8);
+       rt2800_rfcsr_write_chanreg(rt2x00dev, 56, 0x85);
+       rt2800_rfcsr_write_chanreg(rt2x00dev, 57, 0x10);
+       rt2800_rfcsr_write_chanreg(rt2x00dev, 58, 0x07);
+       rt2800_rfcsr_write_chanreg(rt2x00dev, 59, 0x6A);
+       rt2800_rfcsr_write_chanreg(rt2x00dev, 60, 0x85);
+       rt2800_rfcsr_write_chanreg(rt2x00dev, 61, 0x10);
+       rt2800_rfcsr_write_chanreg(rt2x00dev, 62, 0x1C);
+       rt2800_rfcsr_write_chanreg(rt2x00dev, 63, 0x00);
+
+       rt2800_rfcsr_write_bank(rt2x00dev, 6, 45, 0xC5);
+
+       rt2800_rfcsr_write_chanreg(rt2x00dev, 9, 0x47);
+       rt2800_rfcsr_write_chanreg(rt2x00dev, 10, 0x71);
+       rt2800_rfcsr_write_chanreg(rt2x00dev, 11, 0x33);
+       rt2800_rfcsr_write_chanreg(rt2x00dev, 14, 0x0E);
+       rt2800_rfcsr_write_chanreg(rt2x00dev, 17, 0x23);
+       rt2800_rfcsr_write_chanreg(rt2x00dev, 19, 0xA4);
+       rt2800_rfcsr_write_chanreg(rt2x00dev, 20, 0x02);
+       rt2800_rfcsr_write_chanreg(rt2x00dev, 21, 0x12);
+       rt2800_rfcsr_write_chanreg(rt2x00dev, 28, 0x1C);
+       rt2800_rfcsr_write_chanreg(rt2x00dev, 29, 0xEB);
+       rt2800_rfcsr_write_chanreg(rt2x00dev, 32, 0x7D);
+       rt2800_rfcsr_write_chanreg(rt2x00dev, 34, 0xD6);
+       rt2800_rfcsr_write_chanreg(rt2x00dev, 36, 0x08);
+       rt2800_rfcsr_write_chanreg(rt2x00dev, 38, 0xB4);
+       rt2800_rfcsr_write_chanreg(rt2x00dev, 43, 0xD3);
+       rt2800_rfcsr_write_chanreg(rt2x00dev, 44, 0xB3);
+       rt2800_rfcsr_write_chanreg(rt2x00dev, 45, 0xD5);
+       rt2800_rfcsr_write_chanreg(rt2x00dev, 46, 0x27);
+       rt2800_rfcsr_write_chanreg(rt2x00dev, 47, 0x69);
+       rt2800_rfcsr_write_chanreg(rt2x00dev, 48, 0xFF);
+       rt2800_rfcsr_write_chanreg(rt2x00dev, 54, 0x20);
+       rt2800_rfcsr_write_chanreg(rt2x00dev, 55, 0x66);
+       rt2800_rfcsr_write_chanreg(rt2x00dev, 56, 0xFF);
+       rt2800_rfcsr_write_chanreg(rt2x00dev, 57, 0x1C);
+       rt2800_rfcsr_write_chanreg(rt2x00dev, 58, 0x20);
+       rt2800_rfcsr_write_chanreg(rt2x00dev, 59, 0x6B);
+       rt2800_rfcsr_write_chanreg(rt2x00dev, 60, 0xF7);
+       rt2800_rfcsr_write_chanreg(rt2x00dev, 61, 0x09);
+
+       rt2800_rfcsr_write_chanreg(rt2x00dev, 10, 0x51);
+       rt2800_rfcsr_write_chanreg(rt2x00dev, 14, 0x06);
+       rt2800_rfcsr_write_chanreg(rt2x00dev, 19, 0xA7);
+       rt2800_rfcsr_write_chanreg(rt2x00dev, 28, 0x2C);
+       rt2800_rfcsr_write_chanreg(rt2x00dev, 55, 0x64);
+       rt2800_rfcsr_write_chanreg(rt2x00dev, 8, 0x51);
+       rt2800_rfcsr_write_chanreg(rt2x00dev, 9, 0x36);
+       rt2800_rfcsr_write_chanreg(rt2x00dev, 11, 0x53);
+       rt2800_rfcsr_write_chanreg(rt2x00dev, 14, 0x16);
+
+       rt2800_rfcsr_write_chanreg(rt2x00dev, 47, 0x6C);
+       rt2800_rfcsr_write_chanreg(rt2x00dev, 48, 0xFC);
+       rt2800_rfcsr_write_chanreg(rt2x00dev, 49, 0x1F);
+       rt2800_rfcsr_write_chanreg(rt2x00dev, 54, 0x27);
+       rt2800_rfcsr_write_chanreg(rt2x00dev, 55, 0x66);
+       rt2800_rfcsr_write_chanreg(rt2x00dev, 59, 0x6B);
+
+       /* Initialize RF channel register for DRQFN */
+       rt2800_rfcsr_write_chanreg(rt2x00dev, 43, 0xD3);
+       rt2800_rfcsr_write_chanreg(rt2x00dev, 44, 0xE3);
+       rt2800_rfcsr_write_chanreg(rt2x00dev, 45, 0xE5);
+       rt2800_rfcsr_write_chanreg(rt2x00dev, 47, 0x28);
+       rt2800_rfcsr_write_chanreg(rt2x00dev, 55, 0x68);
+       rt2800_rfcsr_write_chanreg(rt2x00dev, 56, 0xF7);
+       rt2800_rfcsr_write_chanreg(rt2x00dev, 58, 0x02);
+       rt2800_rfcsr_write_chanreg(rt2x00dev, 60, 0xC7);
+
+       /* Initialize RF DC calibration register to default value */
+       rt2800_rfcsr_write_dccal(rt2x00dev, 0, 0x47);
+       rt2800_rfcsr_write_dccal(rt2x00dev, 1, 0x00);
+       rt2800_rfcsr_write_dccal(rt2x00dev, 2, 0x00);
+       rt2800_rfcsr_write_dccal(rt2x00dev, 3, 0x00);
+       rt2800_rfcsr_write_dccal(rt2x00dev, 4, 0x00);
+       rt2800_rfcsr_write_dccal(rt2x00dev, 5, 0x00);
+       rt2800_rfcsr_write_dccal(rt2x00dev, 6, 0x10);
+       rt2800_rfcsr_write_dccal(rt2x00dev, 7, 0x10);
+       rt2800_rfcsr_write_dccal(rt2x00dev, 8, 0x04);
+       rt2800_rfcsr_write_dccal(rt2x00dev, 9, 0x00);
+       rt2800_rfcsr_write_dccal(rt2x00dev, 10, 0x07);
+       rt2800_rfcsr_write_dccal(rt2x00dev, 11, 0x01);
+       rt2800_rfcsr_write_dccal(rt2x00dev, 12, 0x07);
+       rt2800_rfcsr_write_dccal(rt2x00dev, 13, 0x07);
+       rt2800_rfcsr_write_dccal(rt2x00dev, 14, 0x07);
+       rt2800_rfcsr_write_dccal(rt2x00dev, 15, 0x20);
+       rt2800_rfcsr_write_dccal(rt2x00dev, 16, 0x22);
+       rt2800_rfcsr_write_dccal(rt2x00dev, 17, 0x00);
+       rt2800_rfcsr_write_dccal(rt2x00dev, 18, 0x00);
+       rt2800_rfcsr_write_dccal(rt2x00dev, 19, 0x00);
+       rt2800_rfcsr_write_dccal(rt2x00dev, 20, 0x00);
+       rt2800_rfcsr_write_dccal(rt2x00dev, 21, 0xF1);
+       rt2800_rfcsr_write_dccal(rt2x00dev, 22, 0x11);
+       rt2800_rfcsr_write_dccal(rt2x00dev, 23, 0x02);
+       rt2800_rfcsr_write_dccal(rt2x00dev, 24, 0x41);
+       rt2800_rfcsr_write_dccal(rt2x00dev, 25, 0x20);
+       rt2800_rfcsr_write_dccal(rt2x00dev, 26, 0x00);
+       rt2800_rfcsr_write_dccal(rt2x00dev, 27, 0xD7);
+       rt2800_rfcsr_write_dccal(rt2x00dev, 28, 0xA2);
+       rt2800_rfcsr_write_dccal(rt2x00dev, 29, 0x20);
+       rt2800_rfcsr_write_dccal(rt2x00dev, 30, 0x49);
+       rt2800_rfcsr_write_dccal(rt2x00dev, 31, 0x20);
+       rt2800_rfcsr_write_dccal(rt2x00dev, 32, 0x04);
+       rt2800_rfcsr_write_dccal(rt2x00dev, 33, 0xF1);
+       rt2800_rfcsr_write_dccal(rt2x00dev, 34, 0xA1);
+       rt2800_rfcsr_write_dccal(rt2x00dev, 35, 0x01);
+       rt2800_rfcsr_write_dccal(rt2x00dev, 41, 0x00);
+       rt2800_rfcsr_write_dccal(rt2x00dev, 42, 0x00);
+       rt2800_rfcsr_write_dccal(rt2x00dev, 43, 0x00);
+       rt2800_rfcsr_write_dccal(rt2x00dev, 44, 0x00);
+       rt2800_rfcsr_write_dccal(rt2x00dev, 45, 0x00);
+       rt2800_rfcsr_write_dccal(rt2x00dev, 46, 0x00);
+       rt2800_rfcsr_write_dccal(rt2x00dev, 47, 0x3E);
+       rt2800_rfcsr_write_dccal(rt2x00dev, 48, 0x3D);
+       rt2800_rfcsr_write_dccal(rt2x00dev, 49, 0x3E);
+       rt2800_rfcsr_write_dccal(rt2x00dev, 50, 0x3D);
+       rt2800_rfcsr_write_dccal(rt2x00dev, 51, 0x3E);
+       rt2800_rfcsr_write_dccal(rt2x00dev, 52, 0x3D);
+       rt2800_rfcsr_write_dccal(rt2x00dev, 53, 0x00);
+       rt2800_rfcsr_write_dccal(rt2x00dev, 54, 0x00);
+       rt2800_rfcsr_write_dccal(rt2x00dev, 55, 0x00);
+       rt2800_rfcsr_write_dccal(rt2x00dev, 56, 0x00);
+       rt2800_rfcsr_write_dccal(rt2x00dev, 57, 0x00);
+       rt2800_rfcsr_write_dccal(rt2x00dev, 58, 0x10);
+       rt2800_rfcsr_write_dccal(rt2x00dev, 59, 0x10);
+       rt2800_rfcsr_write_dccal(rt2x00dev, 60, 0x0A);
+       rt2800_rfcsr_write_dccal(rt2x00dev, 61, 0x00);
+       rt2800_rfcsr_write_dccal(rt2x00dev, 62, 0x00);
+       rt2800_rfcsr_write_dccal(rt2x00dev, 63, 0x00);
+
+       rt2800_rfcsr_write_dccal(rt2x00dev, 3, 0x08);
+       rt2800_rfcsr_write_dccal(rt2x00dev, 4, 0x04);
+       rt2800_rfcsr_write_dccal(rt2x00dev, 5, 0x20);
+
+       rt2800_rfcsr_write_dccal(rt2x00dev, 5, 0x00);
+       rt2800_rfcsr_write_dccal(rt2x00dev, 17, 0x7C);
+
+       rt2800_bw_filter_calibration(rt2x00dev, true);
+       rt2800_bw_filter_calibration(rt2x00dev, false);
+}
+
 static void rt2800_init_rfcsr(struct rt2x00_dev *rt2x00dev)
 {
        if (rt2800_is_305x_soc(rt2x00dev)) {
@@ -6884,6 +8279,9 @@ static void rt2800_init_rfcsr(struct rt2x00_dev *rt2x00dev)
        case RT5592:
                rt2800_init_rfcsr_5592(rt2x00dev);
                break;
+       case RT6352:
+               rt2800_init_rfcsr_6352(rt2x00dev);
+               break;
        }
 }
 
@@ -7250,7 +8648,8 @@ static int rt2800_init_eeprom(struct rt2x00_dev *rt2x00dev)
         */
        if (rt2x00_rt(rt2x00dev, RT3290) ||
            rt2x00_rt(rt2x00dev, RT5390) ||
-           rt2x00_rt(rt2x00dev, RT5392))
+           rt2x00_rt(rt2x00dev, RT5392) ||
+           rt2x00_rt(rt2x00dev, RT6352))
                rt2800_eeprom_read(rt2x00dev, EEPROM_CHIP_ID, &rf);
        else if (rt2x00_rt(rt2x00dev, RT3352))
                rf = RF3322;
@@ -7282,6 +8681,7 @@ static int rt2800_init_eeprom(struct rt2x00_dev *rt2x00dev)
        case RF5390:
        case RF5392:
        case RF5592:
+       case RF7620:
                break;
        default:
                rt2x00_err(rt2x00dev, "Invalid RF chipset 0x%04x detected\n",
@@ -7382,13 +8782,13 @@ static int rt2800_init_eeprom(struct rt2x00_dev *rt2x00dev)
        rt2800_eeprom_read(rt2x00dev, EEPROM_NIC_CONF1, &eeprom);
 
        if (rt2x00_rt(rt2x00dev, RT3352)) {
-               if (!rt2x00_get_field16(eeprom,
+               if (rt2x00_get_field16(eeprom,
                    EEPROM_NIC_CONF1_EXTERNAL_TX0_PA_3352))
-                   __set_bit(CAPABILITY_INTERNAL_PA_TX0,
+                   __set_bit(CAPABILITY_EXTERNAL_PA_TX0,
                              &rt2x00dev->cap_flags);
-               if (!rt2x00_get_field16(eeprom,
+               if (rt2x00_get_field16(eeprom,
                    EEPROM_NIC_CONF1_EXTERNAL_TX1_PA_3352))
-                   __set_bit(CAPABILITY_INTERNAL_PA_TX1,
+                   __set_bit(CAPABILITY_EXTERNAL_PA_TX1,
                              &rt2x00dev->cap_flags);
        }
 
@@ -7689,6 +9089,23 @@ static const struct rf_channel rf_vals_5592_xtal40[] = {
        {196, 83, 0, 12, 1},
 };
 
+static const struct rf_channel rf_vals_7620[] = {
+       {1, 0x50, 0x99, 0x99, 1},
+       {2, 0x50, 0x44, 0x44, 2},
+       {3, 0x50, 0xEE, 0xEE, 2},
+       {4, 0x50, 0x99, 0x99, 3},
+       {5, 0x51, 0x44, 0x44, 0},
+       {6, 0x51, 0xEE, 0xEE, 0},
+       {7, 0x51, 0x99, 0x99, 1},
+       {8, 0x51, 0x44, 0x44, 2},
+       {9, 0x51, 0xEE, 0xEE, 2},
+       {10, 0x51, 0x99, 0x99, 3},
+       {11, 0x52, 0x44, 0x44, 0},
+       {12, 0x52, 0xEE, 0xEE, 0},
+       {13, 0x52, 0x99, 0x99, 1},
+       {14, 0x52, 0x33, 0x33, 3},
+};
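
Each row above populates a struct rf_channel: the channel number followed by the four RF programming words (so channel 1 is programmed with rf1 = 0x50 and rf2/rf3 = 0x99). A hedged lookup sketch, assuming the struct's leading field is the channel number:

/* Sketch: find the rf_vals_7620 row for a given 2.4 GHz channel. */
static const struct rf_channel *rf7620_find(int channel)
{
        unsigned int i;

        for (i = 0; i < ARRAY_SIZE(rf_vals_7620); i++)
                if (rf_vals_7620[i].channel == channel)
                        return &rf_vals_7620[i];

        return NULL;
}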
+
 static int rt2800_probe_hw_mode(struct rt2x00_dev *rt2x00dev)
 {
        struct hw_mode_spec *spec = &rt2x00dev->spec;
@@ -7792,6 +9209,11 @@ static int rt2800_probe_hw_mode(struct rt2x00_dev *rt2x00dev)
                        spec->channels = rf_vals_3x;
                break;
 
+       case RF7620:
+               spec->num_channels = ARRAY_SIZE(rf_vals_7620);
+               spec->channels = rf_vals_7620;
+               break;
+
        case RF3052:
        case RF3053:
                spec->num_channels = ARRAY_SIZE(rf_vals_3x);
@@ -7923,6 +9345,7 @@ static int rt2800_probe_hw_mode(struct rt2x00_dev *rt2x00dev)
        case RF5390:
        case RF5392:
        case RF5592:
+       case RF7620:
                __set_bit(CAPABILITY_VCO_RECALIBRATION, &rt2x00dev->cap_flags);
                break;
        }
@@ -7967,6 +9390,9 @@ static int rt2800_probe_rt(struct rt2x00_dev *rt2x00dev)
                return -ENODEV;
        }
 
+       if (rt == RT5390 && rt2x00_is_soc(rt2x00dev))
+               rt = RT6352;
+
        rt2x00_set_rt(rt2x00dev, rt, rev);
 
        return 0;
index 0a8b4df665fe36bde6be9fd1e20fd232797c8d19..f357531d9488d99d97033ac049d4ebbe99b70be2 100644
 #ifndef RT2800LIB_H
 #define RT2800LIB_H
 
+/*
+ * Hardware has 255 WCID table entries. First 32 entries are reserved for
+ * shared keys. Since parts of the pairwise key table might be shared with
+ * the beacon frame buffers 6 & 7, we can use only the first 222 entries.
+ */
+#define WCID_START     33
+#define WCID_END       222
+#define STA_IDS_SIZE   (WCID_END - WCID_START + 2)
+
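
For reference, the arithmetic above gives STA_IDS_SIZE = 222 - 33 + 2 = 191 slots. A minimal sketch of mapping a hardware WCID onto that range (the helper is hypothetical, purely illustrative):

/* Hypothetical illustration: translate a WCID into a sta_ids slot. */
static inline int wcid_to_sta_idx(u8 wcid)
{
        if (wcid < WCID_START || wcid > WCID_END)
                return -1;              /* outside the usable range */
        return wcid - WCID_START;       /* slots 0..189 */
}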
+/* RT2800 driver data structure */
+struct rt2800_drv_data {
+       u8 calibration_bw20;
+       u8 calibration_bw40;
+       char rx_calibration_bw20;
+       char rx_calibration_bw40;
+       char tx_calibration_bw20;
+       char tx_calibration_bw40;
+       u8 bbp25;
+       u8 bbp26;
+       u8 txmixer_gain_24g;
+       u8 txmixer_gain_5g;
+       u8 max_psdu;
+       unsigned int tbtt_tick;
+       unsigned int ampdu_factor_cnt[4];
+       DECLARE_BITMAP(sta_ids, STA_IDS_SIZE);
+       struct ieee80211_sta *wcid_to_sta[STA_IDS_SIZE];
+};
+
 struct rt2800_ops {
        void (*register_read)(struct rt2x00_dev *rt2x00dev,
                              const unsigned int offset, u32 *value);
@@ -167,7 +195,8 @@ void rt2800_write_tx_data(struct queue_entry *entry,
                          struct txentry_desc *txdesc);
 void rt2800_process_rxwi(struct queue_entry *entry, struct rxdone_entry_desc *txdesc);
 
-void rt2800_txdone_entry(struct queue_entry *entry, u32 status, __le32* txwi);
+void rt2800_txdone_entry(struct queue_entry *entry, u32 status, __le32 *txwi,
+                        bool match);
 
 void rt2800_write_beacon(struct queue_entry *entry, struct txentry_desc *txdesc);
 void rt2800_clear_beacon(struct queue_entry *entry);
index de4790b41be7d64a7c3a9651e27451c3f76183d6..3ab3b53238974a72c5d2170215666e4890110d4e 100644
@@ -239,7 +239,7 @@ static bool rt2800mmio_txdone_release_entries(struct queue_entry *entry,
 {
        if (test_bit(ENTRY_DATA_STATUS_SET, &entry->flags)) {
                rt2800_txdone_entry(entry, entry->status,
-                                   rt2800mmio_get_txwi(entry));
+                                   rt2800mmio_get_txwi(entry), true);
                return false;
        }
 
index 205a7b8ac8a7a252845f8e8c5866f2ed4cd1e14d..f11e3f532a84e48e17e9d530030c3f066099fe7f 100644
@@ -501,8 +501,7 @@ static int rt2800usb_get_tx_data_len(struct queue_entry *entry)
 /*
  * TX control handlers
  */
-static enum txdone_entry_desc_flags
-rt2800usb_txdone_entry_check(struct queue_entry *entry, u32 reg)
+static bool rt2800usb_txdone_entry_check(struct queue_entry *entry, u32 reg)
 {
        __le32 *txwi;
        u32 word;
@@ -515,7 +514,7 @@ rt2800usb_txdone_entry_check(struct queue_entry *entry, u32 reg)
         * frame.
         */
        if (test_bit(ENTRY_DATA_IO_FAILED, &entry->flags))
-               return TXDONE_FAILURE;
+               return false;
 
        wcid    = rt2x00_get_field32(reg, TX_STA_FIFO_WCID);
        ack     = rt2x00_get_field32(reg, TX_STA_FIFO_TX_ACK_REQUIRED);
@@ -537,10 +536,10 @@ rt2800usb_txdone_entry_check(struct queue_entry *entry, u32 reg)
                rt2x00_dbg(entry->queue->rt2x00dev,
                           "TX status report missed for queue %d entry %d\n",
                           entry->queue->qid, entry->entry_idx);
-               return TXDONE_UNKNOWN;
+               return false;
        }
 
-       return TXDONE_SUCCESS;
+       return true;
 }
 
 static void rt2800usb_txdone(struct rt2x00_dev *rt2x00dev)
@@ -549,7 +548,7 @@ static void rt2800usb_txdone(struct rt2x00_dev *rt2x00dev)
        struct queue_entry *entry;
        u32 reg;
        u8 qid;
-       enum txdone_entry_desc_flags done_status;
+       bool match;
 
        while (kfifo_get(&rt2x00dev->txstatus_fifo, &reg)) {
                /*
@@ -574,11 +573,8 @@ static void rt2800usb_txdone(struct rt2x00_dev *rt2x00dev)
                        break;
                }
 
-               done_status = rt2800usb_txdone_entry_check(entry, reg);
-               if (likely(done_status == TXDONE_SUCCESS))
-                       rt2800_txdone_entry(entry, reg, rt2800usb_get_txwi(entry));
-               else
-                       rt2x00lib_txdone_noinfo(entry, done_status);
+               match = rt2800usb_txdone_entry_check(entry, reg);
+               rt2800_txdone_entry(entry, reg, rt2800usb_get_txwi(entry), match);
        }
 }
 
index 340787894c694aaa45c94cff33f66bead02af3ce..1bc353eafe37dea8408428ffc0b710ddd7ffadc9 100644
@@ -174,6 +174,7 @@ struct rt2x00_chip {
 #define RT5390         0x5390  /* 2.4GHz */
 #define RT5392         0x5392  /* 2.4GHz */
 #define RT5592         0x5592
+#define RT6352         0x6352  /* WSOC 2.4GHz */
 
        u16 rf;
        u16 rev;
@@ -718,8 +719,8 @@ enum rt2x00_capability_flags {
        CAPABILITY_DOUBLE_ANTENNA,
        CAPABILITY_BT_COEXIST,
        CAPABILITY_VCO_RECALIBRATION,
-       CAPABILITY_INTERNAL_PA_TX0,
-       CAPABILITY_INTERNAL_PA_TX1,
+       CAPABILITY_EXTERNAL_PA_TX0,
+       CAPABILITY_EXTERNAL_PA_TX1,
 };
 
 /*
@@ -1396,7 +1397,7 @@ void rt2x00queue_flush_queues(struct rt2x00_dev *rt2x00dev, bool drop);
  * rt2x00debug_dump_frame - Dump a frame to userspace through debugfs.
  * @rt2x00dev: Pointer to &struct rt2x00_dev.
  * @type: The type of frame that is being dumped.
- * @skb: The skb containing the frame to be dumped.
+ * @entry: The queue entry containing the frame to be dumped.
  */
 #ifdef CONFIG_RT2X00_LIB_DEBUGFS
 void rt2x00debug_dump_frame(struct rt2x00_dev *rt2x00dev,
@@ -1425,6 +1426,8 @@ void rt2x00lib_dmastart(struct queue_entry *entry);
 void rt2x00lib_dmadone(struct queue_entry *entry);
 void rt2x00lib_txdone(struct queue_entry *entry,
                      struct txdone_entry_desc *txdesc);
+void rt2x00lib_txdone_nomatch(struct queue_entry *entry,
+                             struct txdone_entry_desc *txdesc);
 void rt2x00lib_txdone_noinfo(struct queue_entry *entry, u32 status);
 void rt2x00lib_rxdone(struct queue_entry *entry, gfp_t gfp);
 
index dd6678109b7e4609533c296f774dea5864df5370..e95d2aad3b3f84736aa12c9020a49a20fd19955a 100644
@@ -313,73 +313,14 @@ static inline int rt2x00lib_txdone_bar_status(struct queue_entry *entry)
        return ret;
 }
 
-void rt2x00lib_txdone(struct queue_entry *entry,
-                     struct txdone_entry_desc *txdesc)
+static void rt2x00lib_fill_tx_status(struct rt2x00_dev *rt2x00dev,
+                                    struct ieee80211_tx_info *tx_info,
+                                    struct skb_frame_desc *skbdesc,
+                                    struct txdone_entry_desc *txdesc,
+                                    bool success)
 {
-       struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;
-       struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(entry->skb);
-       struct skb_frame_desc *skbdesc = get_skb_frame_desc(entry->skb);
-       unsigned int header_length, i;
        u8 rate_idx, rate_flags, retry_rates;
-       u8 skbdesc_flags = skbdesc->flags;
-       bool success;
-
-       /*
-        * Unmap the skb.
-        */
-       rt2x00queue_unmap_skb(entry);
-
-       /*
-        * Remove the extra tx headroom from the skb.
-        */
-       skb_pull(entry->skb, rt2x00dev->extra_tx_headroom);
-
-       /*
-        * Signal that the TX descriptor is no longer in the skb.
-        */
-       skbdesc->flags &= ~SKBDESC_DESC_IN_SKB;
-
-       /*
-        * Determine the length of 802.11 header.
-        */
-       header_length = ieee80211_get_hdrlen_from_skb(entry->skb);
-
-       /*
-        * Remove L2 padding which was added during
-        */
-       if (rt2x00_has_cap_flag(rt2x00dev, REQUIRE_L2PAD))
-               rt2x00queue_remove_l2pad(entry->skb, header_length);
-
-       /*
-        * If the IV/EIV data was stripped from the frame before it was
-        * passed to the hardware, we should now reinsert it again because
-        * mac80211 will expect the same data to be present it the
-        * frame as it was passed to us.
-        */
-       if (rt2x00_has_cap_hw_crypto(rt2x00dev))
-               rt2x00crypto_tx_insert_iv(entry->skb, header_length);
-
-       /*
-        * Send frame to debugfs immediately, after this call is completed
-        * we are going to overwrite the skb->cb array.
-        */
-       rt2x00debug_dump_frame(rt2x00dev, DUMP_FRAME_TXDONE, entry);
-
-       /*
-        * Determine if the frame has been successfully transmitted and
-        * remove BARs from our check list while checking for their
-        * TX status.
-        */
-       success =
-           rt2x00lib_txdone_bar_status(entry) ||
-           test_bit(TXDONE_SUCCESS, &txdesc->flags) ||
-           test_bit(TXDONE_UNKNOWN, &txdesc->flags);
-
-       /*
-        * Update TX statistics.
-        */
-       rt2x00dev->link.qual.tx_success += success;
-       rt2x00dev->link.qual.tx_failed += !success;
+       int i;
 
        rate_idx = skbdesc->tx_rate_idx;
        rate_flags = skbdesc->tx_rate_flags;
@@ -416,6 +357,9 @@ void rt2x00lib_txdone(struct queue_entry *entry,
        if (i < (IEEE80211_TX_MAX_RATES - 1))
                tx_info->status.rates[i].idx = -1; /* terminate */
 
+       if (test_bit(TXDONE_NO_ACK_REQ, &txdesc->flags))
+               tx_info->flags |= IEEE80211_TX_CTL_NO_ACK;
+
        if (!(tx_info->flags & IEEE80211_TX_CTL_NO_ACK)) {
                if (success)
                        tx_info->flags |= IEEE80211_TX_STAT_ACK;
@@ -434,7 +378,8 @@ void rt2x00lib_txdone(struct queue_entry *entry,
         */
        if (test_bit(TXDONE_AMPDU, &txdesc->flags) ||
            tx_info->flags & IEEE80211_TX_CTL_AMPDU) {
-               tx_info->flags |= IEEE80211_TX_STAT_AMPDU;
+               tx_info->flags |= IEEE80211_TX_STAT_AMPDU |
+                                 IEEE80211_TX_CTL_AMPDU;
                tx_info->status.ampdu_len = 1;
                tx_info->status.ampdu_ack_len = success ? 1 : 0;
 
@@ -448,21 +393,11 @@ void rt2x00lib_txdone(struct queue_entry *entry,
                else
                        rt2x00dev->low_level_stats.dot11RTSFailureCount++;
        }
+}
 
-       /*
-        * Only send the status report to mac80211 when it's a frame
-        * that originated in mac80211. If this was a extra frame coming
-        * through a mac80211 library call (RTS/CTS) then we should not
-        * send the status report back.
-        */
-       if (!(skbdesc_flags & SKBDESC_NOT_MAC80211)) {
-               if (rt2x00_has_cap_flag(rt2x00dev, REQUIRE_TASKLET_CONTEXT))
-                       ieee80211_tx_status(rt2x00dev->hw, entry->skb);
-               else
-                       ieee80211_tx_status_ni(rt2x00dev->hw, entry->skb);
-       } else
-               dev_kfree_skb_any(entry->skb);
-
+static void rt2x00lib_clear_entry(struct rt2x00_dev *rt2x00dev,
+                                 struct queue_entry *entry)
+{
        /*
         * Make this entry available for reuse.
         */
@@ -485,6 +420,143 @@ void rt2x00lib_txdone(struct queue_entry *entry,
                rt2x00queue_unpause_queue(entry->queue);
        spin_unlock_bh(&entry->queue->tx_lock);
 }
+
+void rt2x00lib_txdone_nomatch(struct queue_entry *entry,
+                             struct txdone_entry_desc *txdesc)
+{
+       struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;
+       struct skb_frame_desc *skbdesc = get_skb_frame_desc(entry->skb);
+       struct ieee80211_tx_info txinfo = {};
+       bool success;
+
+       /*
+        * Unmap the skb.
+        */
+       rt2x00queue_unmap_skb(entry);
+
+       /*
+        * Signal that the TX descriptor is no longer in the skb.
+        */
+       skbdesc->flags &= ~SKBDESC_DESC_IN_SKB;
+
+       /*
+        * Send the frame to debugfs immediately; after this call completes,
+        * we are going to overwrite the skb->cb array.
+        */
+       rt2x00debug_dump_frame(rt2x00dev, DUMP_FRAME_TXDONE, entry);
+
+       /*
+        * Determine if the frame has been successfully transmitted and
+        * remove BARs from our check list while checking for their
+        * TX status.
+        */
+       success =
+           rt2x00lib_txdone_bar_status(entry) ||
+           test_bit(TXDONE_SUCCESS, &txdesc->flags);
+
+       if (!test_bit(TXDONE_UNKNOWN, &txdesc->flags)) {
+               /*
+                * Update TX statistics.
+                */
+               rt2x00dev->link.qual.tx_success += success;
+               rt2x00dev->link.qual.tx_failed += !success;
+
+               rt2x00lib_fill_tx_status(rt2x00dev, &txinfo, skbdesc, txdesc,
+                                        success);
+               ieee80211_tx_status_noskb(rt2x00dev->hw, skbdesc->sta, &txinfo);
+       }
+
+       dev_kfree_skb_any(entry->skb);
+       rt2x00lib_clear_entry(rt2x00dev, entry);
+}
+EXPORT_SYMBOL_GPL(rt2x00lib_txdone_nomatch);
+
+void rt2x00lib_txdone(struct queue_entry *entry,
+                     struct txdone_entry_desc *txdesc)
+{
+       struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;
+       struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(entry->skb);
+       struct skb_frame_desc *skbdesc = get_skb_frame_desc(entry->skb);
+       u8 skbdesc_flags = skbdesc->flags;
+       unsigned int header_length;
+       bool success;
+
+       /*
+        * Unmap the skb.
+        */
+       rt2x00queue_unmap_skb(entry);
+
+       /*
+        * Remove the extra tx headroom from the skb.
+        */
+       skb_pull(entry->skb, rt2x00dev->extra_tx_headroom);
+
+       /*
+        * Signal that the TX descriptor is no longer in the skb.
+        */
+       skbdesc->flags &= ~SKBDESC_DESC_IN_SKB;
+
+       /*
+        * Determine the length of 802.11 header.
+        */
+       header_length = ieee80211_get_hdrlen_from_skb(entry->skb);
+
+       /*
+        * Remove L2 padding which was added during TX.
+        */
+       if (rt2x00_has_cap_flag(rt2x00dev, REQUIRE_L2PAD))
+               rt2x00queue_remove_l2pad(entry->skb, header_length);
+
+       /*
+        * If the IV/EIV data was stripped from the frame before it was
+        * passed to the hardware, we should now reinsert it again because
+        * mac80211 will expect the same data to be present in the
+        * frame as it was passed to us.
+        */
+       if (rt2x00_has_cap_hw_crypto(rt2x00dev))
+               rt2x00crypto_tx_insert_iv(entry->skb, header_length);
+
+       /*
+        * Send the frame to debugfs immediately; after this call completes,
+        * we are going to overwrite the skb->cb array.
+        */
+       rt2x00debug_dump_frame(rt2x00dev, DUMP_FRAME_TXDONE, entry);
+
+       /*
+        * Determine if the frame has been successfully transmitted and
+        * remove BARs from our check list while checking for their
+        * TX status.
+        */
+       success =
+           rt2x00lib_txdone_bar_status(entry) ||
+           test_bit(TXDONE_SUCCESS, &txdesc->flags) ||
+           test_bit(TXDONE_UNKNOWN, &txdesc->flags);
+
+       /*
+        * Update TX statistics.
+        */
+       rt2x00dev->link.qual.tx_success += success;
+       rt2x00dev->link.qual.tx_failed += !success;
+
+       rt2x00lib_fill_tx_status(rt2x00dev, tx_info, skbdesc, txdesc, success);
+
+       /*
+        * Only send the status report to mac80211 when it's a frame
+        * that originated in mac80211. If this was an extra frame coming
+        * through a mac80211 library call (RTS/CTS), then we should not
+        * send the status report back.
+        */
+       if (!(skbdesc_flags & SKBDESC_NOT_MAC80211)) {
+               if (rt2x00_has_cap_flag(rt2x00dev, REQUIRE_TASKLET_CONTEXT))
+                       ieee80211_tx_status(rt2x00dev->hw, entry->skb);
+               else
+                       ieee80211_tx_status_ni(rt2x00dev->hw, entry->skb);
+       } else {
+               dev_kfree_skb_any(entry->skb);
+       }
+
+       rt2x00lib_clear_entry(rt2x00dev, entry);
+}
 EXPORT_SYMBOL_GPL(rt2x00lib_txdone);
 
 void rt2x00lib_txdone_noinfo(struct queue_entry *entry, u32 status)
index e1660b92b20c7793c88ea5d470e1d793f495bbf6..a2c1ca5c76d1c7d5b9191d8ad0f35aba949dfcc7 100644
@@ -372,15 +372,16 @@ static void rt2x00queue_create_tx_descriptor_ht(struct rt2x00_dev *rt2x00dev,
 
        /*
         * Determine IFS values
-        * - Use TXOP_BACKOFF for management frames except beacons
+        * - Use TXOP_BACKOFF for probe and management frames except beacons
         * - Use TXOP_SIFS for fragment bursts
         * - Use TXOP_HTTXOP for everything else
         *
         * Note: rt2800 devices won't use CTS protection (if used)
         * for frames not transmitted with TXOP_HTTXOP
         */
-       if (ieee80211_is_mgmt(hdr->frame_control) &&
-           !ieee80211_is_beacon(hdr->frame_control))
+       if ((ieee80211_is_mgmt(hdr->frame_control) &&
+            !ieee80211_is_beacon(hdr->frame_control)) ||
+           (tx_info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE))
                txdesc->u.ht.txop = TXOP_BACKOFF;
        else if (!(tx_info->flags & IEEE80211_TX_CTL_FIRST_FRAGMENT))
                txdesc->u.ht.txop = TXOP_SIFS;
index 22d18818e85004261bbc6c8e6c63d6c8208058ac..c78fb8c8838a5ebec5892fe6b2bc2589602aac0a 100644
@@ -102,7 +102,7 @@ enum skb_frame_desc_flags {
  *     of the scope of the skb->data pointer.
  * @iv: IV/EIV data used during encryption/decryption.
  * @skb_dma: (PCI-only) the DMA address associated with the sk buffer.
- * @entry: The entry to which this sk buffer belongs.
+ * @sta: The station to which this sk buffer was sent.
  */
 struct skb_frame_desc {
        u8 flags;
@@ -116,6 +116,7 @@ struct skb_frame_desc {
        __le32 iv[2];
 
        dma_addr_t skb_dma;
+       struct ieee80211_sta *sta;
 };
 
 /**
@@ -214,6 +215,7 @@ enum txdone_entry_desc_flags {
        TXDONE_FAILURE,
        TXDONE_EXCESSIVE_RETRY,
        TXDONE_AMPDU,
+       TXDONE_NO_ACK_REQ,
 };
 
 /**
index 231f84db9ab0691e46eee60c41f2b1ce3ccf9312..56a8686cd36744ef6bfa833856cb1800d84e2fe3 100644
@@ -946,8 +946,7 @@ static int rtl8187_start(struct ieee80211_hw *dev)
                      (7 << 13 /* RX FIFO threshold NONE */) |
                      (7 << 10 /* MAX RX DMA */) |
                      RTL818X_RX_CONF_RX_AUTORESETPHY |
-                     RTL818X_RX_CONF_ONLYERLPKT |
-                     RTL818X_RX_CONF_MULTICAST;
+                     RTL818X_RX_CONF_ONLYERLPKT;
                priv->rx_conf = reg;
                rtl818x_iowrite32(priv, &priv->map->RX_CONF, reg);
 
@@ -1319,12 +1318,11 @@ static void rtl8187_configure_filter(struct ieee80211_hw *dev,
                priv->rx_conf ^= RTL818X_RX_CONF_FCS;
        if (changed_flags & FIF_CONTROL)
                priv->rx_conf ^= RTL818X_RX_CONF_CTRL;
-       if (changed_flags & FIF_OTHER_BSS)
-               priv->rx_conf ^= RTL818X_RX_CONF_MONITOR;
-       if (*total_flags & FIF_ALLMULTI || multicast > 0)
-               priv->rx_conf |= RTL818X_RX_CONF_MULTICAST;
+       if (*total_flags & FIF_OTHER_BSS ||
+           *total_flags & FIF_ALLMULTI || multicast > 0)
+               priv->rx_conf |= RTL818X_RX_CONF_MONITOR;
        else
-               priv->rx_conf &= ~RTL818X_RX_CONF_MULTICAST;
+               priv->rx_conf &= ~RTL818X_RX_CONF_MONITOR;
 
        *total_flags = 0;
 
@@ -1332,10 +1330,10 @@ static void rtl8187_configure_filter(struct ieee80211_hw *dev,
                *total_flags |= FIF_FCSFAIL;
        if (priv->rx_conf & RTL818X_RX_CONF_CTRL)
                *total_flags |= FIF_CONTROL;
-       if (priv->rx_conf & RTL818X_RX_CONF_MONITOR)
+       if (priv->rx_conf & RTL818X_RX_CONF_MONITOR) {
                *total_flags |= FIF_OTHER_BSS;
-       if (priv->rx_conf & RTL818X_RX_CONF_MULTICAST)
                *total_flags |= FIF_ALLMULTI;
+       }
 
        rtl818x_iowrite32_async(priv, &priv->map->RX_CONF, priv->rx_conf);
 }
index caea350f05aac7b2e3dc7137b0b4363abcd8c2d4..bdc379178e87955c5456028a43657f97862670af 100644
@@ -1742,12 +1742,14 @@ void rtl_c2hcmd_enqueue(struct ieee80211_hw *hw, u8 tag, u8 len, u8 *val)
        unsigned long flags;
        struct rtl_c2hcmd *c2hcmd;
 
-       c2hcmd = kmalloc(sizeof(*c2hcmd), GFP_KERNEL);
+       c2hcmd = kmalloc(sizeof(*c2hcmd),
+                        in_interrupt() ? GFP_ATOMIC : GFP_KERNEL);
 
        if (!c2hcmd)
                goto label_err;
 
-       c2hcmd->val = kmalloc(len, GFP_KERNEL);
+       c2hcmd->val = kmalloc(len,
+                             in_interrupt() ? GFP_ATOMIC : GFP_KERNEL);
 
        if (!c2hcmd->val)
                goto label_err2;
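
The in_interrupt() test above picks an allocation mode that cannot sleep when the enqueue path runs in atomic context; the same pattern in isolation, as a minimal fragment (assuming len is in scope):

/* Illustration: never sleep when allocating from atomic context. */
gfp_t flags = in_interrupt() ? GFP_ATOMIC : GFP_KERNEL;
void *buf = kmalloc(len, flags);

if (!buf)
        return -ENOMEM;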
index ffa1f438424d44c67a9f848b1e43b2969be71737..57e633dbf9a9ab76bdf31e4ec9c3c5094eab180e 100644
@@ -44,7 +44,7 @@ static struct coex_dm_8192e_2ant *coex_dm = &glcoex_dm_8192e_2ant;
 static struct coex_sta_8192e_2ant glcoex_sta_8192e_2ant;
 static struct coex_sta_8192e_2ant *coex_sta = &glcoex_sta_8192e_2ant;
 
-static const char *const GLBtInfoSrc8192e2Ant[] = {
+static const char *const glbt_info_src_8192e_2ant[] = {
        "BT Info[wifi fw]",
        "BT Info[bt rsp]",
        "BT Info[bt auto report]",
@@ -57,31 +57,31 @@ static u32 glcoex_ver_8192e_2ant = 0x34;
  *   local function proto type if needed
  **************************************************************/
 /**************************************************************
- *   local function start with halbtc8192e2ant_
+ *   local function start with btc8192e2ant_
  **************************************************************/
-static u8 halbtc8192e2ant_btrssi_state(struct btc_coexist *btcoexist,
-                                       u8 level_num, u8 rssi_thresh,
-                                      u8 rssi_thresh1)
+static u8 btc8192e2ant_bt_rssi_state(struct btc_coexist *btcoexist,
+                                    u8 level_num, u8 rssi_thresh,
+                                    u8 rssi_thresh1)
 {
        struct rtl_priv *rtlpriv = btcoexist->adapter;
-       int btrssi = 0;
-       u8 btrssi_state = coex_sta->pre_bt_rssi_state;
+       int bt_rssi = 0;
+       u8 bt_rssi_state = coex_sta->pre_bt_rssi_state;
 
-       btrssi = coex_sta->bt_rssi;
+       bt_rssi = coex_sta->bt_rssi;
 
        if (level_num == 2) {
                if ((coex_sta->pre_bt_rssi_state == BTC_RSSI_STATE_LOW) ||
                    (coex_sta->pre_bt_rssi_state == BTC_RSSI_STATE_STAY_LOW)) {
-                       if (btrssi >=
+                       if (bt_rssi >=
                            (rssi_thresh + BTC_RSSI_COEX_THRESH_TOL_8192E_2ANT))
-                               btrssi_state = BTC_RSSI_STATE_HIGH;
+                               bt_rssi_state = BTC_RSSI_STATE_HIGH;
                        else
-                               btrssi_state = BTC_RSSI_STATE_STAY_LOW;
+                               bt_rssi_state = BTC_RSSI_STATE_STAY_LOW;
                } else {
-                       if (btrssi < rssi_thresh)
-                               btrssi_state = BTC_RSSI_STATE_LOW;
+                       if (bt_rssi < rssi_thresh)
+                               bt_rssi_state = BTC_RSSI_STATE_LOW;
                        else
-                               btrssi_state = BTC_RSSI_STATE_STAY_HIGH;
+                               bt_rssi_state = BTC_RSSI_STATE_STAY_HIGH;
                }
        } else if (level_num == 3) {
                if (rssi_thresh > rssi_thresh1) {
@@ -89,62 +89,63 @@ static u8 halbtc8192e2ant_btrssi_state(struct btc_coexist *btcoexist,
                                 "[BTCoex], BT Rssi thresh error!!\n");
                        return coex_sta->pre_bt_rssi_state;
                }
+
                if ((coex_sta->pre_bt_rssi_state == BTC_RSSI_STATE_LOW) ||
                    (coex_sta->pre_bt_rssi_state == BTC_RSSI_STATE_STAY_LOW)) {
-                       if (btrssi >=
+                       if (bt_rssi >=
                            (rssi_thresh + BTC_RSSI_COEX_THRESH_TOL_8192E_2ANT))
-                               btrssi_state = BTC_RSSI_STATE_MEDIUM;
+                               bt_rssi_state = BTC_RSSI_STATE_MEDIUM;
                        else
-                               btrssi_state = BTC_RSSI_STATE_STAY_LOW;
+                               bt_rssi_state = BTC_RSSI_STATE_STAY_LOW;
                } else if ((coex_sta->pre_bt_rssi_state ==
                            BTC_RSSI_STATE_MEDIUM) ||
                           (coex_sta->pre_bt_rssi_state ==
                            BTC_RSSI_STATE_STAY_MEDIUM)) {
-                       if (btrssi >= (rssi_thresh1 +
+                       if (bt_rssi >= (rssi_thresh1 +
                                        BTC_RSSI_COEX_THRESH_TOL_8192E_2ANT))
-                               btrssi_state = BTC_RSSI_STATE_HIGH;
-                       else if (btrssi < rssi_thresh)
-                               btrssi_state = BTC_RSSI_STATE_LOW;
+                               bt_rssi_state = BTC_RSSI_STATE_HIGH;
+                       else if (bt_rssi < rssi_thresh)
+                               bt_rssi_state = BTC_RSSI_STATE_LOW;
                        else
-                               btrssi_state = BTC_RSSI_STATE_STAY_MEDIUM;
+                               bt_rssi_state = BTC_RSSI_STATE_STAY_MEDIUM;
                } else {
-                       if (btrssi < rssi_thresh1)
-                               btrssi_state = BTC_RSSI_STATE_MEDIUM;
+                       if (bt_rssi < rssi_thresh1)
+                               bt_rssi_state = BTC_RSSI_STATE_MEDIUM;
                        else
-                               btrssi_state = BTC_RSSI_STATE_STAY_HIGH;
+                               bt_rssi_state = BTC_RSSI_STATE_STAY_HIGH;
                }
        }
 
-       coex_sta->pre_bt_rssi_state = btrssi_state;
+       coex_sta->pre_bt_rssi_state = bt_rssi_state;
 
-       return btrssi_state;
+       return bt_rssi_state;
 }
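
Beyond the rename, the function above is a threshold state machine with hysteresis: leaving the LOW state requires bt_rssi >= rssi_thresh + BTC_RSSI_COEX_THRESH_TOL_8192E_2ANT, while falling back only requires bt_rssi < rssi_thresh, so a signal hovering near the threshold cannot oscillate between states. btc8192e2ant_wifi_rssi_state() just below follows the same pattern. A two-level sketch of the idea (names and the TOL value are illustrative):

    enum rssi_state { RSSI_LOW, RSSI_STAY_LOW, RSSI_HIGH, RSSI_STAY_HIGH };

    #define TOL 2 /* assumed tolerance, stands in for the _8192E_2ANT constant */

    static enum rssi_state rssi_update(enum rssi_state prev, int rssi, int thresh)
    {
            if (prev == RSSI_LOW || prev == RSSI_STAY_LOW)
                    /* climbing out of LOW needs the extra tolerance... */
                    return rssi >= thresh + TOL ? RSSI_HIGH : RSSI_STAY_LOW;
            /* ...but dropping back only needs to fall below thresh */
            return rssi < thresh ? RSSI_LOW : RSSI_STAY_HIGH;
    }
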
 
-static u8 halbtc8192e2ant_wifirssi_state(struct btc_coexist *btcoexist,
-                                        u8 index, u8 level_num, u8 rssi_thresh,
-                                        u8 rssi_thresh1)
+static u8 btc8192e2ant_wifi_rssi_state(struct btc_coexist *btcoexist,
+                                      u8 index, u8 level_num, u8 rssi_thresh,
+                                      u8 rssi_thresh1)
 {
        struct rtl_priv *rtlpriv = btcoexist->adapter;
-       int wifirssi = 0;
-       u8 wifirssi_state = coex_sta->pre_wifi_rssi_state[index];
+       int wifi_rssi = 0;
+       u8 wifi_rssi_state = coex_sta->pre_wifi_rssi_state[index];
 
-       btcoexist->btc_get(btcoexist, BTC_GET_S4_WIFI_RSSI, &wifirssi);
+       btcoexist->btc_get(btcoexist, BTC_GET_S4_WIFI_RSSI, &wifi_rssi);
 
        if (level_num == 2) {
                if ((coex_sta->pre_wifi_rssi_state[index] ==
                     BTC_RSSI_STATE_LOW) ||
                    (coex_sta->pre_wifi_rssi_state[index] ==
                     BTC_RSSI_STATE_STAY_LOW)) {
-                       if (wifirssi >= (rssi_thresh +
-                                        BTC_RSSI_COEX_THRESH_TOL_8192E_2ANT))
-                               wifirssi_state = BTC_RSSI_STATE_HIGH;
+                       if (wifi_rssi >=
+                           (rssi_thresh + BTC_RSSI_COEX_THRESH_TOL_8192E_2ANT))
+                               wifi_rssi_state = BTC_RSSI_STATE_HIGH;
                        else
-                               wifirssi_state = BTC_RSSI_STATE_STAY_LOW;
+                               wifi_rssi_state = BTC_RSSI_STATE_STAY_LOW;
                } else {
-                       if (wifirssi < rssi_thresh)
-                               wifirssi_state = BTC_RSSI_STATE_LOW;
+                       if (wifi_rssi < rssi_thresh)
+                               wifi_rssi_state = BTC_RSSI_STATE_LOW;
                        else
-                               wifirssi_state = BTC_RSSI_STATE_STAY_HIGH;
+                               wifi_rssi_state = BTC_RSSI_STATE_STAY_HIGH;
                }
        } else if (level_num == 3) {
                if (rssi_thresh > rssi_thresh1) {
@@ -157,36 +158,37 @@ static u8 halbtc8192e2ant_wifirssi_state(struct btc_coexist *btcoexist,
                     BTC_RSSI_STATE_LOW) ||
                    (coex_sta->pre_wifi_rssi_state[index] ==
                     BTC_RSSI_STATE_STAY_LOW)) {
-                       if (wifirssi >= (rssi_thresh +
-                                        BTC_RSSI_COEX_THRESH_TOL_8192E_2ANT))
-                               wifirssi_state = BTC_RSSI_STATE_MEDIUM;
+                       if (wifi_rssi >=
+                           (rssi_thresh + BTC_RSSI_COEX_THRESH_TOL_8192E_2ANT))
+                               wifi_rssi_state = BTC_RSSI_STATE_MEDIUM;
                        else
-                               wifirssi_state = BTC_RSSI_STATE_STAY_LOW;
+                               wifi_rssi_state = BTC_RSSI_STATE_STAY_LOW;
                } else if ((coex_sta->pre_wifi_rssi_state[index] ==
                            BTC_RSSI_STATE_MEDIUM) ||
                           (coex_sta->pre_wifi_rssi_state[index] ==
                            BTC_RSSI_STATE_STAY_MEDIUM)) {
-                       if (wifirssi >= (rssi_thresh1 +
+                       if (wifi_rssi >= (rssi_thresh1 +
                                         BTC_RSSI_COEX_THRESH_TOL_8192E_2ANT))
-                               wifirssi_state = BTC_RSSI_STATE_HIGH;
-                       else if (wifirssi < rssi_thresh)
-                               wifirssi_state = BTC_RSSI_STATE_LOW;
+                               wifi_rssi_state = BTC_RSSI_STATE_HIGH;
+                       else if (wifi_rssi < rssi_thresh)
+                               wifi_rssi_state = BTC_RSSI_STATE_LOW;
                        else
-                               wifirssi_state = BTC_RSSI_STATE_STAY_MEDIUM;
+                               wifi_rssi_state = BTC_RSSI_STATE_STAY_MEDIUM;
                } else {
-                       if (wifirssi < rssi_thresh1)
-                               wifirssi_state = BTC_RSSI_STATE_MEDIUM;
+                       if (wifi_rssi < rssi_thresh1)
+                               wifi_rssi_state = BTC_RSSI_STATE_MEDIUM;
                        else
-                               wifirssi_state = BTC_RSSI_STATE_STAY_HIGH;
+                               wifi_rssi_state = BTC_RSSI_STATE_STAY_HIGH;
                }
        }
 
-       coex_sta->pre_wifi_rssi_state[index] = wifirssi_state;
+       coex_sta->pre_wifi_rssi_state[index] = wifi_rssi_state;
 
-       return wifirssi_state;
+       return wifi_rssi_state;
 }
 
-static void btc8192e2ant_monitor_bt_enable_dis(struct btc_coexist *btcoexist)
+static void btc8192e2ant_monitor_bt_enable_disable(struct btc_coexist
+                                                  *btcoexist)
 {
        struct rtl_priv *rtlpriv = btcoexist->adapter;
        static bool pre_bt_disabled;
@@ -236,57 +238,57 @@ static void btc8192e2ant_monitor_bt_enable_dis(struct btc_coexist *btcoexist)
        }
 }
 
-static u32 halbtc8192e2ant_decidera_mask(struct btc_coexist *btcoexist,
-                                        u8 sstype, u32 ra_masktype)
+static u32 btc8192e2ant_decide_ra_mask(struct btc_coexist *btcoexist,
+                                      u8 ss_type, u32 ra_mask_type)
 {
-       u32 disra_mask = 0x0;
+       u32 dis_ra_mask = 0x0;
 
-       switch (ra_masktype) {
+       switch (ra_mask_type) {
        case 0: /* normal mode */
-               if (sstype == 2)
-                       disra_mask = 0x0;       /* enable 2ss */
+               if (ss_type == 2)
+                       dis_ra_mask = 0x0; /* enable 2ss */
                else
-                       disra_mask = 0xfff00000;/* disable 2ss */
+                       dis_ra_mask = 0xfff00000; /* disable 2ss */
                break;
        case 1: /* disable cck 1/2 */
-               if (sstype == 2)
-                       disra_mask = 0x00000003;/* enable 2ss */
+               if (ss_type == 2)
+                       dis_ra_mask = 0x00000003; /* enable 2ss */
                else
-                       disra_mask = 0xfff00003;/* disable 2ss */
+                       dis_ra_mask = 0xfff00003; /* disable 2ss */
                break;
        case 2: /* disable cck 1/2/5.5, ofdm 6/9/12/18/24, mcs 0/1/2/3/4 */
-               if (sstype == 2)
-                       disra_mask = 0x0001f1f7;/* enable 2ss */
+               if (ss_type == 2)
+                       dis_ra_mask = 0x0001f1f7; /* enable 2ss */
                else
-                       disra_mask = 0xfff1f1f7;/* disable 2ss */
+                       dis_ra_mask = 0xfff1f1f7; /* disable 2ss */
                break;
        default:
                break;
        }
 
-       return disra_mask;
+       return dis_ra_mask;
 }
 
-static void halbtc8192e2ant_Updatera_mask(struct btc_coexist *btcoexist,
-                                         bool force_exec, u32 dis_ratemask)
+static void btc8192e2ant_update_ra_mask(struct btc_coexist *btcoexist,
+                                       bool force_exec, u32 dis_rate_mask)
 {
-       coex_dm->curra_mask = dis_ratemask;
+       coex_dm->cur_ra_mask = dis_rate_mask;
 
-       if (force_exec || (coex_dm->prera_mask != coex_dm->curra_mask))
-               btcoexist->btc_set(btcoexist, BTC_SET_ACT_UPDATE_ra_mask,
-                                  &coex_dm->curra_mask);
-       coex_dm->prera_mask = coex_dm->curra_mask;
+       if (force_exec || (coex_dm->pre_ra_mask != coex_dm->cur_ra_mask))
+               btcoexist->btc_set(btcoexist, BTC_SET_ACT_UPDATE_RAMASK,
+                                  &coex_dm->cur_ra_mask);
+       coex_dm->pre_ra_mask = coex_dm->cur_ra_mask;
 }
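
btc8192e2ant_update_ra_mask() above, like most helpers in this file (retry limit, AMPDU time, DAC swing, PS-TDMA), follows one caching idiom: store the requested value in cur_*, touch hardware only when it differs from pre_* or force_exec is set, then latch pre_* = cur_*. A generic sketch of the idiom, where apply() stands in for the actual register or H2C write:

    #include <stdbool.h>
    #include <stdint.h>

    struct cached_setting {
            uint32_t pre, cur;
    };

    static void set_cached(struct cached_setting *s, bool force_exec,
                           uint32_t val, void (*apply)(uint32_t))
    {
            s->cur = val;
            /* skip the expensive hardware write when nothing changed */
            if (force_exec || s->pre != s->cur)
                    apply(s->cur);
            s->pre = s->cur;
    }
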
 
-static void btc8192e2ant_autorate_fallback_retry(struct btc_coexist *btcoexist,
-                                                bool force_exec, u8 type)
+static void btc8192e2ant_auto_rate_fallback_retry(struct btc_coexist *btcoexist,
+                                                 bool force_exec, u8 type)
 {
-       bool wifi_under_bmode = false;
+       bool wifi_under_b_mode = false;
 
-       coex_dm->cur_arfrtype = type;
+       coex_dm->cur_arfr_type = type;
 
-       if (force_exec || (coex_dm->pre_arfrtype != coex_dm->cur_arfrtype)) {
-               switch (coex_dm->cur_arfrtype) {
+       if (force_exec || (coex_dm->pre_arfr_type != coex_dm->cur_arfr_type)) {
+               switch (coex_dm->cur_arfr_type) {
                case 0: /* normal mode */
                        btcoexist->btc_write_4byte(btcoexist, 0x430,
                                                   coex_dm->backup_arfr_cnt1);
@@ -296,8 +298,8 @@ static void btc8192e2ant_autorate_fallback_retry(struct btc_coexist *btcoexist,
                case 1:
                        btcoexist->btc_get(btcoexist,
                                           BTC_GET_BL_WIFI_UNDER_B_MODE,
-                                          &wifi_under_bmode);
-                       if (wifi_under_bmode) {
+                                          &wifi_under_b_mode);
+                       if (wifi_under_b_mode) {
                                btcoexist->btc_write_4byte(btcoexist, 0x430,
                                                           0x0);
                                btcoexist->btc_write_4byte(btcoexist, 0x434,
@@ -314,46 +316,45 @@ static void btc8192e2ant_autorate_fallback_retry(struct btc_coexist *btcoexist,
                }
        }
 
-       coex_dm->pre_arfrtype = coex_dm->cur_arfrtype;
+       coex_dm->pre_arfr_type = coex_dm->cur_arfr_type;
 }
 
-static void halbtc8192e2ant_retrylimit(struct btc_coexist *btcoexist,
-                                      bool force_exec, u8 type)
+static void btc8192e2ant_retry_limit(struct btc_coexist *btcoexist,
+                                    bool force_exec, u8 type)
 {
-       coex_dm->cur_retrylimit_type = type;
+       coex_dm->cur_retry_limit_type = type;
 
-       if (force_exec || (coex_dm->pre_retrylimit_type !=
-                          coex_dm->cur_retrylimit_type)) {
-               switch (coex_dm->cur_retrylimit_type) {
+       if (force_exec || (coex_dm->pre_retry_limit_type !=
+                          coex_dm->cur_retry_limit_type)) {
+               switch (coex_dm->cur_retry_limit_type) {
                case 0: /* normal mode */
-                               btcoexist->btc_write_2byte(btcoexist, 0x42a,
-                                                   coex_dm->backup_retrylimit);
-                               break;
+                       btcoexist->btc_write_2byte(btcoexist, 0x42a,
+                                                  coex_dm->backup_retry_limit);
+                       break;
                case 1: /* retry limit = 8 */
-                               btcoexist->btc_write_2byte(btcoexist, 0x42a,
-                                                          0x0808);
-                               break;
+                       btcoexist->btc_write_2byte(btcoexist, 0x42a, 0x0808);
+                       break;
                default:
-                               break;
+                       break;
                }
        }
 
-       coex_dm->pre_retrylimit_type = coex_dm->cur_retrylimit_type;
+       coex_dm->pre_retry_limit_type = coex_dm->cur_retry_limit_type;
 }
 
-static void halbtc8192e2ant_ampdu_maxtime(struct btc_coexist *btcoexist,
-                                         bool force_exec, u8 type)
+static void btc8192e2ant_ampdu_maxtime(struct btc_coexist *btcoexist,
+                                      bool force_exec, u8 type)
 {
-       coex_dm->cur_ampdutime_type = type;
+       coex_dm->cur_ampdu_time_type = type;
 
-       if (force_exec || (coex_dm->pre_ampdutime_type !=
-                          coex_dm->cur_ampdutime_type)) {
-               switch (coex_dm->cur_ampdutime_type) {
+       if (force_exec || (coex_dm->pre_ampdu_time_type !=
+                          coex_dm->cur_ampdu_time_type)) {
+               switch (coex_dm->cur_ampdu_time_type) {
                case 0: /* normal mode */
                        btcoexist->btc_write_1byte(btcoexist, 0x456,
                                                coex_dm->backup_ampdu_maxtime);
                        break;
-               case 1: /* AMPDU timw = 0x38 * 32us */
+               case 1: /* AMPDU time = 0x38 * 32us */
                        btcoexist->btc_write_1byte(btcoexist, 0x456, 0x38);
                        break;
                default:
@@ -361,30 +362,30 @@ static void halbtc8192e2ant_ampdu_maxtime(struct btc_coexist *btcoexist,
                }
        }
 
-       coex_dm->pre_ampdutime_type = coex_dm->cur_ampdutime_type;
+       coex_dm->pre_ampdu_time_type = coex_dm->cur_ampdu_time_type;
 }
 
-static void halbtc8192e2ant_limited_tx(struct btc_coexist *btcoexist,
-                                      bool force_exec, u8 ra_masktype,
-                                      u8 arfr_type, u8 retrylimit_type,
-                                      u8 ampdutime_type)
+static void btc8192e2ant_limited_tx(struct btc_coexist *btcoexist,
+                                   bool force_exec, u8 ra_mask_type,
+                                   u8 arfr_type, u8 retry_limit_type,
+                                   u8 ampdu_time_type)
 {
-       u32 disra_mask = 0x0;
+       u32 dis_ra_mask = 0x0;
 
-       coex_dm->curra_masktype = ra_masktype;
-       disra_mask = halbtc8192e2ant_decidera_mask(btcoexist,
-                                                  coex_dm->cur_sstype,
-                                                  ra_masktype);
-       halbtc8192e2ant_Updatera_mask(btcoexist, force_exec, disra_mask);
-btc8192e2ant_autorate_fallback_retry(btcoexist, force_exec, arfr_type);
-       halbtc8192e2ant_retrylimit(btcoexist, force_exec, retrylimit_type);
-       halbtc8192e2ant_ampdu_maxtime(btcoexist, force_exec, ampdutime_type);
+       coex_dm->cur_ra_mask_type = ra_mask_type;
+       dis_ra_mask =
+                btc8192e2ant_decide_ra_mask(btcoexist, coex_dm->cur_ss_type,
+                                            ra_mask_type);
+       btc8192e2ant_update_ra_mask(btcoexist, force_exec, dis_ra_mask);
+       btc8192e2ant_auto_rate_fallback_retry(btcoexist, force_exec, arfr_type);
+       btc8192e2ant_retry_limit(btcoexist, force_exec, retry_limit_type);
+       btc8192e2ant_ampdu_maxtime(btcoexist, force_exec, ampdu_time_type);
 }
 
-static void halbtc8192e2ant_limited_rx(struct btc_coexist *btcoexist,
-                                      bool force_exec, bool rej_ap_agg_pkt,
-                                      bool bt_ctrl_agg_buf_size,
-                                      u8 agg_buf_size)
+static void btc8192e2ant_limited_rx(struct btc_coexist *btcoexist,
+                                   bool force_exec, bool rej_ap_agg_pkt,
+                                   bool bt_ctrl_agg_buf_size,
+                                   u8 agg_buf_size)
 {
        bool reject_rx_agg = rej_ap_agg_pkt;
        bool bt_ctrl_rx_agg_size = bt_ctrl_agg_buf_size;
@@ -406,7 +407,7 @@ static void halbtc8192e2ant_limited_rx(struct btc_coexist *btcoexist,
        btcoexist->btc_set(btcoexist, BTC_SET_ACT_AGGREGATE_CTRL, NULL);
 }
 
-static void halbtc8192e2ant_monitor_bt_ctr(struct btc_coexist *btcoexist)
+static void btc8192e2ant_monitor_bt_ctr(struct btc_coexist *btcoexist)
 {
        struct rtl_priv *rtlpriv = btcoexist->adapter;
        u32 reg_hp_txrx, reg_lp_txrx, u32tmp;
@@ -417,11 +418,11 @@ static void halbtc8192e2ant_monitor_bt_ctr(struct btc_coexist *btcoexist)
 
        u32tmp = btcoexist->btc_read_4byte(btcoexist, reg_hp_txrx);
        reg_hp_tx = u32tmp & MASKLWORD;
-       reg_hp_rx = (u32tmp & MASKHWORD)>>16;
+       reg_hp_rx = (u32tmp & MASKHWORD) >> 16;
 
        u32tmp = btcoexist->btc_read_4byte(btcoexist, reg_lp_txrx);
        reg_lp_tx = u32tmp & MASKLWORD;
-       reg_lp_rx = (u32tmp & MASKHWORD)>>16;
+       reg_lp_rx = (u32tmp & MASKHWORD) >> 16;
 
        coex_sta->high_priority_tx = reg_hp_tx;
        coex_sta->high_priority_rx = reg_hp_rx;
@@ -439,14 +440,14 @@ static void halbtc8192e2ant_monitor_bt_ctr(struct btc_coexist *btcoexist)
        btcoexist->btc_write_1byte(btcoexist, 0x76e, 0xc);
 }
 
-static void halbtc8192e2ant_querybt_info(struct btc_coexist *btcoexist)
+static void btc8192e2ant_query_bt_info(struct btc_coexist *btcoexist)
 {
        struct rtl_priv *rtlpriv = btcoexist->adapter;
        u8 h2c_parameter[1] = {0};
 
        coex_sta->c2h_bt_info_req_sent = true;
 
-       h2c_parameter[0] |= BIT0;       /* trigger */
+       h2c_parameter[0] |= BIT0; /* trigger */
 
        RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
                 "[BTCoex], Query Bt Info, FW write 0x61 = 0x%x\n",
@@ -455,12 +456,12 @@ static void halbtc8192e2ant_querybt_info(struct btc_coexist *btcoexist)
        btcoexist->btc_fill_h2c(btcoexist, 0x61, 1, h2c_parameter);
 }
 
-static void halbtc8192e2ant_update_btlink_info(struct btc_coexist *btcoexist)
+static void btc8192e2ant_update_bt_link_info(struct btc_coexist *btcoexist)
 {
        struct btc_bt_link_info *bt_link_info = &btcoexist->bt_link_info;
-       bool bt_hson = false;
+       bool bt_hs_on = false;
 
-       btcoexist->btc_get(btcoexist, BTC_GET_BL_HS_OPERATION, &bt_hson);
+       btcoexist->btc_get(btcoexist, BTC_GET_BL_HS_OPERATION, &bt_hs_on);
 
        bt_link_info->bt_link_exist = coex_sta->bt_link_exist;
        bt_link_info->sco_exist = coex_sta->sco_exist;
@@ -469,7 +470,7 @@ static void halbtc8192e2ant_update_btlink_info(struct btc_coexist *btcoexist)
        bt_link_info->hid_exist = coex_sta->hid_exist;
 
        /* work around for HS mode. */
-       if (bt_hson) {
+       if (bt_hs_on) {
                bt_link_info->pan_exist = true;
                bt_link_info->bt_link_exist = true;
        }
@@ -511,16 +512,16 @@ static void halbtc8192e2ant_update_btlink_info(struct btc_coexist *btcoexist)
                bt_link_info->hid_only = false;
 }
 
-static u8 halbtc8192e2ant_action_algorithm(struct btc_coexist *btcoexist)
+static u8 btc8192e2ant_action_algorithm(struct btc_coexist *btcoexist)
 {
        struct rtl_priv *rtlpriv = btcoexist->adapter;
        struct btc_bt_link_info *bt_link_info = &btcoexist->bt_link_info;
        struct btc_stack_info *stack_info = &btcoexist->stack_info;
-       bool bt_hson = false;
+       bool bt_hs_on = false;
        u8 algorithm = BT_8192E_2ANT_COEX_ALGO_UNDEFINED;
-       u8 numdiffprofile = 0;
+       u8 num_of_diff_profile = 0;
 
-       btcoexist->btc_get(btcoexist, BTC_GET_BL_HS_OPERATION, &bt_hson);
+       btcoexist->btc_get(btcoexist, BTC_GET_BL_HS_OPERATION, &bt_hs_on);
 
        if (!bt_link_info->bt_link_exist) {
                RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
@@ -529,15 +530,15 @@ static u8 halbtc8192e2ant_action_algorithm(struct btc_coexist *btcoexist)
        }
 
        if (bt_link_info->sco_exist)
-               numdiffprofile++;
+               num_of_diff_profile++;
        if (bt_link_info->hid_exist)
-               numdiffprofile++;
+               num_of_diff_profile++;
        if (bt_link_info->pan_exist)
-               numdiffprofile++;
+               num_of_diff_profile++;
        if (bt_link_info->a2dp_exist)
-               numdiffprofile++;
+               num_of_diff_profile++;
 
-       if (numdiffprofile == 1) {
+       if (num_of_diff_profile == 1) {
                if (bt_link_info->sco_exist) {
                        RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
                                 "SCO only\n");
@@ -552,7 +553,7 @@ static u8 halbtc8192e2ant_action_algorithm(struct btc_coexist *btcoexist)
                                         "A2DP only\n");
                                algorithm = BT_8192E_2ANT_COEX_ALGO_A2DP;
                        } else if (bt_link_info->pan_exist) {
-                               if (bt_hson) {
+                               if (bt_hs_on) {
                                        RT_TRACE(rtlpriv, COMP_BT_COEXIST,
                                                 DBG_LOUD,
                                                 "PAN(HS) only\n");
@@ -567,7 +568,7 @@ static u8 halbtc8192e2ant_action_algorithm(struct btc_coexist *btcoexist)
                                }
                        }
                }
-       } else if (numdiffprofile == 2) {
+       } else if (num_of_diff_profile == 2) {
                if (bt_link_info->sco_exist) {
                        if (bt_link_info->hid_exist) {
                                RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
@@ -578,7 +579,7 @@ static u8 halbtc8192e2ant_action_algorithm(struct btc_coexist *btcoexist)
                                         "SCO + A2DP ==> SCO\n");
                                algorithm = BT_8192E_2ANT_COEX_ALGO_PANEDR_HID;
                        } else if (bt_link_info->pan_exist) {
-                               if (bt_hson) {
+                               if (bt_hs_on) {
                                        RT_TRACE(rtlpriv, COMP_BT_COEXIST,
                                                 DBG_LOUD,
                                                 "SCO + PAN(HS)\n");
@@ -609,7 +610,7 @@ static u8 halbtc8192e2ant_action_algorithm(struct btc_coexist *btcoexist)
                                }
                        } else if (bt_link_info->hid_exist &&
                                   bt_link_info->pan_exist) {
-                               if (bt_hson) {
+                               if (bt_hs_on) {
                                        RT_TRACE(rtlpriv, COMP_BT_COEXIST,
                                                 DBG_LOUD,
                                                 "HID + PAN(HS)\n");
@@ -623,7 +624,7 @@ static u8 halbtc8192e2ant_action_algorithm(struct btc_coexist *btcoexist)
                                }
                        } else if (bt_link_info->pan_exist &&
                                   bt_link_info->a2dp_exist) {
-                               if (bt_hson) {
+                               if (bt_hs_on) {
                                        RT_TRACE(rtlpriv, COMP_BT_COEXIST,
                                                 DBG_LOUD,
                                                 "A2DP + PAN(HS)\n");
@@ -638,7 +639,7 @@ static u8 halbtc8192e2ant_action_algorithm(struct btc_coexist *btcoexist)
                                }
                        }
                }
-       } else if (numdiffprofile == 3) {
+       } else if (num_of_diff_profile == 3) {
                if (bt_link_info->sco_exist) {
                        if (bt_link_info->hid_exist &&
                            bt_link_info->a2dp_exist) {
@@ -647,7 +648,7 @@ static u8 halbtc8192e2ant_action_algorithm(struct btc_coexist *btcoexist)
                                algorithm = BT_8192E_2ANT_COEX_ALGO_PANEDR_HID;
                        } else if (bt_link_info->hid_exist &&
                                   bt_link_info->pan_exist) {
-                               if (bt_hson) {
+                               if (bt_hs_on) {
                                        RT_TRACE(rtlpriv, COMP_BT_COEXIST,
                                                 DBG_LOUD,
                                                 "SCO + HID + PAN(HS)\n");
@@ -661,7 +662,7 @@ static u8 halbtc8192e2ant_action_algorithm(struct btc_coexist *btcoexist)
                                }
                        } else if (bt_link_info->pan_exist &&
                                   bt_link_info->a2dp_exist) {
-                               if (bt_hson) {
+                               if (bt_hs_on) {
                                        RT_TRACE(rtlpriv, COMP_BT_COEXIST,
                                                 DBG_LOUD,
                                                 "SCO + A2DP + PAN(HS)\n");
@@ -678,7 +679,7 @@ static u8 halbtc8192e2ant_action_algorithm(struct btc_coexist *btcoexist)
                        if (bt_link_info->hid_exist &&
                            bt_link_info->pan_exist &&
                            bt_link_info->a2dp_exist) {
-                               if (bt_hson) {
+                               if (bt_hs_on) {
                                        RT_TRACE(rtlpriv, COMP_BT_COEXIST,
                                                 DBG_LOUD,
                                                 "HID + A2DP + PAN(HS)\n");
@@ -693,12 +694,12 @@ static u8 halbtc8192e2ant_action_algorithm(struct btc_coexist *btcoexist)
                                }
                        }
                }
-       } else if (numdiffprofile >= 3) {
+       } else if (num_of_diff_profile >= 3) {
                if (bt_link_info->sco_exist) {
                        if (bt_link_info->hid_exist &&
                            bt_link_info->pan_exist &&
                            bt_link_info->a2dp_exist) {
-                               if (bt_hson) {
+                               if (bt_hs_on) {
                                        RT_TRACE(rtlpriv, COMP_BT_COEXIST,
                                                 DBG_LOUD,
                                                 "ErrorSCO+HID+A2DP+PAN(HS)\n");
@@ -717,8 +718,8 @@ static u8 halbtc8192e2ant_action_algorithm(struct btc_coexist *btcoexist)
        return algorithm;
 }
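
The function above derives num_of_diff_profile by counting the four *_exist booleans and then walks nested if/else chains per combination. An equivalent, more compact way to express the combination (illustrative only, not what the driver does) is to pack the profiles into a bitmask:

    #include <stdbool.h>

    enum { P_SCO = 1, P_HID = 2, P_PAN = 4, P_A2DP = 8 };

    static unsigned int profile_mask(bool sco, bool hid, bool pan, bool a2dp)
    {
            return (sco ? P_SCO : 0) | (hid ? P_HID : 0) |
                   (pan ? P_PAN : 0) | (a2dp ? P_A2DP : 0);
    }
    /* __builtin_popcount(profile_mask(...)) then equals num_of_diff_profile,
     * and a switch on the mask could replace the nested if/else chains. */
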
 
-static void halbtc8192e2ant_setfw_dac_swinglevel(struct btc_coexist *btcoexist,
-                                                u8 dac_swinglvl)
+static void btc8192e2ant_set_fw_dac_swing_level(struct btc_coexist *btcoexist,
+                                               u8 dac_swing_lvl)
 {
        struct rtl_priv *rtlpriv = btcoexist->adapter;
        u8 h2c_parameter[1] = {0};
@@ -726,81 +727,81 @@ static void halbtc8192e2ant_setfw_dac_swinglevel(struct btc_coexist *btcoexist,
        /* There are several type of dacswing
         * 0x18/ 0x10/ 0xc/ 0x8/ 0x4/ 0x6
         */
-       h2c_parameter[0] = dac_swinglvl;
+       h2c_parameter[0] = dac_swing_lvl;
 
        RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
-                "[BTCoex], Set Dac Swing Level = 0x%x\n", dac_swinglvl);
+                "[BTCoex], Set Dac Swing Level = 0x%x\n", dac_swing_lvl);
        RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
                 "[BTCoex], FW write 0x64 = 0x%x\n", h2c_parameter[0]);
 
        btcoexist->btc_fill_h2c(btcoexist, 0x64, 1, h2c_parameter);
 }
 
-static void halbtc8192e2ant_set_fwdec_btpwr(struct btc_coexist *btcoexist,
-                                           u8 dec_btpwr_lvl)
+static void btc8192e2ant_set_fw_dec_bt_pwr(struct btc_coexist *btcoexist,
+                                          u8 dec_bt_pwr_lvl)
 {
        struct rtl_priv *rtlpriv = btcoexist->adapter;
        u8 h2c_parameter[1] = {0};
 
-       h2c_parameter[0] = dec_btpwr_lvl;
+       h2c_parameter[0] = dec_bt_pwr_lvl;
 
        RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
                 "[BTCoex] decrease Bt Power level = %d, FW write 0x62 = 0x%x\n",
-                dec_btpwr_lvl, h2c_parameter[0]);
+                dec_bt_pwr_lvl, h2c_parameter[0]);
 
        btcoexist->btc_fill_h2c(btcoexist, 0x62, 1, h2c_parameter);
 }
 
-static void halbtc8192e2ant_dec_btpwr(struct btc_coexist *btcoexist,
-                                     bool force_exec, u8 dec_btpwr_lvl)
+static void btc8192e2ant_dec_bt_pwr(struct btc_coexist *btcoexist,
+                                   bool force_exec, u8 dec_bt_pwr_lvl)
 {
        struct rtl_priv *rtlpriv = btcoexist->adapter;
 
        RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
                 "[BTCoex], %s Dec BT power level = %d\n",
-                force_exec ? "force to" : "", dec_btpwr_lvl);
-       coex_dm->cur_dec_bt_pwr = dec_btpwr_lvl;
+                force_exec ? "force to" : "", dec_bt_pwr_lvl);
+       coex_dm->cur_dec_bt_pwr = dec_bt_pwr_lvl;
 
        if (!force_exec) {
                RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
                         "[BTCoex], preBtDecPwrLvl=%d, curBtDecPwrLvl=%d\n",
                         coex_dm->pre_dec_bt_pwr, coex_dm->cur_dec_bt_pwr);
        }
-       halbtc8192e2ant_set_fwdec_btpwr(btcoexist, coex_dm->cur_dec_bt_pwr);
+       btc8192e2ant_set_fw_dec_bt_pwr(btcoexist, coex_dm->cur_dec_bt_pwr);
 
        coex_dm->pre_dec_bt_pwr = coex_dm->cur_dec_bt_pwr;
 }
 
-static void halbtc8192e2ant_set_bt_autoreport(struct btc_coexist *btcoexist,
-                                             bool enable_autoreport)
+static void btc8192e2ant_set_bt_auto_report(struct btc_coexist *btcoexist,
+                                           bool enable_auto_report)
 {
        struct rtl_priv *rtlpriv = btcoexist->adapter;
        u8 h2c_parameter[1] = {0};
 
        h2c_parameter[0] = 0;
 
-       if (enable_autoreport)
+       if (enable_auto_report)
                h2c_parameter[0] |= BIT0;
 
        RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
                 "[BTCoex], BT FW auto report : %s, FW write 0x68 = 0x%x\n",
-                (enable_autoreport ? "Enabled!!" : "Disabled!!"),
+                (enable_auto_report ? "Enabled!!" : "Disabled!!"),
                 h2c_parameter[0]);
 
        btcoexist->btc_fill_h2c(btcoexist, 0x68, 1, h2c_parameter);
 }
 
-static void halbtc8192e2ant_bt_autoreport(struct btc_coexist *btcoexist,
-                                         bool force_exec,
-                                         bool enable_autoreport)
+static void btc8192e2ant_bt_auto_report(struct btc_coexist *btcoexist,
+                                       bool force_exec,
+                                       bool enable_auto_report)
 {
        struct rtl_priv *rtlpriv = btcoexist->adapter;
 
        RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
                 "[BTCoex], %s BT Auto report = %s\n",
                 (force_exec ? "force to" : ""),
-                ((enable_autoreport) ? "Enabled" : "Disabled"));
-       coex_dm->cur_bt_auto_report = enable_autoreport;
+                ((enable_auto_report) ? "Enabled" : "Disabled"));
+       coex_dm->cur_bt_auto_report = enable_auto_report;
 
        if (!force_exec) {
                RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
@@ -811,21 +812,21 @@ static void halbtc8192e2ant_bt_autoreport(struct btc_coexist *btcoexist,
                if (coex_dm->pre_bt_auto_report == coex_dm->cur_bt_auto_report)
                        return;
        }
-       halbtc8192e2ant_set_bt_autoreport(btcoexist,
-                                         coex_dm->cur_bt_auto_report);
+       btc8192e2ant_set_bt_auto_report(btcoexist,
+                                       coex_dm->cur_bt_auto_report);
 
        coex_dm->pre_bt_auto_report = coex_dm->cur_bt_auto_report;
 }
 
-static void halbtc8192e2ant_fw_dac_swinglvl(struct btc_coexist *btcoexist,
-                                           bool force_exec, u8 fw_dac_swinglvl)
+static void btc8192e2ant_fw_dac_swing_lvl(struct btc_coexist *btcoexist,
+                                         bool force_exec, u8 fw_dac_swing_lvl)
 {
        struct rtl_priv *rtlpriv = btcoexist->adapter;
 
        RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
                 "[BTCoex], %s set FW Dac Swing level = %d\n",
-                (force_exec ? "force to" : ""), fw_dac_swinglvl);
-       coex_dm->cur_fw_dac_swing_lvl = fw_dac_swinglvl;
+                (force_exec ? "force to" : ""), fw_dac_swing_lvl);
+       coex_dm->cur_fw_dac_swing_lvl = fw_dac_swing_lvl;
 
        if (!force_exec) {
                RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
@@ -838,8 +839,8 @@ static void halbtc8192e2ant_fw_dac_swinglvl(struct btc_coexist *btcoexist,
                        return;
        }
 
-       halbtc8192e2ant_setfw_dac_swinglevel(btcoexist,
-                                            coex_dm->cur_fw_dac_swing_lvl);
+       btc8192e2ant_set_fw_dac_swing_level(btcoexist,
+                                           coex_dm->cur_fw_dac_swing_lvl);
 
        coex_dm->pre_fw_dac_swing_lvl = coex_dm->cur_fw_dac_swing_lvl;
 }
@@ -869,8 +870,8 @@ static void btc8192e2ant_set_sw_rf_rx_lpf_corner(struct btc_coexist *btcoexist,
        }
 }
 
-static void halbtc8192e2ant_rf_shrink(struct btc_coexist *btcoexist,
-                                     bool force_exec, bool rx_rf_shrink_on)
+static void btc8192e2ant_rf_shrink(struct btc_coexist *btcoexist,
+                                  bool force_exec, bool rx_rf_shrink_on)
 {
        struct rtl_priv *rtlpriv = btcoexist->adapter;
 
@@ -896,8 +897,8 @@ static void halbtc8192e2ant_rf_shrink(struct btc_coexist *btcoexist,
        coex_dm->pre_rf_rx_lpf_shrink = coex_dm->cur_rf_rx_lpf_shrink;
 }
 
-static void halbtc8192e2ant_set_dac_swingreg(struct btc_coexist *btcoexist,
-                                            u32 level)
+static void btc8192e2ant_set_dac_swing_reg(struct btc_coexist *btcoexist,
+                                          u32 level)
 {
        struct rtl_priv *rtlpriv = btcoexist->adapter;
        u8 val = (u8)level;
@@ -907,28 +908,28 @@ static void halbtc8192e2ant_set_dac_swingreg(struct btc_coexist *btcoexist,
        btcoexist->btc_write_1byte_bitmask(btcoexist, 0x883, 0x3e, val);
 }
 
-static void btc8192e2ant_setsw_full_swing(struct btc_coexist *btcoexist,
-                                         bool sw_dac_swingon,
-                                         u32 sw_dac_swinglvl)
+static void btc8192e2ant_set_sw_full_swing(struct btc_coexist *btcoexist,
+                                          bool sw_dac_swing_on,
+                                          u32 sw_dac_swing_lvl)
 {
-       if (sw_dac_swingon)
-               halbtc8192e2ant_set_dac_swingreg(btcoexist, sw_dac_swinglvl);
+       if (sw_dac_swing_on)
+               btc8192e2ant_set_dac_swing_reg(btcoexist, sw_dac_swing_lvl);
        else
-               halbtc8192e2ant_set_dac_swingreg(btcoexist, 0x18);
+               btc8192e2ant_set_dac_swing_reg(btcoexist, 0x18);
 }
 
-static void halbtc8192e2ant_DacSwing(struct btc_coexist *btcoexist,
-                                    bool force_exec, bool dac_swingon,
-                                    u32 dac_swinglvl)
+static void btc8192e2ant_dac_swing(struct btc_coexist *btcoexist,
+                                  bool force_exec, bool dac_swing_on,
+                                  u32 dac_swing_lvl)
 {
        struct rtl_priv *rtlpriv = btcoexist->adapter;
 
        RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
-                "[BTCoex], %s turn DacSwing=%s, dac_swinglvl = 0x%x\n",
+                "[BTCoex], %s turn DacSwing=%s, dac_swing_lvl = 0x%x\n",
                 (force_exec ? "force to" : ""),
-                ((dac_swingon) ? "ON" : "OFF"), dac_swinglvl);
-       coex_dm->cur_dac_swing_on = dac_swingon;
-       coex_dm->cur_dac_swing_lvl = dac_swinglvl;
+                ((dac_swing_on) ? "ON" : "OFF"), dac_swing_lvl);
+       coex_dm->cur_dac_swing_on = dac_swing_on;
+       coex_dm->cur_dac_swing_lvl = dac_swing_lvl;
 
        if (!force_exec) {
                RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
@@ -945,14 +946,14 @@ static void halbtc8192e2ant_DacSwing(struct btc_coexist *btcoexist,
                        return;
        }
        mdelay(30);
-       btc8192e2ant_setsw_full_swing(btcoexist, dac_swingon, dac_swinglvl);
+       btc8192e2ant_set_sw_full_swing(btcoexist, dac_swing_on, dac_swing_lvl);
 
        coex_dm->pre_dac_swing_on = coex_dm->cur_dac_swing_on;
        coex_dm->pre_dac_swing_lvl = coex_dm->cur_dac_swing_lvl;
 }
 
-static void halbtc8192e2ant_set_agc_table(struct btc_coexist *btcoexist,
-                                         bool agc_table_en)
+static void btc8192e2ant_set_agc_table(struct btc_coexist *btcoexist,
+                                      bool agc_table_en)
 {
        struct rtl_priv *rtlpriv = btcoexist->adapter;
 
@@ -978,8 +979,8 @@ static void halbtc8192e2ant_set_agc_table(struct btc_coexist *btcoexist,
        }
 }
 
-static void halbtc8192e2ant_AgcTable(struct btc_coexist *btcoexist,
-                                    bool force_exec, bool agc_table_en)
+static void btc8192e2ant_agc_table(struct btc_coexist *btcoexist,
+                                  bool force_exec, bool agc_table_en)
 {
        struct rtl_priv *rtlpriv = btcoexist->adapter;
 
@@ -998,14 +999,14 @@ static void halbtc8192e2ant_AgcTable(struct btc_coexist *btcoexist,
                if (coex_dm->pre_agc_table_en == coex_dm->cur_agc_table_en)
                        return;
        }
-       halbtc8192e2ant_set_agc_table(btcoexist, agc_table_en);
+       btc8192e2ant_set_agc_table(btcoexist, agc_table_en);
 
        coex_dm->pre_agc_table_en = coex_dm->cur_agc_table_en;
 }
 
-static void halbtc8192e2ant_set_coex_table(struct btc_coexist *btcoexist,
-                                          u32 val0x6c0, u32 val0x6c4,
-                                          u32 val0x6c8, u8 val0x6cc)
+static void btc8192e2ant_set_coex_table(struct btc_coexist *btcoexist,
+                                       u32 val0x6c0, u32 val0x6c4,
+                                       u32 val0x6c8, u8 val0x6cc)
 {
        struct rtl_priv *rtlpriv = btcoexist->adapter;
 
@@ -1026,10 +1027,9 @@ static void halbtc8192e2ant_set_coex_table(struct btc_coexist *btcoexist,
        btcoexist->btc_write_1byte(btcoexist, 0x6cc, val0x6cc);
 }
 
-static void halbtc8192e2ant_coex_table(struct btc_coexist *btcoexist,
-                                      bool force_exec,
-                                      u32 val0x6c0, u32 val0x6c4,
-                                      u32 val0x6c8, u8 val0x6cc)
+static void btc8192e2ant_coex_table(struct btc_coexist *btcoexist,
+                                   bool force_exec, u32 val0x6c0, u32 val0x6c4,
+                                   u32 val0x6c8, u8 val0x6cc)
 {
        struct rtl_priv *rtlpriv = btcoexist->adapter;
 
@@ -1064,8 +1064,8 @@ static void halbtc8192e2ant_coex_table(struct btc_coexist *btcoexist,
                    (coex_dm->pre_val0x6cc == coex_dm->cur_val0x6cc))
                        return;
        }
-       halbtc8192e2ant_set_coex_table(btcoexist, val0x6c0, val0x6c4,
-                                      val0x6c8, val0x6cc);
+       btc8192e2ant_set_coex_table(btcoexist, val0x6c0, val0x6c4, val0x6c8,
+                                   val0x6cc);
 
        coex_dm->pre_val0x6c0 = coex_dm->cur_val0x6c0;
        coex_dm->pre_val0x6c4 = coex_dm->cur_val0x6c4;
@@ -1073,37 +1073,37 @@ static void halbtc8192e2ant_coex_table(struct btc_coexist *btcoexist,
        coex_dm->pre_val0x6cc = coex_dm->cur_val0x6cc;
 }
 
-static void btc8192e2ant_coex_tbl_w_type(struct btc_coexist *btcoexist,
-                                        bool force_exec, u8 type)
+static void btc8192e2ant_coex_table_with_type(struct btc_coexist *btcoexist,
+                                             bool force_exec, u8 type)
 {
        switch (type) {
        case 0:
-               halbtc8192e2ant_coex_table(btcoexist, force_exec, 0x55555555,
-                                          0x5a5a5a5a, 0xffffff, 0x3);
+               btc8192e2ant_coex_table(btcoexist, force_exec, 0x55555555,
+                                       0x5a5a5a5a, 0xffffff, 0x3);
                break;
        case 1:
-               halbtc8192e2ant_coex_table(btcoexist, force_exec, 0x5a5a5a5a,
-                                          0x5a5a5a5a, 0xffffff, 0x3);
+               btc8192e2ant_coex_table(btcoexist, force_exec, 0x5a5a5a5a,
+                                       0x5a5a5a5a, 0xffffff, 0x3);
                break;
        case 2:
-               halbtc8192e2ant_coex_table(btcoexist, force_exec, 0x55555555,
-                                          0x5ffb5ffb, 0xffffff, 0x3);
+               btc8192e2ant_coex_table(btcoexist, force_exec, 0x55555555,
+                                       0x5ffb5ffb, 0xffffff, 0x3);
                break;
        case 3:
-               halbtc8192e2ant_coex_table(btcoexist, force_exec, 0xdfffdfff,
-                                          0x5fdb5fdb, 0xffffff, 0x3);
+               btc8192e2ant_coex_table(btcoexist, force_exec, 0xdfffdfff,
+                                       0x5fdb5fdb, 0xffffff, 0x3);
                break;
        case 4:
-               halbtc8192e2ant_coex_table(btcoexist, force_exec, 0xdfffdfff,
-                                          0x5ffb5ffb, 0xffffff, 0x3);
+               btc8192e2ant_coex_table(btcoexist, force_exec, 0xdfffdfff,
+                                       0x5ffb5ffb, 0xffffff, 0x3);
                break;
        default:
                break;
        }
 }
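
The switch above maps a small type index to four fixed register values (0x6c0/0x6c4/0x6c8/0x6cc). The same mapping written as a lookup table, with the values copied from the cases above (a sketch, not the driver's code):

    #include <stdint.h>

    struct coex_tbl { uint32_t v6c0, v6c4, v6c8; uint8_t v6cc; };

    static const struct coex_tbl coex_tbls[] = {
            { 0x55555555, 0x5a5a5a5a, 0xffffff, 0x3 }, /* type 0 */
            { 0x5a5a5a5a, 0x5a5a5a5a, 0xffffff, 0x3 }, /* type 1 */
            { 0x55555555, 0x5ffb5ffb, 0xffffff, 0x3 }, /* type 2 */
            { 0xdfffdfff, 0x5fdb5fdb, 0xffffff, 0x3 }, /* type 3 */
            { 0xdfffdfff, 0x5ffb5ffb, 0xffffff, 0x3 }, /* type 4 */
    };
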
 
-static void halbtc8192e2ant_set_fw_ignore_wlanact(struct btc_coexist *btcoexist,
-                                                 bool enable)
+static void btc8192e2ant_set_fw_ignore_wlan_act(struct btc_coexist *btcoexist,
+                                               bool enable)
 {
        struct rtl_priv *rtlpriv = btcoexist->adapter;
        u8 h2c_parameter[1] = {0};
@@ -1118,8 +1118,8 @@ static void halbtc8192e2ant_set_fw_ignore_wlanact(struct btc_coexist *btcoexist,
        btcoexist->btc_fill_h2c(btcoexist, 0x63, 1, h2c_parameter);
 }
 
-static void halbtc8192e2ant_IgnoreWlanAct(struct btc_coexist *btcoexist,
-                                         bool force_exec, bool enable)
+static void btc8192e2ant_ignore_wlan_act(struct btc_coexist *btcoexist,
+                                        bool force_exec, bool enable)
 {
        struct rtl_priv *rtlpriv = btcoexist->adapter;
 
@@ -1140,12 +1140,12 @@ static void halbtc8192e2ant_IgnoreWlanAct(struct btc_coexist *btcoexist,
                    coex_dm->cur_ignore_wlan_act)
                        return;
        }
-       halbtc8192e2ant_set_fw_ignore_wlanact(btcoexist, enable);
+       btc8192e2ant_set_fw_ignore_wlan_act(btcoexist, enable);
 
        coex_dm->pre_ignore_wlan_act = coex_dm->cur_ignore_wlan_act;
 }
 
-static void halbtc8192e2ant_SetFwPstdma(struct btc_coexist *btcoexist, u8 byte1,
+static void btc8192e2ant_set_fw_ps_tdma(struct btc_coexist *btcoexist, u8 byte1,
                                        u8 byte2, u8 byte3, u8 byte4, u8 byte5)
 {
        struct rtl_priv *rtlpriv = btcoexist->adapter;
@@ -1173,24 +1173,24 @@ static void halbtc8192e2ant_SetFwPstdma(struct btc_coexist *btcoexist, u8 byte1,
        btcoexist->btc_fill_h2c(btcoexist, 0x60, 5, h2c_parameter);
 }
 
-static void btc8192e2ant_sw_mec1(struct btc_coexist *btcoexist,
-                                bool shrink_rx_lpf, bool low_penalty_ra,
-                                bool limited_dig, bool btlan_constrain)
+static void btc8192e2ant_sw_mechanism1(struct btc_coexist *btcoexist,
+                                      bool shrink_rx_lpf, bool low_penalty_ra,
+                                      bool limited_dig, bool btlan_constrain)
 {
-       halbtc8192e2ant_rf_shrink(btcoexist, NORMAL_EXEC, shrink_rx_lpf);
+       btc8192e2ant_rf_shrink(btcoexist, NORMAL_EXEC, shrink_rx_lpf);
 }
 
-static void btc8192e2ant_sw_mec2(struct btc_coexist *btcoexist,
-                                bool agc_table_shift, bool adc_backoff,
-                                bool sw_dac_swing, u32 dac_swinglvl)
+static void btc8192e2ant_sw_mechanism2(struct btc_coexist *btcoexist,
+                                      bool agc_table_shift, bool adc_backoff,
+                                      bool sw_dac_swing, u32 dac_swing_lvl)
 {
-       halbtc8192e2ant_AgcTable(btcoexist, NORMAL_EXEC, agc_table_shift);
-       halbtc8192e2ant_DacSwing(btcoexist, NORMAL_EXEC, sw_dac_swing,
-                                dac_swinglvl);
+       btc8192e2ant_agc_table(btcoexist, NORMAL_EXEC, agc_table_shift);
+       btc8192e2ant_dac_swing(btcoexist, NORMAL_EXEC, sw_dac_swing,
+                              dac_swing_lvl);
 }
 
-static void halbtc8192e2ant_ps_tdma(struct btc_coexist *btcoexist,
-                                   bool force_exec, bool turn_on, u8 type)
+static void btc8192e2ant_ps_tdma(struct btc_coexist *btcoexist,
+                                bool force_exec, bool turn_on, u8 type)
 {
        struct rtl_priv *rtlpriv = btcoexist->adapter;
 
@@ -1217,91 +1217,91 @@ static void halbtc8192e2ant_ps_tdma(struct btc_coexist *btcoexist,
                switch (type) {
                case 1:
                default:
-                       halbtc8192e2ant_SetFwPstdma(btcoexist, 0xe3, 0x1a,
+                       btc8192e2ant_set_fw_ps_tdma(btcoexist, 0xe3, 0x1a,
                                                    0x1a, 0xe1, 0x90);
                        break;
                case 2:
-                       halbtc8192e2ant_SetFwPstdma(btcoexist, 0xe3, 0x12,
+                       btc8192e2ant_set_fw_ps_tdma(btcoexist, 0xe3, 0x12,
                                                    0x12, 0xe1, 0x90);
                        break;
                case 3:
-                       halbtc8192e2ant_SetFwPstdma(btcoexist, 0xe3, 0x1c,
+                       btc8192e2ant_set_fw_ps_tdma(btcoexist, 0xe3, 0x1c,
                                                    0x3, 0xf1, 0x90);
                        break;
                case 4:
-                       halbtc8192e2ant_SetFwPstdma(btcoexist, 0xe3, 0x10,
+                       btc8192e2ant_set_fw_ps_tdma(btcoexist, 0xe3, 0x10,
                                                    0x3, 0xf1, 0x90);
                        break;
                case 5:
-                       halbtc8192e2ant_SetFwPstdma(btcoexist, 0xe3, 0x1a,
+                       btc8192e2ant_set_fw_ps_tdma(btcoexist, 0xe3, 0x1a,
                                                    0x1a, 0x60, 0x90);
                        break;
                case 6:
-                       halbtc8192e2ant_SetFwPstdma(btcoexist, 0xe3, 0x12,
+                       btc8192e2ant_set_fw_ps_tdma(btcoexist, 0xe3, 0x12,
                                                    0x12, 0x60, 0x90);
                        break;
                case 7:
-                       halbtc8192e2ant_SetFwPstdma(btcoexist, 0xe3, 0x1c,
+                       btc8192e2ant_set_fw_ps_tdma(btcoexist, 0xe3, 0x1c,
                                                    0x3, 0x70, 0x90);
                        break;
                case 8:
-                       halbtc8192e2ant_SetFwPstdma(btcoexist, 0xa3, 0x10,
+                       btc8192e2ant_set_fw_ps_tdma(btcoexist, 0xa3, 0x10,
                                                    0x3, 0x70, 0x90);
                        break;
                case 9:
-                       halbtc8192e2ant_SetFwPstdma(btcoexist, 0xe3, 0x1a,
+                       btc8192e2ant_set_fw_ps_tdma(btcoexist, 0xe3, 0x1a,
                                                    0x1a, 0xe1, 0x10);
                        break;
                case 10:
-                       halbtc8192e2ant_SetFwPstdma(btcoexist, 0xe3, 0x12,
+                       btc8192e2ant_set_fw_ps_tdma(btcoexist, 0xe3, 0x12,
                                                    0x12, 0xe1, 0x10);
                        break;
                case 11:
-                       halbtc8192e2ant_SetFwPstdma(btcoexist, 0xe3, 0x1c,
+                       btc8192e2ant_set_fw_ps_tdma(btcoexist, 0xe3, 0x1c,
                                                    0x3, 0xf1, 0x10);
                        break;
                case 12:
-                       halbtc8192e2ant_SetFwPstdma(btcoexist, 0xe3, 0x10,
+                       btc8192e2ant_set_fw_ps_tdma(btcoexist, 0xe3, 0x10,
                                                    0x3, 0xf1, 0x10);
                        break;
                case 13:
-                       halbtc8192e2ant_SetFwPstdma(btcoexist, 0xe3, 0x1a,
+                       btc8192e2ant_set_fw_ps_tdma(btcoexist, 0xe3, 0x1a,
                                                    0x1a, 0xe0, 0x10);
                        break;
                case 14:
-                       halbtc8192e2ant_SetFwPstdma(btcoexist, 0xe3, 0x12,
+                       btc8192e2ant_set_fw_ps_tdma(btcoexist, 0xe3, 0x12,
                                                    0x12, 0xe0, 0x10);
                        break;
                case 15:
-                       halbtc8192e2ant_SetFwPstdma(btcoexist, 0xe3, 0x1c,
+                       btc8192e2ant_set_fw_ps_tdma(btcoexist, 0xe3, 0x1c,
                                                    0x3, 0xf0, 0x10);
                        break;
                case 16:
-                       halbtc8192e2ant_SetFwPstdma(btcoexist, 0xe3, 0x12,
+                       btc8192e2ant_set_fw_ps_tdma(btcoexist, 0xe3, 0x12,
                                                    0x3, 0xf0, 0x10);
                        break;
                case 17:
-                       halbtc8192e2ant_SetFwPstdma(btcoexist, 0x61, 0x20,
+                       btc8192e2ant_set_fw_ps_tdma(btcoexist, 0x61, 0x20,
                                                    0x03, 0x10, 0x10);
                        break;
                case 18:
-                       halbtc8192e2ant_SetFwPstdma(btcoexist, 0xe3, 0x5,
+                       btc8192e2ant_set_fw_ps_tdma(btcoexist, 0xe3, 0x5,
                                                    0x5, 0xe1, 0x90);
                        break;
                case 19:
-                       halbtc8192e2ant_SetFwPstdma(btcoexist, 0xe3, 0x25,
+                       btc8192e2ant_set_fw_ps_tdma(btcoexist, 0xe3, 0x25,
                                                    0x25, 0xe1, 0x90);
                        break;
                case 20:
-                       halbtc8192e2ant_SetFwPstdma(btcoexist, 0xe3, 0x25,
+                       btc8192e2ant_set_fw_ps_tdma(btcoexist, 0xe3, 0x25,
                                                    0x25, 0x60, 0x90);
                        break;
                case 21:
-                       halbtc8192e2ant_SetFwPstdma(btcoexist, 0xe3, 0x15,
+                       btc8192e2ant_set_fw_ps_tdma(btcoexist, 0xe3, 0x15,
                                                    0x03, 0x70, 0x90);
                        break;
                case 71:
-                       halbtc8192e2ant_SetFwPstdma(btcoexist, 0xe3, 0x1a,
+                       btc8192e2ant_set_fw_ps_tdma(btcoexist, 0xe3, 0x1a,
                                                    0x1a, 0xe1, 0x90);
                        break;
                }
@@ -1310,12 +1310,12 @@ static void halbtc8192e2ant_ps_tdma(struct btc_coexist *btcoexist,
                switch (type) {
                default:
                case 0:
-                       halbtc8192e2ant_SetFwPstdma(btcoexist, 0x8, 0x0, 0x0,
+                       btc8192e2ant_set_fw_ps_tdma(btcoexist, 0x8, 0x0, 0x0,
                                                    0x0, 0x0);
                        btcoexist->btc_write_1byte(btcoexist, 0x92c, 0x4);
                        break;
                case 1:
-                       halbtc8192e2ant_SetFwPstdma(btcoexist, 0x0, 0x0, 0x0,
+                       btc8192e2ant_set_fw_ps_tdma(btcoexist, 0x0, 0x0, 0x0,
                                                    0x8, 0x0);
                        mdelay(5);
                        btcoexist->btc_write_1byte(btcoexist, 0x92c, 0x20);
@@ -1328,22 +1328,22 @@ static void halbtc8192e2ant_ps_tdma(struct btc_coexist *btcoexist,
        coex_dm->pre_ps_tdma = coex_dm->cur_ps_tdma;
 }
 
-static void halbtc8192e2ant_set_switch_sstype(struct btc_coexist *btcoexist,
-                                             u8 sstype)
+static void btc8192e2ant_set_switch_ss_type(struct btc_coexist *btcoexist,
+                                           u8 ss_type)
 {
        struct rtl_priv *rtlpriv = btcoexist->adapter;
        u8 mimops = BTC_MIMO_PS_DYNAMIC;
-       u32 disra_mask = 0x0;
+       u32 dis_ra_mask = 0x0;
 
        RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
-                "[BTCoex], REAL set SS Type = %d\n", sstype);
+                "[BTCoex], REAL set SS Type = %d\n", ss_type);
 
-       disra_mask = halbtc8192e2ant_decidera_mask(btcoexist, sstype,
-                                                  coex_dm->curra_masktype);
-       halbtc8192e2ant_Updatera_mask(btcoexist, FORCE_EXEC, disra_mask);
+       dis_ra_mask = btc8192e2ant_decide_ra_mask(btcoexist, ss_type,
+                                                 coex_dm->cur_ra_mask_type);
+       btc8192e2ant_update_ra_mask(btcoexist, FORCE_EXEC, dis_ra_mask);
 
-       if (sstype == 1) {
-               halbtc8192e2ant_ps_tdma(btcoexist, FORCE_EXEC, false, 1);
+       if (ss_type == 1) {
+               btc8192e2ant_ps_tdma(btcoexist, FORCE_EXEC, false, 1);
                /* switch ofdm path */
                btcoexist->btc_write_1byte(btcoexist, 0xc04, 0x11);
                btcoexist->btc_write_1byte(btcoexist, 0xd04, 0x1);
@@ -1352,8 +1352,8 @@ static void halbtc8192e2ant_set_switch_sstype(struct btc_coexist *btcoexist,
                btcoexist->btc_write_1byte_bitmask(btcoexist, 0xe77, 0x4, 0x1);
                btcoexist->btc_write_1byte(btcoexist, 0xa07, 0x81);
                mimops = BTC_MIMO_PS_STATIC;
-       } else if (sstype == 2) {
-               halbtc8192e2ant_ps_tdma(btcoexist, FORCE_EXEC, false, 0);
+       } else if (ss_type == 2) {
+               btc8192e2ant_ps_tdma(btcoexist, FORCE_EXEC, false, 0);
                btcoexist->btc_write_1byte(btcoexist, 0xc04, 0x33);
                btcoexist->btc_write_1byte(btcoexist, 0xd04, 0x3);
                btcoexist->btc_write_4byte(btcoexist, 0x90c, 0x81121313);
@@ -1365,89 +1365,89 @@ static void halbtc8192e2ant_set_switch_sstype(struct btc_coexist *btcoexist,
        btcoexist->btc_set(btcoexist, BTC_SET_ACT_SEND_MIMO_PS, &mimops);
 }
 
-static void halbtc8192e2ant_switch_sstype(struct btc_coexist *btcoexist,
-                                         bool force_exec, u8 new_sstype)
+static void btc8192e2ant_switch_ss_type(struct btc_coexist *btcoexist,
+                                       bool force_exec, u8 new_ss_type)
 {
        struct rtl_priv *rtlpriv = btcoexist->adapter;
 
        RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
                 "[BTCoex], %s Switch SS Type = %d\n",
-                (force_exec ? "force to" : ""), new_sstype);
-       coex_dm->cur_sstype = new_sstype;
+                (force_exec ? "force to" : ""), new_ss_type);
+       coex_dm->cur_ss_type = new_ss_type;
 
        if (!force_exec) {
-               if (coex_dm->pre_sstype == coex_dm->cur_sstype)
+               if (coex_dm->pre_ss_type == coex_dm->cur_ss_type)
                        return;
        }
-       halbtc8192e2ant_set_switch_sstype(btcoexist, coex_dm->cur_sstype);
+       btc8192e2ant_set_switch_ss_type(btcoexist, coex_dm->cur_ss_type);
 
-       coex_dm->pre_sstype = coex_dm->cur_sstype;
+       coex_dm->pre_ss_type = coex_dm->cur_ss_type;
 }
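
/*
 * Illustration (hypothetical helper, not in this file): the wrapper above
 * follows the pre/cur caching pattern used throughout this driver --
 * record the requested value, skip the hardware write under NORMAL_EXEC
 * when nothing changed, then commit the value as the last one programmed.
 */
static void btc_example_set_cached(struct btc_coexist *btcoexist,
				   bool force_exec, u8 new_val)
{
	coex_dm->cur_val = new_val;	/* cur_val/pre_val are assumed fields */

	/* NORMAL_EXEC (force_exec == false): drop redundant updates */
	if (!force_exec && coex_dm->pre_val == coex_dm->cur_val)
		return;

	btc_example_apply(btcoexist, coex_dm->cur_val);	/* assumed setter */
	coex_dm->pre_val = coex_dm->cur_val;
}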
 
-static void halbtc8192e2ant_coex_alloff(struct btc_coexist *btcoexist)
+static void btc8192e2ant_coex_all_off(struct btc_coexist *btcoexist)
 {
        /* fw all off */
-       halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC, false, 1);
-       halbtc8192e2ant_fw_dac_swinglvl(btcoexist, NORMAL_EXEC, 6);
-       halbtc8192e2ant_dec_btpwr(btcoexist, NORMAL_EXEC, 0);
+       btc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC, false, 1);
+       btc8192e2ant_fw_dac_swing_lvl(btcoexist, NORMAL_EXEC, 6);
+       btc8192e2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, 0);
 
        /* sw all off */
-       btc8192e2ant_sw_mec1(btcoexist, false, false, false, false);
-       btc8192e2ant_sw_mec2(btcoexist, false, false, false, 0x18);
+       btc8192e2ant_sw_mechanism1(btcoexist, false, false, false, false);
+       btc8192e2ant_sw_mechanism2(btcoexist, false, false, false, 0x18);
 
        /* hw all off */
-       btc8192e2ant_coex_tbl_w_type(btcoexist, NORMAL_EXEC, 0);
+       btc8192e2ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 0);
 }
 
-static void halbtc8192e2ant_init_coex_dm(struct btc_coexist *btcoexist)
+static void btc8192e2ant_init_coex_dm(struct btc_coexist *btcoexist)
 {
        /* force to reset coex mechanism */
 
-       halbtc8192e2ant_ps_tdma(btcoexist, FORCE_EXEC, false, 1);
-       halbtc8192e2ant_fw_dac_swinglvl(btcoexist, FORCE_EXEC, 6);
-       halbtc8192e2ant_dec_btpwr(btcoexist, FORCE_EXEC, 0);
+       btc8192e2ant_ps_tdma(btcoexist, FORCE_EXEC, false, 1);
+       btc8192e2ant_fw_dac_swing_lvl(btcoexist, FORCE_EXEC, 6);
+       btc8192e2ant_dec_bt_pwr(btcoexist, FORCE_EXEC, 0);
 
-       btc8192e2ant_coex_tbl_w_type(btcoexist, FORCE_EXEC, 0);
-       halbtc8192e2ant_switch_sstype(btcoexist, FORCE_EXEC, 2);
+       btc8192e2ant_coex_table_with_type(btcoexist, FORCE_EXEC, 0);
+       btc8192e2ant_switch_ss_type(btcoexist, FORCE_EXEC, 2);
 
-       btc8192e2ant_sw_mec1(btcoexist, false, false, false, false);
-       btc8192e2ant_sw_mec2(btcoexist, false, false, false, 0x18);
+       btc8192e2ant_sw_mechanism1(btcoexist, false, false, false, false);
+       btc8192e2ant_sw_mechanism2(btcoexist, false, false, false, 0x18);
 }
 
-static void halbtc8192e2ant_action_bt_inquiry(struct btc_coexist *btcoexist)
+static void btc8192e2ant_action_bt_inquiry(struct btc_coexist *btcoexist)
 {
        bool low_pwr_disable = true;
 
        btcoexist->btc_set(btcoexist, BTC_SET_ACT_DISABLE_LOW_POWER,
                           &low_pwr_disable);
 
-       halbtc8192e2ant_switch_sstype(btcoexist, NORMAL_EXEC, 1);
+       btc8192e2ant_switch_ss_type(btcoexist, NORMAL_EXEC, 1);
 
-       btc8192e2ant_coex_tbl_w_type(btcoexist, NORMAL_EXEC, 2);
-       halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 3);
-       halbtc8192e2ant_fw_dac_swinglvl(btcoexist, NORMAL_EXEC, 6);
-       halbtc8192e2ant_dec_btpwr(btcoexist, NORMAL_EXEC, 0);
+       btc8192e2ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 2);
+       btc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 3);
+       btc8192e2ant_fw_dac_swing_lvl(btcoexist, NORMAL_EXEC, 6);
+       btc8192e2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, 0);
 
-       btc8192e2ant_sw_mec1(btcoexist, false, false, false, false);
-       btc8192e2ant_sw_mec2(btcoexist, false, false, false, 0x18);
+       btc8192e2ant_sw_mechanism1(btcoexist, false, false, false, false);
+       btc8192e2ant_sw_mechanism2(btcoexist, false, false, false, 0x18);
 }
 
-static bool halbtc8192e2ant_is_common_action(struct btc_coexist *btcoexist)
+static bool btc8192e2ant_is_common_action(struct btc_coexist *btcoexist)
 {
        struct rtl_priv *rtlpriv = btcoexist->adapter;
        struct btc_bt_link_info *bt_link_info = &btcoexist->bt_link_info;
        bool common = false, wifi_connected = false, wifi_busy = false;
-       bool bt_hson = false, low_pwr_disable = false;
+       bool bt_hs_on = false, low_pwr_disable = false;
 
-       btcoexist->btc_get(btcoexist, BTC_GET_BL_HS_OPERATION, &bt_hson);
+       btcoexist->btc_get(btcoexist, BTC_GET_BL_HS_OPERATION, &bt_hs_on);
        btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_CONNECTED,
                           &wifi_connected);
        btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_BUSY, &wifi_busy);
 
        if (bt_link_info->sco_exist || bt_link_info->hid_exist)
-               halbtc8192e2ant_limited_tx(btcoexist, NORMAL_EXEC, 1, 0, 0, 0);
+               btc8192e2ant_limited_tx(btcoexist, NORMAL_EXEC, 1, 0, 0, 0);
        else
-               halbtc8192e2ant_limited_tx(btcoexist, NORMAL_EXEC, 0, 0, 0, 0);
+               btc8192e2ant_limited_tx(btcoexist, NORMAL_EXEC, 0, 0, 0, 0);
 
        if (!wifi_connected) {
                low_pwr_disable = false;
@@ -1461,26 +1461,24 @@ static bool halbtc8192e2ant_is_common_action(struct btc_coexist *btcoexist)
                     coex_dm->bt_status) ||
                    (BT_8192E_2ANT_BT_STATUS_CONNECTED_IDLE ==
                     coex_dm->bt_status)) {
-                       halbtc8192e2ant_switch_sstype(btcoexist,
-                                                     NORMAL_EXEC, 2);
-                       btc8192e2ant_coex_tbl_w_type(btcoexist,
-                                                    NORMAL_EXEC, 1);
-                       halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC,
-                                               false, 0);
+                       btc8192e2ant_switch_ss_type(btcoexist, NORMAL_EXEC, 2);
+                       btc8192e2ant_coex_table_with_type(btcoexist,
+                                                         NORMAL_EXEC, 1);
+                       btc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC, false, 0);
                } else {
-                       halbtc8192e2ant_switch_sstype(btcoexist,
-                                                     NORMAL_EXEC, 1);
-                       btc8192e2ant_coex_tbl_w_type(btcoexist,
-                                                    NORMAL_EXEC, 0);
-                       halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC,
-                                               false, 1);
+                       btc8192e2ant_switch_ss_type(btcoexist, NORMAL_EXEC, 1);
+                       btc8192e2ant_coex_table_with_type(btcoexist,
+                                                         NORMAL_EXEC, 0);
+                       btc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC, false, 1);
                }
 
-               halbtc8192e2ant_fw_dac_swinglvl(btcoexist, NORMAL_EXEC, 6);
-               halbtc8192e2ant_dec_btpwr(btcoexist, NORMAL_EXEC, 0);
+               btc8192e2ant_fw_dac_swing_lvl(btcoexist, NORMAL_EXEC, 6);
+               btc8192e2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, 0);
 
-               btc8192e2ant_sw_mec1(btcoexist, false, false, false, false);
-               btc8192e2ant_sw_mec2(btcoexist, false, false, false, 0x18);
+               btc8192e2ant_sw_mechanism1(btcoexist, false, false, false,
+                                          false);
+               btc8192e2ant_sw_mechanism2(btcoexist, false, false, false,
+                                          0x18);
 
                common = true;
        } else {
@@ -1494,20 +1492,18 @@ static bool halbtc8192e2ant_is_common_action(struct btc_coexist *btcoexist)
                        RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
                                 "Wifi connected + BT non connected-idle!!\n");
 
-                       halbtc8192e2ant_switch_sstype(btcoexist,
-                                                     NORMAL_EXEC, 2);
-                       btc8192e2ant_coex_tbl_w_type(btcoexist,
-                                                    NORMAL_EXEC, 1);
-                       halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC,
-                                               false, 0);
-                       halbtc8192e2ant_fw_dac_swinglvl(btcoexist,
-                                                       NORMAL_EXEC, 6);
-                       halbtc8192e2ant_dec_btpwr(btcoexist, NORMAL_EXEC, 0);
-
-                       btc8192e2ant_sw_mec1(btcoexist, false, false,
-                                            false, false);
-                       btc8192e2ant_sw_mec2(btcoexist, false, false,
-                                            false, 0x18);
+                       btc8192e2ant_switch_ss_type(btcoexist, NORMAL_EXEC, 2);
+                       btc8192e2ant_coex_table_with_type(btcoexist,
+                                                         NORMAL_EXEC, 1);
+                       btc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC, false, 0);
+                       btc8192e2ant_fw_dac_swing_lvl(btcoexist,
+                                                     NORMAL_EXEC, 6);
+                       btc8192e2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, 0);
+
+                       btc8192e2ant_sw_mechanism1(btcoexist, false, false,
+                                                  false, false);
+                       btc8192e2ant_sw_mechanism2(btcoexist, false, false,
+                                                  false, 0x18);
 
                        common = true;
                } else if (BT_8192E_2ANT_BT_STATUS_CONNECTED_IDLE ==
@@ -1517,25 +1513,25 @@ static bool halbtc8192e2ant_is_common_action(struct btc_coexist *btcoexist)
                                           BTC_SET_ACT_DISABLE_LOW_POWER,
                                           &low_pwr_disable);
 
-                       if (bt_hson)
+                       if (bt_hs_on)
                                return false;
                        RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
                                 "Wifi connected + BT connected-idle!!\n");
 
-                       halbtc8192e2ant_switch_sstype(btcoexist,
-                                                     NORMAL_EXEC, 2);
-                       btc8192e2ant_coex_tbl_w_type(btcoexist,
-                                                    NORMAL_EXEC, 1);
-                       halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC,
-                                               false, 0);
-                       halbtc8192e2ant_fw_dac_swinglvl(btcoexist,
-                                                       NORMAL_EXEC, 6);
-                       halbtc8192e2ant_dec_btpwr(btcoexist, NORMAL_EXEC, 0);
-
-                       btc8192e2ant_sw_mec1(btcoexist, true, false,
-                                            false, false);
-                       btc8192e2ant_sw_mec2(btcoexist, false, false,
-                                            false, 0x18);
+                       btc8192e2ant_switch_ss_type(btcoexist,
+                                                   NORMAL_EXEC, 2);
+                       btc8192e2ant_coex_table_with_type(btcoexist,
+                                                         NORMAL_EXEC, 1);
+                       btc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC,
+                                            false, 0);
+                       btc8192e2ant_fw_dac_swing_lvl(btcoexist,
+                                                     NORMAL_EXEC, 6);
+                       btc8192e2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, 0);
+
+                       btc8192e2ant_sw_mechanism1(btcoexist, true, false,
+                                                  false, false);
+                       btc8192e2ant_sw_mechanism2(btcoexist, false, false,
+                                                  false, 0x18);
 
                        common = true;
                } else {
@@ -1552,20 +1548,21 @@ static bool halbtc8192e2ant_is_common_action(struct btc_coexist *btcoexist)
                                RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
                                         "Wifi Connected-Idle + BT Busy!!\n");
 
-                               halbtc8192e2ant_switch_sstype(btcoexist,
-                                                             NORMAL_EXEC, 1);
-                               btc8192e2ant_coex_tbl_w_type(btcoexist,
-                                                            NORMAL_EXEC, 2);
-                               halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC,
-                                                       true, 21);
-                               halbtc8192e2ant_fw_dac_swinglvl(btcoexist,
-                                                               NORMAL_EXEC, 6);
-                               halbtc8192e2ant_dec_btpwr(btcoexist,
-                                                         NORMAL_EXEC, 0);
-                               btc8192e2ant_sw_mec1(btcoexist, false,
-                                                    false, false, false);
-                               btc8192e2ant_sw_mec2(btcoexist, false,
-                                                    false, false, 0x18);
+                               btc8192e2ant_switch_ss_type(btcoexist,
+                                                           NORMAL_EXEC, 1);
+                               btc8192e2ant_coex_table_with_type(btcoexist,
+                                                                 NORMAL_EXEC,
+                                                                 2);
+                               btc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC,
+                                                    true, 21);
+                               btc8192e2ant_fw_dac_swing_lvl(btcoexist,
+                                                             NORMAL_EXEC, 6);
+                               btc8192e2ant_dec_bt_pwr(btcoexist,
+                                                       NORMAL_EXEC, 0);
+                               btc8192e2ant_sw_mechanism1(btcoexist, false,
+                                                          false, false, false);
+                               btc8192e2ant_sw_mechanism2(btcoexist, false,
+                                                          false, false, 0x18);
                                common = true;
                        }
                }
@@ -1573,588 +1570,9 @@ static bool halbtc8192e2ant_is_common_action(struct btc_coexist *btcoexist)
        return common;
 }
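
/*
 * Sketch of a typical caller (hypothetical; the real dispatch logic is
 * elsewhere): the shared idle handling above runs first, and only when it
 * reports false does the algorithm fall through to a profile-specific
 * action such as btc8192e2ant_action_sco().
 */
	if (!btc8192e2ant_is_common_action(btcoexist))
		btc8192e2ant_action_sco(btcoexist);	/* or A2DP/HID/PAN */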
 
-static void btc8192e_int1(struct btc_coexist *btcoexist, bool tx_pause,
-                         int result)
-{
-       struct rtl_priv *rtlpriv = btcoexist->adapter;
-
-       if (tx_pause) {
-               RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
-                        "[BTCoex], TxPause = 1\n");
-
-               if (coex_dm->cur_ps_tdma == 71) {
-                       halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC,
-                                               true, 5);
-                       coex_dm->tdma_adj_type = 5;
-               } else if (coex_dm->cur_ps_tdma == 1) {
-                       halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC,
-                                               true, 5);
-                       coex_dm->tdma_adj_type = 5;
-               } else if (coex_dm->cur_ps_tdma == 2) {
-                       halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC,
-                                               true, 6);
-                       coex_dm->tdma_adj_type = 6;
-               } else if (coex_dm->cur_ps_tdma == 3) {
-                       halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC,
-                                               true, 7);
-                       coex_dm->tdma_adj_type = 7;
-               } else if (coex_dm->cur_ps_tdma == 4) {
-                       halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC,
-                                               true, 8);
-                       coex_dm->tdma_adj_type = 8;
-               }
-               if (coex_dm->cur_ps_tdma == 9) {
-                       halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC,
-                                               true, 13);
-                       coex_dm->tdma_adj_type = 13;
-               } else if (coex_dm->cur_ps_tdma == 10) {
-                       halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC,
-                                               true, 14);
-                       coex_dm->tdma_adj_type = 14;
-               } else if (coex_dm->cur_ps_tdma == 11) {
-                       halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC,
-                                               true, 15);
-                       coex_dm->tdma_adj_type = 15;
-               } else if (coex_dm->cur_ps_tdma == 12) {
-                       halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC,
-                                               true, 16);
-                       coex_dm->tdma_adj_type = 16;
-               }
-
-               if (result == -1) {
-                       if (coex_dm->cur_ps_tdma == 5) {
-                               halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC,
-                                                       true, 6);
-                               coex_dm->tdma_adj_type = 6;
-                       } else if (coex_dm->cur_ps_tdma == 6) {
-                               halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC,
-                                                       true, 7);
-                               coex_dm->tdma_adj_type = 7;
-                       } else if (coex_dm->cur_ps_tdma == 7) {
-                               halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC,
-                                                       true, 8);
-                               coex_dm->tdma_adj_type = 8;
-                       } else if (coex_dm->cur_ps_tdma == 13) {
-                               halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC,
-                                                       true, 14);
-                               coex_dm->tdma_adj_type = 14;
-                       } else if (coex_dm->cur_ps_tdma == 14) {
-                               halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC,
-                                                       true, 15);
-                               coex_dm->tdma_adj_type = 15;
-                       } else if (coex_dm->cur_ps_tdma == 15) {
-                               halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC,
-                                                       true, 16);
-                               coex_dm->tdma_adj_type = 16;
-                       }
-               } else if (result == 1) {
-                       if (coex_dm->cur_ps_tdma == 8) {
-                               halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC,
-                                                       true, 7);
-                               coex_dm->tdma_adj_type = 7;
-                       } else if (coex_dm->cur_ps_tdma == 7) {
-                               halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC,
-                                                       true, 6);
-                               coex_dm->tdma_adj_type = 6;
-                       } else if (coex_dm->cur_ps_tdma == 6) {
-                               halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC,
-                                                       true, 5);
-                               coex_dm->tdma_adj_type = 5;
-                       } else if (coex_dm->cur_ps_tdma == 16) {
-                               halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC,
-                                                       true, 15);
-                               coex_dm->tdma_adj_type = 15;
-                       } else if (coex_dm->cur_ps_tdma == 15) {
-                               halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC,
-                                                       true, 14);
-                               coex_dm->tdma_adj_type = 14;
-                       } else if (coex_dm->cur_ps_tdma == 14) {
-                               halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC,
-                                                       true, 13);
-                               coex_dm->tdma_adj_type = 13;
-                       }
-               }
-       } else {
-               RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
-                        "[BTCoex], TxPause = 0\n");
-               if (coex_dm->cur_ps_tdma == 5) {
-                       halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC,
-                                               true, 71);
-                       coex_dm->tdma_adj_type = 71;
-               } else if (coex_dm->cur_ps_tdma == 6) {
-                       halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC,
-                                               true, 2);
-                       coex_dm->tdma_adj_type = 2;
-               } else if (coex_dm->cur_ps_tdma == 7) {
-                       halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC,
-                                               true, 3);
-                       coex_dm->tdma_adj_type = 3;
-               } else if (coex_dm->cur_ps_tdma == 8) {
-                       halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC,
-                                               true, 4);
-                       coex_dm->tdma_adj_type = 4;
-               }
-               if (coex_dm->cur_ps_tdma == 13) {
-                       halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC,
-                                               true, 9);
-                       coex_dm->tdma_adj_type = 9;
-               } else if (coex_dm->cur_ps_tdma == 14) {
-                       halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC,
-                                               true, 10);
-                       coex_dm->tdma_adj_type = 10;
-               } else if (coex_dm->cur_ps_tdma == 15) {
-                       halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC,
-                                               true, 11);
-                       coex_dm->tdma_adj_type = 11;
-               } else if (coex_dm->cur_ps_tdma == 16) {
-                       halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC,
-                                               true, 12);
-                       coex_dm->tdma_adj_type = 12;
-               }
-
-               if (result == -1) {
-                       if (coex_dm->cur_ps_tdma == 71) {
-                               halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC,
-                                                       true, 1);
-                               coex_dm->tdma_adj_type = 1;
-                       } else if (coex_dm->cur_ps_tdma == 1) {
-                               halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC,
-                                                       true, 2);
-                               coex_dm->tdma_adj_type = 2;
-                       } else if (coex_dm->cur_ps_tdma == 2) {
-                               halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC,
-                                                       true, 3);
-                               coex_dm->tdma_adj_type = 3;
-                       } else if (coex_dm->cur_ps_tdma == 3) {
-                               halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC,
-                                                       true, 4);
-                               coex_dm->tdma_adj_type = 4;
-                       } else if (coex_dm->cur_ps_tdma == 9) {
-                               halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC,
-                                                       true, 10);
-                               coex_dm->tdma_adj_type = 10;
-                       } else if (coex_dm->cur_ps_tdma == 10) {
-                               halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC,
-                                                       true, 11);
-                               coex_dm->tdma_adj_type = 11;
-                       } else if (coex_dm->cur_ps_tdma == 11) {
-                               halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC,
-                                                       true, 12);
-                               coex_dm->tdma_adj_type = 12;
-                       }
-               } else if (result == 1) {
-                       if (coex_dm->cur_ps_tdma == 4) {
-                               halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC,
-                                                       true, 3);
-                               coex_dm->tdma_adj_type = 3;
-                       } else if (coex_dm->cur_ps_tdma == 3) {
-                               halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC,
-                                                       true, 2);
-                               coex_dm->tdma_adj_type = 2;
-                       } else if (coex_dm->cur_ps_tdma == 2) {
-                               halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC,
-                                                       true, 1);
-                               coex_dm->tdma_adj_type = 1;
-                       } else if (coex_dm->cur_ps_tdma == 1) {
-                               halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC,
-                                                       true, 71);
-                               coex_dm->tdma_adj_type = 71;
-                       } else if (coex_dm->cur_ps_tdma == 12) {
-                               halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC,
-                                                       true, 11);
-                               coex_dm->tdma_adj_type = 11;
-                       } else if (coex_dm->cur_ps_tdma == 11) {
-                               halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC,
-                                                       true, 10);
-                               coex_dm->tdma_adj_type = 10;
-                       } else if (coex_dm->cur_ps_tdma == 10) {
-                               halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC,
-                                                       true, 9);
-                               coex_dm->tdma_adj_type = 9;
-                       }
-               }
-       }
-}
-
-static void btc8192e_int2(struct btc_coexist *btcoexist, bool tx_pause,
-                         int result)
-{
-       struct rtl_priv *rtlpriv = btcoexist->adapter;
-
-       if (tx_pause) {
-               RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
-                        "[BTCoex], TxPause = 1\n");
-               if (coex_dm->cur_ps_tdma == 1) {
-                       halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC,
-                                               true, 6);
-                       coex_dm->tdma_adj_type = 6;
-               } else if (coex_dm->cur_ps_tdma == 2) {
-                       halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC,
-                                               true, 6);
-                       coex_dm->tdma_adj_type = 6;
-               } else if (coex_dm->cur_ps_tdma == 3) {
-                       halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC,
-                                               true, 7);
-                       coex_dm->tdma_adj_type = 7;
-               } else if (coex_dm->cur_ps_tdma == 4) {
-                       halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC,
-                                               true, 8);
-                       coex_dm->tdma_adj_type = 8;
-               }
-               if (coex_dm->cur_ps_tdma == 9) {
-                       halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC,
-                                               true, 14);
-                       coex_dm->tdma_adj_type = 14;
-               } else if (coex_dm->cur_ps_tdma == 10) {
-                       halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC,
-                                               true, 14);
-                       coex_dm->tdma_adj_type = 14;
-               } else if (coex_dm->cur_ps_tdma == 11) {
-                       halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC,
-                                               true, 15);
-                       coex_dm->tdma_adj_type = 15;
-               } else if (coex_dm->cur_ps_tdma == 12) {
-                       halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC,
-                                               true, 16);
-                       coex_dm->tdma_adj_type = 16;
-               }
-               if (result == -1) {
-                       if (coex_dm->cur_ps_tdma == 5) {
-                               halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC,
-                                                       true, 6);
-                               coex_dm->tdma_adj_type = 6;
-                       } else if (coex_dm->cur_ps_tdma == 6) {
-                               halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC,
-                                                       true, 7);
-                               coex_dm->tdma_adj_type = 7;
-                       } else if (coex_dm->cur_ps_tdma == 7) {
-                               halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC,
-                                                       true, 8);
-                               coex_dm->tdma_adj_type = 8;
-                       } else if (coex_dm->cur_ps_tdma == 13) {
-                               halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC,
-                                                       true, 14);
-                               coex_dm->tdma_adj_type = 14;
-                       } else if (coex_dm->cur_ps_tdma == 14) {
-                               halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC,
-                                                       true, 15);
-                               coex_dm->tdma_adj_type = 15;
-                       } else if (coex_dm->cur_ps_tdma == 15) {
-                               halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC,
-                                                       true, 16);
-                               coex_dm->tdma_adj_type = 16;
-                       }
-               } else if (result == 1) {
-                       if (coex_dm->cur_ps_tdma == 8) {
-                               halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC,
-                                                       true, 7);
-                               coex_dm->tdma_adj_type = 7;
-                       } else if (coex_dm->cur_ps_tdma == 7) {
-                               halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC,
-                                                       true, 6);
-                               coex_dm->tdma_adj_type = 6;
-                       } else if (coex_dm->cur_ps_tdma == 6) {
-                               halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC,
-                                                       true, 6);
-                               coex_dm->tdma_adj_type = 6;
-                       } else if (coex_dm->cur_ps_tdma == 16) {
-                               halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC,
-                                                       true, 15);
-                               coex_dm->tdma_adj_type = 15;
-                       } else if (coex_dm->cur_ps_tdma == 15) {
-                               halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC,
-                                                       true, 14);
-                               coex_dm->tdma_adj_type = 14;
-                       } else if (coex_dm->cur_ps_tdma == 14) {
-                               halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC,
-                                                       true, 14);
-                               coex_dm->tdma_adj_type = 14;
-                       }
-               }
-       } else {
-               RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
-                        "[BTCoex], TxPause = 0\n");
-               if (coex_dm->cur_ps_tdma == 5) {
-                       halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC,
-                                               true, 2);
-                       coex_dm->tdma_adj_type = 2;
-               } else if (coex_dm->cur_ps_tdma == 6) {
-                       halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC,
-                                               true, 2);
-                       coex_dm->tdma_adj_type = 2;
-               } else if (coex_dm->cur_ps_tdma == 7) {
-                       halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC,
-                                               true, 3);
-                       coex_dm->tdma_adj_type = 3;
-               } else if (coex_dm->cur_ps_tdma == 8) {
-                       halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC,
-                                               true, 4);
-                       coex_dm->tdma_adj_type = 4;
-               }
-               if (coex_dm->cur_ps_tdma == 13) {
-                       halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC,
-                                               true, 10);
-                       coex_dm->tdma_adj_type = 10;
-               } else if (coex_dm->cur_ps_tdma == 14) {
-                       halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC,
-                                               true, 10);
-                       coex_dm->tdma_adj_type = 10;
-               } else if (coex_dm->cur_ps_tdma == 15) {
-                       halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC,
-                                               true, 11);
-                       coex_dm->tdma_adj_type = 11;
-               } else if (coex_dm->cur_ps_tdma == 16) {
-                       halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC,
-                                               true, 12);
-                       coex_dm->tdma_adj_type = 12;
-               }
-               if (result == -1) {
-                       if (coex_dm->cur_ps_tdma == 1) {
-                               halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC,
-                                                       true, 2);
-                               coex_dm->tdma_adj_type = 2;
-                       } else if (coex_dm->cur_ps_tdma == 2) {
-                               halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC,
-                                                       true, 3);
-                               coex_dm->tdma_adj_type = 3;
-                       } else if (coex_dm->cur_ps_tdma == 3) {
-                               halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC,
-                                                       true, 4);
-                               coex_dm->tdma_adj_type = 4;
-                       } else if (coex_dm->cur_ps_tdma == 9) {
-                               halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC,
-                                                       true, 10);
-                               coex_dm->tdma_adj_type = 10;
-                       } else if (coex_dm->cur_ps_tdma == 10) {
-                               halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC,
-                                                       true, 11);
-                               coex_dm->tdma_adj_type = 11;
-                       } else if (coex_dm->cur_ps_tdma == 11) {
-                               halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC,
-                                                       true, 12);
-                               coex_dm->tdma_adj_type = 12;
-                       }
-               } else if (result == 1) {
-                       if (coex_dm->cur_ps_tdma == 4) {
-                               halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC,
-                                                       true, 3);
-                               coex_dm->tdma_adj_type = 3;
-                       } else if (coex_dm->cur_ps_tdma == 3) {
-                               halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC,
-                                                       true, 2);
-                               coex_dm->tdma_adj_type = 2;
-                       } else if (coex_dm->cur_ps_tdma == 2) {
-                               halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC,
-                                                       true, 2);
-                               coex_dm->tdma_adj_type = 2;
-                       } else if (coex_dm->cur_ps_tdma == 12) {
-                               halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC,
-                                                       true, 11);
-                               coex_dm->tdma_adj_type = 11;
-                       } else if (coex_dm->cur_ps_tdma == 11) {
-                               halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC,
-                                                       true, 10);
-                               coex_dm->tdma_adj_type = 10;
-                       } else if (coex_dm->cur_ps_tdma == 10) {
-                               halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC,
-                                                       true, 10);
-                               coex_dm->tdma_adj_type = 10;
-                       }
-               }
-       }
-}
-
-static void btc8192e_int3(struct btc_coexist *btcoexist, bool tx_pause,
-                         int result)
-{
-       struct rtl_priv *rtlpriv = btcoexist->adapter;
-
-       if (tx_pause) {
-               RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
-                        "[BTCoex], TxPause = 1\n");
-               if (coex_dm->cur_ps_tdma == 1) {
-                       halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC,
-                                               true, 7);
-                       coex_dm->tdma_adj_type = 7;
-               } else if (coex_dm->cur_ps_tdma == 2) {
-                       halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC,
-                                               true, 7);
-                       coex_dm->tdma_adj_type = 7;
-               } else if (coex_dm->cur_ps_tdma == 3) {
-                       halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC,
-                                               true, 7);
-                       coex_dm->tdma_adj_type = 7;
-               } else if (coex_dm->cur_ps_tdma == 4) {
-                       halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC,
-                                               true, 8);
-                       coex_dm->tdma_adj_type = 8;
-               }
-               if (coex_dm->cur_ps_tdma == 9) {
-                       halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC,
-                                               true, 15);
-                       coex_dm->tdma_adj_type = 15;
-               } else if (coex_dm->cur_ps_tdma == 10) {
-                       halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC,
-                                               true, 15);
-                       coex_dm->tdma_adj_type = 15;
-               } else if (coex_dm->cur_ps_tdma == 11) {
-                       halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC,
-                                               true, 15);
-                       coex_dm->tdma_adj_type = 15;
-               } else if (coex_dm->cur_ps_tdma == 12) {
-                       halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC,
-                                               true, 16);
-                       coex_dm->tdma_adj_type = 16;
-               }
-               if (result == -1) {
-                       if (coex_dm->cur_ps_tdma == 5) {
-                               halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC,
-                                                       true, 7);
-                               coex_dm->tdma_adj_type = 7;
-                       } else if (coex_dm->cur_ps_tdma == 6) {
-                               halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC,
-                                                       true, 7);
-                               coex_dm->tdma_adj_type = 7;
-                       } else if (coex_dm->cur_ps_tdma == 7) {
-                               halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC,
-                                                       true, 8);
-                               coex_dm->tdma_adj_type = 8;
-                       } else if (coex_dm->cur_ps_tdma == 13) {
-                               halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC,
-                                                       true, 15);
-                               coex_dm->tdma_adj_type = 15;
-                       } else if (coex_dm->cur_ps_tdma == 14) {
-                               halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC,
-                                                       true, 15);
-                               coex_dm->tdma_adj_type = 15;
-                       } else if (coex_dm->cur_ps_tdma == 15) {
-                               halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC,
-                                                       true, 16);
-                               coex_dm->tdma_adj_type = 16;
-                       }
-               } else if (result == 1) {
-                       if (coex_dm->cur_ps_tdma == 8) {
-                               halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC,
-                                                       true, 7);
-                               coex_dm->tdma_adj_type = 7;
-                       } else if (coex_dm->cur_ps_tdma == 7) {
-                               halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC,
-                                                       true, 7);
-                               coex_dm->tdma_adj_type = 7;
-                       } else if (coex_dm->cur_ps_tdma == 6) {
-                               halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC,
-                                                       true, 7);
-                               coex_dm->tdma_adj_type = 7;
-                       } else if (coex_dm->cur_ps_tdma == 16) {
-                               halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC,
-                                                       true, 15);
-                               coex_dm->tdma_adj_type = 15;
-                       } else if (coex_dm->cur_ps_tdma == 15) {
-                               halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC,
-                                                       true, 15);
-                               coex_dm->tdma_adj_type = 15;
-                       } else if (coex_dm->cur_ps_tdma == 14) {
-                               halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC,
-                                                       true, 15);
-                               coex_dm->tdma_adj_type = 15;
-                       }
-               }
-       } else {
-               RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
-                        "[BTCoex], TxPause = 0\n");
-               if (coex_dm->cur_ps_tdma == 5) {
-                       halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC,
-                                               true, 3);
-                       coex_dm->tdma_adj_type = 3;
-               } else if (coex_dm->cur_ps_tdma == 6) {
-                       halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC,
-                                               true, 3);
-                       coex_dm->tdma_adj_type = 3;
-               } else if (coex_dm->cur_ps_tdma == 7) {
-                       halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC,
-                                               true, 3);
-                       coex_dm->tdma_adj_type = 3;
-               } else if (coex_dm->cur_ps_tdma == 8) {
-                       halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC,
-                                               true, 4);
-                       coex_dm->tdma_adj_type = 4;
-               }
-               if (coex_dm->cur_ps_tdma == 13) {
-                       halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC,
-                                               true, 11);
-                       coex_dm->tdma_adj_type = 11;
-               } else if (coex_dm->cur_ps_tdma == 14) {
-                       halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC,
-                                               true, 11);
-                       coex_dm->tdma_adj_type = 11;
-               } else if (coex_dm->cur_ps_tdma == 15) {
-                       halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC,
-                                               true, 11);
-                       coex_dm->tdma_adj_type = 11;
-               } else if (coex_dm->cur_ps_tdma == 16) {
-                       halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC,
-                                               true, 12);
-                       coex_dm->tdma_adj_type = 12;
-               }
-               if (result == -1) {
-                       if (coex_dm->cur_ps_tdma == 1) {
-                               halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC,
-                                                       true, 3);
-                               coex_dm->tdma_adj_type = 3;
-                       } else if (coex_dm->cur_ps_tdma == 2) {
-                               halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC,
-                                                       true, 3);
-                               coex_dm->tdma_adj_type = 3;
-                       } else if (coex_dm->cur_ps_tdma == 3) {
-                               halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC,
-                                                       true, 4);
-                               coex_dm->tdma_adj_type = 4;
-                       } else if (coex_dm->cur_ps_tdma == 9) {
-                               halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC,
-                                                       true, 11);
-                               coex_dm->tdma_adj_type = 11;
-                       } else if (coex_dm->cur_ps_tdma == 10) {
-                               halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC,
-                                                       true, 11);
-                               coex_dm->tdma_adj_type = 11;
-                       } else if (coex_dm->cur_ps_tdma == 11) {
-                               halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC,
-                                                       true, 12);
-                               coex_dm->tdma_adj_type = 12;
-                       }
-               } else if (result == 1) {
-                       if (coex_dm->cur_ps_tdma == 4) {
-                               halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC,
-                                                       true, 3);
-                               coex_dm->tdma_adj_type = 3;
-                       } else if (coex_dm->cur_ps_tdma == 3) {
-                               halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC,
-                                                       true, 3);
-                               coex_dm->tdma_adj_type = 3;
-                       } else if (coex_dm->cur_ps_tdma == 2) {
-                               halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC,
-                                                       true, 3);
-                               coex_dm->tdma_adj_type = 3;
-                       } else if (coex_dm->cur_ps_tdma == 12) {
-                               halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC,
-                                                       true, 11);
-                               coex_dm->tdma_adj_type = 11;
-                       } else if (coex_dm->cur_ps_tdma == 11) {
-                               halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC,
-                                                       true, 11);
-                               coex_dm->tdma_adj_type = 11;
-                       } else if (coex_dm->cur_ps_tdma == 10) {
-                               halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC,
-                                                       true, 11);
-                               coex_dm->tdma_adj_type = 11;
-                       }
-               }
-       }
-}
-
-static void halbtc8192e2ant_tdma_duration_adjust(struct btc_coexist *btcoexist,
-                                                bool sco_hid, bool tx_pause,
-                                                u8 max_interval)
+static void btc8192e2ant_tdma_duration_adjust(struct btc_coexist *btcoexist,
+                                             bool sco_hid, bool tx_pause,
+                                             u8 max_interval)
 {
        struct rtl_priv *rtlpriv = btcoexist->adapter;
        static int up, dn, m, n, wait_cnt;
@@ -2174,72 +1592,72 @@ static void halbtc8192e2ant_tdma_duration_adjust(struct btc_coexist *btcoexist,
                if (sco_hid) {
                        if (tx_pause) {
                                if (max_interval == 1) {
-                                       halbtc8192e2ant_ps_tdma(btcoexist,
-                                                               NORMAL_EXEC,
-                                                               true, 13);
+                                       btc8192e2ant_ps_tdma(btcoexist,
+                                                            NORMAL_EXEC,
+                                                            true, 13);
                                        coex_dm->tdma_adj_type = 13;
                                } else if (max_interval == 2) {
-                                       halbtc8192e2ant_ps_tdma(btcoexist,
-                                                               NORMAL_EXEC,
-                                                               true, 14);
+                                       btc8192e2ant_ps_tdma(btcoexist,
+                                                            NORMAL_EXEC,
+                                                            true, 14);
                                        coex_dm->tdma_adj_type = 14;
                                } else {
-                                       halbtc8192e2ant_ps_tdma(btcoexist,
-                                                               NORMAL_EXEC,
-                                                               true, 15);
+                                       btc8192e2ant_ps_tdma(btcoexist,
+                                                            NORMAL_EXEC,
+                                                            true, 15);
                                        coex_dm->tdma_adj_type = 15;
                                }
                        } else {
                                if (max_interval == 1) {
-                                       halbtc8192e2ant_ps_tdma(btcoexist,
-                                                               NORMAL_EXEC,
-                                                               true, 9);
+                                       btc8192e2ant_ps_tdma(btcoexist,
+                                                            NORMAL_EXEC,
+                                                            true, 9);
                                        coex_dm->tdma_adj_type = 9;
                                } else if (max_interval == 2) {
-                                       halbtc8192e2ant_ps_tdma(btcoexist,
-                                                               NORMAL_EXEC,
-                                                               true, 10);
+                                       btc8192e2ant_ps_tdma(btcoexist,
+                                                            NORMAL_EXEC,
+                                                            true, 10);
                                        coex_dm->tdma_adj_type = 10;
                                } else {
-                                       halbtc8192e2ant_ps_tdma(btcoexist,
-                                                               NORMAL_EXEC,
-                                                               true, 11);
+                                       btc8192e2ant_ps_tdma(btcoexist,
+                                                            NORMAL_EXEC,
+                                                            true, 11);
                                        coex_dm->tdma_adj_type = 11;
                                }
                        }
                } else {
                        if (tx_pause) {
                                if (max_interval == 1) {
-                                       halbtc8192e2ant_ps_tdma(btcoexist,
-                                                               NORMAL_EXEC,
-                                                               true, 5);
+                                       btc8192e2ant_ps_tdma(btcoexist,
+                                                            NORMAL_EXEC,
+                                                            true, 5);
                                        coex_dm->tdma_adj_type = 5;
                                } else if (max_interval == 2) {
-                                       halbtc8192e2ant_ps_tdma(btcoexist,
-                                                               NORMAL_EXEC,
-                                                               true, 6);
+                                       btc8192e2ant_ps_tdma(btcoexist,
+                                                            NORMAL_EXEC,
+                                                            true, 6);
                                        coex_dm->tdma_adj_type = 6;
                                } else {
-                                       halbtc8192e2ant_ps_tdma(btcoexist,
-                                                               NORMAL_EXEC,
-                                                               true, 7);
+                                       btc8192e2ant_ps_tdma(btcoexist,
+                                                            NORMAL_EXEC,
+                                                            true, 7);
                                        coex_dm->tdma_adj_type = 7;
                                }
                        } else {
                                if (max_interval == 1) {
-                                       halbtc8192e2ant_ps_tdma(btcoexist,
-                                                               NORMAL_EXEC,
-                                                               true, 1);
+                                       btc8192e2ant_ps_tdma(btcoexist,
+                                                            NORMAL_EXEC,
+                                                            true, 1);
                                        coex_dm->tdma_adj_type = 1;
                                } else if (max_interval == 2) {
-                                       halbtc8192e2ant_ps_tdma(btcoexist,
-                                                               NORMAL_EXEC,
-                                                               true, 2);
+                                       btc8192e2ant_ps_tdma(btcoexist,
+                                                            NORMAL_EXEC,
+                                                            true, 2);
                                        coex_dm->tdma_adj_type = 2;
                                } else {
-                                       halbtc8192e2ant_ps_tdma(btcoexist,
-                                                               NORMAL_EXEC,
-                                                               true, 3);
+                                       btc8192e2ant_ps_tdma(btcoexist,
+                                                            NORMAL_EXEC,
+                                                            true, 3);
                                        coex_dm->tdma_adj_type = 3;
                                }
                        }
@@ -2322,12 +1740,6 @@ static void halbtc8192e2ant_tdma_duration_adjust(struct btc_coexist *btcoexist,
 
                RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
                         "[BTCoex], max Interval = %d\n", max_interval);
-               if (max_interval == 1)
-                       btc8192e_int1(btcoexist, tx_pause, result);
-               else if (max_interval == 2)
-                       btc8192e_int2(btcoexist, tx_pause, result);
-               else if (max_interval == 3)
-                       btc8192e_int3(btcoexist, tx_pause, result);
        }
 
        /* if current PsTdma not match with
@@ -2348,9 +1760,8 @@ static void halbtc8192e2ant_tdma_duration_adjust(struct btc_coexist *btcoexist,
                btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_ROAM, &roam);
 
                if (!scan && !link && !roam)
-                       halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC,
-                                               true,
-                                               coex_dm->tdma_adj_type);
+                       btc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC,
+                                            true, coex_dm->tdma_adj_type);
                else
                        RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
                                 "[BTCoex], roaming/link/scan is under progress, will adjust next time!!!\n");
@@ -2358,583 +1769,578 @@ static void halbtc8192e2ant_tdma_duration_adjust(struct btc_coexist *btcoexist,
 }
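
A note on the resync tail of this hunk: btc8192e2ant_tdma_duration_adjust() re-programs the stored TDMA case only when the firmware's current case has drifted from coex_dm->tdma_adj_type and no scan, link, or roam is in flight; otherwise it defers to the next pass. A minimal standalone sketch of that guard follows; tdma_adj_type is modeled on the hunk, while cur_ps_tdma and the helper name are assumed stand-ins, not the driver's real API.

#include <stdbool.h>
#include <stdio.h>

/* Illustrative stand-in for the two coex_dm fields used by the guard. */
struct coex_dm_stub {
        int cur_ps_tdma;   /* TDMA case currently programmed in firmware */
        int tdma_adj_type; /* case chosen by the duration-adjust logic   */
};

/* Re-issue the adjusted TDMA case only while the radio is quiescent. */
static void resync_ps_tdma(struct coex_dm_stub *dm,
                           bool scan, bool link, bool roam)
{
        if (dm->cur_ps_tdma == dm->tdma_adj_type)
                return; /* firmware already matches, nothing to do */

        if (!scan && !link && !roam) {
                printf("ps_tdma -> case %d\n", dm->tdma_adj_type);
                dm->cur_ps_tdma = dm->tdma_adj_type;
        } else {
                printf("busy (scan/link/roam), adjust next time\n");
        }
}

int main(void)
{
        struct coex_dm_stub dm = { .cur_ps_tdma = 1, .tdma_adj_type = 9 };

        resync_ps_tdma(&dm, false, false, false); /* quiescent: reprogram   */
        resync_ps_tdma(&dm, true, false, false);  /* scanning: defer resync */
        return 0;
}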
 
 /* SCO only or SCO+PAN(HS) */
-static void halbtc8192e2ant_action_sco(struct btc_coexist *btcoexist)
+static void btc8192e2ant_action_sco(struct btc_coexist *btcoexist)
 {
-       u8 wifirssi_state, btrssi_state = BTC_RSSI_STATE_STAY_LOW;
+       u8 wifi_rssi_state, bt_rssi_state = BTC_RSSI_STATE_STAY_LOW;
        u32 wifi_bw;
 
-       wifirssi_state = halbtc8192e2ant_wifirssi_state(btcoexist, 0, 2, 15, 0);
+       wifi_rssi_state = btc8192e2ant_wifi_rssi_state(btcoexist, 0, 2, 15, 0);
 
-       halbtc8192e2ant_switch_sstype(btcoexist, NORMAL_EXEC, 1);
-       halbtc8192e2ant_limited_rx(btcoexist, NORMAL_EXEC, false, false, 0x8);
+       btc8192e2ant_switch_ss_type(btcoexist, NORMAL_EXEC, 1);
+       btc8192e2ant_limited_rx(btcoexist, NORMAL_EXEC, false, false, 0x8);
 
-       halbtc8192e2ant_fw_dac_swinglvl(btcoexist, NORMAL_EXEC, 6);
+       btc8192e2ant_fw_dac_swing_lvl(btcoexist, NORMAL_EXEC, 6);
 
-       btc8192e2ant_coex_tbl_w_type(btcoexist, NORMAL_EXEC, 4);
+       btc8192e2ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 4);
 
-       btrssi_state = halbtc8192e2ant_btrssi_state(btcoexist, 3, 34, 42);
+       bt_rssi_state = btc8192e2ant_bt_rssi_state(btcoexist, 3, 34, 42);
 
-       if ((btrssi_state == BTC_RSSI_STATE_LOW) ||
-           (btrssi_state == BTC_RSSI_STATE_STAY_LOW)) {
-               halbtc8192e2ant_dec_btpwr(btcoexist, NORMAL_EXEC, 0);
-               halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 13);
-       } else if ((btrssi_state == BTC_RSSI_STATE_MEDIUM) ||
-                  (btrssi_state == BTC_RSSI_STATE_STAY_MEDIUM)) {
-               halbtc8192e2ant_dec_btpwr(btcoexist, NORMAL_EXEC, 2);
-               halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 9);
-       } else if ((btrssi_state == BTC_RSSI_STATE_HIGH) ||
-                  (btrssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
-               halbtc8192e2ant_dec_btpwr(btcoexist, NORMAL_EXEC, 4);
-               halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 9);
+       if ((bt_rssi_state == BTC_RSSI_STATE_LOW) ||
+           (bt_rssi_state == BTC_RSSI_STATE_STAY_LOW)) {
+               btc8192e2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, 0);
+               btc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 13);
+       } else if ((bt_rssi_state == BTC_RSSI_STATE_MEDIUM) ||
+                  (bt_rssi_state == BTC_RSSI_STATE_STAY_MEDIUM)) {
+               btc8192e2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, 2);
+               btc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 9);
+       } else if ((bt_rssi_state == BTC_RSSI_STATE_HIGH) ||
+                  (bt_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
+               btc8192e2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, 4);
+               btc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 9);
        }
 
        btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_BW, &wifi_bw);
 
        /* sw mechanism */
        if (BTC_WIFI_BW_HT40 == wifi_bw) {
-               if ((wifirssi_state == BTC_RSSI_STATE_HIGH) ||
-                   (wifirssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
-                       btc8192e2ant_sw_mec1(btcoexist, true, true,
-                                            false, false);
-                       btc8192e2ant_sw_mec2(btcoexist, true, false,
-                                            false, 0x6);
+               if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) ||
+                   (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
+                       btc8192e2ant_sw_mechanism1(btcoexist, true, true,
+                                                  false, false);
+                       btc8192e2ant_sw_mechanism2(btcoexist, true, false,
+                                                  false, 0x6);
                } else {
-                       btc8192e2ant_sw_mec1(btcoexist, true, true,
-                                            false, false);
-                       btc8192e2ant_sw_mec2(btcoexist, false, false,
-                                            false, 0x6);
+                       btc8192e2ant_sw_mechanism1(btcoexist, true, true,
+                                                  false, false);
+                       btc8192e2ant_sw_mechanism2(btcoexist, false, false,
+                                                  false, 0x6);
                }
        } else {
-               if ((wifirssi_state == BTC_RSSI_STATE_HIGH) ||
-                   (wifirssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
-                       btc8192e2ant_sw_mec1(btcoexist, false, true,
-                                            false, false);
-                       btc8192e2ant_sw_mec2(btcoexist, true, false,
-                                            false, 0x6);
+               if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) ||
+                   (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
+                       btc8192e2ant_sw_mechanism1(btcoexist, false, true,
+                                                  false, false);
+                       btc8192e2ant_sw_mechanism2(btcoexist, true, false,
+                                                  false, 0x6);
                } else {
-                       btc8192e2ant_sw_mec1(btcoexist, false, true,
-                                            false, false);
-                       btc8192e2ant_sw_mec2(btcoexist, false, false,
-                                            false, 0x6);
+                       btc8192e2ant_sw_mechanism1(btcoexist, false, true,
+                                                  false, false);
+                       btc8192e2ant_sw_mechanism2(btcoexist, false, false,
+                                                  false, 0x6);
                }
        }
 }
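
Each action handler in this patch repeats the same three-band BT RSSI ladder: the LOW band keeps the BT power decrement at level 0 and picks one TDMA case, while the MEDIUM and HIGH bands raise the level to 2 and 4 and share a second case. A compilable sketch of the SCO ladder above as a lookup table; the enum and struct are illustrative, and 0/2/4 are the opaque level values passed to btc8192e2ant_dec_bt_pwr() in the diff:

#include <stdio.h>

/* Collapses each BTC_RSSI_STATE pair (X and STAY_X) into one band. */
enum rssi_band { RSSI_LOW, RSSI_MEDIUM, RSSI_HIGH };

struct coex_policy {
        int dec_bt_pwr_lvl; /* argument to ..._dec_bt_pwr() */
        int ps_tdma_case;   /* argument to ..._ps_tdma()    */
};

/* The SCO handler's ladder, expressed as a table. */
static const struct coex_policy sco_policy[] = {
        [RSSI_LOW]    = { .dec_bt_pwr_lvl = 0, .ps_tdma_case = 13 },
        [RSSI_MEDIUM] = { .dec_bt_pwr_lvl = 2, .ps_tdma_case = 9 },
        [RSSI_HIGH]   = { .dec_bt_pwr_lvl = 4, .ps_tdma_case = 9 },
};

int main(void)
{
        int b;

        for (b = RSSI_LOW; b <= RSSI_HIGH; b++)
                printf("band %d: dec_bt_pwr(lvl %d), ps_tdma(case %d)\n",
                       b, sco_policy[b].dec_bt_pwr_lvl,
                       sco_policy[b].ps_tdma_case);
        return 0;
}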
 
-static void halbtc8192e2ant_action_sco_pan(struct btc_coexist *btcoexist)
+static void btc8192e2ant_action_sco_pan(struct btc_coexist *btcoexist)
 {
-       u8 wifirssi_state, btrssi_state = BTC_RSSI_STATE_STAY_LOW;
+       u8 wifi_rssi_state, bt_rssi_state = BTC_RSSI_STATE_STAY_LOW;
        u32 wifi_bw;
 
-       wifirssi_state = halbtc8192e2ant_wifirssi_state(btcoexist, 0, 2, 15, 0);
+       wifi_rssi_state = btc8192e2ant_wifi_rssi_state(btcoexist, 0, 2, 15, 0);
 
-       halbtc8192e2ant_switch_sstype(btcoexist, NORMAL_EXEC, 1);
-       halbtc8192e2ant_limited_rx(btcoexist, NORMAL_EXEC, false, false, 0x8);
+       btc8192e2ant_switch_ss_type(btcoexist, NORMAL_EXEC, 1);
+       btc8192e2ant_limited_rx(btcoexist, NORMAL_EXEC, false, false, 0x8);
 
-       halbtc8192e2ant_fw_dac_swinglvl(btcoexist, NORMAL_EXEC, 6);
+       btc8192e2ant_fw_dac_swing_lvl(btcoexist, NORMAL_EXEC, 6);
 
-       btc8192e2ant_coex_tbl_w_type(btcoexist, NORMAL_EXEC, 4);
+       btc8192e2ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 4);
 
-       btrssi_state = halbtc8192e2ant_btrssi_state(btcoexist, 3, 34, 42);
+       bt_rssi_state = btc8192e2ant_bt_rssi_state(btcoexist, 3, 34, 42);
 
-       if ((btrssi_state == BTC_RSSI_STATE_LOW) ||
-           (btrssi_state == BTC_RSSI_STATE_STAY_LOW)) {
-               halbtc8192e2ant_dec_btpwr(btcoexist, NORMAL_EXEC, 0);
-               halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 14);
-       } else if ((btrssi_state == BTC_RSSI_STATE_MEDIUM) ||
-                  (btrssi_state == BTC_RSSI_STATE_STAY_MEDIUM)) {
-               halbtc8192e2ant_dec_btpwr(btcoexist, NORMAL_EXEC, 2);
-               halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 10);
-       } else if ((btrssi_state == BTC_RSSI_STATE_HIGH) ||
-                  (btrssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
-               halbtc8192e2ant_dec_btpwr(btcoexist, NORMAL_EXEC, 4);
-               halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 10);
+       if ((bt_rssi_state == BTC_RSSI_STATE_LOW) ||
+           (bt_rssi_state == BTC_RSSI_STATE_STAY_LOW)) {
+               btc8192e2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, 0);
+               btc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 14);
+       } else if ((bt_rssi_state == BTC_RSSI_STATE_MEDIUM) ||
+                  (bt_rssi_state == BTC_RSSI_STATE_STAY_MEDIUM)) {
+               btc8192e2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, 2);
+               btc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 10);
+       } else if ((bt_rssi_state == BTC_RSSI_STATE_HIGH) ||
+                  (bt_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
+               btc8192e2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, 4);
+               btc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 10);
        }
 
        btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_BW, &wifi_bw);
 
        /* sw mechanism */
        if (BTC_WIFI_BW_HT40 == wifi_bw) {
-               if ((wifirssi_state == BTC_RSSI_STATE_HIGH) ||
-                   (wifirssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
-                       btc8192e2ant_sw_mec1(btcoexist, true, true,
-                                            false, false);
-                       btc8192e2ant_sw_mec2(btcoexist, true, false,
-                                            false, 0x6);
+               if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) ||
+                   (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
+                       btc8192e2ant_sw_mechanism1(btcoexist, true, true,
+                                                  false, false);
+                       btc8192e2ant_sw_mechanism2(btcoexist, true, false,
+                                                  false, 0x6);
                } else {
-                       btc8192e2ant_sw_mec1(btcoexist, true, true,
-                                            false, false);
-                       btc8192e2ant_sw_mec2(btcoexist, false, false,
-                                            false, 0x6);
+                       btc8192e2ant_sw_mechanism1(btcoexist, true, true,
+                                                  false, false);
+                       btc8192e2ant_sw_mechanism2(btcoexist, false, false,
+                                                  false, 0x6);
                }
        } else {
-               if ((wifirssi_state == BTC_RSSI_STATE_HIGH) ||
-                   (wifirssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
-                       btc8192e2ant_sw_mec1(btcoexist, false, true,
-                                            false, false);
-                       btc8192e2ant_sw_mec2(btcoexist, true, false,
-                                            false, 0x6);
+               if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) ||
+                   (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
+                       btc8192e2ant_sw_mechanism1(btcoexist, false, true,
+                                                  false, false);
+                       btc8192e2ant_sw_mechanism2(btcoexist, true, false,
+                                                  false, 0x6);
                } else {
-                       btc8192e2ant_sw_mec1(btcoexist, false, true,
-                                            false, false);
-                       btc8192e2ant_sw_mec2(btcoexist, false, false,
-                                            false, 0x6);
+                       btc8192e2ant_sw_mechanism1(btcoexist, false, true,
+                                                  false, false);
+                       btc8192e2ant_sw_mechanism2(btcoexist, false, false,
+                                                  false, 0x6);
                }
        }
 }
 
-static void halbtc8192e2ant_action_hid(struct btc_coexist *btcoexist)
+static void btc8192e2ant_action_hid(struct btc_coexist *btcoexist)
 {
-       u8 wifirssi_state, btrssi_state = BTC_RSSI_STATE_HIGH;
+       u8 wifi_rssi_state, bt_rssi_state = BTC_RSSI_STATE_HIGH;
        u32 wifi_bw;
 
-       wifirssi_state = halbtc8192e2ant_wifirssi_state(btcoexist, 0, 2, 15, 0);
-       btrssi_state = halbtc8192e2ant_btrssi_state(btcoexist, 3, 34, 42);
+       wifi_rssi_state = btc8192e2ant_wifi_rssi_state(btcoexist, 0, 2, 15, 0);
+       bt_rssi_state = btc8192e2ant_bt_rssi_state(btcoexist, 3, 34, 42);
 
-       halbtc8192e2ant_switch_sstype(btcoexist, NORMAL_EXEC, 1);
-       halbtc8192e2ant_limited_rx(btcoexist, NORMAL_EXEC, false, false, 0x8);
+       btc8192e2ant_switch_ss_type(btcoexist, NORMAL_EXEC, 1);
+       btc8192e2ant_limited_rx(btcoexist, NORMAL_EXEC, false, false, 0x8);
 
-       halbtc8192e2ant_fw_dac_swinglvl(btcoexist, NORMAL_EXEC, 6);
+       btc8192e2ant_fw_dac_swing_lvl(btcoexist, NORMAL_EXEC, 6);
 
        btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_BW, &wifi_bw);
 
-       btc8192e2ant_coex_tbl_w_type(btcoexist, NORMAL_EXEC, 3);
+       btc8192e2ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 3);
 
-       if ((btrssi_state == BTC_RSSI_STATE_LOW) ||
-           (btrssi_state == BTC_RSSI_STATE_STAY_LOW)) {
-               halbtc8192e2ant_dec_btpwr(btcoexist, NORMAL_EXEC, 0);
-               halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 13);
-       } else if ((btrssi_state == BTC_RSSI_STATE_MEDIUM) ||
-                  (btrssi_state == BTC_RSSI_STATE_STAY_MEDIUM)) {
-               halbtc8192e2ant_dec_btpwr(btcoexist, NORMAL_EXEC, 2);
-               halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 9);
-       } else if ((btrssi_state == BTC_RSSI_STATE_HIGH) ||
-                  (btrssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
-               halbtc8192e2ant_dec_btpwr(btcoexist, NORMAL_EXEC, 4);
-               halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 9);
+       if ((bt_rssi_state == BTC_RSSI_STATE_LOW) ||
+           (bt_rssi_state == BTC_RSSI_STATE_STAY_LOW)) {
+               btc8192e2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, 0);
+               btc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 13);
+       } else if ((bt_rssi_state == BTC_RSSI_STATE_MEDIUM) ||
+                  (bt_rssi_state == BTC_RSSI_STATE_STAY_MEDIUM)) {
+               btc8192e2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, 2);
+               btc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 9);
+       } else if ((bt_rssi_state == BTC_RSSI_STATE_HIGH) ||
+                  (bt_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
+               btc8192e2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, 4);
+               btc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 9);
        }
 
        /* sw mechanism */
        if (BTC_WIFI_BW_HT40 == wifi_bw) {
-               if ((wifirssi_state == BTC_RSSI_STATE_HIGH) ||
-                   (wifirssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
-                       btc8192e2ant_sw_mec1(btcoexist, true, true,
-                                            false, false);
-                       btc8192e2ant_sw_mec2(btcoexist, true, false,
-                                            false, 0x18);
+               if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) ||
+                   (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
+                       btc8192e2ant_sw_mechanism1(btcoexist, true, true,
+                                                  false, false);
+                       btc8192e2ant_sw_mechanism2(btcoexist, true, false,
+                                                  false, 0x18);
                } else {
-                       btc8192e2ant_sw_mec1(btcoexist, true, true,
-                                            false, false);
-                       btc8192e2ant_sw_mec2(btcoexist, false, false,
-                                            false, 0x18);
+                       btc8192e2ant_sw_mechanism1(btcoexist, true, true,
+                                                  false, false);
+                       btc8192e2ant_sw_mechanism2(btcoexist, false, false,
+                                                  false, 0x18);
                }
        } else {
-               if ((wifirssi_state == BTC_RSSI_STATE_HIGH) ||
-                   (wifirssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
-                       btc8192e2ant_sw_mec1(btcoexist, false, true,
-                                            false, false);
-                       btc8192e2ant_sw_mec2(btcoexist, true, false,
-                                            false, 0x18);
+               if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) ||
+                   (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
+                       btc8192e2ant_sw_mechanism1(btcoexist, false, true,
+                                                  false, false);
+                       btc8192e2ant_sw_mechanism2(btcoexist, true, false,
+                                                  false, 0x18);
                } else {
-                       btc8192e2ant_sw_mec1(btcoexist, false, true,
-                                            false, false);
-                       btc8192e2ant_sw_mec2(btcoexist, false, false,
-                                            false, 0x18);
+                       btc8192e2ant_sw_mechanism1(btcoexist, false, true,
+                                                  false, false);
+                       btc8192e2ant_sw_mechanism2(btcoexist, false, false,
+                                                  false, 0x18);
                }
        }
 }
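
The sw mechanism blocks in every handler are the same 2x2 grid: the bandwidth test (HT40 or not) drives the first flag of btc8192e2ant_sw_mechanism1(), a HIGH or STAY_HIGH Wi-Fi RSSI drives the first flag of btc8192e2ant_sw_mechanism2(), and only the trailing constant differs per profile (0x6 for SCO-type loads, 0x18 here). A sketch of that selection; the parameter readings (RX LPF shrink, AGC table shift, DAC swing level) are assumptions about the driver's prototypes, which this diff does not show:

#include <stdbool.h>
#include <stdio.h>

/* One cell of the 2x2 software-mechanism grid used by the handlers. */
static void pick_sw_mechanism(bool ht40, bool wifi_rssi_high, int swing_lvl)
{
        /* First flag of sw_mechanism1: HT40 shrinks the RX LPF (assumed). */
        bool shrink_rx_lpf = ht40;
        /* First flag of sw_mechanism2: strong Wi-Fi RSSI shifts the AGC
         * table (assumed); swing_lvl is the constant trailing argument. */
        bool agc_table_shift = wifi_rssi_high;

        printf("sw_mechanism1(%s, ...); sw_mechanism2(%s, ..., 0x%x);\n",
               shrink_rx_lpf ? "true" : "false",
               agc_table_shift ? "true" : "false", swing_lvl);
}

int main(void)
{
        /* The HID handler above uses 0x18 in all four cells. */
        pick_sw_mechanism(true, true, 0x18);
        pick_sw_mechanism(true, false, 0x18);
        pick_sw_mechanism(false, true, 0x18);
        pick_sw_mechanism(false, false, 0x18);
        return 0;
}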
 
 /* A2DP only / PAN(EDR) only / A2DP+PAN(HS) */
-static void halbtc8192e2ant_action_a2dp(struct btc_coexist *btcoexist)
+static void btc8192e2ant_action_a2dp(struct btc_coexist *btcoexist)
 {
        struct rtl_priv *rtlpriv = btcoexist->adapter;
-       u8 wifirssi_state, btrssi_state = BTC_RSSI_STATE_HIGH;
+       u8 wifi_rssi_state, bt_rssi_state = BTC_RSSI_STATE_HIGH;
        u32 wifi_bw;
        bool long_dist = false;
 
-       wifirssi_state = halbtc8192e2ant_wifirssi_state(btcoexist, 0, 2, 15, 0);
-       btrssi_state = halbtc8192e2ant_btrssi_state(btcoexist, 3, 34, 42);
+       wifi_rssi_state = btc8192e2ant_wifi_rssi_state(btcoexist, 0, 2, 15, 0);
+       bt_rssi_state = btc8192e2ant_bt_rssi_state(btcoexist, 3, 34, 42);
 
-       if ((btrssi_state == BTC_RSSI_STATE_LOW ||
-            btrssi_state == BTC_RSSI_STATE_STAY_LOW) &&
-           (wifirssi_state == BTC_RSSI_STATE_LOW ||
-            wifirssi_state == BTC_RSSI_STATE_STAY_LOW)) {
+       if ((bt_rssi_state == BTC_RSSI_STATE_LOW ||
+            bt_rssi_state == BTC_RSSI_STATE_STAY_LOW) &&
+           (wifi_rssi_state == BTC_RSSI_STATE_LOW ||
+            wifi_rssi_state == BTC_RSSI_STATE_STAY_LOW)) {
                RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
                         "[BTCoex], A2dp, wifi/bt rssi both LOW!!\n");
                long_dist = true;
        }
        if (long_dist) {
-               halbtc8192e2ant_switch_sstype(btcoexist, NORMAL_EXEC, 2);
-               halbtc8192e2ant_limited_rx(btcoexist, NORMAL_EXEC, false, true,
-                                          0x4);
+               btc8192e2ant_switch_ss_type(btcoexist, NORMAL_EXEC, 2);
+               btc8192e2ant_limited_rx(btcoexist, NORMAL_EXEC, false, true,
+                                       0x4);
        } else {
-               halbtc8192e2ant_switch_sstype(btcoexist, NORMAL_EXEC, 1);
-               halbtc8192e2ant_limited_rx(btcoexist, NORMAL_EXEC, false, false,
-                                          0x8);
+               btc8192e2ant_switch_ss_type(btcoexist, NORMAL_EXEC, 1);
+               btc8192e2ant_limited_rx(btcoexist, NORMAL_EXEC, false, false,
+                                       0x8);
        }
 
-       halbtc8192e2ant_fw_dac_swinglvl(btcoexist, NORMAL_EXEC, 6);
+       btc8192e2ant_fw_dac_swing_lvl(btcoexist, NORMAL_EXEC, 6);
 
        if (long_dist)
-               btc8192e2ant_coex_tbl_w_type(btcoexist, NORMAL_EXEC, 0);
+               btc8192e2ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 0);
        else
-               btc8192e2ant_coex_tbl_w_type(btcoexist, NORMAL_EXEC, 2);
+               btc8192e2ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 2);
 
        if (long_dist) {
-               halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 17);
+               btc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 17);
                coex_dm->auto_tdma_adjust = false;
-               halbtc8192e2ant_dec_btpwr(btcoexist, NORMAL_EXEC, 0);
+               btc8192e2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, 0);
        } else {
-               if ((btrssi_state == BTC_RSSI_STATE_LOW) ||
-                   (btrssi_state == BTC_RSSI_STATE_STAY_LOW)) {
-                       halbtc8192e2ant_tdma_duration_adjust(btcoexist, false,
-                                                            true, 1);
-                       halbtc8192e2ant_dec_btpwr(btcoexist, NORMAL_EXEC, 0);
-               } else if ((btrssi_state == BTC_RSSI_STATE_MEDIUM) ||
-                          (btrssi_state == BTC_RSSI_STATE_STAY_MEDIUM)) {
-                       halbtc8192e2ant_tdma_duration_adjust(btcoexist, false,
-                                                            false, 1);
-                       halbtc8192e2ant_dec_btpwr(btcoexist, NORMAL_EXEC, 2);
-               } else if ((btrssi_state == BTC_RSSI_STATE_HIGH) ||
-                          (btrssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
-                       halbtc8192e2ant_tdma_duration_adjust(btcoexist, false,
-                                                            false, 1);
-                       halbtc8192e2ant_dec_btpwr(btcoexist, NORMAL_EXEC, 4);
+               if ((bt_rssi_state == BTC_RSSI_STATE_LOW) ||
+                   (bt_rssi_state == BTC_RSSI_STATE_STAY_LOW)) {
+                       btc8192e2ant_tdma_duration_adjust(btcoexist, false,
+                                                         true, 1);
+                       btc8192e2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, 0);
+               } else if ((bt_rssi_state == BTC_RSSI_STATE_MEDIUM) ||
+                          (bt_rssi_state == BTC_RSSI_STATE_STAY_MEDIUM)) {
+                       btc8192e2ant_tdma_duration_adjust(btcoexist, false,
+                                                         false, 1);
+                       btc8192e2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, 2);
+               } else if ((bt_rssi_state == BTC_RSSI_STATE_HIGH) ||
+                          (bt_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
+                       btc8192e2ant_tdma_duration_adjust(btcoexist, false,
+                                                         false, 1);
+                       btc8192e2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, 4);
                }
        }
 
        /* sw mechanism */
        btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_BW, &wifi_bw);
        if (BTC_WIFI_BW_HT40 == wifi_bw) {
-               if ((wifirssi_state == BTC_RSSI_STATE_HIGH) ||
-                   (wifirssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
-                       btc8192e2ant_sw_mec1(btcoexist, true, false,
-                                            false, false);
-                       btc8192e2ant_sw_mec2(btcoexist, true, false,
-                                            false, 0x18);
+               if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) ||
+                   (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
+                       btc8192e2ant_sw_mechanism1(btcoexist, true, false,
+                                                  false, false);
+                       btc8192e2ant_sw_mechanism2(btcoexist, true, false,
+                                                  false, 0x18);
                } else {
-                       btc8192e2ant_sw_mec1(btcoexist, true, false,
-                                            false, false);
-                       btc8192e2ant_sw_mec2(btcoexist, false, false,
-                                            false, 0x18);
+                       btc8192e2ant_sw_mechanism1(btcoexist, true, false,
+                                                  false, false);
+                       btc8192e2ant_sw_mechanism2(btcoexist, false, false,
+                                                  false, 0x18);
                }
        } else {
-               if ((wifirssi_state == BTC_RSSI_STATE_HIGH) ||
-                   (wifirssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
-                       btc8192e2ant_sw_mec1(btcoexist, false, false,
-                                            false, false);
-                       btc8192e2ant_sw_mec2(btcoexist, true, false,
-                                            false, 0x18);
+               if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) ||
+                   (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
+                       btc8192e2ant_sw_mechanism1(btcoexist, false, false,
+                                                  false, false);
+                       btc8192e2ant_sw_mechanism2(btcoexist, true, false,
+                                                  false, 0x18);
                } else {
-                       btc8192e2ant_sw_mec1(btcoexist, false, false,
-                                            false, false);
-                       btc8192e2ant_sw_mec2(btcoexist, false, false,
-                                            false, 0x18);
+                       btc8192e2ant_sw_mechanism1(btcoexist, false, false,
+                                                  false, false);
+                       btc8192e2ant_sw_mechanism2(btcoexist, false, false,
+                                                  false, 0x18);
                }
        }
 }
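
btc8192e2ant_action_a2dp() above is the only handler keyed on a joint condition: it flips to the long-distance profile (single-stream type 2, RX aggregation capped at 0x4, fixed TDMA case 17 with auto_tdma_adjust cleared) only when the Wi-Fi and BT RSSI states are both in a LOW band. A small sketch of that predicate, with a stub enum standing in for the BTC_RSSI_STATE values:

#include <stdbool.h>
#include <stdio.h>

enum rssi_state { STATE_LOW, STATE_STAY_LOW, STATE_MEDIUM, STATE_HIGH };

/* The "both sides weak" test from the A2DP handler. */
static bool is_long_distance(enum rssi_state wifi, enum rssi_state bt)
{
        bool wifi_low = (wifi == STATE_LOW || wifi == STATE_STAY_LOW);
        bool bt_low = (bt == STATE_LOW || bt == STATE_STAY_LOW);

        return wifi_low && bt_low;
}

int main(void)
{
        /* Long distance: ss type 2, agg size 0x4, fixed tdma case 17.   */
        printf("%d\n", is_long_distance(STATE_LOW, STATE_STAY_LOW)); /* 1 */
        /* Normal: ss type 1, agg size 0x8, RSSI-driven duration adjust. */
        printf("%d\n", is_long_distance(STATE_HIGH, STATE_LOW));     /* 0 */
        return 0;
}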
 
-static void halbtc8192e2ant_action_a2dp_pan_hs(struct btc_coexist *btcoexist)
+static void btc8192e2ant_action_a2dp_pan_hs(struct btc_coexist *btcoexist)
 {
-       u8 wifirssi_state, btrssi_state = BTC_RSSI_STATE_HIGH;
+       u8 wifi_rssi_state, bt_rssi_state = BTC_RSSI_STATE_HIGH;
        u32 wifi_bw;
 
-       wifirssi_state = halbtc8192e2ant_wifirssi_state(btcoexist, 0, 2, 15, 0);
-       btrssi_state = halbtc8192e2ant_btrssi_state(btcoexist, 3, 34, 42);
+       wifi_rssi_state = btc8192e2ant_wifi_rssi_state(btcoexist, 0, 2, 15, 0);
+       bt_rssi_state = btc8192e2ant_bt_rssi_state(btcoexist, 3, 34, 42);
 
-       halbtc8192e2ant_switch_sstype(btcoexist, NORMAL_EXEC, 1);
-       halbtc8192e2ant_limited_rx(btcoexist, NORMAL_EXEC, false, false, 0x8);
+       btc8192e2ant_switch_ss_type(btcoexist, NORMAL_EXEC, 1);
+       btc8192e2ant_limited_rx(btcoexist, NORMAL_EXEC, false, false, 0x8);
 
-       halbtc8192e2ant_fw_dac_swinglvl(btcoexist, NORMAL_EXEC, 6);
-       btc8192e2ant_coex_tbl_w_type(btcoexist, NORMAL_EXEC, 2);
+       btc8192e2ant_fw_dac_swing_lvl(btcoexist, NORMAL_EXEC, 6);
+       btc8192e2ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 2);
 
-       if ((btrssi_state == BTC_RSSI_STATE_LOW) ||
-           (btrssi_state == BTC_RSSI_STATE_STAY_LOW)) {
-               halbtc8192e2ant_tdma_duration_adjust(btcoexist, false, true, 2);
-               halbtc8192e2ant_dec_btpwr(btcoexist, NORMAL_EXEC, 0);
-       } else if ((btrssi_state == BTC_RSSI_STATE_MEDIUM) ||
-                  (btrssi_state == BTC_RSSI_STATE_STAY_MEDIUM)) {
-               halbtc8192e2ant_tdma_duration_adjust(btcoexist, false,
-                                                    false, 2);
-               halbtc8192e2ant_dec_btpwr(btcoexist, NORMAL_EXEC, 2);
-       } else if ((btrssi_state == BTC_RSSI_STATE_HIGH) ||
-                  (btrssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
-               halbtc8192e2ant_tdma_duration_adjust(btcoexist, false,
-                                                    false, 2);
-               halbtc8192e2ant_dec_btpwr(btcoexist, NORMAL_EXEC, 4);
+       if ((bt_rssi_state == BTC_RSSI_STATE_LOW) ||
+           (bt_rssi_state == BTC_RSSI_STATE_STAY_LOW)) {
+               btc8192e2ant_tdma_duration_adjust(btcoexist, false, true, 2);
+               btc8192e2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, 0);
+       } else if ((bt_rssi_state == BTC_RSSI_STATE_MEDIUM) ||
+                  (bt_rssi_state == BTC_RSSI_STATE_STAY_MEDIUM)) {
+               btc8192e2ant_tdma_duration_adjust(btcoexist, false, false, 2);
+               btc8192e2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, 2);
+       } else if ((bt_rssi_state == BTC_RSSI_STATE_HIGH) ||
+                  (bt_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
+               btc8192e2ant_tdma_duration_adjust(btcoexist, false, false, 2);
+               btc8192e2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, 4);
        }
 
        /* sw mechanism */
        btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_BW, &wifi_bw);
        if (BTC_WIFI_BW_HT40 == wifi_bw) {
-               if ((wifirssi_state == BTC_RSSI_STATE_HIGH) ||
-                   (wifirssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
-                       btc8192e2ant_sw_mec1(btcoexist, true, false,
-                                            false, false);
-                       btc8192e2ant_sw_mec2(btcoexist, true, false,
-                                            true, 0x6);
+               if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) ||
+                   (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
+                       btc8192e2ant_sw_mechanism1(btcoexist, true, false,
+                                                  false, false);
+                       btc8192e2ant_sw_mechanism2(btcoexist, true, false,
+                                                  true, 0x6);
                } else {
-                       btc8192e2ant_sw_mec1(btcoexist, true, false,
-                                            false, false);
-                       btc8192e2ant_sw_mec2(btcoexist, false, false,
-                                            true, 0x6);
+                       btc8192e2ant_sw_mechanism1(btcoexist, true, false,
+                                                  false, false);
+                       btc8192e2ant_sw_mechanism2(btcoexist, false, false,
+                                                  true, 0x6);
                }
        } else {
-               if ((wifirssi_state == BTC_RSSI_STATE_HIGH) ||
-                   (wifirssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
-                       btc8192e2ant_sw_mec1(btcoexist, false, false,
-                                            false, false);
-                       btc8192e2ant_sw_mec2(btcoexist, true, false,
-                                            true, 0x6);
+               if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) ||
+                   (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
+                       btc8192e2ant_sw_mechanism1(btcoexist, false, false,
+                                                  false, false);
+                       btc8192e2ant_sw_mechanism2(btcoexist, true, false,
+                                                  true, 0x6);
                } else {
-                       btc8192e2ant_sw_mec1(btcoexist, false, false,
-                                            false, false);
-                       btc8192e2ant_sw_mec2(btcoexist, false, false,
-                                            true, 0x6);
+                       btc8192e2ant_sw_mechanism1(btcoexist, false, false,
+                                                  false, false);
+                       btc8192e2ant_sw_mechanism2(btcoexist, false, false,
+                                                  true, 0x6);
                }
        }
 }
 
-static void halbtc8192e2ant_action_pan_edr(struct btc_coexist *btcoexist)
+static void btc8192e2ant_action_pan_edr(struct btc_coexist *btcoexist)
 {
-       u8 wifirssi_state, btrssi_state = BTC_RSSI_STATE_HIGH;
+       u8 wifi_rssi_state, bt_rssi_state = BTC_RSSI_STATE_HIGH;
        u32 wifi_bw;
 
-       wifirssi_state = halbtc8192e2ant_wifirssi_state(btcoexist, 0, 2, 15, 0);
-       btrssi_state = halbtc8192e2ant_btrssi_state(btcoexist, 3, 34, 42);
+       wifi_rssi_state = btc8192e2ant_wifi_rssi_state(btcoexist, 0, 2, 15, 0);
+       bt_rssi_state = btc8192e2ant_bt_rssi_state(btcoexist, 3, 34, 42);
 
-       halbtc8192e2ant_switch_sstype(btcoexist, NORMAL_EXEC, 1);
-       halbtc8192e2ant_limited_rx(btcoexist, NORMAL_EXEC, false, false, 0x8);
+       btc8192e2ant_switch_ss_type(btcoexist, NORMAL_EXEC, 1);
+       btc8192e2ant_limited_rx(btcoexist, NORMAL_EXEC, false, false, 0x8);
 
-       halbtc8192e2ant_fw_dac_swinglvl(btcoexist, NORMAL_EXEC, 6);
+       btc8192e2ant_fw_dac_swing_lvl(btcoexist, NORMAL_EXEC, 6);
 
-       btc8192e2ant_coex_tbl_w_type(btcoexist, NORMAL_EXEC, 2);
+       btc8192e2ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 2);
 
-       if ((btrssi_state == BTC_RSSI_STATE_LOW) ||
-           (btrssi_state == BTC_RSSI_STATE_STAY_LOW)) {
-               halbtc8192e2ant_dec_btpwr(btcoexist, NORMAL_EXEC, 0);
-               halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 5);
-       } else if ((btrssi_state == BTC_RSSI_STATE_MEDIUM) ||
-                  (btrssi_state == BTC_RSSI_STATE_STAY_MEDIUM)) {
-               halbtc8192e2ant_dec_btpwr(btcoexist, NORMAL_EXEC, 2);
-               halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 1);
-       } else if ((btrssi_state == BTC_RSSI_STATE_HIGH) ||
-                  (btrssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
-               halbtc8192e2ant_dec_btpwr(btcoexist, NORMAL_EXEC, 4);
-               halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 1);
+       if ((bt_rssi_state == BTC_RSSI_STATE_LOW) ||
+           (bt_rssi_state == BTC_RSSI_STATE_STAY_LOW)) {
+               btc8192e2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, 0);
+               btc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 5);
+       } else if ((bt_rssi_state == BTC_RSSI_STATE_MEDIUM) ||
+                  (bt_rssi_state == BTC_RSSI_STATE_STAY_MEDIUM)) {
+               btc8192e2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, 2);
+               btc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 1);
+       } else if ((bt_rssi_state == BTC_RSSI_STATE_HIGH) ||
+                  (bt_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
+               btc8192e2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, 4);
+               btc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 1);
        }
 
        /* sw mechanism */
        btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_BW, &wifi_bw);
        if (BTC_WIFI_BW_HT40 == wifi_bw) {
-               if ((wifirssi_state == BTC_RSSI_STATE_HIGH) ||
-                   (wifirssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
-                       btc8192e2ant_sw_mec1(btcoexist, true, false,
-                                            false, false);
-                       btc8192e2ant_sw_mec2(btcoexist, true, false,
-                                            false, 0x18);
+               if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) ||
+                   (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
+                       btc8192e2ant_sw_mechanism1(btcoexist, true, false,
+                                                  false, false);
+                       btc8192e2ant_sw_mechanism2(btcoexist, true, false,
+                                                  false, 0x18);
                } else {
-                       btc8192e2ant_sw_mec1(btcoexist, true, false,
-                                            false, false);
-                       btc8192e2ant_sw_mec2(btcoexist, false, false,
-                                            false, 0x18);
+                       btc8192e2ant_sw_mechanism1(btcoexist, true, false,
+                                                  false, false);
+                       btc8192e2ant_sw_mechanism2(btcoexist, false, false,
+                                                  false, 0x18);
                }
        } else {
-               if ((wifirssi_state == BTC_RSSI_STATE_HIGH) ||
-                   (wifirssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
-                       btc8192e2ant_sw_mec1(btcoexist, false, false,
-                                            false, false);
-                       btc8192e2ant_sw_mec2(btcoexist, true, false,
-                                            false, 0x18);
+               if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) ||
+                   (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
+                       btc8192e2ant_sw_mechanism1(btcoexist, false, false,
+                                                  false, false);
+                       btc8192e2ant_sw_mechanism2(btcoexist, true, false,
+                                                  false, 0x18);
                } else {
-                       btc8192e2ant_sw_mec1(btcoexist, false, false,
-                                            false, false);
-                       btc8192e2ant_sw_mec2(btcoexist, false, false,
-                                            false, 0x18);
+                       btc8192e2ant_sw_mechanism1(btcoexist, false, false,
+                                                  false, false);
+                       btc8192e2ant_sw_mechanism2(btcoexist, false, false,
+                                                  false, 0x18);
                }
        }
 }
 
 /* PAN(HS) only */
-static void halbtc8192e2ant_action_pan_hs(struct btc_coexist *btcoexist)
+static void btc8192e2ant_action_pan_hs(struct btc_coexist *btcoexist)
 {
-       u8 wifirssi_state, btrssi_state = BTC_RSSI_STATE_HIGH;
+       u8 wifi_rssi_state, bt_rssi_state = BTC_RSSI_STATE_HIGH;
        u32 wifi_bw;
 
-       wifirssi_state = halbtc8192e2ant_wifirssi_state(btcoexist, 0, 2, 15, 0);
-       btrssi_state = halbtc8192e2ant_btrssi_state(btcoexist, 3, 34, 42);
+       wifi_rssi_state = btc8192e2ant_wifi_rssi_state(btcoexist, 0, 2, 15, 0);
+       bt_rssi_state = btc8192e2ant_bt_rssi_state(btcoexist, 3, 34, 42);
 
-       halbtc8192e2ant_switch_sstype(btcoexist, NORMAL_EXEC, 1);
-       halbtc8192e2ant_limited_rx(btcoexist, NORMAL_EXEC, false, false, 0x8);
+       btc8192e2ant_switch_ss_type(btcoexist, NORMAL_EXEC, 1);
+       btc8192e2ant_limited_rx(btcoexist, NORMAL_EXEC, false, false, 0x8);
 
-       halbtc8192e2ant_fw_dac_swinglvl(btcoexist, NORMAL_EXEC, 6);
+       btc8192e2ant_fw_dac_swing_lvl(btcoexist, NORMAL_EXEC, 6);
 
-       btc8192e2ant_coex_tbl_w_type(btcoexist, NORMAL_EXEC, 2);
+       btc8192e2ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 2);
 
-       if ((btrssi_state == BTC_RSSI_STATE_LOW) ||
-           (btrssi_state == BTC_RSSI_STATE_STAY_LOW)) {
-               halbtc8192e2ant_dec_btpwr(btcoexist, NORMAL_EXEC, 0);
-       } else if ((btrssi_state == BTC_RSSI_STATE_MEDIUM) ||
-                  (btrssi_state == BTC_RSSI_STATE_STAY_MEDIUM)) {
-               halbtc8192e2ant_dec_btpwr(btcoexist, NORMAL_EXEC, 2);
-       } else if ((btrssi_state == BTC_RSSI_STATE_HIGH) ||
-                  (btrssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
-               halbtc8192e2ant_dec_btpwr(btcoexist, NORMAL_EXEC, 4);
+       if ((bt_rssi_state == BTC_RSSI_STATE_LOW) ||
+           (bt_rssi_state == BTC_RSSI_STATE_STAY_LOW)) {
+               btc8192e2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, 0);
+       } else if ((bt_rssi_state == BTC_RSSI_STATE_MEDIUM) ||
+                  (bt_rssi_state == BTC_RSSI_STATE_STAY_MEDIUM)) {
+               btc8192e2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, 2);
+       } else if ((bt_rssi_state == BTC_RSSI_STATE_HIGH) ||
+                  (bt_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
+               btc8192e2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, 4);
        }
-       halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC, false, 1);
+       btc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC, false, 1);
 
        btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_BW, &wifi_bw);
        if (BTC_WIFI_BW_HT40 == wifi_bw) {
-               if ((wifirssi_state == BTC_RSSI_STATE_HIGH) ||
-                   (wifirssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
-                       btc8192e2ant_sw_mec1(btcoexist, true, false,
-                                            false, false);
-                       btc8192e2ant_sw_mec2(btcoexist, true, false,
-                                            false, 0x18);
+               if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) ||
+                   (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
+                       btc8192e2ant_sw_mechanism1(btcoexist, true, false,
+                                                  false, false);
+                       btc8192e2ant_sw_mechanism2(btcoexist, true, false,
+                                                  false, 0x18);
                } else {
-                       btc8192e2ant_sw_mec1(btcoexist, true, false,
-                                            false, false);
-                       btc8192e2ant_sw_mec2(btcoexist, false, false,
-                                            false, 0x18);
+                       btc8192e2ant_sw_mechanism1(btcoexist, true, false,
+                                                  false, false);
+                       btc8192e2ant_sw_mechanism2(btcoexist, false, false,
+                                                  false, 0x18);
                }
        } else {
-               if ((wifirssi_state == BTC_RSSI_STATE_HIGH) ||
-                   (wifirssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
-                       btc8192e2ant_sw_mec1(btcoexist, false, false,
-                                            false, false);
-                       btc8192e2ant_sw_mec2(btcoexist, true, false,
-                                            false, 0x18);
+               if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) ||
+                   (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
+                       btc8192e2ant_sw_mechanism1(btcoexist, false, false,
+                                                  false, false);
+                       btc8192e2ant_sw_mechanism2(btcoexist, true, false,
+                                                  false, 0x18);
                } else {
-                       btc8192e2ant_sw_mec1(btcoexist, false, false,
-                                            false, false);
-                       btc8192e2ant_sw_mec2(btcoexist, false, false,
-                                            false, 0x18);
+                       btc8192e2ant_sw_mechanism1(btcoexist, false, false,
+                                                  false, false);
+                       btc8192e2ant_sw_mechanism2(btcoexist, false, false,
+                                                  false, 0x18);
                }
        }
 }
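
Unlike its neighbours, btc8192e2ant_action_pan_hs() ends with btc8192e2ant_ps_tdma(..., false, 1): TDMA is switched off and only the RSSI-scaled BT power decrement stays active, presumably because PAN(HS) carries its payload over the Wi-Fi side. A toy sketch of that on/off split (hypothetical helper name):

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical stand-in for the ps_tdma enable flag seen in the diff. */
static void program_tdma(bool enable, int tdma_case)
{
        if (enable)
                printf("ps_tdma on, case %d\n", tdma_case);
        else
                printf("ps_tdma off (type %d parameter set)\n", tdma_case);
}

int main(void)
{
        program_tdma(true, 1);  /* PAN(EDR): duty-cycled coexistence */
        program_tdma(false, 1); /* PAN(HS): no TDMA, power cut only  */
        return 0;
}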
 
 /* PAN(EDR)+A2DP */
-static void halbtc8192e2ant_action_pan_edr_a2dp(struct btc_coexist *btcoexist)
+static void btc8192e2ant_action_pan_edr_a2dp(struct btc_coexist *btcoexist)
 {
-       u8 wifirssi_state, btrssi_state = BTC_RSSI_STATE_HIGH;
+       u8 wifi_rssi_state, bt_rssi_state = BTC_RSSI_STATE_HIGH;
        u32 wifi_bw;
 
-       wifirssi_state = halbtc8192e2ant_wifirssi_state(btcoexist, 0, 2, 15, 0);
-       btrssi_state = halbtc8192e2ant_btrssi_state(btcoexist, 3, 34, 42);
+       wifi_rssi_state = btc8192e2ant_wifi_rssi_state(btcoexist, 0, 2, 15, 0);
+       bt_rssi_state = btc8192e2ant_bt_rssi_state(btcoexist, 3, 34, 42);
 
-       halbtc8192e2ant_switch_sstype(btcoexist, NORMAL_EXEC, 1);
-       halbtc8192e2ant_limited_rx(btcoexist, NORMAL_EXEC, false, false, 0x8);
+       btc8192e2ant_switch_ss_type(btcoexist, NORMAL_EXEC, 1);
+       btc8192e2ant_limited_rx(btcoexist, NORMAL_EXEC, false, false, 0x8);
 
-       halbtc8192e2ant_fw_dac_swinglvl(btcoexist, NORMAL_EXEC, 6);
+       btc8192e2ant_fw_dac_swing_lvl(btcoexist, NORMAL_EXEC, 6);
 
-       btc8192e2ant_coex_tbl_w_type(btcoexist, NORMAL_EXEC, 2);
+       btc8192e2ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 2);
 
        btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_BW, &wifi_bw);
 
-       if ((btrssi_state == BTC_RSSI_STATE_LOW) ||
-           (btrssi_state == BTC_RSSI_STATE_STAY_LOW)) {
-               halbtc8192e2ant_dec_btpwr(btcoexist, NORMAL_EXEC, 0);
-               halbtc8192e2ant_tdma_duration_adjust(btcoexist, false, true, 3);
-       } else if ((btrssi_state == BTC_RSSI_STATE_MEDIUM) ||
-                  (btrssi_state == BTC_RSSI_STATE_STAY_MEDIUM)) {
-               halbtc8192e2ant_dec_btpwr(btcoexist, NORMAL_EXEC, 2);
-               halbtc8192e2ant_tdma_duration_adjust(btcoexist, false,
-                                                    false, 3);
-       } else if ((btrssi_state == BTC_RSSI_STATE_HIGH) ||
-                  (btrssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
-               halbtc8192e2ant_dec_btpwr(btcoexist, NORMAL_EXEC, 4);
-               halbtc8192e2ant_tdma_duration_adjust(btcoexist, false,
-                                                    false, 3);
+       if ((bt_rssi_state == BTC_RSSI_STATE_LOW) ||
+           (bt_rssi_state == BTC_RSSI_STATE_STAY_LOW)) {
+               btc8192e2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, 0);
+               btc8192e2ant_tdma_duration_adjust(btcoexist, false, true, 3);
+       } else if ((bt_rssi_state == BTC_RSSI_STATE_MEDIUM) ||
+                  (bt_rssi_state == BTC_RSSI_STATE_STAY_MEDIUM)) {
+               btc8192e2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, 2);
+               btc8192e2ant_tdma_duration_adjust(btcoexist, false, false, 3);
+       } else if ((bt_rssi_state == BTC_RSSI_STATE_HIGH) ||
+                  (bt_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
+               btc8192e2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, 4);
+               btc8192e2ant_tdma_duration_adjust(btcoexist, false, false, 3);
        }
 
        /* sw mechanism */
        if (BTC_WIFI_BW_HT40 == wifi_bw) {
-               if ((wifirssi_state == BTC_RSSI_STATE_HIGH) ||
-                   (wifirssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
-                       btc8192e2ant_sw_mec1(btcoexist, true, false,
-                                            false, false);
-                       btc8192e2ant_sw_mec2(btcoexist, true, false,
-                                            false, 0x18);
+               if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) ||
+                   (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
+                       btc8192e2ant_sw_mechanism1(btcoexist, true, false,
+                                                  false, false);
+                       btc8192e2ant_sw_mechanism2(btcoexist, true, false,
+                                                  false, 0x18);
                } else {
-                       btc8192e2ant_sw_mec1(btcoexist, true, false,
-                                            false, false);
-                       btc8192e2ant_sw_mec2(btcoexist, false, false,
-                                            false, 0x18);
+                       btc8192e2ant_sw_mechanism1(btcoexist, true, false,
+                                                  false, false);
+                       btc8192e2ant_sw_mechanism2(btcoexist, false, false,
+                                                  false, 0x18);
                }
        } else {
-               if ((wifirssi_state == BTC_RSSI_STATE_HIGH) ||
-                   (wifirssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
-                       btc8192e2ant_sw_mec1(btcoexist, false, false,
-                                            false, false);
-                       btc8192e2ant_sw_mec2(btcoexist, true, false,
-                                            false, 0x18);
+               if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) ||
+                   (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
+                       btc8192e2ant_sw_mechanism1(btcoexist, false, false,
+                                                  false, false);
+                       btc8192e2ant_sw_mechanism2(btcoexist, true, false,
+                                                  false, 0x18);
                } else {
-                       btc8192e2ant_sw_mec1(btcoexist, false, false,
-                                            false, false);
-                       btc8192e2ant_sw_mec2(btcoexist, false, false,
-                                            false, 0x18);
+                       btc8192e2ant_sw_mechanism1(btcoexist, false, false,
+                                                  false, false);
+                       btc8192e2ant_sw_mechanism2(btcoexist, false, false,
+                                                  false, 0x18);
                }
        }
 }
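
The combined profiles (A2DP, A2DP+PAN(HS), PAN(EDR)+A2DP above, and HID+A2DP+PAN(EDR) below) replace fixed ps_tdma cases with btc8192e2ant_tdma_duration_adjust(). Reading the call sites: the second argument is true only for HID-bearing profiles, the third is true only in the LOW BT RSSI band, and the last selects max_interval 1-3. The parameter names in this sketch are inferred from those call sites, not taken from a header:

#include <stdbool.h>
#include <stdio.h>

/* Call-site shape of tdma_duration_adjust(); names are inferred. */
static void tdma_duration_adjust(bool sco_hid, bool tx_pause, int max_interval)
{
        printf("adjust: sco_hid=%d tx_pause=%d max_interval=%d\n",
               sco_hid, tx_pause, max_interval);
}

int main(void)
{
        bool bt_rssi_low = true;

        /* PAN(EDR)+A2DP above: no HID; pause TX only while BT RSSI is low. */
        tdma_duration_adjust(false, bt_rssi_low, 3);
        /* HID+A2DP+PAN(EDR) below: same shape with the HID flag set. */
        tdma_duration_adjust(true, bt_rssi_low, 3);
        return 0;
}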
 
-static void halbtc8192e2ant_action_pan_edr_hid(struct btc_coexist *btcoexist)
+static void btc8192e2ant_action_pan_edr_hid(struct btc_coexist *btcoexist)
 {
-       u8 wifirssi_state, btrssi_state = BTC_RSSI_STATE_HIGH;
+       u8 wifi_rssi_state, bt_rssi_state = BTC_RSSI_STATE_HIGH;
        u32 wifi_bw;
 
-       wifirssi_state = halbtc8192e2ant_wifirssi_state(btcoexist, 0, 2, 15, 0);
-       btrssi_state = halbtc8192e2ant_btrssi_state(btcoexist, 3, 34, 42);
+       wifi_rssi_state = btc8192e2ant_wifi_rssi_state(btcoexist, 0, 2, 15, 0);
+       bt_rssi_state = btc8192e2ant_bt_rssi_state(btcoexist, 3, 34, 42);
 
        btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_BW, &wifi_bw);
 
-       halbtc8192e2ant_switch_sstype(btcoexist, NORMAL_EXEC, 1);
-       halbtc8192e2ant_limited_rx(btcoexist, NORMAL_EXEC, false, false, 0x8);
+       btc8192e2ant_switch_ss_type(btcoexist, NORMAL_EXEC, 1);
+       btc8192e2ant_limited_rx(btcoexist, NORMAL_EXEC, false, false, 0x8);
 
-       halbtc8192e2ant_fw_dac_swinglvl(btcoexist, NORMAL_EXEC, 6);
+       btc8192e2ant_fw_dac_swing_lvl(btcoexist, NORMAL_EXEC, 6);
 
-       btc8192e2ant_coex_tbl_w_type(btcoexist, NORMAL_EXEC, 3);
+       btc8192e2ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 3);
 
-       if ((btrssi_state == BTC_RSSI_STATE_LOW) ||
-           (btrssi_state == BTC_RSSI_STATE_STAY_LOW)) {
-               halbtc8192e2ant_dec_btpwr(btcoexist, NORMAL_EXEC, 0);
-               halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 14);
-       } else if ((btrssi_state == BTC_RSSI_STATE_MEDIUM) ||
-                  (btrssi_state == BTC_RSSI_STATE_STAY_MEDIUM)) {
-                       halbtc8192e2ant_dec_btpwr(btcoexist, NORMAL_EXEC, 2);
-                       halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC,
-                                               true, 10);
-       } else if ((btrssi_state == BTC_RSSI_STATE_HIGH) ||
-                  (btrssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
-                       halbtc8192e2ant_dec_btpwr(btcoexist, NORMAL_EXEC, 4);
-                       halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC,
-                                               true, 10);
+       if ((bt_rssi_state == BTC_RSSI_STATE_LOW) ||
+           (bt_rssi_state == BTC_RSSI_STATE_STAY_LOW)) {
+               btc8192e2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, 0);
+               btc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 14);
+       } else if ((bt_rssi_state == BTC_RSSI_STATE_MEDIUM) ||
+                  (bt_rssi_state == BTC_RSSI_STATE_STAY_MEDIUM)) {
+               btc8192e2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, 2);
+               btc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 10);
+       } else if ((bt_rssi_state == BTC_RSSI_STATE_HIGH) ||
+                  (bt_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
+               btc8192e2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, 4);
+               btc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 10);
        }
 
        /* sw mechanism */
        if (BTC_WIFI_BW_HT40 == wifi_bw) {
-               if ((wifirssi_state == BTC_RSSI_STATE_HIGH) ||
-                   (wifirssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
-                       btc8192e2ant_sw_mec1(btcoexist, true, true,
-                                            false, false);
-                       btc8192e2ant_sw_mec2(btcoexist, true, false,
-                                            false, 0x18);
+               if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) ||
+                   (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
+                       btc8192e2ant_sw_mechanism1(btcoexist, true, true,
+                                                  false, false);
+                       btc8192e2ant_sw_mechanism2(btcoexist, true, false,
+                                                  false, 0x18);
                } else {
-                       btc8192e2ant_sw_mec1(btcoexist, true, true,
-                                            false, false);
-                       btc8192e2ant_sw_mec2(btcoexist, false, false,
-                                            false, 0x18);
+                       btc8192e2ant_sw_mechanism1(btcoexist, true, true,
+                                                  false, false);
+                       btc8192e2ant_sw_mechanism2(btcoexist, false, false,
+                                                  false, 0x18);
                }
        } else {
-               if ((wifirssi_state == BTC_RSSI_STATE_HIGH) ||
-                   (wifirssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
-                       btc8192e2ant_sw_mec1(btcoexist, false, true,
-                                            false, false);
-                       btc8192e2ant_sw_mec2(btcoexist, true, false,
-                                            false, 0x18);
+               if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) ||
+                   (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
+                       btc8192e2ant_sw_mechanism1(btcoexist, false, true,
+                                                  false, false);
+                       btc8192e2ant_sw_mechanism2(btcoexist, true, false,
+                                                  false, 0x18);
                } else {
-                       btc8192e2ant_sw_mec1(btcoexist, false, true,
-                                            false, false);
-                       btc8192e2ant_sw_mec2(btcoexist, false, false,
-                                            false, 0x18);
+                       btc8192e2ant_sw_mechanism1(btcoexist, false, true,
+                                                  false, false);
+                       btc8192e2ant_sw_mechanism2(btcoexist, false, false,
+                                                  false, 0x18);
                }
        }
 }
@@ -2942,125 +2348,125 @@ static void halbtc8192e2ant_action_pan_edr_hid(struct btc_coexist *btcoexist)
 /* HID+A2DP+PAN(EDR) */
 static void btc8192e2ant_action_hid_a2dp_pan_edr(struct btc_coexist *btcoexist)
 {
-       u8 wifirssi_state, btrssi_state = BTC_RSSI_STATE_HIGH;
+       u8 wifi_rssi_state, bt_rssi_state = BTC_RSSI_STATE_HIGH;
        u32 wifi_bw;
 
-       wifirssi_state = halbtc8192e2ant_wifirssi_state(btcoexist, 0, 2, 15, 0);
-       btrssi_state = halbtc8192e2ant_btrssi_state(btcoexist, 3, 34, 42);
+       wifi_rssi_state = btc8192e2ant_wifi_rssi_state(btcoexist, 0, 2, 15, 0);
+       bt_rssi_state = btc8192e2ant_bt_rssi_state(btcoexist, 3, 34, 42);
 
-       halbtc8192e2ant_switch_sstype(btcoexist, NORMAL_EXEC, 1);
-       halbtc8192e2ant_limited_rx(btcoexist, NORMAL_EXEC, false, false, 0x8);
+       btc8192e2ant_switch_ss_type(btcoexist, NORMAL_EXEC, 1);
+       btc8192e2ant_limited_rx(btcoexist, NORMAL_EXEC, false, false, 0x8);
 
-       halbtc8192e2ant_fw_dac_swinglvl(btcoexist, NORMAL_EXEC, 6);
+       btc8192e2ant_fw_dac_swing_lvl(btcoexist, NORMAL_EXEC, 6);
 
        btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_BW, &wifi_bw);
 
-       btc8192e2ant_coex_tbl_w_type(btcoexist, NORMAL_EXEC, 3);
+       btc8192e2ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 3);
 
-       if ((btrssi_state == BTC_RSSI_STATE_LOW) ||
-           (btrssi_state == BTC_RSSI_STATE_STAY_LOW)) {
-               halbtc8192e2ant_dec_btpwr(btcoexist, NORMAL_EXEC, 0);
-               halbtc8192e2ant_tdma_duration_adjust(btcoexist, true, true, 3);
-       } else if ((btrssi_state == BTC_RSSI_STATE_MEDIUM) ||
-                  (btrssi_state == BTC_RSSI_STATE_STAY_MEDIUM)) {
-               halbtc8192e2ant_dec_btpwr(btcoexist, NORMAL_EXEC, 2);
-               halbtc8192e2ant_tdma_duration_adjust(btcoexist, true, false, 3);
-       } else if ((btrssi_state == BTC_RSSI_STATE_HIGH) ||
-                  (btrssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
-               halbtc8192e2ant_dec_btpwr(btcoexist, NORMAL_EXEC, 4);
-               halbtc8192e2ant_tdma_duration_adjust(btcoexist, true, false, 3);
+       if ((bt_rssi_state == BTC_RSSI_STATE_LOW) ||
+           (bt_rssi_state == BTC_RSSI_STATE_STAY_LOW)) {
+               btc8192e2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, 0);
+               btc8192e2ant_tdma_duration_adjust(btcoexist, true, true, 3);
+       } else if ((bt_rssi_state == BTC_RSSI_STATE_MEDIUM) ||
+                  (bt_rssi_state == BTC_RSSI_STATE_STAY_MEDIUM)) {
+               btc8192e2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, 2);
+               btc8192e2ant_tdma_duration_adjust(btcoexist, true, false, 3);
+       } else if ((bt_rssi_state == BTC_RSSI_STATE_HIGH) ||
+                  (bt_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
+               btc8192e2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, 4);
+               btc8192e2ant_tdma_duration_adjust(btcoexist, true, false, 3);
        }
 
        /* sw mechanism */
        if (BTC_WIFI_BW_HT40 == wifi_bw) {
-               if ((wifirssi_state == BTC_RSSI_STATE_HIGH) ||
-                   (wifirssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
-                       btc8192e2ant_sw_mec1(btcoexist, true, true,
-                                            false, false);
-                       btc8192e2ant_sw_mec2(btcoexist, true, false,
-                                            false, 0x18);
+               if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) ||
+                   (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
+                       btc8192e2ant_sw_mechanism1(btcoexist, true, true,
+                                                  false, false);
+                       btc8192e2ant_sw_mechanism2(btcoexist, true, false,
+                                                  false, 0x18);
                } else {
-                       btc8192e2ant_sw_mec1(btcoexist, true, true,
-                                            false, false);
-                       btc8192e2ant_sw_mec2(btcoexist, false, false,
-                                            false, 0x18);
+                       btc8192e2ant_sw_mechanism1(btcoexist, true, true,
+                                                  false, false);
+                       btc8192e2ant_sw_mechanism2(btcoexist, false, false,
+                                                  false, 0x18);
                }
        } else {
-               if ((wifirssi_state == BTC_RSSI_STATE_HIGH) ||
-                   (wifirssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
-                       btc8192e2ant_sw_mec1(btcoexist, false, true,
-                                            false, false);
-                       btc8192e2ant_sw_mec2(btcoexist, true, false,
-                                            false, 0x18);
+               if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) ||
+                   (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
+                       btc8192e2ant_sw_mechanism1(btcoexist, false, true,
+                                                  false, false);
+                       btc8192e2ant_sw_mechanism2(btcoexist, true, false,
+                                                  false, 0x18);
                } else {
-                       btc8192e2ant_sw_mec1(btcoexist, false, true,
-                                            false, false);
-                       btc8192e2ant_sw_mec2(btcoexist, false, false,
-                                            false, 0x18);
+                       btc8192e2ant_sw_mechanism1(btcoexist, false, true,
+                                                  false, false);
+                       btc8192e2ant_sw_mechanism2(btcoexist, false, false,
+                                                  false, 0x18);
                }
        }
 }
 
-static void halbtc8192e2ant_action_hid_a2dp(struct btc_coexist *btcoexist)
+static void btc8192e2ant_action_hid_a2dp(struct btc_coexist *btcoexist)
 {
-       u8 wifirssi_state, btrssi_state = BTC_RSSI_STATE_HIGH;
+       u8 wifi_rssi_state, bt_rssi_state = BTC_RSSI_STATE_HIGH;
        u32 wifi_bw;
 
-       wifirssi_state = halbtc8192e2ant_wifirssi_state(btcoexist, 0, 2, 15, 0);
-       btrssi_state = halbtc8192e2ant_btrssi_state(btcoexist, 3, 34, 42);
+       wifi_rssi_state = btc8192e2ant_wifi_rssi_state(btcoexist, 0, 2, 15, 0);
+       bt_rssi_state = btc8192e2ant_bt_rssi_state(btcoexist, 3, 34, 42);
 
-       halbtc8192e2ant_switch_sstype(btcoexist, NORMAL_EXEC, 1);
-       halbtc8192e2ant_limited_rx(btcoexist, NORMAL_EXEC, false, false, 0x8);
+       btc8192e2ant_switch_ss_type(btcoexist, NORMAL_EXEC, 1);
+       btc8192e2ant_limited_rx(btcoexist, NORMAL_EXEC, false, false, 0x8);
 
        btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_BW, &wifi_bw);
 
-       btc8192e2ant_coex_tbl_w_type(btcoexist, NORMAL_EXEC, 3);
+       btc8192e2ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 3);
 
-       if ((btrssi_state == BTC_RSSI_STATE_LOW) ||
-           (btrssi_state == BTC_RSSI_STATE_STAY_LOW)) {
-               halbtc8192e2ant_dec_btpwr(btcoexist, NORMAL_EXEC, 0);
-               halbtc8192e2ant_tdma_duration_adjust(btcoexist, true, true, 2);
-       } else if ((btrssi_state == BTC_RSSI_STATE_MEDIUM) ||
-                  (btrssi_state == BTC_RSSI_STATE_STAY_MEDIUM))        {
-               halbtc8192e2ant_dec_btpwr(btcoexist, NORMAL_EXEC, 2);
-               halbtc8192e2ant_tdma_duration_adjust(btcoexist, true, false, 2);
-       } else if ((btrssi_state == BTC_RSSI_STATE_HIGH) ||
-                  (btrssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
-               halbtc8192e2ant_dec_btpwr(btcoexist, NORMAL_EXEC, 4);
-               halbtc8192e2ant_tdma_duration_adjust(btcoexist, true, false, 2);
+       if ((bt_rssi_state == BTC_RSSI_STATE_LOW) ||
+           (bt_rssi_state == BTC_RSSI_STATE_STAY_LOW)) {
+               btc8192e2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, 0);
+               btc8192e2ant_tdma_duration_adjust(btcoexist, true, true, 2);
+       } else if ((bt_rssi_state == BTC_RSSI_STATE_MEDIUM) ||
+                  (bt_rssi_state == BTC_RSSI_STATE_STAY_MEDIUM)) {
+               btc8192e2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, 2);
+               btc8192e2ant_tdma_duration_adjust(btcoexist, true, false, 2);
+       } else if ((bt_rssi_state == BTC_RSSI_STATE_HIGH) ||
+                  (bt_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
+               btc8192e2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, 4);
+               btc8192e2ant_tdma_duration_adjust(btcoexist, true, false, 2);
        }
 
        /* sw mechanism */
        if (BTC_WIFI_BW_HT40 == wifi_bw) {
-               if ((wifirssi_state == BTC_RSSI_STATE_HIGH) ||
-                   (wifirssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
-                       btc8192e2ant_sw_mec1(btcoexist, true, true,
-                                            false, false);
-                       btc8192e2ant_sw_mec2(btcoexist, true, false,
-                                            false, 0x18);
+               if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) ||
+                   (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
+                       btc8192e2ant_sw_mechanism1(btcoexist, true, true,
+                                                  false, false);
+                       btc8192e2ant_sw_mechanism2(btcoexist, true, false,
+                                                  false, 0x18);
                } else {
-                       btc8192e2ant_sw_mec1(btcoexist, true, true,
-                                            false, false);
-                       btc8192e2ant_sw_mec2(btcoexist, false, false,
-                                            false, 0x18);
+                       btc8192e2ant_sw_mechanism1(btcoexist, true, true,
+                                                  false, false);
+                       btc8192e2ant_sw_mechanism2(btcoexist, false, false,
+                                                  false, 0x18);
                }
        } else {
-               if ((wifirssi_state == BTC_RSSI_STATE_HIGH) ||
-                   (wifirssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
-                       btc8192e2ant_sw_mec1(btcoexist, false, true,
-                                            false, false);
-                       btc8192e2ant_sw_mec2(btcoexist, true, false,
-                                            false, 0x18);
+               if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) ||
+                   (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
+                       btc8192e2ant_sw_mechanism1(btcoexist, false, true,
+                                                  false, false);
+                       btc8192e2ant_sw_mechanism2(btcoexist, true, false,
+                                                  false, 0x18);
                } else {
-                       btc8192e2ant_sw_mec1(btcoexist, false, true,
-                                            false, false);
-                       btc8192e2ant_sw_mec2(btcoexist, false, false,
-                                            false, 0x18);
+                       btc8192e2ant_sw_mechanism1(btcoexist, false, true,
+                                                  false, false);
+                       btc8192e2ant_sw_mechanism2(btcoexist, false, false,
+                                                  false, 0x18);
                }
        }
 }
 
-static void halbtc8192e2ant_run_coexist_mechanism(struct btc_coexist *btcoexist)
+static void btc8192e2ant_run_coexist_mechanism(struct btc_coexist *btcoexist)
 {
        struct rtl_priv *rtlpriv = btcoexist->adapter;
        u8 algorithm = 0;
@@ -3080,12 +2486,12 @@ static void halbtc8192e2ant_run_coexist_mechanism(struct btc_coexist *btcoexist)
                return;
        }
 
-       algorithm = halbtc8192e2ant_action_algorithm(btcoexist);
+       algorithm = btc8192e2ant_action_algorithm(btcoexist);
        if (coex_sta->c2h_bt_inquiry_page &&
            (BT_8192E_2ANT_COEX_ALGO_PANHS != algorithm)) {
                RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
                         "[BTCoex], BT is under inquiry/page scan !!\n");
-               halbtc8192e2ant_action_bt_inquiry(btcoexist);
+               btc8192e2ant_action_bt_inquiry(btcoexist);
                return;
        }
 
@@ -3093,7 +2499,7 @@ static void halbtc8192e2ant_run_coexist_mechanism(struct btc_coexist *btcoexist)
        RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
                 "[BTCoex], Algorithm = %d\n", coex_dm->cur_algorithm);
 
-       if (halbtc8192e2ant_is_common_action(btcoexist)) {
+       if (btc8192e2ant_is_common_action(btcoexist)) {
                RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
                         "[BTCoex], Action 2-Ant common\n");
                coex_dm->auto_tdma_adjust = false;
@@ -3109,47 +2515,47 @@ static void halbtc8192e2ant_run_coexist_mechanism(struct btc_coexist *btcoexist)
                case BT_8192E_2ANT_COEX_ALGO_SCO:
                        RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
                                 "Action 2-Ant, algorithm = SCO\n");
-                       halbtc8192e2ant_action_sco(btcoexist);
+                       btc8192e2ant_action_sco(btcoexist);
                        break;
                case BT_8192E_2ANT_COEX_ALGO_SCO_PAN:
                        RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
                                 "Action 2-Ant, algorithm = SCO+PAN(EDR)\n");
-                       halbtc8192e2ant_action_sco_pan(btcoexist);
+                       btc8192e2ant_action_sco_pan(btcoexist);
                        break;
                case BT_8192E_2ANT_COEX_ALGO_HID:
                        RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
                                 "Action 2-Ant, algorithm = HID\n");
-                       halbtc8192e2ant_action_hid(btcoexist);
+                       btc8192e2ant_action_hid(btcoexist);
                        break;
                case BT_8192E_2ANT_COEX_ALGO_A2DP:
                        RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
                                 "Action 2-Ant, algorithm = A2DP\n");
-                       halbtc8192e2ant_action_a2dp(btcoexist);
+                       btc8192e2ant_action_a2dp(btcoexist);
                        break;
                case BT_8192E_2ANT_COEX_ALGO_A2DP_PANHS:
                        RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
                                 "Action 2-Ant, algorithm = A2DP+PAN(HS)\n");
-                       halbtc8192e2ant_action_a2dp_pan_hs(btcoexist);
+                       btc8192e2ant_action_a2dp_pan_hs(btcoexist);
                        break;
                case BT_8192E_2ANT_COEX_ALGO_PANEDR:
                        RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
                                 "Action 2-Ant, algorithm = PAN(EDR)\n");
-                       halbtc8192e2ant_action_pan_edr(btcoexist);
+                       btc8192e2ant_action_pan_edr(btcoexist);
                        break;
                case BT_8192E_2ANT_COEX_ALGO_PANHS:
                        RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
                                 "Action 2-Ant, algorithm = HS mode\n");
-                       halbtc8192e2ant_action_pan_hs(btcoexist);
+                       btc8192e2ant_action_pan_hs(btcoexist);
                        break;
                case BT_8192E_2ANT_COEX_ALGO_PANEDR_A2DP:
                        RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
                                 "Action 2-Ant, algorithm = PAN+A2DP\n");
-                       halbtc8192e2ant_action_pan_edr_a2dp(btcoexist);
+                       btc8192e2ant_action_pan_edr_a2dp(btcoexist);
                        break;
                case BT_8192E_2ANT_COEX_ALGO_PANEDR_HID:
                        RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
                                 "Action 2-Ant, algorithm = PAN(EDR)+HID\n");
-                       halbtc8192e2ant_action_pan_edr_hid(btcoexist);
+                       btc8192e2ant_action_pan_edr_hid(btcoexist);
                        break;
                case BT_8192E_2ANT_COEX_ALGO_HID_A2DP_PANEDR:
                        RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
@@ -3159,20 +2565,20 @@ static void halbtc8192e2ant_run_coexist_mechanism(struct btc_coexist *btcoexist)
                case BT_8192E_2ANT_COEX_ALGO_HID_A2DP:
                        RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
                                 "Action 2-Ant, algorithm = HID+A2DP\n");
-                       halbtc8192e2ant_action_hid_a2dp(btcoexist);
+                       btc8192e2ant_action_hid_a2dp(btcoexist);
                        break;
                default:
                        RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
                                 "Action 2-Ant, algorithm = unknown!!\n");
-                       /* halbtc8192e2ant_coex_alloff(btcoexist); */
+                       /* btc8192e2ant_coex_all_off(btcoexist); */
                        break;
                }
                coex_dm->pre_algorithm = coex_dm->cur_algorithm;
        }
 }
 
-static void halbtc8192e2ant_init_hwconfig(struct btc_coexist *btcoexist,
-                                         bool backup)
+static void btc8192e2ant_init_hwconfig(struct btc_coexist *btcoexist,
+                                      bool backup)
 {
        struct rtl_priv *rtlpriv = btcoexist->adapter;
        u16 u16tmp = 0;
@@ -3191,7 +2597,7 @@ static void halbtc8192e2ant_init_hwconfig(struct btc_coexist *btcoexist,
                                                                      0x430);
                coex_dm->backup_arfr_cnt2 = btcoexist->btc_read_4byte(btcoexist,
                                                                     0x434);
-               coex_dm->backup_retrylimit = btcoexist->btc_read_2byte(
+               coex_dm->backup_retry_limit = btcoexist->btc_read_2byte(
                                                                    btcoexist,
                                                                    0x42a);
                coex_dm->backup_ampdu_maxtime = btcoexist->btc_read_1byte(
@@ -3209,7 +2615,7 @@ static void halbtc8192e2ant_init_hwconfig(struct btc_coexist *btcoexist,
        else
                btcoexist->btc_write_4byte(btcoexist, 0x64, 0x30030004);
 
-       btc8192e2ant_coex_tbl_w_type(btcoexist, FORCE_EXEC, 0);
+       btc8192e2ant_coex_table_with_type(btcoexist, FORCE_EXEC, 0);
 
        /* antenna switch control parameter */
        btcoexist->btc_write_4byte(btcoexist, 0x858, 0x55555555);
@@ -3232,7 +2638,7 @@ static void halbtc8192e2ant_init_hwconfig(struct btc_coexist *btcoexist,
        u16tmp |= BIT9;
        btcoexist->btc_write_2byte(btcoexist, 0x40, u16tmp);
 
-       /* enable PTA I2C mailbox  */
+       /* enable PTA I2C mailbox */
        u8tmp = btcoexist->btc_read_1byte(btcoexist, 0x101);
        u8tmp |= BIT4;
        btcoexist->btc_write_1byte(btcoexist, 0x101, u8tmp);
@@ -3247,29 +2653,25 @@ static void halbtc8192e2ant_init_hwconfig(struct btc_coexist *btcoexist,
        btcoexist->btc_write_1byte(btcoexist, 0x7, u8tmp);
 }
 
-/*************************************************************
- *   work around function start with wa_halbtc8192e2ant_
- *************************************************************/
-
 /************************************************************
- *   extern function start with EXhalbtc8192e2ant_
+ *   extern function start with ex_btc8192e2ant_
  ************************************************************/
 
-void ex_halbtc8192e2ant_init_hwconfig(struct btc_coexist *btcoexist)
+void ex_btc8192e2ant_init_hwconfig(struct btc_coexist *btcoexist)
 {
-       halbtc8192e2ant_init_hwconfig(btcoexist, true);
+       btc8192e2ant_init_hwconfig(btcoexist, true);
 }
 
-void ex_halbtc8192e2ant_init_coex_dm(struct btc_coexist *btcoexist)
+void ex_btc8192e2ant_init_coex_dm(struct btc_coexist *btcoexist)
 {
        struct rtl_priv *rtlpriv = btcoexist->adapter;
 
        RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
                 "[BTCoex], Coex Mechanism Init!!\n");
-       halbtc8192e2ant_init_coex_dm(btcoexist);
+       btc8192e2ant_init_coex_dm(btcoexist);
 }
 
-void ex_halbtc8192e2ant_display_coex_info(struct btc_coexist *btcoexist)
+void ex_btc8192e2ant_display_coex_info(struct btc_coexist *btcoexist)
 {
        struct btc_board_info *board_info = &btcoexist->board_info;
        struct btc_stack_info *stack_info = &btcoexist->stack_info;
@@ -3278,8 +2680,8 @@ void ex_halbtc8192e2ant_display_coex_info(struct btc_coexist *btcoexist)
        u16 u16tmp[4];
        u32 u32tmp[4];
        bool roam = false, scan = false, link = false, wifi_under_5g = false;
-       bool bt_hson = false, wifi_busy = false;
-       int wifirssi = 0, bt_hs_rssi = 0;
+       bool bt_hs_on = false, wifi_busy = false;
+       int wifi_rssi = 0, bt_hs_rssi = 0;
        u32 wifi_bw, wifi_traffic_dir;
        u8 wifi_dot11_chnl, wifi_hs_chnl;
        u32 fw_ver = 0, bt_patch_ver = 0;
@@ -3316,21 +2718,21 @@ void ex_halbtc8192e2ant_display_coex_info(struct btc_coexist *btcoexist)
                 glcoex_ver_date_8192e_2ant, glcoex_ver_8192e_2ant,
                 fw_ver, bt_patch_ver, bt_patch_ver);
 
-       btcoexist->btc_get(btcoexist, BTC_GET_BL_HS_OPERATION, &bt_hson);
+       btcoexist->btc_get(btcoexist, BTC_GET_BL_HS_OPERATION, &bt_hs_on);
        btcoexist->btc_get(btcoexist, BTC_GET_U1_WIFI_DOT11_CHNL,
                           &wifi_dot11_chnl);
        btcoexist->btc_get(btcoexist, BTC_GET_U1_WIFI_HS_CHNL, &wifi_hs_chnl);
        RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = %d / %d(%d)",
                 "Dot11 channel / HsMode(HsChnl)",
-                wifi_dot11_chnl, bt_hson, wifi_hs_chnl);
+                wifi_dot11_chnl, bt_hs_on, wifi_hs_chnl);
 
        RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = %3ph ",
                 "H2C Wifi inform bt chnl Info", coex_dm->wifi_chnl_info);
 
-       btcoexist->btc_get(btcoexist, BTC_GET_S4_WIFI_RSSI, &wifirssi);
+       btcoexist->btc_get(btcoexist, BTC_GET_S4_WIFI_RSSI, &wifi_rssi);
        btcoexist->btc_get(btcoexist, BTC_GET_S4_HS_RSSI, &bt_hs_rssi);
        RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = %d/ %d",
-                "Wifi rssi/ HS rssi", wifirssi, bt_hs_rssi);
+                "Wifi rssi/ HS rssi", wifi_rssi, bt_hs_rssi);
 
        btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_SCAN, &scan);
        btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_LINK, &link);
@@ -3377,7 +2779,7 @@ void ex_halbtc8192e2ant_display_coex_info(struct btc_coexist *btcoexist)
                if (coex_sta->bt_info_c2h_cnt[i]) {
                        RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
                                 "\r\n %-35s = %7ph(%d)",
-                                GLBtInfoSrc8192e2Ant[i],
+                                glbt_info_src_8192e_2ant[i],
                                 coex_sta->bt_info_c2h[i],
                                 coex_sta->bt_info_c2h_cnt[i]);
                }
@@ -3390,7 +2792,7 @@ void ex_halbtc8192e2ant_display_coex_info(struct btc_coexist *btcoexist)
        btcoexist->btc_disp_dbg_msg(btcoexist, BTC_DBG_DISP_FW_PWR_MODE_CMD);
 
        RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = 0x%x ", "SS Type",
-                coex_dm->cur_sstype);
+                coex_dm->cur_ss_type);
 
        /* Sw mechanism */
        RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s",
@@ -3429,7 +2831,7 @@ void ex_halbtc8192e2ant_display_coex_info(struct btc_coexist *btcoexist)
 
        RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = 0x%x/0x%x/0x%x/0x%x",
                 "backup ARFR1/ARFR2/RL/AMaxTime", coex_dm->backup_arfr_cnt1,
-                coex_dm->backup_arfr_cnt2, coex_dm->backup_retrylimit,
+                coex_dm->backup_arfr_cnt2, coex_dm->backup_retry_limit,
                 coex_dm->backup_ampdu_maxtime);
 
        u32tmp[0] = btcoexist->btc_read_4byte(btcoexist, 0x430);
@@ -3485,12 +2887,12 @@ void ex_halbtc8192e2ant_display_coex_info(struct btc_coexist *btcoexist)
                 "0x774(lp rx[31:16]/tx[15:0])",
                 coex_sta->low_priority_rx, coex_sta->low_priority_tx);
 #if (BT_AUTO_REPORT_ONLY_8192E_2ANT == 1)
-       halbtc8192e2ant_monitor_bt_ctr(btcoexist);
+       btc8192e2ant_monitor_bt_ctr(btcoexist);
 #endif
        btcoexist->btc_disp_dbg_msg(btcoexist, BTC_DBG_DISP_COEX_STATISTICS);
 }
 
-void ex_halbtc8192e2ant_ips_notify(struct btc_coexist *btcoexist, u8 type)
+void ex_btc8192e2ant_ips_notify(struct btc_coexist *btcoexist, u8 type)
 {
        struct rtl_priv *rtlpriv = btcoexist->adapter;
 
@@ -3498,7 +2900,7 @@ void ex_halbtc8192e2ant_ips_notify(struct btc_coexist *btcoexist, u8 type)
                RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
                         "[BTCoex], IPS ENTER notify\n");
                coex_sta->under_ips = true;
-               halbtc8192e2ant_coex_alloff(btcoexist);
+               btc8192e2ant_coex_all_off(btcoexist);
        } else if (BTC_IPS_LEAVE == type) {
                RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
                         "[BTCoex], IPS LEAVE notify\n");
@@ -3506,7 +2908,7 @@ void ex_halbtc8192e2ant_ips_notify(struct btc_coexist *btcoexist, u8 type)
        }
 }
 
-void ex_halbtc8192e2ant_lps_notify(struct btc_coexist *btcoexist, u8 type)
+void ex_btc8192e2ant_lps_notify(struct btc_coexist *btcoexist, u8 type)
 {
        struct rtl_priv *rtlpriv = btcoexist->adapter;
 
@@ -3521,7 +2923,7 @@ void ex_halbtc8192e2ant_lps_notify(struct btc_coexist *btcoexist, u8 type)
        }
 }
 
-void ex_halbtc8192e2ant_scan_notify(struct btc_coexist *btcoexist, u8 type)
+void ex_btc8192e2ant_scan_notify(struct btc_coexist *btcoexist, u8 type)
 {
        struct rtl_priv *rtlpriv = btcoexist->adapter;
 
@@ -3533,7 +2935,7 @@ void ex_halbtc8192e2ant_scan_notify(struct btc_coexist *btcoexist, u8 type)
                         "[BTCoex], SCAN FINISH notify\n");
 }
 
-void ex_halbtc8192e2ant_connect_notify(struct btc_coexist *btcoexist, u8 type)
+void ex_btc8192e2ant_connect_notify(struct btc_coexist *btcoexist, u8 type)
 {
        struct rtl_priv *rtlpriv = btcoexist->adapter;
 
@@ -3545,8 +2947,8 @@ void ex_halbtc8192e2ant_connect_notify(struct btc_coexist *btcoexist, u8 type)
                         "[BTCoex], CONNECT FINISH notify\n");
 }
 
-void ex_halbtc8192e2ant_media_status_notify(struct btc_coexist *btcoexist,
-                                           u8 type)
+void ex_btc8192e2ant_media_status_notify(struct btc_coexist *btcoexist,
+                                        u8 type)
 {
        struct rtl_priv *rtlpriv = btcoexist->adapter;
        u8 h2c_parameter[3] = {0};
@@ -3591,8 +2993,8 @@ void ex_halbtc8192e2ant_media_status_notify(struct btc_coexist *btcoexist,
        btcoexist->btc_fill_h2c(btcoexist, 0x66, 3, h2c_parameter);
 }
 
-void ex_halbtc8192e2ant_special_packet_notify(struct btc_coexist *btcoexist,
-                                             u8 type)
+void ex_btc8192e2ant_special_packet_notify(struct btc_coexist *btcoexist,
+                                          u8 type)
 {
        struct rtl_priv *rtlpriv = btcoexist->adapter;
 
@@ -3601,8 +3003,8 @@ void ex_halbtc8192e2ant_special_packet_notify(struct btc_coexist *btcoexist,
                         "[BTCoex], DHCP Packet notify\n");
 }
 
-void ex_halbtc8192e2ant_bt_info_notify(struct btc_coexist *btcoexist,
-                                      u8 *tmp_buf, u8 length)
+void ex_btc8192e2ant_bt_info_notify(struct btc_coexist *btcoexist,
+                                   u8 *tmp_buf, u8 length)
 {
        struct rtl_priv *rtlpriv = btcoexist->adapter;
        u8 bt_info = 0;
@@ -3633,7 +3035,8 @@ void ex_halbtc8192e2ant_bt_info_notify(struct btc_coexist *btcoexist,
        }
 
        if (BT_INFO_SRC_8192E_2ANT_WIFI_FW != rsp_source) {
-               coex_sta->bt_retry_cnt =        /* [3:0] */
+               /* [3:0] */
+               coex_sta->bt_retry_cnt =
                        coex_sta->bt_info_c2h[rsp_source][2] & 0xf;
 
                coex_sta->bt_rssi =
@@ -3651,11 +3054,11 @@ void ex_halbtc8192e2ant_bt_info_notify(struct btc_coexist *btcoexist,
                        btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_CONNECTED,
                                           &wifi_connected);
                        if (wifi_connected)
-                               ex_halbtc8192e2ant_media_status_notify(
+                               ex_btc8192e2ant_media_status_notify(
                                                        btcoexist,
                                                        BTC_MEDIA_CONNECT);
                        else
-                               ex_halbtc8192e2ant_media_status_notify(
+                               ex_btc8192e2ant_media_status_notify(
                                                        btcoexist,
                                                        BTC_MEDIA_DISCONNECT);
                }
@@ -3665,9 +3068,9 @@ void ex_halbtc8192e2ant_bt_info_notify(struct btc_coexist *btcoexist,
                            !btcoexist->stop_coex_dm) {
                                RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
                                         "bit3, BT NOT ignore Wlan active!\n");
-                               halbtc8192e2ant_IgnoreWlanAct(btcoexist,
-                                                             FORCE_EXEC,
-                                                             false);
+                               btc8192e2ant_ignore_wlan_act(btcoexist,
+                                                            FORCE_EXEC,
+                                                            false);
                        }
                } else {
                        /* BT already NOT ignore Wlan active,
@@ -3679,8 +3082,8 @@ void ex_halbtc8192e2ant_bt_info_notify(struct btc_coexist *btcoexist,
                if ((coex_sta->bt_info_ext & BIT4)) {
                        /* BT auto report already enabled, do nothing */
                } else {
-                       halbtc8192e2ant_bt_autoreport(btcoexist, FORCE_EXEC,
-                                                     true);
+                       btc8192e2ant_bt_auto_report(btcoexist, FORCE_EXEC,
+                                                   true);
                }
 #endif
        }
@@ -3718,9 +3121,9 @@ void ex_halbtc8192e2ant_bt_info_notify(struct btc_coexist *btcoexist,
                        coex_sta->sco_exist = false;
        }
 
-       halbtc8192e2ant_update_btlink_info(btcoexist);
+       btc8192e2ant_update_bt_link_info(btcoexist);
 
-       if (!(bt_info&BT_INFO_8192E_2ANT_B_CONNECTION)) {
+       if (!(bt_info & BT_INFO_8192E_2ANT_B_CONNECTION)) {
                coex_dm->bt_status = BT_8192E_2ANT_BT_STATUS_NON_CONNECTED_IDLE;
                RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
                         "[BTCoex], BT Non-Connected idle!!!\n");
@@ -3728,12 +3131,12 @@ void ex_halbtc8192e2ant_bt_info_notify(struct btc_coexist *btcoexist,
                coex_dm->bt_status = BT_8192E_2ANT_BT_STATUS_CONNECTED_IDLE;
                RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
                         "[BTCoex], bt_infoNotify(), BT Connected-idle!!!\n");
-       } else if ((bt_info&BT_INFO_8192E_2ANT_B_SCO_ESCO) ||
-                  (bt_info&BT_INFO_8192E_2ANT_B_SCO_BUSY)) {
+       } else if ((bt_info & BT_INFO_8192E_2ANT_B_SCO_ESCO) ||
+                  (bt_info & BT_INFO_8192E_2ANT_B_SCO_BUSY)) {
                coex_dm->bt_status = BT_8192E_2ANT_BT_STATUS_SCO_BUSY;
                RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
                         "[BTCoex], bt_infoNotify(), BT SCO busy!!!\n");
-       } else if (bt_info&BT_INFO_8192E_2ANT_B_ACL_BUSY) {
+       } else if (bt_info & BT_INFO_8192E_2ANT_B_ACL_BUSY) {
                coex_dm->bt_status = BT_8192E_2ANT_BT_STATUS_ACL_BUSY;
                RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
                         "[BTCoex], bt_infoNotify(), BT ACL busy!!!\n");
@@ -3758,12 +3161,7 @@ void ex_halbtc8192e2ant_bt_info_notify(struct btc_coexist *btcoexist,
        coex_dm->limited_dig = limited_dig;
        btcoexist->btc_set(btcoexist, BTC_SET_BL_BT_LIMITED_DIG, &limited_dig);
 
-       halbtc8192e2ant_run_coexist_mechanism(btcoexist);
-}
-
-void ex_halbtc8192e2ant_stack_operation_notify(struct btc_coexist *btcoexist,
-                                              u8 type)
-{
+       btc8192e2ant_run_coexist_mechanism(btcoexist);
 }
 
 void ex_halbtc8192e2ant_halt_notify(struct btc_coexist *btcoexist)
@@ -3772,11 +3170,11 @@ void ex_halbtc8192e2ant_halt_notify(struct btc_coexist *btcoexist)
 
        RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, "[BTCoex], Halt notify\n");
 
-       halbtc8192e2ant_IgnoreWlanAct(btcoexist, FORCE_EXEC, true);
-       ex_halbtc8192e2ant_media_status_notify(btcoexist, BTC_MEDIA_DISCONNECT);
+       btc8192e2ant_ignore_wlan_act(btcoexist, FORCE_EXEC, true);
+       ex_btc8192e2ant_media_status_notify(btcoexist, BTC_MEDIA_DISCONNECT);
 }
 
-void ex_halbtc8192e2ant_periodical(struct btc_coexist *btcoexist)
+void ex_btc8192e2ant_periodical(struct btc_coexist *btcoexist)
 {
        struct rtl_priv *rtlpriv = btcoexist->adapter;
        static u8 dis_ver_info_cnt;
@@ -3810,12 +3208,12 @@ void ex_halbtc8192e2ant_periodical(struct btc_coexist *btcoexist)
        }
 
 #if (BT_AUTO_REPORT_ONLY_8192E_2ANT == 0)
-       halbtc8192e2ant_querybt_info(btcoexist);
-       halbtc8192e2ant_monitor_bt_ctr(btcoexist);
-       btc8192e2ant_monitor_bt_enable_dis(btcoexist);
+       btc8192e2ant_query_bt_info(btcoexist);
+       btc8192e2ant_monitor_bt_ctr(btcoexist);
+       btc8192e2ant_monitor_bt_enable_disable(btcoexist);
 #else
-       if (halbtc8192e2ant_iswifi_status_changed(btcoexist) ||
+       if (btc8192e2ant_is_wifi_status_changed(btcoexist) ||
            coex_dm->auto_tdma_adjust)
-               halbtc8192e2ant_run_coexist_mechanism(btcoexist);
+               btc8192e2ant_run_coexist_mechanism(btcoexist);
 #endif
 }
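
Every btc8192e2ant_action_*() handler renamed above follows one recipe: classify the BT RSSI into a low/medium/high band (with hysteresis), derive a BT TX power back-off level and a firmware PS-TDMA case from that band, then select the software mechanisms from the Wi-Fi bandwidth and Wi-Fi RSSI. Below is a minimal, self-contained C sketch of the band-to-action step; pick_action() and the struct fields are invented for illustration, and only the 0/2/4 back-off levels and the TDMA case numbers are taken from the hunks above:

#include <stdio.h>

enum rssi_band { RSSI_LOW, RSSI_MEDIUM, RSSI_HIGH };

struct coex_action {
        int dec_bt_pwr_level;   /* steps of BT TX power back-off */
        int ps_tdma_case;       /* firmware PS-TDMA parameter set */
};

/* Mirror of the three-way if/else chain in the action handlers; the
 * STAY_* states collapse into the same band as their base state. */
static struct coex_action pick_action(enum rssi_band bt_band)
{
        switch (bt_band) {
        case RSSI_LOW:
                return (struct coex_action){ 0, 14 };
        case RSSI_MEDIUM:
                return (struct coex_action){ 2, 10 };
        default:        /* RSSI_HIGH */
                return (struct coex_action){ 4, 10 };
        }
}

int main(void)
{
        enum rssi_band b;

        for (b = RSSI_LOW; b <= RSSI_HIGH; b++) {
                struct coex_action a = pick_action(b);

                printf("band %d -> dec_bt_pwr %d, ps_tdma case %d\n",
                       (int)b, a.dec_bt_pwr_level, a.ps_tdma_case);
        }
        return 0;
}

Compiled standalone this prints one action per band; in the driver the same decision is repeated inside each profile-specific action handler.
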
index 75e1f7d0db0627ec99a64da62680aec13c61465a..fc0fa87ec404aa94a6185e87478f1f545783dbea 100644
@@ -116,7 +116,7 @@ struct coex_dm_8192e_2ant {
 
        u32 backup_arfr_cnt1;   /* Auto Rate Fallback Retry cnt */
        u32 backup_arfr_cnt2;   /* Auto Rate Fallback Retry cnt */
-       u16 backup_retrylimit;
+       u16 backup_retry_limit;
        u8 backup_ampdu_maxtime;
 
        /* algorithm related */
@@ -125,18 +125,18 @@ struct coex_dm_8192e_2ant {
        u8 bt_status;
        u8 wifi_chnl_info[3];
 
-       u8 pre_sstype;
-       u8 cur_sstype;
+       u8 pre_ss_type;
+       u8 cur_ss_type;
 
-       u32 prera_mask;
-       u32 curra_mask;
-       u8 curra_masktype;
-       u8 pre_arfrtype;
-       u8 cur_arfrtype;
-       u8 pre_retrylimit_type;
-       u8 cur_retrylimit_type;
-       u8 pre_ampdutime_type;
-       u8 cur_ampdutime_type;
+       u32 pre_ra_mask;
+       u32 cur_ra_mask;
+       u8 cur_ra_mask_type;
+       u8 pre_arfr_type;
+       u8 cur_arfr_type;
+       u8 pre_retry_limit_type;
+       u8 cur_retry_limit_type;
+       u8 pre_ampdu_time_type;
+       u8 cur_ampdu_time_type;
 };
 
 struct coex_sta_8192e_2ant {
index d67bbfb6ad8e61c4e1e0cef2e3a83c04a961f71f..2003c8c51dcc4c9deefcbe5f1045b010e53cc080 100644
@@ -45,7 +45,7 @@ static struct coex_dm_8723b_1ant *coex_dm = &glcoex_dm_8723b_1ant;
 static struct coex_sta_8723b_1ant glcoex_sta_8723b_1ant;
 static struct coex_sta_8723b_1ant *coex_sta = &glcoex_sta_8723b_1ant;
 
-static const char *const GLBtInfoSrc8723b1Ant[] = {
+static const char *const glbt_info_src_8723b_1ant[] = {
        "BT Info[wifi fw]",
        "BT Info[bt rsp]",
        "BT Info[bt auto report]",
@@ -60,188 +60,6 @@ static u32 glcoex_ver_8723b_1ant = 0x47;
 /***************************************************************
  * local function start with halbtc8723b1ant_
  ***************************************************************/
-static u8 halbtc8723b1ant_bt_rssi_state(struct btc_coexist *btcoexist,
-                                       u8 level_num, u8 rssi_thresh,
-                                       u8 rssi_thresh1)
-{
-       struct rtl_priv *rtlpriv = btcoexist->adapter;
-       s32 bt_rssi = 0;
-       u8 bt_rssi_state = coex_sta->pre_bt_rssi_state;
-
-       bt_rssi = coex_sta->bt_rssi;
-
-       if (level_num == 2) {
-               if ((coex_sta->pre_bt_rssi_state == BTC_RSSI_STATE_LOW) ||
-                   (coex_sta->pre_bt_rssi_state == BTC_RSSI_STATE_STAY_LOW)) {
-                       if (bt_rssi >= rssi_thresh +
-                                       BTC_RSSI_COEX_THRESH_TOL_8723B_1ANT) {
-                               bt_rssi_state = BTC_RSSI_STATE_HIGH;
-                               RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
-                                        "[BTCoex], BT Rssi state switch to High\n");
-                       } else {
-                               bt_rssi_state = BTC_RSSI_STATE_STAY_LOW;
-                               RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
-                                        "[BTCoex], BT Rssi state stay at Low\n");
-                       }
-               } else {
-                       if (bt_rssi < rssi_thresh) {
-                               bt_rssi_state = BTC_RSSI_STATE_LOW;
-                               RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
-                                        "[BTCoex], BT Rssi state switch to Low\n");
-                       } else {
-                               bt_rssi_state = BTC_RSSI_STATE_STAY_HIGH;
-                               RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
-                                        "[BTCoex], BT Rssi state stay at High\n");
-                       }
-               }
-       } else if (level_num == 3) {
-               if (rssi_thresh > rssi_thresh1) {
-                       RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
-                                "[BTCoex], BT Rssi thresh error!!\n");
-                       return coex_sta->pre_bt_rssi_state;
-               }
-
-               if ((coex_sta->pre_bt_rssi_state == BTC_RSSI_STATE_LOW) ||
-                   (coex_sta->pre_bt_rssi_state == BTC_RSSI_STATE_STAY_LOW)) {
-                       if (bt_rssi >= rssi_thresh +
-                                       BTC_RSSI_COEX_THRESH_TOL_8723B_1ANT) {
-                               bt_rssi_state = BTC_RSSI_STATE_MEDIUM;
-                               RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
-                                        "[BTCoex], BT Rssi state switch to Medium\n");
-                       } else {
-                               bt_rssi_state = BTC_RSSI_STATE_STAY_LOW;
-                               RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
-                                        "[BTCoex], BT Rssi state stay at Low\n");
-                       }
-               } else if ((coex_sta->pre_bt_rssi_state ==
-                                       BTC_RSSI_STATE_MEDIUM) ||
-                         (coex_sta->pre_bt_rssi_state ==
-                                       BTC_RSSI_STATE_STAY_MEDIUM)) {
-                       if (bt_rssi >= rssi_thresh1 +
-                                       BTC_RSSI_COEX_THRESH_TOL_8723B_1ANT) {
-                               bt_rssi_state = BTC_RSSI_STATE_HIGH;
-                               RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
-                                        "[BTCoex], BT Rssi state switch to High\n");
-                       } else if (bt_rssi < rssi_thresh) {
-                               bt_rssi_state = BTC_RSSI_STATE_LOW;
-                               RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
-                                        "[BTCoex], BT Rssi state switch to Low\n");
-                       } else {
-                               bt_rssi_state = BTC_RSSI_STATE_STAY_MEDIUM;
-                               RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
-                                        "[BTCoex], BT Rssi state stay at Medium\n");
-                       }
-               } else {
-                       if (bt_rssi < rssi_thresh1) {
-                               bt_rssi_state = BTC_RSSI_STATE_MEDIUM;
-                               RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
-                                        "[BTCoex], BT Rssi state switch to Medium\n");
-                       } else {
-                               bt_rssi_state = BTC_RSSI_STATE_STAY_HIGH;
-                               RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
-                                        "[BTCoex], BT Rssi state stay at High\n");
-                       }
-               }
-       }
-
-       coex_sta->pre_bt_rssi_state = bt_rssi_state;
-
-       return bt_rssi_state;
-}
-
-static u8 halbtc8723b1ant_wifi_rssi_state(struct btc_coexist *btcoexist,
-                                         u8 index, u8 level_num,
-                                         u8 rssi_thresh, u8 rssi_thresh1)
-{
-       struct rtl_priv *rtlpriv = btcoexist->adapter;
-       s32 wifi_rssi = 0;
-       u8 wifi_rssi_state = coex_sta->pre_wifi_rssi_state[index];
-
-       btcoexist->btc_get(btcoexist,
-               BTC_GET_S4_WIFI_RSSI, &wifi_rssi);
-
-       if (level_num == 2) {
-               if ((coex_sta->pre_wifi_rssi_state[index] ==
-                                       BTC_RSSI_STATE_LOW) ||
-                   (coex_sta->pre_wifi_rssi_state[index] ==
-                                       BTC_RSSI_STATE_STAY_LOW)) {
-                       if (wifi_rssi >= rssi_thresh +
-                                       BTC_RSSI_COEX_THRESH_TOL_8723B_1ANT) {
-                               wifi_rssi_state = BTC_RSSI_STATE_HIGH;
-                               RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
-                                        "[BTCoex], wifi RSSI state switch to High\n");
-                       } else {
-                               wifi_rssi_state = BTC_RSSI_STATE_STAY_LOW;
-                               RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
-                                        "[BTCoex], wifi RSSI state stay at Low\n");
-                       }
-               } else {
-                       if (wifi_rssi < rssi_thresh) {
-                               wifi_rssi_state = BTC_RSSI_STATE_LOW;
-                               RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
-                                        "[BTCoex], wifi RSSI state switch to Low\n");
-                       } else {
-                               wifi_rssi_state = BTC_RSSI_STATE_STAY_HIGH;
-                               RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
-                                        "[BTCoex], wifi RSSI state stay at High\n");
-                       }
-               }
-       } else if (level_num == 3) {
-               if (rssi_thresh > rssi_thresh1) {
-                       RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
-                                "[BTCoex], wifi RSSI thresh error!!\n");
-                       return coex_sta->pre_wifi_rssi_state[index];
-               }
-
-               if ((coex_sta->pre_wifi_rssi_state[index] ==
-                                               BTC_RSSI_STATE_LOW) ||
-                   (coex_sta->pre_wifi_rssi_state[index] ==
-                                               BTC_RSSI_STATE_STAY_LOW)) {
-                       if (wifi_rssi >= rssi_thresh +
-                                        BTC_RSSI_COEX_THRESH_TOL_8723B_1ANT) {
-                               wifi_rssi_state = BTC_RSSI_STATE_MEDIUM;
-                               RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
-                                        "[BTCoex], wifi RSSI state switch to Medium\n");
-                       } else {
-                               wifi_rssi_state = BTC_RSSI_STATE_STAY_LOW;
-                               RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
-                                        "[BTCoex], wifi RSSI state stay at Low\n");
-                       }
-               } else if ((coex_sta->pre_wifi_rssi_state[index] ==
-                                               BTC_RSSI_STATE_MEDIUM) ||
-                          (coex_sta->pre_wifi_rssi_state[index] ==
-                                               BTC_RSSI_STATE_STAY_MEDIUM)) {
-                       if (wifi_rssi >= rssi_thresh1 +
-                                        BTC_RSSI_COEX_THRESH_TOL_8723B_1ANT) {
-                               wifi_rssi_state = BTC_RSSI_STATE_HIGH;
-                               RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
-                                        "[BTCoex], wifi RSSI state switch to High\n");
-                       } else if (wifi_rssi < rssi_thresh) {
-                               wifi_rssi_state = BTC_RSSI_STATE_LOW;
-                               RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
-                                        "[BTCoex], wifi RSSI state switch to Low\n");
-                       } else {
-                               wifi_rssi_state = BTC_RSSI_STATE_STAY_MEDIUM;
-                               RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
-                                        "[BTCoex], wifi RSSI state stay at Medium\n");
-                       }
-               } else {
-                       if (wifi_rssi < rssi_thresh1) {
-                               wifi_rssi_state = BTC_RSSI_STATE_MEDIUM;
-                               RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
-                                        "[BTCoex], wifi RSSI state switch to Medium\n");
-                       } else {
-                               wifi_rssi_state = BTC_RSSI_STATE_STAY_HIGH;
-                               RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
-                                        "[BTCoex], wifi RSSI state stay at High\n");
-                       }
-               }
-       }
-
-       coex_sta->pre_wifi_rssi_state[index] = wifi_rssi_state;
-
-       return wifi_rssi_state;
-}
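
The two helpers deleted above, halbtc8723b1ant_bt_rssi_state() and halbtc8723b1ant_wifi_rssi_state(), are per-chip copies of one generic idea: a banded comparator with hysteresis. A reading only climbs into the next band after clearing the threshold plus a tolerance (BTC_RSSI_COEX_THRESH_TOL_8723B_1ANT), but falls back as soon as it drops below the bare threshold, so a signal hovering near a threshold cannot flap between states. A hedged two-level sketch follows; rssi_state(), THRESH, and THRESH_TOL are illustrative stand-ins, not the driver's symbols:

#include <stdio.h>

enum rssi_state { STATE_LOW, STATE_HIGH };

#define THRESH          35      /* illustrative threshold */
#define THRESH_TOL      2       /* hysteresis tolerance */

/* Moving up requires THRESH + THRESH_TOL, moving down only THRESH,
 * so readings between the two keep whatever state was held before. */
static enum rssi_state rssi_state(enum rssi_state prev, int rssi)
{
        if (prev == STATE_LOW)
                return (rssi >= THRESH + THRESH_TOL) ? STATE_HIGH
                                                     : STATE_LOW;

        return (rssi < THRESH) ? STATE_LOW : STATE_HIGH;
}

int main(void)
{
        static const int samples[] = { 30, 36, 38, 36, 34 };
        enum rssi_state s = STATE_LOW;
        unsigned i;

        for (i = 0; i < sizeof(samples) / sizeof(samples[0]); i++) {
                s = rssi_state(s, samples[i]);
                printf("rssi %d -> %s\n", samples[i],
                       s == STATE_HIGH ? "HIGH" : "LOW");
        }
        return 0;
}

In the trace, 36 is not enough to reach HIGH (37 is required), but once HIGH is held a dip back to 36 does not lose it; only falling below 35 does. The three-level variants removed above apply the same rule once per threshold.
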
 
 static void halbtc8723b1ant_updatera_mask(struct btc_coexist *btcoexist,
                                          bool force_exec, u32 dis_rate_mask)
@@ -249,7 +67,7 @@ static void halbtc8723b1ant_updatera_mask(struct btc_coexist *btcoexist,
        coex_dm->curra_mask = dis_rate_mask;
 
        if (force_exec || (coex_dm->prera_mask != coex_dm->curra_mask))
-               btcoexist->btc_set(btcoexist, BTC_SET_ACT_UPDATE_ra_mask,
+               btcoexist->btc_set(btcoexist, BTC_SET_ACT_UPDATE_RAMASK,
                                   &coex_dm->curra_mask);
 
        coex_dm->prera_mask = coex_dm->curra_mask;
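
This hunk also shows the pre/cur idiom behind the paired fields renamed earlier in the 8192e 2ant header (pre_ra_mask/cur_ra_mask and friends), used here as prera_mask/curra_mask: a setter stores the requested value in the cur field, touches hardware only when the value changed or the caller passed FORCE_EXEC, and then latches cur into pre. A minimal sketch, assuming a write_ra_mask() stand-in for the btc_set() callback:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct dm_state {
        uint32_t pre_ra_mask;
        uint32_t cur_ra_mask;
};

/* Stand-in for btc_set(btcoexist, BTC_SET_ACT_UPDATE_RAMASK, ...). */
static void write_ra_mask(uint32_t mask)
{
        printf("hw write: ra_mask = 0x%08x\n", (unsigned)mask);
}

static void update_ra_mask(struct dm_state *dm, bool force_exec,
                           uint32_t dis_rate_mask)
{
        dm->cur_ra_mask = dis_rate_mask;

        /* NORMAL_EXEC (force_exec == false) skips redundant writes. */
        if (force_exec || dm->pre_ra_mask != dm->cur_ra_mask)
                write_ra_mask(dm->cur_ra_mask);

        dm->pre_ra_mask = dm->cur_ra_mask;
}

int main(void)
{
        struct dm_state dm = { 0, 0 };

        update_ra_mask(&dm, false, 0x0001f1f7); /* written: changed */
        update_ra_mask(&dm, false, 0x0001f1f7); /* skipped: unchanged */
        update_ra_mask(&dm, true, 0x0001f1f7);  /* written: forced */
        return 0;
}

FORCE_EXEC is what lets init and notify paths reprogram the hardware even when the cached value already matches.
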
@@ -326,15 +144,14 @@ static void halbtc8723b1ant_ampdu_maxtime(struct btc_coexist *btcoexist,
                coex_dm->cur_ampdu_time_type)) {
                switch (coex_dm->cur_ampdu_time_type) {
                case 0: /* normal mode */
-                               btcoexist->btc_write_1byte(btcoexist, 0x456,
-                                               coex_dm->backup_ampdu_max_time);
-                               break;
+                       btcoexist->btc_write_1byte(btcoexist, 0x456,
+                                       coex_dm->backup_ampdu_max_time);
+                       break;
                case 1: /* AMPDU time = 0x38 * 32us */
-                               btcoexist->btc_write_1byte(btcoexist,
-                                                          0x456, 0x38);
-                               break;
+                       btcoexist->btc_write_1byte(btcoexist, 0x456, 0x38);
+                       break;
                default:
-                               break;
+                       break;
                }
        }
 
@@ -354,7 +171,7 @@ static void halbtc8723b1ant_limited_tx(struct btc_coexist *btcoexist,
                halbtc8723b1ant_updatera_mask(btcoexist, force_exec,
                                              0x00000003);
                break;
-       /* disable cck 1/2/5.5, ofdm 6/9/12/18/24, mcs 0/1/2/3/4*/
+       /* disable cck 1/2/5.5, ofdm 6/9/12/18/24, mcs 0/1/2/3/4 */
        case 2:
                halbtc8723b1ant_updatera_mask(btcoexist, force_exec,
                                              0x0001f1f7);
@@ -426,7 +243,8 @@ static void halbtc8723b1ant_query_bt_info(struct btc_coexist *btcoexist)
 
        coex_sta->c2h_bt_info_req_sent = true;
 
-       h2c_parameter[0] |= BIT0;       /* trigger*/
+       /* trigger */
+       h2c_parameter[0] |= BIT0;
 
        RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
                 "[BTCoex], Query Bt Info, FW write 0x61 = 0x%x\n",
@@ -515,202 +333,6 @@ static void halbtc8723b1ant_update_bt_link_info(struct btc_coexist *btcoexist)
                bt_link_info->hid_only = false;
 }
 
-static u8 halbtc8723b1ant_action_algorithm(struct btc_coexist *btcoexist)
-{
-       struct rtl_priv *rtlpriv = btcoexist->adapter;
-       struct btc_bt_link_info *bt_link_info = &btcoexist->bt_link_info;
-       bool bt_hs_on = false;
-       u8 algorithm = BT_8723B_1ANT_COEX_ALGO_UNDEFINED;
-       u8 numdiffprofile = 0;
-
-       btcoexist->btc_get(btcoexist, BTC_GET_BL_HS_OPERATION, &bt_hs_on);
-
-       if (!bt_link_info->bt_link_exist) {
-               RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
-                        "[BTCoex], No BT link exists!!!\n");
-               return algorithm;
-       }
-
-       if (bt_link_info->sco_exist)
-               numdiffprofile++;
-       if (bt_link_info->hid_exist)
-               numdiffprofile++;
-       if (bt_link_info->pan_exist)
-               numdiffprofile++;
-       if (bt_link_info->a2dp_exist)
-               numdiffprofile++;
-
-       if (numdiffprofile == 1) {
-               if (bt_link_info->sco_exist) {
-                       RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
-                                "[BTCoex], BT Profile = SCO only\n");
-                       algorithm = BT_8723B_1ANT_COEX_ALGO_SCO;
-               } else {
-                       if (bt_link_info->hid_exist) {
-                               RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
-                                        "[BTCoex], BT Profile = HID only\n");
-                               algorithm = BT_8723B_1ANT_COEX_ALGO_HID;
-                       } else if (bt_link_info->a2dp_exist) {
-                               RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
-                                        "[BTCoex], BT Profile = A2DP only\n");
-                               algorithm = BT_8723B_1ANT_COEX_ALGO_A2DP;
-                       } else if (bt_link_info->pan_exist) {
-                               if (bt_hs_on) {
-                                       RT_TRACE(rtlpriv, COMP_BT_COEXIST,
-                                                DBG_LOUD,
-                                                "[BTCoex], BT Profile = PAN(HS) only\n");
-                                       algorithm =
-                                               BT_8723B_1ANT_COEX_ALGO_PANHS;
-                               } else {
-                                       RT_TRACE(rtlpriv, COMP_BT_COEXIST,
-                                                DBG_LOUD,
-                                                "[BTCoex], BT Profile = PAN(EDR) only\n");
-                                       algorithm =
-                                               BT_8723B_1ANT_COEX_ALGO_PANEDR;
-                               }
-                       }
-               }
-       } else if (numdiffprofile == 2) {
-               if (bt_link_info->sco_exist) {
-                       if (bt_link_info->hid_exist) {
-                               RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
-                                        "[BTCoex], BT Profile = SCO + HID\n");
-                               algorithm = BT_8723B_1ANT_COEX_ALGO_HID;
-                       } else if (bt_link_info->a2dp_exist) {
-                               RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
-                                        "[BTCoex], BT Profile = SCO + A2DP ==> SCO\n");
-                               algorithm = BT_8723B_1ANT_COEX_ALGO_SCO;
-                       } else if (bt_link_info->pan_exist) {
-                               if (bt_hs_on) {
-                                       RT_TRACE(rtlpriv, COMP_BT_COEXIST,
-                                                DBG_LOUD,
-                                                "[BTCoex], BT Profile = SCO + PAN(HS)\n");
-                                       algorithm = BT_8723B_1ANT_COEX_ALGO_SCO;
-                               } else {
-                                       RT_TRACE(rtlpriv, COMP_BT_COEXIST,
-                                                DBG_LOUD,
-                                                "[BTCoex], BT Profile = SCO + PAN(EDR)\n");
-                                       algorithm =
-                                           BT_8723B_1ANT_COEX_ALGO_PANEDR_HID;
-                               }
-                       }
-               } else {
-                       if (bt_link_info->hid_exist &&
-                           bt_link_info->a2dp_exist) {
-                               RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
-                                        "[BTCoex], BT Profile = HID + A2DP\n");
-                               algorithm = BT_8723B_1ANT_COEX_ALGO_HID_A2DP;
-                       } else if (bt_link_info->hid_exist &&
-                                  bt_link_info->pan_exist) {
-                               if (bt_hs_on) {
-                                       RT_TRACE(rtlpriv, COMP_BT_COEXIST,
-                                                DBG_LOUD,
-                                                "[BTCoex], BT Profile = HID + PAN(HS)\n");
-                                       algorithm =
-                                           BT_8723B_1ANT_COEX_ALGO_HID_A2DP;
-                               } else {
-                                       RT_TRACE(rtlpriv, COMP_BT_COEXIST,
-                                                DBG_LOUD,
-                                                "[BTCoex], BT Profile = HID + PAN(EDR)\n");
-                                       algorithm =
-                                           BT_8723B_1ANT_COEX_ALGO_PANEDR_HID;
-                               }
-                       } else if (bt_link_info->pan_exist &&
-                                  bt_link_info->a2dp_exist) {
-                               if (bt_hs_on) {
-                                       RT_TRACE(rtlpriv, COMP_BT_COEXIST,
-                                                DBG_LOUD,
-                                                "[BTCoex], BT Profile = A2DP + PAN(HS)\n");
-                                       algorithm =
-                                           BT_8723B_1ANT_COEX_ALGO_A2DP_PANHS;
-                               } else {
-                                       RT_TRACE(rtlpriv, COMP_BT_COEXIST,
-                                                DBG_LOUD,
-                                                "[BTCoex], BT Profile = A2DP + PAN(EDR)\n");
-                                       algorithm =
-                                           BT_8723B_1ANT_COEX_ALGO_PANEDR_A2DP;
-                               }
-                       }
-               }
-       } else if (numdiffprofile == 3) {
-               if (bt_link_info->sco_exist) {
-                       if (bt_link_info->hid_exist &&
-                           bt_link_info->a2dp_exist) {
-                               RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
-                                        "[BTCoex], BT Profile = SCO + HID + A2DP ==> HID\n");
-                               algorithm = BT_8723B_1ANT_COEX_ALGO_HID;
-                       } else if (bt_link_info->hid_exist &&
-                                  bt_link_info->pan_exist) {
-                               if (bt_hs_on) {
-                                       RT_TRACE(rtlpriv, COMP_BT_COEXIST,
-                                                DBG_LOUD,
-                                                "[BTCoex], BT Profile = SCO + HID + PAN(HS)\n");
-                                       algorithm =
-                                           BT_8723B_1ANT_COEX_ALGO_HID_A2DP;
-                               } else {
-                                       RT_TRACE(rtlpriv, COMP_BT_COEXIST,
-                                                DBG_LOUD,
-                                                "[BTCoex], BT Profile = SCO + HID + PAN(EDR)\n");
-                                       algorithm =
-                                           BT_8723B_1ANT_COEX_ALGO_PANEDR_HID;
-                               }
-                       } else if (bt_link_info->pan_exist &&
-                                  bt_link_info->a2dp_exist) {
-                               if (bt_hs_on) {
-                                       RT_TRACE(rtlpriv, COMP_BT_COEXIST,
-                                                DBG_LOUD,
-                                                "[BTCoex], BT Profile = SCO + A2DP + PAN(HS)\n");
-                                       algorithm = BT_8723B_1ANT_COEX_ALGO_SCO;
-                               } else {
-                                       RT_TRACE(rtlpriv, COMP_BT_COEXIST,
-                                                DBG_LOUD,
-                                                "[BTCoex], BT Profile = SCO + A2DP + PAN(EDR) ==> HID\n");
-                                       algorithm =
-                                           BT_8723B_1ANT_COEX_ALGO_PANEDR_HID;
-                               }
-                       }
-               } else {
-                       if (bt_link_info->hid_exist &&
-                           bt_link_info->pan_exist &&
-                           bt_link_info->a2dp_exist) {
-                               if (bt_hs_on) {
-                                       RT_TRACE(rtlpriv, COMP_BT_COEXIST,
-                                                DBG_LOUD,
-                                                "[BTCoex], BT Profile = HID + A2DP + PAN(HS)\n");
-                                       algorithm =
-                                           BT_8723B_1ANT_COEX_ALGO_HID_A2DP;
-                               } else {
-                                       RT_TRACE(rtlpriv, COMP_BT_COEXIST,
-                                                DBG_LOUD,
-                                                "[BTCoex], BT Profile = HID + A2DP + PAN(EDR)\n");
-                                       algorithm =
-                                           BT_8723B_1ANT_COEX_ALGO_HID_A2DP_PANEDR;
-                               }
-                       }
-               }
-       } else if (numdiffprofile >= 3) {
-               if (bt_link_info->sco_exist) {
-                       if (bt_link_info->hid_exist &&
-                           bt_link_info->pan_exist &&
-                           bt_link_info->a2dp_exist) {
-                               if (bt_hs_on) {
-                                       RT_TRACE(rtlpriv, COMP_BT_COEXIST,
-                                                DBG_LOUD,
-                                                "[BTCoex], Error!!! BT Profile = SCO + HID + A2DP + PAN(HS)\n");
-                               } else {
-                                       RT_TRACE(rtlpriv, COMP_BT_COEXIST,
-                                                DBG_LOUD,
-                                                "[BTCoex], BT Profile = SCO + HID + A2DP + PAN(EDR)==>PAN(EDR)+HID\n");
-                                       algorithm =
-                                           BT_8723B_1ANT_COEX_ALGO_PANEDR_HID;
-                               }
-                       }
-               }
-       }
-
-       return algorithm;
-}
-
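The block removed above is the tail of the 1-ant algorithm chooser: the set of concurrently active BT profiles (SCO, HID, A2DP, PAN) plus the hotspot flag selects one BT_8723B_1ANT_COEX_ALGO_* constant. A minimal standalone sketch of one branch of that decision, with illustrative enum names standing in for the driver's constants:

#include <stdbool.h>
#include <stdio.h>

enum algo { ALGO_HID_A2DP, ALGO_HID_A2DP_PANEDR };

/* Mirrors the "three profiles, no SCO" branch above: concurrent
 * HID + A2DP + PAN is treated as HID+A2DP when PAN rides the hotspot
 * (HS) link, and as HID+A2DP+PAN(EDR) otherwise. */
static enum algo pick_algo(bool hid, bool a2dp, bool pan, bool bt_hs_on)
{
        if (hid && a2dp && pan)
                return bt_hs_on ? ALGO_HID_A2DP : ALGO_HID_A2DP_PANEDR;
        return ALGO_HID_A2DP;   /* other combinations: see the branches above */
}

int main(void)
{
        printf("%d\n", pick_algo(true, true, true, false));
        return 0;
}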
 static void btc8723b1ant_set_sw_pen_tx_rate_adapt(struct btc_coexist *btcoexist,
                                                  bool low_penalty_ra)
 {
@@ -721,11 +343,11 @@ static void btc8723b1ant_set_sw_pen_tx_rate_adapt(struct btc_coexist *btcoexist,
 
        if (low_penalty_ra) {
                h2c_parameter[1] |= BIT0;
-               /*normal rate except MCS7/6/5, OFDM54/48/36 */
+               /* normal rate except MCS7/6/5, OFDM54/48/36 */
                h2c_parameter[2] = 0x00;
-               h2c_parameter[3] = 0xf7;  /*MCS7 or OFDM54 */
-               h2c_parameter[4] = 0xf8;  /*MCS6 or OFDM48 */
-               h2c_parameter[5] = 0xf9;  /*MCS5 or OFDM36 */
+               h2c_parameter[3] = 0xf7;  /* MCS7 or OFDM54 */
+               h2c_parameter[4] = 0xf8;  /* MCS6 or OFDM48 */
+               h2c_parameter[5] = 0xf9;  /* MCS5 or OFDM36 */
        }
 
        RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
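The hunk above only normalizes comment spacing, but the payload it documents is worth spelling out: bytes 3-5 carry per-rate penalty indices for MCS7/OFDM54, MCS6/OFDM48 and MCS5/OFDM36. A minimal sketch of building that 6-byte H2C buffer; the 0x6 opcode in byte 0 is set earlier in the function, outside this hunk, so treat it as an assumption:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint8_t h2c[6] = {0};

        h2c[0] = 0x6;           /* opcode byte, set outside this hunk (assumption) */
        h2c[1] |= 0x01;         /* BIT0: enable low-penalty rate adaptation */
        h2c[2] = 0x00;          /* normal rate except MCS7/6/5, OFDM54/48/36 */
        h2c[3] = 0xf7;          /* MCS7 or OFDM54 */
        h2c[4] = 0xf8;          /* MCS6 or OFDM48 */
        h2c[5] = 0xf9;          /* MCS5 or OFDM36 */

        for (int i = 0; i < 6; i++)
                printf("h2c[%d]=0x%02x\n", i, h2c[i]);
        return 0;
}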
@@ -846,8 +468,9 @@ static void halbtc8723b1ant_coex_table_with_type(struct btc_coexist *btcoexist,
        }
 }
 
-static void halbtc8723b1ant_SetFwIgnoreWlanAct(struct btc_coexist *btcoexist,
-                                              bool enable)
+static void
+halbtc8723b1ant_set_fw_ignore_wlan_act(struct btc_coexist *btcoexist,
+                                      bool enable)
 {
        struct rtl_priv *rtlpriv = btcoexist->adapter;
        u8 h2c_parameter[1] = {0};
@@ -882,7 +505,7 @@ static void halbtc8723b1ant_ignore_wlan_act(struct btc_coexist *btcoexist,
                    coex_dm->cur_ignore_wlan_act)
                        return;
        }
-       halbtc8723b1ant_SetFwIgnoreWlanAct(btcoexist, enable);
+       halbtc8723b1ant_set_fw_ignore_wlan_act(btcoexist, enable);
 
        coex_dm->pre_ignore_wlan_act = coex_dm->cur_ignore_wlan_act;
 }
@@ -944,9 +567,9 @@ static void halbtc8723b1ant_set_lps_rpwm(struct btc_coexist *btcoexist,
        btcoexist->btc_set(btcoexist, BTC_SET_U1_RPWM_VAL, &rpwm);
 }
 
-static void halbtc8723b1ant_LpsRpwm(struct btc_coexist *btcoexist,
-                                   bool force_exec,
-                                   u8 lps_val, u8 rpwm_val)
+static void halbtc8723b1ant_lps_rpwm(struct btc_coexist *btcoexist,
+                                    bool force_exec,
+                                    u8 lps_val, u8 rpwm_val)
 {
        struct rtl_priv *rtlpriv = btcoexist->adapter;
 
@@ -987,9 +610,9 @@ static void halbtc8723b1ant_sw_mechanism(struct btc_coexist *btcoexist,
        halbtc8723b1ant_low_penalty_ra(btcoexist, NORMAL_EXEC, low_penalty_ra);
 }
 
-static void halbtc8723b1ant_SetAntPath(struct btc_coexist *btcoexist,
-                                      u8 ant_pos_type, bool init_hw_cfg,
-                               bool wifi_off)
+static void halbtc8723b1ant_set_ant_path(struct btc_coexist *btcoexist,
+                                        u8 ant_pos_type, bool init_hw_cfg,
+                                        bool wifi_off)
 {
        struct btc_board_info *board_info = &btcoexist->board_info;
        u32 fw_ver = 0, u32tmp = 0;
@@ -1028,7 +651,7 @@ static void halbtc8723b1ant_SetAntPath(struct btc_coexist *btcoexist,
        if (use_ext_switch) {
                if (init_hw_cfg) {
                        /* 0x4c[23] = 0, 0x4c[24] = 1
-                        *      Antenna control by WL/BT
+                        * Antenna control by WL/BT
                         */
                        u32tmp = btcoexist->btc_read_4byte(btcoexist, 0x4c);
                        u32tmp &= ~BIT23;
@@ -1037,35 +660,36 @@ static void halbtc8723b1ant_SetAntPath(struct btc_coexist *btcoexist,
 
                        if (board_info->btdm_ant_pos ==
                            BTC_ANTENNA_AT_MAIN_PORT) {
-                               /* Main Ant to  BT for IPS case 0x4c[23] = 1 */
+                               /* Main Ant to BT for IPS case 0x4c[23] = 1 */
                                btcoexist->btc_write_1byte_bitmask(btcoexist,
                                                                   0x64, 0x1,
                                                                   0x1);
 
-                               /*tell firmware "no antenna inverse"*/
+                               /* tell firmware "no antenna inverse" */
                                h2c_parameter[0] = 0;
                                h2c_parameter[1] = 1;  /*ext switch type*/
                                btcoexist->btc_fill_h2c(btcoexist, 0x65, 2,
                                                        h2c_parameter);
                        } else {
-                               /*Aux Ant to  BT for IPS case 0x4c[23] = 1 */
+                               /* Aux Ant to BT for IPS case 0x4c[23] = 1 */
                                btcoexist->btc_write_1byte_bitmask(btcoexist,
                                                                   0x64, 0x1,
                                                                   0x0);
 
-                               /*tell firmware "antenna inverse"*/
+                               /* tell firmware "antenna inverse" */
                                h2c_parameter[0] = 1;
-                               h2c_parameter[1] = 1;  /*ext switch type*/
+                               h2c_parameter[1] = 1; /* ext switch type */
                                btcoexist->btc_fill_h2c(btcoexist, 0x65, 2,
                                                        h2c_parameter);
                        }
                }
 
-               /* fixed internal switch first*/
-               /* fixed internal switch S1->WiFi, S0->BT*/
+               /* fixed internal switch first
+                * fixed internal switch S1->WiFi, S0->BT
+                */
                if (board_info->btdm_ant_pos == BTC_ANTENNA_AT_MAIN_PORT)
                        btcoexist->btc_write_2byte(btcoexist, 0x948, 0x0);
-               else/* fixed internal switch S0->WiFi, S1->BT*/
+               else    /* fixed internal switch S0->WiFi, S1->BT */
                        btcoexist->btc_write_2byte(btcoexist, 0x948, 0x280);
 
                /* ext switch setting */
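For reference, the two fixed-internal-switch writes above reduce to a single 16-bit register value; a sketch, with the register semantics taken from the comments in this hunk rather than a datasheet:

#include <stdint.h>
#include <stdio.h>

/* 0x948 = 0x000: S1 -> WiFi, S0 -> BT  (antenna at main port)
 * 0x948 = 0x280: S0 -> WiFi, S1 -> BT  (antenna at aux port) */
static uint16_t internal_switch_value(int ant_at_main_port)
{
        return ant_at_main_port ? 0x0000 : 0x0280;
}

int main(void)
{
        printf("main=0x%03x aux=0x%03x\n",
               internal_switch_value(1), internal_switch_value(0));
        return 0;
}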
@@ -1108,7 +732,7 @@ static void halbtc8723b1ant_SetAntPath(struct btc_coexist *btcoexist,
 
        } else {
                if (init_hw_cfg) {
-                       /* 0x4c[23] = 1, 0x4c[24] = 0  Antenna control by 0x64*/
+                       /* 0x4c[23] = 1, 0x4c[24] = 0 Antenna control by 0x64 */
                        u32tmp = btcoexist->btc_read_4byte(btcoexist, 0x4c);
                        u32tmp |= BIT23;
                        u32tmp &= ~BIT24;
@@ -1116,41 +740,42 @@ static void halbtc8723b1ant_SetAntPath(struct btc_coexist *btcoexist,
 
                        if (board_info->btdm_ant_pos ==
                            BTC_ANTENNA_AT_MAIN_PORT) {
-                               /*Main Ant to  WiFi for IPS case 0x4c[23] = 1*/
+                               /* Main Ant to WiFi for IPS case 0x4c[23] = 1 */
                                btcoexist->btc_write_1byte_bitmask(btcoexist,
                                                                   0x64, 0x1,
                                                                   0x0);
 
-                               /*tell firmware "no antenna inverse"*/
+                               /* tell firmware "no antenna inverse" */
                                h2c_parameter[0] = 0;
-                               h2c_parameter[1] = 0;  /*internal switch type*/
+                               h2c_parameter[1] = 0; /* internal switch type */
                                btcoexist->btc_fill_h2c(btcoexist, 0x65, 2,
                                                        h2c_parameter);
                        } else {
-                               /*Aux Ant to  BT for IPS case 0x4c[23] = 1*/
+                               /* Aux Ant to BT for IPS case 0x4c[23] = 1 */
                                btcoexist->btc_write_1byte_bitmask(btcoexist,
                                                                   0x64, 0x1,
                                                                   0x1);
 
-                               /*tell firmware "antenna inverse"*/
+                               /* tell firmware "antenna inverse" */
                                h2c_parameter[0] = 1;
-                               h2c_parameter[1] = 0;  /*internal switch type*/
+                               h2c_parameter[1] = 0; /* internal switch type */
                                btcoexist->btc_fill_h2c(btcoexist, 0x65, 2,
                                                        h2c_parameter);
                        }
                }
 
-               /* fixed external switch first*/
-               /*Main->WiFi, Aux->BT*/
+               /* fixed external switch first
+                * Main->WiFi, Aux->BT
+                */
                if (board_info->btdm_ant_pos ==
                        BTC_ANTENNA_AT_MAIN_PORT)
                        btcoexist->btc_write_1byte_bitmask(btcoexist, 0x92c,
                                                           0x3, 0x1);
-               else/*Main->BT, Aux->WiFi */
+               else    /* Main->BT, Aux->WiFi */
                        btcoexist->btc_write_1byte_bitmask(btcoexist, 0x92c,
                                                           0x3, 0x2);
 
-               /* internal switch setting*/
+               /* internal switch setting */
                switch (ant_pos_type) {
                case BTC_ANT_PATH_WIFI:
                        if (board_info->btdm_ant_pos ==
@@ -1365,7 +990,7 @@ static void halbtc8723b1ant_ps_tdma(struct btc_coexist *btcoexist,
                        halbtc8723b1ant_set_fw_ps_tdma(btcoexist, 0xd3, 0x12,
                                                       0x3, 0x14, 0x50);
                        break;
-               /* SoftAP only with no sta associated,BT disable ,
+               /* SoftAP only with no sta associated, BT disable,
                 * TDMA mode for power saving
                 * here softap mode screen off will cost 70-80mA for phone
                 */
@@ -1376,24 +1001,29 @@ static void halbtc8723b1ant_ps_tdma(struct btc_coexist *btcoexist,
                }
        } else {
                switch (type) {
-               case 8: /*PTA Control */
+               case 8: /* PTA Control */
                        halbtc8723b1ant_set_fw_ps_tdma(btcoexist, 0x8, 0x0,
                                                       0x0, 0x0, 0x0);
-                       halbtc8723b1ant_SetAntPath(btcoexist, BTC_ANT_PATH_PTA,
-                                                  false, false);
+                       halbtc8723b1ant_set_ant_path(btcoexist,
+                                                    BTC_ANT_PATH_PTA,
+                                                    false, false);
                        break;
                case 0:
-               default:  /*Software control, Antenna at BT side */
+               default:
+                       /* Software control, Antenna at BT side */
                        halbtc8723b1ant_set_fw_ps_tdma(btcoexist, 0x0, 0x0,
                                                       0x0, 0x0, 0x0);
-                       halbtc8723b1ant_SetAntPath(btcoexist, BTC_ANT_PATH_BT,
-                                                  false, false);
+                       halbtc8723b1ant_set_ant_path(btcoexist,
+                                                    BTC_ANT_PATH_BT,
+                                                    false, false);
                        break;
-               case 9:   /*Software control, Antenna at WiFi side */
+               case 9:
+                       /* Software control, Antenna at WiFi side */
                        halbtc8723b1ant_set_fw_ps_tdma(btcoexist, 0x0, 0x0,
                                                       0x0, 0x0, 0x0);
-                       halbtc8723b1ant_SetAntPath(btcoexist, BTC_ANT_PATH_WIFI,
-                                                  false, false);
+                       halbtc8723b1ant_set_ant_path(btcoexist,
+                                                    BTC_ANT_PATH_WIFI,
+                                                    false, false);
                        break;
                }
        }
@@ -1407,247 +1037,15 @@ static void halbtc8723b1ant_ps_tdma(struct btc_coexist *btcoexist,
        coex_dm->pre_ps_tdma = coex_dm->cur_ps_tdma;
 }
 
-static bool halbtc8723b1ant_is_common_action(struct btc_coexist *btcoexist)
-{
-       struct rtl_priv *rtlpriv = btcoexist->adapter;
-       bool commom = false, wifi_connected = false;
-       bool wifi_busy = false;
-
-       btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_CONNECTED,
-                          &wifi_connected);
-       btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_BUSY, &wifi_busy);
-
-       if (!wifi_connected &&
-           BT_8723B_1ANT_BT_STATUS_NON_CONNECTED_IDLE == coex_dm->bt_status) {
-               RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
-                        "[BTCoex], Wifi non connected-idle + BT non connected-idle!!\n");
-               halbtc8723b1ant_sw_mechanism(btcoexist, false);
-               commom = true;
-       } else if (wifi_connected &&
-                  (BT_8723B_1ANT_BT_STATUS_NON_CONNECTED_IDLE ==
-                   coex_dm->bt_status)) {
-               RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
-                        "[BTCoex], Wifi connected + BT non connected-idle!!\n");
-               halbtc8723b1ant_sw_mechanism(btcoexist, false);
-               commom = true;
-       } else if (!wifi_connected &&
-                  (BT_8723B_1ANT_BT_STATUS_CONNECTED_IDLE ==
-                   coex_dm->bt_status)) {
-               RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
-                        "[BTCoex], Wifi non connected-idle + BT connected-idle!!\n");
-               halbtc8723b1ant_sw_mechanism(btcoexist, false);
-               commom = true;
-       } else if (wifi_connected &&
-                  (BT_8723B_1ANT_BT_STATUS_CONNECTED_IDLE ==
-                   coex_dm->bt_status)) {
-               RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
-                        "[BTCoex], Wifi connected + BT connected-idle!!\n");
-               halbtc8723b1ant_sw_mechanism(btcoexist, false);
-               commom = true;
-       } else if (!wifi_connected &&
-                  (BT_8723B_1ANT_BT_STATUS_CONNECTED_IDLE !=
-                   coex_dm->bt_status)) {
-               RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
-                        "[BTCoex], Wifi non connected-idle + BT Busy!!\n");
-               halbtc8723b1ant_sw_mechanism(btcoexist, false);
-               commom = true;
-       } else {
-               if (wifi_busy)
-                       RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
-                                "[BTCoex], Wifi Connected-Busy + BT Busy!!\n");
-               else
-                       RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
-                                "[BTCoex], Wifi Connected-Idle + BT Busy!!\n");
-
-               commom = false;
-       }
-
-       return commom;
-}
-
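Condensed, the helper removed above returns true (stay on the common, software-only path) for every pairing except "WiFi connected and BT busy"; all the idle branches just log and disable the software mechanism. The predicate it implements, as a sketch:

#include <stdbool.h>
#include <stdio.h>

/* bt_busy: bt_status is neither non-connected-idle nor connected-idle. */
static bool is_common_case(bool wifi_connected, bool bt_busy)
{
        /* Only "WiFi connected + BT busy" falls through to the
         * profile-specific handlers; every other pairing is common. */
        return !(wifi_connected && bt_busy);
}

int main(void)
{
        printf("%d %d\n", is_common_case(true, true), is_common_case(false, true));
        return 0;
}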
-static void btc8723b1ant_tdma_dur_adj_for_acl(struct btc_coexist *btcoexist,
-                                             u8 wifi_status)
-{
-       struct rtl_priv *rtlpriv = btcoexist->adapter;
-       static s32 up, dn, m, n, wait_count;
-       /* 0: no change, +1: increase WiFi duration,
-        * -1: decrease WiFi duration
-        */
-       s32 result;
-       u8 retry_count = 0, bt_info_ext;
-       bool wifi_busy = false;
-
-       RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
-                "[BTCoex], TdmaDurationAdjustForAcl()\n");
-
-       if (BT_8723B_1ANT_WIFI_STATUS_CONNECTED_BUSY == wifi_status)
-               wifi_busy = true;
-       else
-               wifi_busy = false;
-
-       if ((BT_8723B_1ANT_WIFI_STATUS_NON_CONNECTED_ASSO_AUTH_SCAN ==
-                                                        wifi_status) ||
-           (BT_8723B_1ANT_WIFI_STATUS_CONNECTED_SCAN == wifi_status) ||
-           (BT_8723B_1ANT_WIFI_STATUS_CONNECTED_SPECIAL_PKT == wifi_status)) {
-               if (coex_dm->cur_ps_tdma != 1 && coex_dm->cur_ps_tdma != 2 &&
-                   coex_dm->cur_ps_tdma != 3 && coex_dm->cur_ps_tdma != 9) {
-                       halbtc8723b1ant_ps_tdma(btcoexist, NORMAL_EXEC,
-                                               true, 9);
-                       coex_dm->tdma_adj_type = 9;
-
-                       up = 0;
-                       dn = 0;
-                       m = 1;
-                       n = 3;
-                       result = 0;
-                       wait_count = 0;
-               }
-               return;
-       }
-
-       if (!coex_dm->auto_tdma_adjust) {
-               coex_dm->auto_tdma_adjust = true;
-               RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
-                        "[BTCoex], first run TdmaDurationAdjust()!!\n");
-
-               halbtc8723b1ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 2);
-               coex_dm->tdma_adj_type = 2;
-
-               up = 0;
-               dn = 0;
-               m = 1;
-               n = 3;
-               result = 0;
-               wait_count = 0;
-       } else {
-               /*accquire the BT TRx retry count from BT_Info byte2 */
-               retry_count = coex_sta->bt_retry_cnt;
-               bt_info_ext = coex_sta->bt_info_ext;
-               result = 0;
-               wait_count++;
-               /* no retry in the last 2-second duration */
-               if (retry_count == 0) {
-                       up++;
-                       dn--;
-
-                       if (dn <= 0)
-                               dn = 0;
-
-                       if (up >= n) {
-                               wait_count = 0;
-                               n = 3;
-                               up = 0;
-                               dn = 0;
-                               result = 1;
-                               RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
-                                        "[BTCoex], Increase wifi duration!!\n");
-                       }
-               } else if (retry_count <= 3) {
-                       up--;
-                       dn++;
-
-                       if (up <= 0)
-                               up = 0;
-
-                       if (dn == 2) {
-                               if (wait_count <= 2)
-                                       m++;
-                               else
-                                       m = 1;
-
-                               if (m >= 20)
-                                       m = 20;
-
-                               n = 3 * m;
-                               up = 0;
-                               dn = 0;
-                               wait_count = 0;
-                               result = -1;
-                               RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
-                                        "[BTCoex], Decrease wifi duration for retryCounter<3!!\n");
-                       }
-               } else {
-                       if (wait_count == 1)
-                               m++;
-                       else
-                               m = 1;
-
-                       if (m >= 20)
-                               m = 20;
-
-                       n = 3 * m;
-                       up = 0;
-                       dn = 0;
-                       wait_count = 0;
-                       result = -1;
-                       RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
-                                "[BTCoex], Decrease wifi duration for retryCounter>3!!\n");
-               }
-
-               if (result == -1) {
-                       if ((BT_INFO_8723B_1ANT_A2DP_BASIC_RATE(bt_info_ext)) &&
-                           ((coex_dm->cur_ps_tdma == 1) ||
-                            (coex_dm->cur_ps_tdma == 2))) {
-                               halbtc8723b1ant_ps_tdma(btcoexist, NORMAL_EXEC,
-                                                       true, 9);
-                               coex_dm->tdma_adj_type = 9;
-                       } else if (coex_dm->cur_ps_tdma == 1) {
-                               halbtc8723b1ant_ps_tdma(btcoexist, NORMAL_EXEC,
-                                                       true, 2);
-                               coex_dm->tdma_adj_type = 2;
-                       } else if (coex_dm->cur_ps_tdma == 2) {
-                               halbtc8723b1ant_ps_tdma(btcoexist, NORMAL_EXEC,
-                                                       true, 9);
-                               coex_dm->tdma_adj_type = 9;
-                       } else if (coex_dm->cur_ps_tdma == 9) {
-                               halbtc8723b1ant_ps_tdma(btcoexist, NORMAL_EXEC,
-                                                       true, 11);
-                               coex_dm->tdma_adj_type = 11;
-                       }
-               } else if (result == 1) {
-                       if ((BT_INFO_8723B_1ANT_A2DP_BASIC_RATE(bt_info_ext)) &&
-                           ((coex_dm->cur_ps_tdma == 1) ||
-                            (coex_dm->cur_ps_tdma == 2))) {
-                               halbtc8723b1ant_ps_tdma(btcoexist, NORMAL_EXEC,
-                                                       true, 9);
-                               coex_dm->tdma_adj_type = 9;
-                       } else if (coex_dm->cur_ps_tdma == 11) {
-                               halbtc8723b1ant_ps_tdma(btcoexist, NORMAL_EXEC,
-                                                       true, 9);
-                               coex_dm->tdma_adj_type = 9;
-                       } else if (coex_dm->cur_ps_tdma == 9) {
-                               halbtc8723b1ant_ps_tdma(btcoexist, NORMAL_EXEC,
-                                                       true, 2);
-                               coex_dm->tdma_adj_type = 2;
-                       } else if (coex_dm->cur_ps_tdma == 2) {
-                               halbtc8723b1ant_ps_tdma(btcoexist, NORMAL_EXEC,
-                                                       true, 1);
-                               coex_dm->tdma_adj_type = 1;
-                       }
-               } else {          /*no change */
-                       /*if busy / idle change */
-                       RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
-                                "[BTCoex],********* TDMA(on, %d) ********\n",
-                                coex_dm->cur_ps_tdma);
-               }
-
-               if (coex_dm->cur_ps_tdma != 1 && coex_dm->cur_ps_tdma != 2 &&
-                   coex_dm->cur_ps_tdma != 9 && coex_dm->cur_ps_tdma != 11) {
-                       /* recover to previous adjust type */
-                       halbtc8723b1ant_ps_tdma(btcoexist, NORMAL_EXEC, true,
-                                               coex_dm->tdma_adj_type);
-               }
-       }
-}
-
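The function removed above is the ACL TDMA duration governor: retry-free 2-second polls vote toward a longer WiFi slot, polls that saw BT retries vote toward a shorter one, and every shrink decision stretches the confirmation window (n = 3 * m, m capped at 20) so the duty cycle does not oscillate. A standalone model of just that hysteresis core, leaving out the driver state and the tdma-type table:

#include <stdio.h>

static int up, dn, m = 1, n = 3, wait_count;

/* One 2-second poll: returns +1 (lengthen WiFi slot), -1 (shorten), 0. */
static int tdma_vote(int retry_count)
{
        int result = 0;

        wait_count++;
        if (retry_count == 0) {         /* clean interval: creep upward */
                up++;
                if (--dn < 0)
                        dn = 0;
                if (up >= n) {
                        up = dn = wait_count = 0;
                        n = 3;
                        result = 1;
                }
        } else if (retry_count <= 3) {  /* light retries: two strikes shrink */
                dn++;
                if (--up < 0)
                        up = 0;
                if (dn == 2) {
                        m = (wait_count <= 2) ? m + 1 : 1;
                        if (m > 20)
                                m = 20;
                        n = 3 * m;
                        up = dn = wait_count = 0;
                        result = -1;
                }
        } else {                        /* heavy retries: shrink immediately */
                m = (wait_count == 1) ? m + 1 : 1;
                if (m > 20)
                        m = 20;
                n = 3 * m;
                up = dn = wait_count = 0;
                result = -1;
        }
        return result;
}

int main(void)
{
        int retries[] = { 0, 0, 0, 2, 2, 5, 0, 0, 0 };

        for (unsigned int i = 0; i < sizeof(retries) / sizeof(retries[0]); i++)
                printf("retry=%d -> vote %+d\n", retries[i], tdma_vote(retries[i]));
        return 0;
}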
-static void btc8723b1ant_pstdmachkpwrsave(struct btc_coexist *btcoexist,
-                                         bool new_ps_state)
+static void halbtc8723b1ant_ps_tdma_chk_pwr_save(struct btc_coexist *btcoexist,
+                                                bool new_ps_state)
 {
        u8 lps_mode = 0x0;
 
        btcoexist->btc_get(btcoexist, BTC_GET_U1_LPS_MODE, &lps_mode);
 
-       if (lps_mode) { /* already under LPS state */
+       if (lps_mode) {
+               /* already under LPS state */
                if (new_ps_state) {
                        /* keep state under LPS, do nothing. */
                } else {
@@ -1655,7 +1053,8 @@ static void btc8723b1ant_pstdmachkpwrsave(struct btc_coexist *btcoexist,
                        halbtc8723b1ant_ps_tdma(btcoexist, NORMAL_EXEC,
                                                false, 0);
                }
-       } else {        /* NO PS state */
+       } else {
+               /* NO PS state */
                if (new_ps_state) {
                        /* will enter LPS state, turn off psTdma first */
                        halbtc8723b1ant_ps_tdma(btcoexist, NORMAL_EXEC,
@@ -1681,18 +1080,18 @@ static void halbtc8723b1ant_power_save_state(struct btc_coexist *btcoexist,
                btcoexist->btc_set(btcoexist, BTC_SET_ACT_NORMAL_LPS, NULL);
                break;
        case BTC_PS_LPS_ON:
-               btc8723b1ant_pstdmachkpwrsave(btcoexist, true);
-               halbtc8723b1ant_LpsRpwm(btcoexist, NORMAL_EXEC, lps_val,
-                                       rpwm_val);
-               /* when coex force to enter LPS, do not enter 32k low power. */
+               halbtc8723b1ant_ps_tdma_chk_pwr_save(btcoexist, true);
+               halbtc8723b1ant_lps_rpwm(btcoexist, NORMAL_EXEC, lps_val,
+                                        rpwm_val);
+               /* when coex forces LPS entry, do not enter 32k low power */
                low_pwr_disable = true;
                btcoexist->btc_set(btcoexist, BTC_SET_ACT_DISABLE_LOW_POWER,
                                   &low_pwr_disable);
-               /* power save must executed before psTdma.       */
+               /* power save must be executed before psTdma */
                btcoexist->btc_set(btcoexist, BTC_SET_ACT_ENTER_LPS, NULL);
                break;
        case BTC_PS_LPS_OFF:
-               btc8723b1ant_pstdmachkpwrsave(btcoexist, false);
+               halbtc8723b1ant_ps_tdma_chk_pwr_save(btcoexist, false);
                btcoexist->btc_set(btcoexist, BTC_SET_ACT_LEAVE_LPS, NULL);
                break;
        default:
@@ -1700,66 +1099,6 @@ static void halbtc8723b1ant_power_save_state(struct btc_coexist *btcoexist,
        }
 }
 
-/***************************************************
- *
- *     Software Coex Mechanism start
- *
- ***************************************************/
-/* SCO only or SCO+PAN(HS) */
-static void halbtc8723b1ant_action_sco(struct btc_coexist *btcoexist)
-{
-       halbtc8723b1ant_sw_mechanism(btcoexist, true);
-}
-
-static void halbtc8723b1ant_action_hid(struct btc_coexist *btcoexist)
-{
-       halbtc8723b1ant_sw_mechanism(btcoexist, true);
-}
-
-/*A2DP only / PAN(EDR) only/ A2DP+PAN(HS) */
-static void halbtc8723b1ant_action_a2dp(struct btc_coexist *btcoexist)
-{
-       halbtc8723b1ant_sw_mechanism(btcoexist, false);
-}
-
-static void halbtc8723b1ant_action_a2dp_pan_hs(struct btc_coexist *btcoexist)
-{
-       halbtc8723b1ant_sw_mechanism(btcoexist, false);
-}
-
-static void halbtc8723b1ant_action_pan_edr(struct btc_coexist *btcoexist)
-{
-       halbtc8723b1ant_sw_mechanism(btcoexist, false);
-}
-
-/* PAN(HS) only */
-static void halbtc8723b1ant_action_pan_hs(struct btc_coexist *btcoexist)
-{
-       halbtc8723b1ant_sw_mechanism(btcoexist, false);
-}
-
-/*PAN(EDR)+A2DP */
-static void halbtc8723b1ant_action_pan_edr_a2dp(struct btc_coexist *btcoexist)
-{
-       halbtc8723b1ant_sw_mechanism(btcoexist, false);
-}
-
-static void halbtc8723b1ant_action_pan_edr_hid(struct btc_coexist *btcoexist)
-{
-       halbtc8723b1ant_sw_mechanism(btcoexist, true);
-}
-
-/* HID+A2DP+PAN(EDR) */
-static void btc8723b1ant_action_hid_a2dp_pan_edr(struct btc_coexist *btcoexist)
-{
-       halbtc8723b1ant_sw_mechanism(btcoexist, true);
-}
-
-static void halbtc8723b1ant_action_hid_a2dp(struct btc_coexist *btcoexist)
-{
-       halbtc8723b1ant_sw_mechanism(btcoexist, true);
-}
-
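All ten removed action handlers were one-line wrappers around halbtc8723b1ant_sw_mechanism(), differing only in the low-penalty-RA boolean: profiles carrying SCO or HID passed true, A2DP/PAN-only profiles passed false. The same mapping as one helper, with shortened enum names:

#include <stdbool.h>
#include <stdio.h>

enum algo { SCO, HID, A2DP, A2DP_PANHS, PANEDR, PANHS,
            PANEDR_A2DP, PANEDR_HID, HID_A2DP_PANEDR, HID_A2DP };

static bool low_penalty_ra_for(enum algo a)
{
        switch (a) {
        case SCO:
        case HID:
        case PANEDR_HID:
        case HID_A2DP_PANEDR:
        case HID_A2DP:
                return true;    /* wrappers that passed true above */
        default:
                return false;   /* A2DP/PAN-only wrappers passed false */
        }
}

int main(void)
{
        printf("HID_A2DP -> %d, A2DP -> %d\n",
               low_penalty_ra_for(HID_A2DP), low_penalty_ra_for(A2DP));
        return 0;
}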
 /*****************************************************
  *
  *     Non-Software Coex Mechanism start
@@ -1826,11 +1165,11 @@ static void btc8723b1ant_act_bt_sco_hid_only_busy(struct btc_coexist *btcoexist,
                           &wifi_connected);
 
        /* tdma and coex table */
-
        if (bt_link_info->sco_exist) {
                halbtc8723b1ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 5);
                halbtc8723b1ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 2);
-       } else { /* HID */
+       } else {
+               /* HID */
                halbtc8723b1ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 6);
                halbtc8723b1ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 5);
        }
@@ -1840,30 +1179,21 @@ static void halbtc8723b1ant_action_wifi_connected_bt_acl_busy(
                                        struct btc_coexist *btcoexist,
                                        u8 wifi_status)
 {
-       u8 bt_rssi_state;
-
        struct btc_bt_link_info *bt_link_info = &btcoexist->bt_link_info;
 
-       bt_rssi_state = halbtc8723b1ant_bt_rssi_state(btcoexist, 2, 28, 0);
 
-       if (bt_link_info->hid_only) {  /*HID */
+       if (bt_link_info->hid_only) { /* HID */
                btc8723b1ant_act_bt_sco_hid_only_busy(btcoexist, wifi_status);
                coex_dm->auto_tdma_adjust = false;
                return;
-       } else if (bt_link_info->a2dp_only) { /*A2DP */
-               if (BT_8723B_1ANT_WIFI_STATUS_CONNECTED_IDLE == wifi_status) {
+       } else if (bt_link_info->a2dp_only) { /* A2DP */
+               if (wifi_status == BT_8723B_1ANT_WIFI_STATUS_CONNECTED_IDLE) {
                        halbtc8723b1ant_ps_tdma(btcoexist, NORMAL_EXEC,
                                                false, 8);
                        halbtc8723b1ant_coex_table_with_type(btcoexist,
                                                             NORMAL_EXEC, 2);
                        coex_dm->auto_tdma_adjust = false;
-               } else if ((bt_rssi_state == BTC_RSSI_STATE_HIGH) ||
-                          (bt_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
-                       btc8723b1ant_tdma_dur_adj_for_acl(btcoexist,
-                                                         wifi_status);
-                       halbtc8723b1ant_coex_table_with_type(btcoexist,
-                                                            NORMAL_EXEC, 1);
-               } else { /*for low BT RSSI */
+               } else { /* for low BT RSSI */
                        halbtc8723b1ant_ps_tdma(btcoexist, NORMAL_EXEC,
                                                true, 11);
                        halbtc8723b1ant_coex_table_with_type(btcoexist,
@@ -1871,18 +1201,18 @@ static void halbtc8723b1ant_action_wifi_connected_bt_acl_busy(
                        coex_dm->auto_tdma_adjust = false;
                }
        } else if (bt_link_info->hid_exist &&
-                       bt_link_info->a2dp_exist) { /*HID+A2DP */
-               halbtc8723b1ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 14);
+               bt_link_info->a2dp_exist) { /* HID + A2DP */
+               halbtc8723b1ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 14);
                coex_dm->auto_tdma_adjust = false;
 
                halbtc8723b1ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 6);
-        /*PAN(OPP,FTP), HID+PAN(OPP,FTP) */
+        /* PAN(OPP,FTP), HID + PAN(OPP,FTP) */
        } else if (bt_link_info->pan_only ||
                   (bt_link_info->hid_exist && bt_link_info->pan_exist)) {
                halbtc8723b1ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 3);
                halbtc8723b1ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 6);
                coex_dm->auto_tdma_adjust = false;
-        /*A2DP+PAN(OPP,FTP), HID+A2DP+PAN(OPP,FTP)*/
+        /* A2DP + PAN(OPP,FTP), HID + A2DP + PAN(OPP,FTP) */
        } else if ((bt_link_info->a2dp_exist && bt_link_info->pan_exist) ||
                   (bt_link_info->hid_exist && bt_link_info->a2dp_exist &&
                    bt_link_info->pan_exist)) {
@@ -1907,57 +1237,59 @@ static void btc8723b1ant_action_wifi_not_conn(struct btc_coexist *btcoexist)
        halbtc8723b1ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 0);
 }
 
-static void btc8723b1ant_action_wifi_not_conn_scan(struct btc_coexist *btcoex)
+static void
+btc8723b1ant_action_wifi_not_conn_scan(struct btc_coexist *btcoexist)
 {
-       struct btc_bt_link_info *bt_link_info = &btcoex->bt_link_info;
+       struct btc_bt_link_info *bt_link_info = &btcoexist->bt_link_info;
 
-       halbtc8723b1ant_power_save_state(btcoex, BTC_PS_WIFI_NATIVE,
+       halbtc8723b1ant_power_save_state(btcoexist, BTC_PS_WIFI_NATIVE,
                                         0x0, 0x0);
 
        /* tdma and coex table */
        if (BT_8723B_1ANT_BT_STATUS_ACL_BUSY == coex_dm->bt_status) {
                if (bt_link_info->a2dp_exist && bt_link_info->pan_exist) {
-                       halbtc8723b1ant_ps_tdma(btcoex, NORMAL_EXEC,
+                       halbtc8723b1ant_ps_tdma(btcoexist, NORMAL_EXEC,
                                                true, 22);
-                       halbtc8723b1ant_coex_table_with_type(btcoex,
+                       halbtc8723b1ant_coex_table_with_type(btcoexist,
                                                             NORMAL_EXEC, 1);
                } else if (bt_link_info->pan_only) {
-                       halbtc8723b1ant_ps_tdma(btcoex, NORMAL_EXEC,
+                       halbtc8723b1ant_ps_tdma(btcoexist, NORMAL_EXEC,
                                                true, 20);
-                       halbtc8723b1ant_coex_table_with_type(btcoex,
+                       halbtc8723b1ant_coex_table_with_type(btcoexist,
                                                             NORMAL_EXEC, 2);
                } else {
-                       halbtc8723b1ant_ps_tdma(btcoex, NORMAL_EXEC,
+                       halbtc8723b1ant_ps_tdma(btcoexist, NORMAL_EXEC,
                                                true, 20);
-                       halbtc8723b1ant_coex_table_with_type(btcoex,
+                       halbtc8723b1ant_coex_table_with_type(btcoexist,
                                                             NORMAL_EXEC, 1);
                }
        } else if ((BT_8723B_1ANT_BT_STATUS_SCO_BUSY == coex_dm->bt_status) ||
                   (BT_8723B_1ANT_BT_STATUS_ACL_SCO_BUSY ==
                    coex_dm->bt_status)){
-               btc8723b1ant_act_bt_sco_hid_only_busy(btcoex,
+               btc8723b1ant_act_bt_sco_hid_only_busy(btcoexist,
                                BT_8723B_1ANT_WIFI_STATUS_CONNECTED_SCAN);
        } else {
-               halbtc8723b1ant_ps_tdma(btcoex, NORMAL_EXEC, false, 8);
-               halbtc8723b1ant_coex_table_with_type(btcoex, NORMAL_EXEC, 2);
+               halbtc8723b1ant_ps_tdma(btcoexist, NORMAL_EXEC, false, 8);
+               halbtc8723b1ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 2);
        }
 }
 
-static void btc8723b1ant_act_wifi_not_conn_asso_auth(struct btc_coexist *btcoex)
+static void
+btc8723b1ant_act_wifi_not_conn_asso_auth(struct btc_coexist *btcoexist)
 {
-       struct btc_bt_link_info *bt_link_info = &btcoex->bt_link_info;
+       struct btc_bt_link_info *bt_link_info = &btcoexist->bt_link_info;
 
-       halbtc8723b1ant_power_save_state(btcoex, BTC_PS_WIFI_NATIVE,
+       halbtc8723b1ant_power_save_state(btcoexist, BTC_PS_WIFI_NATIVE,
                                         0x0, 0x0);
 
        if ((BT_8723B_1ANT_BT_STATUS_CONNECTED_IDLE == coex_dm->bt_status) ||
            (bt_link_info->sco_exist) || (bt_link_info->hid_only) ||
            (bt_link_info->a2dp_only) || (bt_link_info->pan_only)) {
-               halbtc8723b1ant_ps_tdma(btcoex, NORMAL_EXEC, false, 8);
-               halbtc8723b1ant_coex_table_with_type(btcoex, NORMAL_EXEC, 7);
+               halbtc8723b1ant_ps_tdma(btcoexist, NORMAL_EXEC, false, 8);
+               halbtc8723b1ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 7);
        } else {
-               halbtc8723b1ant_ps_tdma(btcoex, NORMAL_EXEC, true, 20);
-               halbtc8723b1ant_coex_table_with_type(btcoex, NORMAL_EXEC, 1);
+               halbtc8723b1ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 20);
+               halbtc8723b1ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 1);
        }
 }
 
@@ -2109,75 +1441,6 @@ static void halbtc8723b1ant_action_wifi_connected(struct btc_coexist *btcoexist)
        }
 }
 
-static void btc8723b1ant_run_sw_coex_mech(struct btc_coexist *btcoexist)
-{
-       struct rtl_priv *rtlpriv = btcoexist->adapter;
-       u8 algorithm = 0;
-
-       algorithm = halbtc8723b1ant_action_algorithm(btcoexist);
-       coex_dm->cur_algorithm = algorithm;
-
-       if (!halbtc8723b1ant_is_common_action(btcoexist)) {
-               switch (coex_dm->cur_algorithm) {
-               case BT_8723B_1ANT_COEX_ALGO_SCO:
-                       RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
-                                "[BTCoex], Action algorithm = SCO\n");
-                       halbtc8723b1ant_action_sco(btcoexist);
-                       break;
-               case BT_8723B_1ANT_COEX_ALGO_HID:
-                       RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
-                                "[BTCoex], Action algorithm = HID\n");
-                       halbtc8723b1ant_action_hid(btcoexist);
-                       break;
-               case BT_8723B_1ANT_COEX_ALGO_A2DP:
-                       RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
-                                "[BTCoex], Action algorithm = A2DP\n");
-                       halbtc8723b1ant_action_a2dp(btcoexist);
-                       break;
-               case BT_8723B_1ANT_COEX_ALGO_A2DP_PANHS:
-                       RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
-                                "[BTCoex], Action algorithm = A2DP+PAN(HS)\n");
-                       halbtc8723b1ant_action_a2dp_pan_hs(btcoexist);
-                       break;
-               case BT_8723B_1ANT_COEX_ALGO_PANEDR:
-                       RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
-                                "[BTCoex], Action algorithm = PAN(EDR)\n");
-                       halbtc8723b1ant_action_pan_edr(btcoexist);
-                       break;
-               case BT_8723B_1ANT_COEX_ALGO_PANHS:
-                       RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
-                                "[BTCoex], Action algorithm = HS mode\n");
-                       halbtc8723b1ant_action_pan_hs(btcoexist);
-                       break;
-               case BT_8723B_1ANT_COEX_ALGO_PANEDR_A2DP:
-                       RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
-                                "[BTCoex], Action algorithm = PAN+A2DP\n");
-                       halbtc8723b1ant_action_pan_edr_a2dp(btcoexist);
-                       break;
-               case BT_8723B_1ANT_COEX_ALGO_PANEDR_HID:
-                       RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
-                                "[BTCoex], Action algorithm = PAN(EDR)+HID\n");
-                       halbtc8723b1ant_action_pan_edr_hid(btcoexist);
-                       break;
-               case BT_8723B_1ANT_COEX_ALGO_HID_A2DP_PANEDR:
-                       RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
-                                "[BTCoex], Action algorithm = HID+A2DP+PAN\n");
-                       btc8723b1ant_action_hid_a2dp_pan_edr(btcoexist);
-                       break;
-               case BT_8723B_1ANT_COEX_ALGO_HID_A2DP:
-                       RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
-                                "[BTCoex], Action algorithm = HID+A2DP\n");
-                       halbtc8723b1ant_action_hid_a2dp(btcoexist);
-                       break;
-               default:
-                       RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
-                                "[BTCoex], Action algorithm = coexist All Off!!\n");
-                       break;
-               }
-               coex_dm->pre_algorithm = coex_dm->cur_algorithm;
-       }
-}
-
 static void halbtc8723b1ant_run_coexist_mechanism(struct btc_coexist *btcoexist)
 {
        struct rtl_priv *rtlpriv = btcoexist->adapter;
@@ -2186,7 +1449,6 @@ static void halbtc8723b1ant_run_coexist_mechanism(struct btc_coexist *btcoexist)
        bool increase_scan_dev_num = false;
        bool bt_ctrl_agg_buf_size = false;
        u8 agg_buf_size = 5;
-       u8 wifi_rssi_state = BTC_RSSI_STATE_HIGH;
        u32 wifi_link_status = 0;
        u32 num_of_wifi_link = 0;
 
@@ -2238,16 +1500,12 @@ static void halbtc8723b1ant_run_coexist_mechanism(struct btc_coexist *btcoexist)
        if (!bt_link_info->sco_exist && !bt_link_info->hid_exist) {
                halbtc8723b1ant_limited_tx(btcoexist, NORMAL_EXEC, 0, 0, 0, 0);
        } else {
-               if (wifi_connected) {
-                       wifi_rssi_state =
-                               halbtc8723b1ant_wifi_rssi_state(btcoexist,
-                                                               1, 2, 30, 0);
+               if (wifi_connected)
                        halbtc8723b1ant_limited_tx(btcoexist,
                                                   NORMAL_EXEC, 1, 1, 1, 1);
-               } else {
+               else
                        halbtc8723b1ant_limited_tx(btcoexist, NORMAL_EXEC,
                                                   0, 0, 0, 0);
-               }
        }
 
        if (bt_link_info->sco_exist) {
@@ -2263,8 +1521,6 @@ static void halbtc8723b1ant_run_coexist_mechanism(struct btc_coexist *btcoexist)
        halbtc8723b1ant_limited_rx(btcoexist, NORMAL_EXEC, false,
                                   bt_ctrl_agg_buf_size, agg_buf_size);
 
-       btc8723b1ant_run_sw_coex_mech(btcoexist);
-
        btcoexist->btc_get(btcoexist, BTC_GET_BL_HS_OPERATION, &bt_hs_on);
 
        if (coex_sta->c2h_bt_inquiry_page) {
@@ -2364,28 +1620,19 @@ static void halbtc8723b1ant_init_hw_config(struct btc_coexist *btcoexist,
        btcoexist->btc_write_1byte(btcoexist, 0x790, u8tmp);
 
        /* Enable counter statistics */
-       /*0x76e[3] =1, WLAN_Act control by PTA */
+       /* 0x76e[3] = 1, WLAN_Act control by PTA */
        btcoexist->btc_write_1byte(btcoexist, 0x76e, 0xc);
        btcoexist->btc_write_1byte(btcoexist, 0x778, 0x1);
        btcoexist->btc_write_1byte_bitmask(btcoexist, 0x40, 0x20, 0x1);
 
-       /*Antenna config */
-       halbtc8723b1ant_SetAntPath(btcoexist, BTC_ANT_PATH_PTA, true, false);
+       /* Antenna config */
+       halbtc8723b1ant_set_ant_path(btcoexist, BTC_ANT_PATH_PTA, true, false);
        /* PTA parameter */
        halbtc8723b1ant_coex_table_with_type(btcoexist, FORCE_EXEC, 0);
 }
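The 0x76e write above sets 0xc, i.e. BIT3 and BIT2; per the comment, bit 3 hands WLAN_Act control to the PTA (bit 2 is not documented in this file). A trivial check of the bit arithmetic:

#include <assert.h>
#include <stdint.h>

int main(void)
{
        uint8_t val = 0xc;              /* value written to 0x76e above */

        assert(val & (1u << 3));        /* bit 3: WLAN_Act controlled by PTA */
        assert(val & (1u << 2));        /* bit 2: also set, undocumented here */
        return 0;
}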
 
-static void halbtc8723b1ant_wifi_off_hw_cfg(struct btc_coexist *btcoexist)
-{
-       /* set wlan_act to low */
-       btcoexist->btc_write_1byte(btcoexist, 0x76e, 0x4);
-}
-
 /**************************************************************
- * work around function start with wa_halbtc8723b1ant_
- **************************************************************/
-/**************************************************************
- * extern function start with EXhalbtc8723b1ant_
+ * extern functions start with ex_halbtc8723b1ant_
  **************************************************************/
 
 void ex_halbtc8723b1ant_init_hwconfig(struct btc_coexist *btcoexist)
@@ -2539,7 +1786,7 @@ void ex_halbtc8723b1ant_display_coex_info(struct btc_coexist *btcoexist)
                if (coex_sta->bt_info_c2h_cnt[i]) {
                        RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
                                 "\r\n %-35s = %7ph(%d)",
-                                GLBtInfoSrc8723b1Ant[i],
+                                glbt_info_src_8723b_1ant[i],
                                 coex_sta->bt_info_c2h[i],
                                 coex_sta->bt_info_c2h_cnt[i]);
                }
@@ -2697,13 +1944,12 @@ void ex_halbtc8723b1ant_ips_notify(struct btc_coexist *btcoexist, u8 type)
                         "[BTCoex], IPS ENTER notify\n");
                coex_sta->under_ips = true;
 
-               halbtc8723b1ant_SetAntPath(btcoexist, BTC_ANT_PATH_BT,
-                                          false, true);
+               halbtc8723b1ant_set_ant_path(btcoexist, BTC_ANT_PATH_BT,
+                                            false, true);
                /* set PTA control */
                halbtc8723b1ant_ps_tdma(btcoexist, NORMAL_EXEC, false, 0);
                halbtc8723b1ant_coex_table_with_type(btcoexist,
                                                     NORMAL_EXEC, 0);
-               halbtc8723b1ant_wifi_off_hw_cfg(btcoexist);
        } else if (BTC_IPS_LEAVE == type) {
                RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
                         "[BTCoex], IPS LEAVE notify\n");
@@ -2774,14 +2020,17 @@ void ex_halbtc8723b1ant_scan_notify(struct btc_coexist *btcoexist, u8 type)
        if (BTC_SCAN_START == type) {
                RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
                         "[BTCoex], SCAN START notify\n");
-               if (!wifi_connected)    /* non-connected scan */
+               if (!wifi_connected)
+                       /* non-connected scan */
                        btc8723b1ant_action_wifi_not_conn_scan(btcoexist);
-               else    /* wifi is connected */
+               else
+                       /* wifi is connected */
                        btc8723b1ant_action_wifi_conn_scan(btcoexist);
        } else if (BTC_SCAN_FINISH == type) {
                RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
                         "[BTCoex], SCAN FINISH notify\n");
-               if (!wifi_connected)    /* non-connected scan */
+               if (!wifi_connected)
+                       /* non-connected scan */
                        btc8723b1ant_action_wifi_not_conn(btcoexist);
                else
                        halbtc8723b1ant_action_wifi_connected(btcoexist);
@@ -2831,7 +2080,8 @@ void ex_halbtc8723b1ant_connect_notify(struct btc_coexist *btcoexist, u8 type)
 
                btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_CONNECTED,
                                   &wifi_connected);
-               if (!wifi_connected) /* non-connected scan */
+               if (!wifi_connected)
+                       /* non-connected scan */
                        btc8723b1ant_action_wifi_not_conn(btcoexist);
                else
                        halbtc8723b1ant_action_wifi_connected(btcoexist);
@@ -3020,7 +2270,8 @@ void ex_halbtc8723b1ant_bt_info_notify(struct btc_coexist *btcoexist,
                coex_sta->a2dp_exist = false;
                coex_sta->hid_exist = false;
                coex_sta->sco_exist = false;
-       } else { /* connection exists */
+       } else {
+               /* connection exists */
                coex_sta->bt_link_exist = true;
                if (bt_info & BT_INFO_8723B_1ANT_B_FTP)
                        coex_sta->pan_exist = true;
@@ -3089,9 +2340,8 @@ void ex_halbtc8723b1ant_halt_notify(struct btc_coexist *btcoexist)
 
        btcoexist->stop_coex_dm = true;
 
-       halbtc8723b1ant_SetAntPath(btcoexist, BTC_ANT_PATH_BT, false, true);
+       halbtc8723b1ant_set_ant_path(btcoexist, BTC_ANT_PATH_BT, false, true);
 
-       halbtc8723b1ant_wifi_off_hw_cfg(btcoexist);
        halbtc8723b1ant_ignore_wlan_act(btcoexist, FORCE_EXEC, true);
 
        halbtc8723b1ant_power_save_state(btcoexist, BTC_PS_WIFI_NATIVE,
@@ -3111,13 +2361,12 @@ void ex_halbtc8723b1ant_pnp_notify(struct btc_coexist *btcoexist, u8 pnp_state)
                RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
                         "[BTCoex], Pnp notify to SLEEP\n");
                btcoexist->stop_coex_dm = true;
-               halbtc8723b1ant_SetAntPath(btcoexist, BTC_ANT_PATH_BT, false,
-                                          true);
+               halbtc8723b1ant_set_ant_path(btcoexist, BTC_ANT_PATH_BT, false,
+                                            true);
                halbtc8723b1ant_power_save_state(btcoexist, BTC_PS_WIFI_NATIVE,
                                                 0x0, 0x0);
                halbtc8723b1ant_ps_tdma(btcoexist, NORMAL_EXEC, false, 0);
                halbtc8723b1ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 2);
-               halbtc8723b1ant_wifi_off_hw_cfg(btcoexist);
        } else if (BTC_WIFI_PNP_WAKE_UP == pnp_state) {
                RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
                         "[BTCoex], Pnp notify to WAKE UP\n");
index 12125966a911140fd0b32a7d153cf47b0dc75a68..2f3946be4ce29bdb581753b515faa78bf7750be8 100644 (file)
@@ -240,9 +240,33 @@ static u8 btc8723b2ant_wifi_rssi_state(struct btc_coexist *btcoexist,
        return wifi_rssi_state;
 }
 
+static
+void btc8723b2ant_limited_rx(struct btc_coexist *btcoexist, bool force_exec,
+                            bool rej_ap_agg_pkt, bool bt_ctrl_agg_buf_size,
+                            u8 agg_buf_size)
+{
+       bool reject_rx_agg = rej_ap_agg_pkt;
+       bool bt_ctrl_rx_agg_size = bt_ctrl_agg_buf_size;
+       u8 rx_agg_size = agg_buf_size;
+
+       /* ============================================ */
+       /*      Rx Aggregation related setting          */
+       /* ============================================ */
+       btcoexist->btc_set(btcoexist, BTC_SET_BL_TO_REJ_AP_AGG_PKT,
+                          &reject_rx_agg);
+       /* decide BT control aggregation buf size or not */
+       btcoexist->btc_set(btcoexist, BTC_SET_BL_BT_CTRL_AGG_SIZE,
+                          &bt_ctrl_rx_agg_size);
+       /* aggregate buf size, only work when BT control Rx aggregate size */
+       btcoexist->btc_set(btcoexist, BTC_SET_U1_AGG_BUF_SIZE, &rx_agg_size);
+       /* real update aggregation setting */
+       btcoexist->btc_set(btcoexist, BTC_SET_ACT_AGGREGATE_CTRL, NULL);
+}
+
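The new helper follows the staging pattern used throughout this callback API: three BTC_SET_* calls stage the reject/ctrl/size values, then BTC_SET_ACT_AGGREGATE_CTRL applies them in one shot. A toy mock of that contract; the enum IDs mirror the driver's names, but the store-and-apply implementation is purely illustrative:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

enum { SET_BL_TO_REJ_AP_AGG_PKT, SET_BL_BT_CTRL_AGG_SIZE,
       SET_U1_AGG_BUF_SIZE, SET_ACT_AGGREGATE_CTRL };

static struct { bool reject; bool bt_ctrl; uint8_t size; } staged;

static void btc_set(int id, const void *val)
{
        switch (id) {
        case SET_BL_TO_REJ_AP_AGG_PKT:
                staged.reject = *(const bool *)val;
                break;
        case SET_BL_BT_CTRL_AGG_SIZE:
                staged.bt_ctrl = *(const bool *)val;
                break;
        case SET_U1_AGG_BUF_SIZE:
                staged.size = *(const uint8_t *)val;
                break;
        case SET_ACT_AGGREGATE_CTRL:    /* apply everything staged above */
                printf("apply: rej=%d bt_ctrl=%d size=%u\n",
                       staged.reject, staged.bt_ctrl,
                       (unsigned int)staged.size);
                break;
        }
}

int main(void)
{
        bool rej = false, ctrl = true;
        uint8_t size = 5;

        btc_set(SET_BL_TO_REJ_AP_AGG_PKT, &rej);
        btc_set(SET_BL_BT_CTRL_AGG_SIZE, &ctrl);
        btc_set(SET_U1_AGG_BUF_SIZE, &size);
        btc_set(SET_ACT_AGGREGATE_CTRL, NULL);
        return 0;
}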
 static void btc8723b2ant_monitor_bt_ctr(struct btc_coexist *btcoexist)
 {
        struct rtl_priv *rtlpriv = btcoexist->adapter;
+       struct btc_bt_link_info *bt_link_info = &btcoexist->bt_link_info;
        u32 reg_hp_txrx, reg_lp_txrx, u32tmp;
        u32 reg_hp_tx = 0, reg_hp_rx = 0;
        u32 reg_lp_tx = 0, reg_lp_rx = 0;
@@ -263,6 +287,17 @@ static void btc8723b2ant_monitor_bt_ctr(struct btc_coexist *btcoexist)
        coex_sta->low_priority_tx = reg_lp_tx;
        coex_sta->low_priority_rx = reg_lp_rx;
 
+       if ((coex_sta->low_priority_tx > 1050) &&
+           (!coex_sta->c2h_bt_inquiry_page))
+               coex_sta->pop_event_cnt++;
+
+       if ((coex_sta->low_priority_rx >= 950) &&
+           (coex_sta->low_priority_rx >= coex_sta->low_priority_tx) &&
+           (!coex_sta->under_ips))
+               bt_link_info->slave_role = true;
+       else
+               bt_link_info->slave_role = false;
+
        RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
                 "[BTCoex], High Priority Tx/Rx(reg 0x%x)=0x%x(%d)/0x%x(%d)\n",
                 reg_hp_txrx, reg_hp_tx, reg_hp_tx, reg_hp_rx, reg_hp_rx);
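The two heuristics added above, restated standalone: a low-priority TX count above 1050 outside inquiry/page is counted as a pop event, and a low-priority RX count of at least 950 that also dominates TX (outside IPS) marks BT as the slave role. The 1050/950 thresholds are empirical driver tuning values, not documented hardware limits:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static bool bt_pop_event(uint32_t lp_tx, bool inquiry_page)
{
        return lp_tx > 1050 && !inquiry_page;
}

static bool bt_is_slave(uint32_t lp_rx, uint32_t lp_tx, bool under_ips)
{
        return lp_rx >= 950 && lp_rx >= lp_tx && !under_ips;
}

int main(void)
{
        printf("pop=%d slave=%d\n",
               bt_pop_event(1100, false), bt_is_slave(1000, 800, false));
        return 0;
}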
@@ -274,6 +309,43 @@ static void btc8723b2ant_monitor_bt_ctr(struct btc_coexist *btcoexist)
        btcoexist->btc_write_1byte(btcoexist, 0x76e, 0xc);
 }
 
+static void btc8723b2ant_monitor_wifi_ctr(struct btc_coexist *btcoexist)
+{
+       if (coex_sta->under_ips) {
+               coex_sta->crc_ok_cck = 0;
+               coex_sta->crc_ok_11g = 0;
+               coex_sta->crc_ok_11n = 0;
+               coex_sta->crc_ok_11n_agg = 0;
+
+               coex_sta->crc_err_cck = 0;
+               coex_sta->crc_err_11g = 0;
+               coex_sta->crc_err_11n = 0;
+               coex_sta->crc_err_11n_agg = 0;
+       } else {
+               coex_sta->crc_ok_cck =
+                       btcoexist->btc_read_4byte(btcoexist, 0xf88);
+               coex_sta->crc_ok_11g =
+                       btcoexist->btc_read_2byte(btcoexist, 0xf94);
+               coex_sta->crc_ok_11n =
+                       btcoexist->btc_read_2byte(btcoexist, 0xf90);
+               coex_sta->crc_ok_11n_agg =
+                       btcoexist->btc_read_2byte(btcoexist, 0xfb8);
+
+               coex_sta->crc_err_cck =
+                       btcoexist->btc_read_4byte(btcoexist, 0xf84);
+               coex_sta->crc_err_11g =
+                       btcoexist->btc_read_2byte(btcoexist, 0xf96);
+               coex_sta->crc_err_11n =
+                       btcoexist->btc_read_2byte(btcoexist, 0xf92);
+               coex_sta->crc_err_11n_agg =
+                       btcoexist->btc_read_2byte(btcoexist, 0xfba);
+       }
+
+       /* reset counter */
+       btcoexist->btc_write_1byte_bitmask(btcoexist, 0xf16, 0x1, 0x1);
+       btcoexist->btc_write_1byte_bitmask(btcoexist, 0xf16, 0x1, 0x0);
+}
+
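One way to consume the OK/error counter pairs sampled above is a per-poll CRC error ratio; a sketch (the driver itself only stores the raw counters here):

#include <stdint.h>
#include <stdio.h>

static int crc_err_percent(uint32_t ok, uint32_t err)
{
        uint64_t total = (uint64_t)ok + err;

        return total ? (int)((uint64_t)err * 100 / total) : 0;
}

int main(void)
{
        printf("cck err = %d%%\n", crc_err_percent(900, 100));
        return 0;
}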
 static void btc8723b2ant_query_bt_info(struct btc_coexist *btcoexist)
 {
        struct rtl_priv *rtlpriv = btcoexist->adapter;
@@ -297,6 +369,8 @@ static bool btc8723b2ant_is_wifi_status_changed(struct btc_coexist *btcoexist)
        static bool pre_bt_hs_on;
        bool wifi_busy = false, under_4way = false, bt_hs_on = false;
        bool wifi_connected = false;
+       u8 wifi_rssi_state = BTC_RSSI_STATE_HIGH;
+       u8 tmp;
 
        btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_CONNECTED,
                           &wifi_connected);
@@ -320,6 +394,15 @@ static bool btc8723b2ant_is_wifi_status_changed(struct btc_coexist *btcoexist)
                        pre_bt_hs_on = bt_hs_on;
                        return true;
                }
+
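+               /* a fresh crossing of the coex switch threshold (state is
+                * HIGH/LOW rather than STAY_*) also counts as a change
+                */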
+               tmp = BT_8723B_2ANT_WIFI_RSSI_COEXSWITCH_THRES -
+                                coex_dm->switch_thres_offset;
+               wifi_rssi_state =
+                    btc8723b2ant_wifi_rssi_state(btcoexist, 0, 2, tmp, 0);
+
+               if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) ||
+                   (wifi_rssi_state == BTC_RSSI_STATE_LOW))
+                       return true;
        }
 
        return false;
@@ -327,11 +410,9 @@ static bool btc8723b2ant_is_wifi_status_changed(struct btc_coexist *btcoexist)
 
 static void btc8723b2ant_update_bt_link_info(struct btc_coexist *btcoexist)
 {
-       /*struct btc_stack_info *stack_info = &btcoexist->stack_info;*/
        struct btc_bt_link_info *bt_link_info = &btcoexist->bt_link_info;
        bool bt_hs_on = false;
 
-#if (BT_AUTO_REPORT_ONLY_8723B_2ANT == 1) /* profile from bt patch */
        btcoexist->btc_get(btcoexist, BTC_GET_BL_HS_OPERATION, &bt_hs_on);
 
        bt_link_info->bt_link_exist = coex_sta->bt_link_exist;
@@ -345,21 +426,7 @@ static void btc8723b2ant_update_bt_link_info(struct btc_coexist *btcoexist)
                bt_link_info->pan_exist = true;
                bt_link_info->bt_link_exist = true;
        }
-#else  /* profile from bt stack */
-       bt_link_info->bt_link_exist = stack_info->bt_link_exist;
-       bt_link_info->sco_exist = stack_info->sco_exist;
-       bt_link_info->a2dp_exist = stack_info->a2dp_exist;
-       bt_link_info->pan_exist = stack_info->pan_exist;
-       bt_link_info->hid_exist = stack_info->hid_exist;
-
-       /*for win-8 stack HID report error*/
-       if (!stack_info->hid_exist)
-               stack_info->hid_exist = coex_sta->hid_exist;
-       /*sync  BTInfo with BT firmware and stack*/
-       /* when stack HID report error, here we use the info from bt fw.*/
-       if (!stack_info->bt_link_exist)
-               stack_info->bt_link_exist = coex_sta->bt_link_exist;
-#endif
+
        /* check if SCO only */
        if (bt_link_info->sco_exist && !bt_link_info->a2dp_exist &&
            !bt_link_info->pan_exist && !bt_link_info->hid_exist)
@@ -584,44 +651,6 @@ static u8 btc8723b2ant_action_algorithm(struct btc_coexist *btcoexist)
        return algorithm;
 }
 
-static bool btc8723b_need_dec_pwr(struct btc_coexist *btcoexist)
-{
-       struct rtl_priv *rtlpriv = btcoexist->adapter;
-       bool ret = false;
-       bool bt_hs_on = false, wifi_connected = false;
-       s32 bt_hs_rssi = 0;
-       u8 bt_rssi_state;
-
-       if (!btcoexist->btc_get(btcoexist, BTC_GET_BL_HS_OPERATION, &bt_hs_on))
-               return false;
-       if (!btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_CONNECTED,
-                               &wifi_connected))
-               return false;
-       if (!btcoexist->btc_get(btcoexist, BTC_GET_S4_HS_RSSI, &bt_hs_rssi))
-               return false;
-
-       bt_rssi_state = btc8723b2ant_bt_rssi_state(btcoexist, 2, 29, 0);
-
-       if (wifi_connected) {
-               if (bt_hs_on) {
-                       if (bt_hs_rssi > 37) {
-                               RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
-                                        "[BTCoex], Need to decrease bt power for HS mode!!\n");
-                               ret = true;
-                       }
-               } else {
-                       if ((bt_rssi_state == BTC_RSSI_STATE_HIGH) ||
-                           (bt_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
-                               RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
-                                        "[BTCoex], Need to decrease bt power for Wifi is connected!!\n");
-                               ret = true;
-                       }
-               }
-       }
-
-       return ret;
-}
-
 static void btc8723b2ant_set_fw_dac_swing_level(struct btc_coexist *btcoexist,
                                                u8 dac_swing_lvl)
 {
@@ -642,44 +671,40 @@ static void btc8723b2ant_set_fw_dac_swing_level(struct btc_coexist *btcoexist,
 }
 
 static void btc8723b2ant_set_fw_dec_bt_pwr(struct btc_coexist *btcoexist,
-                                          bool dec_bt_pwr)
+                                          u8 dec_bt_pwr_lvl)
 {
        struct rtl_priv *rtlpriv = btcoexist->adapter;
        u8 h2c_parameter[1] = {0};
 
-       h2c_parameter[0] = 0;
-
-       if (dec_bt_pwr)
-               h2c_parameter[0] |= BIT1;
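+       /* H2C 0x62 now carries a stepped BT power reduction level instead
+        * of a single on/off bit
+        */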
+       h2c_parameter[0] = dec_bt_pwr_lvl;
 
        RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
-                "[BTCoex], decrease Bt Power : %s, FW write 0x62=0x%x\n",
-                   (dec_bt_pwr ? "Yes!!" : "No!!"), h2c_parameter[0]);
+                "[BTCoex], decrease Bt Power Level : %u\n", dec_bt_pwr_lvl);
 
        btcoexist->btc_fill_h2c(btcoexist, 0x62, 1, h2c_parameter);
 }
 
 static void btc8723b2ant_dec_bt_pwr(struct btc_coexist *btcoexist,
-                                   bool force_exec, bool dec_bt_pwr)
+                                   bool force_exec, u8 dec_bt_pwr_lvl)
 {
        struct rtl_priv *rtlpriv = btcoexist->adapter;
 
        RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
-                "[BTCoex], %s Dec BT power = %s\n",
-                   force_exec ? "force to" : "", dec_bt_pwr ? "ON" : "OFF");
-       coex_dm->cur_dec_bt_pwr = dec_bt_pwr;
+                "[BTCoex], Dec BT power level = %u\n", dec_bt_pwr_lvl);
+       coex_dm->cur_dec_bt_pwr_lvl = dec_bt_pwr_lvl;
 
        if (!force_exec) {
                RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
-                        "[BTCoex], bPreDecBtPwr=%d, bCurDecBtPwr=%d\n",
-                           coex_dm->pre_dec_bt_pwr, coex_dm->cur_dec_bt_pwr);
+                        "[BTCoex], PreDecBtPwrLvl=%d, CurDecBtPwrLvl=%d\n",
+                           coex_dm->pre_dec_bt_pwr_lvl,
+                           coex_dm->cur_dec_bt_pwr_lvl);
 
-               if (coex_dm->pre_dec_bt_pwr == coex_dm->cur_dec_bt_pwr)
+               if (coex_dm->pre_dec_bt_pwr_lvl == coex_dm->cur_dec_bt_pwr_lvl)
                        return;
        }
-       btc8723b2ant_set_fw_dec_bt_pwr(btcoexist, coex_dm->cur_dec_bt_pwr);
+       btc8723b2ant_set_fw_dec_bt_pwr(btcoexist, coex_dm->cur_dec_bt_pwr_lvl);
 
-       coex_dm->pre_dec_bt_pwr = coex_dm->cur_dec_bt_pwr;
+       coex_dm->pre_dec_bt_pwr_lvl = coex_dm->cur_dec_bt_pwr_lvl;
 }
 
 static void btc8723b2ant_fw_dac_swing_lvl(struct btc_coexist *btcoexist,
@@ -708,72 +733,21 @@ static void btc8723b2ant_fw_dac_swing_lvl(struct btc_coexist *btcoexist,
        coex_dm->pre_fw_dac_swing_lvl = coex_dm->cur_fw_dac_swing_lvl;
 }
 
-static void btc8723b2ant_set_sw_rf_rx_lpf_corner(struct btc_coexist *btcoexist,
-                                                bool rx_rf_shrink_on)
-{
-       struct rtl_priv *rtlpriv = btcoexist->adapter;
-
-       if (rx_rf_shrink_on) {
-               /* Shrink RF Rx LPF corner */
-               RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
-                        "[BTCoex], Shrink RF Rx LPF corner!!\n");
-               btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x1e,
-                                         0xfffff, 0xffffc);
-       } else {
-               /* Resume RF Rx LPF corner */
-               /* After initialized, we can use coex_dm->btRf0x1eBackup */
-               if (btcoexist->initilized) {
-                       RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
-                                "[BTCoex], Resume RF Rx LPF corner!!\n");
-                       btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x1e,
-                                                 0xfffff,
-                                                 coex_dm->bt_rf0x1e_backup);
-               }
-       }
-}
-
-static void btc8723b2ant_rf_shrink(struct btc_coexist *btcoexist,
-                                  bool force_exec, bool rx_rf_shrink_on)
-{
-       struct rtl_priv *rtlpriv = btcoexist->adapter;
-
-       RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
-                "[BTCoex], %s turn Rx RF Shrink = %s\n",
-                   (force_exec ? "force to" : ""), (rx_rf_shrink_on ?
-                                                    "ON" : "OFF"));
-       coex_dm->cur_rf_rx_lpf_shrink = rx_rf_shrink_on;
-
-       if (!force_exec) {
-               RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
-                        "[BTCoex], bPreRfRxLpfShrink=%d, bCurRfRxLpfShrink=%d\n",
-                           coex_dm->pre_rf_rx_lpf_shrink,
-                           coex_dm->cur_rf_rx_lpf_shrink);
-
-               if (coex_dm->pre_rf_rx_lpf_shrink ==
-                   coex_dm->cur_rf_rx_lpf_shrink)
-                       return;
-       }
-       btc8723b2ant_set_sw_rf_rx_lpf_corner(btcoexist,
-                                            coex_dm->cur_rf_rx_lpf_shrink);
-
-       coex_dm->pre_rf_rx_lpf_shrink = coex_dm->cur_rf_rx_lpf_shrink;
-}
-
 static void btc8723b_set_penalty_txrate(struct btc_coexist *btcoexist,
                                        bool low_penalty_ra)
 {
        struct rtl_priv *rtlpriv = btcoexist->adapter;
        u8 h2c_parameter[6] = {0};
 
-       h2c_parameter[0] = 0x6; /* opCode, 0x6= Retry_Penalty*/
+       h2c_parameter[0] = 0x6; /* op_code, 0x6 = Retry_Penalty */
 
        if (low_penalty_ra) {
                h2c_parameter[1] |= BIT0;
-               /*normal rate except MCS7/6/5, OFDM54/48/36*/
+               /* normal rate except MCS7/6/5, OFDM54/48/36 */
                h2c_parameter[2] = 0x00;
-               h2c_parameter[3] = 0xf7;  /*MCS7 or OFDM54*/
-               h2c_parameter[4] = 0xf8;  /*MCS6 or OFDM48*/
-               h2c_parameter[5] = 0xf9;  /*MCS5 or OFDM36*/
+               h2c_parameter[3] = 0xf4; /* MCS7 or OFDM54 */
+               h2c_parameter[4] = 0xf5; /* MCS6 or OFDM48 */
+               h2c_parameter[5] = 0xf6; /* MCS5 or OFDM36 */
        }
 
        RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
@@ -788,7 +762,6 @@ static void btc8723b2ant_low_penalty_ra(struct btc_coexist *btcoexist,
 {
        struct rtl_priv *rtlpriv = btcoexist->adapter;
 
-       /*return; */
        RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
                 "[BTCoex], %s turn LowPenaltyRA = %s\n",
                 (force_exec ? "force to" : ""), (low_penalty_ra ?
@@ -830,9 +803,9 @@ static void btc8723b2ant_set_sw_fulltime_dac_swing(struct btc_coexist *btcoex,
                btc8723b2ant_set_dac_swing_reg(btcoex, 0x18);
 }
 
-static void btc8723b2ant_dac_swing(struct btc_coexist *btcoexist,
-                                  bool force_exec, bool dac_swing_on,
-                                  u32 dac_swing_lvl)
+void btc8723b2ant_dac_swing(struct btc_coexist *btcoexist,
+                           bool force_exec, bool dac_swing_on,
+                           u32 dac_swing_lvl)
 {
        struct rtl_priv *rtlpriv = btcoexist->adapter;
 
@@ -863,105 +836,6 @@ static void btc8723b2ant_dac_swing(struct btc_coexist *btcoexist,
        coex_dm->pre_dac_swing_lvl = coex_dm->cur_dac_swing_lvl;
 }
 
-static void btc8723b2ant_set_agc_table(struct btc_coexist *btcoexist,
-                                      bool agc_table_en)
-{
-       struct rtl_priv *rtlpriv = btcoexist->adapter;
-       u8 rssi_adjust_val = 0;
-
-       /*  BB AGC Gain Table */
-       if (agc_table_en) {
-               RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
-                        "[BTCoex], BB Agc Table On!\n");
-               btcoexist->btc_write_4byte(btcoexist, 0xc78, 0x6e1A0001);
-               btcoexist->btc_write_4byte(btcoexist, 0xc78, 0x6d1B0001);
-               btcoexist->btc_write_4byte(btcoexist, 0xc78, 0x6c1C0001);
-               btcoexist->btc_write_4byte(btcoexist, 0xc78, 0x6b1D0001);
-               btcoexist->btc_write_4byte(btcoexist, 0xc78, 0x6a1E0001);
-               btcoexist->btc_write_4byte(btcoexist, 0xc78, 0x691F0001);
-               btcoexist->btc_write_4byte(btcoexist, 0xc78, 0x68200001);
-       } else {
-               RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
-                        "[BTCoex], BB Agc Table Off!\n");
-               btcoexist->btc_write_4byte(btcoexist, 0xc78, 0xaa1A0001);
-               btcoexist->btc_write_4byte(btcoexist, 0xc78, 0xa91B0001);
-               btcoexist->btc_write_4byte(btcoexist, 0xc78, 0xa81C0001);
-               btcoexist->btc_write_4byte(btcoexist, 0xc78, 0xa71D0001);
-               btcoexist->btc_write_4byte(btcoexist, 0xc78, 0xa61E0001);
-               btcoexist->btc_write_4byte(btcoexist, 0xc78, 0xa51F0001);
-               btcoexist->btc_write_4byte(btcoexist, 0xc78, 0xa4200001);
-       }
-
-       /* RF Gain */
-       btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0xef, 0xfffff, 0x02000);
-       if (agc_table_en) {
-               RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
-                        "[BTCoex], Agc Table On!\n");
-               btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x3b,
-                                         0xfffff, 0x38fff);
-               btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x3b,
-                                         0xfffff, 0x38ffe);
-       } else {
-               RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
-                        "[BTCoex], Agc Table Off!\n");
-               btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x3b,
-                                         0xfffff, 0x380c3);
-               btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x3b,
-                                         0xfffff, 0x28ce6);
-       }
-       btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0xef, 0xfffff, 0x0);
-
-       btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0xed, 0xfffff, 0x1);
-
-       if (agc_table_en) {
-               RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
-                        "[BTCoex], Agc Table On!\n");
-               btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x40,
-                                         0xfffff, 0x38fff);
-               btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x40,
-                                         0xfffff, 0x38ffe);
-       } else {
-               RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
-                        "[BTCoex], Agc Table Off!\n");
-               btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x40,
-                                         0xfffff, 0x380c3);
-               btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x40,
-                                         0xfffff, 0x28ce6);
-       }
-       btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0xed, 0xfffff, 0x0);
-
-       /* set rssiAdjustVal for wifi module. */
-       if (agc_table_en)
-               rssi_adjust_val = 8;
-       btcoexist->btc_set(btcoexist, BTC_SET_U1_RSSI_ADJ_VAL_FOR_AGC_TABLE_ON,
-                          &rssi_adjust_val);
-}
-
-static void btc8723b2ant_agc_table(struct btc_coexist *btcoexist,
-                                  bool force_exec, bool agc_table_en)
-{
-       struct rtl_priv *rtlpriv = btcoexist->adapter;
-
-       RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
-                "[BTCoex], %s %s Agc Table\n",
-                (force_exec ? "force to" : ""),
-                (agc_table_en ? "Enable" : "Disable"));
-       coex_dm->cur_agc_table_en = agc_table_en;
-
-       if (!force_exec) {
-               RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
-                        "[BTCoex], bPreAgcTableEn=%d, bCurAgcTableEn=%d\n",
-                        coex_dm->pre_agc_table_en,
-                        coex_dm->cur_agc_table_en);
-
-               if (coex_dm->pre_agc_table_en == coex_dm->cur_agc_table_en)
-                       return;
-       }
-       btc8723b2ant_set_agc_table(btcoexist, agc_table_en);
-
-       coex_dm->pre_agc_table_en = coex_dm->cur_agc_table_en;
-}
-
 static void btc8723b2ant_set_coex_table(struct btc_coexist *btcoexist,
                                        u32 val0x6c0, u32 val0x6c4,
                                        u32 val0x6c8, u8 val0x6cc)
@@ -1026,61 +900,73 @@ static void btc8723b2ant_coex_table(struct btc_coexist *btcoexist,
        coex_dm->pre_val0x6cc = coex_dm->cur_val0x6cc;
 }
 
-static void btc8723b_coex_tbl_type(struct btc_coexist *btcoexist,
-                                  bool force_exec, u8 type)
+static void btc8723b2ant_coex_table_with_type(struct btc_coexist *btcoexist,
+                                             bool force_exec, u8 type)
 {
        switch (type) {
        case 0:
                btc8723b2ant_coex_table(btcoexist, force_exec, 0x55555555,
-                                       0x55555555, 0xffff, 0x3);
+                                       0x55555555, 0xffffff, 0x3);
                break;
        case 1:
                btc8723b2ant_coex_table(btcoexist, force_exec, 0x55555555,
-                                       0x5afa5afa, 0xffff, 0x3);
+                                       0x5afa5afa, 0xffffff, 0x3);
                break;
        case 2:
-               btc8723b2ant_coex_table(btcoexist, force_exec, 0x5a5a5a5a,
-                                       0x5a5a5a5a, 0xffff, 0x3);
+               btc8723b2ant_coex_table(btcoexist, force_exec, 0x5ada5ada,
+                                       0x5ada5ada, 0xffffff, 0x3);
                break;
        case 3:
                btc8723b2ant_coex_table(btcoexist, force_exec, 0xaaaaaaaa,
-                                       0xaaaaaaaa, 0xffff, 0x3);
+                                       0xaaaaaaaa, 0xffffff, 0x3);
                break;
        case 4:
                btc8723b2ant_coex_table(btcoexist, force_exec, 0xffffffff,
-                                       0xffffffff, 0xffff, 0x3);
+                                       0xffffffff, 0xffffff, 0x3);
                break;
        case 5:
                btc8723b2ant_coex_table(btcoexist, force_exec, 0x5fff5fff,
-                                       0x5fff5fff, 0xffff, 0x3);
+                                       0x5fff5fff, 0xffffff, 0x3);
                break;
        case 6:
                btc8723b2ant_coex_table(btcoexist, force_exec, 0x55ff55ff,
-                                       0x5a5a5a5a, 0xffff, 0x3);
+                                       0x5a5a5a5a, 0xffffff, 0x3);
                break;
        case 7:
-               btc8723b2ant_coex_table(btcoexist, force_exec, 0x55ff55ff,
-                                       0x5afa5afa, 0xffff, 0x3);
+               btc8723b2ant_coex_table(btcoexist, force_exec, 0x55dd55dd,
+                                       0x5ada5ada, 0xffffff, 0x3);
                break;
        case 8:
-               btc8723b2ant_coex_table(btcoexist, force_exec, 0x5aea5aea,
-                                       0x5aea5aea, 0xffff, 0x3);
+               btc8723b2ant_coex_table(btcoexist, force_exec, 0x55dd55dd,
+                                       0x5ada5ada, 0xffffff, 0x3);
                break;
        case 9:
-               btc8723b2ant_coex_table(btcoexist, force_exec, 0x55ff55ff,
-                                       0x5aea5aea, 0xffff, 0x3);
+               btc8723b2ant_coex_table(btcoexist, force_exec, 0x55dd55dd,
+                                       0x5ada5ada, 0xffffff, 0x3);
                break;
        case 10:
-               btc8723b2ant_coex_table(btcoexist, force_exec, 0x55ff55ff,
-                                       0x5aff5aff, 0xffff, 0x3);
+               btc8723b2ant_coex_table(btcoexist, force_exec, 0x55dd55dd,
+                                       0x5ada5ada, 0xffffff, 0x3);
                break;
        case 11:
-               btc8723b2ant_coex_table(btcoexist, force_exec, 0x55ff55ff,
-                                       0x5a5f5a5f, 0xffff, 0x3);
+               btc8723b2ant_coex_table(btcoexist, force_exec, 0x55dd55dd,
+                                       0x5ada5ada, 0xffffff, 0x3);
                break;
        case 12:
-               btc8723b2ant_coex_table(btcoexist, force_exec, 0x55ff55ff,
-                                       0x5f5f5f5f, 0xffff, 0x3);
+               btc8723b2ant_coex_table(btcoexist, force_exec, 0x55dd55dd,
+                                       0x5ada5ada, 0xffffff, 0x3);
+               break;
+       case 13:
+               btc8723b2ant_coex_table(btcoexist, force_exec, 0x5fff5fff,
+                                       0xaaaaaaaa, 0xffffff, 0x3);
+               break;
+       case 14:
+               btc8723b2ant_coex_table(btcoexist, force_exec, 0x5fff5fff,
+                                       0x5ada5ada, 0xffffff, 0x3);
+               break;
+       case 15:
+               btc8723b2ant_coex_table(btcoexist, force_exec, 0x55dd55dd,
+                                       0xaaaaaaaa, 0xffffff, 0x3);
                break;
        default:
                break;
@@ -1094,7 +980,7 @@ static void btc8723b2ant_set_fw_ignore_wlan_act(struct btc_coexist *btcoexist,
        u8 h2c_parameter[1] = {0};
 
        if (enable)
-               h2c_parameter[0] |= BIT0;/* function enable*/
+               h2c_parameter[0] |= BIT0; /* function enable */
 
        RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
                 "[BTCoex], set FW for BT Ignore Wlan_Act, FW write 0x63=0x%x\n",
@@ -1103,6 +989,33 @@ static void btc8723b2ant_set_fw_ignore_wlan_act(struct btc_coexist *btcoexist,
        btcoexist->btc_fill_h2c(btcoexist, 0x63, 1, h2c_parameter);
 }
 
+static void btc8723b2ant_set_lps_rpwm(struct btc_coexist *btcoexist,
+                                     u8 lps_val, u8 rpwm_val)
+{
+       u8 lps = lps_val;
+       u8 rpwm = rpwm_val;
+
+       btcoexist->btc_set(btcoexist, BTC_SET_U1_LPS_VAL, &lps);
+       btcoexist->btc_set(btcoexist, BTC_SET_U1_RPWM_VAL, &rpwm);
+}
+
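+/* cache the requested LPS/RPWM values and only push them down when one of
+ * them changed, unless force_exec is set
+ */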
+static void btc8723b2ant_lps_rpwm(struct btc_coexist *btcoexist,
+                                 bool force_exec, u8 lps_val, u8 rpwm_val)
+{
+       coex_dm->cur_lps = lps_val;
+       coex_dm->cur_rpwm = rpwm_val;
+
+       if (!force_exec) {
+               if ((coex_dm->pre_lps == coex_dm->cur_lps) &&
+                   (coex_dm->pre_rpwm == coex_dm->cur_rpwm))
+                       return;
+       }
+       btc8723b2ant_set_lps_rpwm(btcoexist, lps_val, rpwm_val);
+
+       coex_dm->pre_lps = coex_dm->cur_lps;
+       coex_dm->pre_rpwm = coex_dm->cur_rpwm;
+}
+
 static void btc8723b2ant_ignore_wlan_act(struct btc_coexist *btcoexist,
                                         bool force_exec, bool enable)
 {
@@ -1133,6 +1046,8 @@ static void btc8723b2ant_set_fw_ps_tdma(struct btc_coexist *btcoexist, u8 byte1,
 {
        struct rtl_priv *rtlpriv = btcoexist->adapter;
        u8 h2c_parameter[5];
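+
+       /* flag concurrent A2DP + HID to the firmware via byte5 bit0 */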
+       if ((coex_sta->a2dp_exist) && (coex_sta->hid_exist))
+               byte5 = byte5 | 0x1;
 
        h2c_parameter[0] = byte1;
        h2c_parameter[1] = byte2;
@@ -1155,23 +1070,13 @@ static void btc8723b2ant_set_fw_ps_tdma(struct btc_coexist *btcoexist, u8 byte1,
        btcoexist->btc_fill_h2c(btcoexist, 0x60, 5, h2c_parameter);
 }
 
-static void btc8723b2ant_sw_mechanism1(struct btc_coexist *btcoexist,
-                                      bool shrink_rx_lpf, bool low_penalty_ra,
-                                      bool limited_dig, bool bt_lna_constrain)
+static void btc8723b2ant_sw_mechanism(struct btc_coexist *btcoexist,
+                                     bool shrink_rx_lpf, bool low_penalty_ra,
+                                     bool limited_dig, bool bt_lna_constrain)
 {
-       btc8723b2ant_rf_shrink(btcoexist, NORMAL_EXEC, shrink_rx_lpf);
        btc8723b2ant_low_penalty_ra(btcoexist, NORMAL_EXEC, low_penalty_ra);
 }
 
-static void btc8723b2ant_sw_mechanism2(struct btc_coexist *btcoexist,
-                                      bool agc_table_shift, bool adc_backoff,
-                                      bool sw_dac_swing, u32 dac_swing_lvl)
-{
-       btc8723b2ant_agc_table(btcoexist, NORMAL_EXEC, agc_table_shift);
-       btc8723b2ant_dac_swing(btcoexist, NORMAL_EXEC, sw_dac_swing,
-                              dac_swing_lvl);
-}
-
 static void btc8723b2ant_set_ant_path(struct btc_coexist *btcoexist,
                                      u8 antpos_type, bool init_hwcfg,
                                      bool wifi_off)
@@ -1189,44 +1094,66 @@ static void btc8723b2ant_set_ant_path(struct btc_coexist *btcoexist,
                use_ext_switch = true;
 
        if (init_hwcfg) {
-               /* 0x4c[23] = 0, 0x4c[24] = 1  Antenna control by WL/BT */
-               u32tmp = btcoexist->btc_read_4byte(btcoexist, 0x4c);
-               u32tmp &= ~BIT23;
-               u32tmp |= BIT24;
-               btcoexist->btc_write_4byte(btcoexist, 0x4c, u32tmp);
-
+               btcoexist->btc_write_1byte_bitmask(btcoexist, 0x39, 0x8, 0x1);
                btcoexist->btc_write_1byte(btcoexist, 0x974, 0xff);
                btcoexist->btc_write_1byte_bitmask(btcoexist, 0x944, 0x3, 0x3);
                btcoexist->btc_write_1byte(btcoexist, 0x930, 0x77);
                btcoexist->btc_write_1byte_bitmask(btcoexist, 0x67, 0x20, 0x1);
 
-               /* Force GNT_BT to low */
-               btcoexist->btc_write_1byte_bitmask(btcoexist, 0x765, 0x18, 0x0);
+               if (fw_ver >= 0x180000) {
+                       /* Use H2C to set GNT_BT to High to avoid A2DP click */
+                       h2c_parameter[0] = 1;
+                       btcoexist->btc_fill_h2c(btcoexist, 0x6E, 1,
+                                               h2c_parameter);
+               } else {
+                       btcoexist->btc_write_1byte(btcoexist, 0x765, 0x18);
+               }
+
+               btcoexist->btc_write_4byte(btcoexist, 0x948, 0x0);
+
+               /* WiFi TRx Mask off */
+               btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A,
+                                         0x1, 0xfffff, 0x0);
 
                if (board_info->btdm_ant_pos == BTC_ANTENNA_AT_MAIN_PORT) {
                        /* tell firmware "no antenna inverse" */
                        h2c_parameter[0] = 0;
-                       h2c_parameter[1] = 1;  /* ext switch type */
-                       btcoexist->btc_fill_h2c(btcoexist, 0x65, 2,
-                                               h2c_parameter);
-                       btcoexist->btc_write_2byte(btcoexist, 0x948, 0x0);
                } else {
                        /* tell firmware "antenna inverse" */
                        h2c_parameter[0] = 1;
-                       h2c_parameter[1] = 1;  /* ext switch type */
-                       btcoexist->btc_fill_h2c(btcoexist, 0x65, 2,
+               }
+
+               if (use_ext_switch) {
+                       /* ext switch type */
+                       h2c_parameter[1] = 1;
+               } else {
+                       /* int switch type */
+                       h2c_parameter[1] = 0;
+               }
+               btcoexist->btc_fill_h2c(btcoexist, 0x65, 2, h2c_parameter);
+       } else {
+               if (fw_ver >= 0x180000) {
+                       /* Use H2C to set GNT_BT to "Control by PTA" */
+                       h2c_parameter[0] = 0;
+                       btcoexist->btc_fill_h2c(btcoexist, 0x6E, 1,
                                                h2c_parameter);
-                       btcoexist->btc_write_2byte(btcoexist, 0x948, 0x280);
+               } else {
+                       btcoexist->btc_write_1byte(btcoexist, 0x765, 0x0);
                }
        }
 
        /* ext switch setting */
        if (use_ext_switch) {
+               if (init_hwcfg) {
+                       /* 0x4c[23] = 0, 0x4c[24] = 1 Ant controlled by WL/BT */
+                       u32tmp = btcoexist->btc_read_4byte(btcoexist, 0x4c);
+                       u32tmp &= ~BIT23;
+                       u32tmp |= BIT24;
+                       btcoexist->btc_write_4byte(btcoexist, 0x4c, u32tmp);
+               }
+
                /* fixed internal switch S1->WiFi, S0->BT */
-               if (board_info->btdm_ant_pos == BTC_ANTENNA_AT_MAIN_PORT)
-                       btcoexist->btc_write_2byte(btcoexist, 0x948, 0x0);
-               else
-                       btcoexist->btc_write_2byte(btcoexist, 0x948, 0x280);
+               btcoexist->btc_write_4byte(btcoexist, 0x948, 0x0);
 
                switch (antpos_type) {
                case BTC_ANT_WIFI_AT_MAIN:
@@ -1240,9 +1167,18 @@ static void btc8723b2ant_set_ant_path(struct btc_coexist *btcoexist,
                                                           0x92c, 0x3, 0x2);
                        break;
                }
-       } else {        /* internal switch */
-               /* fixed ext switch */
-               btcoexist->btc_write_1byte_bitmask(btcoexist, 0x92c, 0x3, 0x1);
+       } else {
+               /* internal switch */
+               if (init_hwcfg) {
+                       /* 0x4c[23] = 0, 0x4c[24] = 1 Ant controlled by WL/BT */
+                       u32tmp = btcoexist->btc_read_4byte(btcoexist, 0x4c);
+                       u32tmp |= BIT23;
+                       u32tmp &= ~BIT24;
+                       btcoexist->btc_write_4byte(btcoexist, 0x4c, u32tmp);
+               }
+
+               /* fixed ext switch, S1->Main, S0->Aux */
+               btcoexist->btc_write_1byte_bitmask(btcoexist, 0x64, 0x1, 0x0);
                switch (antpos_type) {
                case BTC_ANT_WIFI_AT_MAIN:
                        /* fixed internal switch S1->WiFi, S0->BT */
@@ -1260,6 +1196,17 @@ static void btc8723b2ant_ps_tdma(struct btc_coexist *btcoexist, bool force_exec,
                                 bool turn_on, u8 type)
 {
        struct rtl_priv *rtlpriv = btcoexist->adapter;
+       struct btc_bt_link_info *bt_link_info = &btcoexist->bt_link_info;
+       u8 wifi_rssi_state, bt_rssi_state;
+       s8 wifi_duration_adjust = 0x0;
+       u8 tdma_byte4_modify = 0x0;
+       u8 tmp = BT_8723B_2ANT_WIFI_RSSI_COEXSWITCH_THRES -
+                       coex_dm->switch_thres_offset;
+
+       wifi_rssi_state = btc8723b2ant_wifi_rssi_state(btcoexist, 0, 2, tmp, 0);
+       tmp = BT_8723B_2ANT_BT_RSSI_COEXSWITCH_THRES -
+                       coex_dm->switch_thres_offset;
+       bt_rssi_state = btc8723b2ant_bt_rssi_state(btcoexist, 2, tmp, 0);
 
        RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
                 "[BTCoex], %s turn %s PS TDMA, type=%d\n",
@@ -1268,6 +1215,15 @@ static void btc8723b2ant_ps_tdma(struct btc_coexist *btcoexist, bool force_exec,
        coex_dm->cur_ps_tdma_on = turn_on;
        coex_dm->cur_ps_tdma = type;
 
+       if (!(BTC_RSSI_HIGH(wifi_rssi_state) &&
+             BTC_RSSI_HIGH(bt_rssi_state)) && turn_on) {
+               /* for WiFi RSSI low or BT RSSI low */
+               type = type + 100;
+               coex_dm->is_switch_to_1dot5_ant = true;
+       } else {
+               coex_dm->is_switch_to_1dot5_ant = false;
+       }
+
        if (!force_exec) {
                RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
                         "[BTCoex], bPrePsTdmaOn = %d, bCurPsTdmaOn = %d!!\n",
@@ -1280,83 +1236,131 @@ static void btc8723b2ant_ps_tdma(struct btc_coexist *btcoexist, bool force_exec,
                    (coex_dm->pre_ps_tdma == coex_dm->cur_ps_tdma))
                        return;
        }
+
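+       /* shrink the WiFi TDMA slot as the environment gets more crowded
+        * (more APs seen in scan) and as the A2DP bitpool grows, to keep
+        * BT audio glitch-free
+        */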
+       if (coex_sta->scan_ap_num <= 5) {
+               if (coex_sta->a2dp_bit_pool >= 45)
+                       wifi_duration_adjust = -15;
+               else if (coex_sta->a2dp_bit_pool >= 35)
+                       wifi_duration_adjust = -10;
+               else
+                       wifi_duration_adjust = 5;
+       } else if (coex_sta->scan_ap_num <= 20) {
+               if (coex_sta->a2dp_bit_pool >= 45)
+                       wifi_duration_adjust = -15;
+               else if (coex_sta->a2dp_bit_pool >= 35)
+                       wifi_duration_adjust = -10;
+               else
+                       wifi_duration_adjust = 0;
+       } else if (coex_sta->scan_ap_num <= 40) {
+               if (coex_sta->a2dp_bit_pool >= 45)
+                       wifi_duration_adjust = -15;
+               else if (coex_sta->a2dp_bit_pool >= 35)
+                       wifi_duration_adjust = -10;
+               else
+                       wifi_duration_adjust = -5;
+       } else {
+               if (coex_sta->a2dp_bit_pool >= 45)
+                       wifi_duration_adjust = -15;
+               else if (coex_sta->a2dp_bit_pool >= 35)
+                       wifi_duration_adjust = -10;
+               else
+                       wifi_duration_adjust = -10;
+       }
+
+       if ((bt_link_info->slave_role) && (bt_link_info->a2dp_exist))
+               /* set 0x778 = 0x1 in the WiFi slot (do not block BT
+                * low-priority packets)
+                */
+               tdma_byte4_modify = 0x1;
+
        if (turn_on) {
                switch (type) {
                case 1:
                default:
-                       btc8723b2ant_set_fw_ps_tdma(btcoexist, 0xe3, 0x1a,
-                                                   0x1a, 0xe1, 0x90);
+                       btc8723b2ant_set_fw_ps_tdma(
+                               btcoexist, 0xe3, 0x3c,
+                               0x03, 0xf1, 0x90 | tdma_byte4_modify);
                        break;
                case 2:
-                       btc8723b2ant_set_fw_ps_tdma(btcoexist, 0xe3, 0x12,
-                                                   0x12, 0xe1, 0x90);
+                       btc8723b2ant_set_fw_ps_tdma(
+                               btcoexist, 0xe3, 0x2d,
+                               0x03, 0xf1, 0x90 | tdma_byte4_modify);
                        break;
                case 3:
-                       /* This call breaks BT when wireless is active -
-                        * comment it out for now until a better fix is found:
-                        * btc8723b2ant_set_fw_ps_tdma(btcoexist, 0xe3, 0x1c,
-                        *                          0x3, 0xf1, 0x90);
-                        */
+                       btc8723b2ant_set_fw_ps_tdma(btcoexist, 0xe3, 0x1c,
+                                                   0x3, 0xf1,
+                                                   0x90 | tdma_byte4_modify);
                        break;
                case 4:
                        btc8723b2ant_set_fw_ps_tdma(btcoexist, 0xe3, 0x10,
-                                                   0x03, 0xf1, 0x90);
+                                                   0x03, 0xf1,
+                                                   0x90 | tdma_byte4_modify);
                        break;
                case 5:
-                       btc8723b2ant_set_fw_ps_tdma(btcoexist, 0xe3, 0x1a,
-                                                   0x1a, 0x60, 0x90);
+                       btc8723b2ant_set_fw_ps_tdma(
+                               btcoexist, 0xe3, 0x3c,
+                               0x3, 0x70, 0x90 | tdma_byte4_modify);
                        break;
                case 6:
-                       btc8723b2ant_set_fw_ps_tdma(btcoexist, 0xe3, 0x12,
-                                                   0x12, 0x60, 0x90);
+                       btc8723b2ant_set_fw_ps_tdma(
+                               btcoexist, 0xe3, 0x2d,
+                               0x3, 0x70, 0x90 | tdma_byte4_modify);
                        break;
                case 7:
                        btc8723b2ant_set_fw_ps_tdma(btcoexist, 0xe3, 0x1c,
-                                                   0x3, 0x70, 0x90);
+                                                   0x3, 0x70,
+                                                   0x90 | tdma_byte4_modify);
                        break;
                case 8:
                        btc8723b2ant_set_fw_ps_tdma(btcoexist, 0xa3, 0x10,
-                                                   0x3, 0x70, 0x90);
+                                                   0x3, 0x70,
+                                                   0x90 | tdma_byte4_modify);
                        break;
                case 9:
-                       btc8723b2ant_set_fw_ps_tdma(btcoexist, 0xe3, 0x1a,
-                                                   0x1a, 0xe1, 0x90);
+                       btc8723b2ant_set_fw_ps_tdma(
+                               btcoexist, 0xe3, 0x3c + wifi_duration_adjust,
+                               0x03, 0xf1, 0x90 | tdma_byte4_modify);
                        break;
                case 10:
-                       btc8723b2ant_set_fw_ps_tdma(btcoexist, 0xe3, 0x12,
-                                                   0x12, 0xe1, 0x90);
+                       btc8723b2ant_set_fw_ps_tdma(
+                               btcoexist, 0xe3, 0x2d,
+                               0x03, 0xf1, 0x90 | tdma_byte4_modify);
                        break;
                case 11:
-                       btc8723b2ant_set_fw_ps_tdma(btcoexist, 0xe3, 0xa,
-                                                   0xa, 0xe1, 0x90);
+                       btc8723b2ant_set_fw_ps_tdma(btcoexist, 0xe3, 0x1c,
+                                                   0x3, 0xf1,
+                                                   0x90 | tdma_byte4_modify);
                        break;
                case 12:
-                       btc8723b2ant_set_fw_ps_tdma(btcoexist, 0xe3, 0x5,
-                                                   0x5, 0xe1, 0x90);
+                       btc8723b2ant_set_fw_ps_tdma(btcoexist, 0xe3, 0x10,
+                                                   0x3, 0xf1,
+                                                   0x90 | tdma_byte4_modify);
                        break;
                case 13:
-                       btc8723b2ant_set_fw_ps_tdma(btcoexist, 0xe3, 0x1a,
-                                                   0x1a, 0x60, 0x90);
+                       btc8723b2ant_set_fw_ps_tdma(
+                               btcoexist, 0xe3, 0x3c,
+                               0x3, 0x70, 0x90 | tdma_byte4_modify);
                        break;
                case 14:
-                       btc8723b2ant_set_fw_ps_tdma(btcoexist, 0xe3, 0x12,
-                                                   0x12, 0x60, 0x90);
+                       btc8723b2ant_set_fw_ps_tdma(
+                               btcoexist, 0xe3, 0x2d,
+                               0x3, 0x70, 0x90 | tdma_byte4_modify);
                        break;
                case 15:
-                       btc8723b2ant_set_fw_ps_tdma(btcoexist, 0xe3, 0xa,
-                                                   0xa, 0x60, 0x90);
+                       btc8723b2ant_set_fw_ps_tdma(btcoexist, 0xe3, 0x1c,
+                                                   0x3, 0x70,
+                                                   0x90 | tdma_byte4_modify);
                        break;
                case 16:
-                       btc8723b2ant_set_fw_ps_tdma(btcoexist, 0xe3, 0x5,
-                                                   0x5, 0x60, 0x90);
+                       btc8723b2ant_set_fw_ps_tdma(btcoexist, 0xe3, 0x10,
+                                                   0x3, 0x70,
+                                                   0x90 | tdma_byte4_modify);
                        break;
                case 17:
                        btc8723b2ant_set_fw_ps_tdma(btcoexist, 0xa3, 0x2f,
                                                    0x2f, 0x60, 0x90);
                        break;
                case 18:
-                       btc8723b2ant_set_fw_ps_tdma(btcoexist, 0xe3, 0x5,
-                                                   0x5, 0xe1, 0x90);
+                       btc8723b2ant_set_fw_ps_tdma(btcoexist, 0xe3, 0x5, 0x5,
+                                                   0xe1, 0x90);
                        break;
                case 19:
                        btc8723b2ant_set_fw_ps_tdma(btcoexist, 0xe3, 0x25,
@@ -1370,9 +1374,63 @@ static void btc8723b2ant_ps_tdma(struct btc_coexist *btcoexist, bool force_exec,
                        btc8723b2ant_set_fw_ps_tdma(btcoexist, 0xe3, 0x15,
                                                    0x03, 0x70, 0x90);
                        break;
+
+               case 23:
+               case 123:
+                       btc8723b2ant_set_fw_ps_tdma(btcoexist, 0xe3, 0x35,
+                                                   0x03, 0x71, 0x10);
+                       break;
                case 71:
-                       btc8723b2ant_set_fw_ps_tdma(btcoexist, 0xe3, 0x1a,
-                                                   0x1a, 0xe1, 0x90);
+                       btc8723b2ant_set_fw_ps_tdma(
+                               btcoexist, 0xe3, 0x3c + wifi_duration_adjust,
+                               0x03, 0xf1, 0x90);
+                       break;
+               case 101:
+               case 105:
+               case 113:
+               case 171:
+                       btc8723b2ant_set_fw_ps_tdma(
+                               btcoexist, 0xd3, 0x3a + wifi_duration_adjust,
+                               0x03, 0x70, 0x50 | tdma_byte4_modify);
+                       break;
+               case 102:
+               case 106:
+               case 110:
+               case 114:
+                       btc8723b2ant_set_fw_ps_tdma(
+                               btcoexist, 0xd3, 0x2d + wifi_duration_adjust,
+                               0x03, 0x70, 0x50 | tdma_byte4_modify);
+                       break;
+               case 103:
+               case 107:
+               case 111:
+               case 115:
+                       btc8723b2ant_set_fw_ps_tdma(btcoexist, 0xd3, 0x1c,
+                                                   0x03, 0x70,
+                                                   0x50 | tdma_byte4_modify);
+                       break;
+               case 104:
+               case 108:
+               case 112:
+               case 116:
+                       btc8723b2ant_set_fw_ps_tdma(btcoexist, 0xd3, 0x10,
+                                                   0x03, 0x70,
+                                                   0x50 | tdma_byte4_modify);
+                       break;
+               case 109:
+                       btc8723b2ant_set_fw_ps_tdma(btcoexist, 0xe3, 0x3c,
+                                                   0x03, 0xf1,
+                                                   0x90 | tdma_byte4_modify);
+                       break;
+               case 121:
+                       btc8723b2ant_set_fw_ps_tdma(btcoexist, 0xe3, 0x15,
+                                                   0x03, 0x70,
+                                                   0x90 | tdma_byte4_modify);
+                       break;
+               case 22:
+               case 122:
+                       btc8723b2ant_set_fw_ps_tdma(btcoexist, 0xe3, 0x35,
+                                                   0x03, 0x71, 0x11);
                        break;
                }
        } else {
@@ -1398,62 +1456,202 @@ static void btc8723b2ant_ps_tdma(struct btc_coexist *btcoexist, bool force_exec,
        coex_dm->pre_ps_tdma = coex_dm->cur_ps_tdma;
 }
 
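+/* make sure PS-TDMA is switched off before the power save state changes,
+ * so the firmware never keeps TDMA running across an LPS transition
+ */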
+static void btc8723b2ant_ps_tdma_check_for_power_save_state(
+               struct btc_coexist *btcoexist, bool new_ps_state)
+{
+       u8 lps_mode = 0x0;
+
+       btcoexist->btc_get(btcoexist, BTC_GET_U1_LPS_MODE, &lps_mode);
+
+       if (lps_mode) {
+               /* already under LPS state */
+               if (new_ps_state) {
+                       /* stay in LPS state, do nothing */
+               } else {
+                       /* will leave LPS state, turn off psTdma first */
+                       btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, false, 1);
+               }
+       } else {
+               /* NO PS state */
+               if (new_ps_state) {
+                       /* will enter LPS state, turn off psTdma first */
+                       btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, false, 1);
+               } else {
+                       /* stay in non-PS state, do nothing */
+               }
+       }
+}
+
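+/* move between native PS, forced LPS (with caller-supplied LPS/RPWM
+ * values) and LPS-off, keeping the 32k low power setting in sync
+ */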
+static void btc8723b2ant_power_save_state(struct btc_coexist *btcoexist,
+                                         u8 ps_type, u8 lps_val, u8 rpwm_val)
+{
+       bool low_pwr_disable = false;
+
+       switch (ps_type) {
+       case BTC_PS_WIFI_NATIVE:
+               /* recover to original 32k low power setting */
+               low_pwr_disable = false;
+               btcoexist->btc_set(btcoexist, BTC_SET_ACT_DISABLE_LOW_POWER,
+                                  &low_pwr_disable);
+               btcoexist->btc_set(btcoexist, BTC_SET_ACT_NORMAL_LPS, NULL);
+               coex_sta->force_lps_on = false;
+               break;
+       case BTC_PS_LPS_ON:
+               btc8723b2ant_ps_tdma_check_for_power_save_state(btcoexist,
+                                                               true);
+               btc8723b2ant_lps_rpwm(btcoexist, NORMAL_EXEC, lps_val,
+                                     rpwm_val);
+               /* when coex forces LPS, do not enter 32k low power */
+               low_pwr_disable = true;
+               btcoexist->btc_set(btcoexist, BTC_SET_ACT_DISABLE_LOW_POWER,
+                                  &low_pwr_disable);
+               /* power save must be executed before psTdma */
+               btcoexist->btc_set(btcoexist, BTC_SET_ACT_ENTER_LPS, NULL);
+               coex_sta->force_lps_on = true;
+               break;
+       case BTC_PS_LPS_OFF:
+               btc8723b2ant_ps_tdma_check_for_power_save_state(btcoexist,
+                                                               false);
+               btcoexist->btc_set(btcoexist, BTC_SET_ACT_LEAVE_LPS, NULL);
+               coex_sta->force_lps_on = false;
+               break;
+       default:
+               break;
+       }
+}
+
 static void btc8723b2ant_coex_alloff(struct btc_coexist *btcoexist)
 {
        /* fw all off */
+       btc8723b2ant_power_save_state(btcoexist, BTC_PS_WIFI_NATIVE, 0x0, 0x0);
        btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, false, 1);
        btc8723b2ant_fw_dac_swing_lvl(btcoexist, NORMAL_EXEC, 6);
-       btc8723b2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, false);
+       btc8723b2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, 0);
 
        /* sw all off */
-       btc8723b2ant_sw_mechanism1(btcoexist, false, false, false, false);
-       btc8723b2ant_sw_mechanism2(btcoexist, false, false, false, 0x18);
+       btc8723b2ant_sw_mechanism(btcoexist, false, false, false, false);
 
        /* hw all off */
        btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x1, 0xfffff, 0x0);
-       btc8723b_coex_tbl_type(btcoexist, NORMAL_EXEC, 0);
+       btc8723b2ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 0);
 }
 
 static void btc8723b2ant_init_coex_dm(struct btc_coexist *btcoexist)
 {
        /* force to reset coex mechanism */
+       btc8723b2ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 0);
+       btc8723b2ant_power_save_state(btcoexist, BTC_PS_WIFI_NATIVE, 0x0, 0x0);
 
        btc8723b2ant_ps_tdma(btcoexist, FORCE_EXEC, false, 1);
        btc8723b2ant_fw_dac_swing_lvl(btcoexist, FORCE_EXEC, 6);
-       btc8723b2ant_dec_bt_pwr(btcoexist, FORCE_EXEC, false);
+       btc8723b2ant_dec_bt_pwr(btcoexist, FORCE_EXEC, 0);
+
+       btc8723b2ant_sw_mechanism(btcoexist, false, false, false, false);
 
-       btc8723b2ant_sw_mechanism1(btcoexist, false, false, false, false);
-       btc8723b2ant_sw_mechanism2(btcoexist, false, false, false, 0x18);
+       coex_sta->pop_event_cnt = 0;
 }
 
 static void btc8723b2ant_action_bt_inquiry(struct btc_coexist *btcoexist)
 {
+       struct rtl_priv *rtlpriv = btcoexist->adapter;
        bool wifi_connected = false;
        bool low_pwr_disable = true;
+       bool scan = false, link = false, roam = false;
 
        btcoexist->btc_set(btcoexist, BTC_SET_ACT_DISABLE_LOW_POWER,
                           &low_pwr_disable);
        btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_CONNECTED,
                           &wifi_connected);
 
-       if (wifi_connected) {
-               btc8723b_coex_tbl_type(btcoexist, NORMAL_EXEC, 7);
-               btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 3);
+       btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_SCAN, &scan);
+       btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_LINK, &link);
+       btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_ROAM, &roam);
+
+       btc8723b2ant_power_save_state(btcoexist, BTC_PS_WIFI_NATIVE, 0x0, 0x0);
+
+       if (coex_sta->bt_abnormal_scan) {
+               btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 23);
+               btc8723b2ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 3);
+       } else if (scan || link || roam) {
+               RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                        "[BTCoex], Wifi link process + BT Inq/Page!!\n");
+               btc8723b2ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 15);
+               btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 22);
+       } else if (wifi_connected) {
+               RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                        "[BTCoex], Wifi connected + BT Inq/Page!!\n");
+               btc8723b2ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 15);
+               btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 22);
        } else {
-               btc8723b_coex_tbl_type(btcoexist, NORMAL_EXEC, 0);
+               btc8723b2ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 0);
                btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, false, 1);
        }
        btc8723b2ant_fw_dac_swing_lvl(btcoexist, FORCE_EXEC, 6);
-       btc8723b2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, false);
+       btc8723b2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, 0);
+
+       btc8723b2ant_sw_mechanism(btcoexist, false, false, false, false);
+}
+
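+/* WiFi is scanning/linking/roaming: apply the link-process coex table and
+ * TDMA, then log the antenna control registers for debugging
+ */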
+static void btc8723b2ant_action_wifi_link_process(struct btc_coexist
+                                                    *btcoexist)
+{
+       struct rtl_priv *rtlpriv = btcoexist->adapter;
+       u32 u32tmp;
+       u8 u8tmpa, u8tmpb;
+
+       btc8723b2ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 15);
+       btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 22);
+
+       btc8723b2ant_sw_mechanism(btcoexist, false, false, false, false);
+
+       u32tmp = btcoexist->btc_read_4byte(btcoexist, 0x948);
+       u8tmpa = btcoexist->btc_read_1byte(btcoexist, 0x765);
+       u8tmpb = btcoexist->btc_read_1byte(btcoexist, 0x76e);
+
+       RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                "[BTCoex], 0x948 = 0x%x, 0x765 = 0x%x, 0x76e = 0x%x\n",
+                u32tmp, u8tmpa, u8tmpb);
+}
+
+static bool btc8723b2ant_action_wifi_idle_process(struct btc_coexist *btcoexist)
+{
+       struct rtl_priv *rtlpriv = btcoexist->adapter;
+       u8 wifi_rssi_state, wifi_rssi_state1, bt_rssi_state;
+       u8 ap_num = 0;
+       u8 tmp = BT_8723B_2ANT_WIFI_RSSI_COEXSWITCH_THRES -
+                coex_dm->switch_thres_offset - coex_dm->switch_thres_offset;
+
+       wifi_rssi_state = btc8723b2ant_wifi_rssi_state(btcoexist, 0, 2, 15, 0);
+       wifi_rssi_state1 = btc8723b2ant_wifi_rssi_state(btcoexist, 1, 2,
+                                                       tmp, 0);
+       tmp = BT_8723B_2ANT_BT_RSSI_COEXSWITCH_THRES -
+             coex_dm->switch_thres_offset - coex_dm->switch_thres_offset;
+       bt_rssi_state = btc8723b2ant_bt_rssi_state(btcoexist, 2, tmp, 0);
+
+       btcoexist->btc_get(btcoexist, BTC_GET_U1_AP_NUM, &ap_num);
 
-       btc8723b2ant_sw_mechanism1(btcoexist, false, false, false, false);
-       btc8723b2ant_sw_mechanism2(btcoexist, false, false, false, 0x18);
+       /* office environment: WiFi RSSI is high and BT has HID + A2DP links */
+       if (BTC_RSSI_HIGH(wifi_rssi_state1) && (coex_sta->hid_exist) &&
+           (coex_sta->a2dp_exist)) {
+               RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                        "[BTCoex], Wifi idle process for BT HID+A2DP exist!!\n");
+
+               btc8723b2ant_dac_swing(btcoexist, NORMAL_EXEC, true, 0x6);
+               btc8723b2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, 0);
+
+               /* sw all off */
+               btc8723b2ant_sw_mechanism(btcoexist, false, false, false,
+                                         false);
+               btc8723b2ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 0);
+               btc8723b2ant_power_save_state(btcoexist, BTC_PS_WIFI_NATIVE,
+                                             0x0, 0x0);
+               btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, false, 1);
 
-       coex_dm->need_recover_0x948 = true;
-       coex_dm->backup_0x948 = btcoexist->btc_read_2byte(btcoexist, 0x948);
+               return true;
+       }
 
-       btc8723b2ant_set_ant_path(btcoexist, BTC_ANT_WIFI_AT_AUX,
-                                 false, false);
+       btc8723b2ant_dac_swing(btcoexist, NORMAL_EXEC, true, 0x18);
+       return false;
 }
 
 static bool btc8723b2ant_is_common_action(struct btc_coexist *btcoexist)
@@ -1472,21 +1670,21 @@ static bool btc8723b2ant_is_common_action(struct btc_coexist *btcoexist)
                low_pwr_disable = false;
                btcoexist->btc_set(btcoexist, BTC_SET_ACT_DISABLE_LOW_POWER,
                                   &low_pwr_disable);
+               btc8723b2ant_limited_rx(btcoexist, NORMAL_EXEC,
+                                       false, false, 0x8);
 
                RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
                         "[BTCoex], Wifi non-connected idle!!\n");
 
                btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x1, 0xfffff,
                                          0x0);
-               btc8723b_coex_tbl_type(btcoexist, NORMAL_EXEC, 0);
+               btc8723b2ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 0);
                btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, false, 1);
                btc8723b2ant_fw_dac_swing_lvl(btcoexist, NORMAL_EXEC, 6);
-               btc8723b2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, false);
+               btc8723b2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, 0);
 
-               btc8723b2ant_sw_mechanism1(btcoexist, false, false, false,
-                                          false);
-               btc8723b2ant_sw_mechanism2(btcoexist, false, false, false,
-                                          0x18);
+               btc8723b2ant_sw_mechanism(btcoexist, false, false, false,
+                                         false);
 
                common = true;
        } else {
@@ -1496,23 +1694,23 @@ static bool btc8723b2ant_is_common_action(struct btc_coexist *btcoexist)
                        btcoexist->btc_set(btcoexist,
                                           BTC_SET_ACT_DISABLE_LOW_POWER,
                                           &low_pwr_disable);
+                       btc8723b2ant_limited_rx(btcoexist, NORMAL_EXEC,
+                                               false, false, 0x8);
 
                        RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
                                 "[BTCoex], Wifi connected + BT non connected-idle!!\n");
 
                        btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x1,
                                                  0xfffff, 0x0);
-                       btc8723b_coex_tbl_type(btcoexist, NORMAL_EXEC, 0);
+                       btc8723b2ant_coex_table_with_type(btcoexist,
+                                                         NORMAL_EXEC, 0);
                        btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, false, 1);
                        btc8723b2ant_fw_dac_swing_lvl(btcoexist, NORMAL_EXEC,
                                                      0xb);
-                       btc8723b2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC,
-                                               false);
+                       btc8723b2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, 0);
 
-                       btc8723b2ant_sw_mechanism1(btcoexist, false, false,
-                                                  false, false);
-                       btc8723b2ant_sw_mechanism2(btcoexist, false, false,
-                                                  false, 0x18);
+                       btc8723b2ant_sw_mechanism(btcoexist, false, false,
+                                                 false, false);
 
                        common = true;
                } else if (BT_8723B_2ANT_BT_STATUS_CONNECTED_IDLE ==
@@ -1526,20 +1724,20 @@ static bool btc8723b2ant_is_common_action(struct btc_coexist *btcoexist)
                                return false;
                        RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
                                 "[BTCoex], Wifi connected + BT connected-idle!!\n");
+                       btc8723b2ant_limited_rx(btcoexist, NORMAL_EXEC,
+                                               false, false, 0x8);
 
                        btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x1,
                                                  0xfffff, 0x0);
-                       btc8723b_coex_tbl_type(btcoexist, NORMAL_EXEC, 0);
+                       btc8723b2ant_coex_table_with_type(btcoexist,
+                                                         NORMAL_EXEC, 0);
                        btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, false, 1);
                        btc8723b2ant_fw_dac_swing_lvl(btcoexist, NORMAL_EXEC,
                                                      0xb);
-                       btc8723b2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC,
-                                               false);
+                       btc8723b2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, 0);
 
-                       btc8723b2ant_sw_mechanism1(btcoexist, true, false,
-                                                  false, false);
-                       btc8723b2ant_sw_mechanism2(btcoexist, false, false,
-                                                  false, 0x18);
+                       btc8723b2ant_sw_mechanism(btcoexist, true, false,
+                                                 false, false);
 
                        common = true;
                } else {
@@ -1553,36 +1751,12 @@ static bool btc8723b2ant_is_common_action(struct btc_coexist *btcoexist)
                                         "[BTCoex], Wifi Connected-Busy + BT Busy!!\n");
                                common = false;
                        } else {
-                               if (bt_hs_on)
-                                       return false;
-
                                RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
                                         "[BTCoex], Wifi Connected-Idle + BT Busy!!\n");
 
-                               btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A,
-                                                         0x1, 0xfffff, 0x0);
-                               btc8723b_coex_tbl_type(btcoexist, NORMAL_EXEC,
-                                                      7);
-                               btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC,
-                                                    true, 21);
-                               btc8723b2ant_fw_dac_swing_lvl(btcoexist,
-                                                             NORMAL_EXEC,
-                                                             0xb);
-                               if (btc8723b_need_dec_pwr(btcoexist))
-                                       btc8723b2ant_dec_bt_pwr(btcoexist,
-                                                               NORMAL_EXEC,
-                                                               true);
-                               else
-                                       btc8723b2ant_dec_bt_pwr(btcoexist,
-                                                               NORMAL_EXEC,
-                                                               false);
-                               btc8723b2ant_sw_mechanism1(btcoexist, false,
-                                                          false, false,
-                                                          false);
-                               btc8723b2ant_sw_mechanism2(btcoexist, false,
-                                                          false, false,
-                                                          0x18);
-                               common = true;
+                               common =
+                                       btc8723b2ant_action_wifi_idle_process(
+                                               btcoexist);
                        }
                }
        }
@@ -1590,550 +1764,6 @@ static bool btc8723b2ant_is_common_action(struct btc_coexist *btcoexist)
        return common;
 }
 
-static void set_tdma_int1(struct btc_coexist *btcoexist, bool tx_pause,
-                         s32 result)
-{
-       struct rtl_priv *rtlpriv = btcoexist->adapter;
-
-       /* Set PS TDMA for max interval == 1 */
-       if (tx_pause) {
-               RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
-                        "[BTCoex], TxPause = 1\n");
-
-               if (coex_dm->cur_ps_tdma == 71) {
-                       btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC,
-                                            true, 5);
-                       coex_dm->tdma_adj_type = 5;
-               } else if (coex_dm->cur_ps_tdma == 1) {
-                       btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC,
-                                            true, 5);
-                       coex_dm->tdma_adj_type = 5;
-               } else if (coex_dm->cur_ps_tdma == 2) {
-                       btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC,
-                                            true, 6);
-                       coex_dm->tdma_adj_type = 6;
-               } else if (coex_dm->cur_ps_tdma == 3) {
-                       btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC,
-                                            true, 7);
-                       coex_dm->tdma_adj_type = 7;
-               } else if (coex_dm->cur_ps_tdma == 4) {
-                       btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC,
-                                            true, 8);
-                       coex_dm->tdma_adj_type = 8;
-               }
-
-               if (coex_dm->cur_ps_tdma == 9) {
-                       btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC,
-                                            true, 13);
-                       coex_dm->tdma_adj_type = 13;
-               } else if (coex_dm->cur_ps_tdma == 10) {
-                       btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC,
-                                            true, 14);
-                       coex_dm->tdma_adj_type = 14;
-               } else if (coex_dm->cur_ps_tdma == 11) {
-                       btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC,
-                                            true, 15);
-                       coex_dm->tdma_adj_type = 15;
-               } else if (coex_dm->cur_ps_tdma == 12) {
-                       btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC,
-                                            true, 16);
-                       coex_dm->tdma_adj_type = 16;
-               }
-
-               if (result == -1) {
-                       if (coex_dm->cur_ps_tdma == 5) {
-                               btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC,
-                                                    true, 6);
-                               coex_dm->tdma_adj_type = 6;
-                       } else if (coex_dm->cur_ps_tdma == 6) {
-                               btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC,
-                                                    true, 7);
-                               coex_dm->tdma_adj_type = 7;
-                       } else if (coex_dm->cur_ps_tdma == 7) {
-                               btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC,
-                                                    true, 8);
-                               coex_dm->tdma_adj_type = 8;
-                       } else if (coex_dm->cur_ps_tdma == 13) {
-                               btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC,
-                                                    true, 14);
-                               coex_dm->tdma_adj_type = 14;
-                       } else if (coex_dm->cur_ps_tdma == 14) {
-                               btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC,
-                                                    true, 15);
-                               coex_dm->tdma_adj_type = 15;
-                       } else if (coex_dm->cur_ps_tdma == 15) {
-                               btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC,
-                                                    true, 16);
-                               coex_dm->tdma_adj_type = 16;
-                       }
-               }  else if (result == 1) {
-                       if (coex_dm->cur_ps_tdma == 8) {
-                               btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC,
-                                                    true, 7);
-                               coex_dm->tdma_adj_type = 7;
-                       } else if (coex_dm->cur_ps_tdma == 7) {
-                               btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC,
-                                                    true, 6);
-                               coex_dm->tdma_adj_type = 6;
-                       } else if (coex_dm->cur_ps_tdma == 6) {
-                               btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC,
-                                                    true, 5);
-                               coex_dm->tdma_adj_type = 5;
-                       } else if (coex_dm->cur_ps_tdma == 16) {
-                               btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC,
-                                                    true, 15);
-                               coex_dm->tdma_adj_type = 15;
-                       } else if (coex_dm->cur_ps_tdma == 15) {
-                               btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC,
-                                                    true, 14);
-                               coex_dm->tdma_adj_type = 14;
-                       } else if (coex_dm->cur_ps_tdma == 14) {
-                               btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC,
-                                                    true, 13);
-                               coex_dm->tdma_adj_type = 13;
-                       }
-               }
-       } else {
-               RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
-                        "[BTCoex], TxPause = 0\n");
-               if (coex_dm->cur_ps_tdma == 5) {
-                       btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 71);
-                       coex_dm->tdma_adj_type = 71;
-               } else if (coex_dm->cur_ps_tdma == 6) {
-                       btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 2);
-                       coex_dm->tdma_adj_type = 2;
-               } else if (coex_dm->cur_ps_tdma == 7) {
-                       btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 3);
-                       coex_dm->tdma_adj_type = 3;
-               } else if (coex_dm->cur_ps_tdma == 8) {
-                       btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 4);
-                       coex_dm->tdma_adj_type = 4;
-               }
-
-               if (coex_dm->cur_ps_tdma == 13) {
-                       btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 9);
-                       coex_dm->tdma_adj_type = 9;
-               } else if (coex_dm->cur_ps_tdma == 14) {
-                       btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 10);
-                       coex_dm->tdma_adj_type = 10;
-               } else if (coex_dm->cur_ps_tdma == 15) {
-                       btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 11);
-                       coex_dm->tdma_adj_type = 11;
-               } else if (coex_dm->cur_ps_tdma == 16) {
-                       btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 12);
-                       coex_dm->tdma_adj_type = 12;
-               }
-
-               if (result == -1) {
-                       if (coex_dm->cur_ps_tdma == 71) {
-                               btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC,
-                                                    true, 1);
-                               coex_dm->tdma_adj_type = 1;
-                       } else if (coex_dm->cur_ps_tdma == 1) {
-                               btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC,
-                                                    true, 2);
-                               coex_dm->tdma_adj_type = 2;
-                       } else if (coex_dm->cur_ps_tdma == 2) {
-                               btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC,
-                                                    true, 3);
-                               coex_dm->tdma_adj_type = 3;
-                       } else if (coex_dm->cur_ps_tdma == 3) {
-                               btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC,
-                                                    true, 4);
-                               coex_dm->tdma_adj_type = 4;
-                       } else if (coex_dm->cur_ps_tdma == 9) {
-                               btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC,
-                                                    true, 10);
-                               coex_dm->tdma_adj_type = 10;
-                       } else if (coex_dm->cur_ps_tdma == 10) {
-                               btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC,
-                                                    true, 11);
-                               coex_dm->tdma_adj_type = 11;
-                       } else if (coex_dm->cur_ps_tdma == 11) {
-                               btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC,
-                                                    true, 12);
-                               coex_dm->tdma_adj_type = 12;
-                       }
-               }  else if (result == 1) {
-                       if (coex_dm->cur_ps_tdma == 4) {
-                               btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC,
-                                                    true, 3);
-                               coex_dm->tdma_adj_type = 3;
-                       } else if (coex_dm->cur_ps_tdma == 3) {
-                               btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC,
-                                                    true, 2);
-                               coex_dm->tdma_adj_type = 2;
-                       } else if (coex_dm->cur_ps_tdma == 2) {
-                               btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC,
-                                                    true, 1);
-                               coex_dm->tdma_adj_type = 1;
-                       } else if (coex_dm->cur_ps_tdma == 1) {
-                               btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC,
-                                                    true, 71);
-                               coex_dm->tdma_adj_type = 71;
-                       } else if (coex_dm->cur_ps_tdma == 12) {
-                               btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC,
-                                                    true, 11);
-                               coex_dm->tdma_adj_type = 11;
-                       } else if (coex_dm->cur_ps_tdma == 11) {
-                               btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC,
-                                                    true, 10);
-                               coex_dm->tdma_adj_type = 10;
-                       } else if (coex_dm->cur_ps_tdma == 10) {
-                               btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC,
-                                                    true, 9);
-                               coex_dm->tdma_adj_type = 9;
-                       }
-               }
-       }
-}
-
-static void set_tdma_int2(struct btc_coexist *btcoexist, bool tx_pause,
-                         s32 result)
-{
-       struct rtl_priv *rtlpriv = btcoexist->adapter;
-
-       /* Set PS TDMA for max interval == 2 */
-       if (tx_pause) {
-               RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
-                        "[BTCoex], TxPause = 1\n");
-               if (coex_dm->cur_ps_tdma == 1) {
-                       btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 6);
-                       coex_dm->tdma_adj_type = 6;
-               } else if (coex_dm->cur_ps_tdma == 2) {
-                       btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 6);
-                       coex_dm->tdma_adj_type = 6;
-               } else if (coex_dm->cur_ps_tdma == 3) {
-                       btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 7);
-                       coex_dm->tdma_adj_type = 7;
-               } else if (coex_dm->cur_ps_tdma == 4) {
-                       btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 8);
-                       coex_dm->tdma_adj_type = 8;
-               }
-               if (coex_dm->cur_ps_tdma == 9) {
-                       btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 14);
-                       coex_dm->tdma_adj_type = 14;
-               } else if (coex_dm->cur_ps_tdma == 10) {
-                       btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 14);
-                       coex_dm->tdma_adj_type = 14;
-               } else if (coex_dm->cur_ps_tdma == 11) {
-                       btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 15);
-                       coex_dm->tdma_adj_type = 15;
-               } else if (coex_dm->cur_ps_tdma == 12) {
-                       btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 16);
-                       coex_dm->tdma_adj_type = 16;
-               }
-               if (result == -1) {
-                       if (coex_dm->cur_ps_tdma == 5) {
-                               btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC,
-                                                    true, 6);
-                               coex_dm->tdma_adj_type = 6;
-                       } else if (coex_dm->cur_ps_tdma == 6) {
-                               btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC,
-                                                    true, 7);
-                               coex_dm->tdma_adj_type = 7;
-                       } else if (coex_dm->cur_ps_tdma == 7) {
-                               btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC,
-                                                    true, 8);
-                               coex_dm->tdma_adj_type = 8;
-                       } else if (coex_dm->cur_ps_tdma == 13) {
-                               btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC,
-                                                    true, 14);
-                               coex_dm->tdma_adj_type = 14;
-                       } else if (coex_dm->cur_ps_tdma == 14) {
-                               btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC,
-                                                    true, 15);
-                               coex_dm->tdma_adj_type = 15;
-                       } else if (coex_dm->cur_ps_tdma == 15) {
-                               btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC,
-                                                    true, 16);
-                               coex_dm->tdma_adj_type = 16;
-                       }
-               }  else if (result == 1) {
-                       if (coex_dm->cur_ps_tdma == 8) {
-                               btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC,
-                                                    true, 7);
-                               coex_dm->tdma_adj_type = 7;
-                       } else if (coex_dm->cur_ps_tdma == 7) {
-                               btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC,
-                                                    true, 6);
-                               coex_dm->tdma_adj_type = 6;
-                       } else if (coex_dm->cur_ps_tdma == 6) {
-                               btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC,
-                                                    true, 6);
-                               coex_dm->tdma_adj_type = 6;
-                       } else if (coex_dm->cur_ps_tdma == 16) {
-                               btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC,
-                                                    true, 15);
-                               coex_dm->tdma_adj_type = 15;
-                       } else if (coex_dm->cur_ps_tdma == 15) {
-                               btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC,
-                                                    true, 14);
-                               coex_dm->tdma_adj_type = 14;
-                       } else if (coex_dm->cur_ps_tdma == 14) {
-                               btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC,
-                                                    true, 14);
-                               coex_dm->tdma_adj_type = 14;
-                       }
-               }
-       } else {
-               RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
-                        "[BTCoex], TxPause = 0\n");
-               if (coex_dm->cur_ps_tdma == 5) {
-                       btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 2);
-                       coex_dm->tdma_adj_type = 2;
-               } else if (coex_dm->cur_ps_tdma == 6) {
-                       btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 2);
-                       coex_dm->tdma_adj_type = 2;
-               } else if (coex_dm->cur_ps_tdma == 7) {
-                       btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 3);
-                       coex_dm->tdma_adj_type = 3;
-               } else if (coex_dm->cur_ps_tdma == 8) {
-                       btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 4);
-                       coex_dm->tdma_adj_type = 4;
-               }
-               if (coex_dm->cur_ps_tdma == 13) {
-                       btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 10);
-                       coex_dm->tdma_adj_type = 10;
-               } else if (coex_dm->cur_ps_tdma == 14) {
-                       btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 10);
-                       coex_dm->tdma_adj_type = 10;
-               } else if (coex_dm->cur_ps_tdma == 15) {
-                       btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 11);
-                       coex_dm->tdma_adj_type = 11;
-               } else if (coex_dm->cur_ps_tdma == 16) {
-                       btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 12);
-                       coex_dm->tdma_adj_type = 12;
-               }
-               if (result == -1) {
-                       if (coex_dm->cur_ps_tdma == 1) {
-                               btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC,
-                                                    true, 2);
-                               coex_dm->tdma_adj_type = 2;
-                       } else if (coex_dm->cur_ps_tdma == 2) {
-                               btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC,
-                                                    true, 3);
-                               coex_dm->tdma_adj_type = 3;
-                       } else if (coex_dm->cur_ps_tdma == 3) {
-                               btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC,
-                                                    true, 4);
-                               coex_dm->tdma_adj_type = 4;
-                       } else if (coex_dm->cur_ps_tdma == 9) {
-                               btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC,
-                                                    true, 10);
-                               coex_dm->tdma_adj_type = 10;
-                       } else if (coex_dm->cur_ps_tdma == 10) {
-                               btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC,
-                                                    true, 11);
-                               coex_dm->tdma_adj_type = 11;
-                       } else if (coex_dm->cur_ps_tdma == 11) {
-                               btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC,
-                                                    true, 12);
-                               coex_dm->tdma_adj_type = 12;
-                       }
-               } else if (result == 1) {
-                       if (coex_dm->cur_ps_tdma == 4) {
-                               btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC,
-                                                    true, 3);
-                               coex_dm->tdma_adj_type = 3;
-                       } else if (coex_dm->cur_ps_tdma == 3) {
-                               btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC,
-                                                    true, 2);
-                               coex_dm->tdma_adj_type = 2;
-                       } else if (coex_dm->cur_ps_tdma == 2) {
-                               btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC,
-                                                    true, 2);
-                               coex_dm->tdma_adj_type = 2;
-                       } else if (coex_dm->cur_ps_tdma == 12) {
-                               btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC,
-                                                    true, 11);
-                               coex_dm->tdma_adj_type = 11;
-                       } else if (coex_dm->cur_ps_tdma == 11) {
-                               btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC,
-                                                    true, 10);
-                               coex_dm->tdma_adj_type = 10;
-                       } else if (coex_dm->cur_ps_tdma == 10) {
-                               btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC,
-                                                    true, 10);
-                               coex_dm->tdma_adj_type = 10;
-                       }
-               }
-       }
-}
-
-static void set_tdma_int3(struct btc_coexist *btcoexist, bool tx_pause,
-                         s32 result)
-{
-       struct rtl_priv *rtlpriv = btcoexist->adapter;
-
-       /* Set PS TDMA for max interval == 3 */
-       if (tx_pause) {
-               RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
-                        "[BTCoex], TxPause = 1\n");
-               if (coex_dm->cur_ps_tdma == 1) {
-                       btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 7);
-                       coex_dm->tdma_adj_type = 7;
-               } else if (coex_dm->cur_ps_tdma == 2) {
-                       btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 7);
-                       coex_dm->tdma_adj_type = 7;
-               } else if (coex_dm->cur_ps_tdma == 3) {
-                       btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 7);
-                       coex_dm->tdma_adj_type = 7;
-               } else if (coex_dm->cur_ps_tdma == 4) {
-                       btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 8);
-                       coex_dm->tdma_adj_type = 8;
-               }
-               if (coex_dm->cur_ps_tdma == 9) {
-                       btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 15);
-                       coex_dm->tdma_adj_type = 15;
-               } else if (coex_dm->cur_ps_tdma == 10) {
-                       btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 15);
-                       coex_dm->tdma_adj_type = 15;
-               } else if (coex_dm->cur_ps_tdma == 11) {
-                       btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 15);
-                       coex_dm->tdma_adj_type = 15;
-               } else if (coex_dm->cur_ps_tdma == 12) {
-                       btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 16);
-                       coex_dm->tdma_adj_type = 16;
-               }
-               if (result == -1) {
-                       if (coex_dm->cur_ps_tdma == 5) {
-                               btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC,
-                                                    true, 7);
-                               coex_dm->tdma_adj_type = 7;
-                       } else if (coex_dm->cur_ps_tdma == 6) {
-                               btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC,
-                                                    true, 7);
-                               coex_dm->tdma_adj_type = 7;
-                       } else if (coex_dm->cur_ps_tdma == 7) {
-                               btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC,
-                                                    true, 8);
-                               coex_dm->tdma_adj_type = 8;
-                       } else if (coex_dm->cur_ps_tdma == 13) {
-                               btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC,
-                                                    true, 15);
-                               coex_dm->tdma_adj_type = 15;
-                       } else if (coex_dm->cur_ps_tdma == 14) {
-                               btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC,
-                                                    true, 15);
-                               coex_dm->tdma_adj_type = 15;
-                       } else if (coex_dm->cur_ps_tdma == 15) {
-                               btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC,
-                                                    true, 16);
-                               coex_dm->tdma_adj_type = 16;
-                       }
-               }  else if (result == 1) {
-                       if (coex_dm->cur_ps_tdma == 8) {
-                               btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC,
-                                                    true, 7);
-                               coex_dm->tdma_adj_type = 7;
-                       } else if (coex_dm->cur_ps_tdma == 7) {
-                               btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC,
-                                                    true, 7);
-                               coex_dm->tdma_adj_type = 7;
-                       } else if (coex_dm->cur_ps_tdma == 6) {
-                               btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC,
-                                                    true, 7);
-                               coex_dm->tdma_adj_type = 7;
-                       } else if (coex_dm->cur_ps_tdma == 16) {
-                               btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC,
-                                                    true, 15);
-                               coex_dm->tdma_adj_type = 15;
-                       } else if (coex_dm->cur_ps_tdma == 15) {
-                               btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC,
-                                                    true, 15);
-                               coex_dm->tdma_adj_type = 15;
-                       } else if (coex_dm->cur_ps_tdma == 14) {
-                               btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC,
-                                                    true, 15);
-                               coex_dm->tdma_adj_type = 15;
-                       }
-               }
-       } else {
-               RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
-                        "[BTCoex], TxPause = 0\n");
-               if (coex_dm->cur_ps_tdma == 5) {
-                       btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 3);
-                       coex_dm->tdma_adj_type = 3;
-               } else if (coex_dm->cur_ps_tdma == 6) {
-                       btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 3);
-                       coex_dm->tdma_adj_type = 3;
-               } else if (coex_dm->cur_ps_tdma == 7) {
-                       btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 3);
-                       coex_dm->tdma_adj_type = 3;
-               } else if (coex_dm->cur_ps_tdma == 8) {
-                       btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 4);
-                       coex_dm->tdma_adj_type = 4;
-               }
-               if (coex_dm->cur_ps_tdma == 13) {
-                       btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 11);
-                       coex_dm->tdma_adj_type = 11;
-               } else if (coex_dm->cur_ps_tdma == 14) {
-                       btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 11);
-                       coex_dm->tdma_adj_type = 11;
-               } else if (coex_dm->cur_ps_tdma == 15) {
-                       btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 11);
-                       coex_dm->tdma_adj_type = 11;
-               } else if (coex_dm->cur_ps_tdma == 16) {
-                       btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 12);
-                       coex_dm->tdma_adj_type = 12;
-               }
-               if (result == -1) {
-                       if (coex_dm->cur_ps_tdma == 1) {
-                               btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC,
-                                                    true, 3);
-                               coex_dm->tdma_adj_type = 3;
-                       } else if (coex_dm->cur_ps_tdma == 2) {
-                               btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC,
-                                                    true, 3);
-                               coex_dm->tdma_adj_type = 3;
-                       } else if (coex_dm->cur_ps_tdma == 3) {
-                               btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC,
-                                                    true, 4);
-                               coex_dm->tdma_adj_type = 4;
-                       } else if (coex_dm->cur_ps_tdma == 9) {
-                               btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC,
-                                                    true, 11);
-                               coex_dm->tdma_adj_type = 11;
-                       } else if (coex_dm->cur_ps_tdma == 10) {
-                               btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC,
-                                                    true, 11);
-                               coex_dm->tdma_adj_type = 11;
-                       } else if (coex_dm->cur_ps_tdma == 11) {
-                               btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC,
-                                                    true, 12);
-                               coex_dm->tdma_adj_type = 12;
-                       }
-               } else if (result == 1) {
-                       if (coex_dm->cur_ps_tdma == 4) {
-                               btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC,
-                                                    true, 3);
-                               coex_dm->tdma_adj_type = 3;
-                       } else if (coex_dm->cur_ps_tdma == 3) {
-                               btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC,
-                                                    true, 3);
-                               coex_dm->tdma_adj_type = 3;
-                       } else if (coex_dm->cur_ps_tdma == 2) {
-                               btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC,
-                                                    true, 3);
-                               coex_dm->tdma_adj_type = 3;
-                       } else if (coex_dm->cur_ps_tdma == 12) {
-                               btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC,
-                                                    true, 11);
-                               coex_dm->tdma_adj_type = 11;
-                       } else if (coex_dm->cur_ps_tdma == 11) {
-                               btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC,
-                                                    true, 11);
-                               coex_dm->tdma_adj_type = 11;
-                       } else if (coex_dm->cur_ps_tdma == 10) {
-                               btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC,
-                                                    true, 11);
-                               coex_dm->tdma_adj_type = 11;
-                       }
-               }
-       }
-}
-
 static void btc8723b2ant_tdma_duration_adjust(struct btc_coexist *btcoexist,
                                          bool sco_hid, bool tx_pause,
                                          u8 max_interval)
@@ -2157,34 +1787,44 @@ static void btc8723b2ant_tdma_duration_adjust(struct btc_coexist *btcoexist,
                                        btc8723b2ant_ps_tdma(btcoexist,
                                                             NORMAL_EXEC,
                                                             true, 13);
-                                       coex_dm->tdma_adj_type = 13;
+                                       coex_dm->ps_tdma_du_adj_type = 13;
                                } else if (max_interval == 2) {
                                        btc8723b2ant_ps_tdma(btcoexist,
                                                             NORMAL_EXEC,
                                                             true, 14);
-                                       coex_dm->tdma_adj_type = 14;
+                                       coex_dm->ps_tdma_du_adj_type = 14;
+                               } else if (max_interval == 3) {
+                                       btc8723b2ant_ps_tdma(btcoexist,
+                                                            NORMAL_EXEC,
+                                                            true, 15);
+                                       coex_dm->ps_tdma_du_adj_type = 15;
                                } else {
                                        btc8723b2ant_ps_tdma(btcoexist,
                                                             NORMAL_EXEC,
                                                             true, 15);
-                                       coex_dm->tdma_adj_type = 15;
+                                       coex_dm->ps_tdma_du_adj_type = 15;
                                }
                        } else {
                                if (max_interval == 1) {
                                        btc8723b2ant_ps_tdma(btcoexist,
                                                             NORMAL_EXEC,
                                                             true, 9);
-                                       coex_dm->tdma_adj_type = 9;
+                                       coex_dm->ps_tdma_du_adj_type = 9;
                                } else if (max_interval == 2) {
                                        btc8723b2ant_ps_tdma(btcoexist,
                                                             NORMAL_EXEC,
                                                             true, 10);
-                                       coex_dm->tdma_adj_type = 10;
+                                       coex_dm->ps_tdma_du_adj_type = 10;
+                               } else if (max_interval == 3) {
+                                       btc8723b2ant_ps_tdma(btcoexist,
+                                                            NORMAL_EXEC,
+                                                            true, 11);
+                                       coex_dm->ps_tdma_du_adj_type = 11;
                                } else {
                                        btc8723b2ant_ps_tdma(btcoexist,
                                                             NORMAL_EXEC,
                                                             true, 11);
-                                       coex_dm->tdma_adj_type = 11;
+                                       coex_dm->ps_tdma_du_adj_type = 11;
                                }
                        }
                } else {
@@ -2193,34 +1833,44 @@ static void btc8723b2ant_tdma_duration_adjust(struct btc_coexist *btcoexist,
                                        btc8723b2ant_ps_tdma(btcoexist,
                                                             NORMAL_EXEC,
                                                             true, 5);
-                                       coex_dm->tdma_adj_type = 5;
+                                       coex_dm->ps_tdma_du_adj_type = 5;
                                } else if (max_interval == 2) {
                                        btc8723b2ant_ps_tdma(btcoexist,
                                                             NORMAL_EXEC,
                                                             true, 6);
-                                       coex_dm->tdma_adj_type = 6;
+                                       coex_dm->ps_tdma_du_adj_type = 6;
+                               } else if (max_interval == 3) {
+                                       btc8723b2ant_ps_tdma(btcoexist,
+                                                            NORMAL_EXEC,
+                                                            true, 7);
+                                       coex_dm->ps_tdma_du_adj_type = 7;
                                } else {
                                        btc8723b2ant_ps_tdma(btcoexist,
                                                             NORMAL_EXEC,
                                                             true, 7);
-                                       coex_dm->tdma_adj_type = 7;
+                                       coex_dm->ps_tdma_du_adj_type = 7;
                                }
                        } else {
                                if (max_interval == 1) {
                                        btc8723b2ant_ps_tdma(btcoexist,
                                                             NORMAL_EXEC,
                                                             true, 1);
-                                       coex_dm->tdma_adj_type = 1;
+                                       coex_dm->ps_tdma_du_adj_type = 1;
                                } else if (max_interval == 2) {
                                        btc8723b2ant_ps_tdma(btcoexist,
                                                             NORMAL_EXEC,
                                                             true, 2);
-                                       coex_dm->tdma_adj_type = 2;
+                                       coex_dm->ps_tdma_du_adj_type = 2;
+                               } else if (max_interval == 3) {
+                                       btc8723b2ant_ps_tdma(btcoexist,
+                                                            NORMAL_EXEC,
+                                                            true, 3);
+                                       coex_dm->ps_tdma_du_adj_type = 3;
                                } else {
                                        btc8723b2ant_ps_tdma(btcoexist,
                                                             NORMAL_EXEC,
                                                             true, 3);
-                                       coex_dm->tdma_adj_type = 3;
+                                       coex_dm->ps_tdma_du_adj_type = 3;
                                }
                        }
                }
@@ -2234,6 +1884,11 @@ static void btc8723b2ant_tdma_duration_adjust(struct btc_coexist *btcoexist,
        } else {
                /* acquire the BT TRx retry count from BT_Info byte2 */
                retry_count = coex_sta->bt_retry_cnt;
+
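+               /* heavy BT low-priority traffic also counts as a retry */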
+               if (coex_sta->low_priority_tx > 1050 ||
+                   coex_sta->low_priority_rx > 1250)
+                       retry_count++;
+
                RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
                         "[BTCoex], retry_count = %d\n", retry_count);
                RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
@@ -2250,6 +1905,9 @@ static void btc8723b2ant_tdma_duration_adjust(struct btc_coexist *btcoexist,
                                dn = 0;
 
                        if (up >= n) {
+                               /* if the retry count stays at 0 for n*2
+                                * consecutive seconds, enlarge the WiFi
+                                * duration
+                                */
                                wait_count = 0;
                                n = 3;
                                up = 0;
@@ -2266,12 +1924,20 @@ static void btc8723b2ant_tdma_duration_adjust(struct btc_coexist *btcoexist,
                                up = 0;
 
                        if (dn == 2) {
+                               /* if the retry count is > 0 and < 3 for two
+                                * consecutive 2-second checks, reduce the
+                                * WiFi duration
+                                */
                                if (wait_count <= 2)
+                                       /* avoid oscillating between the two levels */
                                        m++;
                                else
                                        m = 1;
 
                                if (m >= 20)
+                                       /* cap m at 20; the WiFi duration is
+                                        * then rechecked after at most 120
+                                        * seconds
+                                        */
                                        m = 20;
 
                                n = 3 * m;
@@ -2282,42 +1948,793 @@ static void btc8723b2ant_tdma_duration_adjust(struct btc_coexist *btcoexist,
                                RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
                                         "[BTCoex], Decrease wifi duration for retry_counter<3!!\n");
                        }
-               } else {
-                       if (wait_count == 1)
-                               m++;
-                       else
-                               m = 1;
-
-                       if (m >= 20)
-                               m = 20;
-
-                       n = 3 * m;
-                       up = 0;
-                       dn = 0;
-                       wait_count = 0;
-                       result = -1;
-                       RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
-                                "[BTCoex], Decrease wifi duration for retry_counter>3!!\n");
+               } else {
+                       /* once the retry count exceeds 3, reduce the WiFi
+                        * duration
+                        */
+                       if (wait_count == 1)
+                               /* avoid oscillating between the two levels */
+                               m++;
+                       else
+                               m = 1;
+
+                       if (m >= 20)
+                               /* cap m at 20; the WiFi duration is then
+                                * rechecked after at most 120 seconds
+                                */
+                               m = 20;
+
+                       n = 3 * m;
+                       up = 0;
+                       dn = 0;
+                       wait_count = 0;
+                       result = -1;
+                       RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                                "[BTCoex], Decrease wifi duration for retry_counter>3!!\n");
+               }
+
+               RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                        "[BTCoex], max Interval = %d\n", max_interval);
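+               /* set PS TDMA for max interval == 1: walk the current TDMA
+                * case up or down according to tx_pause and result
+                */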
+               if (max_interval == 1) {
+                       if (tx_pause) {
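+                               /* TxPause = 1: move to the tx-paused TDMA
+                                * cases
+                                */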
+                               if (coex_dm->cur_ps_tdma == 71) {
+                                       btc8723b2ant_ps_tdma(btcoexist,
+                                                            NORMAL_EXEC,
+                                                            true, 5);
+                                       coex_dm->ps_tdma_du_adj_type = 5;
+                               } else if (coex_dm->cur_ps_tdma == 1) {
+                                       btc8723b2ant_ps_tdma(btcoexist,
+                                                            NORMAL_EXEC,
+                                                            true, 5);
+                                       coex_dm->ps_tdma_du_adj_type = 5;
+                               } else if (coex_dm->cur_ps_tdma == 2) {
+                                       btc8723b2ant_ps_tdma(btcoexist,
+                                                            NORMAL_EXEC,
+                                                            true, 6);
+                                       coex_dm->ps_tdma_du_adj_type = 6;
+                               } else if (coex_dm->cur_ps_tdma == 3) {
+                                       btc8723b2ant_ps_tdma(btcoexist,
+                                                            NORMAL_EXEC,
+                                                            true, 7);
+                                       coex_dm->ps_tdma_du_adj_type = 7;
+                               } else if (coex_dm->cur_ps_tdma == 4) {
+                                       btc8723b2ant_ps_tdma(btcoexist,
+                                                            NORMAL_EXEC,
+                                                            true, 8);
+                                       coex_dm->ps_tdma_du_adj_type = 8;
+                               }
+                               if (coex_dm->cur_ps_tdma == 9) {
+                                       btc8723b2ant_ps_tdma(btcoexist,
+                                                            NORMAL_EXEC,
+                                                            true, 13);
+                                       coex_dm->ps_tdma_du_adj_type = 13;
+                               } else if (coex_dm->cur_ps_tdma == 10) {
+                                       btc8723b2ant_ps_tdma(btcoexist,
+                                                            NORMAL_EXEC,
+                                                            true, 14);
+                                       coex_dm->ps_tdma_du_adj_type = 14;
+                               } else if (coex_dm->cur_ps_tdma == 11) {
+                                       btc8723b2ant_ps_tdma(btcoexist,
+                                                            NORMAL_EXEC,
+                                                            true, 15);
+                                       coex_dm->ps_tdma_du_adj_type = 15;
+                               } else if (coex_dm->cur_ps_tdma == 12) {
+                                       btc8723b2ant_ps_tdma(btcoexist,
+                                                            NORMAL_EXEC,
+                                                            true, 16);
+                                       coex_dm->ps_tdma_du_adj_type = 16;
+                               }
+
+                               if (result == -1) {
+                                       if (coex_dm->cur_ps_tdma == 5) {
+                                               btc8723b2ant_ps_tdma(
+                                                       btcoexist, NORMAL_EXEC,
+                                                       true, 6);
+                                               coex_dm->ps_tdma_du_adj_type =
+                                                       6;
+                                       } else if (coex_dm->cur_ps_tdma == 6) {
+                                               btc8723b2ant_ps_tdma(
+                                                       btcoexist, NORMAL_EXEC,
+                                                       true, 7);
+                                               coex_dm->ps_tdma_du_adj_type =
+                                                       7;
+                                       } else if (coex_dm->cur_ps_tdma == 7) {
+                                               btc8723b2ant_ps_tdma(
+                                                       btcoexist, NORMAL_EXEC,
+                                                       true, 8);
+                                               coex_dm->ps_tdma_du_adj_type =
+                                                       8;
+                                       } else if (coex_dm->cur_ps_tdma == 13) {
+                                               btc8723b2ant_ps_tdma(
+                                                       btcoexist, NORMAL_EXEC,
+                                                       true, 14);
+                                               coex_dm->ps_tdma_du_adj_type =
+                                                       14;
+                                       } else if (coex_dm->cur_ps_tdma == 14) {
+                                               btc8723b2ant_ps_tdma(
+                                                       btcoexist, NORMAL_EXEC,
+                                                       true, 15);
+                                               coex_dm->ps_tdma_du_adj_type =
+                                                       15;
+                                       } else if (coex_dm->cur_ps_tdma == 15) {
+                                               btc8723b2ant_ps_tdma(
+                                                       btcoexist, NORMAL_EXEC,
+                                                       true, 16);
+                                               coex_dm->ps_tdma_du_adj_type =
+                                                       16;
+                                       }
+                               } else if (result == 1) {
+                                       if (coex_dm->cur_ps_tdma == 8) {
+                                               btc8723b2ant_ps_tdma(
+                                                       btcoexist, NORMAL_EXEC,
+                                                       true, 7);
+                                               coex_dm->ps_tdma_du_adj_type =
+                                                       7;
+                                       } else if (coex_dm->cur_ps_tdma == 7) {
+                                               btc8723b2ant_ps_tdma(
+                                                       btcoexist, NORMAL_EXEC,
+                                                       true, 6);
+                                               coex_dm->ps_tdma_du_adj_type =
+                                                       6;
+                                       } else if (coex_dm->cur_ps_tdma == 6) {
+                                               btc8723b2ant_ps_tdma(
+                                                       btcoexist, NORMAL_EXEC,
+                                                       true, 5);
+                                               coex_dm->ps_tdma_du_adj_type =
+                                                       5;
+                                       } else if (coex_dm->cur_ps_tdma == 16) {
+                                               btc8723b2ant_ps_tdma(
+                                                       btcoexist, NORMAL_EXEC,
+                                                       true, 15);
+                                               coex_dm->ps_tdma_du_adj_type =
+                                                       15;
+                                       } else if (coex_dm->cur_ps_tdma == 15) {
+                                               btc8723b2ant_ps_tdma(
+                                                       btcoexist, NORMAL_EXEC,
+                                                       true, 14);
+                                               coex_dm->ps_tdma_du_adj_type =
+                                                       14;
+                                       } else if (coex_dm->cur_ps_tdma == 14) {
+                                               btc8723b2ant_ps_tdma(
+                                                       btcoexist, NORMAL_EXEC,
+                                                       true, 13);
+                                               coex_dm->ps_tdma_du_adj_type =
+                                                       13;
+                                       }
+                               }
+                       } else {
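+                               /* TxPause = 0: move back to the non-paused
+                                * TDMA cases
+                                */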
+                               if (coex_dm->cur_ps_tdma == 5) {
+                                       btc8723b2ant_ps_tdma(btcoexist,
+                                                            NORMAL_EXEC,
+                                                            true, 71);
+                                       coex_dm->ps_tdma_du_adj_type = 71;
+                               } else if (coex_dm->cur_ps_tdma == 6) {
+                                       btc8723b2ant_ps_tdma(btcoexist,
+                                                            NORMAL_EXEC,
+                                                            true, 2);
+                                       coex_dm->ps_tdma_du_adj_type = 2;
+                               } else if (coex_dm->cur_ps_tdma == 7) {
+                                       btc8723b2ant_ps_tdma(btcoexist,
+                                                            NORMAL_EXEC,
+                                                            true, 3);
+                                       coex_dm->ps_tdma_du_adj_type = 3;
+                               } else if (coex_dm->cur_ps_tdma == 8) {
+                                       btc8723b2ant_ps_tdma(btcoexist,
+                                                            NORMAL_EXEC,
+                                                            true, 4);
+                                       coex_dm->ps_tdma_du_adj_type = 4;
+                               }
+                               if (coex_dm->cur_ps_tdma == 13) {
+                                       btc8723b2ant_ps_tdma(btcoexist,
+                                                            NORMAL_EXEC,
+                                                            true, 9);
+                                       coex_dm->ps_tdma_du_adj_type = 9;
+                               } else if (coex_dm->cur_ps_tdma == 14) {
+                                       btc8723b2ant_ps_tdma(btcoexist,
+                                                            NORMAL_EXEC,
+                                                            true, 10);
+                                       coex_dm->ps_tdma_du_adj_type = 10;
+                               } else if (coex_dm->cur_ps_tdma == 15) {
+                                       btc8723b2ant_ps_tdma(btcoexist,
+                                                            NORMAL_EXEC,
+                                                            true, 11);
+                                       coex_dm->ps_tdma_du_adj_type = 11;
+                               } else if (coex_dm->cur_ps_tdma == 16) {
+                                       btc8723b2ant_ps_tdma(btcoexist,
+                                                            NORMAL_EXEC,
+                                                            true, 12);
+                                       coex_dm->ps_tdma_du_adj_type = 12;
+                               }
+
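+                               /* result == -1: step one case forward in
+                                * each chain (71->1->2->3->4 and
+                                * 9->10->11->12); result == 1: step one
+                                * case back, with case 1 extending to 71
+                                */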
+                               if (result == -1) {
+                                       if (coex_dm->cur_ps_tdma == 71) {
+                                               btc8723b2ant_ps_tdma(
+                                                       btcoexist, NORMAL_EXEC,
+                                                       true, 1);
+                                               coex_dm->ps_tdma_du_adj_type =
+                                                       1;
+                                       } else if (coex_dm->cur_ps_tdma == 1) {
+                                               btc8723b2ant_ps_tdma(
+                                                       btcoexist, NORMAL_EXEC,
+                                                       true, 2);
+                                               coex_dm->ps_tdma_du_adj_type =
+                                                       2;
+                                       } else if (coex_dm->cur_ps_tdma == 2) {
+                                               btc8723b2ant_ps_tdma(
+                                                       btcoexist, NORMAL_EXEC,
+                                                       true, 3);
+                                               coex_dm->ps_tdma_du_adj_type =
+                                                       3;
+                                       } else if (coex_dm->cur_ps_tdma == 3) {
+                                               btc8723b2ant_ps_tdma(
+                                                       btcoexist, NORMAL_EXEC,
+                                                       true, 4);
+                                               coex_dm->ps_tdma_du_adj_type =
+                                                       4;
+                                       } else if (coex_dm->cur_ps_tdma == 9) {
+                                               btc8723b2ant_ps_tdma(
+                                                       btcoexist, NORMAL_EXEC,
+                                                       true, 10);
+                                               coex_dm->ps_tdma_du_adj_type =
+                                                       10;
+                                       } else if (coex_dm->cur_ps_tdma == 10) {
+                                               btc8723b2ant_ps_tdma(
+                                                       btcoexist, NORMAL_EXEC,
+                                                       true, 11);
+                                               coex_dm->ps_tdma_du_adj_type =
+                                                       11;
+                                       } else if (coex_dm->cur_ps_tdma == 11) {
+                                               btc8723b2ant_ps_tdma(
+                                                       btcoexist, NORMAL_EXEC,
+                                                       true, 12);
+                                               coex_dm->ps_tdma_du_adj_type =
+                                                       12;
+                                       }
+                               } else if (result == 1) {
+                                       if (coex_dm->cur_ps_tdma == 4) {
+                                               btc8723b2ant_ps_tdma(
+                                                       btcoexist, NORMAL_EXEC,
+                                                       true, 3);
+                                               coex_dm->ps_tdma_du_adj_type =
+                                                       3;
+                                       } else if (coex_dm->cur_ps_tdma == 3) {
+                                               btc8723b2ant_ps_tdma(
+                                                       btcoexist, NORMAL_EXEC,
+                                                       true, 2);
+                                               coex_dm->ps_tdma_du_adj_type =
+                                                       2;
+                                       } else if (coex_dm->cur_ps_tdma == 2) {
+                                               btc8723b2ant_ps_tdma(
+                                                       btcoexist, NORMAL_EXEC,
+                                                       true, 1);
+                                               coex_dm->ps_tdma_du_adj_type =
+                                                       1;
+                                       } else if (coex_dm->cur_ps_tdma == 1) {
+                                               btc8723b2ant_ps_tdma(
+                                                       btcoexist, NORMAL_EXEC,
+                                                       true, 71);
+                                               coex_dm->ps_tdma_du_adj_type =
+                                                       71;
+                                       } else if (coex_dm->cur_ps_tdma == 12) {
+                                               btc8723b2ant_ps_tdma(
+                                                       btcoexist, NORMAL_EXEC,
+                                                       true, 11);
+                                               coex_dm->ps_tdma_du_adj_type =
+                                                       11;
+                                       } else if (coex_dm->cur_ps_tdma == 11) {
+                                               btc8723b2ant_ps_tdma(
+                                                       btcoexist, NORMAL_EXEC,
+                                                       true, 10);
+                                               coex_dm->ps_tdma_du_adj_type =
+                                                       10;
+                                       } else if (coex_dm->cur_ps_tdma == 10) {
+                                               btc8723b2ant_ps_tdma(
+                                                       btcoexist, NORMAL_EXEC,
+                                                       true, 9);
+                                               coex_dm->ps_tdma_du_adj_type =
+                                                       9;
+                                       }
+                               }
+                       }
+               } else if (max_interval == 2) {
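+                       /* same adjustment scheme as max_interval == 1, but
+                        * the chains are clamped so cases 1/9, 5/13 and 71
+                        * are never selected
+                        */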
+                       if (tx_pause) {
+                               if (coex_dm->cur_ps_tdma == 1) {
+                                       btc8723b2ant_ps_tdma(btcoexist,
+                                                            NORMAL_EXEC,
+                                                            true, 6);
+                                       coex_dm->ps_tdma_du_adj_type = 6;
+                               } else if (coex_dm->cur_ps_tdma == 2) {
+                                       btc8723b2ant_ps_tdma(btcoexist,
+                                                            NORMAL_EXEC,
+                                                            true, 6);
+                                       coex_dm->ps_tdma_du_adj_type = 6;
+                               } else if (coex_dm->cur_ps_tdma == 3) {
+                                       btc8723b2ant_ps_tdma(btcoexist,
+                                                            NORMAL_EXEC,
+                                                            true, 7);
+                                       coex_dm->ps_tdma_du_adj_type = 7;
+                               } else if (coex_dm->cur_ps_tdma == 4) {
+                                       btc8723b2ant_ps_tdma(btcoexist,
+                                                            NORMAL_EXEC,
+                                                            true, 8);
+                                       coex_dm->ps_tdma_du_adj_type = 8;
+                               }
+                               if (coex_dm->cur_ps_tdma == 9) {
+                                       btc8723b2ant_ps_tdma(btcoexist,
+                                                            NORMAL_EXEC,
+                                                            true, 14);
+                                       coex_dm->ps_tdma_du_adj_type = 14;
+                               } else if (coex_dm->cur_ps_tdma == 10) {
+                                       btc8723b2ant_ps_tdma(btcoexist,
+                                                            NORMAL_EXEC,
+                                                            true, 14);
+                                       coex_dm->ps_tdma_du_adj_type = 14;
+                               } else if (coex_dm->cur_ps_tdma == 11) {
+                                       btc8723b2ant_ps_tdma(btcoexist,
+                                                            NORMAL_EXEC,
+                                                            true, 15);
+                                       coex_dm->ps_tdma_du_adj_type = 15;
+                               } else if (coex_dm->cur_ps_tdma == 12) {
+                                       btc8723b2ant_ps_tdma(btcoexist,
+                                                            NORMAL_EXEC,
+                                                            true, 16);
+                                       coex_dm->ps_tdma_du_adj_type = 16;
+                               }
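+                               /* fine-tune within the clamped range:
+                                * cases 6 and 14 are the floor, 8 and 16
+                                * the ceiling
+                                */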
+                               if (result == -1) {
+                                       if (coex_dm->cur_ps_tdma == 5) {
+                                               btc8723b2ant_ps_tdma(
+                                                       btcoexist, NORMAL_EXEC,
+                                                       true, 6);
+                                               coex_dm->ps_tdma_du_adj_type =
+                                                       6;
+                                       } else if (coex_dm->cur_ps_tdma == 6) {
+                                               btc8723b2ant_ps_tdma(
+                                                       btcoexist, NORMAL_EXEC,
+                                                       true, 7);
+                                               coex_dm->ps_tdma_du_adj_type =
+                                                       7;
+                                       } else if (coex_dm->cur_ps_tdma == 7) {
+                                               btc8723b2ant_ps_tdma(
+                                                       btcoexist, NORMAL_EXEC,
+                                                       true, 8);
+                                               coex_dm->ps_tdma_du_adj_type =
+                                                       8;
+                                       } else if (coex_dm->cur_ps_tdma == 13) {
+                                               btc8723b2ant_ps_tdma(
+                                                       btcoexist, NORMAL_EXEC,
+                                                       true, 14);
+                                               coex_dm->ps_tdma_du_adj_type =
+                                                       14;
+                                       } else if (coex_dm->cur_ps_tdma == 14) {
+                                               btc8723b2ant_ps_tdma(
+                                                       btcoexist, NORMAL_EXEC,
+                                                       true, 15);
+                                               coex_dm->ps_tdma_du_adj_type =
+                                                       15;
+                                       } else if (coex_dm->cur_ps_tdma == 15) {
+                                               btc8723b2ant_ps_tdma(
+                                                       btcoexist, NORMAL_EXEC,
+                                                       true, 16);
+                                               coex_dm->ps_tdma_du_adj_type =
+                                                       16;
+                                       }
+                               } else if (result == 1) {
+                                       if (coex_dm->cur_ps_tdma == 8) {
+                                               btc8723b2ant_ps_tdma(
+                                                       btcoexist, NORMAL_EXEC,
+                                                       true, 7);
+                                               coex_dm->ps_tdma_du_adj_type =
+                                                       7;
+                                       } else if (coex_dm->cur_ps_tdma == 7) {
+                                               btc8723b2ant_ps_tdma(
+                                                       btcoexist, NORMAL_EXEC,
+                                                       true, 6);
+                                               coex_dm->ps_tdma_du_adj_type =
+                                                       6;
+                                       } else if (coex_dm->cur_ps_tdma == 6) {
+                                               btc8723b2ant_ps_tdma(
+                                                       btcoexist, NORMAL_EXEC,
+                                                       true, 6);
+                                               coex_dm->ps_tdma_du_adj_type =
+                                                       6;
+                                       } else if (coex_dm->cur_ps_tdma == 16) {
+                                               btc8723b2ant_ps_tdma(
+                                                       btcoexist, NORMAL_EXEC,
+                                                       true, 15);
+                                               coex_dm->ps_tdma_du_adj_type =
+                                                       15;
+                                       } else if (coex_dm->cur_ps_tdma == 15) {
+                                               btc8723b2ant_ps_tdma(
+                                                       btcoexist, NORMAL_EXEC,
+                                                       true, 14);
+                                               coex_dm->ps_tdma_du_adj_type =
+                                                       14;
+                                       } else if (coex_dm->cur_ps_tdma == 14) {
+                                               btc8723b2ant_ps_tdma(
+                                                       btcoexist, NORMAL_EXEC,
+                                                       true, 14);
+                                               coex_dm->ps_tdma_du_adj_type =
+                                                       14;
+                                       }
+                               }
+                       } else {
+                               if (coex_dm->cur_ps_tdma == 5) {
+                                       btc8723b2ant_ps_tdma(btcoexist,
+                                                            NORMAL_EXEC,
+                                                            true, 2);
+                                       coex_dm->ps_tdma_du_adj_type = 2;
+                               } else if (coex_dm->cur_ps_tdma == 6) {
+                                       btc8723b2ant_ps_tdma(btcoexist,
+                                                            NORMAL_EXEC,
+                                                            true, 2);
+                                       coex_dm->ps_tdma_du_adj_type = 2;
+                               } else if (coex_dm->cur_ps_tdma == 7) {
+                                       btc8723b2ant_ps_tdma(btcoexist,
+                                                            NORMAL_EXEC,
+                                                            true, 3);
+                                       coex_dm->ps_tdma_du_adj_type = 3;
+                               } else if (coex_dm->cur_ps_tdma == 8) {
+                                       btc8723b2ant_ps_tdma(btcoexist,
+                                                            NORMAL_EXEC,
+                                                            true, 4);
+                                       coex_dm->ps_tdma_du_adj_type = 4;
+                               }
+                               if (coex_dm->cur_ps_tdma == 13) {
+                                       btc8723b2ant_ps_tdma(btcoexist,
+                                                            NORMAL_EXEC,
+                                                            true, 10);
+                                       coex_dm->ps_tdma_du_adj_type = 10;
+                               } else if (coex_dm->cur_ps_tdma == 14) {
+                                       btc8723b2ant_ps_tdma(btcoexist,
+                                                            NORMAL_EXEC,
+                                                            true, 10);
+                                       coex_dm->ps_tdma_du_adj_type = 10;
+                               } else if (coex_dm->cur_ps_tdma == 15) {
+                                       btc8723b2ant_ps_tdma(btcoexist,
+                                                            NORMAL_EXEC,
+                                                            true, 11);
+                                       coex_dm->ps_tdma_du_adj_type = 11;
+                               } else if (coex_dm->cur_ps_tdma == 16) {
+                                       btc8723b2ant_ps_tdma(btcoexist,
+                                                            NORMAL_EXEC,
+                                                            true, 12);
+                                       coex_dm->ps_tdma_du_adj_type = 12;
+                               }
+                               if (result == -1) {
+                                       if (coex_dm->cur_ps_tdma == 1) {
+                                               btc8723b2ant_ps_tdma(
+                                                       btcoexist, NORMAL_EXEC,
+                                                       true, 2);
+                                               coex_dm->ps_tdma_du_adj_type =
+                                                       2;
+                                       } else if (coex_dm->cur_ps_tdma == 2) {
+                                               btc8723b2ant_ps_tdma(
+                                                       btcoexist, NORMAL_EXEC,
+                                                       true, 3);
+                                               coex_dm->ps_tdma_du_adj_type =
+                                                       3;
+                                       } else if (coex_dm->cur_ps_tdma == 3) {
+                                               btc8723b2ant_ps_tdma(
+                                                       btcoexist, NORMAL_EXEC,
+                                                       true, 4);
+                                               coex_dm->ps_tdma_du_adj_type =
+                                                       4;
+                                       } else if (coex_dm->cur_ps_tdma == 9) {
+                                               btc8723b2ant_ps_tdma(
+                                                       btcoexist, NORMAL_EXEC,
+                                                       true, 10);
+                                               coex_dm->ps_tdma_du_adj_type =
+                                                       10;
+                                       } else if (coex_dm->cur_ps_tdma == 10) {
+                                               btc8723b2ant_ps_tdma(
+                                                       btcoexist, NORMAL_EXEC,
+                                                       true, 11);
+                                               coex_dm->ps_tdma_du_adj_type =
+                                                       11;
+                                       } else if (coex_dm->cur_ps_tdma == 11) {
+                                               btc8723b2ant_ps_tdma(
+                                                       btcoexist, NORMAL_EXEC,
+                                                       true, 12);
+                                               coex_dm->ps_tdma_du_adj_type =
+                                                       12;
+                                       }
+                               } else if (result == 1) {
+                                       if (coex_dm->cur_ps_tdma == 4) {
+                                               btc8723b2ant_ps_tdma(
+                                                       btcoexist, NORMAL_EXEC,
+                                                       true, 3);
+                                               coex_dm->ps_tdma_du_adj_type =
+                                                       3;
+                                       } else if (coex_dm->cur_ps_tdma == 3) {
+                                               btc8723b2ant_ps_tdma(
+                                                       btcoexist, NORMAL_EXEC,
+                                                       true, 2);
+                                               coex_dm->ps_tdma_du_adj_type =
+                                                       2;
+                                       } else if (coex_dm->cur_ps_tdma == 2) {
+                                               btc8723b2ant_ps_tdma(
+                                                       btcoexist, NORMAL_EXEC,
+                                                       true, 2);
+                                               coex_dm->ps_tdma_du_adj_type =
+                                                       2;
+                                       } else if (coex_dm->cur_ps_tdma == 12) {
+                                               btc8723b2ant_ps_tdma(
+                                                       btcoexist, NORMAL_EXEC,
+                                                       true, 11);
+                                               coex_dm->ps_tdma_du_adj_type =
+                                                       11;
+                                       } else if (coex_dm->cur_ps_tdma == 11) {
+                                               btc8723b2ant_ps_tdma(
+                                                       btcoexist, NORMAL_EXEC,
+                                                       true, 10);
+                                               coex_dm->ps_tdma_du_adj_type =
+                                                       10;
+                                       } else if (coex_dm->cur_ps_tdma == 10) {
+                                               btc8723b2ant_ps_tdma(
+                                                       btcoexist, NORMAL_EXEC,
+                                                       true, 10);
+                                               coex_dm->ps_tdma_du_adj_type =
+                                                       10;
+                                       }
+                               }
+                       }
+               } else if (max_interval == 3) {
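+                       /* max_interval == 3 clamps harder still: the chains
+                        * never go below case 7/15 (TX paused) or 3/11 (TX
+                        * running)
+                        */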
+                       if (tx_pause) {
+                               if (coex_dm->cur_ps_tdma == 1) {
+                                       btc8723b2ant_ps_tdma(btcoexist,
+                                                            NORMAL_EXEC,
+                                                            true, 7);
+                                       coex_dm->ps_tdma_du_adj_type = 7;
+                               } else if (coex_dm->cur_ps_tdma == 2) {
+                                       btc8723b2ant_ps_tdma(btcoexist,
+                                                            NORMAL_EXEC,
+                                                            true, 7);
+                                       coex_dm->ps_tdma_du_adj_type = 7;
+                               } else if (coex_dm->cur_ps_tdma == 3) {
+                                       btc8723b2ant_ps_tdma(btcoexist,
+                                                            NORMAL_EXEC,
+                                                            true, 7);
+                                       coex_dm->ps_tdma_du_adj_type = 7;
+                               } else if (coex_dm->cur_ps_tdma == 4) {
+                                       btc8723b2ant_ps_tdma(btcoexist,
+                                                            NORMAL_EXEC,
+                                                            true, 8);
+                                       coex_dm->ps_tdma_du_adj_type = 8;
+                               }
+                               if (coex_dm->cur_ps_tdma == 9) {
+                                       btc8723b2ant_ps_tdma(btcoexist,
+                                                            NORMAL_EXEC,
+                                                            true, 15);
+                                       coex_dm->ps_tdma_du_adj_type = 15;
+                               } else if (coex_dm->cur_ps_tdma == 10) {
+                                       btc8723b2ant_ps_tdma(btcoexist,
+                                                            NORMAL_EXEC,
+                                                            true, 15);
+                                       coex_dm->ps_tdma_du_adj_type = 15;
+                               } else if (coex_dm->cur_ps_tdma == 11) {
+                                       btc8723b2ant_ps_tdma(btcoexist,
+                                                            NORMAL_EXEC,
+                                                            true, 15);
+                                       coex_dm->ps_tdma_du_adj_type = 15;
+                               } else if (coex_dm->cur_ps_tdma == 12) {
+                                       btc8723b2ant_ps_tdma(btcoexist,
+                                                            NORMAL_EXEC,
+                                                            true, 16);
+                                       coex_dm->ps_tdma_du_adj_type = 16;
+                               }
+                               if (result == -1) {
+                                       if (coex_dm->cur_ps_tdma == 5) {
+                                               btc8723b2ant_ps_tdma(
+                                                       btcoexist, NORMAL_EXEC,
+                                                       true, 7);
+                                               coex_dm->ps_tdma_du_adj_type =
+                                                       7;
+                                       } else if (coex_dm->cur_ps_tdma == 6) {
+                                               btc8723b2ant_ps_tdma(
+                                                       btcoexist, NORMAL_EXEC,
+                                                       true, 7);
+                                               coex_dm->ps_tdma_du_adj_type =
+                                                       7;
+                                       } else if (coex_dm->cur_ps_tdma == 7) {
+                                               btc8723b2ant_ps_tdma(
+                                                       btcoexist, NORMAL_EXEC,
+                                                       true, 8);
+                                               coex_dm->ps_tdma_du_adj_type =
+                                                       8;
+                                       } else if (coex_dm->cur_ps_tdma == 13) {
+                                               btc8723b2ant_ps_tdma(
+                                                       btcoexist, NORMAL_EXEC,
+                                                       true, 15);
+                                               coex_dm->ps_tdma_du_adj_type =
+                                                       15;
+                                       } else if (coex_dm->cur_ps_tdma == 14) {
+                                               btc8723b2ant_ps_tdma(
+                                                       btcoexist, NORMAL_EXEC,
+                                                       true, 15);
+                                               coex_dm->ps_tdma_du_adj_type =
+                                                       15;
+                                       } else if (coex_dm->cur_ps_tdma == 15) {
+                                               btc8723b2ant_ps_tdma(
+                                                       btcoexist, NORMAL_EXEC,
+                                                       true, 16);
+                                               coex_dm->ps_tdma_du_adj_type =
+                                                       16;
+                                       }
+                               } else if (result == 1) {
+                                       if (coex_dm->cur_ps_tdma == 8) {
+                                               btc8723b2ant_ps_tdma(
+                                                       btcoexist, NORMAL_EXEC,
+                                                       true, 7);
+                                               coex_dm->ps_tdma_du_adj_type =
+                                                       7;
+                                       } else if (coex_dm->cur_ps_tdma == 7) {
+                                               btc8723b2ant_ps_tdma(
+                                                       btcoexist, NORMAL_EXEC,
+                                                       true, 7);
+                                               coex_dm->ps_tdma_du_adj_type =
+                                                       7;
+                                       } else if (coex_dm->cur_ps_tdma == 6) {
+                                               btc8723b2ant_ps_tdma(
+                                                       btcoexist, NORMAL_EXEC,
+                                                       true, 7);
+                                               coex_dm->ps_tdma_du_adj_type =
+                                                       7;
+                                       } else if (coex_dm->cur_ps_tdma == 16) {
+                                               btc8723b2ant_ps_tdma(
+                                                       btcoexist, NORMAL_EXEC,
+                                                       true, 15);
+                                               coex_dm->ps_tdma_du_adj_type =
+                                                       15;
+                                       } else if (coex_dm->cur_ps_tdma == 15) {
+                                               btc8723b2ant_ps_tdma(
+                                                       btcoexist, NORMAL_EXEC,
+                                                       true, 15);
+                                               coex_dm->ps_tdma_du_adj_type =
+                                                       15;
+                                       } else if (coex_dm->cur_ps_tdma == 14) {
+                                               btc8723b2ant_ps_tdma(
+                                                       btcoexist, NORMAL_EXEC,
+                                                       true, 15);
+                                               coex_dm->ps_tdma_du_adj_type =
+                                                       15;
+                                       }
+                               }
+                       } else {
+                               if (coex_dm->cur_ps_tdma == 5) {
+                                       btc8723b2ant_ps_tdma(btcoexist,
+                                                            NORMAL_EXEC,
+                                                            true, 3);
+                                       coex_dm->ps_tdma_du_adj_type = 3;
+                               } else if (coex_dm->cur_ps_tdma == 6) {
+                                       btc8723b2ant_ps_tdma(btcoexist,
+                                                            NORMAL_EXEC,
+                                                            true, 3);
+                                       coex_dm->ps_tdma_du_adj_type = 3;
+                               } else if (coex_dm->cur_ps_tdma == 7) {
+                                       btc8723b2ant_ps_tdma(btcoexist,
+                                                            NORMAL_EXEC,
+                                                            true, 3);
+                                       coex_dm->ps_tdma_du_adj_type = 3;
+                               } else if (coex_dm->cur_ps_tdma == 8) {
+                                       btc8723b2ant_ps_tdma(btcoexist,
+                                                            NORMAL_EXEC,
+                                                            true, 4);
+                                       coex_dm->ps_tdma_du_adj_type = 4;
+                               }
+                               if (coex_dm->cur_ps_tdma == 13) {
+                                       btc8723b2ant_ps_tdma(btcoexist,
+                                                            NORMAL_EXEC,
+                                                            true, 11);
+                                       coex_dm->ps_tdma_du_adj_type = 11;
+                               } else if (coex_dm->cur_ps_tdma == 14) {
+                                       btc8723b2ant_ps_tdma(btcoexist,
+                                                            NORMAL_EXEC,
+                                                            true, 11);
+                                       coex_dm->ps_tdma_du_adj_type = 11;
+                               } else if (coex_dm->cur_ps_tdma == 15) {
+                                       btc8723b2ant_ps_tdma(btcoexist,
+                                                            NORMAL_EXEC,
+                                                            true, 11);
+                                       coex_dm->ps_tdma_du_adj_type = 11;
+                               } else if (coex_dm->cur_ps_tdma == 16) {
+                                       btc8723b2ant_ps_tdma(btcoexist,
+                                                            NORMAL_EXEC,
+                                                            true, 12);
+                                       coex_dm->ps_tdma_du_adj_type = 12;
+                               }
+                               if (result == -1) {
+                                       if (coex_dm->cur_ps_tdma == 1) {
+                                               btc8723b2ant_ps_tdma(
+                                                       btcoexist, NORMAL_EXEC,
+                                                       true, 3);
+                                               coex_dm->ps_tdma_du_adj_type =
+                                                       3;
+                                       } else if (coex_dm->cur_ps_tdma == 2) {
+                                               btc8723b2ant_ps_tdma(
+                                                       btcoexist, NORMAL_EXEC,
+                                                       true, 3);
+                                               coex_dm->ps_tdma_du_adj_type =
+                                                       3;
+                                       } else if (coex_dm->cur_ps_tdma == 3) {
+                                               btc8723b2ant_ps_tdma(
+                                                       btcoexist, NORMAL_EXEC,
+                                                       true, 4);
+                                               coex_dm->ps_tdma_du_adj_type =
+                                                       4;
+                                       } else if (coex_dm->cur_ps_tdma == 9) {
+                                               btc8723b2ant_ps_tdma(
+                                                       btcoexist, NORMAL_EXEC,
+                                                       true, 11);
+                                               coex_dm->ps_tdma_du_adj_type =
+                                                       11;
+                                       } else if (coex_dm->cur_ps_tdma == 10) {
+                                               btc8723b2ant_ps_tdma(
+                                                       btcoexist, NORMAL_EXEC,
+                                                       true, 11);
+                                               coex_dm->ps_tdma_du_adj_type =
+                                                       11;
+                                       } else if (coex_dm->cur_ps_tdma == 11) {
+                                               btc8723b2ant_ps_tdma(
+                                                       btcoexist, NORMAL_EXEC,
+                                                       true, 12);
+                                               coex_dm->ps_tdma_du_adj_type =
+                                                       12;
+                                       }
+                               } else if (result == 1) {
+                                       if (coex_dm->cur_ps_tdma == 4) {
+                                               btc8723b2ant_ps_tdma(
+                                                       btcoexist, NORMAL_EXEC,
+                                                       true, 3);
+                                               coex_dm->ps_tdma_du_adj_type =
+                                                       3;
+                                       } else if (coex_dm->cur_ps_tdma == 3) {
+                                               btc8723b2ant_ps_tdma(
+                                                       btcoexist, NORMAL_EXEC,
+                                                       true, 3);
+                                               coex_dm->ps_tdma_du_adj_type =
+                                                       3;
+                                       } else if (coex_dm->cur_ps_tdma == 2) {
+                                               btc8723b2ant_ps_tdma(
+                                                       btcoexist, NORMAL_EXEC,
+                                                       true, 3);
+                                               coex_dm->ps_tdma_du_adj_type =
+                                                       3;
+                                       } else if (coex_dm->cur_ps_tdma == 12) {
+                                               btc8723b2ant_ps_tdma(
+                                                       btcoexist, NORMAL_EXEC,
+                                                       true, 11);
+                                               coex_dm->ps_tdma_du_adj_type =
+                                                       11;
+                                       } else if (coex_dm->cur_ps_tdma == 11) {
+                                               btc8723b2ant_ps_tdma(
+                                                       btcoexist, NORMAL_EXEC,
+                                                       true, 11);
+                                               coex_dm->ps_tdma_du_adj_type =
+                                                       11;
+                                       } else if (coex_dm->cur_ps_tdma == 10) {
+                                               btc8723b2ant_ps_tdma(
+                                                       btcoexist, NORMAL_EXEC,
+                                                       true, 11);
+                                               coex_dm->ps_tdma_du_adj_type =
+                                                       11;
+                                       }
+                               }
+                       }
                }
-
-               RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
-                        "[BTCoex], max Interval = %d\n", max_interval);
-               if (max_interval == 1)
-                       set_tdma_int1(btcoexist, tx_pause, result);
-               else if (max_interval == 2)
-                       set_tdma_int2(btcoexist, tx_pause, result);
-               else if (max_interval == 3)
-                       set_tdma_int3(btcoexist, tx_pause, result);
        }
 
-       /*if current PsTdma not match with the recorded one (when scan, dhcp..),
-        *then we have to adjust it back to the previous recorded one.
+       RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                "[BTCoex], max Interval = %d\n", max_interval);
+
+       /* if the current PsTdma does not match the recorded one (scan, dhcp,
+        * ...), then adjust it back to the previously recorded one.
         */
-       if (coex_dm->cur_ps_tdma != coex_dm->tdma_adj_type) {
+       if (coex_dm->cur_ps_tdma != coex_dm->ps_tdma_du_adj_type) {
                bool scan = false, link = false, roam = false;
                RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
                         "[BTCoex], PsTdma type dismatch!!!, curPsTdma=%d, recordPsTdma=%d\n",
-                        coex_dm->cur_ps_tdma, coex_dm->tdma_adj_type);
+                        coex_dm->cur_ps_tdma, coex_dm->ps_tdma_du_adj_type);
 
                btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_SCAN, &scan);
                btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_LINK, &link);
@@ -2325,7 +2742,7 @@ static void btc8723b2ant_tdma_duration_adjust(struct btc_coexist *btcoexist,
 
                if (!scan && !link && !roam)
                        btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, true,
-                                            coex_dm->tdma_adj_type);
+                                            coex_dm->ps_tdma_du_adj_type);
                else
                        RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
                                 "[BTCoex], roaming/link/scan is under progress, will adjust next time!!!\n");
@@ -2335,58 +2752,55 @@ static void btc8723b2ant_tdma_duration_adjust(struct btc_coexist *btcoexist,
 /* SCO only or SCO+PAN(HS) */
 static void btc8723b2ant_action_sco(struct btc_coexist *btcoexist)
 {
-       u8 wifi_rssi_state;
+       u8 wifi_rssi_state, bt_rssi_state;
        u32 wifi_bw;
 
-       wifi_rssi_state = btc8723b2ant_wifi_rssi_state(btcoexist,
-                                                      0, 2, 15, 0);
+       wifi_rssi_state = btc8723b2ant_wifi_rssi_state(btcoexist, 0, 2, 15, 0);
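+       /* BT RSSI threshold is the coex switch threshold lowered by the
+        * current switch_thres_offset
+        */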
+       bt_rssi_state = btc8723b2ant_bt_rssi_state(
+               btcoexist, 2, BT_8723B_2ANT_BT_RSSI_COEXSWITCH_THRES -
+                                              coex_dm->switch_thres_offset,
+               0);
 
        btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x1, 0xfffff, 0x0);
 
+       btc8723b2ant_limited_rx(btcoexist, NORMAL_EXEC, false, false, 0x8);
        btc8723b2ant_fw_dac_swing_lvl(btcoexist, NORMAL_EXEC, 4);
 
-       if (btc8723b_need_dec_pwr(btcoexist))
-               btc8723b2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, true);
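+       /* the BT power decrement is now an explicit level (2 when BT RSSI
+        * is high, 0 otherwise) rather than an on/off bool
+        */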
+       if (BTC_RSSI_HIGH(bt_rssi_state))
+               btc8723b2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, 2);
        else
-               btc8723b2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, false);
+               btc8723b2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, 0);
 
        btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_BW, &wifi_bw);
 
-       /*for SCO quality at 11b/g mode*/
        if (BTC_WIFI_BW_LEGACY == wifi_bw)
-               btc8723b_coex_tbl_type(btcoexist, NORMAL_EXEC, 2);
-       else  /*for SCO quality & wifi performance balance at 11n mode*/
-               btc8723b_coex_tbl_type(btcoexist, NORMAL_EXEC, 8);
+               /* for SCO quality at 11b/g mode */
+               btc8723b2ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 2);
+       else
+               /* for SCO quality & wifi performance balance at 11n mode */
+               btc8723b2ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 8);
 
-       /*for voice quality */
+       /* for voice quality */
        btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, false, 0);
 
        /* sw mechanism */
        if (BTC_WIFI_BW_HT40 == wifi_bw) {
                if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) ||
                    (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
-                       btc8723b2ant_sw_mechanism1(btcoexist, true, true,
-                                                  false, false);
-                       btc8723b2ant_sw_mechanism2(btcoexist, true, false,
-                                                  true, 0x4);
+                       btc8723b2ant_sw_mechanism(btcoexist, true, true,
+                                                 false, false);
                } else {
-                       btc8723b2ant_sw_mechanism1(btcoexist, true, true,
-                                                  false, false);
-                       btc8723b2ant_sw_mechanism2(btcoexist, false, false,
-                                                  true, 0x4);
+                       btc8723b2ant_sw_mechanism(btcoexist, true, true,
+                                                 false, false);
                }
        } else {
                if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) ||
                    (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
-                       btc8723b2ant_sw_mechanism1(btcoexist, false, true,
-                                                  false, false);
-                       btc8723b2ant_sw_mechanism2(btcoexist, true, false,
-                                                  true, 0x4);
+                       btc8723b2ant_sw_mechanism(btcoexist, false, true,
+                                                 false, false);
                } else {
-                       btc8723b2ant_sw_mechanism1(btcoexist, false, true,
-                                                  false, false);
-                       btc8723b2ant_sw_mechanism2(btcoexist, false, false,
-                                                  true, 0x4);
+                       btc8723b2ant_sw_mechanism(btcoexist, false, true,
+                                                 false, false);
                }
        }
 }
@@ -2395,26 +2809,32 @@ static void btc8723b2ant_action_hid(struct btc_coexist *btcoexist)
 {
        u8 wifi_rssi_state, bt_rssi_state;
        u32 wifi_bw;
+       u8 tmp = BT_8723B_2ANT_BT_RSSI_COEXSWITCH_THRES -
+                       coex_dm->switch_thres_offset;
 
-       wifi_rssi_state = btc8723b2ant_wifi_rssi_state(btcoexist,
-                                                      0, 2, 15, 0);
-       bt_rssi_state = btc8723b2ant_bt_rssi_state(btcoexist, 2, 29, 0);
+       wifi_rssi_state = btc8723b2ant_wifi_rssi_state(btcoexist, 0, 2, 15, 0);
+       bt_rssi_state = btc8723b2ant_bt_rssi_state(btcoexist, 2, tmp, 0);
 
        btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x1, 0xfffff, 0x0);
 
+       btc8723b2ant_limited_rx(btcoexist, NORMAL_EXEC, false, false, 0x8);
        btc8723b2ant_fw_dac_swing_lvl(btcoexist, NORMAL_EXEC, 6);
 
-       if (btc8723b_need_dec_pwr(btcoexist))
-               btc8723b2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, true);
+       if (BTC_RSSI_HIGH(bt_rssi_state))
+               btc8723b2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, 2);
        else
-               btc8723b2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, false);
+               btc8723b2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, 0);
 
        btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_BW, &wifi_bw);
 
-       if (BTC_WIFI_BW_LEGACY == wifi_bw) /*/for HID at 11b/g mode*/
-               btc8723b_coex_tbl_type(btcoexist, NORMAL_EXEC, 7);
-       else  /*for HID quality & wifi performance balance at 11n mode*/
-               btc8723b_coex_tbl_type(btcoexist, NORMAL_EXEC, 9);
+       if (wifi_bw == BTC_WIFI_BW_LEGACY)
+               /* for HID at 11b/g mode */
+               btc8723b2ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 7);
+       else
+               /* for HID quality & wifi performance balance at 11n mode */
+               btc8723b2ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 9);
+
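+       /* keep WiFi in native power save while the HID link is active */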
+       btc8723b2ant_power_save_state(btcoexist, BTC_PS_WIFI_NATIVE, 0x0, 0x0);
 
        if ((bt_rssi_state == BTC_RSSI_STATE_HIGH) ||
            (bt_rssi_state == BTC_RSSI_STATE_STAY_HIGH))
@@ -2426,44 +2846,36 @@ static void btc8723b2ant_action_hid(struct btc_coexist *btcoexist)
        if (BTC_WIFI_BW_HT40 == wifi_bw) {
                if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) ||
                    (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
-                       btc8723b2ant_sw_mechanism1(btcoexist, true, true,
-                                                  false, false);
-                       btc8723b2ant_sw_mechanism2(btcoexist, true, false,
-                                                  false, 0x18);
+                       btc8723b2ant_sw_mechanism(btcoexist, true, true,
+                                                 false, false);
                } else {
-                       btc8723b2ant_sw_mechanism1(btcoexist, true, true,
-                                                  false, false);
-                       btc8723b2ant_sw_mechanism2(btcoexist, false, false,
-                                                  false, 0x18);
+                       btc8723b2ant_sw_mechanism(btcoexist, true, true,
+                                                 false, false);
                }
        } else {
                if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) ||
                    (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
-                       btc8723b2ant_sw_mechanism1(btcoexist, false, true,
-                                                  false, false);
-                       btc8723b2ant_sw_mechanism2(btcoexist, true, false,
-                                                  false, 0x18);
+                       btc8723b2ant_sw_mechanism(btcoexist, false, true,
+                                                 false, false);
                } else {
-                       btc8723b2ant_sw_mechanism1(btcoexist, false, true,
-                                                  false, false);
-                       btc8723b2ant_sw_mechanism2(btcoexist, false, false,
-                                                  false, 0x18);
+                       btc8723b2ant_sw_mechanism(btcoexist, false, true,
+                                                 false, false);
                }
        }
 }
 
-/*A2DP only / PAN(EDR) only/ A2DP+PAN(HS)*/
+/* A2DP only / PAN(EDR) only / A2DP+PAN(HS) */
 static void btc8723b2ant_action_a2dp(struct btc_coexist *btcoexist)
 {
        u8 wifi_rssi_state, wifi_rssi_state1, bt_rssi_state;
        u32 wifi_bw;
        u8 ap_num = 0;
+       u8 tmp = BT_8723B_2ANT_BT_RSSI_COEXSWITCH_THRES -
+                       coex_dm->switch_thres_offset;
 
-       wifi_rssi_state = btc8723b2ant_wifi_rssi_state(btcoexist,
-                                                      0, 2, 15, 0);
-       wifi_rssi_state1 = btc8723b2ant_wifi_rssi_state(btcoexist,
-                                                       1, 2, 40, 0);
-       bt_rssi_state = btc8723b2ant_bt_rssi_state(btcoexist, 2, 29, 0);
+       wifi_rssi_state = btc8723b2ant_wifi_rssi_state(btcoexist, 0, 2, 15, 0);
+       wifi_rssi_state1 = btc8723b2ant_wifi_rssi_state(btcoexist, 1, 2, 40, 0);
+       bt_rssi_state = btc8723b2ant_bt_rssi_state(btcoexist, 2, tmp, 0);
 
        btcoexist->btc_get(btcoexist, BTC_GET_U1_AP_NUM, &ap_num);
 
@@ -2474,35 +2886,40 @@ static void btc8723b2ant_action_a2dp(struct btc_coexist *btcoexist)
                                          0x0);
                btc8723b2ant_fw_dac_swing_lvl(btcoexist, NORMAL_EXEC, 6);
                btc8723b2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, false);
-               btc8723b_coex_tbl_type(btcoexist, NORMAL_EXEC, 0);
+               btc8723b2ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 0);
                btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, false, 1);
 
                /* sw mechanism */
                btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_BW, &wifi_bw);
                if (BTC_WIFI_BW_HT40 == wifi_bw) {
-                       btc8723b2ant_sw_mechanism1(btcoexist, true, false,
-                                                  false, false);
-                       btc8723b2ant_sw_mechanism2(btcoexist, true, false,
-                                                  true, 0x18);
+                       btc8723b2ant_sw_mechanism(btcoexist, true, false,
+                                                 false, false);
                } else {
-                       btc8723b2ant_sw_mechanism1(btcoexist, false, false,
-                                                  false, false);
-                       btc8723b2ant_sw_mechanism2(btcoexist, true, false,
-                                                  true, 0x18);
+                       btc8723b2ant_sw_mechanism(btcoexist, false, false,
+                                                 false, false);
                }
                return;
        }
 
        btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x1, 0xfffff, 0x0);
+       btc8723b2ant_limited_rx(btcoexist, NORMAL_EXEC, false, false, 0x8);
 
        btc8723b2ant_fw_dac_swing_lvl(btcoexist, NORMAL_EXEC, 6);
 
-       if (btc8723b_need_dec_pwr(btcoexist))
-               btc8723b2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, true);
+       if (BTC_RSSI_HIGH(bt_rssi_state))
+               btc8723b2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, 2);
        else
-               btc8723b2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, false);
+               btc8723b2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, 0);
 
-       btc8723b_coex_tbl_type(btcoexist, NORMAL_EXEC, 7);
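+       /* choose coex table and power-save state by link quality: table 7
+        * with native power save when both WiFi and BT RSSI are high,
+        * otherwise table 13 with LPS
+        */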
+       if (BTC_RSSI_HIGH(wifi_rssi_state1) && BTC_RSSI_HIGH(bt_rssi_state)) {
+               btc8723b2ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 7);
+               btc8723b2ant_power_save_state(btcoexist, BTC_PS_WIFI_NATIVE,
+                                             0x0, 0x0);
+       } else {
+               btc8723b2ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 13);
+               btc8723b2ant_power_save_state(btcoexist, BTC_PS_LPS_ON, 0x50,
+                                             0x4);
+       }
 
        if ((bt_rssi_state == BTC_RSSI_STATE_HIGH) ||
            (bt_rssi_state == BTC_RSSI_STATE_STAY_HIGH))
@@ -2516,104 +2933,116 @@ static void btc8723b2ant_action_a2dp(struct btc_coexist *btcoexist)
        if (BTC_WIFI_BW_HT40 == wifi_bw) {
                if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) ||
                    (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
-                       btc8723b2ant_sw_mechanism1(btcoexist, true, false,
-                                                  false, false);
-                       btc8723b2ant_sw_mechanism2(btcoexist, true, false,
-                                                  false, 0x18);
+                       btc8723b2ant_sw_mechanism(btcoexist, true, false,
+                                                 false, false);
                } else {
-                       btc8723b2ant_sw_mechanism1(btcoexist, true, false,
-                                                  false, false);
-                       btc8723b2ant_sw_mechanism2(btcoexist, false, false,
-                                                  false, 0x18);
+                       btc8723b2ant_sw_mechanism(btcoexist, true, false,
+                                                 false, false);
                }
        } else {
                if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) ||
                    (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
-                       btc8723b2ant_sw_mechanism1(btcoexist, false, false,
-                                                  false, false);
-                       btc8723b2ant_sw_mechanism2(btcoexist, true, false,
-                                                  false, 0x18);
+                       btc8723b2ant_sw_mechanism(btcoexist, false, false,
+                                                 false, false);
                } else {
-                       btc8723b2ant_sw_mechanism1(btcoexist, false, false,
-                                                  false, false);
-                       btc8723b2ant_sw_mechanism2(btcoexist, false, false,
-                                                  false, 0x18);
+                       btc8723b2ant_sw_mechanism(btcoexist, false, false,
+                                                 false, false);
                }
        }
 }
 
 static void btc8723b2ant_action_a2dp_pan_hs(struct btc_coexist *btcoexist)
 {
-       u8 wifi_rssi_state;
+       u8 wifi_rssi_state, wifi_rssi_state1, bt_rssi_state;
        u32 wifi_bw;
+       u8 tmp = BT_8723B_2ANT_WIFI_RSSI_COEXSWITCH_THRES -
+                       coex_dm->switch_thres_offset;
 
-       wifi_rssi_state = btc8723b2ant_wifi_rssi_state(btcoexist,
-                                                      0, 2, 15, 0);
+       wifi_rssi_state = btc8723b2ant_wifi_rssi_state(btcoexist, 0, 2, 15, 0);
+       wifi_rssi_state1 = btc8723b2ant_wifi_rssi_state(btcoexist, 1, 2,
+                                                       tmp, 0);
+       tmp = BT_8723B_2ANT_BT_RSSI_COEXSWITCH_THRES -
+                       coex_dm->switch_thres_offset;
+       bt_rssi_state = btc8723b2ant_bt_rssi_state(btcoexist, 2, tmp, 0);
 
        btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x1, 0xfffff, 0x0);
 
+       btc8723b2ant_limited_rx(btcoexist, NORMAL_EXEC, false, false, 0x8);
        btc8723b2ant_fw_dac_swing_lvl(btcoexist, NORMAL_EXEC, 6);
 
-       if (btc8723b_need_dec_pwr(btcoexist))
-               btc8723b2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, true);
+       if (BTC_RSSI_HIGH(bt_rssi_state))
+               btc8723b2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, 2);
        else
-               btc8723b2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, false);
+               btc8723b2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, 0);
 
-       btc8723b_coex_tbl_type(btcoexist, NORMAL_EXEC, 7);
+       if (BTC_RSSI_HIGH(wifi_rssi_state1) && BTC_RSSI_HIGH(bt_rssi_state)) {
+               btc8723b2ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 7);
+               btc8723b2ant_power_save_state(btcoexist, BTC_PS_WIFI_NATIVE,
+                                             0x0, 0x0);
+       } else {
+               btc8723b2ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 13);
+               btc8723b2ant_power_save_state(btcoexist, BTC_PS_LPS_ON, 0x50,
+                                             0x4);
+       }
 
        btc8723b2ant_tdma_duration_adjust(btcoexist, false, true, 2);
 
        /* sw mechanism */
-       btcoexist->btc_get(btcoexist,
-               BTC_GET_U4_WIFI_BW, &wifi_bw);
+       btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_BW, &wifi_bw);
        if (BTC_WIFI_BW_HT40 == wifi_bw) {
                if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) ||
                    (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
-                       btc8723b2ant_sw_mechanism1(btcoexist, true, false,
-                                                  false, false);
-                       btc8723b2ant_sw_mechanism2(btcoexist, true, false,
-                                                  false, 0x18);
+                       btc8723b2ant_sw_mechanism(btcoexist, true, false,
+                                                 false, false);
                } else {
-                       btc8723b2ant_sw_mechanism1(btcoexist, true, false,
-                                                  false, false);
-                       btc8723b2ant_sw_mechanism2(btcoexist, false, false,
-                                                  false, 0x18);
+                       btc8723b2ant_sw_mechanism(btcoexist, true, false,
+                                                 false, false);
                }
        } else {
                if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) ||
                    (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
-                       btc8723b2ant_sw_mechanism1(btcoexist, false, false,
-                                                  false, false);
-                       btc8723b2ant_sw_mechanism2(btcoexist, true, false,
-                                                  false, 0x18);
+                       btc8723b2ant_sw_mechanism(btcoexist, false, false,
+                                                 false, false);
                } else {
-                       btc8723b2ant_sw_mechanism1(btcoexist, false, false,
-                                                  false, false);
-                       btc8723b2ant_sw_mechanism2(btcoexist, false, false,
-                                                  false, 0x18);
+                       btc8723b2ant_sw_mechanism(btcoexist, false, false,
+                                                 false, false);
                }
        }
 }
 
 static void btc8723b2ant_action_pan_edr(struct btc_coexist *btcoexist)
 {
-       u8 wifi_rssi_state, bt_rssi_state;
+       u8 wifi_rssi_state, wifi_rssi_state1, bt_rssi_state;
        u32 wifi_bw;
+       u8 tmp = BT_8723B_2ANT_WIFI_RSSI_COEXSWITCH_THRES -
+                       coex_dm->switch_thres_offset;
 
-       wifi_rssi_state = btc8723b2ant_wifi_rssi_state(btcoexist,
-                                                      0, 2, 15, 0);
-       bt_rssi_state = btc8723b2ant_bt_rssi_state(btcoexist, 2, 29, 0);
+       wifi_rssi_state = btc8723b2ant_wifi_rssi_state(btcoexist, 0, 2, 15, 0);
+       wifi_rssi_state1 = btc8723b2ant_wifi_rssi_state(btcoexist, 1, 2,
+                                                       tmp, 0);
+       tmp = BT_8723B_2ANT_BT_RSSI_COEXSWITCH_THRES -
+                       coex_dm->switch_thres_offset;
+       bt_rssi_state = btc8723b2ant_bt_rssi_state(btcoexist, 2, tmp, 0);
 
        btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x1, 0xfffff, 0x0);
 
+       btc8723b2ant_limited_rx(btcoexist, NORMAL_EXEC, false, false, 0x8);
        btc8723b2ant_fw_dac_swing_lvl(btcoexist, NORMAL_EXEC, 6);
 
-       if (btc8723b_need_dec_pwr(btcoexist))
-               btc8723b2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, true);
+       if (BTC_RSSI_HIGH(bt_rssi_state))
+               btc8723b2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, 2);
        else
-               btc8723b2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, false);
+               btc8723b2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, 0);
 
-       btc8723b_coex_tbl_type(btcoexist, NORMAL_EXEC, 10);
+       if (BTC_RSSI_HIGH(wifi_rssi_state1) && BTC_RSSI_HIGH(bt_rssi_state)) {
+               btc8723b2ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 10);
+               btc8723b2ant_power_save_state(btcoexist, BTC_PS_WIFI_NATIVE,
+                                             0x0, 0x0);
+       } else {
+               btc8723b2ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 13);
+               btc8723b2ant_power_save_state(btcoexist, BTC_PS_LPS_ON, 0x50,
+                                             0x4);
+       }
 
        if ((bt_rssi_state == BTC_RSSI_STATE_HIGH) ||
            (bt_rssi_state == BTC_RSSI_STATE_STAY_HIGH))
@@ -2626,109 +3055,109 @@ static void btc8723b2ant_action_pan_edr(struct btc_coexist *btcoexist)
        if (BTC_WIFI_BW_HT40 == wifi_bw) {
                if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) ||
                    (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
-                       btc8723b2ant_sw_mechanism1(btcoexist, true, false,
-                                                  false, false);
-                       btc8723b2ant_sw_mechanism2(btcoexist, true, false,
-                                                  false, 0x18);
+                       btc8723b2ant_sw_mechanism(btcoexist, true, false,
+                                                 false, false);
                } else {
-                       btc8723b2ant_sw_mechanism1(btcoexist, true, false,
-                                                  false, false);
-                       btc8723b2ant_sw_mechanism2(btcoexist, false, false,
-                                                  false, 0x18);
+                       btc8723b2ant_sw_mechanism(btcoexist, true, false,
+                                                 false, false);
                }
        } else {
                if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) ||
                    (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
-                       btc8723b2ant_sw_mechanism1(btcoexist, false, false,
-                                                  false, false);
-                       btc8723b2ant_sw_mechanism2(btcoexist, true, false,
-                                                  false, 0x18);
+                       btc8723b2ant_sw_mechanism(btcoexist, false, false,
+                                                 false, false);
                } else {
-                       btc8723b2ant_sw_mechanism1(btcoexist, false, false,
-                                                  false, false);
-                       btc8723b2ant_sw_mechanism2(btcoexist, false, false,
-                                                  false, 0x18);
+                       btc8723b2ant_sw_mechanism(btcoexist, false, false,
+                                                 false, false);
                }
        }
 }
 
-/*PAN(HS) only*/
+/* PAN(HS) only */
 static void btc8723b2ant_action_pan_hs(struct btc_coexist *btcoexist)
 {
-       u8 wifi_rssi_state;
+       u8 wifi_rssi_state, wifi_rssi_state1, bt_rssi_state;
        u32 wifi_bw;
+       u8 tmp = BT_8723B_2ANT_WIFI_RSSI_COEXSWITCH_THRES -
+                       coex_dm->switch_thres_offset;
 
-       wifi_rssi_state = btc8723b2ant_wifi_rssi_state(btcoexist,
-                                                      0, 2, 15, 0);
+       wifi_rssi_state = btc8723b2ant_wifi_rssi_state(btcoexist, 0, 2, 15, 0);
+       wifi_rssi_state1 = btc8723b2ant_wifi_rssi_state(btcoexist, 1, 2,
+                                                       tmp, 0);
+       tmp = BT_8723B_2ANT_BT_RSSI_COEXSWITCH_THRES -
+                       coex_dm->switch_thres_offset;
+       bt_rssi_state = btc8723b2ant_bt_rssi_state(btcoexist, 2, tmp, 0);
 
        btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x1, 0xfffff, 0x0);
 
+       btc8723b2ant_limited_rx(btcoexist, NORMAL_EXEC, false, false, 0x8);
        btc8723b2ant_fw_dac_swing_lvl(btcoexist, NORMAL_EXEC, 6);
 
-       if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) ||
-           (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH))
-               btc8723b2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, true);
+       if (BTC_RSSI_HIGH(bt_rssi_state))
+               btc8723b2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, 2);
        else
-               btc8723b2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, false);
-
-       btc8723b_coex_tbl_type(btcoexist, NORMAL_EXEC, 7);
+               btc8723b2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, 0);
 
+       btc8723b2ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 7);
        btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, false, 1);
 
        btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_BW, &wifi_bw);
        if (BTC_WIFI_BW_HT40 == wifi_bw) {
                if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) ||
                    (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
-                       btc8723b2ant_sw_mechanism1(btcoexist, true, false,
-                                                  false, false);
-                       btc8723b2ant_sw_mechanism2(btcoexist, true, false,
-                                                  false, 0x18);
+                       btc8723b2ant_sw_mechanism(btcoexist, true, false,
+                                                 false, false);
                } else {
-                       btc8723b2ant_sw_mechanism1(btcoexist, true, false,
-                                                  false, false);
-                       btc8723b2ant_sw_mechanism2(btcoexist, false, false,
-                                                  false, 0x18);
+                       btc8723b2ant_sw_mechanism(btcoexist, true, false,
+                                                 false, false);
                }
        } else {
                if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) ||
                    (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
-                       btc8723b2ant_sw_mechanism1(btcoexist, false, false,
-                                                  false, false);
-                       btc8723b2ant_sw_mechanism2(btcoexist, true, false,
-                                                  false, 0x18);
+                       btc8723b2ant_sw_mechanism(btcoexist, false, false,
+                                                 false, false);
                } else {
-                       btc8723b2ant_sw_mechanism1(btcoexist, false, false,
-                                                  false, false);
-                       btc8723b2ant_sw_mechanism2(btcoexist, false, false,
-                                                  false, 0x18);
+                       btc8723b2ant_sw_mechanism(btcoexist, false, false,
+                                                 false, false);
                }
        }
 }
 
-/*PAN(EDR)+A2DP*/
+/* PAN(EDR) + A2DP */
 static void btc8723b2ant_action_pan_edr_a2dp(struct btc_coexist *btcoexist)
 {
-       u8 wifi_rssi_state, bt_rssi_state;
+       u8 wifi_rssi_state, wifi_rssi_state1, bt_rssi_state;
        u32 wifi_bw;
+       u8 tmp = BT_8723B_2ANT_WIFI_RSSI_COEXSWITCH_THRES -
+                       coex_dm->switch_thres_offset;
 
-       wifi_rssi_state = btc8723b2ant_wifi_rssi_state(btcoexist,
-                                                      0, 2, 15, 0);
-       bt_rssi_state = btc8723b2ant_bt_rssi_state(btcoexist, 2, 29, 0);
+       wifi_rssi_state = btc8723b2ant_wifi_rssi_state(btcoexist, 0, 2, 15, 0);
+       wifi_rssi_state1 = btc8723b2ant_wifi_rssi_state(btcoexist, 1, 2,
+                                                       tmp, 0);
+       tmp = BT_8723B_2ANT_BT_RSSI_COEXSWITCH_THRES -
+                       coex_dm->switch_thres_offset;
+       bt_rssi_state = btc8723b2ant_bt_rssi_state(btcoexist, 2, tmp, 0);
 
        btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x1, 0xfffff, 0x0);
 
        btc8723b2ant_fw_dac_swing_lvl(btcoexist, NORMAL_EXEC, 6);
 
-       if (btc8723b_need_dec_pwr(btcoexist))
-               btc8723b2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, true);
+       if (BTC_RSSI_HIGH(bt_rssi_state))
+               btc8723b2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, 2);
        else
-               btc8723b2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, false);
+               btc8723b2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, 0);
 
+       if (BTC_RSSI_HIGH(wifi_rssi_state1) && BTC_RSSI_HIGH(bt_rssi_state))
+               btc8723b2ant_power_save_state(btcoexist, BTC_PS_WIFI_NATIVE,
+                                             0x0, 0x0);
+       else
+               btc8723b2ant_power_save_state(btcoexist, BTC_PS_LPS_ON, 0x50,
+                                             0x4);
        btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_BW, &wifi_bw);
 
        if ((bt_rssi_state == BTC_RSSI_STATE_HIGH) ||
            (bt_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
-               btc8723b_coex_tbl_type(btcoexist, NORMAL_EXEC, 12);
+               btc8723b2ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 12);
                if (BTC_WIFI_BW_HT40 == wifi_bw)
                        btc8723b2ant_tdma_duration_adjust(btcoexist, false,
                                                          true, 3);
@@ -2736,74 +3165,80 @@ static void btc8723b2ant_action_pan_edr_a2dp(struct btc_coexist *btcoexist)
                        btc8723b2ant_tdma_duration_adjust(btcoexist, false,
                                                          false, 3);
        } else {
-               btc8723b_coex_tbl_type(btcoexist, NORMAL_EXEC, 7);
-               btc8723b2ant_tdma_duration_adjust(btcoexist, false, true, 3);
+               btc8723b2ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 7);
+               btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 3);
        }
 
        /* sw mechanism */
        if (BTC_WIFI_BW_HT40 == wifi_bw) {
                if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) ||
                    (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
-                       btc8723b2ant_sw_mechanism1(btcoexist, true, false,
-                                                  false, false);
-                       btc8723b2ant_sw_mechanism2(btcoexist, true, false,
-                                                  false, 0x18);
+                       btc8723b2ant_sw_mechanism(btcoexist, true, false,
+                                                 false, false);
                } else {
-                       btc8723b2ant_sw_mechanism1(btcoexist, true, false,
-                                                  false, false);
-                       btc8723b2ant_sw_mechanism2(btcoexist, false, false,
-                                                  false, 0x18);
+                       btc8723b2ant_sw_mechanism(btcoexist, true, false,
+                                                 false, false);
                }
        } else {
                if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) ||
                    (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
-                       btc8723b2ant_sw_mechanism1(btcoexist, false, false,
-                                                  false, false);
-                       btc8723b2ant_sw_mechanism2(btcoexist, true, false,
-                                                  false, 0x18);
+                       btc8723b2ant_sw_mechanism(btcoexist, false, false,
+                                                 false, false);
                } else {
-                       btc8723b2ant_sw_mechanism1(btcoexist, false, false,
-                                                  false, false);
-                       btc8723b2ant_sw_mechanism2(btcoexist, false, false,
-                                                  false, 0x18);
+                       btc8723b2ant_sw_mechanism(btcoexist, false, false,
+                                                 false, false);
                }
        }
 }
 
 static void btc8723b2ant_action_pan_edr_hid(struct btc_coexist *btcoexist)
 {
-       u8 wifi_rssi_state, bt_rssi_state;
+       u8 wifi_rssi_state, wifi_rssi_state1, bt_rssi_state;
        u32 wifi_bw;
-
-       wifi_rssi_state = btc8723b2ant_wifi_rssi_state(btcoexist,
-                                                      0, 2, 15, 0);
-       bt_rssi_state = btc8723b2ant_bt_rssi_state(btcoexist, 2, 29, 0);
+       u8 tmp = BT_8723B_2ANT_WIFI_RSSI_COEXSWITCH_THRES -
+                       coex_dm->switch_thres_offset;
+
+       wifi_rssi_state = btc8723b2ant_wifi_rssi_state(btcoexist, 0, 2, 15, 0);
+       wifi_rssi_state1 = btc8723b2ant_wifi_rssi_state(btcoexist, 1, 2,
+                                                       tmp, 0);
+       tmp = BT_8723B_2ANT_BT_RSSI_COEXSWITCH_THRES -
+                       coex_dm->switch_thres_offset;
+       bt_rssi_state = btc8723b2ant_bt_rssi_state(btcoexist, 2, tmp, 0);
        btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_BW, &wifi_bw);
 
-       if (btc8723b_need_dec_pwr(btcoexist))
-               btc8723b2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, true);
+       btc8723b2ant_limited_rx(btcoexist, NORMAL_EXEC, false, false, 0x8);
+
+       if (BTC_RSSI_HIGH(bt_rssi_state))
+               btc8723b2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, 2);
        else
-               btc8723b2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, false);
+               btc8723b2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, 0);
+
+       if (BTC_RSSI_HIGH(wifi_rssi_state1) && BTC_RSSI_HIGH(bt_rssi_state)) {
+               btc8723b2ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 7);
+               btc8723b2ant_power_save_state(btcoexist, BTC_PS_WIFI_NATIVE,
+                                             0x0, 0x0);
+       } else {
+               btc8723b2ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 14);
+               btc8723b2ant_power_save_state(btcoexist, BTC_PS_LPS_ON, 0x50,
+                                             0x4);
+       }
 
        if ((bt_rssi_state == BTC_RSSI_STATE_HIGH) ||
            (bt_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
                if (BTC_WIFI_BW_HT40 == wifi_bw) {
                        btc8723b2ant_fw_dac_swing_lvl(btcoexist, NORMAL_EXEC,
                                                      3);
-                       btc8723b_coex_tbl_type(btcoexist, NORMAL_EXEC, 11);
                        btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x1,
                                                  0xfffff, 0x780);
                } else {
                        btc8723b2ant_fw_dac_swing_lvl(btcoexist, NORMAL_EXEC,
                                                      6);
-                       btc8723b_coex_tbl_type(btcoexist, NORMAL_EXEC, 7);
                        btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x1,
                                                  0xfffff, 0x0);
                }
                btc8723b2ant_tdma_duration_adjust(btcoexist, true, false, 2);
        } else {
                btc8723b2ant_fw_dac_swing_lvl(btcoexist, NORMAL_EXEC, 6);
-               btc8723b_coex_tbl_type(btcoexist, NORMAL_EXEC, 11);
                btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x1, 0xfffff,
                                          0x0);
                btc8723b2ant_tdma_duration_adjust(btcoexist, true, true, 2);
@@ -2813,54 +3248,61 @@ static void btc8723b2ant_action_pan_edr_hid(struct btc_coexist *btcoexist)
        if (BTC_WIFI_BW_HT40 == wifi_bw) {
                if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) ||
                    (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
-                       btc8723b2ant_sw_mechanism1(btcoexist, true, true,
-                                                  false, false);
-                       btc8723b2ant_sw_mechanism2(btcoexist, true, false,
-                                                  false, 0x18);
+                       btc8723b2ant_sw_mechanism(btcoexist, true, true,
+                                                 false, false);
                } else {
-                       btc8723b2ant_sw_mechanism1(btcoexist, true, true,
-                                                  false, false);
-                       btc8723b2ant_sw_mechanism2(btcoexist, false, false,
-                                                  false, 0x18);
+                       btc8723b2ant_sw_mechanism(btcoexist, true, true,
+                                                 false, false);
                }
        } else {
                if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) ||
                    (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
-                       btc8723b2ant_sw_mechanism1(btcoexist, false, true,
-                                                  false, false);
-                       btc8723b2ant_sw_mechanism2(btcoexist, true, false,
-                                                  false, 0x18);
+                       btc8723b2ant_sw_mechanism(btcoexist, false, true,
+                                                 false, false);
                } else {
-                       btc8723b2ant_sw_mechanism1(btcoexist, false, true,
-                                                  false, false);
-                       btc8723b2ant_sw_mechanism2(btcoexist, false, false,
-                                                  false, 0x18);
+                       btc8723b2ant_sw_mechanism(btcoexist, false, true,
+                                                 false, false);
                }
        }
 }
 
-/* HID+A2DP+PAN(EDR) */
+/* HID + A2DP + PAN(EDR) */
 static void btc8723b2ant_action_hid_a2dp_pan_edr(struct btc_coexist *btcoexist)
 {
-       u8 wifi_rssi_state, bt_rssi_state;
+       u8 wifi_rssi_state, wifi_rssi_state1, bt_rssi_state;
        u32 wifi_bw;
+       u8 tmp = BT_8723B_2ANT_WIFI_RSSI_COEXSWITCH_THRES -
+                       coex_dm->switch_thres_offset;
 
-       wifi_rssi_state = btc8723b2ant_wifi_rssi_state(btcoexist,
-                                                      0, 2, 15, 0);
-       bt_rssi_state = btc8723b2ant_bt_rssi_state(btcoexist, 2, 29, 0);
+       wifi_rssi_state = btc8723b2ant_wifi_rssi_state(btcoexist, 0, 2, 15, 0);
+       wifi_rssi_state1 = btc8723b2ant_wifi_rssi_state(btcoexist, 1, 2,
+                                                       tmp, 0);
+       tmp = BT_8723B_2ANT_BT_RSSI_COEXSWITCH_THRES -
+                       coex_dm->switch_thres_offset;
+       bt_rssi_state = btc8723b2ant_bt_rssi_state(btcoexist, 2, tmp, 0);
 
        btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x1, 0xfffff, 0x0);
 
+       btc8723b2ant_limited_rx(btcoexist, NORMAL_EXEC, false, false, 0x8);
        btc8723b2ant_fw_dac_swing_lvl(btcoexist, NORMAL_EXEC, 6);
 
-       if (btc8723b_need_dec_pwr(btcoexist))
-               btc8723b2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, true);
+       if (BTC_RSSI_HIGH(bt_rssi_state))
+               btc8723b2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, 2);
        else
-               btc8723b2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, false);
+               btc8723b2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, 0);
+
+       if (BTC_RSSI_HIGH(wifi_rssi_state1) && BTC_RSSI_HIGH(bt_rssi_state)) {
+               btc8723b2ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 7);
+               btc8723b2ant_power_save_state(btcoexist, BTC_PS_WIFI_NATIVE,
+                                             0x0, 0x0);
+       } else {
+               btc8723b2ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 14);
+               btc8723b2ant_power_save_state(btcoexist, BTC_PS_LPS_ON, 0x50,
+                                             0x4);
+       }
 
        btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_BW, &wifi_bw);
 
-       btc8723b_coex_tbl_type(btcoexist, NORMAL_EXEC, 7);
 
        if ((bt_rssi_state == BTC_RSSI_STATE_HIGH) ||
            (bt_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
@@ -2878,94 +3320,148 @@ static void btc8723b2ant_action_hid_a2dp_pan_edr(struct btc_coexist *btcoexist)
        if (BTC_WIFI_BW_HT40 == wifi_bw) {
                if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) ||
                    (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
-                       btc8723b2ant_sw_mechanism1(btcoexist, true, true,
-                                                  false, false);
-                       btc8723b2ant_sw_mechanism2(btcoexist, true, false,
-                                                  false, 0x18);
+                       btc8723b2ant_sw_mechanism(btcoexist, true, true,
+                                                 false, false);
                } else {
-                       btc8723b2ant_sw_mechanism1(btcoexist, true, true,
-                                                  false, false);
-                       btc8723b2ant_sw_mechanism2(btcoexist, false, false,
-                                                  false, 0x18);
+                       btc8723b2ant_sw_mechanism(btcoexist, true, true,
+                                                 false, false);
                }
        } else {
                if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) ||
                    (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
-                       btc8723b2ant_sw_mechanism1(btcoexist, false, true,
-                                                  false, false);
-                       btc8723b2ant_sw_mechanism2(btcoexist, true, false,
-                                                  false, 0x18);
+                       btc8723b2ant_sw_mechanism(btcoexist, false, true,
+                                                 false, false);
                } else {
-                       btc8723b2ant_sw_mechanism1(btcoexist, false, true,
-                                                  false, false);
-                       btc8723b2ant_sw_mechanism2(btcoexist, false, false,
-                                                  false, 0x18);
+                       btc8723b2ant_sw_mechanism(btcoexist, false, true,
+                                                 false, false);
                }
        }
 }
 
 static void btc8723b2ant_action_hid_a2dp(struct btc_coexist *btcoexist)
 {
-       u8 wifi_rssi_state, bt_rssi_state;
+       u8 wifi_rssi_state, wifi_rssi_state1, bt_rssi_state;
        u32 wifi_bw;
+       u8 ap_num = 0;
+       u8 tmp = BT_8723B_2ANT_WIFI_RSSI_COEXSWITCH_THRES -
+                       coex_dm->switch_thres_offset;
 
-       wifi_rssi_state = btc8723b2ant_wifi_rssi_state(btcoexist,
-                                                      0, 2, 15, 0);
-       bt_rssi_state = btc8723b2ant_bt_rssi_state(btcoexist, 2, 29, 0);
+       wifi_rssi_state = btc8723b2ant_wifi_rssi_state(btcoexist, 0, 2, 15, 0);
+       wifi_rssi_state1 = btc8723b2ant_wifi_rssi_state(btcoexist, 1, 2,
+                                                       tmp, 0);
+       tmp = BT_8723B_2ANT_BT_RSSI_COEXSWITCH_THRES -
+                        coex_dm->switch_thres_offset;
+       bt_rssi_state = btc8723b2ant_bt_rssi_state(btcoexist, 3, tmp, 37);
 
        btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x1, 0xfffff, 0x0);
 
+       btc8723b2ant_limited_rx(btcoexist, NORMAL_EXEC, false, true, 0x5);
        btc8723b2ant_fw_dac_swing_lvl(btcoexist, NORMAL_EXEC, 6);
 
-       if (btc8723b_need_dec_pwr(btcoexist))
-               btc8723b2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, true);
-       else
-               btc8723b2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, false);
-
        btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_BW, &wifi_bw);
 
-       btc8723b_coex_tbl_type(btcoexist, NORMAL_EXEC, 7);
+       if (wifi_bw == BTC_WIFI_BW_LEGACY) {
+               if (BTC_RSSI_HIGH(bt_rssi_state))
+                       btc8723b2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, 2);
+               else if (BTC_RSSI_MEDIUM(bt_rssi_state))
+                       btc8723b2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, 2);
+               else
+                       btc8723b2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, 0);
+       } else {
+               /* only in 802.11n mode do we need to decrease BT power to level 4 */
+               if (BTC_RSSI_HIGH(bt_rssi_state)) {
+                       /* need to check the AP number or not */
+                       if (ap_num < 10)
+                               btc8723b2ant_dec_bt_pwr(btcoexist,
+                                                       NORMAL_EXEC, 4);
+                       else
+                               btc8723b2ant_dec_bt_pwr(btcoexist,
+                                                       NORMAL_EXEC, 2);
+               } else if (BTC_RSSI_MEDIUM(bt_rssi_state)) {
+                       btc8723b2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, 2);
+               } else {
+                       btc8723b2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, 0);
+               }
+       }
 
-       if ((bt_rssi_state == BTC_RSSI_STATE_HIGH) ||
-           (bt_rssi_state == BTC_RSSI_STATE_STAY_HIGH))
-               btc8723b2ant_tdma_duration_adjust(btcoexist, true, false, 2);
-       else
-               btc8723b2ant_tdma_duration_adjust(btcoexist, true, true, 2);
+       if (BTC_RSSI_HIGH(wifi_rssi_state1) && BTC_RSSI_HIGH(bt_rssi_state)) {
+               btc8723b2ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 7);
+               btc8723b2ant_power_save_state(btcoexist, BTC_PS_WIFI_NATIVE,
+                                             0x0, 0x0);
+       } else {
+               btc8723b2ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 14);
+               btc8723b2ant_power_save_state(btcoexist, BTC_PS_LPS_ON, 0x50,
+                                             0x4);
+       }
+
+       if (BTC_RSSI_HIGH(bt_rssi_state)) {
+               if (ap_num < 10)
+                       btc8723b2ant_tdma_duration_adjust(btcoexist, true,
+                                                         false, 1);
+               else
+                       btc8723b2ant_tdma_duration_adjust(btcoexist, true,
+                                                         false, 3);
+       } else {
+               btc8723b2ant_fw_dac_swing_lvl(btcoexist, NORMAL_EXEC, 18);
+               btcoexist->btc_write_1byte(btcoexist, 0x456, 0x38);
+               btcoexist->btc_write_2byte(btcoexist, 0x42a, 0x0808);
+               btcoexist->btc_write_4byte(btcoexist, 0x430, 0x0);
+               btcoexist->btc_write_4byte(btcoexist, 0x434, 0x01010000);
+
+               if (ap_num < 10)
+                       btc8723b2ant_tdma_duration_adjust(btcoexist, true,
+                                                         true, 1);
+               else
+                       btc8723b2ant_tdma_duration_adjust(btcoexist, true,
+                                                         true, 3);
+       }
 
        /* sw mechanism */
        if (BTC_WIFI_BW_HT40 == wifi_bw) {
                if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) ||
                    (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
-                       btc8723b2ant_sw_mechanism1(btcoexist, true, true,
-                                                  false, false);
-                       btc8723b2ant_sw_mechanism2(btcoexist, true, false,
-                                                  false, 0x18);
+                       btc8723b2ant_sw_mechanism(btcoexist, true, true,
+                                                 false, false);
                } else {
-                       btc8723b2ant_sw_mechanism1(btcoexist, true, true,
-                                                  false, false);
-                       btc8723b2ant_sw_mechanism2(btcoexist, false, false,
-                                                  false, 0x18);
+                       btc8723b2ant_sw_mechanism(btcoexist, true, true,
+                                                 false, false);
                }
        } else {
                if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) ||
                    (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
-                       btc8723b2ant_sw_mechanism1(btcoexist, false, true,
-                                                  false, false);
-                       btc8723b2ant_sw_mechanism2(btcoexist, true, false,
-                                                  false, 0x18);
+                       btc8723b2ant_sw_mechanism(btcoexist, false, true,
+                                                 false, false);
                } else {
-                       btc8723b2ant_sw_mechanism1(btcoexist, false, true,
-                                                  false, false);
-                       btc8723b2ant_sw_mechanism2(btcoexist, false, false,
-                                                  false, 0x18);
+                       btc8723b2ant_sw_mechanism(btcoexist, false, true,
+                                                 false, false);
                }
        }
 }
 
+static void btc8723b2ant_action_wifi_multi_port(struct btc_coexist *btcoexist)
+{
+       btc8723b2ant_fw_dac_swing_lvl(btcoexist, NORMAL_EXEC, 6);
+       btc8723b2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, 0);
+
+       /* sw all off */
+       btc8723b2ant_sw_mechanism(btcoexist, false, false, false, false);
+
+       /* hw all off */
+       btc8723b2ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 0);
+
+       btc8723b2ant_power_save_state(btcoexist, BTC_PS_WIFI_NATIVE, 0x0, 0x0);
+       btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, false, 1);
+}
+
 static void btc8723b2ant_run_coexist_mechanism(struct btc_coexist *btcoexist)
 {
        struct rtl_priv *rtlpriv = btcoexist->adapter;
        u8 algorithm = 0;
+       u32 num_of_wifi_link = 0;
+       u32 wifi_link_status = 0;
+       struct btc_bt_link_info *bt_link_info = &btcoexist->bt_link_info;
+       bool miracast_plus_bt = false;
+       bool scan = false, link = false, roam = false;
 
        RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
                 "[BTCoex], RunCoexistMechanism()===>\n");
@@ -2989,14 +3485,46 @@ static void btc8723b2ant_run_coexist_mechanism(struct btc_coexist *btcoexist)
                         "[BTCoex], BT is under inquiry/page scan !!\n");
                btc8723b2ant_action_bt_inquiry(btcoexist);
                return;
-       } else {
-               if (coex_dm->need_recover_0x948) {
-                       coex_dm->need_recover_0x948 = false;
-                       btcoexist->btc_write_2byte(btcoexist, 0x948,
-                                                  coex_dm->backup_0x948);
-               }
        }
 
+       btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_SCAN, &scan);
+       btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_LINK, &link);
+       btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_ROAM, &roam);
+
+       if (scan || link || roam) {
+               RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                        "[BTCoex], WiFi is under Link Process !!\n");
+               btc8723b2ant_action_wifi_link_process(btcoexist);
+               return;
+       }
+
+       /* for P2P */
+       btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_LINK_STATUS,
+                          &wifi_link_status);
+       num_of_wifi_link = wifi_link_status >> 16;
+
+       if ((num_of_wifi_link >= 2) ||
+           (wifi_link_status & WIFI_P2P_GO_CONNECTED)) {
+               RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                        "############# [BTCoex],  Multi-Port num_of_wifi_link = %d, wifi_link_status = 0x%x\n",
+                        num_of_wifi_link, wifi_link_status);
+
+               if (bt_link_info->bt_link_exist)
+                       miracast_plus_bt = true;
+               else
+                       miracast_plus_bt = false;
+
+               btcoexist->btc_set(btcoexist, BTC_SET_BL_MIRACAST_PLUS_BT,
+                                  &miracast_plus_bt);
+               btc8723b2ant_action_wifi_multi_port(btcoexist);
+
+               return;
+       }
+
+       miracast_plus_bt = false;
+       btcoexist->btc_set(btcoexist, BTC_SET_BL_MIRACAST_PLUS_BT,
+                          &miracast_plus_bt);
+
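
For reference, the multi-port test can be read as a standalone predicate; the field layout (link count in the high 16 bits, per-port flags such as WIFI_P2P_GO_CONNECTED in the low bits) is inferred from the code above — a sketch only, not code from this patch:

    /* sketch: factored-out form of the multi-port check above */
    static bool is_wifi_multi_port(u32 wifi_link_status)
    {
            u32 num_of_wifi_link = wifi_link_status >> 16;

            return (num_of_wifi_link >= 2) ||
                   (wifi_link_status & WIFI_P2P_GO_CONNECTED);
    }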
        coex_dm->cur_algorithm = algorithm;
        RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
                 "[BTCoex], Algorithm = %d\n",
@@ -3077,19 +3605,37 @@ static void btc8723b2ant_run_coexist_mechanism(struct btc_coexist *btcoexist)
 
 static void btc8723b2ant_wifioff_hwcfg(struct btc_coexist *btcoexist)
 {
+       bool is_in_mp_mode = false;
+       u8 h2c_parameter[2] = {0};
+       u32 fw_ver = 0;
+
        /* set wlan_act to low */
        btcoexist->btc_write_1byte(btcoexist, 0x76e, 0x4);
-       /* Force GNT_BT to High */
-       btcoexist->btc_write_1byte_bitmask(btcoexist, 0x765, 0x18, 0x3);
-       /* BT select s0/s1 is controlled by BT */
-       btcoexist->btc_write_1byte_bitmask(btcoexist, 0x67, 0x20, 0x0);
+
+       /* WiFi standby while GNT_BT 0 -> 1 */
+       btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x1, 0xfffff, 0x780);
+
+       btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_FW_VER, &fw_ver);
+       if (fw_ver >= 0x180000) {
+               /* Use H2C to set GNT_BT to HIGH */
+               h2c_parameter[0] = 1;
+               btcoexist->btc_fill_h2c(btcoexist, 0x6E, 1, h2c_parameter);
+       } else {
+               btcoexist->btc_write_1byte(btcoexist, 0x765, 0x18);
+       }
+
+       btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_IS_IN_MP_MODE,
+                          &is_in_mp_mode);
+       if (!is_in_mp_mode)
+               /* BT select s0/s1 is controlled by BT */
+               btcoexist->btc_write_1byte_bitmask(btcoexist, 0x67, 0x20, 0x0);
+       else
+               /* BT select s0/s1 is controlled by WiFi */
+               btcoexist->btc_write_1byte_bitmask(btcoexist, 0x67, 0x20, 0x1);
 }
 
 /*********************************************************************
- *  work around function start with wa_btc8723b2ant_
- *********************************************************************/
-/*********************************************************************
- *  extern function start with EXbtc8723b2ant_
+ *  extern function start with ex_btc8723b2ant_
  *********************************************************************/
 void ex_btc8723b2ant_init_hwconfig(struct btc_coexist *btcoexist)
 {
@@ -3107,19 +3653,90 @@ void ex_btc8723b2ant_init_hwconfig(struct btc_coexist *btcoexist)
        u8tmp |= 0x5;
        btcoexist->btc_write_1byte(btcoexist, 0x790, u8tmp);
 
-       /*Antenna config */
+       /* Antenna config */
        btc8723b2ant_set_ant_path(btcoexist, BTC_ANT_WIFI_AT_MAIN,
                                  true, false);
+       coex_sta->dis_ver_info_cnt = 0;
+
        /* PTA parameter */
-       btc8723b_coex_tbl_type(btcoexist, FORCE_EXEC, 0);
+       btc8723b2ant_coex_table_with_type(btcoexist, FORCE_EXEC, 0);
 
        /* Enable counter statistics */
-       /*0x76e[3] =1, WLAN_Act control by PTA*/
-       btcoexist->btc_write_1byte(btcoexist, 0x76e, 0xc);
+       /* 0x76e[3] = 1, WLAN_ACT controlled by PTA */
+       btcoexist->btc_write_1byte(btcoexist, 0x76e, 0x4);
        btcoexist->btc_write_1byte(btcoexist, 0x778, 0x3);
        btcoexist->btc_write_1byte_bitmask(btcoexist, 0x40, 0x20, 0x1);
 }
 
+void ex_btc8723b2ant_power_on_setting(struct btc_coexist *btcoexist)
+{
+       struct btc_board_info *board_info = &btcoexist->board_info;
+       u16 u16tmp = 0x0;
+       u32 value = 0;
+
+       btcoexist->btc_write_1byte(btcoexist, 0x67, 0x20);
+
+       /* enable BB, REG_SYS_FUNC_EN such that we can write 0x948 correctly */
+       u16tmp = btcoexist->btc_read_2byte(btcoexist, 0x2);
+       btcoexist->btc_write_2byte(btcoexist, 0x2, u16tmp | BIT0 | BIT1);
+
+       btcoexist->btc_write_4byte(btcoexist, 0x948, 0x0);
+
+       if (btcoexist->chip_interface == BTC_INTF_USB) {
+               /* fixed at S0 for USB interface */
+               board_info->btdm_ant_pos = BTC_ANTENNA_AT_AUX_PORT;
+       } else {
+               /* for PCIE and SDIO interface, we check efuse 0xc3[6] */
+               if (board_info->single_ant_path == 0) {
+                       /* set to S1 */
+                       board_info->btdm_ant_pos = BTC_ANTENNA_AT_MAIN_PORT;
+               } else if (board_info->single_ant_path == 1) {
+                       /* set to S0 */
+                       board_info->btdm_ant_pos = BTC_ANTENNA_AT_AUX_PORT;
+               }
+               btcoexist->btc_set(btcoexist, BTC_SET_ACT_ANTPOSREGRISTRY_CTRL,
+                                  &value);
+       }
+}
+
+void ex_btc8723b2ant_pre_load_firmware(struct btc_coexist *btcoexist)
+{
+       struct btc_board_info *board_info = &btcoexist->board_info;
+       u8 u8tmp = 0x4; /* Set BIT2 by default since it's 2ant case */
+
+       /* S0 or S1 setting and local register setting (by this, FW can get
+        * the antenna number, S0/S1, ... info)
+        *
+        * Local setting bit definitions
+        *      BIT0: "0" : no antenna inverse; "1" : antenna inverse
+        *      BIT1: "0" : internal switch; "1" : external switch
+        *      BIT2: "0" : one antenna; "1" : two antennas
+        *
+        * NOTE: the default here is all internal switch and one antenna,
+        * i.e. BIT1 = 0 and BIT2 = 0
+        */
+       if (btcoexist->chip_interface == BTC_INTF_USB) {
+               /* fixed at S0 for USB interface */
+               u8tmp |= 0x1; /* antenna inverse */
+               btcoexist->btc_write_local_reg_1byte(btcoexist, 0xfe08, u8tmp);
+       } else {
+               /* for PCIE and SDIO interface, we check efuse 0xc3[6] */
+               if (board_info->single_ant_path == 0) {
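+                       /* set to S1 (default, no antenna inverse) */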
+               } else if (board_info->single_ant_path == 1) {
+                       /* set to S0 */
+                       u8tmp |= 0x1; /* antenna inverse */
+               }
+
+               if (btcoexist->chip_interface == BTC_INTF_PCI)
+                       btcoexist->btc_write_local_reg_1byte(btcoexist, 0x384,
+                                                            u8tmp);
+               else if (btcoexist->chip_interface == BTC_INTF_SDIO)
+                       btcoexist->btc_write_local_reg_1byte(btcoexist, 0x60,
+                                                            u8tmp);
+       }
+}
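
A minimal sketch of how the local-setting byte composes, following the bit definitions in the comment above; ant_inverse and ext_switch are illustrative flags, not fields from this patch:

    u8 local_setting = 0;
    bool ant_inverse = false, ext_switch = false;   /* illustrative only */

    local_setting |= BIT2;          /* two antennas (always set, 2-ant case) */
    if (ant_inverse)
            local_setting |= BIT0;  /* antenna inverse */
    if (ext_switch)
            local_setting |= BIT1;  /* external antenna switch */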
+
 void ex_btc8723b2ant_init_coex_dm(struct btc_coexist *btcoexist)
 {
        struct rtl_priv *rtlpriv = btcoexist->adapter;
@@ -3215,7 +3832,6 @@ void ex_btc8723b2ant_display_coex_info(struct btc_coexist *btcoexist)
                 ((wifi_traffic_dir == BTC_WIFI_TRAFFIC_TX) ?
                  "uplink" : "downlink")));
 
-
        RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = %d / %d / %d / %d",
                 "SCO/HID/PAN/A2DP",
                 bt_link_info->sco_exist, bt_link_info->hid_exist,
@@ -3265,7 +3881,7 @@ void ex_btc8723b2ant_display_coex_info(struct btc_coexist *btcoexist)
                 ps_tdma_case, coex_dm->auto_tdma_adjust);
 
        RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = %d/ %d ",
-                "DecBtPwr/ IgnWlanAct", coex_dm->cur_dec_bt_pwr,
+                "DecBtPwr/ IgnWlanAct", coex_dm->cur_dec_bt_pwr_lvl,
                 coex_dm->cur_ignore_wlan_act);
 
        /* Hw setting */
@@ -3396,6 +4012,12 @@ void ex_btc8723b2ant_lps_notify(struct btc_coexist *btcoexist, u8 type)
 void ex_btc8723b2ant_scan_notify(struct btc_coexist *btcoexist, u8 type)
 {
        struct rtl_priv *rtlpriv = btcoexist->adapter;
+       u32 u32tmp;
+       u8 u8tmpa, u8tmpb;
+
+       u32tmp = btcoexist->btc_read_4byte(btcoexist, 0x948);
+       u8tmpa = btcoexist->btc_read_1byte(btcoexist, 0x765);
+       u8tmpb = btcoexist->btc_read_1byte(btcoexist, 0x76e);
 
        if (BTC_SCAN_START == type)
                RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
@@ -3403,6 +4025,12 @@ void ex_btc8723b2ant_scan_notify(struct btc_coexist *btcoexist, u8 type)
        else if (BTC_SCAN_FINISH == type)
                RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
                         "[BTCoex], SCAN FINISH notify\n");
+       btcoexist->btc_get(btcoexist, BTC_GET_U1_AP_NUM,
+                          &coex_sta->scan_ap_num);
+
+       RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                "############# [BTCoex], 0x948=0x%x, 0x765=0x%x, 0x76e=0x%x\n",
+                u32tmp, u8tmpa, u8tmpb);
 }
 
 void ex_btc8723b2ant_connect_notify(struct btc_coexist *btcoexist, u8 type)
@@ -3424,6 +4052,7 @@ void ex_btc8723b2ant_media_status_notify(struct btc_coexist *btcoexist,
        u8 h2c_parameter[3] = {0};
        u32 wifi_bw;
        u8 wifi_central_chnl;
+       u8 ap_num = 0;
 
        if (BTC_MEDIA_CONNECT == type)
                RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
@@ -3441,10 +4070,16 @@ void ex_btc8723b2ant_media_status_notify(struct btc_coexist *btcoexist,
                h2c_parameter[1] = wifi_central_chnl;
                btcoexist->btc_get(btcoexist,
                        BTC_GET_U4_WIFI_BW, &wifi_bw);
-               if (BTC_WIFI_BW_HT40 == wifi_bw)
+               if (wifi_bw == BTC_WIFI_BW_HT40) {
                        h2c_parameter[2] = 0x30;
-               else
-                       h2c_parameter[2] = 0x20;
+               } else {
+                       btcoexist->btc_get(btcoexist, BTC_GET_U1_AP_NUM,
+                                          &ap_num);
+                       if (ap_num < 10)
+                               h2c_parameter[2] = 0x30;
+                       else
+                               h2c_parameter[2] = 0x20;
+               }
        }
 
        coex_dm->wifi_chnl_info[0] = h2c_parameter[0];
@@ -3492,7 +4127,7 @@ void ex_btc8723b2ant_bt_info_notify(struct btc_coexist *btcoexist,
                coex_sta->bt_info_c2h[rsp_source][i] = tmpbuf[i];
                if (i == 1)
                        bt_info = tmpbuf[i];
-               if (i == length-1)
+               if (i == length - 1)
                        RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
                                 "0x%02x]\n", tmpbuf[i]);
                else
@@ -3507,17 +4142,30 @@ void ex_btc8723b2ant_bt_info_notify(struct btc_coexist *btcoexist,
        }
 
        if (BT_INFO_SRC_8723B_2ANT_WIFI_FW != rsp_source) {
-               coex_sta->bt_retry_cnt =        /* [3:0]*/
+               coex_sta->bt_retry_cnt =
                        coex_sta->bt_info_c2h[rsp_source][2] & 0xf;
 
+               if (coex_sta->bt_retry_cnt >= 1)
+                       coex_sta->pop_event_cnt++;
+
                coex_sta->bt_rssi =
                        coex_sta->bt_info_c2h[rsp_source][3] * 2 + 10;
 
-               coex_sta->bt_info_ext =
-                       coex_sta->bt_info_c2h[rsp_source][4];
+               coex_sta->bt_info_ext = coex_sta->bt_info_c2h[rsp_source][4];
+
+               if (coex_sta->bt_info_c2h[rsp_source][2] & 0x20)
+                       coex_sta->c2h_bt_remote_name_req = true;
+               else
+                       coex_sta->c2h_bt_remote_name_req = false;
+
+               if (coex_sta->bt_info_c2h[rsp_source][1] == 0x49)
+                       coex_sta->a2dp_bit_pool =
+                               coex_sta->bt_info_c2h[rsp_source][6];
+               else
+                       coex_sta->a2dp_bit_pool = 0;
 
                /* Here we need to resend some wifi info to BT
-                    because bt is reset and loss of the info.
+                * because BT is reset and loses the info.
                 */
                if ((coex_sta->bt_info_ext & BIT1)) {
                        RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
@@ -3552,20 +4200,21 @@ void ex_btc8723b2ant_bt_info_notify(struct btc_coexist *btcoexist,
 #endif
        }
 
-       /* check BIT2 first ==> check if bt is under inquiry or page scan*/
+       /* check BIT2 first ==> check if bt is under inquiry or page scan */
        if (bt_info & BT_INFO_8723B_2ANT_B_INQ_PAGE)
                coex_sta->c2h_bt_inquiry_page = true;
        else
                coex_sta->c2h_bt_inquiry_page = false;
 
-       /* set link exist status*/
        if (!(bt_info & BT_INFO_8723B_2ANT_B_CONNECTION)) {
+               /* set link exist status */
                coex_sta->bt_link_exist = false;
                coex_sta->pan_exist = false;
                coex_sta->a2dp_exist = false;
                coex_sta->hid_exist = false;
                coex_sta->sco_exist = false;
-       } else { /* connection exists */
+       } else {
+               /* connection exists */
                coex_sta->bt_link_exist = true;
                if (bt_info & BT_INFO_8723B_2ANT_B_FTP)
                        coex_sta->pan_exist = true;
@@ -3583,6 +4232,16 @@ void ex_btc8723b2ant_bt_info_notify(struct btc_coexist *btcoexist,
                        coex_sta->sco_exist = true;
                else
                        coex_sta->sco_exist = false;
+
+               if ((!coex_sta->hid_exist) &&
+                   (!coex_sta->c2h_bt_inquiry_page) &&
+                   (!coex_sta->sco_exist)) {
+                       if (coex_sta->high_priority_tx +
+                                   coex_sta->high_priority_rx >= 160) {
+                               coex_sta->hid_exist = true;
+                               bt_info = bt_info | 0x28;
+                       }
+               }
        }
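
The traffic heuristic just added compensates for HID links that BT fails to report; restated as a standalone predicate (a sketch, not code from this patch), with the caller then setting hid_exist and ORing 0x28 into bt_info as above:

    /* sketch: infer a hidden HID link from sustained high-priority traffic
     * when BT reports neither HID nor SCO and is not inquiry/page scanning
     */
    static bool bt_hid_traffic_heuristic(struct coex_sta_8723b_2ant *sta)
    {
            return !sta->hid_exist && !sta->sco_exist &&
                   !sta->c2h_bt_inquiry_page &&
                   (sta->high_priority_tx + sta->high_priority_rx >= 160);
    }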
 
        btc8723b2ant_update_bt_link_info(btcoexist);
@@ -3640,46 +4299,67 @@ void ex_btc8723b2ant_halt_notify(struct btc_coexist *btcoexist)
        ex_btc8723b2ant_media_status_notify(btcoexist, BTC_MEDIA_DISCONNECT);
 }
 
+void ex_btc8723b2ant_pnp_notify(struct btc_coexist *btcoexist, u8 pnp_state)
+{
+       struct rtl_priv *rtlpriv = btcoexist->adapter;
+
+       RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, "[BTCoex], Pnp notify\n");
+
+       if (pnp_state == BTC_WIFI_PNP_SLEEP) {
+               RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                        "[BTCoex], Pnp notify to SLEEP\n");
+
+               /* The driver does not leave IPS/LPS when it is going to
+                * sleep, so BTCoexistence thinks wifi is still under IPS/LPS.
+                *
+                * BT should clear the UnderIPS/UnderLPS state to avoid a
+                * state mismatch after wakeup.
+                */
+               coex_sta->under_ips = false;
+               coex_sta->under_lps = false;
+       } else if (pnp_state == BTC_WIFI_PNP_WAKE_UP) {
+               RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                        "[BTCoex], Pnp notify to WAKE UP\n");
+               ex_btc8723b2ant_init_hwconfig(btcoexist);
+               btc8723b2ant_init_coex_dm(btcoexist);
+               btc8723b2ant_query_bt_info(btcoexist);
+       }
+}
+
 void ex_btc8723b2ant_periodical(struct btc_coexist *btcoexist)
 {
        struct rtl_priv *rtlpriv = btcoexist->adapter;
-       struct btc_board_info *board_info = &btcoexist->board_info;
-       struct btc_stack_info *stack_info = &btcoexist->stack_info;
-       static u8 dis_ver_info_cnt;
-       u32 fw_ver = 0, bt_patch_ver = 0;
+       struct btc_bt_link_info *bt_link_info = &btcoexist->bt_link_info;
 
        RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
                 "[BTCoex], ==========================Periodical===========================\n");
 
-       if (dis_ver_info_cnt <= 5) {
-               dis_ver_info_cnt += 1;
-               RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
-                        "[BTCoex], ****************************************************************\n");
-               RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
-                        "[BTCoex], Ant PG Num/ Ant Mech/ Ant Pos = %d/ %d/ %d\n",
-                             board_info->pg_ant_num,
-                             board_info->btdm_ant_num,
-                             board_info->btdm_ant_pos);
-               RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
-                        "[BTCoex], BT stack/ hci ext ver = %s / %d\n",
-                             stack_info->profile_notified ? "Yes" : "No",
-                             stack_info->hci_version);
-               btcoexist->btc_get(btcoexist, BTC_GET_U4_BT_PATCH_VER,
-                                  &bt_patch_ver);
-               btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_FW_VER, &fw_ver);
-               RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
-                        "[BTCoex], CoexVer/ fw_ver/ PatchVer = %d_%x/ 0x%x/ 0x%x(%d)\n",
-                             glcoex_ver_date_8723b_2ant, glcoex_ver_8723b_2ant,
-                             fw_ver, bt_patch_ver, bt_patch_ver);
-               RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
-                        "[BTCoex], ****************************************************************\n");
+       if (coex_sta->dis_ver_info_cnt <= 5) {
+               coex_sta->dis_ver_info_cnt += 1;
+               if (coex_sta->dis_ver_info_cnt == 3) {
+                       /* Antenna config to set 0x765 = 0x0 (GNT_BT control by
+                        * PTA) after initial
+                        */
+                       RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                                "[BTCoex], Set GNT_BT control by PTA\n");
+                       btc8723b2ant_set_ant_path(
+                               btcoexist, BTC_ANT_WIFI_AT_MAIN, false, false);
+               }
        }
 
 #if (BT_AUTO_REPORT_ONLY_8723B_2ANT == 0)
        btc8723b2ant_query_bt_info(btcoexist);
-       btc8723b2ant_monitor_bt_ctr(btcoexist);
-       btc8723b2ant_monitor_bt_enable_disable(btcoexist);
 #else
+       btc8723b2ant_monitor_bt_ctr(btcoexist);
+       btc8723b2ant_monitor_wifi_ctr(btcoexist);
+
+       /* some BT speakers send High-Priority pkts before playing,
+        * which makes a HID link appear to exist
+        */
+       if ((coex_sta->high_priority_tx + coex_sta->high_priority_rx < 50) &&
+           (bt_link_info->hid_exist))
+               bt_link_info->hid_exist = false;
+
        if (btc8723b2ant_is_wifi_status_changed(btcoexist) ||
            coex_dm->auto_tdma_adjust)
                btc8723b2ant_run_coexist_mechanism(btcoexist);
index 567f354caf95c5e6f1449c1f9296ec9877646fd0..18a35c7faba92a1e8a0d0f858960329fc4a30462 100644
 
 #define BTC_RSSI_COEX_THRESH_TOL_8723B_2ANT            2
 
+/* WiFi RSSI Threshold for 2-Ant TDMA/1-Ant PS-TDMA translation */
+#define BT_8723B_2ANT_WIFI_RSSI_COEXSWITCH_THRES       42
+/* BT RSSI Threshold for 2-Ant TDMA/1-Ant PS-TDMA translation */
+#define BT_8723B_2ANT_BT_RSSI_COEXSWITCH_THRES         46
+
 enum BT_INFO_SRC_8723B_2ANT {
        BT_INFO_SRC_8723B_2ANT_WIFI_FW                  = 0x0,
        BT_INFO_SRC_8723B_2ANT_BT_RSP                   = 0x1,
@@ -75,8 +80,8 @@ enum BT_8723B_2ANT_COEX_ALGO {
 
 struct coex_dm_8723b_2ant {
        /* fw mechanism */
-       bool pre_dec_bt_pwr;
-       bool cur_dec_bt_pwr;
+       bool pre_dec_bt_pwr_lvl;
+       bool cur_dec_bt_pwr_lvl;
        u8 pre_fw_dac_swing_lvl;
        u8 cur_fw_dac_swing_lvl;
        bool cur_ignore_wlan_act;
@@ -84,7 +89,7 @@ struct coex_dm_8723b_2ant {
        u8 pre_ps_tdma;
        u8 cur_ps_tdma;
        u8 ps_tdma_para[5];
-       u8 tdma_adj_type;
+       u8 ps_tdma_du_adj_type;
        bool reset_tdma_adjust;
        bool auto_tdma_adjust;
        bool pre_ps_tdma_on;
@@ -122,8 +127,13 @@ struct coex_dm_8723b_2ant {
        u8 bt_status;
        u8 wifi_chnl_info[3];
 
-       bool need_recover_0x948;
-       u16 backup_0x948;
+       u8 pre_lps;
+       u8 cur_lps;
+       u8 pre_rpwm;
+       u8 cur_rpwm;
+
+       bool is_switch_to_1dot5_ant;
+       u8 switch_thres_offset;
 };
 
 struct coex_sta_8723b_2ant {
@@ -132,6 +142,7 @@ struct coex_sta_8723b_2ant {
        bool a2dp_exist;
        bool hid_exist;
        bool pan_exist;
+       bool bt_abnormal_scan;
 
        bool under_lps;
        bool under_ips;
@@ -140,14 +151,33 @@ struct coex_sta_8723b_2ant {
        u32 low_priority_tx;
        u32 low_priority_rx;
        u8 bt_rssi;
+       bool bt_tx_rx_mask;
        u8 pre_bt_rssi_state;
        u8 pre_wifi_rssi_state[4];
        bool c2h_bt_info_req_sent;
        u8 bt_info_c2h[BT_INFO_SRC_8723B_2ANT_MAX][10];
        u32 bt_info_c2h_cnt[BT_INFO_SRC_8723B_2ANT_MAX];
        bool c2h_bt_inquiry_page;
+       bool c2h_bt_remote_name_req;
        u8 bt_retry_cnt;
        u8 bt_info_ext;
+       u32 pop_event_cnt;
+       u8 scan_ap_num;
+
+       u32 crc_ok_cck;
+       u32 crc_ok_11g;
+       u32 crc_ok_11n;
+       u32 crc_ok_11n_agg;
+
+       u32 crc_err_cck;
+       u32 crc_err_11g;
+       u32 crc_err_11n;
+       u32 crc_err_11n_agg;
+       bool force_lps_on;
+
+       u8 dis_ver_info_cnt;
+
+       u8 a2dp_bit_pool;
 };
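The new crc_ok_* / crc_err_* counters feed the wifi-side traffic monitoring
added by this patch (see the btc8723b2ant_monitor_wifi_ctr() call in the
periodical handler above). As a minimal sketch, one way such counters can be
reduced to a per-interval CCK error ratio; the helper name and the
per-interval snapshot assumption are illustrative, not part of the patch:

/* Sketch only: derive a CCK error ratio (permille) from the counters,
 * assuming they are snapshotted once per monitoring interval.
 */
static u32 cck_crc_err_permille(const struct coex_sta_8723b_2ant *sta)
{
	u32 total = sta->crc_ok_cck + sta->crc_err_cck;

	if (total == 0)
		return 0;	/* no CCK frames seen this interval */
	return sta->crc_err_cck * 1000 / total;
}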
 
 /*********************************************************************
index 8b689ed9a629bce30472d2b61adb562d24c071af..5e9f3b0f7a2583e36bc5c747325a52fbfb73141a 100644
@@ -23,7 +23,7 @@
  *
  *****************************************************************************/
 
-/*============================================================
+/**************************************************************
  * Description:
  *
  * This file is for RTL8821A Co-exist mechanism
  * History
  * 2012/11/15 Cosa first check in.
  *
- *============================================================
-*/
-/*============================================================
+ **************************************************************/
+
+/**************************************************************
  * include files
- *============================================================
- */
+ **************************************************************/
 #include "halbt_precomp.h"
-/*============================================================
+/**************************************************************
  * Global variables, these are static variables
- *============================================================
- */
+ **************************************************************/
 static struct coex_dm_8821a_1ant glcoex_dm_8821a_1ant;
 static struct coex_dm_8821a_1ant *coex_dm = &glcoex_dm_8821a_1ant;
 static struct coex_sta_8821a_1ant glcoex_sta_8821a_1ant;
 static struct coex_sta_8821a_1ant *coex_sta = &glcoex_sta_8821a_1ant;
+static void btc8821a1ant_act_bt_sco_hid_only_busy(struct btc_coexist *btcoexist,
+                                                 u8 wifi_status);
 
 static const char *const glbt_info_src_8821a_1ant[] = {
          "BT Info[wifi fw]",
@@ -53,22 +53,21 @@ static const char *const glbt_info_src_8821a_1ant[] = {
          "BT Info[bt auto report]",
 };
 
-static u32     glcoex_ver_date_8821a_1ant = 20130816;
-static u32     glcoex_ver_8821a_1ant = 0x41;
+static u32 glcoex_ver_date_8821a_1ant = 20130816;
+static u32 glcoex_ver_8821a_1ant = 0x41;
 
-/*============================================================
+/**************************************************************
  * local function proto type if needed
  *
- * local function start with halbtc8821a1ant_
- *============================================================
- */
-static u8 halbtc8821a1ant_bt_rssi_state(struct btc_coexist *btcoexist,
-                                       u8 level_num, u8 rssi_thresh,
-                                       u8 rssi_thresh1)
+ * local function start with btc8821a1ant_
+ **************************************************************/
+static u8 btc8821a1ant_bt_rssi_state(struct btc_coexist *btcoexist,
+                                    u8 level_num, u8 rssi_thresh,
+                                    u8 rssi_thresh1)
 {
        struct rtl_priv *rtlpriv = btcoexist->adapter;
-       long    bt_rssi = 0;
-       u8      bt_rssi_state = coex_sta->pre_bt_rssi_state;
+       long bt_rssi = 0;
+       u8 bt_rssi_state = coex_sta->pre_bt_rssi_state;
 
        bt_rssi = coex_sta->bt_rssi;
 
@@ -150,9 +149,9 @@ static u8 halbtc8821a1ant_bt_rssi_state(struct btc_coexist *btcoexist,
        return bt_rssi_state;
 }
 
-static u8 halbtc8821a1ant_WifiRssiState(struct btc_coexist *btcoexist,
-                                       u8 index, u8 level_num, u8 rssi_thresh,
-                                       u8 rssi_thresh1)
+static u8 btc8821a1ant_wifi_rssi_state(struct btc_coexist *btcoexist,
+                                      u8 index, u8 level_num, u8 rssi_thresh,
+                                      u8 rssi_thresh1)
 {
        struct rtl_priv *rtlpriv = btcoexist->adapter;
        long    wifi_rssi = 0;
@@ -165,8 +164,8 @@ static u8 halbtc8821a1ant_WifiRssiState(struct btc_coexist *btcoexist,
                     BTC_RSSI_STATE_LOW) ||
                    (coex_sta->pre_wifi_rssi_state[index] ==
                     BTC_RSSI_STATE_STAY_LOW)) {
-                       if (wifi_rssi >=
-                           (rssi_thresh+BTC_RSSI_COEX_THRESH_TOL_8821A_1ANT)) {
+                       if (wifi_rssi >= (rssi_thresh +
+                                       BTC_RSSI_COEX_THRESH_TOL_8821A_1ANT)) {
                                wifi_rssi_state = BTC_RSSI_STATE_HIGH;
                                RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
                                         "[BTCoex], wifi RSSI state switch to High\n");
@@ -197,8 +196,8 @@ static u8 halbtc8821a1ant_WifiRssiState(struct btc_coexist *btcoexist,
                     BTC_RSSI_STATE_LOW) ||
                    (coex_sta->pre_wifi_rssi_state[index] ==
                     BTC_RSSI_STATE_STAY_LOW)) {
-                       if (wifi_rssi >=
-                           (rssi_thresh+BTC_RSSI_COEX_THRESH_TOL_8821A_1ANT)) {
+                       if (wifi_rssi >= (rssi_thresh +
+                                       BTC_RSSI_COEX_THRESH_TOL_8821A_1ANT)) {
                                wifi_rssi_state = BTC_RSSI_STATE_MEDIUM;
                                RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
                                         "[BTCoex], wifi RSSI state switch to Medium\n");
@@ -211,9 +210,8 @@ static u8 halbtc8821a1ant_WifiRssiState(struct btc_coexist *btcoexist,
                        BTC_RSSI_STATE_MEDIUM) ||
                        (coex_sta->pre_wifi_rssi_state[index] ==
                        BTC_RSSI_STATE_STAY_MEDIUM)) {
-                       if (wifi_rssi >=
-                           (rssi_thresh1 +
-                            BTC_RSSI_COEX_THRESH_TOL_8821A_1ANT)) {
+                       if (wifi_rssi >= (rssi_thresh1 +
+                                       BTC_RSSI_COEX_THRESH_TOL_8821A_1ANT)) {
                                wifi_rssi_state = BTC_RSSI_STATE_HIGH;
                                RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
                                         "[BTCoex], wifi RSSI state switch to High\n");
@@ -243,14 +241,14 @@ static u8 halbtc8821a1ant_WifiRssiState(struct btc_coexist *btcoexist,
        return wifi_rssi_state;
 }
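Both renamed RSSI helpers implement the same thresholding with a hysteresis
margin (BTC_RSSI_COEX_THRESH_TOL_8821A_1ANT): a rising transition must clear
the threshold plus the tolerance, while a falling one only has to drop below
the threshold, so the state does not flap when the signal hovers near the
boundary. A condensed two-level sketch of that logic, with illustrative names:

/* Sketch: two-level RSSI state with hysteresis */
static u8 rssi_state_2level(long rssi, u8 prev, long thresh, long tol)
{
	if (prev == BTC_RSSI_STATE_LOW || prev == BTC_RSSI_STATE_STAY_LOW)
		return (rssi >= thresh + tol) ?
			BTC_RSSI_STATE_HIGH : BTC_RSSI_STATE_STAY_LOW;

	return (rssi < thresh) ?
		BTC_RSSI_STATE_LOW : BTC_RSSI_STATE_STAY_HIGH;
}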
 
-static void halbtc8821a1ant_update_ra_mask(struct btc_coexist *btcoexist,
-                                          bool force_exec, u32 dis_rate_mask)
+static void btc8821a1ant_update_ra_mask(struct btc_coexist *btcoexist,
+                                       bool force_exec, u32 dis_rate_mask)
 {
        coex_dm->cur_ra_mask = dis_rate_mask;
 
        if (force_exec ||
            (coex_dm->pre_ra_mask != coex_dm->cur_ra_mask)) {
-               btcoexist->btc_set(btcoexist, BTC_SET_ACT_UPDATE_ra_mask,
+               btcoexist->btc_set(btcoexist, BTC_SET_ACT_UPDATE_RAMASK,
                                   &coex_dm->cur_ra_mask);
        }
        coex_dm->pre_ra_mask = coex_dm->cur_ra_mask;
@@ -259,14 +257,14 @@ static void halbtc8821a1ant_update_ra_mask(struct btc_coexist *btcoexist,
 static void btc8821a1ant_auto_rate_fb_retry(struct btc_coexist *btcoexist,
                                            bool force_exec, u8 type)
 {
-       bool    wifi_under_b_mode = false;
+       bool wifi_under_b_mode = false;
 
        coex_dm->cur_arfr_type = type;
 
        if (force_exec ||
            (coex_dm->pre_arfr_type != coex_dm->cur_arfr_type)) {
                switch (coex_dm->cur_arfr_type) {
-               case 0: /* normal mode*/
+               case 0: /* normal mode */
                        btcoexist->btc_write_4byte(btcoexist, 0x430,
                                                   coex_dm->backup_arfr_cnt1);
                        btcoexist->btc_write_4byte(btcoexist, 0x434,
@@ -296,19 +294,19 @@ static void btc8821a1ant_auto_rate_fb_retry(struct btc_coexist *btcoexist,
        coex_dm->pre_arfr_type = coex_dm->cur_arfr_type;
 }
 
-static void halbtc8821a1ant_retry_limit(struct btc_coexist *btcoexist,
-                                       bool force_exec, u8 type)
+static void btc8821a1ant_retry_limit(struct btc_coexist *btcoexist,
+                                    bool force_exec, u8 type)
 {
        coex_dm->cur_retry_limit_type = type;
 
        if (force_exec ||
            (coex_dm->pre_retry_limit_type != coex_dm->cur_retry_limit_type)) {
                switch (coex_dm->cur_retry_limit_type) {
-               case 0: /* normal mode*/
+               case 0: /* normal mode */
                        btcoexist->btc_write_2byte(btcoexist, 0x42a,
                                                   coex_dm->backup_retry_limit);
                        break;
-               case 1: /* retry limit = 8*/
+               case 1: /* retry limit = 8 */
                        btcoexist->btc_write_2byte(btcoexist, 0x42a, 0x0808);
                        break;
                default:
@@ -318,19 +316,19 @@ static void halbtc8821a1ant_retry_limit(struct btc_coexist *btcoexist,
        coex_dm->pre_retry_limit_type = coex_dm->cur_retry_limit_type;
 }
 
-static void halbtc8821a1ant_ampdu_max_time(struct btc_coexist *btcoexist,
-                                          bool force_exec, u8 type)
+static void btc8821a1ant_ampdu_max_time(struct btc_coexist *btcoexist,
+                                       bool force_exec, u8 type)
 {
        coex_dm->cur_ampdu_time_type = type;
 
        if (force_exec ||
            (coex_dm->pre_ampdu_time_type != coex_dm->cur_ampdu_time_type)) {
                switch (coex_dm->cur_ampdu_time_type) {
-               case 0: /* normal mode*/
+               case 0: /* normal mode */
                        btcoexist->btc_write_1byte(btcoexist, 0x456,
                                                   coex_dm->backup_ampdu_max_time);
                        break;
-               case 1: /* AMPDU timw = 0x38 * 32us*/
+               case 1: /* AMPDU time = 0x38 * 32us */
                        btcoexist->btc_write_1byte(btcoexist, 0x456, 0x38);
                        break;
                default:
@@ -341,88 +339,85 @@ static void halbtc8821a1ant_ampdu_max_time(struct btc_coexist *btcoexist,
        coex_dm->pre_ampdu_time_type = coex_dm->cur_ampdu_time_type;
 }
 
-static void halbtc8821a1ant_limited_tx(struct btc_coexist *btcoexist,
-                                      bool force_exec, u8 ra_mask_type,
-                                      u8 arfr_type, u8 retry_limit_type,
-                                      u8 ampdu_time_type)
+static void btc8821a1ant_limited_tx(struct btc_coexist *btcoexist,
+                                   bool force_exec, u8 ra_mask_type,
+                                   u8 arfr_type, u8 retry_limit_type,
+                                   u8 ampdu_time_type)
 {
        switch (ra_mask_type) {
-       case 0: /* normal mode*/
-               halbtc8821a1ant_update_ra_mask(btcoexist, force_exec, 0x0);
+       case 0: /* normal mode */
+               btc8821a1ant_update_ra_mask(btcoexist, force_exec, 0x0);
                break;
-       case 1: /* disable cck 1/2*/
-               halbtc8821a1ant_update_ra_mask(btcoexist, force_exec,
-                                              0x00000003);
+       case 1: /* disable cck 1/2 */
+               btc8821a1ant_update_ra_mask(btcoexist, force_exec,
+                                           0x00000003);
                break;
-       case 2: /* disable cck 1/2/5.5, ofdm 6/9/12/18/24, mcs 0/1/2/3/4*/
-               halbtc8821a1ant_update_ra_mask(btcoexist, force_exec,
-                                              0x0001f1f7);
+       case 2: /* disable cck 1/2/5.5, ofdm 6/9/12/18/24, mcs 0/1/2/3/4 */
+               btc8821a1ant_update_ra_mask(btcoexist, force_exec,
+                                           0x0001f1f7);
                break;
        default:
                break;
        }
 
        btc8821a1ant_auto_rate_fb_retry(btcoexist, force_exec, arfr_type);
-       halbtc8821a1ant_retry_limit(btcoexist, force_exec, retry_limit_type);
-       halbtc8821a1ant_ampdu_max_time(btcoexist, force_exec, ampdu_time_type);
+       btc8821a1ant_retry_limit(btcoexist, force_exec, retry_limit_type);
+       btc8821a1ant_ampdu_max_time(btcoexist, force_exec, ampdu_time_type);
 }
 
-static void halbtc8821a1ant_limited_rx(struct btc_coexist *btcoexist,
-                                      bool force_exec, bool rej_ap_agg_pkt,
-                                      bool bt_ctrl_agg_buf_size,
-                                      u8 agg_buf_size)
+static void btc8821a1ant_limited_rx(struct btc_coexist *btcoexist,
+                                   bool force_exec, bool rej_ap_agg_pkt,
+                                   bool bt_ctrl_agg_buf_size, u8 agg_buf_size)
 {
        bool reject_rx_agg = rej_ap_agg_pkt;
        bool bt_ctrl_rx_agg_size = bt_ctrl_agg_buf_size;
        u8 rx_agg_size = agg_buf_size;
 
-       /*============================================*/
-       /*      Rx Aggregation related setting*/
-       /*============================================*/
+       /* Rx Aggregation related setting */
        btcoexist->btc_set(btcoexist,
                 BTC_SET_BL_TO_REJ_AP_AGG_PKT, &reject_rx_agg);
-       /* decide BT control aggregation buf size or not*/
+       /* decide BT control aggregation buf size or not */
        btcoexist->btc_set(btcoexist, BTC_SET_BL_BT_CTRL_AGG_SIZE,
                           &bt_ctrl_rx_agg_size);
-       /* aggregation buf size, only work when BT control Rx agg size.*/
+       /* aggregation buf size, only work when BT control Rx agg size */
        btcoexist->btc_set(btcoexist, BTC_SET_U1_AGG_BUF_SIZE, &rx_agg_size);
-       /* real update aggregation setting*/
+       /* real update aggregation setting */
        btcoexist->btc_set(btcoexist, BTC_SET_ACT_AGGREGATE_CTRL, NULL);
 }
 
-static void halbtc8821a1ant_monitor_bt_ctr(struct btc_coexist *btcoexist)
+static void btc8821a1ant_monitor_bt_ctr(struct btc_coexist *btcoexist)
 {
-       u32     reg_hp_tx_rx, reg_lp_tx_rx, u4_tmp;
-       u32     reg_hp_tx = 0, reg_hp_rx = 0, reg_lp_tx = 0, reg_lp_rx = 0;
+       u32 reg_hp_tx_rx, reg_lp_tx_rx, u4_tmp;
+       u32 reg_hp_tx = 0, reg_hp_rx = 0, reg_lp_tx = 0, reg_lp_rx = 0;
 
        reg_hp_tx_rx = 0x770;
        reg_lp_tx_rx = 0x774;
 
        u4_tmp = btcoexist->btc_read_4byte(btcoexist, reg_hp_tx_rx);
        reg_hp_tx = u4_tmp & MASKLWORD;
-       reg_hp_rx = (u4_tmp & MASKHWORD)>>16;
+       reg_hp_rx = (u4_tmp & MASKHWORD) >> 16;
 
        u4_tmp = btcoexist->btc_read_4byte(btcoexist, reg_lp_tx_rx);
        reg_lp_tx = u4_tmp & MASKLWORD;
-       reg_lp_rx = (u4_tmp & MASKHWORD)>>16;
+       reg_lp_rx = (u4_tmp & MASKHWORD) >> 16;
 
        coex_sta->high_priority_tx = reg_hp_tx;
        coex_sta->high_priority_rx = reg_hp_rx;
        coex_sta->low_priority_tx = reg_lp_tx;
        coex_sta->low_priority_rx = reg_lp_rx;
 
-       /* reset counter*/
+       /* reset counter */
        btcoexist->btc_write_1byte(btcoexist, 0x76e, 0xc);
 }
 
-static void halbtc8821a1ant_query_bt_info(struct btc_coexist *btcoexist)
+static void btc8821a1ant_query_bt_info(struct btc_coexist *btcoexist)
 {
        struct rtl_priv *rtlpriv = btcoexist->adapter;
        u8 h2c_parameter[1] = {0};
 
        coex_sta->c2h_bt_info_req_sent = true;
 
-       h2c_parameter[0] |= BIT0;       /* trigger*/
+       h2c_parameter[0] |= BIT0; /* trigger */
 
        RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
                 "[BTCoex], Query Bt Info, FW write 0x61 = 0x%x\n",
@@ -431,10 +426,43 @@ static void halbtc8821a1ant_query_bt_info(struct btc_coexist *btcoexist)
        btcoexist->btc_fill_h2c(btcoexist, 0x61, 1, h2c_parameter);
 }
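btc8821a1ant_query_bt_info() shows the host-to-card (H2C) request pattern used
throughout this file: fill a small parameter buffer, hand it to btc_fill_h2c()
with an opcode, and let the BT firmware answer asynchronously through the C2H
notify path. A minimal usage sketch; the opcode and trigger bit are the ones
above, the surrounding context is illustrative:

	u8 h2c_parameter[1] = {0};

	h2c_parameter[0] |= BIT0;	/* trigger: request BT status */
	btcoexist->btc_fill_h2c(btcoexist, 0x61, 1, h2c_parameter);
	/* the reply arrives later as a C2H event, not as a return value */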
 
-static void halbtc8821a1ant_update_bt_link_info(struct btc_coexist *btcoexist)
+bool btc8821a1ant_is_wifi_status_changed(struct btc_coexist *btcoexist)
+{
+       static bool pre_wifi_busy = true;
+       static bool pre_under_4way = true;
+       static bool pre_bt_hs_on = true;
+       bool wifi_busy = false, under_4way = false, bt_hs_on = false;
+       bool wifi_connected = false;
+
+       btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_CONNECTED,
+                          &wifi_connected);
+       btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_BUSY, &wifi_busy);
+       btcoexist->btc_get(btcoexist, BTC_GET_BL_HS_OPERATION, &bt_hs_on);
+       btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_4_WAY_PROGRESS,
+                          &under_4way);
+
+       if (wifi_connected) {
+               if (wifi_busy != pre_wifi_busy) {
+                       pre_wifi_busy = wifi_busy;
+                       return true;
+               }
+               if (under_4way != pre_under_4way) {
+                       pre_under_4way = under_4way;
+                       return true;
+               }
+               if (bt_hs_on != pre_bt_hs_on) {
+                       pre_bt_hs_on = bt_hs_on;
+                       return true;
+               }
+       }
+
+       return false;
+}
+
+static void btc8821a1ant_update_bt_link_info(struct btc_coexist *btcoexist)
 {
        struct btc_bt_link_info *bt_link_info = &btcoexist->bt_link_info;
-       bool    bt_hs_on = false;
+       bool bt_hs_on = false;
 
        btcoexist->btc_get(btcoexist, BTC_GET_BL_HS_OPERATION, &bt_hs_on);
 
@@ -444,13 +472,13 @@ static void halbtc8821a1ant_update_bt_link_info(struct btc_coexist *btcoexist)
        bt_link_info->pan_exist = coex_sta->pan_exist;
        bt_link_info->hid_exist = coex_sta->hid_exist;
 
-       /* work around for HS mode.*/
+       /* work around for HS mode */
        if (bt_hs_on) {
                bt_link_info->pan_exist = true;
                bt_link_info->bt_link_exist = true;
        }
 
-       /* check if Sco only*/
+       /* check if Sco only */
        if (bt_link_info->sco_exist &&
            !bt_link_info->a2dp_exist &&
            !bt_link_info->pan_exist &&
@@ -459,7 +487,7 @@ static void halbtc8821a1ant_update_bt_link_info(struct btc_coexist *btcoexist)
        else
                bt_link_info->sco_only = false;
 
-       /* check if A2dp only*/
+       /* check if A2dp only */
        if (!bt_link_info->sco_exist &&
            bt_link_info->a2dp_exist &&
            !bt_link_info->pan_exist &&
@@ -468,7 +496,7 @@ static void halbtc8821a1ant_update_bt_link_info(struct btc_coexist *btcoexist)
        else
                bt_link_info->a2dp_only = false;
 
-       /* check if Pan only*/
+       /* check if Pan only */
        if (!bt_link_info->sco_exist &&
            !bt_link_info->a2dp_exist &&
            bt_link_info->pan_exist &&
@@ -477,7 +505,7 @@ static void halbtc8821a1ant_update_bt_link_info(struct btc_coexist *btcoexist)
        else
                bt_link_info->pan_only = false;
 
-       /* check if Hid only*/
+       /* check if Hid only */
        if (!bt_link_info->sco_exist &&
            !bt_link_info->a2dp_exist &&
            !bt_link_info->pan_exist &&
@@ -487,13 +515,13 @@ static void halbtc8821a1ant_update_bt_link_info(struct btc_coexist *btcoexist)
                bt_link_info->hid_only = false;
 }
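The four "X only" checks above all encode "exactly this one profile is
present". A table-free equivalent, shown only to make the repeated pattern
explicit (the helper name is illustrative):

/* Sketch: true when profile x is present and a, b, c are all absent */
static bool only_profile(bool x, bool a, bool b, bool c)
{
	return x && !a && !b && !c;
}

/* e.g. bt_link_info->sco_only = only_profile(sco_exist, a2dp_exist,
 *					      pan_exist, hid_exist);
 */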
 
-static u8 halbtc8821a1ant_action_algorithm(struct btc_coexist *btcoexist)
+static u8 btc8821a1ant_action_algorithm(struct btc_coexist *btcoexist)
 {
        struct rtl_priv *rtlpriv = btcoexist->adapter;
        struct btc_bt_link_info *bt_link_info = &btcoexist->bt_link_info;
-       bool    bt_hs_on = false;
-       u8      algorithm = BT_8821A_1ANT_COEX_ALGO_UNDEFINED;
-       u8      num_of_diff_profile = 0;
+       bool bt_hs_on = false;
+       u8 algorithm = BT_8821A_1ANT_COEX_ALGO_UNDEFINED;
+       u8 num_of_diff_profile = 0;
 
        btcoexist->btc_get(btcoexist, BTC_GET_BL_HS_OPERATION, &bt_hs_on);
 
@@ -605,7 +633,7 @@ static u8 halbtc8821a1ant_action_algorithm(struct btc_coexist *btcoexist)
                                         "[BTCoex], BT Profile = SCO + HID + A2DP ==> HID\n");
                                algorithm = BT_8821A_1ANT_COEX_ALGO_HID;
                        } else if (bt_link_info->hid_exist &&
-                               bt_link_info->pan_exist) {
+                                  bt_link_info->pan_exist) {
                                if (bt_hs_on) {
                                        RT_TRACE(rtlpriv, COMP_BT_COEXIST,
                                                 DBG_LOUD,
@@ -618,7 +646,7 @@ static u8 halbtc8821a1ant_action_algorithm(struct btc_coexist *btcoexist)
                                        algorithm = BT_8821A_1ANT_COEX_ALGO_PANEDR_HID;
                                }
                        } else if (bt_link_info->pan_exist &&
-                               bt_link_info->a2dp_exist) {
+                                  bt_link_info->a2dp_exist) {
                                if (bt_hs_on) {
                                        RT_TRACE(rtlpriv, COMP_BT_COEXIST,
                                                 DBG_LOUD,
@@ -670,53 +698,8 @@ static u8 halbtc8821a1ant_action_algorithm(struct btc_coexist *btcoexist)
        return algorithm;
 }
 
-static void halbtc8821a1ant_set_bt_auto_report(struct btc_coexist *btcoexist,
-                                              bool enable_auto_report)
-{
-       struct rtl_priv *rtlpriv = btcoexist->adapter;
-       u8 h2c_parameter[1] = {0};
-
-       h2c_parameter[0] = 0;
-
-       if (enable_auto_report)
-               h2c_parameter[0] |= BIT0;
-
-       RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
-                "[BTCoex], BT FW auto report : %s, FW write 0x68 = 0x%x\n",
-                (enable_auto_report ? "Enabled!!" : "Disabled!!"),
-                h2c_parameter[0]);
-
-       btcoexist->btc_fill_h2c(btcoexist, 0x68, 1, h2c_parameter);
-}
-
-static void halbtc8821a1ant_bt_auto_report(struct btc_coexist *btcoexist,
-                                          bool force_exec,
-                                          bool enable_auto_report)
-{
-       struct rtl_priv *rtlpriv = btcoexist->adapter;
-
-       RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
-                "[BTCoex], %s BT Auto report = %s\n",
-                (force_exec ? "force to" : ""), ((enable_auto_report) ?
-                                                    "Enabled" : "Disabled"));
-       coex_dm->cur_bt_auto_report = enable_auto_report;
-
-       if (!force_exec) {
-               RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
-                        "[BTCoex], pre_bt_auto_report = %d, cur_bt_auto_report = %d\n",
-                           coex_dm->pre_bt_auto_report,
-                           coex_dm->cur_bt_auto_report);
-
-               if (coex_dm->pre_bt_auto_report == coex_dm->cur_bt_auto_report)
-                       return;
-       }
-       halbtc8821a1ant_set_bt_auto_report(btcoexist, coex_dm->cur_bt_auto_report);
-
-       coex_dm->pre_bt_auto_report = coex_dm->cur_bt_auto_report;
-}
-
-static void btc8821a1ant_set_sw_pen_tx_rate(struct btc_coexist *btcoexist,
-                                           bool low_penalty_ra)
+static void btc8821a1ant_set_sw_penalty_tx_rate(struct btc_coexist *btcoexist,
+                                               bool low_penalty_ra)
 {
        struct rtl_priv *rtlpriv = btcoexist->adapter;
        u8 h2c_parameter[6] = {0};
@@ -725,11 +708,11 @@ static void btc8821a1ant_set_sw_pen_tx_rate(struct btc_coexist *btcoexist,
 
        if (low_penalty_ra) {
                h2c_parameter[1] |= BIT0;
-               /*normal rate except MCS7/6/5, OFDM54/48/36*/
+               /* normal rate except MCS7/6/5, OFDM54/48/36 */
                h2c_parameter[2] = 0x00;
-               h2c_parameter[3] = 0xf7;  /*MCS7 or OFDM54*/
-               h2c_parameter[4] = 0xf8;  /*MCS6 or OFDM48*/
-               h2c_parameter[5] = 0xf9;        /*MCS5 or OFDM36*/
+               h2c_parameter[3] = 0xf7; /* MCS7 or OFDM54 */
+               h2c_parameter[4] = 0xf8; /* MCS6 or OFDM48 */
+               h2c_parameter[5] = 0xf9; /* MCS5 or OFDM36 */
        }
 
        RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
@@ -739,8 +722,8 @@ static void btc8821a1ant_set_sw_pen_tx_rate(struct btc_coexist *btcoexist,
        btcoexist->btc_fill_h2c(btcoexist, 0x69, 6, h2c_parameter);
 }
 
-static void halbtc8821a1ant_low_penalty_ra(struct btc_coexist *btcoexist,
-                                          bool force_exec, bool low_penalty_ra)
+static void btc8821a1ant_low_penalty_ra(struct btc_coexist *btcoexist,
+                                       bool force_exec, bool low_penalty_ra)
 {
        coex_dm->cur_low_penalty_ra = low_penalty_ra;
 
@@ -748,14 +731,15 @@ static void halbtc8821a1ant_low_penalty_ra(struct btc_coexist *btcoexist,
                if (coex_dm->pre_low_penalty_ra == coex_dm->cur_low_penalty_ra)
                        return;
        }
-       btc8821a1ant_set_sw_pen_tx_rate(btcoexist, coex_dm->cur_low_penalty_ra);
+       btc8821a1ant_set_sw_penalty_tx_rate(btcoexist,
+                                           coex_dm->cur_low_penalty_ra);
 
        coex_dm->pre_low_penalty_ra = coex_dm->cur_low_penalty_ra;
 }
 
-static void halbtc8821a1ant_set_coex_table(struct btc_coexist *btcoexist,
-                                          u32 val0x6c0, u32 val0x6c4,
-                                          u32 val0x6c8, u8 val0x6cc)
+static void btc8821a1ant_set_coex_table(struct btc_coexist *btcoexist,
+                                       u32 val0x6c0, u32 val0x6c4,
+                                       u32 val0x6c8, u8 val0x6cc)
 {
        struct rtl_priv *rtlpriv = btcoexist->adapter;
 
@@ -776,9 +760,9 @@ static void halbtc8821a1ant_set_coex_table(struct btc_coexist *btcoexist,
        btcoexist->btc_write_1byte(btcoexist, 0x6cc, val0x6cc);
 }
 
-static void halbtc8821a1ant_coex_table(struct btc_coexist *btcoexist,
-                                      bool force_exec, u32 val0x6c0,
-                                      u32 val0x6c4, u32 val0x6c8, u8 val0x6cc)
+static void btc8821a1ant_coex_table(struct btc_coexist *btcoexist,
+                                   bool force_exec, u32 val0x6c0, u32 val0x6c4,
+                                   u32 val0x6c8, u8 val0x6cc)
 {
        struct rtl_priv *rtlpriv = btcoexist->adapter;
 
@@ -798,8 +782,8 @@ static void halbtc8821a1ant_coex_table(struct btc_coexist *btcoexist,
                    (coex_dm->pre_val_0x6cc == coex_dm->cur_val_0x6cc))
                        return;
        }
-       halbtc8821a1ant_set_coex_table(btcoexist, val0x6c0, val0x6c4,
-                                      val0x6c8, val0x6cc);
+       btc8821a1ant_set_coex_table(btcoexist, val0x6c0, val0x6c4,
+                                   val0x6c8, val0x6cc);
 
        coex_dm->pre_val_0x6c0 = coex_dm->cur_val_0x6c0;
        coex_dm->pre_val_0x6c4 = coex_dm->cur_val_0x6c4;
@@ -807,42 +791,41 @@ static void halbtc8821a1ant_coex_table(struct btc_coexist *btcoexist,
        coex_dm->pre_val_0x6cc = coex_dm->cur_val_0x6cc;
 }
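btc8821a1ant_coex_table() follows the pre_*/cur_* caching idiom used by
nearly every setter in this file: record the requested value, skip the
hardware write when nothing changed unless FORCE_EXEC demands it, then
promote cur to pre. A stripped-down sketch of the idiom (the name and the
single-register scope are illustrative):

static void set_reg_cached(struct btc_coexist *btcoexist, bool force_exec,
			   u32 reg, u32 new_val, u32 *pre_val)
{
	if (!force_exec && *pre_val == new_val)
		return;			/* unchanged, skip the write */

	btcoexist->btc_write_4byte(btcoexist, reg, new_val);
	*pre_val = new_val;		/* promote cur -> pre */
}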
 
-static void halbtc8821a1ant_coex_table_with_type(struct btc_coexist *btcoexist,
-                                                bool force_exec, u8 type)
+static void btc8821a1ant_coex_table_with_type(struct btc_coexist *btcoexist,
+                                             bool force_exec, u8 type)
 {
        switch (type) {
        case 0:
-               halbtc8821a1ant_coex_table(btcoexist, force_exec, 0x55555555,
-                                          0x55555555, 0xffffff, 0x3);
+               btc8821a1ant_coex_table(btcoexist, force_exec, 0x55555555,
+                                       0x55555555, 0xffffff, 0x3);
                break;
        case 1:
-                       halbtc8821a1ant_coex_table(btcoexist, force_exec,
-                                                  0x55555555, 0x5a5a5a5a,
-                                                  0xffffff, 0x3);
-                       break;
+               btc8821a1ant_coex_table(btcoexist, force_exec, 0x55555555,
+                                       0x5a5a5a5a, 0xffffff, 0x3);
+               break;
        case 2:
-               halbtc8821a1ant_coex_table(btcoexist, force_exec, 0x5a5a5a5a,
-                                          0x5a5a5a5a, 0xffffff, 0x3);
+               btc8821a1ant_coex_table(btcoexist, force_exec, 0x5a5a5a5a,
+                                       0x5a5a5a5a, 0xffffff, 0x3);
                break;
        case 3:
-               halbtc8821a1ant_coex_table(btcoexist, force_exec, 0x55555555,
-                                          0xaaaaaaaa, 0xffffff, 0x3);
+               btc8821a1ant_coex_table(btcoexist, force_exec, 0x5a5a5a5a,
+                                       0xaaaaaaaa, 0xffffff, 0x3);
                break;
        case 4:
-               halbtc8821a1ant_coex_table(btcoexist, force_exec, 0xffffffff,
-                                          0xffffffff, 0xffffff, 0x3);
+               btc8821a1ant_coex_table(btcoexist, force_exec, 0x55555555,
+                                       0x5a5a5a5a, 0xffffff, 0x3);
                break;
        case 5:
-               halbtc8821a1ant_coex_table(btcoexist, force_exec, 0x5fff5fff,
-                                          0x5fff5fff, 0xffffff, 0x3);
+               btc8821a1ant_coex_table(btcoexist, force_exec, 0x5a5a5a5a,
+                                       0xaaaa5a5a, 0xffffff, 0x3);
                break;
        case 6:
-               halbtc8821a1ant_coex_table(btcoexist, force_exec, 0x55ff55ff,
-                                          0x5a5a5a5a, 0xffffff, 0x3);
+               btc8821a1ant_coex_table(btcoexist, force_exec, 0x55555555,
+                                       0xaaaa5a5a, 0xffffff, 0x3);
                break;
        case 7:
-               halbtc8821a1ant_coex_table(btcoexist, force_exec, 0x5afa5afa,
-                                          0x5afa5afa, 0xffffff, 0x3);
+               btc8821a1ant_coex_table(btcoexist, force_exec, 0xaaaaaaaa,
+                                       0xaaaaaaaa, 0xffffff, 0x3);
                break;
        default:
                break;
@@ -853,10 +836,10 @@ static void btc8821a1ant_set_fw_ignore_wlan_act(struct btc_coexist *btcoexist,
                                                bool enable)
 {
        struct rtl_priv *rtlpriv = btcoexist->adapter;
-       u8      h2c_parameter[1] = {0};
+       u8 h2c_parameter[1] = {0};
 
        if (enable)
-               h2c_parameter[0] |= BIT0;       /* function enable*/
+               h2c_parameter[0] |= BIT0; /* function enable */
 
        RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
                 "[BTCoex], set FW for BT Ignore Wlan_Act, FW write 0x63 = 0x%x\n",
@@ -865,8 +848,8 @@ static void btc8821a1ant_set_fw_ignore_wlan_act(struct btc_coexist *btcoexist,
        btcoexist->btc_fill_h2c(btcoexist, 0x63, 1, h2c_parameter);
 }
 
-static void halbtc8821a1ant_ignore_wlan_act(struct btc_coexist *btcoexist,
-                                           bool force_exec, bool enable)
+static void btc8821a1ant_ignore_wlan_act(struct btc_coexist *btcoexist,
+                                        bool force_exec, bool enable)
 {
        struct rtl_priv *rtlpriv = btcoexist->adapter;
 
@@ -890,24 +873,40 @@ static void halbtc8821a1ant_ignore_wlan_act(struct btc_coexist *btcoexist,
        coex_dm->pre_ignore_wlan_act = coex_dm->cur_ignore_wlan_act;
 }
 
-static void halbtc8821a1ant_set_fw_pstdma(struct btc_coexist *btcoexist,
-                                         u8 byte1, u8 byte2, u8 byte3,
-                                         u8 byte4, u8 byte5)
+static void btc8821a1ant_set_fw_ps_tdma(struct btc_coexist *btcoexist, u8 byte1,
+                                       u8 byte2, u8 byte3, u8 byte4, u8 byte5)
 {
        struct rtl_priv *rtlpriv = btcoexist->adapter;
        u8 h2c_parameter[5] = {0};
+       u8 real_byte1 = byte1, real_byte5 = byte5;
+       bool ap_enable = false;
+
+       btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_AP_MODE_ENABLE,
+                          &ap_enable);
+
+       if (ap_enable) {
+               if (byte1 & BIT4 && !(byte1 & BIT5)) {
+                       RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                                "[BTCoex], FW for 1Ant AP mode\n");
+                       real_byte1 &= ~BIT4;
+                       real_byte1 |= BIT5;
+
+                       real_byte5 |= BIT5;
+                       real_byte5 &= ~BIT6;
+               }
+       }
 
-       h2c_parameter[0] = byte1;
+       h2c_parameter[0] = real_byte1;
        h2c_parameter[1] = byte2;
        h2c_parameter[2] = byte3;
        h2c_parameter[3] = byte4;
-       h2c_parameter[4] = byte5;
+       h2c_parameter[4] = real_byte5;
 
-       coex_dm->ps_tdma_para[0] = byte1;
+       coex_dm->ps_tdma_para[0] = real_byte1;
        coex_dm->ps_tdma_para[1] = byte2;
        coex_dm->ps_tdma_para[2] = byte3;
        coex_dm->ps_tdma_para[3] = byte4;
-       coex_dm->ps_tdma_para[4] = byte5;
+       coex_dm->ps_tdma_para[4] = real_byte5;
 
        RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
                 "[BTCoex], PS-TDMA H2C cmd =0x%x%08x\n",
@@ -919,18 +918,18 @@ static void halbtc8821a1ant_set_fw_pstdma(struct btc_coexist *btcoexist,
        btcoexist->btc_fill_h2c(btcoexist, 0x60, 5, h2c_parameter);
 }
 
-static void halbtc8821a1ant_set_lps_rpwm(struct btc_coexist *btcoexist,
-                                        u8 lps_val, u8 rpwm_val)
+static void btc8821a1ant_set_lps_rpwm(struct btc_coexist *btcoexist,
+                                     u8 lps_val, u8 rpwm_val)
 {
-       u8      lps = lps_val;
-       u8      rpwm = rpwm_val;
+       u8 lps = lps_val;
+       u8 rpwm = rpwm_val;
 
        btcoexist->btc_set(btcoexist, BTC_SET_U1_LPS_VAL, &lps);
        btcoexist->btc_set(btcoexist, BTC_SET_U1_RPWM_VAL, &rpwm);
 }
 
-static void halbtc8821a1ant_lps_rpwm(struct btc_coexist *btcoexist,
-                                    bool force_exec, u8 lps_val, u8 rpwm_val)
+static void btc8821a1ant_lps_rpwm(struct btc_coexist *btcoexist,
+                                 bool force_exec, u8 lps_val, u8 rpwm_val)
 {
        struct rtl_priv *rtlpriv = btcoexist->adapter;
 
@@ -954,33 +953,33 @@ static void halbtc8821a1ant_lps_rpwm(struct btc_coexist *btcoexist,
                        return;
                }
        }
-       halbtc8821a1ant_set_lps_rpwm(btcoexist, lps_val, rpwm_val);
+       btc8821a1ant_set_lps_rpwm(btcoexist, lps_val, rpwm_val);
 
        coex_dm->pre_lps = coex_dm->cur_lps;
        coex_dm->pre_rpwm = coex_dm->cur_rpwm;
 }
 
-static void halbtc8821a1ant_sw_mechanism(struct btc_coexist *btcoexist,
-                                        bool low_penalty_ra)
+static void btc8821a1ant_sw_mechanism(struct btc_coexist *btcoexist,
+                                     bool low_penalty_ra)
 {
        struct rtl_priv *rtlpriv = btcoexist->adapter;
 
        RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
                 "[BTCoex], SM[LpRA] = %d\n", low_penalty_ra);
 
-       halbtc8821a1ant_low_penalty_ra(btcoexist, NORMAL_EXEC, low_penalty_ra);
+       btc8821a1ant_low_penalty_ra(btcoexist, NORMAL_EXEC, low_penalty_ra);
 }
 
-static void halbtc8821a1ant_set_ant_path(struct btc_coexist *btcoexist,
-                                        u8 ant_pos_type, bool init_hw_cfg,
-                                        bool wifi_off)
+static void btc8821a1ant_set_ant_path(struct btc_coexist *btcoexist,
+                                     u8 ant_pos_type, bool init_hw_cfg,
+                                     bool wifi_off)
 {
        struct btc_board_info *board_info = &btcoexist->board_info;
        u32 u4_tmp = 0;
        u8 h2c_parameter[2] = {0};
 
        if (init_hw_cfg) {
-               /* 0x4c[23] = 0, 0x4c[24] = 1  Antenna control by WL/BT*/
+               /* 0x4c[23] = 0, 0x4c[24] = 1  Antenna control by WL/BT */
                u4_tmp = btcoexist->btc_read_4byte(btcoexist, 0x4c);
                u4_tmp &= ~BIT23;
                u4_tmp |= BIT24;
@@ -990,41 +989,42 @@ static void halbtc8821a1ant_set_ant_path(struct btc_coexist *btcoexist,
                btcoexist->btc_write_1byte(btcoexist, 0xcb4, 0x77);
 
                if (board_info->btdm_ant_pos == BTC_ANTENNA_AT_MAIN_PORT) {
-                       /*tell firmware "antenna inverse"  ==>
-                        * WRONG firmware antenna control code.==>need fw to fix
+                       /* tell firmware "antenna inverse"
+                        * WRONG firmware antenna control code, need fw to fix
                         */
                        h2c_parameter[0] = 1;
                        h2c_parameter[1] = 1;
                        btcoexist->btc_fill_h2c(btcoexist, 0x65, 2,
                                                h2c_parameter);
-                       /*Main Ant to  BT for IPS case 0x4c[23] = 1*/
-                       btcoexist->btc_write_1byte_bitmask(btcoexist, 0x64,
-                                                          0x1, 0x1);
                } else {
-                       /*tell firmware "no antenna inverse" ==>
-                        * WRONG firmware antenna control code.==>need fw to fix
+                       /* tell firmware "no antenna inverse"
+                        * WRONG firmware antenna control code, need fw to fix
                         */
                        h2c_parameter[0] = 0;
                        h2c_parameter[1] = 1;
                        btcoexist->btc_fill_h2c(btcoexist, 0x65, 2,
                                                h2c_parameter);
-                       /*Aux Ant to  BT for IPS case 0x4c[23] = 1*/
-                       btcoexist->btc_write_1byte_bitmask(btcoexist, 0x64,
-                                                          0x1, 0x0);
                }
        } else if (wifi_off) {
                /* 0x4c[24:23] = 00, Set Antenna control
-                *      by BT_RFE_CTRL  BT Vendor 0xac = 0xf002
+                * by BT_RFE_CTRL BT Vendor 0xac = 0xf002
                 */
                u4_tmp = btcoexist->btc_read_4byte(btcoexist, 0x4c);
                u4_tmp &= ~BIT23;
                u4_tmp &= ~BIT24;
                btcoexist->btc_write_4byte(btcoexist, 0x4c, u4_tmp);
+
+               /* 0x765 = 0x18 */
+               btcoexist->btc_write_1byte_bitmask(btcoexist, 0x765, 0x18, 0x3);
+       } else {
+               /* 0x765 = 0x0 */
+               btcoexist->btc_write_1byte_bitmask(btcoexist, 0x765, 0x18, 0x0);
        }
 
-       /* ext switch setting*/
+       /* ext switch setting */
        switch (ant_pos_type) {
        case BTC_ANT_PATH_WIFI:
+               btcoexist->btc_write_1byte(btcoexist, 0xcb4, 0x77);
                if (board_info->btdm_ant_pos == BTC_ANTENNA_AT_MAIN_PORT)
                        btcoexist->btc_write_1byte_bitmask(btcoexist, 0xcb7,
                                                           0x30, 0x1);
@@ -1033,6 +1033,7 @@ static void halbtc8821a1ant_set_ant_path(struct btc_coexist *btcoexist,
                                                           0x30, 0x2);
                break;
        case BTC_ANT_PATH_BT:
+               btcoexist->btc_write_1byte(btcoexist, 0xcb4, 0x77);
                if (board_info->btdm_ant_pos == BTC_ANTENNA_AT_MAIN_PORT)
                        btcoexist->btc_write_1byte_bitmask(btcoexist, 0xcb7,
                                                           0x30, 0x2);
@@ -1042,6 +1043,7 @@ static void halbtc8821a1ant_set_ant_path(struct btc_coexist *btcoexist,
                break;
        default:
        case BTC_ANT_PATH_PTA:
+               btcoexist->btc_write_1byte(btcoexist, 0xcb4, 0x66);
                if (board_info->btdm_ant_pos == BTC_ANTENNA_AT_MAIN_PORT)
                        btcoexist->btc_write_1byte_bitmask(btcoexist, 0xcb7,
                                                           0x30, 0x1);
@@ -1052,8 +1054,8 @@ static void halbtc8821a1ant_set_ant_path(struct btc_coexist *btcoexist,
        }
 }
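btc8821a1ant_set_ant_path() leans on btc_write_1byte_bitmask() for the 0x765
and 0xcb7 updates. Assuming the usual read-modify-write implementation behind
that callback (a sketch of the presumed semantics, not the actual HAL code):

/* Sketch: val is shifted to the lowest set bit of mask; other bits kept */
static void write_1byte_bitmask(struct btc_coexist *btc, u32 reg,
				u8 mask, u8 val)
{
	u8 tmp = btc->btc_read_1byte(btc, reg);
	u8 shift = ffs(mask) - 1;

	tmp = (tmp & ~mask) | ((val << shift) & mask);
	btc->btc_write_1byte(btc, reg, tmp);
}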
 
-static void halbtc8821a1ant_ps_tdma(struct btc_coexist *btcoexist,
-                                   bool force_exec, bool turn_on, u8 type)
+static void btc8821a1ant_ps_tdma(struct btc_coexist *btcoexist,
+                                bool force_exec, bool turn_on, u8 type)
 {
        struct rtl_priv *rtlpriv = btcoexist->adapter;
        u8 rssi_adjust_val = 0;
@@ -1078,185 +1080,189 @@ static void halbtc8821a1ant_ps_tdma(struct btc_coexist *btcoexist,
        if (turn_on) {
                switch (type) {
                default:
-                       halbtc8821a1ant_set_fw_pstdma(btcoexist, 0x51, 0x1a,
-                                                     0x1a, 0x0, 0x50);
+                       btc8821a1ant_set_fw_ps_tdma(btcoexist, 0x51, 0x1a,
+                                                   0x1a, 0x0, 0x50);
                        break;
                case 1:
-                       halbtc8821a1ant_set_fw_pstdma(btcoexist, 0x51, 0x3a,
-                                                     0x03, 0x10, 0x50);
+                       btc8821a1ant_set_fw_ps_tdma(btcoexist, 0x51, 0x3a,
+                                                   0x03, 0x10, 0x50);
                        rssi_adjust_val = 11;
                        break;
                case 2:
-                       halbtc8821a1ant_set_fw_pstdma(btcoexist, 0x51, 0x2b,
-                                                     0x03, 0x10, 0x50);
+                       btc8821a1ant_set_fw_ps_tdma(btcoexist, 0x51, 0x2b,
+                                                   0x03, 0x10, 0x50);
                        rssi_adjust_val = 14;
                        break;
                case 3:
-                       halbtc8821a1ant_set_fw_pstdma(btcoexist, 0x51, 0x1d,
-                                                     0x1d, 0x0, 0x10);
+                       btc8821a1ant_set_fw_ps_tdma(btcoexist, 0x51, 0x1d,
+                                                   0x1d, 0x0, 0x10);
                        break;
                case 4:
-                       halbtc8821a1ant_set_fw_pstdma(btcoexist, 0x93, 0x15,
-                                                     0x3, 0x14, 0x0);
+                       btc8821a1ant_set_fw_ps_tdma(btcoexist, 0x93, 0x15,
+                                                   0x3, 0x14, 0x0);
                        rssi_adjust_val = 17;
                        break;
                case 5:
-                       halbtc8821a1ant_set_fw_pstdma(btcoexist, 0x61, 0x15,
-                                                     0x3, 0x11, 0x10);
+                       btc8821a1ant_set_fw_ps_tdma(btcoexist, 0x61, 0x15,
+                                                   0x3, 0x11, 0x10);
                        break;
                case 6:
-                       halbtc8821a1ant_set_fw_pstdma(btcoexist, 0x13, 0xa,
-                                                     0x3, 0x0, 0x0);
+                       btc8821a1ant_set_fw_ps_tdma(btcoexist, 0x13, 0xa,
+                                                   0x3, 0x0, 0x0);
                        break;
                case 7:
-                       halbtc8821a1ant_set_fw_pstdma(btcoexist, 0x13, 0xc,
-                                                     0x5, 0x0, 0x0);
+                       btc8821a1ant_set_fw_ps_tdma(btcoexist, 0x13, 0xc,
+                                                   0x5, 0x0, 0x0);
                        break;
                case 8:
-                       halbtc8821a1ant_set_fw_pstdma(btcoexist, 0x93, 0x25,
-                                                     0x3, 0x10, 0x0);
+                       btc8821a1ant_set_fw_ps_tdma(btcoexist, 0x93, 0x25,
+                                                   0x3, 0x10, 0x0);
                        break;
                case 9:
-                       halbtc8821a1ant_set_fw_pstdma(btcoexist, 0x51, 0x21,
-                                                     0x3, 0x10, 0x50);
+                       btc8821a1ant_set_fw_ps_tdma(btcoexist, 0x51, 0x21,
+                                                   0x3, 0x10, 0x50);
                        rssi_adjust_val = 18;
                        break;
                case 10:
-                       halbtc8821a1ant_set_fw_pstdma(btcoexist, 0x13, 0xa,
-                                                     0xa, 0x0, 0x40);
+                       btc8821a1ant_set_fw_ps_tdma(btcoexist, 0x13, 0xa,
+                                                   0xa, 0x0, 0x40);
                        break;
                case 11:
-                       halbtc8821a1ant_set_fw_pstdma(btcoexist, 0x51, 0x14,
-                                                     0x03, 0x10, 0x10);
+                       btc8821a1ant_set_fw_ps_tdma(btcoexist, 0x51, 0x14,
+                                                   0x03, 0x10, 0x10);
                        rssi_adjust_val = 20;
                        break;
                case 12:
-                       halbtc8821a1ant_set_fw_pstdma(btcoexist, 0x51, 0x0a,
-                                                     0x0a, 0x0, 0x50);
+                       btc8821a1ant_set_fw_ps_tdma(btcoexist, 0x51, 0x0a,
+                                                   0x0a, 0x0, 0x50);
                        break;
                case 13:
-                       halbtc8821a1ant_set_fw_pstdma(btcoexist, 0x51, 0x18,
-                                                     0x18, 0x0, 0x10);
+                       btc8821a1ant_set_fw_ps_tdma(btcoexist, 0x51, 0x18,
+                                                   0x18, 0x0, 0x10);
                        break;
                case 14:
-                       halbtc8821a1ant_set_fw_pstdma(btcoexist, 0x51, 0x21,
-                                                     0x3, 0x10, 0x10);
+                       btc8821a1ant_set_fw_ps_tdma(btcoexist, 0x51, 0x1e,
+                                                   0x3, 0x10, 0x14);
                        break;
                case 15:
-                       halbtc8821a1ant_set_fw_pstdma(btcoexist, 0x13, 0xa,
-                                                     0x3, 0x8, 0x0);
+                       btc8821a1ant_set_fw_ps_tdma(btcoexist, 0x13, 0xa,
+                                                   0x3, 0x8, 0x0);
                        break;
                case 16:
-                       halbtc8821a1ant_set_fw_pstdma(btcoexist, 0x93, 0x15,
-                                                     0x3, 0x10, 0x0);
+                       btc8821a1ant_set_fw_ps_tdma(btcoexist, 0x93, 0x15,
+                                                   0x3, 0x10, 0x0);
                        rssi_adjust_val = 18;
                        break;
                case 18:
-                       halbtc8821a1ant_set_fw_pstdma(btcoexist, 0x93, 0x25,
-                                                     0x3, 0x10, 0x0);
+                       btc8821a1ant_set_fw_ps_tdma(btcoexist, 0x93, 0x25,
+                                                   0x3, 0x10, 0x0);
                        rssi_adjust_val = 14;
                        break;
                case 20:
-                       halbtc8821a1ant_set_fw_pstdma(btcoexist, 0x61, 0x35,
-                                                     0x03, 0x11, 0x10);
+                       btc8821a1ant_set_fw_ps_tdma(btcoexist, 0x61, 0x35,
+                                                   0x03, 0x11, 0x10);
                        break;
                case 21:
-                       halbtc8821a1ant_set_fw_pstdma(btcoexist, 0x61, 0x15,
-                                                     0x03, 0x11, 0x10);
+                       btc8821a1ant_set_fw_ps_tdma(btcoexist, 0x61, 0x15,
+                                                   0x03, 0x11, 0x10);
                        break;
                case 22:
-                       halbtc8821a1ant_set_fw_pstdma(btcoexist, 0x61, 0x25,
-                                                     0x03, 0x11, 0x10);
+                       btc8821a1ant_set_fw_ps_tdma(btcoexist, 0x61, 0x25,
+                                                   0x03, 0x11, 0x10);
                        break;
                case 23:
-                       halbtc8821a1ant_set_fw_pstdma(btcoexist, 0xe3, 0x25,
-                                                     0x3, 0x31, 0x18);
+                       btc8821a1ant_set_fw_ps_tdma(btcoexist, 0xe3, 0x25,
+                                                   0x3, 0x31, 0x18);
                        rssi_adjust_val = 22;
                        break;
                case 24:
-                       halbtc8821a1ant_set_fw_pstdma(btcoexist, 0xe3, 0x15,
-                                                     0x3, 0x31, 0x18);
+                       btc8821a1ant_set_fw_ps_tdma(btcoexist, 0xe3, 0x15,
+                                                   0x3, 0x31, 0x18);
                        rssi_adjust_val = 22;
                        break;
                case 25:
-                       halbtc8821a1ant_set_fw_pstdma(btcoexist, 0xe3, 0xa,
-                                                     0x3, 0x31, 0x18);
+                       btc8821a1ant_set_fw_ps_tdma(btcoexist, 0xe3, 0xa,
+                                                   0x3, 0x31, 0x18);
                        rssi_adjust_val = 22;
                        break;
                case 26:
-                       halbtc8821a1ant_set_fw_pstdma(btcoexist, 0xe3, 0xa,
-                                                     0x3, 0x31, 0x18);
+                       btc8821a1ant_set_fw_ps_tdma(btcoexist, 0xe3, 0xa,
+                                                   0x3, 0x31, 0x18);
                        rssi_adjust_val = 22;
                        break;
                case 27:
-                       halbtc8821a1ant_set_fw_pstdma(btcoexist, 0xe3, 0x25,
-                                                     0x3, 0x31, 0x98);
+                       btc8821a1ant_set_fw_ps_tdma(btcoexist, 0xe3, 0x25,
+                                                   0x3, 0x31, 0x98);
                        rssi_adjust_val = 22;
                        break;
                case 28:
-                       halbtc8821a1ant_set_fw_pstdma(btcoexist, 0x69, 0x25,
-                                                     0x3, 0x31, 0x0);
+                       btc8821a1ant_set_fw_ps_tdma(btcoexist, 0x69, 0x25,
+                                                   0x3, 0x31, 0x0);
                        break;
                case 29:
-                       halbtc8821a1ant_set_fw_pstdma(btcoexist, 0xab, 0x1a,
-                                                     0x1a, 0x1, 0x10);
+                       btc8821a1ant_set_fw_ps_tdma(btcoexist, 0xab, 0x1a,
+                                                   0x1a, 0x1, 0x10);
                        break;
                case 30:
-                       halbtc8821a1ant_set_fw_pstdma(btcoexist, 0x51, 0x14,
-                                                     0x3, 0x10, 0x50);
+                       btc8821a1ant_set_fw_ps_tdma(btcoexist, 0x51, 0x14,
+                                                   0x3, 0x10, 0x50);
                        break;
                case 31:
-                       halbtc8821a1ant_set_fw_pstdma(btcoexist, 0xd3, 0x1a,
-                                                     0x1a, 0, 0x58);
+                       btc8821a1ant_set_fw_ps_tdma(btcoexist, 0xd3, 0x1a,
+                                                   0x1a, 0, 0x58);
                        break;
                case 32:
-                       halbtc8821a1ant_set_fw_pstdma(btcoexist, 0x61, 0xa,
-                                                     0x3, 0x10, 0x0);
+                       btc8821a1ant_set_fw_ps_tdma(btcoexist, 0x61, 0xa,
+                                                   0x3, 0x10, 0x0);
                        break;
                case 33:
-                       halbtc8821a1ant_set_fw_pstdma(btcoexist, 0xa3, 0x25,
-                                                     0x3, 0x30, 0x90);
+                       btc8821a1ant_set_fw_ps_tdma(btcoexist, 0xa3, 0x25,
+                                                   0x3, 0x30, 0x90);
                        break;
                case 34:
-                       halbtc8821a1ant_set_fw_pstdma(btcoexist, 0x53, 0x1a,
-                                                     0x1a, 0x0, 0x10);
+                       btc8821a1ant_set_fw_ps_tdma(btcoexist, 0x53, 0x1a,
+                                                   0x1a, 0x0, 0x10);
                        break;
                case 35:
-                       halbtc8821a1ant_set_fw_pstdma(btcoexist, 0x63, 0x1a,
-                                                     0x1a, 0x0, 0x10);
+                       btc8821a1ant_set_fw_ps_tdma(btcoexist, 0x63, 0x1a,
+                                                   0x1a, 0x0, 0x10);
                        break;
                case 36:
-                       halbtc8821a1ant_set_fw_pstdma(btcoexist, 0xd3, 0x12,
-                                                     0x3, 0x14, 0x50);
+                       btc8821a1ant_set_fw_ps_tdma(btcoexist, 0xd3, 0x12,
+                                                   0x3, 0x14, 0x50);
                        break;
                }
        } else {
-               /* disable PS tdma*/
+               /* disable PS tdma */
                switch (type) {
-               case 8: /*PTA Control*/
-                       halbtc8821a1ant_set_fw_pstdma(btcoexist, 0x8, 0x0, 0x0,
-                                                     0x0, 0x0);
-                       halbtc8821a1ant_set_ant_path(btcoexist, BTC_ANT_PATH_PTA,
-                                                    false, false);
+               case 8:
+                       /* PTA Control */
+                       btc8821a1ant_set_fw_ps_tdma(btcoexist, 0x8, 0x0, 0x0,
+                                                   0x0, 0x0);
+                       btc8821a1ant_set_ant_path(btcoexist, BTC_ANT_PATH_PTA,
+                                                 false, false);
                        break;
                case 0:
-               default:  /*Software control, Antenna at BT side*/
-                       halbtc8821a1ant_set_fw_pstdma(btcoexist, 0x0, 0x0, 0x0,
-                                                     0x0, 0x0);
-                       halbtc8821a1ant_set_ant_path(btcoexist, BTC_ANT_PATH_BT,
-                                                    false, false);
+               default:
+                       /* Software control, Antenna at BT side */
+                       btc8821a1ant_set_fw_ps_tdma(btcoexist, 0x0, 0x0, 0x0,
+                                                   0x0, 0x0);
+                       btc8821a1ant_set_ant_path(btcoexist, BTC_ANT_PATH_BT,
+                                                 false, false);
                        break;
-               case 9:   /*Software control, Antenna at WiFi side*/
-                       halbtc8821a1ant_set_fw_pstdma(btcoexist, 0x0, 0x0, 0x0,
-                                                     0x0, 0x0);
-                       halbtc8821a1ant_set_ant_path(btcoexist, BTC_ANT_PATH_WIFI,
-                                                    false, false);
+               case 9:
+                       /* Software control, Antenna at WiFi side */
+                       btc8821a1ant_set_fw_ps_tdma(btcoexist, 0x0, 0x0, 0x0,
+                                                   0x0, 0x0);
+                       btc8821a1ant_set_ant_path(btcoexist, BTC_ANT_PATH_WIFI,
+                                                 false, false);
                        break;
-               case 10:        /* under 5G*/
-                       halbtc8821a1ant_set_fw_pstdma(btcoexist, 0x0, 0x0, 0x0,
-                                                     0x8, 0x0);
-                       halbtc8821a1ant_set_ant_path(btcoexist, BTC_ANT_PATH_BT,
-                                                    false, false);
+               case 10:
+                       /* under 5G */
+                       btc8821a1ant_set_fw_ps_tdma(btcoexist, 0x0, 0x0, 0x0,
+                                                   0x8, 0x0);
+                       btc8821a1ant_set_ant_path(btcoexist, BTC_ANT_PATH_BT,
+                                                 false, false);
                        break;
                }
        }
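Each case above reduces to five raw parameter bytes for the firmware TDMA engine. In rtlwifi's btcoexist HAL such blocks are delivered as a host-to-card (H2C) command; a minimal sketch of the setter, assuming the btc_fill_h2c callback and the H2C element id 0x60 this HAL uses elsewhere (neither is visible in this hunk):

	static void set_fw_ps_tdma_sketch(struct btc_coexist *btcoexist,
					  u8 byte1, u8 byte2, u8 byte3,
					  u8 byte4, u8 byte5)
	{
		u8 h2c_parameter[5];

		h2c_parameter[0] = byte1;	/* TDMA on/off and mode bits */
		h2c_parameter[1] = byte2;	/* WiFi slot duration */
		h2c_parameter[2] = byte3;	/* BT slot duration */
		h2c_parameter[3] = byte4;
		h2c_parameter[4] = byte5;

		/* hand the five bytes to firmware as one H2C command */
		btcoexist->btc_fill_h2c(btcoexist, 0x60, 5, h2c_parameter);
	}

The rssi_adjust_val written above (22, for the case just before case 28) is pushed back through BTC_SET_U1_RSSI_ADJ_VAL_FOR_1ANT_COEX_TYPE below, so the RSSI thresholds can be biased while that TDMA pattern is active.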
@@ -1264,15 +1270,15 @@ static void halbtc8821a1ant_ps_tdma(struct btc_coexist *btcoexist,
        btcoexist->btc_set(btcoexist,
                 BTC_SET_U1_RSSI_ADJ_VAL_FOR_1ANT_COEX_TYPE, &rssi_adjust_val);
 
-       /* update pre state*/
+       /* update pre state */
        coex_dm->pre_ps_tdma_on = coex_dm->cur_ps_tdma_on;
        coex_dm->pre_ps_tdma = coex_dm->cur_ps_tdma;
 }
 
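The pre/cur pair updated above is the usual btcoexist caching idiom: the function compares the requested TDMA state against the cached one and returns early, so firmware is only reprogrammed on an actual change. A sketch of the guard this bookkeeping serves, assuming the usual force_exec parameter (the check itself sits earlier in the function, outside this hunk):

	if (!force_exec &&
	    coex_dm->pre_ps_tdma_on == coex_dm->cur_ps_tdma_on &&
	    coex_dm->pre_ps_tdma == coex_dm->cur_ps_tdma)
		return;	/* nothing changed, skip the H2C write */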
-static bool halbtc8821a1ant_is_common_action(struct btc_coexist *btcoexist)
+static bool btc8821a1ant_is_common_action(struct btc_coexist *btcoexist)
 {
        struct rtl_priv *rtlpriv = btcoexist->adapter;
-       bool    common = false, wifi_connected = false, wifi_busy = false;
+       bool common = false, wifi_connected = false, wifi_busy = false;
 
        btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_CONNECTED,
                           &wifi_connected);
@@ -1283,7 +1289,7 @@ static bool halbtc8821a1ant_is_common_action(struct btc_coexist *btcoexist)
            coex_dm->bt_status) {
                RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
                         "[BTCoex], Wifi non connected-idle + BT non connected-idle!!\n");
-               halbtc8821a1ant_sw_mechanism(btcoexist, false);
+               btc8821a1ant_sw_mechanism(btcoexist, false);
 
                common = true;
        } else if (wifi_connected &&
@@ -1291,7 +1297,7 @@ static bool halbtc8821a1ant_is_common_action(struct btc_coexist *btcoexist)
                    coex_dm->bt_status)) {
                RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
                         "[BTCoex], Wifi connected + BT non connected-idle!!\n");
-               halbtc8821a1ant_sw_mechanism(btcoexist, false);
+               btc8821a1ant_sw_mechanism(btcoexist, false);
 
                common = true;
        } else if (!wifi_connected &&
@@ -1299,15 +1305,15 @@ static bool halbtc8821a1ant_is_common_action(struct btc_coexist *btcoexist)
                    coex_dm->bt_status)) {
                RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
                         "[BTCoex], Wifi non connected-idle + BT connected-idle!!\n");
-               halbtc8821a1ant_sw_mechanism(btcoexist, false);
+               btc8821a1ant_sw_mechanism(btcoexist, false);
 
                common = true;
        } else if (wifi_connected &&
                   (BT_8821A_1ANT_BT_STATUS_CONNECTED_IDLE ==
-                  coex_dm->bt_status)) {
+                   coex_dm->bt_status)) {
                RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
                         "[BTCoex], Wifi connected + BT connected-idle!!\n");
-               halbtc8821a1ant_sw_mechanism(btcoexist, false);
+               btc8821a1ant_sw_mechanism(btcoexist, false);
 
                common = true;
        } else if (!wifi_connected &&
@@ -1315,7 +1321,7 @@ static bool halbtc8821a1ant_is_common_action(struct btc_coexist *btcoexist)
                    coex_dm->bt_status)) {
                RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
                         "[BTCoex], Wifi non connected-idle + BT Busy!!\n");
-               halbtc8821a1ant_sw_mechanism(btcoexist, false);
+               btc8821a1ant_sw_mechanism(btcoexist, false);
 
                common = true;
        } else {
@@ -1333,231 +1339,40 @@ static bool halbtc8821a1ant_is_common_action(struct btc_coexist *btcoexist)
        return common;
 }
 
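The branch ladder in this function exists mainly for its per-case trace strings; the decision itself is a single rule. An equivalent condensation, for reading only:

	bool bt_idle =
		(coex_dm->bt_status ==
		 BT_8821A_1ANT_BT_STATUS_NON_CONNECTED_IDLE) ||
		(coex_dm->bt_status == BT_8821A_1ANT_BT_STATUS_CONNECTED_IDLE);

	common = bt_idle || !wifi_connected;
	if (common)
		btc8821a1ant_sw_mechanism(btcoexist, false);

That is, coexistence is "common" (software mechanism only, no TDMA) whenever either radio is idle; only the WiFi-connected plus BT-busy combination falls through to the full state machine.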
-static void btc8821a1ant_tdma_dur_adj(struct btc_coexist *btcoexist,
-                                     u8 wifi_status)
-{
-       struct rtl_priv *rtlpriv = btcoexist->adapter;
-       static long             up, dn, m, n, wait_count;
-       /*0: no change, +1: increase WiFi duration, -1: decrease WiFi duration*/
-       long                    result;
-       u8                      retry_count = 0, bt_info_ext;
-
-       RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
-                "[BTCoex], TdmaDurationAdjustForAcl()\n");
-
-       if ((BT_8821A_1ANT_WIFI_STATUS_NON_CONNECTED_ASSO_AUTH_SCAN ==
-            wifi_status) ||
-           (BT_8821A_1ANT_WIFI_STATUS_CONNECTED_SCAN ==
-            wifi_status) ||
-           (BT_8821A_1ANT_WIFI_STATUS_CONNECTED_SPECIAL_PKT ==
-            wifi_status)) {
-               if (coex_dm->cur_ps_tdma != 1 &&
-                   coex_dm->cur_ps_tdma != 2 &&
-                   coex_dm->cur_ps_tdma != 3 &&
-                   coex_dm->cur_ps_tdma != 9) {
-                       halbtc8821a1ant_ps_tdma(btcoexist, NORMAL_EXEC,
-                                               true, 9);
-                       coex_dm->tdma_adj_type = 9;
-
-                       up = 0;
-                       dn = 0;
-                       m = 1;
-                       n = 3;
-                       result = 0;
-                       wait_count = 0;
-               }
-               return;
-       }
-
-       if (!coex_dm->auto_tdma_adjust) {
-               coex_dm->auto_tdma_adjust = true;
-               RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
-                        "[BTCoex], first run TdmaDurationAdjust()!!\n");
-
-               halbtc8821a1ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 2);
-               coex_dm->tdma_adj_type = 2;
-               /*============*/
-               up = 0;
-               dn = 0;
-               m = 1;
-               n = 3;
-               result = 0;
-               wait_count = 0;
-       } else {
-               /*accquire the BT TRx retry count from BT_Info byte2*/
-               retry_count = coex_sta->bt_retry_cnt;
-               bt_info_ext = coex_sta->bt_info_ext;
-               result = 0;
-               wait_count++;
-
-               if (retry_count == 0) {
-                       /* no retry in the last 2-second duration*/
-                       up++;
-                       dn--;
-
-                       if (dn <= 0)
-                               dn = 0;
-
-                       if (up >= n) {
-                               /* if (retry count == 0) for 2*n seconds ,
-                                * make WiFi duration wider
-                                */
-                               wait_count = 0;
-                               n = 3;
-                               up = 0;
-                               dn = 0;
-                               result = 1;
-                               RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
-                                        "[BTCoex], Increase wifi duration!!\n");
-                       }
-               } else if (retry_count <= 3) {
-                       /* <=3 retry in the last 2-second duration*/
-                       up--;
-                       dn++;
-
-                       if (up <= 0)
-                               up = 0;
-
-                       if (dn == 2) {
-                               /* if retry count< 3 for 2*2 seconds,
-                                * shrink wifi duration
-                                */
-                               if (wait_count <= 2)
-                                       m++; /* avoid bounce in two levels */
-                               else
-                                       m = 1;
-
-                               if (m >= 20) {
-                                       /* m max value is 20, max time is 120 s,
-                                        *      recheck if adjust WiFi duration.
-                                        */
-                                       m = 20;
-                               }
-                               n = 3*m;
-                               up = 0;
-                               dn = 0;
-                               wait_count = 0;
-                               result = -1;
-                               RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
-                                        "[BTCoex], Decrease wifi duration for retryCounter<3!!\n");
-                       }
-               } else {
-                       /* retry count > 3, if retry count > 3 happens once,
-                        *      shrink WiFi duration
-                        */
-                       if (wait_count == 1)
-                               m++; /* avoid bounce in two levels */
-                       else
-                               m = 1;
-               /* m max value is 20, max time is 120 second,
-                *      recheck if adjust WiFi duration.
-               */
-                       if (m >= 20)
-                               m = 20;
-
-                       n = 3*m;
-                       up = 0;
-                       dn = 0;
-                       wait_count = 0;
-                       result = -1;
-                       RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
-                                "[BTCoex], Decrease wifi duration for retryCounter>3!!\n");
-               }
-
-               if (result == -1) {
-                       if ((BT_INFO_8821A_1ANT_A2DP_BASIC_RATE(bt_info_ext)) &&
-                           ((coex_dm->cur_ps_tdma == 1) ||
-                            (coex_dm->cur_ps_tdma == 2))) {
-                               halbtc8821a1ant_ps_tdma(btcoexist, NORMAL_EXEC,
-                                                       true, 9);
-                               coex_dm->tdma_adj_type = 9;
-                       } else if (coex_dm->cur_ps_tdma == 1) {
-                               halbtc8821a1ant_ps_tdma(btcoexist, NORMAL_EXEC,
-                                                       true, 2);
-                               coex_dm->tdma_adj_type = 2;
-                       } else if (coex_dm->cur_ps_tdma == 2) {
-                               halbtc8821a1ant_ps_tdma(btcoexist, NORMAL_EXEC,
-                                                       true, 9);
-                               coex_dm->tdma_adj_type = 9;
-                       } else if (coex_dm->cur_ps_tdma == 9) {
-                               halbtc8821a1ant_ps_tdma(btcoexist, NORMAL_EXEC,
-                                                       true, 11);
-                               coex_dm->tdma_adj_type = 11;
-                       }
-               } else if (result == 1) {
-                       if ((BT_INFO_8821A_1ANT_A2DP_BASIC_RATE(bt_info_ext)) &&
-                           ((coex_dm->cur_ps_tdma == 1) ||
-                            (coex_dm->cur_ps_tdma == 2))) {
-                               halbtc8821a1ant_ps_tdma(btcoexist, NORMAL_EXEC,
-                                                       true, 9);
-                               coex_dm->tdma_adj_type = 9;
-                       } else if (coex_dm->cur_ps_tdma == 11) {
-                               halbtc8821a1ant_ps_tdma(btcoexist, NORMAL_EXEC,
-                                                       true, 9);
-                               coex_dm->tdma_adj_type = 9;
-                       } else if (coex_dm->cur_ps_tdma == 9) {
-                               halbtc8821a1ant_ps_tdma(btcoexist, NORMAL_EXEC,
-                                                       true, 2);
-                               coex_dm->tdma_adj_type = 2;
-                       } else if (coex_dm->cur_ps_tdma == 2) {
-                               halbtc8821a1ant_ps_tdma(btcoexist, NORMAL_EXEC,
-                                                       true, 1);
-                               coex_dm->tdma_adj_type = 1;
-                       }
-               } else {
-                       /*no change*/
-                       RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
-                                "[BTCoex], ********** TDMA(on, %d) **********\n",
-                                coex_dm->cur_ps_tdma);
-               }
-
-               if (coex_dm->cur_ps_tdma != 1 &&
-                   coex_dm->cur_ps_tdma != 2 &&
-                   coex_dm->cur_ps_tdma != 9 &&
-                   coex_dm->cur_ps_tdma != 11) {
-                       /* recover to previous adjust type*/
-                       halbtc8821a1ant_ps_tdma(btcoexist,
-                                               NORMAL_EXEC, true,
-                                               coex_dm->tdma_adj_type);
-               }
-       }
-}
-
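The block above removes btc8821a1ant_tdma_dur_adj(), the ACL duration-adjust loop: roughly every 2 s (judging by its comments) it sampled the BT retry counter and nudged the active TDMA profile along the sequence 1 <-> 2 <-> 9 <-> 11, widest to narrowest WiFi slot. A condensed reader's sketch of the removed hysteresis; counter clamping and the A2DP basic-rate special cases are omitted:

	if (retry_count == 0) {
		/* quiet link: after n clean samples, widen WiFi duration */
		if (++up >= n) {
			result = 1;
			up = dn = wait_count = 0;
			n = 3;
		}
	} else {
		/* retries seen: shrink WiFi duration and back off the
		 * number of clean samples required before widening again
		 * (n = 3 * m, m capped at 20, i.e. roughly 120 s)
		 */
		if (retry_count > 3 || ++dn >= 2) {
			result = -1;
			m = min(m + 1, 20L);
			n = 3 * m;
			up = dn = wait_count = 0;
		}
	}

With the helper gone, the low-RSSI fallbacks in btc8821a1ant_act_wifi_con_bt_acl_busy() below pick a fixed profile instead.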
 static void btc8821a1ant_ps_tdma_check_for_pwr_save(struct btc_coexist *btcoex,
                                                    bool new_ps_state)
 {
-       u8      lps_mode = 0x0;
+       u8 lps_mode = 0x0;
 
        btcoex->btc_get(btcoex, BTC_GET_U1_LPS_MODE, &lps_mode);
 
        if (lps_mode) {
-               /* already under LPS state*/
+               /* already under LPS state */
                if (new_ps_state) {
-                       /* keep state under LPS, do nothing.*/
+                       /* keep state under LPS, do nothing */
                } else {
-                       /* will leave LPS state, turn off psTdma first*/
-                       halbtc8821a1ant_ps_tdma(btcoex, NORMAL_EXEC, false, 0);
+                       /* will leave LPS state, turn off psTdma first */
+                       btc8821a1ant_ps_tdma(btcoex, NORMAL_EXEC, false, 0);
                }
        } else {
-               /* NO PS state*/
+               /* NO PS state */
                if (new_ps_state) {
-                       /* will enter LPS state, turn off psTdma first*/
-                       halbtc8821a1ant_ps_tdma(btcoex, NORMAL_EXEC, false, 0);
+                       /* will enter LPS state, turn off psTdma first */
+                       btc8821a1ant_ps_tdma(btcoex, NORMAL_EXEC, false, 0);
                } else {
-                       /* keep state under NO PS state, do nothing.*/
+                       /* keep state under NO PS state, do nothing */
                }
        }
 }
 
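Both halves of the branch above act the same way: whenever the LPS state is about to flip, in either direction, psTdma is switched off first. An equivalent one-line reading:

	if (!!lps_mode != new_ps_state)
		btc8821a1ant_ps_tdma(btcoex, NORMAL_EXEC, false, 0);

The empty "do nothing" arms are kept in the driver purely to document the no-transition cases.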
-static void halbtc8821a1ant_power_save_state(struct btc_coexist *btcoexist,
-                                            u8 ps_type, u8 lps_val,
-                                            u8 rpwm_val)
+static void btc8821a1ant_power_save_state(struct btc_coexist *btcoexist,
+                                         u8 ps_type, u8 lps_val, u8 rpwm_val)
 {
        bool low_pwr_disable = false;
 
        switch (ps_type) {
        case BTC_PS_WIFI_NATIVE:
-               /* recover to original 32k low power setting*/
+               /* recover to original 32k low power setting */
                low_pwr_disable = false;
                btcoexist->btc_set(btcoexist, BTC_SET_ACT_DISABLE_LOW_POWER,
                                   &low_pwr_disable);
@@ -1566,13 +1381,13 @@ static void halbtc8821a1ant_power_save_state(struct btc_coexist *btcoexist,
        case BTC_PS_LPS_ON:
                btc8821a1ant_ps_tdma_check_for_pwr_save(btcoexist,
                                                        true);
-               halbtc8821a1ant_lps_rpwm(btcoexist,
-                                        NORMAL_EXEC, lps_val, rpwm_val);
-               /* when coex force to enter LPS, do not enter 32k low power.*/
+               btc8821a1ant_lps_rpwm(btcoexist, NORMAL_EXEC, lps_val,
+                                     rpwm_val);
+       /* when coex forces LPS entry, do not enter 32k low power */
                low_pwr_disable = true;
                btcoexist->btc_set(btcoexist, BTC_SET_ACT_DISABLE_LOW_POWER,
                                   &low_pwr_disable);
-               /* power save must executed before psTdma.*/
+       /* power save must be executed before psTdma */
                btcoexist->btc_set(btcoexist, BTC_SET_ACT_ENTER_LPS, NULL);
                break;
        case BTC_PS_LPS_OFF:
@@ -1584,295 +1399,332 @@ static void halbtc8821a1ant_power_save_state(struct btc_coexist *btcoexist,
        }
 }
 
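Call order matters here: the action routines below always pick the power-save state first and only then program TDMA and the coex table, matching the "power save must be executed before psTdma" rule above. A typical sequence from the connected path:

	btc8821a1ant_power_save_state(btcoexist, BTC_PS_LPS_ON, 0x50, 0x4);
	btc8821a1ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 5);
	btc8821a1ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 2);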
-static void halbtc8821a1ant_coex_under_5g(struct btc_coexist *btcoexist)
+static void btc8821a1ant_coex_under_5g(struct btc_coexist *btcoexist)
 {
-       halbtc8821a1ant_power_save_state(btcoexist, BTC_PS_WIFI_NATIVE,
-                                        0x0, 0x0);
-       halbtc8821a1ant_ignore_wlan_act(btcoexist, NORMAL_EXEC, true);
+       btc8821a1ant_power_save_state(btcoexist, BTC_PS_WIFI_NATIVE,
+                                     0x0, 0x0);
+       btc8821a1ant_ignore_wlan_act(btcoexist, NORMAL_EXEC, true);
 
-       halbtc8821a1ant_ps_tdma(btcoexist, NORMAL_EXEC, false, 10);
+       btc8821a1ant_ps_tdma(btcoexist, NORMAL_EXEC, false, 10);
 
-       halbtc8821a1ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 0);
+       btc8821a1ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 0);
 
-       halbtc8821a1ant_limited_tx(btcoexist, NORMAL_EXEC, 0, 0, 0, 0);
+       btc8821a1ant_limited_tx(btcoexist, NORMAL_EXEC, 0, 0, 0, 0);
 
-       halbtc8821a1ant_limited_rx(btcoexist, NORMAL_EXEC, false, false, 5);
+       btc8821a1ant_limited_rx(btcoexist, NORMAL_EXEC, false, false, 5);
 }
 
-static void halbtc8821a1ant_action_wifi_only(struct btc_coexist *btcoexist)
+/***********************************************
+ *
+ *     Software Coex Mechanism start
+ *
+ ***********************************************/
+
+/* SCO only or SCO+PAN(HS) */
+static void btc8821a1ant_action_sco(struct btc_coexist *btcoexist)
 {
-       halbtc8821a1ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 0);
-       halbtc8821a1ant_ps_tdma(btcoexist, NORMAL_EXEC, false, 9);
+       btc8821a1ant_sw_mechanism(btcoexist, true);
 }
 
-static void btc8821a1ant_mon_bt_en_dis(struct btc_coexist *btcoexist)
+static void btc8821a1ant_action_hid(struct btc_coexist *btcoexist)
 {
-       struct rtl_priv *rtlpriv = btcoexist->adapter;
-       static bool     pre_bt_disabled;
-       static u32      bt_disable_cnt;
-       bool            bt_active = true, bt_disabled = false;
-
-       /* This function check if bt is disabled*/
-
-       if (coex_sta->high_priority_tx == 0 &&
-           coex_sta->high_priority_rx == 0 &&
-           coex_sta->low_priority_tx == 0 &&
-           coex_sta->low_priority_rx == 0) {
-               bt_active = false;
-       }
-       if (coex_sta->high_priority_tx == 0xffff &&
-           coex_sta->high_priority_rx == 0xffff &&
-           coex_sta->low_priority_tx == 0xffff &&
-           coex_sta->low_priority_rx == 0xffff) {
-               bt_active = false;
-       }
-       if (bt_active) {
-               bt_disable_cnt = 0;
-               bt_disabled = false;
-               btcoexist->btc_set(btcoexist, BTC_SET_BL_BT_DISABLE,
-                                  &bt_disabled);
-               RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
-                        "[BTCoex], BT is enabled !!\n");
-       } else {
-               bt_disable_cnt++;
-               RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
-                        "[BTCoex], bt all counters = 0, %d times!!\n",
-                        bt_disable_cnt);
-               if (bt_disable_cnt >= 2) {
-                       bt_disabled = true;
-                       btcoexist->btc_set(btcoexist, BTC_SET_BL_BT_DISABLE,
-                                          &bt_disabled);
-                       RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
-                                "[BTCoex], BT is disabled !!\n");
-                       halbtc8821a1ant_action_wifi_only(btcoexist);
-               }
-       }
-       if (pre_bt_disabled != bt_disabled) {
-               RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
-                        "[BTCoex], BT is from %s to %s!!\n",
-                           (pre_bt_disabled ? "disabled" : "enabled"),
-                           (bt_disabled ? "disabled" : "enabled"));
-               pre_bt_disabled = bt_disabled;
-               if (bt_disabled) {
-                       btcoexist->btc_set(btcoexist, BTC_SET_ACT_LEAVE_LPS,
-                                          NULL);
-                       btcoexist->btc_set(btcoexist, BTC_SET_ACT_NORMAL_LPS,
-                                          NULL);
-               }
-       }
+       btc8821a1ant_sw_mechanism(btcoexist, true);
 }
 
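This hunk also drops btc8821a1ant_mon_bt_en_dis(), the watchdog that inferred a disabled BT core from its traffic counters. A reader's sketch of the removed heuristic, using the same file-scope coex_sta pointer as the deleted code:

	/* BT looked inactive when all four priority counters were stuck
	 * at 0 or at 0xffff; two such 2-second polls in a row flagged BT
	 * as disabled and forced the wifi-only action.
	 */
	static bool bt_looks_inactive(void)
	{
		bool all_zero = !coex_sta->high_priority_tx &&
				!coex_sta->high_priority_rx &&
				!coex_sta->low_priority_tx &&
				!coex_sta->low_priority_rx;
		bool all_ones = coex_sta->high_priority_tx == 0xffff &&
				coex_sta->high_priority_rx == 0xffff &&
				coex_sta->low_priority_tx == 0xffff &&
				coex_sta->low_priority_rx == 0xffff;

		return all_zero || all_ones;
	}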
-/*=============================================*/
-/**/
-/*     Software Coex Mechanism start*/
-/**/
-/*=============================================*/
-
-/* SCO only or SCO+PAN(HS)*/
-static void halbtc8821a1ant_action_sco(struct btc_coexist *btcoexist)
+/* A2DP only / PAN(EDR) only / A2DP+PAN(HS) */
+static void btc8821a1ant_action_a2dp(struct btc_coexist *btcoexist)
 {
-       halbtc8821a1ant_sw_mechanism(btcoexist, true);
+       btc8821a1ant_sw_mechanism(btcoexist, false);
 }
 
-static void halbtc8821a1ant_action_hid(struct btc_coexist *btcoexist)
+static void btc8821a1ant_action_a2dp_pan_hs(struct btc_coexist *btcoexist)
 {
-       halbtc8821a1ant_sw_mechanism(btcoexist, true);
+       btc8821a1ant_sw_mechanism(btcoexist, false);
 }
 
-/*A2DP only / PAN(EDR) only/ A2DP+PAN(HS)*/
-static void halbtc8821a1ant_action_a2dp(struct btc_coexist *btcoexist)
+static void btc8821a1ant_action_pan_edr(struct btc_coexist *btcoexist)
 {
-       halbtc8821a1ant_sw_mechanism(btcoexist, false);
+       btc8821a1ant_sw_mechanism(btcoexist, false);
 }
 
-static void halbtc8821a1ant_action_a2dp_pan_hs(struct btc_coexist *btcoexist)
+/* PAN(HS) only */
+static void btc8821a1ant_action_pan_hs(struct btc_coexist *btcoexist)
 {
-       halbtc8821a1ant_sw_mechanism(btcoexist, false);
+       btc8821a1ant_sw_mechanism(btcoexist, false);
 }
 
-static void halbtc8821a1ant_action_pan_edr(struct btc_coexist *btcoexist)
+/* PAN(EDR)+A2DP */
+static void btc8821a1ant_action_pan_edr_a2dp(struct btc_coexist *btcoexist)
 {
-       halbtc8821a1ant_sw_mechanism(btcoexist, false);
+       btc8821a1ant_sw_mechanism(btcoexist, false);
 }
 
-/*PAN(HS) only*/
-static void halbtc8821a1ant_action_pan_hs(struct btc_coexist *btcoexist)
+static void btc8821a1ant_action_pan_edr_hid(struct btc_coexist *btcoexist)
 {
-       halbtc8821a1ant_sw_mechanism(btcoexist, false);
+       btc8821a1ant_sw_mechanism(btcoexist, true);
 }
 
-/*PAN(EDR)+A2DP*/
-static void halbtc8821a1ant_action_pan_edr_a2dp(struct btc_coexist *btcoexist)
+/* HID+A2DP+PAN(EDR) */
+static void btc8821a1ant_action_hid_a2dp_pan_edr(struct btc_coexist *btcoexist)
 {
-       halbtc8821a1ant_sw_mechanism(btcoexist, false);
+       btc8821a1ant_sw_mechanism(btcoexist, true);
 }
 
-static void halbtc8821a1ant_action_pan_edr_hid(struct btc_coexist *btcoexist)
+static void btc8821a1ant_action_hid_a2dp(struct btc_coexist *btcoexist)
 {
-       halbtc8821a1ant_sw_mechanism(btcoexist, true);
+       btc8821a1ant_sw_mechanism(btcoexist, true);
 }
 
-/* HID+A2DP+PAN(EDR)*/
-static void btc8821a1ant_action_hid_a2dp_pan_edr(struct btc_coexist *btcoexist)
+/***********************************************
+ *
+ *     Non-Software Coex Mechanism start
+ *
+ ***********************************************/
+static
+void btc8821a1ant_action_wifi_multi_port(struct btc_coexist *btcoexist)
 {
-       halbtc8821a1ant_sw_mechanism(btcoexist, true);
+       struct btc_bt_link_info *bt_link_info = &btcoexist->bt_link_info;
+
+       btc8821a1ant_power_save_state(btcoexist, BTC_PS_WIFI_NATIVE, 0x0, 0x0);
+       /* tdma and coex table */
+       if (coex_dm->bt_status == BT_8821A_1ANT_BT_STATUS_ACL_BUSY) {
+               /* test the combined A2DP+PAN case first so it is reachable */
+               if (bt_link_info->a2dp_exist && bt_link_info->pan_exist) {
+                       btc8821a1ant_ps_tdma(btcoexist, NORMAL_EXEC, false, 8);
+                       btc8821a1ant_coex_table_with_type(btcoexist,
+                                                         NORMAL_EXEC, 4);
+               } else if (bt_link_info->a2dp_exist) {
+                       btc8821a1ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 14);
+                       btc8821a1ant_coex_table_with_type(btcoexist,
+                                                         NORMAL_EXEC, 1);
+               } else {
+                       btc8821a1ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 20);
+                       btc8821a1ant_coex_table_with_type(btcoexist,
+                                                         NORMAL_EXEC, 4);
+               }
+       } else if ((coex_dm->bt_status == BT_8821A_1ANT_BT_STATUS_SCO_BUSY) ||
+                  (coex_dm->bt_status ==
+                   BT_8821A_1ANT_BT_STATUS_ACL_SCO_BUSY)) {
+               btc8821a1ant_act_bt_sco_hid_only_busy(btcoexist,
+                               BT_8821A_1ANT_WIFI_STATUS_CONNECTED_SCAN);
+       } else {
+               btc8821a1ant_ps_tdma(btcoexist, NORMAL_EXEC, false, 8);
+               btc8821a1ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 2);
+       }
 }
 
-static void halbtc8821a1ant_action_hid_a2dp(struct btc_coexist *btcoexist)
+static
+void btc8821a1ant_action_wifi_not_connected_asso_auth(
+                                       struct btc_coexist *btcoexist)
 {
-       halbtc8821a1ant_sw_mechanism(btcoexist, true);
+       struct btc_bt_link_info *bt_link_info = &btcoexist->bt_link_info;
+
+       btc8821a1ant_power_save_state(btcoexist, BTC_PS_WIFI_NATIVE, 0x0,
+                                     0x0);
+
+       /* tdma and coex table */
+       if ((bt_link_info->sco_exist) || (bt_link_info->hid_exist)) {
+               btc8821a1ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 14);
+               btc8821a1ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 1);
+       } else if ((bt_link_info->a2dp_exist) || (bt_link_info->pan_exist)) {
+               btc8821a1ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 20);
+               btc8821a1ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 4);
+       } else {
+               btc8821a1ant_ps_tdma(btcoexist, NORMAL_EXEC, false, 8);
+               btc8821a1ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 2);
+       }
 }
 
-/*=============================================*/
-/**/
-/*     Non-Software Coex Mechanism start*/
-/**/
-/*=============================================*/
-
-static void halbtc8821a1ant_action_hs(struct btc_coexist *btcoexist)
+static void btc8821a1ant_action_hs(struct btc_coexist *btcoexist)
 {
-       halbtc8821a1ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 5);
-       halbtc8821a1ant_coex_table_with_type(btcoexist, FORCE_EXEC, 2);
+       btc8821a1ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 5);
+       btc8821a1ant_coex_table_with_type(btcoexist, FORCE_EXEC, 2);
 }
 
-static void halbtc8821a1ant_action_bt_inquiry(struct btc_coexist *btcoexist)
+static void btc8821a1ant_action_bt_inquiry(struct btc_coexist *btcoexist)
 {
        struct btc_bt_link_info *bt_link_info = &btcoexist->bt_link_info;
        bool wifi_connected = false;
+       bool ap_enable = false;
+       bool wifi_busy = false, bt_busy = false;
 
-       btcoexist->btc_get(btcoexist,
-                BTC_GET_BL_WIFI_CONNECTED, &wifi_connected);
-
-       if (!wifi_connected) {
-               halbtc8821a1ant_power_save_state(btcoexist,
-                                                BTC_PS_WIFI_NATIVE, 0x0, 0x0);
-               halbtc8821a1ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 5);
-               halbtc8821a1ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 1);
-       } else if ((bt_link_info->sco_exist) ||
+       btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_CONNECTED,
+                          &wifi_connected);
+       btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_AP_MODE_ENABLE,
+                          &ap_enable);
+       btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_BUSY, &wifi_busy);
+       btcoexist->btc_set(btcoexist, BTC_SET_BL_BT_TRAFFIC_BUSY, &bt_busy);
+
+       if (!wifi_connected && !coex_sta->wifi_is_high_pri_task) {
+               btc8821a1ant_power_save_state(btcoexist,
+                                             BTC_PS_WIFI_NATIVE, 0x0, 0x0);
+               btc8821a1ant_ps_tdma(btcoexist, NORMAL_EXEC, false, 8);
+               btc8821a1ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 0);
+       } else if ((bt_link_info->sco_exist) || (bt_link_info->a2dp_exist) ||
                   (bt_link_info->hid_only)) {
-               /* SCO/HID-only busy*/
-               halbtc8821a1ant_power_save_state(btcoexist,
-                                                BTC_PS_WIFI_NATIVE, 0x0, 0x0);
-               halbtc8821a1ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 32);
-               halbtc8821a1ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 1);
+               /* SCO/HID-only busy */
+               btc8821a1ant_power_save_state(btcoexist,
+                                             BTC_PS_WIFI_NATIVE, 0x0, 0x0);
+               btc8821a1ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 32);
+               btc8821a1ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 4);
+       } else if ((bt_link_info->a2dp_exist) && (bt_link_info->hid_exist)) {
+               /* A2DP+HID busy */
+               btc8821a1ant_power_save_state(btcoexist, BTC_PS_WIFI_NATIVE,
+                                             0x0, 0x0);
+               btc8821a1ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 14);
+
+               btc8821a1ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 1);
+       } else if ((bt_link_info->pan_exist) || (wifi_busy)) {
+               btc8821a1ant_power_save_state(btcoexist, BTC_PS_WIFI_NATIVE,
+                                             0x0, 0x0);
+               btc8821a1ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 20);
+
+               btc8821a1ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 4);
        } else {
-               halbtc8821a1ant_power_save_state(btcoexist, BTC_PS_LPS_ON,
-                                                0x50, 0x4);
-               halbtc8821a1ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 30);
-               halbtc8821a1ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 1);
+               btc8821a1ant_power_save_state(btcoexist, BTC_PS_WIFI_NATIVE,
+                                             0x0, 0x0);
+               btc8821a1ant_ps_tdma(btcoexist, NORMAL_EXEC, false, 8);
+               btc8821a1ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 7);
        }
 }
 
 static void btc8821a1ant_act_bt_sco_hid_only_busy(struct btc_coexist *btcoexist,
-                                                 u8 wifi_status) {
-       /* tdma and coex table*/
-       halbtc8821a1ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 5);
+                                                 u8 wifi_status)
+{
+       /* tdma and coex table */
+       btc8821a1ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 5);
 
-       halbtc8821a1ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 1);
+       btc8821a1ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 1);
 }
 
 static void btc8821a1ant_act_wifi_con_bt_acl_busy(struct btc_coexist *btcoexist,
                                                  u8 wifi_status)
 {
-       u8              bt_rssi_state;
+       u8 bt_rssi_state;
 
        struct btc_bt_link_info *bt_link_info = &btcoexist->bt_link_info;
 
-       bt_rssi_state = halbtc8821a1ant_bt_rssi_state(btcoexist, 2, 28, 0);
+       bt_rssi_state = btc8821a1ant_bt_rssi_state(btcoexist, 2, 28, 0);
 
        if (bt_link_info->hid_only) {
-               /*HID*/
+               /* HID */
                btc8821a1ant_act_bt_sco_hid_only_busy(btcoexist,
                                                      wifi_status);
                coex_dm->auto_tdma_adjust = false;
                return;
        } else if (bt_link_info->a2dp_only) {
-               /*A2DP*/
-               if ((bt_rssi_state == BTC_RSSI_STATE_HIGH) ||
-                   (bt_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
-                       btc8821a1ant_tdma_dur_adj(btcoexist, wifi_status);
-               } else {
-                       /*for low BT RSSI*/
-                       halbtc8821a1ant_ps_tdma(btcoexist, NORMAL_EXEC,
-                                               true, 11);
+               /* A2DP */
+               if ((bt_rssi_state != BTC_RSSI_STATE_HIGH) &&
+                   (bt_rssi_state != BTC_RSSI_STATE_STAY_HIGH)) {
+                       /* for low BT RSSI */
+                       btc8821a1ant_ps_tdma(btcoexist, NORMAL_EXEC,
+                                            true, 11);
                        coex_dm->auto_tdma_adjust = false;
                }
 
-               halbtc8821a1ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 1);
+               btc8821a1ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 1);
        } else if (bt_link_info->hid_exist && bt_link_info->a2dp_exist) {
-               /*HID+A2DP*/
+               /* HID+A2DP */
                if ((bt_rssi_state == BTC_RSSI_STATE_HIGH) ||
                    (bt_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
-                       halbtc8821a1ant_ps_tdma(btcoexist, NORMAL_EXEC,
-                                               true, 14);
+                       btc8821a1ant_ps_tdma(btcoexist, NORMAL_EXEC,
+                                            true, 14);
                        coex_dm->auto_tdma_adjust = false;
                } else {
-                       /*for low BT RSSI*/
+                       /* for low BT RSSI */
-                       halbtc8821a1ant_ps_tdma(btcoexist, NORMAL_EXEC,
-                                               true, 11);
+                       btc8821a1ant_ps_tdma(btcoexist, NORMAL_EXEC,
+                                            true, 11);
                        coex_dm->auto_tdma_adjust = false;
                }
 
-               halbtc8821a1ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 1);
+               btc8821a1ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 1);
        } else if ((bt_link_info->pan_only) ||
                (bt_link_info->hid_exist && bt_link_info->pan_exist)) {
-               /*PAN(OPP, FTP), HID+PAN(OPP, FTP)*/
-               halbtc8821a1ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 3);
-               halbtc8821a1ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 1);
+               /* PAN(OPP, FTP), HID+PAN(OPP, FTP) */
+               btc8821a1ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 3);
+               btc8821a1ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 1);
                coex_dm->auto_tdma_adjust = false;
        } else if (((bt_link_info->a2dp_exist) && (bt_link_info->pan_exist)) ||
                   (bt_link_info->hid_exist && bt_link_info->a2dp_exist &&
                    bt_link_info->pan_exist)) {
-               /*A2DP+PAN(OPP, FTP), HID+A2DP+PAN(OPP, FTP)*/
-               halbtc8821a1ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 13);
-               halbtc8821a1ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 1);
+               /* A2DP+PAN(OPP, FTP), HID+A2DP+PAN(OPP, FTP) */
+               btc8821a1ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 13);
+               btc8821a1ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 1);
                coex_dm->auto_tdma_adjust = false;
        } else {
-               halbtc8821a1ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 11);
-               halbtc8821a1ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 1);
+               btc8821a1ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 11);
+               btc8821a1ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 1);
                coex_dm->auto_tdma_adjust = false;
        }
 }
 
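For quick reference, the fixed profiles chosen by btc8821a1ant_act_wifi_con_bt_acl_busy() above:

	BT profile mix                      ps_tdma         coex table
	HID only                            sco/hid-only-busy path
	A2DP only, high BT RSSI             keep current    1
	A2DP only, low BT RSSI              11 (on)         1
	HID + A2DP                          14 or 11 (on)   1
	PAN only, HID + PAN                  3 (on)         1
	A2DP + PAN, HID + A2DP + PAN        13 (on)         1
	anything else                       11 (on)         1

auto_tdma_adjust is cleared on every path now that the dynamic duration-adjust helper is gone.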
-static void halbtc8821a1ant_action_wifi_not_connected(
-       struct btc_coexist *btcoexist)
+static
+void btc8821a1ant_action_wifi_not_connected(struct btc_coexist *btcoexist)
 {
-       /* power save state*/
-       halbtc8821a1ant_power_save_state(btcoexist,
-                                        BTC_PS_WIFI_NATIVE, 0x0, 0x0);
+       /* power save state */
+       btc8821a1ant_power_save_state(btcoexist, BTC_PS_WIFI_NATIVE, 0x0, 0x0);
 
-       /* tdma and coex table*/
-       halbtc8821a1ant_ps_tdma(btcoexist, NORMAL_EXEC, false, 8);
-       halbtc8821a1ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 0);
+       /* tdma and coex table */
+       btc8821a1ant_ps_tdma(btcoexist, NORMAL_EXEC, false, 8);
+       btc8821a1ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 0);
 }
 
 static void btc8821a1ant_act_wifi_not_conn_scan(struct btc_coexist *btcoexist)
 {
-       halbtc8821a1ant_power_save_state(btcoexist,
-                                        BTC_PS_WIFI_NATIVE, 0x0, 0x0);
+       struct btc_bt_link_info *bt_link_info = &btcoexist->bt_link_info;
 
-       halbtc8821a1ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 22);
-       halbtc8821a1ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 1);
+       btc8821a1ant_power_save_state(btcoexist, BTC_PS_WIFI_NATIVE, 0x0, 0x0);
+
+       /* tdma and coex table */
+       if (coex_dm->bt_status == BT_8821A_1ANT_BT_STATUS_ACL_BUSY) {
+               /* test the combined A2DP+PAN case first so it is reachable */
+               if (bt_link_info->a2dp_exist && bt_link_info->pan_exist) {
+                       btc8821a1ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 22);
+                       btc8821a1ant_coex_table_with_type(btcoexist,
+                                                         NORMAL_EXEC, 4);
+               } else if (bt_link_info->a2dp_exist) {
+                       btc8821a1ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 14);
+                       btc8821a1ant_coex_table_with_type(btcoexist,
+                                                         NORMAL_EXEC, 1);
+               } else {
+                       btc8821a1ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 20);
+                       btc8821a1ant_coex_table_with_type(btcoexist,
+                                                         NORMAL_EXEC, 4);
+               }
+       } else if ((coex_dm->bt_status == BT_8821A_1ANT_BT_STATUS_SCO_BUSY) ||
+                  (coex_dm->bt_status ==
+                   BT_8821A_1ANT_BT_STATUS_ACL_SCO_BUSY)) {
+               btc8821a1ant_act_bt_sco_hid_only_busy(btcoexist,
+                               BT_8821A_1ANT_WIFI_STATUS_CONNECTED_SCAN);
+       } else {
+               btc8821a1ant_ps_tdma(btcoexist, NORMAL_EXEC, false, 8);
+               btc8821a1ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 2);
+       }
 }
 
-static void halbtc8821a1ant_action_wifi_connected_scan(
-       struct btc_coexist *btcoexist) {
+static
+void btc8821a1ant_action_wifi_connected_scan(struct btc_coexist *btcoexist)
+{
        struct btc_bt_link_info *bt_link_info = &btcoexist->bt_link_info;
 
-       /* power save state*/
-       halbtc8821a1ant_power_save_state(btcoexist,
-                                        BTC_PS_WIFI_NATIVE, 0x0, 0x0);
+       /* power save state */
+       btc8821a1ant_power_save_state(btcoexist,
+                                     BTC_PS_WIFI_NATIVE, 0x0, 0x0);
 
-       /* tdma and coex table*/
+       /* tdma and coex table */
        if (BT_8821A_1ANT_BT_STATUS_ACL_BUSY == coex_dm->bt_status) {
                if (bt_link_info->a2dp_exist && bt_link_info->pan_exist) {
-                       halbtc8821a1ant_ps_tdma(btcoexist, NORMAL_EXEC,
-                                               true, 22);
-                       halbtc8821a1ant_coex_table_with_type(btcoexist,
-                                                            NORMAL_EXEC, 1);
+                       btc8821a1ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 22);
+                       btc8821a1ant_coex_table_with_type(btcoexist,
+                                                         NORMAL_EXEC, 1);
                } else {
-               halbtc8821a1ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 20);
-               halbtc8821a1ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 1);
+                       btc8821a1ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 20);
+                       btc8821a1ant_coex_table_with_type(btcoexist,
+                                                         NORMAL_EXEC, 1);
-       }
+               }
        } else if ((BT_8821A_1ANT_BT_STATUS_SCO_BUSY ==
                    coex_dm->bt_status) ||
@@ -1881,52 +1733,52 @@ static void halbtc8821a1ant_action_wifi_connected_scan(
                btc8821a1ant_act_bt_sco_hid_only_busy(btcoexist,
                        BT_8821A_1ANT_WIFI_STATUS_CONNECTED_SCAN);
        } else {
-               halbtc8821a1ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 20);
-               halbtc8821a1ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 1);
+               btc8821a1ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 20);
+               btc8821a1ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 1);
        }
 }
 
 static void btc8821a1ant_act_wifi_conn_sp_pkt(struct btc_coexist *btcoexist)
 {
        struct btc_bt_link_info *bt_link_info = &btcoexist->bt_link_info;
-       bool    hs_connecting = false;
+       bool hs_connecting = false;
 
        btcoexist->btc_get(btcoexist, BTC_GET_BL_HS_CONNECTING, &hs_connecting);
 
-       halbtc8821a1ant_power_save_state(btcoexist, BTC_PS_WIFI_NATIVE,
-                                        0x0, 0x0);
+       btc8821a1ant_power_save_state(btcoexist, BTC_PS_WIFI_NATIVE,
+                                     0x0, 0x0);
 
-       /* tdma and coex table*/
-       if (BT_8821A_1ANT_BT_STATUS_ACL_BUSY == coex_dm->bt_status) {
+       /* tdma and coex table */
+       if (coex_dm->bt_status == BT_8821A_1ANT_BT_STATUS_ACL_BUSY) {
                if (bt_link_info->a2dp_exist && bt_link_info->pan_exist) {
-                       halbtc8821a1ant_ps_tdma(btcoexist, NORMAL_EXEC,
-                                               true, 22);
-                       halbtc8821a1ant_coex_table_with_type(btcoexist,
-                                                            NORMAL_EXEC, 1);
+                       btc8821a1ant_ps_tdma(btcoexist, NORMAL_EXEC,
+                                            true, 22);
+                       btc8821a1ant_coex_table_with_type(btcoexist,
+                                                         NORMAL_EXEC, 1);
                } else {
-                       halbtc8821a1ant_ps_tdma(btcoexist, NORMAL_EXEC,
-                                               true, 20);
-                       halbtc8821a1ant_coex_table_with_type(btcoexist,
-                                                            NORMAL_EXEC, 1);
+                       btc8821a1ant_ps_tdma(btcoexist, NORMAL_EXEC,
+                                            true, 20);
+                       btc8821a1ant_coex_table_with_type(btcoexist,
+                                                         NORMAL_EXEC, 1);
                }
        } else {
-               halbtc8821a1ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 20);
-               halbtc8821a1ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 1);
+               btc8821a1ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 20);
+               btc8821a1ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 1);
        }
 }
 
-static void halbtc8821a1ant_action_wifi_connected(struct btc_coexist *btcoexist)
+static void btc8821a1ant_action_wifi_connected(struct btc_coexist *btcoexist)
 {
        struct rtl_priv *rtlpriv = btcoexist->adapter;
-       bool    wifi_busy = false;
-       bool    scan = false, link = false, roam = false;
-       bool    under_4way = false;
+       bool wifi_busy = false;
+       bool scan = false, link = false, roam = false;
+       bool under_4way = false;
 
        RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
                 "[BTCoex], CoexForWifiConnect()===>\n");
 
-       btcoexist->btc_get(btcoexist,
-                BTC_GET_BL_WIFI_4_WAY_PROGRESS, &under_4way);
+       btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_4_WAY_PROGRESS,
+                          &under_4way);
        if (under_4way) {
                btc8821a1ant_act_wifi_conn_sp_pkt(btcoexist);
                RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
@@ -1938,7 +1790,7 @@ static void halbtc8821a1ant_action_wifi_connected(struct btc_coexist *btcoexist)
        btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_LINK, &link);
        btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_ROAM, &roam);
        if (scan || link || roam) {
-               halbtc8821a1ant_action_wifi_connected_scan(btcoexist);
+               btc8821a1ant_action_wifi_connected_scan(btcoexist);
                RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
                         "[BTCoex], CoexForWifiConnect(), return for wifi is under scan<===\n");
                return;
@@ -1947,14 +1799,14 @@ static void halbtc8821a1ant_action_wifi_connected(struct btc_coexist *btcoexist)
-       /* power save state*/
+       /* power save state */
        if (BT_8821A_1ANT_BT_STATUS_ACL_BUSY ==
                        coex_dm->bt_status && !btcoexist->bt_link_info.hid_only)
-               halbtc8821a1ant_power_save_state(btcoexist,
-                                                BTC_PS_LPS_ON, 0x50, 0x4);
+               btc8821a1ant_power_save_state(btcoexist,
+                                             BTC_PS_LPS_ON, 0x50, 0x4);
        else
-               halbtc8821a1ant_power_save_state(btcoexist,
-                                                BTC_PS_WIFI_NATIVE,
-                                                0x0, 0x0);
+               btc8821a1ant_power_save_state(btcoexist,
+                                             BTC_PS_WIFI_NATIVE,
+                                             0x0, 0x0);
 
-       /* tdma and coex table*/
+       /* tdma and coex table */
        btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_BUSY, &wifi_busy);
        if (!wifi_busy) {
                if (BT_8821A_1ANT_BT_STATUS_ACL_BUSY == coex_dm->bt_status) {
@@ -1967,10 +1819,10 @@ static void halbtc8821a1ant_action_wifi_connected(struct btc_coexist *btcoexist)
                        btc8821a1ant_act_bt_sco_hid_only_busy(btcoexist,
                                BT_8821A_1ANT_WIFI_STATUS_CONNECTED_IDLE);
                } else {
-                       halbtc8821a1ant_ps_tdma(btcoexist, NORMAL_EXEC,
-                                               true, 5);
-                       halbtc8821a1ant_coex_table_with_type(btcoexist,
-                                                            NORMAL_EXEC, 2);
+                       btc8821a1ant_ps_tdma(btcoexist, NORMAL_EXEC,
+                                            true, 5);
+                       btc8821a1ant_coex_table_with_type(btcoexist,
+                                                         NORMAL_EXEC, 2);
                }
        } else {
                if (BT_8821A_1ANT_BT_STATUS_ACL_BUSY == coex_dm->bt_status) {
@@ -1983,10 +1835,9 @@ static void halbtc8821a1ant_action_wifi_connected(struct btc_coexist *btcoexist)
                        btc8821a1ant_act_bt_sco_hid_only_busy(btcoexist,
                                BT_8821A_1ANT_WIFI_STATUS_CONNECTED_BUSY);
                } else {
-                       halbtc8821a1ant_ps_tdma(btcoexist, NORMAL_EXEC,
-                                               true, 5);
-                       halbtc8821a1ant_coex_table_with_type(btcoexist,
-                                                            NORMAL_EXEC, 2);
+                       btc8821a1ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 5);
+                       btc8821a1ant_coex_table_with_type(btcoexist,
+                                                         NORMAL_EXEC, 2);
                }
        }
 }
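CoexForWifiConnect() dispatches in a strict priority order. A reader's sketch, with the booleans standing in for the btc_get() queries the real function makes:

	static void coex_for_wifi_connect_sketch(struct btc_coexist *btcoexist,
						 bool under_4way,
						 bool scan_link_roam,
						 bool bt_acl_busy,
						 bool hid_only)
	{
		if (under_4way) {		/* 4-way handshake first */
			btc8821a1ant_act_wifi_conn_sp_pkt(btcoexist);
			return;
		}
		if (scan_link_roam) {		/* then scan/link/roam */
			btc8821a1ant_action_wifi_connected_scan(btcoexist);
			return;
		}
		/* LPS only when BT is ACL-busy and the link is not HID-only */
		if (bt_acl_busy && !hid_only)
			btc8821a1ant_power_save_state(btcoexist, BTC_PS_LPS_ON,
						      0x50, 0x4);
		else
			btc8821a1ant_power_save_state(btcoexist,
						      BTC_PS_WIFI_NATIVE,
						      0x0, 0x0);
		/* finally: TDMA/coex-table keyed on wifi_busy x bt_status */
	}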
@@ -1994,52 +1845,52 @@ static void halbtc8821a1ant_action_wifi_connected(struct btc_coexist *btcoexist)
 static void btc8821a1ant_run_sw_coex_mech(struct btc_coexist *btcoexist)
 {
        struct rtl_priv *rtlpriv = btcoexist->adapter;
-       u8      algorithm = 0;
+       u8 algorithm = 0;
 
-       algorithm = halbtc8821a1ant_action_algorithm(btcoexist);
+       algorithm = btc8821a1ant_action_algorithm(btcoexist);
        coex_dm->cur_algorithm = algorithm;
 
-       if (!halbtc8821a1ant_is_common_action(btcoexist)) {
+       if (!btc8821a1ant_is_common_action(btcoexist)) {
                switch (coex_dm->cur_algorithm) {
                case BT_8821A_1ANT_COEX_ALGO_SCO:
                        RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
                                 "[BTCoex], Action algorithm = SCO\n");
-                       halbtc8821a1ant_action_sco(btcoexist);
+                       btc8821a1ant_action_sco(btcoexist);
                        break;
                case BT_8821A_1ANT_COEX_ALGO_HID:
                        RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
                                 "[BTCoex], Action algorithm = HID\n");
-                       halbtc8821a1ant_action_hid(btcoexist);
+                       btc8821a1ant_action_hid(btcoexist);
                        break;
                case BT_8821A_1ANT_COEX_ALGO_A2DP:
                        RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
                                 "[BTCoex], Action algorithm = A2DP\n");
-                       halbtc8821a1ant_action_a2dp(btcoexist);
+                       btc8821a1ant_action_a2dp(btcoexist);
                        break;
                case BT_8821A_1ANT_COEX_ALGO_A2DP_PANHS:
                        RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
                                 "[BTCoex], Action algorithm = A2DP+PAN(HS)\n");
-                       halbtc8821a1ant_action_a2dp_pan_hs(btcoexist);
+                       btc8821a1ant_action_a2dp_pan_hs(btcoexist);
                        break;
                case BT_8821A_1ANT_COEX_ALGO_PANEDR:
                        RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
                                 "[BTCoex], Action algorithm = PAN(EDR)\n");
-                       halbtc8821a1ant_action_pan_edr(btcoexist);
+                       btc8821a1ant_action_pan_edr(btcoexist);
                        break;
                case BT_8821A_1ANT_COEX_ALGO_PANHS:
                        RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
                                 "[BTCoex], Action algorithm = HS mode\n");
-                       halbtc8821a1ant_action_pan_hs(btcoexist);
+                       btc8821a1ant_action_pan_hs(btcoexist);
                        break;
                case BT_8821A_1ANT_COEX_ALGO_PANEDR_A2DP:
                        RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
                                 "[BTCoex], Action algorithm = PAN+A2DP\n");
-                       halbtc8821a1ant_action_pan_edr_a2dp(btcoexist);
+                       btc8821a1ant_action_pan_edr_a2dp(btcoexist);
                        break;
                case BT_8821A_1ANT_COEX_ALGO_PANEDR_HID:
                        RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
                                 "[BTCoex], Action algorithm = PAN(EDR)+HID\n");
-                       halbtc8821a1ant_action_pan_edr_hid(btcoexist);
+                       btc8821a1ant_action_pan_edr_hid(btcoexist);
                        break;
                case BT_8821A_1ANT_COEX_ALGO_HID_A2DP_PANEDR:
                        RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
@@ -2049,28 +1900,30 @@ static void btc8821a1ant_run_sw_coex_mech(struct btc_coexist *btcoexist)
                case BT_8821A_1ANT_COEX_ALGO_HID_A2DP:
                        RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
                                 "[BTCoex], Action algorithm = HID+A2DP\n");
-                       halbtc8821a1ant_action_hid_a2dp(btcoexist);
+                       btc8821a1ant_action_hid_a2dp(btcoexist);
                        break;
                default:
                        RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
                                 "[BTCoex], Action algorithm = coexist All Off!!\n");
-                       /*halbtc8821a1ant_coex_all_off(btcoexist);*/
+                       /* btc8821a1ant_coex_all_off(btcoexist); */
                        break;
                }
                coex_dm->pre_algorithm = coex_dm->cur_algorithm;
        }
 }
 
-static void halbtc8821a1ant_run_coexist_mechanism(struct btc_coexist *btcoexist)
+static void btc8821a1ant_run_coexist_mechanism(struct btc_coexist *btcoexist)
 {
        struct rtl_priv *rtlpriv = btcoexist->adapter;
        struct btc_bt_link_info *bt_link_info = &btcoexist->bt_link_info;
-       bool    wifi_connected = false, bt_hs_on = false;
-       bool    increase_scan_dev_num = false;
-       bool    bt_ctrl_agg_buf_size = false;
-       u8      agg_buf_size = 5;
-       u8      wifi_rssi_state = BTC_RSSI_STATE_HIGH;
-       bool    wifi_under_5g = false;
+       bool wifi_connected = false, bt_hs_on = false;
+       bool increase_scan_dev_num = false;
+       bool bt_ctrl_agg_buf_size = false;
+       u8 agg_buf_size = 5;
+       u8 wifi_rssi_state = BTC_RSSI_STATE_HIGH;
+       u32 wifi_link_status = 0;
+       u32 num_of_wifi_link = 0;
+       bool wifi_under_5g = false;
 
        RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
                 "[BTCoex], RunCoexistMechanism()===>\n");
@@ -2097,7 +1950,7 @@ static void halbtc8821a1ant_run_coexist_mechanism(struct btc_coexist *btcoexist)
        if (wifi_under_5g) {
                RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
                         "[BTCoex], RunCoexistMechanism(), return for 5G <===\n");
-               halbtc8821a1ant_coex_under_5g(btcoexist);
+               btc8821a1ant_coex_under_5g(btcoexist);
                return;
        }
 
@@ -2109,21 +1962,41 @@ static void halbtc8821a1ant_run_coexist_mechanism(struct btc_coexist *btcoexist)
        btcoexist->btc_set(btcoexist, BTC_SET_BL_INC_SCAN_DEV_NUM,
                           &increase_scan_dev_num);
 
-       btcoexist->btc_get(btcoexist,
-                BTC_GET_BL_WIFI_CONNECTED, &wifi_connected);
+       btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_CONNECTED,
+                          &wifi_connected);
+
+       btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_LINK_STATUS,
+                          &wifi_link_status);
+       num_of_wifi_link = wifi_link_status >> 16;
+       if ((num_of_wifi_link >= 2) ||
+           (wifi_link_status & WIFI_P2P_GO_CONNECTED)) {
+               btc8821a1ant_limited_tx(btcoexist, NORMAL_EXEC, 0, 0, 0, 0);
+               btc8821a1ant_limited_rx(btcoexist, NORMAL_EXEC, false,
+                                       bt_ctrl_agg_buf_size, agg_buf_size);
+               btc8821a1ant_action_wifi_multi_port(btcoexist);
+               return;
+       }
 
        if (!bt_link_info->sco_exist && !bt_link_info->hid_exist) {
-               halbtc8821a1ant_limited_tx(btcoexist, NORMAL_EXEC, 0, 0, 0, 0);
+               btc8821a1ant_limited_tx(btcoexist, NORMAL_EXEC, 0, 0, 0, 0);
        } else {
                if (wifi_connected) {
                        wifi_rssi_state =
-                                halbtc8821a1ant_WifiRssiState(btcoexist, 1, 2,
-                                                              30, 0);
-                       halbtc8821a1ant_limited_tx(btcoexist,
-                                                  NORMAL_EXEC, 1, 1, 1, 1);
+                               btc8821a1ant_wifi_rssi_state(btcoexist, 1, 2,
+                                                            30, 0);
+                       if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) ||
+                           (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
+                               btc8821a1ant_limited_tx(btcoexist,
+                                                       NORMAL_EXEC, 1, 1,
+                                                       1, 1);
+                       } else {
+                               btc8821a1ant_limited_tx(btcoexist,
+                                                       NORMAL_EXEC, 1, 1,
+                                                       1, 1);
+                       }
                } else {
-                       halbtc8821a1ant_limited_tx(btcoexist, NORMAL_EXEC,
-                                                  0, 0, 0, 0);
+                       btc8821a1ant_limited_tx(btcoexist, NORMAL_EXEC,
+                                               0, 0, 0, 0);
                }
        }
 
@@ -2137,22 +2010,22 @@ static void halbtc8821a1ant_run_coexist_mechanism(struct btc_coexist *btcoexist)
                bt_ctrl_agg_buf_size = true;
                agg_buf_size = 0x8;
        }
-       halbtc8821a1ant_limited_rx(btcoexist, NORMAL_EXEC, false,
-                                  bt_ctrl_agg_buf_size, agg_buf_size);
+       btc8821a1ant_limited_rx(btcoexist, NORMAL_EXEC, false,
+                               bt_ctrl_agg_buf_size, agg_buf_size);
 
        btc8821a1ant_run_sw_coex_mech(btcoexist);
 
        btcoexist->btc_get(btcoexist, BTC_GET_BL_HS_OPERATION, &bt_hs_on);
        if (coex_sta->c2h_bt_inquiry_page) {
-               halbtc8821a1ant_action_bt_inquiry(btcoexist);
+               btc8821a1ant_action_bt_inquiry(btcoexist);
                return;
        } else if (bt_hs_on) {
-               halbtc8821a1ant_action_hs(btcoexist);
+               btc8821a1ant_action_hs(btcoexist);
                return;
        }
 
        if (!wifi_connected) {
-               bool    scan = false, link = false, roam = false;
+               bool scan = false, link = false, roam = false;
 
                RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
                         "[BTCoex], wifi is non connected-idle !!!\n");
@@ -2161,48 +2034,57 @@ static void halbtc8821a1ant_run_coexist_mechanism(struct btc_coexist *btcoexist)
                btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_LINK, &link);
                btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_ROAM, &roam);
 
-               if (scan || link || roam)
-                       btc8821a1ant_act_wifi_not_conn_scan(btcoexist);
-               else
-                       halbtc8821a1ant_action_wifi_not_connected(btcoexist);
+               if (scan || link || roam) {
+                       if (scan)
+                               btc8821a1ant_act_wifi_not_conn_scan(btcoexist);
+                       else
+                               btc8821a1ant_action_wifi_not_connected_asso_auth(
+                                       btcoexist);
+               } else {
+                       btc8821a1ant_action_wifi_not_connected(btcoexist);
+               }
        } else {
-               /* wifi LPS/Busy*/
-               halbtc8821a1ant_action_wifi_connected(btcoexist);
+               /* wifi LPS/Busy */
+               btc8821a1ant_action_wifi_connected(btcoexist);
        }
 }
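
/*
 * Editor's sketch (not part of the patch): how the wifi_link_status
 * word consumed above is assumed to be packed -- the number of active
 * links in the upper 16 bits, per-port-type flags such as
 * WIFI_P2P_GO_CONNECTED in the lower 16. The flag bit used below is an
 * assumption for illustration; the real value comes from the
 * btcoexist headers.
 */
#include <stdint.h>
#include <stdbool.h>
#include <stdio.h>

#define WIFI_P2P_GO_CONNECTED_SKETCH	0x00000008	/* assumed bit */

static bool needs_multi_port_action(uint32_t wifi_link_status)
{
	uint32_t num_of_wifi_link = wifi_link_status >> 16;

	return (num_of_wifi_link >= 2) ||
	       (wifi_link_status & WIFI_P2P_GO_CONNECTED_SKETCH);
}

int main(void)
{
	printf("%d\n", needs_multi_port_action(0x00020000)); /* 1: two links */
	printf("%d\n", needs_multi_port_action(0x00010008)); /* 1: P2P GO   */
	printf("%d\n", needs_multi_port_action(0x00010000)); /* 0: one STA  */
	return 0;
}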
 
-static void halbtc8821a1ant_init_coex_dm(struct btc_coexist *btcoexist)
+static void btc8821a1ant_init_coex_dm(struct btc_coexist *btcoexist)
 {
-       /* force to reset coex mechanism*/
-       /* sw all off*/
-       halbtc8821a1ant_sw_mechanism(btcoexist, false);
+       /* force to reset coex mechanism
+        * sw all off
+        */
+       btc8821a1ant_sw_mechanism(btcoexist, false);
 
-       halbtc8821a1ant_ps_tdma(btcoexist, FORCE_EXEC, false, 8);
-       halbtc8821a1ant_coex_table_with_type(btcoexist, FORCE_EXEC, 0);
+       btc8821a1ant_ps_tdma(btcoexist, FORCE_EXEC, false, 8);
+       btc8821a1ant_coex_table_with_type(btcoexist, FORCE_EXEC, 0);
 }
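
/*
 * Editor's sketch of the exec-type convention used throughout this
 * driver family (names and fields simplified): NORMAL_EXEC skips the
 * hardware write when the requested state equals the cached one, while
 * FORCE_EXEC always writes. That is why the init path above passes
 * FORCE_EXEC -- the cached state is stale after a reset.
 */
#include <stdbool.h>
#include <stdio.h>

struct coex_cache {
	bool pre_ps_tdma_on;
	int pre_ps_tdma;
};

static void set_ps_tdma_hw(bool on, int type)
{
	printf("program TDMA: on=%d type=%d\n", on, type);
}

static void ps_tdma(struct coex_cache *c, bool force_exec, bool on, int type)
{
	if (!force_exec &&
	    c->pre_ps_tdma_on == on && c->pre_ps_tdma == type)
		return;			/* nothing changed, skip the write */

	set_ps_tdma_hw(on, type);
	c->pre_ps_tdma_on = on;
	c->pre_ps_tdma = type;
}

int main(void)
{
	struct coex_cache c = { false, 8 };

	ps_tdma(&c, false, false, 8);	/* NORMAL_EXEC: suppressed */
	ps_tdma(&c, true, false, 8);	/* FORCE_EXEC: always programmed */
	return 0;
}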
 
-static void halbtc8821a1ant_init_hw_config(struct btc_coexist *btcoexist,
-                                          bool back_up)
+static void btc8821a1ant_init_hw_config(struct btc_coexist *btcoexist,
+                                       bool back_up, bool wifi_only)
 {
        struct rtl_priv *rtlpriv = btcoexist->adapter;
-       u8      u1_tmp = 0;
-       bool    wifi_under_5g = false;
+       u8 u1_tmp = 0;
+       bool wifi_under_5g = false;
 
        RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
                 "[BTCoex], 1Ant Init HW Config!!\n");
 
+       if (wifi_only)
+               return;
+
        if (back_up) {
                coex_dm->backup_arfr_cnt1 = btcoexist->btc_read_4byte(btcoexist,
                                                                      0x430);
                coex_dm->backup_arfr_cnt2 = btcoexist->btc_read_4byte(btcoexist,
                                                                      0x434);
                coex_dm->backup_retry_limit =
-                        btcoexist->btc_read_2byte(btcoexist, 0x42a);
+                       btcoexist->btc_read_2byte(btcoexist, 0x42a);
                coex_dm->backup_ampdu_max_time =
-                        btcoexist->btc_read_1byte(btcoexist, 0x456);
+                       btcoexist->btc_read_1byte(btcoexist, 0x456);
        }
 
-       /* 0x790[5:0] = 0x5*/
+       /* 0x790[5:0] = 0x5 */
        u1_tmp = btcoexist->btc_read_1byte(btcoexist, 0x790);
        u1_tmp &= 0xc0;
        u1_tmp |= 0x5;
@@ -2210,35 +2092,33 @@ static void halbtc8821a1ant_init_hw_config(struct btc_coexist *btcoexist,
 
        btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_UNDER_5G, &wifi_under_5g);
 
-       /*Antenna config*/
+       /* Antenna config */
        if (wifi_under_5g)
-               halbtc8821a1ant_set_ant_path(btcoexist, BTC_ANT_PATH_BT,
-                                            true, false);
+               btc8821a1ant_set_ant_path(btcoexist, BTC_ANT_PATH_BT,
+                                         true, false);
        else
-               halbtc8821a1ant_set_ant_path(btcoexist, BTC_ANT_PATH_PTA,
-                                            true, false);
-       /* PTA parameter*/
-       halbtc8821a1ant_coex_table_with_type(btcoexist, FORCE_EXEC, 0);
-
-       /* Enable counter statistics*/
-       /*0x76e[3] =1, WLAN_Act control by PTA*/
+               btc8821a1ant_set_ant_path(btcoexist, BTC_ANT_PATH_PTA,
+                                         true, false);
+       /* PTA parameter */
+       btc8821a1ant_coex_table_with_type(btcoexist, FORCE_EXEC, 0);
+
+       /* Enable counter statistics
+        * 0x76e[3] =1, WLAN_Act control by PTA
+        */
        btcoexist->btc_write_1byte(btcoexist, 0x76e, 0xc);
        btcoexist->btc_write_1byte(btcoexist, 0x778, 0x3);
        btcoexist->btc_write_1byte_bitmask(btcoexist, 0x40, 0x20, 0x1);
 }
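
/*
 * Editor's sketch of the read-modify-write performed above for
 * "0x790[5:0] = 0x5": read the register, preserve bits [7:6]
 * (mask 0xc0), install the new value in bits [5:0], write it back.
 */
#include <stdint.h>
#include <assert.h>

static uint8_t set_low6(uint8_t reg, uint8_t val)
{
	reg &= 0xc0;		/* keep bits [7:6] */
	reg |= (val & 0x3f);	/* install bits [5:0] */
	return reg;
}

int main(void)
{
	assert(set_low6(0xff, 0x5) == 0xc5);
	assert(set_low6(0x12, 0x5) == 0x05);
	return 0;
}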
 
-/*============================================================*/
-/* work around function start with wa_halbtc8821a1ant_*/
-/*============================================================*/
-/*============================================================*/
-/* extern function start with EXhalbtc8821a1ant_*/
-/*============================================================*/
-void ex_halbtc8821a1ant_init_hwconfig(struct btc_coexist *btcoexist)
+/**************************************************************
+ * extern function start with ex_btc8821a1ant_
+ **************************************************************/
+void ex_btc8821a1ant_init_hwconfig(struct btc_coexist *btcoexist, bool wifionly)
 {
-       halbtc8821a1ant_init_hw_config(btcoexist, true);
+       btc8821a1ant_init_hw_config(btcoexist, true, wifionly);
 }
 
-void ex_halbtc8821a1ant_init_coex_dm(struct btc_coexist *btcoexist)
+void ex_btc8821a1ant_init_coex_dm(struct btc_coexist *btcoexist)
 {
        struct rtl_priv *rtlpriv = btcoexist->adapter;
 
@@ -2247,12 +2127,12 @@ void ex_halbtc8821a1ant_init_coex_dm(struct btc_coexist *btcoexist)
 
        btcoexist->stop_coex_dm = false;
 
-       halbtc8821a1ant_init_coex_dm(btcoexist);
+       btc8821a1ant_init_coex_dm(btcoexist);
 
-       halbtc8821a1ant_query_bt_info(btcoexist);
+       btc8821a1ant_query_bt_info(btcoexist);
 }
 
-void ex_halbtc8821a1ant_display_coex_info(struct btc_coexist *btcoexist)
+void ex_btc8821a1ant_display_coex_info(struct btc_coexist *btcoexist)
 {
        struct btc_board_info *board_info = &btcoexist->board_info;
        struct btc_stack_info *stack_info = &btcoexist->stack_info;
@@ -2359,7 +2239,7 @@ void ex_halbtc8821a1ant_display_coex_info(struct btc_coexist *btcoexist)
                 "uplink" : "downlink")));
        RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
                   "\r\n %-35s = [%s/ %d/ %d] ", "BT [status/ rssi/ retryCnt]",
-                  ((btcoexist->bt_info.bt_disabled) ? ("disabled") :
+                  ((coex_sta->bt_disabled) ? ("disabled") :
                   ((coex_sta->c2h_bt_inquiry_page) ? ("inquiry/page scan") :
                   ((BT_8821A_1ANT_BT_STATUS_NON_CONNECTED_IDLE ==
                     coex_dm->bt_status) ?
@@ -2397,7 +2277,7 @@ void ex_halbtc8821a1ant_display_coex_info(struct btc_coexist *btcoexist)
                 "\r\n %-35s = %s/%s, (0x%x/0x%x)",
                 "PS state, IPS/LPS, (lps/rpwm)",
                 ((coex_sta->under_ips ? "IPS ON" : "IPS OFF")),
-                ((coex_sta->under_Lps ? "LPS ON" : "LPS OFF")),
+                ((coex_sta->under_lps ? "LPS ON" : "LPS OFF")),
                 btcoexist->bt_info.lps_val,
                 btcoexist->bt_info.rpwm_val);
        btcoexist->btc_disp_dbg_msg(btcoexist, BTC_DBG_DISP_FW_PWR_MODE_CMD);
@@ -2422,7 +2302,7 @@ void ex_halbtc8821a1ant_display_coex_info(struct btc_coexist *btcoexist)
                         "\r\n %-35s = 0x%x ", "Rate Mask",
                         btcoexist->bt_info.ra_mask);
 
-               /* Fw mechanism*/
+               /* Fw mechanism */
                RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s",
                         "============[Fw mechanism]============");
 
@@ -2444,7 +2324,7 @@ void ex_halbtc8821a1ant_display_coex_info(struct btc_coexist *btcoexist)
                         coex_dm->cur_ignore_wlan_act);
        }
 
-       /* Hw setting*/
+       /* Hw setting */
        RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
                 "\r\n %-35s", "============[Hw setting]============");
 
@@ -2527,38 +2407,46 @@ void ex_halbtc8821a1ant_display_coex_info(struct btc_coexist *btcoexist)
                 "\r\n %-35s = %d/ %d", "0x774(low-pri rx/tx)",
                 coex_sta->low_priority_rx, coex_sta->low_priority_tx);
 #if (BT_AUTO_REPORT_ONLY_8821A_1ANT == 1)
-       halbtc8821a1ant_monitor_bt_ctr(btcoexist);
+       btc8821a1ant_monitor_bt_ctr(btcoexist);
 #endif
        btcoexist->btc_disp_dbg_msg(btcoexist, BTC_DBG_DISP_COEX_STATISTICS);
 }
 
-void ex_halbtc8821a1ant_ips_notify(struct btc_coexist *btcoexist, u8 type)
+void ex_btc8821a1ant_ips_notify(struct btc_coexist *btcoexist, u8 type)
 {
        struct rtl_priv *rtlpriv = btcoexist->adapter;
+       bool wifi_under_5g = false;
 
        if (btcoexist->manual_control || btcoexist->stop_coex_dm)
                return;
+       btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_UNDER_5G, &wifi_under_5g);
+       if (wifi_under_5g) {
+               RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                        "[BTCoex], RunCoexistMechanism(), return for 5G <===\n");
+               btc8821a1ant_coex_under_5g(btcoexist);
+               return;
+       }
 
        if (BTC_IPS_ENTER == type) {
                RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
                         "[BTCoex], IPS ENTER notify\n");
                coex_sta->under_ips = true;
-               halbtc8821a1ant_set_ant_path(btcoexist,
-                                            BTC_ANT_PATH_BT, false, true);
-               /*set PTA control*/
-               halbtc8821a1ant_ps_tdma(btcoexist, NORMAL_EXEC, false, 8);
-               halbtc8821a1ant_coex_table_with_type(btcoexist,
-                                                    NORMAL_EXEC, 0);
+               btc8821a1ant_set_ant_path(btcoexist,
+                                         BTC_ANT_PATH_BT, false, true);
+               /* set PTA control */
+               btc8821a1ant_ps_tdma(btcoexist, NORMAL_EXEC, false, 8);
+               btc8821a1ant_coex_table_with_type(btcoexist,
+                                                 NORMAL_EXEC, 0);
        } else if (BTC_IPS_LEAVE == type) {
                RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
                         "[BTCoex], IPS LEAVE notify\n");
                coex_sta->under_ips = false;
 
-               halbtc8821a1ant_run_coexist_mechanism(btcoexist);
+               btc8821a1ant_run_coexist_mechanism(btcoexist);
        }
 }
 
-void ex_halbtc8821a1ant_lps_notify(struct btc_coexist *btcoexist, u8 type)
+void ex_btc8821a1ant_lps_notify(struct btc_coexist *btcoexist, u8 type)
 {
        struct rtl_priv *rtlpriv = btcoexist->adapter;
 
@@ -2568,22 +2456,35 @@ void ex_halbtc8821a1ant_lps_notify(struct btc_coexist *btcoexist, u8 type)
        if (BTC_LPS_ENABLE == type) {
                RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
                         "[BTCoex], LPS ENABLE notify\n");
-               coex_sta->under_Lps = true;
+               coex_sta->under_lps = true;
        } else if (BTC_LPS_DISABLE == type) {
                RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
                         "[BTCoex], LPS DISABLE notify\n");
-               coex_sta->under_Lps = false;
+               coex_sta->under_lps = false;
        }
 }
 
-void ex_halbtc8821a1ant_scan_notify(struct btc_coexist *btcoexist, u8 type)
+void ex_btc8821a1ant_scan_notify(struct btc_coexist *btcoexist, u8 type)
 {
        struct rtl_priv *rtlpriv = btcoexist->adapter;
        bool wifi_connected = false, bt_hs_on = false;
+       bool bt_ctrl_agg_buf_size = false;
+       bool wifi_under_5g = false;
+       u32 wifi_link_status = 0;
+       u32 num_of_wifi_link = 0;
+       u8 agg_buf_size = 5;
+
+       if (btcoexist->manual_control || btcoexist->stop_coex_dm)
+               return;
+       btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_UNDER_5G, &wifi_under_5g);
+       if (wifi_under_5g) {
+               RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                        "[BTCoex], RunCoexistMechanism(), return for 5G <===\n");
+               btc8821a1ant_coex_under_5g(btcoexist);
+               return;
+       }
 
-       if (btcoexist->manual_control ||
-           btcoexist->stop_coex_dm ||
-           btcoexist->bt_info.bt_disabled)
+       if (coex_sta->bt_disabled)
                return;
 
        btcoexist->btc_get(btcoexist,
@@ -2591,13 +2492,24 @@ void ex_halbtc8821a1ant_scan_notify(struct btc_coexist *btcoexist, u8 type)
        btcoexist->btc_get(btcoexist,
                 BTC_GET_BL_WIFI_CONNECTED, &wifi_connected);
 
-       halbtc8821a1ant_query_bt_info(btcoexist);
+       btc8821a1ant_query_bt_info(btcoexist);
+
+       btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_LINK_STATUS,
+                          &wifi_link_status);
+       num_of_wifi_link = wifi_link_status >> 16;
+       if (num_of_wifi_link >= 2) {
+               btc8821a1ant_limited_tx(btcoexist, NORMAL_EXEC, 0, 0, 0, 0);
+               btc8821a1ant_limited_rx(btcoexist, NORMAL_EXEC, false,
+                                       bt_ctrl_agg_buf_size, agg_buf_size);
+               btc8821a1ant_action_wifi_multi_port(btcoexist);
+               return;
+       }
 
        if (coex_sta->c2h_bt_inquiry_page) {
-               halbtc8821a1ant_action_bt_inquiry(btcoexist);
+               btc8821a1ant_action_bt_inquiry(btcoexist);
                return;
        } else if (bt_hs_on) {
-               halbtc8821a1ant_action_hs(btcoexist);
+               btc8821a1ant_action_hs(btcoexist);
                return;
        }
 
@@ -2605,40 +2517,62 @@ void ex_halbtc8821a1ant_scan_notify(struct btc_coexist *btcoexist, u8 type)
                RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
                         "[BTCoex], SCAN START notify\n");
                if (!wifi_connected) {
-                       /* non-connected scan*/
+                       /* non-connected scan */
                        btc8821a1ant_act_wifi_not_conn_scan(btcoexist);
                } else {
-                       /* wifi is connected*/
-                       halbtc8821a1ant_action_wifi_connected_scan(btcoexist);
+                       /* wifi is connected */
+                       btc8821a1ant_action_wifi_connected_scan(btcoexist);
                }
        } else if (BTC_SCAN_FINISH == type) {
                RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
                         "[BTCoex], SCAN FINISH notify\n");
                if (!wifi_connected) {
-                       /* non-connected scan*/
-                       halbtc8821a1ant_action_wifi_not_connected(btcoexist);
+                       /* non-connected scan */
+                       btc8821a1ant_action_wifi_not_connected(btcoexist);
                } else {
-                       halbtc8821a1ant_action_wifi_connected(btcoexist);
+                       btc8821a1ant_action_wifi_connected(btcoexist);
                }
        }
 }
 
-void ex_halbtc8821a1ant_connect_notify(struct btc_coexist *btcoexist, u8 type)
+void ex_btc8821a1ant_connect_notify(struct btc_coexist *btcoexist, u8 type)
 {
        struct rtl_priv *rtlpriv = btcoexist->adapter;
        bool    wifi_connected = false, bt_hs_on = false;
+       u32 wifi_link_status = 0;
+       u32 num_of_wifi_link = 0;
+       bool bt_ctrl_agg_buf_size = false;
+       bool wifi_under_5g = false;
+       u8 agg_buf_size = 5;
+
+       if (btcoexist->manual_control || btcoexist->stop_coex_dm ||
+           coex_sta->bt_disabled)
+               return;
+       btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_UNDER_5G, &wifi_under_5g);
+       if (wifi_under_5g) {
+               RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                        "[BTCoex], RunCoexistMechanism(), return for 5G <===\n");
+               btc8821a1ant_coex_under_5g(btcoexist);
+               return;
+       }
 
-       if (btcoexist->manual_control ||
-           btcoexist->stop_coex_dm ||
-           btcoexist->bt_info.bt_disabled)
+       btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_LINK_STATUS,
+                          &wifi_link_status);
+       num_of_wifi_link = wifi_link_status >> 16;
+       if (num_of_wifi_link >= 2) {
+               btc8821a1ant_limited_tx(btcoexist, NORMAL_EXEC, 0, 0, 0, 0);
+               btc8821a1ant_limited_rx(btcoexist, NORMAL_EXEC, false,
+                                       bt_ctrl_agg_buf_size, agg_buf_size);
+               btc8821a1ant_action_wifi_multi_port(btcoexist);
                return;
+       }
 
        btcoexist->btc_get(btcoexist, BTC_GET_BL_HS_OPERATION, &bt_hs_on);
        if (coex_sta->c2h_bt_inquiry_page) {
-               halbtc8821a1ant_action_bt_inquiry(btcoexist);
+               btc8821a1ant_action_bt_inquiry(btcoexist);
                return;
        } else if (bt_hs_on) {
-               halbtc8821a1ant_action_hs(btcoexist);
+               btc8821a1ant_action_hs(btcoexist);
                return;
        }
 
@@ -2653,26 +2587,33 @@ void ex_halbtc8821a1ant_connect_notify(struct btc_coexist *btcoexist, u8 type)
                btcoexist->btc_get(btcoexist,
                         BTC_GET_BL_WIFI_CONNECTED, &wifi_connected);
                if (!wifi_connected) {
-                       /* non-connected scan*/
-                       halbtc8821a1ant_action_wifi_not_connected(btcoexist);
+                       /* non-connected scan */
+                       btc8821a1ant_action_wifi_not_connected(btcoexist);
                } else {
-                       halbtc8821a1ant_action_wifi_connected(btcoexist);
+                       btc8821a1ant_action_wifi_connected(btcoexist);
                }
        }
 }
 
-void ex_halbtc8821a1ant_media_status_notify(struct btc_coexist *btcoexist,
-                                           u8 type)
+void ex_btc8821a1ant_media_status_notify(struct btc_coexist *btcoexist,
+                                        u8 type)
 {
        struct rtl_priv *rtlpriv = btcoexist->adapter;
        u8 h2c_parameter[3] = {0};
        u32 wifi_bw;
        u8 wifi_central_chnl;
+       bool wifi_under_5g = false;
 
-       if (btcoexist->manual_control ||
-           btcoexist->stop_coex_dm ||
-           btcoexist->bt_info.bt_disabled)
+       if (btcoexist->manual_control || btcoexist->stop_coex_dm ||
+           coex_sta->bt_disabled)
+               return;
+       btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_UNDER_5G, &wifi_under_5g);
+       if (wifi_under_5g) {
+               RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                        "[BTCoex], RunCoexistMechanism(), return for 5G <===\n");
+               btc8821a1ant_coex_under_5g(btcoexist);
                return;
+       }
 
        if (BTC_MEDIA_CONNECT == type) {
                RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
@@ -2682,17 +2623,16 @@ void ex_halbtc8821a1ant_media_status_notify(struct btc_coexist *btcoexist,
                         "[BTCoex], MEDIA disconnect notify\n");
        }
 
-       /* only 2.4G we need to inform bt the chnl mask*/
+       /* only 2.4G we need to inform bt the chnl mask */
        btcoexist->btc_get(btcoexist,
                           BTC_GET_U1_WIFI_CENTRAL_CHNL,
                           &wifi_central_chnl);
-       if ((BTC_MEDIA_CONNECT == type) &&
+       if ((type == BTC_MEDIA_CONNECT) &&
            (wifi_central_chnl <= 14)) {
-               /*h2c_parameter[0] = 0x1;*/
                h2c_parameter[0] = 0x0;
                h2c_parameter[1] = wifi_central_chnl;
                btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_BW, &wifi_bw);
-               if (BTC_WIFI_BW_HT40 == wifi_bw)
+               if (wifi_bw == BTC_WIFI_BW_HT40)
                        h2c_parameter[2] = 0x30;
                else
                        h2c_parameter[2] = 0x20;
@@ -2711,25 +2651,48 @@ void ex_halbtc8821a1ant_media_status_notify(struct btc_coexist *btcoexist,
        btcoexist->btc_fill_h2c(btcoexist, 0x66, 3, h2c_parameter);
 }
 
-void ex_halbtc8821a1ant_special_packet_notify(struct btc_coexist *btcoexist,
-                                             u8 type)
+void ex_btc8821a1ant_special_packet_notify(struct btc_coexist *btcoexist,
+                                          u8 type)
 {
        struct rtl_priv *rtlpriv = btcoexist->adapter;
        bool bt_hs_on = false;
+       bool bt_ctrl_agg_buf_size = false;
+       bool wifi_under_5g = false;
+       u32 wifi_link_status = 0;
+       u32 num_of_wifi_link = 0;
+       u8 agg_buf_size = 5;
+
+       if (btcoexist->manual_control || btcoexist->stop_coex_dm ||
+           coex_sta->bt_disabled)
+               return;
 
-       if (btcoexist->manual_control ||
-           btcoexist->stop_coex_dm ||
-           btcoexist->bt_info.bt_disabled)
+       btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_UNDER_5G, &wifi_under_5g);
+       if (wifi_under_5g) {
+               RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                        "[BTCoex], RunCoexistMechanism(), return for 5G <===\n");
+               btc8821a1ant_coex_under_5g(btcoexist);
                return;
+       }
 
        coex_sta->special_pkt_period_cnt = 0;
 
+       btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_LINK_STATUS,
+                          &wifi_link_status);
+       num_of_wifi_link = wifi_link_status >> 16;
+       if (num_of_wifi_link >= 2) {
+               btc8821a1ant_limited_tx(btcoexist, NORMAL_EXEC, 0, 0, 0, 0);
+               btc8821a1ant_limited_rx(btcoexist, NORMAL_EXEC, false,
+                                       bt_ctrl_agg_buf_size, agg_buf_size);
+               btc8821a1ant_action_wifi_multi_port(btcoexist);
+               return;
+       }
+
        btcoexist->btc_get(btcoexist, BTC_GET_BL_HS_OPERATION, &bt_hs_on);
        if (coex_sta->c2h_bt_inquiry_page) {
-               halbtc8821a1ant_action_bt_inquiry(btcoexist);
+               btc8821a1ant_action_bt_inquiry(btcoexist);
                return;
        } else if (bt_hs_on) {
-               halbtc8821a1ant_action_hs(btcoexist);
+               btc8821a1ant_action_hs(btcoexist);
                return;
        }
 
@@ -2741,12 +2704,13 @@ void ex_halbtc8821a1ant_special_packet_notify(struct btc_coexist *btcoexist,
        }
 }
 
-void ex_halbtc8821a1ant_bt_info_notify(struct btc_coexist *btcoexist,
-                                      u8 *tmp_buf, u8 length)
+void ex_btc8821a1ant_bt_info_notify(struct btc_coexist *btcoexist,
+                                   u8 *tmp_buf, u8 length)
 {
        struct rtl_priv *rtlpriv = btcoexist->adapter;
+       u8 i;
        u8 bt_info = 0;
-       u8 i, rsp_source = 0;
+       u8 rsp_source = 0;
        bool wifi_connected = false;
        bool bt_busy = false;
        bool wifi_under_5g = false;
@@ -2756,7 +2720,7 @@ void ex_halbtc8821a1ant_bt_info_notify(struct btc_coexist *btcoexist,
        btcoexist->btc_get(btcoexist,
                 BTC_GET_BL_WIFI_UNDER_5G, &wifi_under_5g);
 
-       rsp_source = tmp_buf[0]&0xf;
+       rsp_source = tmp_buf[0] & 0xf;
        if (rsp_source >= BT_INFO_SRC_8821A_1ANT_MAX)
                rsp_source = BT_INFO_SRC_8821A_1ANT_WIFI_FW;
        coex_sta->bt_info_c2h_cnt[rsp_source]++;
@@ -2768,7 +2732,7 @@ void ex_halbtc8821a1ant_bt_info_notify(struct btc_coexist *btcoexist,
                coex_sta->bt_info_c2h[rsp_source][i] = tmp_buf[i];
                if (i == 1)
                        bt_info = tmp_buf[i];
-               if (i == length-1) {
+               if (i == length - 1) {
                        RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
                                 "0x%02x]\n", tmp_buf[i]);
                } else {
@@ -2787,19 +2751,19 @@ void ex_halbtc8821a1ant_bt_info_notify(struct btc_coexist *btcoexist,
                coex_sta->bt_info_ext =
                        coex_sta->bt_info_c2h[rsp_source][4];
 
-               /* Here we need to resend some wifi info to BT*/
-               /* because bt is reset and loss of the info.*/
+               /* Here we need to resend some wifi info to BT
+                * because bt is reset and lost the info
+                */
                if (coex_sta->bt_info_ext & BIT1) {
                        RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
                                 "[BTCoex], BT ext info bit1 check, send wifi BW&Chnl to BT!!\n");
-                       btcoexist->btc_get(btcoexist,
-                                          BTC_GET_BL_WIFI_CONNECTED,
+                       btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_CONNECTED,
                                           &wifi_connected);
                        if (wifi_connected) {
-                               ex_halbtc8821a1ant_media_status_notify(btcoexist,
+                               ex_btc8821a1ant_media_status_notify(btcoexist,
                                                               BTC_MEDIA_CONNECT);
                        } else {
-                               ex_halbtc8821a1ant_media_status_notify(btcoexist,
+                               ex_btc8821a1ant_media_status_notify(btcoexist,
                                                               BTC_MEDIA_DISCONNECT);
                        }
                }
@@ -2809,36 +2773,28 @@ void ex_halbtc8821a1ant_bt_info_notify(struct btc_coexist *btcoexist,
                            !btcoexist->stop_coex_dm) {
                                RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
                                         "[BTCoex], BT ext info bit3 check, set BT NOT to ignore Wlan active!!\n");
-                               halbtc8821a1ant_ignore_wlan_act(btcoexist,
-                                                               FORCE_EXEC,
-                                                               false);
+                               btc8821a1ant_ignore_wlan_act(btcoexist,
+                                                            FORCE_EXEC,
+                                                            false);
                        }
                }
-#if (BT_AUTO_REPORT_ONLY_8821A_1ANT == 0)
-               if (!(coex_sta->bt_info_ext & BIT4)) {
-                       RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
-                                "[BTCoex], BT ext info bit4 check, set BT to enable Auto Report!!\n");
-                       halbtc8821a1ant_bt_auto_report(btcoexist,
-                                                      FORCE_EXEC, true);
-               }
-#endif
        }
 
-       /* check BIT2 first ==> check if bt is under inquiry or page scan*/
+       /* check BIT2 first ==> check if bt is under inquiry or page scan */
        if (bt_info & BT_INFO_8821A_1ANT_B_INQ_PAGE)
                coex_sta->c2h_bt_inquiry_page = true;
        else
                coex_sta->c2h_bt_inquiry_page = false;
 
-       /* set link exist status*/
-       if (!(bt_info&BT_INFO_8821A_1ANT_B_CONNECTION)) {
+       /* set link exist status */
+       if (!(bt_info & BT_INFO_8821A_1ANT_B_CONNECTION)) {
                coex_sta->bt_link_exist = false;
                coex_sta->pan_exist = false;
                coex_sta->a2dp_exist = false;
                coex_sta->hid_exist = false;
                coex_sta->sco_exist = false;
        } else {
-               /* connection exists*/
+               /* connection exists */
                coex_sta->bt_link_exist = true;
                if (bt_info & BT_INFO_8821A_1ANT_B_FTP)
                        coex_sta->pan_exist = true;
@@ -2858,14 +2814,19 @@ void ex_halbtc8821a1ant_bt_info_notify(struct btc_coexist *btcoexist,
                        coex_sta->sco_exist = false;
        }
 
-       halbtc8821a1ant_update_bt_link_info(btcoexist);
+       btc8821a1ant_update_bt_link_info(btcoexist);
 
-       if (!(bt_info&BT_INFO_8821A_1ANT_B_CONNECTION)) {
+       /* mask profile bit for connect-idle identification
+        * (for CSR case: A2DP idle --> 0x41)
+        */
+       bt_info = bt_info & 0x1f;
+
+       if (!(bt_info & BT_INFO_8821A_1ANT_B_CONNECTION)) {
                coex_dm->bt_status = BT_8821A_1ANT_BT_STATUS_NON_CONNECTED_IDLE;
                RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
                         "[BTCoex], BtInfoNotify(), BT Non-Connected idle!!!\n");
        } else if (bt_info == BT_INFO_8821A_1ANT_B_CONNECTION) {
-               /* connection exists but no busy*/
+               /* connection exists but no busy */
                coex_dm->bt_status = BT_8821A_1ANT_BT_STATUS_CONNECTED_IDLE;
                RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
                         "[BTCoex], BtInfoNotify(), BT Connected-idle!!!\n");
@@ -2895,33 +2856,48 @@ void ex_halbtc8821a1ant_bt_info_notify(struct btc_coexist *btcoexist,
        btcoexist->btc_set(btcoexist,
                           BTC_SET_BL_BT_TRAFFIC_BUSY, &bt_busy);
 
-       halbtc8821a1ant_run_coexist_mechanism(btcoexist);
+       btc8821a1ant_run_coexist_mechanism(btcoexist);
 }
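
/*
 * Editor's sketch of the "bt_info &= 0x1f" mask added above: in this
 * driver family bits [7:5] of bt_info are profile bits (FTP/A2DP/HID
 * -- assumed values taken from the 8821a1ant header), so masking them
 * lets an idle A2DP link (0x41 = connection + A2DP, the CSR case the
 * patch comment mentions) classify as plain connected-idle (0x01).
 */
#include <stdint.h>
#include <stdio.h>

#define B_CONNECTION	0x01	/* assumed: BIT0 */
#define B_A2DP		0x40	/* assumed: BIT6 */

static const char *bt_status(uint8_t bt_info)
{
	bt_info &= 0x1f;			/* drop profile bits [7:5] */

	if (!(bt_info & B_CONNECTION))
		return "non-connected idle";
	if (bt_info == B_CONNECTION)
		return "connected idle";	/* no busy bits set */
	return "busy";
}

int main(void)
{
	printf("%s\n", bt_status(0x41));	/* A2DP idle -> connected idle */
	printf("%s\n", bt_status(0x09));	/* ACL busy bit -> busy */
	return 0;
}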
 
-void ex_halbtc8821a1ant_halt_notify(struct btc_coexist *btcoexist)
+void ex_btc8821a1ant_halt_notify(struct btc_coexist *btcoexist)
 {
        struct rtl_priv *rtlpriv = btcoexist->adapter;
+       bool wifi_under_5g = false;
 
        RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
                 "[BTCoex], Halt notify\n");
+       btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_UNDER_5G, &wifi_under_5g);
+       if (wifi_under_5g) {
+               RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                        "[BTCoex], RunCoexistMechanism(), return for 5G <===\n");
+               btc8821a1ant_coex_under_5g(btcoexist);
+               return;
+       }
 
        btcoexist->stop_coex_dm = true;
 
-       halbtc8821a1ant_set_ant_path(btcoexist,
-                                    BTC_ANT_PATH_BT, false, true);
-       halbtc8821a1ant_ignore_wlan_act(btcoexist, FORCE_EXEC, true);
+       btc8821a1ant_set_ant_path(btcoexist, BTC_ANT_PATH_BT, false, true);
+       btc8821a1ant_ignore_wlan_act(btcoexist, FORCE_EXEC, true);
 
-       halbtc8821a1ant_power_save_state(btcoexist,
-                                        BTC_PS_WIFI_NATIVE, 0x0, 0x0);
-       halbtc8821a1ant_ps_tdma(btcoexist, FORCE_EXEC, false, 0);
+       btc8821a1ant_power_save_state(btcoexist, BTC_PS_WIFI_NATIVE, 0x0, 0x0);
+       btc8821a1ant_ps_tdma(btcoexist, FORCE_EXEC, false, 0);
 
-       ex_halbtc8821a1ant_media_status_notify(btcoexist,
-                                              BTC_MEDIA_DISCONNECT);
+       ex_btc8821a1ant_media_status_notify(btcoexist, BTC_MEDIA_DISCONNECT);
 }
 
-void ex_halbtc8821a1ant_pnp_notify(struct btc_coexist *btcoexist, u8 pnp_state)
+void ex_btc8821a1ant_pnp_notify(struct btc_coexist *btcoexist, u8 pnp_state)
 {
        struct rtl_priv *rtlpriv = btcoexist->adapter;
+       bool wifi_under_5g = false;
+
+       btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_UNDER_5G, &wifi_under_5g);
+       if (wifi_under_5g) {
+               RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                        "[BTCoex], RunCoexistMechanism(), return for 5G <===\n");
+               btc8821a1ant_coex_under_5g(btcoexist);
+               return;
+       }
 
        RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
                 "[BTCoex], Pnp notify\n");
@@ -2929,26 +2905,33 @@ void ex_halbtc8821a1ant_pnp_notify(struct btc_coexist *btcoexist, u8 pnp_state)
        if (BTC_WIFI_PNP_SLEEP == pnp_state) {
                RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
                         "[BTCoex], Pnp notify to SLEEP\n");
+               /* BT should clear UnderIPS/UnderLPS state to avoid mismatch
+                * state after wakeup.
+                */
+               coex_sta->under_ips = false;
+               coex_sta->under_lps = false;
                btcoexist->stop_coex_dm = true;
-               halbtc8821a1ant_ignore_wlan_act(btcoexist, FORCE_EXEC, true);
-               halbtc8821a1ant_power_save_state(btcoexist, BTC_PS_WIFI_NATIVE,
-                                                0x0, 0x0);
-               halbtc8821a1ant_ps_tdma(btcoexist, NORMAL_EXEC, false, 9);
+               btc8821a1ant_power_save_state(btcoexist, BTC_PS_WIFI_NATIVE,
+                                             0x0, 0x0);
+               btc8821a1ant_ps_tdma(btcoexist, NORMAL_EXEC, false, 0);
+               btc8821a1ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 2);
+               btc8821a1ant_set_ant_path(btcoexist, BTC_ANT_PATH_BT, false,
+                                         true);
        } else if (BTC_WIFI_PNP_WAKE_UP == pnp_state) {
                RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
                         "[BTCoex], Pnp notify to WAKE UP\n");
                btcoexist->stop_coex_dm = false;
-               halbtc8821a1ant_init_hw_config(btcoexist, false);
-               halbtc8821a1ant_init_coex_dm(btcoexist);
-               halbtc8821a1ant_query_bt_info(btcoexist);
+               btc8821a1ant_init_hw_config(btcoexist, false, false);
+               btc8821a1ant_init_coex_dm(btcoexist);
+               btc8821a1ant_query_bt_info(btcoexist);
        }
 }
 
-void ex_halbtc8821a1ant_periodical(struct btc_coexist *btcoexist)
+void ex_btc8821a1ant_periodical(struct btc_coexist *btcoexist)
 {
        struct rtl_priv *rtlpriv = btcoexist->adapter;
-       static u8       dis_ver_info_cnt;
-       u32             fw_ver = 0, bt_patch_ver = 0;
+       static u8 dis_ver_info_cnt;
+       u32 fw_ver = 0, bt_patch_ver = 0;
        struct btc_board_info *board_info = &btcoexist->board_info;
        struct btc_stack_info *stack_info = &btcoexist->stack_info;
 
@@ -2982,16 +2965,9 @@ void ex_halbtc8821a1ant_periodical(struct btc_coexist *btcoexist)
        }
 
 #if (BT_AUTO_REPORT_ONLY_8821A_1ANT == 0)
-       halbtc8821a1ant_query_bt_info(btcoexist);
-       halbtc8821a1ant_monitor_bt_ctr(btcoexist);
-       btc8821a1ant_mon_bt_en_dis(btcoexist);
+       btc8821a1ant_query_bt_info(btcoexist);
+       btc8821a1ant_monitor_bt_ctr(btcoexist);
 #else
-       if (halbtc8821a1ant_Is_wifi_status_changed(btcoexist) ||
-           coex_dm->auto_tdma_adjust) {
-               if (coex_sta->special_pkt_period_cnt > 2)
-                       halbtc8821a1ant_run_coexist_mechanism(btcoexist);
-       }
-
        coex_sta->special_pkt_period_cnt++;
 #endif
 }
index 20e904890fc243a175ddd31dc64ca1524e117aca..1bd1ebe3364ebe3e0ac44e17cce0507a9aba566e 100644 (file)
@@ -140,13 +140,14 @@ struct coex_dm_8821a_1ant {
 };
 
 struct coex_sta_8821a_1ant {
+       bool    bt_disabled;
        bool    bt_link_exist;
        bool    sco_exist;
        bool    a2dp_exist;
        bool    hid_exist;
        bool    pan_exist;
 
-       bool    under_Lps;
+       bool    under_lps;
        bool    under_ips;
        u32     special_pkt_period_cnt;
        u32     high_priority_tx;
@@ -160,6 +161,7 @@ struct coex_sta_8821a_1ant {
        u8      bt_info_c2h[BT_INFO_SRC_8821A_1ANT_MAX][10];
        u32     bt_info_c2h_cnt[BT_INFO_SRC_8821A_1ANT_MAX];
        bool    c2h_bt_inquiry_page;
+       bool    wifi_is_high_pri_task;
        u8      bt_retry_cnt;
        u8      bt_info_ext;
 };
index 1717e9ce96caa311e7b6002ebac5ba49286b0aa0..841b4a83ab70c8f3c5c343702b4a9b7ad7c8e5cc 100644 (file)
@@ -23,7 +23,7 @@
  *
  *****************************************************************************/
 
-/*============================================================
+/************************************************************
  * Description:
  *
  * This file is for RTL8821A Co-exist mechanism
  * 2012/08/22 Cosa first check in.
  * 2012/11/14 Cosa Revise for 8821A 2Ant out sourcing.
  *
- *============================================================
- */
+ ************************************************************/
 
-/*============================================================
+/************************************************************
  * include files
- *============================================================
-*/
+ ************************************************************/
 #include "halbt_precomp.h"
-/*============================================================
+/************************************************************
  * Global variables, these are static variables
- *============================================================
- */
-static struct coex_dm_8821a_2ant       glcoex_dm_8821a_2ant;
-static struct coex_dm_8821a_2ant       *coex_dm = &glcoex_dm_8821a_2ant;
-static struct coex_sta_8821a_2ant      glcoex_sta_8821a_2ant;
-static struct coex_sta_8821a_2ant      *coex_sta = &glcoex_sta_8821a_2ant;
+ ************************************************************/
+static struct coex_dm_8821a_2ant glcoex_dm_8821a_2ant;
+static struct coex_dm_8821a_2ant *coex_dm = &glcoex_dm_8821a_2ant;
+static struct coex_sta_8821a_2ant glcoex_sta_8821a_2ant;
+static struct coex_sta_8821a_2ant *coex_sta = &glcoex_sta_8821a_2ant;
 
 static const char *const glbt_info_src_8821a_2ant[] = {
        "BT Info[wifi fw]",
@@ -55,32 +52,29 @@ static const char *const glbt_info_src_8821a_2ant[] = {
        "BT Info[bt auto report]",
 };
 
-static u32     glcoex_ver_date_8821a_2ant = 20130618;
-static u32     glcoex_ver_8821a_2ant = 0x5050;
+static u32 glcoex_ver_date_8821a_2ant = 20130618;
+static u32 glcoex_ver_8821a_2ant = 0x5050;
 
-/*============================================================
+/************************************************************
  * local function proto type if needed
- *============================================================
- *============================================================
- * local function start with halbtc8821a2ant_
- *============================================================
- */
-static u8 halbtc8821a2ant_bt_rssi_state(struct btc_coexist *btcoexist,
-                                       u8 level_num, u8 rssi_thresh,
-                                       u8 rssi_thresh1)
+ *
+ * local function start with btc8821a2ant_
+ ************************************************************/
+static u8 btc8821a2ant_bt_rssi_state(struct btc_coexist *btcoexist,
+                                    u8 level_num, u8 rssi_thresh,
+                                    u8 rssi_thresh1)
 {
        struct rtl_priv *rtlpriv = btcoexist->adapter;
-       long    bt_rssi = 0;
-       u8      bt_rssi_state = coex_sta->pre_bt_rssi_state;
+       long bt_rssi = 0;
+       u8 bt_rssi_state = coex_sta->pre_bt_rssi_state;
 
        bt_rssi = coex_sta->bt_rssi;
 
        if (level_num == 2) {
                if ((coex_sta->pre_bt_rssi_state == BTC_RSSI_STATE_LOW) ||
                    (coex_sta->pre_bt_rssi_state == BTC_RSSI_STATE_STAY_LOW)) {
-                       long tmp = rssi_thresh +
-                                  BTC_RSSI_COEX_THRESH_TOL_8821A_2ANT;
-                       if (bt_rssi >= tmp) {
+                       if (bt_rssi >=
+                           rssi_thresh + BTC_RSSI_COEX_THRESH_TOL_8821A_2ANT) {
                                bt_rssi_state = BTC_RSSI_STATE_HIGH;
                                RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
                                         "[BTCoex], BT Rssi state switch to High\n");
@@ -110,7 +104,8 @@ static u8 halbtc8821a2ant_bt_rssi_state(struct btc_coexist *btcoexist,
                if ((coex_sta->pre_bt_rssi_state == BTC_RSSI_STATE_LOW) ||
                    (coex_sta->pre_bt_rssi_state == BTC_RSSI_STATE_STAY_LOW)) {
                        if (bt_rssi >=
-                           (rssi_thresh+BTC_RSSI_COEX_THRESH_TOL_8821A_2ANT)) {
+                           (rssi_thresh +
+                            BTC_RSSI_COEX_THRESH_TOL_8821A_2ANT)) {
                                bt_rssi_state = BTC_RSSI_STATE_MEDIUM;
                                RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
                                         "[BTCoex], BT Rssi state switch to Medium\n");
@@ -156,13 +151,13 @@ static u8 halbtc8821a2ant_bt_rssi_state(struct btc_coexist *btcoexist,
        return bt_rssi_state;
 }
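
/*
 * Editor's sketch of the two-level RSSI hysteresis implemented above
 * (the STAY_HIGH/STAY_LOW sub-states are folded away for brevity):
 * switching low -> high requires clearing the threshold plus a
 * tolerance (BTC_RSSI_COEX_THRESH_TOL_8821A_2ANT in the driver,
 * assumed 2 here), while high -> low uses the bare threshold, so
 * readings hovering near the threshold do not flap.
 */
#include <stdio.h>

enum rssi_state { RSSI_LOW, RSSI_HIGH };

#define THRESH_TOL	2	/* assumed hysteresis margin */

static enum rssi_state rssi_update(enum rssi_state pre, long rssi, long thresh)
{
	if (pre == RSSI_LOW)
		return (rssi >= thresh + THRESH_TOL) ? RSSI_HIGH : RSSI_LOW;

	return (rssi < thresh) ? RSSI_LOW : RSSI_HIGH;
}

int main(void)
{
	enum rssi_state s = RSSI_LOW;
	long samples[] = { 29, 30, 31, 32, 29, 28 };
	int i;

	/* threshold 30: 30 and 31 are not enough to leave LOW */
	for (i = 0; i < 6; i++) {
		s = rssi_update(s, samples[i], 30);
		printf("rssi=%ld -> %s\n", samples[i],
		       s == RSSI_HIGH ? "HIGH" : "LOW");
	}
	return 0;
}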
 
-static u8 halbtc8821a2ant_wifi_rssi_state(struct btc_coexist *btcoexist,
-                                         u8 index, u8 level_num,
-                                         u8 rssi_thresh, u8 rssi_thresh1)
+static u8 btc8821a2ant_wifi_rssi_state(struct btc_coexist *btcoexist,
+                                      u8 index, u8 level_num,
+                                      u8 rssi_thresh, u8 rssi_thresh1)
 {
        struct rtl_priv *rtlpriv = btcoexist->adapter;
-       long    wifi_rssi = 0;
-       u8      wifi_rssi_state = coex_sta->pre_wifi_rssi_state[index];
+       long wifi_rssi = 0;
+       u8 wifi_rssi_state = coex_sta->pre_wifi_rssi_state[index];
 
        btcoexist->btc_get(btcoexist, BTC_GET_S4_WIFI_RSSI, &wifi_rssi);
 
@@ -204,7 +199,8 @@ static u8 halbtc8821a2ant_wifi_rssi_state(struct btc_coexist *btcoexist,
                    (coex_sta->pre_wifi_rssi_state[index] ==
                     BTC_RSSI_STATE_STAY_LOW)) {
                        if (wifi_rssi >=
-                           (rssi_thresh+BTC_RSSI_COEX_THRESH_TOL_8821A_2ANT)) {
+                           (rssi_thresh +
+                            BTC_RSSI_COEX_THRESH_TOL_8821A_2ANT)) {
                                wifi_rssi_state = BTC_RSSI_STATE_MEDIUM;
                                RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
                                         "[BTCoex], wifi RSSI state switch to Medium\n");
@@ -248,76 +244,57 @@ static u8 halbtc8821a2ant_wifi_rssi_state(struct btc_coexist *btcoexist,
        return wifi_rssi_state;
 }
 
-static void btc8821a2ant_mon_bt_en_dis(struct btc_coexist *btcoexist)
+static
+void btc8821a2ant_limited_rx(struct btc_coexist *btcoexist, bool force_exec,
+                            bool rej_ap_agg_pkt, bool bt_ctrl_agg_buf_size,
+                            u8 agg_buf_size)
 {
-       struct rtl_priv *rtlpriv = btcoexist->adapter;
-       static bool     pre_bt_disabled;
-       static u32      bt_disable_cnt;
-       bool            bt_active = true, bt_disabled = false;
-
-       /* This function check if bt is disabled*/
-
-       if (coex_sta->high_priority_tx == 0 &&
-           coex_sta->high_priority_rx == 0 &&
-           coex_sta->low_priority_tx == 0 &&
-           coex_sta->low_priority_rx == 0)
-               bt_active = false;
-       if (coex_sta->high_priority_tx == 0xffff &&
-           coex_sta->high_priority_rx == 0xffff &&
-           coex_sta->low_priority_tx == 0xffff &&
-           coex_sta->low_priority_rx == 0xffff)
-               bt_active = false;
-       if (bt_active) {
-               bt_disable_cnt = 0;
-               bt_disabled = false;
-               btcoexist->btc_set(btcoexist, BTC_SET_BL_BT_DISABLE,
-                                  &bt_disabled);
-               RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
-                        "[BTCoex], BT is enabled !!\n");
-       } else {
-               bt_disable_cnt++;
-               RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
-                        "[BTCoex], bt all counters = 0, %d times!!\n",
-                        bt_disable_cnt);
-               if (bt_disable_cnt >= 2) {
-                       bt_disabled = true;
-                       btcoexist->btc_set(btcoexist, BTC_SET_BL_BT_DISABLE,
-                                          &bt_disabled);
-                       RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
-                                "[BTCoex], BT is disabled !!\n");
-               }
-       }
-       if (pre_bt_disabled != bt_disabled) {
-               RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
-                        "[BTCoex], BT is from %s to %s!!\n",
-                        (pre_bt_disabled ? "disabled" : "enabled"),
-                        (bt_disabled ? "disabled" : "enabled"));
-               pre_bt_disabled = bt_disabled;
-       }
+       bool reject_rx_agg = rej_ap_agg_pkt;
+       bool bt_ctrl_rx_agg_size = bt_ctrl_agg_buf_size;
+       u8 rx_agg_size = agg_buf_size;
+
+       /* Rx Aggregation related setting */
+       btcoexist->btc_set(btcoexist, BTC_SET_BL_TO_REJ_AP_AGG_PKT,
+                          &reject_rx_agg);
+       /* decide BT control aggregation buf size or not */
+       btcoexist->btc_set(btcoexist, BTC_SET_BL_BT_CTRL_AGG_SIZE,
+                          &bt_ctrl_rx_agg_size);
+       /* aggregation buf size, works when BT control Rx aggregation size */
+       btcoexist->btc_set(btcoexist, BTC_SET_U1_AGG_BUF_SIZE, &rx_agg_size);
+       /* real update aggregation setting */
+       btcoexist->btc_set(btcoexist, BTC_SET_ACT_AGGREGATE_CTRL, NULL);
 }
 
-static void halbtc8821a2ant_monitor_bt_ctr(struct btc_coexist *btcoexist)
+static void btc8821a2ant_monitor_bt_ctr(struct btc_coexist *btcoexist)
 {
        struct rtl_priv *rtlpriv = btcoexist->adapter;
-       u32     reg_hp_txrx, reg_lp_txrx, u4tmp;
-       u32     reg_hp_tx = 0, reg_hp_rx = 0, reg_lp_tx = 0, reg_lp_rx = 0;
+       struct btc_bt_link_info *bt_link_info = &btcoexist->bt_link_info;
+       u32 reg_hp_txrx, reg_lp_txrx, u4tmp;
+       u32 reg_hp_tx = 0, reg_hp_rx = 0, reg_lp_tx = 0, reg_lp_rx = 0;
 
        reg_hp_txrx = 0x770;
        reg_lp_txrx = 0x774;
 
        u4tmp = btcoexist->btc_read_4byte(btcoexist, reg_hp_txrx);
        reg_hp_tx = u4tmp & MASKLWORD;
-       reg_hp_rx = (u4tmp & MASKHWORD)>>16;
+       reg_hp_rx = (u4tmp & MASKHWORD) >> 16;
 
        u4tmp = btcoexist->btc_read_4byte(btcoexist, reg_lp_txrx);
        reg_lp_tx = u4tmp & MASKLWORD;
-       reg_lp_rx = (u4tmp & MASKHWORD)>>16;
+       reg_lp_rx = (u4tmp & MASKHWORD) >> 16;
 
        coex_sta->high_priority_tx = reg_hp_tx;
        coex_sta->high_priority_rx = reg_hp_rx;
        coex_sta->low_priority_tx = reg_lp_tx;
        coex_sta->low_priority_rx = reg_lp_rx;
 
+       if ((coex_sta->low_priority_rx >= 950) &&
+           (coex_sta->low_priority_rx >= coex_sta->low_priority_tx) &&
+           (!coex_sta->under_ips))
+               bt_link_info->slave_role = true;
+       else
+               bt_link_info->slave_role = false;
+
        RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
                 "[BTCoex], High Priority Tx/Rx (reg 0x%x) = 0x%x(%d)/0x%x(%d)\n",
                    reg_hp_txrx, reg_hp_tx, reg_hp_tx, reg_hp_rx, reg_hp_rx);
@@ -329,14 +306,51 @@ static void halbtc8821a2ant_monitor_bt_ctr(struct btc_coexist *btcoexist)
        btcoexist->btc_write_1byte(btcoexist, 0x76e, 0xc);
 }
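
/*
 * Editor's sketch of the counter decode above: 0x770 (high priority)
 * and 0x774 (low priority) each pack TX in the low 16 bits and RX in
 * the high 16. The slave-role heuristic the patch adds fires when
 * low-priority RX is large and at least matches TX, i.e. BT looks like
 * it is being polled by a link master.
 */
#include <stdint.h>
#include <stdbool.h>
#include <stdio.h>

static void decode_txrx(uint32_t reg, uint32_t *tx, uint32_t *rx)
{
	*tx = reg & 0xffff;		/* MASKLWORD */
	*rx = (reg >> 16) & 0xffff;	/* (MASKHWORD) >> 16 */
}

static bool bt_is_slave(uint32_t lp_tx, uint32_t lp_rx, bool under_ips)
{
	return lp_rx >= 950 && lp_rx >= lp_tx && !under_ips;
}

int main(void)
{
	uint32_t tx, rx;

	decode_txrx(0x03e90014, &tx, &rx);	/* rx = 1001, tx = 20 */
	printf("lp tx=%u rx=%u slave=%d\n", tx, rx,
	       bt_is_slave(tx, rx, false));
	return 0;
}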
 
-static void halbtc8821a2ant_query_bt_info(struct btc_coexist *btcoexist)
+static void btc8821a2ant_monitor_wifi_ctr(struct btc_coexist *btcoexist)
+{
+       if (coex_sta->under_ips) {
+               coex_sta->crc_ok_cck = 0;
+               coex_sta->crc_ok_11g = 0;
+               coex_sta->crc_ok_11n = 0;
+               coex_sta->crc_ok_11n_agg = 0;
+
+               coex_sta->crc_err_cck = 0;
+               coex_sta->crc_err_11g = 0;
+               coex_sta->crc_err_11n = 0;
+               coex_sta->crc_err_11n_agg = 0;
+       } else {
+               coex_sta->crc_ok_cck =
+                       btcoexist->btc_read_4byte(btcoexist, 0xf88);
+               coex_sta->crc_ok_11g =
+                       btcoexist->btc_read_2byte(btcoexist, 0xf94);
+               coex_sta->crc_ok_11n =
+                       btcoexist->btc_read_2byte(btcoexist, 0xf90);
+               coex_sta->crc_ok_11n_agg =
+                       btcoexist->btc_read_2byte(btcoexist, 0xfb8);
+
+               coex_sta->crc_err_cck =
+                       btcoexist->btc_read_4byte(btcoexist, 0xf84);
+               coex_sta->crc_err_11g =
+                       btcoexist->btc_read_2byte(btcoexist, 0xf96);
+               coex_sta->crc_err_11n =
+                       btcoexist->btc_read_2byte(btcoexist, 0xf92);
+               coex_sta->crc_err_11n_agg =
+                       btcoexist->btc_read_2byte(btcoexist, 0xfba);
+       }
+
+       /* reset counter */
+       btcoexist->btc_write_1byte_bitmask(btcoexist, 0xf16, 0x1, 0x1);
+       btcoexist->btc_write_1byte_bitmask(btcoexist, 0xf16, 0x1, 0x0);
+}
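
/*
 * Editor's sketch: the function above snapshots per-modulation CRC
 * OK/error counters and then resets the hardware counters by toggling
 * bit 0 of 0xf16. Turning one snapshot into an error rate (this ratio
 * computation is the editor's illustration, not driver code):
 */
#include <stdint.h>
#include <stdio.h>

struct crc_snapshot {
	uint32_t ok;
	uint32_t err;
};

static double crc_error_rate(const struct crc_snapshot *s)
{
	uint32_t total = s->ok + s->err;

	return total ? (double)s->err / total : 0.0;
}

int main(void)
{
	struct crc_snapshot cck = { .ok = 9500, .err = 500 };

	printf("CCK CRC error rate: %.1f%%\n", 100.0 * crc_error_rate(&cck));
	return 0;
}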
+
+static void btc8821a2ant_query_bt_info(struct btc_coexist *btcoexist)
 {
        struct rtl_priv *rtlpriv = btcoexist->adapter;
        u8 h2c_parameter[1] = {0};
 
        coex_sta->c2h_bt_info_req_sent = true;
 
-       h2c_parameter[0] |= BIT0;       /* trigger */
+       h2c_parameter[0] |= BIT0; /* trigger */
 
        RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
                 "[BTCoex], Query Bt Info, FW write 0x61 = 0x%x\n",
@@ -345,54 +359,135 @@ static void halbtc8821a2ant_query_bt_info(struct btc_coexist *btcoexist)
        btcoexist->btc_fill_h2c(btcoexist, 0x61, 1, h2c_parameter);
 }
 
-static u8 halbtc8821a2ant_action_algorithm(struct btc_coexist *btcoexist)
+bool btc8821a2ant_is_wifi_status_changed(struct btc_coexist *btcoexist)
+{
+       static bool pre_wifi_busy = true;
+       static bool pre_under_4way = true;
+       static bool pre_bt_hs_on = true;
+       bool wifi_busy = false, under_4way = false, bt_hs_on = false;
+       bool wifi_connected = false;
+       u8 wifi_rssi_state = BTC_RSSI_STATE_HIGH;
+
+       btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_CONNECTED,
+                          &wifi_connected);
+       btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_BUSY, &wifi_busy);
+       btcoexist->btc_get(btcoexist, BTC_GET_BL_HS_OPERATION, &bt_hs_on);
+       btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_4_WAY_PROGRESS,
+                          &under_4way);
+
+       if (wifi_connected) {
+               if (wifi_busy != pre_wifi_busy) {
+                       pre_wifi_busy = wifi_busy;
+                       return true;
+               }
+               if (under_4way != pre_under_4way) {
+                       pre_under_4way = under_4way;
+                       return true;
+               }
+               if (bt_hs_on != pre_bt_hs_on) {
+                       pre_bt_hs_on = bt_hs_on;
+                       return true;
+               }
+
+               wifi_rssi_state = btc8821a2ant_wifi_rssi_state(btcoexist, 3, 2,
+                               BT_8821A_2ANT_WIFI_RSSI_COEXSWITCH_THRES, 0);
+
+               if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) ||
+                   (wifi_rssi_state == BTC_RSSI_STATE_LOW))
+                       return true;
+       }
+
+       return false;
+}
+
+static void btc8821a2ant_update_bt_link_info(struct btc_coexist *btcoexist)
+{
+       struct btc_bt_link_info *bt_link_info = &btcoexist->bt_link_info;
+       bool bt_hs_on = false;
+
+       btcoexist->btc_get(btcoexist, BTC_GET_BL_HS_OPERATION, &bt_hs_on);
+
+       bt_link_info->bt_link_exist = coex_sta->bt_link_exist;
+       bt_link_info->sco_exist = coex_sta->sco_exist;
+       bt_link_info->a2dp_exist = coex_sta->a2dp_exist;
+       bt_link_info->pan_exist = coex_sta->pan_exist;
+       bt_link_info->hid_exist = coex_sta->hid_exist;
+
+       /* work around for HS mode. */
+       if (bt_hs_on) {
+               bt_link_info->pan_exist = true;
+               bt_link_info->bt_link_exist = true;
+       }
+
+       /* check if Sco only */
+       if (bt_link_info->sco_exist && !bt_link_info->a2dp_exist &&
+           !bt_link_info->pan_exist && !bt_link_info->hid_exist)
+               bt_link_info->sco_only = true;
+       else
+               bt_link_info->sco_only = false;
+
+       /* check if A2dp only */
+       if (!bt_link_info->sco_exist && bt_link_info->a2dp_exist &&
+           !bt_link_info->pan_exist && !bt_link_info->hid_exist)
+               bt_link_info->a2dp_only = true;
+       else
+               bt_link_info->a2dp_only = false;
+
+       /* check if Pan only */
+       if (!bt_link_info->sco_exist && !bt_link_info->a2dp_exist &&
+           bt_link_info->pan_exist && !bt_link_info->hid_exist)
+               bt_link_info->pan_only = true;
+       else
+               bt_link_info->pan_only = false;
+
+       /* check if Hid only */
+       if (!bt_link_info->sco_exist && !bt_link_info->a2dp_exist &&
+           !bt_link_info->pan_exist && bt_link_info->hid_exist)
+               bt_link_info->hid_only = true;
+       else
+               bt_link_info->hid_only = false;
+}
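
/*
 * Editor's sketch: the four *_only flags computed above are mutually
 * exclusive "exactly this one profile is active" tests. A compact
 * equivalent that counts the active profiles once:
 */
#include <stdbool.h>
#include <stdio.h>

struct bt_profiles {
	bool sco, a2dp, pan, hid;
};

static bool only(const struct bt_profiles *p, bool which)
{
	int active = p->sco + p->a2dp + p->pan + p->hid;

	return which && active == 1;
}

int main(void)
{
	struct bt_profiles p = { .sco = false, .a2dp = true,
				 .pan = false, .hid = false };

	printf("a2dp_only=%d pan_only=%d\n",
	       only(&p, p.a2dp), only(&p, p.pan));	/* 1 0 */
	return 0;
}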
+
+static u8 btc8821a2ant_action_algorithm(struct btc_coexist *btcoexist)
 {
        struct rtl_priv *rtlpriv = btcoexist->adapter;
-       struct btc_stack_info *stack_info = &btcoexist->stack_info;
+       struct btc_bt_link_info *bt_link_info = &btcoexist->bt_link_info;
        bool bt_hs_on = false;
        u8 algorithm = BT_8821A_2ANT_COEX_ALGO_UNDEFINED;
        u8 num_of_diff_profile = 0;
 
        btcoexist->btc_get(btcoexist, BTC_GET_BL_HS_OPERATION, &bt_hs_on);
 
-       /*for win-8 stack HID report error*/
-       /* sync  BTInfo with BT firmware and stack */
-       if (!stack_info->hid_exist)
-               stack_info->hid_exist = coex_sta->hid_exist;
-       /* when stack HID report error, here we use the info from bt fw. */
-       if (!stack_info->bt_link_exist)
-               stack_info->bt_link_exist = coex_sta->bt_link_exist;
-
-       if (!coex_sta->bt_link_exist) {
+       if (!bt_link_info->bt_link_exist) {
                RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
-                        "[BTCoex], No profile exists!!!\n");
+                       "[BTCoex], No BT link exists!!!\n");
                return algorithm;
        }
 
-       if (coex_sta->sco_exist)
+       if (bt_link_info->sco_exist)
                num_of_diff_profile++;
-       if (coex_sta->hid_exist)
+       if (bt_link_info->hid_exist)
                num_of_diff_profile++;
-       if (coex_sta->pan_exist)
+       if (bt_link_info->pan_exist)
                num_of_diff_profile++;
-       if (coex_sta->a2dp_exist)
+       if (bt_link_info->a2dp_exist)
                num_of_diff_profile++;
 
        if (num_of_diff_profile == 1) {
-               if (coex_sta->sco_exist) {
+               if (bt_link_info->sco_exist) {
                        RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
                                 "[BTCoex], SCO only\n");
                        algorithm = BT_8821A_2ANT_COEX_ALGO_SCO;
                } else {
-                       if (coex_sta->hid_exist) {
+                       if (bt_link_info->hid_exist) {
                                RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
                                         "[BTCoex], HID only\n");
                                algorithm = BT_8821A_2ANT_COEX_ALGO_HID;
-                       } else if (coex_sta->a2dp_exist) {
+                       } else if (bt_link_info->a2dp_exist) {
                                RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
                                         "[BTCoex], A2DP only\n");
                                algorithm = BT_8821A_2ANT_COEX_ALGO_A2DP;
-                       } else if (coex_sta->pan_exist) {
+                       } else if (bt_link_info->pan_exist) {
                                if (bt_hs_on) {
                                        RT_TRACE(rtlpriv, COMP_BT_COEXIST,
                                                 DBG_LOUD,
@@ -407,16 +502,16 @@ static u8 halbtc8821a2ant_action_algorithm(struct btc_coexist *btcoexist)
                        }
                }
        } else if (num_of_diff_profile == 2) {
-               if (coex_sta->sco_exist) {
-                       if (coex_sta->hid_exist) {
+               if (bt_link_info->sco_exist) {
+                       if (bt_link_info->hid_exist) {
                                RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
                                         "[BTCoex], SCO + HID\n");
-                               algorithm = BT_8821A_2ANT_COEX_ALGO_PANEDR_HID;
-                       } else if (coex_sta->a2dp_exist) {
+                               algorithm = BT_8821A_2ANT_COEX_ALGO_SCO;
+                       } else if (bt_link_info->a2dp_exist) {
                                RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
                                         "[BTCoex], SCO + A2DP ==> SCO\n");
-                               algorithm = BT_8821A_2ANT_COEX_ALGO_PANEDR_HID;
-                       } else if (coex_sta->pan_exist) {
+                               algorithm = BT_8821A_2ANT_COEX_ALGO_SCO;
+                       } else if (bt_link_info->pan_exist) {
                                if (bt_hs_on) {
                                        RT_TRACE(rtlpriv, COMP_BT_COEXIST,
                                                 DBG_LOUD,
@@ -426,99 +521,104 @@ static u8 halbtc8821a2ant_action_algorithm(struct btc_coexist *btcoexist)
                                        RT_TRACE(rtlpriv, COMP_BT_COEXIST,
                                                 DBG_LOUD,
                                                 "[BTCoex], SCO + PAN(EDR)\n");
-                                       algorithm = BT_8821A_2ANT_COEX_ALGO_PANEDR_HID;
+                                       algorithm = BT_8821A_2ANT_COEX_ALGO_SCO;
                                }
                        }
                } else {
-                       if (coex_sta->hid_exist &&
-                           coex_sta->a2dp_exist) {
+                       if (bt_link_info->hid_exist &&
+                           bt_link_info->a2dp_exist) {
                                RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
                                         "[BTCoex], HID + A2DP\n");
                                algorithm = BT_8821A_2ANT_COEX_ALGO_HID_A2DP;
-                       } else if (coex_sta->hid_exist &&
-                               coex_sta->pan_exist) {
+                       } else if (bt_link_info->hid_exist &&
+                               bt_link_info->pan_exist) {
                                if (bt_hs_on) {
                                        RT_TRACE(rtlpriv, COMP_BT_COEXIST,
                                                 DBG_LOUD,
                                                 "[BTCoex], HID + PAN(HS)\n");
-                                       algorithm =  BT_8821A_2ANT_COEX_ALGO_HID;
+                                       algorithm = BT_8821A_2ANT_COEX_ALGO_HID;
                                } else {
                                        RT_TRACE(rtlpriv, COMP_BT_COEXIST,
                                                 DBG_LOUD,
                                                 "[BTCoex], HID + PAN(EDR)\n");
-                                       algorithm = BT_8821A_2ANT_COEX_ALGO_PANEDR_HID;
+                                       algorithm =
+                                           BT_8821A_2ANT_COEX_ALGO_PANEDR_HID;
                                }
-                       } else if (coex_sta->pan_exist &&
-                               coex_sta->a2dp_exist) {
+                       } else if (bt_link_info->pan_exist &&
+                               bt_link_info->a2dp_exist) {
                                if (bt_hs_on) {
                                        RT_TRACE(rtlpriv, COMP_BT_COEXIST,
                                                 DBG_LOUD,
                                                 "[BTCoex], A2DP + PAN(HS)\n");
-                                       algorithm = BT_8821A_2ANT_COEX_ALGO_A2DP_PANHS;
+                                       algorithm =
+                                           BT_8821A_2ANT_COEX_ALGO_A2DP_PANHS;
                                } else {
                                        RT_TRACE(rtlpriv, COMP_BT_COEXIST,
                                                 DBG_LOUD,
                                                 "[BTCoex], A2DP + PAN(EDR)\n");
-                                       algorithm = BT_8821A_2ANT_COEX_ALGO_PANEDR_A2DP;
+                                       algorithm =
+                                           BT_8821A_2ANT_COEX_ALGO_PANEDR_A2DP;
                                }
                        }
                }
        } else if (num_of_diff_profile == 3) {
-               if (coex_sta->sco_exist) {
-                       if (coex_sta->hid_exist &&
-                           coex_sta->a2dp_exist) {
+               if (bt_link_info->sco_exist) {
+                       if (bt_link_info->hid_exist &&
+                           bt_link_info->a2dp_exist) {
                                RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
                                         "[BTCoex], SCO + HID + A2DP ==> HID\n");
-                               algorithm = BT_8821A_2ANT_COEX_ALGO_PANEDR_HID;
-                       } else if (coex_sta->hid_exist &&
-                               coex_sta->pan_exist) {
+                               algorithm = BT_8821A_2ANT_COEX_ALGO_SCO;
+                       } else if (bt_link_info->hid_exist &&
+                               bt_link_info->pan_exist) {
                                if (bt_hs_on) {
                                        RT_TRACE(rtlpriv, COMP_BT_COEXIST,
                                                 DBG_LOUD,
                                                 "[BTCoex], SCO + HID + PAN(HS)\n");
-                                       algorithm = BT_8821A_2ANT_COEX_ALGO_PANEDR_HID;
+                                       algorithm = BT_8821A_2ANT_COEX_ALGO_SCO;
                                } else {
                                        RT_TRACE(rtlpriv, COMP_BT_COEXIST,
                                                 DBG_LOUD,
                                                 "[BTCoex], SCO + HID + PAN(EDR)\n");
-                                       algorithm = BT_8821A_2ANT_COEX_ALGO_PANEDR_HID;
+                                       algorithm = BT_8821A_2ANT_COEX_ALGO_SCO;
                                }
-                       } else if (coex_sta->pan_exist &&
-                                  coex_sta->a2dp_exist) {
+                       } else if (bt_link_info->pan_exist &&
+                                  bt_link_info->a2dp_exist) {
                                if (bt_hs_on) {
                                        RT_TRACE(rtlpriv, COMP_BT_COEXIST,
                                                 DBG_LOUD,
                                                 "[BTCoex], SCO + A2DP + PAN(HS)\n");
-                                       algorithm = BT_8821A_2ANT_COEX_ALGO_PANEDR_HID;
+                                       algorithm = BT_8821A_2ANT_COEX_ALGO_SCO;
                                } else {
                                        RT_TRACE(rtlpriv, COMP_BT_COEXIST,
                                                 DBG_LOUD,
                                                 "[BTCoex], SCO + A2DP + PAN(EDR) ==> HID\n");
-                                       algorithm = BT_8821A_2ANT_COEX_ALGO_PANEDR_HID;
+                                       algorithm = BT_8821A_2ANT_COEX_ALGO_SCO;
                                }
                        }
                } else {
-                       if (coex_sta->hid_exist &&
-                           coex_sta->pan_exist &&
-                           coex_sta->a2dp_exist) {
+                       if (bt_link_info->hid_exist &&
+                           bt_link_info->pan_exist &&
+                           bt_link_info->a2dp_exist) {
                                if (bt_hs_on) {
                                        RT_TRACE(rtlpriv, COMP_BT_COEXIST,
                                                 DBG_LOUD,
                                                 "[BTCoex], HID + A2DP + PAN(HS)\n");
-                                       algorithm = BT_8821A_2ANT_COEX_ALGO_HID_A2DP;
+                                       algorithm =
+                                           BT_8821A_2ANT_COEX_ALGO_HID_A2DP;
                                } else {
                                        RT_TRACE(rtlpriv, COMP_BT_COEXIST,
                                                 DBG_LOUD,
                                                 "[BTCoex], HID + A2DP + PAN(EDR)\n");
-                                       algorithm = BT_8821A_2ANT_COEX_ALGO_HID_A2DP_PANEDR;
+                                       algorithm =
+                                       BT_8821A_2ANT_COEX_ALGO_HID_A2DP_PANEDR;
                                }
                        }
                }
        } else if (num_of_diff_profile >= 3) {
-               if (coex_sta->sco_exist) {
-                       if (coex_sta->hid_exist &&
-                           coex_sta->pan_exist &&
-                           coex_sta->a2dp_exist) {
+               if (bt_link_info->sco_exist) {
+                       if (bt_link_info->hid_exist &&
+                           bt_link_info->pan_exist &&
+                           bt_link_info->a2dp_exist) {
                                if (bt_hs_on) {
                                        RT_TRACE(rtlpriv, COMP_BT_COEXIST,
                                                 DBG_LOUD,
@@ -528,7 +628,7 @@ static u8 halbtc8821a2ant_action_algorithm(struct btc_coexist *btcoexist)
                                        RT_TRACE(rtlpriv, COMP_BT_COEXIST,
                                                 DBG_LOUD,
                                                 "[BTCoex], SCO + HID + A2DP + PAN(EDR)==>PAN(EDR)+HID\n");
-                                       algorithm = BT_8821A_2ANT_COEX_ALGO_PANEDR_HID;
+                                       algorithm = BT_8821A_2ANT_COEX_ALGO_SCO;
                                }
                        }
                }
@@ -536,44 +636,7 @@ static u8 halbtc8821a2ant_action_algorithm(struct btc_coexist *btcoexist)
        return algorithm;
 }
 
-static bool halbtc8821a2ant_need_to_dec_bt_pwr(struct btc_coexist *btcoexist)
-{
-       struct rtl_priv *rtlpriv = btcoexist->adapter;
-       bool ret = false;
-       bool bt_hs_on = false, wifi_connected = false;
-       long bt_hs_rssi = 0;
-       u8 bt_rssi_state;
-
-       if (!btcoexist->btc_get(btcoexist, BTC_GET_BL_HS_OPERATION, &bt_hs_on))
-               return false;
-       if (!btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_CONNECTED,
-                               &wifi_connected))
-               return false;
-       if (!btcoexist->btc_get(btcoexist, BTC_GET_S4_HS_RSSI, &bt_hs_rssi))
-               return false;
-
-       bt_rssi_state = halbtc8821a2ant_bt_rssi_state(btcoexist, 2, 35, 0);
-
-       if (wifi_connected) {
-               if (bt_hs_on) {
-                       if (bt_hs_rssi > 37) {
-                               RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
-                                        "[BTCoex], Need to decrease bt power for HS mode!!\n");
-                               ret = true;
-                       }
-               } else {
-                       if ((bt_rssi_state == BTC_RSSI_STATE_HIGH) ||
-                           (bt_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
-                               RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
-                                        "[BTCoex], Need to decrease bt power for Wifi is connected!!\n");
-                               ret = true;
-                       }
-               }
-       }
-       return ret;
-}
-
-static void btc8821a2ant_set_fw_dac_swing_lev(struct btc_coexist *btcoexist,
+static void btc8821a2ant_set_fw_dac_swing_lvl(struct btc_coexist *btcoexist,
                                              u8 dac_swing_lvl)
 {
        struct rtl_priv *rtlpriv = btcoexist->adapter;
@@ -592,185 +655,47 @@ static void btc8821a2ant_set_fw_dac_swing_lev(struct btc_coexist *btcoexist,
        btcoexist->btc_fill_h2c(btcoexist, 0x64, 1, h2c_parameter);
 }
 
-static void halbtc8821a2ant_set_fw_dec_bt_pwr(struct btc_coexist *btcoexist,
-                                             bool dec_bt_pwr)
+static void btc8821a2ant_set_fw_dec_bt_pwr(struct btc_coexist *btcoexist,
+                                          u8 dec_bt_pwr_lvl)
 {
        struct rtl_priv *rtlpriv = btcoexist->adapter;
        u8 h2c_parameter[1] = {0};
 
-       h2c_parameter[0] = 0;
-
-       if (dec_bt_pwr)
-               h2c_parameter[0] |= BIT1;
+       h2c_parameter[0] = dec_bt_pwr_lvl;
 
        RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
-                "[BTCoex], decrease Bt Power : %s, FW write 0x62 = 0x%x\n",
-                (dec_bt_pwr ? "Yes!!" : "No!!"), h2c_parameter[0]);
+                "[BTCoex], decrease Bt Power Level : %u, FW write 0x62 = 0x%x\n",
+                dec_bt_pwr_lvl, h2c_parameter[0]);
 
        btcoexist->btc_fill_h2c(btcoexist, 0x62, 1, h2c_parameter);
 }
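
All of the set_fw_* helpers in this file share one host-to-card (H2C) pattern: pack a small parameter buffer, trace it, then pass it to the abstraction layer with a command ID (0x60 through 0x69 in this file). A hedged, self-contained sketch of that pattern; fill_h2c() below is a stand-in for the driver's btcoexist->btc_fill_h2c() callback, which in the driver forwards the buffer to firmware:

	#include <stdint.h>
	#include <stdio.h>

	/* stand-in for the driver's btcoexist->btc_fill_h2c() callback */
	static void fill_h2c(uint8_t cmd_id, uint8_t len, const uint8_t *buf)
	{
		printf("H2C 0x%02x, %d byte(s), buf[0] = 0x%02x\n",
		       cmd_id, len, buf[0]);
	}

	/* same shape as btc8821a2ant_set_fw_dec_bt_pwr() above */
	static void set_fw_dec_bt_pwr(uint8_t dec_bt_pwr_lvl)
	{
		uint8_t h2c_parameter[1] = { dec_bt_pwr_lvl };

		/* command 0x62 carries the BT power decrement level */
		fill_h2c(0x62, 1, h2c_parameter);
	}

	int main(void)
	{
		set_fw_dec_bt_pwr(2);
		return 0;
	}
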
 
-static void halbtc8821a2ant_dec_bt_pwr(struct btc_coexist *btcoexist,
-                                      bool force_exec, bool dec_bt_pwr)
-{
-       struct rtl_priv *rtlpriv = btcoexist->adapter;
-
-       RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
-                "[BTCoex], %s Dec BT power = %s\n",
-                   (force_exec ? "force to" : ""),
-                   ((dec_bt_pwr) ? "ON" : "OFF"));
-       coex_dm->cur_dec_bt_pwr = dec_bt_pwr;
-
-       if (!force_exec) {
-               RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
-                        "[BTCoex], pre_dec_bt_pwr = %d, cur_dec_bt_pwr = %d\n",
-                           coex_dm->pre_dec_bt_pwr, coex_dm->cur_dec_bt_pwr);
-
-               if (coex_dm->pre_dec_bt_pwr == coex_dm->cur_dec_bt_pwr)
-                       return;
-       }
-       halbtc8821a2ant_set_fw_dec_bt_pwr(btcoexist, coex_dm->cur_dec_bt_pwr);
-
-       coex_dm->pre_dec_bt_pwr = coex_dm->cur_dec_bt_pwr;
-}
-
-static void btc8821a2ant_set_fw_bt_lna_constr(struct btc_coexist *btcoexist,
-                                             bool bt_lna_cons_on)
-{
-       struct rtl_priv *rtlpriv = btcoexist->adapter;
-       u8 h2c_parameter[2] = {0};
-
-       h2c_parameter[0] = 0x3; /* opCode, 0x3 = BT_SET_LNA_CONSTRAIN */
-
-       if (bt_lna_cons_on)
-               h2c_parameter[1] |= BIT0;
-
-       RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
-                "[BTCoex], set BT LNA Constrain: %s, FW write 0x69 = 0x%x\n",
-                bt_lna_cons_on ? "ON!!" : "OFF!!",
-                h2c_parameter[0] << 8 | h2c_parameter[1]);
-
-       btcoexist->btc_fill_h2c(btcoexist, 0x69, 2, h2c_parameter);
-}
-
-static void btc8821a2_set_bt_lna_const(struct btc_coexist *btcoexist,
-                                      bool force_exec, bool bt_lna_cons_on)
-{
-       struct rtl_priv *rtlpriv = btcoexist->adapter;
-
-       RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
-                "[BTCoex], %s BT Constrain = %s\n",
-                (force_exec ? "force" : ""),
-                ((bt_lna_cons_on) ? "ON" : "OFF"));
-       coex_dm->cur_bt_lna_constrain = bt_lna_cons_on;
-
-       if (!force_exec) {
-               RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
-                        "[BTCoex], pre_bt_lna_constrain = %d,cur_bt_lna_constrain = %d\n",
-                           coex_dm->pre_bt_lna_constrain,
-                           coex_dm->cur_bt_lna_constrain);
-
-               if (coex_dm->pre_bt_lna_constrain ==
-                   coex_dm->cur_bt_lna_constrain)
-                       return;
-       }
-       btc8821a2ant_set_fw_bt_lna_constr(btcoexist,
-                                         coex_dm->cur_bt_lna_constrain);
-
-       coex_dm->pre_bt_lna_constrain = coex_dm->cur_bt_lna_constrain;
-}
-
-static void halbtc8821a2ant_set_fw_bt_psd_mode(struct btc_coexist *btcoexist,
-                                              u8 bt_psd_mode)
-{
-       struct rtl_priv *rtlpriv = btcoexist->adapter;
-       u8 h2c_parameter[2] = {0};
-
-       h2c_parameter[0] = 0x2; /* opCode, 0x2 = BT_SET_PSD_MODE */
-
-       h2c_parameter[1] = bt_psd_mode;
-
-       RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
-                "[BTCoex], set BT PSD mode = 0x%x, FW write 0x69 = 0x%x\n",
-                h2c_parameter[1],
-                h2c_parameter[0] << 8 | h2c_parameter[1]);
-
-       btcoexist->btc_fill_h2c(btcoexist, 0x69, 2, h2c_parameter);
-}
-
-static void halbtc8821a2ant_set_bt_psd_mode(struct btc_coexist *btcoexist,
-                                           bool force_exec, u8 bt_psd_mode)
-{
-       struct rtl_priv *rtlpriv = btcoexist->adapter;
-
-       RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
-                "[BTCoex], %s BT PSD mode = 0x%x\n",
-                (force_exec ? "force" : ""), bt_psd_mode);
-       coex_dm->cur_bt_psd_mode = bt_psd_mode;
-
-       if (!force_exec) {
-               RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
-                        "[BTCoex], pre_bt_psd_mode = 0x%x, cur_bt_psd_mode = 0x%x\n",
-                        coex_dm->pre_bt_psd_mode, coex_dm->cur_bt_psd_mode);
-
-               if (coex_dm->pre_bt_psd_mode == coex_dm->cur_bt_psd_mode)
-                       return;
-       }
-       halbtc8821a2ant_set_fw_bt_psd_mode(btcoexist,
-                                          coex_dm->cur_bt_psd_mode);
-
-       coex_dm->pre_bt_psd_mode = coex_dm->cur_bt_psd_mode;
-}
-
-static void halbtc8821a2ant_set_bt_auto_report(struct btc_coexist *btcoexist,
-                                              bool enable_auto_report)
-{
-       struct rtl_priv *rtlpriv = btcoexist->adapter;
-       u8 h2c_parameter[1] = {0};
-
-       h2c_parameter[0] = 0;
-
-       if (enable_auto_report)
-               h2c_parameter[0] |= BIT0;
-
-       RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
-                "[BTCoex], BT FW auto report : %s, FW write 0x68 = 0x%x\n",
-                (enable_auto_report ? "Enabled!!" : "Disabled!!"),
-                h2c_parameter[0]);
-
-       btcoexist->btc_fill_h2c(btcoexist, 0x68, 1, h2c_parameter);
-}
-
-static void halbtc8821a2ant_bt_auto_report(struct btc_coexist *btcoexist,
-                                          bool force_exec,
-                                          bool enable_auto_report)
+static void btc8821a2ant_dec_bt_pwr(struct btc_coexist *btcoexist,
+                                   bool force_exec, u8 dec_bt_pwr_lvl)
 {
        struct rtl_priv *rtlpriv = btcoexist->adapter;
 
        RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
-                "[BTCoex], %s BT Auto report = %s\n",
-                (force_exec ? "force to" : ""),
-                ((enable_auto_report) ? "Enabled" : "Disabled"));
-       coex_dm->cur_bt_auto_report = enable_auto_report;
+                "[BTCoex], %s Dec BT power level = %u\n",
+                   (force_exec ? "force to" : ""), dec_bt_pwr_lvl);
+       coex_dm->cur_dec_bt_pwr_lvl = dec_bt_pwr_lvl;
 
        if (!force_exec) {
                RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
-                        "[BTCoex], pre_bt_auto_report = %d, cur_bt_auto_report = %d\n",
-                           coex_dm->pre_bt_auto_report,
-                           coex_dm->cur_bt_auto_report);
+                        "[BTCoex], pre_dec_bt_pwr_lvl = %d, cur_dec_bt_pwr_lvl = %d\n",
+                           coex_dm->pre_dec_bt_pwr_lvl,
+                           coex_dm->cur_dec_bt_pwr_lvl);
 
-               if (coex_dm->pre_bt_auto_report == coex_dm->cur_bt_auto_report)
+               if (coex_dm->pre_dec_bt_pwr_lvl == coex_dm->cur_dec_bt_pwr_lvl)
                        return;
        }
-       halbtc8821a2ant_set_bt_auto_report(btcoexist,
-                                          coex_dm->cur_bt_auto_report);
+       btc8821a2ant_set_fw_dec_bt_pwr(btcoexist, coex_dm->cur_dec_bt_pwr_lvl);
 
-       coex_dm->pre_bt_auto_report = coex_dm->cur_bt_auto_report;
+       coex_dm->pre_dec_bt_pwr_lvl = coex_dm->cur_dec_bt_pwr_lvl;
 }
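
The wrapper above shows the caching idiom used by nearly every NORMAL_EXEC/FORCE_EXEC setter in this file: record the requested value in cur_*, skip the firmware write when it matches pre_* (unless force_exec), then latch cur_* into pre_*. A minimal generic sketch of the idiom (names are illustrative, not the driver's):

	#include <stdbool.h>
	#include <stdio.h>

	static int pre_val = -1;	/* last value actually written */
	static int cur_val;		/* most recently requested value */

	static void hw_write(int val)
	{
		printf("write %d\n", val);
	}

	static void set_cached(bool force_exec, int val)
	{
		cur_val = val;

		/* unless forced, suppress a redundant write of an
		 * unchanged value
		 */
		if (!force_exec && pre_val == cur_val)
			return;

		hw_write(cur_val);
		pre_val = cur_val;
	}

	int main(void)
	{
		set_cached(false, 5);	/* writes */
		set_cached(false, 5);	/* suppressed */
		set_cached(true, 5);	/* forced write */
		return 0;
	}
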
 
-static void halbtc8821a2ant_fw_dac_swing_lvl(struct btc_coexist *btcoexist,
-                                            bool force_exec,
-                                            u8 fw_dac_swing_lvl)
+static void btc8821a2ant_fw_dac_swing_lvl(struct btc_coexist *btcoexist,
+                                         bool force_exec, u8 fw_dac_swing_lvl)
 {
        struct rtl_priv *rtlpriv = btcoexist->adapter;
 
@@ -790,66 +715,14 @@ static void halbtc8821a2ant_fw_dac_swing_lvl(struct btc_coexist *btcoexist,
                        return;
        }
 
-       btc8821a2ant_set_fw_dac_swing_lev(btcoexist,
+       btc8821a2ant_set_fw_dac_swing_lvl(btcoexist,
                                          coex_dm->cur_fw_dac_swing_lvl);
 
        coex_dm->pre_fw_dac_swing_lvl = coex_dm->cur_fw_dac_swing_lvl;
 }
 
-static void btc8821a2ant_set_sw_rf_rx_lpf_corner(struct btc_coexist *btcoexist,
-                                                bool rx_rf_shrink_on)
-{
-       struct rtl_priv *rtlpriv = btcoexist->adapter;
-
-       if (rx_rf_shrink_on) {
-               /* Shrink RF Rx LPF corner */
-               RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
-                        "[BTCoex], Shrink RF Rx LPF corner!!\n");
-               btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x1e,
-                                         0xfffff, 0xffffc);
-       } else {
-               /* Resume RF Rx LPF corner
-                * After initialized, we can use coex_dm->bt_rf0x1e_backup
-                */
-               if (btcoexist->initilized) {
-                       RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
-                                "[BTCoex], Resume RF Rx LPF corner!!\n");
-                       btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A,
-                                                 0x1e, 0xfffff,
-                                                  coex_dm->bt_rf0x1e_backup);
-               }
-       }
-}
-
-static void halbtc8821a2ant_RfShrink(struct btc_coexist *btcoexist,
-                                    bool force_exec, bool rx_rf_shrink_on)
-{
-       struct rtl_priv *rtlpriv = btcoexist->adapter;
-
-       RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
-                "[BTCoex], %s turn Rx RF Shrink = %s\n",
-                   (force_exec ? "force to" : ""),
-                   ((rx_rf_shrink_on) ? "ON" : "OFF"));
-       coex_dm->cur_rf_rx_lpf_shrink = rx_rf_shrink_on;
-
-       if (!force_exec) {
-               RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
-                        "[BTCoex], pre_rf_rx_lpf_shrink = %d, cur_rf_rx_lpf_shrink = %d\n",
-                        coex_dm->pre_rf_rx_lpf_shrink,
-                        coex_dm->cur_rf_rx_lpf_shrink);
-
-               if (coex_dm->pre_rf_rx_lpf_shrink ==
-                   coex_dm->cur_rf_rx_lpf_shrink)
-                       return;
-       }
-       btc8821a2ant_set_sw_rf_rx_lpf_corner(btcoexist,
-                                            coex_dm->cur_rf_rx_lpf_shrink);
-
-       coex_dm->pre_rf_rx_lpf_shrink = coex_dm->cur_rf_rx_lpf_shrink;
-}
-
-static void btc8821a2ant_SetSwPenTxRateAdapt(struct btc_coexist *btcoexist,
-                                            bool low_penalty_ra)
+static void btc8821a2ant_set_sw_penalty_tx_rate_adaptive(
+               struct btc_coexist *btcoexist, bool low_penalty_ra)
 {
        struct rtl_priv *rtlpriv = btcoexist->adapter;
        u8 h2c_parameter[6] = {0};
@@ -858,14 +731,14 @@ static void btc8821a2ant_SetSwPenTxRateAdapt(struct btc_coexist *btcoexist,
 
        if (low_penalty_ra) {
                h2c_parameter[1] |= BIT0;
-               /*normal rate except MCS7/6/5, OFDM54/48/36 */
+               /* normal rate except MCS7/6/5, OFDM54/48/36 */
                h2c_parameter[2] = 0x00;
-               /*MCS7 or OFDM54 */
-               h2c_parameter[3] = 0xf7;
-               /*MCS6 or OFDM48 */
-               h2c_parameter[4] = 0xf8;
-               /*MCS5 or OFDM36 */
-               h2c_parameter[5] = 0xf9;
+               /* MCS7 or OFDM54 */
+               h2c_parameter[3] = 0xf5;
+               /* MCS6 or OFDM48 */
+               h2c_parameter[4] = 0xa0;
+               /* MCS5 or OFDM36 */
+               h2c_parameter[5] = 0xa0;
        }
 
        RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
@@ -875,12 +748,11 @@ static void btc8821a2ant_SetSwPenTxRateAdapt(struct btc_coexist *btcoexist,
        btcoexist->btc_fill_h2c(btcoexist, 0x69, 6, h2c_parameter);
 }
 
-static void halbtc8821a2ant_low_penalty_ra(struct btc_coexist *btcoexist,
-                                          bool force_exec, bool low_penalty_ra)
+static void btc8821a2ant_low_penalty_ra(struct btc_coexist *btcoexist,
+                                       bool force_exec, bool low_penalty_ra)
 {
        struct rtl_priv *rtlpriv = btcoexist->adapter;
 
-       /*return;*/
        RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
                 "[BTCoex], %s turn LowPenaltyRA = %s\n",
                 (force_exec ? "force to" : ""),
@@ -891,19 +763,19 @@ static void halbtc8821a2ant_low_penalty_ra(struct btc_coexist *btcoexist,
                RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
                         "[BTCoex], pre_low_penalty_ra = %d, cur_low_penalty_ra = %d\n",
                         coex_dm->pre_low_penalty_ra,
-                           coex_dm->cur_low_penalty_ra);
+                        coex_dm->cur_low_penalty_ra);
 
                if (coex_dm->pre_low_penalty_ra == coex_dm->cur_low_penalty_ra)
                        return;
        }
-       btc8821a2ant_SetSwPenTxRateAdapt(btcoexist,
+       btc8821a2ant_set_sw_penalty_tx_rate_adaptive(btcoexist,
                                         coex_dm->cur_low_penalty_ra);
 
        coex_dm->pre_low_penalty_ra = coex_dm->cur_low_penalty_ra;
 }
 
-static void halbtc8821a2ant_set_dac_swing_reg(struct btc_coexist *btcoexist,
-                                             u32 level)
+static void btc8821a2ant_set_dac_swing_reg(struct btc_coexist *btcoexist,
+                                          u32 level)
 {
        struct rtl_priv *rtlpriv = btcoexist->adapter;
        u8 val = (u8)level;
@@ -918,14 +790,14 @@ static void btc8821a2ant_set_sw_full_dac_swing(struct btc_coexist *btcoexist,
                                               u32 sw_dac_swing_lvl)
 {
        if (sw_dac_swing_on)
-               halbtc8821a2ant_set_dac_swing_reg(btcoexist, sw_dac_swing_lvl);
+               btc8821a2ant_set_dac_swing_reg(btcoexist, sw_dac_swing_lvl);
        else
-               halbtc8821a2ant_set_dac_swing_reg(btcoexist, 0x18);
+               btc8821a2ant_set_dac_swing_reg(btcoexist, 0x18);
 }
 
-static void halbtc8821a2ant_dac_swing(struct btc_coexist *btcoexist,
-                                     bool force_exec, bool dac_swing_on,
-                                     u32 dac_swing_lvl)
+static void btc8821a2ant_dac_swing(struct btc_coexist *btcoexist,
+                                  bool force_exec, bool dac_swing_on,
+                                  u32 dac_swing_lvl)
 {
        struct rtl_priv *rtlpriv = btcoexist->adapter;
 
@@ -958,50 +830,9 @@ static void halbtc8821a2ant_dac_swing(struct btc_coexist *btcoexist,
        coex_dm->pre_dac_swing_lvl = coex_dm->cur_dac_swing_lvl;
 }
 
-static void halbtc8821a2ant_set_adc_back_off(struct btc_coexist *btcoexist,
-                                            bool adc_back_off)
-{
-       struct rtl_priv *rtlpriv = btcoexist->adapter;
-
-       if (adc_back_off) {
-               RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
-                        "[BTCoex], BB BackOff Level On!\n");
-               btcoexist->btc_write_1byte_bitmask(btcoexist, 0x8db, 0x60, 0x3);
-       } else {
-               RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
-                        "[BTCoex], BB BackOff Level Off!\n");
-               btcoexist->btc_write_1byte_bitmask(btcoexist, 0x8db, 0x60, 0x1);
-       }
-}
-
-static void halbtc8821a2ant_adc_back_off(struct btc_coexist *btcoexist,
-                                        bool force_exec, bool adc_back_off)
-{
-       struct rtl_priv *rtlpriv = btcoexist->adapter;
-
-       RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
-                "[BTCoex], %s turn AdcBackOff = %s\n",
-                (force_exec ? "force to" : ""),
-                ((adc_back_off) ? "ON" : "OFF"));
-       coex_dm->cur_adc_back_off = adc_back_off;
-
-       if (!force_exec) {
-               RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
-                        "[BTCoex], pre_adc_back_off = %d, cur_adc_back_off = %d\n",
-                        coex_dm->pre_adc_back_off,
-                        coex_dm->cur_adc_back_off);
-
-               if (coex_dm->pre_adc_back_off == coex_dm->cur_adc_back_off)
-                       return;
-       }
-       halbtc8821a2ant_set_adc_back_off(btcoexist, coex_dm->cur_adc_back_off);
-
-       coex_dm->pre_adc_back_off = coex_dm->cur_adc_back_off;
-}
-
-static void halbtc8821a2ant_set_coex_table(struct btc_coexist *btcoexist,
-                                          u32 val0x6c0, u32 val0x6c4,
-                                          u32 val0x6c8, u8 val0x6cc)
+static void btc8821a2ant_set_coex_table(struct btc_coexist *btcoexist,
+                                       u32 val0x6c0, u32 val0x6c4,
+                                       u32 val0x6c8, u8 val0x6cc)
 {
        struct rtl_priv *rtlpriv = btcoexist->adapter;
 
@@ -1022,9 +853,9 @@ static void halbtc8821a2ant_set_coex_table(struct btc_coexist *btcoexist,
        btcoexist->btc_write_1byte(btcoexist, 0x6cc, val0x6cc);
 }
 
-static void halbtc8821a2ant_coex_table(struct btc_coexist *btcoexist,
-                                      bool force_exec, u32 val0x6c0,
-                                      u32 val0x6c4, u32 val0x6c8, u8 val0x6cc)
+static void btc8821a2ant_coex_table(struct btc_coexist *btcoexist,
+                                   bool force_exec, u32 val0x6c0,
+                                   u32 val0x6c4, u32 val0x6c8, u8 val0x6cc)
 {
        struct rtl_priv *rtlpriv = btcoexist->adapter;
 
@@ -1057,8 +888,8 @@ static void halbtc8821a2ant_coex_table(struct btc_coexist *btcoexist,
                    (coex_dm->pre_val0x6cc == coex_dm->cur_val0x6cc))
                        return;
        }
-       halbtc8821a2ant_set_coex_table(btcoexist, val0x6c0, val0x6c4, val0x6c8,
-                                      val0x6cc);
+       btc8821a2ant_set_coex_table(btcoexist, val0x6c0, val0x6c4, val0x6c8,
+                                   val0x6cc);
 
        coex_dm->pre_val0x6c0 = coex_dm->cur_val0x6c0;
        coex_dm->pre_val0x6c4 = coex_dm->cur_val0x6c4;
@@ -1066,14 +897,97 @@ static void halbtc8821a2ant_coex_table(struct btc_coexist *btcoexist,
        coex_dm->pre_val0x6cc = coex_dm->cur_val0x6cc;
 }
 
-static void halbtc8821a2ant_set_fw_ignore_wlan_act(struct btc_coexist *btcoex,
-                                                  bool enable)
+static void btc8821a2ant_coex_table_with_type(struct btc_coexist *btcoexist,
+                                             bool force_exec, u8 type)
+{
+       coex_sta->coex_table_type = type;
+
+       switch (type) {
+       case 0:
+               btc8821a2ant_coex_table(btcoexist, force_exec, 0x55555555,
+                                       0x55555555, 0xffffff, 0x3);
+               break;
+       case 1:
+               btc8821a2ant_coex_table(btcoexist, force_exec, 0x55555555,
+                                       0x5afa5afa, 0xffffff, 0x3);
+               break;
+       case 2:
+               btc8821a2ant_coex_table(btcoexist, force_exec, 0x5ada5ada,
+                                       0x5ada5ada, 0xffffff, 0x3);
+               break;
+       case 3:
+               btc8821a2ant_coex_table(btcoexist, force_exec, 0xaaaaaaaa,
+                                       0xaaaaaaaa, 0xffffff, 0x3);
+               break;
+       case 4:
+               btc8821a2ant_coex_table(btcoexist, force_exec, 0xffffffff,
+                                       0xffffffff, 0xffffff, 0x3);
+               break;
+       case 5:
+               btc8821a2ant_coex_table(btcoexist, force_exec, 0x5fff5fff,
+                                       0x5fff5fff, 0xffffff, 0x3);
+               break;
+       case 6:
+               btc8821a2ant_coex_table(btcoexist, force_exec, 0x55ff55ff,
+                                       0x5a5a5a5a, 0xffffff, 0x3);
+               break;
+       case 7:
+               btc8821a2ant_coex_table(btcoexist, force_exec, 0x55dd55dd,
+                                       0x5ada5ada, 0xffffff, 0x3);
+               break;
+       case 8:
+               btc8821a2ant_coex_table(btcoexist, force_exec, 0x55dd55dd,
+                                       0x5ada5ada, 0xffffff, 0x3);
+               break;
+       case 9:
+               btc8821a2ant_coex_table(btcoexist, force_exec, 0x55dd55dd,
+                                       0x5ada5ada, 0xffffff, 0x3);
+               break;
+       case 10:
+               btc8821a2ant_coex_table(btcoexist, force_exec, 0x55dd55dd,
+                                       0x5ada5ada, 0xffffff, 0x3);
+               break;
+       case 11:
+               btc8821a2ant_coex_table(btcoexist, force_exec, 0x55dd55dd,
+                                       0x5ada5ada, 0xffffff, 0x3);
+               break;
+       case 12:
+               btc8821a2ant_coex_table(btcoexist, force_exec, 0x55dd55dd,
+                                       0x5ada5ada, 0xffffff, 0x3);
+               break;
+       case 13:
+               btc8821a2ant_coex_table(btcoexist, force_exec, 0x5fff5fff,
+                                       0xaaaaaaaa, 0xffffff, 0x3);
+               break;
+       case 14:
+               btc8821a2ant_coex_table(btcoexist, force_exec, 0x5fff5fff,
+                                       0x5ada5ada, 0xffffff, 0x3);
+               break;
+       case 15:
+               btc8821a2ant_coex_table(btcoexist, force_exec, 0x55dd55dd,
+                                       0xaaaaaaaa, 0xffffff, 0x3);
+               break;
+       case 16:
+               btc8821a2ant_coex_table(btcoexist, force_exec, 0x5fdf5fdf,
+                                       0x5fdb5fdb, 0xffffff, 0x3);
+               break;
+       case 17:
+               btc8821a2ant_coex_table(btcoexist, force_exec, 0xfafafafa,
+                                       0xfafafafa, 0xffffff, 0x3);
+               break;
+       default:
+               break;
+       }
+}
+
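
The switch above is pure data, and types 7 through 12 program identical register values, so the same mapping could be expressed as a lookup table. A sketch of that alternative, with the values copied from the cases above (illustrative only, not the driver's implementation):

	#include <inttypes.h>
	#include <stdint.h>
	#include <stdio.h>

	struct coex_table_val {
		uint32_t val0x6c0, val0x6c4, val0x6c8;
		uint8_t val0x6cc;
	};

	/* values copied from cases 0, 1, 2 and 7 above;
	 * cases 8..12 repeat case 7
	 */
	static const struct coex_table_val coex_tables[] = {
		{ 0x55555555, 0x55555555, 0xffffff, 0x3 },	/* type 0 */
		{ 0x55555555, 0x5afa5afa, 0xffffff, 0x3 },	/* type 1 */
		{ 0x5ada5ada, 0x5ada5ada, 0xffffff, 0x3 },	/* type 2 */
		{ 0x55dd55dd, 0x5ada5ada, 0xffffff, 0x3 },	/* types 7..12 */
	};

	int main(void)
	{
		const struct coex_table_val *t = &coex_tables[3];

		printf("0x6c0 = 0x%08" PRIx32 ", 0x6cc = 0x%x\n",
		       t->val0x6c0, (unsigned)t->val0x6cc);
		return 0;
	}
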
+static void btc8821a2ant_set_fw_ignore_wlan_act(struct btc_coexist *btcoex,
+                                               bool enable)
 {
        struct rtl_priv *rtlpriv = btcoex->adapter;
        u8 h2c_parameter[1] = {0};
 
        if (enable)
-               h2c_parameter[0] |= BIT0;/* function enable */
+               h2c_parameter[0] |= BIT0; /* function enable */
 
        RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
                 "[BTCoex], set FW for BT Ignore Wlan_Act, FW write 0x63 = 0x%x\n",
@@ -1082,8 +996,35 @@ static void halbtc8821a2ant_set_fw_ignore_wlan_act(struct btc_coexist *btcoex,
        btcoex->btc_fill_h2c(btcoex, 0x63, 1, h2c_parameter);
 }
 
-static void halbtc8821a2ant_ignore_wlan_act(struct btc_coexist *btcoexist,
-                                           bool force_exec, bool enable)
+static void btc8821a2ant_set_lps_rpwm(struct btc_coexist *btcoexist, u8 lps_val,
+                                     u8 rpwm_val)
+{
+       u8 lps = lps_val;
+       u8 rpwm = rpwm_val;
+
+       btcoexist->btc_set(btcoexist, BTC_SET_U1_LPS_VAL, &lps);
+       btcoexist->btc_set(btcoexist, BTC_SET_U1_RPWM_VAL, &rpwm);
+}
+
+static void btc8821a2ant_lps_rpwm(struct btc_coexist *btcoexist,
+                                 bool force_exec, u8 lps_val, u8 rpwm_val)
+{
+       coex_dm->cur_lps = lps_val;
+       coex_dm->cur_rpwm = rpwm_val;
+
+       if (!force_exec) {
+               if ((coex_dm->pre_lps == coex_dm->cur_lps) &&
+                   (coex_dm->pre_rpwm == coex_dm->cur_rpwm))
+                       return;
+       }
+       btc8821a2ant_set_lps_rpwm(btcoexist, lps_val, rpwm_val);
+
+       coex_dm->pre_lps = coex_dm->cur_lps;
+       coex_dm->pre_rpwm = coex_dm->cur_rpwm;
+}
+
+static void btc8821a2ant_ignore_wlan_act(struct btc_coexist *btcoexist,
+                                        bool force_exec, bool enable)
 {
        struct rtl_priv *rtlpriv = btcoexist->adapter;
 
@@ -1102,14 +1043,14 @@ static void halbtc8821a2ant_ignore_wlan_act(struct btc_coexist *btcoexist,
                    coex_dm->cur_ignore_wlan_act)
                        return;
        }
-       halbtc8821a2ant_set_fw_ignore_wlan_act(btcoexist, enable);
+       btc8821a2ant_set_fw_ignore_wlan_act(btcoexist, enable);
 
        coex_dm->pre_ignore_wlan_act = coex_dm->cur_ignore_wlan_act;
 }
 
-static void halbtc8821a2ant_set_fw_pstdma(struct btc_coexist *btcoexist,
-                                         u8 byte1, u8 byte2, u8 byte3,
-                                         u8 byte4, u8 byte5)
+static void btc8821a2ant_set_fw_ps_tdma(struct btc_coexist *btcoexist,
+                                       u8 byte1, u8 byte2, u8 byte3,
+                                       u8 byte4, u8 byte5)
 {
        struct rtl_priv *rtlpriv = btcoexist->adapter;
        u8 h2c_parameter[5];
@@ -1137,45 +1078,24 @@ static void halbtc8821a2ant_set_fw_pstdma(struct btc_coexist *btcoexist,
        btcoexist->btc_fill_h2c(btcoexist, 0x60, 5, h2c_parameter);
 }
 
-static void btc8821a2ant_sw_mech1(struct btc_coexist *btcoexist,
-                                 bool shrink_rx_lpf,
-                                 bool low_penalty_ra, bool limited_dig,
-                                 bool bt_lna_constrain)
+static void btc8821a2ant_sw_mechanism1(struct btc_coexist *btcoexist,
+                                      bool shrink_rx_lpf, bool low_penalty_ra,
+                                      bool limited_dig, bool bt_lna_constrain)
 {
-       u32 wifi_bw;
-
-       btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_BW, &wifi_bw);
-
-       if (BTC_WIFI_BW_HT40 != wifi_bw) {
-               /*only shrink RF Rx LPF for HT40*/
-               if (shrink_rx_lpf)
-                       shrink_rx_lpf = false;
-       }
-
-       halbtc8821a2ant_RfShrink(btcoexist, NORMAL_EXEC, shrink_rx_lpf);
-       halbtc8821a2ant_low_penalty_ra(btcoexist,
-                                      NORMAL_EXEC, low_penalty_ra);
-
-       /* no limited DIG
-        * btc8821a2_set_bt_lna_const(btcoexist,
-               NORMAL_EXEC, bBTLNAConstrain);
-        */
+       btc8821a2ant_low_penalty_ra(btcoexist, NORMAL_EXEC, low_penalty_ra);
 }
 
-static void btc8821a2ant_sw_mech2(struct btc_coexist *btcoexist,
-                                 bool agc_table_shift,
-                                 bool adc_back_off, bool sw_dac_swing,
-                                 u32 dac_swing_lvl)
+static void btc8821a2ant_sw_mechanism2(struct btc_coexist *btcoexist,
+                                      bool agc_table_shift, bool adc_back_off,
+                                      bool sw_dac_swing, u32 dac_swing_lvl)
 {
-       /* halbtc8821a2ant_AgcTable(btcoexist, NORMAL_EXEC, bAGCTableShift); */
-       halbtc8821a2ant_adc_back_off(btcoexist, NORMAL_EXEC, adc_back_off);
-       halbtc8821a2ant_dac_swing(btcoexist, NORMAL_EXEC, sw_dac_swing,
-                                 sw_dac_swing);
+       btc8821a2ant_dac_swing(btcoexist, NORMAL_EXEC, sw_dac_swing,
+                              dac_swing_lvl);
 }
 
-static void halbtc8821a2ant_set_ant_path(struct btc_coexist *btcoexist,
-                                        u8 ant_pos_type, bool init_hw_cfg,
-                                        bool wifi_off)
+static void btc8821a2ant_set_ant_path(struct btc_coexist *btcoexist,
+                                     u8 ant_pos_type, bool init_hw_cfg,
+                                     bool wifi_off)
 {
        struct btc_board_info *board_info = &btcoexist->board_info;
        u32 u4tmp = 0;
@@ -1189,21 +1109,18 @@ static void halbtc8821a2ant_set_ant_path(struct btc_coexist *btcoexist,
                btcoexist->btc_write_4byte(btcoexist, 0x4c, u4tmp);
 
                btcoexist->btc_write_4byte(btcoexist, 0x974, 0x3ff);
-               btcoexist->btc_write_1byte(btcoexist, 0xcb4, 0x77);
 
                if (board_info->btdm_ant_pos == BTC_ANTENNA_AT_MAIN_PORT) {
-                       /* tell firmware "antenna inverse"  ==>
-                        *      WRONG firmware antenna control code.
-                        *      ==>need fw to fix
+                       /* tell firmware "antenna inverse" ==> WRONG firmware
+                        * antenna control code ==> need fw to fix
                         */
                        h2c_parameter[0] = 1;
                        h2c_parameter[1] = 1;
                        btcoexist->btc_fill_h2c(btcoexist, 0x65, 2,
                                                h2c_parameter);
                } else {
-                       /* tell firmware "no antenna inverse"
-                        *      ==> WRONG firmware antenna control code.
-                        *      ==>need fw to fix
+                       /* tell firmware "no antenna inverse" ==> WRONG firmware
+                        * antenna control code ==> need fw to fix
                         */
                        h2c_parameter[0] = 0;
                        h2c_parameter[1] = 1;
@@ -1223,11 +1140,25 @@ static void halbtc8821a2ant_set_ant_path(struct btc_coexist *btcoexist,
        }
 }
 
-static void halbtc8821a2ant_ps_tdma(struct btc_coexist *btcoexist,
-                                   bool force_exec, bool turn_on, u8 type)
+static void btc8821a2ant_ps_tdma(struct btc_coexist *btcoexist,
+                                bool force_exec, bool turn_on, u8 type)
 {
        struct rtl_priv *rtlpriv = btcoexist->adapter;
 
+       u8 wifi_rssi_state, bt_rssi_state;
+
+       wifi_rssi_state = btc8821a2ant_wifi_rssi_state(btcoexist, 1, 2,
+                               BT_8821A_2ANT_WIFI_RSSI_COEXSWITCH_THRES, 0);
+       bt_rssi_state = btc8821a2ant_bt_rssi_state(btcoexist, 2,
+                               BT_8821A_2ANT_BT_RSSI_COEXSWITCH_THRES, 0);
+
+       if (!(BTC_RSSI_HIGH(wifi_rssi_state) &&
+             BTC_RSSI_HIGH(bt_rssi_state)) &&
+           turn_on) {
+               /* for WiFi RSSI low or BT RSSI low */
+               type = type + 100;
+       }
+
        RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
                 "[BTCoex], %s turn %s PS TDMA, type = %d\n",
                 (force_exec ? "force to" : ""), (turn_on ? "ON" : "OFF"),
@@ -1251,108 +1182,181 @@ static void halbtc8821a2ant_ps_tdma(struct btc_coexist *btcoexist,
                switch (type) {
                case 1:
                default:
-                       halbtc8821a2ant_set_fw_pstdma(btcoexist, 0xe3, 0x1a,
-                                                     0x1a, 0xe1, 0x90);
+                       btc8821a2ant_set_fw_ps_tdma(btcoexist, 0xe3, 0x3c,
+                                                   0x03, 0xf1, 0x90);
                        break;
                case 2:
-                       halbtc8821a2ant_set_fw_pstdma(btcoexist, 0xe3, 0x12,
-                                                     0x12, 0xe1, 0x90);
+                       btc8821a2ant_set_fw_ps_tdma(btcoexist, 0xe3, 0x2d,
+                                                   0x03, 0xf1, 0x90);
                        break;
                case 3:
-                       halbtc8821a2ant_set_fw_pstdma(btcoexist, 0xe3, 0x1c,
-                                                     0x3, 0xf1, 0x90);
+                       btc8821a2ant_set_fw_ps_tdma(btcoexist, 0xe3, 0x1c,
+                                                   0x3, 0xf1, 0x90);
                        break;
                case 4:
-                       halbtc8821a2ant_set_fw_pstdma(btcoexist, 0xe3, 0x10,
-                                                     0x03, 0xf1, 0x90);
+                       btc8821a2ant_set_fw_ps_tdma(btcoexist, 0xe3, 0x10,
+                                                   0x03, 0xf1, 0x90);
                        break;
                case 5:
-                       halbtc8821a2ant_set_fw_pstdma(btcoexist, 0xe3, 0x1a,
-                                                     0x1a, 0x60, 0x90);
+                       btc8821a2ant_set_fw_ps_tdma(btcoexist, 0xe3, 0x3c,
+                                                   0x3, 0x70, 0x90);
                        break;
                case 6:
-                       halbtc8821a2ant_set_fw_pstdma(btcoexist, 0xe3, 0x12,
-                                                     0x12, 0x60, 0x90);
+                       btc8821a2ant_set_fw_ps_tdma(btcoexist, 0xe3, 0x2d,
+                                                   0x3, 0x70, 0x90);
                        break;
                case 7:
-                       halbtc8821a2ant_set_fw_pstdma(btcoexist, 0xe3, 0x1c,
-                                                     0x3, 0x70, 0x90);
+                       btc8821a2ant_set_fw_ps_tdma(btcoexist, 0xe3, 0x1c,
+                                                   0x3, 0x70, 0x90);
                        break;
                case 8:
-                       halbtc8821a2ant_set_fw_pstdma(btcoexist, 0xa3, 0x10,
-                                                     0x3, 0x70, 0x90);
+                       btc8821a2ant_set_fw_ps_tdma(btcoexist, 0xa3, 0x10,
+                                                   0x3, 0x70, 0x90);
                        break;
                case 9:
-                       halbtc8821a2ant_set_fw_pstdma(btcoexist, 0xe3, 0x1a,
-                                                     0x1a, 0xe1, 0x90);
+                       btc8821a2ant_set_fw_ps_tdma(btcoexist, 0xe3, 0x3c,
+                                                   0x03, 0xf1, 0x90);
                        break;
                case 10:
-                       halbtc8821a2ant_set_fw_pstdma(btcoexist, 0xe3, 0x12,
-                                                     0x12, 0xe1, 0x90);
+                       btc8821a2ant_set_fw_ps_tdma(btcoexist, 0xe3, 0x2d,
+                                                   0x03, 0xf1, 0x90);
                        break;
                case 11:
-                       halbtc8821a2ant_set_fw_pstdma(btcoexist, 0xe3, 0xa,
-                                                     0xa, 0xe1, 0x90);
+                       btc8821a2ant_set_fw_ps_tdma(btcoexist, 0xe3, 0x1c,
+                                                   0x3, 0xf1, 0x90);
                        break;
                case 12:
-                       halbtc8821a2ant_set_fw_pstdma(btcoexist, 0xe3, 0x5,
-                                                     0x5, 0xe1, 0x90);
+                       btc8821a2ant_set_fw_ps_tdma(btcoexist, 0xe3, 0x10,
+                                                   0x3, 0xf1, 0x90);
                        break;
                case 13:
-                       halbtc8821a2ant_set_fw_pstdma(btcoexist, 0xe3, 0x1a,
-                                                     0x1a, 0x60, 0x90);
+                       btc8821a2ant_set_fw_ps_tdma(btcoexist, 0xe3, 0x3c,
+                                                   0x3, 0x70, 0x90);
                        break;
                case 14:
-                       halbtc8821a2ant_set_fw_pstdma(btcoexist, 0xe3,
-                                                     0x12, 0x12, 0x60, 0x90);
+                       btc8821a2ant_set_fw_ps_tdma(btcoexist, 0xe3, 0x2d,
+                                                   0x3, 0x70, 0x90);
                        break;
                case 15:
-                       halbtc8821a2ant_set_fw_pstdma(btcoexist, 0xe3, 0xa,
-                                                     0xa, 0x60, 0x90);
+                       btc8821a2ant_set_fw_ps_tdma(btcoexist, 0xe3, 0x1c,
+                                                   0x3, 0x70, 0x90);
                        break;
                case 16:
-                       halbtc8821a2ant_set_fw_pstdma(btcoexist, 0xe3, 0x5,
-                                                     0x5, 0x60, 0x90);
+                       btc8821a2ant_set_fw_ps_tdma(btcoexist, 0xe3, 0x10,
+                                                   0x3, 0x70, 0x90);
                        break;
                case 17:
-                       halbtc8821a2ant_set_fw_pstdma(btcoexist, 0xa3, 0x2f,
-                                                     0x2f, 0x60, 0x90);
+                       btc8821a2ant_set_fw_ps_tdma(btcoexist, 0xa3, 0x2f,
+                                                   0x2f, 0x60, 0x90);
                        break;
                case 18:
-                       halbtc8821a2ant_set_fw_pstdma(btcoexist, 0xe3, 0x5,
-                                                     0x5, 0xe1, 0x90);
+                       btc8821a2ant_set_fw_ps_tdma(btcoexist, 0xe3, 0x5, 0x5,
+                                                   0xe1, 0x90);
                        break;
                case 19:
-                       halbtc8821a2ant_set_fw_pstdma(btcoexist, 0xe3, 0x25,
-                                                     0x25, 0xe1, 0x90);
+                       btc8821a2ant_set_fw_ps_tdma(btcoexist, 0xe3, 0x25,
+                                                   0x25, 0xe1, 0x90);
                        break;
                case 20:
-                       halbtc8821a2ant_set_fw_pstdma(btcoexist, 0xe3, 0x25,
-                                                     0x25, 0x60, 0x90);
+                       btc8821a2ant_set_fw_ps_tdma(btcoexist, 0xe3, 0x25,
+                                                   0x25, 0x60, 0x90);
                        break;
                case 21:
-                       halbtc8821a2ant_set_fw_pstdma(btcoexist, 0xe3, 0x15,
-                                                     0x03, 0x70, 0x90);
+                       btc8821a2ant_set_fw_ps_tdma(btcoexist, 0xe3, 0x15,
+                                                   0x03, 0x70, 0x90);
                        break;
-               case 71:
-                       halbtc8821a2ant_set_fw_pstdma(btcoexist, 0xe3, 0x1a,
-                                                     0x1a, 0xe1, 0x90);
+               case 23:
+                       btc8821a2ant_set_fw_ps_tdma(btcoexist, 0xe3, 0x1e,
+                                                   0x03, 0xf0, 0x14);
                        break;
-               }
-       } else {
-               /* disable PS tdma */
-               switch (type) {
-               case 0:
-                       halbtc8821a2ant_set_fw_pstdma(btcoexist, 0x0, 0x0, 0x0,
-                                                     0x40, 0x0);
+               case 24:
+               case 124:
+                       btc8821a2ant_set_fw_ps_tdma(btcoexist, 0xd3, 0x3c,
+                                                   0x03, 0x70, 0x50);
+                       break;
+               case 25:
+                       btc8821a2ant_set_fw_ps_tdma(btcoexist, 0xe3, 0x14,
+                                                   0x03, 0xf1, 0x90);
+                       break;
+               case 26:
+                       btc8821a2ant_set_fw_ps_tdma(btcoexist, 0xe3, 0x30,
+                                                   0x03, 0xf1, 0x90);
+                       break;
+               case 71:
+                       btc8821a2ant_set_fw_ps_tdma(btcoexist, 0xe3, 0x3c,
+                                                   0x03, 0xf1, 0x90);
+                       break;
+               case 101:
+               case 105:
+               case 171:
+                       btc8821a2ant_set_fw_ps_tdma(btcoexist, 0xd3, 0x3a,
+                                                   0x03, 0x70, 0x50);
+                       break;
+               case 102:
+               case 106:
+               case 110:
+               case 114:
+                       btc8821a2ant_set_fw_ps_tdma(btcoexist, 0xd3, 0x2d,
+                                                   0x03, 0x70, 0x50);
+                       break;
+               case 103:
+               case 107:
+               case 111:
+               case 115:
+                       btc8821a2ant_set_fw_ps_tdma(btcoexist, 0xd3, 0x1c,
+                                                   0x03, 0x70, 0x50);
+                       break;
+               case 104:
+               case 108:
+               case 112:
+               case 116:
+                       btc8821a2ant_set_fw_ps_tdma(btcoexist, 0xd3, 0x10,
+                                                   0x03, 0x70, 0x50);
+                       break;
+               case 109:
+                       btc8821a2ant_set_fw_ps_tdma(btcoexist, 0xe3, 0x3c,
+                                                   0x03, 0xf1, 0x90);
+                       break;
+               case 113:
+                       btc8821a2ant_set_fw_ps_tdma(btcoexist, 0xe3, 0x3c,
+                                                   0x03, 0x70, 0x90);
+                       break;
+               case 121:
+                       btc8821a2ant_set_fw_ps_tdma(btcoexist, 0xe3, 0x15,
+                                                   0x03, 0x70, 0x90);
+                       break;
+               case 22:
+               case 122:
+                       btc8821a2ant_set_fw_ps_tdma(btcoexist, 0xe3, 0x35,
+                                                   0x03, 0x71, 0x11);
+                       break;
+               case 123:
+                       btc8821a2ant_set_fw_ps_tdma(btcoexist, 0xd3, 0x1c,
+                                                   0x03, 0x70, 0x54);
+                       break;
+               case 125:
+                       btc8821a2ant_set_fw_ps_tdma(btcoexist, 0xd3, 0x14,
+                                                   0x03, 0x70, 0x50);
+                       break;
+               case 126:
+                       btc8821a2ant_set_fw_ps_tdma(btcoexist, 0xd3, 0x30,
+                                                   0x03, 0x70, 0x50);
+                       break;
+               }
+       } else {
+               /* disable PS tdma */
+               switch (type) {
+               case 0:
+                       btc8821a2ant_set_fw_ps_tdma(btcoexist, 0x0, 0x0, 0x0,
+                                                   0x40, 0x0);
                        break;
                case 1:
-                       halbtc8821a2ant_set_fw_pstdma(btcoexist, 0x0, 0x0, 0x0,
-                                                     0x48, 0x0);
+                       btc8821a2ant_set_fw_ps_tdma(btcoexist, 0x0, 0x0, 0x0,
+                                                   0x48, 0x0);
                        break;
                default:
-                       halbtc8821a2ant_set_fw_pstdma(btcoexist, 0x0, 0x0, 0x0,
-                                                     0x40, 0x0);
+                       btc8821a2ant_set_fw_ps_tdma(btcoexist, 0x0, 0x0, 0x0,
+                                                   0x40, 0x0);
                        break;
                }
        }
@@ -1362,867 +1366,450 @@ static void halbtc8821a2ant_ps_tdma(struct btc_coexist *btcoexist,
        coex_dm->pre_ps_tdma = coex_dm->cur_ps_tdma;
 }
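
The five arguments to btc8821a2ant_set_fw_ps_tdma() in the case table above are the raw payload of a firmware H2C command; in this driver family the helper forwards them via btc_fill_h2c(). A minimal sketch of that path, assuming the conventional layout (byte 1 = TDMA control flags such as 0xe3/0xd3/0x00, byte 2 = WiFi slot length, byte 3 = BT slot length, bytes 4-5 = extra control bits; the exact semantics are defined by the WiFi firmware, not by this patch):

	/* hypothetical illustration only, not part of this patch */
	static void example_set_fw_ps_tdma(struct btc_coexist *btcoexist,
					   u8 byte1, u8 byte2, u8 byte3,
					   u8 byte4, u8 byte5)
	{
		u8 h2c_parameter[5];

		h2c_parameter[0] = byte1;	/* TDMA on/off + control flags */
		h2c_parameter[1] = byte2;	/* WiFi slot duration */
		h2c_parameter[2] = byte3;	/* BT slot duration */
		h2c_parameter[3] = byte4;
		h2c_parameter[4] = byte5;

		/* 0x60 is the PS-TDMA H2C element id used by this file */
		btcoexist->btc_fill_h2c(btcoexist, 0x60, 5, h2c_parameter);
	}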
 
-static void halbtc8821a2ant_coex_all_off(struct btc_coexist *btcoexist)
+static void
+btc8821a2ant_ps_tdma_check_for_power_save_state(struct btc_coexist *btcoexist,
+                                               bool new_ps_state)
+{
+       u8 lps_mode = 0x0;
+
+       btcoexist->btc_get(btcoexist, BTC_GET_U1_LPS_MODE, &lps_mode);
+
+       if (lps_mode) {
+               /* already under LPS state */
+               if (new_ps_state) {
+                       /* keep state under LPS, do nothing */
+               } else {
+                       /* will leave LPS state, turn off psTdma first */
+                       btc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC, false, 1);
+               }
+       } else {
+               /* NO PS state */
+               if (new_ps_state) {
+                       /* will enter LPS state, turn off psTdma first */
+                       btc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC, false, 1);
+               } else {
+                       /* keep state under NO PS state, do nothing */
+               }
+       }
+}
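
The four branches above reduce to one rule: psTdma is forced off whenever the LPS state is about to change, in either direction, and left alone when it stays the same. An equivalent compact form (sketch):

	if ((lps_mode != 0x0) != new_ps_state)
		/* LPS state will flip: quiesce psTdma first */
		btc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC, false, 1);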
+
+static void btc8821a2ant_power_save_state(struct btc_coexist *btcoexist,
+                                         u8 ps_type, u8 lps_val, u8 rpwm_val)
+{
+       bool low_pwr_disable = false;
+
+       switch (ps_type) {
+       case BTC_PS_WIFI_NATIVE:
+               /* recover to original 32k low power setting */
+               low_pwr_disable = false;
+               btcoexist->btc_set(btcoexist, BTC_SET_ACT_DISABLE_LOW_POWER,
+                                  &low_pwr_disable);
+               btcoexist->btc_set(btcoexist, BTC_SET_ACT_NORMAL_LPS, NULL);
+               coex_sta->force_lps_on = false;
+               break;
+       case BTC_PS_LPS_ON:
+               btc8821a2ant_ps_tdma_check_for_power_save_state(btcoexist,
+                                                               true);
+               btc8821a2ant_lps_rpwm(btcoexist, NORMAL_EXEC, lps_val,
+                                     rpwm_val);
+               /* when coex forces LPS entry, do not enter 32k low power */
+               low_pwr_disable = true;
+               btcoexist->btc_set(btcoexist, BTC_SET_ACT_DISABLE_LOW_POWER,
+                                  &low_pwr_disable);
+               /* power save must be executed before psTdma */
+               btcoexist->btc_set(btcoexist, BTC_SET_ACT_ENTER_LPS, NULL);
+               coex_sta->force_lps_on = true;
+               break;
+       case BTC_PS_LPS_OFF:
+               btc8821a2ant_ps_tdma_check_for_power_save_state(btcoexist,
+                                                               false);
+               btcoexist->btc_set(btcoexist, BTC_SET_ACT_LEAVE_LPS, NULL);
+               coex_sta->force_lps_on = false;
+               break;
+       default:
+               break;
+       }
+}
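
lps_val and rpwm_val are consumed only on the BTC_PS_LPS_ON path; the other two policies ignore them. A typical call pattern, assuming the 0x50/0x4 LPS/RPWM pair used by sibling rtlwifi coex drivers (illustrative values, not taken from this hunk):

	/* force LPS while a TDMA schedule is active ... */
	btc8821a2ant_power_save_state(btcoexist, BTC_PS_LPS_ON, 0x50, 0x4);
	btc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 22);

	/* ... and tear it back down to native WiFi power save */
	btc8821a2ant_power_save_state(btcoexist, BTC_PS_WIFI_NATIVE, 0x0, 0x0);
	btc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC, false, 1);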
+
+static void btc8821a2ant_coex_all_off(struct btc_coexist *btcoexist)
 {
        /* fw all off */
-       halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC, false, 1);
-       halbtc8821a2ant_fw_dac_swing_lvl(btcoexist, NORMAL_EXEC, 6);
-       halbtc8821a2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, false);
+       btc8821a2ant_power_save_state(btcoexist, BTC_PS_WIFI_NATIVE, 0x0, 0x0);
+       btc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC, false, 1);
+       btc8821a2ant_fw_dac_swing_lvl(btcoexist, NORMAL_EXEC, 6);
+       btc8821a2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, 0);
 
        /* sw all off */
-       btc8821a2ant_sw_mech1(btcoexist, false, false, false, false);
-       btc8821a2ant_sw_mech2(btcoexist, false, false, false, 0x18);
+       btc8821a2ant_sw_mechanism1(btcoexist, false, false, false, false);
+       btc8821a2ant_sw_mechanism2(btcoexist, false, false, false, 0x18);
 
        /* hw all off */
-       halbtc8821a2ant_coex_table(btcoexist, NORMAL_EXEC,
-                                  0x55555555, 0x55555555, 0xffff, 0x3);
+       btc8821a2ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 0);
 }
 
-static void halbtc8821a2ant_coex_under_5g(struct btc_coexist *btcoexist)
+static void btc8821a2ant_coex_under_5g(struct btc_coexist *btcoexist)
 {
-       halbtc8821a2ant_coex_all_off(btcoexist);
+       btc8821a2ant_coex_all_off(btcoexist);
+       btc8821a2ant_ignore_wlan_act(btcoexist, NORMAL_EXEC, true);
 }
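
The 5 GHz handler gains one step over the old halbtc8821a2ant_coex_under_5g(): besides dropping every coex mechanism it now tells BT to ignore WLAN activity, which is safe because 5 GHz WiFi does not overlap the 2.4 GHz BT band.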
 
-static void halbtc8821a2ant_init_coex_dm(struct btc_coexist *btcoexist)
+static void btc8821a2ant_init_coex_dm(struct btc_coexist *btcoexist)
 {
        /* force to reset coex mechanism */
-       halbtc8821a2ant_coex_table(btcoexist, FORCE_EXEC, 0x55555555,
-                                  0x55555555, 0xffff, 0x3);
+       btc8821a2ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 0);
 
-       halbtc8821a2ant_ps_tdma(btcoexist, FORCE_EXEC, false, 1);
-       halbtc8821a2ant_fw_dac_swing_lvl(btcoexist, FORCE_EXEC, 6);
-       halbtc8821a2ant_dec_bt_pwr(btcoexist, FORCE_EXEC, false);
+       btc8821a2ant_power_save_state(btcoexist, BTC_PS_WIFI_NATIVE, 0x0, 0x0);
+       btc8821a2ant_ps_tdma(btcoexist, FORCE_EXEC, false, 1);
+       btc8821a2ant_fw_dac_swing_lvl(btcoexist, FORCE_EXEC, 6);
+       btc8821a2ant_dec_bt_pwr(btcoexist, FORCE_EXEC, 0);
 
-       btc8821a2ant_sw_mech1(btcoexist, false, false, false, false);
-       btc8821a2ant_sw_mech2(btcoexist, false, false, false, 0x18);
+       btc8821a2ant_sw_mechanism1(btcoexist, false, false, false, false);
+       btc8821a2ant_sw_mechanism2(btcoexist, false, false, false, 0x18);
 }
 
-static void halbtc8821a2ant_bt_inquiry_page(struct btc_coexist *btcoexist)
+static void btc8821a2ant_action_bt_inquiry(struct btc_coexist *btcoexist)
 {
+       struct rtl_priv *rtlpriv = btcoexist->adapter;
+       u8 wifi_rssi_state, wifi_rssi_state1, bt_rssi_state;
+       bool wifi_connected = false;
        bool low_pwr_disable = true;
+       bool scan = false, link = false, roam = false;
+
+       wifi_rssi_state =
+               btc8821a2ant_wifi_rssi_state(btcoexist, 0, 2, 15, 0);
+       wifi_rssi_state1 = btc8821a2ant_wifi_rssi_state(btcoexist, 1, 2,
+                               BT_8821A_2ANT_WIFI_RSSI_COEXSWITCH_THRES, 0);
+       bt_rssi_state = btc8821a2ant_bt_rssi_state(btcoexist,
+               2, BT_8821A_2ANT_BT_RSSI_COEXSWITCH_THRES, 0);
 
        btcoexist->btc_set(btcoexist, BTC_SET_ACT_DISABLE_LOW_POWER,
                           &low_pwr_disable);
-
-       halbtc8821a2ant_coex_table(btcoexist, NORMAL_EXEC, 0x55ff55ff,
-                                  0x5afa5afa, 0xffff, 0x3);
-       halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 3);
-}
-
-static bool halbtc8821a2ant_is_common_action(struct btc_coexist *btcoexist)
-{
-       struct rtl_priv *rtlpriv = btcoexist->adapter;
-       bool common = false, wifi_connected = false, wifi_busy = false;
-       bool low_pwr_disable = false;
-
        btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_CONNECTED,
                           &wifi_connected);
-       btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_BUSY, &wifi_busy);
 
-       halbtc8821a2ant_coex_table(btcoexist, NORMAL_EXEC, 0x55ff55ff,
-                                  0x5afa5afa, 0xffff, 0x3);
+       btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_SCAN, &scan);
+       btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_LINK, &link);
+       btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_ROAM, &roam);
 
-       if (!wifi_connected &&
-           BT_8821A_2ANT_BT_STATUS_IDLE == coex_dm->bt_status) {
-               low_pwr_disable = false;
-               btcoexist->btc_set(btcoexist, BTC_SET_ACT_DISABLE_LOW_POWER,
-                                  &low_pwr_disable);
+       btc8821a2ant_power_save_state(btcoexist, BTC_PS_WIFI_NATIVE, 0x0, 0x0);
 
+       if (scan || link || roam) {
+               RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                        "[BTCoex], Wifi link process + BT Inq/Page!!\n");
+               btc8821a2ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 15);
+               btc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 22);
+       } else if (wifi_connected) {
+               RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                        "[BTCoex], Wifi connected + BT Inq/Page!!\n");
+               btc8821a2ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 15);
+               btc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 22);
+       } else {
                RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
-                        "[BTCoex], Wifi IPS + BT IPS!!\n");
+                        "[BTCoex], Wifi no-link + BT Inq/Page!!\n");
+               btc8821a2ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 0);
+               btc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC, false, 1);
+       }
 
-               halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC, false, 1);
-               halbtc8821a2ant_fw_dac_swing_lvl(btcoexist, NORMAL_EXEC, 6);
-               halbtc8821a2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, false);
+       btc8821a2ant_fw_dac_swing_lvl(btcoexist, FORCE_EXEC, 6);
+       btc8821a2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, 0);
 
-               btc8821a2ant_sw_mech1(btcoexist, false, false, false, false);
-               btc8821a2ant_sw_mech2(btcoexist, false, false, false, 0x18);
+       btc8821a2ant_sw_mechanism1(btcoexist, false, false, false, false);
+       btc8821a2ant_sw_mechanism2(btcoexist, false, false, false, 0x18);
+}
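
The *_rssi_state() helpers sampled at the top of this function return a four-valued state, assuming the convention used across rtlwifi coex drivers: BTC_RSSI_STATE_LOW/HIGH plus _STAY_ variants that add hysteresis around the threshold. The BTC_RSSI_HIGH() macro, used later in this hunk, folds the two high states together. A sketch of the usual consumer:

	wifi_rssi_state1 = btc8821a2ant_wifi_rssi_state(btcoexist, 1, 2,
			BT_8821A_2ANT_WIFI_RSSI_COEXSWITCH_THRES, 0);

	if (BTC_RSSI_HIGH(wifi_rssi_state1)) {
		/* link sits above the coex-switch threshold: relax coex */
	} else {
		/* weak link: bias the TDMA schedule toward WiFi */
	}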
 
-               common = true;
-       } else if (wifi_connected &&
-                  (BT_8821A_2ANT_BT_STATUS_IDLE == coex_dm->bt_status)) {
-               low_pwr_disable = false;
-               btcoexist->btc_set(btcoexist, BTC_SET_ACT_DISABLE_LOW_POWER,
-                                  &low_pwr_disable);
+void btc8821a2ant_action_wifi_link_process(struct btc_coexist *btcoexist)
+{
+       struct rtl_priv *rtlpriv = btcoexist->adapter;
+       u8 u8tmpa, u8tmpb;
 
-               if (wifi_busy) {
-                       RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
-                                "[BTCoex], Wifi Busy + BT IPS!!\n");
-                       halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC,
-                                               false, 1);
-               } else {
-                       RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
-                                "[BTCoex], Wifi LPS + BT IPS!!\n");
-                       halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC,
-                                               false, 1);
-               }
+       btc8821a2ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 15);
+       btc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 22);
 
-               halbtc8821a2ant_fw_dac_swing_lvl(btcoexist, NORMAL_EXEC, 6);
-               halbtc8821a2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, false);
+       btc8821a2ant_sw_mechanism1(btcoexist, false, false, false, false);
+       btc8821a2ant_sw_mechanism2(btcoexist, false, false, false, 0x18);
 
-               btc8821a2ant_sw_mech1(btcoexist, false, false, false, false);
-               btc8821a2ant_sw_mech2(btcoexist, false, false, false, 0x18);
+       u8tmpa = btcoexist->btc_read_1byte(btcoexist, 0x765);
+       u8tmpb = btcoexist->btc_read_1byte(btcoexist, 0x76e);
 
-               common = true;
-       } else if (!wifi_connected &&
-                  (BT_8821A_2ANT_BT_STATUS_CON_IDLE == coex_dm->bt_status)) {
-               low_pwr_disable = true;
-               btcoexist->btc_set(btcoexist, BTC_SET_ACT_DISABLE_LOW_POWER,
-                                  &low_pwr_disable);
+       RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                "[BTCoex], 0x765=0x%x, 0x76e=0x%x\n", u8tmpa, u8tmpb);
+}
 
-               RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
-                        "[BTCoex], Wifi IPS + BT LPS!!\n");
+static bool btc8821a2ant_action_wifi_idle_process(struct btc_coexist *btcoexist)
+{
+       struct rtl_priv *rtlpriv = btcoexist->adapter;
+       u8 wifi_rssi_state, wifi_rssi_state1, bt_rssi_state;
+       u8 ap_num = 0;
 
-               halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC, false, 1);
-               halbtc8821a2ant_fw_dac_swing_lvl(btcoexist, NORMAL_EXEC, 6);
-               halbtc8821a2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, false);
+       wifi_rssi_state =
+               btc8821a2ant_wifi_rssi_state(btcoexist, 0, 2, 15, 0);
+       wifi_rssi_state1 = btc8821a2ant_wifi_rssi_state(btcoexist, 1, 2,
+                       BT_8821A_2ANT_WIFI_RSSI_COEXSWITCH_THRES - 20, 0);
+       bt_rssi_state = btc8821a2ant_bt_rssi_state(btcoexist,
+                       2, BT_8821A_2ANT_BT_RSSI_COEXSWITCH_THRES, 0);
 
-               btc8821a2ant_sw_mech1(btcoexist, false, false, false, false);
-               btc8821a2ant_sw_mech2(btcoexist, false, false, false, 0x18);
-               common = true;
-       } else if (wifi_connected &&
-                  (BT_8821A_2ANT_BT_STATUS_CON_IDLE == coex_dm->bt_status)) {
-               low_pwr_disable = true;
-               btcoexist->btc_set(btcoexist,
-                       BTC_SET_ACT_DISABLE_LOW_POWER, &low_pwr_disable);
+       btcoexist->btc_get(btcoexist, BTC_GET_U1_AP_NUM, &ap_num);
 
-               if (wifi_busy) {
-                       RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
-                                "[BTCoex], Wifi Busy + BT LPS!!\n");
-                       halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC,
-                                               false, 1);
-               } else {
-                       RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
-                                "[BTCoex], Wifi LPS + BT LPS!!\n");
-                       halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC,
-                                               false, 1);
-               }
+       /* define the office environment: HID + A2DP with high WiFi RSSI */
+       if (BTC_RSSI_HIGH(wifi_rssi_state1) && (coex_sta->hid_exist) &&
+           (coex_sta->a2dp_exist)) {
+               RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                        "[BTCoex], Wifi  idle process for BT HID+A2DP exist!!\n");
 
-               halbtc8821a2ant_fw_dac_swing_lvl(btcoexist, NORMAL_EXEC, 6);
-               halbtc8821a2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, false);
+               btc8821a2ant_dac_swing(btcoexist, NORMAL_EXEC, true, 0x6);
+               btc8821a2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, 0);
 
-               btc8821a2ant_sw_mech1(btcoexist, true, true, true, true);
-               btc8821a2ant_sw_mech2(btcoexist, false, false, false, 0x18);
+               /* sw all off */
+               btc8821a2ant_sw_mechanism1(btcoexist, false, false, false,
+                                          false);
+               btc8821a2ant_sw_mechanism2(btcoexist, false, false, false,
+                                          0x18);
 
-               common = true;
-       } else if (!wifi_connected &&
-                  (BT_8821A_2ANT_BT_STATUS_NON_IDLE ==
-                   coex_dm->bt_status)) {
-               low_pwr_disable = false;
-               btcoexist->btc_set(btcoexist,
-                       BTC_SET_ACT_DISABLE_LOW_POWER, &low_pwr_disable);
+               btc8821a2ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 0);
+               btc8821a2ant_power_save_state(btcoexist, BTC_PS_WIFI_NATIVE,
+                                             0x0, 0x0);
+               btc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC, false, 1);
 
+               return true;
+       } else if (coex_sta->pan_exist) {
                RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
-                        "[BTCoex], Wifi IPS + BT Busy!!\n");
-
-               halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC, false, 1);
-               halbtc8821a2ant_fw_dac_swing_lvl(btcoexist, NORMAL_EXEC, 6);
-               halbtc8821a2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, false);
-
-               btc8821a2ant_sw_mech1(btcoexist, false, false,
-                                     false, false);
-               btc8821a2ant_sw_mech2(btcoexist, false, false,
-                                     false, 0x18);
+                        "[BTCoex], Wifi  idle process for BT PAN exist!!\n");
 
-               common = true;
-       } else {
-               low_pwr_disable = true;
-               btcoexist->btc_set(btcoexist,
-                                  BTC_SET_ACT_DISABLE_LOW_POWER,
-                                  &low_pwr_disable);
+               btc8821a2ant_dac_swing(btcoexist, NORMAL_EXEC, true, 0x6);
+               btc8821a2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, 0);
 
-               if (wifi_busy) {
-                       RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
-                                "[BTCoex], Wifi Busy + BT Busy!!\n");
-                       common = false;
-               } else {
-                       RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
-                                "[BTCoex], Wifi LPS + BT Busy!!\n");
-                       halbtc8821a2ant_ps_tdma(btcoexist,
-                                               NORMAL_EXEC, true, 21);
+               /* sw all off */
+               btc8821a2ant_sw_mechanism1(btcoexist, false, false, false,
+                                          false);
+               btc8821a2ant_sw_mechanism2(btcoexist, false, false, false,
+                                          0x18);
 
-                       if (halbtc8821a2ant_need_to_dec_bt_pwr(btcoexist))
-                               halbtc8821a2ant_dec_bt_pwr(btcoexist,
-                                                          NORMAL_EXEC, true);
-                       else
-                               halbtc8821a2ant_dec_bt_pwr(btcoexist,
-                                                          NORMAL_EXEC, false);
+               btc8821a2ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 0);
+               btc8821a2ant_power_save_state(btcoexist, BTC_PS_WIFI_NATIVE,
+                                             0x0, 0x0);
+               btc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC, false, 1);
 
-                       common = true;
-               }
-               btc8821a2ant_sw_mech1(btcoexist, true, true, true, true);
+               return true;
        }
-       return common;
+       btc8821a2ant_dac_swing(btcoexist, NORMAL_EXEC, true, 0x18);
+       return false;
 }
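
The return value doubles as a "handled" flag: true means one of the two idle special cases (the HID+A2DP "office" profile with high WiFi RSSI, or PAN present) was fully configured here; false restores the default 0x18 DAC swing and lets the caller fall through to the normal state machine, as btc8821a2ant_is_common_action() does below.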
 
-static void btc8821a2_int1(struct btc_coexist *btcoexist, bool tx_pause,
-                          int result)
+static bool btc8821a2ant_is_common_action(struct btc_coexist *btcoexist)
 {
        struct rtl_priv *rtlpriv = btcoexist->adapter;
+       bool common = false, wifi_connected = false, wifi_busy = false;
+       bool low_pwr_disable = false;
+       bool bt_hs_on = false;
+
+       btcoexist->btc_get(btcoexist, BTC_GET_BL_HS_OPERATION, &bt_hs_on);
+       btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_CONNECTED,
+                          &wifi_connected);
+       btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_BUSY, &wifi_busy);
+
+       if (!wifi_connected) {
+               low_pwr_disable = false;
+               btcoexist->btc_set(btcoexist, BTC_SET_ACT_DISABLE_LOW_POWER,
+                                  &low_pwr_disable);
+               btc8821a2ant_limited_rx(btcoexist, NORMAL_EXEC, false, false,
+                                       0x8);
 
-       if (tx_pause) {
                RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
-                        "[BTCoex], TxPause = 1\n");
-
-               if (coex_dm->cur_ps_tdma == 71) {
-                       halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC,
-                                               true, 5);
-                       coex_dm->tdma_adj_type = 5;
-               } else if (coex_dm->cur_ps_tdma == 1) {
-                       halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC,
-                                               true, 5);
-                       coex_dm->tdma_adj_type = 5;
-               } else if (coex_dm->cur_ps_tdma == 2) {
-                       halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC,
-                                               true, 6);
-                       coex_dm->tdma_adj_type = 6;
-               } else if (coex_dm->cur_ps_tdma == 3) {
-                       halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC,
-                                               true, 7);
-                       coex_dm->tdma_adj_type = 7;
-               } else if (coex_dm->cur_ps_tdma == 4) {
-                       halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC,
-                                               true, 8);
-                       coex_dm->tdma_adj_type = 8;
-               }
-               if (coex_dm->cur_ps_tdma == 9) {
-                       halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC,
-                                               true, 13);
-                       coex_dm->tdma_adj_type = 13;
-               } else if (coex_dm->cur_ps_tdma == 10) {
-                       halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC,
-                                               true, 14);
-                       coex_dm->tdma_adj_type = 14;
-               } else if (coex_dm->cur_ps_tdma == 11) {
-                       halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC,
-                                               true, 15);
-                       coex_dm->tdma_adj_type = 15;
-               } else if (coex_dm->cur_ps_tdma == 12) {
-                       halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC,
-                                               true, 16);
-                       coex_dm->tdma_adj_type = 16;
-               }
+                           "[BTCoex], Wifi non-connected idle!!\n");
+
+               btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x1, 0xfffff,
+                                         0x0);
+               btc8821a2ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 0);
+               btc8821a2ant_power_save_state(btcoexist, BTC_PS_WIFI_NATIVE,
+                                             0x0, 0x0);
+               btc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC, false, 1);
+               btc8821a2ant_fw_dac_swing_lvl(btcoexist, NORMAL_EXEC, 6);
+               btc8821a2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, 0);
+
+               btc8821a2ant_sw_mechanism1(btcoexist, false, false, false,
+                                          false);
+               btc8821a2ant_sw_mechanism2(btcoexist, false, false, false,
+                                          0x18);
 
-               if (result == -1) {
-                       if (coex_dm->cur_ps_tdma == 5) {
-                               halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC,
-                                                       true, 6);
-                               coex_dm->tdma_adj_type = 6;
-                       } else if (coex_dm->cur_ps_tdma == 6) {
-                               halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC,
-                                                       true, 7);
-                               coex_dm->tdma_adj_type = 7;
-                       } else if (coex_dm->cur_ps_tdma == 7) {
-                               halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC,
-                                                       true, 8);
-                               coex_dm->tdma_adj_type = 8;
-                       } else if (coex_dm->cur_ps_tdma == 13) {
-                               halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC,
-                                                       true, 14);
-                               coex_dm->tdma_adj_type = 14;
-                       } else if (coex_dm->cur_ps_tdma == 14) {
-                               halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC,
-                                                       true, 15);
-                               coex_dm->tdma_adj_type = 15;
-                       } else if (coex_dm->cur_ps_tdma == 15) {
-                               halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC,
-                                                       true, 16);
-                               coex_dm->tdma_adj_type = 16;
-                       }
-               } else if (result == 1) {
-                       if (coex_dm->cur_ps_tdma == 8) {
-                               halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC,
-                                                       true, 7);
-                               coex_dm->tdma_adj_type = 7;
-                       } else if (coex_dm->cur_ps_tdma == 7) {
-                               halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC,
-                                                       true, 6);
-                               coex_dm->tdma_adj_type = 6;
-                       } else if (coex_dm->cur_ps_tdma == 6) {
-                               halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC,
-                                                       true, 5);
-                               coex_dm->tdma_adj_type = 5;
-                       } else if (coex_dm->cur_ps_tdma == 16) {
-                               halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC,
-                                                       true, 15);
-                               coex_dm->tdma_adj_type = 15;
-                       } else if (coex_dm->cur_ps_tdma == 15) {
-                               halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC,
-                                                       true, 14);
-                               coex_dm->tdma_adj_type = 14;
-                       } else if (coex_dm->cur_ps_tdma == 14) {
-                               halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC,
-                                                       true, 13);
-                               coex_dm->tdma_adj_type = 13;
-                       }
-               }
+               common = true;
        } else {
-               RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
-                        "[BTCoex], TxPause = 0\n");
-               if (coex_dm->cur_ps_tdma == 5) {
-                       halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC,
-                                               true, 71);
-                       coex_dm->tdma_adj_type = 71;
-               } else if (coex_dm->cur_ps_tdma == 6) {
-                       halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC,
-                                               true, 2);
-                       coex_dm->tdma_adj_type = 2;
-               } else if (coex_dm->cur_ps_tdma == 7) {
-                       halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC,
-                                               true, 3);
-                       coex_dm->tdma_adj_type = 3;
-               } else if (coex_dm->cur_ps_tdma == 8) {
-                       halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC,
-                                               true, 4);
-                       coex_dm->tdma_adj_type = 4;
-               }
-               if (coex_dm->cur_ps_tdma == 13) {
-                       halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC,
-                                               true, 9);
-                       coex_dm->tdma_adj_type = 9;
-               } else if (coex_dm->cur_ps_tdma == 14) {
-                       halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC,
-                                               true, 10);
-                       coex_dm->tdma_adj_type = 10;
-               } else if (coex_dm->cur_ps_tdma == 15) {
-                       halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC,
-                                               true, 11);
-                       coex_dm->tdma_adj_type = 11;
-               } else if (coex_dm->cur_ps_tdma == 16) {
-                       halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC,
-                                               true, 12);
-                       coex_dm->tdma_adj_type = 12;
-               }
+               if (BT_8821A_2ANT_BT_STATUS_IDLE ==
+                   coex_dm->bt_status) {
+                       low_pwr_disable = false;
+                       btcoexist->btc_set(btcoexist,
+                                          BTC_SET_ACT_DISABLE_LOW_POWER,
+                                          &low_pwr_disable);
+                       btc8821a2ant_limited_rx(btcoexist, NORMAL_EXEC,
+                                               false, false, 0x8);
 
-               if (result == -1) {
-                       if (coex_dm->cur_ps_tdma == 71) {
-                               halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC,
-                                                       true, 1);
-                               coex_dm->tdma_adj_type = 1;
-                       } else if (coex_dm->cur_ps_tdma == 1) {
-                               halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC,
-                                                       true, 2);
-                               coex_dm->tdma_adj_type = 2;
-                       } else if (coex_dm->cur_ps_tdma == 2) {
-                               halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC,
-                                                       true, 3);
-                               coex_dm->tdma_adj_type = 3;
-                       } else if (coex_dm->cur_ps_tdma == 3) {
-                               halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC,
-                                                       true, 4);
-                               coex_dm->tdma_adj_type = 4;
-                       } else if (coex_dm->cur_ps_tdma == 9) {
-                               halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC,
-                                                       true, 10);
-                               coex_dm->tdma_adj_type = 10;
-                       } else if (coex_dm->cur_ps_tdma == 10) {
-                               halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC,
-                                                       true, 11);
-                               coex_dm->tdma_adj_type = 11;
-                       } else if (coex_dm->cur_ps_tdma == 11) {
-                               halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC,
-                                                       true, 12);
-                               coex_dm->tdma_adj_type = 12;
-                       }
-               } else if (result == 1) {
-                       if (coex_dm->cur_ps_tdma == 4) {
-                               halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC,
-                                                       true, 3);
-                               coex_dm->tdma_adj_type = 3;
-                       } else if (coex_dm->cur_ps_tdma == 3) {
-                               halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC,
-                                                       true, 2);
-                               coex_dm->tdma_adj_type = 2;
-                       } else if (coex_dm->cur_ps_tdma == 2) {
-                               halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC,
-                                                       true, 1);
-                               coex_dm->tdma_adj_type = 1;
-                       } else if (coex_dm->cur_ps_tdma == 1) {
-                               halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC,
-                                                       true, 71);
-                               coex_dm->tdma_adj_type = 71;
-                       } else if (coex_dm->cur_ps_tdma == 12) {
-                               halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC,
-                                                       true, 11);
-                               coex_dm->tdma_adj_type = 11;
-                       } else if (coex_dm->cur_ps_tdma == 11) {
-                               halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC,
-                                                       true, 10);
-                               coex_dm->tdma_adj_type = 10;
-                       } else if (coex_dm->cur_ps_tdma == 10) {
-                               halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC,
-                                                       true, 9);
-                               coex_dm->tdma_adj_type = 9;
-                       }
-               }
-       }
-}
+                       RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                                "[BTCoex], Wifi connected + BT non connected-idle!!\n");
 
-static void btc8821a2_int2(struct btc_coexist *btcoexist, bool tx_pause,
-                          int result)
-{
-       struct rtl_priv *rtlpriv = btcoexist->adapter;
+                       btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x1,
+                                                 0xfffff, 0x0);
+                       btc8821a2ant_coex_table_with_type(btcoexist,
+                                                         NORMAL_EXEC, 0);
 
-       if (tx_pause) {
-               RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
-                        "[BTCoex], TxPause = 1\n");
-               if (coex_dm->cur_ps_tdma == 1) {
-                       halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC,
-                                               true, 6);
-                       coex_dm->tdma_adj_type = 6;
-               } else if (coex_dm->cur_ps_tdma == 2) {
-                       halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC,
-                                               true, 6);
-                       coex_dm->tdma_adj_type = 6;
-               } else if (coex_dm->cur_ps_tdma == 3) {
-                       halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC,
-                                               true, 7);
-                       coex_dm->tdma_adj_type = 7;
-               } else if (coex_dm->cur_ps_tdma == 4) {
-                       halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC,
-                                               true, 8);
-                       coex_dm->tdma_adj_type = 8;
-               }
-               if (coex_dm->cur_ps_tdma == 9) {
-                       halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC,
-                                               true, 14);
-                       coex_dm->tdma_adj_type = 14;
-               } else if (coex_dm->cur_ps_tdma == 10) {
-                       halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC,
-                                               true, 14);
-                       coex_dm->tdma_adj_type = 14;
-               } else if (coex_dm->cur_ps_tdma == 11) {
-                       halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC,
-                                               true, 15);
-                       coex_dm->tdma_adj_type = 15;
-               } else if (coex_dm->cur_ps_tdma == 12) {
-                       halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC,
-                                               true, 16);
-                       coex_dm->tdma_adj_type = 16;
-               }
-               if (result == -1) {
-                       if (coex_dm->cur_ps_tdma == 5) {
-                               halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC,
-                                                       true, 6);
-                               coex_dm->tdma_adj_type = 6;
-                       } else if (coex_dm->cur_ps_tdma == 6) {
-                               halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC,
-                                                       true, 7);
-                               coex_dm->tdma_adj_type = 7;
-                       } else if (coex_dm->cur_ps_tdma == 7) {
-                               halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC,
-                                                       true, 8);
-                               coex_dm->tdma_adj_type = 8;
-                       } else if (coex_dm->cur_ps_tdma == 13) {
-                               halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC,
-                                                       true, 14);
-                               coex_dm->tdma_adj_type = 14;
-                       } else if (coex_dm->cur_ps_tdma == 14) {
-                               halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC,
-                                                       true, 15);
-                               coex_dm->tdma_adj_type = 15;
-                       } else if (coex_dm->cur_ps_tdma == 15) {
-                               halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC,
-                                                       true, 16);
-                               coex_dm->tdma_adj_type = 16;
-                       }
-               } else if (result == 1) {
-                       if (coex_dm->cur_ps_tdma == 8) {
-                               halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC,
-                                                       true, 7);
-                               coex_dm->tdma_adj_type = 7;
-                       } else if (coex_dm->cur_ps_tdma == 7) {
-                               halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC,
-                                                       true, 6);
-                               coex_dm->tdma_adj_type = 6;
-                       } else if (coex_dm->cur_ps_tdma == 6) {
-                               halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC,
-                                                       true, 6);
-                               coex_dm->tdma_adj_type = 6;
-                       } else if (coex_dm->cur_ps_tdma == 16) {
-                               halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC,
-                                                       true, 15);
-                               coex_dm->tdma_adj_type = 15;
-                       } else if (coex_dm->cur_ps_tdma == 15) {
-                               halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC,
-                                                       true, 14);
-                               coex_dm->tdma_adj_type = 14;
-                       } else if (coex_dm->cur_ps_tdma == 14) {
-                               halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC,
-                                                       true, 14);
-                               coex_dm->tdma_adj_type = 14;
-                       }
-               }
-       } else {
-               RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
-                        "[BTCoex], TxPause = 0\n");
-               if (coex_dm->cur_ps_tdma == 5) {
-                       halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC,
-                                               true, 2);
-                       coex_dm->tdma_adj_type = 2;
-               } else if (coex_dm->cur_ps_tdma == 6) {
-                       halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC,
-                                               true, 2);
-                       coex_dm->tdma_adj_type = 2;
-               } else if (coex_dm->cur_ps_tdma == 7) {
-                       halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC,
-                                               true, 3);
-                       coex_dm->tdma_adj_type = 3;
-               } else if (coex_dm->cur_ps_tdma == 8) {
-                       halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC,
-                                               true, 4);
-                       coex_dm->tdma_adj_type = 4;
-               }
-               if (coex_dm->cur_ps_tdma == 13) {
-                       halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC,
-                                               true, 10);
-                       coex_dm->tdma_adj_type = 10;
-               } else if (coex_dm->cur_ps_tdma == 14) {
-                       halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC,
-                                               true, 10);
-                       coex_dm->tdma_adj_type = 10;
-               } else if (coex_dm->cur_ps_tdma == 15) {
-                       halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC,
-                                               true, 11);
-                       coex_dm->tdma_adj_type = 11;
-               } else if (coex_dm->cur_ps_tdma == 16) {
-                       halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC,
-                                               true, 12);
-                       coex_dm->tdma_adj_type = 12;
-               }
-               if (result == -1) {
-                       if (coex_dm->cur_ps_tdma == 1) {
-                               halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC,
-                                                       true, 2);
-                               coex_dm->tdma_adj_type = 2;
-                       } else if (coex_dm->cur_ps_tdma == 2) {
-                               halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC,
-                                                       true, 3);
-                               coex_dm->tdma_adj_type = 3;
-                       } else if (coex_dm->cur_ps_tdma == 3) {
-                               halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC,
-                                                       true, 4);
-                               coex_dm->tdma_adj_type = 4;
-                       } else if (coex_dm->cur_ps_tdma == 9) {
-                               halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC,
-                                                       true, 10);
-                               coex_dm->tdma_adj_type = 10;
-                       } else if (coex_dm->cur_ps_tdma == 10) {
-                               halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC,
-                                                       true, 11);
-                               coex_dm->tdma_adj_type = 11;
-                       } else if (coex_dm->cur_ps_tdma == 11) {
-                               halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC,
-                                                       true, 12);
-                               coex_dm->tdma_adj_type = 12;
-                       }
-               } else if (result == 1) {
-                       if (coex_dm->cur_ps_tdma == 4) {
-                               halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC,
-                                                       true, 3);
-                               coex_dm->tdma_adj_type = 3;
-                       } else if (coex_dm->cur_ps_tdma == 3) {
-                               halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC,
-                                                       true, 2);
-                               coex_dm->tdma_adj_type = 2;
-                       } else if (coex_dm->cur_ps_tdma == 2) {
-                               halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC,
-                                                       true, 2);
-                               coex_dm->tdma_adj_type = 2;
-                       } else if (coex_dm->cur_ps_tdma == 12) {
-                               halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC,
-                                                       true, 11);
-                               coex_dm->tdma_adj_type = 11;
-                       } else if (coex_dm->cur_ps_tdma == 11) {
-                               halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC,
-                                                       true, 10);
-                               coex_dm->tdma_adj_type = 10;
-                       } else if (coex_dm->cur_ps_tdma == 10) {
-                               halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC,
-                                                       true, 10);
-                               coex_dm->tdma_adj_type = 10;
-                       }
-               }
-       }
-}
+                       btc8821a2ant_power_save_state(
+                               btcoexist, BTC_PS_WIFI_NATIVE, 0x0, 0x0);
+                       btc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC, false, 1);
+                       btc8821a2ant_fw_dac_swing_lvl(btcoexist, NORMAL_EXEC,
+                                                     0xb);
+                       btc8821a2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, 0);
 
-static void btc8821a2_int3(struct btc_coexist *btcoexist, bool tx_pause,
-                          int result)
-{
-       struct rtl_priv *rtlpriv = btcoexist->adapter;
+                       btc8821a2ant_sw_mechanism1(btcoexist, false, false,
+                                                  false, false);
+                       btc8821a2ant_sw_mechanism2(btcoexist, false, false,
+                                                  false, 0x18);
 
-       if (tx_pause) {
-               RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
-                        "[BTCoex], TxPause = 1\n");
-               if (coex_dm->cur_ps_tdma == 1) {
-                       halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC,
-                                               true, 7);
-                       coex_dm->tdma_adj_type = 7;
-               } else if (coex_dm->cur_ps_tdma == 2) {
-                       halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC,
-                                               true, 7);
-                       coex_dm->tdma_adj_type = 7;
-               } else if (coex_dm->cur_ps_tdma == 3) {
-                       halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC,
-                                               true, 7);
-                       coex_dm->tdma_adj_type = 7;
-               } else if (coex_dm->cur_ps_tdma == 4) {
-                       halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC,
-                                               true, 8);
-                       coex_dm->tdma_adj_type = 8;
-               }
-               if (coex_dm->cur_ps_tdma == 9) {
-                       halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC,
-                                               true, 15);
-                       coex_dm->tdma_adj_type = 15;
-               } else if (coex_dm->cur_ps_tdma == 10) {
-                       halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC,
-                                               true, 15);
-                       coex_dm->tdma_adj_type = 15;
-               } else if (coex_dm->cur_ps_tdma == 11) {
-                       halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC,
-                                               true, 15);
-                       coex_dm->tdma_adj_type = 15;
-               } else if (coex_dm->cur_ps_tdma == 12) {
-                       halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC,
-                                               true, 16);
-                       coex_dm->tdma_adj_type = 16;
-               }
-               if (result == -1) {
-                       if (coex_dm->cur_ps_tdma == 5) {
-                               halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC,
-                                                       true, 7);
-                               coex_dm->tdma_adj_type = 7;
-                       } else if (coex_dm->cur_ps_tdma == 6) {
-                               halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC,
-                                                       true, 7);
-                               coex_dm->tdma_adj_type = 7;
-                       } else if (coex_dm->cur_ps_tdma == 7) {
-                               halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC,
-                                                       true, 8);
-                               coex_dm->tdma_adj_type = 8;
-                       } else if (coex_dm->cur_ps_tdma == 13) {
-                               halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC,
-                                                       true, 15);
-                               coex_dm->tdma_adj_type = 15;
-                       } else if (coex_dm->cur_ps_tdma == 14) {
-                               halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC,
-                                                       true, 15);
-                               coex_dm->tdma_adj_type = 15;
-                       } else if (coex_dm->cur_ps_tdma == 15) {
-                               halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC,
-                                                       true, 16);
-                               coex_dm->tdma_adj_type = 16;
-                       }
-               } else if (result == 1) {
-                       if (coex_dm->cur_ps_tdma == 8) {
-                               halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC,
-                                                       true, 7);
-                               coex_dm->tdma_adj_type = 7;
-                       } else if (coex_dm->cur_ps_tdma == 7) {
-                               halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC,
-                                                       true, 7);
-                               coex_dm->tdma_adj_type = 7;
-                       } else if (coex_dm->cur_ps_tdma == 6) {
-                               halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC,
-                                                       true, 7);
-                               coex_dm->tdma_adj_type = 7;
-                       } else if (coex_dm->cur_ps_tdma == 16) {
-                               halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC,
-                                                       true, 15);
-                               coex_dm->tdma_adj_type = 15;
-                       } else if (coex_dm->cur_ps_tdma == 15) {
-                               halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC,
-                                                       true, 15);
-                               coex_dm->tdma_adj_type = 15;
-                       } else if (coex_dm->cur_ps_tdma == 14) {
-                               halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC,
-                                                       true, 15);
-                               coex_dm->tdma_adj_type = 15;
-                       }
-               }
-       } else {
-               RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
-                        "[BTCoex], TxPause = 0\n");
-               if (coex_dm->cur_ps_tdma == 5) {
-                       halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC,
-                                               true, 3);
-                       coex_dm->tdma_adj_type = 3;
-               } else if (coex_dm->cur_ps_tdma == 6) {
-                       halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC,
-                                               true, 3);
-                       coex_dm->tdma_adj_type = 3;
-               } else if (coex_dm->cur_ps_tdma == 7) {
-                       halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC,
-                                               true, 3);
-                       coex_dm->tdma_adj_type = 3;
-               } else if (coex_dm->cur_ps_tdma == 8) {
-                       halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC,
-                                               true, 4);
-                       coex_dm->tdma_adj_type = 4;
-               }
-               if (coex_dm->cur_ps_tdma == 13) {
-                       halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC,
-                                               true, 11);
-                       coex_dm->tdma_adj_type = 11;
-               } else if (coex_dm->cur_ps_tdma == 14) {
-                       halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC,
-                                               true, 11);
-                       coex_dm->tdma_adj_type = 11;
-               } else if (coex_dm->cur_ps_tdma == 15) {
-                       halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC,
-                                               true, 11);
-                       coex_dm->tdma_adj_type = 11;
-               } else if (coex_dm->cur_ps_tdma == 16) {
-                       halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC,
-                                               true, 12);
-                       coex_dm->tdma_adj_type = 12;
-               }
-               if (result == -1) {
-                       if (coex_dm->cur_ps_tdma == 1) {
-                               halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC,
-                                                       true, 3);
-                               coex_dm->tdma_adj_type = 3;
-                       } else if (coex_dm->cur_ps_tdma == 2) {
-                               halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC,
-                                                       true, 3);
-                               coex_dm->tdma_adj_type = 3;
-                       } else if (coex_dm->cur_ps_tdma == 3) {
-                               halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC,
-                                                       true, 4);
-                               coex_dm->tdma_adj_type = 4;
-                       } else if (coex_dm->cur_ps_tdma == 9) {
-                               halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC,
-                                                       true, 11);
-                               coex_dm->tdma_adj_type = 11;
-                       } else if (coex_dm->cur_ps_tdma == 10) {
-                               halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC,
-                                                       true, 11);
-                               coex_dm->tdma_adj_type = 11;
-                       } else if (coex_dm->cur_ps_tdma == 11) {
-                               halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC,
-                                                       true, 12);
-                               coex_dm->tdma_adj_type = 12;
-                       }
-               } else if (result == 1) {
-                       if (coex_dm->cur_ps_tdma == 4) {
-                               halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC,
-                                                       true, 3);
-                               coex_dm->tdma_adj_type = 3;
-                       } else if (coex_dm->cur_ps_tdma == 3) {
-                               halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC,
-                                                       true, 3);
-                               coex_dm->tdma_adj_type = 3;
-                       } else if (coex_dm->cur_ps_tdma == 2) {
-                               halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC,
-                                                       true, 3);
-                               coex_dm->tdma_adj_type = 3;
-                       } else if (coex_dm->cur_ps_tdma == 12) {
-                               halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC,
-                                                       true, 11);
-                               coex_dm->tdma_adj_type = 11;
-                       } else if (coex_dm->cur_ps_tdma == 11) {
-                               halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC,
-                                                       true, 11);
-                               coex_dm->tdma_adj_type = 11;
-                       } else if (coex_dm->cur_ps_tdma == 10) {
-                               halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC,
-                                                       true, 11);
-                               coex_dm->tdma_adj_type = 11;
+                       common = true;
+               } else if (BT_8821A_2ANT_BT_STATUS_CON_IDLE ==
+                          coex_dm->bt_status) {
+                       low_pwr_disable = true;
+                       btcoexist->btc_set(btcoexist,
+                                          BTC_SET_ACT_DISABLE_LOW_POWER,
+                                          &low_pwr_disable);
+
+                       if (bt_hs_on)
+                               return false;
+                       RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                                "[BTCoex], Wifi connected + BT connected-idle!!\n");
+                       btc8821a2ant_limited_rx(btcoexist, NORMAL_EXEC,
+                                               false, false, 0x8);
+
+                       btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x1,
+                                                 0xfffff, 0x0);
+                       btc8821a2ant_coex_table_with_type(btcoexist,
+                                                         NORMAL_EXEC, 0);
+
+                       btc8821a2ant_power_save_state(
+                               btcoexist, BTC_PS_WIFI_NATIVE, 0x0, 0x0);
+                       btc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC, false, 1);
+                       btc8821a2ant_fw_dac_swing_lvl(btcoexist, NORMAL_EXEC,
+                                                     0xb);
+                       btc8821a2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, 0);
+
+                       btc8821a2ant_sw_mechanism1(btcoexist, true, false,
+                                                  false, false);
+                       btc8821a2ant_sw_mechanism2(btcoexist, false, false,
+                                                  false, 0x18);
+                       common = true;
+               } else {
+                       low_pwr_disable = true;
+                       btcoexist->btc_set(btcoexist,
+                                          BTC_SET_ACT_DISABLE_LOW_POWER,
+                                          &low_pwr_disable);
+
+                       if (wifi_busy) {
+                               RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                                        "[BTCoex], Wifi Connected-Busy + BT Busy!!\n");
+                               common = false;
+                       } else {
+                               RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                                        "[BTCoex], Wifi Connected-Idle + BT Busy!!\n");
+                               common =
+                                   btc8821a2ant_action_wifi_idle_process(
+                                            btcoexist);
                        }
                }
        }
+       return common;
 }
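
The hunks above and below only show fragments of the retry-feedback loop that drives `result` in btc8821a2ant_tdma_duration_adjust(), so here is a minimal, compilable model of that logic outside the driver. It is a sketch under assumptions: it mirrors the usual first-run reset (m = 1, n = 3, counters zeroed), and the names tdma_feedback_state and adjust_result are hypothetical, not part of rtlwifi.

#include <stdio.h>

/* Hedged sketch, not driver code: the real driver keeps up/dn/m/n/wait_count
 * as function-local statics inside btc8821a2ant_tdma_duration_adjust(). */
struct tdma_feedback_state {
	long up, dn, m, n, wait_count;
};

static int adjust_result(struct tdma_feedback_state *s, int retry_count)
{
	int result = 0;	/* 0: no change, +1: widen WiFi slot, -1: shrink it */

	s->wait_count++;

	if (retry_count == 0) {
		/* Clean interval: after n consecutive clean ones, widen. */
		s->up++;
		s->dn--;
		if (s->dn <= 0)
			s->dn = 0;
		if (s->up >= s->n) {
			s->up = 0;
			s->dn = 0;
			s->wait_count = 0;
			result = 1;
		}
	} else if (retry_count <= 3) {
		/* Light retries: two hits inside the wait window shrink the
		 * WiFi slot and grow the back-off window (n = 3 * m). */
		s->up--;
		if (s->up <= 0)
			s->up = 0;
		s->dn++;
		if (s->dn == 2) {
			if (s->wait_count <= 2)
				s->m++;
			else
				s->m = 1;
			if (s->m >= 20)
				s->m = 20;
			s->n = 3 * s->m;
			s->up = 0;
			s->dn = 0;
			s->wait_count = 0;
			result = -1;
		}
	} else {
		/* Heavy retries (> 3): shrink immediately and back off. */
		if (s->wait_count == 1)
			s->m++;
		else
			s->m = 1;
		if (s->m >= 20)
			s->m = 20;
		s->n = 3 * s->m;
		s->up = 0;
		s->dn = 0;
		s->wait_count = 0;
		result = -1;
	}
	return result;
}

int main(void)
{
	/* First-run reset mirrored from the driver: m = 1, n = 3. */
	struct tdma_feedback_state s = { 0, 0, 1, 3, 0 };
	int retries[] = { 0, 0, 0, 2, 2, 5, 0 };
	unsigned int i;

	for (i = 0; i < sizeof(retries) / sizeof(retries[0]); i++)
		printf("retry=%d -> result=%d\n",
		       retries[i], adjust_result(&s, retries[i]));
	return 0;
}

Driven this way, two lightly-retried intervals inside the wait window shrink the WiFi slot and triple the back-off window, while a run of clean intervals widens it again; this mirrors the counter updates visible in the surviving context lines of the hunks below.
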
 
-static void btc8821a2ant_tdma_dur_adj(struct btc_coexist *btcoexist,
-                                     bool sco_hid, bool tx_pause,
-                                     u8 max_interval)
+static void btc8821a2ant_tdma_duration_adjust(struct btc_coexist *btcoexist,
+                                             bool sco_hid, bool tx_pause,
+                                             u8 max_interval)
 {
        struct rtl_priv *rtlpriv = btcoexist->adapter;
-       static long     up, dn, m, n, wait_count;
-        /* 0: no change, +1: increase WiFi duration,
+       static long up, dn, m, n, wait_count;
+        /* 0 : no change
+         * +1: increase WiFi duration
          * -1: decrease WiFi duration
          */
-       int             result;
-       u8              retry_count = 0;
+       int result;
+       u8 retry_count = 0;
 
        RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
                 "[BTCoex], TdmaDurationAdjust()\n");
 
-       if (coex_dm->reset_tdma_adjust) {
-               coex_dm->reset_tdma_adjust = false;
+       if (coex_dm->auto_tdma_adjust) {
+               coex_dm->auto_tdma_adjust = false;
                RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
                         "[BTCoex], first run TdmaDurationAdjust()!!\n");
                if (sco_hid) {
                        if (tx_pause) {
                                if (max_interval == 1) {
-                                       halbtc8821a2ant_ps_tdma(btcoexist,
-                                                               NORMAL_EXEC,
-                                                               true, 13);
-                                       coex_dm->tdma_adj_type = 13;
+                                       btc8821a2ant_ps_tdma(btcoexist,
+                                                       NORMAL_EXEC, true, 13);
+                                       coex_dm->ps_tdma_du_adj_type = 13;
                                } else if (max_interval == 2) {
-                                       halbtc8821a2ant_ps_tdma(btcoexist,
-                                                               NORMAL_EXEC,
-                                                               true, 14);
-                                       coex_dm->tdma_adj_type = 14;
+                                       btc8821a2ant_ps_tdma(btcoexist,
+                                                       NORMAL_EXEC, true, 14);
+                                       coex_dm->ps_tdma_du_adj_type = 14;
+                               } else if (max_interval == 3) {
+                                       btc8821a2ant_ps_tdma(btcoexist,
+                                                       NORMAL_EXEC, true, 15);
+                                       coex_dm->ps_tdma_du_adj_type = 15;
                                } else {
-                                       halbtc8821a2ant_ps_tdma(btcoexist,
-                                                               NORMAL_EXEC,
-                                                               true, 15);
-                                       coex_dm->tdma_adj_type = 15;
+                                       btc8821a2ant_ps_tdma(btcoexist,
+                                                       NORMAL_EXEC, true, 15);
+                                       coex_dm->ps_tdma_du_adj_type = 15;
                                }
                        } else {
                                if (max_interval == 1) {
-                                       halbtc8821a2ant_ps_tdma(btcoexist,
-                                                               NORMAL_EXEC,
-                                                               true, 9);
-                                       coex_dm->tdma_adj_type = 9;
+                                       btc8821a2ant_ps_tdma(btcoexist,
+                                                       NORMAL_EXEC, true, 9);
+                                       coex_dm->ps_tdma_du_adj_type = 9;
                                } else if (max_interval == 2) {
-                                       halbtc8821a2ant_ps_tdma(btcoexist,
-                                                               NORMAL_EXEC,
-                                                               true, 10);
-                                       coex_dm->tdma_adj_type = 10;
+                                       btc8821a2ant_ps_tdma(btcoexist,
+                                                       NORMAL_EXEC, true, 10);
+                                       coex_dm->ps_tdma_du_adj_type = 10;
+                               } else if (max_interval == 3) {
+                                       btc8821a2ant_ps_tdma(btcoexist,
+                                                       NORMAL_EXEC, true, 11);
+                                       coex_dm->ps_tdma_du_adj_type = 11;
                                } else {
-                                       halbtc8821a2ant_ps_tdma(btcoexist,
-                                                               NORMAL_EXEC,
-                                                               true, 11);
-                                       coex_dm->tdma_adj_type = 11;
+                                       btc8821a2ant_ps_tdma(btcoexist,
+                                                       NORMAL_EXEC, true, 11);
+                                       coex_dm->ps_tdma_du_adj_type = 11;
                                }
                        }
                } else {
                        if (tx_pause) {
                                if (max_interval == 1) {
-                                       halbtc8821a2ant_ps_tdma(btcoexist,
-                                                               NORMAL_EXEC,
-                                                               true, 5);
-                                       coex_dm->tdma_adj_type = 5;
+                                       btc8821a2ant_ps_tdma(btcoexist,
+                                                       NORMAL_EXEC, true, 5);
+                                       coex_dm->ps_tdma_du_adj_type = 5;
                                } else if (max_interval == 2) {
-                                       halbtc8821a2ant_ps_tdma(btcoexist,
-                                                               NORMAL_EXEC,
-                                                               true, 6);
-                                       coex_dm->tdma_adj_type = 6;
+                                       btc8821a2ant_ps_tdma(btcoexist,
+                                                       NORMAL_EXEC, true, 6);
+                                       coex_dm->ps_tdma_du_adj_type = 6;
+                               } else if (max_interval == 3) {
+                                       btc8821a2ant_ps_tdma(btcoexist,
+                                                       NORMAL_EXEC, true, 7);
+                                       coex_dm->ps_tdma_du_adj_type = 7;
                                } else {
-                                       halbtc8821a2ant_ps_tdma(btcoexist,
-                                                               NORMAL_EXEC,
-                                                               true, 7);
-                                       coex_dm->tdma_adj_type = 7;
+                                       btc8821a2ant_ps_tdma(btcoexist,
+                                                       NORMAL_EXEC, true, 7);
+                                       coex_dm->ps_tdma_du_adj_type = 7;
                                }
                        } else {
                                if (max_interval == 1) {
-                                       halbtc8821a2ant_ps_tdma(btcoexist,
-                                                               NORMAL_EXEC,
-                                                               true, 1);
-                                       coex_dm->tdma_adj_type = 1;
+                                       btc8821a2ant_ps_tdma(btcoexist,
+                                                       NORMAL_EXEC, true, 1);
+                                       coex_dm->ps_tdma_du_adj_type = 1;
                                } else if (max_interval == 2) {
-                                       halbtc8821a2ant_ps_tdma(btcoexist,
-                                                               NORMAL_EXEC,
-                                                               true, 2);
-                                       coex_dm->tdma_adj_type = 2;
+                                       btc8821a2ant_ps_tdma(btcoexist,
+                                                       NORMAL_EXEC, true, 2);
+                                       coex_dm->ps_tdma_du_adj_type = 2;
+                               } else if (max_interval == 3) {
+                                       btc8821a2ant_ps_tdma(btcoexist,
+                                                       NORMAL_EXEC, true, 3);
+                                       coex_dm->ps_tdma_du_adj_type = 3;
                                } else {
-                                       halbtc8821a2ant_ps_tdma(btcoexist,
-                                                               NORMAL_EXEC,
-                                                               true, 3);
-                                       coex_dm->tdma_adj_type = 3;
+                                       btc8821a2ant_ps_tdma(btcoexist,
+                                                       NORMAL_EXEC, true, 3);
+                                       coex_dm->ps_tdma_du_adj_type = 3;
                                }
                        }
                }
@@ -2273,7 +1860,7 @@ static void btc8821a2ant_tdma_dur_adj(struct btc_coexist *btcoexist,
                                up = 0;
 
                        if (dn == 2) {
-                               /* if retry count< 3 for 2*2 seconds,
+                               /* if retry count < 3 for 2*2 seconds,
                                 * shrink wifi duration
                                 */
                                if (wait_count <= 2)
@@ -2286,7 +1873,7 @@ static void btc8821a2ant_tdma_dur_adj(struct btc_coexist *btcoexist,
                                if (m >= 20)
                                        m = 20;
 
-                               n = 3*m;
+                               n = 3 * m;
                                up = 0;
                                dn = 0;
                                wait_count = 0;
@@ -2308,7 +1895,7 @@ static void btc8821a2ant_tdma_dur_adj(struct btc_coexist *btcoexist,
                        if (m >= 20)
                                m = 20;
 
-                       n = 3*m;
+                       n = 3 * m;
                        up = 0;
                        dn = 0;
                        wait_count = 0;
@@ -2316,627 +1903,1316 @@ static void btc8821a2ant_tdma_dur_adj(struct btc_coexist *btcoexist,
                        RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
                                 "[BTCoex], Decrease wifi duration for retryCounter>3!!\n");
                }
-
-               RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
-                        "[BTCoex], max Interval = %d\n", max_interval);
-               if (max_interval == 1)
-                       btc8821a2_int1(btcoexist, tx_pause, result);
-               else if (max_interval == 2)
-                       btc8821a2_int2(btcoexist, tx_pause, result);
-               else if (max_interval == 3)
-                       btc8821a2_int3(btcoexist, tx_pause, result);
+
+               RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                        "[BTCoex], max Interval = %d\n", max_interval);
+
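+                       /* Each interval ladder below first re-anchors the
+                        * running PS TDMA case to the current tx_pause
+                        * family (1-4/71 -> 5-8, 9-12 -> 13-16), then steps
+                        * it with result: -1 moves toward shorter WiFi
+                        * slots (higher case numbers), +1 back toward
+                        * longer ones.
+                        */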
+               if (max_interval == 1) {
+                       if (tx_pause) {
+                               if (coex_dm->cur_ps_tdma == 71) {
+                                       btc8821a2ant_ps_tdma(btcoexist,
+                                                       NORMAL_EXEC, true, 5);
+                                       coex_dm->ps_tdma_du_adj_type = 5;
+                               } else if (coex_dm->cur_ps_tdma == 1) {
+                                       btc8821a2ant_ps_tdma(btcoexist,
+                                                       NORMAL_EXEC, true, 5);
+                                       coex_dm->ps_tdma_du_adj_type = 5;
+                               } else if (coex_dm->cur_ps_tdma == 2) {
+                                       btc8821a2ant_ps_tdma(btcoexist,
+                                                       NORMAL_EXEC, true, 6);
+                                       coex_dm->ps_tdma_du_adj_type = 6;
+                               } else if (coex_dm->cur_ps_tdma == 3) {
+                                       btc8821a2ant_ps_tdma(btcoexist,
+                                                       NORMAL_EXEC, true, 7);
+                                       coex_dm->ps_tdma_du_adj_type = 7;
+                               } else if (coex_dm->cur_ps_tdma == 4) {
+                                       btc8821a2ant_ps_tdma(btcoexist,
+                                                       NORMAL_EXEC, true, 8);
+                                       coex_dm->ps_tdma_du_adj_type = 8;
+                               }
+                               if (coex_dm->cur_ps_tdma == 9) {
+                                       btc8821a2ant_ps_tdma(btcoexist,
+                                                       NORMAL_EXEC, true, 13);
+                                       coex_dm->ps_tdma_du_adj_type = 13;
+                               } else if (coex_dm->cur_ps_tdma == 10) {
+                                       btc8821a2ant_ps_tdma(btcoexist,
+                                                       NORMAL_EXEC, true, 14);
+                                       coex_dm->ps_tdma_du_adj_type = 14;
+                               } else if (coex_dm->cur_ps_tdma == 11) {
+                                       btc8821a2ant_ps_tdma(btcoexist,
+                                                       NORMAL_EXEC, true, 15);
+                                       coex_dm->ps_tdma_du_adj_type = 15;
+                               } else if (coex_dm->cur_ps_tdma == 12) {
+                                       btc8821a2ant_ps_tdma(btcoexist,
+                                                       NORMAL_EXEC, true, 16);
+                                       coex_dm->ps_tdma_du_adj_type = 16;
+                               }
+
+                               if (result == -1) {
+                                       if (coex_dm->cur_ps_tdma == 5) {
+                                               btc8821a2ant_ps_tdma(
+                                                       btcoexist, NORMAL_EXEC,
+                                                       true, 6);
+                                               coex_dm->ps_tdma_du_adj_type =
+                                                       6;
+                                       } else if (coex_dm->cur_ps_tdma == 6) {
+                                               btc8821a2ant_ps_tdma(
+                                                       btcoexist, NORMAL_EXEC,
+                                                       true, 7);
+                                               coex_dm->ps_tdma_du_adj_type =
+                                                       7;
+                                       } else if (coex_dm->cur_ps_tdma == 7) {
+                                               btc8821a2ant_ps_tdma(
+                                                       btcoexist, NORMAL_EXEC,
+                                                       true, 8);
+                                               coex_dm->ps_tdma_du_adj_type =
+                                                       8;
+                                       } else if (coex_dm->cur_ps_tdma == 13) {
+                                               btc8821a2ant_ps_tdma(
+                                                       btcoexist, NORMAL_EXEC,
+                                                       true, 14);
+                                               coex_dm->ps_tdma_du_adj_type =
+                                                       14;
+                                       } else if (coex_dm->cur_ps_tdma == 14) {
+                                               btc8821a2ant_ps_tdma(
+                                                       btcoexist, NORMAL_EXEC,
+                                                       true, 15);
+                                               coex_dm->ps_tdma_du_adj_type =
+                                                       15;
+                                       } else if (coex_dm->cur_ps_tdma == 15) {
+                                               btc8821a2ant_ps_tdma(
+                                                       btcoexist, NORMAL_EXEC,
+                                                       true, 16);
+                                               coex_dm->ps_tdma_du_adj_type =
+                                                       16;
+                                       }
+                               } else if (result == 1) {
+                                       if (coex_dm->cur_ps_tdma == 8) {
+                                               btc8821a2ant_ps_tdma(
+                                                       btcoexist, NORMAL_EXEC,
+                                                       true, 7);
+                                               coex_dm->ps_tdma_du_adj_type =
+                                                       7;
+                                       } else if (coex_dm->cur_ps_tdma == 7) {
+                                               btc8821a2ant_ps_tdma(
+                                                       btcoexist, NORMAL_EXEC,
+                                                       true, 6);
+                                               coex_dm->ps_tdma_du_adj_type =
+                                                       6;
+                                       } else if (coex_dm->cur_ps_tdma == 6) {
+                                               btc8821a2ant_ps_tdma(
+                                                       btcoexist, NORMAL_EXEC,
+                                                       true, 5);
+                                               coex_dm->ps_tdma_du_adj_type =
+                                                       5;
+                                       } else if (coex_dm->cur_ps_tdma == 16) {
+                                               btc8821a2ant_ps_tdma(
+                                                       btcoexist, NORMAL_EXEC,
+                                                       true, 15);
+                                               coex_dm->ps_tdma_du_adj_type =
+                                                       15;
+                                       } else if (coex_dm->cur_ps_tdma == 15) {
+                                               btc8821a2ant_ps_tdma(
+                                                       btcoexist, NORMAL_EXEC,
+                                                       true, 14);
+                                               coex_dm->ps_tdma_du_adj_type =
+                                                       14;
+                                       } else if (coex_dm->cur_ps_tdma == 14) {
+                                               btc8821a2ant_ps_tdma(
+                                                       btcoexist, NORMAL_EXEC,
+                                                       true, 13);
+                                               coex_dm->ps_tdma_du_adj_type =
+                                                       13;
+                                       }
+                               }
+                       } else {
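+                               /* tx resumed: map back to the non-pause
+                                * family (5 -> 71 long-slot special,
+                                * 6-8 -> 2-4, 13-16 -> 9-12) before
+                                * stepping with result.
+                                */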
+                               if (coex_dm->cur_ps_tdma == 5) {
+                                       btc8821a2ant_ps_tdma(btcoexist,
+                                                       NORMAL_EXEC, true, 71);
+                                       coex_dm->ps_tdma_du_adj_type = 71;
+                               } else if (coex_dm->cur_ps_tdma == 6) {
+                                       btc8821a2ant_ps_tdma(btcoexist,
+                                                       NORMAL_EXEC, true, 2);
+                                       coex_dm->ps_tdma_du_adj_type = 2;
+                               } else if (coex_dm->cur_ps_tdma == 7) {
+                                       btc8821a2ant_ps_tdma(btcoexist,
+                                                       NORMAL_EXEC, true, 3);
+                                       coex_dm->ps_tdma_du_adj_type = 3;
+                               } else if (coex_dm->cur_ps_tdma == 8) {
+                                       btc8821a2ant_ps_tdma(btcoexist,
+                                                       NORMAL_EXEC, true, 4);
+                                       coex_dm->ps_tdma_du_adj_type = 4;
+                               }
+                               if (coex_dm->cur_ps_tdma == 13) {
+                                       btc8821a2ant_ps_tdma(btcoexist,
+                                                       NORMAL_EXEC, true, 9);
+                                       coex_dm->ps_tdma_du_adj_type = 9;
+                               } else if (coex_dm->cur_ps_tdma == 14) {
+                                       btc8821a2ant_ps_tdma(btcoexist,
+                                                       NORMAL_EXEC, true, 10);
+                                       coex_dm->ps_tdma_du_adj_type = 10;
+                               } else if (coex_dm->cur_ps_tdma == 15) {
+                                       btc8821a2ant_ps_tdma(btcoexist,
+                                                       NORMAL_EXEC, true, 11);
+                                       coex_dm->ps_tdma_du_adj_type = 11;
+                               } else if (coex_dm->cur_ps_tdma == 16) {
+                                       btc8821a2ant_ps_tdma(btcoexist,
+                                                       NORMAL_EXEC, true, 12);
+                                       coex_dm->ps_tdma_du_adj_type = 12;
+                               }
+
+                               if (result == -1) {
+                                       if (coex_dm->cur_ps_tdma == 71) {
+                                               btc8821a2ant_ps_tdma(
+                                                       btcoexist, NORMAL_EXEC,
+                                                       true, 1);
+                                               coex_dm->ps_tdma_du_adj_type =
+                                                       1;
+                                       } else if (coex_dm->cur_ps_tdma == 1) {
+                                               btc8821a2ant_ps_tdma(
+                                                       btcoexist, NORMAL_EXEC,
+                                                       true, 2);
+                                               coex_dm->ps_tdma_du_adj_type =
+                                                       2;
+                                       } else if (coex_dm->cur_ps_tdma == 2) {
+                                               btc8821a2ant_ps_tdma(
+                                                       btcoexist, NORMAL_EXEC,
+                                                       true, 3);
+                                               coex_dm->ps_tdma_du_adj_type =
+                                                       3;
+                                       } else if (coex_dm->cur_ps_tdma == 3) {
+                                               btc8821a2ant_ps_tdma(
+                                                       btcoexist, NORMAL_EXEC,
+                                                       true, 4);
+                                               coex_dm->ps_tdma_du_adj_type =
+                                                       4;
+                                       } else if (coex_dm->cur_ps_tdma == 9) {
+                                               btc8821a2ant_ps_tdma(
+                                                       btcoexist, NORMAL_EXEC,
+                                                       true, 10);
+                                               coex_dm->ps_tdma_du_adj_type =
+                                                       10;
+                                       } else if (coex_dm->cur_ps_tdma == 10) {
+                                               btc8821a2ant_ps_tdma(
+                                                       btcoexist, NORMAL_EXEC,
+                                                       true, 11);
+                                               coex_dm->ps_tdma_du_adj_type =
+                                                       11;
+                                       } else if (coex_dm->cur_ps_tdma == 11) {
+                                               btc8821a2ant_ps_tdma(
+                                                       btcoexist, NORMAL_EXEC,
+                                                       true, 12);
+                                               coex_dm->ps_tdma_du_adj_type =
+                                                       12;
+                                       }
+                               } else if (result == 1) {
+                                       if (coex_dm->cur_ps_tdma == 4) {
+                                               btc8821a2ant_ps_tdma(
+                                                       btcoexist, NORMAL_EXEC,
+                                                       true, 3);
+                                               coex_dm->ps_tdma_du_adj_type =
+                                                       3;
+                                       } else if (coex_dm->cur_ps_tdma == 3) {
+                                               btc8821a2ant_ps_tdma(
+                                                       btcoexist, NORMAL_EXEC,
+                                                       true, 2);
+                                               coex_dm->ps_tdma_du_adj_type =
+                                                       2;
+                                       } else if (coex_dm->cur_ps_tdma == 2) {
+                                               btc8821a2ant_ps_tdma(
+                                                       btcoexist, NORMAL_EXEC,
+                                                       true, 1);
+                                               coex_dm->ps_tdma_du_adj_type =
+                                                       1;
+                                       } else if (coex_dm->cur_ps_tdma == 1) {
+                                               btc8821a2ant_ps_tdma(
+                                                       btcoexist, NORMAL_EXEC,
+                                                       true, 71);
+                                               coex_dm->ps_tdma_du_adj_type =
+                                                       71;
+                                       } else if (coex_dm->cur_ps_tdma == 12) {
+                                               btc8821a2ant_ps_tdma(
+                                                       btcoexist, NORMAL_EXEC,
+                                                       true, 11);
+                                               coex_dm->ps_tdma_du_adj_type =
+                                                       11;
+                                       } else if (coex_dm->cur_ps_tdma == 11) {
+                                               btc8821a2ant_ps_tdma(
+                                                       btcoexist, NORMAL_EXEC,
+                                                       true, 10);
+                                               coex_dm->ps_tdma_du_adj_type =
+                                                       10;
+                                       } else if (coex_dm->cur_ps_tdma == 10) {
+                                               btc8821a2ant_ps_tdma(
+                                                       btcoexist, NORMAL_EXEC,
+                                                       true, 9);
+                                               coex_dm->ps_tdma_du_adj_type =
+                                                       9;
+                                       }
+                               }
+                       }
+               } else if (max_interval == 2) {
+                       if (tx_pause) {
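+                               /* 2-slot interval: same re-anchoring, but
+                                * the result steps saturate at cases 6/14
+                                * (and 2/10 on the non-pause side) instead
+                                * of stepping past them.
+                                */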
+                               if (coex_dm->cur_ps_tdma == 1) {
+                                       btc8821a2ant_ps_tdma(btcoexist,
+                                                       NORMAL_EXEC, true, 6);
+                                       coex_dm->ps_tdma_du_adj_type = 6;
+                               } else if (coex_dm->cur_ps_tdma == 2) {
+                                       btc8821a2ant_ps_tdma(btcoexist,
+                                                       NORMAL_EXEC, true, 6);
+                                       coex_dm->ps_tdma_du_adj_type = 6;
+                               } else if (coex_dm->cur_ps_tdma == 3) {
+                                       btc8821a2ant_ps_tdma(btcoexist,
+                                                       NORMAL_EXEC, true, 7);
+                                       coex_dm->ps_tdma_du_adj_type = 7;
+                               } else if (coex_dm->cur_ps_tdma == 4) {
+                                       btc8821a2ant_ps_tdma(btcoexist,
+                                                       NORMAL_EXEC, true, 8);
+                                       coex_dm->ps_tdma_du_adj_type = 8;
+                               }
+                               if (coex_dm->cur_ps_tdma == 9) {
+                                       btc8821a2ant_ps_tdma(btcoexist,
+                                                       NORMAL_EXEC, true, 14);
+                                       coex_dm->ps_tdma_du_adj_type = 14;
+                               } else if (coex_dm->cur_ps_tdma == 10) {
+                                       btc8821a2ant_ps_tdma(btcoexist,
+                                                       NORMAL_EXEC, true, 14);
+                                       coex_dm->ps_tdma_du_adj_type = 14;
+                               } else if (coex_dm->cur_ps_tdma == 11) {
+                                       btc8821a2ant_ps_tdma(btcoexist,
+                                                       NORMAL_EXEC, true, 15);
+                                       coex_dm->ps_tdma_du_adj_type = 15;
+                               } else if (coex_dm->cur_ps_tdma == 12) {
+                                       btc8821a2ant_ps_tdma(btcoexist,
+                                                       NORMAL_EXEC, true, 16);
+                                       coex_dm->ps_tdma_du_adj_type = 16;
+                               }
+                               if (result == -1) {
+                                       if (coex_dm->cur_ps_tdma == 5) {
+                                               btc8821a2ant_ps_tdma(
+                                                       btcoexist, NORMAL_EXEC,
+                                                       true, 6);
+                                               coex_dm->ps_tdma_du_adj_type =
+                                                       6;
+                                       } else if (coex_dm->cur_ps_tdma == 6) {
+                                               btc8821a2ant_ps_tdma(
+                                                       btcoexist, NORMAL_EXEC,
+                                                       true, 7);
+                                               coex_dm->ps_tdma_du_adj_type =
+                                                       7;
+                                       } else if (coex_dm->cur_ps_tdma == 7) {
+                                               btc8821a2ant_ps_tdma(
+                                                       btcoexist, NORMAL_EXEC,
+                                                       true, 8);
+                                               coex_dm->ps_tdma_du_adj_type =
+                                                       8;
+                                       } else if (coex_dm->cur_ps_tdma == 13) {
+                                               btc8821a2ant_ps_tdma(
+                                                       btcoexist, NORMAL_EXEC,
+                                                       true, 14);
+                                               coex_dm->ps_tdma_du_adj_type =
+                                                       14;
+                                       } else if (coex_dm->cur_ps_tdma == 14) {
+                                               btc8821a2ant_ps_tdma(
+                                                       btcoexist, NORMAL_EXEC,
+                                                       true, 15);
+                                               coex_dm->ps_tdma_du_adj_type =
+                                                       15;
+                                       } else if (coex_dm->cur_ps_tdma == 15) {
+                                               btc8821a2ant_ps_tdma(
+                                                       btcoexist, NORMAL_EXEC,
+                                                       true, 16);
+                                               coex_dm->ps_tdma_du_adj_type =
+                                                       16;
+                                       }
+                               } else if (result == 1) {
+                                       if (coex_dm->cur_ps_tdma == 8) {
+                                               btc8821a2ant_ps_tdma(
+                                                       btcoexist, NORMAL_EXEC,
+                                                       true, 7);
+                                               coex_dm->ps_tdma_du_adj_type =
+                                                       7;
+                                       } else if (coex_dm->cur_ps_tdma == 7) {
+                                               btc8821a2ant_ps_tdma(
+                                                       btcoexist, NORMAL_EXEC,
+                                                       true, 6);
+                                               coex_dm->ps_tdma_du_adj_type =
+                                                       6;
+                                       } else if (coex_dm->cur_ps_tdma == 6) {
+                                               btc8821a2ant_ps_tdma(
+                                                       btcoexist, NORMAL_EXEC,
+                                                       true, 6);
+                                               coex_dm->ps_tdma_du_adj_type =
+                                                       6;
+                                       } else if (coex_dm->cur_ps_tdma == 16) {
+                                               btc8821a2ant_ps_tdma(
+                                                       btcoexist, NORMAL_EXEC,
+                                                       true, 15);
+                                               coex_dm->ps_tdma_du_adj_type =
+                                                       15;
+                                       } else if (coex_dm->cur_ps_tdma == 15) {
+                                               btc8821a2ant_ps_tdma(
+                                                       btcoexist, NORMAL_EXEC,
+                                                       true, 14);
+                                               coex_dm->ps_tdma_du_adj_type =
+                                                       14;
+                                       } else if (coex_dm->cur_ps_tdma == 14) {
+                                               btc8821a2ant_ps_tdma(
+                                                       btcoexist, NORMAL_EXEC,
+                                                       true, 14);
+                                               coex_dm->ps_tdma_du_adj_type =
+                                                       14;
+                                       }
+                               }
+                       } else {
+                               if (coex_dm->cur_ps_tdma == 5) {
+                                       btc8821a2ant_ps_tdma(btcoexist,
+                                                       NORMAL_EXEC, true, 2);
+                                       coex_dm->ps_tdma_du_adj_type = 2;
+                               } else if (coex_dm->cur_ps_tdma == 6) {
+                                       btc8821a2ant_ps_tdma(btcoexist,
+                                                       NORMAL_EXEC, true, 2);
+                                       coex_dm->ps_tdma_du_adj_type = 2;
+                               } else if (coex_dm->cur_ps_tdma == 7) {
+                                       btc8821a2ant_ps_tdma(btcoexist,
+                                                       NORMAL_EXEC, true, 3);
+                                       coex_dm->ps_tdma_du_adj_type = 3;
+                               } else if (coex_dm->cur_ps_tdma == 8) {
+                                       btc8821a2ant_ps_tdma(btcoexist,
+                                                       NORMAL_EXEC, true, 4);
+                                       coex_dm->ps_tdma_du_adj_type = 4;
+                               }
+                               if (coex_dm->cur_ps_tdma == 13) {
+                                       btc8821a2ant_ps_tdma(btcoexist,
+                                                       NORMAL_EXEC, true, 10);
+                                       coex_dm->ps_tdma_du_adj_type = 10;
+                               } else if (coex_dm->cur_ps_tdma == 14) {
+                                       btc8821a2ant_ps_tdma(btcoexist,
+                                                       NORMAL_EXEC, true, 10);
+                                       coex_dm->ps_tdma_du_adj_type = 10;
+                               } else if (coex_dm->cur_ps_tdma == 15) {
+                                       btc8821a2ant_ps_tdma(btcoexist,
+                                                       NORMAL_EXEC, true, 11);
+                                       coex_dm->ps_tdma_du_adj_type = 11;
+                               } else if (coex_dm->cur_ps_tdma == 16) {
+                                       btc8821a2ant_ps_tdma(btcoexist,
+                                                       NORMAL_EXEC, true, 12);
+                                       coex_dm->ps_tdma_du_adj_type = 12;
+                               }
+                               if (result == -1) {
+                                       if (coex_dm->cur_ps_tdma == 1) {
+                                               btc8821a2ant_ps_tdma(
+                                                       btcoexist, NORMAL_EXEC,
+                                                       true, 2);
+                                               coex_dm->ps_tdma_du_adj_type =
+                                                       2;
+                                       } else if (coex_dm->cur_ps_tdma == 2) {
+                                               btc8821a2ant_ps_tdma(
+                                                       btcoexist, NORMAL_EXEC,
+                                                       true, 3);
+                                               coex_dm->ps_tdma_du_adj_type =
+                                                       3;
+                                       } else if (coex_dm->cur_ps_tdma == 3) {
+                                               btc8821a2ant_ps_tdma(
+                                                       btcoexist, NORMAL_EXEC,
+                                                       true, 4);
+                                               coex_dm->ps_tdma_du_adj_type =
+                                                       4;
+                                       } else if (coex_dm->cur_ps_tdma == 9) {
+                                               btc8821a2ant_ps_tdma(
+                                                       btcoexist, NORMAL_EXEC,
+                                                       true, 10);
+                                               coex_dm->ps_tdma_du_adj_type =
+                                                       10;
+                                       } else if (coex_dm->cur_ps_tdma == 10) {
+                                               btc8821a2ant_ps_tdma(
+                                                       btcoexist, NORMAL_EXEC,
+                                                       true, 11);
+                                               coex_dm->ps_tdma_du_adj_type =
+                                                       11;
+                                       } else if (coex_dm->cur_ps_tdma == 11) {
+                                               btc8821a2ant_ps_tdma(
+                                                       btcoexist, NORMAL_EXEC,
+                                                       true, 12);
+                                               coex_dm->ps_tdma_du_adj_type =
+                                                       12;
+                                       }
+                               } else if (result == 1) {
+                                       if (coex_dm->cur_ps_tdma == 4) {
+                                               btc8821a2ant_ps_tdma(
+                                                       btcoexist, NORMAL_EXEC,
+                                                       true, 3);
+                                               coex_dm->ps_tdma_du_adj_type =
+                                                       3;
+                                       } else if (coex_dm->cur_ps_tdma == 3) {
+                                               btc8821a2ant_ps_tdma(
+                                                       btcoexist, NORMAL_EXEC,
+                                                       true, 2);
+                                               coex_dm->ps_tdma_du_adj_type =
+                                                       2;
+                                       } else if (coex_dm->cur_ps_tdma == 2) {
+                                               btc8821a2ant_ps_tdma(
+                                                       btcoexist, NORMAL_EXEC,
+                                                       true, 2);
+                                               coex_dm->ps_tdma_du_adj_type =
+                                                       2;
+                                       } else if (coex_dm->cur_ps_tdma == 12) {
+                                               btc8821a2ant_ps_tdma(
+                                                       btcoexist, NORMAL_EXEC,
+                                                       true, 11);
+                                               coex_dm->ps_tdma_du_adj_type =
+                                                       11;
+                                       } else if (coex_dm->cur_ps_tdma == 11) {
+                                               btc8821a2ant_ps_tdma(
+                                                       btcoexist, NORMAL_EXEC,
+                                                       true, 10);
+                                               coex_dm->ps_tdma_du_adj_type =
+                                                       10;
+                                       } else if (coex_dm->cur_ps_tdma == 10) {
+                                               btc8821a2ant_ps_tdma(
+                                                       btcoexist, NORMAL_EXEC,
+                                                       true, 10);
+                                               coex_dm->ps_tdma_du_adj_type =
+                                                       10;
+                                       }
+                               }
+                       }
+               } else if (max_interval == 3) {
+                       if (tx_pause) {
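+                               /* 3-slot interval: coarsest ladder; the
+                                * result steps saturate at cases 7/15 on
+                                * the tx_pause side.
+                                */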
+                               if (coex_dm->cur_ps_tdma == 1) {
+                                       btc8821a2ant_ps_tdma(btcoexist,
+                                                       NORMAL_EXEC, true, 7);
+                                       coex_dm->ps_tdma_du_adj_type = 7;
+                               } else if (coex_dm->cur_ps_tdma == 2) {
+                                       btc8821a2ant_ps_tdma(btcoexist,
+                                                       NORMAL_EXEC, true, 7);
+                                       coex_dm->ps_tdma_du_adj_type = 7;
+                               } else if (coex_dm->cur_ps_tdma == 3) {
+                                       btc8821a2ant_ps_tdma(btcoexist,
+                                                       NORMAL_EXEC, true, 7);
+                                       coex_dm->ps_tdma_du_adj_type = 7;
+                               } else if (coex_dm->cur_ps_tdma == 4) {
+                                       btc8821a2ant_ps_tdma(btcoexist,
+                                                       NORMAL_EXEC, true, 8);
+                                       coex_dm->ps_tdma_du_adj_type = 8;
+                               }
+                               if (coex_dm->cur_ps_tdma == 9) {
+                                       btc8821a2ant_ps_tdma(btcoexist,
+                                                       NORMAL_EXEC, true, 15);
+                                       coex_dm->ps_tdma_du_adj_type = 15;
+                               } else if (coex_dm->cur_ps_tdma == 10) {
+                                       btc8821a2ant_ps_tdma(btcoexist,
+                                                       NORMAL_EXEC, true, 15);
+                                       coex_dm->ps_tdma_du_adj_type = 15;
+                               } else if (coex_dm->cur_ps_tdma == 11) {
+                                       btc8821a2ant_ps_tdma(btcoexist,
+                                                       NORMAL_EXEC, true, 15);
+                                       coex_dm->ps_tdma_du_adj_type = 15;
+                               } else if (coex_dm->cur_ps_tdma == 12) {
+                                       btc8821a2ant_ps_tdma(btcoexist,
+                                                       NORMAL_EXEC, true, 16);
+                                       coex_dm->ps_tdma_du_adj_type = 16;
+                               }
+                               if (result == -1) {
+                                       if (coex_dm->cur_ps_tdma == 5) {
+                                               btc8821a2ant_ps_tdma(
+                                                       btcoexist, NORMAL_EXEC,
+                                                       true, 7);
+                                               coex_dm->ps_tdma_du_adj_type =
+                                                       7;
+                                       } else if (coex_dm->cur_ps_tdma == 6) {
+                                               btc8821a2ant_ps_tdma(
+                                                       btcoexist, NORMAL_EXEC,
+                                                       true, 7);
+                                               coex_dm->ps_tdma_du_adj_type =
+                                                       7;
+                                       } else if (coex_dm->cur_ps_tdma == 7) {
+                                               btc8821a2ant_ps_tdma(
+                                                       btcoexist, NORMAL_EXEC,
+                                                       true, 8);
+                                               coex_dm->ps_tdma_du_adj_type =
+                                                       8;
+                                       } else if (coex_dm->cur_ps_tdma == 13) {
+                                               btc8821a2ant_ps_tdma(
+                                                       btcoexist, NORMAL_EXEC,
+                                                       true, 15);
+                                               coex_dm->ps_tdma_du_adj_type =
+                                                       15;
+                                       } else if (coex_dm->cur_ps_tdma == 14) {
+                                               btc8821a2ant_ps_tdma(
+                                                       btcoexist, NORMAL_EXEC,
+                                                       true, 15);
+                                               coex_dm->ps_tdma_du_adj_type =
+                                                       15;
+                                       } else if (coex_dm->cur_ps_tdma == 15) {
+                                               btc8821a2ant_ps_tdma(
+                                                       btcoexist, NORMAL_EXEC,
+                                                       true, 16);
+                                               coex_dm->ps_tdma_du_adj_type =
+                                                       16;
+                                       }
+                               } else if (result == 1) {
+                                       if (coex_dm->cur_ps_tdma == 8) {
+                                               btc8821a2ant_ps_tdma(
+                                                       btcoexist, NORMAL_EXEC,
+                                                       true, 7);
+                                               coex_dm->ps_tdma_du_adj_type =
+                                                       7;
+                                       } else if (coex_dm->cur_ps_tdma == 7) {
+                                               btc8821a2ant_ps_tdma(
+                                                       btcoexist, NORMAL_EXEC,
+                                                       true, 7);
+                                               coex_dm->ps_tdma_du_adj_type =
+                                                       7;
+                                       } else if (coex_dm->cur_ps_tdma == 6) {
+                                               btc8821a2ant_ps_tdma(
+                                                       btcoexist, NORMAL_EXEC,
+                                                       true, 7);
+                                               coex_dm->ps_tdma_du_adj_type =
+                                                       7;
+                                       } else if (coex_dm->cur_ps_tdma == 16) {
+                                               btc8821a2ant_ps_tdma(
+                                                       btcoexist, NORMAL_EXEC,
+                                                       true, 15);
+                                               coex_dm->ps_tdma_du_adj_type =
+                                                       15;
+                                       } else if (coex_dm->cur_ps_tdma == 15) {
+                                               btc8821a2ant_ps_tdma(
+                                                       btcoexist, NORMAL_EXEC,
+                                                       true, 15);
+                                               coex_dm->ps_tdma_du_adj_type =
+                                                       15;
+                                       } else if (coex_dm->cur_ps_tdma == 14) {
+                                               btc8821a2ant_ps_tdma(
+                                                       btcoexist, NORMAL_EXEC,
+                                                       true, 15);
+                                               coex_dm->ps_tdma_du_adj_type =
+                                                       15;
+                                       }
+                               }
+                       } else {
+                               if (coex_dm->cur_ps_tdma == 5) {
+                                       btc8821a2ant_ps_tdma(btcoexist,
+                                                       NORMAL_EXEC, true, 3);
+                                       coex_dm->ps_tdma_du_adj_type = 3;
+                               } else if (coex_dm->cur_ps_tdma == 6) {
+                                       btc8821a2ant_ps_tdma(btcoexist,
+                                                       NORMAL_EXEC, true, 3);
+                                       coex_dm->ps_tdma_du_adj_type = 3;
+                               } else if (coex_dm->cur_ps_tdma == 7) {
+                                       btc8821a2ant_ps_tdma(btcoexist,
+                                                       NORMAL_EXEC, true, 3);
+                                       coex_dm->ps_tdma_du_adj_type = 3;
+                               } else if (coex_dm->cur_ps_tdma == 8) {
+                                       btc8821a2ant_ps_tdma(btcoexist,
+                                                       NORMAL_EXEC, true, 4);
+                                       coex_dm->ps_tdma_du_adj_type = 4;
+                               }
+                               if (coex_dm->cur_ps_tdma == 13) {
+                                       btc8821a2ant_ps_tdma(btcoexist,
+                                                       NORMAL_EXEC, true, 11);
+                                       coex_dm->ps_tdma_du_adj_type = 11;
+                               } else if (coex_dm->cur_ps_tdma == 14) {
+                                       btc8821a2ant_ps_tdma(btcoexist,
+                                                       NORMAL_EXEC, true, 11);
+                                       coex_dm->ps_tdma_du_adj_type = 11;
+                               } else if (coex_dm->cur_ps_tdma == 15) {
+                                       btc8821a2ant_ps_tdma(btcoexist,
+                                                       NORMAL_EXEC, true, 11);
+                                       coex_dm->ps_tdma_du_adj_type = 11;
+                               } else if (coex_dm->cur_ps_tdma == 16) {
+                                       btc8821a2ant_ps_tdma(btcoexist,
+                                                       NORMAL_EXEC, true, 12);
+                                       coex_dm->ps_tdma_du_adj_type = 12;
+                               }
+                               if (result == -1) {
+                                       if (coex_dm->cur_ps_tdma == 1) {
+                                               btc8821a2ant_ps_tdma(
+                                                       btcoexist, NORMAL_EXEC,
+                                                       true, 3);
+                                               coex_dm->ps_tdma_du_adj_type =
+                                                       3;
+                                       } else if (coex_dm->cur_ps_tdma == 2) {
+                                               btc8821a2ant_ps_tdma(
+                                                       btcoexist, NORMAL_EXEC,
+                                                       true, 3);
+                                               coex_dm->ps_tdma_du_adj_type =
+                                                       3;
+                                       } else if (coex_dm->cur_ps_tdma == 3) {
+                                               btc8821a2ant_ps_tdma(
+                                                       btcoexist, NORMAL_EXEC,
+                                                       true, 4);
+                                               coex_dm->ps_tdma_du_adj_type =
+                                                       4;
+                                       } else if (coex_dm->cur_ps_tdma == 9) {
+                                               btc8821a2ant_ps_tdma(
+                                                       btcoexist, NORMAL_EXEC,
+                                                       true, 11);
+                                               coex_dm->ps_tdma_du_adj_type =
+                                                       11;
+                                       } else if (coex_dm->cur_ps_tdma == 10) {
+                                               btc8821a2ant_ps_tdma(
+                                                       btcoexist, NORMAL_EXEC,
+                                                       true, 11);
+                                               coex_dm->ps_tdma_du_adj_type =
+                                                       11;
+                                       } else if (coex_dm->cur_ps_tdma == 11) {
+                                               btc8821a2ant_ps_tdma(
+                                                       btcoexist, NORMAL_EXEC,
+                                                       true, 12);
+                                               coex_dm->ps_tdma_du_adj_type =
+                                                       12;
+                                       }
+                               } else if (result == 1) {
+                                       if (coex_dm->cur_ps_tdma == 4) {
+                                               btc8821a2ant_ps_tdma(
+                                                       btcoexist, NORMAL_EXEC,
+                                                       true, 3);
+                                               coex_dm->ps_tdma_du_adj_type =
+                                                       3;
+                                       } else if (coex_dm->cur_ps_tdma == 3) {
+                                               btc8821a2ant_ps_tdma(
+                                                       btcoexist, NORMAL_EXEC,
+                                                       true, 3);
+                                               coex_dm->ps_tdma_du_adj_type =
+                                                       3;
+                                       } else if (coex_dm->cur_ps_tdma == 2) {
+                                               btc8821a2ant_ps_tdma(
+                                                       btcoexist, NORMAL_EXEC,
+                                                       true, 3);
+                                               coex_dm->ps_tdma_du_adj_type =
+                                                       3;
+                                       } else if (coex_dm->cur_ps_tdma == 12) {
+                                               btc8821a2ant_ps_tdma(
+                                                       btcoexist, NORMAL_EXEC,
+                                                       true, 11);
+                                               coex_dm->ps_tdma_du_adj_type =
+                                                       11;
+                                       } else if (coex_dm->cur_ps_tdma == 11) {
+                                               btc8821a2ant_ps_tdma(
+                                                       btcoexist, NORMAL_EXEC,
+                                                       true, 11);
+                                               coex_dm->ps_tdma_du_adj_type =
+                                                       11;
+                                       } else if (coex_dm->cur_ps_tdma == 10) {
+                                               btc8821a2ant_ps_tdma(
+                                                       btcoexist, NORMAL_EXEC,
+                                                       true, 11);
+                                               coex_dm->ps_tdma_du_adj_type =
+                                                       11;
+                                       }
+                               }
+                       }
+               }
        }
 
        /* if the current PsTdma does not match the recorded one
         * (e.g. during scan or DHCP), adjust it back to the
         * previously recorded one.
         */
-       if (coex_dm->cur_ps_tdma != coex_dm->tdma_adj_type) {
-               bool    scan = false, link = false, roam = false;
+       if (coex_dm->cur_ps_tdma != coex_dm->ps_tdma_du_adj_type) {
+               bool scan = false, link = false, roam = false;
 
                RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
                         "[BTCoex], PsTdma type dismatch!!!, cur_ps_tdma = %d, recordPsTdma = %d\n",
-                           coex_dm->cur_ps_tdma, coex_dm->tdma_adj_type);
+                        coex_dm->cur_ps_tdma, coex_dm->ps_tdma_du_adj_type);
 
                btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_SCAN, &scan);
                btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_LINK, &link);
                btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_ROAM, &roam);
 
                if (!scan && !link && !roam) {
-                       halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC, true,
-                                               coex_dm->tdma_adj_type);
+                       btc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC, true,
+                                            coex_dm->ps_tdma_du_adj_type);
                } else {
                        RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
                                 "[BTCoex], roaming/link/scan is under progress, will adjust next time!!!\n");
                }
        }
-
-       halbtc8821a2ant_fw_dac_swing_lvl(btcoexist, NORMAL_EXEC, 0x6);
 }
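
The ladder above is the duration-adjust state machine: a result value computed earlier in this function (not shown in this hunk) steps cur_ps_tdma between fixed TDMA cases, and every step is mirrored into ps_tdma_du_adj_type so the restore block at the end can re-apply the chosen case once scan/link/roam activity settles. A condensed sketch of the visible transitions, assuming a hypothetical helper (the driver keeps the ladder inline; case numbers are taken verbatim from the branches above):

	#include <linux/types.h>

	static u8 tdma_next_case(u8 cur, int result)
	{
		/* steps taken regardless of direction */
		switch (cur) {
		case 5: case 6: case 7:
			return 3;
		case 8:
			return 4;
		case 13: case 14: case 15:
			return 11;
		case 16:
			return 12;
		}

		if (result == -1) {
			switch (cur) {
			case 1: case 2:
				return 3;
			case 3:
				return 4;
			case 9: case 10:
				return 11;
			case 11:
				return 12;
			}
		} else if (result == 1) {
			switch (cur) {
			case 2: case 3: case 4:
				return 3;
			case 10: case 11: case 12:
				return 11;
			}
		}

		return cur;	/* no transition recorded for this case */
	}

Each arm of the ladder then amounts to one btc8821a2ant_ps_tdma() call plus coex_dm->ps_tdma_du_adj_type = tdma_next_case(coex_dm->cur_ps_tdma, result).
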
 
 /* SCO only or SCO+PAN(HS)*/
-static void halbtc8821a2ant_action_sco(struct btc_coexist *btcoexist)
+static void btc8821a2ant_action_sco(struct btc_coexist *btcoexist)
 {
-       u8      wifi_rssi_state, bt_rssi_state;
+       struct btc_bt_link_info *bt_link_info = &btcoexist->bt_link_info;
+       u8 wifi_rssi_state, bt_rssi_state;
        u32 wifi_bw;
 
-       wifi_rssi_state = halbtc8821a2ant_wifi_rssi_state(btcoexist, 0, 2,
-                                                         15, 0);
-       bt_rssi_state = halbtc8821a2ant_bt_rssi_state(btcoexist, 2, 35, 0);
+       wifi_rssi_state = btc8821a2ant_wifi_rssi_state(btcoexist, 0, 2, 15, 0);
+       bt_rssi_state = btc8821a2ant_bt_rssi_state(btcoexist, 2, 35, 0);
+
+       btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x1, 0xfffff, 0x0);
 
-       halbtc8821a2ant_fw_dac_swing_lvl(btcoexist, NORMAL_EXEC, 4);
+       btc8821a2ant_limited_rx(btcoexist, NORMAL_EXEC, false, false, 0x8);
+       btc8821a2ant_fw_dac_swing_lvl(btcoexist, NORMAL_EXEC, 4);
 
-       if (halbtc8821a2ant_need_to_dec_bt_pwr(btcoexist))
-               halbtc8821a2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, true);
+       if (BTC_RSSI_HIGH(bt_rssi_state))
+               btc8821a2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, true);
        else
-               halbtc8821a2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, false);
+               btc8821a2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, false);
 
        btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_BW, &wifi_bw);
 
-       if (BTC_WIFI_BW_LEGACY == wifi_bw) {
+       if (wifi_bw == BTC_WIFI_BW_LEGACY) {
                /* for SCO quality at 11b/g mode */
-               halbtc8821a2ant_coex_table(btcoexist, NORMAL_EXEC,
-                                          0x5a5a5a5a, 0x5a5a5a5a, 0xffff, 0x3);
+               btc8821a2ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 2);
        } else {
                /* for SCO quality & wifi performance balance at 11n mode */
-               halbtc8821a2ant_coex_table(btcoexist, NORMAL_EXEC,
-                                          0x5aea5aea, 0x5aea5aea, 0xffff, 0x3);
+               if (wifi_bw == BTC_WIFI_BW_HT40) {
+                       btc8821a2ant_coex_table_with_type(btcoexist,
+                                                         NORMAL_EXEC, 8);
+               } else {
+                       if (bt_link_info->sco_only)
+                               btc8821a2ant_coex_table_with_type(
+                                       btcoexist, NORMAL_EXEC, 17);
+                       else
+                               btc8821a2ant_coex_table_with_type(
+                                       btcoexist, NORMAL_EXEC, 12);
+               }
        }
 
-       if (BTC_WIFI_BW_HT40 == wifi_bw) {
-               /* fw mechanism
-                * halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 5);
-                */
-
-               halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC,
-                                       false, 0); /*for voice quality*/
+       btc8821a2ant_power_save_state(btcoexist, BTC_PS_WIFI_NATIVE, 0x0, 0x0);
+       /* for voice quality */
+       btc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC, false, 0);
 
-               /* sw mechanism */
+       /* sw mechanism */
+       if (wifi_bw == BTC_WIFI_BW_HT40) {
                if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) ||
                    (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
-                       btc8821a2ant_sw_mech1(btcoexist, true, true,
-                                             false, false);
-                       btc8821a2ant_sw_mech2(btcoexist, true, false,
-                                             false, 0x18);
+                       btc8821a2ant_sw_mechanism1(btcoexist, true, true,
+                                                  false, false);
+                       btc8821a2ant_sw_mechanism2(btcoexist, true, false,
+                                                  true, 0x18);
                } else {
-                       btc8821a2ant_sw_mech1(btcoexist, true, true,
-                                             false, false);
-                       btc8821a2ant_sw_mech2(btcoexist, false, false,
-                                             false, 0x18);
+                       btc8821a2ant_sw_mechanism1(btcoexist, true, true,
+                                                  false, false);
+                       btc8821a2ant_sw_mechanism2(btcoexist, false, false,
+                                                  true, 0x18);
                }
        } else {
-               /* fw mechanism
-                * halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 5);
-                */
-               if ((bt_rssi_state == BTC_RSSI_STATE_HIGH) ||
-                   (bt_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
-                       halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC,
-                                               false, 0); /*for voice quality*/
-               } else {
-                       halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC,
-                                               false, 0); /*for voice quality*/
-               }
-
-               /* sw mechanism */
                if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) ||
                    (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
-                       btc8821a2ant_sw_mech1(btcoexist, false, true,
-                                             false, false);
-                       btc8821a2ant_sw_mech2(btcoexist, true, false,
-                                             false, 0x18);
+                       btc8821a2ant_sw_mechanism1(btcoexist, false, true,
+                                                  false, false);
+                       btc8821a2ant_sw_mechanism2(btcoexist, true, false,
+                                                  true, 0x18);
                } else {
-                       btc8821a2ant_sw_mech1(btcoexist, false, true,
-                                             false, false);
-                       btc8821a2ant_sw_mech2(btcoexist, false, false,
-                                             false, 0x18);
+                       btc8821a2ant_sw_mechanism1(btcoexist, false, true,
+                                                  false, false);
+                       btc8821a2ant_sw_mechanism2(btcoexist, false, false,
+                                                  true, 0x18);
                }
        }
 }
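
The rewritten SCO path drops the raw 0x5a5a5a5a-style masks in favour of numbered tables via btc8821a2ant_coex_table_with_type(). A minimal sketch of the selection above (the helper name is ours; the BTC_WIFI_BW_* constants and the type numbers are the driver's):

	static u8 sco_coex_table_type(u32 wifi_bw, bool sco_only)
	{
		if (wifi_bw == BTC_WIFI_BW_LEGACY)
			return 2;	/* SCO quality at 11b/g */
		if (wifi_bw == BTC_WIFI_BW_HT40)
			return 8;	/* SCO/wifi balance at HT40 */
		return sco_only ? 17 : 12;	/* HT20 */
	}
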
 
-static void halbtc8821a2ant_action_hid(struct btc_coexist *btcoexist)
+static void btc8821a2ant_action_hid(struct btc_coexist *btcoexist)
 {
-       u8      wifi_rssi_state, bt_rssi_state;
-       u32     wifi_bw;
+       u8 wifi_rssi_state, bt_rssi_state;
+       u32 wifi_bw;
+
+       wifi_rssi_state = btc8821a2ant_wifi_rssi_state(btcoexist, 0, 2, 15, 0);
+       bt_rssi_state = btc8821a2ant_bt_rssi_state(btcoexist,
+               2, BT_8821A_2ANT_BT_RSSI_COEXSWITCH_THRES, 0);
 
-       wifi_rssi_state = halbtc8821a2ant_wifi_rssi_state(btcoexist,
-                                                         0, 2, 15, 0);
-       bt_rssi_state = halbtc8821a2ant_bt_rssi_state(btcoexist, 2, 35, 0);
+       btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x1, 0xfffff, 0x0);
 
-       halbtc8821a2ant_fw_dac_swing_lvl(btcoexist, NORMAL_EXEC, 6);
+       btc8821a2ant_limited_rx(btcoexist, NORMAL_EXEC, false, false, 0x8);
+       btc8821a2ant_fw_dac_swing_lvl(btcoexist, NORMAL_EXEC, 6);
 
-       if (halbtc8821a2ant_need_to_dec_bt_pwr(btcoexist))
-               halbtc8821a2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, true);
+       if (BTC_RSSI_HIGH(bt_rssi_state))
+               btc8821a2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, true);
        else
-               halbtc8821a2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, false);
+               btc8821a2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, false);
 
        btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_BW, &wifi_bw);
 
-       if (BTC_WIFI_BW_LEGACY == wifi_bw) {
+       if (wifi_bw == BTC_WIFI_BW_LEGACY) {
                /* for HID at 11b/g mode */
-               halbtc8821a2ant_coex_table(btcoexist, NORMAL_EXEC, 0x55ff55ff,
-                                          0x5a5a5a5a, 0xffff, 0x3);
+               btc8821a2ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 7);
        } else {
                /* for HID quality & wifi performance balance at 11n mode */
-               halbtc8821a2ant_coex_table(btcoexist, NORMAL_EXEC, 0x55ff55ff,
-                                          0x5aea5aea, 0xffff, 0x3);
+               btc8821a2ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 2);
        }
 
-       if (BTC_WIFI_BW_HT40 == wifi_bw) {
-               /* fw mechanism */
-               if ((bt_rssi_state == BTC_RSSI_STATE_HIGH) ||
-                   (bt_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
-                       halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC,
-                                               true, 9);
-               } else {
-                       halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC,
-                                               true, 13);
-               }
+       btc8821a2ant_power_save_state(btcoexist, BTC_PS_WIFI_NATIVE, 0x0, 0x0);
+       btc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 24);
 
+       if (wifi_bw == BTC_WIFI_BW_HT40) {
                /* sw mechanism */
                if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) ||
                    (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
-                       btc8821a2ant_sw_mech1(btcoexist, true, true,
-                                             false, false);
-                       btc8821a2ant_sw_mech2(btcoexist, true, false,
-                                             false, 0x18);
+                       btc8821a2ant_sw_mechanism1(btcoexist, true, true,
+                                                  false, false);
+                       btc8821a2ant_sw_mechanism2(btcoexist, true, false,
+                                                  false, 0x18);
                } else {
-                       btc8821a2ant_sw_mech1(btcoexist, true, true,
-                                             false, false);
-                       btc8821a2ant_sw_mech2(btcoexist, false, false,
-                                             false, 0x18);
+                       btc8821a2ant_sw_mechanism1(btcoexist, true, true,
+                                                  false, false);
+                       btc8821a2ant_sw_mechanism2(btcoexist, false, false,
+                                                  false, 0x18);
                }
        } else {
-               /* fw mechanism */
-               if ((bt_rssi_state == BTC_RSSI_STATE_HIGH) ||
-                   (bt_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
-                       halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC,
-                                               true, 9);
-               } else {
-                       halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC,
-                                               true, 13);
-               }
-
                /* sw mechanism */
                if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) ||
                    (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
-                       btc8821a2ant_sw_mech1(btcoexist, false, true,
-                                             false, false);
-                       btc8821a2ant_sw_mech2(btcoexist, true, false,
-                                             false, 0x18);
+                       btc8821a2ant_sw_mechanism1(btcoexist, false, true,
+                                                  false, false);
+                       btc8821a2ant_sw_mechanism2(btcoexist, true, false,
+                                                  false, 0x18);
                } else {
-                       btc8821a2ant_sw_mech1(btcoexist, false, true,
-                                             false, false);
-                       btc8821a2ant_sw_mech2(btcoexist, false, false,
-                                             false, 0x18);
+                       btc8821a2ant_sw_mechanism1(btcoexist, false, true,
+                                                  false, false);
+                       btc8821a2ant_sw_mechanism2(btcoexist, false, false,
+                                                  false, 0x18);
                }
        }
 }
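
btc8821a2ant_wifi_rssi_state() and btc8821a2ant_bt_rssi_state() classify an averaged RSSI into LOW/STAY_LOW/HIGH/STAY_HIGH with hysteresis around the given threshold (15 for wifi above; BT_8821A_2ANT_BT_RSSI_COEXSWITCH_THRES for BT), and BTC_RSSI_HIGH() collapses the two high states. A sketch of the assumed classifier shape (the real helpers sit earlier in halbtc8821a2ant.c and also trace state changes; "hyst" stands in for the driver's tolerance constant):

	enum rssi_state { RSSI_LOW, RSSI_STAY_LOW, RSSI_HIGH, RSSI_STAY_HIGH };

	static enum rssi_state rssi_classify(enum rssi_state prev, int rssi,
					     int thresh, int hyst)
	{
		bool was_high = (prev == RSSI_HIGH || prev == RSSI_STAY_HIGH);

		if (!was_high)
			return rssi >= thresh + hyst ? RSSI_HIGH
						     : RSSI_STAY_LOW;
		return rssi < thresh ? RSSI_LOW : RSSI_STAY_HIGH;
	}
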
 
 /* A2DP only / PAN(EDR) only/ A2DP+PAN(HS) */
-static void halbtc8821a2ant_action_a2dp(struct btc_coexist *btcoexist)
+static void btc8821a2ant_action_a2dp(struct btc_coexist *btcoexist)
 {
-       u8              wifi_rssi_state, bt_rssi_state;
-       u32             wifi_bw;
+       u8 wifi_rssi_state, wifi_rssi_state1, bt_rssi_state;
+       u8 ap_num = 0;
+       u32 wifi_bw;
 
-       wifi_rssi_state = halbtc8821a2ant_wifi_rssi_state(btcoexist, 0, 2,
-                                                         15, 0);
-       bt_rssi_state = halbtc8821a2ant_bt_rssi_state(btcoexist, 2, 35, 0);
+       wifi_rssi_state = btc8821a2ant_wifi_rssi_state(btcoexist, 0, 2, 15, 0);
+       wifi_rssi_state1 = btc8821a2ant_wifi_rssi_state(btcoexist, 1, 2,
+                               BT_8821A_2ANT_WIFI_RSSI_COEXSWITCH_THRES, 0);
+       bt_rssi_state = btc8821a2ant_bt_rssi_state(btcoexist,
+               2, BT_8821A_2ANT_BT_RSSI_COEXSWITCH_THRES, 0);
 
-       /* fw dac swing is called in btc8821a2ant_tdma_dur_adj()
-        * halbtc8821a2ant_fw_dac_swing_lvl(btcoexist, NORMAL_EXEC, 6);
-        */
+       if ((ap_num >= 10) && BTC_RSSI_HIGH(wifi_rssi_state1) &&
+           BTC_RSSI_HIGH(bt_rssi_state)) {
+               btc8821a2ant_power_save_state(btcoexist, BTC_PS_WIFI_NATIVE,
+                                             0x0, 0x0);
 
-       if (halbtc8821a2ant_need_to_dec_bt_pwr(btcoexist))
-               halbtc8821a2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, true);
-       else
-               halbtc8821a2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, false);
+               btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x1, 0xfffff,
+                                         0x0);
+               btc8821a2ant_limited_rx(btcoexist, NORMAL_EXEC, false, false,
+                                       0x8);
+               btc8821a2ant_fw_dac_swing_lvl(btcoexist, NORMAL_EXEC, 6);
+               btc8821a2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, 2);
 
-       btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_BW, &wifi_bw);
+               btc8821a2ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 0);
 
-       if (BTC_WIFI_BW_HT40 == wifi_bw) {
-               /* fw mechanism */
-               if ((bt_rssi_state == BTC_RSSI_STATE_HIGH) ||
-                   (bt_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
-                       btc8821a2ant_tdma_dur_adj(btcoexist, false, false, 1);
+               btc8821a2ant_power_save_state(btcoexist, BTC_PS_WIFI_NATIVE,
+                                             0x0, 0x0);
+               btc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 23);
+
+               /* sw mechanism */
+               btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_BW, &wifi_bw);
+               if (wifi_bw == BTC_WIFI_BW_HT40) {
+                       btc8821a2ant_sw_mechanism1(btcoexist, true, false,
+                                                  false, false);
+                       btc8821a2ant_sw_mechanism2(btcoexist, true, false,
+                                                  true, 0x6);
                } else {
-                       btc8821a2ant_tdma_dur_adj(btcoexist, false, true, 1);
+                       btc8821a2ant_sw_mechanism1(btcoexist, false, false,
+                                                  false, false);
+                       btc8821a2ant_sw_mechanism2(btcoexist, true, false,
+                                                  true, 0x6);
                }
+               return;
+       }
 
-               /* sw mechanism */
+       btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x1, 0xfffff, 0x0);
+       btc8821a2ant_limited_rx(btcoexist, NORMAL_EXEC, false, false, 0x8);
+
+       btc8821a2ant_fw_dac_swing_lvl(btcoexist, NORMAL_EXEC, 6);
+
+       if (BTC_RSSI_HIGH(bt_rssi_state))
+               btc8821a2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, 2);
+       else
+               btc8821a2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, 0);
+
+       if (BTC_RSSI_HIGH(wifi_rssi_state1) && BTC_RSSI_HIGH(bt_rssi_state)) {
+               btc8821a2ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 7);
+               btc8821a2ant_power_save_state(btcoexist, BTC_PS_WIFI_NATIVE,
+                                             0x0, 0x0);
+       } else {
+               btc8821a2ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 13);
+               btc8821a2ant_power_save_state(btcoexist, BTC_PS_LPS_ON, 0x50,
+                                             0x4);
+       }
+
+       if ((bt_rssi_state == BTC_RSSI_STATE_HIGH) ||
+           (bt_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
+               btc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 23);
+       } else {
+               btc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 23);
+       }
+
+       /* sw mechanism */
+       btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_BW, &wifi_bw);
+       if (wifi_bw == BTC_WIFI_BW_HT40) {
                if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) ||
                    (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
-                       btc8821a2ant_sw_mech1(btcoexist, true, false,
-                                             false, false);
-                       btc8821a2ant_sw_mech2(btcoexist, true, false,
-                                             false, 0x18);
+                       btc8821a2ant_sw_mechanism1(btcoexist, true, false,
+                                                  false, false);
+                       btc8821a2ant_sw_mechanism2(btcoexist, true, false,
+                                                  false, 0x18);
                } else {
-                       btc8821a2ant_sw_mech1(btcoexist, true, false,
-                                             false, false);
-                       btc8821a2ant_sw_mech2(btcoexist, false, false,
-                                             false, 0x18);
+                       btc8821a2ant_sw_mechanism1(btcoexist, true, false,
+                                                  false, false);
+                       btc8821a2ant_sw_mechanism2(btcoexist, false, false,
+                                                  false, 0x18);
                }
        } else {
-               /* fw mechanism */
-               if ((bt_rssi_state == BTC_RSSI_STATE_HIGH) ||
-                   (bt_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
-                       btc8821a2ant_tdma_dur_adj(btcoexist, false, false, 1);
-               } else {
-                       btc8821a2ant_tdma_dur_adj(btcoexist, false, true, 1);
-               }
-
-               /* sw mechanism */
                if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) ||
                    (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
-                       btc8821a2ant_sw_mech1(btcoexist, false, false,
-                                             false, false);
-                       btc8821a2ant_sw_mech2(btcoexist, true, false,
-                                             false, 0x18);
+                       btc8821a2ant_sw_mechanism1(btcoexist, false, false,
+                                                  false, false);
+                       btc8821a2ant_sw_mechanism2(btcoexist, true, false,
+                                                  false, 0x18);
                } else {
-                       btc8821a2ant_sw_mech1(btcoexist, false, false,
-                                             false, false);
-                       btc8821a2ant_sw_mech2(btcoexist, false, false,
-                                             false, 0x18);
+                       btc8821a2ant_sw_mechanism1(btcoexist, false, false,
+                                                  false, false);
+                       btc8821a2ant_sw_mechanism2(btcoexist, false, false,
+                                                  false, 0x18);
                }
        }
 }
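
Three details in the rewritten A2DP handler are worth flagging. ap_num is initialised to 0 and never updated in this hunk, so the crowded-environment fast path behind ap_num >= 10 is unreachable as shown (presumably a later change feeds it, e.g. via BTC_GET_U1_AP_NUM). The bt_rssi_state test near the end is vacuous, since both branches request TDMA case 23. And btc8821a2ant_dec_bt_pwr() is now passed a numeric level (2/0) here while other handlers in this diff still pass a bool, consistent with a signature change made elsewhere in the series. The power-save/coex-table policy itself reduces to this sketch (helper name ours):

	static void a2dp_ps_policy(struct btc_coexist *btcoexist,
				   bool wifi_high, bool bt_high)
	{
		if (wifi_high && bt_high) {
			/* both links healthy: native PS, table 7 */
			btc8821a2ant_coex_table_with_type(btcoexist,
							  NORMAL_EXEC, 7);
			btc8821a2ant_power_save_state(btcoexist,
						      BTC_PS_WIFI_NATIVE,
						      0x0, 0x0);
		} else {
			/* weak link(s): force LPS, table 13 */
			btc8821a2ant_coex_table_with_type(btcoexist,
							  NORMAL_EXEC, 13);
			btc8821a2ant_power_save_state(btcoexist,
						      BTC_PS_LPS_ON,
						      0x50, 0x4);
		}
	}
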
 
-static void halbtc8821a2ant_action_a2dp_pan_hs(struct btc_coexist *btcoexist)
+static void btc8821a2ant_action_a2dp_pan_hs(struct btc_coexist *btcoexist)
 {
-       u8              wifi_rssi_state, bt_rssi_state, bt_info_ext;
-       u32             wifi_bw;
+       u8 wifi_rssi_state, wifi_rssi_state1, bt_rssi_state;
+       u32 wifi_bw;
 
-       bt_info_ext = coex_sta->bt_info_ext;
-       wifi_rssi_state = halbtc8821a2ant_wifi_rssi_state(btcoexist, 0, 2,
-                                                         15, 0);
-       bt_rssi_state = halbtc8821a2ant_bt_rssi_state(btcoexist, 2, 35, 0);
+       wifi_rssi_state = btc8821a2ant_wifi_rssi_state(btcoexist, 0, 2, 15, 0);
+       wifi_rssi_state1 = btc8821a2ant_wifi_rssi_state(btcoexist, 1, 2,
+                               BT_8821A_2ANT_WIFI_RSSI_COEXSWITCH_THRES, 0);
+       bt_rssi_state = btc8821a2ant_bt_rssi_state(btcoexist,
+               2, BT_8821A_2ANT_BT_RSSI_COEXSWITCH_THRES, 0);
 
-       /*fw dac swing is called in btc8821a2ant_tdma_dur_adj()
-        *halbtc8821a2ant_fw_dac_swing_lvl(btcoexist, NORMAL_EXEC, 6);
-        */
+       btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x1, 0xfffff, 0x0);
+
+       btc8821a2ant_limited_rx(btcoexist, NORMAL_EXEC, false, false, 0x8);
+       btc8821a2ant_fw_dac_swing_lvl(btcoexist, NORMAL_EXEC, 6);
 
-       if (halbtc8821a2ant_need_to_dec_bt_pwr(btcoexist))
-               halbtc8821a2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, true);
+       if (BTC_RSSI_HIGH(bt_rssi_state))
+               btc8821a2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, 2);
        else
-               halbtc8821a2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, false);
+               btc8821a2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, 0);
 
-       btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_BW, &wifi_bw);
+       if (BTC_RSSI_HIGH(wifi_rssi_state1) && BTC_RSSI_HIGH(bt_rssi_state)) {
+               btc8821a2ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 7);
+               btc8821a2ant_power_save_state(btcoexist, BTC_PS_WIFI_NATIVE,
+                                             0x0, 0x0);
+       } else {
+               btc8821a2ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 13);
+               btc8821a2ant_power_save_state(btcoexist, BTC_PS_LPS_ON, 0x50,
+                                             0x4);
+       }
 
-       if (BTC_WIFI_BW_HT40 == wifi_bw) {
-               /* fw mechanism */
-               if (bt_info_ext&BIT0) {
-                       /*a2dp basic rate*/
-                       btc8821a2ant_tdma_dur_adj(btcoexist, false, true, 2);
-               } else {
-                       /*a2dp edr rate*/
-                       btc8821a2ant_tdma_dur_adj(btcoexist, false, true, 1);
-               }
+       btc8821a2ant_tdma_duration_adjust(btcoexist, false, true, 2);
 
-               /* sw mechanism */
+       /* sw mechanism */
+       btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_BW, &wifi_bw);
+       if (wifi_bw == BTC_WIFI_BW_HT40) {
                if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) ||
                    (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
-                       btc8821a2ant_sw_mech1(btcoexist, true, false,
-                                             false, false);
-                       btc8821a2ant_sw_mech2(btcoexist, true, false,
-                                             false, 0x18);
+                       btc8821a2ant_sw_mechanism1(btcoexist, true, false,
+                                                  false, false);
+                       btc8821a2ant_sw_mechanism2(btcoexist, true, false,
+                                                  false, 0x18);
                } else {
-                       btc8821a2ant_sw_mech1(btcoexist, true, false,
-                                             false, false);
-                       btc8821a2ant_sw_mech2(btcoexist, false, false,
-                                             false, 0x18);
+                       btc8821a2ant_sw_mechanism1(btcoexist, true, false,
+                                                  false, false);
+                       btc8821a2ant_sw_mechanism2(btcoexist, false, false,
+                                                  false, 0x18);
                }
        } else {
-               /* fw mechanism */
-               if (bt_info_ext&BIT0) {
-                       /* a2dp basic rate */
-                       btc8821a2ant_tdma_dur_adj(btcoexist, false, true, 2);
-               } else {
-                       /* a2dp edr rate */
-                       btc8821a2ant_tdma_dur_adj(btcoexist, false, true, 1);
-               }
-
-               /* sw mechanism */
                if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) ||
                    (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
-                       btc8821a2ant_sw_mech1(btcoexist, false, false,
-                                             false, false);
-                       btc8821a2ant_sw_mech2(btcoexist, true, false,
-                                             false, 0x18);
+                       btc8821a2ant_sw_mechanism1(btcoexist, false, false,
+                                                  false, false);
+                       btc8821a2ant_sw_mechanism2(btcoexist, true, false,
+                                                  false, 0x18);
                } else {
-                       btc8821a2ant_sw_mech1(btcoexist, false, false,
-                                             false, false);
-                       btc8821a2ant_sw_mech2(btcoexist, false, false,
-                                             false, 0x18);
+                       btc8821a2ant_sw_mechanism1(btcoexist, false, false,
+                                                  false, false);
+                       btc8821a2ant_sw_mechanism2(btcoexist, false, false,
+                                                  false, 0x18);
                }
        }
 }
 
-static void halbtc8821a2ant_action_pan_edr(struct btc_coexist *btcoexist)
+static void btc8821a2ant_action_pan_edr(struct btc_coexist *btcoexist)
 {
-       u8              wifi_rssi_state, bt_rssi_state;
-       u32             wifi_bw;
+       u8 wifi_rssi_state, wifi_rssi_state1, bt_rssi_state;
+       u32 wifi_bw;
 
-       wifi_rssi_state = halbtc8821a2ant_wifi_rssi_state(btcoexist, 0, 2,
-                                                         15, 0);
-       bt_rssi_state = halbtc8821a2ant_bt_rssi_state(btcoexist, 2, 35, 0);
+       wifi_rssi_state = btc8821a2ant_wifi_rssi_state(btcoexist, 0, 2, 15, 0);
+       wifi_rssi_state1 = btc8821a2ant_wifi_rssi_state(btcoexist, 1, 2,
+                               BT_8821A_2ANT_WIFI_RSSI_COEXSWITCH_THRES, 0);
+       bt_rssi_state = btc8821a2ant_bt_rssi_state(btcoexist,
+                               2, BT_8821A_2ANT_BT_RSSI_COEXSWITCH_THRES, 0);
 
-       halbtc8821a2ant_fw_dac_swing_lvl(btcoexist, NORMAL_EXEC, 6);
+       btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x1, 0xfffff, 0x0);
 
-       if (halbtc8821a2ant_need_to_dec_bt_pwr(btcoexist))
-               halbtc8821a2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, true);
-       else
-               halbtc8821a2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, false);
+       btc8821a2ant_limited_rx(btcoexist, NORMAL_EXEC, false, false, 0x8);
 
-       btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_BW, &wifi_bw);
+       btc8821a2ant_fw_dac_swing_lvl(btcoexist, NORMAL_EXEC, 6);
 
-       if (BTC_WIFI_BW_LEGACY == wifi_bw) {
-               /* for HID at 11b/g mode */
-               halbtc8821a2ant_coex_table(btcoexist, NORMAL_EXEC, 0x55ff55ff,
-                                          0x5aff5aff, 0xffff, 0x3);
+       if (BTC_RSSI_HIGH(bt_rssi_state))
+               btc8821a2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, true);
+       else
+               btc8821a2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, false);
+
+       if (BTC_RSSI_HIGH(wifi_rssi_state1) && BTC_RSSI_HIGH(bt_rssi_state)) {
+               btc8821a2ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 10);
+               btc8821a2ant_power_save_state(btcoexist, BTC_PS_WIFI_NATIVE,
+                                             0x0, 0x0);
        } else {
-               /* for HID quality & wifi performance balance at 11n mode */
-               halbtc8821a2ant_coex_table(btcoexist, NORMAL_EXEC, 0x55ff55ff,
-                                          0x5aff5aff, 0xffff, 0x3);
+               btc8821a2ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 13);
+               btc8821a2ant_power_save_state(btcoexist, BTC_PS_LPS_ON, 0x50,
+                                             0x4);
        }
 
-       if (BTC_WIFI_BW_HT40 == wifi_bw) {
-               /* fw mechanism */
-               if ((bt_rssi_state == BTC_RSSI_STATE_HIGH) ||
-                   (bt_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
-                       halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC,
-                                               true, 1);
-               } else {
-                       halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC,
-                                               true, 5);
-               }
+       if ((bt_rssi_state == BTC_RSSI_STATE_HIGH) ||
+           (bt_rssi_state == BTC_RSSI_STATE_STAY_HIGH))
+               btc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 26);
+       else
+               btc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 26);
 
-               /* sw mechanism */
+       /* sw mechanism */
+       btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_BW, &wifi_bw);
+       if (wifi_bw == BTC_WIFI_BW_HT40) {
                if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) ||
                    (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
-                       btc8821a2ant_sw_mech1(btcoexist, true, false,
-                                             false, false);
-                       btc8821a2ant_sw_mech2(btcoexist, true, false,
-                                             false, 0x18);
+                       btc8821a2ant_sw_mechanism1(btcoexist, true, false,
+                                                  false, false);
+                       btc8821a2ant_sw_mechanism2(btcoexist, true, false,
+                                                  false, 0x18);
                } else {
-                       btc8821a2ant_sw_mech1(btcoexist, true, false,
-                                             false, false);
-                       btc8821a2ant_sw_mech2(btcoexist, false, false,
-                                             false, 0x18);
+                       btc8821a2ant_sw_mechanism1(btcoexist, true, false,
+                                                  false, false);
+                       btc8821a2ant_sw_mechanism2(btcoexist, false, false,
+                                                  false, 0x18);
                }
        } else {
-               /* fw mechanism */
-               if ((bt_rssi_state == BTC_RSSI_STATE_HIGH) ||
-                   (bt_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
-                       halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC,
-                                               true, 1);
-               } else {
-                       halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC,
-                                               true, 5);
-               }
-
-               /* sw mechanism */
                if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) ||
                    (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
-                       btc8821a2ant_sw_mech1(btcoexist, false, false,
-                                             false, false);
-                       btc8821a2ant_sw_mech2(btcoexist, true, false,
-                                             false, 0x18);
+                       btc8821a2ant_sw_mechanism1(btcoexist, false, false,
+                                                  false, false);
+                       btc8821a2ant_sw_mechanism2(btcoexist, true, false,
+                                                  false, 0x18);
                } else {
-                       btc8821a2ant_sw_mech1(btcoexist, false, false,
-                                             false, false);
-                       btc8821a2ant_sw_mech2(btcoexist, false, false,
-                                             false, 0x18);
+                       btc8821a2ant_sw_mechanism1(btcoexist, false, false,
+                                                  false, false);
+                       btc8821a2ant_sw_mechanism2(btcoexist, false, false,
+                                                  false, 0x18);
                }
        }
 }
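
As in the A2DP path, the bt_rssi_state test above is currently vacuous: both branches program TDMA case 26, so it collapses to a single call:

	btc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 26);
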
 
 /* PAN(HS) only */
-static void halbtc8821a2ant_action_pan_hs(struct btc_coexist *btcoexist)
+static void btc8821a2ant_action_pan_hs(struct btc_coexist *btcoexist)
 {
-       u8              wifi_rssi_state, bt_rssi_state;
-       u32             wifi_bw;
+       u8 wifi_rssi_state, wifi_rssi_state1, bt_rssi_state;
+       u32 wifi_bw;
 
-       wifi_rssi_state = halbtc8821a2ant_wifi_rssi_state(btcoexist,
-                                                         0, 2, 15, 0);
-       bt_rssi_state = halbtc8821a2ant_bt_rssi_state(btcoexist, 2, 35, 0);
+       wifi_rssi_state = btc8821a2ant_wifi_rssi_state(btcoexist, 0, 2, 15, 0);
+       wifi_rssi_state1 = btc8821a2ant_wifi_rssi_state(btcoexist, 1, 2,
+                               BT_8821A_2ANT_WIFI_RSSI_COEXSWITCH_THRES, 0);
+       bt_rssi_state = btc8821a2ant_bt_rssi_state(btcoexist,
+                               2, BT_8821A_2ANT_BT_RSSI_COEXSWITCH_THRES, 0);
 
-       halbtc8821a2ant_fw_dac_swing_lvl(btcoexist, NORMAL_EXEC, 6);
+       btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x1, 0xfffff, 0x0);
 
-       btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_BW, &wifi_bw);
+       btc8821a2ant_limited_rx(btcoexist, NORMAL_EXEC, false, false, 0x8);
+       btc8821a2ant_fw_dac_swing_lvl(btcoexist, NORMAL_EXEC, 6);
 
-       if (BTC_WIFI_BW_HT40 == wifi_bw) {
-               /* fw mechanism */
-               if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) ||
-                   (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
-                       halbtc8821a2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC,
-                                                  true);
-               } else {
-                       halbtc8821a2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC,
-                                                  false);
-               }
-               halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC, false, 1);
+       if (BTC_RSSI_HIGH(bt_rssi_state))
+               btc8821a2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, 2);
+       else
+               btc8821a2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, 0);
 
-               /* sw mechanism */
+       btc8821a2ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 7);
+       btc8821a2ant_power_save_state(btcoexist, BTC_PS_WIFI_NATIVE, 0x0, 0x0);
+       btc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC, false, 1);
+
+       btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_BW, &wifi_bw);
+       if (wifi_bw == BTC_WIFI_BW_HT40) {
                if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) ||
                    (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
-                       btc8821a2ant_sw_mech1(btcoexist, true, false,
-                                             false, false);
-                       btc8821a2ant_sw_mech2(btcoexist, true, false,
-                                             false, 0x18);
+                       btc8821a2ant_sw_mechanism1(btcoexist, true, false,
+                                                  false, false);
+                       btc8821a2ant_sw_mechanism2(btcoexist, true, false,
+                                                  false, 0x18);
                } else {
-                       btc8821a2ant_sw_mech1(btcoexist, true, false,
-                                             false, false);
-                       btc8821a2ant_sw_mech2(btcoexist, false, false,
-                                             false, 0x18);
+                       btc8821a2ant_sw_mechanism1(btcoexist, true, false,
+                                                  false, false);
+                       btc8821a2ant_sw_mechanism2(btcoexist, false, false,
+                                                  false, 0x18);
                }
        } else {
-               /* fw mechanism */
-               if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) ||
-                   (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
-                       halbtc8821a2ant_dec_bt_pwr(btcoexist,
-                                                  NORMAL_EXEC, true);
-               } else {
-                       halbtc8821a2ant_dec_bt_pwr(btcoexist,
-                                                  NORMAL_EXEC, false);
-               }
-
-               halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC, false, 1);
-
-               /* sw mechanism */
                if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) ||
                    (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
-                       btc8821a2ant_sw_mech1(btcoexist, false, false,
-                                             false, false);
-                       btc8821a2ant_sw_mech2(btcoexist, true, false,
-                                             false, 0x18);
+                       btc8821a2ant_sw_mechanism1(btcoexist, false, false,
+                                                  false, false);
+                       btc8821a2ant_sw_mechanism2(btcoexist, true, false,
+                                                  false, 0x18);
                } else {
-                       btc8821a2ant_sw_mech1(btcoexist, false, false,
-                                             false, false);
-                       btc8821a2ant_sw_mech2(btcoexist, false, false,
-                                             false, 0x18);
+                       btc8821a2ant_sw_mechanism1(btcoexist, false, false,
+                                                  false, false);
+                       btc8821a2ant_sw_mechanism2(btcoexist, false, false,
+                                                  false, 0x18);
                }
        }
 }
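
Throughout these handlers btc8821a2ant_power_save_state() pairs BTC_PS_WIFI_NATIVE with (0x0, 0x0) and BTC_PS_LPS_ON with (0x50, 0x4). Assuming the same prototype as the sibling 8723b coex driver (the definition is outside this hunk), the two trailing values are the LPS and RPWM parameters handed to the firmware:

	/* assumed prototype, mirroring halbtc8723b2ant.c */
	static void btc8821a2ant_power_save_state(struct btc_coexist *btcoexist,
						  u8 ps_type, u8 lps_val,
						  u8 rpwm_val);

	/* hand power saving back to the wifi stack */
	btc8821a2ant_power_save_state(btcoexist, BTC_PS_WIFI_NATIVE, 0x0, 0x0);

	/* force LPS with explicit LPS/RPWM values */
	btc8821a2ant_power_save_state(btcoexist, BTC_PS_LPS_ON, 0x50, 0x4);
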
 
 /* PAN(EDR)+A2DP */
-static void halbtc8821a2ant_action_pan_edr_a2dp(struct btc_coexist *btcoexist)
+static void btc8821a2ant_action_pan_edr_a2dp(struct btc_coexist *btcoexist)
 {
-       u8      wifi_rssi_state, bt_rssi_state, bt_info_ext;
-       u32     wifi_bw;
+       u8 wifi_rssi_state, wifi_rssi_state1, bt_rssi_state;
+       u32 wifi_bw;
 
-       bt_info_ext = coex_sta->bt_info_ext;
-       wifi_rssi_state = halbtc8821a2ant_wifi_rssi_state(btcoexist, 0, 2,
-                                                         15, 0);
-       bt_rssi_state = halbtc8821a2ant_bt_rssi_state(btcoexist, 2, 35, 0);
+       wifi_rssi_state = btc8821a2ant_wifi_rssi_state(btcoexist, 0, 2, 15, 0);
+       wifi_rssi_state1 = btc8821a2ant_wifi_rssi_state(btcoexist, 1, 2,
+                               BT_8821A_2ANT_WIFI_RSSI_COEXSWITCH_THRES, 0);
+       bt_rssi_state = btc8821a2ant_bt_rssi_state(btcoexist,
+                               2, BT_8821A_2ANT_BT_RSSI_COEXSWITCH_THRES, 0);
+
+       btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x1, 0xfffff, 0x0);
+
+       btc8821a2ant_limited_rx(btcoexist, NORMAL_EXEC, false, false, 0x8);
+
+       btc8821a2ant_fw_dac_swing_lvl(btcoexist, NORMAL_EXEC, 6);
 
-       halbtc8821a2ant_fw_dac_swing_lvl(btcoexist, NORMAL_EXEC, 6);
+       if (BTC_RSSI_HIGH(bt_rssi_state))
+               btc8821a2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, 2);
+       else
+               btc8821a2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, 0);
 
-       if (halbtc8821a2ant_need_to_dec_bt_pwr(btcoexist))
-               halbtc8821a2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, true);
+       if (BTC_RSSI_HIGH(wifi_rssi_state1) && BTC_RSSI_HIGH(bt_rssi_state))
+               btc8821a2ant_power_save_state(btcoexist, BTC_PS_WIFI_NATIVE,
+                                             0x0, 0x0);
        else
-               halbtc8821a2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, false);
+               btc8821a2ant_power_save_state(btcoexist, BTC_PS_LPS_ON, 0x50,
+                                             0x4);
 
        btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_BW, &wifi_bw);
 
-       halbtc8821a2ant_coex_table(btcoexist, NORMAL_EXEC, 0x55ff55ff,
-                                  0x5afa5afa, 0xffff, 0x3);
+       if ((bt_rssi_state == BTC_RSSI_STATE_HIGH) ||
+           (bt_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
+               btc8821a2ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 12);
 
-       if (BTC_WIFI_BW_HT40 == wifi_bw) {
-               /* fw mechanism */
-               if ((bt_rssi_state == BTC_RSSI_STATE_HIGH) ||
-                   (bt_rssi_state == BTC_RSSI_STATE_STAY_HIGH))
-                       btc8821a2ant_tdma_dur_adj(btcoexist, false,
-                                                 false, 3);
+               if (wifi_bw == BTC_WIFI_BW_HT40)
+                       btc8821a2ant_tdma_duration_adjust(btcoexist, false,
+                                                         true, 3);
                else
-                       btc8821a2ant_tdma_dur_adj(btcoexist, false,
-                                                 true, 3);
+                       btc8821a2ant_tdma_duration_adjust(btcoexist, false,
+                                                         false, 3);
+       } else {
+               btc8821a2ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 13);
+               btc8821a2ant_tdma_duration_adjust(btcoexist, false, true, 3);
+       }
 
-               /* sw mechanism */
+       /* sw mechanism */
+       if (wifi_bw == BTC_WIFI_BW_HT40) {
                if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) ||
                    (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
-                       btc8821a2ant_sw_mech1(btcoexist, true, false,
-                                             false, false);
-                       btc8821a2ant_sw_mech2(btcoexist, true, false,
-                                             false, 0x18);
+                       btc8821a2ant_sw_mechanism1(btcoexist, true, false,
+                                                  false, false);
+                       btc8821a2ant_sw_mechanism2(btcoexist, true, false,
+                                                  false, 0x18);
                } else {
-                       btc8821a2ant_sw_mech1(btcoexist, true, false,
-                                             false, false);
-                       btc8821a2ant_sw_mech2(btcoexist, false, false,
-                                             false, 0x18);
+                       btc8821a2ant_sw_mechanism1(btcoexist, true, false,
+                                                  false, false);
+                       btc8821a2ant_sw_mechanism2(btcoexist, false, false,
+                                                  false, 0x18);
                }
        } else {
-               /* fw mechanism */
-               if ((bt_rssi_state == BTC_RSSI_STATE_HIGH) ||
-                   (bt_rssi_state == BTC_RSSI_STATE_STAY_HIGH))
-                       btc8821a2ant_tdma_dur_adj(btcoexist, false, false, 3);
-               else
-                       btc8821a2ant_tdma_dur_adj(btcoexist, false, true, 3);
-
-               /* sw mechanism */
                if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) ||
                    (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
-                       btc8821a2ant_sw_mech1(btcoexist, false, false,
-                                             false, false);
-                       btc8821a2ant_sw_mech2(btcoexist, true, false,
-                                             false, 0x18);
+                       btc8821a2ant_sw_mechanism1(btcoexist, false, false,
+                                                  false, false);
+                       btc8821a2ant_sw_mechanism2(btcoexist, true, false,
+                                                  false, 0x18);
                } else {
-                       btc8821a2ant_sw_mech1(btcoexist, false, false,
-                                             false, false);
-                       btc8821a2ant_sw_mech2(btcoexist, false, false,
-                                             false, 0x18);
+                       btc8821a2ant_sw_mechanism1(btcoexist, false, false,
+                                                  false, false);
+                       btc8821a2ant_sw_mechanism2(btcoexist, false, false,
+                                                  false, 0x18);
                }
        }
 }
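
The rename from btc8821a2ant_tdma_dur_adj() to btc8821a2ant_tdma_duration_adjust() keeps the argument order. Reading the parameter names from the analogous 8723b helper (an assumption; the definition is outside this hunk), the calls above decode as:

	btc8821a2ant_tdma_duration_adjust(btcoexist,
					  false,  /* sco_hid: no SCO/HID link */
					  true,   /* tx_pause */
					  3);     /* max_interval */
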
 
-static void halbtc8821a2ant_action_pan_edr_hid(struct btc_coexist *btcoexist)
+static void btc8821a2ant_action_pan_edr_hid(struct btc_coexist *btcoexist)
 {
-       u8      wifi_rssi_state, bt_rssi_state;
-       u32     wifi_bw;
+       u8 wifi_rssi_state, bt_rssi_state;
+       u32 wifi_bw;
 
-       wifi_rssi_state = halbtc8821a2ant_wifi_rssi_state(btcoexist, 0, 2,
-                                                         15, 0);
-       bt_rssi_state = halbtc8821a2ant_bt_rssi_state(btcoexist, 2, 35, 0);
+       wifi_rssi_state = btc8821a2ant_wifi_rssi_state(btcoexist, 0, 2, 15, 0);
+       bt_rssi_state = btc8821a2ant_bt_rssi_state(btcoexist,
+                               2, BT_8821A_2ANT_BT_RSSI_COEXSWITCH_THRES, 0);
 
-       halbtc8821a2ant_fw_dac_swing_lvl(btcoexist, NORMAL_EXEC, 6);
+       btc8821a2ant_fw_dac_swing_lvl(btcoexist, NORMAL_EXEC, 6);
 
-       if (halbtc8821a2ant_need_to_dec_bt_pwr(btcoexist))
-               halbtc8821a2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, true);
+       if (BTC_RSSI_HIGH(bt_rssi_state))
+               btc8821a2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, true);
        else
-               halbtc8821a2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, false);
+               btc8821a2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, false);
 
        btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_BW, &wifi_bw);
 
-       halbtc8821a2ant_coex_table(btcoexist, NORMAL_EXEC, 0x55ff55ff,
-                                  0x5a5f5a5f, 0xffff, 0x3);
+       if (wifi_bw == BTC_WIFI_BW_LEGACY) {
+               /* for HID at 11b/g mode */
+               btc8821a2ant_coex_table(btcoexist, NORMAL_EXEC, 0x55ff55ff,
+                                       0x5a5f5a5f, 0xffff, 0x3);
+       } else {
+               /* for HID quality & wifi performance balance at 11n mode */
+               btc8821a2ant_coex_table(btcoexist, NORMAL_EXEC, 0x55ff55ff,
+                                       0x5a5f5a5f, 0xffff, 0x3);
+       }
 
-       if (BTC_WIFI_BW_HT40 == wifi_bw) {
-               halbtc8821a2ant_fw_dac_swing_lvl(btcoexist, NORMAL_EXEC, 3);
+       if (wifi_bw == BTC_WIFI_BW_HT40) {
+               btc8821a2ant_fw_dac_swing_lvl(btcoexist, NORMAL_EXEC, 3);
                /* fw mechanism */
                if ((bt_rssi_state == BTC_RSSI_STATE_HIGH) ||
                    (bt_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
-                       halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC,
-                                               true, 10);
+                       btc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC,
+                                            true, 10);
                } else {
-                       halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC,
-                                               true, 14);
+                       btc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 14);
                }
 
                /* sw mechanism */
                if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) ||
                    (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
-                       btc8821a2ant_sw_mech1(btcoexist, true, true,
-                                             false, false);
-                       btc8821a2ant_sw_mech2(btcoexist, true, false,
-                                             false, 0x18);
+                       btc8821a2ant_sw_mechanism1(btcoexist, true, true,
+                                                  false, false);
+                       btc8821a2ant_sw_mechanism2(btcoexist, true, false,
+                                                  false, 0x18);
                } else {
-                       btc8821a2ant_sw_mech1(btcoexist, true, true,
-                                             false, false);
-                       btc8821a2ant_sw_mech2(btcoexist, false, false,
-                                             false, 0x18);
+                       btc8821a2ant_sw_mechanism1(btcoexist, true, true,
+                                                  false, false);
+                       btc8821a2ant_sw_mechanism2(btcoexist, false, false,
+                                                  false, 0x18);
                }
        } else {
-               halbtc8821a2ant_fw_dac_swing_lvl(btcoexist, NORMAL_EXEC, 6);
+               btc8821a2ant_fw_dac_swing_lvl(btcoexist, NORMAL_EXEC, 6);
                /* fw mechanism */
                if ((bt_rssi_state == BTC_RSSI_STATE_HIGH) ||
                    (bt_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
-                       halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC,
-                                               true, 10);
+                       btc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 10);
                } else {
-                       halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC,
-                                               true, 14);
+                       btc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 14);
                }
 
                /* sw mechanism */
                if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) ||
                    (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
-                       btc8821a2ant_sw_mech1(btcoexist, false, true,
-                                             false, false);
-                       btc8821a2ant_sw_mech2(btcoexist, true, false,
-                                             false, 0x18);
+                       btc8821a2ant_sw_mechanism1(btcoexist, false, true,
+                                                  false, false);
+                       btc8821a2ant_sw_mechanism2(btcoexist, true, false,
+                                                  false, 0x18);
                } else {
-                       btc8821a2ant_sw_mech1(btcoexist, false, true,
-                                             false, false);
-                       btc8821a2ant_sw_mech2(btcoexist, false, false,
-                                             false, 0x18);
+                       btc8821a2ant_sw_mechanism1(btcoexist, false, true,
+                                                  false, false);
+                       btc8821a2ant_sw_mechanism2(btcoexist, false, false,
+                                                  false, 0x18);
                }
        }
 }
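
The BTC_RSSI_HIGH() test above replaces the old halbtc8821a2ant_need_to_dec_bt_pwr() helper: BT power is now lowered purely from the BT RSSI state machine. Assuming the macro matches the rssi-state helpers used elsewhere in rtlwifi's btcoexist code, it reduces to a sketch like:

/* assumed definition, mirroring rtlwifi's other rssi-state macros */
#define BTC_RSSI_HIGH(state) \
	(((state) == BTC_RSSI_STATE_HIGH) || \
	 ((state) == BTC_RSSI_STATE_STAY_HIGH))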
@@ -2944,42 +3220,70 @@ static void halbtc8821a2ant_action_pan_edr_hid(struct btc_coexist *btcoexist)
 /* HID+A2DP+PAN(EDR) */
 static void btc8821a2ant_act_hid_a2dp_pan_edr(struct btc_coexist *btcoexist)
 {
-       u8      wifi_rssi_state, bt_rssi_state, bt_info_ext;
-       u32     wifi_bw;
+       u8 wifi_rssi_state, bt_rssi_state, bt_info_ext;
+       u32 wifi_bw;
 
        bt_info_ext = coex_sta->bt_info_ext;
-       wifi_rssi_state = halbtc8821a2ant_wifi_rssi_state(btcoexist,
-                                                         0, 2, 15, 0);
-       bt_rssi_state = halbtc8821a2ant_bt_rssi_state(btcoexist, 2, 35, 0);
+       wifi_rssi_state = btc8821a2ant_wifi_rssi_state(btcoexist, 0, 2, 15, 0);
+       bt_rssi_state = btc8821a2ant_bt_rssi_state(btcoexist, 2, 35, 0);
 
-       halbtc8821a2ant_fw_dac_swing_lvl(btcoexist, NORMAL_EXEC, 6);
+       btc8821a2ant_limited_rx(btcoexist, NORMAL_EXEC, false, false, 0x8);
+       btc8821a2ant_fw_dac_swing_lvl(btcoexist, NORMAL_EXEC, 6);
 
-       if (halbtc8821a2ant_need_to_dec_bt_pwr(btcoexist))
-               halbtc8821a2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, true);
+       if (BTC_RSSI_HIGH(bt_rssi_state))
+               btc8821a2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, 2);
        else
-               halbtc8821a2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, false);
+               btc8821a2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, 0);
 
        btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_BW, &wifi_bw);
 
-       halbtc8821a2ant_coex_table(btcoexist, NORMAL_EXEC, 0x55ff55ff,
-                                  0x5a5a5a5a, 0xffff, 0x3);
+       if (wifi_bw == BTC_WIFI_BW_LEGACY) {
+               /* for HID at 11b/g mode */
+               btc8821a2ant_coex_table(btcoexist, NORMAL_EXEC, 0x55ff55ff,
+                                       0x5a5a5a5a, 0xffff, 0x3);
+       } else {
+               /* for HID quality & wifi performance balance at 11n mode */
+               btc8821a2ant_coex_table(btcoexist, NORMAL_EXEC, 0x55ff55ff,
+                                       0x5a5a5a5a, 0xffff, 0x3);
+       }
 
        if (BTC_WIFI_BW_HT40 == wifi_bw) {
                /* fw mechanism */
-               btc8821a2ant_tdma_dur_adj(btcoexist, true, true, 3);
+               if ((bt_rssi_state == BTC_RSSI_STATE_HIGH) ||
+                   (bt_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
+                       if (bt_info_ext & BIT0) {
+                               /* a2dp basic rate */
+                               btc8821a2ant_tdma_duration_adjust(btcoexist,
+                                                       true, true, 3);
+                       } else {
+                               /* a2dp edr rate */
+                               btc8821a2ant_tdma_duration_adjust(btcoexist,
+                                                       true, true, 3);
+                       }
+               } else {
+                       if (bt_info_ext & BIT0) {
+                               /* a2dp basic rate */
+                               btc8821a2ant_tdma_duration_adjust(btcoexist,
+                                                       true, true, 3);
+                       } else {
+                               /* a2dp edr rate */
+                               btc8821a2ant_tdma_duration_adjust(btcoexist,
+                                                       true, true, 3);
+                       }
+               }
 
                /* sw mechanism */
                if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) ||
                    (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
-                       btc8821a2ant_sw_mech1(btcoexist, true, true,
-                                             false, false);
-                       btc8821a2ant_sw_mech2(btcoexist, true, false,
-                                             false, 0x18);
+                       btc8821a2ant_sw_mechanism1(btcoexist, true, true,
+                                                  false, false);
+                       btc8821a2ant_sw_mechanism2(btcoexist, true, false,
+                                                  false, 0x18);
                } else {
-                       btc8821a2ant_sw_mech1(btcoexist, true, true,
-                                             false, false);
-                       btc8821a2ant_sw_mech2(btcoexist, false, false,
-                                             false, 0x18);
+                       btc8821a2ant_sw_mechanism1(btcoexist, true, true,
+                                                  false, false);
+                       btc8821a2ant_sw_mechanism2(btcoexist, false, false,
+                                                  false, 0x18);
                }
        } else {
                /* fw mechanism */
@@ -2987,103 +3291,183 @@ static void btc8821a2ant_act_hid_a2dp_pan_edr(struct btc_coexist *btcoexist)
                    (bt_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
                        if (bt_info_ext&BIT0) {
                                /* a2dp basic rate */
-                               btc8821a2ant_tdma_dur_adj(btcoexist, true,
-                                                         false, 3);
+                               btc8821a2ant_tdma_duration_adjust(btcoexist,
+                                                       true, false, 3);
                        } else {
                                /* a2dp edr rate */
-                               btc8821a2ant_tdma_dur_adj(btcoexist, true,
-                                                         false, 3);
+                               btc8821a2ant_tdma_duration_adjust(btcoexist,
+                                                       true, false, 3);
                        }
                } else {
                        if (bt_info_ext&BIT0) {
                                /* a2dp basic rate */
-                               btc8821a2ant_tdma_dur_adj(btcoexist, true,
-                                                         true, 3);
+                               btc8821a2ant_tdma_duration_adjust(btcoexist,
+                                                                 true, true,
+                                                                 3);
                        } else {
                                /* a2dp edr rate */
-                               btc8821a2ant_tdma_dur_adj(btcoexist, true,
-                                                         true, 3);
+                               btc8821a2ant_tdma_duration_adjust(btcoexist,
+                                                                 true, true,
+                                                                 3);
                        }
                }
 
                /* sw mechanism */
                if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) ||
                    (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
-                       btc8821a2ant_sw_mech1(btcoexist, false, true,
-                                             false, false);
-                       btc8821a2ant_sw_mech2(btcoexist, true, false,
-                                             false, 0x18);
+                       btc8821a2ant_sw_mechanism1(btcoexist, false, true,
+                                                  false, false);
+                       btc8821a2ant_sw_mechanism2(btcoexist, true, false,
+                                                  false, 0x18);
                } else {
-                       btc8821a2ant_sw_mech1(btcoexist, false, true,
-                                             false, false);
-                       btc8821a2ant_sw_mech2(btcoexist, false, false,
-                                             false, 0x18);
+                       btc8821a2ant_sw_mechanism1(btcoexist, false, true,
+                                                  false, false);
+                       btc8821a2ant_sw_mechanism2(btcoexist, false, false,
+                                                  false, 0x18);
                }
        }
 }
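
The recurring bt_info_ext & BIT0 test distinguishes an A2DP link at the basic rate from one at the EDR rate (per the inline comments); in this function both branches currently pick the same TDMA duration, so the split is kept only for readability. A hypothetical helper naming the check:

/* hypothetical helper; the bit meaning is taken from the comments above */
static inline bool btc8821a2ant_a2dp_basic_rate(u8 bt_info_ext)
{
	return !!(bt_info_ext & BIT0);
}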
 
-static void halbtc8821a2ant_action_hid_a2dp(struct btc_coexist *btcoexist)
+static void btc8821a2ant_action_hid_a2dp(struct btc_coexist *btcoexist)
 {
-       u8      wifi_rssi_state, bt_rssi_state, bt_info_ext;
-       u32     wifi_bw;
+       u8 wifi_rssi_state, bt_rssi_state, bt_info_ext;
+       u32 wifi_bw;
 
        bt_info_ext = coex_sta->bt_info_ext;
-       wifi_rssi_state = halbtc8821a2ant_wifi_rssi_state(btcoexist, 0, 2,
-                                                         15, 0);
-       bt_rssi_state = halbtc8821a2ant_bt_rssi_state(btcoexist, 2, 35, 0);
+       wifi_rssi_state = btc8821a2ant_wifi_rssi_state(btcoexist, 0, 2, 15, 0);
+       bt_rssi_state = btc8821a2ant_bt_rssi_state(btcoexist, 2, 35, 0);
 
-       if (halbtc8821a2ant_need_to_dec_bt_pwr(btcoexist))
-               halbtc8821a2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, true);
+       if (BTC_RSSI_HIGH(bt_rssi_state))
+               btc8821a2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, true);
        else
-               halbtc8821a2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, false);
+               btc8821a2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, false);
 
        btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_BW, &wifi_bw);
 
-       halbtc8821a2ant_coex_table(btcoexist, NORMAL_EXEC, 0x55ff55ff,
-                                  0x5f5b5f5b, 0xffffff, 0x3);
+       if (wifi_bw == BTC_WIFI_BW_LEGACY) {
+               btc8821a2ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 7);
+               btc8821a2ant_power_save_state(btcoexist, BTC_PS_WIFI_NATIVE,
+                                             0x0, 0x0);
+       } else {
+               btc8821a2ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 14);
+               btc8821a2ant_power_save_state(btcoexist, BTC_PS_LPS_ON, 0x50,
+                                             0x4);
+       }
 
        if (BTC_WIFI_BW_HT40 == wifi_bw) {
                /* fw mechanism */
-               btc8821a2ant_tdma_dur_adj(btcoexist, true, true, 2);
+               if ((bt_rssi_state == BTC_RSSI_STATE_HIGH) ||
+                   (bt_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
+                       if (bt_info_ext & BIT0) {
+                               /* a2dp basic rate */
+                               btc8821a2ant_tdma_duration_adjust(btcoexist,
+                                                                 true, true,
+                                                                 2);
+                       } else {
+                               /* a2dp edr rate */
+                               btc8821a2ant_tdma_duration_adjust(btcoexist,
+                                                                 true, true,
+                                                                 2);
+                       }
+               } else {
+                       if (bt_info_ext & BIT0) {
+                               /* a2dp basic rate */
+                               btc8821a2ant_tdma_duration_adjust(btcoexist,
+                                                                 true, true,
+                                                                 2);
+                       } else {
+                               /* a2dp edr rate */
+                               btc8821a2ant_tdma_duration_adjust(btcoexist,
+                                                                 true, true,
+                                                                 2);
+                       }
+               }
 
                /* sw mechanism */
                if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) ||
                    (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
-                       btc8821a2ant_sw_mech1(btcoexist, true, true,
-                                             false, false);
-                       btc8821a2ant_sw_mech2(btcoexist, true, false,
-                                             false, 0x18);
+                       btc8821a2ant_sw_mechanism1(btcoexist, true, true,
+                                                  false, false);
+                       btc8821a2ant_sw_mechanism2(btcoexist, true, false,
+                                                  false, 0x18);
                } else {
-                       btc8821a2ant_sw_mech1(btcoexist, true, true,
-                                             false, false);
-                       btc8821a2ant_sw_mech2(btcoexist, false, false,
-                                             false, 0x18);
+                       btc8821a2ant_sw_mechanism1(btcoexist, true, true,
+                                                  false, false);
+                       btc8821a2ant_sw_mechanism2(btcoexist, false, false,
+                                                  false, 0x18);
                }
        } else {
                /* fw mechanism */
-               btc8821a2ant_tdma_dur_adj(btcoexist, true, true, 2);
+               if ((bt_rssi_state == BTC_RSSI_STATE_HIGH) ||
+                   (bt_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
+                       if (bt_info_ext & BIT0) {
+                               /* a2dp basic rate */
+                               btc8821a2ant_tdma_duration_adjust(btcoexist,
+                                                                 true, true,
+                                                                 2);
+
+                       } else {
+                               /* a2dp edr rate */
+                               btc8821a2ant_tdma_duration_adjust(btcoexist,
+                                                                 true, true,
+                                                                 2);
+                       }
+               } else {
+                       if (bt_info_ext & BIT0) {
+                               /* a2dp basic rate */
+                               btc8821a2ant_tdma_duration_adjust(btcoexist,
+                                                                 true, true,
+                                                                 2);
+                       } else {
+                               /* a2dp edr rate */
+                               btc8821a2ant_tdma_duration_adjust(btcoexist,
+                                                                 true, true,
+                                                                 2);
+                       }
+               }
 
                /* sw mechanism */
                if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) ||
                    (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
-                       btc8821a2ant_sw_mech1(btcoexist, false, true,
-                                             false, false);
-                       btc8821a2ant_sw_mech2(btcoexist, true, false,
-                                             false, 0x18);
+                       btc8821a2ant_sw_mechanism1(btcoexist, false, true,
+                                                  false, false);
+                       btc8821a2ant_sw_mechanism2(btcoexist, true, false,
+                                                  false, 0x18);
                } else {
-                       btc8821a2ant_sw_mech1(btcoexist, false, true,
-                                             false, false);
-                       btc8821a2ant_sw_mech2(btcoexist, false, false,
-                                             false, 0x18);
+                       btc8821a2ant_sw_mechanism1(btcoexist, false, true,
+                                                  false, false);
+                       btc8821a2ant_sw_mechanism2(btcoexist, false, false,
+                                                  false, 0x18);
                }
        }
 }
 
-static void halbtc8821a2ant_run_coexist_mechanism(struct btc_coexist *btcoexist)
+static void btc8821a2ant_action_wifi_multi_port(struct btc_coexist *btcoexist)
+{
+       btc8821a2ant_fw_dac_swing_lvl(btcoexist, NORMAL_EXEC, 6);
+       btc8821a2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, 0);
+
+       /* sw all off */
+       btc8821a2ant_sw_mechanism1(btcoexist, false, false, false, false);
+       btc8821a2ant_sw_mechanism2(btcoexist, false, false, false, 0x18);
+
+       /* hw all off */
+       btc8821a2ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 0);
+
+       btc8821a2ant_power_save_state(btcoexist, BTC_PS_WIFI_NATIVE, 0x0, 0x0);
+       btc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC, false, 1);
+}
+
+static void btc8821a2ant_run_coexist_mechanism(struct btc_coexist *btcoexist)
 {
        struct rtl_priv *rtlpriv = btcoexist->adapter;
-       bool    wifi_under_5g = false;
-       u8      algorithm = 0;
+       struct btc_bt_link_info *bt_link_info = &btcoexist->bt_link_info;
+       bool wifi_under_5g = false;
+       u8 algorithm = 0;
+       u32 num_of_wifi_link = 0;
+       u32 wifi_link_status = 0;
+       bool miracast_plus_bt = false;
+       bool scan = false, link = false, roam = false;
 
        if (btcoexist->manual_control) {
                RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
@@ -3091,30 +3475,73 @@ static void halbtc8821a2ant_run_coexist_mechanism(struct btc_coexist *btcoexist)
                return;
        }
 
-       btcoexist->btc_get(btcoexist,
-               BTC_GET_BL_WIFI_UNDER_5G, &wifi_under_5g);
+       btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_UNDER_5G, &wifi_under_5g);
 
        if (wifi_under_5g) {
                RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
                         "[BTCoex], RunCoexistMechanism(), run 5G coex setting!!<===\n");
-               halbtc8821a2ant_coex_under_5g(btcoexist);
+               btc8821a2ant_coex_under_5g(btcoexist);
+               return;
+       }
+
+       if (coex_sta->under_ips) {
+               RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                        "[BTCoex], wifi is under IPS !!!\n");
                return;
        }
 
-       algorithm = halbtc8821a2ant_action_algorithm(btcoexist);
+       algorithm = btc8821a2ant_action_algorithm(btcoexist);
        if (coex_sta->c2h_bt_inquiry_page &&
            (BT_8821A_2ANT_COEX_ALGO_PANHS != algorithm)) {
                RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
                         "[BTCoex], BT is under inquiry/page scan !!\n");
-               halbtc8821a2ant_bt_inquiry_page(btcoexist);
+               btc8821a2ant_action_bt_inquiry(btcoexist);
+               return;
+       }
+
+       btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_SCAN, &scan);
+       btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_LINK, &link);
+       btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_ROAM, &roam);
+
+       if (scan || link || roam) {
+               RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                        "[BTCoex], WiFi is under Link Process !!\n");
+               btc8821a2ant_action_wifi_link_process(btcoexist);
+               return;
+       }
+
+       /* for P2P */
+       btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_LINK_STATUS,
+                          &wifi_link_status);
+       num_of_wifi_link = wifi_link_status >> 16;
+
+       if ((num_of_wifi_link >= 2) ||
+           (wifi_link_status & WIFI_P2P_GO_CONNECTED)) {
+               RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                        "############# [BTCoex],  Multi-Port num_of_wifi_link = %d, wifi_link_status = 0x%x\n",
+                        num_of_wifi_link, wifi_link_status);
+
+               if (bt_link_info->bt_link_exist)
+                       miracast_plus_bt = true;
+               else
+                       miracast_plus_bt = false;
+
+               btcoexist->btc_set(btcoexist, BTC_SET_BL_MIRACAST_PLUS_BT,
+                                  &miracast_plus_bt);
+               btc8821a2ant_action_wifi_multi_port(btcoexist);
+
                return;
        }
 
+       miracast_plus_bt = false;
+       btcoexist->btc_set(btcoexist, BTC_SET_BL_MIRACAST_PLUS_BT,
+                          &miracast_plus_bt);
+
        coex_dm->cur_algorithm = algorithm;
        RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
                 "[BTCoex], Algorithm = %d\n", coex_dm->cur_algorithm);
 
-       if (halbtc8821a2ant_is_common_action(btcoexist)) {
+       if (btc8821a2ant_is_common_action(btcoexist)) {
                RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
                         "[BTCoex], Action 2-Ant common\n");
                coex_dm->reset_tdma_adjust = true;
@@ -3130,42 +3557,42 @@ static void halbtc8821a2ant_run_coexist_mechanism(struct btc_coexist *btcoexist)
                case BT_8821A_2ANT_COEX_ALGO_SCO:
                        RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
                                 "[BTCoex], Action 2-Ant, algorithm = SCO\n");
-                       halbtc8821a2ant_action_sco(btcoexist);
+                       btc8821a2ant_action_sco(btcoexist);
                        break;
                case BT_8821A_2ANT_COEX_ALGO_HID:
                        RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
                                 "[BTCoex], Action 2-Ant, algorithm = HID\n");
-                       halbtc8821a2ant_action_hid(btcoexist);
+                       btc8821a2ant_action_hid(btcoexist);
                        break;
                case BT_8821A_2ANT_COEX_ALGO_A2DP:
                        RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
                                 "[BTCoex], Action 2-Ant, algorithm = A2DP\n");
-                       halbtc8821a2ant_action_a2dp(btcoexist);
+                       btc8821a2ant_action_a2dp(btcoexist);
                        break;
                case BT_8821A_2ANT_COEX_ALGO_A2DP_PANHS:
                        RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
                                 "[BTCoex], Action 2-Ant, algorithm = A2DP+PAN(HS)\n");
-                       halbtc8821a2ant_action_a2dp_pan_hs(btcoexist);
+                       btc8821a2ant_action_a2dp_pan_hs(btcoexist);
                        break;
                case BT_8821A_2ANT_COEX_ALGO_PANEDR:
                        RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
                                 "[BTCoex], Action 2-Ant, algorithm = PAN(EDR)\n");
-                       halbtc8821a2ant_action_pan_edr(btcoexist);
+                       btc8821a2ant_action_pan_edr(btcoexist);
                        break;
                case BT_8821A_2ANT_COEX_ALGO_PANHS:
                        RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
                                 "[BTCoex], Action 2-Ant, algorithm = HS mode\n");
-                       halbtc8821a2ant_action_pan_hs(btcoexist);
+                       btc8821a2ant_action_pan_hs(btcoexist);
                        break;
                case BT_8821A_2ANT_COEX_ALGO_PANEDR_A2DP:
                        RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
                                 "[BTCoex], Action 2-Ant, algorithm = PAN+A2DP\n");
-                       halbtc8821a2ant_action_pan_edr_a2dp(btcoexist);
+                       btc8821a2ant_action_pan_edr_a2dp(btcoexist);
                        break;
                case BT_8821A_2ANT_COEX_ALGO_PANEDR_HID:
                        RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
                                 "[BTCoex], Action 2-Ant, algorithm = PAN(EDR)+HID\n");
-                       halbtc8821a2ant_action_pan_edr_hid(btcoexist);
+                       btc8821a2ant_action_pan_edr_hid(btcoexist);
                        break;
                case BT_8821A_2ANT_COEX_ALGO_HID_A2DP_PANEDR:
                        RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
@@ -3175,26 +3602,22 @@ static void halbtc8821a2ant_run_coexist_mechanism(struct btc_coexist *btcoexist)
                case BT_8821A_2ANT_COEX_ALGO_HID_A2DP:
                        RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
                                 "[BTCoex], Action 2-Ant, algorithm = HID+A2DP\n");
-                       halbtc8821a2ant_action_hid_a2dp(btcoexist);
+                       btc8821a2ant_action_hid_a2dp(btcoexist);
                        break;
                default:
                        RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
                                 "[BTCoex], Action 2-Ant, algorithm = coexist All Off!!\n");
-                       halbtc8821a2ant_coex_all_off(btcoexist);
+                       btc8821a2ant_coex_all_off(btcoexist);
                        break;
                }
                coex_dm->pre_algorithm = coex_dm->cur_algorithm;
        }
 }
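
For reference, the multi-port test added to btc8821a2ant_run_coexist_mechanism() decodes wifi_link_status as sketched below; the layout (link count in the upper 16 bits, per-role connected flags such as WIFI_P2P_GO_CONNECTED in the lower bits) is inferred from that function:

/* sketch of the decode used above */
static bool example_is_wifi_multi_port(u32 wifi_link_status)
{
	u32 num_of_wifi_link = wifi_link_status >> 16;

	return (num_of_wifi_link >= 2) ||
	       (wifi_link_status & WIFI_P2P_GO_CONNECTED);
}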
 
-/*============================================================
- *work around function start with wa_halbtc8821a2ant_
- *============================================================
- *============================================================
- * extern function start with EXhalbtc8821a2ant_
- *============================================================
- */
-void ex_halbtc8821a2ant_init_hwconfig(struct btc_coexist *btcoexist)
+/**************************************************************
+ * extern function start with ex_btc8821a2ant_
+ **************************************************************/
+void ex_btc8821a2ant_init_hwconfig(struct btc_coexist *btcoexist)
 {
        struct rtl_priv *rtlpriv = btcoexist->adapter;
        u8 u1tmp = 0;
@@ -3212,36 +3635,30 @@ void ex_halbtc8821a2ant_init_hwconfig(struct btc_coexist *btcoexist)
        u1tmp |= 0x5;
        btcoexist->btc_write_1byte(btcoexist, 0x790, u1tmp);
 
-       /*Antenna config */
-       halbtc8821a2ant_set_ant_path(btcoexist,
-                                    BTC_ANT_WIFI_AT_MAIN, true, false);
+       /* Antenna config */
+       btc8821a2ant_set_ant_path(btcoexist, BTC_ANT_WIFI_AT_MAIN, true, false);
 
        /* PTA parameter */
-       halbtc8821a2ant_coex_table(btcoexist,
-                                  FORCE_EXEC, 0x55555555, 0x55555555,
-                                  0xffff, 0x3);
+       btc8821a2ant_coex_table_with_type(btcoexist, FORCE_EXEC, 0);
 
        /* Enable counter statistics */
-       /*0x76e[3] = 1, WLAN_Act control by PTA*/
-       btcoexist->btc_write_1byte(btcoexist, 0x76e, 0xc);
+       /* 0x76e[3] = 1, WLAN_Act control by PTA */
+       btcoexist->btc_write_1byte(btcoexist, 0x76e, 0x4);
        btcoexist->btc_write_1byte(btcoexist, 0x778, 0x3);
        btcoexist->btc_write_1byte_bitmask(btcoexist, 0x40, 0x20, 0x1);
 }
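
The final masked write (mask 0x20 on register 0x40) goes through btc_write_1byte_bitmask(); a minimal sketch of the read-modify-write such a hook is assumed to perform:

/* illustrative only, not the driver's actual implementation */
static void example_write_1byte_bitmask(struct btc_coexist *btcoexist,
					u32 reg_addr, u8 bit_mask, u8 data)
{
	u8 val = btcoexist->btc_read_1byte(btcoexist, reg_addr);
	u8 shift = ffs(bit_mask) - 1;	/* lowest set bit of the mask */

	val = (val & ~bit_mask) | ((data << shift) & bit_mask);
	btcoexist->btc_write_1byte(btcoexist, reg_addr, val);
}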
 
-void ex_halbtc8821a2ant_init_coex_dm(struct btc_coexist *btcoexist)
+void ex_btc8821a2ant_init_coex_dm(struct btc_coexist *btcoexist)
 {
        struct rtl_priv *rtlpriv = btcoexist->adapter;
 
        RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
                 "[BTCoex], Coex Mechanism Init!!\n");
 
-       halbtc8821a2ant_init_coex_dm(btcoexist);
+       btc8821a2ant_init_coex_dm(btcoexist);
 }
 
-void
-ex_halbtc8821a2ant_display_coex_info(
-       struct btc_coexist *btcoexist
-       )
+void ex_btc8821a2ant_display_coex_info(struct btc_coexist *btcoexist)
 {
        struct btc_board_info *board_info = &btcoexist->board_info;
        struct btc_stack_info *stack_info = &btcoexist->stack_info;
@@ -3397,7 +3814,7 @@ ex_halbtc8821a2ant_display_coex_info(
 
                RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
                         "\r\n %-35s = %d/ %d ", "DecBtPwr/ IgnWlanAct",
-                        coex_dm->cur_dec_bt_pwr,
+                        coex_dm->cur_dec_bt_pwr_lvl,
                         coex_dm->cur_ignore_wlan_act);
        }
 
@@ -3475,7 +3892,7 @@ ex_halbtc8821a2ant_display_coex_info(
        btcoexist->btc_disp_dbg_msg(btcoexist, BTC_DBG_DISP_COEX_STATISTICS);
 }
 
-void ex_halbtc8821a2ant_ips_notify(struct btc_coexist *btcoexist, u8 type)
+void ex_btc8821a2ant_ips_notify(struct btc_coexist *btcoexist, u8 type)
 {
        struct rtl_priv *rtlpriv = btcoexist->adapter;
 
@@ -3483,16 +3900,15 @@ void ex_halbtc8821a2ant_ips_notify(struct btc_coexist *btcoexist, u8 type)
                RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
                         "[BTCoex], IPS ENTER notify\n");
                coex_sta->under_ips = true;
-               halbtc8821a2ant_coex_all_off(btcoexist);
+               btc8821a2ant_coex_all_off(btcoexist);
        } else if (BTC_IPS_LEAVE == type) {
                RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
                         "[BTCoex], IPS LEAVE notify\n");
                coex_sta->under_ips = false;
-               /*halbtc8821a2ant_init_coex_dm(btcoexist);*/
        }
 }
 
-void ex_halbtc8821a2ant_lps_notify(struct btc_coexist *btcoexist, u8 type)
+void ex_btc8821a2ant_lps_notify(struct btc_coexist *btcoexist, u8 type)
 {
        struct rtl_priv *rtlpriv = btcoexist->adapter;
 
@@ -3507,7 +3923,7 @@ void ex_halbtc8821a2ant_lps_notify(struct btc_coexist *btcoexist, u8 type)
        }
 }
 
-void ex_halbtc8821a2ant_scan_notify(struct btc_coexist *btcoexist, u8 type)
+void ex_btc8821a2ant_scan_notify(struct btc_coexist *btcoexist, u8 type)
 {
        struct rtl_priv *rtlpriv = btcoexist->adapter;
 
@@ -3520,7 +3936,7 @@ void ex_halbtc8821a2ant_scan_notify(struct btc_coexist *btcoexist, u8 type)
        }
 }
 
-void ex_halbtc8821a2ant_connect_notify(struct btc_coexist *btcoexist, u8 type)
+void ex_btc8821a2ant_connect_notify(struct btc_coexist *btcoexist, u8 type)
 {
        struct rtl_priv *rtlpriv = btcoexist->adapter;
 
@@ -3533,13 +3949,14 @@ void ex_halbtc8821a2ant_connect_notify(struct btc_coexist *btcoexist, u8 type)
        }
 }
 
-void ex_halbtc8821a2ant_media_status_notify(struct btc_coexist *btcoexist,
-                                           u8 type)
+void ex_btc8821a2ant_media_status_notify(struct btc_coexist *btcoexist,
+                                        u8 type)
 {
        struct rtl_priv *rtlpriv = btcoexist->adapter;
-       u8      h2c_parameter[3] = {0};
-       u32     wifi_bw;
-       u8      wifi_central_chnl;
+       u8 h2c_parameter[3] = {0};
+       u32 wifi_bw;
+       u8 wifi_central_chnl;
+       u8 ap_num = 0;
 
        if (BTC_MEDIA_CONNECT == type) {
                RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
@@ -3549,7 +3966,7 @@ void ex_halbtc8821a2ant_media_status_notify(struct btc_coexist *btcoexist,
                         "[BTCoex], MEDIA disconnect notify\n");
        }
 
-       /* only 2.4G we need to inform bt the chnl mask*/
+       /* only 2.4G we need to inform bt the chnl mask */
        btcoexist->btc_get(btcoexist, BTC_GET_U1_WIFI_CENTRAL_CHNL,
                           &wifi_central_chnl);
        if ((BTC_MEDIA_CONNECT == type) &&
@@ -3557,10 +3974,15 @@ void ex_halbtc8821a2ant_media_status_notify(struct btc_coexist *btcoexist,
                h2c_parameter[0] = 0x1;
                h2c_parameter[1] = wifi_central_chnl;
                btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_BW, &wifi_bw);
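+               /* assumption: the AP count should come from a
+                * BTC_GET_U1_AP_NUM query, as in the other halbtc
+                * drivers; without it, ap_num stays 0 and the
+                * ap_num < 10 test below always selects 0x30
+                */
+               btcoexist->btc_get(btcoexist, BTC_GET_U1_AP_NUM, &ap_num);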
-               if (BTC_WIFI_BW_HT40 == wifi_bw)
+               if (wifi_bw == BTC_WIFI_BW_HT40) {
                        h2c_parameter[2] = 0x30;
-               else
+               } else {
                        h2c_parameter[2] = 0x20;
+                       if (ap_num < 10)
+                               h2c_parameter[2] = 0x30;
+                       else
+                               h2c_parameter[2] = 0x20;
+               }
        }
 
        coex_dm->wifi_chnl_info[0] = h2c_parameter[0];
@@ -3576,8 +3998,9 @@ void ex_halbtc8821a2ant_media_status_notify(struct btc_coexist *btcoexist,
        btcoexist->btc_fill_h2c(btcoexist, 0x66, 3, h2c_parameter);
 }
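
The 3-byte H2C 0x66 payload built above carries: byte 0 = 1 on connect (0 otherwise), byte 1 = the 2.4G central channel, byte 2 = a bandwidth code (0x30 for HT40 or a lightly populated HT20 channel, 0x20 otherwise). A minimal sketch under those assumptions, ignoring the AP-count refinement:

/* sketch; layout taken from the function above */
static void example_fill_wifi_chnl_h2c(struct btc_coexist *btcoexist,
				       bool connect, u8 chnl, bool ht40)
{
	u8 h2c_parameter[3] = {0};

	if (connect) {
		h2c_parameter[0] = 0x1;
		h2c_parameter[1] = chnl;
		h2c_parameter[2] = ht40 ? 0x30 : 0x20;
	}
	btcoexist->btc_fill_h2c(btcoexist, 0x66, 3, h2c_parameter);
}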
 
-void ex_halbtc8821a2ant_special_packet_notify(struct btc_coexist *btcoexist,
-                                             u8 type) {
+void ex_btc8821a2ant_special_packet_notify(struct btc_coexist *btcoexist,
+                                          u8 type)
+{
        struct rtl_priv *rtlpriv = btcoexist->adapter;
 
        if (type == BTC_PACKET_DHCP) {
@@ -3586,19 +4009,18 @@ void ex_halbtc8821a2ant_special_packet_notify(struct btc_coexist *btcoexist,
        }
 }
 
-void ex_halbtc8821a2ant_bt_info_notify(struct btc_coexist *btcoexist,
-                                      u8 *tmp_buf, u8 length)
+void ex_btc8821a2ant_bt_info_notify(struct btc_coexist *btcoexist,
+                                   u8 *tmp_buf, u8 length)
 {
        struct rtl_priv *rtlpriv = btcoexist->adapter;
-       u8              bt_info = 0;
-       u8              i, rsp_source = 0;
-       static u32      set_bt_lna_cnt, set_bt_psd_mode;
-       bool            bt_busy = false, limited_dig = false;
-       bool            wifi_connected = false, bt_hs_on = false;
+       u8 bt_info = 0;
+       u8 i, rsp_source = 0;
+       bool bt_busy = false, limited_dig = false;
+       bool wifi_connected = false, bt_hs_on = false;
 
        coex_sta->c2h_bt_info_req_sent = false;
 
-       rsp_source = tmp_buf[0]&0xf;
+       rsp_source = tmp_buf[0] & 0xf;
        if (rsp_source >= BT_INFO_SRC_8821A_2ANT_MAX)
                rsp_source = BT_INFO_SRC_8821A_2ANT_WIFI_FW;
        coex_sta->bt_info_c2h_cnt[rsp_source]++;
@@ -3610,7 +4032,7 @@ void ex_halbtc8821a2ant_bt_info_notify(struct btc_coexist *btcoexist,
                coex_sta->bt_info_c2h[rsp_source][i] = tmp_buf[i];
                if (i == 1)
                        bt_info = tmp_buf[i];
-               if (i == length-1) {
+               if (i == length - 1) {
                        RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
                                 "0x%02x]\n", tmp_buf[i]);
                } else {
@@ -3620,7 +4042,8 @@ void ex_halbtc8821a2ant_bt_info_notify(struct btc_coexist *btcoexist,
        }
 
        if (BT_INFO_SRC_8821A_2ANT_WIFI_FW != rsp_source) {
-               coex_sta->bt_retry_cnt =        /* [3:0]*/
+               /* [3:0] */
+               coex_sta->bt_retry_cnt =
                        coex_sta->bt_info_c2h[rsp_source][2]&0xf;
 
                coex_sta->bt_rssi =
@@ -3629,53 +4052,28 @@ void ex_halbtc8821a2ant_bt_info_notify(struct btc_coexist *btcoexist,
                coex_sta->bt_info_ext =
                        coex_sta->bt_info_c2h[rsp_source][4];
 
-               /* Here we need to resend some wifi info to BT*/
-               /* because bt is reset and loss of the info.*/
+               /* Here we need to resend some wifi info to BT
+                * because bt has been reset and lost the info
+                */
                if ((coex_sta->bt_info_ext & BIT1)) {
                        btcoexist->btc_get(btcoexist,
                                BTC_GET_BL_WIFI_CONNECTED, &wifi_connected);
                        if (wifi_connected) {
-                               ex_halbtc8821a2ant_media_status_notify(btcoexist,
+                               ex_btc8821a2ant_media_status_notify(btcoexist,
                                        BTC_MEDIA_CONNECT);
                        } else {
-                               ex_halbtc8821a2ant_media_status_notify(btcoexist,
+                               ex_btc8821a2ant_media_status_notify(btcoexist,
                                        BTC_MEDIA_DISCONNECT);
                        }
 
-                       set_bt_psd_mode = 0;
-               }
-               if (set_bt_psd_mode <= 3) {
-                       halbtc8821a2ant_set_bt_psd_mode(btcoexist, FORCE_EXEC,
-                                                       0x0); /*fix CH-BW mode*/
-                       set_bt_psd_mode++;
-               }
-
-               if (coex_dm->cur_bt_lna_constrain) {
-                       if (!(coex_sta->bt_info_ext & BIT2)) {
-                               if (set_bt_lna_cnt <= 3) {
-                                       btc8821a2_set_bt_lna_const(btcoexist,
-                                                                  FORCE_EXEC,
-                                                                  true);
-                                       set_bt_lna_cnt++;
-                               }
-                       }
-               } else {
-                       set_bt_lna_cnt = 0;
                }
 
                if ((coex_sta->bt_info_ext & BIT3)) {
-                       halbtc8821a2ant_ignore_wlan_act(btcoexist,
-                                                       FORCE_EXEC, false);
+                       btc8821a2ant_ignore_wlan_act(btcoexist,
+                                                    FORCE_EXEC, false);
                } else {
                        /* BT already does not ignore WLAN activity, do nothing here */
                }
-
-               if ((coex_sta->bt_info_ext & BIT4)) {
-                       /* BT auto report already enabled, do nothing*/
-               } else {
-                       halbtc8821a2ant_bt_auto_report(btcoexist,
-                                                      FORCE_EXEC, true);
-               }
        }
 
        btcoexist->btc_get(btcoexist, BTC_GET_BL_HS_OPERATION, &bt_hs_on);
@@ -3718,8 +4116,7 @@ void ex_halbtc8821a2ant_bt_info_notify(struct btc_coexist *btcoexist,
                        coex_dm->bt_status = BT_8821A_2ANT_BT_STATUS_IDLE;
                }
 
-               if (bt_hs_on)
-                       coex_dm->bt_status = BT_8821A_2ANT_BT_STATUS_NON_IDLE;
+               btc8821a2ant_update_bt_link_info(btcoexist);
        }
 
        if (BT_8821A_2ANT_BT_STATUS_NON_IDLE == coex_dm->bt_status)
@@ -3736,27 +4133,27 @@ void ex_halbtc8821a2ant_bt_info_notify(struct btc_coexist *btcoexist,
        btcoexist->btc_set(btcoexist,
                BTC_SET_BL_BT_LIMITED_DIG, &limited_dig);
 
-       halbtc8821a2ant_run_coexist_mechanism(btcoexist);
+       btc8821a2ant_run_coexist_mechanism(btcoexist);
 }
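
The C2H BT-info frame parsed above is laid out as: byte0[3:0] = response source, byte 1 = bt_info flags, byte2[3:0] = retry count, byte 3 = raw BT RSSI, byte 4 = the extended info bits tested throughout this file (BIT0 A2DP basic rate, BIT1 BT re-initialized, BIT3 BT ignores WLAN_Act). A hypothetical decoder, for illustration only:

struct example_bt_info {
	u8 rsp_source;
	u8 bt_info;
	u8 retry_cnt;
	u8 rssi_raw;	/* the driver applies its own scaling (not shown in this hunk) */
	u8 info_ext;
};

/* indices mirror the parsing in the function above */
static void example_parse_bt_info(const u8 *buf, struct example_bt_info *out)
{
	out->rsp_source = buf[0] & 0xf;
	out->bt_info = buf[1];
	out->retry_cnt = buf[2] & 0xf;
	out->rssi_raw = buf[3];
	out->info_ext = buf[4];
}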
 
-void ex_halbtc8821a2ant_halt_notify(struct btc_coexist *btcoexist)
+void ex_btc8821a2ant_halt_notify(struct btc_coexist *btcoexist)
 {
        struct rtl_priv *rtlpriv = btcoexist->adapter;
 
        RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
                 "[BTCoex], Halt notify\n");
 
-       halbtc8821a2ant_ignore_wlan_act(btcoexist, FORCE_EXEC, true);
-       ex_halbtc8821a2ant_media_status_notify(btcoexist, BTC_MEDIA_DISCONNECT);
+       btc8821a2ant_ignore_wlan_act(btcoexist, FORCE_EXEC, true);
+       ex_btc8821a2ant_media_status_notify(btcoexist, BTC_MEDIA_DISCONNECT);
 }
 
-void ex_halbtc8821a2ant_periodical(struct btc_coexist *btcoexist)
+void ex_btc8821a2ant_periodical(struct btc_coexist *btcoexist)
 {
        struct rtl_priv *rtlpriv = btcoexist->adapter;
-       static u8       dis_ver_info_cnt;
-       u32             fw_ver = 0, bt_patch_ver = 0;
+       static u8 dis_ver_info_cnt;
        struct btc_board_info *board_info = &btcoexist->board_info;
        struct btc_stack_info *stack_info = &btcoexist->stack_info;
+       u32 fw_ver = 0, bt_patch_ver = 0;
 
        RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
                 "[BTCoex], ==========================Periodical===========================\n");
@@ -3785,7 +4182,7 @@ void ex_halbtc8821a2ant_periodical(struct btc_coexist *btcoexist)
                         "[BTCoex], ****************************************************************\n");
        }
 
-       halbtc8821a2ant_query_bt_info(btcoexist);
-       halbtc8821a2ant_monitor_bt_ctr(btcoexist);
-       btc8821a2ant_mon_bt_en_dis(btcoexist);
+       btc8821a2ant_query_bt_info(btcoexist);
+       btc8821a2ant_monitor_bt_ctr(btcoexist);
+       btc8821a2ant_monitor_wifi_ctr(btcoexist);
 }
index b4cf1f53d5105845b875f3f5ebb9e408dcb7151d..535ca10e910b34e03cd66276f1102b87c668f96f 100644 (file)
 
 #define        BTC_RSSI_COEX_THRESH_TOL_8821A_2ANT     2
 
+/* WiFi RSSI Threshold for 2-Ant TDMA/1-Ant PS-TDMA translation */
+#define BT_8821A_2ANT_WIFI_RSSI_COEXSWITCH_THRES 42
+/* BT RSSI Threshold for 2-Ant TDMA/1-Ant PS-TDMA translation */
+#define BT_8821A_2ANT_BT_RSSI_COEXSWITCH_THRES 46
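+
+/* These thresholds are presumably consumed by the rssi-state helpers,
+ * e.g. (hypothetical call, matching the helper signatures used in the
+ * .c file):
+ *
+ *	wifi_rssi_state = btc8821a2ant_wifi_rssi_state(btcoexist, 0, 2,
+ *		BT_8821A_2ANT_WIFI_RSSI_COEXSWITCH_THRES, 0);
+ */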
+
 enum _BT_INFO_SRC_8821A_2ANT {
        BT_INFO_SRC_8821A_2ANT_WIFI_FW          = 0x0,
        BT_INFO_SRC_8821A_2ANT_BT_RSP           = 0x1,
@@ -69,8 +74,8 @@ enum _BT_8821A_2ANT_COEX_ALGO {
 
 struct coex_dm_8821a_2ant {
        /* fw mechanism */
-       bool            pre_dec_bt_pwr;
-       bool            cur_dec_bt_pwr;
+       bool            pre_dec_bt_pwr_lvl;
+       bool            cur_dec_bt_pwr_lvl;
        bool            pre_bt_lna_constrain;
        bool            cur_bt_lna_constrain;
        u8              pre_bt_psd_mode;
@@ -82,8 +87,9 @@ struct coex_dm_8821a_2ant {
        u8              pre_ps_tdma;
        u8              cur_ps_tdma;
        u8              ps_tdma_para[5];
-       u8              tdma_adj_type;
+       u8              ps_tdma_du_adj_type;
        bool            reset_tdma_adjust;
+       bool            auto_tdma_adjust;
        bool            pre_ps_tdma_on;
        bool            cur_ps_tdma_on;
        bool            pre_bt_auto_report;
@@ -118,6 +124,10 @@ struct coex_dm_8821a_2ant {
        u8              cur_algorithm;
        u8              bt_status;
        u8              wifi_chnl_info[3];
+       u8              pre_lps;
+       u8              cur_lps;
+       u8              pre_rpwm;
+       u8              cur_rpwm;
 };
 
 struct coex_sta_8821a_2ant {
@@ -141,6 +151,19 @@ struct coex_sta_8821a_2ant {
        bool    c2h_bt_inquiry_page;
        u8      bt_retry_cnt;
        u8      bt_info_ext;
+
+       u32     crc_ok_cck;
+       u32     crc_ok_11g;
+       u32     crc_ok_11n;
+       u32     crc_ok_11n_agg;
+
+       u32     crc_err_cck;
+       u32     crc_err_11g;
+       u32     crc_err_11n;
+       u32     crc_err_11n_agg;
+
+       u8      coex_table_type;
+       bool    force_lps_on;
 };
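
The new crc_ok_*/crc_err_* counters feed the Wi-Fi traffic monitor added in this series (btc8821a2ant_monitor_wifi_ctr). A hypothetical helper showing one way such counters can be summarized:

/* hypothetical; not part of the driver */
static u32 example_crc_ok_percent(const struct coex_sta_8821a_2ant *sta)
{
	u32 ok = sta->crc_ok_cck + sta->crc_ok_11g +
		 sta->crc_ok_11n + sta->crc_ok_11n_agg;
	u32 err = sta->crc_err_cck + sta->crc_err_11g +
		  sta->crc_err_11n + sta->crc_err_11n_agg;

	return (ok + err) ? (ok * 100) / (ok + err) : 0;
}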
 
 /*===========================================
index 150aeb8e79d1a4870abbc6e6f8174e3ae7fb6990..f1300061291375dda7084923dd4682d7ad13ce25 100644 (file)
@@ -466,7 +466,7 @@ static bool halbtc_set(void *void_btcoexist, u8 set_type, void *in_buf)
        case BTC_SET_ACT_DISABLE_LOW_POWER:
                halbtc_disable_low_power();
                break;
-       case BTC_SET_ACT_UPDATE_ra_mask:
+       case BTC_SET_ACT_UPDATE_RAMASK:
                btcoexist->bt_info.ra_mask = *u32_tmp;
                break;
        case BTC_SET_ACT_SEND_MIMO_PS:
index 601bbe1d22b35ff911064c3db5638108eca91f94..c8271135aaaaed26af1ba359374fc1f133542b38 100644 (file)
 #define                BTC_ANT_WIFI_AT_CPL_MAIN                0
 #define                BTC_ANT_WIFI_AT_CPL_AUX                 1
 
+enum btc_bt_reg_type {
+       BTC_BT_REG_RF           = 0,
+       BTC_BT_REG_MODEM        = 1,
+       BTC_BT_REG_BLUEWIZE     = 2,
+       BTC_BT_REG_VENDOR       = 3,
+       BTC_BT_REG_LE           = 4,
+       BTC_BT_REG_MAX
+};
+
 enum btc_chip_interface {
        BTC_INTF_UNKNOWN        = 0,
        BTC_INTF_PCI            = 1,
@@ -139,6 +148,7 @@ struct btc_board_info {
        u8 pg_ant_num;  /* pg ant number */
        u8 btdm_ant_num;        /* ant number for btdm */
        u8 btdm_ant_pos;
+       u8 single_ant_path; /* current used for 8723b only, 1=>s0,  0=>s1 */
        bool bt_exist;
 };
 
@@ -205,6 +215,7 @@ enum btc_get_type {
        BTC_GET_BL_WIFI_ENABLE_ENCRYPTION,
        BTC_GET_BL_WIFI_UNDER_B_MODE,
        BTC_GET_BL_EXT_SWITCH,
+       BTC_GET_BL_WIFI_IS_IN_MP_MODE,
 
        /* type s4Byte */
        BTC_GET_S4_WIFI_RSSI,
@@ -249,6 +260,8 @@ enum btc_set_type {
        BTC_SET_BL_TO_REJ_AP_AGG_PKT,
        BTC_SET_BL_BT_CTRL_AGG_SIZE,
        BTC_SET_BL_INC_SCAN_DEV_NUM,
+       BTC_SET_BL_BT_TX_RX_MASK,
+       BTC_SET_BL_MIRACAST_PLUS_BT,
 
        /* type u1Byte */
        BTC_SET_U1_RSSI_ADJ_VAL_FOR_AGC_TABLE_ON,
@@ -275,7 +288,7 @@ enum btc_set_type {
        BTC_SET_ACT_NORMAL_LPS,
        BTC_SET_ACT_INC_FORCE_EXEC_PWR_CMD_CNT,
        BTC_SET_ACT_DISABLE_LOW_POWER,
-       BTC_SET_ACT_UPDATE_ra_mask,
+       BTC_SET_ACT_UPDATE_RAMASK,
        BTC_SET_ACT_SEND_MIMO_PS,
        /* BT Coex related */
        BTC_SET_ACT_CTRL_BT_INFO,
@@ -366,6 +379,7 @@ typedef void (*bfp_btc_w2)(void *btc_context, u32 reg_addr, u16 data);
 
 typedef void (*bfp_btc_w4)(void *btc_context, u32 reg_addr, u32 data);
 
+typedef void (*bfp_btc_local_reg_w1)(void *btc_context, u32 reg_addr, u8 data);
 typedef void (*bfp_btc_wr_1byte_bit_mask)(void *btc_context, u32 reg_addr,
                                          u8 bit_mask, u8 data);
 
@@ -388,6 +402,9 @@ typedef     bool (*bfp_btc_get)(void *btcoexist, u8 get_type, void *out_buf);
 
 typedef        bool (*bfp_btc_set)(void *btcoexist, u8 set_type, void *in_buf);
 
+typedef void (*bfp_btc_set_bt_reg)(void *btc_context, u8 reg_type, u32 offset,
+                                  u32 value);
+
 typedef void (*bfp_btc_disp_dbg_msg)(void *btcoexist, u8 disp_type);
 
 struct btc_bt_info {
@@ -459,6 +476,7 @@ struct btc_bt_link_info {
        bool hid_only;
        bool pan_exist;
        bool pan_only;
+       bool slave_role;
 };
 
 enum btc_antenna_pos {
@@ -492,6 +510,7 @@ struct btc_coexist {
        bfp_btc_w2 btc_write_2byte;
        bfp_btc_r4 btc_read_4byte;
        bfp_btc_w4 btc_write_4byte;
+       bfp_btc_local_reg_w1 btc_write_local_reg_1byte;
 
        bfp_btc_set_bb_reg btc_set_bb_reg;
        bfp_btc_get_bb_reg btc_get_bb_reg;
@@ -505,6 +524,8 @@ struct btc_coexist {
 
        bfp_btc_get btc_get;
        bfp_btc_set btc_set;
+
+       bfp_btc_set_bt_reg btc_set_bt_reg;
 };
 
 bool halbtc_is_wifi_uplink(struct rtl_priv *adapter);
index 558c31bf5c807dda6b56539b39f3bdc3e29292f3..1bf3eb25c1da85a805451e885e51665b85478781 100644 (file)
@@ -435,7 +435,7 @@ int rtl_regd_init(struct ieee80211_hw *hw,
                channel_plan_to_country_code(rtlpriv->efuse.channel_plan);
 
        RT_TRACE(rtlpriv, COMP_REGD, DBG_DMESG,
-                "rtl: EEPROM regdomain: 0x%0x conuntry code: %d\n",
+                "rtl: EEPROM regdomain: 0x%0x country code: %d\n",
                 rtlpriv->efuse.channel_plan, rtlpriv->regd.country_code);
 
        if (rtlpriv->regd.country_code >= COUNTRY_CODE_MAX) {
index 9fec345a42a01c1a03fd74f6b4b9f14cfd148598..1f42ce5f8f27fb8f46c69b91e0c7d636b5f5ce98 100644 (file)
@@ -468,8 +468,10 @@ void rtl92ee_set_fw_media_status_rpt_cmd(struct ieee80211_hw *hw, u8 mstatus)
 #define PSPOLL_PG              2
 #define NULL_PG                        3
 #define PROBERSP_PG            4 /* ->5 */
+#define QOS_NULL_PG            6
+#define BT_QOS_NULL_PG         7
 
-#define TOTAL_RESERVED_PKT_LEN 768
+#define TOTAL_RESERVED_PKT_LEN 1024
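
The bump from 768 to 1024 follows from the page math: reserved tx pages are 128 bytes each, and the buffer now holds pages 0-7 (beacon through BT QoS null) instead of 0-5, so 8 * 128 = 1024. The offset arithmetic used below:

/* page N starts at byte N * 128 */
qosnull = &reserved_page_packet[QOS_NULL_PG * 128];      /* 6 * 128 = 768 */
btqosnull = &reserved_page_packet[BT_QOS_NULL_PG * 128]; /* 7 * 128 = 896 */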
 
 static u8 reserved_page_packet[TOTAL_RESERVED_PKT_LEN] = {
        /* page 0 beacon */
@@ -570,6 +572,42 @@ static u8 reserved_page_packet[TOTAL_RESERVED_PKT_LEN] = {
        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+       0x1A, 0x00, 0x28, 0x8C, 0x00, 0x12, 0x00, 0x00,
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00,
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+       0x00, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+
+       /* page 6 qos null data */
+       0xC8, 0x01, 0x00, 0x00, 0x84, 0xC9, 0xB2, 0xA7,
+       0xB3, 0x6E, 0x00, 0xE0, 0x4C, 0x02, 0x51, 0x02,
+       0x84, 0xC9, 0xB2, 0xA7, 0xB3, 0x6E, 0x00, 0x00,
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+       0x1A, 0x00, 0x28, 0x8C, 0x00, 0x12, 0x00, 0x00,
+       0x00, 0x00, 0x80, 0x00, 0x00, 0x01, 0x00, 0x00,
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+       0x00, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+
+       /* page 7 BT-qos null data */
+       0xC8, 0x01, 0x00, 0x00, 0x84, 0xC9, 0xB2, 0xA7,
+       0xB3, 0x6E, 0x00, 0xE0, 0x4C, 0x02, 0x51, 0x02,
+       0x84, 0xC9, 0xB2, 0xA7, 0xB3, 0x6E, 0x00, 0x00,
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
@@ -595,6 +633,8 @@ void rtl92ee_set_fw_rsvdpagepkt(struct ieee80211_hw *hw, bool b_dl_finished)
        u8 *p_pspoll;
        u8 *nullfunc;
        u8 *p_probersp;
+       u8 *qosnull;
+       u8 *btqosnull;
        /*---------------------------------------------------------
         *                      (1) beacon
         *---------------------------------------------------------
@@ -636,6 +676,28 @@ void rtl92ee_set_fw_rsvdpagepkt(struct ieee80211_hw *hw, bool b_dl_finished)
 
        SET_H2CCMD_RSVDPAGE_LOC_PROBE_RSP(u1rsvdpageloc, PROBERSP_PG);
 
+       /*---------------------------------------------------------
+        *                      (5) QoS null data
+        *----------------------------------------------------------
+        */
+       qosnull = &reserved_page_packet[QOS_NULL_PG * 128];
+       SET_80211_HDR_ADDRESS1(qosnull, mac->bssid);
+       SET_80211_HDR_ADDRESS2(qosnull, mac->mac_addr);
+       SET_80211_HDR_ADDRESS3(qosnull, mac->bssid);
+
+       SET_H2CCMD_RSVDPAGE_LOC_QOS_NULL_DATA(u1rsvdpageloc, QOS_NULL_PG);
+
+       /*---------------------------------------------------------
+        *                      (6) BT QoS null data
+        *----------------------------------------------------------
+        */
+       btqosnull = &reserved_page_packet[BT_QOS_NULL_PG * 128];
+       SET_80211_HDR_ADDRESS1(btqosnull, mac->bssid);
+       SET_80211_HDR_ADDRESS2(btqosnull, mac->mac_addr);
+       SET_80211_HDR_ADDRESS3(btqosnull, mac->bssid);
+
+       SET_H2CCMD_RSVDPAGE_LOC_BT_QOS_NULL_DATA(u1rsvdpageloc, BT_QOS_NULL_PG);
+
        totalpacketlen = TOTAL_RESERVED_PKT_LEN;
 
        RT_PRINT_DATA(rtlpriv, COMP_CMD, DBG_LOUD ,
index 72da3f92f02c57911ee307e5c74e22d8750e97fb..af8271967a880bab1c2f9f5c050c0478c6777d3c 100644 (file)
@@ -165,6 +165,10 @@ enum rtl8192e_c2h_evt {
        SET_BITS_TO_LE_1BYTE((__ph2ccmd)+1, 0, 8, __val)
 #define SET_H2CCMD_RSVDPAGE_LOC_NULL_DATA(__ph2ccmd, __val)            \
        SET_BITS_TO_LE_1BYTE((__ph2ccmd)+2, 0, 8, __val)
+#define SET_H2CCMD_RSVDPAGE_LOC_QOS_NULL_DATA(__ph2ccmd, __val)                \
+       SET_BITS_TO_LE_1BYTE((__ph2ccmd) + 3, 0, 8, __val)
+#define SET_H2CCMD_RSVDPAGE_LOC_BT_QOS_NULL_DATA(__ph2ccmd, __val)     \
+       SET_BITS_TO_LE_1BYTE((__ph2ccmd) + 4, 0, 8, __val)
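
SET_BITS_TO_LE_1BYTE(ptr, bit_offset, bit_len, value), used by all the RSVDPAGE_LOC macros here, is assumed to behave as a masked read-modify-write on a single byte, roughly:

/* sketch of the assumed semantics */
static inline void example_set_bits_le_1byte(u8 *p, u8 off, u8 len, u8 val)
{
	u8 mask = ((1u << len) - 1) << off;

	*p = (*p & ~mask) | ((u8)(val << off) & mask);
}

so the new QOS_NULL/BT_QOS_NULL macros simply store a page index into bytes 3 and 4 of the H2C buffer.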
 
 /* _MEDIA_STATUS_RPT_PARM_CMD1 */
 #define SET_H2CCMD_MSRRPT_PARM_OPMODE(__cmd, __val)            \
index 56ca7f5351eaba043a792764fe80c5860a0a9aae..6f5098a18655536e6c210a5cd70e0cef55c4b3a5 100644 (file)
@@ -699,9 +699,9 @@ static bool _rtl92ee_llt_table_init(struct ieee80211_hw *hw)
        u8 txpktbuf_bndy;
        u8 u8tmp, testcnt = 0;
 
-       txpktbuf_bndy = 0xFA;
+       txpktbuf_bndy = 0xF7;
 
-       rtl_write_dword(rtlpriv, REG_RQPN, 0x80E90808);
+       rtl_write_dword(rtlpriv, REG_RQPN, 0x80E60808);
 
        rtl_write_byte(rtlpriv, REG_TRXFF_BNDY, txpktbuf_bndy);
        rtl_write_word(rtlpriv, REG_TRXFF_BNDY + 2, 0x3d00 - 1);
index c7ee9ba5e26ea15e4152e2ed4110e946f4d5b78a..4fc839b1d60165321f302006fdc813edb5826613 100644 (file)
@@ -284,8 +284,10 @@ void rtl8723be_set_fw_media_status_rpt_cmd(struct ieee80211_hw *hw, u8 mstatus)
 #define PSPOLL_PG              2
 #define NULL_PG                        3
 #define PROBERSP_PG            4 /* ->5 */
+#define QOS_NULL_PG            6
+#define BT_QOS_NULL_PG         7
 
-#define TOTAL_RESERVED_PKT_LEN 768
+#define TOTAL_RESERVED_PKT_LEN 1024    /* can be up to 1280 (tx_bndy=245) */
 
 static u8 reserved_page_packet[TOTAL_RESERVED_PKT_LEN] = {
        /* page 0 beacon */
@@ -390,11 +392,48 @@ static u8 reserved_page_packet[TOTAL_RESERVED_PKT_LEN] = {
        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+       0x1A, 0x00, 0x28, 0x8C, 0x00, 0x12, 0x00, 0x00,
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00,
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+       0x00, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+
+       /* page 6 qos null data */
+       0xC8, 0x01, 0x00, 0x00, 0x84, 0xC9, 0xB2, 0xA7,
+       0xB3, 0x6E, 0x00, 0xE0, 0x4C, 0x02, 0x51, 0x02,
+       0x84, 0xC9, 0xB2, 0xA7, 0xB3, 0x6E, 0x00, 0x00,
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+       0x1A, 0x00, 0x28, 0x8C, 0x00, 0x12, 0x00, 0x00,
+       0x00, 0x00, 0x80, 0x00, 0x00, 0x01, 0x00, 0x00,
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+       0x00, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+
+       /* page 7 BT-qos null data */
+       0xC8, 0x01, 0x00, 0x00, 0x84, 0xC9, 0xB2, 0xA7,
+       0xB3, 0x6E, 0x00, 0xE0, 0x4C, 0x02, 0x51, 0x02,
+       0x84, 0xC9, 0xB2, 0xA7, 0xB3, 0x6E, 0x00, 0x00,
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
 };
 
 void rtl8723be_set_fw_rsvdpagepkt(struct ieee80211_hw *hw,
@@ -413,6 +452,8 @@ void rtl8723be_set_fw_rsvdpagepkt(struct ieee80211_hw *hw,
        u8 *p_pspoll;
        u8 *nullfunc;
        u8 *p_probersp;
+       u8 *qosnull;
+       u8 *btqosnull;
        /*---------------------------------------------------------
         *                      (1) beacon
         *---------------------------------------------------------
@@ -454,6 +495,28 @@ void rtl8723be_set_fw_rsvdpagepkt(struct ieee80211_hw *hw,
 
        SET_H2CCMD_RSVDPAGE_LOC_PROBE_RSP(u1rsvdpageloc, PROBERSP_PG);
 
+       /*---------------------------------------------------------
+        *                      (5) QoS Null
+        *---------------------------------------------------------
+        */
+       qosnull = &reserved_page_packet[QOS_NULL_PG * 128];
+       SET_80211_HDR_ADDRESS1(qosnull, mac->bssid);
+       SET_80211_HDR_ADDRESS2(qosnull, mac->mac_addr);
+       SET_80211_HDR_ADDRESS3(qosnull, mac->bssid);
+
+       SET_H2CCMD_RSVDPAGE_LOC_QOS_NULL_DATA(u1rsvdpageloc, QOS_NULL_PG);
+
+       /*---------------------------------------------------------
+        *                      (6) BT QoS Null
+        *---------------------------------------------------------
+        */
+       btqosnull = &reserved_page_packet[BT_QOS_NULL_PG * 128];
+       SET_80211_HDR_ADDRESS1(btqosnull, mac->bssid);
+       SET_80211_HDR_ADDRESS2(btqosnull, mac->mac_addr);
+       SET_80211_HDR_ADDRESS3(btqosnull, mac->bssid);
+
+       SET_H2CCMD_RSVDPAGE_LOC_BT_QOS_NULL_DATA(u1rsvdpageloc, BT_QOS_NULL_PG);
+
        totalpacketlen = TOTAL_RESERVED_PKT_LEN;
 
        RT_PRINT_DATA(rtlpriv, COMP_CMD, DBG_LOUD,
@@ -461,7 +524,7 @@ void rtl8723be_set_fw_rsvdpagepkt(struct ieee80211_hw *hw,
                      &reserved_page_packet[0], totalpacketlen);
        RT_PRINT_DATA(rtlpriv, COMP_CMD, DBG_DMESG,
                      "rtl8723be_set_fw_rsvdpagepkt(): HW_VAR_SET_TX_CMD: ALL\n",
-                     u1rsvdpageloc, 3);
+                     u1rsvdpageloc, sizeof(u1rsvdpageloc));
 
        skb = dev_alloc_skb(totalpacketlen);
        memcpy((u8 *)skb_put(skb, totalpacketlen),
@@ -476,7 +539,7 @@ void rtl8723be_set_fw_rsvdpagepkt(struct ieee80211_hw *hw,
                RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD,
                         "Set RSVD page location to Fw.\n");
                RT_PRINT_DATA(rtlpriv, COMP_CMD, DBG_DMESG, "H2C_RSVDPAGE:\n",
-                             u1rsvdpageloc, 3);
+                             u1rsvdpageloc, sizeof(u1rsvdpageloc));
                rtl8723be_fill_h2c_cmd(hw, H2C_8723B_RSVDPAGE,
                                       sizeof(u1rsvdpageloc), u1rsvdpageloc);
        } else
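
Replacing the hard-coded length 3 with sizeof(u1rsvdpageloc) keeps both
debug dumps and the H2C payload in step with the two new location bytes.
A sketch of the byte layout implied by the RSVDPAGE_LOC macros in the
header below (the declaration of u1rsvdpageloc sits outside this hunk,
so its size here is an assumption):

    /* Offsets visible in the macros: null data at +2, qos null at +3,
     * bt qos null at +4; bytes 0-1 (probe rsp, ps-poll) are inferred.
     */
    u8 u1rsvdpageloc[5] = { 0 };    /* assumed size: sizeof() == 5 */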
index c652fa1339a760642a8c021c8200ba4777de49ed..2482b3bc2bfaa890a0711402029ed285e5093929 100644 (file)
@@ -139,6 +139,10 @@ enum rtl8723b_c2h_evt {
        SET_BITS_TO_LE_1BYTE((__ph2ccmd)+1, 0, 8, __val)
 #define SET_H2CCMD_RSVDPAGE_LOC_NULL_DATA(__ph2ccmd, __val)            \
        SET_BITS_TO_LE_1BYTE((__ph2ccmd)+2, 0, 8, __val)
+#define SET_H2CCMD_RSVDPAGE_LOC_QOS_NULL_DATA(__ph2ccmd, __val)        \
+       SET_BITS_TO_LE_1BYTE((__ph2ccmd) + 3, 0, 8, __val)
+#define SET_H2CCMD_RSVDPAGE_LOC_BT_QOS_NULL_DATA(__ph2ccmd, __val)     \
+       SET_BITS_TO_LE_1BYTE((__ph2ccmd) + 4, 0, 8, __val)
 
 
 void rtl8723be_fill_h2c_cmd(struct ieee80211_hw *hw, u8 element_id,
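
With the (..., 0, 8, ...) arguments every macro here uses, the bit-field
write degenerates to a whole-byte store. A sketch of the general
semantics of SET_BITS_TO_LE_1BYTE (not the driver's actual macro body,
which is defined elsewhere in rtlwifi):

    /* Write len bits of val at bit offset off into the byte at p. */
    static inline void set_bits_le_1byte(u8 *p, unsigned int off,
                                         unsigned int len, u8 val)
    {
            u8 mask = ((1u << len) - 1) << off;

            *p = (*p & ~mask) | ((u8)(val << off) & mask);
    }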
index 92dbfa8f297f318a766506e50be2d102c312f470..8c0ac96b543005efe8348b7f09dfa4f2be725e90 100644 (file)
@@ -91,7 +91,7 @@ int rtl8723be_init_sw_vars(struct ieee80211_hw *hw)
        struct rtl_priv *rtlpriv = rtl_priv(hw);
        struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
        struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
-       char *fw_name = "rtlwifi/rtl8723befw.bin";
+       char *fw_name = "rtlwifi/rtl8723befw_36.bin";
 
        rtl8723be_bt_reg_init(hw);
        rtlpriv->btcoexist.btc_ops = rtl_btc_get_ops_pointer();
@@ -187,8 +187,16 @@ int rtl8723be_init_sw_vars(struct ieee80211_hw *hw)
                                      rtlpriv->io.dev, GFP_KERNEL, hw,
                                      rtl_fw_cb);
        if (err) {
-               pr_err("Failed to request firmware!\n");
-               return 1;
+               /* Failed to get firmware. Check if old version available */
+               fw_name = "rtlwifi/rtl8723befw.bin";
+               pr_info("Using firmware %s\n", fw_name);
+               err = request_firmware_nowait(THIS_MODULE, 1, fw_name,
+                                             rtlpriv->io.dev, GFP_KERNEL, hw,
+                                             rtl_fw_cb);
+               if (err) {
+                       pr_err("Failed to request firmware!\n");
+                       return 1;
+               }
        }
        return 0;
 }
@@ -384,6 +392,7 @@ MODULE_AUTHOR("Realtek WlanFAE      <wlanfae@realtek.com>");
 MODULE_LICENSE("GPL");
 MODULE_DESCRIPTION("Realtek 8723BE 802.11n PCI wireless");
 MODULE_FIRMWARE("rtlwifi/rtl8723befw.bin");
+MODULE_FIRMWARE("rtlwifi/rtl8723befw_36.bin");
 
 module_param_named(swenc, rtl8723be_mod_params.sw_crypto, bool, 0444);
 module_param_named(debug_level, rtl8723be_mod_params.debug_level, int, 0644);
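
The request path above now degrades gracefully: ask for the new _36
image first and retry with the legacy name before giving up. Condensed
to its shape (same calls as in the hunk, intermediate logging dropped):

    err = request_firmware_nowait(THIS_MODULE, 1,
                                  "rtlwifi/rtl8723befw_36.bin",
                                  rtlpriv->io.dev, GFP_KERNEL, hw,
                                  rtl_fw_cb);
    if (err)        /* new image absent: fall back to the old one */
            err = request_firmware_nowait(THIS_MODULE, 1,
                                          "rtlwifi/rtl8723befw.bin",
                                          rtlpriv->io.dev, GFP_KERNEL, hw,
                                          rtl_fw_cb);
    if (err)
            return 1;

Declaring both names with MODULE_FIRMWARE() above ensures initramfs
tooling can ship either image.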
index a504dfae4ed37486ea90451bd000022c4eac56f4..73350103b736c181b040e722112861ce107831e4 100644 (file)
@@ -678,12 +678,13 @@ void rtl8821ae_set_fw_global_info_cmd(struct ieee80211_hw *hw)
 #define PSPOLL_PG              1
 #define NULL_PG                        2
 #define QOSNULL_PG             3
-#define ARPRESP_PG             4
-#define REMOTE_PG              5
-#define GTKEXT_PG              6
+#define BT_QOSNULL_PG          4
+#define ARPRESP_PG             5
+#define REMOTE_PG              6
+#define GTKEXT_PG              7
 
-#define TOTAL_RESERVED_PKT_LEN_8812    3584
-#define TOTAL_RESERVED_PKT_LEN_8821    1792
+#define TOTAL_RESERVED_PKT_LEN_8812    4096
+#define TOTAL_RESERVED_PKT_LEN_8821    2048
 
 static u8 reserved_page_packet_8821[TOTAL_RESERVED_PKT_LEN_8821] = {
        /* page 0: beacon */
@@ -813,13 +814,46 @@ static u8 reserved_page_packet_8821[TOTAL_RESERVED_PKT_LEN_8821] = {
        0x00, 0x00, 0x00, 0x00,  0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,  0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,  0x00, 0x00, 0x00, 0x00,
+       0x1A, 0x00, 0x28, 0x8C,  0x00, 0x12, 0x00, 0x00,
+       0x00, 0x00, 0x80, 0x00,  0x00, 0x01, 0x00, 0x00,
+       0x00, 0x00, 0x00, 0x00,  0x00, 0x00, 0x00, 0x00,
+       0x00, 0x00, 0x00, 0x00,  0x00, 0x00, 0x00, 0x00,
+       0x00, 0x80, 0x00, 0x00,  0x00, 0x00, 0x00, 0x00,
+       /* page 4: BT qos null data */
+       0xC8, 0x01, 0x00, 0x00,  0x84, 0xC9, 0xB2, 0xA7,
+       0xB3, 0x6E, 0x00, 0xE0,  0x4C, 0x02, 0x51, 0x02,
+       0x84, 0xC9, 0xB2, 0xA7,  0xB3, 0x6E, 0x00, 0x00,
+       0x00, 0x00, 0x00, 0x00,  0x00, 0x00, 0x00, 0x00,
+       0x00, 0x00, 0x00, 0x00,  0x00, 0x00, 0x00, 0x00,
+       0x00, 0x00, 0x00, 0x00,  0x00, 0x00, 0x00, 0x00,
+       0x00, 0x00, 0x00, 0x00,  0x00, 0x00, 0x00, 0x00,
+       0x00, 0x00, 0x00, 0x00,  0x00, 0x00, 0x00, 0x00,
+       0x00, 0x00, 0x00, 0x00,  0x00, 0x00, 0x00, 0x00,
+       0x00, 0x00, 0x00, 0x00,  0x00, 0x00, 0x00, 0x00,
+       0x00, 0x00, 0x00, 0x00,  0x00, 0x00, 0x00, 0x00,
+       0x00, 0x00, 0x00, 0x00,  0x00, 0x00, 0x00, 0x00,
+       0x00, 0x00, 0x00, 0x00,  0x00, 0x00, 0x00, 0x00,
+       0x00, 0x00, 0x00, 0x00,  0x00, 0x00, 0x00, 0x00,
+       0x00, 0x00, 0x00, 0x00,  0x00, 0x00, 0x00, 0x00,
+       0x00, 0x00, 0x00, 0x00,  0x00, 0x00, 0x00, 0x00,
+       0x00, 0x00, 0x00, 0x00,  0x00, 0x00, 0x00, 0x00,
+       0x00, 0x00, 0x00, 0x00,  0x00, 0x00, 0x00, 0x00,
+       0x00, 0x00, 0x00, 0x00,  0x00, 0x00, 0x00, 0x00,
+       0x00, 0x00, 0x00, 0x00,  0x00, 0x00, 0x00, 0x00,
+       0x00, 0x00, 0x00, 0x00,  0x00, 0x00, 0x00, 0x00,
+       0x00, 0x00, 0x00, 0x00,  0x00, 0x00, 0x00, 0x00,
+       0x00, 0x00, 0x00, 0x00,  0x00, 0x00, 0x00, 0x00,
+       0x00, 0x00, 0x00, 0x00,  0x00, 0x00, 0x00, 0x00,
+       0x00, 0x00, 0x00, 0x00,  0x00, 0x00, 0x00, 0x00,
+       0x00, 0x00, 0x00, 0x00,  0x00, 0x00, 0x00, 0x00,
+       0x00, 0x00, 0x00, 0x00,  0x00, 0x00, 0x00, 0x00,
        0x3C, 0x00, 0x28, 0x8C,  0x00, 0x12, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,  0x00, 0x01, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,  0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,  0x00, 0x00, 0x00, 0x00,
        0x00, 0x80, 0x00, 0x00,  0x00, 0x00, 0x00, 0x00,
-       /* page 4~6 is for wowlan */
-       /* page 4: ARP resp */
+       /* pages 5~7 are for wowlan */
+       /* page 5: ARP resp */
        0x08, 0x01, 0x00, 0x00,  0x84, 0xC9, 0xB2, 0xA7,
        0xB3, 0x6E, 0x00, 0xE0,  0x4C, 0x02, 0x51, 0x02,
        0x84, 0xC9, 0xB2, 0xA7,  0xB3, 0x6E, 0x00, 0x00,
@@ -852,7 +886,7 @@ static u8 reserved_page_packet_8821[TOTAL_RESERVED_PKT_LEN_8821] = {
        0x00, 0x00, 0x00, 0x00,  0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,  0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,  0x00, 0x00, 0x00, 0x00,
-       /* page 5: H2C_REMOTE_WAKE_CTRL_INFO */
+       /* page 6: H2C_REMOTE_WAKE_CTRL_INFO */
        0x00, 0x00, 0x00, 0x00,  0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,  0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,  0x00, 0x00, 0x00, 0x00,
@@ -885,7 +919,7 @@ static u8 reserved_page_packet_8821[TOTAL_RESERVED_PKT_LEN_8821] = {
        0x00, 0x00, 0x00, 0x00,  0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,  0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,  0x00, 0x00, 0x00, 0x00,
-       /* page 6: Rsvd GTK extend memory (zero memory) */
+       /* page 7: Rsvd GTK extend memory (zero memory) */
        0x00, 0x00, 0x00, 0x00,  0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,  0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,  0x00, 0x00, 0x00, 0x00,
@@ -1176,13 +1210,78 @@ static u8 reserved_page_packet_8812[TOTAL_RESERVED_PKT_LEN_8812] = {
        0x00, 0x00, 0x00, 0x00,  0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,  0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,  0x00, 0x00, 0x00, 0x00,
+       0x1A, 0x00, 0x28, 0x8C,  0x00, 0x12, 0x00, 0x00,
+       0x00, 0x00, 0x80, 0x00,  0x00, 0x01, 0x00, 0x00,
+       0x00, 0x00, 0x00, 0x00,  0x00, 0x00, 0x00, 0x00,
+       0x00, 0x00, 0x00, 0x00,  0x00, 0x00, 0x00, 0x00,
+       0x00, 0x80, 0x00, 0x00,  0x00, 0x00, 0x00, 0x00,
+       /* page 4: BT Qos null data */
+       0xC8, 0x01, 0x00, 0x00,  0x84, 0xC9, 0xB2, 0xA7,
+       0xB3, 0x6E, 0x00, 0xE0,  0x4C, 0x02, 0x51, 0x02,
+       0x84, 0xC9, 0xB2, 0xA7,  0xB3, 0x6E, 0x00, 0x00,
+       0x00, 0x00, 0x00, 0x00,  0x00, 0x00, 0x00, 0x00,
+       0x00, 0x00, 0x00, 0x00,  0x00, 0x00, 0x00, 0x00,
+       0x00, 0x00, 0x00, 0x00,  0x00, 0x00, 0x00, 0x00,
+       0x00, 0x00, 0x00, 0x00,  0x00, 0x00, 0x00, 0x00,
+       0x00, 0x00, 0x00, 0x00,  0x00, 0x00, 0x00, 0x00,
+       0x00, 0x00, 0x00, 0x00,  0x00, 0x00, 0x00, 0x00,
+       0x00, 0x00, 0x00, 0x00,  0x00, 0x00, 0x00, 0x00,
+       0x00, 0x00, 0x00, 0x00,  0x00, 0x00, 0x00, 0x00,
+       0x00, 0x00, 0x00, 0x00,  0x00, 0x00, 0x00, 0x00,
+       0x00, 0x00, 0x00, 0x00,  0x00, 0x00, 0x00, 0x00,
+       0x00, 0x00, 0x00, 0x00,  0x00, 0x00, 0x00, 0x00,
+       0x00, 0x00, 0x00, 0x00,  0x00, 0x00, 0x00, 0x00,
+       0x00, 0x00, 0x00, 0x00,  0x00, 0x00, 0x00, 0x00,
+       0x00, 0x00, 0x00, 0x00,  0x00, 0x00, 0x00, 0x00,
+       0x00, 0x00, 0x00, 0x00,  0x00, 0x00, 0x00, 0x00,
+       0x00, 0x00, 0x00, 0x00,  0x00, 0x00, 0x00, 0x00,
+       0x00, 0x00, 0x00, 0x00,  0x00, 0x00, 0x00, 0x00,
+       0x00, 0x00, 0x00, 0x00,  0x00, 0x00, 0x00, 0x00,
+       0x00, 0x00, 0x00, 0x00,  0x00, 0x00, 0x00, 0x00,
+       0x00, 0x00, 0x00, 0x00,  0x00, 0x00, 0x00, 0x00,
+       0x00, 0x00, 0x00, 0x00,  0x00, 0x00, 0x00, 0x00,
+       0x00, 0x00, 0x00, 0x00,  0x00, 0x00, 0x00, 0x00,
+       0x00, 0x00, 0x00, 0x00,  0x00, 0x00, 0x00, 0x00,
+       0x00, 0x00, 0x00, 0x00,  0x00, 0x00, 0x00, 0x00,
+       0x00, 0x00, 0x00, 0x00,  0x00, 0x00, 0x00, 0x00,
+       0x00, 0x00, 0x00, 0x00,  0x00, 0x00, 0x00, 0x00,
+       0x00, 0x00, 0x00, 0x00,  0x00, 0x00, 0x00, 0x00,
+       0x00, 0x00, 0x00, 0x00,  0x00, 0x00, 0x00, 0x00,
+       0x00, 0x00, 0x00, 0x00,  0x00, 0x00, 0x00, 0x00,
+       0x00, 0x00, 0x00, 0x00,  0x00, 0x00, 0x00, 0x00,
+       0x00, 0x00, 0x00, 0x00,  0x00, 0x00, 0x00, 0x00,
+       0x00, 0x00, 0x00, 0x00,  0x00, 0x00, 0x00, 0x00,
+       0x00, 0x00, 0x00, 0x00,  0x00, 0x00, 0x00, 0x00,
+       0x00, 0x00, 0x00, 0x00,  0x00, 0x00, 0x00, 0x00,
+       0x00, 0x00, 0x00, 0x00,  0x00, 0x00, 0x00, 0x00,
+       0x00, 0x00, 0x00, 0x00,  0x00, 0x00, 0x00, 0x00,
+       0x00, 0x00, 0x00, 0x00,  0x00, 0x00, 0x00, 0x00,
+       0x00, 0x00, 0x00, 0x00,  0x00, 0x00, 0x00, 0x00,
+       0x00, 0x00, 0x00, 0x00,  0x00, 0x00, 0x00, 0x00,
+       0x00, 0x00, 0x00, 0x00,  0x00, 0x00, 0x00, 0x00,
+       0x00, 0x00, 0x00, 0x00,  0x00, 0x00, 0x00, 0x00,
+       0x00, 0x00, 0x00, 0x00,  0x00, 0x00, 0x00, 0x00,
+       0x00, 0x00, 0x00, 0x00,  0x00, 0x00, 0x00, 0x00,
+       0x00, 0x00, 0x00, 0x00,  0x00, 0x00, 0x00, 0x00,
+       0x00, 0x00, 0x00, 0x00,  0x00, 0x00, 0x00, 0x00,
+       0x00, 0x00, 0x00, 0x00,  0x00, 0x00, 0x00, 0x00,
+       0x00, 0x00, 0x00, 0x00,  0x00, 0x00, 0x00, 0x00,
+       0x00, 0x00, 0x00, 0x00,  0x00, 0x00, 0x00, 0x00,
+       0x00, 0x00, 0x00, 0x00,  0x00, 0x00, 0x00, 0x00,
+       0x00, 0x00, 0x00, 0x00,  0x00, 0x00, 0x00, 0x00,
+       0x00, 0x00, 0x00, 0x00,  0x00, 0x00, 0x00, 0x00,
+       0x00, 0x00, 0x00, 0x00,  0x00, 0x00, 0x00, 0x00,
+       0x00, 0x00, 0x00, 0x00,  0x00, 0x00, 0x00, 0x00,
+       0x00, 0x00, 0x00, 0x00,  0x00, 0x00, 0x00, 0x00,
+       0x00, 0x00, 0x00, 0x00,  0x00, 0x00, 0x00, 0x00,
+       0x00, 0x00, 0x00, 0x00,  0x00, 0x00, 0x00, 0x00,
        0x3C, 0x00, 0x28, 0x8C,  0x00, 0x12, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,  0x00, 0x01, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,  0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,  0x00, 0x00, 0x00, 0x00,
        0x00, 0x80, 0x00, 0x00,  0x00, 0x00, 0x00, 0x00,
-       /* page 4~6 is for wowlan */
-       /* page 4: ARP resp */
+       /* page 5~7 is for wowlan */
+       /* page 5: ARP resp */
        0x08, 0x01, 0x00, 0x00,  0x84, 0xC9, 0xB2, 0xA7,
        0xB3, 0x6E, 0x00, 0xE0,  0x4C, 0x02, 0x51, 0x02,
        0x84, 0xC9, 0xB2, 0xA7,  0xB3, 0x6E, 0x00, 0x00,
@@ -1247,7 +1346,7 @@ static u8 reserved_page_packet_8812[TOTAL_RESERVED_PKT_LEN_8812] = {
        0x00, 0x00, 0x00, 0x00,  0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,  0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,  0x00, 0x00, 0x00, 0x00,
-       /* page 5: H2C_REMOTE_WAKE_CTRL_INFO */
+       /* page 6: H2C_REMOTE_WAKE_CTRL_INFO */
        0x00, 0x00, 0x00, 0x00,  0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,  0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,  0x00, 0x00, 0x00, 0x00,
@@ -1312,7 +1411,7 @@ static u8 reserved_page_packet_8812[TOTAL_RESERVED_PKT_LEN_8812] = {
        0x00, 0x00, 0x00, 0x00,  0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,  0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,  0x00, 0x00, 0x00, 0x00,
-       /* page 6: Rsvd GTK extend memory (zero memory) */
+       /* page 7: Rsvd GTK extend memory (zero memory) */
        0x00, 0x00, 0x00, 0x00,  0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,  0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,  0x00, 0x00, 0x00, 0x00,
@@ -1394,6 +1493,7 @@ void rtl8812ae_set_fw_rsvdpagepkt(struct ieee80211_hw *hw,
        u8 *p_pspoll;
        u8 *nullfunc;
        u8 *qosnull;
+       u8 *btqosnull;
        u8 *arpresp;
 
        /*---------------------------------------------------------
@@ -1441,12 +1541,23 @@ void rtl8812ae_set_fw_rsvdpagepkt(struct ieee80211_hw *hw,
 
        SET_H2CCMD_RSVDPAGE_LOC_QOS_NULL_DATA(u1RsvdPageLoc, QOSNULL_PG);
 
+       /*---------------------------------------------------------
+        *                      (5) BT Qos null data
+        *----------------------------------------------------------
+        */
+       btqosnull = &reserved_page_packet_8812[BT_QOSNULL_PG * 512];
+       SET_80211_HDR_ADDRESS1(btqosnull, mac->bssid);
+       SET_80211_HDR_ADDRESS2(btqosnull, mac->mac_addr);
+       SET_80211_HDR_ADDRESS3(btqosnull, mac->bssid);
+
+       SET_H2CCMD_RSVDPAGE_LOC_BT_QOS_NULL_DATA(u1RsvdPageLoc, BT_QOSNULL_PG);
+
        if (!dl_whole_packets) {
-               totalpacketlen = 512 * (QOSNULL_PG + 1) - 40;
+               totalpacketlen = 512 * (BT_QOSNULL_PG + 1) - 40;
                goto out;
        }
        /*---------------------------------------------------------
-        *                      (5) ARP Resp
+        *                      (6) ARP Resp
         *----------------------------------------------------------
         */
        arpresp = &reserved_page_packet_8812[ARPRESP_PG * 512];
@@ -1457,14 +1568,14 @@ void rtl8812ae_set_fw_rsvdpagepkt(struct ieee80211_hw *hw,
        SET_8821AE_H2CCMD_AOAC_RSVDPAGE_LOC_ARP_RSP(u1RsvdPageLoc2, ARPRESP_PG);
 
        /*---------------------------------------------------------
-        *                      (6) Remote Wake Ctrl
+        *                      (7) Remote Wake Ctrl
         *----------------------------------------------------------
         */
        SET_8821AE_H2CCMD_AOAC_RSVDPAGE_LOC_REMOTE_WAKE_CTRL_INFO(u1RsvdPageLoc2,
                                                                REMOTE_PG);
 
        /*---------------------------------------------------------
-        *                      (7) GTK Ext Memory
+        *                      (8) GTK Ext Memory
         *----------------------------------------------------------
         */
        SET_8821AE_H2CCMD_AOAC_RSVDPAGE_LOC_GTK_EXT_MEM(u1RsvdPageLoc2, GTKEXT_PG);
@@ -1518,6 +1629,7 @@ void rtl8821ae_set_fw_rsvdpagepkt(struct ieee80211_hw *hw,
        u8 *p_pspoll;
        u8 *nullfunc;
        u8 *qosnull;
+       u8 *btqosnull;
        u8 *arpresp;
 
        /*---------------------------------------------------------
@@ -1565,12 +1677,23 @@ void rtl8821ae_set_fw_rsvdpagepkt(struct ieee80211_hw *hw,
 
        SET_H2CCMD_RSVDPAGE_LOC_QOS_NULL_DATA(u1RsvdPageLoc, QOSNULL_PG);
 
+       /*---------------------------------------------------------
+        *                      (5) BT Qos null data
+        *----------------------------------------------------------
+        */
+       btqosnull = &reserved_page_packet_8821[BT_QOSNULL_PG * 256];
+       SET_80211_HDR_ADDRESS1(btqosnull, mac->bssid);
+       SET_80211_HDR_ADDRESS2(btqosnull, mac->mac_addr);
+       SET_80211_HDR_ADDRESS3(btqosnull, mac->bssid);
+
+       SET_H2CCMD_RSVDPAGE_LOC_BT_QOS_NULL_DATA(u1RsvdPageLoc, BT_QOSNULL_PG);
+
        if (!dl_whole_packets) {
-               totalpacketlen = 256 * (QOSNULL_PG + 1) - 40;
+               totalpacketlen = 256 * (BT_QOSNULL_PG + 1) - 40;
                goto out;
        }
        /*---------------------------------------------------------
-        *                      (5) ARP Resp
+        *                      (6) ARP Resp
         *----------------------------------------------------------
         */
        arpresp = &reserved_page_packet_8821[ARPRESP_PG * 256];
@@ -1581,14 +1704,14 @@ void rtl8821ae_set_fw_rsvdpagepkt(struct ieee80211_hw *hw,
        SET_8821AE_H2CCMD_AOAC_RSVDPAGE_LOC_ARP_RSP(u1RsvdPageLoc2, ARPRESP_PG);
 
        /*---------------------------------------------------------
-        *                      (6) Remote Wake Ctrl
+        *                      (7) Remote Wake Ctrl
         *----------------------------------------------------------
         */
        SET_8821AE_H2CCMD_AOAC_RSVDPAGE_LOC_REMOTE_WAKE_CTRL_INFO(u1RsvdPageLoc2,
                                                                        REMOTE_PG);
 
        /*---------------------------------------------------------
-        *                      (7) GTK Ext Memory
+        *                      (8) GTK Ext Memory
         *----------------------------------------------------------
         */
        SET_8821AE_H2CCMD_AOAC_RSVDPAGE_LOC_GTK_EXT_MEM(u1RsvdPageLoc2, GTKEXT_PG);
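
When dl_whole_packets is false only the connection-maintenance pages
are downloaded, so the cut-off advances from the QoS-null page to the
new BT-QoS-null page on both chips. Spelled out (the 40-byte trim is
carried over from the old expression; its exact purpose is not stated
in the diff):

    /* 8821AE: 256-byte pages, BT_QOSNULL_PG == 4 */
    totalpacketlen = 256 * (BT_QOSNULL_PG + 1) - 40;    /* 1240 bytes */

    /* 8812AE: 512-byte pages, BT_QOSNULL_PG == 4 */
    totalpacketlen = 512 * (BT_QOSNULL_PG + 1) - 40;    /* 2520 bytes */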
index 90a98ed879f7e839b6f85a12240a673f4a18c5b6..98d871afd92ab4c0c1555f3bf4ac5612ae001b35 100644 (file)
@@ -229,6 +229,8 @@ enum rtl8821a_h2c_cmd {
        SET_BITS_TO_LE_1BYTE((__ph2ccmd)+2, 0, 8, __val)
 #define SET_H2CCMD_RSVDPAGE_LOC_QOS_NULL_DATA(__ph2ccmd, __val)                \
        SET_BITS_TO_LE_1BYTE((__ph2ccmd)+3, 0, 8, __val)
+#define SET_H2CCMD_RSVDPAGE_LOC_BT_QOS_NULL_DATA(__ph2ccmd, __val)     \
+       SET_BITS_TO_LE_1BYTE((__ph2ccmd) + 4, 0, 8, __val)
 
 /* _MEDIA_STATUS_RPT_PARM_CMD1 */
 #define SET_H2CCMD_MSRRPT_PARM_OPMODE(__cmd, __value)  \
index 363d2f28da1fccca8ef986eea90a1d607b87a4d2..3571ce4bd276584618680c1242445c27296e7212 100644 (file)
@@ -842,12 +842,8 @@ static bool _rtl8821ae_llt_table_init(struct ieee80211_hw *hw)
        bool status;
 
        maxpage = 255;
-       txpktbuf_bndy = 0xF8;
-       rqpn = 0x80e70808;
-       if (rtlpriv->rtlhal.hw_type == HARDWARE_TYPE_RTL8812AE) {
-               txpktbuf_bndy = 0xFA;
-               rqpn = 0x80e90808;
-       }
+       txpktbuf_bndy = 0xF7;
+       rqpn = 0x80e60808;
 
        rtl_write_byte(rtlpriv, REG_TRXFF_BNDY, txpktbuf_bndy);
        rtl_write_word(rtlpriv, REG_TRXFF_BNDY + 2, MAX_RX_DMA_BUFFER_SIZE - 1);
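
Lowering txpktbuf_bndy to a common 0xF7 hands the firmware the extra
reserved page(s) added above, and the RQPN value shrinks the public
queue to match. A hedged decoding of 0x80e60808, assuming the
conventional Realtek RQPN field packing (the layout itself is not shown
in this diff):

    /* 0x80e60808:
     *   bits  7:0  = 0x08  high-priority queue pages
     *   bits 15:8  = 0x08  low-priority queue pages
     *   bits 23:16 = 0xe6  public queue pages (down from 0xe7/0xe9)
     *   bit  31    = 1     latch the new page counts
     */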
index 8da874cbec1a19353877f5cdf4855050f333bbc6..94a5e587a1cdafd746245a1a28ebb0f6b87a71df 100644 (file)
@@ -660,6 +660,88 @@ void rtl8821ae_phy_switch_wirelessband(struct ieee80211_hw *hw, u8 band)
        return;
 }
 
+static bool _rtl8821ae_check_positive(struct ieee80211_hw *hw,
+                                     const u32 condition1,
+                                     const u32 condition2)
+{
+       struct rtl_priv *rtlpriv = rtl_priv(hw);
+       struct rtl_hal *rtlhal = rtl_hal(rtlpriv);
+       u32 cut_ver = ((rtlhal->version & CHIP_VER_RTL_MASK)
+                                       >> CHIP_VER_RTL_SHIFT);
+       u32 intf = (rtlhal->interface == INTF_USB ? BIT(1) : BIT(0));
+
+       u8  board_type = ((rtlhal->board_type & BIT(4)) >> 4) << 0 | /* _GLNA */
+                        ((rtlhal->board_type & BIT(3)) >> 3) << 1 | /* _GPA  */
+                        ((rtlhal->board_type & BIT(7)) >> 7) << 2 | /* _ALNA */
+                        ((rtlhal->board_type & BIT(6)) >> 6) << 3 | /* _APA  */
+                        ((rtlhal->board_type & BIT(2)) >> 2) << 4;  /* _BT   */
+
+       u32 cond1 = condition1, cond2 = condition2;
+       u32 driver1 = cut_ver << 24 |   /* CUT ver */
+                     0 << 20 |                 /* interface 2/2 */
+                     0x04 << 16 |              /* platform */
+                     rtlhal->package_type << 12 |
+                     intf << 8 |                       /* interface 1/2 */
+                     board_type;
+
+       u32 driver2 = rtlhal->type_glna <<  0 |
+                     rtlhal->type_gpa  <<  8 |
+                     rtlhal->type_alna << 16 |
+                     rtlhal->type_apa  << 24;
+
+       RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
+                "===> [8812A] CheckPositive (cond1, cond2) = (0x%X 0x%X)\n",
+                cond1, cond2);
+       RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
+                "===> [8812A] CheckPositive (driver1, driver2) = (0x%X 0x%X)\n",
+                driver1, driver2);
+
+       RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
+                "      (Platform, Interface) = (0x%X, 0x%X)\n", 0x04, intf);
+       RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
+                "      (Board, Package) = (0x%X, 0x%X)\n",
+                rtlhal->board_type, rtlhal->package_type);
+
+       /*============== Value Defined Check ===============*/
+       /*QFN Type [15:12] and Cut Version [27:24] need to do value check*/
+
+       if (((cond1 & 0x0000F000) != 0) && ((cond1 & 0x0000F000) !=
+               (driver1 & 0x0000F000)))
+               return false;
+       if (((cond1 & 0x0F000000) != 0) && ((cond1 & 0x0F000000) !=
+               (driver1 & 0x0F000000)))
+               return false;
+
+       /*=============== Bit Defined Check ================*/
+       /* We don't care [31:28] */
+
+       cond1   &= 0x00FF0FFF;
+       driver1 &= 0x00FF0FFF;
+
+       if ((cond1 & driver1) == cond1) {
+               u32 mask = 0;
+
+               if ((cond1 & 0x0F) == 0) /* BoardType is DONTCARE*/
+                       return true;
+
+               if ((cond1 & BIT(0)) != 0) /*GLNA*/
+                       mask |= 0x000000FF;
+               if ((cond1 & BIT(1)) != 0) /*GPA*/
+                       mask |= 0x0000FF00;
+               if ((cond1 & BIT(2)) != 0) /*ALNA*/
+                       mask |= 0x00FF0000;
+               if ((cond1 & BIT(3)) != 0) /*APA*/
+                       mask |= 0xFF000000;
+
+               /* BoardType of each RF path is matched*/
+               if ((cond2 & mask) == (driver2 & mask))
+                       return true;
+               else
+                       return false;
+       } else
+               return false;
+}
+
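
A worked example of the word the function builds for comparison, using
the shifts above (all concrete values are illustrative):

    /* C-cut PCIe board, package type 1, external 5 GHz LNA fitted: */
    u32 board = BIT(2);            /* _ALNA set; GLNA/GPA/APA/BT clear */
    u32 driver1 = (2 << 24)        /* cut version    [27:24] */
                | (0x04 << 16)     /* platform       [19:16] */
                | (1 << 12)        /* package type   [15:12] */
                | (BIT(0) << 8)    /* PCI interface  [11:8]  */
                | board;           /* RF front-end   [4:0]   */

    /* A table condition matches when its value-checked fields (cut
     * version, package type) are equal and its bit-checked fields are
     * a subset of driver1; cond2 then refines per-path front-end types.
     */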
 static bool _rtl8821ae_check_condition(struct ieee80211_hw *hw,
                                       const u32 condition)
 {
@@ -1695,55 +1777,78 @@ static bool _rtl8821ae_phy_bb8821a_config_parafile(struct ieee80211_hw *hw)
        return true;
 }
 
+static bool
+__rtl8821ae_phy_config_with_headerfile(struct ieee80211_hw *hw,
+                                      u32 *array_table, u16 arraylen,
+                                      void (*set_reg)(struct ieee80211_hw *hw,
+                                                      u32 regaddr, u32 data))
+{
+       #define COND_ELSE  2
+       #define COND_ENDIF 3
+
+       int i = 0;
+       u8 cond;
+       bool matched = true, skipped = false;
+
+       while ((i + 1) < arraylen) {
+               u32 v1 = array_table[i];
+               u32 v2 = array_table[i + 1];
+
+               if (v1 & (BIT(31) | BIT(30))) {/*positive & negative condition*/
+                       if (v1 & BIT(31)) {/* positive condition*/
+                               cond  = (u8)((v1 & (BIT(29) | BIT(28))) >> 28);
+                               if (cond == COND_ENDIF) {/*end*/
+                                       matched = true;
+                                       skipped = false;
+                               } else if (cond == COND_ELSE) /*else*/
+                                       matched = skipped ? false : true;
+                               else {/*if , else if*/
+                                       if (skipped) {
+                                               matched = false;
+                                       } else {
+                                               if (_rtl8821ae_check_positive(
+                                                               hw, v1, v2)) {
+                                                       matched = true;
+                                                       skipped = true;
+                                               } else {
+                                                       matched = false;
+                                                       skipped = false;
+                                               }
+                                       }
+                               }
+                       } else if (v1 & BIT(30)) { /*negative condition*/
+                       /*do nothing*/
+                       }
+               } else {
+                       if (matched)
+                               set_reg(hw, v1, v2);
+               }
+               i = i + 2;
+       }
+
+       return true;
+}
+
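
The rewritten walker interprets control rows by their top nibble:
BIT(31) marks a positive condition, bits [29:28] select if/else-if
(0/1), else (2) or endif (3), and BIT(30) rows (negative conditions)
are skipped. That is exactly the 0x8xxxxxxx / 0x9xxxxxxx / 0xA0000000 /
0xB0000000 pattern in the regenerated tables further down. A schematic
table in the new format (register values are placeholders):

    static u32 example_table[] = {
            0x80000008, 0x00000000, /* IF: cond1/cond2 get checked    */
            0x40000000, 0x00000000, /* BIT(30) pair: always skipped   */
            0x00000C68, 0x59791979, /* written only if the IF matched */
            0xA0000000, 0x00000000, /* ELSE                           */
            0x00000C68, 0x59799979, /* default value                  */
            0xB0000000, 0x00000000, /* ENDIF                          */
    };

Byte-wide MAC writes reuse the same walker through an adapter matching
the set_reg(hw, u32, u32) callback shape (the rtl_write_byte_with_val32
helper used below).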
 static bool _rtl8821ae_phy_config_mac_with_headerfile(struct ieee80211_hw *hw)
 {
        struct rtl_priv *rtlpriv = rtl_priv(hw);
        struct rtl_hal *rtlhal = rtl_hal(rtlpriv);
-       u32 i, v1, v2;
        u32 arraylength;
        u32 *ptrarray;
 
        RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, "Read MAC_REG_Array\n");
        if (rtlhal->hw_type == HARDWARE_TYPE_RTL8821AE) {
-               arraylength = RTL8821AEMAC_1T_ARRAYLEN;
+               arraylength = RTL8821AE_MAC_1T_ARRAYLEN;
                ptrarray = RTL8821AE_MAC_REG_ARRAY;
        } else {
-               arraylength = RTL8812AEMAC_1T_ARRAYLEN;
+               arraylength = RTL8812AE_MAC_1T_ARRAYLEN;
                ptrarray = RTL8812AE_MAC_REG_ARRAY;
        }
        RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
                 "Img: MAC_REG_ARRAY LEN %d\n", arraylength);
-       for (i = 0; i < arraylength; i += 2) {
-               v1 = ptrarray[i];
-               v2 = (u8)ptrarray[i + 1];
-               if (v1 < 0xCDCDCDCD) {
-                       rtl_write_byte(rtlpriv, v1, (u8)v2);
-                       continue;
-               } else {
-                       if (!_rtl8821ae_check_condition(hw, v1)) {
-                               /*Discard the following (offset, data) pairs*/
-                               READ_NEXT_PAIR(ptrarray, v1, v2, i);
-                               while (v2 != 0xDEAD &&
-                                      v2 != 0xCDEF &&
-                                      v2 != 0xCDCD && i < arraylength - 2) {
-                                       READ_NEXT_PAIR(ptrarray, v1, v2, i);
-                               }
-                               i -= 2; /* prevent from for-loop += 2*/
-                       } else {/*Configure matched pairs and skip to end of if-else.*/
-                               READ_NEXT_PAIR(ptrarray, v1, v2, i);
-                               while (v2 != 0xDEAD &&
-                                      v2 != 0xCDEF &&
-                                      v2 != 0xCDCD && i < arraylength - 2) {
-                                       rtl_write_byte(rtlpriv, v1, v2);
-                                       READ_NEXT_PAIR(ptrarray, v1, v2, i);
-                               }
 
-                               while (v2 != 0xDEAD && i < arraylength - 2)
-                                       READ_NEXT_PAIR(ptrarray, v1, v2, i);
-                       }
-               }
-       }
-       return true;
+       return __rtl8821ae_phy_config_with_headerfile(hw,
+                       ptrarray, arraylength, rtl_write_byte_with_val32);
 }
 
 static bool _rtl8821ae_phy_config_bb_with_headerfile(struct ieee80211_hw *hw,
@@ -1751,111 +1856,33 @@ static bool _rtl8821ae_phy_config_bb_with_headerfile(struct ieee80211_hw *hw,
 {
        struct rtl_priv *rtlpriv = rtl_priv(hw);
        struct rtl_hal *rtlhal = rtl_hal(rtlpriv);
-       int i;
        u32 *array_table;
        u16 arraylen;
-       u32 v1 = 0, v2 = 0;
 
        if (configtype == BASEBAND_CONFIG_PHY_REG) {
                if (rtlhal->hw_type == HARDWARE_TYPE_RTL8812AE) {
-                       arraylen = RTL8812AEPHY_REG_1TARRAYLEN;
+                       arraylen = RTL8812AE_PHY_REG_1TARRAYLEN;
                        array_table = RTL8812AE_PHY_REG_ARRAY;
                } else {
-                       arraylen = RTL8821AEPHY_REG_1TARRAYLEN;
+                       arraylen = RTL8821AE_PHY_REG_1TARRAYLEN;
                        array_table = RTL8821AE_PHY_REG_ARRAY;
                }
 
-               for (i = 0; i < arraylen; i += 2) {
-                       v1 = array_table[i];
-                       v2 = array_table[i + 1];
-                       if (v1 < 0xCDCDCDCD) {
-                               _rtl8821ae_config_bb_reg(hw, v1, v2);
-                               continue;
-                       } else {/*This line is the start line of branch.*/
-                               if (!_rtl8821ae_check_condition(hw, v1)) {
-                                       /*Discard the following (offset, data) pairs*/
-                                       READ_NEXT_PAIR(array_table, v1, v2, i);
-                                       while (v2 != 0xDEAD &&
-                                              v2 != 0xCDEF &&
-                                              v2 != 0xCDCD &&
-                                              i < arraylen - 2) {
-                                               READ_NEXT_PAIR(array_table, v1,
-                                                               v2, i);
-                                       }
-
-                                       i -= 2; /* prevent from for-loop += 2*/
-                               } else {/*Configure matched pairs and skip to end of if-else.*/
-                                       READ_NEXT_PAIR(array_table, v1, v2, i);
-                                       while (v2 != 0xDEAD &&
-                                              v2 != 0xCDEF &&
-                                              v2 != 0xCDCD &&
-                                              i < arraylen - 2) {
-                                               _rtl8821ae_config_bb_reg(hw, v1,
-                                                                        v2);
-                                               READ_NEXT_PAIR(array_table, v1,
-                                                              v2, i);
-                                       }
-
-                                       while (v2 != 0xDEAD &&
-                                              i < arraylen - 2) {
-                                               READ_NEXT_PAIR(array_table, v1,
-                                                              v2, i);
-                                       }
-                               }
-                       }
-               }
+               return __rtl8821ae_phy_config_with_headerfile(hw,
+                               array_table, arraylen,
+                               _rtl8821ae_config_bb_reg);
        } else if (configtype == BASEBAND_CONFIG_AGC_TAB) {
                if (rtlhal->hw_type == HARDWARE_TYPE_RTL8812AE) {
-                       arraylen = RTL8812AEAGCTAB_1TARRAYLEN;
+                       arraylen = RTL8812AE_AGC_TAB_1TARRAYLEN;
                        array_table = RTL8812AE_AGC_TAB_ARRAY;
                } else {
-                       arraylen = RTL8821AEAGCTAB_1TARRAYLEN;
+                       arraylen = RTL8821AE_AGC_TAB_1TARRAYLEN;
                        array_table = RTL8821AE_AGC_TAB_ARRAY;
                }
 
-               for (i = 0; i < arraylen; i = i + 2) {
-                       v1 = array_table[i];
-                       v2 = array_table[i+1];
-                       if (v1 < 0xCDCDCDCD) {
-                               rtl_set_bbreg(hw, v1, MASKDWORD, v2);
-                               udelay(1);
-                               continue;
-                       } else {/*This line is the start line of branch.*/
-                               if (!_rtl8821ae_check_condition(hw, v1)) {
-                                       /*Discard the following (offset, data) pairs*/
-                                       READ_NEXT_PAIR(array_table, v1, v2, i);
-                                       while (v2 != 0xDEAD &&
-                                              v2 != 0xCDEF &&
-                                              v2 != 0xCDCD &&
-                                              i < arraylen - 2) {
-                                               READ_NEXT_PAIR(array_table, v1,
-                                                               v2, i);
-                                       }
-                                       i -= 2; /* prevent from for-loop += 2*/
-                               } else {/*Configure matched pairs and skip to end of if-else.*/
-                                       READ_NEXT_PAIR(array_table, v1, v2, i);
-                                       while (v2 != 0xDEAD &&
-                                              v2 != 0xCDEF &&
-                                              v2 != 0xCDCD &&
-                                              i < arraylen - 2) {
-                                               rtl_set_bbreg(hw, v1, MASKDWORD,
-                                                             v2);
-                                               udelay(1);
-                                               READ_NEXT_PAIR(array_table, v1,
-                                                              v2, i);
-                                       }
-
-                                       while (v2 != 0xDEAD &&
-                                               i < arraylen - 2) {
-                                               READ_NEXT_PAIR(array_table, v1,
-                                                               v2, i);
-                                       }
-                               }
-                               RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
-                                        "The agctab_array_table[0] is %x Rtl818EEPHY_REGArray[1] is %x\n",
-                                         array_table[i],  array_table[i + 1]);
-                       }
-               }
+               return __rtl8821ae_phy_config_with_headerfile(hw,
+                               array_table, arraylen,
+                               rtl_set_bbreg_with_dwmask);
        }
        return true;
 }
@@ -1913,10 +1940,10 @@ static bool _rtl8821ae_phy_config_bb_with_pgheaderfile(struct ieee80211_hw *hw,
        u32 v1, v2, v3, v4, v5, v6;
 
        if (rtlhal->hw_type == HARDWARE_TYPE_RTL8812AE) {
-               arraylen = RTL8812AEPHY_REG_ARRAY_PGLEN;
+               arraylen = RTL8812AE_PHY_REG_ARRAY_PGLEN;
                array = RTL8812AE_PHY_REG_ARRAY_PG;
        } else {
-               arraylen = RTL8821AEPHY_REG_ARRAY_PGLEN;
+               arraylen = RTL8821AE_PHY_REG_ARRAY_PGLEN;
                array = RTL8821AE_PHY_REG_ARRAY_PG;
        }
 
@@ -1980,12 +2007,10 @@ static bool _rtl8821ae_phy_config_bb_with_pgheaderfile(struct ieee80211_hw *hw,
 bool rtl8812ae_phy_config_rf_with_headerfile(struct ieee80211_hw *hw,
                                             enum radio_path rfpath)
 {
-       int i;
        bool rtstatus = true;
        u32 *radioa_array_table_a, *radioa_array_table_b;
        u16 radioa_arraylen_a, radioa_arraylen_b;
        struct rtl_priv *rtlpriv = rtl_priv(hw);
-       u32 v1 = 0, v2 = 0;
 
        radioa_arraylen_a = RTL8812AE_RADIOA_1TARRAYLEN;
        radioa_array_table_a = RTL8812AE_RADIOA_ARRAY;
@@ -1997,69 +2022,14 @@ bool rtl8812ae_phy_config_rf_with_headerfile(struct ieee80211_hw *hw,
        rtstatus = true;
        switch (rfpath) {
        case RF90_PATH_A:
-               for (i = 0; i < radioa_arraylen_a; i = i + 2) {
-                       v1 = radioa_array_table_a[i];
-                       v2 = radioa_array_table_a[i+1];
-                       if (v1 < 0xcdcdcdcd) {
-                               _rtl8821ae_config_rf_radio_a(hw, v1, v2);
-                               continue;
-                       } else{/*This line is the start line of branch.*/
-                               if (!_rtl8821ae_check_condition(hw, v1)) {
-                                       /*Discard the following (offset, data) pairs*/
-                                       READ_NEXT_PAIR(radioa_array_table_a, v1, v2, i);
-                                       while (v2 != 0xDEAD &&
-                                              v2 != 0xCDEF &&
-                                              v2 != 0xCDCD && i < radioa_arraylen_a-2)
-                                               READ_NEXT_PAIR(radioa_array_table_a, v1, v2, i);
-
-                                       i -= 2; /* prevent from for-loop += 2*/
-                               } else {/*Configure matched pairs and skip to end of if-else.*/
-                                       READ_NEXT_PAIR(radioa_array_table_a, v1, v2, i);
-                                       while (v2 != 0xDEAD &&
-                                              v2 != 0xCDEF &&
-                                              v2 != 0xCDCD && i < radioa_arraylen_a - 2) {
-                                               _rtl8821ae_config_rf_radio_a(hw, v1, v2);
-                                               READ_NEXT_PAIR(radioa_array_table_a, v1, v2, i);
-                                       }
-
-                                       while (v2 != 0xDEAD && i < radioa_arraylen_a-2)
-                                               READ_NEXT_PAIR(radioa_array_table_a, v1, v2, i);
-
-                               }
-                       }
-               }
+               return __rtl8821ae_phy_config_with_headerfile(hw,
+                               radioa_array_table_a, radioa_arraylen_a,
+                               _rtl8821ae_config_rf_radio_a);
                break;
        case RF90_PATH_B:
-               for (i = 0; i < radioa_arraylen_b; i = i + 2) {
-                       v1 = radioa_array_table_b[i];
-                       v2 = radioa_array_table_b[i+1];
-                       if (v1 < 0xcdcdcdcd) {
-                               _rtl8821ae_config_rf_radio_b(hw, v1, v2);
-                               continue;
-                       } else{/*This line is the start line of branch.*/
-                               if (!_rtl8821ae_check_condition(hw, v1)) {
-                                       /*Discard the following (offset, data) pairs*/
-                                       READ_NEXT_PAIR(radioa_array_table_b, v1, v2, i);
-                                       while (v2 != 0xDEAD &&
-                                              v2 != 0xCDEF &&
-                                              v2 != 0xCDCD && i < radioa_arraylen_b-2)
-                                               READ_NEXT_PAIR(radioa_array_table_b, v1, v2, i);
-
-                                       i -= 2; /* prevent from for-loop += 2*/
-                               } else {/*Configure matched pairs and skip to end of if-else.*/
-                                       READ_NEXT_PAIR(radioa_array_table_b, v1, v2, i);
-                                       while (v2 != 0xDEAD &&
-                                              v2 != 0xCDEF &&
-                                              v2 != 0xCDCD && i < radioa_arraylen_b-2) {
-                                               _rtl8821ae_config_rf_radio_b(hw, v1, v2);
-                                               READ_NEXT_PAIR(radioa_array_table_b, v1, v2, i);
-                                       }
-
-                                       while (v2 != 0xDEAD && i < radioa_arraylen_b-2)
-                                               READ_NEXT_PAIR(radioa_array_table_b, v1, v2, i);
-                               }
-                       }
-               }
+               return __rtl8821ae_phy_config_with_headerfile(hw,
+                               radioa_array_table_b, radioa_arraylen_b,
+                               _rtl8821ae_config_rf_radio_b);
                break;
        case RF90_PATH_C:
        case RF90_PATH_D:
@@ -2072,21 +2042,10 @@ bool rtl8812ae_phy_config_rf_with_headerfile(struct ieee80211_hw *hw,
 bool rtl8821ae_phy_config_rf_with_headerfile(struct ieee80211_hw *hw,
                                                enum radio_path rfpath)
 {
-       #define READ_NEXT_RF_PAIR(v1, v2, i) \
-       do { \
-               i += 2; \
-               v1 = radioa_array_table[i]; \
-               v2 = radioa_array_table[i+1]; \
-       } \
-       while (0)
-
-       int i;
        bool rtstatus = true;
        u32 *radioa_array_table;
        u16 radioa_arraylen;
        struct rtl_priv *rtlpriv = rtl_priv(hw);
-       /* struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); */
-       u32 v1 = 0, v2 = 0;
 
        radioa_arraylen = RTL8821AE_RADIOA_1TARRAYLEN;
        radioa_array_table = RTL8821AE_RADIOA_ARRAY;
@@ -2096,35 +2055,9 @@ bool rtl8821ae_phy_config_rf_with_headerfile(struct ieee80211_hw *hw,
        rtstatus = true;
        switch (rfpath) {
        case RF90_PATH_A:
-               for (i = 0; i < radioa_arraylen; i = i + 2) {
-                       v1 = radioa_array_table[i];
-                       v2 = radioa_array_table[i+1];
-                       if (v1 < 0xcdcdcdcd)
-                               _rtl8821ae_config_rf_radio_a(hw, v1, v2);
-                       else{/*This line is the start line of branch.*/
-                               if (!_rtl8821ae_check_condition(hw, v1)) {
-                                       /*Discard the following (offset, data) pairs*/
-                                       READ_NEXT_RF_PAIR(v1, v2, i);
-                                       while (v2 != 0xDEAD &&
-                                               v2 != 0xCDEF &&
-                                               v2 != 0xCDCD && i < radioa_arraylen - 2)
-                                               READ_NEXT_RF_PAIR(v1, v2, i);
-
-                                       i -= 2; /* prevent from for-loop += 2*/
-                               } else {/*Configure matched pairs and skip to end of if-else.*/
-                                       READ_NEXT_RF_PAIR(v1, v2, i);
-                                       while (v2 != 0xDEAD &&
-                                              v2 != 0xCDEF &&
-                                              v2 != 0xCDCD && i < radioa_arraylen - 2) {
-                                               _rtl8821ae_config_rf_radio_a(hw, v1, v2);
-                                               READ_NEXT_RF_PAIR(v1, v2, i);
-                                       }
-
-                                       while (v2 != 0xDEAD && i < radioa_arraylen - 2)
-                                               READ_NEXT_RF_PAIR(v1, v2, i);
-                               }
-                       }
-               }
+               return __rtl8821ae_phy_config_with_headerfile(hw,
+                       radioa_array_table, radioa_arraylen,
+                       _rtl8821ae_config_rf_radio_a);
                break;
 
        case RF90_PATH_B:
index 77cf3b2cd3f1f4c7ba93437996efe55ad3d94fb5..abaf34cb14331487d8bd3ad789ea5450148d74f4 100644 (file)
@@ -203,7 +203,7 @@ int rtl8821ae_init_sw_vars(struct ieee80211_hw *hw)
                fw_name = "rtlwifi/rtl8812aefw.bin";
                wowlan_fw_name = "rtlwifi/rtl8812aefw_wowlan.bin";
        } else {
-               fw_name = "rtlwifi/rtl8821aefw.bin";
+               fw_name = "rtlwifi/rtl8821aefw_29.bin";
                wowlan_fw_name = "rtlwifi/rtl8821aefw_wowlan.bin";
        }
 
@@ -214,8 +214,16 @@ int rtl8821ae_init_sw_vars(struct ieee80211_hw *hw)
                                      rtlpriv->io.dev, GFP_KERNEL, hw,
                                      rtl_fw_cb);
        if (err) {
-               pr_err("Failed to request normal firmware!\n");
-               return 1;
+               /* Failed to get firmware. Check if old version available */
+               fw_name = "rtlwifi/rtl8821aefw.bin";
+               pr_info("Using firmware %s\n", fw_name);
+               err = request_firmware_nowait(THIS_MODULE, 1, fw_name,
+                                             rtlpriv->io.dev, GFP_KERNEL, hw,
+                                             rtl_fw_cb);
+               if (err) {
+                       pr_err("Failed to request normal firmware!\n");
+                       return 1;
+               }
        }
        /*load wowlan firmware*/
        pr_info("Using firmware %s\n", wowlan_fw_name);
@@ -428,6 +436,7 @@ MODULE_AUTHOR("Realtek WlanFAE      <wlanfae@realtek.com>");
 MODULE_LICENSE("GPL");
 MODULE_DESCRIPTION("Realtek 8821ae 802.11ac PCI wireless");
 MODULE_FIRMWARE("rtlwifi/rtl8821aefw.bin");
+MODULE_FIRMWARE("rtlwifi/rtl8821aefw_29.bin");
 
 module_param_named(swenc, rtl8821ae_mod_params.sw_crypto, bool, 0444);
 module_param_named(debug_level, rtl8821ae_mod_params.debug_level, int, 0644);
index 62a0fb76f080d6c756d73a1302eeabc891330c60..408c4611e5dee5798911d67d1915e6afd06f3e82 100644 (file)
@@ -38,7 +38,7 @@ u32 RTL8812AE_PHY_REG_ARRAY[] = {
                0x824, 0x00030FE0,
                0x828, 0x00000000,
                0x82C, 0x002083DD,
-               0x830, 0x2AAA6C86,
+               0x830, 0x2EAAEEB8,
                0x834, 0x0037A706,
                0x838, 0x06C89B44,
                0x83C, 0x0000095B,
@@ -68,7 +68,7 @@ u32 RTL8812AE_PHY_REG_ARRAY[] = {
                0x8BC, 0x4CA520A3,
                0x8C0, 0x27F00020,
                0x8C4, 0x00000000,
-               0x8C8, 0x00013169,
+               0x8C8, 0x00012D69,
                0x8CC, 0x08248492,
                0x8D0, 0x0000B800,
                0x8DC, 0x00000000,
@@ -76,13 +76,7 @@ u32 RTL8812AE_PHY_REG_ARRAY[] = {
                0x8D8, 0x290B5612,
                0x8F8, 0x400002C0,
                0x8FC, 0x00000000,
-       0xFF0F07D8, 0xABCD,
                0x900, 0x00000701,
-       0xFF0F07D0, 0xCDEF,
-               0x900, 0x00000701,
-       0xCDCDCDCD, 0xCDCD,
-               0x900, 0x00000700,
-       0xFF0F07D8, 0xDEAD,
                0x90C, 0x00000000,
                0x910, 0x0000FC00,
                0x914, 0x00000404,
@@ -120,7 +114,7 @@ u32 RTL8812AE_PHY_REG_ARRAY[] = {
                0x9D4, 0x00000000,
                0x9D8, 0x00000000,
                0x9DC, 0x00000000,
-               0x9E4, 0x00000002,
+               0x9E4, 0x00000003,
                0x9E8, 0x000002D5,
                0xA00, 0x00D047C8,
                0xA04, 0x01FF000C,
@@ -189,7 +183,21 @@ u32 RTL8812AE_PHY_REG_ARRAY[] = {
                0xC5C, 0x00000058,
                0xC60, 0x34344443,
                0xC64, 0x07003333,
+       0x80000008, 0x00000000, 0x40000000, 0x00000000,
+               0xC68, 0x59791979,
+       0x90000008, 0x05000000, 0x40000000, 0x00000000,
+               0xC68, 0x59791979,
+       0x90000002, 0x00000000, 0x40000000, 0x00000000,
                0xC68, 0x59791979,
+       0x90000004, 0x00000000, 0x40000000, 0x00000000,
+               0xC68, 0x59791979,
+       0x90000001, 0x00000000, 0x40000000, 0x00000000,
+               0xC68, 0x59791979,
+       0x90000001, 0x00000005, 0x40000000, 0x00000000,
+               0xC68, 0x59791979,
+       0xA0000000, 0x00000000,
+               0xC68, 0x59799979,
+       0xB0000000, 0x00000000,
                0xC6C, 0x59795979,
                0xC70, 0x19795979,
                0xC74, 0x19795979,
@@ -203,19 +211,7 @@ u32 RTL8812AE_PHY_REG_ARRAY[] = {
                0xCA0, 0x00000029,
                0xCA4, 0x08040201,
                0xCA8, 0x80402010,
-       0xFF0F0740, 0xABCD,
-               0xCB0, 0x77547717,
-       0xFF0F01C0, 0xCDEF,
-               0xCB0, 0x77547717,
-       0xFF0F02C0, 0xCDEF,
-               0xCB0, 0x77547717,
-       0xFF0F07D8, 0xCDEF,
-               0xCB0, 0x54547710,
-       0xFF0F07D0, 0xCDEF,
-               0xCB0, 0x54547710,
-       0xCDCDCDCD, 0xCDCD,
                0xCB0, 0x77547777,
-       0xFF0F0740, 0xDEAD,
                0xCB4, 0x00000077,
                0xCB8, 0x00508242,
                0xE00, 0x00000007,
@@ -257,23 +253,14 @@ u32 RTL8812AE_PHY_REG_ARRAY[] = {
                0xEA0, 0x00000029,
                0xEA4, 0x08040201,
                0xEA8, 0x80402010,
-       0xFF0F0740, 0xABCD,
-               0xEB0, 0x77547717,
-       0xFF0F01C0, 0xCDEF,
-               0xEB0, 0x77547717,
-       0xFF0F02C0, 0xCDEF,
-               0xEB0, 0x77547717,
-       0xFF0F07D8, 0xCDEF,
-               0xEB0, 0x54547710,
-       0xFF0F07D0, 0xCDEF,
-               0xEB0, 0x54547710,
-       0xCDCDCDCD, 0xCDCD,
                0xEB0, 0x77547777,
-       0xFF0F0740, 0xDEAD,
                0xEB4, 0x00000077,
                0xEB8, 0x00508242,
 };
 
+u32 RTL8812AE_PHY_REG_1TARRAYLEN =
+       sizeof(RTL8812AE_PHY_REG_ARRAY) / sizeof(u32);
+
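
Deriving the lengths with sizeof means the *_ARRAYLEN constants can no
longer drift out of sync with hand-edited tables. The kernel's
ARRAY_SIZE() macro expresses the same computation more idiomatically
(whether these definitions later switched to it is not shown here):

    u32 RTL8812AE_PHY_REG_1TARRAYLEN = ARRAY_SIZE(RTL8812AE_PHY_REG_ARRAY);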
 u32 RTL8821AE_PHY_REG_ARRAY[] = {
        0x800, 0x0020D090,
        0x804, 0x080112E0,
@@ -449,6 +436,9 @@ u32 RTL8821AE_PHY_REG_ARRAY[] = {
        0xCB8, 0x00508240,
 };
 
+u32 RTL8821AE_PHY_REG_1TARRAYLEN =
+       sizeof(RTL8821AE_PHY_REG_ARRAY) / sizeof(u32);
+
 u32 RTL8812AE_PHY_REG_ARRAY_PG[] = {
        0, 0, 0, 0x00000c20, 0xffffffff, 0x34363840,
        0, 0, 0, 0x00000c24, 0xffffffff, 0x42424444,
@@ -498,6 +488,9 @@ u32 RTL8812AE_PHY_REG_ARRAY_PG[] = {
        1, 1, 1, 0x00000e4c, 0xffffffff, 0x22242628
 };
 
+u32 RTL8812AE_PHY_REG_ARRAY_PGLEN =
+               sizeof(RTL8812AE_PHY_REG_ARRAY_PG) / sizeof(u32);
+
 u32 RTL8821AE_PHY_REG_ARRAY_PG[] = {
        0, 0, 0, 0x00000c20, 0xffffffff, 0x32343638,
        0, 0, 0, 0x00000c24, 0xffffffff, 0x36363838,
@@ -516,6 +509,9 @@ u32 RTL8821AE_PHY_REG_ARRAY_PG[] = {
        1, 0, 0, 0x00000c44, 0x0000ffff, 0x00002022
 };
 
+u32 RTL8821AE_PHY_REG_ARRAY_PGLEN =
+               sizeof(RTL8821AE_PHY_REG_ARRAY_PG) / sizeof(u32);
+
 u32 RTL8812AE_RADIOA_ARRAY[] = {
                0x000, 0x00010000,
                0x018, 0x0001712A,
@@ -523,26 +519,25 @@ u32 RTL8812AE_RADIOA_ARRAY[] = {
                0x066, 0x00040000,
                0x01E, 0x00080000,
                0x089, 0x00000080,
-       0xFF0F0740, 0xABCD,
-               0x086, 0x00014B38,
-       0xFF0F02C0, 0xCDEF,
-               0x086, 0x00014B38,
-       0xFF0F01C0, 0xCDEF,
-               0x086, 0x00014B38,
-       0xFF0F07D8, 0xCDEF,
+       0x80000001, 0x00000000, 0x40000000, 0x00000000,
                0x086, 0x00014B3A,
-       0xFF0F07D0, 0xCDEF,
+       0x90000001, 0x00000005, 0x40000000, 0x00000000,
                0x086, 0x00014B3A,
-       0xCDCDCDCD, 0xCDCD,
+       0xA0000000, 0x00000000,
                0x086, 0x00014B38,
-       0xFF0F0740, 0xDEAD,
+       0xB0000000, 0x00000000,
+       0x80000004, 0x00000000, 0x40000000, 0x00000000,
+               0x08B, 0x00080180,
+       0xA0000000, 0x00000000,
+               0x08B, 0x00087180,
+       0xB0000000, 0x00000000,
                0x0B1, 0x0001FC1A,
                0x0B3, 0x000F0810,
                0x0B4, 0x0001A78D,
                0x0BA, 0x00086180,
                0x018, 0x00000006,
                0x0EF, 0x00002000,
-       0xFF0F07D8, 0xABCD,
+       0x80000001, 0x00000000, 0x40000000, 0x00000000,
                0x03B, 0x0003F218,
                0x03B, 0x00030A58,
                0x03B, 0x0002FA58,
@@ -550,7 +545,7 @@ u32 RTL8812AE_RADIOA_ARRAY[] = {
                0x03B, 0x0001FA50,
                0x03B, 0x00010248,
                0x03B, 0x00008240,
-       0xFF0F07D0, 0xCDEF,
+       0x90000001, 0x00000005, 0x40000000, 0x00000000,
                0x03B, 0x0003F218,
                0x03B, 0x00030A58,
                0x03B, 0x0002FA58,
@@ -558,7 +553,7 @@ u32 RTL8812AE_RADIOA_ARRAY[] = {
                0x03B, 0x0001FA50,
                0x03B, 0x00010248,
                0x03B, 0x00008240,
-       0xCDCDCDCD, 0xCDCD,
+       0xA0000000, 0x00000000,
                0x03B, 0x00038A58,
                0x03B, 0x00037A58,
                0x03B, 0x0002A590,
@@ -566,9 +561,9 @@ u32 RTL8812AE_RADIOA_ARRAY[] = {
                0x03B, 0x00018248,
                0x03B, 0x00010240,
                0x03B, 0x00008240,
-       0xFF0F07D8, 0xDEAD,
+       0xB0000000, 0x00000000,
                0x0EF, 0x00000100,
-       0xFF0F07D8, 0xABCD,
+       0x80000002, 0x00000000, 0x40000000, 0x00000000,
                0x034, 0x0000A4EE,
                0x034, 0x00009076,
                0x034, 0x00008073,
@@ -580,7 +575,7 @@ u32 RTL8812AE_RADIOA_ARRAY[] = {
                0x034, 0x00002028,
                0x034, 0x00001025,
                0x034, 0x00000022,
-       0xCDCDCDCD, 0xCDCD,
+       0xA0000000, 0x00000000,
                0x034, 0x0000ADF4,
                0x034, 0x00009DF1,
                0x034, 0x00008DEE,
@@ -592,7 +587,7 @@ u32 RTL8812AE_RADIOA_ARRAY[] = {
                0x034, 0x000024E7,
                0x034, 0x0000146B,
                0x034, 0x0000006D,
-       0xFF0F07D8, 0xDEAD,
+       0xB0000000, 0x00000000,
                0x0EF, 0x00000000,
                0x0EF, 0x000020A2,
                0x0DF, 0x00000080,
@@ -646,7 +641,7 @@ u32 RTL8812AE_RADIOA_ARRAY[] = {
                0x03B, 0x0006B064,
                0x03C, 0x00004000,
                0x03A, 0x000000D8,
-               0x03B, 0x00023070,
+               0x03B, 0x00063070,
                0x03C, 0x00004000,
                0x03A, 0x00000468,
                0x03B, 0x0005B870,
@@ -685,31 +680,7 @@ u32 RTL8812AE_RADIOA_ARRAY[] = {
                0x03B, 0x00082080,
                0x03C, 0x00010000,
                0x0EF, 0x00001100,
-       0xFF0F0740, 0xABCD,
-               0x034, 0x0004A0B2,
-               0x034, 0x000490AF,
-               0x034, 0x00048070,
-               0x034, 0x0004706D,
-               0x034, 0x00046050,
-               0x034, 0x0004504D,
-               0x034, 0x0004404A,
-               0x034, 0x00043047,
-               0x034, 0x0004200A,
-               0x034, 0x00041007,
-               0x034, 0x00040004,
-       0xFF0F02C0, 0xCDEF,
-               0x034, 0x0004A0B2,
-               0x034, 0x000490AF,
-               0x034, 0x00048070,
-               0x034, 0x0004706D,
-               0x034, 0x00046050,
-               0x034, 0x0004504D,
-               0x034, 0x0004404A,
-               0x034, 0x00043047,
-               0x034, 0x0004200A,
-               0x034, 0x00041007,
-               0x034, 0x00040004,
-       0xFF0F01C0, 0xCDEF,
+       0x80000008, 0x00000000, 0x40000000, 0x00000000,
                0x034, 0x0004A0B2,
                0x034, 0x000490AF,
                0x034, 0x00048070,
@@ -721,92 +692,32 @@ u32 RTL8812AE_RADIOA_ARRAY[] = {
                0x034, 0x0004200A,
                0x034, 0x00041007,
                0x034, 0x00040004,
-       0xFF0F07D8, 0xCDEF,
+       0x90000008, 0x05000000, 0x40000000, 0x00000000,
                0x034, 0x0004A0B2,
                0x034, 0x000490AF,
                0x034, 0x00048070,
                0x034, 0x0004706D,
-               0x034, 0x00046050,
-               0x034, 0x0004504D,
-               0x034, 0x0004404A,
-               0x034, 0x00043047,
-               0x034, 0x0004200A,
-               0x034, 0x00041007,
-               0x034, 0x00040004,
-       0xFF0F07D0, 0xCDEF,
-               0x034, 0x0004A0B2,
-               0x034, 0x000490AF,
-               0x034, 0x00048070,
-               0x034, 0x0004706D,
-               0x034, 0x00046050,
-               0x034, 0x0004504D,
-               0x034, 0x0004404A,
-               0x034, 0x00043047,
-               0x034, 0x0004200A,
-               0x034, 0x00041007,
-               0x034, 0x00040004,
-       0xCDCDCDCD, 0xCDCD,
+               0x034, 0x0004604D,
+               0x034, 0x0004504A,
+               0x034, 0x00044047,
+               0x034, 0x00043044,
+               0x034, 0x00042007,
+               0x034, 0x00041004,
+               0x034, 0x00040001,
+       0xA0000000, 0x00000000,
                0x034, 0x0004ADF5,
                0x034, 0x00049DF2,
                0x034, 0x00048DEF,
                0x034, 0x00047DEC,
                0x034, 0x00046DE9,
-               0x034, 0x00045DC9,
-               0x034, 0x00044CE8,
-               0x034, 0x000438CA,
-               0x034, 0x00042889,
-               0x034, 0x0004184A,
-               0x034, 0x0004044A,
-       0xFF0F0740, 0xDEAD,
-       0xFF0F0740, 0xABCD,
-               0x034, 0x0002A0B2,
-               0x034, 0x000290AF,
-               0x034, 0x00028070,
-               0x034, 0x0002706D,
-               0x034, 0x00026050,
-               0x034, 0x0002504D,
-               0x034, 0x0002404A,
-               0x034, 0x00023047,
-               0x034, 0x0002200A,
-               0x034, 0x00021007,
-               0x034, 0x00020004,
-       0xFF0F02C0, 0xCDEF,
-               0x034, 0x0002A0B2,
-               0x034, 0x000290AF,
-               0x034, 0x00028070,
-               0x034, 0x0002706D,
-               0x034, 0x00026050,
-               0x034, 0x0002504D,
-               0x034, 0x0002404A,
-               0x034, 0x00023047,
-               0x034, 0x0002200A,
-               0x034, 0x00021007,
-               0x034, 0x00020004,
-       0xFF0F01C0, 0xCDEF,
-               0x034, 0x0002A0B2,
-               0x034, 0x000290AF,
-               0x034, 0x00028070,
-               0x034, 0x0002706D,
-               0x034, 0x00026050,
-               0x034, 0x0002504D,
-               0x034, 0x0002404A,
-               0x034, 0x00023047,
-               0x034, 0x0002200A,
-               0x034, 0x00021007,
-               0x034, 0x00020004,
-       0xFF0F07D8, 0xCDEF,
-               0x034, 0x0002A0B2,
-               0x034, 0x000290AF,
-               0x034, 0x00028070,
-               0x034, 0x0002706D,
-               0x034, 0x00026050,
-               0x034, 0x0002504D,
-               0x034, 0x0002404A,
-               0x034, 0x00023047,
-               0x034, 0x0002200A,
-               0x034, 0x00021007,
-               0x034, 0x00020004,
-       0xFF0F07D0, 0xCDEF,
+               0x034, 0x00045DE6,
+               0x034, 0x00044DE3,
+               0x034, 0x000438C8,
+               0x034, 0x000428C5,
+               0x034, 0x000418C2,
+               0x034, 0x000408C0,
+       0xB0000000, 0x00000000,
+       0x80000008, 0x00000000, 0x40000000, 0x00000000,
                0x034, 0x0002A0B2,
                0x034, 0x000290AF,
                0x034, 0x00028070,
@@ -818,32 +729,32 @@ u32 RTL8812AE_RADIOA_ARRAY[] = {
                0x034, 0x0002200A,
                0x034, 0x00021007,
                0x034, 0x00020004,
-       0xCDCDCDCD, 0xCDCD,
+       0x90000008, 0x05000000, 0x40000000, 0x00000000,
+               0x034, 0x0002A0B4,
+               0x034, 0x000290B1,
+               0x034, 0x00028072,
+               0x034, 0x0002706F,
+               0x034, 0x0002604F,
+               0x034, 0x0002504C,
+               0x034, 0x00024049,
+               0x034, 0x00023046,
+               0x034, 0x00022009,
+               0x034, 0x00021006,
+               0x034, 0x00020003,
+       0xA0000000, 0x00000000,
                0x034, 0x0002ADF5,
                0x034, 0x00029DF2,
                0x034, 0x00028DEF,
                0x034, 0x00027DEC,
                0x034, 0x00026DE9,
-               0x034, 0x00025DC9,
-               0x034, 0x00024CE8,
-               0x034, 0x000238CA,
-               0x034, 0x00022889,
-               0x034, 0x0002184A,
-               0x034, 0x0002044A,
-       0xFF0F0740, 0xDEAD,
-       0xFF0F0740, 0xABCD,
-               0x034, 0x0000A0B2,
-               0x034, 0x000090AF,
-               0x034, 0x00008070,
-               0x034, 0x0000706D,
-               0x034, 0x00006050,
-               0x034, 0x0000504D,
-               0x034, 0x0000404A,
-               0x034, 0x00003047,
-               0x034, 0x0000200A,
-               0x034, 0x00001007,
-               0x034, 0x00000004,
-       0xFF0F02C0, 0xCDEF,
+               0x034, 0x00025DE6,
+               0x034, 0x00024DE3,
+               0x034, 0x000238C8,
+               0x034, 0x000228C5,
+               0x034, 0x000218C2,
+               0x034, 0x000208C0,
+       0xB0000000, 0x00000000,
+       0x80000008, 0x00000000, 0x40000000, 0x00000000,
                0x034, 0x0000A0B2,
                0x034, 0x000090AF,
                0x034, 0x00008070,
@@ -855,93 +766,33 @@ u32 RTL8812AE_RADIOA_ARRAY[] = {
                0x034, 0x0000200A,
                0x034, 0x00001007,
                0x034, 0x00000004,
-       0xFF0F01C0, 0xCDEF,
+       0x90000008, 0x05000000, 0x40000000, 0x00000000,
                0x034, 0x0000A0B2,
                0x034, 0x000090AF,
                0x034, 0x00008070,
                0x034, 0x0000706D,
-               0x034, 0x00006050,
-               0x034, 0x0000504D,
-               0x034, 0x0000404A,
-               0x034, 0x00003047,
-               0x034, 0x0000200A,
-               0x034, 0x00001007,
-               0x034, 0x00000004,
-       0xFF0F07D8, 0xCDEF,
-               0x034, 0x0000A0B2,
-               0x034, 0x000090AF,
-               0x034, 0x00008070,
-               0x034, 0x0000706D,
-               0x034, 0x00006050,
-               0x034, 0x0000504D,
-               0x034, 0x0000404A,
-               0x034, 0x00003047,
-               0x034, 0x0000200A,
-               0x034, 0x00001007,
-               0x034, 0x00000004,
-       0xFF0F07D0, 0xCDEF,
-               0x034, 0x0000A0B2,
-               0x034, 0x000090AF,
-               0x034, 0x00008070,
-               0x034, 0x0000706D,
-               0x034, 0x00006050,
-               0x034, 0x0000504D,
-               0x034, 0x0000404A,
-               0x034, 0x00003047,
-               0x034, 0x0000200A,
-               0x034, 0x00001007,
-               0x034, 0x00000004,
-       0xCDCDCDCD, 0xCDCD,
+               0x034, 0x0000604D,
+               0x034, 0x0000504A,
+               0x034, 0x00004047,
+               0x034, 0x00003044,
+               0x034, 0x00002007,
+               0x034, 0x00001004,
+               0x034, 0x00000001,
+       0xA0000000, 0x00000000,
                0x034, 0x0000AFF7,
                0x034, 0x00009DF7,
                0x034, 0x00008DF4,
                0x034, 0x00007DF1,
                0x034, 0x00006DEE,
-               0x034, 0x00005DCD,
-               0x034, 0x00004CEB,
+               0x034, 0x00005DEB,
+               0x034, 0x00004DE8,
                0x034, 0x000038CC,
-               0x034, 0x0000288B,
-               0x034, 0x0000184C,
-               0x034, 0x0000044C,
-       0xFF0F0740, 0xDEAD,
+               0x034, 0x000028C9,
+               0x034, 0x000018C6,
+               0x034, 0x000008C3,
+       0xB0000000, 0x00000000,
                0x0EF, 0x00000000,
-       0xFF0F0740, 0xABCD,
-               0x018, 0x0001712A,
-               0x0EF, 0x00000040,
-               0x035, 0x000001D4,
-               0x035, 0x000081D4,
-               0x035, 0x000101D4,
-               0x035, 0x000201B4,
-               0x035, 0x000281B4,
-               0x035, 0x000301B4,
-               0x035, 0x000401B4,
-               0x035, 0x000481B4,
-               0x035, 0x000501B4,
-       0xFF0F02C0, 0xCDEF,
-               0x018, 0x0001712A,
-               0x0EF, 0x00000040,
-               0x035, 0x000001D4,
-               0x035, 0x000081D4,
-               0x035, 0x000101D4,
-               0x035, 0x000201B4,
-               0x035, 0x000281B4,
-               0x035, 0x000301B4,
-               0x035, 0x000401B4,
-               0x035, 0x000481B4,
-               0x035, 0x000501B4,
-       0xFF0F01C0, 0xCDEF,
-               0x018, 0x0001712A,
-               0x0EF, 0x00000040,
-               0x035, 0x000001D4,
-               0x035, 0x000081D4,
-               0x035, 0x000101D4,
-               0x035, 0x000201B4,
-               0x035, 0x000281B4,
-               0x035, 0x000301B4,
-               0x035, 0x000401B4,
-               0x035, 0x000481B4,
-               0x035, 0x000501B4,
-       0xFF0F07D8, 0xCDEF,
+       0x80000008, 0x00000000, 0x40000000, 0x00000000,
                0x018, 0x0001712A,
                0x0EF, 0x00000040,
                0x035, 0x000001D4,
@@ -953,7 +804,7 @@ u32 RTL8812AE_RADIOA_ARRAY[] = {
                0x035, 0x000401B4,
                0x035, 0x000481B4,
                0x035, 0x000501B4,
-       0xFF0F07D0, 0xCDEF,
+       0x90000008, 0x05000000, 0x40000000, 0x00000000,
                0x018, 0x0001712A,
                0x0EF, 0x00000040,
                0x035, 0x000001D4,
@@ -965,7 +816,7 @@ u32 RTL8812AE_RADIOA_ARRAY[] = {
                0x035, 0x000401B4,
                0x035, 0x000481B4,
                0x035, 0x000501B4,
-       0xCDCDCDCD, 0xCDCD,
+       0xA0000000, 0x00000000,
                0x018, 0x0001712A,
                0x0EF, 0x00000040,
                0x035, 0x00000188,
@@ -977,54 +828,9 @@ u32 RTL8812AE_RADIOA_ARRAY[] = {
                0x035, 0x000401D8,
                0x035, 0x000481D8,
                0x035, 0x000501D8,
-       0xFF0F0740, 0xDEAD,
+       0xB0000000, 0x00000000,
                0x0EF, 0x00000000,
-       0xFF0F0740, 0xABCD,
-               0x018, 0x0001712A,
-               0x0EF, 0x00000010,
-               0x036, 0x00004BFB,
-               0x036, 0x0000CBFB,
-               0x036, 0x00014BFB,
-               0x036, 0x0001CBFB,
-               0x036, 0x00024F4B,
-               0x036, 0x0002CF4B,
-               0x036, 0x00034F4B,
-               0x036, 0x0003CF4B,
-               0x036, 0x00044F4B,
-               0x036, 0x0004CF4B,
-               0x036, 0x00054F4B,
-               0x036, 0x0005CF4B,
-       0xFF0F02C0, 0xCDEF,
-               0x018, 0x0001712A,
-               0x0EF, 0x00000010,
-               0x036, 0x00004BFB,
-               0x036, 0x0000CBFB,
-               0x036, 0x00014BFB,
-               0x036, 0x0001CBFB,
-               0x036, 0x00024F4B,
-               0x036, 0x0002CF4B,
-               0x036, 0x00034F4B,
-               0x036, 0x0003CF4B,
-               0x036, 0x00044F4B,
-               0x036, 0x0004CF4B,
-               0x036, 0x00054F4B,
-               0x036, 0x0005CF4B,
-       0xFF0F01C0, 0xCDEF,
-               0x018, 0x0001712A,
-               0x0EF, 0x00000010,
-               0x036, 0x00004BFB,
-               0x036, 0x0000CBFB,
-               0x036, 0x00014BFB,
-               0x036, 0x0001CBFB,
-               0x036, 0x00024F4B,
-               0x036, 0x0002CF4B,
-               0x036, 0x00034F4B,
-               0x036, 0x0003CF4B,
-               0x036, 0x00044F4B,
-               0x036, 0x0004CF4B,
-               0x036, 0x00054F4B,
-               0x036, 0x0005CF4B,
-       0xFF0F07D8, 0xCDEF,
+       0x80000008, 0x00000000, 0x40000000, 0x00000000,
                0x018, 0x0001712A,
                0x0EF, 0x00000010,
                0x036, 0x00004BFB,
@@ -1039,7 +845,7 @@ u32 RTL8812AE_RADIOA_ARRAY[] = {
                0x036, 0x0004CF4B,
                0x036, 0x00054F4B,
                0x036, 0x0005CF4B,
-       0xFF0F07D0, 0xCDEF,
+       0x90000008, 0x05000000, 0x40000000, 0x00000000,
                0x018, 0x0001712A,
                0x0EF, 0x00000010,
                0x036, 0x00004BFB,
@@ -1054,91 +860,61 @@ u32 RTL8812AE_RADIOA_ARRAY[] = {
                0x036, 0x0004CF4B,
                0x036, 0x00054F4B,
                0x036, 0x0005CF4B,
-       0xCDCDCDCD, 0xCDCD,
+       0xA0000000, 0x00000000,
                0x018, 0x0001712A,
                0x0EF, 0x00000010,
                0x036, 0x00084EB4,
                0x036, 0x0008CC35,
                0x036, 0x00094C35,
                0x036, 0x0009CC35,
-               0x036, 0x000A4935,
+               0x036, 0x000A4C35,
                0x036, 0x000ACC35,
                0x036, 0x000B4C35,
                0x036, 0x000BCC35,
-               0x036, 0x000C4EB4,
-               0x036, 0x000CCEB5,
-               0x036, 0x000D4EB5,
-               0x036, 0x000DCEB5,
-       0xFF0F0740, 0xDEAD,
+               0x036, 0x000C4C34,
+               0x036, 0x000CCC35,
+               0x036, 0x000D4C35,
+               0x036, 0x000DCC35,
+       0xB0000000, 0x00000000,
                0x0EF, 0x00000000,
                0x0EF, 0x00000008,
-       0xFF0F0740, 0xABCD,
-               0x03C, 0x000002CC,
-               0x03C, 0x00000522,
-               0x03C, 0x00000902,
-       0xFF0F02C0, 0xCDEF,
-               0x03C, 0x000002CC,
-               0x03C, 0x00000522,
-               0x03C, 0x00000902,
-       0xFF0F01C0, 0xCDEF,
+       0x80000008, 0x00000000, 0x40000000, 0x00000000,
                0x03C, 0x000002CC,
                0x03C, 0x00000522,
                0x03C, 0x00000902,
-       0xFF0F07D8, 0xCDEF,
+       0x90000008, 0x05000000, 0x40000000, 0x00000000,
                0x03C, 0x000002CC,
                0x03C, 0x00000522,
                0x03C, 0x00000902,
-       0xFF0F07D0, 0xCDEF,
-               0x03C, 0x000002CC,
-               0x03C, 0x00000522,
-               0x03C, 0x00000902,
-       0xCDCDCDCD, 0xCDCD,
+       0xA0000000, 0x00000000,
                0x03C, 0x000002A8,
                0x03C, 0x000005A2,
                0x03C, 0x00000880,
-       0xFF0F0740, 0xDEAD,
+       0xB0000000, 0x00000000,
                0x0EF, 0x00000000,
                0x018, 0x0001712A,
                0x0EF, 0x00000002,
                0x0DF, 0x00000080,
-               0x01F, 0x00040064,
-       0xFF0F0740, 0xABCD,
-               0x061, 0x000FDD43,
-               0x062, 0x00038F4B,
-               0x063, 0x00032117,
-               0x064, 0x000194AC,
-               0x065, 0x000931D1,
-       0xFF0F02C0, 0xCDEF,
+               0x01F, 0x00000064,
+       0x80000008, 0x00000000, 0x40000000, 0x00000000,
                0x061, 0x000FDD43,
                0x062, 0x00038F4B,
                0x063, 0x00032117,
                0x064, 0x000194AC,
                0x065, 0x000931D1,
-       0xFF0F01C0, 0xCDEF,
+       0x90000008, 0x05000000, 0x40000000, 0x00000000,
                0x061, 0x000FDD43,
                0x062, 0x00038F4B,
                0x063, 0x00032117,
                0x064, 0x000194AC,
-               0x065, 0x000931D1,
-       0xFF0F07D8, 0xCDEF,
-               0x061, 0x000FDD43,
-               0x062, 0x00038F4B,
-               0x063, 0x00032117,
-               0x064, 0x000194AC,
-               0x065, 0x000931D1,
-       0xFF0F07D0, 0xCDEF,
-               0x061, 0x000FDD43,
-               0x062, 0x00038F4B,
-               0x063, 0x00032117,
-               0x064, 0x000194AC,
-               0x065, 0x000931D1,
-       0xCDCDCDCD, 0xCDCD,
+               0x065, 0x000931D2,
+       0xA0000000, 0x00000000,
                0x061, 0x000E5D53,
                0x062, 0x00038FCD,
-               0x063, 0x000314EB,
+               0x063, 0x000114EB,
                0x064, 0x000196AC,
                0x065, 0x000911D7,
-       0xFF0F0740, 0xDEAD,
+       0xB0000000, 0x00000000,
                0x008, 0x00008400,
                0x01C, 0x000739D2,
                0x0B4, 0x0001E78D,
@@ -1149,29 +925,29 @@ u32 RTL8812AE_RADIOA_ARRAY[] = {
                0x0FE, 0x00000000,
                0x0B4, 0x0001A78D,
                0x018, 0x0001712A,
-
 };
 
+u32 RTL8812AE_RADIOA_1TARRAYLEN = sizeof(RTL8812AE_RADIOA_ARRAY) / sizeof(u32);
+
 u32 RTL8812AE_RADIOB_ARRAY[] = {
                0x056, 0x00051CF2,
                0x066, 0x00040000,
                0x089, 0x00000080,
-       0xFF0F0740, 0xABCD,
-               0x086, 0x00014B38,
-       0xFF0F01C0, 0xCDEF,
-               0x086, 0x00014B38,
-       0xFF0F02C0, 0xCDEF,
-               0x086, 0x00014B38,
-       0xFF0F07D8, 0xCDEF,
+       0x80000001, 0x00000000, 0x40000000, 0x00000000,
                0x086, 0x00014B3A,
-       0xFF0F07D0, 0xCDEF,
+       0x90000001, 0x00000005, 0x40000000, 0x00000000,
                0x086, 0x00014B3A,
-       0xCDCDCDCD, 0xCDCD,
+       0xA0000000, 0x00000000,
                0x086, 0x00014B38,
-       0xFF0F0740, 0xDEAD,
+       0xB0000000, 0x00000000,
+       0x80000004, 0x00000000, 0x40000000, 0x00000000,
+               0x08B, 0x00080180,
+       0xA0000000, 0x00000000,
+               0x08B, 0x00087180,
+       0xB0000000, 0x00000000,
                0x018, 0x00000006,
                0x0EF, 0x00002000,
-       0xFF0F07D8, 0xABCD,
+       0x80000001, 0x00000000, 0x40000000, 0x00000000,
                0x03B, 0x0003F218,
                0x03B, 0x00030A58,
                0x03B, 0x0002FA58,
@@ -1179,7 +955,7 @@ u32 RTL8812AE_RADIOB_ARRAY[] = {
                0x03B, 0x0001FA50,
                0x03B, 0x00010248,
                0x03B, 0x00008240,
-       0xFF0F07D0, 0xCDEF,
+       0x90000001, 0x00000005, 0x40000000, 0x00000000,
                0x03B, 0x0003F218,
                0x03B, 0x00030A58,
                0x03B, 0x0002FA58,
@@ -1187,7 +963,7 @@ u32 RTL8812AE_RADIOB_ARRAY[] = {
                0x03B, 0x0001FA50,
                0x03B, 0x00010248,
                0x03B, 0x00008240,
-       0xCDCDCDCD, 0xCDCD,
+       0xA0000000, 0x00000000,
                0x03B, 0x00038A58,
                0x03B, 0x00037A58,
                0x03B, 0x0002A590,
@@ -1195,9 +971,9 @@ u32 RTL8812AE_RADIOB_ARRAY[] = {
                0x03B, 0x00018248,
                0x03B, 0x00010240,
                0x03B, 0x00008240,
-       0xFF0F07D8, 0xDEAD,
+       0xB0000000, 0x00000000,
                0x0EF, 0x00000100,
-       0xFF0F07D8, 0xABCD,
+       0x80000002, 0x00000000, 0x40000000, 0x00000000,
                0x034, 0x0000A4EE,
                0x034, 0x00009076,
                0x034, 0x00008073,
@@ -1209,7 +985,7 @@ u32 RTL8812AE_RADIOB_ARRAY[] = {
                0x034, 0x00002028,
                0x034, 0x00001025,
                0x034, 0x00000022,
-       0xCDCDCDCD, 0xCDCD,
+       0xA0000000, 0x00000000,
                0x034, 0x0000ADF4,
                0x034, 0x00009DF1,
                0x034, 0x00008DEE,
@@ -1221,7 +997,7 @@ u32 RTL8812AE_RADIOB_ARRAY[] = {
                0x034, 0x000024E7,
                0x034, 0x0000146B,
                0x034, 0x0000006D,
-       0xFF0F07D8, 0xDEAD,
+       0xB0000000, 0x00000000,
                0x0EF, 0x00000000,
                0x0EF, 0x000020A2,
                0x0DF, 0x00000080,
@@ -1314,55 +1090,7 @@ u32 RTL8812AE_RADIOB_ARRAY[] = {
                0x03B, 0x00082080,
                0x03C, 0x00010000,
                0x0EF, 0x00001100,
-       0xFF0F0740, 0xABCD,
-               0x034, 0x0004A0B2,
-               0x034, 0x000490AF,
-               0x034, 0x00048070,
-               0x034, 0x0004706D,
-               0x034, 0x00046050,
-               0x034, 0x0004504D,
-               0x034, 0x0004404A,
-               0x034, 0x00043047,
-               0x034, 0x0004200A,
-               0x034, 0x00041007,
-               0x034, 0x00040004,
-       0xFF0F01C0, 0xCDEF,
-               0x034, 0x0004A0B2,
-               0x034, 0x000490AF,
-               0x034, 0x00048070,
-               0x034, 0x0004706D,
-               0x034, 0x00046050,
-               0x034, 0x0004504D,
-               0x034, 0x0004404A,
-               0x034, 0x00043047,
-               0x034, 0x0004200A,
-               0x034, 0x00041007,
-               0x034, 0x00040004,
-       0xFF0F02C0, 0xCDEF,
-               0x034, 0x0004A0B2,
-               0x034, 0x000490AF,
-               0x034, 0x00048070,
-               0x034, 0x0004706D,
-               0x034, 0x00046050,
-               0x034, 0x0004504D,
-               0x034, 0x0004404A,
-               0x034, 0x00043047,
-               0x034, 0x0004200A,
-               0x034, 0x00041007,
-               0x034, 0x00040004,
-       0xFF0F07D8, 0xCDEF,
-               0x034, 0x0004A0B2,
-               0x034, 0x000490AF,
-               0x034, 0x00048070,
-               0x034, 0x0004706D,
-               0x034, 0x00046050,
-               0x034, 0x0004504D,
-               0x034, 0x0004404A,
-               0x034, 0x00043047,
-               0x034, 0x0004200A,
-               0x034, 0x00041007,
-               0x034, 0x00040004,
-       0xFF0F07D0, 0xCDEF,
+       0x80000008, 0x00000000, 0x40000000, 0x00000000,
                0x034, 0x0004A0B2,
                0x034, 0x000490AF,
                0x034, 0x00048070,
@@ -1374,68 +1102,32 @@ u32 RTL8812AE_RADIOB_ARRAY[] = {
                0x034, 0x0004200A,
                0x034, 0x00041007,
                0x034, 0x00040004,
-       0xCDCDCDCD, 0xCDCD,
+       0x90000008, 0x05000000, 0x40000000, 0x00000000,
+               0x034, 0x0004A0B1,
+               0x034, 0x000490AE,
+               0x034, 0x0004806F,
+               0x034, 0x0004706C,
+               0x034, 0x0004604C,
+               0x034, 0x00045049,
+               0x034, 0x00044046,
+               0x034, 0x00043043,
+               0x034, 0x00042006,
+               0x034, 0x00041003,
+               0x034, 0x00040000,
+       0xA0000000, 0x00000000,
                0x034, 0x0004ADF5,
                0x034, 0x00049DF2,
                0x034, 0x00048DEF,
                0x034, 0x00047DEC,
                0x034, 0x00046DE9,
-               0x034, 0x00045DC9,
-               0x034, 0x00044CE8,
-               0x034, 0x000438CA,
-               0x034, 0x00042889,
-               0x034, 0x0004184A,
-               0x034, 0x0004044A,
-       0xFF0F0740, 0xDEAD,
-       0xFF0F0740, 0xABCD,
-               0x034, 0x0002A0B2,
-               0x034, 0x000290AF,
-               0x034, 0x00028070,
-               0x034, 0x0002706D,
-               0x034, 0x00026050,
-               0x034, 0x0002504D,
-               0x034, 0x0002404A,
-               0x034, 0x00023047,
-               0x034, 0x0002200A,
-               0x034, 0x00021007,
-               0x034, 0x00020004,
-       0xFF0F01C0, 0xCDEF,
-               0x034, 0x0002A0B2,
-               0x034, 0x000290AF,
-               0x034, 0x00028070,
-               0x034, 0x0002706D,
-               0x034, 0x00026050,
-               0x034, 0x0002504D,
-               0x034, 0x0002404A,
-               0x034, 0x00023047,
-               0x034, 0x0002200A,
-               0x034, 0x00021007,
-               0x034, 0x00020004,
-       0xFF0F02C0, 0xCDEF,
-               0x034, 0x0002A0B2,
-               0x034, 0x000290AF,
-               0x034, 0x00028070,
-               0x034, 0x0002706D,
-               0x034, 0x00026050,
-               0x034, 0x0002504D,
-               0x034, 0x0002404A,
-               0x034, 0x00023047,
-               0x034, 0x0002200A,
-               0x034, 0x00021007,
-               0x034, 0x00020004,
-       0xFF0F07D8, 0xCDEF,
-               0x034, 0x0002A0B2,
-               0x034, 0x000290AF,
-               0x034, 0x00028070,
-               0x034, 0x0002706D,
-               0x034, 0x00026050,
-               0x034, 0x0002504D,
-               0x034, 0x0002404A,
-               0x034, 0x00023047,
-               0x034, 0x0002200A,
-               0x034, 0x00021007,
-               0x034, 0x00020004,
-       0xFF0F07D0, 0xCDEF,
+               0x034, 0x00045DE6,
+               0x034, 0x00044DE3,
+               0x034, 0x000438C8,
+               0x034, 0x000428C5,
+               0x034, 0x000418C2,
+               0x034, 0x000408C0,
+       0xB0000000, 0x00000000,
+       0x80000008, 0x00000000, 0x40000000, 0x00000000,
                0x034, 0x0002A0B2,
                0x034, 0x000290AF,
                0x034, 0x00028070,
@@ -1447,56 +1139,32 @@ u32 RTL8812AE_RADIOB_ARRAY[] = {
                0x034, 0x0002200A,
                0x034, 0x00021007,
                0x034, 0x00020004,
-       0xCDCDCDCD, 0xCDCD,
+       0x90000008, 0x05000000, 0x40000000, 0x00000000,
+               0x034, 0x0002A0B3,
+               0x034, 0x000290B0,
+               0x034, 0x00028071,
+               0x034, 0x0002706E,
+               0x034, 0x0002604E,
+               0x034, 0x0002504B,
+               0x034, 0x00024048,
+               0x034, 0x00023045,
+               0x034, 0x00022008,
+               0x034, 0x00021005,
+               0x034, 0x00020002,
+       0xA0000000, 0x00000000,
                0x034, 0x0002ADF5,
                0x034, 0x00029DF2,
                0x034, 0x00028DEF,
                0x034, 0x00027DEC,
                0x034, 0x00026DE9,
-               0x034, 0x00025DC9,
-               0x034, 0x00024CE8,
-               0x034, 0x000238CA,
-               0x034, 0x00022889,
-               0x034, 0x0002184A,
-               0x034, 0x0002044A,
-       0xFF0F0740, 0xDEAD,
-       0xFF0F0740, 0xABCD,
-               0x034, 0x0000A0B2,
-               0x034, 0x000090AF,
-               0x034, 0x00008070,
-               0x034, 0x0000706D,
-               0x034, 0x00006050,
-               0x034, 0x0000504D,
-               0x034, 0x0000404A,
-               0x034, 0x00003047,
-               0x034, 0x0000200A,
-               0x034, 0x00001007,
-               0x034, 0x00000004,
-       0xFF0F01C0, 0xCDEF,
-               0x034, 0x0000A0B2,
-               0x034, 0x000090AF,
-               0x034, 0x00008070,
-               0x034, 0x0000706D,
-               0x034, 0x00006050,
-               0x034, 0x0000504D,
-               0x034, 0x0000404A,
-               0x034, 0x00003047,
-               0x034, 0x0000200A,
-               0x034, 0x00001007,
-               0x034, 0x00000004,
-       0xFF0F02C0, 0xCDEF,
-               0x034, 0x0000A0B2,
-               0x034, 0x000090AF,
-               0x034, 0x00008070,
-               0x034, 0x0000706D,
-               0x034, 0x00006050,
-               0x034, 0x0000504D,
-               0x034, 0x0000404A,
-               0x034, 0x00003047,
-               0x034, 0x0000200A,
-               0x034, 0x00001007,
-               0x034, 0x00000004,
-       0xFF0F07D8, 0xCDEF,
+               0x034, 0x00025DE6,
+               0x034, 0x00024DE3,
+               0x034, 0x000238C8,
+               0x034, 0x000228C5,
+               0x034, 0x000218C2,
+               0x034, 0x000208C0,
+       0xB0000000, 0x00000000,
+       0x80000008, 0x00000000, 0x40000000, 0x00000000,
                0x034, 0x0000A0B2,
                0x034, 0x000090AF,
                0x034, 0x00008070,
@@ -1508,72 +1176,33 @@ u32 RTL8812AE_RADIOB_ARRAY[] = {
                0x034, 0x0000200A,
                0x034, 0x00001007,
                0x034, 0x00000004,
-       0xFF0F07D0, 0xCDEF,
-               0x034, 0x0000A0B2,
-               0x034, 0x000090AF,
+       0x90000008, 0x05000000, 0x40000000, 0x00000000,
+               0x034, 0x0000A0B3,
+               0x034, 0x000090B0,
                0x034, 0x00008070,
                0x034, 0x0000706D,
-               0x034, 0x00006050,
-               0x034, 0x0000504D,
-               0x034, 0x0000404A,
-               0x034, 0x00003047,
-               0x034, 0x0000200A,
-               0x034, 0x00001007,
-               0x034, 0x00000004,
-       0xCDCDCDCD, 0xCDCD,
+               0x034, 0x0000604D,
+               0x034, 0x0000504A,
+               0x034, 0x00004047,
+               0x034, 0x00003044,
+               0x034, 0x00002007,
+               0x034, 0x00001004,
+               0x034, 0x00000001,
+       0xA0000000, 0x00000000,
                0x034, 0x0000AFF7,
                0x034, 0x00009DF7,
                0x034, 0x00008DF4,
                0x034, 0x00007DF1,
                0x034, 0x00006DEE,
-               0x034, 0x00005DCD,
-               0x034, 0x00004CEB,
+               0x034, 0x00005DEB,
+               0x034, 0x00004DE8,
                0x034, 0x000038CC,
-               0x034, 0x0000288B,
-               0x034, 0x0000184C,
-               0x034, 0x0000044C,
-       0xFF0F0740, 0xDEAD,
-               0x0EF, 0x00000000,
-       0xFF0F0740, 0xABCD,
-               0x018, 0x0001712A,
-               0x0EF, 0x00000040,
-               0x035, 0x000001C5,
-               0x035, 0x000081C5,
-               0x035, 0x000101C5,
-               0x035, 0x00020174,
-               0x035, 0x00028174,
-               0x035, 0x00030174,
-               0x035, 0x00040185,
-               0x035, 0x00048185,
-               0x035, 0x00050185,
-               0x0EF, 0x00000000,
-       0xFF0F01C0, 0xCDEF,
-               0x018, 0x0001712A,
-               0x0EF, 0x00000040,
-               0x035, 0x000001C5,
-               0x035, 0x000081C5,
-               0x035, 0x000101C5,
-               0x035, 0x00020174,
-               0x035, 0x00028174,
-               0x035, 0x00030174,
-               0x035, 0x00040185,
-               0x035, 0x00048185,
-               0x035, 0x00050185,
-               0x0EF, 0x00000000,
-       0xFF0F02C0, 0xCDEF,
-               0x018, 0x0001712A,
-               0x0EF, 0x00000040,
-               0x035, 0x000001C5,
-               0x035, 0x000081C5,
-               0x035, 0x000101C5,
-               0x035, 0x00020174,
-               0x035, 0x00028174,
-               0x035, 0x00030174,
-               0x035, 0x00040185,
-               0x035, 0x00048185,
-               0x035, 0x00050185,
+               0x034, 0x000028C9,
+               0x034, 0x000018C6,
+               0x034, 0x000008C3,
+       0xB0000000, 0x00000000,
                0x0EF, 0x00000000,
-       0xFF0F07D8, 0xCDEF,
+       0x80000008, 0x00000000, 0x40000000, 0x00000000,
                0x018, 0x0001712A,
                0x0EF, 0x00000040,
                0x035, 0x000001C5,
@@ -1586,7 +1215,7 @@ u32 RTL8812AE_RADIOB_ARRAY[] = {
                0x035, 0x00048185,
                0x035, 0x00050185,
                0x0EF, 0x00000000,
-       0xFF0F07D0, 0xCDEF,
+       0x90000008, 0x05000000, 0x40000000, 0x00000000,
                0x018, 0x0001712A,
                0x0EF, 0x00000040,
                0x035, 0x000001C5,
@@ -1599,36 +1228,21 @@ u32 RTL8812AE_RADIOB_ARRAY[] = {
                0x035, 0x00048185,
                0x035, 0x00050185,
                0x0EF, 0x00000000,
-       0xCDCDCDCD, 0xCDCD,
+       0xA0000000, 0x00000000,
                0x018, 0x0001712A,
                0x0EF, 0x00000040,
-               0x035, 0x00000186,
-               0x035, 0x00008186,
-               0x035, 0x00010185,
-               0x035, 0x000201D5,
-               0x035, 0x000281D5,
-               0x035, 0x000301D5,
-               0x035, 0x000401D5,
-               0x035, 0x000481D5,
-               0x035, 0x000501D5,
+               0x035, 0x00000188,
+               0x035, 0x00008147,
+               0x035, 0x00010147,
+               0x035, 0x000201D7,
+               0x035, 0x000281D7,
+               0x035, 0x000301D7,
+               0x035, 0x000401D8,
+               0x035, 0x000481D8,
+               0x035, 0x000501D8,
                0x0EF, 0x00000000,
-       0xFF0F0740, 0xDEAD,
-       0xFF0F0740, 0xABCD,
-               0x018, 0x0001712A,
-               0x0EF, 0x00000010,
-               0x036, 0x00005B8B,
-               0x036, 0x0000DB8B,
-               0x036, 0x00015B8B,
-               0x036, 0x0001DB8B,
-               0x036, 0x000262DB,
-               0x036, 0x0002E2DB,
-               0x036, 0x000362DB,
-               0x036, 0x0003E2DB,
-               0x036, 0x0004553B,
-               0x036, 0x0004D53B,
-               0x036, 0x0005553B,
-               0x036, 0x0005D53B,
-       0xFF0F01C0, 0xCDEF,
+       0xB0000000, 0x00000000,
+       0x80000008, 0x00000000, 0x40000000, 0x00000000,
                0x018, 0x0001712A,
                0x0EF, 0x00000010,
                0x036, 0x00005B8B,
@@ -1643,37 +1257,7 @@ u32 RTL8812AE_RADIOB_ARRAY[] = {
                0x036, 0x0004D53B,
                0x036, 0x0005553B,
                0x036, 0x0005D53B,
-       0xFF0F02C0, 0xCDEF,
-               0x018, 0x0001712A,
-               0x0EF, 0x00000010,
-               0x036, 0x00005B8B,
-               0x036, 0x0000DB8B,
-               0x036, 0x00015B8B,
-               0x036, 0x0001DB8B,
-               0x036, 0x000262DB,
-               0x036, 0x0002E2DB,
-               0x036, 0x000362DB,
-               0x036, 0x0003E2DB,
-               0x036, 0x0004553B,
-               0x036, 0x0004D53B,
-               0x036, 0x0005553B,
-               0x036, 0x0005D53B,
-       0xFF0F07D8, 0xCDEF,
-               0x018, 0x0001712A,
-               0x0EF, 0x00000010,
-               0x036, 0x00005B8B,
-               0x036, 0x0000DB8B,
-               0x036, 0x00015B8B,
-               0x036, 0x0001DB8B,
-               0x036, 0x000262DB,
-               0x036, 0x0002E2DB,
-               0x036, 0x000362DB,
-               0x036, 0x0003E2DB,
-               0x036, 0x0004553B,
-               0x036, 0x0004D53B,
-               0x036, 0x0005553B,
-               0x036, 0x0005D53B,
-       0xFF0F07D0, 0xCDEF,
+       0x90000008, 0x05000000, 0x40000000, 0x00000000,
                0x018, 0x0001712A,
                0x0EF, 0x00000010,
                0x036, 0x00005B8B,
@@ -1688,94 +1272,71 @@ u32 RTL8812AE_RADIOB_ARRAY[] = {
                0x036, 0x0004D53B,
                0x036, 0x0005553B,
                0x036, 0x0005D53B,
-       0xCDCDCDCD, 0xCDCD,
+       0xA0000000, 0x00000000,
                0x018, 0x0001712A,
                0x0EF, 0x00000010,
                0x036, 0x00084EB4,
-               0x036, 0x0008C9B4,
-               0x036, 0x000949B4,
-               0x036, 0x0009C9B4,
-               0x036, 0x000A4935,
-               0x036, 0x000AC935,
-               0x036, 0x000B4935,
-               0x036, 0x000BC935,
-               0x036, 0x000C4EB4,
-               0x036, 0x000CCEB4,
-               0x036, 0x000D4EB4,
-               0x036, 0x000DCEB4,
-       0xFF0F0740, 0xDEAD,
-               0x0EF, 0x00000000,
-               0x0EF, 0x00000008,
-       0xFF0F0740, 0xABCD,
-               0x03C, 0x000002DC,
-               0x03C, 0x00000524,
-               0x03C, 0x00000902,
-       0xFF0F01C0, 0xCDEF,
-               0x03C, 0x000002DC,
-               0x03C, 0x00000524,
-               0x03C, 0x00000902,
-       0xFF0F02C0, 0xCDEF,
-               0x03C, 0x000002DC,
-               0x03C, 0x00000524,
-               0x03C, 0x00000902,
-       0xFF0F07D8, 0xCDEF,
+               0x036, 0x0008CC35,
+               0x036, 0x00094C35,
+               0x036, 0x0009CC35,
+               0x036, 0x000A4C35,
+               0x036, 0x000ACC35,
+               0x036, 0x000B4C35,
+               0x036, 0x000BCC35,
+               0x036, 0x000C4C34,
+               0x036, 0x000CCC35,
+               0x036, 0x000D4C35,
+               0x036, 0x000DCC35,
+       0xB0000000, 0x00000000,
+               0x0EF, 0x00000000,
+               0x0EF, 0x00000008,
+       0x80000008, 0x00000000, 0x40000000, 0x00000000,
                0x03C, 0x000002DC,
                0x03C, 0x00000524,
                0x03C, 0x00000902,
-       0xFF0F07D0, 0xCDEF,
+       0x90000008, 0x05000000, 0x40000000, 0x00000000,
                0x03C, 0x000002DC,
                0x03C, 0x00000524,
                0x03C, 0x00000902,
-       0xCDCDCDCD, 0xCDCD,
-               0x03C, 0x000002AA,
+       0xA0000000, 0x00000000,
+               0x03C, 0x000002A8,
                0x03C, 0x000005A2,
                0x03C, 0x00000880,
-       0xFF0F0740, 0xDEAD,
+       0xB0000000, 0x00000000,
                0x0EF, 0x00000000,
                0x018, 0x0001712A,
                0x0EF, 0x00000002,
                0x0DF, 0x00000080,
-       0xFF0F0740, 0xABCD,
-               0x061, 0x000EAC43,
-               0x062, 0x00038F47,
-               0x063, 0x00031157,
-               0x064, 0x0001C4AC,
-               0x065, 0x000931D1,
-       0xFF0F01C0, 0xCDEF,
-               0x061, 0x000EAC43,
-               0x062, 0x00038F47,
-               0x063, 0x00031157,
-               0x064, 0x0001C4AC,
-               0x065, 0x000931D1,
-       0xFF0F02C0, 0xCDEF,
+       0x80000008, 0x00000000, 0x40000000, 0x00000000,
                0x061, 0x000EAC43,
                0x062, 0x00038F47,
                0x063, 0x00031157,
                0x064, 0x0001C4AC,
                0x065, 0x000931D1,
-       0xFF0F07D8, 0xCDEF,
+       0x90000008, 0x05000000, 0x40000000, 0x00000000,
                0x061, 0x000EAC43,
                0x062, 0x00038F47,
                0x063, 0x00031157,
                0x064, 0x0001C4AC,
-               0x065, 0x000931D1,
-       0xFF0F07D0, 0xCDEF,
+               0x065, 0x000931D2,
+       0x90000002, 0x00000000, 0x40000000, 0x00000000,
                0x061, 0x000EAC43,
                0x062, 0x00038F47,
                0x063, 0x00031157,
                0x064, 0x0001C4AC,
                0x065, 0x000931D1,
-       0xCDCDCDCD, 0xCDCD,
+       0xA0000000, 0x00000000,
                0x061, 0x000E5D53,
                0x062, 0x00038FCD,
-               0x063, 0x000314EB,
+               0x063, 0x000114EB,
                0x064, 0x000196AC,
-               0x065, 0x000931D7,
-       0xFF0F0740, 0xDEAD,
+               0x065, 0x000911D7,
+       0xB0000000, 0x00000000,
                0x008, 0x00008400,
-
 };
 
+u32 RTL8812AE_RADIOB_1TARRAYLEN = sizeof(RTL8812AE_RADIOB_ARRAY) / sizeof(u32);
+
 u32 RTL8821AE_RADIOA_ARRAY[] = {
                0x018, 0x0001712A,
                0x056, 0x00051CF2,
@@ -2285,16 +1846,16 @@ u32 RTL8821AE_RADIOA_ARRAY[] = {
                0x0EF, 0x00000000,
                0x0EF, 0x00000100,
                0x034, 0x0000ADF3,
-               0x034, 0x00009DEF,
-               0x034, 0x00008DEC,
-               0x034, 0x00007DE9,
-               0x034, 0x00006CED,
-               0x034, 0x00005CE9,
-               0x034, 0x000044E9,
-               0x034, 0x000034E6,
-               0x034, 0x0000246A,
-               0x034, 0x00001467,
-               0x034, 0x00000068,
+               0x034, 0x00009DF0,
+               0x034, 0x00008D70,
+               0x034, 0x00007D6D,
+               0x034, 0x00006CEE,
+               0x034, 0x00005CCC,
+               0x034, 0x000044EC,
+               0x034, 0x000034AC,
+               0x034, 0x0000246D,
+               0x034, 0x0000106F,
+               0x034, 0x0000006C,
                0x0EF, 0x00000000,
                0x0ED, 0x00000010,
                0x044, 0x0000ADF2,
@@ -2365,18 +1926,21 @@ u32 RTL8821AE_RADIOA_ARRAY[] = {
                0x0FE, 0x00000000,
                0x0FE, 0x00000000,
                0x018, 0x0001712A,
+
 };
 
+u32 RTL8821AE_RADIOA_1TARRAYLEN = sizeof(RTL8821AE_RADIOA_ARRAY) / sizeof(u32);
+
 u32 RTL8812AE_MAC_REG_ARRAY[] = {
                0x010, 0x0000000C,
-       0xFF0F0180, 0xABCD,
+       0x80000200, 0x00000000, 0x40000000, 0x00000000,
+               0x011, 0x00000066,
+       0xA0000000, 0x00000000,
+               0x011, 0x0000005A,
+       0xB0000000, 0x00000000,
                0x025, 0x0000000F,
-       0xFF0F01C0, 0xCDEF,
-               0x025, 0x0000000F,
-       0xCDCDCDCD, 0xCDCD,
-               0x025, 0x0000006F,
-       0xFF0F0180, 0xDEAD,
                0x072, 0x00000000,
+               0x420, 0x00000080,
                0x428, 0x0000000A,
                0x429, 0x00000010,
                0x430, 0x00000000,
@@ -2443,7 +2007,7 @@ u32 RTL8812AE_MAC_REG_ARRAY[] = {
                0x559, 0x00000002,
                0x55C, 0x00000050,
                0x55D, 0x000000FF,
-               0x604, 0x00000001,
+               0x604, 0x00000009,
                0x605, 0x00000030,
                0x607, 0x00000003,
                0x608, 0x0000000E,
@@ -2475,9 +2039,10 @@ u32 RTL8812AE_MAC_REG_ARRAY[] = {
                0x70A, 0x00000065,
                0x70B, 0x00000087,
                0x718, 0x00000040,
-
 };
 
+u32 RTL8812AE_MAC_1T_ARRAYLEN = sizeof(RTL8812AE_MAC_REG_ARRAY) / sizeof(u32);
+
 u32 RTL8821AE_MAC_REG_ARRAY[] = {
                0x428, 0x0000000A,
                0x429, 0x00000010,
@@ -2523,584 +2088,261 @@ u32 RTL8821AE_MAC_REG_ARRAY[] = {
                0x500, 0x00000026,
                0x501, 0x000000A2,
                0x502, 0x0000002F,
-               0x503, 0x00000000,
-               0x504, 0x00000028,
-               0x505, 0x000000A3,
-               0x506, 0x0000005E,
-               0x507, 0x00000000,
-               0x508, 0x0000002B,
-               0x509, 0x000000A4,
-               0x50A, 0x0000005E,
-               0x50B, 0x00000000,
-               0x50C, 0x0000004F,
-               0x50D, 0x000000A4,
-               0x50E, 0x00000000,
-               0x50F, 0x00000000,
-               0x512, 0x0000001C,
-               0x514, 0x0000000A,
-               0x516, 0x0000000A,
-               0x525, 0x0000004F,
-               0x550, 0x00000010,
-               0x551, 0x00000010,
-               0x559, 0x00000002,
-               0x55C, 0x00000050,
-               0x55D, 0x000000FF,
-               0x605, 0x00000030,
-               0x607, 0x00000007,
-               0x608, 0x0000000E,
-               0x609, 0x0000002A,
-               0x620, 0x000000FF,
-               0x621, 0x000000FF,
-               0x622, 0x000000FF,
-               0x623, 0x000000FF,
-               0x624, 0x000000FF,
-               0x625, 0x000000FF,
-               0x626, 0x000000FF,
-               0x627, 0x000000FF,
-               0x638, 0x00000050,
-               0x63C, 0x0000000A,
-               0x63D, 0x0000000A,
-               0x63E, 0x0000000E,
-               0x63F, 0x0000000E,
-               0x640, 0x00000040,
-               0x642, 0x00000040,
-               0x643, 0x00000000,
-               0x652, 0x000000C8,
-               0x66E, 0x00000005,
-               0x700, 0x00000021,
-               0x701, 0x00000043,
-               0x702, 0x00000065,
-               0x703, 0x00000087,
-               0x708, 0x00000021,
-               0x709, 0x00000043,
-               0x70A, 0x00000065,
-               0x70B, 0x00000087,
-               0x718, 0x00000040,
-};
-
-u32 RTL8812AE_AGC_TAB_ARRAY[] = {
-       0xFF0F07D8, 0xABCD,
-               0x81C, 0xFC000001,
-               0x81C, 0xFB020001,
-               0x81C, 0xFA040001,
-               0x81C, 0xF9060001,
-               0x81C, 0xF8080001,
-               0x81C, 0xF70A0001,
-               0x81C, 0xF60C0001,
-               0x81C, 0xF50E0001,
-               0x81C, 0xF4100001,
-               0x81C, 0xF3120001,
-               0x81C, 0xF2140001,
-               0x81C, 0xF1160001,
-               0x81C, 0xF0180001,
-               0x81C, 0xEF1A0001,
-               0x81C, 0xEE1C0001,
-               0x81C, 0xED1E0001,
-               0x81C, 0xEC200001,
-               0x81C, 0xEB220001,
-               0x81C, 0xEA240001,
-               0x81C, 0xCD260001,
-               0x81C, 0xCC280001,
-               0x81C, 0xCB2A0001,
-               0x81C, 0xCA2C0001,
-               0x81C, 0xC92E0001,
-               0x81C, 0xC8300001,
-               0x81C, 0xA6320001,
-               0x81C, 0xA5340001,
-               0x81C, 0xA4360001,
-               0x81C, 0xA3380001,
-               0x81C, 0xA23A0001,
-               0x81C, 0x883C0001,
-               0x81C, 0x873E0001,
-               0x81C, 0x86400001,
-               0x81C, 0x85420001,
-               0x81C, 0x84440001,
-               0x81C, 0x83460001,
-               0x81C, 0x82480001,
-               0x81C, 0x814A0001,
-               0x81C, 0x484C0001,
-               0x81C, 0x474E0001,
-               0x81C, 0x46500001,
-               0x81C, 0x45520001,
-               0x81C, 0x44540001,
-               0x81C, 0x43560001,
-               0x81C, 0x42580001,
-               0x81C, 0x415A0001,
-               0x81C, 0x255C0001,
-               0x81C, 0x245E0001,
-               0x81C, 0x23600001,
-               0x81C, 0x22620001,
-               0x81C, 0x21640001,
-               0x81C, 0x21660001,
-               0x81C, 0x21680001,
-               0x81C, 0x216A0001,
-               0x81C, 0x216C0001,
-               0x81C, 0x216E0001,
-               0x81C, 0x21700001,
-               0x81C, 0x21720001,
-               0x81C, 0x21740001,
-               0x81C, 0x21760001,
-               0x81C, 0x21780001,
-               0x81C, 0x217A0001,
-               0x81C, 0x217C0001,
-               0x81C, 0x217E0001,
-       0xFF0F07D0, 0xCDEF,
-               0x81C, 0xF9000001,
-               0x81C, 0xF8020001,
-               0x81C, 0xF7040001,
-               0x81C, 0xF6060001,
-               0x81C, 0xF5080001,
-               0x81C, 0xF40A0001,
-               0x81C, 0xF30C0001,
-               0x81C, 0xF20E0001,
-               0x81C, 0xF1100001,
-               0x81C, 0xF0120001,
-               0x81C, 0xEF140001,
-               0x81C, 0xEE160001,
-               0x81C, 0xED180001,
-               0x81C, 0xEC1A0001,
-               0x81C, 0xEB1C0001,
-               0x81C, 0xEA1E0001,
-               0x81C, 0xCD200001,
-               0x81C, 0xCC220001,
-               0x81C, 0xCB240001,
-               0x81C, 0xCA260001,
-               0x81C, 0xC9280001,
-               0x81C, 0xC82A0001,
-               0x81C, 0xC72C0001,
-               0x81C, 0xC62E0001,
-               0x81C, 0xA5300001,
-               0x81C, 0xA4320001,
-               0x81C, 0xA3340001,
-               0x81C, 0xA2360001,
-               0x81C, 0x88380001,
-               0x81C, 0x873A0001,
-               0x81C, 0x863C0001,
-               0x81C, 0x853E0001,
-               0x81C, 0x84400001,
-               0x81C, 0x83420001,
-               0x81C, 0x82440001,
-               0x81C, 0x81460001,
-               0x81C, 0x48480001,
-               0x81C, 0x474A0001,
-               0x81C, 0x464C0001,
-               0x81C, 0x454E0001,
-               0x81C, 0x44500001,
-               0x81C, 0x43520001,
-               0x81C, 0x42540001,
-               0x81C, 0x41560001,
-               0x81C, 0x25580001,
-               0x81C, 0x245A0001,
-               0x81C, 0x235C0001,
-               0x81C, 0x225E0001,
-               0x81C, 0x21600001,
-               0x81C, 0x21620001,
-               0x81C, 0x21640001,
-               0x81C, 0x21660001,
-               0x81C, 0x21680001,
-               0x81C, 0x216A0001,
-               0x81C, 0x236C0001,
-               0x81C, 0x226E0001,
-               0x81C, 0x21700001,
-               0x81C, 0x21720001,
-               0x81C, 0x21740001,
-               0x81C, 0x21760001,
-               0x81C, 0x21780001,
-               0x81C, 0x217A0001,
-               0x81C, 0x217C0001,
-               0x81C, 0x217E0001,
-       0xCDCDCDCD, 0xCDCD,
-               0x81C, 0xFF000001,
-               0x81C, 0xFF020001,
-               0x81C, 0xFF040001,
-               0x81C, 0xFF060001,
-               0x81C, 0xFF080001,
-               0x81C, 0xFE0A0001,
-               0x81C, 0xFD0C0001,
-               0x81C, 0xFC0E0001,
-               0x81C, 0xFB100001,
-               0x81C, 0xFA120001,
-               0x81C, 0xF9140001,
-               0x81C, 0xF8160001,
-               0x81C, 0xF7180001,
-               0x81C, 0xF61A0001,
-               0x81C, 0xF51C0001,
-               0x81C, 0xF41E0001,
-               0x81C, 0xF3200001,
-               0x81C, 0xF2220001,
-               0x81C, 0xF1240001,
-               0x81C, 0xF0260001,
-               0x81C, 0xEF280001,
-               0x81C, 0xEE2A0001,
-               0x81C, 0xED2C0001,
-               0x81C, 0xEC2E0001,
-               0x81C, 0xEB300001,
-               0x81C, 0xEA320001,
-               0x81C, 0xE9340001,
-               0x81C, 0xE8360001,
-               0x81C, 0xE7380001,
-               0x81C, 0xE63A0001,
-               0x81C, 0xE53C0001,
-               0x81C, 0xC73E0001,
-               0x81C, 0xC6400001,
-               0x81C, 0xC5420001,
-               0x81C, 0xC4440001,
-               0x81C, 0xC3460001,
-               0x81C, 0xC2480001,
-               0x81C, 0xC14A0001,
-               0x81C, 0xA74C0001,
-               0x81C, 0xA64E0001,
-               0x81C, 0xA5500001,
-               0x81C, 0xA4520001,
-               0x81C, 0xA3540001,
-               0x81C, 0xA2560001,
-               0x81C, 0xA1580001,
-               0x81C, 0x675A0001,
-               0x81C, 0x665C0001,
-               0x81C, 0x655E0001,
-               0x81C, 0x64600001,
-               0x81C, 0x63620001,
-               0x81C, 0x48640001,
-               0x81C, 0x47660001,
-               0x81C, 0x46680001,
-               0x81C, 0x456A0001,
-               0x81C, 0x446C0001,
-               0x81C, 0x436E0001,
-               0x81C, 0x42700001,
-               0x81C, 0x41720001,
-               0x81C, 0x41740001,
-               0x81C, 0x41760001,
-               0x81C, 0x41780001,
-               0x81C, 0x417A0001,
-               0x81C, 0x417C0001,
-               0x81C, 0x417E0001,
-       0xFF0F07D8, 0xDEAD,
-       0xFF0F0180, 0xABCD,
-               0x81C, 0xFC800001,
-               0x81C, 0xFB820001,
-               0x81C, 0xFA840001,
-               0x81C, 0xF9860001,
-               0x81C, 0xF8880001,
-               0x81C, 0xF78A0001,
-               0x81C, 0xF68C0001,
-               0x81C, 0xF58E0001,
-               0x81C, 0xF4900001,
-               0x81C, 0xF3920001,
-               0x81C, 0xF2940001,
-               0x81C, 0xF1960001,
-               0x81C, 0xF0980001,
-               0x81C, 0xEF9A0001,
-               0x81C, 0xEE9C0001,
-               0x81C, 0xED9E0001,
-               0x81C, 0xECA00001,
-               0x81C, 0xEBA20001,
-               0x81C, 0xEAA40001,
-               0x81C, 0xE9A60001,
-               0x81C, 0xE8A80001,
-               0x81C, 0xE7AA0001,
-               0x81C, 0xE6AC0001,
-               0x81C, 0xE5AE0001,
-               0x81C, 0xE4B00001,
-               0x81C, 0xE3B20001,
-               0x81C, 0xA8B40001,
-               0x81C, 0xA7B60001,
-               0x81C, 0xA6B80001,
-               0x81C, 0xA5BA0001,
-               0x81C, 0xA4BC0001,
-               0x81C, 0xA3BE0001,
-               0x81C, 0xA2C00001,
-               0x81C, 0xA1C20001,
-               0x81C, 0x68C40001,
-               0x81C, 0x67C60001,
-               0x81C, 0x66C80001,
-               0x81C, 0x65CA0001,
-               0x81C, 0x64CC0001,
-               0x81C, 0x47CE0001,
-               0x81C, 0x46D00001,
-               0x81C, 0x45D20001,
-               0x81C, 0x44D40001,
-               0x81C, 0x43D60001,
-               0x81C, 0x42D80001,
-               0x81C, 0x08DA0001,
-               0x81C, 0x07DC0001,
-               0x81C, 0x06DE0001,
-               0x81C, 0x05E00001,
-               0x81C, 0x04E20001,
-               0x81C, 0x03E40001,
-               0x81C, 0x02E60001,
-               0x81C, 0x01E80001,
-               0x81C, 0x01EA0001,
-               0x81C, 0x01EC0001,
-               0x81C, 0x01EE0001,
-               0x81C, 0x01F00001,
-               0x81C, 0x01F20001,
-               0x81C, 0x01F40001,
-               0x81C, 0x01F60001,
-               0x81C, 0x01F80001,
-               0x81C, 0x01FA0001,
-               0x81C, 0x01FC0001,
-               0x81C, 0x01FE0001,
-       0xFF0F0280, 0xCDEF,
-               0x81C, 0xFC800001,
-               0x81C, 0xFB820001,
-               0x81C, 0xFA840001,
-               0x81C, 0xF9860001,
-               0x81C, 0xF8880001,
-               0x81C, 0xF78A0001,
-               0x81C, 0xF68C0001,
-               0x81C, 0xF58E0001,
-               0x81C, 0xF4900001,
-               0x81C, 0xF3920001,
-               0x81C, 0xF2940001,
-               0x81C, 0xF1960001,
-               0x81C, 0xF0980001,
-               0x81C, 0xEF9A0001,
-               0x81C, 0xEE9C0001,
-               0x81C, 0xED9E0001,
-               0x81C, 0xECA00001,
-               0x81C, 0xEBA20001,
-               0x81C, 0xEAA40001,
-               0x81C, 0xE9A60001,
-               0x81C, 0xE8A80001,
-               0x81C, 0xE7AA0001,
-               0x81C, 0xE6AC0001,
-               0x81C, 0xE5AE0001,
-               0x81C, 0xE4B00001,
-               0x81C, 0xE3B20001,
-               0x81C, 0xA8B40001,
-               0x81C, 0xA7B60001,
-               0x81C, 0xA6B80001,
-               0x81C, 0xA5BA0001,
-               0x81C, 0xA4BC0001,
-               0x81C, 0xA3BE0001,
-               0x81C, 0xA2C00001,
-               0x81C, 0xA1C20001,
-               0x81C, 0x68C40001,
-               0x81C, 0x67C60001,
-               0x81C, 0x66C80001,
-               0x81C, 0x65CA0001,
-               0x81C, 0x64CC0001,
-               0x81C, 0x47CE0001,
-               0x81C, 0x46D00001,
-               0x81C, 0x45D20001,
-               0x81C, 0x44D40001,
-               0x81C, 0x43D60001,
-               0x81C, 0x42D80001,
-               0x81C, 0x08DA0001,
-               0x81C, 0x07DC0001,
-               0x81C, 0x06DE0001,
-               0x81C, 0x05E00001,
-               0x81C, 0x04E20001,
-               0x81C, 0x03E40001,
-               0x81C, 0x02E60001,
-               0x81C, 0x01E80001,
-               0x81C, 0x01EA0001,
-               0x81C, 0x01EC0001,
-               0x81C, 0x01EE0001,
-               0x81C, 0x01F00001,
-               0x81C, 0x01F20001,
-               0x81C, 0x01F40001,
-               0x81C, 0x01F60001,
-               0x81C, 0x01F80001,
-               0x81C, 0x01FA0001,
-               0x81C, 0x01FC0001,
-               0x81C, 0x01FE0001,
-       0xFF0F01C0, 0xCDEF,
-               0x81C, 0xFC800001,
-               0x81C, 0xFB820001,
-               0x81C, 0xFA840001,
-               0x81C, 0xF9860001,
-               0x81C, 0xF8880001,
-               0x81C, 0xF78A0001,
-               0x81C, 0xF68C0001,
-               0x81C, 0xF58E0001,
-               0x81C, 0xF4900001,
-               0x81C, 0xF3920001,
-               0x81C, 0xF2940001,
-               0x81C, 0xF1960001,
-               0x81C, 0xF0980001,
-               0x81C, 0xEF9A0001,
-               0x81C, 0xEE9C0001,
-               0x81C, 0xED9E0001,
-               0x81C, 0xECA00001,
-               0x81C, 0xEBA20001,
-               0x81C, 0xEAA40001,
-               0x81C, 0xE9A60001,
-               0x81C, 0xE8A80001,
-               0x81C, 0xE7AA0001,
-               0x81C, 0xE6AC0001,
-               0x81C, 0xE5AE0001,
-               0x81C, 0xE4B00001,
-               0x81C, 0xE3B20001,
-               0x81C, 0xA8B40001,
-               0x81C, 0xA7B60001,
-               0x81C, 0xA6B80001,
-               0x81C, 0xA5BA0001,
-               0x81C, 0xA4BC0001,
-               0x81C, 0xA3BE0001,
-               0x81C, 0xA2C00001,
-               0x81C, 0xA1C20001,
-               0x81C, 0x68C40001,
-               0x81C, 0x67C60001,
-               0x81C, 0x66C80001,
-               0x81C, 0x65CA0001,
-               0x81C, 0x64CC0001,
-               0x81C, 0x47CE0001,
-               0x81C, 0x46D00001,
-               0x81C, 0x45D20001,
-               0x81C, 0x44D40001,
-               0x81C, 0x43D60001,
-               0x81C, 0x42D80001,
-               0x81C, 0x08DA0001,
-               0x81C, 0x07DC0001,
-               0x81C, 0x06DE0001,
-               0x81C, 0x05E00001,
-               0x81C, 0x04E20001,
-               0x81C, 0x03E40001,
-               0x81C, 0x02E60001,
-               0x81C, 0x01E80001,
-               0x81C, 0x01EA0001,
-               0x81C, 0x01EC0001,
-               0x81C, 0x01EE0001,
-               0x81C, 0x01F00001,
-               0x81C, 0x01F20001,
-               0x81C, 0x01F40001,
-               0x81C, 0x01F60001,
-               0x81C, 0x01F80001,
-               0x81C, 0x01FA0001,
-               0x81C, 0x01FC0001,
-               0x81C, 0x01FE0001,
-       0xFF0F02C0, 0xCDEF,
-               0x81C, 0xFC800001,
-               0x81C, 0xFB820001,
-               0x81C, 0xFA840001,
-               0x81C, 0xF9860001,
-               0x81C, 0xF8880001,
-               0x81C, 0xF78A0001,
-               0x81C, 0xF68C0001,
-               0x81C, 0xF58E0001,
-               0x81C, 0xF4900001,
-               0x81C, 0xF3920001,
-               0x81C, 0xF2940001,
-               0x81C, 0xF1960001,
-               0x81C, 0xF0980001,
-               0x81C, 0xEF9A0001,
-               0x81C, 0xEE9C0001,
-               0x81C, 0xED9E0001,
-               0x81C, 0xECA00001,
-               0x81C, 0xEBA20001,
-               0x81C, 0xEAA40001,
-               0x81C, 0xE9A60001,
-               0x81C, 0xE8A80001,
-               0x81C, 0xE7AA0001,
-               0x81C, 0xE6AC0001,
-               0x81C, 0xE5AE0001,
-               0x81C, 0xE4B00001,
-               0x81C, 0xE3B20001,
-               0x81C, 0xA8B40001,
-               0x81C, 0xA7B60001,
-               0x81C, 0xA6B80001,
-               0x81C, 0xA5BA0001,
-               0x81C, 0xA4BC0001,
-               0x81C, 0xA3BE0001,
-               0x81C, 0xA2C00001,
-               0x81C, 0xA1C20001,
-               0x81C, 0x68C40001,
-               0x81C, 0x67C60001,
-               0x81C, 0x66C80001,
-               0x81C, 0x65CA0001,
-               0x81C, 0x64CC0001,
-               0x81C, 0x47CE0001,
-               0x81C, 0x46D00001,
-               0x81C, 0x45D20001,
-               0x81C, 0x44D40001,
-               0x81C, 0x43D60001,
-               0x81C, 0x42D80001,
-               0x81C, 0x08DA0001,
-               0x81C, 0x07DC0001,
-               0x81C, 0x06DE0001,
-               0x81C, 0x05E00001,
-               0x81C, 0x04E20001,
-               0x81C, 0x03E40001,
-               0x81C, 0x02E60001,
-               0x81C, 0x01E80001,
-               0x81C, 0x01EA0001,
-               0x81C, 0x01EC0001,
-               0x81C, 0x01EE0001,
-               0x81C, 0x01F00001,
-               0x81C, 0x01F20001,
-               0x81C, 0x01F40001,
-               0x81C, 0x01F60001,
-               0x81C, 0x01F80001,
-               0x81C, 0x01FA0001,
-               0x81C, 0x01FC0001,
-               0x81C, 0x01FE0001,
-       0xFF0F07D8, 0xCDEF,
-               0x81C, 0xFC800001,
-               0x81C, 0xFB820001,
-               0x81C, 0xFA840001,
-               0x81C, 0xF9860001,
-               0x81C, 0xF8880001,
-               0x81C, 0xF78A0001,
-               0x81C, 0xF68C0001,
-               0x81C, 0xF58E0001,
-               0x81C, 0xF4900001,
-               0x81C, 0xF3920001,
-               0x81C, 0xF2940001,
-               0x81C, 0xF1960001,
-               0x81C, 0xF0980001,
-               0x81C, 0xEF9A0001,
-               0x81C, 0xEE9C0001,
-               0x81C, 0xED9E0001,
-               0x81C, 0xECA00001,
-               0x81C, 0xEBA20001,
-               0x81C, 0xEAA40001,
-               0x81C, 0xE9A60001,
-               0x81C, 0xE8A80001,
-               0x81C, 0xE7AA0001,
-               0x81C, 0xE6AC0001,
-               0x81C, 0xE5AE0001,
-               0x81C, 0xE4B00001,
-               0x81C, 0xE3B20001,
-               0x81C, 0xA8B40001,
-               0x81C, 0xA7B60001,
-               0x81C, 0xA6B80001,
-               0x81C, 0xA5BA0001,
-               0x81C, 0xA4BC0001,
-               0x81C, 0xA3BE0001,
-               0x81C, 0xA2C00001,
-               0x81C, 0xA1C20001,
-               0x81C, 0x68C40001,
-               0x81C, 0x67C60001,
-               0x81C, 0x66C80001,
-               0x81C, 0x65CA0001,
-               0x81C, 0x64CC0001,
-               0x81C, 0x47CE0001,
-               0x81C, 0x46D00001,
-               0x81C, 0x45D20001,
-               0x81C, 0x44D40001,
-               0x81C, 0x43D60001,
-               0x81C, 0x42D80001,
-               0x81C, 0x08DA0001,
-               0x81C, 0x07DC0001,
-               0x81C, 0x06DE0001,
-               0x81C, 0x05E00001,
-               0x81C, 0x04E20001,
-               0x81C, 0x03E40001,
-               0x81C, 0x02E60001,
-               0x81C, 0x01E80001,
-               0x81C, 0x01EA0001,
-               0x81C, 0x01EC0001,
-               0x81C, 0x01EE0001,
-               0x81C, 0x01F00001,
-               0x81C, 0x01F20001,
-               0x81C, 0x01F40001,
-               0x81C, 0x01F60001,
-               0x81C, 0x01F80001,
-               0x81C, 0x01FA0001,
-               0x81C, 0x01FC0001,
-               0x81C, 0x01FE0001,
-       0xFF0F07D0, 0xCDEF,
+               0x503, 0x00000000,
+               0x504, 0x00000028,
+               0x505, 0x000000A3,
+               0x506, 0x0000005E,
+               0x507, 0x00000000,
+               0x508, 0x0000002B,
+               0x509, 0x000000A4,
+               0x50A, 0x0000005E,
+               0x50B, 0x00000000,
+               0x50C, 0x0000004F,
+               0x50D, 0x000000A4,
+               0x50E, 0x00000000,
+               0x50F, 0x00000000,
+               0x512, 0x0000001C,
+               0x514, 0x0000000A,
+               0x516, 0x0000000A,
+               0x525, 0x0000004F,
+               0x550, 0x00000010,
+               0x551, 0x00000010,
+               0x559, 0x00000002,
+               0x55C, 0x00000050,
+               0x55D, 0x000000FF,
+               0x605, 0x00000030,
+               0x607, 0x00000007,
+               0x608, 0x0000000E,
+               0x609, 0x0000002A,
+               0x620, 0x000000FF,
+               0x621, 0x000000FF,
+               0x622, 0x000000FF,
+               0x623, 0x000000FF,
+               0x624, 0x000000FF,
+               0x625, 0x000000FF,
+               0x626, 0x000000FF,
+               0x627, 0x000000FF,
+               0x638, 0x00000050,
+               0x63C, 0x0000000A,
+               0x63D, 0x0000000A,
+               0x63E, 0x0000000E,
+               0x63F, 0x0000000E,
+               0x640, 0x00000040,
+               0x642, 0x00000040,
+               0x643, 0x00000000,
+               0x652, 0x000000C8,
+               0x66E, 0x00000005,
+               0x700, 0x00000021,
+               0x701, 0x00000043,
+               0x702, 0x00000065,
+               0x703, 0x00000087,
+               0x708, 0x00000021,
+               0x709, 0x00000043,
+               0x70A, 0x00000065,
+               0x70B, 0x00000087,
+               0x718, 0x00000040,
+};
+
+u32 RTL8821AE_MAC_1T_ARRAYLEN = sizeof(RTL8821AE_MAC_REG_ARRAY) / sizeof(u32);
+
+u32 RTL8812AE_AGC_TAB_ARRAY[] = {
+       0x80000001, 0x00000000, 0x40000000, 0x00000000,
+               0x81C, 0xFC000001,
+               0x81C, 0xFB020001,
+               0x81C, 0xFA040001,
+               0x81C, 0xF9060001,
+               0x81C, 0xF8080001,
+               0x81C, 0xF70A0001,
+               0x81C, 0xF60C0001,
+               0x81C, 0xF50E0001,
+               0x81C, 0xF4100001,
+               0x81C, 0xF3120001,
+               0x81C, 0xF2140001,
+               0x81C, 0xF1160001,
+               0x81C, 0xF0180001,
+               0x81C, 0xEF1A0001,
+               0x81C, 0xEE1C0001,
+               0x81C, 0xED1E0001,
+               0x81C, 0xEC200001,
+               0x81C, 0xEB220001,
+               0x81C, 0xEA240001,
+               0x81C, 0xCD260001,
+               0x81C, 0xCC280001,
+               0x81C, 0xCB2A0001,
+               0x81C, 0xCA2C0001,
+               0x81C, 0xC92E0001,
+               0x81C, 0xC8300001,
+               0x81C, 0xA6320001,
+               0x81C, 0xA5340001,
+               0x81C, 0xA4360001,
+               0x81C, 0xA3380001,
+               0x81C, 0xA23A0001,
+               0x81C, 0x883C0001,
+               0x81C, 0x873E0001,
+               0x81C, 0x86400001,
+               0x81C, 0x85420001,
+               0x81C, 0x84440001,
+               0x81C, 0x83460001,
+               0x81C, 0x82480001,
+               0x81C, 0x814A0001,
+               0x81C, 0x484C0001,
+               0x81C, 0x474E0001,
+               0x81C, 0x46500001,
+               0x81C, 0x45520001,
+               0x81C, 0x44540001,
+               0x81C, 0x43560001,
+               0x81C, 0x42580001,
+               0x81C, 0x415A0001,
+               0x81C, 0x255C0001,
+               0x81C, 0x245E0001,
+               0x81C, 0x23600001,
+               0x81C, 0x22620001,
+               0x81C, 0x21640001,
+               0x81C, 0x21660001,
+               0x81C, 0x21680001,
+               0x81C, 0x216A0001,
+               0x81C, 0x216C0001,
+               0x81C, 0x216E0001,
+               0x81C, 0x21700001,
+               0x81C, 0x21720001,
+               0x81C, 0x21740001,
+               0x81C, 0x21760001,
+               0x81C, 0x21780001,
+               0x81C, 0x217A0001,
+               0x81C, 0x217C0001,
+               0x81C, 0x217E0001,
+       0x90000001, 0x00000005, 0x40000000, 0x00000000,
+               0x81C, 0xF9000001,
+               0x81C, 0xF8020001,
+               0x81C, 0xF7040001,
+               0x81C, 0xF6060001,
+               0x81C, 0xF5080001,
+               0x81C, 0xF40A0001,
+               0x81C, 0xF30C0001,
+               0x81C, 0xF20E0001,
+               0x81C, 0xF1100001,
+               0x81C, 0xF0120001,
+               0x81C, 0xEF140001,
+               0x81C, 0xEE160001,
+               0x81C, 0xED180001,
+               0x81C, 0xEC1A0001,
+               0x81C, 0xEB1C0001,
+               0x81C, 0xEA1E0001,
+               0x81C, 0xCD200001,
+               0x81C, 0xCC220001,
+               0x81C, 0xCB240001,
+               0x81C, 0xCA260001,
+               0x81C, 0xC9280001,
+               0x81C, 0xC82A0001,
+               0x81C, 0xC72C0001,
+               0x81C, 0xC62E0001,
+               0x81C, 0xA5300001,
+               0x81C, 0xA4320001,
+               0x81C, 0xA3340001,
+               0x81C, 0xA2360001,
+               0x81C, 0x88380001,
+               0x81C, 0x873A0001,
+               0x81C, 0x863C0001,
+               0x81C, 0x853E0001,
+               0x81C, 0x84400001,
+               0x81C, 0x83420001,
+               0x81C, 0x82440001,
+               0x81C, 0x81460001,
+               0x81C, 0x48480001,
+               0x81C, 0x474A0001,
+               0x81C, 0x464C0001,
+               0x81C, 0x454E0001,
+               0x81C, 0x44500001,
+               0x81C, 0x43520001,
+               0x81C, 0x42540001,
+               0x81C, 0x41560001,
+               0x81C, 0x25580001,
+               0x81C, 0x245A0001,
+               0x81C, 0x235C0001,
+               0x81C, 0x225E0001,
+               0x81C, 0x21600001,
+               0x81C, 0x21620001,
+               0x81C, 0x21640001,
+               0x81C, 0x21660001,
+               0x81C, 0x21680001,
+               0x81C, 0x216A0001,
+               0x81C, 0x236C0001,
+               0x81C, 0x226E0001,
+               0x81C, 0x21700001,
+               0x81C, 0x21720001,
+               0x81C, 0x21740001,
+               0x81C, 0x21760001,
+               0x81C, 0x21780001,
+               0x81C, 0x217A0001,
+               0x81C, 0x217C0001,
+               0x81C, 0x217E0001,
+       0xA0000000, 0x00000000,
+               0x81C, 0xFF000001,
+               0x81C, 0xFF020001,
+               0x81C, 0xFF040001,
+               0x81C, 0xFF060001,
+               0x81C, 0xFF080001,
+               0x81C, 0xFE0A0001,
+               0x81C, 0xFD0C0001,
+               0x81C, 0xFC0E0001,
+               0x81C, 0xFB100001,
+               0x81C, 0xFA120001,
+               0x81C, 0xF9140001,
+               0x81C, 0xF8160001,
+               0x81C, 0xF7180001,
+               0x81C, 0xF61A0001,
+               0x81C, 0xF51C0001,
+               0x81C, 0xF41E0001,
+               0x81C, 0xF3200001,
+               0x81C, 0xF2220001,
+               0x81C, 0xF1240001,
+               0x81C, 0xF0260001,
+               0x81C, 0xEF280001,
+               0x81C, 0xEE2A0001,
+               0x81C, 0xED2C0001,
+               0x81C, 0xEC2E0001,
+               0x81C, 0xEB300001,
+               0x81C, 0xEA320001,
+               0x81C, 0xE9340001,
+               0x81C, 0xE8360001,
+               0x81C, 0xE7380001,
+               0x81C, 0xE63A0001,
+               0x81C, 0xE53C0001,
+               0x81C, 0xC73E0001,
+               0x81C, 0xC6400001,
+               0x81C, 0xC5420001,
+               0x81C, 0xC4440001,
+               0x81C, 0xC3460001,
+               0x81C, 0xC2480001,
+               0x81C, 0xC14A0001,
+               0x81C, 0xA74C0001,
+               0x81C, 0xA64E0001,
+               0x81C, 0xA5500001,
+               0x81C, 0xA4520001,
+               0x81C, 0xA3540001,
+               0x81C, 0xA2560001,
+               0x81C, 0xA1580001,
+               0x81C, 0x675A0001,
+               0x81C, 0x665C0001,
+               0x81C, 0x655E0001,
+               0x81C, 0x64600001,
+               0x81C, 0x63620001,
+               0x81C, 0x48640001,
+               0x81C, 0x47660001,
+               0x81C, 0x46680001,
+               0x81C, 0x456A0001,
+               0x81C, 0x446C0001,
+               0x81C, 0x436E0001,
+               0x81C, 0x42700001,
+               0x81C, 0x41720001,
+               0x81C, 0x41740001,
+               0x81C, 0x41760001,
+               0x81C, 0x41780001,
+               0x81C, 0x417A0001,
+               0x81C, 0x417C0001,
+               0x81C, 0x417E0001,
+       0xB0000000, 0x00000000,
+       0x80000004, 0x00000000, 0x40000000, 0x00000000,
                0x81C, 0xFC800001,
                0x81C, 0xFB820001,
                0x81C, 0xFA840001,
@@ -3165,7 +2407,7 @@ u32 RTL8812AE_AGC_TAB_ARRAY[] = {
                0x81C, 0x01FA0001,
                0x81C, 0x01FC0001,
                0x81C, 0x01FE0001,
-       0xCDCDCDCD, 0xCDCD,
+       0xA0000000, 0x00000000,
                0x81C, 0xFF800001,
                0x81C, 0xFF820001,
                0x81C, 0xFF840001,
@@ -3230,14 +2472,16 @@ u32 RTL8812AE_AGC_TAB_ARRAY[] = {
                0x81C, 0x01FA0001,
                0x81C, 0x01FC0001,
                0x81C, 0x01FE0001,
-       0xFF0F0180, 0xDEAD,
+       0xB0000000, 0x00000000,
                0xC50, 0x00000022,
                0xC50, 0x00000020,
                0xE50, 0x00000022,
                0xE50, 0x00000020,
-
 };
 
+u32 RTL8812AE_AGC_TAB_1TARRAYLEN =
+       sizeof(RTL8812AE_AGC_TAB_ARRAY) / sizeof(u32);
+
 u32 RTL8821AE_AGC_TAB_ARRAY[] = {
                0x81C, 0xBF000001,
                0x81C, 0xBF020001,
@@ -3430,9 +2674,11 @@ u32 RTL8821AE_AGC_TAB_ARRAY[] = {
                0x81C, 0x017E0101,
                0xC50, 0x00000022,
                0xC50, 0x00000020,
-
 };
 
+u32 RTL8821AE_AGC_TAB_1TARRAYLEN =
+       sizeof(RTL8821AE_AGC_TAB_ARRAY) / sizeof(u32);
+
 /******************************************************************************
 *                           TXPWR_LMT.TXT
 ******************************************************************************/
@@ -3717,9 +2963,9 @@ u8 *RTL8812AE_TXPWR_LMT[] = {
        "FCC", "5G", "20M", "OFDM", "1T", "100", "30",
        "ETSI", "5G", "20M", "OFDM", "1T", "100", "32",
        "MKK", "5G", "20M", "OFDM", "1T", "100", "32",
-       "FCC", "5G", "20M", "OFDM", "1T", "114", "30",
-       "ETSI", "5G", "20M", "OFDM", "1T", "114", "32",
-       "MKK", "5G", "20M", "OFDM", "1T", "114", "32",
+       "FCC", "5G", "20M", "OFDM", "1T", "104", "30",
+       "ETSI", "5G", "20M", "OFDM", "1T", "104", "32",
+       "MKK", "5G", "20M", "OFDM", "1T", "104", "32",
        "FCC", "5G", "20M", "OFDM", "1T", "108", "32",
        "ETSI", "5G", "20M", "OFDM", "1T", "108", "32",
        "MKK", "5G", "20M", "OFDM", "1T", "108", "32",
@@ -3789,9 +3035,9 @@ u8 *RTL8812AE_TXPWR_LMT[] = {
        "FCC", "5G", "20M", "HT", "1T", "100", "30",
        "ETSI", "5G", "20M", "HT", "1T", "100", "32",
        "MKK", "5G", "20M", "HT", "1T", "100", "32",
-       "FCC", "5G", "20M", "HT", "1T", "114", "30",
-       "ETSI", "5G", "20M", "HT", "1T", "114", "32",
-       "MKK", "5G", "20M", "HT", "1T", "114", "32",
+       "FCC", "5G", "20M", "HT", "1T", "104", "30",
+       "ETSI", "5G", "20M", "HT", "1T", "104", "32",
+       "MKK", "5G", "20M", "HT", "1T", "104", "32",
        "FCC", "5G", "20M", "HT", "1T", "108", "32",
        "ETSI", "5G", "20M", "HT", "1T", "108", "32",
        "MKK", "5G", "20M", "HT", "1T", "108", "32",
@@ -3861,9 +3107,9 @@ u8 *RTL8812AE_TXPWR_LMT[] = {
        "FCC", "5G", "20M", "HT", "2T", "100", "28",
        "ETSI", "5G", "20M", "HT", "2T", "100", "30",
        "MKK", "5G", "20M", "HT", "2T", "100", "30",
-       "FCC", "5G", "20M", "HT", "2T", "114", "28",
-       "ETSI", "5G", "20M", "HT", "2T", "114", "30",
-       "MKK", "5G", "20M", "HT", "2T", "114", "30",
+       "FCC", "5G", "20M", "HT", "2T", "104", "28",
+       "ETSI", "5G", "20M", "HT", "2T", "104", "30",
+       "MKK", "5G", "20M", "HT", "2T", "104", "30",
        "FCC", "5G", "20M", "HT", "2T", "108", "30",
        "ETSI", "5G", "20M", "HT", "2T", "108", "30",
        "MKK", "5G", "20M", "HT", "2T", "108", "30",
@@ -4004,6 +3250,8 @@ u8 *RTL8812AE_TXPWR_LMT[] = {
        "MKK", "5G", "80M", "VHT", "2T", "155", "63"
 };
 
+u32 RTL8812AE_TXPWR_LMT_ARRAY_LEN = sizeof(RTL8812AE_TXPWR_LMT) / sizeof(u8 *);
+
 u8 *RTL8821AE_TXPWR_LMT[] = {
        "FCC", "2.4G", "20M", "CCK", "1T", "01", "32",
        "ETSI", "2.4G", "20M", "CCK", "1T", "01", "32",
@@ -4284,9 +3532,9 @@ u8 *RTL8821AE_TXPWR_LMT[] = {
        "FCC", "5G", "20M", "OFDM", "1T", "100", "32",
        "ETSI", "5G", "20M", "OFDM", "1T", "100", "30",
        "MKK", "5G", "20M", "OFDM", "1T", "100", "30",
-       "FCC", "5G", "20M", "OFDM", "1T", "114", "32",
-       "ETSI", "5G", "20M", "OFDM", "1T", "114", "30",
-       "MKK", "5G", "20M", "OFDM", "1T", "114", "30",
+       "FCC", "5G", "20M", "OFDM", "1T", "104", "32",
+       "ETSI", "5G", "20M", "OFDM", "1T", "104", "30",
+       "MKK", "5G", "20M", "OFDM", "1T", "104", "30",
        "FCC", "5G", "20M", "OFDM", "1T", "108", "32",
        "ETSI", "5G", "20M", "OFDM", "1T", "108", "30",
        "MKK", "5G", "20M", "OFDM", "1T", "108", "30",
@@ -4356,9 +3604,9 @@ u8 *RTL8821AE_TXPWR_LMT[] = {
        "FCC", "5G", "20M", "HT", "1T", "100", "32",
        "ETSI", "5G", "20M", "HT", "1T", "100", "30",
        "MKK", "5G", "20M", "HT", "1T", "100", "30",
-       "FCC", "5G", "20M", "HT", "1T", "114", "32",
-       "ETSI", "5G", "20M", "HT", "1T", "114", "30",
-       "MKK", "5G", "20M", "HT", "1T", "114", "30",
+       "FCC", "5G", "20M", "HT", "1T", "104", "32",
+       "ETSI", "5G", "20M", "HT", "1T", "104", "30",
+       "MKK", "5G", "20M", "HT", "1T", "104", "30",
        "FCC", "5G", "20M", "HT", "1T", "108", "32",
        "ETSI", "5G", "20M", "HT", "1T", "108", "30",
        "MKK", "5G", "20M", "HT", "1T", "108", "30",
@@ -4428,9 +3676,9 @@ u8 *RTL8821AE_TXPWR_LMT[] = {
        "FCC", "5G", "20M", "HT", "2T", "100", "28",
        "ETSI", "5G", "20M", "HT", "2T", "100", "30",
        "MKK", "5G", "20M", "HT", "2T", "100", "30",
-       "FCC", "5G", "20M", "HT", "2T", "114", "28",
-       "ETSI", "5G", "20M", "HT", "2T", "114", "30",
-       "MKK", "5G", "20M", "HT", "2T", "114", "30",
+       "FCC", "5G", "20M", "HT", "2T", "104", "28",
+       "ETSI", "5G", "20M", "HT", "2T", "104", "30",
+       "MKK", "5G", "20M", "HT", "2T", "104", "30",
        "FCC", "5G", "20M", "HT", "2T", "108", "30",
        "ETSI", "5G", "20M", "HT", "2T", "108", "30",
        "MKK", "5G", "20M", "HT", "2T", "108", "30",
@@ -4570,3 +3818,5 @@ u8 *RTL8821AE_TXPWR_LMT[] = {
        "ETSI", "5G", "80M", "VHT", "2T", "155", "30",
        "MKK", "5G", "80M", "VHT", "2T", "155", "63"
 };
+
+u32 RTL8821AE_TXPWR_LMT_ARRAY_LEN = sizeof(RTL8821AE_TXPWR_LMT) / sizeof(u8 *);
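The length definitions added above replace hand-counted #define constants with values the compiler derives from the arrays themselves. A minimal userspace sketch of the same sizeof idiom (the table name and contents below are invented for illustration):

/* Deriving the element count from the array definition keeps the length
 * from drifting out of sync with the data, unlike a hand-maintained
 * #define.
 */
#include <stdio.h>

static const unsigned int demo_table[] = {
	0x81C, 0xFC800001,
	0x81C, 0xFB820001,
};

static const unsigned int demo_table_len =
	sizeof(demo_table) / sizeof(demo_table[0]);

int main(void)
{
	printf("demo_table has %u entries\n", demo_table_len);
	return 0;
}
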
index 24bcff6bc507bff6b27a03e939790db9d59bd5dd..36c2388b60bca303e3df7405996952a32ca3dcd7 100644
 #define __RTL8821AE_TABLE__H_
 
 #include <linux/types.h>
-#define  RTL8821AEPHY_REG_1TARRAYLEN   344
+extern u32 RTL8821AE_PHY_REG_1TARRAYLEN;
 extern u32 RTL8821AE_PHY_REG_ARRAY[];
-#define  RTL8812AEPHY_REG_1TARRAYLEN   490
+extern u32 RTL8812AE_PHY_REG_1TARRAYLEN;
 extern u32 RTL8812AE_PHY_REG_ARRAY[];
-#define RTL8821AEPHY_REG_ARRAY_PGLEN   90
+extern u32 RTL8821AE_PHY_REG_ARRAY_PGLEN;
 extern u32 RTL8821AE_PHY_REG_ARRAY_PG[];
-#define RTL8812AEPHY_REG_ARRAY_PGLEN   276
+extern u32 RTL8812AE_PHY_REG_ARRAY_PGLEN;
 extern u32 RTL8812AE_PHY_REG_ARRAY_PG[];
-/* #define     RTL8723BE_RADIOA_1TARRAYLEN     206 */
-/* extern u8 *RTL8821AE_TXPWR_LMT_ARRAY[]; */
-#define        RTL8812AE_RADIOA_1TARRAYLEN     1264
+extern u32 RTL8812AE_RADIOA_1TARRAYLEN;
 extern u32 RTL8812AE_RADIOA_ARRAY[];
-#define        RTL8812AE_RADIOB_1TARRAYLEN     1240
+extern u32 RTL8812AE_RADIOB_1TARRAYLEN;
 extern u32 RTL8812AE_RADIOB_ARRAY[];
-#define        RTL8821AE_RADIOA_1TARRAYLEN     1176
+extern u32 RTL8821AE_RADIOA_1TARRAYLEN;
 extern u32 RTL8821AE_RADIOA_ARRAY[];
-#define RTL8821AEMAC_1T_ARRAYLEN               194
+extern u32 RTL8821AE_MAC_1T_ARRAYLEN;
 extern u32 RTL8821AE_MAC_REG_ARRAY[];
-#define RTL8812AEMAC_1T_ARRAYLEN               214
+extern u32 RTL8812AE_MAC_1T_ARRAYLEN;
 extern u32 RTL8812AE_MAC_REG_ARRAY[];
-#define RTL8821AEAGCTAB_1TARRAYLEN             382
+extern u32 RTL8821AE_AGC_TAB_1TARRAYLEN;
 extern u32 RTL8821AE_AGC_TAB_ARRAY[];
-#define RTL8812AEAGCTAB_1TARRAYLEN             1312
+extern u32 RTL8812AE_AGC_TAB_1TARRAYLEN;
 extern u32 RTL8812AE_AGC_TAB_ARRAY[];
-#define RTL8812AE_TXPWR_LMT_ARRAY_LEN          3948
+extern u32 RTL8812AE_TXPWR_LMT_ARRAY_LEN;
 extern u8 *RTL8812AE_TXPWR_LMT[];
-#define RTL8821AE_TXPWR_LMT_ARRAY_LEN          3948
+extern u32 RTL8821AE_TXPWR_LMT_ARRAY_LEN;
 extern u8 *RTL8821AE_TXPWR_LMT[];
 #endif
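The header now pairs each extern array with an extern u32 length defined next to the data, so the header no longer has to be kept in step with the .c file by hand. A single-file sketch of that declaration/definition split (names are hypothetical; in the driver the two halves live in table.h and table.c):

#include <stdio.h>

/* What the header would carry: declarations only. */
extern const unsigned int demo_array[];
extern const unsigned int demo_array_len;

/* What the .c file defines: the data and, right beside it, its length. */
const unsigned int demo_array[] = { 1, 2, 3 };
const unsigned int demo_array_len =
	sizeof(demo_array) / sizeof(demo_array[0]);

int main(void)
{
	printf("%u elements\n", demo_array_len);
	return 0;
}
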
index 65ef42b376515dfac7de588ba5ea67504f8c6630..c0d2601bc55fbb337ea854afd4207b1e67b073eb 100644
@@ -1529,6 +1529,10 @@ struct rtl_hal {
        u8 external_lna_2g;
        u8 external_pa_5g;
        u8 external_lna_5g;
+       u8 type_glna;
+       u8 type_gpa;
+       u8 type_alna;
+       u8 type_apa;
        u8 rfe_type;
 
        /*firmware */
@@ -2933,6 +2937,14 @@ static inline void rtl_write_byte(struct rtl_priv *rtlpriv, u32 addr, u8 val8)
                rtlpriv->io.read8_sync(rtlpriv, addr);
 }
 
+static inline void rtl_write_byte_with_val32(struct ieee80211_hw *hw,
+                                            u32 addr, u32 val8)
+{
+       struct rtl_priv *rtlpriv = rtl_priv(hw);
+
+       rtl_write_byte(rtlpriv, addr, (u8)val8);
+}
+
 static inline void rtl_write_word(struct rtl_priv *rtlpriv, u32 addr, u16 val16)
 {
        rtlpriv->io.write16_async(rtlpriv, addr, val16);
@@ -2966,6 +2978,12 @@ static inline void rtl_set_bbreg(struct ieee80211_hw *hw, u32 regaddr,
        rtlpriv->cfg->ops->set_bbreg(hw, regaddr, bitmask, data);
 }
 
+static inline void rtl_set_bbreg_with_dwmask(struct ieee80211_hw *hw,
+                                u32 regaddr, u32 data)
+{
+       rtl_set_bbreg(hw, regaddr, 0xffffffff, data);
+}
+
 static inline u32 rtl_get_rfreg(struct ieee80211_hw *hw,
                                enum radio_path rfpath, u32 regaddr,
                                u32 bitmask)
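rtl_write_byte_with_val32() and rtl_set_bbreg_with_dwmask() above are thin wrappers that fix one parameter of a more general accessor. A hedged userspace sketch of the pattern (the names and the printf stand-in are hypothetical, not the driver's API):

#include <stdio.h>

static void set_reg_masked(unsigned int addr, unsigned int mask,
			   unsigned int val)
{
	printf("reg 0x%X: mask 0x%08X val 0x%08X\n", addr, mask, val);
}

/* Mirrors rtl_set_bbreg_with_dwmask(): the full-dword mask is baked in,
 * so callers writing a whole register cannot get the mask wrong.
 */
static void set_reg_dword(unsigned int addr, unsigned int val)
{
	set_reg_masked(addr, 0xffffffffu, val);
}

int main(void)
{
	set_reg_dword(0xC50, 0x22);
	return 0;
}
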
index 785334f7a5386e82c32cd6fe41dee6d36b37b969..3b68eaffb48c32e709be0cdb0f12f539a06dbfc5 100644
@@ -3392,6 +3392,7 @@ static const struct net_device_ops rndis_wlan_netdev_ops = {
        .ndo_stop               = usbnet_stop,
        .ndo_start_xmit         = usbnet_start_xmit,
        .ndo_tx_timeout         = usbnet_tx_timeout,
+       .ndo_get_stats64        = usbnet_get_stats64,
        .ndo_set_mac_address    = eth_mac_addr,
        .ndo_validate_addr      = eth_validate_addr,
        .ndo_set_rx_mode        = rndis_wlan_set_multicast_list,
index d3acc85932a569024a51d4b9d68b6dbc79dcfec8..709f56e5ad875c534dc44fac64d371c74b88fe1a 100644
@@ -10,6 +10,7 @@
  */
 
 #include <linux/module.h>
+#include <linux/interrupt.h>
 #include <linux/gpio.h>
 #include <linux/delay.h>
 #include <linux/mmc/host.h>
index 58e148d7bc7b66f4d819209cb42b89c998496cbb..de7e2a5fdffa048e2f00f2edab72f9363b56e01a 100644
@@ -1249,7 +1249,7 @@ static ssize_t fw_logger_write(struct file *file,
        }
 
        if (wl->conf.fwlog.output == 0) {
-               wl1271_warning("iligal opperation - fw logger disabled by default, please change mode via wlconf");
+               wl1271_warning("invalid operation - fw logger disabled by default, please change mode via wlconf");
                return -EINVAL;
        }
 
index c5effd6c6be9beea8481d3c1a68e57f5ace403cf..01ca1d57b3d92074d2953195b9776b368f653ead 100644
@@ -1278,6 +1278,9 @@ static int eject_installer(struct usb_interface *intf)
        u8 bulk_out_ep;
        int r;
 
+       if (iface_desc->desc.bNumEndpoints < 2)
+               return -ENODEV;
+
        /* Find bulk out endpoint */
        for (r = 1; r >= 0; r--) {
                endpoint = &iface_desc->endpoint[r].desc;
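The added bNumEndpoints check rejects malformed devices before the loop indexes endpoint[1]. A generic sketch of the validate-before-index pattern (the structures are invented stand-ins, not the USB core's):

#include <stdio.h>

struct iface_model {
	int num_endpoints;
	int endpoints[8];
};

static int find_bulk_out(const struct iface_model *iface)
{
	/* Bail out before touching endpoint[1] on a device that only
	 * reported zero or one endpoints.
	 */
	if (iface->num_endpoints < 2)
		return -1;
	return iface->endpoints[1];
}

int main(void)
{
	struct iface_model bogus = { .num_endpoints = 0 };

	if (find_bulk_out(&bogus) < 0)
		puts("rejected device with too few endpoints");
	return 0;
}
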
index 829b26cd4549a4e07ccdf30ea87d902424ce737b..8397f6c9245158e8b3ff005bc58a419e4250169d 100644
@@ -165,13 +165,17 @@ static int xenvif_start_xmit(struct sk_buff *skb, struct net_device *dev)
 {
        struct xenvif *vif = netdev_priv(dev);
        struct xenvif_queue *queue = NULL;
-       unsigned int num_queues = vif->num_queues;
+       unsigned int num_queues;
        u16 index;
        struct xenvif_rx_cb *cb;
 
        BUG_ON(skb->dev != dev);
 
-       /* Drop the packet if queues are not set up */
+       /* Drop the packet if queues are not set up.
+        * This handler should be called inside an RCU read section
+        * so we don't need to enter it here explicitly.
+        */
+       num_queues = READ_ONCE(vif->num_queues);
        if (num_queues < 1)
                goto drop;
 
@@ -222,18 +226,18 @@ static struct net_device_stats *xenvif_get_stats(struct net_device *dev)
 {
        struct xenvif *vif = netdev_priv(dev);
        struct xenvif_queue *queue = NULL;
+       unsigned int num_queues;
        u64 rx_bytes = 0;
        u64 rx_packets = 0;
        u64 tx_bytes = 0;
        u64 tx_packets = 0;
        unsigned int index;
 
-       spin_lock(&vif->lock);
-       if (vif->queues == NULL)
-               goto out;
+       rcu_read_lock();
+       num_queues = READ_ONCE(vif->num_queues);
 
        /* Aggregate tx and rx stats from each queue */
-       for (index = 0; index < vif->num_queues; ++index) {
+       for (index = 0; index < num_queues; ++index) {
                queue = &vif->queues[index];
                rx_bytes += queue->stats.rx_bytes;
                rx_packets += queue->stats.rx_packets;
@@ -241,8 +245,7 @@ static struct net_device_stats *xenvif_get_stats(struct net_device *dev)
                tx_packets += queue->stats.tx_packets;
        }
 
-out:
-       spin_unlock(&vif->lock);
+       rcu_read_unlock();
 
        vif->dev->stats.rx_bytes = rx_bytes;
        vif->dev->stats.rx_packets = rx_packets;
@@ -378,10 +381,13 @@ static void xenvif_get_ethtool_stats(struct net_device *dev,
                                     struct ethtool_stats *stats, u64 * data)
 {
        struct xenvif *vif = netdev_priv(dev);
-       unsigned int num_queues = vif->num_queues;
+       unsigned int num_queues;
        int i;
        unsigned int queue_index;
 
+       rcu_read_lock();
+       num_queues = READ_ONCE(vif->num_queues);
+
        for (i = 0; i < ARRAY_SIZE(xenvif_stats); i++) {
                unsigned long accum = 0;
                for (queue_index = 0; queue_index < num_queues; ++queue_index) {
@@ -390,6 +396,8 @@ static void xenvif_get_ethtool_stats(struct net_device *dev,
                }
                data[i] = accum;
        }
+
+       rcu_read_unlock();
 }
 
 static void xenvif_get_strings(struct net_device *dev, u32 stringset, u8 * data)
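The hot paths above stop trusting a cached num_queues and instead sample it once with READ_ONCE() inside an RCU read section. A toy single-process model of the sampled-once reader (C11 atomics stand in for READ_ONCE; this is an illustration, not kernel RCU):

#include <stdatomic.h>
#include <stdio.h>

static _Atomic unsigned int num_queues;

static void reader(void)
{
	/* Sample once; a concurrent teardown publishing zero makes us
	 * drop the packet instead of dereferencing freed queues.
	 */
	unsigned int n = atomic_load_explicit(&num_queues,
					      memory_order_acquire);

	if (n < 1) {
		puts("no queues: drop");
		return;
	}
	printf("dispatch across %u queues\n", n);
}

int main(void)
{
	atomic_store_explicit(&num_queues, 4, memory_order_release);
	reader();
	atomic_store_explicit(&num_queues, 0, memory_order_release);
	reader();
	return 0;
}
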
index f9bcf4a665bcaebc4f33bd28849cef2dadc4f698..602d408fa25e98a4651716b1390d2507bced4605 100644
@@ -214,7 +214,7 @@ static void xenvif_fatal_tx_err(struct xenvif *vif)
        netdev_err(vif->dev, "fatal error; disabling device\n");
        vif->disabled = true;
        /* Disable the vif from queue 0's kthread */
-       if (vif->queues)
+       if (vif->num_queues)
                xenvif_kick_thread(&vif->queues[0]);
 }
 
index d2d7cd9145b1c259a1f0f11414acca706e67bb7a..a56d3eab35dd650c4acfcda9e981c0220cba9e61 100644
@@ -495,26 +495,26 @@ static void backend_disconnect(struct backend_info *be)
        struct xenvif *vif = be->vif;
 
        if (vif) {
+               unsigned int num_queues = vif->num_queues;
                unsigned int queue_index;
-               struct xenvif_queue *queues;
 
                xen_unregister_watchers(vif);
 #ifdef CONFIG_DEBUG_FS
                xenvif_debugfs_delif(vif);
 #endif /* CONFIG_DEBUG_FS */
                xenvif_disconnect_data(vif);
-               for (queue_index = 0;
-                    queue_index < vif->num_queues;
-                    ++queue_index)
-                       xenvif_deinit_queue(&vif->queues[queue_index]);
 
-               spin_lock(&vif->lock);
-               queues = vif->queues;
+               /* At this point some of the handlers may still be active
+                * so we need to have additional synchronization here.
+                */
                vif->num_queues = 0;
-               vif->queues = NULL;
-               spin_unlock(&vif->lock);
+               synchronize_net();
 
-               vfree(queues);
+               for (queue_index = 0; queue_index < num_queues; ++queue_index)
+                       xenvif_deinit_queue(&vif->queues[queue_index]);
+
+               vfree(vif->queues);
+               vif->queues = NULL;
 
                xenvif_disconnect_ctrl(vif);
        }
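backend_disconnect() now unpublishes num_queues, lets synchronize_net() flush any reader still inside an RCU section, and only then tears the queues down. A toy pthread analogue of that ordering (a rwlock's write acquisition plays the grace-period role here; this is an illustration, not kernel RCU):

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

static pthread_rwlock_t guard = PTHREAD_RWLOCK_INITIALIZER;
static unsigned int num_queues = 2;
static int *queues;

static void *reader(void *arg)
{
	(void)arg;
	pthread_rwlock_rdlock(&guard);
	if (num_queues)
		printf("reader: queue[0] = %d\n", queues[0]);
	pthread_rwlock_unlock(&guard);
	return NULL;
}

int main(void)
{
	pthread_t t;

	queues = calloc(num_queues, sizeof(*queues));
	pthread_create(&t, NULL, reader, NULL);
	pthread_join(t, NULL);

	pthread_rwlock_wrlock(&guard);	/* waits out any reader      */
	num_queues = 0;			/* unpublish the queues ...  */
	pthread_rwlock_unlock(&guard);
	free(queues);			/* ... then freeing is safe  */
	queues = NULL;
	return 0;
}
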
index 779f516e7a4ec405ff919f9b248d21ff0f748b8b..47a479f26e5d7de3605c0263d1a0cdbba1b7e1c1 100644
@@ -343,8 +343,6 @@ static int __nvme_rdma_init_request(struct nvme_rdma_ctrl *ctrl,
        struct ib_device *ibdev = dev->dev;
        int ret;
 
-       BUG_ON(queue_idx >= ctrl->queue_count);
-
        ret = nvme_rdma_alloc_qe(ibdev, &req->sqe, sizeof(struct nvme_command),
                        DMA_TO_DEVICE);
        if (ret)
@@ -652,8 +650,22 @@ out_free_queues:
 
 static int nvme_rdma_init_io_queues(struct nvme_rdma_ctrl *ctrl)
 {
+       struct nvmf_ctrl_options *opts = ctrl->ctrl.opts;
+       unsigned int nr_io_queues;
        int i, ret;
 
+       nr_io_queues = min(opts->nr_io_queues, num_online_cpus());
+       ret = nvme_set_queue_count(&ctrl->ctrl, &nr_io_queues);
+       if (ret)
+               return ret;
+
+       ctrl->queue_count = nr_io_queues + 1;
+       if (ctrl->queue_count < 2)
+               return 0;
+
+       dev_info(ctrl->ctrl.device,
+               "creating %d I/O queues.\n", nr_io_queues);
+
        for (i = 1; i < ctrl->queue_count; i++) {
                ret = nvme_rdma_init_queue(ctrl, i,
                                           ctrl->ctrl.opts->queue_size);
@@ -1791,20 +1803,8 @@ static const struct nvme_ctrl_ops nvme_rdma_ctrl_ops = {
 
 static int nvme_rdma_create_io_queues(struct nvme_rdma_ctrl *ctrl)
 {
-       struct nvmf_ctrl_options *opts = ctrl->ctrl.opts;
        int ret;
 
-       ret = nvme_set_queue_count(&ctrl->ctrl, &opts->nr_io_queues);
-       if (ret)
-               return ret;
-
-       ctrl->queue_count = opts->nr_io_queues + 1;
-       if (ctrl->queue_count < 2)
-               return 0;
-
-       dev_info(ctrl->ctrl.device,
-               "creating %d I/O queues.\n", opts->nr_io_queues);
-
        ret = nvme_rdma_init_io_queues(ctrl);
        if (ret)
                return ret;
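nvme_rdma_init_io_queues() now clamps the requested I/O queue count to the number of online CPUs before asking the controller, then sizes queue_count from what was actually granted. A small sketch of the clamp (the numbers below are made up):

#include <stdio.h>

static unsigned int min_u(unsigned int a, unsigned int b)
{
	return a < b ? a : b;
}

int main(void)
{
	unsigned int requested = 16;	/* opts->nr_io_queues stand-in */
	unsigned int online_cpus = 8;	/* num_online_cpus() stand-in  */
	unsigned int granted = min_u(requested, online_cpus);

	/* nvme_set_queue_count() may lower this further; assume the
	 * controller granted everything here.
	 */
	printf("queue_count = %u (%u I/O + 1 admin)\n",
	       granted + 1, granted);
	return 0;
}
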
index 11b0a0a5f661b502d15a22868fe78369ef428627..798653b329b28cb8925cfab4993cc5c6b6afc147 100644
@@ -425,6 +425,13 @@ void nvmet_sq_setup(struct nvmet_ctrl *ctrl, struct nvmet_sq *sq,
        ctrl->sqs[qid] = sq;
 }
 
+static void nvmet_confirm_sq(struct percpu_ref *ref)
+{
+       struct nvmet_sq *sq = container_of(ref, struct nvmet_sq, ref);
+
+       complete(&sq->confirm_done);
+}
+
 void nvmet_sq_destroy(struct nvmet_sq *sq)
 {
        /*
@@ -433,7 +440,8 @@ void nvmet_sq_destroy(struct nvmet_sq *sq)
         */
        if (sq->ctrl && sq->ctrl->sqs && sq->ctrl->sqs[0] == sq)
                nvmet_async_events_free(sq->ctrl);
-       percpu_ref_kill(&sq->ref);
+       percpu_ref_kill_and_confirm(&sq->ref, nvmet_confirm_sq);
+       wait_for_completion(&sq->confirm_done);
        wait_for_completion(&sq->free_done);
        percpu_ref_exit(&sq->ref);
 
@@ -461,6 +469,7 @@ int nvmet_sq_init(struct nvmet_sq *sq)
                return ret;
        }
        init_completion(&sq->free_done);
+       init_completion(&sq->confirm_done);
 
        return 0;
 }
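nvmet_sq_destroy() now waits twice: confirm_done fires once the percpu ref is confirmed killed (no new request can enter), and free_done once the last in-flight reference drops. A toy single-threaded model of that two-phase shutdown (not the percpu_ref API):

#include <stdio.h>

struct sq_model {
	int killed;	/* confirm_done: no new users may enter   */
	int refs;	/* free_done fires when this reaches zero */
};

static int sq_tryget(struct sq_model *sq)
{
	if (sq->killed)
		return 0;
	sq->refs++;
	return 1;
}

static void sq_put(struct sq_model *sq)
{
	if (--sq->refs == 0)
		puts("free_done: last reference dropped");
}

int main(void)
{
	struct sq_model sq = { 0, 1 };	/* one initial self-reference */

	sq_tryget(&sq);			/* an in-flight request       */
	sq.killed = 1;
	puts("confirm_done: queue marked dead");
	sq_put(&sq);			/* the request finishes       */
	sq_put(&sq);			/* drop the initial reference */
	return 0;
}
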
index d1f06e7768ff1d7ff6ee787ff6d94eb01576252f..22f7bc6bac7fa77dd48198cde3a31ef60ead531b 100644
@@ -223,8 +223,6 @@ static void nvme_loop_submit_async_event(struct nvme_ctrl *arg, int aer_idx)
 static int nvme_loop_init_iod(struct nvme_loop_ctrl *ctrl,
                struct nvme_loop_iod *iod, unsigned int queue_idx)
 {
-       BUG_ON(queue_idx >= ctrl->queue_count);
-
        iod->req.cmd = &iod->cmd;
        iod->req.rsp = &iod->rsp;
        iod->queue = &ctrl->queues[queue_idx];
@@ -288,9 +286,9 @@ static struct blk_mq_ops nvme_loop_admin_mq_ops = {
 
 static void nvme_loop_destroy_admin_queue(struct nvme_loop_ctrl *ctrl)
 {
+       nvmet_sq_destroy(&ctrl->queues[0].nvme_sq);
        blk_cleanup_queue(ctrl->ctrl.admin_q);
        blk_mq_free_tag_set(&ctrl->admin_tag_set);
-       nvmet_sq_destroy(&ctrl->queues[0].nvme_sq);
 }
 
 static void nvme_loop_free_ctrl(struct nvme_ctrl *nctrl)
@@ -314,6 +312,43 @@ free_ctrl:
        kfree(ctrl);
 }
 
+static void nvme_loop_destroy_io_queues(struct nvme_loop_ctrl *ctrl)
+{
+       int i;
+
+       for (i = 1; i < ctrl->queue_count; i++)
+               nvmet_sq_destroy(&ctrl->queues[i].nvme_sq);
+}
+
+static int nvme_loop_init_io_queues(struct nvme_loop_ctrl *ctrl)
+{
+       struct nvmf_ctrl_options *opts = ctrl->ctrl.opts;
+       unsigned int nr_io_queues;
+       int ret, i;
+
+       nr_io_queues = min(opts->nr_io_queues, num_online_cpus());
+       ret = nvme_set_queue_count(&ctrl->ctrl, &nr_io_queues);
+       if (ret || !nr_io_queues)
+               return ret;
+
+       dev_info(ctrl->ctrl.device, "creating %d I/O queues.\n", nr_io_queues);
+
+       for (i = 1; i <= nr_io_queues; i++) {
+               ctrl->queues[i].ctrl = ctrl;
+               ret = nvmet_sq_init(&ctrl->queues[i].nvme_sq);
+               if (ret)
+                       goto out_destroy_queues;
+
+               ctrl->queue_count++;
+       }
+
+       return 0;
+
+out_destroy_queues:
+       nvme_loop_destroy_io_queues(ctrl);
+       return ret;
+}
+
 static int nvme_loop_configure_admin_queue(struct nvme_loop_ctrl *ctrl)
 {
        int error;
@@ -385,17 +420,13 @@ out_free_sq:
 
 static void nvme_loop_shutdown_ctrl(struct nvme_loop_ctrl *ctrl)
 {
-       int i;
-
        nvme_stop_keep_alive(&ctrl->ctrl);
 
        if (ctrl->queue_count > 1) {
                nvme_stop_queues(&ctrl->ctrl);
                blk_mq_tagset_busy_iter(&ctrl->tag_set,
                                        nvme_cancel_request, &ctrl->ctrl);
-
-               for (i = 1; i < ctrl->queue_count; i++)
-                       nvmet_sq_destroy(&ctrl->queues[i].nvme_sq);
+               nvme_loop_destroy_io_queues(ctrl);
        }
 
        if (ctrl->ctrl.state == NVME_CTRL_LIVE)
@@ -467,19 +498,14 @@ static void nvme_loop_reset_ctrl_work(struct work_struct *work)
        if (ret)
                goto out_disable;
 
-       for (i = 1; i <= ctrl->ctrl.opts->nr_io_queues; i++) {
-               ctrl->queues[i].ctrl = ctrl;
-               ret = nvmet_sq_init(&ctrl->queues[i].nvme_sq);
-               if (ret)
-                       goto out_free_queues;
-
-               ctrl->queue_count++;
-       }
+       ret = nvme_loop_init_io_queues(ctrl);
+       if (ret)
+               goto out_destroy_admin;
 
-       for (i = 1; i <= ctrl->ctrl.opts->nr_io_queues; i++) {
+       for (i = 1; i < ctrl->queue_count; i++) {
                ret = nvmf_connect_io_queue(&ctrl->ctrl, i);
                if (ret)
-                       goto out_free_queues;
+                       goto out_destroy_io;
        }
 
        changed = nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_LIVE);
@@ -492,9 +518,9 @@ static void nvme_loop_reset_ctrl_work(struct work_struct *work)
 
        return;
 
-out_free_queues:
-       for (i = 1; i < ctrl->queue_count; i++)
-               nvmet_sq_destroy(&ctrl->queues[i].nvme_sq);
+out_destroy_io:
+       nvme_loop_destroy_io_queues(ctrl);
+out_destroy_admin:
        nvme_loop_destroy_admin_queue(ctrl);
 out_disable:
        dev_warn(ctrl->ctrl.device, "Removing after reset failure\n");
@@ -533,25 +559,12 @@ static const struct nvme_ctrl_ops nvme_loop_ctrl_ops = {
 
 static int nvme_loop_create_io_queues(struct nvme_loop_ctrl *ctrl)
 {
-       struct nvmf_ctrl_options *opts = ctrl->ctrl.opts;
        int ret, i;
 
-       ret = nvme_set_queue_count(&ctrl->ctrl, &opts->nr_io_queues);
-       if (ret || !opts->nr_io_queues)
+       ret = nvme_loop_init_io_queues(ctrl);
+       if (ret)
                return ret;
 
-       dev_info(ctrl->ctrl.device, "creating %d I/O queues.\n",
-               opts->nr_io_queues);
-
-       for (i = 1; i <= opts->nr_io_queues; i++) {
-               ctrl->queues[i].ctrl = ctrl;
-               ret = nvmet_sq_init(&ctrl->queues[i].nvme_sq);
-               if (ret)
-                       goto out_destroy_queues;
-
-               ctrl->queue_count++;
-       }
-
        memset(&ctrl->tag_set, 0, sizeof(ctrl->tag_set));
        ctrl->tag_set.ops = &nvme_loop_mq_ops;
        ctrl->tag_set.queue_depth = ctrl->ctrl.opts->queue_size;
@@ -575,7 +588,7 @@ static int nvme_loop_create_io_queues(struct nvme_loop_ctrl *ctrl)
                goto out_free_tagset;
        }
 
-       for (i = 1; i <= opts->nr_io_queues; i++) {
+       for (i = 1; i < ctrl->queue_count; i++) {
                ret = nvmf_connect_io_queue(&ctrl->ctrl, i);
                if (ret)
                        goto out_cleanup_connect_q;
@@ -588,8 +601,7 @@ out_cleanup_connect_q:
 out_free_tagset:
        blk_mq_free_tag_set(&ctrl->tag_set);
 out_destroy_queues:
-       for (i = 1; i < ctrl->queue_count; i++)
-               nvmet_sq_destroy(&ctrl->queues[i].nvme_sq);
+       nvme_loop_destroy_io_queues(ctrl);
        return ret;
 }
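The loop driver's queue setup and teardown are now centralized in nvme_loop_init_io_queues()/nvme_loop_destroy_io_queues(), so every error path unwinds exactly what was brought up. A generic sketch of that paired init/unwind shape (the helpers are invented):

#include <stdio.h>

#define NQUEUES 4

static int init_queue(int i)
{
	return i == 3 ? -1 : 0;		/* simulate failure on queue 3 */
}

static void destroy_queue(int i)
{
	printf("destroy queue %d\n", i);
}

static int init_io_queues(void)
{
	int i;

	for (i = 1; i <= NQUEUES; i++)
		if (init_queue(i) < 0)
			goto unwind;
	return 0;

unwind:
	/* Tear down only the queues that were actually initialized. */
	while (--i >= 1)
		destroy_queue(i);
	return -1;
}

int main(void)
{
	if (init_io_queues() < 0)
		puts("init failed, everything unwound");
	return 0;
}
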
 
index 1370eee0a3c0f6295722d22e0c103a2f6cece47b..f7ff15f17ca97d65161cdb6a164e62deff339916 100644
@@ -73,6 +73,7 @@ struct nvmet_sq {
        u16                     qid;
        u16                     size;
        struct completion       free_done;
+       struct completion       confirm_done;
 };
 
 /**
index 9aa1da3778b3ac1d2262bfe9b845b65b9cd942d9..ecc4fe8625612442ded6a554e762d029b908c8dc 100644
@@ -703,11 +703,6 @@ static void nvmet_rdma_handle_command(struct nvmet_rdma_queue *queue,
 {
        u16 status;
 
-       cmd->queue = queue;
-       cmd->n_rdma = 0;
-       cmd->req.port = queue->port;
-
-
        ib_dma_sync_single_for_cpu(queue->dev->device,
                cmd->cmd->sge[0].addr, cmd->cmd->sge[0].length,
                DMA_FROM_DEVICE);
@@ -760,9 +755,12 @@ static void nvmet_rdma_recv_done(struct ib_cq *cq, struct ib_wc *wc)
 
        cmd->queue = queue;
        rsp = nvmet_rdma_get_rsp(queue);
+       rsp->queue = queue;
        rsp->cmd = cmd;
        rsp->flags = 0;
        rsp->req.cmd = cmd->nvme_cmd;
+       rsp->req.port = queue->port;
+       rsp->n_rdma = 0;
 
        if (unlikely(queue->state != NVMET_RDMA_Q_LIVE)) {
                unsigned long flags;
index bc090daa850a4b8fdb8a29c8592582eda8aee668..5dc53d420ca8ca805c0c036c23e3c1a3fc42ac00 100644
@@ -939,8 +939,10 @@ parport_register_dev_model(struct parport *port, const char *name,
         * pardevice fields. -arca
         */
        port->ops->init_state(par_dev, par_dev->state);
-       port->proc_device = par_dev;
-       parport_device_proc_register(par_dev);
+       if (!test_and_set_bit(PARPORT_DEVPROC_REGISTERED, &port->devflags)) {
+               port->proc_device = par_dev;
+               parport_device_proc_register(par_dev);
+       }
 
        return par_dev;
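The proc registration above is now guarded by test_and_set_bit() so the per-port entry is created at most once even when several devices race through this path. A single-threaded toy of the guard (the kernel primitive is atomic; this stand-in is not):

#include <stdio.h>

static int devproc_registered;

static int test_and_set(int *flag)
{
	int old = *flag;	/* test_and_set_bit() does this atomically */

	*flag = 1;
	return old;
}

static void register_device(int id)
{
	if (!test_and_set(&devproc_registered))
		printf("device %d: registered the proc entry\n", id);
	else
		printf("device %d: proc entry already present\n", id);
}

int main(void)
{
	register_device(0);
	register_device(1);
	return 0;
}
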
 
index 993b650ef2759cbffc56c0bc086a8d2172ef4fcf..44f774c12fb25e7ab6f98df5edf8a09638971eca 100644
@@ -132,10 +132,6 @@ static int exynos5440_pcie_get_mem_resources(struct platform_device *pdev,
        struct device *dev = pci->dev;
        struct resource *res;
 
-       /* If using the PHY framework, doesn't need to get other resource */
-       if (ep->using_phy)
-               return 0;
-
        ep->mem_res = devm_kzalloc(dev, sizeof(*ep->mem_res), GFP_KERNEL);
        if (!ep->mem_res)
                return -ENOMEM;
@@ -145,6 +141,10 @@ static int exynos5440_pcie_get_mem_resources(struct platform_device *pdev,
        if (IS_ERR(ep->mem_res->elbi_base))
                return PTR_ERR(ep->mem_res->elbi_base);
 
+       /* If using the PHY framework, we don't need to get the other resources */
+       if (ep->using_phy)
+               return 0;
+
        res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
        ep->mem_res->phy_base = devm_ioremap_resource(dev, res);
        if (IS_ERR(ep->mem_res->phy_base))
index 52b5bdccf5f0c2ab462cc695e332a8b3e3234970..b89c373555c553d6042571b1b780d04b5cef0960 100644
@@ -14,6 +14,7 @@
  * Copyright (C) 2015 - 2016 Cavium, Inc.
  */
 
+#include <linux/bitfield.h>
 #include <linux/kernel.h>
 #include <linux/init.h>
 #include <linux/of_address.h>
@@ -334,6 +335,50 @@ static int thunder_pem_init(struct device *dev, struct pci_config_window *cfg,
 
 #if defined(CONFIG_ACPI) && defined(CONFIG_PCI_QUIRKS)
 
+#define PEM_RES_BASE           0x87e0c0000000UL
+#define PEM_NODE_MASK          GENMASK(45, 44)
+#define PEM_INDX_MASK          GENMASK(26, 24)
+#define PEM_MIN_DOM_IN_NODE    4
+#define PEM_MAX_DOM_IN_NODE    10
+
+static void thunder_pem_reserve_range(struct device *dev, int seg,
+                                     struct resource *r)
+{
+       resource_size_t start = r->start, end = r->end;
+       struct resource *res;
+       const char *regionid;
+
+       regionid = kasprintf(GFP_KERNEL, "PEM RC:%d", seg);
+       if (!regionid)
+               return;
+
+       res = request_mem_region(start, end - start + 1, regionid);
+       if (res)
+               res->flags &= ~IORESOURCE_BUSY;
+       else
+               kfree(regionid);
+
+       dev_info(dev, "%pR %s reserved\n", r,
+                res ? "has been" : "could not be");
+}
+
+static void thunder_pem_legacy_fw(struct acpi_pci_root *root,
+                                struct resource *res_pem)
+{
+       int node = acpi_get_node(root->device->handle);
+       int index;
+
+       if (node == NUMA_NO_NODE)
+               node = 0;
+
+       index = root->segment - PEM_MIN_DOM_IN_NODE;
+       index -= node * PEM_MAX_DOM_IN_NODE;
+       res_pem->start = PEM_RES_BASE | FIELD_PREP(PEM_NODE_MASK, node) |
+                                       FIELD_PREP(PEM_INDX_MASK, index);
+       res_pem->end = res_pem->start + SZ_16M - 1;
+       res_pem->flags = IORESOURCE_MEM;
+}
+
 static int thunder_pem_acpi_init(struct pci_config_window *cfg)
 {
        struct device *dev = cfg->parent;
@@ -346,10 +391,17 @@ static int thunder_pem_acpi_init(struct pci_config_window *cfg)
        if (!res_pem)
                return -ENOMEM;
 
-       ret = acpi_get_rc_resources(dev, "THRX0002", root->segment, res_pem);
+       ret = acpi_get_rc_resources(dev, "CAVA02B", root->segment, res_pem);
+
+       /*
+        * If we fail to gather resources, it means we are running with old
+        * FW and need to calculate the PEM-specific resources manually.
+        */
        if (ret) {
-               dev_err(dev, "can't get rc base address\n");
-               return ret;
+               thunder_pem_legacy_fw(root, res_pem);
+               /* Reserve PEM-specific resources and PCI configuration space */
+               thunder_pem_reserve_range(dev, root->segment, res_pem);
+               thunder_pem_reserve_range(dev, root->segment, &cfg->res);
        }
 
        return thunder_pem_init(dev, cfg, res_pem);
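thunder_pem_legacy_fw() composes the PEM base address by OR-ing node and index fields into fixed bit ranges of a base constant. A userspace sketch of the GENMASK/FIELD_PREP arithmetic (field_prep() below open-codes what the kernel macro does for these constant masks; __builtin_ctzll is a GCC/Clang builtin):

#include <stdint.h>
#include <stdio.h>

#define GENMASK_ULL(h, l) \
	((~0ULL << (l)) & (~0ULL >> (63 - (h))))

#define PEM_RES_BASE	0x87e0c0000000ULL
#define PEM_NODE_MASK	GENMASK_ULL(45, 44)
#define PEM_INDX_MASK	GENMASK_ULL(26, 24)

static uint64_t field_prep(uint64_t mask, uint64_t val)
{
	/* Shift the value up to the mask's lowest set bit. */
	return (val << __builtin_ctzll(mask)) & mask;
}

int main(void)
{
	unsigned int node = 1, index = 2;
	uint64_t base = PEM_RES_BASE |
			field_prep(PEM_NODE_MASK, node) |
			field_prep(PEM_INDX_MASK, index);

	printf("PEM RC base: 0x%llx\n", (unsigned long long)base);
	return 0;
}
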
index bd4c9ec25edc22531ae450b2bb08f1d2aecd7b62..384c27e664fec8aa777246dce0bca499728ae42a 100644
@@ -44,8 +44,7 @@ static int iproc_pcie_bcma_probe(struct bcma_device *bdev)
 {
        struct device *dev = &bdev->dev;
        struct iproc_pcie *pcie;
-       LIST_HEAD(res);
-       struct resource res_mem;
+       LIST_HEAD(resources);
        int ret;
 
        pcie = devm_kzalloc(dev, sizeof(*pcie), GFP_KERNEL);
@@ -63,22 +62,23 @@ static int iproc_pcie_bcma_probe(struct bcma_device *bdev)
 
        pcie->base_addr = bdev->addr;
 
-       res_mem.start = bdev->addr_s[0];
-       res_mem.end = bdev->addr_s[0] + SZ_128M - 1;
-       res_mem.name = "PCIe MEM space";
-       res_mem.flags = IORESOURCE_MEM;
-       pci_add_resource(&res, &res_mem);
+       pcie->mem.start = bdev->addr_s[0];
+       pcie->mem.end = bdev->addr_s[0] + SZ_128M - 1;
+       pcie->mem.name = "PCIe MEM space";
+       pcie->mem.flags = IORESOURCE_MEM;
+       pci_add_resource(&resources, &pcie->mem);
 
        pcie->map_irq = iproc_pcie_bcma_map_irq;
 
-       ret = iproc_pcie_setup(pcie, &res);
-       if (ret)
+       ret = iproc_pcie_setup(pcie, &resources);
+       if (ret) {
                dev_err(dev, "PCIe controller setup failed\n");
-
-       pci_free_resource_list(&res);
+               pci_free_resource_list(&resources);
+               return ret;
+       }
 
        bcma_set_drvdata(bdev, pcie);
-       return ret;
+       return 0;
 }
 
 static void iproc_pcie_bcma_remove(struct bcma_device *bdev)
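The bcma probe stops building the MEM resource on the stack and keeps it in the long-lived iproc_pcie structure instead (the new struct resource mem field), which appears to be a lifetime fix: the resource list stores a pointer to it, so it must outlive the probe function. A toy illustration of that rule (the structures and helpers are invented stand-ins):

#include <stdio.h>
#include <stdlib.h>

struct resource_model {
	unsigned long start, end;
};

struct pcie_model {
	struct resource_model mem;	/* lives as long as the device */
};

static struct resource_model *registered;

static void add_resource(struct resource_model *r)
{
	registered = r;			/* keeps the pointer, no copy  */
}

static void probe(struct pcie_model *pcie)
{
	/* Handing add_resource() the address of a local variable here
	 * would leave 'registered' dangling once probe() returns.
	 */
	pcie->mem.start = 0x20000000;
	pcie->mem.end   = 0x27ffffff;
	add_resource(&pcie->mem);
}

int main(void)
{
	struct pcie_model *pcie = calloc(1, sizeof(*pcie));

	probe(pcie);
	printf("resource [0x%lx-0x%lx] is still valid after probe\n",
	       registered->start, registered->end);
	free(pcie);
	return 0;
}
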
index f4909bb0b2ad1505c3e084820f36104387add8f5..8c6a327ca6cdf883f32ea9a6fa0e7e1e28ab0e98 100644
@@ -51,7 +51,7 @@ static int iproc_pcie_pltfm_probe(struct platform_device *pdev)
        struct device_node *np = dev->of_node;
        struct resource reg;
        resource_size_t iobase = 0;
-       LIST_HEAD(res);
+       LIST_HEAD(resources);
        int ret;
 
        pcie = devm_kzalloc(dev, sizeof(*pcie), GFP_KERNEL);
@@ -96,10 +96,10 @@ static int iproc_pcie_pltfm_probe(struct platform_device *pdev)
                pcie->phy = NULL;
        }
 
-       ret = of_pci_get_host_bridge_resources(np, 0, 0xff, &res, &iobase);
+       ret = of_pci_get_host_bridge_resources(np, 0, 0xff, &resources,
+                                              &iobase);
        if (ret) {
-               dev_err(dev,
-                       "unable to get PCI host bridge resources\n");
+               dev_err(dev, "unable to get PCI host bridge resources\n");
                return ret;
        }
 
@@ -112,14 +112,15 @@ static int iproc_pcie_pltfm_probe(struct platform_device *pdev)
                pcie->map_irq = of_irq_parse_and_map_pci;
        }
 
-       ret = iproc_pcie_setup(pcie, &res);
-       if (ret)
+       ret = iproc_pcie_setup(pcie, &resources);
+       if (ret) {
                dev_err(dev, "PCIe controller setup failed\n");
-
-       pci_free_resource_list(&res);
+               pci_free_resource_list(&resources);
+               return ret;
+       }
 
        platform_set_drvdata(pdev, pcie);
-       return ret;
+       return 0;
 }
 
 static int iproc_pcie_pltfm_remove(struct platform_device *pdev)
index 04fed8e907f12b602b5fb1f11ff59971ebd0b87f..0bbe2ea44f3e1559dda22adc85ea70a8862684bd 100644
@@ -90,6 +90,7 @@ struct iproc_pcie {
 #ifdef CONFIG_ARM
        struct pci_sys_data sysdata;
 #endif
+       struct resource mem;
        struct pci_bus *root_bus;
        struct phy *phy;
        int (*map_irq)(const struct pci_dev *, u8, u8);
index d571bc330686517a389efec290d0cf499fb0dbd5..0042c365b29b9b94c791ca99d00dc7263354bb71 100644
@@ -973,27 +973,6 @@ static int __pci_enable_msix(struct pci_dev *dev, struct msix_entry *entries,
        return msix_capability_init(dev, entries, nvec, affd);
 }
 
-/**
- * pci_enable_msix - configure device's MSI-X capability structure
- * @dev: pointer to the pci_dev data structure of MSI-X device function
- * @entries: pointer to an array of MSI-X entries (optional)
- * @nvec: number of MSI-X irqs requested for allocation by device driver
- *
- * Setup the MSI-X capability structure of device function with the number
- * of requested irqs upon its software driver call to request for
- * MSI-X mode enabled on its hardware device function. A return of zero
- * indicates the successful configuration of MSI-X capability structure
- * with new allocated MSI-X irqs. A return of < 0 indicates a failure.
- * Or a return of > 0 indicates that driver request is exceeding the number
- * of irqs or MSI-X vectors available. Driver should use the returned value to
- * re-send its request.
- **/
-int pci_enable_msix(struct pci_dev *dev, struct msix_entry *entries, int nvec)
-{
-       return __pci_enable_msix(dev, entries, nvec, NULL);
-}
-EXPORT_SYMBOL(pci_enable_msix);
-
 void pci_msix_shutdown(struct pci_dev *dev)
 {
        struct msi_desc *entry;
index 973472c23d89045000cf1119a09867c921f2fdf8..1dfa10cc566bebed005c2fe11a72c85a37036c32 100644
@@ -478,7 +478,7 @@ static void aspm_calc_l1ss_info(struct pcie_link_state *link,
 
 static void pcie_aspm_cap_init(struct pcie_link_state *link, int blacklist)
 {
-       struct pci_dev *child, *parent = link->pdev;
+       struct pci_dev *child = link->downstream, *parent = link->pdev;
        struct pci_bus *linkbus = parent->subordinate;
        struct aspm_register_info upreg, dwreg;
 
@@ -491,9 +491,7 @@ static void pcie_aspm_cap_init(struct pcie_link_state *link, int blacklist)
 
        /* Get upstream/downstream components' register state */
        pcie_get_aspm_reg(parent, &upreg);
-       child = pci_function_0(linkbus);
        pcie_get_aspm_reg(child, &dwreg);
-       link->downstream = child;
 
        /*
         * If ASPM not supported, don't mess with the clocks and link,
@@ -800,6 +798,7 @@ static struct pcie_link_state *alloc_pcie_link_state(struct pci_dev *pdev)
        INIT_LIST_HEAD(&link->children);
        INIT_LIST_HEAD(&link->link);
        link->pdev = pdev;
+       link->downstream = pci_function_0(pdev->subordinate);
 
        /*
         * Root Ports and PCI/PCI-X to PCIe Bridges are roots of PCIe
index f754453fe754e985361cb49cee0bddf54d752443..673683660b5c70567d7c49cd091c5c8ecf088655 100644
@@ -2174,6 +2174,7 @@ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_LSI_LOGIC, 0x005d, quirk_blacklist_vpd);
 DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_LSI_LOGIC, 0x005f, quirk_blacklist_vpd);
 DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATTANSIC, PCI_ANY_ID,
                quirk_blacklist_vpd);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_QLOGIC, 0x2261, quirk_blacklist_vpd);
 
 /*
  * For Broadcom 5706, 5708, 5709 rev. A nics, any read beyond the
index dc5277ad1b5a7a5a7b27a7329f0f6382587e6050..005cadb7a3f8e9076700bf24d280383a0d321c85 100644
@@ -449,6 +449,7 @@ config PHY_QCOM_UFS
 config PHY_QCOM_USB_HS
        tristate "Qualcomm USB HS PHY module"
        depends on USB_ULPI_BUS
+       depends on EXTCON || !EXTCON # if EXTCON=m, this cannot be built-in
        select GENERIC_PHY
        help
          Support for the USB high-speed ULPI compliant phy on Qualcomm
@@ -510,12 +511,4 @@ config PHY_MESON8B_USB2
          and GXBB SoCs.
          If unsure, say N.
 
-config PHY_NSP_USB3
-       tristate "Broadcom NorthStar plus USB3 PHY driver"
-       depends on OF && (ARCH_BCM_NSP || COMPILE_TEST)
-       select GENERIC_PHY
-       default ARCH_BCM_NSP
-       help
-         Enable this to support the Broadcom Northstar plus USB3 PHY.
-         If unsure, say N.
 endmenu
index e7b0feb1e125a58c25fb96e784deae62bfb891e7..dd8f3b5d2918cd91bd48592b6771ca539321b086 100644
@@ -62,4 +62,3 @@ obj-$(CONFIG_PHY_CYGNUS_PCIE)         += phy-bcm-cygnus-pcie.o
 obj-$(CONFIG_ARCH_TEGRA) += tegra/
 obj-$(CONFIG_PHY_NS2_PCIE)             += phy-bcm-ns2-pcie.o
 obj-$(CONFIG_PHY_MESON8B_USB2)         += phy-meson8b-usb2.o
-obj-$(CONFIG_PHY_NSP_USB3)             += phy-bcm-nsp-usb3.o
diff --git a/drivers/phy/phy-bcm-nsp-usb3.c b/drivers/phy/phy-bcm-nsp-usb3.c
deleted file mode 100644
index 49024ea..0000000
--- a/drivers/phy/phy-bcm-nsp-usb3.c
+++ /dev/null
@@ -1,177 +0,0 @@
-/*
- * Copyright (C) 2016 Broadcom
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation version 2.
- *
- * This program is distributed "as is" WITHOUT ANY WARRANTY of any
- * kind, whether express or implied; without even the implied warranty
- * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- */
-
-#include <linux/delay.h>
-#include <linux/io.h>
-#include <linux/kernel.h>
-#include <linux/mfd/syscon.h>
-#include <linux/mdio.h>
-#include <linux/module.h>
-#include <linux/of.h>
-#include <linux/of_address.h>
-#include <linux/phy/phy.h>
-#include <linux/regmap.h>
-
-#define NSP_USB3_RST_CTRL_OFFSET       0x3f8
-
-/* mdio reg access */
-#define NSP_USB3_PHY_BASE_ADDR_REG     0x1f
-
-#define NSP_USB3_PHY_PLL30_BLOCK       0x8000
-#define NSP_USB3_PLL_CONTROL           0x01
-#define NSP_USB3_PLLA_CONTROL0         0x0a
-#define NSP_USB3_PLLA_CONTROL1         0x0b
-
-#define NSP_USB3_PHY_TX_PMD_BLOCK      0x8040
-#define NSP_USB3_TX_PMD_CONTROL1       0x01
-
-#define NSP_USB3_PHY_PIPE_BLOCK                0x8060
-#define NSP_USB3_LFPS_CMP              0x02
-#define NSP_USB3_LFPS_DEGLITCH         0x03
-
-struct nsp_usb3_phy {
-       struct regmap *usb3_ctrl;
-       struct phy *phy;
-       struct mdio_device *mdiodev;
-};
-
-static int nsp_usb3_phy_init(struct phy *phy)
-{
-       struct nsp_usb3_phy *iphy = phy_get_drvdata(phy);
-       struct mii_bus *bus = iphy->mdiodev->bus;
-       int addr = iphy->mdiodev->addr;
-       u32 data;
-       int rc;
-
-       rc = regmap_read(iphy->usb3_ctrl, 0, &data);
-       if (rc)
-               return rc;
-       data |= 1;
-       rc = regmap_write(iphy->usb3_ctrl, 0, data);
-       if (rc)
-               return rc;
-
-       rc = regmap_write(iphy->usb3_ctrl, NSP_USB3_RST_CTRL_OFFSET, 1);
-       if (rc)
-               return rc;
-
-       rc = mdiobus_write(bus, addr, NSP_USB3_PHY_BASE_ADDR_REG,
-                          NSP_USB3_PHY_PLL30_BLOCK);
-       if (rc)
-               return rc;
-
-       rc = mdiobus_write(bus, addr, NSP_USB3_PLL_CONTROL, 0x1000);
-       if (rc)
-               return rc;
-
-       rc = mdiobus_write(bus, addr, NSP_USB3_PLLA_CONTROL0, 0x6400);
-       if (rc)
-               return rc;
-
-       rc = mdiobus_write(bus, addr, NSP_USB3_PLLA_CONTROL1, 0xc000);
-       if (rc)
-               return rc;
-
-       rc = mdiobus_write(bus, addr, NSP_USB3_PLLA_CONTROL1, 0x8000);
-       if (rc)
-               return rc;
-
-       rc = regmap_write(iphy->usb3_ctrl, NSP_USB3_RST_CTRL_OFFSET, 0);
-       if (rc)
-               return rc;
-
-       rc = mdiobus_write(bus, addr, NSP_USB3_PLL_CONTROL, 0x9000);
-       if (rc)
-               return rc;
-
-       rc = mdiobus_write(bus, addr, NSP_USB3_PHY_BASE_ADDR_REG,
-                          NSP_USB3_PHY_PIPE_BLOCK);
-       if (rc)
-               return rc;
-
-       rc = mdiobus_write(bus, addr, NSP_USB3_LFPS_CMP, 0xf30d);
-       if (rc)
-               return rc;
-
-       rc = mdiobus_write(bus, addr, NSP_USB3_LFPS_DEGLITCH, 0x6302);
-       if (rc)
-               return rc;
-
-       rc = mdiobus_write(bus, addr, NSP_USB3_PHY_BASE_ADDR_REG,
-                          NSP_USB3_PHY_TX_PMD_BLOCK);
-       if (rc)
-               return rc;
-
-       rc = mdiobus_write(bus, addr, NSP_USB3_TX_PMD_CONTROL1, 0x1003);
-
-       return rc;
-}
-
-static struct phy_ops nsp_usb3_phy_ops = {
-       .init   = nsp_usb3_phy_init,
-       .owner  = THIS_MODULE,
-};
-
-static int nsp_usb3_phy_probe(struct mdio_device *mdiodev)
-{
-       struct device *dev = &mdiodev->dev;
-       struct phy_provider *provider;
-       struct nsp_usb3_phy *iphy;
-
-       iphy = devm_kzalloc(dev, sizeof(*iphy), GFP_KERNEL);
-       if (!iphy)
-               return -ENOMEM;
-       iphy->mdiodev = mdiodev;
-
-       iphy->usb3_ctrl = syscon_regmap_lookup_by_phandle(dev->of_node,
-                                                "usb3-ctrl-syscon");
-       if (IS_ERR(iphy->usb3_ctrl))
-               return PTR_ERR(iphy->usb3_ctrl);
-
-       iphy->phy = devm_phy_create(dev, dev->of_node, &nsp_usb3_phy_ops);
-       if (IS_ERR(iphy->phy)) {
-               dev_err(dev, "failed to create PHY\n");
-               return PTR_ERR(iphy->phy);
-       }
-
-       phy_set_drvdata(iphy->phy, iphy);
-
-       provider = devm_of_phy_provider_register(dev, of_phy_simple_xlate);
-       if (IS_ERR(provider)) {
-               dev_err(dev, "could not register PHY provider\n");
-               return PTR_ERR(provider);
-       }
-
-       return 0;
-}
-
-static const struct of_device_id nsp_usb3_phy_of_match[] = {
-       {.compatible = "brcm,nsp-usb3-phy",},
-       { /* sentinel */ }
-};
-
-static struct mdio_driver nsp_usb3_phy_driver = {
-       .mdiodrv = {
-               .driver = {
-                       .name = "nsp-usb3-phy",
-                       .of_match_table = nsp_usb3_phy_of_match,
-               },
-       },
-       .probe = nsp_usb3_phy_probe,
-};
-
-mdio_module_driver(nsp_usb3_phy_driver);
-
-MODULE_DESCRIPTION("Broadcom NSP USB3 PHY driver");
-MODULE_LICENSE("GPL v2");
-MODULE_AUTHOR("Yendapally Reddy Dhananjaya Reddy <yendapally.reddy@broadcom.com");
index 4f60b83641d5952d55edd37137283114ea9ba836..60baf25d98e25eb3e716979a54223dd3d03e198d 100644 (file)
@@ -254,8 +254,8 @@ static int exynos_pcie_phy_probe(struct platform_device *pdev)
 
        res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
        exynos_phy->blk_base = devm_ioremap_resource(dev, res);
-       if (IS_ERR(exynos_phy->phy_base))
-               return PTR_ERR(exynos_phy->phy_base);
+       if (IS_ERR(exynos_phy->blk_base))
+               return PTR_ERR(exynos_phy->blk_base);
 
        exynos_phy->drv_data = drv_data;
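
Note: the hunk above fixes a copy-and-paste bug. blk_base is the mapping just created, but the error check still inspected phy_base from the previous mapping, so a failed devm_ioremap_resource() for the second region went unnoticed. The idiom in general form, as a sketch (the helper name is illustrative):

#include <linux/platform_device.h>
#include <linux/io.h>
#include <linux/err.h>

/* Map the index-th MEM resource; the caller must IS_ERR()-check the
 * pointer that was just returned, never one mapped earlier.
 */
static void __iomem *map_bar(struct platform_device *pdev, unsigned int index)
{
	struct resource *res;

	res = platform_get_resource(pdev, IORESOURCE_MEM, index);
	return devm_ioremap_resource(&pdev->dev, res);	/* ERR_PTR on failure */
}

A caller then does: base = map_bar(pdev, 1); if (IS_ERR(base)) return PTR_ERR(base);
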
 
index 7671424d46cbe0a5628caee0615cde2d79b8d478..31a3a98d067caa4440a25e901eba69bdf3b0e862 100644 (file)
@@ -667,11 +667,11 @@ static const char * const uart_ao_b_groups[] = {
 };
 
 static const char * const i2c_ao_groups[] = {
-       "i2c_sdk_ao", "i2c_sda_ao",
+       "i2c_sck_ao", "i2c_sda_ao",
 };
 
 static const char * const i2c_slave_ao_groups[] = {
-       "i2c_slave_sdk_ao", "i2c_slave_sda_ao",
+       "i2c_slave_sck_ao", "i2c_slave_sda_ao",
 };
 
 static const char * const remote_input_ao_groups[] = {
index 676efcc032d26178718c601116a1a387622760c5..3ae8066bc1279c1c23dfc7144215fb48879efae8 100644 (file)
@@ -1285,6 +1285,22 @@ static void st_gpio_irq_unmask(struct irq_data *d)
        writel(BIT(d->hwirq), bank->base + REG_PIO_SET_PMASK);
 }
 
+static int st_gpio_irq_request_resources(struct irq_data *d)
+{
+       struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+
+       st_gpio_direction_input(gc, d->hwirq);
+
+       return gpiochip_lock_as_irq(gc, d->hwirq);
+}
+
+static void st_gpio_irq_release_resources(struct irq_data *d)
+{
+       struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+
+       gpiochip_unlock_as_irq(gc, d->hwirq);
+}
+
 static int st_gpio_irq_set_type(struct irq_data *d, unsigned type)
 {
        struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
@@ -1438,12 +1454,14 @@ static struct gpio_chip st_gpio_template = {
 };
 
 static struct irq_chip st_gpio_irqchip = {
-       .name           = "GPIO",
-       .irq_disable    = st_gpio_irq_mask,
-       .irq_mask       = st_gpio_irq_mask,
-       .irq_unmask     = st_gpio_irq_unmask,
-       .irq_set_type   = st_gpio_irq_set_type,
-       .flags          = IRQCHIP_SKIP_SET_WAKE,
+       .name                   = "GPIO",
+       .irq_request_resources  = st_gpio_irq_request_resources,
+       .irq_release_resources  = st_gpio_irq_release_resources,
+       .irq_disable            = st_gpio_irq_mask,
+       .irq_mask               = st_gpio_irq_mask,
+       .irq_unmask             = st_gpio_irq_unmask,
+       .irq_set_type           = st_gpio_irq_set_type,
+       .flags                  = IRQCHIP_SKIP_SET_WAKE,
 };
 
 static int st_gpiolib_register_bank(struct st_pinctrl *info,
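
Note: the two hooks added above follow the usual gpiolib pattern for interrupt-capable GPIO controllers: when an IRQ is requested on a line, the pin is forced to input and then locked so gpiolib refuses to flip it back to output for as long as the interrupt is in use; the release hook drops the lock. They are reached from the consumer side roughly like this (a sketch; names are illustrative):

#include <linux/gpio/consumer.h>
#include <linux/interrupt.h>

static int example_request_gpio_irq(struct device *dev,
				    struct gpio_desc *gpiod,
				    irq_handler_t thread_fn, void *data)
{
	int irq = gpiod_to_irq(gpiod);

	if (irq < 0)
		return irq;

	/* Setting up the interrupt invokes the chip's
	 * irq_request_resources hook, which now switches the pin to
	 * input and locks it as an IRQ.
	 */
	return devm_request_threaded_irq(dev, irq, NULL, thread_fn,
					 IRQF_ONESHOT, dev_name(dev), data);
}
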
index b68ae424cee247d51541d4ceb36aaa8c8c9132f6..743d1f458205fac1a5c26c683fdc38f12e401095 100644 (file)
@@ -405,6 +405,36 @@ static const struct msm_pingroup ipq4019_groups[] = {
        PINGROUP(67, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
        PINGROUP(68, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
        PINGROUP(69, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+       PINGROUP(70, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+       PINGROUP(71, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+       PINGROUP(72, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+       PINGROUP(73, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+       PINGROUP(74, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+       PINGROUP(75, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+       PINGROUP(76, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+       PINGROUP(77, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+       PINGROUP(78, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+       PINGROUP(79, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+       PINGROUP(80, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+       PINGROUP(81, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+       PINGROUP(82, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+       PINGROUP(83, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+       PINGROUP(84, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+       PINGROUP(85, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+       PINGROUP(86, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+       PINGROUP(87, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+       PINGROUP(88, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+       PINGROUP(89, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+       PINGROUP(90, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+       PINGROUP(91, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+       PINGROUP(92, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+       PINGROUP(93, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+       PINGROUP(94, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+       PINGROUP(95, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+       PINGROUP(96, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+       PINGROUP(97, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+       PINGROUP(98, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+       PINGROUP(99, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
 };
 
 static const struct msm_pinctrl_soc_data ipq4019_pinctrl = {
index f8e9e1c2b2f6f45078aa2fe9a3356b450d60fd58..273badd925611aa86e19e3a4aebc6691cf812fcb 100644 (file)
@@ -422,6 +422,20 @@ static int msm_gpio_direction_output(struct gpio_chip *chip, unsigned offset, in
        return 0;
 }
 
+static int msm_gpio_get_direction(struct gpio_chip *chip, unsigned int offset)
+{
+       struct msm_pinctrl *pctrl = gpiochip_get_data(chip);
+       const struct msm_pingroup *g;
+       u32 val;
+
+       g = &pctrl->soc->groups[offset];
+
+       val = readl(pctrl->regs + g->ctl_reg);
+
+       /* 0 = output, 1 = input */
+       return val & BIT(g->oe_bit) ? 0 : 1;
+}
+
 static int msm_gpio_get(struct gpio_chip *chip, unsigned offset)
 {
        const struct msm_pingroup *g;
@@ -510,6 +524,7 @@ static void msm_gpio_dbg_show(struct seq_file *s, struct gpio_chip *chip)
 static struct gpio_chip msm_gpio_template = {
        .direction_input  = msm_gpio_direction_input,
        .direction_output = msm_gpio_direction_output,
+       .get_direction    = msm_gpio_get_direction,
        .get              = msm_gpio_get,
        .set              = msm_gpio_set,
        .request          = gpiochip_generic_request,
@@ -594,10 +609,6 @@ static void msm_gpio_irq_unmask(struct irq_data *d)
 
        raw_spin_lock_irqsave(&pctrl->lock, flags);
 
-       val = readl(pctrl->regs + g->intr_status_reg);
-       val &= ~BIT(g->intr_status_bit);
-       writel(val, pctrl->regs + g->intr_status_reg);
-
        val = readl(pctrl->regs + g->intr_cfg_reg);
        val |= BIT(g->intr_enable_bit);
        writel(val, pctrl->regs + g->intr_cfg_reg);
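
Note: two separate improvements in this hunk set. A get_direction hook is added, letting gpiolib report the real hardware direction (at chip registration time and in debugfs) instead of guessing; and irq_unmask no longer clears the latched status register, evidently so that an interrupt which fired while the line was masked is not discarded on unmask. The get_direction convention itself, as a sketch (my_readl() and MY_OE_BIT stand in for the driver's real register accessors):

/* gpio_chip.get_direction(): return 0 for output, 1 for input. */
static int example_get_direction(struct gpio_chip *chip, unsigned int offset)
{
	u32 ctl = my_readl(chip, offset);	/* hypothetical MMIO read */

	return ctl & MY_OE_BIT ? 0 : 1;		/* output-enable set => output */
}
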
index f9ddba7decc18563916d5adc118958fad46f0073..d7aa22cff480ed63d73c9e1e6f8fbf22d0eda290 100644 (file)
@@ -988,9 +988,16 @@ samsung_pinctrl_get_soc_data(struct samsung_pinctrl_drv_data *d,
 
        for (i = 0; i < ctrl->nr_ext_resources + 1; i++) {
                res = platform_get_resource(pdev, IORESOURCE_MEM, i);
-               virt_base[i] = devm_ioremap_resource(&pdev->dev, res);
-               if (IS_ERR(virt_base[i]))
-                       return ERR_CAST(virt_base[i]);
+               if (!res) {
+                       dev_err(&pdev->dev, "failed to get mem%d resource\n", i);
+                       return ERR_PTR(-EINVAL);
+               }
+               virt_base[i] = devm_ioremap(&pdev->dev, res->start,
+                                               resource_size(res));
+               if (!virt_base[i]) {
+                       dev_err(&pdev->dev, "failed to ioremap %pR\n", res);
+                       return ERR_PTR(-EIO);
+               }
        }
 
        bank = d->pin_banks;
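
Note: this hunk trades devm_ioremap_resource() for a bare devm_ioremap(). The former also calls request_mem_region(), which fails when the range overlaps one claimed by another device; presumably such an overlap is the motivation here, since only the exclusive claim is given up. Two behavioural differences follow, both visible in the new code: devm_ioremap() does not tolerate a NULL resource (hence the explicit !res check) and it signals failure with NULL rather than an ERR_PTR. A sketch normalizing the two conventions (function name is illustrative):

#include <linux/device.h>
#include <linux/io.h>
#include <linux/ioport.h>
#include <linux/err.h>

static void __iomem *example_map_shared(struct device *dev,
					struct resource *res)
{
	void __iomem *base;

	if (!res)		/* devm_ioremap() won't check this for us */
		return ERR_PTR(-EINVAL);

	base = devm_ioremap(dev, res->start, resource_size(res));
	return base ?: ERR_PTR(-EIO);	/* map NULL onto an ERR_PTR */
}
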
index 815a88673d38193b8c1313265d46b536738b3586..542077069391b9b262192a502809708476072e7e 100644 (file)
@@ -1,6 +1,6 @@
 config PINCTRL_TI_IODELAY
        tristate "TI IODelay Module pinconf driver"
-       depends on OF
+       depends on OF && (SOC_DRA7XX || COMPILE_TEST)
        select GENERIC_PINCTRL_GROUPS
        select GENERIC_PINMUX_FUNCTIONS
        select GENERIC_PINCONF
index 77a0236ee781dd06949ed288005bb45b99396287..83f8864fa76ac5a26a1947902f691877f0510c56 100644 (file)
@@ -390,22 +390,22 @@ static const struct pinctrl_pin_desc uniphier_ld11_pins[] = {
        UNIPHIER_PINCTRL_PIN(140, "AO1D0", 140,
                             140, UNIPHIER_PIN_DRV_1BIT,
                             140, UNIPHIER_PIN_PULL_DOWN),
-       UNIPHIER_PINCTRL_PIN(141, "TCON0", 141,
+       UNIPHIER_PINCTRL_PIN(141, "AO1D1", 141,
                             141, UNIPHIER_PIN_DRV_1BIT,
                             141, UNIPHIER_PIN_PULL_DOWN),
-       UNIPHIER_PINCTRL_PIN(142, "TCON1", 142,
+       UNIPHIER_PINCTRL_PIN(142, "AO1D2", 142,
                             142, UNIPHIER_PIN_DRV_1BIT,
                             142, UNIPHIER_PIN_PULL_DOWN),
-       UNIPHIER_PINCTRL_PIN(143, "TCON2", 143,
+       UNIPHIER_PINCTRL_PIN(143, "XIRQ9", 143,
                             143, UNIPHIER_PIN_DRV_1BIT,
                             143, UNIPHIER_PIN_PULL_DOWN),
-       UNIPHIER_PINCTRL_PIN(144, "TCON3", 144,
+       UNIPHIER_PINCTRL_PIN(144, "XIRQ10", 144,
                             144, UNIPHIER_PIN_DRV_1BIT,
                             144, UNIPHIER_PIN_PULL_DOWN),
-       UNIPHIER_PINCTRL_PIN(145, "TCON4", 145,
+       UNIPHIER_PINCTRL_PIN(145, "XIRQ11", 145,
                             145, UNIPHIER_PIN_DRV_1BIT,
                             145, UNIPHIER_PIN_PULL_DOWN),
-       UNIPHIER_PINCTRL_PIN(146, "TCON5", 146,
+       UNIPHIER_PINCTRL_PIN(146, "XIRQ13", 146,
                             146, UNIPHIER_PIN_DRV_1BIT,
                             146, UNIPHIER_PIN_PULL_DOWN),
        UNIPHIER_PINCTRL_PIN(147, "PWMA", 147,
index 5be4783e40d4c9e9ca547d89d5faa6c1437f927a..dea98ffb6f606a6079f40607c8f5476c7a738f7d 100644 (file)
@@ -103,15 +103,6 @@ static struct quirk_entry quirk_asus_x200ca = {
        .wapf = 2,
 };
 
-static struct quirk_entry quirk_no_rfkill = {
-       .no_rfkill = true,
-};
-
-static struct quirk_entry quirk_no_rfkill_wapf4 = {
-       .wapf = 4,
-       .no_rfkill = true,
-};
-
 static struct quirk_entry quirk_asus_ux303ub = {
        .wmi_backlight_native = true,
 };
@@ -194,7 +185,7 @@ static const struct dmi_system_id asus_quirks[] = {
                        DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
                        DMI_MATCH(DMI_PRODUCT_NAME, "X456UA"),
                },
-               .driver_data = &quirk_no_rfkill_wapf4,
+               .driver_data = &quirk_asus_wapf4,
        },
        {
                .callback = dmi_matched,
@@ -203,7 +194,7 @@ static const struct dmi_system_id asus_quirks[] = {
                        DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
                        DMI_MATCH(DMI_PRODUCT_NAME, "X456UF"),
                },
-               .driver_data = &quirk_no_rfkill_wapf4,
+               .driver_data = &quirk_asus_wapf4,
        },
        {
                .callback = dmi_matched,
@@ -367,42 +358,6 @@ static const struct dmi_system_id asus_quirks[] = {
                },
                .driver_data = &quirk_asus_x200ca,
        },
-       {
-               .callback = dmi_matched,
-               .ident = "ASUSTeK COMPUTER INC. X555UB",
-               .matches = {
-                       DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
-                       DMI_MATCH(DMI_PRODUCT_NAME, "X555UB"),
-               },
-               .driver_data = &quirk_no_rfkill,
-       },
-       {
-               .callback = dmi_matched,
-               .ident = "ASUSTeK COMPUTER INC. N552VW",
-               .matches = {
-                       DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
-                       DMI_MATCH(DMI_PRODUCT_NAME, "N552VW"),
-               },
-               .driver_data = &quirk_no_rfkill,
-       },
-       {
-               .callback = dmi_matched,
-               .ident = "ASUSTeK COMPUTER INC. U303LB",
-               .matches = {
-                       DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
-                       DMI_MATCH(DMI_PRODUCT_NAME, "U303LB"),
-               },
-               .driver_data = &quirk_no_rfkill,
-       },
-       {
-               .callback = dmi_matched,
-               .ident = "ASUSTeK COMPUTER INC. Z550MA",
-               .matches = {
-                       DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
-                       DMI_MATCH(DMI_PRODUCT_NAME, "Z550MA"),
-               },
-               .driver_data = &quirk_no_rfkill,
-       },
        {
                .callback = dmi_matched,
                .ident = "ASUSTeK COMPUTER INC. UX303UB",
index 43cb680adbb42045aaeea332220460440180215a..8fe5890bf539f4f2eb722139f4c56a3065403e92 100644 (file)
@@ -159,6 +159,8 @@ MODULE_LICENSE("GPL");
 #define USB_INTEL_XUSB2PR              0xD0
 #define PCI_DEVICE_ID_INTEL_LYNXPOINT_LP_XHCI  0x9c31
 
+static const char * const ashs_ids[] = { "ATK4001", "ATK4002", NULL };
+
 struct bios_args {
        u32 arg0;
        u32 arg1;
@@ -2051,6 +2053,16 @@ static int asus_wmi_fan_init(struct asus_wmi *asus)
        return 0;
 }
 
+static bool ashs_present(void)
+{
+       int i = 0;
+       while (ashs_ids[i]) {
+               if (acpi_dev_found(ashs_ids[i++]))
+                       return true;
+       }
+       return false;
+}
+
 /*
  * WMI Driver
  */
@@ -2095,7 +2107,11 @@ static int asus_wmi_add(struct platform_device *pdev)
        if (err)
                goto fail_leds;
 
-       if (!asus->driver->quirks->no_rfkill) {
+       asus_wmi_get_devstate(asus, ASUS_WMI_DEVID_WLAN, &result);
+       if (result & (ASUS_WMI_DSTS_PRESENCE_BIT | ASUS_WMI_DSTS_USER_BIT))
+               asus->driver->wlan_ctrl_by_user = 1;
+
+       if (!(asus->driver->wlan_ctrl_by_user && ashs_present())) {
                err = asus_wmi_rfkill_init(asus);
                if (err)
                        goto fail_rfkill;
@@ -2134,10 +2150,6 @@ static int asus_wmi_add(struct platform_device *pdev)
        if (err)
                goto fail_debugfs;
 
-       asus_wmi_get_devstate(asus, ASUS_WMI_DEVID_WLAN, &result);
-       if (result & (ASUS_WMI_DSTS_PRESENCE_BIT | ASUS_WMI_DSTS_USER_BIT))
-               asus->driver->wlan_ctrl_by_user = 1;
-
        return 0;
 
 fail_debugfs:
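
Note: two coordinated changes in asus-wmi.c. The ASUS_WMI_DEVID_WLAN devstate query moves ahead of rfkill setup, so wlan_ctrl_by_user is known in time to matter, and rfkill registration is skipped whenever wireless is user-controlled and an ASHS ACPI device (ATK4001/ATK4002) is present to drive the radio itself. That runtime test replaces the per-model no_rfkill DMI quirks removed above; the no_rfkill field itself goes away in the next hunk. The scan in ashs_present() generalizes to any NULL-terminated HID list (a sketch; acpi_dev_found() is the stock one-HID helper):

#include <linux/acpi.h>

static bool any_acpi_dev_found(const char * const *hids)
{
	for (; *hids; hids++)
		if (acpi_dev_found(*hids))	/* HID present in ACPI namespace? */
			return true;
	return false;
}
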
index fdff626c3b51b039f3b63473a6cf333d04fda819..c9589d9342bbf8f883c49abbcd7cebf9a23c608e 100644 (file)
@@ -39,7 +39,6 @@ struct key_entry;
 struct asus_wmi;
 
 struct quirk_entry {
-       bool no_rfkill;
        bool hotplug_wireless;
        bool scalar_panel_brightness;
        bool store_backlight_power;
index 2b218b1d13e55dc985a2ca27e44b6a6ddf905141..e12cc3504d48799b447e636e21c2c6c440fc5827 100644 (file)
 
 #define FUJITSU_LCD_N_LEVELS 8
 
-#define ACPI_FUJITSU_CLASS              "fujitsu"
-#define ACPI_FUJITSU_HID                "FUJ02B1"
-#define ACPI_FUJITSU_DRIVER_NAME       "Fujitsu laptop FUJ02B1 ACPI brightness driver"
-#define ACPI_FUJITSU_DEVICE_NAME        "Fujitsu FUJ02B1"
-#define ACPI_FUJITSU_HOTKEY_HID        "FUJ02E3"
-#define ACPI_FUJITSU_HOTKEY_DRIVER_NAME "Fujitsu laptop FUJ02E3 ACPI hotkeys driver"
-#define ACPI_FUJITSU_HOTKEY_DEVICE_NAME "Fujitsu FUJ02E3"
+#define ACPI_FUJITSU_CLASS             "fujitsu"
+#define ACPI_FUJITSU_BL_HID            "FUJ02B1"
+#define ACPI_FUJITSU_BL_DRIVER_NAME    "Fujitsu laptop FUJ02B1 ACPI brightness driver"
+#define ACPI_FUJITSU_BL_DEVICE_NAME    "Fujitsu FUJ02B1"
+#define ACPI_FUJITSU_LAPTOP_HID                "FUJ02E3"
+#define ACPI_FUJITSU_LAPTOP_DRIVER_NAME        "Fujitsu laptop FUJ02E3 ACPI hotkeys driver"
+#define ACPI_FUJITSU_LAPTOP_DEVICE_NAME        "Fujitsu FUJ02E3"
 
 #define ACPI_FUJITSU_NOTIFY_CODE1     0x80
 
 /* FUNC interface - command values */
-#define FUNC_RFKILL    0x1000
+#define FUNC_FLAGS     0x1000
 #define FUNC_LEDS      0x1001
 #define FUNC_BUTTONS   0x1002
 #define FUNC_BACKLIGHT  0x1004
 /* FUNC interface - responses */
 #define UNSUPPORTED_CMD 0x80000000
 
+/* FUNC interface - status flags */
+#define FLAG_RFKILL    0x020
+#define FLAG_LID       0x100
+#define FLAG_DOCK      0x200
+
 #if IS_ENABLED(CONFIG_LEDS_CLASS)
 /* FUNC interface - LED control */
 #define FUNC_LED_OFF   0x1
 #endif
 
 /* Device controlling the backlight and associated keys */
-struct fujitsu_t {
+struct fujitsu_bl {
        acpi_handle acpi_handle;
        struct acpi_device *dev;
        struct input_dev *input;
@@ -150,12 +155,12 @@ struct fujitsu_t {
        unsigned int brightness_level;
 };
 
-static struct fujitsu_t *fujitsu;
+static struct fujitsu_bl *fujitsu_bl;
 static int use_alt_lcd_levels = -1;
 static int disable_brightness_adjust = -1;
 
-/* Device used to access other hotkeys on the laptop */
-struct fujitsu_hotkey_t {
+/* Device used to access hotkeys and other features on the laptop */
+struct fujitsu_laptop {
        acpi_handle acpi_handle;
        struct acpi_device *dev;
        struct input_dev *input;
@@ -163,17 +168,15 @@ struct fujitsu_hotkey_t {
        struct platform_device *pf_device;
        struct kfifo fifo;
        spinlock_t fifo_lock;
-       int rfkill_supported;
-       int rfkill_state;
+       int flags_supported;
+       int flags_state;
        int logolamp_registered;
        int kblamps_registered;
        int radio_led_registered;
        int eco_led_registered;
 };
 
-static struct fujitsu_hotkey_t *fujitsu_hotkey;
-
-static void acpi_fujitsu_hotkey_notify(struct acpi_device *device, u32 event);
+static struct fujitsu_laptop *fujitsu_laptop;
 
 #if IS_ENABLED(CONFIG_LEDS_CLASS)
 static enum led_brightness logolamp_get(struct led_classdev *cdev);
@@ -222,8 +225,6 @@ static struct led_classdev eco_led = {
 static u32 dbg_level = 0x03;
 #endif
 
-static void acpi_fujitsu_notify(struct acpi_device *device, u32 event);
-
 /* Fujitsu ACPI interface function */
 
 static int call_fext_func(int cmd, int arg0, int arg1, int arg2)
@@ -239,7 +240,7 @@ static int call_fext_func(int cmd, int arg0, int arg1, int arg2)
        unsigned long long value;
        acpi_handle handle = NULL;
 
-       status = acpi_get_handle(fujitsu_hotkey->acpi_handle, "FUNC", &handle);
+       status = acpi_get_handle(fujitsu_laptop->acpi_handle, "FUNC", &handle);
        if (ACPI_FAILURE(status)) {
                vdbg_printk(FUJLAPTOP_DBG_ERROR,
                                "FUNC interface is not present\n");
@@ -300,9 +301,9 @@ static int radio_led_set(struct led_classdev *cdev,
                                enum led_brightness brightness)
 {
        if (brightness >= LED_FULL)
-               return call_fext_func(FUNC_RFKILL, 0x5, RADIO_LED_ON, RADIO_LED_ON);
+               return call_fext_func(FUNC_FLAGS, 0x5, RADIO_LED_ON, RADIO_LED_ON);
        else
-               return call_fext_func(FUNC_RFKILL, 0x5, RADIO_LED_ON, 0x0);
+               return call_fext_func(FUNC_FLAGS, 0x5, RADIO_LED_ON, 0x0);
 }
 
 static int eco_led_set(struct led_classdev *cdev,
@@ -346,7 +347,7 @@ static enum led_brightness radio_led_get(struct led_classdev *cdev)
 {
        enum led_brightness brightness = LED_OFF;
 
-       if (call_fext_func(FUNC_RFKILL, 0x4, 0x0, 0x0) & RADIO_LED_ON)
+       if (call_fext_func(FUNC_FLAGS, 0x4, 0x0, 0x0) & RADIO_LED_ON)
                brightness = LED_FULL;
 
        return brightness;
@@ -373,10 +374,10 @@ static int set_lcd_level(int level)
        vdbg_printk(FUJLAPTOP_DBG_TRACE, "set lcd level via SBLL [%d]\n",
                    level);
 
-       if (level < 0 || level >= fujitsu->max_brightness)
+       if (level < 0 || level >= fujitsu_bl->max_brightness)
                return -EINVAL;
 
-       status = acpi_get_handle(fujitsu->acpi_handle, "SBLL", &handle);
+       status = acpi_get_handle(fujitsu_bl->acpi_handle, "SBLL", &handle);
        if (ACPI_FAILURE(status)) {
                vdbg_printk(FUJLAPTOP_DBG_ERROR, "SBLL not present\n");
                return -ENODEV;
@@ -398,10 +399,10 @@ static int set_lcd_level_alt(int level)
        vdbg_printk(FUJLAPTOP_DBG_TRACE, "set lcd level via SBL2 [%d]\n",
                    level);
 
-       if (level < 0 || level >= fujitsu->max_brightness)
+       if (level < 0 || level >= fujitsu_bl->max_brightness)
                return -EINVAL;
 
-       status = acpi_get_handle(fujitsu->acpi_handle, "SBL2", &handle);
+       status = acpi_get_handle(fujitsu_bl->acpi_handle, "SBL2", &handle);
        if (ACPI_FAILURE(status)) {
                vdbg_printk(FUJLAPTOP_DBG_ERROR, "SBL2 not present\n");
                return -ENODEV;
@@ -421,19 +422,19 @@ static int get_lcd_level(void)
 
        vdbg_printk(FUJLAPTOP_DBG_TRACE, "get lcd level via GBLL\n");
 
-       status =
-           acpi_evaluate_integer(fujitsu->acpi_handle, "GBLL", NULL, &state);
+       status = acpi_evaluate_integer(fujitsu_bl->acpi_handle, "GBLL", NULL,
+                                      &state);
        if (ACPI_FAILURE(status))
                return 0;
 
-       fujitsu->brightness_level = state & 0x0fffffff;
+       fujitsu_bl->brightness_level = state & 0x0fffffff;
 
        if (state & 0x80000000)
-               fujitsu->brightness_changed = 1;
+               fujitsu_bl->brightness_changed = 1;
        else
-               fujitsu->brightness_changed = 0;
+               fujitsu_bl->brightness_changed = 0;
 
-       return fujitsu->brightness_level;
+       return fujitsu_bl->brightness_level;
 }
 
 static int get_max_brightness(void)
@@ -443,14 +444,14 @@ static int get_max_brightness(void)
 
        vdbg_printk(FUJLAPTOP_DBG_TRACE, "get max lcd level via RBLL\n");
 
-       status =
-           acpi_evaluate_integer(fujitsu->acpi_handle, "RBLL", NULL, &state);
+       status = acpi_evaluate_integer(fujitsu_bl->acpi_handle, "RBLL", NULL,
+                                      &state);
        if (ACPI_FAILURE(status))
                return -1;
 
-       fujitsu->max_brightness = state;
+       fujitsu_bl->max_brightness = state;
 
-       return fujitsu->max_brightness;
+       return fujitsu_bl->max_brightness;
 }
 
 /* Backlight device stuff */
@@ -483,7 +484,7 @@ static int bl_update_status(struct backlight_device *b)
        return ret;
 }
 
-static const struct backlight_ops fujitsubl_ops = {
+static const struct backlight_ops fujitsu_bl_ops = {
        .get_brightness = bl_get_brightness,
        .update_status = bl_update_status,
 };
@@ -511,7 +512,7 @@ show_brightness_changed(struct device *dev,
 
        int ret;
 
-       ret = fujitsu->brightness_changed;
+       ret = fujitsu_bl->brightness_changed;
        if (ret < 0)
                return ret;
 
@@ -539,7 +540,7 @@ static ssize_t store_lcd_level(struct device *dev,
        int level, ret;
 
        if (sscanf(buf, "%i", &level) != 1
-           || (level < 0 || level >= fujitsu->max_brightness))
+           || (level < 0 || level >= fujitsu_bl->max_brightness))
                return -EINVAL;
 
        if (use_alt_lcd_levels)
@@ -567,9 +568,9 @@ static ssize_t
 show_lid_state(struct device *dev,
                        struct device_attribute *attr, char *buf)
 {
-       if (!(fujitsu_hotkey->rfkill_supported & 0x100))
+       if (!(fujitsu_laptop->flags_supported & FLAG_LID))
                return sprintf(buf, "unknown\n");
-       if (fujitsu_hotkey->rfkill_state & 0x100)
+       if (fujitsu_laptop->flags_state & FLAG_LID)
                return sprintf(buf, "open\n");
        else
                return sprintf(buf, "closed\n");
@@ -579,9 +580,9 @@ static ssize_t
 show_dock_state(struct device *dev,
                        struct device_attribute *attr, char *buf)
 {
-       if (!(fujitsu_hotkey->rfkill_supported & 0x200))
+       if (!(fujitsu_laptop->flags_supported & FLAG_DOCK))
                return sprintf(buf, "unknown\n");
-       if (fujitsu_hotkey->rfkill_state & 0x200)
+       if (fujitsu_laptop->flags_state & FLAG_DOCK)
                return sprintf(buf, "docked\n");
        else
                return sprintf(buf, "undocked\n");
@@ -591,9 +592,9 @@ static ssize_t
 show_radios_state(struct device *dev,
                        struct device_attribute *attr, char *buf)
 {
-       if (!(fujitsu_hotkey->rfkill_supported & 0x20))
+       if (!(fujitsu_laptop->flags_supported & FLAG_RFKILL))
                return sprintf(buf, "unknown\n");
-       if (fujitsu_hotkey->rfkill_state & 0x20)
+       if (fujitsu_laptop->flags_state & FLAG_RFKILL)
                return sprintf(buf, "on\n");
        else
                return sprintf(buf, "killed\n");
@@ -607,7 +608,7 @@ static DEVICE_ATTR(lid, 0444, show_lid_state, ignore_store);
 static DEVICE_ATTR(dock, 0444, show_dock_state, ignore_store);
 static DEVICE_ATTR(radios, 0444, show_radios_state, ignore_store);
 
-static struct attribute *fujitsupf_attributes[] = {
+static struct attribute *fujitsu_pf_attributes[] = {
        &dev_attr_brightness_changed.attr,
        &dev_attr_max_brightness.attr,
        &dev_attr_lcd_level.attr,
@@ -617,11 +618,11 @@ static struct attribute *fujitsupf_attributes[] = {
        NULL
 };
 
-static struct attribute_group fujitsupf_attribute_group = {
-       .attrs = fujitsupf_attributes
+static struct attribute_group fujitsu_pf_attribute_group = {
+       .attrs = fujitsu_pf_attributes
 };
 
-static struct platform_driver fujitsupf_driver = {
+static struct platform_driver fujitsu_pf_driver = {
        .driver = {
                   .name = "fujitsu-laptop",
                   }
@@ -630,39 +631,30 @@ static struct platform_driver fujitsupf_driver = {
 static void __init dmi_check_cb_common(const struct dmi_system_id *id)
 {
        pr_info("Identified laptop model '%s'\n", id->ident);
-       if (use_alt_lcd_levels == -1) {
-               if (acpi_has_method(NULL,
-                               "\\_SB.PCI0.LPCB.FJEX.SBL2"))
-                       use_alt_lcd_levels = 1;
-               else
-                       use_alt_lcd_levels = 0;
-               vdbg_printk(FUJLAPTOP_DBG_TRACE, "auto-detected usealt as "
-                       "%i\n", use_alt_lcd_levels);
-       }
 }
 
 static int __init dmi_check_cb_s6410(const struct dmi_system_id *id)
 {
        dmi_check_cb_common(id);
-       fujitsu->keycode1 = KEY_SCREENLOCK;     /* "Lock" */
-       fujitsu->keycode2 = KEY_HELP;   /* "Mobility Center" */
+       fujitsu_bl->keycode1 = KEY_SCREENLOCK;  /* "Lock" */
+       fujitsu_bl->keycode2 = KEY_HELP;        /* "Mobility Center" */
        return 1;
 }
 
 static int __init dmi_check_cb_s6420(const struct dmi_system_id *id)
 {
        dmi_check_cb_common(id);
-       fujitsu->keycode1 = KEY_SCREENLOCK;     /* "Lock" */
-       fujitsu->keycode2 = KEY_HELP;   /* "Mobility Center" */
+       fujitsu_bl->keycode1 = KEY_SCREENLOCK;  /* "Lock" */
+       fujitsu_bl->keycode2 = KEY_HELP;        /* "Mobility Center" */
        return 1;
 }
 
 static int __init dmi_check_cb_p8010(const struct dmi_system_id *id)
 {
        dmi_check_cb_common(id);
-       fujitsu->keycode1 = KEY_HELP;   /* "Support" */
-       fujitsu->keycode3 = KEY_SWITCHVIDEOMODE;        /* "Presentation" */
-       fujitsu->keycode4 = KEY_WWW;    /* "Internet" */
+       fujitsu_bl->keycode1 = KEY_HELP;                /* "Support" */
+       fujitsu_bl->keycode3 = KEY_SWITCHVIDEOMODE;     /* "Presentation" */
+       fujitsu_bl->keycode4 = KEY_WWW;                 /* "Internet" */
        return 1;
 }
 
@@ -693,7 +685,7 @@ static const struct dmi_system_id fujitsu_dmi_table[] __initconst = {
 
 /* ACPI device for LCD brightness control */
 
-static int acpi_fujitsu_add(struct acpi_device *device)
+static int acpi_fujitsu_bl_add(struct acpi_device *device)
 {
        int state = 0;
        struct input_dev *input;
@@ -702,22 +694,22 @@ static int acpi_fujitsu_add(struct acpi_device *device)
        if (!device)
                return -EINVAL;
 
-       fujitsu->acpi_handle = device->handle;
-       sprintf(acpi_device_name(device), "%s", ACPI_FUJITSU_DEVICE_NAME);
+       fujitsu_bl->acpi_handle = device->handle;
+       sprintf(acpi_device_name(device), "%s", ACPI_FUJITSU_BL_DEVICE_NAME);
        sprintf(acpi_device_class(device), "%s", ACPI_FUJITSU_CLASS);
-       device->driver_data = fujitsu;
+       device->driver_data = fujitsu_bl;
 
-       fujitsu->input = input = input_allocate_device();
+       fujitsu_bl->input = input = input_allocate_device();
        if (!input) {
                error = -ENOMEM;
                goto err_stop;
        }
 
-       snprintf(fujitsu->phys, sizeof(fujitsu->phys),
+       snprintf(fujitsu_bl->phys, sizeof(fujitsu_bl->phys),
                 "%s/video/input0", acpi_device_hid(device));
 
        input->name = acpi_device_name(device);
-       input->phys = fujitsu->phys;
+       input->phys = fujitsu_bl->phys;
        input->id.bustype = BUS_HOST;
        input->id.product = 0x06;
        input->dev.parent = &device->dev;
@@ -730,7 +722,7 @@ static int acpi_fujitsu_add(struct acpi_device *device)
        if (error)
                goto err_free_input_dev;
 
-       error = acpi_bus_update_power(fujitsu->acpi_handle, &state);
+       error = acpi_bus_update_power(fujitsu_bl->acpi_handle, &state);
        if (error) {
                pr_err("Error reading power state\n");
                goto err_unregister_input_dev;
@@ -740,7 +732,7 @@ static int acpi_fujitsu_add(struct acpi_device *device)
               acpi_device_name(device), acpi_device_bid(device),
               !device->power.state ? "on" : "off");
 
-       fujitsu->dev = device;
+       fujitsu_bl->dev = device;
 
        if (acpi_has_method(device->handle, METHOD_NAME__INI)) {
                vdbg_printk(FUJLAPTOP_DBG_INFO, "Invoking _INI\n");
@@ -750,6 +742,15 @@ static int acpi_fujitsu_add(struct acpi_device *device)
                        pr_err("_INI Method failed\n");
        }
 
+       if (use_alt_lcd_levels == -1) {
+               if (acpi_has_method(NULL, "\\_SB.PCI0.LPCB.FJEX.SBL2"))
+                       use_alt_lcd_levels = 1;
+               else
+                       use_alt_lcd_levels = 0;
+               vdbg_printk(FUJLAPTOP_DBG_TRACE, "auto-detected usealt as %i\n",
+                           use_alt_lcd_levels);
+       }
+
        /* do config (detect defaults) */
        use_alt_lcd_levels = use_alt_lcd_levels == 1 ? 1 : 0;
        disable_brightness_adjust = disable_brightness_adjust == 1 ? 1 : 0;
@@ -758,7 +759,7 @@ static int acpi_fujitsu_add(struct acpi_device *device)
                    use_alt_lcd_levels, disable_brightness_adjust);
 
        if (get_max_brightness() <= 0)
-               fujitsu->max_brightness = FUJITSU_LCD_N_LEVELS;
+               fujitsu_bl->max_brightness = FUJITSU_LCD_N_LEVELS;
        get_lcd_level();
 
        return 0;
@@ -772,38 +773,38 @@ err_stop:
        return error;
 }
 
-static int acpi_fujitsu_remove(struct acpi_device *device)
+static int acpi_fujitsu_bl_remove(struct acpi_device *device)
 {
-       struct fujitsu_t *fujitsu = acpi_driver_data(device);
-       struct input_dev *input = fujitsu->input;
+       struct fujitsu_bl *fujitsu_bl = acpi_driver_data(device);
+       struct input_dev *input = fujitsu_bl->input;
 
        input_unregister_device(input);
 
-       fujitsu->acpi_handle = NULL;
+       fujitsu_bl->acpi_handle = NULL;
 
        return 0;
 }
 
 /* Brightness notify */
 
-static void acpi_fujitsu_notify(struct acpi_device *device, u32 event)
+static void acpi_fujitsu_bl_notify(struct acpi_device *device, u32 event)
 {
        struct input_dev *input;
        int keycode;
        int oldb, newb;
 
-       input = fujitsu->input;
+       input = fujitsu_bl->input;
 
        switch (event) {
        case ACPI_FUJITSU_NOTIFY_CODE1:
                keycode = 0;
-               oldb = fujitsu->brightness_level;
+               oldb = fujitsu_bl->brightness_level;
                get_lcd_level();
-               newb = fujitsu->brightness_level;
+               newb = fujitsu_bl->brightness_level;
 
                vdbg_printk(FUJLAPTOP_DBG_TRACE,
                            "brightness button event [%i -> %i (%i)]\n",
-                           oldb, newb, fujitsu->brightness_changed);
+                           oldb, newb, fujitsu_bl->brightness_changed);
 
                if (oldb < newb) {
                        if (disable_brightness_adjust != 1) {
@@ -840,7 +841,7 @@ static void acpi_fujitsu_notify(struct acpi_device *device, u32 event)
 
 /* ACPI device for hotkey handling */
 
-static int acpi_fujitsu_hotkey_add(struct acpi_device *device)
+static int acpi_fujitsu_laptop_add(struct acpi_device *device)
 {
        int result = 0;
        int state = 0;
@@ -851,42 +852,42 @@ static int acpi_fujitsu_hotkey_add(struct acpi_device *device)
        if (!device)
                return -EINVAL;
 
-       fujitsu_hotkey->acpi_handle = device->handle;
+       fujitsu_laptop->acpi_handle = device->handle;
        sprintf(acpi_device_name(device), "%s",
-               ACPI_FUJITSU_HOTKEY_DEVICE_NAME);
+               ACPI_FUJITSU_LAPTOP_DEVICE_NAME);
        sprintf(acpi_device_class(device), "%s", ACPI_FUJITSU_CLASS);
-       device->driver_data = fujitsu_hotkey;
+       device->driver_data = fujitsu_laptop;
 
        /* kfifo */
-       spin_lock_init(&fujitsu_hotkey->fifo_lock);
-       error = kfifo_alloc(&fujitsu_hotkey->fifo, RINGBUFFERSIZE * sizeof(int),
+       spin_lock_init(&fujitsu_laptop->fifo_lock);
+       error = kfifo_alloc(&fujitsu_laptop->fifo, RINGBUFFERSIZE * sizeof(int),
                        GFP_KERNEL);
        if (error) {
                pr_err("kfifo_alloc failed\n");
                goto err_stop;
        }
 
-       fujitsu_hotkey->input = input = input_allocate_device();
+       fujitsu_laptop->input = input = input_allocate_device();
        if (!input) {
                error = -ENOMEM;
                goto err_free_fifo;
        }
 
-       snprintf(fujitsu_hotkey->phys, sizeof(fujitsu_hotkey->phys),
+       snprintf(fujitsu_laptop->phys, sizeof(fujitsu_laptop->phys),
                 "%s/video/input0", acpi_device_hid(device));
 
        input->name = acpi_device_name(device);
-       input->phys = fujitsu_hotkey->phys;
+       input->phys = fujitsu_laptop->phys;
        input->id.bustype = BUS_HOST;
        input->id.product = 0x06;
        input->dev.parent = &device->dev;
 
        set_bit(EV_KEY, input->evbit);
-       set_bit(fujitsu->keycode1, input->keybit);
-       set_bit(fujitsu->keycode2, input->keybit);
-       set_bit(fujitsu->keycode3, input->keybit);
-       set_bit(fujitsu->keycode4, input->keybit);
-       set_bit(fujitsu->keycode5, input->keybit);
+       set_bit(fujitsu_bl->keycode1, input->keybit);
+       set_bit(fujitsu_bl->keycode2, input->keybit);
+       set_bit(fujitsu_bl->keycode3, input->keybit);
+       set_bit(fujitsu_bl->keycode4, input->keybit);
+       set_bit(fujitsu_bl->keycode5, input->keybit);
        set_bit(KEY_TOUCHPAD_TOGGLE, input->keybit);
        set_bit(KEY_UNKNOWN, input->keybit);
 
@@ -894,7 +895,7 @@ static int acpi_fujitsu_hotkey_add(struct acpi_device *device)
        if (error)
                goto err_free_input_dev;
 
-       error = acpi_bus_update_power(fujitsu_hotkey->acpi_handle, &state);
+       error = acpi_bus_update_power(fujitsu_laptop->acpi_handle, &state);
        if (error) {
                pr_err("Error reading power state\n");
                goto err_unregister_input_dev;
@@ -904,7 +905,7 @@ static int acpi_fujitsu_hotkey_add(struct acpi_device *device)
                acpi_device_name(device), acpi_device_bid(device),
                !device->power.state ? "on" : "off");
 
-       fujitsu_hotkey->dev = device;
+       fujitsu_laptop->dev = device;
 
        if (acpi_has_method(device->handle, METHOD_NAME__INI)) {
                vdbg_printk(FUJLAPTOP_DBG_INFO, "Invoking _INI\n");
@@ -920,27 +921,27 @@ static int acpi_fujitsu_hotkey_add(struct acpi_device *device)
                ; /* No action, result is discarded */
        vdbg_printk(FUJLAPTOP_DBG_INFO, "Discarded %i ringbuffer entries\n", i);
 
-       fujitsu_hotkey->rfkill_supported =
-               call_fext_func(FUNC_RFKILL, 0x0, 0x0, 0x0);
+       fujitsu_laptop->flags_supported =
+               call_fext_func(FUNC_FLAGS, 0x0, 0x0, 0x0);
 
        /* Make sure our bitmask of supported functions is cleared if the
           RFKILL function block is not implemented, like on the S7020. */
-       if (fujitsu_hotkey->rfkill_supported == UNSUPPORTED_CMD)
-               fujitsu_hotkey->rfkill_supported = 0;
+       if (fujitsu_laptop->flags_supported == UNSUPPORTED_CMD)
+               fujitsu_laptop->flags_supported = 0;
 
-       if (fujitsu_hotkey->rfkill_supported)
-               fujitsu_hotkey->rfkill_state =
-                       call_fext_func(FUNC_RFKILL, 0x4, 0x0, 0x0);
+       if (fujitsu_laptop->flags_supported)
+               fujitsu_laptop->flags_state =
+                       call_fext_func(FUNC_FLAGS, 0x4, 0x0, 0x0);
 
        /* Suspect this is a keymap of the application panel, print it */
        pr_info("BTNI: [0x%x]\n", call_fext_func(FUNC_BUTTONS, 0x0, 0x0, 0x0));
 
 #if IS_ENABLED(CONFIG_LEDS_CLASS)
        if (call_fext_func(FUNC_LEDS, 0x0, 0x0, 0x0) & LOGOLAMP_POWERON) {
-               result = led_classdev_register(&fujitsu->pf_device->dev,
+               result = led_classdev_register(&fujitsu_bl->pf_device->dev,
                                                &logolamp_led);
                if (result == 0) {
-                       fujitsu_hotkey->logolamp_registered = 1;
+                       fujitsu_laptop->logolamp_registered = 1;
                } else {
                        pr_err("Could not register LED handler for logo lamp, error %i\n",
                               result);
@@ -949,10 +950,10 @@ static int acpi_fujitsu_hotkey_add(struct acpi_device *device)
 
        if ((call_fext_func(FUNC_LEDS, 0x0, 0x0, 0x0) & KEYBOARD_LAMPS) &&
           (call_fext_func(FUNC_BUTTONS, 0x0, 0x0, 0x0) == 0x0)) {
-               result = led_classdev_register(&fujitsu->pf_device->dev,
+               result = led_classdev_register(&fujitsu_bl->pf_device->dev,
                                                &kblamps_led);
                if (result == 0) {
-                       fujitsu_hotkey->kblamps_registered = 1;
+                       fujitsu_laptop->kblamps_registered = 1;
                } else {
                        pr_err("Could not register LED handler for keyboard lamps, error %i\n",
                               result);
@@ -966,10 +967,10 @@ static int acpi_fujitsu_hotkey_add(struct acpi_device *device)
         * that an RF LED is present.
         */
        if (call_fext_func(FUNC_BUTTONS, 0x0, 0x0, 0x0) & BIT(24)) {
-               result = led_classdev_register(&fujitsu->pf_device->dev,
+               result = led_classdev_register(&fujitsu_bl->pf_device->dev,
                                                &radio_led);
                if (result == 0) {
-                       fujitsu_hotkey->radio_led_registered = 1;
+                       fujitsu_laptop->radio_led_registered = 1;
                } else {
                        pr_err("Could not register LED handler for radio LED, error %i\n",
                               result);
@@ -983,10 +984,10 @@ static int acpi_fujitsu_hotkey_add(struct acpi_device *device)
        */
        if ((call_fext_func(FUNC_LEDS, 0x0, 0x0, 0x0) & BIT(14)) &&
           (call_fext_func(FUNC_LEDS, 0x2, ECO_LED, 0x0) != UNSUPPORTED_CMD)) {
-               result = led_classdev_register(&fujitsu->pf_device->dev,
+               result = led_classdev_register(&fujitsu_bl->pf_device->dev,
                                                &eco_led);
                if (result == 0) {
-                       fujitsu_hotkey->eco_led_registered = 1;
+                       fujitsu_laptop->eco_led_registered = 1;
                } else {
                        pr_err("Could not register LED handler for eco LED, error %i\n",
                               result);
@@ -1002,47 +1003,47 @@ err_unregister_input_dev:
 err_free_input_dev:
        input_free_device(input);
 err_free_fifo:
-       kfifo_free(&fujitsu_hotkey->fifo);
+       kfifo_free(&fujitsu_laptop->fifo);
 err_stop:
        return error;
 }
 
-static int acpi_fujitsu_hotkey_remove(struct acpi_device *device)
+static int acpi_fujitsu_laptop_remove(struct acpi_device *device)
 {
-       struct fujitsu_hotkey_t *fujitsu_hotkey = acpi_driver_data(device);
-       struct input_dev *input = fujitsu_hotkey->input;
+       struct fujitsu_laptop *fujitsu_laptop = acpi_driver_data(device);
+       struct input_dev *input = fujitsu_laptop->input;
 
 #if IS_ENABLED(CONFIG_LEDS_CLASS)
-       if (fujitsu_hotkey->logolamp_registered)
+       if (fujitsu_laptop->logolamp_registered)
                led_classdev_unregister(&logolamp_led);
 
-       if (fujitsu_hotkey->kblamps_registered)
+       if (fujitsu_laptop->kblamps_registered)
                led_classdev_unregister(&kblamps_led);
 
-       if (fujitsu_hotkey->radio_led_registered)
+       if (fujitsu_laptop->radio_led_registered)
                led_classdev_unregister(&radio_led);
 
-       if (fujitsu_hotkey->eco_led_registered)
+       if (fujitsu_laptop->eco_led_registered)
                led_classdev_unregister(&eco_led);
 #endif
 
        input_unregister_device(input);
 
-       kfifo_free(&fujitsu_hotkey->fifo);
+       kfifo_free(&fujitsu_laptop->fifo);
 
-       fujitsu_hotkey->acpi_handle = NULL;
+       fujitsu_laptop->acpi_handle = NULL;
 
        return 0;
 }
 
-static void acpi_fujitsu_hotkey_press(int keycode)
+static void acpi_fujitsu_laptop_press(int keycode)
 {
-       struct input_dev *input = fujitsu_hotkey->input;
+       struct input_dev *input = fujitsu_laptop->input;
        int status;
 
-       status = kfifo_in_locked(&fujitsu_hotkey->fifo,
+       status = kfifo_in_locked(&fujitsu_laptop->fifo,
                                 (unsigned char *)&keycode, sizeof(keycode),
-                                &fujitsu_hotkey->fifo_lock);
+                                &fujitsu_laptop->fifo_lock);
        if (status != sizeof(keycode)) {
                vdbg_printk(FUJLAPTOP_DBG_WARN,
                            "Could not push keycode [0x%x]\n", keycode);
@@ -1054,16 +1055,16 @@ static void acpi_fujitsu_hotkey_press(int keycode)
                    "Push keycode into ringbuffer [%d]\n", keycode);
 }
 
-static void acpi_fujitsu_hotkey_release(void)
+static void acpi_fujitsu_laptop_release(void)
 {
-       struct input_dev *input = fujitsu_hotkey->input;
+       struct input_dev *input = fujitsu_laptop->input;
        int keycode, status;
 
        while (true) {
-               status = kfifo_out_locked(&fujitsu_hotkey->fifo,
+               status = kfifo_out_locked(&fujitsu_laptop->fifo,
                                          (unsigned char *)&keycode,
                                          sizeof(keycode),
-                                         &fujitsu_hotkey->fifo_lock);
+                                         &fujitsu_laptop->fifo_lock);
                if (status != sizeof(keycode))
                        return;
                input_report_key(input, keycode, 0);
@@ -1073,14 +1074,14 @@ static void acpi_fujitsu_hotkey_release(void)
        }
 }
 
-static void acpi_fujitsu_hotkey_notify(struct acpi_device *device, u32 event)
+static void acpi_fujitsu_laptop_notify(struct acpi_device *device, u32 event)
 {
        struct input_dev *input;
        int keycode;
        unsigned int irb = 1;
        int i;
 
-       input = fujitsu_hotkey->input;
+       input = fujitsu_laptop->input;
 
        if (event != ACPI_FUJITSU_NOTIFY_CODE1) {
                keycode = KEY_UNKNOWN;
@@ -1093,9 +1094,9 @@ static void acpi_fujitsu_hotkey_notify(struct acpi_device *device, u32 event)
                return;
        }
 
-       if (fujitsu_hotkey->rfkill_supported)
-               fujitsu_hotkey->rfkill_state =
-                       call_fext_func(FUNC_RFKILL, 0x4, 0x0, 0x0);
+       if (fujitsu_laptop->flags_supported)
+               fujitsu_laptop->flags_state =
+                       call_fext_func(FUNC_FLAGS, 0x4, 0x0, 0x0);
 
        i = 0;
        while ((irb =
@@ -1103,19 +1104,19 @@ static void acpi_fujitsu_hotkey_notify(struct acpi_device *device, u32 event)
                        && (i++) < MAX_HOTKEY_RINGBUFFER_SIZE) {
                switch (irb & 0x4ff) {
                case KEY1_CODE:
-                       keycode = fujitsu->keycode1;
+                       keycode = fujitsu_bl->keycode1;
                        break;
                case KEY2_CODE:
-                       keycode = fujitsu->keycode2;
+                       keycode = fujitsu_bl->keycode2;
                        break;
                case KEY3_CODE:
-                       keycode = fujitsu->keycode3;
+                       keycode = fujitsu_bl->keycode3;
                        break;
                case KEY4_CODE:
-                       keycode = fujitsu->keycode4;
+                       keycode = fujitsu_bl->keycode4;
                        break;
                case KEY5_CODE:
-                       keycode = fujitsu->keycode5;
+                       keycode = fujitsu_bl->keycode5;
                        break;
                case 0:
                        keycode = 0;
@@ -1128,17 +1129,17 @@ static void acpi_fujitsu_hotkey_notify(struct acpi_device *device, u32 event)
                }
 
                if (keycode > 0)
-                       acpi_fujitsu_hotkey_press(keycode);
+                       acpi_fujitsu_laptop_press(keycode);
                else if (keycode == 0)
-                       acpi_fujitsu_hotkey_release();
+                       acpi_fujitsu_laptop_release();
        }
 
        /* On some models (first seen on the Skylake-based Lifebook
         * E736/E746/E756), the touchpad toggle hotkey (Fn+F4) is
-        * handled in software; its state is queried using FUNC_RFKILL
+        * handled in software; its state is queried using FUNC_FLAGS
         */
-       if ((fujitsu_hotkey->rfkill_supported & BIT(26)) &&
-           (call_fext_func(FUNC_RFKILL, 0x1, 0x0, 0x0) & BIT(26))) {
+       if ((fujitsu_laptop->flags_supported & BIT(26)) &&
+           (call_fext_func(FUNC_FLAGS, 0x1, 0x0, 0x0) & BIT(26))) {
                keycode = KEY_TOUCHPAD_TOGGLE;
                input_report_key(input, keycode, 1);
                input_sync(input);
@@ -1150,83 +1151,81 @@ static void acpi_fujitsu_hotkey_notify(struct acpi_device *device, u32 event)
 
 /* Initialization */
 
-static const struct acpi_device_id fujitsu_device_ids[] = {
-       {ACPI_FUJITSU_HID, 0},
+static const struct acpi_device_id fujitsu_bl_device_ids[] = {
+       {ACPI_FUJITSU_BL_HID, 0},
        {"", 0},
 };
 
-static struct acpi_driver acpi_fujitsu_driver = {
-       .name = ACPI_FUJITSU_DRIVER_NAME,
+static struct acpi_driver acpi_fujitsu_bl_driver = {
+       .name = ACPI_FUJITSU_BL_DRIVER_NAME,
        .class = ACPI_FUJITSU_CLASS,
-       .ids = fujitsu_device_ids,
+       .ids = fujitsu_bl_device_ids,
        .ops = {
-               .add = acpi_fujitsu_add,
-               .remove = acpi_fujitsu_remove,
-               .notify = acpi_fujitsu_notify,
+               .add = acpi_fujitsu_bl_add,
+               .remove = acpi_fujitsu_bl_remove,
+               .notify = acpi_fujitsu_bl_notify,
                },
 };
 
-static const struct acpi_device_id fujitsu_hotkey_device_ids[] = {
-       {ACPI_FUJITSU_HOTKEY_HID, 0},
+static const struct acpi_device_id fujitsu_laptop_device_ids[] = {
+       {ACPI_FUJITSU_LAPTOP_HID, 0},
        {"", 0},
 };
 
-static struct acpi_driver acpi_fujitsu_hotkey_driver = {
-       .name = ACPI_FUJITSU_HOTKEY_DRIVER_NAME,
+static struct acpi_driver acpi_fujitsu_laptop_driver = {
+       .name = ACPI_FUJITSU_LAPTOP_DRIVER_NAME,
        .class = ACPI_FUJITSU_CLASS,
-       .ids = fujitsu_hotkey_device_ids,
+       .ids = fujitsu_laptop_device_ids,
        .ops = {
-               .add = acpi_fujitsu_hotkey_add,
-               .remove = acpi_fujitsu_hotkey_remove,
-               .notify = acpi_fujitsu_hotkey_notify,
+               .add = acpi_fujitsu_laptop_add,
+               .remove = acpi_fujitsu_laptop_remove,
+               .notify = acpi_fujitsu_laptop_notify,
                },
 };
 
 static const struct acpi_device_id fujitsu_ids[] __used = {
-       {ACPI_FUJITSU_HID, 0},
-       {ACPI_FUJITSU_HOTKEY_HID, 0},
+       {ACPI_FUJITSU_BL_HID, 0},
+       {ACPI_FUJITSU_LAPTOP_HID, 0},
        {"", 0}
 };
 MODULE_DEVICE_TABLE(acpi, fujitsu_ids);
 
 static int __init fujitsu_init(void)
 {
-       int ret, result, max_brightness;
+       int ret, max_brightness;
 
        if (acpi_disabled)
                return -ENODEV;
 
-       fujitsu = kzalloc(sizeof(struct fujitsu_t), GFP_KERNEL);
-       if (!fujitsu)
+       fujitsu_bl = kzalloc(sizeof(struct fujitsu_bl), GFP_KERNEL);
+       if (!fujitsu_bl)
                return -ENOMEM;
-       fujitsu->keycode1 = KEY_PROG1;
-       fujitsu->keycode2 = KEY_PROG2;
-       fujitsu->keycode3 = KEY_PROG3;
-       fujitsu->keycode4 = KEY_PROG4;
-       fujitsu->keycode5 = KEY_RFKILL;
+       fujitsu_bl->keycode1 = KEY_PROG1;
+       fujitsu_bl->keycode2 = KEY_PROG2;
+       fujitsu_bl->keycode3 = KEY_PROG3;
+       fujitsu_bl->keycode4 = KEY_PROG4;
+       fujitsu_bl->keycode5 = KEY_RFKILL;
        dmi_check_system(fujitsu_dmi_table);
 
-       result = acpi_bus_register_driver(&acpi_fujitsu_driver);
-       if (result < 0) {
-               ret = -ENODEV;
+       ret = acpi_bus_register_driver(&acpi_fujitsu_bl_driver);
+       if (ret)
                goto fail_acpi;
-       }
 
        /* Register platform stuff */
 
-       fujitsu->pf_device = platform_device_alloc("fujitsu-laptop", -1);
-       if (!fujitsu->pf_device) {
+       fujitsu_bl->pf_device = platform_device_alloc("fujitsu-laptop", -1);
+       if (!fujitsu_bl->pf_device) {
                ret = -ENOMEM;
                goto fail_platform_driver;
        }
 
-       ret = platform_device_add(fujitsu->pf_device);
+       ret = platform_device_add(fujitsu_bl->pf_device);
        if (ret)
                goto fail_platform_device1;
 
        ret =
-           sysfs_create_group(&fujitsu->pf_device->dev.kobj,
-                              &fujitsupf_attribute_group);
+           sysfs_create_group(&fujitsu_bl->pf_device->dev.kobj,
+                              &fujitsu_pf_attribute_group);
        if (ret)
                goto fail_platform_device2;
 
@@ -1236,90 +1235,88 @@ static int __init fujitsu_init(void)
                struct backlight_properties props;
 
                memset(&props, 0, sizeof(struct backlight_properties));
-               max_brightness = fujitsu->max_brightness;
+               max_brightness = fujitsu_bl->max_brightness;
                props.type = BACKLIGHT_PLATFORM;
                props.max_brightness = max_brightness - 1;
-               fujitsu->bl_device = backlight_device_register("fujitsu-laptop",
-                                                              NULL, NULL,
-                                                              &fujitsubl_ops,
-                                                              &props);
-               if (IS_ERR(fujitsu->bl_device)) {
-                       ret = PTR_ERR(fujitsu->bl_device);
-                       fujitsu->bl_device = NULL;
+               fujitsu_bl->bl_device = backlight_device_register("fujitsu-laptop",
+                                                                 NULL, NULL,
+                                                                 &fujitsu_bl_ops,
+                                                                 &props);
+               if (IS_ERR(fujitsu_bl->bl_device)) {
+                       ret = PTR_ERR(fujitsu_bl->bl_device);
+                       fujitsu_bl->bl_device = NULL;
                        goto fail_sysfs_group;
                }
-               fujitsu->bl_device->props.brightness = fujitsu->brightness_level;
+               fujitsu_bl->bl_device->props.brightness = fujitsu_bl->brightness_level;
        }
 
-       ret = platform_driver_register(&fujitsupf_driver);
+       ret = platform_driver_register(&fujitsu_pf_driver);
        if (ret)
                goto fail_backlight;
 
-       /* Register hotkey driver */
+       /* Register laptop driver */
 
-       fujitsu_hotkey = kzalloc(sizeof(struct fujitsu_hotkey_t), GFP_KERNEL);
-       if (!fujitsu_hotkey) {
+       fujitsu_laptop = kzalloc(sizeof(struct fujitsu_laptop), GFP_KERNEL);
+       if (!fujitsu_laptop) {
                ret = -ENOMEM;
-               goto fail_hotkey;
+               goto fail_laptop;
        }
 
-       result = acpi_bus_register_driver(&acpi_fujitsu_hotkey_driver);
-       if (result < 0) {
-               ret = -ENODEV;
-               goto fail_hotkey1;
-       }
+       ret = acpi_bus_register_driver(&acpi_fujitsu_laptop_driver);
+       if (ret)
+               goto fail_laptop1;
 
        /* Sync backlight power status (needs FUJ02E3 device, hence deferred) */
        if (acpi_video_get_backlight_type() == acpi_backlight_vendor) {
                if (call_fext_func(FUNC_BACKLIGHT, 0x2, 0x4, 0x0) == 3)
-                       fujitsu->bl_device->props.power = FB_BLANK_POWERDOWN;
+                       fujitsu_bl->bl_device->props.power = FB_BLANK_POWERDOWN;
                else
-                       fujitsu->bl_device->props.power = FB_BLANK_UNBLANK;
+                       fujitsu_bl->bl_device->props.power = FB_BLANK_UNBLANK;
        }
 
        pr_info("driver " FUJITSU_DRIVER_VERSION " successfully loaded\n");
 
        return 0;
 
-fail_hotkey1:
-       kfree(fujitsu_hotkey);
-fail_hotkey:
-       platform_driver_unregister(&fujitsupf_driver);
+fail_laptop1:
+       kfree(fujitsu_laptop);
+fail_laptop:
+       platform_driver_unregister(&fujitsu_pf_driver);
 fail_backlight:
-       backlight_device_unregister(fujitsu->bl_device);
+       backlight_device_unregister(fujitsu_bl->bl_device);
 fail_sysfs_group:
-       sysfs_remove_group(&fujitsu->pf_device->dev.kobj,
-                          &fujitsupf_attribute_group);
+       sysfs_remove_group(&fujitsu_bl->pf_device->dev.kobj,
+                          &fujitsu_pf_attribute_group);
 fail_platform_device2:
-       platform_device_del(fujitsu->pf_device);
+       platform_device_del(fujitsu_bl->pf_device);
 fail_platform_device1:
-       platform_device_put(fujitsu->pf_device);
+       platform_device_put(fujitsu_bl->pf_device);
 fail_platform_driver:
-       acpi_bus_unregister_driver(&acpi_fujitsu_driver);
+       acpi_bus_unregister_driver(&acpi_fujitsu_bl_driver);
 fail_acpi:
-       kfree(fujitsu);
+       kfree(fujitsu_bl);
 
        return ret;
 }
 
 static void __exit fujitsu_cleanup(void)
 {
-       acpi_bus_unregister_driver(&acpi_fujitsu_hotkey_driver);
+       acpi_bus_unregister_driver(&acpi_fujitsu_laptop_driver);
 
-       kfree(fujitsu_hotkey);
+       kfree(fujitsu_laptop);
 
-       platform_driver_unregister(&fujitsupf_driver);
+       platform_driver_unregister(&fujitsu_pf_driver);
 
-       backlight_device_unregister(fujitsu->bl_device);
+       backlight_device_unregister(fujitsu_bl->bl_device);
 
-       sysfs_remove_group(&fujitsu->pf_device->dev.kobj,
-                          &fujitsupf_attribute_group);
+       sysfs_remove_group(&fujitsu_bl->pf_device->dev.kobj,
+                          &fujitsu_pf_attribute_group);
 
-       platform_device_unregister(fujitsu->pf_device);
+       platform_device_unregister(fujitsu_bl->pf_device);
 
-       acpi_bus_unregister_driver(&acpi_fujitsu_driver);
+       acpi_bus_unregister_driver(&acpi_fujitsu_bl_driver);
 
-       kfree(fujitsu);
+       kfree(fujitsu_bl);
 
        pr_info("driver unloaded\n");
 }
@@ -1341,7 +1338,3 @@ MODULE_AUTHOR("Jonathan Woithe, Peter Gruber, Tony Vroon");
 MODULE_DESCRIPTION("Fujitsu laptop extras support");
 MODULE_VERSION(FUJITSU_DRIVER_VERSION);
 MODULE_LICENSE("GPL");
-
-MODULE_ALIAS("dmi:*:svnFUJITSUSIEMENS:*:pvr:rvnFUJITSU:rnFJNB1D3:*:cvrS6410:*");
-MODULE_ALIAS("dmi:*:svnFUJITSUSIEMENS:*:pvr:rvnFUJITSU:rnFJNB1E6:*:cvrS6420:*");
-MODULE_ALIAS("dmi:*:svnFUJITSU:*:pvr:rvnFUJITSU:rnFJNB19C:*:cvrS7020:*");
index 09b4df74291e2618e95e842dc4f5d7bdb178ec93..bb865695d7a62d20fa66800c8ed421dcfa8cd8c2 100644 (file)
@@ -193,10 +193,7 @@ static int __init ptp_kvm_init(void)
 
        kvm_ptp_clock.ptp_clock = ptp_clock_register(&kvm_ptp_clock.caps, NULL);
 
-       if (IS_ERR(kvm_ptp_clock.ptp_clock))
-               return PTR_ERR(kvm_ptp_clock.ptp_clock);
-
-       return 0;
+       return PTR_ERR_OR_ZERO(kvm_ptp_clock.ptp_clock);
 }
 
 module_init(ptp_kvm_init);
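
The ptp_kvm hunk is a straight substitution of the PTR_ERR_OR_ZERO() helper from <linux/err.h>, which encodes exactly the removed three-line tail: return the encoded errno for an ERR_PTR() value, 0 for a valid pointer. Schematically (foo_create() is a placeholder):

    #include <linux/err.h>

    static int example_register(void)
    {
            struct foo *p = foo_create();   /* placeholder: returns a valid
                                             * pointer or an ERR_PTR() value */

            /* equivalent to: if (IS_ERR(p)) return PTR_ERR(p); return 0; */
            return PTR_ERR_OR_ZERO(p);
    }
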
index 9d19b9a62011b376be541b247336d455952bb42b..315a4be8dc1e64f429fb6bd5bab9700a0d254f29 100644 (file)
@@ -37,8 +37,8 @@
 #include "tsi721.h"
 
 #ifdef DEBUG
-u32 dbg_level;
-module_param(dbg_level, uint, S_IWUSR | S_IRUGO);
+u32 tsi_dbg_level;
+module_param_named(dbg_level, tsi_dbg_level, uint, S_IWUSR | S_IRUGO);
 MODULE_PARM_DESC(dbg_level, "Debugging output level (default 0 = none)");
 #endif
 
index 5941437cbdd164c7e0c491f33117401bc6fe54e2..957eadc5815095045f06291dbc2b83bbaea795d7 100644 (file)
@@ -40,11 +40,11 @@ enum {
 };
 
 #ifdef DEBUG
-extern u32 dbg_level;
+extern u32 tsi_dbg_level;
 
 #define tsi_debug(level, dev, fmt, arg...)                             \
        do {                                                            \
-               if (DBG_##level & dbg_level)                            \
+               if (DBG_##level & tsi_dbg_level)                                \
                        dev_dbg(dev, "%s: " fmt "\n", __func__, ##arg); \
        } while (0)
 #else
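
module_param_named() is what lets the tsi721 hunks rename the overly generic global dbg_level to tsi_dbg_level with no user-visible change: the first argument fixes the parameter name on the module command line and under /sys/module/.../parameters/, the second names the backing C variable. The pattern, with an illustrative driver name:

    #include <linux/module.h>
    #include <linux/moduleparam.h>

    static u32 mydrv_dbg_level;     /* prefixed identifier, no clash risk */

    /* still set via "modprobe mydrv dbg_level=2" and exposed as
     * /sys/module/mydrv/parameters/dbg_level */
    module_param_named(dbg_level, mydrv_dbg_level, uint, S_IWUSR | S_IRUGO);
    MODULE_PARM_DESC(dbg_level, "Debugging output level (default 0 = none)");
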
index 65f86bc24c07c7032726700e09a1d9ef3cdfb3c2..faad69a1a5974b8f76acde68a9e8c8922e27d01e 100644 (file)
@@ -76,7 +76,7 @@ config QCOM_ADSP_PIL
        depends on OF && ARCH_QCOM
        depends on REMOTEPROC
        depends on QCOM_SMEM
-       depends on QCOM_SMD || (COMPILE_TEST && QCOM_SMD=n)
+       depends on RPMSG_QCOM_SMD || (COMPILE_TEST && RPMSG_QCOM_SMD=n)
        select MFD_SYSCON
        select QCOM_MDT_LOADER
        select QCOM_RPROC_COMMON
@@ -93,7 +93,7 @@ config QCOM_Q6V5_PIL
        depends on OF && ARCH_QCOM
        depends on QCOM_SMEM
        depends on REMOTEPROC
-       depends on QCOM_SMD || (COMPILE_TEST && QCOM_SMD=n)
+       depends on RPMSG_QCOM_SMD || (COMPILE_TEST && RPMSG_QCOM_SMD=n)
        select MFD_SYSCON
        select QCOM_RPROC_COMMON
        select QCOM_SCM
@@ -104,7 +104,7 @@ config QCOM_Q6V5_PIL
 config QCOM_WCNSS_PIL
        tristate "Qualcomm WCNSS Peripheral Image Loader"
        depends on OF && ARCH_QCOM
-       depends on QCOM_SMD || (COMPILE_TEST && QCOM_SMD=n)
+       depends on RPMSG_QCOM_SMD || (COMPILE_TEST && RPMSG_QCOM_SMD=n)
        depends on QCOM_SMEM
        depends on REMOTEPROC
        select QCOM_MDT_LOADER
index f12ac0b28263f1dc6ae646554cc5a0b4226a1a2a..edc008f556632b03d182efc2e306601d38519b42 100644 (file)
@@ -16,7 +16,6 @@ config RPMSG_CHAR
 config RPMSG_QCOM_SMD
        tristate "Qualcomm Shared Memory Driver (SMD)"
        depends on QCOM_SMEM
-       depends on QCOM_SMD=n
        select RPMSG
        help
          Say y here to enable support for the Qualcomm Shared Memory Driver
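
The remoteproc hunks above retarget the same dependency idiom at the rpmsg-based symbol: "depends on FOO || (COMPILE_TEST && FOO=n)" means the driver builds normally when the transport is enabled, may additionally be compile-tested when the transport is fully off, but can never be combined with a half-matching configuration (for instance a built-in driver against a modular transport). In schematic form, with placeholder symbols:

    # schematic only -- FOO_DRIVER / BAR_TRANSPORT are placeholder symbols
    config FOO_DRIVER
            tristate "Example driver on top of a tristate transport"
            depends on BAR_TRANSPORT || (COMPILE_TEST && BAR_TRANSPORT=n)
            # BAR_TRANSPORT=y/m: normal build, link against the transport
            # BAR_TRANSPORT=n + COMPILE_TEST: build standalone for coverage,
            #   using the transport's inline stubs
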
index 40f1136f55688981d70767bcf4eb55524a603cc6..058db724b5a28a8390856aeb51da30a639f3cf6b 100644 (file)
@@ -572,6 +572,12 @@ int pkey_sec2protkey(u16 cardnr, u16 domain,
                rc = -EIO;
                goto out;
        }
+       if (prepcblk->ccp_rscode != 0) {
+               DEBUG_WARN(
+                       "pkey_sec2protkey unwrap secure key warning, card response %d/%d\n",
+                       (int) prepcblk->ccp_rtcode,
+                       (int) prepcblk->ccp_rscode);
+       }
 
        /* process response cprb param block */
        prepcblk->rpl_parmb = ((u8 *) prepcblk) + sizeof(struct CPRBX);
@@ -761,9 +767,10 @@ out:
 }
 
 /*
- * Fetch just the mkvp value via query_crypto_facility from adapter.
+ * Fetch the current and old mkvp values via
+ * query_crypto_facility from adapter.
  */
-static int fetch_mkvp(u16 cardnr, u16 domain, u64 *mkvp)
+static int fetch_mkvp(u16 cardnr, u16 domain, u64 mkvp[2])
 {
        int rc, found = 0;
        size_t rlen, vlen;
@@ -779,9 +786,10 @@ static int fetch_mkvp(u16 cardnr, u16 domain, u64 *mkvp)
        rc = query_crypto_facility(cardnr, domain, "STATICSA",
                                   rarray, &rlen, varray, &vlen);
        if (rc == 0 && rlen > 8*8 && vlen > 184+8) {
-               if (rarray[64] == '2') {
+               if (rarray[8*8] == '2') {
                        /* current master key state is valid */
-                       *mkvp = *((u64 *)(varray + 184));
+                       mkvp[0] = *((u64 *)(varray + 184));
+                       mkvp[1] = *((u64 *)(varray + 172));
                        found = 1;
                }
        }
@@ -796,14 +804,14 @@ struct mkvp_info {
        struct list_head list;
        u16 cardnr;
        u16 domain;
-       u64 mkvp;
+       u64 mkvp[2];
 };
 
 /* a list with mkvp_info entries */
 static LIST_HEAD(mkvp_list);
 static DEFINE_SPINLOCK(mkvp_list_lock);
 
-static int mkvp_cache_fetch(u16 cardnr, u16 domain, u64 *mkvp)
+static int mkvp_cache_fetch(u16 cardnr, u16 domain, u64 mkvp[2])
 {
        int rc = -ENOENT;
        struct mkvp_info *ptr;
@@ -812,7 +820,7 @@ static int mkvp_cache_fetch(u16 cardnr, u16 domain, u64 *mkvp)
        list_for_each_entry(ptr, &mkvp_list, list) {
                if (ptr->cardnr == cardnr &&
                    ptr->domain == domain) {
-                       *mkvp = ptr->mkvp;
+                       memcpy(mkvp, ptr->mkvp, 2 * sizeof(u64));
                        rc = 0;
                        break;
                }
@@ -822,7 +830,7 @@ static int mkvp_cache_fetch(u16 cardnr, u16 domain, u64 *mkvp)
        return rc;
 }
 
-static void mkvp_cache_update(u16 cardnr, u16 domain, u64 mkvp)
+static void mkvp_cache_update(u16 cardnr, u16 domain, u64 mkvp[2])
 {
        int found = 0;
        struct mkvp_info *ptr;
@@ -831,7 +839,7 @@ static void mkvp_cache_update(u16 cardnr, u16 domain, u64 mkvp)
        list_for_each_entry(ptr, &mkvp_list, list) {
                if (ptr->cardnr == cardnr &&
                    ptr->domain == domain) {
-                       ptr->mkvp = mkvp;
+                       memcpy(ptr->mkvp, mkvp, 2 * sizeof(u64));
                        found = 1;
                        break;
                }
@@ -844,7 +852,7 @@ static void mkvp_cache_update(u16 cardnr, u16 domain, u64 mkvp)
                }
                ptr->cardnr = cardnr;
                ptr->domain = domain;
-               ptr->mkvp = mkvp;
+               memcpy(ptr->mkvp, mkvp, 2 * sizeof(u64));
                list_add(&ptr->list, &mkvp_list);
        }
        spin_unlock_bh(&mkvp_list_lock);
@@ -888,8 +896,8 @@ int pkey_findcard(const struct pkey_seckey *seckey,
        struct secaeskeytoken *t = (struct secaeskeytoken *) seckey;
        struct zcrypt_device_matrix *device_matrix;
        u16 card, dom;
-       u64 mkvp;
-       int i, rc;
+       u64 mkvp[2];
+       int i, rc, oi = -1;
 
        /* mkvp must not be zero */
        if (t->mkvp == 0)
@@ -910,14 +918,14 @@ int pkey_findcard(const struct pkey_seckey *seckey,
                    device_matrix->device[i].functions & 0x04) {
                        /* an enabled CCA Coprocessor card */
                        /* try cached mkvp */
-                       if (mkvp_cache_fetch(card, dom, &mkvp) == 0 &&
-                           t->mkvp == mkvp) {
+                       if (mkvp_cache_fetch(card, dom, mkvp) == 0 &&
+                           t->mkvp == mkvp[0]) {
                                if (!verify)
                                        break;
                                /* verify: fetch mkvp from adapter */
-                               if (fetch_mkvp(card, dom, &mkvp) == 0) {
+                               if (fetch_mkvp(card, dom, mkvp) == 0) {
                                        mkvp_cache_update(card, dom, mkvp);
-                                       if (t->mkvp == mkvp)
+                                       if (t->mkvp == mkvp[0])
                                                break;
                                }
                        }
@@ -936,14 +944,21 @@ int pkey_findcard(const struct pkey_seckey *seckey,
                        card = AP_QID_CARD(device_matrix->device[i].qid);
                        dom = AP_QID_QUEUE(device_matrix->device[i].qid);
                        /* fresh fetch mkvp from adapter */
-                       if (fetch_mkvp(card, dom, &mkvp) == 0) {
+                       if (fetch_mkvp(card, dom, mkvp) == 0) {
                                mkvp_cache_update(card, dom, mkvp);
-                               if (t->mkvp == mkvp)
+                               if (t->mkvp == mkvp[0])
                                        break;
+                               if (t->mkvp == mkvp[1] && oi < 0)
+                                       oi = i;
                        }
                }
+               if (i >= MAX_ZDEV_ENTRIES && oi >= 0) {
+                       /* old mkvp matched, use this card then */
+                       card = AP_QID_CARD(device_matrix->device[oi].qid);
+                       dom = AP_QID_QUEUE(device_matrix->device[oi].qid);
+               }
        }
-       if (i < MAX_ZDEV_ENTRIES) {
+       if (i < MAX_ZDEV_ENTRIES || oi >= 0) {
                if (pcardnr)
                        *pcardnr = card;
                if (pdomain)
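
The net effect of the pkey changes: mkvp is now a two-element array, mkvp[0] holding the current and mkvp[1] the old master key verification pattern (hence the second read at varray offset 172), and pkey_findcard() gains oi to remember the first card whose old master key still matches. Condensed to its decision logic (a reading aid, not the literal code; card/dom are set per iteration in the real loop):

    u64 mkvp[2];    /* [0] = current MKVP, [1] = old MKVP */
    int i, oi = -1; /* oi: first card matching only the old MKVP */

    for (i = 0; i < MAX_ZDEV_ENTRIES; i++) {
            if (fetch_mkvp(card, dom, mkvp) != 0)
                    continue;
            mkvp_cache_update(card, dom, mkvp);
            if (t->mkvp == mkvp[0])
                    break;                  /* current key matches: done */
            if (t->mkvp == mkvp[1] && oi < 0)
                    oi = i;                 /* remember old-key fallback */
    }
    if (i >= MAX_ZDEV_ENTRIES && oi >= 0) {
            /* no current-key match anywhere: settle for the old-key card */
            card = AP_QID_CARD(device_matrix->device[oi].qid);
            dom  = AP_QID_QUEUE(device_matrix->device[oi].qid);
    }
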
index fd5944bbe224964a5afce64c1a82ba4d33c27dc2..730d9619400e5b1515b41371e6bc6f3adfa2118f 100644 (file)
@@ -1283,7 +1283,7 @@ static void ctcmpc_chx_txdone(fsm_instance *fi, int event, void *arg)
                p_header = (struct pdu *)
                        (skb_tail_pointer(ch->trans_skb) - skb->len);
                p_header->pdu_flag = 0x00;
-               if (skb->protocol == ntohs(ETH_P_SNAP))
+               if (be16_to_cpu(skb->protocol) == ETH_P_SNAP)
                        p_header->pdu_flag |= 0x60;
                else
                        p_header->pdu_flag |= 0x20;
index ac65f12bcd43c9dca411940ecabbf315e1b8585f..198842ce6876e812b07d57ab826ca8934920bd44 100644 (file)
@@ -106,7 +106,7 @@ void ctcm_unpack_skb(struct channel *ch, struct sk_buff *pskb)
                        priv->stats.rx_frame_errors++;
                        return;
                }
-               pskb->protocol = ntohs(header->type);
+               pskb->protocol = cpu_to_be16(header->type);
                if ((header->length <= LL_HEADER_LENGTH) ||
                    (len <= LL_HEADER_LENGTH)) {
                        if (!(ch->logflags & LOG_FLAG_ILLEGALSIZE)) {
@@ -125,7 +125,7 @@ void ctcm_unpack_skb(struct channel *ch, struct sk_buff *pskb)
                header->length -= LL_HEADER_LENGTH;
                len -= LL_HEADER_LENGTH;
                if ((header->length > skb_tailroom(pskb)) ||
-                       (header->length > len)) {
+                   (header->length > len)) {
                        if (!(ch->logflags & LOG_FLAG_OVERRUN)) {
                                CTCM_DBF_TEXT_(ERROR, CTC_DBF_ERROR,
                                        "%s(%s): Packet size %d (overrun)"
@@ -485,7 +485,7 @@ static int ctcm_transmit_skb(struct channel *ch, struct sk_buff *skb)
                } else {
                        atomic_inc(&skb->users);
                        header.length = l;
-                       header.type = skb->protocol;
+                       header.type = be16_to_cpu(skb->protocol);
                        header.unused = 0;
                        memcpy(skb_push(skb, LL_HEADER_LENGTH), &header,
                               LL_HEADER_LENGTH);
@@ -503,7 +503,7 @@ static int ctcm_transmit_skb(struct channel *ch, struct sk_buff *skb)
        atomic_inc(&skb->users);
        ch->prof.txlen += skb->len;
        header.length = skb->len + LL_HEADER_LENGTH;
-       header.type = skb->protocol;
+       header.type = be16_to_cpu(skb->protocol);
        header.unused = 0;
        memcpy(skb_push(skb, LL_HEADER_LENGTH), &header, LL_HEADER_LENGTH);
        block_len = skb->len + 2;
@@ -690,7 +690,7 @@ static int ctcmpc_transmit_skb(struct channel *ch, struct sk_buff *skb)
                p_header->pdu_offset = skb->len;
                p_header->pdu_proto = 0x01;
                p_header->pdu_flag = 0x00;
-               if (skb->protocol == ntohs(ETH_P_SNAP)) {
+               if (be16_to_cpu(skb->protocol) == ETH_P_SNAP) {
                        p_header->pdu_flag |= PDU_FIRST | PDU_CNTL;
                } else {
                        p_header->pdu_flag |= PDU_FIRST;
@@ -745,7 +745,7 @@ static int ctcmpc_transmit_skb(struct channel *ch, struct sk_buff *skb)
        p_header->pdu_proto = 0x01;
        p_header->pdu_flag = 0x00;
        p_header->pdu_seq = 0;
-       if (skb->protocol == ntohs(ETH_P_SNAP)) {
+       if (be16_to_cpu(skb->protocol) == ETH_P_SNAP) {
                p_header->pdu_flag |= PDU_FIRST | PDU_CNTL;
        } else {
                p_header->pdu_flag |= PDU_FIRST;
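
All of the ctcm changes above (and the netiucv hunk below) are the same endianness repair: skb->protocol is a big-endian __be16, so host-order constants such as ETH_P_SNAP must be compared through be16_to_cpu() and stored through cpu_to_be16(). On s390 both conversions are no-ops at runtime, but the annotations make the code portable and keep sparse's __bitwise checking quiet:

    #include <linux/if_ether.h>
    #include <linux/skbuff.h>

    static bool is_snap_frame(const struct sk_buff *skb)
    {
            /* convert the __be16 field before comparing host constants */
            return be16_to_cpu(skb->protocol) == ETH_P_SNAP;
    }

    static void mark_as_ipv4(struct sk_buff *skb)
    {
            /* and convert host constants before storing into __be16 */
            skb->protocol = cpu_to_be16(ETH_P_IP);
    }
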
index 3f85b97ab8d2704820ba1f6f038a99534a14461a..dba94b486f057822ba45d29bea1589adb4026ea5 100644 (file)
@@ -635,7 +635,7 @@ static void netiucv_unpack_skb(struct iucv_connection *conn,
        skb_put(pskb, NETIUCV_HDRLEN);
        pskb->dev = dev;
        pskb->ip_summed = CHECKSUM_NONE;
-       pskb->protocol = ntohs(ETH_P_IP);
+       pskb->protocol = cpu_to_be16(ETH_P_IP);
 
        while (1) {
                struct sk_buff *skb;
index e7addea8741b799066644052cba4e9a99f3a3335..f6aa21176d89780e9e745f9045255ff37f1cf788 100644 (file)
@@ -240,7 +240,6 @@ static inline int qeth_is_ipa_enabled(struct qeth_ipa_info *ipa,
 #define QETH_TX_TIMEOUT                100 * HZ
 #define QETH_RCD_TIMEOUT       60 * HZ
 #define QETH_RECLAIM_WORK_TIME HZ
-#define QETH_HEADER_SIZE       32
 #define QETH_MAX_PORTNO                15
 
 /*IPv6 address autoconfiguration stuff*/
@@ -447,7 +446,7 @@ struct qeth_qdio_out_buffer {
        atomic_t state;
        int next_element_to_fill;
        struct sk_buff_head skb_list;
-       int is_header[16];
+       int is_header[QDIO_MAX_ELEMENTS_PER_BUFFER];
 
        struct qaob *aob;
        struct qeth_qdio_out_q *q;
@@ -503,22 +502,12 @@ struct qeth_qdio_info {
        int default_out_queue;
 };
 
-enum qeth_send_errors {
-       QETH_SEND_ERROR_NONE,
-       QETH_SEND_ERROR_LINK_FAILURE,
-       QETH_SEND_ERROR_RETRY,
-       QETH_SEND_ERROR_KICK_IT,
-};
-
 #define QETH_ETH_MAC_V4      0x0100 /* like v4 */
 #define QETH_ETH_MAC_V6      0x3333 /* like v6 */
 /* tr mc mac is longer, but that will be enough to detect mc frames */
 #define QETH_TR_MAC_NC       0xc000 /* non-canonical */
 #define QETH_TR_MAC_C        0x0300 /* canonical */
 
-#define DEFAULT_ADD_HHLEN 0
-#define MAX_ADD_HHLEN 1024
-
 /**
  * buffer stuff for read channel
  */
@@ -644,7 +633,6 @@ struct qeth_reply {
        atomic_t refcnt;
 };
 
-
 struct qeth_card_blkt {
        int time_total;
        int inter_packet;
@@ -685,7 +673,6 @@ struct qeth_card_options {
        struct qeth_ipa_info ipa6;
        struct qeth_sbp_info sbp; /* SETBRIDGEPORT options */
        int fake_broadcast;
-       int add_hhlen;
        int layer2;
        int performance_stats;
        int rx_sg_cb;
@@ -717,17 +704,16 @@ struct qeth_discipline {
        void (*start_poll)(struct ccw_device *, int, unsigned long);
        qdio_handler_t *input_handler;
        qdio_handler_t *output_handler;
+       int (*process_rx_buffer)(struct qeth_card *card, int budget, int *done);
        int (*recover)(void *ptr);
        int (*setup) (struct ccwgroup_device *);
        void (*remove) (struct ccwgroup_device *);
        int (*set_online) (struct ccwgroup_device *);
        int (*set_offline) (struct ccwgroup_device *);
-       void (*shutdown)(struct ccwgroup_device *);
-       int (*prepare) (struct ccwgroup_device *);
-       void (*complete) (struct ccwgroup_device *);
        int (*freeze)(struct ccwgroup_device *);
        int (*thaw) (struct ccwgroup_device *);
        int (*restore)(struct ccwgroup_device *);
+       int (*do_ioctl)(struct net_device *dev, struct ifreq *rq, int cmd);
        int (*control_event_handler)(struct qeth_card *card,
                                        struct qeth_ipa_cmd *cmd);
 };
@@ -856,9 +842,9 @@ static inline int qeth_get_ip_version(struct sk_buff *skb)
 {
        __be16 *p = &((struct ethhdr *)skb->data)->h_proto;
 
-       if (*p == ETH_P_8021Q)
+       if (be16_to_cpu(*p) == ETH_P_8021Q)
                p += 2;
-       switch (*p) {
+       switch (be16_to_cpu(*p)) {
        case ETH_P_IPV6:
                return 6;
        case ETH_P_IP:
@@ -920,14 +906,12 @@ int qeth_send_ipa_cmd(struct qeth_card *, struct qeth_cmd_buffer *,
 struct qeth_cmd_buffer *qeth_get_ipacmd_buffer(struct qeth_card *,
                        enum qeth_ipa_cmds, enum qeth_prot_versions);
 int qeth_query_setadapterparms(struct qeth_card *);
-int qeth_check_qdio_errors(struct qeth_card *, struct qdio_buffer *,
-               unsigned int, const char *);
-void qeth_queue_input_buffer(struct qeth_card *, int);
 struct sk_buff *qeth_core_get_next_skb(struct qeth_card *,
                struct qeth_qdio_buffer *, struct qdio_buffer_element **, int *,
                struct qeth_hdr **);
 void qeth_schedule_recovery(struct qeth_card *);
 void qeth_qdio_start_poll(struct ccw_device *, int, unsigned long);
+int qeth_poll(struct napi_struct *napi, int budget);
 void qeth_qdio_input_handler(struct ccw_device *,
                unsigned int, unsigned int, int,
                int, unsigned long);
@@ -948,9 +932,6 @@ void qeth_prepare_control_data(struct qeth_card *, int,
 void qeth_release_buffer(struct qeth_channel *, struct qeth_cmd_buffer *);
 void qeth_prepare_ipa_cmd(struct qeth_card *, struct qeth_cmd_buffer *, char);
 struct qeth_cmd_buffer *qeth_wait_for_buffer(struct qeth_channel *);
-int qeth_mdio_read(struct net_device *, int, int);
-int qeth_snmp_command(struct qeth_card *, char __user *);
-int qeth_query_oat_command(struct qeth_card *, char __user *);
 int qeth_query_switch_attributes(struct qeth_card *card,
                                  struct qeth_switch_info *sw_info);
 int qeth_send_control_data(struct qeth_card *, int, struct qeth_cmd_buffer *,
@@ -961,19 +942,22 @@ int qeth_bridgeport_query_ports(struct qeth_card *card,
 int qeth_bridgeport_setrole(struct qeth_card *card, enum qeth_sbp_roles role);
 int qeth_bridgeport_an_set(struct qeth_card *card, int enable);
 int qeth_get_priority_queue(struct qeth_card *, struct sk_buff *, int, int);
-int qeth_get_elements_no(struct qeth_card *, struct sk_buff *, int);
+int qeth_get_elements_no(struct qeth_card *card, struct sk_buff *skb,
+                        int extra_elems, int data_offset);
 int qeth_get_elements_for_frags(struct sk_buff *);
 int qeth_do_send_packet_fast(struct qeth_card *, struct qeth_qdio_out_q *,
-                       struct sk_buff *, struct qeth_hdr *, int, int, int);
+                       struct sk_buff *, struct qeth_hdr *, int, int);
 int qeth_do_send_packet(struct qeth_card *, struct qeth_qdio_out_q *,
                    struct sk_buff *, struct qeth_hdr *, int);
+int qeth_do_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
 int qeth_core_get_sset_count(struct net_device *, int);
 void qeth_core_get_ethtool_stats(struct net_device *,
                                struct ethtool_stats *, u64 *);
 void qeth_core_get_strings(struct net_device *, u32, u8 *);
 void qeth_core_get_drvinfo(struct net_device *, struct ethtool_drvinfo *);
 void qeth_dbf_longtext(debug_info_t *id, int level, char *text, ...);
-int qeth_core_ethtool_get_settings(struct net_device *, struct ethtool_cmd *);
+int qeth_core_ethtool_get_link_ksettings(struct net_device *netdev,
+                                        struct ethtool_link_ksettings *cmd);
 int qeth_set_access_ctrl_online(struct qeth_card *card, int fallback);
 int qeth_hdr_chk_and_bounce(struct sk_buff *, struct qeth_hdr **, int);
 int qeth_configure_cq(struct qeth_card *, enum qeth_cq);
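
Two things fall out of this header diff: struct qeth_discipline loses the pure pass-through pm hooks (shutdown/prepare/complete) and grows process_rx_buffer() and do_ioctl(), the per-discipline halves of the newly shared qeth_poll() and qeth_do_ioctl() in qeth_core_main.c. A discipline then fills its ops table along these lines (illustrative initializer; the l2 hunks below supply the real process_rx_buffer):

    static struct qeth_discipline example_l2_discipline = {
            .process_rx_buffer = qeth_l2_process_inbound_buffer,
            .do_ioctl          = NULL,  /* no extra ioctls; l3 sets its own */
            /* .recover, .setup, .set_online, ... unchanged */
    };
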
index 315d8a2db7c066a0b8eb3739021edc9fde698c19..38114a8d56e00f471360ab400767dcb8ba8a1a7b 100644 (file)
@@ -55,7 +55,6 @@ static struct mutex qeth_mod_mutex;
 
 static void qeth_send_control_data_cb(struct qeth_channel *,
                        struct qeth_cmd_buffer *);
-static int qeth_issue_next_read(struct qeth_card *);
 static struct qeth_cmd_buffer *qeth_get_buffer(struct qeth_channel *);
 static void qeth_setup_ccw(struct qeth_channel *, unsigned char *, __u32);
 static void qeth_free_buffer_pool(struct qeth_card *);
@@ -1202,7 +1201,7 @@ static void qeth_notify_skbs(struct qeth_qdio_out_q *q,
        while (skb) {
                QETH_CARD_TEXT_(q->card, 5, "skbn%d", notification);
                QETH_CARD_TEXT_(q->card, 5, "%lx", (long) skb);
-               if (skb->protocol == ETH_P_AF_IUCV) {
+               if (be16_to_cpu(skb->protocol) == ETH_P_AF_IUCV) {
                        if (skb->sk) {
                                struct iucv_sock *iucv = iucv_sk(skb->sk);
                                iucv->sk_txnotify(skb, notification);
@@ -1233,7 +1232,8 @@ static void qeth_release_skbs(struct qeth_qdio_out_buffer *buf)
        while (skb) {
                QETH_CARD_TEXT(buf->q->card, 5, "skbr");
                QETH_CARD_TEXT_(buf->q->card, 5, "%lx", (long) skb);
-               if (notify_general_error && skb->protocol == ETH_P_AF_IUCV) {
+               if (notify_general_error &&
+                   be16_to_cpu(skb->protocol) == ETH_P_AF_IUCV) {
                        if (skb->sk) {
                                iucv = iucv_sk(skb->sk);
                                iucv->sk_txnotify(skb, TX_NOTIFY_GENERALERROR);
@@ -1396,7 +1396,6 @@ static void qeth_set_intial_options(struct qeth_card *card)
        card->options.route4.type = NO_ROUTER;
        card->options.route6.type = NO_ROUTER;
        card->options.fake_broadcast = 0;
-       card->options.add_hhlen = DEFAULT_ADD_HHLEN;
        card->options.performance_stats = 0;
        card->options.rx_sg_cb = QETH_RX_SG_CB;
        card->options.isolation = ISOLATION_MODE_NONE;
@@ -3217,8 +3216,10 @@ int qeth_hw_trap(struct qeth_card *card, enum qeth_diags_trap_action action)
 }
 EXPORT_SYMBOL_GPL(qeth_hw_trap);
 
-int qeth_check_qdio_errors(struct qeth_card *card, struct qdio_buffer *buf,
-               unsigned int qdio_error, const char *dbftext)
+static int qeth_check_qdio_errors(struct qeth_card *card,
+                                 struct qdio_buffer *buf,
+                                 unsigned int qdio_error,
+                                 const char *dbftext)
 {
        if (qdio_error) {
                QETH_CARD_TEXT(card, 2, dbftext);
@@ -3235,18 +3236,8 @@ int qeth_check_qdio_errors(struct qeth_card *card, struct qdio_buffer *buf,
        }
        return 0;
 }
-EXPORT_SYMBOL_GPL(qeth_check_qdio_errors);
 
-static void qeth_buffer_reclaim_work(struct work_struct *work)
-{
-       struct qeth_card *card = container_of(work, struct qeth_card,
-               buffer_reclaim_work.work);
-
-       QETH_CARD_TEXT_(card, 2, "brw:%x", card->reclaim_index);
-       qeth_queue_input_buffer(card, card->reclaim_index);
-}
-
-void qeth_queue_input_buffer(struct qeth_card *card, int index)
+static void qeth_queue_input_buffer(struct qeth_card *card, int index)
 {
        struct qeth_qdio_q *queue = card->qdio.in_q;
        struct list_head *lh;
@@ -3320,9 +3311,17 @@ void qeth_queue_input_buffer(struct qeth_card *card, int index)
                                          QDIO_MAX_BUFFERS_PER_Q;
        }
 }
-EXPORT_SYMBOL_GPL(qeth_queue_input_buffer);
 
-static int qeth_handle_send_error(struct qeth_card *card,
+static void qeth_buffer_reclaim_work(struct work_struct *work)
+{
+       struct qeth_card *card = container_of(work, struct qeth_card,
+               buffer_reclaim_work.work);
+
+       QETH_CARD_TEXT_(card, 2, "brw:%x", card->reclaim_index);
+       qeth_queue_input_buffer(card, card->reclaim_index);
+}
+
+static void qeth_handle_send_error(struct qeth_card *card,
                struct qeth_qdio_out_buffer *buffer, unsigned int qdio_err)
 {
        int sbalf15 = buffer->buffer->element[15].sflags;
@@ -3338,15 +3337,14 @@ static int qeth_handle_send_error(struct qeth_card *card,
        qeth_check_qdio_errors(card, buffer->buffer, qdio_err, "qouterr");
 
        if (!qdio_err)
-               return QETH_SEND_ERROR_NONE;
+               return;
 
        if ((sbalf15 >= 15) && (sbalf15 <= 31))
-               return QETH_SEND_ERROR_RETRY;
+               return;
 
        QETH_CARD_TEXT(card, 1, "lnkfail");
        QETH_CARD_TEXT_(card, 1, "%04x %02x",
                       (u16)qdio_err, (u8)sbalf15);
-       return QETH_SEND_ERROR_LINK_FAILURE;
 }
 
 /*
@@ -3799,9 +3797,9 @@ int qeth_get_priority_queue(struct qeth_card *card, struct sk_buff *skb,
                return qeth_cut_iqd_prio(card, ~skb->priority >> 1 & 3);
        case QETH_PRIO_Q_ING_VLAN:
                tci = &((struct ethhdr *)skb->data)->h_proto;
-               if (*tci == ETH_P_8021Q)
-                       return qeth_cut_iqd_prio(card, ~*(tci + 1) >>
-                       (VLAN_PRIO_SHIFT + 1) & 3);
+               if (be16_to_cpu(*tci) == ETH_P_8021Q)
+                       return qeth_cut_iqd_prio(card,
+                       ~be16_to_cpu(*(tci + 1)) >> (VLAN_PRIO_SHIFT + 1) & 3);
                break;
        default:
                break;
@@ -3837,6 +3835,7 @@ EXPORT_SYMBOL_GPL(qeth_get_elements_for_frags);
  * @card:                      qeth card structure, to check max. elems.
  * @skb:                       SKB address
  * @extra_elems:               extra elems needed, to check against max.
+ * @data_offset:               range starts at skb->data + data_offset
  *
  * Returns the number of pages, and thus QDIO buffer elements, needed to cover
  * skb data, including linear part and fragments. Checks if the result plus
@@ -3844,10 +3843,10 @@ EXPORT_SYMBOL_GPL(qeth_get_elements_for_frags);
  * Note: extra_elems is not included in the returned result.
  */
 int qeth_get_elements_no(struct qeth_card *card,
-                    struct sk_buff *skb, int extra_elems)
+                    struct sk_buff *skb, int extra_elems, int data_offset)
 {
        int elements = qeth_get_elements_for_range(
-                               (addr_t)skb->data,
+                               (addr_t)skb->data + data_offset,
                                (addr_t)skb->data + skb_headlen(skb)) +
                        qeth_get_elements_for_frags(skb);
 
@@ -4025,8 +4024,7 @@ static inline int qeth_fill_buffer(struct qeth_qdio_out_q *queue,
 
 int qeth_do_send_packet_fast(struct qeth_card *card,
                struct qeth_qdio_out_q *queue, struct sk_buff *skb,
-               struct qeth_hdr *hdr, int elements_needed,
-               int offset, int hd_len)
+               struct qeth_hdr *hdr, int offset, int hd_len)
 {
        struct qeth_qdio_out_buffer *buffer;
        int index;
@@ -4418,7 +4416,7 @@ void qeth_tx_timeout(struct net_device *dev)
 }
 EXPORT_SYMBOL_GPL(qeth_tx_timeout);
 
-int qeth_mdio_read(struct net_device *dev, int phy_id, int regnum)
+static int qeth_mdio_read(struct net_device *dev, int phy_id, int regnum)
 {
        struct qeth_card *card = dev->ml_priv;
        int rc = 0;
@@ -4481,7 +4479,6 @@ int qeth_mdio_read(struct net_device *dev, int phy_id, int regnum)
        }
        return rc;
 }
-EXPORT_SYMBOL_GPL(qeth_mdio_read);
 
 static int qeth_send_ipa_snmp_cmd(struct qeth_card *card,
                struct qeth_cmd_buffer *iob, int len,
@@ -4571,7 +4568,7 @@ static int qeth_snmp_command_cb(struct qeth_card *card,
        return 0;
 }
 
-int qeth_snmp_command(struct qeth_card *card, char __user *udata)
+static int qeth_snmp_command(struct qeth_card *card, char __user *udata)
 {
        struct qeth_cmd_buffer *iob;
        struct qeth_ipa_cmd *cmd;
@@ -4631,7 +4628,6 @@ out:
        kfree(qinfo.udata);
        return rc;
 }
-EXPORT_SYMBOL_GPL(qeth_snmp_command);
 
 static int qeth_setadpparms_query_oat_cb(struct qeth_card *card,
                struct qeth_reply *reply, unsigned long data)
@@ -4663,7 +4659,7 @@ static int qeth_setadpparms_query_oat_cb(struct qeth_card *card,
        return 0;
 }
 
-int qeth_query_oat_command(struct qeth_card *card, char __user *udata)
+static int qeth_query_oat_command(struct qeth_card *card, char __user *udata)
 {
        int rc = 0;
        struct qeth_cmd_buffer *iob;
@@ -4733,7 +4729,6 @@ out_free:
 out:
        return rc;
 }
-EXPORT_SYMBOL_GPL(qeth_query_oat_command);
 
 static int qeth_query_card_info_cb(struct qeth_card *card,
                                   struct qeth_reply *reply, unsigned long data)
@@ -4774,12 +4769,10 @@ static int qeth_query_card_info(struct qeth_card *card,
 
 static inline int qeth_get_qdio_q_format(struct qeth_card *card)
 {
-       switch (card->info.type) {
-       case QETH_CARD_TYPE_IQD:
-               return 2;
-       default:
-               return 0;
-       }
+       if (card->info.type == QETH_CARD_TYPE_IQD)
+               return QDIO_IQDIO_QFMT;
+       else
+               return QDIO_QETH_QFMT;
 }
 
 static void qeth_determine_capabilities(struct qeth_card *card)
@@ -4818,8 +4811,9 @@ static void qeth_determine_capabilities(struct qeth_card *card)
                QETH_DBF_TEXT_(SETUP, 2, "6err%d", rc);
 
        QETH_DBF_TEXT_(SETUP, 2, "qfmt%d", card->ssqd.qfmt);
-       QETH_DBF_TEXT_(SETUP, 2, "%d", card->ssqd.qdioac1);
-       QETH_DBF_TEXT_(SETUP, 2, "%d", card->ssqd.qdioac3);
+       QETH_DBF_TEXT_(SETUP, 2, "ac1:%02x", card->ssqd.qdioac1);
+       QETH_DBF_TEXT_(SETUP, 2, "ac2:%04x", card->ssqd.qdioac2);
+       QETH_DBF_TEXT_(SETUP, 2, "ac3:%04x", card->ssqd.qdioac3);
        QETH_DBF_TEXT_(SETUP, 2, "icnt%d", card->ssqd.icnt);
        if (!((card->ssqd.qfmt != QDIO_IQDIO_QFMT) ||
            ((card->ssqd.qdioac1 & CHSC_AC1_INITIATE_INPUTQ) == 0) ||
@@ -5287,6 +5281,83 @@ no_mem:
 }
 EXPORT_SYMBOL_GPL(qeth_core_get_next_skb);
 
+int qeth_poll(struct napi_struct *napi, int budget)
+{
+       struct qeth_card *card = container_of(napi, struct qeth_card, napi);
+       int work_done = 0;
+       struct qeth_qdio_buffer *buffer;
+       int done;
+       int new_budget = budget;
+
+       if (card->options.performance_stats) {
+               card->perf_stats.inbound_cnt++;
+               card->perf_stats.inbound_start_time = qeth_get_micros();
+       }
+
+       while (1) {
+               if (!card->rx.b_count) {
+                       card->rx.qdio_err = 0;
+                       card->rx.b_count = qdio_get_next_buffers(
+                               card->data.ccwdev, 0, &card->rx.b_index,
+                               &card->rx.qdio_err);
+                       if (card->rx.b_count <= 0) {
+                               card->rx.b_count = 0;
+                               break;
+                       }
+                       card->rx.b_element =
+                               &card->qdio.in_q->bufs[card->rx.b_index]
+                               .buffer->element[0];
+                       card->rx.e_offset = 0;
+               }
+
+               while (card->rx.b_count) {
+                       buffer = &card->qdio.in_q->bufs[card->rx.b_index];
+                       if (!(card->rx.qdio_err &&
+                           qeth_check_qdio_errors(card, buffer->buffer,
+                           card->rx.qdio_err, "qinerr")))
+                               work_done +=
+                                       card->discipline->process_rx_buffer(
+                                               card, new_budget, &done);
+                       else
+                               done = 1;
+
+                       if (done) {
+                               if (card->options.performance_stats)
+                                       card->perf_stats.bufs_rec++;
+                               qeth_put_buffer_pool_entry(card,
+                                       buffer->pool_entry);
+                               qeth_queue_input_buffer(card, card->rx.b_index);
+                               card->rx.b_count--;
+                               if (card->rx.b_count) {
+                                       card->rx.b_index =
+                                               (card->rx.b_index + 1) %
+                                               QDIO_MAX_BUFFERS_PER_Q;
+                                       card->rx.b_element =
+                                               &card->qdio.in_q
+                                               ->bufs[card->rx.b_index]
+                                               .buffer->element[0];
+                                       card->rx.e_offset = 0;
+                               }
+                       }
+
+                       if (work_done >= budget)
+                               goto out;
+                       else
+                               new_budget = budget - work_done;
+               }
+       }
+
+       napi_complete(napi);
+       if (qdio_start_irq(card->data.ccwdev, 0))
+               napi_schedule(&card->napi);
+out:
+       if (card->options.performance_stats)
+               card->perf_stats.inbound_time += qeth_get_micros() -
+                       card->perf_stats.inbound_start_time;
+       return work_done;
+}
+EXPORT_SYMBOL_GPL(qeth_poll);
+
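
qeth_poll(), hoisted here from the l2 discipline with the buffer processing routed through discipline->process_rx_buffer, follows the usual NAPI contract: consume at most budget packets and return the count actually done; only when the queue drains early, call napi_complete() and re-enable the interrupt, re-scheduling if new work raced in via qdio_start_irq(). Stripped of the qdio bookkeeping, the contract looks like:

    /* skeleton only; my_have_work()/my_process()/my_enable_irq() stand in
     * for the qdio-specific pieces above */
    static int my_poll(struct napi_struct *napi, int budget)
    {
            int work_done = 0;

            while (work_done < budget && my_have_work())
                    work_done += my_process(budget - work_done);

            if (work_done < budget) {       /* queue drained early */
                    napi_complete(napi);
                    if (my_enable_irq())    /* raced with new work? */
                            napi_schedule(napi);
            }
            return work_done;
    }
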
 int qeth_setassparms_cb(struct qeth_card *card,
                        struct qeth_reply *reply, unsigned long data)
 {
@@ -5677,23 +5748,12 @@ static int qeth_core_set_offline(struct ccwgroup_device *gdev)
 static void qeth_core_shutdown(struct ccwgroup_device *gdev)
 {
        struct qeth_card *card = dev_get_drvdata(&gdev->dev);
-       if (card->discipline && card->discipline->shutdown)
-               card->discipline->shutdown(gdev);
-}
-
-static int qeth_core_prepare(struct ccwgroup_device *gdev)
-{
-       struct qeth_card *card = dev_get_drvdata(&gdev->dev);
-       if (card->discipline && card->discipline->prepare)
-               return card->discipline->prepare(gdev);
-       return 0;
-}
-
-static void qeth_core_complete(struct ccwgroup_device *gdev)
-{
-       struct qeth_card *card = dev_get_drvdata(&gdev->dev);
-       if (card->discipline && card->discipline->complete)
-               card->discipline->complete(gdev);
+       qeth_set_allowed_threads(card, 0, 1);
+       if ((gdev->state == CCWGROUP_ONLINE) && card->info.hwtrap)
+               qeth_hw_trap(card, QETH_DIAGS_TRAP_DISARM);
+       qeth_qdio_clear_card(card, 0);
+       qeth_clear_qdio_buffers(card);
+       qdio_free(CARD_DDEV(card));
 }
 
 static int qeth_core_freeze(struct ccwgroup_device *gdev)
@@ -5730,8 +5790,8 @@ static struct ccwgroup_driver qeth_core_ccwgroup_driver = {
        .set_online = qeth_core_set_online,
        .set_offline = qeth_core_set_offline,
        .shutdown = qeth_core_shutdown,
-       .prepare = qeth_core_prepare,
-       .complete = qeth_core_complete,
+       .prepare = NULL,
+       .complete = NULL,
        .freeze = qeth_core_freeze,
        .thaw = qeth_core_thaw,
        .restore = qeth_core_restore,
@@ -5761,6 +5821,60 @@ static const struct attribute_group *qeth_drv_attr_groups[] = {
        NULL,
 };
 
+int qeth_do_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
+{
+       struct qeth_card *card = dev->ml_priv;
+       struct mii_ioctl_data *mii_data;
+       int rc = 0;
+
+       if (!card)
+               return -ENODEV;
+
+       if (!qeth_card_hw_is_reachable(card))
+               return -ENODEV;
+
+       if (card->info.type == QETH_CARD_TYPE_OSN)
+               return -EPERM;
+
+       switch (cmd) {
+       case SIOC_QETH_ADP_SET_SNMP_CONTROL:
+               rc = qeth_snmp_command(card, rq->ifr_ifru.ifru_data);
+               break;
+       case SIOC_QETH_GET_CARD_TYPE:
+               if ((card->info.type == QETH_CARD_TYPE_OSD ||
+                    card->info.type == QETH_CARD_TYPE_OSM ||
+                    card->info.type == QETH_CARD_TYPE_OSX) &&
+                   !card->info.guestlan)
+                       return 1;
+               else
+                       return 0;
+       case SIOCGMIIPHY:
+               mii_data = if_mii(rq);
+               mii_data->phy_id = 0;
+               break;
+       case SIOCGMIIREG:
+               mii_data = if_mii(rq);
+               if (mii_data->phy_id != 0)
+                       rc = -EINVAL;
+               else
+                       mii_data->val_out = qeth_mdio_read(dev,
+                               mii_data->phy_id, mii_data->reg_num);
+               break;
+       case SIOC_QETH_QUERY_OAT:
+               rc = qeth_query_oat_command(card, rq->ifr_ifru.ifru_data);
+               break;
+       default:
+               if (card->discipline->do_ioctl)
+                       rc = card->discipline->do_ioctl(dev, rq, cmd);
+               else
+                       rc = -EOPNOTSUPP;
+       }
+       if (rc)
+               QETH_CARD_TEXT_(card, 2, "ioce%x", rc);
+       return rc;
+}
+EXPORT_SYMBOL_GPL(qeth_do_ioctl);
+
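
qeth_do_ioctl() now carries the ioctls common to both disciplines (SNMP control, card type query, SIOCGMIIPHY/SIOCGMIIREG, OAT query) and hands anything else to discipline->do_ioctl, which is how the duplicated qeth_l2_do_ioctl() below can simply be deleted. A discipline-side fallback would look roughly like this (the command name is hypothetical):

    static int example_l3_do_ioctl(struct net_device *dev, struct ifreq *rq,
                                   int cmd)
    {
            switch (cmd) {
            case SIOC_QETH_EXAMPLE_PRIVATE: /* hypothetical private cmd */
                    return example_handle_private(dev, rq);
            default:
                    return -EOPNOTSUPP;     /* logged by qeth_do_ioctl() */
            }
    }
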
 static struct {
        const char str[ETH_GSTRING_LEN];
 } qeth_ethtool_stats_keys[] = {
@@ -5895,104 +6009,124 @@ void qeth_core_get_drvinfo(struct net_device *dev,
 }
 EXPORT_SYMBOL_GPL(qeth_core_get_drvinfo);
 
-/* Helper function to fill 'advertizing' and 'supported' which are the same. */
-/* Autoneg and full-duplex are supported and advertized uncondionally.      */
-/* Always advertize and support all speeds up to specified, and only one     */
+/* Helper function to fill 'advertising' and 'supported' which are the same. */
+/* Autoneg and full-duplex are supported and advertised unconditionally.     */
+/* Always advertise and support all speeds up to specified, and only one     */
 /* specified port type.                                                             */
-static void qeth_set_ecmd_adv_sup(struct ethtool_cmd *ecmd,
+static void qeth_set_cmd_adv_sup(struct ethtool_link_ksettings *cmd,
                                int maxspeed, int porttype)
 {
-       int port_sup, port_adv, spd_sup, spd_adv;
+       ethtool_link_ksettings_zero_link_mode(cmd, supported);
+       ethtool_link_ksettings_zero_link_mode(cmd, advertising);
+       ethtool_link_ksettings_zero_link_mode(cmd, lp_advertising);
+
+       ethtool_link_ksettings_add_link_mode(cmd, supported, Autoneg);
+       ethtool_link_ksettings_add_link_mode(cmd, advertising, Autoneg);
 
        switch (porttype) {
        case PORT_TP:
-               port_sup = SUPPORTED_TP;
-               port_adv = ADVERTISED_TP;
+               ethtool_link_ksettings_add_link_mode(cmd, supported, TP);
+               ethtool_link_ksettings_add_link_mode(cmd, advertising, TP);
                break;
        case PORT_FIBRE:
-               port_sup = SUPPORTED_FIBRE;
-               port_adv = ADVERTISED_FIBRE;
+               ethtool_link_ksettings_add_link_mode(cmd, supported, FIBRE);
+               ethtool_link_ksettings_add_link_mode(cmd, advertising, FIBRE);
                break;
        default:
-               port_sup = SUPPORTED_TP;
-               port_adv = ADVERTISED_TP;
+               ethtool_link_ksettings_add_link_mode(cmd, supported, TP);
+               ethtool_link_ksettings_add_link_mode(cmd, advertising, TP);
                WARN_ON_ONCE(1);
        }
 
-       /* "Fallthrough" case'es ordered from high to low result in setting  */
-       /* flags cumulatively, starting from the specified speed and down to */
-       /* the lowest possible.                                              */
-       spd_sup = 0;
-       spd_adv = 0;
+       /* fallthrough from high to low, to select all legal speeds: */
        switch (maxspeed) {
        case SPEED_10000:
-               spd_sup |= SUPPORTED_10000baseT_Full;
-               spd_adv |= ADVERTISED_10000baseT_Full;
+               ethtool_link_ksettings_add_link_mode(cmd, supported,
+                                                    10000baseT_Full);
+               ethtool_link_ksettings_add_link_mode(cmd, advertising,
+                                                    10000baseT_Full);
        case SPEED_1000:
-               spd_sup |= SUPPORTED_1000baseT_Half | SUPPORTED_1000baseT_Full;
-               spd_adv |= ADVERTISED_1000baseT_Half |
-                                               ADVERTISED_1000baseT_Full;
+               ethtool_link_ksettings_add_link_mode(cmd, supported,
+                                                    1000baseT_Full);
+               ethtool_link_ksettings_add_link_mode(cmd, advertising,
+                                                    1000baseT_Full);
+               ethtool_link_ksettings_add_link_mode(cmd, supported,
+                                                    1000baseT_Half);
+               ethtool_link_ksettings_add_link_mode(cmd, advertising,
+                                                    1000baseT_Half);
        case SPEED_100:
-               spd_sup |= SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full;
-               spd_adv |= ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full;
+               ethtool_link_ksettings_add_link_mode(cmd, supported,
+                                                    100baseT_Full);
+               ethtool_link_ksettings_add_link_mode(cmd, advertising,
+                                                    100baseT_Full);
+               ethtool_link_ksettings_add_link_mode(cmd, supported,
+                                                    100baseT_Half);
+               ethtool_link_ksettings_add_link_mode(cmd, advertising,
+                                                    100baseT_Half);
        case SPEED_10:
-               spd_sup |= SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full;
-               spd_adv |= ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full;
-       break;
+               ethtool_link_ksettings_add_link_mode(cmd, supported,
+                                                    10baseT_Full);
+               ethtool_link_ksettings_add_link_mode(cmd, advertising,
+                                                    10baseT_Full);
+               ethtool_link_ksettings_add_link_mode(cmd, supported,
+                                                    10baseT_Half);
+               ethtool_link_ksettings_add_link_mode(cmd, advertising,
+                                                    10baseT_Half);
+               /* end fallthrough */
+               break;
        default:
-               spd_sup = SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full;
-               spd_adv = ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full;
+               ethtool_link_ksettings_add_link_mode(cmd, supported,
+                                                    10baseT_Full);
+               ethtool_link_ksettings_add_link_mode(cmd, advertising,
+                                                    10baseT_Full);
+               ethtool_link_ksettings_add_link_mode(cmd, supported,
+                                                    10baseT_Half);
+               ethtool_link_ksettings_add_link_mode(cmd, advertising,
+                                                    10baseT_Half);
                WARN_ON_ONCE(1);
        }
-       ecmd->advertising = ADVERTISED_Autoneg | port_adv | spd_adv;
-       ecmd->supported = SUPPORTED_Autoneg | port_sup | spd_sup;
 }
 
-int qeth_core_ethtool_get_settings(struct net_device *netdev,
-                                       struct ethtool_cmd *ecmd)
+int qeth_core_ethtool_get_link_ksettings(struct net_device *netdev,
+               struct ethtool_link_ksettings *cmd)
 {
        struct qeth_card *card = netdev->ml_priv;
        enum qeth_link_types link_type;
        struct carrier_info carrier_info;
        int rc;
-       u32 speed;
 
        if ((card->info.type == QETH_CARD_TYPE_IQD) || (card->info.guestlan))
                link_type = QETH_LINK_TYPE_10GBIT_ETH;
        else
                link_type = card->info.link_type;
 
-       ecmd->transceiver = XCVR_INTERNAL;
-       ecmd->duplex = DUPLEX_FULL;
-       ecmd->autoneg = AUTONEG_ENABLE;
+       cmd->base.duplex = DUPLEX_FULL;
+       cmd->base.autoneg = AUTONEG_ENABLE;
+       cmd->base.phy_address = 0;
+       cmd->base.mdio_support = 0;
+       cmd->base.eth_tp_mdix = ETH_TP_MDI_INVALID;
+       cmd->base.eth_tp_mdix_ctrl = ETH_TP_MDI_INVALID;
 
        switch (link_type) {
        case QETH_LINK_TYPE_FAST_ETH:
        case QETH_LINK_TYPE_LANE_ETH100:
-               qeth_set_ecmd_adv_sup(ecmd, SPEED_100, PORT_TP);
-               speed = SPEED_100;
-               ecmd->port = PORT_TP;
+               cmd->base.speed = SPEED_100;
+               cmd->base.port = PORT_TP;
                break;
-
        case QETH_LINK_TYPE_GBIT_ETH:
        case QETH_LINK_TYPE_LANE_ETH1000:
-               qeth_set_ecmd_adv_sup(ecmd, SPEED_1000, PORT_FIBRE);
-               speed = SPEED_1000;
-               ecmd->port = PORT_FIBRE;
+               cmd->base.speed = SPEED_1000;
+               cmd->base.port = PORT_FIBRE;
                break;
-
        case QETH_LINK_TYPE_10GBIT_ETH:
-               qeth_set_ecmd_adv_sup(ecmd, SPEED_10000, PORT_FIBRE);
-               speed = SPEED_10000;
-               ecmd->port = PORT_FIBRE;
+               cmd->base.speed = SPEED_10000;
+               cmd->base.port = PORT_FIBRE;
                break;
-
        default:
-               qeth_set_ecmd_adv_sup(ecmd, SPEED_10, PORT_TP);
-               speed = SPEED_10;
-               ecmd->port = PORT_TP;
+               cmd->base.speed = SPEED_10;
+               cmd->base.port = PORT_TP;
        }
-       ethtool_cmd_speed_set(ecmd, speed);
+       qeth_set_cmd_adv_sup(cmd, cmd->base.speed, cmd->base.port);
 
        /* Check if we can obtain more accurate information.     */
        /* If QUERY_CARD_INFO command is not supported or fails, */
@@ -6017,49 +6151,48 @@ int qeth_core_ethtool_get_settings(struct net_device *netdev,
        switch (carrier_info.card_type) {
        case CARD_INFO_TYPE_1G_COPPER_A:
        case CARD_INFO_TYPE_1G_COPPER_B:
-               qeth_set_ecmd_adv_sup(ecmd, SPEED_1000, PORT_TP);
-               ecmd->port = PORT_TP;
+               cmd->base.port = PORT_TP;
+               qeth_set_cmd_adv_sup(cmd, SPEED_1000, cmd->base.port);
                break;
        case CARD_INFO_TYPE_1G_FIBRE_A:
        case CARD_INFO_TYPE_1G_FIBRE_B:
-               qeth_set_ecmd_adv_sup(ecmd, SPEED_1000, PORT_FIBRE);
-               ecmd->port = PORT_FIBRE;
+               cmd->base.port = PORT_FIBRE;
+               qeth_set_cmd_adv_sup(cmd, SPEED_1000, cmd->base.port);
                break;
        case CARD_INFO_TYPE_10G_FIBRE_A:
        case CARD_INFO_TYPE_10G_FIBRE_B:
-               qeth_set_ecmd_adv_sup(ecmd, SPEED_10000, PORT_FIBRE);
-               ecmd->port = PORT_FIBRE;
+               cmd->base.port = PORT_FIBRE;
+               qeth_set_cmd_adv_sup(cmd, SPEED_10000, cmd->base.port);
                break;
        }
 
        switch (carrier_info.port_mode) {
        case CARD_INFO_PORTM_FULLDUPLEX:
-               ecmd->duplex = DUPLEX_FULL;
+               cmd->base.duplex = DUPLEX_FULL;
                break;
        case CARD_INFO_PORTM_HALFDUPLEX:
-               ecmd->duplex = DUPLEX_HALF;
+               cmd->base.duplex = DUPLEX_HALF;
                break;
        }
 
        switch (carrier_info.port_speed) {
        case CARD_INFO_PORTS_10M:
-               speed = SPEED_10;
+               cmd->base.speed = SPEED_10;
                break;
        case CARD_INFO_PORTS_100M:
-               speed = SPEED_100;
+               cmd->base.speed = SPEED_100;
                break;
        case CARD_INFO_PORTS_1G:
-               speed = SPEED_1000;
+               cmd->base.speed = SPEED_1000;
                break;
        case CARD_INFO_PORTS_10G:
-               speed = SPEED_10000;
+               cmd->base.speed = SPEED_10000;
                break;
        }
-       ethtool_cmd_speed_set(ecmd, speed);
 
        return 0;
 }
-EXPORT_SYMBOL_GPL(qeth_core_ethtool_get_settings);
+EXPORT_SYMBOL_GPL(qeth_core_ethtool_get_link_ksettings);
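
The hunks above complete the ethtool_cmd to ethtool_link_ksettings migration: the fixed-width supported/advertising bitmasks and the SUPPORTED_*/ADVERTISED_* flag arithmetic give way to arbitrarily wide link-mode bitmaps driven through the ethtool_link_ksettings_* helpers, and speed/duplex/port become plain fields in cmd->base instead of going through ethtool_cmd_speed_set(). The new-style callback shape, with arbitrarily chosen values:

    #include <linux/ethtool.h>

    static int example_get_link_ksettings(struct net_device *dev,
                                          struct ethtool_link_ksettings *cmd)
    {
            ethtool_link_ksettings_zero_link_mode(cmd, supported);
            ethtool_link_ksettings_zero_link_mode(cmd, advertising);

            ethtool_link_ksettings_add_link_mode(cmd, supported, Autoneg);
            ethtool_link_ksettings_add_link_mode(cmd, supported,
                                                 1000baseT_Full);

            cmd->base.speed   = SPEED_1000;   /* plain fields replace the */
            cmd->base.duplex  = DUPLEX_FULL;  /* old accessor helpers     */
            cmd->base.port    = PORT_FIBRE;
            cmd->base.autoneg = AUTONEG_ENABLE;
            return 0;
    }
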
 
 /* Callback to handle checksum offload command reply from OSA card.
  * Verify that required features have been enabled on the card.
index bc69d0a338ad715316b5e1aa271969e07c8da799..4accb0a61ce0be1001db084248506f480242cdcd 100644 (file)
@@ -29,7 +29,6 @@ extern unsigned char IPA_PDU_HEADER[];
 #define QETH_TIMEOUT           (10 * HZ)
 #define QETH_IPA_TIMEOUT       (45 * HZ)
 #define QETH_IDX_COMMAND_SEQNO 0xffff0000
-#define SR_INFO_LEN            16
 
 #define QETH_CLEAR_CHANNEL_PARM        -10
 #define QETH_HALT_CHANNEL_PARM -11
@@ -65,7 +64,6 @@ enum qeth_link_types {
        QETH_LINK_TYPE_LANE_TR      = 0x82,
        QETH_LINK_TYPE_LANE_ETH1000 = 0x83,
        QETH_LINK_TYPE_LANE         = 0x88,
-       QETH_LINK_TYPE_ATM_NATIVE   = 0x90,
 };
 
 /*
@@ -185,8 +183,6 @@ enum qeth_ipa_return_codes {
        IPA_RC_ENOMEM                   = 0xfffe,
        IPA_RC_FFFF                     = 0xffff
 };
-/* for DELIP */
-#define IPA_RC_IP_ADDRESS_NOT_DEFINED  IPA_RC_PRIMARY_ALREADY_DEFINED
 /* for SET_DIAGNOSTIC_ASSIST */
 #define IPA_RC_INVALID_SUBCMD          IPA_RC_IP_TABLE_FULL
 #define IPA_RC_HARDWARE_AUTH_ERROR     IPA_RC_UNKNOWN_ERROR
@@ -631,14 +627,6 @@ enum qeth_ipa_addr_change_code {
        IPA_ADDR_CHANGE_CODE_MACADDR            = 0x02,
        IPA_ADDR_CHANGE_CODE_REMOVAL            = 0x80, /* else addition */
 };
-enum qeth_ipa_addr_change_retcode {
-       IPA_ADDR_CHANGE_RETCODE_OK              = 0x0000,
-       IPA_ADDR_CHANGE_RETCODE_LOSTEVENTS      = 0x0010,
-};
-enum qeth_ipa_addr_change_lostmask {
-       IPA_ADDR_CHANGE_MASK_OVERFLOW           = 0x01,
-       IPA_ADDR_CHANGE_MASK_STATECHANGE        = 0x02,
-};
 
 struct qeth_ipacmd_addr_change_entry {
        struct net_if_token token;
@@ -817,9 +805,4 @@ extern unsigned char IDX_ACTIVATE_WRITE[];
        ((buffer) && \
         (*(buffer + ((*(buffer + 0x0b)) + 4)) == 0xc1))
 
-#define ADDR_FRAME_TYPE_DIX 1
-#define ADDR_FRAME_TYPE_802_3 2
-#define ADDR_FRAME_TYPE_TR_WITHOUT_SR 0x10
-#define ADDR_FRAME_TYPE_TR_WITH_SR 0x20
-
 #endif
index bea483307618996240cb90cc3382950ab8b38354..1b07f382d74c955974d138271bdce2bbbf594f63 100644 (file)
@@ -16,7 +16,6 @@
 #include <linux/kernel.h>
 #include <linux/slab.h>
 #include <linux/etherdevice.h>
-#include <linux/mii.h>
 #include <linux/ip.h>
 #include <linux/list.h>
 #include <linux/hash.h>
 static int qeth_l2_set_offline(struct ccwgroup_device *);
 static int qeth_l2_stop(struct net_device *);
 static void qeth_l2_set_rx_mode(struct net_device *);
-static int qeth_l2_recover(void *);
 static void qeth_bridgeport_query_support(struct qeth_card *card);
 static void qeth_bridge_state_change(struct qeth_card *card,
                                        struct qeth_ipa_cmd *cmd);
 static void qeth_bridge_host_event(struct qeth_card *card,
                                        struct qeth_ipa_cmd *cmd);
 
-static int qeth_l2_do_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
-{
-       struct qeth_card *card = dev->ml_priv;
-       struct mii_ioctl_data *mii_data;
-       int rc = 0;
-
-       if (!card)
-               return -ENODEV;
-
-       if (!qeth_card_hw_is_reachable(card))
-               return -ENODEV;
-
-       if (card->info.type == QETH_CARD_TYPE_OSN)
-               return -EPERM;
-
-       switch (cmd) {
-       case SIOC_QETH_ADP_SET_SNMP_CONTROL:
-               rc = qeth_snmp_command(card, rq->ifr_ifru.ifru_data);
-               break;
-       case SIOC_QETH_GET_CARD_TYPE:
-               if ((card->info.type == QETH_CARD_TYPE_OSD ||
-                    card->info.type == QETH_CARD_TYPE_OSM ||
-                    card->info.type == QETH_CARD_TYPE_OSX) &&
-                   !card->info.guestlan)
-                       return 1;
-               return 0;
-               break;
-       case SIOCGMIIPHY:
-               mii_data = if_mii(rq);
-               mii_data->phy_id = 0;
-               break;
-       case SIOCGMIIREG:
-               mii_data = if_mii(rq);
-               if (mii_data->phy_id != 0)
-                       rc = -EINVAL;
-               else
-                       mii_data->val_out = qeth_mdio_read(dev,
-                               mii_data->phy_id, mii_data->reg_num);
-               break;
-       case SIOC_QETH_QUERY_OAT:
-               rc = qeth_query_oat_command(card, rq->ifr_ifru.ifru_data);
-               break;
-       default:
-               rc = -EOPNOTSUPP;
-       }
-       if (rc)
-               QETH_CARD_TEXT_(card, 2, "ioce%d", rc);
-       return rc;
-}
-
 static int qeth_l2_verify_dev(struct net_device *dev)
 {
        struct qeth_card *card;
@@ -332,7 +280,7 @@ static void qeth_l2_fill_header(struct qeth_card *card, struct qeth_hdr *hdr,
        else
                hdr->hdr.l2.flags[2] |= QETH_LAYER2_FLAG_UNICAST;
 
-       hdr->hdr.l2.pkt_length = skb->len-QETH_HEADER_SIZE;
+       hdr->hdr.l2.pkt_length = skb->len - sizeof(struct qeth_hdr);
        /* VSWITCH relies on the VLAN
         * information to be present in
         * the QDIO header */
@@ -552,81 +500,6 @@ static int qeth_l2_process_inbound_buffer(struct qeth_card *card,
        return work_done;
 }
 
-static int qeth_l2_poll(struct napi_struct *napi, int budget)
-{
-       struct qeth_card *card = container_of(napi, struct qeth_card, napi);
-       int work_done = 0;
-       struct qeth_qdio_buffer *buffer;
-       int done;
-       int new_budget = budget;
-
-       if (card->options.performance_stats) {
-               card->perf_stats.inbound_cnt++;
-               card->perf_stats.inbound_start_time = qeth_get_micros();
-       }
-
-       while (1) {
-               if (!card->rx.b_count) {
-                       card->rx.qdio_err = 0;
-                       card->rx.b_count = qdio_get_next_buffers(
-                               card->data.ccwdev, 0, &card->rx.b_index,
-                               &card->rx.qdio_err);
-                       if (card->rx.b_count <= 0) {
-                               card->rx.b_count = 0;
-                               break;
-                       }
-                       card->rx.b_element =
-                               &card->qdio.in_q->bufs[card->rx.b_index]
-                               .buffer->element[0];
-                       card->rx.e_offset = 0;
-               }
-
-               while (card->rx.b_count) {
-                       buffer = &card->qdio.in_q->bufs[card->rx.b_index];
-                       if (!(card->rx.qdio_err &&
-                           qeth_check_qdio_errors(card, buffer->buffer,
-                           card->rx.qdio_err, "qinerr")))
-                               work_done += qeth_l2_process_inbound_buffer(
-                                       card, new_budget, &done);
-                       else
-                               done = 1;
-
-                       if (done) {
-                               if (card->options.performance_stats)
-                                       card->perf_stats.bufs_rec++;
-                               qeth_put_buffer_pool_entry(card,
-                                       buffer->pool_entry);
-                               qeth_queue_input_buffer(card, card->rx.b_index);
-                               card->rx.b_count--;
-                               if (card->rx.b_count) {
-                                       card->rx.b_index =
-                                               (card->rx.b_index + 1) %
-                                               QDIO_MAX_BUFFERS_PER_Q;
-                                       card->rx.b_element =
-                                               &card->qdio.in_q
-                                               ->bufs[card->rx.b_index]
-                                               .buffer->element[0];
-                                       card->rx.e_offset = 0;
-                               }
-                       }
-
-                       if (work_done >= budget)
-                               goto out;
-                       else
-                               new_budget = budget - work_done;
-               }
-       }
-
-       napi_complete(napi);
-       if (qdio_start_irq(card->data.ccwdev, 0))
-               napi_schedule(&card->napi);
-out:
-       if (card->options.performance_stats)
-               card->perf_stats.inbound_time += qeth_get_micros() -
-                       card->perf_stats.inbound_start_time;
-       return work_done;
-}
-
 static int qeth_l2_request_initial_mac(struct qeth_card *card)
 {
        int rc = 0;
@@ -808,7 +681,8 @@ static void qeth_l2_set_rx_mode(struct net_device *dev)
                qeth_promisc_to_bridge(card);
 }
 
-static int qeth_l2_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
+static netdev_tx_t qeth_l2_hard_start_xmit(struct sk_buff *skb,
+                                          struct net_device *dev)
 {
        int rc;
        struct qeth_hdr *hdr = NULL;
@@ -849,7 +723,7 @@ static int qeth_l2_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
         * chaining we cannot send long frag lists
         */
        if ((card->info.type != QETH_CARD_TYPE_IQD) &&
-           !qeth_get_elements_no(card, new_skb, 0)) {
+           !qeth_get_elements_no(card, new_skb, 0, 0)) {
                int lin_rc = skb_linearize(new_skb);
 
                if (card->options.performance_stats) {
@@ -894,7 +768,8 @@ static int qeth_l2_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
                }
        }
 
-       elements = qeth_get_elements_no(card, new_skb, elements_needed);
+       elements = qeth_get_elements_no(card, new_skb, elements_needed,
+                                       (data_offset > 0) ? data_offset : 0);
        if (!elements) {
                if (data_offset >= 0)
                        kmem_cache_free(qeth_core_header_cache, hdr);
@@ -909,7 +784,7 @@ static int qeth_l2_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
                                         elements);
        } else
                rc = qeth_do_send_packet_fast(card, queue, new_skb, hdr,
-                                       elements, data_offset, hd_len);
+                                             data_offset, hd_len);
        if (!rc) {
                card->stats.tx_packets++;
                card->stats.tx_bytes += tx_bytes;
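qeth_get_elements_no() grows a fourth argument at every call site in this patch: the number of leading skb bytes (data_offset) that travel in a separately allocated header element and therefore must not be counted against the buffer-element budget. A hedged sketch of the adjusted helper; the parameter names and the range helper are assumptions, since only the call sites appear in this diff:

int qeth_get_elements_no(struct qeth_card *card, struct sk_buff *skb,
			 int extra_elems, int data_offset)
{
	/* elements for the linear part, minus the out-of-line header
	 * bytes, plus one element per page fragment */
	int elements = qeth_get_elements_for_range(
				(addr_t)skb->data + data_offset,
				(addr_t)skb->data + skb_headlen(skb)) +
			skb_shinfo(skb)->nr_frags;

	if ((elements + extra_elems) > QETH_MAX_BUFFER_ELEMENTS(card)) {
		QETH_DBF_MESSAGE(2, "Invalid size of IP packet "
				 "(Number=%d / Length=%d). Discarded.\n",
				 elements + extra_elems, skb->len);
		return 0;
	}
	return elements;
}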
@@ -1042,7 +917,7 @@ static const struct ethtool_ops qeth_l2_ethtool_ops = {
        .get_ethtool_stats = qeth_core_get_ethtool_stats,
        .get_sset_count = qeth_core_get_sset_count,
        .get_drvinfo = qeth_core_get_drvinfo,
-       .get_settings = qeth_core_ethtool_get_settings,
+       .get_link_ksettings = qeth_core_ethtool_get_link_ksettings,
 };
 
 static const struct ethtool_ops qeth_l2_osn_ops = {
@@ -1059,7 +934,7 @@ static const struct net_device_ops qeth_l2_netdev_ops = {
        .ndo_start_xmit         = qeth_l2_hard_start_xmit,
        .ndo_validate_addr      = eth_validate_addr,
        .ndo_set_rx_mode        = qeth_l2_set_rx_mode,
-       .ndo_do_ioctl           = qeth_l2_do_ioctl,
+       .ndo_do_ioctl           = qeth_do_ioctl,
        .ndo_set_mac_address    = qeth_l2_set_mac_address,
        .ndo_change_mtu         = qeth_change_mtu,
        .ndo_vlan_rx_add_vid    = qeth_l2_vlan_rx_add_vid,
@@ -1116,7 +991,7 @@ static int qeth_l2_setup_netdev(struct qeth_card *card)
        card->dev->gso_max_size = (QETH_MAX_BUFFER_ELEMENTS(card) - 1) *
                                  PAGE_SIZE;
        SET_NETDEV_DEV(card->dev, &card->gdev->dev);
-       netif_napi_add(card->dev, &card->napi, qeth_l2_poll, QETH_NAPI_WEIGHT);
+       netif_napi_add(card->dev, &card->napi, qeth_poll, QETH_NAPI_WEIGHT);
        netif_carrier_off(card->dev);
        return register_netdev(card->dev);
 }
@@ -1326,17 +1201,6 @@ static void __exit qeth_l2_exit(void)
        pr_info("unregister layer 2 discipline\n");
 }
 
-static void qeth_l2_shutdown(struct ccwgroup_device *gdev)
-{
-       struct qeth_card *card = dev_get_drvdata(&gdev->dev);
-       qeth_set_allowed_threads(card, 0, 1);
-       if ((gdev->state == CCWGROUP_ONLINE) && card->info.hwtrap)
-               qeth_hw_trap(card, QETH_DIAGS_TRAP_DISARM);
-       qeth_qdio_clear_card(card, 0);
-       qeth_clear_qdio_buffers(card);
-       qdio_free(CARD_DDEV(card));
-}
-
 static int qeth_l2_pm_suspend(struct ccwgroup_device *gdev)
 {
        struct qeth_card *card = dev_get_drvdata(&gdev->dev);
@@ -1408,15 +1272,16 @@ struct qeth_discipline qeth_l2_discipline = {
        .start_poll = qeth_qdio_start_poll,
        .input_handler = (qdio_handler_t *) qeth_qdio_input_handler,
        .output_handler = (qdio_handler_t *) qeth_qdio_output_handler,
+       .process_rx_buffer = qeth_l2_process_inbound_buffer,
        .recover = qeth_l2_recover,
        .setup = qeth_l2_probe_device,
        .remove = qeth_l2_remove_device,
        .set_online = qeth_l2_set_online,
        .set_offline = qeth_l2_set_offline,
-       .shutdown = qeth_l2_shutdown,
        .freeze = qeth_l2_pm_suspend,
        .thaw = qeth_l2_pm_resume,
        .restore = qeth_l2_pm_resume,
+       .do_ioctl = NULL,
        .control_event_handler = qeth_l2_control_event,
 };
 EXPORT_SYMBOL_GPL(qeth_l2_discipline);
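The poll routine removed above was identical, line for line, to the qeth_l3_poll copy removed later in this patch, except for the call into the layer-specific inbound-buffer handler. Both registrations now point at a shared qeth_poll, presumably living in qeth_core, which dispatches through the new discipline->process_rx_buffer hook wired up at the bottom of each discipline. A sketch of that shared loop, reconstructed from the deleted body (performance-stats accounting elided):

static int qeth_poll(struct napi_struct *napi, int budget)
{
	struct qeth_card *card = container_of(napi, struct qeth_card, napi);
	int work_done = 0;
	struct qeth_qdio_buffer *buffer;
	int done;
	int new_budget = budget;

	while (1) {
		if (!card->rx.b_count) {
			/* refill card->rx from QDIO; reset b_element and
			 * e_offset exactly as in the deleted copies */
			card->rx.qdio_err = 0;
			card->rx.b_count = qdio_get_next_buffers(
				card->data.ccwdev, 0, &card->rx.b_index,
				&card->rx.qdio_err);
			if (card->rx.b_count <= 0) {
				card->rx.b_count = 0;
				break;
			}
		}

		while (card->rx.b_count) {
			buffer = &card->qdio.in_q->bufs[card->rx.b_index];
			if (!(card->rx.qdio_err &&
			    qeth_check_qdio_errors(card, buffer->buffer,
			    card->rx.qdio_err, "qinerr")))
				/* the only line the two deleted copies
				 * differed in: dispatch via the discipline */
				work_done += card->discipline->process_rx_buffer(
					card, new_budget, &done);
			else
				done = 1;

			if (done) {
				qeth_put_buffer_pool_entry(card,
					buffer->pool_entry);
				qeth_queue_input_buffer(card, card->rx.b_index);
				card->rx.b_count--;
				/* advance b_index/b_element/e_offset to the
				 * next buffer, as in the deleted copies */
			}

			if (work_done >= budget)
				goto out;
			new_budget = budget - work_done;
		}
	}

	napi_complete(napi);
	if (qdio_start_irq(card->data.ccwdev, 0))
		napi_schedule(&card->napi);
out:
	return work_done;
}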
index 692db49e3d2a368d36f76bfffe6200db6782627c..687972356d6b00f8776cb332e31b1ae0063183a5 100644 (file)
@@ -8,9 +8,6 @@
 #include "qeth_core.h"
 #include "qeth_l2.h"
 
-#define QETH_DEVICE_ATTR(_id, _name, _mode, _show, _store) \
-struct device_attribute dev_attr_##_id = __ATTR(_name, _mode, _show, _store)
-
 static ssize_t qeth_bridge_port_role_state_show(struct device *dev,
                                struct device_attribute *attr, char *buf,
                                int show_state)
index 06d0addcc058dcccd4333a8f3edcdd96fccb2de4..6e0354ef4b8629827e4c8ef40b9d569bc11e354b 100644 (file)
@@ -16,7 +16,6 @@
 #include <linux/errno.h>
 #include <linux/kernel.h>
 #include <linux/etherdevice.h>
-#include <linux/mii.h>
 #include <linux/ip.h>
 #include <linux/ipv6.h>
 #include <linux/inetdevice.h>
 
 
 static int qeth_l3_set_offline(struct ccwgroup_device *);
-static int qeth_l3_recover(void *);
 static int qeth_l3_stop(struct net_device *);
 static void qeth_l3_set_multicast_list(struct net_device *);
-static int qeth_l3_neigh_setup(struct net_device *, struct neigh_parms *);
 static int qeth_l3_register_addr_entry(struct qeth_card *,
                struct qeth_ipaddr *);
 static int qeth_l3_deregister_addr_entry(struct qeth_card *,
                struct qeth_ipaddr *);
-static int __qeth_l3_set_online(struct ccwgroup_device *, int);
-static int __qeth_l3_set_offline(struct ccwgroup_device *, int);
 
 static int qeth_l3_isxdigit(char *buf)
 {
@@ -1341,7 +1336,7 @@ qeth_diags_trace(struct qeth_card *card, enum qeth_diags_trace_cmds diags_cmd)
        return qeth_send_ipa_cmd(card, iob, qeth_diags_trace_cb, NULL);
 }
 
-static void qeth_l3_get_mac_for_ipm(__u32 ipm, char *mac)
+static void qeth_l3_get_mac_for_ipm(__be32 ipm, char *mac)
 {
        ip_eth_mc_map(ipm, mac);
 }
@@ -1414,7 +1409,7 @@ qeth_l3_add_mc_to_hash(struct qeth_card *card, struct in_device *in4_dev)
             im4 = rcu_dereference(im4->next_rcu)) {
                qeth_l3_get_mac_for_ipm(im4->multiaddr, buf);
 
-               tmp->u.a4.addr = im4->multiaddr;
+               tmp->u.a4.addr = be32_to_cpu(im4->multiaddr);
                memcpy(tmp->mac, buf, sizeof(tmp->mac));
 
                ipm = qeth_l3_ip_from_hash(card, tmp);
@@ -1425,7 +1420,7 @@ qeth_l3_add_mc_to_hash(struct qeth_card *card, struct in_device *in4_dev)
                        if (!ipm)
                                continue;
                        memcpy(ipm->mac, buf, sizeof(tmp->mac));
-                       ipm->u.a4.addr = im4->multiaddr;
+                       ipm->u.a4.addr = be32_to_cpu(im4->multiaddr);
                        ipm->is_multicast = 1;
                        ipm->disp_flag = QETH_DISP_ADDR_ADD;
                        hash_add(card->ip_mc_htable,
@@ -1598,8 +1593,8 @@ static void qeth_l3_free_vlan_addresses4(struct qeth_card *card,
        spin_lock_bh(&card->ip_lock);
 
        for (ifa = in_dev->ifa_list; ifa; ifa = ifa->ifa_next) {
-               addr->u.a4.addr = ifa->ifa_address;
-               addr->u.a4.mask = ifa->ifa_mask;
+               addr->u.a4.addr = be32_to_cpu(ifa->ifa_address);
+               addr->u.a4.mask = be32_to_cpu(ifa->ifa_mask);
                addr->type = QETH_IP_TYPE_NORMAL;
                qeth_l3_delete_ip(card, addr);
        }
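The byte-order conversions added throughout this file (be32_to_cpu(), cpu_to_be16(), and the __be16/__be32 type changes) read as a sparse annotation cleanup: ifa_address, ifa_mask and skb->protocol carry network byte order, while qeth's internal address fields are plain host-order integers. On s390, which is big-endian, every one of these conversions compiles to nothing, so runtime behavior is unchanged; for example:

	/* __be32 (network order) -> host order; a no-op on big-endian s390 */
	addr->u.a4.addr = be32_to_cpu(ifa->ifa_address);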
@@ -1690,25 +1685,25 @@ static inline int qeth_l3_rebuild_skb(struct qeth_card *card,
                        struct sk_buff *skb, struct qeth_hdr *hdr,
                        unsigned short *vlan_id)
 {
-       __be16 prot;
+       __u16 prot;
        struct iphdr *ip_hdr;
        unsigned char tg_addr[MAX_ADDR_LEN];
        int is_vlan = 0;
 
        if (!(hdr->hdr.l3.flags & QETH_HDR_PASSTHRU)) {
-               prot = htons((hdr->hdr.l3.flags & QETH_HDR_IPV6)? ETH_P_IPV6 :
-                             ETH_P_IP);
+               prot = (hdr->hdr.l3.flags & QETH_HDR_IPV6) ? ETH_P_IPV6 :
+                             ETH_P_IP;
                switch (hdr->hdr.l3.flags & QETH_HDR_CAST_MASK) {
                case QETH_CAST_MULTICAST:
                        switch (prot) {
 #ifdef CONFIG_QETH_IPV6
-                       case __constant_htons(ETH_P_IPV6):
+                       case ETH_P_IPV6:
                                ndisc_mc_map((struct in6_addr *)
                                     skb->data + 24,
                                     tg_addr, card->dev, 0);
                                break;
 #endif
-                       case __constant_htons(ETH_P_IP):
+                       case ETH_P_IP:
                                ip_hdr = (struct iphdr *)skb->data;
                                ip_eth_mc_map(ip_hdr->daddr, tg_addr);
                                break;
@@ -1795,7 +1790,7 @@ static int qeth_l3_process_inbound_buffer(struct qeth_card *card,
                        magic = *(__u16 *)skb->data;
                        if ((card->info.type == QETH_CARD_TYPE_IQD) &&
                            (magic == ETH_P_AF_IUCV)) {
-                               skb->protocol = ETH_P_AF_IUCV;
+                               skb->protocol = cpu_to_be16(ETH_P_AF_IUCV);
                                skb->pkt_type = PACKET_HOST;
                                skb->mac_header = NET_SKB_PAD;
                                skb->dev = card->dev;
@@ -1834,81 +1829,6 @@ static int qeth_l3_process_inbound_buffer(struct qeth_card *card,
        return work_done;
 }
 
-static int qeth_l3_poll(struct napi_struct *napi, int budget)
-{
-       struct qeth_card *card = container_of(napi, struct qeth_card, napi);
-       int work_done = 0;
-       struct qeth_qdio_buffer *buffer;
-       int done;
-       int new_budget = budget;
-
-       if (card->options.performance_stats) {
-               card->perf_stats.inbound_cnt++;
-               card->perf_stats.inbound_start_time = qeth_get_micros();
-       }
-
-       while (1) {
-               if (!card->rx.b_count) {
-                       card->rx.qdio_err = 0;
-                       card->rx.b_count = qdio_get_next_buffers(
-                               card->data.ccwdev, 0, &card->rx.b_index,
-                               &card->rx.qdio_err);
-                       if (card->rx.b_count <= 0) {
-                               card->rx.b_count = 0;
-                               break;
-                       }
-                       card->rx.b_element =
-                               &card->qdio.in_q->bufs[card->rx.b_index]
-                               .buffer->element[0];
-                       card->rx.e_offset = 0;
-               }
-
-               while (card->rx.b_count) {
-                       buffer = &card->qdio.in_q->bufs[card->rx.b_index];
-                       if (!(card->rx.qdio_err &&
-                           qeth_check_qdio_errors(card, buffer->buffer,
-                           card->rx.qdio_err, "qinerr")))
-                               work_done += qeth_l3_process_inbound_buffer(
-                                       card, new_budget, &done);
-                       else
-                               done = 1;
-
-                       if (done) {
-                               if (card->options.performance_stats)
-                                       card->perf_stats.bufs_rec++;
-                               qeth_put_buffer_pool_entry(card,
-                                       buffer->pool_entry);
-                               qeth_queue_input_buffer(card, card->rx.b_index);
-                               card->rx.b_count--;
-                               if (card->rx.b_count) {
-                                       card->rx.b_index =
-                                               (card->rx.b_index + 1) %
-                                               QDIO_MAX_BUFFERS_PER_Q;
-                                       card->rx.b_element =
-                                               &card->qdio.in_q
-                                               ->bufs[card->rx.b_index]
-                                               .buffer->element[0];
-                                       card->rx.e_offset = 0;
-                               }
-                       }
-
-                       if (work_done >= budget)
-                               goto out;
-                       else
-                               new_budget = budget - work_done;
-               }
-       }
-
-       napi_complete(napi);
-       if (qdio_start_irq(card->data.ccwdev, 0))
-               napi_schedule(&card->napi);
-out:
-       if (card->options.performance_stats)
-               card->perf_stats.inbound_time += qeth_get_micros() -
-                       card->perf_stats.inbound_start_time;
-       return work_done;
-}
-
 static int qeth_l3_verify_vlan_dev(struct net_device *dev,
                        struct qeth_card *card)
 {
@@ -2461,15 +2381,8 @@ static int qeth_l3_do_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
 {
        struct qeth_card *card = dev->ml_priv;
        struct qeth_arp_cache_entry arp_entry;
-       struct mii_ioctl_data *mii_data;
        int rc = 0;
 
-       if (!card)
-               return -ENODEV;
-
-       if (!qeth_card_hw_is_reachable(card))
-               return -ENODEV;
-
        switch (cmd) {
        case SIOC_QETH_ARP_SET_NO_ENTRIES:
                if (!capable(CAP_NET_ADMIN)) {
@@ -2514,37 +2427,9 @@ static int qeth_l3_do_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
                }
                rc = qeth_l3_arp_flush_cache(card);
                break;
-       case SIOC_QETH_ADP_SET_SNMP_CONTROL:
-               rc = qeth_snmp_command(card, rq->ifr_ifru.ifru_data);
-               break;
-       case SIOC_QETH_GET_CARD_TYPE:
-               if ((card->info.type == QETH_CARD_TYPE_OSD ||
-                    card->info.type == QETH_CARD_TYPE_OSX) &&
-                   !card->info.guestlan)
-                       return 1;
-               return 0;
-               break;
-       case SIOCGMIIPHY:
-               mii_data = if_mii(rq);
-               mii_data->phy_id = 0;
-               break;
-       case SIOCGMIIREG:
-               mii_data = if_mii(rq);
-               if (mii_data->phy_id != 0)
-                       rc = -EINVAL;
-               else
-                       mii_data->val_out = qeth_mdio_read(dev,
-                                                       mii_data->phy_id,
-                                                       mii_data->reg_num);
-               break;
-       case SIOC_QETH_QUERY_OAT:
-               rc = qeth_query_oat_command(card, rq->ifr_ifru.ifru_data);
-               break;
        default:
                rc = -EOPNOTSUPP;
        }
-       if (rc)
-               QETH_CARD_TEXT_(card, 2, "ioce%d", rc);
        return rc;
 }
 
@@ -2572,10 +2457,10 @@ int inline qeth_l3_get_cast_type(struct qeth_card *card, struct sk_buff *skb)
        rcu_read_unlock();
 
        /* try something else */
-       if (skb->protocol == ETH_P_IPV6)
+       if (be16_to_cpu(skb->protocol) == ETH_P_IPV6)
                return (skb_network_header(skb)[24] == 0xff) ?
                                RTN_MULTICAST : 0;
-       else if (skb->protocol == ETH_P_IP)
+       else if (be16_to_cpu(skb->protocol) == ETH_P_IP)
                return ((skb_network_header(skb)[16] & 0xf0) == 0xe0) ?
                                RTN_MULTICAST : 0;
        /* ... */
@@ -2609,17 +2494,13 @@ static void qeth_l3_fill_af_iucv_hdr(struct qeth_card *card,
        char daddr[16];
        struct af_iucv_trans_hdr *iucv_hdr;
 
-       skb_pull(skb, 14);
-       card->dev->header_ops->create(skb, card->dev, 0,
-                                     card->dev->dev_addr, card->dev->dev_addr,
-                                     card->dev->addr_len);
-       skb_pull(skb, 14);
-       iucv_hdr = (struct af_iucv_trans_hdr *)skb->data;
        memset(hdr, 0, sizeof(struct qeth_hdr));
        hdr->hdr.l3.id = QETH_HEADER_TYPE_LAYER3;
        hdr->hdr.l3.ext_flags = 0;
-       hdr->hdr.l3.length = skb->len;
+       hdr->hdr.l3.length = skb->len - ETH_HLEN;
        hdr->hdr.l3.flags = QETH_HDR_IPV6 | QETH_CAST_UNICAST;
+
+       iucv_hdr = (struct af_iucv_trans_hdr *) (skb->data + ETH_HLEN);
        memset(daddr, 0, sizeof(daddr));
        daddr[0] = 0xfe;
        daddr[1] = 0x80;
@@ -2730,7 +2611,7 @@ static void qeth_tso_fill_header(struct qeth_card *card,
        hdr->ext.payload_len = (__u16)(skb->len - hdr->ext.dg_hdr_len -
                                       sizeof(struct qeth_hdr_tso));
        tcph->check = 0;
-       if (skb->protocol == ETH_P_IPV6) {
+       if (be16_to_cpu(skb->protocol) == ETH_P_IPV6) {
                ip6h->payload_len = 0;
                tcph->check = ~csum_ipv6_magic(&ip6h->saddr, &ip6h->daddr,
                                               0, IPPROTO_TCP, 0);
@@ -2774,10 +2655,11 @@ static int qeth_l3_get_elements_no_tso(struct qeth_card *card,
        return elements;
 }
 
-static int qeth_l3_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
+static netdev_tx_t qeth_l3_hard_start_xmit(struct sk_buff *skb,
+                                          struct net_device *dev)
 {
        int rc;
-       u16 *tag;
+       __be16 *tag;
        struct qeth_hdr *hdr = NULL;
        int hdr_elements = 0;
        int elements;
@@ -2798,7 +2680,7 @@ static int qeth_l3_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
        if (((card->info.type == QETH_CARD_TYPE_IQD) &&
             (((card->options.cq != QETH_CQ_ENABLED) && !ipv) ||
              ((card->options.cq == QETH_CQ_ENABLED) &&
-              (skb->protocol != ETH_P_AF_IUCV)))) ||
+              (be16_to_cpu(skb->protocol) != ETH_P_AF_IUCV)))) ||
            card->options.sniffer)
                        goto tx_drop;
 
@@ -2823,10 +2705,7 @@ static int qeth_l3_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
        if ((card->info.type == QETH_CARD_TYPE_IQD) &&
            !skb_is_nonlinear(skb)) {
                new_skb = skb;
-               if (new_skb->protocol == ETH_P_AF_IUCV)
-                       data_offset = 0;
-               else
-                       data_offset = ETH_HLEN;
+               data_offset = ETH_HLEN;
                hdr = kmem_cache_alloc(qeth_core_header_cache, GFP_ATOMIC);
                if (!hdr)
                        goto tx_drop;
@@ -2854,9 +2733,9 @@ static int qeth_l3_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
                                new_skb->data + 8, 4);
                        skb_copy_to_linear_data_offset(new_skb, 8,
                                new_skb->data + 12, 4);
-                       tag = (u16 *)(new_skb->data + 12);
-                       *tag = __constant_htons(ETH_P_8021Q);
-                       *(tag + 1) = htons(skb_vlan_tag_get(new_skb));
+                       tag = (__be16 *)(new_skb->data + 12);
+                       *tag = cpu_to_be16(ETH_P_8021Q);
+                       *(tag + 1) = cpu_to_be16(skb_vlan_tag_get(new_skb));
                }
        }
 
@@ -2867,7 +2746,7 @@ static int qeth_l3_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
         */
        if ((card->info.type != QETH_CARD_TYPE_IQD) &&
            ((use_tso && !qeth_l3_get_elements_no_tso(card, new_skb, 1)) ||
-            (!use_tso && !qeth_get_elements_no(card, new_skb, 0)))) {
+            (!use_tso && !qeth_get_elements_no(card, new_skb, 0, 0)))) {
                int lin_rc = skb_linearize(new_skb);
 
                if (card->options.performance_stats) {
@@ -2894,7 +2773,7 @@ static int qeth_l3_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
                        qeth_l3_fill_header(card, hdr, new_skb, ipv,
                                                cast_type);
                } else {
-                       if (new_skb->protocol == ETH_P_AF_IUCV)
+                       if (be16_to_cpu(new_skb->protocol) == ETH_P_AF_IUCV)
                                qeth_l3_fill_af_iucv_hdr(card, hdr, new_skb);
                        else {
                                qeth_l3_fill_header(card, hdr, new_skb, ipv,
@@ -2909,7 +2788,8 @@ static int qeth_l3_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
 
        elements = use_tso ?
                   qeth_l3_get_elements_no_tso(card, new_skb, hdr_elements) :
-                  qeth_get_elements_no(card, new_skb, hdr_elements);
+                  qeth_get_elements_no(card, new_skb, hdr_elements,
+                                       (data_offset > 0) ? data_offset : 0);
        if (!elements) {
                if (data_offset >= 0)
                        kmem_cache_free(qeth_core_header_cache, hdr);
@@ -2931,7 +2811,7 @@ static int qeth_l3_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
                rc = qeth_do_send_packet(card, queue, new_skb, hdr, elements);
        } else
                rc = qeth_do_send_packet_fast(card, queue, new_skb, hdr,
-                                       elements, data_offset, 0);
+                                             data_offset, 0);
 
        if (!rc) {
                card->stats.tx_packets++;
@@ -3032,7 +2912,7 @@ static const struct ethtool_ops qeth_l3_ethtool_ops = {
        .get_ethtool_stats = qeth_core_get_ethtool_stats,
        .get_sset_count = qeth_core_get_sset_count,
        .get_drvinfo = qeth_core_get_drvinfo,
-       .get_settings = qeth_core_ethtool_get_settings,
+       .get_link_ksettings = qeth_core_ethtool_get_link_ksettings,
 };
 
 /*
@@ -3066,7 +2946,7 @@ static const struct net_device_ops qeth_l3_netdev_ops = {
        .ndo_start_xmit         = qeth_l3_hard_start_xmit,
        .ndo_validate_addr      = eth_validate_addr,
        .ndo_set_rx_mode        = qeth_l3_set_multicast_list,
-       .ndo_do_ioctl           = qeth_l3_do_ioctl,
+       .ndo_do_ioctl           = qeth_do_ioctl,
        .ndo_change_mtu         = qeth_change_mtu,
        .ndo_fix_features       = qeth_fix_features,
        .ndo_set_features       = qeth_set_features,
@@ -3082,7 +2962,7 @@ static const struct net_device_ops qeth_l3_osa_netdev_ops = {
        .ndo_start_xmit         = qeth_l3_hard_start_xmit,
        .ndo_validate_addr      = eth_validate_addr,
        .ndo_set_rx_mode        = qeth_l3_set_multicast_list,
-       .ndo_do_ioctl           = qeth_l3_do_ioctl,
+       .ndo_do_ioctl           = qeth_do_ioctl,
        .ndo_change_mtu         = qeth_change_mtu,
        .ndo_fix_features       = qeth_fix_features,
        .ndo_set_features       = qeth_set_features,
@@ -3151,7 +3031,7 @@ static int qeth_l3_setup_netdev(struct qeth_card *card)
                                  PAGE_SIZE;
 
        SET_NETDEV_DEV(card->dev, &card->gdev->dev);
-       netif_napi_add(card->dev, &card->napi, qeth_l3_poll, QETH_NAPI_WEIGHT);
+       netif_napi_add(card->dev, &card->napi, qeth_poll, QETH_NAPI_WEIGHT);
        netif_carrier_off(card->dev);
        return register_netdev(card->dev);
 }
@@ -3372,17 +3252,6 @@ static int qeth_l3_recover(void *ptr)
        return 0;
 }
 
-static void qeth_l3_shutdown(struct ccwgroup_device *gdev)
-{
-       struct qeth_card *card = dev_get_drvdata(&gdev->dev);
-       qeth_set_allowed_threads(card, 0, 1);
-       if ((gdev->state == CCWGROUP_ONLINE) && card->info.hwtrap)
-               qeth_hw_trap(card, QETH_DIAGS_TRAP_DISARM);
-       qeth_qdio_clear_card(card, 0);
-       qeth_clear_qdio_buffers(card);
-       qdio_free(CARD_DDEV(card));
-}
-
 static int qeth_l3_pm_suspend(struct ccwgroup_device *gdev)
 {
        struct qeth_card *card = dev_get_drvdata(&gdev->dev);
@@ -3440,15 +3309,16 @@ struct qeth_discipline qeth_l3_discipline = {
        .start_poll = qeth_qdio_start_poll,
        .input_handler = (qdio_handler_t *) qeth_qdio_input_handler,
        .output_handler = (qdio_handler_t *) qeth_qdio_output_handler,
+       .process_rx_buffer = qeth_l3_process_inbound_buffer,
        .recover = qeth_l3_recover,
        .setup = qeth_l3_probe_device,
        .remove = qeth_l3_remove_device,
        .set_online = qeth_l3_set_online,
        .set_offline = qeth_l3_set_offline,
-       .shutdown = qeth_l3_shutdown,
        .freeze = qeth_l3_pm_suspend,
        .thaw = qeth_l3_pm_resume,
        .restore = qeth_l3_pm_resume,
+       .do_ioctl = qeth_l3_do_ioctl,
        .control_event_handler = qeth_l3_control_event,
 };
 EXPORT_SYMBOL_GPL(qeth_l3_discipline);
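Both disciplines now point .ndo_do_ioctl at a common qeth_do_ioctl, and the MII/SNMP/OAT/card-type cases deleted from qeth_l3_do_ioctl above move there; the new .do_ioctl discipline hook (NULL for l2, qeth_l3_do_ioctl for l3) covers what remains layer-specific, namely the ARP ioctls. A hedged sketch of the shared dispatcher, reconstructed from the code removed in this patch:

static int qeth_do_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
	struct qeth_card *card = dev->ml_priv;
	struct mii_ioctl_data *mii_data;
	int rc = 0;

	/* the common checks hoisted out of qeth_l3_do_ioctl() */
	if (!card || !qeth_card_hw_is_reachable(card))
		return -ENODEV;

	switch (cmd) {
	case SIOC_QETH_ADP_SET_SNMP_CONTROL:
		rc = qeth_snmp_command(card, rq->ifr_ifru.ifru_data);
		break;
	case SIOC_QETH_GET_CARD_TYPE:
		if ((card->info.type == QETH_CARD_TYPE_OSD ||
		     card->info.type == QETH_CARD_TYPE_OSX) &&
		    !card->info.guestlan)
			return 1;
		return 0;
	case SIOCGMIIPHY:
		mii_data = if_mii(rq);
		mii_data->phy_id = 0;
		break;
	case SIOCGMIIREG:
		mii_data = if_mii(rq);
		if (mii_data->phy_id != 0)
			rc = -EINVAL;
		else
			mii_data->val_out = qeth_mdio_read(dev,
				mii_data->phy_id, mii_data->reg_num);
		break;
	case SIOC_QETH_QUERY_OAT:
		rc = qeth_query_oat_command(card, rq->ifr_ifru.ifru_data);
		break;
	default:
		/* hand layer-specific commands (e.g. ARP) to the discipline */
		if (card->discipline->do_ioctl)
			rc = card->discipline->do_ioctl(dev, rq, cmd);
		else
			rc = -EOPNOTSUPP;
	}
	if (rc)
		QETH_CARD_TEXT_(card, 2, "ioce%d", rc);
	return rc;
}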
@@ -3472,8 +3342,8 @@ static int qeth_l3_ip_event(struct notifier_block *this,
 
        addr = qeth_l3_get_addr_buffer(QETH_PROT_IPV4);
        if (addr) {
-               addr->u.a4.addr = ifa->ifa_address;
-               addr->u.a4.mask = ifa->ifa_mask;
+               addr->u.a4.addr = be32_to_cpu(ifa->ifa_address);
+               addr->u.a4.mask = be32_to_cpu(ifa->ifa_mask);
                addr->type = QETH_IP_TYPE_NORMAL;
        } else
                return NOTIFY_DONE;
index 05e9471e3d3fc0052e7866786486a8590f7b1129..ff29a4b416b45db98b1ee65f0d341c3a95c5f975 100644 (file)
@@ -286,7 +286,7 @@ static ssize_t qeth_l3_dev_hsuid_store(struct device *dev,
                if (!addr)
                        return -ENOMEM;
 
-               addr->u.a6.addr.s6_addr32[0] = 0xfe800000;
+               addr->u.a6.addr.s6_addr32[0] = cpu_to_be32(0xfe800000);
                addr->u.a6.addr.s6_addr32[1] = 0x00000000;
                for (i = 8; i < 16; i++)
                        addr->u.a6.addr.s6_addr[i] =
@@ -320,7 +320,7 @@ static ssize_t qeth_l3_dev_hsuid_store(struct device *dev,
 
        addr = qeth_l3_get_addr_buffer(QETH_PROT_IPV6);
        if (addr != NULL) {
-               addr->u.a6.addr.s6_addr32[0] = 0xfe800000;
+               addr->u.a6.addr.s6_addr32[0] = cpu_to_be32(0xfe800000);
                addr->u.a6.addr.s6_addr32[1] = 0x00000000;
                for (i = 8; i < 16; i++)
                        addr->u.a6.addr.s6_addr[i] = card->options.hsuid[i - 8];
index 230043c1c90ffcfe5781bcdea23cc52dc9bc6732..3c52867dfe28e33b04f85858dfdb9285eef11ba2 100644 (file)
@@ -1241,16 +1241,15 @@ config SCSI_LPFC
        tristate "Emulex LightPulse Fibre Channel Support"
        depends on PCI && SCSI
        depends on SCSI_FC_ATTRS
-       depends on NVME_FC && NVME_TARGET_FC
        select CRC_T10DIF
-       help
+       ---help---
           This lpfc driver supports the Emulex LightPulse
           Family of Fibre Channel PCI host adapters.
 
 config SCSI_LPFC_DEBUG_FS
        bool "Emulex LightPulse Fibre Channel debugfs Support"
        depends on SCSI_LPFC && DEBUG_FS
-       help
+       ---help---
          This makes debugging information from the lpfc driver
          available via the debugfs filesystem.
 
index a3ad042934870d4bd8bd5ded29a00516156d9168..c8172f16cf33cd6454ae571c3310f6451afa8b79 100644 (file)
@@ -2056,7 +2056,6 @@ static int fillup_pools(struct aac_dev *dev, struct hw_fib **hw_fib_pool,
 {
        struct hw_fib **hw_fib_p;
        struct fib **fib_p;
-       int rcode = 1;
 
        hw_fib_p = hw_fib_pool;
        fib_p = fib_pool;
@@ -2074,11 +2073,11 @@ static int fillup_pools(struct aac_dev *dev, struct hw_fib **hw_fib_pool,
                }
        }
 
+       /*
+        * Get the actual number of allocated fibs
+        */
        num = hw_fib_p - hw_fib_pool;
-       if (!num)
-               rcode = 0;
-
-       return rcode;
+       return num;
 }
 
 static void wakeup_fibctx_threads(struct aac_dev *dev,
@@ -2186,7 +2185,6 @@ static void aac_process_events(struct aac_dev *dev)
        struct fib *fib;
        unsigned long flags;
        spinlock_t *t_lock;
-       unsigned int rcode;
 
        t_lock = dev->queues->queue[HostNormCmdQueue].lock;
        spin_lock_irqsave(t_lock, flags);
@@ -2269,8 +2267,8 @@ static void aac_process_events(struct aac_dev *dev)
                 * Fill up fib pointer pools with actual fibs
                 * and hw_fibs
                 */
-               rcode = fillup_pools(dev, hw_fib_pool, fib_pool, num);
-               if (!rcode)
+               num = fillup_pools(dev, hw_fib_pool, fib_pool, num);
+               if (!num)
                        goto free_mem;
 
                /*
index 2e5338dec621fbff89c8a68acc3ba241173f3239..7b0410e0f569481cc40101fab38a768c7e274c8e 100644 (file)
@@ -468,7 +468,7 @@ err_out:
        return -1;
 
 err_blink:
-       return (status > 16) & 0xFF;
+       return (status >> 16) & 0xFF;
 }
 
 static inline u32 aac_get_vector(struct aac_dev *dev)
index 109e2c99e6c162e01a4b569292bad7b4e68fd3dc..95d8f25cbccab7056dc4c7967814cd5932fd3507 100644 (file)
@@ -6278,7 +6278,7 @@ ahd_reset(struct ahd_softc *ahd, int reinit)
                 * does not disable its parity logic prior to
                 * the start of the reset.  This may cause a
                 * parity error to be detected and thus a
-                * spurious SERR or PERR assertion.  Disble
+                * spurious SERR or PERR assertion.  Disable
                 * PERR and SERR responses during the CHIPRST.
                 */
                mod_cmd = cmd & ~(PCIM_CMD_PERRESPEN|PCIM_CMD_SERRESPEN);
index 48e200102221c518dacba0a7137c77170e81c53a..c01b47e5b55a899a48b28553a41747b04d2903cc 100644 (file)
@@ -113,7 +113,7 @@ struct alua_queue_data {
 #define ALUA_POLICY_SWITCH_ALL         1
 
 static void alua_rtpg_work(struct work_struct *work);
-static void alua_rtpg_queue(struct alua_port_group *pg,
+static bool alua_rtpg_queue(struct alua_port_group *pg,
                            struct scsi_device *sdev,
                            struct alua_queue_data *qdata, bool force);
 static void alua_check(struct scsi_device *sdev, bool force);
@@ -862,7 +862,13 @@ static void alua_rtpg_work(struct work_struct *work)
        kref_put(&pg->kref, release_port_group);
 }
 
-static void alua_rtpg_queue(struct alua_port_group *pg,
+/**
+ * alua_rtpg_queue() - cause RTPG to be submitted asynchronously
+ *
+ * Returns true if and only if alua_rtpg_work() will be called asynchronously.
+ * That function is responsible for calling @qdata->fn().
+ */
+static bool alua_rtpg_queue(struct alua_port_group *pg,
                            struct scsi_device *sdev,
                            struct alua_queue_data *qdata, bool force)
 {
@@ -870,8 +876,8 @@ static void alua_rtpg_queue(struct alua_port_group *pg,
        unsigned long flags;
        struct workqueue_struct *alua_wq = kaluad_wq;
 
-       if (!pg)
-               return;
+       if (WARN_ON_ONCE(!pg) || scsi_device_get(sdev))
+               return false;
 
        spin_lock_irqsave(&pg->lock, flags);
        if (qdata) {
@@ -884,14 +890,12 @@ static void alua_rtpg_queue(struct alua_port_group *pg,
                pg->flags |= ALUA_PG_RUN_RTPG;
                kref_get(&pg->kref);
                pg->rtpg_sdev = sdev;
-               scsi_device_get(sdev);
                start_queue = 1;
        } else if (!(pg->flags & ALUA_PG_RUN_RTPG) && force) {
                pg->flags |= ALUA_PG_RUN_RTPG;
                /* Do not queue if the worker is already running */
                if (!(pg->flags & ALUA_PG_RUNNING)) {
                        kref_get(&pg->kref);
-                       sdev = NULL;
                        start_queue = 1;
                }
        }
@@ -900,13 +904,17 @@ static void alua_rtpg_queue(struct alua_port_group *pg,
                alua_wq = kaluad_sync_wq;
        spin_unlock_irqrestore(&pg->lock, flags);
 
-       if (start_queue &&
-           !queue_delayed_work(alua_wq, &pg->rtpg_work,
-                               msecs_to_jiffies(ALUA_RTPG_DELAY_MSECS))) {
-               if (sdev)
-                       scsi_device_put(sdev);
-               kref_put(&pg->kref, release_port_group);
+       if (start_queue) {
+               if (queue_delayed_work(alua_wq, &pg->rtpg_work,
+                               msecs_to_jiffies(ALUA_RTPG_DELAY_MSECS)))
+                       sdev = NULL;
+               else
+                       kref_put(&pg->kref, release_port_group);
        }
+       if (sdev)
+               scsi_device_put(sdev);
+
+       return true;
 }
 
 /*
@@ -1007,11 +1015,13 @@ static int alua_activate(struct scsi_device *sdev,
                mutex_unlock(&h->init_mutex);
                goto out;
        }
-       fn = NULL;
        rcu_read_unlock();
        mutex_unlock(&h->init_mutex);
 
-       alua_rtpg_queue(pg, sdev, qdata, true);
+       if (alua_rtpg_queue(pg, sdev, qdata, true))
+               fn = NULL;
+       else
+               err = SCSI_DH_DEV_OFFLINED;
        kref_put(&pg->kref, release_port_group);
 out:
        if (fn)
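Per the kerneldoc added above, alua_rtpg_queue() now returns true if and only if alua_rtpg_work() will run (and therefore invoke qdata->fn()); it takes its own scsi_device reference up front and hands it to the work item, dropping it itself on every failure path. The caller-side pattern, annotated:

	/* as in alua_activate() above */
	if (alua_rtpg_queue(pg, sdev, qdata, true))
		fn = NULL;			/* the worker owns the completion */
	else
		err = SCSI_DH_DEV_OFFLINED;	/* complete synchronously */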
index 524a0c755ed7e74cd790778ec7c04ae452cc853d..9d659aaace15d0e3ec28c69a14688b81a544d8b7 100644 (file)
@@ -2956,7 +2956,7 @@ static int hpsa_send_reset(struct ctlr_info *h, unsigned char *scsi3addr,
        /* fill_cmd can't fail here, no data buffer to map. */
        (void) fill_cmd(c, reset_type, h, NULL, 0, 0,
                        scsi3addr, TYPE_MSG);
-       rc = hpsa_scsi_do_simple_cmd(h, c, reply_queue, DEFAULT_TIMEOUT);
+       rc = hpsa_scsi_do_simple_cmd(h, c, reply_queue, NO_TIMEOUT);
        if (rc) {
                dev_warn(&h->pdev->dev, "Failed to send reset command\n");
                goto out;
@@ -3714,7 +3714,7 @@ exit_failed:
  *  # (integer code indicating one of several NOT READY states
  *     describing why a volume is to be kept offline)
  */
-static int hpsa_volume_offline(struct ctlr_info *h,
+static unsigned char hpsa_volume_offline(struct ctlr_info *h,
                                        unsigned char scsi3addr[])
 {
        struct CommandList *c;
@@ -3735,7 +3735,7 @@ static int hpsa_volume_offline(struct ctlr_info *h,
                                        DEFAULT_TIMEOUT);
        if (rc) {
                cmd_free(h, c);
-               return 0;
+               return HPSA_VPD_LV_STATUS_UNSUPPORTED;
        }
        sense = c->err_info->SenseInfo;
        if (c->err_info->SenseLen > sizeof(c->err_info->SenseInfo))
@@ -3746,19 +3746,13 @@ static int hpsa_volume_offline(struct ctlr_info *h,
        cmd_status = c->err_info->CommandStatus;
        scsi_status = c->err_info->ScsiStatus;
        cmd_free(h, c);
-       /* Is the volume 'not ready'? */
-       if (cmd_status != CMD_TARGET_STATUS ||
-               scsi_status != SAM_STAT_CHECK_CONDITION ||
-               sense_key != NOT_READY ||
-               asc != ASC_LUN_NOT_READY)  {
-               return 0;
-       }
 
        /* Determine the reason for not ready state */
        ldstat = hpsa_get_volume_status(h, scsi3addr);
 
        /* Keep volume offline in certain cases: */
        switch (ldstat) {
+       case HPSA_LV_FAILED:
        case HPSA_LV_UNDERGOING_ERASE:
        case HPSA_LV_NOT_AVAILABLE:
        case HPSA_LV_UNDERGOING_RPI:
@@ -3780,7 +3774,7 @@ static int hpsa_volume_offline(struct ctlr_info *h,
        default:
                break;
        }
-       return 0;
+       return HPSA_LV_OK;
 }
 
 /*
@@ -3853,10 +3847,10 @@ static int hpsa_update_device_info(struct ctlr_info *h,
        /* Do an inquiry to the device to see what it is. */
        if (hpsa_scsi_do_inquiry(h, scsi3addr, 0, inq_buff,
                (unsigned char) OBDR_TAPE_INQ_SIZE) != 0) {
-               /* Inquiry failed (msg printed already) */
                dev_err(&h->pdev->dev,
-                       "hpsa_update_device_info: inquiry failed\n");
-               rc = -EIO;
+                       "%s: inquiry failed, device will be skipped.\n",
+                       __func__);
+               rc = HPSA_INQUIRY_FAILED;
                goto bail_out;
        }
 
@@ -3885,15 +3879,20 @@ static int hpsa_update_device_info(struct ctlr_info *h,
        if ((this_device->devtype == TYPE_DISK ||
                this_device->devtype == TYPE_ZBC) &&
                is_logical_dev_addr_mode(scsi3addr)) {
-               int volume_offline;
+               unsigned char volume_offline;
 
                hpsa_get_raid_level(h, scsi3addr, &this_device->raid_level);
                if (h->fw_support & MISC_FW_RAID_OFFLOAD_BASIC)
                        hpsa_get_ioaccel_status(h, scsi3addr, this_device);
                volume_offline = hpsa_volume_offline(h, scsi3addr);
-               if (volume_offline < 0 || volume_offline > 0xff)
-                       volume_offline = HPSA_VPD_LV_STATUS_UNSUPPORTED;
-               this_device->volume_offline = volume_offline & 0xff;
+               this_device->volume_offline = volume_offline;
+               if (volume_offline == HPSA_LV_FAILED) {
+                       rc = HPSA_LV_FAILED;
+                       dev_err(&h->pdev->dev,
+                               "%s: LV failed, device will be skipped.\n",
+                               __func__);
+                       goto bail_out;
+               }
        } else {
                this_device->raid_level = RAID_UNKNOWN;
                this_device->offload_config = 0;
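hpsa_volume_offline() now returns the logical-volume status byte itself rather than a 0/1 verdict, which is why its return type becomes unsigned char and the caller stops range-checking it. A summary of the new contract, as far as it can be read from these hunks:

	/*
	 * hpsa_volume_offline() return values after this change:
	 *   HPSA_VPD_LV_STATUS_UNSUPPORTED (0xff) - the command could not be sent
	 *   HPSA_LV_OK (0x00)                     - volume is usable
	 *   anything else                         - raw LV status from the
	 *     controller; HPSA_LV_FAILED (0x01) in particular now propagates
	 *     out of hpsa_update_device_info(), so dead volumes are skipped
	 *     instead of being offered to the SCSI midlayer.
	 */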
@@ -4379,8 +4378,7 @@ static void hpsa_update_scsi_devices(struct ctlr_info *h)
                        goto out;
                }
                if (rc) {
-                       dev_warn(&h->pdev->dev,
-                               "Inquiry failed, skipping device.\n");
+                       h->drv_req_rescan = 1;
                        continue;
                }
 
@@ -5558,7 +5556,7 @@ static void hpsa_scan_complete(struct ctlr_info *h)
 
        spin_lock_irqsave(&h->scan_lock, flags);
        h->scan_finished = 1;
-       wake_up_all(&h->scan_wait_queue);
+       wake_up(&h->scan_wait_queue);
        spin_unlock_irqrestore(&h->scan_lock, flags);
 }
 
@@ -5576,11 +5574,23 @@ static void hpsa_scan_start(struct Scsi_Host *sh)
        if (unlikely(lockup_detected(h)))
                return hpsa_scan_complete(h);
 
+       /*
+        * If a scan is already waiting to run, no need to add another
+        */
+       spin_lock_irqsave(&h->scan_lock, flags);
+       if (h->scan_waiting) {
+               spin_unlock_irqrestore(&h->scan_lock, flags);
+               return;
+       }
+
+       spin_unlock_irqrestore(&h->scan_lock, flags);
+
        /* wait until any scan already in progress is finished. */
        while (1) {
                spin_lock_irqsave(&h->scan_lock, flags);
                if (h->scan_finished)
                        break;
+               h->scan_waiting = 1;
                spin_unlock_irqrestore(&h->scan_lock, flags);
                wait_event(h->scan_wait_queue, h->scan_finished);
                /* Note: We don't need to worry about a race between this
@@ -5590,6 +5600,7 @@ static void hpsa_scan_start(struct Scsi_Host *sh)
                 */
        }
        h->scan_finished = 0; /* mark scan as in progress */
+       h->scan_waiting = 0;
        spin_unlock_irqrestore(&h->scan_lock, flags);
 
        if (unlikely(lockup_detected(h)))
@@ -8792,6 +8803,7 @@ reinit_after_soft_reset:
        init_waitqueue_head(&h->event_sync_wait_queue);
        mutex_init(&h->reset_mutex);
        h->scan_finished = 1; /* no scan currently in progress */
+       h->scan_waiting = 0;
 
        pci_set_drvdata(pdev, h);
        h->ndevices = 0;
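The scan_waiting flag introduced here (a one-bit field added to struct ctlr_info in the next hunk) coalesces rescan requests: while one scan runs and a second caller is already parked on scan_wait_queue, any further hpsa_scan_start() call returns immediately, since the parked waiter will rescan everything anyway. That also guarantees at most one waiter on the queue, which is presumably why hpsa_scan_complete() can downgrade wake_up_all() to a plain wake_up().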
index bf6cdc1066544fa5fe2df6f5396d17ca4b4c8909..6f04f2ad412530a76d615b394250d502221d5457 100644 (file)
@@ -201,6 +201,7 @@ struct ctlr_info {
        dma_addr_t              errinfo_pool_dhandle;
        unsigned long           *cmd_pool_bits;
        int                     scan_finished;
+       u8                      scan_waiting : 1;
        spinlock_t              scan_lock;
        wait_queue_head_t       scan_wait_queue;
 
index a584cdf0705846ef13a0375ecb2e1579513ecf92..5961705eef767526f66a6dbc1bbb1e7feec70c85 100644 (file)
 #define CFGTBL_BusType_Fibre2G  0x00000200l
 
 /* VPD Inquiry types */
+#define HPSA_INQUIRY_FAILED            0x02
 #define HPSA_VPD_SUPPORTED_PAGES        0x00
 #define HPSA_VPD_LV_DEVICE_ID           0x83
 #define HPSA_VPD_LV_DEVICE_GEOMETRY     0xC1
 /* Logical volume states */
 #define HPSA_VPD_LV_STATUS_UNSUPPORTED                 0xff
 #define HPSA_LV_OK                                      0x0
+#define HPSA_LV_FAILED                                 0x01
 #define HPSA_LV_NOT_AVAILABLE                          0x0b
 #define HPSA_LV_UNDERGOING_ERASE                       0x0F
 #define HPSA_LV_UNDERGOING_RPI                         0x12
index 07c08ce68d70af2fc09be51a3f4c8ba67c5a1d9d..894b1e3ebd56f4a141a8ed49a298819e3abbe8b2 100644 (file)
@@ -561,8 +561,12 @@ static void iscsi_complete_task(struct iscsi_task *task, int state)
        WARN_ON_ONCE(task->state == ISCSI_TASK_FREE);
        task->state = state;
 
-       if (!list_empty(&task->running))
+       spin_lock_bh(&conn->taskqueuelock);
+       if (!list_empty(&task->running)) {
+               pr_debug_once("%s while task on list", __func__);
                list_del_init(&task->running);
+       }
+       spin_unlock_bh(&conn->taskqueuelock);
 
        if (conn->task == task)
                conn->task = NULL;
@@ -784,7 +788,9 @@ __iscsi_conn_send_pdu(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
                if (session->tt->xmit_task(task))
                        goto free_task;
        } else {
+               spin_lock_bh(&conn->taskqueuelock);
                list_add_tail(&task->running, &conn->mgmtqueue);
+               spin_unlock_bh(&conn->taskqueuelock);
                iscsi_conn_queue_work(conn);
        }
 
@@ -1475,8 +1481,10 @@ void iscsi_requeue_task(struct iscsi_task *task)
         * this may be on the requeue list already if the xmit_task callout
         * is handling the r2ts while we are adding new ones
         */
+       spin_lock_bh(&conn->taskqueuelock);
        if (list_empty(&task->running))
                list_add_tail(&task->running, &conn->requeue);
+       spin_unlock_bh(&conn->taskqueuelock);
        iscsi_conn_queue_work(conn);
 }
 EXPORT_SYMBOL_GPL(iscsi_requeue_task);
@@ -1513,22 +1521,26 @@ static int iscsi_data_xmit(struct iscsi_conn *conn)
         * only have one nop-out as a ping from us and targets should not
         * overflow us with nop-ins
         */
+       spin_lock_bh(&conn->taskqueuelock);
 check_mgmt:
        while (!list_empty(&conn->mgmtqueue)) {
                conn->task = list_entry(conn->mgmtqueue.next,
                                         struct iscsi_task, running);
                list_del_init(&conn->task->running);
+               spin_unlock_bh(&conn->taskqueuelock);
                if (iscsi_prep_mgmt_task(conn, conn->task)) {
                        /* regular RX path uses back_lock */
                        spin_lock_bh(&conn->session->back_lock);
                        __iscsi_put_task(conn->task);
                        spin_unlock_bh(&conn->session->back_lock);
                        conn->task = NULL;
+                       spin_lock_bh(&conn->taskqueuelock);
                        continue;
                }
                rc = iscsi_xmit_task(conn);
                if (rc)
                        goto done;
+               spin_lock_bh(&conn->taskqueuelock);
        }
 
        /* process pending command queue */
@@ -1536,19 +1548,24 @@ check_mgmt:
                conn->task = list_entry(conn->cmdqueue.next, struct iscsi_task,
                                        running);
                list_del_init(&conn->task->running);
+               spin_unlock_bh(&conn->taskqueuelock);
                if (conn->session->state == ISCSI_STATE_LOGGING_OUT) {
                        fail_scsi_task(conn->task, DID_IMM_RETRY);
+                       spin_lock_bh(&conn->taskqueuelock);
                        continue;
                }
                rc = iscsi_prep_scsi_cmd_pdu(conn->task);
                if (rc) {
                        if (rc == -ENOMEM || rc == -EACCES) {
+                               spin_lock_bh(&conn->taskqueuelock);
                                list_add_tail(&conn->task->running,
                                              &conn->cmdqueue);
                                conn->task = NULL;
+                               spin_unlock_bh(&conn->taskqueuelock);
                                goto done;
                        } else
                                fail_scsi_task(conn->task, DID_ABORT);
+                       spin_lock_bh(&conn->taskqueuelock);
                        continue;
                }
                rc = iscsi_xmit_task(conn);
@@ -1559,6 +1576,7 @@ check_mgmt:
                 * we need to check the mgmt queue for nops that need to
                 * be sent to aviod starvation
                 */
+               spin_lock_bh(&conn->taskqueuelock);
                if (!list_empty(&conn->mgmtqueue))
                        goto check_mgmt;
        }
@@ -1578,12 +1596,15 @@ check_mgmt:
                conn->task = task;
                list_del_init(&conn->task->running);
                conn->task->state = ISCSI_TASK_RUNNING;
+               spin_unlock_bh(&conn->taskqueuelock);
                rc = iscsi_xmit_task(conn);
                if (rc)
                        goto done;
+               spin_lock_bh(&conn->taskqueuelock);
                if (!list_empty(&conn->mgmtqueue))
                        goto check_mgmt;
        }
+       spin_unlock_bh(&conn->taskqueuelock);
        spin_unlock_bh(&conn->session->frwd_lock);
        return -ENODATA;
 
@@ -1739,7 +1760,9 @@ int iscsi_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *sc)
                        goto prepd_reject;
                }
        } else {
+               spin_lock_bh(&conn->taskqueuelock);
                list_add_tail(&task->running, &conn->cmdqueue);
+               spin_unlock_bh(&conn->taskqueuelock);
                iscsi_conn_queue_work(conn);
        }
 
@@ -2897,6 +2920,7 @@ iscsi_conn_setup(struct iscsi_cls_session *cls_session, int dd_size,
        INIT_LIST_HEAD(&conn->mgmtqueue);
        INIT_LIST_HEAD(&conn->cmdqueue);
        INIT_LIST_HEAD(&conn->requeue);
+       spin_lock_init(&conn->taskqueuelock);
        INIT_WORK(&conn->xmitwork, iscsi_xmitworker);
 
        /* allocate login_task used for the login/text sequences */
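The theme of the libiscsi hunks above: every list_add/list_del of task->running on the mgmtqueue, cmdqueue and requeue lists is now bracketed by the new per-connection taskqueuelock, with iscsi_data_xmit() dropping it across the prep/xmit calls and re-taking it before walking a list again. This closes a race in which iscsi_complete_task(), running from the completion side, could unlink a task while the xmit worker was traversing the queues. The invariant, in miniature:

	/* any queue/dequeue of task->running must hold taskqueuelock */
	spin_lock_bh(&conn->taskqueuelock);
	if (list_empty(&task->running))
		list_add_tail(&task->running, &conn->cmdqueue);
	spin_unlock_bh(&conn->taskqueuelock);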
index 763f012fdeca00e21f086be86efa1ec12d65c5f0..87f5e694dbedd8a7eda74c1a5dccd0e9008e8c93 100644 (file)
@@ -221,7 +221,7 @@ static unsigned int sas_ata_qc_issue(struct ata_queued_cmd *qc)
                task->num_scatter = qc->n_elem;
        } else {
                for_each_sg(qc->sg, sg, qc->n_elem, si)
-                       xfer += sg->length;
+                       xfer += sg_dma_len(sg);
 
                task->total_xfer_len = xfer;
                task->num_scatter = si;
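The one-word fix above matters whenever DMA mapping changes the scatterlist layout: sg->length describes the CPU-side segment, while sg_dma_len() describes the mapped DMA segment, and the two diverge when an IOMMU coalesces entries. Summing the CPU-side lengths misstates total_xfer_len. A generic illustration of the rule (not code from this driver):

#include <linux/scatterlist.h>

/*
 * Total DMA length of a mapped scatterlist. 'nents' must be the count
 * returned by dma_map_sg(); walk the mapped view with sg_dma_len(),
 * never sg->length.
 */
static size_t total_dma_len(struct scatterlist *sgl, int nents)
{
	struct scatterlist *sg;
	size_t total = 0;
	int i;

	for_each_sg(sgl, sg, nents, i)
		total += sg_dma_len(sg);

	return total;
}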
index 0bba2e30b4f09f62ef096ce73df629681a1646b6..257bbdd0f0b83a54d36d0378db18dedb93472737 100644 (file)
@@ -99,12 +99,13 @@ struct lpfc_sli2_slim;
 #define FC_MAX_ADPTMSG         64
 
 #define MAX_HBAEVT     32
+#define MAX_HBAS_NO_RESET 16
 
 /* Number of MSI-X vectors the driver uses */
 #define LPFC_MSIX_VECTORS      2
 
 /* lpfc wait event data ready flag */
-#define LPFC_DATA_READY                (1<<0)
+#define LPFC_DATA_READY                0       /* bit 0 */
 
 /* queue dump line buffer size */
 #define LPFC_LBUF_SZ           128
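LPFC_DATA_READY changes from a mask (1<<0) to a bit number (0) because the flag word is manipulated with the atomic bitops API, which takes bit indices rather than masks; with the old value, bitops call sites were silently operating on bit 1, and any mask-style test mixed in would have disagreed with them. Usage (the data_flags field name is taken from the existing driver, not from this hunk):

	/* set_bit()/test_bit() take a bit number, not a mask */
	set_bit(LPFC_DATA_READY, &phba->data_flags);
	if (test_and_clear_bit(LPFC_DATA_READY, &phba->data_flags))
		lpfc_work_done(phba);	/* worker has data to process */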
@@ -692,6 +693,7 @@ struct lpfc_hba {
                                         * capability
                                         */
 #define HBA_NVME_IOQ_FLUSH      0x80000 /* NVME IO queues flushed. */
+#define NVME_XRI_ABORT_EVENT   0x100000
 
        uint32_t fcp_ring_in_use; /* When polling test if intr-hndlr active*/
        struct lpfc_dmabuf slim2p;
index 5c783ef7f260612e881dd199bccd767d9179a99d..22819afbaef5c4a229ce66ebdf8d681b47fbbc2f 100644 (file)
@@ -3010,6 +3010,12 @@ MODULE_PARM_DESC(lpfc_poll, "FCP ring polling mode control:"
 static DEVICE_ATTR(lpfc_poll, S_IRUGO | S_IWUSR,
                   lpfc_poll_show, lpfc_poll_store);
 
+int lpfc_no_hba_reset_cnt;
+unsigned long lpfc_no_hba_reset[MAX_HBAS_NO_RESET] = {
+       0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
+module_param_array(lpfc_no_hba_reset, ulong, &lpfc_no_hba_reset_cnt, 0444);
+MODULE_PARM_DESC(lpfc_no_hba_reset, "WWPN of HBAs that should not be reset");
+
 LPFC_ATTR(sli_mode, 0, 0, 3,
        "SLI mode selector:"
        " 0 - auto (SLI-3 if supported),"
@@ -3309,9 +3315,9 @@ LPFC_ATTR_R(nvmet_mrq_post, LPFC_DEF_MRQ_POST,
  * lpfc_enable_fc4_type: Defines what FC4 types are supported.
  * Supported Values:  1 - register just FCP
  *                    3 - register both FCP and NVME
- * Supported values are [1,3]. Default value is 3
+ * Supported values are [1,3]. Default value is 1
  */
-LPFC_ATTR_R(enable_fc4_type, LPFC_ENABLE_BOTH,
+LPFC_ATTR_R(enable_fc4_type, LPFC_ENABLE_FCP,
            LPFC_ENABLE_FCP, LPFC_ENABLE_BOTH,
            "Define fc4 type to register with fabric.");
 
@@ -4451,7 +4457,8 @@ lpfc_fcp_imax_store(struct device *dev, struct device_attribute *attr,
                return -EINVAL;
 
        phba->cfg_fcp_imax = (uint32_t)val;
-       for (i = 0; i < phba->io_channel_irqs; i++)
+
+       for (i = 0; i < phba->io_channel_irqs; i += LPFC_MAX_EQ_DELAY_EQID_CNT)
                lpfc_modify_hba_eq_delay(phba, i);
 
        return strlen(buf);
index 843dd73004da0239089442a8591bd2c37e141f16..54e6ac42fbcd4269e2272c5b81271d607232da0a 100644 (file)
@@ -384,7 +384,7 @@ void lpfc_free_sysfs_attr(struct lpfc_vport *);
 extern struct device_attribute *lpfc_hba_attrs[];
 extern struct device_attribute *lpfc_vport_attrs[];
 extern struct scsi_host_template lpfc_template;
-extern struct scsi_host_template lpfc_template_s3;
+extern struct scsi_host_template lpfc_template_no_hr;
 extern struct scsi_host_template lpfc_template_nvme;
 extern struct scsi_host_template lpfc_vport_template;
 extern struct fc_function_template lpfc_transport_functions;
@@ -554,3 +554,5 @@ void lpfc_nvme_abort_fcreq_cmpl(struct lpfc_hba *phba,
                                struct lpfc_wcqe_complete *abts_cmpl);
 extern int lpfc_enable_nvmet_cnt;
 extern unsigned long long lpfc_enable_nvmet[];
+extern int lpfc_no_hba_reset_cnt;
+extern unsigned long lpfc_no_hba_reset[];
index c22bb3f887e15b767c8cc53334389cba053c50a9..d3e9af983015ccfc1408a99b61ee190e9730098c 100644 (file)
@@ -939,8 +939,8 @@ lpfc_cmpl_ct_cmd_gft_id(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
                                         "FC4 x%08x, Data: x%08x x%08x\n",
                                         ndlp, did, ndlp->nlp_fc4_type,
                                         FC_TYPE_FCP, FC_TYPE_NVME);
+                       ndlp->nlp_prev_state = NLP_STE_REG_LOGIN_ISSUE;
                }
-               ndlp->nlp_prev_state = NLP_STE_REG_LOGIN_ISSUE;
                lpfc_nlp_set_state(vport, ndlp, NLP_STE_PRLI_ISSUE);
                lpfc_issue_els_prli(vport, ndlp, 0);
        } else
index 9f4798e9d9380dab26d1a506fcf5fe1c43dd7493..913eed822cb8ed85d4d726a2ee3150c7760a4f47 100644 (file)
@@ -3653,17 +3653,6 @@ lpfc_idiag_queacc_write(struct file *file, const char __user *buf,
                        idiag.ptr_private = phba->sli4_hba.nvmels_cq;
                        goto pass_check;
                }
-               /* NVME LS complete queue */
-               if (phba->sli4_hba.nvmels_cq &&
-                   phba->sli4_hba.nvmels_cq->queue_id == queid) {
-                       /* Sanity check */
-                       rc = lpfc_idiag_que_param_check(
-                                       phba->sli4_hba.nvmels_cq, index, count);
-                       if (rc)
-                               goto error_out;
-                       idiag.ptr_private = phba->sli4_hba.nvmels_cq;
-                       goto pass_check;
-               }
                /* FCP complete queue */
                if (phba->sli4_hba.fcp_cq) {
                        for (qidx = 0; qidx < phba->cfg_fcp_io_channel;
@@ -3738,17 +3727,6 @@ lpfc_idiag_queacc_write(struct file *file, const char __user *buf,
                        idiag.ptr_private = phba->sli4_hba.nvmels_wq;
                        goto pass_check;
                }
-               /* NVME LS work queue */
-               if (phba->sli4_hba.nvmels_wq &&
-                   phba->sli4_hba.nvmels_wq->queue_id == queid) {
-                       /* Sanity check */
-                       rc = lpfc_idiag_que_param_check(
-                                       phba->sli4_hba.nvmels_wq, index, count);
-                       if (rc)
-                               goto error_out;
-                       idiag.ptr_private = phba->sli4_hba.nvmels_wq;
-                       goto pass_check;
-               }
                /* FCP work queue */
                if (phba->sli4_hba.fcp_wq) {
                        for (qidx = 0; qidx < phba->cfg_fcp_io_channel;
index c05f56c3023f1edb2ace78b50de25ef99f4b829e..7b7d314af0e0878f4a37eef337ebf29de84a3d1c 100644 (file)
 /* hbqinfo output buffer size */
 #define LPFC_HBQINFO_SIZE 8192
 
-enum {
-       DUMP_FCP,
-       DUMP_NVME,
-       DUMP_MBX,
-       DUMP_ELS,
-       DUMP_NVMELS,
-};
-
 /* nvmestat output buffer size */
 #define LPFC_NVMESTAT_SIZE 8192
 #define LPFC_NVMEKTIME_SIZE 8192
@@ -283,8 +275,22 @@ struct lpfc_idiag {
        struct lpfc_idiag_offset offset;
        void *ptr_private;
 };
+
+#else
+
+#define lpfc_nvmeio_data(phba, fmt, arg...) \
+       no_printk(fmt, ##arg)
+
 #endif
 
+enum {
+       DUMP_FCP,
+       DUMP_NVME,
+       DUMP_MBX,
+       DUMP_ELS,
+       DUMP_NVMELS,
+};
+
 /* Mask for discovery_trace */
 #define LPFC_DISC_TRC_ELS_CMD          0x1     /* Trace ELS commands */
 #define LPFC_DISC_TRC_ELS_RSP          0x2     /* Trace ELS response */
index 2d26440e6f2fe6b3d9d2bc457c112397601409cf..a5ca37e45fb68266303244ca91fdc01d60e08f62 100644 (file)
@@ -5177,15 +5177,15 @@ lpfc_rdp_res_speed(struct fc_rdp_port_speed_desc *desc, struct lpfc_hba *phba)
 
 static uint32_t
 lpfc_rdp_res_diag_port_names(struct fc_rdp_port_name_desc *desc,
-               struct lpfc_hba *phba)
+               struct lpfc_vport *vport)
 {
 
        desc->tag = cpu_to_be32(RDP_PORT_NAMES_DESC_TAG);
 
-       memcpy(desc->port_names.wwnn, phba->wwnn,
+       memcpy(desc->port_names.wwnn, &vport->fc_nodename,
                        sizeof(desc->port_names.wwnn));
 
-       memcpy(desc->port_names.wwpn, phba->wwpn,
+       memcpy(desc->port_names.wwpn, &vport->fc_portname,
                        sizeof(desc->port_names.wwpn));
 
        desc->length = cpu_to_be32(sizeof(desc->port_names));
@@ -5279,7 +5279,7 @@ lpfc_els_rdp_cmpl(struct lpfc_hba *phba, struct lpfc_rdp_context *rdp_context,
        len += lpfc_rdp_res_link_error((struct fc_rdp_link_error_status_desc *)
                                       (len + pcmd), &rdp_context->link_stat);
        len += lpfc_rdp_res_diag_port_names((struct fc_rdp_port_name_desc *)
-                                            (len + pcmd), phba);
+                                            (len + pcmd), vport);
        len += lpfc_rdp_res_attach_port_names((struct fc_rdp_port_name_desc *)
                                        (len + pcmd), vport, ndlp);
        len += lpfc_rdp_res_fec_desc((struct fc_fec_rdp_desc *)(len + pcmd),
@@ -7968,7 +7968,8 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
                        did, vport->port_state, ndlp->nlp_flag);
 
                phba->fc_stat.elsRcvPRLI++;
-               if (vport->port_state < LPFC_DISC_AUTH) {
+               if ((vport->port_state < LPFC_DISC_AUTH) &&
+                   (vport->fc_flag & FC_FABRIC)) {
                        rjt_err = LSRJT_UNABLE_TPC;
                        rjt_exp = LSEXP_NOTHING_MORE;
                        break;
@@ -8371,11 +8372,17 @@ lpfc_cmpl_reg_new_vport(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
                        spin_lock_irq(shost->host_lock);
                        vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
                        spin_unlock_irq(shost->host_lock);
-                       if (vport->port_type == LPFC_PHYSICAL_PORT
-                               && !(vport->fc_flag & FC_LOGO_RCVD_DID_CHNG))
-                               lpfc_issue_init_vfi(vport);
-                       else
+                       if (mb->mbxStatus == MBX_NOT_FINISHED)
+                               break;
+                       if ((vport->port_type == LPFC_PHYSICAL_PORT) &&
+                           !(vport->fc_flag & FC_LOGO_RCVD_DID_CHNG)) {
+                               if (phba->sli_rev == LPFC_SLI_REV4)
+                                       lpfc_issue_init_vfi(vport);
+                               else
+                                       lpfc_initial_flogi(vport);
+                       } else {
                                lpfc_initial_fdisc(vport);
+                       }
                        break;
                }
        } else {
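The lpfc_els_unsol_buffer() PRLI hunk above narrows the early reject: an unsolicited PRLI arriving before LPFC_DISC_AUTH is now refused only when the port sits on a fabric, since in point-to-point mode a PRLI can legitimately arrive that early. A standalone sketch of the guard (the flag bit and state value here are illustrative, not the driver's real definitions):

#include <stdio.h>

#define FC_FABRIC      0x1      /* illustrative flag bit */
#define LPFC_DISC_AUTH 6        /* illustrative state value */

/* reject an early PRLI only on a fabric; in point-to-point mode it
 * may legitimately arrive before the discovery state machine is done */
static int should_reject_prli(int port_state, int fc_flag)
{
        return (port_state < LPFC_DISC_AUTH) && (fc_flag & FC_FABRIC);
}

int main(void)
{
        printf("fabric, early PRLI: %d\n", should_reject_prli(1, FC_FABRIC));
        printf("pt2pt,  early PRLI: %d\n", should_reject_prli(1, 0));
        return 0;
}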
index 194a14d5f8a9821bf7c3ddc562049db294ce3e07..180b072beef6b079c756474d889e3a494ad75131 100644 (file)
@@ -313,8 +313,7 @@ lpfc_dev_loss_tmo_handler(struct lpfc_nodelist *ndlp)
                                 ndlp->nlp_state, ndlp->nlp_rpi);
        }
 
-       if (!(vport->load_flag & FC_UNLOADING) &&
-           !(ndlp->nlp_flag & NLP_DELAY_TMO) &&
+       if (!(ndlp->nlp_flag & NLP_DELAY_TMO) &&
            !(ndlp->nlp_flag & NLP_NPR_2B_DISC) &&
            (ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) &&
            (ndlp->nlp_state != NLP_STE_REG_LOGIN_ISSUE) &&
@@ -641,6 +640,8 @@ lpfc_work_done(struct lpfc_hba *phba)
                        lpfc_handle_rrq_active(phba);
                if (phba->hba_flag & FCP_XRI_ABORT_EVENT)
                        lpfc_sli4_fcp_xri_abort_event_proc(phba);
+               if (phba->hba_flag & NVME_XRI_ABORT_EVENT)
+                       lpfc_sli4_nvme_xri_abort_event_proc(phba);
                if (phba->hba_flag & ELS_XRI_ABORT_EVENT)
                        lpfc_sli4_els_xri_abort_event_proc(phba);
                if (phba->hba_flag & ASYNC_EVENT)
@@ -2173,7 +2174,7 @@ lpfc_mbx_cmpl_fcf_scan_read_fcf_rec(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
        uint32_t boot_flag, addr_mode;
        uint16_t fcf_index, next_fcf_index;
        struct lpfc_fcf_rec *fcf_rec = NULL;
-       uint16_t vlan_id;
+       uint16_t vlan_id = LPFC_FCOE_NULL_VID;
        bool select_new_fcf;
        int rc;
 
@@ -4020,9 +4021,11 @@ lpfc_register_remote_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
                rdata = rport->dd_data;
                /* break the link before dropping the ref */
                ndlp->rport = NULL;
-               if (rdata && rdata->pnode == ndlp)
-                       lpfc_nlp_put(ndlp);
-               rdata->pnode = NULL;
+               if (rdata) {
+                       if (rdata->pnode == ndlp)
+                               lpfc_nlp_put(ndlp);
+                       rdata->pnode = NULL;
+               }
                /* drop reference for earlier registration */
                put_device(&rport->dev);
        }
@@ -4344,9 +4347,8 @@ lpfc_initialize_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
 {
        INIT_LIST_HEAD(&ndlp->els_retry_evt.evt_listp);
        INIT_LIST_HEAD(&ndlp->dev_loss_evt.evt_listp);
-       init_timer(&ndlp->nlp_delayfunc);
-       ndlp->nlp_delayfunc.function = lpfc_els_retry_delay;
-       ndlp->nlp_delayfunc.data = (unsigned long)ndlp;
+       setup_timer(&ndlp->nlp_delayfunc, lpfc_els_retry_delay,
+                       (unsigned long)ndlp);
        ndlp->nlp_DID = did;
        ndlp->vport = vport;
        ndlp->phba = vport->phba;
@@ -4606,9 +4608,9 @@ lpfc_sli4_dequeue_nport_iocbs(struct lpfc_hba *phba,
                pring = qp->pring;
                if (!pring)
                        continue;
-               spin_lock_irq(&pring->ring_lock);
+               spin_lock(&pring->ring_lock);
                __lpfc_dequeue_nport_iocbs(phba, ndlp, pring, dequeue_list);
-               spin_unlock_irq(&pring->ring_lock);
+               spin_unlock(&pring->ring_lock);
        }
        spin_unlock_irq(&phba->hbalock);
 }
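All of the timer hunks in this file, and the matching ones in lpfc_init.c further down, are one mechanical conversion: the three-statement init_timer() open-coding becomes a single setup_timer() call with identical semantics. A kernel-style sketch of the pattern, not a standalone program (my_obj and my_timeout are placeholder names):

/* 4.11-era timer callbacks take the .data cookie as an unsigned long */
struct my_obj {
        struct timer_list tmo;
};

static void my_timeout(unsigned long data)
{
        struct my_obj *obj = (struct my_obj *)data;
        /* ... expiry handling for obj ... */
        (void)obj;
}

static void my_obj_setup(struct my_obj *obj)
{
        /* before: three statements
         *      init_timer(&obj->tmo);
         *      obj->tmo.function = my_timeout;
         *      obj->tmo.data = (unsigned long)obj;
         * after: one call, same effect
         */
        setup_timer(&obj->tmo, my_timeout, (unsigned long)obj);
}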
index cfdb068a3bfccb76046fbc8d0cb000e80bc905ff..15277705cb6b8c650880572837954d91b6bc01eb 100644 (file)
@@ -1001,7 +1001,7 @@ struct eq_delay_info {
        uint32_t phase;
        uint32_t delay_multi;
 };
-#define        LPFC_MAX_EQ_DELAY       8
+#define        LPFC_MAX_EQ_DELAY_EQID_CNT      8
 
 struct sgl_page_pairs {
        uint32_t sgl_pg0_addr_lo;
@@ -1070,7 +1070,7 @@ struct lpfc_mbx_modify_eq_delay {
        union {
                struct {
                        uint32_t num_eq;
-                       struct eq_delay_info eq[LPFC_MAX_EQ_DELAY];
+                       struct eq_delay_info eq[LPFC_MAX_EQ_DELAY_EQID_CNT];
                } request;
                struct {
                        uint32_t word0;
index 0ee429d773f394e826074e6580bbd73c212e26ba..6cc561b042118ed6d172dddb8221d77c8a793ee4 100644 (file)
@@ -3555,6 +3555,42 @@ out_free_mem:
        return rc;
 }
 
+static uint64_t
+lpfc_get_wwpn(struct lpfc_hba *phba)
+{
+       uint64_t wwn;
+       int rc;
+       LPFC_MBOXQ_t *mboxq;
+       MAILBOX_t *mb;
+
+       mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool,
+                                               GFP_KERNEL);
+       if (!mboxq)
+               return (uint64_t)-1;
+
+       /* First get WWN of HBA instance */
+       lpfc_read_nv(phba, mboxq);
+       rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
+       if (rc != MBX_SUCCESS) {
+               lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+                               "6019 Mailbox failed, mbxCmd x%x "
+                               "READ_NV, mbxStatus x%x\n",
+                               bf_get(lpfc_mqe_command, &mboxq->u.mqe),
+                               bf_get(lpfc_mqe_status, &mboxq->u.mqe));
+               mempool_free(mboxq, phba->mbox_mem_pool);
+               return (uint64_t) -1;
+       }
+       mb = &mboxq->u.mb;
+       memcpy(&wwn, (char *)mb->un.varRDnvp.portname, sizeof(uint64_t));
+       /* wwn is WWPN of HBA instance */
+       mempool_free(mboxq, phba->mbox_mem_pool);
+       if (phba->sli_rev == LPFC_SLI_REV4)
+               return be64_to_cpu(wwn);
+       else
+               return (((wwn & 0xffffffff00000000) >> 32) |
+                       ((wwn & 0x00000000ffffffff) << 32));
+}
+
 /**
  * lpfc_sli4_nvme_sgl_update - update xri-sgl sizing and mapping
  * @phba: pointer to lpfc hba data structure.
@@ -3676,17 +3714,32 @@ lpfc_create_port(struct lpfc_hba *phba, int instance, struct device *dev)
        struct lpfc_vport *vport;
        struct Scsi_Host  *shost = NULL;
        int error = 0;
+       int i;
+       uint64_t wwn;
+       bool use_no_reset_hba = false;
+
+       wwn = lpfc_get_wwpn(phba);
+
+       for (i = 0; i < lpfc_no_hba_reset_cnt; i++) {
+               if (wwn == lpfc_no_hba_reset[i]) {
+                       lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+                                       "6020 Setting use_no_reset port=%llx\n",
+                                       wwn);
+                       use_no_reset_hba = true;
+                       break;
+               }
+       }
 
        if (phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP) {
                if (dev != &phba->pcidev->dev) {
                        shost = scsi_host_alloc(&lpfc_vport_template,
                                                sizeof(struct lpfc_vport));
                } else {
-                       if (phba->sli_rev == LPFC_SLI_REV4)
+                       if (!use_no_reset_hba)
                                shost = scsi_host_alloc(&lpfc_template,
                                                sizeof(struct lpfc_vport));
                        else
-                               shost = scsi_host_alloc(&lpfc_template_s3,
+                               shost = scsi_host_alloc(&lpfc_template_no_hr,
                                                sizeof(struct lpfc_vport));
                }
        } else if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
@@ -3734,17 +3787,14 @@ lpfc_create_port(struct lpfc_hba *phba, int instance, struct device *dev)
        INIT_LIST_HEAD(&vport->rcv_buffer_list);
        spin_lock_init(&vport->work_port_lock);
 
-       init_timer(&vport->fc_disctmo);
-       vport->fc_disctmo.function = lpfc_disc_timeout;
-       vport->fc_disctmo.data = (unsigned long)vport;
+       setup_timer(&vport->fc_disctmo, lpfc_disc_timeout,
+                       (unsigned long)vport);
 
-       init_timer(&vport->els_tmofunc);
-       vport->els_tmofunc.function = lpfc_els_timeout;
-       vport->els_tmofunc.data = (unsigned long)vport;
+       setup_timer(&vport->els_tmofunc, lpfc_els_timeout,
+                       (unsigned long)vport);
 
-       init_timer(&vport->delayed_disc_tmo);
-       vport->delayed_disc_tmo.function = lpfc_delayed_disc_tmo;
-       vport->delayed_disc_tmo.data = (unsigned long)vport;
+       setup_timer(&vport->delayed_disc_tmo, lpfc_delayed_disc_tmo,
+                       (unsigned long)vport);
 
        error = scsi_add_host_with_dma(shost, dev, &phba->pcidev->dev);
        if (error)
@@ -5406,21 +5456,15 @@ lpfc_setup_driver_resource_phase1(struct lpfc_hba *phba)
        INIT_LIST_HEAD(&phba->luns);
 
        /* MBOX heartbeat timer */
-       init_timer(&psli->mbox_tmo);
-       psli->mbox_tmo.function = lpfc_mbox_timeout;
-       psli->mbox_tmo.data = (unsigned long) phba;
+       setup_timer(&psli->mbox_tmo, lpfc_mbox_timeout, (unsigned long)phba);
        /* Fabric block timer */
-       init_timer(&phba->fabric_block_timer);
-       phba->fabric_block_timer.function = lpfc_fabric_block_timeout;
-       phba->fabric_block_timer.data = (unsigned long) phba;
+       setup_timer(&phba->fabric_block_timer, lpfc_fabric_block_timeout,
+                       (unsigned long)phba);
        /* EA polling mode timer */
-       init_timer(&phba->eratt_poll);
-       phba->eratt_poll.function = lpfc_poll_eratt;
-       phba->eratt_poll.data = (unsigned long) phba;
+       setup_timer(&phba->eratt_poll, lpfc_poll_eratt,
+                       (unsigned long)phba);
        /* Heartbeat timer */
-       init_timer(&phba->hb_tmofunc);
-       phba->hb_tmofunc.function = lpfc_hb_timeout;
-       phba->hb_tmofunc.data = (unsigned long)phba;
+       setup_timer(&phba->hb_tmofunc, lpfc_hb_timeout, (unsigned long)phba);
 
        return 0;
 }
@@ -5446,9 +5490,8 @@ lpfc_sli_driver_resource_setup(struct lpfc_hba *phba)
         */
 
        /* FCP polling mode timer */
-       init_timer(&phba->fcp_poll_timer);
-       phba->fcp_poll_timer.function = lpfc_poll_timeout;
-       phba->fcp_poll_timer.data = (unsigned long) phba;
+       setup_timer(&phba->fcp_poll_timer, lpfc_poll_timeout,
+                       (unsigned long)phba);
 
        /* Host attention work mask setup */
        phba->work_ha_mask = (HA_ERATT | HA_MBATT | HA_LATT);
@@ -5482,7 +5525,8 @@ lpfc_sli_driver_resource_setup(struct lpfc_hba *phba)
 
        /* Initialize the host templates the configured values. */
        lpfc_vport_template.sg_tablesize = phba->cfg_sg_seg_cnt;
-       lpfc_template_s3.sg_tablesize = phba->cfg_sg_seg_cnt;
+       lpfc_template_no_hr.sg_tablesize = phba->cfg_sg_seg_cnt;
+       lpfc_template.sg_tablesize = phba->cfg_sg_seg_cnt;
 
        /* There are going to be 2 reserved BDEs: 1 FCP cmnd + 1 FCP rsp */
        if (phba->cfg_enable_bg) {
@@ -5617,14 +5661,11 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
         * Initialize timers used by driver
         */
 
-       init_timer(&phba->rrq_tmr);
-       phba->rrq_tmr.function = lpfc_rrq_timeout;
-       phba->rrq_tmr.data = (unsigned long)phba;
+       setup_timer(&phba->rrq_tmr, lpfc_rrq_timeout, (unsigned long)phba);
 
        /* FCF rediscover timer */
-       init_timer(&phba->fcf.redisc_wait);
-       phba->fcf.redisc_wait.function = lpfc_sli4_fcf_redisc_wait_tmo;
-       phba->fcf.redisc_wait.data = (unsigned long)phba;
+       setup_timer(&phba->fcf.redisc_wait, lpfc_sli4_fcf_redisc_wait_tmo,
+                       (unsigned long)phba);
 
        /*
         * Control structure for handling external multi-buffer mailbox
@@ -5706,6 +5747,7 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
        /* Initialize the host templates with the updated values. */
        lpfc_vport_template.sg_tablesize = phba->cfg_sg_seg_cnt;
        lpfc_template.sg_tablesize = phba->cfg_sg_seg_cnt;
+       lpfc_template_no_hr.sg_tablesize = phba->cfg_sg_seg_cnt;
 
        if (phba->cfg_sg_dma_buf_size  <= LPFC_MIN_SG_SLI4_BUF_SZ)
                phba->cfg_sg_dma_buf_size = LPFC_MIN_SG_SLI4_BUF_SZ;
@@ -5736,6 +5778,8 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
                /* Initialize the Abort nvme buffer list used by driver */
                spin_lock_init(&phba->sli4_hba.abts_nvme_buf_list_lock);
                INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_nvme_buf_list);
+               /* Fast-path XRI aborted CQ Event work queue list */
+               INIT_LIST_HEAD(&phba->sli4_hba.sp_nvme_xri_aborted_work_queue);
        }
 
        /* This abort list used by worker thread */
@@ -5847,10 +5891,17 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
                /* Check to see if it matches any module parameter */
                for (i = 0; i < lpfc_enable_nvmet_cnt; i++) {
                        if (wwn == lpfc_enable_nvmet[i]) {
+#if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
                                lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
                                                "6017 NVME Target %016llx\n",
                                                wwn);
                                phba->nvmet_support = 1; /* a match */
+#else
+                               lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+                                               "6021 Can't enable NVME Target."
+                                               " NVME_TARGET_FC infrastructure"
+                                               " is not in kernel\n");
+#endif
                        }
                }
        }
@@ -8712,12 +8763,9 @@ lpfc_sli4_queue_setup(struct lpfc_hba *phba)
                }
        }
 
-       /*
-        * Configure EQ delay multipier for interrupt coalescing using
-        * MODIFY_EQ_DELAY for all EQs created, LPFC_MAX_EQ_DELAY at a time.
-        */
-       for (qidx = 0; qidx < io_channel; qidx += LPFC_MAX_EQ_DELAY)
+       for (qidx = 0; qidx < io_channel; qidx += LPFC_MAX_EQ_DELAY_EQID_CNT)
                lpfc_modify_hba_eq_delay(phba, qidx);
+
        return 0;
 
 out_destroy:
@@ -8973,6 +9021,11 @@ lpfc_sli4_cq_event_release_all(struct lpfc_hba *phba)
        /* Pending ELS XRI abort events */
        list_splice_init(&phba->sli4_hba.sp_els_xri_aborted_work_queue,
                         &cqelist);
+       if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
+               /* Pending NVME XRI abort events */
+               list_splice_init(&phba->sli4_hba.sp_nvme_xri_aborted_work_queue,
+                                &cqelist);
+       }
        /* Pending async events */
        list_splice_init(&phba->sli4_hba.sp_asynce_work_queue,
                         &cqelist);
@@ -10400,12 +10453,7 @@ lpfc_pci_remove_one_s3(struct pci_dev *pdev)
        fc_remove_host(shost);
        scsi_remove_host(shost);
 
-       /* Perform ndlp cleanup on the physical port.  The nvme and nvmet
-        * localports are destroyed after to cleanup all transport memory.
-        */
        lpfc_cleanup(vport);
-       lpfc_nvmet_destroy_targetport(phba);
-       lpfc_nvme_destroy_localport(vport);
 
        /*
         * Bring down the SLI Layer. This step disable all interrupts,
@@ -12018,6 +12066,7 @@ static struct pci_driver lpfc_driver = {
        .id_table       = lpfc_id_table,
        .probe          = lpfc_pci_probe_one,
        .remove         = lpfc_pci_remove_one,
+       .shutdown       = lpfc_pci_remove_one,
        .suspend        = lpfc_pci_suspend_one,
        .resume         = lpfc_pci_resume_one,
        .err_handler    = &lpfc_err_handler,
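lpfc_get_wwpn() above normalizes the READ_NV portname to host order: on SLI4 the mailbox payload is big-endian, so a plain be64_to_cpu() suffices, while the SLI3 branch only needs to swap the two 32-bit halves. A standalone sketch of that word swap (the WWN value is made up):

#include <stdio.h>
#include <stdint.h>

/* swap the two 32-bit halves of a 64-bit WWN, as the SLI3 branch
 * of lpfc_get_wwpn() does */
static uint64_t wwn_word_swap(uint64_t wwn)
{
        return ((wwn & 0xffffffff00000000ULL) >> 32) |
               ((wwn & 0x00000000ffffffffULL) << 32);
}

int main(void)
{
        uint64_t wwn = 0x2000000011223344ULL;   /* made-up WWPN */

        printf("raw  %016llx\n", (unsigned long long)wwn);
        printf("swap %016llx\n", (unsigned long long)wwn_word_swap(wwn));
        return 0;
}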
index c61d8d692edeeeca94512d2eeb0c2fcbdff0b382..5986c7957199df6ef97343a3c0402931cbdeb7ad 100644 (file)
@@ -646,7 +646,6 @@ lpfc_sli4_nvmet_alloc(struct lpfc_hba *phba)
        }
 
        dma_buf->iocbq = lpfc_sli_get_iocbq(phba);
-       dma_buf->iocbq->iocb_flag = LPFC_IO_NVMET;
        if (!dma_buf->iocbq) {
                kfree(dma_buf->context);
                pci_pool_free(phba->lpfc_drb_pool, dma_buf->dbuf.virt,
@@ -658,6 +657,7 @@ lpfc_sli4_nvmet_alloc(struct lpfc_hba *phba)
                                "2621 Ran out of nvmet iocb/WQEs\n");
                return NULL;
        }
+       dma_buf->iocbq->iocb_flag = LPFC_IO_NVMET;
        nvmewqe = dma_buf->iocbq;
        wqe = (union lpfc_wqe128 *)&nvmewqe->wqe;
        /* Initialize WQE */
index 609a908ea9db5ba291e678c296726d69cae784f9..0024de1c6c1fea8e4568007a23296dd46f709826 100644 (file)
@@ -316,7 +316,7 @@ lpfc_nvme_gen_req(struct lpfc_vport *vport, struct lpfc_dmabuf *bmp,
        bf_set(wqe_dfctl, &wqe->gen_req.wge_ctl, 0);
        bf_set(wqe_si, &wqe->gen_req.wge_ctl, 1);
        bf_set(wqe_la, &wqe->gen_req.wge_ctl, 1);
-       bf_set(wqe_rctl, &wqe->gen_req.wge_ctl, FC_RCTL_DD_UNSOL_CTL);
+       bf_set(wqe_rctl, &wqe->gen_req.wge_ctl, FC_RCTL_ELS4_REQ);
        bf_set(wqe_type, &wqe->gen_req.wge_ctl, FC_TYPE_NVME);
 
        /* Word 6 */
@@ -620,15 +620,15 @@ lpfc_nvme_adj_fcp_sgls(struct lpfc_vport *vport,
         * Embed the payload in the last half of the WQE
         * WQE words 16-30 get the NVME CMD IU payload
         *
-        * WQE Word 16 is already setup with flags
-        * WQE words 17-19 get payload Words 2-4
+        * WQE words 16-19 get payload Words 1-4
         * WQE words 20-21 get payload Words 6-7
         * WQE words 22-29 get payload Words 16-23
         */
-       wptr = &wqe->words[17];  /* WQE ptr */
+       wptr = &wqe->words[16];  /* WQE ptr */
        dptr = (uint32_t *)nCmd->cmdaddr;  /* payload ptr */
-       dptr += 2;              /* Skip Words 0-1 in payload */
+       dptr++;                 /* Skip Word 0 in payload */
 
+       *wptr++ = *dptr++;      /* Word 1 */
        *wptr++ = *dptr++;      /* Word 2 */
        *wptr++ = *dptr++;      /* Word 3 */
        *wptr++ = *dptr++;      /* Word 4 */
@@ -978,9 +978,6 @@ lpfc_nvme_prep_io_cmd(struct lpfc_vport *vport,
                        bf_set(wqe_cmd_type, &wqe->generic.wqe_com,
                               NVME_WRITE_CMD);
 
-                       /* Word 16 */
-                       wqe->words[16] = LPFC_NVME_EMBED_WRITE;
-
                        phba->fc4NvmeOutputRequests++;
                } else {
                        /* Word 7 */
@@ -1002,9 +999,6 @@ lpfc_nvme_prep_io_cmd(struct lpfc_vport *vport,
                        bf_set(wqe_cmd_type, &wqe->generic.wqe_com,
                               NVME_READ_CMD);
 
-                       /* Word 16 */
-                       wqe->words[16] = LPFC_NVME_EMBED_READ;
-
                        phba->fc4NvmeInputRequests++;
                }
        } else {
@@ -1026,9 +1020,6 @@ lpfc_nvme_prep_io_cmd(struct lpfc_vport *vport,
                /* Word 11 */
                bf_set(wqe_cmd_type, &wqe->generic.wqe_com, NVME_READ_CMD);
 
-               /* Word 16 */
-               wqe->words[16] = LPFC_NVME_EMBED_CMD;
-
                phba->fc4NvmeControlRequests++;
        }
        /*
@@ -1286,6 +1277,7 @@ lpfc_nvme_fcp_io_submit(struct nvme_fc_local_port *pnvme_lport,
        pnvme_fcreq->private = (void *)lpfc_ncmd;
        lpfc_ncmd->nvmeCmd = pnvme_fcreq;
        lpfc_ncmd->nrport = rport;
+       lpfc_ncmd->ndlp = ndlp;
        lpfc_ncmd->start_time = jiffies;
 
        lpfc_nvme_prep_io_cmd(vport, lpfc_ncmd, ndlp);
@@ -1319,7 +1311,7 @@ lpfc_nvme_fcp_io_submit(struct nvme_fc_local_port *pnvme_lport,
                                 "sid: x%x did: x%x oxid: x%x\n",
                                 ret, vport->fc_myDID, ndlp->nlp_DID,
                                 lpfc_ncmd->cur_iocbq.sli4_xritag);
-               ret = -EINVAL;
+               ret = -EBUSY;
                goto out_free_nvme_buf;
        }
 
@@ -1821,10 +1813,10 @@ lpfc_post_nvme_sgl_list(struct lpfc_hba *phba,
                                                pdma_phys_sgl1, cur_xritag);
                                if (status) {
                                        /* failure, put on abort nvme list */
-                                       lpfc_ncmd->exch_busy = 1;
+                                       lpfc_ncmd->flags |= LPFC_SBUF_XBUSY;
                                } else {
                                        /* success, put on NVME buffer list */
-                                       lpfc_ncmd->exch_busy = 0;
+                                       lpfc_ncmd->flags &= ~LPFC_SBUF_XBUSY;
                                        lpfc_ncmd->status = IOSTAT_SUCCESS;
                                        num_posted++;
                                }
@@ -1854,10 +1846,10 @@ lpfc_post_nvme_sgl_list(struct lpfc_hba *phba,
                                         struct lpfc_nvme_buf, list);
                        if (status) {
                                /* failure, put on abort nvme list */
-                               lpfc_ncmd->exch_busy = 1;
+                               lpfc_ncmd->flags |= LPFC_SBUF_XBUSY;
                        } else {
                                /* success, put on NVME buffer list */
-                               lpfc_ncmd->exch_busy = 0;
+                               lpfc_ncmd->flags &= ~LPFC_SBUF_XBUSY;
                                lpfc_ncmd->status = IOSTAT_SUCCESS;
                                num_posted++;
                        }
@@ -2099,7 +2091,7 @@ lpfc_release_nvme_buf(struct lpfc_hba *phba, struct lpfc_nvme_buf *lpfc_ncmd)
        unsigned long iflag = 0;
 
        lpfc_ncmd->nonsg_phys = 0;
-       if (lpfc_ncmd->exch_busy) {
+       if (lpfc_ncmd->flags & LPFC_SBUF_XBUSY) {
                spin_lock_irqsave(&phba->sli4_hba.abts_nvme_buf_list_lock,
                                        iflag);
                lpfc_ncmd->nvmeCmd = NULL;
@@ -2135,11 +2127,12 @@ lpfc_release_nvme_buf(struct lpfc_hba *phba, struct lpfc_nvme_buf *lpfc_ncmd)
 int
 lpfc_nvme_create_localport(struct lpfc_vport *vport)
 {
+       int ret = 0;
        struct lpfc_hba  *phba = vport->phba;
        struct nvme_fc_port_info nfcp_info;
        struct nvme_fc_local_port *localport;
        struct lpfc_nvme_lport *lport;
-       int len, ret = 0;
+       int len;
 
        /* Initialize this localport instance.  The vport wwn usage ensures
         * that NPIV is accounted for.
@@ -2156,8 +2149,12 @@ lpfc_nvme_create_localport(struct lpfc_vport *vport)
        /* localport is allocated from the stack, but the registration
         * call allocates heap memory as well as the private area.
         */
+#if (IS_ENABLED(CONFIG_NVME_FC))
        ret = nvme_fc_register_localport(&nfcp_info, &lpfc_nvme_template,
                                         &vport->phba->pcidev->dev, &localport);
+#else
+       ret = -ENOMEM;
+#endif
        if (!ret) {
                lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME | LOG_NVME_DISC,
                                 "6005 Successfully registered local "
@@ -2173,10 +2170,10 @@ lpfc_nvme_create_localport(struct lpfc_vport *vport)
                lport->vport = vport;
                INIT_LIST_HEAD(&lport->rport_list);
                vport->nvmei_support = 1;
+               len  = lpfc_new_nvme_buf(vport, phba->sli4_hba.nvme_xri_max);
+               vport->phba->total_nvme_bufs += len;
        }
 
-       len  = lpfc_new_nvme_buf(vport, phba->sli4_hba.nvme_xri_max);
-       vport->phba->total_nvme_bufs += len;
        return ret;
 }
 
@@ -2193,6 +2190,7 @@ lpfc_nvme_create_localport(struct lpfc_vport *vport)
 void
 lpfc_nvme_destroy_localport(struct lpfc_vport *vport)
 {
+#if (IS_ENABLED(CONFIG_NVME_FC))
        struct nvme_fc_local_port *localport;
        struct lpfc_nvme_lport *lport;
        struct lpfc_nvme_rport *rport = NULL, *rport_next = NULL;
@@ -2208,7 +2206,6 @@ lpfc_nvme_destroy_localport(struct lpfc_vport *vport)
        lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME,
                         "6011 Destroying NVME localport %p\n",
                         localport);
-
        list_for_each_entry_safe(rport, rport_next, &lport->rport_list, list) {
                /* The last node ref has to get released now before the rport
                 * private memory area is released by the transport.
@@ -2222,6 +2219,7 @@ lpfc_nvme_destroy_localport(struct lpfc_vport *vport)
                                         "6008 rport fail destroy %x\n", ret);
                wait_for_completion_timeout(&rport->rport_unreg_done, 5);
        }
+
        /* lport's rport list is clear.  Unregister
         * lport and release resources.
         */
@@ -2245,6 +2243,7 @@ lpfc_nvme_destroy_localport(struct lpfc_vport *vport)
                                 "Failed, status x%x\n",
                                 ret);
        }
+#endif
 }
 
 void
@@ -2275,6 +2274,7 @@ lpfc_nvme_update_localport(struct lpfc_vport *vport)
 int
 lpfc_nvme_register_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
 {
+#if (IS_ENABLED(CONFIG_NVME_FC))
        int ret = 0;
        struct nvme_fc_local_port *localport;
        struct lpfc_nvme_lport *lport;
@@ -2348,7 +2348,6 @@ lpfc_nvme_register_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
                        rpinfo.port_role |= FC_PORT_ROLE_NVME_INITIATOR;
                rpinfo.port_name = wwn_to_u64(ndlp->nlp_portname.u.wwn);
                rpinfo.node_name = wwn_to_u64(ndlp->nlp_nodename.u.wwn);
-
                ret = nvme_fc_register_remoteport(localport, &rpinfo,
                                                  &remote_port);
                if (!ret) {
@@ -2384,6 +2383,9 @@ lpfc_nvme_register_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
                                 ndlp->nlp_type, ndlp->nlp_DID, ndlp);
        }
        return ret;
+#else
+       return 0;
+#endif
 }
 
 /* lpfc_nvme_unregister_port - unbind the DID and port_role from this rport.
@@ -2401,6 +2403,7 @@ lpfc_nvme_register_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
 void
 lpfc_nvme_unregister_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
 {
+#if (IS_ENABLED(CONFIG_NVME_FC))
        int ret;
        struct nvme_fc_local_port *localport;
        struct lpfc_nvme_lport *lport;
@@ -2458,7 +2461,61 @@ lpfc_nvme_unregister_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
        return;
 
  input_err:
+#endif
        lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_DISC,
                         "6168: State error: lport %p, rport%p FCID x%06x\n",
                         vport->localport, ndlp->rport, ndlp->nlp_DID);
 }
+
+/**
+ * lpfc_sli4_nvme_xri_aborted - Fast-path process of NVME xri abort
+ * @phba: pointer to lpfc hba data structure.
+ * @axri: pointer to the nvme xri abort wcqe structure.
+ *
+ * This routine is invoked by the worker thread to process a SLI4 fast-path
+ * NVME aborted xri.
+ **/
+void
+lpfc_sli4_nvme_xri_aborted(struct lpfc_hba *phba,
+                          struct sli4_wcqe_xri_aborted *axri)
+{
+       uint16_t xri = bf_get(lpfc_wcqe_xa_xri, axri);
+       uint16_t rxid = bf_get(lpfc_wcqe_xa_remote_xid, axri);
+       struct lpfc_nvme_buf *lpfc_ncmd, *next_lpfc_ncmd;
+       struct lpfc_nodelist *ndlp;
+       unsigned long iflag = 0;
+       int rrq_empty = 0;
+
+       if (!(phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME))
+               return;
+       spin_lock_irqsave(&phba->hbalock, iflag);
+       spin_lock(&phba->sli4_hba.abts_nvme_buf_list_lock);
+       list_for_each_entry_safe(lpfc_ncmd, next_lpfc_ncmd,
+                                &phba->sli4_hba.lpfc_abts_nvme_buf_list,
+                                list) {
+               if (lpfc_ncmd->cur_iocbq.sli4_xritag == xri) {
+                       list_del(&lpfc_ncmd->list);
+                       lpfc_ncmd->flags &= ~LPFC_SBUF_XBUSY;
+                       lpfc_ncmd->status = IOSTAT_SUCCESS;
+                       spin_unlock(
+                               &phba->sli4_hba.abts_nvme_buf_list_lock);
+
+                       rrq_empty = list_empty(&phba->active_rrq_list);
+                       spin_unlock_irqrestore(&phba->hbalock, iflag);
+                       ndlp = lpfc_ncmd->ndlp;
+                       if (ndlp) {
+                               lpfc_set_rrq_active(
+                                       phba, ndlp,
+                                       lpfc_ncmd->cur_iocbq.sli4_lxritag,
+                                       rxid, 1);
+                               lpfc_sli4_abts_err_handler(phba, ndlp, axri);
+                       }
+                       lpfc_release_nvme_buf(phba, lpfc_ncmd);
+                       if (rrq_empty)
+                               lpfc_worker_wake_up(phba);
+                       return;
+               }
+       }
+       spin_unlock(&phba->sli4_hba.abts_nvme_buf_list_lock);
+       spin_unlock_irqrestore(&phba->hbalock, iflag);
+}
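The #if (IS_ENABLED(CONFIG_NVME_FC)) guards added throughout this file let lpfc build when the NVME FC transport is compiled out: registration paths fall back to an error, and teardown and unregister paths become no-ops. A simplified standalone sketch of the guard (the real IS_ENABLED() also recognizes =m options, which this stand-in ignores; fake_register_localport is a placeholder for the transport call):

#include <stdio.h>
#include <errno.h>

/* Simplified stand-in for the kernel's IS_ENABLED() */
#define IS_ENABLED(option) (option)
#define CONFIG_NVME_FC 0        /* pretend the transport is absent */

static int fake_register_localport(void)
{
        return 0;               /* placeholder for the transport call */
}

static int create_localport(void)
{
#if (IS_ENABLED(CONFIG_NVME_FC))
        return fake_register_localport();
#else
        (void)fake_register_localport;
        return -ENOMEM;         /* same fallback the hunk above uses */
#endif
}

int main(void)
{
        printf("create_localport: %d\n", create_localport());
        return 0;
}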
index b2fae5e813f8a82b17b53a5cd5affd8f7e5d6bad..1347deb8dd6cbd3e56db9f615cbc55e8290fc443 100644 (file)
@@ -57,6 +57,7 @@ struct lpfc_nvme_buf {
        struct list_head list;
        struct nvmefc_fcp_req *nvmeCmd;
        struct lpfc_nvme_rport *nrport;
+       struct lpfc_nodelist *ndlp;
 
        uint32_t timeout;
 
index c421e1738ee989efcca922da9b145995cc164c72..acba1b67e505e9bbe57cbb4ea86c79a0a1e079f7 100644 (file)
@@ -520,7 +520,7 @@ lpfc_nvmet_xmt_fcp_op(struct nvmet_fc_target_port *tgtport,
        struct lpfc_hba *phba = ctxp->phba;
        struct lpfc_iocbq *nvmewqeq;
        unsigned long iflags;
-       int rc, id;
+       int rc;
 
 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
        if (phba->ktime_on) {
@@ -530,7 +530,7 @@ lpfc_nvmet_xmt_fcp_op(struct nvmet_fc_target_port *tgtport,
                        ctxp->ts_nvme_data = ktime_get_ns();
        }
        if (phba->cpucheck_on & LPFC_CHECK_NVMET_IO) {
-               id = smp_processor_id();
+               int id = smp_processor_id();
                ctxp->cpu = id;
                if (id < LPFC_CHECK_CPU_CNT)
                        phba->cpucheck_xmt_io[id]++;
@@ -571,6 +571,7 @@ lpfc_nvmet_xmt_fcp_op(struct nvmet_fc_target_port *tgtport,
                lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
                                "6102 Bad state IO x%x aborted\n",
                                ctxp->oxid);
+               rc = -ENXIO;
                goto aerr;
        }
 
@@ -580,6 +581,7 @@ lpfc_nvmet_xmt_fcp_op(struct nvmet_fc_target_port *tgtport,
                lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
                                "6152 FCP Drop IO x%x: Prep\n",
                                ctxp->oxid);
+               rc = -ENXIO;
                goto aerr;
        }
 
@@ -618,8 +620,9 @@ lpfc_nvmet_xmt_fcp_op(struct nvmet_fc_target_port *tgtport,
        ctxp->wqeq->hba_wqidx = 0;
        nvmewqeq->context2 = NULL;
        nvmewqeq->context3 = NULL;
+       rc = -EBUSY;
 aerr:
-       return -ENXIO;
+       return rc;
 }
 
 static void
@@ -668,9 +671,13 @@ lpfc_nvmet_create_targetport(struct lpfc_hba *phba)
        lpfc_tgttemplate.target_features = NVMET_FCTGTFEAT_READDATA_RSP |
                                           NVMET_FCTGTFEAT_NEEDS_CMD_CPUSCHED;
 
+#if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
        error = nvmet_fc_register_targetport(&pinfo, &lpfc_tgttemplate,
                                             &phba->pcidev->dev,
                                             &phba->targetport);
+#else
+       error = -ENOMEM;
+#endif
        if (error) {
                lpfc_printf_log(phba, KERN_ERR, LOG_NVME_DISC,
                                "6025 Cannot register NVME targetport "
@@ -731,9 +738,25 @@ lpfc_nvmet_update_targetport(struct lpfc_hba *phba)
        return 0;
 }
 
+/**
+ * lpfc_sli4_nvmet_xri_aborted - Fast-path process of nvmet xri abort
+ * @phba: pointer to lpfc hba data structure.
+ * @axri: pointer to the nvmet xri abort wcqe structure.
+ *
+ * This routine is invoked by the worker thread to process a SLI4 fast-path
+ * NVMET aborted xri.
+ **/
+void
+lpfc_sli4_nvmet_xri_aborted(struct lpfc_hba *phba,
+                           struct sli4_wcqe_xri_aborted *axri)
+{
+       /* TODO: work in progress */
+}
+
 void
 lpfc_nvmet_destroy_targetport(struct lpfc_hba *phba)
 {
+#if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
        struct lpfc_nvmet_tgtport *tgtp;
 
        if (phba->nvmet_support == 0)
@@ -745,6 +768,7 @@ lpfc_nvmet_destroy_targetport(struct lpfc_hba *phba)
                wait_for_completion_timeout(&tgtp->tport_unreg_done, 5);
        }
        phba->targetport = NULL;
+#endif
 }
 
 /**
@@ -764,6 +788,7 @@ static void
 lpfc_nvmet_unsol_ls_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
                           struct hbq_dmabuf *nvmebuf)
 {
+#if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
        struct lpfc_nvmet_tgtport *tgtp;
        struct fc_frame_header *fc_hdr;
        struct lpfc_nvmet_rcv_ctx *ctxp;
@@ -844,6 +869,7 @@ dropit:
 
        atomic_inc(&tgtp->xmt_ls_abort);
        lpfc_nvmet_unsol_ls_issue_abort(phba, ctxp, sid, oxid);
+#endif
 }
 
 /**
@@ -865,6 +891,7 @@ lpfc_nvmet_unsol_fcp_buffer(struct lpfc_hba *phba,
                            struct rqb_dmabuf *nvmebuf,
                            uint64_t isr_timestamp)
 {
+#if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
        struct lpfc_nvmet_rcv_ctx *ctxp;
        struct lpfc_nvmet_tgtport *tgtp;
        struct fc_frame_header *fc_hdr;
@@ -955,7 +982,7 @@ lpfc_nvmet_unsol_fcp_buffer(struct lpfc_hba *phba,
 
        atomic_inc(&tgtp->rcv_fcp_cmd_drop);
        lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
-                       "6159 FCP Drop IO x%x: nvmet_fc_rcv_fcp_req x%x\n",
+                       "6159 FCP Drop IO x%x: err x%x\n",
                        ctxp->oxid, rc);
 dropit:
        lpfc_nvmeio_data(phba, "NVMET FCP DROP: xri x%x sz %d from %06x\n",
@@ -970,6 +997,7 @@ dropit:
                /* We assume a rcv'ed cmd ALWAYs fits into 1 buffer */
                lpfc_nvmet_rq_post(phba, NULL, &nvmebuf->hbuf);
        }
+#endif
 }
 
 /**
@@ -1114,7 +1142,7 @@ lpfc_nvmet_prep_ls_wqe(struct lpfc_hba *phba,
        bf_set(wqe_dfctl, &wqe->xmit_sequence.wge_ctl, 0);
        bf_set(wqe_ls, &wqe->xmit_sequence.wge_ctl, 1);
        bf_set(wqe_la, &wqe->xmit_sequence.wge_ctl, 0);
-       bf_set(wqe_rctl, &wqe->xmit_sequence.wge_ctl, FC_RCTL_DD_SOL_CTL);
+       bf_set(wqe_rctl, &wqe->xmit_sequence.wge_ctl, FC_RCTL_ELS4_REP);
        bf_set(wqe_type, &wqe->xmit_sequence.wge_ctl, FC_TYPE_NVME);
 
        /* Word 6 */
@@ -1445,7 +1473,6 @@ lpfc_nvmet_prep_fcp_wqe(struct lpfc_hba *phba,
 
        case NVMET_FCOP_RSP:
                /* Words 0 - 2 */
-               sgel = &rsp->sg[0];
                physaddr = rsp->rspdma;
                wqe->fcp_trsp.bde.tus.f.bdeFlags = BUFF_TYPE_BDE_64;
                wqe->fcp_trsp.bde.tus.f.bdeSize = rsp->rsplen;
@@ -1681,8 +1708,8 @@ lpfc_nvmet_unsol_issue_abort(struct lpfc_hba *phba,
        struct lpfc_nodelist *ndlp;
 
        lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
-                       "6067 %s: Entrypoint: sid %x xri %x\n", __func__,
-                       sid, xri);
+                       "6067 Abort: sid %x xri x%x/x%x\n",
+                       sid, xri, ctxp->wqeq->sli4_xritag);
 
        tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
 
@@ -1693,7 +1720,7 @@ lpfc_nvmet_unsol_issue_abort(struct lpfc_hba *phba,
                atomic_inc(&tgtp->xmt_abort_rsp_error);
                lpfc_printf_log(phba, KERN_WARNING, LOG_NVME_ABTS,
                                "6134 Drop ABTS - wrong NDLP state x%x.\n",
-                               ndlp->nlp_state);
+                               (ndlp) ? ndlp->nlp_state : NLP_STE_MAX_STATE);
 
                /* No failure to an ABTS request. */
                return 0;
@@ -1791,7 +1818,7 @@ lpfc_nvmet_sol_fcp_issue_abort(struct lpfc_hba *phba,
                atomic_inc(&tgtp->xmt_abort_rsp_error);
                lpfc_printf_log(phba, KERN_WARNING, LOG_NVME_ABTS,
                                "6160 Drop ABTS - wrong NDLP state x%x.\n",
-                               ndlp->nlp_state);
+                               (ndlp) ? ndlp->nlp_state : NLP_STE_MAX_STATE);
 
                /* No failure to an ABTS request. */
                return 0;
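The lpfc_nvmet_xmt_fcp_op() hunks above replace the single hardcoded -ENXIO at the exit label with a per-site rc, so a full work queue now surfaces as -EBUSY, which the transport can treat as retryable. A standalone sketch of that error-propagation shape (xmt_op and its parameters are illustrative):

#include <stdio.h>
#include <errno.h>

static int xmt_op(int state_ok, int prep_ok, int queue_full)
{
        int rc;

        if (!state_ok) {
                rc = -ENXIO;            /* bad IO state */
                goto aerr;
        }
        if (!prep_ok) {
                rc = -ENXIO;            /* prep failed */
                goto aerr;
        }
        if (queue_full) {
                rc = -EBUSY;            /* WQ full: retryable */
                goto aerr;
        }
        return 0;
aerr:
        return rc;
}

int main(void)
{
        printf("%d %d %d\n", xmt_op(0, 1, 0), xmt_op(1, 0, 0),
               xmt_op(1, 1, 1));
        return 0;
}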
index 9d6384af9fce7e9b321e8031a421869095fbab55..54fd0c81ceaf69a7ceb475acb87e309df7e10de1 100644 (file)
@@ -5953,12 +5953,13 @@ struct scsi_host_template lpfc_template_nvme = {
        .track_queue_depth      = 0,
 };
 
-struct scsi_host_template lpfc_template_s3 = {
+struct scsi_host_template lpfc_template_no_hr = {
        .module                 = THIS_MODULE,
        .name                   = LPFC_DRIVER_NAME,
        .proc_name              = LPFC_DRIVER_NAME,
        .info                   = lpfc_info,
        .queuecommand           = lpfc_queuecommand,
+       .eh_timed_out           = fc_eh_timed_out,
        .eh_abort_handler       = lpfc_abort_handler,
        .eh_device_reset_handler = lpfc_device_reset_handler,
        .eh_target_reset_handler = lpfc_target_reset_handler,
@@ -6015,7 +6016,6 @@ struct scsi_host_template lpfc_vport_template = {
        .eh_abort_handler       = lpfc_abort_handler,
        .eh_device_reset_handler = lpfc_device_reset_handler,
        .eh_target_reset_handler = lpfc_target_reset_handler,
-       .eh_bus_reset_handler   = lpfc_bus_reset_handler,
        .slave_alloc            = lpfc_slave_alloc,
        .slave_configure        = lpfc_slave_configure,
        .slave_destroy          = lpfc_slave_destroy,
index e43e5e23c24b475f3f8930bcae9cb67e4685153e..1c9fa45df7eb5deec4c8988b18ffe36f4621b267 100644 (file)
@@ -1,3 +1,4 @@
+
 /*******************************************************************
  * This file is part of the Emulex Linux Device Driver for         *
  * Fibre Channel Host Bus Adapters.                                *
@@ -952,7 +953,7 @@ __lpfc_sli_get_els_sglq(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq)
        start_sglq = sglq;
        while (!found) {
                if (!sglq)
-                       return NULL;
+                       break;
                if (ndlp && ndlp->active_rrqs_xri_bitmap &&
                    test_bit(sglq->sli4_lxritag,
                    ndlp->active_rrqs_xri_bitmap)) {
@@ -12212,6 +12213,41 @@ void lpfc_sli4_fcp_xri_abort_event_proc(struct lpfc_hba *phba)
        }
 }
 
+/**
+ * lpfc_sli4_nvme_xri_abort_event_proc - Process nvme xri abort event
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is invoked by the worker thread to process all the pending
+ * SLI4 NVME abort XRI events.
+ **/
+void lpfc_sli4_nvme_xri_abort_event_proc(struct lpfc_hba *phba)
+{
+       struct lpfc_cq_event *cq_event;
+
+       /* First, declare the nvme xri abort event has been handled */
+       spin_lock_irq(&phba->hbalock);
+       phba->hba_flag &= ~NVME_XRI_ABORT_EVENT;
+       spin_unlock_irq(&phba->hbalock);
+       /* Now, handle all the nvme xri abort events */
+       while (!list_empty(&phba->sli4_hba.sp_nvme_xri_aborted_work_queue)) {
+               /* Get the first event from the head of the event queue */
+               spin_lock_irq(&phba->hbalock);
+               list_remove_head(&phba->sli4_hba.sp_nvme_xri_aborted_work_queue,
+                                cq_event, struct lpfc_cq_event, list);
+               spin_unlock_irq(&phba->hbalock);
+               /* Notify aborted XRI for NVME work queue */
+               if (phba->nvmet_support) {
+                       lpfc_sli4_nvmet_xri_aborted(phba,
+                                                   &cq_event->cqe.wcqe_axri);
+               } else {
+                       lpfc_sli4_nvme_xri_aborted(phba,
+                                                  &cq_event->cqe.wcqe_axri);
+               }
+               /* Free the event processed back to the free pool */
+               lpfc_sli4_cq_event_release(phba, cq_event);
+       }
+}
+
 /**
  * lpfc_sli4_els_xri_abort_event_proc - Process els xri abort event
  * @phba: pointer to lpfc hba data structure.
@@ -12709,10 +12745,22 @@ lpfc_sli4_sp_handle_abort_xri_wcqe(struct lpfc_hba *phba,
                spin_unlock_irqrestore(&phba->hbalock, iflags);
                workposted = true;
                break;
+       case LPFC_NVME:
+               spin_lock_irqsave(&phba->hbalock, iflags);
+               list_add_tail(&cq_event->list,
+                             &phba->sli4_hba.sp_nvme_xri_aborted_work_queue);
+               /* Set the nvme xri abort event flag */
+               phba->hba_flag |= NVME_XRI_ABORT_EVENT;
+               spin_unlock_irqrestore(&phba->hbalock, iflags);
+               workposted = true;
+               break;
        default:
                lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
-                               "0603 Invalid work queue CQE subtype (x%x)\n",
-                               cq->subtype);
+                               "0603 Invalid CQ subtype %d: "
+                               "%08x %08x %08x %08x\n",
+                               cq->subtype, wcqe->word0, wcqe->parameter,
+                               wcqe->word2, wcqe->word3);
+               lpfc_sli4_cq_event_release(phba, cq_event);
                workposted = false;
                break;
        }
@@ -13827,6 +13875,8 @@ lpfc_dual_chute_pci_bar_map(struct lpfc_hba *phba, uint16_t pci_barset)
  * @startq: The starting FCP EQ to modify
  *
  * This function sends a MODIFY_EQ_DELAY mailbox command to the HBA.
+ * The command allows up to LPFC_MAX_EQ_DELAY_EQID_CNT EQ IDs to be
+ * updated in one mailbox command.
  *
  * The @phba struct is used to send mailbox command to HBA. The @startq
  * is used to get the starting FCP EQ to change.
@@ -13879,7 +13929,7 @@ lpfc_modify_hba_eq_delay(struct lpfc_hba *phba, uint32_t startq)
                eq_delay->u.request.eq[cnt].phase = 0;
                eq_delay->u.request.eq[cnt].delay_multi = dmult;
                cnt++;
-               if (cnt >= LPFC_MAX_EQ_DELAY)
+               if (cnt >= LPFC_MAX_EQ_DELAY_EQID_CNT)
                        break;
        }
        eq_delay->u.request.num_eq = cnt;
@@ -15185,17 +15235,17 @@ lpfc_mrq_create(struct lpfc_hba *phba, struct lpfc_queue **hrqp,
                drq = drqp[idx];
                cq  = cqp[idx];
 
-               if (hrq->entry_count != drq->entry_count) {
-                       status = -EINVAL;
-                       goto out;
-               }
-
                /* sanity check on queue memory */
                if (!hrq || !drq || !cq) {
                        status = -ENODEV;
                        goto out;
                }
 
+               if (hrq->entry_count != drq->entry_count) {
+                       status = -EINVAL;
+                       goto out;
+               }
+
                if (idx == 0) {
                        bf_set(lpfc_mbx_rq_create_num_pages,
                               &rq_create->u.request,
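The lpfc_mrq_create() hunk above fixes an ordering bug: the entry_count comparison dereferenced hrq and drq before the NULL sanity check ran. A standalone sketch of the before/after (struct and function names are illustrative):

#include <stdio.h>
#include <stddef.h>
#include <errno.h>

struct queue { int entry_count; };

/* buggy order: dereferences before the NULL sanity check */
static int check_buggy(struct queue *hrq, struct queue *drq)
{
        if (hrq->entry_count != drq->entry_count)   /* may crash */
                return -EINVAL;
        if (!hrq || !drq)
                return -ENODEV;
        return 0;
}

/* fixed order, as in the hunk above: validate pointers first */
static int check_fixed(struct queue *hrq, struct queue *drq)
{
        if (!hrq || !drq)
                return -ENODEV;
        if (hrq->entry_count != drq->entry_count)
                return -EINVAL;
        return 0;
}

int main(void)
{
        struct queue a = { 128 }, b = { 128 };

        printf("buggy, matching: %d\n", check_buggy(&a, &b));
        printf("fixed, matching: %d\n", check_fixed(&a, &b));
        printf("fixed, NULL drq: %d\n", check_fixed(&a, NULL));
        /* check_buggy(&a, NULL) would dereference a NULL pointer */
        return 0;
}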
index 91153c9f6d18259b3978b7f3218ad76198170c34..710458cf11d62f77c7a48973aded3f255b233277 100644 (file)
@@ -642,6 +642,7 @@ struct lpfc_sli4_hba {
        struct list_head sp_asynce_work_queue;
        struct list_head sp_fcp_xri_aborted_work_queue;
        struct list_head sp_els_xri_aborted_work_queue;
+       struct list_head sp_nvme_xri_aborted_work_queue;
        struct list_head sp_unsol_work_queue;
        struct lpfc_sli4_link link_state;
        struct lpfc_sli4_lnk_info lnk_info;
@@ -794,9 +795,14 @@ void lpfc_sli4_fcf_redisc_event_proc(struct lpfc_hba *);
 int lpfc_sli4_resume_rpi(struct lpfc_nodelist *,
                        void (*)(struct lpfc_hba *, LPFC_MBOXQ_t *), void *);
 void lpfc_sli4_fcp_xri_abort_event_proc(struct lpfc_hba *);
+void lpfc_sli4_nvme_xri_abort_event_proc(struct lpfc_hba *phba);
 void lpfc_sli4_els_xri_abort_event_proc(struct lpfc_hba *);
 void lpfc_sli4_fcp_xri_aborted(struct lpfc_hba *,
                               struct sli4_wcqe_xri_aborted *);
+void lpfc_sli4_nvme_xri_aborted(struct lpfc_hba *phba,
+                               struct sli4_wcqe_xri_aborted *axri);
+void lpfc_sli4_nvmet_xri_aborted(struct lpfc_hba *phba,
+                                struct sli4_wcqe_xri_aborted *axri);
 void lpfc_sli4_els_xri_aborted(struct lpfc_hba *,
                               struct sli4_wcqe_xri_aborted *);
 void lpfc_sli4_vport_delete_els_xri_aborted(struct lpfc_vport *);
index 86c6c9b26b823a04dd5afbdff4688901d577c82d..d4e95e28f4e3d55a6ec97162cfdb1e26998e4fe1 100644 (file)
@@ -20,7 +20,7 @@
  * included with this package.                                     *
  *******************************************************************/
 
-#define LPFC_DRIVER_VERSION "11.2.0.7"
+#define LPFC_DRIVER_VERSION "11.2.0.10"
 #define LPFC_DRIVER_NAME               "lpfc"
 
 /* Used for SLI 2/3 */
index e7e5974e1a2c435ef2ee0a79276e981fcb79cc87..2b209bbb4c9165fa7afdeff0f233f649684f8495 100644 (file)
@@ -35,8 +35,8 @@
 /*
  * MegaRAID SAS Driver meta data
  */
-#define MEGASAS_VERSION                                "07.701.16.00-rc1"
-#define MEGASAS_RELDATE                                "February 2, 2017"
+#define MEGASAS_VERSION                                "07.701.17.00-rc1"
+#define MEGASAS_RELDATE                                "March 2, 2017"
 
 /*
  * Device IDs
index 7ac9a9ee9bd473c3cc0b6178975f46e3d32f3b77..0016f12cc563e7c6e1eb3c2a87685f60c83b9747 100644 (file)
@@ -1963,6 +1963,9 @@ scan_target:
        if (!mr_device_priv_data)
                return -ENOMEM;
        sdev->hostdata = mr_device_priv_data;
+
+       atomic_set(&mr_device_priv_data->r1_ldio_hint,
+                  instance->r1_ldio_hint_default);
        return 0;
 }
 
@@ -5034,10 +5037,12 @@ megasas_setup_irqs_msix(struct megasas_instance *instance, u8 is_probe)
                                         &instance->irq_context[j]);
                        /* Retry irq register for IO_APIC*/
                        instance->msix_vectors = 0;
-                       if (is_probe)
+                       if (is_probe) {
+                               pci_free_irq_vectors(instance->pdev);
                                return megasas_setup_irqs_ioapic(instance);
-                       else
+                       } else {
                                return -1;
+                       }
                }
        }
        return 0;
@@ -5277,9 +5282,11 @@ static int megasas_init_fw(struct megasas_instance *instance)
                        MPI2_REPLY_POST_HOST_INDEX_OFFSET);
        }
 
-       i = pci_alloc_irq_vectors(instance->pdev, 1, 1, PCI_IRQ_LEGACY);
-       if (i < 0)
-               goto fail_setup_irqs;
+       if (!instance->msix_vectors) {
+               i = pci_alloc_irq_vectors(instance->pdev, 1, 1, PCI_IRQ_LEGACY);
+               if (i < 0)
+                       goto fail_setup_irqs;
+       }
 
        dev_info(&instance->pdev->dev,
                "firmware supports msix\t: (%d)", fw_msix_count);
index 29650ba669da58da099cf91e9de0aae504146bb0..f990ab4d45e1bf72b3adf8991b11c01309c7530b 100644 (file)
@@ -2159,7 +2159,7 @@ megasas_set_raidflag_cpu_affinity(union RAID_CONTEXT_UNION *praid_context,
                                cpu_sel = MR_RAID_CTX_CPUSEL_1;
 
                        if (is_stream_detected(rctx_g35) &&
-                           (raid->level == 5) &&
+                           ((raid->level == 5) || (raid->level == 6)) &&
                            (raid->writeMode == MR_RL_WRITE_THROUGH_MODE) &&
                            (cpu_sel == MR_RAID_CTX_CPUSEL_FCFS))
                                cpu_sel = MR_RAID_CTX_CPUSEL_0;
@@ -2338,7 +2338,7 @@ megasas_build_ldio_fusion(struct megasas_instance *instance,
                                fp_possible = false;
                                atomic_dec(&instance->fw_outstanding);
                        } else if ((scsi_buff_len > MR_LARGE_IO_MIN_SIZE) ||
-                                  atomic_dec_if_positive(&mrdev_priv->r1_ldio_hint)) {
+                                  (atomic_dec_if_positive(&mrdev_priv->r1_ldio_hint) > 0)) {
                                fp_possible = false;
                                atomic_dec(&instance->fw_outstanding);
                                if (scsi_buff_len > MR_LARGE_IO_MIN_SIZE)
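The r1_ldio_hint hunk above is subtle: atomic_dec_if_positive() returns the decremented value, and when the counter is already 0 it returns -1 without changing it; -1 is truthy in C, so the bare use of the return value fired exactly when the hint was exhausted. Comparing against > 0 fixes that. A single-threaded userspace emulation of the semantics (illustrative only, no atomicity):

#include <stdio.h>

/* Stand-in for atomic_dec_if_positive(): decrement only if the
 * result stays >= 0, return old - 1 either way. */
static int dec_if_positive(int *v)
{
        int dec = *v - 1;

        if (dec >= 0)
                *v = dec;
        return dec;
}

int main(void)
{
        int hint = 1;
        int ret;

        ret = dec_if_positive(&hint);   /* 1 -> 0, returns 0 (falsy) */
        printf("ret=%d hint=%d\n", ret, hint);

        ret = dec_if_positive(&hint);   /* stays 0, returns -1 (truthy!) */
        printf("ret=%d hint=%d\n", ret, hint);
        return 0;
}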
index 7fe7e6ed595b79e8831bfbeb55767e5d65ff7e5c..8981806fb13fa7792e2b8d45f4fc6880b362da25 100644 (file)
@@ -1442,9 +1442,6 @@ void mpt3sas_transport_update_links(struct MPT3SAS_ADAPTER *ioc,
        u64 sas_address, u16 handle, u8 phy_number, u8 link_rate);
 extern struct sas_function_template mpt3sas_transport_functions;
 extern struct scsi_transport_template *mpt3sas_transport_template;
-extern int scsi_internal_device_block(struct scsi_device *sdev);
-extern int scsi_internal_device_unblock(struct scsi_device *sdev,
-                               enum scsi_device_state new_state);
 /* trigger data externs */
 void mpt3sas_send_trigger_data_event(struct MPT3SAS_ADAPTER *ioc,
        struct SL_WH_TRIGGERS_EVENT_DATA_T *event_data);
index 46e866c36c8a884a98588a8d7a81272aed0ce400..919ba2bb15f110f4619a72646d170f87ad985237 100644 (file)
@@ -2859,7 +2859,7 @@ _scsih_internal_device_block(struct scsi_device *sdev,
            sas_device_priv_data->sas_target->handle);
        sas_device_priv_data->block = 1;
 
-       r = scsi_internal_device_block(sdev);
+       r = scsi_internal_device_block(sdev, false);
        if (r == -EINVAL)
                sdev_printk(KERN_WARNING, sdev,
                    "device_block failed with return(%d) for handle(0x%04x)\n",
@@ -2895,7 +2895,7 @@ _scsih_internal_device_unblock(struct scsi_device *sdev,
                    "performing a block followed by an unblock\n",
                    r, sas_device_priv_data->sas_target->handle);
                sas_device_priv_data->block = 1;
-               r = scsi_internal_device_block(sdev);
+               r = scsi_internal_device_block(sdev, false);
                if (r)
                        sdev_printk(KERN_WARNING, sdev, "retried device_block "
                            "failed with return(%d) for handle(0x%04x)\n",
@@ -4677,7 +4677,6 @@ _scsih_io_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index, u32 reply)
        struct MPT3SAS_DEVICE *sas_device_priv_data;
        u32 response_code = 0;
        unsigned long flags;
-       unsigned int sector_sz;
 
        mpi_reply = mpt3sas_base_get_reply_virt_addr(ioc, reply);
 
@@ -4742,20 +4741,6 @@ _scsih_io_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index, u32 reply)
        }
 
        xfer_cnt = le32_to_cpu(mpi_reply->TransferCount);
-
-       /* In case of bogus fw or device, we could end up having
-        * unaligned partial completion. We can force alignment here,
-        * then scsi-ml does not need to handle this misbehavior.
-        */
-       sector_sz = scmd->device->sector_size;
-       if (unlikely(!blk_rq_is_passthrough(scmd->request) && sector_sz &&
-                    xfer_cnt % sector_sz)) {
-               sdev_printk(KERN_INFO, scmd->device,
-                   "unaligned partial completion avoided (xfer_cnt=%u, sector_sz=%u)\n",
-                           xfer_cnt, sector_sz);
-               xfer_cnt = round_down(xfer_cnt, sector_sz);
-       }
-
        scsi_set_resid(scmd, scsi_bufflen(scmd) - xfer_cnt);
        if (ioc_status & MPI2_IOCSTATUS_FLAG_LOG_INFO_AVAILABLE)
                log_info =  le32_to_cpu(mpi_reply->IOCLogInfo);
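The deleted _scsih_io_done() hunk above had forced a bogus partial-completion count down to a sector multiple; this series drops that driver-local workaround. For reference, a standalone sketch of what the round_down() it used computes for a power-of-two alignment (the values are made up, and this macro form only holds for power-of-two alignments):

#include <stdio.h>

#define round_down(x, y) ((x) & ~((y) - 1))

int main(void)
{
        unsigned int sector_sz = 512;
        unsigned int xfer_cnt = 4100;   /* bogus partial completion */

        printf("%u -> %u\n", xfer_cnt, round_down(xfer_cnt, sector_sz));
        return 0;
}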
index 64e9f507ce328637cb8675ef32a2d94631d1b0c2..414f2a772a5fca9b6b0079e24e359444ae3be577 100644 (file)
@@ -1,5 +1,5 @@
 obj-$(CONFIG_QEDF) := qedf.o
 qedf-y = qedf_dbg.o qedf_main.o qedf_io.o qedf_fip.o \
-        qedf_attr.o qedf_els.o
+        qedf_attr.o qedf_els.o drv_scsi_fw_funcs.o drv_fcoe_fw_funcs.o
 
 qedf-$(CONFIG_DEBUG_FS) += qedf_debugfs.o
diff --git a/drivers/scsi/qedf/drv_fcoe_fw_funcs.c b/drivers/scsi/qedf/drv_fcoe_fw_funcs.c
new file mode 100644 (file)
index 0000000..8c65e3b
--- /dev/null
@@ -0,0 +1,190 @@
+/* QLogic FCoE Offload Driver
+ * Copyright (c) 2016 Cavium Inc.
+ *
+ * This software is available under the terms of the GNU General Public License
+ * (GPL) Version 2, available from the file COPYING in the main directory of
+ * this source tree.
+ */
+#include "drv_fcoe_fw_funcs.h"
+#include "drv_scsi_fw_funcs.h"
+
+#define FCOE_RX_ID (0xFFFFu)
+
+static inline void init_common_sqe(struct fcoe_task_params *task_params,
+                                  enum fcoe_sqe_request_type request_type)
+{
+       memset(task_params->sqe, 0, sizeof(*(task_params->sqe)));
+       SET_FIELD(task_params->sqe->flags, FCOE_WQE_REQ_TYPE,
+                 request_type);
+       task_params->sqe->task_id = task_params->itid;
+}
+
+int init_initiator_rw_fcoe_task(struct fcoe_task_params *task_params,
+                               struct scsi_sgl_task_params *sgl_task_params,
+                               struct regpair sense_data_buffer_phys_addr,
+                               u32 task_retry_id,
+                               u8 fcp_cmd_payload[32])
+{
+       struct fcoe_task_context *ctx = task_params->context;
+       struct ystorm_fcoe_task_st_ctx *y_st_ctx;
+       struct tstorm_fcoe_task_st_ctx *t_st_ctx;
+       struct ustorm_fcoe_task_ag_ctx *u_ag_ctx;
+       struct mstorm_fcoe_task_st_ctx *m_st_ctx;
+       u32 io_size, val;
+       bool slow_sgl;
+
+       memset(ctx, 0, sizeof(*(ctx)));
+       slow_sgl = scsi_is_slow_sgl(sgl_task_params->num_sges,
+                                   sgl_task_params->small_mid_sge);
+       io_size = (task_params->task_type == FCOE_TASK_TYPE_WRITE_INITIATOR ?
+                  task_params->tx_io_size : task_params->rx_io_size);
+
+       /* Ystorm ctx */
+       y_st_ctx = &ctx->ystorm_st_context;
+       y_st_ctx->data_2_trns_rem = cpu_to_le32(io_size);
+       y_st_ctx->task_rety_identifier = cpu_to_le32(task_retry_id);
+       y_st_ctx->task_type = task_params->task_type;
+       memcpy(&y_st_ctx->tx_info_union.fcp_cmd_payload,
+              fcp_cmd_payload, sizeof(struct fcoe_fcp_cmd_payload));
+
+       /* Tstorm ctx */
+       t_st_ctx = &ctx->tstorm_st_context;
+       t_st_ctx->read_only.dev_type = (task_params->is_tape_device == 1 ?
+                                       FCOE_TASK_DEV_TYPE_TAPE :
+                                       FCOE_TASK_DEV_TYPE_DISK);
+       t_st_ctx->read_only.cid = cpu_to_le32(task_params->conn_cid);
+       val = cpu_to_le32(task_params->cq_rss_number);
+       t_st_ctx->read_only.glbl_q_num = val;
+       t_st_ctx->read_only.fcp_cmd_trns_size = cpu_to_le32(io_size);
+       t_st_ctx->read_only.task_type = task_params->task_type;
+       SET_FIELD(t_st_ctx->read_write.flags,
+                 FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_EXP_FIRST_FRAME, 1);
+       t_st_ctx->read_write.rx_id = cpu_to_le16(FCOE_RX_ID);
+
+       /* Ustorm ctx */
+       u_ag_ctx = &ctx->ustorm_ag_context;
+       u_ag_ctx->global_cq_num = cpu_to_le32(task_params->cq_rss_number);
+
+       /* Mstorm buffer for sense/rsp data placement */
+       m_st_ctx = &ctx->mstorm_st_context;
+       val = cpu_to_le32(sense_data_buffer_phys_addr.hi);
+       m_st_ctx->rsp_buf_addr.hi = val;
+       val = cpu_to_le32(sense_data_buffer_phys_addr.lo);
+       m_st_ctx->rsp_buf_addr.lo = val;
+
+       if (task_params->task_type == FCOE_TASK_TYPE_WRITE_INITIATOR) {
+               /* Ystorm ctx */
+               y_st_ctx->expect_first_xfer = 1;
+
+               /* Set the amount of super SGEs. Can be up to 4. */
+               SET_FIELD(y_st_ctx->sgl_mode,
+                         YSTORM_FCOE_TASK_ST_CTX_TX_SGL_MODE,
+                         (slow_sgl ? SCSI_TX_SLOW_SGL : SCSI_FAST_SGL));
+               init_scsi_sgl_context(&y_st_ctx->sgl_params,
+                                     &y_st_ctx->data_desc,
+                                     sgl_task_params);
+
+               /* Mstorm ctx */
+               SET_FIELD(m_st_ctx->flags,
+                         MSTORM_FCOE_TASK_ST_CTX_TX_SGL_MODE,
+                         (slow_sgl ? SCSI_TX_SLOW_SGL : SCSI_FAST_SGL));
+       } else {
+               /* Tstorm ctx */
+               SET_FIELD(t_st_ctx->read_write.flags,
+                         FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_RX_SGL_MODE,
+                         (slow_sgl ? SCSI_TX_SLOW_SGL : SCSI_FAST_SGL));
+
+               /* Mstorm ctx */
+               m_st_ctx->data_2_trns_rem = cpu_to_le32(io_size);
+               init_scsi_sgl_context(&m_st_ctx->sgl_params,
+                                     &m_st_ctx->data_desc,
+                                     sgl_task_params);
+       }
+
+       init_common_sqe(task_params, SEND_FCOE_CMD);
+       return 0;
+}
+
+int init_initiator_midpath_unsolicited_fcoe_task(
+       struct fcoe_task_params *task_params,
+       struct fcoe_tx_mid_path_params *mid_path_fc_header,
+       struct scsi_sgl_task_params *tx_sgl_task_params,
+       struct scsi_sgl_task_params *rx_sgl_task_params,
+       u8 fw_to_place_fc_header)
+{
+       struct fcoe_task_context *ctx = task_params->context;
+       struct ystorm_fcoe_task_st_ctx *y_st_ctx;
+       struct tstorm_fcoe_task_st_ctx *t_st_ctx;
+       struct ustorm_fcoe_task_ag_ctx *u_ag_ctx;
+       struct mstorm_fcoe_task_st_ctx *m_st_ctx;
+       u32 val;
+
+       memset(ctx, 0, sizeof(*(ctx)));
+
+       /* Init Ystorm */
+       y_st_ctx = &ctx->ystorm_st_context;
+       init_scsi_sgl_context(&y_st_ctx->sgl_params,
+                             &y_st_ctx->data_desc,
+                             tx_sgl_task_params);
+       SET_FIELD(y_st_ctx->sgl_mode,
+                 YSTORM_FCOE_TASK_ST_CTX_TX_SGL_MODE, SCSI_FAST_SGL);
+       y_st_ctx->data_2_trns_rem = cpu_to_le32(task_params->tx_io_size);
+       y_st_ctx->task_type = task_params->task_type;
+       memcpy(&y_st_ctx->tx_info_union.tx_params.mid_path,
+              mid_path_fc_header, sizeof(struct fcoe_tx_mid_path_params));
+
+       /* Init Mstorm */
+       m_st_ctx = &ctx->mstorm_st_context;
+       init_scsi_sgl_context(&m_st_ctx->sgl_params,
+                             &m_st_ctx->data_desc,
+                             rx_sgl_task_params);
+       SET_FIELD(m_st_ctx->flags,
+                 MSTORM_FCOE_TASK_ST_CTX_MP_INCLUDE_FC_HEADER,
+                 fw_to_place_fc_header);
+       m_st_ctx->data_2_trns_rem = cpu_to_le32(task_params->rx_io_size);
+
+       /* Init Tstorm */
+       t_st_ctx = &ctx->tstorm_st_context;
+       t_st_ctx->read_only.cid = cpu_to_le32(task_params->conn_cid);
+       val = cpu_to_le32(task_params->cq_rss_number);
+       t_st_ctx->read_only.glbl_q_num = val;
+       t_st_ctx->read_only.task_type = task_params->task_type;
+       SET_FIELD(t_st_ctx->read_write.flags,
+                 FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_EXP_FIRST_FRAME, 1);
+       t_st_ctx->read_write.rx_id = cpu_to_le16(FCOE_RX_ID);
+
+       /* Init Ustorm */
+       u_ag_ctx = &ctx->ustorm_ag_context;
+       u_ag_ctx->global_cq_num = cpu_to_le32(task_params->cq_rss_number);
+
+       /* Init SQE */
+       init_common_sqe(task_params, SEND_FCOE_MIDPATH);
+       task_params->sqe->additional_info_union.burst_length =
+                                   tx_sgl_task_params->total_buffer_size;
+       SET_FIELD(task_params->sqe->flags,
+                 FCOE_WQE_NUM_SGES, tx_sgl_task_params->num_sges);
+       SET_FIELD(task_params->sqe->flags, FCOE_WQE_SGL_MODE,
+                 SCSI_FAST_SGL);
+
+       return 0;
+}
+
+int init_initiator_abort_fcoe_task(struct fcoe_task_params *task_params)
+{
+       init_common_sqe(task_params, SEND_FCOE_ABTS_REQUEST);
+       return 0;
+}
+
+int init_initiator_cleanup_fcoe_task(struct fcoe_task_params *task_params)
+{
+       init_common_sqe(task_params, FCOE_EXCHANGE_CLEANUP);
+       return 0;
+}
+
+int init_initiator_sequence_recovery_fcoe_task(
+       struct fcoe_task_params *task_params, u32 off)
+{
+       init_common_sqe(task_params, FCOE_SEQUENCE_RECOVERY);
+       task_params->sqe->additional_info_union.seq_rec_updated_offset = off;
+       return 0;
+}
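For orientation, the read/write helper above is driven by qedf_init_task() later in this patch. A condensed sketch of that call sequence follows; example_init_write_task() is a hypothetical wrapper name, the field sources are taken from the patch, and locking, CQ selection and error handling are omitted:

	/* Illustrative sketch only - not part of the patch. Shows how a
	 * caller fills fcoe_task_params and hands off to the HSI helper.
	 */
	static void example_init_write_task(struct qedf_ioreq *io_req,
					    struct qedf_rport *fcport,
					    struct fcoe_task_context *task_ctx,
					    struct fcoe_wqe *sqe,
					    u8 fcp_cmnd[32])
	{
		struct regpair sense_buf;

		io_req->task_params->context = task_ctx;
		io_req->task_params->sqe = sqe;	/* SQE reserved by caller */
		io_req->task_params->task_type = FCOE_TASK_TYPE_WRITE_INITIATOR;
		io_req->task_params->tx_io_size = io_req->data_xfer_len;
		io_req->task_params->conn_cid = fcport->fw_cid;
		io_req->task_params->itid = io_req->xid;
		io_req->task_params->is_tape_device = fcport->dev_type;

		/* Sense buffer DMA address is passed as a regpair */
		sense_buf.lo = U64_LO(io_req->sense_buffer_dma);
		sense_buf.hi = U64_HI(io_req->sense_buffer_dma);

		init_initiator_rw_fcoe_task(io_req->task_params,
					    io_req->sgl_task_params,
					    sense_buf,
					    io_req->task_retry_identifier,
					    fcp_cmnd);
	}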
diff --git a/drivers/scsi/qedf/drv_fcoe_fw_funcs.h b/drivers/scsi/qedf/drv_fcoe_fw_funcs.h
new file mode 100644
index 0000000..617529b
--- /dev/null
@@ -0,0 +1,93 @@
+/* QLogic FCoE Offload Driver
+ * Copyright (c) 2016 Cavium Inc.
+ *
+ * This software is available under the terms of the GNU General Public License
+ * (GPL) Version 2, available from the file COPYING in the main directory of
+ * this source tree.
+ */
+#ifndef _FCOE_FW_FUNCS_H
+#define _FCOE_FW_FUNCS_H
+#include "drv_scsi_fw_funcs.h"
+#include "qedf_hsi.h"
+#include <linux/qed/qed_if.h>
+
+struct fcoe_task_params {
+       /* Output parameter [set/filled by the HSI function] */
+       struct fcoe_task_context *context;
+
+       /* Output parameter [set/filled by the HSI function] */
+       struct fcoe_wqe *sqe;
+       enum fcoe_task_type task_type;
+       u32 tx_io_size; /* in bytes */
+       u32 rx_io_size; /* in bytes */
+       u32 conn_cid;
+       u16 itid;
+       u8 cq_rss_number;
+
+        /* Whether it's a tape device or not (0=Disk, 1=Tape) */
+       u8 is_tape_device;
+};
+
+/**
+ * @brief init_initiator_rw_fcoe_task - Initializes FCoE task context for
+ * read/write task types and initializes the fcoe_sqe
+ *
+ * @param task_params - Pointer to task parameters struct
+ * @param sgl_task_params - Pointer to SGL task params
+ * @param sense_data_buffer_phys_addr - Pointer to sense data buffer
+ * @param task_retry_id - retry identification - Used only for Tape device
+ * @param fcp_cmd_payload - FCP CMD payload
+ */
+int init_initiator_rw_fcoe_task(struct fcoe_task_params *task_params,
+       struct scsi_sgl_task_params *sgl_task_params,
+       struct regpair sense_data_buffer_phys_addr,
+       u32 task_retry_id,
+       u8 fcp_cmd_payload[32]);
+
+/**
+ * @brief init_initiator_midpath_unsolicited_fcoe_task - Initializes FCoE task
+ * context for midpath/unsolicited task types and initializes the fcoe_sqe
+ *
+ * @param task_params - Pointer to task parameters struct
+ * @param mid_path_fc_header - FC header
+ * @param tx_sgl_task_params - Pointer to Tx SGL task params
+ * @param rx_sgl_task_params - Pointer to Rx SGL task params
+ * @param fw_to_place_fc_header - Indication whether the FW should place the
+ * FC header in addition to the arriving data.
+ */
+int init_initiator_midpath_unsolicited_fcoe_task(
+       struct fcoe_task_params *task_params,
+       struct fcoe_tx_mid_path_params *mid_path_fc_header,
+       struct scsi_sgl_task_params *tx_sgl_task_params,
+       struct scsi_sgl_task_params *rx_sgl_task_params,
+       u8 fw_to_place_fc_header);
+
+/**
+ * @brief init_initiator_abort_fcoe_task - Initializes FCoE task context for
+ * abort task types and initializes the fcoe_sqe
+ *
+ * @param task_params - Pointer to task parameters struct
+ */
+int init_initiator_abort_fcoe_task(struct fcoe_task_params *task_params);
+
+/**
+ * @brief init_initiator_cleanup_fcoe_task - Initializes FCoE task context for
+ * cleanup task types and initializes the fcoe_sqe
+ *
+ * @param task_params - Pointer to task parameters struct
+ */
+int init_initiator_cleanup_fcoe_task(struct fcoe_task_params *task_params);
+
+/**
+ * @brief init_initiator_sequence_recovery_fcoe_task - Initializes FCoE task
+ * context for sequence recovery task types and initializes the fcoe_sqe
+ *
+ * @param task_params - Pointer to task parameters struct
+ * @param desired_offset - The desired offset from which the task will be
+ * re-sent
+ */
+int init_initiator_sequence_recovery_fcoe_task(
+       struct fcoe_task_params *task_params,
+       u32 desired_offset);
+#endif
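The midpath helper takes the FC header pre-flattened into a struct fcoe_tx_mid_path_params. A sketch of that flattening, mirroring what qedf_init_mp_task() does later in this patch (illustrative excerpt, not a complete function; fc_hdr, io_req and the two SGL parameter blocks are assumed to be set up as in the patch):

	struct fcoe_tx_mid_path_params task_fc_hdr;

	memset(&task_fc_hdr, 0, sizeof(task_fc_hdr));
	task_fc_hdr.parameter = fc_hdr->fh_parm_offset;
	task_fc_hdr.r_ctl = fc_hdr->fh_r_ctl;
	task_fc_hdr.type = fc_hdr->fh_type;
	task_fc_hdr.cs_ctl = fc_hdr->fh_cs_ctl;
	task_fc_hdr.df_ctl = fc_hdr->fh_df_ctl;
	task_fc_hdr.rx_id = fc_hdr->fh_rx_id;
	task_fc_hdr.ox_id = fc_hdr->fh_ox_id;

	/* Last arg 0: do not ask the FW to place the FC header */
	init_initiator_midpath_unsolicited_fcoe_task(io_req->task_params,
						     &task_fc_hdr,
						     &tx_sgl_task_params,
						     &rx_sgl_task_params, 0);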
diff --git a/drivers/scsi/qedf/drv_scsi_fw_funcs.c b/drivers/scsi/qedf/drv_scsi_fw_funcs.c
new file mode 100644
index 0000000..11e0cc0
--- /dev/null
@@ -0,0 +1,44 @@
+/* QLogic FCoE Offload Driver
+ * Copyright (c) 2016 Cavium Inc.
+ *
+ * This software is available under the terms of the GNU General Public License
+ * (GPL) Version 2, available from the file COPYING in the main directory of
+ * this source tree.
+ */
+#include "drv_scsi_fw_funcs.h"
+
+#define SCSI_NUM_SGES_IN_CACHE 0x4
+
+bool scsi_is_slow_sgl(u16 num_sges, bool small_mid_sge)
+{
+       return (num_sges > SCSI_NUM_SGES_SLOW_SGL_THR && small_mid_sge);
+}
+
+void init_scsi_sgl_context(struct scsi_sgl_params *ctx_sgl_params,
+                          struct scsi_cached_sges *ctx_data_desc,
+                          struct scsi_sgl_task_params *sgl_task_params)
+{
+       /* no need to check for sgl_task_params->sgl validity */
+       u8 num_sges_to_init = sgl_task_params->num_sges >
+                             SCSI_NUM_SGES_IN_CACHE ? SCSI_NUM_SGES_IN_CACHE :
+                             sgl_task_params->num_sges;
+       u8 sge_index;
+       u32 val;
+
+       val = cpu_to_le32(sgl_task_params->sgl_phys_addr.lo);
+       ctx_sgl_params->sgl_addr.lo = val;
+       val = cpu_to_le32(sgl_task_params->sgl_phys_addr.hi);
+       ctx_sgl_params->sgl_addr.hi = val;
+       val = cpu_to_le32(sgl_task_params->total_buffer_size);
+       ctx_sgl_params->sgl_total_length = val;
+       ctx_sgl_params->sgl_num_sges = cpu_to_le16(sgl_task_params->num_sges);
+
+       for (sge_index = 0; sge_index < num_sges_to_init; sge_index++) {
+               val = cpu_to_le32(sgl_task_params->sgl[sge_index].sge_addr.lo);
+               ctx_data_desc->sge[sge_index].sge_addr.lo = val;
+               val = cpu_to_le32(sgl_task_params->sgl[sge_index].sge_addr.hi);
+               ctx_data_desc->sge[sge_index].sge_addr.hi = val;
+               val = cpu_to_le32(sgl_task_params->sgl[sge_index].sge_len);
+               ctx_data_desc->sge[sge_index].sge_len = val;
+       }
+}
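init_scsi_sgl_context() mirrors at most SCSI_NUM_SGES_IN_CACHE (4) leading SGEs into the cached descriptor; the firmware fetches any remainder through sgl_phys_addr. A minimal usage sketch for the single-SGE case (example_single_sge() is a hypothetical wrapper; U64_LO()/U64_HI() are the qedf helpers from qedf.h, and the DMA addresses are assumed to come from a real coherent mapping):

	static void example_single_sge(struct scsi_sgl_params *ctx_sgl_params,
				       struct scsi_cached_sges *ctx_data_desc,
				       struct scsi_sge *sge,
				       dma_addr_t sgl_dma, u32 len)
	{
		struct scsi_sgl_task_params params;

		memset(&params, 0, sizeof(params));
		params.sgl = sge;			/* one pre-built SGE */
		params.sgl_phys_addr.lo = U64_LO(sgl_dma);
		params.sgl_phys_addr.hi = U64_HI(sgl_dma);
		params.total_buffer_size = len;
		params.num_sges = 1;			/* <= 4, fully cached */
		params.small_mid_sge = false;

		init_scsi_sgl_context(ctx_sgl_params, ctx_data_desc, &params);
	}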
diff --git a/drivers/scsi/qedf/drv_scsi_fw_funcs.h b/drivers/scsi/qedf/drv_scsi_fw_funcs.h
new file mode 100644
index 0000000..9cb4541
--- /dev/null
@@ -0,0 +1,85 @@
+/* QLogic FCoE Offload Driver
+ * Copyright (c) 2016 Cavium Inc.
+ *
+ * This software is available under the terms of the GNU General Public License
+ * (GPL) Version 2, available from the file COPYING in the main directory of
+ * this source tree.
+ */
+#ifndef _SCSI_FW_FUNCS_H
+#define _SCSI_FW_FUNCS_H
+#include <linux/qed/common_hsi.h>
+#include <linux/qed/storage_common.h>
+#include <linux/qed/fcoe_common.h>
+
+struct scsi_sgl_task_params {
+       struct scsi_sge *sgl;
+       struct regpair sgl_phys_addr;
+       u32 total_buffer_size;
+       u16 num_sges;
+
+        /* True if the SGL contains a small (< 4KB) SGE in the middle (not the
+         * first or last SGE) - relevant for Tx only
+         */
+       bool small_mid_sge;
+};
+
+struct scsi_dif_task_params {
+       u32 initial_ref_tag;
+       bool initial_ref_tag_is_valid;
+       u16 application_tag;
+       u16 application_tag_mask;
+       u16 dif_block_size_log;
+       bool dif_on_network;
+       bool dif_on_host;
+       u8 host_guard_type;
+       u8 protection_type;
+       u8 ref_tag_mask;
+       bool crc_seed;
+
+        /* Enable Connection error upon DIF error (segments with DIF errors are
+         * dropped)
+         */
+       bool tx_dif_conn_err_en;
+       bool ignore_app_tag;
+       bool keep_ref_tag_const;
+       bool validate_guard;
+       bool validate_app_tag;
+       bool validate_ref_tag;
+       bool forward_guard;
+       bool forward_app_tag;
+       bool forward_ref_tag;
+       bool forward_app_tag_with_mask;
+       bool forward_ref_tag_with_mask;
+};
+
+struct scsi_initiator_cmd_params {
+        /* for cdb_size > default CDB size (extended CDB > 16 bytes) ->
+         * pointer to the CDB buffer SGE
+         */
+       struct scsi_sge extended_cdb_sge;
+
+       /* Physical address of sense data buffer for sense data - 256B buffer */
+       struct regpair sense_data_buffer_phys_addr;
+};
+
+/**
+ * @brief scsi_is_slow_sgl - checks for slow SGL
+ *
+ * @param num_sges - number of sges in SGL
+ * @param small_mid_sge - True if the SGL contains an SGE smaller than 4KB
+ * that is not the first or last SGE in the SGL
+ */
+bool scsi_is_slow_sgl(u16 num_sges, bool small_mid_sge);
+
+/**
+ * @brief init_scsi_sgl_context - initializes SGL task context
+ *
+ * @param sgl_params - SGL context parameters to initialize (output parameter)
+ * @param ctx_data_desc - context struct containing the SGE array to set
+ * (output parameter)
+ * @param sgl_task_params - SGL parameters (input)
+ */
+void init_scsi_sgl_context(struct scsi_sgl_params *sgl_params,
+       struct scsi_cached_sges *ctx_data_desc,
+       struct scsi_sgl_task_params *sgl_task_params);
+#endif
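An SGL is considered slow only when it is both longer than SCSI_NUM_SGES_SLOW_SGL_THR (a threshold defined in the shared qed storage headers, not shown in this patch) and contains a small mid-list SGE. A sketch of the resulting Tx mode selection, as used by the FCoE task-init helpers (example_tx_sgl_mode() is a hypothetical helper):

	static u8 example_tx_sgl_mode(u16 num_sges, bool small_mid_sge)
	{
		/* Slow SGL: firmware walks the list; fast: cached SGEs */
		return scsi_is_slow_sgl(num_sges, small_mid_sge) ?
		       SCSI_TX_SLOW_SGL : SCSI_FAST_SGL;
	}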
index 96346a1b1515e81b6c17a7035fb4051a263b8a4a..40aeb6bb96a2afd11c3264b7ea0004110033e2d1 100644
@@ -26,6 +26,7 @@
 #include <linux/qed/qed_ll2_if.h>
 #include "qedf_version.h"
 #include "qedf_dbg.h"
+#include "drv_fcoe_fw_funcs.h"
 
 /* Helpers to extract upper and lower 32-bits of pointer */
 #define U64_HI(val) ((u32)(((u64)(val)) >> 32))
 #define UPSTREAM_KEEP          1
 
 struct qedf_mp_req {
-       uint8_t tm_flags;
-
        uint32_t req_len;
        void *req_buf;
        dma_addr_t req_buf_dma;
-       struct fcoe_sge *mp_req_bd;
+       struct scsi_sge *mp_req_bd;
        dma_addr_t mp_req_bd_dma;
        struct fc_frame_header req_fc_hdr;
 
        uint32_t resp_len;
        void *resp_buf;
        dma_addr_t resp_buf_dma;
-       struct fcoe_sge *mp_resp_bd;
+       struct scsi_sge *mp_resp_bd;
        dma_addr_t mp_resp_bd_dma;
        struct fc_frame_header resp_fc_hdr;
 };
@@ -119,6 +118,7 @@ struct qedf_ioreq {
 #define QEDF_CMD_IN_CLEANUP            0x2
 #define QEDF_CMD_SRR_SENT              0x3
        u8 io_req_flags;
+       uint8_t tm_flags;
        struct qedf_rport *fcport;
        unsigned long flags;
        enum qedf_ioreq_event event;
@@ -130,6 +130,8 @@ struct qedf_ioreq {
        struct completion tm_done;
        struct completion abts_done;
        struct fcoe_task_context *task;
+       struct fcoe_task_params *task_params;
+       struct scsi_sgl_task_params *sgl_task_params;
        int idx;
 /*
  * Need to allocate enough room for both sense data and FCP response data
@@ -199,8 +201,8 @@ struct qedf_rport {
        dma_addr_t sq_pbl_dma;
        u32 sq_pbl_size;
        u32 sid;
-#define        QEDF_RPORT_TYPE_DISK            1
-#define        QEDF_RPORT_TYPE_TAPE            2
+#define        QEDF_RPORT_TYPE_DISK            0
+#define        QEDF_RPORT_TYPE_TAPE            1
        uint dev_type; /* Disk or tape */
        struct list_head peers;
 };
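The renumbering lines QEDF_RPORT_TYPE_DISK/TAPE up with the firmware encoding (0 = disk, 1 = tape), so the I/O path later in this patch can pass fcport->dev_type through unchanged:

	/* From qedf_init_task() below: dev_type doubles as the HSI flag */
	io_req->task_params->is_tape_device = fcport->dev_type;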
@@ -391,7 +393,7 @@ struct qedf_ctx {
 
 struct io_bdt {
        struct qedf_ioreq *io_req;
-       struct fcoe_sge *bd_tbl;
+       struct scsi_sge *bd_tbl;
        dma_addr_t bd_tbl_dma;
        u16 bd_valid;
 };
@@ -400,7 +402,7 @@ struct qedf_cmd_mgr {
        struct qedf_ctx *qedf;
        u16 idx;
        struct io_bdt **io_bdt_pool;
-#define FCOE_PARAMS_NUM_TASKS          4096
+#define FCOE_PARAMS_NUM_TASKS          2048
        struct qedf_ioreq cmds[FCOE_PARAMS_NUM_TASKS];
        spinlock_t lock;
        atomic_t free_list_cnt;
@@ -465,9 +467,8 @@ extern void qedf_cmd_timer_set(struct qedf_ctx *qedf, struct qedf_ioreq *io_req,
        unsigned int timer_msec);
 extern int qedf_init_mp_req(struct qedf_ioreq *io_req);
 extern void qedf_init_mp_task(struct qedf_ioreq *io_req,
-       struct fcoe_task_context *task_ctx);
-extern void qedf_add_to_sq(struct qedf_rport *fcport, u16 xid,
-       u32 ptu_invalidate, enum fcoe_task_type req_type, u32 offset);
+       struct fcoe_task_context *task_ctx, struct fcoe_wqe *wqe);
+extern u16 qedf_get_sqe_idx(struct qedf_rport *fcport);
 extern void qedf_ring_doorbell(struct qedf_rport *fcport);
 extern void qedf_process_els_compl(struct qedf_ctx *qedf, struct fcoe_cqe *cqe,
        struct qedf_ioreq *els_req);
index 23bd70628a2f05b9c5a5c2dbd6184e4ad6d076b2..7d173f48a81e8d240cd5d957b2997a3c0c3f0223 100644
@@ -81,14 +81,17 @@ struct qedf_dbg_ctx {
 #define QEDF_INFO(pdev, level, fmt, ...)       \
                qedf_dbg_info(pdev, __func__, __LINE__, level, fmt,     \
                              ## __VA_ARGS__)
-
-extern void qedf_dbg_err(struct qedf_dbg_ctx *qedf, const char *func, u32 line,
+__printf(4, 5)
+void qedf_dbg_err(struct qedf_dbg_ctx *qedf, const char *func, u32 line,
                          const char *fmt, ...);
-extern void qedf_dbg_warn(struct qedf_dbg_ctx *qedf, const char *func, u32 line,
+__printf(4, 5)
+void qedf_dbg_warn(struct qedf_dbg_ctx *qedf, const char *func, u32 line,
                           const char *, ...);
-extern void qedf_dbg_notice(struct qedf_dbg_ctx *qedf, const char *func,
+__printf(4, 5)
+void qedf_dbg_notice(struct qedf_dbg_ctx *qedf, const char *func,
                            u32 line, const char *, ...);
-extern void qedf_dbg_info(struct qedf_dbg_ctx *qedf, const char *func, u32 line,
+__printf(5, 6)
+void qedf_dbg_info(struct qedf_dbg_ctx *qedf, const char *func, u32 line,
                          u32 info, const char *fmt, ...);
 
 /* GRC Dump related defines */
index 59f3e5c73a139b9324a4307dc1f500a1153a4536..c505d41f6dc843825fb52fabaaca33e5dd25d1ce 100644
@@ -25,6 +25,9 @@ static int qedf_initiate_els(struct qedf_rport *fcport, unsigned int op,
        uint16_t xid;
        uint32_t start_time = jiffies / HZ;
        uint32_t current_time;
+       struct fcoe_wqe *sqe;
+       unsigned long flags;
+       u16 sqe_idx;
 
        QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS, "Sending ELS\n");
 
@@ -113,20 +116,25 @@ retry_els:
        /* Obtain exchange id */
        xid = els_req->xid;
 
+       spin_lock_irqsave(&fcport->rport_lock, flags);
+
+       sqe_idx = qedf_get_sqe_idx(fcport);
+       sqe = &fcport->sq[sqe_idx];
+       memset(sqe, 0, sizeof(struct fcoe_wqe));
+
        /* Initialize task context for this IO request */
        task = qedf_get_task_mem(&qedf->tasks, xid);
-       qedf_init_mp_task(els_req, task);
+       qedf_init_mp_task(els_req, task, sqe);
 
        /* Put timer on original I/O request */
        if (timer_msec)
                qedf_cmd_timer_set(qedf, els_req, timer_msec);
 
-       qedf_add_to_sq(fcport, xid, 0, FCOE_TASK_TYPE_MIDPATH, 0);
-
        /* Ring doorbell */
        QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS, "Ringing doorbell for ELS "
                   "req\n");
        qedf_ring_doorbell(fcport);
+       spin_unlock_irqrestore(&fcport->rport_lock, flags);
 els_err:
        return rc;
 }
@@ -604,6 +612,8 @@ static void qedf_initiate_seq_cleanup(struct qedf_ioreq *orig_io_req,
        struct qedf_rport *fcport;
        unsigned long flags;
        struct qedf_els_cb_arg *cb_arg;
+       struct fcoe_wqe *sqe;
+       u16 sqe_idx;
 
        fcport = orig_io_req->fcport;
 
@@ -631,8 +641,13 @@ static void qedf_initiate_seq_cleanup(struct qedf_ioreq *orig_io_req,
 
        spin_lock_irqsave(&fcport->rport_lock, flags);
 
-       qedf_add_to_sq(fcport, orig_io_req->xid, 0,
-           FCOE_TASK_TYPE_SEQUENCE_CLEANUP, offset);
+       sqe_idx = qedf_get_sqe_idx(fcport);
+       sqe = &fcport->sq[sqe_idx];
+       memset(sqe, 0, sizeof(struct fcoe_wqe));
+       orig_io_req->task_params->sqe = sqe;
+
+       init_initiator_sequence_recovery_fcoe_task(orig_io_req->task_params,
+                                                  offset);
        qedf_ring_doorbell(fcport);
 
        spin_unlock_irqrestore(&fcport->rport_lock, flags);
index 868d423380d120ca82c135a8e545fbd98e1ba935..ed58b9104f58b8894b3dfd924c6facfaba29be67 100644
@@ -203,7 +203,7 @@ void qedf_fip_recv(struct qedf_ctx *qedf, struct sk_buff *skb)
                        case FIP_DT_MAC:
                                mp = (struct fip_mac_desc *)desc;
                                QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_LL2,
-                                   "fd_mac=%pM.\n", __func__, mp->fd_mac);
+                                   "fd_mac=%pM\n", mp->fd_mac);
                                ether_addr_copy(cvl_mac, mp->fd_mac);
                                break;
                        case FIP_DT_NAME:
index ee0dcf9d3aba7847eaa673ab3542a1c0d6d375c3..1d7f90d0adc1a0c4f55ade57f0bf18f622372184 100644
@@ -96,7 +96,7 @@ void qedf_cmd_mgr_free(struct qedf_cmd_mgr *cmgr)
        if (!cmgr->io_bdt_pool)
                goto free_cmd_pool;
 
-       bd_tbl_sz = QEDF_MAX_BDS_PER_CMD * sizeof(struct fcoe_sge);
+       bd_tbl_sz = QEDF_MAX_BDS_PER_CMD * sizeof(struct scsi_sge);
        for (i = 0; i < num_ios; i++) {
                bdt_info = cmgr->io_bdt_pool[i];
                if (bdt_info->bd_tbl) {
@@ -119,6 +119,8 @@ free_cmd_pool:
 
        for (i = 0; i < num_ios; i++) {
                io_req = &cmgr->cmds[i];
+               kfree(io_req->sgl_task_params);
+               kfree(io_req->task_params);
                /* Make sure we free per command sense buffer */
                if (io_req->sense_buffer)
                        dma_free_coherent(&qedf->pdev->dev,
@@ -178,7 +180,7 @@ struct qedf_cmd_mgr *qedf_cmd_mgr_alloc(struct qedf_ctx *qedf)
        spin_lock_init(&cmgr->lock);
 
        /*
-        * Initialize list of qedf_ioreq.
+        * Initialize I/O request fields.
         */
        xid = QEDF_MIN_XID;
 
@@ -196,6 +198,29 @@ struct qedf_cmd_mgr *qedf_cmd_mgr_alloc(struct qedf_ctx *qedf)
                    GFP_KERNEL);
                if (!io_req->sense_buffer)
                        goto mem_err;
+
+               /* Allocate task parameters to pass to f/w init functions */
+               io_req->task_params = kzalloc(sizeof(*io_req->task_params),
+                                             GFP_KERNEL);
+               if (!io_req->task_params) {
+                       QEDF_ERR(&(qedf->dbg_ctx),
+                                "Failed to allocate task_params for xid=0x%x\n",
+                                i);
+                       goto mem_err;
+               }
+
+               /*
+                * Allocate scatter/gather list info to pass to f/w init
+                * functions.
+                */
+               io_req->sgl_task_params = kzalloc(
+                   sizeof(struct scsi_sgl_task_params), GFP_KERNEL);
+               if (!io_req->sgl_task_params) {
+                       QEDF_ERR(&(qedf->dbg_ctx),
+                                "Failed to allocate sgl_task_params for xid=0x%x\n",
+                                i);
+                       goto mem_err;
+               }
        }
 
        /* Allocate pool of io_bdts - one for each qedf_ioreq */
@@ -211,8 +236,8 @@ struct qedf_cmd_mgr *qedf_cmd_mgr_alloc(struct qedf_ctx *qedf)
                cmgr->io_bdt_pool[i] = kmalloc(sizeof(struct io_bdt),
                    GFP_KERNEL);
                if (!cmgr->io_bdt_pool[i]) {
-                       QEDF_WARN(&(qedf->dbg_ctx), "Failed to alloc "
-                                  "io_bdt_pool[%d].\n", i);
+                       QEDF_WARN(&(qedf->dbg_ctx),
+                                 "Failed to alloc io_bdt_pool[%d].\n", i);
                        goto mem_err;
                }
        }
@@ -220,11 +245,11 @@ struct qedf_cmd_mgr *qedf_cmd_mgr_alloc(struct qedf_ctx *qedf)
        for (i = 0; i < num_ios; i++) {
                bdt_info = cmgr->io_bdt_pool[i];
                bdt_info->bd_tbl = dma_alloc_coherent(&qedf->pdev->dev,
-                   QEDF_MAX_BDS_PER_CMD * sizeof(struct fcoe_sge),
+                   QEDF_MAX_BDS_PER_CMD * sizeof(struct scsi_sge),
                    &bdt_info->bd_tbl_dma, GFP_KERNEL);
                if (!bdt_info->bd_tbl) {
-                       QEDF_WARN(&(qedf->dbg_ctx), "Failed to alloc "
-                                  "bdt_tbl[%d].\n", i);
+                       QEDF_WARN(&(qedf->dbg_ctx),
+                                 "Failed to alloc bdt_tbl[%d].\n", i);
                        goto mem_err;
                }
        }
@@ -318,6 +343,7 @@ struct qedf_ioreq *qedf_alloc_cmd(struct qedf_rport *fcport, u8 cmd_type)
        }
        bd_tbl->io_req = io_req;
        io_req->cmd_type = cmd_type;
+       io_req->tm_flags = 0;
 
        /* Reset sequence offset data */
        io_req->rx_buf_off = 0;
@@ -336,10 +362,9 @@ static void qedf_free_mp_resc(struct qedf_ioreq *io_req)
 {
        struct qedf_mp_req *mp_req = &(io_req->mp_req);
        struct qedf_ctx *qedf = io_req->fcport->qedf;
-       uint64_t sz = sizeof(struct fcoe_sge);
+       uint64_t sz = sizeof(struct scsi_sge);
 
        /* clear tm flags */
-       mp_req->tm_flags = 0;
        if (mp_req->mp_req_bd) {
                dma_free_coherent(&qedf->pdev->dev, sz,
                    mp_req->mp_req_bd, mp_req->mp_req_bd_dma);
@@ -387,7 +412,7 @@ void qedf_release_cmd(struct kref *ref)
 static int qedf_split_bd(struct qedf_ioreq *io_req, u64 addr, int sg_len,
        int bd_index)
 {
-       struct fcoe_sge *bd = io_req->bd_tbl->bd_tbl;
+       struct scsi_sge *bd = io_req->bd_tbl->bd_tbl;
        int frag_size, sg_frags;
 
        sg_frags = 0;
@@ -398,7 +423,7 @@ static int qedf_split_bd(struct qedf_ioreq *io_req, u64 addr, int sg_len,
                        frag_size = sg_len;
                bd[bd_index + sg_frags].sge_addr.lo = U64_LO(addr);
                bd[bd_index + sg_frags].sge_addr.hi = U64_HI(addr);
-               bd[bd_index + sg_frags].size = (uint16_t)frag_size;
+               bd[bd_index + sg_frags].sge_len = (uint16_t)frag_size;
 
                addr += (u64)frag_size;
                sg_frags++;
@@ -413,7 +438,7 @@ static int qedf_map_sg(struct qedf_ioreq *io_req)
        struct Scsi_Host *host = sc->device->host;
        struct fc_lport *lport = shost_priv(host);
        struct qedf_ctx *qedf = lport_priv(lport);
-       struct fcoe_sge *bd = io_req->bd_tbl->bd_tbl;
+       struct scsi_sge *bd = io_req->bd_tbl->bd_tbl;
        struct scatterlist *sg;
        int byte_count = 0;
        int sg_count = 0;
@@ -439,7 +464,7 @@ static int qedf_map_sg(struct qedf_ioreq *io_req)
 
                bd[bd_count].sge_addr.lo = (addr & 0xffffffff);
                bd[bd_count].sge_addr.hi = (addr >> 32);
-               bd[bd_count].size = (u16)sg_len;
+               bd[bd_count].sge_len = (u16)sg_len;
 
                return ++bd_count;
        }
@@ -480,7 +505,7 @@ static int qedf_map_sg(struct qedf_ioreq *io_req)
                        sg_frags = 1;
                        bd[bd_count].sge_addr.lo = U64_LO(addr);
                        bd[bd_count].sge_addr.hi  = U64_HI(addr);
-                       bd[bd_count].size = (uint16_t)sg_len;
+                       bd[bd_count].sge_len = (uint16_t)sg_len;
                }
 
                bd_count += sg_frags;
@@ -498,7 +523,7 @@ static int qedf_map_sg(struct qedf_ioreq *io_req)
 static int qedf_build_bd_list_from_sg(struct qedf_ioreq *io_req)
 {
        struct scsi_cmnd *sc = io_req->sc_cmd;
-       struct fcoe_sge *bd = io_req->bd_tbl->bd_tbl;
+       struct scsi_sge *bd = io_req->bd_tbl->bd_tbl;
        int bd_count;
 
        if (scsi_sg_count(sc)) {
@@ -508,7 +533,7 @@ static int qedf_build_bd_list_from_sg(struct qedf_ioreq *io_req)
        } else {
                bd_count = 0;
                bd[0].sge_addr.lo = bd[0].sge_addr.hi = 0;
-               bd[0].size = 0;
+               bd[0].sge_len = 0;
        }
        io_req->bd_tbl->bd_valid = bd_count;
 
@@ -529,430 +554,223 @@ static void qedf_build_fcp_cmnd(struct qedf_ioreq *io_req,
 
        /* 4 bytes: flag info */
        fcp_cmnd->fc_pri_ta = 0;
-       fcp_cmnd->fc_tm_flags = io_req->mp_req.tm_flags;
+       fcp_cmnd->fc_tm_flags = io_req->tm_flags;
        fcp_cmnd->fc_flags = io_req->io_req_flags;
        fcp_cmnd->fc_cmdref = 0;
 
        /* Populate data direction */
-       if (sc_cmd->sc_data_direction == DMA_TO_DEVICE)
-               fcp_cmnd->fc_flags |= FCP_CFL_WRDATA;
-       else if (sc_cmd->sc_data_direction == DMA_FROM_DEVICE)
+       if (io_req->cmd_type == QEDF_TASK_MGMT_CMD) {
                fcp_cmnd->fc_flags |= FCP_CFL_RDDATA;
+       } else {
+               if (sc_cmd->sc_data_direction == DMA_TO_DEVICE)
+                       fcp_cmnd->fc_flags |= FCP_CFL_WRDATA;
+               else if (sc_cmd->sc_data_direction == DMA_FROM_DEVICE)
+                       fcp_cmnd->fc_flags |= FCP_CFL_RDDATA;
+       }
 
        fcp_cmnd->fc_pri_ta = FCP_PTA_SIMPLE;
 
        /* 16 bytes: CDB information */
-       memcpy(fcp_cmnd->fc_cdb, sc_cmd->cmnd, sc_cmd->cmd_len);
+       if (io_req->cmd_type != QEDF_TASK_MGMT_CMD)
+               memcpy(fcp_cmnd->fc_cdb, sc_cmd->cmnd, sc_cmd->cmd_len);
 
        /* 4 bytes: FCP data length */
        fcp_cmnd->fc_dl = htonl(io_req->data_xfer_len);
-
 }
 
 static void  qedf_init_task(struct qedf_rport *fcport, struct fc_lport *lport,
-       struct qedf_ioreq *io_req, u32 *ptu_invalidate,
-       struct fcoe_task_context *task_ctx)
+       struct qedf_ioreq *io_req, struct fcoe_task_context *task_ctx,
+       struct fcoe_wqe *sqe)
 {
        enum fcoe_task_type task_type;
        struct scsi_cmnd *sc_cmd = io_req->sc_cmd;
        struct io_bdt *bd_tbl = io_req->bd_tbl;
-       union fcoe_data_desc_ctx *data_desc;
-       u32 *fcp_cmnd;
+       u8 fcp_cmnd[32];
        u32 tmp_fcp_cmnd[8];
-       int cnt, i;
-       int bd_count;
+       int bd_count = 0;
        struct qedf_ctx *qedf = fcport->qedf;
        uint16_t cq_idx = smp_processor_id() % qedf->num_queues;
-       u8 tmp_sgl_mode = 0;
-       u8 mst_sgl_mode = 0;
+       struct regpair sense_data_buffer_phys_addr;
+       u32 tx_io_size = 0;
+       u32 rx_io_size = 0;
+       int i, cnt;
 
-       memset(task_ctx, 0, sizeof(struct fcoe_task_context));
+       /* Note init_initiator_rw_fcoe_task memsets the task context */
        io_req->task = task_ctx;
+       memset(task_ctx, 0, sizeof(struct fcoe_task_context));
+       memset(io_req->task_params, 0, sizeof(struct fcoe_task_params));
+       memset(io_req->sgl_task_params, 0, sizeof(struct scsi_sgl_task_params));
 
-       if (sc_cmd->sc_data_direction == DMA_TO_DEVICE)
-               task_type = FCOE_TASK_TYPE_WRITE_INITIATOR;
-       else
+       /* Set task type based on the DMA direction of the command */
+       if (io_req->cmd_type == QEDF_TASK_MGMT_CMD) {
                task_type = FCOE_TASK_TYPE_READ_INITIATOR;
-
-       /* Y Storm context */
-       task_ctx->ystorm_st_context.expect_first_xfer = 1;
-       task_ctx->ystorm_st_context.data_2_trns_rem = io_req->data_xfer_len;
-       /* Check if this is required */
-       task_ctx->ystorm_st_context.ox_id = io_req->xid;
-       task_ctx->ystorm_st_context.task_rety_identifier =
-           io_req->task_retry_identifier;
-
-       /* T Storm ag context */
-       SET_FIELD(task_ctx->tstorm_ag_context.flags0,
-           TSTORM_FCOE_TASK_AG_CTX_CONNECTION_TYPE, PROTOCOLID_FCOE);
-       task_ctx->tstorm_ag_context.icid = (u16)fcport->fw_cid;
-
-       /* T Storm st context */
-       SET_FIELD(task_ctx->tstorm_st_context.read_write.flags,
-           FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_EXP_FIRST_FRAME,
-           1);
-       task_ctx->tstorm_st_context.read_write.rx_id = 0xffff;
-
-       task_ctx->tstorm_st_context.read_only.dev_type =
-           FCOE_TASK_DEV_TYPE_DISK;
-       task_ctx->tstorm_st_context.read_only.conf_supported = 0;
-       task_ctx->tstorm_st_context.read_only.cid = fcport->fw_cid;
-
-       /* Completion queue for response. */
-       task_ctx->tstorm_st_context.read_only.glbl_q_num = cq_idx;
-       task_ctx->tstorm_st_context.read_only.fcp_cmd_trns_size =
-           io_req->data_xfer_len;
-       task_ctx->tstorm_st_context.read_write.e_d_tov_exp_timeout_val =
-           lport->e_d_tov;
-
-       task_ctx->ustorm_ag_context.global_cq_num = cq_idx;
-       io_req->fp_idx = cq_idx;
-
-       bd_count = bd_tbl->bd_valid;
-       if (task_type == FCOE_TASK_TYPE_WRITE_INITIATOR) {
-               /* Setup WRITE task */
-               struct fcoe_sge *fcoe_bd_tbl = bd_tbl->bd_tbl;
-
-               task_ctx->ystorm_st_context.task_type =
-                   FCOE_TASK_TYPE_WRITE_INITIATOR;
-               data_desc = &task_ctx->ystorm_st_context.data_desc;
-
-               if (io_req->use_slowpath) {
-                       SET_FIELD(task_ctx->ystorm_st_context.sgl_mode,
-                           YSTORM_FCOE_TASK_ST_CTX_TX_SGL_MODE,
-                           FCOE_SLOW_SGL);
-                       data_desc->slow.base_sgl_addr.lo =
-                           U64_LO(bd_tbl->bd_tbl_dma);
-                       data_desc->slow.base_sgl_addr.hi =
-                           U64_HI(bd_tbl->bd_tbl_dma);
-                       data_desc->slow.remainder_num_sges = bd_count;
-                       data_desc->slow.curr_sge_off = 0;
-                       data_desc->slow.curr_sgl_index = 0;
-                       qedf->slow_sge_ios++;
-                       io_req->sge_type = QEDF_IOREQ_SLOW_SGE;
-               } else {
-                       SET_FIELD(task_ctx->ystorm_st_context.sgl_mode,
-                           YSTORM_FCOE_TASK_ST_CTX_TX_SGL_MODE,
-                           (bd_count <= 4) ? (enum fcoe_sgl_mode)bd_count :
-                           FCOE_MUL_FAST_SGES);
-
-                       if (bd_count == 1) {
-                               data_desc->single_sge.sge_addr.lo =
-                                   fcoe_bd_tbl->sge_addr.lo;
-                               data_desc->single_sge.sge_addr.hi =
-                                   fcoe_bd_tbl->sge_addr.hi;
-                               data_desc->single_sge.size =
-                                   fcoe_bd_tbl->size;
-                               data_desc->single_sge.is_valid_sge = 0;
-                               qedf->single_sge_ios++;
-                               io_req->sge_type = QEDF_IOREQ_SINGLE_SGE;
-                       } else {
-                               data_desc->fast.sgl_start_addr.lo =
-                                   U64_LO(bd_tbl->bd_tbl_dma);
-                               data_desc->fast.sgl_start_addr.hi =
-                                   U64_HI(bd_tbl->bd_tbl_dma);
-                               data_desc->fast.sgl_byte_offset =
-                                   data_desc->fast.sgl_start_addr.lo &
-                                   (QEDF_PAGE_SIZE - 1);
-                               if (data_desc->fast.sgl_byte_offset > 0)
-                                       QEDF_ERR(&(qedf->dbg_ctx),
-                                           "byte_offset=%u for xid=0x%x.\n",
-                                           io_req->xid,
-                                           data_desc->fast.sgl_byte_offset);
-                               data_desc->fast.task_reuse_cnt =
-                                   io_req->reuse_count;
-                               io_req->reuse_count++;
-                               if (io_req->reuse_count == QEDF_MAX_REUSE) {
-                                       *ptu_invalidate = 1;
-                                       io_req->reuse_count = 0;
-                               }
-                               qedf->fast_sge_ios++;
-                               io_req->sge_type = QEDF_IOREQ_FAST_SGE;
-                       }
-               }
-
-               /* T Storm context */
-               task_ctx->tstorm_st_context.read_only.task_type =
-                   FCOE_TASK_TYPE_WRITE_INITIATOR;
-
-               /* M Storm context */
-               tmp_sgl_mode = GET_FIELD(task_ctx->ystorm_st_context.sgl_mode,
-                   YSTORM_FCOE_TASK_ST_CTX_TX_SGL_MODE);
-               SET_FIELD(task_ctx->mstorm_st_context.non_fp.tx_rx_sgl_mode,
-                   FCOE_MSTORM_FCOE_TASK_ST_CTX_NON_FP_TX_SGL_MODE,
-                   tmp_sgl_mode);
-
        } else {
-               /* Setup READ task */
-
-               /* M Storm context */
-               struct fcoe_sge *fcoe_bd_tbl = bd_tbl->bd_tbl;
-
-               data_desc = &task_ctx->mstorm_st_context.fp.data_desc;
-               task_ctx->mstorm_st_context.fp.data_2_trns_rem =
-                   io_req->data_xfer_len;
-
-               if (io_req->use_slowpath) {
-                       SET_FIELD(
-                           task_ctx->mstorm_st_context.non_fp.tx_rx_sgl_mode,
-                           FCOE_MSTORM_FCOE_TASK_ST_CTX_NON_FP_RX_SGL_MODE,
-                           FCOE_SLOW_SGL);
-                       data_desc->slow.base_sgl_addr.lo =
-                           U64_LO(bd_tbl->bd_tbl_dma);
-                       data_desc->slow.base_sgl_addr.hi =
-                           U64_HI(bd_tbl->bd_tbl_dma);
-                       data_desc->slow.remainder_num_sges =
-                           bd_count;
-                       data_desc->slow.curr_sge_off = 0;
-                       data_desc->slow.curr_sgl_index = 0;
-                       qedf->slow_sge_ios++;
-                       io_req->sge_type = QEDF_IOREQ_SLOW_SGE;
+               if (sc_cmd->sc_data_direction == DMA_TO_DEVICE) {
+                       task_type = FCOE_TASK_TYPE_WRITE_INITIATOR;
+                       tx_io_size = io_req->data_xfer_len;
                } else {
-                       SET_FIELD(
-                           task_ctx->mstorm_st_context.non_fp.tx_rx_sgl_mode,
-                           FCOE_MSTORM_FCOE_TASK_ST_CTX_NON_FP_RX_SGL_MODE,
-                           (bd_count <= 4) ? (enum fcoe_sgl_mode)bd_count :
-                           FCOE_MUL_FAST_SGES);
-
-                       if (bd_count == 1) {
-                               data_desc->single_sge.sge_addr.lo =
-                                   fcoe_bd_tbl->sge_addr.lo;
-                               data_desc->single_sge.sge_addr.hi =
-                                   fcoe_bd_tbl->sge_addr.hi;
-                               data_desc->single_sge.size =
-                                   fcoe_bd_tbl->size;
-                               data_desc->single_sge.is_valid_sge = 0;
-                               qedf->single_sge_ios++;
-                               io_req->sge_type = QEDF_IOREQ_SINGLE_SGE;
-                       } else {
-                               data_desc->fast.sgl_start_addr.lo =
-                                   U64_LO(bd_tbl->bd_tbl_dma);
-                               data_desc->fast.sgl_start_addr.hi =
-                                   U64_HI(bd_tbl->bd_tbl_dma);
-                               data_desc->fast.sgl_byte_offset = 0;
-                               data_desc->fast.task_reuse_cnt =
-                                   io_req->reuse_count;
-                               io_req->reuse_count++;
-                               if (io_req->reuse_count == QEDF_MAX_REUSE) {
-                                       *ptu_invalidate = 1;
-                                       io_req->reuse_count = 0;
-                               }
-                               qedf->fast_sge_ios++;
-                               io_req->sge_type = QEDF_IOREQ_FAST_SGE;
-                       }
+                       task_type = FCOE_TASK_TYPE_READ_INITIATOR;
+                       rx_io_size = io_req->data_xfer_len;
                }
-
-               /* Y Storm context */
-               task_ctx->ystorm_st_context.expect_first_xfer = 0;
-               task_ctx->ystorm_st_context.task_type =
-                   FCOE_TASK_TYPE_READ_INITIATOR;
-
-               /* T Storm context */
-               task_ctx->tstorm_st_context.read_only.task_type =
-                   FCOE_TASK_TYPE_READ_INITIATOR;
-               mst_sgl_mode = GET_FIELD(
-                   task_ctx->mstorm_st_context.non_fp.tx_rx_sgl_mode,
-                   FCOE_MSTORM_FCOE_TASK_ST_CTX_NON_FP_RX_SGL_MODE);
-               SET_FIELD(task_ctx->tstorm_st_context.read_write.flags,
-                   FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_RX_SGL_MODE,
-                   mst_sgl_mode);
        }
 
+       /* Setup the fields for fcoe_task_params */
+       io_req->task_params->context = task_ctx;
+       io_req->task_params->sqe = sqe;
+       io_req->task_params->task_type = task_type;
+       io_req->task_params->tx_io_size = tx_io_size;
+       io_req->task_params->rx_io_size = rx_io_size;
+       io_req->task_params->conn_cid = fcport->fw_cid;
+       io_req->task_params->itid = io_req->xid;
+       io_req->task_params->cq_rss_number = cq_idx;
+       io_req->task_params->is_tape_device = fcport->dev_type;
+
+       /* Fill in information for scatter/gather list */
+       if (io_req->cmd_type != QEDF_TASK_MGMT_CMD) {
+               bd_count = bd_tbl->bd_valid;
+               io_req->sgl_task_params->sgl = bd_tbl->bd_tbl;
+               io_req->sgl_task_params->sgl_phys_addr.lo =
+                       U64_LO(bd_tbl->bd_tbl_dma);
+               io_req->sgl_task_params->sgl_phys_addr.hi =
+                       U64_HI(bd_tbl->bd_tbl_dma);
+               io_req->sgl_task_params->num_sges = bd_count;
+               io_req->sgl_task_params->total_buffer_size =
+                   scsi_bufflen(io_req->sc_cmd);
+               io_req->sgl_task_params->small_mid_sge =
+                       io_req->use_slowpath;
+       }
+
+       /* Fill in physical address of sense buffer */
+       sense_data_buffer_phys_addr.lo = U64_LO(io_req->sense_buffer_dma);
+       sense_data_buffer_phys_addr.hi = U64_HI(io_req->sense_buffer_dma);
+
        /* fill FCP_CMND IU */
-       fcp_cmnd = (u32 *)task_ctx->ystorm_st_context.tx_info_union.fcp_cmd_payload.opaque;
-       qedf_build_fcp_cmnd(io_req, (struct fcp_cmnd *)&tmp_fcp_cmnd);
+       qedf_build_fcp_cmnd(io_req, (struct fcp_cmnd *)tmp_fcp_cmnd);
 
        /* Swap fcp_cmnd since FC is big endian */
        cnt = sizeof(struct fcp_cmnd) / sizeof(u32);
-
        for (i = 0; i < cnt; i++) {
-               *fcp_cmnd = cpu_to_be32(tmp_fcp_cmnd[i]);
-               fcp_cmnd++;
+               tmp_fcp_cmnd[i] = cpu_to_be32(tmp_fcp_cmnd[i]);
+       }
+       memcpy(fcp_cmnd, tmp_fcp_cmnd, sizeof(struct fcp_cmnd));
+
+       init_initiator_rw_fcoe_task(io_req->task_params,
+                                   io_req->sgl_task_params,
+                                   sense_data_buffer_phys_addr,
+                                   io_req->task_retry_identifier, fcp_cmnd);
+
+       /* Increment SGL type counters */
+       if (bd_count == 1) {
+               qedf->single_sge_ios++;
+               io_req->sge_type = QEDF_IOREQ_SINGLE_SGE;
+       } else if (io_req->use_slowpath) {
+               qedf->slow_sge_ios++;
+               io_req->sge_type = QEDF_IOREQ_SLOW_SGE;
+       } else {
+               qedf->fast_sge_ios++;
+               io_req->sge_type = QEDF_IOREQ_FAST_SGE;
        }
-
-       /* M Storm context - Sense buffer */
-       task_ctx->mstorm_st_context.non_fp.rsp_buf_addr.lo =
-               U64_LO(io_req->sense_buffer_dma);
-       task_ctx->mstorm_st_context.non_fp.rsp_buf_addr.hi =
-               U64_HI(io_req->sense_buffer_dma);
 }
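The direction handling above reduces to a small mapping; a standalone sketch (example_task_type() is a hypothetical helper; the task-management case, which the patch forces to a read-type task, is excluded):

	static enum fcoe_task_type example_task_type(struct scsi_cmnd *sc_cmd,
						     u32 xfer_len,
						     u32 *tx_io_size,
						     u32 *rx_io_size)
	{
		if (sc_cmd->sc_data_direction == DMA_TO_DEVICE) {
			*tx_io_size = xfer_len;		/* host -> target */
			return FCOE_TASK_TYPE_WRITE_INITIATOR;
		}
		*rx_io_size = xfer_len;			/* target -> host */
		return FCOE_TASK_TYPE_READ_INITIATOR;
	}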
 
 void qedf_init_mp_task(struct qedf_ioreq *io_req,
-       struct fcoe_task_context *task_ctx)
+       struct fcoe_task_context *task_ctx, struct fcoe_wqe *sqe)
 {
        struct qedf_mp_req *mp_req = &(io_req->mp_req);
        struct qedf_rport *fcport = io_req->fcport;
        struct qedf_ctx *qedf = io_req->fcport->qedf;
        struct fc_frame_header *fc_hdr;
-       enum fcoe_task_type task_type = 0;
-       union fcoe_data_desc_ctx *data_desc;
+       struct fcoe_tx_mid_path_params task_fc_hdr;
+       struct scsi_sgl_task_params tx_sgl_task_params;
+       struct scsi_sgl_task_params rx_sgl_task_params;
 
-       QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC, "Initializing MP task "
-                  "for cmd_type = %d\n", io_req->cmd_type);
+       QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC,
+                 "Initializing MP task for cmd_type=%d\n",
+                 io_req->cmd_type);
 
        qedf->control_requests++;
 
-       /* Obtain task_type */
-       if ((io_req->cmd_type == QEDF_TASK_MGMT_CMD) ||
-           (io_req->cmd_type == QEDF_ELS)) {
-               task_type = FCOE_TASK_TYPE_MIDPATH;
-       } else if (io_req->cmd_type == QEDF_ABTS) {
-               task_type = FCOE_TASK_TYPE_ABTS;
-       }
-
+       memset(&tx_sgl_task_params, 0, sizeof(struct scsi_sgl_task_params));
+       memset(&rx_sgl_task_params, 0, sizeof(struct scsi_sgl_task_params));
        memset(task_ctx, 0, sizeof(struct fcoe_task_context));
+       memset(&task_fc_hdr, 0, sizeof(struct fcoe_tx_mid_path_params));
 
        /* Setup the task from io_req for easy reference */
        io_req->task = task_ctx;
 
-       QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC, "task type = %d\n",
-                  task_type);
-
-       /* YSTORM only */
-       {
-               /* Initialize YSTORM task context */
-               struct fcoe_tx_mid_path_params *task_fc_hdr =
-                   &task_ctx->ystorm_st_context.tx_info_union.tx_params.mid_path;
-               memset(task_fc_hdr, 0, sizeof(struct fcoe_tx_mid_path_params));
-               task_ctx->ystorm_st_context.task_rety_identifier =
-                   io_req->task_retry_identifier;
-
-               /* Init SGL parameters */
-               if ((task_type == FCOE_TASK_TYPE_MIDPATH) ||
-                   (task_type == FCOE_TASK_TYPE_UNSOLICITED)) {
-                       data_desc = &task_ctx->ystorm_st_context.data_desc;
-                       data_desc->slow.base_sgl_addr.lo =
-                           U64_LO(mp_req->mp_req_bd_dma);
-                       data_desc->slow.base_sgl_addr.hi =
-                           U64_HI(mp_req->mp_req_bd_dma);
-                       data_desc->slow.remainder_num_sges = 1;
-                       data_desc->slow.curr_sge_off = 0;
-                       data_desc->slow.curr_sgl_index = 0;
-               }
-
-               fc_hdr = &(mp_req->req_fc_hdr);
-               if (task_type == FCOE_TASK_TYPE_MIDPATH) {
-                       fc_hdr->fh_ox_id = io_req->xid;
-                       fc_hdr->fh_rx_id = htons(0xffff);
-               } else if (task_type == FCOE_TASK_TYPE_UNSOLICITED) {
-                       fc_hdr->fh_rx_id = io_req->xid;
-               }
+       /* Setup the fields for fcoe_task_params */
+       io_req->task_params->context = task_ctx;
+       io_req->task_params->sqe = sqe;
+       io_req->task_params->task_type = FCOE_TASK_TYPE_MIDPATH;
+       io_req->task_params->tx_io_size = io_req->data_xfer_len;
+       /* rx_io_size tells the f/w how large a response buffer we have */
+       io_req->task_params->rx_io_size = PAGE_SIZE;
+       io_req->task_params->conn_cid = fcport->fw_cid;
+       io_req->task_params->itid = io_req->xid;
+       /* Return middle path commands on CQ 0 */
+       io_req->task_params->cq_rss_number = 0;
+       io_req->task_params->is_tape_device = fcport->dev_type;
+
+       fc_hdr = &(mp_req->req_fc_hdr);
+       /* Set OX_ID and RX_ID based on driver task id */
+       fc_hdr->fh_ox_id = io_req->xid;
+       fc_hdr->fh_rx_id = htons(0xffff);
+
+       /* Set up FC header information */
+       task_fc_hdr.parameter = fc_hdr->fh_parm_offset;
+       task_fc_hdr.r_ctl = fc_hdr->fh_r_ctl;
+       task_fc_hdr.type = fc_hdr->fh_type;
+       task_fc_hdr.cs_ctl = fc_hdr->fh_cs_ctl;
+       task_fc_hdr.df_ctl = fc_hdr->fh_df_ctl;
+       task_fc_hdr.rx_id = fc_hdr->fh_rx_id;
+       task_fc_hdr.ox_id = fc_hdr->fh_ox_id;
+
+       /* Set up s/g list parameters for request buffer */
+       tx_sgl_task_params.sgl = mp_req->mp_req_bd;
+       tx_sgl_task_params.sgl_phys_addr.lo = U64_LO(mp_req->mp_req_bd_dma);
+       tx_sgl_task_params.sgl_phys_addr.hi = U64_HI(mp_req->mp_req_bd_dma);
+       tx_sgl_task_params.num_sges = 1;
+       /* Request buffer covers the full transfer length */
+       tx_sgl_task_params.total_buffer_size = io_req->data_xfer_len;
+       tx_sgl_task_params.small_mid_sge = 0;
+
+       /* Set up s/g list parameters for response buffer */
+       rx_sgl_task_params.sgl = mp_req->mp_resp_bd;
+       rx_sgl_task_params.sgl_phys_addr.lo = U64_LO(mp_req->mp_resp_bd_dma);
+       rx_sgl_task_params.sgl_phys_addr.hi = U64_HI(mp_req->mp_resp_bd_dma);
+       rx_sgl_task_params.num_sges = 1;
+       /* Response buffer is a single page, so total size is PAGE_SIZE */
+       rx_sgl_task_params.total_buffer_size = PAGE_SIZE;
+       rx_sgl_task_params.small_mid_sge = 0;
 
-               /* Fill FC Header into middle path buffer */
-               task_fc_hdr->parameter = fc_hdr->fh_parm_offset;
-               task_fc_hdr->r_ctl = fc_hdr->fh_r_ctl;
-               task_fc_hdr->type = fc_hdr->fh_type;
-               task_fc_hdr->cs_ctl = fc_hdr->fh_cs_ctl;
-               task_fc_hdr->df_ctl = fc_hdr->fh_df_ctl;
-               task_fc_hdr->rx_id = fc_hdr->fh_rx_id;
-               task_fc_hdr->ox_id = fc_hdr->fh_ox_id;
-
-               task_ctx->ystorm_st_context.data_2_trns_rem =
-                   io_req->data_xfer_len;
-               task_ctx->ystorm_st_context.task_type = task_type;
-       }
-
-       /* TSTORM ONLY */
-       {
-               task_ctx->tstorm_ag_context.icid = (u16)fcport->fw_cid;
-               task_ctx->tstorm_st_context.read_only.cid = fcport->fw_cid;
-               /* Always send middle-path repsonses on CQ #0 */
-               task_ctx->tstorm_st_context.read_only.glbl_q_num = 0;
-               io_req->fp_idx = 0;
-               SET_FIELD(task_ctx->tstorm_ag_context.flags0,
-                   TSTORM_FCOE_TASK_AG_CTX_CONNECTION_TYPE,
-                   PROTOCOLID_FCOE);
-               task_ctx->tstorm_st_context.read_only.task_type = task_type;
-               SET_FIELD(task_ctx->tstorm_st_context.read_write.flags,
-                   FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_EXP_FIRST_FRAME,
-                   1);
-               task_ctx->tstorm_st_context.read_write.rx_id = 0xffff;
-       }
-
-       /* MSTORM only */
-       {
-               if (task_type == FCOE_TASK_TYPE_MIDPATH) {
-                       /* Initialize task context */
-                       data_desc = &task_ctx->mstorm_st_context.fp.data_desc;
-
-                       /* Set cache sges address and length */
-                       data_desc->slow.base_sgl_addr.lo =
-                           U64_LO(mp_req->mp_resp_bd_dma);
-                       data_desc->slow.base_sgl_addr.hi =
-                           U64_HI(mp_req->mp_resp_bd_dma);
-                       data_desc->slow.remainder_num_sges = 1;
-                       data_desc->slow.curr_sge_off = 0;
-                       data_desc->slow.curr_sgl_index = 0;
 
-                       /*
-                        * Also need to fil in non-fastpath response address
-                        * for middle path commands.
-                        */
-                       task_ctx->mstorm_st_context.non_fp.rsp_buf_addr.lo =
-                           U64_LO(mp_req->mp_resp_bd_dma);
-                       task_ctx->mstorm_st_context.non_fp.rsp_buf_addr.hi =
-                           U64_HI(mp_req->mp_resp_bd_dma);
-               }
-       }
-
-       /* USTORM ONLY */
-       {
-               task_ctx->ustorm_ag_context.global_cq_num = 0;
-       }
+       /*
+        * Last arg is 0 because the previous code did not request that the
+        * FW place the FC header.
+        */
+       init_initiator_midpath_unsolicited_fcoe_task(io_req->task_params,
+                                                    &task_fc_hdr,
+                                                    &tx_sgl_task_params,
+                                                    &rx_sgl_task_params, 0);
 
-       /* I/O stats. Middle path commands always use slow SGEs */
-       qedf->slow_sge_ios++;
-       io_req->sge_type = QEDF_IOREQ_SLOW_SGE;
+       /* Midpath requests always consume 1 SGE */
+       qedf->single_sge_ios++;
 }
 
-void qedf_add_to_sq(struct qedf_rport *fcport, u16 xid, u32 ptu_invalidate,
-       enum fcoe_task_type req_type, u32 offset)
+/* Assumes fcport->rport_lock is held */
+u16 qedf_get_sqe_idx(struct qedf_rport *fcport)
 {
-       struct fcoe_wqe *sqe;
        uint16_t total_sqe = (fcport->sq_mem_size)/(sizeof(struct fcoe_wqe));
+       u16 rval;
 
-       sqe = &fcport->sq[fcport->sq_prod_idx];
+       rval = fcport->sq_prod_idx;
 
+       /* Adjust ring index */
        fcport->sq_prod_idx++;
        fcport->fw_sq_prod_idx++;
        if (fcport->sq_prod_idx == total_sqe)
                fcport->sq_prod_idx = 0;
 
-       switch (req_type) {
-       case FCOE_TASK_TYPE_WRITE_INITIATOR:
-       case FCOE_TASK_TYPE_READ_INITIATOR:
-               SET_FIELD(sqe->flags, FCOE_WQE_REQ_TYPE, SEND_FCOE_CMD);
-               if (ptu_invalidate)
-                       SET_FIELD(sqe->flags, FCOE_WQE_INVALIDATE_PTU, 1);
-               break;
-       case FCOE_TASK_TYPE_MIDPATH:
-               SET_FIELD(sqe->flags, FCOE_WQE_REQ_TYPE, SEND_FCOE_MIDPATH);
-               break;
-       case FCOE_TASK_TYPE_ABTS:
-               SET_FIELD(sqe->flags, FCOE_WQE_REQ_TYPE,
-                   SEND_FCOE_ABTS_REQUEST);
-               break;
-       case FCOE_TASK_TYPE_EXCHANGE_CLEANUP:
-               SET_FIELD(sqe->flags, FCOE_WQE_REQ_TYPE,
-                    FCOE_EXCHANGE_CLEANUP);
-               break;
-       case FCOE_TASK_TYPE_SEQUENCE_CLEANUP:
-               SET_FIELD(sqe->flags, FCOE_WQE_REQ_TYPE,
-                   FCOE_SEQUENCE_RECOVERY);
-               /* NOTE: offset param only used for sequence recovery */
-               sqe->additional_info_union.seq_rec_updated_offset = offset;
-               break;
-       case FCOE_TASK_TYPE_UNSOLICITED:
-               break;
-       default:
-               break;
-       }
-
-       sqe->task_id = xid;
-
-       /* Make sure SQ data is coherent */
-       wmb();
-
+       return rval;
 }
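With qedf_add_to_sq() gone, every submission path in this patch follows the same pattern under fcport->rport_lock: reserve a slot, zero the WQE, point task_params->sqe at it, call the HSI init helper, then ring the doorbell. Condensed from the cleanup path below (illustrative; variable declarations omitted):

	spin_lock_irqsave(&fcport->rport_lock, flags);

	sqe_idx = qedf_get_sqe_idx(fcport);
	sqe = &fcport->sq[sqe_idx];
	memset(sqe, 0, sizeof(struct fcoe_wqe));
	io_req->task_params->sqe = sqe;

	init_initiator_cleanup_fcoe_task(io_req->task_params);
	qedf_ring_doorbell(fcport);

	spin_unlock_irqrestore(&fcport->rport_lock, flags);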
 
 void qedf_ring_doorbell(struct qedf_rport *fcport)
@@ -1029,7 +847,8 @@ int qedf_post_io_req(struct qedf_rport *fcport, struct qedf_ioreq *io_req)
        struct fcoe_task_context *task_ctx;
        u16 xid;
        enum fcoe_task_type req_type = 0;
-       u32 ptu_invalidate = 0;
+       struct fcoe_wqe *sqe;
+       u16 sqe_idx;
 
        /* Initialize rest of io_req fields */
        io_req->data_xfer_len = scsi_bufflen(sc_cmd);
@@ -1061,6 +880,16 @@ int qedf_post_io_req(struct qedf_rport *fcport, struct qedf_ioreq *io_req)
                return -EAGAIN;
        }
 
+       if (!test_bit(QEDF_RPORT_SESSION_READY, &fcport->flags)) {
+               QEDF_ERR(&(qedf->dbg_ctx), "Session not offloaded yet.\n");
+               kref_put(&io_req->refcount, qedf_release_cmd);
+       }
+
+       /* Obtain free SQE */
+       sqe_idx = qedf_get_sqe_idx(fcport);
+       sqe = &fcport->sq[sqe_idx];
+       memset(sqe, 0, sizeof(struct fcoe_wqe));
+
        /* Get the task context */
        task_ctx = qedf_get_task_mem(&qedf->tasks, xid);
        if (!task_ctx) {
@@ -1070,15 +899,7 @@ int qedf_post_io_req(struct qedf_rport *fcport, struct qedf_ioreq *io_req)
                return -EINVAL;
        }
 
-       qedf_init_task(fcport, lport, io_req, &ptu_invalidate, task_ctx);
-
-       if (!test_bit(QEDF_RPORT_SESSION_READY, &fcport->flags)) {
-               QEDF_ERR(&(qedf->dbg_ctx), "Session not offloaded yet.\n");
-               kref_put(&io_req->refcount, qedf_release_cmd);
-       }
-
-       /* Obtain free SQ entry */
-       qedf_add_to_sq(fcport, xid, ptu_invalidate, req_type, 0);
+       qedf_init_task(fcport, lport, io_req, task_ctx, sqe);
 
        /* Ring doorbell */
        qedf_ring_doorbell(fcport);
@@ -1342,7 +1163,7 @@ void qedf_scsi_completion(struct qedf_ctx *qedf, struct fcoe_cqe *cqe,
                } else {
                        refcount = kref_read(&io_req->refcount);
                        QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO,
-                           "%d:0:%d:%d xid=0x%0x op=0x%02x "
+                           "%d:0:%d:%lld xid=0x%0x op=0x%02x "
                            "lba=%02x%02x%02x%02x cdb_status=%d "
                            "fcp_resid=0x%x refcount=%d.\n",
                            qedf->lport->host->host_no, sc_cmd->device->id,
@@ -1426,7 +1247,7 @@ void qedf_scsi_done(struct qedf_ctx *qedf, struct qedf_ioreq *io_req,
 
        sc_cmd->result = result << 16;
        refcount = kref_read(&io_req->refcount);
-       QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO, "%d:0:%d:%d: Completing "
+       QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO, "%d:0:%d:%lld: Completing "
            "sc_cmd=%p result=0x%08x op=0x%02x lba=0x%02x%02x%02x%02x, "
            "allowed=%d retries=%d refcount=%d.\n",
            qedf->lport->host->host_no, sc_cmd->device->id,
@@ -1661,6 +1482,8 @@ int qedf_initiate_abts(struct qedf_ioreq *io_req, bool return_scsi_cmd_on_abts)
        u32 r_a_tov = 0;
        int rc = 0;
        unsigned long flags;
+       struct fcoe_wqe *sqe;
+       u16 sqe_idx;
 
        r_a_tov = rdata->r_a_tov;
        lport = qedf->lport;
@@ -1712,10 +1535,12 @@ int qedf_initiate_abts(struct qedf_ioreq *io_req, bool return_scsi_cmd_on_abts)
 
        spin_lock_irqsave(&fcport->rport_lock, flags);
 
-       /* Add ABTS to send queue */
-       qedf_add_to_sq(fcport, xid, 0, FCOE_TASK_TYPE_ABTS, 0);
+       sqe_idx = qedf_get_sqe_idx(fcport);
+       sqe = &fcport->sq[sqe_idx];
+       memset(sqe, 0, sizeof(struct fcoe_wqe));
+       io_req->task_params->sqe = sqe;
 
-       /* Ring doorbell */
+       init_initiator_abort_fcoe_task(io_req->task_params);
        qedf_ring_doorbell(fcport);
 
        spin_unlock_irqrestore(&fcport->rport_lock, flags);
@@ -1784,8 +1609,8 @@ void qedf_process_abts_compl(struct qedf_ctx *qedf, struct fcoe_cqe *cqe,
 int qedf_init_mp_req(struct qedf_ioreq *io_req)
 {
        struct qedf_mp_req *mp_req;
-       struct fcoe_sge *mp_req_bd;
-       struct fcoe_sge *mp_resp_bd;
+       struct scsi_sge *mp_req_bd;
+       struct scsi_sge *mp_resp_bd;
        struct qedf_ctx *qedf = io_req->fcport->qedf;
        dma_addr_t addr;
        uint64_t sz;
@@ -1819,7 +1644,7 @@ int qedf_init_mp_req(struct qedf_ioreq *io_req)
        }
 
        /* Allocate and map mp_req_bd and mp_resp_bd */
-       sz = sizeof(struct fcoe_sge);
+       sz = sizeof(struct scsi_sge);
        mp_req->mp_req_bd = dma_alloc_coherent(&qedf->pdev->dev, sz,
            &mp_req->mp_req_bd_dma, GFP_KERNEL);
        if (!mp_req->mp_req_bd) {
@@ -1841,7 +1666,7 @@ int qedf_init_mp_req(struct qedf_ioreq *io_req)
        mp_req_bd = mp_req->mp_req_bd;
        mp_req_bd->sge_addr.lo = U64_LO(addr);
        mp_req_bd->sge_addr.hi = U64_HI(addr);
-       mp_req_bd->size = QEDF_PAGE_SIZE;
+       mp_req_bd->sge_len = QEDF_PAGE_SIZE;
 
        /*
         * MP buffer is either a task mgmt command or an ELS.
@@ -1852,7 +1677,7 @@ int qedf_init_mp_req(struct qedf_ioreq *io_req)
        addr = mp_req->resp_buf_dma;
        mp_resp_bd->sge_addr.lo = U64_LO(addr);
        mp_resp_bd->sge_addr.hi = U64_HI(addr);
-       mp_resp_bd->size = QEDF_PAGE_SIZE;
+       mp_resp_bd->sge_len = QEDF_PAGE_SIZE;
 
        return 0;
 }
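
Each scsi_sge describes one DMA buffer: a 64-bit address split into lo/hi 32-bit words (the driver's U64_LO()/U64_HI() macros above) plus a length in sge_len. A minimal free-standing sketch of the same fill, with a made-up helper name:

    static void example_fill_sge(struct scsi_sge *sge, dma_addr_t addr, u32 len)
    {
            sge->sge_addr.lo = (u32)((u64)addr & 0xffffffffULL);  /* low 32 bits  */
            sge->sge_addr.hi = (u32)((u64)addr >> 32);            /* high 32 bits */
            sge->sge_len = len;
    }
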
@@ -1895,6 +1720,8 @@ int qedf_initiate_cleanup(struct qedf_ioreq *io_req,
        int tmo = 0;
        int rc = SUCCESS;
        unsigned long flags;
+       struct fcoe_wqe *sqe;
+       u16 sqe_idx;
 
        fcport = io_req->fcport;
        if (!fcport) {
@@ -1940,12 +1767,16 @@ int qedf_initiate_cleanup(struct qedf_ioreq *io_req,
 
        init_completion(&io_req->tm_done);
 
-       /* Obtain free SQ entry */
        spin_lock_irqsave(&fcport->rport_lock, flags);
-       qedf_add_to_sq(fcport, xid, 0, FCOE_TASK_TYPE_EXCHANGE_CLEANUP, 0);
 
-       /* Ring doorbell */
+       sqe_idx = qedf_get_sqe_idx(fcport);
+       sqe = &fcport->sq[sqe_idx];
+       memset(sqe, 0, sizeof(struct fcoe_wqe));
+       io_req->task_params->sqe = sqe;
+
+       init_initiator_cleanup_fcoe_task(io_req->task_params);
        qedf_ring_doorbell(fcport);
+
        spin_unlock_irqrestore(&fcport->rport_lock, flags);
 
        tmo = wait_for_completion_timeout(&io_req->tm_done,
@@ -1991,16 +1822,15 @@ static int qedf_execute_tmf(struct qedf_rport *fcport, struct scsi_cmnd *sc_cmd,
        uint8_t tm_flags)
 {
        struct qedf_ioreq *io_req;
-       struct qedf_mp_req *tm_req;
        struct fcoe_task_context *task;
-       struct fc_frame_header *fc_hdr;
-       struct fcp_cmnd *fcp_cmnd;
        struct qedf_ctx *qedf = fcport->qedf;
+       struct fc_lport *lport = qedf->lport;
        int rc = 0;
        uint16_t xid;
-       uint32_t sid, did;
        int tmo = 0;
        unsigned long flags;
+       struct fcoe_wqe *sqe;
+       u16 sqe_idx;
 
        if (!sc_cmd) {
                QEDF_ERR(&(qedf->dbg_ctx), "invalid arg\n");
@@ -2031,36 +1861,14 @@ static int qedf_execute_tmf(struct qedf_rport *fcport, struct scsi_cmnd *sc_cmd,
        /* Set the return CPU to be the same as the request one */
        io_req->cpu = smp_processor_id();
 
-       tm_req = (struct qedf_mp_req *)&(io_req->mp_req);
-
-       rc = qedf_init_mp_req(io_req);
-       if (rc == FAILED) {
-               QEDF_ERR(&(qedf->dbg_ctx), "Task mgmt MP request init "
-                         "failed\n");
-               kref_put(&io_req->refcount, qedf_release_cmd);
-               goto reset_tmf_err;
-       }
-
        /* Set TM flags */
-       io_req->io_req_flags = 0;
-       tm_req->tm_flags = tm_flags;
+       io_req->io_req_flags = QEDF_READ;
+       io_req->data_xfer_len = 0;
+       io_req->tm_flags = tm_flags;
 
        /* Default is to return a SCSI command when an error occurs */
        io_req->return_scsi_cmd_on_abts = true;
 
-       /* Fill FCP_CMND */
-       qedf_build_fcp_cmnd(io_req, (struct fcp_cmnd *)tm_req->req_buf);
-       fcp_cmnd = (struct fcp_cmnd *)tm_req->req_buf;
-       memset(fcp_cmnd->fc_cdb, 0, FCP_CMND_LEN);
-       fcp_cmnd->fc_dl = 0;
-
-       /* Fill FC header */
-       fc_hdr = &(tm_req->req_fc_hdr);
-       sid = fcport->sid;
-       did = fcport->rdata->ids.port_id;
-       __fc_fill_fc_hdr(fc_hdr, FC_RCTL_DD_UNSOL_CMD, sid, did,
-                          FC_TYPE_FCP, FC_FC_FIRST_SEQ | FC_FC_END_SEQ |
-                          FC_FC_SEQ_INIT, 0);
        /* Obtain exchange id */
        xid = io_req->xid;
 
@@ -2069,16 +1877,18 @@ static int qedf_execute_tmf(struct qedf_rport *fcport, struct scsi_cmnd *sc_cmd,
 
        /* Initialize task context for this IO request */
        task = qedf_get_task_mem(&qedf->tasks, xid);
-       qedf_init_mp_task(io_req, task);
 
        init_completion(&io_req->tm_done);
 
-       /* Obtain free SQ entry */
        spin_lock_irqsave(&fcport->rport_lock, flags);
-       qedf_add_to_sq(fcport, xid, 0, FCOE_TASK_TYPE_MIDPATH, 0);
 
-       /* Ring doorbell */
+       sqe_idx = qedf_get_sqe_idx(fcport);
+       sqe = &fcport->sq[sqe_idx];
+       memset(sqe, 0, sizeof(struct fcoe_wqe));
+
+       qedf_init_task(fcport, lport, io_req, task, sqe);
        qedf_ring_doorbell(fcport);
+
        spin_unlock_irqrestore(&fcport->rport_lock, flags);
 
        tmo = wait_for_completion_timeout(&io_req->tm_done,
@@ -2162,14 +1972,6 @@ void qedf_process_tmf_compl(struct qedf_ctx *qedf, struct fcoe_cqe *cqe,
        struct qedf_ioreq *io_req)
 {
        struct fcoe_cqe_rsp_info *fcp_rsp;
-       struct fcoe_cqe_midpath_info *mp_info;
-
-
-       /* Get TMF response length from CQE */
-       mp_info = &cqe->cqe_info.midpath_info;
-       io_req->mp_req.resp_len = mp_info->data_placement_size;
-       QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_SCSI_TM,
-           "Response len is %d.\n", io_req->mp_req.resp_len);
 
        fcp_rsp = &cqe->cqe_info.rsp_info;
        qedf_parse_fcp_rsp(io_req, fcp_rsp);
index d9d7a86b5f8baf13038cfc1bd17885945d1fdf4e..8e2a160490e66a747e75bf9c0c1a149e34d474a3 100644 (file)
@@ -2456,8 +2456,8 @@ static int qedf_alloc_bdq(struct qedf_ctx *qedf)
        }
 
        QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC,
-           "BDQ PBL addr=0x%p dma=0x%llx.\n", qedf->bdq_pbl,
-           qedf->bdq_pbl_dma);
+                 "BDQ PBL addr=0x%p dma=%pad\n",
+                 qedf->bdq_pbl, &qedf->bdq_pbl_dma);
 
        /*
         * Populate BDQ PBL with physical and virtual address of individual
index 2b3e16b24299ee94bf752d361d7b479c7958c385..90a6925577cca6d5ea147f3028f645af844b91b6 100644 (file)
@@ -1,5 +1,5 @@
 obj-$(CONFIG_QEDI) := qedi.o
 qedi-y := qedi_main.o qedi_iscsi.o qedi_fw.o qedi_sysfs.o \
-           qedi_dbg.o
+           qedi_dbg.o qedi_fw_api.o
 
 qedi-$(CONFIG_DEBUG_FS) += qedi_debugfs.o
index 955936274241406e2c8df92a7f3a5fcd530d7f05..59417199bf363ae956bb2832f10fe22307a978d1 100644 (file)
@@ -14,7 +14,7 @@
 #include <linux/debugfs.h>
 #include <linux/module.h>
 
-int do_not_recover;
+int qedi_do_not_recover;
 static struct dentry *qedi_dbg_root;
 
 void
@@ -74,22 +74,22 @@ qedi_dbg_exit(void)
 static ssize_t
 qedi_dbg_do_not_recover_enable(struct qedi_dbg_ctx *qedi_dbg)
 {
-       if (!do_not_recover)
-               do_not_recover = 1;
+       if (!qedi_do_not_recover)
+               qedi_do_not_recover = 1;
 
        QEDI_INFO(qedi_dbg, QEDI_LOG_DEBUGFS, "do_not_recover=%d\n",
-                 do_not_recover);
+                 qedi_do_not_recover);
        return 0;
 }
 
 static ssize_t
 qedi_dbg_do_not_recover_disable(struct qedi_dbg_ctx *qedi_dbg)
 {
-       if (do_not_recover)
-               do_not_recover = 0;
+       if (qedi_do_not_recover)
+               qedi_do_not_recover = 0;
 
        QEDI_INFO(qedi_dbg, QEDI_LOG_DEBUGFS, "do_not_recover=%d\n",
-                 do_not_recover);
+                 qedi_do_not_recover);
        return 0;
 }
 
@@ -141,7 +141,7 @@ qedi_dbg_do_not_recover_cmd_read(struct file *filp, char __user *buffer,
        if (*ppos)
                return 0;
 
-       cnt = sprintf(buffer, "do_not_recover=%d\n", do_not_recover);
+       cnt = sprintf(buffer, "do_not_recover=%d\n", qedi_do_not_recover);
        cnt = min_t(int, count, cnt - *ppos);
        *ppos += cnt;
        return cnt;
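
The read handler above formats the flag with sprintf() directly into the caller-supplied buffer and does its own *ppos bookkeeping. A sketch of an equivalent handler built on simple_read_from_buffer(), which stages the text in a kernel-side scratch buffer and handles the offset and user-space copy itself (illustrative only, not the driver's code):

    static ssize_t example_do_not_recover_read(struct file *filp,
                                               char __user *buffer,
                                               size_t count, loff_t *ppos)
    {
            char buf[32];
            int len;

            /* Format into kernel memory first, then copy out to user space. */
            len = scnprintf(buf, sizeof(buf), "do_not_recover=%d\n",
                            qedi_do_not_recover);
            return simple_read_from_buffer(buffer, count, ppos, buf, len);
    }
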
index c9f0ef4e11b33ce9ca2a707645b1b2088b6a1f63..d6978cbc56f0586aa8a075191433184c50c93b01 100644 (file)
@@ -14,6 +14,8 @@
 #include "qedi.h"
 #include "qedi_iscsi.h"
 #include "qedi_gbl.h"
+#include "qedi_fw_iscsi.h"
+#include "qedi_fw_scsi.h"
 
 static int qedi_send_iscsi_tmf(struct qedi_conn *qedi_conn,
                               struct iscsi_task *mtask);
@@ -53,8 +55,8 @@ static void qedi_process_logout_resp(struct qedi_ctx *qedi,
        resp_hdr->exp_cmdsn = cpu_to_be32(cqe_logout_response->exp_cmd_sn);
        resp_hdr->max_cmdsn = cpu_to_be32(cqe_logout_response->max_cmd_sn);
 
-       resp_hdr->t2wait = cpu_to_be32(cqe_logout_response->time2wait);
-       resp_hdr->t2retain = cpu_to_be32(cqe_logout_response->time2retain);
+       resp_hdr->t2wait = cpu_to_be32(cqe_logout_response->time_2_wait);
+       resp_hdr->t2retain = cpu_to_be32(cqe_logout_response->time_2_retain);
 
        QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_TID,
                  "Freeing tid=0x%x for cid=0x%x\n",
@@ -975,81 +977,6 @@ exit_fp_process:
        return;
 }
 
-static void qedi_add_to_sq(struct qedi_conn *qedi_conn, struct iscsi_task *task,
-                          u16 tid, uint16_t ptu_invalidate, int is_cleanup)
-{
-       struct iscsi_wqe *wqe;
-       struct iscsi_wqe_field *cont_field;
-       struct qedi_endpoint *ep;
-       struct scsi_cmnd *sc = task->sc;
-       struct iscsi_login_req *login_hdr;
-       struct qedi_cmd *cmd = task->dd_data;
-
-       login_hdr = (struct iscsi_login_req *)task->hdr;
-       ep = qedi_conn->ep;
-       wqe = &ep->sq[ep->sq_prod_idx];
-
-       memset(wqe, 0, sizeof(*wqe));
-
-       ep->sq_prod_idx++;
-       ep->fw_sq_prod_idx++;
-       if (ep->sq_prod_idx == QEDI_SQ_SIZE)
-               ep->sq_prod_idx = 0;
-
-       if (is_cleanup) {
-               SET_FIELD(wqe->flags, ISCSI_WQE_WQE_TYPE,
-                         ISCSI_WQE_TYPE_TASK_CLEANUP);
-               wqe->task_id = tid;
-               return;
-       }
-
-       if (ptu_invalidate) {
-               SET_FIELD(wqe->flags, ISCSI_WQE_PTU_INVALIDATE,
-                         ISCSI_WQE_SET_PTU_INVALIDATE);
-       }
-
-       cont_field = &wqe->cont_prevtid_union.cont_field;
-
-       switch (task->hdr->opcode & ISCSI_OPCODE_MASK) {
-       case ISCSI_OP_LOGIN:
-       case ISCSI_OP_TEXT:
-               SET_FIELD(wqe->flags, ISCSI_WQE_WQE_TYPE,
-                         ISCSI_WQE_TYPE_MIDDLE_PATH);
-               SET_FIELD(wqe->flags, ISCSI_WQE_NUM_FAST_SGES,
-                         1);
-               cont_field->contlen_cdbsize_field = ntoh24(login_hdr->dlength);
-               break;
-       case ISCSI_OP_LOGOUT:
-       case ISCSI_OP_NOOP_OUT:
-       case ISCSI_OP_SCSI_TMFUNC:
-                SET_FIELD(wqe->flags, ISCSI_WQE_WQE_TYPE,
-                          ISCSI_WQE_TYPE_NORMAL);
-               break;
-       default:
-               if (!sc)
-                       break;
-
-               SET_FIELD(wqe->flags, ISCSI_WQE_WQE_TYPE,
-                         ISCSI_WQE_TYPE_NORMAL);
-               cont_field->contlen_cdbsize_field =
-                               (sc->sc_data_direction == DMA_TO_DEVICE) ?
-                               scsi_bufflen(sc) : 0;
-               if (cmd->use_slowpath)
-                       SET_FIELD(wqe->flags, ISCSI_WQE_NUM_FAST_SGES, 0);
-               else
-                       SET_FIELD(wqe->flags, ISCSI_WQE_NUM_FAST_SGES,
-                                 (sc->sc_data_direction ==
-                                  DMA_TO_DEVICE) ?
-                                 min((u16)QEDI_FAST_SGE_COUNT,
-                                     (u16)cmd->io_tbl.sge_valid) : 0);
-               break;
-       }
-
-       wqe->task_id = tid;
-       /* Make sure SQ data is coherent */
-       wmb();
-}
-
 static void qedi_ring_doorbell(struct qedi_conn *qedi_conn)
 {
        struct iscsi_db_data dbell = { 0 };
@@ -1076,96 +1003,116 @@ static void qedi_ring_doorbell(struct qedi_conn *qedi_conn)
                  qedi_conn->iscsi_conn_id);
 }
 
+static u16 qedi_get_wqe_idx(struct qedi_conn *qedi_conn)
+{
+       struct qedi_endpoint *ep;
+       u16 rval;
+
+       ep = qedi_conn->ep;
+       rval = ep->sq_prod_idx;
+
+       /* Increment SQ index */
+       ep->sq_prod_idx++;
+       ep->fw_sq_prod_idx++;
+       if (ep->sq_prod_idx == QEDI_SQ_SIZE)
+               ep->sq_prod_idx = 0;
+
+       return rval;
+}
+
 int qedi_send_iscsi_login(struct qedi_conn *qedi_conn,
                          struct iscsi_task *task)
 {
-       struct qedi_ctx *qedi = qedi_conn->qedi;
+       struct iscsi_login_req_hdr login_req_pdu_header;
+       struct scsi_sgl_task_params tx_sgl_task_params;
+       struct scsi_sgl_task_params rx_sgl_task_params;
+       struct iscsi_task_params task_params;
        struct iscsi_task_context *fw_task_ctx;
+       struct qedi_ctx *qedi = qedi_conn->qedi;
        struct iscsi_login_req *login_hdr;
-       struct iscsi_login_req_hdr *fw_login_req = NULL;
-       struct iscsi_cached_sge_ctx *cached_sge = NULL;
-       struct iscsi_sge *single_sge = NULL;
-       struct iscsi_sge *req_sge = NULL;
-       struct iscsi_sge *resp_sge = NULL;
+       struct scsi_sge *req_sge = NULL;
+       struct scsi_sge *resp_sge = NULL;
        struct qedi_cmd *qedi_cmd;
-       s16 ptu_invalidate = 0;
+       struct qedi_endpoint *ep;
        s16 tid = 0;
+       u16 sq_idx = 0;
+       int rval = 0;
 
-       req_sge = (struct iscsi_sge *)qedi_conn->gen_pdu.req_bd_tbl;
-       resp_sge = (struct iscsi_sge *)qedi_conn->gen_pdu.resp_bd_tbl;
+       req_sge = (struct scsi_sge *)qedi_conn->gen_pdu.req_bd_tbl;
+       resp_sge = (struct scsi_sge *)qedi_conn->gen_pdu.resp_bd_tbl;
        qedi_cmd = (struct qedi_cmd *)task->dd_data;
+       ep = qedi_conn->ep;
        login_hdr = (struct iscsi_login_req *)task->hdr;
 
        tid = qedi_get_task_idx(qedi);
        if (tid == -1)
                return -ENOMEM;
 
-       fw_task_ctx = qedi_get_task_mem(&qedi->tasks, tid);
+       fw_task_ctx =
+            (struct iscsi_task_context *)qedi_get_task_mem(&qedi->tasks, tid);
        memset(fw_task_ctx, 0, sizeof(struct iscsi_task_context));
 
        qedi_cmd->task_id = tid;
 
-       /* Ystorm context */
-       fw_login_req = &fw_task_ctx->ystorm_st_context.pdu_hdr.login_req;
-       fw_login_req->opcode = login_hdr->opcode;
-       fw_login_req->version_min = login_hdr->min_version;
-       fw_login_req->version_max = login_hdr->max_version;
-       fw_login_req->flags_attr = login_hdr->flags;
-       fw_login_req->isid_tabc = *((u16 *)login_hdr->isid + 2);
-       fw_login_req->isid_d = *((u32 *)login_hdr->isid);
-       fw_login_req->tsih = login_hdr->tsih;
-       qedi_update_itt_map(qedi, tid, task->itt, qedi_cmd);
-       fw_login_req->itt = qedi_set_itt(tid, get_itt(task->itt));
-       fw_login_req->cid = qedi_conn->iscsi_conn_id;
-       fw_login_req->cmd_sn = be32_to_cpu(login_hdr->cmdsn);
-       fw_login_req->exp_stat_sn = be32_to_cpu(login_hdr->exp_statsn);
-       fw_login_req->exp_stat_sn = 0;
-
-       if (qedi->tid_reuse_count[tid] == QEDI_MAX_TASK_NUM) {
-               ptu_invalidate = 1;
-               qedi->tid_reuse_count[tid] = 0;
-       }
+       memset(&task_params, 0, sizeof(task_params));
+       memset(&login_req_pdu_header, 0, sizeof(login_req_pdu_header));
+       memset(&tx_sgl_task_params, 0, sizeof(tx_sgl_task_params));
+       memset(&rx_sgl_task_params, 0, sizeof(rx_sgl_task_params));
+       /* Update header info */
+       login_req_pdu_header.opcode = login_hdr->opcode;
+       login_req_pdu_header.version_min = login_hdr->min_version;
+       login_req_pdu_header.version_max = login_hdr->max_version;
+       login_req_pdu_header.flags_attr = login_hdr->flags;
+       login_req_pdu_header.isid_tabc = swab32p((u32 *)login_hdr->isid);
+       login_req_pdu_header.isid_d = swab16p((u16 *)&login_hdr->isid[4]);
+
+       login_req_pdu_header.tsih = login_hdr->tsih;
+       login_req_pdu_header.hdr_second_dword = ntoh24(login_hdr->dlength);
 
-       fw_task_ctx->ystorm_st_context.state.reuse_count =
-                                               qedi->tid_reuse_count[tid];
-       fw_task_ctx->mstorm_st_context.reuse_count =
-                                               qedi->tid_reuse_count[tid]++;
-       cached_sge =
-              &fw_task_ctx->ystorm_st_context.state.sgl_ctx_union.cached_sge;
-       cached_sge->sge.sge_len = req_sge->sge_len;
-       cached_sge->sge.sge_addr.lo = (u32)(qedi_conn->gen_pdu.req_dma_addr);
-       cached_sge->sge.sge_addr.hi =
-                            (u32)((u64)qedi_conn->gen_pdu.req_dma_addr >> 32);
-
-       /* Mstorm context */
-       single_sge = &fw_task_ctx->mstorm_st_context.sgl_union.single_sge;
-       fw_task_ctx->mstorm_st_context.task_type = 0x2;
-       fw_task_ctx->mstorm_ag_context.task_cid = (u16)qedi_conn->iscsi_conn_id;
-       single_sge->sge_addr.lo = resp_sge->sge_addr.lo;
-       single_sge->sge_addr.hi = resp_sge->sge_addr.hi;
-       single_sge->sge_len = resp_sge->sge_len;
-
-       SET_FIELD(fw_task_ctx->mstorm_st_context.flags.mflags,
-                 ISCSI_MFLAGS_SINGLE_SGE, 1);
-       SET_FIELD(fw_task_ctx->mstorm_st_context.flags.mflags,
-                 ISCSI_MFLAGS_SLOW_IO, 0);
-       fw_task_ctx->mstorm_st_context.sgl_size = 1;
-       fw_task_ctx->mstorm_st_context.rem_task_size = resp_sge->sge_len;
-
-       /* Ustorm context */
-       fw_task_ctx->ustorm_st_context.rem_rcv_len = resp_sge->sge_len;
-       fw_task_ctx->ustorm_st_context.exp_data_transfer_len =
-                                               ntoh24(login_hdr->dlength);
-       fw_task_ctx->ustorm_st_context.exp_data_sn = 0;
-       fw_task_ctx->ustorm_st_context.cq_rss_number = 0;
-       fw_task_ctx->ustorm_st_context.task_type = 0x2;
-       fw_task_ctx->ustorm_ag_context.icid = (u16)qedi_conn->iscsi_conn_id;
-       fw_task_ctx->ustorm_ag_context.exp_data_acked =
-                                                ntoh24(login_hdr->dlength);
-       SET_FIELD(fw_task_ctx->ustorm_ag_context.flags1,
-                 USTORM_ISCSI_TASK_AG_CTX_R2T2RECV, 1);
-       SET_FIELD(fw_task_ctx->ustorm_st_context.flags,
-                 USTORM_ISCSI_TASK_ST_CTX_LOCAL_COMP, 0);
+       qedi_update_itt_map(qedi, tid, task->itt, qedi_cmd);
+       login_req_pdu_header.itt = qedi_set_itt(tid, get_itt(task->itt));
+       login_req_pdu_header.cid = qedi_conn->iscsi_conn_id;
+       login_req_pdu_header.cmd_sn = be32_to_cpu(login_hdr->cmdsn);
+       login_req_pdu_header.exp_stat_sn = be32_to_cpu(login_hdr->exp_statsn);
+       login_req_pdu_header.exp_stat_sn = 0;
+
+       /* Fill tx AHS and rx buffer */
+       tx_sgl_task_params.sgl =
+                              (struct scsi_sge *)qedi_conn->gen_pdu.req_bd_tbl;
+       tx_sgl_task_params.sgl_phys_addr.lo =
+                                        (u32)(qedi_conn->gen_pdu.req_dma_addr);
+       tx_sgl_task_params.sgl_phys_addr.hi =
+                             (u32)((u64)qedi_conn->gen_pdu.req_dma_addr >> 32);
+       tx_sgl_task_params.total_buffer_size = ntoh24(login_hdr->dlength);
+       tx_sgl_task_params.num_sges = 1;
+
+       rx_sgl_task_params.sgl =
+                             (struct scsi_sge *)qedi_conn->gen_pdu.resp_bd_tbl;
+       rx_sgl_task_params.sgl_phys_addr.lo =
+                                       (u32)(qedi_conn->gen_pdu.resp_dma_addr);
+       rx_sgl_task_params.sgl_phys_addr.hi =
+                            (u32)((u64)qedi_conn->gen_pdu.resp_dma_addr >> 32);
+       rx_sgl_task_params.total_buffer_size = resp_sge->sge_len;
+       rx_sgl_task_params.num_sges = 1;
+
+       /* Fill fw input params */
+       task_params.context = fw_task_ctx;
+       task_params.conn_icid = (u16)qedi_conn->iscsi_conn_id;
+       task_params.itid = tid;
+       task_params.cq_rss_number = 0;
+       task_params.tx_io_size = ntoh24(login_hdr->dlength);
+       task_params.rx_io_size = resp_sge->sge_len;
+
+       sq_idx = qedi_get_wqe_idx(qedi_conn);
+       task_params.sqe = &ep->sq[sq_idx];
+
+       memset(task_params.sqe, 0, sizeof(struct iscsi_wqe));
+       rval = init_initiator_login_request_task(&task_params,
+                                                &login_req_pdu_header,
+                                                &tx_sgl_task_params,
+                                                &rx_sgl_task_params);
+       if (rval)
+               return -1;
 
        spin_lock(&qedi_conn->list_lock);
        list_add_tail(&qedi_cmd->io_cmd, &qedi_conn->active_cmd_list);
@@ -1173,7 +1120,6 @@ int qedi_send_iscsi_login(struct qedi_conn *qedi_conn,
        qedi_conn->active_cmd_count++;
        spin_unlock(&qedi_conn->list_lock);
 
-       qedi_add_to_sq(qedi_conn, task, tid, ptu_invalidate, false);
        qedi_ring_doorbell(qedi_conn);
        return 0;
 }
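
The login path above is the template every qedi PDU type follows after this rework: zero the parameter blocks, translate the iSCSI header into the firmware PDU header, describe the tx/rx buffers as scsi_sgl_task_params, claim and zero an SQ WQE, call the matching init_initiator_*_task() helper, and only then ring the doorbell. Condensed into one sketch (error paths and header fill trimmed):

    static int example_submit_login(struct qedi_conn *qedi_conn,
                                    struct iscsi_task_params *params,
                                    struct iscsi_login_req_hdr *hdr,
                                    struct scsi_sgl_task_params *tx,
                                    struct scsi_sgl_task_params *rx)
    {
            u16 sq_idx = qedi_get_wqe_idx(qedi_conn);        /* claim SQ slot */

            params->sqe = &qedi_conn->ep->sq[sq_idx];
            memset(params->sqe, 0, sizeof(struct iscsi_wqe));

            if (init_initiator_login_request_task(params, hdr, tx, rx))
                    return -1;                               /* fw init failed */

            qedi_ring_doorbell(qedi_conn);                   /* publish to fw */
            return 0;
    }
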
@@ -1181,65 +1127,64 @@ int qedi_send_iscsi_login(struct qedi_conn *qedi_conn,
 int qedi_send_iscsi_logout(struct qedi_conn *qedi_conn,
                           struct iscsi_task *task)
 {
-       struct qedi_ctx *qedi = qedi_conn->qedi;
-       struct iscsi_logout_req_hdr *fw_logout_req = NULL;
-       struct iscsi_task_context *fw_task_ctx = NULL;
+       struct iscsi_logout_req_hdr logout_pdu_header;
+       struct scsi_sgl_task_params tx_sgl_task_params;
+       struct scsi_sgl_task_params rx_sgl_task_params;
+       struct iscsi_task_params task_params;
+       struct iscsi_task_context *fw_task_ctx;
        struct iscsi_logout *logout_hdr = NULL;
-       struct qedi_cmd *qedi_cmd = NULL;
-       s16  tid = 0;
-       s16 ptu_invalidate = 0;
+       struct qedi_ctx *qedi = qedi_conn->qedi;
+       struct qedi_cmd *qedi_cmd;
+       struct qedi_endpoint *ep;
+       s16 tid = 0;
+       u16 sq_idx = 0;
+       int rval = 0;
 
        qedi_cmd = (struct qedi_cmd *)task->dd_data;
        logout_hdr = (struct iscsi_logout *)task->hdr;
+       ep = qedi_conn->ep;
 
        tid = qedi_get_task_idx(qedi);
        if (tid == -1)
                return -ENOMEM;
 
-       fw_task_ctx = qedi_get_task_mem(&qedi->tasks, tid);
-
+       fw_task_ctx =
+            (struct iscsi_task_context *)qedi_get_task_mem(&qedi->tasks, tid);
        memset(fw_task_ctx, 0, sizeof(struct iscsi_task_context));
+
        qedi_cmd->task_id = tid;
 
-       /* Ystorm context */
-       fw_logout_req = &fw_task_ctx->ystorm_st_context.pdu_hdr.logout_req;
-       fw_logout_req->opcode = ISCSI_OPCODE_LOGOUT_REQUEST;
-       fw_logout_req->reason_code = 0x80 | logout_hdr->flags;
-       qedi_update_itt_map(qedi, tid, task->itt, qedi_cmd);
-       fw_logout_req->itt = qedi_set_itt(tid, get_itt(task->itt));
-       fw_logout_req->exp_stat_sn = be32_to_cpu(logout_hdr->exp_statsn);
-       fw_logout_req->cmd_sn = be32_to_cpu(logout_hdr->cmdsn);
+       memset(&task_params, 0, sizeof(task_params));
+       memset(&logout_pdu_header, 0, sizeof(logout_pdu_header));
+       memset(&tx_sgl_task_params, 0, sizeof(tx_sgl_task_params));
+       memset(&rx_sgl_task_params, 0, sizeof(rx_sgl_task_params));
 
-       if (qedi->tid_reuse_count[tid] == QEDI_MAX_TASK_NUM) {
-               ptu_invalidate = 1;
-               qedi->tid_reuse_count[tid] = 0;
-       }
-       fw_task_ctx->ystorm_st_context.state.reuse_count =
-                                                 qedi->tid_reuse_count[tid];
-       fw_task_ctx->mstorm_st_context.reuse_count =
-                                               qedi->tid_reuse_count[tid]++;
-       fw_logout_req->cid = qedi_conn->iscsi_conn_id;
-       fw_task_ctx->ystorm_st_context.state.buffer_offset[0] = 0;
-
-       /* Mstorm context */
-       fw_task_ctx->mstorm_st_context.task_type = ISCSI_TASK_TYPE_MIDPATH;
-       fw_task_ctx->mstorm_ag_context.task_cid = (u16)qedi_conn->iscsi_conn_id;
-
-       /* Ustorm context */
-       fw_task_ctx->ustorm_st_context.rem_rcv_len = 0;
-       fw_task_ctx->ustorm_st_context.exp_data_transfer_len = 0;
-       fw_task_ctx->ustorm_st_context.exp_data_sn = 0;
-       fw_task_ctx->ustorm_st_context.task_type =  ISCSI_TASK_TYPE_MIDPATH;
-       fw_task_ctx->ustorm_st_context.cq_rss_number = 0;
-
-       SET_FIELD(fw_task_ctx->ustorm_st_context.flags,
-                 USTORM_ISCSI_TASK_ST_CTX_LOCAL_COMP, 0);
-       SET_FIELD(fw_task_ctx->ustorm_st_context.reg1.reg1_map,
-                 ISCSI_REG1_NUM_FAST_SGES, 0);
-
-       fw_task_ctx->ustorm_ag_context.icid = (u16)qedi_conn->iscsi_conn_id;
-       SET_FIELD(fw_task_ctx->ustorm_ag_context.flags1,
-                 USTORM_ISCSI_TASK_AG_CTX_R2T2RECV, 1);
+       /* Update header info */
+       logout_pdu_header.opcode = logout_hdr->opcode;
+       logout_pdu_header.reason_code = 0x80 | logout_hdr->flags;
+       qedi_update_itt_map(qedi, tid, task->itt, qedi_cmd);
+       logout_pdu_header.itt = qedi_set_itt(tid, get_itt(task->itt));
+       logout_pdu_header.exp_stat_sn = be32_to_cpu(logout_hdr->exp_statsn);
+       logout_pdu_header.cmd_sn = be32_to_cpu(logout_hdr->cmdsn);
+       logout_pdu_header.cid = qedi_conn->iscsi_conn_id;
+
+       /* Fill fw input params */
+       task_params.context = fw_task_ctx;
+       task_params.conn_icid = (u16)qedi_conn->iscsi_conn_id;
+       task_params.itid = tid;
+       task_params.cq_rss_number = 0;
+       task_params.tx_io_size = 0;
+       task_params.rx_io_size = 0;
+
+       sq_idx = qedi_get_wqe_idx(qedi_conn);
+       task_params.sqe = &ep->sq[sq_idx];
+       memset(task_params.sqe, 0, sizeof(struct iscsi_wqe));
+
+       rval = init_initiator_logout_request_task(&task_params,
+                                                 &logout_pdu_header,
+                                                 NULL, NULL);
+       if (rval)
+               return -1;
 
        spin_lock(&qedi_conn->list_lock);
        list_add_tail(&qedi_cmd->io_cmd, &qedi_conn->active_cmd_list);
@@ -1247,9 +1192,7 @@ int qedi_send_iscsi_logout(struct qedi_conn *qedi_conn,
        qedi_conn->active_cmd_count++;
        spin_unlock(&qedi_conn->list_lock);
 
-       qedi_add_to_sq(qedi_conn, task, tid, ptu_invalidate, false);
        qedi_ring_doorbell(qedi_conn);
-
        return 0;
 }
 
@@ -1461,9 +1404,9 @@ static void qedi_tmf_work(struct work_struct *work)
                  get_itt(tmf_hdr->rtt), get_itt(ctask->itt), cmd->task_id,
                  qedi_conn->iscsi_conn_id);
 
-       if (do_not_recover) {
+       if (qedi_do_not_recover) {
                QEDI_ERR(&qedi->dbg_ctx, "DONT SEND CLEANUP/ABORT %d\n",
-                        do_not_recover);
+                        qedi_do_not_recover);
                goto abort_ret;
        }
 
@@ -1533,47 +1476,46 @@ ldel_exit:
 static int qedi_send_iscsi_tmf(struct qedi_conn *qedi_conn,
                               struct iscsi_task *mtask)
 {
-       struct iscsi_conn *conn = qedi_conn->cls_conn->dd_data;
+       struct iscsi_tmf_request_hdr tmf_pdu_header;
+       struct iscsi_task_params task_params;
        struct qedi_ctx *qedi = qedi_conn->qedi;
        struct iscsi_task_context *fw_task_ctx;
-       struct iscsi_tmf_request_hdr *fw_tmf_request;
-       struct iscsi_sge *single_sge;
-       struct qedi_cmd *qedi_cmd;
-       struct qedi_cmd *cmd;
+       struct iscsi_conn *conn = qedi_conn->cls_conn->dd_data;
        struct iscsi_task *ctask;
        struct iscsi_tm *tmf_hdr;
-       struct iscsi_sge *req_sge;
-       struct iscsi_sge *resp_sge;
-       u32 lun[2];
-       s16 tid = 0, ptu_invalidate = 0;
+       struct qedi_cmd *qedi_cmd;
+       struct qedi_cmd *cmd;
+       struct qedi_endpoint *ep;
+       u32 scsi_lun[2];
+       s16 tid = 0;
+       u16 sq_idx = 0;
+       int rval = 0;
 
-       req_sge = (struct iscsi_sge *)qedi_conn->gen_pdu.req_bd_tbl;
-       resp_sge = (struct iscsi_sge *)qedi_conn->gen_pdu.resp_bd_tbl;
-       qedi_cmd = (struct qedi_cmd *)mtask->dd_data;
        tmf_hdr = (struct iscsi_tm *)mtask->hdr;
+       qedi_cmd = (struct qedi_cmd *)mtask->dd_data;
+       ep = qedi_conn->ep;
 
-       tid = qedi_cmd->task_id;
-       qedi_update_itt_map(qedi, tid, mtask->itt, qedi_cmd);
+       tid = qedi_get_task_idx(qedi);
+       if (tid == -1)
+               return -ENOMEM;
 
-       fw_task_ctx = qedi_get_task_mem(&qedi->tasks, tid);
+       fw_task_ctx =
+            (struct iscsi_task_context *)qedi_get_task_mem(&qedi->tasks, tid);
        memset(fw_task_ctx, 0, sizeof(struct iscsi_task_context));
 
-       fw_tmf_request = &fw_task_ctx->ystorm_st_context.pdu_hdr.tmf_request;
-       fw_tmf_request->itt = qedi_set_itt(tid, get_itt(mtask->itt));
-       fw_tmf_request->cmd_sn = be32_to_cpu(tmf_hdr->cmdsn);
+       qedi_cmd->task_id = tid;
 
-       memcpy(lun, &tmf_hdr->lun, sizeof(struct scsi_lun));
-       fw_tmf_request->lun.lo = be32_to_cpu(lun[0]);
-       fw_tmf_request->lun.hi = be32_to_cpu(lun[1]);
+       memset(&task_params, 0, sizeof(task_params));
+       memset(&tmf_pdu_header, 0, sizeof(tmf_pdu_header));
 
-       if (qedi->tid_reuse_count[tid] == QEDI_MAX_TASK_NUM) {
-               ptu_invalidate = 1;
-               qedi->tid_reuse_count[tid] = 0;
-       }
-       fw_task_ctx->ystorm_st_context.state.reuse_count =
-                                               qedi->tid_reuse_count[tid];
-       fw_task_ctx->mstorm_st_context.reuse_count =
-                                               qedi->tid_reuse_count[tid]++;
+       /* Update header info */
+       qedi_update_itt_map(qedi, tid, mtask->itt, qedi_cmd);
+       tmf_pdu_header.itt = qedi_set_itt(tid, get_itt(mtask->itt));
+       tmf_pdu_header.cmd_sn = be32_to_cpu(tmf_hdr->cmdsn);
+
+       memcpy(scsi_lun, &tmf_hdr->lun, sizeof(struct scsi_lun));
+       tmf_pdu_header.lun.lo = be32_to_cpu(scsi_lun[0]);
+       tmf_pdu_header.lun.hi = be32_to_cpu(scsi_lun[1]);
 
        if ((tmf_hdr->flags & ISCSI_FLAG_TM_FUNC_MASK) ==
             ISCSI_TM_FUNC_ABORT_TASK) {
@@ -1584,53 +1526,34 @@ static int qedi_send_iscsi_tmf(struct qedi_conn *qedi_conn,
                        return 0;
                }
                cmd = (struct qedi_cmd *)ctask->dd_data;
-               fw_tmf_request->rtt =
+               tmf_pdu_header.rtt =
                                qedi_set_itt(cmd->task_id,
                                             get_itt(tmf_hdr->rtt));
        } else {
-               fw_tmf_request->rtt = ISCSI_RESERVED_TAG;
+               tmf_pdu_header.rtt = ISCSI_RESERVED_TAG;
        }
 
-       fw_tmf_request->opcode = tmf_hdr->opcode;
-       fw_tmf_request->function = tmf_hdr->flags;
-       fw_tmf_request->hdr_second_dword = ntoh24(tmf_hdr->dlength);
-       fw_tmf_request->ref_cmd_sn = be32_to_cpu(tmf_hdr->refcmdsn);
-
-       single_sge = &fw_task_ctx->mstorm_st_context.sgl_union.single_sge;
-       fw_task_ctx->mstorm_st_context.task_type = ISCSI_TASK_TYPE_MIDPATH;
-       fw_task_ctx->mstorm_ag_context.task_cid = (u16)qedi_conn->iscsi_conn_id;
-       single_sge->sge_addr.lo = resp_sge->sge_addr.lo;
-       single_sge->sge_addr.hi = resp_sge->sge_addr.hi;
-       single_sge->sge_len = resp_sge->sge_len;
-
-       SET_FIELD(fw_task_ctx->mstorm_st_context.flags.mflags,
-                 ISCSI_MFLAGS_SINGLE_SGE, 1);
-       SET_FIELD(fw_task_ctx->mstorm_st_context.flags.mflags,
-                 ISCSI_MFLAGS_SLOW_IO, 0);
-       fw_task_ctx->mstorm_st_context.sgl_size = 1;
-       fw_task_ctx->mstorm_st_context.rem_task_size = resp_sge->sge_len;
-
-       /* Ustorm context */
-       fw_task_ctx->ustorm_st_context.rem_rcv_len = 0;
-       fw_task_ctx->ustorm_st_context.exp_data_transfer_len = 0;
-       fw_task_ctx->ustorm_st_context.exp_data_sn = 0;
-       fw_task_ctx->ustorm_st_context.task_type =  ISCSI_TASK_TYPE_MIDPATH;
-       fw_task_ctx->ustorm_st_context.cq_rss_number = 0;
-
-       SET_FIELD(fw_task_ctx->ustorm_st_context.flags,
-                 USTORM_ISCSI_TASK_ST_CTX_LOCAL_COMP, 0);
-       SET_FIELD(fw_task_ctx->ustorm_st_context.reg1.reg1_map,
-                 ISCSI_REG1_NUM_FAST_SGES, 0);
-
-       fw_task_ctx->ustorm_ag_context.icid = (u16)qedi_conn->iscsi_conn_id;
-       SET_FIELD(fw_task_ctx->ustorm_ag_context.flags1,
-                 USTORM_ISCSI_TASK_AG_CTX_R2T2RECV, 1);
-       fw_task_ctx->ustorm_st_context.lun.lo = be32_to_cpu(lun[0]);
-       fw_task_ctx->ustorm_st_context.lun.hi = be32_to_cpu(lun[1]);
+       tmf_pdu_header.opcode = tmf_hdr->opcode;
+       tmf_pdu_header.function = tmf_hdr->flags;
+       tmf_pdu_header.hdr_second_dword = ntoh24(tmf_hdr->dlength);
+       tmf_pdu_header.ref_cmd_sn = be32_to_cpu(tmf_hdr->refcmdsn);
 
-       QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_SCSI_TM,
-                 "Add TMF to SQ, tmf tid=0x%x, itt=0x%x, cid=0x%x\n",
-                 tid,  mtask->itt, qedi_conn->iscsi_conn_id);
+       /* Fill fw input params */
+       task_params.context = fw_task_ctx;
+       task_params.conn_icid = (u16)qedi_conn->iscsi_conn_id;
+       task_params.itid = tid;
+       task_params.cq_rss_number = 0;
+       task_params.tx_io_size = 0;
+       task_params.rx_io_size = 0;
+
+       sq_idx = qedi_get_wqe_idx(qedi_conn);
+       task_params.sqe = &ep->sq[sq_idx];
+
+       memset(task_params.sqe, 0, sizeof(struct iscsi_wqe));
+       rval = init_initiator_tmf_request_task(&task_params,
+                                              &tmf_pdu_header);
+       if (rval)
+               return -1;
 
        spin_lock(&qedi_conn->list_lock);
        list_add_tail(&qedi_cmd->io_cmd, &qedi_conn->active_cmd_list);
@@ -1638,7 +1561,6 @@ static int qedi_send_iscsi_tmf(struct qedi_conn *qedi_conn,
        qedi_conn->active_cmd_count++;
        spin_unlock(&qedi_conn->list_lock);
 
-       qedi_add_to_sq(qedi_conn, mtask, tid, ptu_invalidate, false);
        qedi_ring_doorbell(qedi_conn);
        return 0;
 }
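
Both the TMF path above and the NOP-Out path below convert the 8-byte wire-format LUN the same way: copy it into two 32-bit words and byte-swap each into the lo/hi pair the firmware header expects. As a stand-alone sketch (the destination pointer pair stands in for the firmware header's regpair-style lun field, an assumption):

    static void example_set_fw_lun(u32 *fw_lun_lo, u32 *fw_lun_hi,
                                   struct scsi_lun *lun)
    {
            u32 words[2];

            memcpy(words, lun, sizeof(*lun));   /* 8-byte LUN -> two words */
            *fw_lun_lo = be32_to_cpu(words[0]);
            *fw_lun_hi = be32_to_cpu(words[1]);
    }
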
@@ -1689,101 +1611,98 @@ int qedi_iscsi_abort_work(struct qedi_conn *qedi_conn,
 int qedi_send_iscsi_text(struct qedi_conn *qedi_conn,
                         struct iscsi_task *task)
 {
-       struct qedi_ctx *qedi = qedi_conn->qedi;
+       struct iscsi_text_request_hdr text_request_pdu_header;
+       struct scsi_sgl_task_params tx_sgl_task_params;
+       struct scsi_sgl_task_params rx_sgl_task_params;
+       struct iscsi_task_params task_params;
        struct iscsi_task_context *fw_task_ctx;
-       struct iscsi_text_request_hdr *fw_text_request;
-       struct iscsi_cached_sge_ctx *cached_sge;
-       struct iscsi_sge *single_sge;
-       struct qedi_cmd *qedi_cmd;
-       /* For 6.5 hdr iscsi_hdr */
+       struct qedi_ctx *qedi = qedi_conn->qedi;
        struct iscsi_text *text_hdr;
-       struct iscsi_sge *req_sge;
-       struct iscsi_sge *resp_sge;
-       s16 ptu_invalidate = 0;
+       struct scsi_sge *req_sge = NULL;
+       struct scsi_sge *resp_sge = NULL;
+       struct qedi_cmd *qedi_cmd;
+       struct qedi_endpoint *ep;
        s16 tid = 0;
+       u16 sq_idx = 0;
+       int rval = 0;
 
-       req_sge = (struct iscsi_sge *)qedi_conn->gen_pdu.req_bd_tbl;
-       resp_sge = (struct iscsi_sge *)qedi_conn->gen_pdu.resp_bd_tbl;
+       req_sge = (struct scsi_sge *)qedi_conn->gen_pdu.req_bd_tbl;
+       resp_sge = (struct scsi_sge *)qedi_conn->gen_pdu.resp_bd_tbl;
        qedi_cmd = (struct qedi_cmd *)task->dd_data;
        text_hdr = (struct iscsi_text *)task->hdr;
+       ep = qedi_conn->ep;
 
        tid = qedi_get_task_idx(qedi);
        if (tid == -1)
                return -ENOMEM;
 
-       fw_task_ctx = qedi_get_task_mem(&qedi->tasks, tid);
+       fw_task_ctx =
+            (struct iscsi_task_context *)qedi_get_task_mem(&qedi->tasks, tid);
        memset(fw_task_ctx, 0, sizeof(struct iscsi_task_context));
 
        qedi_cmd->task_id = tid;
 
-       /* Ystorm context */
-       fw_text_request =
-                       &fw_task_ctx->ystorm_st_context.pdu_hdr.text_request;
-       fw_text_request->opcode = text_hdr->opcode;
-       fw_text_request->flags_attr = text_hdr->flags;
+       memset(&task_params, 0, sizeof(task_params));
+       memset(&text_request_pdu_header, 0, sizeof(text_request_pdu_header));
+       memset(&tx_sgl_task_params, 0, sizeof(tx_sgl_task_params));
+       memset(&rx_sgl_task_params, 0, sizeof(rx_sgl_task_params));
+
+       /* Update header info */
+       text_request_pdu_header.opcode = text_hdr->opcode;
+       text_request_pdu_header.flags_attr = text_hdr->flags;
 
        qedi_update_itt_map(qedi, tid, task->itt, qedi_cmd);
-       fw_text_request->itt = qedi_set_itt(tid, get_itt(task->itt));
-       fw_text_request->ttt = text_hdr->ttt;
-       fw_text_request->cmd_sn = be32_to_cpu(text_hdr->cmdsn);
-       fw_text_request->exp_stat_sn = be32_to_cpu(text_hdr->exp_statsn);
-       fw_text_request->hdr_second_dword = ntoh24(text_hdr->dlength);
-
-       if (qedi->tid_reuse_count[tid] == QEDI_MAX_TASK_NUM) {
-               ptu_invalidate = 1;
-               qedi->tid_reuse_count[tid] = 0;
-       }
-       fw_task_ctx->ystorm_st_context.state.reuse_count =
-                                                    qedi->tid_reuse_count[tid];
-       fw_task_ctx->mstorm_st_context.reuse_count =
-                                                  qedi->tid_reuse_count[tid]++;
-
-       cached_sge =
-              &fw_task_ctx->ystorm_st_context.state.sgl_ctx_union.cached_sge;
-       cached_sge->sge.sge_len = req_sge->sge_len;
-       cached_sge->sge.sge_addr.lo = (u32)(qedi_conn->gen_pdu.req_dma_addr);
-       cached_sge->sge.sge_addr.hi =
+       text_request_pdu_header.itt = qedi_set_itt(tid, get_itt(task->itt));
+       text_request_pdu_header.ttt = text_hdr->ttt;
+       text_request_pdu_header.cmd_sn = be32_to_cpu(text_hdr->cmdsn);
+       text_request_pdu_header.exp_stat_sn = be32_to_cpu(text_hdr->exp_statsn);
+       text_request_pdu_header.hdr_second_dword = ntoh24(text_hdr->dlength);
+
+       /* Fill tx AHS and rx buffer */
+       tx_sgl_task_params.sgl =
+                              (struct scsi_sge *)qedi_conn->gen_pdu.req_bd_tbl;
+       tx_sgl_task_params.sgl_phys_addr.lo =
+                                        (u32)(qedi_conn->gen_pdu.req_dma_addr);
+       tx_sgl_task_params.sgl_phys_addr.hi =
                              (u32)((u64)qedi_conn->gen_pdu.req_dma_addr >> 32);
+       tx_sgl_task_params.total_buffer_size = req_sge->sge_len;
+       tx_sgl_task_params.num_sges = 1;
+
+       rx_sgl_task_params.sgl =
+                             (struct scsi_sge *)qedi_conn->gen_pdu.resp_bd_tbl;
+       rx_sgl_task_params.sgl_phys_addr.lo =
+                                       (u32)(qedi_conn->gen_pdu.resp_dma_addr);
+       rx_sgl_task_params.sgl_phys_addr.hi =
+                            (u32)((u64)qedi_conn->gen_pdu.resp_dma_addr >> 32);
+       rx_sgl_task_params.total_buffer_size = resp_sge->sge_len;
+       rx_sgl_task_params.num_sges = 1;
+
+       /* Fill fw input params */
+       task_params.context = fw_task_ctx;
+       task_params.conn_icid = (u16)qedi_conn->iscsi_conn_id;
+       task_params.itid = tid;
+       task_params.cq_rss_number = 0;
+       task_params.tx_io_size = ntoh24(text_hdr->dlength);
+       task_params.rx_io_size = resp_sge->sge_len;
+
+       sq_idx = qedi_get_wqe_idx(qedi_conn);
+       task_params.sqe = &ep->sq[sq_idx];
+
+       memset(task_params.sqe, 0, sizeof(struct iscsi_wqe));
+       rval = init_initiator_text_request_task(&task_params,
+                                               &text_request_pdu_header,
+                                               &tx_sgl_task_params,
+                                               &rx_sgl_task_params);
+       if (rval)
+               return -1;
 
-       /* Mstorm context */
-       single_sge = &fw_task_ctx->mstorm_st_context.sgl_union.single_sge;
-       fw_task_ctx->mstorm_st_context.task_type = 0x2;
-       fw_task_ctx->mstorm_ag_context.task_cid = (u16)qedi_conn->iscsi_conn_id;
-       single_sge->sge_addr.lo = resp_sge->sge_addr.lo;
-       single_sge->sge_addr.hi = resp_sge->sge_addr.hi;
-       single_sge->sge_len = resp_sge->sge_len;
-
-       SET_FIELD(fw_task_ctx->mstorm_st_context.flags.mflags,
-                 ISCSI_MFLAGS_SINGLE_SGE, 1);
-       SET_FIELD(fw_task_ctx->mstorm_st_context.flags.mflags,
-                 ISCSI_MFLAGS_SLOW_IO, 0);
-       fw_task_ctx->mstorm_st_context.sgl_size = 1;
-       fw_task_ctx->mstorm_st_context.rem_task_size = resp_sge->sge_len;
-
-       /* Ustorm context */
-       fw_task_ctx->ustorm_ag_context.exp_data_acked =
-                                                     ntoh24(text_hdr->dlength);
-       fw_task_ctx->ustorm_st_context.rem_rcv_len = resp_sge->sge_len;
-       fw_task_ctx->ustorm_st_context.exp_data_transfer_len =
-                                                     ntoh24(text_hdr->dlength);
-       fw_task_ctx->ustorm_st_context.exp_data_sn =
-                                             be32_to_cpu(text_hdr->exp_statsn);
-       fw_task_ctx->ustorm_st_context.cq_rss_number = 0;
-       fw_task_ctx->ustorm_st_context.task_type = 0x2;
-       fw_task_ctx->ustorm_ag_context.icid = (u16)qedi_conn->iscsi_conn_id;
-       SET_FIELD(fw_task_ctx->ustorm_ag_context.flags1,
-                 USTORM_ISCSI_TASK_AG_CTX_R2T2RECV, 1);
-
-       /*  Add command in active command list */
        spin_lock(&qedi_conn->list_lock);
        list_add_tail(&qedi_cmd->io_cmd, &qedi_conn->active_cmd_list);
        qedi_cmd->io_cmd_in_list = true;
        qedi_conn->active_cmd_count++;
        spin_unlock(&qedi_conn->list_lock);
 
-       qedi_add_to_sq(qedi_conn, task, tid, ptu_invalidate, false);
        qedi_ring_doorbell(qedi_conn);
-
        return 0;
 }
 
@@ -1791,58 +1710,62 @@ int qedi_send_iscsi_nopout(struct qedi_conn *qedi_conn,
                           struct iscsi_task *task,
                           char *datap, int data_len, int unsol)
 {
+       struct iscsi_nop_out_hdr nop_out_pdu_header;
+       struct scsi_sgl_task_params tx_sgl_task_params;
+       struct scsi_sgl_task_params rx_sgl_task_params;
+       struct iscsi_task_params task_params;
        struct qedi_ctx *qedi = qedi_conn->qedi;
        struct iscsi_task_context *fw_task_ctx;
-       struct iscsi_nop_out_hdr *fw_nop_out;
-       struct qedi_cmd *qedi_cmd;
-       /* For 6.5 hdr iscsi_hdr */
        struct iscsi_nopout *nopout_hdr;
-       struct iscsi_cached_sge_ctx *cached_sge;
-       struct iscsi_sge *single_sge;
-       struct iscsi_sge *req_sge;
-       struct iscsi_sge *resp_sge;
-       u32 lun[2];
-       s16 ptu_invalidate = 0;
+       struct scsi_sge *req_sge = NULL;
+       struct scsi_sge *resp_sge = NULL;
+       struct qedi_cmd *qedi_cmd;
+       struct qedi_endpoint *ep;
+       u32 scsi_lun[2];
        s16 tid = 0;
+       u16 sq_idx = 0;
+       int rval = 0;
 
-       req_sge = (struct iscsi_sge *)qedi_conn->gen_pdu.req_bd_tbl;
-       resp_sge = (struct iscsi_sge *)qedi_conn->gen_pdu.resp_bd_tbl;
+       req_sge = (struct scsi_sge *)qedi_conn->gen_pdu.req_bd_tbl;
+       resp_sge = (struct scsi_sge *)qedi_conn->gen_pdu.resp_bd_tbl;
        qedi_cmd = (struct qedi_cmd *)task->dd_data;
        nopout_hdr = (struct iscsi_nopout *)task->hdr;
+       ep = qedi_conn->ep;
 
        tid = qedi_get_task_idx(qedi);
-       if (tid == -1) {
-               QEDI_WARN(&qedi->dbg_ctx, "Invalid tid\n");
+       if (tid == -1)
                return -ENOMEM;
-       }
-
-       fw_task_ctx = qedi_get_task_mem(&qedi->tasks, tid);
 
+       fw_task_ctx =
+            (struct iscsi_task_context *)qedi_get_task_mem(&qedi->tasks, tid);
        memset(fw_task_ctx, 0, sizeof(struct iscsi_task_context));
+
        qedi_cmd->task_id = tid;
 
-       /* Ystorm context */
-       fw_nop_out = &fw_task_ctx->ystorm_st_context.pdu_hdr.nop_out;
-       SET_FIELD(fw_nop_out->flags_attr, ISCSI_NOP_OUT_HDR_CONST1, 1);
-       SET_FIELD(fw_nop_out->flags_attr, ISCSI_NOP_OUT_HDR_RSRV, 0);
+       memset(&task_params, 0, sizeof(task_params));
+       memset(&nop_out_pdu_header, 0, sizeof(nop_out_pdu_header));
+       memset(&tx_sgl_task_params, 0, sizeof(tx_sgl_task_params));
+       memset(&rx_sgl_task_params, 0, sizeof(rx_sgl_task_params));
+
+       /* Update header info */
+       nop_out_pdu_header.opcode = nopout_hdr->opcode;
+       SET_FIELD(nop_out_pdu_header.flags_attr, ISCSI_NOP_OUT_HDR_CONST1, 1);
+       SET_FIELD(nop_out_pdu_header.flags_attr, ISCSI_NOP_OUT_HDR_RSRV, 0);
 
-       memcpy(lun, &nopout_hdr->lun, sizeof(struct scsi_lun));
-       fw_nop_out->lun.lo = be32_to_cpu(lun[0]);
-       fw_nop_out->lun.hi = be32_to_cpu(lun[1]);
+       memcpy(scsi_lun, &nopout_hdr->lun, sizeof(struct scsi_lun));
+       nop_out_pdu_header.lun.lo = be32_to_cpu(scsi_lun[0]);
+       nop_out_pdu_header.lun.hi = be32_to_cpu(scsi_lun[1]);
+       nop_out_pdu_header.cmd_sn = be32_to_cpu(nopout_hdr->cmdsn);
+       nop_out_pdu_header.exp_stat_sn = be32_to_cpu(nopout_hdr->exp_statsn);
 
        qedi_update_itt_map(qedi, tid, task->itt, qedi_cmd);
 
        if (nopout_hdr->ttt != ISCSI_TTT_ALL_ONES) {
-               fw_nop_out->itt = be32_to_cpu(nopout_hdr->itt);
-               fw_nop_out->ttt = be32_to_cpu(nopout_hdr->ttt);
-               fw_task_ctx->ystorm_st_context.state.buffer_offset[0] = 0;
-               fw_task_ctx->ystorm_st_context.state.local_comp = 1;
-               SET_FIELD(fw_task_ctx->ustorm_st_context.flags,
-                         USTORM_ISCSI_TASK_ST_CTX_LOCAL_COMP, 1);
+               nop_out_pdu_header.itt = be32_to_cpu(nopout_hdr->itt);
+               nop_out_pdu_header.ttt = be32_to_cpu(nopout_hdr->ttt);
        } else {
-               fw_nop_out->itt = qedi_set_itt(tid, get_itt(task->itt));
-               fw_nop_out->ttt = ISCSI_TTT_ALL_ONES;
-               fw_task_ctx->ystorm_st_context.state.buffer_offset[0] = 0;
+               nop_out_pdu_header.itt = qedi_set_itt(tid, get_itt(task->itt));
+               nop_out_pdu_header.ttt = ISCSI_TTT_ALL_ONES;
 
                spin_lock(&qedi_conn->list_lock);
                list_add_tail(&qedi_cmd->io_cmd, &qedi_conn->active_cmd_list);
@@ -1851,53 +1774,46 @@ int qedi_send_iscsi_nopout(struct qedi_conn *qedi_conn,
                spin_unlock(&qedi_conn->list_lock);
        }
 
-       fw_nop_out->opcode = ISCSI_OPCODE_NOP_OUT;
-       fw_nop_out->cmd_sn = be32_to_cpu(nopout_hdr->cmdsn);
-       fw_nop_out->exp_stat_sn = be32_to_cpu(nopout_hdr->exp_statsn);
-
-       cached_sge =
-              &fw_task_ctx->ystorm_st_context.state.sgl_ctx_union.cached_sge;
-       cached_sge->sge.sge_len = req_sge->sge_len;
-       cached_sge->sge.sge_addr.lo = (u32)(qedi_conn->gen_pdu.req_dma_addr);
-       cached_sge->sge.sge_addr.hi =
-                       (u32)((u64)qedi_conn->gen_pdu.req_dma_addr >> 32);
-
-       /* Mstorm context */
-       fw_task_ctx->mstorm_st_context.task_type = ISCSI_TASK_TYPE_MIDPATH;
-       fw_task_ctx->mstorm_ag_context.task_cid = (u16)qedi_conn->iscsi_conn_id;
-
-       single_sge = &fw_task_ctx->mstorm_st_context.sgl_union.single_sge;
-       single_sge->sge_addr.lo = resp_sge->sge_addr.lo;
-       single_sge->sge_addr.hi = resp_sge->sge_addr.hi;
-       single_sge->sge_len = resp_sge->sge_len;
-       fw_task_ctx->mstorm_st_context.rem_task_size = resp_sge->sge_len;
-
-       if (qedi->tid_reuse_count[tid] == QEDI_MAX_TASK_NUM) {
-               ptu_invalidate = 1;
-               qedi->tid_reuse_count[tid] = 0;
-       }
-       fw_task_ctx->ystorm_st_context.state.reuse_count =
-                                               qedi->tid_reuse_count[tid];
-       fw_task_ctx->mstorm_st_context.reuse_count =
-                                               qedi->tid_reuse_count[tid]++;
-       /* Ustorm context */
-       fw_task_ctx->ustorm_st_context.rem_rcv_len = resp_sge->sge_len;
-       fw_task_ctx->ustorm_st_context.exp_data_transfer_len = data_len;
-       fw_task_ctx->ustorm_st_context.exp_data_sn = 0;
-       fw_task_ctx->ustorm_st_context.task_type =  ISCSI_TASK_TYPE_MIDPATH;
-       fw_task_ctx->ustorm_st_context.cq_rss_number = 0;
-
-       SET_FIELD(fw_task_ctx->ustorm_st_context.reg1.reg1_map,
-                 ISCSI_REG1_NUM_FAST_SGES, 0);
-
-       fw_task_ctx->ustorm_ag_context.icid = (u16)qedi_conn->iscsi_conn_id;
-       SET_FIELD(fw_task_ctx->ustorm_ag_context.flags1,
-                 USTORM_ISCSI_TASK_AG_CTX_R2T2RECV, 1);
-
-       fw_task_ctx->ustorm_st_context.lun.lo = be32_to_cpu(lun[0]);
-       fw_task_ctx->ustorm_st_context.lun.hi = be32_to_cpu(lun[1]);
-
-       qedi_add_to_sq(qedi_conn, task, tid, ptu_invalidate, false);
+       /* Fill tx AHS and rx buffer */
+       if (data_len) {
+               tx_sgl_task_params.sgl =
+                              (struct scsi_sge *)qedi_conn->gen_pdu.req_bd_tbl;
+               tx_sgl_task_params.sgl_phys_addr.lo =
+                                        (u32)(qedi_conn->gen_pdu.req_dma_addr);
+               tx_sgl_task_params.sgl_phys_addr.hi =
+                             (u32)((u64)qedi_conn->gen_pdu.req_dma_addr >> 32);
+               tx_sgl_task_params.total_buffer_size = data_len;
+               tx_sgl_task_params.num_sges = 1;
+
+               rx_sgl_task_params.sgl =
+                             (struct scsi_sge *)qedi_conn->gen_pdu.resp_bd_tbl;
+               rx_sgl_task_params.sgl_phys_addr.lo =
+                                       (u32)(qedi_conn->gen_pdu.resp_dma_addr);
+               rx_sgl_task_params.sgl_phys_addr.hi =
+                            (u32)((u64)qedi_conn->gen_pdu.resp_dma_addr >> 32);
+               rx_sgl_task_params.total_buffer_size = resp_sge->sge_len;
+               rx_sgl_task_params.num_sges = 1;
+       }
+
+       /* Fill fw input params */
+       task_params.context = fw_task_ctx;
+       task_params.conn_icid = (u16)qedi_conn->iscsi_conn_id;
+       task_params.itid = tid;
+       task_params.cq_rss_number = 0;
+       task_params.tx_io_size = data_len;
+       task_params.rx_io_size = resp_sge->sge_len;
+
+       sq_idx = qedi_get_wqe_idx(qedi_conn);
+       task_params.sqe = &ep->sq[sq_idx];
+
+       memset(task_params.sqe, 0, sizeof(struct iscsi_wqe));
+       rval = init_initiator_nop_out_task(&task_params,
+                                          &nop_out_pdu_header,
+                                          &tx_sgl_task_params,
+                                          &rx_sgl_task_params);
+       if (rval)
+               return -1;
+
        qedi_ring_doorbell(qedi_conn);
        return 0;
 }
@@ -1905,7 +1821,7 @@ int qedi_send_iscsi_nopout(struct qedi_conn *qedi_conn,
 static int qedi_split_bd(struct qedi_cmd *cmd, u64 addr, int sg_len,
                         int bd_index)
 {
-       struct iscsi_sge *bd = cmd->io_tbl.sge_tbl;
+       struct scsi_sge *bd = cmd->io_tbl.sge_tbl;
        int frag_size, sg_frags;
 
        sg_frags = 0;
@@ -1938,7 +1854,7 @@ static int qedi_split_bd(struct qedi_cmd *cmd, u64 addr, int sg_len,
 static int qedi_map_scsi_sg(struct qedi_ctx *qedi, struct qedi_cmd *cmd)
 {
        struct scsi_cmnd *sc = cmd->scsi_cmd;
-       struct iscsi_sge *bd = cmd->io_tbl.sge_tbl;
+       struct scsi_sge *bd = cmd->io_tbl.sge_tbl;
        struct scatterlist *sg;
        int byte_count = 0;
        int bd_count = 0;
@@ -2040,7 +1956,7 @@ static void qedi_iscsi_map_sg_list(struct qedi_cmd *cmd)
                if (bd_count == 0)
                        return;
        } else {
-               struct iscsi_sge *bd = cmd->io_tbl.sge_tbl;
+               struct scsi_sge *bd = cmd->io_tbl.sge_tbl;
 
                bd[0].sge_addr.lo = 0;
                bd[0].sge_addr.hi = 0;
@@ -2136,244 +2052,182 @@ int qedi_iscsi_send_ioreq(struct iscsi_task *task)
        struct qedi_conn *qedi_conn = conn->dd_data;
        struct qedi_cmd *cmd = task->dd_data;
        struct scsi_cmnd *sc = task->sc;
+       struct iscsi_cmd_hdr cmd_pdu_header;
+       struct scsi_sgl_task_params tx_sgl_task_params;
+       struct scsi_sgl_task_params rx_sgl_task_params;
+       struct scsi_sgl_task_params *prx_sgl = NULL;
+       struct scsi_sgl_task_params *ptx_sgl = NULL;
+       struct iscsi_task_params task_params;
+       struct iscsi_conn_params conn_params;
+       struct scsi_initiator_cmd_params cmd_params;
        struct iscsi_task_context *fw_task_ctx;
-       struct iscsi_cached_sge_ctx *cached_sge;
-       struct iscsi_phys_sgl_ctx *phys_sgl;
-       struct iscsi_virt_sgl_ctx *virt_sgl;
-       struct ystorm_iscsi_task_st_ctx *yst_cxt;
-       struct mstorm_iscsi_task_st_ctx *mst_cxt;
-       struct iscsi_sgl *sgl_struct;
-       struct iscsi_sge *single_sge;
+       struct iscsi_cls_conn *cls_conn;
        struct iscsi_scsi_req *hdr = (struct iscsi_scsi_req *)task->hdr;
-       struct iscsi_sge *bd = cmd->io_tbl.sge_tbl;
-       enum iscsi_task_type task_type;
-       struct iscsi_cmd_hdr *fw_cmd;
-       u32 lun[2];
-       u32 exp_data;
-       u16 cq_idx = smp_processor_id() % qedi->num_queues;
-       s16 ptu_invalidate = 0;
+       enum iscsi_task_type task_type = MAX_ISCSI_TASK_TYPE;
+       struct qedi_endpoint *ep;
+       u32 scsi_lun[2];
        s16 tid = 0;
-       u8 num_fast_sgs;
+       u16 sq_idx = 0;
+       u16 cq_idx;
+       int rval = 0;
 
-       tid = qedi_get_task_idx(qedi);
-       if (tid == -1)
-               return -ENOMEM;
+       ep = qedi_conn->ep;
+       cls_conn = qedi_conn->cls_conn;
+       conn = cls_conn->dd_data;
 
        qedi_iscsi_map_sg_list(cmd);
+       int_to_scsilun(sc->device->lun, (struct scsi_lun *)scsi_lun);
 
-       int_to_scsilun(sc->device->lun, (struct scsi_lun *)lun);
-       fw_task_ctx = qedi_get_task_mem(&qedi->tasks, tid);
+       tid = qedi_get_task_idx(qedi);
+       if (tid == -1)
+               return -ENOMEM;
 
+       fw_task_ctx =
+            (struct iscsi_task_context *)qedi_get_task_mem(&qedi->tasks, tid);
        memset(fw_task_ctx, 0, sizeof(struct iscsi_task_context));
-       cmd->task_id = tid;
 
-       /* Ystorm context */
-       fw_cmd = &fw_task_ctx->ystorm_st_context.pdu_hdr.cmd;
-       SET_FIELD(fw_cmd->flags_attr, ISCSI_CMD_HDR_ATTR, ISCSI_ATTR_SIMPLE);
+       cmd->task_id = tid;
 
+       memset(&task_params, 0, sizeof(task_params));
+       memset(&cmd_pdu_header, 0, sizeof(cmd_pdu_header));
+       memset(&tx_sgl_task_params, 0, sizeof(tx_sgl_task_params));
+       memset(&rx_sgl_task_params, 0, sizeof(rx_sgl_task_params));
+       memset(&conn_params, 0, sizeof(conn_params));
+       memset(&cmd_params, 0, sizeof(cmd_params));
+
+       cq_idx = smp_processor_id() % qedi->num_queues;
+       /* Update header info */
+       SET_FIELD(cmd_pdu_header.flags_attr, ISCSI_CMD_HDR_ATTR,
+                 ISCSI_ATTR_SIMPLE);
        if (sc->sc_data_direction == DMA_TO_DEVICE) {
-               if (conn->session->initial_r2t_en) {
-                       exp_data = min((conn->session->imm_data_en *
-                                       conn->max_xmit_dlength),
-                                      conn->session->first_burst);
-                       exp_data = min(exp_data, scsi_bufflen(sc));
-                       fw_task_ctx->ustorm_ag_context.exp_data_acked =
-                                                         cpu_to_le32(exp_data);
-               } else {
-                       fw_task_ctx->ustorm_ag_context.exp_data_acked =
-                             min(conn->session->first_burst, scsi_bufflen(sc));
-               }
-
-               SET_FIELD(fw_cmd->flags_attr, ISCSI_CMD_HDR_WRITE, 1);
+               SET_FIELD(cmd_pdu_header.flags_attr,
+                         ISCSI_CMD_HDR_WRITE, 1);
                task_type = ISCSI_TASK_TYPE_INITIATOR_WRITE;
        } else {
-               if (scsi_bufflen(sc))
-                       SET_FIELD(fw_cmd->flags_attr, ISCSI_CMD_HDR_READ, 1);
+               SET_FIELD(cmd_pdu_header.flags_attr,
+                         ISCSI_CMD_HDR_READ, 1);
                task_type = ISCSI_TASK_TYPE_INITIATOR_READ;
        }
 
-       fw_cmd->lun.lo = be32_to_cpu(lun[0]);
-       fw_cmd->lun.hi = be32_to_cpu(lun[1]);
+       cmd_pdu_header.lun.lo = be32_to_cpu(scsi_lun[0]);
+       cmd_pdu_header.lun.hi = be32_to_cpu(scsi_lun[1]);
 
        qedi_update_itt_map(qedi, tid, task->itt, cmd);
-       fw_cmd->itt = qedi_set_itt(tid, get_itt(task->itt));
-       fw_cmd->expected_transfer_length = scsi_bufflen(sc);
-       fw_cmd->cmd_sn = be32_to_cpu(hdr->cmdsn);
-       fw_cmd->opcode = hdr->opcode;
-       qedi_cpy_scsi_cdb(sc, (u32 *)fw_cmd->cdb);
-
-       /* Mstorm context */
-       fw_task_ctx->mstorm_st_context.sense_db.lo = (u32)cmd->sense_buffer_dma;
-       fw_task_ctx->mstorm_st_context.sense_db.hi =
-                                       (u32)((u64)cmd->sense_buffer_dma >> 32);
-       fw_task_ctx->mstorm_ag_context.task_cid = qedi_conn->iscsi_conn_id;
-       fw_task_ctx->mstorm_st_context.task_type = task_type;
-
-       if (qedi->tid_reuse_count[tid] == QEDI_MAX_TASK_NUM) {
-               ptu_invalidate = 1;
-               qedi->tid_reuse_count[tid] = 0;
-       }
-       fw_task_ctx->ystorm_st_context.state.reuse_count =
-                                                    qedi->tid_reuse_count[tid];
-       fw_task_ctx->mstorm_st_context.reuse_count =
-                                                  qedi->tid_reuse_count[tid]++;
-
-       /* Ustorm context */
-       fw_task_ctx->ustorm_st_context.rem_rcv_len = scsi_bufflen(sc);
-       fw_task_ctx->ustorm_st_context.exp_data_transfer_len = scsi_bufflen(sc);
-       fw_task_ctx->ustorm_st_context.exp_data_sn =
-                                                  be32_to_cpu(hdr->exp_statsn);
-       fw_task_ctx->ustorm_st_context.task_type = task_type;
-       fw_task_ctx->ustorm_st_context.cq_rss_number = cq_idx;
-       fw_task_ctx->ustorm_ag_context.icid = (u16)qedi_conn->iscsi_conn_id;
-
-       SET_FIELD(fw_task_ctx->ustorm_ag_context.flags1,
-                 USTORM_ISCSI_TASK_AG_CTX_R2T2RECV, 1);
-       SET_FIELD(fw_task_ctx->ustorm_st_context.flags,
-                 USTORM_ISCSI_TASK_ST_CTX_LOCAL_COMP, 0);
-
-       num_fast_sgs = (cmd->io_tbl.sge_valid ?
-                       min((u16)QEDI_FAST_SGE_COUNT,
-                           (u16)cmd->io_tbl.sge_valid) : 0);
-       SET_FIELD(fw_task_ctx->ustorm_st_context.reg1.reg1_map,
-                 ISCSI_REG1_NUM_FAST_SGES, num_fast_sgs);
-
-       fw_task_ctx->ustorm_st_context.lun.lo = be32_to_cpu(lun[0]);
-       fw_task_ctx->ustorm_st_context.lun.hi = be32_to_cpu(lun[1]);
-
-       QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_IO, "Total sge count [%d]\n",
-                 cmd->io_tbl.sge_valid);
-
-       yst_cxt = &fw_task_ctx->ystorm_st_context;
-       mst_cxt = &fw_task_ctx->mstorm_st_context;
-       /* Tx path */
+       cmd_pdu_header.itt = qedi_set_itt(tid, get_itt(task->itt));
+       cmd_pdu_header.expected_transfer_length = cpu_to_be32(hdr->data_length);
+       cmd_pdu_header.hdr_second_dword = ntoh24(hdr->dlength);
+       cmd_pdu_header.cmd_sn = be32_to_cpu(hdr->cmdsn);
+       cmd_pdu_header.opcode = hdr->opcode;
+       qedi_cpy_scsi_cdb(sc, (u32 *)cmd_pdu_header.cdb);
+
+       /* Fill tx AHS and rx buffer */
        if (task_type == ISCSI_TASK_TYPE_INITIATOR_WRITE) {
-               /* not considering  superIO or FastIO */
-               if (cmd->io_tbl.sge_valid == 1) {
-                       cached_sge = &yst_cxt->state.sgl_ctx_union.cached_sge;
-                       cached_sge->sge.sge_addr.lo = bd[0].sge_addr.lo;
-                       cached_sge->sge.sge_addr.hi = bd[0].sge_addr.hi;
-                       cached_sge->sge.sge_len = bd[0].sge_len;
-                       qedi->cached_sgls++;
-               } else if ((cmd->io_tbl.sge_valid != 1) && cmd->use_slowpath) {
-                       SET_FIELD(fw_task_ctx->mstorm_st_context.flags.mflags,
-                                 ISCSI_MFLAGS_SLOW_IO, 1);
-                       SET_FIELD(fw_task_ctx->ustorm_st_context.reg1.reg1_map,
-                                 ISCSI_REG1_NUM_FAST_SGES, 0);
-                       phys_sgl = &yst_cxt->state.sgl_ctx_union.phys_sgl;
-                       phys_sgl->sgl_base.lo = (u32)(cmd->io_tbl.sge_tbl_dma);
-                       phys_sgl->sgl_base.hi =
-                                    (u32)((u64)cmd->io_tbl.sge_tbl_dma >> 32);
-                       phys_sgl->sgl_size = cmd->io_tbl.sge_valid;
-                       qedi->slow_sgls++;
-               } else if ((cmd->io_tbl.sge_valid != 1) && !cmd->use_slowpath) {
-                       SET_FIELD(fw_task_ctx->mstorm_st_context.flags.mflags,
-                                 ISCSI_MFLAGS_SLOW_IO, 0);
-                       SET_FIELD(fw_task_ctx->ustorm_st_context.reg1.reg1_map,
-                                 ISCSI_REG1_NUM_FAST_SGES,
-                                 min((u16)QEDI_FAST_SGE_COUNT,
-                                     (u16)cmd->io_tbl.sge_valid));
-                       virt_sgl = &yst_cxt->state.sgl_ctx_union.virt_sgl;
-                       virt_sgl->sgl_base.lo = (u32)(cmd->io_tbl.sge_tbl_dma);
-                       virt_sgl->sgl_base.hi =
+               tx_sgl_task_params.sgl = cmd->io_tbl.sge_tbl;
+               tx_sgl_task_params.sgl_phys_addr.lo =
+                                                (u32)(cmd->io_tbl.sge_tbl_dma);
+               tx_sgl_task_params.sgl_phys_addr.hi =
                                      (u32)((u64)cmd->io_tbl.sge_tbl_dma >> 32);
-                       virt_sgl->sgl_initial_offset =
-                                (u32)bd[0].sge_addr.lo & (QEDI_PAGE_SIZE - 1);
-                       qedi->fast_sgls++;
-               }
-               fw_task_ctx->mstorm_st_context.sgl_size = cmd->io_tbl.sge_valid;
-               fw_task_ctx->mstorm_st_context.rem_task_size = scsi_bufflen(sc);
-       } else {
-       /* Rx path */
-               if (cmd->io_tbl.sge_valid == 1) {
-                       SET_FIELD(fw_task_ctx->mstorm_st_context.flags.mflags,
-                                 ISCSI_MFLAGS_SLOW_IO, 0);
-                       SET_FIELD(fw_task_ctx->mstorm_st_context.flags.mflags,
-                                 ISCSI_MFLAGS_SINGLE_SGE, 1);
-                       single_sge = &mst_cxt->sgl_union.single_sge;
-                       single_sge->sge_addr.lo = bd[0].sge_addr.lo;
-                       single_sge->sge_addr.hi = bd[0].sge_addr.hi;
-                       single_sge->sge_len = bd[0].sge_len;
-                       qedi->cached_sgls++;
-               } else if ((cmd->io_tbl.sge_valid != 1) && cmd->use_slowpath) {
-                       sgl_struct = &mst_cxt->sgl_union.sgl_struct;
-                       sgl_struct->sgl_addr.lo =
-                                               (u32)(cmd->io_tbl.sge_tbl_dma);
-                       sgl_struct->sgl_addr.hi =
-                                    (u32)((u64)cmd->io_tbl.sge_tbl_dma >> 32);
-                       SET_FIELD(fw_task_ctx->mstorm_st_context.flags.mflags,
-                                 ISCSI_MFLAGS_SLOW_IO, 1);
-                       SET_FIELD(fw_task_ctx->ustorm_st_context.reg1.reg1_map,
-                                 ISCSI_REG1_NUM_FAST_SGES, 0);
-                       sgl_struct->updated_sge_size = 0;
-                       sgl_struct->updated_sge_offset = 0;
-                       qedi->slow_sgls++;
-               } else if ((cmd->io_tbl.sge_valid != 1) && !cmd->use_slowpath) {
-                       sgl_struct = &mst_cxt->sgl_union.sgl_struct;
-                       sgl_struct->sgl_addr.lo =
-                                               (u32)(cmd->io_tbl.sge_tbl_dma);
-                       sgl_struct->sgl_addr.hi =
-                                    (u32)((u64)cmd->io_tbl.sge_tbl_dma >> 32);
-                       sgl_struct->byte_offset =
-                               (u32)bd[0].sge_addr.lo & (QEDI_PAGE_SIZE - 1);
-                       SET_FIELD(fw_task_ctx->mstorm_st_context.flags.mflags,
-                                 ISCSI_MFLAGS_SLOW_IO, 0);
-                       SET_FIELD(fw_task_ctx->ustorm_st_context.reg1.reg1_map,
-                                 ISCSI_REG1_NUM_FAST_SGES, 0);
-                       sgl_struct->updated_sge_size = 0;
-                       sgl_struct->updated_sge_offset = 0;
-                       qedi->fast_sgls++;
-               }
-               fw_task_ctx->mstorm_st_context.sgl_size = cmd->io_tbl.sge_valid;
-               fw_task_ctx->mstorm_st_context.rem_task_size = scsi_bufflen(sc);
-       }
-
-       if (cmd->io_tbl.sge_valid == 1)
-               /* Singel-SGL */
-               qedi->use_cached_sge = true;
-       else {
+               tx_sgl_task_params.total_buffer_size = scsi_bufflen(sc);
+               tx_sgl_task_params.num_sges = cmd->io_tbl.sge_valid;
                if (cmd->use_slowpath)
-                       qedi->use_slow_sge = true;
-               else
-                       qedi->use_fast_sge = true;
-       }
+                       tx_sgl_task_params.small_mid_sge = true;
+       } else if (task_type == ISCSI_TASK_TYPE_INITIATOR_READ) {
+               rx_sgl_task_params.sgl = cmd->io_tbl.sge_tbl;
+               rx_sgl_task_params.sgl_phys_addr.lo =
+                                                (u32)(cmd->io_tbl.sge_tbl_dma);
+               rx_sgl_task_params.sgl_phys_addr.hi =
+                                     (u32)((u64)cmd->io_tbl.sge_tbl_dma >> 32);
+               rx_sgl_task_params.total_buffer_size = scsi_bufflen(sc);
+               rx_sgl_task_params.num_sges = cmd->io_tbl.sge_valid;
+       }
+
+       /* Fill connection parameters */
+       conn_params.first_burst_length = conn->session->first_burst;
+       conn_params.max_send_pdu_length = conn->max_xmit_dlength;
+       conn_params.max_burst_length = conn->session->max_burst;
+       if (conn->session->initial_r2t_en)
+               conn_params.initial_r2t = true;
+       if (conn->session->imm_data_en)
+               conn_params.immediate_data = true;
+
+       /* Fill command parameters */
+       cmd_params.sense_data_buffer_phys_addr.lo = (u32)cmd->sense_buffer_dma;
+       cmd_params.sense_data_buffer_phys_addr.hi =
+                                       (u32)((u64)cmd->sense_buffer_dma >> 32);
+       /* Fill fw input params */
+       task_params.context = fw_task_ctx;
+       task_params.conn_icid = (u16)qedi_conn->iscsi_conn_id;
+       task_params.itid = tid;
+       task_params.cq_rss_number = cq_idx;
+       if (task_type == ISCSI_TASK_TYPE_INITIATOR_WRITE)
+               task_params.tx_io_size = scsi_bufflen(sc);
+       else if (task_type == ISCSI_TASK_TYPE_INITIATOR_READ)
+               task_params.rx_io_size = scsi_bufflen(sc);
+
+       sq_idx = qedi_get_wqe_idx(qedi_conn);
+       task_params.sqe = &ep->sq[sq_idx];
+
        QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_IO,
-                 "%s: %s-SGL: num_sges=0x%x first-sge-lo=0x%x first-sge-hi=0x%x",
+                 "%s: %s-SGL: sg_len=0x%x num_sges=0x%x first-sge-lo=0x%x first-sge-hi=0x%x\n",
                  (task_type == ISCSI_TASK_TYPE_INITIATOR_WRITE) ?
                  "Write " : "Read ", (cmd->io_tbl.sge_valid == 1) ?
                  "Single" : (cmd->use_slowpath ? "SLOW" : "FAST"),
-                 (u16)cmd->io_tbl.sge_valid, (u32)(cmd->io_tbl.sge_tbl_dma),
+                 (u16)cmd->io_tbl.sge_valid, scsi_bufflen(sc),
+                 (u32)(cmd->io_tbl.sge_tbl_dma),
                  (u32)((u64)cmd->io_tbl.sge_tbl_dma >> 32));
 
-       /*  Add command in active command list */
+       memset(task_params.sqe, 0, sizeof(struct iscsi_wqe));
+
+       if (task_params.tx_io_size != 0)
+               ptx_sgl = &tx_sgl_task_params;
+       if (task_params.rx_io_size != 0)
+               prx_sgl = &rx_sgl_task_params;
+
+       rval = init_initiator_rw_iscsi_task(&task_params, &conn_params,
+                                           &cmd_params, &cmd_pdu_header,
+                                           ptx_sgl, prx_sgl,
+                                           NULL);
+       if (rval)
+               return -1;
+
        spin_lock(&qedi_conn->list_lock);
        list_add_tail(&cmd->io_cmd, &qedi_conn->active_cmd_list);
        cmd->io_cmd_in_list = true;
        qedi_conn->active_cmd_count++;
        spin_unlock(&qedi_conn->list_lock);
 
-       qedi_add_to_sq(qedi_conn, task, tid, ptu_invalidate, false);
        qedi_ring_doorbell(qedi_conn);
-       if (qedi_io_tracing)
-               qedi_trace_io(qedi, task, tid, QEDI_IO_TRACE_REQ);
-
        return 0;
 }
 
 int qedi_iscsi_cleanup_task(struct iscsi_task *task, bool mark_cmd_node_deleted)
 {
+       struct iscsi_task_params task_params;
+       struct qedi_endpoint *ep;
        struct iscsi_conn *conn = task->conn;
        struct qedi_conn *qedi_conn = conn->dd_data;
        struct qedi_cmd *cmd = task->dd_data;
-       s16 ptu_invalidate = 0;
+       u16 sq_idx = 0;
+       int rval = 0;
 
        QEDI_INFO(&qedi_conn->qedi->dbg_ctx, QEDI_LOG_SCSI_TM,
                  "issue cleanup tid=0x%x itt=0x%x task_state=%d cmd_state=0%x cid=0x%x\n",
                  cmd->task_id, get_itt(task->itt), task->state,
                  cmd->state, qedi_conn->iscsi_conn_id);
 
-       qedi_add_to_sq(qedi_conn, task, cmd->task_id, ptu_invalidate, true);
-       qedi_ring_doorbell(qedi_conn);
+       memset(&task_params, 0, sizeof(task_params));
+       ep = qedi_conn->ep;
+
+       sq_idx = qedi_get_wqe_idx(qedi_conn);
+
+       task_params.sqe = &ep->sq[sq_idx];
+       memset(task_params.sqe, 0, sizeof(struct iscsi_wqe));
+       task_params.itid = cmd->task_id;
 
+       rval = init_cleanup_task(&task_params);
+       if (rval)
+               return rval;
+
+       qedi_ring_doorbell(qedi_conn);
        return 0;
 }
diff --git a/drivers/scsi/qedi/qedi_fw_api.c b/drivers/scsi/qedi/qedi_fw_api.c
new file mode 100644 (file)
index 0000000..fd354d4
--- /dev/null
@@ -0,0 +1,781 @@
+/* QLogic iSCSI Offload Driver
+ * Copyright (c) 2016 Cavium Inc.
+ *
+ * This software is available under the terms of the GNU General Public License
+ * (GPL) Version 2, available from the file COPYING in the main directory of
+ * this source tree.
+ */
+
+#include <linux/types.h>
+#include <asm/byteorder.h>
+#include "qedi_hsi.h"
+#include <linux/qed/qed_if.h>
+
+#include "qedi_fw_iscsi.h"
+#include "qedi_fw_scsi.h"
+
+#define SCSI_NUM_SGES_IN_CACHE 0x4
+
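+/* An SGL takes the firmware slow path when it has more SGEs than the
+ * slow-SGL threshold and contains short mid-list elements.
+ */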
+static bool scsi_is_slow_sgl(u16 num_sges, bool small_mid_sge)
+{
+       return (num_sges > SCSI_NUM_SGES_SLOW_SGL_THR && small_mid_sge);
+}
+
+static
+void init_scsi_sgl_context(struct scsi_sgl_params *ctx_sgl_params,
+                          struct scsi_cached_sges *ctx_data_desc,
+                          struct scsi_sgl_task_params *sgl_task_params)
+{
+       u8 sge_index;
+       u8 num_sges;
+       u32 val;
+
+       num_sges = (sgl_task_params->num_sges > SCSI_NUM_SGES_IN_CACHE) ?
+                            SCSI_NUM_SGES_IN_CACHE : sgl_task_params->num_sges;
+
+       /* sgl params */
+       val = cpu_to_le32(sgl_task_params->sgl_phys_addr.lo);
+       ctx_sgl_params->sgl_addr.lo = val;
+       val = cpu_to_le32(sgl_task_params->sgl_phys_addr.hi);
+       ctx_sgl_params->sgl_addr.hi = val;
+       val = cpu_to_le32(sgl_task_params->total_buffer_size);
+       ctx_sgl_params->sgl_total_length = val;
+       ctx_sgl_params->sgl_num_sges = cpu_to_le16(sgl_task_params->num_sges);
+
+       for (sge_index = 0; sge_index < num_sges; sge_index++) {
+               val = cpu_to_le32(sgl_task_params->sgl[sge_index].sge_addr.lo);
+               ctx_data_desc->sge[sge_index].sge_addr.lo = val;
+               val = cpu_to_le32(sgl_task_params->sgl[sge_index].sge_addr.hi);
+               ctx_data_desc->sge[sge_index].sge_addr.hi = val;
+               val = cpu_to_le32(sgl_task_params->sgl[sge_index].sge_len);
+               ctx_data_desc->sge[sge_index].sge_len = val;
+       }
+}
+
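+/* The firmware task size is the raw I/O size, unless protection data
+ * travels on the wire (DIF on network), in which case the full buffer
+ * size including protection bytes is used.
+ */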
+static u32 calc_rw_task_size(struct iscsi_task_params *task_params,
+                            enum iscsi_task_type task_type,
+                            struct scsi_sgl_task_params *sgl_task_params,
+                            struct scsi_dif_task_params *dif_task_params)
+{
+       u32 io_size;
+
+       if (task_type == ISCSI_TASK_TYPE_INITIATOR_WRITE ||
+           task_type == ISCSI_TASK_TYPE_TARGET_READ)
+               io_size = task_params->tx_io_size;
+       else
+               io_size = task_params->rx_io_size;
+
+       if (!io_size)
+               return 0;
+
+       if (!dif_task_params)
+               return io_size;
+
+       return !dif_task_params->dif_on_network ?
+              io_size : sgl_task_params->total_buffer_size;
+}
+
+static void
+init_dif_context_flags(struct iscsi_dif_flags *ctx_dif_flags,
+                      struct scsi_dif_task_params *dif_task_params)
+{
+       if (!dif_task_params)
+               return;
+
+       SET_FIELD(ctx_dif_flags->flags, ISCSI_DIF_FLAGS_PROT_INTERVAL_SIZE_LOG,
+                 dif_task_params->dif_block_size_log);
+       SET_FIELD(ctx_dif_flags->flags, ISCSI_DIF_FLAGS_DIF_TO_PEER,
+                 dif_task_params->dif_on_network ? 1 : 0);
+       SET_FIELD(ctx_dif_flags->flags, ISCSI_DIF_FLAGS_HOST_INTERFACE,
+                 dif_task_params->dif_on_host ? 1 : 0);
+}
+
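+/* Build the SQ work-queue entry (WQE) describing the task to the
+ * firmware: WQE type, number of SGEs, continuation length and CDB size.
+ */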
+static void init_sqe(struct iscsi_task_params *task_params,
+                    struct scsi_sgl_task_params *sgl_task_params,
+                    struct scsi_dif_task_params *dif_task_params,
+                    struct iscsi_common_hdr *pdu_header,
+                    struct scsi_initiator_cmd_params *cmd_params,
+                    enum iscsi_task_type task_type,
+                    bool is_cleanup)
+{
+       if (!task_params->sqe)
+               return;
+
+       memset(task_params->sqe, 0, sizeof(*task_params->sqe));
+       task_params->sqe->task_id = cpu_to_le16(task_params->itid);
+       if (is_cleanup) {
+               SET_FIELD(task_params->sqe->flags, ISCSI_WQE_WQE_TYPE,
+                         ISCSI_WQE_TYPE_TASK_CLEANUP);
+               return;
+       }
+
+       switch (task_type) {
+       case ISCSI_TASK_TYPE_INITIATOR_WRITE:
+       {
+               u32 buf_size = 0;
+               u32 num_sges = 0;
+
+               init_dif_context_flags(&task_params->sqe->prot_flags,
+                                      dif_task_params);
+
+               SET_FIELD(task_params->sqe->flags, ISCSI_WQE_WQE_TYPE,
+                         ISCSI_WQE_TYPE_NORMAL);
+
+               if (task_params->tx_io_size) {
+                       buf_size = calc_rw_task_size(task_params, task_type,
+                                                    sgl_task_params,
+                                                    dif_task_params);
+
+                       if (scsi_is_slow_sgl(sgl_task_params->num_sges,
+                                            sgl_task_params->small_mid_sge))
+                               num_sges = ISCSI_WQE_NUM_SGES_SLOWIO;
+                       else
+                               num_sges = min(sgl_task_params->num_sges,
+                                              (u16)SCSI_NUM_SGES_SLOW_SGL_THR);
+               }
+
+               SET_FIELD(task_params->sqe->flags, ISCSI_WQE_NUM_SGES,
+                         num_sges);
+               SET_FIELD(task_params->sqe->contlen_cdbsize, ISCSI_WQE_CONT_LEN,
+                         buf_size);
+
+               if (GET_FIELD(pdu_header->hdr_second_dword,
+                             ISCSI_CMD_HDR_TOTAL_AHS_LEN))
+                       SET_FIELD(task_params->sqe->contlen_cdbsize,
+                                 ISCSI_WQE_CDB_SIZE,
+                                 cmd_params->extended_cdb_sge.sge_len);
+       }
+               break;
+       case ISCSI_TASK_TYPE_INITIATOR_READ:
+               SET_FIELD(task_params->sqe->flags, ISCSI_WQE_WQE_TYPE,
+                         ISCSI_WQE_TYPE_NORMAL);
+
+               if (GET_FIELD(pdu_header->hdr_second_dword,
+                             ISCSI_CMD_HDR_TOTAL_AHS_LEN))
+                       SET_FIELD(task_params->sqe->contlen_cdbsize,
+                                 ISCSI_WQE_CDB_SIZE,
+                                 cmd_params->extended_cdb_sge.sge_len);
+               break;
+       case ISCSI_TASK_TYPE_LOGIN_RESPONSE:
+       case ISCSI_TASK_TYPE_MIDPATH:
+       {
+               bool advance_statsn = true;
+
+               if (task_type == ISCSI_TASK_TYPE_LOGIN_RESPONSE)
+                       SET_FIELD(task_params->sqe->flags, ISCSI_WQE_WQE_TYPE,
+                                 ISCSI_WQE_TYPE_LOGIN);
+               else
+                       SET_FIELD(task_params->sqe->flags, ISCSI_WQE_WQE_TYPE,
+                                 ISCSI_WQE_TYPE_MIDDLE_PATH);
+
+               if (task_type == ISCSI_TASK_TYPE_MIDPATH) {
+                       u8 opcode = GET_FIELD(pdu_header->hdr_first_byte,
+                                             ISCSI_COMMON_HDR_OPCODE);
+
+                       if (opcode != ISCSI_OPCODE_TEXT_RESPONSE &&
+                           (opcode != ISCSI_OPCODE_NOP_IN ||
+                           pdu_header->itt == ISCSI_TTT_ALL_ONES))
+                               advance_statsn = false;
+               }
+
+               SET_FIELD(task_params->sqe->flags, ISCSI_WQE_RESPONSE,
+                         advance_statsn ? 1 : 0);
+
+               if (task_params->tx_io_size) {
+                       SET_FIELD(task_params->sqe->contlen_cdbsize,
+                                 ISCSI_WQE_CONT_LEN, task_params->tx_io_size);
+
+                       if (scsi_is_slow_sgl(sgl_task_params->num_sges,
+                                            sgl_task_params->small_mid_sge))
+                               SET_FIELD(task_params->sqe->flags,
+                                         ISCSI_WQE_NUM_SGES,
+                                         ISCSI_WQE_NUM_SGES_SLOWIO);
+                       else
+                               SET_FIELD(task_params->sqe->flags,
+                                         ISCSI_WQE_NUM_SGES,
+                                         min(sgl_task_params->num_sges,
+                                             (u16)SCSI_NUM_SGES_SLOW_SGL_THR));
+               }
+       }
+               break;
+       default:
+               break;
+       }
+}
+
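+/* Initialization common to all task types: copy the PDU header into the
+ * Ystorm context and set the task type, connection CID and CQ affinity.
+ */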
+static void init_default_iscsi_task(struct iscsi_task_params *task_params,
+                                   struct data_hdr *pdu_header,
+                                   enum iscsi_task_type task_type)
+{
+       struct iscsi_task_context *context;
+       u16 index;
+       u32 val;
+
+       context = task_params->context;
+       memset(context, 0, sizeof(*context));
+
+       for (index = 0; index <
+            ARRAY_SIZE(context->ystorm_st_context.pdu_hdr.data.data);
+            index++) {
+               val = cpu_to_le32(pdu_header->data[index]);
+               context->ystorm_st_context.pdu_hdr.data.data[index] = val;
+       }
+
+       context->mstorm_st_context.task_type = task_type;
+       context->mstorm_ag_context.task_cid =
+                                           cpu_to_le16(task_params->conn_icid);
+
+       SET_FIELD(context->ustorm_ag_context.flags1,
+                 USTORM_ISCSI_TASK_AG_CTX_R2T2RECV, 1);
+
+       context->ustorm_st_context.task_type = task_type;
+       context->ustorm_st_context.cq_rss_number = task_params->cq_rss_number;
+       context->ustorm_ag_context.icid = cpu_to_le16(task_params->conn_icid);
+}
+
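+/* For commands carrying an extended CDB, reference the SGE holding the
+ * CDB from the Ystorm PDU header.
+ */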
+static
+void init_initiator_rw_cdb_ystorm_context(struct ystorm_iscsi_task_st_ctx *ystc,
+                                         struct scsi_initiator_cmd_params *cmd)
+{
+       union iscsi_task_hdr *ctx_pdu_hdr = &ystc->pdu_hdr;
+       u32 val;
+
+       if (!cmd->extended_cdb_sge.sge_len)
+               return;
+
+       SET_FIELD(ctx_pdu_hdr->ext_cdb_cmd.hdr_second_dword,
+                 ISCSI_EXT_CDB_CMD_HDR_CDB_SIZE,
+                 cmd->extended_cdb_sge.sge_len);
+       val = cpu_to_le32(cmd->extended_cdb_sge.sge_addr.lo);
+       ctx_pdu_hdr->ext_cdb_cmd.cdb_sge.sge_addr.lo = val;
+       val = cpu_to_le32(cmd->extended_cdb_sge.sge_addr.hi);
+       ctx_pdu_hdr->ext_cdb_cmd.cdb_sge.sge_addr.hi = val;
+       val = cpu_to_le32(cmd->extended_cdb_sge.sge_len);
+       ctx_pdu_hdr->ext_cdb_cmd.cdb_sge.sge_len  = val;
+}
+
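+/* Ustorm tracks receive progress: remaining receive length, expected
+ * data transfer length, SGE count and DIF error reporting.
+ */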
+static
+void init_ustorm_task_contexts(struct ustorm_iscsi_task_st_ctx *ustorm_st_cxt,
+                              struct ustorm_iscsi_task_ag_ctx *ustorm_ag_cxt,
+                              u32 remaining_recv_len,
+                              u32 expected_data_transfer_len,
+                              u8 num_sges, bool tx_dif_conn_err_en)
+{
+       u32 val;
+
+       ustorm_st_cxt->rem_rcv_len = cpu_to_le32(remaining_recv_len);
+       ustorm_ag_cxt->exp_data_acked = cpu_to_le32(expected_data_transfer_len);
+       val = cpu_to_le32(expected_data_transfer_len);
+       ustorm_st_cxt->exp_data_transfer_len = val;
+       SET_FIELD(ustorm_st_cxt->reg1.reg1_map, ISCSI_REG1_NUM_SGES, num_sges);
+       SET_FIELD(ustorm_ag_cxt->flags2,
+                 USTORM_ISCSI_TASK_AG_CTX_DIF_ERROR_CF_EN,
+                 tx_dif_conn_err_en ? 1 : 0);
+}
+
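+/* exp_data_acked bounds the unsolicited data an initiator write may
+ * carry: the full first burst when initial R2T is disabled, otherwise at
+ * most one immediate-data PDU; AHS bytes are accounted for when present.
+ */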
+static
+void set_rw_exp_data_acked_and_cont_len(struct iscsi_task_context *context,
+                                       struct iscsi_conn_params  *conn_params,
+                                       enum iscsi_task_type task_type,
+                                       u32 task_size,
+                                       u32 exp_data_transfer_len,
+                                       u8 total_ahs_length)
+{
+       u32 max_unsolicited_data = 0, val;
+
+       if (total_ahs_length &&
+           (task_type == ISCSI_TASK_TYPE_INITIATOR_WRITE ||
+            task_type == ISCSI_TASK_TYPE_INITIATOR_READ))
+               SET_FIELD(context->ustorm_st_context.flags2,
+                         USTORM_ISCSI_TASK_ST_CTX_AHS_EXIST, 1);
+
+       switch (task_type) {
+       case ISCSI_TASK_TYPE_INITIATOR_WRITE:
+               if (!conn_params->initial_r2t)
+                       max_unsolicited_data = conn_params->first_burst_length;
+               else if (conn_params->immediate_data)
+                       max_unsolicited_data =
+                                         min(conn_params->first_burst_length,
+                                             conn_params->max_send_pdu_length);
+
+               context->ustorm_ag_context.exp_data_acked =
+                                  cpu_to_le32(total_ahs_length == 0 ?
+                                               min(exp_data_transfer_len,
+                                                   max_unsolicited_data) :
+                                               ((u32)(total_ahs_length +
+                                                      ISCSI_AHS_CNTL_SIZE)));
+               break;
+       case ISCSI_TASK_TYPE_TARGET_READ:
+               val = cpu_to_le32(exp_data_transfer_len);
+               context->ustorm_ag_context.exp_data_acked = val;
+               break;
+       case ISCSI_TASK_TYPE_INITIATOR_READ:
+               context->ustorm_ag_context.exp_data_acked =
+                                       cpu_to_le32((total_ahs_length == 0 ? 0 :
+                                                    total_ahs_length +
+                                                    ISCSI_AHS_CNTL_SIZE));
+               break;
+       case ISCSI_TASK_TYPE_TARGET_WRITE:
+               val = cpu_to_le32(task_size);
+               context->ustorm_ag_context.exp_cont_len = val;
+               break;
+       default:
+               break;
+       }
+}
+
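+/* Program the receive (RDIF) and transmit (TDIF) protection-information
+ * contexts from the DIF task parameters, according to task direction.
+ */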
+static
+void init_rtdif_task_context(struct rdif_task_context *rdif_context,
+                            struct tdif_task_context *tdif_context,
+                            struct scsi_dif_task_params *dif_task_params,
+                            enum iscsi_task_type task_type)
+{
+       u32 val;
+
+       if (!dif_task_params->dif_on_network || !dif_task_params->dif_on_host)
+               return;
+
+       if (task_type == ISCSI_TASK_TYPE_TARGET_WRITE ||
+           task_type == ISCSI_TASK_TYPE_INITIATOR_READ) {
+               rdif_context->app_tag_value =
+                                 cpu_to_le16(dif_task_params->application_tag);
+               rdif_context->partial_crc_value = cpu_to_le16(0xffff);
+               val = cpu_to_le32(dif_task_params->initial_ref_tag);
+               rdif_context->initial_ref_tag = val;
+               rdif_context->app_tag_mask =
+                            cpu_to_le16(dif_task_params->application_tag_mask);
+               SET_FIELD(rdif_context->flags0, RDIF_TASK_CONTEXT_CRC_SEED,
+                         dif_task_params->crc_seed ? 1 : 0);
+               SET_FIELD(rdif_context->flags0, RDIF_TASK_CONTEXT_HOSTGUARDTYPE,
+                         dif_task_params->host_guard_type);
+               SET_FIELD(rdif_context->flags0,
+                         RDIF_TASK_CONTEXT_PROTECTIONTYPE,
+                         dif_task_params->protection_type);
+               SET_FIELD(rdif_context->flags0,
+                         RDIF_TASK_CONTEXT_INITIALREFTAGVALID, 1);
+               SET_FIELD(rdif_context->flags0,
+                         RDIF_TASK_CONTEXT_KEEPREFTAGCONST,
+                         dif_task_params->keep_ref_tag_const ? 1 : 0);
+               SET_FIELD(rdif_context->flags1,
+                         RDIF_TASK_CONTEXT_VALIDATEAPPTAG,
+                         (dif_task_params->validate_app_tag &&
+                         dif_task_params->dif_on_network) ? 1 : 0);
+               SET_FIELD(rdif_context->flags1,
+                         RDIF_TASK_CONTEXT_VALIDATEGUARD,
+                         (dif_task_params->validate_guard &&
+                         dif_task_params->dif_on_network) ? 1 : 0);
+               SET_FIELD(rdif_context->flags1,
+                         RDIF_TASK_CONTEXT_VALIDATEREFTAG,
+                         (dif_task_params->validate_ref_tag &&
+                         dif_task_params->dif_on_network) ? 1 : 0);
+               SET_FIELD(rdif_context->flags1,
+                         RDIF_TASK_CONTEXT_HOSTINTERFACE,
+                         dif_task_params->dif_on_host ? 1 : 0);
+               SET_FIELD(rdif_context->flags1,
+                         RDIF_TASK_CONTEXT_NETWORKINTERFACE,
+                         dif_task_params->dif_on_network ? 1 : 0);
+               SET_FIELD(rdif_context->flags1,
+                         RDIF_TASK_CONTEXT_FORWARDGUARD,
+                         dif_task_params->forward_guard ? 1 : 0);
+               SET_FIELD(rdif_context->flags1,
+                         RDIF_TASK_CONTEXT_FORWARDAPPTAG,
+                         dif_task_params->forward_app_tag ? 1 : 0);
+               SET_FIELD(rdif_context->flags1,
+                         RDIF_TASK_CONTEXT_FORWARDREFTAG,
+                         dif_task_params->forward_ref_tag ? 1 : 0);
+               SET_FIELD(rdif_context->flags1,
+                         RDIF_TASK_CONTEXT_FORWARDAPPTAGWITHMASK,
+                         dif_task_params->forward_app_tag_with_mask ? 1 : 0);
+               SET_FIELD(rdif_context->flags1,
+                         RDIF_TASK_CONTEXT_FORWARDREFTAGWITHMASK,
+                         dif_task_params->forward_ref_tag_with_mask ? 1 : 0);
+               SET_FIELD(rdif_context->flags1,
+                         RDIF_TASK_CONTEXT_INTERVALSIZE,
+                         dif_task_params->dif_block_size_log - 9);
+               SET_FIELD(rdif_context->state,
+                         RDIF_TASK_CONTEXT_REFTAGMASK,
+                         dif_task_params->ref_tag_mask);
+               SET_FIELD(rdif_context->state, RDIF_TASK_CONTEXT_IGNOREAPPTAG,
+                         dif_task_params->ignore_app_tag);
+       }
+
+       if (task_type == ISCSI_TASK_TYPE_TARGET_READ ||
+           task_type == ISCSI_TASK_TYPE_INITIATOR_WRITE) {
+               tdif_context->app_tag_value =
+                                 cpu_to_le16(dif_task_params->application_tag);
+               tdif_context->partial_crc_valueB =
+                      cpu_to_le16(dif_task_params->crc_seed ? 0xffff : 0x0000);
+               tdif_context->partial_crc_value_a =
+                      cpu_to_le16(dif_task_params->crc_seed ? 0xffff : 0x0000);
+               SET_FIELD(tdif_context->flags0, TDIF_TASK_CONTEXT_CRC_SEED,
+                         dif_task_params->crc_seed ? 1 : 0);
+
+               SET_FIELD(tdif_context->flags0,
+                         TDIF_TASK_CONTEXT_SETERRORWITHEOP,
+                         dif_task_params->tx_dif_conn_err_en ? 1 : 0);
+               SET_FIELD(tdif_context->flags1, TDIF_TASK_CONTEXT_FORWARDGUARD,
+                         dif_task_params->forward_guard   ? 1 : 0);
+               SET_FIELD(tdif_context->flags1, TDIF_TASK_CONTEXT_FORWARDAPPTAG,
+                         dif_task_params->forward_app_tag ? 1 : 0);
+               SET_FIELD(tdif_context->flags1, TDIF_TASK_CONTEXT_FORWARDREFTAG,
+                         dif_task_params->forward_ref_tag ? 1 : 0);
+               SET_FIELD(tdif_context->flags1, TDIF_TASK_CONTEXT_INTERVALSIZE,
+                         dif_task_params->dif_block_size_log - 9);
+               SET_FIELD(tdif_context->flags1, TDIF_TASK_CONTEXT_HOSTINTERFACE,
+                         dif_task_params->dif_on_host    ? 1 : 0);
+               SET_FIELD(tdif_context->flags1,
+                         TDIF_TASK_CONTEXT_NETWORKINTERFACE,
+                         dif_task_params->dif_on_network ? 1 : 0);
+               val = cpu_to_le32(dif_task_params->initial_ref_tag);
+               tdif_context->initial_ref_tag = val;
+               tdif_context->app_tag_mask =
+                            cpu_to_le16(dif_task_params->application_tag_mask);
+               SET_FIELD(tdif_context->flags0,
+                         TDIF_TASK_CONTEXT_HOSTGUARDTYPE,
+                         dif_task_params->host_guard_type);
+               SET_FIELD(tdif_context->flags0,
+                         TDIF_TASK_CONTEXT_PROTECTIONTYPE,
+                         dif_task_params->protection_type);
+               SET_FIELD(tdif_context->flags0,
+                         TDIF_TASK_CONTEXT_INITIALREFTAGVALID,
+                         dif_task_params->initial_ref_tag_is_valid ? 1 : 0);
+               SET_FIELD(tdif_context->flags0,
+                         TDIF_TASK_CONTEXT_KEEPREFTAGCONST,
+                         dif_task_params->keep_ref_tag_const ? 1 : 0);
+               SET_FIELD(tdif_context->flags1, TDIF_TASK_CONTEXT_VALIDATEGUARD,
+                         (dif_task_params->validate_guard &&
+                          dif_task_params->dif_on_host) ? 1 : 0);
+               SET_FIELD(tdif_context->flags1,
+                         TDIF_TASK_CONTEXT_VALIDATEAPPTAG,
+                         (dif_task_params->validate_app_tag &&
+                         dif_task_params->dif_on_host) ? 1 : 0);
+               SET_FIELD(tdif_context->flags1,
+                         TDIF_TASK_CONTEXT_VALIDATEREFTAG,
+                         (dif_task_params->validate_ref_tag &&
+                          dif_task_params->dif_on_host) ? 1 : 0);
+               SET_FIELD(tdif_context->flags1,
+                         TDIF_TASK_CONTEXT_FORWARDAPPTAGWITHMASK,
+                         dif_task_params->forward_app_tag_with_mask ? 1 : 0);
+               SET_FIELD(tdif_context->flags1,
+                         TDIF_TASK_CONTEXT_FORWARDREFTAGWITHMASK,
+                         dif_task_params->forward_ref_tag_with_mask ? 1 : 0);
+               SET_FIELD(tdif_context->flags1,
+                         TDIF_TASK_CONTEXT_REFTAGMASK,
+                         dif_task_params->ref_tag_mask);
+               SET_FIELD(tdif_context->flags0,
+                         TDIF_TASK_CONTEXT_IGNOREAPPTAG,
+                         dif_task_params->ignore_app_tag ? 1 : 0);
+       }
+}
+
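+/* Request local completion from the firmware for tasks that expect no
+ * response PDU (e.g. a NOP-Out sent as a reply to a NOP-In).
+ */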
+static void set_local_completion_context(struct iscsi_task_context *context)
+{
+       SET_FIELD(context->ystorm_st_context.state.flags,
+                 YSTORM_ISCSI_TASK_STATE_LOCAL_COMP, 1);
+       SET_FIELD(context->ustorm_st_context.flags,
+                 USTORM_ISCSI_TASK_ST_CTX_LOCAL_COMP, 1);
+}
+
+static int init_rw_iscsi_task(struct iscsi_task_params *task_params,
+                             enum iscsi_task_type task_type,
+                             struct iscsi_conn_params *conn_params,
+                             struct iscsi_common_hdr *pdu_header,
+                             struct scsi_sgl_task_params *sgl_task_params,
+                             struct scsi_initiator_cmd_params *cmd_params,
+                             struct scsi_dif_task_params *dif_task_params)
+{
+       u32 exp_data_transfer_len = conn_params->max_burst_length;
+       struct iscsi_task_context *cxt;
+       bool slow_io = false;
+       u32 task_size, val;
+       u8 num_sges = 0;
+
+       task_size = calc_rw_task_size(task_params, task_type, sgl_task_params,
+                                     dif_task_params);
+
+       init_default_iscsi_task(task_params, (struct data_hdr *)pdu_header,
+                               task_type);
+
+       cxt = task_params->context;
+
+       val = cpu_to_le32(task_size);
+       cxt->ystorm_st_context.pdu_hdr.cmd.expected_transfer_length = val;
+       init_initiator_rw_cdb_ystorm_context(&cxt->ystorm_st_context,
+                                            cmd_params);
+       val = cpu_to_le32(cmd_params->sense_data_buffer_phys_addr.lo);
+       cxt->mstorm_st_context.sense_db.lo = val;
+
+       val = cpu_to_le32(cmd_params->sense_data_buffer_phys_addr.hi);
+       cxt->mstorm_st_context.sense_db.hi = val;
+
+       if (task_params->tx_io_size) {
+               init_dif_context_flags(&cxt->ystorm_st_context.state.dif_flags,
+                                      dif_task_params);
+               init_scsi_sgl_context(&cxt->ystorm_st_context.state.sgl_params,
+                                     &cxt->ystorm_st_context.state.data_desc,
+                                     sgl_task_params);
+
+               slow_io = scsi_is_slow_sgl(sgl_task_params->num_sges,
+                                          sgl_task_params->small_mid_sge);
+
+               num_sges = !slow_io ? min_t(u16, sgl_task_params->num_sges,
+                                           (u16)SCSI_NUM_SGES_SLOW_SGL_THR) :
+                                     ISCSI_WQE_NUM_SGES_SLOWIO;
+
+               if (slow_io) {
+                       SET_FIELD(cxt->ystorm_st_context.state.flags,
+                                 YSTORM_ISCSI_TASK_STATE_SLOW_IO, 1);
+               }
+       } else if (task_params->rx_io_size) {
+               init_dif_context_flags(&cxt->mstorm_st_context.dif_flags,
+                                      dif_task_params);
+               init_scsi_sgl_context(&cxt->mstorm_st_context.sgl_params,
+                                     &cxt->mstorm_st_context.data_desc,
+                                     sgl_task_params);
+               num_sges = !scsi_is_slow_sgl(sgl_task_params->num_sges,
+                               sgl_task_params->small_mid_sge) ?
+                               min_t(u16, sgl_task_params->num_sges,
+                                     (u16)SCSI_NUM_SGES_SLOW_SGL_THR) :
+                               ISCSI_WQE_NUM_SGES_SLOWIO;
+               cxt->mstorm_st_context.rem_task_size = cpu_to_le32(task_size);
+       }
+
+       if (exp_data_transfer_len > task_size ||
+           task_type != ISCSI_TASK_TYPE_TARGET_WRITE)
+               exp_data_transfer_len = task_size;
+
+       init_ustorm_task_contexts(&task_params->context->ustorm_st_context,
+                                 &task_params->context->ustorm_ag_context,
+                                 task_size, exp_data_transfer_len, num_sges,
+                                 dif_task_params ?
+                                 dif_task_params->tx_dif_conn_err_en : false);
+
+       set_rw_exp_data_acked_and_cont_len(task_params->context, conn_params,
+                                          task_type, task_size,
+                                          exp_data_transfer_len,
+                                       GET_FIELD(pdu_header->hdr_second_dword,
+                                                 ISCSI_CMD_HDR_TOTAL_AHS_LEN));
+
+       if (dif_task_params)
+               init_rtdif_task_context(&task_params->context->rdif_context,
+                                       &task_params->context->tdif_context,
+                                       dif_task_params, task_type);
+
+       init_sqe(task_params, sgl_task_params, dif_task_params, pdu_header,
+                cmd_params, task_type, false);
+
+       return 0;
+}
+
+int init_initiator_rw_iscsi_task(struct iscsi_task_params *task_params,
+                                struct iscsi_conn_params *conn_params,
+                                struct scsi_initiator_cmd_params *cmd_params,
+                                struct iscsi_cmd_hdr *cmd_header,
+                                struct scsi_sgl_task_params *tx_sgl_params,
+                                struct scsi_sgl_task_params *rx_sgl_params,
+                                struct scsi_dif_task_params *dif_task_params)
+{
+       if (GET_FIELD(cmd_header->flags_attr, ISCSI_CMD_HDR_WRITE))
+               return init_rw_iscsi_task(task_params,
+                                         ISCSI_TASK_TYPE_INITIATOR_WRITE,
+                                         conn_params,
+                                         (struct iscsi_common_hdr *)cmd_header,
+                                         tx_sgl_params, cmd_params,
+                                         dif_task_params);
+       else if (GET_FIELD(cmd_header->flags_attr, ISCSI_CMD_HDR_READ))
+               return init_rw_iscsi_task(task_params,
+                                         ISCSI_TASK_TYPE_INITIATOR_READ,
+                                         conn_params,
+                                         (struct iscsi_common_hdr *)cmd_header,
+                                         rx_sgl_params, cmd_params,
+                                         dif_task_params);
+       else
+               return -1;
+}
+
+int init_initiator_login_request_task(struct iscsi_task_params *task_params,
+                                     struct iscsi_login_req_hdr  *login_header,
+                                     struct scsi_sgl_task_params *tx_params,
+                                     struct scsi_sgl_task_params *rx_params)
+{
+       struct iscsi_task_context *cxt;
+
+       cxt = task_params->context;
+
+       init_default_iscsi_task(task_params,
+                               (struct data_hdr *)login_header,
+                               ISCSI_TASK_TYPE_MIDPATH);
+
+       init_ustorm_task_contexts(&cxt->ustorm_st_context,
+                                 &cxt->ustorm_ag_context,
+                                 task_params->rx_io_size ?
+                                 rx_params->total_buffer_size : 0,
+                                 task_params->tx_io_size ?
+                                 tx_params->total_buffer_size : 0, 0,
+                                 0);
+
+       if (task_params->tx_io_size)
+               init_scsi_sgl_context(&cxt->ystorm_st_context.state.sgl_params,
+                                     &cxt->ystorm_st_context.state.data_desc,
+                                     tx_params);
+
+       if (task_params->rx_io_size)
+               init_scsi_sgl_context(&cxt->mstorm_st_context.sgl_params,
+                                     &cxt->mstorm_st_context.data_desc,
+                                     rx_params);
+
+       cxt->mstorm_st_context.rem_task_size =
+                       cpu_to_le32(task_params->rx_io_size ?
+                                   rx_params->total_buffer_size : 0);
+
+       init_sqe(task_params, tx_params, NULL,
+                (struct iscsi_common_hdr *)login_header, NULL,
+                ISCSI_TASK_TYPE_MIDPATH, false);
+
+       return 0;
+}
+
+int init_initiator_nop_out_task(struct iscsi_task_params *task_params,
+                               struct iscsi_nop_out_hdr *nop_out_pdu_header,
+                               struct scsi_sgl_task_params *tx_sgl_task_params,
+                               struct scsi_sgl_task_params *rx_sgl_task_params)
+{
+       struct iscsi_task_context *cxt;
+
+       cxt = task_params->context;
+
+       init_default_iscsi_task(task_params,
+                               (struct data_hdr *)nop_out_pdu_header,
+                               ISCSI_TASK_TYPE_MIDPATH);
+
+       if (nop_out_pdu_header->itt == ISCSI_ITT_ALL_ONES)
+               set_local_completion_context(task_params->context);
+
+       if (task_params->tx_io_size)
+               init_scsi_sgl_context(&cxt->ystorm_st_context.state.sgl_params,
+                                     &cxt->ystorm_st_context.state.data_desc,
+                                     tx_sgl_task_params);
+
+       if (task_params->rx_io_size)
+               init_scsi_sgl_context(&cxt->mstorm_st_context.sgl_params,
+                                     &cxt->mstorm_st_context.data_desc,
+                                     rx_sgl_task_params);
+
+       init_ustorm_task_contexts(&cxt->ustorm_st_context,
+                                 &cxt->ustorm_ag_context,
+                                 task_params->rx_io_size ?
+                                 rx_sgl_task_params->total_buffer_size : 0,
+                                 task_params->tx_io_size ?
+                                 tx_sgl_task_params->total_buffer_size : 0,
+                                 0, 0);
+
+       cxt->mstorm_st_context.rem_task_size =
+                               cpu_to_le32(task_params->rx_io_size ?
+                                       rx_sgl_task_params->total_buffer_size :
+                                       0);
+
+       init_sqe(task_params, tx_sgl_task_params, NULL,
+                (struct iscsi_common_hdr *)nop_out_pdu_header, NULL,
+                ISCSI_TASK_TYPE_MIDPATH, false);
+
+       return 0;
+}
+
+int init_initiator_logout_request_task(struct iscsi_task_params *task_params,
+                                      struct iscsi_logout_req_hdr *logout_hdr,
+                                      struct scsi_sgl_task_params *tx_params,
+                                      struct scsi_sgl_task_params *rx_params)
+{
+       struct iscsi_task_context *cxt;
+
+       cxt = task_params->context;
+
+       init_default_iscsi_task(task_params,
+                               (struct data_hdr *)logout_hdr,
+                               ISCSI_TASK_TYPE_MIDPATH);
+
+       if (task_params->tx_io_size)
+               init_scsi_sgl_context(&cxt->ystorm_st_context.state.sgl_params,
+                                     &cxt->ystorm_st_context.state.data_desc,
+                                     tx_params);
+
+       if (task_params->rx_io_size)
+               init_scsi_sgl_context(&cxt->mstorm_st_context.sgl_params,
+                                     &cxt->mstorm_st_context.data_desc,
+                                     rx_params);
+
+       init_ustorm_task_contexts(&cxt->ustorm_st_context,
+                                 &cxt->ustorm_ag_context,
+                                 task_params->rx_io_size ?
+                                 rx_params->total_buffer_size : 0,
+                                 task_params->tx_io_size ?
+                                 tx_params->total_buffer_size : 0,
+                                 0, 0);
+
+       cxt->mstorm_st_context.rem_task_size =
+                                       cpu_to_le32(task_params->rx_io_size ?
+                                       rx_params->total_buffer_size : 0);
+
+       init_sqe(task_params, tx_params, NULL,
+                (struct iscsi_common_hdr *)logout_hdr, NULL,
+                ISCSI_TASK_TYPE_MIDPATH, false);
+
+       return 0;
+}
+
+int init_initiator_tmf_request_task(struct iscsi_task_params *task_params,
+                                   struct iscsi_tmf_request_hdr *tmf_header)
+{
+       init_default_iscsi_task(task_params, (struct data_hdr *)tmf_header,
+                               ISCSI_TASK_TYPE_MIDPATH);
+
+       init_sqe(task_params, NULL, NULL,
+                (struct iscsi_common_hdr *)tmf_header, NULL,
+                ISCSI_TASK_TYPE_MIDPATH, false);
+
+       return 0;
+}
+
+int init_initiator_text_request_task(struct iscsi_task_params *task_params,
+                                    struct iscsi_text_request_hdr *text_header,
+                                    struct scsi_sgl_task_params *tx_params,
+                                    struct scsi_sgl_task_params *rx_params)
+{
+       struct iscsi_task_context *cxt;
+
+       cxt = task_params->context;
+
+       init_default_iscsi_task(task_params,
+                               (struct data_hdr *)text_header,
+                               ISCSI_TASK_TYPE_MIDPATH);
+
+       if (task_params->tx_io_size)
+               init_scsi_sgl_context(&cxt->ystorm_st_context.state.sgl_params,
+                                     &cxt->ystorm_st_context.state.data_desc,
+                                     tx_params);
+
+       if (task_params->rx_io_size)
+               init_scsi_sgl_context(&cxt->mstorm_st_context.sgl_params,
+                                     &cxt->mstorm_st_context.data_desc,
+                                     rx_params);
+
+       cxt->mstorm_st_context.rem_task_size =
+                               cpu_to_le32(task_params->rx_io_size ?
+                                       rx_params->total_buffer_size : 0);
+
+       init_ustorm_task_contexts(&cxt->ustorm_st_context,
+                                 &cxt->ustorm_ag_context,
+                                 task_params->rx_io_size ?
+                                 rx_params->total_buffer_size : 0,
+                                 task_params->tx_io_size ?
+                                 tx_params->total_buffer_size : 0, 0, 0);
+
+       init_sqe(task_params, tx_params, NULL,
+                (struct iscsi_common_hdr *)text_header, NULL,
+                ISCSI_TASK_TYPE_MIDPATH, false);
+
+       return 0;
+}
+
+int init_cleanup_task(struct iscsi_task_params *task_params)
+{
+       init_sqe(task_params, NULL, NULL, NULL, NULL, ISCSI_TASK_TYPE_MIDPATH,
+                true);
+       return 0;
+}
diff --git a/drivers/scsi/qedi/qedi_fw_iscsi.h b/drivers/scsi/qedi/qedi_fw_iscsi.h
new file mode 100644 (file)
index 0000000..b6f24f9
--- /dev/null
@@ -0,0 +1,117 @@
+/*
+ * QLogic iSCSI Offload Driver
+ * Copyright (c) 2016 Cavium Inc.
+ *
+ * This software is available under the terms of the GNU General Public License
+ * (GPL) Version 2, available from the file COPYING in the main directory of
+ * this source tree.
+ */
+
+#ifndef _QEDI_FW_ISCSI_H_
+#define _QEDI_FW_ISCSI_H_
+
+#include "qedi_fw_scsi.h"
+
+struct iscsi_task_params {
+       struct iscsi_task_context *context;
+       struct iscsi_wqe          *sqe;
+       u32                       tx_io_size;
+       u32                       rx_io_size;
+       u16                       conn_icid;
+       u16                       itid;
+       u8                        cq_rss_number;
+};
+
+struct iscsi_conn_params {
+       u32     first_burst_length;
+       u32     max_send_pdu_length;
+       u32     max_burst_length;
+       bool    initial_r2t;
+       bool    immediate_data;
+};
+
+/* @brief init_initiator_rw_iscsi_task - initializes iSCSI Initiator
+ * Read/Write task context.
+ *
+ * @param task_params     - Pointer to task parameters struct
+ * @param conn_params     - Connection Parameters
+ * @param cmd_params      - command specific parameters
+ * @param cmd_pdu_header  - PDU Header Parameters
+ * @param tx_sgl_params   - Pointer to Tx SGL task params
+ * @param rx_sgl_params   - Pointer to Rx SGL task params
+ * @param dif_task_params - Pointer to DIF parameters struct
+ */
+int init_initiator_rw_iscsi_task(struct iscsi_task_params *task_params,
+                                struct iscsi_conn_params *conn_params,
+                                struct scsi_initiator_cmd_params *cmd_params,
+                                struct iscsi_cmd_hdr *cmd_pdu_header,
+                                struct scsi_sgl_task_params *tx_sgl_params,
+                                struct scsi_sgl_task_params *rx_sgl_params,
+                                struct scsi_dif_task_params *dif_task_params);
+
+/* @brief init_initiator_login_request_task - initializes iSCSI Initiator Login
+ * Request task context.
+ *
+ * @param task_params  - Pointer to task parameters struct
+ * @param login_header - PDU Header Parameters
+ * @param tx_params    - Pointer to Tx SGL task params
+ * @param rx_params    - Pointer to Rx SGL task params
+ */
+int init_initiator_login_request_task(struct iscsi_task_params *task_params,
+                                     struct iscsi_login_req_hdr *login_header,
+                                     struct scsi_sgl_task_params *tx_params,
+                                     struct scsi_sgl_task_params *rx_params);
+
+/* @brief init_initiator_nop_out_task - initializes iSCSI Initiator NOP Out
+ * task context.
+ *
+ * @param task_params        - Pointer to task parameters struct
+ * @param nop_out_pdu_header - PDU Header Parameters
+ * @param tx_sgl_params      - Pointer to Tx SGL task params
+ * @param rx_sgl_params      - Pointer to Rx SGL task params
+ */
+int init_initiator_nop_out_task(struct iscsi_task_params *task_params,
+                               struct iscsi_nop_out_hdr *nop_out_pdu_header,
+                               struct scsi_sgl_task_params *tx_sgl_params,
+                               struct scsi_sgl_task_params *rx_sgl_params);
+
+/* @brief init_initiator_logout_request_task - initializes iSCSI Initiator
+ * Logout Request task context.
+ *
+ * @param task_params - Pointer to task parameters struct
+ * @param logout_hdr  - PDU Header Parameters
+ * @param tx_params   - Pointer to Tx SGL task params
+ * @param rx_params   - Pointer to Rx SGL task params
+ */
+int init_initiator_logout_request_task(struct iscsi_task_params *task_params,
+                                      struct iscsi_logout_req_hdr *logout_hdr,
+                                      struct scsi_sgl_task_params *tx_params,
+                                      struct scsi_sgl_task_params *rx_params);
+
+/* @brief init_initiator_tmf_request_task - initializes iSCSI Initiator TMF
+ * task context.
+ *
+ * @param task_params - Pointer to task parameters struct
+ * @param tmf_header  - PDU Header Parameters
+ */
+int init_initiator_tmf_request_task(struct iscsi_task_params *task_params,
+                                   struct iscsi_tmf_request_hdr *tmf_header);
+
+/* @brief init_initiator_text_request_task - initializes iSCSI Initiator Text
+ * Request task context.
+ *
+ * @param task_params - Pointer to task parameters struct
+ * @param text_header - PDU Header Parameters
+ * @param tx_params   - Pointer to Tx SGL task params
+ * @param rx_params   - Pointer to Rx SGL task params
+ */
+int init_initiator_text_request_task(struct iscsi_task_params *task_params,
+                                    struct iscsi_text_request_hdr *text_header,
+                                    struct scsi_sgl_task_params *tx_params,
+                                    struct scsi_sgl_task_params *rx_params);
+
+/* @brief init_cleanup_task - initializes a cleanup task (SQE only)
+ *
+ * @param task_params - Pointer to task parameters struct
+ */
+int init_cleanup_task(struct iscsi_task_params *task_params);
+#endif
diff --git a/drivers/scsi/qedi/qedi_fw_scsi.h b/drivers/scsi/qedi/qedi_fw_scsi.h
new file mode 100644 (file)
index 0000000..cdaf918
--- /dev/null
@@ -0,0 +1,55 @@
+/*
+ * QLogic iSCSI Offload Driver
+ * Copyright (c) 2016 Cavium Inc.
+ *
+ * This software is available under the terms of the GNU General Public License
+ * (GPL) Version 2, available from the file COPYING in the main directory of
+ * this source tree.
+ */
+
+#ifndef _QEDI_FW_SCSI_H_
+#define _QEDI_FW_SCSI_H_
+
+#include <linux/types.h>
+#include <asm/byteorder.h>
+#include "qedi_hsi.h"
+#include <linux/qed/qed_if.h>
+
+struct scsi_sgl_task_params {
+       struct scsi_sge *sgl;
+       struct regpair  sgl_phys_addr;
+       u32             total_buffer_size;
+       u16             num_sges;
+       bool            small_mid_sge;
+};
+
+struct scsi_dif_task_params {
+       u32     initial_ref_tag;
+       bool    initial_ref_tag_is_valid;
+       u16     application_tag;
+       u16     application_tag_mask;
+       u16     dif_block_size_log;
+       bool    dif_on_network;
+       bool    dif_on_host;
+       u8      host_guard_type;
+       u8      protection_type;
+       u8      ref_tag_mask;
+       bool    crc_seed;
+       bool    tx_dif_conn_err_en;
+       bool    ignore_app_tag;
+       bool    keep_ref_tag_const;
+       bool    validate_guard;
+       bool    validate_app_tag;
+       bool    validate_ref_tag;
+       bool    forward_guard;
+       bool    forward_app_tag;
+       bool    forward_ref_tag;
+       bool    forward_app_tag_with_mask;
+       bool    forward_ref_tag_with_mask;
+};
+
+struct scsi_initiator_cmd_params {
+       struct scsi_sge extended_cdb_sge;
+       struct regpair  sense_data_buffer_phys_addr;
+};
+#endif
index 8e488de88ece9fb8fc49b991935cf5c24a87b5b4..63d793f460645d44c4ae29638145cbcbcbc9be78 100644 (file)
 
 #include "qedi_iscsi.h"
 
+#ifdef CONFIG_DEBUG_FS
+extern int qedi_do_not_recover;
+#else
+#define qedi_do_not_recover (0)
+#endif
+
 extern uint qedi_io_tracing;
-extern int do_not_recover;
+
 extern struct scsi_host_template qedi_host_template;
 extern struct iscsi_transport qedi_iscsi_transport;
 extern const struct qed_iscsi_ops *qedi_ops;
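
The CONFIG_DEBUG_FS fallback above lets every call site test the knob
unconditionally; without debugfs it degrades to a compile-time 0 and the
branch is optimized away. The same pattern in isolation (example_* names are
placeholders):

#ifdef CONFIG_DEBUG_FS
extern int example_do_not_recover;	/* defined beside the debugfs attribute */
#else
#define example_do_not_recover (0)
#endif

static inline bool example_recovery_allowed(void)
{
	return !example_do_not_recover;	/* dead code when debugfs is off */
}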
index b9f79d36142d5e85182d39d16ba34a7a1a6f6521..d1de172bebac626b9b61b2da4e1588a36519bb9d 100644 (file)
@@ -175,7 +175,7 @@ static void qedi_destroy_cmd_pool(struct qedi_ctx *qedi,
                if (cmd->io_tbl.sge_tbl)
                        dma_free_coherent(&qedi->pdev->dev,
                                          QEDI_ISCSI_MAX_BDS_PER_CMD *
-                                         sizeof(struct iscsi_sge),
+                                         sizeof(struct scsi_sge),
                                          cmd->io_tbl.sge_tbl,
                                          cmd->io_tbl.sge_tbl_dma);
 
@@ -191,7 +191,7 @@ static int qedi_alloc_sget(struct qedi_ctx *qedi, struct iscsi_session *session,
                           struct qedi_cmd *cmd)
 {
        struct qedi_io_bdt *io = &cmd->io_tbl;
-       struct iscsi_sge *sge;
+       struct scsi_sge *sge;
 
        io->sge_tbl = dma_alloc_coherent(&qedi->pdev->dev,
                                         QEDI_ISCSI_MAX_BDS_PER_CMD *
@@ -708,22 +708,20 @@ static void qedi_conn_get_stats(struct iscsi_cls_conn *cls_conn,
 
 static void qedi_iscsi_prep_generic_pdu_bd(struct qedi_conn *qedi_conn)
 {
-       struct iscsi_sge *bd_tbl;
+       struct scsi_sge *bd_tbl;
 
-       bd_tbl = (struct iscsi_sge *)qedi_conn->gen_pdu.req_bd_tbl;
+       bd_tbl = (struct scsi_sge *)qedi_conn->gen_pdu.req_bd_tbl;
 
        bd_tbl->sge_addr.hi =
                (u32)((u64)qedi_conn->gen_pdu.req_dma_addr >> 32);
        bd_tbl->sge_addr.lo = (u32)qedi_conn->gen_pdu.req_dma_addr;
        bd_tbl->sge_len = qedi_conn->gen_pdu.req_wr_ptr -
                                qedi_conn->gen_pdu.req_buf;
-       bd_tbl->reserved0 = 0;
-       bd_tbl = (struct iscsi_sge  *)qedi_conn->gen_pdu.resp_bd_tbl;
+       bd_tbl = (struct scsi_sge  *)qedi_conn->gen_pdu.resp_bd_tbl;
        bd_tbl->sge_addr.hi =
                        (u32)((u64)qedi_conn->gen_pdu.resp_dma_addr >> 32);
        bd_tbl->sge_addr.lo = (u32)qedi_conn->gen_pdu.resp_dma_addr;
        bd_tbl->sge_len = ISCSI_DEF_MAX_RECV_SEG_LEN;
-       bd_tbl->reserved0 = 0;
 }
 
 static int qedi_iscsi_send_generic_request(struct iscsi_task *task)
@@ -833,7 +831,7 @@ qedi_ep_connect(struct Scsi_Host *shost, struct sockaddr *dst_addr,
                return ERR_PTR(ret);
        }
 
-       if (do_not_recover) {
+       if (qedi_do_not_recover) {
                ret = -ENOMEM;
                return ERR_PTR(ret);
        }
@@ -957,7 +955,7 @@ static int qedi_ep_poll(struct iscsi_endpoint *ep, int timeout_ms)
        struct qedi_endpoint *qedi_ep;
        int ret = 0;
 
-       if (do_not_recover)
+       if (qedi_do_not_recover)
                return 1;
 
        qedi_ep = ep->dd_data;
@@ -1025,7 +1023,7 @@ static void qedi_ep_disconnect(struct iscsi_endpoint *ep)
                }
 
                if (test_bit(QEDI_IN_RECOVERY, &qedi->flags)) {
-                       if (do_not_recover) {
+                       if (qedi_do_not_recover) {
                                QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_INFO,
                                          "Do not recover cid=0x%x\n",
                                          qedi_ep->iscsi_cid);
@@ -1039,7 +1037,7 @@ static void qedi_ep_disconnect(struct iscsi_endpoint *ep)
                }
        }
 
-       if (do_not_recover)
+       if (qedi_do_not_recover)
                goto ep_exit_recover;
 
        switch (qedi_ep->state) {
index d3c06bbddb4e8195d0ba1809a1ed6ad671d402c1..3247287cb0e7e5c4b16a2ad043ec636754e77266 100644 (file)
@@ -102,7 +102,7 @@ struct qedi_endpoint {
 #define QEDI_SQ_WQES_MIN       16
 
 struct qedi_io_bdt {
-       struct iscsi_sge *sge_tbl;
+       struct scsi_sge *sge_tbl;
        dma_addr_t sge_tbl_dma;
        u16 sge_valid;
 };
index 5eda21d903e93dfc96702552d2de9deb7f01c734..92775a8b74b1cdc068b8d8808b7bd9ccd5b212eb 100644 (file)
@@ -1805,7 +1805,7 @@ static int __qedi_probe(struct pci_dev *pdev, int mode)
         */
        qedi_ops->common->update_pf_params(qedi->cdev, &qedi->pf_params);
 
-       qedi_setup_int(qedi);
+       rc = qedi_setup_int(qedi);
        if (rc)
                goto stop_iscsi_func;
 
@@ -2007,6 +2007,7 @@ static void qedi_remove(struct pci_dev *pdev)
 
 static struct pci_device_id qedi_pci_tbl[] = {
        { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, 0x165E) },
+       { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, 0x8084) },
        { 0 },
 };
 MODULE_DEVICE_TABLE(pci, qedi_pci_tbl);
index 9543a1b139d4e9a00fbac1c9c8474d6bec0e7653..d61e3ac22e675bd1145003c9064e885de116d108 100644 (file)
@@ -7,8 +7,8 @@
  * this source tree.
  */
 
-#define QEDI_MODULE_VERSION    "8.10.3.0"
+#define QEDI_MODULE_VERSION    "8.10.4.0"
 #define QEDI_DRIVER_MAJOR_VER          8
 #define QEDI_DRIVER_MINOR_VER          10
-#define QEDI_DRIVER_REV_VER            3
+#define QEDI_DRIVER_REV_VER            4
 #define QEDI_DRIVER_ENG_VER            0
index 67c0d5aa32125ca135ccb6cc2bd83af76b0ffd1b..de952935b5d2ca572d618e2a8802a1e035c0fbdb 100644 (file)
@@ -3,6 +3,7 @@ config SCSI_QLA_FC
        depends on PCI && SCSI
        depends on SCSI_FC_ATTRS
        select FW_LOADER
+       select BTREE
        ---help---
        This qla2xxx driver supports all QLogic Fibre Channel
        PCI and PCIe host adapters.
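
"select BTREE" pulls lib/btree.c into the build for the btree_head32 host
map added further down in qla_target.c, which keys virtual hosts by the
24-bit FC port ID. A sketch of that 32-bit btree API under the same key
packing (the example_* wrappers are illustrative):

#include <linux/btree.h>

static int example_host_map_insert(struct btree_head32 *map, uint8_t domain,
				   uint8_t area, uint8_t al_pa, void *host)
{
	u32 key = ((u32)domain << 16) | ((u32)area << 8) | al_pa;

	return btree_insert32(map, key, host, GFP_KERNEL);
}

static void *example_host_map_lookup(struct btree_head32 *map, uint8_t domain,
				     uint8_t area, uint8_t al_pa)
{
	u32 key = ((u32)domain << 16) | ((u32)area << 8) | al_pa;

	return btree_lookup32(map, key);	/* NULL when no host owns the ID */
}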
index f610103994afd4c53cbf439db646eb5b44851689..435ff7fd6384a0a4e941efb3d60411e0731d4c1b 100644 (file)
@@ -2154,8 +2154,6 @@ qla24xx_vport_delete(struct fc_vport *fc_vport)
                    "Timer for the VP[%d] has stopped\n", vha->vp_idx);
        }
 
-       BUG_ON(atomic_read(&vha->vref_count));
-
        qla2x00_free_fcports(vha);
 
        mutex_lock(&ha->vport_lock);
@@ -2166,7 +2164,7 @@ qla24xx_vport_delete(struct fc_vport *fc_vport)
        dma_free_coherent(&ha->pdev->dev, vha->gnl.size, vha->gnl.l,
            vha->gnl.ldma);
 
-       if (vha->qpair->vp_idx == vha->vp_idx) {
+       if (vha->qpair && vha->qpair->vp_idx == vha->vp_idx) {
                if (qla2xxx_delete_qpair(vha, vha->qpair) != QLA_SUCCESS)
                        ql_log(ql_log_warn, vha, 0x7087,
                            "Queue Pair delete failed.\n");
index 21d9fb7fc88796cbaa09fbfa160b9b20c17e2015..51b4179469d1851be96872ee39b24739fc34135e 100644 (file)
@@ -2707,13 +2707,9 @@ ql_dump_buffer(uint32_t level, scsi_qla_host_t *vha, int32_t id,
            "%-+5d  0  1  2  3  4  5  6  7  8  9  A  B  C  D  E  F\n", size);
        ql_dbg(level, vha, id,
            "----- -----------------------------------------------\n");
-       for (cnt = 0; cnt < size; cnt++, buf++) {
-               if (cnt % 16 == 0)
-                       ql_dbg(level, vha, id, "%04x:", cnt & ~0xFU);
-               printk(" %02x", *buf);
-               if (cnt % 16 == 15)
-                       printk("\n");
+       for (cnt = 0; cnt < size; cnt += 16) {
+               ql_dbg(level, vha, id, "%04x: ", cnt);
+               print_hex_dump(KERN_CONT, "", DUMP_PREFIX_NONE, 16, 1,
+                              buf + cnt, min(16U, size - cnt), false);
        }
-       if (cnt % 16 != 0)
-               printk("\n");
 }
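
The rewrite above swaps the hand-rolled byte loop for print_hex_dump(),
emitting one 16-byte row per iteration with the offset printed by the
driver's own logger. The equivalent loop in isolation:

#include <linux/kernel.h>
#include <linux/printk.h>

static void example_hex_dump(const u8 *buf, u32 size)
{
	u32 cnt;

	for (cnt = 0; cnt < size; cnt += 16) {
		pr_info("%04x: ", cnt);
		/* KERN_CONT appends the hex bytes to the offset line above. */
		print_hex_dump(KERN_CONT, "", DUMP_PREFIX_NONE, 16, 1,
			       buf + cnt, min(16U, size - cnt), false);
	}
}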
index e1fc4e66966aeab7b64bfd4ca9c75ca4da1a5be5..c6bffe929fe7dc54b83ac8d89087b4b0d7e0efca 100644 (file)
@@ -348,6 +348,7 @@ ql_log_pci(uint32_t, struct pci_dev *pdev, int32_t, const char *fmt, ...);
 #define ql_dbg_tgt     0x00004000 /* Target mode */
 #define ql_dbg_tgt_mgt 0x00002000 /* Target mode management */
 #define ql_dbg_tgt_tmr 0x00001000 /* Target mode task management */
+#define ql_dbg_tgt_dif  0x00000800 /* Target mode dif */
 
 extern int qla27xx_dump_mpi_ram(struct qla_hw_data *, uint32_t, uint32_t *,
        uint32_t, void **);
index 625d438e3cce01e39a57bfdd3d581ac24e6a5c55..ae119018dfaae9fe65c5cfe1869cdc655b27a3ea 100644 (file)
@@ -25,6 +25,7 @@
 #include <linux/firmware.h>
 #include <linux/aer.h>
 #include <linux/mutex.h>
+#include <linux/btree.h>
 
 #include <scsi/scsi.h>
 #include <scsi/scsi_host.h>
@@ -395,11 +396,15 @@ struct srb_iocb {
                        struct completion comp;
                } abt;
                struct ct_arg ctarg;
+#define MAX_IOCB_MB_REG 28
+#define SIZEOF_IOCB_MB_REG (MAX_IOCB_MB_REG * sizeof(uint16_t))
                struct {
-                       __le16 in_mb[28];       /* fr fw */
-                       __le16 out_mb[28];      /* to fw */
+                       __le16 in_mb[MAX_IOCB_MB_REG];  /* from FW */
+                       __le16 out_mb[MAX_IOCB_MB_REG]; /* to FW */
                        void *out, *in;
                        dma_addr_t out_dma, in_dma;
+                       struct completion comp;
+                       int rc;
                } mbx;
                struct {
                        struct imm_ntfy_from_isp *ntfy;
@@ -437,7 +442,7 @@ typedef struct srb {
        uint32_t handle;
        uint16_t flags;
        uint16_t type;
-       char *name;
+       const char *name;
        int iocbs;
        struct qla_qpair *qpair;
        u32 gen1;       /* scratch */
@@ -2300,6 +2305,8 @@ typedef struct fc_port {
        struct ct_sns_desc ct_desc;
        enum discovery_state disc_state;
        enum login_state fw_login_state;
+       unsigned long plogi_nack_done_deadline;
+
        u32 login_gen, last_login_gen;
        u32 rscn_gen, last_rscn_gen;
        u32 chip_reset;
@@ -3106,6 +3113,16 @@ struct qla_chip_state_84xx {
        uint32_t gold_fw_version;
 };
 
+struct qla_dif_statistics {
+       uint64_t dif_input_bytes;
+       uint64_t dif_output_bytes;
+       uint64_t dif_input_requests;
+       uint64_t dif_output_requests;
+       uint32_t dif_guard_err;
+       uint32_t dif_ref_tag_err;
+       uint32_t dif_app_tag_err;
+};
+
 struct qla_statistics {
        uint32_t total_isp_aborts;
        uint64_t input_bytes;
@@ -3118,6 +3135,8 @@ struct qla_statistics {
        uint32_t stat_max_pend_cmds;
        uint32_t stat_max_qfull_cmds_alloc;
        uint32_t stat_max_qfull_cmds_dropped;
+
+       struct qla_dif_statistics qla_dif_stats;
 };
 
 struct bidi_statistics {
@@ -3125,6 +3144,16 @@ struct bidi_statistics {
        unsigned long long transfer_bytes;
 };
 
+struct qla_tc_param {
+       struct scsi_qla_host *vha;
+       uint32_t blk_sz;
+       uint32_t bufflen;
+       struct scatterlist *sg;
+       struct scatterlist *prot_sg;
+       struct crc_context *ctx;
+       uint8_t *ctx_dsd_alloced;
+};
+
 /* Multi queue support */
 #define MBC_INITIALIZE_MULTIQ 0x1f
 #define QLA_QUE_PAGE 0X1000
@@ -3272,6 +3301,8 @@ struct qlt_hw_data {
        uint8_t tgt_node_name[WWN_SIZE];
 
        struct dentry *dfs_tgt_sess;
+       struct dentry *dfs_tgt_port_database;
+
        struct list_head q_full_list;
        uint32_t num_pend_cmds;
        uint32_t num_qfull_cmds_alloc;
@@ -3281,6 +3312,7 @@ struct qlt_hw_data {
        spinlock_t sess_lock;
        int rspq_vector_cpuid;
        spinlock_t atio_lock ____cacheline_aligned;
+       struct btree_head32 host_map;
 };
 
 #define MAX_QFULL_CMDS_ALLOC   8192
@@ -3290,6 +3322,10 @@ struct qlt_hw_data {
 
 #define LEAK_EXCHG_THRESH_HOLD_PERCENT 75      /* 75 percent */
 
+#define QLA_EARLY_LINKUP(_ha) \
+       ((_ha->flags.n2n_ae || _ha->flags.lip_ae) && \
+        _ha->flags.fw_started && !_ha->flags.fw_init_done)
+
 /*
  * Qlogic host adapter specific data structure.
 */
@@ -3339,7 +3375,11 @@ struct qla_hw_data {
                uint32_t        fawwpn_enabled:1;
                uint32_t        exlogins_enabled:1;
                uint32_t        exchoffld_enabled:1;
-               /* 35 bits */
+
+               uint32_t        lip_ae:1;
+               uint32_t        n2n_ae:1;
+               uint32_t        fw_started:1;
+               uint32_t        fw_init_done:1;
        } flags;
 
        /* This spinlock is used to protect "io transactions", you must
@@ -3432,7 +3472,6 @@ struct qla_hw_data {
 #define P2P_LOOP  3
        uint8_t         interrupts_on;
        uint32_t        isp_abort_cnt;
-
 #define PCI_DEVICE_ID_QLOGIC_ISP2532    0x2532
 #define PCI_DEVICE_ID_QLOGIC_ISP8432    0x8432
 #define PCI_DEVICE_ID_QLOGIC_ISP8001   0x8001
@@ -3913,6 +3952,7 @@ typedef struct scsi_qla_host {
        struct list_head vp_fcports;    /* list of fcports */
        struct list_head work_list;
        spinlock_t work_lock;
+       struct work_struct iocb_work;
 
        /* Commonly used flags and state information. */
        struct Scsi_Host *host;
@@ -4076,6 +4116,7 @@ typedef struct scsi_qla_host {
        /* Count of active session/fcport */
        int fcport_count;
        wait_queue_head_t fcport_waitQ;
+       wait_queue_head_t vref_waitq;
 } scsi_qla_host_t;
 
 struct qla27xx_image_status {
@@ -4131,14 +4172,17 @@ struct qla2_sgx {
        mb();                                           \
        if (__vha->flags.delete_progress) {             \
                atomic_dec(&__vha->vref_count);         \
+               wake_up(&__vha->vref_waitq);            \
                __bail = 1;                             \
        } else {                                        \
                __bail = 0;                             \
        }                                               \
 } while (0)
 
-#define QLA_VHA_MARK_NOT_BUSY(__vha)                   \
+#define QLA_VHA_MARK_NOT_BUSY(__vha) do {              \
        atomic_dec(&__vha->vref_count);                 \
+       wake_up(&__vha->vref_waitq);                    \
+} while (0)                                            \
 
 #define QLA_QPAIR_MARK_BUSY(__qpair, __bail) do {      \
        atomic_inc(&__qpair->ref_count);                \
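
Every place that drops vref_count now also kicks vref_waitq, so the vport
delete path (see qla_mid.c below) can sleep instead of polling. The producer
half of that pattern in isolation, with illustrative names:

#include <linux/wait.h>
#include <linux/atomic.h>

static inline void example_mark_not_busy(atomic_t *ref, wait_queue_head_t *wq)
{
	atomic_dec(ref);
	wake_up(wq);	/* unblocks wait_event_timeout() in the teardown path */
}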
index b48cce696bac77e44f7c7579fd1829a40391da2d..989e17b0758cd51ec029204c48eddf37c55c180a 100644 (file)
@@ -19,11 +19,11 @@ qla2x00_dfs_tgt_sess_show(struct seq_file *s, void *unused)
        struct qla_hw_data *ha = vha->hw;
        unsigned long flags;
        struct fc_port *sess = NULL;
-       struct qla_tgt *tgt= vha->vha_tgt.qla_tgt;
+       struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
 
-       seq_printf(s, "%s\n",vha->host_str);
+       seq_printf(s, "%s\n", vha->host_str);
        if (tgt) {
-               seq_printf(s, "Port ID   Port Name                Handle\n");
+               seq_puts(s, "Port ID   Port Name                Handle\n");
 
                spin_lock_irqsave(&ha->tgt.sess_lock, flags);
                list_for_each_entry(sess, &vha->vp_fcports, list)
@@ -44,7 +44,6 @@ qla2x00_dfs_tgt_sess_open(struct inode *inode, struct file *file)
        return single_open(file, qla2x00_dfs_tgt_sess_show, vha);
 }
 
-
 static const struct file_operations dfs_tgt_sess_ops = {
        .open           = qla2x00_dfs_tgt_sess_open,
        .read           = seq_read,
@@ -52,6 +51,78 @@ static const struct file_operations dfs_tgt_sess_ops = {
        .release        = single_release,
 };
 
+static int
+qla2x00_dfs_tgt_port_database_show(struct seq_file *s, void *unused)
+{
+       scsi_qla_host_t *vha = s->private;
+       struct qla_hw_data *ha = vha->hw;
+       struct gid_list_info *gid_list;
+       dma_addr_t gid_list_dma;
+       fc_port_t fc_port;
+       char *id_iter;
+       int rc, i;
+       uint16_t entries, loop_id;
+       struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
+
+       seq_printf(s, "%s\n", vha->host_str);
+       if (tgt) {
+               gid_list = dma_alloc_coherent(&ha->pdev->dev,
+                   qla2x00_gid_list_size(ha),
+                   &gid_list_dma, GFP_KERNEL);
+               if (!gid_list) {
+                       ql_dbg(ql_dbg_user, vha, 0x705c,
+                           "DMA allocation failed for %u\n",
+                            qla2x00_gid_list_size(ha));
+                       return 0;
+               }
+
+               rc = qla24xx_gidlist_wait(vha, gid_list, gid_list_dma,
+                   &entries);
+               if (rc != QLA_SUCCESS)
+                       goto out_free_id_list;
+
+               id_iter = (char *)gid_list;
+
+               seq_puts(s, "Port Name  Port ID         Loop ID\n");
+
+               for (i = 0; i < entries; i++) {
+                       struct gid_list_info *gid =
+                           (struct gid_list_info *)id_iter;
+                       loop_id = le16_to_cpu(gid->loop_id);
+                       memset(&fc_port, 0, sizeof(fc_port_t));
+
+                       fc_port.loop_id = loop_id;
+
+                       rc = qla24xx_gpdb_wait(vha, &fc_port, 0);
+                       seq_printf(s, "%8phC  %02x%02x%02x  %d\n",
+                               fc_port.port_name, fc_port.d_id.b.domain,
+                               fc_port.d_id.b.area, fc_port.d_id.b.al_pa,
+                               fc_port.loop_id);
+                       id_iter += ha->gid_list_info_size;
+               }
+out_free_id_list:
+               dma_free_coherent(&ha->pdev->dev, qla2x00_gid_list_size(ha),
+                   gid_list, gid_list_dma);
+       }
+
+       return 0;
+}
+
+static int
+qla2x00_dfs_tgt_port_database_open(struct inode *inode, struct file *file)
+{
+       scsi_qla_host_t *vha = inode->i_private;
+
+       return single_open(file, qla2x00_dfs_tgt_port_database_show, vha);
+}
+
+static const struct file_operations dfs_tgt_port_database_ops = {
+       .open           = qla2x00_dfs_tgt_port_database_open,
+       .read           = seq_read,
+       .llseek         = seq_lseek,
+       .release        = single_release,
+};
+
 static int
 qla_dfs_fw_resource_cnt_show(struct seq_file *s, void *unused)
 {
@@ -114,6 +185,21 @@ qla_dfs_tgt_counters_show(struct seq_file *s, void *unused)
        seq_printf(s, "num Q full sent = %lld\n",
                vha->tgt_counters.num_q_full_sent);
 
+       /* DIF stats */
+       seq_printf(s, "DIF Inp Bytes = %lld\n",
+               vha->qla_stats.qla_dif_stats.dif_input_bytes);
+       seq_printf(s, "DIF Outp Bytes = %lld\n",
+               vha->qla_stats.qla_dif_stats.dif_output_bytes);
+       seq_printf(s, "DIF Inp Req = %lld\n",
+               vha->qla_stats.qla_dif_stats.dif_input_requests);
+       seq_printf(s, "DIF Outp Req = %lld\n",
+               vha->qla_stats.qla_dif_stats.dif_output_requests);
+       seq_printf(s, "DIF Guard err = %d\n",
+               vha->qla_stats.qla_dif_stats.dif_guard_err);
+       seq_printf(s, "DIF Ref tag err = %d\n",
+               vha->qla_stats.qla_dif_stats.dif_ref_tag_err);
+       seq_printf(s, "DIF App tag err = %d\n",
+               vha->qla_stats.qla_dif_stats.dif_app_tag_err);
        return 0;
 }
 
@@ -281,6 +367,14 @@ create_nodes:
                goto out;
        }
 
+       ha->tgt.dfs_tgt_port_database = debugfs_create_file("tgt_port_database",
+           S_IRUSR, ha->dfs_dir, vha, &dfs_tgt_port_database_ops);
+       if (!ha->tgt.dfs_tgt_port_database) {
+               ql_log(ql_log_warn, vha, 0xffff,
+                   "Unable to create debugFS tgt_port_database node.\n");
+               goto out;
+       }
+
        ha->dfs_fce = debugfs_create_file("fce", S_IRUSR, ha->dfs_dir, vha,
            &dfs_fce_ops);
        if (!ha->dfs_fce) {
@@ -311,6 +405,11 @@ qla2x00_dfs_remove(scsi_qla_host_t *vha)
                ha->tgt.dfs_tgt_sess = NULL;
        }
 
+       if (ha->tgt.dfs_tgt_port_database) {
+               debugfs_remove(ha->tgt.dfs_tgt_port_database);
+               ha->tgt.dfs_tgt_port_database = NULL;
+       }
+
        if (ha->dfs_fw_resource_cnt) {
                debugfs_remove(ha->dfs_fw_resource_cnt);
                ha->dfs_fw_resource_cnt = NULL;
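
The new tgt_port_database node follows the stock seq_file/single_open
debugfs recipe; the show callback rebuilds the whole text on every read.
A minimal standalone instance of the same recipe (example_* names are
placeholders):

#include <linux/debugfs.h>
#include <linux/seq_file.h>

static int example_show(struct seq_file *s, void *unused)
{
	seq_puts(s, "Port Name  Port ID         Loop ID\n");
	return 0;
}

static int example_open(struct inode *inode, struct file *file)
{
	/* i_private carries the pointer passed to debugfs_create_file(). */
	return single_open(file, example_show, inode->i_private);
}

static const struct file_operations example_ops = {
	.open		= example_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};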
index b3d6441d1d90eb27f1908fa27ea1ec28f024b1d9..5b2451745e9f471988e8685d68f3423ec5d5811f 100644 (file)
@@ -193,6 +193,7 @@ extern int qla24xx_post_upd_fcport_work(struct scsi_qla_host *, fc_port_t *);
 void qla2x00_handle_login_done_event(struct scsi_qla_host *, fc_port_t *,
        uint16_t *);
 int qla24xx_post_gnl_work(struct scsi_qla_host *, fc_port_t *);
+int qla24xx_async_abort_cmd(srb_t *);
 
 /*
  * Global Functions in qla_mid.c source file.
@@ -256,11 +257,11 @@ extern unsigned long qla2x00_get_async_timeout(struct scsi_qla_host *);
 extern void *qla2x00_alloc_iocbs(scsi_qla_host_t *, srb_t *);
 extern int qla2x00_issue_marker(scsi_qla_host_t *, int);
 extern int qla24xx_walk_and_build_sglist_no_difb(struct qla_hw_data *, srb_t *,
-       uint32_t *, uint16_t, struct qla_tgt_cmd *);
+       uint32_t *, uint16_t, struct qla_tc_param *);
 extern int qla24xx_walk_and_build_sglist(struct qla_hw_data *, srb_t *,
-       uint32_t *, uint16_t, struct qla_tgt_cmd *);
+       uint32_t *, uint16_t, struct qla_tc_param *);
 extern int qla24xx_walk_and_build_prot_sglist(struct qla_hw_data *, srb_t *,
-       uint32_t *, uint16_t, struct qla_tgt_cmd *);
+       uint32_t *, uint16_t, struct qla_tc_param *);
 extern int qla24xx_get_one_block_sg(uint32_t, struct qla2_sgx *, uint32_t *);
 extern int qla24xx_configure_prot_mode(srb_t *, uint16_t *);
 extern int qla24xx_build_scsi_crc_2_iocbs(srb_t *,
@@ -368,7 +369,7 @@ qla2x00_get_link_status(scsi_qla_host_t *, uint16_t, struct link_statistics *,
 
 extern int
 qla24xx_get_isp_stats(scsi_qla_host_t *, struct link_statistics *,
-    dma_addr_t, uint);
+    dma_addr_t, uint16_t);
 
 extern int qla24xx_abort_command(srb_t *);
 extern int qla24xx_async_abort_command(srb_t *);
@@ -472,6 +473,13 @@ qla2x00_dump_mctp_data(scsi_qla_host_t *, dma_addr_t, uint32_t, uint32_t);
 extern int
 qla26xx_dport_diagnostics(scsi_qla_host_t *, void *, uint, uint);
 
+int qla24xx_send_mb_cmd(struct scsi_qla_host *, mbx_cmd_t *);
+int qla24xx_gpdb_wait(struct scsi_qla_host *, fc_port_t *, u8);
+int qla24xx_gidlist_wait(struct scsi_qla_host *, void *, dma_addr_t,
+    uint16_t *);
+int __qla24xx_parse_gpdb(struct scsi_qla_host *, fc_port_t *,
+       struct port_database_24xx *);
+
 /*
  * Global Function Prototypes in qla_isr.c source file.
  */
@@ -846,5 +854,7 @@ extern struct fc_port *qlt_find_sess_invalidate_other(scsi_qla_host_t *,
        uint64_t wwn, port_id_t port_id, uint16_t loop_id, struct fc_port **);
 void qla24xx_delete_sess_fn(struct work_struct *);
 void qlt_unknown_atio_work_fn(struct work_struct *);
+void qlt_update_host_map(struct scsi_qla_host *, port_id_t);
+void qlt_remove_target_resources(struct qla_hw_data *);
 
 #endif /* _QLA_GBL_H */
index 32fb9007f13770e4cd43650521b67e991a66d3e9..f9d2fe7b1adedf9349c11b7bfaf389c223a21ba8 100644 (file)
@@ -629,7 +629,6 @@ void qla24xx_async_gpdb_sp_done(void *s, int res)
        struct srb *sp = s;
        struct scsi_qla_host *vha = sp->vha;
        struct qla_hw_data *ha = vha->hw;
-       uint64_t zero = 0;
        struct port_database_24xx *pd;
        fc_port_t *fcport = sp->fcport;
        u16 *mb = sp->u.iocb_cmd.u.mbx.in_mb;
@@ -649,48 +648,7 @@ void qla24xx_async_gpdb_sp_done(void *s, int res)
 
        pd = (struct port_database_24xx *)sp->u.iocb_cmd.u.mbx.in;
 
-       /* Check for logged in state. */
-       if (pd->current_login_state != PDS_PRLI_COMPLETE &&
-           pd->last_login_state != PDS_PRLI_COMPLETE) {
-               ql_dbg(ql_dbg_mbx, vha, 0xffff,
-                   "Unable to verify login-state (%x/%x) for "
-                   "loop_id %x.\n", pd->current_login_state,
-                   pd->last_login_state, fcport->loop_id);
-               rval = QLA_FUNCTION_FAILED;
-               goto gpd_error_out;
-       }
-
-       if (fcport->loop_id == FC_NO_LOOP_ID ||
-           (memcmp(fcport->port_name, (uint8_t *)&zero, 8) &&
-               memcmp(fcport->port_name, pd->port_name, 8))) {
-               /* We lost the device mid way. */
-               rval = QLA_NOT_LOGGED_IN;
-               goto gpd_error_out;
-       }
-
-       /* Names are little-endian. */
-       memcpy(fcport->node_name, pd->node_name, WWN_SIZE);
-
-       /* Get port_id of device. */
-       fcport->d_id.b.domain = pd->port_id[0];
-       fcport->d_id.b.area = pd->port_id[1];
-       fcport->d_id.b.al_pa = pd->port_id[2];
-       fcport->d_id.b.rsvd_1 = 0;
-
-       /* If not target must be initiator or unknown type. */
-       if ((pd->prli_svc_param_word_3[0] & BIT_4) == 0)
-               fcport->port_type = FCT_INITIATOR;
-       else
-               fcport->port_type = FCT_TARGET;
-
-       /* Passback COS information. */
-       fcport->supported_classes = (pd->flags & PDF_CLASS_2) ?
-               FC_COS_CLASS2 : FC_COS_CLASS3;
-
-       if (pd->prli_svc_param_word_3[0] & BIT_7) {
-               fcport->flags |= FCF_CONF_COMP_SUPPORTED;
-               fcport->conf_compl_supported = 1;
-       }
+       rval = __qla24xx_parse_gpdb(vha, fcport, pd);
 
 gpd_error_out:
        memset(&ea, 0, sizeof(ea));
@@ -876,10 +834,14 @@ int qla24xx_fcport_handle_login(struct scsi_qla_host *vha, fc_port_t *fcport)
        fcport->login_retry--;
 
        if ((fcport->fw_login_state == DSC_LS_PLOGI_PEND) ||
-           (fcport->fw_login_state == DSC_LS_PLOGI_COMP) ||
            (fcport->fw_login_state == DSC_LS_PRLI_PEND))
                return 0;
 
+       if (fcport->fw_login_state == DSC_LS_PLOGI_COMP) {
+               if (time_before_eq(jiffies, fcport->plogi_nack_done_deadline))
+                       return 0;
+       }
+
        /* for pure Target Mode. Login will not be initiated */
        if (vha->host->active_mode == MODE_TARGET)
                return 0;
@@ -1041,10 +1003,14 @@ void qla24xx_handle_relogin_event(scsi_qla_host_t *vha,
                fcport->flags);
 
        if ((fcport->fw_login_state == DSC_LS_PLOGI_PEND) ||
-           (fcport->fw_login_state == DSC_LS_PLOGI_COMP) ||
            (fcport->fw_login_state == DSC_LS_PRLI_PEND))
                return;
 
+       if (fcport->fw_login_state == DSC_LS_PLOGI_COMP) {
+               if (time_before_eq(jiffies, fcport->plogi_nack_done_deadline))
+                       return;
+       }
+
        if (fcport->flags & FCF_ASYNC_SENT) {
                fcport->login_retry++;
                set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
@@ -1258,7 +1224,7 @@ qla24xx_abort_sp_done(void *ptr, int res)
        complete(&abt->u.abt.comp);
 }
 
-static int
+int
 qla24xx_async_abort_cmd(srb_t *cmd_sp)
 {
        scsi_qla_host_t *vha = cmd_sp->vha;
@@ -3212,6 +3178,7 @@ next_check:
        } else {
                ql_dbg(ql_dbg_init, vha, 0x00d3,
                    "Init Firmware -- success.\n");
+               ha->flags.fw_started = 1;
        }
 
        return (rval);
@@ -3374,8 +3341,8 @@ qla2x00_configure_hba(scsi_qla_host_t *vha)
        uint8_t       domain;
        char            connect_type[22];
        struct qla_hw_data *ha = vha->hw;
-       unsigned long flags;
        scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);
+       port_id_t id;
 
        /* Get host addresses. */
        rval = qla2x00_get_adapter_id(vha,
@@ -3453,13 +3420,11 @@ qla2x00_configure_hba(scsi_qla_host_t *vha)
 
        /* Save Host port and loop ID. */
        /* byte order - Big Endian */
-       vha->d_id.b.domain = domain;
-       vha->d_id.b.area = area;
-       vha->d_id.b.al_pa = al_pa;
-
-       spin_lock_irqsave(&ha->vport_slock, flags);
-       qlt_update_vp_map(vha, SET_AL_PA);
-       spin_unlock_irqrestore(&ha->vport_slock, flags);
+       id.b.domain = domain;
+       id.b.area = area;
+       id.b.al_pa = al_pa;
+       id.b.rsvd_1 = 0;
+       qlt_update_host_map(vha, id);
 
        if (!vha->flags.init_done)
                ql_log(ql_log_info, vha, 0x2010,
@@ -4036,6 +4001,7 @@ qla2x00_configure_loop(scsi_qla_host_t *vha)
                        atomic_set(&vha->loop_state, LOOP_READY);
                        ql_dbg(ql_dbg_disc, vha, 0x2069,
                            "LOOP READY.\n");
+                       ha->flags.fw_init_done = 1;
 
                        /*
                         * Process any ATIO queue entries that came in
@@ -5148,6 +5114,7 @@ qla2x00_update_fcports(scsi_qla_host_t *base_vha)
                        }
                }
                atomic_dec(&vha->vref_count);
+               wake_up(&vha->vref_waitq);
        }
        spin_unlock_irqrestore(&ha->vport_slock, flags);
 }
@@ -5526,6 +5493,11 @@ qla2x00_abort_isp_cleanup(scsi_qla_host_t *vha)
        if (!(IS_P3P_TYPE(ha)))
                ha->isp_ops->reset_chip(vha);
 
+       ha->flags.n2n_ae = 0;
+       ha->flags.lip_ae = 0;
+       ha->current_topology = 0;
+       ha->flags.fw_started = 0;
+       ha->flags.fw_init_done = 0;
        ha->chip_reset++;
 
        atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
@@ -6802,6 +6774,8 @@ qla2x00_try_to_stop_firmware(scsi_qla_host_t *vha)
                return;
        if (!ha->fw_major_version)
                return;
+       if (!ha->flags.fw_started)
+               return;
 
        ret = qla2x00_stop_firmware(vha);
        for (retries = 5; ret != QLA_SUCCESS && ret != QLA_FUNCTION_TIMEOUT &&
@@ -6815,6 +6789,9 @@ qla2x00_try_to_stop_firmware(scsi_qla_host_t *vha)
                    "Attempting retry of stop-firmware command.\n");
                ret = qla2x00_stop_firmware(vha);
        }
+
+       ha->flags.fw_started = 0;
+       ha->flags.fw_init_done = 0;
 }
 
 int
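
The PLOGI_COMP checks above are gated on a jiffies deadline armed when the
PLOGI NACK completes (plogi_nack_done_deadline = jiffies + HZ in
qla_target.c below), deferring relogin for roughly one second. The deadline
idiom in isolation, with illustrative names:

#include <linux/jiffies.h>

static inline void example_arm_deadline(unsigned long *deadline)
{
	*deadline = jiffies + HZ;		/* about one second out */
}

static inline bool example_deadline_pending(unsigned long deadline)
{
	/* time_before_eq() is an overflow-safe jiffies comparison. */
	return time_before_eq(jiffies, deadline);
}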
index 535079280288fbd6554a3ca28e620065b8b9fe98..ea027f6a7fd4e949c1a9a53aad0de00b0a7ee361 100644 (file)
@@ -889,7 +889,7 @@ qla24xx_get_one_block_sg(uint32_t blk_sz, struct qla2_sgx *sgx,
 
 int
 qla24xx_walk_and_build_sglist_no_difb(struct qla_hw_data *ha, srb_t *sp,
-       uint32_t *dsd, uint16_t tot_dsds, struct qla_tgt_cmd *tc)
+       uint32_t *dsd, uint16_t tot_dsds, struct qla_tc_param *tc)
 {
        void *next_dsd;
        uint8_t avail_dsds = 0;
@@ -898,7 +898,6 @@ qla24xx_walk_and_build_sglist_no_difb(struct qla_hw_data *ha, srb_t *sp,
        struct scatterlist *sg_prot;
        uint32_t *cur_dsd = dsd;
        uint16_t        used_dsds = tot_dsds;
-
        uint32_t        prot_int; /* protection interval */
        uint32_t        partial;
        struct qla2_sgx sgx;
@@ -966,7 +965,7 @@ alloc_and_fill:
                        } else {
                                list_add_tail(&dsd_ptr->list,
                                    &(tc->ctx->dsd_list));
-                               tc->ctx_dsd_alloced = 1;
+                               *tc->ctx_dsd_alloced = 1;
                        }
 
 
@@ -1005,7 +1004,7 @@ alloc_and_fill:
 
 int
 qla24xx_walk_and_build_sglist(struct qla_hw_data *ha, srb_t *sp, uint32_t *dsd,
-       uint16_t tot_dsds, struct qla_tgt_cmd *tc)
+       uint16_t tot_dsds, struct qla_tc_param *tc)
 {
        void *next_dsd;
        uint8_t avail_dsds = 0;
@@ -1066,7 +1065,7 @@ qla24xx_walk_and_build_sglist(struct qla_hw_data *ha, srb_t *sp, uint32_t *dsd,
                        } else {
                                list_add_tail(&dsd_ptr->list,
                                    &(tc->ctx->dsd_list));
-                               tc->ctx_dsd_alloced = 1;
+                               *tc->ctx_dsd_alloced = 1;
                        }
 
                        /* add new list to cmd iocb or last list */
@@ -1092,7 +1091,7 @@ qla24xx_walk_and_build_sglist(struct qla_hw_data *ha, srb_t *sp, uint32_t *dsd,
 
 int
 qla24xx_walk_and_build_prot_sglist(struct qla_hw_data *ha, srb_t *sp,
-       uint32_t *dsd, uint16_t tot_dsds, struct qla_tgt_cmd *tc)
+       uint32_t *dsd, uint16_t tot_dsds, struct qla_tc_param *tc)
 {
        void *next_dsd;
        uint8_t avail_dsds = 0;
@@ -1158,7 +1157,7 @@ qla24xx_walk_and_build_prot_sglist(struct qla_hw_data *ha, srb_t *sp,
                        } else {
                                list_add_tail(&dsd_ptr->list,
                                    &(tc->ctx->dsd_list));
-                               tc->ctx_dsd_alloced = 1;
+                               *tc->ctx_dsd_alloced = 1;
                        }
 
                        /* add new list to cmd iocb or last list */
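
The SGL walkers now take struct qla_tc_param instead of struct qla_tgt_cmd,
and ctx_dsd_alloced became a pointer the walkers write through. A hedged
sketch of how a target-mode caller might populate the adapter struct; the
field sources are illustrative, and the real caller lives in the DIF path:

static void example_fill_tc_param(struct qla_tc_param *tc,
				  struct scsi_qla_host *vha,
				  struct crc_context *ctx,
				  struct scatterlist *sg,
				  struct scatterlist *prot_sg,
				  uint32_t blk_sz, uint32_t bufflen,
				  uint8_t *dsd_alloced_flag)
{
	tc->vha = vha;
	tc->blk_sz = blk_sz;		/* protection interval block size */
	tc->bufflen = bufflen;
	tc->sg = sg;
	tc->prot_sg = prot_sg;
	tc->ctx = ctx;
	/* The walkers set *ctx_dsd_alloced when they chain extra DSD lists. */
	tc->ctx_dsd_alloced = dsd_alloced_flag;
}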
index 3c66ea29de2704fcefc71e965c071aa05c7bca78..3203367a4f423608ab69d75882d5a3141a1465a1 100644 (file)
@@ -708,6 +708,8 @@ skip_rio:
                    "mbx7=%xh.\n", mb[1], mb[2], mb[3], mbx);
 
                ha->isp_ops->fw_dump(vha, 1);
+               ha->flags.fw_init_done = 0;
+               ha->flags.fw_started = 0;
 
                if (IS_FWI2_CAPABLE(ha)) {
                        if (mb[1] == 0 && mb[2] == 0) {
@@ -761,6 +763,9 @@ skip_rio:
                break;
 
        case MBA_LIP_OCCURRED:          /* Loop Initialization Procedure */
+               ha->flags.lip_ae = 1;
+               ha->flags.n2n_ae = 0;
+
                ql_dbg(ql_dbg_async, vha, 0x5009,
                    "LIP occurred (%x).\n", mb[1]);
 
@@ -797,6 +802,10 @@ skip_rio:
                break;
 
        case MBA_LOOP_DOWN:             /* Loop Down Event */
+               ha->flags.n2n_ae = 0;
+               ha->flags.lip_ae = 0;
+               ha->current_topology = 0;
+
                mbx = (IS_QLA81XX(ha) || IS_QLA8031(ha))
                        ? RD_REG_WORD(&reg24->mailbox4) : 0;
                mbx = (IS_P3P_TYPE(ha)) ? RD_REG_WORD(&reg82->mailbox_out[4])
@@ -866,6 +875,9 @@ skip_rio:
 
        /* case MBA_DCBX_COMPLETE: */
        case MBA_POINT_TO_POINT:        /* Point-to-Point */
+               ha->flags.lip_ae = 0;
+               ha->flags.n2n_ae = 1;
+
                if (IS_QLA2100(ha))
                        break;
 
@@ -1620,9 +1632,9 @@ qla24xx_logio_entry(scsi_qla_host_t *vha, struct req_que *req,
                QLA_LOGIO_LOGIN_RETRIED : 0;
        if (logio->entry_status) {
                ql_log(ql_log_warn, fcport->vha, 0x5034,
-                   "Async-%s error entry - hdl=%x"
+                   "Async-%s error entry - %8phC hdl=%x"
                    "portid=%02x%02x%02x entry-status=%x.\n",
-                   type, sp->handle, fcport->d_id.b.domain,
+                   type, fcport->port_name, sp->handle, fcport->d_id.b.domain,
                    fcport->d_id.b.area, fcport->d_id.b.al_pa,
                    logio->entry_status);
                ql_dump_buffer(ql_dbg_async + ql_dbg_buffer, vha, 0x504d,
@@ -1633,8 +1645,9 @@ qla24xx_logio_entry(scsi_qla_host_t *vha, struct req_que *req,
 
        if (le16_to_cpu(logio->comp_status) == CS_COMPLETE) {
                ql_dbg(ql_dbg_async, fcport->vha, 0x5036,
-                   "Async-%s complete - hdl=%x portid=%02x%02x%02x "
-                   "iop0=%x.\n", type, sp->handle, fcport->d_id.b.domain,
+                   "Async-%s complete - %8phC hdl=%x portid=%02x%02x%02x "
+                   "iop0=%x.\n", type, fcport->port_name, sp->handle,
+                   fcport->d_id.b.domain,
                    fcport->d_id.b.area, fcport->d_id.b.al_pa,
                    le32_to_cpu(logio->io_parameter[0]));
 
@@ -1674,6 +1687,17 @@ qla24xx_logio_entry(scsi_qla_host_t *vha, struct req_que *req,
        case LSC_SCODE_NPORT_USED:
                data[0] = MBS_LOOP_ID_USED;
                break;
+       case LSC_SCODE_CMD_FAILED:
+               if (iop[1] == 0x0606) {
+                       /*
+                        * PLOGI/PRLI completed: we must have received a
+                        * PLOGI/PRLI that the target side acked.
+                        */
+                       data[0] = MBS_COMMAND_COMPLETE;
+                       goto logio_done;
+               }
+               data[0] = MBS_COMMAND_ERROR;
+               break;
        case LSC_SCODE_NOXCB:
                vha->hw->exch_starvation++;
                if (vha->hw->exch_starvation > 5) {
@@ -1695,8 +1719,9 @@ qla24xx_logio_entry(scsi_qla_host_t *vha, struct req_que *req,
        }
 
        ql_dbg(ql_dbg_async, fcport->vha, 0x5037,
-           "Async-%s failed - hdl=%x portid=%02x%02x%02x comp=%x "
-           "iop0=%x iop1=%x.\n", type, sp->handle, fcport->d_id.b.domain,
+           "Async-%s failed - %8phC hdl=%x portid=%02x%02x%02x comp=%x "
+           "iop0=%x iop1=%x.\n", type, fcport->port_name,
+               sp->handle, fcport->d_id.b.domain,
            fcport->d_id.b.area, fcport->d_id.b.al_pa,
            le16_to_cpu(logio->comp_status),
            le32_to_cpu(logio->io_parameter[0]),
@@ -2679,7 +2704,7 @@ qla24xx_abort_iocb_entry(scsi_qla_host_t *vha, struct req_que *req,
                return;
 
        abt = &sp->u.iocb_cmd;
-       abt->u.abt.comp_status = le32_to_cpu(pkt->nport_handle);
+       abt->u.abt.comp_status = le16_to_cpu(pkt->nport_handle);
        sp->done(sp, 0);
 }
 
@@ -2693,7 +2718,7 @@ void qla24xx_process_response_queue(struct scsi_qla_host *vha,
        struct sts_entry_24xx *pkt;
        struct qla_hw_data *ha = vha->hw;
 
-       if (!vha->flags.online)
+       if (!ha->flags.fw_started)
                return;
 
        while (rsp->ring_ptr->signature != RESPONSE_PROCESSED) {
index 35079f4174179967d99568a4491713d82d96c7a3..a113ab3592a7f86eb16ce8f76d82337557cab029 100644 (file)
 #include <linux/delay.h>
 #include <linux/gfp.h>
 
+static struct mb_cmd_name {
+       uint16_t cmd;
+       const char *str;
+} mb_str[] = {
+       {MBC_GET_PORT_DATABASE,         "GPDB"},
+       {MBC_GET_ID_LIST,               "GIDList"},
+       {MBC_GET_LINK_PRIV_STATS,       "Stats"},
+};
+
+static const char *mb_to_str(uint16_t cmd)
+{
+       int i;
+       struct mb_cmd_name *e;
+
+       for (i = 0; i < ARRAY_SIZE(mb_str); i++) {
+               e = mb_str + i;
+               if (cmd == e->cmd)
+                       return e->str;
+       }
+       return "unknown";
+}
+
 static struct rom_cmd {
        uint16_t cmd;
 } rom_cmds[] = {
@@ -2818,7 +2840,7 @@ qla2x00_get_link_status(scsi_qla_host_t *vha, uint16_t loop_id,
 
 int
 qla24xx_get_isp_stats(scsi_qla_host_t *vha, struct link_statistics *stats,
-    dma_addr_t stats_dma, uint options)
+    dma_addr_t stats_dma, uint16_t options)
 {
        int rval;
        mbx_cmd_t mc;
@@ -2828,19 +2850,17 @@ qla24xx_get_isp_stats(scsi_qla_host_t *vha, struct link_statistics *stats,
        ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1088,
            "Entered %s.\n", __func__);
 
-       mcp->mb[0] = MBC_GET_LINK_PRIV_STATS;
-       mcp->mb[2] = MSW(stats_dma);
-       mcp->mb[3] = LSW(stats_dma);
-       mcp->mb[6] = MSW(MSD(stats_dma));
-       mcp->mb[7] = LSW(MSD(stats_dma));
-       mcp->mb[8] = sizeof(struct link_statistics) / 4;
-       mcp->mb[9] = vha->vp_idx;
-       mcp->mb[10] = options;
-       mcp->out_mb = MBX_10|MBX_9|MBX_8|MBX_7|MBX_6|MBX_3|MBX_2|MBX_0;
-       mcp->in_mb = MBX_2|MBX_1|MBX_0;
-       mcp->tov = MBX_TOV_SECONDS;
-       mcp->flags = IOCTL_CMD;
-       rval = qla2x00_mailbox_command(vha, mcp);
+       memset(&mc, 0, sizeof(mc));
+       mc.mb[0] = MBC_GET_LINK_PRIV_STATS;
+       mc.mb[2] = MSW(stats_dma);
+       mc.mb[3] = LSW(stats_dma);
+       mc.mb[6] = MSW(MSD(stats_dma));
+       mc.mb[7] = LSW(MSD(stats_dma));
+       mc.mb[8] = sizeof(struct link_statistics) / 4;
+       mc.mb[9] = cpu_to_le16(vha->vp_idx);
+       mc.mb[10] = cpu_to_le16(options);
+
+       rval = qla24xx_send_mb_cmd(vha, &mc);
 
        if (rval == QLA_SUCCESS) {
                if (mcp->mb[0] != MBS_COMMAND_COMPLETE) {
@@ -3603,6 +3623,7 @@ qla24xx_report_id_acquisition(scsi_qla_host_t *vha,
        scsi_qla_host_t *vp = NULL;
        unsigned long   flags;
        int found;
+       port_id_t id;
 
        ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10b6,
            "Entered %s.\n", __func__);
@@ -3610,28 +3631,27 @@ qla24xx_report_id_acquisition(scsi_qla_host_t *vha,
        if (rptid_entry->entry_status != 0)
                return;
 
+       id.b.domain = rptid_entry->port_id[2];
+       id.b.area   = rptid_entry->port_id[1];
+       id.b.al_pa  = rptid_entry->port_id[0];
+       id.b.rsvd_1 = 0;
+
        if (rptid_entry->format == 0) {
                /* loop */
-               ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10b7,
+               ql_dbg(ql_dbg_async, vha, 0x10b7,
                    "Format 0 : Number of VPs setup %d, number of "
                    "VPs acquired %d.\n", rptid_entry->vp_setup,
                    rptid_entry->vp_acquired);
-               ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10b8,
+               ql_dbg(ql_dbg_async, vha, 0x10b8,
                    "Primary port id %02x%02x%02x.\n",
                    rptid_entry->port_id[2], rptid_entry->port_id[1],
                    rptid_entry->port_id[0]);
 
-               vha->d_id.b.domain = rptid_entry->port_id[2];
-               vha->d_id.b.area = rptid_entry->port_id[1];
-               vha->d_id.b.al_pa = rptid_entry->port_id[0];
-
-               spin_lock_irqsave(&ha->vport_slock, flags);
-               qlt_update_vp_map(vha, SET_AL_PA);
-               spin_unlock_irqrestore(&ha->vport_slock, flags);
+               qlt_update_host_map(vha, id);
 
        } else if (rptid_entry->format == 1) {
                /* fabric */
-               ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10b9,
+               ql_dbg(ql_dbg_async, vha, 0x10b9,
                    "Format 1: VP[%d] enabled - status %d - with "
                    "port id %02x%02x%02x.\n", rptid_entry->vp_idx,
                        rptid_entry->vp_status,
@@ -3653,12 +3673,7 @@ qla24xx_report_id_acquisition(scsi_qla_host_t *vha,
                                            WWN_SIZE);
                                }
 
-                               vha->d_id.b.domain = rptid_entry->port_id[2];
-                               vha->d_id.b.area = rptid_entry->port_id[1];
-                               vha->d_id.b.al_pa = rptid_entry->port_id[0];
-                               spin_lock_irqsave(&ha->vport_slock, flags);
-                               qlt_update_vp_map(vha, SET_AL_PA);
-                               spin_unlock_irqrestore(&ha->vport_slock, flags);
+                               qlt_update_host_map(vha, id);
                        }
 
                        fc_host_port_name(vha->host) =
@@ -3694,12 +3709,7 @@ qla24xx_report_id_acquisition(scsi_qla_host_t *vha,
                        if (!found)
                                return;
 
-                       vp->d_id.b.domain = rptid_entry->port_id[2];
-                       vp->d_id.b.area =  rptid_entry->port_id[1];
-                       vp->d_id.b.al_pa = rptid_entry->port_id[0];
-                       spin_lock_irqsave(&ha->vport_slock, flags);
-                       qlt_update_vp_map(vp, SET_AL_PA);
-                       spin_unlock_irqrestore(&ha->vport_slock, flags);
+                       qlt_update_host_map(vp, id);
 
                        /*
                         * Cannot configure here as we are still sitting on the
@@ -5827,3 +5837,225 @@ qla26xx_dport_diagnostics(scsi_qla_host_t *vha,
 
        return rval;
 }
+
+static void qla2x00_async_mb_sp_done(void *s, int res)
+{
+       struct srb *sp = s;
+
+       sp->u.iocb_cmd.u.mbx.rc = res;
+
+       complete(&sp->u.iocb_cmd.u.mbx.comp);
+       /* don't free sp here. Let the caller do the free */
+}
+
+/*
+ * This mailbox uses the IOCB interface to send a MB command.
+ * This allows non-critical (non chip-setup) commands to go
+ * out in parallel.
+ */
+int qla24xx_send_mb_cmd(struct scsi_qla_host *vha, mbx_cmd_t *mcp)
+{
+       int rval = QLA_FUNCTION_FAILED;
+       srb_t *sp;
+       struct srb_iocb *c;
+
+       if (!vha->hw->flags.fw_started)
+               goto done;
+
+       sp = qla2x00_get_sp(vha, NULL, GFP_KERNEL);
+       if (!sp)
+               goto done;
+
+       sp->type = SRB_MB_IOCB;
+       sp->name = mb_to_str(mcp->mb[0]);
+
+       qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);
+
+       memcpy(sp->u.iocb_cmd.u.mbx.out_mb, mcp->mb, SIZEOF_IOCB_MB_REG);
+
+       c = &sp->u.iocb_cmd;
+       c->timeout = qla2x00_async_iocb_timeout;
+       init_completion(&c->u.mbx.comp);
+
+       sp->done = qla2x00_async_mb_sp_done;
+
+       rval = qla2x00_start_sp(sp);
+       if (rval != QLA_SUCCESS) {
+               ql_dbg(ql_dbg_mbx, vha, 0xffff,
+                   "%s: %s Failed submission. %x.\n",
+                   __func__, sp->name, rval);
+               goto done_free_sp;
+       }
+
+       ql_dbg(ql_dbg_mbx, vha, 0xffff, "MB:%s hndl %x submitted\n",
+           sp->name, sp->handle);
+
+       wait_for_completion(&c->u.mbx.comp);
+       memcpy(mcp->mb, sp->u.iocb_cmd.u.mbx.in_mb, SIZEOF_IOCB_MB_REG);
+
+       rval = c->u.mbx.rc;
+       switch (rval) {
+       case QLA_FUNCTION_TIMEOUT:
+               ql_dbg(ql_dbg_mbx, vha, 0xffff, "%s: %s Timeout. %x.\n",
+                   __func__, sp->name, rval);
+               break;
+       case  QLA_SUCCESS:
+               ql_dbg(ql_dbg_mbx, vha, 0xffff, "%s: %s done.\n",
+                   __func__, sp->name);
+               sp->free(sp);
+               break;
+       default:
+               ql_dbg(ql_dbg_mbx, vha, 0xffff, "%s: %s Failed. %x.\n",
+                   __func__, sp->name, rval);
+               sp->free(sp);
+               break;
+       }
+
+       return rval;
+
+done_free_sp:
+       sp->free(sp);
+done:
+       return rval;
+}
+
+/*
+ * qla24xx_gpdb_wait
+ * NOTE: Do not call this routine from DPC thread
+ */
+int qla24xx_gpdb_wait(struct scsi_qla_host *vha, fc_port_t *fcport, u8 opt)
+{
+       int rval = QLA_FUNCTION_FAILED;
+       dma_addr_t pd_dma;
+       struct port_database_24xx *pd;
+       struct qla_hw_data *ha = vha->hw;
+       mbx_cmd_t mc;
+
+       if (!vha->hw->flags.fw_started)
+               goto done;
+
+       pd = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &pd_dma);
+       if (!pd) {
+               ql_log(ql_log_warn, vha, 0xffff,
+                       "Failed to allocate port database structure.\n");
+               goto done_free_sp;
+       }
+       memset(pd, 0, max(PORT_DATABASE_SIZE, PORT_DATABASE_24XX_SIZE));
+
+       memset(&mc, 0, sizeof(mc));
+       mc.mb[0] = MBC_GET_PORT_DATABASE;
+       mc.mb[1] = cpu_to_le16(fcport->loop_id);
+       mc.mb[2] = MSW(pd_dma);
+       mc.mb[3] = LSW(pd_dma);
+       mc.mb[6] = MSW(MSD(pd_dma));
+       mc.mb[7] = LSW(MSD(pd_dma));
+       mc.mb[9] = cpu_to_le16(vha->vp_idx);
+       mc.mb[10] = cpu_to_le16((uint16_t)opt);
+
+       rval = qla24xx_send_mb_cmd(vha, &mc);
+       if (rval != QLA_SUCCESS) {
+               ql_dbg(ql_dbg_mbx, vha, 0xffff,
+                   "%s: %8phC fail\n", __func__, fcport->port_name);
+               goto done_free_sp;
+       }
+
+       rval = __qla24xx_parse_gpdb(vha, fcport, pd);
+
+       ql_dbg(ql_dbg_mbx, vha, 0xffff, "%s: %8phC done\n",
+           __func__, fcport->port_name);
+
+done_free_sp:
+       if (pd)
+               dma_pool_free(ha->s_dma_pool, pd, pd_dma);
+done:
+       return rval;
+}
+
+int __qla24xx_parse_gpdb(struct scsi_qla_host *vha, fc_port_t *fcport,
+    struct port_database_24xx *pd)
+{
+       int rval = QLA_SUCCESS;
+       uint64_t zero = 0;
+
+       /* Check for logged in state. */
+       if (pd->current_login_state != PDS_PRLI_COMPLETE &&
+               pd->last_login_state != PDS_PRLI_COMPLETE) {
+               ql_dbg(ql_dbg_mbx, vha, 0xffff,
+                          "Unable to verify login-state (%x/%x) for "
+                          "loop_id %x.\n", pd->current_login_state,
+                          pd->last_login_state, fcport->loop_id);
+               rval = QLA_FUNCTION_FAILED;
+               goto gpd_error_out;
+       }
+
+       if (fcport->loop_id == FC_NO_LOOP_ID ||
+           (memcmp(fcport->port_name, (uint8_t *)&zero, 8) &&
+            memcmp(fcport->port_name, pd->port_name, 8))) {
+               /* We lost the device mid way. */
+               rval = QLA_NOT_LOGGED_IN;
+               goto gpd_error_out;
+       }
+
+       /* Names are little-endian. */
+       memcpy(fcport->node_name, pd->node_name, WWN_SIZE);
+       memcpy(fcport->port_name, pd->port_name, WWN_SIZE);
+
+       /* Get port_id of device. */
+       fcport->d_id.b.domain = pd->port_id[0];
+       fcport->d_id.b.area = pd->port_id[1];
+       fcport->d_id.b.al_pa = pd->port_id[2];
+       fcport->d_id.b.rsvd_1 = 0;
+
+       /* If not target must be initiator or unknown type. */
+       if ((pd->prli_svc_param_word_3[0] & BIT_4) == 0)
+               fcport->port_type = FCT_INITIATOR;
+       else
+               fcport->port_type = FCT_TARGET;
+
+       /* Passback COS information. */
+       fcport->supported_classes = (pd->flags & PDF_CLASS_2) ?
+               FC_COS_CLASS2 : FC_COS_CLASS3;
+
+       if (pd->prli_svc_param_word_3[0] & BIT_7) {
+               fcport->flags |= FCF_CONF_COMP_SUPPORTED;
+               fcport->conf_compl_supported = 1;
+       }
+
+gpd_error_out:
+       return rval;
+}
+
+/*
+ * qla24xx_gidlist_wait
+ * NOTE: Do not call this routine from the DPC thread.
+ */
+int qla24xx_gidlist_wait(struct scsi_qla_host *vha,
+       void *id_list, dma_addr_t id_list_dma, uint16_t *entries)
+{
+       int rval = QLA_FUNCTION_FAILED;
+       mbx_cmd_t mc;
+
+       if (!vha->hw->flags.fw_started)
+               goto done;
+
+       memset(&mc, 0, sizeof(mc));
+       mc.mb[0] = MBC_GET_ID_LIST;
+       mc.mb[2] = MSW(id_list_dma);
+       mc.mb[3] = LSW(id_list_dma);
+       mc.mb[6] = MSW(MSD(id_list_dma));
+       mc.mb[7] = LSW(MSD(id_list_dma));
+       mc.mb[8] = 0;
+       mc.mb[9] = cpu_to_le16(vha->vp_idx);
+
+       rval = qla24xx_send_mb_cmd(vha, &mc);
+       if (rval != QLA_SUCCESS) {
+               ql_dbg(ql_dbg_mbx, vha, 0xffff,
+                       "%s:  fail\n", __func__);
+       } else {
+               *entries = mc.mb[1];
+               ql_dbg(ql_dbg_mbx, vha, 0xffff,
+                       "%s:  done\n", __func__);
+       }
+done:
+       return rval;
+}
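
qla24xx_send_mb_cmd() wraps the mailbox in an SRB so it travels the request
queue and completes asynchronously, while the submitter sleeps on a
completion (hence the "not from the DPC thread" notes). The handshake at its
core, reduced to a standalone sketch with illustrative names:

#include <linux/completion.h>

struct example_req {
	struct completion comp;
	int rc;
};

/* Interrupt-side done() callback: record the result and wake the waiter. */
static void example_done(struct example_req *req, int res)
{
	req->rc = res;
	complete(&req->comp);
}

/* Submitter: must be allowed to sleep, exactly like qla24xx_send_mb_cmd(). */
static int example_submit_and_wait(struct example_req *req)
{
	init_completion(&req->comp);
	/* ... hand the request to the hardware here ... */
	wait_for_completion(&req->comp);
	return req->rc;
}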
index c6d6f0d912ff75ffaf9b9d810f81af735e39549b..09a490c98763a9406a6eafd3082df8f8ed50a149 100644 (file)
@@ -74,13 +74,14 @@ qla24xx_deallocate_vp_id(scsi_qla_host_t *vha)
         * ensures no active vp_list traversal while the vport is removed
         * from the queue)
         */
-       spin_lock_irqsave(&ha->vport_slock, flags);
-       while (atomic_read(&vha->vref_count)) {
-               spin_unlock_irqrestore(&ha->vport_slock, flags);
-
-               msleep(500);
+       wait_event_timeout(vha->vref_waitq, !atomic_read(&vha->vref_count),
+           10*HZ);
 
-               spin_lock_irqsave(&ha->vport_slock, flags);
+       spin_lock_irqsave(&ha->vport_slock, flags);
+       if (atomic_read(&vha->vref_count)) {
+               ql_dbg(ql_dbg_vport, vha, 0xfffa,
+                   "vha->vref_count=%u timeout\n", vha->vref_count.counter);
+               vha->vref_count = (atomic_t)ATOMIC_INIT(0);
        }
        list_del(&vha->list);
        qlt_update_vp_map(vha, RESET_VP_IDX);
@@ -269,6 +270,7 @@ qla2x00_alert_all_vps(struct rsp_que *rsp, uint16_t *mb)
 
                        spin_lock_irqsave(&ha->vport_slock, flags);
                        atomic_dec(&vha->vref_count);
+                       wake_up(&vha->vref_waitq);
                }
                i++;
        }
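
This replaces the old msleep(500) polling loop: deletion now sleeps on
vref_waitq until the reference count drains (each decrement does a
wake_up(), per the qla_def.h macros above) or a 10 second timeout fires.
The consumer half of the pattern, as an illustrative sketch:

#include <linux/wait.h>
#include <linux/atomic.h>
#include <linux/printk.h>

static void example_drain_refs(wait_queue_head_t *wq, atomic_t *ref)
{
	/* Sleep until all holders dropped their references, or 10s. */
	wait_event_timeout(*wq, !atomic_read(ref), 10 * HZ);
	if (atomic_read(ref))
		pr_warn("example: %d references leaked past timeout\n",
			atomic_read(ref));
}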
index 1fed235a1b4a03172a4717a360a90f29ae383a4f..3e7011757c8267022744e19778f49cc4db286822 100644 (file)
@@ -1651,7 +1651,8 @@ qla2x00_abort_all_cmds(scsi_qla_host_t *vha, int res)
                                /* Don't abort commands in adapter during EEH
                                 * recovery as it's not accessible/responding.
                                 */
-                               if (GET_CMD_SP(sp) && !ha->flags.eeh_busy) {
+                               if (GET_CMD_SP(sp) && !ha->flags.eeh_busy &&
+                                   (sp->type == SRB_SCSI_CMD)) {
                                        /* Get a reference to the sp and drop the lock.
                                         * The reference ensures this sp->done() call
                                         * - and not the call in qla2xxx_eh_abort() -
@@ -2560,6 +2561,20 @@ qla2xxx_scan_finished(struct Scsi_Host *shost, unsigned long time)
        return atomic_read(&vha->loop_state) == LOOP_READY;
 }
 
+static void qla2x00_iocb_work_fn(struct work_struct *work)
+{
+       struct scsi_qla_host *vha = container_of(work,
+               struct scsi_qla_host, iocb_work);
+       int cnt = 0;
+
+       while (!list_empty(&vha->work_list)) {
+               qla2x00_do_work(vha);
+               cnt++;
+               if (cnt > 10)
+                       break;
+       }
+}
+
 /*
  * PCI driver interface
  */
@@ -3078,6 +3093,7 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
         */
        qla2xxx_wake_dpc(base_vha);
 
+       INIT_WORK(&base_vha->iocb_work, qla2x00_iocb_work_fn);
        INIT_WORK(&ha->board_disable, qla2x00_disable_board_on_pci_error);
 
        if (IS_QLA8031(ha) || IS_MCTP_CAPABLE(ha)) {
@@ -3469,6 +3485,7 @@ qla2x00_remove_one(struct pci_dev *pdev)
        qla2x00_free_sysfs_attr(base_vha, true);
 
        fc_remove_host(base_vha->host);
+       qlt_remove_target_resources(ha);
 
        scsi_remove_host(base_vha->host);
 
@@ -4268,6 +4285,7 @@ struct scsi_qla_host *qla2x00_create_host(struct scsi_host_template *sht,
        spin_lock_init(&vha->work_lock);
        spin_lock_init(&vha->cmd_list_lock);
        init_waitqueue_head(&vha->fcport_waitQ);
+       init_waitqueue_head(&vha->vref_waitq);
 
        vha->gnl.size = sizeof(struct get_name_list_extended) *
                        (ha->max_loop_id + 1);
@@ -4319,7 +4337,11 @@ qla2x00_post_work(struct scsi_qla_host *vha, struct qla_work_evt *e)
        spin_lock_irqsave(&vha->work_lock, flags);
        list_add_tail(&e->list, &vha->work_list);
        spin_unlock_irqrestore(&vha->work_lock, flags);
-       qla2xxx_wake_dpc(vha);
+
+       if (QLA_EARLY_LINKUP(vha->hw))
+               schedule_work(&vha->iocb_work);
+       else
+               qla2xxx_wake_dpc(vha);
 
        return QLA_SUCCESS;
 }
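
With QLA_EARLY_LINKUP true (firmware started but init not complete), posted
work is drained by the new iocb_work handler instead of the DPC thread; the
handler caps itself at roughly ten passes so a burst of events cannot
monopolize the shared kworker. The bounded-drain shape in isolation, with
illustrative stand-ins for the driver's work list:

#include <linux/workqueue.h>
#include <linux/list.h>

static LIST_HEAD(example_work_list);

static void example_do_one_pass(void)
{
	/* Stand-in for qla2x00_do_work(): consume entries from the list. */
}

static void example_iocb_work_fn(struct work_struct *work)
{
	int cnt = 0;

	while (!list_empty(&example_work_list)) {
		example_do_one_pass();
		if (++cnt > 10)
			break;	/* leave the rest for the next scheduling */
	}
}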
index 45f5077684f0a5b39c0645ddee831bf4071667d4..0e03ca2ab3e52358c817cdd2cdc667ba2bfb1ba3 100644 (file)
@@ -130,6 +130,9 @@ static void qlt_send_term_imm_notif(struct scsi_qla_host *vha,
 static struct fc_port *qlt_create_sess(struct scsi_qla_host *vha,
        fc_port_t *fcport, bool local);
 void qlt_unreg_sess(struct fc_port *sess);
+static void qlt_24xx_handle_abts(struct scsi_qla_host *,
+       struct abts_recv_from_24xx *);
+
 /*
  * Global Variables
  */
@@ -140,6 +143,20 @@ static struct workqueue_struct *qla_tgt_wq;
 static DEFINE_MUTEX(qla_tgt_mutex);
 static LIST_HEAD(qla_tgt_glist);
 
+static const char *prot_op_str(u32 prot_op)
+{
+       switch (prot_op) {
+       case TARGET_PROT_NORMAL:        return "NORMAL";
+       case TARGET_PROT_DIN_INSERT:    return "DIN_INSERT";
+       case TARGET_PROT_DOUT_INSERT:   return "DOUT_INSERT";
+       case TARGET_PROT_DIN_STRIP:     return "DIN_STRIP";
+       case TARGET_PROT_DOUT_STRIP:    return "DOUT_STRIP";
+       case TARGET_PROT_DIN_PASS:      return "DIN_PASS";
+       case TARGET_PROT_DOUT_PASS:     return "DOUT_PASS";
+       default:                        return "UNKNOWN";
+       }
+}
+
 /* This API intentionally takes dest as a parameter, rather than returning
  * int value to avoid caller forgetting to issue wmb() after the store */
 void qlt_do_generation_tick(struct scsi_qla_host *vha, int *dest)
@@ -170,21 +187,23 @@ static inline
 struct scsi_qla_host *qlt_find_host_by_d_id(struct scsi_qla_host *vha,
        uint8_t *d_id)
 {
-       struct qla_hw_data *ha = vha->hw;
-       uint8_t vp_idx;
-
-       if ((vha->d_id.b.area != d_id[1]) || (vha->d_id.b.domain != d_id[0]))
-               return NULL;
+       struct scsi_qla_host *host;
+       uint32_t key = 0;
 
-       if (vha->d_id.b.al_pa == d_id[2])
+       if ((vha->d_id.b.area == d_id[1]) && (vha->d_id.b.domain == d_id[0]) &&
+           (vha->d_id.b.al_pa == d_id[2]))
                return vha;
 
-       BUG_ON(ha->tgt.tgt_vp_map == NULL);
-       vp_idx = ha->tgt.tgt_vp_map[d_id[2]].idx;
-       if (likely(test_bit(vp_idx, ha->vp_idx_map)))
-               return ha->tgt.tgt_vp_map[vp_idx].vha;
+       key  = (uint32_t)d_id[0] << 16;
+       key |= (uint32_t)d_id[1] <<  8;
+       key |= (uint32_t)d_id[2];
 
-       return NULL;
+       host = btree_lookup32(&vha->hw->tgt.host_map, key);
+       if (!host)
+               ql_dbg(ql_dbg_tgt_mgt, vha, 0xffff,
+                          "Unable to find host %06x\n", key);
+
+       return host;
 }
 
 static inline
@@ -389,6 +408,8 @@ static bool qlt_24xx_atio_pkt_all_vps(struct scsi_qla_host *vha,
                        (struct abts_recv_from_24xx *)atio;
                struct scsi_qla_host *host = qlt_find_host_by_vp_idx(vha,
                        entry->vp_index);
+               unsigned long flags;
+
                if (unlikely(!host)) {
                        ql_dbg(ql_dbg_tgt, vha, 0xffff,
                            "qla_target(%d): Response pkt (ABTS_RECV_24XX) "
@@ -396,9 +417,12 @@ static bool qlt_24xx_atio_pkt_all_vps(struct scsi_qla_host *vha,
                            vha->vp_idx, entry->vp_index);
                        break;
                }
-               qlt_response_pkt(host, (response_t *)atio);
+               if (!ha_locked)
+                       spin_lock_irqsave(&host->hw->hardware_lock, flags);
+               qlt_24xx_handle_abts(host, (struct abts_recv_from_24xx *)atio);
+               if (!ha_locked)
+                       spin_unlock_irqrestore(&host->hw->hardware_lock, flags);
                break;
-
        }
 
        /* case PUREX_IOCB_TYPE: ql2xmvasynctoatio */
@@ -554,6 +578,7 @@ void qla2x00_async_nack_sp_done(void *s, int res)
                sp->fcport->login_gen++;
                sp->fcport->fw_login_state = DSC_LS_PLOGI_COMP;
                sp->fcport->logout_on_delete = 1;
+               sp->fcport->plogi_nack_done_deadline = jiffies + HZ;
                break;
 
        case SRB_NACK_PRLI:
@@ -613,6 +638,7 @@ int qla24xx_async_notify_ack(scsi_qla_host_t *vha, fc_port_t *fcport,
                break;
        case SRB_NACK_PRLI:
                fcport->fw_login_state = DSC_LS_PRLI_PEND;
+               fcport->deleted = 0;
                c = "PRLI";
                break;
        case SRB_NACK_LOGO:
@@ -1215,7 +1241,7 @@ static int qla24xx_get_loop_id(struct scsi_qla_host *vha, const uint8_t *s_id,
        }
 
        /* Get list of logged in devices */
-       rc = qla2x00_get_id_list(vha, gid_list, gid_list_dma, &entries);
+       rc = qla24xx_gidlist_wait(vha, gid_list, gid_list_dma, &entries);
        if (rc != QLA_SUCCESS) {
                ql_dbg(ql_dbg_tgt_mgt, vha, 0xf045,
                    "qla_target(%d): get_id_list() failed: %x\n",
@@ -1551,6 +1577,9 @@ static void qlt_send_notify_ack(struct scsi_qla_host *vha,
        request_t *pkt;
        struct nack_to_isp *nack;
 
+       if (!ha->flags.fw_started)
+               return;
+
        ql_dbg(ql_dbg_tgt, vha, 0xe004, "Sending NOTIFY_ACK (ha=%p)\n", ha);
 
        /* Send marker if required */
@@ -2013,6 +2042,70 @@ void qlt_free_mcmd(struct qla_tgt_mgmt_cmd *mcmd)
 }
 EXPORT_SYMBOL(qlt_free_mcmd);
 
+/*
+ * ha->hardware_lock is supposed to be held on entry. Might drop it, then
+ * reacquire.
+ */
+void qlt_send_resp_ctio(scsi_qla_host_t *vha, struct qla_tgt_cmd *cmd,
+    uint8_t scsi_status, uint8_t sense_key, uint8_t asc, uint8_t ascq)
+{
+       struct atio_from_isp *atio = &cmd->atio;
+       struct ctio7_to_24xx *ctio;
+       uint16_t temp;
+
+       ql_dbg(ql_dbg_tgt_dif, vha, 0x3066,
+           "Sending response CTIO7 (vha=%p, atio=%p, scsi_status=%02x, "
+           "sense_key=%02x, asc=%02x, ascq=%02x",
+           vha, atio, scsi_status, sense_key, asc, ascq);
+
+       ctio = (struct ctio7_to_24xx *)qla2x00_alloc_iocbs(vha, NULL);
+       if (!ctio) {
+               ql_dbg(ql_dbg_async, vha, 0x3067,
+                   "qla2x00t(%ld): %s failed: unable to allocate request packet",
+                   vha->host_no, __func__);
+               goto out;
+       }
+
+       ctio->entry_type = CTIO_TYPE7;
+       ctio->entry_count = 1;
+       ctio->handle = QLA_TGT_SKIP_HANDLE;
+       ctio->nport_handle = cmd->sess->loop_id;
+       ctio->timeout = cpu_to_le16(QLA_TGT_TIMEOUT);
+       ctio->vp_index = vha->vp_idx;
+       ctio->initiator_id[0] = atio->u.isp24.fcp_hdr.s_id[2];
+       ctio->initiator_id[1] = atio->u.isp24.fcp_hdr.s_id[1];
+       ctio->initiator_id[2] = atio->u.isp24.fcp_hdr.s_id[0];
+       ctio->exchange_addr = atio->u.isp24.exchange_addr;
+       ctio->u.status1.flags = (atio->u.isp24.attr << 9) |
+           cpu_to_le16(CTIO7_FLAGS_STATUS_MODE_1 | CTIO7_FLAGS_SEND_STATUS);
+       temp = be16_to_cpu(atio->u.isp24.fcp_hdr.ox_id);
+       ctio->u.status1.ox_id = cpu_to_le16(temp);
+       ctio->u.status1.scsi_status =
+           cpu_to_le16(SS_RESPONSE_INFO_LEN_VALID | scsi_status);
+       ctio->u.status1.response_len = cpu_to_le16(18);
+       ctio->u.status1.residual = cpu_to_le32(get_datalen_for_atio(atio));
+
+       if (ctio->u.status1.residual != 0)
+               ctio->u.status1.scsi_status |=
+                   cpu_to_le16(SS_RESIDUAL_UNDER);
+
+       /* Response code and sense key */
+       put_unaligned_le32(((0x70 << 24) | (sense_key << 8)),
+           (&ctio->u.status1.sense_data)[0]);
+       /* Additional sense length */
+       put_unaligned_le32(0x0a, (&ctio->u.status1.sense_data)[1]);
+       /* ASC and ASCQ */
+       put_unaligned_le32(((asc << 24) | (ascq << 16)),
+           (&ctio->u.status1.sense_data)[3]);
+
+       /* Memory Barrier */
+       wmb();
+
+       qla2x00_start_iocbs(vha, vha->req);
+out:
+       return;
+}
+
 /* callback from target fabric module code */
 void qlt_xmit_tm_rsp(struct qla_tgt_mgmt_cmd *mcmd)
 {
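
As an aside, a hedged sketch (ordinary C, not driver code) of the 18-byte
fixed-format sense buffer that the three dword stores in qlt_send_resp_ctio()
produce, assuming the firmware byte-swaps each 32-bit sense word onto the
wire, which is what the little-endian packing above implies:

        #include <stdint.h>
        #include <string.h>

        /* Equivalent fixed-format sense bytes, offsets per SPC. */
        static void example_fixed_sense(uint8_t s[18], uint8_t sense_key,
                                        uint8_t asc, uint8_t ascq)
        {
                memset(s, 0, 18);
                s[0]  = 0x70;           /* current errors, fixed format */
                s[2]  = sense_key;
                s[7]  = 0x0a;           /* additional sense length: 10  */
                s[12] = asc;
                s[13] = ascq;
        }
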
@@ -2261,7 +2354,7 @@ static int qlt_24xx_build_ctio_pkt(struct qla_tgt_prm *prm,
                 */
                return -EAGAIN;
        } else
-               ha->tgt.cmds[h-1] = prm->cmd;
+               ha->tgt.cmds[h - 1] = prm->cmd;
 
        pkt->handle = h | CTIO_COMPLETION_HANDLE_MARK;
        pkt->nport_handle = prm->cmd->loop_id;
@@ -2391,6 +2484,50 @@ static inline int qlt_has_data(struct qla_tgt_cmd *cmd)
        return cmd->bufflen > 0;
 }
 
+static void qlt_print_dif_err(struct qla_tgt_prm *prm)
+{
+       struct qla_tgt_cmd *cmd;
+       struct scsi_qla_host *vha;
+
+       /* asc 0x10=dif error */
+       if (prm->sense_buffer && (prm->sense_buffer[12] == 0x10)) {
+               cmd = prm->cmd;
+               vha = cmd->vha;
+               /* ASCQ */
+               switch (prm->sense_buffer[13]) {
+               case 1:
+                       ql_dbg(ql_dbg_tgt_dif, vha, 0xffff,
+                           "BE detected Guard TAG ERR: lba[0x%llx|%lld] len[0x%x] "
+                           "se_cmd=%p tag[%x]",
+                           cmd->lba, cmd->lba, cmd->num_blks, &cmd->se_cmd,
+                           cmd->atio.u.isp24.exchange_addr);
+                       break;
+               case 2:
+                       ql_dbg(ql_dbg_tgt_dif, vha, 0xffff,
+                           "BE detected APP TAG ERR: lba[0x%llx|%lld] len[0x%x] "
+                           "se_cmd=%p tag[%x]",
+                           cmd->lba, cmd->lba, cmd->num_blks, &cmd->se_cmd,
+                           cmd->atio.u.isp24.exchange_addr);
+                       break;
+               case 3:
+                       ql_dbg(ql_dbg_tgt_dif, vha, 0xffff,
+                           "BE detected REF TAG ERR: lba[0x%llx|%lld] len[0x%x] "
+                           "se_cmd=%p tag[%x]",
+                           cmd->lba, cmd->lba, cmd->num_blks, &cmd->se_cmd,
+                           cmd->atio.u.isp24.exchange_addr);
+                       break;
+               default:
+                       ql_dbg(ql_dbg_tgt_dif, vha, 0xffff,
+                           "BE detected Dif ERR: lba[%llx|%lld] len[%x] "
+                           "se_cmd=%p tag[%x]",
+                           cmd->lba, cmd->lba, cmd->num_blks, &cmd->se_cmd,
+                           cmd->atio.u.isp24.exchange_addr);
+                       break;
+               }
+               ql_dump_buffer(ql_dbg_tgt_dif, vha, 0xffff, cmd->cdb, 16);
+       }
+}
+
 /*
  * Called without ha->hardware_lock held
  */
@@ -2512,18 +2649,9 @@ skip_explict_conf:
                for (i = 0; i < prm->sense_buffer_len/4; i++)
                        ((uint32_t *)ctio->u.status1.sense_data)[i] =
                                cpu_to_be32(((uint32_t *)prm->sense_buffer)[i]);
-#if 0
-               if (unlikely((prm->sense_buffer_len % 4) != 0)) {
-                       static int q;
-                       if (q < 10) {
-                               ql_dbg(ql_dbg_tgt, vha, 0xe04f,
-                                   "qla_target(%d): %d bytes of sense "
-                                   "lost", prm->tgt->ha->vp_idx,
-                                   prm->sense_buffer_len % 4);
-                               q++;
-                       }
-               }
-#endif
+
+               qlt_print_dif_err(prm);
+
        } else {
                ctio->u.status1.flags &=
                    ~cpu_to_le16(CTIO7_FLAGS_STATUS_MODE_0);
@@ -2537,19 +2665,9 @@ skip_explict_conf:
        /* Sense with len > 24: is it possible? */
 }
 
-
-
-/* diff  */
 static inline int
 qlt_hba_err_chk_enabled(struct se_cmd *se_cmd)
 {
-       /*
-        * Uncomment when corresponding SCSI changes are done.
-        *
-        if (!sp->cmd->prot_chk)
-        return 0;
-        *
-        */
        switch (se_cmd->prot_op) {
        case TARGET_PROT_DOUT_INSERT:
        case TARGET_PROT_DIN_STRIP:
@@ -2570,16 +2688,38 @@ qlt_hba_err_chk_enabled(struct se_cmd *se_cmd)
        return 0;
 }
 
+static inline int
+qla_tgt_ref_mask_check(struct se_cmd *se_cmd)
+{
+       switch (se_cmd->prot_op) {
+       case TARGET_PROT_DIN_INSERT:
+       case TARGET_PROT_DOUT_INSERT:
+       case TARGET_PROT_DIN_STRIP:
+       case TARGET_PROT_DOUT_STRIP:
+       case TARGET_PROT_DIN_PASS:
+       case TARGET_PROT_DOUT_PASS:
+           return 1;
+       default:
+           return 0;
+       }
+       return 0;
+}
+
 /*
- * qla24xx_set_t10dif_tags_from_cmd - Extract Ref and App tags from SCSI command
- *
+ * qla_tgt_set_dif_tags - Extract Ref and App tags from SCSI command
  */
-static inline void
-qlt_set_t10dif_tags(struct se_cmd *se_cmd, struct crc_context *ctx)
+static void
+qla_tgt_set_dif_tags(struct qla_tgt_cmd *cmd, struct crc_context *ctx,
+    uint16_t *pfw_prot_opts)
 {
+       struct se_cmd *se_cmd = &cmd->se_cmd;
        uint32_t lba = 0xffffffff & se_cmd->t_task_lba;
+       scsi_qla_host_t *vha = cmd->tgt->vha;
+       struct qla_hw_data *ha = vha->hw;
+       uint32_t t32 = 0;
 
-       /* wait til Mode Sense/Select cmd, modepage Ah, subpage 2
+       /*
+        * wait till Mode Sense/Select cmd, modepage Ah, subpage 2
         * have been implemented by TCM, before AppTag is available.
         * Look for modesense_handlers[]
         */
@@ -2587,65 +2727,73 @@ qlt_set_t10dif_tags(struct se_cmd *se_cmd, struct crc_context *ctx)
        ctx->app_tag_mask[0] = 0x0;
        ctx->app_tag_mask[1] = 0x0;
 
+       if (IS_PI_UNINIT_CAPABLE(ha)) {
+               if ((se_cmd->prot_type == TARGET_DIF_TYPE1_PROT) ||
+                   (se_cmd->prot_type == TARGET_DIF_TYPE2_PROT))
+                       *pfw_prot_opts |= PO_DIS_VALD_APP_ESC;
+               else if (se_cmd->prot_type == TARGET_DIF_TYPE3_PROT)
+                       *pfw_prot_opts |= PO_DIS_VALD_APP_REF_ESC;
+       }
+
+       t32 = ha->tgt.tgt_ops->get_dif_tags(cmd, pfw_prot_opts);
+
        switch (se_cmd->prot_type) {
        case TARGET_DIF_TYPE0_PROT:
                /*
-                * No check for ql2xenablehba_err_chk, as it would be an
-                * I/O error if hba tag generation is not done.
+                * No check for ql2xenablehba_err_chk, as it
+                * would be an I/O error if hba tag generation
+                * is not done.
                 */
                ctx->ref_tag = cpu_to_le32(lba);
-
-               if (!qlt_hba_err_chk_enabled(se_cmd))
-                       break;
-
                /* enable ALL bytes of the ref tag */
                ctx->ref_tag_mask[0] = 0xff;
                ctx->ref_tag_mask[1] = 0xff;
                ctx->ref_tag_mask[2] = 0xff;
                ctx->ref_tag_mask[3] = 0xff;
                break;
-       /*
-        * For TYpe 1 protection: 16 bit GUARD tag, 32 bit REF tag, and
-        * 16 bit app tag.
-        */
        case TARGET_DIF_TYPE1_PROT:
-               ctx->ref_tag = cpu_to_le32(lba);
-
-               if (!qlt_hba_err_chk_enabled(se_cmd))
-                       break;
-
-               /* enable ALL bytes of the ref tag */
-               ctx->ref_tag_mask[0] = 0xff;
-               ctx->ref_tag_mask[1] = 0xff;
-               ctx->ref_tag_mask[2] = 0xff;
-               ctx->ref_tag_mask[3] = 0xff;
-               break;
-       /*
-        * For TYPE 2 protection: 16 bit GUARD + 32 bit REF tag has to
-        * match LBA in CDB + N
-        */
+           /*
+            * For TYPE 1 protection: 16 bit GUARD tag, 32 bit
+            * REF tag, and 16 bit app tag.
+            */
+           ctx->ref_tag = cpu_to_le32(lba);
+           if (!qla_tgt_ref_mask_check(se_cmd) ||
+               !(ha->tgt.tgt_ops->chk_dif_tags(t32))) {
+                   *pfw_prot_opts |= PO_DIS_REF_TAG_VALD;
+                   break;
+           }
+           /* enable ALL bytes of the ref tag */
+           ctx->ref_tag_mask[0] = 0xff;
+           ctx->ref_tag_mask[1] = 0xff;
+           ctx->ref_tag_mask[2] = 0xff;
+           ctx->ref_tag_mask[3] = 0xff;
+           break;
        case TARGET_DIF_TYPE2_PROT:
-               ctx->ref_tag = cpu_to_le32(lba);
-
-               if (!qlt_hba_err_chk_enabled(se_cmd))
-                       break;
-
-               /* enable ALL bytes of the ref tag */
-               ctx->ref_tag_mask[0] = 0xff;
-               ctx->ref_tag_mask[1] = 0xff;
-               ctx->ref_tag_mask[2] = 0xff;
-               ctx->ref_tag_mask[3] = 0xff;
-               break;
-
-       /* For Type 3 protection: 16 bit GUARD only */
+           /*
+            * For TYPE 2 protection: 16 bit GUARD + 32 bit REF
+            * tag has to match LBA in CDB + N
+            */
+           ctx->ref_tag = cpu_to_le32(lba);
+           if (!qla_tgt_ref_mask_check(se_cmd) ||
+               !(ha->tgt.tgt_ops->chk_dif_tags(t32))) {
+                   *pfw_prot_opts |= PO_DIS_REF_TAG_VALD;
+                   break;
+           }
+           /* enable ALL bytes of the ref tag */
+           ctx->ref_tag_mask[0] = 0xff;
+           ctx->ref_tag_mask[1] = 0xff;
+           ctx->ref_tag_mask[2] = 0xff;
+           ctx->ref_tag_mask[3] = 0xff;
+           break;
        case TARGET_DIF_TYPE3_PROT:
-               ctx->ref_tag_mask[0] = ctx->ref_tag_mask[1] =
-                       ctx->ref_tag_mask[2] = ctx->ref_tag_mask[3] = 0x00;
-               break;
+           /* For TYPE 3 protection: 16 bit GUARD only */
+           *pfw_prot_opts |= PO_DIS_REF_TAG_VALD;
+           ctx->ref_tag_mask[0] = ctx->ref_tag_mask[1] =
+               ctx->ref_tag_mask[2] = ctx->ref_tag_mask[3] = 0x00;
+           break;
        }
 }
 
-
 static inline int
 qlt_build_ctio_crc2_pkt(struct qla_tgt_prm *prm, scsi_qla_host_t *vha)
 {
@@ -2664,6 +2812,7 @@ qlt_build_ctio_crc2_pkt(struct qla_tgt_prm *prm, scsi_qla_host_t *vha)
        struct se_cmd           *se_cmd = &cmd->se_cmd;
        uint32_t h;
        struct atio_from_isp *atio = &prm->cmd->atio;
+       struct qla_tc_param     tc;
        uint16_t t16;
 
        ha = vha->hw;
@@ -2689,16 +2838,15 @@ qlt_build_ctio_crc2_pkt(struct qla_tgt_prm *prm, scsi_qla_host_t *vha)
        case TARGET_PROT_DIN_INSERT:
        case TARGET_PROT_DOUT_STRIP:
                transfer_length = data_bytes;
-               data_bytes += dif_bytes;
+               if (cmd->prot_sg_cnt)
+                       data_bytes += dif_bytes;
                break;
-
        case TARGET_PROT_DIN_STRIP:
        case TARGET_PROT_DOUT_INSERT:
        case TARGET_PROT_DIN_PASS:
        case TARGET_PROT_DOUT_PASS:
                transfer_length = data_bytes + dif_bytes;
                break;
-
        default:
                BUG();
                break;
@@ -2734,7 +2882,6 @@ qlt_build_ctio_crc2_pkt(struct qla_tgt_prm *prm, scsi_qla_host_t *vha)
                break;
        }
 
-
        /* ---- PKT ---- */
        /* Update entry type to indicate Command Type CRC_2 IOCB */
        pkt->entry_type  = CTIO_CRC2;
@@ -2752,9 +2899,8 @@ qlt_build_ctio_crc2_pkt(struct qla_tgt_prm *prm, scsi_qla_host_t *vha)
        } else
                ha->tgt.cmds[h-1] = prm->cmd;
 
-
        pkt->handle  = h | CTIO_COMPLETION_HANDLE_MARK;
-       pkt->nport_handle = prm->cmd->loop_id;
+       pkt->nport_handle = cpu_to_le16(prm->cmd->loop_id);
        pkt->timeout = cpu_to_le16(QLA_TGT_TIMEOUT);
        pkt->initiator_id[0] = atio->u.isp24.fcp_hdr.s_id[2];
        pkt->initiator_id[1] = atio->u.isp24.fcp_hdr.s_id[1];
@@ -2775,12 +2921,10 @@ qlt_build_ctio_crc2_pkt(struct qla_tgt_prm *prm, scsi_qla_host_t *vha)
        else if (cmd->dma_data_direction == DMA_FROM_DEVICE)
                pkt->flags = cpu_to_le16(CTIO7_FLAGS_DATA_OUT);
 
-
        pkt->dseg_count = prm->tot_dsds;
        /* Fibre channel byte count */
        pkt->transfer_length = cpu_to_le32(transfer_length);
 
-
        /* ----- CRC context -------- */
 
        /* Allocate CRC context from global pool */
@@ -2800,13 +2944,12 @@ qlt_build_ctio_crc2_pkt(struct qla_tgt_prm *prm, scsi_qla_host_t *vha)
        /* Set handle */
        crc_ctx_pkt->handle = pkt->handle;
 
-       qlt_set_t10dif_tags(se_cmd, crc_ctx_pkt);
+       qla_tgt_set_dif_tags(cmd, crc_ctx_pkt, &fw_prot_opts);
 
        pkt->crc_context_address[0] = cpu_to_le32(LSD(crc_ctx_dma));
        pkt->crc_context_address[1] = cpu_to_le32(MSD(crc_ctx_dma));
        pkt->crc_context_len = CRC_CONTEXT_LEN_FW;
 
-
        if (!bundling) {
                cur_dsd = (uint32_t *) &crc_ctx_pkt->u.nobundling.data_address;
        } else {
@@ -2827,16 +2970,24 @@ qlt_build_ctio_crc2_pkt(struct qla_tgt_prm *prm, scsi_qla_host_t *vha)
        crc_ctx_pkt->byte_count = cpu_to_le32(data_bytes);
        crc_ctx_pkt->guard_seed = cpu_to_le16(0);
 
+       memset((uint8_t *)&tc, 0, sizeof(tc));
+       tc.vha = vha;
+       tc.blk_sz = cmd->blk_sz;
+       tc.bufflen = cmd->bufflen;
+       tc.sg = cmd->sg;
+       tc.prot_sg = cmd->prot_sg;
+       tc.ctx = crc_ctx_pkt;
+       tc.ctx_dsd_alloced = &cmd->ctx_dsd_alloced;
 
        /* Walks data segments */
        pkt->flags |= cpu_to_le16(CTIO7_FLAGS_DSD_PTR);
 
        if (!bundling && prm->prot_seg_cnt) {
                if (qla24xx_walk_and_build_sglist_no_difb(ha, NULL, cur_dsd,
-                       prm->tot_dsds, cmd))
+                       prm->tot_dsds, &tc))
                        goto crc_queuing_error;
        } else if (qla24xx_walk_and_build_sglist(ha, NULL, cur_dsd,
-               (prm->tot_dsds - prm->prot_seg_cnt), cmd))
+               (prm->tot_dsds - prm->prot_seg_cnt), &tc))
                goto crc_queuing_error;
 
        if (bundling && prm->prot_seg_cnt) {
@@ -2845,18 +2996,18 @@ qlt_build_ctio_crc2_pkt(struct qla_tgt_prm *prm, scsi_qla_host_t *vha)
 
                cur_dsd = (uint32_t *) &crc_ctx_pkt->u.bundling.dif_address;
                if (qla24xx_walk_and_build_prot_sglist(ha, NULL, cur_dsd,
-                       prm->prot_seg_cnt, cmd))
+                       prm->prot_seg_cnt, &tc))
                        goto crc_queuing_error;
        }
        return QLA_SUCCESS;
 
 crc_queuing_error:
        /* Cleanup will be performed by the caller */
+       vha->hw->tgt.cmds[h - 1] = NULL;
 
        return QLA_FUNCTION_FAILED;
 }
 
-
 /*
  * Callback to setup response of xmit_type of QLA_TGT_XMIT_DATA and *
  * QLA_TGT_XMIT_STATUS for >= 24xx silicon
@@ -2906,7 +3057,7 @@ int qlt_xmit_response(struct qla_tgt_cmd *cmd, int xmit_type,
        else
                vha->tgt_counters.core_qla_que_buf++;
 
-       if (!vha->flags.online || cmd->reset_count != ha->chip_reset) {
+       if (!ha->flags.fw_started || cmd->reset_count != ha->chip_reset) {
                /*
                 * Either the port is not online or this request was from
                 * previous life, just abort the processing.
@@ -3047,7 +3198,7 @@ int qlt_rdy_to_xfer(struct qla_tgt_cmd *cmd)
 
        spin_lock_irqsave(&ha->hardware_lock, flags);
 
-       if (!vha->flags.online || (cmd->reset_count != ha->chip_reset) ||
+       if (!ha->flags.fw_started || (cmd->reset_count != ha->chip_reset) ||
            (cmd->sess && cmd->sess->deleted)) {
                /*
                 * Either the port is not online or this request was from
@@ -3104,139 +3255,113 @@ EXPORT_SYMBOL(qlt_rdy_to_xfer);
 
 
 /*
- * Checks the guard or meta-data for the type of error
- * detected by the HBA.
+ * It is assumed that either the hardware_lock or the qpair lock is held.
  */
-static inline int
+static void
 qlt_handle_dif_error(struct scsi_qla_host *vha, struct qla_tgt_cmd *cmd,
-               struct ctio_crc_from_fw *sts)
+       struct ctio_crc_from_fw *sts)
 {
        uint8_t         *ap = &sts->actual_dif[0];
        uint8_t         *ep = &sts->expected_dif[0];
-       uint32_t        e_ref_tag, a_ref_tag;
-       uint16_t        e_app_tag, a_app_tag;
-       uint16_t        e_guard, a_guard;
        uint64_t        lba = cmd->se_cmd.t_task_lba;
+       uint8_t scsi_status, sense_key, asc, ascq;
+       unsigned long flags;
 
-       a_guard   = be16_to_cpu(*(uint16_t *)(ap + 0));
-       a_app_tag = be16_to_cpu(*(uint16_t *)(ap + 2));
-       a_ref_tag = be32_to_cpu(*(uint32_t *)(ap + 4));
-
-       e_guard   = be16_to_cpu(*(uint16_t *)(ep + 0));
-       e_app_tag = be16_to_cpu(*(uint16_t *)(ep + 2));
-       e_ref_tag = be32_to_cpu(*(uint32_t *)(ep + 4));
-
-       ql_dbg(ql_dbg_tgt, vha, 0xe075,
-           "iocb(s) %p Returned STATUS.\n", sts);
-
-       ql_dbg(ql_dbg_tgt, vha, 0xf075,
-           "dif check TGT cdb 0x%x lba 0x%llx: [Actual|Expected] Ref Tag[0x%x|0x%x], App Tag [0x%x|0x%x], Guard [0x%x|0x%x]\n",
-           cmd->atio.u.isp24.fcp_cmnd.cdb[0], lba,
-           a_ref_tag, e_ref_tag, a_app_tag, e_app_tag, a_guard, e_guard);
-
-       /*
-        * Ignore sector if:
-        * For type     3: ref & app tag is all 'f's
-        * For type 0,1,2: app tag is all 'f's
-        */
-       if ((a_app_tag == 0xffff) &&
-           ((cmd->se_cmd.prot_type != TARGET_DIF_TYPE3_PROT) ||
-            (a_ref_tag == 0xffffffff))) {
-               uint32_t blocks_done;
-
-               /* 2TB boundary case covered automatically with this */
-               blocks_done = e_ref_tag - (uint32_t)lba + 1;
-               cmd->se_cmd.bad_sector = e_ref_tag;
-               cmd->se_cmd.pi_err = 0;
-               ql_dbg(ql_dbg_tgt, vha, 0xf074,
-                       "need to return scsi good\n");
-
-               /* Update protection tag */
-               if (cmd->prot_sg_cnt) {
-                       uint32_t i, k = 0, num_ent;
-                       struct scatterlist *sg, *sgl;
-
-
-                       sgl = cmd->prot_sg;
-
-                       /* Patch the corresponding protection tags */
-                       for_each_sg(sgl, sg, cmd->prot_sg_cnt, i) {
-                               num_ent = sg_dma_len(sg) / 8;
-                               if (k + num_ent < blocks_done) {
-                                       k += num_ent;
-                                       continue;
-                               }
-                               k = blocks_done;
-                               break;
-                       }
+       cmd->trc_flags |= TRC_DIF_ERR;
 
-                       if (k != blocks_done) {
-                               ql_log(ql_log_warn, vha, 0xf076,
-                                   "unexpected tag values tag:lba=%u:%llu)\n",
-                                   e_ref_tag, (unsigned long long)lba);
-                               goto out;
-                       }
+       cmd->a_guard   = be16_to_cpu(*(uint16_t *)(ap + 0));
+       cmd->a_app_tag = be16_to_cpu(*(uint16_t *)(ap + 2));
+       cmd->a_ref_tag = be32_to_cpu(*(uint32_t *)(ap + 4));
 
-#if 0
-                       struct sd_dif_tuple *spt;
-                       /* TODO:
-                        * This section came from initiator. Is it valid here?
-                        * should ulp be override with actual val???
-                        */
-                       spt = page_address(sg_page(sg)) + sg->offset;
-                       spt += j;
+       cmd->e_guard   = be16_to_cpu(*(uint16_t *)(ep + 0));
+       cmd->e_app_tag = be16_to_cpu(*(uint16_t *)(ep + 2));
+       cmd->e_ref_tag = be32_to_cpu(*(uint32_t *)(ep + 4));
 
-                       spt->app_tag = 0xffff;
-                       if (cmd->se_cmd.prot_type == SCSI_PROT_DIF_TYPE3)
-                               spt->ref_tag = 0xffffffff;
-#endif
-               }
+       ql_dbg(ql_dbg_tgt_dif, vha, 0xf075,
+           "%s: aborted %d state %d\n", __func__, cmd->aborted, cmd->state);
 
-               return 0;
-       }
+       scsi_status = sense_key = asc = ascq = 0;
 
-       /* check guard */
-       if (e_guard != a_guard) {
-               cmd->se_cmd.pi_err = TCM_LOGICAL_BLOCK_GUARD_CHECK_FAILED;
-               cmd->se_cmd.bad_sector = cmd->se_cmd.t_task_lba;
-
-               ql_log(ql_log_warn, vha, 0xe076,
-                   "Guard ERR: cdb 0x%x lba 0x%llx: [Actual|Expected] Ref Tag[0x%x|0x%x], App Tag [0x%x|0x%x], Guard [0x%x|0x%x] cmd=%p\n",
-                   cmd->atio.u.isp24.fcp_cmnd.cdb[0], lba,
-                   a_ref_tag, e_ref_tag, a_app_tag, e_app_tag,
-                   a_guard, e_guard, cmd);
-               goto out;
+       /* check appl tag */
+       if (cmd->e_app_tag != cmd->a_app_tag) {
+               ql_dbg(ql_dbg_tgt_dif, vha, 0xffff,
+                       "App Tag ERR: cdb[%x] lba[%llx %llx] blks[%x] [Actual|Expected] "
+                       "Ref[%x|%x], App[%x|%x], "
+                       "Guard [%x|%x] cmd=%p ox_id[%04x]",
+                       cmd->cdb[0], lba, (lba+cmd->num_blks), cmd->num_blks,
+                       cmd->a_ref_tag, cmd->e_ref_tag,
+                       cmd->a_app_tag, cmd->e_app_tag,
+                       cmd->a_guard, cmd->e_guard,
+                       cmd, cmd->atio.u.isp24.fcp_hdr.ox_id);
+
+               cmd->dif_err_code = DIF_ERR_APP;
+               scsi_status = SAM_STAT_CHECK_CONDITION;
+               sense_key = ABORTED_COMMAND;
+               asc = 0x10;
+               ascq = 0x2;
        }
 
        /* check ref tag */
-       if (e_ref_tag != a_ref_tag) {
-               cmd->se_cmd.pi_err = TCM_LOGICAL_BLOCK_REF_TAG_CHECK_FAILED;
-               cmd->se_cmd.bad_sector = e_ref_tag;
-
-               ql_log(ql_log_warn, vha, 0xe077,
-                       "Ref Tag ERR: cdb 0x%x lba 0x%llx: [Actual|Expected] Ref Tag[0x%x|0x%x], App Tag [0x%x|0x%x], Guard [0x%x|0x%x] cmd=%p\n",
-                       cmd->atio.u.isp24.fcp_cmnd.cdb[0], lba,
-                       a_ref_tag, e_ref_tag, a_app_tag, e_app_tag,
-                       a_guard, e_guard, cmd);
+       if (cmd->e_ref_tag != cmd->a_ref_tag) {
+               ql_dbg(ql_dbg_tgt_dif, vha, 0xffff,
+                       "Ref Tag ERR: cdb[%x] lba[%llx %llx] blks[%x] [Actual|Expected] "
+                       "Ref[%x|%x], App[%x|%x], "
+                       "Guard[%x|%x] cmd=%p ox_id[%04x] ",
+                       cmd->cdb[0], lba, (lba+cmd->num_blks), cmd->num_blks,
+                       cmd->a_ref_tag, cmd->e_ref_tag,
+                       cmd->a_app_tag, cmd->e_app_tag,
+                       cmd->a_guard, cmd->e_guard,
+                       cmd, cmd->atio.u.isp24.fcp_hdr.ox_id);
+
+               cmd->dif_err_code = DIF_ERR_REF;
+               scsi_status = SAM_STAT_CHECK_CONDITION;
+               sense_key = ABORTED_COMMAND;
+               asc = 0x10;
+               ascq = 0x3;
                goto out;
        }
 
-       /* check appl tag */
-       if (e_app_tag != a_app_tag) {
-               cmd->se_cmd.pi_err = TCM_LOGICAL_BLOCK_APP_TAG_CHECK_FAILED;
-               cmd->se_cmd.bad_sector = cmd->se_cmd.t_task_lba;
-
-               ql_log(ql_log_warn, vha, 0xe078,
-                       "App Tag ERR: cdb 0x%x lba 0x%llx: [Actual|Expected] Ref Tag[0x%x|0x%x], App Tag [0x%x|0x%x], Guard [0x%x|0x%x] cmd=%p\n",
-                       cmd->atio.u.isp24.fcp_cmnd.cdb[0], lba,
-                       a_ref_tag, e_ref_tag, a_app_tag, e_app_tag,
-                       a_guard, e_guard, cmd);
-               goto out;
+       /* check guard */
+       if (cmd->e_guard != cmd->a_guard) {
+               ql_dbg(ql_dbg_tgt_dif, vha, 0xffff,
+                       "Guard ERR: cdb[%x] lba[%llx %llx] blks[%x] [Actual|Expected] "
+                       "Ref[%x|%x], App[%x|%x], "
+                       "Guard [%x|%x] cmd=%p ox_id[%04x]",
+                       cmd->cdb[0], lba, (lba+cmd->num_blks), cmd->num_blks,
+                       cmd->a_ref_tag, cmd->e_ref_tag,
+                       cmd->a_app_tag, cmd->e_app_tag,
+                       cmd->a_guard, cmd->e_guard,
+                       cmd, cmd->atio.u.isp24.fcp_hdr.ox_id);
+               cmd->dif_err_code = DIF_ERR_GRD;
+               scsi_status = SAM_STAT_CHECK_CONDITION;
+               sense_key = ABORTED_COMMAND;
+               asc = 0x10;
+               ascq = 0x1;
        }
 out:
-       return 1;
-}
+       switch (cmd->state) {
+       case QLA_TGT_STATE_NEED_DATA:
+               /* handle_data will load the DIF error code */
+               cmd->state = QLA_TGT_STATE_DATA_IN;
+               vha->hw->tgt.tgt_ops->handle_data(cmd);
+               break;
+       default:
+               spin_lock_irqsave(&cmd->cmd_lock, flags);
+               if (cmd->aborted) {
+                       spin_unlock_irqrestore(&cmd->cmd_lock, flags);
+                       vha->hw->tgt.tgt_ops->free_cmd(cmd);
+                       break;
+               }
+               spin_unlock_irqrestore(&cmd->cmd_lock, flags);
 
+               qlt_send_resp_ctio(vha, cmd, scsi_status, sense_key, asc, ascq);
+               /* Assume the SCSI status gets out on the wire;
+                * do not wait for completion.
+                */
+               vha->hw->tgt.tgt_ops->free_cmd(cmd);
+               break;
+       }
+}
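
For orientation, a hedged sketch of the 8-byte T10 protection-information
tuple that the actual/expected buffers above are decoded from; the 0/2/4
offsets match the be16/be32 reads, and the struct name is illustrative:

        #include <linux/types.h>

        /* One tuple guards each logical block (layout per SBC-3). */
        struct example_t10_pi_tuple {
                __be16 guard_tag;       /* CRC-16 of the data block          */
                __be16 app_tag;         /* application tag                   */
                __be32 ref_tag;         /* low 32 bits of the LBA (Type 1/2) */
        };
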
 
 /* If hardware_lock is held on entry, might drop it, then reacquire */
 /* This function sends the appropriate CTIO to ISP 2xxx or 24xx */
@@ -3251,7 +3376,7 @@ static int __qlt_send_term_imm_notif(struct scsi_qla_host *vha,
        ql_dbg(ql_dbg_tgt_tmr, vha, 0xe01c,
            "Sending TERM ELS CTIO (ha=%p)\n", ha);
 
-       pkt = (request_t *)qla2x00_alloc_iocbs_ready(vha, NULL);
+       pkt = (request_t *)qla2x00_alloc_iocbs(vha, NULL);
        if (pkt == NULL) {
                ql_dbg(ql_dbg_tgt, vha, 0xe080,
                    "qla_target(%d): %s failed: unable to allocate "
@@ -3543,6 +3668,16 @@ static int qlt_term_ctio_exchange(struct scsi_qla_host *vha, void *ctio,
 {
        int term = 0;
 
+       if (cmd->se_cmd.prot_op)
+               ql_dbg(ql_dbg_tgt_dif, vha, 0xffff,
+                   "Term DIF cmd: lba[0x%llx|%lld] len[0x%x] "
+                   "se_cmd=%p tag[%x] op %#x/%s",
+                    cmd->lba, cmd->lba,
+                    cmd->num_blks, &cmd->se_cmd,
+                    cmd->atio.u.isp24.exchange_addr,
+                    cmd->se_cmd.prot_op,
+                    prot_op_str(cmd->se_cmd.prot_op));
+
        if (ctio != NULL) {
                struct ctio7_from_24xx *c = (struct ctio7_from_24xx *)ctio;
                term = !(c->flags &
@@ -3760,32 +3895,15 @@ static void qlt_do_ctio_completion(struct scsi_qla_host *vha, uint32_t handle,
                        struct ctio_crc_from_fw *crc =
                                (struct ctio_crc_from_fw *)ctio;
                        ql_dbg(ql_dbg_tgt_mgt, vha, 0xf073,
-                           "qla_target(%d): CTIO with DIF_ERROR status %x received (state %x, se_cmd %p) actual_dif[0x%llx] expect_dif[0x%llx]\n",
+                           "qla_target(%d): CTIO with DIF_ERROR status %x "
+                           "received (state %x, ulp_cmd %p) actual_dif[0x%llx] "
+                           "expect_dif[0x%llx]\n",
                            vha->vp_idx, status, cmd->state, se_cmd,
                            *((u64 *)&crc->actual_dif[0]),
                            *((u64 *)&crc->expected_dif[0]));
 
-                       if (qlt_handle_dif_error(vha, cmd, ctio)) {
-                               if (cmd->state == QLA_TGT_STATE_NEED_DATA) {
-                                       /* scsi Write/xfer rdy complete */
-                                       goto skip_term;
-                               } else {
-                                       /* scsi read/xmit respond complete
-                                        * call handle dif to send scsi status
-                                        * rather than terminate exchange.
-                                        */
-                                       cmd->state = QLA_TGT_STATE_PROCESSED;
-                                       ha->tgt.tgt_ops->handle_dif_err(cmd);
-                                       return;
-                               }
-                       } else {
-                               /* Need to generate a SCSI good completion.
-                                * because FW did not send scsi status.
-                                */
-                               status = 0;
-                               goto skip_term;
-                       }
-                       break;
+                       qlt_handle_dif_error(vha, cmd, ctio);
+                       return;
                }
                default:
                        ql_dbg(ql_dbg_tgt_mgt, vha, 0xf05b,
@@ -3808,7 +3926,6 @@ static void qlt_do_ctio_completion(struct scsi_qla_host *vha, uint32_t handle,
                                return;
                }
        }
-skip_term:
 
        if (cmd->state == QLA_TGT_STATE_PROCESSED) {
                cmd->trc_flags |= TRC_CTIO_DONE;
@@ -4584,7 +4701,8 @@ static int qlt_24xx_handle_els(struct scsi_qla_host *vha,
                }
 
                if (sess != NULL) {
-                       if (sess->fw_login_state == DSC_LS_PLOGI_PEND) {
+                       if (sess->fw_login_state != DSC_LS_PLOGI_PEND &&
+                           sess->fw_login_state != DSC_LS_PLOGI_COMP) {
                                /*
                                 * Impatient initiator sent PRLI before last
                                 * PLOGI could finish. Will force him to re-try,
@@ -4623,15 +4741,23 @@ static int qlt_24xx_handle_els(struct scsi_qla_host *vha,
 
                /* Make session global (not used in fabric mode) */
                if (ha->current_topology != ISP_CFG_F) {
-                       set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
-                       set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
-                       qla2xxx_wake_dpc(vha);
+                       if (sess) {
+                               ql_dbg(ql_dbg_disc, vha, 0xffff,
+                                   "%s %d %8phC post nack\n",
+                                   __func__, __LINE__, sess->port_name);
+                               qla24xx_post_nack_work(vha, sess, iocb,
+                                       SRB_NACK_PRLI);
+                               res = 0;
+                       } else {
+                               set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
+                               set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
+                               qla2xxx_wake_dpc(vha);
+                       }
                } else {
                        if (sess) {
                                ql_dbg(ql_dbg_disc, vha, 0xffff,
-                                          "%s %d %8phC post nack\n",
-                                          __func__, __LINE__, sess->port_name);
-
+                                   "%s %d %8phC post nack\n",
+                                   __func__, __LINE__, sess->port_name);
                                qla24xx_post_nack_work(vha, sess, iocb,
                                        SRB_NACK_PRLI);
                                res = 0;
@@ -4639,7 +4765,6 @@ static int qlt_24xx_handle_els(struct scsi_qla_host *vha,
                }
                break;
 
-
        case ELS_TPRLO:
                if (le16_to_cpu(iocb->u.isp24.flags) &
                        NOTIFY24XX_FLAGS_GLOBAL_TPRLO) {
@@ -5079,16 +5204,22 @@ qlt_send_busy(struct scsi_qla_host *vha,
 
 static int
 qlt_chk_qfull_thresh_hold(struct scsi_qla_host *vha,
-       struct atio_from_isp *atio)
+       struct atio_from_isp *atio, bool ha_locked)
 {
        struct qla_hw_data *ha = vha->hw;
        uint16_t status;
+       unsigned long flags;
 
        if (ha->tgt.num_pend_cmds < Q_FULL_THRESH_HOLD(ha))
                return 0;
 
+       if (!ha_locked)
+               spin_lock_irqsave(&ha->hardware_lock, flags);
        status = temp_sam_status;
        qlt_send_busy(vha, atio, status);
+       if (!ha_locked)
+               spin_unlock_irqrestore(&ha->hardware_lock, flags);
+
        return 1;
 }
 
@@ -5103,7 +5234,7 @@ static void qlt_24xx_atio_pkt(struct scsi_qla_host *vha,
        unsigned long flags;
 
        if (unlikely(tgt == NULL)) {
-               ql_dbg(ql_dbg_io, vha, 0x3064,
+               ql_dbg(ql_dbg_tgt, vha, 0x3064,
                    "ATIO pkt, but no tgt (ha %p)", ha);
                return;
        }
@@ -5133,7 +5264,7 @@ static void qlt_24xx_atio_pkt(struct scsi_qla_host *vha,
 
 
                if (likely(atio->u.isp24.fcp_cmnd.task_mgmt_flags == 0)) {
-                       rc = qlt_chk_qfull_thresh_hold(vha, atio);
+                       rc = qlt_chk_qfull_thresh_hold(vha, atio, ha_locked);
                        if (rc != 0) {
                                tgt->atio_irq_cmd_count--;
                                return;
@@ -5256,7 +5387,7 @@ static void qlt_response_pkt(struct scsi_qla_host *vha, response_t *pkt)
                        break;
                }
 
-               rc = qlt_chk_qfull_thresh_hold(vha, atio);
+               rc = qlt_chk_qfull_thresh_hold(vha, atio, true);
                if (rc != 0) {
                        tgt->irq_cmd_count--;
                        return;
@@ -5531,7 +5662,7 @@ static fc_port_t *qlt_get_port_database(struct scsi_qla_host *vha,
 
        fcport->loop_id = loop_id;
 
-       rc = qla2x00_get_port_database(vha, fcport, 0);
+       rc = qla24xx_gpdb_wait(vha, fcport, 0);
        if (rc != QLA_SUCCESS) {
                ql_dbg(ql_dbg_tgt_mgt, vha, 0xf070,
                    "qla_target(%d): Failed to retrieve fcport "
@@ -5713,30 +5844,23 @@ static void qlt_abort_work(struct qla_tgt *tgt,
                }
        }
 
-       spin_lock_irqsave(&ha->hardware_lock, flags);
-
-       if (tgt->tgt_stop)
-               goto out_term;
-
        rc = __qlt_24xx_handle_abts(vha, &prm->abts, sess);
+       ha->tgt.tgt_ops->put_sess(sess);
+       spin_unlock_irqrestore(&ha->tgt.sess_lock, flags2);
+
        if (rc != 0)
                goto out_term;
-       spin_unlock_irqrestore(&ha->hardware_lock, flags);
-       if (sess)
-               ha->tgt.tgt_ops->put_sess(sess);
-       spin_unlock_irqrestore(&ha->tgt.sess_lock, flags2);
        return;
 
 out_term2:
-       spin_lock_irqsave(&ha->hardware_lock, flags);
+       if (sess)
+               ha->tgt.tgt_ops->put_sess(sess);
+       spin_unlock_irqrestore(&ha->tgt.sess_lock, flags2);
 
 out_term:
+       spin_lock_irqsave(&ha->hardware_lock, flags);
        qlt_24xx_send_abts_resp(vha, &prm->abts, FCP_TMF_REJECTED, false);
        spin_unlock_irqrestore(&ha->hardware_lock, flags);
-
-       if (sess)
-               ha->tgt.tgt_ops->put_sess(sess);
-       spin_unlock_irqrestore(&ha->tgt.sess_lock, flags2);
 }
 
 static void qlt_tmr_work(struct qla_tgt *tgt,
@@ -5756,7 +5880,7 @@ static void qlt_tmr_work(struct qla_tgt *tgt,
        spin_lock_irqsave(&ha->tgt.sess_lock, flags);
 
        if (tgt->tgt_stop)
-               goto out_term;
+               goto out_term2;
 
        s_id = prm->tm_iocb2.u.isp24.fcp_hdr.s_id;
        sess = ha->tgt.tgt_ops->find_sess_by_s_id(vha, s_id);
@@ -5768,11 +5892,11 @@ static void qlt_tmr_work(struct qla_tgt *tgt,
 
                spin_lock_irqsave(&ha->tgt.sess_lock, flags);
                if (!sess)
-                       goto out_term;
+                       goto out_term2;
        } else {
                if (sess->deleted) {
                        sess = NULL;
-                       goto out_term;
+                       goto out_term2;
                }
 
                if (!kref_get_unless_zero(&sess->sess_kref)) {
@@ -5780,7 +5904,7 @@ static void qlt_tmr_work(struct qla_tgt *tgt,
                            "%s: kref_get fail %8phC\n",
                             __func__, sess->port_name);
                        sess = NULL;
-                       goto out_term;
+                       goto out_term2;
                }
        }
 
@@ -5790,17 +5914,19 @@ static void qlt_tmr_work(struct qla_tgt *tgt,
        unpacked_lun = scsilun_to_int((struct scsi_lun *)&lun);
 
        rc = qlt_issue_task_mgmt(sess, unpacked_lun, fn, iocb, 0);
-       if (rc != 0)
-               goto out_term;
-
        ha->tgt.tgt_ops->put_sess(sess);
        spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
+
+       if (rc != 0)
+               goto out_term;
        return;
 
+out_term2:
+       if (sess)
+               ha->tgt.tgt_ops->put_sess(sess);
+       spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
 out_term:
        qlt_send_term_exchange(vha, NULL, &prm->tm_iocb2, 1, 0);
-       ha->tgt.tgt_ops->put_sess(sess);
-       spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
 }
 
 static void qlt_sess_work_fn(struct work_struct *work)
@@ -5893,13 +6019,13 @@ int qlt_add_target(struct qla_hw_data *ha, struct scsi_qla_host *base_vha)
        tgt->datasegs_per_cmd = QLA_TGT_DATASEGS_PER_CMD_24XX;
        tgt->datasegs_per_cont = QLA_TGT_DATASEGS_PER_CONT_24XX;
 
-       if (base_vha->fc_vport)
-               return 0;
-
        mutex_lock(&qla_tgt_mutex);
        list_add_tail(&tgt->tgt_list_entry, &qla_tgt_glist);
        mutex_unlock(&qla_tgt_mutex);
 
+       if (ha->tgt.tgt_ops && ha->tgt.tgt_ops->add_target)
+               ha->tgt.tgt_ops->add_target(base_vha);
+
        return 0;
 }
 
@@ -5928,6 +6054,17 @@ int qlt_remove_target(struct qla_hw_data *ha, struct scsi_qla_host *vha)
        return 0;
 }
 
+void qlt_remove_target_resources(struct qla_hw_data *ha)
+{
+       struct scsi_qla_host *node;
+       u32 key = 0;
+
+       btree_for_each_safe32(&ha->tgt.host_map, key, node)
+               btree_remove32(&ha->tgt.host_map, key);
+
+       btree_destroy32(&ha->tgt.host_map);
+}
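
A minimal sketch of the linux/btree.h calls the host_map relies on; the key
value is illustrative and error handling is trimmed:

        #include <linux/btree.h>

        static int example_host_map(struct scsi_qla_host *vha)
        {
                struct btree_head32 map;
                int rc = btree_init32(&map);

                if (rc)
                        return rc;
                rc = btree_insert32(&map, 0x010203, vha, GFP_ATOMIC);
                if (!rc)
                        vha = btree_lookup32(&map, 0x010203);
                btree_remove32(&map, 0x010203);
                btree_destroy32(&map);
                return rc;
        }
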
+
 static void qlt_lport_dump(struct scsi_qla_host *vha, u64 wwpn,
        unsigned char *b)
 {
@@ -6234,7 +6371,7 @@ qlt_24xx_process_atio_queue(struct scsi_qla_host *vha, uint8_t ha_locked)
        struct atio_from_isp *pkt;
        int cnt, i;
 
-       if (!vha->flags.online)
+       if (!ha->flags.fw_started)
                return;
 
        while ((ha->tgt.atio_ring_ptr->signature != ATIO_PROCESSED) ||
@@ -6581,6 +6718,8 @@ qlt_modify_vp_config(struct scsi_qla_host *vha,
 void
 qlt_probe_one_stage1(struct scsi_qla_host *base_vha, struct qla_hw_data *ha)
 {
+       int rc;
+
        if (!QLA_TGT_MODE_ENABLED())
                return;
 
@@ -6600,6 +6739,13 @@ qlt_probe_one_stage1(struct scsi_qla_host *base_vha, struct qla_hw_data *ha)
            qlt_unknown_atio_work_fn);
 
        qlt_clear_mode(base_vha);
+
+       rc = btree_init32(&ha->tgt.host_map);
+       if (rc)
+               ql_log(ql_log_info, base_vha, 0xffff,
+                   "Unable to initialize ha->host_map btree\n");
+
+       qlt_update_vp_map(base_vha, SET_VP_IDX);
 }
 
 irqreturn_t
@@ -6642,6 +6788,8 @@ qlt_handle_abts_recv_work(struct work_struct *work)
        spin_lock_irqsave(&ha->hardware_lock, flags);
        qlt_response_pkt_all_vps(vha, (response_t *)&op->atio);
        spin_unlock_irqrestore(&ha->hardware_lock, flags);
+
+       kfree(op);
 }
 
 void
@@ -6706,25 +6854,69 @@ qlt_mem_free(struct qla_hw_data *ha)
 void
 qlt_update_vp_map(struct scsi_qla_host *vha, int cmd)
 {
+       void *slot;
+       u32 key;
+       int rc;
+
        if (!QLA_TGT_MODE_ENABLED())
                return;
 
+       key = vha->d_id.b24;
+
        switch (cmd) {
        case SET_VP_IDX:
                vha->hw->tgt.tgt_vp_map[vha->vp_idx].vha = vha;
                break;
        case SET_AL_PA:
-               vha->hw->tgt.tgt_vp_map[vha->d_id.b.al_pa].idx = vha->vp_idx;
+               slot = btree_lookup32(&vha->hw->tgt.host_map, key);
+               if (!slot) {
+                       ql_dbg(ql_dbg_tgt_mgt, vha, 0xffff,
+                           "Save vha in host_map %p %06x\n", vha, key);
+                       rc = btree_insert32(&vha->hw->tgt.host_map,
+                               key, vha, GFP_ATOMIC);
+                       if (rc)
+                               ql_log(ql_log_info, vha, 0xffff,
+                                   "Unable to insert s_id into host_map: %06x\n",
+                                   key);
+                       return;
+               }
+               ql_dbg(ql_dbg_tgt_mgt, vha, 0xffff,
+                       "replace existing vha in host_map %p %06x\n", vha, key);
+               btree_update32(&vha->hw->tgt.host_map, key, vha);
                break;
        case RESET_VP_IDX:
                vha->hw->tgt.tgt_vp_map[vha->vp_idx].vha = NULL;
                break;
        case RESET_AL_PA:
-               vha->hw->tgt.tgt_vp_map[vha->d_id.b.al_pa].idx = 0;
+               ql_dbg(ql_dbg_tgt_mgt, vha, 0xffff,
+                  "clear vha in host_map %p %06x\n", vha, key);
+               slot = btree_lookup32(&vha->hw->tgt.host_map, key);
+               if (slot)
+                       btree_remove32(&vha->hw->tgt.host_map, key);
+               vha->d_id.b24 = 0;
                break;
        }
 }
 
+void qlt_update_host_map(struct scsi_qla_host *vha, port_id_t id)
+{
+       unsigned long flags;
+       struct qla_hw_data *ha = vha->hw;
+
+       if (!vha->d_id.b24) {
+               spin_lock_irqsave(&ha->vport_slock, flags);
+               vha->d_id = id;
+               qlt_update_vp_map(vha, SET_AL_PA);
+               spin_unlock_irqrestore(&ha->vport_slock, flags);
+       } else if (vha->d_id.b24 != id.b24) {
+               spin_lock_irqsave(&ha->vport_slock, flags);
+               qlt_update_vp_map(vha, RESET_AL_PA);
+               vha->d_id = id;
+               qlt_update_vp_map(vha, SET_AL_PA);
+               spin_unlock_irqrestore(&ha->vport_slock, flags);
+       }
+}
+
 static int __init qlt_parse_ini_mode(void)
 {
        if (strcasecmp(qlini_mode, QLA2XXX_INI_MODE_STR_EXCLUSIVE) == 0)
index a7f90dcaae37d3eaad551544c6151785faf84cb9..d64420251194eb5fa634a36699ecf07c69e09edd 100644 (file)
@@ -378,6 +378,14 @@ static inline void adjust_corrupted_atio(struct atio_from_isp *atio)
        atio->u.isp24.fcp_cmnd.add_cdb_len = 0;
 }
 
+static inline int get_datalen_for_atio(struct atio_from_isp *atio)
+{
+       int len = atio->u.isp24.fcp_cmnd.add_cdb_len;
+
+       return (be32_to_cpu(get_unaligned((uint32_t *)
+           &atio->u.isp24.fcp_cmnd.add_cdb[len * 4])));
+}
+
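
Worked example for the offset computed above (values illustrative): the
FCP_CMND payload stores any additional CDB dwords before the 4-byte FCP_DL
field, so the data length always starts at add_cdb[add_cdb_len * 4]:

        /* add_cdb_len == 0 (16-byte CDB)  ->  FCP_DL at add_cdb[0]  */
        /* add_cdb_len == 4 (32-byte CDB)  ->  FCP_DL at add_cdb[16] */
        static inline int example_fcp_dl_offset(int add_cdb_len)
        {
                return add_cdb_len * 4;
        }
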
 #define CTIO_TYPE7 0x12 /* Continue target I/O entry (for 24xx) */
 
 /*
@@ -667,7 +675,6 @@ struct qla_tgt_func_tmpl {
        int (*handle_cmd)(struct scsi_qla_host *, struct qla_tgt_cmd *,
                        unsigned char *, uint32_t, int, int, int);
        void (*handle_data)(struct qla_tgt_cmd *);
-       void (*handle_dif_err)(struct qla_tgt_cmd *);
        int (*handle_tmr)(struct qla_tgt_mgmt_cmd *, uint32_t, uint16_t,
                        uint32_t);
        void (*free_cmd)(struct qla_tgt_cmd *);
@@ -684,6 +691,9 @@ struct qla_tgt_func_tmpl {
        void (*clear_nacl_from_fcport_map)(struct fc_port *);
        void (*put_sess)(struct fc_port *);
        void (*shutdown_sess)(struct fc_port *);
+       int (*get_dif_tags)(struct qla_tgt_cmd *cmd, uint16_t *pfw_prot_opts);
+       int (*chk_dif_tags)(uint32_t tag);
+       void (*add_target)(struct scsi_qla_host *);
 };
 
 int qla2x00_wait_for_hba_online(struct scsi_qla_host *);
@@ -720,8 +730,8 @@ int qla2x00_wait_for_hba_online(struct scsi_qla_host *);
 #define QLA_TGT_ABORT_ALL               0xFFFE
 #define QLA_TGT_NEXUS_LOSS_SESS         0xFFFD
 #define QLA_TGT_NEXUS_LOSS              0xFFFC
-#define QLA_TGT_ABTS                                   0xFFFB
-#define QLA_TGT_2G_ABORT_TASK                  0xFFFA
+#define QLA_TGT_ABTS                   0xFFFB
+#define QLA_TGT_2G_ABORT_TASK          0xFFFA
 
 /* Notify Acknowledge flags */
 #define NOTIFY_ACK_RES_COUNT        BIT_8
@@ -845,6 +855,7 @@ enum trace_flags {
        TRC_CMD_FREE = BIT_17,
        TRC_DATA_IN = BIT_18,
        TRC_ABORT = BIT_19,
+       TRC_DIF_ERR = BIT_20,
 };
 
 struct qla_tgt_cmd {
@@ -862,7 +873,6 @@ struct qla_tgt_cmd {
        unsigned int sg_mapped:1;
        unsigned int free_sg:1;
        unsigned int write_data_transferred:1;
-       unsigned int ctx_dsd_alloced:1;
        unsigned int q_full:1;
        unsigned int term_exchg:1;
        unsigned int cmd_sent_to_fw:1;
@@ -885,11 +895,25 @@ struct qla_tgt_cmd {
        struct list_head cmd_list;
 
        struct atio_from_isp atio;
-       /* t10dif */
+
+       uint8_t ctx_dsd_alloced;
+
+       /* T10-DIF */
+#define DIF_ERR_NONE 0
+#define DIF_ERR_GRD 1
+#define DIF_ERR_REF 2
+#define DIF_ERR_APP 3
+       int8_t dif_err_code;
        struct scatterlist *prot_sg;
        uint32_t prot_sg_cnt;
-       uint32_t blk_sz;
+       uint32_t blk_sz, num_blks;
+       uint8_t scsi_status, sense_key, asc, ascq;
+
        struct crc_context *ctx;
+       uint8_t         *cdb;
+       uint64_t        lba;
+       uint16_t        a_guard, e_guard, a_app_tag, e_app_tag;
+       uint32_t        a_ref_tag, e_ref_tag;
 
        uint64_t jiffies_at_alloc;
        uint64_t jiffies_at_free;
@@ -1053,4 +1077,7 @@ extern int qlt_free_qfull_cmds(struct scsi_qla_host *);
 extern void qlt_logo_completion_handler(fc_port_t *, int);
 extern void qlt_do_generation_tick(struct scsi_qla_host *, int *);
 
+void qlt_send_resp_ctio(scsi_qla_host_t *, struct qla_tgt_cmd *, uint8_t,
+    uint8_t, uint8_t, uint8_t);
+
 #endif /* __QLA_TARGET_H */
index 3cb1964b7786e4e2add64d7c8f5788fd73b90134..45bc84e8e3bf50f798616de47a2f348d684222b4 100644 (file)
@@ -7,9 +7,9 @@
 /*
  * Driver version
  */
-#define QLA2XXX_VERSION      "8.07.00.38-k"
+#define QLA2XXX_VERSION      "9.00.00.00-k"
 
-#define QLA_DRIVER_MAJOR_VER   8
-#define QLA_DRIVER_MINOR_VER   7
+#define QLA_DRIVER_MAJOR_VER   9
+#define QLA_DRIVER_MINOR_VER   0
 #define QLA_DRIVER_PATCH_VER   0
 #define QLA_DRIVER_BETA_VER    0
index 8e8ab0fa9672a6674d3cc9556beeccc44dfc70b2..7443e4efa3aed461f225f6b04bae9223f615dd0b 100644 (file)
@@ -531,6 +531,24 @@ static void tcm_qla2xxx_handle_data_work(struct work_struct *work)
                        return;
                }
 
+               switch (cmd->dif_err_code) {
+               case DIF_ERR_GRD:
+                       cmd->se_cmd.pi_err =
+                           TCM_LOGICAL_BLOCK_GUARD_CHECK_FAILED;
+                       break;
+               case DIF_ERR_REF:
+                       cmd->se_cmd.pi_err =
+                           TCM_LOGICAL_BLOCK_REF_TAG_CHECK_FAILED;
+                       break;
+               case DIF_ERR_APP:
+                       cmd->se_cmd.pi_err =
+                           TCM_LOGICAL_BLOCK_APP_TAG_CHECK_FAILED;
+                       break;
+               case DIF_ERR_NONE:
+               default:
+                       break;
+               }
+
                if (cmd->se_cmd.pi_err)
                        transport_generic_request_failure(&cmd->se_cmd,
                                cmd->se_cmd.pi_err);
@@ -555,25 +573,23 @@ static void tcm_qla2xxx_handle_data(struct qla_tgt_cmd *cmd)
        queue_work_on(smp_processor_id(), tcm_qla2xxx_free_wq, &cmd->work);
 }
 
-static void tcm_qla2xxx_handle_dif_work(struct work_struct *work)
+static int tcm_qla2xxx_chk_dif_tags(uint32_t tag)
 {
-       struct qla_tgt_cmd *cmd = container_of(work, struct qla_tgt_cmd, work);
-
-       /* take an extra kref to prevent cmd free too early.
-        * need to wait for SCSI status/check condition to
-        * finish responding generate by transport_generic_request_failure.
-        */
-       kref_get(&cmd->se_cmd.cmd_kref);
-       transport_generic_request_failure(&cmd->se_cmd, cmd->se_cmd.pi_err);
+       return 0;
 }
 
-/*
- * Called from qla_target.c:qlt_do_ctio_completion()
- */
-static void tcm_qla2xxx_handle_dif_err(struct qla_tgt_cmd *cmd)
+static int tcm_qla2xxx_dif_tags(struct qla_tgt_cmd *cmd,
+    uint16_t *pfw_prot_opts)
 {
-       INIT_WORK(&cmd->work, tcm_qla2xxx_handle_dif_work);
-       queue_work(tcm_qla2xxx_free_wq, &cmd->work);
+       struct se_cmd *se_cmd = &cmd->se_cmd;
+
+       if (!(se_cmd->prot_checks & TARGET_DIF_CHECK_GUARD))
+               *pfw_prot_opts |= PO_DISABLE_GUARD_CHECK;
+
+       if (!(se_cmd->prot_checks & TARGET_DIF_CHECK_APPTAG))
+               *pfw_prot_opts |= PO_DIS_APP_TAG_VALD;
+
+       return 0;
 }
 
 /*
@@ -1610,7 +1626,6 @@ static void tcm_qla2xxx_update_sess(struct fc_port *sess, port_id_t s_id,
 static struct qla_tgt_func_tmpl tcm_qla2xxx_template = {
        .handle_cmd             = tcm_qla2xxx_handle_cmd,
        .handle_data            = tcm_qla2xxx_handle_data,
-       .handle_dif_err         = tcm_qla2xxx_handle_dif_err,
        .handle_tmr             = tcm_qla2xxx_handle_tmr,
        .free_cmd               = tcm_qla2xxx_free_cmd,
        .free_mcmd              = tcm_qla2xxx_free_mcmd,
@@ -1622,6 +1637,8 @@ static struct qla_tgt_func_tmpl tcm_qla2xxx_template = {
        .clear_nacl_from_fcport_map = tcm_qla2xxx_clear_nacl_from_fcport_map,
        .put_sess               = tcm_qla2xxx_put_sess,
        .shutdown_sess          = tcm_qla2xxx_shutdown_sess,
+       .get_dif_tags           = tcm_qla2xxx_dif_tags,
+       .chk_dif_tags           = tcm_qla2xxx_chk_dif_tags,
 };
 
 static int tcm_qla2xxx_init_lport(struct tcm_qla2xxx_lport *lport)
index ba2286652ff647f285761e046f3699bed5c39eba..19125d72f322c934d84d2d71c748ca9c08418846 100644 (file)
@@ -2932,6 +2932,8 @@ EXPORT_SYMBOL(scsi_target_resume);
 /**
  * scsi_internal_device_block - internal function to put a device temporarily into the SDEV_BLOCK state
  * @sdev:      device to block
+ * @wait:      Whether or not to wait until ongoing .queuecommand() /
+ *             .queue_rq() calls have finished.
  *
  * Block request made by scsi lld's to temporarily stop all
  * scsi commands on the specified device. May sleep.
@@ -2949,7 +2951,7 @@ EXPORT_SYMBOL(scsi_target_resume);
  * remove the rport mutex lock and unlock calls from srp_queuecommand().
  */
 int
-scsi_internal_device_block(struct scsi_device *sdev)
+scsi_internal_device_block(struct scsi_device *sdev, bool wait)
 {
        struct request_queue *q = sdev->request_queue;
        unsigned long flags;
@@ -2969,12 +2971,16 @@ scsi_internal_device_block(struct scsi_device *sdev)
         * request queue. 
         */
        if (q->mq_ops) {
-               blk_mq_quiesce_queue(q);
+               if (wait)
+                       blk_mq_quiesce_queue(q);
+               else
+                       blk_mq_stop_hw_queues(q);
        } else {
                spin_lock_irqsave(q->queue_lock, flags);
                blk_stop_queue(q);
                spin_unlock_irqrestore(q->queue_lock, flags);
-               scsi_wait_for_queuecommand(sdev);
+               if (wait)
+                       scsi_wait_for_queuecommand(sdev);
        }
 
        return 0;
@@ -3036,7 +3042,7 @@ EXPORT_SYMBOL_GPL(scsi_internal_device_unblock);
 static void
 device_block(struct scsi_device *sdev, void *data)
 {
-       scsi_internal_device_block(sdev);
+       scsi_internal_device_block(sdev, true);
 }
 
 static int
index 99bfc985e1903bcffd10762d52f1a72448aaed96..f11bd102d6d5d6b5a390762448002cd5b0f357d7 100644 (file)
@@ -188,8 +188,5 @@ static inline void scsi_dh_remove_device(struct scsi_device *sdev) { }
  */
 
 #define SCSI_DEVICE_BLOCK_MAX_TIMEOUT  600     /* units in seconds */
-extern int scsi_internal_device_block(struct scsi_device *sdev);
-extern int scsi_internal_device_unblock(struct scsi_device *sdev,
-                                       enum scsi_device_state new_state);
 
 #endif /* _SCSI_PRIV_H */
index c7839f6c35ccc479c8f7a044407b35f203cd3102..fcfeddc79331bbf32a71e296cf606513ae5b3d78 100644 (file)
@@ -1783,6 +1783,8 @@ static int sd_done(struct scsi_cmnd *SCpnt)
 {
        int result = SCpnt->result;
        unsigned int good_bytes = result ? 0 : scsi_bufflen(SCpnt);
+       unsigned int sector_size = SCpnt->device->sector_size;
+       unsigned int resid;
        struct scsi_sense_hdr sshdr;
        struct scsi_disk *sdkp = scsi_disk(SCpnt->request->rq_disk);
        struct request *req = SCpnt->request;
@@ -1813,6 +1815,21 @@ static int sd_done(struct scsi_cmnd *SCpnt)
                        scsi_set_resid(SCpnt, blk_rq_bytes(req));
                }
                break;
+       default:
+               /*
+                * In case of bogus fw or device, we could end up having
+                * an unaligned partial completion. Check this here and force
+                * alignment.
+                */
+               resid = scsi_get_resid(SCpnt);
+               if (resid & (sector_size - 1)) {
+                       sd_printk(KERN_INFO, sdkp,
+                               "Unaligned partial completion (resid=%u, sector_sz=%u)\n",
+                               resid, sector_size);
+                       resid = min(scsi_bufflen(SCpnt),
+                                   round_up(resid, sector_size));
+                       scsi_set_resid(SCpnt, resid);
+               }
        }
 
        if (result) {
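
The rounding done in the new default: case can be exercised in isolation. This standalone sketch reimplements round_up() and min() locally (they are kernel macros) and mirrors the alignment fix:

#include <stdio.h>

/* Local stand-ins for the kernel's round_up() and min() helpers. */
#define ROUND_UP(x, y)  ((((x) + (y) - 1) / (y)) * (y))
#define MIN(a, b)       ((a) < (b) ? (a) : (b))

/* Mirrors the default: case above: force an unaligned residual up to the
 * next sector boundary, capped at the total transfer length. */
static unsigned int fix_resid(unsigned int resid, unsigned int sector_size,
                              unsigned int bufflen)
{
        if (resid & (sector_size - 1))  /* sector_size is a power of two */
                resid = MIN(bufflen, ROUND_UP(resid, sector_size));
        return resid;
}

int main(void)
{
        /* 1000 residual bytes on a 512-byte-sector disk -> rounded to 1024 */
        printf("%u\n", fix_resid(1000, 512, 4096));
        return 0;
}
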
@@ -3075,23 +3092,6 @@ static void sd_probe_async(void *data, async_cookie_t cookie)
        put_device(&sdkp->dev);
 }
 
-struct sd_devt {
-       int idx;
-       struct disk_devt disk_devt;
-};
-
-static void sd_devt_release(struct disk_devt *disk_devt)
-{
-       struct sd_devt *sd_devt = container_of(disk_devt, struct sd_devt,
-                       disk_devt);
-
-       spin_lock(&sd_index_lock);
-       ida_remove(&sd_index_ida, sd_devt->idx);
-       spin_unlock(&sd_index_lock);
-
-       kfree(sd_devt);
-}
-
 /**
  *     sd_probe - called during driver initialization and whenever a
  *     new scsi device is attached to the system. It is called once
@@ -3113,7 +3113,6 @@ static void sd_devt_release(struct disk_devt *disk_devt)
 static int sd_probe(struct device *dev)
 {
        struct scsi_device *sdp = to_scsi_device(dev);
-       struct sd_devt *sd_devt;
        struct scsi_disk *sdkp;
        struct gendisk *gd;
        int index;
@@ -3139,13 +3138,9 @@ static int sd_probe(struct device *dev)
        if (!sdkp)
                goto out;
 
-       sd_devt = kzalloc(sizeof(*sd_devt), GFP_KERNEL);
-       if (!sd_devt)
-               goto out_free;
-
        gd = alloc_disk(SD_MINORS);
        if (!gd)
-               goto out_free_devt;
+               goto out_free;
 
        do {
                if (!ida_pre_get(&sd_index_ida, GFP_KERNEL))
@@ -3161,11 +3156,6 @@ static int sd_probe(struct device *dev)
                goto out_put;
        }
 
-       atomic_set(&sd_devt->disk_devt.count, 1);
-       sd_devt->disk_devt.release = sd_devt_release;
-       sd_devt->idx = index;
-       gd->disk_devt = &sd_devt->disk_devt;
-
        error = sd_format_disk_name("sd", index, gd->disk_name, DISK_NAME_LEN);
        if (error) {
                sdev_printk(KERN_WARNING, sdp, "SCSI disk (sd) name length exceeded.\n");
@@ -3205,12 +3195,11 @@ static int sd_probe(struct device *dev)
        return 0;
 
  out_free_index:
-       put_disk_devt(&sd_devt->disk_devt);
-       sd_devt = NULL;
+       spin_lock(&sd_index_lock);
+       ida_remove(&sd_index_ida, index);
+       spin_unlock(&sd_index_lock);
  out_put:
        put_disk(gd);
- out_free_devt:
-       kfree(sd_devt);
  out_free:
        kfree(sdkp);
  out:
@@ -3271,7 +3260,10 @@ static void scsi_disk_release(struct device *dev)
        struct scsi_disk *sdkp = to_scsi_disk(dev);
        struct gendisk *disk = sdkp->disk;
        
-       put_disk_devt(disk->disk_devt);
+       spin_lock(&sd_index_lock);
+       ida_remove(&sd_index_ida, sdkp->index);
+       spin_unlock(&sd_index_lock);
+
        disk->private_data = NULL;
        put_disk(disk);
        put_device(&sdkp->device->sdev_gendev);
index 29b86505f796d9fa57c19b958c1c7ae4ef31b298..225abaad4d1cc86f462574a8143bc2ffb2cc69a8 100644 (file)
@@ -996,6 +996,8 @@ sg_ioctl(struct file *filp, unsigned int cmd_in, unsigned long arg)
                result = get_user(val, ip);
                if (result)
                        return result;
+               if (val > SG_MAX_CDB_SIZE)
+                       return -ENOMEM;
                sfp->next_cmd_len = (val > 0) ? val : 0;
                return 0;
        case SG_GET_VERSION_NUM:
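
A self-contained sketch of the added bounds check; the SG_MAX_CDB_SIZE value of 252 is an assumption mirroring drivers/scsi/sg.c, and set_next_cmd_len() is a hypothetical helper, not the sg ioctl itself:

#include <errno.h>
#include <stdio.h>

#define SG_MAX_CDB_SIZE 252     /* assumption: mirrors drivers/scsi/sg.c */

/* Hypothetical helper mirroring the added check: reject an over-long
 * user-supplied CDB length, normalize non-positive values to zero. */
static int set_next_cmd_len(int val, int *next_cmd_len)
{
        if (val > SG_MAX_CDB_SIZE)
                return -ENOMEM;         /* the error code the hunk returns */
        *next_cmd_len = (val > 0) ? val : 0;
        return 0;
}

int main(void)
{
        int len = 0;

        printf("%d\n", set_next_cmd_len(300, &len));        /* -ENOMEM */
        printf("%d %d\n", set_next_cmd_len(16, &len), len); /* 0 16 */
        return 0;
}
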
index 638e5f427c901fddee96a53328cd8cc76f1368be..016639d7fef176da5e54ddc5d4d32e110ea0656a 100644 (file)
@@ -400,8 +400,6 @@ MODULE_PARM_DESC(storvsc_vcpus_per_sub_channel, "Ratio of VCPUs to subchannels")
  */
 static int storvsc_timeout = 180;
 
-static int msft_blist_flags = BLIST_TRY_VPD_PAGES;
-
 #if IS_ENABLED(CONFIG_SCSI_FC_ATTRS)
 static struct scsi_transport_template *fc_transport_template;
 #endif
@@ -1383,6 +1381,22 @@ static int storvsc_do_io(struct hv_device *device,
        return ret;
 }
 
+static int storvsc_device_alloc(struct scsi_device *sdevice)
+{
+       /*
+        * Set blist flag to permit the reading of the VPD pages even when
+        * the target may claim SPC-2 compliance. MSFT targets currently
+        * claim SPC-2 compliance while they implement post SPC-2 features.
+        * With this flag we can correctly handle WRITE_SAME_16 issues.
+        *
+        * The hypervisor reports SCSI_UNKNOWN type for the DVD ROM device but
+        * it still supports REPORT LUN.
+        */
+       sdevice->sdev_bflags = BLIST_REPORTLUN2 | BLIST_TRY_VPD_PAGES;
+
+       return 0;
+}
+
 static int storvsc_device_configure(struct scsi_device *sdevice)
 {
 
@@ -1395,14 +1409,6 @@ static int storvsc_device_configure(struct scsi_device *sdevice)
 
        sdevice->no_write_same = 1;
 
-       /*
-        * Add blist flags to permit the reading of the VPD pages even when
-        * the target may claim SPC-2 compliance. MSFT targets currently
-        * claim SPC-2 compliance while they implement post SPC-2 features.
-        * With this patch we can correctly handle WRITE_SAME_16 issues.
-        */
-       sdevice->sdev_bflags |= msft_blist_flags;
-
        /*
         * If the host is WIN8 or WIN8 R2, claim conformance to SPC-3
         * if the device is a MSFT virtual device.  If the host is
@@ -1661,6 +1667,7 @@ static struct scsi_host_template scsi_driver = {
        .eh_host_reset_handler =        storvsc_host_reset_handler,
        .proc_name =            "storvsc_host",
        .eh_timed_out =         storvsc_eh_timed_out,
+       .slave_alloc =          storvsc_device_alloc,
        .slave_configure =      storvsc_device_configure,
        .cmd_per_lun =          255,
        .this_id =              -1,
index 318e4a1f76c92bab27954b5cc29a3d374c8eb6e7..54deeb754db5fccf7d918b604916b30d8defa8e5 100644 (file)
@@ -146,7 +146,7 @@ enum attr_idn {
 /* Descriptor idn for Query requests */
 enum desc_idn {
        QUERY_DESC_IDN_DEVICE           = 0x0,
-       QUERY_DESC_IDN_CONFIGURAION     = 0x1,
+       QUERY_DESC_IDN_CONFIGURATION    = 0x1,
        QUERY_DESC_IDN_UNIT             = 0x2,
        QUERY_DESC_IDN_RFU_0            = 0x3,
        QUERY_DESC_IDN_INTERCONNECT     = 0x4,
@@ -162,19 +162,13 @@ enum desc_header_offset {
        QUERY_DESC_DESC_TYPE_OFFSET     = 0x01,
 };
 
-enum ufs_desc_max_size {
-       QUERY_DESC_DEVICE_MAX_SIZE              = 0x40,
-       QUERY_DESC_CONFIGURAION_MAX_SIZE        = 0x90,
-       QUERY_DESC_UNIT_MAX_SIZE                = 0x23,
-       QUERY_DESC_INTERCONNECT_MAX_SIZE        = 0x06,
-       /*
-        * Max. 126 UNICODE characters (2 bytes per character) plus 2 bytes
-        * of descriptor header.
-        */
-       QUERY_DESC_STRING_MAX_SIZE              = 0xFE,
-       QUERY_DESC_GEOMETRY_MAX_SIZE            = 0x44,
-       QUERY_DESC_POWER_MAX_SIZE               = 0x62,
-       QUERY_DESC_RFU_MAX_SIZE                 = 0x00,
+enum ufs_desc_def_size {
+       QUERY_DESC_DEVICE_DEF_SIZE              = 0x40,
+       QUERY_DESC_CONFIGURATION_DEF_SIZE       = 0x90,
+       QUERY_DESC_UNIT_DEF_SIZE                = 0x23,
+       QUERY_DESC_INTERCONNECT_DEF_SIZE        = 0x06,
+       QUERY_DESC_GEOMETRY_DEF_SIZE            = 0x44,
+       QUERY_DESC_POWER_DEF_SIZE               = 0x62,
 };
 
 /* Unit descriptor parameter offsets in bytes */
index a72a4ba78125b09a135c781533d46b1cbf0bc042..8e5e6c04c035e1e134d84670ff19459597df35b1 100644 (file)
@@ -309,8 +309,8 @@ int ufshcd_pltfrm_init(struct platform_device *pdev,
 
        mem_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        mmio_base = devm_ioremap_resource(dev, mem_res);
-       if (IS_ERR(*(void **)&mmio_base)) {
-               err = PTR_ERR(*(void **)&mmio_base);
+       if (IS_ERR(mmio_base)) {
+               err = PTR_ERR(mmio_base);
                goto out;
        }
 
index dc6efbd1be8ef344bb994589054c24843e442230..096e95b911bd7b29abf2c06a58191b76ce0a4139 100644 (file)
 #define ufshcd_hex_dump(prefix_str, buf, len) \
 print_hex_dump(KERN_ERR, prefix_str, DUMP_PREFIX_OFFSET, 16, 4, buf, len, false)
 
-static u32 ufs_query_desc_max_size[] = {
-       QUERY_DESC_DEVICE_MAX_SIZE,
-       QUERY_DESC_CONFIGURAION_MAX_SIZE,
-       QUERY_DESC_UNIT_MAX_SIZE,
-       QUERY_DESC_RFU_MAX_SIZE,
-       QUERY_DESC_INTERCONNECT_MAX_SIZE,
-       QUERY_DESC_STRING_MAX_SIZE,
-       QUERY_DESC_RFU_MAX_SIZE,
-       QUERY_DESC_GEOMETRY_MAX_SIZE,
-       QUERY_DESC_POWER_MAX_SIZE,
-       QUERY_DESC_RFU_MAX_SIZE,
-};
-
 enum {
        UFSHCD_MAX_CHANNEL      = 0,
        UFSHCD_MAX_ID           = 1,
@@ -2857,7 +2844,7 @@ static int __ufshcd_query_descriptor(struct ufs_hba *hba,
                goto out;
        }
 
-       if (*buf_len <= QUERY_DESC_MIN_SIZE || *buf_len > QUERY_DESC_MAX_SIZE) {
+       if (*buf_len < QUERY_DESC_MIN_SIZE || *buf_len > QUERY_DESC_MAX_SIZE) {
                dev_err(hba->dev, "%s: descriptor buffer size (%d) is out of range\n",
                                __func__, *buf_len);
                err = -EINVAL;
@@ -2937,6 +2924,92 @@ static int ufshcd_query_descriptor_retry(struct ufs_hba *hba,
        return err;
 }
 
+/**
+ * ufshcd_read_desc_length - read the specified descriptor length from header
+ * @hba: Pointer to adapter instance
+ * @desc_id: descriptor idn value
+ * @desc_index: descriptor index
+ * @desc_length: pointer to variable to read the length of descriptor
+ *
+ * Return 0 in case of success, non-zero otherwise
+ */
+static int ufshcd_read_desc_length(struct ufs_hba *hba,
+       enum desc_idn desc_id,
+       int desc_index,
+       int *desc_length)
+{
+       int ret;
+       u8 header[QUERY_DESC_HDR_SIZE];
+       int header_len = QUERY_DESC_HDR_SIZE;
+
+       if (desc_id >= QUERY_DESC_IDN_MAX)
+               return -EINVAL;
+
+       ret = ufshcd_query_descriptor_retry(hba, UPIU_QUERY_OPCODE_READ_DESC,
+                                       desc_id, desc_index, 0, header,
+                                       &header_len);
+
+       if (ret) {
+               dev_err(hba->dev, "%s: Failed to get descriptor header id %d",
+                       __func__, desc_id);
+               return ret;
+       } else if (desc_id != header[QUERY_DESC_DESC_TYPE_OFFSET]) {
+               dev_warn(hba->dev, "%s: descriptor header id %d and desc_id %d mismatch",
+                       __func__, header[QUERY_DESC_DESC_TYPE_OFFSET],
+                       desc_id);
+               ret = -EINVAL;
+       }
+
+       *desc_length = header[QUERY_DESC_LENGTH_OFFSET];
+       return ret;
+}
+
+/**
+ * ufshcd_map_desc_id_to_length - map descriptor IDN to its length
+ * @hba: Pointer to adapter instance
+ * @desc_id: descriptor idn value
+ * @desc_len: mapped desc length (out)
+ *
+ * Return 0 in case of success, non-zero otherwise
+ */
+int ufshcd_map_desc_id_to_length(struct ufs_hba *hba,
+       enum desc_idn desc_id, int *desc_len)
+{
+       switch (desc_id) {
+       case QUERY_DESC_IDN_DEVICE:
+               *desc_len = hba->desc_size.dev_desc;
+               break;
+       case QUERY_DESC_IDN_POWER:
+               *desc_len = hba->desc_size.pwr_desc;
+               break;
+       case QUERY_DESC_IDN_GEOMETRY:
+               *desc_len = hba->desc_size.geom_desc;
+               break;
+       case QUERY_DESC_IDN_CONFIGURATION:
+               *desc_len = hba->desc_size.conf_desc;
+               break;
+       case QUERY_DESC_IDN_UNIT:
+               *desc_len = hba->desc_size.unit_desc;
+               break;
+       case QUERY_DESC_IDN_INTERCONNECT:
+               *desc_len = hba->desc_size.interc_desc;
+               break;
+       case QUERY_DESC_IDN_STRING:
+               *desc_len = QUERY_DESC_MAX_SIZE;
+               break;
+       case QUERY_DESC_IDN_RFU_0:
+       case QUERY_DESC_IDN_RFU_1:
+               *desc_len = 0;
+               break;
+       default:
+               *desc_len = 0;
+               return -EINVAL;
+       }
+       return 0;
+}
+EXPORT_SYMBOL(ufshcd_map_desc_id_to_length);
+
 /**
  * ufshcd_read_desc_param - read the specified descriptor parameter
  * @hba: Pointer to adapter instance
@@ -2951,42 +3024,49 @@ static int ufshcd_query_descriptor_retry(struct ufs_hba *hba,
 static int ufshcd_read_desc_param(struct ufs_hba *hba,
                                  enum desc_idn desc_id,
                                  int desc_index,
-                                 u32 param_offset,
+                                 u8 param_offset,
                                  u8 *param_read_buf,
-                                 u32 param_size)
+                                 u8 param_size)
 {
        int ret;
        u8 *desc_buf;
-       u32 buff_len;
+       int buff_len;
        bool is_kmalloc = true;
 
-       /* safety checks */
-       if (desc_id >= QUERY_DESC_IDN_MAX)
+       /* Safety check */
+       if (desc_id >= QUERY_DESC_IDN_MAX || !param_size)
                return -EINVAL;
 
-       buff_len = ufs_query_desc_max_size[desc_id];
-       if ((param_offset + param_size) > buff_len)
-               return -EINVAL;
+       /* Get the max length of the descriptor from the structure filled in
+        * at probe time.
+        */
+       ret = ufshcd_map_desc_id_to_length(hba, desc_id, &buff_len);
 
-       if (!param_offset && (param_size == buff_len)) {
-               /* memory space already available to hold full descriptor */
-               desc_buf = param_read_buf;
-               is_kmalloc = false;
-       } else {
-               /* allocate memory to hold full descriptor */
+       /* Sanity checks */
+       if (ret || !buff_len) {
+               dev_err(hba->dev, "%s: Failed to get full descriptor length",
+                       __func__);
+               return ret;
+       }
+
+       /* Check whether we need temp memory */
+       if (param_offset != 0 || param_size < buff_len) {
                desc_buf = kmalloc(buff_len, GFP_KERNEL);
                if (!desc_buf)
                        return -ENOMEM;
+       } else {
+               desc_buf = param_read_buf;
+               is_kmalloc = false;
        }
 
+       /* Request the full descriptor */
        ret = ufshcd_query_descriptor_retry(hba, UPIU_QUERY_OPCODE_READ_DESC,
-                                       desc_id, desc_index, 0, desc_buf,
-                                       &buff_len);
+                                       desc_id, desc_index, 0,
+                                       desc_buf, &buff_len);
 
        if (ret) {
                dev_err(hba->dev, "%s: Failed reading descriptor. desc_id %d, desc_index %d, param_offset %d, ret %d",
                        __func__, desc_id, desc_index, param_offset, ret);
-
                goto out;
        }
 
@@ -2998,25 +3078,9 @@ static int ufshcd_read_desc_param(struct ufs_hba *hba,
                goto out;
        }
 
-       /*
-        * While reading variable size descriptors (like string descriptor),
-        * some UFS devices may report the "LENGTH" (field in "Transaction
-        * Specific fields" of Query Response UPIU) same as what was requested
-        * in Query Request UPIU instead of reporting the actual size of the
-        * variable size descriptor.
-        * Although it's safe to ignore the "LENGTH" field for variable size
-        * descriptors as we can always derive the length of the descriptor from
-        * the descriptor header fields. Hence this change impose the length
-        * match check only for fixed size descriptors (for which we always
-        * request the correct size as part of Query Request UPIU).
-        */
-       if ((desc_id != QUERY_DESC_IDN_STRING) &&
-           (buff_len != desc_buf[QUERY_DESC_LENGTH_OFFSET])) {
-               dev_err(hba->dev, "%s: desc_buf length mismatch: buff_len %d, buff_len(desc_header) %d",
-                       __func__, buff_len, desc_buf[QUERY_DESC_LENGTH_OFFSET]);
-               ret = -EINVAL;
-               goto out;
-       }
+       /* Check that we do not copy more data than is available */
+       if (is_kmalloc && param_size > buff_len)
+               param_size = buff_len;
 
        if (is_kmalloc)
                memcpy(param_read_buf, &desc_buf[param_offset], param_size);
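
The temp-buffer strategy above can be summarized in a standalone sketch; read_full_desc() is a hypothetical stand-in for ufshcd_query_descriptor_retry(), and the clamp corresponds to the "do not copy more than available" check:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Hypothetical stand-in for the query helper: fills the buffer with a
 * fake descriptor of the requested length and reports success. */
static int read_full_desc(unsigned char *buf, int len)
{
        for (int i = 0; i < len; i++)
                buf[i] = (unsigned char)i;
        return 0;
}

/* Sketch of ufshcd_read_desc_param(): when only a slice is wanted
 * (offset != 0 or size < full length), read the whole descriptor into
 * scratch memory, clamp the slice, then copy it out. */
static int read_desc_param(int full_len, unsigned char off,
                           unsigned char *out, unsigned char size)
{
        int need_copy = (off != 0 || size < full_len);
        unsigned char *buf = need_copy ? malloc(full_len) : out;
        int ret;

        if (!buf)
                return -1;
        ret = read_full_desc(buf, full_len);
        if (!ret && need_copy) {
                if (size > full_len)    /* never copy past what the device gave us */
                        size = (unsigned char)full_len;
                memcpy(out, buf + off, size);
        }
        if (need_copy)
                free(buf);
        return ret;
}

int main(void)
{
        unsigned char param[4];

        read_desc_param(0x40, 0x10, param, sizeof(param));
        printf("%02x %02x %02x %02x\n", param[0], param[1], param[2], param[3]);
        return 0;
}
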
@@ -4598,8 +4662,6 @@ static void __ufshcd_transfer_req_compl(struct ufs_hba *hba,
                }
                if (ufshcd_is_clkscaling_supported(hba))
                        hba->clk_scaling.active_reqs--;
-               if (ufshcd_is_clkscaling_supported(hba))
-                       hba->clk_scaling.active_reqs--;
        }
 
        /* clear corresponding bits of completed commands */
@@ -5919,8 +5981,8 @@ static int ufshcd_set_icc_levels_attr(struct ufs_hba *hba, u32 icc_level)
 static void ufshcd_init_icc_levels(struct ufs_hba *hba)
 {
        int ret;
-       int buff_len = QUERY_DESC_POWER_MAX_SIZE;
-       u8 desc_buf[QUERY_DESC_POWER_MAX_SIZE];
+       int buff_len = hba->desc_size.pwr_desc;
+       u8 desc_buf[hba->desc_size.pwr_desc];
 
        ret = ufshcd_read_power_desc(hba, desc_buf, buff_len);
        if (ret) {
@@ -6017,11 +6079,10 @@ static int ufs_get_device_desc(struct ufs_hba *hba,
 {
        int err;
        u8 model_index;
-       u8 str_desc_buf[QUERY_DESC_STRING_MAX_SIZE + 1] = {0};
-       u8 desc_buf[QUERY_DESC_DEVICE_MAX_SIZE];
+       u8 str_desc_buf[QUERY_DESC_MAX_SIZE + 1] = {0};
+       u8 desc_buf[hba->desc_size.dev_desc];
 
-       err = ufshcd_read_device_desc(hba, desc_buf,
-                                       QUERY_DESC_DEVICE_MAX_SIZE);
+       err = ufshcd_read_device_desc(hba, desc_buf, hba->desc_size.dev_desc);
        if (err) {
                dev_err(hba->dev, "%s: Failed reading Device Desc. err = %d\n",
                        __func__, err);
@@ -6038,14 +6099,14 @@ static int ufs_get_device_desc(struct ufs_hba *hba,
        model_index = desc_buf[DEVICE_DESC_PARAM_PRDCT_NAME];
 
        err = ufshcd_read_string_desc(hba, model_index, str_desc_buf,
-                                       QUERY_DESC_STRING_MAX_SIZE, ASCII_STD);
+                               QUERY_DESC_MAX_SIZE, ASCII_STD);
        if (err) {
                dev_err(hba->dev, "%s: Failed reading Product Name. err = %d\n",
                        __func__, err);
                goto out;
        }
 
-       str_desc_buf[QUERY_DESC_STRING_MAX_SIZE] = '\0';
+       str_desc_buf[QUERY_DESC_MAX_SIZE] = '\0';
        strlcpy(dev_desc->model, (str_desc_buf + QUERY_DESC_HDR_SIZE),
                min_t(u8, str_desc_buf[QUERY_DESC_LENGTH_OFFSET],
                      MAX_MODEL_LEN));
@@ -6251,6 +6312,51 @@ static void ufshcd_clear_dbg_ufs_stats(struct ufs_hba *hba)
        hba->req_abort_count = 0;
 }
 
+static void ufshcd_init_desc_sizes(struct ufs_hba *hba)
+{
+       int err;
+
+       err = ufshcd_read_desc_length(hba, QUERY_DESC_IDN_DEVICE, 0,
+               &hba->desc_size.dev_desc);
+       if (err)
+               hba->desc_size.dev_desc = QUERY_DESC_DEVICE_DEF_SIZE;
+
+       err = ufshcd_read_desc_length(hba, QUERY_DESC_IDN_POWER, 0,
+               &hba->desc_size.pwr_desc);
+       if (err)
+               hba->desc_size.pwr_desc = QUERY_DESC_POWER_DEF_SIZE;
+
+       err = ufshcd_read_desc_length(hba, QUERY_DESC_IDN_INTERCONNECT, 0,
+               &hba->desc_size.interc_desc);
+       if (err)
+               hba->desc_size.interc_desc = QUERY_DESC_INTERCONNECT_DEF_SIZE;
+
+       err = ufshcd_read_desc_length(hba, QUERY_DESC_IDN_CONFIGURATION, 0,
+               &hba->desc_size.conf_desc);
+       if (err)
+               hba->desc_size.conf_desc = QUERY_DESC_CONFIGURATION_DEF_SIZE;
+
+       err = ufshcd_read_desc_length(hba, QUERY_DESC_IDN_UNIT, 0,
+               &hba->desc_size.unit_desc);
+       if (err)
+               hba->desc_size.unit_desc = QUERY_DESC_UNIT_DEF_SIZE;
+
+       err = ufshcd_read_desc_length(hba, QUERY_DESC_IDN_GEOMETRY, 0,
+               &hba->desc_size.geom_desc);
+       if (err)
+               hba->desc_size.geom_desc = QUERY_DESC_GEOMETRY_DEF_SIZE;
+}
+
+static void ufshcd_def_desc_sizes(struct ufs_hba *hba)
+{
+       hba->desc_size.dev_desc = QUERY_DESC_DEVICE_DEF_SIZE;
+       hba->desc_size.pwr_desc = QUERY_DESC_POWER_DEF_SIZE;
+       hba->desc_size.interc_desc = QUERY_DESC_INTERCONNECT_DEF_SIZE;
+       hba->desc_size.conf_desc = QUERY_DESC_CONFIGURATION_DEF_SIZE;
+       hba->desc_size.unit_desc = QUERY_DESC_UNIT_DEF_SIZE;
+       hba->desc_size.geom_desc = QUERY_DESC_GEOMETRY_DEF_SIZE;
+}
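
A compact sketch of the probe-time pattern used by ufshcd_init_desc_sizes(): ask the device for the descriptor length and fall back to the specification default on failure. read_desc_length() here is a deliberately failing stub, not the driver function:

#include <stdio.h>

#define QUERY_DESC_DEVICE_DEF_SIZE 0x40  /* spec default, as in ufs.h above */

/* Stub that pretends the device failed to answer, forcing the fallback. */
static int read_desc_length(int *len)
{
        (void)len;
        return -1;
}

int main(void)
{
        int dev_desc;

        /* Same pattern as ufshcd_init_desc_sizes(): device-reported length
         * first, specification default on error. */
        if (read_desc_length(&dev_desc))
                dev_desc = QUERY_DESC_DEVICE_DEF_SIZE;

        printf("device descriptor length: %#x\n", dev_desc);
        return 0;
}
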
+
 /**
  * ufshcd_probe_hba - probe hba to detect device and initialize
  * @hba: per-adapter instance
@@ -6285,6 +6391,9 @@ static int ufshcd_probe_hba(struct ufs_hba *hba)
        if (ret)
                goto out;
 
+       /* Initialize the descriptor sizes reported by the device */
+       ufshcd_init_desc_sizes(hba);
+
        ret = ufs_get_device_desc(hba, &card);
        if (ret) {
                dev_err(hba->dev, "%s: Failed getting device info. err = %d\n",
@@ -6320,6 +6429,7 @@ static int ufshcd_probe_hba(struct ufs_hba *hba)
 
        /* set the state as operational after switching to desired gear */
        hba->ufshcd_state = UFSHCD_STATE_OPERATIONAL;
+
        /*
         * If we are in error handling context or in power management callbacks
         * context, no need to scan the host
@@ -7530,7 +7640,7 @@ static inline ssize_t ufshcd_pm_lvl_store(struct device *dev,
        if (kstrtoul(buf, 0, &value))
                return -EINVAL;
 
-       if ((value < UFS_PM_LVL_0) || (value >= UFS_PM_LVL_MAX))
+       if (value >= UFS_PM_LVL_MAX)
                return -EINVAL;
 
        spin_lock_irqsave(hba->host->host_lock, flags);
@@ -7774,6 +7884,9 @@ int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq)
        hba->mmio_base = mmio_base;
        hba->irq = irq;
 
+       /* Set descriptor lengths to specification defaults */
+       ufshcd_def_desc_sizes(hba);
+
        err = ufshcd_hba_init(hba);
        if (err)
                goto out_error;
index 7630600217a2ef91e7d3629e5a4d2a0716c24c2a..cdc8bd05f7dfcf7189a4a616bb90c2ea3a8e2ece 100644 (file)
@@ -220,6 +220,15 @@ struct ufs_dev_cmd {
        struct ufs_query query;
 };
 
+struct ufs_desc_size {
+       int dev_desc;
+       int pwr_desc;
+       int geom_desc;
+       int interc_desc;
+       int unit_desc;
+       int conf_desc;
+};
+
 /**
  * struct ufs_clk_info - UFS clock related info
  * @list: list headed by hba->clk_list_head
@@ -483,6 +492,7 @@ struct ufs_stats {
  * @clk_list_head: UFS host controller clocks list node head
  * @pwr_info: holds current power mode
  * @max_pwr_info: keeps the device max valid pwm
+ * @desc_size: descriptor sizes reported by device
  * @urgent_bkops_lvl: keeps track of urgent bkops level for device
  * @is_urgent_bkops_lvl_checked: keeps track if the urgent bkops level for
  *  device is known or not.
@@ -666,6 +676,7 @@ struct ufs_hba {
        bool is_urgent_bkops_lvl_checked;
 
        struct rw_semaphore clk_scaling_lock;
+       struct ufs_desc_size desc_size;
 };
 
 /* Returns true if clocks can be gated. Otherwise false */
@@ -832,6 +843,10 @@ int ufshcd_query_flag(struct ufs_hba *hba, enum query_opcode opcode,
        enum flag_idn idn, bool *flag_res);
 int ufshcd_hold(struct ufs_hba *hba, bool async);
 void ufshcd_release(struct ufs_hba *hba);
+
+int ufshcd_map_desc_id_to_length(struct ufs_hba *hba, enum desc_idn desc_id,
+       int *desc_length);
+
 u32 ufshcd_get_local_unipro_ver(struct ufs_hba *hba);
 
 /* Wrapper functions for safely calling variant operations */
index ef474a7487449b4c1d51f82643988eb08fc1ed86..c374e3b5c678d215bfa9e7ed33e2d033e5d4bfb3 100644 (file)
@@ -1487,7 +1487,7 @@ static int pvscsi_probe(struct pci_dev *pdev, const struct pci_device_id *id)
                irq_flag &= ~PCI_IRQ_MSI;
 
        error = pci_alloc_irq_vectors(adapter->dev, 1, 1, irq_flag);
-       if (error)
+       if (error < 0)
                goto out_reset_adapter;
 
        adapter->use_req_threshold = pvscsi_setup_req_threshold(adapter, true);
index 78b1bb7bcf20ab1e3c39d7d3817b3ae3570825be..9fca977ef18d2fd4638132988215dd5b2d327967 100644 (file)
@@ -33,17 +33,10 @@ config QCOM_SMEM
          The driver provides an interface to items in a heap shared among all
          processors in a Qualcomm platform.
 
-config QCOM_SMD
-       tristate "Qualcomm Shared Memory Driver (SMD)"
-       depends on QCOM_SMEM
-       help
-         Say y here to enable support for the Qualcomm Shared Memory Driver
-         providing communication channels to remote processors in Qualcomm
-         platforms.
-
 config QCOM_SMD_RPM
        tristate "Qualcomm Resource Power Manager (RPM) over SMD"
-       depends on QCOM_SMD && OF
+       depends on ARCH_QCOM
+       depends on RPMSG && OF
        help
          If you say yes to this option, support will be included for the
          Resource Power Manager system found in the Qualcomm 8974 based
@@ -76,7 +69,8 @@ config QCOM_SMSM
 
 config QCOM_WCNSS_CTRL
        tristate "Qualcomm WCNSS control driver"
-       depends on QCOM_SMD
+       depends on ARCH_QCOM
+       depends on RPMSG
        help
          Client driver for the WCNSS_CTRL SMD channel, used to download nv
          firmware to a newly booted WCNSS chip.
index 1f30260b06b8f39ffa04d51e9b13b41c19c7b988..414f0de274fae462c78d188bca7369c1e4795f85 100644 (file)
@@ -1,7 +1,6 @@
 obj-$(CONFIG_QCOM_GSBI)        +=      qcom_gsbi.o
 obj-$(CONFIG_QCOM_MDT_LOADER)  += mdt_loader.o
 obj-$(CONFIG_QCOM_PM)  +=      spm.o
-obj-$(CONFIG_QCOM_SMD) +=      smd.o
 obj-$(CONFIG_QCOM_SMD_RPM)     += smd-rpm.o
 obj-$(CONFIG_QCOM_SMEM) +=     smem.o
 obj-$(CONFIG_QCOM_SMEM_STATE) += smem_state.o
index 6609d7e0edb045c3f26e159ee8938037de397dd2..c2346752b3eaacdc64fb30bfefd2a53bc05add83 100644 (file)
@@ -19,7 +19,7 @@
 #include <linux/interrupt.h>
 #include <linux/slab.h>
 
-#include <linux/soc/qcom/smd.h>
+#include <linux/rpmsg.h>
 #include <linux/soc/qcom/smd-rpm.h>
 
 #define RPM_REQUEST_TIMEOUT     (5 * HZ)
@@ -32,7 +32,7 @@
  * @ack_status:                result of the rpm request
  */
 struct qcom_smd_rpm {
-       struct qcom_smd_channel *rpm_channel;
+       struct rpmsg_endpoint *rpm_channel;
        struct device *dev;
 
        struct completion ack;
@@ -133,7 +133,7 @@ int qcom_rpm_smd_write(struct qcom_smd_rpm *rpm,
        pkt->req.data_len = cpu_to_le32(count);
        memcpy(pkt->payload, buf, count);
 
-       ret = qcom_smd_send(rpm->rpm_channel, pkt, size);
+       ret = rpmsg_send(rpm->rpm_channel, pkt, size);
        if (ret)
                goto out;
 
@@ -150,14 +150,16 @@ out:
 }
 EXPORT_SYMBOL(qcom_rpm_smd_write);
 
-static int qcom_smd_rpm_callback(struct qcom_smd_channel *channel,
-                                const void *data,
-                                size_t count)
+static int qcom_smd_rpm_callback(struct rpmsg_device *rpdev,
+                                void *data,
+                                int count,
+                                void *priv,
+                                u32 addr)
 {
        const struct qcom_rpm_header *hdr = data;
        size_t hdr_length = le32_to_cpu(hdr->length);
        const struct qcom_rpm_message *msg;
-       struct qcom_smd_rpm *rpm = qcom_smd_get_drvdata(channel);
+       struct qcom_smd_rpm *rpm = dev_get_drvdata(&rpdev->dev);
        const u8 *buf = data + sizeof(struct qcom_rpm_header);
        const u8 *end = buf + hdr_length;
        char msgbuf[32];
@@ -196,59 +198,57 @@ static int qcom_smd_rpm_callback(struct qcom_smd_channel *channel,
        return 0;
 }
 
-static int qcom_smd_rpm_probe(struct qcom_smd_device *sdev)
+static int qcom_smd_rpm_probe(struct rpmsg_device *rpdev)
 {
        struct qcom_smd_rpm *rpm;
 
-       rpm = devm_kzalloc(&sdev->dev, sizeof(*rpm), GFP_KERNEL);
+       rpm = devm_kzalloc(&rpdev->dev, sizeof(*rpm), GFP_KERNEL);
        if (!rpm)
                return -ENOMEM;
 
        mutex_init(&rpm->lock);
        init_completion(&rpm->ack);
 
-       rpm->dev = &sdev->dev;
-       rpm->rpm_channel = sdev->channel;
-       qcom_smd_set_drvdata(sdev->channel, rpm);
+       rpm->dev = &rpdev->dev;
+       rpm->rpm_channel = rpdev->ept;
+       dev_set_drvdata(&rpdev->dev, rpm);
 
-       dev_set_drvdata(&sdev->dev, rpm);
-
-       return of_platform_populate(sdev->dev.of_node, NULL, NULL, &sdev->dev);
+       return of_platform_populate(rpdev->dev.of_node, NULL, NULL, &rpdev->dev);
 }
 
-static void qcom_smd_rpm_remove(struct qcom_smd_device *sdev)
+static void qcom_smd_rpm_remove(struct rpmsg_device *rpdev)
 {
-       of_platform_depopulate(&sdev->dev);
+       of_platform_depopulate(&rpdev->dev);
 }
 
 static const struct of_device_id qcom_smd_rpm_of_match[] = {
        { .compatible = "qcom,rpm-apq8084" },
        { .compatible = "qcom,rpm-msm8916" },
        { .compatible = "qcom,rpm-msm8974" },
+       { .compatible = "qcom,rpm-msm8996" },
        {}
 };
 MODULE_DEVICE_TABLE(of, qcom_smd_rpm_of_match);
 
-static struct qcom_smd_driver qcom_smd_rpm_driver = {
+static struct rpmsg_driver qcom_smd_rpm_driver = {
        .probe = qcom_smd_rpm_probe,
        .remove = qcom_smd_rpm_remove,
        .callback = qcom_smd_rpm_callback,
-       .driver  = {
+       .drv  = {
                .name  = "qcom_smd_rpm",
-               .owner = THIS_MODULE,
                .of_match_table = qcom_smd_rpm_of_match,
        },
 };
 
 static int __init qcom_smd_rpm_init(void)
 {
-       return qcom_smd_driver_register(&qcom_smd_rpm_driver);
+       return register_rpmsg_driver(&qcom_smd_rpm_driver);
 }
 arch_initcall(qcom_smd_rpm_init);
 
 static void __exit qcom_smd_rpm_exit(void)
 {
-       qcom_smd_driver_unregister(&qcom_smd_rpm_driver);
+       unregister_rpmsg_driver(&qcom_smd_rpm_driver);
 }
 module_exit(qcom_smd_rpm_exit);
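
For reference, a minimal rpmsg client skeleton using only the API surface this conversion relies on (probe, callback, rpmsg_send() on rpdev->ept, module_rpmsg_driver()); the driver name is illustrative and no match table is given, so this is a sketch rather than a bindable driver:

// SPDX-License-Identifier: GPL-2.0
#include <linux/module.h>
#include <linux/rpmsg.h>

/* Callback signature matches the converted qcom_smd_rpm_callback() above. */
static int demo_cb(struct rpmsg_device *rpdev, void *data, int len,
                   void *priv, u32 src)
{
        dev_info(&rpdev->dev, "received %d bytes from endpoint %u\n", len, src);
        return 0;
}

static int demo_probe(struct rpmsg_device *rpdev)
{
        static const char ping[] = "ping";

        /* rpmsg_send() on the primary endpoint replaces qcom_smd_send(). */
        return rpmsg_send(rpdev->ept, (void *)ping, sizeof(ping));
}

static struct rpmsg_driver demo_driver = {
        .probe    = demo_probe,
        .callback = demo_cb,
        .drv = {
                .name = "rpmsg_demo",   /* illustrative name */
        },
};
module_rpmsg_driver(demo_driver);

MODULE_DESCRIPTION("rpmsg client sketch");
MODULE_LICENSE("GPL v2");
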
 
diff --git a/drivers/soc/qcom/smd.c b/drivers/soc/qcom/smd.c
deleted file mode 100644 (file)
index 322034a..0000000
+++ /dev/null
@@ -1,1560 +0,0 @@
-/*
- * Copyright (c) 2015, Sony Mobile Communications AB.
- * Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- */
-
-#include <linux/interrupt.h>
-#include <linux/io.h>
-#include <linux/mfd/syscon.h>
-#include <linux/module.h>
-#include <linux/of_irq.h>
-#include <linux/of_platform.h>
-#include <linux/platform_device.h>
-#include <linux/regmap.h>
-#include <linux/sched.h>
-#include <linux/slab.h>
-#include <linux/soc/qcom/smd.h>
-#include <linux/soc/qcom/smem.h>
-#include <linux/wait.h>
-
-/*
- * The Qualcomm Shared Memory communication solution provides point-to-point
- * channels for clients to send and receive streaming or packet based data.
- *
- * Each channel consists of a control item (channel info) and a ring buffer
- * pair. The channel info carries information related to channel state, flow
- * control and the offsets within the ring buffer.
- *
- * All allocated channels are listed in an allocation table, identifying the
- * pair of items by name, type and remote processor.
- *
- * Upon creating a new channel the remote processor allocates channel info and
- * ring buffer items from the smem heap and populates the allocation table. An
- * interrupt is sent to the other end of the channel and a scan for new
- * channels should be done. A channel never goes away; it will only change
- * state.
- *
- * The remote processor signals its intent to bring up the communication
- * channel by setting the state of its end of the channel to "opening" and
- * sends out an interrupt. We detect this change and register an smd device to
- * consume the channel. Upon finding a consumer we finish the handshake and the
- * channel is up.
- *
- * Upon closing a channel, the remote processor will update the state of its
- * end of the channel and signal us, we will then unregister any attached
- * device and close our end of the channel.
- *
- * Devices attached to a channel can use the qcom_smd_send function to push
- * data to the channel, this is done by copying the data into the tx ring
- * buffer, updating the pointers in the channel info and signaling the remote
- * processor.
- *
- * The remote processor does the equivalent when it transfers data, and upon
- * receiving the interrupt we check the channel info for new data and deliver
- * it to the attached device. If the device is not ready to receive the data
- * we leave it in the ring buffer for now.
- */
-
-struct smd_channel_info;
-struct smd_channel_info_pair;
-struct smd_channel_info_word;
-struct smd_channel_info_word_pair;
-
-#define SMD_ALLOC_TBL_COUNT    2
-#define SMD_ALLOC_TBL_SIZE     64
-
-/*
- * This lists the various smem heap items relevant for the allocation table and
- * smd channel entries.
- */
-static const struct {
-       unsigned alloc_tbl_id;
-       unsigned info_base_id;
-       unsigned fifo_base_id;
-} smem_items[SMD_ALLOC_TBL_COUNT] = {
-       {
-               .alloc_tbl_id = 13,
-               .info_base_id = 14,
-               .fifo_base_id = 338
-       },
-       {
-               .alloc_tbl_id = 266,
-               .info_base_id = 138,
-               .fifo_base_id = 202,
-       },
-};
-
-/**
- * struct qcom_smd_edge - representing a remote processor
- * @dev:               device for this edge
- * @of_node:           of_node handle for information related to this edge
- * @edge_id:           identifier of this edge
- * @remote_pid:                identifier of remote processor
- * @irq:               interrupt for signals on this edge
- * @ipc_regmap:                regmap handle holding the outgoing ipc register
- * @ipc_offset:                offset within @ipc_regmap of the register for ipc
- * @ipc_bit:           bit in the register at @ipc_offset of @ipc_regmap
- * @channels:          list of all channels detected on this edge
- * @channels_lock:     guard for modifications of @channels
- * @allocated:         array of bitmaps representing already allocated channels
- * @smem_available:    last available amount of smem triggering a channel scan
- * @scan_work:         work item for discovering new channels
- * @state_work:                work item for edge state changes
- */
-struct qcom_smd_edge {
-       struct device dev;
-
-       struct device_node *of_node;
-       unsigned edge_id;
-       unsigned remote_pid;
-
-       int irq;
-
-       struct regmap *ipc_regmap;
-       int ipc_offset;
-       int ipc_bit;
-
-       struct list_head channels;
-       spinlock_t channels_lock;
-
-       DECLARE_BITMAP(allocated[SMD_ALLOC_TBL_COUNT], SMD_ALLOC_TBL_SIZE);
-
-       unsigned smem_available;
-
-       wait_queue_head_t new_channel_event;
-
-       struct work_struct scan_work;
-       struct work_struct state_work;
-};
-
-#define to_smd_edge(d) container_of(d, struct qcom_smd_edge, dev)
-
-/*
- * SMD channel states.
- */
-enum smd_channel_state {
-       SMD_CHANNEL_CLOSED,
-       SMD_CHANNEL_OPENING,
-       SMD_CHANNEL_OPENED,
-       SMD_CHANNEL_FLUSHING,
-       SMD_CHANNEL_CLOSING,
-       SMD_CHANNEL_RESET,
-       SMD_CHANNEL_RESET_OPENING
-};
-
-/**
- * struct qcom_smd_channel - smd channel struct
- * @edge:              qcom_smd_edge this channel is living on
- * @qsdev:             reference to an associated smd client device
- * @name:              name of the channel
- * @state:             local state of the channel
- * @remote_state:      remote state of the channel
- * @info:              byte aligned outgoing/incoming channel info
- * @info_word:         word aligned outgoing/incoming channel info
- * @tx_lock:           lock to make writes to the channel mutually exclusive
- * @fblockread_event:  wakeup event tied to tx fBLOCKREADINTR
- * @tx_fifo:           pointer to the outgoing ring buffer
- * @rx_fifo:           pointer to the incoming ring buffer
- * @fifo_size:         size of each ring buffer
- * @bounce_buffer:     bounce buffer for reading wrapped packets
- * @cb:                        callback function registered for this channel
- * @recv_lock:         guard for rx info modifications and cb pointer
- * @pkt_size:          size of the currently handled packet
- * @list:              list entry for @channels in qcom_smd_edge
- */
-struct qcom_smd_channel {
-       struct qcom_smd_edge *edge;
-
-       struct qcom_smd_device *qsdev;
-
-       char *name;
-       enum smd_channel_state state;
-       enum smd_channel_state remote_state;
-
-       struct smd_channel_info_pair *info;
-       struct smd_channel_info_word_pair *info_word;
-
-       struct mutex tx_lock;
-       wait_queue_head_t fblockread_event;
-
-       void *tx_fifo;
-       void *rx_fifo;
-       int fifo_size;
-
-       void *bounce_buffer;
-       qcom_smd_cb_t cb;
-
-       spinlock_t recv_lock;
-
-       int pkt_size;
-
-       void *drvdata;
-
-       struct list_head list;
-};
-
-/*
- * Format of the smd_info smem items, for byte aligned channels.
- */
-struct smd_channel_info {
-       __le32 state;
-       u8  fDSR;
-       u8  fCTS;
-       u8  fCD;
-       u8  fRI;
-       u8  fHEAD;
-       u8  fTAIL;
-       u8  fSTATE;
-       u8  fBLOCKREADINTR;
-       __le32 tail;
-       __le32 head;
-};
-
-struct smd_channel_info_pair {
-       struct smd_channel_info tx;
-       struct smd_channel_info rx;
-};
-
-/*
- * Format of the smd_info smem items, for word aligned channels.
- */
-struct smd_channel_info_word {
-       __le32 state;
-       __le32 fDSR;
-       __le32 fCTS;
-       __le32 fCD;
-       __le32 fRI;
-       __le32 fHEAD;
-       __le32 fTAIL;
-       __le32 fSTATE;
-       __le32 fBLOCKREADINTR;
-       __le32 tail;
-       __le32 head;
-};
-
-struct smd_channel_info_word_pair {
-       struct smd_channel_info_word tx;
-       struct smd_channel_info_word rx;
-};
-
-#define GET_RX_CHANNEL_FLAG(channel, param)                                 \
-       ({                                                                   \
-               BUILD_BUG_ON(sizeof(channel->info->rx.param) != sizeof(u8)); \
-               channel->info_word ?                                         \
-                       le32_to_cpu(channel->info_word->rx.param) :          \
-                       channel->info->rx.param;                             \
-       })
-
-#define GET_RX_CHANNEL_INFO(channel, param)                                  \
-       ({                                                                    \
-               BUILD_BUG_ON(sizeof(channel->info->rx.param) != sizeof(u32)); \
-               le32_to_cpu(channel->info_word ?                              \
-                       channel->info_word->rx.param :                        \
-                       channel->info->rx.param);                             \
-       })
-
-#define SET_RX_CHANNEL_FLAG(channel, param, value)                          \
-       ({                                                                   \
-               BUILD_BUG_ON(sizeof(channel->info->rx.param) != sizeof(u8)); \
-               if (channel->info_word)                                      \
-                       channel->info_word->rx.param = cpu_to_le32(value);   \
-               else                                                         \
-                       channel->info->rx.param = value;                     \
-       })
-
-#define SET_RX_CHANNEL_INFO(channel, param, value)                           \
-       ({                                                                    \
-               BUILD_BUG_ON(sizeof(channel->info->rx.param) != sizeof(u32)); \
-               if (channel->info_word)                                       \
-                       channel->info_word->rx.param = cpu_to_le32(value);    \
-               else                                                          \
-                       channel->info->rx.param = cpu_to_le32(value);         \
-       })
-
-#define GET_TX_CHANNEL_FLAG(channel, param)                                 \
-       ({                                                                   \
-               BUILD_BUG_ON(sizeof(channel->info->tx.param) != sizeof(u8)); \
-               channel->info_word ?                                         \
-                       le32_to_cpu(channel->info_word->tx.param) :          \
-                       channel->info->tx.param;                             \
-       })
-
-#define GET_TX_CHANNEL_INFO(channel, param)                                  \
-       ({                                                                    \
-               BUILD_BUG_ON(sizeof(channel->info->tx.param) != sizeof(u32)); \
-               le32_to_cpu(channel->info_word ?                              \
-                       channel->info_word->tx.param :                        \
-                       channel->info->tx.param);                             \
-       })
-
-#define SET_TX_CHANNEL_FLAG(channel, param, value)                          \
-       ({                                                                   \
-               BUILD_BUG_ON(sizeof(channel->info->tx.param) != sizeof(u8)); \
-               if (channel->info_word)                                      \
-                       channel->info_word->tx.param = cpu_to_le32(value);   \
-               else                                                         \
-                       channel->info->tx.param = value;                     \
-       })
-
-#define SET_TX_CHANNEL_INFO(channel, param, value)                           \
-       ({                                                                    \
-               BUILD_BUG_ON(sizeof(channel->info->tx.param) != sizeof(u32)); \
-               if (channel->info_word)                                       \
-                       channel->info_word->tx.param = cpu_to_le32(value);   \
-               else                                                          \
-                       channel->info->tx.param = cpu_to_le32(value);         \
-       })
-
-/**
- * struct qcom_smd_alloc_entry - channel allocation entry
- * @name:      channel name
- * @cid:       channel index
- * @flags:     channel flags and edge id
- * @ref_count: reference count of the channel
- */
-struct qcom_smd_alloc_entry {
-       u8 name[20];
-       __le32 cid;
-       __le32 flags;
-       __le32 ref_count;
-} __packed;
-
-#define SMD_CHANNEL_FLAGS_EDGE_MASK    0xff
-#define SMD_CHANNEL_FLAGS_STREAM       BIT(8)
-#define SMD_CHANNEL_FLAGS_PACKET       BIT(9)
-
-/*
- * Each smd packet contains a 20 byte header, with the first 4 being the length
- * of the packet.
- */
-#define SMD_PACKET_HEADER_LEN  20
-
-/*
- * Signal the remote processor associated with 'channel'.
- */
-static void qcom_smd_signal_channel(struct qcom_smd_channel *channel)
-{
-       struct qcom_smd_edge *edge = channel->edge;
-
-       regmap_write(edge->ipc_regmap, edge->ipc_offset, BIT(edge->ipc_bit));
-}
-
-/*
- * Initialize the tx channel info
- */
-static void qcom_smd_channel_reset(struct qcom_smd_channel *channel)
-{
-       SET_TX_CHANNEL_INFO(channel, state, SMD_CHANNEL_CLOSED);
-       SET_TX_CHANNEL_FLAG(channel, fDSR, 0);
-       SET_TX_CHANNEL_FLAG(channel, fCTS, 0);
-       SET_TX_CHANNEL_FLAG(channel, fCD, 0);
-       SET_TX_CHANNEL_FLAG(channel, fRI, 0);
-       SET_TX_CHANNEL_FLAG(channel, fHEAD, 0);
-       SET_TX_CHANNEL_FLAG(channel, fTAIL, 0);
-       SET_TX_CHANNEL_FLAG(channel, fSTATE, 1);
-       SET_TX_CHANNEL_FLAG(channel, fBLOCKREADINTR, 1);
-       SET_TX_CHANNEL_INFO(channel, head, 0);
-       SET_RX_CHANNEL_INFO(channel, tail, 0);
-
-       qcom_smd_signal_channel(channel);
-
-       channel->state = SMD_CHANNEL_CLOSED;
-       channel->pkt_size = 0;
-}
-
-/*
- * Set the callback for a channel, with appropriate locking
- */
-static void qcom_smd_channel_set_callback(struct qcom_smd_channel *channel,
-                                         qcom_smd_cb_t cb)
-{
-       unsigned long flags;
-
-       spin_lock_irqsave(&channel->recv_lock, flags);
-       channel->cb = cb;
-       spin_unlock_irqrestore(&channel->recv_lock, flags);
-};
-
-/*
- * Calculate the amount of data available in the rx fifo
- */
-static size_t qcom_smd_channel_get_rx_avail(struct qcom_smd_channel *channel)
-{
-       unsigned head;
-       unsigned tail;
-
-       head = GET_RX_CHANNEL_INFO(channel, head);
-       tail = GET_RX_CHANNEL_INFO(channel, tail);
-
-       return (head - tail) & (channel->fifo_size - 1);
-}
-
-/*
- * Set tx channel state and inform the remote processor
- */
-static void qcom_smd_channel_set_state(struct qcom_smd_channel *channel,
-                                      int state)
-{
-       struct qcom_smd_edge *edge = channel->edge;
-       bool is_open = state == SMD_CHANNEL_OPENED;
-
-       if (channel->state == state)
-               return;
-
-       dev_dbg(&edge->dev, "set_state(%s, %d)\n", channel->name, state);
-
-       SET_TX_CHANNEL_FLAG(channel, fDSR, is_open);
-       SET_TX_CHANNEL_FLAG(channel, fCTS, is_open);
-       SET_TX_CHANNEL_FLAG(channel, fCD, is_open);
-
-       SET_TX_CHANNEL_INFO(channel, state, state);
-       SET_TX_CHANNEL_FLAG(channel, fSTATE, 1);
-
-       channel->state = state;
-       qcom_smd_signal_channel(channel);
-}
-
-/*
- * Copy count bytes of data using 32bit accesses, if that's required.
- */
-static void smd_copy_to_fifo(void __iomem *dst,
-                            const void *src,
-                            size_t count,
-                            bool word_aligned)
-{
-       if (word_aligned) {
-               __iowrite32_copy(dst, src, count / sizeof(u32));
-       } else {
-               memcpy_toio(dst, src, count);
-       }
-}
-
-/*
- * Copy count bytes of data using 32bit accesses, if that is required.
- */
-static void smd_copy_from_fifo(void *dst,
-                              const void __iomem *src,
-                              size_t count,
-                              bool word_aligned)
-{
-       if (word_aligned) {
-               __ioread32_copy(dst, src, count / sizeof(u32));
-       } else {
-               memcpy_fromio(dst, src, count);
-       }
-}
-
-/*
- * Read count bytes of data from the rx fifo into buf, but don't advance the
- * tail.
- */
-static size_t qcom_smd_channel_peek(struct qcom_smd_channel *channel,
-                                   void *buf, size_t count)
-{
-       bool word_aligned;
-       unsigned tail;
-       size_t len;
-
-       word_aligned = channel->info_word;
-       tail = GET_RX_CHANNEL_INFO(channel, tail);
-
-       len = min_t(size_t, count, channel->fifo_size - tail);
-       if (len) {
-               smd_copy_from_fifo(buf,
-                                  channel->rx_fifo + tail,
-                                  len,
-                                  word_aligned);
-       }
-
-       if (len != count) {
-               smd_copy_from_fifo(buf + len,
-                                  channel->rx_fifo,
-                                  count - len,
-                                  word_aligned);
-       }
-
-       return count;
-}
-
-/*
- * Advance the rx tail by count bytes.
- */
-static void qcom_smd_channel_advance(struct qcom_smd_channel *channel,
-                                    size_t count)
-{
-       unsigned tail;
-
-       tail = GET_RX_CHANNEL_INFO(channel, tail);
-       tail += count;
-       tail &= (channel->fifo_size - 1);
-       SET_RX_CHANNEL_INFO(channel, tail, tail);
-}
-
-/*
- * Read out a single packet from the rx fifo and deliver it to the device
- */
-static int qcom_smd_channel_recv_single(struct qcom_smd_channel *channel)
-{
-       unsigned tail;
-       size_t len;
-       void *ptr;
-       int ret;
-
-       if (!channel->cb)
-               return 0;
-
-       tail = GET_RX_CHANNEL_INFO(channel, tail);
-
-       /* Use bounce buffer if the data wraps */
-       if (tail + channel->pkt_size >= channel->fifo_size) {
-               ptr = channel->bounce_buffer;
-               len = qcom_smd_channel_peek(channel, ptr, channel->pkt_size);
-       } else {
-               ptr = channel->rx_fifo + tail;
-               len = channel->pkt_size;
-       }
-
-       ret = channel->cb(channel, ptr, len);
-       if (ret < 0)
-               return ret;
-
-       /* Only forward the tail if the client consumed the data */
-       qcom_smd_channel_advance(channel, len);
-
-       channel->pkt_size = 0;
-
-       return 0;
-}
-
-/*
- * Per channel interrupt handling
- */
-static bool qcom_smd_channel_intr(struct qcom_smd_channel *channel)
-{
-       bool need_state_scan = false;
-       int remote_state;
-       __le32 pktlen;
-       int avail;
-       int ret;
-
-       /* Handle state changes */
-       remote_state = GET_RX_CHANNEL_INFO(channel, state);
-       if (remote_state != channel->remote_state) {
-               channel->remote_state = remote_state;
-               need_state_scan = true;
-       }
-       /* Indicate that we have seen any state change */
-       SET_RX_CHANNEL_FLAG(channel, fSTATE, 0);
-
-       /* Signal waiting qcom_smd_send() about the interrupt */
-       if (!GET_TX_CHANNEL_FLAG(channel, fBLOCKREADINTR))
-               wake_up_interruptible(&channel->fblockread_event);
-
-       /* Don't consume any data until we've opened the channel */
-       if (channel->state != SMD_CHANNEL_OPENED)
-               goto out;
-
-       /* Indicate that we've seen the new data */
-       SET_RX_CHANNEL_FLAG(channel, fHEAD, 0);
-
-       /* Consume data */
-       for (;;) {
-               avail = qcom_smd_channel_get_rx_avail(channel);
-
-               if (!channel->pkt_size && avail >= SMD_PACKET_HEADER_LEN) {
-                       qcom_smd_channel_peek(channel, &pktlen, sizeof(pktlen));
-                       qcom_smd_channel_advance(channel, SMD_PACKET_HEADER_LEN);
-                       channel->pkt_size = le32_to_cpu(pktlen);
-               } else if (channel->pkt_size && avail >= channel->pkt_size) {
-                       ret = qcom_smd_channel_recv_single(channel);
-                       if (ret)
-                               break;
-               } else {
-                       break;
-               }
-       }
-
-       /* Indicate that we have seen and updated tail */
-       SET_RX_CHANNEL_FLAG(channel, fTAIL, 1);
-
-       /* Signal the remote that we've consumed the data (if requested) */
-       if (!GET_RX_CHANNEL_FLAG(channel, fBLOCKREADINTR)) {
-               /* Ensure ordering of channel info updates */
-               wmb();
-
-               qcom_smd_signal_channel(channel);
-       }
-
-out:
-       return need_state_scan;
-}
-
-/*
- * The edge interrupts are triggered by the remote processor on state changes,
- * channel info updates or when new channels are created.
- */
-static irqreturn_t qcom_smd_edge_intr(int irq, void *data)
-{
-       struct qcom_smd_edge *edge = data;
-       struct qcom_smd_channel *channel;
-       unsigned available;
-       bool kick_scanner = false;
-       bool kick_state = false;
-
-       /*
-        * Handle state changes or data on each of the channels on this edge
-        */
-       spin_lock(&edge->channels_lock);
-       list_for_each_entry(channel, &edge->channels, list) {
-               spin_lock(&channel->recv_lock);
-               kick_state |= qcom_smd_channel_intr(channel);
-               spin_unlock(&channel->recv_lock);
-       }
-       spin_unlock(&edge->channels_lock);
-
-       /*
-        * Creating a new channel requires allocating an smem entry, so we only
-        * have to scan if the amount of available space in smem has changed
-        * since the last scan.
-        */
-       available = qcom_smem_get_free_space(edge->remote_pid);
-       if (available != edge->smem_available) {
-               edge->smem_available = available;
-               kick_scanner = true;
-       }
-
-       if (kick_scanner)
-               schedule_work(&edge->scan_work);
-       if (kick_state)
-               schedule_work(&edge->state_work);
-
-       return IRQ_HANDLED;
-}
-
-/*
- * Delivers any outstanding packets in the rx fifo; can be used after probe of
- * the clients to deliver any packets that weren't delivered before the client
- * was set up.
- */
-static void qcom_smd_channel_resume(struct qcom_smd_channel *channel)
-{
-       unsigned long flags;
-
-       spin_lock_irqsave(&channel->recv_lock, flags);
-       qcom_smd_channel_intr(channel);
-       spin_unlock_irqrestore(&channel->recv_lock, flags);
-}
-
-/*
- * Calculate how much space is available in the tx fifo.
- */
-static size_t qcom_smd_get_tx_avail(struct qcom_smd_channel *channel)
-{
-       unsigned head;
-       unsigned tail;
-       unsigned mask = channel->fifo_size - 1;
-
-       head = GET_TX_CHANNEL_INFO(channel, head);
-       tail = GET_TX_CHANNEL_INFO(channel, tail);
-
-       return mask - ((head - tail) & mask);
-}
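
The head/tail arithmetic in qcom_smd_channel_get_rx_avail() and qcom_smd_get_tx_avail() above relies on the fifo size being a power of two; a standalone sketch of the same computations:

#include <stdio.h>

#define FIFO_SIZE 64    /* must be a power of two, as the SMD fifos are */

/* Bytes available to read: distance from tail to head, modulo fifo size. */
static unsigned int rx_avail(unsigned int head, unsigned int tail)
{
        return (head - tail) & (FIFO_SIZE - 1);
}

/* Bytes free to write: one slot is kept unused so that head == tail
 * always means "empty", mirroring qcom_smd_get_tx_avail() above. */
static unsigned int tx_avail(unsigned int head, unsigned int tail)
{
        unsigned int mask = FIFO_SIZE - 1;

        return mask - ((head - tail) & mask);
}

int main(void)
{
        /* head has wrapped past the end of the fifo, tail is still behind */
        printf("rx=%u tx=%u\n", rx_avail(4, 60), tx_avail(4, 60)); /* 8 55 */
        return 0;
}
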
-
-/*
- * Write count bytes of data into channel, possibly wrapping in the ring buffer
- */
-static int qcom_smd_write_fifo(struct qcom_smd_channel *channel,
-                              const void *data,
-                              size_t count)
-{
-       bool word_aligned;
-       unsigned head;
-       size_t len;
-
-       word_aligned = channel->info_word;
-       head = GET_TX_CHANNEL_INFO(channel, head);
-
-       len = min_t(size_t, count, channel->fifo_size - head);
-       if (len) {
-               smd_copy_to_fifo(channel->tx_fifo + head,
-                                data,
-                                len,
-                                word_aligned);
-       }
-
-       if (len != count) {
-               smd_copy_to_fifo(channel->tx_fifo,
-                                data + len,
-                                count - len,
-                                word_aligned);
-       }
-
-       head += count;
-       head &= (channel->fifo_size - 1);
-       SET_TX_CHANNEL_INFO(channel, head, head);
-
-       return count;
-}
-
-/**
- * qcom_smd_send - write data to smd channel
- * @channel:   channel handle
- * @data:      buffer of data to write
- * @len:       number of bytes to write
- *
- * This is a blocking write of len bytes into the channel's tx ring buffer and
- * signals the remote end. It will sleep until there is enough space available
- * in the tx buffer, utilizing the fBLOCKREADINTR signaling mechanism to avoid
- * polling.
- */
-int qcom_smd_send(struct qcom_smd_channel *channel, const void *data, int len)
-{
-       __le32 hdr[5] = { cpu_to_le32(len), };
-       int tlen = sizeof(hdr) + len;
-       int ret;
-
-       /* Word aligned channels only accept word size aligned data */
-       if (channel->info_word && len % 4)
-               return -EINVAL;
-
-       /* Reject packets that are too big */
-       if (tlen >= channel->fifo_size)
-               return -EINVAL;
-
-       ret = mutex_lock_interruptible(&channel->tx_lock);
-       if (ret)
-               return ret;
-
-       while (qcom_smd_get_tx_avail(channel) < tlen) {
-               if (channel->state != SMD_CHANNEL_OPENED) {
-                       ret = -EPIPE;
-                       goto out;
-               }
-
-               SET_TX_CHANNEL_FLAG(channel, fBLOCKREADINTR, 0);
-
-               ret = wait_event_interruptible(channel->fblockread_event,
-                                      qcom_smd_get_tx_avail(channel) >= tlen ||
-                                      channel->state != SMD_CHANNEL_OPENED);
-               if (ret)
-                       goto out;
-
-               SET_TX_CHANNEL_FLAG(channel, fBLOCKREADINTR, 1);
-       }
-
-       SET_TX_CHANNEL_FLAG(channel, fTAIL, 0);
-
-       qcom_smd_write_fifo(channel, hdr, sizeof(hdr));
-       qcom_smd_write_fifo(channel, data, len);
-
-       SET_TX_CHANNEL_FLAG(channel, fHEAD, 1);
-
-       /* Ensure ordering of channel info updates */
-       wmb();
-
-       qcom_smd_signal_channel(channel);
-
-out:
-       mutex_unlock(&channel->tx_lock);
-
-       return ret;
-}
-EXPORT_SYMBOL(qcom_smd_send);
-
-static struct qcom_smd_device *to_smd_device(struct device *dev)
-{
-       return container_of(dev, struct qcom_smd_device, dev);
-}
-
-static struct qcom_smd_driver *to_smd_driver(struct device *dev)
-{
-       struct qcom_smd_device *qsdev = to_smd_device(dev);
-
-       return container_of(qsdev->dev.driver, struct qcom_smd_driver, driver);
-}
-
-static int qcom_smd_dev_match(struct device *dev, struct device_driver *drv)
-{
-       struct qcom_smd_device *qsdev = to_smd_device(dev);
-       struct qcom_smd_driver *qsdrv = container_of(drv, struct qcom_smd_driver, driver);
-       const struct qcom_smd_id *match = qsdrv->smd_match_table;
-       const char *name = qsdev->channel->name;
-
-       if (match) {
-               while (match->name[0]) {
-                       if (!strcmp(match->name, name))
-                               return 1;
-                       match++;
-               }
-       }
-
-       return of_driver_match_device(dev, drv);
-}
-
-/*
- * Helper for opening a channel
- */
-static int qcom_smd_channel_open(struct qcom_smd_channel *channel,
-                                qcom_smd_cb_t cb)
-{
-       size_t bb_size;
-
-       /*
-        * Packets are at most 4k, but use a smaller buffer if the fifo is smaller
-        */
-       bb_size = min(channel->fifo_size, SZ_4K);
-       channel->bounce_buffer = kmalloc(bb_size, GFP_KERNEL);
-       if (!channel->bounce_buffer)
-               return -ENOMEM;
-
-       qcom_smd_channel_set_callback(channel, cb);
-       qcom_smd_channel_set_state(channel, SMD_CHANNEL_OPENING);
-       qcom_smd_channel_set_state(channel, SMD_CHANNEL_OPENED);
-
-       return 0;
-}
-
-/*
- * Helper for closing and resetting a channel
- */
-static void qcom_smd_channel_close(struct qcom_smd_channel *channel)
-{
-       qcom_smd_channel_set_callback(channel, NULL);
-
-       kfree(channel->bounce_buffer);
-       channel->bounce_buffer = NULL;
-
-       qcom_smd_channel_set_state(channel, SMD_CHANNEL_CLOSED);
-       qcom_smd_channel_reset(channel);
-}
-
-/*
- * Probe the smd client.
- *
- * The remote side has indicated that it wants the channel to be opened, so
- * complete the state handshake and probe our client driver.
- */
-static int qcom_smd_dev_probe(struct device *dev)
-{
-       struct qcom_smd_device *qsdev = to_smd_device(dev);
-       struct qcom_smd_driver *qsdrv = to_smd_driver(dev);
-       struct qcom_smd_channel *channel = qsdev->channel;
-       int ret;
-
-       ret = qcom_smd_channel_open(channel, qsdrv->callback);
-       if (ret)
-               return ret;
-
-       ret = qsdrv->probe(qsdev);
-       if (ret)
-               goto err;
-
-       qcom_smd_channel_resume(channel);
-
-       return 0;
-
-err:
-       dev_err(&qsdev->dev, "probe failed\n");
-
-       qcom_smd_channel_close(channel);
-       return ret;
-}
-
-/*
- * Remove the smd client.
- *
- * The channel is going away, for some reason, so remove the smd client and
- * reset the channel state.
- */
-static int qcom_smd_dev_remove(struct device *dev)
-{
-       struct qcom_smd_device *qsdev = to_smd_device(dev);
-       struct qcom_smd_driver *qsdrv = to_smd_driver(dev);
-       struct qcom_smd_channel *channel = qsdev->channel;
-
-       qcom_smd_channel_set_state(channel, SMD_CHANNEL_CLOSING);
-
-       /*
-        * Make sure we don't race with the code receiving data.
-        */
-       qcom_smd_channel_set_callback(channel, NULL);
-
-       /* Wake up any sleepers in qcom_smd_send() */
-       wake_up_interruptible(&channel->fblockread_event);
-
-       /*
-        * We expect that the client might block in remove() waiting for any
-        * outstanding calls to qcom_smd_send() to wake up and finish.
-        */
-       if (qsdrv->remove)
-               qsdrv->remove(qsdev);
-
-       /* The client is now gone, close the primary channel */
-       qcom_smd_channel_close(channel);
-       channel->qsdev = NULL;
-
-       return 0;
-}
-
-static struct bus_type qcom_smd_bus = {
-       .name = "qcom_smd",
-       .match = qcom_smd_dev_match,
-       .probe = qcom_smd_dev_probe,
-       .remove = qcom_smd_dev_remove,
-};
-
-/*
- * Release function for the qcom_smd_device object.
- */
-static void qcom_smd_release_device(struct device *dev)
-{
-       struct qcom_smd_device *qsdev = to_smd_device(dev);
-
-       kfree(qsdev);
-}
-
-/*
- * Finds the device_node for the smd child interested in this channel.
- */
-static struct device_node *qcom_smd_match_channel(struct device_node *edge_node,
-                                                 const char *channel)
-{
-       struct device_node *child;
-       const char *name;
-       const char *key;
-       int ret;
-
-       for_each_available_child_of_node(edge_node, child) {
-               key = "qcom,smd-channels";
-               ret = of_property_read_string(child, key, &name);
-               if (ret)
-                       continue;
-
-               if (strcmp(name, channel) == 0)
-                       return child;
-       }
-
-       return NULL;
-}
-
-/*
- * Create a smd client device for a channel that is being opened.
- */
-static int qcom_smd_create_device(struct qcom_smd_channel *channel)
-{
-       struct qcom_smd_device *qsdev;
-       struct qcom_smd_edge *edge = channel->edge;
-       struct device_node *node;
-       int ret;
-
-       if (channel->qsdev)
-               return -EEXIST;
-
-       dev_dbg(&edge->dev, "registering '%s'\n", channel->name);
-
-       qsdev = kzalloc(sizeof(*qsdev), GFP_KERNEL);
-       if (!qsdev)
-               return -ENOMEM;
-
-       node = qcom_smd_match_channel(edge->of_node, channel->name);
-       dev_set_name(&qsdev->dev, "%s.%s",
-                    edge->of_node->name,
-                    node ? node->name : channel->name);
-
-       qsdev->dev.parent = &edge->dev;
-       qsdev->dev.bus = &qcom_smd_bus;
-       qsdev->dev.release = qcom_smd_release_device;
-       qsdev->dev.of_node = node;
-
-       qsdev->channel = channel;
-
-       channel->qsdev = qsdev;
-
-       ret = device_register(&qsdev->dev);
-       if (ret) {
-               dev_err(&edge->dev, "device_register failed: %d\n", ret);
-               put_device(&qsdev->dev);
-       }
-
-       return ret;
-}
-
-/*
- * Destroy a smd client device for a channel that's going away.
- */
-static void qcom_smd_destroy_device(struct qcom_smd_channel *channel)
-{
-       struct device *dev;
-
-       BUG_ON(!channel->qsdev);
-
-       dev = &channel->qsdev->dev;
-
-       device_unregister(dev);
-       of_node_put(dev->of_node);
-       put_device(dev);
-}
-
-/**
- * qcom_smd_driver_register - register a smd driver
- * @qsdrv:     qcom_smd_driver struct
- */
-int qcom_smd_driver_register(struct qcom_smd_driver *qsdrv)
-{
-       qsdrv->driver.bus = &qcom_smd_bus;
-       return driver_register(&qsdrv->driver);
-}
-EXPORT_SYMBOL(qcom_smd_driver_register);
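
Tying the bus pieces together, a minimal client sketch against this API:
the channel name, handlers and driver name are hypothetical, while the
callback signature and module_qcom_smd_driver() match their uses elsewhere
in this patch.

#include <linux/module.h>
#include <linux/soc/qcom/smd.h>

static int my_smd_callback(struct qcom_smd_channel *channel,
			   const void *data, size_t count)
{
	/* consume one incoming packet */
	return 0;
}

static int my_smd_probe(struct qcom_smd_device *sdev)
{
	/* the channel was already opened by qcom_smd_dev_probe() */
	return 0;
}

static const struct qcom_smd_id my_smd_match[] = {
	{ .name = "MY_CHANNEL" },
	{}
};

static struct qcom_smd_driver my_smd_driver = {
	.probe = my_smd_probe,
	.callback = my_smd_callback,
	.smd_match_table = my_smd_match,
	.driver = {
		.name = "my_smd_client",
	},
};
module_qcom_smd_driver(my_smd_driver);
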
-
-void *qcom_smd_get_drvdata(struct qcom_smd_channel *channel)
-{
-       return channel->drvdata;
-}
-EXPORT_SYMBOL(qcom_smd_get_drvdata);
-
-void qcom_smd_set_drvdata(struct qcom_smd_channel *channel, void *data)
-{
-       channel->drvdata = data;
-}
-EXPORT_SYMBOL(qcom_smd_set_drvdata);
-
-/**
- * qcom_smd_driver_unregister - unregister a smd driver
- * @qsdrv:     qcom_smd_driver struct
- */
-void qcom_smd_driver_unregister(struct qcom_smd_driver *qsdrv)
-{
-       driver_unregister(&qsdrv->driver);
-}
-EXPORT_SYMBOL(qcom_smd_driver_unregister);
-
-static struct qcom_smd_channel *
-qcom_smd_find_channel(struct qcom_smd_edge *edge, const char *name)
-{
-       struct qcom_smd_channel *channel;
-       struct qcom_smd_channel *ret = NULL;
-       unsigned long flags;
-       unsigned state;
-
-       spin_lock_irqsave(&edge->channels_lock, flags);
-       list_for_each_entry(channel, &edge->channels, list) {
-               if (strcmp(channel->name, name))
-                       continue;
-
-               state = GET_RX_CHANNEL_INFO(channel, state);
-               if (state != SMD_CHANNEL_OPENING &&
-                   state != SMD_CHANNEL_OPENED)
-                       continue;
-
-               ret = channel;
-               break;
-       }
-       spin_unlock_irqrestore(&edge->channels_lock, flags);
-
-       return ret;
-}
-
-/**
- * qcom_smd_open_channel() - claim additional channels on the same edge
- * @sdev:      smd_device handle
- * @name:      channel name
- * @cb:                callback method to use for incoming data
- *
- * Returns a channel handle on success, or an ERR_PTR() on failure:
- * -ETIMEDOUT if the channel doesn't appear in time, -EBUSY if it is claimed.
- *
- * Any channels returned must be closed with a call to qcom_smd_close_channel()
- */
-struct qcom_smd_channel *qcom_smd_open_channel(struct qcom_smd_channel *parent,
-                                              const char *name,
-                                              qcom_smd_cb_t cb)
-{
-       struct qcom_smd_channel *channel;
-       struct qcom_smd_device *sdev = parent->qsdev;
-       struct qcom_smd_edge *edge = parent->edge;
-       int ret;
-
-       /* Wait up to HZ for the channel to appear */
-       ret = wait_event_interruptible_timeout(edge->new_channel_event,
-                       (channel = qcom_smd_find_channel(edge, name)) != NULL,
-                       HZ);
-       if (!ret)
-               return ERR_PTR(-ETIMEDOUT);
-
-       if (channel->state != SMD_CHANNEL_CLOSED) {
-               dev_err(&sdev->dev, "channel %s is busy\n", channel->name);
-               return ERR_PTR(-EBUSY);
-       }
-
-       channel->qsdev = sdev;
-       ret = qcom_smd_channel_open(channel, cb);
-       if (ret) {
-               channel->qsdev = NULL;
-               return ERR_PTR(ret);
-       }
-
-       return channel;
-}
-EXPORT_SYMBOL(qcom_smd_open_channel);
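
Sketch of a client claiming a secondary channel with the function above and
releasing it again; the names are illustrative, the error values are the
ones visible in the code.

static int my_claim_aux(struct qcom_smd_device *sdev, qcom_smd_cb_t cb,
			struct qcom_smd_channel **out)
{
	struct qcom_smd_channel *ch;

	ch = qcom_smd_open_channel(sdev->channel, "MY_AUX", cb);
	if (IS_ERR(ch))
		return PTR_ERR(ch);	/* -ETIMEDOUT, -EBUSY, -ENOMEM */

	*out = ch;
	return 0;
}

/* on teardown: qcom_smd_close_channel(*out); */
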
-
-/**
- * qcom_smd_close_channel() - close an additionally opened channel
- * @channel:   channel handle, returned by qcom_smd_open_channel()
- */
-void qcom_smd_close_channel(struct qcom_smd_channel *channel)
-{
-       qcom_smd_channel_close(channel);
-       channel->qsdev = NULL;
-}
-EXPORT_SYMBOL(qcom_smd_close_channel);
-
-/*
- * Allocate the qcom_smd_channel object for a newly found smd channel,
- * retrieving and validating the smem items involved.
- */
-static struct qcom_smd_channel *qcom_smd_create_channel(struct qcom_smd_edge *edge,
-                                                       unsigned smem_info_item,
-                                                       unsigned smem_fifo_item,
-                                                       char *name)
-{
-       struct qcom_smd_channel *channel;
-       size_t fifo_size;
-       size_t info_size;
-       void *fifo_base;
-       void *info;
-       int ret;
-
-       channel = devm_kzalloc(&edge->dev, sizeof(*channel), GFP_KERNEL);
-       if (!channel)
-               return ERR_PTR(-ENOMEM);
-
-       channel->edge = edge;
-       channel->name = devm_kstrdup(&edge->dev, name, GFP_KERNEL);
-       if (!channel->name)
-               return ERR_PTR(-ENOMEM);
-
-       mutex_init(&channel->tx_lock);
-       spin_lock_init(&channel->recv_lock);
-       init_waitqueue_head(&channel->fblockread_event);
-
-       info = qcom_smem_get(edge->remote_pid, smem_info_item, &info_size);
-       if (IS_ERR(info)) {
-               ret = PTR_ERR(info);
-               goto free_name_and_channel;
-       }
-
-       /*
-        * Use the size of the item to figure out which channel info struct to
-        * use.
-        */
-       if (info_size == 2 * sizeof(struct smd_channel_info_word)) {
-               channel->info_word = info;
-       } else if (info_size == 2 * sizeof(struct smd_channel_info)) {
-               channel->info = info;
-       } else {
-               dev_err(&edge->dev,
-                       "channel info of size %zu not supported\n", info_size);
-               ret = -EINVAL;
-               goto free_name_and_channel;
-       }
-
-       fifo_base = qcom_smem_get(edge->remote_pid, smem_fifo_item, &fifo_size);
-       if (IS_ERR(fifo_base)) {
-               ret = PTR_ERR(fifo_base);
-               goto free_name_and_channel;
-       }
-
-       /* The channel consists of an rx and a tx fifo of equal size */
-       fifo_size /= 2;
-
-       dev_dbg(&edge->dev, "new channel '%s' info-size: %zu fifo-size: %zu\n",
-                         name, info_size, fifo_size);
-
-       channel->tx_fifo = fifo_base;
-       channel->rx_fifo = fifo_base + fifo_size;
-       channel->fifo_size = fifo_size;
-
-       qcom_smd_channel_reset(channel);
-
-       return channel;
-
-free_name_and_channel:
-       devm_kfree(&edge->dev, channel->name);
-       devm_kfree(&edge->dev, channel);
-
-       return ERR_PTR(ret);
-}
-
-/*
- * Scans the allocation table for any newly allocated channels, calls
- * qcom_smd_create_channel() to create representations of these and adds
- * them to the edge's list of channels.
- */
-static void qcom_channel_scan_worker(struct work_struct *work)
-{
-       struct qcom_smd_edge *edge = container_of(work, struct qcom_smd_edge, scan_work);
-       struct qcom_smd_alloc_entry *alloc_tbl;
-       struct qcom_smd_alloc_entry *entry;
-       struct qcom_smd_channel *channel;
-       unsigned long flags;
-       unsigned fifo_id;
-       unsigned info_id;
-       int tbl;
-       int i;
-       u32 eflags, cid;
-
-       for (tbl = 0; tbl < SMD_ALLOC_TBL_COUNT; tbl++) {
-               alloc_tbl = qcom_smem_get(edge->remote_pid,
-                                   smem_items[tbl].alloc_tbl_id, NULL);
-               if (IS_ERR(alloc_tbl))
-                       continue;
-
-               for (i = 0; i < SMD_ALLOC_TBL_SIZE; i++) {
-                       entry = &alloc_tbl[i];
-                       eflags = le32_to_cpu(entry->flags);
-                       if (test_bit(i, edge->allocated[tbl]))
-                               continue;
-
-                       if (entry->ref_count == 0)
-                               continue;
-
-                       if (!entry->name[0])
-                               continue;
-
-                       if (!(eflags & SMD_CHANNEL_FLAGS_PACKET))
-                               continue;
-
-                       if ((eflags & SMD_CHANNEL_FLAGS_EDGE_MASK) != edge->edge_id)
-                               continue;
-
-                       cid = le32_to_cpu(entry->cid);
-                       info_id = smem_items[tbl].info_base_id + cid;
-                       fifo_id = smem_items[tbl].fifo_base_id + cid;
-
-                       channel = qcom_smd_create_channel(edge, info_id, fifo_id, entry->name);
-                       if (IS_ERR(channel))
-                               continue;
-
-                       spin_lock_irqsave(&edge->channels_lock, flags);
-                       list_add(&channel->list, &edge->channels);
-                       spin_unlock_irqrestore(&edge->channels_lock, flags);
-
-                       dev_dbg(&edge->dev, "new channel found: '%s'\n", channel->name);
-                       set_bit(i, edge->allocated[tbl]);
-
-                       wake_up_interruptible(&edge->new_channel_event);
-               }
-       }
-
-       schedule_work(&edge->state_work);
-}
-
-/*
- * This per-edge worker scans smem for any new channels and registers them. It
- * then scans all registered channels for state changes that should be handled
- * by creating or destroying smd client devices for the registered channels.
- *
- * LOCKING: edge->channels_lock only needs to cover the list operations, as the
- * worker is killed before any channels are deallocated.
- */
-static void qcom_channel_state_worker(struct work_struct *work)
-{
-       struct qcom_smd_channel *channel;
-       struct qcom_smd_edge *edge = container_of(work,
-                                                 struct qcom_smd_edge,
-                                                 state_work);
-       unsigned remote_state;
-       unsigned long flags;
-
-       /*
-        * Register a device for any closed channel where the remote processor
-        * is showing interest in opening the channel.
-        */
-       spin_lock_irqsave(&edge->channels_lock, flags);
-       list_for_each_entry(channel, &edge->channels, list) {
-               if (channel->state != SMD_CHANNEL_CLOSED)
-                       continue;
-
-               remote_state = GET_RX_CHANNEL_INFO(channel, state);
-               if (remote_state != SMD_CHANNEL_OPENING &&
-                   remote_state != SMD_CHANNEL_OPENED)
-                       continue;
-
-               spin_unlock_irqrestore(&edge->channels_lock, flags);
-               qcom_smd_create_device(channel);
-               spin_lock_irqsave(&edge->channels_lock, flags);
-       }
-
-       /*
-        * Unregister the device for any open channel where the
-        * remote processor is closing the channel.
-        */
-       list_for_each_entry(channel, &edge->channels, list) {
-               if (channel->state != SMD_CHANNEL_OPENING &&
-                   channel->state != SMD_CHANNEL_OPENED)
-                       continue;
-
-               remote_state = GET_RX_CHANNEL_INFO(channel, state);
-               if (remote_state == SMD_CHANNEL_OPENING ||
-                   remote_state == SMD_CHANNEL_OPENED)
-                       continue;
-
-               spin_unlock_irqrestore(&edge->channels_lock, flags);
-               qcom_smd_destroy_device(channel);
-               spin_lock_irqsave(&edge->channels_lock, flags);
-       }
-       spin_unlock_irqrestore(&edge->channels_lock, flags);
-}
-
-/*
- * Parses an of_node describing an edge.
- */
-static int qcom_smd_parse_edge(struct device *dev,
-                              struct device_node *node,
-                              struct qcom_smd_edge *edge)
-{
-       struct device_node *syscon_np;
-       const char *key;
-       int irq;
-       int ret;
-
-       INIT_LIST_HEAD(&edge->channels);
-       spin_lock_init(&edge->channels_lock);
-
-       INIT_WORK(&edge->scan_work, qcom_channel_scan_worker);
-       INIT_WORK(&edge->state_work, qcom_channel_state_worker);
-
-       edge->of_node = of_node_get(node);
-
-       key = "qcom,smd-edge";
-       ret = of_property_read_u32(node, key, &edge->edge_id);
-       if (ret) {
-               dev_err(dev, "edge missing %s property\n", key);
-               return -EINVAL;
-       }
-
-       edge->remote_pid = QCOM_SMEM_HOST_ANY;
-       key = "qcom,remote-pid";
-       of_property_read_u32(node, key, &edge->remote_pid);
-
-       syscon_np = of_parse_phandle(node, "qcom,ipc", 0);
-       if (!syscon_np) {
-               dev_err(dev, "no qcom,ipc node\n");
-               return -ENODEV;
-       }
-
-       edge->ipc_regmap = syscon_node_to_regmap(syscon_np);
-       if (IS_ERR(edge->ipc_regmap))
-               return PTR_ERR(edge->ipc_regmap);
-
-       key = "qcom,ipc";
-       ret = of_property_read_u32_index(node, key, 1, &edge->ipc_offset);
-       if (ret < 0) {
-               dev_err(dev, "no offset in %s\n", key);
-               return -EINVAL;
-       }
-
-       ret = of_property_read_u32_index(node, key, 2, &edge->ipc_bit);
-       if (ret < 0) {
-               dev_err(dev, "no bit in %s\n", key);
-               return -EINVAL;
-       }
-
-       irq = irq_of_parse_and_map(node, 0);
-       if (irq < 0) {
-               dev_err(dev, "required smd interrupt missing\n");
-               return -EINVAL;
-       }
-
-       ret = devm_request_irq(dev, irq,
-                              qcom_smd_edge_intr, IRQF_TRIGGER_RISING,
-                              node->name, edge);
-       if (ret) {
-               dev_err(dev, "failed to request smd irq\n");
-               return ret;
-       }
-
-       edge->irq = irq;
-
-       return 0;
-}
-
-/*
- * Release function for an edge.
- * Reset the state of each associated channel and free the edge context.
- */
-static void qcom_smd_edge_release(struct device *dev)
-{
-       struct qcom_smd_channel *channel;
-       struct qcom_smd_edge *edge = to_smd_edge(dev);
-
-       list_for_each_entry(channel, &edge->channels, list) {
-               SET_RX_CHANNEL_INFO(channel, state, SMD_CHANNEL_CLOSED);
-               SET_RX_CHANNEL_INFO(channel, head, 0);
-               SET_RX_CHANNEL_INFO(channel, tail, 0);
-       }
-
-       kfree(edge);
-}
-
-/**
- * qcom_smd_register_edge() - register an edge based on a device_node
- * @parent:    parent device for the edge
- * @node:      device_node describing the edge
- *
- * Returns an edge reference, or negative ERR_PTR() on failure.
- */
-struct qcom_smd_edge *qcom_smd_register_edge(struct device *parent,
-                                            struct device_node *node)
-{
-       struct qcom_smd_edge *edge;
-       int ret;
-
-       edge = kzalloc(sizeof(*edge), GFP_KERNEL);
-       if (!edge)
-               return ERR_PTR(-ENOMEM);
-
-       init_waitqueue_head(&edge->new_channel_event);
-
-       edge->dev.parent = parent;
-       edge->dev.release = qcom_smd_edge_release;
-       dev_set_name(&edge->dev, "%s:%s", dev_name(parent), node->name);
-       ret = device_register(&edge->dev);
-       if (ret) {
-               pr_err("failed to register smd edge\n");
-               return ERR_PTR(ret);
-       }
-
-       ret = qcom_smd_parse_edge(&edge->dev, node, edge);
-       if (ret) {
-               dev_err(&edge->dev, "failed to parse smd edge\n");
-               goto unregister_dev;
-       }
-
-       schedule_work(&edge->scan_work);
-
-       return edge;
-
-unregister_dev:
-       put_device(&edge->dev);
-       return ERR_PTR(ret);
-}
-EXPORT_SYMBOL(qcom_smd_register_edge);
-
-static int qcom_smd_remove_device(struct device *dev, void *data)
-{
-       device_unregister(dev);
-       of_node_put(dev->of_node);
-       put_device(dev);
-
-       return 0;
-}
-
-/**
- * qcom_smd_unregister_edge() - release an edge and its children
- * @edge:      edge reference acquired from qcom_smd_register_edge
- */
-int qcom_smd_unregister_edge(struct qcom_smd_edge *edge)
-{
-       int ret;
-
-       disable_irq(edge->irq);
-       cancel_work_sync(&edge->scan_work);
-       cancel_work_sync(&edge->state_work);
-
-       ret = device_for_each_child(&edge->dev, NULL, qcom_smd_remove_device);
-       if (ret)
-               dev_warn(&edge->dev, "can't remove smd device: %d\n", ret);
-
-       device_unregister(&edge->dev);
-
-       return 0;
-}
-EXPORT_SYMBOL(qcom_smd_unregister_edge);
-
-static int qcom_smd_probe(struct platform_device *pdev)
-{
-       struct device_node *node;
-       void *p;
-
-       /* Wait for smem */
-       p = qcom_smem_get(QCOM_SMEM_HOST_ANY, smem_items[0].alloc_tbl_id, NULL);
-       if (PTR_ERR(p) == -EPROBE_DEFER)
-               return PTR_ERR(p);
-
-       for_each_available_child_of_node(pdev->dev.of_node, node)
-               qcom_smd_register_edge(&pdev->dev, node);
-
-       return 0;
-}
-
-static int qcom_smd_remove_edge(struct device *dev, void *data)
-{
-       struct qcom_smd_edge *edge = to_smd_edge(dev);
-
-       return qcom_smd_unregister_edge(edge);
-}
-
-/*
- * Shut down all smd clients by making sure that each edge stops processing
- * events and scanning for new channels, then destroy the client devices.
- */
-static int qcom_smd_remove(struct platform_device *pdev)
-{
-       int ret;
-
-       ret = device_for_each_child(&pdev->dev, NULL, qcom_smd_remove_edge);
-       if (ret)
-               dev_warn(&pdev->dev, "can't remove smd device: %d\n", ret);
-
-       return ret;
-}
-
-static const struct of_device_id qcom_smd_of_match[] = {
-       { .compatible = "qcom,smd" },
-       {}
-};
-MODULE_DEVICE_TABLE(of, qcom_smd_of_match);
-
-static struct platform_driver qcom_smd_driver = {
-       .probe = qcom_smd_probe,
-       .remove = qcom_smd_remove,
-       .driver = {
-               .name = "qcom-smd",
-               .of_match_table = qcom_smd_of_match,
-       },
-};
-
-static int __init qcom_smd_init(void)
-{
-       int ret;
-
-       ret = bus_register(&qcom_smd_bus);
-       if (ret) {
-               pr_err("failed to register smd bus: %d\n", ret);
-               return ret;
-       }
-
-       return platform_driver_register(&qcom_smd_driver);
-}
-postcore_initcall(qcom_smd_init);
-
-static void __exit qcom_smd_exit(void)
-{
-       platform_driver_unregister(&qcom_smd_driver);
-       bus_unregister(&qcom_smd_bus);
-}
-module_exit(qcom_smd_exit);
-
-MODULE_AUTHOR("Bjorn Andersson <bjorn.andersson@sonymobile.com>");
-MODULE_DESCRIPTION("Qualcomm Shared Memory Driver");
-MODULE_LICENSE("GPL v2");
index 520aedd29965498295b3992e4c8c76d2ea6b8c04..b9069184df193f34aa6d9c8bfa4f6502d6764574 100644 (file)
 #include <linux/firmware.h>
 #include <linux/module.h>
 #include <linux/slab.h>
-#include <linux/soc/qcom/smd.h>
 #include <linux/io.h>
 #include <linux/of_platform.h>
 #include <linux/platform_device.h>
+#include <linux/rpmsg.h>
 #include <linux/soc/qcom/wcnss_ctrl.h>
 
 #define WCNSS_REQUEST_TIMEOUT  (5 * HZ)
@@ -40,7 +40,7 @@
  */
 struct wcnss_ctrl {
        struct device *dev;
-       struct qcom_smd_channel *channel;
+       struct rpmsg_endpoint *channel;
 
        struct completion ack;
        struct completion cbc;
@@ -122,11 +122,13 @@ struct wcnss_download_nv_resp {
  *
  * Handles any incoming packets from the remote WCNSS_CTRL service.
  */
-static int wcnss_ctrl_smd_callback(struct qcom_smd_channel *channel,
-                                  const void *data,
-                                  size_t count)
+static int wcnss_ctrl_smd_callback(struct rpmsg_device *rpdev,
+                                  void *data,
+                                  int count,
+                                  void *priv,
+                                  u32 addr)
 {
-       struct wcnss_ctrl *wcnss = qcom_smd_get_drvdata(channel);
+       struct wcnss_ctrl *wcnss = dev_get_drvdata(&rpdev->dev);
        const struct wcnss_download_nv_resp *nvresp;
        const struct wcnss_version_resp *version;
        const struct wcnss_msg_hdr *hdr = data;
@@ -180,7 +182,7 @@ static int wcnss_request_version(struct wcnss_ctrl *wcnss)
 
        msg.type = WCNSS_VERSION_REQ;
        msg.len = sizeof(msg);
-       ret = qcom_smd_send(wcnss->channel, &msg, sizeof(msg));
+       ret = rpmsg_send(wcnss->channel, &msg, sizeof(msg));
        if (ret < 0)
                return ret;
 
@@ -238,7 +240,7 @@ static int wcnss_download_nv(struct wcnss_ctrl *wcnss, bool *expect_cbc)
 
                memcpy(req->fragment, data, req->frag_size);
 
-               ret = qcom_smd_send(wcnss->channel, req, req->hdr.len);
+               ret = rpmsg_send(wcnss->channel, req, req->hdr.len);
                if (ret < 0) {
                        dev_err(wcnss->dev, "failed to send smd packet\n");
                        goto release_fw;
@@ -274,11 +276,16 @@ free_req:
  * @name:      SMD channel name
  * @cb:                callback to handle incoming data on the channel
  */
-struct qcom_smd_channel *qcom_wcnss_open_channel(void *wcnss, const char *name, qcom_smd_cb_t cb)
+struct rpmsg_endpoint *qcom_wcnss_open_channel(void *wcnss, const char *name, rpmsg_rx_cb_t cb, void *priv)
 {
+       struct rpmsg_channel_info chinfo;
        struct wcnss_ctrl *_wcnss = wcnss;
 
-       return qcom_smd_open_channel(_wcnss->channel, name, cb);
+       strncpy(chinfo.name, name, sizeof(chinfo.name));
+       chinfo.src = RPMSG_ADDR_ANY;
+       chinfo.dst = RPMSG_ADDR_ANY;
+
+       return rpmsg_create_ept(_wcnss->channel->rpdev, cb, priv, chinfo);
 }
 EXPORT_SYMBOL(qcom_wcnss_open_channel);
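
After this conversion the same operation goes through rpmsg. A hedged
sketch of a caller (callback and names invented; note rpmsg_create_ept(),
and therefore this function, returns NULL rather than an ERR_PTR on
failure):

#include <linux/rpmsg.h>
#include <linux/soc/qcom/wcnss_ctrl.h>

static int my_rx_cb(struct rpmsg_device *rpdev, void *data, int len,
		    void *priv, u32 addr)
{
	/* consume one incoming packet */
	return 0;
}

static int my_open(struct device *dev, void *wcnss)
{
	struct rpmsg_endpoint *ept;

	ept = qcom_wcnss_open_channel(wcnss, "MY_CHANNEL", my_rx_cb, dev);
	if (!ept)
		return -ENOMEM;

	rpmsg_destroy_ept(ept);		/* release when done */
	return 0;
}
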
 
@@ -306,35 +313,34 @@ static void wcnss_async_probe(struct work_struct *work)
        of_platform_populate(wcnss->dev->of_node, NULL, NULL, wcnss->dev);
 }
 
-static int wcnss_ctrl_probe(struct qcom_smd_device *sdev)
+static int wcnss_ctrl_probe(struct rpmsg_device *rpdev)
 {
        struct wcnss_ctrl *wcnss;
 
-       wcnss = devm_kzalloc(&sdev->dev, sizeof(*wcnss), GFP_KERNEL);
+       wcnss = devm_kzalloc(&rpdev->dev, sizeof(*wcnss), GFP_KERNEL);
        if (!wcnss)
                return -ENOMEM;
 
-       wcnss->dev = &sdev->dev;
-       wcnss->channel = sdev->channel;
+       wcnss->dev = &rpdev->dev;
+       wcnss->channel = rpdev->ept;
 
        init_completion(&wcnss->ack);
        init_completion(&wcnss->cbc);
        INIT_WORK(&wcnss->probe_work, wcnss_async_probe);
 
-       qcom_smd_set_drvdata(sdev->channel, wcnss);
-       dev_set_drvdata(&sdev->dev, wcnss);
+       dev_set_drvdata(&rpdev->dev, wcnss);
 
        schedule_work(&wcnss->probe_work);
 
        return 0;
 }
 
-static void wcnss_ctrl_remove(struct qcom_smd_device *sdev)
+static void wcnss_ctrl_remove(struct rpmsg_device *rpdev)
 {
-       struct wcnss_ctrl *wcnss = qcom_smd_get_drvdata(sdev->channel);
+       struct wcnss_ctrl *wcnss = dev_get_drvdata(&rpdev->dev);
 
        cancel_work_sync(&wcnss->probe_work);
-       of_platform_depopulate(&sdev->dev);
+       of_platform_depopulate(&rpdev->dev);
 }
 
 static const struct of_device_id wcnss_ctrl_of_match[] = {
@@ -342,18 +348,18 @@ static const struct of_device_id wcnss_ctrl_of_match[] = {
        {}
 };
 
-static struct qcom_smd_driver wcnss_ctrl_driver = {
+static struct rpmsg_driver wcnss_ctrl_driver = {
        .probe = wcnss_ctrl_probe,
        .remove = wcnss_ctrl_remove,
        .callback = wcnss_ctrl_smd_callback,
-       .driver  = {
+       .drv  = {
                .name  = "qcom_wcnss_ctrl",
                .owner = THIS_MODULE,
                .of_match_table = wcnss_ctrl_of_match,
        },
 };
 
-module_qcom_smd_driver(wcnss_ctrl_driver);
+module_rpmsg_driver(wcnss_ctrl_driver);
 
 MODULE_DESCRIPTION("Qualcomm WCNSS control driver");
 MODULE_LICENSE("GPL v2");
index b7b87ecefcdfc712b74a79298594cdaa94ab68bc..9fca8d225ee092e92e1fb71a7a6faff5ee1a831c 100644 (file)
@@ -532,7 +532,7 @@ lnet_sock_accept(struct socket **newsockp, struct socket *sock)
 
        newsock->ops = sock->ops;
 
-       rc = sock->ops->accept(sock, newsock, O_NONBLOCK);
+       rc = sock->ops->accept(sock, newsock, O_NONBLOCK, false);
        if (rc == -EAGAIN) {
                /* Nothing ready, so wait for activity */
                init_waitqueue_entry(&wait, current);
@@ -540,7 +540,7 @@ lnet_sock_accept(struct socket **newsockp, struct socket *sock)
                set_current_state(TASK_INTERRUPTIBLE);
                schedule();
                remove_wait_queue(sk_sleep(sock->sk), &wait);
-               rc = sock->ops->accept(sock, newsock, O_NONBLOCK);
+               rc = sock->ops->accept(sock, newsock, O_NONBLOCK, false);
        }
 
        if (rc)
index 7f8cf875157c60009d51b22c71cc47b402394091..65a2856319948e4c3fb184814334c651f4f9eed4 100644 (file)
@@ -336,7 +336,6 @@ static int cvm_oct_poll(struct oct_rx_group *rx_group, int budget)
                if (likely((port < TOTAL_NUMBER_OF_PORTS) &&
                           cvm_oct_device[port])) {
                        struct net_device *dev = cvm_oct_device[port];
-                       struct octeon_ethernet *priv = netdev_priv(dev);
 
                        /*
                         * Only accept packets for devices that are
index e61e4ca064a8ab43b4dc4954b22fa3a52310379f..74094fff4367813a03e5fb59357a1e1054ea002d 100644 (file)
@@ -1,6 +1,7 @@
 config BCM2835_VCHIQ
        tristate "Videocore VCHIQ"
        depends on HAS_DMA
+       depends on OF
        depends on RASPBERRYPI_FIRMWARE || (COMPILE_TEST && !RASPBERRYPI_FIRMWARE)
        default y
        help
index f5e330099bfca713f4cb12bd2dc77826fdad1b3b..fd7c16a7ca6e06ad53e6d6df54ab739550ae4a4a 100644 (file)
@@ -43,7 +43,7 @@
 #include "target_core_ua.h"
 
 static sense_reason_t core_alua_check_transition(int state, int valid,
-                                                int *primary);
+                                                int *primary, int explicit);
 static int core_alua_set_tg_pt_secondary_state(
                struct se_lun *lun, int explicit, int offline);
 
@@ -335,8 +335,8 @@ target_emulate_set_target_port_groups(struct se_cmd *cmd)
                 * the state is a primary or secondary target port asymmetric
                 * access state.
                 */
-               rc = core_alua_check_transition(alua_access_state,
-                                               valid_states, &primary);
+               rc = core_alua_check_transition(alua_access_state, valid_states,
+                                               &primary, 1);
                if (rc) {
                        /*
                         * If the SET TARGET PORT GROUPS attempts to establish
@@ -691,7 +691,7 @@ target_alua_state_check(struct se_cmd *cmd)
 
        if (dev->se_hba->hba_flags & HBA_FLAGS_INTERNAL_USE)
                return 0;
-       if (dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH)
+       if (dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH_ALUA)
                return 0;
 
        /*
@@ -762,7 +762,7 @@ target_alua_state_check(struct se_cmd *cmd)
  * Check implicit and explicit ALUA state change request.
  */
 static sense_reason_t
-core_alua_check_transition(int state, int valid, int *primary)
+core_alua_check_transition(int state, int valid, int *primary, int explicit)
 {
        /*
         * OPTIMIZED, NON-OPTIMIZED, STANDBY and UNAVAILABLE are
@@ -804,11 +804,14 @@ core_alua_check_transition(int state, int valid, int *primary)
                *primary = 0;
                break;
        case ALUA_ACCESS_STATE_TRANSITION:
-               /*
-                * Transitioning is set internally, and
-                * cannot be selected manually.
-                */
-               goto not_supported;
+               if (!(valid & ALUA_T_SUP) || explicit)
+                       /*
+                        * Transitioning is set internally and by tcmu daemon,
+                        * and cannot be selected through a STPG.
+                        */
+                       goto not_supported;
+               *primary = 0;
+               break;
        default:
                pr_err("Unknown ALUA access state: 0x%02x\n", state);
                return TCM_INVALID_PARAMETER_LIST;
@@ -1013,7 +1016,7 @@ static void core_alua_queue_state_change_ua(struct t10_alua_tg_pt_gp *tg_pt_gp)
 static void core_alua_do_transition_tg_pt_work(struct work_struct *work)
 {
        struct t10_alua_tg_pt_gp *tg_pt_gp = container_of(work,
-               struct t10_alua_tg_pt_gp, tg_pt_gp_transition_work.work);
+               struct t10_alua_tg_pt_gp, tg_pt_gp_transition_work);
        struct se_device *dev = tg_pt_gp->tg_pt_gp_dev;
        bool explicit = (tg_pt_gp->tg_pt_gp_alua_access_status ==
                         ALUA_STATUS_ALTERED_BY_EXPLICIT_STPG);
@@ -1070,32 +1073,19 @@ static int core_alua_do_transition_tg_pt(
        if (atomic_read(&tg_pt_gp->tg_pt_gp_alua_access_state) == new_state)
                return 0;
 
-       if (new_state == ALUA_ACCESS_STATE_TRANSITION)
+       if (explicit && new_state == ALUA_ACCESS_STATE_TRANSITION)
                return -EAGAIN;
 
        /*
         * Flush any pending transitions
         */
-       if (!explicit && tg_pt_gp->tg_pt_gp_implicit_trans_secs &&
-           atomic_read(&tg_pt_gp->tg_pt_gp_alua_access_state) ==
-           ALUA_ACCESS_STATE_TRANSITION) {
-               /* Just in case */
-               tg_pt_gp->tg_pt_gp_alua_pending_state = new_state;
-               tg_pt_gp->tg_pt_gp_transition_complete = &wait;
-               flush_delayed_work(&tg_pt_gp->tg_pt_gp_transition_work);
-               wait_for_completion(&wait);
-               tg_pt_gp->tg_pt_gp_transition_complete = NULL;
-               return 0;
-       }
+       if (!explicit)
+               flush_work(&tg_pt_gp->tg_pt_gp_transition_work);
 
        /*
         * Save the old primary ALUA access state, and set the current state
         * to ALUA_ACCESS_STATE_TRANSITION.
         */
-       tg_pt_gp->tg_pt_gp_alua_previous_state =
-               atomic_read(&tg_pt_gp->tg_pt_gp_alua_access_state);
-       tg_pt_gp->tg_pt_gp_alua_pending_state = new_state;
-
        atomic_set(&tg_pt_gp->tg_pt_gp_alua_access_state,
                        ALUA_ACCESS_STATE_TRANSITION);
        tg_pt_gp->tg_pt_gp_alua_access_status = (explicit) ?
@@ -1104,6 +1094,13 @@ static int core_alua_do_transition_tg_pt(
 
        core_alua_queue_state_change_ua(tg_pt_gp);
 
+       if (new_state == ALUA_ACCESS_STATE_TRANSITION)
+               return 0;
+
+       tg_pt_gp->tg_pt_gp_alua_previous_state =
+               atomic_read(&tg_pt_gp->tg_pt_gp_alua_access_state);
+       tg_pt_gp->tg_pt_gp_alua_pending_state = new_state;
+
        /*
         * Check for the optional ALUA primary state transition delay
         */
@@ -1117,17 +1114,9 @@ static int core_alua_do_transition_tg_pt(
        atomic_inc(&tg_pt_gp->tg_pt_gp_ref_cnt);
        spin_unlock(&dev->t10_alua.tg_pt_gps_lock);
 
-       if (!explicit && tg_pt_gp->tg_pt_gp_implicit_trans_secs) {
-               unsigned long transition_tmo;
-
-               transition_tmo = tg_pt_gp->tg_pt_gp_implicit_trans_secs * HZ;
-               queue_delayed_work(tg_pt_gp->tg_pt_gp_dev->tmr_wq,
-                                  &tg_pt_gp->tg_pt_gp_transition_work,
-                                  transition_tmo);
-       } else {
+       schedule_work(&tg_pt_gp->tg_pt_gp_transition_work);
+       if (explicit) {
                tg_pt_gp->tg_pt_gp_transition_complete = &wait;
-               queue_delayed_work(tg_pt_gp->tg_pt_gp_dev->tmr_wq,
-                                  &tg_pt_gp->tg_pt_gp_transition_work, 0);
                wait_for_completion(&wait);
                tg_pt_gp->tg_pt_gp_transition_complete = NULL;
        }
@@ -1149,8 +1138,12 @@ int core_alua_do_port_transition(
        struct t10_alua_tg_pt_gp *tg_pt_gp;
        int primary, valid_states, rc = 0;
 
+       if (l_dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH_ALUA)
+               return -ENODEV;
+
        valid_states = l_tg_pt_gp->tg_pt_gp_alua_supported_states;
-       if (core_alua_check_transition(new_state, valid_states, &primary) != 0)
+       if (core_alua_check_transition(new_state, valid_states, &primary,
+                                      explicit) != 0)
                return -EINVAL;
 
        local_lu_gp_mem = l_dev->dev_alua_lu_gp_mem;
@@ -1695,8 +1688,8 @@ struct t10_alua_tg_pt_gp *core_alua_allocate_tg_pt_gp(struct se_device *dev,
        mutex_init(&tg_pt_gp->tg_pt_gp_md_mutex);
        spin_lock_init(&tg_pt_gp->tg_pt_gp_lock);
        atomic_set(&tg_pt_gp->tg_pt_gp_ref_cnt, 0);
-       INIT_DELAYED_WORK(&tg_pt_gp->tg_pt_gp_transition_work,
-                         core_alua_do_transition_tg_pt_work);
+       INIT_WORK(&tg_pt_gp->tg_pt_gp_transition_work,
+                 core_alua_do_transition_tg_pt_work);
        tg_pt_gp->tg_pt_gp_dev = dev;
        atomic_set(&tg_pt_gp->tg_pt_gp_alua_access_state,
                ALUA_ACCESS_STATE_ACTIVE_OPTIMIZED);
@@ -1804,7 +1797,7 @@ void core_alua_free_tg_pt_gp(
        dev->t10_alua.alua_tg_pt_gps_counter--;
        spin_unlock(&dev->t10_alua.tg_pt_gps_lock);
 
-       flush_delayed_work(&tg_pt_gp->tg_pt_gp_transition_work);
+       flush_work(&tg_pt_gp->tg_pt_gp_transition_work);
 
        /*
         * Allow a struct t10_alua_tg_pt_gp_member * referenced by
@@ -1973,7 +1966,7 @@ ssize_t core_alua_store_tg_pt_gp_info(
        unsigned char buf[TG_PT_GROUP_NAME_BUF];
        int move = 0;
 
-       if (dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH ||
+       if (dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH_ALUA ||
            (dev->se_hba->hba_flags & HBA_FLAGS_INTERNAL_USE))
                return -ENODEV;
 
@@ -2230,7 +2223,7 @@ ssize_t core_alua_store_offline_bit(
        unsigned long tmp;
        int ret;
 
-       if (dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH ||
+       if (dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH_ALUA ||
            (dev->se_hba->hba_flags & HBA_FLAGS_INTERNAL_USE))
                return -ENODEV;
 
@@ -2316,7 +2309,8 @@ ssize_t core_alua_store_secondary_write_metadata(
 
 int core_setup_alua(struct se_device *dev)
 {
-       if (!(dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH) &&
+       if (!(dev->transport->transport_flags &
+            TRANSPORT_FLAG_PASSTHROUGH_ALUA) &&
            !(dev->se_hba->hba_flags & HBA_FLAGS_INTERNAL_USE)) {
                struct t10_alua_lu_gp_member *lu_gp_mem;
 
index 54b36c9835be3ae2127cb1f447321eba73b824ac..38b5025e4c7a877f9e5c0bcfa6995262b6330e32 100644 (file)
@@ -421,6 +421,10 @@ static int target_fabric_tf_ops_check(const struct target_core_fabric_ops *tfo)
                pr_err("Missing tfo->aborted_task()\n");
                return -EINVAL;
        }
+       if (!tfo->check_stop_free) {
+               pr_err("Missing tfo->check_stop_free()\n");
+               return -EINVAL;
+       }
        /*
         * We at least require tfo->fabric_make_wwn(), tfo->fabric_drop_wwn()
         * tfo->fabric_make_tpg() and tfo->fabric_drop_tpg() in
index a8f8e53f2f574852de573a08a86ad1c25b4cf332..94cda7991e80abbffb32941c8d8f5cfcbd262e3f 100644 (file)
@@ -154,7 +154,7 @@ static void pscsi_tape_read_blocksize(struct se_device *dev,
 
        buf = kzalloc(12, GFP_KERNEL);
        if (!buf)
-               return;
+               goto out_free;
 
        memset(cdb, 0, MAX_COMMAND_SIZE);
        cdb[0] = MODE_SENSE;
@@ -169,9 +169,10 @@ static void pscsi_tape_read_blocksize(struct se_device *dev,
         * If MODE_SENSE still returns zero, set the default value to 1024.
         */
        sdev->sector_size = (buf[9] << 16) | (buf[10] << 8) | (buf[11]);
+out_free:
        if (!sdev->sector_size)
                sdev->sector_size = 1024;
-out_free:
+
        kfree(buf);
 }
 
@@ -314,9 +315,10 @@ static int pscsi_add_device_to_list(struct se_device *dev,
                                sd->lun, sd->queue_depth);
        }
 
-       dev->dev_attrib.hw_block_size = sd->sector_size;
+       dev->dev_attrib.hw_block_size =
+               min_not_zero((int)sd->sector_size, 512);
        dev->dev_attrib.hw_max_sectors =
-               min_t(int, sd->host->max_sectors, queue_max_hw_sectors(q));
+               min_not_zero(sd->host->max_sectors, queue_max_hw_sectors(q));
        dev->dev_attrib.hw_queue_depth = sd->queue_depth;
 
        /*
@@ -339,8 +341,10 @@ static int pscsi_add_device_to_list(struct se_device *dev,
        /*
         * For TYPE_TAPE, attempt to determine blocksize with MODE_SENSE.
         */
-       if (sd->type == TYPE_TAPE)
+       if (sd->type == TYPE_TAPE) {
                pscsi_tape_read_blocksize(dev, sd);
+               dev->dev_attrib.hw_block_size = sd->sector_size;
+       }
        return 0;
 }
 
@@ -406,7 +410,7 @@ static int pscsi_create_type_disk(struct se_device *dev, struct scsi_device *sd)
 /*
  * Called with struct Scsi_Host->host_lock called.
  */
-static int pscsi_create_type_rom(struct se_device *dev, struct scsi_device *sd)
+static int pscsi_create_type_nondisk(struct se_device *dev, struct scsi_device *sd)
        __releases(sh->host_lock)
 {
        struct pscsi_hba_virt *phv = dev->se_hba->hba_ptr;
@@ -433,28 +437,6 @@ static int pscsi_create_type_rom(struct se_device *dev, struct scsi_device *sd)
        return 0;
 }
 
-/*
- * Called with struct Scsi_Host->host_lock called.
- */
-static int pscsi_create_type_other(struct se_device *dev,
-               struct scsi_device *sd)
-       __releases(sh->host_lock)
-{
-       struct pscsi_hba_virt *phv = dev->se_hba->hba_ptr;
-       struct Scsi_Host *sh = sd->host;
-       int ret;
-
-       spin_unlock_irq(sh->host_lock);
-       ret = pscsi_add_device_to_list(dev, sd);
-       if (ret)
-               return ret;
-
-       pr_debug("CORE_PSCSI[%d] - Added Type: %s for %d:%d:%d:%llu\n",
-               phv->phv_host_id, scsi_device_type(sd->type), sh->host_no,
-               sd->channel, sd->id, sd->lun);
-       return 0;
-}
-
 static int pscsi_configure_device(struct se_device *dev)
 {
        struct se_hba *hba = dev->se_hba;
@@ -542,11 +524,8 @@ static int pscsi_configure_device(struct se_device *dev)
                case TYPE_DISK:
                        ret = pscsi_create_type_disk(dev, sd);
                        break;
-               case TYPE_ROM:
-                       ret = pscsi_create_type_rom(dev, sd);
-                       break;
                default:
-                       ret = pscsi_create_type_other(dev, sd);
+                       ret = pscsi_create_type_nondisk(dev, sd);
                        break;
                }
 
@@ -611,8 +590,7 @@ static void pscsi_free_device(struct se_device *dev)
                else if (pdv->pdv_lld_host)
                        scsi_host_put(pdv->pdv_lld_host);
 
-               if ((sd->type == TYPE_DISK) || (sd->type == TYPE_ROM))
-                       scsi_device_put(sd);
+               scsi_device_put(sd);
 
                pdv->pdv_sd = NULL;
        }
@@ -1064,7 +1042,6 @@ static sector_t pscsi_get_blocks(struct se_device *dev)
        if (pdv->pdv_bd && pdv->pdv_bd->bd_part)
                return pdv->pdv_bd->bd_part->nr_sects;
 
-       dump_stack();
        return 0;
 }
 
@@ -1103,7 +1080,8 @@ static void pscsi_req_done(struct request *req, int uptodate)
 static const struct target_backend_ops pscsi_ops = {
        .name                   = "pscsi",
        .owner                  = THIS_MODULE,
-       .transport_flags        = TRANSPORT_FLAG_PASSTHROUGH,
+       .transport_flags        = TRANSPORT_FLAG_PASSTHROUGH |
+                                 TRANSPORT_FLAG_PASSTHROUGH_ALUA,
        .attach_hba             = pscsi_attach_hba,
        .detach_hba             = pscsi_detach_hba,
        .pmode_enable_hba       = pscsi_pmode_enable_hba,
index 68d8aef7ab78d4084b57e6fd0fa0b0afce7251df..c194063f169b13ce44bf014894960693530e25d7 100644 (file)
@@ -1105,9 +1105,15 @@ sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops)
                        return ret;
                break;
        case VERIFY:
+       case VERIFY_16:
                size = 0;
-               sectors = transport_get_sectors_10(cdb);
-               cmd->t_task_lba = transport_lba_32(cdb);
+               if (cdb[0] == VERIFY) {
+                       sectors = transport_get_sectors_10(cdb);
+                       cmd->t_task_lba = transport_lba_32(cdb);
+               } else {
+                       sectors = transport_get_sectors_16(cdb);
+                       cmd->t_task_lba = transport_lba_64(cdb);
+               }
                cmd->execute_cmd = sbc_emulate_noop;
                goto check_lba;
        case REZERO_UNIT:
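
For reference, the split above mirrors the CDB layouts: VERIFY (10-byte
CDB) carries a 32-bit big-endian LBA in bytes 2-5, VERIFY_16 a 64-bit LBA
in bytes 2-9, which is what transport_lba_32()/transport_lba_64() extract.
A standalone sketch of the same decoding (helper names are local to this
example):

#include <linux/types.h>
#include <asm/unaligned.h>

/* VERIFY(10): 32-bit LBA at cdb[2..5] */
static inline u32 sketch_lba_from_cdb10(const u8 *cdb)
{
	return get_unaligned_be32(&cdb[2]);
}

/* VERIFY(16): 64-bit LBA at cdb[2..9] */
static inline u64 sketch_lba_from_cdb16(const u8 *cdb)
{
	return get_unaligned_be64(&cdb[2]);
}
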
index c0dbfa0165750523e552b93fdbb0c64c94cdab2d..6fb191914f458f7889508652e19b860355387491 100644 (file)
@@ -602,7 +602,8 @@ int core_tpg_add_lun(
        if (ret)
                goto out_kill_ref;
 
-       if (!(dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH) &&
+       if (!(dev->transport->transport_flags &
+            TRANSPORT_FLAG_PASSTHROUGH_ALUA) &&
            !(dev->se_hba->hba_flags & HBA_FLAGS_INTERNAL_USE))
                target_attach_tg_pt_gp(lun, dev->t10_alua.default_tg_pt_gp);
 
index 434d9d693989179f72abca120e01155d664d0c87..b1a3cdb29468cf84e7eb48d6c8c41934c0b5b4cb 100644 (file)
@@ -636,8 +636,7 @@ static int transport_cmd_check_stop_to_fabric(struct se_cmd *cmd)
         * Fabric modules are expected to return '1' here if the se_cmd being
         * passed is released at this point, or zero if not being released.
         */
-       return cmd->se_tfo->check_stop_free ? cmd->se_tfo->check_stop_free(cmd)
-               : 0;
+       return cmd->se_tfo->check_stop_free(cmd);
 }
 
 static void transport_lun_remove_cmd(struct se_cmd *cmd)
index c3adefe95e50f7f7054e272e15fc5e37663d11c9..c6874c38a10bc45e86beae58ddfed175664d51cf 100644 (file)
@@ -28,6 +28,7 @@
 #include <linux/stringify.h>
 #include <linux/bitops.h>
 #include <linux/highmem.h>
+#include <linux/configfs.h>
 #include <net/genetlink.h>
 #include <scsi/scsi_common.h>
 #include <scsi/scsi_proto.h>
@@ -112,6 +113,7 @@ struct tcmu_dev {
        spinlock_t commands_lock;
 
        struct timer_list timeout;
+       unsigned int cmd_time_out;
 
        char dev_config[TCMU_CONFIG_LEN];
 };
@@ -172,7 +174,9 @@ static struct tcmu_cmd *tcmu_alloc_cmd(struct se_cmd *se_cmd)
 
        tcmu_cmd->se_cmd = se_cmd;
        tcmu_cmd->tcmu_dev = udev;
-       tcmu_cmd->deadline = jiffies + msecs_to_jiffies(TCMU_TIME_OUT);
+       if (udev->cmd_time_out)
+               tcmu_cmd->deadline = jiffies +
+                                       msecs_to_jiffies(udev->cmd_time_out);
 
        idr_preload(GFP_KERNEL);
        spin_lock_irq(&udev->commands_lock);
@@ -451,7 +455,11 @@ tcmu_queue_cmd_ring(struct tcmu_cmd *tcmu_cmd)
 
                pr_debug("sleeping for ring space\n");
                spin_unlock_irq(&udev->cmdr_lock);
-               ret = schedule_timeout(msecs_to_jiffies(TCMU_TIME_OUT));
+               if (udev->cmd_time_out)
+                       ret = schedule_timeout(
+                                       msecs_to_jiffies(udev->cmd_time_out));
+               else
+                       ret = schedule_timeout(msecs_to_jiffies(TCMU_TIME_OUT));
                finish_wait(&udev->wait_cmdr, &__wait);
                if (!ret) {
                        pr_warn("tcmu: command timed out\n");
@@ -526,8 +534,9 @@ tcmu_queue_cmd_ring(struct tcmu_cmd *tcmu_cmd)
        /* TODO: only if FLUSH and FUA? */
        uio_event_notify(&udev->uio_info);
 
-       mod_timer(&udev->timeout,
-               round_jiffies_up(jiffies + msecs_to_jiffies(TCMU_TIME_OUT)));
+       if (udev->cmd_time_out)
+               mod_timer(&udev->timeout, round_jiffies_up(jiffies +
+                         msecs_to_jiffies(udev->cmd_time_out)));
 
        return TCM_NO_SENSE;
 }
@@ -742,6 +751,7 @@ static struct se_device *tcmu_alloc_device(struct se_hba *hba, const char *name)
        }
 
        udev->hba = hba;
+       udev->cmd_time_out = TCMU_TIME_OUT;
 
        init_waitqueue_head(&udev->wait_cmdr);
        spin_lock_init(&udev->cmdr_lock);
@@ -960,7 +970,8 @@ static int tcmu_configure_device(struct se_device *dev)
        if (dev->dev_attrib.hw_block_size == 0)
                dev->dev_attrib.hw_block_size = 512;
        /* Other attributes can be configured in userspace */
-       dev->dev_attrib.hw_max_sectors = 128;
+       if (!dev->dev_attrib.hw_max_sectors)
+               dev->dev_attrib.hw_max_sectors = 128;
        dev->dev_attrib.hw_queue_depth = 128;
 
        ret = tcmu_netlink_event(TCMU_CMD_ADDED_DEVICE, udev->uio_info.name,
@@ -997,6 +1008,11 @@ static void tcmu_dev_call_rcu(struct rcu_head *p)
        kfree(udev);
 }
 
+static bool tcmu_dev_configured(struct tcmu_dev *udev)
+{
+       return udev->uio_info.uio_dev ? true : false;
+}
+
 static void tcmu_free_device(struct se_device *dev)
 {
        struct tcmu_dev *udev = TCMU_DEV(dev);
@@ -1018,8 +1034,7 @@ static void tcmu_free_device(struct se_device *dev)
        spin_unlock_irq(&udev->commands_lock);
        WARN_ON(!all_expired);
 
-       /* Device was configured */
-       if (udev->uio_info.uio_dev) {
+       if (tcmu_dev_configured(udev)) {
                tcmu_netlink_event(TCMU_CMD_REMOVED_DEVICE, udev->uio_info.name,
                                   udev->uio_info.uio_dev->minor);
 
@@ -1031,16 +1046,42 @@ static void tcmu_free_device(struct se_device *dev)
 }
 
 enum {
-       Opt_dev_config, Opt_dev_size, Opt_hw_block_size, Opt_err,
+       Opt_dev_config, Opt_dev_size, Opt_hw_block_size, Opt_hw_max_sectors,
+       Opt_err,
 };
 
 static match_table_t tokens = {
        {Opt_dev_config, "dev_config=%s"},
        {Opt_dev_size, "dev_size=%u"},
        {Opt_hw_block_size, "hw_block_size=%u"},
+       {Opt_hw_max_sectors, "hw_max_sectors=%u"},
        {Opt_err, NULL}
 };
 
+static int tcmu_set_dev_attrib(substring_t *arg, u32 *dev_attrib)
+{
+       unsigned long tmp_ul;
+       char *arg_p;
+       int ret;
+
+       arg_p = match_strdup(arg);
+       if (!arg_p)
+               return -ENOMEM;
+
+       ret = kstrtoul(arg_p, 0, &tmp_ul);
+       kfree(arg_p);
+       if (ret < 0) {
+               pr_err("kstrtoul() failed for dev attrib\n");
+               return ret;
+       }
+       if (!tmp_ul) {
+               pr_err("dev attrib must be nonzero\n");
+               return -EINVAL;
+       }
+       *dev_attrib = tmp_ul;
+       return 0;
+}
+
 static ssize_t tcmu_set_configfs_dev_params(struct se_device *dev,
                const char *page, ssize_t count)
 {
@@ -1048,7 +1089,6 @@ static ssize_t tcmu_set_configfs_dev_params(struct se_device *dev,
        char *orig, *ptr, *opts, *arg_p;
        substring_t args[MAX_OPT_ARGS];
        int ret = 0, token;
-       unsigned long tmp_ul;
 
        opts = kstrdup(page, GFP_KERNEL);
        if (!opts)
@@ -1082,26 +1122,19 @@ static ssize_t tcmu_set_configfs_dev_params(struct se_device *dev,
                                pr_err("kstrtoul() failed for dev_size=\n");
                        break;
                case Opt_hw_block_size:
-                       arg_p = match_strdup(&args[0]);
-                       if (!arg_p) {
-                               ret = -ENOMEM;
-                               break;
-                       }
-                       ret = kstrtoul(arg_p, 0, &tmp_ul);
-                       kfree(arg_p);
-                       if (ret < 0) {
-                               pr_err("kstrtoul() failed for hw_block_size=\n");
-                               break;
-                       }
-                       if (!tmp_ul) {
-                               pr_err("hw_block_size must be nonzero\n");
-                               break;
-                       }
-                       dev->dev_attrib.hw_block_size = tmp_ul;
+                       ret = tcmu_set_dev_attrib(&args[0],
+                                       &(dev->dev_attrib.hw_block_size));
+                       break;
+               case Opt_hw_max_sectors:
+                       ret = tcmu_set_dev_attrib(&args[0],
+                                       &(dev->dev_attrib.hw_max_sectors));
                        break;
                default:
                        break;
                }
+
+               if (ret)
+                       break;
        }
 
        kfree(orig);
@@ -1134,7 +1167,48 @@ tcmu_parse_cdb(struct se_cmd *cmd)
        return passthrough_parse_cdb(cmd, tcmu_queue_cmd);
 }
 
-static const struct target_backend_ops tcmu_ops = {
+static ssize_t tcmu_cmd_time_out_show(struct config_item *item, char *page)
+{
+       struct se_dev_attrib *da = container_of(to_config_group(item),
+                                       struct se_dev_attrib, da_group);
+       struct tcmu_dev *udev = container_of(da->da_dev,
+                                       struct tcmu_dev, se_dev);
+
+       return snprintf(page, PAGE_SIZE, "%lu\n", udev->cmd_time_out / MSEC_PER_SEC);
+}
+
+static ssize_t tcmu_cmd_time_out_store(struct config_item *item, const char *page,
+                                      size_t count)
+{
+       struct se_dev_attrib *da = container_of(to_config_group(item),
+                                       struct se_dev_attrib, da_group);
+       struct tcmu_dev *udev = container_of(da->da_dev,
+                                       struct tcmu_dev, se_dev);
+       u32 val;
+       int ret;
+
+       if (da->da_dev->export_count) {
+               pr_err("Unable to set tcmu cmd_time_out while exports exist\n");
+               return -EINVAL;
+       }
+
+       ret = kstrtou32(page, 0, &val);
+       if (ret < 0)
+               return ret;
+
+       if (!val) {
+               pr_err("Illegal value for cmd_time_out\n");
+               return -EINVAL;
+       }
+
+       udev->cmd_time_out = val * MSEC_PER_SEC;
+       return count;
+}
+CONFIGFS_ATTR(tcmu_, cmd_time_out);
+
+static struct configfs_attribute **tcmu_attrs;
+
+static struct target_backend_ops tcmu_ops = {
        .name                   = "user",
        .owner                  = THIS_MODULE,
        .transport_flags        = TRANSPORT_FLAG_PASSTHROUGH,
@@ -1148,12 +1222,12 @@ static const struct target_backend_ops tcmu_ops = {
        .show_configfs_dev_params = tcmu_show_configfs_dev_params,
        .get_device_type        = sbc_get_device_type,
        .get_blocks             = tcmu_get_blocks,
-       .tb_dev_attrib_attrs    = passthrough_attrib_attrs,
+       .tb_dev_attrib_attrs    = NULL,
 };
 
 static int __init tcmu_module_init(void)
 {
-       int ret;
+       int ret, i, len = 0;
 
        BUILD_BUG_ON((sizeof(struct tcmu_cmd_entry) % TCMU_OP_ALIGN_SIZE) != 0);
 
@@ -1175,12 +1249,31 @@ static int __init tcmu_module_init(void)
                goto out_unreg_device;
        }
 
+       for (i = 0; passthrough_attrib_attrs[i] != NULL; i++) {
+               len += sizeof(struct configfs_attribute *);
+       }
+       len += sizeof(struct configfs_attribute *) * 2;
+
+       tcmu_attrs = kzalloc(len, GFP_KERNEL);
+       if (!tcmu_attrs) {
+               ret = -ENOMEM;
+               goto out_unreg_genl;
+       }
+
+       for (i = 0; passthrough_attrib_attrs[i] != NULL; i++) {
+               tcmu_attrs[i] = passthrough_attrib_attrs[i];
+       }
+       tcmu_attrs[i] = &tcmu_attr_cmd_time_out;
+       tcmu_ops.tb_dev_attrib_attrs = tcmu_attrs;
+
        ret = transport_backend_register(&tcmu_ops);
        if (ret)
-               goto out_unreg_genl;
+               goto out_attrs;
 
        return 0;
 
+out_attrs:
+       kfree(tcmu_attrs);
 out_unreg_genl:
        genl_unregister_family(&tcmu_genl_family);
 out_unreg_device:
@@ -1194,6 +1287,7 @@ out_free_cache:
 static void __exit tcmu_module_exit(void)
 {
        target_backend_unregister(&tcmu_ops);
+       kfree(tcmu_attrs);
        genl_unregister_family(&tcmu_genl_family);
        root_device_unregister(tcmu_root_device);
        kmem_cache_destroy(tcmu_cmd_cache);
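
The tcmu hunks above build the backend's configfs attribute table at module
init: the NULL-terminated passthrough_attrib_attrs array is copied into a
freshly allocated array sized for one extra entry plus the terminator, and
tcmu_attr_cmd_time_out is appended. A minimal sketch of that copy-and-append
pattern, with illustrative names (ext_attrs is not a kernel symbol):

static struct configfs_attribute **ext_attrs(struct configfs_attribute **base,
                                             struct configfs_attribute *extra)
{
        struct configfs_attribute **out;
        int i, n = 0;

        while (base[n])                 /* count existing entries */
                n++;

        /* n existing + 1 new + 1 NULL terminator */
        out = kcalloc(n + 2, sizeof(*out), GFP_KERNEL);
        if (!out)
                return NULL;

        for (i = 0; i < n; i++)
                out[i] = base[i];
        out[n] = extra;                 /* out[n + 1] stays NULL */
        return out;
}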
index 91048eeca28b2dc3d81f234ed60bbfdd796f8277..69d0f430b2d190756de94d4b6b6334d1af2dfa50 100644 (file)
@@ -107,8 +107,6 @@ struct cpufreq_cooling_device {
 };
 static DEFINE_IDA(cpufreq_ida);
 
-static unsigned int cpufreq_dev_count;
-
 static DEFINE_MUTEX(cooling_list_lock);
 static LIST_HEAD(cpufreq_dev_list);
 
@@ -395,13 +393,20 @@ static int get_static_power(struct cpufreq_cooling_device *cpufreq_device,
 
        opp = dev_pm_opp_find_freq_exact(cpufreq_device->cpu_dev, freq_hz,
                                         true);
+       if (IS_ERR(opp)) {
+               dev_warn_ratelimited(cpufreq_device->cpu_dev,
+                                    "Failed to find OPP for frequency %lu: %ld\n",
+                                    freq_hz, PTR_ERR(opp));
+               return -EINVAL;
+       }
+
        voltage = dev_pm_opp_get_voltage(opp);
        dev_pm_opp_put(opp);
 
        if (voltage == 0) {
-               dev_warn_ratelimited(cpufreq_device->cpu_dev,
-                                    "Failed to get voltage for frequency %lu: %ld\n",
-                                    freq_hz, IS_ERR(opp) ? PTR_ERR(opp) : 0);
+               dev_err_ratelimited(cpufreq_device->cpu_dev,
+                                   "Failed to get voltage for frequency %lu\n",
+                                   freq_hz);
                return -EINVAL;
        }
 
@@ -693,9 +698,9 @@ static int cpufreq_power2state(struct thermal_cooling_device *cdev,
 
        *state = cpufreq_cooling_get_level(cpu, target_freq);
        if (*state == THERMAL_CSTATE_INVALID) {
-               dev_warn_ratelimited(&cdev->device,
-                                    "Failed to convert %dKHz for cpu %d into a cdev state\n",
-                                    target_freq, cpu);
+               dev_err_ratelimited(&cdev->device,
+                                   "Failed to convert %dKHz for cpu %d into a cdev state\n",
+                                   target_freq, cpu);
                return -EINVAL;
        }
 
@@ -771,6 +776,7 @@ __cpufreq_cooling_register(struct device_node *np,
        unsigned int freq, i, num_cpus;
        int ret;
        struct thermal_cooling_device_ops *cooling_ops;
+       bool first;
 
        if (!alloc_cpumask_var(&temp_mask, GFP_KERNEL))
                return ERR_PTR(-ENOMEM);
@@ -874,13 +880,14 @@ __cpufreq_cooling_register(struct device_node *np,
        cpufreq_dev->cool_dev = cool_dev;
 
        mutex_lock(&cooling_list_lock);
+       /* Register the notifier for the first cpufreq cooling device */
+       first = list_empty(&cpufreq_dev_list);
        list_add(&cpufreq_dev->node, &cpufreq_dev_list);
+       mutex_unlock(&cooling_list_lock);
 
-       /* Register the notifier for first cpufreq cooling device */
-       if (!cpufreq_dev_count++)
+       if (first)
                cpufreq_register_notifier(&thermal_cpufreq_notifier_block,
                                          CPUFREQ_POLICY_NOTIFIER);
-       mutex_unlock(&cooling_list_lock);
 
        goto put_policy;
 
@@ -1021,6 +1028,7 @@ EXPORT_SYMBOL(of_cpufreq_power_cooling_register);
 void cpufreq_cooling_unregister(struct thermal_cooling_device *cdev)
 {
        struct cpufreq_cooling_device *cpufreq_dev;
+       bool last;
 
        if (!cdev)
                return;
@@ -1028,14 +1036,15 @@ void cpufreq_cooling_unregister(struct thermal_cooling_device *cdev)
        cpufreq_dev = cdev->devdata;
 
        mutex_lock(&cooling_list_lock);
+       list_del(&cpufreq_dev->node);
        /* Unregister the notifier for the last cpufreq cooling device */
-       if (!--cpufreq_dev_count)
+       last = list_empty(&cpufreq_dev_list);
+       mutex_unlock(&cooling_list_lock);
+
+       if (last)
                cpufreq_unregister_notifier(&thermal_cpufreq_notifier_block,
                                            CPUFREQ_POLICY_NOTIFIER);
 
-       list_del(&cpufreq_dev->node);
-       mutex_unlock(&cooling_list_lock);
-
        thermal_cooling_device_unregister(cpufreq_dev->cool_dev);
        ida_simple_remove(&cpufreq_ida, cpufreq_dev->id);
        kfree(cpufreq_dev->dyn_power_table);
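
The cpufreq cooling hunks above replace the cpufreq_dev_count counter with
list_empty() checks: whether this is the first registration (or the last
removal) is decided under cooling_list_lock, together with the list update,
while the notifier call itself runs outside the lock. A hedged sketch of
that pattern (my_lock, my_list and do_register are placeholders):

extern void do_register(void);          /* placeholder */

static DEFINE_MUTEX(my_lock);
static LIST_HEAD(my_list);

static void add_item(struct list_head *item)
{
        bool first;

        mutex_lock(&my_lock);
        first = list_empty(&my_list);   /* decided atomically with the add */
        list_add(item, &my_list);
        mutex_unlock(&my_lock);

        if (first)                      /* heavy work outside the lock */
                do_register();
}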
index 7743a78d472397bbf9a10c0b1ac2d7e4b163a6a6..4bf4ad58cffda0172a48138248f4b406790c58e8 100644 (file)
@@ -186,16 +186,22 @@ get_static_power(struct devfreq_cooling_device *dfc, unsigned long freq)
                return 0;
 
        opp = dev_pm_opp_find_freq_exact(dev, freq, true);
-       if (IS_ERR(opp) && (PTR_ERR(opp) == -ERANGE))
+       if (PTR_ERR(opp) == -ERANGE)
                opp = dev_pm_opp_find_freq_exact(dev, freq, false);
 
+       if (IS_ERR(opp)) {
+               dev_err_ratelimited(dev, "Failed to find OPP for frequency %lu: %ld\n",
+                                   freq, PTR_ERR(opp));
+               return 0;
+       }
+
        voltage = dev_pm_opp_get_voltage(opp) / 1000; /* mV */
        dev_pm_opp_put(opp);
 
        if (voltage == 0) {
-               dev_warn_ratelimited(dev,
-                                    "Failed to get voltage for frequency %lu: %ld\n",
-                                    freq, IS_ERR(opp) ? PTR_ERR(opp) : 0);
+               dev_err_ratelimited(dev,
+                                   "Failed to get voltage for frequency %lu\n",
+                                   freq);
                return 0;
        }
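
Both cooling hunks above add an explicit IS_ERR() check before touching the
OPP: dev_pm_opp_find_freq_exact() reports failure through the pointer itself.
A short sketch of that error-pointer convention (dev and freq come from the
surrounding function):

opp = dev_pm_opp_find_freq_exact(dev, freq, true);
if (IS_ERR(opp)) {
        /* the errno is encoded in the pointer */
        dev_err_ratelimited(dev, "no OPP for %lu: %ld\n",
                            freq, PTR_ERR(opp));
        return 0;                       /* treat as "no static power" */
}
voltage = dev_pm_opp_get_voltage(opp);
dev_pm_opp_put(opp);                    /* balance the lookup's reference */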
 
index 1bacbc3b19a05cc7b685ddf93b14d5ca10d67acf..e94aea8c0d0535cbc05933bb02067263e62e8292 100644 (file)
 #define DEFAULT_TX_BUF_COUNT 3
 
 struct n_hdlc_buf {
-       struct n_hdlc_buf *link;
+       struct list_head  list_item;
        int               count;
        char              buf[1];
 };
@@ -122,8 +122,7 @@ struct n_hdlc_buf {
 #define        N_HDLC_BUF_SIZE (sizeof(struct n_hdlc_buf) + maxframe)
 
 struct n_hdlc_buf_list {
-       struct n_hdlc_buf *head;
-       struct n_hdlc_buf *tail;
+       struct list_head  list;
        int               count;
        spinlock_t        spinlock;
 };
@@ -136,7 +135,6 @@ struct n_hdlc_buf_list {
  * @backup_tty - TTY to use if tty gets closed
  * @tbusy - reentrancy flag for tx wakeup code
  * @woke_up - FIXME: describe this field
- * @tbuf - currently transmitting tx buffer
  * @tx_buf_list - list of pending transmit frame buffers
  * @rx_buf_list - list of received frame buffers
  * @tx_free_buf_list - list unused transmit frame buffers
@@ -149,7 +147,6 @@ struct n_hdlc {
        struct tty_struct       *backup_tty;
        int                     tbusy;
        int                     woke_up;
-       struct n_hdlc_buf       *tbuf;
        struct n_hdlc_buf_list  tx_buf_list;
        struct n_hdlc_buf_list  rx_buf_list;
        struct n_hdlc_buf_list  tx_free_buf_list;
@@ -159,6 +156,8 @@ struct n_hdlc {
 /*
  * HDLC buffer list manipulation functions
  */
+static void n_hdlc_buf_return(struct n_hdlc_buf_list *buf_list,
+                                               struct n_hdlc_buf *buf);
 static void n_hdlc_buf_put(struct n_hdlc_buf_list *list,
                           struct n_hdlc_buf *buf);
 static struct n_hdlc_buf *n_hdlc_buf_get(struct n_hdlc_buf_list *list);
@@ -208,16 +207,9 @@ static void flush_tx_queue(struct tty_struct *tty)
 {
        struct n_hdlc *n_hdlc = tty2n_hdlc(tty);
        struct n_hdlc_buf *buf;
-       unsigned long flags;
 
        while ((buf = n_hdlc_buf_get(&n_hdlc->tx_buf_list)))
                n_hdlc_buf_put(&n_hdlc->tx_free_buf_list, buf);
-       spin_lock_irqsave(&n_hdlc->tx_buf_list.spinlock, flags);
-       if (n_hdlc->tbuf) {
-               n_hdlc_buf_put(&n_hdlc->tx_free_buf_list, n_hdlc->tbuf);
-               n_hdlc->tbuf = NULL;
-       }
-       spin_unlock_irqrestore(&n_hdlc->tx_buf_list.spinlock, flags);
 }
 
 static struct tty_ldisc_ops n_hdlc_ldisc = {
@@ -283,7 +275,6 @@ static void n_hdlc_release(struct n_hdlc *n_hdlc)
                } else
                        break;
        }
-       kfree(n_hdlc->tbuf);
        kfree(n_hdlc);
        
 }      /* end of n_hdlc_release() */
@@ -402,13 +393,7 @@ static void n_hdlc_send_frames(struct n_hdlc *n_hdlc, struct tty_struct *tty)
        n_hdlc->woke_up = 0;
        spin_unlock_irqrestore(&n_hdlc->tx_buf_list.spinlock, flags);
 
-       /* get current transmit buffer or get new transmit */
-       /* buffer from list of pending transmit buffers */
-               
-       tbuf = n_hdlc->tbuf;
-       if (!tbuf)
-               tbuf = n_hdlc_buf_get(&n_hdlc->tx_buf_list);
-               
+       tbuf = n_hdlc_buf_get(&n_hdlc->tx_buf_list);
        while (tbuf) {
                if (debuglevel >= DEBUG_LEVEL_INFO)     
                        printk("%s(%d)sending frame %p, count=%d\n",
@@ -420,7 +405,7 @@ static void n_hdlc_send_frames(struct n_hdlc *n_hdlc, struct tty_struct *tty)
 
                /* rollback was possible and has been done */
                if (actual == -ERESTARTSYS) {
-                       n_hdlc->tbuf = tbuf;
+                       n_hdlc_buf_return(&n_hdlc->tx_buf_list, tbuf);
                        break;
                }
                /* if transmit error, throw frame away by */
@@ -435,10 +420,7 @@ static void n_hdlc_send_frames(struct n_hdlc *n_hdlc, struct tty_struct *tty)
                                        
                        /* free current transmit buffer */
                        n_hdlc_buf_put(&n_hdlc->tx_free_buf_list, tbuf);
-                       
-                       /* this tx buffer is done */
-                       n_hdlc->tbuf = NULL;
-                       
+
                        /* wake up sleeping writers */
                        wake_up_interruptible(&tty->write_wait);
        
@@ -448,10 +430,12 @@ static void n_hdlc_send_frames(struct n_hdlc *n_hdlc, struct tty_struct *tty)
                        if (debuglevel >= DEBUG_LEVEL_INFO)     
                                printk("%s(%d)frame %p pending\n",
                                        __FILE__,__LINE__,tbuf);
-                                       
-                       /* buffer not accepted by driver */
-                       /* set this buffer as pending buffer */
-                       n_hdlc->tbuf = tbuf;
+
+                       /*
+                        * the buffer was not accepted by the driver,
+                        * return it to the tx queue
+                        */
+                       n_hdlc_buf_return(&n_hdlc->tx_buf_list, tbuf);
                        break;
                }
        }
@@ -749,7 +733,8 @@ static int n_hdlc_tty_ioctl(struct tty_struct *tty, struct file *file,
        int error = 0;
        int count;
        unsigned long flags;
-       
+       struct n_hdlc_buf *buf = NULL;
+
        if (debuglevel >= DEBUG_LEVEL_INFO)     
                printk("%s(%d)n_hdlc_tty_ioctl() called %d\n",
                        __FILE__,__LINE__,cmd);
@@ -763,8 +748,10 @@ static int n_hdlc_tty_ioctl(struct tty_struct *tty, struct file *file,
                /* report count of read data available */
                /* in next available frame (if any) */
                spin_lock_irqsave(&n_hdlc->rx_buf_list.spinlock,flags);
-               if (n_hdlc->rx_buf_list.head)
-                       count = n_hdlc->rx_buf_list.head->count;
+               buf = list_first_entry_or_null(&n_hdlc->rx_buf_list.list,
+                                               struct n_hdlc_buf, list_item);
+               if (buf)
+                       count = buf->count;
                else
                        count = 0;
                spin_unlock_irqrestore(&n_hdlc->rx_buf_list.spinlock,flags);
@@ -776,8 +763,10 @@ static int n_hdlc_tty_ioctl(struct tty_struct *tty, struct file *file,
                count = tty_chars_in_buffer(tty);
                /* add size of next output frame in queue */
                spin_lock_irqsave(&n_hdlc->tx_buf_list.spinlock,flags);
-               if (n_hdlc->tx_buf_list.head)
-                       count += n_hdlc->tx_buf_list.head->count;
+               buf = list_first_entry_or_null(&n_hdlc->tx_buf_list.list,
+                                               struct n_hdlc_buf, list_item);
+               if (buf)
+                       count += buf->count;
                spin_unlock_irqrestore(&n_hdlc->tx_buf_list.spinlock,flags);
                error = put_user(count, (int __user *)arg);
                break;
@@ -825,14 +814,14 @@ static unsigned int n_hdlc_tty_poll(struct tty_struct *tty, struct file *filp,
                poll_wait(filp, &tty->write_wait, wait);
 
                /* set bits for operations that won't block */
-               if (n_hdlc->rx_buf_list.head)
+               if (!list_empty(&n_hdlc->rx_buf_list.list))
                        mask |= POLLIN | POLLRDNORM;    /* readable */
                if (test_bit(TTY_OTHER_CLOSED, &tty->flags))
                        mask |= POLLHUP;
                if (tty_hung_up_p(filp))
                        mask |= POLLHUP;
                if (!tty_is_writelocked(tty) &&
-                               n_hdlc->tx_free_buf_list.head)
+                               !list_empty(&n_hdlc->tx_free_buf_list.list))
                        mask |= POLLOUT | POLLWRNORM;   /* writable */
        }
        return mask;
@@ -856,7 +845,12 @@ static struct n_hdlc *n_hdlc_alloc(void)
        spin_lock_init(&n_hdlc->tx_free_buf_list.spinlock);
        spin_lock_init(&n_hdlc->rx_buf_list.spinlock);
        spin_lock_init(&n_hdlc->tx_buf_list.spinlock);
-       
+
+       INIT_LIST_HEAD(&n_hdlc->rx_free_buf_list.list);
+       INIT_LIST_HEAD(&n_hdlc->tx_free_buf_list.list);
+       INIT_LIST_HEAD(&n_hdlc->rx_buf_list.list);
+       INIT_LIST_HEAD(&n_hdlc->tx_buf_list.list);
+
        /* allocate free rx buffer list */
        for(i=0;i<DEFAULT_RX_BUF_COUNT;i++) {
                buf = kmalloc(N_HDLC_BUF_SIZE, GFP_KERNEL);
@@ -883,54 +877,66 @@ static struct n_hdlc *n_hdlc_alloc(void)
        
 }      /* end of n_hdlc_alloc() */
 
+/**
+ * n_hdlc_buf_return - put the HDLC buffer back at the head of the specified list
+ * @buf_list - pointer to the buffer list
+ * @buf - pointer to the buffer
+ */
+static void n_hdlc_buf_return(struct n_hdlc_buf_list *buf_list,
+                                               struct n_hdlc_buf *buf)
+{
+       unsigned long flags;
+
+       spin_lock_irqsave(&buf_list->spinlock, flags);
+
+       list_add(&buf->list_item, &buf_list->list);
+       buf_list->count++;
+
+       spin_unlock_irqrestore(&buf_list->spinlock, flags);
+}
+
 /**
  * n_hdlc_buf_put - add specified HDLC buffer to tail of specified list
- * @list - pointer to buffer list
+ * @buf_list - pointer to buffer list
  * @buf        - pointer to buffer
  */
-static void n_hdlc_buf_put(struct n_hdlc_buf_list *list,
+static void n_hdlc_buf_put(struct n_hdlc_buf_list *buf_list,
                           struct n_hdlc_buf *buf)
 {
        unsigned long flags;
-       spin_lock_irqsave(&list->spinlock,flags);
-       
-       buf->link=NULL;
-       if (list->tail)
-               list->tail->link = buf;
-       else
-               list->head = buf;
-       list->tail = buf;
-       (list->count)++;
-       
-       spin_unlock_irqrestore(&list->spinlock,flags);
-       
+
+       spin_lock_irqsave(&buf_list->spinlock, flags);
+
+       list_add_tail(&buf->list_item, &buf_list->list);
+       buf_list->count++;
+
+       spin_unlock_irqrestore(&buf_list->spinlock, flags);
 }      /* end of n_hdlc_buf_put() */
 
 /**
  * n_hdlc_buf_get - remove and return an HDLC buffer from list
- * @list - pointer to HDLC buffer list
+ * @buf_list - pointer to HDLC buffer list
  * 
  * Remove and return an HDLC buffer from the head of the specified HDLC buffer
  * list.
  * Returns a pointer to HDLC buffer if available, otherwise %NULL.
  */
-static struct n_hdlc_buf* n_hdlc_buf_get(struct n_hdlc_buf_list *list)
+static struct n_hdlc_buf *n_hdlc_buf_get(struct n_hdlc_buf_list *buf_list)
 {
        unsigned long flags;
        struct n_hdlc_buf *buf;
-       spin_lock_irqsave(&list->spinlock,flags);
-       
-       buf = list->head;
+
+       spin_lock_irqsave(&buf_list->spinlock, flags);
+
+       buf = list_first_entry_or_null(&buf_list->list,
+                                               struct n_hdlc_buf, list_item);
        if (buf) {
-               list->head = buf->link;
-               (list->count)--;
+               list_del(&buf->list_item);
+               buf_list->count--;
        }
-       if (!list->head)
-               list->tail = NULL;
-       
-       spin_unlock_irqrestore(&list->spinlock,flags);
+
+       spin_unlock_irqrestore(&buf_list->spinlock, flags);
        return buf;
-       
 }      /* end of n_hdlc_buf_get() */
 
 static char hdlc_banner[] __initdata =
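
The n_hdlc conversion above drops the hand-rolled head/tail singly-linked
queue (and the special-cased tbuf pointer) in favour of the generic list_head
API, so a partially sent frame is simply returned to the front of the tx
queue. A minimal sketch of the resulting queue pattern, with illustrative
struct names:

struct item {
        struct list_head link;
};

struct item_queue {
        struct list_head list;
        spinlock_t       lock;
};

static struct item *queue_pop(struct item_queue *q)
{
        struct item *it;
        unsigned long flags;

        spin_lock_irqsave(&q->lock, flags);
        it = list_first_entry_or_null(&q->list, struct item, link);
        if (it)
                list_del(&it->link);    /* unlink while holding the lock */
        spin_unlock_irqrestore(&q->lock, flags);

        return it;                      /* NULL if the queue was empty */
}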
index 6ee55a2d47bb429f73cf9d2b2711b109f6746a3d..e65808c482f1847d09d819a24defb0e5cf6508b5 100644 (file)
@@ -257,7 +257,7 @@ static void dw8250_set_termios(struct uart_port *p, struct ktermios *termios,
 {
        unsigned int baud = tty_termios_baud_rate(termios);
        struct dw8250_data *d = p->private_data;
-       unsigned int rate;
+       long rate;
        int ret;
 
        if (IS_ERR(d->clk) || !old)
@@ -265,7 +265,12 @@ static void dw8250_set_termios(struct uart_port *p, struct ktermios *termios,
 
        clk_disable_unprepare(d->clk);
        rate = clk_round_rate(d->clk, baud * 16);
-       ret = clk_set_rate(d->clk, rate);
+       if (rate < 0)
+               ret = rate;
+       else if (rate == 0)
+               ret = -ENOENT;
+       else
+               ret = clk_set_rate(d->clk, rate);
        clk_prepare_enable(d->clk);
 
        if (!ret)
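
clk_round_rate() returns a long: a negative value encodes an errno, and the
hunk above additionally treats 0 as "no usable rate" (-ENOENT), so neither
is ever passed on to clk_set_rate(). The defensive call sequence, condensed
(clk and target are illustrative):

long rounded = clk_round_rate(clk, target);

if (rounded < 0)                /* negative long encodes an errno */
        ret = rounded;
else if (rounded == 0)          /* clock cannot provide a usable rate */
        ret = -ENOENT;
else
        ret = clk_set_rate(clk, rounded);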
index a65fb8197aecb6af029bc4469d5920618dd39359..0e3f529d50e9d07bd684c92ddda4878ac553b9e7 100644 (file)
@@ -128,9 +128,13 @@ config SERIAL_8250_PCI
          by the parport_serial driver, enabled with CONFIG_PARPORT_SERIAL.
 
 config SERIAL_8250_EXAR
-        tristate "8250/16550 PCI device support"
-        depends on SERIAL_8250_PCI
+       tristate "8250/16550 Exar/Commtech PCI/PCIe device support"
+       depends on SERIAL_8250_PCI
        default SERIAL_8250
+       help
+         This builds support for XR17C1xx, XR17V3xx and some Commtech
+         422x PCIe serial cards that are not covered by the more generic
+         SERIAL_8250_PCI option.
 
 config SERIAL_8250_HP300
        tristate
index 8789ea423ccfd1054d9d5433538d0a8e466525dd..b0a377725d636c11cddad62470d821a31f1fff96 100644 (file)
@@ -2373,7 +2373,7 @@ static int __init pl011_console_match(struct console *co, char *name, int idx,
        if (strcmp(name, "qdf2400_e44") == 0) {
                pr_info_once("UART: Working around QDF2400 SoC erratum 44");
                qdf2400_e44_present = true;
-       } else if (strcmp(name, "pl011") != 0 || strcmp(name, "ttyAMA") != 0) {
+       } else if (strcmp(name, "pl011") != 0) {
                return -ENODEV;
        }
 
@@ -2452,18 +2452,37 @@ static void pl011_early_write(struct console *con, const char *s, unsigned n)
        uart_console_write(&dev->port, s, n, pl011_putc);
 }
 
+/*
+ * On non-ACPI systems, earlycon is enabled by specifying
+ * "earlycon=pl011,<address>" on the kernel command line.
+ *
+ * On ACPI ARM64 systems, an "early" console is enabled via the SPCR table,
+ * by specifying only "earlycon" on the command line.  Because it requires
+ * SPCR, the console starts after ACPI is parsed, which is later than a
+ * traditional early console.
+ *
+ * To get the traditional early console that starts before ACPI is parsed,
+ * specify the full "earlycon=pl011,<address>" option.
+ */
 static int __init pl011_early_console_setup(struct earlycon_device *device,
                                            const char *opt)
 {
        if (!device->port.membase)
                return -ENODEV;
 
-       device->con->write = qdf2400_e44_present ?
-                               qdf2400_e44_early_write : pl011_early_write;
+       /* On QDF2400 SoCs affected by Erratum 44, the "qdf2400_e44" option
+        * must also be specified, e.g. "earlycon=pl011,<address>,qdf2400_e44".
+        */
+       if (!strcmp(device->options, "qdf2400_e44"))
+               device->con->write = qdf2400_e44_early_write;
+       else
+               device->con->write = pl011_early_write;
+
        return 0;
 }
 OF_EARLYCON_DECLARE(pl011, "arm,pl011", pl011_early_console_setup);
 OF_EARLYCON_DECLARE(pl011, "arm,sbsa-uart", pl011_early_console_setup);
+EARLYCON_DECLARE(qdf2400_e44, pl011_early_console_setup);
 
 #else
 #define AMBA_CONSOLE   NULL
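
Taken together with the comment added above, the supported command lines
look like the following (<address> stands for the UART's MMIO base and is
not a literal value):

earlycon=pl011,<address>                  traditional early console,
                                          starts before ACPI is parsed
earlycon=pl011,<address>,qdf2400_e44      same, with the QDF2400 Erratum 44
                                          workaround selected
earlycon                                  ACPI ARM64 only: console chosen
                                          from the SPCR table, starts after
                                          ACPI is parsed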
index dcebb28ffbc412d5282ef32b7f3c133ff984ab46..1f50a83ef958609c3f27473b135e84e65303330d 100644 (file)
@@ -1951,6 +1951,11 @@ static void atmel_flush_buffer(struct uart_port *port)
                atmel_uart_writel(port, ATMEL_PDC_TCR, 0);
                atmel_port->pdc_tx.ofs = 0;
        }
+       /*
+        * in uart_flush_buffer(), the xmit circular buffer has just
+        * been cleared, so we have to reset tx_len accordingly.
+        */
+       atmel_port->tx_len = 0;
 }
 
 /*
@@ -2483,6 +2488,9 @@ static void atmel_console_write(struct console *co, const char *s, u_int count)
        pdc_tx = atmel_uart_readl(port, ATMEL_PDC_PTSR) & ATMEL_PDC_TXTEN;
        atmel_uart_writel(port, ATMEL_PDC_PTCR, ATMEL_PDC_TXTDIS);
 
+       /* Make sure that tx path is actually able to send characters */
+       atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_TXEN);
+
        uart_console_write(port, s, count, atmel_console_putchar);
 
        /*
index 6989b227d1349baeb0cb05ed9f7cff8572e36324..be94246b6fcca1874161470035b9d9bd91237f88 100644 (file)
@@ -1088,7 +1088,7 @@ static void mxs_auart_settermios(struct uart_port *u,
                                        AUART_LINECTRL_BAUD_DIV_MAX);
                baud_max = u->uartclk * 32 / AUART_LINECTRL_BAUD_DIV_MIN;
                baud = uart_get_baud_rate(u, termios, old, baud_min, baud_max);
-               div = u->uartclk * 32 / baud;
+               div = DIV_ROUND_CLOSEST(u->uartclk * 32, baud);
        }
 
        ctrl |= AUART_LINECTRL_BAUD_DIVFRAC(div & 0x3F);
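
DIV_ROUND_CLOSEST(x, d) rounds x/d to the nearest integer instead of
truncating, which halves the worst-case divisor error. A worked example
with illustrative numbers:

/* uartclk = 24 MHz, baud = 115200:
 *   24000000 * 32 / 115200 = 6666.67
 * plain integer division truncates to 6666;
 * DIV_ROUND_CLOSEST(24000000 * 32, 115200) yields 6667,
 * the closer divisor and therefore the smaller baud-rate error. */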
index b4f86c219db1e0f46047471ee47588f7a1cd0567..7a17aedbf902e05034129a832941f27fe5dc38c8 100644 (file)
@@ -1031,8 +1031,10 @@ static int s3c64xx_serial_startup(struct uart_port *port)
        if (ourport->dma) {
                ret = s3c24xx_serial_request_dma(ourport);
                if (ret < 0) {
-                       dev_warn(port->dev, "DMA request failed\n");
-                       return ret;
+                       dev_warn(port->dev,
+                                "DMA request failed, DMA will not be used\n");
+                       devm_kfree(port->dev, ourport->dma);
+                       ourport->dma = NULL;
                }
        }
 
index bcf1d33e6ffe0b3cb9952e88046658df1358543b..c334bcc59c649eedc2933ac29c4dc1ef45ae21d2 100644 (file)
@@ -575,12 +575,13 @@ static void asc_set_termios(struct uart_port *port, struct ktermios *termios,
                        pinctrl_select_state(ascport->pinctrl,
                                             ascport->states[NO_HW_FLOWCTRL]);
 
-                       gpiod = devm_get_gpiod_from_child(port->dev, "rts",
-                                                         &np->fwnode);
-                       if (!IS_ERR(gpiod)) {
-                               gpiod_direction_output(gpiod, 0);
+                       gpiod = devm_fwnode_get_gpiod_from_child(port->dev,
+                                                                "rts",
+                                                                &np->fwnode,
+                                                                GPIOD_OUT_LOW,
+                                                                np->name);
+                       if (!IS_ERR(gpiod))
                                ascport->rts = gpiod;
-                       }
                }
        }
 
index c6fc7141d7b2814cc122d430c38d61bbde675358..677f0ddc986c11005dc9469dc0fb3adf96d1f5a5 100644 (file)
@@ -446,7 +446,7 @@ static struct sysrq_key_op *sysrq_key_table[36] = {
         */
        NULL,                           /* a */
        &sysrq_reboot_op,               /* b */
-       &sysrq_crash_op,                /* c & ibm_emac driver debug */
+       &sysrq_crash_op,                /* c */
        &sysrq_showlocks_op,            /* d */
        &sysrq_term_op,                 /* e */
        &sysrq_moom_op,                 /* f */
index 68947f6de5ad6339adea804182597229c3eb1d38..b0500a0a87b86161b8cf8befcee9753ff6cda74d 100644 (file)
@@ -271,10 +271,13 @@ const struct file_operations tty_ldiscs_proc_fops = {
 
 struct tty_ldisc *tty_ldisc_ref_wait(struct tty_struct *tty)
 {
+       struct tty_ldisc *ld;
+
        ldsem_down_read(&tty->ldisc_sem, MAX_SCHEDULE_TIMEOUT);
-       if (!tty->ldisc)
+       ld = tty->ldisc;
+       if (!ld)
                ldsem_up_read(&tty->ldisc_sem);
-       return tty->ldisc;
+       return ld;
 }
 EXPORT_SYMBOL_GPL(tty_ldisc_ref_wait);
 
@@ -488,41 +491,6 @@ static void tty_ldisc_close(struct tty_struct *tty, struct tty_ldisc *ld)
        tty_ldisc_debug(tty, "%p: closed\n", ld);
 }
 
-/**
- *     tty_ldisc_restore       -       helper for tty ldisc change
- *     @tty: tty to recover
- *     @old: previous ldisc
- *
- *     Restore the previous line discipline or N_TTY when a line discipline
- *     change fails due to an open error
- */
-
-static void tty_ldisc_restore(struct tty_struct *tty, struct tty_ldisc *old)
-{
-       struct tty_ldisc *new_ldisc;
-       int r;
-
-       /* There is an outstanding reference here so this is safe */
-       old = tty_ldisc_get(tty, old->ops->num);
-       WARN_ON(IS_ERR(old));
-       tty->ldisc = old;
-       tty_set_termios_ldisc(tty, old->ops->num);
-       if (tty_ldisc_open(tty, old) < 0) {
-               tty_ldisc_put(old);
-               /* This driver is always present */
-               new_ldisc = tty_ldisc_get(tty, N_TTY);
-               if (IS_ERR(new_ldisc))
-                       panic("n_tty: get");
-               tty->ldisc = new_ldisc;
-               tty_set_termios_ldisc(tty, N_TTY);
-               r = tty_ldisc_open(tty, new_ldisc);
-               if (r < 0)
-                       panic("Couldn't open N_TTY ldisc for "
-                             "%s --- error %d.",
-                             tty_name(tty), r);
-       }
-}
-
 /**
  *     tty_set_ldisc           -       set line discipline
  *     @tty: the terminal to set
@@ -536,12 +504,7 @@ static void tty_ldisc_restore(struct tty_struct *tty, struct tty_ldisc *old)
 
 int tty_set_ldisc(struct tty_struct *tty, int disc)
 {
-       int retval;
-       struct tty_ldisc *old_ldisc, *new_ldisc;
-
-       new_ldisc = tty_ldisc_get(tty, disc);
-       if (IS_ERR(new_ldisc))
-               return PTR_ERR(new_ldisc);
+       int retval, old_disc;
 
        tty_lock(tty);
        retval = tty_ldisc_lock(tty, 5 * HZ);
@@ -554,7 +517,8 @@ int tty_set_ldisc(struct tty_struct *tty, int disc)
        }
 
        /* Check the no-op case */
-       if (tty->ldisc->ops->num == disc)
+       old_disc = tty->ldisc->ops->num;
+       if (old_disc == disc)
                goto out;
 
        if (test_bit(TTY_HUPPED, &tty->flags)) {
@@ -563,34 +527,25 @@ int tty_set_ldisc(struct tty_struct *tty, int disc)
                goto out;
        }
 
-       old_ldisc = tty->ldisc;
-
-       /* Shutdown the old discipline. */
-       tty_ldisc_close(tty, old_ldisc);
-
-       /* Now set up the new line discipline. */
-       tty->ldisc = new_ldisc;
-       tty_set_termios_ldisc(tty, disc);
-
-       retval = tty_ldisc_open(tty, new_ldisc);
+       retval = tty_ldisc_reinit(tty, disc);
        if (retval < 0) {
                /* Back to the old one or N_TTY if we can't */
-               tty_ldisc_put(new_ldisc);
-               tty_ldisc_restore(tty, old_ldisc);
+               if (tty_ldisc_reinit(tty, old_disc) < 0) {
+                       pr_err("tty: TIOCSETD failed, reinitializing N_TTY\n");
+                       if (tty_ldisc_reinit(tty, N_TTY) < 0) {
+                               /* At this point we have tty->ldisc == NULL. */
+                               pr_err("tty: reinitializing N_TTY failed\n");
+                       }
+               }
        }
 
-       if (tty->ldisc->ops->num != old_ldisc->ops->num && tty->ops->set_ldisc) {
+       if (tty->ldisc && tty->ldisc->ops->num != old_disc &&
+           tty->ops->set_ldisc) {
                down_read(&tty->termios_rwsem);
                tty->ops->set_ldisc(tty);
                up_read(&tty->termios_rwsem);
        }
 
-       /* At this point we hold a reference to the new ldisc and a
-          reference to the old ldisc, or we hold two references to
-          the old ldisc (if it was restored as part of error cleanup
-          above). In either case, releasing a single reference from
-          the old ldisc is correct. */
-       new_ldisc = old_ldisc;
 out:
        tty_ldisc_unlock(tty);
 
@@ -598,7 +553,6 @@ out:
           already running */
        tty_buffer_restart_work(tty->port);
 err:
-       tty_ldisc_put(new_ldisc);       /* drop the extra reference */
        tty_unlock(tty);
        return retval;
 }
@@ -659,10 +613,8 @@ int tty_ldisc_reinit(struct tty_struct *tty, int disc)
        int retval;
 
        ld = tty_ldisc_get(tty, disc);
-       if (IS_ERR(ld)) {
-               BUG_ON(disc == N_TTY);
+       if (IS_ERR(ld))
                return PTR_ERR(ld);
-       }
 
        if (tty->ldisc) {
                tty_ldisc_close(tty, tty->ldisc);
@@ -674,10 +626,8 @@ int tty_ldisc_reinit(struct tty_struct *tty, int disc)
        tty_set_termios_ldisc(tty, disc);
        retval = tty_ldisc_open(tty, tty->ldisc);
        if (retval) {
-               if (!WARN_ON(disc == N_TTY)) {
-                       tty_ldisc_put(tty->ldisc);
-                       tty->ldisc = NULL;
-               }
+               tty_ldisc_put(tty->ldisc);
+               tty->ldisc = NULL;
        }
        return retval;
 }
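
With tty_ldisc_restore() gone, tty_set_ldisc() now recovers through staged
calls to tty_ldisc_reinit(), falling back to N_TTY and, in the worst case,
leaving tty->ldisc NULL. A condensed sketch of that cascade (reinit() stands
in for tty_ldisc_reinit()):

if (reinit(tty, new_disc) < 0) {
        /* try to restore the previous discipline */
        if (reinit(tty, old_disc) < 0) {
                /* last resort: the always-compiled-in N_TTY;
                 * if even this fails, tty->ldisc stays NULL */
                if (reinit(tty, N_TTY) < 0)
                        pr_err("tty: reinitializing N_TTY failed\n");
        }
}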
index c5f0fc906136b580b23df4e3708fd04d2c01e27b..8af8d9542663379ef1367ceb871f4753314bc984 100644 (file)
@@ -28,7 +28,6 @@
 #include <linux/module.h>
 #include <linux/sched/signal.h>
 #include <linux/sched/debug.h>
-#include <linux/sched/debug.h>
 #include <linux/tty.h>
 #include <linux/tty_flip.h>
 #include <linux/mm.h>
index f03692ec552056845c6fa50947e38abca47ea66b..8fb309a0ff6b5dae0c6f867cd323585aeff17252 100644 (file)
@@ -1381,7 +1381,7 @@ static int usbtmc_probe(struct usb_interface *intf,
 
        dev_dbg(&intf->dev, "%s called\n", __func__);
 
-       data = kmalloc(sizeof(*data), GFP_KERNEL);
+       data = kzalloc(sizeof(*data), GFP_KERNEL);
        if (!data)
                return -ENOMEM;
 
@@ -1444,6 +1444,13 @@ static int usbtmc_probe(struct usb_interface *intf,
                        break;
                }
        }
+
+       if (!data->bulk_out || !data->bulk_in) {
+               dev_err(&intf->dev, "bulk endpoints not found\n");
+               retcode = -ENODEV;
+               goto err_put;
+       }
+
        /* Find int endpoint */
        for (n = 0; n < iface_desc->desc.bNumEndpoints; n++) {
                endpoint = &iface_desc->endpoint[n].desc;
@@ -1469,8 +1476,10 @@ static int usbtmc_probe(struct usb_interface *intf,
        if (data->iin_ep_present) {
                /* allocate int urb */
                data->iin_urb = usb_alloc_urb(0, GFP_KERNEL);
-               if (!data->iin_urb)
+               if (!data->iin_urb) {
+                       retcode = -ENOMEM;
                        goto error_register;
+               }
 
                /* Protect interrupt in endpoint data until iin_urb is freed */
                kref_get(&data->kref);
@@ -1478,8 +1487,10 @@ static int usbtmc_probe(struct usb_interface *intf,
                /* allocate buffer for interrupt in */
                data->iin_buffer = kmalloc(data->iin_wMaxPacketSize,
                                        GFP_KERNEL);
-               if (!data->iin_buffer)
+               if (!data->iin_buffer) {
+                       retcode = -ENOMEM;
                        goto error_register;
+               }
 
                /* fill interrupt urb */
                usb_fill_int_urb(data->iin_urb, data->usb_dev,
@@ -1512,6 +1523,7 @@ error_register:
        sysfs_remove_group(&intf->dev.kobj, &capability_attr_grp);
        sysfs_remove_group(&intf->dev.kobj, &data_attr_grp);
        usbtmc_free_int(data);
+err_put:
        kref_put(&data->kref, usbtmc_delete);
        return retcode;
 }
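
The usbtmc hunks above make every failure path set an errno and funnel
through labels, ending with the kref_put() that drops the probe's reference.
A minimal sketch of that goto-ladder shape (all names illustrative):

static int probe_sketch(void)
{
        int ret;

        ret = alloc_data();             /* takes the initial kref */
        if (ret)
                return ret;

        ret = find_endpoints();
        if (ret)
                goto err_put;           /* only the kref to drop */

        ret = setup_urbs();
        if (ret)
                goto err_put;

        return 0;

err_put:
        put_data();                     /* kref_put(), frees on last ref */
        return ret;
}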
index 25dbd8c7aec73345d357c2a75ff0cde26c918217..4be52c602e9b7a7de6a76ecb4be686cb12ec9950 100644 (file)
@@ -280,6 +280,16 @@ static int usb_parse_endpoint(struct device *ddev, int cfgno, int inum,
 
                        /*
                         * Adjust bInterval for quirked devices.
+                        */
+                       /*
+                        * This quirk fixes bIntervals reported in ms.
+                        */
+                       if (to_usb_device(ddev)->quirks &
+                               USB_QUIRK_LINEAR_FRAME_INTR_BINTERVAL) {
+                               n = clamp(fls(d->bInterval) + 3, i, j);
+                               i = j = n;
+                       }
+                       /*
                         * This quirk fixes bIntervals reported in
                         * linear microframes.
                         */
index 612fab6e54fb84f2d84372d202c77823fc3be921..79bdca5cb9c7ae8f01b1f4a25c7ceacd9c716e2c 100644 (file)
@@ -520,8 +520,10 @@ static int rh_call_control (struct usb_hcd *hcd, struct urb *urb)
         */
        tbuf_size =  max_t(u16, sizeof(struct usb_hub_descriptor), wLength);
        tbuf = kzalloc(tbuf_size, GFP_KERNEL);
-       if (!tbuf)
-               return -ENOMEM;
+       if (!tbuf) {
+               status = -ENOMEM;
+               goto err_alloc;
+       }
 
        bufp = tbuf;
 
@@ -734,6 +736,7 @@ error:
        }
 
        kfree(tbuf);
+ err_alloc:
 
        /* any errors get returned through the urb completion */
        spin_lock_irq(&hcd_root_hub_lock);
index f0dd08198d7426b9973bb676bca12b8cb76d7e7b..5286bf67869a83e1d7e1d3f1ca0ebc87db5cf7a4 100644 (file)
@@ -4275,7 +4275,7 @@ static void hub_set_initial_usb2_lpm_policy(struct usb_device *udev)
        struct usb_hub *hub = usb_hub_to_struct_hub(udev->parent);
        int connect_type = USB_PORT_CONNECT_TYPE_UNKNOWN;
 
-       if (!udev->usb2_hw_lpm_capable)
+       if (!udev->usb2_hw_lpm_capable || !udev->bos)
                return;
 
        if (hub)
index 24f9f98968a5d860f83920287a5b7deb4c98bed6..96b21b0dac1e8c15fb20c19c85d13a58ab95b285 100644 (file)
@@ -170,6 +170,14 @@ static const struct usb_device_id usb_quirk_list[] = {
        /* M-Systems Flash Disk Pioneers */
        { USB_DEVICE(0x08ec, 0x1000), .driver_info = USB_QUIRK_RESET_RESUME },
 
+       /* Baum Vario Ultra */
+       { USB_DEVICE(0x0904, 0x6101), .driver_info =
+                       USB_QUIRK_LINEAR_FRAME_INTR_BINTERVAL },
+       { USB_DEVICE(0x0904, 0x6102), .driver_info =
+                       USB_QUIRK_LINEAR_FRAME_INTR_BINTERVAL },
+       { USB_DEVICE(0x0904, 0x6103), .driver_info =
+                       USB_QUIRK_LINEAR_FRAME_INTR_BINTERVAL },
+
        /* Keytouch QWERTY Panel keyboard */
        { USB_DEVICE(0x0926, 0x3333), .driver_info =
                        USB_QUIRK_CONFIG_INTF_STRINGS },
index 2092e46b1380e91712f3aab90434ba157dde809e..f8d0747810e78d7cc7930fed1b71cfc9c5aeb048 100644 (file)
@@ -250,6 +250,7 @@ static void dwc3_omap_set_mailbox(struct dwc3_omap *omap,
                val = dwc3_omap_read_utmi_ctrl(omap);
                val |= USBOTGSS_UTMI_OTG_CTRL_IDDIG;
                dwc3_omap_write_utmi_ctrl(omap, val);
+               break;
 
        case OMAP_DWC3_VBUS_OFF:
                val = dwc3_omap_read_utmi_ctrl(omap);
@@ -392,7 +393,7 @@ static void dwc3_omap_set_utmi_mode(struct dwc3_omap *omap)
 {
        u32                     reg;
        struct device_node      *node = omap->dev->of_node;
-       int                     utmi_mode = 0;
+       u32                     utmi_mode = 0;
 
        reg = dwc3_omap_read_utmi_ctrl(omap);
 
index 4db97ecae8859ba0bc03c91c4948fc7af205390f..79e7a3480d51b07abf3988a51f1790f6caec7ca3 100644 (file)
@@ -171,6 +171,7 @@ void dwc3_gadget_giveback(struct dwc3_ep *dep, struct dwc3_request *req,
                int status)
 {
        struct dwc3                     *dwc = dep->dwc;
+       unsigned int                    unmap_after_complete = false;
 
        req->started = false;
        list_del(&req->list);
@@ -180,11 +181,19 @@ void dwc3_gadget_giveback(struct dwc3_ep *dep, struct dwc3_request *req,
        if (req->request.status == -EINPROGRESS)
                req->request.status = status;
 
-       if (dwc->ep0_bounced && dep->number <= 1)
+       /*
+        * NOTICE we don't want to unmap before calling ->complete() if we're
+        * dealing with a bounced ep0 request. If we unmap it here, we would end
+        * up overwriting the contents of req->buf and this could confuse the
+        * gadget driver.
+        */
+       if (dwc->ep0_bounced && dep->number <= 1) {
                dwc->ep0_bounced = false;
-
-       usb_gadget_unmap_request_by_dev(dwc->sysdev,
-                       &req->request, req->direction);
+               unmap_after_complete = true;
+       } else {
+               usb_gadget_unmap_request_by_dev(dwc->sysdev,
+                               &req->request, req->direction);
+       }
 
        trace_dwc3_gadget_giveback(req);
 
@@ -192,6 +201,10 @@ void dwc3_gadget_giveback(struct dwc3_ep *dep, struct dwc3_request *req,
        usb_gadget_giveback_request(&dep->endpoint, &req->request);
        spin_lock(&dwc->lock);
 
+       if (unmap_after_complete)
+               usb_gadget_unmap_request_by_dev(dwc->sysdev,
+                               &req->request, req->direction);
+
        if (dep->number > 1)
                pm_runtime_put(dwc->dev);
 }
@@ -1342,6 +1355,68 @@ static int dwc3_gadget_ep_dequeue(struct usb_ep *ep,
                if (r == req) {
                        /* wait until it is processed */
                        dwc3_stop_active_transfer(dwc, dep->number, true);
+
+                       /*
+                        * If request was already started, this means we had to
+                        * stop the transfer. With that we also need to ignore
+                        * all TRBs used by the request, however TRBs can only
+                        * be modified after completion of END_TRANSFER
+                        * command. So what we do here is that we wait for
+                        * END_TRANSFER completion and only after that, we jump
+                        * over TRBs by clearing HWO and incrementing dequeue
+                        * pointer.
+                        *
+                        * Note that we have 2 possible types of transfers here:
+                        *
+                        * i) Linear buffer request
+                        * ii) SG-list based request
+                        *
+                        * SG-list based requests will have r->num_pending_sgs
+                        * set to a valid number (> 0). Linear requests,
+                        * normally use a single TRB.
+                        *
+                        * For each of these two cases, if r->unaligned flag is
+                        * set, one extra TRB has been used to align transfer
+                        * size to wMaxPacketSize.
+                        *
+                        * All of these cases need to be taken into
+                        * consideration so we don't mess up our TRB ring
+                        * pointers.
+                        */
+                       wait_event_lock_irq(dep->wait_end_transfer,
+                                       !(dep->flags & DWC3_EP_END_TRANSFER_PENDING),
+                                       dwc->lock);
+
+                       if (!r->trb)
+                               goto out1;
+
+                       if (r->num_pending_sgs) {
+                               struct dwc3_trb *trb;
+                               int i = 0;
+
+                               for (i = 0; i < r->num_pending_sgs; i++) {
+                                       trb = r->trb + i;
+                                       trb->ctrl &= ~DWC3_TRB_CTRL_HWO;
+                                       dwc3_ep_inc_deq(dep);
+                               }
+
+                               if (r->unaligned) {
+                                       trb = r->trb + r->num_pending_sgs + 1;
+                                       trb->ctrl &= ~DWC3_TRB_CTRL_HWO;
+                                       dwc3_ep_inc_deq(dep);
+                               }
+                       } else {
+                               struct dwc3_trb *trb = r->trb;
+
+                               trb->ctrl &= ~DWC3_TRB_CTRL_HWO;
+                               dwc3_ep_inc_deq(dep);
+
+                               if (r->unaligned) {
+                                       trb = r->trb + 1;
+                                       trb->ctrl &= ~DWC3_TRB_CTRL_HWO;
+                                       dwc3_ep_inc_deq(dep);
+                               }
+                       }
                        goto out1;
                }
                dev_err(dwc->dev, "request %p was not queued to %s\n",
@@ -1352,6 +1427,7 @@ static int dwc3_gadget_ep_dequeue(struct usb_ep *ep,
 
 out1:
        /* giveback the request */
+       dep->queued_requests--;
        dwc3_gadget_giveback(dep, req, -ECONNRESET);
 
 out0:
@@ -2126,12 +2202,12 @@ static int __dwc3_cleanup_done_trbs(struct dwc3 *dwc, struct dwc3_ep *dep,
                return 1;
        }
 
-       if ((trb->ctrl & DWC3_TRB_CTRL_HWO) && status != -ESHUTDOWN)
-               return 1;
-
        count = trb->size & DWC3_TRB_SIZE_MASK;
        req->remaining += count;
 
+       if ((trb->ctrl & DWC3_TRB_CTRL_HWO) && status != -ESHUTDOWN)
+               return 1;
+
        if (dep->direction) {
                if (count) {
                        trb_status = DWC3_TRB_SIZE_TRBSTS(trb->size);
@@ -3228,15 +3304,10 @@ void dwc3_gadget_exit(struct dwc3 *dwc)
 
 int dwc3_gadget_suspend(struct dwc3 *dwc)
 {
-       int ret;
-
        if (!dwc->gadget_driver)
                return 0;
 
-       ret = dwc3_gadget_run_stop(dwc, false, false);
-       if (ret < 0)
-               return ret;
-
+       dwc3_gadget_run_stop(dwc, false, false);
        dwc3_disconnect_gadget(dwc);
        __dwc3_gadget_stop(dwc);
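
The giveback change above defers the DMA unmap for a bounced ep0 request
until after the gadget driver's ->complete() callback, since unmapping
first would clobber req->buf before the driver has read it. The ordering,
reduced to a sketch (helper names are illustrative):

bool unmap_after_complete = bounced_ep0_request;

if (!unmap_after_complete)
        unmap_request();                /* safe to unmap up front */

driver_complete(req);                   /* driver consumes req->buf here */

if (unmap_after_complete)
        unmap_request();                /* req->buf may be rewritten now */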
 
index 3129bcf74d7d8de7ffe8d23923a7ba34d4e5cff9..265e223ab64554f6be78d37d15e36a86bb571c74 100644 (file)
@@ -28,23 +28,23 @@ struct dwc3;
 #define gadget_to_dwc(g)       (container_of(g, struct dwc3, gadget))
 
 /* DEPCFG parameter 1 */
-#define DWC3_DEPCFG_INT_NUM(n)         ((n) << 0)
+#define DWC3_DEPCFG_INT_NUM(n)         (((n) & 0x1f) << 0)
 #define DWC3_DEPCFG_XFER_COMPLETE_EN   (1 << 8)
 #define DWC3_DEPCFG_XFER_IN_PROGRESS_EN        (1 << 9)
 #define DWC3_DEPCFG_XFER_NOT_READY_EN  (1 << 10)
 #define DWC3_DEPCFG_FIFO_ERROR_EN      (1 << 11)
 #define DWC3_DEPCFG_STREAM_EVENT_EN    (1 << 13)
-#define DWC3_DEPCFG_BINTERVAL_M1(n)    ((n) << 16)
+#define DWC3_DEPCFG_BINTERVAL_M1(n)    (((n) & 0xff) << 16)
 #define DWC3_DEPCFG_STREAM_CAPABLE     (1 << 24)
-#define DWC3_DEPCFG_EP_NUMBER(n)       ((n) << 25)
+#define DWC3_DEPCFG_EP_NUMBER(n)       (((n) & 0x1f) << 25)
 #define DWC3_DEPCFG_BULK_BASED         (1 << 30)
 #define DWC3_DEPCFG_FIFO_BASED         (1 << 31)
 
 /* DEPCFG parameter 0 */
-#define DWC3_DEPCFG_EP_TYPE(n)         ((n) << 1)
-#define DWC3_DEPCFG_MAX_PACKET_SIZE(n) ((n) << 3)
-#define DWC3_DEPCFG_FIFO_NUMBER(n)     ((n) << 17)
-#define DWC3_DEPCFG_BURST_SIZE(n)      ((n) << 22)
+#define DWC3_DEPCFG_EP_TYPE(n)         (((n) & 0x3) << 1)
+#define DWC3_DEPCFG_MAX_PACKET_SIZE(n) (((n) & 0x7ff) << 3)
+#define DWC3_DEPCFG_FIFO_NUMBER(n)     (((n) & 0x1f) << 17)
+#define DWC3_DEPCFG_BURST_SIZE(n)      (((n) & 0xf) << 22)
 #define DWC3_DEPCFG_DATA_SEQ_NUM(n)    ((n) << 26)
 /* This applies for core versions earlier than 1.94a */
 #define DWC3_DEPCFG_IGN_SEQ_NUM                (1 << 31)
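
The DEPCFG macro changes above mask each parameter to its field width
before shifting, so an oversized value can no longer spill into a
neighbouring bitfield. An illustrative example of the difference:

/* a 5-bit field at bit 25, as in DWC3_DEPCFG_EP_NUMBER(): */
#define EXAMPLE_FIELD(n)        (((n) & 0x1f) << 25)

/* with n = 0x3f, one bit too wide for the field:
 *   (0x3f << 25)          also sets bit 30 of the register;
 *   ((0x3f & 0x1f) << 25) stays confined to bits 25..29. */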
index 78c44979dde382ca8c58e9f9f84024180ce9663d..cbff3b02840df901ca0ca03c646b14f5f6085719 100644 (file)
@@ -269,6 +269,7 @@ static ssize_t gadget_dev_desc_UDC_store(struct config_item *item,
                ret = unregister_gadget(gi);
                if (ret)
                        goto err;
+               kfree(name);
        } else {
                if (gi->composite.gadget_driver.udc_name) {
                        ret = -EBUSY;
index a30766ca422644ce91be7660b8822b4a7357f7f3..5e3828d9dac7f3af922456d141191ddd0733bbaf 100644 (file)
@@ -535,13 +535,15 @@ static int acm_notify_serial_state(struct f_acm *acm)
 {
        struct usb_composite_dev *cdev = acm->port.func.config->cdev;
        int                     status;
+       __le16                  serial_state;
 
        spin_lock(&acm->lock);
        if (acm->notify_req) {
                dev_dbg(&cdev->gadget->dev, "acm ttyGS%d serial state %04x\n",
                        acm->port_num, acm->serial_state);
+               serial_state = cpu_to_le16(acm->serial_state);
                status = acm_cdc_notify(acm, USB_CDC_NOTIFY_SERIAL_STATE,
-                               0, &acm->serial_state, sizeof(acm->serial_state));
+                               0, &serial_state, sizeof(acm->serial_state));
        } else {
                acm->pending = true;
                status = 0;
index a5b7cd6156987a80ad74ba73bde16a30de4e2bf9..a0085571824d9b4352c7245625a30c3248d789f5 100644 (file)
@@ -1834,11 +1834,14 @@ static int ffs_func_eps_enable(struct ffs_function *func)
        spin_lock_irqsave(&func->ffs->eps_lock, flags);
        while(count--) {
                struct usb_endpoint_descriptor *ds;
+               struct usb_ss_ep_comp_descriptor *comp_desc = NULL;
+               int needs_comp_desc = false;
                int desc_idx;
 
-               if (ffs->gadget->speed == USB_SPEED_SUPER)
+               if (ffs->gadget->speed == USB_SPEED_SUPER) {
                        desc_idx = 2;
-               else if (ffs->gadget->speed == USB_SPEED_HIGH)
+                       needs_comp_desc = true;
+               } else if (ffs->gadget->speed == USB_SPEED_HIGH)
                        desc_idx = 1;
                else
                        desc_idx = 0;
@@ -1855,6 +1858,14 @@ static int ffs_func_eps_enable(struct ffs_function *func)
 
                ep->ep->driver_data = ep;
                ep->ep->desc = ds;
+
+               comp_desc = (struct usb_ss_ep_comp_descriptor *)(ds +
+                               USB_DT_ENDPOINT_SIZE);
+               ep->ep->maxburst = comp_desc->bMaxBurst + 1;
+
+               if (needs_comp_desc)
+                       ep->ep->comp_desc = comp_desc;
+
                ret = usb_ep_enable(ep->ep);
                if (likely(!ret)) {
                        epfile->ep = ep;
@@ -2253,7 +2264,7 @@ static int __ffs_data_do_os_desc(enum ffs_os_desc_type type,
 
                if (len < sizeof(*d) ||
                    d->bFirstInterfaceNumber >= ffs->interfaces_count ||
-                   d->Reserved1)
+                   !d->Reserved1)
                        return -EINVAL;
                for (i = 0; i < ARRAY_SIZE(d->Reserved2); ++i)
                        if (d->Reserved2[i])
index 89b48bcc377a16d426d6f5826a2e1ce5301124b6..5eea44823ca06d06955eb2bc51782cac6cd345ec 100644 (file)
@@ -367,7 +367,7 @@ try_again:
        count  = min_t(unsigned, count, hidg->report_length);
 
        spin_unlock_irqrestore(&hidg->write_spinlock, flags);
-       status = copy_from_user(hidg->req->buf, buffer, count);
+       status = copy_from_user(req->buf, buffer, count);
 
        if (status != 0) {
                ERROR(hidg->func.config->cdev,
@@ -378,9 +378,9 @@ try_again:
 
        spin_lock_irqsave(&hidg->write_spinlock, flags);
 
-       /* we our function has been disabled by host */
+       /* when our function has been disabled by host */
        if (!hidg->req) {
-               free_ep_req(hidg->in_ep, hidg->req);
+               free_ep_req(hidg->in_ep, req);
                /*
                 * TODO
                 * Should we fail with error here?
@@ -394,7 +394,7 @@ try_again:
        req->complete = f_hidg_req_complete;
        req->context  = hidg;
 
-       status = usb_ep_queue(hidg->in_ep, hidg->req, GFP_ATOMIC);
+       status = usb_ep_queue(hidg->in_ep, req, GFP_ATOMIC);
        if (status < 0) {
                ERROR(hidg->func.config->cdev,
                        "usb_ep_queue error on int endpoint %zd\n", status);
index 224717e63a5300970867a663cd030d8cd62068f6..864819ff9a7d362962eb837755886423b57906fb 100644 (file)
@@ -16,6 +16,7 @@
  */
 
 #include <linux/kernel.h>
+#include <linux/interrupt.h>
 #include <linux/module.h>
 #include <linux/device.h>
 #include <linux/etherdevice.h>
index 27ed51b5082f66de17c41761f260a4b96dcc0f33..f8a1881609a2c808f690f6d31ea6bcaf118b8bb4 100644 (file)
@@ -258,13 +258,6 @@ uvc_function_setup(struct usb_function *f, const struct usb_ctrlrequest *ctrl)
        memcpy(&uvc_event->req, ctrl, sizeof(uvc_event->req));
        v4l2_event_queue(&uvc->vdev, &v4l2_event);
 
-       /* Pass additional setup data to userspace */
-       if (uvc->event_setup_out && uvc->event_length) {
-               uvc->control_req->length = uvc->event_length;
-               return usb_ep_queue(uvc->func.config->cdev->gadget->ep0,
-                       uvc->control_req, GFP_ATOMIC);
-       }
-
        return 0;
 }
 
@@ -601,6 +594,14 @@ uvc_function_bind(struct usb_configuration *c, struct usb_function *f)
        opts->streaming_maxpacket = clamp(opts->streaming_maxpacket, 1U, 3072U);
        opts->streaming_maxburst = min(opts->streaming_maxburst, 15U);
 
+       /* For SS, wMaxPacketSize has to be 1024 if bMaxBurst is not 0 */
+       if (opts->streaming_maxburst &&
+           (opts->streaming_maxpacket % 1024) != 0) {
+               opts->streaming_maxpacket = roundup(opts->streaming_maxpacket, 1024);
+               INFO(cdev, "overriding streaming_maxpacket to %d\n",
+                    opts->streaming_maxpacket);
+       }
+
        /* Fill in the FS/HS/SS Video Streaming specific descriptors from the
         * module parameters.
         *
@@ -632,7 +633,7 @@ uvc_function_bind(struct usb_configuration *c, struct usb_function *f)
        uvc_ss_streaming_comp.bMaxBurst = opts->streaming_maxburst;
        uvc_ss_streaming_comp.wBytesPerInterval =
                cpu_to_le16(max_packet_size * max_packet_mult *
-                           opts->streaming_maxburst);
+                           (opts->streaming_maxburst + 1));
 
        /* Allocate endpoints. */
        ep = usb_ep_autoconfig(cdev->gadget, &uvc_control_ep);
index a2615d64d07c1967d7cd2c25ab2e046747f6bd7d..a2c916869293720e378ced6b265532846eca52a3 100644 (file)
@@ -84,8 +84,7 @@ static int ep_open(struct inode *, struct file *);
 
 /* /dev/gadget/$CHIP represents ep0 and the whole device */
 enum ep0_state {
-       /* DISBLED is the initial state.
-        */
+       /* DISABLED is the initial state. */
        STATE_DEV_DISABLED = 0,
 
        /* Only one open() of /dev/gadget/$CHIP; only one file tracks
@@ -1782,8 +1781,10 @@ dev_config (struct file *fd, const char __user *buf, size_t len, loff_t *ptr)
 
        spin_lock_irq (&dev->lock);
        value = -EINVAL;
-       if (dev->buf)
+       if (dev->buf) {
+               kfree(kbuf);
                goto fail;
+       }
        dev->buf = kbuf;
 
        /* full or low speed config */
index 11bbce28bc231b701bef74cc38b99e5dbd3cc6ca..2035906b8ced173c2e869a3272334d4265acf79c 100644 (file)
@@ -610,7 +610,7 @@ usba_ep_enable(struct usb_ep *_ep, const struct usb_endpoint_descriptor *desc)
 {
        struct usba_ep *ep = to_usba_ep(_ep);
        struct usba_udc *udc = ep->udc;
-       unsigned long flags, ept_cfg, maxpacket;
+       unsigned long flags, maxpacket;
        unsigned int nr_trans;
 
        DBG(DBG_GADGET, "%s: ep_enable: desc=%p\n", ep->ep.name, desc);
@@ -630,7 +630,7 @@ usba_ep_enable(struct usb_ep *_ep, const struct usb_endpoint_descriptor *desc)
        ep->is_in = 0;
 
        DBG(DBG_ERR, "%s: EPT_CFG = 0x%lx (maxpacket = %lu)\n",
-                       ep->ep.name, ept_cfg, maxpacket);
+                       ep->ep.name, ep->ept_cfg, maxpacket);
 
        if (usb_endpoint_dir_in(desc)) {
                ep->is_in = 1;
index c60abe3a68f9cf48c21831d40bc32c4e9390b892..8cabc5944d5f1d834db7dd2186777cd79536016b 100644 (file)
@@ -1031,6 +1031,8 @@ static int dummy_udc_probe(struct platform_device *pdev)
        int             rc;
 
        dum = *((void **)dev_get_platdata(&pdev->dev));
+       /* Clear usb_gadget region for new registration to udc-core */
+       memzero_explicit(&dum->gadget, sizeof(struct usb_gadget));
        dum->gadget.name = gadget_name;
        dum->gadget.ops = &dummy_ops;
        dum->gadget.max_speed = USB_SPEED_SUPER;
index 85504419ab312e58a83c52d524bfdebadf5e1ed7..3828c2ec8623b155c9948ae90dcebdc4a0106ce6 100644 (file)
@@ -1146,15 +1146,15 @@ static int scan_dma_completions(struct net2280_ep *ep)
         */
        while (!list_empty(&ep->queue)) {
                struct net2280_request  *req;
-               u32                     tmp;
+               u32 req_dma_count;
 
                req = list_entry(ep->queue.next,
                                struct net2280_request, queue);
                if (!req->valid)
                        break;
                rmb();
-               tmp = le32_to_cpup(&req->td->dmacount);
-               if ((tmp & BIT(VALID_BIT)) != 0)
+               req_dma_count = le32_to_cpup(&req->td->dmacount);
+               if ((req_dma_count & BIT(VALID_BIT)) != 0)
                        break;
 
                /* SHORT_PACKET_TRANSFERRED_INTERRUPT handles "usb-short"
@@ -1163,40 +1163,41 @@ static int scan_dma_completions(struct net2280_ep *ep)
                 */
                if (unlikely(req->td->dmadesc == 0)) {
                        /* paranoia */
-                       tmp = readl(&ep->dma->dmacount);
-                       if (tmp & DMA_BYTE_COUNT_MASK)
+                       u32 const ep_dmacount = readl(&ep->dma->dmacount);
+
+                       if (ep_dmacount & DMA_BYTE_COUNT_MASK)
                                break;
                        /* single transfer mode */
-                       dma_done(ep, req, tmp, 0);
+                       dma_done(ep, req, req_dma_count, 0);
                        num_completed++;
                        break;
                } else if (!ep->is_in &&
                           (req->req.length % ep->ep.maxpacket) &&
                           !(ep->dev->quirks & PLX_PCIE)) {
 
-                       tmp = readl(&ep->regs->ep_stat);
+                       u32 const ep_stat = readl(&ep->regs->ep_stat);
                        /* AVOID TROUBLE HERE by not issuing short reads from
                         * your gadget driver.  That helps avoids errata 0121,
                         * 0122, and 0124; not all cases trigger the warning.
                         */
-                       if ((tmp & BIT(NAK_OUT_PACKETS)) == 0) {
+                       if ((ep_stat & BIT(NAK_OUT_PACKETS)) == 0) {
                                ep_warn(ep->dev, "%s lost packet sync!\n",
                                                ep->ep.name);
                                req->req.status = -EOVERFLOW;
                        } else {
-                               tmp = readl(&ep->regs->ep_avail);
-                               if (tmp) {
+                               u32 const ep_avail = readl(&ep->regs->ep_avail);
+                               if (ep_avail) {
                                        /* fifo gets flushed later */
                                        ep->out_overflow = 1;
                                        ep_dbg(ep->dev,
                                                "%s dma, discard %d len %d\n",
-                                               ep->ep.name, tmp,
+                                               ep->ep.name, ep_avail,
                                                req->req.length);
                                        req->req.status = -EOVERFLOW;
                                }
                        }
                }
-               dma_done(ep, req, tmp, 0);
+               dma_done(ep, req, req_dma_count, 0);
                num_completed++;
        }
 
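The net2280 hunk above is more than a rename: in the original code the single scratch variable tmp was overwritten by the later ep_stat/ep_avail reads, so the final dma_done(ep, req, tmp, 0) call could receive a register value instead of the saved dmacount. A minimal userspace sketch of that hazard (hypothetical values, not driver code):

#include <stdio.h>

static void dma_done(unsigned int dmacount)
{
        printf("request completed with dmacount %#x\n", dmacount);
}

int main(void)
{
        /* Before: one scratch variable reused for different registers. */
        unsigned int tmp = 0x1234;      /* read of req->td->dmacount */
        tmp = 0;                        /* later ep_avail read clobbers it */
        dma_done(tmp);                  /* bug: reports 0, not 0x1234 */

        /* After: one variable per value, nothing is clobbered. */
        unsigned int req_dma_count = 0x1234;
        unsigned int ep_avail = 0;
        (void)ep_avail;
        dma_done(req_dma_count);        /* reports 0x1234 */
        return 0;
}
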
index a97da645c1b9eaecc5e5bb0bcb5ad25857c43ec8..8a365aad66fe2e38eaf0a869ae0f774349f694f2 100644 (file)
@@ -1523,7 +1523,6 @@ static void pch_udc_free_dma_chain(struct pch_udc_dev *dev,
                td = phys_to_virt(addr);
                addr2 = (dma_addr_t)td->next;
                pci_pool_free(dev->data_requests, td, addr);
-               td->next = 0x00;
                addr = addr2;
        }
        req->chain_len = 1;
index e1335ad5bce9f2c96dc729e14ede027994b8b381..832c4fdbe98512a2b70b6b9e9424acc21a09b83b 100644 (file)
@@ -2534,9 +2534,10 @@ static int pxa_udc_remove(struct platform_device *_dev)
        usb_del_gadget_udc(&udc->gadget);
        pxa_cleanup_debugfs(udc);
 
-       if (!IS_ERR_OR_NULL(udc->transceiver))
+       if (!IS_ERR_OR_NULL(udc->transceiver)) {
                usb_unregister_notifier(udc->transceiver, &pxa27x_udc_phy);
-       usb_put_phy(udc->transceiver);
+               usb_put_phy(udc->transceiver);
+       }
 
        udc->transceiver = NULL;
        the_controller = NULL;
index 414e3c376dbbd59587dc3398f4a90872a5aae19c..5302f988e7e670eec3fbd66f0058a49e91f22b76 100644 (file)
@@ -350,7 +350,7 @@ static int ohci_at91_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
 
                case USB_PORT_FEAT_SUSPEND:
                        dev_dbg(hcd->self.controller, "SetPortFeat: SUSPEND\n");
-                       if (valid_port(wIndex)) {
+                       if (valid_port(wIndex) && ohci_at91->sfr_regmap) {
                                ohci_at91_port_suspend(ohci_at91->sfr_regmap,
                                                       1);
                                return 0;
@@ -393,7 +393,7 @@ static int ohci_at91_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
 
                case USB_PORT_FEAT_SUSPEND:
                        dev_dbg(hcd->self.controller, "ClearPortFeature: SUSPEND\n");
-                       if (valid_port(wIndex)) {
+                       if (valid_port(wIndex) && ohci_at91->sfr_regmap) {
                                ohci_at91_port_suspend(ohci_at91->sfr_regmap,
                                                       0);
                                return 0;
index 363d125300eacfbef6287644f89af6abe007c920..2b4a00fa735dfef5c51b4a9f583e5783e87ad957 100644 (file)
@@ -109,7 +109,7 @@ static void xhci_print_cap_regs(struct xhci_hcd *xhci)
        xhci_dbg(xhci, "RTSOFF 0x%x:\n", temp & RTSOFF_MASK);
 
        /* xhci 1.1 controllers have the HCCPARAMS2 register */
-       if (hci_version > 100) {
+       if (hci_version > 0x100) {
                temp = readl(&xhci->cap_regs->hcc_params2);
                xhci_dbg(xhci, "HCC PARAMS2 0x%x:\n", (unsigned int) temp);
                xhci_dbg(xhci, "  HC %s Force save context capability",
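The one-character fix above matters because HCIVERSION is binary-coded decimal: 0x0100 means xHCI 1.0 and 0x0110 means 1.1, so the decimal comparison hci_version > 100 (i.e. > 0x64) was true even for plain 1.0 controllers. A quick standalone check:

/* Minimal sketch: xHCI HCIVERSION is BCD (0x0100 == 1.0, 0x0110 == 1.1). */
#include <stdio.h>

int main(void)
{
        unsigned int hci_version = 0x0100;      /* an xHCI 1.0 controller */

        if (hci_version > 100)          /* old test: 0x0100 == 256 > 100 */
                printf("decimal test: wrongly treated as >= 1.1\n");
        if (hci_version > 0x100)        /* fixed test: false for 1.0 */
                printf("hex test: treated as >= 1.1\n");
        return 0;
}
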
index 9066ec9e0c2e7aacefabcd41f8f77805d635afe2..67d5dc79b6b50e6fbaa30cddface8602dded27ee 100644 (file)
@@ -382,7 +382,6 @@ static int usb_wakeup_of_property_parse(struct xhci_hcd_mtk *mtk,
 
 static int xhci_mtk_setup(struct usb_hcd *hcd);
 static const struct xhci_driver_overrides xhci_mtk_overrides __initconst = {
-       .extra_priv_size = sizeof(struct xhci_hcd),
        .reset = xhci_mtk_setup,
 };
 
@@ -678,13 +677,13 @@ static int xhci_mtk_probe(struct platform_device *pdev)
                goto power_off_phys;
        }
 
-       if (HCC_MAX_PSA(xhci->hcc_params) >= 4)
-               xhci->shared_hcd->can_do_streams = 1;
-
        ret = usb_add_hcd(hcd, irq, IRQF_SHARED);
        if (ret)
                goto put_usb3_hcd;
 
+       if (HCC_MAX_PSA(xhci->hcc_params) >= 4)
+               xhci->shared_hcd->can_do_streams = 1;
+
        ret = usb_add_hcd(xhci->shared_hcd, irq, IRQF_SHARED);
        if (ret)
                goto dealloc_usb2_hcd;
index 6d33b42ffcf5224a1b347666c77b010df9001d91..6ed468fa7d5e593ca2e0fdabc6048fb7f2dd9f5d 100644 (file)
@@ -286,6 +286,8 @@ static int xhci_plat_remove(struct platform_device *dev)
        struct xhci_hcd *xhci = hcd_to_xhci(hcd);
        struct clk *clk = xhci->clk;
 
+       xhci->xhc_state |= XHCI_STATE_REMOVING;
+
        usb_remove_hcd(xhci->shared_hcd);
        usb_phy_shutdown(hcd->usb_phy);
 
@@ -342,6 +344,7 @@ MODULE_DEVICE_TABLE(acpi, usb_xhci_acpi_match);
 static struct platform_driver usb_xhci_driver = {
        .probe  = xhci_plat_probe,
        .remove = xhci_plat_remove,
+       .shutdown       = usb_hcd_platform_shutdown,
        .driver = {
                .name = "xhci-hcd",
                .pm = DEV_PM_OPS,
index d9936c771fa074593e77aad4aa86f0771388e6b1..a3309aa02993dfa79e52a2b93a87b9efa289d498 100644 (file)
@@ -1989,6 +1989,9 @@ static int process_ctrl_td(struct xhci_hcd *xhci, struct xhci_td *td,
                case TRB_NORMAL:
                        td->urb->actual_length = requested - remaining;
                        goto finish_td;
+               case TRB_STATUS:
+                       td->urb->actual_length = requested;
+                       goto finish_td;
                default:
                        xhci_warn(xhci, "WARN: unexpected TRB Type %d\n",
                                  trb_type);
index a59fafb4b329f532be52773c53a874b00cae789a..74436f8ca5382f736dbf352c21b7c2f83a8438ba 100644 (file)
@@ -1308,7 +1308,6 @@ static int tegra_xhci_setup(struct usb_hcd *hcd)
 }
 
 static const struct xhci_driver_overrides tegra_xhci_overrides __initconst = {
-       .extra_priv_size = sizeof(struct xhci_hcd),
        .reset = tegra_xhci_setup,
 };
 
index 6d6c46000e56cc76895a34f9d3980c949030b8a2..953fd8f62df0787b0479286c12513656f141614f 100644 (file)
@@ -868,7 +868,7 @@ static void xhci_disable_port_wake_on_bits(struct xhci_hcd *xhci)
 
        spin_lock_irqsave(&xhci->lock, flags);
 
-       /* disble usb3 ports Wake bits*/
+       /* disable usb3 ports Wake bits */
        port_index = xhci->num_usb3_ports;
        port_array = xhci->usb3_ports;
        while (port_index--) {
@@ -879,7 +879,7 @@ static void xhci_disable_port_wake_on_bits(struct xhci_hcd *xhci)
                        writel(t2, port_array[port_index]);
        }
 
-       /* disble usb2 ports Wake bits*/
+       /* disable usb2 ports Wake bits */
        port_index = xhci->num_usb2_ports;
        port_array = xhci->usb2_ports;
        while (port_index--) {
@@ -1477,6 +1477,7 @@ int xhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
        struct xhci_ring *ep_ring;
        struct xhci_virt_ep *ep;
        struct xhci_command *command;
+       struct xhci_virt_device *vdev;
 
        xhci = hcd_to_xhci(hcd);
        spin_lock_irqsave(&xhci->lock, flags);
@@ -1485,15 +1486,27 @@ int xhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
 
        /* Make sure the URB hasn't completed or been unlinked already */
        ret = usb_hcd_check_unlink_urb(hcd, urb, status);
-       if (ret || !urb->hcpriv)
+       if (ret)
                goto done;
+
+       /* give back URB now if we can't queue it for cancel */
+       vdev = xhci->devs[urb->dev->slot_id];
+       urb_priv = urb->hcpriv;
+       if (!vdev || !urb_priv)
+               goto err_giveback;
+
+       ep_index = xhci_get_endpoint_index(&urb->ep->desc);
+       ep = &vdev->eps[ep_index];
+       ep_ring = xhci_urb_to_transfer_ring(xhci, urb);
+       if (!ep || !ep_ring)
+               goto err_giveback;
+
        temp = readl(&xhci->op_regs->status);
        if (temp == 0xffffffff || (xhci->xhc_state & XHCI_STATE_HALTED)) {
                xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
                                "HW died, freeing TD.");
-               urb_priv = urb->hcpriv;
                for (i = urb_priv->num_tds_done;
-                    i < urb_priv->num_tds && xhci->devs[urb->dev->slot_id];
+                    i < urb_priv->num_tds;
                     i++) {
                        td = &urb_priv->td[i];
                        if (!list_empty(&td->td_list))
@@ -1501,23 +1514,9 @@ int xhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
                        if (!list_empty(&td->cancelled_td_list))
                                list_del_init(&td->cancelled_td_list);
                }
-
-               usb_hcd_unlink_urb_from_ep(hcd, urb);
-               spin_unlock_irqrestore(&xhci->lock, flags);
-               usb_hcd_giveback_urb(hcd, urb, -ESHUTDOWN);
-               xhci_urb_free_priv(urb_priv);
-               return ret;
+               goto err_giveback;
        }
 
-       ep_index = xhci_get_endpoint_index(&urb->ep->desc);
-       ep = &xhci->devs[urb->dev->slot_id]->eps[ep_index];
-       ep_ring = xhci_urb_to_transfer_ring(xhci, urb);
-       if (!ep_ring) {
-               ret = -EINVAL;
-               goto done;
-       }
-
-       urb_priv = urb->hcpriv;
        i = urb_priv->num_tds_done;
        if (i < urb_priv->num_tds)
                xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
@@ -1554,6 +1553,14 @@ int xhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
 done:
        spin_unlock_irqrestore(&xhci->lock, flags);
        return ret;
+
+err_giveback:
+       if (urb_priv)
+               xhci_urb_free_priv(urb_priv);
+       usb_hcd_unlink_urb_from_ep(hcd, urb);
+       spin_unlock_irqrestore(&xhci->lock, flags);
+       usb_hcd_giveback_urb(hcd, urb, -ESHUTDOWN);
+       return ret;
 }
 
 /* Drop an endpoint from a new bandwidth configuration for this device.
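The xhci_urb_dequeue rework above replaces two duplicated unlock-and-give-back sequences with a single err_giveback label that every cannot-queue-a-cancel path funnels through. A hedged, self-contained sketch of that shape (hypothetical names and types, not the real xHCI code):

#include <errno.h>
#include <stdio.h>

struct urb { void *hcpriv; int dev_ok; };

static void giveback_urb(struct urb *urb, int status)
{
        printf("urb %p given back, status %d\n", (void *)urb, status);
}

static int example_dequeue(struct urb *urb)
{
        int ret = 0;

        /* spin_lock_irqsave(...) in the real code */
        if (!urb->hcpriv || !urb->dev_ok)
                goto err_giveback;              /* cannot queue a cancel */

        printf("urb %p: cancel queued\n", (void *)urb);
        /* spin_unlock_irqrestore(...) */
        return ret;

err_giveback:
        /* unlink, drop the lock, then give back exactly once */
        giveback_urb(urb, -ESHUTDOWN);
        return ret;
}

int main(void)
{
        struct urb live = { .hcpriv = &live, .dev_ok = 1 };
        struct urb dead = { 0 };

        example_dequeue(&live);
        example_dequeue(&dead);
        return 0;
}
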
index 8b9fd7534f698b937b5a89113acc12c716368075..502bfe30a077a20120616af74b378c62bc2ec6d9 100644 (file)
@@ -347,6 +347,9 @@ static int idmouse_probe(struct usb_interface *interface,
        if (iface_desc->desc.bInterfaceClass != 0x0A)
                return -ENODEV;
 
+       if (iface_desc->desc.bNumEndpoints < 1)
+               return -ENODEV;
+
        /* allocate memory for our device state and initialize it */
        dev = kzalloc(sizeof(*dev), GFP_KERNEL);
        if (dev == NULL)
index 095778ff984de25838b1e0eda53e0794dd382f8c..37c63cb39714b86ada39d3232565ef1595ed71c6 100644 (file)
@@ -781,12 +781,6 @@ static int iowarrior_probe(struct usb_interface *interface,
        iface_desc = interface->cur_altsetting;
        dev->product_id = le16_to_cpu(udev->descriptor.idProduct);
 
-       if (iface_desc->desc.bNumEndpoints < 1) {
-               dev_err(&interface->dev, "Invalid number of endpoints\n");
-               retval = -EINVAL;
-               goto error;
-       }
-
        /* set up the endpoint information */
        for (i = 0; i < iface_desc->desc.bNumEndpoints; ++i) {
                endpoint = &iface_desc->endpoint[i].desc;
@@ -797,6 +791,21 @@ static int iowarrior_probe(struct usb_interface *interface,
                        /* this one will match for the IOWarrior56 only */
                        dev->int_out_endpoint = endpoint;
        }
+
+       if (!dev->int_in_endpoint) {
+               dev_err(&interface->dev, "no interrupt-in endpoint found\n");
+               retval = -ENODEV;
+               goto error;
+       }
+
+       if (dev->product_id == USB_DEVICE_ID_CODEMERCS_IOW56) {
+               if (!dev->int_out_endpoint) {
+                       dev_err(&interface->dev, "no interrupt-out endpoint found\n");
+                       retval = -ENODEV;
+                       goto error;
+               }
+       }
+
        /* we have to check the report_size often, so remember it in the endianness suitable for our machine */
        dev->report_size = usb_endpoint_maxp(dev->int_in_endpoint);
        if ((dev->interface->cur_altsetting->desc.bInterfaceNumber == 0) &&
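The iowarrior change is one instance of a hardening pattern repeated across this series (idmouse above; lvs_rh, uss720, wusbcore and the UWB probes below): check bNumEndpoints, then check that each required endpoint was actually found, before dereferencing anything, and return -ENODEV otherwise. A self-contained sketch of the pattern with hypothetical descriptor types:

#include <stdio.h>
#include <stddef.h>

#define ENODEV 19

struct ep_desc { int is_int_in; };
struct altsetting { int bNumEndpoints; struct ep_desc endpoint[8]; };

static int example_probe(const struct altsetting *alt)
{
        const struct ep_desc *int_in = NULL;

        if (alt->bNumEndpoints < 1)     /* reject broken/malicious devices */
                return -ENODEV;

        for (int i = 0; i < alt->bNumEndpoints; i++)
                if (alt->endpoint[i].is_int_in)
                        int_in = &alt->endpoint[i];

        if (!int_in)                    /* required endpoint missing */
                return -ENODEV;

        printf("probe ok\n");
        return 0;
}

int main(void)
{
        struct altsetting none = { .bNumEndpoints = 0 };
        struct altsetting good = { .bNumEndpoints = 1,
                                   .endpoint = { { .is_int_in = 1 } } };

        printf("none -> %d\n", example_probe(&none));
        printf("good -> %d\n", example_probe(&good));
        return 0;
}
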
index 77176511658f3328f830ecdbd056f072a6c84f8a..d3d12475326663def2ce1f389f06f040d2a56126 100644 (file)
@@ -366,6 +366,10 @@ static int lvs_rh_probe(struct usb_interface *intf,
 
        hdev = interface_to_usbdev(intf);
        desc = intf->cur_altsetting;
+
+       if (desc->desc.bNumEndpoints < 1)
+               return -ENODEV;
+
        endpoint = &desc->endpoint[0].desc;
 
        /* valid only for SS root hub */
index 4e18600dc9b43e2603ec2c8f371545e63b75a404..91f66d68bcb7b55bd000e18ad551d8b6bc27ff7c 100644 (file)
@@ -375,18 +375,24 @@ static int usb251xb_get_ofdata(struct usb251xb *hub,
        if (of_get_property(np, "dynamic-power-switching", NULL))
                hub->conf_data2 |= BIT(7);
 
-       if (of_get_property(np, "oc-delay-100us", NULL)) {
-               hub->conf_data2 &= ~BIT(5);
-               hub->conf_data2 &= ~BIT(4);
-       } else if (of_get_property(np, "oc-delay-4ms", NULL)) {
-               hub->conf_data2 &= ~BIT(5);
-               hub->conf_data2 |= BIT(4);
-       } else if (of_get_property(np, "oc-delay-8ms", NULL)) {
-               hub->conf_data2 |= BIT(5);
-               hub->conf_data2 &= ~BIT(4);
-       } else if (of_get_property(np, "oc-delay-16ms", NULL)) {
-               hub->conf_data2 |= BIT(5);
-               hub->conf_data2 |= BIT(4);
+       if (!of_property_read_u32(np, "oc-delay-us", property_u32)) {
+               if (*property_u32 == 100) {
+                       /* 100 us */
+                       hub->conf_data2 &= ~BIT(5);
+                       hub->conf_data2 &= ~BIT(4);
+               } else if (*property_u32 == 4000) {
+                       /* 4 ms */
+                       hub->conf_data2 &= ~BIT(5);
+                       hub->conf_data2 |= BIT(4);
+               } else if (*property_u32 == 16000) {
+                       /* 16 ms */
+                       hub->conf_data2 |= BIT(5);
+                       hub->conf_data2 |= BIT(4);
+               } else {
+                       /* 8 ms (DEFAULT) */
+                       hub->conf_data2 |= BIT(5);
+                       hub->conf_data2 &= ~BIT(4);
+               }
        }
 
        if (of_get_property(np, "compound-device", NULL))
@@ -432,30 +438,9 @@ static int usb251xb_get_ofdata(struct usb251xb *hub,
                }
        }
 
-       hub->max_power_sp = USB251XB_DEF_MAX_POWER_SELF;
-       if (!of_property_read_u32(np, "max-sp-power", property_u32))
-               hub->max_power_sp = min_t(u8, be32_to_cpu(*property_u32) / 2,
-                                         250);
-
-       hub->max_power_bp = USB251XB_DEF_MAX_POWER_BUS;
-       if (!of_property_read_u32(np, "max-bp-power", property_u32))
-               hub->max_power_bp = min_t(u8, be32_to_cpu(*property_u32) / 2,
-                                         250);
-
-       hub->max_current_sp = USB251XB_DEF_MAX_CURRENT_SELF;
-       if (!of_property_read_u32(np, "max-sp-current", property_u32))
-               hub->max_current_sp = min_t(u8, be32_to_cpu(*property_u32) / 2,
-                                           250);
-
-       hub->max_current_bp = USB251XB_DEF_MAX_CURRENT_BUS;
-       if (!of_property_read_u32(np, "max-bp-current", property_u32))
-               hub->max_current_bp = min_t(u8, be32_to_cpu(*property_u32) / 2,
-                                           250);
-
        hub->power_on_time = USB251XB_DEF_POWER_ON_TIME;
-       if (!of_property_read_u32(np, "power-on-time", property_u32))
-               hub->power_on_time = min_t(u8, be32_to_cpu(*property_u32) / 2,
-                                          255);
+       if (!of_property_read_u32(np, "power-on-time-ms", property_u32))
+               hub->power_on_time = min_t(u8, *property_u32 / 2, 255);
 
        if (of_property_read_u16_array(np, "language-id", &hub->lang_id, 1))
                hub->lang_id = USB251XB_DEF_LANGUAGE_ID;
@@ -492,6 +477,10 @@ static int usb251xb_get_ofdata(struct usb251xb *hub,
        /* The following parameters are currently not exposed to devicetree, but
         * may be as soon as needed.
         */
+       hub->max_power_sp = USB251XB_DEF_MAX_POWER_SELF;
+       hub->max_power_bp = USB251XB_DEF_MAX_POWER_BUS;
+       hub->max_current_sp = USB251XB_DEF_MAX_CURRENT_SELF;
+       hub->max_current_bp = USB251XB_DEF_MAX_CURRENT_BUS;
        hub->bat_charge_en = USB251XB_DEF_BATTERY_CHARGING_ENABLE;
        hub->boost_up = USB251XB_DEF_BOOST_UP;
        hub->boost_x = USB251XB_DEF_BOOST_X;
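The usb251xb rework above collapses four boolean properties into a single integer oc-delay-us property; note also that of_property_read_u32() already returns a CPU-endian value, which is why the surviving power-on-time-ms read drops the be32_to_cpu(). A small sketch of the value-to-bits mapping (hypothetical helper, with the 8 ms default as in the code above):

#include <stdio.h>

#define BIT(n) (1u << (n))

static unsigned int oc_delay_bits(unsigned int us)
{
        switch (us) {
        case 100:   return 0;                   /* 100 us */
        case 4000:  return BIT(4);              /* 4 ms  */
        case 16000: return BIT(5) | BIT(4);     /* 16 ms */
        default:    return BIT(5);              /* 8 ms (default) */
        }
}

int main(void)
{
        unsigned int vals[] = { 100, 4000, 8000, 16000, 12345 };

        for (int i = 0; i < 5; i++)
                printf("oc-delay-us=%u -> bits %#x\n",
                       vals[i], oc_delay_bits(vals[i]));
        return 0;
}
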
index e45a3a680db8f6490257c0f6decdfd6d1d8247d8..07014cad6dbe357bca938561eaf976dbbee902d4 100644 (file)
@@ -709,6 +709,11 @@ static int uss720_probe(struct usb_interface *intf,
 
        interface = intf->cur_altsetting;
 
+       if (interface->desc.bNumEndpoints < 3) {
+               usb_put_dev(usbdev);
+               return -ENODEV;
+       }
+
        /*
         * Allocate parport interface 
         */
index d8bae6ca890475b16bf7d45058ae35713ca46656..0c3664ab705eed549e73d53dea13976066182ee1 100644 (file)
@@ -2490,8 +2490,8 @@ static int musb_remove(struct platform_device *pdev)
        musb_host_cleanup(musb);
        musb_gadget_cleanup(musb);
 
-       spin_lock_irqsave(&musb->lock, flags);
        musb_platform_disable(musb);
+       spin_lock_irqsave(&musb->lock, flags);
        musb_disable_interrupts(musb);
        musb_writeb(musb->mregs, MUSB_DEVCTL, 0);
        spin_unlock_irqrestore(&musb->lock, flags);
index 00e272bfee39a94520f16aeef03beed6a08ea43c..355655f8a3fbc9c4e3d8acd03eefa31a5736fb2f 100644 (file)
@@ -238,8 +238,27 @@ static void cppi41_dma_callback(void *private_data,
                        transferred < cppi41_channel->packet_sz)
                cppi41_channel->prog_len = 0;
 
-       if (cppi41_channel->is_tx)
-               empty = musb_is_tx_fifo_empty(hw_ep);
+       if (cppi41_channel->is_tx) {
+               u8 type;
+
+               if (is_host_active(musb))
+                       type = hw_ep->out_qh->type;
+               else
+                       type = hw_ep->ep_in.type;
+
+               if (type == USB_ENDPOINT_XFER_ISOC)
+                       /*
+                        * Don't use the early-TX-interrupt workaround below
+                        * for Isoch transfers. Since Isoch transfers are
+                        * periodic, by the time the next transfer is
+                        * scheduled, the current one should be done already.
+                        *
+                        * This avoids an audio playback underrun issue.
+                        */
+                       empty = true;
+               else
+                       empty = musb_is_tx_fifo_empty(hw_ep);
+       }
 
        if (!cppi41_channel->is_tx || empty) {
                cppi41_trans_done(cppi41_channel);
index 7c047c4a2565cca25690c47d85a2bb9eb29f728e..9c7ee26ef388062bdc5e1f1fb2097fc010882950 100644 (file)
@@ -933,7 +933,7 @@ static int dsps_probe(struct platform_device *pdev)
        if (usb_get_dr_mode(&pdev->dev) == USB_DR_MODE_PERIPHERAL) {
                ret = dsps_setup_optional_vbus_irq(pdev, glue);
                if (ret)
-                       return ret;
+                       goto err_iounmap;
        }
 
        platform_set_drvdata(pdev, glue);
@@ -946,6 +946,8 @@ static int dsps_probe(struct platform_device *pdev)
 
 err:
        pm_runtime_disable(&pdev->dev);
+err_iounmap:
+       iounmap(glue->usbss_base);
        return ret;
 }
 
@@ -956,6 +958,7 @@ static int dsps_remove(struct platform_device *pdev)
        platform_device_unregister(glue->musb);
 
        pm_runtime_disable(&pdev->dev);
+       iounmap(glue->usbss_base);
 
        return 0;
 }
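The dsps fix adds an err_iounmap label so the ioremapped usbss_base region is released both on the newly handled failure path and in dsps_remove(). The underlying idiom, sketched below with hypothetical resources, is one label per acquired resource, unwound in reverse acquisition order:

#include <stdio.h>

static int acquire_a(void) { printf("A acquired\n"); return 0; }
static int acquire_b(int fail) { return fail ? -1 : 0; }
static void release_a(void) { printf("A released\n"); }

static int example_probe(int fail_b)
{
        int ret;

        ret = acquire_a();
        if (ret)
                return ret;

        ret = acquire_b(fail_b);
        if (ret)
                goto err_release_a;     /* unwind everything held so far */

        return 0;

err_release_a:
        release_a();
        return ret;
}

int main(void)
{
        printf("probe -> %d\n", example_probe(1));
        return 0;
}
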
index db68156568e6e7bc209eaf942eaa9c51bd4e15ad..f333024660b4d0d4c6c8f54e5b209285de9a169a 100644 (file)
@@ -33,6 +33,12 @@ static const struct i2c_device_id isp1301_id[] = {
 };
 MODULE_DEVICE_TABLE(i2c, isp1301_id);
 
+static const struct of_device_id isp1301_of_match[] = {
+       {.compatible = "nxp,isp1301" },
+       { },
+};
+MODULE_DEVICE_TABLE(of, isp1301_of_match);
+
 static struct i2c_client *isp1301_i2c_client;
 
 static int __isp1301_write(struct isp1301 *isp, u8 reg, u8 value, u8 clear)
@@ -130,6 +136,7 @@ static int isp1301_remove(struct i2c_client *client)
 static struct i2c_driver isp1301_driver = {
        .driver = {
                .name = DRV_NAME,
+               .of_match_table = isp1301_of_match,
        },
        .probe = isp1301_probe,
        .remove = isp1301_remove,
index ab78111e09680f47267cbe5fc7fe9a25ca5eea21..6537d3ca2797d8573236578e3088f6dbce1ce1b5 100644 (file)
@@ -1500,7 +1500,7 @@ static int digi_read_oob_callback(struct urb *urb)
                return -1;
 
        /* handle each oob command */
-       for (i = 0; i < urb->actual_length - 4; i += 4) {
+       for (i = 0; i < urb->actual_length - 3; i += 4) {
                opcode = buf[i];
                line = buf[i + 1];
                status = buf[i + 2];
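The digi_acceleport bound change is an off-by-one fix: each OOB command is four bytes (opcode, line, status, val) read at offsets i..i+3, so the loop condition must be i + 3 < actual_length, i.e. i < actual_length - 3. The old - 4 bound silently dropped the final command whenever the buffer length was an exact multiple of four. A worked check:

/* 4-byte records need i+3 < len, i.e. i < len - 3;
 * the old "i < len - 4" skipped the last record. */
#include <stdio.h>

int main(void)
{
        int len = 8;    /* two complete 4-byte OOB commands */
        int old = 0, fixed = 0;

        for (int i = 0; i < len - 4; i += 4)
                old++;
        for (int i = 0; i < len - 3; i += 4)
                fixed++;

        printf("old bound: %d record(s), fixed bound: %d record(s)\n",
               old, fixed);     /* prints 1 and 2 */
        return 0;
}
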
index ceaeebaa6f90587b6d8ea01e53daaad243be8b6d..a76b95d32157871f5e2964b629784a7642da8480 100644 (file)
@@ -1674,6 +1674,12 @@ static void edge_interrupt_callback(struct urb *urb)
        function    = TIUMP_GET_FUNC_FROM_CODE(data[0]);
        dev_dbg(dev, "%s - port_number %d, function %d, info 0x%x\n", __func__,
                port_number, function, data[1]);
+
+       if (port_number >= edge_serial->serial->num_ports) {
+               dev_err(dev, "bad port number %d\n", port_number);
+               goto exit;
+       }
+
        port = edge_serial->serial->port[port_number];
        edge_port = usb_get_serial_port_data(port);
        if (!edge_port) {
@@ -1755,7 +1761,7 @@ static void edge_bulk_in_callback(struct urb *urb)
 
        port_number = edge_port->port->port_number;
 
-       if (edge_port->lsr_event) {
+       if (urb->actual_length > 0 && edge_port->lsr_event) {
                edge_port->lsr_event = 0;
                dev_dbg(dev, "%s ===== Port %u LSR Status = %02x, Data = %02x ======\n",
                        __func__, port_number, edge_port->lsr_mask, *data);
index a180b17d24323b074aee19e33bf0f497ad271d8a..dd706953b4660905cc5abf5e85477bfb6bdb4149 100644 (file)
@@ -31,7 +31,6 @@
 #define BT_IGNITIONPRO_ID      0x2000
 
 /* function prototypes */
-static int  omninet_open(struct tty_struct *tty, struct usb_serial_port *port);
 static void omninet_process_read_urb(struct urb *urb);
 static void omninet_write_bulk_callback(struct urb *urb);
 static int  omninet_write(struct tty_struct *tty, struct usb_serial_port *port,
@@ -60,7 +59,6 @@ static struct usb_serial_driver zyxel_omninet_device = {
        .attach =               omninet_attach,
        .port_probe =           omninet_port_probe,
        .port_remove =          omninet_port_remove,
-       .open =                 omninet_open,
        .write =                omninet_write,
        .write_room =           omninet_write_room,
        .write_bulk_callback =  omninet_write_bulk_callback,
@@ -140,17 +138,6 @@ static int omninet_port_remove(struct usb_serial_port *port)
        return 0;
 }
 
-static int omninet_open(struct tty_struct *tty, struct usb_serial_port *port)
-{
-       struct usb_serial       *serial = port->serial;
-       struct usb_serial_port  *wport;
-
-       wport = serial->port[1];
-       tty_port_tty_set(&wport->port, tty);
-
-       return usb_serial_generic_open(tty, port);
-}
-
 #define OMNINET_HEADERLEN      4
 #define OMNINET_BULKOUTSIZE    64
 #define OMNINET_PAYLOADSIZE    (OMNINET_BULKOUTSIZE - OMNINET_HEADERLEN)
index 42cc72e54c051b2115c358bcee8bfc534258d206..af67a0de6b5d475d2be95952ddfb2354546a0fbb 100644 (file)
@@ -233,6 +233,14 @@ static void option_instat_callback(struct urb *urb);
 #define BANDRICH_PRODUCT_1012                  0x1012
 
 #define QUALCOMM_VENDOR_ID                     0x05C6
+/* These Quectel products use Qualcomm's vendor ID */
+#define QUECTEL_PRODUCT_UC20                   0x9003
+#define QUECTEL_PRODUCT_UC15                   0x9090
+
+#define QUECTEL_VENDOR_ID                      0x2c7c
+/* These Quectel products use Quectel's vendor ID */
+#define QUECTEL_PRODUCT_EC21                   0x0121
+#define QUECTEL_PRODUCT_EC25                   0x0125
 
 #define CMOTECH_VENDOR_ID                      0x16d8
 #define CMOTECH_PRODUCT_6001                   0x6001
@@ -1161,7 +1169,14 @@ static const struct usb_device_id option_ids[] = {
        { USB_DEVICE(QUALCOMM_VENDOR_ID, 0x6613)}, /* Onda H600/ZTE MF330 */
        { USB_DEVICE(QUALCOMM_VENDOR_ID, 0x0023)}, /* ONYX 3G device */
        { USB_DEVICE(QUALCOMM_VENDOR_ID, 0x9000)}, /* SIMCom SIM5218 */
-       { USB_DEVICE(QUALCOMM_VENDOR_ID, 0x9003), /* Quectel UC20 */
+       /* Quectel products using Qualcomm vendor ID */
+       { USB_DEVICE(QUALCOMM_VENDOR_ID, QUECTEL_PRODUCT_UC15)},
+       { USB_DEVICE(QUALCOMM_VENDOR_ID, QUECTEL_PRODUCT_UC20),
+         .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
+       /* Quectel products using Quectel vendor ID */
+       { USB_DEVICE(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EC21),
+         .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
+       { USB_DEVICE(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EC25),
          .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
        { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_6001) },
        { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_CMU_300) },
index 696458db7e3c45e661a9825d05df0fe25dc0a832..38b3f0d8cd580f2366136003934d00a475b1d7f1 100644 (file)
@@ -169,6 +169,8 @@ static const struct usb_device_id id_table[] = {
        {DEVICE_SWI(0x413c, 0x81a9)},   /* Dell Wireless 5808e Gobi(TM) 4G LTE Mobile Broadband Card */
        {DEVICE_SWI(0x413c, 0x81b1)},   /* Dell Wireless 5809e Gobi(TM) 4G LTE Mobile Broadband Card */
        {DEVICE_SWI(0x413c, 0x81b3)},   /* Dell Wireless 5809e Gobi(TM) 4G LTE Mobile Broadband Card (rev3) */
+       {DEVICE_SWI(0x413c, 0x81b5)},   /* Dell Wireless 5811e QDL */
+       {DEVICE_SWI(0x413c, 0x81b6)},   /* Dell Wireless 5811e QDL */
 
        /* Huawei devices */
        {DEVICE_HWI(0x03f0, 0x581d)},   /* HP lt4112 LTE/HSPA+ Gobi 4G Modem (Huawei me906e) */
index 93c6c9b08daae534234ca75121f696cec521857d..8a069aa154eda461ae12807d2518bbfd0bb27bf4 100644 (file)
@@ -200,6 +200,11 @@ static void safe_process_read_urb(struct urb *urb)
        if (!safe)
                goto out;
 
+       if (length < 2) {
+               dev_err(&port->dev, "malformed packet\n");
+               return;
+       }
+
        fcs = fcs_compute10(data, length, CRC10_INITFCS);
        if (fcs) {
                dev_err(&port->dev, "%s - bad CRC %x\n", __func__, fcs);
index 16cc18369111d039ffededa7559075a869638708..9129f6cb823074a555a90f74611dac3f0164a60d 100644 (file)
@@ -2071,6 +2071,20 @@ UNUSUAL_DEV(  0x1370, 0x6828, 0x0110, 0x0110,
                USB_SC_DEVICE, USB_PR_DEVICE, NULL,
                US_FL_IGNORE_RESIDUE ),
 
+/*
+ * Reported by Tobias Jakobi <tjakobi@math.uni-bielefeld.de>
+ * The INIC-3619 bridge is used in the StarTech SLSODDU33B
+ * SATA-USB enclosure for slimline optical drives.
+ *
+ * The quirk enables MakeMKV to properly exchange keys with
+ * an installed BD drive.
+ */
+UNUSUAL_DEV(  0x13fd, 0x3609, 0x0209, 0x0209,
+               "Initio Corporation",
+               "INIC-3619",
+               USB_SC_DEVICE, USB_PR_DEVICE, NULL,
+               US_FL_IGNORE_RESIDUE ),
+
 /* Reported by Qinglin Ye <yestyle@gmail.com> */
 UNUSUAL_DEV(  0x13fe, 0x3600, 0x0100, 0x0100,
                "Kingston",
index 252c7bd9218afd5db373325b55b561c304ab0e88..d01496fd27fe88460988745a57c32c0cf5f14840 100644 (file)
@@ -39,6 +39,9 @@ int wa_create(struct wahc *wa, struct usb_interface *iface,
        int result;
        struct device *dev = &iface->dev;
 
+       if (iface->cur_altsetting->desc.bNumEndpoints < 3)
+               return -ENODEV;
+
        result = wa_rpipes_create(wa);
        if (result < 0)
                goto error_rpipes_create;
index 0aa6c3c29d17260f684b8bf79944eff43af04427..35a1e777b4497ad0ad1cfa18db0834354732b128 100644 (file)
@@ -823,6 +823,9 @@ static int hwarc_probe(struct usb_interface *iface,
        struct hwarc *hwarc;
        struct device *dev = &iface->dev;
 
+       if (iface->cur_altsetting->desc.bNumEndpoints < 1)
+               return -ENODEV;
+
        result = -ENOMEM;
        uwb_rc = uwb_rc_alloc();
        if (uwb_rc == NULL) {
index 2bfc846ac071341ace37aa50dc126fc5b08a519f..6345e85822a42457f11c607effb3fbb66ce8c89f 100644 (file)
@@ -362,6 +362,9 @@ int i1480_usb_probe(struct usb_interface *iface, const struct usb_device_id *id)
                                 result);
        }
 
+       if (iface->cur_altsetting->desc.bNumEndpoints < 1)
+               return -ENODEV;
+
        result = -ENOMEM;
        i1480_usb = kzalloc(sizeof(*i1480_usb), GFP_KERNEL);
        if (i1480_usb == NULL) {
index 609f4f982c74c59a5b4fd87dfd83367765ebbc1e..561084ab387f3fd7c8ae3fa3e91c27d8329f7fe4 100644 (file)
@@ -403,6 +403,7 @@ static void vfio_group_release(struct kref *kref)
        struct iommu_group *iommu_group = group->iommu_group;
 
        WARN_ON(!list_empty(&group->device_list));
+       WARN_ON(group->notifier.head);
 
        list_for_each_entry_safe(unbound, tmp,
                                 &group->unbound_list, unbound_next) {
@@ -1573,6 +1574,10 @@ static int vfio_group_fops_open(struct inode *inode, struct file *filep)
                return -EBUSY;
        }
 
+       /* Warn if previous user didn't cleanup and re-init to drop them */
+       if (WARN_ON(group->notifier.head))
+               BLOCKING_INIT_NOTIFIER_HEAD(&group->notifier);
+
        filep->private_data = group;
 
        return 0;
@@ -1584,9 +1589,6 @@ static int vfio_group_fops_release(struct inode *inode, struct file *filep)
 
        filep->private_data = NULL;
 
-       /* Any user didn't unregister? */
-       WARN_ON(group->notifier.head);
-
        vfio_group_try_dissolve_container(group);
 
        atomic_dec(&group->opened);
index c26fa1f3ed8606e65870f05aa47b7eca5fdf381e..32d2633092a37edf64ec4b9d2afc9fa4f12ea77d 100644 (file)
@@ -1182,8 +1182,7 @@ static struct vfio_group *find_iommu_group(struct vfio_domain *domain,
        return NULL;
 }
 
-static bool vfio_iommu_has_resv_msi(struct iommu_group *group,
-                                   phys_addr_t *base)
+static bool vfio_iommu_has_sw_msi(struct iommu_group *group, phys_addr_t *base)
 {
        struct list_head group_resv_regions;
        struct iommu_resv_region *region, *next;
@@ -1192,7 +1191,7 @@ static bool vfio_iommu_has_resv_msi(struct iommu_group *group,
        INIT_LIST_HEAD(&group_resv_regions);
        iommu_get_group_resv_regions(group, &group_resv_regions);
        list_for_each_entry(region, &group_resv_regions, list) {
-               if (region->type & IOMMU_RESV_MSI) {
+               if (region->type == IOMMU_RESV_SW_MSI) {
                        *base = region->start;
                        ret = true;
                        goto out;
@@ -1283,7 +1282,7 @@ static int vfio_iommu_type1_attach_group(void *iommu_data,
        if (ret)
                goto out_domain;
 
-       resv_msi = vfio_iommu_has_resv_msi(iommu_group, &resv_msi_base);
+       resv_msi = vfio_iommu_has_sw_msi(iommu_group, &resv_msi_base);
 
        INIT_LIST_HEAD(&domain->group_list);
        list_add(&group->next, &domain->group_list);
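The vfio rename above also changes the test from a bitwise AND to an equality comparison: the region type holds a single enum value, not a bit mask, so type & IOMMU_RESV_MSI also matched any enumerator that happens to share a bit with it. An illustrative standalone check (enumerator values hypothetical; the real ones live in include/linux/iommu.h):

#include <stdio.h>

enum resv_type { RESV_DIRECT, RESV_RESERVED, RESV_MSI, RESV_SW_MSI };

int main(void)
{
        enum resv_type type = RESV_SW_MSI;      /* value 3 here */

        if (type & RESV_MSI)            /* 3 & 2 != 0: also true for SW_MSI */
                printf("bitmask test matches (too broadly)\n");
        if (type == RESV_SW_MSI)        /* exact match: unambiguous */
                printf("equality test matches\n");
        return 0;
}
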
index ce5e63d2c66aac7d019c422ec294cab025e94e5e..44eed8eb0725b25e3c9765e19387e7c338ab9bbb 100644 (file)
@@ -223,6 +223,46 @@ vhost_transport_send_pkt(struct virtio_vsock_pkt *pkt)
        return len;
 }
 
+static int
+vhost_transport_cancel_pkt(struct vsock_sock *vsk)
+{
+       struct vhost_vsock *vsock;
+       struct virtio_vsock_pkt *pkt, *n;
+       int cnt = 0;
+       LIST_HEAD(freeme);
+
+       /* Find the vhost_vsock according to guest context id  */
+       vsock = vhost_vsock_get(vsk->remote_addr.svm_cid);
+       if (!vsock)
+               return -ENODEV;
+
+       spin_lock_bh(&vsock->send_pkt_list_lock);
+       list_for_each_entry_safe(pkt, n, &vsock->send_pkt_list, list) {
+               if (pkt->vsk != vsk)
+                       continue;
+               list_move(&pkt->list, &freeme);
+       }
+       spin_unlock_bh(&vsock->send_pkt_list_lock);
+
+       list_for_each_entry_safe(pkt, n, &freeme, list) {
+               if (pkt->reply)
+                       cnt++;
+               list_del(&pkt->list);
+               virtio_transport_free_pkt(pkt);
+       }
+
+       if (cnt) {
+               struct vhost_virtqueue *tx_vq = &vsock->vqs[VSOCK_VQ_TX];
+               int new_cnt;
+
+               new_cnt = atomic_sub_return(cnt, &vsock->queued_replies);
+               if (new_cnt + cnt >= tx_vq->num && new_cnt < tx_vq->num)
+                       vhost_poll_queue(&tx_vq->poll);
+       }
+
+       return 0;
+}
+
 static struct virtio_vsock_pkt *
 vhost_vsock_alloc_pkt(struct vhost_virtqueue *vq,
                      unsigned int out, unsigned int in)
@@ -675,6 +715,7 @@ static struct virtio_transport vhost_transport = {
                .release                  = virtio_transport_release,
                .connect                  = virtio_transport_connect,
                .shutdown                 = virtio_transport_shutdown,
+               .cancel_pkt               = vhost_transport_cancel_pkt,
 
                .dgram_enqueue            = virtio_transport_dgram_enqueue,
                .dgram_dequeue            = virtio_transport_dgram_dequeue,
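vhost_transport_cancel_pkt() above collects the victim packets onto a private freeme list while holding send_pkt_list_lock and only frees them after dropping it, keeping the locked section short. The same collect-then-free shape, reduced to a self-contained sketch with an ordinary singly linked list instead of the kernel list_head API:

#include <stdio.h>
#include <stdlib.h>

struct pkt { int owner; struct pkt *next; };

int main(void)
{
        /* queue: owner 1 -> owner 2 -> owner 1 */
        struct pkt *c = malloc(sizeof(*c)); *c = (struct pkt){ 1, NULL };
        struct pkt *b = malloc(sizeof(*b)); *b = (struct pkt){ 2, c };
        struct pkt *queue = malloc(sizeof(*queue)); *queue = (struct pkt){ 1, b };
        struct pkt *freeme = NULL, **pp = &queue;

        /* lock(); -- detach owner 1's packets onto a private list */
        while (*pp) {
                if ((*pp)->owner == 1) {
                        struct pkt *victim = *pp;
                        *pp = victim->next;
                        victim->next = freeme;
                        freeme = victim;
                } else {
                        pp = &(*pp)->next;
                }
        }
        /* unlock(); -- now free at leisure, the lock is no longer held */
        while (freeme) {
                struct pkt *n = freeme->next;
                printf("freeing packet of owner %d\n", freeme->owner);
                free(freeme);
                freeme = n;
        }
        while (queue) {                 /* demo cleanup of the survivors */
                struct pkt *n = queue->next;
                free(queue);
                queue = n;
        }
        return 0;
}
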
index 4e1191508228cd86f6c3ee8174f4320c89e14686..34adf9b9c0538815db33f62ed842de49be5222e7 100644 (file)
@@ -242,11 +242,11 @@ static inline void update_stat(struct virtio_balloon *vb, int idx,
 
 #define pages_to_bytes(x) ((u64)(x) << PAGE_SHIFT)
 
-static void update_balloon_stats(struct virtio_balloon *vb)
+static unsigned int update_balloon_stats(struct virtio_balloon *vb)
 {
        unsigned long events[NR_VM_EVENT_ITEMS];
        struct sysinfo i;
-       int idx = 0;
+       unsigned int idx = 0;
        long available;
 
        all_vm_events(events);
@@ -254,18 +254,22 @@ static void update_balloon_stats(struct virtio_balloon *vb)
 
        available = si_mem_available();
 
+#ifdef CONFIG_VM_EVENT_COUNTERS
        update_stat(vb, idx++, VIRTIO_BALLOON_S_SWAP_IN,
                                pages_to_bytes(events[PSWPIN]));
        update_stat(vb, idx++, VIRTIO_BALLOON_S_SWAP_OUT,
                                pages_to_bytes(events[PSWPOUT]));
        update_stat(vb, idx++, VIRTIO_BALLOON_S_MAJFLT, events[PGMAJFAULT]);
        update_stat(vb, idx++, VIRTIO_BALLOON_S_MINFLT, events[PGFAULT]);
+#endif
        update_stat(vb, idx++, VIRTIO_BALLOON_S_MEMFREE,
                                pages_to_bytes(i.freeram));
        update_stat(vb, idx++, VIRTIO_BALLOON_S_MEMTOT,
                                pages_to_bytes(i.totalram));
        update_stat(vb, idx++, VIRTIO_BALLOON_S_AVAIL,
                                pages_to_bytes(available));
+
+       return idx;
 }
 
 /*
@@ -291,14 +295,14 @@ static void stats_handle_request(struct virtio_balloon *vb)
 {
        struct virtqueue *vq;
        struct scatterlist sg;
-       unsigned int len;
+       unsigned int len, num_stats;
 
-       update_balloon_stats(vb);
+       num_stats = update_balloon_stats(vb);
 
        vq = vb->stats_vq;
        if (!virtqueue_get_buf(vq, &len))
                return;
-       sg_init_one(&sg, vb->stats, sizeof(vb->stats));
+       sg_init_one(&sg, vb->stats, sizeof(vb->stats[0]) * num_stats);
        virtqueue_add_outbuf(vq, &sg, 1, vb, GFP_KERNEL);
        virtqueue_kick(vq);
 }
@@ -423,13 +427,16 @@ static int init_vqs(struct virtio_balloon *vb)
        vb->deflate_vq = vqs[1];
        if (virtio_has_feature(vb->vdev, VIRTIO_BALLOON_F_STATS_VQ)) {
                struct scatterlist sg;
+               unsigned int num_stats;
                vb->stats_vq = vqs[2];
 
                /*
                 * Prime this virtqueue with one buffer so the hypervisor can
                 * use it to signal us later (it can't be broken yet!).
                 */
-               sg_init_one(&sg, vb->stats, sizeof vb->stats);
+               num_stats = update_balloon_stats(vb);
+
+               sg_init_one(&sg, vb->stats, sizeof(vb->stats[0]) * num_stats);
                if (virtqueue_add_outbuf(vb->stats_vq, &sg, 1, vb, GFP_KERNEL)
                    < 0)
                        BUG();
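With CONFIG_VM_EVENT_COUNTERS disabled, only part of vb->stats is filled in, so both callers above now size the scatterlist from the returned count rather than from sizeof(vb->stats), never handing the host trailing uninitialized entries. The arithmetic, with illustrative types (the real entries are struct virtio_balloon_stat):

#include <stdio.h>

struct stat_entry { unsigned short tag; unsigned long long val; };

int main(void)
{
        struct stat_entry stats[7];     /* 7 possible balloon stats */
        unsigned int num_stats = 3;     /* e.g. VM event counters disabled */

        printf("whole array: %zu bytes, filled entries only: %zu bytes\n",
               sizeof(stats), sizeof(stats[0]) * num_stats);
        return 0;
}
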
index df548a6fb844f701d65301503d998a05e6d19703..590534910dc617836e18c91b6576410a0299de26 100644 (file)
@@ -147,7 +147,7 @@ static int vp_find_vqs_msix(struct virtio_device *vdev, unsigned nvqs,
 {
        struct virtio_pci_device *vp_dev = to_vp_device(vdev);
        const char *name = dev_name(&vp_dev->vdev.dev);
-       int i, err = -ENOMEM, allocated_vectors, nvectors;
+       int i, j, err = -ENOMEM, allocated_vectors, nvectors;
        unsigned flags = PCI_IRQ_MSIX;
        bool shared = false;
        u16 msix_vec;
@@ -212,7 +212,7 @@ static int vp_find_vqs_msix(struct virtio_device *vdev, unsigned nvqs,
        if (!vp_dev->msix_vector_map)
                goto out_disable_config_irq;
 
-       allocated_vectors = 1; /* vector 0 is the config interrupt */
+       allocated_vectors = j = 1; /* vector 0 is the config interrupt */
        for (i = 0; i < nvqs; ++i) {
                if (!names[i]) {
                        vqs[i] = NULL;
@@ -236,18 +236,19 @@ static int vp_find_vqs_msix(struct virtio_device *vdev, unsigned nvqs,
                        continue;
                }
 
-               snprintf(vp_dev->msix_names[i + 1],
+               snprintf(vp_dev->msix_names[j],
                         sizeof(*vp_dev->msix_names), "%s-%s",
                         dev_name(&vp_dev->vdev.dev), names[i]);
                err = request_irq(pci_irq_vector(vp_dev->pci_dev, msix_vec),
                                  vring_interrupt, IRQF_SHARED,
-                                 vp_dev->msix_names[i + 1], vqs[i]);
+                                 vp_dev->msix_names[j], vqs[i]);
                if (err) {
                        /* don't free this irq on error */
                        vp_dev->msix_vector_map[i] = VIRTIO_MSI_NO_VECTOR;
                        goto out_remove_vqs;
                }
                vp_dev->msix_vector_map[i] = msix_vec;
+               j++;
 
                /*
                 * Use a different vector for each queue if they are available,
index c77a0751a31173344de0c02c3f70d18ec259ca63..f3bf8f4e2d6cef09101b53aa9f1a69563b206287 100644 (file)
@@ -36,6 +36,7 @@
 #include <linux/spinlock.h>
 #include <linux/slab.h>
 #include <linux/highmem.h>
+#include <linux/refcount.h>
 
 #include <xen/xen.h>
 #include <xen/grant_table.h>
@@ -86,7 +87,7 @@ struct grant_map {
        int index;
        int count;
        int flags;
-       atomic_t users;
+       refcount_t users;
        struct unmap_notify notify;
        struct ioctl_gntdev_grant_ref *grants;
        struct gnttab_map_grant_ref   *map_ops;
@@ -166,7 +167,7 @@ static struct grant_map *gntdev_alloc_map(struct gntdev_priv *priv, int count)
 
        add->index = 0;
        add->count = count;
-       atomic_set(&add->users, 1);
+       refcount_set(&add->users, 1);
 
        return add;
 
@@ -212,7 +213,7 @@ static void gntdev_put_map(struct gntdev_priv *priv, struct grant_map *map)
        if (!map)
                return;
 
-       if (!atomic_dec_and_test(&map->users))
+       if (!refcount_dec_and_test(&map->users))
                return;
 
        atomic_sub(map->count, &pages_mapped);
@@ -400,7 +401,7 @@ static void gntdev_vma_open(struct vm_area_struct *vma)
        struct grant_map *map = vma->vm_private_data;
 
        pr_debug("gntdev_vma_open %p\n", vma);
-       atomic_inc(&map->users);
+       refcount_inc(&map->users);
 }
 
 static void gntdev_vma_close(struct vm_area_struct *vma)
@@ -1004,7 +1005,7 @@ static int gntdev_mmap(struct file *flip, struct vm_area_struct *vma)
                goto unlock_out;
        }
 
-       atomic_inc(&map->users);
+       refcount_inc(&map->users);
 
        vma->vm_ops = &gntdev_vmops;
 
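The gntdev conversion above is the standard atomic_t-to-refcount_t migration: refcount_set/refcount_inc/refcount_dec_and_test are drop-in replacements that saturate instead of wrapping and warn on increment-from-zero and underflow. A hedged kernel-style sketch of the same get/put shape around a hypothetical object (the refcount API itself is real, from <linux/refcount.h>):

#include <linux/refcount.h>
#include <linux/slab.h>

struct obj {
        refcount_t users;
};

static struct obj *obj_alloc(void)
{
        struct obj *o = kzalloc(sizeof(*o), GFP_KERNEL);

        if (o)
                refcount_set(&o->users, 1);     /* creator's reference */
        return o;
}

static void obj_get(struct obj *o)
{
        refcount_inc(&o->users);                /* warns on inc-from-zero */
}

static void obj_put(struct obj *o)
{
        if (refcount_dec_and_test(&o->users))   /* true only for last ref */
                kfree(o);
}
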
index f8afc6dcc29f2769694308092a4b543e5e0bed49..e8cef1ad0fe31e0139903399d70730c7eafdc399 100644 (file)
@@ -681,3 +681,50 @@ xen_swiotlb_set_dma_mask(struct device *dev, u64 dma_mask)
        return 0;
 }
 EXPORT_SYMBOL_GPL(xen_swiotlb_set_dma_mask);
+
+/*
+ * Create userspace mapping for the DMA-coherent memory.
+ * This function should be called with the pages from the current domain only;
+ * passing pages mapped from other domains would lead to memory corruption.
+ */
+int
+xen_swiotlb_dma_mmap(struct device *dev, struct vm_area_struct *vma,
+                    void *cpu_addr, dma_addr_t dma_addr, size_t size,
+                    unsigned long attrs)
+{
+#if defined(CONFIG_ARM) || defined(CONFIG_ARM64)
+       if (__generic_dma_ops(dev)->mmap)
+               return __generic_dma_ops(dev)->mmap(dev, vma, cpu_addr,
+                                                   dma_addr, size, attrs);
+#endif
+       return dma_common_mmap(dev, vma, cpu_addr, dma_addr, size);
+}
+EXPORT_SYMBOL_GPL(xen_swiotlb_dma_mmap);
+
+/*
+ * This function should be called with the pages from the current domain only;
+ * passing pages mapped from other domains would lead to memory corruption.
+ */
+int
+xen_swiotlb_get_sgtable(struct device *dev, struct sg_table *sgt,
+                       void *cpu_addr, dma_addr_t handle, size_t size,
+                       unsigned long attrs)
+{
+#if defined(CONFIG_ARM) || defined(CONFIG_ARM64)
+       if (__generic_dma_ops(dev)->get_sgtable) {
+#if 0
+       /*
+        * This check verifies that the page belongs to the current domain and
+        * is not one mapped from another domain.
+        * This check is for debug only and should not go into production builds.
+        */
+               unsigned long bfn = PHYS_PFN(dma_to_phys(dev, handle));
+               BUG_ON (!page_is_ram(bfn));
+#endif
+               return __generic_dma_ops(dev)->get_sgtable(dev, sgt, cpu_addr,
+                                                          handle, size, attrs);
+       }
+#endif
+       return dma_common_get_sgtable(dev, sgt, cpu_addr, handle, size);
+}
+EXPORT_SYMBOL_GPL(xen_swiotlb_get_sgtable);
index 4ce10bcca18b1f600c351675142240dbe94a4022..23e391d3ec015d0c5b38b21619898c282826f59c 100644 (file)
 #include <linux/init.h>
 #include <linux/module.h>
 #include <linux/types.h>
+#include <linux/syscore_ops.h>
 #include <linux/acpi.h>
 #include <acpi/processor.h>
 #include <xen/xen.h>
-#include <xen/xen-ops.h>
 #include <xen/interface/platform.h>
 #include <asm/xen/hypercall.h>
 
@@ -408,7 +408,7 @@ static int check_acpi_ids(struct acpi_processor *pr_backup)
        acpi_walk_namespace(ACPI_TYPE_PROCESSOR, ACPI_ROOT_OBJECT,
                            ACPI_UINT32_MAX,
                            read_acpi_id, NULL, NULL, NULL);
-       acpi_get_devices("ACPI0007", read_acpi_id, NULL, NULL);
+       acpi_get_devices(ACPI_PROCESSOR_DEVICE_HID, read_acpi_id, NULL, NULL);
 
 upload:
        if (!bitmap_equal(acpi_id_present, acpi_ids_done, nr_acpi_bits)) {
@@ -466,15 +466,33 @@ static int xen_upload_processor_pm_data(void)
        return rc;
 }
 
-static int xen_acpi_processor_resume(struct notifier_block *nb,
-                                    unsigned long action, void *data)
+static void xen_acpi_processor_resume_worker(struct work_struct *dummy)
 {
+       int rc;
+
        bitmap_zero(acpi_ids_done, nr_acpi_bits);
-       return xen_upload_processor_pm_data();
+
+       rc = xen_upload_processor_pm_data();
+       if (rc != 0)
+               pr_info("ACPI data upload failed, error = %d\n", rc);
+}
+
+static void xen_acpi_processor_resume(void)
+{
+       static DECLARE_WORK(wq, xen_acpi_processor_resume_worker);
+
+       /*
+        * xen_upload_processor_pm_data() calls non-atomic code.
+        * However, xen_acpi_processor_resume is a syscore callback, run
+        * with only the boot CPU online and in an atomic context.
+        *
+        * So defer the upload to a safer point.
+        */
+       schedule_work(&wq);
 }
 
-struct notifier_block xen_acpi_processor_resume_nb = {
-       .notifier_call = xen_acpi_processor_resume,
+static struct syscore_ops xap_syscore_ops = {
+       .resume = xen_acpi_processor_resume,
 };
 
 static int __init xen_acpi_processor_init(void)
@@ -527,7 +545,7 @@ static int __init xen_acpi_processor_init(void)
        if (rc)
                goto err_unregister;
 
-       xen_resume_notifier_register(&xen_acpi_processor_resume_nb);
+       register_syscore_ops(&xap_syscore_ops);
 
        return 0;
 err_unregister:
@@ -544,7 +562,7 @@ static void __exit xen_acpi_processor_exit(void)
 {
        int i;
 
-       xen_resume_notifier_unregister(&xen_acpi_processor_resume_nb);
+       unregister_syscore_ops(&xap_syscore_ops);
        kfree(acpi_ids_done);
        kfree(acpi_id_present);
        kfree(acpi_id_cst_present);
index 4d343eed08f51e1a3d2a0628dccb256b95858fae..1f4733b80c877426fa337e67eebf708b5fb9b41c 100644 (file)
@@ -55,7 +55,6 @@
 #include <linux/string.h>
 #include <linux/slab.h>
 #include <linux/miscdevice.h>
-#include <linux/init.h>
 
 #include <xen/xenbus.h>
 #include <xen/xen.h>
index b29447e03ede0d638950fa0dd64d908004156ea6..25d404d22caebcfd6b6b60d6287e36258f1185eb 100644 (file)
@@ -362,7 +362,7 @@ static void afs_callback_updater(struct work_struct *work)
 {
        struct afs_server *server;
        struct afs_vnode *vnode, *xvnode;
-       time_t now;
+       time64_t now;
        long timeout;
        int ret;
 
@@ -370,7 +370,7 @@ static void afs_callback_updater(struct work_struct *work)
 
        _enter("");
 
-       now = get_seconds();
+       now = ktime_get_real_seconds();
 
        /* find the first vnode to update */
        spin_lock(&server->cb_lock);
@@ -424,7 +424,8 @@ static void afs_callback_updater(struct work_struct *work)
 
        /* and then reschedule */
        _debug("reschedule");
-       vnode->update_at = get_seconds() + afs_vnode_update_timeout;
+       vnode->update_at = ktime_get_real_seconds() +
+                       afs_vnode_update_timeout;
 
        spin_lock(&server->cb_lock);
 
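The afs changes above are y2038 hardening: storing seconds in a 32-bit time_t overflows in January 2038, while ktime_get_real_seconds() returns a time64_t on every architecture. A quick standalone reminder of where the signed 32-bit counter runs out:

#include <stdio.h>
#include <stdint.h>
#include <time.h>

int main(void)
{
        time_t last = (time_t)INT32_MAX;        /* 2147483647 seconds */
        printf("32-bit time_t ends at: %s", ctime(&last));

        int64_t after = (int64_t)INT32_MAX + 1; /* fine as 64-bit seconds */
        printf("as 64-bit seconds, %lld is still representable\n",
               (long long)after);
        return 0;
}
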
index 2edbdcbf6432add190464b5a5f414592953c944a..3062cceb5c2aebcc4a15e3c52d1b26ecea82f20d 100644 (file)
@@ -187,7 +187,6 @@ static int afs_deliver_cb_callback(struct afs_call *call)
        struct afs_callback *cb;
        struct afs_server *server;
        __be32 *bp;
-       u32 tmp;
        int ret, loop;
 
        _enter("{%u}", call->unmarshall);
@@ -249,9 +248,9 @@ static int afs_deliver_cb_callback(struct afs_call *call)
                if (ret < 0)
                        return ret;
 
-               tmp = ntohl(call->tmp);
-               _debug("CB count: %u", tmp);
-               if (tmp != call->count && tmp != 0)
+               call->count2 = ntohl(call->tmp);
+               _debug("CB count: %u", call->count2);
+               if (call->count2 != call->count && call->count2 != 0)
                        return -EBADMSG;
                call->offset = 0;
                call->unmarshall++;
@@ -259,14 +258,14 @@ static int afs_deliver_cb_callback(struct afs_call *call)
        case 4:
                _debug("extract CB array");
                ret = afs_extract_data(call, call->buffer,
-                                      call->count * 3 * 4, false);
+                                      call->count2 * 3 * 4, false);
                if (ret < 0)
                        return ret;
 
                _debug("unmarshall CB array");
                cb = call->request;
                bp = call->buffer;
-               for (loop = call->count; loop > 0; loop--, cb++) {
+               for (loop = call->count2; loop > 0; loop--, cb++) {
                        cb->version     = ntohl(*bp++);
                        cb->expiry      = ntohl(*bp++);
                        cb->type        = ntohl(*bp++);
index ba7b71fba34bcc4cd5f8b8a305ace06a388ac607..0d5b8508869bf0642a88d4c87b3feb49c1fab433 100644 (file)
@@ -30,6 +30,7 @@ static int afs_readpages(struct file *filp, struct address_space *mapping,
 
 const struct file_operations afs_file_operations = {
        .open           = afs_open,
+       .flush          = afs_flush,
        .release        = afs_release,
        .llseek         = generic_file_llseek,
        .read_iter      = generic_file_read_iter,
@@ -184,10 +185,13 @@ int afs_page_filler(void *data, struct page *page)
                if (!req)
                        goto enomem;
 
+               /* We request a full page.  If the page is a partial one at the
+                * end of the file, the server will return a short read and the
+                * unmarshalling code will clear the unfilled space.
+                */
                atomic_set(&req->usage, 1);
                req->pos = (loff_t)page->index << PAGE_SHIFT;
-               req->len = min_t(size_t, i_size_read(inode) - req->pos,
-                                PAGE_SIZE);
+               req->len = PAGE_SIZE;
                req->nr_pages = 1;
                req->pages[0] = page;
                get_page(page);
@@ -208,7 +212,13 @@ int afs_page_filler(void *data, struct page *page)
                        fscache_uncache_page(vnode->cache, page);
 #endif
                        BUG_ON(PageFsCache(page));
-                       goto error;
+
+                       if (ret == -EINTR ||
+                           ret == -ENOMEM ||
+                           ret == -ERESTARTSYS ||
+                           ret == -EAGAIN)
+                               goto error;
+                       goto io_error;
                }
 
                SetPageUptodate(page);
@@ -227,10 +237,12 @@ int afs_page_filler(void *data, struct page *page)
        _leave(" = 0");
        return 0;
 
+io_error:
+       SetPageError(page);
+       goto error;
 enomem:
        ret = -ENOMEM;
 error:
-       SetPageError(page);
        unlock_page(page);
        _leave(" = %d", ret);
        return ret;
index ac8e766978dc440e8690fbf44333d41f9894f92a..19f76ae36982df43be740c1bf73d396b1a81c77c 100644 (file)
 #include "internal.h"
 #include "afs_fs.h"
 
+/*
+ * We need somewhere to discard into in case the server helpfully returns more
+ * than we asked for in FS.FetchData{,64}.
+ */
+static u8 afs_discard_buffer[64];
+
 /*
  * decode an AFSFid block
  */
@@ -105,7 +111,7 @@ static void xdr_decode_AFSFetchStatus(const __be32 **_bp,
                        vnode->vfs_inode.i_mode = mode;
                }
 
-               vnode->vfs_inode.i_ctime.tv_sec = status->mtime_server;
+               vnode->vfs_inode.i_ctime.tv_sec = status->mtime_client;
                vnode->vfs_inode.i_mtime        = vnode->vfs_inode.i_ctime;
                vnode->vfs_inode.i_atime        = vnode->vfs_inode.i_ctime;
                vnode->vfs_inode.i_version      = data_version;
@@ -139,7 +145,7 @@ static void xdr_decode_AFSCallBack(const __be32 **_bp, struct afs_vnode *vnode)
        vnode->cb_version       = ntohl(*bp++);
        vnode->cb_expiry        = ntohl(*bp++);
        vnode->cb_type          = ntohl(*bp++);
-       vnode->cb_expires       = vnode->cb_expiry + get_seconds();
+       vnode->cb_expires       = vnode->cb_expiry + ktime_get_real_seconds();
        *_bp = bp;
 }
 
@@ -315,7 +321,7 @@ static int afs_deliver_fs_fetch_data(struct afs_call *call)
        void *buffer;
        int ret;
 
-       _enter("{%u,%zu/%u;%u/%llu}",
+       _enter("{%u,%zu/%u;%llu/%llu}",
               call->unmarshall, call->offset, call->count,
               req->remain, req->actual_len);
 
@@ -353,12 +359,6 @@ static int afs_deliver_fs_fetch_data(struct afs_call *call)
 
                req->actual_len |= ntohl(call->tmp);
                _debug("DATA length: %llu", req->actual_len);
-               /* Check that the server didn't want to send us extra.  We
-                * might want to just discard instead, but that requires
-                * cooperation from AF_RXRPC.
-                */
-               if (req->actual_len > req->len)
-                       return -EBADMSG;
 
                req->remain = req->actual_len;
                call->offset = req->pos & (PAGE_SIZE - 1);
@@ -368,6 +368,7 @@ static int afs_deliver_fs_fetch_data(struct afs_call *call)
                call->unmarshall++;
 
        begin_page:
+               ASSERTCMP(req->index, <, req->nr_pages);
                if (req->remain > PAGE_SIZE - call->offset)
                        size = PAGE_SIZE - call->offset;
                else
@@ -378,7 +379,7 @@ static int afs_deliver_fs_fetch_data(struct afs_call *call)
 
                /* extract the returned data */
        case 3:
-               _debug("extract data %u/%llu %zu/%u",
+               _debug("extract data %llu/%llu %zu/%u",
                       req->remain, req->actual_len, call->offset, call->count);
 
                buffer = kmap(req->pages[req->index]);
@@ -389,19 +390,40 @@ static int afs_deliver_fs_fetch_data(struct afs_call *call)
                if (call->offset == PAGE_SIZE) {
                        if (req->page_done)
                                req->page_done(call, req);
+                       req->index++;
                        if (req->remain > 0) {
-                               req->index++;
                                call->offset = 0;
+                               if (req->index >= req->nr_pages) {
+                                       call->unmarshall = 4;
+                                       goto begin_discard;
+                               }
                                goto begin_page;
                        }
                }
+               goto no_more_data;
+
+               /* Discard any excess data the server gave us */
+       begin_discard:
+       case 4:
+               size = min_t(loff_t, sizeof(afs_discard_buffer), req->remain);
+               call->count = size;
+               _debug("extract discard %llu/%llu %zu/%u",
+                      req->remain, req->actual_len, call->offset, call->count);
+
+               call->offset = 0;
+               ret = afs_extract_data(call, afs_discard_buffer, call->count, true);
+               req->remain -= call->offset;
+               if (ret < 0)
+                       return ret;
+               if (req->remain > 0)
+                       goto begin_discard;
 
        no_more_data:
                call->offset = 0;
-               call->unmarshall++;
+               call->unmarshall = 5;
 
                /* extract the metadata */
-       case 4:
+       case 5:
                ret = afs_extract_data(call, call->buffer,
                                       (21 + 3 + 6) * 4, false);
                if (ret < 0)
@@ -416,16 +438,17 @@ static int afs_deliver_fs_fetch_data(struct afs_call *call)
                call->offset = 0;
                call->unmarshall++;
 
-       case 5:
+       case 6:
                break;
        }
 
-       if (call->count < PAGE_SIZE) {
-               buffer = kmap(req->pages[req->index]);
-               memset(buffer + call->count, 0, PAGE_SIZE - call->count);
-               kunmap(req->pages[req->index]);
+       for (; req->index < req->nr_pages; req->index++) {
+               if (call->count < PAGE_SIZE)
+                       zero_user_segment(req->pages[req->index],
+                                         call->count, PAGE_SIZE);
                if (req->page_done)
                        req->page_done(call, req);
+               call->count = 0;
        }
 
        _leave(" = 0 [done]");
@@ -711,8 +734,8 @@ int afs_fs_create(struct afs_server *server,
                memset(bp, 0, padsz);
                bp = (void *) bp + padsz;
        }
-       *bp++ = htonl(AFS_SET_MODE);
-       *bp++ = 0; /* mtime */
+       *bp++ = htonl(AFS_SET_MODE | AFS_SET_MTIME);
+       *bp++ = htonl(vnode->vfs_inode.i_mtime.tv_sec); /* mtime */
        *bp++ = 0; /* owner */
        *bp++ = 0; /* group */
        *bp++ = htonl(mode & S_IALLUGO); /* unix mode */
@@ -980,8 +1003,8 @@ int afs_fs_symlink(struct afs_server *server,
                memset(bp, 0, c_padsz);
                bp = (void *) bp + c_padsz;
        }
-       *bp++ = htonl(AFS_SET_MODE);
-       *bp++ = 0; /* mtime */
+       *bp++ = htonl(AFS_SET_MODE | AFS_SET_MTIME);
+       *bp++ = htonl(vnode->vfs_inode.i_mtime.tv_sec); /* mtime */
        *bp++ = 0; /* owner */
        *bp++ = 0; /* group */
        *bp++ = htonl(S_IRWXUGO); /* unix mode */
@@ -1180,8 +1203,8 @@ static int afs_fs_store_data64(struct afs_server *server,
        *bp++ = htonl(vnode->fid.vnode);
        *bp++ = htonl(vnode->fid.unique);
 
-       *bp++ = 0; /* mask */
-       *bp++ = 0; /* mtime */
+       *bp++ = htonl(AFS_SET_MTIME); /* mask */
+       *bp++ = htonl(vnode->vfs_inode.i_mtime.tv_sec); /* mtime */
        *bp++ = 0; /* owner */
        *bp++ = 0; /* group */
        *bp++ = 0; /* unix mode */
@@ -1213,7 +1236,7 @@ int afs_fs_store_data(struct afs_server *server, struct afs_writeback *wb,
        _enter(",%x,{%x:%u},,",
               key_serial(wb->key), vnode->fid.vid, vnode->fid.vnode);
 
-       size = to - offset;
+       size = (loff_t)to - (loff_t)offset;
        if (first != last)
                size += (loff_t)(last - first) << PAGE_SHIFT;
        pos = (loff_t)first << PAGE_SHIFT;
@@ -1257,8 +1280,8 @@ int afs_fs_store_data(struct afs_server *server, struct afs_writeback *wb,
        *bp++ = htonl(vnode->fid.vnode);
        *bp++ = htonl(vnode->fid.unique);
 
-       *bp++ = 0; /* mask */
-       *bp++ = 0; /* mtime */
+       *bp++ = htonl(AFS_SET_MTIME); /* mask */
+       *bp++ = htonl(vnode->vfs_inode.i_mtime.tv_sec); /* mtime */
        *bp++ = 0; /* owner */
        *bp++ = 0; /* group */
        *bp++ = 0; /* unix mode */
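The create, symlink and store-data hunks above all make the same change: the StoreStatus mask gains AFS_SET_MTIME and the client's mtime is marshalled as a network-order 32-bit word instead of zero. A hedged sketch of that XDR-style packing in plain C; the SET_MTIME bit value and the helper are illustrative, the real AFS_SET_* masks are protocol-defined:

#include <arpa/inet.h>  /* htonl() */
#include <stdint.h>

#define SET_MTIME 0x1   /* assumed bit value, stands in for AFS_SET_MTIME */

/* Pack a StoreStatus-like record as big-endian 32-bit words, setting
 * only the mtime field, as the hunks above now do. */
static uint32_t *pack_store_status(uint32_t *bp, uint32_t mtime_sec)
{
        *bp++ = htonl(SET_MTIME);       /* mask: only mtime is being set */
        *bp++ = htonl(mtime_sec);       /* mtime, seconds since the epoch */
        *bp++ = 0;                      /* owner: unchanged */
        *bp++ = 0;                      /* group: unchanged */
        *bp++ = 0;                      /* unix mode: unchanged */
        return bp;
}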
index 1e4897a048d2ee0dee49b613f22336b7118ff9f8..aae55dd151087e16f123adc0ebe51e47e393b297 100644 (file)
@@ -54,8 +54,21 @@ static int afs_inode_map_status(struct afs_vnode *vnode, struct key *key)
                inode->i_fop    = &afs_dir_file_operations;
                break;
        case AFS_FTYPE_SYMLINK:
-               inode->i_mode   = S_IFLNK | vnode->status.mode;
-               inode->i_op     = &page_symlink_inode_operations;
+               /* Symlinks with a mode of 0644 are actually mountpoints. */
+               if ((vnode->status.mode & 0777) == 0644) {
+                       inode->i_flags |= S_AUTOMOUNT;
+
+                       spin_lock(&vnode->lock);
+                       set_bit(AFS_VNODE_MOUNTPOINT, &vnode->flags);
+                       spin_unlock(&vnode->lock);
+
+                       inode->i_mode   = S_IFDIR | 0555;
+                       inode->i_op     = &afs_mntpt_inode_operations;
+                       inode->i_fop    = &afs_mntpt_file_operations;
+               } else {
+                       inode->i_mode   = S_IFLNK | vnode->status.mode;
+                       inode->i_op     = &page_symlink_inode_operations;
+               }
                inode_nohighmem(inode);
                break;
        default:
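The hunk above replaces content-sniffing of the symlink body (see the afs_mntpt_check_symlink() removal further down) with a mode heuristic: AFS servers store mountpoints as symlinks with mode 0644, while genuine symlinks carry other modes. The test in isolation, mirroring the constants in the hunk:

#include <stdbool.h>
#include <sys/stat.h>

/* Mirrors the check in afs_inode_map_status(): an AFS "symlink" whose
 * permission bits are exactly 0644 is really a mountpoint. */
static bool afs_symlink_is_mountpoint(mode_t mode)
{
        return (mode & 0777) == 0644;
}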
@@ -70,27 +83,15 @@ static int afs_inode_map_status(struct afs_vnode *vnode, struct key *key)
 
        set_nlink(inode, vnode->status.nlink);
        inode->i_uid            = vnode->status.owner;
-       inode->i_gid            = GLOBAL_ROOT_GID;
+       inode->i_gid            = vnode->status.group;
        inode->i_size           = vnode->status.size;
-       inode->i_ctime.tv_sec   = vnode->status.mtime_server;
+       inode->i_ctime.tv_sec   = vnode->status.mtime_client;
        inode->i_ctime.tv_nsec  = 0;
        inode->i_atime          = inode->i_mtime = inode->i_ctime;
        inode->i_blocks         = 0;
        inode->i_generation     = vnode->fid.unique;
        inode->i_version        = vnode->status.data_version;
        inode->i_mapping->a_ops = &afs_fs_aops;
-
-       /* check to see whether a symbolic link is really a mountpoint */
-       if (vnode->status.type == AFS_FTYPE_SYMLINK) {
-               afs_mntpt_check_symlink(vnode, key);
-
-               if (test_bit(AFS_VNODE_MOUNTPOINT, &vnode->flags)) {
-                       inode->i_mode   = S_IFDIR | vnode->status.mode;
-                       inode->i_op     = &afs_mntpt_inode_operations;
-                       inode->i_fop    = &afs_mntpt_file_operations;
-               }
-       }
-
        return 0;
 }
 
@@ -245,12 +246,13 @@ struct inode *afs_iget(struct super_block *sb, struct key *key,
                        vnode->cb_version = 0;
                        vnode->cb_expiry = 0;
                        vnode->cb_type = 0;
-                       vnode->cb_expires = get_seconds();
+                       vnode->cb_expires = ktime_get_real_seconds();
                } else {
                        vnode->cb_version = cb->version;
                        vnode->cb_expiry = cb->expiry;
                        vnode->cb_type = cb->type;
-                       vnode->cb_expires = vnode->cb_expiry + get_seconds();
+                       vnode->cb_expires = vnode->cb_expiry +
+                               ktime_get_real_seconds();
                }
        }
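This hunk and the later get_seconds() conversions below move the expiry bookkeeping to ktime_get_real_seconds(), which returns time64_t on every architecture, whereas get_seconds() returns a 32-bit value on 32-bit machines. A runnable userspace analogue of the overflow being avoided; clock_gettime() and int64_t stand in for the kernel API:

#include <stdint.h>
#include <stdio.h>
#include <time.h>

int main(void)
{
        struct timespec ts;

        /* Real-time seconds held in 64 bits stay valid past 2038,
         * unlike a 32-bit time_t, which wraps on 2038-01-19. */
        clock_gettime(CLOCK_REALTIME, &ts);
        int64_t expires = (int64_t)ts.tv_sec + 10 * 60;  /* +10 minutes */

        printf("callback expires at %lld\n", (long long)expires);
        return 0;
}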
 
@@ -323,7 +325,7 @@ int afs_validate(struct afs_vnode *vnode, struct key *key)
            !test_bit(AFS_VNODE_CB_BROKEN, &vnode->flags) &&
            !test_bit(AFS_VNODE_MODIFIED, &vnode->flags) &&
            !test_bit(AFS_VNODE_ZAP_DATA, &vnode->flags)) {
-               if (vnode->cb_expires < get_seconds() + 10) {
+               if (vnode->cb_expires < ktime_get_real_seconds() + 10) {
                        _debug("callback expired");
                        set_bit(AFS_VNODE_CB_BROKEN, &vnode->flags);
                } else {
@@ -444,7 +446,7 @@ void afs_evict_inode(struct inode *inode)
 
        mutex_lock(&vnode->permits_lock);
        permits = vnode->permits;
-       rcu_assign_pointer(vnode->permits, NULL);
+       RCU_INIT_POINTER(vnode->permits, NULL);
        mutex_unlock(&vnode->permits_lock);
        if (permits)
                call_rcu(&permits->rcu, afs_zap_permits);
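This hunk and the matching one in afs_clear_permits() below swap rcu_assign_pointer() for RCU_INIT_POINTER(): storing NULL publishes no newly initialised data, so the release barrier that rcu_assign_pointer() implies buys nothing. A userspace analogue of the distinction using C11 atomics:

#include <stdatomic.h>
#include <stddef.h>

struct permits { int count; };

static _Atomic(struct permits *) shared;

/* Publish: a reader that sees the pointer must also see *p, so the
 * store needs release ordering (cf. rcu_assign_pointer()). */
static void publish(struct permits *p)
{
        atomic_store_explicit(&shared, p, memory_order_release);
}

/* Clear: NULL carries no payload, so no ordering is required
 * (cf. RCU_INIT_POINTER(..., NULL)). */
static void clear(void)
{
        atomic_store_explicit(&shared, NULL, memory_order_relaxed);
}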
index 5dfa56903a2d4b6ff058160ef973efaaa5e690d8..a6901360fb81d435bf47a85b781a89a1056fd900 100644 (file)
@@ -11,6 +11,7 @@
 
 #include <linux/compiler.h>
 #include <linux/kernel.h>
+#include <linux/ktime.h>
 #include <linux/fs.h>
 #include <linux/pagemap.h>
 #include <linux/rxrpc.h>
@@ -90,7 +91,10 @@ struct afs_call {
        unsigned                request_size;   /* size of request data */
        unsigned                reply_max;      /* maximum size of reply */
        unsigned                first_offset;   /* offset into mapping[first] */
-       unsigned                last_to;        /* amount of mapping[last] */
+       union {
+               unsigned        last_to;        /* amount of mapping[last] */
+               unsigned        count2;         /* count used in unmarshalling */
+       };
        unsigned char           unmarshall;     /* unmarshalling phase */
        bool                    incoming;       /* T if incoming call */
        bool                    send_pages;     /* T if data from mapping should be sent */
@@ -127,12 +131,11 @@ struct afs_call_type {
  */
 struct afs_read {
        loff_t                  pos;            /* Where to start reading */
-       loff_t                  len;            /* How much to read */
+       loff_t                  len;            /* How much we're asking for */
        loff_t                  actual_len;     /* How much we're actually getting */
+       loff_t                  remain;         /* Amount remaining */
        atomic_t                usage;
-       unsigned int            remain;         /* Amount remaining */
        unsigned int            index;          /* Which page we're reading into */
-       unsigned int            pg_offset;      /* Offset in page we're at */
        unsigned int            nr_pages;
        void (*page_done)(struct afs_call *, struct afs_read *);
        struct page             *pages[];
@@ -247,7 +250,7 @@ struct afs_cache_vhash {
  */
 struct afs_vlocation {
        atomic_t                usage;
-       time_t                  time_of_death;  /* time at which put reduced usage to 0 */
+       time64_t                time_of_death;  /* time at which put reduced usage to 0 */
        struct list_head        link;           /* link in cell volume location list */
        struct list_head        grave;          /* link in master graveyard list */
        struct list_head        update;         /* link in master update list */
@@ -258,7 +261,7 @@ struct afs_vlocation {
        struct afs_cache_vlocation vldb;        /* volume information DB record */
        struct afs_volume       *vols[3];       /* volume access record pointer (index by type) */
        wait_queue_head_t       waitq;          /* status change waitqueue */
-       time_t                  update_at;      /* time at which record should be updated */
+       time64_t                update_at;      /* time at which record should be updated */
        spinlock_t              lock;           /* access lock */
        afs_vlocation_state_t   state;          /* volume location state */
        unsigned short          upd_rej_cnt;    /* ENOMEDIUM count during update */
@@ -271,7 +274,7 @@ struct afs_vlocation {
  */
 struct afs_server {
        atomic_t                usage;
-       time_t                  time_of_death;  /* time at which put reduced usage to 0 */
+       time64_t                time_of_death;  /* time at which put reduced usage to 0 */
        struct in_addr          addr;           /* server address */
        struct afs_cell         *cell;          /* cell in which server resides */
        struct list_head        link;           /* link in cell's server list */
@@ -374,8 +377,8 @@ struct afs_vnode {
        struct rb_node          server_rb;      /* link in server->fs_vnodes */
        struct rb_node          cb_promise;     /* link in server->cb_promises */
        struct work_struct      cb_broken_work; /* work to be done on callback break */
-       time_t                  cb_expires;     /* time at which callback expires */
-       time_t                  cb_expires_at;  /* time used to order cb_promise */
+       time64_t                cb_expires;     /* time at which callback expires */
+       time64_t                cb_expires_at;  /* time used to order cb_promise */
        unsigned                cb_version;     /* callback version */
        unsigned                cb_expiry;      /* callback expiry time */
        afs_callback_type_t     cb_type;        /* type of callback */
@@ -557,7 +560,6 @@ extern const struct inode_operations afs_autocell_inode_operations;
 extern const struct file_operations afs_mntpt_file_operations;
 
 extern struct vfsmount *afs_d_automount(struct path *);
-extern int afs_mntpt_check_symlink(struct afs_vnode *, struct key *);
 extern void afs_mntpt_kill_timer(void);
 
 /*
@@ -718,6 +720,7 @@ extern int afs_writepages(struct address_space *, struct writeback_control *);
 extern void afs_pages_written_back(struct afs_vnode *, struct afs_call *);
 extern ssize_t afs_file_write(struct kiocb *, struct iov_iter *);
 extern int afs_writeback_all(struct afs_vnode *);
+extern int afs_flush(struct file *, fl_owner_t);
 extern int afs_fsync(struct file *, loff_t, loff_t, int);
 
 
index 91ea1aa0d8b3ab0a817b525e9f9b3deec98f775f..100b207efc9eaddff4ed9f7e0e4415ed62ba2880 100644 (file)
@@ -84,6 +84,8 @@ int afs_abort_to_error(u32 abort_code)
        case RXKADDATALEN:      return -EKEYREJECTED;
        case RXKADILLEGALLEVEL: return -EKEYREJECTED;
 
+       case RXGEN_OPCODE:      return -ENOTSUPP;
+
        default:                return -EREMOTEIO;
        }
 }
index d4fb0afc0097d4947d3c2013cf27f521b055d423..bd3b65cde282a24769f7c549c9fe52c85b6c8e4e 100644 (file)
@@ -46,59 +46,6 @@ static DECLARE_DELAYED_WORK(afs_mntpt_expiry_timer, afs_mntpt_expiry_timed_out);
 
 static unsigned long afs_mntpt_expiry_timeout = 10 * 60;
 
-/*
- * check a symbolic link to see whether it actually encodes a mountpoint
- * - sets the AFS_VNODE_MOUNTPOINT flag on the vnode appropriately
- */
-int afs_mntpt_check_symlink(struct afs_vnode *vnode, struct key *key)
-{
-       struct page *page;
-       size_t size;
-       char *buf;
-       int ret;
-
-       _enter("{%x:%u,%u}",
-              vnode->fid.vid, vnode->fid.vnode, vnode->fid.unique);
-
-       /* read the contents of the symlink into the pagecache */
-       page = read_cache_page(AFS_VNODE_TO_I(vnode)->i_mapping, 0,
-                              afs_page_filler, key);
-       if (IS_ERR(page)) {
-               ret = PTR_ERR(page);
-               goto out;
-       }
-
-       ret = -EIO;
-       if (PageError(page))
-               goto out_free;
-
-       buf = kmap(page);
-
-       /* examine the symlink's contents */
-       size = vnode->status.size;
-       _debug("symlink to %*.*s", (int) size, (int) size, buf);
-
-       if (size > 2 &&
-           (buf[0] == '%' || buf[0] == '#') &&
-           buf[size - 1] == '.'
-           ) {
-               _debug("symlink is a mountpoint");
-               spin_lock(&vnode->lock);
-               set_bit(AFS_VNODE_MOUNTPOINT, &vnode->flags);
-               vnode->vfs_inode.i_flags |= S_AUTOMOUNT;
-               spin_unlock(&vnode->lock);
-       }
-
-       ret = 0;
-
-       kunmap(page);
-out_free:
-       put_page(page);
-out:
-       _leave(" = %d", ret);
-       return ret;
-}
-
 /*
  * no valid lookup procedure on this sort of dir
  */
index 419ef05dcb5ec7149a3a0b5de657c75bbc6eabb4..d5990eb160bdf49a3a916c0195d04fbe521ef2b2 100644 (file)
@@ -259,67 +259,74 @@ void afs_flat_call_destructor(struct afs_call *call)
        call->buffer = NULL;
 }
 
+#define AFS_BVEC_MAX 8
+
+/*
+ * Load the given bvec with the next few pages.
+ */
+static void afs_load_bvec(struct afs_call *call, struct msghdr *msg,
+                         struct bio_vec *bv, pgoff_t first, pgoff_t last,
+                         unsigned offset)
+{
+       struct page *pages[AFS_BVEC_MAX];
+       unsigned int nr, n, i, to, bytes = 0;
+
+       nr = min_t(pgoff_t, last - first + 1, AFS_BVEC_MAX);
+       n = find_get_pages_contig(call->mapping, first, nr, pages);
+       ASSERTCMP(n, ==, nr);
+
+       msg->msg_flags |= MSG_MORE;
+       for (i = 0; i < nr; i++) {
+               to = PAGE_SIZE;
+               if (first + i >= last) {
+                       to = call->last_to;
+                       msg->msg_flags &= ~MSG_MORE;
+               }
+               bv[i].bv_page = pages[i];
+               bv[i].bv_len = to - offset;
+               bv[i].bv_offset = offset;
+               bytes += to - offset;
+               offset = 0;
+       }
+
+       iov_iter_bvec(&msg->msg_iter, WRITE | ITER_BVEC, bv, nr, bytes);
+}
+
 /*
  * attach the data from a bunch of pages on an inode to a call
  */
 static int afs_send_pages(struct afs_call *call, struct msghdr *msg)
 {
-       struct page *pages[8];
-       unsigned count, n, loop, offset, to;
+       struct bio_vec bv[AFS_BVEC_MAX];
+       unsigned int bytes, nr, loop, offset;
        pgoff_t first = call->first, last = call->last;
        int ret;
 
-       _enter("");
-
        offset = call->first_offset;
        call->first_offset = 0;
 
        do {
-               _debug("attach %lx-%lx", first, last);
-
-               count = last - first + 1;
-               if (count > ARRAY_SIZE(pages))
-                       count = ARRAY_SIZE(pages);
-               n = find_get_pages_contig(call->mapping, first, count, pages);
-               ASSERTCMP(n, ==, count);
-
-               loop = 0;
-               do {
-                       struct bio_vec bvec = {.bv_page = pages[loop],
-                                              .bv_offset = offset};
-                       msg->msg_flags = 0;
-                       to = PAGE_SIZE;
-                       if (first + loop >= last)
-                               to = call->last_to;
-                       else
-                               msg->msg_flags = MSG_MORE;
-                       bvec.bv_len = to - offset;
-                       offset = 0;
-
-                       _debug("- range %u-%u%s",
-                              offset, to, msg->msg_flags ? " [more]" : "");
-                       iov_iter_bvec(&msg->msg_iter, WRITE | ITER_BVEC,
-                                     &bvec, 1, to - offset);
-
-                       /* have to change the state *before* sending the last
-                        * packet as RxRPC might give us the reply before it
-                        * returns from sending the request */
-                       if (first + loop >= last)
-                               call->state = AFS_CALL_AWAIT_REPLY;
-                       ret = rxrpc_kernel_send_data(afs_socket, call->rxcall,
-                                                    msg, to - offset);
-                       if (ret < 0)
-                               break;
-               } while (++loop < count);
-               first += count;
-
-               for (loop = 0; loop < count; loop++)
-                       put_page(pages[loop]);
+               afs_load_bvec(call, msg, bv, first, last, offset);
+               offset = 0;
+               bytes = msg->msg_iter.count;
+               nr = msg->msg_iter.nr_segs;
+
+               /* Have to change the state *before* sending the last
+                * packet as RxRPC might give us the reply before it
+                * returns from sending the request.
+                */
+               if (first + nr - 1 >= last)
+                       call->state = AFS_CALL_AWAIT_REPLY;
+               ret = rxrpc_kernel_send_data(afs_socket, call->rxcall,
+                                            msg, bytes);
+               for (loop = 0; loop < nr; loop++)
+                       put_page(bv[loop].bv_page);
                if (ret < 0)
                        break;
+
+               first += nr;
        } while (first <= last);
 
-       _leave(" = %d", ret);
        return ret;
 }
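afs_load_bvec() gathers up to eight pages into one bio_vec array so each rxrpc_kernel_send_data() call moves a batch rather than a single page. The closest self-contained analogue is collecting buffers into an iovec and issuing one writev() per batch; the eight-entry limit mirrors AFS_BVEC_MAX, everything else is illustrative (short writes are ignored for brevity):

#include <sys/uio.h>
#include <unistd.h>

#define BVEC_MAX 8      /* mirrors AFS_BVEC_MAX in the hunk above */

/* Send nbufs equal-sized buffers with one syscall per batch of
 * BVEC_MAX instead of one write() per buffer. */
static int send_batched(int fd, char bufs[][4096], int nbufs)
{
        struct iovec iov[BVEC_MAX];
        int sent = 0;

        while (sent < nbufs) {
                int i, n = nbufs - sent;

                if (n > BVEC_MAX)
                        n = BVEC_MAX;
                for (i = 0; i < n; i++) {
                        iov[i].iov_base = bufs[sent + i];
                        iov[i].iov_len  = sizeof(bufs[0]);
                }
                if (writev(fd, iov, n) < 0)
                        return -1;
                sent += n;
        }
        return 0;
}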
 
@@ -333,6 +340,8 @@ int afs_make_call(struct in_addr *addr, struct afs_call *call, gfp_t gfp,
        struct rxrpc_call *rxcall;
        struct msghdr msg;
        struct kvec iov[1];
+       size_t offset;
+       u32 abort_code;
        int ret;
 
        _enter("%x,{%d},", addr->s_addr, ntohs(call->port));
@@ -381,9 +390,11 @@ int afs_make_call(struct in_addr *addr, struct afs_call *call, gfp_t gfp,
        msg.msg_controllen      = 0;
        msg.msg_flags           = (call->send_pages ? MSG_MORE : 0);
 
-       /* have to change the state *before* sending the last packet as RxRPC
-        * might give us the reply before it returns from sending the
-        * request */
+       /* We have to change the state *before* sending the last packet as
+        * rxrpc might give us the reply before it returns from sending the
+        * request.  Further, if the send fails, we may already have been given
+        * a notification and may have collected it.
+        */
        if (!call->send_pages)
                call->state = AFS_CALL_AWAIT_REPLY;
        ret = rxrpc_kernel_send_data(afs_socket, rxcall,
@@ -405,7 +416,17 @@ int afs_make_call(struct in_addr *addr, struct afs_call *call, gfp_t gfp,
        return afs_wait_for_call_to_complete(call);
 
 error_do_abort:
-       rxrpc_kernel_abort_call(afs_socket, rxcall, RX_USER_ABORT, -ret, "KSD");
+       call->state = AFS_CALL_COMPLETE;
+       if (ret != -ECONNABORTED) {
+               rxrpc_kernel_abort_call(afs_socket, rxcall, RX_USER_ABORT,
+                                       ret, "KSD");
+       } else {
+               abort_code = 0;
+               offset = 0;
+               rxrpc_kernel_recv_data(afs_socket, rxcall, NULL, 0, &offset,
+                                      false, &abort_code);
+               ret = call->type->abort_to_error(abort_code);
+       }
 error_kill_call:
        afs_put_call(call);
        _leave(" = %d", ret);
@@ -452,16 +473,18 @@ static void afs_deliver_to_call(struct afs_call *call)
                case -EINPROGRESS:
                case -EAGAIN:
                        goto out;
+               case -ECONNABORTED:
+                       goto call_complete;
                case -ENOTCONN:
                        abort_code = RX_CALL_DEAD;
                        rxrpc_kernel_abort_call(afs_socket, call->rxcall,
-                                               abort_code, -ret, "KNC");
-                       goto do_abort;
+                                               abort_code, ret, "KNC");
+                       goto save_error;
                case -ENOTSUPP:
-                       abort_code = RX_INVALID_OPERATION;
+                       abort_code = RXGEN_OPCODE;
                        rxrpc_kernel_abort_call(afs_socket, call->rxcall,
-                                               abort_code, -ret, "KIV");
-                       goto do_abort;
+                                               abort_code, ret, "KIV");
+                       goto save_error;
                case -ENODATA:
                case -EBADMSG:
                case -EMSGSIZE:
@@ -470,8 +493,8 @@ static void afs_deliver_to_call(struct afs_call *call)
                        if (call->state != AFS_CALL_AWAIT_REPLY)
                                abort_code = RXGEN_SS_UNMARSHAL;
                        rxrpc_kernel_abort_call(afs_socket, call->rxcall,
-                                               abort_code, EBADMSG, "KUM");
-                       goto do_abort;
+                                               abort_code, -EBADMSG, "KUM");
+                       goto save_error;
                }
        }
 
@@ -482,8 +505,9 @@ out:
        _leave("");
        return;
 
-do_abort:
+save_error:
        call->error = ret;
+call_complete:
        call->state = AFS_CALL_COMPLETE;
        goto done;
 }
@@ -493,7 +517,6 @@ do_abort:
  */
 static int afs_wait_for_call_to_complete(struct afs_call *call)
 {
-       const char *abort_why;
        int ret;
 
        DECLARE_WAITQUEUE(myself, current);
@@ -512,13 +535,8 @@ static int afs_wait_for_call_to_complete(struct afs_call *call)
                        continue;
                }
 
-               abort_why = "KWC";
-               ret = call->error;
-               if (call->state == AFS_CALL_COMPLETE)
-                       break;
-               abort_why = "KWI";
-               ret = -EINTR;
-               if (signal_pending(current))
+               if (call->state == AFS_CALL_COMPLETE ||
+                   signal_pending(current))
                        break;
                schedule();
        }
@@ -526,13 +544,14 @@ static int afs_wait_for_call_to_complete(struct afs_call *call)
        remove_wait_queue(&call->waitq, &myself);
        __set_current_state(TASK_RUNNING);
 
-       /* kill the call */
+       /* Kill off the call if it's still live. */
        if (call->state < AFS_CALL_COMPLETE) {
-               _debug("call incomplete");
+               _debug("call interrupted");
                rxrpc_kernel_abort_call(afs_socket, call->rxcall,
-                                       RX_CALL_DEAD, -ret, abort_why);
+                                       RX_USER_ABORT, -EINTR, "KWI");
        }
 
+       ret = call->error;
        _debug("call complete");
        afs_put_call(call);
        _leave(" = %d", ret);
@@ -735,7 +754,7 @@ void afs_send_empty_reply(struct afs_call *call)
        case -ENOMEM:
                _debug("oom");
                rxrpc_kernel_abort_call(afs_socket, call->rxcall,
-                                       RX_USER_ABORT, ENOMEM, "KOO");
+                                       RX_USER_ABORT, -ENOMEM, "KOO");
        default:
                _leave(" [error]");
                return;
@@ -773,7 +792,7 @@ void afs_send_simple_reply(struct afs_call *call, const void *buf, size_t len)
        if (n == -ENOMEM) {
                _debug("oom");
                rxrpc_kernel_abort_call(afs_socket, call->rxcall,
-                                       RX_USER_ABORT, ENOMEM, "KOO");
+                                       RX_USER_ABORT, -ENOMEM, "KOO");
        }
        _leave(" [error]");
 }
index 8d010422dc8962b72fb3af64f75fdedb8e892cc0..ecb86a6701801cb74745bc99b74f9d8a367a2792 100644 (file)
@@ -114,7 +114,7 @@ void afs_clear_permits(struct afs_vnode *vnode)
 
        mutex_lock(&vnode->permits_lock);
        permits = vnode->permits;
-       rcu_assign_pointer(vnode->permits, NULL);
+       RCU_INIT_POINTER(vnode->permits, NULL);
        mutex_unlock(&vnode->permits_lock);
 
        if (permits)
@@ -340,17 +340,22 @@ int afs_permission(struct inode *inode, int mask)
        } else {
                if (!(access & AFS_ACE_LOOKUP))
                        goto permission_denied;
+               if ((mask & MAY_EXEC) && !(inode->i_mode & S_IXUSR))
+                       goto permission_denied;
                if (mask & (MAY_EXEC | MAY_READ)) {
                        if (!(access & AFS_ACE_READ))
                                goto permission_denied;
+                       if (!(inode->i_mode & S_IRUSR))
+                               goto permission_denied;
                } else if (mask & MAY_WRITE) {
                        if (!(access & AFS_ACE_WRITE))
                                goto permission_denied;
+                       if (!(inode->i_mode & S_IWUSR))
+                               goto permission_denied;
                }
        }
 
        key_put(key);
-       ret = generic_permission(inode, mask);
        _leave(" = %d", ret);
        return ret;
 
index d4066ab7dd5505b364a6506a1a2d932274bb5d9d..c001b1f2455fbf6dee4c9635c95590ada3890483 100644 (file)
@@ -242,7 +242,7 @@ void afs_put_server(struct afs_server *server)
        spin_lock(&afs_server_graveyard_lock);
        if (atomic_read(&server->usage) == 0) {
                list_move_tail(&server->grave, &afs_server_graveyard);
-               server->time_of_death = get_seconds();
+               server->time_of_death = ktime_get_real_seconds();
                queue_delayed_work(afs_wq, &afs_server_reaper,
                                   afs_server_timeout * HZ);
        }
@@ -277,9 +277,9 @@ static void afs_reap_server(struct work_struct *work)
        LIST_HEAD(corpses);
        struct afs_server *server;
        unsigned long delay, expiry;
-       time_t now;
+       time64_t now;
 
-       now = get_seconds();
+       now = ktime_get_real_seconds();
        spin_lock(&afs_server_graveyard_lock);
 
        while (!list_empty(&afs_server_graveyard)) {
index d7d8dd8c0b3187e6fe7eaed8e6300cb06826ff81..37b7c3b342a6b5a1f2f0cd06c0538e8e1d7f9073 100644 (file)
@@ -340,7 +340,8 @@ static void afs_vlocation_queue_for_updates(struct afs_vlocation *vl)
        struct afs_vlocation *xvl;
 
        /* wait at least 10 minutes before updating... */
-       vl->update_at = get_seconds() + afs_vlocation_update_timeout;
+       vl->update_at = ktime_get_real_seconds() +
+                       afs_vlocation_update_timeout;
 
        spin_lock(&afs_vlocation_updates_lock);
 
@@ -506,7 +507,7 @@ void afs_put_vlocation(struct afs_vlocation *vl)
        if (atomic_read(&vl->usage) == 0) {
                _debug("buried");
                list_move_tail(&vl->grave, &afs_vlocation_graveyard);
-               vl->time_of_death = get_seconds();
+               vl->time_of_death = ktime_get_real_seconds();
                queue_delayed_work(afs_wq, &afs_vlocation_reap,
                                   afs_vlocation_timeout * HZ);
 
@@ -543,11 +544,11 @@ static void afs_vlocation_reaper(struct work_struct *work)
        LIST_HEAD(corpses);
        struct afs_vlocation *vl;
        unsigned long delay, expiry;
-       time_t now;
+       time64_t now;
 
        _enter("");
 
-       now = get_seconds();
+       now = ktime_get_real_seconds();
        spin_lock(&afs_vlocation_graveyard_lock);
 
        while (!list_empty(&afs_vlocation_graveyard)) {
@@ -622,13 +623,13 @@ static void afs_vlocation_updater(struct work_struct *work)
 {
        struct afs_cache_vlocation vldb;
        struct afs_vlocation *vl, *xvl;
-       time_t now;
+       time64_t now;
        long timeout;
        int ret;
 
        _enter("");
 
-       now = get_seconds();
+       now = ktime_get_real_seconds();
 
        /* find a record to update */
        spin_lock(&afs_vlocation_updates_lock);
@@ -684,7 +685,8 @@ static void afs_vlocation_updater(struct work_struct *work)
 
        /* and then reschedule */
        _debug("reschedule");
-       vl->update_at = get_seconds() + afs_vlocation_update_timeout;
+       vl->update_at = ktime_get_real_seconds() +
+                       afs_vlocation_update_timeout;
 
        spin_lock(&afs_vlocation_updates_lock);
 
index c83c1a0e851fb34051c026bcea8e2a561299cf95..2d2fccd5044bcd9b02127246824c1221ec502484 100644 (file)
@@ -84,10 +84,9 @@ void afs_put_writeback(struct afs_writeback *wb)
  * partly or wholly fill a page that's under preparation for writing
  */
 static int afs_fill_page(struct afs_vnode *vnode, struct key *key,
-                        loff_t pos, struct page *page)
+                        loff_t pos, unsigned int len, struct page *page)
 {
        struct afs_read *req;
-       loff_t i_size;
        int ret;
 
        _enter(",,%llu", (unsigned long long)pos);
@@ -99,14 +98,10 @@ static int afs_fill_page(struct afs_vnode *vnode, struct key *key,
 
        atomic_set(&req->usage, 1);
        req->pos = pos;
+       req->len = len;
        req->nr_pages = 1;
        req->pages[0] = page;
-
-       i_size = i_size_read(&vnode->vfs_inode);
-       if (pos + PAGE_SIZE > i_size)
-               req->len = i_size - pos;
-       else
-               req->len = PAGE_SIZE;
+       get_page(page);
 
        ret = afs_vnode_fetch_data(vnode, key, req);
        afs_put_read(req);
@@ -159,12 +154,12 @@ int afs_write_begin(struct file *file, struct address_space *mapping,
                kfree(candidate);
                return -ENOMEM;
        }
-       *pagep = page;
-       /* page won't leak in error case: it eventually gets cleaned off LRU */
 
        if (!PageUptodate(page) && len != PAGE_SIZE) {
-               ret = afs_fill_page(vnode, key, index << PAGE_SHIFT, page);
+               ret = afs_fill_page(vnode, key, pos & PAGE_MASK, PAGE_SIZE, page);
                if (ret < 0) {
+                       unlock_page(page);
+                       put_page(page);
                        kfree(candidate);
                        _leave(" = %d [prep]", ret);
                        return ret;
@@ -172,6 +167,9 @@ int afs_write_begin(struct file *file, struct address_space *mapping,
                SetPageUptodate(page);
        }
 
+       /* page won't leak in error case: it eventually gets cleaned off LRU */
+       *pagep = page;
+
 try_again:
        spin_lock(&vnode->writeback_lock);
 
@@ -233,7 +231,7 @@ flush_conflicting_wb:
        if (wb->state == AFS_WBACK_PENDING)
                wb->state = AFS_WBACK_CONFLICTING;
        spin_unlock(&vnode->writeback_lock);
-       if (PageDirty(page)) {
+       if (clear_page_dirty_for_io(page)) {
                ret = afs_write_back_from_locked_page(wb, page);
                if (ret < 0) {
                        afs_put_writeback(candidate);
@@ -257,7 +255,9 @@ int afs_write_end(struct file *file, struct address_space *mapping,
                  struct page *page, void *fsdata)
 {
        struct afs_vnode *vnode = AFS_FS_I(file_inode(file));
+       struct key *key = file->private_data;
        loff_t i_size, maybe_i_size;
+       int ret;
 
        _enter("{%x:%u},{%lx}",
               vnode->fid.vid, vnode->fid.vnode, page->index);
@@ -273,6 +273,20 @@ int afs_write_end(struct file *file, struct address_space *mapping,
                spin_unlock(&vnode->writeback_lock);
        }
 
+       if (!PageUptodate(page)) {
+               if (copied < len) {
+                       /* Try and load any missing data from the server.  The
+                        * unmarshalling routine will take care of clearing any
+                        * bits that are beyond the EOF.
+                        */
+                       ret = afs_fill_page(vnode, key, pos + copied,
+                                           len - copied, page);
+                       if (ret < 0)
+                               return ret;
+               }
+               SetPageUptodate(page);
+       }
+
        set_page_dirty(page);
        if (PageDirty(page))
                _debug("dirtied");
@@ -307,10 +321,14 @@ static void afs_kill_pages(struct afs_vnode *vnode, bool error,
                ASSERTCMP(pv.nr, ==, count);
 
                for (loop = 0; loop < count; loop++) {
-                       ClearPageUptodate(pv.pages[loop]);
+                       struct page *page = pv.pages[loop];
+                       ClearPageUptodate(page);
                        if (error)
-                               SetPageError(pv.pages[loop]);
-                       end_page_writeback(pv.pages[loop]);
+                               SetPageError(page);
+                       if (PageWriteback(page))
+                               end_page_writeback(page);
+                       if (page->index >= first)
+                               first = page->index + 1;
                }
 
                __pagevec_release(&pv);
@@ -335,8 +353,6 @@ static int afs_write_back_from_locked_page(struct afs_writeback *wb,
        _enter(",%lx", primary_page->index);
 
        count = 1;
-       if (!clear_page_dirty_for_io(primary_page))
-               BUG();
        if (test_set_page_writeback(primary_page))
                BUG();
 
@@ -502,17 +518,17 @@ static int afs_writepages_region(struct address_space *mapping,
                 */
                lock_page(page);
 
-               if (page->mapping != mapping) {
+               if (page->mapping != mapping || !PageDirty(page)) {
                        unlock_page(page);
                        put_page(page);
                        continue;
                }
 
-               if (wbc->sync_mode != WB_SYNC_NONE)
-                       wait_on_page_writeback(page);
-
-               if (PageWriteback(page) || !PageDirty(page)) {
+               if (PageWriteback(page)) {
                        unlock_page(page);
+                       if (wbc->sync_mode != WB_SYNC_NONE)
+                               wait_on_page_writeback(page);
+                       put_page(page);
                        continue;
                }
 
@@ -523,6 +539,8 @@ static int afs_writepages_region(struct address_space *mapping,
                wb->state = AFS_WBACK_WRITING;
                spin_unlock(&wb->vnode->writeback_lock);
 
+               if (!clear_page_dirty_for_io(page))
+                       BUG();
                ret = afs_write_back_from_locked_page(wb, page);
                unlock_page(page);
                put_page(page);
@@ -745,6 +763,20 @@ out:
        return ret;
 }
 
+/*
+ * Flush out all outstanding writes on a file opened for writing when it is
+ * closed.
+ */
+int afs_flush(struct file *file, fl_owner_t id)
+{
+       _enter("");
+
+       if ((file->f_mode & FMODE_WRITE) == 0)
+               return 0;
+
+       return vfs_fsync(file, 0);
+}
+
 /*
  * notification that a previously read-only page is about to become writable
  * - if it returns an error, the caller will deliver a bus error signal
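The new afs_flush() above makes close() on a writable file push dirty data back via vfs_fsync(), so write errors surface at close time rather than being lost. A userspace analogue of the same flush-on-close policy:

#include <fcntl.h>
#include <unistd.h>

/* Close fd, syncing first when it was opened for writing, so write
 * errors are reported to the caller instead of silently dropped. */
static int close_flushing(int fd)
{
        int acc = fcntl(fd, F_GETFL) & O_ACCMODE;

        if (acc == O_WRONLY || acc == O_RDWR)
                if (fsync(fd) < 0)
                        return -1;
        return close(fd);
}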
index 29b7fc28c607232987cc3b28fbe9a92e0f766df7..c4115901d9064f68217c3be825f233d7d6872cf6 100644 (file)
@@ -1259,7 +1259,7 @@ struct btrfs_root {
        atomic_t will_be_snapshoted;
 
        /* For qgroup metadata space reserve */
-       atomic_t qgroup_meta_rsv;
+       atomic64_t qgroup_meta_rsv;
 };
 static inline u32 btrfs_inode_sectorsize(const struct inode *inode)
 {
index 08b74daf35d05f70dac01adbac73d10925f50879..eb1ee7b6f532b74409a6bb50fdb204cb2c8332a1 100644 (file)
@@ -1342,7 +1342,7 @@ static void __setup_root(struct btrfs_root *root, struct btrfs_fs_info *fs_info,
        atomic_set(&root->orphan_inodes, 0);
        atomic_set(&root->refs, 1);
        atomic_set(&root->will_be_snapshoted, 0);
-       atomic_set(&root->qgroup_meta_rsv, 0);
+       atomic64_set(&root->qgroup_meta_rsv, 0);
        root->log_transid = 0;
        root->log_transid_committed = -1;
        root->last_log_commit = 0;
index 28e81922a21c1ecead950f50cf3e685ad03c57f6..27fdb250b4467f65a8c6a42d06835f3bb3a36aec 100644 (file)
@@ -1714,7 +1714,8 @@ static int __process_pages_contig(struct address_space *mapping,
                         * can we find nothing at @index.
                         */
                        ASSERT(page_ops & PAGE_LOCK);
-                       return ret;
+                       err = -EAGAIN;
+                       goto out;
                }
 
                for (i = 0; i < ret; i++) {
@@ -2583,26 +2584,36 @@ static void end_bio_extent_readpage(struct bio *bio)
 
                if (tree->ops) {
                        ret = tree->ops->readpage_io_failed_hook(page, mirror);
-                       if (!ret && !bio->bi_error)
-                               uptodate = 1;
-               } else {
+                       if (ret == -EAGAIN) {
+                               /*
+                                * Data inode's readpage_io_failed_hook() always
+                                * returns -EAGAIN.
+                                *
+                                * The generic bio_readpage_error handles errors
+                                * the following way: If possible, new read
+                                * requests are created and submitted and will
+                                * end up in end_bio_extent_readpage as well (if
+                                * we're lucky, not in the !uptodate case). In
+                                * that case it returns 0 and we just go on with
+                                * the next page in our bio. If it can't handle
+                                * the error it will return -EIO and we remain
+                                * responsible for that page.
+                                */
+                               ret = bio_readpage_error(bio, offset, page,
+                                                        start, end, mirror);
+                               if (ret == 0) {
+                                       uptodate = !bio->bi_error;
+                                       offset += len;
+                                       continue;
+                               }
+                       }
+
                        /*
-                        * The generic bio_readpage_error handles errors the
-                        * following way: If possible, new read requests are
-                        * created and submitted and will end up in
-                        * end_bio_extent_readpage as well (if we're lucky, not
-                        * in the !uptodate case). In that case it returns 0 and
-                        * we just go on with the next page in our bio. If it
-                        * can't handle the error it will return -EIO and we
-                        * remain responsible for that page.
+                        * Metadata's readpage_io_failed_hook() always returns
+                        * -EIO and fixes nothing.  -EIO is also returned if
+                        * the data inode error could not be fixed.
                         */
-                       ret = bio_readpage_error(bio, offset, page, start, end,
-                                                mirror);
-                       if (ret == 0) {
-                               uptodate = !bio->bi_error;
-                               offset += len;
-                               continue;
-                       }
+                       ASSERT(ret == -EIO);
                }
 readpage_ok:
                if (likely(uptodate)) {
index c40060cc481f60440044d00ea4a76904cc4d9761..a18510be76c141e5d4b4d687c2eb5498cc273c56 100644 (file)
@@ -6709,6 +6709,20 @@ static noinline int uncompress_inline(struct btrfs_path *path,
        max_size = min_t(unsigned long, PAGE_SIZE, max_size);
        ret = btrfs_decompress(compress_type, tmp, page,
                               extent_offset, inline_size, max_size);
+
+       /*
+        * The decompression code contains a memset to fill in any space between the end
+        * of the uncompressed data and the end of max_size in case the decompressed
+        * data ends up shorter than ram_bytes.  That doesn't cover the hole between
+        * the end of an inline extent and the beginning of the next block, so we
+        * cover that region here.
+        */
+
+       if (max_size + pg_offset < PAGE_SIZE) {
+               char *map = kmap(page);
+               memset(map + pg_offset + max_size, 0, PAGE_SIZE - max_size - pg_offset);
+               kunmap(page);
+       }
        kfree(tmp);
        return ret;
 }
@@ -10509,9 +10523,9 @@ out_inode:
 }
 
 __attribute__((const))
-static int dummy_readpage_io_failed_hook(struct page *page, int failed_mirror)
+static int btrfs_readpage_io_failed_hook(struct page *page, int failed_mirror)
 {
-       return 0;
+       return -EAGAIN;
 }
 
 static const struct inode_operations btrfs_dir_inode_operations = {
@@ -10556,7 +10570,7 @@ static const struct extent_io_ops btrfs_extent_io_ops = {
        .submit_bio_hook = btrfs_submit_bio_hook,
        .readpage_end_io_hook = btrfs_readpage_end_io_hook,
        .merge_bio_hook = btrfs_merge_bio_hook,
-       .readpage_io_failed_hook = dummy_readpage_io_failed_hook,
+       .readpage_io_failed_hook = btrfs_readpage_io_failed_hook,
 
        /* optional callbacks */
        .fill_delalloc = run_delalloc_range,
index a5da750c1087fdc118e3ba696962260a9a761fc6..a59801dc2a340bcd3c5a34af9ef7277061967377 100644 (file)
@@ -2948,20 +2948,20 @@ int btrfs_qgroup_reserve_meta(struct btrfs_root *root, int num_bytes,
        ret = qgroup_reserve(root, num_bytes, enforce);
        if (ret < 0)
                return ret;
-       atomic_add(num_bytes, &root->qgroup_meta_rsv);
+       atomic64_add(num_bytes, &root->qgroup_meta_rsv);
        return ret;
 }
 
 void btrfs_qgroup_free_meta_all(struct btrfs_root *root)
 {
        struct btrfs_fs_info *fs_info = root->fs_info;
-       int reserved;
+       u64 reserved;
 
        if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags) ||
            !is_fstree(root->objectid))
                return;
 
-       reserved = atomic_xchg(&root->qgroup_meta_rsv, 0);
+       reserved = atomic64_xchg(&root->qgroup_meta_rsv, 0);
        if (reserved == 0)
                return;
        btrfs_qgroup_free_refroot(fs_info, root->objectid, reserved);
@@ -2976,8 +2976,8 @@ void btrfs_qgroup_free_meta(struct btrfs_root *root, int num_bytes)
                return;
 
        BUG_ON(num_bytes != round_down(num_bytes, fs_info->nodesize));
-       WARN_ON(atomic_read(&root->qgroup_meta_rsv) < num_bytes);
-       atomic_sub(num_bytes, &root->qgroup_meta_rsv);
+       WARN_ON(atomic64_read(&root->qgroup_meta_rsv) < num_bytes);
+       atomic64_sub(num_bytes, &root->qgroup_meta_rsv);
        btrfs_qgroup_free_refroot(fs_info, root->objectid, num_bytes);
 }
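Widening qgroup_meta_rsv to atomic64_t matters because the counter accumulates bytes: sustained metadata reservations can exceed what 32 bits can hold. A toy demonstration of the wraparound the change avoids (the sizes are arbitrary):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint32_t rsv32 = 0;     /* wraps modulo 2^32 */
        uint64_t rsv64 = 0;

        /* Reserve 16 KiB of metadata 300,000 times (~4.9 GB total). */
        for (int i = 0; i < 300000; i++) {
                rsv32 += 16384;
                rsv64 += 16384;
        }
        printf("32-bit total: %u, 64-bit total: %llu\n",
               rsv32, (unsigned long long)rsv64);
        return 0;
}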
 
index 456c8901489b6c6b468901854bcdcbc53cb5cf13..a60d5bfb8a49e2bfc3faef10f4ea353a9da5f8b8 100644 (file)
@@ -6305,8 +6305,13 @@ long btrfs_ioctl_send(struct file *mnt_file, void __user *arg_)
                goto out;
        }
 
+       /*
+        * Check that we don't overflow at later allocations: we request
+        * clone_sources_count + 1 items, and access_ok() compares the size
+        * against an unsigned long internally.
+        */
        if (arg->clone_sources_count >
-           ULLONG_MAX / sizeof(*arg->clone_sources)) {
+           ULONG_MAX / sizeof(struct clone_root) - 1) {
                ret = -EINVAL;
                goto out;
        }
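The tightened bound leaves headroom for the extra array slot and matches the unsigned long arithmetic inside access_ok(). The shape of the guard, generalised into a standalone helper:

#include <limits.h>
#include <stddef.h>

/* Would (count + 1) elements of elem_size bytes overflow an unsigned
 * long byte count?  Same check as the hunk above, parameterised. */
static int alloc_would_overflow(unsigned long long count, size_t elem_size)
{
        return count > ULONG_MAX / elem_size - 1;
}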
index 15e1db8738aecad0c8a86888c0fa1ada5f9b7623..8c91f37ac0ebd784e3eb9b964f57f9c73ea37a5d 100644 (file)
@@ -37,6 +37,7 @@
 #include <linux/freezer.h>
 #include <linux/namei.h>
 #include <linux/random.h>
+#include <linux/uuid.h>
 #include <linux/xattr.h>
 #include <net/ipv6.h>
 #include "cifsfs.h"
index 9ae695ae3ed7be3788db2a889e34cde8a3224c9c..858698dcde3cd0a44209164fa9226e1762c504a8 100644 (file)
@@ -35,6 +35,7 @@
 #include <linux/pagevec.h>
 #include <linux/freezer.h>
 #include <linux/namei.h>
+#include <linux/uuid.h>
 #include <linux/uaccess.h>
 #include <asm/processor.h>
 #include <linux/inet.h>
index 7446496850a3bd5f21fb36e12b65ba5c78532612..fb75fe908225d77572eaa0a962ab5cd2afa227dd 100644 (file)
@@ -33,6 +33,7 @@
 #include <linux/vfs.h>
 #include <linux/task_io_accounting_ops.h>
 #include <linux/uaccess.h>
+#include <linux/uuid.h>
 #include <linux/pagemap.h>
 #include <linux/xattr.h>
 #include "smb2pdu.h"
index 02a7a9286449d467741d64e8e817bb2902309926..6d6eca394d4d4107b8459b828b563acbbc9f082e 100644 (file)
@@ -327,7 +327,6 @@ EXPORT_SYMBOL(fscrypt_decrypt_page);
 static int fscrypt_d_revalidate(struct dentry *dentry, unsigned int flags)
 {
        struct dentry *dir;
-       struct fscrypt_info *ci;
        int dir_has_key, cached_with_key;
 
        if (flags & LOOKUP_RCU)
@@ -339,18 +338,11 @@ static int fscrypt_d_revalidate(struct dentry *dentry, unsigned int flags)
                return 0;
        }
 
-       ci = d_inode(dir)->i_crypt_info;
-       if (ci && ci->ci_keyring_key &&
-           (ci->ci_keyring_key->flags & ((1 << KEY_FLAG_INVALIDATED) |
-                                         (1 << KEY_FLAG_REVOKED) |
-                                         (1 << KEY_FLAG_DEAD))))
-               ci = NULL;
-
        /* this should eventually be a flag in d_flags */
        spin_lock(&dentry->d_lock);
        cached_with_key = dentry->d_flags & DCACHE_ENCRYPTED_WITH_KEY;
        spin_unlock(&dentry->d_lock);
-       dir_has_key = (ci != NULL);
+       dir_has_key = (d_inode(dir)->i_crypt_info != NULL);
        dput(dir);
 
        /*
index 13052b85c3930f071be764c5fbbeb091429002d3..37b49894c762344841117b2a0042c5e9ec8b7140 100644 (file)
@@ -350,7 +350,7 @@ int fscrypt_setup_filename(struct inode *dir, const struct qstr *iname,
                fname->disk_name.len = iname->len;
                return 0;
        }
-       ret = fscrypt_get_crypt_info(dir);
+       ret = fscrypt_get_encryption_info(dir);
        if (ret && ret != -EOPNOTSUPP)
                return ret;
 
index fdbb8af32eafdb6bae492658d376abf15f8a3d69..e39696e644942a80d110035e2d49570840823af7 100644 (file)
@@ -67,7 +67,6 @@ struct fscrypt_info {
        u8 ci_filename_mode;
        u8 ci_flags;
        struct crypto_skcipher *ci_ctfm;
-       struct key *ci_keyring_key;
        u8 ci_master_key[FS_KEY_DESCRIPTOR_SIZE];
 };
 
@@ -101,7 +100,4 @@ extern int fscrypt_do_page_crypto(const struct inode *inode,
 extern struct page *fscrypt_alloc_bounce_page(struct fscrypt_ctx *ctx,
                                              gfp_t gfp_flags);
 
-/* keyinfo.c */
-extern int fscrypt_get_crypt_info(struct inode *);
-
 #endif /* _FSCRYPT_PRIVATE_H */
index d5d896fa5a71675272131d797919924b2398a85a..8cdfddce2b34868f0cfe3f71da55d64187172a38 100644 (file)
@@ -95,6 +95,7 @@ static int validate_user_key(struct fscrypt_info *crypt_info,
        kfree(description);
        if (IS_ERR(keyring_key))
                return PTR_ERR(keyring_key);
+       down_read(&keyring_key->sem);
 
        if (keyring_key->type != &key_type_logon) {
                printk_once(KERN_WARNING
@@ -102,11 +103,9 @@ static int validate_user_key(struct fscrypt_info *crypt_info,
                res = -ENOKEY;
                goto out;
        }
-       down_read(&keyring_key->sem);
        ukp = user_key_payload_locked(keyring_key);
        if (ukp->datalen != sizeof(struct fscrypt_key)) {
                res = -EINVAL;
-               up_read(&keyring_key->sem);
                goto out;
        }
        master_key = (struct fscrypt_key *)ukp->data;
@@ -117,17 +116,11 @@ static int validate_user_key(struct fscrypt_info *crypt_info,
                                "%s: key size incorrect: %d\n",
                                __func__, master_key->size);
                res = -ENOKEY;
-               up_read(&keyring_key->sem);
                goto out;
        }
        res = derive_key_aes(ctx->nonce, master_key->raw, raw_key);
-       up_read(&keyring_key->sem);
-       if (res)
-               goto out;
-
-       crypt_info->ci_keyring_key = keyring_key;
-       return 0;
 out:
+       up_read(&keyring_key->sem);
        key_put(keyring_key);
        return res;
 }
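The reshuffle above takes keyring_key->sem before the payload is examined and releases it on a single exit path, closing the window in which ukp could be read unlocked. The shape of that single-exit locking pattern with POSIX rwlocks:

#include <pthread.h>

static pthread_rwlock_t sem = PTHREAD_RWLOCK_INITIALIZER;

/* Single-exit read-locked section: every failure path funnels
 * through "out" so the unlock cannot be missed. */
static int validate(int type_ok, int len_ok)
{
        int res = 0;

        pthread_rwlock_rdlock(&sem);
        if (!type_ok) {
                res = -1;
                goto out;
        }
        if (!len_ok) {
                res = -2;
                goto out;
        }
        /* ... derive the key under the lock ... */
out:
        pthread_rwlock_unlock(&sem);
        return res;
}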
@@ -169,12 +162,11 @@ static void put_crypt_info(struct fscrypt_info *ci)
        if (!ci)
                return;
 
-       key_put(ci->ci_keyring_key);
        crypto_free_skcipher(ci->ci_ctfm);
        kmem_cache_free(fscrypt_info_cachep, ci);
 }
 
-int fscrypt_get_crypt_info(struct inode *inode)
+int fscrypt_get_encryption_info(struct inode *inode)
 {
        struct fscrypt_info *crypt_info;
        struct fscrypt_context ctx;
@@ -184,21 +176,15 @@ int fscrypt_get_crypt_info(struct inode *inode)
        u8 *raw_key = NULL;
        int res;
 
+       if (inode->i_crypt_info)
+               return 0;
+
        res = fscrypt_initialize(inode->i_sb->s_cop->flags);
        if (res)
                return res;
 
        if (!inode->i_sb->s_cop->get_context)
                return -EOPNOTSUPP;
-retry:
-       crypt_info = ACCESS_ONCE(inode->i_crypt_info);
-       if (crypt_info) {
-               if (!crypt_info->ci_keyring_key ||
-                               key_validate(crypt_info->ci_keyring_key) == 0)
-                       return 0;
-               fscrypt_put_encryption_info(inode, crypt_info);
-               goto retry;
-       }
 
        res = inode->i_sb->s_cop->get_context(inode, &ctx, sizeof(ctx));
        if (res < 0) {
@@ -229,7 +215,6 @@ retry:
        crypt_info->ci_data_mode = ctx.contents_encryption_mode;
        crypt_info->ci_filename_mode = ctx.filenames_encryption_mode;
        crypt_info->ci_ctfm = NULL;
-       crypt_info->ci_keyring_key = NULL;
        memcpy(crypt_info->ci_master_key, ctx.master_key_descriptor,
                                sizeof(crypt_info->ci_master_key));
 
@@ -273,14 +258,8 @@ retry:
        if (res)
                goto out;
 
-       kzfree(raw_key);
-       raw_key = NULL;
-       if (cmpxchg(&inode->i_crypt_info, NULL, crypt_info) != NULL) {
-               put_crypt_info(crypt_info);
-               goto retry;
-       }
-       return 0;
-
+       if (cmpxchg(&inode->i_crypt_info, NULL, crypt_info) == NULL)
+               crypt_info = NULL;
 out:
        if (res == -ENOKEY)
                res = 0;
@@ -288,6 +267,7 @@ out:
        kzfree(raw_key);
        return res;
 }
+EXPORT_SYMBOL(fscrypt_get_encryption_info);
 
 void fscrypt_put_encryption_info(struct inode *inode, struct fscrypt_info *ci)
 {
@@ -305,17 +285,3 @@ void fscrypt_put_encryption_info(struct inode *inode, struct fscrypt_info *ci)
        put_crypt_info(ci);
 }
 EXPORT_SYMBOL(fscrypt_put_encryption_info);
-
-int fscrypt_get_encryption_info(struct inode *inode)
-{
-       struct fscrypt_info *ci = inode->i_crypt_info;
-
-       if (!ci ||
-               (ci->ci_keyring_key &&
-                (ci->ci_keyring_key->flags & ((1 << KEY_FLAG_INVALIDATED) |
-                                              (1 << KEY_FLAG_REVOKED) |
-                                              (1 << KEY_FLAG_DEAD)))))
-               return fscrypt_get_crypt_info(inode);
-       return 0;
-}
-EXPORT_SYMBOL(fscrypt_get_encryption_info);
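The rewritten fscrypt_get_encryption_info() installs the freshly built context with a single cmpxchg(): the winner of any race publishes its object and the losers free their private copy, replacing the old retry loop. A userspace sketch of that install-or-discard idiom, with C11 atomics standing in for the kernel's cmpxchg():

#include <stdatomic.h>
#include <stdlib.h>

struct crypt_info { int keys; };

static _Atomic(struct crypt_info *) installed;

/* Build the context at most once; concurrent callers race to publish
 * it and the losers discard their copy, as in the hunk above. */
static struct crypt_info *get_info(void)
{
        struct crypt_info *ci = atomic_load(&installed);
        struct crypt_info *expected = NULL;

        if (ci)
                return ci;
        ci = calloc(1, sizeof(*ci));
        if (!ci)
                return NULL;
        if (!atomic_compare_exchange_strong(&installed, &expected, ci)) {
                free(ci);               /* another thread won the race */
                ci = expected;          /* use the winner's object */
        }
        return ci;
}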
index 14b76da71269487f22b941f82dbf01d6adea07c7..4908906d54d562263093cd5245fcb14e36d18b8e 100644 (file)
@@ -33,17 +33,10 @@ static int create_encryption_context_from_policy(struct inode *inode,
                                const struct fscrypt_policy *policy)
 {
        struct fscrypt_context ctx;
-       int res;
 
        if (!inode->i_sb->s_cop->set_context)
                return -EOPNOTSUPP;
 
-       if (inode->i_sb->s_cop->prepare_context) {
-               res = inode->i_sb->s_cop->prepare_context(inode);
-               if (res)
-                       return res;
-       }
-
        ctx.format = FS_ENCRYPTION_CONTEXT_FORMAT_V1;
        memcpy(ctx.master_key_descriptor, policy->master_key_descriptor,
                                        FS_KEY_DESCRIPTOR_SIZE);
index 7d398d300e972c3604727ac8d6d4fbf7302318a6..9382db998ec9549319f47b55ccd561abf1169874 100644 (file)
@@ -743,7 +743,7 @@ static int tcp_accept_from_sock(struct connection *con)
        newsock->type = con->sock->type;
        newsock->ops = con->sock->ops;
 
-       result = con->sock->ops->accept(con->sock, newsock, O_NONBLOCK);
+       result = con->sock->ops->accept(con->sock, newsock, O_NONBLOCK, true);
        if (result < 0)
                goto accept_err;
 
index 341251421ced00ab1be854c4145cf408cfc93c2f..5420767c9b686a7a9db30bbf932e85071c83c9f6 100644 (file)
@@ -42,6 +42,7 @@
 #include <linux/seq_file.h>
 #include <linux/compat.h>
 #include <linux/rculist.h>
+#include <net/busy_poll.h>
 
 /*
  * LOCKING:
@@ -224,6 +225,11 @@ struct eventpoll {
        /* used to optimize loop detection check */
        int visited;
        struct list_head visited_list_link;
+
+#ifdef CONFIG_NET_RX_BUSY_POLL
+       /* used to track busy poll napi_id */
+       unsigned int napi_id;
+#endif
 };
 
 /* Wait structure used by the poll hooks */
@@ -384,6 +390,77 @@ static inline int ep_events_available(struct eventpoll *ep)
        return !list_empty(&ep->rdllist) || ep->ovflist != EP_UNACTIVE_PTR;
 }
 
+#ifdef CONFIG_NET_RX_BUSY_POLL
+static bool ep_busy_loop_end(void *p, unsigned long start_time)
+{
+       struct eventpoll *ep = p;
+
+       return ep_events_available(ep) || busy_loop_timeout(start_time);
+}
+#endif /* CONFIG_NET_RX_BUSY_POLL */
+
+/*
+ * Busy-poll if busy polling is globally enabled, a supporting socket
+ * has been found and no events are pending; the busy loop returns when
+ * need_resched() fires or ep_events_available() becomes true.
+ *
+ * We must do our busy polling with irqs enabled.
+ */
+static void ep_busy_loop(struct eventpoll *ep, int nonblock)
+{
+#ifdef CONFIG_NET_RX_BUSY_POLL
+       unsigned int napi_id = READ_ONCE(ep->napi_id);
+
+       if ((napi_id >= MIN_NAPI_ID) && net_busy_loop_on())
+               napi_busy_loop(napi_id, nonblock ? NULL : ep_busy_loop_end, ep);
+#endif
+}
+
+static inline void ep_reset_busy_poll_napi_id(struct eventpoll *ep)
+{
+#ifdef CONFIG_NET_RX_BUSY_POLL
+       if (ep->napi_id)
+               ep->napi_id = 0;
+#endif
+}
+
+/*
+ * Set epoll busy poll NAPI ID from sk.
+ */
+static inline void ep_set_busy_poll_napi_id(struct epitem *epi)
+{
+#ifdef CONFIG_NET_RX_BUSY_POLL
+       struct eventpoll *ep;
+       unsigned int napi_id;
+       struct socket *sock;
+       struct sock *sk;
+       int err;
+
+       if (!net_busy_loop_on())
+               return;
+
+       sock = sock_from_file(epi->ffd.file, &err);
+       if (!sock)
+               return;
+
+       sk = sock->sk;
+       if (!sk)
+               return;
+
+       napi_id = READ_ONCE(sk->sk_napi_id);
+       ep = epi->ep;
+
+       /* Reject non-NAPI IDs outright; there is also nothing to do
+        * if we already have this ID recorded.
+        */
+       if (napi_id < MIN_NAPI_ID || napi_id == ep->napi_id)
+               return;
+
+       /* record NAPI ID for use in next busy poll */
+       ep->napi_id = napi_id;
+#endif
+}
+
 /**
  * ep_call_nested - Perform a bound (possibly) nested call, by checking
  *                  that the recursion limit is not exceeded, and that
@@ -1022,6 +1099,8 @@ static int ep_poll_callback(wait_queue_t *wait, unsigned mode, int sync, void *k
 
        spin_lock_irqsave(&ep->lock, flags);
 
+       ep_set_busy_poll_napi_id(epi);
+
        /*
         * If the event mask does not contain any poll(2) event, we consider the
         * descriptor to be disabled. This condition is likely the effect of the
@@ -1363,6 +1442,9 @@ static int ep_insert(struct eventpoll *ep, struct epoll_event *event,
        /* We have to drop the new item inside our item list to keep track of it */
        spin_lock_irqsave(&ep->lock, flags);
 
+       /* record NAPI ID of new item if present */
+       ep_set_busy_poll_napi_id(epi);
+
        /* If the file is already "ready" we drop it inside the ready list */
        if ((revents & event->events) && !ep_is_linked(&epi->rdllink)) {
                list_add_tail(&epi->rdllink, &ep->rdllist);
@@ -1637,9 +1719,20 @@ static int ep_poll(struct eventpoll *ep, struct epoll_event __user *events,
        }
 
 fetch_events:
+
+       if (!ep_events_available(ep))
+               ep_busy_loop(ep, timed_out);
+
        spin_lock_irqsave(&ep->lock, flags);
 
        if (!ep_events_available(ep)) {
+               /*
+                * Busy poll timed out.  Drop the NAPI ID for now; we can
+                * add it back in when we have moved a socket with a valid
+                * NAPI ID onto the ready list.
+                */
+               ep_reset_busy_poll_napi_id(ep);
+
                /*
                 * We don't have any available event to return to the caller.
                 * We need to sleep here, and we will be woken up by
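ep_set_busy_poll_napi_id() records the NAPI ID of the last socket that queued an event so ep_busy_loop() can spin on the matching receive queue. From userspace the same ID can be read back with the SO_INCOMING_NAPI_ID socket option from the same kernel cycle; a hedged sketch (the fallback option value is an assumption for older headers):

#include <stdio.h>
#include <sys/socket.h>

#ifndef SO_INCOMING_NAPI_ID
#define SO_INCOMING_NAPI_ID 56  /* assumed value; arch headers may differ */
#endif

/* Report which NAPI (driver receive) context last queued data on fd;
 * zero means no NAPI ID has been recorded for this socket yet. */
static void print_napi_id(int fd)
{
        unsigned int napi_id = 0;
        socklen_t len = sizeof(napi_id);

        if (getsockopt(fd, SOL_SOCKET, SO_INCOMING_NAPI_ID,
                       &napi_id, &len) == 0)
                printf("socket %d last serviced by NAPI ID %u\n",
                       fd, napi_id);
}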
index 30a9f210d1e32c8a01635821b2937407d5b21773..375fb1c05d49ce87a287213720ca0ebf5e0deef1 100644 (file)
@@ -1169,10 +1169,9 @@ static int ext4_finish_convert_inline_dir(handle_t *handle,
        set_buffer_uptodate(dir_block);
        err = ext4_handle_dirty_dirent_node(handle, inode, dir_block);
        if (err)
-               goto out;
+               return err;
        set_buffer_verified(dir_block);
-out:
-       return err;
+       return ext4_mark_inode_dirty(handle, inode);
 }
 
 static int ext4_convert_inline_data_nolock(handle_t *handle,
index 7385e6a6b6cb549041d098a565c36c20794f7f14..4247d8d25687814dd1b844ea5555feeb43854d99 100644 (file)
@@ -5400,7 +5400,7 @@ int ext4_getattr(const struct path *path, struct kstat *stat,
         * If there is inline data in the inode, the inode will normally not
         * have data blocks allocated (it may have an external xattr block).
         * Report at least one sector for such files, so tools like tar, rsync,
-        * others doen't incorrectly think the file is completely sparse.
+        * others don't incorrectly think the file is completely sparse.
         */
        if (unlikely(ext4_has_inline_data(inode)))
                stat->blocks += (stat->size + 511) >> 9;
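
The getattr hunk reports at least one 512-byte sector for inline-data files using the usual round-up shift; making the arithmetic explicit:

/* (size + 511) >> 9 rounds a byte count up to 512-byte sectors,
 * so a 60-byte inline file reports 1 sector instead of 0. */
static unsigned long long bytes_to_sectors(unsigned long long size)
{
	return (size + 511) >> 9;	/* 60 -> 1, 512 -> 1, 513 -> 2 */
}
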
index 578f8c33fb44ad34062e978277f5def1d8aeebe1..c992ef2c2f94c0865d14de67e2c5b99857edf71f 100644 (file)
@@ -511,7 +511,7 @@ mext_check_arguments(struct inode *orig_inode,
        if ((orig_start & ~(PAGE_MASK >> orig_inode->i_blkbits)) !=
            (donor_start & ~(PAGE_MASK >> orig_inode->i_blkbits))) {
                ext4_debug("ext4 move extent: orig and donor's start "
-                       "offset are not alligned [ino:orig %lu, donor %lu]\n",
+                       "offsets are not aligned [ino:orig %lu, donor %lu]\n",
                        orig_inode->i_ino, donor_inode->i_ino);
                return -EINVAL;
        }
index 2e03a0a88d92f7731346a26cbdc934458549660f..a9448db1cf7e87c2bd41daac6fbab7729bc87ecc 100644 (file)
@@ -1120,17 +1120,16 @@ static int ext4_get_context(struct inode *inode, void *ctx, size_t len)
                                 EXT4_XATTR_NAME_ENCRYPTION_CONTEXT, ctx, len);
 }
 
-static int ext4_prepare_context(struct inode *inode)
-{
-       return ext4_convert_inline_data(inode);
-}
-
 static int ext4_set_context(struct inode *inode, const void *ctx, size_t len,
                                                        void *fs_data)
 {
        handle_t *handle = fs_data;
        int res, res2, retries = 0;
 
+       res = ext4_convert_inline_data(inode);
+       if (res)
+               return res;
+
        /*
         * If a journal handle was specified, then the encryption context is
         * being set on a new inode via inheritance and is part of a larger
@@ -1196,7 +1195,6 @@ static unsigned ext4_max_namelen(struct inode *inode)
 static const struct fscrypt_operations ext4_cryptops = {
        .key_prefix             = "ext4:",
        .get_context            = ext4_get_context,
-       .prepare_context        = ext4_prepare_context,
        .set_context            = ext4_set_context,
        .dummy_context          = ext4_dummy_context,
        .is_encrypted           = ext4_encrypted_inode,
index 67636acf762475e211a641f4720e862ba886a40a..996e7900d4c8ea2d16f65f47e3f1082552b2fc66 100644 (file)
@@ -131,31 +131,26 @@ static __le32 ext4_xattr_block_csum(struct inode *inode,
 }
 
 static int ext4_xattr_block_csum_verify(struct inode *inode,
-                                       sector_t block_nr,
-                                       struct ext4_xattr_header *hdr)
+                                       struct buffer_head *bh)
 {
-       if (ext4_has_metadata_csum(inode->i_sb) &&
-           (hdr->h_checksum != ext4_xattr_block_csum(inode, block_nr, hdr)))
-               return 0;
-       return 1;
-}
-
-static void ext4_xattr_block_csum_set(struct inode *inode,
-                                     sector_t block_nr,
-                                     struct ext4_xattr_header *hdr)
-{
-       if (!ext4_has_metadata_csum(inode->i_sb))
-               return;
+       struct ext4_xattr_header *hdr = BHDR(bh);
+       int ret = 1;
 
-       hdr->h_checksum = ext4_xattr_block_csum(inode, block_nr, hdr);
+       if (ext4_has_metadata_csum(inode->i_sb)) {
+               lock_buffer(bh);
+               ret = (hdr->h_checksum == ext4_xattr_block_csum(inode,
+                                                       bh->b_blocknr, hdr));
+               unlock_buffer(bh);
+       }
+       return ret;
 }
 
-static inline int ext4_handle_dirty_xattr_block(handle_t *handle,
-                                               struct inode *inode,
-                                               struct buffer_head *bh)
+static void ext4_xattr_block_csum_set(struct inode *inode,
+                                     struct buffer_head *bh)
 {
-       ext4_xattr_block_csum_set(inode, bh->b_blocknr, BHDR(bh));
-       return ext4_handle_dirty_metadata(handle, inode, bh);
+       if (ext4_has_metadata_csum(inode->i_sb))
+               BHDR(bh)->h_checksum = ext4_xattr_block_csum(inode,
+                                               bh->b_blocknr, BHDR(bh));
 }
 
 static inline const struct xattr_handler *
@@ -233,7 +228,7 @@ ext4_xattr_check_block(struct inode *inode, struct buffer_head *bh)
        if (BHDR(bh)->h_magic != cpu_to_le32(EXT4_XATTR_MAGIC) ||
            BHDR(bh)->h_blocks != cpu_to_le32(1))
                return -EFSCORRUPTED;
-       if (!ext4_xattr_block_csum_verify(inode, bh->b_blocknr, BHDR(bh)))
+       if (!ext4_xattr_block_csum_verify(inode, bh))
                return -EFSBADCRC;
        error = ext4_xattr_check_names(BFIRST(bh), bh->b_data + bh->b_size,
                                       bh->b_data);
@@ -618,23 +613,22 @@ ext4_xattr_release_block(handle_t *handle, struct inode *inode,
                        }
                }
 
+               ext4_xattr_block_csum_set(inode, bh);
                /*
                 * Beware of this ugliness: Releasing of xattr block references
                 * from different inodes can race and so we have to protect
                 * from a race where someone else frees the block (and releases
                 * its journal_head) before we are done dirtying the buffer. In
                 * nojournal mode this race is harmless and we actually cannot
-                * call ext4_handle_dirty_xattr_block() with locked buffer as
+                * call ext4_handle_dirty_metadata() with locked buffer as
                 * that function can call sync_dirty_buffer() so for that case
                 * we handle the dirtying after unlocking the buffer.
                 */
                if (ext4_handle_valid(handle))
-                       error = ext4_handle_dirty_xattr_block(handle, inode,
-                                                             bh);
+                       error = ext4_handle_dirty_metadata(handle, inode, bh);
                unlock_buffer(bh);
                if (!ext4_handle_valid(handle))
-                       error = ext4_handle_dirty_xattr_block(handle, inode,
-                                                             bh);
+                       error = ext4_handle_dirty_metadata(handle, inode, bh);
                if (IS_SYNC(inode))
                        ext4_handle_sync(handle);
                dquot_free_block(inode, EXT4_C2B(EXT4_SB(inode->i_sb), 1));
@@ -863,13 +857,14 @@ ext4_xattr_block_set(handle_t *handle, struct inode *inode,
                                ext4_xattr_cache_insert(ext4_mb_cache,
                                        bs->bh);
                        }
+                       ext4_xattr_block_csum_set(inode, bs->bh);
                        unlock_buffer(bs->bh);
                        if (error == -EFSCORRUPTED)
                                goto bad_block;
                        if (!error)
-                               error = ext4_handle_dirty_xattr_block(handle,
-                                                                     inode,
-                                                                     bs->bh);
+                               error = ext4_handle_dirty_metadata(handle,
+                                                                  inode,
+                                                                  bs->bh);
                        if (error)
                                goto cleanup;
                        goto inserted;
@@ -967,10 +962,11 @@ inserted:
                                        ce->e_reusable = 0;
                                ea_bdebug(new_bh, "reusing; refcount now=%d",
                                          ref);
+                               ext4_xattr_block_csum_set(inode, new_bh);
                                unlock_buffer(new_bh);
-                               error = ext4_handle_dirty_xattr_block(handle,
-                                                                     inode,
-                                                                     new_bh);
+                               error = ext4_handle_dirty_metadata(handle,
+                                                                  inode,
+                                                                  new_bh);
                                if (error)
                                        goto cleanup_dquot;
                        }
@@ -1020,11 +1016,12 @@ getblk_failed:
                                goto getblk_failed;
                        }
                        memcpy(new_bh->b_data, s->base, new_bh->b_size);
+                       ext4_xattr_block_csum_set(inode, new_bh);
                        set_buffer_uptodate(new_bh);
                        unlock_buffer(new_bh);
                        ext4_xattr_cache_insert(ext4_mb_cache, new_bh);
-                       error = ext4_handle_dirty_xattr_block(handle,
-                                                             inode, new_bh);
+                       error = ext4_handle_dirty_metadata(handle, inode,
+                                                          new_bh);
                        if (error)
                                goto cleanup;
                }
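
Taken together, the xattr hunks drop the ext4_handle_dirty_xattr_block() wrapper: the checksum is now computed by an explicit ext4_xattr_block_csum_set() call while the buffer is still locked, and the plain ext4_handle_dirty_metadata() call happens after unlock, since it may end up in sync_dirty_buffer(). A condensed sketch of the resulting ordering, with the surrounding logic elided:

/* Sketch only: compute the checksum over the final contents under
 * the buffer lock, then unlock before dirtying the metadata. */
static int write_xattr_block(handle_t *handle, struct inode *inode,
			     struct buffer_head *bh)
{
	lock_buffer(bh);
	/* ... modify bh->b_data ... */
	ext4_xattr_block_csum_set(inode, bh);	/* csum final contents */
	unlock_buffer(bh);
	/* may call sync_dirty_buffer(), so never with the buffer locked */
	return ext4_handle_dirty_metadata(handle, inode, bh);
}
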
index a77df377e2e8197097912c9248948c7e729ce566..ee2d0a485fc3478fc5f93b5b85c6dad0431e8ea0 100644 (file)
@@ -196,6 +196,7 @@ static void update_mem_info(struct f2fs_sb_info *sbi)
        si->base_mem += (NM_I(sbi)->nat_bits_blocks << F2FS_BLKSIZE_BITS);
        si->base_mem += NM_I(sbi)->nat_blocks * NAT_ENTRY_BITMAP_SIZE;
        si->base_mem += NM_I(sbi)->nat_blocks / 8;
+       si->base_mem += NM_I(sbi)->nat_blocks * sizeof(unsigned short);
 
 get_cache:
        si->cache_mem = 0;
index 4650c9b85de77679adaa275406512868671bb1bb..8d5c62b07b283f53e90ded2366c8bb9375409fa2 100644 (file)
@@ -750,7 +750,7 @@ void f2fs_delete_entry(struct f2fs_dir_entry *dentry, struct page *page,
        dentry_blk = page_address(page);
        bit_pos = dentry - dentry_blk->dentry;
        for (i = 0; i < slots; i++)
-               clear_bit_le(bit_pos + i, &dentry_blk->dentry_bitmap);
+               __clear_bit_le(bit_pos + i, &dentry_blk->dentry_bitmap);
 
        /* Let's check and deallocate this dentry page */
        bit_pos = find_next_bit_le(&dentry_blk->dentry_bitmap,
index e849f83d611407b8968bec904c10f1939c40b4f1..0a6e115562f62edca5b60ee4c833e889a904c202 100644 (file)
@@ -561,6 +561,8 @@ struct f2fs_nm_info {
        struct mutex build_lock;        /* lock for build free nids */
        unsigned char (*free_nid_bitmap)[NAT_ENTRY_BITMAP_SIZE];
        unsigned char *nat_block_bitmap;
+       unsigned short *free_nid_count; /* free nid count of NAT block */
+       spinlock_t free_nid_lock;       /* protect updating of nid count */
 
        /* for checkpoint */
        char *nat_bitmap;               /* NAT bitmap pointer */
index 94967171dee87a381655ede9190ff0f66b3ca4af..481aa8dc79f46f4c156cf67cca665e8160e36e6a 100644 (file)
@@ -338,9 +338,6 @@ static void set_node_addr(struct f2fs_sb_info *sbi, struct node_info *ni,
                set_nat_flag(e, IS_CHECKPOINTED, false);
        __set_nat_cache_dirty(nm_i, e);
 
-       if (enabled_nat_bits(sbi, NULL) && new_blkaddr == NEW_ADDR)
-               clear_bit_le(NAT_BLOCK_OFFSET(ni->nid), nm_i->empty_nat_bits);
-
        /* update fsync_mark if its inode nat entry is still alive */
        if (ni->nid != ni->ino)
                e = __lookup_nat_cache(nm_i, ni->ino);
@@ -1823,7 +1820,8 @@ static void remove_free_nid(struct f2fs_sb_info *sbi, nid_t nid)
                kmem_cache_free(free_nid_slab, i);
 }
 
-void update_free_nid_bitmap(struct f2fs_sb_info *sbi, nid_t nid, bool set)
+static void update_free_nid_bitmap(struct f2fs_sb_info *sbi, nid_t nid,
+                       bool set, bool build, bool locked)
 {
        struct f2fs_nm_info *nm_i = NM_I(sbi);
        unsigned int nat_ofs = NAT_BLOCK_OFFSET(nid);
@@ -1833,9 +1831,18 @@ void update_free_nid_bitmap(struct f2fs_sb_info *sbi, nid_t nid, bool set)
                return;
 
        if (set)
-               set_bit_le(nid_ofs, nm_i->free_nid_bitmap[nat_ofs]);
+               __set_bit_le(nid_ofs, nm_i->free_nid_bitmap[nat_ofs]);
        else
-               clear_bit_le(nid_ofs, nm_i->free_nid_bitmap[nat_ofs]);
+               __clear_bit_le(nid_ofs, nm_i->free_nid_bitmap[nat_ofs]);
+
+       if (!locked)
+               spin_lock(&nm_i->free_nid_lock);
+       if (set)
+               nm_i->free_nid_count[nat_ofs]++;
+       else if (!build)
+               nm_i->free_nid_count[nat_ofs]--;
+       if (!locked)
+               spin_unlock(&nm_i->free_nid_lock);
 }
 
 static void scan_nat_page(struct f2fs_sb_info *sbi,
@@ -1847,7 +1854,10 @@ static void scan_nat_page(struct f2fs_sb_info *sbi,
        unsigned int nat_ofs = NAT_BLOCK_OFFSET(start_nid);
        int i;
 
-       set_bit_le(nat_ofs, nm_i->nat_block_bitmap);
+       if (test_bit_le(nat_ofs, nm_i->nat_block_bitmap))
+               return;
+
+       __set_bit_le(nat_ofs, nm_i->nat_block_bitmap);
 
        i = start_nid % NAT_ENTRY_PER_BLOCK;
 
@@ -1861,7 +1871,7 @@ static void scan_nat_page(struct f2fs_sb_info *sbi,
                f2fs_bug_on(sbi, blk_addr == NEW_ADDR);
                if (blk_addr == NULL_ADDR)
                        freed = add_free_nid(sbi, start_nid, true);
-               update_free_nid_bitmap(sbi, start_nid, freed);
+               update_free_nid_bitmap(sbi, start_nid, freed, true, false);
        }
 }
 
@@ -1877,6 +1887,8 @@ static void scan_free_nid_bits(struct f2fs_sb_info *sbi)
        for (i = 0; i < nm_i->nat_blocks; i++) {
                if (!test_bit_le(i, nm_i->nat_block_bitmap))
                        continue;
+               if (!nm_i->free_nid_count[i])
+                       continue;
                for (idx = 0; idx < NAT_ENTRY_PER_BLOCK; idx++) {
                        nid_t nid;
 
@@ -1907,58 +1919,6 @@ out:
        up_read(&nm_i->nat_tree_lock);
 }
 
-static int scan_nat_bits(struct f2fs_sb_info *sbi)
-{
-       struct f2fs_nm_info *nm_i = NM_I(sbi);
-       struct page *page;
-       unsigned int i = 0;
-       nid_t nid;
-
-       if (!enabled_nat_bits(sbi, NULL))
-               return -EAGAIN;
-
-       down_read(&nm_i->nat_tree_lock);
-check_empty:
-       i = find_next_bit_le(nm_i->empty_nat_bits, nm_i->nat_blocks, i);
-       if (i >= nm_i->nat_blocks) {
-               i = 0;
-               goto check_partial;
-       }
-
-       for (nid = i * NAT_ENTRY_PER_BLOCK; nid < (i + 1) * NAT_ENTRY_PER_BLOCK;
-                                                                       nid++) {
-               if (unlikely(nid >= nm_i->max_nid))
-                       break;
-               add_free_nid(sbi, nid, true);
-       }
-
-       if (nm_i->nid_cnt[FREE_NID_LIST] >= MAX_FREE_NIDS)
-               goto out;
-       i++;
-       goto check_empty;
-
-check_partial:
-       i = find_next_zero_bit_le(nm_i->full_nat_bits, nm_i->nat_blocks, i);
-       if (i >= nm_i->nat_blocks) {
-               disable_nat_bits(sbi, true);
-               up_read(&nm_i->nat_tree_lock);
-               return -EINVAL;
-       }
-
-       nid = i * NAT_ENTRY_PER_BLOCK;
-       page = get_current_nat_page(sbi, nid);
-       scan_nat_page(sbi, page, nid);
-       f2fs_put_page(page, 1);
-
-       if (nm_i->nid_cnt[FREE_NID_LIST] < MAX_FREE_NIDS) {
-               i++;
-               goto check_partial;
-       }
-out:
-       up_read(&nm_i->nat_tree_lock);
-       return 0;
-}
-
 static void __build_free_nids(struct f2fs_sb_info *sbi, bool sync, bool mount)
 {
        struct f2fs_nm_info *nm_i = NM_I(sbi);
@@ -1980,21 +1940,6 @@ static void __build_free_nids(struct f2fs_sb_info *sbi, bool sync, bool mount)
 
                if (nm_i->nid_cnt[FREE_NID_LIST])
                        return;
-
-               /* try to find free nids with nat_bits */
-               if (!scan_nat_bits(sbi) && nm_i->nid_cnt[FREE_NID_LIST])
-                       return;
-       }
-
-       /* find next valid candidate */
-       if (enabled_nat_bits(sbi, NULL)) {
-               int idx = find_next_zero_bit_le(nm_i->full_nat_bits,
-                                       nm_i->nat_blocks, 0);
-
-               if (idx >= nm_i->nat_blocks)
-                       set_sbi_flag(sbi, SBI_NEED_FSCK);
-               else
-                       nid = idx * NAT_ENTRY_PER_BLOCK;
        }
 
        /* readahead nat pages to be scanned */
@@ -2081,7 +2026,7 @@ retry:
                __insert_nid_to_list(sbi, i, ALLOC_NID_LIST, false);
                nm_i->available_nids--;
 
-               update_free_nid_bitmap(sbi, *nid, false);
+               update_free_nid_bitmap(sbi, *nid, false, false, false);
 
                spin_unlock(&nm_i->nid_list_lock);
                return true;
@@ -2137,7 +2082,7 @@ void alloc_nid_failed(struct f2fs_sb_info *sbi, nid_t nid)
 
        nm_i->available_nids++;
 
-       update_free_nid_bitmap(sbi, nid, true);
+       update_free_nid_bitmap(sbi, nid, true, false, false);
 
        spin_unlock(&nm_i->nid_list_lock);
 
@@ -2383,7 +2328,7 @@ add_out:
        list_add_tail(&nes->set_list, head);
 }
 
-void __update_nat_bits(struct f2fs_sb_info *sbi, nid_t start_nid,
+static void __update_nat_bits(struct f2fs_sb_info *sbi, nid_t start_nid,
                                                struct page *page)
 {
        struct f2fs_nm_info *nm_i = NM_I(sbi);
@@ -2402,16 +2347,16 @@ void __update_nat_bits(struct f2fs_sb_info *sbi, nid_t start_nid,
                        valid++;
        }
        if (valid == 0) {
-               set_bit_le(nat_index, nm_i->empty_nat_bits);
-               clear_bit_le(nat_index, nm_i->full_nat_bits);
+               __set_bit_le(nat_index, nm_i->empty_nat_bits);
+               __clear_bit_le(nat_index, nm_i->full_nat_bits);
                return;
        }
 
-       clear_bit_le(nat_index, nm_i->empty_nat_bits);
+       __clear_bit_le(nat_index, nm_i->empty_nat_bits);
        if (valid == NAT_ENTRY_PER_BLOCK)
-               set_bit_le(nat_index, nm_i->full_nat_bits);
+               __set_bit_le(nat_index, nm_i->full_nat_bits);
        else
-               clear_bit_le(nat_index, nm_i->full_nat_bits);
+               __clear_bit_le(nat_index, nm_i->full_nat_bits);
 }
 
 static void __flush_nat_entry_set(struct f2fs_sb_info *sbi,
@@ -2467,11 +2412,11 @@ static void __flush_nat_entry_set(struct f2fs_sb_info *sbi,
                        add_free_nid(sbi, nid, false);
                        spin_lock(&NM_I(sbi)->nid_list_lock);
                        NM_I(sbi)->available_nids++;
-                       update_free_nid_bitmap(sbi, nid, true);
+                       update_free_nid_bitmap(sbi, nid, true, false, false);
                        spin_unlock(&NM_I(sbi)->nid_list_lock);
                } else {
                        spin_lock(&NM_I(sbi)->nid_list_lock);
-                       update_free_nid_bitmap(sbi, nid, false);
+                       update_free_nid_bitmap(sbi, nid, false, false, false);
                        spin_unlock(&NM_I(sbi)->nid_list_lock);
                }
        }
@@ -2577,6 +2522,40 @@ static int __get_nat_bitmaps(struct f2fs_sb_info *sbi)
        return 0;
 }
 
+inline void load_free_nid_bitmap(struct f2fs_sb_info *sbi)
+{
+       struct f2fs_nm_info *nm_i = NM_I(sbi);
+       unsigned int i = 0;
+       nid_t nid, last_nid;
+
+       if (!enabled_nat_bits(sbi, NULL))
+               return;
+
+       for (i = 0; i < nm_i->nat_blocks; i++) {
+               i = find_next_bit_le(nm_i->empty_nat_bits, nm_i->nat_blocks, i);
+               if (i >= nm_i->nat_blocks)
+                       break;
+
+               __set_bit_le(i, nm_i->nat_block_bitmap);
+
+               nid = i * NAT_ENTRY_PER_BLOCK;
+               last_nid = (i + 1) * NAT_ENTRY_PER_BLOCK;
+
+               spin_lock(&nm_i->free_nid_lock);
+               for (; nid < last_nid; nid++)
+                       update_free_nid_bitmap(sbi, nid, true, true, true);
+               spin_unlock(&nm_i->free_nid_lock);
+       }
+
+       for (i = 0; i < nm_i->nat_blocks; i++) {
+               i = find_next_bit_le(nm_i->full_nat_bits, nm_i->nat_blocks, i);
+               if (i >= nm_i->nat_blocks)
+                       break;
+
+               __set_bit_le(i, nm_i->nat_block_bitmap);
+       }
+}
+
 static int init_node_manager(struct f2fs_sb_info *sbi)
 {
        struct f2fs_super_block *sb_raw = F2FS_RAW_SUPER(sbi);
@@ -2638,7 +2617,7 @@ static int init_node_manager(struct f2fs_sb_info *sbi)
        return 0;
 }
 
-int init_free_nid_cache(struct f2fs_sb_info *sbi)
+static int init_free_nid_cache(struct f2fs_sb_info *sbi)
 {
        struct f2fs_nm_info *nm_i = NM_I(sbi);
 
@@ -2651,6 +2630,14 @@ int init_free_nid_cache(struct f2fs_sb_info *sbi)
                                                                GFP_KERNEL);
        if (!nm_i->nat_block_bitmap)
                return -ENOMEM;
+
+       nm_i->free_nid_count = f2fs_kvzalloc(nm_i->nat_blocks *
+                                       sizeof(unsigned short), GFP_KERNEL);
+       if (!nm_i->free_nid_count)
+               return -ENOMEM;
+
+       spin_lock_init(&nm_i->free_nid_lock);
+
        return 0;
 }
 
@@ -2670,6 +2657,9 @@ int build_node_manager(struct f2fs_sb_info *sbi)
        if (err)
                return err;
 
+       /* load free nid status from nat_bits table */
+       load_free_nid_bitmap(sbi);
+
        build_free_nids(sbi, true, true);
        return 0;
 }
@@ -2730,6 +2720,7 @@ void destroy_node_manager(struct f2fs_sb_info *sbi)
 
        kvfree(nm_i->nat_block_bitmap);
        kvfree(nm_i->free_nid_bitmap);
+       kvfree(nm_i->free_nid_count);
 
        kfree(nm_i->nat_bitmap);
        kfree(nm_i->nat_bits);
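
The f2fs node-manager hunks pair the free-nid bitmap with a per-NAT-block counter (free_nid_count, guarded by free_nid_lock) so scan_free_nid_bits() can skip blocks with no free nids, and they switch to the non-atomic __set_bit_le()/__clear_bit_le() variants now that updates happen under a lock. A generic kernel-style sketch of the bitmap-plus-counter idiom; the names and sizes are illustrative, not the f2fs ones:

/* A counter beside the bitmap lets scans skip blocks with nothing
 * free, at the cost of keeping both in sync under one lock. */
struct blk_state {
	unsigned long	map[8];		/* 512 slots per block */
	unsigned short	free_cnt;	/* number of set bits in map */
	spinlock_t	lock;
};

static void mark_slot(struct blk_state *b, unsigned int slot, bool free)
{
	spin_lock(&b->lock);
	if (free && !__test_and_set_bit(slot, b->map))
		b->free_cnt++;
	else if (!free && __test_and_clear_bit(slot, b->map))
		b->free_cnt--;
	spin_unlock(&b->lock);
}
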
index 4bd7a8b19332d176d78b0a40c24e7bb12bbe2f5e..29ef7088c5582a480b6a1f7965fbbcca4f07e24e 100644 (file)
@@ -1163,6 +1163,12 @@ static void update_sit_entry(struct f2fs_sb_info *sbi, block_t blkaddr, int del)
                if (f2fs_discard_en(sbi) &&
                        !f2fs_test_and_set_bit(offset, se->discard_map))
                        sbi->discard_blks--;
+
+               /* don't let SSR overwrite this block, to keep the node chain */
+               if (se->type == CURSEG_WARM_NODE) {
+                       if (!f2fs_test_and_set_bit(offset, se->ckpt_valid_map))
+                               se->ckpt_valid_blocks++;
+               }
        } else {
                if (!f2fs_test_and_clear_bit(offset, se->cur_valid_map)) {
 #ifdef CONFIG_F2FS_CHECK_FS
index 338d2f73eb29c8f1691a22a162e5929875bbf8cf..a2c05f2ada6dd86576df1dede141c05248126187 100644 (file)
@@ -1359,6 +1359,16 @@ out:
        return 0;
 }
 
+static void fat_dummy_inode_init(struct inode *inode)
+{
+       /* Initialize this dummy inode to work as a no-op. */
+       MSDOS_I(inode)->mmu_private = 0;
+       MSDOS_I(inode)->i_start = 0;
+       MSDOS_I(inode)->i_logstart = 0;
+       MSDOS_I(inode)->i_attrs = 0;
+       MSDOS_I(inode)->i_pos = 0;
+}
+
 static int fat_read_root(struct inode *inode)
 {
        struct msdos_sb_info *sbi = MSDOS_SB(inode->i_sb);
@@ -1803,12 +1813,13 @@ int fat_fill_super(struct super_block *sb, void *data, int silent, int isvfat,
        fat_inode = new_inode(sb);
        if (!fat_inode)
                goto out_fail;
-       MSDOS_I(fat_inode)->i_pos = 0;
+       fat_dummy_inode_init(fat_inode);
        sbi->fat_inode = fat_inode;
 
        fsinfo_inode = new_inode(sb);
        if (!fsinfo_inode)
                goto out_fail;
+       fat_dummy_inode_init(fsinfo_inode);
        fsinfo_inode->i_ino = MSDOS_FSINFO_INO;
        sbi->fsinfo_inode = fsinfo_inode;
        insert_inode_hash(fsinfo_inode);
index ef600591d96f9a42be98699025f4cf94ef8e7762..63ee2940775ce9c16daca5c2f7590e0c6e57bc07 100644 (file)
@@ -173,19 +173,33 @@ static void wb_wakeup(struct bdi_writeback *wb)
        spin_unlock_bh(&wb->work_lock);
 }
 
+static void finish_writeback_work(struct bdi_writeback *wb,
+                                 struct wb_writeback_work *work)
+{
+       struct wb_completion *done = work->done;
+
+       if (work->auto_free)
+               kfree(work);
+       if (done && atomic_dec_and_test(&done->cnt))
+               wake_up_all(&wb->bdi->wb_waitq);
+}
+
 static void wb_queue_work(struct bdi_writeback *wb,
                          struct wb_writeback_work *work)
 {
        trace_writeback_queue(wb, work);
 
-       spin_lock_bh(&wb->work_lock);
-       if (!test_bit(WB_registered, &wb->state))
-               goto out_unlock;
        if (work->done)
                atomic_inc(&work->done->cnt);
-       list_add_tail(&work->list, &wb->work_list);
-       mod_delayed_work(bdi_wq, &wb->dwork, 0);
-out_unlock:
+
+       spin_lock_bh(&wb->work_lock);
+
+       if (test_bit(WB_registered, &wb->state)) {
+               list_add_tail(&work->list, &wb->work_list);
+               mod_delayed_work(bdi_wq, &wb->dwork, 0);
+       } else
+               finish_writeback_work(wb, work);
+
        spin_unlock_bh(&wb->work_lock);
 }
 
@@ -1873,16 +1887,9 @@ static long wb_do_writeback(struct bdi_writeback *wb)
 
        set_bit(WB_writeback_running, &wb->state);
        while ((work = get_next_work_item(wb)) != NULL) {
-               struct wb_completion *done = work->done;
-
                trace_writeback_exec(wb, work);
-
                wrote += wb_writeback(wb, work);
-
-               if (work->auto_free)
-                       kfree(work);
-               if (done && atomic_dec_and_test(&done->cnt))
-                       wake_up_all(&wb->bdi->wb_waitq);
+               finish_writeback_work(wb, work);
        }
 
        /*
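
The writeback hunks factor the free-and-wake step into finish_writeback_work() so a work item queued against an unregistered bdi_writeback is completed on the spot instead of leaked with its waiter left hanging. A minimal sketch of the refcounted group-completion idiom, with simplified types around the real atomic helpers:

/* Each queued work holds a reference on the shared completion;
 * the last one to finish wakes the waiter. */
struct group_done {
	atomic_t		cnt;	/* outstanding work items */
	wait_queue_head_t	*waitq;
};

static void one_work_done(struct group_done *done)
{
	if (done && atomic_dec_and_test(&done->cnt))
		wake_up_all(done->waitq);
}

static void queue_work_item(bool wb_registered, struct group_done *done)
{
	atomic_inc(&done->cnt);
	if (wb_registered)
		return;			/* worker calls one_work_done() */
	one_work_done(done);		/* complete now rather than leak */
}
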
index c45084ac642d1929058ea5d903ad796d574a45cc..511e1ed7e2ded7b0a9dc9882cffe0b66c37c96a2 100644 (file)
@@ -207,7 +207,7 @@ struct lm_lockname {
        struct gfs2_sbd *ln_sbd;
        u64 ln_number;
        unsigned int ln_type;
-};
+} __packed __aligned(sizeof(int));
 
 #define lm_name_equal(name1, name2) \
         (((name1)->ln_number == (name2)->ln_number) && \
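
Marking lm_lockname __packed __aligned(sizeof(int)) removes the compiler's tail padding, so the struct can be hashed or compared as a flat byte range without reading uninitialized pad bytes, while the explicit alignment keeps member accesses naturally aligned. An illustration of the layout difference, assuming a typical LP64 target:

/* Without packing, the 8-byte struct alignment adds tail padding
 * after the final u32; memcmp()/hashing would then cover garbage. */
struct name_padded {
	void			*sbd;
	unsigned long long	number;
	unsigned int		type;
};	/* sizeof == 24: four trailing pad bytes */

struct name_packed {
	void			*sbd;
	unsigned long long	number;
	unsigned int		type;
} __attribute__((packed, aligned(sizeof(int))));
	/* sizeof == 20: no padding, still int-aligned */
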
index 8f96461236f655c3eef66694d45d9c4381a58210..7163fe014b57f4e15813c1969958d5764b18e5af 100644 (file)
@@ -695,14 +695,11 @@ static struct inode *hugetlbfs_get_root(struct super_block *sb,
 
        inode = new_inode(sb);
        if (inode) {
-               struct hugetlbfs_inode_info *info;
                inode->i_ino = get_next_ino();
                inode->i_mode = S_IFDIR | config->mode;
                inode->i_uid = config->uid;
                inode->i_gid = config->gid;
                inode->i_atime = inode->i_mtime = inode->i_ctime = current_time(inode);
-               info = HUGETLBFS_I(inode);
-               mpol_shared_policy_init(&info->policy, NULL);
                inode->i_op = &hugetlbfs_dir_inode_operations;
                inode->i_fop = &simple_dir_operations;
                /* directory inodes start off with i_nlink == 2 (for "." entry) */
@@ -733,7 +730,6 @@ static struct inode *hugetlbfs_get_inode(struct super_block *sb,
 
        inode = new_inode(sb);
        if (inode) {
-               struct hugetlbfs_inode_info *info;
                inode->i_ino = get_next_ino();
                inode_init_owner(inode, dir, mode);
                lockdep_set_class(&inode->i_mapping->i_mmap_rwsem,
@@ -741,15 +737,6 @@ static struct inode *hugetlbfs_get_inode(struct super_block *sb,
                inode->i_mapping->a_ops = &hugetlbfs_aops;
                inode->i_atime = inode->i_mtime = inode->i_ctime = current_time(inode);
                inode->i_mapping->private_data = resv_map;
-               info = HUGETLBFS_I(inode);
-               /*
-                * The policy is initialized here even if we are creating a
-                * private inode because initialization simply creates an
-                * an empty rb tree and calls rwlock_init(), later when we
-                * call mpol_free_shared_policy() it will just return because
-                * the rb tree will still be empty.
-                */
-               mpol_shared_policy_init(&info->policy, NULL);
                switch (mode & S_IFMT) {
                default:
                        init_special_inode(inode, mode, dev);
@@ -937,6 +924,18 @@ static struct inode *hugetlbfs_alloc_inode(struct super_block *sb)
                hugetlbfs_inc_free_inodes(sbinfo);
                return NULL;
        }
+
+       /*
+        * Any time after allocation, hugetlbfs_destroy_inode can be called
+        * for the inode.  mpol_free_shared_policy is unconditionally called
+        * as part of hugetlbfs_destroy_inode.  So, initialize policy here
+        * in case of a quick call to destroy.
+        *
+        * Note that the policy is initialized even if we are creating a
+        * private inode.  This simplifies hugetlbfs_destroy_inode.
+        */
+       mpol_shared_policy_init(&p->policy, NULL);
+
        return &p->vfs_inode;
 }
 
index 3ca1a8e44135ed757bc309cd750899d51f093970..141c3cd55a8b2d974f431d7710fbe4de58f78355 100644 (file)
@@ -846,7 +846,8 @@ iomap_dio_rw(struct kiocb *iocb, struct iov_iter *iter,
        struct address_space *mapping = iocb->ki_filp->f_mapping;
        struct inode *inode = file_inode(iocb->ki_filp);
        size_t count = iov_iter_count(iter);
-       loff_t pos = iocb->ki_pos, end = iocb->ki_pos + count - 1, ret = 0;
+       loff_t pos = iocb->ki_pos, start = pos;
+       loff_t end = iocb->ki_pos + count - 1, ret = 0;
        unsigned int flags = IOMAP_DIRECT;
        struct blk_plug plug;
        struct iomap_dio *dio;
@@ -887,12 +888,12 @@ iomap_dio_rw(struct kiocb *iocb, struct iov_iter *iter,
        }
 
        if (mapping->nrpages) {
-               ret = filemap_write_and_wait_range(mapping, iocb->ki_pos, end);
+               ret = filemap_write_and_wait_range(mapping, start, end);
                if (ret)
                        goto out_free_dio;
 
                ret = invalidate_inode_pages2_range(mapping,
-                               iocb->ki_pos >> PAGE_SHIFT, end >> PAGE_SHIFT);
+                               start >> PAGE_SHIFT, end >> PAGE_SHIFT);
                WARN_ON_ONCE(ret);
                ret = 0;
        }
@@ -941,6 +942,8 @@ iomap_dio_rw(struct kiocb *iocb, struct iov_iter *iter,
                __set_current_state(TASK_RUNNING);
        }
 
+       ret = iomap_dio_complete(dio);
+
        /*
         * Try again to invalidate clean pages which might have been cached by
         * non-direct readahead, or faulted in by get_user_pages() if the source
@@ -949,12 +952,12 @@ iomap_dio_rw(struct kiocb *iocb, struct iov_iter *iter,
         * this invalidation fails, tough, the write still worked...
         */
        if (iov_iter_rw(iter) == WRITE && mapping->nrpages) {
-               ret = invalidate_inode_pages2_range(mapping,
-                               iocb->ki_pos >> PAGE_SHIFT, end >> PAGE_SHIFT);
-               WARN_ON_ONCE(ret);
+               int err = invalidate_inode_pages2_range(mapping,
+                               start >> PAGE_SHIFT, end >> PAGE_SHIFT);
+               WARN_ON_ONCE(err);
        }
 
-       return iomap_dio_complete(dio);
+       return ret;
 
 out_free_dio:
        kfree(dio);
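
Besides switching the invalidation ranges to a saved start offset (iocb->ki_pos can advance during the I/O), the hunk completes the dio before the trailing invalidation and keeps the cleanup result in a local err, so a failed best-effort invalidation can no longer clobber the byte count returned to the caller. A sketch of that return-value discipline; do_io() and invalidate_pages() are hypothetical stand-ins:

/* Keep the I/O result in ret; cleanup gets its own err so failure
 * there is reported but never overwrites a successful count. */
static long dio_write(void)
{
	long ret = do_io();

	if (ret >= 0) {
		int err = invalidate_pages();

		WARN_ON_ONCE(err);	/* report, don't propagate */
	}
	return ret;
}
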
index a1a359bfcc9cd4ff84254e464788ab3031dfe90f..5adc2fb62b0fab89899e5d0acba1e8019a73c766 100644 (file)
@@ -1125,10 +1125,8 @@ static journal_t *journal_init_common(struct block_device *bdev,
 
        /* Set up a default-sized revoke table for the new mount. */
        err = jbd2_journal_init_revoke(journal, JOURNAL_REVOKE_DEFAULT_HASH);
-       if (err) {
-               kfree(journal);
-               return NULL;
-       }
+       if (err)
+               goto err_cleanup;
 
        spin_lock_init(&journal->j_history_lock);
 
@@ -1145,23 +1143,25 @@ static journal_t *journal_init_common(struct block_device *bdev,
        journal->j_wbufsize = n;
        journal->j_wbuf = kmalloc_array(n, sizeof(struct buffer_head *),
                                        GFP_KERNEL);
-       if (!journal->j_wbuf) {
-               kfree(journal);
-               return NULL;
-       }
+       if (!journal->j_wbuf)
+               goto err_cleanup;
 
        bh = getblk_unmovable(journal->j_dev, start, journal->j_blocksize);
        if (!bh) {
                pr_err("%s: Cannot get buffer for journal superblock\n",
                        __func__);
-               kfree(journal->j_wbuf);
-               kfree(journal);
-               return NULL;
+               goto err_cleanup;
        }
        journal->j_sb_buffer = bh;
        journal->j_superblock = (journal_superblock_t *)bh->b_data;
 
        return journal;
+
+err_cleanup:
+       kfree(journal->j_wbuf);
+       jbd2_journal_destroy_revoke(journal);
+       kfree(journal);
+       return NULL;
 }
 
 /* jbd2_journal_init_dev and jbd2_journal_init_inode:
index cfc38b5521189f8ff64330ff33aa6ac8c25794ec..f9aefcda585418abcc37e58226eb39f3ac883172 100644 (file)
@@ -280,6 +280,7 @@ int jbd2_journal_init_revoke(journal_t *journal, int hash_size)
 
 fail1:
        jbd2_journal_destroy_revoke_table(journal->j_revoke_table[0]);
+       journal->j_revoke_table[0] = NULL;
 fail0:
        return -ENOMEM;
 }
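
The journal.c hunks funnel every failure path of journal_init_common() through a single err_cleanup label, and the revoke.c hunk resets j_revoke_table[0] to NULL after destroying it so the label's unconditional jbd2_journal_destroy_revoke() cannot touch a half-torn-down table. A small userspace sketch of the same centralized-cleanup idiom, relying on free(NULL) being a no-op just as kfree(NULL) is:

#include <stdlib.h>

struct thing {
	char *buf_a;
	char *buf_b;
};

static struct thing *thing_create(void)
{
	struct thing *t = calloc(1, sizeof(*t));	/* zeroed members */

	if (!t)
		return NULL;
	t->buf_a = malloc(64);
	if (!t->buf_a)
		goto err_cleanup;
	t->buf_b = malloc(64);
	if (!t->buf_b)
		goto err_cleanup;
	return t;

err_cleanup:
	free(t->buf_a);		/* free(NULL) is a no-op */
	free(t->buf_b);
	free(t);
	return NULL;
}
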
index 8e4dc7ab584c2df9bf802c2827dacb03ef534097..ac2dfe0c5a9c8520aa8b40c4bab97b710799cc78 100644 (file)
@@ -809,7 +809,8 @@ void kernfs_drain_open_files(struct kernfs_node *kn)
                if (kn->flags & KERNFS_HAS_MMAP)
                        unmap_mapping_range(inode->i_mapping, 0, 0, 1);
 
-               kernfs_release_file(kn, of);
+               if (kn->flags & KERNFS_HAS_RELEASE)
+                       kernfs_release_file(kn, of);
        }
 
        mutex_unlock(&kernfs_open_file_mutex);
index bb79972dc638ba8bf27beef1930deeb186820af5..773774531aff5fc081610706ea39756b0e5a5c25 100644 (file)
@@ -232,12 +232,12 @@ static struct svc_serv_ops nfs41_cb_sv_ops = {
        .svo_module             = THIS_MODULE,
 };
 
-struct svc_serv_ops *nfs4_cb_sv_ops[] = {
+static struct svc_serv_ops *nfs4_cb_sv_ops[] = {
        [0] = &nfs40_cb_sv_ops,
        [1] = &nfs41_cb_sv_ops,
 };
 #else
-struct svc_serv_ops *nfs4_cb_sv_ops[] = {
+static struct svc_serv_ops *nfs4_cb_sv_ops[] = {
        [0] = &nfs40_cb_sv_ops,
        [1] = NULL,
 };
index 91a8d610ba0fa6db7cc76458ec2514aec9b124db..390ada8741bcbfd2e4aaecb3f759ec0707003674 100644 (file)
@@ -325,10 +325,33 @@ static struct nfs_client *nfs_match_client(const struct nfs_client_initdata *dat
        return NULL;
 }
 
-static bool nfs_client_init_is_complete(const struct nfs_client *clp)
+/*
+ * Return true if @clp is done initializing, false if still working on it.
+ *
+ * Use nfs_client_init_status to check if it was successful.
+ */
+bool nfs_client_init_is_complete(const struct nfs_client *clp)
 {
        return clp->cl_cons_state <= NFS_CS_READY;
 }
+EXPORT_SYMBOL_GPL(nfs_client_init_is_complete);
+
+/*
+ * Return 0 if @clp was successfully initialized, -errno otherwise.
+ *
+ * This must be called *after* nfs_client_init_is_complete() returns true;
+ * otherwise it triggers a WARN_ON_ONCE() and returns -EINVAL.
+ */
+int nfs_client_init_status(const struct nfs_client *clp)
+{
+       /* caller didn't check nfs_client_init_is_complete() first */
+       if (clp->cl_cons_state > NFS_CS_READY) {
+               WARN_ON_ONCE(1);
+               return -EINVAL;
+       }
+       return clp->cl_cons_state;
+}
+EXPORT_SYMBOL_GPL(nfs_client_init_status);
 
 int nfs_wait_client_init_complete(const struct nfs_client *clp)
 {
index fb499a3f21b58ed341bbe17933bd5e191c850212..f92ba8d6c5569099f6c469eda92446ad0d7e148d 100644 (file)
@@ -2055,7 +2055,7 @@ int nfs_rename(struct inode *old_dir, struct dentry *old_dentry,
 {
        struct inode *old_inode = d_inode(old_dentry);
        struct inode *new_inode = d_inode(new_dentry);
-       struct dentry *dentry = NULL, *rehash = NULL;
+       struct dentry *dentry = NULL;
        struct rpc_task *task;
        int error = -EBUSY;
 
@@ -2078,10 +2078,8 @@ int nfs_rename(struct inode *old_dir, struct dentry *old_dentry,
                 * To prevent any new references to the target during the
                 * rename, we unhash the dentry in advance.
                 */
-               if (!d_unhashed(new_dentry)) {
+               if (!d_unhashed(new_dentry))
                        d_drop(new_dentry);
-                       rehash = new_dentry;
-               }
 
                if (d_count(new_dentry) > 2) {
                        int err;
@@ -2098,7 +2096,6 @@ int nfs_rename(struct inode *old_dir, struct dentry *old_dentry,
                                goto out;
 
                        new_dentry = dentry;
-                       rehash = NULL;
                        new_inode = NULL;
                }
        }
@@ -2119,8 +2116,6 @@ int nfs_rename(struct inode *old_dir, struct dentry *old_dentry,
                error = task->tk_status;
        rpc_put_task(task);
 out:
-       if (rehash)
-               d_rehash(rehash);
        trace_nfs_rename_exit(old_dir, old_dentry,
                        new_dir, new_dentry, error);
        /* new dentry created? */
index 44347f4bdc1516f54f030ca9f0d95332ab816116..acd30baca46166c902aa5dfae1663184cc30e235 100644 (file)
@@ -202,10 +202,10 @@ static int filelayout_async_handle_error(struct rpc_task *task,
                        task->tk_status);
                nfs4_mark_deviceid_unavailable(devid);
                pnfs_error_mark_layout_for_return(inode, lseg);
-               pnfs_set_lo_fail(lseg);
                rpc_wake_up(&tbl->slot_tbl_waitq);
                /* fall through */
        default:
+               pnfs_set_lo_fail(lseg);
 reset:
                dprintk("%s Retry through MDS. Error %d\n", __func__,
                        task->tk_status);
@@ -560,6 +560,50 @@ filelayout_write_pagelist(struct nfs_pgio_header *hdr, int sync)
        return PNFS_ATTEMPTED;
 }
 
+static int
+filelayout_check_deviceid(struct pnfs_layout_hdr *lo,
+                         struct nfs4_filelayout_segment *fl,
+                         gfp_t gfp_flags)
+{
+       struct nfs4_deviceid_node *d;
+       struct nfs4_file_layout_dsaddr *dsaddr;
+       int status = -EINVAL;
+
+       /* find and reference the deviceid */
+       d = nfs4_find_get_deviceid(NFS_SERVER(lo->plh_inode), &fl->deviceid,
+                       lo->plh_lc_cred, gfp_flags);
+       if (d == NULL)
+               goto out;
+
+       dsaddr = container_of(d, struct nfs4_file_layout_dsaddr, id_node);
+       /* Found deviceid is unavailable */
+       if (filelayout_test_devid_unavailable(&dsaddr->id_node))
+               goto out_put;
+
+       fl->dsaddr = dsaddr;
+
+       if (fl->first_stripe_index >= dsaddr->stripe_count) {
+               dprintk("%s Bad first_stripe_index %u\n",
+                               __func__, fl->first_stripe_index);
+               goto out_put;
+       }
+
+       if ((fl->stripe_type == STRIPE_SPARSE &&
+           fl->num_fh > 1 && fl->num_fh != dsaddr->ds_num) ||
+           (fl->stripe_type == STRIPE_DENSE &&
+           fl->num_fh != dsaddr->stripe_count)) {
+               dprintk("%s num_fh %u not valid for given packing\n",
+                       __func__, fl->num_fh);
+               goto out_put;
+       }
+       status = 0;
+out:
+       return status;
+out_put:
+       nfs4_fl_put_deviceid(dsaddr);
+       goto out;
+}
+
 /*
  * filelayout_check_layout()
  *
@@ -572,11 +616,8 @@ static int
 filelayout_check_layout(struct pnfs_layout_hdr *lo,
                        struct nfs4_filelayout_segment *fl,
                        struct nfs4_layoutget_res *lgr,
-                       struct nfs4_deviceid *id,
                        gfp_t gfp_flags)
 {
-       struct nfs4_deviceid_node *d;
-       struct nfs4_file_layout_dsaddr *dsaddr;
        int status = -EINVAL;
 
        dprintk("--> %s\n", __func__);
@@ -601,41 +642,10 @@ filelayout_check_layout(struct pnfs_layout_hdr *lo,
                goto out;
        }
 
-       /* find and reference the deviceid */
-       d = nfs4_find_get_deviceid(NFS_SERVER(lo->plh_inode), id,
-                       lo->plh_lc_cred, gfp_flags);
-       if (d == NULL)
-               goto out;
-
-       dsaddr = container_of(d, struct nfs4_file_layout_dsaddr, id_node);
-       /* Found deviceid is unavailable */
-       if (filelayout_test_devid_unavailable(&dsaddr->id_node))
-               goto out_put;
-
-       fl->dsaddr = dsaddr;
-
-       if (fl->first_stripe_index >= dsaddr->stripe_count) {
-               dprintk("%s Bad first_stripe_index %u\n",
-                               __func__, fl->first_stripe_index);
-               goto out_put;
-       }
-
-       if ((fl->stripe_type == STRIPE_SPARSE &&
-           fl->num_fh > 1 && fl->num_fh != dsaddr->ds_num) ||
-           (fl->stripe_type == STRIPE_DENSE &&
-           fl->num_fh != dsaddr->stripe_count)) {
-               dprintk("%s num_fh %u not valid for given packing\n",
-                       __func__, fl->num_fh);
-               goto out_put;
-       }
-
        status = 0;
 out:
        dprintk("--> %s returns %d\n", __func__, status);
        return status;
-out_put:
-       nfs4_fl_put_deviceid(dsaddr);
-       goto out;
 }
 
 static void _filelayout_free_lseg(struct nfs4_filelayout_segment *fl)
@@ -657,7 +667,6 @@ static int
 filelayout_decode_layout(struct pnfs_layout_hdr *flo,
                         struct nfs4_filelayout_segment *fl,
                         struct nfs4_layoutget_res *lgr,
-                        struct nfs4_deviceid *id,
                         gfp_t gfp_flags)
 {
        struct xdr_stream stream;
@@ -682,9 +691,9 @@ filelayout_decode_layout(struct pnfs_layout_hdr *flo,
        if (unlikely(!p))
                goto out_err;
 
-       memcpy(id, p, sizeof(*id));
+       memcpy(&fl->deviceid, p, sizeof(fl->deviceid));
        p += XDR_QUADLEN(NFS4_DEVICEID4_SIZE);
-       nfs4_print_deviceid(id);
+       nfs4_print_deviceid(&fl->deviceid);
 
        nfl_util = be32_to_cpup(p++);
        if (nfl_util & NFL4_UFLG_COMMIT_THRU_MDS)
@@ -831,15 +840,14 @@ filelayout_alloc_lseg(struct pnfs_layout_hdr *layoutid,
 {
        struct nfs4_filelayout_segment *fl;
        int rc;
-       struct nfs4_deviceid id;
 
        dprintk("--> %s\n", __func__);
        fl = kzalloc(sizeof(*fl), gfp_flags);
        if (!fl)
                return NULL;
 
-       rc = filelayout_decode_layout(layoutid, fl, lgr, &id, gfp_flags);
-       if (rc != 0 || filelayout_check_layout(layoutid, fl, lgr, &id, gfp_flags)) {
+       rc = filelayout_decode_layout(layoutid, fl, lgr, gfp_flags);
+       if (rc != 0 || filelayout_check_layout(layoutid, fl, lgr, gfp_flags)) {
                _filelayout_free_lseg(fl);
                return NULL;
        }
@@ -888,18 +896,51 @@ filelayout_pg_test(struct nfs_pageio_descriptor *pgio, struct nfs_page *prev,
        return min(stripe_unit - (unsigned int)stripe_offset, size);
 }
 
+static struct pnfs_layout_segment *
+fl_pnfs_update_layout(struct inode *ino,
+                     struct nfs_open_context *ctx,
+                     loff_t pos,
+                     u64 count,
+                     enum pnfs_iomode iomode,
+                     bool strict_iomode,
+                     gfp_t gfp_flags)
+{
+       struct pnfs_layout_segment *lseg = NULL;
+       struct pnfs_layout_hdr *lo;
+       struct nfs4_filelayout_segment *fl;
+       int status;
+
+       lseg = pnfs_update_layout(ino, ctx, pos, count, iomode, strict_iomode,
+                                 gfp_flags);
+       if (!lseg)
+               lseg = ERR_PTR(-ENOMEM);
+       if (IS_ERR(lseg))
+               goto out;
+
+       lo = NFS_I(ino)->layout;
+       fl = FILELAYOUT_LSEG(lseg);
+
+       status = filelayout_check_deviceid(lo, fl, gfp_flags);
+       if (status)
+               lseg = ERR_PTR(status);
+out:
+       if (IS_ERR(lseg))
+               pnfs_put_lseg(lseg);
+       return lseg;
+}
+
 static void
 filelayout_pg_init_read(struct nfs_pageio_descriptor *pgio,
                        struct nfs_page *req)
 {
        if (!pgio->pg_lseg) {
-               pgio->pg_lseg = pnfs_update_layout(pgio->pg_inode,
-                                          req->wb_context,
-                                          0,
-                                          NFS4_MAX_UINT64,
-                                          IOMODE_READ,
-                                          false,
-                                          GFP_KERNEL);
+               pgio->pg_lseg = fl_pnfs_update_layout(pgio->pg_inode,
+                                                     req->wb_context,
+                                                     0,
+                                                     NFS4_MAX_UINT64,
+                                                     IOMODE_READ,
+                                                     false,
+                                                     GFP_KERNEL);
                if (IS_ERR(pgio->pg_lseg)) {
                        pgio->pg_error = PTR_ERR(pgio->pg_lseg);
                        pgio->pg_lseg = NULL;
@@ -919,13 +960,13 @@ filelayout_pg_init_write(struct nfs_pageio_descriptor *pgio,
        int status;
 
        if (!pgio->pg_lseg) {
-               pgio->pg_lseg = pnfs_update_layout(pgio->pg_inode,
-                                          req->wb_context,
-                                          0,
-                                          NFS4_MAX_UINT64,
-                                          IOMODE_RW,
-                                          false,
-                                          GFP_NOFS);
+               pgio->pg_lseg = fl_pnfs_update_layout(pgio->pg_inode,
+                                                     req->wb_context,
+                                                     0,
+                                                     NFS4_MAX_UINT64,
+                                                     IOMODE_RW,
+                                                     false,
+                                                     GFP_NOFS);
                if (IS_ERR(pgio->pg_lseg)) {
                        pgio->pg_error = PTR_ERR(pgio->pg_lseg);
                        pgio->pg_lseg = NULL;
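
These filelayout hunks stop resolving the deviceid at layout-decode time: filelayout_decode_layout() now just stashes the raw id in the segment, and fl_pnfs_update_layout() performs the lookup and sanity checks lazily when the pageio path first needs a segment. A condensed sketch of that defer-and-validate split; the helper names are simplified stand-ins:

/* Decode stores the id only; validation runs at first use and a
 * failed check drops the segment reference. */
static struct layout_seg *get_valid_lseg(struct inode *ino)
{
	struct layout_seg *lseg = lookup_or_fetch_layout(ino);
	int status;

	if (IS_ERR_OR_NULL(lseg))
		return lseg ? lseg : ERR_PTR(-ENOMEM);
	status = check_deviceid(lseg);	/* GETDEVICEINFO happens here */
	if (status) {
		put_lseg(lseg);
		return ERR_PTR(status);
	}
	return lseg;
}
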
index 2896cb833a11375e064f926c3a50b5cbfd499f39..79323b5dab0cb38a212318d774273cb7ce88187f 100644 (file)
@@ -55,15 +55,16 @@ struct nfs4_file_layout_dsaddr {
 };
 
 struct nfs4_filelayout_segment {
-       struct pnfs_layout_segment generic_hdr;
-       u32 stripe_type;
-       u32 commit_through_mds;
-       u32 stripe_unit;
-       u32 first_stripe_index;
-       u64 pattern_offset;
-       struct nfs4_file_layout_dsaddr *dsaddr; /* Point to GETDEVINFO data */
-       unsigned int num_fh;
-       struct nfs_fh **fh_array;
+       struct pnfs_layout_segment      generic_hdr;
+       u32                             stripe_type;
+       u32                             commit_through_mds;
+       u32                             stripe_unit;
+       u32                             first_stripe_index;
+       u64                             pattern_offset;
+       struct nfs4_deviceid            deviceid;
+       struct nfs4_file_layout_dsaddr  *dsaddr; /* Point to GETDEVINFO data */
+       unsigned int                    num_fh;
+       struct nfs_fh                   **fh_array;
 };
 
 struct nfs4_filelayout {
index f956ca20a8a3595e36e6cae0e913dc90a47b1e22..d913e818858f3fee8d7d5c199714d2d79b1bef39 100644 (file)
@@ -266,6 +266,7 @@ nfs4_fl_prepare_ds(struct pnfs_layout_segment *lseg, u32 ds_idx)
        struct nfs4_deviceid_node *devid = FILELAYOUT_DEVID_NODE(lseg);
        struct nfs4_pnfs_ds *ret = ds;
        struct nfs_server *s = NFS_SERVER(lseg->pls_layout->plh_inode);
+       int status;
 
        if (ds == NULL) {
                printk(KERN_ERR "NFS: %s: No data server for offset index %d\n",
@@ -277,9 +278,14 @@ nfs4_fl_prepare_ds(struct pnfs_layout_segment *lseg, u32 ds_idx)
        if (ds->ds_clp)
                goto out_test_devid;
 
-       nfs4_pnfs_ds_connect(s, ds, devid, dataserver_timeo,
+       status = nfs4_pnfs_ds_connect(s, ds, devid, dataserver_timeo,
                             dataserver_retrans, 4,
                             s->nfs_client->cl_minorversion);
+       if (status) {
+               nfs4_mark_deviceid_unavailable(devid);
+               ret = NULL;
+               goto out;
+       }
 
 out_test_devid:
        if (ret->ds_clp == NULL ||
index f4f39b0ab09b25170ed1f9f9a9a961ecadb9a5d2..98b34c9b0564b348615a0d560b863c11cd17ad5e 100644 (file)
@@ -175,7 +175,19 @@ ff_layout_no_read_on_rw(struct pnfs_layout_segment *lseg)
 static inline bool
 ff_layout_test_devid_unavailable(struct nfs4_deviceid_node *node)
 {
-       return nfs4_test_deviceid_unavailable(node);
+       /*
+        * Flexfiles should never mark a DS unavailable, but if it does
+        * print a (ratelimited) warning as this can affect performance.
+        */
+       if (nfs4_test_deviceid_unavailable(node)) {
+               u32 *p = (u32 *)node->deviceid.data;
+
+               pr_warn_ratelimited("NFS: flexfiles layout referencing an "
+                               "unavailable device [%x%x%x%x]\n",
+                               p[0], p[1], p[2], p[3]);
+               return true;
+       }
+       return false;
 }
 
 static inline int
index e5a6f248697b369003e89ed526608d7cd2a296eb..457cfeb1d5c162e4177450eb941460a2fe39f3b1 100644 (file)
@@ -208,6 +208,10 @@ static bool ff_layout_mirror_valid(struct pnfs_layout_segment *lseg,
                } else
                        goto outerr;
        }
+
+       if (IS_ERR(mirror->mirror_ds))
+               goto outerr;
+
        if (mirror->mirror_ds->ds == NULL) {
                struct nfs4_deviceid_node *devid;
                devid = &mirror->mirror_ds->id_node;
@@ -384,6 +388,7 @@ nfs4_ff_layout_prepare_ds(struct pnfs_layout_segment *lseg, u32 ds_idx,
        struct inode *ino = lseg->pls_layout->plh_inode;
        struct nfs_server *s = NFS_SERVER(ino);
        unsigned int max_payload;
+       int status;
 
        if (!ff_layout_mirror_valid(lseg, mirror, true)) {
                pr_err_ratelimited("NFS: %s: No data server for offset index %d\n",
@@ -404,7 +409,7 @@ nfs4_ff_layout_prepare_ds(struct pnfs_layout_segment *lseg, u32 ds_idx,
        /* FIXME: For now we assume the server sent only one version of NFS
         * to use for the DS.
         */
-       nfs4_pnfs_ds_connect(s, ds, devid, dataserver_timeo,
+       status = nfs4_pnfs_ds_connect(s, ds, devid, dataserver_timeo,
                             dataserver_retrans,
                             mirror->mirror_ds->ds_versions[0].version,
                             mirror->mirror_ds->ds_versions[0].minor_version);
@@ -420,11 +425,11 @@ nfs4_ff_layout_prepare_ds(struct pnfs_layout_segment *lseg, u32 ds_idx,
                        mirror->mirror_ds->ds_versions[0].wsize = max_payload;
                goto out;
        }
+out_fail:
        ff_layout_track_ds_error(FF_LAYOUT_FROM_HDR(lseg->pls_layout),
                                 mirror, lseg->pls_range.offset,
                                 lseg->pls_range.length, NFS4ERR_NXIO,
                                 OP_ILLEGAL, GFP_NOIO);
-out_fail:
        if (fail_return || !ff_layout_has_available_ds(lseg))
                pnfs_error_mark_layout_for_return(ino, lseg);
        ds = NULL;
index 09ca5095c04e427c881785170aefe7fdf58e7621..7b38fedb7e032824ec509edca5cf465a22147851 100644 (file)
@@ -186,6 +186,8 @@ extern struct nfs_server *nfs_clone_server(struct nfs_server *,
                                           struct nfs_fh *,
                                           struct nfs_fattr *,
                                           rpc_authflavor_t);
+extern bool nfs_client_init_is_complete(const struct nfs_client *clp);
+extern int nfs_client_init_status(const struct nfs_client *clp);
 extern int nfs_wait_client_init_complete(const struct nfs_client *clp);
 extern void nfs_mark_client_ready(struct nfs_client *clp, int state);
 extern struct nfs_client *nfs4_set_ds_client(struct nfs_server *mds_srv,
index 5ae9d64ea08bc80c97c7c4c5b71ee73ef1a6ba8b..8346ccbf2d52e518b6fa61d0c8cbb3d033ec1f02 100644 (file)
@@ -1023,9 +1023,9 @@ static void nfs4_session_set_rwsize(struct nfs_server *server)
        server_resp_sz = sess->fc_attrs.max_resp_sz - nfs41_maxread_overhead;
        server_rqst_sz = sess->fc_attrs.max_rqst_sz - nfs41_maxwrite_overhead;
 
-       if (server->rsize > server_resp_sz)
+       if (!server->rsize || server->rsize > server_resp_sz)
                server->rsize = server_resp_sz;
-       if (server->wsize > server_rqst_sz)
+       if (!server->wsize || server->wsize > server_rqst_sz)
                server->wsize = server_rqst_sz;
 #endif /* CONFIG_NFS_V4_1 */
 }
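
The rwsize hunk treats a zero rsize/wsize as "not set by mount options", so the session-derived limit now applies in that case too instead of leaving the size at zero. The clamp reduces to:

/* 0 means "unset": fall back to the session maximum either way. */
static unsigned int clamp_io_size(unsigned int requested,
				  unsigned int session_max)
{
	return (!requested || requested > session_max)
		? session_max : requested;
}
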
index 1b183686c6d4f06c3b1d4ed044c527bff6ba4a83..201ca3f2c4bac14986220fcdf6a6c37b734ffa96 100644 (file)
@@ -2258,8 +2258,6 @@ static int nfs4_opendata_access(struct rpc_cred *cred,
        if ((mask & ~cache.mask & (MAY_READ | MAY_EXEC)) == 0)
                return 0;
 
-       /* even though OPEN succeeded, access is denied. Close the file */
-       nfs4_close_state(state, fmode);
        return -EACCES;
 }
 
@@ -2444,17 +2442,14 @@ static void nfs41_check_delegation_stateid(struct nfs4_state *state)
        }
 
        nfs4_stateid_copy(&stateid, &delegation->stateid);
-       if (test_bit(NFS_DELEGATION_REVOKED, &delegation->flags)) {
+       if (test_bit(NFS_DELEGATION_REVOKED, &delegation->flags) ||
+               !test_and_clear_bit(NFS_DELEGATION_TEST_EXPIRED,
+                       &delegation->flags)) {
                rcu_read_unlock();
                nfs_finish_clear_delegation_stateid(state, &stateid);
                return;
        }
 
-       if (!test_and_clear_bit(NFS_DELEGATION_TEST_EXPIRED, &delegation->flags)) {
-               rcu_read_unlock();
-               return;
-       }
-
        cred = get_rpccred(delegation->cred);
        rcu_read_unlock();
        status = nfs41_test_and_free_expired_stateid(server, &stateid, cred);
@@ -7427,11 +7422,11 @@ static void nfs4_exchange_id_release(void *data)
        struct nfs41_exchange_id_data *cdata =
                                        (struct nfs41_exchange_id_data *)data;
 
-       nfs_put_client(cdata->args.client);
        if (cdata->xprt) {
                xprt_put(cdata->xprt);
                rpc_clnt_xprt_switch_put(cdata->args.client->cl_rpcclient);
        }
+       nfs_put_client(cdata->args.client);
        kfree(cdata->res.impl_id);
        kfree(cdata->res.server_scope);
        kfree(cdata->res.server_owner);
@@ -7538,10 +7533,8 @@ static int _nfs4_proc_exchange_id(struct nfs_client *clp, struct rpc_cred *cred,
        task_setup_data.callback_data = calldata;
 
        task = rpc_run_task(&task_setup_data);
-       if (IS_ERR(task)) {
-       status = PTR_ERR(task);
-               goto out_impl_id;
-       }
+       if (IS_ERR(task))
+               return PTR_ERR(task);
 
        if (!xprt) {
                status = rpc_wait_for_completion_task(task);
@@ -7569,6 +7562,7 @@ out_server_owner:
        kfree(calldata->res.server_owner);
 out_calldata:
        kfree(calldata);
+       nfs_put_client(clp);
        goto out;
 }
 
index f0369e36275341404db0684aebb4e9bdba273205..80ce289eea05326336a7edecbe8a132ee4900d23 100644 (file)
@@ -3942,7 +3942,7 @@ static int decode_attr_group(struct xdr_stream *xdr, uint32_t *bitmap,
                if (len <= 0)
                        goto out;
                dprintk("%s: name=%s\n", __func__, group_name->data);
-               return NFS_ATTR_FATTR_OWNER_NAME;
+               return NFS_ATTR_FATTR_GROUP_NAME;
        } else {
                len = xdr_stream_decode_opaque_inline(xdr, (void **)&p,
                                XDR_MAX_NETOBJ);
index 63f77b49a586a53a1abbcf7b517aa2a90f3ddb2e..590e1e35781f0b737b5b277d76ab56092f8e3f3b 100644 (file)
@@ -367,7 +367,7 @@ void nfs4_pnfs_ds_put(struct nfs4_pnfs_ds *ds);
 struct nfs4_pnfs_ds *nfs4_pnfs_ds_add(struct list_head *dsaddrs,
                                      gfp_t gfp_flags);
 void nfs4_pnfs_v3_ds_connect_unload(void);
-void nfs4_pnfs_ds_connect(struct nfs_server *mds_srv, struct nfs4_pnfs_ds *ds,
+int nfs4_pnfs_ds_connect(struct nfs_server *mds_srv, struct nfs4_pnfs_ds *ds,
                          struct nfs4_deviceid_node *devid, unsigned int timeo,
                          unsigned int retrans, u32 version, u32 minor_version);
 struct nfs4_pnfs_ds_addr *nfs4_decode_mp_ds_addr(struct net *net,
index 9414b492439fbf0e70d32f9238ac29b8e9cf50be..7250b95549ecc73bd1dbdae9ec909aac64f93a49 100644 (file)
@@ -745,15 +745,17 @@ out:
 /*
  * Create an rpc connection to the nfs4_pnfs_ds data server.
  * Currently only supports IPv4 and IPv6 addresses.
- * If connection fails, make devid unavailable.
+ * If connection fails, make devid unavailable and return a negative errno.
  */
-void nfs4_pnfs_ds_connect(struct nfs_server *mds_srv, struct nfs4_pnfs_ds *ds,
+int nfs4_pnfs_ds_connect(struct nfs_server *mds_srv, struct nfs4_pnfs_ds *ds,
                          struct nfs4_deviceid_node *devid, unsigned int timeo,
                          unsigned int retrans, u32 version, u32 minor_version)
 {
-       if (test_and_set_bit(NFS4DS_CONNECTING, &ds->ds_state) == 0) {
-               int err = 0;
+       int err;
 
+again:
+       err = 0;
+       if (test_and_set_bit(NFS4DS_CONNECTING, &ds->ds_state) == 0) {
                if (version == 3) {
                        err = _nfs4_pnfs_v3_ds_connect(mds_srv, ds, timeo,
                                                       retrans);
@@ -766,12 +768,29 @@ void nfs4_pnfs_ds_connect(struct nfs_server *mds_srv, struct nfs4_pnfs_ds *ds,
                        err = -EPROTONOSUPPORT;
                }
 
-               if (err)
-                       nfs4_mark_deviceid_unavailable(devid);
                nfs4_clear_ds_conn_bit(ds);
        } else {
                nfs4_wait_ds_connect(ds);
+
+               /* what we waited on didn't connect and didn't mark the devid unavailable; retry */
+               if (!ds->ds_clp && !nfs4_test_deviceid_unavailable(devid))
+                       goto again;
        }
+
+       /*
+        * At this point the ds->ds_clp should be ready, but it might have
+        * hit an error.
+        */
+       if (!err) {
+               if (!ds->ds_clp || !nfs_client_init_is_complete(ds->ds_clp)) {
+                       WARN_ON_ONCE(ds->ds_clp ||
+                               !nfs4_test_deviceid_unavailable(devid));
+                       return -EINVAL;
+               }
+               err = nfs_client_init_status(ds->ds_clp);
+       }
+
+       return err;
 }
 EXPORT_SYMBOL_GPL(nfs4_pnfs_ds_connect);
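
nfs4_pnfs_ds_connect() now reports failure instead of leaving callers to infer it from a NULL ds->ds_clp, and the waiter path loops back when the connect it waited on neither succeeded nor marked the devid unavailable. A hypothetical caller under the new contract (ds_setup() and its parameters are invented for illustration; only the nfs4_pnfs_ds_connect() signature comes from the patch):

    static struct nfs_client *ds_setup(struct nfs_server *mds_srv,
                                       struct nfs4_pnfs_ds *ds,
                                       struct nfs4_deviceid_node *devid,
                                       unsigned int timeo, unsigned int retrans)
    {
            int err;

            /* NFSv4.1 data server; errors no longer have to be guessed at */
            err = nfs4_pnfs_ds_connect(mds_srv, ds, devid, timeo, retrans, 4, 1);
            if (err)
                    return ERR_PTR(err);
            return ds->ds_clp;      /* guaranteed fully initialized on success */
    }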
 
index e75b056f46f43583b84da4a423cbafedb850c630..abb2c8a3be42e4755f747c62a1cec5466f13ee77 100644 (file)
@@ -1784,7 +1784,8 @@ static void nfs_commit_release_pages(struct nfs_commit_data *data)
                        (long long)req_offset(req));
                if (status < 0) {
                        nfs_context_set_write_error(req->wb_context, status);
-                       nfs_inode_remove_request(req);
+                       if (req->wb_page)
+                               nfs_inode_remove_request(req);
                        dprintk_cont(", error = %d\n", status);
                        goto next;
                }
@@ -1793,7 +1794,8 @@ static void nfs_commit_release_pages(struct nfs_commit_data *data)
                 * returned by the server against all stored verfs. */
                if (!nfs_write_verifier_cmp(&req->wb_verf, &data->verf.verifier)) {
                        /* We have a match */
-                       nfs_inode_remove_request(req);
+                       if (req->wb_page)
+                               nfs_inode_remove_request(req);
                        dprintk_cont(" OK\n");
                        goto next;
                }
index 73e75ac905258c17bdc107c0c071e8d14df739f0..8bf8f667a8cf2fe8359f74b41497bc9436ca0efb 100644 (file)
@@ -538,13 +538,21 @@ out_free:
 
 static ssize_t
 nfsd_print_version_support(char *buf, int remaining, const char *sep,
-               unsigned vers, unsigned minor)
+               unsigned vers, int minor)
 {
-       const char *format = (minor == 0) ? "%s%c%u" : "%s%c%u.%u";
+       const char *format = minor < 0 ? "%s%c%u" : "%s%c%u.%u";
        bool supported = !!nfsd_vers(vers, NFSD_TEST);
 
-       if (vers == 4 && !nfsd_minorversion(minor, NFSD_TEST))
+       if (vers == 4 && minor >= 0 &&
+           !nfsd_minorversion(minor, NFSD_TEST))
                supported = false;
+       if (minor == 0 && supported)
+               /*
+                * Special case for backward compatibility:
+                * +4.0 is never reported; it is implied by
+                * +4, unless -4.0 is present.
+                */
+               return 0;
        return snprintf(buf, remaining, format, sep,
                        supported ? '+' : '-', vers, minor);
 }
@@ -554,7 +562,6 @@ static ssize_t __write_versions(struct file *file, char *buf, size_t size)
        char *mesg = buf;
        char *vers, *minorp, sign;
        int len, num, remaining;
-       unsigned minor;
        ssize_t tlen = 0;
        char *sep;
        struct nfsd_net *nn = net_generic(netns(file), nfsd_net_id);
@@ -575,6 +582,7 @@ static ssize_t __write_versions(struct file *file, char *buf, size_t size)
                if (len <= 0) return -EINVAL;
                do {
                        enum vers_op cmd;
+                       unsigned minor;
                        sign = *vers;
                        if (sign == '+' || sign == '-')
                                num = simple_strtol((vers+1), &minorp, 0);
@@ -585,8 +593,8 @@ static ssize_t __write_versions(struct file *file, char *buf, size_t size)
                                        return -EINVAL;
                                if (kstrtouint(minorp+1, 0, &minor) < 0)
                                        return -EINVAL;
-                       } else
-                               minor = 0;
+                       }
+
                        cmd = sign == '-' ? NFSD_CLEAR : NFSD_SET;
                        switch(num) {
                        case 2:
@@ -594,8 +602,20 @@ static ssize_t __write_versions(struct file *file, char *buf, size_t size)
                                nfsd_vers(num, cmd);
                                break;
                        case 4:
-                               if (nfsd_minorversion(minor, cmd) >= 0)
-                                       break;
+                               if (*minorp == '.') {
+                                       if (nfsd_minorversion(minor, cmd) < 0)
+                                               return -EINVAL;
+                               } else if ((cmd == NFSD_SET) != nfsd_vers(num, NFSD_TEST)) {
+                                       /*
+                                        * Either we have +4 and no minors are enabled,
+                                        * or we have -4 and at least one minor is enabled.
+                                        * In either case, propagate 'cmd' to all minors.
+                                        */
+                                       minor = 0;
+                                       while (nfsd_minorversion(minor, cmd) >= 0)
+                                               minor++;
+                               }
+                               break;
                        default:
                                return -EINVAL;
                        }
@@ -612,9 +632,11 @@ static ssize_t __write_versions(struct file *file, char *buf, size_t size)
        sep = "";
        remaining = SIMPLE_TRANSACTION_LIMIT;
        for (num=2 ; num <= 4 ; num++) {
+               int minor;
                if (!nfsd_vers(num, NFSD_AVAIL))
                        continue;
-               minor = 0;
+
+               minor = -1;
                do {
                        len = nfsd_print_version_support(buf, remaining,
                                        sep, num, minor);
@@ -624,7 +646,8 @@ static ssize_t __write_versions(struct file *file, char *buf, size_t size)
                        buf += len;
                        tlen += len;
                        minor++;
-                       sep = " ";
+                       if (len)
+                               sep = " ";
                } while (num == 4 && minor <= NFSD_SUPPORTED_MINOR_VERSION);
        }
 out:
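
Together, the nfsd_print_version_support() and __write_versions() changes make NFSv4 minor versions explicit: "+4"/"-4" now propagates to every minor, "+4.0" is implied rather than printed, and "-4.0" appears only when 4.0 alone was switched off. A small userspace sketch of the transaction-file interface; it assumes the nfsd filesystem is mounted at /proc/fs/nfsd, and the sample output is illustrative rather than captured:

    #include <fcntl.h>
    #include <stdio.h>
    #include <unistd.h>

    int main(void)
    {
            char buf[128];
            ssize_t n;
            int fd = open("/proc/fs/nfsd/versions", O_RDWR);

            if (fd < 0)
                    return 1;
            if (write(fd, "-4.0", 4) < 0)   /* disable only the 4.0 minor */
                    return 1;
            n = read(fd, buf, sizeof(buf) - 1);     /* read back the reply */
            if (n > 0) {
                    buf[n] = '\0';
                    printf("%s\n", buf);    /* e.g. "+2 +3 +4 -4.0 +4.1" */
            }
            close(fd);
            return 0;
    }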
index fa82b7707e8531f9b7e8065391c3f54387c2740d..03a7e9da4da0225e58fd53c4589ed682e0732141 100644 (file)
@@ -786,6 +786,7 @@ nfserrno (int errno)
                { nfserr_serverfault, -ESERVERFAULT },
                { nfserr_serverfault, -ENFILE },
                { nfserr_io, -EUCLEAN },
+               { nfserr_perm, -ENOKEY },
        };
        int     i;
 
index 786a4a2cb2d7a96cbde629c17b6ac58ab82bc84e..31e1f959345715a59f8f6b9d9ec00a0ec00fece5 100644 (file)
@@ -167,7 +167,8 @@ nfsd_adjust_nfsd_versions4(void)
 
 int nfsd_minorversion(u32 minorversion, enum vers_op change)
 {
-       if (minorversion > NFSD_SUPPORTED_MINOR_VERSION)
+       if (minorversion > NFSD_SUPPORTED_MINOR_VERSION &&
+           change != NFSD_AVAIL)
                return -1;
        switch(change) {
        case NFSD_SET:
@@ -415,23 +416,20 @@ static void nfsd_last_thread(struct svc_serv *serv, struct net *net)
 
 void nfsd_reset_versions(void)
 {
-       int found_one = 0;
        int i;
 
-       for (i = NFSD_MINVERS; i < NFSD_NRVERS; i++) {
-               if (nfsd_program.pg_vers[i])
-                       found_one = 1;
-       }
+       for (i = 0; i < NFSD_NRVERS; i++)
+               if (nfsd_vers(i, NFSD_TEST))
+                       return;
 
-       if (!found_one) {
-               for (i = NFSD_MINVERS; i < NFSD_NRVERS; i++)
-                       nfsd_program.pg_vers[i] = nfsd_version[i];
-#if defined(CONFIG_NFSD_V2_ACL) || defined(CONFIG_NFSD_V3_ACL)
-               for (i = NFSD_ACL_MINVERS; i < NFSD_ACL_NRVERS; i++)
-                       nfsd_acl_program.pg_vers[i] =
-                               nfsd_acl_version[i];
-#endif
-       }
+       for (i = 0; i < NFSD_NRVERS; i++)
+               if (i != 4)
+                       nfsd_vers(i, NFSD_SET);
+               else {
+                       int minor = 0;
+                       while (nfsd_minorversion(minor, NFSD_SET) >= 0)
+                               minor++;
+               }
 }
 
 /*
index 4348027384f5edf06a66dd417214b9bbd3dd05cd..d0ab7e56d0b41a7a97f3640eb0ab4801f747dffc 100644 (file)
@@ -1863,7 +1863,7 @@ static int o2net_accept_one(struct socket *sock, int *more)
 
        new_sock->type = sock->type;
        new_sock->ops = sock->ops;
-       ret = sock->ops->accept(sock, new_sock, O_NONBLOCK);
+       ret = sock->ops->accept(sock, new_sock, O_NONBLOCK, false);
        if (ret < 0)
                goto out;
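
This follows the tree-wide change that added a "kern" flag to the socket accept path so security modules can distinguish kernel-internal connections; o2net passes false because the accepted peer is a remote node. For reference, the proto_ops member this call goes through now reads roughly as follows (paraphrased from include/linux/net.h of this era):

    struct proto_ops {
            /* ... */
            int     (*accept)(struct socket *sock, struct socket *newsock,
                              int flags, bool kern);
            /* ... */
    };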
 
index 1953986ee6bc221f555f4c53f8129f9c865f91e5..6e610a205e1556477ba80e512f1243629c193141 100644 (file)
@@ -12,7 +12,6 @@
 #include <linux/slab.h>
 #include <linux/cred.h>
 #include <linux/xattr.h>
-#include <linux/sched/signal.h>
 #include "overlayfs.h"
 #include "ovl_entry.h"
 
index e2112270d75a5f878e291bb5bb681474e3c4eeaf..9287d3a96e35582af28d880e7f1b9bc903ef205b 100644 (file)
@@ -409,7 +409,7 @@ int do_select(int n, fd_set_bits *fds, struct timespec64 *end_time)
        int retval, i, timed_out = 0;
        u64 slack = 0;
        unsigned int busy_flag = net_busy_loop_on() ? POLL_BUSY_LOOP : 0;
-       unsigned long busy_end = 0;
+       unsigned long busy_start = 0;
 
        rcu_read_lock();
        retval = max_select_fd(n, fds);
@@ -512,11 +512,11 @@ int do_select(int n, fd_set_bits *fds, struct timespec64 *end_time)
 
                /* only if found POLL_BUSY_LOOP sockets && not out of time */
                if (can_busy_loop && !need_resched()) {
-                       if (!busy_end) {
-                               busy_end = busy_loop_end_time();
+                       if (!busy_start) {
+                               busy_start = busy_loop_current_time();
                                continue;
                        }
-                       if (!busy_loop_timeout(busy_end))
+                       if (!busy_loop_timeout(busy_start))
                                continue;
                }
                busy_flag = 0;
@@ -800,7 +800,7 @@ static int do_poll(struct poll_list *list, struct poll_wqueues *wait,
        int timed_out = 0, count = 0;
        u64 slack = 0;
        unsigned int busy_flag = net_busy_loop_on() ? POLL_BUSY_LOOP : 0;
-       unsigned long busy_end = 0;
+       unsigned long busy_start = 0;
 
        /* Optimise the no-wait case */
        if (end_time && !end_time->tv_sec && !end_time->tv_nsec) {
@@ -853,11 +853,11 @@ static int do_poll(struct poll_list *list, struct poll_wqueues *wait,
 
                /* only if found POLL_BUSY_LOOP sockets && not out of time */
                if (can_busy_loop && !need_resched()) {
-                       if (!busy_end) {
-                               busy_end = busy_loop_end_time();
+                       if (!busy_start) {
+                               busy_start = busy_loop_current_time();
                                continue;
                        }
-                       if (!busy_loop_timeout(busy_end))
+                       if (!busy_loop_timeout(busy_start))
                                continue;
                }
                busy_flag = 0;
index 384fa759a563341b309df47537f9c94da0ee9ebb..c543cdb5f8ed9b803eea973dbb5ed68cfe395e1f 100644 (file)
@@ -400,9 +400,9 @@ SYSCALL_DEFINE2(timerfd_create, int, clockid, int, flags)
             clockid != CLOCK_BOOTTIME_ALARM))
                return -EINVAL;
 
-       if (!capable(CAP_WAKE_ALARM) &&
-           (clockid == CLOCK_REALTIME_ALARM ||
-            clockid == CLOCK_BOOTTIME_ALARM))
+       if ((clockid == CLOCK_REALTIME_ALARM ||
+            clockid == CLOCK_BOOTTIME_ALARM) &&
+           !capable(CAP_WAKE_ALARM))
                return -EPERM;
 
        ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
@@ -449,7 +449,7 @@ static int do_timerfd_settime(int ufd, int flags,
                return ret;
        ctx = f.file->private_data;
 
-       if (!capable(CAP_WAKE_ALARM) && isalarm(ctx)) {
+       if (isalarm(ctx) && !capable(CAP_WAKE_ALARM)) {
                fdput(f);
                return -EPERM;
        }
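
Both timerfd hunks are behavior-preserving reorderings: capable(CAP_WAKE_ALARM) is now evaluated only once the clock is known to be an alarm clock, presumably so ordinary callers never trigger the capability check and its audit side effects. The userspace contract is unchanged, as in this sketch:

    #include <errno.h>
    #include <stdio.h>
    #include <sys/timerfd.h>
    #include <unistd.h>

    int main(void)
    {
            int fd = timerfd_create(CLOCK_MONOTONIC, TFD_CLOEXEC); /* no cap needed */
            int afd = timerfd_create(CLOCK_BOOTTIME_ALARM, TFD_CLOEXEC);

            if (afd < 0 && errno == EPERM)
                    fprintf(stderr, "alarm clocks still need CAP_WAKE_ALARM\n");
            if (fd >= 0)
                    close(fd);
            if (afd >= 0)
                    close(afd);
            return 0;
    }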
index 973607df579db324e64da1c3c26999d60853fa80..1d227b0fcf49ff26b40bdd726b3839fb8f353f35 100644 (file)
@@ -138,8 +138,6 @@ out:
  * userfaultfd_ctx_get - Acquires a reference to the internal userfaultfd
  * context.
  * @ctx: [in] Pointer to the userfaultfd context.
- *
- * Returns: In case of success, returns not zero.
  */
 static void userfaultfd_ctx_get(struct userfaultfd_ctx *ctx)
 {
@@ -267,6 +265,7 @@ static inline bool userfaultfd_must_wait(struct userfaultfd_ctx *ctx,
 {
        struct mm_struct *mm = ctx->mm;
        pgd_t *pgd;
+       p4d_t *p4d;
        pud_t *pud;
        pmd_t *pmd, _pmd;
        pte_t *pte;
@@ -277,7 +276,10 @@ static inline bool userfaultfd_must_wait(struct userfaultfd_ctx *ctx,
        pgd = pgd_offset(mm, address);
        if (!pgd_present(*pgd))
                goto out;
-       pud = pud_offset(pgd, address);
+       p4d = p4d_offset(pgd, address);
+       if (!p4d_present(*p4d))
+               goto out;
+       pud = pud_offset(p4d, address);
        if (!pud_present(*pud))
                goto out;
        pmd = pmd_offset(pud, address);
@@ -490,7 +492,7 @@ int handle_userfault(struct vm_fault *vmf, unsigned long reason)
                         * in such case.
                         */
                        down_read(&mm->mmap_sem);
-                       ret = 0;
+                       ret = VM_FAULT_NOPAGE;
                }
        }
 
@@ -527,10 +529,11 @@ out:
        return ret;
 }
 
-static int userfaultfd_event_wait_completion(struct userfaultfd_ctx *ctx,
-                                            struct userfaultfd_wait_queue *ewq)
+static void userfaultfd_event_wait_completion(struct userfaultfd_ctx *ctx,
+                                             struct userfaultfd_wait_queue *ewq)
 {
-       int ret = 0;
+       if (WARN_ON_ONCE(current->flags & PF_EXITING))
+               goto out;
 
        ewq->ctx = ctx;
        init_waitqueue_entry(&ewq->wq, current);
@@ -547,8 +550,16 @@ static int userfaultfd_event_wait_completion(struct userfaultfd_ctx *ctx,
                        break;
                if (ACCESS_ONCE(ctx->released) ||
                    fatal_signal_pending(current)) {
-                       ret = -1;
                        __remove_wait_queue(&ctx->event_wqh, &ewq->wq);
+                       if (ewq->msg.event == UFFD_EVENT_FORK) {
+                               struct userfaultfd_ctx *new;
+
+                               new = (struct userfaultfd_ctx *)
+                                       (unsigned long)
+                                       ewq->msg.arg.reserved.reserved1;
+
+                               userfaultfd_ctx_put(new);
+                       }
                        break;
                }
 
@@ -566,9 +577,8 @@ static int userfaultfd_event_wait_completion(struct userfaultfd_ctx *ctx,
         * ctx may go away after this if the userfault pseudo fd is
         * already released.
         */
-
+out:
        userfaultfd_ctx_put(ctx);
-       return ret;
 }
 
 static void userfaultfd_event_complete(struct userfaultfd_ctx *ctx,
@@ -626,7 +636,7 @@ int dup_userfaultfd(struct vm_area_struct *vma, struct list_head *fcs)
        return 0;
 }
 
-static int dup_fctx(struct userfaultfd_fork_ctx *fctx)
+static void dup_fctx(struct userfaultfd_fork_ctx *fctx)
 {
        struct userfaultfd_ctx *ctx = fctx->orig;
        struct userfaultfd_wait_queue ewq;
@@ -636,17 +646,15 @@ static int dup_fctx(struct userfaultfd_fork_ctx *fctx)
        ewq.msg.event = UFFD_EVENT_FORK;
        ewq.msg.arg.reserved.reserved1 = (unsigned long)fctx->new;
 
-       return userfaultfd_event_wait_completion(ctx, &ewq);
+       userfaultfd_event_wait_completion(ctx, &ewq);
 }
 
 void dup_userfaultfd_complete(struct list_head *fcs)
 {
-       int ret = 0;
        struct userfaultfd_fork_ctx *fctx, *n;
 
        list_for_each_entry_safe(fctx, n, fcs, list) {
-               if (!ret)
-                       ret = dup_fctx(fctx);
+               dup_fctx(fctx);
                list_del(&fctx->list);
                kfree(fctx);
        }
@@ -689,8 +697,7 @@ void mremap_userfaultfd_complete(struct vm_userfaultfd_ctx *vm_ctx,
        userfaultfd_event_wait_completion(ctx, &ewq);
 }
 
-void userfaultfd_remove(struct vm_area_struct *vma,
-                       struct vm_area_struct **prev,
+bool userfaultfd_remove(struct vm_area_struct *vma,
                        unsigned long start, unsigned long end)
 {
        struct mm_struct *mm = vma->vm_mm;
@@ -699,13 +706,11 @@ void userfaultfd_remove(struct vm_area_struct *vma,
 
        ctx = vma->vm_userfaultfd_ctx.ctx;
        if (!ctx || !(ctx->features & UFFD_FEATURE_EVENT_REMOVE))
-               return;
+               return true;
 
        userfaultfd_ctx_get(ctx);
        up_read(&mm->mmap_sem);
 
-       *prev = NULL; /* We wait for ACK w/o the mmap semaphore */
-
        msg_init(&ewq.msg);
 
        ewq.msg.event = UFFD_EVENT_REMOVE;
@@ -714,7 +719,7 @@ void userfaultfd_remove(struct vm_area_struct *vma,
 
        userfaultfd_event_wait_completion(ctx, &ewq);
 
-       down_read(&mm->mmap_sem);
+       return false;
 }
 
 static bool has_unmap_ctx(struct userfaultfd_ctx *ctx, struct list_head *unmaps,
@@ -775,34 +780,6 @@ void userfaultfd_unmap_complete(struct mm_struct *mm, struct list_head *uf)
        }
 }
 
-void userfaultfd_exit(struct mm_struct *mm)
-{
-       struct vm_area_struct *vma = mm->mmap;
-
-       /*
-        * We can do the vma walk without locking because the caller
-        * (exit_mm) knows it now has exclusive access
-        */
-       while (vma) {
-               struct userfaultfd_ctx *ctx = vma->vm_userfaultfd_ctx.ctx;
-
-               if (ctx && (ctx->features & UFFD_FEATURE_EVENT_EXIT)) {
-                       struct userfaultfd_wait_queue ewq;
-
-                       userfaultfd_ctx_get(ctx);
-
-                       msg_init(&ewq.msg);
-                       ewq.msg.event = UFFD_EVENT_EXIT;
-
-                       userfaultfd_event_wait_completion(ctx, &ewq);
-
-                       ctx->features &= ~UFFD_FEATURE_EVENT_EXIT;
-               }
-
-               vma = vma->vm_next;
-       }
-}
-
 static int userfaultfd_release(struct inode *inode, struct file *file)
 {
        struct userfaultfd_ctx *ctx = file->private_data;
index 2dfdc62f795e63177e3f2f58306840656644492d..70a5b55e0870a0523c0dd8ce629debf2fccebe25 100644 (file)
 #include "kmem.h"
 #include "xfs_message.h"
 
-/*
- * Greedy allocation.  May fail and may return vmalloced memory.
- */
-void *
-kmem_zalloc_greedy(size_t *size, size_t minsize, size_t maxsize)
-{
-       void            *ptr;
-       size_t          kmsize = maxsize;
-
-       while (!(ptr = vzalloc(kmsize))) {
-               if ((kmsize >>= 1) <= minsize)
-                       kmsize = minsize;
-       }
-       if (ptr)
-               *size = kmsize;
-       return ptr;
-}
-
 void *
 kmem_alloc(size_t size, xfs_km_flags_t flags)
 {
index 689f746224e7ab8a0fbf3d2f9acb4f1dd68a9a16..f0fc84fcaac2553283f90bc3f157b924bd03d932 100644 (file)
@@ -69,8 +69,6 @@ static inline void  kmem_free(const void *ptr)
 }
 
 
-extern void *kmem_zalloc_greedy(size_t *, size_t, size_t);
-
 static inline void *
 kmem_zalloc(size_t size, xfs_km_flags_t flags)
 {
index a9c66d47757a757324e5fbf4224883e1d369588a..9bd104f32908962046af6d2dd4437a045fecdb36 100644 (file)
@@ -763,8 +763,8 @@ xfs_bmap_extents_to_btree(
                args.type = XFS_ALLOCTYPE_START_BNO;
                args.fsbno = XFS_INO_TO_FSB(mp, ip->i_ino);
        } else if (dfops->dop_low) {
-try_another_ag:
                args.type = XFS_ALLOCTYPE_START_BNO;
+try_another_ag:
                args.fsbno = *firstblock;
        } else {
                args.type = XFS_ALLOCTYPE_NEAR_BNO;
@@ -790,13 +790,17 @@ try_another_ag:
        if (xfs_sb_version_hasreflink(&cur->bc_mp->m_sb) &&
            args.fsbno == NULLFSBLOCK &&
            args.type == XFS_ALLOCTYPE_NEAR_BNO) {
-               dfops->dop_low = true;
+               args.type = XFS_ALLOCTYPE_FIRST_AG;
                goto try_another_ag;
        }
+       if (WARN_ON_ONCE(args.fsbno == NULLFSBLOCK)) {
+               xfs_iroot_realloc(ip, -1, whichfork);
+               xfs_btree_del_cursor(cur, XFS_BTREE_ERROR);
+               return -ENOSPC;
+       }
        /*
         * Allocation can't fail, the space was reserved.
         */
-       ASSERT(args.fsbno != NULLFSBLOCK);
        ASSERT(*firstblock == NULLFSBLOCK ||
               args.agno >= XFS_FSB_TO_AGNO(mp, *firstblock));
        *firstblock = cur->bc_private.b.firstblock = args.fsbno;
@@ -4150,6 +4154,19 @@ xfs_bmapi_read(
        return 0;
 }
 
+/*
+ * Add a delayed allocation extent to an inode. Blocks are reserved from the
+ * global pool and the extent inserted into the inode in-core extent tree.
+ *
+ * On entry, got refers to the first extent beyond the offset of the extent to
+ * allocate or eof is specified if no such extent exists. On return, got refers
+ * to the extent record that was inserted to the inode fork.
+ *
+ * Note that the allocated extent may have been merged with contiguous extents
+ * during insertion into the inode fork. Thus, got does not reflect the current
+ * state of the inode fork on return. If necessary, the caller can use lastx to
+ * look up the updated record in the inode fork.
+ */
 int
 xfs_bmapi_reserve_delalloc(
        struct xfs_inode        *ip,
@@ -4236,13 +4253,8 @@ xfs_bmapi_reserve_delalloc(
        got->br_startblock = nullstartblock(indlen);
        got->br_blockcount = alen;
        got->br_state = XFS_EXT_NORM;
-       xfs_bmap_add_extent_hole_delay(ip, whichfork, lastx, got);
 
-       /*
-        * Update our extent pointer, given that xfs_bmap_add_extent_hole_delay
-        * might have merged it into one of the neighbouring ones.
-        */
-       xfs_bmbt_get_all(xfs_iext_get_ext(ifp, *lastx), got);
+       xfs_bmap_add_extent_hole_delay(ip, whichfork, lastx, got);
 
        /*
         * Tag the inode if blocks were preallocated. Note that COW fork
@@ -4254,10 +4266,6 @@ xfs_bmapi_reserve_delalloc(
        if (whichfork == XFS_COW_FORK && (prealloc || aoff < off || alen > len))
                xfs_inode_set_cowblocks_tag(ip);
 
-       ASSERT(got->br_startoff <= aoff);
-       ASSERT(got->br_startoff + got->br_blockcount >= aoff + alen);
-       ASSERT(isnullstartblock(got->br_startblock));
-       ASSERT(got->br_state == XFS_EXT_NORM);
        return 0;
 
 out_unreserve_blocks:
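
The new comment on xfs_bmapi_reserve_delalloc() shifts a duty to callers: on return, got may describe the record as inserted, not as merged. The re-lookup the comment alludes to is exactly the line this hunk deletes, so a caller that needs the merged extent would do (ifp, lastx and got as in the function's own context):

    /* re-read the possibly-merged delalloc record after the reserve */
    xfs_bmbt_get_all(xfs_iext_get_ext(ifp, *lastx), got);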
index f93072b58a58323ae952d55d568a9e53384f88d3..fd55db47938562868d25d4998407ce7650c4da4f 100644 (file)
@@ -447,8 +447,8 @@ xfs_bmbt_alloc_block(
 
        if (args.fsbno == NULLFSBLOCK) {
                args.fsbno = be64_to_cpu(start->l);
-try_another_ag:
                args.type = XFS_ALLOCTYPE_START_BNO;
+try_another_ag:
                /*
                 * Make sure there is sufficient room left in the AG to
                 * complete a full tree split for an extent insert.  If
@@ -488,8 +488,8 @@ try_another_ag:
        if (xfs_sb_version_hasreflink(&cur->bc_mp->m_sb) &&
            args.fsbno == NULLFSBLOCK &&
            args.type == XFS_ALLOCTYPE_NEAR_BNO) {
-               cur->bc_private.b.dfops->dop_low = true;
                args.fsbno = cur->bc_private.b.firstblock;
+               args.type = XFS_ALLOCTYPE_FIRST_AG;
                goto try_another_ag;
        }
 
@@ -506,7 +506,7 @@ try_another_ag:
                        goto error0;
                cur->bc_private.b.dfops->dop_low = true;
        }
-       if (args.fsbno == NULLFSBLOCK) {
+       if (WARN_ON_ONCE(args.fsbno == NULLFSBLOCK)) {
                XFS_BTREE_TRACE_CURSOR(cur, XBT_EXIT);
                *stat = 0;
                return 0;
index d04547fcf274af0eaee18096c94b22652551b9f7..eb00bc133bca673c556eb85a18385bbc3748dfcf 100644 (file)
@@ -125,6 +125,8 @@ extern int xfs_dir2_sf_create(struct xfs_da_args *args, xfs_ino_t pino);
 extern int xfs_dir2_sf_lookup(struct xfs_da_args *args);
 extern int xfs_dir2_sf_removename(struct xfs_da_args *args);
 extern int xfs_dir2_sf_replace(struct xfs_da_args *args);
+extern int xfs_dir2_sf_verify(struct xfs_mount *mp, struct xfs_dir2_sf_hdr *sfp,
+               int size);
 
 /* xfs_dir2_readdir.c */
 extern int xfs_readdir(struct xfs_inode *dp, struct dir_context *ctx,
index c6809ff41197d934c068e84b19eb77986bc7dccf..96b45cd6c63f0686d3c1cce5c41b232f0ab82080 100644 (file)
@@ -629,6 +629,93 @@ xfs_dir2_sf_check(
 }
 #endif /* DEBUG */
 
+/* Verify the consistency of an inline directory. */
+int
+xfs_dir2_sf_verify(
+       struct xfs_mount                *mp,
+       struct xfs_dir2_sf_hdr          *sfp,
+       int                             size)
+{
+       struct xfs_dir2_sf_entry        *sfep;
+       struct xfs_dir2_sf_entry        *next_sfep;
+       char                            *endp;
+       const struct xfs_dir_ops        *dops;
+       xfs_ino_t                       ino;
+       int                             i;
+       int                             i8count;
+       int                             offset;
+       __uint8_t                       filetype;
+
+       dops = xfs_dir_get_ops(mp, NULL);
+
+       /*
+        * Give up if the directory is way too short.
+        */
+       XFS_WANT_CORRUPTED_RETURN(mp, size >
+                       offsetof(struct xfs_dir2_sf_hdr, parent));
+       XFS_WANT_CORRUPTED_RETURN(mp, size >=
+                       xfs_dir2_sf_hdr_size(sfp->i8count));
+
+       endp = (char *)sfp + size;
+
+       /* Check .. entry */
+       ino = dops->sf_get_parent_ino(sfp);
+       i8count = ino > XFS_DIR2_MAX_SHORT_INUM;
+       XFS_WANT_CORRUPTED_RETURN(mp, !xfs_dir_ino_validate(mp, ino));
+       offset = dops->data_first_offset;
+
+       /* Check all reported entries */
+       sfep = xfs_dir2_sf_firstentry(sfp);
+       for (i = 0; i < sfp->count; i++) {
+               /*
+                * struct xfs_dir2_sf_entry has a variable length.
+                * Check the fixed-offset parts of the structure are
+                * within the data buffer.
+                */
+               XFS_WANT_CORRUPTED_RETURN(mp,
+                               ((char *)sfep + sizeof(*sfep)) < endp);
+
+               /* Don't allow names with known bad length. */
+               XFS_WANT_CORRUPTED_RETURN(mp, sfep->namelen > 0);
+               XFS_WANT_CORRUPTED_RETURN(mp, sfep->namelen < MAXNAMELEN);
+
+               /*
+                * Check that the variable-length part of the structure is
+                * within the data buffer.  The next entry starts after the
+                * name component, so nextentry is an acceptable test.
+                */
+               next_sfep = dops->sf_nextentry(sfp, sfep);
+               XFS_WANT_CORRUPTED_RETURN(mp, endp >= (char *)next_sfep);
+
+               /* Check that the offsets always increase. */
+               XFS_WANT_CORRUPTED_RETURN(mp,
+                               xfs_dir2_sf_get_offset(sfep) >= offset);
+
+               /* Check the inode number. */
+               ino = dops->sf_get_ino(sfp, sfep);
+               i8count += ino > XFS_DIR2_MAX_SHORT_INUM;
+               XFS_WANT_CORRUPTED_RETURN(mp, !xfs_dir_ino_validate(mp, ino));
+
+               /* Check the file type. */
+               filetype = dops->sf_get_ftype(sfep);
+               XFS_WANT_CORRUPTED_RETURN(mp, filetype < XFS_DIR3_FT_MAX);
+
+               offset = xfs_dir2_sf_get_offset(sfep) +
+                               dops->data_entsize(sfep->namelen);
+
+               sfep = next_sfep;
+       }
+       XFS_WANT_CORRUPTED_RETURN(mp, i8count == sfp->i8count);
+       XFS_WANT_CORRUPTED_RETURN(mp, (void *)sfep == (void *)endp);
+
+       /* Make sure this whole thing ought to be in local format. */
+       XFS_WANT_CORRUPTED_RETURN(mp, offset +
+              (sfp->count + 2) * (uint)sizeof(xfs_dir2_leaf_entry_t) +
+              (uint)sizeof(xfs_dir2_block_tail_t) <= mp->m_dir_geo->blksize);
+
+       return 0;
+}
+
 /*
  * Create a new (shortform) directory.
  */
index 25c1e078aef6a5925c12f2cc91b0d18b8b38711b..9653e964eda4f99ca611bb2cb6449a470be45d48 100644 (file)
@@ -33,6 +33,8 @@
 #include "xfs_trace.h"
 #include "xfs_attr_sf.h"
 #include "xfs_da_format.h"
+#include "xfs_da_btree.h"
+#include "xfs_dir2_priv.h"
 
 kmem_zone_t *xfs_ifork_zone;
 
@@ -320,6 +322,7 @@ xfs_iformat_local(
        int             whichfork,
        int             size)
 {
+       int             error;
 
        /*
         * If the size is unreasonable, then something
@@ -336,6 +339,14 @@ xfs_iformat_local(
                return -EFSCORRUPTED;
        }
 
+       if (S_ISDIR(VFS_I(ip)->i_mode) && whichfork == XFS_DATA_FORK) {
+               error = xfs_dir2_sf_verify(ip->i_mount,
+                               (struct xfs_dir2_sf_hdr *)XFS_DFORK_DPTR(dip),
+                               size);
+               if (error)
+                       return error;
+       }
+
        xfs_init_local_fork(ip, whichfork, XFS_DFORK_PTR(dip, whichfork), size);
        return 0;
 }
@@ -856,7 +867,7 @@ xfs_iextents_copy(
  * In these cases, the format always takes precedence, because the
  * format indicates the current state of the fork.
  */
-void
+int
 xfs_iflush_fork(
        xfs_inode_t             *ip,
        xfs_dinode_t            *dip,
@@ -866,6 +877,7 @@ xfs_iflush_fork(
        char                    *cp;
        xfs_ifork_t             *ifp;
        xfs_mount_t             *mp;
+       int                     error;
        static const short      brootflag[2] =
                { XFS_ILOG_DBROOT, XFS_ILOG_ABROOT };
        static const short      dataflag[2] =
@@ -874,7 +886,7 @@ xfs_iflush_fork(
                { XFS_ILOG_DEXT, XFS_ILOG_AEXT };
 
        if (!iip)
-               return;
+               return 0;
        ifp = XFS_IFORK_PTR(ip, whichfork);
        /*
         * This can happen if we gave up in iformat in an error path,
@@ -882,12 +894,19 @@ xfs_iflush_fork(
         */
        if (!ifp) {
                ASSERT(whichfork == XFS_ATTR_FORK);
-               return;
+               return 0;
        }
        cp = XFS_DFORK_PTR(dip, whichfork);
        mp = ip->i_mount;
        switch (XFS_IFORK_FORMAT(ip, whichfork)) {
        case XFS_DINODE_FMT_LOCAL:
+               if (S_ISDIR(VFS_I(ip)->i_mode) && whichfork == XFS_DATA_FORK) {
+                       error = xfs_dir2_sf_verify(mp,
+                                       (struct xfs_dir2_sf_hdr *)ifp->if_u1.if_data,
+                                       ifp->if_bytes);
+                       if (error)
+                               return error;
+               }
                if ((iip->ili_fields & dataflag[whichfork]) &&
                    (ifp->if_bytes > 0)) {
                        ASSERT(ifp->if_u1.if_data != NULL);
@@ -940,6 +959,7 @@ xfs_iflush_fork(
                ASSERT(0);
                break;
        }
+       return 0;
 }
 
 /*
index 7fb8365326d1a745583c4f133bc5a63668316b33..132dc59fdde6942cd22fca4ae11b8adbc193f051 100644 (file)
@@ -140,7 +140,7 @@ typedef struct xfs_ifork {
 struct xfs_ifork *xfs_iext_state_to_fork(struct xfs_inode *ip, int state);
 
 int            xfs_iformat_fork(struct xfs_inode *, struct xfs_dinode *);
-void           xfs_iflush_fork(struct xfs_inode *, struct xfs_dinode *,
+int            xfs_iflush_fork(struct xfs_inode *, struct xfs_dinode *,
                                struct xfs_inode_log_item *, int);
 void           xfs_idestroy_fork(struct xfs_inode *, int);
 void           xfs_idata_realloc(struct xfs_inode *, int, int);
index bf65a9ea864293d48e5326178336680c2eb29758..61494295d92fe1acb7d343bc3a4e1594f09027ab 100644 (file)
@@ -274,54 +274,49 @@ xfs_end_io(
        struct xfs_ioend        *ioend =
                container_of(work, struct xfs_ioend, io_work);
        struct xfs_inode        *ip = XFS_I(ioend->io_inode);
+       xfs_off_t               offset = ioend->io_offset;
+       size_t                  size = ioend->io_size;
        int                     error = ioend->io_bio->bi_error;
 
        /*
-        * Set an error if the mount has shut down and proceed with end I/O
-        * processing so it can perform whatever cleanups are necessary.
+        * Just clean up the in-memory structures if the fs has been shut down.
         */
-       if (XFS_FORCED_SHUTDOWN(ip->i_mount))
+       if (XFS_FORCED_SHUTDOWN(ip->i_mount)) {
                error = -EIO;
+               goto done;
+       }
 
        /*
-        * For a CoW extent, we need to move the mapping from the CoW fork
-        * to the data fork.  If instead an error happened, just dump the
-        * new blocks.
+        * Clean up any COW blocks on an I/O error.
         */
-       if (ioend->io_type == XFS_IO_COW) {
-               if (error)
-                       goto done;
-               if (ioend->io_bio->bi_error) {
-                       error = xfs_reflink_cancel_cow_range(ip,
-                                       ioend->io_offset, ioend->io_size);
-                       goto done;
+       if (unlikely(error)) {
+               switch (ioend->io_type) {
+               case XFS_IO_COW:
+                       xfs_reflink_cancel_cow_range(ip, offset, size, true);
+                       break;
                }
-               error = xfs_reflink_end_cow(ip, ioend->io_offset,
-                               ioend->io_size);
-               if (error)
-                       goto done;
+
+               goto done;
        }
 
        /*
-        * For unwritten extents we need to issue transactions to convert a
-        * range to normal written extens after the data I/O has finished.
-        * Detecting and handling completion IO errors is done individually
-        * for each case as different cleanup operations need to be performed
-        * on error.
+        * Success:  commit the COW or unwritten blocks if needed.
         */
-       if (ioend->io_type == XFS_IO_UNWRITTEN) {
-               if (error)
-                       goto done;
-               error = xfs_iomap_write_unwritten(ip, ioend->io_offset,
-                                                 ioend->io_size);
-       } else if (ioend->io_append_trans) {
-               error = xfs_setfilesize_ioend(ioend, error);
-       } else {
-               ASSERT(!xfs_ioend_is_append(ioend) ||
-                      ioend->io_type == XFS_IO_COW);
+       switch (ioend->io_type) {
+       case XFS_IO_COW:
+               error = xfs_reflink_end_cow(ip, offset, size);
+               break;
+       case XFS_IO_UNWRITTEN:
+               error = xfs_iomap_write_unwritten(ip, offset, size);
+               break;
+       default:
+               ASSERT(!xfs_ioend_is_append(ioend) || ioend->io_append_trans);
+               break;
        }
 
 done:
+       if (ioend->io_append_trans)
+               error = xfs_setfilesize_ioend(ioend, error);
        xfs_destroy_ioend(ioend, error);
 }
 
index 003a99b83bd8845e22d6311be1d474679521242d..ad9396e516f6e389b88bca5dc2dc41d3372ed714 100644 (file)
@@ -71,22 +71,11 @@ xfs_dir2_sf_getdents(
        struct xfs_da_geometry  *geo = args->geo;
 
        ASSERT(dp->i_df.if_flags & XFS_IFINLINE);
-       /*
-        * Give up if the directory is way too short.
-        */
-       if (dp->i_d.di_size < offsetof(xfs_dir2_sf_hdr_t, parent)) {
-               ASSERT(XFS_FORCED_SHUTDOWN(dp->i_mount));
-               return -EIO;
-       }
-
        ASSERT(dp->i_df.if_bytes == dp->i_d.di_size);
        ASSERT(dp->i_df.if_u1.if_data != NULL);
 
        sfp = (xfs_dir2_sf_hdr_t *)dp->i_df.if_u1.if_data;
 
-       if (dp->i_d.di_size < xfs_dir2_sf_hdr_size(sfp->i8count))
-               return -EFSCORRUPTED;
-
        /*
         * If the block number in the offset is out of range, we're done.
         */
index 7234b9748c36e048b15b376e4408ef3626422cf4..3531f8f72fa5e10b83f0fa8bd37afc560b2dbf0a 100644 (file)
@@ -1608,7 +1608,7 @@ xfs_inode_free_cowblocks(
        xfs_ilock(ip, XFS_IOLOCK_EXCL);
        xfs_ilock(ip, XFS_MMAPLOCK_EXCL);
 
-       ret = xfs_reflink_cancel_cow_range(ip, 0, NULLFILEOFF);
+       ret = xfs_reflink_cancel_cow_range(ip, 0, NULLFILEOFF, false);
 
        xfs_iunlock(ip, XFS_MMAPLOCK_EXCL);
        xfs_iunlock(ip, XFS_IOLOCK_EXCL);
index edfa6a55b0646d0d444ea0b2c12e46a62a2c4474..c7fe2c2123ab8375caf0e0349a454ed8b2762095 100644 (file)
@@ -1615,7 +1615,7 @@ xfs_itruncate_extents(
 
        /* Remove all pending CoW reservations. */
        error = xfs_reflink_cancel_cow_blocks(ip, &tp, first_unmap_block,
-                       last_block);
+                       last_block, true);
        if (error)
                goto out;
 
@@ -3475,6 +3475,7 @@ xfs_iflush_int(
        struct xfs_inode_log_item *iip = ip->i_itemp;
        struct xfs_dinode       *dip;
        struct xfs_mount        *mp = ip->i_mount;
+       int                     error;
 
        ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL|XFS_ILOCK_SHARED));
        ASSERT(xfs_isiflocked(ip));
@@ -3557,9 +3558,14 @@ xfs_iflush_int(
        if (ip->i_d.di_flushiter == DI_MAX_FLUSH)
                ip->i_d.di_flushiter = 0;
 
-       xfs_iflush_fork(ip, dip, iip, XFS_DATA_FORK);
-       if (XFS_IFORK_Q(ip))
-               xfs_iflush_fork(ip, dip, iip, XFS_ATTR_FORK);
+       error = xfs_iflush_fork(ip, dip, iip, XFS_DATA_FORK);
+       if (error)
+               return error;
+       if (XFS_IFORK_Q(ip)) {
+               error = xfs_iflush_fork(ip, dip, iip, XFS_ATTR_FORK);
+               if (error)
+                       return error;
+       }
        xfs_inobp_check(mp, bp);
 
        /*
index 41662fb14e87d8b1546c42d6a65508db5c5a76bf..288ee5b840d738116b8981e9618fac36fb24614f 100644 (file)
@@ -630,6 +630,11 @@ retry:
                goto out_unlock;
        }
 
+       /*
+        * Flag newly allocated delalloc blocks with IOMAP_F_NEW so we punch
+        * them out if the write happens to fail.
+        */
+       iomap->flags = IOMAP_F_NEW;
        trace_xfs_iomap_alloc(ip, offset, count, 0, &got);
 done:
        if (isnullstartblock(got.br_startblock))
@@ -1071,16 +1076,22 @@ xfs_file_iomap_end_delalloc(
        struct xfs_inode        *ip,
        loff_t                  offset,
        loff_t                  length,
-       ssize_t                 written)
+       ssize_t                 written,
+       struct iomap            *iomap)
 {
        struct xfs_mount        *mp = ip->i_mount;
        xfs_fileoff_t           start_fsb;
        xfs_fileoff_t           end_fsb;
        int                     error = 0;
 
-       /* behave as if the write failed if drop writes is enabled */
-       if (xfs_mp_drop_writes(mp))
+       /*
+        * Behave as if the write failed if drop writes is enabled. Set the NEW
+        * flag to force delalloc cleanup.
+        */
+       if (xfs_mp_drop_writes(mp)) {
+               iomap->flags |= IOMAP_F_NEW;
                written = 0;
+       }
 
        /*
         * start_fsb refers to the first unused block after a short write. If
@@ -1094,14 +1105,14 @@ xfs_file_iomap_end_delalloc(
        end_fsb = XFS_B_TO_FSB(mp, offset + length);
 
        /*
-        * Trim back delalloc blocks if we didn't manage to write the whole
-        * range reserved.
+        * Trim delalloc blocks if they were allocated by this write and we
+        * didn't manage to write the whole range.
         *
         * We don't need to care about racing delalloc as we hold i_mutex
         * across the reserve/allocate/unreserve calls. If there are delalloc
         * blocks in the range, they are ours.
         */
-       if (start_fsb < end_fsb) {
+       if ((iomap->flags & IOMAP_F_NEW) && start_fsb < end_fsb) {
                truncate_pagecache_range(VFS_I(ip), XFS_FSB_TO_B(mp, start_fsb),
                                         XFS_FSB_TO_B(mp, end_fsb) - 1);
 
@@ -1131,7 +1142,7 @@ xfs_file_iomap_end(
 {
        if ((flags & IOMAP_WRITE) && iomap->type == IOMAP_DELALLOC)
                return xfs_file_iomap_end_delalloc(XFS_I(inode), offset,
-                               length, written);
+                               length, written, iomap);
        return 0;
 }
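
The fix works because the iomap core hands the same struct iomap filled in by ->iomap_begin back to ->iomap_end, so IOMAP_F_NEW set at allocation time is still visible when deciding whether to punch out leftover delalloc blocks. The pairing, as a sketch of this era's include/linux/iomap.h (from memory, so treat the details as approximate):

    struct iomap_ops {
            /* fills *iomap; may set IOMAP_F_NEW for freshly allocated blocks */
            int (*iomap_begin)(struct inode *inode, loff_t pos, loff_t length,
                               unsigned flags, struct iomap *iomap);
            /* sees the same *iomap after the write completes, short or not */
            int (*iomap_end)(struct inode *inode, loff_t pos, loff_t length,
                             ssize_t written, unsigned flags, struct iomap *iomap);
    };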
 
index 66e881790c17109496e21bd5e2d7d21f5ecc7fe5..2a6d9b1558e00dca550a2d46f8a5a51b9661ec3a 100644 (file)
@@ -361,7 +361,6 @@ xfs_bulkstat(
        xfs_agino_t             agino;  /* inode # in allocation group */
        xfs_agnumber_t          agno;   /* allocation group number */
        xfs_btree_cur_t         *cur;   /* btree cursor for ialloc btree */
-       size_t                  irbsize; /* size of irec buffer in bytes */
        xfs_inobt_rec_incore_t  *irbuf; /* start of irec buffer */
        int                     nirbuf; /* size of irbuf */
        int                     ubcount; /* size of user's buffer */
@@ -388,11 +387,10 @@ xfs_bulkstat(
        *ubcountp = 0;
        *done = 0;
 
-       irbuf = kmem_zalloc_greedy(&irbsize, PAGE_SIZE, PAGE_SIZE * 4);
+       irbuf = kmem_zalloc_large(PAGE_SIZE * 4, KM_SLEEP);
        if (!irbuf)
                return -ENOMEM;
-
-       nirbuf = irbsize / sizeof(*irbuf);
+       nirbuf = (PAGE_SIZE * 4) / sizeof(*irbuf);
 
        /*
         * Loop over the allocation groups, starting from the last
index 450bde68bb7528d70a47e0b38275ca75c7e757a1..688ebff1f66384a309cca74539cbe4d27172b177 100644 (file)
@@ -513,8 +513,7 @@ STATIC void
 xfs_set_inoalignment(xfs_mount_t *mp)
 {
        if (xfs_sb_version_hasalign(&mp->m_sb) &&
-           mp->m_sb.sb_inoalignmt >=
-           XFS_B_TO_FSBT(mp, mp->m_inode_cluster_size))
+               mp->m_sb.sb_inoalignmt >= xfs_icluster_size_fsb(mp))
                mp->m_inoalign_mask = mp->m_sb.sb_inoalignmt - 1;
        else
                mp->m_inoalign_mask = 0;
index da6d08fb359c8efdf42a53e283bb030780b5b064..4a84c5ea266d8f8fcec61aa55776fec339d27aaf 100644 (file)
@@ -548,14 +548,18 @@ xfs_reflink_trim_irec_to_next_cow(
 }
 
 /*
- * Cancel all pending CoW reservations for some block range of an inode.
+ * Cancel CoW reservations for some block range of an inode.
+ *
+ * If cancel_real is true this function cancels all COW fork extents for the
+ * inode; if cancel_real is false, real extents are not cleared.
  */
 int
 xfs_reflink_cancel_cow_blocks(
        struct xfs_inode                *ip,
        struct xfs_trans                **tpp,
        xfs_fileoff_t                   offset_fsb,
-       xfs_fileoff_t                   end_fsb)
+       xfs_fileoff_t                   end_fsb,
+       bool                            cancel_real)
 {
        struct xfs_ifork                *ifp = XFS_IFORK_PTR(ip, XFS_COW_FORK);
        struct xfs_bmbt_irec            got, del;
@@ -579,7 +583,7 @@ xfs_reflink_cancel_cow_blocks(
                                        &idx, &got, &del);
                        if (error)
                                break;
-               } else {
+               } else if (del.br_state == XFS_EXT_UNWRITTEN || cancel_real) {
                        xfs_trans_ijoin(*tpp, ip, 0);
                        xfs_defer_init(&dfops, &firstfsb);
 
@@ -621,13 +625,17 @@ xfs_reflink_cancel_cow_blocks(
 }
 
 /*
- * Cancel all pending CoW reservations for some byte range of an inode.
+ * Cancel CoW reservations for some byte range of an inode.
+ *
+ * If cancel_real is true this function cancels all COW fork extents for the
+ * inode; if cancel_real is false, real extents are not cleared.
  */
 int
 xfs_reflink_cancel_cow_range(
        struct xfs_inode        *ip,
        xfs_off_t               offset,
-       xfs_off_t               count)
+       xfs_off_t               count,
+       bool                    cancel_real)
 {
        struct xfs_trans        *tp;
        xfs_fileoff_t           offset_fsb;
@@ -653,7 +661,8 @@ xfs_reflink_cancel_cow_range(
        xfs_trans_ijoin(tp, ip, 0);
 
        /* Scrape out the old CoW reservations */
-       error = xfs_reflink_cancel_cow_blocks(ip, &tp, offset_fsb, end_fsb);
+       error = xfs_reflink_cancel_cow_blocks(ip, &tp, offset_fsb, end_fsb,
+                       cancel_real);
        if (error)
                goto out_cancel;
 
@@ -1450,7 +1459,7 @@ next:
         * We didn't find any shared blocks so turn off the reflink flag.
         * First, get rid of any leftover CoW mappings.
         */
-       error = xfs_reflink_cancel_cow_blocks(ip, tpp, 0, NULLFILEOFF);
+       error = xfs_reflink_cancel_cow_blocks(ip, tpp, 0, NULLFILEOFF, true);
        if (error)
                return error;
 
index 33ac9b8db68380185ad80073b1890cc70e4e3b09..d29a7967f0290ecb8b4ca7c4d4077723262c8ba2 100644 (file)
@@ -39,9 +39,9 @@ extern void xfs_reflink_trim_irec_to_next_cow(struct xfs_inode *ip,
 
 extern int xfs_reflink_cancel_cow_blocks(struct xfs_inode *ip,
                struct xfs_trans **tpp, xfs_fileoff_t offset_fsb,
-               xfs_fileoff_t end_fsb);
+               xfs_fileoff_t end_fsb, bool cancel_real);
 extern int xfs_reflink_cancel_cow_range(struct xfs_inode *ip, xfs_off_t offset,
-               xfs_off_t count);
+               xfs_off_t count, bool cancel_real);
 extern int xfs_reflink_end_cow(struct xfs_inode *ip, xfs_off_t offset,
                xfs_off_t count);
 extern int xfs_reflink_recover_cow(struct xfs_mount *mp);
index 890862f2447c193f374b4de64c58940521b203fb..685c042a120f16a8a9a8dad69d8ee7ce6f9274a0 100644 (file)
@@ -953,7 +953,7 @@ xfs_fs_destroy_inode(
        XFS_STATS_INC(ip->i_mount, vn_remove);
 
        if (xfs_is_reflink_inode(ip)) {
-               error = xfs_reflink_cancel_cow_range(ip, 0, NULLFILEOFF);
+               error = xfs_reflink_cancel_cow_range(ip, 0, NULLFILEOFF, true);
                if (error && !XFS_FORCED_SHUTDOWN(ip->i_mount))
                        xfs_warn(ip->i_mount,
 "Error %d while evicting CoW blocks for inode %llu.",
index 5bdab6bffd23cfc708ad80d6a04b5ad3690bd74d..928fd66b12712241d100c5f4c9d3a857b9eabf1b 100644 (file)
@@ -15,7 +15,6 @@
        ((unlikely(pgd_none(*(pud))) && __pmd_alloc(mm, pud, address))? \
                NULL: pmd_offset(pud, address))
 
-#define pud_alloc(mm, pgd, address)    (pgd)
 #define pud_offset(pgd, start)         (pgd)
 #define pud_none(pud)                  0
 #define pud_bad(pud)                   0
@@ -35,4 +34,6 @@
 #undef  pud_addr_end
 #define pud_addr_end(addr, end)                (end)
 
+#include <asm-generic/5level-fixup.h>
+
 #endif
diff --git a/include/asm-generic/5level-fixup.h b/include/asm-generic/5level-fixup.h
new file mode 100644 (file)
index 0000000..b5ca82d
--- /dev/null
@@ -0,0 +1,41 @@
+#ifndef _5LEVEL_FIXUP_H
+#define _5LEVEL_FIXUP_H
+
+#define __ARCH_HAS_5LEVEL_HACK
+#define __PAGETABLE_P4D_FOLDED
+
+#define P4D_SHIFT                      PGDIR_SHIFT
+#define P4D_SIZE                       PGDIR_SIZE
+#define P4D_MASK                       PGDIR_MASK
+#define PTRS_PER_P4D                   1
+
+#define p4d_t                          pgd_t
+
+#define pud_alloc(mm, p4d, address) \
+       ((unlikely(pgd_none(*(p4d))) && __pud_alloc(mm, p4d, address)) ? \
+               NULL : pud_offset(p4d, address))
+
+#define p4d_alloc(mm, pgd, address)    (pgd)
+#define p4d_offset(pgd, start)         (pgd)
+#define p4d_none(p4d)                  0
+#define p4d_bad(p4d)                   0
+#define p4d_present(p4d)               1
+#define p4d_ERROR(p4d)                 do { } while (0)
+#define p4d_clear(p4d)                 pgd_clear(p4d)
+#define p4d_val(p4d)                   pgd_val(p4d)
+#define p4d_populate(mm, p4d, pud)     pgd_populate(mm, p4d, pud)
+#define p4d_page(p4d)                  pgd_page(p4d)
+#define p4d_page_vaddr(p4d)            pgd_page_vaddr(p4d)
+
+#define __p4d(x)                       __pgd(x)
+#define set_p4d(p4dp, p4d)             set_pgd(p4dp, p4d)
+
+#undef p4d_free_tlb
+#define p4d_free_tlb(tlb, x, addr)     do { } while (0)
+#define p4d_free(mm, x)                        do { } while (0)
+#define __p4d_free_tlb(tlb, x, addr)   do { } while (0)
+
+#undef  p4d_addr_end
+#define p4d_addr_end(addr, end)                (end)
+
+#endif
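
Per the pgtable-nopud.h hunk further down, an architecture that has not yet been converted to five levels opts into this shim by defining __ARCH_USE_5LEVEL_HACK before including the generic folding headers, roughly:

    /* arch/<arch>/include/asm/pgtable.h on an unconverted architecture */
    #define __ARCH_USE_5LEVEL_HACK
    #include <asm-generic/pgtable-nopud.h>  /* routes via pgtable-nop4d-hack.h
                                               to 5level-fixup.h */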
diff --git a/include/asm-generic/pgtable-nop4d-hack.h b/include/asm-generic/pgtable-nop4d-hack.h
new file mode 100644 (file)
index 0000000..752fb75
--- /dev/null
@@ -0,0 +1,62 @@
+#ifndef _PGTABLE_NOP4D_HACK_H
+#define _PGTABLE_NOP4D_HACK_H
+
+#ifndef __ASSEMBLY__
+#include <asm-generic/5level-fixup.h>
+
+#define __PAGETABLE_PUD_FOLDED
+
+/*
+ * Having the pud type consist of a pgd gets the size right, and allows
+ * us to conceptually access the pgd entry that this pud is folded into
+ * without casting.
+ */
+typedef struct { pgd_t pgd; } pud_t;
+
+#define PUD_SHIFT      PGDIR_SHIFT
+#define PTRS_PER_PUD   1
+#define PUD_SIZE       (1UL << PUD_SHIFT)
+#define PUD_MASK       (~(PUD_SIZE-1))
+
+/*
+ * The "pgd_xxx()" functions here are trivial for a folded two-level
+ * setup: the pud is never bad, and a pud always exists (as it's folded
+ * into the pgd entry)
+ */
+static inline int pgd_none(pgd_t pgd)          { return 0; }
+static inline int pgd_bad(pgd_t pgd)           { return 0; }
+static inline int pgd_present(pgd_t pgd)       { return 1; }
+static inline void pgd_clear(pgd_t *pgd)       { }
+#define pud_ERROR(pud)                         (pgd_ERROR((pud).pgd))
+
+#define pgd_populate(mm, pgd, pud)             do { } while (0)
+/*
+ * (puds are folded into pgds so this doesn't get actually called,
+ * but the define is needed for a generic inline function.)
+ */
+#define set_pgd(pgdptr, pgdval)        set_pud((pud_t *)(pgdptr), (pud_t) { pgdval })
+
+static inline pud_t *pud_offset(pgd_t *pgd, unsigned long address)
+{
+       return (pud_t *)pgd;
+}
+
+#define pud_val(x)                             (pgd_val((x).pgd))
+#define __pud(x)                               ((pud_t) { __pgd(x) })
+
+#define pgd_page(pgd)                          (pud_page((pud_t){ pgd }))
+#define pgd_page_vaddr(pgd)                    (pud_page_vaddr((pud_t){ pgd }))
+
+/*
+ * allocating and freeing a pud is trivial: the 1-entry pud is
+ * inside the pgd, so has no extra memory associated with it.
+ */
+#define pud_alloc_one(mm, address)             NULL
+#define pud_free(mm, x)                                do { } while (0)
+#define __pud_free_tlb(tlb, x, a)              do { } while (0)
+
+#undef  pud_addr_end
+#define pud_addr_end(addr, end)                        (end)
+
+#endif /* __ASSEMBLY__ */
+#endif /* _PGTABLE_NOP4D_HACK_H */
diff --git a/include/asm-generic/pgtable-nop4d.h b/include/asm-generic/pgtable-nop4d.h
new file mode 100644 (file)
index 0000000..de364ec
--- /dev/null
@@ -0,0 +1,56 @@
+#ifndef _PGTABLE_NOP4D_H
+#define _PGTABLE_NOP4D_H
+
+#ifndef __ASSEMBLY__
+
+#define __PAGETABLE_P4D_FOLDED
+
+typedef struct { pgd_t pgd; } p4d_t;
+
+#define P4D_SHIFT      PGDIR_SHIFT
+#define PTRS_PER_P4D   1
+#define P4D_SIZE       (1UL << P4D_SHIFT)
+#define P4D_MASK       (~(P4D_SIZE-1))
+
+/*
+ * The "pgd_xxx()" functions here are trivial for a folded two-level
+ * setup: the p4d is never bad, and a p4d always exists (as it's folded
+ * into the pgd entry)
+ */
+static inline int pgd_none(pgd_t pgd)          { return 0; }
+static inline int pgd_bad(pgd_t pgd)           { return 0; }
+static inline int pgd_present(pgd_t pgd)       { return 1; }
+static inline void pgd_clear(pgd_t *pgd)       { }
+#define p4d_ERROR(p4d)                         (pgd_ERROR((p4d).pgd))
+
+#define pgd_populate(mm, pgd, p4d)             do { } while (0)
+/*
+ * (p4ds are folded into pgds so this doesn't get actually called,
+ * but the define is needed for a generic inline function.)
+ */
+#define set_pgd(pgdptr, pgdval)        set_p4d((p4d_t *)(pgdptr), (p4d_t) { pgdval })
+
+static inline p4d_t *p4d_offset(pgd_t *pgd, unsigned long address)
+{
+       return (p4d_t *)pgd;
+}
+
+#define p4d_val(x)                             (pgd_val((x).pgd))
+#define __p4d(x)                               ((p4d_t) { __pgd(x) })
+
+#define pgd_page(pgd)                          (p4d_page((p4d_t){ pgd }))
+#define pgd_page_vaddr(pgd)                    (p4d_page_vaddr((p4d_t){ pgd }))
+
+/*
+ * allocating and freeing a p4d is trivial: the 1-entry p4d is
+ * inside the pgd, so has no extra memory associated with it.
+ */
+#define p4d_alloc_one(mm, address)             NULL
+#define p4d_free(mm, x)                                do { } while (0)
+#define __p4d_free_tlb(tlb, x, a)              do { } while (0)
+
+#undef  p4d_addr_end
+#define p4d_addr_end(addr, end)                        (end)
+
+#endif /* __ASSEMBLY__ */
+#endif /* _PGTABLE_NOP4D_H */
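
With the p4d level defined, the canonical page-table walk gains one step; where this header folds p4d into pgd, the extra calls compile away. A sketch mirroring the userfaultfd_must_wait() conversion earlier in this diff:

    static pte_t *walk_to_pte(struct mm_struct *mm, unsigned long addr)
    {
            pgd_t *pgd = pgd_offset(mm, addr);
            p4d_t *p4d;
            pud_t *pud;
            pmd_t *pmd;

            if (!pgd_present(*pgd))
                    return NULL;
            p4d = p4d_offset(pgd, addr);    /* new level; a no-op when folded */
            if (!p4d_present(*p4d))
                    return NULL;
            pud = pud_offset(p4d, addr);    /* now takes a p4d, not a pgd */
            if (!pud_present(*pud))
                    return NULL;
            pmd = pmd_offset(pud, addr);
            if (!pmd_present(*pmd))
                    return NULL;
            return pte_offset_map(pmd, addr);
    }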
index 810431d8351b16c14c3d1954ddc2890866c41658..c2b9b96d6268f4e85de4e4e15a27bf4a283e9312 100644 (file)
@@ -3,52 +3,57 @@
 
 #ifndef __ASSEMBLY__
 
+#ifdef __ARCH_USE_5LEVEL_HACK
+#include <asm-generic/pgtable-nop4d-hack.h>
+#else
+#include <asm-generic/pgtable-nop4d.h>
+
 #define __PAGETABLE_PUD_FOLDED
 
 /*
- * Having the pud type consist of a pgd gets the size right, and allows
- * us to conceptually access the pgd entry that this pud is folded into
+ * Having the pud type consist of a p4d gets the size right, and allows
+ * us to conceptually access the p4d entry that this pud is folded into
  * without casting.
  */
-typedef struct { pgd_t pgd; } pud_t;
+typedef struct { p4d_t p4d; } pud_t;
 
-#define PUD_SHIFT      PGDIR_SHIFT
+#define PUD_SHIFT      P4D_SHIFT
 #define PTRS_PER_PUD   1
 #define PUD_SIZE       (1UL << PUD_SHIFT)
 #define PUD_MASK       (~(PUD_SIZE-1))
 
 /*
- * The "pgd_xxx()" functions here are trivial for a folded two-level
+ * The "p4d_xxx()" functions here are trivial for a folded two-level
  * setup: the pud is never bad, and a pud always exists (as it's folded
- * into the pgd entry)
+ * into the p4d entry)
  */
-static inline int pgd_none(pgd_t pgd)          { return 0; }
-static inline int pgd_bad(pgd_t pgd)           { return 0; }
-static inline int pgd_present(pgd_t pgd)       { return 1; }
-static inline void pgd_clear(pgd_t *pgd)       { }
-#define pud_ERROR(pud)                         (pgd_ERROR((pud).pgd))
+static inline int p4d_none(p4d_t p4d)          { return 0; }
+static inline int p4d_bad(p4d_t p4d)           { return 0; }
+static inline int p4d_present(p4d_t p4d)       { return 1; }
+static inline void p4d_clear(p4d_t *p4d)       { }
+#define pud_ERROR(pud)                         (p4d_ERROR((pud).p4d))
 
-#define pgd_populate(mm, pgd, pud)             do { } while (0)
+#define p4d_populate(mm, p4d, pud)             do { } while (0)
 /*
- * (puds are folded into pgds so this doesn't get actually called,
+ * (puds are folded into p4ds so this doesn't get actually called,
  * but the define is needed for a generic inline function.)
  */
-#define set_pgd(pgdptr, pgdval)                        set_pud((pud_t *)(pgdptr), (pud_t) { pgdval })
+#define set_p4d(p4dptr, p4dval)        set_pud((pud_t *)(p4dptr), (pud_t) { p4dval })
 
-static inline pud_t * pud_offset(pgd_t * pgd, unsigned long address)
+static inline pud_t *pud_offset(p4d_t *p4d, unsigned long address)
 {
-       return (pud_t *)pgd;
+       return (pud_t *)p4d;
 }
 
-#define pud_val(x)                             (pgd_val((x).pgd))
-#define __pud(x)                               ((pud_t) { __pgd(x) } )
+#define pud_val(x)                             (p4d_val((x).p4d))
+#define __pud(x)                               ((pud_t) { __p4d(x) })
 
-#define pgd_page(pgd)                          (pud_page((pud_t){ pgd }))
-#define pgd_page_vaddr(pgd)                    (pud_page_vaddr((pud_t){ pgd }))
+#define p4d_page(p4d)                          (pud_page((pud_t){ p4d }))
+#define p4d_page_vaddr(p4d)                    (pud_page_vaddr((pud_t){ p4d }))
 
 /*
  * allocating and freeing a pud is trivial: the 1-entry pud is
- * inside the pgd, so has no extra memory associated with it.
+ * inside the p4d, so has no extra memory associated with it.
  */
 #define pud_alloc_one(mm, address)             NULL
 #define pud_free(mm, x)                                do { } while (0)
@@ -58,4 +63,5 @@ static inline pud_t * pud_offset(pgd_t * pgd, unsigned long address)
 #define pud_addr_end(addr, end)                        (end)
 
 #endif /* __ASSEMBLY__ */
+#endif /* !__ARCH_USE_5LEVEL_HACK */
 #endif /* _PGTABLE_NOPUD_H */
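
A sketch of how the new #ifdef is meant to be driven (an assumption drawn from the include switch above, not any specific architecture's code): an architecture still thinking in four levels opts into the compatibility path before pulling in the generic header.

    /* arch pgtable.h, hypothetical: keep the old 4-level view */
    #define __ARCH_USE_5LEVEL_HACK
    #include <asm-generic/pgtable-nopud.h>  /* now routes to pgtable-nop4d-hack.h */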
index f4ca23b158b3b7aace85c4899385e980fd4c42b8..1fad160f35de8e89953af075173a2ad219c9693b 100644 (file)
@@ -10,9 +10,9 @@
 #include <linux/bug.h>
 #include <linux/errno.h>
 
-#if 4 - defined(__PAGETABLE_PUD_FOLDED) - defined(__PAGETABLE_PMD_FOLDED) != \
-       CONFIG_PGTABLE_LEVELS
-#error CONFIG_PGTABLE_LEVELS is not consistent with __PAGETABLE_{PUD,PMD}_FOLDED
+#if 5 - defined(__PAGETABLE_P4D_FOLDED) - defined(__PAGETABLE_PUD_FOLDED) - \
+       defined(__PAGETABLE_PMD_FOLDED) != CONFIG_PGTABLE_LEVELS
+#error CONFIG_PGTABLE_LEVELS is not consistent with __PAGETABLE_{P4D,PUD,PMD}_FOLDED
 #endif
 
 /*
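
Worked example of the new guard (hypothetical configuration): defined(X) evaluates to 1 or 0 inside #if arithmetic, so an architecture folding only the p4d level must declare four page-table levels, since 5 - 1 - 0 - 0 = 4, and the following compiles without hitting the #error.

    #define __PAGETABLE_P4D_FOLDED
    #define CONFIG_PGTABLE_LEVELS 4
    #if 5 - defined(__PAGETABLE_P4D_FOLDED) - defined(__PAGETABLE_PUD_FOLDED) - \
            defined(__PAGETABLE_PMD_FOLDED) != CONFIG_PGTABLE_LEVELS
    #error inconsistent
    #endif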
@@ -424,6 +424,13 @@ static inline pgprot_t pgprot_modify(pgprot_t oldprot, pgprot_t newprot)
        (__boundary - 1 < (end) - 1)? __boundary: (end);                \
 })
 
+#ifndef p4d_addr_end
+#define p4d_addr_end(addr, end)                                                \
+({     unsigned long __boundary = ((addr) + P4D_SIZE) & P4D_MASK;      \
+       (__boundary - 1 < (end) - 1)? __boundary: (end);                \
+})
+#endif
+
 #ifndef pud_addr_end
 #define pud_addr_end(addr, end)                                                \
 ({     unsigned long __boundary = ((addr) + PUD_SIZE) & PUD_MASK;      \
@@ -444,6 +451,7 @@ static inline pgprot_t pgprot_modify(pgprot_t oldprot, pgprot_t newprot)
  * Do the tests inline, but report and clear the bad entry in mm/memory.c.
  */
 void pgd_clear_bad(pgd_t *);
+void p4d_clear_bad(p4d_t *);
 void pud_clear_bad(pud_t *);
 void pmd_clear_bad(pmd_t *);
 
@@ -458,6 +466,17 @@ static inline int pgd_none_or_clear_bad(pgd_t *pgd)
        return 0;
 }
 
+static inline int p4d_none_or_clear_bad(p4d_t *p4d)
+{
+       if (p4d_none(*p4d))
+               return 1;
+       if (unlikely(p4d_bad(*p4d))) {
+               p4d_clear_bad(p4d);
+               return 1;
+       }
+       return 0;
+}
+
 static inline int pud_none_or_clear_bad(pud_t *pud)
 {
        if (pud_none(*pud))
@@ -844,11 +863,30 @@ static inline int pmd_protnone(pmd_t pmd)
 #endif /* CONFIG_MMU */
 
 #ifdef CONFIG_HAVE_ARCH_HUGE_VMAP
+
+#ifndef __PAGETABLE_P4D_FOLDED
+int p4d_set_huge(p4d_t *p4d, phys_addr_t addr, pgprot_t prot);
+int p4d_clear_huge(p4d_t *p4d);
+#else
+static inline int p4d_set_huge(p4d_t *p4d, phys_addr_t addr, pgprot_t prot)
+{
+       return 0;
+}
+static inline int p4d_clear_huge(p4d_t *p4d)
+{
+       return 0;
+}
+#endif /* !__PAGETABLE_P4D_FOLDED */
+
 int pud_set_huge(pud_t *pud, phys_addr_t addr, pgprot_t prot);
 int pmd_set_huge(pmd_t *pmd, phys_addr_t addr, pgprot_t prot);
 int pud_clear_huge(pud_t *pud);
 int pmd_clear_huge(pmd_t *pmd);
 #else  /* !CONFIG_HAVE_ARCH_HUGE_VMAP */
+static inline int p4d_set_huge(p4d_t *p4d, phys_addr_t addr, pgprot_t prot)
+{
+       return 0;
+}
 static inline int pud_set_huge(pud_t *pud, phys_addr_t addr, pgprot_t prot)
 {
        return 0;
@@ -857,6 +895,10 @@ static inline int pmd_set_huge(pmd_t *pmd, phys_addr_t addr, pgprot_t prot)
 {
        return 0;
 }
+static inline int p4d_clear_huge(p4d_t *p4d)
+{
+       return 0;
+}
 static inline int pud_clear_huge(pud_t *pud)
 {
        return 0;
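
Quick userspace check of the p4d_addr_end() pattern added above: round addr up to the next level boundary, but never past end. The size below stands in for a real architecture constant (on x86-64 with five levels, P4D_SHIFT is 39, i.e. 512 GiB per entry); it compiles with GCC thanks to statement expressions, matching kernel style.

    #include <assert.h>

    #define P4D_SIZE (1UL << 39)
    #define P4D_MASK (~(P4D_SIZE - 1))

    #define p4d_addr_end(addr, end)                                         \
    ({      unsigned long __boundary = ((addr) + P4D_SIZE) & P4D_MASK;      \
            (__boundary - 1 < (end) - 1) ? __boundary : (end);              \
    })

    int main(void)
    {
            /* rounds up to the next boundary ... */
            assert(p4d_addr_end(P4D_SIZE + 5, 3 * P4D_SIZE) == 2 * P4D_SIZE);
            /* ... but is clamped to the requested end */
            assert(p4d_addr_end(P4D_SIZE + 5, P4D_SIZE + 10) == P4D_SIZE + 10);
            return 0;
    }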
index 4df64a1fc09e7aab7f88cd4afe73928228930147..532372c6cf15c8084f4910072e96fd0360bad2cd 100644 (file)
@@ -14,8 +14,8 @@
  * [_sdata, _edata]: contains .data.* sections, may also contain .rodata.*
  *                   and/or .init.* sections.
  * [__start_rodata, __end_rodata]: contains .rodata.* sections
- * [__start_data_ro_after_init, __end_data_ro_after_init]:
- *                  contains data.ro_after_init section
+ * [__start_ro_after_init, __end_ro_after_init]:
+ *                  contains .data..ro_after_init section
  * [__init_begin, __init_end]: contains .init.* sections, but .init.text.*
  *                   may be out of this range on some architectures.
  * [_sinittext, _einittext]: contains .init.text.* sections
@@ -33,7 +33,7 @@ extern char _data[], _sdata[], _edata[];
 extern char __bss_start[], __bss_stop[];
 extern char __init_begin[], __init_end[];
 extern char _sinittext[], _einittext[];
-extern char __start_data_ro_after_init[], __end_data_ro_after_init[];
+extern char __start_ro_after_init[], __end_ro_after_init[];
 extern char _end[];
 extern char __per_cpu_load[], __per_cpu_start[], __per_cpu_end[];
 extern char __kprobes_text_start[], __kprobes_text_end[];
index 4329bc6ef04b7b555337dc2f558ff7d7321668c4..8afa4335e5b2bfd0c42c00e1b1506d4e1f7377ac 100644 (file)
@@ -270,6 +270,12 @@ static inline void tlb_remove_check_page_size_change(struct mmu_gather *tlb,
                __pte_free_tlb(tlb, ptep, address);             \
        } while (0)
 
+#define pmd_free_tlb(tlb, pmdp, address)                       \
+       do {                                                    \
+               __tlb_adjust_range(tlb, address, PAGE_SIZE);            \
+               __pmd_free_tlb(tlb, pmdp, address);             \
+       } while (0)
+
 #ifndef __ARCH_HAS_4LEVEL_HACK
 #define pud_free_tlb(tlb, pudp, address)                       \
        do {                                                    \
@@ -278,11 +284,13 @@ static inline void tlb_remove_check_page_size_change(struct mmu_gather *tlb,
        } while (0)
 #endif
 
-#define pmd_free_tlb(tlb, pmdp, address)                       \
+#ifndef __ARCH_HAS_5LEVEL_HACK
+#define p4d_free_tlb(tlb, pudp, address)                       \
        do {                                                    \
-               __tlb_adjust_range(tlb, address, PAGE_SIZE);    \
-               __pmd_free_tlb(tlb, pmdp, address);             \
+               __tlb_adjust_range(tlb, address, PAGE_SIZE);            \
+               __p4d_free_tlb(tlb, pudp, address);             \
        } while (0)
+#endif
 
 #define tlb_migrate_finish(mm) do {} while (0)
 
index 0968d13b388591ae02b37f9dda0fe7a498cc7475..7cdfe167074f873a71dd51e04da3304864d2f063 100644 (file)
        KEEP(*(__##name##_of_table_end))
 
 #define CLKSRC_OF_TABLES()     OF_TABLE(CONFIG_CLKSRC_OF, clksrc)
+#define CLKEVT_OF_TABLES()     OF_TABLE(CONFIG_CLKEVT_OF, clkevt)
 #define IRQCHIP_OF_MATCH_TABLE() OF_TABLE(CONFIG_IRQCHIP, irqchip)
 #define CLK_OF_TABLES()                OF_TABLE(CONFIG_COMMON_CLK, clk)
 #define IOMMU_OF_TABLES()      OF_TABLE(CONFIG_OF_IOMMU, iommu)
  */
 #ifndef RO_AFTER_INIT_DATA
 #define RO_AFTER_INIT_DATA                                             \
-       __start_data_ro_after_init = .;                                 \
+       __start_ro_after_init = .;                                      \
        *(.data..ro_after_init)                                         \
-       __end_data_ro_after_init = .;
+       __end_ro_after_init = .;
 #endif
 
 /*
        CLK_OF_TABLES()                                                 \
        RESERVEDMEM_OF_TABLES()                                         \
        CLKSRC_OF_TABLES()                                              \
+       CLKEVT_OF_TABLES()                                              \
        IOMMU_OF_TABLES()                                               \
        CPU_METHOD_OF_TABLES()                                          \
        CPUIDLE_METHOD_OF_TABLES()                                      \
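
The CLKEVT_OF_TABLES() addition reuses the OF_TABLE() linker-table idiom: entries land in a named section and the bounds come from linker-provided symbols. A userspace analogue (GCC/ELF; all names illustrative):

    #include <stdio.h>

    struct clkevt_entry { const char *compat; };

    #define DECLARE_CLKEVT(name, compat_str)                        \
            static const struct clkevt_entry name                   \
            __attribute__((used, section("clkevt_table"))) = { compat_str }

    DECLARE_CLKEVT(foo, "vendor,foo-timer");
    DECLARE_CLKEVT(bar, "vendor,bar-timer");

    /* the linker synthesizes these for identifier-named sections */
    extern const struct clkevt_entry __start_clkevt_table[];
    extern const struct clkevt_entry __stop_clkevt_table[];

    int main(void)
    {
            for (const struct clkevt_entry *e = __start_clkevt_table;
                 e < __stop_clkevt_table; e++)
                    printf("%s\n", e->compat);
            return 0;
    }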
index a2bfd7843f18f6e79d8bc7e5743b031345153053..e2b9c6fe271496e45dca5abb94d814dd8a9c3c04 100644 (file)
@@ -73,7 +73,7 @@ int af_alg_unregister_type(const struct af_alg_type *type);
 
 int af_alg_release(struct socket *sock);
 void af_alg_release_parent(struct sock *sk);
-int af_alg_accept(struct sock *sk, struct socket *newsock);
+int af_alg_accept(struct sock *sk, struct socket *newsock, bool kern);
 
 int af_alg_make_sg(struct af_alg_sgl *sgl, struct iov_iter *iter, int len);
 void af_alg_free_sg(struct af_alg_sgl *sgl);
index ed953f98f0e1446ce01630c005e406f941282347..1487011fe057ba4b35271bc85e52c6b90291c2c3 100644 (file)
@@ -229,6 +229,8 @@ extern void ttm_base_object_unref(struct ttm_base_object **p_base);
  * @ref_type: The type of reference.
  * @existed: Upon completion, indicates that an identical reference object
  * already existed, and the refcount was upped on that object instead.
+ * @require_existed: Fail with -EPERM if an identical ref object didn't
+ * already exist.
  *
  * Checks that the base object is shareable and adds a ref object to it.
  *
@@ -243,7 +245,8 @@ extern void ttm_base_object_unref(struct ttm_base_object **p_base);
  */
 extern int ttm_ref_object_add(struct ttm_object_file *tfile,
                              struct ttm_base_object *base,
-                             enum ttm_ref_type ref_type, bool *existed);
+                             enum ttm_ref_type ref_type, bool *existed,
+                             bool require_existed);
 
 extern bool ttm_ref_object_exists(struct ttm_object_file *tfile,
                                  struct ttm_base_object *base);
index 399a123aed5815f1f6d2b2784e0148794790bc43..db69d84ed7d14152626565529e0bc981c1976a33 100644 (file)
@@ -20,7 +20,7 @@
 #define CS42L42_HPOUT_LOAD_1NF         0
 #define CS42L42_HPOUT_LOAD_10NF                1
 
-/* HPOUT Clamp to GND Overide */
+/* HPOUT Clamp to GND Override */
 #define CS42L42_HPOUT_CLAMP_EN         0
 #define CS42L42_HPOUT_CLAMP_DIS                1
 
index 673acda012af44efe4fb5a7fc5279d08e416cc86..9b05886f9773cde8439a0c3e21b39ad29460c440 100644 (file)
@@ -287,18 +287,15 @@ static inline bool invalid_phys_cpuid(phys_cpuid_t phys_id)
 }
 
 /* Validate the processor object's proc_id */
-bool acpi_processor_validate_proc_id(int proc_id);
+bool acpi_duplicate_processor_id(int proc_id);
 
 #ifdef CONFIG_ACPI_HOTPLUG_CPU
 /* Arch dependent functions for cpu hotplug support */
 int acpi_map_cpu(acpi_handle handle, phys_cpuid_t physid, u32 acpi_id,
                 int *pcpu);
 int acpi_unmap_cpu(int cpu);
-int acpi_map_cpu2node(acpi_handle handle, int cpu, int physid);
 #endif /* CONFIG_ACPI_HOTPLUG_CPU */
 
-void acpi_set_processor_mapping(void);
-
 #ifdef CONFIG_ACPI_HOTPLUG_IOAPIC
 int acpi_get_ioapic_id(acpi_handle handle, u32 gsi_base, u64 *phys_addr);
 #endif
index 796016e63c1da7b64c59f8d9a1b4979d8059027b..5a7da607ca045f81a46e7b73bb31a8f1b978452a 100644 (file)
@@ -435,7 +435,6 @@ struct request_queue {
        struct delayed_work     delay_work;
 
        struct backing_dev_info *backing_dev_info;
-       struct disk_devt        *disk_devt;
 
        /*
         * The queue owner gets to use this for whatever they like.
index 909fc033173a7c893ffe7113f0e32568392b76ae..6bb38d76faf42a18437eb7565380b75a71096f78 100644 (file)
@@ -35,6 +35,7 @@ struct bpf_map_ops {
        void *(*map_fd_get_ptr)(struct bpf_map *map, struct file *map_file,
                                int fd);
        void (*map_fd_put_ptr)(void *ptr);
+       u32 (*map_gen_lookup)(struct bpf_map *map, struct bpf_insn *insn_buf);
 };
 
 struct bpf_map {
@@ -49,12 +50,7 @@ struct bpf_map {
        const struct bpf_map_ops *ops;
        struct work_struct work;
        atomic_t usercnt;
-};
-
-struct bpf_map_type_list {
-       struct list_head list_node;
-       const struct bpf_map_ops *ops;
-       enum bpf_map_type type;
+       struct bpf_map *inner_map_meta;
 };
 
 /* function argument constraints */
@@ -167,12 +163,8 @@ struct bpf_verifier_ops {
                                  const struct bpf_insn *src,
                                  struct bpf_insn *dst,
                                  struct bpf_prog *prog);
-};
-
-struct bpf_prog_type_list {
-       struct list_head list_node;
-       const struct bpf_verifier_ops *ops;
-       enum bpf_prog_type type;
+       int (*test_run)(struct bpf_prog *prog, const union bpf_attr *kattr,
+                       union bpf_attr __user *uattr);
 };
 
 struct bpf_prog_aux {
@@ -231,11 +223,21 @@ typedef unsigned long (*bpf_ctx_copy_t)(void *dst, const void *src,
 u64 bpf_event_output(struct bpf_map *map, u64 flags, void *meta, u64 meta_size,
                     void *ctx, u64 ctx_size, bpf_ctx_copy_t ctx_copy);
 
+int bpf_prog_test_run_xdp(struct bpf_prog *prog, const union bpf_attr *kattr,
+                         union bpf_attr __user *uattr);
+int bpf_prog_test_run_skb(struct bpf_prog *prog, const union bpf_attr *kattr,
+                         union bpf_attr __user *uattr);
+
 #ifdef CONFIG_BPF_SYSCALL
 DECLARE_PER_CPU(int, bpf_prog_active);
 
-void bpf_register_prog_type(struct bpf_prog_type_list *tl);
-void bpf_register_map_type(struct bpf_map_type_list *tl);
+#define BPF_PROG_TYPE(_id, _ops) \
+       extern const struct bpf_verifier_ops _ops;
+#define BPF_MAP_TYPE(_id, _ops) \
+       extern const struct bpf_map_ops _ops;
+#include <linux/bpf_types.h>
+#undef BPF_PROG_TYPE
+#undef BPF_MAP_TYPE
 
 struct bpf_prog *bpf_prog_get(u32 ufd);
 struct bpf_prog *bpf_prog_get_type(u32 ufd, enum bpf_prog_type type);
@@ -275,6 +277,8 @@ int bpf_stackmap_copy(struct bpf_map *map, void *key, void *value);
 int bpf_fd_array_map_update_elem(struct bpf_map *map, struct file *map_file,
                                 void *key, void *value, u64 map_flags);
 void bpf_fd_array_map_clear(struct bpf_map *map);
+int bpf_fd_htab_map_update_elem(struct bpf_map *map, struct file *map_file,
+                               void *key, void *value, u64 map_flags);
 
 /* memcpy that is used with 8-byte aligned pointers, power-of-8 size and
  * forced to use 'long' read/writes to try to atomically copy long counters.
@@ -295,10 +299,6 @@ static inline void bpf_long_memcpy(void *dst, const void *src, u32 size)
 /* verify correctness of eBPF program */
 int bpf_check(struct bpf_prog **fp, union bpf_attr *attr);
 #else
-static inline void bpf_register_prog_type(struct bpf_prog_type_list *tl)
-{
-}
-
 static inline struct bpf_prog *bpf_prog_get(u32 ufd)
 {
        return ERR_PTR(-EOPNOTSUPP);
diff --git a/include/linux/bpf_types.h b/include/linux/bpf_types.h
new file mode 100644 (file)
index 0000000..03bf223
--- /dev/null
@@ -0,0 +1,36 @@
+/* internal file - do not include directly */
+
+#ifdef CONFIG_NET
+BPF_PROG_TYPE(BPF_PROG_TYPE_SOCKET_FILTER, sk_filter_prog_ops)
+BPF_PROG_TYPE(BPF_PROG_TYPE_SCHED_CLS, tc_cls_act_prog_ops)
+BPF_PROG_TYPE(BPF_PROG_TYPE_SCHED_ACT, tc_cls_act_prog_ops)
+BPF_PROG_TYPE(BPF_PROG_TYPE_XDP, xdp_prog_ops)
+BPF_PROG_TYPE(BPF_PROG_TYPE_CGROUP_SKB, cg_skb_prog_ops)
+BPF_PROG_TYPE(BPF_PROG_TYPE_CGROUP_SOCK, cg_sock_prog_ops)
+BPF_PROG_TYPE(BPF_PROG_TYPE_LWT_IN, lwt_inout_prog_ops)
+BPF_PROG_TYPE(BPF_PROG_TYPE_LWT_OUT, lwt_inout_prog_ops)
+BPF_PROG_TYPE(BPF_PROG_TYPE_LWT_XMIT, lwt_xmit_prog_ops)
+#endif
+#ifdef CONFIG_BPF_EVENTS
+BPF_PROG_TYPE(BPF_PROG_TYPE_KPROBE, kprobe_prog_ops)
+BPF_PROG_TYPE(BPF_PROG_TYPE_TRACEPOINT, tracepoint_prog_ops)
+BPF_PROG_TYPE(BPF_PROG_TYPE_PERF_EVENT, perf_event_prog_ops)
+#endif
+
+BPF_MAP_TYPE(BPF_MAP_TYPE_ARRAY, array_map_ops)
+BPF_MAP_TYPE(BPF_MAP_TYPE_PERCPU_ARRAY, percpu_array_map_ops)
+BPF_MAP_TYPE(BPF_MAP_TYPE_PROG_ARRAY, prog_array_map_ops)
+BPF_MAP_TYPE(BPF_MAP_TYPE_PERF_EVENT_ARRAY, perf_event_array_map_ops)
+#ifdef CONFIG_CGROUPS
+BPF_MAP_TYPE(BPF_MAP_TYPE_CGROUP_ARRAY, cgroup_array_map_ops)
+#endif
+BPF_MAP_TYPE(BPF_MAP_TYPE_HASH, htab_map_ops)
+BPF_MAP_TYPE(BPF_MAP_TYPE_PERCPU_HASH, htab_percpu_map_ops)
+BPF_MAP_TYPE(BPF_MAP_TYPE_LRU_HASH, htab_lru_map_ops)
+BPF_MAP_TYPE(BPF_MAP_TYPE_LRU_PERCPU_HASH, htab_lru_percpu_map_ops)
+BPF_MAP_TYPE(BPF_MAP_TYPE_LPM_TRIE, trie_map_ops)
+#ifdef CONFIG_PERF_EVENTS
+BPF_MAP_TYPE(BPF_MAP_TYPE_STACK_TRACE, stack_map_ops)
+#endif
+BPF_MAP_TYPE(BPF_MAP_TYPE_ARRAY_OF_MAPS, array_of_maps_map_ops)
+BPF_MAP_TYPE(BPF_MAP_TYPE_HASH_OF_MAPS, htab_of_maps_map_ops)
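
This new header is the classic X-macro pattern: each includer defines BPF_PROG_TYPE()/BPF_MAP_TYPE() to whatever expansion it needs, includes the list, and undefines them again, so the set of types lives in exactly one place. A userspace sketch of the same idea (a macro list stands in for the real #include; names are illustrative):

    #include <stdio.h>

    #define TYPE_LIST              \
            X(PROG_SOCKET_FILTER)  \
            X(PROG_XDP)

    enum prog_type {                        /* expansion 1: the enum */
    #define X(name) name,
            TYPE_LIST
    #undef X
            PROG_TYPE_MAX
    };

    static const char *const prog_type_name[] = {   /* expansion 2: names */
    #define X(name) #name,
            TYPE_LIST
    #undef X
    };

    int main(void)
    {
            for (int i = 0; i < PROG_TYPE_MAX; i++)
                    printf("%d = %s\n", i, prog_type_name[i]);
            return 0;
    }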
index a13b031dc6b807f38c0e7e687ba2cfbfe4c65fb7..5efb4db44e1ef3223d984296ccf1ca0737224d10 100644 (file)
@@ -66,7 +66,10 @@ struct bpf_verifier_state_list {
 };
 
 struct bpf_insn_aux_data {
-       enum bpf_reg_type ptr_type;     /* pointer type for load/store insns */
+       union {
+               enum bpf_reg_type ptr_type;     /* pointer type for load/store insns */
+               struct bpf_map *map_ptr;        /* pointer for call insn into lookup_elem */
+       };
 };
 
 #define MAX_USED_MAPS 64 /* max number of maps accessed by one eBPF program */
index 55e517130311980b36ad628054043130f88626af..abcda9b458ab65143acb1fb28b50225adbec83fd 100644 (file)
@@ -25,6 +25,9 @@
 #define PHY_ID_BCM57780                        0x03625d90
 
 #define PHY_ID_BCM7250                 0xae025280
+#define PHY_ID_BCM7260                 0xae025190
+#define PHY_ID_BCM7268                 0xae025090
+#define PHY_ID_BCM7271                 0xae0253b0
 #define PHY_ID_BCM7278                 0xae0251a0
 #define PHY_ID_BCM7364                 0xae025260
 #define PHY_ID_BCM7366                 0x600d8490
index df08a41d5be5f26cfa4cdc74935f5eae7fa51385..319a0da827b881a7dad147abd7feb264c17662b4 100644 (file)
@@ -45,12 +45,13 @@ struct can_proto {
 extern int  can_proto_register(const struct can_proto *cp);
 extern void can_proto_unregister(const struct can_proto *cp);
 
-int can_rx_register(struct net_device *dev, canid_t can_id, canid_t mask,
+int can_rx_register(struct net *net, struct net_device *dev,
+                   canid_t can_id, canid_t mask,
                    void (*func)(struct sk_buff *, void *),
                    void *data, char *ident, struct sock *sk);
 
-extern void can_rx_unregister(struct net_device *dev, canid_t can_id,
-                             canid_t mask,
+extern void can_rx_unregister(struct net *net, struct net_device *dev,
+                             canid_t can_id, canid_t mask,
                              void (*func)(struct sk_buff *, void *),
                              void *data);
 
diff --git a/include/linux/can/platform/ti_hecc.h b/include/linux/can/platform/ti_hecc.h
deleted file mode 100644 (file)
index a52f47c..0000000
+++ /dev/null
@@ -1,44 +0,0 @@
-#ifndef _CAN_PLATFORM_TI_HECC_H
-#define _CAN_PLATFORM_TI_HECC_H
-
-/*
- * TI HECC (High End CAN Controller) driver platform header
- *
- * Copyright (C) 2009 Texas Instruments Incorporated - http://www.ti.com/
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation version 2.
- *
- * This program is distributed as is WITHOUT ANY WARRANTY of any
- * kind, whether express or implied; without even the implied warranty
- * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- */
-
-/**
- * struct hecc_platform_data - HECC Platform Data
- *
- * @scc_hecc_offset:   mostly 0 - should really never change
- * @scc_ram_offset:    SCC RAM offset
- * @hecc_ram_offset:   HECC RAM offset
- * @mbx_offset:                Mailbox RAM offset
- * @int_line:          Interrupt line to use - 0 or 1
- * @version:           version for future use
- * @transceiver_switch:        platform specific callback fn for transceiver control
- *
- * Platform data structure to get all platform specific settings.
- * this structure also accounts the fact that the IP may have different
- * RAM and mailbox offsets for different SOC's
- */
-struct ti_hecc_platform_data {
-       u32 scc_hecc_offset;
-       u32 scc_ram_offset;
-       u32 hecc_ram_offset;
-       u32 mbx_offset;
-       u32 int_line;
-       u32 version;
-       void (*transceiver_switch) (int);
-};
-#endif /* !_CAN_PLATFORM_TI_HECC_H */
index c71dd8fa57640eab059ca21b2847609340b6b506..c41b8d99dd0e7f352bc8e57e4e2ffc4dd08c63b7 100644 (file)
@@ -556,7 +556,7 @@ enum ccp_engine {
  * struct ccp_cmd - CCP operation request
  * @entry: list element (ccp driver use only)
  * @work: work element used for callbacks (ccp driver use only)
- * @ccp: CCP device to be run on (ccp driver use only)
+ * @ccp: CCP device to be run on
  * @ret: operation return code (ccp driver use only)
  * @flags: cmd processing flags
  * @engine: CCP operation to perform
index 1816c5e26581716b24d6d3fd0f036b9b739c467c..88cd5dc8e238a2fa7e8c66a034a8e5b0ae248f21 100644 (file)
@@ -48,6 +48,7 @@ struct ceph_options {
        unsigned long mount_timeout;            /* jiffies */
        unsigned long osd_idle_ttl;             /* jiffies */
        unsigned long osd_keepalive_timeout;    /* jiffies */
+       unsigned long osd_request_timeout;      /* jiffies */
 
        /*
         * any type that can't be simply compared or doesn't need
@@ -68,6 +69,7 @@ struct ceph_options {
 #define CEPH_MOUNT_TIMEOUT_DEFAULT     msecs_to_jiffies(60 * 1000)
 #define CEPH_OSD_KEEPALIVE_DEFAULT     msecs_to_jiffies(5 * 1000)
 #define CEPH_OSD_IDLE_TTL_DEFAULT      msecs_to_jiffies(60 * 1000)
+#define CEPH_OSD_REQUEST_TIMEOUT_DEFAULT 0  /* no timeout */
 
 #define CEPH_MONC_HUNT_INTERVAL                msecs_to_jiffies(3 * 1000)
 #define CEPH_MONC_PING_INTERVAL                msecs_to_jiffies(10 * 1000)
index 2ea0c282f3dc9326f7b3c4b7a3883758831ed251..c125b5d9e13ceddacd921286f19133607835dee1 100644 (file)
@@ -189,6 +189,7 @@ struct ceph_osd_request {
 
        /* internal */
        unsigned long r_stamp;                /* jiffies, send or check time */
+       unsigned long r_start_stamp;          /* jiffies */
        int r_attempts;
        struct ceph_eversion r_replay_version; /* aka reassert_version */
        u32 r_last_force_resend;
index 5d3053c34fb3d5c365ad1f5a44e39a03fb6926c8..6d7edc3082f98466566cd1ae67640d7e0bd976d3 100644 (file)
@@ -229,7 +229,7 @@ static inline void tick_setup_hrtimer_broadcast(void) { }
 
 #ifdef CONFIG_CLKEVT_PROBE
 extern int clockevent_probe(void);
-#els
+#else
 static inline int clockevent_probe(void) { return 0; }
 #endif
 
index 61d042bbbf607253033d9948b291cab2322814ba..68449293c4b6233c1a1d4133b1819376a9310225 100644 (file)
@@ -163,6 +163,7 @@ struct dccp_request_sock {
        __u64                    dreq_isr;
        __u64                    dreq_gsr;
        __be32                   dreq_service;
+       spinlock_t               dreq_lock;
        struct list_head         dreq_featneg;
        __u32                    dreq_timestamp_echo;
        __u32                    dreq_timestamp_time;
index 30c4570e928dfe871bc84382f14eb49b5cac018e..9ef518af5515a01e202dee3cf4c27ffcd8c56441 100644 (file)
@@ -1140,7 +1140,6 @@ static inline bool device_supports_offline(struct device *dev)
 extern void lock_device_hotplug(void);
 extern void unlock_device_hotplug(void);
 extern int lock_device_hotplug_sysfs(void);
-void assert_held_device_hotplug(void);
 extern int device_offline(struct device *dev);
 extern int device_online(struct device *dev);
 extern void set_primary_fwnode(struct device *dev, struct fwnode_handle *fwnode);
index 9ca23fcfb5d73131b564ad81d887929abc0e954b..6fdfc884fdeb3d3cf81dcbd40c52c0b8c8d203b1 100644 (file)
@@ -20,6 +20,8 @@ struct sock_exterr_skb {
        struct sock_extended_err        ee;
        u16                             addr_offset;
        __be16                          port;
+       u8                              opt_stats:1,
+                                       unused:7;
 };
 
 #endif
index c62b709b1ce087b7891f5d9c76aa2940b7f4a9a9..2d9f80848d4bd2b6e60dc06f1053ba91256c37ac 100644 (file)
@@ -446,21 +446,6 @@ static inline void eth_addr_dec(u8 *addr)
        u64_to_ether_addr(u, addr);
 }
 
-/**
- * ether_addr_greater - Compare two Ethernet addresses
- * @addr1: Pointer to a six-byte array containing the Ethernet address
- * @addr2: Pointer other six-byte array containing the Ethernet address
- *
- * Compare two Ethernet addresses, returns true addr1 is greater than addr2
- */
-static inline bool ether_addr_greater(const u8 *addr1, const u8 *addr2)
-{
-       u64 u1 = ether_addr_to_u64(addr1);
-       u64 u2 = ether_addr_to_u64(addr2);
-
-       return u1 > u2;
-}
-
 /**
  * is_etherdev_addr - Tell if given Ethernet address belongs to the device.
  * @dev: Pointer to a device structure
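
The surviving eth_addr_dec() (and the ether_addr_greater() helper deleted above) both lean on the same trick: pack the six address bytes big-endian into a u64 so arithmetic and comparisons become single integer operations. A self-contained sketch:

    #include <stdint.h>
    #include <stdio.h>

    static uint64_t ether_addr_to_u64(const uint8_t *addr)
    {
            uint64_t u = 0;
            for (int i = 0; i < 6; i++)
                    u = u << 8 | addr[i];
            return u;
    }

    static void u64_to_ether_addr(uint64_t u, uint8_t *addr)
    {
            for (int i = 5; i >= 0; i--, u >>= 8)
                    addr[i] = u & 0xff;
    }

    int main(void)
    {
            uint8_t mac[6] = { 0 };                     /* 00:00:00:00:00:00 */
            u64_to_ether_addr(ether_addr_to_u64(mac) - 1, mac);
            printf("%02x:%02x:%02x:%02x:%02x:%02x\n",   /* ff:ff:ff:ff:ff:ff */
                   mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
            return 0;
    }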
index 9ded8c6d8176b909cf68da0e125eef4441b7c9a9..83cc9863444b078765255b9010b015578768be09 100644 (file)
@@ -60,6 +60,7 @@ enum ethtool_phys_id_state {
 enum {
        ETH_RSS_HASH_TOP_BIT, /* Configurable RSS hash function - Toeplitz */
        ETH_RSS_HASH_XOR_BIT, /* Configurable RSS hash function - Xor */
+       ETH_RSS_HASH_CRC32_BIT, /* Configurable RSS hash function - Crc32 */
 
        /*
         * Add your fresh new hash function bits above and remember to update
@@ -73,6 +74,7 @@ enum {
 
 #define ETH_RSS_HASH_TOP       __ETH_RSS_HASH(TOP)
 #define ETH_RSS_HASH_XOR       __ETH_RSS_HASH(XOR)
+#define ETH_RSS_HASH_CRC32     __ETH_RSS_HASH(CRC32)
 
 #define ETH_RSS_HASH_UNKNOWN   0
 #define ETH_RSS_HASH_NO_CHANGE 0
index 0c167fdee5f7d126ed4de7e1201d514d1402a5ca..511fe910bf1d5225a017234015b3455d0a46c2b9 100644 (file)
@@ -7,6 +7,7 @@
 #include <stdarg.h>
 
 #include <linux/atomic.h>
+#include <linux/refcount.h>
 #include <linux/compat.h>
 #include <linux/skbuff.h>
 #include <linux/linkage.h>
@@ -409,6 +410,7 @@ struct bpf_prog {
        u16                     pages;          /* Number of allocated pages */
        kmemcheck_bitfield_begin(meta);
        u16                     jited:1,        /* Is our filter JIT'ed? */
+                               locked:1,       /* Program image locked? */
                                gpl_compatible:1, /* Is filter GPL compatible? */
                                cb_access:1,    /* Is control block accessed? */
                                dst_needed:1,   /* Do we need dst entry? */
@@ -429,7 +431,7 @@ struct bpf_prog {
 };
 
 struct sk_filter {
-       atomic_t        refcnt;
+       refcount_t      refcnt;
        struct rcu_head rcu;
        struct bpf_prog *prog;
 };
@@ -554,22 +556,29 @@ static inline bool bpf_prog_was_classic(const struct bpf_prog *prog)
 #ifdef CONFIG_ARCH_HAS_SET_MEMORY
 static inline void bpf_prog_lock_ro(struct bpf_prog *fp)
 {
-       set_memory_ro((unsigned long)fp, fp->pages);
+       fp->locked = 1;
+       WARN_ON_ONCE(set_memory_ro((unsigned long)fp, fp->pages));
 }
 
 static inline void bpf_prog_unlock_ro(struct bpf_prog *fp)
 {
-       set_memory_rw((unsigned long)fp, fp->pages);
+       if (fp->locked) {
+               WARN_ON_ONCE(set_memory_rw((unsigned long)fp, fp->pages));
+               /* In case set_memory_rw() fails, we want to be the first
+                * to crash here instead of some random place later on.
+                */
+               fp->locked = 0;
+       }
 }
 
 static inline void bpf_jit_binary_lock_ro(struct bpf_binary_header *hdr)
 {
-       set_memory_ro((unsigned long)hdr, hdr->pages);
+       WARN_ON_ONCE(set_memory_ro((unsigned long)hdr, hdr->pages));
 }
 
 static inline void bpf_jit_binary_unlock_ro(struct bpf_binary_header *hdr)
 {
-       set_memory_rw((unsigned long)hdr, hdr->pages);
+       WARN_ON_ONCE(set_memory_rw((unsigned long)hdr, hdr->pages));
 }
 #else
 static inline void bpf_prog_lock_ro(struct bpf_prog *fp)
@@ -685,6 +694,11 @@ static inline bool bpf_jit_is_ebpf(void)
 # endif
 }
 
+static inline bool ebpf_jit_enabled(void)
+{
+       return bpf_jit_enable && bpf_jit_is_ebpf();
+}
+
 static inline bool bpf_prog_ebpf_jited(const struct bpf_prog *fp)
 {
        return fp->jited && bpf_jit_is_ebpf();
@@ -745,6 +759,11 @@ void bpf_prog_kallsyms_del(struct bpf_prog *fp);
 
 #else /* CONFIG_BPF_JIT */
 
+static inline bool ebpf_jit_enabled(void)
+{
+       return false;
+}
+
 static inline bool bpf_prog_ebpf_jited(const struct bpf_prog *fp)
 {
        return false;
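
The new locked bit above makes bpf_prog_unlock_ro() undo only a protection change that was actually made. A userspace analogue of the pattern, using mprotect() in place of the kernel's set_memory_ro()/set_memory_rw() (an illustration, not the kernel mechanism):

    #include <assert.h>
    #include <string.h>
    #include <sys/mman.h>

    struct blob { unsigned char *mem; size_t len; int locked; };

    static void blob_lock_ro(struct blob *b)
    {
            b->locked = 1;
            assert(mprotect(b->mem, b->len, PROT_READ) == 0);
    }

    static void blob_unlock_ro(struct blob *b)
    {
            if (b->locked) {        /* only undo a lock we actually took */
                    assert(mprotect(b->mem, b->len, PROT_READ | PROT_WRITE) == 0);
                    b->locked = 0;
            }
    }

    int main(void)
    {
            struct blob b = { .len = 4096 };
            b.mem = mmap(NULL, b.len, PROT_READ | PROT_WRITE,
                         MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
            assert(b.mem != MAP_FAILED);
            blob_lock_ro(&b);
            blob_unlock_ro(&b);
            memset(b.mem, 0, b.len);        /* writable again */
            return 0;
    }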
index aad3fd0ff5f8314975c93af81d94b3ce88ddbdda..7251f7bb45e8b80b44f28c2051c5bef8e947e6bb 100644 (file)
@@ -2678,7 +2678,7 @@ static const char * const kernel_read_file_str[] = {
 
 static inline const char *kernel_read_file_id_str(enum kernel_read_file_id id)
 {
-       if (id < 0 || id >= READING_MAX_ID)
+       if ((unsigned)id >= READING_MAX_ID)
                return kernel_read_file_str[READING_UNKNOWN];
 
        return kernel_read_file_str[id];
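
The rewritten check folds "id < 0 || id >= READING_MAX_ID" into one comparison: cast to unsigned, and any negative id wraps to a huge value that also fails the upper bound. A tiny demonstration (the READING_MAX_ID value here is made up):

    #include <assert.h>

    #define READING_MAX_ID 8                /* illustrative bound */

    static int in_range(int id)
    {
            return (unsigned)id < READING_MAX_ID;
    }

    int main(void)
    {
            assert(in_range(0) && in_range(7));
            assert(!in_range(8) && !in_range(-1));  /* -1 wraps to UINT_MAX */
            return 0;
    }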
index 547f81592ba134bce5d222a66405c782a91b80d8..10c1abfbac6c45d1049fdf9f1b1f133371521461 100644 (file)
@@ -87,7 +87,6 @@ struct fscrypt_operations {
        unsigned int flags;
        const char *key_prefix;
        int (*get_context)(struct inode *, void *, size_t);
-       int (*prepare_context)(struct inode *);
        int (*set_context)(struct inode *, const void *, size_t, void *);
        int (*dummy_context)(struct inode *);
        bool (*is_encrypted)(struct inode *);
index a999d281a2f1e41ce6cb7613dc5ecd8e0d4797c8..76f39754e7b0299df616bc3cb909f9a35fce9ea1 100644 (file)
@@ -167,13 +167,6 @@ struct blk_integrity {
 };
 
 #endif /* CONFIG_BLK_DEV_INTEGRITY */
-struct disk_devt {
-       atomic_t count;
-       void (*release)(struct disk_devt *disk_devt);
-};
-
-void put_disk_devt(struct disk_devt *disk_devt);
-void get_disk_devt(struct disk_devt *disk_devt);
 
 struct gendisk {
        /* major, first_minor and minors are input parameters only,
@@ -183,7 +176,6 @@ struct gendisk {
        int first_minor;
        int minors;                     /* maximum number of minors, =1 for
                                          * disks that can't be partitioned. */
-       struct disk_devt *disk_devt;
 
        char disk_name[DISK_NAME_LEN];  /* name of major driver */
        char *(*devnode)(struct gendisk *gd, umode_t *mode);
index 2484b2fcc6eb58d0139605359fe97b285df8e5f5..933d936566055de430f9db64ae152eb31785b7ff 100644 (file)
@@ -143,15 +143,6 @@ struct gpio_desc *devm_fwnode_get_index_gpiod_from_child(struct device *dev,
                                                struct fwnode_handle *child,
                                                enum gpiod_flags flags,
                                                const char *label);
-/* FIXME: delete this helper when users are switched over */
-static inline struct gpio_desc *devm_get_gpiod_from_child(struct device *dev,
-                         const char *con_id, struct fwnode_handle *child)
-{
-       return devm_fwnode_get_index_gpiod_from_child(dev, con_id,
-                                                     0, child,
-                                                     GPIOD_ASIS,
-                                                     "?");
-}
 
 #else /* CONFIG_GPIOLIB */
 
@@ -444,13 +435,6 @@ struct gpio_desc *devm_fwnode_get_index_gpiod_from_child(struct device *dev,
        return ERR_PTR(-ENOSYS);
 }
 
-/* FIXME: delete this when all users are switched over */
-static inline struct gpio_desc *devm_get_gpiod_from_child(struct device *dev,
-                         const char *con_id, struct fwnode_handle *child)
-{
-       return ERR_PTR(-ENOSYS);
-}
-
 #endif /* CONFIG_GPIOLIB */
 
 static inline
index 503099d8aada5351b2e30b04cf79e651d57a23d7..b857fc8cc2ecaef504a12e1a88d6e87fa38045ad 100644 (file)
@@ -122,7 +122,7 @@ struct page *follow_huge_pmd(struct mm_struct *mm, unsigned long address,
 struct page *follow_huge_pud(struct mm_struct *mm, unsigned long address,
                                pud_t *pud, int flags);
 int pmd_huge(pmd_t pmd);
-int pud_huge(pud_t pmd);
+int pud_huge(pud_t pud);
 unsigned long hugetlb_change_protection(struct vm_area_struct *vma,
                unsigned long address, unsigned long end, pgprot_t newprot);
 
@@ -197,6 +197,9 @@ static inline void __unmap_hugepage_range(struct mmu_gather *tlb,
 #ifndef pgd_huge
 #define pgd_huge(x)    0
 #endif
+#ifndef p4d_huge
+#define p4d_huge(x)    0
+#endif
 
 #ifndef pgd_write
 static inline int pgd_write(pgd_t pgd)
index 78d59dba563e33e96f6658bfb764268e97f99573..88b6737491210aaf7a2ba66756672188992902ac 100644 (file)
@@ -88,6 +88,7 @@ enum hwmon_temp_attributes {
 #define HWMON_T_CRIT_HYST      BIT(hwmon_temp_crit_hyst)
 #define HWMON_T_EMERGENCY      BIT(hwmon_temp_emergency)
 #define HWMON_T_EMERGENCY_HYST BIT(hwmon_temp_emergency_hyst)
+#define HWMON_T_ALARM          BIT(hwmon_temp_alarm)
 #define HWMON_T_MIN_ALARM      BIT(hwmon_temp_min_alarm)
 #define HWMON_T_MAX_ALARM      BIT(hwmon_temp_max_alarm)
 #define HWMON_T_CRIT_ALARM     BIT(hwmon_temp_crit_alarm)
index 36162485d66310e803884aee213d65aac6a8b10d..0c170a3f0d8b0653b97d598c2a0c61d6b9abe1d2 100644 (file)
@@ -845,6 +845,13 @@ struct vmbus_channel {
         * link up channels based on their CPU affinity.
         */
        struct list_head percpu_list;
+
+       /*
+        * Defer freeing channel until after all cpu's have
+        * gone through grace period.
+        */
+       struct rcu_head rcu;
+
        /*
         * For performance critical channels (storage, networking
         * etc,), Hyper-V has a mechanism to enhance the throughput
@@ -1430,9 +1437,6 @@ extern bool vmbus_prep_negotiate_resp(struct icmsg_hdr *icmsghdrp, u8 *buf,
                                const int *srv_version, int srv_vercnt,
                                int *nego_fw_version, int *nego_srv_version);
 
-void hv_event_tasklet_disable(struct vmbus_channel *channel);
-void hv_event_tasklet_enable(struct vmbus_channel *channel);
-
 void hv_process_channel_removal(struct vmbus_channel *channel, u32 relid);
 
 void vmbus_setevent(struct vmbus_channel *channel);
index 23ca41515527965239a56e92b909d53e75055cd4..fa7931933067982b2bac57f4c73aac039db8f692 100644 (file)
@@ -62,7 +62,7 @@ void iio_swd_group_init_type_name(struct iio_sw_device *d,
                                  const char *name,
                                  struct config_item_type *type)
 {
-#ifdef CONFIG_CONFIGFS_FS
+#if IS_ENABLED(CONFIG_CONFIGFS_FS)
        config_group_init_type_name(&d->group, name, type);
 #endif
 }
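
The switch to IS_ENABLED() matters because a plain #ifdef CONFIG_CONFIGFS_FS only fires for built-in (=y) configurations; when configfs is built as a module the defined symbol is CONFIG_CONFIGFS_FS_MODULE. The macro behaves roughly like this preprocessor test (a simplification of linux/kconfig.h):

    #if defined(CONFIG_CONFIGFS_FS) || defined(CONFIG_CONFIGFS_FS_MODULE)
    /* configfs is available, whether =y or =m */
    #endif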
index ee971f335a8b659f04e5a3048ca70e5bc361ee5f..a2e9d6ea1349fb85418a9ebafc55dd08d16ca6b0 100644 (file)
@@ -153,8 +153,8 @@ struct in_ifaddr {
 int register_inetaddr_notifier(struct notifier_block *nb);
 int unregister_inetaddr_notifier(struct notifier_block *nb);
 
-void inet_netconf_notify_devconf(struct net *net, int type, int ifindex,
-                                struct ipv4_devconf *devconf);
+void inet_netconf_notify_devconf(struct net *net, int event, int type,
+                                int ifindex, struct ipv4_devconf *devconf);
 
 struct net_device *__ip_dev_find(struct net *net, __be32 addr, bool devref);
 static inline struct net_device *ip_dev_find(struct net *net, __be32 addr)
index 6a6de187ddc0ff1e0e737f94261211b7ea4408e9..2e4de0deee531adbd7c1cd8ff1d25e0d0eb98d47 100644 (file)
@@ -125,9 +125,16 @@ enum iommu_attr {
 };
 
 /* These are the possible reserved region types */
-#define IOMMU_RESV_DIRECT      (1 << 0)
-#define IOMMU_RESV_RESERVED    (1 << 1)
-#define IOMMU_RESV_MSI         (1 << 2)
+enum iommu_resv_type {
+       /* Memory regions which must be mapped 1:1 at all times */
+       IOMMU_RESV_DIRECT,
+       /* Arbitrary "never map this or give it to a device" address ranges */
+       IOMMU_RESV_RESERVED,
+       /* Hardware MSI region (untranslated) */
+       IOMMU_RESV_MSI,
+       /* Software-managed MSI translation window */
+       IOMMU_RESV_SW_MSI,
+};
 
 /**
  * struct iommu_resv_region - descriptor for a reserved memory region
@@ -142,7 +149,7 @@ struct iommu_resv_region {
        phys_addr_t             start;
        size_t                  length;
        int                     prot;
-       int                     type;
+       enum iommu_resv_type    type;
 };
 
 #ifdef CONFIG_IOMMU_API
@@ -288,7 +295,8 @@ extern void iommu_get_resv_regions(struct device *dev, struct list_head *list);
 extern void iommu_put_resv_regions(struct device *dev, struct list_head *list);
 extern int iommu_request_dm_for_dev(struct device *dev);
 extern struct iommu_resv_region *
-iommu_alloc_resv_region(phys_addr_t start, size_t length, int prot, int type);
+iommu_alloc_resv_region(phys_addr_t start, size_t length, int prot,
+                       enum iommu_resv_type type);
 extern int iommu_get_group_resv_regions(struct iommu_group *group,
                                        struct list_head *head);
 
index f0d79bd054cab9eed7a2d63058f25a3a6abd8bbf..e1b442996f810529a755533270b6d21c350fbd5a 100644 (file)
@@ -37,6 +37,7 @@ struct ipv6_devconf {
        __s32           accept_ra_rtr_pref;
        __s32           rtr_probe_interval;
 #ifdef CONFIG_IPV6_ROUTE_INFO
+       __s32           accept_ra_rt_info_min_plen;
        __s32           accept_ra_rt_info_max_plen;
 #endif
 #endif
index 672cfef72fc85da6a30579b00bf7e1349758a3ac..97cbca19430d82aa2b4c835db1edf2d6620517ba 100644 (file)
 #define ICC_IGRPEN0_EL1_MASK           (1 << ICC_IGRPEN0_EL1_SHIFT)
 #define ICC_IGRPEN1_EL1_SHIFT          0
 #define ICC_IGRPEN1_EL1_MASK           (1 << ICC_IGRPEN1_EL1_SHIFT)
+#define ICC_SRE_EL1_DIB                        (1U << 2)
+#define ICC_SRE_EL1_DFB                        (1U << 1)
 #define ICC_SRE_EL1_SRE                        (1U << 0)
 
 /*
index 188eced6813eddb9c313fdb59016b972835e7674..9f3616085423cfca654264a4f5b9fed022431997 100644 (file)
@@ -524,6 +524,10 @@ static inline struct irq_domain *irq_find_matching_fwnode(
 {
        return NULL;
 }
+static inline bool irq_domain_check_msi_remap(void)
+{
+       return false;
+}
 #endif /* !CONFIG_IRQ_DOMAIN */
 
 #endif /* _LINUX_IRQDOMAIN_H */
index 8e06d758ee48a2d92da7b9cfba79175334423b48..2afd74b9d844095375be39342e78b5a6749e2a52 100644 (file)
@@ -90,6 +90,13 @@ extern bool static_key_initialized;
 struct static_key {
        atomic_t enabled;
 /*
+ * Note:
+ *   To make anonymous unions work with old compilers, the static
+ *   initialization of them requires brackets. This creates a dependency
+ *   on the order of the struct with the initializers. If any fields
+ *   are added, STATIC_KEY_INIT_TRUE and STATIC_KEY_INIT_FALSE may need
+ *   to be modified.
+ *
  * bit 0 => 1 if key is initially true
  *         0 if initially false
  * bit 1 => 1 if points to struct static_key_mod
@@ -166,10 +173,10 @@ extern void static_key_disable(struct static_key *key);
  */
 #define STATIC_KEY_INIT_TRUE                                   \
        { .enabled = { 1 },                                     \
-         .entries = (void *)JUMP_TYPE_TRUE }
+         { .entries = (void *)JUMP_TYPE_TRUE } }
 #define STATIC_KEY_INIT_FALSE                                  \
        { .enabled = { 0 },                                     \
-         .entries = (void *)JUMP_TYPE_FALSE }
+         { .entries = (void *)JUMP_TYPE_FALSE } }
 
 #else  /* !HAVE_JUMP_LABEL */
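
A compact illustration of the bracket issue the new comment warns about (hypothetical types mirroring the struct layout): positionally initializing a struct whose second member is an anonymous union needs its own brace level on the older compilers the note targets.

    struct key {
            int enabled;
            union {
                    void *entries;
                    unsigned long type;
            };
    };

    /* the inner braces select the anonymous union's first member */
    static struct key k_true = { 1, { (void *)1UL } };

    int main(void)
    {
            return k_true.entries == (void *)1UL ? 0 : 1;
    }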
 
index ceb3fe78a0d39d6b1268c0b92485b8ee110e0b4a..a5c7046f26b4b93f9199c2f4800b7a1da77791da 100644 (file)
@@ -6,6 +6,7 @@
 struct kmem_cache;
 struct page;
 struct vm_struct;
+struct task_struct;
 
 #ifdef CONFIG_KASAN
 
@@ -18,6 +19,7 @@ extern unsigned char kasan_zero_page[PAGE_SIZE];
 extern pte_t kasan_zero_pte[PTRS_PER_PTE];
 extern pmd_t kasan_zero_pmd[PTRS_PER_PMD];
 extern pud_t kasan_zero_pud[PTRS_PER_PUD];
+extern p4d_t kasan_zero_p4d[PTRS_PER_P4D];
 
 void kasan_populate_zero_shadow(const void *shadow_start,
                                const void *shadow_end);
@@ -74,6 +76,9 @@ size_t ksize(const void *);
 static inline void kasan_unpoison_slab(const void *ptr) { ksize(ptr); }
 size_t kasan_metadata_size(struct kmem_cache *cache);
 
+bool kasan_save_enable_multi_shot(void);
+void kasan_restore_multi_shot(bool enabled);
+
 #else /* CONFIG_KASAN */
 
 static inline void kasan_unpoison_shadow(const void *address, size_t size) {}
index 2c14ad9809da94bde727f3ebc744fabd47673f98..d0250744507a284138d0e7e702bb64e1d205e769 100644 (file)
@@ -162,8 +162,8 @@ int kvm_io_bus_read(struct kvm_vcpu *vcpu, enum kvm_bus bus_idx, gpa_t addr,
                    int len, void *val);
 int kvm_io_bus_register_dev(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr,
                            int len, struct kvm_io_device *dev);
-int kvm_io_bus_unregister_dev(struct kvm *kvm, enum kvm_bus bus_idx,
-                             struct kvm_io_device *dev);
+void kvm_io_bus_unregister_dev(struct kvm *kvm, enum kvm_bus bus_idx,
+                              struct kvm_io_device *dev);
 struct kvm_io_device *kvm_io_bus_get_dev(struct kvm *kvm, enum kvm_bus bus_idx,
                                         gpa_t addr);
 
index b01fe100908430708df0df5162594b497ffdad62..87ff4f58a2f0182ec0586c0dee923bc30e004149 100644 (file)
@@ -29,6 +29,11 @@ struct hlist_nulls_node {
        ((ptr)->first = (struct hlist_nulls_node *) NULLS_MARKER(nulls))
 
 #define hlist_nulls_entry(ptr, type, member) container_of(ptr,type,member)
+
+#define hlist_nulls_entry_safe(ptr, type, member) \
+       ({ typeof(ptr) ____ptr = (ptr); \
+          !is_a_nulls(____ptr) ? hlist_nulls_entry(____ptr, type, member) : NULL; \
+       })
 /**
  * ptr_is_a_nulls - Test if a ptr is a nulls
  * @ptr: ptr to be tested
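
hlist_nulls_entry_safe() evaluates its argument exactly once (the statement-expression temporary) and maps a nulls end marker to NULL instead of letting container_of() run on it. A userspace sketch of the underlying nulls idiom (simplified names):

    #include <assert.h>
    #include <stddef.h>

    struct node { struct node *next; };

    #define NULLS_MARKER(v) ((struct node *)(2UL * (v) + 1))   /* bit 0 set */
    #define is_a_nulls(p)   ((unsigned long)(p) & 1UL)

    struct item { int val; struct node link; };

    #define entry_safe(ptr, type, member)                                   \
            ({ struct node *____p = (ptr);                                  \
               !is_a_nulls(____p) ?                                         \
                    (type *)((char *)____p - offsetof(type, member)) : NULL; })

    int main(void)
    {
            struct item it = { .val = 42 };
            assert(entry_safe(&it.link, struct item, link)->val == 42);
            assert(entry_safe(NULLS_MARKER(7), struct item, link) == NULL);
            return 0;
    }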
index 5af37730388074ff65d4e42143c40c2880b5aff7..bb7250c45cb8356b03df2d4b9f1325dc6e30069b 100644 (file)
@@ -740,6 +740,12 @@ static inline bool mem_cgroup_oom_synchronize(bool wait)
        return false;
 }
 
+static inline void mem_cgroup_update_page_stat(struct page *page,
+                                              enum mem_cgroup_stat_index idx,
+                                              int nr)
+{
+}
+
 static inline void mem_cgroup_inc_page_stat(struct page *page,
                                            enum mem_cgroup_stat_index idx)
 {
index 7a01c94496f14eac3c1ed13b37a9811cc1b6f853..3eef9fb9968ae730716a79bc9e3aef8be4e2e650 100644 (file)
  * Max bus-specific overhead incurred by request/responses.
  * I2C requires 1 additional byte for requests.
  * I2C requires 2 additional bytes for responses.
+ * SPI requires up to 32 additional bytes for responses.
  * */
 #define EC_PROTO_VERSION_UNKNOWN       0
 #define EC_MAX_REQUEST_OVERHEAD                1
-#define EC_MAX_RESPONSE_OVERHEAD       2
+#define EC_MAX_RESPONSE_OVERHEAD       32
 
 /*
  * Command interface between EC and AP, for LPC, I2C and SPI interfaces.
index 7e66e4f62858f395cd000226e9580785b03a4cf1..1beb1ec2fbdf339b34affc69508a5f5462b409b0 100644 (file)
@@ -476,6 +476,7 @@ enum {
 enum {
        MLX4_INTERFACE_STATE_UP         = 1 << 0,
        MLX4_INTERFACE_STATE_DELETION   = 1 << 1,
+       MLX4_INTERFACE_STATE_NOWAIT     = 1 << 2,
 };
 
 #define MSTR_SM_CHANGE_MASK (MLX4_EQ_PORT_INFO_MSTR_SM_SL_CHANGE_MASK | \
index 2fcff6b4503f6a4824bea50c189b072ef6c486cb..f508646262305afe315f4532200dc979b05fddae 100644 (file)
@@ -728,6 +728,7 @@ struct mlx5e_resources {
        u32                        pdn;
        struct mlx5_td             td;
        struct mlx5_core_mkey      mkey;
+       struct mlx5_sq_bfreg       bfreg;
 };
 
 struct mlx5_core_dev {
index 949b24b6c4794ce14909d779b7dbfd2534aa53db..ae91a4bda1a3063d3b2f6cef2d10c1266c1ef59f 100644 (file)
@@ -134,8 +134,13 @@ struct mlx5_flow_act {
        u32 action;
        u32 flow_tag;
        u32 encap_id;
+       u32 modify_id;
 };
 
+#define MLX5_DECLARE_FLOW_ACT(name) \
+       struct mlx5_flow_act name = {MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,\
+                                    MLX5_FS_DEFAULT_FLOW_TAG, 0, 0}
+
 /* Single destination per rule.
  * Group ID is implied by the match criteria.
  */
@@ -156,5 +161,4 @@ struct mlx5_fc *mlx5_fc_create(struct mlx5_core_dev *dev, bool aging);
 void mlx5_fc_destroy(struct mlx5_core_dev *dev, struct mlx5_fc *counter);
 void mlx5_fc_query_cached(struct mlx5_fc *counter,
                          u64 *bytes, u64 *packets, u64 *lastuse);
-
 #endif
index 838242697541a28fdda4d90bf7b604e25f3bfba2..1993adbd2c822c223f765198f6ea21fa70467a06 100644 (file)
@@ -227,6 +227,8 @@ enum {
        MLX5_CMD_OP_MODIFY_FLOW_TABLE             = 0x93c,
        MLX5_CMD_OP_ALLOC_ENCAP_HEADER            = 0x93d,
        MLX5_CMD_OP_DEALLOC_ENCAP_HEADER          = 0x93e,
+       MLX5_CMD_OP_ALLOC_MODIFY_HEADER_CONTEXT   = 0x940,
+       MLX5_CMD_OP_DEALLOC_MODIFY_HEADER_CONTEXT = 0x941,
        MLX5_CMD_OP_MAX
 };
 
@@ -302,7 +304,8 @@ struct mlx5_ifc_flow_table_prop_layout_bits {
 
        u8         reserved_at_20[0x2];
        u8         log_max_ft_size[0x6];
-       u8         reserved_at_28[0x10];
+       u8         log_max_modify_header_context[0x8];
+       u8         max_modify_header_actions[0x8];
        u8         max_ft_level[0x8];
 
        u8         reserved_at_40[0x20];
@@ -2190,6 +2193,7 @@ enum {
        MLX5_FLOW_CONTEXT_ACTION_COUNT     = 0x8,
        MLX5_FLOW_CONTEXT_ACTION_ENCAP     = 0x10,
        MLX5_FLOW_CONTEXT_ACTION_DECAP     = 0x20,
+       MLX5_FLOW_CONTEXT_ACTION_MOD_HDR   = 0x40,
 };
 
 struct mlx5_ifc_flow_context_bits {
@@ -2211,7 +2215,9 @@ struct mlx5_ifc_flow_context_bits {
 
        u8         encap_id[0x20];
 
-       u8         reserved_at_e0[0x120];
+       u8         modify_header_id[0x20];
+
+       u8         reserved_at_100[0x100];
 
        struct mlx5_ifc_fte_match_param_bits match_value;
 
@@ -4534,6 +4540,109 @@ struct mlx5_ifc_dealloc_encap_header_in_bits {
        u8         reserved_60[0x20];
 };
 
+struct mlx5_ifc_set_action_in_bits {
+       u8         action_type[0x4];
+       u8         field[0xc];
+       u8         reserved_at_10[0x3];
+       u8         offset[0x5];
+       u8         reserved_at_18[0x3];
+       u8         length[0x5];
+
+       u8         data[0x20];
+};
+
+struct mlx5_ifc_add_action_in_bits {
+       u8         action_type[0x4];
+       u8         field[0xc];
+       u8         reserved_at_10[0x10];
+
+       u8         data[0x20];
+};
+
+union mlx5_ifc_set_action_in_add_action_in_auto_bits {
+       struct mlx5_ifc_set_action_in_bits set_action_in;
+       struct mlx5_ifc_add_action_in_bits add_action_in;
+       u8         reserved_at_0[0x40];
+};
+
+enum {
+       MLX5_ACTION_TYPE_SET   = 0x1,
+       MLX5_ACTION_TYPE_ADD   = 0x2,
+};
+
+enum {
+       MLX5_ACTION_IN_FIELD_OUT_SMAC_47_16    = 0x1,
+       MLX5_ACTION_IN_FIELD_OUT_SMAC_15_0     = 0x2,
+       MLX5_ACTION_IN_FIELD_OUT_ETHERTYPE     = 0x3,
+       MLX5_ACTION_IN_FIELD_OUT_DMAC_47_16    = 0x4,
+       MLX5_ACTION_IN_FIELD_OUT_DMAC_15_0     = 0x5,
+       MLX5_ACTION_IN_FIELD_OUT_IP_DSCP       = 0x6,
+       MLX5_ACTION_IN_FIELD_OUT_TCP_FLAGS     = 0x7,
+       MLX5_ACTION_IN_FIELD_OUT_TCP_SPORT     = 0x8,
+       MLX5_ACTION_IN_FIELD_OUT_TCP_DPORT     = 0x9,
+       MLX5_ACTION_IN_FIELD_OUT_IP_TTL        = 0xa,
+       MLX5_ACTION_IN_FIELD_OUT_UDP_SPORT     = 0xb,
+       MLX5_ACTION_IN_FIELD_OUT_UDP_DPORT     = 0xc,
+       MLX5_ACTION_IN_FIELD_OUT_SIPV6_127_96  = 0xd,
+       MLX5_ACTION_IN_FIELD_OUT_SIPV6_95_64   = 0xe,
+       MLX5_ACTION_IN_FIELD_OUT_SIPV6_63_32   = 0xf,
+       MLX5_ACTION_IN_FIELD_OUT_SIPV6_31_0    = 0x10,
+       MLX5_ACTION_IN_FIELD_OUT_DIPV6_127_96  = 0x11,
+       MLX5_ACTION_IN_FIELD_OUT_DIPV6_95_64   = 0x12,
+       MLX5_ACTION_IN_FIELD_OUT_DIPV6_63_32   = 0x13,
+       MLX5_ACTION_IN_FIELD_OUT_DIPV6_31_0    = 0x14,
+       MLX5_ACTION_IN_FIELD_OUT_SIPV4         = 0x15,
+       MLX5_ACTION_IN_FIELD_OUT_DIPV4         = 0x16,
+};
+
+struct mlx5_ifc_alloc_modify_header_context_out_bits {
+       u8         status[0x8];
+       u8         reserved_at_8[0x18];
+
+       u8         syndrome[0x20];
+
+       u8         modify_header_id[0x20];
+
+       u8         reserved_at_60[0x20];
+};
+
+struct mlx5_ifc_alloc_modify_header_context_in_bits {
+       u8         opcode[0x10];
+       u8         reserved_at_10[0x10];
+
+       u8         reserved_at_20[0x10];
+       u8         op_mod[0x10];
+
+       u8         reserved_at_40[0x20];
+
+       u8         table_type[0x8];
+       u8         reserved_at_68[0x10];
+       u8         num_of_actions[0x8];
+
+       union mlx5_ifc_set_action_in_add_action_in_auto_bits actions[0];
+};
+
+struct mlx5_ifc_dealloc_modify_header_context_out_bits {
+       u8         status[0x8];
+       u8         reserved_at_8[0x18];
+
+       u8         syndrome[0x20];
+
+       u8         reserved_at_40[0x40];
+};
+
+struct mlx5_ifc_dealloc_modify_header_context_in_bits {
+       u8         opcode[0x10];
+       u8         reserved_at_10[0x10];
+
+       u8         reserved_at_20[0x10];
+       u8         op_mod[0x10];
+
+       u8         modify_header_id[0x20];
+
+       u8         reserved_at_60[0x20];
+};
+
 struct mlx5_ifc_query_dct_out_bits {
        u8         status[0x8];
        u8         reserved_at_8[0x18];
@@ -5013,6 +5122,7 @@ struct mlx5_ifc_modify_rq_out_bits {
 
 enum {
        MLX5_MODIFY_RQ_IN_MODIFY_BITMASK_VSD = 1ULL << 1,
+       MLX5_MODIFY_RQ_IN_MODIFY_BITMASK_SCATTER_FCS = 1ULL << 2,
        MLX5_MODIFY_RQ_IN_MODIFY_BITMASK_RQ_COUNTER_SET_ID = 1ULL << 3,
 };
 
index 0d65dd72c0f49e230613ac268d29c7b377962836..00a8fa7e366a0320210941ca39dd53fed97d4e2c 100644 (file)
@@ -32,6 +32,8 @@ struct user_struct;
 struct writeback_control;
 struct bdi_writeback;
 
+void init_mm_internals(void);
+
 #ifndef CONFIG_NEED_MULTIPLE_NODES     /* Don't use mapnrs, do it properly */
 extern unsigned long max_mapnr;
 
@@ -1560,14 +1562,24 @@ static inline pte_t *get_locked_pte(struct mm_struct *mm, unsigned long addr,
        return ptep;
 }
 
+#ifdef __PAGETABLE_P4D_FOLDED
+static inline int __p4d_alloc(struct mm_struct *mm, pgd_t *pgd,
+                                               unsigned long address)
+{
+       return 0;
+}
+#else
+int __p4d_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address);
+#endif
+
 #ifdef __PAGETABLE_PUD_FOLDED
-static inline int __pud_alloc(struct mm_struct *mm, pgd_t *pgd,
+static inline int __pud_alloc(struct mm_struct *mm, p4d_t *p4d,
                                                unsigned long address)
 {
        return 0;
 }
 #else
-int __pud_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address);
+int __pud_alloc(struct mm_struct *mm, p4d_t *p4d, unsigned long address);
 #endif
 
 #if defined(__PAGETABLE_PMD_FOLDED) || !defined(CONFIG_MMU)
@@ -1619,11 +1631,22 @@ int __pte_alloc_kernel(pmd_t *pmd, unsigned long address);
  * Remove it when 4level-fixup.h has been removed.
  */
 #if defined(CONFIG_MMU) && !defined(__ARCH_HAS_4LEVEL_HACK)
-static inline pud_t *pud_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address)
+
+#ifndef __ARCH_HAS_5LEVEL_HACK
+static inline p4d_t *p4d_alloc(struct mm_struct *mm, pgd_t *pgd,
+               unsigned long address)
+{
+       return (unlikely(pgd_none(*pgd)) && __p4d_alloc(mm, pgd, address)) ?
+               NULL : p4d_offset(pgd, address);
+}
+
+static inline pud_t *pud_alloc(struct mm_struct *mm, p4d_t *p4d,
+               unsigned long address)
 {
-       return (unlikely(pgd_none(*pgd)) && __pud_alloc(mm, pgd, address))?
-               NULL: pud_offset(pgd, address);
+       return (unlikely(p4d_none(*p4d)) && __pud_alloc(mm, p4d, address)) ?
+               NULL : pud_offset(p4d, address);
 }
+#endif /* !__ARCH_HAS_5LEVEL_HACK */
 
 static inline pmd_t *pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address)
 {
@@ -2385,7 +2408,8 @@ void sparse_mem_maps_populate_node(struct page **map_map,
 
 struct page *sparse_mem_map_populate(unsigned long pnum, int nid);
 pgd_t *vmemmap_pgd_populate(unsigned long addr, int node);
-pud_t *vmemmap_pud_populate(pgd_t *pgd, unsigned long addr, int node);
+p4d_t *vmemmap_p4d_populate(pgd_t *pgd, unsigned long addr, int node);
+pud_t *vmemmap_pud_populate(p4d_t *p4d, unsigned long addr, int node);
 pmd_t *vmemmap_pmd_populate(pud_t *pud, unsigned long addr, int node);
 pte_t *vmemmap_pte_populate(pmd_t *pmd, unsigned long addr, int node);
 void *vmemmap_alloc_block(unsigned long size, int node);
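
p4d_alloc() and the reworked pud_alloc() above share the allocate-on-demand idiom: test the entry, call the slow-path allocator only if it is empty, and return NULL only when that allocator fails. A generic toy version of the same control flow:

    #include <stdio.h>
    #include <stdlib.h>

    struct dir { void *slot; };

    static int slow_alloc(struct dir *d)
    {
            d->slot = calloc(1, 64);
            return d->slot == NULL;                 /* nonzero on failure */
    }

    static void *lookup_alloc(struct dir *d)
    {
            /* slow_alloc() runs only when the slot is empty; if it fails,
             * the whole expression yields NULL */
            return (d->slot == NULL && slow_alloc(d)) ? NULL : d->slot;
    }

    int main(void)
    {
            struct dir d = { 0 };
            printf("%p\n", lookup_alloc(&d));       /* allocated on first use */
            printf("%p\n", lookup_alloc(&d));       /* same slot reused */
            return 0;
    }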
index cd0c8bd0a1dec0d2047509177ee8d33de1f185d4..a42fab24c8af6aa7a21334d8af40d0cecfc72bb0 100644 (file)
@@ -146,7 +146,7 @@ struct proto_ops {
        int             (*socketpair)(struct socket *sock1,
                                      struct socket *sock2);
        int             (*accept)    (struct socket *sock,
-                                     struct socket *newsock, int flags);
+                                     struct socket *newsock, int flags, bool kern);
        int             (*getname)   (struct socket *sock,
                                      struct sockaddr *addr,
                                      int *sockaddr_len, int peer);
@@ -298,6 +298,9 @@ int kernel_sendpage(struct socket *sock, struct page *page, int offset,
 int kernel_sock_ioctl(struct socket *sock, int cmd, unsigned long arg);
 int kernel_sock_shutdown(struct socket *sock, enum sock_shutdown_cmd how);
 
+/* Following routine returns the IP overhead imposed by a socket.  */
+u32 kernel_sock_ip_overhead(struct sock *sk);
+
 #define MODULE_ALIAS_NETPROTO(proto) \
        MODULE_ALIAS("net-pf-" __stringify(proto))
 
index 97456b2539e46d6232dda804f6a434db6fd7134f..b0aa089ce67fe4acec70b518a399db7e99a435e0 100644 (file)
@@ -41,7 +41,6 @@
 
 #include <linux/ethtool.h>
 #include <net/net_namespace.h>
-#include <net/dsa.h>
 #ifdef CONFIG_DCB
 #include <net/dcbnl.h>
 #endif
@@ -57,6 +56,8 @@
 struct netpoll_info;
 struct device;
 struct phy_device;
+struct dsa_switch_tree;
+
 /* 802.11 specific */
 struct wireless_dev;
 /* 802.15.4 specific */
@@ -236,8 +237,7 @@ struct netdev_hw_addr_list {
        netdev_hw_addr_list_for_each(ha, &(dev)->mc)
 
 struct hh_cache {
-       u16             hh_len;
-       u16             __pad;
+       unsigned int    hh_len;
        seqlock_t       hh_lock;
 
        /* cached hardware header; allow for machine alignment needs.        */
@@ -786,11 +786,11 @@ struct tc_cls_u32_offload;
 struct tc_to_netdev {
        unsigned int type;
        union {
-               u8 tc;
                struct tc_cls_u32_offload *cls_u32;
                struct tc_cls_flower_offload *cls_flower;
                struct tc_cls_matchall_offload *cls_mall;
                struct tc_cls_bpf_offload *cls_bpf;
+               struct tc_mqprio_qopt *mqprio;
        };
        bool egress_dev;
 };
@@ -1715,7 +1715,7 @@ struct net_device {
        unsigned int            max_mtu;
        unsigned short          type;
        unsigned short          hard_header_len;
-       unsigned short          min_header_len;
+       unsigned char           min_header_len;
 
        unsigned short          needed_headroom;
        unsigned short          needed_tailroom;
@@ -2004,15 +2004,6 @@ void dev_net_set(struct net_device *dev, struct net *net)
        write_pnet(&dev->nd_net, net);
 }
 
-static inline bool netdev_uses_dsa(struct net_device *dev)
-{
-#if IS_ENABLED(CONFIG_NET_DSA)
-       if (dev->dsa_ptr != NULL)
-               return dsa_uses_tagged_protocol(dev->dsa_ptr);
-#endif
-       return false;
-}
-
 /**
  *     netdev_priv - access network device private data
  *     @dev: network device
index a58cca8bcb29d8c9bdcda71538cfa8f25c913916..ba35ba5204871a69b5d5522f170432d24aa32e96 100644 (file)
@@ -12,7 +12,7 @@
 #include <linux/phy.h>
 #include <linux/of.h>
 
-#ifdef CONFIG_OF
+#if IS_ENABLED(CONFIG_OF_MDIO)
 extern int of_mdiobus_register(struct mii_bus *mdio, struct device_node *np);
 extern struct phy_device *of_phy_find_device(struct device_node *phy_np);
 extern struct phy_device *of_phy_connect(struct net_device *dev,
@@ -32,7 +32,7 @@ extern int of_phy_register_fixed_link(struct device_node *np);
 extern void of_phy_deregister_fixed_link(struct device_node *np);
 extern bool of_phy_is_fixed_link(struct device_node *np);
 
-#else /* CONFIG_OF */
+#else /* CONFIG_OF_MDIO */
 static inline int of_mdiobus_register(struct mii_bus *mdio, struct device_node *np)
 {
        /*
index 35d0fd7a4948e6e49eb7e75512461d935cccd161..fd0de00c0d777ed72cfab3fdbeb54e885c14be77 100644 (file)
@@ -76,22 +76,12 @@ struct gpmc_timings;
 struct omap_nand_platform_data;
 struct omap_onenand_platform_data;
 
-#if IS_ENABLED(CONFIG_MTD_NAND_OMAP2)
-extern int gpmc_nand_init(struct omap_nand_platform_data *d,
-                         struct gpmc_timings *gpmc_t);
-#else
-static inline int gpmc_nand_init(struct omap_nand_platform_data *d,
-                                struct gpmc_timings *gpmc_t)
-{
-       return 0;
-}
-#endif
-
 #if IS_ENABLED(CONFIG_MTD_ONENAND_OMAP2)
-extern void gpmc_onenand_init(struct omap_onenand_platform_data *d);
+extern int gpmc_onenand_init(struct omap_onenand_platform_data *d);
 #else
 #define board_onenand_data     NULL
-static inline void gpmc_onenand_init(struct omap_onenand_platform_data *d)
+static inline int gpmc_onenand_init(struct omap_onenand_platform_data *d)
 {
+       return 0;
 }
 #endif
index eb3da1a04e6cdc7d3f4efab5532f53abcdf3a28f..82dec36845e61a77a2673275982bbca5fdc73bc6 100644 (file)
@@ -1300,7 +1300,6 @@ int pci_msi_vec_count(struct pci_dev *dev);
 void pci_msi_shutdown(struct pci_dev *dev);
 void pci_disable_msi(struct pci_dev *dev);
 int pci_msix_vec_count(struct pci_dev *dev);
-int pci_enable_msix(struct pci_dev *dev, struct msix_entry *entries, int nvec);
 void pci_msix_shutdown(struct pci_dev *dev);
 void pci_disable_msix(struct pci_dev *dev);
 void pci_restore_msi_state(struct pci_dev *dev);
@@ -1330,9 +1329,6 @@ static inline int pci_msi_vec_count(struct pci_dev *dev) { return -ENOSYS; }
 static inline void pci_msi_shutdown(struct pci_dev *dev) { }
 static inline void pci_disable_msi(struct pci_dev *dev) { }
 static inline int pci_msix_vec_count(struct pci_dev *dev) { return -ENOSYS; }
-static inline int pci_enable_msix(struct pci_dev *dev,
-                                 struct msix_entry *entries, int nvec)
-{ return -ENOSYS; }
 static inline void pci_msix_shutdown(struct pci_dev *dev) { }
 static inline void pci_disable_msix(struct pci_dev *dev) { }
 static inline void pci_restore_msi_state(struct pci_dev *dev) { }
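
With pci_enable_msix() gone, the expected replacement is the vector-allocation API; a hedged sketch of the pattern, with foo_irq_handler() and the vector count as illustrative stand-ins:

static int foo_setup_irqs(struct pci_dev *pdev)
{
        int nvec, err;

        /* request up to 8 MSI-X vectors, accept as few as 1 */
        nvec = pci_alloc_irq_vectors(pdev, 1, 8, PCI_IRQ_MSIX);
        if (nvec < 0)
                return nvec;

        /* vectors are referenced by index, not struct msix_entry */
        err = request_irq(pci_irq_vector(pdev, 0), foo_irq_handler, 0,
                          "foo", pdev);
        if (err)
                pci_free_irq_vectors(pdev);
        return err;
}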
index 43a774873aa96d4af64d0cdebb579be572a6658a..624cecf69c28d39b148b6d0616842fcf40317473 100644 (file)
@@ -587,23 +587,29 @@ struct phy_driver {
         */
        void (*link_change_notify)(struct phy_device *dev);
 
-       /* A function provided by a phy specific driver to override the
-        * the PHY driver framework support for reading a MMD register
-        * from the PHY. If not supported, return -1. This function is
-        * optional for PHY specific drivers, if not provided then the
-        * default MMD read function is used by the PHY framework.
+       /*
+        * Phy specific driver override for reading an MMD register.
+        * This function is optional for PHY specific drivers.  When
+        * not provided, the default MMD read function will be used
+        * by phy_read_mmd(), which will use either a direct read for
+        * Clause 45 PHYs or an indirect read for Clause 22 PHYs.
+        *  devnum is the MMD device number within the PHY device,
+        *  regnum is the register within the selected MMD device.
         */
-       int (*read_mmd_indirect)(struct phy_device *dev, int ptrad,
-                                int devnum, int regnum);
-
-       /* A function provided by a phy specific driver to override the
-        * the PHY driver framework support for writing a MMD register
-        * from the PHY. This function is optional for PHY specific drivers,
-        * if not provided then the default MMD read function is used by
-        * the PHY framework.
+       int (*read_mmd)(struct phy_device *dev, int devnum, u16 regnum);
+
+       /*
+        * Phy specific driver override for writing an MMD register.
+        * This function is optional for PHY specific drivers.  When
+        * not provided, the default MMD write function will be used
+        * by phy_write_mmd(), which will use either a direct write for
+        * Clause 45 PHYs, or an indirect write for Clause 22 PHYs.
+        *  devnum is the MMD device number within the PHY device,
+        *  regnum is the register within the selected MMD device.
+        *  val is the value to be written.
         */
-       void (*write_mmd_indirect)(struct phy_device *dev, int ptrad,
-                                  int devnum, int regnum, u32 val);
+       int (*write_mmd)(struct phy_device *dev, int devnum, u16 regnum,
+                        u16 val);
 
        /* Get the size and type of the eeprom contained within a plug-in
         * module */
@@ -651,25 +657,7 @@ struct phy_fixup {
  *
  * Same rules as for phy_read();
  */
-static inline int phy_read_mmd(struct phy_device *phydev, int devad, u32 regnum)
-{
-       if (!phydev->is_c45)
-               return -EOPNOTSUPP;
-
-       return mdiobus_read(phydev->mdio.bus, phydev->mdio.addr,
-                           MII_ADDR_C45 | (devad << 16) | (regnum & 0xffff));
-}
-
-/**
- * phy_read_mmd_indirect - reads data from the MMD registers
- * @phydev: The PHY device bus
- * @prtad: MMD Address
- * @addr: PHY address on the MII bus
- *
- * Description: it reads data from the MMD registers (clause 22 to access to
- * clause 45) of the specified phy address.
- */
-int phy_read_mmd_indirect(struct phy_device *phydev, int prtad, int devad);
+int phy_read_mmd(struct phy_device *phydev, int devad, u32 regnum);
 
 /**
  * phy_read - Convenience function for reading a given PHY register
@@ -752,35 +740,29 @@ static inline bool phy_is_pseudo_fixed_link(struct phy_device *phydev)
  *
  * Same rules as for phy_write();
  */
-static inline int phy_write_mmd(struct phy_device *phydev, int devad,
-                               u32 regnum, u16 val)
-{
-       if (!phydev->is_c45)
-               return -EOPNOTSUPP;
-
-       regnum = MII_ADDR_C45 | ((devad & 0x1f) << 16) | (regnum & 0xffff);
-
-       return mdiobus_write(phydev->mdio.bus, phydev->mdio.addr, regnum, val);
-}
-
-/**
- * phy_write_mmd_indirect - writes data to the MMD registers
- * @phydev: The PHY device
- * @prtad: MMD Address
- * @devad: MMD DEVAD
- * @data: data to write in the MMD register
- *
- * Description: Write data from the MMD registers of the specified
- * phy address.
- */
-void phy_write_mmd_indirect(struct phy_device *phydev, int prtad,
-                           int devad, u32 data);
+int phy_write_mmd(struct phy_device *phydev, int devad, u32 regnum, u16 val);
 
 struct phy_device *phy_device_create(struct mii_bus *bus, int addr, int phy_id,
                                     bool is_c45,
                                     struct phy_c45_device_ids *c45_ids);
+#if IS_ENABLED(CONFIG_PHYLIB)
 struct phy_device *get_phy_device(struct mii_bus *bus, int addr, bool is_c45);
 int phy_device_register(struct phy_device *phy);
+void phy_device_free(struct phy_device *phydev);
+#else
+static inline
+struct phy_device *get_phy_device(struct mii_bus *bus, int addr, bool is_c45)
+{
+       return NULL;
+}
+
+static inline int phy_device_register(struct phy_device *phy)
+{
+       return 0;
+}
+
+static inline void phy_device_free(struct phy_device *phydev) { }
+#endif /* CONFIG_PHYLIB */
 void phy_device_remove(struct phy_device *phydev);
 int phy_init_hw(struct phy_device *phydev);
 int phy_suspend(struct phy_device *phydev);
@@ -861,7 +843,6 @@ int phy_ethtool_ksettings_set(struct phy_device *phydev,
 int phy_mii_ioctl(struct phy_device *phydev, struct ifreq *ifr, int cmd);
 int phy_start_interrupts(struct phy_device *phydev);
 void phy_print_status(struct phy_device *phydev);
-void phy_device_free(struct phy_device *phydev);
 int phy_set_max_speed(struct phy_device *phydev, u32 max_speed);
 
 int phy_register_fixup(const char *bus_id, u32 phy_uid, u32 phy_uid_mask,
@@ -888,8 +869,10 @@ int phy_ethtool_set_link_ksettings(struct net_device *ndev,
                                   const struct ethtool_link_ksettings *cmd);
 int phy_ethtool_nway_reset(struct net_device *ndev);
 
+#if IS_ENABLED(CONFIG_PHYLIB)
 int __init mdio_bus_init(void);
 void mdio_bus_exit(void);
+#endif
 
 extern struct bus_type mdio_bus_type;
 
@@ -900,7 +883,7 @@ struct mdio_board_info {
        const void      *platform_data;
 };
 
-#if IS_ENABLED(CONFIG_PHYLIB)
+#if IS_ENABLED(CONFIG_MDIO_DEVICE)
 int mdiobus_register_board_info(const struct mdio_board_info *info,
                                unsigned int n);
 #else
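
A hedged sketch of a PHY driver opting into the new hooks; the foo_* names and register layout are invented for illustration. Drivers that leave .read_mmd/.write_mmd NULL get the framework's Clause 22/Clause 45 default through phy_read_mmd()/phy_write_mmd():

/* illustrative override: indirect access through the standard MMD
 * access registers 13/14 */
static int foo_read_mmd(struct phy_device *dev, int devnum, u16 regnum)
{
        phy_write(dev, 0x0d, devnum);          /* function = address  */
        phy_write(dev, 0x0e, regnum);          /* latch register addr */
        phy_write(dev, 0x0d, 0x4000 | devnum); /* function = data     */
        return phy_read(dev, 0x0e);            /* read back the value */
}

static struct phy_driver foo_phy_driver = {
        .phy_id      = 0x00112233,             /* made-up ID */
        .phy_id_mask = 0xffffffff,
        .name        = "Foo Gigabit PHY",
        .read_mmd    = foo_read_mmd,
        /* .write_mmd left NULL: the default indirect write is used */
};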
diff --git a/include/linux/purgatory.h b/include/linux/purgatory.h
new file mode 100644 (file)
index 0000000..d60d4e2
--- /dev/null
@@ -0,0 +1,23 @@
+#ifndef _LINUX_PURGATORY_H
+#define _LINUX_PURGATORY_H
+
+#include <linux/types.h>
+#include <crypto/sha.h>
+#include <uapi/linux/kexec.h>
+
+struct kexec_sha_region {
+       unsigned long start;
+       unsigned long len;
+};
+
+/*
+ * These forward declarations serve two purposes:
+ *
+ * 1) Make sparse happy when checking arch/purgatory
+ * 2) Document that these are required to be global so the symbol
+ *    lookup in kexec works
+ */
+extern struct kexec_sha_region purgatory_sha_regions[KEXEC_SEGMENT_MAX];
+extern u8 purgatory_sha256_digest[SHA256_DIGEST_SIZE];
+
+#endif
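
For context, a hedged sketch of the consumer these globals exist for, modelled on the digest check an arch purgatory performs before jumping to the new kernel; the sha256_* helpers are assumed to be the purgatory-local SHA-256 routines:

static int verify_sha256_digest(void)
{
        struct kexec_sha_region *ptr, *end;
        u8 digest[SHA256_DIGEST_SIZE];
        struct sha256_state sctx;

        sha256_init(&sctx);
        end = purgatory_sha_regions + ARRAY_SIZE(purgatory_sha_regions);
        for (ptr = purgatory_sha_regions; ptr < end; ptr++)
                sha256_update(&sctx, (u8 *)ptr->start, ptr->len);
        sha256_final(&sctx, digest);

        /* non-zero means the loaded segments were corrupted */
        return memcmp(digest, purgatory_sha256_digest, sizeof(digest));
}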
index 52966b9bfde3740b506664bf2329596cf23695ef..fbab6e0514f07bf0f4a9ac481cb712c58d113b7f 100644 (file)
 #define MAX_NUM_LL2_TX_STATS_COUNTERS  32
 
 #define FW_MAJOR_VERSION       8
-#define FW_MINOR_VERSION       10
-#define FW_REVISION_VERSION    10
+#define FW_MINOR_VERSION       15
+#define FW_REVISION_VERSION    3
 #define FW_ENGINEERING_VERSION 0
 
 /***********************/
 
 /* DEMS */
 #define DQ_DEMS_LEGACY                 0
+#define DQ_DEMS_TOE_MORE_TO_SEND       3
+#define DQ_DEMS_TOE_LOCAL_ADV_WND      4
+#define DQ_DEMS_ROCE_CQ_CONS           7
 
 /* XCM agg val selection */
 #define DQ_XCM_AGG_VAL_SEL_WORD2  0
 #define DQ_XCM_ISCSI_MORE_TO_SEND_SEQ_CMD DQ_XCM_AGG_VAL_SEL_REG3
 #define DQ_XCM_ISCSI_EXP_STAT_SN_CMD   DQ_XCM_AGG_VAL_SEL_REG6
 #define DQ_XCM_ROCE_SQ_PROD_CMD        DQ_XCM_AGG_VAL_SEL_WORD4
+#define DQ_XCM_TOE_TX_BD_PROD_CMD      DQ_XCM_AGG_VAL_SEL_WORD4
+#define DQ_XCM_TOE_MORE_TO_SEND_SEQ_CMD        DQ_XCM_AGG_VAL_SEL_REG3
+#define DQ_XCM_TOE_LOCAL_ADV_WND_SEQ_CMD DQ_XCM_AGG_VAL_SEL_REG4
 
 /* UCM agg val selection (HW) */
 #define        DQ_UCM_AGG_VAL_SEL_WORD0        0
 #define DQ_XCM_ISCSI_DQ_FLUSH_CMD      BIT(DQ_XCM_AGG_FLG_SHIFT_CF19)
 #define DQ_XCM_ISCSI_SLOW_PATH_CMD     BIT(DQ_XCM_AGG_FLG_SHIFT_CF22)
 #define DQ_XCM_ISCSI_PROC_ONLY_CLEANUP_CMD BIT(DQ_XCM_AGG_FLG_SHIFT_CF23)
+#define DQ_XCM_TOE_DQ_FLUSH_CMD                BIT(DQ_XCM_AGG_FLG_SHIFT_CF19)
+#define DQ_XCM_TOE_SLOW_PATH_CMD       BIT(DQ_XCM_AGG_FLG_SHIFT_CF22)
 
 /* UCM agg counter flag selection (HW) */
 #define        DQ_UCM_AGG_FLG_SHIFT_CF0        0
 #define DQ_UCM_ETH_PMD_RX_ARM_CMD      BIT(DQ_UCM_AGG_FLG_SHIFT_CF5)
 #define DQ_UCM_ROCE_CQ_ARM_SE_CF_CMD   BIT(DQ_UCM_AGG_FLG_SHIFT_CF4)
 #define DQ_UCM_ROCE_CQ_ARM_CF_CMD      BIT(DQ_UCM_AGG_FLG_SHIFT_CF5)
+#define DQ_UCM_TOE_TIMER_STOP_ALL_CMD  BIT(DQ_UCM_AGG_FLG_SHIFT_CF3)
+#define DQ_UCM_TOE_SLOW_PATH_CF_CMD    BIT(DQ_UCM_AGG_FLG_SHIFT_CF4)
+#define DQ_UCM_TOE_DQ_CF_CMD           BIT(DQ_UCM_AGG_FLG_SHIFT_CF5)
 
 /* TCM agg counter flag selection (HW) */
 #define DQ_TCM_AGG_FLG_SHIFT_CF0       0
 #define DQ_TCM_FCOE_TIMER_STOP_ALL_CMD      BIT(DQ_TCM_AGG_FLG_SHIFT_CF3)
 #define DQ_TCM_ISCSI_FLUSH_Q0_CMD      BIT(DQ_TCM_AGG_FLG_SHIFT_CF1)
 #define DQ_TCM_ISCSI_TIMER_STOP_ALL_CMD        BIT(DQ_TCM_AGG_FLG_SHIFT_CF3)
+#define DQ_TCM_TOE_FLUSH_Q0_CMD                BIT(DQ_TCM_AGG_FLG_SHIFT_CF1)
+#define DQ_TCM_TOE_TIMER_STOP_ALL_CMD  BIT(DQ_TCM_AGG_FLG_SHIFT_CF3)
+#define DQ_TCM_IWARP_POST_RQ_CF_CMD    BIT(DQ_TCM_AGG_FLG_SHIFT_CF1)
 
 /* PWM address mapping */
 #define DQ_PWM_OFFSET_DPM_BASE 0x0
@@ -689,6 +703,16 @@ struct iscsi_eqe_data {
 #define ISCSI_EQE_DATA_RESERVED0_SHIFT                 7
 };
 
+struct rdma_eqe_destroy_qp {
+       __le32 cid;
+       u8 reserved[4];
+};
+
+union rdma_eqe_data {
+       struct regpair async_handle;
+       struct rdma_eqe_destroy_qp rdma_destroy_qp_data;
+};
+
 struct malicious_vf_eqe_data {
        u8 vf_id;
        u8 err_id;
@@ -705,9 +729,9 @@ union event_ring_data {
        u8 bytes[8];
        struct vf_pf_channel_eqe_data vf_pf_channel;
        struct iscsi_eqe_data iscsi_info;
+       union rdma_eqe_data rdma_data;
        struct malicious_vf_eqe_data malicious_vf;
        struct initial_cleanup_eqe_data vf_init_cleanup;
-       struct regpair roce_handle;
 };
 
 /* Event Ring Entry */
index 4b402fb0eaad5fdf7bd29221d95596f092c0e945..34d93eb5bfba346019ba1d2c9014ab8a2fa5fd8f 100644 (file)
@@ -49,6 +49,9 @@
 #define ETH_RX_CQE_PAGE_SIZE_BYTES                      4096
 #define ETH_RX_NUM_NEXT_PAGE_BDS                        2
 
+#define ETH_MAX_TUNN_LSO_INNER_IPV4_OFFSET          253
+#define ETH_MAX_TUNN_LSO_INNER_IPV6_OFFSET          251
+
 #define ETH_TX_MIN_BDS_PER_NON_LSO_PKT                          1
 #define ETH_TX_MAX_BDS_PER_NON_LSO_PACKET                       18
 #define ETH_TX_MAX_BDS_PER_LSO_PACKET  255
index 2e417a45c5f7028ba1fc2b6201fc828f1fef4247..947a635d04bb57ff15a61f1ee82c41ae27c1d4e5 100644 (file)
@@ -109,13 +109,6 @@ struct fcoe_conn_terminate_ramrod_data {
        struct regpair terminate_params_addr;
 };
 
-struct fcoe_fast_sgl_ctx {
-       struct regpair sgl_start_addr;
-       __le32 sgl_byte_offset;
-       __le16 task_reuse_cnt;
-       __le16 init_offset_in_first_sge;
-};
-
 struct fcoe_slow_sgl_ctx {
        struct regpair base_sgl_addr;
        __le16 curr_sge_off;
@@ -124,23 +117,16 @@ struct fcoe_slow_sgl_ctx {
        __le16 reserved;
 };
 
-struct fcoe_sge {
-       struct regpair sge_addr;
-       __le16 size;
-       __le16 reserved0;
-       u8 reserved1[3];
-       u8 is_valid_sge;
-};
-
-union fcoe_data_desc_ctx {
-       struct fcoe_fast_sgl_ctx fast;
-       struct fcoe_slow_sgl_ctx slow;
-       struct fcoe_sge single_sge;
-};
-
 union fcoe_dix_desc_ctx {
        struct fcoe_slow_sgl_ctx dix_sgl;
-       struct fcoe_sge cached_dix_sge;
+       struct scsi_sge cached_dix_sge;
+};
+
+struct fcoe_fast_sgl_ctx {
+       struct regpair sgl_start_addr;
+       __le32 sgl_byte_offset;
+       __le16 task_reuse_cnt;
+       __le16 init_offset_in_first_sge;
 };
 
 struct fcoe_fcp_cmd_payload {
@@ -172,57 +158,6 @@ enum fcoe_mode_type {
        MAX_FCOE_MODE_TYPE
 };
 
-struct fcoe_mstorm_fcoe_task_st_ctx_fp {
-       __le16 flags;
-#define FCOE_MSTORM_FCOE_TASK_ST_CTX_FP_RSRV0_MASK                 0x7FFF
-#define FCOE_MSTORM_FCOE_TASK_ST_CTX_FP_RSRV0_SHIFT                0
-#define FCOE_MSTORM_FCOE_TASK_ST_CTX_FP_MP_INCLUDE_FC_HEADER_MASK  0x1
-#define FCOE_MSTORM_FCOE_TASK_ST_CTX_FP_MP_INCLUDE_FC_HEADER_SHIFT 15
-       __le16 difDataResidue;
-       __le16 parent_id;
-       __le16 single_sge_saved_offset;
-       __le32 data_2_trns_rem;
-       __le32 offset_in_io;
-       union fcoe_dix_desc_ctx dix_desc;
-       union fcoe_data_desc_ctx data_desc;
-};
-
-struct fcoe_mstorm_fcoe_task_st_ctx_non_fp {
-       __le16 flags;
-#define FCOE_MSTORM_FCOE_TASK_ST_CTX_NON_FP_HOST_INTERFACE_MASK            0x3
-#define FCOE_MSTORM_FCOE_TASK_ST_CTX_NON_FP_HOST_INTERFACE_SHIFT           0
-#define FCOE_MSTORM_FCOE_TASK_ST_CTX_NON_FP_DIF_TO_PEER_MASK               0x1
-#define FCOE_MSTORM_FCOE_TASK_ST_CTX_NON_FP_DIF_TO_PEER_SHIFT              2
-#define FCOE_MSTORM_FCOE_TASK_ST_CTX_NON_FP_VALIDATE_DIX_APP_TAG_MASK      0x1
-#define FCOE_MSTORM_FCOE_TASK_ST_CTX_NON_FP_VALIDATE_DIX_APP_TAG_SHIFT     3
-#define FCOE_MSTORM_FCOE_TASK_ST_CTX_NON_FP_INTERVAL_SIZE_LOG_MASK         0xF
-#define FCOE_MSTORM_FCOE_TASK_ST_CTX_NON_FP_INTERVAL_SIZE_LOG_SHIFT        4
-#define FCOE_MSTORM_FCOE_TASK_ST_CTX_NON_FP_DIX_BLOCK_SIZE_MASK            0x3
-#define FCOE_MSTORM_FCOE_TASK_ST_CTX_NON_FP_DIX_BLOCK_SIZE_SHIFT           8
-#define FCOE_MSTORM_FCOE_TASK_ST_CTX_NON_FP_RESERVED_MASK                  0x1
-#define FCOE_MSTORM_FCOE_TASK_ST_CTX_NON_FP_RESERVED_SHIFT                 10
-#define FCOE_MSTORM_FCOE_TASK_ST_CTX_NON_FP_HAS_FIRST_PACKET_ARRIVED_MASK  0x1
-#define FCOE_MSTORM_FCOE_TASK_ST_CTX_NON_FP_HAS_FIRST_PACKET_ARRIVED_SHIFT 11
-#define FCOE_MSTORM_FCOE_TASK_ST_CTX_NON_FP_VALIDATE_DIX_REF_TAG_MASK      0x1
-#define FCOE_MSTORM_FCOE_TASK_ST_CTX_NON_FP_VALIDATE_DIX_REF_TAG_SHIFT     12
-#define FCOE_MSTORM_FCOE_TASK_ST_CTX_NON_FP_DIX_CACHED_SGE_FLG_MASK        0x1
-#define FCOE_MSTORM_FCOE_TASK_ST_CTX_NON_FP_DIX_CACHED_SGE_FLG_SHIFT       13
-#define FCOE_MSTORM_FCOE_TASK_ST_CTX_NON_FP_OFFSET_IN_IO_VALID_MASK        0x1
-#define FCOE_MSTORM_FCOE_TASK_ST_CTX_NON_FP_OFFSET_IN_IO_VALID_SHIFT       14
-#define FCOE_MSTORM_FCOE_TASK_ST_CTX_NON_FP_DIF_SUPPORTED_MASK             0x1
-#define FCOE_MSTORM_FCOE_TASK_ST_CTX_NON_FP_DIF_SUPPORTED_SHIFT            15
-       u8 tx_rx_sgl_mode;
-#define FCOE_MSTORM_FCOE_TASK_ST_CTX_NON_FP_TX_SGL_MODE_MASK               0x7
-#define FCOE_MSTORM_FCOE_TASK_ST_CTX_NON_FP_TX_SGL_MODE_SHIFT              0
-#define FCOE_MSTORM_FCOE_TASK_ST_CTX_NON_FP_RX_SGL_MODE_MASK               0x7
-#define FCOE_MSTORM_FCOE_TASK_ST_CTX_NON_FP_RX_SGL_MODE_SHIFT              3
-#define FCOE_MSTORM_FCOE_TASK_ST_CTX_NON_FP_RSRV1_MASK                     0x3
-#define FCOE_MSTORM_FCOE_TASK_ST_CTX_NON_FP_RSRV1_SHIFT                    6
-       u8 rsrv2;
-       __le32 num_prm_zero_read;
-       struct regpair rsp_buf_addr;
-};
-
 struct fcoe_rx_stat {
        struct regpair fcoe_rx_byte_cnt;
        struct regpair fcoe_rx_data_pkt_cnt;
@@ -236,16 +171,6 @@ struct fcoe_rx_stat {
        __le32 rsrv;
 };
 
-enum fcoe_sgl_mode {
-       FCOE_SLOW_SGL,
-       FCOE_SINGLE_FAST_SGE,
-       FCOE_2_FAST_SGE,
-       FCOE_3_FAST_SGE,
-       FCOE_4_FAST_SGE,
-       FCOE_MUL_FAST_SGES,
-       MAX_FCOE_SGL_MODE
-};
-
 struct fcoe_stat_ramrod_data {
        struct regpair stat_params_addr;
 };
@@ -328,22 +253,24 @@ union fcoe_tx_info_union_ctx {
 struct ystorm_fcoe_task_st_ctx {
        u8 task_type;
        u8 sgl_mode;
-#define YSTORM_FCOE_TASK_ST_CTX_TX_SGL_MODE_MASK  0x7
+#define YSTORM_FCOE_TASK_ST_CTX_TX_SGL_MODE_MASK  0x1
 #define YSTORM_FCOE_TASK_ST_CTX_TX_SGL_MODE_SHIFT 0
-#define YSTORM_FCOE_TASK_ST_CTX_RSRV_MASK         0x1F
-#define YSTORM_FCOE_TASK_ST_CTX_RSRV_SHIFT        3
+#define YSTORM_FCOE_TASK_ST_CTX_RSRV_MASK         0x7F
+#define YSTORM_FCOE_TASK_ST_CTX_RSRV_SHIFT        1
        u8 cached_dix_sge;
        u8 expect_first_xfer;
        __le32 num_pbf_zero_write;
        union protection_info_union_ctx protection_info_union;
        __le32 data_2_trns_rem;
+       struct scsi_sgl_params sgl_params;
+       u8 reserved1[12];
        union fcoe_tx_info_union_ctx tx_info_union;
        union fcoe_dix_desc_ctx dix_desc;
-       union fcoe_data_desc_ctx data_desc;
+       struct scsi_cached_sges data_desc;
        __le16 ox_id;
        __le16 rx_id;
        __le32 task_rety_identifier;
-       __le32 reserved1[2];
+       u8 reserved2[8];
 };
 
 struct ystorm_fcoe_task_ag_ctx {
@@ -484,22 +411,22 @@ struct tstorm_fcoe_task_ag_ctx {
 struct fcoe_tstorm_fcoe_task_st_ctx_read_write {
        union fcoe_cleanup_addr_exp_ro_union cleanup_addr_exp_ro_union;
        __le16 flags;
-#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_RX_SGL_MODE_MASK       0x7
+#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_RX_SGL_MODE_MASK       0x1
 #define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_RX_SGL_MODE_SHIFT      0
 #define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_EXP_FIRST_FRAME_MASK   0x1
-#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_EXP_FIRST_FRAME_SHIFT  3
+#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_EXP_FIRST_FRAME_SHIFT  1
 #define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_SEQ_ACTIVE_MASK        0x1
-#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_SEQ_ACTIVE_SHIFT       4
+#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_SEQ_ACTIVE_SHIFT       2
 #define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_SEQ_TIMEOUT_MASK       0x1
-#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_SEQ_TIMEOUT_SHIFT      5
+#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_SEQ_TIMEOUT_SHIFT      3
 #define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_SINGLE_PKT_IN_EX_MASK  0x1
-#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_SINGLE_PKT_IN_EX_SHIFT 6
+#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_SINGLE_PKT_IN_EX_SHIFT 4
 #define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_OOO_RX_SEQ_STAT_MASK   0x1
-#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_OOO_RX_SEQ_STAT_SHIFT  7
+#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_OOO_RX_SEQ_STAT_SHIFT  5
 #define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_CQ_ADD_ADV_MASK        0x3
-#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_CQ_ADD_ADV_SHIFT       8
-#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_RSRV1_MASK             0x3F
-#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_RSRV1_SHIFT            10
+#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_CQ_ADD_ADV_SHIFT       6
+#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_RSRV1_MASK             0xFF
+#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_RSRV1_SHIFT            8
        __le16 seq_cnt;
        u8 seq_id;
        u8 ooo_rx_seq_id;
@@ -582,8 +509,34 @@ struct mstorm_fcoe_task_ag_ctx {
 };
 
 struct mstorm_fcoe_task_st_ctx {
-       struct fcoe_mstorm_fcoe_task_st_ctx_non_fp non_fp;
-       struct fcoe_mstorm_fcoe_task_st_ctx_fp fp;
+       struct regpair rsp_buf_addr;
+       __le32 rsrv[2];
+       struct scsi_sgl_params sgl_params;
+       __le32 data_2_trns_rem;
+       __le32 data_buffer_offset;
+       __le16 parent_id;
+       __le16 flags;
+#define MSTORM_FCOE_TASK_ST_CTX_INTERVAL_SIZE_LOG_MASK     0xF
+#define MSTORM_FCOE_TASK_ST_CTX_INTERVAL_SIZE_LOG_SHIFT    0
+#define MSTORM_FCOE_TASK_ST_CTX_HOST_INTERFACE_MASK        0x3
+#define MSTORM_FCOE_TASK_ST_CTX_HOST_INTERFACE_SHIFT       4
+#define MSTORM_FCOE_TASK_ST_CTX_DIF_TO_PEER_MASK           0x1
+#define MSTORM_FCOE_TASK_ST_CTX_DIF_TO_PEER_SHIFT          6
+#define MSTORM_FCOE_TASK_ST_CTX_MP_INCLUDE_FC_HEADER_MASK  0x1
+#define MSTORM_FCOE_TASK_ST_CTX_MP_INCLUDE_FC_HEADER_SHIFT 7
+#define MSTORM_FCOE_TASK_ST_CTX_DIX_BLOCK_SIZE_MASK        0x3
+#define MSTORM_FCOE_TASK_ST_CTX_DIX_BLOCK_SIZE_SHIFT       8
+#define MSTORM_FCOE_TASK_ST_CTX_VALIDATE_DIX_REF_TAG_MASK  0x1
+#define MSTORM_FCOE_TASK_ST_CTX_VALIDATE_DIX_REF_TAG_SHIFT 10
+#define MSTORM_FCOE_TASK_ST_CTX_DIX_CACHED_SGE_FLG_MASK    0x1
+#define MSTORM_FCOE_TASK_ST_CTX_DIX_CACHED_SGE_FLG_SHIFT   11
+#define MSTORM_FCOE_TASK_ST_CTX_DIF_SUPPORTED_MASK         0x1
+#define MSTORM_FCOE_TASK_ST_CTX_DIF_SUPPORTED_SHIFT        12
+#define MSTORM_FCOE_TASK_ST_CTX_TX_SGL_MODE_MASK           0x1
+#define MSTORM_FCOE_TASK_ST_CTX_TX_SGL_MODE_SHIFT          13
+#define MSTORM_FCOE_TASK_ST_CTX_RESERVED_MASK              0x3
+#define MSTORM_FCOE_TASK_ST_CTX_RESERVED_SHIFT             14
+       struct scsi_cached_sges data_desc;
 };
 
 struct ustorm_fcoe_task_ag_ctx {
@@ -646,6 +599,7 @@ struct ustorm_fcoe_task_ag_ctx {
 
 struct fcoe_task_context {
        struct ystorm_fcoe_task_st_ctx ystorm_st_context;
+       struct regpair ystorm_st_padding[2];
        struct tdif_task_context tdif_context;
        struct ystorm_fcoe_task_ag_ctx ystorm_ag_context;
        struct tstorm_fcoe_task_ag_ctx tstorm_ag_context;
@@ -668,20 +622,20 @@ struct fcoe_tx_stat {
 struct fcoe_wqe {
        __le16 task_id;
        __le16 flags;
-#define FCOE_WQE_REQ_TYPE_MASK        0xF
-#define FCOE_WQE_REQ_TYPE_SHIFT       0
-#define FCOE_WQE_SGL_MODE_MASK        0x7
-#define FCOE_WQE_SGL_MODE_SHIFT       4
-#define FCOE_WQE_CONTINUATION_MASK    0x1
-#define FCOE_WQE_CONTINUATION_SHIFT   7
-#define FCOE_WQE_INVALIDATE_PTU_MASK  0x1
-#define FCOE_WQE_INVALIDATE_PTU_SHIFT 8
-#define FCOE_WQE_SUPER_IO_MASK        0x1
-#define FCOE_WQE_SUPER_IO_SHIFT       9
-#define FCOE_WQE_SEND_AUTO_RSP_MASK   0x1
-#define FCOE_WQE_SEND_AUTO_RSP_SHIFT  10
-#define FCOE_WQE_RESERVED0_MASK       0x1F
-#define FCOE_WQE_RESERVED0_SHIFT      11
+#define FCOE_WQE_REQ_TYPE_MASK       0xF
+#define FCOE_WQE_REQ_TYPE_SHIFT      0
+#define FCOE_WQE_SGL_MODE_MASK       0x1
+#define FCOE_WQE_SGL_MODE_SHIFT      4
+#define FCOE_WQE_CONTINUATION_MASK   0x1
+#define FCOE_WQE_CONTINUATION_SHIFT  5
+#define FCOE_WQE_SEND_AUTO_RSP_MASK  0x1
+#define FCOE_WQE_SEND_AUTO_RSP_SHIFT 6
+#define FCOE_WQE_RESERVED_MASK       0x1
+#define FCOE_WQE_RESERVED_SHIFT      7
+#define FCOE_WQE_NUM_SGES_MASK       0xF
+#define FCOE_WQE_NUM_SGES_SHIFT      8
+#define FCOE_WQE_RESERVED1_MASK      0xF
+#define FCOE_WQE_RESERVED1_SHIFT     12
        union fcoe_additional_info_union additional_info_union;
 };
 
index 4c5747babcf63ff32b8db664146df40545d986d2..69949f8e354b0447c7950884bd205622a4653ab2 100644 (file)
 /* iSCSI HSI constants */
 #define ISCSI_DEFAULT_MTU       (1500)
 
-/* Current iSCSI HSI version number composed of two fields (16 bit) */
-#define ISCSI_HSI_MAJOR_VERSION (0)
-#define ISCSI_HSI_MINOR_VERSION (0)
-
 /* KWQ (kernel work queue) layer codes */
 #define ISCSI_SLOW_PATH_LAYER_CODE   (6)
 
-/* CQE completion status */
-#define ISCSI_EQE_COMPLETION_SUCCESS (0x0)
-#define ISCSI_EQE_RST_CONN_RCVD (0x1)
-
 /* iSCSI parameter defaults */
 #define ISCSI_DEFAULT_HEADER_DIGEST         (0)
 #define ISCSI_DEFAULT_DATA_DIGEST           (0)
 #define ISCSI_MIN_VAL_MAX_OUTSTANDING_R2T   (1)
 #define ISCSI_MAX_VAL_MAX_OUTSTANDING_R2T   (0xff)
 
+#define ISCSI_AHS_CNTL_SIZE 4
+
+#define ISCSI_WQE_NUM_SGES_SLOWIO           (0xf)
+
 /* iSCSI reserved params */
 #define ISCSI_ITT_ALL_ONES     (0xffffffff)
 #define ISCSI_TTT_ALL_ONES     (0xffffffff)
@@ -173,19 +169,6 @@ struct iscsi_async_msg_hdr {
        __le32 reserved7;
 };
 
-struct iscsi_sge {
-       struct regpair sge_addr;
-       __le16 sge_len;
-       __le16 reserved0;
-       __le32 reserved1;
-};
-
-struct iscsi_cached_sge_ctx {
-       struct iscsi_sge sge;
-       struct regpair reserved;
-       __le32 dsgl_curr_offset[2];
-};
-
 struct iscsi_cmd_hdr {
        __le16 reserved1;
        u8 flags_attr;
@@ -229,8 +212,13 @@ struct iscsi_common_hdr {
 #define ISCSI_COMMON_HDR_DATA_SEG_LEN_SHIFT  0
 #define ISCSI_COMMON_HDR_TOTAL_AHS_LEN_MASK  0xFF
 #define ISCSI_COMMON_HDR_TOTAL_AHS_LEN_SHIFT 24
-       __le32 lun_reserved[4];
-       __le32 data[6];
+       struct regpair lun_reserved;
+       __le32 itt;
+       __le32 ttt;
+       __le32 cmdstat_sn;
+       __le32 exp_statcmd_sn;
+       __le32 max_cmd_sn;
+       __le32 data[3];
 };
 
 struct iscsi_conn_offload_params {
@@ -246,8 +234,10 @@ struct iscsi_conn_offload_params {
 #define ISCSI_CONN_OFFLOAD_PARAMS_TCP_ON_CHIP_1B_SHIFT 0
 #define ISCSI_CONN_OFFLOAD_PARAMS_TARGET_MODE_MASK     0x1
 #define ISCSI_CONN_OFFLOAD_PARAMS_TARGET_MODE_SHIFT    1
-#define ISCSI_CONN_OFFLOAD_PARAMS_RESERVED1_MASK       0x3F
-#define ISCSI_CONN_OFFLOAD_PARAMS_RESERVED1_SHIFT      2
+#define ISCSI_CONN_OFFLOAD_PARAMS_RESTRICTED_MODE_MASK 0x1
+#define ISCSI_CONN_OFFLOAD_PARAMS_RESTRICTED_MODE_SHIFT        2
+#define ISCSI_CONN_OFFLOAD_PARAMS_RESERVED1_MASK       0x1F
+#define ISCSI_CONN_OFFLOAD_PARAMS_RESERVED1_SHIFT      3
        u8 pbl_page_size_log;
        u8 pbe_page_size_log;
        u8 default_cq;
@@ -278,8 +268,12 @@ struct iscsi_conn_update_ramrod_params {
 #define ISCSI_CONN_UPDATE_RAMROD_PARAMS_INITIAL_R2T_SHIFT    2
 #define ISCSI_CONN_UPDATE_RAMROD_PARAMS_IMMEDIATE_DATA_MASK  0x1
 #define ISCSI_CONN_UPDATE_RAMROD_PARAMS_IMMEDIATE_DATA_SHIFT 3
-#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_RESERVED1_MASK       0xF
-#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_RESERVED1_SHIFT      4
+#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_DIF_BLOCK_SIZE_MASK  0x1
+#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_DIF_BLOCK_SIZE_SHIFT 4
+#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_DIF_ON_HOST_EN_MASK  0x1
+#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_DIF_ON_HOST_EN_SHIFT 5
+#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_RESERVED1_MASK       0x3
+#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_RESERVED1_SHIFT      6
        u8 reserved0[3];
        __le32 max_seq_size;
        __le32 max_send_pdu_length;
@@ -312,7 +306,7 @@ struct iscsi_ext_cdb_cmd_hdr {
        __le32 expected_transfer_length;
        __le32 cmd_sn;
        __le32 exp_stat_sn;
-       struct iscsi_sge cdb_sge;
+       struct scsi_sge cdb_sge;
 };
 
 struct iscsi_login_req_hdr {
@@ -519,8 +513,8 @@ struct iscsi_logout_response_hdr {
        __le32 exp_cmd_sn;
        __le32 max_cmd_sn;
        __le32 reserved4;
-       __le16 time2retain;
-       __le16 time2wait;
+       __le16 time_2_retain;
+       __le16 time_2_wait;
        __le32 reserved5[1];
 };
 
@@ -602,7 +596,7 @@ struct iscsi_tmf_response_hdr {
 #define ISCSI_TMF_RESPONSE_HDR_TOTAL_AHS_LEN_SHIFT 24
        struct regpair reserved0;
        __le32 itt;
-       __le32 rtt;
+       __le32 reserved1;
        __le32 stat_sn;
        __le32 exp_cmd_sn;
        __le32 max_cmd_sn;
@@ -641,7 +635,7 @@ struct iscsi_reject_hdr {
 #define ISCSI_REJECT_HDR_TOTAL_AHS_LEN_MASK  0xFF
 #define ISCSI_REJECT_HDR_TOTAL_AHS_LEN_SHIFT 24
        struct regpair reserved0;
-       __le32 reserved1;
+       __le32 all_ones;
        __le32 reserved2;
        __le32 stat_sn;
        __le32 exp_cmd_sn;
@@ -688,7 +682,9 @@ struct iscsi_cqe_solicited {
        __le16 itid;
        u8 task_type;
        u8 fw_dbg_field;
-       __le32 reserved1[2];
+       u8 caused_conn_err;
+       u8 reserved0[3];
+       __le32 reserved1[1];
        union iscsi_task_hdr iscsi_hdr;
 };
 
@@ -727,35 +723,6 @@ enum iscsi_cqe_unsolicited_type {
        MAX_ISCSI_CQE_UNSOLICITED_TYPE
 };
 
-struct iscsi_virt_sgl_ctx {
-       struct regpair sgl_base;
-       struct regpair dsgl_base;
-       __le32 sgl_initial_offset;
-       __le32 dsgl_initial_offset;
-       __le32 dsgl_curr_offset[2];
-};
-
-struct iscsi_sgl_var_params {
-       u8 sgl_ptr;
-       u8 dsgl_ptr;
-       __le16 sge_offset;
-       __le16 dsge_offset;
-};
-
-struct iscsi_phys_sgl_ctx {
-       struct regpair sgl_base;
-       struct regpair dsgl_base;
-       u8 sgl_size;
-       u8 dsgl_size;
-       __le16 reserved;
-       struct iscsi_sgl_var_params var_params[2];
-};
-
-union iscsi_data_desc_ctx {
-       struct iscsi_virt_sgl_ctx virt_sgl;
-       struct iscsi_phys_sgl_ctx phys_sgl;
-       struct iscsi_cached_sge_ctx cached_sge;
-};
 
 struct iscsi_debug_modes {
        u8 flags;
@@ -771,8 +738,10 @@ struct iscsi_debug_modes {
 #define ISCSI_DEBUG_MODES_ASSERT_IF_RECV_REJECT_OR_ASYNC_SHIFT 4
 #define ISCSI_DEBUG_MODES_ASSERT_IF_RECV_NOP_MASK              0x1
 #define ISCSI_DEBUG_MODES_ASSERT_IF_RECV_NOP_SHIFT             5
-#define ISCSI_DEBUG_MODES_RESERVED0_MASK                       0x3
-#define ISCSI_DEBUG_MODES_RESERVED0_SHIFT                      6
+#define ISCSI_DEBUG_MODES_ASSERT_IF_DATA_DIGEST_ERROR_MASK     0x1
+#define ISCSI_DEBUG_MODES_ASSERT_IF_DATA_DIGEST_ERROR_SHIFT    6
+#define ISCSI_DEBUG_MODES_ASSERT_IF_DIF_ERROR_MASK             0x1
+#define ISCSI_DEBUG_MODES_ASSERT_IF_DIF_ERROR_SHIFT            7
 };
 
 struct iscsi_dif_flags {
@@ -806,7 +775,6 @@ enum iscsi_eqe_opcode {
        ISCSI_EVENT_TYPE_ASYN_FIN_WAIT2,
        ISCSI_EVENT_TYPE_ISCSI_CONN_ERROR,
        ISCSI_EVENT_TYPE_TCP_CONN_ERROR,
-       ISCSI_EVENT_TYPE_ASYN_DELETE_OOO_ISLES,
        MAX_ISCSI_EQE_OPCODE
 };
 
@@ -856,31 +824,11 @@ enum iscsi_error_types {
        ISCSI_CONN_ERROR_PROTOCOL_ERR_DIF_TX,
        ISCSI_CONN_ERROR_SENSE_DATA_LENGTH,
        ISCSI_CONN_ERROR_DATA_PLACEMENT_ERROR,
+       ISCSI_CONN_ERROR_INVALID_ITT,
        ISCSI_ERROR_UNKNOWN,
        MAX_ISCSI_ERROR_TYPES
 };
 
-struct iscsi_mflags {
-       u8 mflags;
-#define ISCSI_MFLAGS_SLOW_IO_MASK     0x1
-#define ISCSI_MFLAGS_SLOW_IO_SHIFT    0
-#define ISCSI_MFLAGS_SINGLE_SGE_MASK  0x1
-#define ISCSI_MFLAGS_SINGLE_SGE_SHIFT 1
-#define ISCSI_MFLAGS_RESERVED_MASK    0x3F
-#define ISCSI_MFLAGS_RESERVED_SHIFT   2
-};
-
-struct iscsi_sgl {
-       struct regpair sgl_addr;
-       __le16 updated_sge_size;
-       __le16 updated_sge_offset;
-       __le32 byte_offset;
-};
-
-union iscsi_mstorm_sgl {
-       struct iscsi_sgl sgl_struct;
-       struct iscsi_sge single_sge;
-};
 
 enum iscsi_ramrod_cmd_id {
        ISCSI_RAMROD_CMD_ID_UNUSED = 0,
@@ -896,10 +844,10 @@ enum iscsi_ramrod_cmd_id {
 
 struct iscsi_reg1 {
        __le32 reg1_map;
-#define ISCSI_REG1_NUM_FAST_SGES_MASK  0x7
-#define ISCSI_REG1_NUM_FAST_SGES_SHIFT 0
-#define ISCSI_REG1_RESERVED1_MASK      0x1FFFFFFF
-#define ISCSI_REG1_RESERVED1_SHIFT     3
+#define ISCSI_REG1_NUM_SGES_MASK   0xF
+#define ISCSI_REG1_NUM_SGES_SHIFT  0
+#define ISCSI_REG1_RESERVED1_MASK  0xFFFFFFF
+#define ISCSI_REG1_RESERVED1_SHIFT 4
 };
 
 union iscsi_seq_num {
@@ -967,22 +915,33 @@ struct iscsi_spe_func_init {
 };
 
 struct ystorm_iscsi_task_state {
-       union iscsi_data_desc_ctx sgl_ctx_union;
-       __le32 buffer_offset[2];
-       __le16 bytes_nxt_dif;
-       __le16 rxmit_bytes_nxt_dif;
-       union iscsi_seq_num seq_num_union;
-       u8 dif_bytes_leftover;
-       u8 rxmit_dif_bytes_leftover;
-       __le16 reuse_count;
-       struct iscsi_dif_flags dif_flags;
-       u8 local_comp;
+       struct scsi_cached_sges data_desc;
+       struct scsi_sgl_params sgl_params;
        __le32 exp_r2t_sn;
-       __le32 sgl_offset[2];
+       __le32 buffer_offset;
+       union iscsi_seq_num seq_num;
+       struct iscsi_dif_flags dif_flags;
+       u8 flags;
+#define YSTORM_ISCSI_TASK_STATE_LOCAL_COMP_MASK  0x1
+#define YSTORM_ISCSI_TASK_STATE_LOCAL_COMP_SHIFT 0
+#define YSTORM_ISCSI_TASK_STATE_SLOW_IO_MASK     0x1
+#define YSTORM_ISCSI_TASK_STATE_SLOW_IO_SHIFT    1
+#define YSTORM_ISCSI_TASK_STATE_RESERVED0_MASK   0x3F
+#define YSTORM_ISCSI_TASK_STATE_RESERVED0_SHIFT  2
+};
+
+struct ystorm_iscsi_task_rxmit_opt {
+       __le32 fast_rxmit_sge_offset;
+       __le32 scan_start_buffer_offset;
+       __le32 fast_rxmit_buffer_offset;
+       u8 scan_start_sgl_index;
+       u8 fast_rxmit_sgl_index;
+       __le16 reserved;
 };
 
 struct ystorm_iscsi_task_st_ctx {
        struct ystorm_iscsi_task_state state;
+       struct ystorm_iscsi_task_rxmit_opt rxmit_opt;
        union iscsi_task_hdr pdu_hdr;
 };
 
@@ -1152,25 +1111,16 @@ struct ustorm_iscsi_task_ag_ctx {
 };
 
 struct mstorm_iscsi_task_st_ctx {
-       union iscsi_mstorm_sgl sgl_union;
-       struct iscsi_dif_flags dif_flags;
-       struct iscsi_mflags flags;
-       u8 sgl_size;
-       u8 host_sge_index;
-       __le16 dix_cur_sge_offset;
-       __le16 dix_cur_sge_size;
-       __le32 data_offset_rtid;
-       u8 dif_offset;
-       u8 dix_sgl_size;
-       u8 dix_sge_index;
+       struct scsi_cached_sges data_desc;
+       struct scsi_sgl_params sgl_params;
+       __le32 rem_task_size;
+       __le32 data_buffer_offset;
        u8 task_type;
+       struct iscsi_dif_flags dif_flags;
+       u8 reserved0[2];
        struct regpair sense_db;
-       struct regpair dix_sgl_cur_sge;
-       __le32 rem_task_size;
-       __le16 reuse_count;
-       __le16 dif_data_residue;
-       u8 reserved0[4];
-       __le32 reserved1[1];
+       __le32 expected_itt;
+       __le32 reserved1;
 };
 
 struct ustorm_iscsi_task_st_ctx {
@@ -1184,7 +1134,7 @@ struct ustorm_iscsi_task_st_ctx {
 #define USTORM_ISCSI_TASK_ST_CTX_AHS_EXIST_SHIFT            0
 #define USTORM_ISCSI_TASK_ST_CTX_RESERVED1_MASK             0x7F
 #define USTORM_ISCSI_TASK_ST_CTX_RESERVED1_SHIFT            1
-       u8 reserved2;
+       struct iscsi_dif_flags dif_flags;
        __le16 reserved3;
        __le32 reserved4;
        __le32 reserved5;
@@ -1207,10 +1157,10 @@ struct ustorm_iscsi_task_st_ctx {
 #define USTORM_ISCSI_TASK_ST_CTX_LOCAL_COMP_SHIFT           2
 #define USTORM_ISCSI_TASK_ST_CTX_Q0_R2TQE_WRITE_MASK        0x1
 #define USTORM_ISCSI_TASK_ST_CTX_Q0_R2TQE_WRITE_SHIFT       3
-#define USTORM_ISCSI_TASK_ST_CTX_TOTALDATAACKED_DONE_MASK   0x1
-#define USTORM_ISCSI_TASK_ST_CTX_TOTALDATAACKED_DONE_SHIFT  4
-#define USTORM_ISCSI_TASK_ST_CTX_HQSCANNED_DONE_MASK        0x1
-#define USTORM_ISCSI_TASK_ST_CTX_HQSCANNED_DONE_SHIFT       5
+#define USTORM_ISCSI_TASK_ST_CTX_TOTAL_DATA_ACKED_DONE_MASK  0x1
+#define USTORM_ISCSI_TASK_ST_CTX_TOTAL_DATA_ACKED_DONE_SHIFT 4
+#define USTORM_ISCSI_TASK_ST_CTX_HQ_SCANNED_DONE_MASK        0x1
+#define USTORM_ISCSI_TASK_ST_CTX_HQ_SCANNED_DONE_SHIFT       5
 #define USTORM_ISCSI_TASK_ST_CTX_R2T2RECV_DONE_MASK         0x1
 #define USTORM_ISCSI_TASK_ST_CTX_R2T2RECV_DONE_SHIFT        6
 #define USTORM_ISCSI_TASK_ST_CTX_RESERVED0_MASK             0x1
@@ -1220,7 +1170,6 @@ struct ustorm_iscsi_task_st_ctx {
 
 struct iscsi_task_context {
        struct ystorm_iscsi_task_st_ctx ystorm_st_context;
-       struct regpair ystorm_st_padding[2];
        struct ystorm_iscsi_task_ag_ctx ystorm_ag_context;
        struct regpair ystorm_ag_padding[2];
        struct tdif_task_context tdif_context;
@@ -1272,32 +1221,22 @@ struct iscsi_uhqe {
 #define ISCSI_UHQE_TASK_ID_LO_SHIFT         24
 };
 
-struct iscsi_wqe_field {
-       __le32 contlen_cdbsize_field;
-#define ISCSI_WQE_FIELD_CONT_LEN_MASK  0xFFFFFF
-#define ISCSI_WQE_FIELD_CONT_LEN_SHIFT 0
-#define ISCSI_WQE_FIELD_CDB_SIZE_MASK  0xFF
-#define ISCSI_WQE_FIELD_CDB_SIZE_SHIFT 24
-};
-
-union iscsi_wqe_field_union {
-       struct iscsi_wqe_field cont_field;
-       __le32 prev_tid;
-};
 
 struct iscsi_wqe {
        __le16 task_id;
        u8 flags;
 #define ISCSI_WQE_WQE_TYPE_MASK        0x7
 #define ISCSI_WQE_WQE_TYPE_SHIFT       0
-#define ISCSI_WQE_NUM_FAST_SGES_MASK   0x7
-#define ISCSI_WQE_NUM_FAST_SGES_SHIFT  3
-#define ISCSI_WQE_PTU_INVALIDATE_MASK  0x1
-#define ISCSI_WQE_PTU_INVALIDATE_SHIFT 6
+#define ISCSI_WQE_NUM_SGES_MASK  0xF
+#define ISCSI_WQE_NUM_SGES_SHIFT 3
 #define ISCSI_WQE_RESPONSE_MASK        0x1
 #define ISCSI_WQE_RESPONSE_SHIFT       7
        struct iscsi_dif_flags prot_flags;
-       union iscsi_wqe_field_union cont_prevtid_union;
+       __le32 contlen_cdbsize;
+#define ISCSI_WQE_CONT_LEN_MASK  0xFFFFFF
+#define ISCSI_WQE_CONT_LEN_SHIFT 0
+#define ISCSI_WQE_CDB_SIZE_MASK  0xFF
+#define ISCSI_WQE_CDB_SIZE_SHIFT 24
 };
 
 enum iscsi_wqe_type {
@@ -1318,17 +1257,15 @@ struct iscsi_xhqe {
        u8 total_ahs_length;
        u8 opcode;
        u8 flags;
-#define ISCSI_XHQE_NUM_FAST_SGES_MASK  0x7
-#define ISCSI_XHQE_NUM_FAST_SGES_SHIFT 0
-#define ISCSI_XHQE_FINAL_MASK          0x1
-#define ISCSI_XHQE_FINAL_SHIFT         3
-#define ISCSI_XHQE_SUPER_IO_MASK       0x1
-#define ISCSI_XHQE_SUPER_IO_SHIFT      4
-#define ISCSI_XHQE_STATUS_BIT_MASK     0x1
-#define ISCSI_XHQE_STATUS_BIT_SHIFT    5
-#define ISCSI_XHQE_RESERVED_MASK       0x3
-#define ISCSI_XHQE_RESERVED_SHIFT      6
-       union iscsi_seq_num seq_num_union;
+#define ISCSI_XHQE_FINAL_MASK       0x1
+#define ISCSI_XHQE_FINAL_SHIFT      0
+#define ISCSI_XHQE_STATUS_BIT_MASK  0x1
+#define ISCSI_XHQE_STATUS_BIT_SHIFT 1
+#define ISCSI_XHQE_NUM_SGES_MASK    0xF
+#define ISCSI_XHQE_NUM_SGES_SHIFT   2
+#define ISCSI_XHQE_RESERVED0_MASK   0x3
+#define ISCSI_XHQE_RESERVED0_SHIFT  6
+       union iscsi_seq_num seq_num;
        __le16 reserved1;
 };
 
index fde56c436f7177e27173656f4fa4480752351c6d..625f80f08f91100b2cf0a7ffc2232868c82843a7 100644 (file)
@@ -263,7 +263,6 @@ struct qed_rdma_pf_params {
         * the doorbell BAR).
         */
        u32 min_dpis;           /* number of requested DPIs */
-       u32 num_mrs;            /* number of requested memory regions */
        u32 num_qps;            /* number of requested Queue Pairs */
        u32 num_srqs;           /* number of requested SRQ */
        u8 roce_edpm_mode;      /* see QED_ROCE_EDPM_MODE_ENABLE */
@@ -300,6 +299,11 @@ struct qed_sb_info {
        struct qed_dev          *cdev;
 };
 
+enum qed_dev_type {
+       QED_DEV_TYPE_BB,
+       QED_DEV_TYPE_AH,
+};
+
 struct qed_dev_info {
        unsigned long   pci_mem_start;
        unsigned long   pci_mem_end;
@@ -325,6 +329,8 @@ struct qed_dev_info {
        u16             mtu;
 
        bool wol_support;
+
+       enum qed_dev_type dev_type;
 };
 
 enum qed_sb_type {
@@ -752,7 +758,7 @@ enum qed_mf_mode {
        QED_MF_NPAR,
 };
 
-struct qed_eth_stats {
+struct qed_eth_stats_common {
        u64     no_buff_discards;
        u64     packet_too_big_discard;
        u64     ttl0_discard;
@@ -784,11 +790,6 @@ struct qed_eth_stats {
        u64     rx_256_to_511_byte_packets;
        u64     rx_512_to_1023_byte_packets;
        u64     rx_1024_to_1518_byte_packets;
-       u64     rx_1519_to_1522_byte_packets;
-       u64     rx_1519_to_2047_byte_packets;
-       u64     rx_2048_to_4095_byte_packets;
-       u64     rx_4096_to_9216_byte_packets;
-       u64     rx_9217_to_16383_byte_packets;
        u64     rx_crc_errors;
        u64     rx_mac_crtl_frames;
        u64     rx_pause_frames;
@@ -805,14 +806,8 @@ struct qed_eth_stats {
        u64     tx_256_to_511_byte_packets;
        u64     tx_512_to_1023_byte_packets;
        u64     tx_1024_to_1518_byte_packets;
-       u64     tx_1519_to_2047_byte_packets;
-       u64     tx_2048_to_4095_byte_packets;
-       u64     tx_4096_to_9216_byte_packets;
-       u64     tx_9217_to_16383_byte_packets;
        u64     tx_pause_frames;
        u64     tx_pfc_frames;
-       u64     tx_lpi_entry_count;
-       u64     tx_total_collisions;
        u64     brb_truncates;
        u64     brb_discards;
        u64     rx_mac_bytes;
@@ -827,6 +822,34 @@ struct qed_eth_stats {
        u64     tx_mac_ctrl_frames;
 };
 
+struct qed_eth_stats_bb {
+       u64 rx_1519_to_1522_byte_packets;
+       u64 rx_1519_to_2047_byte_packets;
+       u64 rx_2048_to_4095_byte_packets;
+       u64 rx_4096_to_9216_byte_packets;
+       u64 rx_9217_to_16383_byte_packets;
+       u64 tx_1519_to_2047_byte_packets;
+       u64 tx_2048_to_4095_byte_packets;
+       u64 tx_4096_to_9216_byte_packets;
+       u64 tx_9217_to_16383_byte_packets;
+       u64 tx_lpi_entry_count;
+       u64 tx_total_collisions;
+};
+
+struct qed_eth_stats_ah {
+       u64 rx_1519_to_max_byte_packets;
+       u64 tx_1519_to_max_byte_packets;
+};
+
+struct qed_eth_stats {
+       struct qed_eth_stats_common common;
+
+       union {
+               struct qed_eth_stats_bb bb;
+               struct qed_eth_stats_ah ah;
+       };
+};
+
 #define QED_SB_IDX              0x0002
 
 #define RX_PI           0
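
A hedged sketch of how a caller is expected to consume the BB/AH split; foo_rx_jumbo_packets() is illustrative, and the dev_type field added above selects the union member:

static u64 foo_rx_jumbo_packets(struct qed_dev_info *info,
                                struct qed_eth_stats *stats)
{
        /* AH MACs collapse the long-frame buckets into one counter */
        if (info->dev_type == QED_DEV_TYPE_AH)
                return stats->ah.rx_1519_to_max_byte_packets;

        return stats->bb.rx_1519_to_2047_byte_packets +
               stats->bb.rx_2048_to_4095_byte_packets +
               stats->bb.rx_4096_to_9216_byte_packets +
               stats->bb.rx_9217_to_16383_byte_packets;
}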
index f70bb81b8b6acfda1701ffcd073163db05a82770..3414649133d2e44fe36b9c12e12f42ba6970ed9d 100644 (file)
@@ -67,6 +67,8 @@ struct qed_dev_iscsi_info {
 
        void __iomem *primary_dbq_rq_addr;
        void __iomem *secondary_bdq_rq_addr;
+
+       u8 num_cqs;
 };
 
 struct qed_iscsi_id_params {
index f773aa5e746ff47bb886aa19f568a24108dd0d1e..72c770f9f6669a5169f1780f8cae524bc6c3e0b4 100644 (file)
@@ -52,7 +52,8 @@
 #define RDMA_MAX_PDS                            (64 * 1024)
 
 #define RDMA_NUM_STATISTIC_COUNTERS                     MAX_NUM_VPORTS
-#define RDMA_NUM_STATISTIC_COUNTERS_BB                 MAX_NUM_VPORTS_BB
+#define RDMA_NUM_STATISTIC_COUNTERS_K2                  MAX_NUM_VPORTS_K2
+#define RDMA_NUM_STATISTIC_COUNTERS_BB                  MAX_NUM_VPORTS_BB
 
 #define RDMA_TASK_TYPE (PROTOCOLID_ROCE)
 
index bad02df213dfccd11cd25fa1b1e0decaf17f92c6..866f063026dedc6540d87d595bcacf9071f14681 100644 (file)
 
 #define ROCE_MAX_QPS   (32 * 1024)
 
+enum roce_async_events_type {
+       ROCE_ASYNC_EVENT_NONE = 0,
+       ROCE_ASYNC_EVENT_COMM_EST = 1,
+       ROCE_ASYNC_EVENT_SQ_DRAINED,
+       ROCE_ASYNC_EVENT_SRQ_LIMIT,
+       ROCE_ASYNC_EVENT_LAST_WQE_REACHED,
+       ROCE_ASYNC_EVENT_CQ_ERR,
+       ROCE_ASYNC_EVENT_LOCAL_INVALID_REQUEST_ERR,
+       ROCE_ASYNC_EVENT_LOCAL_CATASTROPHIC_ERR,
+       ROCE_ASYNC_EVENT_LOCAL_ACCESS_ERR,
+       ROCE_ASYNC_EVENT_QP_CATASTROPHIC_ERR,
+       ROCE_ASYNC_EVENT_CQ_OVERFLOW_ERR,
+       ROCE_ASYNC_EVENT_SRQ_EMPTY,
+       ROCE_ASYNC_EVENT_DESTROY_QP_DONE,
+       MAX_ROCE_ASYNC_EVENTS_TYPE
+};
+
 #endif /* __ROCE_COMMON__ */
index 03f3e37ab059d5e4b48aa2b7f80366016b27fdf5..08df82a096b62de80e51f34122f4809f0eb6d27e 100644 (file)
@@ -40,6 +40,8 @@
 #define BDQ_ID_IMM_DATA          (1)
 #define BDQ_NUM_IDS          (2)
 
+#define SCSI_NUM_SGES_SLOW_SGL_THR      8
+
 #define BDQ_MAX_EXTERNAL_RING_SIZE (1 << 15)
 
 struct scsi_bd {
@@ -52,6 +54,16 @@ struct scsi_bdq_ram_drv_data {
        __le16 reserved0[3];
 };
 
+struct scsi_sge {
+       struct regpair sge_addr;
+       __le32 sge_len;
+       __le32 reserved;
+};
+
+struct scsi_cached_sges {
+       struct scsi_sge sge[4];
+};
+
 struct scsi_drv_cmdq {
        __le16 cmdq_cons;
        __le16 reserved0;
@@ -99,11 +111,19 @@ struct scsi_ram_per_bdq_resource_drv_data {
        struct scsi_bdq_ram_drv_data drv_data_per_bdq_id[BDQ_NUM_IDS];
 };
 
-struct scsi_sge {
-       struct regpair sge_addr;
-       __le16 sge_len;
-       __le16 reserved0;
-       __le32 reserved1;
+enum scsi_sgl_mode {
+       SCSI_TX_SLOW_SGL,
+       SCSI_FAST_SGL,
+       MAX_SCSI_SGL_MODE
+};
+
+struct scsi_sgl_params {
+       struct regpair sgl_addr;
+       __le32 sgl_total_length;
+       __le32 sge_offset;
+       __le16 sgl_num_sges;
+       u8 sgl_index;
+       u8 reserved;
 };
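
A hedged sketch of a storage driver filling the generic descriptor that replaces the per-protocol fcoe_sge/iscsi_sge variants above; the function name and inputs are illustrative:

static void foo_init_sgl_params(struct scsi_sgl_params *p,
                                dma_addr_t sgl_phys, u32 total_len,
                                u16 nsge)
{
        p->sgl_addr.lo      = cpu_to_le32(lower_32_bits(sgl_phys));
        p->sgl_addr.hi      = cpu_to_le32(upper_32_bits(sgl_phys));
        p->sgl_total_length = cpu_to_le32(total_len);
        p->sge_offset       = cpu_to_le32(0);
        p->sgl_num_sges     = cpu_to_le16(nsge);
        p->sgl_index        = 0;
        p->reserved         = 0;
}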
 
 struct scsi_terminate_extra_params {
index 46fe7856f1b22c828474257ceedf03c182958ec5..a5e843268f0e9431eacd07ad5cc74e10be690b0d 100644 (file)
@@ -173,6 +173,7 @@ enum tcp_seg_placement_event {
        TCP_EVENT_ADD_ISLE_RIGHT,
        TCP_EVENT_ADD_ISLE_LEFT,
        TCP_EVENT_JOIN,
+       TCP_EVENT_DELETE_ISLES,
        TCP_EVENT_NOP,
        MAX_TCP_SEG_PLACEMENT_EVENT
 };
index 7bd2403e4fef1ad7fb0a5f03b4e104e96234d26b..ed5c3838780de5ba9509071bef56e8d521dc5782 100644 (file)
@@ -37,14 +37,26 @@ extern void get_random_bytes(void *buf, int nbytes);
 extern int add_random_ready_callback(struct random_ready_callback *rdy);
 extern void del_random_ready_callback(struct random_ready_callback *rdy);
 extern void get_random_bytes_arch(void *buf, int nbytes);
-extern int random_int_secret_init(void);
 
 #ifndef MODULE
 extern const struct file_operations random_fops, urandom_fops;
 #endif
 
-unsigned int get_random_int(void);
-unsigned long get_random_long(void);
+u32 get_random_u32(void);
+u64 get_random_u64(void);
+static inline unsigned int get_random_int(void)
+{
+       return get_random_u32();
+}
+static inline unsigned long get_random_long(void)
+{
+#if BITS_PER_LONG == 64
+       return get_random_u64();
+#else
+       return get_random_u32();
+#endif
+}
+
 unsigned long randomize_page(unsigned long start, unsigned long range);
 
 u32 prandom_u32(void);
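
A hedged sketch of the intended call pattern; foo_pick_port() is illustrative. The sized helpers draw from a per-CPU batch, which is cheaper than a full get_random_bytes() call for small integral values:

static u16 foo_pick_port(u16 lo, u16 hi)
{
        return lo + (u16)(get_random_u32() % (hi - lo + 1));
}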
index 4ae95f7e8597b0b43575d04aaf524cf252761e6e..a23a3315318048eec1fc18678e17967f03eaa89b 100644 (file)
@@ -156,5 +156,19 @@ static inline void hlist_nulls_add_tail_rcu(struct hlist_nulls_node *n,
                ({ tpos = hlist_nulls_entry(pos, typeof(*tpos), member); 1; }); \
                pos = rcu_dereference_raw(hlist_nulls_next_rcu(pos)))
 
+/**
+ * hlist_nulls_for_each_entry_safe -
+ *   iterate over list of given type safe against removal of list entry
+ * @tpos:      the type * to use as a loop cursor.
+ * @pos:       the &struct hlist_nulls_node to use as a loop cursor.
+ * @head:      the head for your list.
+ * @member:    the name of the hlist_nulls_node within the struct.
+ */
+#define hlist_nulls_for_each_entry_safe(tpos, pos, head, member)               \
+       for (({barrier();}),                                                    \
+            pos = rcu_dereference_raw(hlist_nulls_first_rcu(head));            \
+               (!is_a_nulls(pos)) &&                                           \
+               ({ tpos = hlist_nulls_entry(pos, typeof(*tpos), member);        \
+                  pos = rcu_dereference_raw(hlist_nulls_next_rcu(pos)); 1; });)
 #endif
 #endif
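
A hedged usage sketch of the new iterator; struct foo_sock and foo_flush() are illustrative. Because the macro advances the cursor before running the body, the current entry may be unlinked safely:

struct foo_sock {
        struct hlist_nulls_node node;
};

static void foo_flush(struct hlist_nulls_head *head)
{
        struct foo_sock *s;
        struct hlist_nulls_node *n;

        hlist_nulls_for_each_entry_safe(s, n, head, node)
                hlist_nulls_del_rcu(&s->node);  /* safe mid-walk */
}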
index ad3e5158e586dc841e9cd37492ec7104d60e7a81..c9f795e9a2ee26aaf562e9a97a2fe2f963a2f054 100644 (file)
@@ -65,7 +65,7 @@ struct regulator_state {
        int uV; /* suspend voltage */
        unsigned int mode; /* suspend regulator operating mode */
        int enabled; /* is regulator enabled in this suspend state */
-       int disabled; /* is the regulator disbled in this suspend state */
+       int disabled; /* is the regulator disabled in this suspend state */
 };
 
 /**
index 86b4ed75359e85345afb839483e5910bcf5fe6cb..96fb139bdd08fdec3ad83e689ad2808375473126 100644 (file)
@@ -31,31 +31,26 @@ static inline int device_reset_optional(struct device *dev)
 
 static inline int reset_control_reset(struct reset_control *rstc)
 {
-       WARN_ON(1);
        return 0;
 }
 
 static inline int reset_control_assert(struct reset_control *rstc)
 {
-       WARN_ON(1);
        return 0;
 }
 
 static inline int reset_control_deassert(struct reset_control *rstc)
 {
-       WARN_ON(1);
        return 0;
 }
 
 static inline int reset_control_status(struct reset_control *rstc)
 {
-       WARN_ON(1);
        return 0;
 }
 
 static inline void reset_control_put(struct reset_control *rstc)
 {
-       WARN_ON(1);
 }
 
 static inline int __must_check device_reset(struct device *dev)
@@ -74,14 +69,14 @@ static inline struct reset_control *__of_reset_control_get(
                                        const char *id, int index, bool shared,
                                        bool optional)
 {
-       return ERR_PTR(-ENOTSUPP);
+       return optional ? NULL : ERR_PTR(-ENOTSUPP);
 }
 
 static inline struct reset_control *__devm_reset_control_get(
                                        struct device *dev, const char *id,
                                        int index, bool shared, bool optional)
 {
-       return ERR_PTR(-ENOTSUPP);
+       return optional ? NULL : ERR_PTR(-ENOTSUPP);
 }
 
 #endif /* CONFIG_RESET_CONTROLLER */
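
A hedged sketch of the consumer pattern the relaxed stubs now support; foo_probe() is illustrative. An optional-but-absent reset comes back as NULL, and NULL is accepted by the reset_control_*() calls as a quiet no-op:

static int foo_probe(struct device *dev)
{
        struct reset_control *rst;

        rst = devm_reset_control_get_optional(dev, NULL);
        if (IS_ERR(rst))
                return PTR_ERR(rst);    /* a real error, not "absent" */

        reset_control_assert(rst);      /* no-op when rst == NULL */
        reset_control_deassert(rst);
        return 0;
}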
index 092292b6675e2cf08b1138410a8488bc87986495..e507290cd2c7736ff3e84527a87748dfdc504d9c 100644 (file)
@@ -915,6 +915,28 @@ static inline int rhashtable_lookup_insert_fast(
        return ret == NULL ? 0 : -EEXIST;
 }
 
+/**
+ * rhashtable_lookup_get_insert_fast - lookup and insert object into hash table
+ * @ht:                hash table
+ * @obj:       pointer to hash head inside object
+ * @params:    hash table parameters
+ *
+ * Just like rhashtable_lookup_insert_fast(), but this function returns the
+ * existing object if one is already present, NULL if there was none and
+ * the insertion succeeded, and an ERR_PTR on failure.
+ */
+static inline void *rhashtable_lookup_get_insert_fast(
+       struct rhashtable *ht, struct rhash_head *obj,
+       const struct rhashtable_params params)
+{
+       const char *key = rht_obj(ht, obj);
+
+       BUG_ON(ht->p.obj_hashfn);
+
+       return __rhashtable_insert_fast(ht, key + ht->p.key_offset, obj, params,
+                                       false);
+}
+
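
A hedged usage sketch; struct foo and foo_add() are illustrative. Getting the clashing object back directly saves the second lookup that rhashtable_lookup_insert_fast() callers needed after -EEXIST:

struct foo {
        u32 key;
        struct rhash_head node;
};

static struct foo *foo_add(struct rhashtable *ht, struct foo *new,
                           const struct rhashtable_params *params)
{
        void *old = rhashtable_lookup_get_insert_fast(ht, &new->node,
                                                      *params);

        if (IS_ERR(old))
                return old;     /* insertion failed */
        return old ?: new;      /* a pre-existing entry wins */
}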
 /**
  * rhashtable_lookup_insert_key - search and insert object to hash table
  *                               with explicit key
index 8ec8b6439b25edb956bb06bcf116c190a46ce62c..f27917e0a10114f9c72e228c42b6f60efd51b541 100644 (file)
@@ -6,7 +6,7 @@
 
 struct qcom_smd_edge;
 
-#if IS_ENABLED(CONFIG_RPMSG_QCOM_SMD) || IS_ENABLED(CONFIG_QCOM_SMD)
+#if IS_ENABLED(CONFIG_RPMSG_QCOM_SMD)
 
 struct qcom_smd_edge *qcom_smd_register_edge(struct device *parent,
                                             struct device_node *node);
index 4a68c67912078ec765e25ad59b88da70feacb0b6..34fe92ce1ebd7c6e9dfa0dac94ff4512deb6f95f 100644 (file)
@@ -54,15 +54,16 @@ static inline u64 local_clock(void)
 }
 #else
 extern void sched_clock_init_late(void);
-/*
- * Architectures can set this to 1 if they have specified
- * CONFIG_HAVE_UNSTABLE_SCHED_CLOCK in their arch Kconfig,
- * but then during bootup it turns out that sched_clock()
- * is reliable after all:
- */
 extern int sched_clock_stable(void);
 extern void clear_sched_clock_stable(void);
 
+/*
+ * When sched_clock_stable(), __sched_clock_offset provides the offset
+ * between local_clock() and sched_clock().
+ */
+extern u64 __sched_clock_offset;
+
+
 extern void sched_clock_tick(void);
 extern void sched_clock_idle_sleep_event(void);
 extern void sched_clock_idle_wakeup_event(u64 delta_ns);
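
As a one-line illustration of the invariant the new export captures (a hedged sketch, only meaningful while sched_clock_stable() holds):

/* local_clock() == sched_clock() + __sched_clock_offset */
u64 now = sched_clock() + __sched_clock_offset;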
index c776abd86937f52b002773728b5611746f199281..741d75cfc6862be1c524931550ccec7831328104 100644 (file)
@@ -413,14 +413,15 @@ struct ubuf_info {
  * the end of the header data, ie. at skb->end.
  */
 struct skb_shared_info {
+       unsigned short  _unused;
        unsigned char   nr_frags;
        __u8            tx_flags;
        unsigned short  gso_size;
        /* Warning: this field is not always filled in (UFO)! */
        unsigned short  gso_segs;
-       unsigned short  gso_type;
        struct sk_buff  *frag_list;
        struct skb_shared_hwtstamps hwtstamps;
+       unsigned int    gso_type;
        u32             tskey;
        __be32          ip6_frag_id;
 
diff --git a/include/linux/soc/qcom/smd.h b/include/linux/soc/qcom/smd.h
deleted file mode 100644 (file)
index f148e0f..0000000
+++ /dev/null
@@ -1,139 +0,0 @@
-#ifndef __QCOM_SMD_H__
-#define __QCOM_SMD_H__
-
-#include <linux/device.h>
-#include <linux/mod_devicetable.h>
-
-struct qcom_smd;
-struct qcom_smd_channel;
-struct qcom_smd_lookup;
-
-/**
- * struct qcom_smd_id - struct used for matching a smd device
- * @name:      name of the channel
- */
-struct qcom_smd_id {
-       char name[20];
-};
-
-/**
- * struct qcom_smd_device - smd device struct
- * @dev:       the device struct
- * @channel:   handle to the smd channel for this device
- */
-struct qcom_smd_device {
-       struct device dev;
-       struct qcom_smd_channel *channel;
-};
-
-typedef int (*qcom_smd_cb_t)(struct qcom_smd_channel *, const void *, size_t);
-
-/**
- * struct qcom_smd_driver - smd driver struct
- * @driver:    underlying device driver
- * @smd_match_table: static channel match table
- * @probe:     invoked when the smd channel is found
- * @remove:    invoked when the smd channel is closed
- * @callback:  invoked when an inbound message is received on the channel,
- *             should return 0 on success or -EBUSY if the data cannot be
- *             consumed at this time
- */
-struct qcom_smd_driver {
-       struct device_driver driver;
-       const struct qcom_smd_id *smd_match_table;
-
-       int (*probe)(struct qcom_smd_device *dev);
-       void (*remove)(struct qcom_smd_device *dev);
-       qcom_smd_cb_t callback;
-};
-
-#if IS_ENABLED(CONFIG_QCOM_SMD)
-
-int qcom_smd_driver_register(struct qcom_smd_driver *drv);
-void qcom_smd_driver_unregister(struct qcom_smd_driver *drv);
-
-struct qcom_smd_channel *qcom_smd_open_channel(struct qcom_smd_channel *channel,
-                                              const char *name,
-                                              qcom_smd_cb_t cb);
-void qcom_smd_close_channel(struct qcom_smd_channel *channel);
-void *qcom_smd_get_drvdata(struct qcom_smd_channel *channel);
-void qcom_smd_set_drvdata(struct qcom_smd_channel *channel, void *data);
-int qcom_smd_send(struct qcom_smd_channel *channel, const void *data, int len);
-
-
-struct qcom_smd_edge *qcom_smd_register_edge(struct device *parent,
-                                            struct device_node *node);
-int qcom_smd_unregister_edge(struct qcom_smd_edge *edge);
-
-#else
-
-static inline int qcom_smd_driver_register(struct qcom_smd_driver *drv)
-{
-       return -ENXIO;
-}
-
-static inline void qcom_smd_driver_unregister(struct qcom_smd_driver *drv)
-{
-       /* This shouldn't be possible */
-       WARN_ON(1);
-}
-
-static inline struct qcom_smd_channel *
-qcom_smd_open_channel(struct qcom_smd_channel *channel,
-                     const char *name,
-                     qcom_smd_cb_t cb)
-{
-       /* This shouldn't be possible */
-       WARN_ON(1);
-       return NULL;
-}
-
-static inline void qcom_smd_close_channel(struct qcom_smd_channel *channel)
-{
-       /* This shouldn't be possible */
-       WARN_ON(1);
-}
-
-static inline void *qcom_smd_get_drvdata(struct qcom_smd_channel *channel)
-{
-       /* This shouldn't be possible */
-       WARN_ON(1);
-       return NULL;
-}
-
-static inline void qcom_smd_set_drvdata(struct qcom_smd_channel *channel, void *data)
-{
-       /* This shouldn't be possible */
-       WARN_ON(1);
-}
-
-static inline int qcom_smd_send(struct qcom_smd_channel *channel,
-                               const void *data, int len)
-{
-       /* This shouldn't be possible */
-       WARN_ON(1);
-       return -ENXIO;
-}
-
-static inline struct qcom_smd_edge *
-qcom_smd_register_edge(struct device *parent,
-                      struct device_node *node)
-{
-       return ERR_PTR(-ENXIO);
-}
-
-static inline int qcom_smd_unregister_edge(struct qcom_smd_edge *edge)
-{
-       /* This shouldn't be possible */
-       WARN_ON(1);
-       return -ENXIO;
-}
-
-#endif
-
-#define module_qcom_smd_driver(__smd_driver) \
-       module_driver(__smd_driver, qcom_smd_driver_register, \
-                     qcom_smd_driver_unregister)
-
-
-#endif
index eab64976a73b0e1aa2c15de6a06ae65e1333de99..a4dd4d7c711dc000fc3f1fe93a794dbff245b418 100644 (file)
@@ -1,16 +1,19 @@
 #ifndef __WCNSS_CTRL_H__
 #define __WCNSS_CTRL_H__
 
-#include <linux/soc/qcom/smd.h>
+#include <linux/rpmsg.h>
 
 #if IS_ENABLED(CONFIG_QCOM_WCNSS_CTRL)
 
-struct qcom_smd_channel *qcom_wcnss_open_channel(void *wcnss, const char *name, qcom_smd_cb_t cb);
+struct rpmsg_endpoint *qcom_wcnss_open_channel(void *wcnss, const char *name,
+                                              rpmsg_rx_cb_t cb, void *priv);
 
 #else
 
-static inline struct qcom_smd_channel*
-qcom_wcnss_open_channel(void *wcnss, const char *name, qcom_smd_cb_t cb)
+static inline struct rpmsg_endpoint *
+qcom_wcnss_open_channel(void *wcnss, const char *name, rpmsg_rx_cb_t cb,
+                       void *priv)
 {
        WARN_ON(1);
        return ERR_PTR(-ENXIO);
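Client code makes the same smd-to-rpmsg transition: the receive callback type changes from qcom_smd_cb_t to rpmsg_rx_cb_t and gains a private cookie, and the channel handle becomes an rpmsg endpoint. A minimal caller sketch against the new signature; the demo_* names and the channel string are illustrative, not taken from this patch:

    #include <linux/device.h>
    #include <linux/err.h>
    #include <linux/rpmsg.h>
    #include <linux/soc/qcom/wcnss_ctrl.h>

    /* Hypothetical receive callback; 'priv' is the cookie passed below. */
    static int demo_wcnss_cb(struct rpmsg_device *rpdev, void *data, int len,
                             void *priv, u32 addr)
    {
        dev_dbg(priv, "received %d bytes\n", len);
        return 0;
    }

    static int demo_wcnss_attach(struct device *dev, void *wcnss)
    {
        struct rpmsg_endpoint *ept;

        /* "DEMO_CTRL" is a placeholder channel name */
        ept = qcom_wcnss_open_channel(wcnss, "DEMO_CTRL", demo_wcnss_cb, dev);
        return PTR_ERR_OR_ZERO(ept);
    }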
index a0596ca0e80ac77aeb0afa29648532ef51a5deae..a2f8109bb215751427e99b3c026badfe7113b875 100644 (file)
@@ -24,6 +24,7 @@ void sock_diag_unregister(const struct sock_diag_handler *h);
 void sock_diag_register_inet_compat(int (*fn)(struct sk_buff *skb, struct nlmsghdr *nlh));
 void sock_diag_unregister_inet_compat(int (*fn)(struct sk_buff *skb, struct nlmsghdr *nlh));
 
+u64 sock_gen_cookie(struct sock *sk);
 int sock_diag_check_cookie(struct sock *sk, const __u32 *cookie);
 void sock_diag_save_cookie(struct sock *sk, __u32 *cookie);
 
index fc273e9d5f67625b9ddf611746d1a09ff555e4ab..3921cb9dfadb9b4a9bbe30854ccada75725b8005 100644 (file)
@@ -28,6 +28,9 @@
 
 #include <linux/platform_device.h>
 
+#define MTL_MAX_RX_QUEUES      8
+#define MTL_MAX_TX_QUEUES      8
+
 #define STMMAC_RX_COE_NONE     0
 #define STMMAC_RX_COE_TYPE1    1
 #define STMMAC_RX_COE_TYPE2    2
 #define        STMMAC_CSR_150_250M     0x4     /* MDC = clk_scr_i/102 */
 #define        STMMAC_CSR_250_300M     0x5     /* MDC = clk_scr_i/122 */
 
+/* MTL algorithms identifiers */
+#define MTL_TX_ALGORITHM_WRR   0x0
+#define MTL_TX_ALGORITHM_WFQ   0x1
+#define MTL_TX_ALGORITHM_DWRR  0x2
+#define MTL_TX_ALGORITHM_SP    0x3
+#define MTL_RX_ALGORITHM_SP    0x4
+#define MTL_RX_ALGORITHM_WSP   0x5
+
+/* RX/TX Queue Mode */
+#define MTL_QUEUE_AVB          0x0
+#define MTL_QUEUE_DCB          0x1
+
 /* The MDC clock could be set higher than the IEEE 802.3
  * specified frequency limit of 2.5 MHz, by programming a clock divider
  * of value different than the above defined values. The resultant MDIO
@@ -109,6 +124,26 @@ struct stmmac_axi {
        bool axi_rb;
 };
 
+struct stmmac_rxq_cfg {
+       u8 mode_to_use;
+       u8 chan;
+       u8 pkt_route;
+       bool use_prio;
+       u32 prio;
+};
+
+struct stmmac_txq_cfg {
+       u8 weight;
+       u8 mode_to_use;
+       /* Credit Base Shaper parameters */
+       u32 send_slope;
+       u32 idle_slope;
+       u32 high_credit;
+       u32 low_credit;
+       bool use_prio;
+       u32 prio;
+};
+
 struct plat_stmmacenet_data {
        int bus_id;
        int phy_addr;
@@ -133,6 +168,12 @@ struct plat_stmmacenet_data {
        int unicast_filter_entries;
        int tx_fifo_size;
        int rx_fifo_size;
+       u8 rx_queues_to_use;
+       u8 tx_queues_to_use;
+       u8 rx_sched_algorithm;
+       u8 tx_sched_algorithm;
+       struct stmmac_rxq_cfg rx_queues_cfg[MTL_MAX_RX_QUEUES];
+       struct stmmac_txq_cfg tx_queues_cfg[MTL_MAX_TX_QUEUES];
        void (*fix_mac_speed)(void *priv, unsigned int speed);
        int (*init)(struct platform_device *pdev, void *priv);
        void (*exit)(struct platform_device *pdev, void *priv);
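The new fields let platform glue describe the MTL queue layout instead of the driver assuming a single queue. A hedged sketch of code configuring one DCB queue per direction with strict-priority scheduling; the values are illustrative and only identifiers added above are assumed:

    #include <linux/stmmac.h>

    static void demo_stmmac_one_queue(struct plat_stmmacenet_data *plat)
    {
        plat->rx_queues_to_use = 1;
        plat->tx_queues_to_use = 1;
        plat->rx_sched_algorithm = MTL_RX_ALGORITHM_SP;
        plat->tx_sched_algorithm = MTL_TX_ALGORITHM_SP;

        /* queue 0: DCB mode, mapped to DMA channel 0, weight 1 */
        plat->rx_queues_cfg[0].mode_to_use = MTL_QUEUE_DCB;
        plat->rx_queues_cfg[0].chan = 0;
        plat->tx_queues_cfg[0].mode_to_use = MTL_QUEUE_DCB;
        plat->tx_queues_cfg[0].weight = 1;
    }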
index c0f530809d1f3db7323e51a52224eb49d8f97da0..6cb4061a720d2df5e5f9467de8269529195ce827 100644 (file)
@@ -115,6 +115,6 @@ static inline bool udp_get_no_check6_rx(struct sock *sk)
 #define udp_portaddr_for_each_entry_rcu(__sk, list) \
        hlist_for_each_entry_rcu(__sk, list, __sk_common.skc_portaddr_node)
 
-#define IS_UDPLITE(__sk) (udp_sk(__sk)->pcflag)
+#define IS_UDPLITE(__sk) (__sk->sk_protocol == IPPROTO_UDPLITE)
 
 #endif /* _LINUX_UDP_H */
index 1d0043dc34e427403a1c82458249bed83e87d3a0..de2a722fe3cf7c457352eb02548f0351a669ddf5 100644 (file)
 /* device can't handle Link Power Management */
 #define USB_QUIRK_NO_LPM                       BIT(10)
 
+/*
+ * Device reports its bInterval as linear frames instead of the
+ * USB 2.0 calculation.
+ */
+#define USB_QUIRK_LINEAR_FRAME_INTR_BINTERVAL  BIT(11)
+
 #endif /* __LINUX_USB_QUIRKS_H */
index 6e0ce8c7b8cb5a9fcb985a5a5078f82267d03092..7dffa5624ea62bee992e547c44ed4a86a577b0a7 100644 (file)
@@ -64,6 +64,8 @@ struct usbnet {
        struct usb_anchor       deferred;
        struct tasklet_struct   bh;
 
+       struct pcpu_sw_netstats __percpu *stats64;
+
        struct work_struct      kevent;
        unsigned long           flags;
 #              define EVENT_TX_HALT    0
@@ -261,10 +263,10 @@ extern void usbnet_pause_rx(struct usbnet *);
 extern void usbnet_resume_rx(struct usbnet *);
 extern void usbnet_purge_paused_rxq(struct usbnet *);
 
-extern int usbnet_get_settings(struct net_device *net,
-                              struct ethtool_cmd *cmd);
-extern int usbnet_set_settings(struct net_device *net,
-                              struct ethtool_cmd *cmd);
+extern int usbnet_get_link_ksettings(struct net_device *net,
+                                    struct ethtool_link_ksettings *cmd);
+extern int usbnet_set_link_ksettings(struct net_device *net,
+                                    const struct ethtool_link_ksettings *cmd);
 extern u32 usbnet_get_link(struct net_device *net);
 extern u32 usbnet_get_msglevel(struct net_device *);
 extern void usbnet_set_msglevel(struct net_device *, u32);
@@ -278,5 +280,7 @@ extern int usbnet_status_start(struct usbnet *dev, gfp_t mem_flags);
 extern void usbnet_status_stop(struct usbnet *dev);
 
 extern void usbnet_update_max_qlen(struct usbnet *dev);
+extern void usbnet_get_stats64(struct net_device *dev,
+                              struct rtnl_link_stats64 *stats);
 
 #endif /* __LINUX_USB_USBNET_H */
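Minidrivers built on usbnet update their ethtool_ops the same way: the deprecated get_settings/set_settings pair gives way to the ksettings variants. A sketch of the resulting table, using only helpers declared in this header:

    #include <linux/ethtool.h>
    #include <linux/usb/usbnet.h>

    static const struct ethtool_ops demo_usbnet_ethtool_ops = {
        .get_link           = usbnet_get_link,
        .get_msglevel       = usbnet_get_msglevel,
        .set_msglevel       = usbnet_set_msglevel,
        .get_link_ksettings = usbnet_get_link_ksettings,
        .set_link_ksettings = usbnet_set_link_ksettings,
    };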
index be765234c0a2b999f0f2bfcdb9f36b2cc16fa0c6..32354b4b4b2ba5ae72034d00c3b1d43fa8c2a15c 100644 (file)
@@ -72,7 +72,7 @@ struct ucounts {
        struct hlist_node node;
        struct user_namespace *ns;
        kuid_t uid;
-       atomic_t count;
+       int count;
        atomic_t ucount[UCOUNT_COUNTS];
 };
 
index 0468548acebfef5431ea7bfd6f565cfdfb73f348..48a3483dccb12360e288ffdd97a9bf8d9d9080a4 100644 (file)
@@ -61,8 +61,7 @@ extern void mremap_userfaultfd_complete(struct vm_userfaultfd_ctx *,
                                        unsigned long from, unsigned long to,
                                        unsigned long len);
 
-extern void userfaultfd_remove(struct vm_area_struct *vma,
-                              struct vm_area_struct **prev,
+extern bool userfaultfd_remove(struct vm_area_struct *vma,
                               unsigned long start,
                               unsigned long end);
 
@@ -72,8 +71,6 @@ extern int userfaultfd_unmap_prep(struct vm_area_struct *vma,
 extern void userfaultfd_unmap_complete(struct mm_struct *mm,
                                       struct list_head *uf);
 
-extern void userfaultfd_exit(struct mm_struct *mm);
-
 #else /* CONFIG_USERFAULTFD */
 
 /* mm helpers */
@@ -120,11 +117,11 @@ static inline void mremap_userfaultfd_complete(struct vm_userfaultfd_ctx *ctx,
 {
 }
 
-static inline void userfaultfd_remove(struct vm_area_struct *vma,
-                                     struct vm_area_struct **prev,
+static inline bool userfaultfd_remove(struct vm_area_struct *vma,
                                      unsigned long start,
                                      unsigned long end)
 {
+       return true;
 }
 
 static inline int userfaultfd_unmap_prep(struct vm_area_struct *vma,
@@ -139,10 +136,6 @@ static inline void userfaultfd_unmap_complete(struct mm_struct *mm,
 {
 }
 
-static inline void userfaultfd_exit(struct mm_struct *mm)
-{
-}
-
 #endif /* CONFIG_USERFAULTFD */
 
 #endif /* _LINUX_USERFAULTFD_K_H */
index 9638bfeb0d1f639ae310d1586b4e2fca567ba2f7..584f9a647ad4acca191ff6116a47c14da1385fa3 100644 (file)
@@ -48,6 +48,8 @@ struct virtio_vsock_pkt {
        struct virtio_vsock_hdr hdr;
        struct work_struct work;
        struct list_head list;
+       /* socket refcnt not held, only use for cancellation */
+       struct vsock_sock *vsk;
        void *buf;
        u32 len;
        u32 off;
@@ -56,6 +58,7 @@ struct virtio_vsock_pkt {
 
 struct virtio_vsock_pkt_info {
        u32 remote_cid, remote_port;
+       struct vsock_sock *vsk;
        struct msghdr *msg;
        u32 pkt_len;
        u16 type;
index 6aa1b6cb58285d92ccd4a53d8de660669f518a6b..a80b7b59cf33418811217faca1b9c6b041dad814 100644 (file)
@@ -79,6 +79,9 @@ enum vm_event_item { PGPGIN, PGPGOUT, PSWPIN, PSWPOUT,
                THP_SPLIT_PAGE_FAILED,
                THP_DEFERRED_SPLIT_PAGE,
                THP_SPLIT_PMD,
+#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
+               THP_SPLIT_PUD,
+#endif
                THP_ZERO_PAGE_ALLOC,
                THP_ZERO_PAGE_ALLOC_FAILED,
 #endif
index aacb1282d19a38d7b633bf7aedcc40fa258584f8..db076ca7f11da03f474be67f792e1189b96425eb 100644 (file)
@@ -620,30 +620,19 @@ do {                                                                      \
        __ret;                                                          \
 })
 
+extern int do_wait_intr(wait_queue_head_t *, wait_queue_t *);
+extern int do_wait_intr_irq(wait_queue_head_t *, wait_queue_t *);
 
-#define __wait_event_interruptible_locked(wq, condition, exclusive, irq) \
+#define __wait_event_interruptible_locked(wq, condition, exclusive, fn) \
 ({                                                                     \
-       int __ret = 0;                                                  \
+       int __ret;                                                      \
        DEFINE_WAIT(__wait);                                            \
        if (exclusive)                                                  \
                __wait.flags |= WQ_FLAG_EXCLUSIVE;                      \
        do {                                                            \
-               if (likely(list_empty(&__wait.task_list)))              \
-                       __add_wait_queue_tail(&(wq), &__wait);          \
-               set_current_state(TASK_INTERRUPTIBLE);                  \
-               if (signal_pending(current)) {                          \
-                       __ret = -ERESTARTSYS;                           \
+               __ret = fn(&(wq), &__wait);                             \
+               if (__ret)                                              \
                        break;                                          \
-               }                                                       \
-               if (irq)                                                \
-                       spin_unlock_irq(&(wq).lock);                    \
-               else                                                    \
-                       spin_unlock(&(wq).lock);                        \
-               schedule();                                             \
-               if (irq)                                                \
-                       spin_lock_irq(&(wq).lock);                      \
-               else                                                    \
-                       spin_lock(&(wq).lock);                          \
        } while (!(condition));                                         \
        __remove_wait_queue(&(wq), &__wait);                            \
        __set_current_state(TASK_RUNNING);                              \
@@ -676,7 +665,7 @@ do {                                                                        \
  */
 #define wait_event_interruptible_locked(wq, condition)                 \
        ((condition)                                                    \
-        ? 0 : __wait_event_interruptible_locked(wq, condition, 0, 0))
+        ? 0 : __wait_event_interruptible_locked(wq, condition, 0, do_wait_intr))
 
 /**
  * wait_event_interruptible_locked_irq - sleep until a condition gets true
@@ -703,7 +692,7 @@ do {                                                                        \
  */
 #define wait_event_interruptible_locked_irq(wq, condition)             \
        ((condition)                                                    \
-        ? 0 : __wait_event_interruptible_locked(wq, condition, 0, 1))
+        ? 0 : __wait_event_interruptible_locked(wq, condition, 0, do_wait_intr_irq))
 
 /**
  * wait_event_interruptible_exclusive_locked - sleep exclusively until a condition gets true
@@ -734,7 +723,7 @@ do {                                                                        \
  */
 #define wait_event_interruptible_exclusive_locked(wq, condition)       \
        ((condition)                                                    \
-        ? 0 : __wait_event_interruptible_locked(wq, condition, 1, 0))
+        ? 0 : __wait_event_interruptible_locked(wq, condition, 1, do_wait_intr))
 
 /**
  * wait_event_interruptible_exclusive_locked_irq - sleep until a condition gets true
@@ -765,7 +754,7 @@ do {                                                                        \
  */
 #define wait_event_interruptible_exclusive_locked_irq(wq, condition)   \
        ((condition)                                                    \
-        ? 0 : __wait_event_interruptible_locked(wq, condition, 1, 1))
+        ? 0 : __wait_event_interruptible_locked(wq, condition, 1, do_wait_intr_irq))
 
 
 #define __wait_event_killable(wq, condition)                           \
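The enqueue/signal-check/unlock-schedule-relock sequence deleted from the macro above moves behind do_wait_intr() and do_wait_intr_irq(), so the macro body shrinks to a loop around the helper. Reconstructed purely from the removed lines, the non-IRQ helper plausibly reads as below; the real implementation lives in the scheduler code and may differ in detail:

    /* Sketch assembled from the logic removed above, not copied from
     * kernel/sched/wait.c. Returns -ERESTARTSYS on a pending signal,
     * 0 after one unlock/schedule/relock cycle. */
    int do_wait_intr(wait_queue_head_t *wq, wait_queue_t *wait)
    {
        if (likely(list_empty(&wait->task_list)))
            __add_wait_queue_tail(wq, wait);

        set_current_state(TASK_INTERRUPTIBLE);
        if (signal_pending(current))
            return -ERESTARTSYS;

        spin_unlock(&wq->lock);
        schedule();
        spin_lock(&wq->lock);
        return 0;
    }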
index 458b400373d44daf6d2fee8b2a971eb0e326d2c6..38aac554dbbab6384f1a16bd7d914b632b6a6d56 100644 (file)
@@ -20,8 +20,17 @@ struct device;
 
 int vsp1_du_init(struct device *dev);
 
-int vsp1_du_setup_lif(struct device *dev, unsigned int width,
-                     unsigned int height);
+/**
+ * struct vsp1_du_lif_config - VSP LIF configuration
+ * @width: output frame width
+ * @height: output frame height
+ */
+struct vsp1_du_lif_config {
+       unsigned int width;
+       unsigned int height;
+};
+
+int vsp1_du_setup_lif(struct device *dev, const struct vsp1_du_lif_config *cfg);
 
 struct vsp1_du_atomic_config {
        u32 pixelformat;
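Callers of vsp1_du_setup_lif() now pass a configuration structure instead of bare width/height arguments, which leaves room to grow the LIF description without further signature churn. A sketch under the assumption (not stated in this hunk) that a NULL config disables the LIF, mirroring the old width == 0 convention:

    #include <media/vsp1.h>

    static int demo_enable_lif(struct device *vsp1_dev)
    {
        struct vsp1_du_lif_config cfg = {
            .width = 1920,   /* illustrative mode */
            .height = 1080,
        };

        return vsp1_du_setup_lif(vsp1_dev, &cfg);
    }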
index 17c6fd84e287808eca08503b87a56a5d6fc0cc5c..1aeb25dd42a7d07282308f1995888ecdf01f99b8 100644 (file)
@@ -262,8 +262,8 @@ int register_inet6addr_notifier(struct notifier_block *nb);
 int unregister_inet6addr_notifier(struct notifier_block *nb);
 int inet6addr_notifier_call_chain(unsigned long val, void *v);
 
-void inet6_netconf_notify_devconf(struct net *net, int type, int ifindex,
-                                 struct ipv6_devconf *devconf);
+void inet6_netconf_notify_devconf(struct net *net, int event, int type,
+                                 int ifindex, struct ipv6_devconf *devconf);
 
 /**
  * __in6_dev_get - get inet6_dev pointer from netdevice
index 1061a472a3e35b88a7df587fdeefcdb310c793bc..b5f5187f488cc1e663912ec1e12811d49ecb3fd5 100644 (file)
@@ -39,7 +39,7 @@ int rxrpc_kernel_send_data(struct socket *, struct rxrpc_call *,
                           struct msghdr *, size_t);
 int rxrpc_kernel_recv_data(struct socket *, struct rxrpc_call *,
                           void *, size_t, size_t *, bool, u32 *);
-void rxrpc_kernel_abort_call(struct socket *, struct rxrpc_call *,
+bool rxrpc_kernel_abort_call(struct socket *, struct rxrpc_call *,
                             u32, int, const char *);
 void rxrpc_kernel_end_call(struct socket *, struct rxrpc_call *);
 void rxrpc_kernel_get_peer(struct socket *, struct rxrpc_call *,
index f2758964ce6f890e3b11df5ba5bf2eefe636abd1..f32ed9ac181a47c00757596fc3b8c5733426c468 100644 (file)
@@ -100,6 +100,9 @@ struct vsock_transport {
        void (*destruct)(struct vsock_sock *);
        void (*release)(struct vsock_sock *);
 
+       /* Cancel all pending packets sent on vsock. */
+       int (*cancel_pkt)(struct vsock_sock *vsk);
+
        /* Connections. */
        int (*connect)(struct vsock_sock *);
 
index 3c857778a6ca6870f7e7d5604adcd263380e4708..04a21e8048be43a2a54579a68d0b39f39c1784e4 100644 (file)
@@ -153,7 +153,8 @@ struct slave {
        unsigned long last_link_up;
        unsigned long last_rx;
        unsigned long target_last_arp_rx[BOND_MAX_ARP_TARGETS];
-       s8     link;    /* one of BOND_LINK_XXXX */
+       s8     link;            /* one of BOND_LINK_XXXX */
+       s8     link_new_state;  /* one of BOND_LINK_XXXX */
        s8     new_link;
        u8     backup:1,   /* indicates backup slave. Value corresponds with
                              BOND_STATE_ACTIVE and BOND_STATE_BACKUP */
@@ -165,7 +166,7 @@ struct slave {
        u32    link_failure_count;
        u32    speed;
        u16    queue_id;
-       u8     perm_hwaddr[ETH_ALEN];
+       u8     perm_hwaddr[MAX_ADDR_LEN];
        struct ad_slave_info *ad_info;
        struct tlb_slave_info tlb_info;
 #ifdef CONFIG_NET_POLL_CONTROLLER
@@ -401,6 +402,16 @@ static inline bool bond_slave_can_tx(struct slave *slave)
               bond_is_active_slave(slave);
 }
 
+static inline void bond_hw_addr_copy(u8 *dst, const u8 *src, unsigned int len)
+{
+       if (len == ETH_ALEN) {
+               ether_addr_copy(dst, src);
+               return;
+       }
+
+       memcpy(dst, src, len);
+}
+
 #define BOND_PRI_RESELECT_ALWAYS       0
 #define BOND_PRI_RESELECT_BETTER       1
 #define BOND_PRI_RESELECT_FAILURE      2
@@ -504,13 +515,17 @@ static inline bool bond_is_slave_inactive(struct slave *slave)
        return slave->inactive;
 }
 
-static inline void bond_set_slave_link_state(struct slave *slave, int state,
-                                            bool notify)
+static inline void bond_propose_link_state(struct slave *slave, int state)
 {
-       if (slave->link == state)
+       slave->link_new_state = state;
+}
+
+static inline void bond_commit_link_state(struct slave *slave, bool notify)
+{
+       if (slave->link == slave->link_new_state)
                return;
 
-       slave->link = state;
+       slave->link = slave->link_new_state;
        if (notify) {
                bond_queue_slave_event(slave);
                bond_lower_state_changed(slave);
@@ -523,6 +538,13 @@ static inline void bond_set_slave_link_state(struct slave *slave, int state,
        }
 }
 
+static inline void bond_set_slave_link_state(struct slave *slave, int state,
+                                            bool notify)
+{
+       bond_propose_link_state(slave, state);
+       bond_commit_link_state(slave, notify);
+}
+
 static inline void bond_slave_link_notify(struct bonding *bond)
 {
        struct list_head *iter;
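Splitting bond_set_slave_link_state() into propose and commit halves lets the link monitor record tentative states for every slave first and apply them together later (for instance after reacquiring RTNL), rather than flipping each slave as it is scanned. An illustrative two-phase user; demo_link_ok() is a placeholder predicate, not part of the patch:

    static bool demo_link_ok(struct slave *slave)
    {
        return slave->link_failure_count == 0;  /* placeholder check */
    }

    static void demo_scan(struct bonding *bond)
    {
        struct list_head *iter;
        struct slave *slave;

        bond_for_each_slave(bond, slave, iter)
            bond_propose_link_state(slave, demo_link_ok(slave) ?
                                    BOND_LINK_UP : BOND_LINK_DOWN);
    }

    static void demo_commit(struct bonding *bond)
    {
        struct list_head *iter;
        struct slave *slave;

        bond_for_each_slave(bond, slave, iter)
            bond_commit_link_state(slave, true);
    }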
index c0452de83086e51738a249bea5fa86d6fb121760..8ffd434676b7a270af73534f3dbcc26f370fe215 100644 (file)
@@ -35,83 +35,101 @@ struct napi_struct;
 extern unsigned int sysctl_net_busy_read __read_mostly;
 extern unsigned int sysctl_net_busy_poll __read_mostly;
 
+/*             0 - Reserved to indicate value not set
+ *     1..NR_CPUS - Reserved for sender_cpu
+ *  NR_CPUS+1..~0 - Region available for NAPI IDs
+ */
+#define MIN_NAPI_ID ((unsigned int)(NR_CPUS + 1))
+
 static inline bool net_busy_loop_on(void)
 {
        return sysctl_net_busy_poll;
 }
 
-static inline u64 busy_loop_us_clock(void)
+static inline bool sk_can_busy_loop(const struct sock *sk)
 {
-       return local_clock() >> 10;
+       return sk->sk_ll_usec && !signal_pending(current);
 }
 
-static inline unsigned long sk_busy_loop_end_time(struct sock *sk)
-{
-       return busy_loop_us_clock() + ACCESS_ONCE(sk->sk_ll_usec);
-}
+bool sk_busy_loop_end(void *p, unsigned long start_time);
 
-/* in poll/select we use the global sysctl_net_ll_poll value */
-static inline unsigned long busy_loop_end_time(void)
+void napi_busy_loop(unsigned int napi_id,
+                   bool (*loop_end)(void *, unsigned long),
+                   void *loop_end_arg);
+
+#else /* CONFIG_NET_RX_BUSY_POLL */
+static inline unsigned long net_busy_loop_on(void)
 {
-       return busy_loop_us_clock() + ACCESS_ONCE(sysctl_net_busy_poll);
+       return 0;
 }
 
-static inline bool sk_can_busy_loop(const struct sock *sk)
+static inline bool sk_can_busy_loop(struct sock *sk)
 {
-       return sk->sk_ll_usec && sk->sk_napi_id && !signal_pending(current);
+       return false;
 }
 
+#endif /* CONFIG_NET_RX_BUSY_POLL */
 
-static inline bool busy_loop_timeout(unsigned long end_time)
+static inline unsigned long busy_loop_current_time(void)
 {
-       unsigned long now = busy_loop_us_clock();
-
-       return time_after(now, end_time);
+#ifdef CONFIG_NET_RX_BUSY_POLL
+       return (unsigned long)(local_clock() >> 10);
+#else
+       return 0;
+#endif
 }
 
-bool sk_busy_loop(struct sock *sk, int nonblock);
-
-/* used in the NIC receive handler to mark the skb */
-static inline void skb_mark_napi_id(struct sk_buff *skb,
-                                   struct napi_struct *napi)
+/* in poll/select we use the global sysctl_net_ll_poll value */
+static inline bool busy_loop_timeout(unsigned long start_time)
 {
-       skb->napi_id = napi->napi_id;
-}
+#ifdef CONFIG_NET_RX_BUSY_POLL
+       unsigned long bp_usec = READ_ONCE(sysctl_net_busy_poll);
 
+       if (bp_usec) {
+               unsigned long end_time = start_time + bp_usec;
+               unsigned long now = busy_loop_current_time();
 
-#else /* CONFIG_NET_RX_BUSY_POLL */
-static inline unsigned long net_busy_loop_on(void)
-{
-       return 0;
+               return time_after(now, end_time);
+       }
+#endif
+       return true;
 }
 
-static inline unsigned long busy_loop_end_time(void)
+static inline bool sk_busy_loop_timeout(struct sock *sk,
+                                       unsigned long start_time)
 {
-       return 0;
-}
+#ifdef CONFIG_NET_RX_BUSY_POLL
+       unsigned long bp_usec = READ_ONCE(sk->sk_ll_usec);
 
-static inline bool sk_can_busy_loop(struct sock *sk)
-{
-       return false;
-}
+       if (bp_usec) {
+               unsigned long end_time = start_time + bp_usec;
+               unsigned long now = busy_loop_current_time();
 
-static inline void skb_mark_napi_id(struct sk_buff *skb,
-                                   struct napi_struct *napi)
-{
+               return time_after(now, end_time);
+       }
+#endif
+       return true;
 }
 
-static inline bool busy_loop_timeout(unsigned long end_time)
+static inline void sk_busy_loop(struct sock *sk, int nonblock)
 {
-       return true;
+#ifdef CONFIG_NET_RX_BUSY_POLL
+       unsigned int napi_id = READ_ONCE(sk->sk_napi_id);
+
+       if (napi_id >= MIN_NAPI_ID)
+               napi_busy_loop(napi_id, nonblock ? NULL : sk_busy_loop_end, sk);
+#endif
 }
 
-static inline bool sk_busy_loop(struct sock *sk, int nonblock)
+/* used in the NIC receive handler to mark the skb */
+static inline void skb_mark_napi_id(struct sk_buff *skb,
+                                   struct napi_struct *napi)
 {
-       return false;
+#ifdef CONFIG_NET_RX_BUSY_POLL
+       skb->napi_id = napi->napi_id;
+#endif
 }
 
-#endif /* CONFIG_NET_RX_BUSY_POLL */
-
 /* used in the protocol handler to propagate the napi_id to the socket */
 static inline void sk_mark_napi_id(struct sock *sk, const struct sk_buff *skb)
 {
index d29e5fc8258216b9d79604bc99b69b66cce3e443..24de13f8c94f763bc63313fce444ee516acdef3a 100644 (file)
@@ -25,6 +25,8 @@ struct devlink {
        struct list_head list;
        struct list_head port_list;
        struct list_head sb_list;
+       struct list_head dpipe_table_list;
+       struct devlink_dpipe_headers *dpipe_headers;
        const struct devlink_ops *ops;
        struct device *dev;
        possible_net_t _net;
@@ -49,6 +51,178 @@ struct devlink_sb_pool_info {
        enum devlink_sb_threshold_type threshold_type;
 };
 
+/**
+ * struct devlink_dpipe_field - dpipe field object
+ * @name: field name
+ * @id: index inside the headers field array
+ * @bitwidth: bitwidth
+ * @mapping_type: mapping type
+ */
+struct devlink_dpipe_field {
+       const char *name;
+       unsigned int id;
+       unsigned int bitwidth;
+       enum devlink_dpipe_field_mapping_type mapping_type;
+};
+
+/**
+ * struct devlink_dpipe_header - dpipe header object
+ * @name: header name
+ * @id: index, global/local determined by global bit
+ * @fields: fields
+ * @fields_count: number of fields
+ * @global: indicates if the header is shared, like most protocol
+ *         headers, or driver specific
+ */
+struct devlink_dpipe_header {
+       const char *name;
+       unsigned int id;
+       struct devlink_dpipe_field *fields;
+       unsigned int fields_count;
+       bool global;
+};
+
+/**
+ * struct devlink_dpipe_match - represents match operation
+ * @type: type of match
+ * @header_index: header index (packets can have several headers of the
+ *               same type, as in the case of tunnels)
+ * @header: header
+ * @field_id: field index
+ */
+struct devlink_dpipe_match {
+       enum devlink_dpipe_match_type type;
+       unsigned int header_index;
+       struct devlink_dpipe_header *header;
+       unsigned int field_id;
+};
+
+/**
+ * struct devlink_dpipe_action - represents action operation
+ * @type: type of action
+ * @header_index: header index (packets can have several headers of the
+ *               same type, as in the case of tunnels)
+ * @header: header
+ * @field_id: field index
+ */
+struct devlink_dpipe_action {
+       enum devlink_dpipe_action_type type;
+       unsigned int header_index;
+       struct devlink_dpipe_header *header;
+       unsigned int field_id;
+};
+
+/**
+ * struct devlink_dpipe_value - represents value of match/action
+ * @action: action
+ * @match: match
+ * @mapping_value: in case the field has some mapping, this value
+ *                 specifies the mapping value
+ * @mapping_valid: specifies if the mapping value is valid
+ * @value_size: value size
+ * @value: value
+ * @mask: bit mask
+ */
+struct devlink_dpipe_value {
+       union {
+               struct devlink_dpipe_action *action;
+               struct devlink_dpipe_match *match;
+       };
+       unsigned int mapping_value;
+       bool mapping_valid;
+       unsigned int value_size;
+       void *value;
+       void *mask;
+};
+
+/**
+ * struct devlink_dpipe_entry - table entry object
+ * @index: index of the entry in the table
+ * @match_values: match values
+ * @match_values_count: count of match tuples
+ * @action_values: actions values
+ * @action_values_count: count of actions values
+ * @counter: value of counter
+ * @counter_valid: Specify if value is valid from hardware
+ */
+struct devlink_dpipe_entry {
+       u64 index;
+       struct devlink_dpipe_value *match_values;
+       unsigned int match_values_count;
+       struct devlink_dpipe_value *action_values;
+       unsigned int action_values_count;
+       u64 counter;
+       bool counter_valid;
+};
+
+/**
+ * struct devlink_dpipe_dump_ctx - context provided to driver in order
+ *                                to dump
+ * @info: info
+ * @cmd: devlink command
+ * @skb: skb
+ * @nest: top attribute
+ * @hdr: hdr
+ */
+struct devlink_dpipe_dump_ctx {
+       struct genl_info *info;
+       enum devlink_command cmd;
+       struct sk_buff *skb;
+       struct nlattr *nest;
+       void *hdr;
+};
+
+struct devlink_dpipe_table_ops;
+
+/**
+ * struct devlink_dpipe_table - table object
+ * @priv: private
+ * @name: table name
+ * @size: maximum number of entries
+ * @counters_enabled: indicates if counters are active
+ * @counter_control_extern: indicates if counter control is in dpipe or
+ *                         external tool
+ * @table_ops: table operations
+ * @rcu: rcu
+ */
+struct devlink_dpipe_table {
+       void *priv;
+       struct list_head list;
+       const char *name;
+       u64 size;
+       bool counters_enabled;
+       bool counter_control_extern;
+       struct devlink_dpipe_table_ops *table_ops;
+       struct rcu_head rcu;
+};
+
+/**
+ * struct devlink_dpipe_table_ops - dpipe_table ops
+ * @actions_dump: dumps all the table's actions
+ * @matches_dump: dumps all the table's matches
+ * @entries_dump: dumps all active entries in the table
+ * @counters_set_update: when changing the counter status, a hardware
+ *                      sync may be needed to allocate/free counter
+ *                      related resources
+ */
+struct devlink_dpipe_table_ops {
+       int (*actions_dump)(void *priv, struct sk_buff *skb);
+       int (*matches_dump)(void *priv, struct sk_buff *skb);
+       int (*entries_dump)(void *priv, bool counters_enabled,
+                           struct devlink_dpipe_dump_ctx *dump_ctx);
+       int (*counters_set_update)(void *priv, bool enable);
+};
+
+/**
+ * struct devlink_dpipe_headers - dpipe headers
+ * @headers: header array, can be shared (global bit) or driver specific
+ * @headers_count: count of headers
+ */
+struct devlink_dpipe_headers {
+       struct devlink_dpipe_header **headers;
+       unsigned int headers_count;
+};
+
 struct devlink_ops {
        int (*port_type_set)(struct devlink_port *devlink_port,
                             enum devlink_port_type port_type);
@@ -132,6 +306,26 @@ int devlink_sb_register(struct devlink *devlink, unsigned int sb_index,
                        u16 egress_pools_count, u16 ingress_tc_count,
                        u16 egress_tc_count);
 void devlink_sb_unregister(struct devlink *devlink, unsigned int sb_index);
+int devlink_dpipe_table_register(struct devlink *devlink,
+                                const char *table_name,
+                                struct devlink_dpipe_table_ops *table_ops,
+                                void *priv, u64 size,
+                                bool counter_control_extern);
+void devlink_dpipe_table_unregister(struct devlink *devlink,
+                                   const char *table_name);
+int devlink_dpipe_headers_register(struct devlink *devlink,
+                                  struct devlink_dpipe_headers *dpipe_headers);
+void devlink_dpipe_headers_unregister(struct devlink *devlink);
+bool devlink_dpipe_table_counter_enabled(struct devlink *devlink,
+                                        const char *table_name);
+int devlink_dpipe_entry_ctx_prepare(struct devlink_dpipe_dump_ctx *dump_ctx);
+int devlink_dpipe_entry_ctx_append(struct devlink_dpipe_dump_ctx *dump_ctx,
+                                  struct devlink_dpipe_entry *entry);
+int devlink_dpipe_entry_ctx_close(struct devlink_dpipe_dump_ctx *dump_ctx);
+int devlink_dpipe_action_put(struct sk_buff *skb,
+                            struct devlink_dpipe_action *action);
+int devlink_dpipe_match_put(struct sk_buff *skb,
+                           struct devlink_dpipe_match *match);
 
 #else
 
@@ -200,6 +394,71 @@ static inline void devlink_sb_unregister(struct devlink *devlink,
 {
 }
 
+static inline int
+devlink_dpipe_table_register(struct devlink *devlink,
+                            const char *table_name,
+                            struct devlink_dpipe_table_ops *table_ops,
+                            void *priv, u64 size,
+                            bool counter_control_extern)
+{
+       return 0;
+}
+
+static inline void devlink_dpipe_table_unregister(struct devlink *devlink,
+                                                 const char *table_name)
+{
+}
+
+static inline int devlink_dpipe_headers_register(struct devlink *devlink,
+                                                struct devlink_dpipe_headers *
+                                                dpipe_headers)
+{
+       return 0;
+}
+
+static inline void devlink_dpipe_headers_unregister(struct devlink *devlink)
+{
+}
+
+static inline bool devlink_dpipe_table_counter_enabled(struct devlink *devlink,
+                                                      const char *table_name)
+{
+       return false;
+}
+
+static inline int
+devlink_dpipe_entry_ctx_prepare(struct devlink_dpipe_dump_ctx *dump_ctx)
+{
+       return 0;
+}
+
+static inline int
+devlink_dpipe_entry_ctx_append(struct devlink_dpipe_dump_ctx *dump_ctx,
+                              struct devlink_dpipe_entry *entry)
+{
+       return 0;
+}
+
+static inline int
+devlink_dpipe_entry_ctx_close(struct devlink_dpipe_dump_ctx *dump_ctx)
+{
+       return 0;
+}
+
+static inline int
+devlink_dpipe_action_put(struct sk_buff *skb,
+                        struct devlink_dpipe_action *action)
+{
+       return 0;
+}
+
+static inline int
+devlink_dpipe_match_put(struct sk_buff *skb,
+                       struct devlink_dpipe_match *match)
+{
+       return 0;
+}
+
 #endif
 
 #endif /* _NET_DEVLINK_H_ */
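The dpipe additions give drivers a generic way to expose hardware pipeline tables. A minimal registration sketch built only from the ops and the registration call added above; the dump bodies are left as stubs and all demo_* names are invented:

    #include <net/devlink.h>

    static int demo_actions_dump(void *priv, struct sk_buff *skb)
    {
        return 0;   /* emit actions via devlink_dpipe_action_put() */
    }

    static int demo_matches_dump(void *priv, struct sk_buff *skb)
    {
        return 0;   /* emit matches via devlink_dpipe_match_put() */
    }

    static int demo_entries_dump(void *priv, bool counters_enabled,
                                 struct devlink_dpipe_dump_ctx *dump_ctx)
    {
        return 0;   /* ctx_prepare()/ctx_append()/ctx_close() per entry */
    }

    static int demo_counters_update(void *priv, bool enable)
    {
        return 0;   /* allocate or release hardware counter resources */
    }

    static struct devlink_dpipe_table_ops demo_table_ops = {
        .actions_dump        = demo_actions_dump,
        .matches_dump        = demo_matches_dump,
        .entries_dump        = demo_entries_dump,
        .counters_set_update = demo_counters_update,
    };

    static int demo_register(struct devlink *devlink)
    {
        /* 1024 entries, counters controlled through dpipe itself */
        return devlink_dpipe_table_register(devlink, "demo_table",
                                            &demo_table_ops, NULL, 1024,
                                            false);
    }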
index 4e13e695f0251d5c762c3089065f4eeb429033eb..9b1c1eb4147a167c58f8fc5457ac66ced3db1a45 100644 (file)
@@ -19,6 +19,7 @@
 #include <linux/workqueue.h>
 #include <linux/of.h>
 #include <linux/ethtool.h>
+#include <net/devlink.h>
 
 struct tc_action;
 struct phy_device;
@@ -31,6 +32,7 @@ enum dsa_tag_protocol {
        DSA_TAG_PROTO_EDSA,
        DSA_TAG_PROTO_BRCM,
        DSA_TAG_PROTO_QCA,
+       DSA_TAG_PROTO_MTK,
        DSA_TAG_LAST,           /* MUST BE LAST */
 };
 
@@ -122,7 +124,7 @@ struct dsa_switch_tree {
         * protocol to use.
         */
        struct net_device       *master_netdev;
-       int                     (*rcv)(struct sk_buff *skb,
+       struct sk_buff *        (*rcv)(struct sk_buff *skb,
                                       struct net_device *dev,
                                       struct packet_type *pt,
                                       struct net_device *orig_dev);
@@ -182,6 +184,7 @@ struct dsa_port {
        unsigned int            ageing_time;
        u8                      stp_state;
        struct net_device       *bridge_dev;
+       struct devlink_port     devlink_port;
 };
 
 struct dsa_switch {
@@ -233,6 +236,13 @@ struct dsa_switch {
        u32                     phys_mii_mask;
        struct mii_bus          *slave_mii_bus;
 
+       /* Ageing Time limits in msecs */
+       unsigned int ageing_time_min;
+       unsigned int ageing_time_max;
+
+       /* devlink used to represent this switch device */
+       struct devlink          *devlink;
+
        /* Dynamically allocated ports, keep last */
        size_t num_ports;
        struct dsa_port ports[];
@@ -248,6 +258,11 @@ static inline bool dsa_is_dsa_port(struct dsa_switch *ds, int p)
        return !!((ds->dsa_port_mask) & (1 << p));
 }
 
+static inline bool dsa_is_normal_port(struct dsa_switch *ds, int p)
+{
+       return !dsa_is_cpu_port(ds, p) && !dsa_is_dsa_port(ds, p);
+}
+
 static inline bool dsa_is_port_initialized(struct dsa_switch *ds, int p)
 {
        return ds->enabled_port_mask & (1 << p) && ds->ports[p].netdev;
@@ -442,6 +457,14 @@ struct dsa_switch_ops {
                                   bool ingress);
        void    (*port_mirror_del)(struct dsa_switch *ds, int port,
                                   struct dsa_mall_mirror_tc_entry *mirror);
+
+       /*
+        * Cross-chip operations
+        */
+       int     (*crosschip_bridge_join)(struct dsa_switch *ds, int sw_index,
+                                        int port, struct net_device *br);
+       void    (*crosschip_bridge_leave)(struct dsa_switch *ds, int sw_index,
+                                         int port, struct net_device *br);
 };
 
 struct dsa_switch_driver {
@@ -459,6 +482,15 @@ static inline bool dsa_uses_tagged_protocol(struct dsa_switch_tree *dst)
        return dst->rcv != NULL;
 }
 
+static inline bool netdev_uses_dsa(struct net_device *dev)
+{
+#if IS_ENABLED(CONFIG_NET_DSA)
+       if (dev->dsa_ptr != NULL)
+               return dsa_uses_tagged_protocol(dev->dsa_ptr);
+#endif
+       return false;
+}
+
 struct dsa_switch *dsa_switch_alloc(struct device *dev, size_t n);
 void dsa_unregister_switch(struct dsa_switch *ds);
 int dsa_register_switch(struct dsa_switch *ds, struct device *dev);
index 8dbfdf728cd8ce901b3b05f0e58b4eeee25051fe..1243b9c7694e309f49dde4a5bf38cd1387dfecb7 100644 (file)
@@ -141,6 +141,7 @@ int fib_rules_lookup(struct fib_rules_ops *, struct flowi *, int flags,
                     struct fib_lookup_arg *);
 int fib_default_rule_add(struct fib_rules_ops *, u32 pref, u32 table,
                         u32 flags);
+bool fib_rule_matchall(const struct fib_rule *rule);
 
 int fib_nl_newrule(struct sk_buff *skb, struct nlmsghdr *nlh);
 int fib_nl_delrule(struct sk_buff *skb, struct nlmsghdr *nlh);
index 6984f1913dc124ab97627a4a5ecb2aba9f93e901..bae198b3039e6e66829c2717c1baf2baebbec6a2 100644 (file)
@@ -202,7 +202,7 @@ static inline struct flowi *flowidn_to_flowi(struct flowidn *fldn)
 
 typedef unsigned long flow_compare_t;
 
-static inline size_t flow_key_size(u16 family)
+static inline unsigned int flow_key_size(u16 family)
 {
        switch (family) {
        case AF_INET:
index 9caf3bfc8d2dafcc89064ec54e78682244d71fb7..51eb971e897378e25329b06f80678f522eec2ea6 100644 (file)
@@ -8,7 +8,7 @@
 
 struct flow_cache_percpu {
        struct hlist_head               *hash_table;
-       int                             hash_count;
+       unsigned int                    hash_count;
        u32                             hash_rnd;
        int                             hash_rnd_recalc;
        struct tasklet_struct           flush_tasklet;
@@ -18,8 +18,8 @@ struct flow_cache {
        u32                             hash_shift;
        struct flow_cache_percpu __percpu *percpu;
        struct hlist_node               node;
-       int                             low_watermark;
-       int                             high_watermark;
+       unsigned int                    low_watermark;
+       unsigned int                    high_watermark;
        struct timer_list               rnd_timer;
 };
 #endif /* _NET_FLOWCACHE_H */
index b7952d55b9c00039a9eca46544997c10722682b6..f39ae697347f6590459ee4178de84160b43841e2 100644 (file)
@@ -20,7 +20,8 @@ int __inet_stream_connect(struct socket *sock, struct sockaddr *uaddr,
                          int addr_len, int flags, int is_sendmsg);
 int inet_dgram_connect(struct socket *sock, struct sockaddr *uaddr,
                       int addr_len, int flags);
-int inet_accept(struct socket *sock, struct socket *newsock, int flags);
+int inet_accept(struct socket *sock, struct socket *newsock, int flags,
+               bool kern);
 int inet_sendmsg(struct socket *sock, struct msghdr *msg, size_t size);
 ssize_t inet_sendpage(struct socket *sock, struct page *page, int offset,
                      size_t size, int flags);
index 826f198374f809a4b7ca23ada4a46433b972ef35..c7a577976bec0887218a969bc8197dc1c8eb13f0 100644 (file)
@@ -258,7 +258,7 @@ inet_csk_rto_backoff(const struct inet_connection_sock *icsk,
         return (unsigned long)min_t(u64, when, max_when);
 }
 
-struct sock *inet_csk_accept(struct sock *sk, int flags, int *err);
+struct sock *inet_csk_accept(struct sock *sk, int flags, int *err, bool kern);
 
 int inet_csk_get_port(struct sock *sk, unsigned short snum);
 
index 368bb4024b78c411d02a842f340f9fa11a9b5f7e..6692c5758b332d468f1e0611ecc4f3e03ae03b2b 100644 (file)
@@ -213,6 +213,11 @@ struct fib_entry_notifier_info {
        u32 tb_id;
 };
 
+struct fib_rule_notifier_info {
+       struct fib_notifier_info info; /* must be first */
+       struct fib_rule *rule;
+};
+
 struct fib_nh_notifier_info {
        struct fib_notifier_info info; /* must be first */
        struct fib_nh *fib_nh;
@@ -232,9 +237,21 @@ enum fib_event_type {
 int register_fib_notifier(struct notifier_block *nb,
                          void (*cb)(struct notifier_block *nb));
 int unregister_fib_notifier(struct notifier_block *nb);
+int call_fib_notifier(struct notifier_block *nb, struct net *net,
+                     enum fib_event_type event_type,
+                     struct fib_notifier_info *info);
 int call_fib_notifiers(struct net *net, enum fib_event_type event_type,
                       struct fib_notifier_info *info);
 
+void fib_notify(struct net *net, struct notifier_block *nb);
+#ifdef CONFIG_IP_MULTIPLE_TABLES
+void fib_rules_notify(struct net *net, struct notifier_block *nb);
+#else
+static inline void fib_rules_notify(struct net *net, struct notifier_block *nb)
+{
+}
+#endif
+
 struct fib_table {
        struct hlist_node       tb_hlist;
        u32                     tb_id;
@@ -299,6 +316,11 @@ static inline int fib_lookup(struct net *net, const struct flowi4 *flp,
        return err;
 }
 
+static inline bool fib4_rule_default(const struct fib_rule *rule)
+{
+       return true;
+}
+
 #else /* CONFIG_IP_MULTIPLE_TABLES */
 int __net_init fib4_rules_init(struct net *net);
 void __net_exit fib4_rules_exit(struct net *net);
@@ -343,6 +365,8 @@ out:
        return err;
 }
 
+bool fib4_rule_default(const struct fib_rule *rule);
+
 #endif /* CONFIG_IP_MULTIPLE_TABLES */
 
 /* Exported by fib_frontend.c */
@@ -371,17 +395,13 @@ int fib_sync_down_dev(struct net_device *dev, unsigned long event, bool force);
 int fib_sync_down_addr(struct net_device *dev, __be32 local);
 int fib_sync_up(struct net_device *dev, unsigned int nh_flags);
 
-extern u32 fib_multipath_secret __read_mostly;
-
-static inline int fib_multipath_hash(__be32 saddr, __be32 daddr)
-{
-       return jhash_2words((__force u32)saddr, (__force u32)daddr,
-                           fib_multipath_secret) >> 1;
-}
-
+#ifdef CONFIG_IP_ROUTE_MULTIPATH
+int fib_multipath_hash(const struct fib_info *fi, const struct flowi4 *fl4,
+                      const struct sk_buff *skb);
+#endif
 void fib_select_multipath(struct fib_result *res, int hash);
 void fib_select_path(struct net *net, struct fib_result *res,
-                    struct flowi4 *fl4, int mp_hash);
+                    struct flowi4 *fl4, const struct sk_buff *skb);
 
 /* Exported by fib_trie.c */
 void fib_trie_init(void);
index 7bdfa7d783639d8b65c18bd7f5a6ea5fa4fbb7da..8a4a57b887fb508c732b7e24cd4f9dd888a6941c 100644 (file)
@@ -12,6 +12,8 @@
 #include <linux/list.h>                 /* for struct list_head */
 #include <linux/spinlock.h>             /* for struct rwlock_t */
 #include <linux/atomic.h>               /* for struct atomic_t */
+#include <linux/refcount.h>             /* for struct refcount_t */
+
 #include <linux/compiler.h>
 #include <linux/timer.h>
 #include <linux/bug.h>
@@ -525,7 +527,7 @@ struct ip_vs_conn {
        struct netns_ipvs       *ipvs;
 
        /* counter and timer */
-       atomic_t                refcnt;         /* reference count */
+       refcount_t              refcnt;         /* reference count */
        struct timer_list       timer;          /* Expiration timer */
        volatile unsigned long  timeout;        /* timeout */
 
@@ -667,7 +669,7 @@ struct ip_vs_dest {
        atomic_t                conn_flags;     /* flags to copy to conn */
        atomic_t                weight;         /* server weight */
 
-       atomic_t                refcnt;         /* reference counter */
+       refcount_t              refcnt;         /* reference counter */
        struct ip_vs_stats      stats;          /* statistics */
        unsigned long           idle_start;     /* start time, jiffies */
 
@@ -1211,14 +1213,14 @@ struct ip_vs_conn * ip_vs_conn_out_get_proto(struct netns_ipvs *ipvs, int af,
  */
 static inline bool __ip_vs_conn_get(struct ip_vs_conn *cp)
 {
-       return atomic_inc_not_zero(&cp->refcnt);
+       return refcount_inc_not_zero(&cp->refcnt);
 }
 
 /* put back the conn without restarting its timer */
 static inline void __ip_vs_conn_put(struct ip_vs_conn *cp)
 {
        smp_mb__before_atomic();
-       atomic_dec(&cp->refcnt);
+       refcount_dec(&cp->refcnt);
 }
 void ip_vs_conn_put(struct ip_vs_conn *cp);
 void ip_vs_conn_fill_cport(struct ip_vs_conn *cp, __be16 cport);
@@ -1410,18 +1412,18 @@ void ip_vs_try_bind_dest(struct ip_vs_conn *cp);
 
 static inline void ip_vs_dest_hold(struct ip_vs_dest *dest)
 {
-       atomic_inc(&dest->refcnt);
+       refcount_inc(&dest->refcnt);
 }
 
 static inline void ip_vs_dest_put(struct ip_vs_dest *dest)
 {
        smp_mb__before_atomic();
-       atomic_dec(&dest->refcnt);
+       refcount_dec(&dest->refcnt);
 }
 
 static inline void ip_vs_dest_put_and_free(struct ip_vs_dest *dest)
 {
-       if (atomic_dec_and_test(&dest->refcnt))
+       if (refcount_dec_and_test(&dest->refcnt))
                kfree(dest);
 }
 
index cb2615ccf761d68123406d3600646d147a6e2f47..d784f242cf7b4dc114e355c52b319cb11d6faca7 100644 (file)
@@ -59,7 +59,7 @@ struct lap_cb;
  *  Slot timer must never exceed 85 ms, and must always be at least 25 ms, 
  *  suggested to  75-85 msec by IrDA lite. This doesn't work with a lot of
  *  devices, and other stacks use a lot more, so it's best we do it as well
- *  (Note : this is the default value and sysctl overides it - Jean II)
+ *  (Note : this is the default value and sysctl overrides it - Jean II)
  */
 #define SLOT_TIMEOUT            (90*HZ/1000)
 
index 179253f9dcfd986ef806331044bc4973f1cc7d6e..9d22bf67ac86623eaaea2c49fbe1140747ce67e8 100644 (file)
 #ifndef _NET_MPLS_IPTUNNEL_H
 #define _NET_MPLS_IPTUNNEL_H 1
 
-#define MAX_NEW_LABELS 2
-
 struct mpls_iptunnel_encap {
-       u32     label[MAX_NEW_LABELS];
        u8      labels;
+       u8      ttl_propagate;
+       u8      default_ttl;
+       u8      reserved1;
+       u32     label[0];
 };
 
 static inline struct mpls_iptunnel_encap *mpls_lwtunnel_encap(struct lwtunnel_state *lwtstate)
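With label[] now a zero-length trailing array, the encap must be sized for the actual label count when the lwtunnel state is allocated. A sketch of the allocation pattern this implies, not taken verbatim from the patch:

    #include <linux/slab.h>
    #include <net/mpls_iptunnel.h>

    static struct mpls_iptunnel_encap *demo_alloc_encap(unsigned int n,
                                                        gfp_t gfp)
    {
        struct mpls_iptunnel_encap *en;

        /* header plus n trailing u32 labels in one allocation */
        en = kzalloc(sizeof(*en) + n * sizeof(en->label[0]), gfp);
        if (en)
            en->labels = n;
        return en;
    }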
index 8a0214654b6b10bc480d7e6dc8195555ca58dc9a..1036c902d2c9904ed084fcd5a4d8dc70b7902cbe 100644 (file)
@@ -439,8 +439,10 @@ void ndisc_update(const struct net_device *dev, struct neighbour *neigh,
  *     IGMP
  */
 int igmp6_init(void);
+int igmp6_late_init(void);
 
 void igmp6_cleanup(void);
+void igmp6_late_cleanup(void);
 
 int igmp6_event_query(struct sk_buff *skb);
 
index 5ebf6949116097f60e668b0c2c4c48dd1639e5e8..e4dd3a2140341049fabfbff1ec55406fbe11d5b2 100644 (file)
@@ -314,7 +314,8 @@ static inline struct neighbour *neigh_create(struct neigh_table *tbl,
 }
 void neigh_destroy(struct neighbour *neigh);
 int __neigh_event_send(struct neighbour *neigh, struct sk_buff *skb);
-int neigh_update(struct neighbour *neigh, const u8 *lladdr, u8 new, u32 flags);
+int neigh_update(struct neighbour *neigh, const u8 *lladdr, u8 new, u32 flags,
+                u32 nlmsg_pid);
 void __neigh_set_probe_once(struct neighbour *neigh);
 void neigh_changeaddr(struct neigh_table *tbl, struct net_device *dev);
 int neigh_ifdown(struct neigh_table *tbl, struct net_device *dev);
@@ -449,7 +450,7 @@ static inline int neigh_hh_bridge(struct hh_cache *hh, struct sk_buff *skb)
 static inline int neigh_hh_output(const struct hh_cache *hh, struct sk_buff *skb)
 {
        unsigned int seq;
-       int hh_len;
+       unsigned int hh_len;
 
        do {
                seq = read_seqbegin(&hh->hh_lock);
@@ -458,7 +459,7 @@ static inline int neigh_hh_output(const struct hh_cache *hh, struct sk_buff *skb
                        /* this is inlined by gcc */
                        memcpy(skb->data - HH_DATA_MOD, hh->hh_data, HH_DATA_MOD);
                } else {
-                       int hh_alen = HH_DATA_ALIGN(hh_len);
+                       unsigned int hh_alen = HH_DATA_ALIGN(hh_len);
 
                        memcpy(skb->data - hh_alen, hh->hh_data, hh_alen);
                }
index af8fe8a909dc0ca62e54056cf4be9d0d4ea82477..fe80bb48ab1f0c7b1665a10a9bf2388b32eb5013 100644 (file)
@@ -27,6 +27,7 @@
 #include <net/netns/nftables.h>
 #include <net/netns/xfrm.h>
 #include <net/netns/mpls.h>
+#include <net/netns/can.h>
 #include <linux/ns_common.h>
 #include <linux/idr.h>
 #include <linux/skbuff.h>
@@ -140,6 +141,9 @@ struct net {
 #endif
 #if IS_ENABLED(CONFIG_MPLS)
        struct netns_mpls       mpls;
+#endif
+#if IS_ENABLED(CONFIG_CAN)
+       struct netns_can        can;
 #endif
        struct sock             *diag_nlsk;
        atomic_t                fnhe_genid;
index f540f9ad2af4f673a204875864ce73f423204a74..19605878da4739d04f0642b20f8641ed8601d2eb 100644 (file)
@@ -244,7 +244,7 @@ extern s32 (*nf_ct_nat_offset)(const struct nf_conn *ct,
                               u32 seq);
 
 /* Fake conntrack entry for untracked connections */
-DECLARE_PER_CPU(struct nf_conn, nf_conntrack_untracked);
+DECLARE_PER_CPU_ALIGNED(struct nf_conn, nf_conntrack_untracked);
 static inline struct nf_conn *nf_ct_untracked_get(void)
 {
        return raw_cpu_ptr(&nf_conntrack_untracked);
index 5ed33ea4718ef5a101689432825ebb1a48fcc0d9..65cc2cb005d937d610a2f072cb4f0f9c61570e60 100644 (file)
@@ -5,6 +5,8 @@
 #ifndef _NF_CONNTRACK_EXPECT_H
 #define _NF_CONNTRACK_EXPECT_H
 
+#include <linux/refcount.h>
+
 #include <net/netfilter/nf_conntrack.h>
 #include <net/netfilter/nf_conntrack_zones.h>
 
@@ -37,7 +39,7 @@ struct nf_conntrack_expect {
        struct timer_list timeout;
 
        /* Usage count. */
-       atomic_t use;
+       refcount_t use;
 
        /* Flags */
        unsigned int flags;
index 5cc5e9e6171a03db471407c708578b4794c22b92..d40b89355fdd345617cf21593922753613190a11 100644 (file)
@@ -4,6 +4,7 @@
 #include <net/net_namespace.h>
 #include <linux/netfilter/nf_conntrack_common.h>
 #include <linux/netfilter/nf_conntrack_tuple_common.h>
+#include <linux/refcount.h>
 #include <net/netfilter/nf_conntrack.h>
 #include <net/netfilter/nf_conntrack_extend.h>
 
@@ -12,7 +13,7 @@
 struct ctnl_timeout {
        struct list_head        head;
        struct rcu_head         rcu_head;
-       atomic_t                refcnt;
+       refcount_t              refcnt;
        char                    name[CTNL_TIMEOUT_NAME_MAX];
        __u16                   l3num;
        struct nf_conntrack_l4proto *l4proto;
index 2aa8a9d80fbe8263a4b0e1c65f44e1ee2d9295d4..f713a053f89d4627d3166a5c2bb14f7a0f67b574 100644 (file)
@@ -103,6 +103,35 @@ struct nft_regs {
        };
 };
 
+/* Store/load a u16 or u8 integer to/from the u32 data register.
+ *
+ * Note, when using concatenations, register allocation happens at the
+ * 32-bit level, so the store helpers pad the rest of the register with
+ * zeroes to avoid garbage values.
+ */
+
+static inline void nft_reg_store16(u32 *dreg, u16 val)
+{
+       *dreg = 0;
+       *(u16 *)dreg = val;
+}
+
+static inline void nft_reg_store8(u32 *dreg, u8 val)
+{
+       *dreg = 0;
+       *(u8 *)dreg = val;
+}
+
+static inline u16 nft_reg_load16(u32 *sreg)
+{
+       return *(u16 *)sreg;
+}
+
+static inline u8 nft_reg_load8(u32 *sreg)
+{
+       return *(u8 *)sreg;
+}
+
 static inline void nft_data_copy(u32 *dst, const struct nft_data *src,
                                 unsigned int len)
 {
@@ -203,7 +232,6 @@ struct nft_set_elem {
 struct nft_set;
 struct nft_set_iter {
        u8              genmask;
-       bool            flush;
        unsigned int    count;
        unsigned int    skip;
        int             err;
@@ -385,10 +413,11 @@ static inline struct nft_set *nft_set_container_of(const void *priv)
        return (void *)priv - offsetof(struct nft_set, data);
 }
 
-struct nft_set *nf_tables_set_lookup(const struct nft_table *table,
-                                    const struct nlattr *nla, u8 genmask);
-struct nft_set *nf_tables_set_lookup_byid(const struct net *net,
-                                         const struct nlattr *nla, u8 genmask);
+struct nft_set *nft_set_lookup(const struct net *net,
+                              const struct nft_table *table,
+                              const struct nlattr *nla_set_name,
+                              const struct nlattr *nla_set_id,
+                              u8 genmask);
 
 static inline unsigned long nft_set_gc_interval(const struct nft_set *set)
 {
@@ -1016,7 +1045,8 @@ struct nft_object_type {
        unsigned int                    maxattr;
        struct module                   *owner;
        const struct nla_policy         *policy;
-       int                             (*init)(const struct nlattr * const tb[],
+       int                             (*init)(const struct nft_ctx *ctx,
+                                               const struct nlattr *const tb[],
                                                struct nft_object *obj);
        void                            (*destroy)(struct nft_object *obj);
        int                             (*dump)(struct sk_buff *skb,
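A tiny illustration of the helpers above: storing a 16-bit value first zeroes the whole 32-bit register, so concatenation lookups that compare full registers see deterministic padding. The demo_* wrappers are invented:

    #include <net/netfilter/nf_tables.h>

    static void demo_store_port(struct nft_regs *regs, u8 dreg, u16 port)
    {
        nft_reg_store16(&regs->data[dreg], port);
    }

    static u16 demo_load_port(struct nft_regs *regs, u8 sreg)
    {
        return nft_reg_load16(&regs->data[sreg]);
    }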
index d150b50662017378644c8f3ccf0218ecceaa2331..97983d1c05e4d327147110440f4a3925b13f6951 100644 (file)
@@ -9,12 +9,13 @@ nft_set_pktinfo_ipv6(struct nft_pktinfo *pkt,
                     struct sk_buff *skb,
                     const struct nf_hook_state *state)
 {
+       unsigned int flags = IP6_FH_F_AUTH;
        int protohdr, thoff = 0;
        unsigned short frag_off;
 
        nft_set_pktinfo(pkt, skb, state);
 
-       protohdr = ipv6_find_hdr(pkt->skb, &thoff, -1, &frag_off, NULL);
+       protohdr = ipv6_find_hdr(pkt->skb, &thoff, -1, &frag_off, &flags);
        if (protohdr < 0) {
                nft_set_pktinfo_proto_unspec(pkt, skb);
                return;
@@ -32,6 +33,7 @@ __nft_set_pktinfo_ipv6_validate(struct nft_pktinfo *pkt,
                                const struct nf_hook_state *state)
 {
 #if IS_ENABLED(CONFIG_IPV6)
+       unsigned int flags = IP6_FH_F_AUTH;
        struct ipv6hdr *ip6h, _ip6h;
        unsigned int thoff = 0;
        unsigned short frag_off;
@@ -50,7 +52,7 @@ __nft_set_pktinfo_ipv6_validate(struct nft_pktinfo *pkt,
        if (pkt_len + sizeof(*ip6h) > skb->len)
                return -1;
 
-       protohdr = ipv6_find_hdr(pkt->skb, &thoff, -1, &frag_off, NULL);
+       protohdr = ipv6_find_hdr(pkt->skb, &thoff, -1, &frag_off, &flags);
        if (protohdr < 0)
                return -1;
 
index 5ceb2205e4e3ed93461ed4a3956b227f99ac9494..381af9469e6ada01e4acffd4efc3ebc88e66a019 100644 (file)
@@ -32,6 +32,6 @@ void nft_fib6_eval_type(const struct nft_expr *expr, struct nft_regs *regs,
 void nft_fib6_eval(const struct nft_expr *expr, struct nft_regs *regs,
                   const struct nft_pktinfo *pkt);
 
-void nft_fib_store_result(void *reg, enum nft_fib_result r,
+void nft_fib_store_result(void *reg, const struct nft_fib *priv,
                          const struct nft_pktinfo *pkt, int index);
 #endif
diff --git a/include/net/netns/can.h b/include/net/netns/can.h
new file mode 100644 (file)
index 0000000..e8beba7
--- /dev/null
@@ -0,0 +1,31 @@
+/*
+ * can in net namespaces
+ */
+
+#ifndef __NETNS_CAN_H__
+#define __NETNS_CAN_H__
+
+#include <linux/spinlock.h>
+
+struct dev_rcv_lists;
+
+struct netns_can {
+#if IS_ENABLED(CONFIG_PROC_FS)
+       struct proc_dir_entry *proc_dir;
+       struct proc_dir_entry *pde_version;
+       struct proc_dir_entry *pde_stats;
+       struct proc_dir_entry *pde_reset_stats;
+       struct proc_dir_entry *pde_rcvlist_all;
+       struct proc_dir_entry *pde_rcvlist_fil;
+       struct proc_dir_entry *pde_rcvlist_inv;
+       struct proc_dir_entry *pde_rcvlist_sff;
+       struct proc_dir_entry *pde_rcvlist_eff;
+       struct proc_dir_entry *pde_rcvlist_err;
+#endif
+
+       /* receive filters subscribed for 'all' CAN devices */
+       struct dev_rcv_lists *can_rx_alldev_list;
+       spinlock_t can_rcvlists_lock;
+};
+
+#endif /* __NETNS_CAN_H__ */
index 622d2da27135586d164c228b81e71afb922d5d8c..cd686c4fb32dc5409a08f818d48228bffa6f6778 100644 (file)
@@ -33,7 +33,6 @@ struct inet_timewait_death_row {
        atomic_t                tw_count;
 
        struct inet_hashinfo    *hashinfo ____cacheline_aligned_in_smp;
-       int                     sysctl_tw_recycle;
        int                     sysctl_max_tw_buckets;
 };
 
@@ -96,6 +95,8 @@ struct netns_ipv4 {
        /* Shall we try to damage output packets if routing dev changes? */
        int sysctl_ip_dynaddr;
        int sysctl_ip_early_demux;
+       int sysctl_tcp_early_demux;
+       int sysctl_udp_early_demux;
 
        int sysctl_fwmark_reflect;
        int sysctl_tcp_fwmark_accept;
@@ -152,6 +153,7 @@ struct netns_ipv4 {
 #endif
 #ifdef CONFIG_IP_ROUTE_MULTIPATH
        int sysctl_fib_multipath_use_neigh;
+       int sysctl_fib_multipath_hash_policy;
 #endif
 
        unsigned int    fib_seq;        /* protected by rtnl_mutex */
index d29203651c01700d9406157ef8dd016ea55a4cbb..6608b3693385e771147f78da13afa87cc6355d83 100644 (file)
@@ -9,8 +9,11 @@ struct mpls_route;
 struct ctl_table_header;
 
 struct netns_mpls {
+       int ip_ttl_propagate;
+       int default_ttl;
        size_t platform_labels;
        struct mpls_route __rcu * __rcu *platform_label;
+
        struct ctl_table_header *ctl;
 };
 
index f1b76b8e6d2d296177116d0ef0f254d175551cbe..bec46f63f10ced844f8aec2b19bebf8b3dc01167 100644 (file)
@@ -92,7 +92,7 @@ int unregister_qdisc(struct Qdisc_ops *qops);
 void qdisc_get_default(char *id, size_t len);
 int qdisc_set_default(const char *id);
 
-void qdisc_hash_add(struct Qdisc *q);
+void qdisc_hash_add(struct Qdisc *q, bool invisible);
 void qdisc_hash_del(struct Qdisc *q);
 struct Qdisc *qdisc_lookup(struct net_device *dev, u32 handle);
 struct Qdisc *qdisc_lookup_class(struct net_device *dev, u32 handle);
index bf36ca34af7ad255b9eb821cbed0a70abad993f5..65ba335b0e7e66bb7f1b4bd279d31e616e0dd31e 100644 (file)
@@ -40,6 +40,7 @@
 /* This is used to register protocols. */
 struct net_protocol {
        void                    (*early_demux)(struct sk_buff *skb);
+       void                    (*early_demux_handler)(struct sk_buff *skb);
        int                     (*handler)(struct sk_buff *skb);
        void                    (*err_handler)(struct sk_buff *skb, u32 info);
        unsigned int            no_policy:1,
@@ -54,7 +55,7 @@ struct net_protocol {
 #if IS_ENABLED(CONFIG_IPV6)
 struct inet6_protocol {
        void    (*early_demux)(struct sk_buff *skb);
-
+       void    (*early_demux_handler)(struct sk_buff *skb);
        int     (*handler)(struct sk_buff *skb);
 
        void    (*err_handler)(struct sk_buff *skb,
@@ -92,12 +93,12 @@ struct inet_protosw {
 #define INET_PROTOSW_PERMANENT 0x02  /* Permanent protocols are unremovable. */
 #define INET_PROTOSW_ICSK      0x04  /* Is this an inet_connection_sock? */
 
-extern const struct net_protocol __rcu *inet_protos[MAX_INET_PROTOS];
+extern struct net_protocol __rcu *inet_protos[MAX_INET_PROTOS];
 extern const struct net_offload __rcu *inet_offloads[MAX_INET_PROTOS];
 extern const struct net_offload __rcu *inet6_offloads[MAX_INET_PROTOS];
 
 #if IS_ENABLED(CONFIG_IPV6)
-extern const struct inet6_protocol __rcu *inet6_protos[MAX_INET_PROTOS];
+extern struct inet6_protocol __rcu *inet6_protos[MAX_INET_PROTOS];
 #endif
 
 int inet_add_protocol(const struct net_protocol *prot, unsigned char num);
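
Splitting early demux into a fixed pointer plus a per-protocol handler,
combined with the new netns sysctls above, lets the demux step be toggled
per protocol at runtime. A hedged sketch of the dispatch this enables
(names and wiring are illustrative, not the exact kernel code):

static void ip_early_demux_dispatch(struct net *net, struct sk_buff *skb,
				    const struct net_protocol *ipprot,
				    u8 protocol)
{
	if (!ipprot->early_demux_handler)
		return;

	/* honour the per-namespace switches (assumed wiring) */
	if (protocol == IPPROTO_TCP && !net->ipv4.sysctl_tcp_early_demux)
		return;
	if (protocol == IPPROTO_UDP && !net->ipv4.sysctl_udp_early_demux)
		return;

	ipprot->early_demux_handler(skb);
}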
index c0874c87c173717f2c13c8af06d2482a76190243..2cc0e14c63598ce3d3be88bb04d2fd433d676129 100644 (file)
@@ -113,13 +113,13 @@ struct in_device;
 int ip_rt_init(void);
 void rt_cache_flush(struct net *net);
 void rt_flush_dev(struct net_device *dev);
-struct rtable *__ip_route_output_key_hash(struct net *, struct flowi4 *flp,
-                                         int mp_hash);
+struct rtable *__ip_route_output_key_hash(struct net *net, struct flowi4 *flp,
+                                         const struct sk_buff *skb);
 
 static inline struct rtable *__ip_route_output_key(struct net *net,
                                                   struct flowi4 *flp)
 {
-       return __ip_route_output_key_hash(net, flp, -1);
+       return __ip_route_output_key_hash(net, flp, NULL);
 }
 
 struct rtable *ip_route_output_flow(struct net *, struct flowi4 *flp,
index aeec4086afb2446dadb1fb8c54ad54a909634380..65d50261031473d33c27f6bce1020048a697481d 100644 (file)
@@ -66,6 +66,7 @@ struct Qdisc {
 #define TCQ_F_NOPARENT         0x40 /* root of its hierarchy :
                                      * qdisc_tree_decrease_qlen() should stop.
                                      */
+#define TCQ_F_INVISIBLE                0x80 /* invisible by default in dump */
        u32                     limit;
        const struct Qdisc_ops  *ops;
        struct qdisc_size_table __rcu *stab;
index 1f71ee5ab518410ecd2fe0cab6064681025b43b3..069582ee5d7fd5b0e92edea68cb2406fbbe6db00 100644 (file)
@@ -448,10 +448,9 @@ static inline int sctp_frag_point(const struct sctp_association *asoc, int pmtu)
        return frag;
 }
 
-static inline void sctp_assoc_pending_pmtu(struct sock *sk, struct sctp_association *asoc)
+static inline void sctp_assoc_pending_pmtu(struct sctp_association *asoc)
 {
-
-       sctp_assoc_sync_pmtu(sk, asoc);
+       sctp_assoc_sync_pmtu(asoc);
        asoc->pmtu_pending = 0;
 }
 
@@ -596,12 +595,23 @@ static inline void sctp_v4_map_v6(union sctp_addr *addr)
  */
 static inline struct dst_entry *sctp_transport_dst_check(struct sctp_transport *t)
 {
-       if (t->dst && (!dst_check(t->dst, t->dst_cookie) ||
-                      t->pathmtu != max_t(size_t, SCTP_TRUNC4(dst_mtu(t->dst)),
-                                          SCTP_DEFAULT_MINSEGMENT)))
+       if (t->dst && !dst_check(t->dst, t->dst_cookie))
                sctp_transport_dst_release(t);
 
        return t->dst;
 }
 
+static inline bool sctp_transport_pmtu_check(struct sctp_transport *t)
+{
+       __u32 pmtu = max_t(size_t, SCTP_TRUNC4(dst_mtu(t->dst)),
+                          SCTP_DEFAULT_MINSEGMENT);
+
+       if (t->pathmtu == pmtu)
+               return true;
+
+       t->pathmtu = pmtu;
+
+       return false;
+}
+
 #endif /* __net_sctp_h__ */
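
Separating route validity from the PMTU comparison lets callers treat the
two cases differently: a failed dst_check() drops the route, while the new
helper updates t->pathmtu and returns false when the cached value was
stale. An illustrative caller, not code from this diff:

static void example_check_path(struct sctp_transport *t,
			       struct sctp_association *asoc)
{
	if (!sctp_transport_dst_check(t))
		return;		/* route is gone; the next lookup rebuilds it */

	if (!sctp_transport_pmtu_check(t))
		sctp_assoc_sync_pmtu(asoc);	/* pathmtu just changed */
}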
index b6f682ec184a62a1e7b9d2a0ea159293cca12a76..47113f2c4b0a2b6c596d2f28018a1e1941cb6ede 100644 (file)
@@ -293,6 +293,22 @@ struct sctp_chunk *sctp_process_strreset_inreq(
                                struct sctp_association *asoc,
                                union sctp_params param,
                                struct sctp_ulpevent **evp);
+struct sctp_chunk *sctp_process_strreset_tsnreq(
+                               struct sctp_association *asoc,
+                               union sctp_params param,
+                               struct sctp_ulpevent **evp);
+struct sctp_chunk *sctp_process_strreset_addstrm_out(
+                               struct sctp_association *asoc,
+                               union sctp_params param,
+                               struct sctp_ulpevent **evp);
+struct sctp_chunk *sctp_process_strreset_addstrm_in(
+                               struct sctp_association *asoc,
+                               union sctp_params param,
+                               struct sctp_ulpevent **evp);
+struct sctp_chunk *sctp_process_strreset_resp(
+                               struct sctp_association *asoc,
+                               union sctp_params param,
+                               struct sctp_ulpevent **evp);
 
 /* Prototypes for statetable processing. */
 
index a244db5e5ff7fa65bcf0a4124cbbe5f4682e6d9d..b751399aa6b7d58d93b55a4efbd42e0d2e9ac7ea 100644 (file)
@@ -83,6 +83,7 @@ struct sctp_bind_addr;
 struct sctp_ulpq;
 struct sctp_ep_common;
 struct crypto_shash;
+struct sctp_stream;
 
 
 #include <net/sctp/tsnmap.h>
@@ -376,7 +377,8 @@ typedef struct sctp_sender_hb_info {
        __u64 hb_nonce;
 } sctp_sender_hb_info_t;
 
-struct sctp_stream *sctp_stream_new(__u16 incnt, __u16 outcnt, gfp_t gfp);
+int sctp_stream_new(struct sctp_association *asoc, gfp_t gfp);
+int sctp_stream_init(struct sctp_association *asoc, gfp_t gfp);
 void sctp_stream_free(struct sctp_stream *stream);
 void sctp_stream_clear(struct sctp_stream *stream);
 
@@ -476,7 +478,8 @@ struct sctp_pf {
        int  (*send_verify) (struct sctp_sock *, union sctp_addr *);
        int  (*supported_addrs)(const struct sctp_sock *, __be16 *);
        struct sock *(*create_accept_sk) (struct sock *sk,
-                                         struct sctp_association *asoc);
+                                         struct sctp_association *asoc,
+                                         bool kern);
        int (*addr_to_user)(struct sctp_sock *sk, union sctp_addr *addr);
        void (*to_sk_saddr)(union sctp_addr *, struct sock *sk);
        void (*to_sk_daddr)(union sctp_addr *, struct sock *sk);
@@ -497,7 +500,6 @@ struct sctp_datamsg {
        /* Did the message fail to send? */
        int send_error;
        u8 send_failed:1,
-          force_delay:1,
           can_delay;       /* should this message be Nagle delayed */
 };
 
@@ -752,6 +754,8 @@ struct sctp_transport {
                /* Is the Path MTU update pending on this transport */
                pmtu_pending:1,
 
+               dst_pending_confirm:1,  /* need to confirm neighbour */
+
                /* Has this transport moved the ctsn since we last sacked */
                sack_generation:1;
        u32 dst_cookie;
@@ -805,8 +809,6 @@ struct sctp_transport {
 
        __u32 burst_limited;    /* Holds old cwnd when max.burst is applied */
 
-       __u32 dst_pending_confirm;      /* need to confirm neighbour */
-
        /* Destination */
        struct dst_entry *dst;
        /* Source address. */
@@ -950,8 +952,8 @@ void sctp_transport_lower_cwnd(struct sctp_transport *, sctp_lower_cwnd_t);
 void sctp_transport_burst_limited(struct sctp_transport *);
 void sctp_transport_burst_reset(struct sctp_transport *);
 unsigned long sctp_transport_timeout(struct sctp_transport *);
-void sctp_transport_reset(struct sctp_transport *);
-void sctp_transport_update_pmtu(struct sock *, struct sctp_transport *, u32);
+void sctp_transport_reset(struct sctp_transport *t);
+void sctp_transport_update_pmtu(struct sctp_transport *t, u32 pmtu);
 void sctp_transport_immediate_rtx(struct sctp_transport *);
 void sctp_transport_dst_release(struct sctp_transport *t);
 void sctp_transport_dst_confirm(struct sctp_transport *t);
@@ -1313,6 +1315,8 @@ struct sctp_inithdr_host {
 struct sctp_stream_out {
        __u16   ssn;
        __u8    state;
+       __u64   abandoned_unsent[SCTP_PR_INDEX(MAX) + 1];
+       __u64   abandoned_sent[SCTP_PR_INDEX(MAX) + 1];
 };
 
 struct sctp_stream_in {
@@ -1876,6 +1880,7 @@ struct sctp_association {
 
        __u8 need_ecne:1,       /* Need to send an ECNE Chunk? */
             temp:1,            /* Is it a temporary association? */
+            force_delay:1,
             prsctp_enable:1,
             reconf_enable:1;
 
@@ -1951,7 +1956,7 @@ void sctp_assoc_update(struct sctp_association *old,
 
 __u32 sctp_association_get_next_tsn(struct sctp_association *);
 
-void sctp_assoc_sync_pmtu(struct sock *, struct sctp_association *);
+void sctp_assoc_sync_pmtu(struct sctp_association *asoc);
 void sctp_assoc_rwnd_increase(struct sctp_association *, unsigned int);
 void sctp_assoc_rwnd_decrease(struct sctp_association *, unsigned int);
 void sctp_assoc_set_primary(struct sctp_association *,
index 324b5965fc4de505ca98fbbb9aedc0d3a0039742..1060494ac230b80caca57f6963dca2692ae10b9d 100644 (file)
@@ -132,6 +132,14 @@ struct sctp_ulpevent *sctp_ulpevent_make_stream_reset_event(
        const struct sctp_association *asoc, __u16 flags,
        __u16 stream_num, __u16 *stream_list, gfp_t gfp);
 
+struct sctp_ulpevent *sctp_ulpevent_make_assoc_reset_event(
+       const struct sctp_association *asoc, __u16 flags,
+        __u32 local_tsn, __u32 remote_tsn, gfp_t gfp);
+
+struct sctp_ulpevent *sctp_ulpevent_make_stream_change_event(
+       const struct sctp_association *asoc, __u16 flags,
+       __u32 strchange_instrms, __u32 strchange_outstrms, gfp_t gfp);
+
 void sctp_ulpevent_read_sndrcvinfo(const struct sctp_ulpevent *event,
                                   struct msghdr *);
 void sctp_ulpevent_read_rcvinfo(const struct sctp_ulpevent *event,
index 0caee631a8364fe6e49ab8cacba864d019be8b47..fe236b3429f0d8caeb1adc367b5b4a20591c848b 100644 (file)
@@ -6,10 +6,10 @@
 u32 secure_ipv4_port_ephemeral(__be32 saddr, __be32 daddr, __be16 dport);
 u32 secure_ipv6_port_ephemeral(const __be32 *saddr, const __be32 *daddr,
                               __be16 dport);
-u32 secure_tcp_sequence_number(__be32 saddr, __be32 daddr,
+u32 secure_tcp_seq_and_tsoff(__be32 saddr, __be32 daddr,
+                            __be16 sport, __be16 dport, u32 *tsoff);
+u32 secure_tcpv6_seq_and_tsoff(const __be32 *saddr, const __be32 *daddr,
                               __be16 sport, __be16 dport, u32 *tsoff);
-u32 secure_tcpv6_sequence_number(const __be32 *saddr, const __be32 *daddr,
-                                __be16 sport, __be16 dport, u32 *tsoff);
 u64 secure_dccp_sequence_number(__be32 saddr, __be32 daddr,
                                __be16 sport, __be16 dport);
 u64 secure_dccpv6_sequence_number(__be32 *saddr, __be32 *daddr,
index 5e5997654db6454f82179cc35c4bc22e89d0c06f..66349e49d468646ce724485bb8e74952825f0d6c 100644 (file)
@@ -236,6 +236,7 @@ struct sock_common {
   *    @sk_shutdown: mask of %SEND_SHUTDOWN and/or %RCV_SHUTDOWN
   *    @sk_userlocks: %SO_SNDBUF and %SO_RCVBUF settings
   *    @sk_lock:       synchronizer
+  *    @sk_kern_sock: True if sock is using kernel lock classes
   *    @sk_rcvbuf: size of receive buffer in bytes
   *    @sk_wq: sock wait queue and async head
   *    @sk_rx_dst: receive input route used by early demux
@@ -430,7 +431,8 @@ struct sock {
 #endif
 
        kmemcheck_bitfield_begin(flags);
-       unsigned int            sk_padding : 2,
+       unsigned int            sk_padding : 1,
+                               sk_kern_sock : 1,
                                sk_no_check_tx : 1,
                                sk_no_check_rx : 1,
                                sk_userlocks : 4,
@@ -1015,7 +1017,8 @@ struct proto {
                                        int addr_len);
        int                     (*disconnect)(struct sock *sk, int flags);
 
-       struct sock *           (*accept)(struct sock *sk, int flags, int *err);
+       struct sock *           (*accept)(struct sock *sk, int flags, int *err,
+                                         bool kern);
 
        int                     (*ioctl)(struct sock *sk, int cmd,
                                         unsigned long arg);
@@ -1573,7 +1576,7 @@ int sock_cmsg_send(struct sock *sk, struct msghdr *msg,
 int sock_no_bind(struct socket *, struct sockaddr *, int);
 int sock_no_connect(struct socket *, struct sockaddr *, int, int);
 int sock_no_socketpair(struct socket *, struct socket *);
-int sock_no_accept(struct socket *, struct socket *, int);
+int sock_no_accept(struct socket *, struct socket *, int, bool);
 int sock_no_getname(struct socket *, struct sockaddr *, int *, int);
 unsigned int sock_no_poll(struct file *, struct socket *,
                          struct poll_table_struct *);
@@ -1780,11 +1783,8 @@ __sk_dst_set(struct sock *sk, struct dst_entry *dst)
 
        sk_tx_queue_clear(sk);
        sk->sk_dst_pending_confirm = 0;
-       /*
-        * This can be called while sk is owned by the caller only,
-        * with no state that can be checked in a rcu_dereference_check() cond
-        */
-       old_dst = rcu_dereference_raw(sk->sk_dst_cache);
+       old_dst = rcu_dereference_protected(sk->sk_dst_cache,
+                                           lockdep_sock_is_held(sk));
        rcu_assign_pointer(sk->sk_dst_cache, dst);
        dst_release(old_dst);
 }
@@ -2239,6 +2239,7 @@ sock_recv_timestamp(struct msghdr *msg, struct sock *sk, struct sk_buff *skb)
 void __sock_recv_ts_and_drops(struct msghdr *msg, struct sock *sk,
                              struct sk_buff *skb);
 
+#define SK_DEFAULT_STAMP (-1L * NSEC_PER_SEC)
 static inline void sock_recv_ts_and_drops(struct msghdr *msg, struct sock *sk,
                                          struct sk_buff *skb)
 {
@@ -2249,8 +2250,10 @@ static inline void sock_recv_ts_and_drops(struct msghdr *msg, struct sock *sk,
 
        if (sk->sk_flags & FLAGS_TS_OR_DROPS || sk->sk_tsflags & TSFLAGS_ANY)
                __sock_recv_ts_and_drops(msg, sk, skb);
-       else
+       else if (unlikely(sock_flag(sk, SOCK_TIMESTAMP)))
                sk->sk_stamp = skb->tstamp;
+       else if (unlikely(sk->sk_stamp == SK_DEFAULT_STAMP))
+               sk->sk_stamp = 0;
 }
 
 void __sock_tx_timestamp(__u16 tsflags, __u8 *tx_flags);
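
A hedged reading of the reworked tail of sock_recv_ts_and_drops():

/*
 * sk_stamp now starts at SK_DEFAULT_STAMP (-1 * NSEC_PER_SEC). With
 * SOCK_TIMESTAMP set, the stamp is updated per packet as before; with
 * timestamping off, it is written at most once (to 0), so a hot cache
 * line stays clean on busy receive paths.
 */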
@@ -2362,6 +2365,8 @@ bool sk_ns_capable(const struct sock *sk,
 bool sk_capable(const struct sock *sk, int cap);
 bool sk_net_capable(const struct sock *sk, int cap);
 
+void sk_get_meminfo(const struct sock *sk, u32 *meminfo);
+
 extern __u32 sysctl_wmem_max;
 extern __u32 sysctl_rmem_max;
 
index dfbd6ee0bc7cd196c052e700da43deddb8d1dfef..a46c3f2ace702932dc95c3021e7b13d72a2a4777 100644 (file)
@@ -2,6 +2,7 @@
 #define __NET_TC_PED_H
 
 #include <net/act_api.h>
+#include <linux/tc_act/tc_pedit.h>
 
 struct tcf_pedit_key_ex {
        enum pedit_header_type htype;
@@ -17,4 +18,48 @@ struct tcf_pedit {
 };
 #define to_pedit(a) ((struct tcf_pedit *)a)
 
+static inline bool is_tcf_pedit(const struct tc_action *a)
+{
+#ifdef CONFIG_NET_CLS_ACT
+       if (a->ops && a->ops->type == TCA_ACT_PEDIT)
+               return true;
+#endif
+       return false;
+}
+
+static inline int tcf_pedit_nkeys(const struct tc_action *a)
+{
+       return to_pedit(a)->tcfp_nkeys;
+}
+
+static inline u32 tcf_pedit_htype(const struct tc_action *a, int index)
+{
+       if (to_pedit(a)->tcfp_keys_ex)
+               return to_pedit(a)->tcfp_keys_ex[index].htype;
+
+       return TCA_PEDIT_KEY_EX_HDR_TYPE_NETWORK;
+}
+
+static inline u32 tcf_pedit_cmd(const struct tc_action *a, int index)
+{
+       if (to_pedit(a)->tcfp_keys_ex)
+               return to_pedit(a)->tcfp_keys_ex[index].cmd;
+
+       return __PEDIT_CMD_MAX;
+}
+
+static inline u32 tcf_pedit_mask(const struct tc_action *a, int index)
+{
+       return to_pedit(a)->tcfp_keys[index].mask;
+}
+
+static inline u32 tcf_pedit_val(const struct tc_action *a, int index)
+{
+       return to_pedit(a)->tcfp_keys[index].val;
+}
+
+static inline u32 tcf_pedit_offset(const struct tc_action *a, int index)
+{
+       return to_pedit(a)->tcfp_keys[index].off;
+}
 #endif /* __NET_TC_PED_H */
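
The accessors give offloading drivers read-only access to pedit's rewrite
keys. A hedged kernel-side sketch of walking them; drv_program_rewrite()
is hypothetical:

static int example_offload_pedit(const struct tc_action *a)
{
	int i;

	if (!is_tcf_pedit(a))
		return -EOPNOTSUPP;

	for (i = 0; i < tcf_pedit_nkeys(a); i++) {
		u32 htype = tcf_pedit_htype(a, i);
		u32 cmd   = tcf_pedit_cmd(a, i);   /* __PEDIT_CMD_MAX if unset */
		u32 mask  = tcf_pedit_mask(a, i);
		u32 val   = tcf_pedit_val(a, i);
		u32 off   = tcf_pedit_offset(a, i);

		drv_program_rewrite(htype, cmd, off, mask, val); /* hypothetical */
	}
	return 0;
}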
index 48cca321ee6c4f3e44f8f546d05d5f32b470ccc1..c2090df944ff53224ef0500c05f62672f0d5e0b3 100644 (file)
@@ -13,9 +13,6 @@
 #include <net/act_api.h>
 #include <linux/tc_act/tc_vlan.h>
 
-#define VLAN_F_POP             0x1
-#define VLAN_F_PUSH            0x2
-
 struct tcf_vlan {
        struct tc_action        common;
        int                     tcfv_action;
@@ -49,4 +46,9 @@ static inline __be16 tcf_vlan_push_proto(const struct tc_action *a)
        return to_vlan(a)->tcfv_push_proto;
 }
 
+static inline u8 tcf_vlan_push_prio(const struct tc_action *a)
+{
+       return to_vlan(a)->tcfv_push_prio;
+}
+
 #endif /* __NET_TC_VLAN_H */
index 6ec4ea652f3f55e53675dbe09f29599af179c41a..cc6ae0a95201f0adc52c2c46b429566806da6745 100644 (file)
@@ -78,6 +78,9 @@ void tcp_time_wait(struct sock *sk, int state, int timeo);
 /* Maximal number of ACKs sent quickly to accelerate slow-start. */
 #define TCP_MAX_QUICKACKS      16U
 
+/* Maximum window scale value allowed by RFC 1323 */
+#define TCP_MAX_WSCALE         14U
+
 /* urg_data states */
 #define TCP_URG_VALID  0x0100
 #define TCP_URG_NOTYET 0x0200
@@ -406,11 +409,7 @@ void tcp_clear_retrans(struct tcp_sock *tp);
 void tcp_update_metrics(struct sock *sk);
 void tcp_init_metrics(struct sock *sk);
 void tcp_metrics_init(void);
-bool tcp_peer_is_proven(struct request_sock *req, struct dst_entry *dst,
-                       bool paws_check, bool timestamps);
-bool tcp_remember_stamp(struct sock *sk);
-bool tcp_tw_remember_stamp(struct inet_timewait_sock *tw);
-void tcp_fetch_timewait_stamp(struct sock *sk, struct dst_entry *dst);
+bool tcp_peer_is_proven(struct request_sock *req, struct dst_entry *dst);
 void tcp_disable_fack(struct tcp_sock *tp);
 void tcp_close(struct sock *sk, long timeout);
 void tcp_init_sock(struct sock *sk);
@@ -1252,9 +1251,11 @@ void tcp_select_initial_window(int __space, __u32 mss, __u32 *rcv_wnd,
 
 static inline int tcp_win_from_space(int space)
 {
-       return sysctl_tcp_adv_win_scale<=0 ?
-               (space>>(-sysctl_tcp_adv_win_scale)) :
-               space - (space>>sysctl_tcp_adv_win_scale);
+       int tcp_adv_win_scale = sysctl_tcp_adv_win_scale;
+
+       return tcp_adv_win_scale <= 0 ?
+               (space>>(-tcp_adv_win_scale)) :
+               space - (space>>tcp_adv_win_scale);
 }
 
 /* Note: caller must be prepared to deal with negative returns */
@@ -1814,9 +1815,8 @@ struct tcp_request_sock_ops {
                                 __u16 *mss);
 #endif
        struct dst_entry *(*route_req)(const struct sock *sk, struct flowi *fl,
-                                      const struct request_sock *req,
-                                      bool *strict);
-       __u32 (*init_seq)(const struct sk_buff *skb, u32 *tsoff);
+                                      const struct request_sock *req);
+       __u32 (*init_seq_tsoff)(const struct sk_buff *skb, u32 *tsoff);
        int (*send_synack)(const struct sock *sk, struct dst_entry *dst,
                           struct flowi *fl, struct request_sock *req,
                           struct tcp_fastopen_cookie *foc,
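
The tcp_win_from_space() rewrite reads sysctl_tcp_adv_win_scale once into
a local, so both uses of the value stay coherent even if the sysctl
changes concurrently. Worked numbers for the two branches:

/*
 * space = 65536, tcp_adv_win_scale =  2  ->  65536 - (65536 >> 2) = 49152
 * space = 65536, tcp_adv_win_scale = -2  ->  65536 >> 2           = 16384
 */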
index c9d8b8e848e05c2e7228f287f88ccdb57b2e10c2..3391dbd739595a76150453c28468ce8bb55530f8 100644 (file)
@@ -372,4 +372,5 @@ void udp_encap_enable(void);
 #if IS_ENABLED(CONFIG_IPV6)
 void udpv6_encap_enable(void);
 #endif
+
 #endif /* _UDP_H */
index 14d82bf16692a6d0ed66721b1548ba152dbc18de..9e3dc7b81a4db0f49b20c32b4c69bc95b62f53ea 100644 (file)
@@ -586,7 +586,6 @@ struct xfrm_migrate {
 
 struct xfrm_mgr {
        struct list_head        list;
-       char                    *id;
        int                     (*notify)(struct xfrm_state *x, const struct km_event *c);
        int                     (*acquire)(struct xfrm_state *x, struct xfrm_tmpl *, struct xfrm_policy *xp);
        struct xfrm_policy      *(*compile_policy)(struct sock *sk, int opt, u8 *data, int len, int *dir);
@@ -817,12 +816,12 @@ static inline void xfrm_state_hold(struct xfrm_state *x)
 }
 
 static inline bool addr_match(const void *token1, const void *token2,
-                             int prefixlen)
+                             unsigned int prefixlen)
 {
        const __be32 *a1 = token1;
        const __be32 *a2 = token2;
-       int pdw;
-       int pbi;
+       unsigned int pdw;
+       unsigned int pbi;
 
        pdw = prefixlen >> 5;     /* num of whole u32 in prefix */
        pbi = prefixlen &  0x1f;  /* num of bits in incomplete u32 in prefix */
@@ -846,9 +845,9 @@ static inline bool addr_match(const void *token1, const void *token2,
 static inline bool addr4_match(__be32 a1, __be32 a2, u8 prefixlen)
 {
        /* C99 6.5.7 (3): u32 << 32 is undefined behaviour */
-       if (prefixlen == 0)
+       if (sizeof(long) == 4 && prefixlen == 0)
                return true;
-       return !((a1 ^ a2) & htonl(0xFFFFFFFFu << (32 - prefixlen)));
+       return !((a1 ^ a2) & htonl(~0UL << (32 - prefixlen)));
 }
 
 static __inline__
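
Why the addr4_match() guard changed: shifting a 32-bit value by 32 is
undefined (C99 6.5.7), so the old code had to special-case prefixlen == 0
unconditionally. Worked cases for the new expression:

/*
 * 64-bit long:  ~0UL << 32 = 0xffffffff00000000; htonl() takes a u32, so
 *               the mask truncates to 0 and every address matches -- the
 *               intended prefixlen == 0 semantics, with no extra branch.
 * 32-bit long:  ~0UL << 32 would still be undefined, hence the retained
 *               sizeof(long) == 4 early return.
 */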
index 0f1813c1368795994e012d00c607499879c130aa..99e4423eb2b80b142024bed892ddc4a84ac5e576 100644 (file)
@@ -1863,6 +1863,9 @@ struct ib_port_immutable {
 };
 
 struct ib_device {
+       /* Do not access @dma_device directly from ULPs or HW drivers. */
+       struct device                *dma_device;
+
        char                          name[IB_DEVICE_NAME_MAX];
 
        struct list_head              event_handler_list;
@@ -3007,7 +3010,7 @@ static inline int ib_req_ncomp_notif(struct ib_cq *cq, int wc_cnt)
  */
 static inline int ib_dma_mapping_error(struct ib_device *dev, u64 dma_addr)
 {
-       return dma_mapping_error(&dev->dev, dma_addr);
+       return dma_mapping_error(dev->dma_device, dma_addr);
 }
 
 /**
@@ -3021,7 +3024,7 @@ static inline u64 ib_dma_map_single(struct ib_device *dev,
                                    void *cpu_addr, size_t size,
                                    enum dma_data_direction direction)
 {
-       return dma_map_single(&dev->dev, cpu_addr, size, direction);
+       return dma_map_single(dev->dma_device, cpu_addr, size, direction);
 }
 
 /**
@@ -3035,7 +3038,7 @@ static inline void ib_dma_unmap_single(struct ib_device *dev,
                                       u64 addr, size_t size,
                                       enum dma_data_direction direction)
 {
-       dma_unmap_single(&dev->dev, addr, size, direction);
+       dma_unmap_single(dev->dma_device, addr, size, direction);
 }
 
 /**
@@ -3052,7 +3055,7 @@ static inline u64 ib_dma_map_page(struct ib_device *dev,
                                  size_t size,
                                         enum dma_data_direction direction)
 {
-       return dma_map_page(&dev->dev, page, offset, size, direction);
+       return dma_map_page(dev->dma_device, page, offset, size, direction);
 }
 
 /**
@@ -3066,7 +3069,7 @@ static inline void ib_dma_unmap_page(struct ib_device *dev,
                                     u64 addr, size_t size,
                                     enum dma_data_direction direction)
 {
-       dma_unmap_page(&dev->dev, addr, size, direction);
+       dma_unmap_page(dev->dma_device, addr, size, direction);
 }
 
 /**
@@ -3080,7 +3083,7 @@ static inline int ib_dma_map_sg(struct ib_device *dev,
                                struct scatterlist *sg, int nents,
                                enum dma_data_direction direction)
 {
-       return dma_map_sg(&dev->dev, sg, nents, direction);
+       return dma_map_sg(dev->dma_device, sg, nents, direction);
 }
 
 /**
@@ -3094,7 +3097,7 @@ static inline void ib_dma_unmap_sg(struct ib_device *dev,
                                   struct scatterlist *sg, int nents,
                                   enum dma_data_direction direction)
 {
-       dma_unmap_sg(&dev->dev, sg, nents, direction);
+       dma_unmap_sg(dev->dma_device, sg, nents, direction);
 }
 
 static inline int ib_dma_map_sg_attrs(struct ib_device *dev,
@@ -3102,7 +3105,8 @@ static inline int ib_dma_map_sg_attrs(struct ib_device *dev,
                                      enum dma_data_direction direction,
                                      unsigned long dma_attrs)
 {
-       return dma_map_sg_attrs(&dev->dev, sg, nents, direction, dma_attrs);
+       return dma_map_sg_attrs(dev->dma_device, sg, nents, direction,
+                               dma_attrs);
 }
 
 static inline void ib_dma_unmap_sg_attrs(struct ib_device *dev,
@@ -3110,7 +3114,7 @@ static inline void ib_dma_unmap_sg_attrs(struct ib_device *dev,
                                         enum dma_data_direction direction,
                                         unsigned long dma_attrs)
 {
-       dma_unmap_sg_attrs(&dev->dev, sg, nents, direction, dma_attrs);
+       dma_unmap_sg_attrs(dev->dma_device, sg, nents, direction, dma_attrs);
 }
 /**
  * ib_sg_dma_address - Return the DMA address from a scatter/gather entry
@@ -3152,7 +3156,7 @@ static inline void ib_dma_sync_single_for_cpu(struct ib_device *dev,
                                              size_t size,
                                              enum dma_data_direction dir)
 {
-       dma_sync_single_for_cpu(&dev->dev, addr, size, dir);
+       dma_sync_single_for_cpu(dev->dma_device, addr, size, dir);
 }
 
 /**
@@ -3167,7 +3171,7 @@ static inline void ib_dma_sync_single_for_device(struct ib_device *dev,
                                                 size_t size,
                                                 enum dma_data_direction dir)
 {
-       dma_sync_single_for_device(&dev->dev, addr, size, dir);
+       dma_sync_single_for_device(dev->dma_device, addr, size, dir);
 }
 
 /**
@@ -3182,7 +3186,7 @@ static inline void *ib_dma_alloc_coherent(struct ib_device *dev,
                                           dma_addr_t *dma_handle,
                                           gfp_t flag)
 {
-       return dma_alloc_coherent(&dev->dev, size, dma_handle, flag);
+       return dma_alloc_coherent(dev->dma_device, size, dma_handle, flag);
 }
 
 /**
@@ -3196,7 +3200,7 @@ static inline void ib_dma_free_coherent(struct ib_device *dev,
                                        size_t size, void *cpu_addr,
                                        dma_addr_t dma_handle)
 {
-       dma_free_coherent(&dev->dev, size, cpu_addr, dma_handle);
+       dma_free_coherent(dev->dma_device, size, cpu_addr, dma_handle);
 }
 
 /**
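
Routing the wrappers back through a dedicated dma_device keeps ULPs
independent of which struct device actually performs DMA. An illustrative
caller (an assumption, not code from this diff):

static int example_map(struct ib_device *ibdev, void *buf, size_t len,
		       u64 *dma)
{
	*dma = ib_dma_map_single(ibdev, buf, len, DMA_TO_DEVICE);
	if (ib_dma_mapping_error(ibdev, *dma))
		return -ENOMEM;
	return 0;
}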
index b0e275de6dec0d2be9adf09810e889c89d7ad06c..583875ea136ab228ea14a727581c384f50527211 100644 (file)
@@ -196,6 +196,7 @@ struct iscsi_conn {
        struct iscsi_task       *task;          /* xmit task in progress */
 
        /* xmit */
+       spinlock_t              taskqueuelock;  /* protects the next three lists */
        struct list_head        mgmtqueue;      /* mgmt (control) xmit queue */
        struct list_head        cmdqueue;       /* data-path cmd queue */
        struct list_head        requeue;        /* tasks needing another run */
index 6f22b39f1b0c3bc8bb1812a631a638ef1987a784..080c7ce9bae8892a43838043d986282a1385283a 100644 (file)
@@ -472,6 +472,10 @@ static inline int scsi_device_created(struct scsi_device *sdev)
                sdev->sdev_state == SDEV_CREATED_BLOCK;
 }
 
+int scsi_internal_device_block(struct scsi_device *sdev, bool wait);
+int scsi_internal_device_unblock(struct scsi_device *sdev,
+                                enum scsi_device_state new_state);
+
 /* accessor functions for the SCSI parameters */
 static inline int scsi_device_sync(struct scsi_device *sdev)
 {
index b54b98dc2d4a77681dd3ecf883d75e062589ee8c..1b0f447ce850f015e64dd27e47751fe945cbb2ec 100644 (file)
@@ -4,7 +4,12 @@
 #include <linux/types.h>
 #include <target/target_core_base.h>
 
-#define TRANSPORT_FLAG_PASSTHROUGH             1
+#define TRANSPORT_FLAG_PASSTHROUGH             0x1
+/*
+ * ALUA commands, state checks and setup operations are handled by the
+ * backend module.
+ */
+#define TRANSPORT_FLAG_PASSTHROUGH_ALUA                0x2
 
 struct request_queue;
 struct scatterlist;
index 37c274e61acceee74d792a240b8f3695f0d78085..4b784b6e21c0d9cb533b31997883d7dd447343bf 100644 (file)
@@ -299,7 +299,7 @@ struct t10_alua_tg_pt_gp {
        struct list_head tg_pt_gp_lun_list;
        struct se_lun *tg_pt_gp_alua_lun;
        struct se_node_acl *tg_pt_gp_alua_nacl;
-       struct delayed_work tg_pt_gp_transition_work;
+       struct work_struct tg_pt_gp_transition_work;
        struct completion *tg_pt_gp_transition_complete;
 };
 
index 39123c06a5661316a80dd677b43b1e581a17e2e7..29a3d53a401535ef1e602a2dd82d79d66bc68bea 100644 (file)
@@ -683,6 +683,57 @@ TRACE_EVENT(rxrpc_rx_ack,
                      __entry->n_acks)
            );
 
+TRACE_EVENT(rxrpc_rx_abort,
+           TP_PROTO(struct rxrpc_call *call, rxrpc_serial_t serial,
+                    u32 abort_code),
+
+           TP_ARGS(call, serial, abort_code),
+
+           TP_STRUCT__entry(
+                   __field(struct rxrpc_call *,        call            )
+                   __field(rxrpc_serial_t,             serial          )
+                   __field(u32,                        abort_code      )
+                            ),
+
+           TP_fast_assign(
+                   __entry->call = call;
+                   __entry->serial = serial;
+                   __entry->abort_code = abort_code;
+                          ),
+
+           TP_printk("c=%p ABORT %08x ac=%d",
+                     __entry->call,
+                     __entry->serial,
+                     __entry->abort_code)
+           );
+
+TRACE_EVENT(rxrpc_rx_rwind_change,
+           TP_PROTO(struct rxrpc_call *call, rxrpc_serial_t serial,
+                    u32 rwind, bool wake),
+
+           TP_ARGS(call, serial, rwind, wake),
+
+           TP_STRUCT__entry(
+                   __field(struct rxrpc_call *,        call            )
+                   __field(rxrpc_serial_t,             serial          )
+                   __field(u32,                        rwind           )
+                   __field(bool,                       wake            )
+                            ),
+
+           TP_fast_assign(
+                   __entry->call = call;
+                   __entry->serial = serial;
+                   __entry->rwind = rwind;
+                   __entry->wake = wake;
+                          ),
+
+           TP_printk("c=%p %08x rw=%u%s",
+                     __entry->call,
+                     __entry->serial,
+                     __entry->rwind,
+                     __entry->wake ? " wake" : "")
+           );
+
 TRACE_EVENT(rxrpc_tx_data,
            TP_PROTO(struct rxrpc_call *call, rxrpc_seq_t seq,
                     rxrpc_serial_t serial, u8 flags, bool retrans, bool lose),
@@ -1087,6 +1138,56 @@ TRACE_EVENT(rxrpc_improper_term,
                      __entry->abort_code)
            );
 
+TRACE_EVENT(rxrpc_rx_eproto,
+           TP_PROTO(struct rxrpc_call *call, rxrpc_serial_t serial,
+                    const char *why),
+
+           TP_ARGS(call, serial, why),
+
+           TP_STRUCT__entry(
+                   __field(struct rxrpc_call *,        call            )
+                   __field(rxrpc_serial_t,             serial          )
+                   __field(const char *,               why             )
+                            ),
+
+           TP_fast_assign(
+                   __entry->call = call;
+                   __entry->serial = serial;
+                   __entry->why = why;
+                          ),
+
+           TP_printk("c=%p EPROTO %08x %s",
+                     __entry->call,
+                     __entry->serial,
+                     __entry->why)
+           );
+
+TRACE_EVENT(rxrpc_connect_call,
+           TP_PROTO(struct rxrpc_call *call),
+
+           TP_ARGS(call),
+
+           TP_STRUCT__entry(
+                   __field(struct rxrpc_call *,        call            )
+                   __field(unsigned long,              user_call_ID    )
+                   __field(u32,                        cid             )
+                   __field(u32,                        call_id         )
+                            ),
+
+           TP_fast_assign(
+                   __entry->call = call;
+                   __entry->user_call_ID = call->user_call_ID;
+                   __entry->cid = call->cid;
+                   __entry->call_id = call->call_id;
+                          ),
+
+           TP_printk("c=%p u=%p %08x:%08x",
+                     __entry->call,
+                     (void *)__entry->user_call_ID,
+                     __entry->cid,
+                     __entry->call_id)
+           );
+
 #endif /* _TRACE_RXRPC_H */
 
 /* This part must be outside protection */
index 14e49c7981359ccdac1e2d9d87a3c284e7c682f3..b35533b9427719c3ddcd2c776a20f52d5465aea0 100644 (file)
@@ -1,5 +1,6 @@
 #undef TRACE_SYSTEM
 #define TRACE_SYSTEM raw_syscalls
+#undef TRACE_INCLUDE_FILE
 #define TRACE_INCLUDE_FILE syscalls
 
 #if !defined(_TRACE_EVENTS_SYSCALLS_H) || defined(TRACE_HEADER_MULTI_READ)
index 2c748ddad5f875711ed66f91eae9bc69b9a41fe0..2b488565599daf73b8942fe6482890830af94dff 100644 (file)
 
 #define SCM_TIMESTAMPING_OPT_STATS     54
 
+#define SO_MEMINFO             55
+
+#define SO_INCOMING_NAPI_ID    56
+
+#define SO_COOKIE              57
+
 #endif /* __ASM_GENERIC_SOCKET_H */
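
A hedged userspace sketch for the new SO_MEMINFO option. It is assumed to
fill an array of u32 counters in the sk_meminfo layout from
linux/sock_diag.h (indices below assume SK_MEMINFO_RMEM_ALLOC == 0 and
SK_MEMINFO_SNDBUF == 3):

#include <stdio.h>
#include <stdint.h>
#include <sys/socket.h>

#ifndef SO_MEMINFO
#define SO_MEMINFO 55		/* value from the hunk above */
#endif

static void dump_meminfo(int fd)
{
	uint32_t vals[16];	/* at least SK_MEMINFO_VARS slots */
	socklen_t len = sizeof(vals);

	if (getsockopt(fd, SOL_SOCKET, SO_MEMINFO, vals, &len) == 0)
		printf("rmem_alloc=%u sndbuf=%u\n", vals[0], vals[3]);
}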
index 9b1462e38b821a762b284b44a20a96de9f0930d9..a076cf1a3a23be2fbee73dab483e051b37b2370c 100644 (file)
@@ -730,9 +730,11 @@ __SYSCALL(__NR_pkey_mprotect, sys_pkey_mprotect)
 __SYSCALL(__NR_pkey_alloc,    sys_pkey_alloc)
 #define __NR_pkey_free 290
 __SYSCALL(__NR_pkey_free,     sys_pkey_free)
+#define __NR_statx 291
+__SYSCALL(__NR_statx,     sys_statx)
 
 #undef __NR_syscalls
-#define __NR_syscalls 291
+#define __NR_syscalls 292
 
 /*
  * All syscalls below here should go away really,
index 407cb55df6ac178e11620fd9554cc913e30b401c..7fb97863c94577d7b9f583abe8a41fe14f54b734 100644 (file)
@@ -33,8 +33,8 @@ extern "C" {
 #define OMAP_PARAM_CHIPSET_ID  1       /* ie. 0x3430, 0x4430, etc */
 
 struct drm_omap_param {
-       uint64_t param;                 /* in */
-       uint64_t value;                 /* in (set_param), out (get_param) */
+       __u64 param;                    /* in */
+       __u64 value;                    /* in (set_param), out (get_param) */
 };
 
 #define OMAP_BO_SCANOUT                0x00000001      /* scanout capable (phys contiguous) */
@@ -53,18 +53,18 @@ struct drm_omap_param {
 #define OMAP_BO_TILED          (OMAP_BO_TILED_8 | OMAP_BO_TILED_16 | OMAP_BO_TILED_32)
 
 union omap_gem_size {
-       uint32_t bytes;         /* (for non-tiled formats) */
+       __u32 bytes;            /* (for non-tiled formats) */
        struct {
-               uint16_t width;
-               uint16_t height;
+               __u16 width;
+               __u16 height;
        } tiled;                /* (for tiled formats) */
 };
 
 struct drm_omap_gem_new {
        union omap_gem_size size;       /* in */
-       uint32_t flags;                 /* in */
-       uint32_t handle;                /* out */
-       uint32_t __pad;
+       __u32 flags;                    /* in */
+       __u32 handle;                   /* out */
+       __u32 __pad;
 };
 
 /* mask of operations: */
@@ -74,33 +74,33 @@ enum omap_gem_op {
 };
 
 struct drm_omap_gem_cpu_prep {
-       uint32_t handle;                /* buffer handle (in) */
-       uint32_t op;                    /* mask of omap_gem_op (in) */
+       __u32 handle;                   /* buffer handle (in) */
+       __u32 op;                       /* mask of omap_gem_op (in) */
 };
 
 struct drm_omap_gem_cpu_fini {
-       uint32_t handle;                /* buffer handle (in) */
-       uint32_t op;                    /* mask of omap_gem_op (in) */
+       __u32 handle;                   /* buffer handle (in) */
+       __u32 op;                       /* mask of omap_gem_op (in) */
        /* TODO maybe here we pass down info about what regions are touched
         * by sw so we can be clever about cache ops?  For now a placeholder,
         * set to zero and we just do full buffer flush..
         */
-       uint32_t nregions;
-       uint32_t __pad;
+       __u32 nregions;
+       __u32 __pad;
 };
 
 struct drm_omap_gem_info {
-       uint32_t handle;                /* buffer handle (in) */
-       uint32_t pad;
-       uint64_t offset;                /* mmap offset (out) */
+       __u32 handle;                   /* buffer handle (in) */
+       __u32 pad;
+       __u64 offset;                   /* mmap offset (out) */
        /* note: in case of tiled buffers, the user virtual size can be
         * different from the physical size (ie. how many pages are needed
         * to back the object) which is returned in DRM_IOCTL_GEM_OPEN..
         * This size here is the one that should be used if you want to
         * mmap() the buffer:
         */
-       uint32_t size;                  /* virtual size for mmap'ing (out) */
-       uint32_t __pad;
+       __u32 size;                     /* virtual size for mmap'ing (out) */
+       __u32 __pad;
 };
 
 #define DRM_OMAP_GET_PARAM             0x00
index 0539a0ceef38155835552360667070552ebce641..1e062bb54eec11b866cfb8faf99e44c725d2c4e5 100644 (file)
@@ -81,6 +81,7 @@ enum bpf_cmd {
        BPF_OBJ_GET,
        BPF_PROG_ATTACH,
        BPF_PROG_DETACH,
+       BPF_PROG_TEST_RUN,
 };
 
 enum bpf_map_type {
@@ -96,6 +97,8 @@ enum bpf_map_type {
        BPF_MAP_TYPE_LRU_HASH,
        BPF_MAP_TYPE_LRU_PERCPU_HASH,
        BPF_MAP_TYPE_LPM_TRIE,
+       BPF_MAP_TYPE_ARRAY_OF_MAPS,
+       BPF_MAP_TYPE_HASH_OF_MAPS,
 };
 
 enum bpf_prog_type {
@@ -152,6 +155,7 @@ union bpf_attr {
                __u32   value_size;     /* size of value in bytes */
                __u32   max_entries;    /* max number of entries in a map */
                __u32   map_flags;      /* prealloc or not */
+               __u32   inner_map_fd;   /* fd pointing to the inner map */
        };
 
        struct { /* anonymous struct used by BPF_MAP_*_ELEM commands */
@@ -186,6 +190,17 @@ union bpf_attr {
                __u32           attach_type;
                __u32           attach_flags;
        };
+
+       struct { /* anonymous struct used by BPF_PROG_TEST_RUN command */
+               __u32           prog_fd;
+               __u32           retval;
+               __u32           data_size_in;
+               __u32           data_size_out;
+               __aligned_u64   data_in;
+               __aligned_u64   data_out;
+               __u32           repeat;
+               __u32           duration;
+       } test;
 } __attribute__((aligned(8)));
 
 /* BPF helper function descriptions:
@@ -456,6 +471,18 @@ union bpf_attr {
  *     Return:
  *       > 0 length of the string including the trailing NUL on success
  *       < 0 error
+ *
+ * u64 bpf_get_socket_cookie(skb)
+ *     Get the cookie for the socket stored inside sk_buff.
+ *     @skb: pointer to skb
+ *     Return: 8-byte non-decreasing number on success, or 0 if the socket
+ *     field is missing inside sk_buff
+ *
+ * u32 bpf_get_socket_uid(skb)
+ *     Get the owner uid of the socket stored inside sk_buff.
+ *     @skb: pointer to skb
+ *     Return: uid of the socket owner on success or 0 if the socket pointer
+ *     inside sk_buff is NULL
  */
 #define __BPF_FUNC_MAPPER(FN)          \
        FN(unspec),                     \
@@ -503,7 +530,9 @@ union bpf_attr {
        FN(get_numa_node_id),           \
        FN(skb_change_head),            \
        FN(xdp_adjust_head),            \
-       FN(probe_read_str),
+       FN(probe_read_str),             \
+       FN(get_socket_cookie),          \
+       FN(get_socket_uid),
 
 /* integer value in 'imm' field of BPF_CALL instruction selects which helper
  * function eBPF program intends to call
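
A hedged sketch of driving the new BPF_PROG_TEST_RUN command through the
raw bpf(2) syscall; prog_fd is assumed to come from an earlier
BPF_PROG_LOAD, and the field layout follows the test struct added above:

#include <linux/bpf.h>
#include <string.h>
#include <sys/syscall.h>
#include <unistd.h>

static int prog_test_run(int prog_fd, void *data, __u32 size,
			 __u32 repeat, __u32 *retval)
{
	union bpf_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.test.prog_fd = prog_fd;
	attr.test.data_in = (unsigned long)data;
	attr.test.data_size_in = size;
	attr.test.repeat = repeat;

	if (syscall(__NR_bpf, BPF_PROG_TEST_RUN, &attr, sizeof(attr)) < 0)
		return -1;

	if (retval)
		*retval = attr.test.retval;
	return 0;	/* mean ns per run is reported in attr.test.duration */
}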
index db4c253f8011b2f483ddd1ffc09f4f04a93fdc0a..dcfc3a5a9cb1d20f29bbac00c6ef315006e9d208 100644 (file)
@@ -713,33 +713,6 @@ enum btrfs_err_code {
        BTRFS_ERROR_DEV_ONLY_WRITABLE,
        BTRFS_ERROR_DEV_EXCL_RUN_IN_PROGRESS
 };
-/* An error code to error string mapping for the kernel
-*  error codes
-*/
-static inline char *btrfs_err_str(enum btrfs_err_code err_code)
-{
-       switch (err_code) {
-               case BTRFS_ERROR_DEV_RAID1_MIN_NOT_MET:
-                       return "unable to go below two devices on raid1";
-               case BTRFS_ERROR_DEV_RAID10_MIN_NOT_MET:
-                       return "unable to go below four devices on raid10";
-               case BTRFS_ERROR_DEV_RAID5_MIN_NOT_MET:
-                       return "unable to go below two devices on raid5";
-               case BTRFS_ERROR_DEV_RAID6_MIN_NOT_MET:
-                       return "unable to go below three devices on raid6";
-               case BTRFS_ERROR_DEV_TGT_REPLACE:
-                       return "unable to remove the dev_replace target dev";
-               case BTRFS_ERROR_DEV_MISSING_NOT_FOUND:
-                       return "no missing devices found to remove";
-               case BTRFS_ERROR_DEV_ONLY_WRITABLE:
-                       return "unable to remove the only writeable device";
-               case BTRFS_ERROR_DEV_EXCL_RUN_IN_PROGRESS:
-                       return "add/delete/balance/replace/resize operation "\
-                               "in progress";
-               default:
-                       return NULL;
-       }
-}
 
 #define BTRFS_IOC_SNAP_CREATE _IOW(BTRFS_IOCTL_MAGIC, 1, \
                                   struct btrfs_ioctl_vol_args)
index 0f1f3a12e23c30e511cdb332059e30ee8d3d5efb..b47bee2773477b1504ebaee3b1e88f806f35bfc2 100644 (file)
@@ -65,8 +65,12 @@ enum devlink_command {
 #define DEVLINK_CMD_ESWITCH_MODE_SET /* obsolete, never use this! */ \
        DEVLINK_CMD_ESWITCH_SET
 
-       /* add new commands above here */
+       DEVLINK_CMD_DPIPE_TABLE_GET,
+       DEVLINK_CMD_DPIPE_ENTRIES_GET,
+       DEVLINK_CMD_DPIPE_HEADERS_GET,
+       DEVLINK_CMD_DPIPE_TABLE_COUNTERS_SET,
 
+       /* add new commands above here */
        __DEVLINK_CMD_MAX,
        DEVLINK_CMD_MAX = __DEVLINK_CMD_MAX - 1
 };
@@ -148,10 +152,71 @@ enum devlink_attr {
        DEVLINK_ATTR_ESWITCH_MODE,              /* u16 */
        DEVLINK_ATTR_ESWITCH_INLINE_MODE,       /* u8 */
 
+       DEVLINK_ATTR_DPIPE_TABLES,              /* nested */
+       DEVLINK_ATTR_DPIPE_TABLE,               /* nested */
+       DEVLINK_ATTR_DPIPE_TABLE_NAME,          /* string */
+       DEVLINK_ATTR_DPIPE_TABLE_SIZE,          /* u64 */
+       DEVLINK_ATTR_DPIPE_TABLE_MATCHES,       /* nested */
+       DEVLINK_ATTR_DPIPE_TABLE_ACTIONS,       /* nested */
+       DEVLINK_ATTR_DPIPE_TABLE_COUNTERS_ENABLED,      /* u8 */
+
+       DEVLINK_ATTR_DPIPE_ENTRIES,             /* nested */
+       DEVLINK_ATTR_DPIPE_ENTRY,               /* nested */
+       DEVLINK_ATTR_DPIPE_ENTRY_INDEX,         /* u64 */
+       DEVLINK_ATTR_DPIPE_ENTRY_MATCH_VALUES,  /* nested */
+       DEVLINK_ATTR_DPIPE_ENTRY_ACTION_VALUES, /* nested */
+       DEVLINK_ATTR_DPIPE_ENTRY_COUNTER,       /* u64 */
+
+       DEVLINK_ATTR_DPIPE_MATCH,               /* nested */
+       DEVLINK_ATTR_DPIPE_MATCH_VALUE,         /* nested */
+       DEVLINK_ATTR_DPIPE_MATCH_TYPE,          /* u32 */
+
+       DEVLINK_ATTR_DPIPE_ACTION,              /* nested */
+       DEVLINK_ATTR_DPIPE_ACTION_VALUE,        /* nested */
+       DEVLINK_ATTR_DPIPE_ACTION_TYPE,         /* u32 */
+
+       DEVLINK_ATTR_DPIPE_VALUE,
+       DEVLINK_ATTR_DPIPE_VALUE_MASK,
+       DEVLINK_ATTR_DPIPE_VALUE_MAPPING,       /* u32 */
+
+       DEVLINK_ATTR_DPIPE_HEADERS,             /* nested */
+       DEVLINK_ATTR_DPIPE_HEADER,              /* nested */
+       DEVLINK_ATTR_DPIPE_HEADER_NAME,         /* string */
+       DEVLINK_ATTR_DPIPE_HEADER_ID,           /* u32 */
+       DEVLINK_ATTR_DPIPE_HEADER_FIELDS,       /* nested */
+       DEVLINK_ATTR_DPIPE_HEADER_GLOBAL,       /* u8 */
+       DEVLINK_ATTR_DPIPE_HEADER_INDEX,        /* u32 */
+
+       DEVLINK_ATTR_DPIPE_FIELD,               /* nested */
+       DEVLINK_ATTR_DPIPE_FIELD_NAME,          /* string */
+       DEVLINK_ATTR_DPIPE_FIELD_ID,            /* u32 */
+       DEVLINK_ATTR_DPIPE_FIELD_BITWIDTH,      /* u32 */
+       DEVLINK_ATTR_DPIPE_FIELD_MAPPING_TYPE,  /* u32 */
+
+       DEVLINK_ATTR_PAD,
+
        /* add new attributes above here, update the policy in devlink.c */
 
        __DEVLINK_ATTR_MAX,
        DEVLINK_ATTR_MAX = __DEVLINK_ATTR_MAX - 1
 };
 
+/* Mapping between internal resource described by the field and system
+ * structure
+ */
+enum devlink_dpipe_field_mapping_type {
+       DEVLINK_DPIPE_FIELD_MAPPING_TYPE_NONE,
+       DEVLINK_DPIPE_FIELD_MAPPING_TYPE_IFINDEX,
+};
+
+/* Match type - specify the type of the match */
+enum devlink_dpipe_match_type {
+       DEVLINK_DPIPE_MATCH_TYPE_FIELD_EXACT,
+};
+
+/* Action type - specify the action type */
+enum devlink_dpipe_action_type {
+       DEVLINK_DPIPE_ACTION_TYPE_FIELD_MODIFY,
+};
+
 #endif /* _UAPI_LINUX_DEVLINK_H_ */
index 3dc91a46e8b8da0b243a12a168bbf205e5a87916..5f4ea28eabe4bf65dcd7f2a5501cf89a7e1c5760 100644 (file)
@@ -1487,6 +1487,7 @@ enum ethtool_link_mode_bit_indices {
  */
 
 /* The forced speed, in units of 1Mb. All values 0 to INT_MAX are legal. */
+/* Update drivers/net/phy/phy.c:phy_speed_to_str() when adding new values */
 #define SPEED_10               10
 #define SPEED_100              100
 #define SPEED_1000             1000
index 72a04a0e8ccef1e5560378af9177a6c47954daaf..57d1edb8efd9bae86d0b11779f59de9d18f35f1a 100644 (file)
@@ -19,7 +19,8 @@ enum gtp_attrs {
        GTPA_LINK,
        GTPA_VERSION,
        GTPA_TID,       /* for GTPv0 only */
-       GTPA_SGSN_ADDRESS,
+       GTPA_PEER_ADDRESS,      /* Remote GSN peer, either SGSN or GGSN */
+#define GTPA_SGSN_ADDRESS GTPA_PEER_ADDRESS /* maintain legacy attr name */
        GTPA_MS_ADDRESS,
        GTPA_FLOW,
        GTPA_NET_NS_FD,
index 320fc1e747ee9623db56fbaf26b2a514b5d5a3d1..8b405afb23763498bf0b879ab319dcc9bf94f4b9 100644 (file)
@@ -538,11 +538,18 @@ enum {
 #define IFLA_PPP_MAX (__IFLA_PPP_MAX - 1)
 
 /* GTP section */
+
+enum ifla_gtp_role {
+       GTP_ROLE_GGSN = 0,
+       GTP_ROLE_SGSN,
+};
+
 enum {
        IFLA_GTP_UNSPEC,
        IFLA_GTP_FD0,
        IFLA_GTP_FD1,
        IFLA_GTP_PDP_HASHSIZE,
+       IFLA_GTP_ROLE,
        __IFLA_GTP_MAX,
 };
 #define IFLA_GTP_MAX (__IFLA_GTP_MAX - 1)
index d8f6a1ac9af49605ff67675d4639bca493da5edb..2ae59178189d7983fb9ed99ae1cd5f40197cfb04 100644 (file)
@@ -184,6 +184,7 @@ enum {
        DEVCONF_ENHANCED_DAD,
        DEVCONF_ADDR_GEN_MODE,
        DEVCONF_DISABLE_POLICY,
+       DEVCONF_ACCEPT_RA_RT_INFO_MIN_PLEN,
        DEVCONF_MAX
 };
 
index d80a0498f77ed2d4dac3b61a6eef1906eb8b0626..f5e45095b0bb5c17af6515012587d6d805a7d79c 100644 (file)
 /* MPLS tunnel attributes
  * [RTA_ENCAP] = {
  *     [MPLS_IPTUNNEL_DST]
+ *     [MPLS_IPTUNNEL_TTL]
  * }
  */
 enum {
        MPLS_IPTUNNEL_UNSPEC,
        MPLS_IPTUNNEL_DST,
+       MPLS_IPTUNNEL_TTL,
        __MPLS_IPTUNNEL_MAX,
 };
 #define MPLS_IPTUNNEL_MAX (__MPLS_IPTUNNEL_MAX - 1)
index 05215d30fe5c9853b7871e799ccdce4878a04ef1..8f3842690d176bb6d847e4a157dfc404d0e6f0f1 100644 (file)
@@ -815,6 +815,17 @@ enum nft_rt_keys {
        NFT_RT_NEXTHOP6,
 };
 
+/**
+ * enum nft_hash_types - nf_tables hash expression types
+ *
+ * @NFT_HASH_JENKINS: Jenkins Hash
+ * @NFT_HASH_SYM: Symmetric Hash
+ */
+enum nft_hash_types {
+       NFT_HASH_JENKINS,
+       NFT_HASH_SYM,
+};
+
 /**
  * enum nft_hash_attributes - nf_tables hash expression netlink attributes
  *
@@ -824,6 +835,7 @@ enum nft_rt_keys {
  * @NFTA_HASH_MODULUS: modulus value (NLA_U32)
  * @NFTA_HASH_SEED: seed value (NLA_U32)
  * @NFTA_HASH_OFFSET: add this offset value to hash result (NLA_U32)
+ * @NFTA_HASH_TYPE: hash operation (NLA_U32: nft_hash_types)
  */
 enum nft_hash_attributes {
        NFTA_HASH_UNSPEC,
@@ -833,6 +845,7 @@ enum nft_hash_attributes {
        NFTA_HASH_MODULUS,
        NFTA_HASH_SEED,
        NFTA_HASH_OFFSET,
+       NFTA_HASH_TYPE,
        __NFTA_HASH_MAX,
 };
 #define NFTA_HASH_MAX  (__NFTA_HASH_MAX - 1)
@@ -1244,12 +1257,23 @@ enum nft_fib_flags {
        NFTA_FIB_F_MARK         = 1 << 2,       /* use skb->mark */
        NFTA_FIB_F_IIF          = 1 << 3,       /* restrict to iif */
        NFTA_FIB_F_OIF          = 1 << 4,       /* restrict to oif */
+       NFTA_FIB_F_PRESENT      = 1 << 5,       /* check existence only */
+};
+
+enum nft_ct_helper_attributes {
+       NFTA_CT_HELPER_UNSPEC,
+       NFTA_CT_HELPER_NAME,
+       NFTA_CT_HELPER_L3PROTO,
+       NFTA_CT_HELPER_L4PROTO,
+       __NFTA_CT_HELPER_MAX,
 };
+#define NFTA_CT_HELPER_MAX     (__NFTA_CT_HELPER_MAX - 1)
 
 #define NFT_OBJECT_UNSPEC      0
 #define NFT_OBJECT_COUNTER     1
 #define NFT_OBJECT_QUOTA       2
-#define __NFT_OBJECT_MAX       3
+#define NFT_OBJECT_CT_HELPER   3
+#define __NFT_OBJECT_MAX       4
 #define NFT_OBJECT_MAX         (__NFT_OBJECT_MAX - 1)
 
 /**
index f3946a27bd07d5164fac6964785c86044f031790..b2c9c26ea30ffde432ee2a2c0516cfc9fcd1d372 100644 (file)
@@ -50,12 +50,12 @@ struct nlmsghdr {
 
 /* Flags values */
 
-#define NLM_F_REQUEST          1       /* It is request message.       */
-#define NLM_F_MULTI            2       /* Multipart message, terminated by NLMSG_DONE */
-#define NLM_F_ACK              4       /* Reply with ack, with zero or error code */
-#define NLM_F_ECHO             8       /* Echo this request            */
-#define NLM_F_DUMP_INTR                16      /* Dump was inconsistent due to sequence change */
-#define NLM_F_DUMP_FILTERED    32      /* Dump was filtered as requested */
+#define NLM_F_REQUEST          0x01    /* It is request message.       */
+#define NLM_F_MULTI            0x02    /* Multipart message, terminated by NLMSG_DONE */
+#define NLM_F_ACK              0x04    /* Reply with ack, with zero or error code */
+#define NLM_F_ECHO             0x08    /* Echo this request            */
+#define NLM_F_DUMP_INTR                0x10    /* Dump was inconsistent due to sequence change */
+#define NLM_F_DUMP_FILTERED    0x20    /* Dump was filtered as requested */
 
 /* Modifiers to GET request */
 #define NLM_F_ROOT     0x100   /* specify tree root    */
index 76b4d87c83a863dc7b0e809f5ab318b9656d1dfb..6dcd4de3397b393eb64c9da83e898484af65fda9 100644 (file)
@@ -38,6 +38,7 @@ enum {
        NETLINK_DIAG_GROUPS,
        NETLINK_DIAG_RX_RING,
        NETLINK_DIAG_TX_RING,
+       NETLINK_DIAG_FLAGS,
 
        __NETLINK_DIAG_MAX,
 };
@@ -52,5 +53,14 @@ enum {
 /* deprecated since 4.6 */
 #define NDIAG_SHOW_RING_CFG    0x00000004 /* show ring configuration */
 #endif
+#define NDIAG_SHOW_FLAGS       0x00000008 /* show flags of a netlink socket */
+
+/* flags */
+#define NDIAG_FLAG_CB_RUNNING          0x00000001
+#define NDIAG_FLAG_PKTINFO             0x00000002
+#define NDIAG_FLAG_BROADCAST_ERROR     0x00000004
+#define NDIAG_FLAG_NO_ENOBUFS          0x00000008
+#define NDIAG_FLAG_LISTEN_ALL_NSID     0x00000010
+#define NDIAG_FLAG_CAP_ACK             0x00000020
 
 #endif
index 7f41f7d0000f9f0ee36c274d88ad0d330fa8f5d6..66d1c3ccfd8e26087644d247fe281c2a037c3864 100644 (file)
@@ -578,10 +578,25 @@ enum ovs_sample_attr {
        OVS_SAMPLE_ATTR_PROBABILITY, /* u32 number */
        OVS_SAMPLE_ATTR_ACTIONS,     /* Nested OVS_ACTION_ATTR_* attributes. */
        __OVS_SAMPLE_ATTR_MAX,
+
+#ifdef __KERNEL__
+       OVS_SAMPLE_ATTR_ARG          /* struct sample_arg  */
+#endif
 };
 
 #define OVS_SAMPLE_ATTR_MAX (__OVS_SAMPLE_ATTR_MAX - 1)
 
+#ifdef __KERNEL__
+struct sample_arg {
+       bool exec;                   /* When true, actions in sample will not
+                                     * change flow keys. False otherwise.
+                                     */
+       u32  probability;            /* Same value as
+                                     * 'OVS_SAMPLE_ATTR_PROBABILITY'.
+                                     */
+};
+#endif
+
 /**
  * enum ovs_userspace_attr - Attributes for %OVS_ACTION_ATTR_USERSPACE action.
  * @OVS_USERSPACE_ATTR_PID: u32 Netlink PID to which the %OVS_PACKET_CMD_ACTION
index d08c63f3dd6ff47c7cf090927e91f27cfc0d767f..0c5d5dd61b6ab1d2039686d25683e6dffa1f634e 100644 (file)
@@ -64,7 +64,7 @@ struct packet_diag_mclist {
        __u32   pdmc_count;
        __u16   pdmc_type;
        __u16   pdmc_alen;
-       __u8    pdmc_addr[MAX_ADDR_LEN];
+       __u8    pdmc_addr[32]; /* MAX_ADDR_LEN */
 };
 
 struct packet_diag_ring {
index df7451d351311cd915fc96752fcaeb0a43b5d112..099bf5528fed30008bfbde3529315be35c0411f9 100644 (file)
@@ -617,6 +617,14 @@ struct tc_drr_stats {
 #define TC_QOPT_BITMASK 15
 #define TC_QOPT_MAX_QUEUE 16
 
+enum {
+       TC_MQPRIO_HW_OFFLOAD_NONE,      /* no offload requested */
+       TC_MQPRIO_HW_OFFLOAD_TCS,       /* offload TCs, no queue counts */
+       __TC_MQPRIO_HW_OFFLOAD_MAX
+};
+
+#define TC_MQPRIO_HW_OFFLOAD_MAX (__TC_MQPRIO_HW_OFFLOAD_MAX - 1)
+
 struct tc_mqprio_qopt {
        __u8    num_tc;
        __u8    prio_tc_map[TC_QOPT_BITMASK + 1];
index 6546917d605a916bfd5a905e30eb05d68fd6ad6b..cce061382e4073d4fe8296a00f22eba42cf13818 100644 (file)
@@ -122,6 +122,8 @@ enum {
 
        RTM_NEWNETCONF = 80,
 #define RTM_NEWNETCONF RTM_NEWNETCONF
+       RTM_DELNETCONF,
+#define RTM_DELNETCONF RTM_DELNETCONF
        RTM_GETNETCONF = 82,
 #define RTM_GETNETCONF RTM_GETNETCONF
 
@@ -319,6 +321,7 @@ enum rtattr_type_t {
        RTA_EXPIRES,
        RTA_PAD,
        RTA_UID,
+       RTA_TTL_PROPAGATE,
        __RTA_MAX
 };
 
@@ -545,6 +548,7 @@ enum {
        TCA_STATS2,
        TCA_STAB,
        TCA_PAD,
+       TCA_DUMP_INVISIBLE,
        __TCA_MAX
 };
 
index d3ae381fcf3327489c82e2f47eb39a363ec030c7..ced9d8b974268ed270661c3e2da77165e3a24784 100644 (file)
@@ -115,6 +115,8 @@ typedef __s32 sctp_assoc_t;
 #define SCTP_PR_SUPPORTED      113
 #define SCTP_DEFAULT_PRINFO    114
 #define SCTP_PR_ASSOC_STATUS   115
+#define SCTP_PR_STREAM_STATUS  116
+#define SCTP_RECONFIG_SUPPORTED        117
 #define SCTP_ENABLE_STREAM_RESET       118
 #define SCTP_RESET_STREAMS     119
 #define SCTP_RESET_ASSOC       120
@@ -502,6 +504,28 @@ struct sctp_stream_reset_event {
        __u16 strreset_stream_list[];
 };
 
+#define SCTP_ASSOC_RESET_DENIED                0x0004
+#define SCTP_ASSOC_RESET_FAILED                0x0008
+struct sctp_assoc_reset_event {
+       __u16 assocreset_type;
+       __u16 assocreset_flags;
+       __u32 assocreset_length;
+       sctp_assoc_t assocreset_assoc_id;
+       __u32 assocreset_local_tsn;
+       __u32 assocreset_remote_tsn;
+};
+
+#define SCTP_ASSOC_CHANGE_DENIED       0x0004
+#define SCTP_ASSOC_CHANGE_FAILED       0x0008
+struct sctp_stream_change_event {
+       __u16 strchange_type;
+       __u16 strchange_flags;
+       __u32 strchange_length;
+       sctp_assoc_t strchange_assoc_id;
+       __u16 strchange_instrms;
+       __u16 strchange_outstrms;
+};
+
 /*
  * Described in Section 7.3
  *   Ancillary Data and Notification Interest Options
@@ -518,6 +542,8 @@ struct sctp_event_subscribe {
        __u8 sctp_authentication_event;
        __u8 sctp_sender_dry_event;
        __u8 sctp_stream_reset_event;
+       __u8 sctp_assoc_reset_event;
+       __u8 sctp_stream_change_event;
 };
 
 /*
@@ -543,6 +569,8 @@ union sctp_notification {
        struct sctp_authkey_event sn_authkey_event;
        struct sctp_sender_dry_event sn_sender_dry_event;
        struct sctp_stream_reset_event sn_strreset_event;
+       struct sctp_assoc_reset_event sn_assocreset_event;
+       struct sctp_stream_change_event sn_strchange_event;
 };
 
 /* Section 5.3.1
@@ -572,6 +600,10 @@ enum sctp_sn_type {
 #define SCTP_SENDER_DRY_EVENT          SCTP_SENDER_DRY_EVENT
        SCTP_STREAM_RESET_EVENT,
 #define SCTP_STREAM_RESET_EVENT                SCTP_STREAM_RESET_EVENT
+       SCTP_ASSOC_RESET_EVENT,
+#define SCTP_ASSOC_RESET_EVENT         SCTP_ASSOC_RESET_EVENT
+       SCTP_STREAM_CHANGE_EVENT,
+#define SCTP_STREAM_CHANGE_EVENT       SCTP_STREAM_CHANGE_EVENT
 };
 
 /* Notification error codes used to fill up the error fields in some
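
A hedged userspace sketch: enabling the two new notifications through the
existing SCTP_EVENTS socket option (field names taken from the struct
above):

#include <string.h>
#include <netinet/in.h>
#include <netinet/sctp.h>

static int subscribe_reset_events(int fd)
{
	struct sctp_event_subscribe ev;

	memset(&ev, 0, sizeof(ev));
	ev.sctp_assoc_reset_event = 1;
	ev.sctp_stream_change_event = 1;

	return setsockopt(fd, IPPROTO_SCTP, SCTP_EVENTS, &ev, sizeof(ev));
}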
index 3b2bed7ca9a4d92c5671e614f2bc598668805f75..cec0e171d20caea2f188c06a9924f886b0daaa85 100644 (file)
@@ -177,7 +177,6 @@ enum
        LINUX_MIB_TIMEWAITED,                   /* TimeWaited */
        LINUX_MIB_TIMEWAITRECYCLED,             /* TimeWaitRecycled */
        LINUX_MIB_TIMEWAITKILLED,               /* TimeWaitKilled */
-       LINUX_MIB_PAWSPASSIVEREJECTED,          /* PAWSPassiveRejected */
        LINUX_MIB_PAWSACTIVEREJECTED,           /* PAWSActiveRejected */
        LINUX_MIB_PAWSESTABREJECTED,            /* PAWSEstabRejected */
        LINUX_MIB_DELAYEDACKS,                  /* DelayedACKs */
index d2b12152e358f14e791ef3e842cb6eac7cc8ceec..e13d48058b8d0e5cf36e458e68e257d73a9a1e8f 100644 (file)
@@ -568,6 +568,7 @@ enum {
        NET_IPV6_PROXY_NDP=23,
        NET_IPV6_ACCEPT_SOURCE_ROUTE=25,
        NET_IPV6_ACCEPT_RA_FROM_LOCAL=26,
+       NET_IPV6_ACCEPT_RA_RT_INFO_MIN_PLEN=27,
        __NET_IPV6_MAX
 };
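[editor's note] The new binary-sysctl id mirrors a runtime knob for dropping overly specific RA route information options; the usual way to drive it is procfs. A small sketch, hedged in that the conf path is assumed from the companion ipv6 patch in this series:

#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/proc/sys/net/ipv6/conf/all/accept_ra_rt_info_min_plen",
			"w");

	if (!f)
		return 1;
	/* ignore RA route information options with prefixes shorter than /48 */
	fprintf(f, "48\n");
	return fclose(f) != 0;
}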
 
index c055947c5c989fa7e399a7b0dcaba8640014b548..3b059530dac95fa6e5dcf736e95a84fe80eb5f35 100644 (file)
@@ -18,8 +18,7 @@
  * means the userland is reading).
  */
 #define UFFD_API ((__u64)0xAA)
-#define UFFD_API_FEATURES (UFFD_FEATURE_EVENT_EXIT |           \
-                          UFFD_FEATURE_EVENT_FORK |            \
+#define UFFD_API_FEATURES (UFFD_FEATURE_EVENT_FORK |           \
                           UFFD_FEATURE_EVENT_REMAP |           \
                           UFFD_FEATURE_EVENT_REMOVE |  \
                           UFFD_FEATURE_EVENT_UNMAP |           \
@@ -113,7 +112,6 @@ struct uffd_msg {
 #define UFFD_EVENT_REMAP       0x14
 #define UFFD_EVENT_REMOVE      0x15
 #define UFFD_EVENT_UNMAP       0x16
-#define UFFD_EVENT_EXIT                0x17
 
 /* flags for UFFD_EVENT_PAGEFAULT */
 #define UFFD_PAGEFAULT_FLAG_WRITE      (1<<0)  /* If this was a write fault */
@@ -163,7 +161,6 @@ struct uffdio_api {
 #define UFFD_FEATURE_MISSING_HUGETLBFS         (1<<4)
 #define UFFD_FEATURE_MISSING_SHMEM             (1<<5)
 #define UFFD_FEATURE_EVENT_UNMAP               (1<<6)
-#define UFFD_FEATURE_EVENT_EXIT                        (1<<7)
        __u64 features;
 
        __u64 ioctls;
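[editor's note] This hunk backs UFFD_FEATURE_EVENT_EXIT out of the advertised feature set for this release, which underlines that userfaultfd consumers should negotiate through UFFDIO_API rather than assume a feature exists. A minimal handshake sketch:

#include <fcntl.h>
#include <linux/userfaultfd.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <sys/syscall.h>
#include <unistd.h>

int main(void)
{
	struct uffdio_api api = { .api = UFFD_API, .features = 0 };
	int uffd = syscall(__NR_userfaultfd, O_CLOEXEC | O_NONBLOCK);

	if (uffd < 0 || ioctl(uffd, UFFDIO_API, &api) < 0)
		return 1;

	/* rely only on what the kernel actually advertised back */
	if (api.features & UFFD_FEATURE_EVENT_FORK)
		printf("fork events available\n");
	if (api.features & UFFD_FEATURE_EVENT_UNMAP)
		printf("unmap events available\n");
	return 0;
}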
index da7cd62bace746879e154829b5ecad5f6cd87c00..0b3d30837a9f6456aabe028f5f9931b3593d778c 100644 (file)
@@ -34,6 +34,7 @@
 #define MLX5_ABI_USER_H
 
 #include <linux/types.h>
+#include <linux/if_ether.h>    /* For ETH_ALEN. */
 
 enum {
        MLX5_QP_FLAG_SIGNATURE          = 1 << 0,
@@ -66,7 +67,7 @@ struct mlx5_ib_alloc_ucontext_req {
 };
 
 enum mlx5_lib_caps {
-       MLX5_LIB_CAP_4K_UAR     = (u64)1 << 0,
+       MLX5_LIB_CAP_4K_UAR     = (__u64)1 << 0,
 };
 
 struct mlx5_ib_alloc_ucontext_req_v2 {
index ef8e2a8ad0afc85da51bc3fca00a26fa3768ff16..6b083d327e982c0c5deb7895ce6e7c6b1094c8a6 100644 (file)
@@ -46,6 +46,7 @@
 #define DECON_FRAMEFIFO_STATUS         0x0524
 #define DECON_CMU                      0x1404
 #define DECON_UPDATE                   0x1410
+#define DECON_CRFMID                   0x1414
 #define DECON_UPDATE_SCHEME            0x1438
 #define DECON_VIDCON1                  0x2000
 #define DECON_VIDCON2                  0x2004
 
 /* VIDINTCON0 */
 #define VIDINTCON0_FRAMEDONE           (1 << 17)
+#define VIDINTCON0_FRAMESEL_BP         (0 << 15)
+#define VIDINTCON0_FRAMESEL_VS         (1 << 15)
+#define VIDINTCON0_FRAMESEL_AC         (2 << 15)
+#define VIDINTCON0_FRAMESEL_FP         (3 << 15)
 #define VIDINTCON0_INTFRMEN            (1 << 12)
 #define VIDINTCON0_INTEN               (1 << 0)
 
 #define STANDALONE_UPDATE_F            (1 << 0)
 
 /* DECON_VIDCON1 */
+#define VIDCON1_LINECNT_MASK           (0x0fff << 16)
+#define VIDCON1_I80_ACTIVE             (1 << 15)
+#define VIDCON1_VSTATUS_MASK           (0x3 << 13)
+#define VIDCON1_VSTATUS_VS             (0 << 13)
+#define VIDCON1_VSTATUS_BP             (1 << 13)
+#define VIDCON1_VSTATUS_AC             (2 << 13)
+#define VIDCON1_VSTATUS_FP             (3 << 13)
 #define VIDCON1_VCLK_MASK              (0x3 << 9)
 #define VIDCON1_VCLK_RUN_VDEN_DISABLE  (0x3 << 9)
 #define VIDCON1_VCLK_HOLD              (0x0 << 9)
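[editor's note] The new VIDCON1_VSTATUS_* values expose which scanout phase the controller is in, mostly useful for diagnostics. A hypothetical decoding helper, sketched with the names defined above (the register read itself is driver-specific):

/* sketch: map a VIDCON1 readout to a human-readable scanout phase */
static const char *decon_vstatus_name(u32 vidcon1)
{
	switch (vidcon1 & VIDCON1_VSTATUS_MASK) {
	case VIDCON1_VSTATUS_VS:
		return "vsync";
	case VIDCON1_VSTATUS_BP:
		return "back porch";
	case VIDCON1_VSTATUS_AC:
		return "active";
	case VIDCON1_VSTATUS_FP:
		return "front porch";
	default:
		return "unknown";
	}
}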
index a0083be5d5295157bc11b82a343b58f798488bb8..1f6d78f044b671bca827b15c5b6b8388c09e4fa6 100644 (file)
@@ -2,6 +2,7 @@
 #define __LINUX_SWIOTLB_XEN_H
 
 #include <linux/dma-direction.h>
+#include <linux/scatterlist.h>
 #include <linux/swiotlb.h>
 
 extern int xen_swiotlb_init(int verbose, bool early);
@@ -55,4 +56,14 @@ xen_swiotlb_dma_supported(struct device *hwdev, u64 mask);
 
 extern int
 xen_swiotlb_set_dma_mask(struct device *dev, u64 dma_mask);
+
+extern int
+xen_swiotlb_dma_mmap(struct device *dev, struct vm_area_struct *vma,
+                    void *cpu_addr, dma_addr_t dma_addr, size_t size,
+                    unsigned long attrs);
+
+extern int
+xen_swiotlb_get_sgtable(struct device *dev, struct sg_table *sgt,
+                       void *cpu_addr, dma_addr_t handle, size_t size,
+                       unsigned long attrs);
 #endif /* __LINUX_SWIOTLB_XEN_H */
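[editor's note] These two exports mirror the generic dma_common_mmap()/dma_common_get_sgtable() pair; the natural consumer is the architecture's DMA ops table. A wiring sketch, hedged in that the surrounding callbacks and the exact dma_map_ops layout of this kernel era are assumed:

static const struct dma_map_ops xen_swiotlb_dma_ops = {
	/* alloc/free/map/unmap callbacks omitted for brevity */
	.mmap		= xen_swiotlb_dma_mmap,
	.get_sgtable	= xen_swiotlb_get_sgtable,
};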
index eae2f15657c62c31f66353f4bd3c4af1e03451cf..b0c11cbf5ddf8a55a3c832e4acbd72653d9c38f1 100644 (file)
@@ -882,7 +882,6 @@ static void __init do_basic_setup(void)
        do_ctors();
        usermodehelper_enable();
        do_initcalls();
-       random_int_secret_init();
 }
 
 static void __init do_pre_smp_initcalls(void)
@@ -1023,6 +1022,8 @@ static noinline void __init kernel_init_freeable(void)
 
        workqueue_init();
 
+       init_mm_internals();
+
        do_pre_smp_initcalls();
        lockup_detector_init();
 
index e794544f5e63334afccadf6cc70f5fb2541e1e2e..2f4964cfde0b4f142778c199e606dc3d519ebbf0 100644 (file)
 #include <linux/kthread.h>
 #include <linux/kernel.h>
 #include <linux/syscalls.h>
+#include <linux/spinlock.h>
+#include <linux/rcupdate.h>
+#include <linux/mutex.h>
+#include <linux/gfp.h>
 
 #include <linux/audit.h>
 
@@ -90,13 +94,34 @@ static u32  audit_default;
 /* If auditing cannot proceed, audit_failure selects what happens. */
 static u32     audit_failure = AUDIT_FAIL_PRINTK;
 
-/*
- * If audit records are to be written to the netlink socket, audit_pid
- * contains the pid of the auditd process and audit_nlk_portid contains
- * the portid to use to send netlink messages to that process.
+/* private audit network namespace index */
+static unsigned int audit_net_id;
+
+/**
+ * struct audit_net - audit private network namespace data
+ * @sk: communication socket
+ */
+struct audit_net {
+       struct sock *sk;
+};
+
+/**
+ * struct auditd_connection - kernel/auditd connection state
+ * @pid: auditd PID
+ * @portid: netlink portid
+ * @net: the associated network namespace
+ * @lock: spinlock to protect write access
+ *
+ * Description:
+ * This struct is RCU protected; you must either hold the RCU lock for reading
+ * or the included spinlock for writing.
  */
-int            audit_pid;
-static __u32   audit_nlk_portid;
+static struct auditd_connection {
+       int pid;
+       u32 portid;
+       struct net *net;
+       spinlock_t lock;
+} auditd_conn;
 
 /* If audit_rate_limit is non-zero, limit the rate of sending audit records
  * to that number per second.  This prevents DoS attacks, but results in
@@ -123,10 +148,6 @@ u32                audit_sig_sid = 0;
 */
 static atomic_t        audit_lost = ATOMIC_INIT(0);
 
-/* The netlink socket. */
-static struct sock *audit_sock;
-static unsigned int audit_net_id;
-
 /* Hash for inode-based rules */
 struct list_head audit_inode_hash[AUDIT_INODE_BUCKETS];
 
@@ -139,6 +160,7 @@ static LIST_HEAD(audit_freelist);
 
 /* queue msgs to send via kauditd_task */
 static struct sk_buff_head audit_queue;
+static void kauditd_hold_skb(struct sk_buff *skb);
 /* queue msgs due to temporary unicast send problems */
 static struct sk_buff_head audit_retry_queue;
 /* queue msgs waiting for new auditd connection */
@@ -192,6 +214,43 @@ struct audit_reply {
        struct sk_buff *skb;
 };
 
+/**
+ * auditd_test_task - Check to see if a given task is an audit daemon
+ * @task: the task to check
+ *
+ * Description:
+ * Return 1 if the task is a registered audit daemon, 0 otherwise.
+ */
+int auditd_test_task(const struct task_struct *task)
+{
+       int rc;
+
+       rcu_read_lock();
+       rc = (auditd_conn.pid && task->tgid == auditd_conn.pid ? 1 : 0);
+       rcu_read_unlock();
+
+       return rc;
+}
+
+/**
+ * audit_get_sk - Return the audit socket for the given network namespace
+ * @net: the destination network namespace
+ *
+ * Description:
+ * Returns the sock pointer if valid, NULL otherwise.  The caller must ensure
+ * that a reference is held for the network namespace while the sock is in use.
+ */
+static struct sock *audit_get_sk(const struct net *net)
+{
+       struct audit_net *aunet;
+
+       if (!net)
+               return NULL;
+
+       aunet = net_generic(net, audit_net_id);
+       return aunet->sk;
+}
+
 static void audit_set_portid(struct audit_buffer *ab, __u32 portid)
 {
        if (ab) {
@@ -210,9 +269,7 @@ void audit_panic(const char *message)
                        pr_err("%s\n", message);
                break;
        case AUDIT_FAIL_PANIC:
-               /* test audit_pid since printk is always losey, why bother? */
-               if (audit_pid)
-                       panic("audit: %s\n", message);
+               panic("audit: %s\n", message);
                break;
        }
 }
@@ -370,21 +427,87 @@ static int audit_set_failure(u32 state)
        return audit_do_config_change("audit_failure", &audit_failure, state);
 }
 
-/*
- * For one reason or another this nlh isn't getting delivered to the userspace
- * audit daemon, just send it to printk.
+/**
+ * auditd_set - Set/Reset the auditd connection state
+ * @pid: auditd PID
+ * @portid: auditd netlink portid
+ * @net: auditd network namespace pointer
+ *
+ * Description:
+ * This function will obtain and drop network namespace references as
+ * necessary.
+ */
+static void auditd_set(int pid, u32 portid, struct net *net)
+{
+       unsigned long flags;
+
+       spin_lock_irqsave(&auditd_conn.lock, flags);
+       auditd_conn.pid = pid;
+       auditd_conn.portid = portid;
+       if (auditd_conn.net)
+               put_net(auditd_conn.net);
+       if (net)
+               auditd_conn.net = get_net(net);
+       else
+               auditd_conn.net = NULL;
+       spin_unlock_irqrestore(&auditd_conn.lock, flags);
+}
+
+/**
+ * auditd_reset - Disconnect the auditd connection
+ *
+ * Description:
+ * Break the auditd/kauditd connection and move all the queued records into the
+ * hold queue in case auditd reconnects.
+ */
+static void auditd_reset(void)
+{
+       struct sk_buff *skb;
+
+       /* if it isn't already broken, break the connection */
+       rcu_read_lock();
+       if (auditd_conn.pid)
+               auditd_set(0, 0, NULL);
+       rcu_read_unlock();
+
+       /* flush all of the main and retry queues to the hold queue */
+       while ((skb = skb_dequeue(&audit_retry_queue)))
+               kauditd_hold_skb(skb);
+       while ((skb = skb_dequeue(&audit_queue)))
+               kauditd_hold_skb(skb);
+}
+
+/**
+ * kauditd_printk_skb - Print the audit record to the ring buffer
+ * @skb: audit record
+ *
+ * Whatever the reason, this packet may not make it to the auditd connection
+ * so write it via printk so the information isn't completely lost.
  */
 static void kauditd_printk_skb(struct sk_buff *skb)
 {
        struct nlmsghdr *nlh = nlmsg_hdr(skb);
        char *data = nlmsg_data(nlh);
 
-       if (nlh->nlmsg_type != AUDIT_EOE) {
-               if (printk_ratelimit())
-                       pr_notice("type=%d %s\n", nlh->nlmsg_type, data);
-               else
-                       audit_log_lost("printk limit exceeded");
-       }
+       if (nlh->nlmsg_type != AUDIT_EOE && printk_ratelimit())
+               pr_notice("type=%d %s\n", nlh->nlmsg_type, data);
+}
+
+/**
+ * kauditd_rehold_skb - Handle an audit record send failure in the hold queue
+ * @skb: audit record
+ *
+ * Description:
+ * This should only be used by the kauditd_thread when it fails to flush the
+ * hold queue.
+ */
+static void kauditd_rehold_skb(struct sk_buff *skb)
+{
+       /* put the record back in the queue at the same place */
+       skb_queue_head(&audit_hold_queue, skb);
+
+       /* fail the auditd connection */
+       auditd_reset();
 }
 
 /**
@@ -421,6 +544,9 @@ static void kauditd_hold_skb(struct sk_buff *skb)
        /* we have no other options - drop the message */
        audit_log_lost("kauditd hold queue overflow");
        kfree_skb(skb);
+
+       /* fail the auditd connection */
+       auditd_reset();
 }
 
 /**
@@ -441,51 +567,122 @@ static void kauditd_retry_skb(struct sk_buff *skb)
 }
 
 /**
- * auditd_reset - Disconnect the auditd connection
+ * auditd_send_unicast_skb - Send a record via unicast to auditd
+ * @skb: audit record
  *
  * Description:
- * Break the auditd/kauditd connection and move all the records in the retry
- * queue into the hold queue in case auditd reconnects.  The audit_cmd_mutex
- * must be held when calling this function.
+ * Send a skb to the audit daemon, returns positive/zero values on success and
+ * negative values on failure; in all cases the skb will be consumed by this
+ * function.  If the send results in -ECONNREFUSED the connection with auditd
+ * will be reset.  This function may sleep so callers should not hold any locks
+ * where this would cause a problem.
  */
-static void auditd_reset(void)
+static int auditd_send_unicast_skb(struct sk_buff *skb)
 {
-       struct sk_buff *skb;
-
-       /* break the connection */
-       if (audit_sock) {
-               sock_put(audit_sock);
-               audit_sock = NULL;
+       int rc;
+       u32 portid;
+       struct net *net;
+       struct sock *sk;
+
+       /* NOTE: we can't call netlink_unicast while in the RCU section so
+        *       take a reference to the network namespace and grab local
+        *       copies of the namespace, the sock, and the portid; the
+        *       namespace and sock aren't going to go away while we hold a
+        *       reference and if the portid does become invalid after the RCU
+        *       section netlink_unicast() should safely return an error */
+
+       rcu_read_lock();
+       if (!auditd_conn.pid) {
+               rcu_read_unlock();
+               rc = -ECONNREFUSED;
+               goto err;
        }
-       audit_pid = 0;
-       audit_nlk_portid = 0;
+       net = auditd_conn.net;
+       get_net(net);
+       sk = audit_get_sk(net);
+       portid = auditd_conn.portid;
+       rcu_read_unlock();
 
-       /* flush all of the retry queue to the hold queue */
-       while ((skb = skb_dequeue(&audit_retry_queue)))
-               kauditd_hold_skb(skb);
+       rc = netlink_unicast(sk, skb, portid, 0);
+       put_net(net);
+       if (rc < 0)
+               goto err;
+
+       return rc;
+
+err:
+       if (rc == -ECONNREFUSED)
+               auditd_reset();
+       return rc;
 }
 
 /**
- * kauditd_send_unicast_skb - Send a record via unicast to auditd
- * @skb: audit record
+ * kauditd_send_queue - Helper for kauditd_thread to flush skb queues
+ * @sk: the sending sock
+ * @portid: the netlink destination
+ * @queue: the skb queue to process
+ * @retry_limit: limit on number of netlink unicast failures
+ * @skb_hook: per-skb hook for additional processing
+ * @err_hook: hook called if the skb fails the netlink unicast send
+ *
+ * Description:
+ * Run through the given queue and attempt to send the audit records to auditd,
+ * returns zero on success, negative values on failure.  It is up to the caller
+ * to ensure that the @sk is valid for the duration of this function.
+ *
  */
-static int kauditd_send_unicast_skb(struct sk_buff *skb)
+static int kauditd_send_queue(struct sock *sk, u32 portid,
+                             struct sk_buff_head *queue,
+                             unsigned int retry_limit,
+                             void (*skb_hook)(struct sk_buff *skb),
+                             void (*err_hook)(struct sk_buff *skb))
 {
-       int rc;
+       int rc = 0;
+       struct sk_buff *skb;
+       static unsigned int failed = 0;
 
-       /* if we know nothing is connected, don't even try the netlink call */
-       if (!audit_pid)
-               return -ECONNREFUSED;
+       /* NOTE: kauditd_thread takes care of all our locking, we just use
+        *       the netlink info passed to us (e.g. sk and portid) */
+
+       while ((skb = skb_dequeue(queue))) {
+               /* call the skb_hook for each skb we touch */
+               if (skb_hook)
+                       (*skb_hook)(skb);
+
+               /* can we send to anyone via unicast? */
+               if (!sk) {
+                       if (err_hook)
+                               (*err_hook)(skb);
+                       continue;
+               }
 
-       /* get an extra skb reference in case we fail to send */
-       skb_get(skb);
-       rc = netlink_unicast(audit_sock, skb, audit_nlk_portid, 0);
-       if (rc >= 0) {
-               consume_skb(skb);
-               rc = 0;
+               /* grab an extra skb reference in case of error */
+               skb_get(skb);
+               rc = netlink_unicast(sk, skb, portid, 0);
+               if (rc < 0) {
+                       /* fatal failure for our queue flush attempt? */
+                       if (++failed >= retry_limit ||
+                           rc == -ECONNREFUSED || rc == -EPERM) {
+                               /* yes - error processing for the queue */
+                               sk = NULL;
+                               if (err_hook)
+                                       (*err_hook)(skb);
+                               if (!skb_hook)
+                                       goto out;
+                               /* keep processing with the skb_hook */
+                               continue;
+                       } else
+                               /* no - requeue to preserve ordering */
+                               skb_queue_head(queue, skb);
+               } else {
+                       /* it worked - drop the extra reference and continue */
+                       consume_skb(skb);
+                       failed = 0;
+               }
        }
 
-       return rc;
+out:
+       return (rc >= 0 ? 0 : rc);
 }
 
 /*
@@ -493,16 +690,19 @@ static int kauditd_send_unicast_skb(struct sk_buff *skb)
  * @skb: audit record
  *
  * Description:
- * This function doesn't consume an skb as might be expected since it has to
- * copy it anyways.
+ * Write a multicast message to anyone listening in the initial network
+ * namespace.  This function doesn't consume an skb as might be expected since
+ * it has to copy it anyways.
  */
 static void kauditd_send_multicast_skb(struct sk_buff *skb)
 {
        struct sk_buff *copy;
-       struct audit_net *aunet = net_generic(&init_net, audit_net_id);
-       struct sock *sock = aunet->nlsk;
+       struct sock *sock = audit_get_sk(&init_net);
        struct nlmsghdr *nlh;
 
+       /* NOTE: we are not taking an additional reference for init_net since
+        *       we don't have to worry about it going away */
+
        if (!netlink_has_listeners(sock, AUDIT_NLGRP_READLOG))
                return;
 
@@ -526,149 +726,75 @@ static void kauditd_send_multicast_skb(struct sk_buff *skb)
 }
 
 /**
- * kauditd_wake_condition - Return true when it is time to wake kauditd_thread
- *
- * Description:
- * This function is for use by the wait_event_freezable() call in
- * kauditd_thread().
+ * kauditd_thread - Worker thread to send audit records to userspace
+ * @dummy: unused
  */
-static int kauditd_wake_condition(void)
-{
-       static int pid_last = 0;
-       int rc;
-       int pid = audit_pid;
-
-       /* wake on new messages or a change in the connected auditd */
-       rc = skb_queue_len(&audit_queue) || (pid && pid != pid_last);
-       if (rc)
-               pid_last = pid;
-
-       return rc;
-}
-
 static int kauditd_thread(void *dummy)
 {
        int rc;
-       int auditd = 0;
-       int reschedule = 0;
-       struct sk_buff *skb;
-       struct nlmsghdr *nlh;
+       u32 portid = 0;
+       struct net *net = NULL;
+       struct sock *sk = NULL;
 
 #define UNICAST_RETRIES 5
-#define AUDITD_BAD(x,y) \
-       ((x) == -ECONNREFUSED || (x) == -EPERM || ++(y) >= UNICAST_RETRIES)
-
-       /* NOTE: we do invalidate the auditd connection flag on any sending
-        * errors, but we only "restore" the connection flag at specific places
-        * in the loop in order to help ensure proper ordering of audit
-        * records */
 
        set_freezable();
        while (!kthread_should_stop()) {
-               /* NOTE: possible area for future improvement is to look at
-                *       the hold and retry queues, since only this thread
-                *       has access to these queues we might be able to do
-                *       our own queuing and skip some/all of the locking */
-
-               /* NOTE: it might be a fun experiment to split the hold and
-                *       retry queue handling to another thread, but the
-                *       synchronization issues and other overhead might kill
-                *       any performance gains */
+               /* NOTE: see the lock comments in auditd_send_unicast_skb() */
+               rcu_read_lock();
+               if (!auditd_conn.pid) {
+                       rcu_read_unlock();
+                       goto main_queue;
+               }
+               net = auditd_conn.net;
+               get_net(net);
+               sk = audit_get_sk(net);
+               portid = auditd_conn.portid;
+               rcu_read_unlock();
 
                /* attempt to flush the hold queue */
-               while (auditd && (skb = skb_dequeue(&audit_hold_queue))) {
-                       rc = kauditd_send_unicast_skb(skb);
-                       if (rc) {
-                               /* requeue to the same spot */
-                               skb_queue_head(&audit_hold_queue, skb);
-
-                               auditd = 0;
-                               if (AUDITD_BAD(rc, reschedule)) {
-                                       mutex_lock(&audit_cmd_mutex);
-                                       auditd_reset();
-                                       mutex_unlock(&audit_cmd_mutex);
-                                       reschedule = 0;
-                               }
-                       } else
-                               /* we were able to send successfully */
-                               reschedule = 0;
+               rc = kauditd_send_queue(sk, portid,
+                                       &audit_hold_queue, UNICAST_RETRIES,
+                                       NULL, kauditd_rehold_skb);
+               if (rc < 0) {
+                       sk = NULL;
+                       goto main_queue;
                }
 
                /* attempt to flush the retry queue */
-               while (auditd && (skb = skb_dequeue(&audit_retry_queue))) {
-                       rc = kauditd_send_unicast_skb(skb);
-                       if (rc) {
-                               auditd = 0;
-                               if (AUDITD_BAD(rc, reschedule)) {
-                                       kauditd_hold_skb(skb);
-                                       mutex_lock(&audit_cmd_mutex);
-                                       auditd_reset();
-                                       mutex_unlock(&audit_cmd_mutex);
-                                       reschedule = 0;
-                               } else
-                                       /* temporary problem (we hope), queue
-                                        * to the same spot and retry */
-                                       skb_queue_head(&audit_retry_queue, skb);
-                       } else
-                               /* we were able to send successfully */
-                               reschedule = 0;
+               rc = kauditd_send_queue(sk, portid,
+                                       &audit_retry_queue, UNICAST_RETRIES,
+                                       NULL, kauditd_hold_skb);
+               if (rc < 0) {
+                       sk = NULL;
+                       goto main_queue;
                }
 
-               /* standard queue processing, try to be as quick as possible */
-quick_loop:
-               skb = skb_dequeue(&audit_queue);
-               if (skb) {
-                       /* setup the netlink header, see the comments in
-                        * kauditd_send_multicast_skb() for length quirks */
-                       nlh = nlmsg_hdr(skb);
-                       nlh->nlmsg_len = skb->len - NLMSG_HDRLEN;
-
-                       /* attempt to send to any multicast listeners */
-                       kauditd_send_multicast_skb(skb);
-
-                       /* attempt to send to auditd, queue on failure */
-                       if (auditd) {
-                               rc = kauditd_send_unicast_skb(skb);
-                               if (rc) {
-                                       auditd = 0;
-                                       if (AUDITD_BAD(rc, reschedule)) {
-                                               mutex_lock(&audit_cmd_mutex);
-                                               auditd_reset();
-                                               mutex_unlock(&audit_cmd_mutex);
-                                               reschedule = 0;
-                                       }
-
-                                       /* move to the retry queue */
-                                       kauditd_retry_skb(skb);
-                               } else
-                                       /* everything is working so go fast! */
-                                       goto quick_loop;
-                       } else if (reschedule)
-                               /* we are currently having problems, move to
-                                * the retry queue */
-                               kauditd_retry_skb(skb);
-                       else
-                               /* dump the message via printk and hold it */
-                               kauditd_hold_skb(skb);
-               } else {
-                       /* we have flushed the backlog so wake everyone */
-                       wake_up(&audit_backlog_wait);
-
-                       /* if everything is okay with auditd (if present), go
-                        * to sleep until there is something new in the queue
-                        * or we have a change in the connected auditd;
-                        * otherwise simply reschedule to give things a chance
-                        * to recover */
-                       if (reschedule) {
-                               set_current_state(TASK_INTERRUPTIBLE);
-                               schedule();
-                       } else
-                               wait_event_freezable(kauditd_wait,
-                                                    kauditd_wake_condition());
-
-                       /* update the auditd connection status */
-                       auditd = (audit_pid ? 1 : 0);
+main_queue:
+               /* process the main queue - do the multicast send and attempt
+                * unicast, dump failed record sends to the retry queue; if
+                * sk == NULL due to previous failures we will just do the
+                * multicast send and move the record to the retry queue */
+               kauditd_send_queue(sk, portid, &audit_queue, 1,
+                                  kauditd_send_multicast_skb,
+                                  kauditd_retry_skb);
+
+               /* drop our netns reference, no auditd sends past this line */
+               if (net) {
+                       put_net(net);
+                       net = NULL;
                }
+               sk = NULL;
+
+               /* we have processed all the queues so wake everyone */
+               wake_up(&audit_backlog_wait);
+
+               /* NOTE: we want to wake up if there is anything on the queue,
+                *       regardless of if an auditd is connected, as we need to
+                *       do the multicast send and rotate records from the
+                *       main queue to the retry/hold queues */
+               wait_event_freezable(kauditd_wait,
+                                    (skb_queue_len(&audit_queue) ? 1 : 0));
        }
 
        return 0;
@@ -678,17 +804,16 @@ int audit_send_list(void *_dest)
 {
        struct audit_netlink_list *dest = _dest;
        struct sk_buff *skb;
-       struct net *net = dest->net;
-       struct audit_net *aunet = net_generic(net, audit_net_id);
+       struct sock *sk = audit_get_sk(dest->net);
 
        /* wait for parent to finish and send an ACK */
        mutex_lock(&audit_cmd_mutex);
        mutex_unlock(&audit_cmd_mutex);
 
        while ((skb = __skb_dequeue(&dest->q)) != NULL)
-               netlink_unicast(aunet->nlsk, skb, dest->portid, 0);
+               netlink_unicast(sk, skb, dest->portid, 0);
 
-       put_net(net);
+       put_net(dest->net);
        kfree(dest);
 
        return 0;
@@ -722,16 +847,15 @@ out_kfree_skb:
 static int audit_send_reply_thread(void *arg)
 {
        struct audit_reply *reply = (struct audit_reply *)arg;
-       struct net *net = reply->net;
-       struct audit_net *aunet = net_generic(net, audit_net_id);
+       struct sock *sk = audit_get_sk(reply->net);
 
        mutex_lock(&audit_cmd_mutex);
        mutex_unlock(&audit_cmd_mutex);
 
        /* Ignore failure. It'll only happen if the sender goes away,
           because our timeout is set to infinite. */
-       netlink_unicast(aunet->nlsk , reply->skb, reply->portid, 0);
-       put_net(net);
+       netlink_unicast(sk, reply->skb, reply->portid, 0);
+       put_net(reply->net);
        kfree(reply);
        return 0;
 }
@@ -949,12 +1073,12 @@ static int audit_set_feature(struct sk_buff *skb)
 
 static int audit_replace(pid_t pid)
 {
-       struct sk_buff *skb = audit_make_reply(0, 0, AUDIT_REPLACE, 0, 0,
-                                              &pid, sizeof(pid));
+       struct sk_buff *skb;
 
+       skb = audit_make_reply(0, 0, AUDIT_REPLACE, 0, 0, &pid, sizeof(pid));
        if (!skb)
                return -ENOMEM;
-       return netlink_unicast(audit_sock, skb, audit_nlk_portid, 0);
+       return auditd_send_unicast_skb(skb);
 }
 
 static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
@@ -981,7 +1105,9 @@ static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
                memset(&s, 0, sizeof(s));
                s.enabled               = audit_enabled;
                s.failure               = audit_failure;
-               s.pid                   = audit_pid;
+               rcu_read_lock();
+               s.pid                   = auditd_conn.pid;
+               rcu_read_unlock();
                s.rate_limit            = audit_rate_limit;
                s.backlog_limit         = audit_backlog_limit;
                s.lost                  = atomic_read(&audit_lost);
@@ -1014,30 +1140,44 @@ static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
                         *       from the initial pid namespace, but something
                         *       to keep in mind if this changes */
                        int new_pid = s.pid;
+                       pid_t auditd_pid;
                        pid_t requesting_pid = task_tgid_vnr(current);
 
-                       if ((!new_pid) && (requesting_pid != audit_pid)) {
-                               audit_log_config_change("audit_pid", new_pid, audit_pid, 0);
+                       /* test the auditd connection */
+                       audit_replace(requesting_pid);
+
+                       rcu_read_lock();
+                       auditd_pid = auditd_conn.pid;
+                       /* only the current auditd can unregister itself */
+                       if ((!new_pid) && (requesting_pid != auditd_pid)) {
+                               rcu_read_unlock();
+                               audit_log_config_change("audit_pid", new_pid,
+                                                       auditd_pid, 0);
                                return -EACCES;
                        }
-                       if (audit_pid && new_pid &&
-                           audit_replace(requesting_pid) != -ECONNREFUSED) {
-                               audit_log_config_change("audit_pid", new_pid, audit_pid, 0);
+                       /* replacing a healthy auditd is not allowed */
+                       if (auditd_pid && new_pid) {
+                               rcu_read_unlock();
+                               audit_log_config_change("audit_pid", new_pid,
+                                                       auditd_pid, 0);
                                return -EEXIST;
                        }
+                       rcu_read_unlock();
+
                        if (audit_enabled != AUDIT_OFF)
-                               audit_log_config_change("audit_pid", new_pid, audit_pid, 1);
+                               audit_log_config_change("audit_pid", new_pid,
+                                                       auditd_pid, 1);
+
                        if (new_pid) {
-                               if (audit_sock)
-                                       sock_put(audit_sock);
-                               audit_pid = new_pid;
-                               audit_nlk_portid = NETLINK_CB(skb).portid;
-                               sock_hold(skb->sk);
-                               audit_sock = skb->sk;
-                       } else {
+                               /* register a new auditd connection */
+                               auditd_set(new_pid,
+                                          NETLINK_CB(skb).portid,
+                                          sock_net(NETLINK_CB(skb).sk));
+                               /* try to process any backlog */
+                               wake_up_interruptible(&kauditd_wait);
+                       } else
+                               /* unregister the auditd connection */
                                auditd_reset();
-                       }
-                       wake_up_interruptible(&kauditd_wait);
                }
                if (s.mask & AUDIT_STATUS_RATE_LIMIT) {
                        err = audit_set_rate_limit(s.rate_limit);
@@ -1090,7 +1230,6 @@ static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
                                if (err)
                                        break;
                        }
-                       mutex_unlock(&audit_cmd_mutex);
                        audit_log_common_recv_msg(&ab, msg_type);
                        if (msg_type != AUDIT_USER_TTY)
                                audit_log_format(ab, " msg='%.*s'",
@@ -1108,7 +1247,6 @@ static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
                        }
                        audit_set_portid(ab, NETLINK_CB(skb).portid);
                        audit_log_end(ab);
-                       mutex_lock(&audit_cmd_mutex);
                }
                break;
        case AUDIT_ADD_RULE:
@@ -1298,26 +1436,26 @@ static int __net_init audit_net_init(struct net *net)
 
        struct audit_net *aunet = net_generic(net, audit_net_id);
 
-       aunet->nlsk = netlink_kernel_create(net, NETLINK_AUDIT, &cfg);
-       if (aunet->nlsk == NULL) {
+       aunet->sk = netlink_kernel_create(net, NETLINK_AUDIT, &cfg);
+       if (aunet->sk == NULL) {
                audit_panic("cannot initialize netlink socket in namespace");
                return -ENOMEM;
        }
-       aunet->nlsk->sk_sndtimeo = MAX_SCHEDULE_TIMEOUT;
+       aunet->sk->sk_sndtimeo = MAX_SCHEDULE_TIMEOUT;
+
        return 0;
 }
 
 static void __net_exit audit_net_exit(struct net *net)
 {
        struct audit_net *aunet = net_generic(net, audit_net_id);
-       struct sock *sock = aunet->nlsk;
-       mutex_lock(&audit_cmd_mutex);
-       if (sock == audit_sock)
+
+       rcu_read_lock();
+       if (net == auditd_conn.net)
                auditd_reset();
-       mutex_unlock(&audit_cmd_mutex);
+       rcu_read_unlock();
 
-       netlink_kernel_release(sock);
-       aunet->nlsk = NULL;
+       netlink_kernel_release(aunet->sk);
 }
 
 static struct pernet_operations audit_net_ops __net_initdata = {
@@ -1335,20 +1473,24 @@ static int __init audit_init(void)
        if (audit_initialized == AUDIT_DISABLED)
                return 0;
 
-       pr_info("initializing netlink subsys (%s)\n",
-               audit_default ? "enabled" : "disabled");
-       register_pernet_subsys(&audit_net_ops);
+       memset(&auditd_conn, 0, sizeof(auditd_conn));
+       spin_lock_init(&auditd_conn.lock);
 
        skb_queue_head_init(&audit_queue);
        skb_queue_head_init(&audit_retry_queue);
        skb_queue_head_init(&audit_hold_queue);
-       audit_initialized = AUDIT_INITIALIZED;
-       audit_enabled = audit_default;
-       audit_ever_enabled |= !!audit_default;
 
        for (i = 0; i < AUDIT_INODE_BUCKETS; i++)
                INIT_LIST_HEAD(&audit_inode_hash[i]);
 
+       pr_info("initializing netlink subsys (%s)\n",
+               audit_default ? "enabled" : "disabled");
+       register_pernet_subsys(&audit_net_ops);
+
+       audit_initialized = AUDIT_INITIALIZED;
+       audit_enabled = audit_default;
+       audit_ever_enabled |= !!audit_default;
+
        kauditd_task = kthread_run(kauditd_thread, NULL, "kauditd");
        if (IS_ERR(kauditd_task)) {
                int err = PTR_ERR(kauditd_task);
@@ -1519,20 +1661,16 @@ struct audit_buffer *audit_log_start(struct audit_context *ctx, gfp_t gfp_mask,
        if (unlikely(!audit_filter(type, AUDIT_FILTER_TYPE)))
                return NULL;
 
-       /* don't ever fail/sleep on these two conditions:
+       /* NOTE: don't ever fail/sleep on these two conditions:
         * 1. auditd generated record - since we need auditd to drain the
         *    queue; also, when we are checking for auditd, compare PIDs using
         *    task_tgid_vnr() since auditd_pid is set in audit_receive_msg()
         *    using a PID anchored in the caller's namespace
-        * 2. audit command message - record types 1000 through 1099 inclusive
-        *    are command messages/records used to manage the kernel subsystem
-        *    and the audit userspace, blocking on these messages could cause
-        *    problems under load so don't do it (note: not all of these
-        *    command types are valid as record types, but it is quicker to
-        *    just check two ints than a series of ints in a if/switch stmt) */
-       if (!((audit_pid && audit_pid == task_tgid_vnr(current)) ||
-             (type >= 1000 && type <= 1099))) {
-               long sleep_time = audit_backlog_wait_time;
+        * 2. generator holding the audit_cmd_mutex - we don't want to block
+        *    while holding the mutex */
+       if (!(auditd_test_task(current) ||
+             (current == __mutex_owner(&audit_cmd_mutex)))) {
+               long stime = audit_backlog_wait_time;
 
                while (audit_backlog_limit &&
                       (skb_queue_len(&audit_queue) > audit_backlog_limit)) {
@@ -1541,14 +1679,13 @@ struct audit_buffer *audit_log_start(struct audit_context *ctx, gfp_t gfp_mask,
 
                        /* sleep if we are allowed and we haven't exhausted our
                         * backlog wait limit */
-                       if ((gfp_mask & __GFP_DIRECT_RECLAIM) &&
-                           (sleep_time > 0)) {
+                       if (gfpflags_allow_blocking(gfp_mask) && (stime > 0)) {
                                DECLARE_WAITQUEUE(wait, current);
 
                                add_wait_queue_exclusive(&audit_backlog_wait,
                                                         &wait);
                                set_current_state(TASK_UNINTERRUPTIBLE);
-                               sleep_time = schedule_timeout(sleep_time);
+                               stime = schedule_timeout(stime);
                                remove_wait_queue(&audit_backlog_wait, &wait);
                        } else {
                                if (audit_rate_check() && printk_ratelimit())
@@ -2127,15 +2264,27 @@ out:
  */
 void audit_log_end(struct audit_buffer *ab)
 {
+       struct sk_buff *skb;
+       struct nlmsghdr *nlh;
+
        if (!ab)
                return;
-       if (!audit_rate_check()) {
-               audit_log_lost("rate limit exceeded");
-       } else {
-               skb_queue_tail(&audit_queue, ab->skb);
-               wake_up_interruptible(&kauditd_wait);
+
+       if (audit_rate_check()) {
+               skb = ab->skb;
                ab->skb = NULL;
-       }
+
+               /* setup the netlink header, see the comments in
+                * kauditd_send_multicast_skb() for length quirks */
+               nlh = nlmsg_hdr(skb);
+               nlh->nlmsg_len = skb->len - NLMSG_HDRLEN;
+
+               /* queue the netlink packet and poke the kauditd thread */
+               skb_queue_tail(&audit_queue, skb);
+               wake_up_interruptible(&kauditd_wait);
+       } else
+               audit_log_lost("rate limit exceeded");
+
        audit_buffer_free(ab);
 }
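[editor's note] The shape of the new auditd_conn handling is worth calling out: writers serialize on the embedded spinlock, while readers take only the RCU read lock, copy out the fields they need, and pin the namespace before doing sleep-capable work such as netlink_unicast(). A distilled sketch of that pattern with hypothetical names (conn_set, conn_send), not the audit code itself:

struct conn {
	int pid;
	u32 portid;
	struct net *net;
	spinlock_t lock;	/* taken by writers only */
};

static struct conn conn;

static void conn_set(int pid, u32 portid, struct net *net)
{
	unsigned long flags;

	spin_lock_irqsave(&conn.lock, flags);
	conn.pid = pid;
	conn.portid = portid;
	if (conn.net)
		put_net(conn.net);
	conn.net = net ? get_net(net) : NULL;
	spin_unlock_irqrestore(&conn.lock, flags);
}

static int conn_send(struct sk_buff *skb)
{
	struct net *net;
	u32 portid;
	int rc;

	rcu_read_lock();
	if (!conn.pid) {
		rcu_read_unlock();
		return -ECONNREFUSED;
	}
	/* pin the namespace so the RCU lock can be dropped before sending */
	net = get_net(conn.net);
	portid = conn.portid;
	rcu_read_unlock();

	rc = netlink_unicast(audit_get_sk(net), skb, portid, 0);
	put_net(net);
	return rc;
}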
 
index ca579880303ab475b2c81839a4948bdb128e92f8..0f1cf6d1878ab34a65d72644175ed50055df9a09 100644 (file)
@@ -218,7 +218,7 @@ extern void audit_log_name(struct audit_context *context,
                           struct audit_names *n, const struct path *path,
                           int record_num, int *call_panic);
 
-extern int audit_pid;
+extern int auditd_test_task(const struct task_struct *task);
 
 #define AUDIT_INODE_BUCKETS    32
 extern struct list_head audit_inode_hash[AUDIT_INODE_BUCKETS];
@@ -250,10 +250,6 @@ struct audit_netlink_list {
 
 int audit_send_list(void *);
 
-struct audit_net {
-       struct sock *nlsk;
-};
-
 extern int selinux_audit_rule_update(void);
 
 extern struct mutex audit_filter_mutex;
@@ -340,8 +336,7 @@ extern int audit_filter(int msgtype, unsigned int listtype);
 extern int __audit_signal_info(int sig, struct task_struct *t);
 static inline int audit_signal_info(int sig, struct task_struct *t)
 {
-       if (unlikely((audit_pid && t->tgid == audit_pid) ||
-                    (audit_signals && !audit_dummy_context())))
+       if (auditd_test_task(t) || (audit_signals && !audit_dummy_context()))
                return __audit_signal_info(sig, t);
        return 0;
 }
index d6a8de5f8fa3d0ba33c14b20e6341e32d62dab2a..e59ffc7fc522ad9d057c34339807dfe35845d45e 100644 (file)
@@ -762,7 +762,7 @@ static enum audit_state audit_filter_syscall(struct task_struct *tsk,
        struct audit_entry *e;
        enum audit_state state;
 
-       if (audit_pid && tsk->tgid == audit_pid)
+       if (auditd_test_task(tsk))
                return AUDIT_DISABLED;
 
        rcu_read_lock();
@@ -816,7 +816,7 @@ void audit_filter_inodes(struct task_struct *tsk, struct audit_context *ctx)
 {
        struct audit_names *n;
 
-       if (audit_pid && tsk->tgid == audit_pid)
+       if (auditd_test_task(tsk))
                return;
 
        rcu_read_lock();
@@ -2256,7 +2256,7 @@ int __audit_signal_info(int sig, struct task_struct *t)
        struct audit_context *ctx = tsk->audit_context;
        kuid_t uid = current_uid(), t_uid = task_uid(t);
 
-       if (audit_pid && t->tgid == audit_pid) {
+       if (auditd_test_task(t)) {
                if (sig == SIGTERM || sig == SIGHUP || sig == SIGUSR1 || sig == SIGUSR2) {
                        audit_sig_pid = task_tgid_nr(tsk);
                        if (uid_valid(tsk->loginuid))
index e1ce4f4fd7fd47fda2c18776573c0f65479c6728..e1e5e658f2dbf887be70fbfc9492d4365aef5064 100644 (file)
@@ -1,7 +1,7 @@
 obj-y := core.o
 
 obj-$(CONFIG_BPF_SYSCALL) += syscall.o verifier.o inode.o helpers.o
-obj-$(CONFIG_BPF_SYSCALL) += hashtab.o arraymap.o percpu_freelist.o bpf_lru_list.o lpm_trie.o
+obj-$(CONFIG_BPF_SYSCALL) += hashtab.o arraymap.o percpu_freelist.o bpf_lru_list.o lpm_trie.o map_in_map.o
 ifeq ($(CONFIG_PERF_EVENTS),y)
 obj-$(CONFIG_BPF_SYSCALL) += stackmap.o
 endif
index 6b6f41f0b21164a3cc2c26bb71be2b89098bbdbd..ec621df5a97a837b821d7a8b3b4ea12676225fad 100644 (file)
@@ -1,4 +1,5 @@
 /* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
+ * Copyright (c) 2016,2017 Facebook
  *
  * This program is free software; you can redistribute it and/or
  * modify it under the terms of version 2 of the GNU General Public
@@ -16,6 +17,8 @@
 #include <linux/filter.h>
 #include <linux/perf_event.h>
 
+#include "map_in_map.h"
+
 static void bpf_array_free_percpu(struct bpf_array *array)
 {
        int i;
@@ -113,6 +116,30 @@ static void *array_map_lookup_elem(struct bpf_map *map, void *key)
        return array->value + array->elem_size * index;
 }
 
+/* emit BPF instructions equivalent to C code of array_map_lookup_elem() */
+static u32 array_map_gen_lookup(struct bpf_map *map, struct bpf_insn *insn_buf)
+{
+       struct bpf_insn *insn = insn_buf;
+       u32 elem_size = round_up(map->value_size, 8);
+       const int ret = BPF_REG_0;
+       const int map_ptr = BPF_REG_1;
+       const int index = BPF_REG_2;
+
+       *insn++ = BPF_ALU64_IMM(BPF_ADD, map_ptr, offsetof(struct bpf_array, value));
+       *insn++ = BPF_LDX_MEM(BPF_W, ret, index, 0);
+       *insn++ = BPF_JMP_IMM(BPF_JGE, ret, map->max_entries, 3);
+
+       if (is_power_of_2(elem_size)) {
+               *insn++ = BPF_ALU64_IMM(BPF_LSH, ret, ilog2(elem_size));
+       } else {
+               *insn++ = BPF_ALU64_IMM(BPF_MUL, ret, elem_size);
+       }
+       *insn++ = BPF_ALU64_REG(BPF_ADD, ret, map_ptr);
+       *insn++ = BPF_JMP_IMM(BPF_JA, 0, 0, 1);
+       *insn++ = BPF_MOV64_IMM(ret, 0);
+       return insn - insn_buf;
+}
+
 /* Called from eBPF program */
 static void *percpu_array_map_lookup_elem(struct bpf_map *map, void *key)
 {
@@ -260,21 +287,17 @@ static void array_map_free(struct bpf_map *map)
        bpf_map_area_free(array);
 }
 
-static const struct bpf_map_ops array_ops = {
+const struct bpf_map_ops array_map_ops = {
        .map_alloc = array_map_alloc,
        .map_free = array_map_free,
        .map_get_next_key = array_map_get_next_key,
        .map_lookup_elem = array_map_lookup_elem,
        .map_update_elem = array_map_update_elem,
        .map_delete_elem = array_map_delete_elem,
+       .map_gen_lookup = array_map_gen_lookup,
 };
 
-static struct bpf_map_type_list array_type __ro_after_init = {
-       .ops = &array_ops,
-       .type = BPF_MAP_TYPE_ARRAY,
-};
-
-static const struct bpf_map_ops percpu_array_ops = {
+const struct bpf_map_ops percpu_array_map_ops = {
        .map_alloc = array_map_alloc,
        .map_free = array_map_free,
        .map_get_next_key = array_map_get_next_key,
@@ -283,19 +306,6 @@ static const struct bpf_map_ops percpu_array_ops = {
        .map_delete_elem = array_map_delete_elem,
 };
 
-static struct bpf_map_type_list percpu_array_type __ro_after_init = {
-       .ops = &percpu_array_ops,
-       .type = BPF_MAP_TYPE_PERCPU_ARRAY,
-};
-
-static int __init register_array_map(void)
-{
-       bpf_register_map_type(&array_type);
-       bpf_register_map_type(&percpu_array_type);
-       return 0;
-}
-late_initcall(register_array_map);
-
 static struct bpf_map *fd_array_map_alloc(union bpf_attr *attr)
 {
        /* only file descriptors can be stored in this type of map */
@@ -399,7 +409,7 @@ void bpf_fd_array_map_clear(struct bpf_map *map)
                fd_array_map_delete_elem(map, &i);
 }
 
-static const struct bpf_map_ops prog_array_ops = {
+const struct bpf_map_ops prog_array_map_ops = {
        .map_alloc = fd_array_map_alloc,
        .map_free = fd_array_map_free,
        .map_get_next_key = array_map_get_next_key,
@@ -409,18 +419,6 @@ static const struct bpf_map_ops prog_array_ops = {
        .map_fd_put_ptr = prog_fd_array_put_ptr,
 };
 
-static struct bpf_map_type_list prog_array_type __ro_after_init = {
-       .ops = &prog_array_ops,
-       .type = BPF_MAP_TYPE_PROG_ARRAY,
-};
-
-static int __init register_prog_array_map(void)
-{
-       bpf_register_map_type(&prog_array_type);
-       return 0;
-}
-late_initcall(register_prog_array_map);
-
 static struct bpf_event_entry *bpf_event_entry_gen(struct file *perf_file,
                                                   struct file *map_file)
 {
@@ -511,7 +509,7 @@ static void perf_event_fd_array_release(struct bpf_map *map,
        rcu_read_unlock();
 }
 
-static const struct bpf_map_ops perf_event_array_ops = {
+const struct bpf_map_ops perf_event_array_map_ops = {
        .map_alloc = fd_array_map_alloc,
        .map_free = fd_array_map_free,
        .map_get_next_key = array_map_get_next_key,
@@ -522,18 +520,6 @@ static const struct bpf_map_ops perf_event_array_ops = {
        .map_release = perf_event_fd_array_release,
 };
 
-static struct bpf_map_type_list perf_event_array_type __ro_after_init = {
-       .ops = &perf_event_array_ops,
-       .type = BPF_MAP_TYPE_PERF_EVENT_ARRAY,
-};
-
-static int __init register_perf_event_array_map(void)
-{
-       bpf_register_map_type(&perf_event_array_type);
-       return 0;
-}
-late_initcall(register_perf_event_array_map);
-
 #ifdef CONFIG_CGROUPS
 static void *cgroup_fd_array_get_ptr(struct bpf_map *map,
                                     struct file *map_file /* not used */,
@@ -554,7 +540,7 @@ static void cgroup_fd_array_free(struct bpf_map *map)
        fd_array_map_free(map);
 }
 
-static const struct bpf_map_ops cgroup_array_ops = {
+const struct bpf_map_ops cgroup_array_map_ops = {
        .map_alloc = fd_array_map_alloc,
        .map_free = cgroup_fd_array_free,
        .map_get_next_key = array_map_get_next_key,
@@ -563,16 +549,53 @@ static const struct bpf_map_ops cgroup_array_ops = {
        .map_fd_get_ptr = cgroup_fd_array_get_ptr,
        .map_fd_put_ptr = cgroup_fd_array_put_ptr,
 };
+#endif
 
-static struct bpf_map_type_list cgroup_array_type __ro_after_init = {
-       .ops = &cgroup_array_ops,
-       .type = BPF_MAP_TYPE_CGROUP_ARRAY,
-};
+static struct bpf_map *array_of_map_alloc(union bpf_attr *attr)
+{
+       struct bpf_map *map, *inner_map_meta;
+
+       inner_map_meta = bpf_map_meta_alloc(attr->inner_map_fd);
+       if (IS_ERR(inner_map_meta))
+               return inner_map_meta;
 
-static int __init register_cgroup_array_map(void)
+       map = fd_array_map_alloc(attr);
+       if (IS_ERR(map)) {
+               bpf_map_meta_free(inner_map_meta);
+               return map;
+       }
+
+       map->inner_map_meta = inner_map_meta;
+
+       return map;
+}
+
+static void array_of_map_free(struct bpf_map *map)
 {
-       bpf_register_map_type(&cgroup_array_type);
-       return 0;
+       /* map->inner_map_meta is only accessed by syscall which
+        * is protected by fdget/fdput.
+        */
+       bpf_map_meta_free(map->inner_map_meta);
+       bpf_fd_array_map_clear(map);
+       fd_array_map_free(map);
 }
-late_initcall(register_cgroup_array_map);
-#endif
+
+static void *array_of_map_lookup_elem(struct bpf_map *map, void *key)
+{
+       struct bpf_map **inner_map = array_map_lookup_elem(map, key);
+
+       if (!inner_map)
+               return NULL;
+
+       return READ_ONCE(*inner_map);
+}
+
+const struct bpf_map_ops array_of_maps_map_ops = {
+       .map_alloc = array_of_map_alloc,
+       .map_free = array_of_map_free,
+       .map_get_next_key = array_map_get_next_key,
+       .map_lookup_elem = array_of_map_lookup_elem,
+       .map_delete_elem = fd_array_map_delete_elem,
+       .map_fd_get_ptr = bpf_map_fd_get_ptr,
+       .map_fd_put_ptr = bpf_map_fd_put_ptr,
+};
index da0f53690295610ff5ae447ed98fb6e70c99c4f1..ea6033cba94721fd8ca080354c825771d93620fb 100644 (file)
@@ -154,7 +154,7 @@ int __cgroup_bpf_update(struct cgroup *cgrp, struct cgroup *parent,
 
 /**
  * __cgroup_bpf_run_filter_skb() - Run a program for packet filtering
- * @sk: The socken sending or receiving traffic
+ * @sk: The socket sending or receiving traffic
  * @skb: The skb that is being sent or received
 * @type: The type of program to be executed
  *
@@ -189,10 +189,13 @@ int __cgroup_bpf_run_filter_skb(struct sock *sk,
        prog = rcu_dereference(cgrp->bpf.effective[type]);
        if (prog) {
                unsigned int offset = skb->data - skb_network_header(skb);
+               struct sock *save_sk = skb->sk;
 
+               skb->sk = sk;
                __skb_push(skb, offset);
                ret = bpf_prog_run_save_cb(prog, skb) == 1 ? 0 : -EPERM;
                __skb_pull(skb, offset);
+               skb->sk = save_sk;
        }
 
        rcu_read_unlock();
index 3ea87fb19a9416771985d9236f148ffb927ad19b..bc80c038e430a7c5b89a04aa9692735ecf345105 100644 (file)
 #include <linux/bpf.h>
 #include <linux/jhash.h>
 #include <linux/filter.h>
+#include <linux/rculist_nulls.h>
 #include "percpu_freelist.h"
 #include "bpf_lru_list.h"
+#include "map_in_map.h"
 
 struct bucket {
-       struct hlist_head head;
+       struct hlist_nulls_head head;
        raw_spinlock_t lock;
 };
 
@@ -29,28 +31,26 @@ struct bpf_htab {
                struct pcpu_freelist freelist;
                struct bpf_lru lru;
        };
-       void __percpu *extra_elems;
+       struct htab_elem *__percpu *extra_elems;
        atomic_t count; /* number of elements in this hashtable */
        u32 n_buckets;  /* number of hash buckets */
        u32 elem_size;  /* size of each element in bytes */
 };
 
-enum extra_elem_state {
-       HTAB_NOT_AN_EXTRA_ELEM = 0,
-       HTAB_EXTRA_ELEM_FREE,
-       HTAB_EXTRA_ELEM_USED
-};
-
 /* each htab element is struct htab_elem + key + value */
 struct htab_elem {
        union {
-               struct hlist_node hash_node;
-               struct bpf_htab *htab;
-               struct pcpu_freelist_node fnode;
+               struct hlist_nulls_node hash_node;
+               struct {
+                       void *padding;
+                       union {
+                               struct bpf_htab *htab;
+                               struct pcpu_freelist_node fnode;
+                       };
+               };
        };
        union {
                struct rcu_head rcu;
-               enum extra_elem_state state;
                struct bpf_lru_node lru_node;
        };
        u32 hash;
@@ -71,6 +71,11 @@ static bool htab_is_percpu(const struct bpf_htab *htab)
                htab->map.map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH;
 }
 
+static bool htab_is_prealloc(const struct bpf_htab *htab)
+{
+       return !(htab->map.map_flags & BPF_F_NO_PREALLOC);
+}
+
 static inline void htab_elem_set_ptr(struct htab_elem *l, u32 key_size,
                                     void __percpu *pptr)
 {
@@ -82,6 +87,11 @@ static inline void __percpu *htab_elem_get_ptr(struct htab_elem *l, u32 key_size
        return *(void __percpu **)(l->key + key_size);
 }
 
+static void *fd_htab_map_get_ptr(const struct bpf_map *map, struct htab_elem *l)
+{
+       return *(void **)(l->key + roundup(map->key_size, 8));
+}
+
 static struct htab_elem *get_htab_elem(struct bpf_htab *htab, int i)
 {
        return (struct htab_elem *) (htab->elems + i * htab->elem_size);
@@ -122,17 +132,20 @@ static struct htab_elem *prealloc_lru_pop(struct bpf_htab *htab, void *key,
 
 static int prealloc_init(struct bpf_htab *htab)
 {
+       u32 num_entries = htab->map.max_entries;
        int err = -ENOMEM, i;
 
-       htab->elems = bpf_map_area_alloc(htab->elem_size *
-                                        htab->map.max_entries);
+       if (!htab_is_percpu(htab) && !htab_is_lru(htab))
+               num_entries += num_possible_cpus();
+
+       htab->elems = bpf_map_area_alloc(htab->elem_size * num_entries);
        if (!htab->elems)
                return -ENOMEM;
 
        if (!htab_is_percpu(htab))
                goto skip_percpu_elems;
 
-       for (i = 0; i < htab->map.max_entries; i++) {
+       for (i = 0; i < num_entries; i++) {
                u32 size = round_up(htab->map.value_size, 8);
                void __percpu *pptr;
 
@@ -160,10 +173,11 @@ skip_percpu_elems:
        if (htab_is_lru(htab))
                bpf_lru_populate(&htab->lru, htab->elems,
                                 offsetof(struct htab_elem, lru_node),
-                                htab->elem_size, htab->map.max_entries);
+                                htab->elem_size, num_entries);
        else
-               pcpu_freelist_populate(&htab->freelist, htab->elems,
-                                      htab->elem_size, htab->map.max_entries);
+               pcpu_freelist_populate(&htab->freelist,
+                                      htab->elems + offsetof(struct htab_elem, fnode),
+                                      htab->elem_size, num_entries);
 
        return 0;
 
@@ -184,16 +198,22 @@ static void prealloc_destroy(struct bpf_htab *htab)
 
 static int alloc_extra_elems(struct bpf_htab *htab)
 {
-       void __percpu *pptr;
+       struct htab_elem *__percpu *pptr, *l_new;
+       struct pcpu_freelist_node *l;
        int cpu;
 
-       pptr = __alloc_percpu_gfp(htab->elem_size, 8, GFP_USER | __GFP_NOWARN);
+       pptr = __alloc_percpu_gfp(sizeof(struct htab_elem *), 8,
+                                 GFP_USER | __GFP_NOWARN);
        if (!pptr)
                return -ENOMEM;
 
        for_each_possible_cpu(cpu) {
-               ((struct htab_elem *)per_cpu_ptr(pptr, cpu))->state =
-                       HTAB_EXTRA_ELEM_FREE;
+               l = pcpu_freelist_pop(&htab->freelist);
+               /* pop will succeed, since prealloc_init()
+                * preallocated num_possible_cpus() extra elements
+                */
+               l_new = container_of(l, struct htab_elem, fnode);
+               *per_cpu_ptr(pptr, cpu) = l_new;
        }
        htab->extra_elems = pptr;
        return 0;
@@ -217,6 +237,11 @@ static struct bpf_map *htab_map_alloc(union bpf_attr *attr)
        int err, i;
        u64 cost;
 
+       BUILD_BUG_ON(offsetof(struct htab_elem, htab) !=
+                    offsetof(struct htab_elem, hash_node.pprev));
+       BUILD_BUG_ON(offsetof(struct htab_elem, fnode.next) !=
+                    offsetof(struct htab_elem, hash_node.pprev));
+
        if (lru && !capable(CAP_SYS_ADMIN))
                /* LRU implementation is much more complicated than other
                 * maps.  Hence, limit to CAP_SYS_ADMIN for now.
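
The two BUILD_BUG_ON()s above pin down the union layout this patch relies on: while an element sits on the free list (or carries its htab backpointer), only the slot that aliases hash_node.pprev is reused, and hash_node.next stays intact for concurrent nulls-list readers. A stand-alone replica of the check with simplified stand-in types (C11):

    #include <stddef.h>

    struct nulls_node { struct nulls_node *next, **pprev; };

    struct elem {
            union {
                    struct nulls_node hash_node;
                    struct {
                            void *padding;          /* overlays hash_node.next */
                            union {
                                    void *htab;     /* overlays hash_node.pprev */
                                    struct { void *next; } fnode;
                            };
                    };
            };
    };

    _Static_assert(offsetof(struct elem, htab) ==
                   offsetof(struct elem, hash_node.pprev), "htab aliases pprev");
    _Static_assert(offsetof(struct elem, fnode.next) ==
                   offsetof(struct elem, hash_node.pprev), "fnode aliases pprev");
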
@@ -326,29 +351,29 @@ static struct bpf_map *htab_map_alloc(union bpf_attr *attr)
                goto free_htab;
 
        for (i = 0; i < htab->n_buckets; i++) {
-               INIT_HLIST_HEAD(&htab->buckets[i].head);
+               INIT_HLIST_NULLS_HEAD(&htab->buckets[i].head, i);
                raw_spin_lock_init(&htab->buckets[i].lock);
        }
 
-       if (!percpu && !lru) {
-               /* lru itself can remove the least used element, so
-                * there is no need for an extra elem during map_update.
-                */
-               err = alloc_extra_elems(htab);
-               if (err)
-                       goto free_buckets;
-       }
-
        if (prealloc) {
                err = prealloc_init(htab);
                if (err)
-                       goto free_extra_elems;
+                       goto free_buckets;
+
+               if (!percpu && !lru) {
+                       /* lru itself can remove the least used element, so
+                        * there is no need for an extra elem during map_update.
+                        */
+                       err = alloc_extra_elems(htab);
+                       if (err)
+                               goto free_prealloc;
+               }
        }
 
        return &htab->map;
 
-free_extra_elems:
-       free_percpu(htab->extra_elems);
+free_prealloc:
+       prealloc_destroy(htab);
 free_buckets:
        bpf_map_area_free(htab->buckets);
 free_htab:
@@ -366,28 +391,56 @@ static inline struct bucket *__select_bucket(struct bpf_htab *htab, u32 hash)
        return &htab->buckets[hash & (htab->n_buckets - 1)];
 }
 
-static inline struct hlist_head *select_bucket(struct bpf_htab *htab, u32 hash)
+static inline struct hlist_nulls_head *select_bucket(struct bpf_htab *htab, u32 hash)
 {
        return &__select_bucket(htab, hash)->head;
 }
 
-static struct htab_elem *lookup_elem_raw(struct hlist_head *head, u32 hash,
+/* this lookup function can only be called with the bucket lock taken */
+static struct htab_elem *lookup_elem_raw(struct hlist_nulls_head *head, u32 hash,
                                         void *key, u32 key_size)
 {
+       struct hlist_nulls_node *n;
        struct htab_elem *l;
 
-       hlist_for_each_entry_rcu(l, head, hash_node)
+       hlist_nulls_for_each_entry_rcu(l, n, head, hash_node)
                if (l->hash == hash && !memcmp(&l->key, key, key_size))
                        return l;
 
        return NULL;
 }
 
-/* Called from syscall or from eBPF program */
+/* can be called without the bucket lock. it will repeat the loop in
+ * the unlikely event that elements moved from one bucket into another
+ * while the linked list was being walked
+ */
+static struct htab_elem *lookup_nulls_elem_raw(struct hlist_nulls_head *head,
+                                              u32 hash, void *key,
+                                              u32 key_size, u32 n_buckets)
+{
+       struct hlist_nulls_node *n;
+       struct htab_elem *l;
+
+again:
+       hlist_nulls_for_each_entry_rcu(l, n, head, hash_node)
+               if (l->hash == hash && !memcmp(&l->key, key, key_size))
+                       return l;
+
+       if (unlikely(get_nulls_value(n) != (hash & (n_buckets - 1))))
+               goto again;
+
+       return NULL;
+}
+
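
Each bucket's nulls list is terminated not by NULL but by a marker that encodes the bucket index, which is what the get_nulls_value() test above compares against hash & (n_buckets - 1): a lockless walker that ends on a foreign bucket's marker has followed an element recycled into another chain and restarts. A hedged userspace sketch mirroring the list_nulls encoding (an odd pointer whose upper bits carry the value):

    #include <stdio.h>

    #define NULLS_MARKER(v) ((void *)((((unsigned long)(v)) << 1) | 1UL))

    static int is_a_nulls(const void *p)
    {
            return (unsigned long)p & 1UL;
    }

    static unsigned long get_nulls_value(const void *p)
    {
            return (unsigned long)p >> 1;
    }

    int main(void)
    {
            unsigned long hash = 0xdeadbeef, n_buckets = 1024;
            void *end = NULLS_MARKER(hash & (n_buckets - 1));

            /* a reader checks it finished in the bucket it started in */
            printf("is_a_nulls=%d bucket=%lu\n",
                   is_a_nulls(end), get_nulls_value(end));
            return 0;
    }
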
+/* Called from syscall or from eBPF program directly, so
+ * arguments have to match bpf_map_lookup_elem() exactly.
+ * The return value is adjusted by BPF instructions
+ * in htab_map_gen_lookup().
+ */
 static void *__htab_map_lookup_elem(struct bpf_map *map, void *key)
 {
        struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
-       struct hlist_head *head;
+       struct hlist_nulls_head *head;
        struct htab_elem *l;
        u32 hash, key_size;
 
@@ -400,7 +453,7 @@ static void *__htab_map_lookup_elem(struct bpf_map *map, void *key)
 
        head = select_bucket(htab, hash);
 
-       l = lookup_elem_raw(head, hash, key, key_size);
+       l = lookup_nulls_elem_raw(head, hash, key, key_size, htab->n_buckets);
 
        return l;
 }
@@ -415,6 +468,30 @@ static void *htab_map_lookup_elem(struct bpf_map *map, void *key)
        return NULL;
 }
 
+/* inline bpf_map_lookup_elem() call.
+ * Instead of:
+ * bpf_prog
+ *   bpf_map_lookup_elem
+ *     map->ops->map_lookup_elem
+ *       htab_map_lookup_elem
+ *         __htab_map_lookup_elem
+ * do:
+ * bpf_prog
+ *   __htab_map_lookup_elem
+ */
+static u32 htab_map_gen_lookup(struct bpf_map *map, struct bpf_insn *insn_buf)
+{
+       struct bpf_insn *insn = insn_buf;
+       const int ret = BPF_REG_0;
+
+       *insn++ = BPF_EMIT_CALL((u64 (*)(u64, u64, u64, u64, u64))__htab_map_lookup_elem);
+       *insn++ = BPF_JMP_IMM(BPF_JEQ, ret, 0, 1);
+       *insn++ = BPF_ALU64_IMM(BPF_ADD, ret,
+                               offsetof(struct htab_elem, key) +
+                               round_up(map->key_size, 8));
+       return insn - insn_buf;
+}
+
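
In C terms the three emitted instructions amount to roughly the following (a sketch with stand-in types, not generated code): the call lands directly in __htab_map_lookup_elem(), a NULL result jumps over the add, and a hit is advanced from the element header past the 8-byte-rounded key to the value:

    #include <stddef.h>
    #include <stdint.h>

    struct bpf_map { uint32_t key_size; };                /* stand-in */
    struct htab_elem { void *links[2]; uint32_t hash; char key[]; };

    extern struct htab_elem *__htab_map_lookup_elem(struct bpf_map *map,
                                                    void *key);

    static void *inlined_lookup(struct bpf_map *map, void *key)
    {
            struct htab_elem *l = __htab_map_lookup_elem(map, key); /* BPF_EMIT_CALL */

            if (!l)         /* BPF_JMP_IMM(BPF_JEQ, ret, 0, 1): skip the add */
                    return NULL;
            /* BPF_ALU64_IMM(BPF_ADD): offsetof(key) + round_up(key_size, 8) */
            return (char *)l + offsetof(struct htab_elem, key)
                             + ((map->key_size + 7) & ~(uint32_t)7);
    }
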
 static void *htab_lru_map_lookup_elem(struct bpf_map *map, void *key)
 {
        struct htab_elem *l = __htab_map_lookup_elem(map, key);
@@ -433,8 +510,9 @@ static void *htab_lru_map_lookup_elem(struct bpf_map *map, void *key)
 static bool htab_lru_map_delete_node(void *arg, struct bpf_lru_node *node)
 {
        struct bpf_htab *htab = (struct bpf_htab *)arg;
-       struct htab_elem *l, *tgt_l;
-       struct hlist_head *head;
+       struct htab_elem *l = NULL, *tgt_l;
+       struct hlist_nulls_head *head;
+       struct hlist_nulls_node *n;
        unsigned long flags;
        struct bucket *b;
 
@@ -444,9 +522,9 @@ static bool htab_lru_map_delete_node(void *arg, struct bpf_lru_node *node)
 
        raw_spin_lock_irqsave(&b->lock, flags);
 
-       hlist_for_each_entry_rcu(l, head, hash_node)
+       hlist_nulls_for_each_entry_rcu(l, n, head, hash_node)
                if (l == tgt_l) {
-                       hlist_del_rcu(&l->hash_node);
+                       hlist_nulls_del_rcu(&l->hash_node);
                        break;
                }
 
@@ -459,7 +537,7 @@ static bool htab_lru_map_delete_node(void *arg, struct bpf_lru_node *node)
 static int htab_map_get_next_key(struct bpf_map *map, void *key, void *next_key)
 {
        struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
-       struct hlist_head *head;
+       struct hlist_nulls_head *head;
        struct htab_elem *l, *next_l;
        u32 hash, key_size;
        int i;
@@ -473,7 +551,7 @@ static int htab_map_get_next_key(struct bpf_map *map, void *key, void *next_key)
        head = select_bucket(htab, hash);
 
        /* lookup the key */
-       l = lookup_elem_raw(head, hash, key, key_size);
+       l = lookup_nulls_elem_raw(head, hash, key, key_size, htab->n_buckets);
 
        if (!l) {
                i = 0;
@@ -481,7 +559,7 @@ static int htab_map_get_next_key(struct bpf_map *map, void *key, void *next_key)
        }
 
        /* key was found, get next key in the same bucket */
-       next_l = hlist_entry_safe(rcu_dereference_raw(hlist_next_rcu(&l->hash_node)),
+       next_l = hlist_nulls_entry_safe(rcu_dereference_raw(hlist_nulls_next_rcu(&l->hash_node)),
                                  struct htab_elem, hash_node);
 
        if (next_l) {
@@ -500,7 +578,7 @@ find_first_elem:
                head = select_bucket(htab, i);
 
                /* pick first element in the bucket */
-               next_l = hlist_entry_safe(rcu_dereference_raw(hlist_first_rcu(head)),
+               next_l = hlist_nulls_entry_safe(rcu_dereference_raw(hlist_nulls_first_rcu(head)),
                                          struct htab_elem, hash_node);
                if (next_l) {
                        /* if it's not empty, just return it */
@@ -538,12 +616,15 @@ static void htab_elem_free_rcu(struct rcu_head *head)
 
 static void free_htab_elem(struct bpf_htab *htab, struct htab_elem *l)
 {
-       if (l->state == HTAB_EXTRA_ELEM_USED) {
-               l->state = HTAB_EXTRA_ELEM_FREE;
-               return;
+       struct bpf_map *map = &htab->map;
+
+       if (map->ops->map_fd_put_ptr) {
+               void *ptr = fd_htab_map_get_ptr(map, l);
+
+               map->ops->map_fd_put_ptr(ptr);
        }
 
-       if (!(htab->map.map_flags & BPF_F_NO_PREALLOC)) {
+       if (htab_is_prealloc(htab)) {
                pcpu_freelist_push(&htab->freelist, &l->fnode);
        } else {
                atomic_dec(&htab->count);
@@ -573,43 +654,43 @@ static void pcpu_copy_value(struct bpf_htab *htab, void __percpu *pptr,
 static struct htab_elem *alloc_htab_elem(struct bpf_htab *htab, void *key,
                                         void *value, u32 key_size, u32 hash,
                                         bool percpu, bool onallcpus,
-                                        bool old_elem_exists)
+                                        struct htab_elem *old_elem)
 {
        u32 size = htab->map.value_size;
-       bool prealloc = !(htab->map.map_flags & BPF_F_NO_PREALLOC);
-       struct htab_elem *l_new;
+       bool prealloc = htab_is_prealloc(htab);
+       struct htab_elem *l_new, **pl_new;
        void __percpu *pptr;
-       int err = 0;
 
        if (prealloc) {
-               l_new = (struct htab_elem *)pcpu_freelist_pop(&htab->freelist);
-               if (!l_new)
-                       err = -E2BIG;
-       } else {
-               if (atomic_inc_return(&htab->count) > htab->map.max_entries) {
-                       atomic_dec(&htab->count);
-                       err = -E2BIG;
+               if (old_elem) {
+                       /* if we're updating an existing element,
+                        * use per-cpu extra elems to avoid freelist_pop/push
+                        */
+                       pl_new = this_cpu_ptr(htab->extra_elems);
+                       l_new = *pl_new;
+                       *pl_new = old_elem;
                } else {
-                       l_new = kmalloc(htab->elem_size,
-                                       GFP_ATOMIC | __GFP_NOWARN);
-                       if (!l_new)
-                               return ERR_PTR(-ENOMEM);
-               }
-       }
-
-       if (err) {
-               if (!old_elem_exists)
-                       return ERR_PTR(err);
+                       struct pcpu_freelist_node *l;
 
-               /* if we're updating the existing element and the hash table
-                * is full, use per-cpu extra elems
-                */
-               l_new = this_cpu_ptr(htab->extra_elems);
-               if (l_new->state != HTAB_EXTRA_ELEM_FREE)
-                       return ERR_PTR(-E2BIG);
-               l_new->state = HTAB_EXTRA_ELEM_USED;
+                       l = pcpu_freelist_pop(&htab->freelist);
+                       if (!l)
+                               return ERR_PTR(-E2BIG);
+                       l_new = container_of(l, struct htab_elem, fnode);
+               }
        } else {
-               l_new->state = HTAB_NOT_AN_EXTRA_ELEM;
+               if (atomic_inc_return(&htab->count) > htab->map.max_entries)
+                       if (!old_elem) {
+                               /* when the map is full and update() is
+                                * replacing an old element, it's ok to
+                                * allocate, since the old element will be
+                                * freed immediately.  Otherwise return an
+                                * error.
+                                */
+                               atomic_dec(&htab->count);
+                               return ERR_PTR(-E2BIG);
+                       }
+               l_new = kmalloc(htab->elem_size, GFP_ATOMIC | __GFP_NOWARN);
+               if (!l_new)
+                       return ERR_PTR(-ENOMEM);
        }
 
        memcpy(l_new->key, key, key_size);
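
For preallocated maps, replacing an existing key now stays off the shared freelist entirely: the new element is taken from this CPU's extra_elems slot and the displaced old element is parked there for the next update. A hedged single-threaded sketch of that constant-time swap (one spare instead of one per possible CPU):

    #include <stdio.h>

    struct elem { int payload; };

    static struct elem *extra_elem;   /* the per-cpu spare, one here */

    static struct elem *swap_in(struct elem *old_elem)
    {
            struct elem *l_new = extra_elem;  /* take the spare */
            extra_elem = old_elem;            /* park the displaced element */
            return l_new;
    }

    int main(void)
    {
            static struct elem spare, in_table;

            extra_elem = &spare;
            printf("new=%p parked=%p\n",
                   (void *)swap_in(&in_table), (void *)extra_elem);
            return 0;
    }
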
@@ -661,7 +742,7 @@ static int htab_map_update_elem(struct bpf_map *map, void *key, void *value,
 {
        struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
        struct htab_elem *l_new = NULL, *l_old;
-       struct hlist_head *head;
+       struct hlist_nulls_head *head;
        unsigned long flags;
        struct bucket *b;
        u32 key_size, hash;
@@ -690,7 +771,7 @@ static int htab_map_update_elem(struct bpf_map *map, void *key, void *value,
                goto err;
 
        l_new = alloc_htab_elem(htab, key, value, key_size, hash, false, false,
-                               !!l_old);
+                               l_old);
        if (IS_ERR(l_new)) {
                /* all pre-allocated elements are in use or memory exhausted */
                ret = PTR_ERR(l_new);
@@ -700,10 +781,11 @@ static int htab_map_update_elem(struct bpf_map *map, void *key, void *value,
        /* add new element to the head of the list, so that
         * concurrent search will find it before old elem
         */
-       hlist_add_head_rcu(&l_new->hash_node, head);
+       hlist_nulls_add_head_rcu(&l_new->hash_node, head);
        if (l_old) {
-               hlist_del_rcu(&l_old->hash_node);
-               free_htab_elem(htab, l_old);
+               hlist_nulls_del_rcu(&l_old->hash_node);
+               if (!htab_is_prealloc(htab))
+                       free_htab_elem(htab, l_old);
        }
        ret = 0;
 err:
@@ -716,7 +798,7 @@ static int htab_lru_map_update_elem(struct bpf_map *map, void *key, void *value,
 {
        struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
        struct htab_elem *l_new, *l_old = NULL;
-       struct hlist_head *head;
+       struct hlist_nulls_head *head;
        unsigned long flags;
        struct bucket *b;
        u32 key_size, hash;
@@ -757,10 +839,10 @@ static int htab_lru_map_update_elem(struct bpf_map *map, void *key, void *value,
        /* add new element to the head of the list, so that
         * concurrent search will find it before old elem
         */
-       hlist_add_head_rcu(&l_new->hash_node, head);
+       hlist_nulls_add_head_rcu(&l_new->hash_node, head);
        if (l_old) {
                bpf_lru_node_set_ref(&l_new->lru_node);
-               hlist_del_rcu(&l_old->hash_node);
+               hlist_nulls_del_rcu(&l_old->hash_node);
        }
        ret = 0;
 
@@ -781,7 +863,7 @@ static int __htab_percpu_map_update_elem(struct bpf_map *map, void *key,
 {
        struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
        struct htab_elem *l_new = NULL, *l_old;
-       struct hlist_head *head;
+       struct hlist_nulls_head *head;
        unsigned long flags;
        struct bucket *b;
        u32 key_size, hash;
@@ -815,12 +897,12 @@ static int __htab_percpu_map_update_elem(struct bpf_map *map, void *key,
                                value, onallcpus);
        } else {
                l_new = alloc_htab_elem(htab, key, value, key_size,
-                                       hash, true, onallcpus, false);
+                                       hash, true, onallcpus, NULL);
                if (IS_ERR(l_new)) {
                        ret = PTR_ERR(l_new);
                        goto err;
                }
-               hlist_add_head_rcu(&l_new->hash_node, head);
+               hlist_nulls_add_head_rcu(&l_new->hash_node, head);
        }
        ret = 0;
 err:
@@ -834,7 +916,7 @@ static int __htab_lru_percpu_map_update_elem(struct bpf_map *map, void *key,
 {
        struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
        struct htab_elem *l_new = NULL, *l_old;
-       struct hlist_head *head;
+       struct hlist_nulls_head *head;
        unsigned long flags;
        struct bucket *b;
        u32 key_size, hash;
@@ -882,7 +964,7 @@ static int __htab_lru_percpu_map_update_elem(struct bpf_map *map, void *key,
        } else {
                pcpu_copy_value(htab, htab_elem_get_ptr(l_new, key_size),
                                value, onallcpus);
-               hlist_add_head_rcu(&l_new->hash_node, head);
+               hlist_nulls_add_head_rcu(&l_new->hash_node, head);
                l_new = NULL;
        }
        ret = 0;
@@ -910,7 +992,7 @@ static int htab_lru_percpu_map_update_elem(struct bpf_map *map, void *key,
 static int htab_map_delete_elem(struct bpf_map *map, void *key)
 {
        struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
-       struct hlist_head *head;
+       struct hlist_nulls_head *head;
        struct bucket *b;
        struct htab_elem *l;
        unsigned long flags;
@@ -930,7 +1012,7 @@ static int htab_map_delete_elem(struct bpf_map *map, void *key)
        l = lookup_elem_raw(head, hash, key, key_size);
 
        if (l) {
-               hlist_del_rcu(&l->hash_node);
+               hlist_nulls_del_rcu(&l->hash_node);
                free_htab_elem(htab, l);
                ret = 0;
        }
@@ -942,7 +1024,7 @@ static int htab_map_delete_elem(struct bpf_map *map, void *key)
 static int htab_lru_map_delete_elem(struct bpf_map *map, void *key)
 {
        struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
-       struct hlist_head *head;
+       struct hlist_nulls_head *head;
        struct bucket *b;
        struct htab_elem *l;
        unsigned long flags;
@@ -962,7 +1044,7 @@ static int htab_lru_map_delete_elem(struct bpf_map *map, void *key)
        l = lookup_elem_raw(head, hash, key, key_size);
 
        if (l) {
-               hlist_del_rcu(&l->hash_node);
+               hlist_nulls_del_rcu(&l->hash_node);
                ret = 0;
        }
 
@@ -977,17 +1059,17 @@ static void delete_all_elements(struct bpf_htab *htab)
        int i;
 
        for (i = 0; i < htab->n_buckets; i++) {
-               struct hlist_head *head = select_bucket(htab, i);
-               struct hlist_node *n;
+               struct hlist_nulls_head *head = select_bucket(htab, i);
+               struct hlist_nulls_node *n;
                struct htab_elem *l;
 
-               hlist_for_each_entry_safe(l, n, head, hash_node) {
-                       hlist_del_rcu(&l->hash_node);
-                       if (l->state != HTAB_EXTRA_ELEM_USED)
-                               htab_elem_free(htab, l);
+               hlist_nulls_for_each_entry_safe(l, n, head, hash_node) {
+                       hlist_nulls_del_rcu(&l->hash_node);
+                       htab_elem_free(htab, l);
                }
        }
 }
+
 /* Called when map->refcnt goes to zero, either from workqueue or from syscall */
 static void htab_map_free(struct bpf_map *map)
 {
@@ -1004,7 +1086,7 @@ static void htab_map_free(struct bpf_map *map)
         * not have executed. Wait for them.
         */
        rcu_barrier();
-       if (htab->map.map_flags & BPF_F_NO_PREALLOC)
+       if (!htab_is_prealloc(htab))
                delete_all_elements(htab);
        else
                prealloc_destroy(htab);
@@ -1014,21 +1096,17 @@ static void htab_map_free(struct bpf_map *map)
        kfree(htab);
 }
 
-static const struct bpf_map_ops htab_ops = {
+const struct bpf_map_ops htab_map_ops = {
        .map_alloc = htab_map_alloc,
        .map_free = htab_map_free,
        .map_get_next_key = htab_map_get_next_key,
        .map_lookup_elem = htab_map_lookup_elem,
        .map_update_elem = htab_map_update_elem,
        .map_delete_elem = htab_map_delete_elem,
+       .map_gen_lookup = htab_map_gen_lookup,
 };
 
-static struct bpf_map_type_list htab_type __ro_after_init = {
-       .ops = &htab_ops,
-       .type = BPF_MAP_TYPE_HASH,
-};
-
-static const struct bpf_map_ops htab_lru_ops = {
+const struct bpf_map_ops htab_lru_map_ops = {
        .map_alloc = htab_map_alloc,
        .map_free = htab_map_free,
        .map_get_next_key = htab_map_get_next_key,
@@ -1037,11 +1115,6 @@ static const struct bpf_map_ops htab_lru_ops = {
        .map_delete_elem = htab_lru_map_delete_elem,
 };
 
-static struct bpf_map_type_list htab_lru_type __ro_after_init = {
-       .ops = &htab_lru_ops,
-       .type = BPF_MAP_TYPE_LRU_HASH,
-};
-
 /* Called from eBPF program */
 static void *htab_percpu_map_lookup_elem(struct bpf_map *map, void *key)
 {
@@ -1115,7 +1188,7 @@ int bpf_percpu_hash_update(struct bpf_map *map, void *key, void *value,
        return ret;
 }
 
-static const struct bpf_map_ops htab_percpu_ops = {
+const struct bpf_map_ops htab_percpu_map_ops = {
        .map_alloc = htab_map_alloc,
        .map_free = htab_map_free,
        .map_get_next_key = htab_map_get_next_key,
@@ -1124,12 +1197,7 @@ static const struct bpf_map_ops htab_percpu_ops = {
        .map_delete_elem = htab_map_delete_elem,
 };
 
-static struct bpf_map_type_list htab_percpu_type __ro_after_init = {
-       .ops = &htab_percpu_ops,
-       .type = BPF_MAP_TYPE_PERCPU_HASH,
-};
-
-static const struct bpf_map_ops htab_lru_percpu_ops = {
+const struct bpf_map_ops htab_lru_percpu_map_ops = {
        .map_alloc = htab_map_alloc,
        .map_free = htab_map_free,
        .map_get_next_key = htab_map_get_next_key,
@@ -1138,17 +1206,102 @@ static const struct bpf_map_ops htab_lru_percpu_ops = {
        .map_delete_elem = htab_lru_map_delete_elem,
 };
 
-static struct bpf_map_type_list htab_lru_percpu_type __ro_after_init = {
-       .ops = &htab_lru_percpu_ops,
-       .type = BPF_MAP_TYPE_LRU_PERCPU_HASH,
-};
+static struct bpf_map *fd_htab_map_alloc(union bpf_attr *attr)
+{
+       struct bpf_map *map;
 
-static int __init register_htab_map(void)
+       if (attr->value_size != sizeof(u32))
+               return ERR_PTR(-EINVAL);
+
+       /* pointer is stored internally */
+       attr->value_size = sizeof(void *);
+       map = htab_map_alloc(attr);
+       attr->value_size = sizeof(u32);
+
+       return map;
+}
+
+static void fd_htab_map_free(struct bpf_map *map)
 {
-       bpf_register_map_type(&htab_type);
-       bpf_register_map_type(&htab_percpu_type);
-       bpf_register_map_type(&htab_lru_type);
-       bpf_register_map_type(&htab_lru_percpu_type);
-       return 0;
+       struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
+       struct hlist_nulls_node *n;
+       struct hlist_nulls_head *head;
+       struct htab_elem *l;
+       int i;
+
+       for (i = 0; i < htab->n_buckets; i++) {
+               head = select_bucket(htab, i);
+
+               hlist_nulls_for_each_entry_safe(l, n, head, hash_node) {
+                       void *ptr = fd_htab_map_get_ptr(map, l);
+
+                       map->ops->map_fd_put_ptr(ptr);
+               }
+       }
+
+       htab_map_free(map);
+}
+
+/* only called from syscall */
+int bpf_fd_htab_map_update_elem(struct bpf_map *map, struct file *map_file,
+                               void *key, void *value, u64 map_flags)
+{
+       void *ptr;
+       int ret;
+       u32 ufd = *(u32 *)value;
+
+       ptr = map->ops->map_fd_get_ptr(map, map_file, ufd);
+       if (IS_ERR(ptr))
+               return PTR_ERR(ptr);
+
+       ret = htab_map_update_elem(map, key, &ptr, map_flags);
+       if (ret)
+               map->ops->map_fd_put_ptr(ptr);
+
+       return ret;
+}
+
+static struct bpf_map *htab_of_map_alloc(union bpf_attr *attr)
+{
+       struct bpf_map *map, *inner_map_meta;
+
+       inner_map_meta = bpf_map_meta_alloc(attr->inner_map_fd);
+       if (IS_ERR(inner_map_meta))
+               return inner_map_meta;
+
+       map = fd_htab_map_alloc(attr);
+       if (IS_ERR(map)) {
+               bpf_map_meta_free(inner_map_meta);
+               return map;
+       }
+
+       map->inner_map_meta = inner_map_meta;
+
+       return map;
 }
-late_initcall(register_htab_map);
+
+static void *htab_of_map_lookup_elem(struct bpf_map *map, void *key)
+{
+       struct bpf_map **inner_map  = htab_map_lookup_elem(map, key);
+
+       if (!inner_map)
+               return NULL;
+
+       return READ_ONCE(*inner_map);
+}
+
+static void htab_of_map_free(struct bpf_map *map)
+{
+       bpf_map_meta_free(map->inner_map_meta);
+       fd_htab_map_free(map);
+}
+
+const struct bpf_map_ops htab_of_maps_map_ops = {
+       .map_alloc = htab_of_map_alloc,
+       .map_free = htab_of_map_free,
+       .map_get_next_key = htab_map_get_next_key,
+       .map_lookup_elem = htab_of_map_lookup_elem,
+       .map_delete_elem = htab_map_delete_elem,
+       .map_fd_get_ptr = bpf_map_fd_get_ptr,
+       .map_fd_put_ptr = bpf_map_fd_put_ptr,
+};
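
From a program's point of view, the lookup above means bpf_map_lookup_elem() on the outer map yields an inner map pointer (retyped by the verifier as CONST_PTR_TO_MAP, see the mark_map_reg() change further down) that can feed a second lookup. A hedged restricted-C sketch in the style of samples/bpf; the map definition, section names and header are illustrative assumptions:

    #include <linux/bpf.h>
    #include "bpf_helpers.h"   /* assumed: SEC(), bpf_map_lookup_elem(), bpf_map_def */

    struct bpf_map_def SEC("maps") outer_map = {
            .type        = BPF_MAP_TYPE_HASH_OF_MAPS,
            .key_size    = sizeof(__u32),
            .value_size  = sizeof(__u32),  /* an fd from userspace's view */
            .max_entries = 16,
    };

    SEC("socket")
    int lookup_twice(struct __sk_buff *skb)
    {
            __u32 outer_key = 0, inner_key = 0;
            void *inner_map, *value;

            inner_map = bpf_map_lookup_elem(&outer_map, &outer_key);
            if (!inner_map)
                    return 0;
            value = bpf_map_lookup_elem(inner_map, &inner_key);
            return value != 0;
    }
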
index b37bd9ab7f574242722c1d9b0503b896019488b2..39cfafd895b80d2f0fb5054626af001949a8ad98 100644
@@ -505,7 +505,7 @@ static int trie_get_next_key(struct bpf_map *map, void *key, void *next_key)
        return -ENOTSUPP;
 }
 
-static const struct bpf_map_ops trie_ops = {
+const struct bpf_map_ops trie_map_ops = {
        .map_alloc = trie_alloc,
        .map_free = trie_free,
        .map_get_next_key = trie_get_next_key,
@@ -513,15 +513,3 @@ static const struct bpf_map_ops trie_ops = {
        .map_update_elem = trie_update_elem,
        .map_delete_elem = trie_delete_elem,
 };
-
-static struct bpf_map_type_list trie_type __ro_after_init = {
-       .ops = &trie_ops,
-       .type = BPF_MAP_TYPE_LPM_TRIE,
-};
-
-static int __init register_trie_map(void)
-{
-       bpf_register_map_type(&trie_type);
-       return 0;
-}
-late_initcall(register_trie_map);
diff --git a/kernel/bpf/map_in_map.c b/kernel/bpf/map_in_map.c
new file mode 100644
index 0000000..59bcdf8
--- /dev/null
@@ -0,0 +1,97 @@
+/* Copyright (c) 2017 Facebook
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of version 2 of the GNU General Public
+ * License as published by the Free Software Foundation.
+ */
+#include <linux/slab.h>
+#include <linux/bpf.h>
+
+#include "map_in_map.h"
+
+struct bpf_map *bpf_map_meta_alloc(int inner_map_ufd)
+{
+       struct bpf_map *inner_map, *inner_map_meta;
+       struct fd f;
+
+       f = fdget(inner_map_ufd);
+       inner_map = __bpf_map_get(f);
+       if (IS_ERR(inner_map))
+               return inner_map;
+
+       /* prog_array->owner_prog_type and owner_jited
+        * are a runtime binding.  Doing a static check alone
+        * in the verifier is not enough.
+        */
+       if (inner_map->map_type == BPF_MAP_TYPE_PROG_ARRAY) {
+               fdput(f);
+               return ERR_PTR(-ENOTSUPP);
+       }
+
+       /* Does not support >1 level map-in-map */
+       if (inner_map->inner_map_meta) {
+               fdput(f);
+               return ERR_PTR(-EINVAL);
+       }
+
+       inner_map_meta = kzalloc(sizeof(*inner_map_meta), GFP_USER);
+       if (!inner_map_meta) {
+               fdput(f);
+               return ERR_PTR(-ENOMEM);
+       }
+
+       inner_map_meta->map_type = inner_map->map_type;
+       inner_map_meta->key_size = inner_map->key_size;
+       inner_map_meta->value_size = inner_map->value_size;
+       inner_map_meta->map_flags = inner_map->map_flags;
+       inner_map_meta->ops = inner_map->ops;
+       inner_map_meta->max_entries = inner_map->max_entries;
+
+       fdput(f);
+       return inner_map_meta;
+}
+
+void bpf_map_meta_free(struct bpf_map *map_meta)
+{
+       kfree(map_meta);
+}
+
+bool bpf_map_meta_equal(const struct bpf_map *meta0,
+                       const struct bpf_map *meta1)
+{
+       /* No need to compare ops because it is covered by map_type */
+       return meta0->map_type == meta1->map_type &&
+               meta0->key_size == meta1->key_size &&
+               meta0->value_size == meta1->value_size &&
+               meta0->map_flags == meta1->map_flags &&
+               meta0->max_entries == meta1->max_entries;
+}
+
+void *bpf_map_fd_get_ptr(struct bpf_map *map,
+                        struct file *map_file /* not used */,
+                        int ufd)
+{
+       struct bpf_map *inner_map;
+       struct fd f;
+
+       f = fdget(ufd);
+       inner_map = __bpf_map_get(f);
+       if (IS_ERR(inner_map))
+               return inner_map;
+
+       if (bpf_map_meta_equal(map->inner_map_meta, inner_map))
+               inner_map = bpf_map_inc(inner_map, false);
+       else
+               inner_map = ERR_PTR(-EINVAL);
+
+       fdput(f);
+       return inner_map;
+}
+
+void bpf_map_fd_put_ptr(void *ptr)
+{
+       /* ptr->ops->map_free() has to go through one
+        * rcu grace period by itself.
+        */
+       bpf_map_put(ptr);
+}
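
Userspace ties this together at map-create time: the new inner_map_fd attribute (BPF_MAP_CREATE_LAST_FIELD moves to it below) names a template map whose type and sizes bpf_map_meta_alloc() snapshots for later equality checks. A hedged sketch of the raw syscall usage, assuming headers that carry this patch (error handling elided):

    #include <linux/bpf.h>
    #include <string.h>
    #include <sys/syscall.h>
    #include <unistd.h>

    /* inner_fd is an existing map used purely as a template here */
    static int create_hash_of_maps(int inner_fd, unsigned int max_entries)
    {
            union bpf_attr attr;

            memset(&attr, 0, sizeof(attr));
            attr.map_type     = BPF_MAP_TYPE_HASH_OF_MAPS;
            attr.key_size     = sizeof(__u32);
            attr.value_size   = sizeof(__u32);  /* an fd, per fd_htab_map_alloc() */
            attr.max_entries  = max_entries;
            attr.inner_map_fd = inner_fd;

            return syscall(__NR_bpf, BPF_MAP_CREATE, &attr, sizeof(attr));
    }
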
diff --git a/kernel/bpf/map_in_map.h b/kernel/bpf/map_in_map.h
new file mode 100644
index 0000000..177fadb
--- /dev/null
@@ -0,0 +1,23 @@
+/* Copyright (c) 2017 Facebook
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of version 2 of the GNU General Public
+ * License as published by the Free Software Foundation.
+ */
+#ifndef __MAP_IN_MAP_H__
+#define __MAP_IN_MAP_H__
+
+#include <linux/types.h>
+
+struct file;
+struct bpf_map;
+
+struct bpf_map *bpf_map_meta_alloc(int inner_map_ufd);
+void bpf_map_meta_free(struct bpf_map *map_meta);
+bool bpf_map_meta_equal(const struct bpf_map *meta0,
+                       const struct bpf_map *meta1);
+void *bpf_map_fd_get_ptr(struct bpf_map *map, struct file *map_file,
+                        int ufd);
+void bpf_map_fd_put_ptr(void *ptr);
+
+#endif
index 22aa45cd0324e320b9cd8b0f89bf9befdaae74b6..4dfd6f2ec2f9725681f490971e60acc8033c64b6 100644
@@ -264,7 +264,7 @@ static void stack_map_free(struct bpf_map *map)
        put_callchain_buffers();
 }
 
-static const struct bpf_map_ops stack_map_ops = {
+const struct bpf_map_ops stack_map_ops = {
        .map_alloc = stack_map_alloc,
        .map_free = stack_map_free,
        .map_get_next_key = stack_map_get_next_key,
@@ -272,15 +272,3 @@ static const struct bpf_map_ops stack_map_ops = {
        .map_update_elem = stack_map_update_elem,
        .map_delete_elem = stack_map_delete_elem,
 };
-
-static struct bpf_map_type_list stack_map_type __ro_after_init = {
-       .ops = &stack_map_ops,
-       .type = BPF_MAP_TYPE_STACK_TRACE,
-};
-
-static int __init register_stack_map(void)
-{
-       bpf_register_map_type(&stack_map_type);
-       return 0;
-}
-late_initcall(register_stack_map);
index 7af0dcc5d7555679cea6c08395ab54710e7066e6..b89288e2b58928cce371abb06b002696c754acec 100644
@@ -27,30 +27,29 @@ DEFINE_PER_CPU(int, bpf_prog_active);
 
 int sysctl_unprivileged_bpf_disabled __read_mostly;
 
-static LIST_HEAD(bpf_map_types);
+static const struct bpf_map_ops * const bpf_map_types[] = {
+#define BPF_PROG_TYPE(_id, _ops)
+#define BPF_MAP_TYPE(_id, _ops) \
+       [_id] = &_ops,
+#include <linux/bpf_types.h>
+#undef BPF_PROG_TYPE
+#undef BPF_MAP_TYPE
+};
 
 static struct bpf_map *find_and_alloc_map(union bpf_attr *attr)
 {
-       struct bpf_map_type_list *tl;
        struct bpf_map *map;
 
-       list_for_each_entry(tl, &bpf_map_types, list_node) {
-               if (tl->type == attr->map_type) {
-                       map = tl->ops->map_alloc(attr);
-                       if (IS_ERR(map))
-                               return map;
-                       map->ops = tl->ops;
-                       map->map_type = attr->map_type;
-                       return map;
-               }
-       }
-       return ERR_PTR(-EINVAL);
-}
+       if (attr->map_type >= ARRAY_SIZE(bpf_map_types) ||
+           !bpf_map_types[attr->map_type])
+               return ERR_PTR(-EINVAL);
 
-/* boot time registration of different map implementations */
-void bpf_register_map_type(struct bpf_map_type_list *tl)
-{
-       list_add(&tl->list_node, &bpf_map_types);
+       map = bpf_map_types[attr->map_type]->map_alloc(attr);
+       if (IS_ERR(map))
+               return map;
+       map->ops = bpf_map_types[attr->map_type];
+       map->map_type = attr->map_type;
+       return map;
 }
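
The run-time registration list is gone in favor of a build-time table: <linux/bpf_types.h> (introduced elsewhere in this series) is an X-macro list that each consumer expands with its own BPF_PROG_TYPE/BPF_MAP_TYPE definitions, as done just above. Its entries look along these lines (an illustrative, config-dependent subset):

    /* internal header - expanded under different macro definitions */
    BPF_PROG_TYPE(BPF_PROG_TYPE_SOCKET_FILTER, sk_filter_prog_ops)
    BPF_PROG_TYPE(BPF_PROG_TYPE_KPROBE, kprobe_prog_ops)

    BPF_MAP_TYPE(BPF_MAP_TYPE_ARRAY, array_map_ops)
    BPF_MAP_TYPE(BPF_MAP_TYPE_HASH, htab_map_ops)
    BPF_MAP_TYPE(BPF_MAP_TYPE_HASH_OF_MAPS, htab_of_maps_map_ops)
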
 
 void *bpf_map_area_alloc(size_t size)
@@ -215,7 +214,7 @@ int bpf_map_new_fd(struct bpf_map *map)
                   offsetof(union bpf_attr, CMD##_LAST_FIELD) - \
                   sizeof(attr->CMD##_LAST_FIELD)) != NULL
 
-#define BPF_MAP_CREATE_LAST_FIELD map_flags
+#define BPF_MAP_CREATE_LAST_FIELD inner_map_fd
 /* called via syscall */
 static int map_create(union bpf_attr *attr)
 {
@@ -352,6 +351,9 @@ static int map_lookup_elem(union bpf_attr *attr)
                err = bpf_percpu_array_copy(map, key, value);
        } else if (map->map_type == BPF_MAP_TYPE_STACK_TRACE) {
                err = bpf_stackmap_copy(map, key, value);
+       } else if (map->map_type == BPF_MAP_TYPE_ARRAY_OF_MAPS ||
+                  map->map_type == BPF_MAP_TYPE_HASH_OF_MAPS) {
+               err = -ENOTSUPP;
        } else {
                rcu_read_lock();
                ptr = map->ops->map_lookup_elem(map, key);
@@ -438,11 +440,17 @@ static int map_update_elem(union bpf_attr *attr)
                err = bpf_percpu_array_update(map, key, value, attr->flags);
        } else if (map->map_type == BPF_MAP_TYPE_PERF_EVENT_ARRAY ||
                   map->map_type == BPF_MAP_TYPE_PROG_ARRAY ||
-                  map->map_type == BPF_MAP_TYPE_CGROUP_ARRAY) {
+                  map->map_type == BPF_MAP_TYPE_CGROUP_ARRAY ||
+                  map->map_type == BPF_MAP_TYPE_ARRAY_OF_MAPS) {
                rcu_read_lock();
                err = bpf_fd_array_map_update_elem(map, f.file, key, value,
                                                   attr->flags);
                rcu_read_unlock();
+       } else if (map->map_type == BPF_MAP_TYPE_HASH_OF_MAPS) {
+               rcu_read_lock();
+               err = bpf_fd_htab_map_update_elem(map, f.file, key, value,
+                                                 attr->flags);
+               rcu_read_unlock();
        } else {
                rcu_read_lock();
                err = map->ops->map_update_elem(map, key, value, attr->flags);
@@ -564,79 +572,23 @@ err_put:
        return err;
 }
 
-static LIST_HEAD(bpf_prog_types);
+static const struct bpf_verifier_ops * const bpf_prog_types[] = {
+#define BPF_PROG_TYPE(_id, _ops) \
+       [_id] = &_ops,
+#define BPF_MAP_TYPE(_id, _ops)
+#include <linux/bpf_types.h>
+#undef BPF_PROG_TYPE
+#undef BPF_MAP_TYPE
+};
 
 static int find_prog_type(enum bpf_prog_type type, struct bpf_prog *prog)
 {
-       struct bpf_prog_type_list *tl;
-
-       list_for_each_entry(tl, &bpf_prog_types, list_node) {
-               if (tl->type == type) {
-                       prog->aux->ops = tl->ops;
-                       prog->type = type;
-                       return 0;
-               }
-       }
-
-       return -EINVAL;
-}
-
-void bpf_register_prog_type(struct bpf_prog_type_list *tl)
-{
-       list_add(&tl->list_node, &bpf_prog_types);
-}
-
-/* fixup insn->imm field of bpf_call instructions:
- * if (insn->imm == BPF_FUNC_map_lookup_elem)
- *      insn->imm = bpf_map_lookup_elem - __bpf_call_base;
- * else if (insn->imm == BPF_FUNC_map_update_elem)
- *      insn->imm = bpf_map_update_elem - __bpf_call_base;
- * else ...
- *
- * this function is called after eBPF program passed verification
- */
-static void fixup_bpf_calls(struct bpf_prog *prog)
-{
-       const struct bpf_func_proto *fn;
-       int i;
+       if (type >= ARRAY_SIZE(bpf_prog_types) || !bpf_prog_types[type])
+               return -EINVAL;
 
-       for (i = 0; i < prog->len; i++) {
-               struct bpf_insn *insn = &prog->insnsi[i];
-
-               if (insn->code == (BPF_JMP | BPF_CALL)) {
-                       /* we reach here when program has bpf_call instructions
-                        * and it passed bpf_check(), means that
-                        * ops->get_func_proto must have been supplied, check it
-                        */
-                       BUG_ON(!prog->aux->ops->get_func_proto);
-
-                       if (insn->imm == BPF_FUNC_get_route_realm)
-                               prog->dst_needed = 1;
-                       if (insn->imm == BPF_FUNC_get_prandom_u32)
-                               bpf_user_rnd_init_once();
-                       if (insn->imm == BPF_FUNC_xdp_adjust_head)
-                               prog->xdp_adjust_head = 1;
-                       if (insn->imm == BPF_FUNC_tail_call) {
-                               /* mark bpf_tail_call as different opcode
-                                * to avoid conditional branch in
-                                * interpeter for every normal call
-                                * and to prevent accidental JITing by
-                                * JIT compiler that doesn't support
-                                * bpf_tail_call yet
-                                */
-                               insn->imm = 0;
-                               insn->code |= BPF_X;
-                               continue;
-                       }
-
-                       fn = prog->aux->ops->get_func_proto(insn->imm);
-                       /* all functions that have prototype and verifier allowed
-                        * programs to call them, must be real in-kernel functions
-                        */
-                       BUG_ON(!fn->func);
-                       insn->imm = fn->func - __bpf_call_base;
-               }
-       }
+       prog->aux->ops = bpf_prog_types[type];
+       prog->type = type;
+       return 0;
 }
 
 /* drop refcnt on maps used by eBPF program and free auxiliary data */
@@ -892,9 +844,6 @@ static int bpf_prog_load(union bpf_attr *attr)
        if (err < 0)
                goto free_used_maps;
 
-       /* fixup BPF_CALL->imm field */
-       fixup_bpf_calls(prog);
-
        /* eBPF program is ready to be JITed */
        prog = bpf_prog_select_runtime(prog, &err);
        if (err < 0)
@@ -1020,6 +969,28 @@ static int bpf_prog_detach(const union bpf_attr *attr)
 }
 #endif /* CONFIG_CGROUP_BPF */
 
+#define BPF_PROG_TEST_RUN_LAST_FIELD test.duration
+
+static int bpf_prog_test_run(const union bpf_attr *attr,
+                            union bpf_attr __user *uattr)
+{
+       struct bpf_prog *prog;
+       int ret = -ENOTSUPP;
+
+       if (CHECK_ATTR(BPF_PROG_TEST_RUN))
+               return -EINVAL;
+
+       prog = bpf_prog_get(attr->test.prog_fd);
+       if (IS_ERR(prog))
+               return PTR_ERR(prog);
+
+       if (prog->aux->ops->test_run)
+               ret = prog->aux->ops->test_run(prog, attr, uattr);
+
+       bpf_prog_put(prog);
+       return ret;
+}
+
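
The new command is driven straight through the bpf(2) syscall; the test attributes carry the program fd, input packet data and a repeat count, and hand back the program's return value and timing. A hedged userspace sketch, assuming headers that carry this patch:

    #include <linux/bpf.h>
    #include <string.h>
    #include <sys/syscall.h>
    #include <unistd.h>

    static int prog_test_run(int prog_fd, void *data, __u32 size, __u32 *retval)
    {
            union bpf_attr attr;
            int err;

            memset(&attr, 0, sizeof(attr));
            attr.test.prog_fd      = prog_fd;
            attr.test.data_in      = (__u64)(unsigned long)data;
            attr.test.data_size_in = size;
            attr.test.repeat       = 1;

            err = syscall(__NR_bpf, BPF_PROG_TEST_RUN, &attr, sizeof(attr));
            if (!err && retval)
                    *retval = attr.test.retval;
            return err;
    }
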
 SYSCALL_DEFINE3(bpf, int, cmd, union bpf_attr __user *, uattr, unsigned int, size)
 {
        union bpf_attr attr = {};
@@ -1086,7 +1057,6 @@ SYSCALL_DEFINE3(bpf, int, cmd, union bpf_attr __user *, uattr, unsigned int, siz
        case BPF_OBJ_GET:
                err = bpf_obj_get(&attr);
                break;
-
 #ifdef CONFIG_CGROUP_BPF
        case BPF_PROG_ATTACH:
                err = bpf_prog_attach(&attr);
@@ -1095,7 +1065,9 @@ SYSCALL_DEFINE3(bpf, int, cmd, union bpf_attr __user *, uattr, unsigned int, siz
                err = bpf_prog_detach(&attr);
                break;
 #endif
-
+       case BPF_PROG_TEST_RUN:
+               err = bpf_prog_test_run(&attr, uattr);
+               break;
        default:
                err = -EINVAL;
                break;
index 796b68d001198a39186cba850fe8161476a17bfa..62e1e447ded9d74b0dfaa22ab751170291d2a874 100644
@@ -143,6 +143,8 @@ struct bpf_verifier_stack_elem {
 #define BPF_COMPLEXITY_LIMIT_INSNS     65536
 #define BPF_COMPLEXITY_LIMIT_STACK     1024
 
+#define BPF_MAP_PTR_POISON ((void *)0xeB9F + POISON_POINTER_DELTA)
+
 struct bpf_call_arg_meta {
        struct bpf_map *map_ptr;
        bool raw_mode;
@@ -765,38 +767,56 @@ static bool is_pointer_value(struct bpf_verifier_env *env, int regno)
        }
 }
 
-static int check_ptr_alignment(struct bpf_verifier_env *env,
-                              struct bpf_reg_state *reg, int off, int size)
+static int check_pkt_ptr_alignment(const struct bpf_reg_state *reg,
+                                  int off, int size)
 {
-       if (reg->type != PTR_TO_PACKET && reg->type != PTR_TO_MAP_VALUE_ADJ) {
-               if (off % size != 0) {
-                       verbose("misaligned access off %d size %d\n",
-                               off, size);
-                       return -EACCES;
-               } else {
-                       return 0;
-               }
-       }
-
-       if (IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS))
-               /* misaligned access to packet is ok on x86,arm,arm64 */
-               return 0;
-
        if (reg->id && size != 1) {
-               verbose("Unknown packet alignment. Only byte-sized access allowed\n");
+               verbose("Unknown alignment. Only byte-sized access allowed in packet access.\n");
                return -EACCES;
        }
 
        /* skb->data is NET_IP_ALIGN-ed */
-       if (reg->type == PTR_TO_PACKET &&
-           (NET_IP_ALIGN + reg->off + off) % size != 0) {
+       if ((NET_IP_ALIGN + reg->off + off) % size != 0) {
                verbose("misaligned packet access off %d+%d+%d size %d\n",
                        NET_IP_ALIGN, reg->off, off, size);
                return -EACCES;
        }
+
+       return 0;
+}
+
+static int check_val_ptr_alignment(const struct bpf_reg_state *reg,
+                                  int size)
+{
+       if (size != 1) {
+               verbose("Unknown alignment. Only byte-sized access allowed in value access.\n");
+               return -EACCES;
+       }
+
        return 0;
 }
 
+static int check_ptr_alignment(const struct bpf_reg_state *reg,
+                              int off, int size)
+{
+       switch (reg->type) {
+       case PTR_TO_PACKET:
+               return IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) ? 0 :
+                      check_pkt_ptr_alignment(reg, off, size);
+       case PTR_TO_MAP_VALUE_ADJ:
+               return IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) ? 0 :
+                      check_val_ptr_alignment(reg, size);
+       default:
+               if (off % size != 0) {
+                       verbose("misaligned access off %d size %d\n",
+                               off, size);
+                       return -EACCES;
+               }
+
+               return 0;
+       }
+}
+
 /* check whether memory at (regno + off) is accessible for t = (read | write)
  * if t==write, value_regno is a register which value is stored into memory
  * if t==read, value_regno is a register which will receive the value from memory
@@ -818,7 +838,7 @@ static int check_mem_access(struct bpf_verifier_env *env, u32 regno, int off,
        if (size < 0)
                return size;
 
-       err = check_ptr_alignment(env, reg, off, size);
+       err = check_ptr_alignment(reg, off, size);
        if (err)
                return err;
 
@@ -1197,6 +1217,10 @@ static int check_map_func_compatibility(struct bpf_map *map, int func_id)
                    func_id != BPF_FUNC_current_task_under_cgroup)
                        goto error;
                break;
+       case BPF_MAP_TYPE_ARRAY_OF_MAPS:
+       case BPF_MAP_TYPE_HASH_OF_MAPS:
+               if (func_id != BPF_FUNC_map_lookup_elem)
+                       goto error;
        default:
                break;
        }
@@ -1273,7 +1297,7 @@ static void clear_all_pkt_pointers(struct bpf_verifier_env *env)
        }
 }
 
-static int check_call(struct bpf_verifier_env *env, int func_id)
+static int check_call(struct bpf_verifier_env *env, int func_id, int insn_idx)
 {
        struct bpf_verifier_state *state = &env->cur_state;
        const struct bpf_func_proto *fn = NULL;
@@ -1357,6 +1381,8 @@ static int check_call(struct bpf_verifier_env *env, int func_id)
        } else if (fn->ret_type == RET_VOID) {
                regs[BPF_REG_0].type = NOT_INIT;
        } else if (fn->ret_type == RET_PTR_TO_MAP_VALUE_OR_NULL) {
+               struct bpf_insn_aux_data *insn_aux;
+
                regs[BPF_REG_0].type = PTR_TO_MAP_VALUE_OR_NULL;
                regs[BPF_REG_0].max_value = regs[BPF_REG_0].min_value = 0;
                /* remember map_ptr, so that check_map_access()
@@ -1369,6 +1395,11 @@ static int check_call(struct bpf_verifier_env *env, int func_id)
                }
                regs[BPF_REG_0].map_ptr = meta.map_ptr;
                regs[BPF_REG_0].id = ++env->id_gen;
+               insn_aux = &env->insn_aux_data[insn_idx];
+               if (!insn_aux->map_ptr)
+                       insn_aux->map_ptr = meta.map_ptr;
+               else if (insn_aux->map_ptr != meta.map_ptr)
+                       insn_aux->map_ptr = BPF_MAP_PTR_POISON;
        } else {
                verbose("unknown return type %d of func %s#%d\n",
                        fn->ret_type, func_id_name(func_id), func_id);
@@ -1925,6 +1956,7 @@ static int check_alu_op(struct bpf_verifier_env *env, struct bpf_insn *insn)
                 * register as unknown.
                 */
                if (env->allow_ptr_leaks &&
+                   BPF_CLASS(insn->code) == BPF_ALU64 && opcode == BPF_ADD &&
                    (dst_reg->type == PTR_TO_MAP_VALUE ||
                     dst_reg->type == PTR_TO_MAP_VALUE_ADJ))
                        dst_reg->type = PTR_TO_MAP_VALUE_ADJ;
@@ -1973,14 +2005,15 @@ static void find_good_pkt_pointers(struct bpf_verifier_state *state,
 
        for (i = 0; i < MAX_BPF_REG; i++)
                if (regs[i].type == PTR_TO_PACKET && regs[i].id == dst_reg->id)
-                       regs[i].range = dst_reg->off;
+                       /* keep the maximum range already checked */
+                       regs[i].range = max(regs[i].range, dst_reg->off);
 
        for (i = 0; i < MAX_BPF_STACK; i += BPF_REG_SIZE) {
                if (state->stack_slot_type[i] != STACK_SPILL)
                        continue;
                reg = &state->spilled_regs[i / BPF_REG_SIZE];
                if (reg->type == PTR_TO_PACKET && reg->id == dst_reg->id)
-                       reg->range = dst_reg->off;
+                       reg->range = max(reg->range, dst_reg->off);
        }
 }
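
Keeping the maximum protects a common pattern: an earlier, larger bounds check on the same packet pointer id must not be forgotten when a later, smaller check runs. A hedged restricted-C fragment of the shape this now keeps verifiable (not a complete program):

    void *data = (void *)(long)skb->data;
    void *data_end = (void *)(long)skb->data_end;

    if (data + 8 > data_end)   /* proves an 8-byte range */
            return 0;
    if (data + 4 > data_end)   /* range stays 8 instead of shrinking to 4 */
            return 0;
    /* an 8-byte access through 'data' below is still accepted */
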
 
@@ -2092,14 +2125,19 @@ static void mark_map_reg(struct bpf_reg_state *regs, u32 regno, u32 id,
        struct bpf_reg_state *reg = &regs[regno];
 
        if (reg->type == PTR_TO_MAP_VALUE_OR_NULL && reg->id == id) {
-               reg->type = type;
+               if (type == UNKNOWN_VALUE) {
+                       __mark_reg_unknown_value(regs, regno);
+               } else if (reg->map_ptr->inner_map_meta) {
+                       reg->type = CONST_PTR_TO_MAP;
+                       reg->map_ptr = reg->map_ptr->inner_map_meta;
+               } else {
+                       reg->type = type;
+               }
                /* We don't need id from this point onwards anymore, thus we
                 * should better reset it, so that state pruning has chances
                 * to take effect.
                 */
                reg->id = 0;
-               if (type == UNKNOWN_VALUE)
-                       __mark_reg_unknown_value(regs, regno);
        }
 }
 
@@ -2940,7 +2978,7 @@ static int do_check(struct bpf_verifier_env *env)
                                        return -EINVAL;
                                }
 
-                               err = check_call(env, insn->imm);
+                               err = check_call(env, insn->imm, insn_idx);
                                if (err)
                                        return err;
 
@@ -3024,16 +3062,33 @@ process_bpf_exit:
        return 0;
 }
 
+static int check_map_prealloc(struct bpf_map *map)
+{
+       return (map->map_type != BPF_MAP_TYPE_HASH &&
+               map->map_type != BPF_MAP_TYPE_PERCPU_HASH &&
+               map->map_type != BPF_MAP_TYPE_HASH_OF_MAPS) ||
+               !(map->map_flags & BPF_F_NO_PREALLOC);
+}
+
 static int check_map_prog_compatibility(struct bpf_map *map,
                                        struct bpf_prog *prog)
 
 {
-       if (prog->type == BPF_PROG_TYPE_PERF_EVENT &&
-           (map->map_type == BPF_MAP_TYPE_HASH ||
-            map->map_type == BPF_MAP_TYPE_PERCPU_HASH) &&
-           (map->map_flags & BPF_F_NO_PREALLOC)) {
-               verbose("perf_event programs can only use preallocated hash map\n");
-               return -EINVAL;
+       /* Make sure that BPF_PROG_TYPE_PERF_EVENT programs only use
+        * preallocated hash maps, since doing memory allocation
+        * in overflow_handler can crash depending on where the NMI
+        * got triggered.
+        */
+       if (prog->type == BPF_PROG_TYPE_PERF_EVENT) {
+               if (!check_map_prealloc(map)) {
+                       verbose("perf_event programs can only use preallocated hash map\n");
+                       return -EINVAL;
+               }
+               if (map->inner_map_meta &&
+                   !check_map_prealloc(map->inner_map_meta)) {
+                       verbose("perf_event programs can only use preallocated inner hash map\n");
+                       return -EINVAL;
+               }
        }
        return 0;
 }
@@ -3162,6 +3217,41 @@ static void convert_pseudo_ld_imm64(struct bpf_verifier_env *env)
                        insn->src_reg = 0;
 }
 
+/* single env->prog->insnsi[off] instruction was replaced with the range
+ * insnsi[off, off + cnt).  Adjust corresponding insn_aux_data by copying
+ * [0, off) and [off, end) to new locations, so the patched range stays zero
+ */
+static int adjust_insn_aux_data(struct bpf_verifier_env *env, u32 prog_len,
+                               u32 off, u32 cnt)
+{
+       struct bpf_insn_aux_data *new_data, *old_data = env->insn_aux_data;
+
+       if (cnt == 1)
+               return 0;
+       new_data = vzalloc(sizeof(struct bpf_insn_aux_data) * prog_len);
+       if (!new_data)
+               return -ENOMEM;
+       memcpy(new_data, old_data, sizeof(struct bpf_insn_aux_data) * off);
+       memcpy(new_data + off + cnt - 1, old_data + off,
+              sizeof(struct bpf_insn_aux_data) * (prog_len - off - cnt + 1));
+       env->insn_aux_data = new_data;
+       vfree(old_data);
+       return 0;
+}
+
+static struct bpf_prog *bpf_patch_insn_data(struct bpf_verifier_env *env, u32 off,
+                                           const struct bpf_insn *patch, u32 len)
+{
+       struct bpf_prog *new_prog;
+
+       new_prog = bpf_patch_insn_single(env->prog, off, patch, len);
+       if (!new_prog)
+               return NULL;
+       if (adjust_insn_aux_data(env, new_prog->len, off, len))
+               return NULL;
+       return new_prog;
+}
+
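
A small worked example of the copy arithmetic above: patching the instruction at off = 2 with a cnt = 3 sequence grows a 5-insn program to prog_len = 7; aux data for [0, off) stays put, the freshly patched slots stay zeroed, and the original instruction's aux lands at off + cnt - 1:

    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
            int old_data[5] = { 10, 11, 12, 13, 14 };  /* aux value per insn */
            int new_data[7] = { 0 };
            int off = 2, cnt = 3, prog_len = 7;

            memcpy(new_data, old_data, sizeof(int) * off);
            memcpy(new_data + off + cnt - 1, old_data + off,
                   sizeof(int) * (prog_len - off - cnt + 1));

            for (int i = 0; i < prog_len; i++)
                    printf("%d ", new_data[i]);  /* 10 11 0 0 12 13 14 */
            printf("\n");
            return 0;
    }
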
 /* convert load instructions that access fields of 'struct __sk_buff'
  * into sequence of instructions that access fields of 'struct sk_buff'
  */
@@ -3181,10 +3271,10 @@ static int convert_ctx_accesses(struct bpf_verifier_env *env)
                        verbose("bpf verifier is misconfigured\n");
                        return -EINVAL;
                } else if (cnt) {
-                       new_prog = bpf_patch_insn_single(env->prog, 0,
-                                                        insn_buf, cnt);
+                       new_prog = bpf_patch_insn_data(env, 0, insn_buf, cnt);
                        if (!new_prog)
                                return -ENOMEM;
+
                        env->prog = new_prog;
                        delta += cnt - 1;
                }
@@ -3209,7 +3299,7 @@ static int convert_ctx_accesses(struct bpf_verifier_env *env)
                else
                        continue;
 
-               if (env->insn_aux_data[i].ptr_type != PTR_TO_CTX)
+               if (env->insn_aux_data[i + delta].ptr_type != PTR_TO_CTX)
                        continue;
 
                cnt = ops->convert_ctx_access(type, insn, insn_buf, env->prog);
@@ -3218,8 +3308,7 @@ static int convert_ctx_accesses(struct bpf_verifier_env *env)
                        return -EINVAL;
                }
 
-               new_prog = bpf_patch_insn_single(env->prog, i + delta, insn_buf,
-                                                cnt);
+               new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, cnt);
                if (!new_prog)
                        return -ENOMEM;
 
@@ -3233,6 +3322,84 @@ static int convert_ctx_accesses(struct bpf_verifier_env *env)
        return 0;
 }
 
+/* fixup insn->imm field of bpf_call instructions
+ * and inline eligible helpers as explicit sequence of BPF instructions
+ *
+ * this function is called after eBPF program passed verification
+ */
+static int fixup_bpf_calls(struct bpf_verifier_env *env)
+{
+       struct bpf_prog *prog = env->prog;
+       struct bpf_insn *insn = prog->insnsi;
+       const struct bpf_func_proto *fn;
+       const int insn_cnt = prog->len;
+       struct bpf_insn insn_buf[16];
+       struct bpf_prog *new_prog;
+       struct bpf_map *map_ptr;
+       int i, cnt, delta = 0;
+
+       for (i = 0; i < insn_cnt; i++, insn++) {
+               if (insn->code != (BPF_JMP | BPF_CALL))
+                       continue;
+
+               if (insn->imm == BPF_FUNC_get_route_realm)
+                       prog->dst_needed = 1;
+               if (insn->imm == BPF_FUNC_get_prandom_u32)
+                       bpf_user_rnd_init_once();
+               if (insn->imm == BPF_FUNC_xdp_adjust_head)
+                       prog->xdp_adjust_head = 1;
+               if (insn->imm == BPF_FUNC_tail_call) {
+                       /* mark bpf_tail_call as different opcode to avoid
+                        * conditional branch in the interpreter for every normal
+                        * call and to prevent accidental JITing by JIT compiler
+                        * that doesn't support bpf_tail_call yet
+                        */
+                       insn->imm = 0;
+                       insn->code |= BPF_X;
+                       continue;
+               }
+
+               if (ebpf_jit_enabled() && insn->imm == BPF_FUNC_map_lookup_elem) {
+                       map_ptr = env->insn_aux_data[i + delta].map_ptr;
+                       if (map_ptr == BPF_MAP_PTR_POISON ||
+                           !map_ptr->ops->map_gen_lookup)
+                               goto patch_call_imm;
+
+                       cnt = map_ptr->ops->map_gen_lookup(map_ptr, insn_buf);
+                       if (cnt == 0 || cnt >= ARRAY_SIZE(insn_buf)) {
+                               verbose("bpf verifier is misconfigured\n");
+                               return -EINVAL;
+                       }
+
+                       new_prog = bpf_patch_insn_data(env, i + delta, insn_buf,
+                                                      cnt);
+                       if (!new_prog)
+                               return -ENOMEM;
+
+                       delta += cnt - 1;
+
+                       /* keep walking new program and skip insns we just inserted */
+                       env->prog = prog = new_prog;
+                       insn      = new_prog->insnsi + i + delta;
+                       continue;
+               }
+
+patch_call_imm:
+               fn = prog->aux->ops->get_func_proto(insn->imm);
+               /* all functions that have prototype and verifier allowed
+                * programs to call them, must be real in-kernel functions
+                */
+               if (!fn->func) {
+                       verbose("kernel subsystem misconfigured func %s#%d\n",
+                               func_id_name(insn->imm), insn->imm);
+                       return -EFAULT;
+               }
+               insn->imm = fn->func - __bpf_call_base;
+       }
+
+       return 0;
+}
+
 static void free_states(struct bpf_verifier_env *env)
 {
        struct bpf_verifier_state_list *sl, *sln;
@@ -3328,6 +3495,9 @@ skip_full_check:
                /* program is valid, convert *(u32*)(ctx + off) accesses */
                ret = convert_ctx_accesses(env);
 
+       if (ret == 0)
+               ret = fixup_bpf_calls(env);
+
        if (log_level && log_len >= log_size - 1) {
                BUG_ON(log_len >= log_size);
                /* verifier log exceeded user supplied buffer */
index 56eba9caa632adcc118114d8aa55cbab00895495..1dc22f6b49f5e06c4af22222dfb1b32c885ce16a 100644
@@ -1329,7 +1329,7 @@ static int cgroup_css_links_read(struct seq_file *seq, void *v)
                struct task_struct *task;
                int count = 0;
 
-               seq_printf(seq, "css_set %p\n", cset);
+               seq_printf(seq, "css_set %pK\n", cset);
 
                list_for_each_entry(task, &cset->tasks, cg_list) {
                        if (count++ > MAX_TASKS_SHOWN_PER_CSS)
index 0125589c742841ddbff14639c1ded5e0590b00b4..48851327a15e18e8ba151a3a45c5126c5023ddb8 100644 (file)
@@ -2669,7 +2669,7 @@ static bool css_visible(struct cgroup_subsys_state *css)
  *
  * Returns 0 on success, -errno on failure.  On failure, csses which have
  * been processed already aren't cleaned up.  The caller is responsible for
- * cleaning up with cgroup_apply_control_disble().
+ * cleaning up with cgroup_apply_control_disable().
  */
 static int cgroup_apply_control_enable(struct cgroup *cgrp)
 {
index e756dae493008e4bc4bf9f87846f818d7349ede8..2237201d66d5dacf1fe952a15d9acc54b1e90b50 100644 (file)
@@ -229,7 +229,7 @@ static int pids_can_fork(struct task_struct *task)
                /* Only log the first time events_limit is incremented. */
                if (atomic64_inc_return(&pids->events_limit) == 1) {
                        pr_info("cgroup: fork rejected by pids controller in ");
-                       pr_cont_cgroup_path(task_cgroup(current, pids_cgrp_id));
+                       pr_cont_cgroup_path(css->cgroup);
                        pr_cont("\n");
                }
                cgroup_file_notify(&pids->events_file);
index f7c063239fa5c74636922743ddb094052b9044c9..37b223e4fc05b74fc50aa51df0c307d65da026c3 100644 (file)
@@ -1335,26 +1335,21 @@ static int cpuhp_store_callbacks(enum cpuhp_state state, const char *name,
        struct cpuhp_step *sp;
        int ret = 0;
 
-       mutex_lock(&cpuhp_state_mutex);
-
        if (state == CPUHP_AP_ONLINE_DYN || state == CPUHP_BP_PREPARE_DYN) {
                ret = cpuhp_reserve_state(state);
                if (ret < 0)
-                       goto out;
+                       return ret;
                state = ret;
        }
        sp = cpuhp_get_step(state);
-       if (name && sp->name) {
-               ret = -EBUSY;
-               goto out;
-       }
+       if (name && sp->name)
+               return -EBUSY;
+
        sp->startup.single = startup;
        sp->teardown.single = teardown;
        sp->name = name;
        sp->multi_instance = multi_instance;
        INIT_HLIST_HEAD(&sp->list);
-out:
-       mutex_unlock(&cpuhp_state_mutex);
        return ret;
 }
 
@@ -1428,6 +1423,7 @@ int __cpuhp_state_add_instance(enum cpuhp_state state, struct hlist_node *node,
                return -EINVAL;
 
        get_online_cpus();
+       mutex_lock(&cpuhp_state_mutex);
 
        if (!invoke || !sp->startup.multi)
                goto add_node;
@@ -1447,16 +1443,14 @@ int __cpuhp_state_add_instance(enum cpuhp_state state, struct hlist_node *node,
                if (ret) {
                        if (sp->teardown.multi)
                                cpuhp_rollback_install(cpu, state, node);
-                       goto err;
+                       goto unlock;
                }
        }
 add_node:
        ret = 0;
-       mutex_lock(&cpuhp_state_mutex);
        hlist_add_head(node, &sp->list);
+unlock:
        mutex_unlock(&cpuhp_state_mutex);
-
-err:
        put_online_cpus();
        return ret;
 }
@@ -1491,6 +1485,7 @@ int __cpuhp_setup_state(enum cpuhp_state state,
                return -EINVAL;
 
        get_online_cpus();
+       mutex_lock(&cpuhp_state_mutex);
 
        ret = cpuhp_store_callbacks(state, name, startup, teardown,
                                    multi_instance);
@@ -1524,6 +1519,7 @@ int __cpuhp_setup_state(enum cpuhp_state state,
                }
        }
 out:
+       mutex_unlock(&cpuhp_state_mutex);
        put_online_cpus();
        /*
         * If the requested state is CPUHP_AP_ONLINE_DYN, return the
@@ -1547,6 +1543,8 @@ int __cpuhp_state_remove_instance(enum cpuhp_state state,
                return -EINVAL;
 
        get_online_cpus();
+       mutex_lock(&cpuhp_state_mutex);
+
        if (!invoke || !cpuhp_get_teardown_cb(state))
                goto remove;
        /*
@@ -1563,7 +1561,6 @@ int __cpuhp_state_remove_instance(enum cpuhp_state state,
        }
 
 remove:
-       mutex_lock(&cpuhp_state_mutex);
        hlist_del(node);
        mutex_unlock(&cpuhp_state_mutex);
        put_online_cpus();
@@ -1571,6 +1568,7 @@ remove:
        return 0;
 }
 EXPORT_SYMBOL_GPL(__cpuhp_state_remove_instance);
+
 /**
  * __cpuhp_remove_state - Remove the callbacks for a hotplug machine state
  * @state:     The state to remove
@@ -1589,6 +1587,7 @@ void __cpuhp_remove_state(enum cpuhp_state state, bool invoke)
 
        get_online_cpus();
 
+       mutex_lock(&cpuhp_state_mutex);
        if (sp->multi_instance) {
                WARN(!hlist_empty(&sp->list),
                     "Error: Removing state %d which has instances left.\n",
@@ -1613,6 +1612,7 @@ void __cpuhp_remove_state(enum cpuhp_state state, bool invoke)
        }
 remove:
        cpuhp_store_callbacks(state, NULL, NULL, NULL, false);
+       mutex_unlock(&cpuhp_state_mutex);
        put_online_cpus();
 }
 EXPORT_SYMBOL(__cpuhp_remove_state);
index 6f41548f2e320a98182f4fe4b10700bcab7e6b86..ff01cba86f430fd29916ab73c755698bf81feff0 100644 (file)
@@ -998,7 +998,7 @@ list_update_cgroup_event(struct perf_event *event,
  */
 #define PERF_CPU_HRTIMER (1000 / HZ)
 /*
- * function must be called with interrupts disbled
+ * function must be called with interrupts disabled
  */
 static enum hrtimer_restart perf_mux_hrtimer_handler(struct hrtimer *hr)
 {
@@ -4256,7 +4256,7 @@ int perf_event_release_kernel(struct perf_event *event)
 
        raw_spin_lock_irq(&ctx->lock);
        /*
-        * Mark this even as STATE_DEAD, there is no external reference to it
+        * Mark this event as STATE_DEAD, there is no external reference to it
         * anymore.
         *
         * Anybody acquiring event->child_mutex after the below loop _must_
@@ -10417,21 +10417,22 @@ void perf_event_free_task(struct task_struct *task)
                        continue;
 
                mutex_lock(&ctx->mutex);
-again:
-               list_for_each_entry_safe(event, tmp, &ctx->pinned_groups,
-                               group_entry)
-                       perf_free_event(event, ctx);
+               raw_spin_lock_irq(&ctx->lock);
+               /*
+                * Destroy the task <-> ctx relation and mark the context dead.
+                *
+                * This is important because even though the task hasn't been
+                * exposed yet the context has been (through child_list).
+                */
+               RCU_INIT_POINTER(task->perf_event_ctxp[ctxn], NULL);
+               WRITE_ONCE(ctx->task, TASK_TOMBSTONE);
+               put_task_struct(task); /* cannot be last */
+               raw_spin_unlock_irq(&ctx->lock);
 
-               list_for_each_entry_safe(event, tmp, &ctx->flexible_groups,
-                               group_entry)
+               list_for_each_entry_safe(event, tmp, &ctx->event_list, event_entry)
                        perf_free_event(event, ctx);
 
-               if (!list_empty(&ctx->pinned_groups) ||
-                               !list_empty(&ctx->flexible_groups))
-                       goto again;
-
                mutex_unlock(&ctx->mutex);
-
                put_ctx(ctx);
        }
 }
@@ -10469,7 +10470,12 @@ const struct perf_event_attr *perf_event_attrs(struct perf_event *event)
 }
 
 /*
- * inherit a event from parent task to child task:
+ * Inherit an event from parent task to child task.
+ *
+ * Returns:
+ *  - valid pointer on success
+ *  - NULL for orphaned events
+ *  - IS_ERR() on error
  */
 static struct perf_event *
 inherit_event(struct perf_event *parent_event,
@@ -10563,6 +10569,16 @@ inherit_event(struct perf_event *parent_event,
        return child_event;
 }
 
+/*
+ * Inherits an event group.
+ *
+ * This will quietly suppress orphaned events; a NULL return from inherit_event() is not an error.
+ * This matches with perf_event_release_kernel() removing all child events.
+ *
+ * Returns:
+ *  - 0 on success
+ *  - <0 on error
+ */
 static int inherit_group(struct perf_event *parent_event,
              struct task_struct *parent,
              struct perf_event_context *parent_ctx,
@@ -10577,6 +10593,11 @@ static int inherit_group(struct perf_event *parent_event,
                                 child, NULL, child_ctx);
        if (IS_ERR(leader))
                return PTR_ERR(leader);
+       /*
+        * @leader can be NULL here because of is_orphaned_event(). In this
+        * case inherit_event() will create individual events, similar to what
+        * perf_group_detach() would do anyway.
+        */
        list_for_each_entry(sub, &parent_event->sibling_list, group_entry) {
                child_ctr = inherit_event(sub, parent, parent_ctx,
                                            child, leader, child_ctx);
@@ -10586,6 +10607,17 @@ static int inherit_group(struct perf_event *parent_event,
        return 0;
 }
 
+/*
+ * Creates the child task context and tries to inherit the event-group.
+ *
+ * Clears @inherited_all on !attr.inherited or error. Note that we'll leave
+ * inherited_all set when we 'fail' to inherit an orphaned event; this is
+ * consistent with perf_event_release_kernel() removing all child events.
+ *
+ * Returns:
+ *  - 0 on success
+ *  - <0 on error
+ */
 static int
 inherit_task_group(struct perf_event *event, struct task_struct *parent,
                   struct perf_event_context *parent_ctx,
@@ -10608,7 +10640,6 @@ inherit_task_group(struct perf_event *event, struct task_struct *parent,
                 * First allocate and initialize a context for the
                 * child.
                 */
-
                child_ctx = alloc_perf_context(parent_ctx->pmu, child);
                if (!child_ctx)
                        return -ENOMEM;
@@ -10670,7 +10701,7 @@ static int perf_event_init_context(struct task_struct *child, int ctxn)
                ret = inherit_task_group(event, parent, parent_ctx,
                                         child, ctxn, &inherited_all);
                if (ret)
-                       break;
+                       goto out_unlock;
        }
 
        /*
@@ -10686,7 +10717,7 @@ static int perf_event_init_context(struct task_struct *child, int ctxn)
                ret = inherit_task_group(event, parent, parent_ctx,
                                         child, ctxn, &inherited_all);
                if (ret)
-                       break;
+                       goto out_unlock;
        }
 
        raw_spin_lock_irqsave(&parent_ctx->lock, flags);
@@ -10714,6 +10745,7 @@ static int perf_event_init_context(struct task_struct *child, int ctxn)
        }
 
        raw_spin_unlock_irqrestore(&parent_ctx->lock, flags);
+out_unlock:
        mutex_unlock(&parent_ctx->mutex);
 
        perf_unpin_context(parent_ctx);
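The inherit_event() comment above spells out a three-way return contract: a valid pointer, NULL for an orphaned event (not an error), or an ERR_PTR-encoded errno. A hedged userspace sketch of consuming such a contract, using stand-in ERR_PTR/IS_ERR/PTR_ERR macros rather than the kernel's err.h:

#include <stdio.h>
#include <errno.h>

/* Stand-ins for the kernel's err.h helpers (illustrative only). */
#define ERR_PTR(err)    ((void *)(long)(err))
#define PTR_ERR(ptr)    ((long)(ptr))
#define IS_ERR(ptr)     ((unsigned long)(ptr) >= (unsigned long)-4095)

struct event { int id; };

static struct event dummy = { 1 };

/* mode 0: success, 1: orphaned (NULL), 2: hard error */
static struct event *inherit_one(int mode)
{
        if (mode == 2)
                return ERR_PTR(-ENOMEM);
        return mode == 1 ? NULL : &dummy;
}

int main(void)
{
        for (int mode = 0; mode < 3; mode++) {
                struct event *ev = inherit_one(mode);

                if (IS_ERR(ev))                 /* real failure */
                        printf("error %ld\n", PTR_ERR(ev));
                else if (!ev)                   /* orphaned: skip quietly */
                        printf("orphaned, not an error\n");
                else
                        printf("inherited event %d\n", ev->id);
        }
        return 0;
}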
index e126ebf2400c221adfb8a73508d883ec9accd63d..516acdb0e0ec9bd48e3006a8ede165437b3e121f 100644 (file)
@@ -554,7 +554,6 @@ static void exit_mm(void)
        enter_lazy_tlb(mm, current);
        task_unlock(current);
        mm_update_next_owner(mm);
-       userfaultfd_exit(mm);
        mmput(mm);
        if (test_thread_flag(TIF_MEMDIE))
                exit_oom_victim();
index 229a744b1781be2e4fccc1b5c290bd246d8b8694..45858ec739411f5741667e560552757697441e6b 100644 (file)
@@ -2815,7 +2815,6 @@ static int futex_wait_requeue_pi(u32 __user *uaddr, unsigned int flags,
 {
        struct hrtimer_sleeper timeout, *to = NULL;
        struct rt_mutex_waiter rt_waiter;
-       struct rt_mutex *pi_mutex = NULL;
        struct futex_hash_bucket *hb;
        union futex_key key2 = FUTEX_KEY_INIT;
        struct futex_q q = futex_q_init;
@@ -2899,6 +2898,8 @@ static int futex_wait_requeue_pi(u32 __user *uaddr, unsigned int flags,
                if (q.pi_state && (q.pi_state->owner != current)) {
                        spin_lock(q.lock_ptr);
                        ret = fixup_pi_state_owner(uaddr2, &q, current);
+                       if (ret && rt_mutex_owner(&q.pi_state->pi_mutex) == current)
+                               rt_mutex_unlock(&q.pi_state->pi_mutex);
                        /*
                         * Drop the reference to the pi state which
                         * the requeue_pi() code acquired for us.
@@ -2907,6 +2908,8 @@ static int futex_wait_requeue_pi(u32 __user *uaddr, unsigned int flags,
                        spin_unlock(q.lock_ptr);
                }
        } else {
+               struct rt_mutex *pi_mutex;
+
                /*
                 * We have been woken up by futex_unlock_pi(), a timeout, or a
                 * signal.  futex_unlock_pi() will not destroy the lock_ptr nor
@@ -2930,18 +2933,19 @@ static int futex_wait_requeue_pi(u32 __user *uaddr, unsigned int flags,
                if (res)
                        ret = (res < 0) ? res : 0;
 
+               /*
+                * If fixup_pi_state_owner() faulted and was unable to handle
+                * the fault, unlock the rt_mutex and return the fault to
+                * userspace.
+                */
+               if (ret && rt_mutex_owner(pi_mutex) == current)
+                       rt_mutex_unlock(pi_mutex);
+
                /* Unqueue and drop the lock. */
                unqueue_me_pi(&q);
        }
 
-       /*
-        * If fixup_pi_state_owner() faulted and was unable to handle the
-        * fault, unlock the rt_mutex and return the fault to userspace.
-        */
-       if (ret == -EFAULT) {
-               if (pi_mutex && rt_mutex_owner(pi_mutex) == current)
-                       rt_mutex_unlock(pi_mutex);
-       } else if (ret == -EINTR) {
+       if (ret == -EINTR) {
                /*
                 * We've already been requeued, but cannot restart by calling
                 * futex_lock_pi() directly. We could restart this syscall, but
index b56a558e406db6375bea4b07e5873a4c6f0b401e..b118735fea9da471a15ba627c87af523b891bafa 100644 (file)
@@ -614,13 +614,13 @@ static int kexec_calculate_store_digests(struct kimage *image)
                ret = crypto_shash_final(desc, digest);
                if (ret)
                        goto out_free_digest;
-               ret = kexec_purgatory_get_set_symbol(image, "sha_regions",
-                                               sha_regions, sha_region_sz, 0);
+               ret = kexec_purgatory_get_set_symbol(image, "purgatory_sha_regions",
+                                                    sha_regions, sha_region_sz, 0);
                if (ret)
                        goto out_free_digest;
 
-               ret = kexec_purgatory_get_set_symbol(image, "sha256_digest",
-                                               digest, SHA256_DIGEST_SIZE, 0);
+               ret = kexec_purgatory_get_set_symbol(image, "purgatory_sha256_digest",
+                                                    digest, SHA256_DIGEST_SIZE, 0);
                if (ret)
                        goto out_free_digest;
        }
index 4cef7e4706b098d7918b53ff1e1b931d1a5ec8dc..799a8a4521870a6444818fef64c0ae1e2dfad671 100644 (file)
@@ -15,11 +15,7 @@ int kimage_is_destination_range(struct kimage *image,
 extern struct mutex kexec_mutex;
 
 #ifdef CONFIG_KEXEC_FILE
-struct kexec_sha_region {
-       unsigned long start;
-       unsigned long len;
-};
-
+#include <linux/purgatory.h>
 void kimage_file_post_load_cleanup(struct kimage *image);
 #else /* CONFIG_KEXEC_FILE */
 static inline void kimage_file_post_load_cleanup(struct kimage *image) { }
index 12e38c213b70111c1b343af3b1b0bb4eaa99f829..a95e5d1f4a9c447de6aa4b0b1b85e5f56de9f729 100644 (file)
@@ -3262,10 +3262,17 @@ static int __lock_acquire(struct lockdep_map *lock, unsigned int subclass,
        if (depth) {
                hlock = curr->held_locks + depth - 1;
                if (hlock->class_idx == class_idx && nest_lock) {
-                       if (hlock->references)
+                       if (hlock->references) {
+                               /*
+                                * Check for overflow of the 12-bit
+                                * 'unsigned int references:12' bitfield.
+                                */
+                               if (DEBUG_LOCKS_WARN_ON(hlock->references == (1 << 12)-1))
+                                       return 0;
+
                                hlock->references++;
-                       else
+                       } else {
                                hlock->references = 2;
+                       }
 
                        return 1;
                }
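The lockdep hunk above guards hlock->references, a 12-bit bitfield, against wrap-around: once the field reads (1 << 12) - 1, another increment would wrap it to zero and a later unlock-side decrement would underflow. A small runnable sketch of the same saturation check, with a toy struct standing in for held_lock:

#include <stdio.h>

struct toy_lock {
        unsigned int references:12;     /* wraps at 4096 */
};

/* Refuse (return 0) once the field would wrap, mirroring the
 * DEBUG_LOCKS_WARN_ON() check added above. */
static int toy_reacquire(struct toy_lock *l)
{
        if (l->references == (1 << 12) - 1)
                return 0;               /* would overflow */
        l->references++;
        return 1;
}

int main(void)
{
        struct toy_lock l = { .references = (1 << 12) - 2 };

        printf("%d (refs=%u)\n", toy_reacquire(&l), l.references); /* 1 (refs=4095) */
        printf("%d (refs=%u)\n", toy_reacquire(&l), l.references); /* 0 (refs=4095) */
        return 0;
}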
index 7bc24d477805d868b932aab7acc6997120931fc5..c65f7989f850d12508045896a2cb98d5b691c068 100644 (file)
@@ -213,10 +213,9 @@ int __sched __down_write_common(struct rw_semaphore *sem, int state)
                 */
                if (sem->count == 0)
                        break;
-               if (signal_pending_state(state, current)) {
-                       ret = -EINTR;
-                       goto out;
-               }
+               if (signal_pending_state(state, current))
+                       goto out_nolock;
+
                set_current_state(state);
                raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
                schedule();
@@ -224,12 +223,19 @@ int __sched __down_write_common(struct rw_semaphore *sem, int state)
        }
        /* got the lock */
        sem->count = -1;
-out:
        list_del(&waiter.list);
 
        raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
 
        return ret;
+
+out_nolock:
+       list_del(&waiter.list);
+       if (!list_empty(&sem->wait_list))
+               __rwsem_do_wake(sem, 1);
+       raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
+
+       return -EINTR;
 }
 
 void __sched __down_write(struct rw_semaphore *sem)
index da6c9a34f62f5c74f17eada78d6bb843c8fbf01d..6b7abb334ca6027dd2b189a211671fa98a33be82 100644 (file)
@@ -50,7 +50,7 @@ static void test_mutex_work(struct work_struct *work)
 
        if (mtx->flags & TEST_MTX_TRY) {
                while (!ww_mutex_trylock(&mtx->mutex))
-                       cpu_relax();
+                       cond_resched();
        } else {
                ww_mutex_lock(&mtx->mutex, NULL);
        }
@@ -88,7 +88,7 @@ static int __test_mutex(unsigned int flags)
                                ret = -EINVAL;
                                break;
                        }
-                       cpu_relax();
+                       cond_resched();
                } while (time_before(jiffies, timeout));
        } else {
                ret = wait_for_completion_timeout(&mtx.done, TIMEOUT);
@@ -627,7 +627,7 @@ static int __init test_ww_mutex_init(void)
        if (ret)
                return ret;
 
-       ret = stress(4096, hweight32(STRESS_ALL)*ncpus, 1<<12, STRESS_ALL);
+       ret = stress(4095, hweight32(STRESS_ALL)*ncpus, 1<<12, STRESS_ALL);
        if (ret)
                return ret;
 
index 06123234f1189c86ee42dffdc2d14873b6b16895..07e85e5229da849d33391f97234c1e1fff2c5ce1 100644 (file)
@@ -247,11 +247,9 @@ static void devm_memremap_pages_release(struct device *dev, void *data)
        align_start = res->start & ~(SECTION_SIZE - 1);
        align_size = ALIGN(resource_size(res), SECTION_SIZE);
 
-       lock_device_hotplug();
        mem_hotplug_begin();
        arch_remove_memory(align_start, align_size);
        mem_hotplug_done();
-       unlock_device_hotplug();
 
        untrack_pfn(NULL, PHYS_PFN(align_start), align_size);
        pgmap_radix_release(res);
@@ -364,11 +362,9 @@ void *devm_memremap_pages(struct device *dev, struct resource *res,
        if (error)
                goto err_pfn_remap;
 
-       lock_device_hotplug();
        mem_hotplug_begin();
        error = arch_add_memory(nid, align_start, align_size, true);
        mem_hotplug_done();
-       unlock_device_hotplug();
        if (error)
                goto err_add_memory;
 
index 05316c9f32da9d0e20b3d3c92eeaf3eb49f1deef..3202aa17492c808af5331044de710a2f34e277a3 100644 (file)
@@ -186,19 +186,20 @@ static struct padata_priv *padata_get_next(struct parallel_data *pd)
 
        reorder = &next_queue->reorder;
 
+       spin_lock(&reorder->lock);
        if (!list_empty(&reorder->list)) {
                padata = list_entry(reorder->list.next,
                                    struct padata_priv, list);
 
-               spin_lock(&reorder->lock);
                list_del_init(&padata->list);
                atomic_dec(&pd->reorder_objects);
-               spin_unlock(&reorder->lock);
 
                pd->processed++;
 
+               spin_unlock(&reorder->lock);
                goto out;
        }
+       spin_unlock(&reorder->lock);
 
        if (__this_cpu_read(pd->pqueue->cpu_index) == next_queue->cpu_index) {
                padata = ERR_PTR(-ENODATA);
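The padata hunk widens reorder->lock to cover the list_empty() test and the pd->processed update themselves; checking emptiness outside the lock and locking only for the removal leaves a window in which another CPU dequeues the entry first. A pthread sketch of the corrected shape, with a toy ring buffer standing in for the reorder list:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static int queue[8], head, tail;        /* toy reorder queue */

/* Test-and-remove in one critical section, as in the patched
 * padata_get_next(); returns -1 when the queue is empty. */
static int dequeue(void)
{
        int val = -1;

        pthread_mutex_lock(&lock);
        if (head != tail) {             /* the check is inside the lock */
                val = queue[head];
                head = (head + 1) % 8;
        }
        pthread_mutex_unlock(&lock);
        return val;
}

static void enqueue(int v)
{
        pthread_mutex_lock(&lock);
        queue[tail] = v;
        tail = (tail + 1) % 8;
        pthread_mutex_unlock(&lock);
}

int main(void)
{
        enqueue(42);
        printf("%d %d\n", dequeue(), dequeue());        /* 42 -1 */
        return 0;
}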
index a08795e216283f1292f82720bb1ff758e6a2629c..00a45c45beca09829ad479aad9ba299f5498a42e 100644 (file)
@@ -96,10 +96,10 @@ static DEFINE_STATIC_KEY_FALSE(__sched_clock_stable);
 static int __sched_clock_stable_early = 1;
 
 /*
- * We want: ktime_get_ns() + gtod_offset == sched_clock() + raw_offset
+ * We want: ktime_get_ns() + __gtod_offset == sched_clock() + __sched_clock_offset
  */
-static __read_mostly u64 raw_offset;
-static __read_mostly u64 gtod_offset;
+__read_mostly u64 __sched_clock_offset;
+static __read_mostly u64 __gtod_offset;
 
 struct sched_clock_data {
        u64                     tick_raw;
@@ -131,17 +131,24 @@ static void __set_sched_clock_stable(void)
        /*
         * Attempt to make the (initial) unstable->stable transition continuous.
         */
-       raw_offset = (scd->tick_gtod + gtod_offset) - (scd->tick_raw);
+       __sched_clock_offset = (scd->tick_gtod + __gtod_offset) - (scd->tick_raw);
 
        printk(KERN_INFO "sched_clock: Marking stable (%lld, %lld)->(%lld, %lld)\n",
-                       scd->tick_gtod, gtod_offset,
-                       scd->tick_raw,  raw_offset);
+                       scd->tick_gtod, __gtod_offset,
+                       scd->tick_raw,  __sched_clock_offset);
 
        static_branch_enable(&__sched_clock_stable);
        tick_dep_clear(TICK_DEP_BIT_CLOCK_UNSTABLE);
 }
 
-static void __clear_sched_clock_stable(struct work_struct *work)
+static void __sched_clock_work(struct work_struct *work)
+{
+       static_branch_disable(&__sched_clock_stable);
+}
+
+static DECLARE_WORK(sched_clock_work, __sched_clock_work);
+
+static void __clear_sched_clock_stable(void)
 {
        struct sched_clock_data *scd = this_scd();
 
@@ -154,17 +161,17 @@ static void __clear_sched_clock_stable(struct work_struct *work)
         *
         * Still do what we can.
         */
-       gtod_offset = (scd->tick_raw + raw_offset) - (scd->tick_gtod);
+       __gtod_offset = (scd->tick_raw + __sched_clock_offset) - (scd->tick_gtod);
 
        printk(KERN_INFO "sched_clock: Marking unstable (%lld, %lld)<-(%lld, %lld)\n",
-                       scd->tick_gtod, gtod_offset,
-                       scd->tick_raw,  raw_offset);
+                       scd->tick_gtod, __gtod_offset,
+                       scd->tick_raw,  __sched_clock_offset);
 
-       static_branch_disable(&__sched_clock_stable);
        tick_dep_set(TICK_DEP_BIT_CLOCK_UNSTABLE);
-}
 
-static DECLARE_WORK(sched_clock_work, __clear_sched_clock_stable);
+       if (sched_clock_stable())
+               schedule_work(&sched_clock_work);
+}
 
 void clear_sched_clock_stable(void)
 {
@@ -173,7 +180,7 @@ void clear_sched_clock_stable(void)
        smp_mb(); /* matches sched_clock_init_late() */
 
        if (sched_clock_running == 2)
-               schedule_work(&sched_clock_work);
+               __clear_sched_clock_stable();
 }
 
 void sched_clock_init_late(void)
@@ -214,7 +221,7 @@ static inline u64 wrap_max(u64 x, u64 y)
  */
 static u64 sched_clock_local(struct sched_clock_data *scd)
 {
-       u64 now, clock, old_clock, min_clock, max_clock;
+       u64 now, clock, old_clock, min_clock, max_clock, gtod;
        s64 delta;
 
 again:
@@ -231,9 +238,10 @@ again:
         *                    scd->tick_gtod + TICK_NSEC);
         */
 
-       clock = scd->tick_gtod + gtod_offset + delta;
-       min_clock = wrap_max(scd->tick_gtod, old_clock);
-       max_clock = wrap_max(old_clock, scd->tick_gtod + TICK_NSEC);
+       gtod = scd->tick_gtod + __gtod_offset;
+       clock = gtod + delta;
+       min_clock = wrap_max(gtod, old_clock);
+       max_clock = wrap_max(old_clock, gtod + TICK_NSEC);
 
        clock = wrap_max(clock, min_clock);
        clock = wrap_min(clock, max_clock);
@@ -317,7 +325,7 @@ u64 sched_clock_cpu(int cpu)
        u64 clock;
 
        if (sched_clock_stable())
-               return sched_clock() + raw_offset;
+               return sched_clock() + __sched_clock_offset;
 
        if (unlikely(!sched_clock_running))
                return 0ull;
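Both offsets in the sched_clock hunk exist to preserve the invariant stated in the comment, ktime_get_ns() + __gtod_offset == sched_clock() + __sched_clock_offset, so flipping between the stable (raw) and unstable (GTOD-based) paths never makes the clock jump. A small arithmetic sketch of the continuity computation, using made-up sample values:

#include <stdio.h>

int main(void)
{
        /* Made-up samples taken at the moment of the switch. */
        unsigned long long tick_raw  = 1000;    /* raw sched_clock() */
        unsigned long long tick_gtod = 5000;    /* ktime-based clock */
        unsigned long long gtod_offset = 250;
        unsigned long long sched_clock_offset;

        /* Mirror of __set_sched_clock_stable(): pick the raw-side
         * offset so both expressions agree at switch time. */
        sched_clock_offset = (tick_gtod + gtod_offset) - tick_raw;

        printf("gtod side: %llu\n", tick_gtod + gtod_offset);       /* 5250 */
        printf("raw side:  %llu\n", tick_raw + sched_clock_offset); /* 5250 */
        return 0;
}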
index 956383844116ab456f8552abd4b5dcc00e09f347..3b31fc05a0f1e45be5985b860a5fde95ee969832 100644 (file)
@@ -3287,10 +3287,15 @@ pick_next_task(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
        struct task_struct *p;
 
        /*
-        * Optimization: we know that if all tasks are in
-        * the fair class we can call that function directly:
+        * Optimization: we know that if all tasks are in the fair class we can
+        * call that function directly, but only if the @prev task wasn't of a
+        * higher scheduling class, because otherwise those classes lose the
+        * opportunity to pull in more work from other CPUs.
         */
-       if (likely(rq->nr_running == rq->cfs.h_nr_running)) {
+       if (likely((prev->sched_class == &idle_sched_class ||
+                   prev->sched_class == &fair_sched_class) &&
+                  rq->nr_running == rq->cfs.h_nr_running)) {
+
                p = fair_sched_class.pick_next_task(rq, prev, rf);
                if (unlikely(p == RETRY_TASK))
                        goto again;
index 8f8de3d4d6b7a3c71358ac1c6660f2e645b98477..54c577578da6899160cf4a611e87a386a2fd7db2 100644 (file)
@@ -36,6 +36,7 @@ struct sugov_policy {
        u64 last_freq_update_time;
        s64 freq_update_delay_ns;
        unsigned int next_freq;
+       unsigned int cached_raw_freq;
 
        /* The next fields are only needed if fast switch cannot be used. */
        struct irq_work irq_work;
@@ -52,7 +53,6 @@ struct sugov_cpu {
        struct update_util_data update_util;
        struct sugov_policy *sg_policy;
 
-       unsigned int cached_raw_freq;
        unsigned long iowait_boost;
        unsigned long iowait_boost_max;
        u64 last_update;
@@ -116,7 +116,7 @@ static void sugov_update_commit(struct sugov_policy *sg_policy, u64 time,
 
 /**
  * get_next_freq - Compute a new frequency for a given cpufreq policy.
- * @sg_cpu: schedutil cpu object to compute the new frequency for.
+ * @sg_policy: schedutil policy object to compute the new frequency for.
  * @util: Current CPU utilization.
  * @max: CPU capacity.
  *
@@ -136,19 +136,18 @@ static void sugov_update_commit(struct sugov_policy *sg_policy, u64 time,
  * next_freq (as calculated above) is returned, subject to policy min/max and
  * cpufreq driver limitations.
  */
-static unsigned int get_next_freq(struct sugov_cpu *sg_cpu, unsigned long util,
-                                 unsigned long max)
+static unsigned int get_next_freq(struct sugov_policy *sg_policy,
+                                 unsigned long util, unsigned long max)
 {
-       struct sugov_policy *sg_policy = sg_cpu->sg_policy;
        struct cpufreq_policy *policy = sg_policy->policy;
        unsigned int freq = arch_scale_freq_invariant() ?
                                policy->cpuinfo.max_freq : policy->cur;
 
        freq = (freq + (freq >> 2)) * util / max;
 
-       if (freq == sg_cpu->cached_raw_freq && sg_policy->next_freq != UINT_MAX)
+       if (freq == sg_policy->cached_raw_freq && sg_policy->next_freq != UINT_MAX)
                return sg_policy->next_freq;
-       sg_cpu->cached_raw_freq = freq;
+       sg_policy->cached_raw_freq = freq;
        return cpufreq_driver_resolve_freq(policy, freq);
 }
 
@@ -213,7 +212,7 @@ static void sugov_update_single(struct update_util_data *hook, u64 time,
        } else {
                sugov_get_util(&util, &max);
                sugov_iowait_boost(sg_cpu, &util, &max);
-               next_f = get_next_freq(sg_cpu, util, max);
+               next_f = get_next_freq(sg_policy, util, max);
        }
        sugov_update_commit(sg_policy, time, next_f);
 }
@@ -267,7 +266,7 @@ static unsigned int sugov_next_freq_shared(struct sugov_cpu *sg_cpu,
                sugov_iowait_boost(j_sg_cpu, &util, &max);
        }
 
-       return get_next_freq(sg_cpu, util, max);
+       return get_next_freq(sg_policy, util, max);
 }
 
 static void sugov_update_shared(struct update_util_data *hook, u64 time,
@@ -580,25 +579,19 @@ static int sugov_start(struct cpufreq_policy *policy)
        sg_policy->next_freq = UINT_MAX;
        sg_policy->work_in_progress = false;
        sg_policy->need_freq_update = false;
+       sg_policy->cached_raw_freq = 0;
 
        for_each_cpu(cpu, policy->cpus) {
                struct sugov_cpu *sg_cpu = &per_cpu(sugov_cpu, cpu);
 
+               memset(sg_cpu, 0, sizeof(*sg_cpu));
                sg_cpu->sg_policy = sg_policy;
-               if (policy_is_shared(policy)) {
-                       sg_cpu->util = 0;
-                       sg_cpu->max = 0;
-                       sg_cpu->flags = SCHED_CPUFREQ_RT;
-                       sg_cpu->last_update = 0;
-                       sg_cpu->cached_raw_freq = 0;
-                       sg_cpu->iowait_boost = 0;
-                       sg_cpu->iowait_boost_max = policy->cpuinfo.max_freq;
-                       cpufreq_add_update_util_hook(cpu, &sg_cpu->update_util,
-                                                    sugov_update_shared);
-               } else {
-                       cpufreq_add_update_util_hook(cpu, &sg_cpu->update_util,
-                                                    sugov_update_single);
-               }
+               sg_cpu->flags = SCHED_CPUFREQ_RT;
+               sg_cpu->iowait_boost_max = policy->cpuinfo.max_freq;
+               cpufreq_add_update_util_hook(cpu, &sg_cpu->update_util,
+                                            policy_is_shared(policy) ?
+                                                       sugov_update_shared :
+                                                       sugov_update_single);
        }
        return 0;
 }
index 99b2c33a9fbcb4411fd7b75d6dbaff36bf07f803..a2ce59015642c3ccc753006837a9485b2d9fbcd3 100644 (file)
@@ -445,13 +445,13 @@ static void replenish_dl_entity(struct sched_dl_entity *dl_se,
  *
  * This function returns true if:
  *
- *   runtime / (deadline - t) > dl_runtime / dl_period ,
+ *   runtime / (deadline - t) > dl_runtime / dl_deadline ,
  *
  * IOW we can't recycle current parameters.
  *
- * Notice that the bandwidth check is done against the period. For
+ * Notice that the bandwidth check is done against the deadline. For
  * a task with deadline equal to period this is the same as using
- * dl_deadline instead of dl_period in the equation above.
+ * dl_period instead of dl_deadline in the equation above.
  */
 static bool dl_entity_overflow(struct sched_dl_entity *dl_se,
                               struct sched_dl_entity *pi_se, u64 t)
@@ -476,7 +476,7 @@ static bool dl_entity_overflow(struct sched_dl_entity *dl_se,
         * of anything below microseconds resolution is actually fiction
         * (but still we want to give the user that illusion >;).
         */
-       left = (pi_se->dl_period >> DL_SCALE) * (dl_se->runtime >> DL_SCALE);
+       left = (pi_se->dl_deadline >> DL_SCALE) * (dl_se->runtime >> DL_SCALE);
        right = ((dl_se->deadline - t) >> DL_SCALE) *
                (pi_se->dl_runtime >> DL_SCALE);
 
@@ -505,10 +505,15 @@ static void update_dl_entity(struct sched_dl_entity *dl_se,
        }
 }
 
+static inline u64 dl_next_period(struct sched_dl_entity *dl_se)
+{
+       return dl_se->deadline - dl_se->dl_deadline + dl_se->dl_period;
+}
+
 /*
  * If the entity depleted all its runtime, and if we want it to sleep
  * while waiting for some new execution time to become available, we
- * set the bandwidth enforcement timer to the replenishment instant
+ * set the bandwidth replenishment timer to the replenishment instant
  * and try to activate it.
  *
  * Notice that it is important for the caller to know if the timer
@@ -530,7 +535,7 @@ static int start_dl_timer(struct task_struct *p)
         * that it is actually coming from rq->clock and not from
         * hrtimer's time base reading.
         */
-       act = ns_to_ktime(dl_se->deadline);
+       act = ns_to_ktime(dl_next_period(dl_se));
        now = hrtimer_cb_get_time(timer);
        delta = ktime_to_ns(now) - rq_clock(rq);
        act = ktime_add_ns(act, delta);
@@ -638,6 +643,7 @@ static enum hrtimer_restart dl_task_timer(struct hrtimer *timer)
                lockdep_unpin_lock(&rq->lock, rf.cookie);
                rq = dl_task_offline_migration(rq, p);
                rf.cookie = lockdep_pin_lock(&rq->lock);
+               update_rq_clock(rq);
 
                /*
                 * Now that the task has been migrated to the new RQ and we
@@ -689,6 +695,37 @@ void init_dl_task_timer(struct sched_dl_entity *dl_se)
        timer->function = dl_task_timer;
 }
 
+/*
+ * During the activation, CBS checks if it can reuse the current task's
+ * runtime and period. If the deadline of the task is in the past, CBS
+ * cannot use the runtime, and so it replenishes the task. This rule
+ * works fine for implicit deadline tasks (deadline == period), and the
+ * CBS was designed for implicit deadline tasks. However, a task with
+ * constrained deadline (deadline < period) might be awakened after the
+ * deadline, but before the next period. In this case, replenishing the
+ * task would allow it to run for runtime / deadline. As in this case
+ * deadline < period, CBS enables a task to run for more than the
+ * runtime / period. In a very loaded system, this can cause a domino
+ * effect, making other tasks miss their deadlines.
+ *
+ * To avoid this problem, in the activation of a constrained deadline
+ * task after the deadline but before the next period, throttle the
+ * task and set the replenishing timer to the beginning of the next period,
+ * unless it is boosted.
+ */
+static inline void dl_check_constrained_dl(struct sched_dl_entity *dl_se)
+{
+       struct task_struct *p = dl_task_of(dl_se);
+       struct rq *rq = rq_of_dl_rq(dl_rq_of_se(dl_se));
+
+       if (dl_time_before(dl_se->deadline, rq_clock(rq)) &&
+           dl_time_before(rq_clock(rq), dl_next_period(dl_se))) {
+               if (unlikely(dl_se->dl_boosted || !start_dl_timer(p)))
+                       return;
+               dl_se->dl_throttled = 1;
+       }
+}
+
 static
 int dl_runtime_exceeded(struct sched_dl_entity *dl_se)
 {
@@ -922,6 +959,11 @@ static void dequeue_dl_entity(struct sched_dl_entity *dl_se)
        __dequeue_dl_entity(dl_se);
 }
 
+static inline bool dl_is_constrained(struct sched_dl_entity *dl_se)
+{
+       return dl_se->dl_deadline < dl_se->dl_period;
+}
+
 static void enqueue_task_dl(struct rq *rq, struct task_struct *p, int flags)
 {
        struct task_struct *pi_task = rt_mutex_get_top_task(p);
@@ -947,6 +989,15 @@ static void enqueue_task_dl(struct rq *rq, struct task_struct *p, int flags)
                return;
        }
 
+       /*
+        * Check if a constrained deadline task was activated
+        * after the deadline but before the next period.
+        * If that is the case, the task will be throttled and
+        * the replenishment timer will be set to the next period.
+        */
+       if (!p->dl.dl_throttled && dl_is_constrained(&p->dl))
+               dl_check_constrained_dl(&p->dl);
+
        /*
         * If p is throttled, we do nothing. In fact, if it exhausted
         * its budget it needs a replenishment and, since it now is on
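The dl_check_constrained_dl() comment above is easiest to see with numbers. A quick worked sketch with made-up parameters, runtime 5 ms, deadline 10 ms, period 100 ms: the task is entitled to 5/100 = 5% of the CPU, but an unconditional replenish on a wakeup just after the deadline would hand it 5 ms in every remaining 10 ms window, i.e. up to 50%:

#include <stdio.h>

int main(void)
{
        double runtime = 5, deadline = 10, period = 100;        /* ms */

        printf("intended bandwidth: %.0f%%\n", 100 * runtime / period);
        printf("after a naive replenish past the deadline: up to %.0f%%\n",
               100 * runtime / deadline);
        return 0;
}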
index 3e88b35ac1571cd2dc1719378902ca75c08cfbb9..dea138964b9107b3e22542a8b80f5cf1d43c1dee 100644 (file)
@@ -5799,7 +5799,7 @@ static int select_idle_cpu(struct task_struct *p, struct sched_domain *sd, int t
         * Due to large variance we need a large fuzz factor; hackbench in
         * particularly is sensitive here.
         */
-       if ((avg_idle / 512) < avg_cost)
+       if (sched_feat(SIS_AVG_CPU) && (avg_idle / 512) < avg_cost)
                return -1;
 
        time = local_clock();
index 69631fa46c2f84fecd3e15599cba0e5935c1148e..1b3c8189b28656d2644a714ff60ceab7d015d97b 100644 (file)
@@ -51,6 +51,11 @@ SCHED_FEAT(NONTASK_CAPACITY, true)
  */
 SCHED_FEAT(TTWU_QUEUE, true)
 
+/*
+ * When doing wakeups, attempt to limit superfluous scans of the LLC domain.
+ */
+SCHED_FEAT(SIS_AVG_CPU, false)
+
 #ifdef HAVE_RT_PUSH_IPI
 /*
  * In order to avoid a thundering herd attack of CPUs that are
index 7296b7308ecaebb6cca949e1a9e2d4361750f7c6..f15fb2bdbc0dee60d770da951424f8cf0635f5f6 100644 (file)
@@ -169,7 +169,7 @@ static inline int calc_load_write_idx(void)
         * If the folding window started, make sure we start writing in the
         * next idle-delta.
         */
-       if (!time_before(jiffies, calc_load_update))
+       if (!time_before(jiffies, READ_ONCE(calc_load_update)))
                idx++;
 
        return idx & 1;
@@ -202,8 +202,9 @@ void calc_load_exit_idle(void)
        struct rq *this_rq = this_rq();
 
        /*
-        * If we're still before the sample window, we're done.
+        * If we're still before the pending sample window, we're done.
         */
+       this_rq->calc_load_update = READ_ONCE(calc_load_update);
        if (time_before(jiffies, this_rq->calc_load_update))
                return;
 
@@ -212,7 +213,6 @@ void calc_load_exit_idle(void)
         * accounted through the nohz accounting, so skip the entire deal and
         * sync up for the next window.
         */
-       this_rq->calc_load_update = calc_load_update;
        if (time_before(jiffies, this_rq->calc_load_update + 10))
                this_rq->calc_load_update += LOAD_FREQ;
 }
@@ -308,13 +308,15 @@ calc_load_n(unsigned long load, unsigned long exp,
  */
 static void calc_global_nohz(void)
 {
+       unsigned long sample_window;
        long delta, active, n;
 
-       if (!time_before(jiffies, calc_load_update + 10)) {
+       sample_window = READ_ONCE(calc_load_update);
+       if (!time_before(jiffies, sample_window + 10)) {
                /*
                 * Catch up and fold in however many windows we are still behind
                 */
-               delta = jiffies - calc_load_update - 10;
+               delta = jiffies - sample_window - 10;
                n = 1 + (delta / LOAD_FREQ);
 
                active = atomic_long_read(&calc_load_tasks);
@@ -324,7 +326,7 @@ static void calc_global_nohz(void)
                avenrun[1] = calc_load_n(avenrun[1], EXP_5, active, n);
                avenrun[2] = calc_load_n(avenrun[2], EXP_15, active, n);
 
-               calc_load_update += n * LOAD_FREQ;
+               WRITE_ONCE(calc_load_update, sample_window + n * LOAD_FREQ);
        }
 
        /*
@@ -352,9 +354,11 @@ static inline void calc_global_nohz(void) { }
  */
 void calc_global_load(unsigned long ticks)
 {
+       unsigned long sample_window;
        long active, delta;
 
-       if (time_before(jiffies, calc_load_update + 10))
+       sample_window = READ_ONCE(calc_load_update);
+       if (time_before(jiffies, sample_window + 10))
                return;
 
        /*
@@ -371,7 +375,7 @@ void calc_global_load(unsigned long ticks)
        avenrun[1] = calc_load(avenrun[1], EXP_5, active);
        avenrun[2] = calc_load(avenrun[2], EXP_15, active);
 
-       calc_load_update += LOAD_FREQ;
+       WRITE_ONCE(calc_load_update, sample_window + LOAD_FREQ);
 
        /*
         * In case we idled for multiple LOAD_FREQ intervals, catch up in bulk.
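The loadavg hunk reads calc_load_update once into a local sample_window and does every comparison and update against that snapshot; re-reading the global mid-function could observe a concurrent WRITE_ONCE() and mix two different windows. A C11 atomics sketch of the snapshot idiom, with atomic_load/atomic_store standing in for READ_ONCE/WRITE_ONCE and illustrative names throughout:

#include <stdatomic.h>
#include <stdio.h>

static atomic_ulong window;     /* stands in for calc_load_update */

static void fold(unsigned long now, unsigned long freq)
{
        /* One load, then work only with the snapshot. */
        unsigned long sample_window = atomic_load(&window);

        if (now < sample_window + 10)
                return;
        /* ... fold samples for this window ... */
        atomic_store(&window, sample_window + freq);
}

int main(void)
{
        atomic_store(&window, 100);
        fold(105, 50);                          /* too early: no-op */
        printf("%lu\n", atomic_load(&window));  /* 100 */
        fold(120, 50);                          /* folds, advances window */
        printf("%lu\n", atomic_load(&window));  /* 150 */
        return 0;
}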
index 4d2ea6f255683811f6eefb5d95fb18eb9e7c7192..b8c84c6dee64bd31ca28b4cfe7283a55945aa596 100644 (file)
@@ -242,6 +242,45 @@ long prepare_to_wait_event(wait_queue_head_t *q, wait_queue_t *wait, int state)
 }
 EXPORT_SYMBOL(prepare_to_wait_event);
 
+/*
+ * Note! These two wait functions are entered with the
+ * wait-queue lock held (and interrupts off in the _irq
+ * case), so there is no race with testing the wakeup
+ * condition in the caller before they add the wait
+ * entry to the wake queue.
+ */
+int do_wait_intr(wait_queue_head_t *wq, wait_queue_t *wait)
+{
+       if (likely(list_empty(&wait->task_list)))
+               __add_wait_queue_tail(wq, wait);
+
+       set_current_state(TASK_INTERRUPTIBLE);
+       if (signal_pending(current))
+               return -ERESTARTSYS;
+
+       spin_unlock(&wq->lock);
+       schedule();
+       spin_lock(&wq->lock);
+       return 0;
+}
+EXPORT_SYMBOL(do_wait_intr);
+
+int do_wait_intr_irq(wait_queue_head_t *wq, wait_queue_t *wait)
+{
+       if (likely(list_empty(&wait->task_list)))
+               __add_wait_queue_tail(wq, wait);
+
+       set_current_state(TASK_INTERRUPTIBLE);
+       if (signal_pending(current))
+               return -ERESTARTSYS;
+
+       spin_unlock_irq(&wq->lock);
+       schedule();
+       spin_lock_irq(&wq->lock);
+       return 0;
+}
+EXPORT_SYMBOL(do_wait_intr_irq);
+
 /**
  * finish_wait - clean up after waiting in a queue
  * @q: waitqueue waited on
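One plausible caller shape for the two helpers added above (a hedged sketch, not taken from an in-tree user): the wait-queue lock is held across the condition test, so a wakeup cannot slip in between the test and the enqueue, and finish_wait() cleans up whether we left on the condition or on -ERESTARTSYS.

/* Sketch only; 'condition' stands in for the caller's predicate. */
static int wait_for_condition_example(wait_queue_head_t *wq)
{
        DEFINE_WAIT(wait);
        int err = 0;

        spin_lock(&wq->lock);
        while (!condition) {
                err = do_wait_intr(wq, &wait);  /* drops/retakes wq->lock */
                if (err)
                        break;                  /* -ERESTARTSYS on signal */
        }
        spin_unlock(&wq->lock);

        finish_wait(wq, &wait);                 /* dequeue, set TASK_RUNNING */
        return err;
}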
index 7906b3f0c41a1a5b662c703c428a892ad7816670..497719127bf9f65c1c992874abc33f0c52d5c766 100644 (file)
@@ -125,7 +125,7 @@ int register_refined_jiffies(long cycles_per_second)
        shift_hz += cycles_per_tick/2;
        do_div(shift_hz, cycles_per_tick);
        /* Calculate nsec_per_tick using shift_hz */
-       nsec_per_tick = (u64)TICK_NSEC << 8;
+       nsec_per_tick = (u64)NSEC_PER_SEC << 8;
        nsec_per_tick += (u32)shift_hz/2;
        do_div(nsec_per_tick, (u32)shift_hz);
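The jiffies fix above matters because shift_hz already encodes ticks per second (roughly HZ << 8, refined); dividing TICK_NSEC by it divides by HZ twice. nsec_per_tick has to come from NSEC_PER_SEC. A quick check with round numbers, assuming an ideal clock and omitting the rounding terms:

#include <stdio.h>

int main(void)
{
        unsigned long long NSEC_PER_SEC = 1000000000ULL;
        unsigned long long HZ = 250, TICK_NSEC = NSEC_PER_SEC / HZ;
        unsigned long long shift_hz = HZ << 8;  /* ideal refined HZ */

        /* buggy: TICK_NSEC was already divided by HZ once */
        printf("old: %llu ns/tick\n", (TICK_NSEC << 8) / shift_hz);    /* 16000 */
        /* fixed: the expected 4000000 ns per tick for HZ=250 */
        printf("new: %llu ns/tick\n", (NSEC_PER_SEC << 8) / shift_hz); /* 4000000 */
        return 0;
}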
 
index d5038005eb5dc06dd8432cc17e5be3c2c1de7e81..d4a06e714645df56f75db97ba6bb052534a4bb41 100644 (file)
@@ -429,7 +429,7 @@ config BLK_DEV_IO_TRACE
 
          If unsure, say N.
 
-config KPROBE_EVENT
+config KPROBE_EVENTS
        depends on KPROBES
        depends on HAVE_REGS_AND_STACK_ACCESS_API
        bool "Enable kprobes-based dynamic events"
@@ -447,7 +447,7 @@ config KPROBE_EVENT
          This option is also required by perf-probe subcommand of perf tools.
          If you want to use perf tools, this option is strongly recommended.
 
-config UPROBE_EVENT
+config UPROBE_EVENTS
        bool "Enable uprobes-based dynamic events"
        depends on ARCH_SUPPORTS_UPROBES
        depends on MMU
@@ -466,7 +466,7 @@ config UPROBE_EVENT
 
 config BPF_EVENTS
        depends on BPF_SYSCALL
-       depends on (KPROBE_EVENT || UPROBE_EVENT) && PERF_EVENTS
+       depends on (KPROBE_EVENTS || UPROBE_EVENTS) && PERF_EVENTS
        bool
        default y
        help
index e5798084554911440844e1757c9ee656dc40cf12..90f2701d92a7eee98334f2b10e515b369307df2b 100644 (file)
@@ -57,7 +57,7 @@ obj-$(CONFIG_EVENT_TRACING) += trace_events_filter.o
 obj-$(CONFIG_EVENT_TRACING) += trace_events_trigger.o
 obj-$(CONFIG_HIST_TRIGGERS) += trace_events_hist.o
 obj-$(CONFIG_BPF_EVENTS) += bpf_trace.o
-obj-$(CONFIG_KPROBE_EVENT) += trace_kprobe.o
+obj-$(CONFIG_KPROBE_EVENTS) += trace_kprobe.o
 obj-$(CONFIG_TRACEPOINTS) += power-traces.o
 ifeq ($(CONFIG_PM),y)
 obj-$(CONFIG_TRACEPOINTS) += rpm-traces.o
@@ -66,7 +66,7 @@ ifeq ($(CONFIG_TRACING),y)
 obj-$(CONFIG_KGDB_KDB) += trace_kdb.o
 endif
 obj-$(CONFIG_PROBE_EVENTS) += trace_probe.o
-obj-$(CONFIG_UPROBE_EVENT) += trace_uprobe.o
+obj-$(CONFIG_UPROBE_EVENTS) += trace_uprobe.o
 
 obj-$(CONFIG_TRACEPOINT_BENCHMARK) += trace_benchmark.o
 
index cee9802cf3e00f0f5ef1625df14fa9d6892b3581..8a4efac287104dd146df6994135a607d97d2e812 100644 (file)
@@ -501,16 +501,11 @@ static bool kprobe_prog_is_valid_access(int off, int size, enum bpf_access_type
        return true;
 }
 
-static const struct bpf_verifier_ops kprobe_prog_ops = {
+const struct bpf_verifier_ops kprobe_prog_ops = {
        .get_func_proto  = kprobe_prog_func_proto,
        .is_valid_access = kprobe_prog_is_valid_access,
 };
 
-static struct bpf_prog_type_list kprobe_tl __ro_after_init = {
-       .ops    = &kprobe_prog_ops,
-       .type   = BPF_PROG_TYPE_KPROBE,
-};
-
 BPF_CALL_5(bpf_perf_event_output_tp, void *, tp_buff, struct bpf_map *, map,
           u64, flags, void *, data, u64, size)
 {
@@ -584,16 +579,11 @@ static bool tp_prog_is_valid_access(int off, int size, enum bpf_access_type type
        return true;
 }
 
-static const struct bpf_verifier_ops tracepoint_prog_ops = {
+const struct bpf_verifier_ops tracepoint_prog_ops = {
        .get_func_proto  = tp_prog_func_proto,
        .is_valid_access = tp_prog_is_valid_access,
 };
 
-static struct bpf_prog_type_list tracepoint_tl __ro_after_init = {
-       .ops    = &tracepoint_prog_ops,
-       .type   = BPF_PROG_TYPE_TRACEPOINT,
-};
-
 static bool pe_prog_is_valid_access(int off, int size, enum bpf_access_type type,
                                    enum bpf_reg_type *reg_type)
 {
@@ -642,22 +632,8 @@ static u32 pe_prog_convert_ctx_access(enum bpf_access_type type,
        return insn - insn_buf;
 }
 
-static const struct bpf_verifier_ops perf_event_prog_ops = {
+const struct bpf_verifier_ops perf_event_prog_ops = {
        .get_func_proto         = tp_prog_func_proto,
        .is_valid_access        = pe_prog_is_valid_access,
        .convert_ctx_access     = pe_prog_convert_ctx_access,
 };
-
-static struct bpf_prog_type_list perf_event_tl __ro_after_init = {
-       .ops    = &perf_event_prog_ops,
-       .type   = BPF_PROG_TYPE_PERF_EVENT,
-};
-
-static int __init register_kprobe_prog_ops(void)
-{
-       bpf_register_prog_type(&kprobe_tl);
-       bpf_register_prog_type(&tracepoint_tl);
-       bpf_register_prog_type(&perf_event_tl);
-       return 0;
-}
-late_initcall(register_kprobe_prog_ops);
index 0d1597c9ee305780ff65200001b55928dcdf09e3..b9691ee8f6c182cfee1af7308555b9291f3730bd 100644 (file)
@@ -4416,16 +4416,24 @@ static int __init set_graph_notrace_function(char *str)
 }
 __setup("ftrace_graph_notrace=", set_graph_notrace_function);
 
+static int __init set_graph_max_depth_function(char *str)
+{
+       if (!str)
+               return 0;
+       fgraph_max_depth = simple_strtoul(str, NULL, 0);
+       return 1;
+}
+__setup("ftrace_graph_max_depth=", set_graph_max_depth_function);
+
 static void __init set_ftrace_early_graph(char *buf, int enable)
 {
        int ret;
        char *func;
        struct ftrace_hash *hash;
 
-       if (enable)
-               hash = ftrace_graph_hash;
-       else
-               hash = ftrace_graph_notrace_hash;
+       hash = alloc_ftrace_hash(FTRACE_HASH_DEFAULT_BITS);
+       if (WARN_ON(!hash))
+               return;
 
        while (buf) {
                func = strsep(&buf, ",");
@@ -4435,6 +4443,11 @@ static void __init set_ftrace_early_graph(char *buf, int enable)
                        printk(KERN_DEBUG "ftrace: function %s not "
                                          "traceable\n", func);
        }
+
+       if (enable)
+               ftrace_graph_hash = hash;
+       else
+               ftrace_graph_notrace_hash = hash;
 }
 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */
 
@@ -5488,7 +5501,7 @@ static void ftrace_ops_assist_func(unsigned long ip, unsigned long parent_ip,
  * Normally the mcount trampoline will call the ops->func, but there
  * are times that it should not. For example, if the ops does not
  * have its own recursion protection, then it should call the
- * ftrace_ops_recurs_func() instead.
+ * ftrace_ops_assist_func() instead.
  *
  * Returns the function that the trampoline should call for @ops.
  */
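The set_graph_max_depth_function() handler registered above makes the function-graph depth cap settable at boot instead of only through tracefs afterwards. A usage sketch with illustrative values; the tracefs knob is typically max_graph_depth:

# kernel command line (illustrative):
#     ftrace=function_graph ftrace_graph_max_depth=5
#
# equivalent post-boot tuning via tracefs:
#     echo 5 > /sys/kernel/debug/tracing/max_graph_depth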
index 707445ceb7efd4e098ba3ad5a129bb19a03122f9..f35109514a015c38de8b2e1da99399fd5f399692 100644 (file)
@@ -4341,22 +4341,22 @@ static const char readme_msg[] =
        "\t\t\t  traces\n"
 #endif
 #endif /* CONFIG_STACK_TRACER */
-#ifdef CONFIG_KPROBE_EVENT
+#ifdef CONFIG_KPROBE_EVENTS
        "  kprobe_events\t\t- Add/remove/show the kernel dynamic events\n"
        "\t\t\t  Write into this file to define/undefine new trace events.\n"
 #endif
-#ifdef CONFIG_UPROBE_EVENT
+#ifdef CONFIG_UPROBE_EVENTS
        "  uprobe_events\t\t- Add/remove/show the userspace dynamic events\n"
        "\t\t\t  Write into this file to define/undefine new trace events.\n"
 #endif
-#if defined(CONFIG_KPROBE_EVENT) || defined(CONFIG_UPROBE_EVENT)
+#if defined(CONFIG_KPROBE_EVENTS) || defined(CONFIG_UPROBE_EVENTS)
        "\t  accepts: event-definitions (one definition per line)\n"
        "\t   Format: p|r[:[<group>/]<event>] <place> [<args>]\n"
        "\t           -:[<group>/]<event>\n"
-#ifdef CONFIG_KPROBE_EVENT
+#ifdef CONFIG_KPROBE_EVENTS
        "\t    place: [<module>:]<symbol>[+<offset>]|<memaddr>\n"
 #endif
-#ifdef CONFIG_UPROBE_EVENT
+#ifdef CONFIG_UPROBE_EVENTS
        "\t    place: <path>:<offset>\n"
 #endif
        "\t     args: <name>=fetcharg[:type]\n"
index 0c0ae54d44c616d5d09876165c2516c3e032af77..903273c93e6167afcbe2de99451a906c2e79ab1f 100644 (file)
@@ -248,7 +248,7 @@ ASSIGN_FETCH_FUNC(file_offset, ftype),                      \
 #define FETCH_TYPE_STRING      0
 #define FETCH_TYPE_STRSIZE     1
 
-#ifdef CONFIG_KPROBE_EVENT
+#ifdef CONFIG_KPROBE_EVENTS
 struct symbol_cache;
 unsigned long update_symbol_cache(struct symbol_cache *sc);
 void free_symbol_cache(struct symbol_cache *sc);
@@ -278,7 +278,7 @@ alloc_symbol_cache(const char *sym, long offset)
 {
        return NULL;
 }
-#endif /* CONFIG_KPROBE_EVENT */
+#endif /* CONFIG_KPROBE_EVENTS */
 
 struct probe_arg {
        struct fetch_param      fetch;
index 1d68b5b7ad4133d102a39006f575bdfe49d808ea..5fb1f2c87e6b846b7f9d32823ef3aede4b28db9e 100644 (file)
@@ -65,7 +65,7 @@ void stack_trace_print(void)
 }
 
 /*
- * When arch-specific code overides this function, the following
+ * When arch-specific code overrides this function, the following
  * data should be filled up, assuming stack_trace_max_lock is held to
  * prevent concurrent updates.
  *     stack_trace_index[]
index 62630a40ab3a4225291c7804ff67917aa997646c..b4eeee03934fe8f083b70e9907be0721759bc3be 100644 (file)
@@ -144,7 +144,7 @@ static struct ucounts *get_ucounts(struct user_namespace *ns, kuid_t uid)
 
                new->ns = ns;
                new->uid = uid;
-               atomic_set(&new->count, 0);
+               new->count = 0;
 
                spin_lock_irq(&ucounts_lock);
                ucounts = find_ucounts(ns, uid, hashent);
@@ -155,8 +155,10 @@ static struct ucounts *get_ucounts(struct user_namespace *ns, kuid_t uid)
                        ucounts = new;
                }
        }
-       if (!atomic_add_unless(&ucounts->count, 1, INT_MAX))
+       if (ucounts->count == INT_MAX)
                ucounts = NULL;
+       else
+               ucounts->count += 1;
        spin_unlock_irq(&ucounts_lock);
        return ucounts;
 }
@@ -165,13 +167,15 @@ static void put_ucounts(struct ucounts *ucounts)
 {
        unsigned long flags;
 
-       if (atomic_dec_and_test(&ucounts->count)) {
-               spin_lock_irqsave(&ucounts_lock, flags);
+       spin_lock_irqsave(&ucounts_lock, flags);
+       ucounts->count -= 1;
+       if (!ucounts->count)
                hlist_del_init(&ucounts->node);
-               spin_unlock_irqrestore(&ucounts_lock, flags);
+       else
+               ucounts = NULL;
+       spin_unlock_irqrestore(&ucounts_lock, flags);
 
-               kfree(ucounts);
-       }
+       kfree(ucounts);
 }
 
 static inline bool atomic_inc_below(atomic_t *v, int u)
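The ucount.c change replaces the atomic_t count with a plain integer manipulated only under ucounts_lock, the same lock that protects the hash lookup; that closes the window where a concurrent get_ucounts() could find an entry whose atomic count had just dropped to zero and was about to be freed. A pthread sketch of the lookup-and-refcount-share-one-lock pattern, with a toy one-entry table and illustrative names:

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

static pthread_mutex_t table_lock = PTHREAD_MUTEX_INITIALIZER;

struct obj { int count; };

static struct obj *slot;        /* toy one-entry "hash table" */

static struct obj *get_obj(void)
{
        struct obj *o;

        pthread_mutex_lock(&table_lock);
        o = slot;                       /* lookup ... */
        if (o)
                o->count += 1;          /* ... and ref, under one lock */
        pthread_mutex_unlock(&table_lock);
        return o;
}

static void put_obj(struct obj *o)
{
        int free_it;

        pthread_mutex_lock(&table_lock);
        o->count -= 1;
        free_it = (o->count == 0);
        if (free_it)
                slot = NULL;            /* unlink before anyone can find it */
        pthread_mutex_unlock(&table_lock);
        if (free_it)
                free(o);                /* safe: no longer reachable */
}

int main(void)
{
        slot = calloc(1, sizeof(*slot));
        struct obj *a = get_obj();      /* count = 1 */
        struct obj *b = get_obj();      /* count = 2 */

        put_obj(a);
        put_obj(b);                     /* unlinks and frees */
        printf("slot is %s\n", slot ? "set" : "empty");
        return 0;
}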
index 072cbc9b175dc1efbe95c14858f810f92db12130..c0168b7da1eaf22c216147ca5ebd03ef7311dca8 100644 (file)
@@ -1507,6 +1507,7 @@ static void __queue_delayed_work(int cpu, struct workqueue_struct *wq,
        struct timer_list *timer = &dwork->timer;
        struct work_struct *work = &dwork->work;
 
+       WARN_ON_ONCE(!wq);
        WARN_ON_ONCE(timer->function != delayed_work_timer_fn ||
                     timer->data != (unsigned long)dwork);
        WARN_ON_ONCE(timer_pending(timer));
index a3e14ce92a5684a662c2c8f80f97e6fef95943b7..4bb30206b9426f1fcece4324cc0dfe76b8855c65 100644 (file)
@@ -14,6 +14,7 @@
 #include <asm/pgtable.h>
 
 #ifdef CONFIG_HAVE_ARCH_HUGE_VMAP
+static int __read_mostly ioremap_p4d_capable;
 static int __read_mostly ioremap_pud_capable;
 static int __read_mostly ioremap_pmd_capable;
 static int __read_mostly ioremap_huge_disabled;
@@ -35,6 +36,11 @@ void __init ioremap_huge_init(void)
        }
 }
 
+static inline int ioremap_p4d_enabled(void)
+{
+       return ioremap_p4d_capable;
+}
+
 static inline int ioremap_pud_enabled(void)
 {
        return ioremap_pud_capable;
@@ -46,6 +52,7 @@ static inline int ioremap_pmd_enabled(void)
 }
 
 #else  /* !CONFIG_HAVE_ARCH_HUGE_VMAP */
+static inline int ioremap_p4d_enabled(void) { return 0; }
 static inline int ioremap_pud_enabled(void) { return 0; }
 static inline int ioremap_pmd_enabled(void) { return 0; }
 #endif /* CONFIG_HAVE_ARCH_HUGE_VMAP */
@@ -94,14 +101,14 @@ static inline int ioremap_pmd_range(pud_t *pud, unsigned long addr,
        return 0;
 }
 
-static inline int ioremap_pud_range(pgd_t *pgd, unsigned long addr,
+static inline int ioremap_pud_range(p4d_t *p4d, unsigned long addr,
                unsigned long end, phys_addr_t phys_addr, pgprot_t prot)
 {
        pud_t *pud;
        unsigned long next;
 
        phys_addr -= addr;
-       pud = pud_alloc(&init_mm, pgd, addr);
+       pud = pud_alloc(&init_mm, p4d, addr);
        if (!pud)
                return -ENOMEM;
        do {
@@ -120,6 +127,32 @@ static inline int ioremap_pud_range(pgd_t *pgd, unsigned long addr,
        return 0;
 }
 
+static inline int ioremap_p4d_range(pgd_t *pgd, unsigned long addr,
+               unsigned long end, phys_addr_t phys_addr, pgprot_t prot)
+{
+       p4d_t *p4d;
+       unsigned long next;
+
+       phys_addr -= addr;
+       p4d = p4d_alloc(&init_mm, pgd, addr);
+       if (!p4d)
+               return -ENOMEM;
+       do {
+               next = p4d_addr_end(addr, end);
+
+               if (ioremap_p4d_enabled() &&
+                   ((next - addr) == P4D_SIZE) &&
+                   IS_ALIGNED(phys_addr + addr, P4D_SIZE)) {
+                       if (p4d_set_huge(p4d, phys_addr + addr, prot))
+                               continue;
+               }
+
+               if (ioremap_pud_range(p4d, addr, next, phys_addr + addr, prot))
+                       return -ENOMEM;
+       } while (p4d++, addr = next, addr != end);
+       return 0;
+}
+
 int ioremap_page_range(unsigned long addr,
                       unsigned long end, phys_addr_t phys_addr, pgprot_t prot)
 {
@@ -135,7 +168,7 @@ int ioremap_page_range(unsigned long addr,
        pgd = pgd_offset_k(addr);
        do {
                next = pgd_addr_end(addr, end);
-               err = ioremap_pud_range(pgd, addr, next, phys_addr+addr, prot);
+               err = ioremap_p4d_range(pgd, addr, next, phys_addr+addr, prot);
                if (err)
                        break;
        } while (pgd++, addr = next, addr != end);
index 5ed506d648c4e53ee955e9c19b942fd0d666eee1..691a9ad48497b02e3b09304d6565165ef2317b16 100644 (file)
@@ -2129,8 +2129,8 @@ int ida_pre_get(struct ida *ida, gfp_t gfp)
                struct ida_bitmap *bitmap = kmalloc(sizeof(*bitmap), gfp);
                if (!bitmap)
                        return 0;
-               bitmap = this_cpu_cmpxchg(ida_bitmap, NULL, bitmap);
-               kfree(bitmap);
+               if (this_cpu_cmpxchg(ida_bitmap, NULL, bitmap))
+                       kfree(bitmap);
        }
 
        return 1;
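The ida_pre_get() fix above hinges on this_cpu_cmpxchg() returning the old slot contents: a non-NULL return means the install failed because another bitmap already occupies the slot, so it is the freshly allocated bitmap that must be freed. The previous code freed the returned pointer, i.e. the bitmap still installed in the slot. A C11 sketch of the install-or-free idiom, with an atomic pointer standing in for the per-cpu slot:

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

static _Atomic(void *) slot;    /* stands in for the per-cpu ida_bitmap */

/* Install 'fresh' only if the slot is empty; on failure free the
 * unused new allocation, never the one already in the slot. */
static void install_or_free(void *fresh)
{
        void *expected = NULL;

        if (!atomic_compare_exchange_strong(&slot, &expected, fresh))
                free(fresh);    /* slot was taken; discard ours */
}

int main(void)
{
        install_or_free(malloc(64));    /* installs */
        install_or_free(malloc(64));    /* loses the race, frees its own */
        printf("slot %s\n", atomic_load(&slot) ? "occupied" : "empty");
        free(atomic_load(&slot));
        return 0;
}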
index 1d33366189d10c88bf10616e6a9b25c64c4bf570..aa09ad3c30b0dc37a920c46f0da711f366d29423 100644 (file)
@@ -58,7 +58,7 @@ bool refcount_add_not_zero(unsigned int i, refcount_t *r)
                val = old;
        }
 
-       WARN(new == UINT_MAX, "refcount_t: saturated; leaking memory.\n");
+       WARN_ONCE(new == UINT_MAX, "refcount_t: saturated; leaking memory.\n");
 
        return true;
 }
@@ -66,7 +66,7 @@ EXPORT_SYMBOL_GPL(refcount_add_not_zero);
 
 void refcount_add(unsigned int i, refcount_t *r)
 {
-       WARN(!refcount_add_not_zero(i, r), "refcount_t: addition on 0; use-after-free.\n");
+       WARN_ONCE(!refcount_add_not_zero(i, r), "refcount_t: addition on 0; use-after-free.\n");
 }
 EXPORT_SYMBOL_GPL(refcount_add);
 
@@ -97,7 +97,7 @@ bool refcount_inc_not_zero(refcount_t *r)
                val = old;
        }
 
-       WARN(new == UINT_MAX, "refcount_t: saturated; leaking memory.\n");
+       WARN_ONCE(new == UINT_MAX, "refcount_t: saturated; leaking memory.\n");
 
        return true;
 }
@@ -111,7 +111,7 @@ EXPORT_SYMBOL_GPL(refcount_inc_not_zero);
  */
 void refcount_inc(refcount_t *r)
 {
-       WARN(!refcount_inc_not_zero(r), "refcount_t: increment on 0; use-after-free.\n");
+       WARN_ONCE(!refcount_inc_not_zero(r), "refcount_t: increment on 0; use-after-free.\n");
 }
 EXPORT_SYMBOL_GPL(refcount_inc);
 
@@ -125,7 +125,7 @@ bool refcount_sub_and_test(unsigned int i, refcount_t *r)
 
                new = val - i;
                if (new > val) {
-                       WARN(new > val, "refcount_t: underflow; use-after-free.\n");
+                       WARN_ONCE(new > val, "refcount_t: underflow; use-after-free.\n");
                        return false;
                }
 
@@ -164,7 +164,7 @@ EXPORT_SYMBOL_GPL(refcount_dec_and_test);
 
 void refcount_dec(refcount_t *r)
 {
-       WARN(refcount_dec_and_test(r), "refcount_t: decrement hit 0; leaking memory.\n");
+       WARN_ONCE(refcount_dec_and_test(r), "refcount_t: decrement hit 0; leaking memory.\n");
 }
 EXPORT_SYMBOL_GPL(refcount_dec);
 
@@ -204,7 +204,7 @@ bool refcount_dec_not_one(refcount_t *r)
 
                new = val - 1;
                if (new > val) {
-                       WARN(new > val, "refcount_t: underflow; use-after-free.\n");
+                       WARN_ONCE(new > val, "refcount_t: underflow; use-after-free.\n");
                        return true;
                }
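
All of the refcount.c changes above downgrade WARN() to WARN_ONCE(), so a saturated or misused counter logs a single diagnostic instead of one per subsequent operation. For context, here is a user-space sketch of the saturation semantics those warnings guard, assuming C11 atomics; it models the behavior rather than reproducing the kernel implementation:

    /* Once the counter reaches UINT_MAX it sticks there (deliberately
     * leaking the object) instead of wrapping to 0 and enabling a
     * use-after-free. Incrementing from 0 is refused for the same reason. */
    #include <limits.h>
    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdio.h>

    static bool refcount_inc_saturate(atomic_uint *r)
    {
            unsigned int val = atomic_load(r);

            for (;;) {
                    if (val == 0)
                            return false;  /* inc-from-0: use-after-free */
                    if (val == UINT_MAX)
                            return true;   /* saturated: stay pinned */
                    if (atomic_compare_exchange_weak(r, &val, val + 1))
                            break;         /* val is reloaded on failure */
            }
            if (val + 1 == UINT_MAX)
                    fprintf(stderr, "refcount saturated; leaking memory\n");
            return true;
    }
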
 
index 17d5ff5fa6a388c4dc947a47615608bca79b2c40..2c6cd1b5c3ea86668bc73196c4aa980c724ad34c 100644 (file)
@@ -12,6 +12,7 @@ static int collect_syscall(struct task_struct *target, long *callno,
 
        if (!try_get_task_stack(target)) {
                /* Task has no stack, so the task isn't in a syscall. */
+               *sp = *pc = 0;
                *callno = -1;
                return 0;
        }
index 0b1d3140fbb87738ec37031997075e50c5670575..a25c9763fce19f17c723b9db3645ae93ba47dcb6 100644 (file)
@@ -20,6 +20,7 @@
 #include <linux/string.h>
 #include <linux/uaccess.h>
 #include <linux/module.h>
+#include <linux/kasan.h>
 
 /*
  * Note: test functions are marked noinline so that their names appear in
@@ -474,6 +475,12 @@ static noinline void __init use_after_scope_test(void)
 
 static int __init kmalloc_tests_init(void)
 {
+       /*
+        * Temporarily enable multi-shot mode. Otherwise, we'd only get a
+        * report for the first case.
+        */
+       bool multishot = kasan_save_enable_multi_shot();
+
        kmalloc_oob_right();
        kmalloc_oob_left();
        kmalloc_node_oob_right();
@@ -499,6 +506,9 @@ static int __init kmalloc_tests_init(void)
        ksize_unpoisons_memory();
        copy_user_test();
        use_after_scope_test();
+
+       kasan_restore_multi_shot(multishot);
+
        return -EAGAIN;
 }
 
index 6d861d090e9fc79d39e2b48f57b1d9f4bc91463f..c6f2a37028c205db8143ebe58677c790c66a0faf 100644 (file)
@@ -683,33 +683,26 @@ static int cgwb_bdi_init(struct backing_dev_info *bdi)
 static void cgwb_bdi_destroy(struct backing_dev_info *bdi)
 {
        struct radix_tree_iter iter;
-       struct rb_node *rbn;
        void **slot;
 
        WARN_ON(test_bit(WB_registered, &bdi->wb.state));
 
        spin_lock_irq(&cgwb_lock);
-
        radix_tree_for_each_slot(slot, &bdi->cgwb_tree, &iter, 0)
                cgwb_kill(*slot);
-
-       while ((rbn = rb_first(&bdi->cgwb_congested_tree))) {
-               struct bdi_writeback_congested *congested =
-                       rb_entry(rbn, struct bdi_writeback_congested, rb_node);
-
-               rb_erase(rbn, &bdi->cgwb_congested_tree);
-               congested->bdi = NULL;  /* mark @congested unlinked */
-       }
-
        spin_unlock_irq(&cgwb_lock);
 
        /*
-        * All cgwb's and their congested states must be shutdown and
-        * released before returning.  Drain the usage counter to wait for
-        * all cgwb's and cgwb_congested's ever created on @bdi.
+        * All cgwb's must be shut down and released before returning.  Drain
+        * the usage counter to wait for all cgwb's ever created on @bdi.
         */
        atomic_dec(&bdi->usage_cnt);
        wait_event(cgwb_release_wait, !atomic_read(&bdi->usage_cnt));
+       /*
+        * Grab back our reference so that we hold it when @bdi gets
+        * re-registered.
+        */
+       atomic_inc(&bdi->usage_cnt);
 }
 
 /**
@@ -749,6 +742,21 @@ void wb_blkcg_offline(struct blkcg *blkcg)
        spin_unlock_irq(&cgwb_lock);
 }
 
+static void cgwb_bdi_exit(struct backing_dev_info *bdi)
+{
+       struct rb_node *rbn;
+
+       spin_lock_irq(&cgwb_lock);
+       while ((rbn = rb_first(&bdi->cgwb_congested_tree))) {
+               struct bdi_writeback_congested *congested =
+                       rb_entry(rbn, struct bdi_writeback_congested, rb_node);
+
+               rb_erase(rbn, &bdi->cgwb_congested_tree);
+               congested->bdi = NULL;  /* mark @congested unlinked */
+       }
+       spin_unlock_irq(&cgwb_lock);
+}
+
 #else  /* CONFIG_CGROUP_WRITEBACK */
 
 static int cgwb_bdi_init(struct backing_dev_info *bdi)
@@ -769,7 +777,9 @@ static int cgwb_bdi_init(struct backing_dev_info *bdi)
        return 0;
 }
 
-static void cgwb_bdi_destroy(struct backing_dev_info *bdi)
+static void cgwb_bdi_destroy(struct backing_dev_info *bdi) { }
+
+static void cgwb_bdi_exit(struct backing_dev_info *bdi)
 {
        wb_congested_put(bdi->wb_congested);
 }
@@ -857,6 +867,8 @@ int bdi_register_owner(struct backing_dev_info *bdi, struct device *owner)
                        MINOR(owner->devt));
        if (rc)
                return rc;
+       /* Leaking owner reference... */
+       WARN_ON(bdi->owner);
        bdi->owner = owner;
        get_device(owner);
        return 0;
@@ -898,6 +910,7 @@ static void bdi_exit(struct backing_dev_info *bdi)
 {
        WARN_ON_ONCE(bdi->dev);
        wb_exit(&bdi->wb);
+       cgwb_bdi_exit(bdi);
 }
 
 static void release_bdi(struct kref *ref)
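
The reworked cgwb_bdi_destroy() above drains bdi->usage_cnt to wait out every cgwb ever created on the bdi, then takes the reference back so the bdi can be re-registered; the congested-tree teardown moves into the new cgwb_bdi_exit() called from bdi_exit(). A rough pthread-based model of the drain-and-regrab step, with illustrative names standing in for atomic_t and wait_event():

    #include <pthread.h>

    struct obj {
            int usage;                 /* models atomic_t usage_cnt */
            pthread_mutex_t lock;
            pthread_cond_t drained;
    };

    void obj_put(struct obj *o)        /* release path run by other holders */
    {
            pthread_mutex_lock(&o->lock);
            if (--o->usage == 0)
                    pthread_cond_broadcast(&o->drained);
            pthread_mutex_unlock(&o->lock);
    }

    void obj_destroy_for_reuse(struct obj *o)
    {
            pthread_mutex_lock(&o->lock);
            o->usage--;                /* drop our own reference */
            while (o->usage > 0)       /* wait_event() equivalent */
                    pthread_cond_wait(&o->drained, &o->lock);
            o->usage++;                /* grab it back for re-registration */
            pthread_mutex_unlock(&o->lock);
    }
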
index 9c047e951aa3d0399f331eb3914b09d207dc989b..04aa405350dce8656db4293a34e95e9bfbe166d8 100644 (file)
--- a/mm/gup.c
+++ b/mm/gup.c
@@ -226,6 +226,7 @@ struct page *follow_page_mask(struct vm_area_struct *vma,
                              unsigned int *page_mask)
 {
        pgd_t *pgd;
+       p4d_t *p4d;
        pud_t *pud;
        pmd_t *pmd;
        spinlock_t *ptl;
@@ -243,8 +244,13 @@ struct page *follow_page_mask(struct vm_area_struct *vma,
        pgd = pgd_offset(mm, address);
        if (pgd_none(*pgd) || unlikely(pgd_bad(*pgd)))
                return no_page_table(vma, flags);
-
-       pud = pud_offset(pgd, address);
+       p4d = p4d_offset(pgd, address);
+       if (p4d_none(*p4d))
+               return no_page_table(vma, flags);
+       BUILD_BUG_ON(p4d_huge(*p4d));
+       if (unlikely(p4d_bad(*p4d)))
+               return no_page_table(vma, flags);
+       pud = pud_offset(p4d, address);
        if (pud_none(*pud))
                return no_page_table(vma, flags);
        if (pud_huge(*pud) && vma->vm_flags & VM_HUGETLB) {
@@ -325,6 +331,7 @@ static int get_gate_page(struct mm_struct *mm, unsigned long address,
                struct page **page)
 {
        pgd_t *pgd;
+       p4d_t *p4d;
        pud_t *pud;
        pmd_t *pmd;
        pte_t *pte;
@@ -338,7 +345,9 @@ static int get_gate_page(struct mm_struct *mm, unsigned long address,
        else
                pgd = pgd_offset_gate(mm, address);
        BUG_ON(pgd_none(*pgd));
-       pud = pud_offset(pgd, address);
+       p4d = p4d_offset(pgd, address);
+       BUG_ON(p4d_none(*p4d));
+       pud = pud_offset(p4d, address);
        BUG_ON(pud_none(*pud));
        pmd = pmd_offset(pud, address);
        if (pmd_none(*pmd))
@@ -1400,13 +1409,13 @@ static int gup_pmd_range(pud_t pud, unsigned long addr, unsigned long end,
        return 1;
 }
 
-static int gup_pud_range(pgd_t pgd, unsigned long addr, unsigned long end,
+static int gup_pud_range(p4d_t p4d, unsigned long addr, unsigned long end,
                         int write, struct page **pages, int *nr)
 {
        unsigned long next;
        pud_t *pudp;
 
-       pudp = pud_offset(&pgd, addr);
+       pudp = pud_offset(&p4d, addr);
        do {
                pud_t pud = READ_ONCE(*pudp);
 
@@ -1428,6 +1437,31 @@ static int gup_pud_range(pgd_t pgd, unsigned long addr, unsigned long end,
        return 1;
 }
 
+static int gup_p4d_range(pgd_t pgd, unsigned long addr, unsigned long end,
+                        int write, struct page **pages, int *nr)
+{
+       unsigned long next;
+       p4d_t *p4dp;
+
+       p4dp = p4d_offset(&pgd, addr);
+       do {
+               p4d_t p4d = READ_ONCE(*p4dp);
+
+               next = p4d_addr_end(addr, end);
+               if (p4d_none(p4d))
+                       return 0;
+               BUILD_BUG_ON(p4d_huge(p4d));
+               if (unlikely(is_hugepd(__hugepd(p4d_val(p4d))))) {
+                       if (!gup_huge_pd(__hugepd(p4d_val(p4d)), addr,
+                                        P4D_SHIFT, next, write, pages, nr))
+                               return 0;
+               } else if (!gup_pud_range(p4d, addr, next, write, pages, nr))
+                       return 0;
+       } while (p4dp++, addr = next, addr != end);
+
+       return 1;
+}
+
 /*
  * Like get_user_pages_fast() except it's IRQ-safe in that it won't fall back to
  * the regular GUP. It will only return non-negative values.
@@ -1478,7 +1512,7 @@ int __get_user_pages_fast(unsigned long start, int nr_pages, int write,
                        if (!gup_huge_pd(__hugepd(pgd_val(pgd)), addr,
                                         PGDIR_SHIFT, next, write, pages, &nr))
                                break;
-               } else if (!gup_pud_range(pgd, addr, next, write, pages, &nr))
+               } else if (!gup_p4d_range(pgd, addr, next, write, pages, &nr))
                        break;
        } while (pgdp++, addr = next, addr != end);
        local_irq_restore(flags);
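
gup_p4d_range() above follows the fast-GUP convention: each entry is loaded once with READ_ONCE() into a local p4d_t and every test runs on that snapshot, since the lockless walk holds no page-table locks. A small user-space sketch of the snapshot-then-check idiom, with illustrative types:

    #include <stdatomic.h>
    #include <stdbool.h>

    struct entry { _Atomic unsigned long val; };

    static bool walk_entry(struct entry *ep)
    {
            /* Load exactly once; a concurrent update can be seen or not
             * seen, but never half-observed across multiple reads. */
            unsigned long e = atomic_load_explicit(&ep->val,
                                                   memory_order_relaxed);
            if (e == 0)             /* the p4d_none() test, on the snapshot */
                    return false;
            /* ... descend using 'e', never re-reading *ep ... */
            return true;
    }
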
index d36b2af4d1bf4b6621974823f36c52dda405f181..1ebc93e179f3eab40cf469fd67a361ea43a11368 100644 (file)
@@ -1828,7 +1828,7 @@ static void __split_huge_pud_locked(struct vm_area_struct *vma, pud_t *pud,
        VM_BUG_ON_VMA(vma->vm_end < haddr + HPAGE_PUD_SIZE, vma);
        VM_BUG_ON(!pud_trans_huge(*pud) && !pud_devmap(*pud));
 
-       count_vm_event(THP_SPLIT_PMD);
+       count_vm_event(THP_SPLIT_PUD);
 
        pudp_huge_clear_flush_notify(vma, haddr, pud);
 }
@@ -2048,6 +2048,7 @@ void split_huge_pmd_address(struct vm_area_struct *vma, unsigned long address,
                bool freeze, struct page *page)
 {
        pgd_t *pgd;
+       p4d_t *p4d;
        pud_t *pud;
        pmd_t *pmd;
 
@@ -2055,7 +2056,11 @@ void split_huge_pmd_address(struct vm_area_struct *vma, unsigned long address,
        if (!pgd_present(*pgd))
                return;
 
-       pud = pud_offset(pgd, address);
+       p4d = p4d_offset(pgd, address);
+       if (!p4d_present(*p4d))
+               return;
+
+       pud = pud_offset(p4d, address);
        if (!pud_present(*pud))
                return;
 
index a7aa811b7d14c5a6f3825ad32df773fee5526991..e5828875f7bbd7a770d5c23334a0e3994ffe544f 100644 (file)
@@ -4403,7 +4403,9 @@ int hugetlb_reserve_pages(struct inode *inode,
        return 0;
 out_err:
        if (!vma || vma->vm_flags & VM_MAYSHARE)
-               region_abort(resv_map, from, to);
+               /* Don't call region_abort if region_chg failed */
+               if (chg >= 0)
+                       region_abort(resv_map, from, to);
        if (vma && is_vma_resv_set(vma, HPAGE_RESV_OWNER))
                kref_put(&resv_map->refs, resv_map_release);
        return ret;
@@ -4555,7 +4557,8 @@ out:
 int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep)
 {
        pgd_t *pgd = pgd_offset(mm, *addr);
-       pud_t *pud = pud_offset(pgd, *addr);
+       p4d_t *p4d = p4d_offset(pgd, *addr);
+       pud_t *pud = pud_offset(p4d, *addr);
 
        BUG_ON(page_count(virt_to_page(ptep)) == 0);
        if (page_count(virt_to_page(ptep)) == 1)
@@ -4586,11 +4589,13 @@ pte_t *huge_pte_alloc(struct mm_struct *mm,
                        unsigned long addr, unsigned long sz)
 {
        pgd_t *pgd;
+       p4d_t *p4d;
        pud_t *pud;
        pte_t *pte = NULL;
 
        pgd = pgd_offset(mm, addr);
-       pud = pud_alloc(mm, pgd, addr);
+       p4d = p4d_offset(pgd, addr);
+       pud = pud_alloc(mm, p4d, addr);
        if (pud) {
                if (sz == PUD_SIZE) {
                        pte = (pte_t *)pud;
@@ -4610,18 +4615,22 @@ pte_t *huge_pte_alloc(struct mm_struct *mm,
 pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr)
 {
        pgd_t *pgd;
+       p4d_t *p4d;
        pud_t *pud;
-       pmd_t *pmd = NULL;
+       pmd_t *pmd;
 
        pgd = pgd_offset(mm, addr);
-       if (pgd_present(*pgd)) {
-               pud = pud_offset(pgd, addr);
-               if (pud_present(*pud)) {
-                       if (pud_huge(*pud))
-                               return (pte_t *)pud;
-                       pmd = pmd_offset(pud, addr);
-               }
-       }
+       if (!pgd_present(*pgd))
+               return NULL;
+       p4d = p4d_offset(pgd, addr);
+       if (!p4d_present(*p4d))
+               return NULL;
+       pud = pud_offset(p4d, addr);
+       if (!pud_present(*pud))
+               return NULL;
+       if (pud_huge(*pud))
+               return (pte_t *)pud;
+       pmd = pmd_offset(pud, addr);
        return (pte_t *) pmd;
 }
 
@@ -4644,6 +4653,7 @@ follow_huge_pmd(struct mm_struct *mm, unsigned long address,
 {
        struct page *page = NULL;
        spinlock_t *ptl;
+       pte_t pte;
 retry:
        ptl = pmd_lockptr(mm, pmd);
        spin_lock(ptl);
@@ -4653,12 +4663,13 @@ retry:
         */
        if (!pmd_huge(*pmd))
                goto out;
-       if (pmd_present(*pmd)) {
+       pte = huge_ptep_get((pte_t *)pmd);
+       if (pte_present(pte)) {
                page = pmd_page(*pmd) + ((address & ~PMD_MASK) >> PAGE_SHIFT);
                if (flags & FOLL_GET)
                        get_page(page);
        } else {
-               if (is_hugetlb_entry_migration(huge_ptep_get((pte_t *)pmd))) {
+               if (is_hugetlb_entry_migration(pte)) {
                        spin_unlock(ptl);
                        __migration_entry_wait(mm, (pte_t *)pmd, ptl);
                        goto retry;
index 1c260e6b3b3c6a1f26fc1e13a0fdb39099bbbf68..dd2dea8eb0771a506c0b510efc79c3fc5253bda5 100644 (file)
@@ -96,11 +96,6 @@ static inline const void *kasan_shadow_to_mem(const void *shadow_addr)
                << KASAN_SHADOW_SCALE_SHIFT);
 }
 
-static inline bool kasan_report_enabled(void)
-{
-       return !current->kasan_depth;
-}
-
 void kasan_report(unsigned long addr, size_t size,
                bool is_write, unsigned long ip);
 void kasan_report_double_free(struct kmem_cache *cache, void *object,
index 31238dad85fbc6c630963323c236811c0e4f44f5..b96a5f773d880869c1c84510fbb0063ee63faed9 100644 (file)
@@ -30,6 +30,9 @@
  */
 unsigned char kasan_zero_page[PAGE_SIZE] __page_aligned_bss;
 
+#if CONFIG_PGTABLE_LEVELS > 4
+p4d_t kasan_zero_p4d[PTRS_PER_P4D] __page_aligned_bss;
+#endif
 #if CONFIG_PGTABLE_LEVELS > 3
 pud_t kasan_zero_pud[PTRS_PER_PUD] __page_aligned_bss;
 #endif
@@ -82,10 +85,10 @@ static void __init zero_pmd_populate(pud_t *pud, unsigned long addr,
        } while (pmd++, addr = next, addr != end);
 }
 
-static void __init zero_pud_populate(pgd_t *pgd, unsigned long addr,
+static void __init zero_pud_populate(p4d_t *p4d, unsigned long addr,
                                unsigned long end)
 {
-       pud_t *pud = pud_offset(pgd, addr);
+       pud_t *pud = pud_offset(p4d, addr);
        unsigned long next;
 
        do {
@@ -107,6 +110,23 @@ static void __init zero_pud_populate(pgd_t *pgd, unsigned long addr,
        } while (pud++, addr = next, addr != end);
 }
 
+static void __init zero_p4d_populate(pgd_t *pgd, unsigned long addr,
+                               unsigned long end)
+{
+       p4d_t *p4d = p4d_offset(pgd, addr);
+       unsigned long next;
+
+       do {
+               next = p4d_addr_end(addr, end);
+
+               if (p4d_none(*p4d)) {
+                       p4d_populate(&init_mm, p4d,
+                               early_alloc(PAGE_SIZE, NUMA_NO_NODE));
+               }
+               zero_pud_populate(p4d, addr, next);
+       } while (p4d++, addr = next, addr != end);
+}
+
 /**
  * kasan_populate_zero_shadow - populate shadow memory region with
  *                               kasan_zero_page
@@ -125,6 +145,7 @@ void __init kasan_populate_zero_shadow(const void *shadow_start,
                next = pgd_addr_end(addr, end);
 
                if (IS_ALIGNED(addr, PGDIR_SIZE) && end - addr >= PGDIR_SIZE) {
+                       p4d_t *p4d;
                        pud_t *pud;
                        pmd_t *pmd;
 
@@ -135,9 +156,22 @@ void __init kasan_populate_zero_shadow(const void *shadow_start,
                         * 3,2 - level page tables where we don't have
                         * puds/pmds, so pgd_populate() and pud_populate()
                         * are no-ops.
+                        *
+                        * The ifndef is required to avoid build breakage.
+                        *
+                        * With 5level-fixup.h, pgd_populate() is not a nop and
+                        * we reference kasan_zero_p4d, which is not defined
+                        * unless 5-level paging is enabled.
+                        *
+                        * The ifndef can be dropped once all KASAN-enabled
+                        * architectures switch to pgtable-nop4d.h.
                         */
-                       pgd_populate(&init_mm, pgd, lm_alias(kasan_zero_pud));
-                       pud = pud_offset(pgd, addr);
+#ifndef __ARCH_HAS_5LEVEL_HACK
+                       pgd_populate(&init_mm, pgd, lm_alias(kasan_zero_p4d));
+#endif
+                       p4d = p4d_offset(pgd, addr);
+                       p4d_populate(&init_mm, p4d, lm_alias(kasan_zero_pud));
+                       pud = pud_offset(p4d, addr);
                        pud_populate(&init_mm, pud, lm_alias(kasan_zero_pmd));
                        pmd = pmd_offset(pud, addr);
                        pmd_populate_kernel(&init_mm, pmd, lm_alias(kasan_zero_pte));
@@ -148,6 +182,6 @@ void __init kasan_populate_zero_shadow(const void *shadow_start,
                        pgd_populate(&init_mm, pgd,
                                early_alloc(PAGE_SIZE, NUMA_NO_NODE));
                }
-               zero_pud_populate(pgd, addr, next);
+               zero_p4d_populate(pgd, addr, next);
        } while (pgd++, addr = next, addr != end);
 }
index 6f1ed16308736918730ea836b5fecafc908e487b..3a8ddf8baf7dc3d52597bf0e53753c0cc17503cd 100644 (file)
@@ -25,6 +25,7 @@
 #include <linux/printk.h>
 #include <linux/shrinker.h>
 #include <linux/slab.h>
+#include <linux/srcu.h>
 #include <linux/string.h>
 #include <linux/types.h>
 
@@ -103,6 +104,7 @@ static int quarantine_tail;
 /* Total size of all objects in global_quarantine across all batches. */
 static unsigned long quarantine_size;
 static DEFINE_SPINLOCK(quarantine_lock);
+DEFINE_STATIC_SRCU(remove_cache_srcu);
 
 /* Maximum size of the global queue. */
 static unsigned long quarantine_max_size;
@@ -173,17 +175,22 @@ void quarantine_put(struct kasan_free_meta *info, struct kmem_cache *cache)
        struct qlist_head *q;
        struct qlist_head temp = QLIST_INIT;
 
+       /*
+        * Note: irq must be disabled until after we move the batch to the
+        * global quarantine. Otherwise quarantine_remove_cache() can miss
+        * some objects belonging to the cache if they are in our local temp
+        * list. quarantine_remove_cache() executes on_each_cpu() at the
+        * beginning which ensures that it either sees the objects in per-cpu
+        * lists or in the global quarantine.
+        */
        local_irq_save(flags);
 
        q = this_cpu_ptr(&cpu_quarantine);
        qlist_put(q, &info->quarantine_link, cache->size);
-       if (unlikely(q->bytes > QUARANTINE_PERCPU_SIZE))
+       if (unlikely(q->bytes > QUARANTINE_PERCPU_SIZE)) {
                qlist_move_all(q, &temp);
 
-       local_irq_restore(flags);
-
-       if (unlikely(!qlist_empty(&temp))) {
-               spin_lock_irqsave(&quarantine_lock, flags);
+               spin_lock(&quarantine_lock);
                WRITE_ONCE(quarantine_size, quarantine_size + temp.bytes);
                qlist_move_all(&temp, &global_quarantine[quarantine_tail]);
                if (global_quarantine[quarantine_tail].bytes >=
@@ -196,20 +203,33 @@ void quarantine_put(struct kasan_free_meta *info, struct kmem_cache *cache)
                        if (new_tail != quarantine_head)
                                quarantine_tail = new_tail;
                }
-               spin_unlock_irqrestore(&quarantine_lock, flags);
+               spin_unlock(&quarantine_lock);
        }
+
+       local_irq_restore(flags);
 }
 
 void quarantine_reduce(void)
 {
        size_t total_size, new_quarantine_size, percpu_quarantines;
        unsigned long flags;
+       int srcu_idx;
        struct qlist_head to_free = QLIST_INIT;
 
        if (likely(READ_ONCE(quarantine_size) <=
                   READ_ONCE(quarantine_max_size)))
                return;
 
+       /*
+        * srcu critical section ensures that quarantine_remove_cache()
+        * local to_free list. srcu is chosen because (1) it gives us a private
+        * local to_free list. srcu is chosen because (1) it gives us private
+        * grace period domain that does not interfere with anything else,
+        * and (2) it allows synchronize_srcu() to return without waiting
+        * if there are no pending read critical sections (which is the
+        * expected case).
+        */
+       srcu_idx = srcu_read_lock(&remove_cache_srcu);
        spin_lock_irqsave(&quarantine_lock, flags);
 
        /*
@@ -237,6 +257,7 @@ void quarantine_reduce(void)
        spin_unlock_irqrestore(&quarantine_lock, flags);
 
        qlist_free_all(&to_free, NULL);
+       srcu_read_unlock(&remove_cache_srcu, srcu_idx);
 }
 
 static void qlist_move_cache(struct qlist_head *from,
@@ -280,12 +301,28 @@ void quarantine_remove_cache(struct kmem_cache *cache)
        unsigned long flags, i;
        struct qlist_head to_free = QLIST_INIT;
 
+       /*
+        * Must be careful not to miss any objects that are being moved from
+        * per-cpu list to the global quarantine in quarantine_put(),
+        * nor objects being freed in quarantine_reduce(). on_each_cpu()
+        * achieves the first goal, while synchronize_srcu() achieves the
+        * second.
+        */
        on_each_cpu(per_cpu_remove_cache, cache, 1);
 
        spin_lock_irqsave(&quarantine_lock, flags);
-       for (i = 0; i < QUARANTINE_BATCHES; i++)
+       for (i = 0; i < QUARANTINE_BATCHES; i++) {
+               if (qlist_empty(&global_quarantine[i]))
+                       continue;
                qlist_move_cache(&global_quarantine[i], &to_free, cache);
+               /* Scanning the whole quarantine can take a while. */
+               spin_unlock_irqrestore(&quarantine_lock, flags);
+               cond_resched();
+               spin_lock_irqsave(&quarantine_lock, flags);
+       }
        spin_unlock_irqrestore(&quarantine_lock, flags);
 
        qlist_free_all(&to_free, cache);
+
+       synchronize_srcu(&remove_cache_srcu);
 }
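
The quarantine changes combine two mechanisms: keeping IRQs disabled across the per-cpu-to-global move (so on_each_cpu() in quarantine_remove_cache() cannot observe objects stranded on a local temp list), and an SRCU grace period (so objects sitting on quarantine_reduce()'s private to_free list are waited for rather than missed). A simplified pthread model of the grace-period half; the counter-based wait below only approximates what synchronize_srcu() provides, and all names are illustrative:

    #include <pthread.h>

    static int inflight;               /* reducers inside the critical section */
    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
    static pthread_cond_t idle = PTHREAD_COND_INITIALIZER;

    void reduce_begin(void)            /* srcu_read_lock() analogue */
    {
            pthread_mutex_lock(&lock);
            inflight++;
            pthread_mutex_unlock(&lock);
    }

    void reduce_end(void)              /* srcu_read_unlock() analogue */
    {
            pthread_mutex_lock(&lock);
            if (--inflight == 0)
                    pthread_cond_broadcast(&idle);
            pthread_mutex_unlock(&lock);
    }

    void remove_cache_wait(void)       /* synchronize_srcu() analogue */
    {
            pthread_mutex_lock(&lock);
            while (inflight > 0)       /* returns immediately if none pending */
                    pthread_cond_wait(&idle, &lock);
            pthread_mutex_unlock(&lock);
    }
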
index f479365530b6484bbd5cae42064521fed362961e..ab42a0803f161c6834b1362aefd5ded1990eb04f 100644 (file)
@@ -13,7 +13,9 @@
  *
  */
 
+#include <linux/bitops.h>
 #include <linux/ftrace.h>
+#include <linux/init.h>
 #include <linux/kernel.h>
 #include <linux/mm.h>
 #include <linux/printk.h>
@@ -293,6 +295,40 @@ static void kasan_report_error(struct kasan_access_info *info)
        kasan_end_report(&flags);
 }
 
+static unsigned long kasan_flags;
+
+#define KASAN_BIT_REPORTED     0
+#define KASAN_BIT_MULTI_SHOT   1
+
+bool kasan_save_enable_multi_shot(void)
+{
+       return test_and_set_bit(KASAN_BIT_MULTI_SHOT, &kasan_flags);
+}
+EXPORT_SYMBOL_GPL(kasan_save_enable_multi_shot);
+
+void kasan_restore_multi_shot(bool enabled)
+{
+       if (!enabled)
+               clear_bit(KASAN_BIT_MULTI_SHOT, &kasan_flags);
+}
+EXPORT_SYMBOL_GPL(kasan_restore_multi_shot);
+
+static int __init kasan_set_multi_shot(char *str)
+{
+       set_bit(KASAN_BIT_MULTI_SHOT, &kasan_flags);
+       return 1;
+}
+__setup("kasan_multi_shot", kasan_set_multi_shot);
+
+static inline bool kasan_report_enabled(void)
+{
+       if (current->kasan_depth)
+               return false;
+       if (test_bit(KASAN_BIT_MULTI_SHOT, &kasan_flags))
+               return true;
+       return !test_and_set_bit(KASAN_BIT_REPORTED, &kasan_flags);
+}
+
 void kasan_report(unsigned long addr, size_t size,
                bool is_write, unsigned long ip)
 {
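
The new kasan_report_enabled() gate implements one-shot reporting: the first report sets KASAN_BIT_REPORTED and later ones are suppressed unless KASAN_BIT_MULTI_SHOT is set, via the boot parameter or the save/restore helpers used by test_kasan.c earlier. A user-space sketch of the same gate (omitting the per-task kasan_depth recursion check) with C11 atomics; the bit names are illustrative:

    #include <stdatomic.h>
    #include <stdbool.h>

    #define BIT_REPORTED   (1u << 0)
    #define BIT_MULTI_SHOT (1u << 1)

    static _Atomic unsigned int flags;

    static bool report_enabled(void)
    {
            if (atomic_load(&flags) & BIT_MULTI_SHOT)
                    return true;       /* every report goes through */
            /* Atomically set the reported bit; only the first caller sees
             * it clear, so only the first report is emitted. */
            return !(atomic_fetch_or(&flags, BIT_REPORTED) & BIT_REPORTED);
    }
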
index 26c874e90b12ef164d7b80171bb8bea979df5b1a..20036d4f9f13d4dc7b5b091e389b8a7b6b2ca32f 100644 (file)
@@ -1416,7 +1416,7 @@ static void kmemleak_scan(void)
        /* data/bss scanning */
        scan_large_block(_sdata, _edata);
        scan_large_block(__bss_start, __bss_stop);
-       scan_large_block(__start_data_ro_after_init, __end_data_ro_after_init);
+       scan_large_block(__start_ro_after_init, __end_ro_after_init);
 
 #ifdef CONFIG_SMP
        /* per-cpu sections scanning */
index dc5927c812d3d1f9a209fbdbea3a36a61cbde17d..7a2abf0127aef7a9d4879278293d8cab766133e1 100644 (file)
@@ -513,7 +513,43 @@ static long madvise_dontneed(struct vm_area_struct *vma,
        if (!can_madv_dontneed_vma(vma))
                return -EINVAL;
 
-       userfaultfd_remove(vma, prev, start, end);
+       if (!userfaultfd_remove(vma, start, end)) {
+               *prev = NULL; /* mmap_sem has been dropped, prev is stale */
+
+               down_read(&current->mm->mmap_sem);
+               vma = find_vma(current->mm, start);
+               if (!vma)
+                       return -ENOMEM;
+               if (start < vma->vm_start) {
+                       /*
+                        * This "vma" under revalidation is the one
+                        * with the lowest vma->vm_start where start
+                        * is also < vma->vm_end. If start <
+                        * vma->vm_start it means a hole materialized
+                        * in the user address space within the
+                        * virtual range passed to MADV_DONTNEED.
+                        */
+                       return -ENOMEM;
+               }
+               if (!can_madv_dontneed_vma(vma))
+                       return -EINVAL;
+               if (end > vma->vm_end) {
+                       /*
+                        * Don't fail if end > vma->vm_end. If the old
+                        * vma was split while the mmap_sem was
+                        * released, the effect of the concurrent
+                        * operation does not leave MADV_DONTNEED with
+                        * an undefined result. There may be an
+                        * adjacent next vma that we'll walk
+                        * next. userfaultfd_remove() will generate an
+                        * UFFD_EVENT_REMOVE repetition on the
+                        * end-vma->vm_end range, but the manager can
+                        * handle a repetition fine.
+                        */
+                       end = vma->vm_end;
+               }
+               VM_WARN_ON(start >= end);
+       }
        zap_page_range(vma, start, end - start);
        return 0;
 }
@@ -554,8 +590,10 @@ static long madvise_remove(struct vm_area_struct *vma,
         * mmap_sem.
         */
        get_file(f);
-       userfaultfd_remove(vma, prev, start, end);
-       up_read(&current->mm->mmap_sem);
+       if (userfaultfd_remove(vma, start, end)) {
+               /* mmap_sem was not released by userfaultfd_remove() */
+               up_read(&current->mm->mmap_sem);
+       }
        error = vfs_fallocate(f,
                                FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
                                offset, end - start);
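
The madvise_dontneed() change above shows the standard discipline after a callee may drop a lock: treat every cached pointer as stale, re-take mmap_sem, look the vma up again, and re-check the range, failing with -ENOMEM if a hole appeared and clamping end if the vma shrank. A compact user-space sketch of that revalidation pattern; the range type and lookup_range() helper are invented for illustration:

    #include <pthread.h>
    #include <errno.h>

    struct range { unsigned long start, end; };

    static struct range table = { 0x1000, 0x9000 };
    static pthread_mutex_t space_lock = PTHREAD_MUTEX_INITIALIZER;

    static struct range *lookup_range(unsigned long addr)
    {
            return addr < table.end ? &table : NULL;  /* toy lookup */
    }

    int operate_after_drop(unsigned long start, unsigned long end)
    {
            struct range *r;

            pthread_mutex_lock(&space_lock);
            r = lookup_range(start);           /* revalidate from scratch */
            if (!r || start < r->start) {
                    pthread_mutex_unlock(&space_lock);
                    return -ENOMEM;            /* a hole appeared */
            }
            if (end > r->end)
                    end = r->end;              /* clamp to the new range */
            /* ... proceed with [start, end) under the re-taken lock ... */
            pthread_mutex_unlock(&space_lock);
            return 0;
    }
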
index b64b47803e529a87d87f3e3f022e97f17ff606be..696f06d17c4e89b676f19c3c3a5a4c1908697caf 100644 (file)
@@ -1118,7 +1118,10 @@ unsigned long __init_memblock memblock_next_valid_pfn(unsigned long pfn,
                }
        } while (left < right);
 
-       return min(PHYS_PFN(type->regions[right].base), max_pfn);
+       if (right == type->cnt)
+               return max_pfn;
+       else
+               return min(PHYS_PFN(type->regions[right].base), max_pfn);
 }
 
 /**
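
The memblock_next_valid_pfn() fix above guards the case where the binary search settles on right == type->cnt, for which type->regions[right] is one past the last element. A tiny sketch of the guarded lookup, with illustrative parameters:

    #include <stddef.h>

    /* After a binary search, an index equal to the element count means
     * "past the last region"; dereferencing it would read out of bounds. */
    static unsigned long next_valid(const unsigned long *base, size_t cnt,
                                    size_t right, unsigned long max_val)
    {
            if (right == cnt)          /* ran off the end: no next region */
                    return max_val;
            return base[right] < max_val ? base[right] : max_val;
    }
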
index c52ec893e241cf6b52764797f6aea5ed56219e23..2bd7541d7c11231431c060ca6cfe84a89f096fe3 100644 (file)
@@ -466,6 +466,8 @@ static void mem_cgroup_update_tree(struct mem_cgroup *memcg, struct page *page)
        struct mem_cgroup_tree_per_node *mctz;
 
        mctz = soft_limit_tree_from_page(page);
+       if (!mctz)
+               return;
        /*
         * Necessary to update all ancestors when hierarchy is used.
         * because their event counter is not touched.
@@ -503,7 +505,8 @@ static void mem_cgroup_remove_from_trees(struct mem_cgroup *memcg)
        for_each_node(nid) {
                mz = mem_cgroup_nodeinfo(memcg, nid);
                mctz = soft_limit_tree_node(nid);
-               mem_cgroup_remove_exceeded(mz, mctz);
+               if (mctz)
+                       mem_cgroup_remove_exceeded(mz, mctz);
        }
 }
 
@@ -2558,7 +2561,7 @@ unsigned long mem_cgroup_soft_limit_reclaim(pg_data_t *pgdat, int order,
         * is empty. Do it lockless to prevent lock bouncing. Races
         * are acceptable as soft limit is best effort anyway.
         */
-       if (RB_EMPTY_ROOT(&mctz->rb_root))
+       if (!mctz || RB_EMPTY_ROOT(&mctz->rb_root))
                return 0;
 
        /*
@@ -4135,17 +4138,22 @@ static void free_mem_cgroup_per_node_info(struct mem_cgroup *memcg, int node)
        kfree(memcg->nodeinfo[node]);
 }
 
-static void mem_cgroup_free(struct mem_cgroup *memcg)
+static void __mem_cgroup_free(struct mem_cgroup *memcg)
 {
        int node;
 
-       memcg_wb_domain_exit(memcg);
        for_each_node(node)
                free_mem_cgroup_per_node_info(memcg, node);
        free_percpu(memcg->stat);
        kfree(memcg);
 }
 
+static void mem_cgroup_free(struct mem_cgroup *memcg)
+{
+       memcg_wb_domain_exit(memcg);
+       __mem_cgroup_free(memcg);
+}
+
 static struct mem_cgroup *mem_cgroup_alloc(void)
 {
        struct mem_cgroup *memcg;
@@ -4196,7 +4204,7 @@ static struct mem_cgroup *mem_cgroup_alloc(void)
 fail:
        if (memcg->id.id > 0)
                idr_remove(&mem_cgroup_idr, memcg->id.id);
-       mem_cgroup_free(memcg);
+       __mem_cgroup_free(memcg);
        return NULL;
 }
 
index a97a4cec2e1fcd94c5e1eb5f1af334749efe64fa..235ba51b2fbf07ffeeeb6b70d6522b4b0addb3de 100644 (file)
@@ -445,7 +445,7 @@ static inline void free_pmd_range(struct mmu_gather *tlb, pud_t *pud,
        mm_dec_nr_pmds(tlb->mm);
 }
 
-static inline void free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
+static inline void free_pud_range(struct mmu_gather *tlb, p4d_t *p4d,
                                unsigned long addr, unsigned long end,
                                unsigned long floor, unsigned long ceiling)
 {
@@ -454,7 +454,7 @@ static inline void free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
        unsigned long start;
 
        start = addr;
-       pud = pud_offset(pgd, addr);
+       pud = pud_offset(p4d, addr);
        do {
                next = pud_addr_end(addr, end);
                if (pud_none_or_clear_bad(pud))
@@ -462,6 +462,39 @@ static inline void free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
                free_pmd_range(tlb, pud, addr, next, floor, ceiling);
        } while (pud++, addr = next, addr != end);
 
+       start &= P4D_MASK;
+       if (start < floor)
+               return;
+       if (ceiling) {
+               ceiling &= P4D_MASK;
+               if (!ceiling)
+                       return;
+       }
+       if (end - 1 > ceiling - 1)
+               return;
+
+       pud = pud_offset(p4d, start);
+       p4d_clear(p4d);
+       pud_free_tlb(tlb, pud, start);
+}
+
+static inline void free_p4d_range(struct mmu_gather *tlb, pgd_t *pgd,
+                               unsigned long addr, unsigned long end,
+                               unsigned long floor, unsigned long ceiling)
+{
+       p4d_t *p4d;
+       unsigned long next;
+       unsigned long start;
+
+       start = addr;
+       p4d = p4d_offset(pgd, addr);
+       do {
+               next = p4d_addr_end(addr, end);
+               if (p4d_none_or_clear_bad(p4d))
+                       continue;
+               free_pud_range(tlb, p4d, addr, next, floor, ceiling);
+       } while (p4d++, addr = next, addr != end);
+
        start &= PGDIR_MASK;
        if (start < floor)
                return;
@@ -473,9 +506,9 @@ static inline void free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
        if (end - 1 > ceiling - 1)
                return;
 
-       pud = pud_offset(pgd, start);
+       p4d = p4d_offset(pgd, start);
        pgd_clear(pgd);
-       pud_free_tlb(tlb, pud, start);
+       p4d_free_tlb(tlb, p4d, start);
 }
 
 /*
@@ -539,7 +572,7 @@ void free_pgd_range(struct mmu_gather *tlb,
                next = pgd_addr_end(addr, end);
                if (pgd_none_or_clear_bad(pgd))
                        continue;
-               free_pud_range(tlb, pgd, addr, next, floor, ceiling);
+               free_p4d_range(tlb, pgd, addr, next, floor, ceiling);
        } while (pgd++, addr = next, addr != end);
 }
 
@@ -658,7 +691,8 @@ static void print_bad_pte(struct vm_area_struct *vma, unsigned long addr,
                          pte_t pte, struct page *page)
 {
        pgd_t *pgd = pgd_offset(vma->vm_mm, addr);
-       pud_t *pud = pud_offset(pgd, addr);
+       p4d_t *p4d = p4d_offset(pgd, addr);
+       pud_t *pud = pud_offset(p4d, addr);
        pmd_t *pmd = pmd_offset(pud, addr);
        struct address_space *mapping;
        pgoff_t index;
@@ -1023,16 +1057,16 @@ static inline int copy_pmd_range(struct mm_struct *dst_mm, struct mm_struct *src
 }
 
 static inline int copy_pud_range(struct mm_struct *dst_mm, struct mm_struct *src_mm,
-               pgd_t *dst_pgd, pgd_t *src_pgd, struct vm_area_struct *vma,
+               p4d_t *dst_p4d, p4d_t *src_p4d, struct vm_area_struct *vma,
                unsigned long addr, unsigned long end)
 {
        pud_t *src_pud, *dst_pud;
        unsigned long next;
 
-       dst_pud = pud_alloc(dst_mm, dst_pgd, addr);
+       dst_pud = pud_alloc(dst_mm, dst_p4d, addr);
        if (!dst_pud)
                return -ENOMEM;
-       src_pud = pud_offset(src_pgd, addr);
+       src_pud = pud_offset(src_p4d, addr);
        do {
                next = pud_addr_end(addr, end);
                if (pud_trans_huge(*src_pud) || pud_devmap(*src_pud)) {
@@ -1056,6 +1090,28 @@ static inline int copy_pud_range(struct mm_struct *dst_mm, struct mm_struct *src
        return 0;
 }
 
+static inline int copy_p4d_range(struct mm_struct *dst_mm, struct mm_struct *src_mm,
+               pgd_t *dst_pgd, pgd_t *src_pgd, struct vm_area_struct *vma,
+               unsigned long addr, unsigned long end)
+{
+       p4d_t *src_p4d, *dst_p4d;
+       unsigned long next;
+
+       dst_p4d = p4d_alloc(dst_mm, dst_pgd, addr);
+       if (!dst_p4d)
+               return -ENOMEM;
+       src_p4d = p4d_offset(src_pgd, addr);
+       do {
+               next = p4d_addr_end(addr, end);
+               if (p4d_none_or_clear_bad(src_p4d))
+                       continue;
+               if (copy_pud_range(dst_mm, src_mm, dst_p4d, src_p4d,
+                                               vma, addr, next))
+                       return -ENOMEM;
+       } while (dst_p4d++, src_p4d++, addr = next, addr != end);
+       return 0;
+}
+
 int copy_page_range(struct mm_struct *dst_mm, struct mm_struct *src_mm,
                struct vm_area_struct *vma)
 {
@@ -1111,7 +1167,7 @@ int copy_page_range(struct mm_struct *dst_mm, struct mm_struct *src_mm,
                next = pgd_addr_end(addr, end);
                if (pgd_none_or_clear_bad(src_pgd))
                        continue;
-               if (unlikely(copy_pud_range(dst_mm, src_mm, dst_pgd, src_pgd,
+               if (unlikely(copy_p4d_range(dst_mm, src_mm, dst_pgd, src_pgd,
                                            vma, addr, next))) {
                        ret = -ENOMEM;
                        break;
@@ -1267,14 +1323,14 @@ next:
 }
 
 static inline unsigned long zap_pud_range(struct mmu_gather *tlb,
-                               struct vm_area_struct *vma, pgd_t *pgd,
+                               struct vm_area_struct *vma, p4d_t *p4d,
                                unsigned long addr, unsigned long end,
                                struct zap_details *details)
 {
        pud_t *pud;
        unsigned long next;
 
-       pud = pud_offset(pgd, addr);
+       pud = pud_offset(p4d, addr);
        do {
                next = pud_addr_end(addr, end);
                if (pud_trans_huge(*pud) || pud_devmap(*pud)) {
@@ -1295,6 +1351,25 @@ next:
        return addr;
 }
 
+static inline unsigned long zap_p4d_range(struct mmu_gather *tlb,
+                               struct vm_area_struct *vma, pgd_t *pgd,
+                               unsigned long addr, unsigned long end,
+                               struct zap_details *details)
+{
+       p4d_t *p4d;
+       unsigned long next;
+
+       p4d = p4d_offset(pgd, addr);
+       do {
+               next = p4d_addr_end(addr, end);
+               if (p4d_none_or_clear_bad(p4d))
+                       continue;
+               next = zap_pud_range(tlb, vma, p4d, addr, next, details);
+       } while (p4d++, addr = next, addr != end);
+
+       return addr;
+}
+
 void unmap_page_range(struct mmu_gather *tlb,
                             struct vm_area_struct *vma,
                             unsigned long addr, unsigned long end,
@@ -1310,7 +1385,7 @@ void unmap_page_range(struct mmu_gather *tlb,
                next = pgd_addr_end(addr, end);
                if (pgd_none_or_clear_bad(pgd))
                        continue;
-               next = zap_pud_range(tlb, vma, pgd, addr, next, details);
+               next = zap_p4d_range(tlb, vma, pgd, addr, next, details);
        } while (pgd++, addr = next, addr != end);
        tlb_end_vma(tlb, vma);
 }
@@ -1465,16 +1540,24 @@ EXPORT_SYMBOL_GPL(zap_vma_ptes);
 pte_t *__get_locked_pte(struct mm_struct *mm, unsigned long addr,
                        spinlock_t **ptl)
 {
-       pgd_t *pgd = pgd_offset(mm, addr);
-       pud_t *pud = pud_alloc(mm, pgd, addr);
-       if (pud) {
-               pmd_t *pmd = pmd_alloc(mm, pud, addr);
-               if (pmd) {
-                       VM_BUG_ON(pmd_trans_huge(*pmd));
-                       return pte_alloc_map_lock(mm, pmd, addr, ptl);
-               }
-       }
-       return NULL;
+       pgd_t *pgd;
+       p4d_t *p4d;
+       pud_t *pud;
+       pmd_t *pmd;
+
+       pgd = pgd_offset(mm, addr);
+       p4d = p4d_alloc(mm, pgd, addr);
+       if (!p4d)
+               return NULL;
+       pud = pud_alloc(mm, p4d, addr);
+       if (!pud)
+               return NULL;
+       pmd = pmd_alloc(mm, pud, addr);
+       if (!pmd)
+               return NULL;
+
+       VM_BUG_ON(pmd_trans_huge(*pmd));
+       return pte_alloc_map_lock(mm, pmd, addr, ptl);
 }
 
 /*
@@ -1740,7 +1823,7 @@ static inline int remap_pmd_range(struct mm_struct *mm, pud_t *pud,
        return 0;
 }
 
-static inline int remap_pud_range(struct mm_struct *mm, pgd_t *pgd,
+static inline int remap_pud_range(struct mm_struct *mm, p4d_t *p4d,
                        unsigned long addr, unsigned long end,
                        unsigned long pfn, pgprot_t prot)
 {
@@ -1748,7 +1831,7 @@ static inline int remap_pud_range(struct mm_struct *mm, pgd_t *pgd,
        unsigned long next;
 
        pfn -= addr >> PAGE_SHIFT;
-       pud = pud_alloc(mm, pgd, addr);
+       pud = pud_alloc(mm, p4d, addr);
        if (!pud)
                return -ENOMEM;
        do {
@@ -1760,6 +1843,26 @@ static inline int remap_pud_range(struct mm_struct *mm, pgd_t *pgd,
        return 0;
 }
 
+static inline int remap_p4d_range(struct mm_struct *mm, pgd_t *pgd,
+                       unsigned long addr, unsigned long end,
+                       unsigned long pfn, pgprot_t prot)
+{
+       p4d_t *p4d;
+       unsigned long next;
+
+       pfn -= addr >> PAGE_SHIFT;
+       p4d = p4d_alloc(mm, pgd, addr);
+       if (!p4d)
+               return -ENOMEM;
+       do {
+               next = p4d_addr_end(addr, end);
+               if (remap_pud_range(mm, p4d, addr, next,
+                               pfn + (addr >> PAGE_SHIFT), prot))
+                       return -ENOMEM;
+       } while (p4d++, addr = next, addr != end);
+       return 0;
+}
+
 /**
  * remap_pfn_range - remap kernel memory to userspace
  * @vma: user vma to map to
@@ -1816,7 +1919,7 @@ int remap_pfn_range(struct vm_area_struct *vma, unsigned long addr,
        flush_cache_range(vma, addr, end);
        do {
                next = pgd_addr_end(addr, end);
-               err = remap_pud_range(mm, pgd, addr, next,
+               err = remap_p4d_range(mm, pgd, addr, next,
                                pfn + (addr >> PAGE_SHIFT), prot);
                if (err)
                        break;
@@ -1932,7 +2035,7 @@ static int apply_to_pmd_range(struct mm_struct *mm, pud_t *pud,
        return err;
 }
 
-static int apply_to_pud_range(struct mm_struct *mm, pgd_t *pgd,
+static int apply_to_pud_range(struct mm_struct *mm, p4d_t *p4d,
                                     unsigned long addr, unsigned long end,
                                     pte_fn_t fn, void *data)
 {
@@ -1940,7 +2043,7 @@ static int apply_to_pud_range(struct mm_struct *mm, pgd_t *pgd,
        unsigned long next;
        int err;
 
-       pud = pud_alloc(mm, pgd, addr);
+       pud = pud_alloc(mm, p4d, addr);
        if (!pud)
                return -ENOMEM;
        do {
@@ -1952,6 +2055,26 @@ static int apply_to_pud_range(struct mm_struct *mm, pgd_t *pgd,
        return err;
 }
 
+static int apply_to_p4d_range(struct mm_struct *mm, pgd_t *pgd,
+                                    unsigned long addr, unsigned long end,
+                                    pte_fn_t fn, void *data)
+{
+       p4d_t *p4d;
+       unsigned long next;
+       int err;
+
+       p4d = p4d_alloc(mm, pgd, addr);
+       if (!p4d)
+               return -ENOMEM;
+       do {
+               next = p4d_addr_end(addr, end);
+               err = apply_to_pud_range(mm, p4d, addr, next, fn, data);
+               if (err)
+                       break;
+       } while (p4d++, addr = next, addr != end);
+       return err;
+}
+
 /*
  * Scan a region of virtual memory, filling in page tables as necessary
  * and calling a provided function on each leaf page table.
@@ -1970,7 +2093,7 @@ int apply_to_page_range(struct mm_struct *mm, unsigned long addr,
        pgd = pgd_offset(mm, addr);
        do {
                next = pgd_addr_end(addr, end);
-               err = apply_to_pud_range(mm, pgd, addr, next, fn, data);
+               err = apply_to_p4d_range(mm, pgd, addr, next, fn, data);
                if (err)
                        break;
        } while (pgd++, addr = next, addr != end);
@@ -3653,11 +3776,15 @@ static int __handle_mm_fault(struct vm_area_struct *vma, unsigned long address,
        };
        struct mm_struct *mm = vma->vm_mm;
        pgd_t *pgd;
+       p4d_t *p4d;
        int ret;
 
        pgd = pgd_offset(mm, address);
+       p4d = p4d_alloc(mm, pgd, address);
+       if (!p4d)
+               return VM_FAULT_OOM;
 
-       vmf.pud = pud_alloc(mm, pgd, address);
+       vmf.pud = pud_alloc(mm, p4d, address);
        if (!vmf.pud)
                return VM_FAULT_OOM;
        if (pud_none(*vmf.pud) && transparent_hugepage_enabled(vma)) {
@@ -3779,12 +3906,35 @@ int handle_mm_fault(struct vm_area_struct *vma, unsigned long address,
 }
 EXPORT_SYMBOL_GPL(handle_mm_fault);
 
+#ifndef __PAGETABLE_P4D_FOLDED
+/*
+ * Allocate p4d page table.
+ * We've already handled the fast-path in-line.
+ */
+int __p4d_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address)
+{
+       p4d_t *new = p4d_alloc_one(mm, address);
+       if (!new)
+               return -ENOMEM;
+
+       smp_wmb(); /* See comment in __pte_alloc */
+
+       spin_lock(&mm->page_table_lock);
+       if (pgd_present(*pgd))          /* Another has populated it */
+               p4d_free(mm, new);
+       else
+               pgd_populate(mm, pgd, new);
+       spin_unlock(&mm->page_table_lock);
+       return 0;
+}
+#endif /* __PAGETABLE_P4D_FOLDED */
+
 #ifndef __PAGETABLE_PUD_FOLDED
 /*
  * Allocate page upper directory.
  * We've already handled the fast-path in-line.
  */
-int __pud_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address)
+int __pud_alloc(struct mm_struct *mm, p4d_t *p4d, unsigned long address)
 {
        pud_t *new = pud_alloc_one(mm, address);
        if (!new)
@@ -3793,10 +3943,17 @@ int __pud_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address)
        smp_wmb(); /* See comment in __pte_alloc */
 
        spin_lock(&mm->page_table_lock);
-       if (pgd_present(*pgd))          /* Another has populated it */
+#ifndef __ARCH_HAS_5LEVEL_HACK
+       if (p4d_present(*p4d))          /* Another has populated it */
                pud_free(mm, new);
        else
-               pgd_populate(mm, pgd, new);
+               p4d_populate(mm, p4d, new);
+#else
+       if (pgd_present(*p4d))          /* Another has populated it */
+               pud_free(mm, new);
+       else
+               pgd_populate(mm, p4d, new);
+#endif /* __ARCH_HAS_5LEVEL_HACK */
        spin_unlock(&mm->page_table_lock);
        return 0;
 }
@@ -3839,6 +3996,7 @@ static int __follow_pte_pmd(struct mm_struct *mm, unsigned long address,
                pte_t **ptepp, pmd_t **pmdpp, spinlock_t **ptlp)
 {
        pgd_t *pgd;
+       p4d_t *p4d;
        pud_t *pud;
        pmd_t *pmd;
        pte_t *ptep;
@@ -3847,7 +4005,11 @@ static int __follow_pte_pmd(struct mm_struct *mm, unsigned long address,
        if (pgd_none(*pgd) || unlikely(pgd_bad(*pgd)))
                goto out;
 
-       pud = pud_offset(pgd, address);
+       p4d = p4d_offset(pgd, address);
+       if (p4d_none(*p4d) || unlikely(p4d_bad(*p4d)))
+               goto out;
+
+       pud = pud_offset(p4d, address);
        if (pud_none(*pud) || unlikely(pud_bad(*pud)))
                goto out;
 
index 295479b792ec488b6d984ef98e7e715f6ac162b4..6fa7208bcd564ec8fb6bcf25e206aef9bd724ecb 100644 (file)
@@ -125,9 +125,12 @@ void put_online_mems(void)
 
 }
 
+/* Serializes write accesses to mem_hotplug.active_writer. */
+static DEFINE_MUTEX(memory_add_remove_lock);
+
 void mem_hotplug_begin(void)
 {
-       assert_held_device_hotplug();
+       mutex_lock(&memory_add_remove_lock);
 
        mem_hotplug.active_writer = current;
 
@@ -147,6 +150,7 @@ void mem_hotplug_done(void)
        mem_hotplug.active_writer = NULL;
        mutex_unlock(&mem_hotplug.lock);
        memhp_lock_release();
+       mutex_unlock(&memory_add_remove_lock);
 }
 
 /* add this memory to iomem resource */
index 9a0897a14d37be3d7759d577f98060fa75c7be0f..ed97c2c14fa80b47ffbf7fa22ec6d4b9b57202b1 100644 (file)
@@ -209,8 +209,11 @@ static int remove_migration_pte(struct page *page, struct vm_area_struct *vma,
 
        VM_BUG_ON_PAGE(PageTail(page), page);
        while (page_vma_mapped_walk(&pvmw)) {
-               new = page - pvmw.page->index +
-                       linear_page_index(vma, pvmw.address);
+               if (PageKsm(page))
+                       new = page;
+               else
+                       new = page - pvmw.page->index +
+                               linear_page_index(vma, pvmw.address);
 
                get_page(new);
                pte = pte_mkold(mk_pte(new, READ_ONCE(vma->vm_page_prot)));
index 1050511f8b2bdbfbd55a69b8fcfc41ea6cd51b72..0dd9ca18e19ed7ddb499a480c5831c312791b10a 100644 (file)
@@ -380,6 +380,7 @@ static unsigned long __munlock_pagevec_fill(struct pagevec *pvec,
        pte = get_locked_pte(vma->vm_mm, start, &ptl);
        /* Make sure we do not cross the page table boundary */
        end = pgd_addr_end(start, end);
+       end = p4d_addr_end(start, end);
        end = pud_addr_end(start, end);
        end = pmd_addr_end(start, end);
 
@@ -442,7 +443,7 @@ void munlock_vma_pages_range(struct vm_area_struct *vma,
 
        while (start < end) {
                struct page *page;
-               unsigned int page_mask;
+               unsigned int page_mask = 0;
                unsigned long page_increm;
                struct pagevec pvec;
                struct zone *zone;
@@ -456,8 +457,7 @@ void munlock_vma_pages_range(struct vm_area_struct *vma,
                 * suits munlock very well (and if somehow an abnormal page
                 * has sneaked into the range, we won't oops here: great).
                 */
-               page = follow_page_mask(vma, start, FOLL_GET | FOLL_DUMP,
-                               &page_mask);
+               page = follow_page(vma, start, FOLL_GET | FOLL_DUMP);
 
                if (page && !IS_ERR(page)) {
                        if (PageTransTail(page)) {
@@ -468,8 +468,8 @@ void munlock_vma_pages_range(struct vm_area_struct *vma,
                                /*
                                 * Any THP page found by follow_page_mask() may
                                 * have gotten split before reaching
-                                * munlock_vma_page(), so we need to recompute
-                                * the page_mask here.
+                                * munlock_vma_page(), so we need to compute
+                                * the page_mask here instead.
                                 */
                                page_mask = munlock_vma_page(page);
                                unlock_page(page);
index 848e946b08e58e31bf6482bd091338a43bb66fe1..8edd0d576254d4c6a3974a42dd2a27eff17fa8bf 100644 (file)
@@ -193,14 +193,14 @@ static inline unsigned long change_pmd_range(struct vm_area_struct *vma,
 }
 
 static inline unsigned long change_pud_range(struct vm_area_struct *vma,
-               pgd_t *pgd, unsigned long addr, unsigned long end,
+               p4d_t *p4d, unsigned long addr, unsigned long end,
                pgprot_t newprot, int dirty_accountable, int prot_numa)
 {
        pud_t *pud;
        unsigned long next;
        unsigned long pages = 0;
 
-       pud = pud_offset(pgd, addr);
+       pud = pud_offset(p4d, addr);
        do {
                next = pud_addr_end(addr, end);
                if (pud_none_or_clear_bad(pud))
@@ -212,6 +212,26 @@ static inline unsigned long change_pud_range(struct vm_area_struct *vma,
        return pages;
 }
 
+static inline unsigned long change_p4d_range(struct vm_area_struct *vma,
+               pgd_t *pgd, unsigned long addr, unsigned long end,
+               pgprot_t newprot, int dirty_accountable, int prot_numa)
+{
+       p4d_t *p4d;
+       unsigned long next;
+       unsigned long pages = 0;
+
+       p4d = p4d_offset(pgd, addr);
+       do {
+               next = p4d_addr_end(addr, end);
+               if (p4d_none_or_clear_bad(p4d))
+                       continue;
+               pages += change_pud_range(vma, p4d, addr, next, newprot,
+                                dirty_accountable, prot_numa);
+       } while (p4d++, addr = next, addr != end);
+
+       return pages;
+}
+
 static unsigned long change_protection_range(struct vm_area_struct *vma,
                unsigned long addr, unsigned long end, pgprot_t newprot,
                int dirty_accountable, int prot_numa)
@@ -230,7 +250,7 @@ static unsigned long change_protection_range(struct vm_area_struct *vma,
                next = pgd_addr_end(addr, end);
                if (pgd_none_or_clear_bad(pgd))
                        continue;
-               pages += change_pud_range(vma, pgd, addr, next, newprot,
+               pages += change_p4d_range(vma, pgd, addr, next, newprot,
                                 dirty_accountable, prot_numa);
        } while (pgd++, addr = next, addr != end);
 
index 8233b0105c8258ec5757c42c0a65e34b2908272c..cd8a1b199ef9496ef63a50d97f92e648b8eecd58 100644 (file)
@@ -32,6 +32,7 @@
 static pmd_t *get_old_pmd(struct mm_struct *mm, unsigned long addr)
 {
        pgd_t *pgd;
+       p4d_t *p4d;
        pud_t *pud;
        pmd_t *pmd;
 
@@ -39,7 +40,11 @@ static pmd_t *get_old_pmd(struct mm_struct *mm, unsigned long addr)
        if (pgd_none_or_clear_bad(pgd))
                return NULL;
 
-       pud = pud_offset(pgd, addr);
+       p4d = p4d_offset(pgd, addr);
+       if (p4d_none_or_clear_bad(p4d))
+               return NULL;
+
+       pud = pud_offset(p4d, addr);
        if (pud_none_or_clear_bad(pud))
                return NULL;
 
@@ -54,11 +59,15 @@ static pmd_t *alloc_new_pmd(struct mm_struct *mm, struct vm_area_struct *vma,
                            unsigned long addr)
 {
        pgd_t *pgd;
+       p4d_t *p4d;
        pud_t *pud;
        pmd_t *pmd;
 
        pgd = pgd_offset(mm, addr);
-       pud = pud_alloc(mm, pgd, addr);
+       p4d = p4d_alloc(mm, pgd, addr);
+       if (!p4d)
+               return NULL;
+       pud = pud_alloc(mm, p4d, addr);
        if (!pud)
                return NULL;
 
index eaa64d2ffdc553af8ce6146bfa60ba73908f8b6a..6cbde310abed8df22f9cd6ed80fcc252f4c80f43 100644 (file)
@@ -873,7 +873,8 @@ done_merging:
                higher_page = page + (combined_pfn - pfn);
                buddy_pfn = __find_buddy_pfn(combined_pfn, order + 1);
                higher_buddy = higher_page + (buddy_pfn - combined_pfn);
-               if (page_is_buddy(higher_page, higher_buddy, order + 1)) {
+               if (pfn_valid_within(buddy_pfn) &&
+                   page_is_buddy(higher_page, higher_buddy, order + 1)) {
                        list_add_tail(&page->lru,
                                &zone->free_area[order].free_list[migratetype]);
                        goto out;
index a23001a22c151886919d3c24508d64ed1a7c8c16..c4c9def8ffea47b4838fc3095221ee90e0c0fae3 100644 (file)
@@ -104,6 +104,7 @@ bool page_vma_mapped_walk(struct page_vma_mapped_walk *pvmw)
        struct mm_struct *mm = pvmw->vma->vm_mm;
        struct page *page = pvmw->page;
        pgd_t *pgd;
+       p4d_t *p4d;
        pud_t *pud;
 
        /* The only possible pmd mapping has been handled on last iteration */
@@ -133,7 +134,10 @@ restart:
        pgd = pgd_offset(mm, pvmw->address);
        if (!pgd_present(*pgd))
                return false;
-       pud = pud_offset(pgd, pvmw->address);
+       p4d = p4d_offset(pgd, pvmw->address);
+       if (!p4d_present(*p4d))
+               return false;
+       pud = pud_offset(p4d, pvmw->address);
        if (!pud_present(*pud))
                return false;
        pvmw->pmd = pmd_offset(pud, pvmw->address);
index 03761577ae86e462cf2a7218892be7a28ae5877d..60f7856e508fb90e6010feadad2233f4d148341e 100644 (file)
@@ -69,14 +69,14 @@ again:
        return err;
 }
 
-static int walk_pud_range(pgd_t *pgd, unsigned long addr, unsigned long end,
+static int walk_pud_range(p4d_t *p4d, unsigned long addr, unsigned long end,
                          struct mm_walk *walk)
 {
        pud_t *pud;
        unsigned long next;
        int err = 0;
 
-       pud = pud_offset(pgd, addr);
+       pud = pud_offset(p4d, addr);
        do {
  again:
                next = pud_addr_end(addr, end);
@@ -113,6 +113,32 @@ static int walk_pud_range(pgd_t *pgd, unsigned long addr, unsigned long end,
        return err;
 }
 
+static int walk_p4d_range(pgd_t *pgd, unsigned long addr, unsigned long end,
+                         struct mm_walk *walk)
+{
+       p4d_t *p4d;
+       unsigned long next;
+       int err = 0;
+
+       p4d = p4d_offset(pgd, addr);
+       do {
+               next = p4d_addr_end(addr, end);
+               if (p4d_none_or_clear_bad(p4d)) {
+                       if (walk->pte_hole)
+                               err = walk->pte_hole(addr, next, walk);
+                       if (err)
+                               break;
+                       continue;
+               }
+               if (walk->pmd_entry || walk->pte_entry)
+                       err = walk_pud_range(p4d, addr, next, walk);
+               if (err)
+                       break;
+       } while (p4d++, addr = next, addr != end);
+
+       return err;
+}
+
 static int walk_pgd_range(unsigned long addr, unsigned long end,
                          struct mm_walk *walk)
 {
@@ -131,7 +157,7 @@ static int walk_pgd_range(unsigned long addr, unsigned long end,
                        continue;
                }
                if (walk->pmd_entry || walk->pte_entry)
-                       err = walk_pud_range(pgd, addr, next, walk);
+                       err = walk_p4d_range(pgd, addr, next, walk);
                if (err)
                        break;
        } while (pgd++, addr = next, addr != end);
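A hedged usage sketch of the walker API this file implements, using the struct mm_walk callback signature of this kernel generation (callback and counter names are illustrative; the caller must hold mmap_sem):

	static int count_pmds(pmd_t *pmd, unsigned long addr,
			      unsigned long next, struct mm_walk *walk)
	{
		(*(unsigned long *)walk->private)++;
		return 0;	/* a nonzero return aborts the walk */
	}

	static unsigned long count_range(struct mm_struct *mm,
					 unsigned long start, unsigned long end)
	{
		unsigned long n = 0;
		struct mm_walk walk = {
			.pmd_entry = count_pmds,
			.mm = mm,
			.private = &n,
		};

		walk_page_range(start, end, &walk);
		return n;
	}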
index 538998a137d24e069969dcc3ed00cedc6c25616f..9ac639499bd1146347557141b10f1135ee2c0048 100644 (file)
@@ -21,7 +21,6 @@ static struct page *pcpu_chunk_page(struct pcpu_chunk *chunk,
 
 /**
  * pcpu_get_pages - get temp pages array
- * @chunk: chunk of interest
  *
  * Returns pointer to array of pointers to struct page which can be indexed
  * with pcpu_page_idx().  Note that there is only one array and accesses
@@ -30,7 +29,7 @@ static struct page *pcpu_chunk_page(struct pcpu_chunk *chunk,
  * RETURNS:
  * Pointer to temp pages array on success.
  */
-static struct page **pcpu_get_pages(struct pcpu_chunk *chunk_alloc)
+static struct page **pcpu_get_pages(void)
 {
        static struct page **pages;
        size_t pages_size = pcpu_nr_units * pcpu_unit_pages * sizeof(pages[0]);
@@ -275,7 +274,7 @@ static int pcpu_populate_chunk(struct pcpu_chunk *chunk,
 {
        struct page **pages;
 
-       pages = pcpu_get_pages(chunk);
+       pages = pcpu_get_pages();
        if (!pages)
                return -ENOMEM;
 
@@ -313,7 +312,7 @@ static void pcpu_depopulate_chunk(struct pcpu_chunk *chunk,
         * successful population attempt so the temp pages array must
         * be available now.
         */
-       pages = pcpu_get_pages(chunk);
+       pages = pcpu_get_pages();
        BUG_ON(!pages);
 
        /* unmap and free */
index 5696039b5c0707eddcb652bc120a8556ca3cc58b..60a6488e9e6d49d5e9c5d4b611a5f5b037342316 100644 (file)
@@ -1011,8 +1011,11 @@ area_found:
                mutex_unlock(&pcpu_alloc_mutex);
        }
 
-       if (chunk != pcpu_reserved_chunk)
+       if (chunk != pcpu_reserved_chunk) {
+               spin_lock_irqsave(&pcpu_lock, flags);
                pcpu_nr_empty_pop_pages -= occ_pages;
+               spin_unlock_irqrestore(&pcpu_lock, flags);
+       }
 
        if (pcpu_nr_empty_pop_pages < PCPU_EMPTY_POP_PAGES_LOW)
                pcpu_schedule_balance_work();
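pcpu_nr_empty_pop_pages is read and written elsewhere under pcpu_lock, so this fast-path decrement has to take the same lock with interrupts saved. The corrected shape, wrapped for clarity (wrapper name hypothetical):

	static void pcpu_dec_empty_pop_pages(int nr)
	{
		unsigned long flags;

		spin_lock_irqsave(&pcpu_lock, flags);
		pcpu_nr_empty_pop_pages -= nr;
		spin_unlock_irqrestore(&pcpu_lock, flags);
	}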
index 4ed5908c65b0f17d29f128f0f0b6e21ca9a53c36..c99d9512a45b8a1599f0d679ec92d9e4511b3d68 100644 (file)
@@ -22,6 +22,12 @@ void pgd_clear_bad(pgd_t *pgd)
        pgd_clear(pgd);
 }
 
+void p4d_clear_bad(p4d_t *p4d)
+{
+       p4d_ERROR(*p4d);
+       p4d_clear(p4d);
+}
+
 void pud_clear_bad(pud_t *pud)
 {
        pud_ERROR(*pud);
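p4d_clear_bad() is the out-of-line slow path for the generic-header predicate that the new p4d walkers call; a sketch following the asm-generic pattern of the existing pgd/pud/pmd variants:

	static inline int p4d_none_or_clear_bad(p4d_t *p4d)
	{
		if (p4d_none(*p4d))
			return 1;
		if (unlikely(p4d_bad(*p4d))) {
			p4d_clear_bad(p4d);	/* report and clear the corrupt entry */
			return 1;
		}
		return 0;
	}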
index 2da487d6cea83b4f51db93bcbd05feaad31b927c..f6838015810f5610abe039daec170aa1da634422 100644 (file)
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -684,6 +684,7 @@ unsigned long page_address_in_vma(struct page *page, struct vm_area_struct *vma)
 pmd_t *mm_find_pmd(struct mm_struct *mm, unsigned long address)
 {
        pgd_t *pgd;
+       p4d_t *p4d;
        pud_t *pud;
        pmd_t *pmd = NULL;
        pmd_t pmde;
@@ -692,7 +693,11 @@ pmd_t *mm_find_pmd(struct mm_struct *mm, unsigned long address)
        if (!pgd_present(*pgd))
                goto out;
 
-       pud = pud_offset(pgd, address);
+       p4d = p4d_offset(pgd, address);
+       if (!p4d_present(*p4d))
+               goto out;
+
+       pud = pud_offset(p4d, address);
        if (!pud_present(*pud))
                goto out;
 
@@ -1154,7 +1159,7 @@ void page_add_file_rmap(struct page *page, bool compound)
                        goto out;
        }
        __mod_node_page_state(page_pgdat(page), NR_FILE_MAPPED, nr);
-       mem_cgroup_inc_page_stat(page, MEM_CGROUP_STAT_FILE_MAPPED);
+       mem_cgroup_update_page_stat(page, MEM_CGROUP_STAT_FILE_MAPPED, nr);
 out:
        unlock_page_memcg(page);
 }
@@ -1194,7 +1199,7 @@ static void page_remove_file_rmap(struct page *page, bool compound)
         * pte lock(a spinlock) is held, which implies preemption disabled.
         */
        __mod_node_page_state(page_pgdat(page), NR_FILE_MAPPED, -nr);
-       mem_cgroup_dec_page_stat(page, MEM_CGROUP_STAT_FILE_MAPPED);
+       mem_cgroup_update_page_stat(page, MEM_CGROUP_STAT_FILE_MAPPED, -nr);
 
        if (unlikely(PageMlocked(page)))
                clear_page_mlock(page);
@@ -1316,12 +1321,6 @@ static int try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
        }
 
        while (page_vma_mapped_walk(&pvmw)) {
-               subpage = page - page_to_pfn(page) + pte_pfn(*pvmw.pte);
-               address = pvmw.address;
-
-               /* Unexpected PMD-mapped THP? */
-               VM_BUG_ON_PAGE(!pvmw.pte, page);
-
                /*
                 * If the page is mlock()d, we cannot swap it out.
                 * If it's recently referenced (perhaps page_referenced
@@ -1345,6 +1344,12 @@ static int try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
                                continue;
                }
 
+               /* Unexpected PMD-mapped THP? */
+               VM_BUG_ON_PAGE(!pvmw.pte, page);
+
+               subpage = page - page_to_pfn(page) + pte_pfn(*pvmw.pte);
+               address = pvmw.address;
+
                if (!(flags & TTU_IGNORE_ACCESS)) {
                        if (ptep_clear_flush_young_notify(vma, address,
                                                pvmw.pte)) {
index 574c67b663fe8a6ef802b36cb0379d21c96cb77c..a56c3989f77312085f31124f7705908a5f69609a 100644 (file)
@@ -196,9 +196,9 @@ pmd_t * __meminit vmemmap_pmd_populate(pud_t *pud, unsigned long addr, int node)
        return pmd;
 }
 
-pud_t * __meminit vmemmap_pud_populate(pgd_t *pgd, unsigned long addr, int node)
+pud_t * __meminit vmemmap_pud_populate(p4d_t *p4d, unsigned long addr, int node)
 {
-       pud_t *pud = pud_offset(pgd, addr);
+       pud_t *pud = pud_offset(p4d, addr);
        if (pud_none(*pud)) {
                void *p = vmemmap_alloc_block(PAGE_SIZE, node);
                if (!p)
@@ -208,6 +208,18 @@ pud_t * __meminit vmemmap_pud_populate(pgd_t *pgd, unsigned long addr, int node)
        return pud;
 }
 
+p4d_t * __meminit vmemmap_p4d_populate(pgd_t *pgd, unsigned long addr, int node)
+{
+       p4d_t *p4d = p4d_offset(pgd, addr);
+       if (p4d_none(*p4d)) {
+               void *p = vmemmap_alloc_block(PAGE_SIZE, node);
+               if (!p)
+                       return NULL;
+               p4d_populate(&init_mm, p4d, p);
+       }
+       return p4d;
+}
+
 pgd_t * __meminit vmemmap_pgd_populate(unsigned long addr, int node)
 {
        pgd_t *pgd = pgd_offset_k(addr);
@@ -225,6 +237,7 @@ int __meminit vmemmap_populate_basepages(unsigned long start,
 {
        unsigned long addr = start;
        pgd_t *pgd;
+       p4d_t *p4d;
        pud_t *pud;
        pmd_t *pmd;
        pte_t *pte;
@@ -233,7 +246,10 @@ int __meminit vmemmap_populate_basepages(unsigned long start,
                pgd = vmemmap_pgd_populate(addr, node);
                if (!pgd)
                        return -ENOMEM;
-               pud = vmemmap_pud_populate(pgd, addr, node);
+               p4d = vmemmap_p4d_populate(pgd, addr, node);
+               if (!p4d)
+                       return -ENOMEM;
+               pud = vmemmap_pud_populate(p4d, addr, node);
                if (!pud)
                        return -ENOMEM;
                pmd = vmemmap_pmd_populate(pud, addr, node);
index 9b5bc86f96ad731269e2051719583f168a74bc51..b1ccb58ad397403214a220e4a0ac7901a6b6ae1e 100644 (file)
@@ -267,8 +267,6 @@ int free_swap_slot(swp_entry_t entry)
 {
        struct swap_slots_cache *cache;
 
-       BUG_ON(!swap_slot_cache_initialized);
-
        cache = &get_cpu_var(swp_slots);
        if (use_swap_slot_cache && cache->slots_ret) {
                spin_lock_irq(&cache->free_lock);
index 521ef9b6064fea1429ba196447855a69591e6963..178130880b908515a105eccf9fa428f7cf61719a 100644 (file)
@@ -1517,7 +1517,7 @@ static inline int unuse_pmd_range(struct vm_area_struct *vma, pud_t *pud,
        return 0;
 }
 
-static inline int unuse_pud_range(struct vm_area_struct *vma, pgd_t *pgd,
+static inline int unuse_pud_range(struct vm_area_struct *vma, p4d_t *p4d,
                                unsigned long addr, unsigned long end,
                                swp_entry_t entry, struct page *page)
 {
@@ -1525,7 +1525,7 @@ static inline int unuse_pud_range(struct vm_area_struct *vma, pgd_t *pgd,
        unsigned long next;
        int ret;
 
-       pud = pud_offset(pgd, addr);
+       pud = pud_offset(p4d, addr);
        do {
                next = pud_addr_end(addr, end);
                if (pud_none_or_clear_bad(pud))
@@ -1537,6 +1537,26 @@ static inline int unuse_pud_range(struct vm_area_struct *vma, pgd_t *pgd,
        return 0;
 }
 
+static inline int unuse_p4d_range(struct vm_area_struct *vma, pgd_t *pgd,
+                               unsigned long addr, unsigned long end,
+                               swp_entry_t entry, struct page *page)
+{
+       p4d_t *p4d;
+       unsigned long next;
+       int ret;
+
+       p4d = p4d_offset(pgd, addr);
+       do {
+               next = p4d_addr_end(addr, end);
+               if (p4d_none_or_clear_bad(p4d))
+                       continue;
+               ret = unuse_pud_range(vma, p4d, addr, next, entry, page);
+               if (ret)
+                       return ret;
+       } while (p4d++, addr = next, addr != end);
+       return 0;
+}
+
 static int unuse_vma(struct vm_area_struct *vma,
                                swp_entry_t entry, struct page *page)
 {
@@ -1560,7 +1580,7 @@ static int unuse_vma(struct vm_area_struct *vma,
                next = pgd_addr_end(addr, end);
                if (pgd_none_or_clear_bad(pgd))
                        continue;
-               ret = unuse_pud_range(vma, pgd, addr, next, entry, page);
+               ret = unuse_p4d_range(vma, pgd, addr, next, entry, page);
                if (ret)
                        return ret;
        } while (pgd++, addr = next, addr != end);
index 479e631d43c2f609466b1dee97ca2d0314ae12ba..8bcb501bce60b84f8bbc3c79cb2790bae2daa86a 100644 (file)
@@ -128,19 +128,22 @@ out_unlock:
 static pmd_t *mm_alloc_pmd(struct mm_struct *mm, unsigned long address)
 {
        pgd_t *pgd;
+       p4d_t *p4d;
        pud_t *pud;
-       pmd_t *pmd = NULL;
 
        pgd = pgd_offset(mm, address);
-       pud = pud_alloc(mm, pgd, address);
-       if (pud)
-               /*
-                * Note that we didn't run this because the pmd was
-                * missing, the *pmd may be already established and in
-                * turn it may also be a trans_huge_pmd.
-                */
-               pmd = pmd_alloc(mm, pud, address);
-       return pmd;
+       p4d = p4d_alloc(mm, pgd, address);
+       if (!p4d)
+               return NULL;
+       pud = pud_alloc(mm, p4d, address);
+       if (!pud)
+               return NULL;
+       /*
+        * Note that we do not get here only because the pmd was
+        * missing: *pmd may already be established, and it may
+        * even be a trans_huge_pmd.
+        */
+       return pmd_alloc(mm, pud, address);
 }
 
 #ifdef CONFIG_HUGETLB_PAGE
index b4024d688f38698bdbea86034e9a72f71f052da4..0b057628a7ba5c45d722710082ce32df3f7e8e13 100644 (file)
@@ -86,12 +86,12 @@ static void vunmap_pmd_range(pud_t *pud, unsigned long addr, unsigned long end)
        } while (pmd++, addr = next, addr != end);
 }
 
-static void vunmap_pud_range(pgd_t *pgd, unsigned long addr, unsigned long end)
+static void vunmap_pud_range(p4d_t *p4d, unsigned long addr, unsigned long end)
 {
        pud_t *pud;
        unsigned long next;
 
-       pud = pud_offset(pgd, addr);
+       pud = pud_offset(p4d, addr);
        do {
                next = pud_addr_end(addr, end);
                if (pud_clear_huge(pud))
@@ -102,6 +102,22 @@ static void vunmap_pud_range(pgd_t *pgd, unsigned long addr, unsigned long end)
        } while (pud++, addr = next, addr != end);
 }
 
+static void vunmap_p4d_range(pgd_t *pgd, unsigned long addr, unsigned long end)
+{
+       p4d_t *p4d;
+       unsigned long next;
+
+       p4d = p4d_offset(pgd, addr);
+       do {
+               next = p4d_addr_end(addr, end);
+               if (p4d_clear_huge(p4d))
+                       continue;
+               if (p4d_none_or_clear_bad(p4d))
+                       continue;
+               vunmap_pud_range(p4d, addr, next);
+       } while (p4d++, addr = next, addr != end);
+}
+
 static void vunmap_page_range(unsigned long addr, unsigned long end)
 {
        pgd_t *pgd;
@@ -113,7 +129,7 @@ static void vunmap_page_range(unsigned long addr, unsigned long end)
                next = pgd_addr_end(addr, end);
                if (pgd_none_or_clear_bad(pgd))
                        continue;
-               vunmap_pud_range(pgd, addr, next);
+               vunmap_p4d_range(pgd, addr, next);
        } while (pgd++, addr = next, addr != end);
 }
 
@@ -160,13 +176,13 @@ static int vmap_pmd_range(pud_t *pud, unsigned long addr,
        return 0;
 }
 
-static int vmap_pud_range(pgd_t *pgd, unsigned long addr,
+static int vmap_pud_range(p4d_t *p4d, unsigned long addr,
                unsigned long end, pgprot_t prot, struct page **pages, int *nr)
 {
        pud_t *pud;
        unsigned long next;
 
-       pud = pud_alloc(&init_mm, pgd, addr);
+       pud = pud_alloc(&init_mm, p4d, addr);
        if (!pud)
                return -ENOMEM;
        do {
@@ -177,6 +193,23 @@ static int vmap_pud_range(pgd_t *pgd, unsigned long addr,
        return 0;
 }
 
+static int vmap_p4d_range(pgd_t *pgd, unsigned long addr,
+               unsigned long end, pgprot_t prot, struct page **pages, int *nr)
+{
+       p4d_t *p4d;
+       unsigned long next;
+
+       p4d = p4d_alloc(&init_mm, pgd, addr);
+       if (!p4d)
+               return -ENOMEM;
+       do {
+               next = p4d_addr_end(addr, end);
+               if (vmap_pud_range(p4d, addr, next, prot, pages, nr))
+                       return -ENOMEM;
+       } while (p4d++, addr = next, addr != end);
+       return 0;
+}
+
 /*
  * Set up page tables in kva (addr, end). The ptes shall have prot "prot", and
  * will have pfns corresponding to the "pages" array.
@@ -196,7 +229,7 @@ static int vmap_page_range_noflush(unsigned long start, unsigned long end,
        pgd = pgd_offset_k(addr);
        do {
                next = pgd_addr_end(addr, end);
-               err = vmap_pud_range(pgd, addr, next, prot, pages, &nr);
+               err = vmap_p4d_range(pgd, addr, next, prot, pages, &nr);
                if (err)
                        return err;
        } while (pgd++, addr = next, addr != end);
@@ -237,6 +270,10 @@ struct page *vmalloc_to_page(const void *vmalloc_addr)
        unsigned long addr = (unsigned long) vmalloc_addr;
        struct page *page = NULL;
        pgd_t *pgd = pgd_offset_k(addr);
+       p4d_t *p4d;
+       pud_t *pud;
+       pmd_t *pmd;
+       pte_t *ptep, pte;
 
        /*
         * XXX we might need to change this if we add VIRTUAL_BUG_ON for
@@ -244,21 +281,23 @@ struct page *vmalloc_to_page(const void *vmalloc_addr)
         */
        VIRTUAL_BUG_ON(!is_vmalloc_or_module_addr(vmalloc_addr));
 
-       if (!pgd_none(*pgd)) {
-               pud_t *pud = pud_offset(pgd, addr);
-               if (!pud_none(*pud)) {
-                       pmd_t *pmd = pmd_offset(pud, addr);
-                       if (!pmd_none(*pmd)) {
-                               pte_t *ptep, pte;
-
-                               ptep = pte_offset_map(pmd, addr);
-                               pte = *ptep;
-                               if (pte_present(pte))
-                                       page = pte_page(pte);
-                               pte_unmap(ptep);
-                       }
-               }
-       }
+       if (pgd_none(*pgd))
+               return NULL;
+       p4d = p4d_offset(pgd, addr);
+       if (p4d_none(*p4d))
+               return NULL;
+       pud = pud_offset(p4d, addr);
+       if (pud_none(*pud))
+               return NULL;
+       pmd = pmd_offset(pud, addr);
+       if (pmd_none(*pmd))
+               return NULL;
+
+       ptep = pte_offset_map(pmd, addr);
+       pte = *ptep;
+       if (pte_present(pte))
+               page = pte_page(pte);
+       pte_unmap(ptep);
        return page;
 }
 EXPORT_SYMBOL(vmalloc_to_page);
@@ -1644,7 +1683,7 @@ static void *__vmalloc_area_node(struct vm_struct *area, gfp_t gfp_mask,
 
                if (fatal_signal_pending(current)) {
                        area->nr_pages = i;
-                       goto fail;
+                       goto fail_no_warn;
                }
 
                if (node == NUMA_NO_NODE)
@@ -1670,6 +1709,7 @@ fail:
        warn_alloc(gfp_mask, NULL,
                          "vmalloc: allocation failure, allocated %ld of %ld bytes",
                          (area->nr_pages*PAGE_SIZE), area->size);
+fail_no_warn:
        vfree(area->addr);
        return NULL;
 }
index 69f9aff39a2eaf608d4f7cfaed8904bd3c3312c8..89f95396ec46be64055f1a658c9c0f7bdad90d5c 100644 (file)
@@ -1065,6 +1065,9 @@ const char * const vmstat_text[] = {
        "thp_split_page_failed",
        "thp_deferred_split_page",
        "thp_split_pmd",
+#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
+       "thp_split_pud",
+#endif
        "thp_zero_page_alloc",
        "thp_zero_page_alloc_failed",
 #endif
@@ -1761,7 +1764,7 @@ static int vmstat_cpu_dead(unsigned int cpu)
 
 #endif
 
-static int __init setup_vmstat(void)
+void __init init_mm_internals(void)
 {
 #ifdef CONFIG_SMP
        int ret;
@@ -1789,9 +1792,7 @@ static int __init setup_vmstat(void)
        proc_create("vmstat", S_IRUGO, NULL, &proc_vmstat_file_operations);
        proc_create("zoneinfo", S_IRUGO, NULL, &proc_zoneinfo_file_operations);
 #endif
-       return 0;
 }
-module_init(setup_vmstat)
 
 #if defined(CONFIG_DEBUG_FS) && defined(CONFIG_COMPACTION)
 
index ac839fca0e76ae3cc5a025684cb1516301922d92..eda05c71fa49e6e1e4f93a4029ddef04a4f8ab4c 100644 (file)
@@ -532,7 +532,7 @@ static int __init workingset_init(void)
        pr_info("workingset: timestamp_bits=%d max_order=%d bucket_order=%u\n",
               timestamp_bits, max_order, bucket_order);
 
-       ret = list_lru_init_key(&shadow_nodes, &shadow_nodes_key);
+       ret = __list_lru_init(&shadow_nodes, true, &shadow_nodes_key);
        if (ret)
                goto err;
        ret = register_shrinker(&workingset_shadow_shrinker);
index 8970a2fd3b1a5354fb4bc843292a1c7358eed51c..f9492bccfd794a1983eabbc4bff32df35b31cea8 100644 (file)
@@ -667,6 +667,7 @@ next:
                        z3fold_page_unlock(zhdr);
                        spin_lock(&pool->lock);
                        if (kref_put(&zhdr->refcount, release_z3fold_page)) {
+                               spin_unlock(&pool->lock);
                                atomic64_dec(&pool->pages_nr);
                                return 0;
                        }
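The bug fixed here is the classic kref_put-under-lock leak: kref_put() returns nonzero when the count reached zero and the release callback ran, and that early-return path must still drop the lock. A sketch of the corrected shape (the wrapper is hypothetical):

	static int put_header_locked(struct z3fold_pool *pool, struct kref *ref)
	{
		spin_lock(&pool->lock);
		if (kref_put(ref, release_z3fold_page)) {
			spin_unlock(&pool->lock);	/* previously leaked here */
			return 1;
		}
		spin_unlock(&pool->lock);
		return 0;
	}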
index e97ab824e368cc16f9609acd70d5337866eb2936..9ee5787634e59690d67cb8fa148e03b18d455c99 100644 (file)
@@ -562,8 +562,7 @@ static int vlan_dev_init(struct net_device *dev)
                           NETIF_F_HIGHDMA | NETIF_F_SCTP_CRC |
                           NETIF_F_ALL_FCOE;
 
-       dev->features |= real_dev->vlan_features | NETIF_F_LLTX |
-                        NETIF_F_GSO_SOFTWARE;
+       dev->features |= dev->hw_features | NETIF_F_LLTX;
        dev->gso_max_size = real_dev->gso_max_size;
        dev->gso_max_segs = real_dev->gso_max_segs;
        if (dev->features & NETIF_F_VLAN_FEATURES)
index 9b681550e3a3ea3c6146ac67572b6c97a28c9d2c..9086ffbb508514c1e4fb1a5d2d04d6c6b1cf5bea 100644 (file)
@@ -12,7 +12,7 @@ obj-$(CONFIG_NET)             += $(tmp-y)
 
 # LLC has to be linked before the files in net/802/
 obj-$(CONFIG_LLC)              += llc/
-obj-$(CONFIG_NET)              += ethernet/ 802/ sched/ netlink/
+obj-$(CONFIG_NET)              += ethernet/ 802/ sched/ netlink/ bpf/
 obj-$(CONFIG_NETFILTER)                += netfilter/
 obj-$(CONFIG_INET)             += ipv4/
 obj-$(CONFIG_XFRM)             += xfrm/
index 53b4ac09e7b7d5d6a57f049dc1653c961b052622..ec527b62f79db1a4712d6fd654d2f899255de8bc 100644 (file)
@@ -106,7 +106,7 @@ static void unlink_clip_vcc(struct clip_vcc *clip_vcc)
                        entry->expires = jiffies - 1;
                        /* force resolution or expiration */
                        error = neigh_update(entry->neigh, NULL, NUD_NONE,
-                                            NEIGH_UPDATE_F_ADMIN);
+                                            NEIGH_UPDATE_F_ADMIN, 0);
                        if (error)
                                pr_crit("neigh_update failed with %d\n", error);
                        goto out;
@@ -481,7 +481,7 @@ static int clip_setentry(struct atm_vcc *vcc, __be32 ip)
                link_vcc(clip_vcc, entry);
        }
        error = neigh_update(neigh, llc_oui, NUD_PERMANENT,
-                            NEIGH_UPDATE_F_OVERRIDE | NEIGH_UPDATE_F_ADMIN);
+                            NEIGH_UPDATE_F_OVERRIDE | NEIGH_UPDATE_F_ADMIN, 0);
        neigh_release(neigh);
        return error;
 }
index 9613381f5db04e28ff66749706d1d61d82f77b88..f06422f4108d209fde356457c453164f2f4d7289 100644 (file)
@@ -62,21 +62,16 @@ static void vcc_remove_socket(struct sock *sk)
        write_unlock_irq(&vcc_sklist_lock);
 }
 
-static struct sk_buff *alloc_tx(struct atm_vcc *vcc, unsigned int size)
+static bool vcc_tx_ready(struct atm_vcc *vcc, unsigned int size)
 {
-       struct sk_buff *skb;
        struct sock *sk = sk_atm(vcc);
 
        if (sk_wmem_alloc_get(sk) && !atm_may_send(vcc, size)) {
                pr_debug("Sorry: wmem_alloc = %d, size = %d, sndbuf = %d\n",
                         sk_wmem_alloc_get(sk), size, sk->sk_sndbuf);
-               return NULL;
+               return false;
        }
-       while (!(skb = alloc_skb(size, GFP_KERNEL)))
-               schedule();
-       pr_debug("%d += %d\n", sk_wmem_alloc_get(sk), skb->truesize);
-       atomic_add(skb->truesize, &sk->sk_wmem_alloc);
-       return skb;
+       return true;
 }
 
 static void vcc_sock_destruct(struct sock *sk)
@@ -606,7 +601,7 @@ int vcc_sendmsg(struct socket *sock, struct msghdr *m, size_t size)
        eff = (size+3) & ~3; /* align to word boundary */
        prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
        error = 0;
-       while (!(skb = alloc_tx(vcc, eff))) {
+       while (!vcc_tx_ready(vcc, eff)) {
                if (m->msg_flags & MSG_DONTWAIT) {
                        error = -EAGAIN;
                        break;
@@ -628,6 +623,15 @@ int vcc_sendmsg(struct socket *sock, struct msghdr *m, size_t size)
        finish_wait(sk_sleep(sk), &wait);
        if (error)
                goto out;
+
+       skb = alloc_skb(eff, GFP_KERNEL);
+       if (!skb) {
+               error = -ENOMEM;
+               goto out;
+       }
+       pr_debug("%d += %d\n", sk_wmem_alloc_get(sk), skb->truesize);
+       atomic_add(skb->truesize, &sk->sk_wmem_alloc);
+
        skb->dev = NULL; /* for paths shared with net_device interfaces */
        ATM_SKB(skb)->atm_options = vcc->atm_options;
        if (!copy_from_iter_full(skb_put(skb, size), size, &m->msg_iter)) {
index db9794ec61d88efe16419a6c4534daf7c8770bc1..5589de7086af4eca7634e786918600a81cf6b09c 100644 (file)
@@ -318,7 +318,8 @@ out:
        return error;
 }
 
-static int svc_accept(struct socket *sock, struct socket *newsock, int flags)
+static int svc_accept(struct socket *sock, struct socket *newsock, int flags,
+                     bool kern)
 {
        struct sock *sk = sock->sk;
        struct sk_buff *skb;
@@ -329,7 +330,7 @@ static int svc_accept(struct socket *sock, struct socket *newsock, int flags)
 
        lock_sock(sk);
 
-       error = svc_create(sock_net(sk), newsock, 0, 0);
+       error = svc_create(sock_net(sk), newsock, 0, kern);
        if (error)
                goto out;
 
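For reference, the signature being threaded through every af_* accept in this merge; the new flag tells the protocol whether the accept originates from a kernel-internal socket so the child socket can be set up accordingly:

	/* struct proto_ops member as of this series */
	int (*accept)(struct socket *sock, struct socket *newsock, int flags,
		      bool kern);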
index a8e42cedf1dbc7e11a5803a3dbe857e1e4cd54e1..b7c486752b3acf64b821ccb8b0e1a9bc25c945da 100644 (file)
@@ -1320,7 +1320,8 @@ out_release:
        return err;
 }
 
-static int ax25_accept(struct socket *sock, struct socket *newsock, int flags)
+static int ax25_accept(struct socket *sock, struct socket *newsock, int flags,
+                      bool kern)
 {
        struct sk_buff *skb;
        struct sock *newsk;
index 7c3d994e90d87b868f2b1614cc5d26e2413e70ee..495ba7cdcb0451c997656116a300a49b7e43a089 100644 (file)
@@ -679,15 +679,11 @@ static void batadv_iv_ogm_aggregate_new(const unsigned char *packet_buff,
 {
        struct batadv_priv *bat_priv = netdev_priv(if_incoming->soft_iface);
        struct batadv_forw_packet *forw_packet_aggr;
+       struct sk_buff *skb;
        unsigned char *skb_buff;
        unsigned int skb_size;
        atomic_t *queue_left = own_packet ? NULL : &bat_priv->batman_queue_left;
 
-       forw_packet_aggr = batadv_forw_packet_alloc(if_incoming, if_outgoing,
-                                                   queue_left, bat_priv);
-       if (!forw_packet_aggr)
-               return;
-
        if (atomic_read(&bat_priv->aggregated_ogms) &&
            packet_len < BATADV_MAX_AGGREGATION_BYTES)
                skb_size = BATADV_MAX_AGGREGATION_BYTES;
@@ -696,9 +692,14 @@ static void batadv_iv_ogm_aggregate_new(const unsigned char *packet_buff,
 
        skb_size += ETH_HLEN;
 
-       forw_packet_aggr->skb = netdev_alloc_skb_ip_align(NULL, skb_size);
-       if (!forw_packet_aggr->skb) {
-               batadv_forw_packet_free(forw_packet_aggr, true);
+       skb = netdev_alloc_skb_ip_align(NULL, skb_size);
+       if (!skb)
+               return;
+
+       forw_packet_aggr = batadv_forw_packet_alloc(if_incoming, if_outgoing,
+                                                   queue_left, bat_priv, skb);
+       if (!forw_packet_aggr) {
+               kfree_skb(skb);
                return;
        }
 
@@ -2477,6 +2478,16 @@ static void batadv_iv_iface_activate(struct batadv_hard_iface *hard_iface)
        batadv_iv_ogm_schedule(hard_iface);
 }
 
+/**
+ * batadv_iv_init_sel_class - initialize GW selection class
+ * @bat_priv: the bat priv with all the soft interface information
+ */
+static void batadv_iv_init_sel_class(struct batadv_priv *bat_priv)
+{
+       /* set default TQ difference threshold to 20 */
+       atomic_set(&bat_priv->gw.sel_class, 20);
+}
+
 static struct batadv_gw_node *
 batadv_iv_gw_get_best_gw_node(struct batadv_priv *bat_priv)
 {
@@ -2823,6 +2834,7 @@ static struct batadv_algo_ops batadv_batman_iv __read_mostly = {
                .del_if = batadv_iv_ogm_orig_del_if,
        },
        .gw = {
+               .init_sel_class = batadv_iv_init_sel_class,
                .get_best_gw_node = batadv_iv_gw_get_best_gw_node,
                .is_eligible = batadv_iv_gw_is_eligible,
 #ifdef CONFIG_BATMAN_ADV_DEBUGFS
index 0acd081dd286996444d121b526f4530c4c1c0845..a36c8e7291d61f171cdb128dee865739d22cb00e 100644 (file)
@@ -668,6 +668,16 @@ err_ifinfo1:
        return ret;
 }
 
+/**
+ * batadv_v_init_sel_class - initialize GW selection class
+ * @bat_priv: the bat priv with all the soft interface information
+ */
+static void batadv_v_init_sel_class(struct batadv_priv *bat_priv)
+{
+       /* set default throughput difference threshold to 5Mbps */
+       atomic_set(&bat_priv->gw.sel_class, 50);
+}
+
 static ssize_t batadv_v_store_sel_class(struct batadv_priv *bat_priv,
                                        char *buff, size_t count)
 {
@@ -1052,6 +1062,7 @@ static struct batadv_algo_ops batadv_batman_v __read_mostly = {
                .dump = batadv_v_orig_dump,
        },
        .gw = {
+               .init_sel_class = batadv_v_init_sel_class,
                .store_sel_class = batadv_v_store_sel_class,
                .show_sel_class = batadv_v_show_sel_class,
                .get_best_gw_node = batadv_v_gw_get_best_gw_node,
@@ -1092,9 +1103,6 @@ int batadv_v_mesh_init(struct batadv_priv *bat_priv)
        if (ret < 0)
                return ret;
 
-       /* set default throughput difference threshold to 5Mbps */
-       atomic_set(&bat_priv->gw.sel_class, 50);
-
        return 0;
 }
 
index ba8420d8a992db2c14936f568f761869afd921c0..d07e89ec84677d22a74891406ab6fa5ee6087011 100644 (file)
@@ -395,7 +395,7 @@ static void batadv_bla_send_claim(struct batadv_priv *bat_priv, u8 *mac,
                ether_addr_copy(ethhdr->h_source, mac);
                batadv_dbg(BATADV_DBG_BLA, bat_priv,
                           "bla_send_claim(): CLAIM %pM on vid %d\n", mac,
-                          BATADV_PRINT_VID(vid));
+                          batadv_print_vid(vid));
                break;
        case BATADV_CLAIM_TYPE_UNCLAIM:
                /* unclaim frame
@@ -404,7 +404,7 @@ static void batadv_bla_send_claim(struct batadv_priv *bat_priv, u8 *mac,
                ether_addr_copy(hw_src, mac);
                batadv_dbg(BATADV_DBG_BLA, bat_priv,
                           "bla_send_claim(): UNCLAIM %pM on vid %d\n", mac,
-                          BATADV_PRINT_VID(vid));
+                          batadv_print_vid(vid));
                break;
        case BATADV_CLAIM_TYPE_ANNOUNCE:
                /* announcement frame
@@ -413,7 +413,7 @@ static void batadv_bla_send_claim(struct batadv_priv *bat_priv, u8 *mac,
                ether_addr_copy(hw_src, mac);
                batadv_dbg(BATADV_DBG_BLA, bat_priv,
                           "bla_send_claim(): ANNOUNCE of %pM on vid %d\n",
-                          ethhdr->h_source, BATADV_PRINT_VID(vid));
+                          ethhdr->h_source, batadv_print_vid(vid));
                break;
        case BATADV_CLAIM_TYPE_REQUEST:
                /* request frame
@@ -425,14 +425,14 @@ static void batadv_bla_send_claim(struct batadv_priv *bat_priv, u8 *mac,
                batadv_dbg(BATADV_DBG_BLA, bat_priv,
                           "bla_send_claim(): REQUEST of %pM to %pM on vid %d\n",
                           ethhdr->h_source, ethhdr->h_dest,
-                          BATADV_PRINT_VID(vid));
+                          batadv_print_vid(vid));
                break;
        case BATADV_CLAIM_TYPE_LOOPDETECT:
                ether_addr_copy(ethhdr->h_source, mac);
                batadv_dbg(BATADV_DBG_BLA, bat_priv,
                           "bla_send_claim(): LOOPDETECT of %pM to %pM on vid %d\n",
                           ethhdr->h_source, ethhdr->h_dest,
-                          BATADV_PRINT_VID(vid));
+                          batadv_print_vid(vid));
 
                break;
        }
@@ -475,9 +475,9 @@ static void batadv_bla_loopdetect_report(struct work_struct *work)
 
        batadv_info(bat_priv->soft_iface,
                    "Possible loop on VLAN %d detected which can't be handled by BLA - please check your network setup!\n",
-                   BATADV_PRINT_VID(backbone_gw->vid));
+                   batadv_print_vid(backbone_gw->vid));
        snprintf(vid_str, sizeof(vid_str), "%d",
-                BATADV_PRINT_VID(backbone_gw->vid));
+                batadv_print_vid(backbone_gw->vid));
        vid_str[sizeof(vid_str) - 1] = 0;
 
        batadv_throw_uevent(bat_priv, BATADV_UEV_BLA, BATADV_UEV_LOOPDETECT,
@@ -510,7 +510,7 @@ batadv_bla_get_backbone_gw(struct batadv_priv *bat_priv, u8 *orig,
 
        batadv_dbg(BATADV_DBG_BLA, bat_priv,
                   "bla_get_backbone_gw(): not found (%pM, %d), creating new entry\n",
-                  orig, BATADV_PRINT_VID(vid));
+                  orig, batadv_print_vid(vid));
 
        entry = kzalloc(sizeof(*entry), GFP_ATOMIC);
        if (!entry)
@@ -719,7 +719,7 @@ static void batadv_bla_add_claim(struct batadv_priv *bat_priv,
 
                batadv_dbg(BATADV_DBG_BLA, bat_priv,
                           "bla_add_claim(): adding new entry %pM, vid %d to hash ...\n",
-                          mac, BATADV_PRINT_VID(vid));
+                          mac, batadv_print_vid(vid));
 
                kref_get(&claim->refcount);
                hash_added = batadv_hash_add(bat_priv->bla.claim_hash,
@@ -739,8 +739,8 @@ static void batadv_bla_add_claim(struct batadv_priv *bat_priv,
                        goto claim_free_ref;
 
                batadv_dbg(BATADV_DBG_BLA, bat_priv,
-                          "bla_add_claim(): changing ownership for %pM, vid %d\n",
-                          mac, BATADV_PRINT_VID(vid));
+                          "bla_add_claim(): changing ownership for %pM, vid %d to gw %pM\n",
+                          mac, batadv_print_vid(vid), backbone_gw->orig);
 
                remove_crc = true;
        }
@@ -809,7 +809,7 @@ static void batadv_bla_del_claim(struct batadv_priv *bat_priv,
                return;
 
        batadv_dbg(BATADV_DBG_BLA, bat_priv, "bla_del_claim(): %pM, vid %d\n",
-                  mac, BATADV_PRINT_VID(vid));
+                  mac, batadv_print_vid(vid));
 
        batadv_hash_remove(bat_priv->bla.claim_hash, batadv_compare_claim,
                           batadv_choose_claim, claim);
@@ -849,7 +849,7 @@ static bool batadv_handle_announce(struct batadv_priv *bat_priv, u8 *an_addr,
 
        batadv_dbg(BATADV_DBG_BLA, bat_priv,
                   "handle_announce(): ANNOUNCE vid %d (sent by %pM)... CRC = %#.4x\n",
-                  BATADV_PRINT_VID(vid), backbone_gw->orig, crc);
+                  batadv_print_vid(vid), backbone_gw->orig, crc);
 
        spin_lock_bh(&backbone_gw->crc_lock);
        backbone_crc = backbone_gw->crc;
@@ -859,7 +859,7 @@ static bool batadv_handle_announce(struct batadv_priv *bat_priv, u8 *an_addr,
                batadv_dbg(BATADV_DBG_BLA, backbone_gw->bat_priv,
                           "handle_announce(): CRC FAILED for %pM/%d (my = %#.4x, sent = %#.4x)\n",
                           backbone_gw->orig,
-                          BATADV_PRINT_VID(backbone_gw->vid),
+                          batadv_print_vid(backbone_gw->vid),
                           backbone_crc, crc);
 
                batadv_bla_send_request(backbone_gw);
@@ -904,7 +904,7 @@ static bool batadv_handle_request(struct batadv_priv *bat_priv,
 
        batadv_dbg(BATADV_DBG_BLA, bat_priv,
                   "handle_request(): REQUEST vid %d (sent by %pM)...\n",
-                  BATADV_PRINT_VID(vid), ethhdr->h_source);
+                  batadv_print_vid(vid), ethhdr->h_source);
 
        batadv_bla_answer_request(bat_priv, primary_if, vid);
        return true;
@@ -941,7 +941,7 @@ static bool batadv_handle_unclaim(struct batadv_priv *bat_priv,
        /* this must be an UNCLAIM frame */
        batadv_dbg(BATADV_DBG_BLA, bat_priv,
                   "handle_unclaim(): UNCLAIM %pM on vid %d (sent by %pM)...\n",
-                  claim_addr, BATADV_PRINT_VID(vid), backbone_gw->orig);
+                  claim_addr, batadv_print_vid(vid), backbone_gw->orig);
 
        batadv_bla_del_claim(bat_priv, claim_addr, vid);
        batadv_backbone_gw_put(backbone_gw);
@@ -1161,7 +1161,7 @@ static bool batadv_bla_process_claim(struct batadv_priv *bat_priv,
        if (ret == 1)
                batadv_dbg(BATADV_DBG_BLA, bat_priv,
                           "bla_process_claim(): received a claim frame from another group. From: %pM on vid %d ...(hw_src %pM, hw_dst %pM)\n",
-                          ethhdr->h_source, BATADV_PRINT_VID(vid), hw_src,
+                          ethhdr->h_source, batadv_print_vid(vid), hw_src,
                           hw_dst);
 
        if (ret < 2)
@@ -1197,7 +1197,7 @@ static bool batadv_bla_process_claim(struct batadv_priv *bat_priv,
 
        batadv_dbg(BATADV_DBG_BLA, bat_priv,
                   "bla_process_claim(): ERROR - this looks like a claim frame, but is useless. eth src %pM on vid %d ...(hw_src %pM, hw_dst %pM)\n",
-                  ethhdr->h_source, BATADV_PRINT_VID(vid), hw_src, hw_dst);
+                  ethhdr->h_source, batadv_print_vid(vid), hw_src, hw_dst);
        return true;
 }
 
@@ -1295,10 +1295,13 @@ static void batadv_bla_purge_claims(struct batadv_priv *bat_priv,
                                goto skip;
 
                        batadv_dbg(BATADV_DBG_BLA, bat_priv,
-                                  "bla_purge_claims(): %pM, vid %d, time out\n",
-                                  claim->addr, claim->vid);
+                                  "bla_purge_claims(): timed out.\n");
 
 purge_now:
+                       batadv_dbg(BATADV_DBG_BLA, bat_priv,
+                                  "bla_purge_claims(): %pM, vid %d\n",
+                                  claim->addr, claim->vid);
+
                        batadv_handle_unclaim(bat_priv, primary_if,
                                              backbone_gw->orig,
                                              claim->addr, claim->vid);
@@ -1846,6 +1849,13 @@ bool batadv_bla_rx(struct batadv_priv *bat_priv, struct sk_buff *skb,
                /* possible optimization: race for a claim */
                /* No claim exists yet, claim it for us!
                 */
+
+               batadv_dbg(BATADV_DBG_BLA, bat_priv,
+                          "bla_rx(): Unclaimed MAC %pM found. Claim it. Local: %s\n",
+                          ethhdr->h_source,
+                          batadv_is_my_client(bat_priv,
+                                              ethhdr->h_source, vid) ?
+                          "yes" : "no");
                batadv_handle_claim(bat_priv, primary_if,
                                    primary_if->net_dev->dev_addr,
                                    ethhdr->h_source, vid);
@@ -1963,10 +1973,22 @@ bool batadv_bla_tx(struct batadv_priv *bat_priv, struct sk_buff *skb,
                /* if yes, the client has roamed and we have
                 * to unclaim it.
                 */
-               batadv_handle_unclaim(bat_priv, primary_if,
-                                     primary_if->net_dev->dev_addr,
-                                     ethhdr->h_source, vid);
-               goto allow;
+               if (batadv_has_timed_out(claim->lasttime, 100)) {
+                       /* only unclaim if the last claim entry is
+                        * older than 100 ms to make sure we really
+                        * have a roaming client here.
+                        */
+                       batadv_dbg(BATADV_DBG_BLA, bat_priv, "bla_tx(): Roaming client %pM detected. Unclaim it.\n",
+                                  ethhdr->h_source);
+                       batadv_handle_unclaim(bat_priv, primary_if,
+                                             primary_if->net_dev->dev_addr,
+                                             ethhdr->h_source, vid);
+                       goto allow;
+               } else {
+                       batadv_dbg(BATADV_DBG_BLA, bat_priv, "bla_tx(): Race for claim %pM detected. Drop packet.\n",
+                                  ethhdr->h_source);
+                       goto handled;
+               }
        }
 
        /* check if it is a multicast/broadcast frame */
@@ -2042,7 +2064,7 @@ int batadv_bla_claim_table_seq_print_text(struct seq_file *seq, void *offset)
                        backbone_crc = backbone_gw->crc;
                        spin_unlock_bh(&backbone_gw->crc_lock);
                        seq_printf(seq, " * %pM on %5d by %pM [%c] (%#.4x)\n",
-                                  claim->addr, BATADV_PRINT_VID(claim->vid),
+                                  claim->addr, batadv_print_vid(claim->vid),
                                   backbone_gw->orig,
                                   (is_own ? 'x' : ' '),
                                   backbone_crc);
@@ -2274,7 +2296,7 @@ int batadv_bla_backbone_table_seq_print_text(struct seq_file *seq, void *offset)
 
                        seq_printf(seq, " * %pM on %5d %4i.%03is (%#.4x)\n",
                                   backbone_gw->orig,
-                                  BATADV_PRINT_VID(backbone_gw->vid), secs,
+                                  batadv_print_vid(backbone_gw->vid), secs,
                                   msecs, backbone_crc);
                }
                rcu_read_unlock();
@@ -2449,3 +2471,52 @@ out:
 
        return ret;
 }
+
+#ifdef CONFIG_BATMAN_ADV_DAT
+/**
+ * batadv_bla_check_claim - check if address is claimed
+ *
+ * @bat_priv: the bat priv with all the soft interface information
+ * @addr: mac address of which the claim status is checked
+ * @vid: the VLAN ID
+ *
+ * Check whether the given address is claimed by the local device itself.
+ *
+ * Return: true if bla is disabled or the mac is claimed by the device,
+ * false if the address is already claimed by another backbone gateway
+ */
+bool batadv_bla_check_claim(struct batadv_priv *bat_priv,
+                           u8 *addr, unsigned short vid)
+{
+       struct batadv_bla_claim search_claim;
+       struct batadv_bla_claim *claim = NULL;
+       struct batadv_hard_iface *primary_if = NULL;
+       bool ret = true;
+
+       if (!atomic_read(&bat_priv->bridge_loop_avoidance))
+               return ret;
+
+       primary_if = batadv_primary_if_get_selected(bat_priv);
+       if (!primary_if)
+               return ret;
+
+       /* First look if the mac address is claimed */
+       ether_addr_copy(search_claim.addr, addr);
+       search_claim.vid = vid;
+
+       claim = batadv_claim_hash_find(bat_priv, &search_claim);
+
+       /* If there is a claim and we are not owner of the claim,
+        * return false.
+        */
+       if (claim) {
+               if (!batadv_compare_eth(claim->backbone_gw->orig,
+                                       primary_if->net_dev->dev_addr))
+                       ret = false;
+               batadv_claim_put(claim);
+       }
+
+       batadv_hardif_put(primary_if);
+       return ret;
+}
+#endif
index e157986bd01cf989dc70c93bae7a4fdf17cf3c4f..234775748b8eae9477f802804f004e50d2a4840c 100644 (file)
@@ -69,6 +69,10 @@ void batadv_bla_status_update(struct net_device *net_dev);
 int batadv_bla_init(struct batadv_priv *bat_priv);
 void batadv_bla_free(struct batadv_priv *bat_priv);
 int batadv_bla_claim_dump(struct sk_buff *msg, struct netlink_callback *cb);
+#ifdef CONFIG_BATMAN_ADV_DAT
+bool batadv_bla_check_claim(struct batadv_priv *bat_priv, u8 *addr,
+                           unsigned short vid);
+#endif
 #define BATADV_BLA_CRC_INIT    0
 #else /* ifdef CONFIG_BATMAN_ADV_BLA */
 
@@ -145,6 +149,13 @@ static inline int batadv_bla_backbone_dump(struct sk_buff *msg,
        return -EOPNOTSUPP;
 }
 
+static inline
+bool batadv_bla_check_claim(struct batadv_priv *bat_priv, u8 *addr,
+                           unsigned short vid)
+{
+       return true;
+}
+
 #endif /* ifdef CONFIG_BATMAN_ADV_BLA */
 
 #endif /* ifndef _NET_BATMAN_ADV_BLA_H_ */
index 1bfd1dbc2feba7bf6c16004ea8ca85ed6066c929..013e970eff393e0550aa250f7e72c27301071552 100644 (file)
@@ -43,6 +43,7 @@
 #include <linux/workqueue.h>
 #include <net/arp.h>
 
+#include "bridge_loop_avoidance.h"
 #include "hard-interface.h"
 #include "hash.h"
 #include "log.h"
@@ -330,7 +331,7 @@ static void batadv_dat_entry_add(struct batadv_priv *bat_priv, __be32 ip,
                batadv_dbg(BATADV_DBG_DAT, bat_priv,
                           "Entry updated: %pI4 %pM (vid: %d)\n",
                           &dat_entry->ip, dat_entry->mac_addr,
-                          BATADV_PRINT_VID(vid));
+                          batadv_print_vid(vid));
                goto out;
        }
 
@@ -356,7 +357,7 @@ static void batadv_dat_entry_add(struct batadv_priv *bat_priv, __be32 ip,
        }
 
        batadv_dbg(BATADV_DBG_DAT, bat_priv, "New entry added: %pI4 %pM (vid: %d)\n",
-                  &dat_entry->ip, dat_entry->mac_addr, BATADV_PRINT_VID(vid));
+                  &dat_entry->ip, dat_entry->mac_addr, batadv_print_vid(vid));
 
 out:
        if (dat_entry)
@@ -835,7 +836,7 @@ int batadv_dat_cache_seq_print_text(struct seq_file *seq, void *offset)
 
                        seq_printf(seq, " * %15pI4 %14pM %4i %6i:%02i\n",
                                   &dat_entry->ip, dat_entry->mac_addr,
-                                  BATADV_PRINT_VID(dat_entry->vid),
+                                  batadv_print_vid(dat_entry->vid),
                                   last_seen_mins, last_seen_secs);
                }
                rcu_read_unlock();
@@ -1002,6 +1003,7 @@ bool batadv_dat_snoop_outgoing_arp_request(struct batadv_priv *bat_priv,
        bool ret = false;
        struct batadv_dat_entry *dat_entry = NULL;
        struct sk_buff *skb_new;
+       struct net_device *soft_iface = bat_priv->soft_iface;
        int hdr_size = 0;
        unsigned short vid;
 
@@ -1040,16 +1042,30 @@ bool batadv_dat_snoop_outgoing_arp_request(struct batadv_priv *bat_priv,
                        goto out;
                }
 
+               /* If BLA is enabled, only send ARP replies if we have claimed
+                * the destination for the ARP request, or if no other
+                * backbone gw in our backbone has claimed that
+                * destination.
+                */
+               if (!batadv_bla_check_claim(bat_priv,
+                                           dat_entry->mac_addr, vid)) {
+                       batadv_dbg(BATADV_DBG_DAT, bat_priv,
+                                  "Device %pM claimed by another backbone gw. Don't send ARP reply!\n",
+                                  dat_entry->mac_addr);
+                       ret = true;
+                       goto out;
+               }
+
                skb_new = batadv_dat_arp_create_reply(bat_priv, ip_dst, ip_src,
                                                      dat_entry->mac_addr,
                                                      hw_src, vid);
                if (!skb_new)
                        goto out;
 
-               skb_new->protocol = eth_type_trans(skb_new,
-                                                  bat_priv->soft_iface);
-               bat_priv->stats.rx_packets++;
-               bat_priv->stats.rx_bytes += skb->len + ETH_HLEN + hdr_size;
+               skb_new->protocol = eth_type_trans(skb_new, soft_iface);
+
+               soft_iface->stats.rx_packets++;
+               soft_iface->stats.rx_bytes += skb->len + ETH_HLEN + hdr_size;
 
                netif_rx(skb_new);
                batadv_dbg(BATADV_DBG_DAT, bat_priv, "ARP request replied locally\n");
@@ -1188,6 +1204,7 @@ void batadv_dat_snoop_outgoing_arp_reply(struct batadv_priv *bat_priv,
 bool batadv_dat_snoop_incoming_arp_reply(struct batadv_priv *bat_priv,
                                         struct sk_buff *skb, int hdr_size)
 {
+       struct batadv_dat_entry *dat_entry = NULL;
        u16 type;
        __be32 ip_src, ip_dst;
        u8 *hw_src, *hw_dst;
@@ -1210,12 +1227,41 @@ bool batadv_dat_snoop_incoming_arp_reply(struct batadv_priv *bat_priv,
        hw_dst = batadv_arp_hw_dst(skb, hdr_size);
        ip_dst = batadv_arp_ip_dst(skb, hdr_size);
 
+       /* If ip_dst is already in cache and has the right mac address,
+        * drop this frame if this ARP reply is destined for us because it's
+        * most probably an ARP reply generated by another node of the DHT.
+        * We have most probably already received a reply earlier; delivering
+        * this frame as well would duplicate the ARP reply at the receiver.
+        */
+       dat_entry = batadv_dat_entry_hash_find(bat_priv, ip_src, vid);
+       if (dat_entry && batadv_compare_eth(hw_src, dat_entry->mac_addr)) {
+               batadv_dbg(BATADV_DBG_DAT, bat_priv, "Doubled ARP reply removed: ARP MSG = [src: %pM-%pI4 dst: %pM-%pI4]; dat_entry: %pM-%pI4\n",
+                          hw_src, &ip_src, hw_dst, &ip_dst,
+                          dat_entry->mac_addr, &dat_entry->ip);
+               dropped = true;
+               goto out;
+       }
+
        /* Update our internal cache with both the IP addresses the node got
         * within the ARP reply
         */
        batadv_dat_entry_add(bat_priv, ip_src, hw_src, vid);
        batadv_dat_entry_add(bat_priv, ip_dst, hw_dst, vid);
 
+       /* If BLA is enabled, only forward ARP replies if we have claimed the
+        * source of the ARP reply or if no one else of the same backbone has
+        * already claimed that client. This prevents different gateways in
+        * the same backbone from all forwarding the ARP reply, which would
+        * cause multiple replies in the backbone.
+        */
+       if (!batadv_bla_check_claim(bat_priv, hw_src, vid)) {
+               batadv_dbg(BATADV_DBG_DAT, bat_priv,
+                          "Device %pM claimed by another backbone gw. Drop ARP reply.\n",
+                          hw_src);
+               dropped = true;
+               goto out;
+       }
+
        /* if this REPLY is directed to a client of mine, let's deliver the
         * packet to the interface
         */
@@ -1228,6 +1274,8 @@ bool batadv_dat_snoop_incoming_arp_reply(struct batadv_priv *bat_priv,
 out:
        if (dropped)
                kfree_skb(skb);
+       if (dat_entry)
+               batadv_dat_entry_put(dat_entry);
        /* if dropped == false -> deliver to the interface */
        return dropped;
 }
@@ -1256,7 +1304,7 @@ bool batadv_dat_drop_broadcast_packet(struct batadv_priv *bat_priv,
        /* If this packet is an ARP_REQUEST and the node already has the
         * information that it is going to ask, then the packet can be dropped
         */
-       if (forw_packet->num_packets)
+       if (batadv_forw_packet_is_rebroadcast(forw_packet))
                goto out;
 
        vid = batadv_dat_get_vid(forw_packet->skb, &hdr_size);
index 11a23fd6e1a07fa0c541fa3ea0a13775f9933893..8f964beaac284905c487ecfc5babaf2dd72d822c 100644 (file)
@@ -404,7 +404,7 @@ out:
  * batadv_frag_create - create a fragment from skb
  * @skb: skb to create fragment from
  * @frag_head: header to use in new fragment
- * @mtu: size of new fragment
+ * @fragment_size: size of new fragment
  *
  * Split the passed skb into two fragments: A new one with size matching the
  * passed mtu and the old one with the rest. The new skb contains data from the
@@ -414,11 +414,11 @@ out:
  */
 static struct sk_buff *batadv_frag_create(struct sk_buff *skb,
                                          struct batadv_frag_packet *frag_head,
-                                         unsigned int mtu)
+                                         unsigned int fragment_size)
 {
        struct sk_buff *skb_fragment;
        unsigned int header_size = sizeof(*frag_head);
-       unsigned int fragment_size = mtu - header_size;
+       unsigned int mtu = fragment_size + header_size;
 
        skb_fragment = netdev_alloc_skb(NULL, mtu + ETH_HLEN);
        if (!skb_fragment)
@@ -456,7 +456,7 @@ int batadv_frag_send_packet(struct sk_buff *skb,
        struct sk_buff *skb_fragment;
        unsigned int mtu = neigh_node->if_incoming->net_dev->mtu;
        unsigned int header_size = sizeof(frag_header);
-       unsigned int max_fragment_size, max_packet_size;
+       unsigned int max_fragment_size, num_fragments;
        int ret;
 
        /* To avoid merge and refragmentation at next-hops we never send
@@ -464,10 +464,15 @@ int batadv_frag_send_packet(struct sk_buff *skb,
         */
        mtu = min_t(unsigned int, mtu, BATADV_FRAG_MAX_FRAG_SIZE);
        max_fragment_size = mtu - header_size;
-       max_packet_size = max_fragment_size * BATADV_FRAG_MAX_FRAGMENTS;
+
+       if (skb->len == 0 || max_fragment_size == 0)
+               return -EINVAL;
+
+       num_fragments = (skb->len - 1) / max_fragment_size + 1;
+       max_fragment_size = (skb->len - 1) / num_fragments + 1;
 
        /* Don't even try to fragment, if we need more than 16 fragments */
-       if (skb->len > max_packet_size) {
+       if (num_fragments > BATADV_FRAG_MAX_FRAGMENTS) {
                ret = -EAGAIN;
                goto free_skb;
        }
@@ -507,7 +512,8 @@ int batadv_frag_send_packet(struct sk_buff *skb,
                        goto put_primary_if;
                }
 
-               skb_fragment = batadv_frag_create(skb, &frag_header, mtu);
+               skb_fragment = batadv_frag_create(skb, &frag_header,
+                                                 max_fragment_size);
                if (!skb_fragment) {
                        ret = -ENOMEM;
                        goto put_primary_if;
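The two round-up divisions above even out the fragment sizes: first derive the fragment count from the MTU-limited maximum, then re-derive the per-fragment size from that count. A worked sketch (helper name illustrative):

	/*
	 * skb->len = 3000, MTU-limited max = 1400:
	 *   num_fragments     = (3000 - 1) / 1400 + 1 = 3
	 *   max_fragment_size = (3000 - 1) / 3 + 1    = 1000
	 * three ~1000-byte fragments instead of 1400 + 1400 + 200
	 */
	static unsigned int even_fragment_size(unsigned int len, unsigned int max)
	{
		unsigned int n = (len - 1) / max + 1;	/* DIV_ROUND_UP(len, max) */

		return (len - 1) / n + 1;		/* DIV_ROUND_UP(len, n) */
	}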
index 5db2e43e3775ef40fc3832984c93411c7f0dbb08..33940c5c74a8730c4ed3e06f7246e022cfb798da 100644 (file)
@@ -253,6 +253,11 @@ static void batadv_gw_tvlv_ogm_handler_v1(struct batadv_priv *bat_priv,
  */
 void batadv_gw_init(struct batadv_priv *bat_priv)
 {
+       if (bat_priv->algo_ops->gw.init_sel_class)
+               bat_priv->algo_ops->gw.init_sel_class(bat_priv);
+       else
+               atomic_set(&bat_priv->gw.sel_class, 1);
+
        batadv_tvlv_handler_register(bat_priv, batadv_gw_tvlv_ogm_handler_v1,
                                     NULL, BATADV_TVLV_GW, 1,
                                     BATADV_TVLV_HANDLER_OGM_CIFNOTFND);
index 7a2b9f4da07830103a8f00e4c3b488b0367a9dc2..65ce97efa6b5946175f64d300573a532f28387f8 100644 (file)
@@ -73,9 +73,10 @@ __printf(2, 3);
 /* possibly ratelimited debug output */
 #define _batadv_dbg(type, bat_priv, ratelimited, fmt, arg...)          \
        do {                                                            \
-               if (atomic_read(&(bat_priv)->log_level) & (type) &&     \
+               struct batadv_priv *__batpriv = (bat_priv);             \
+               if (atomic_read(&__batpriv->log_level) & (type) &&      \
                    (!(ratelimited) || net_ratelimit()))                \
-                       batadv_debug_log(bat_priv, fmt, ## arg);        \
+                       batadv_debug_log(__batpriv, fmt, ## arg);       \
        }                                                               \
        while (0)
 #else /* !CONFIG_BATMAN_ADV_DEBUG */
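The new __batpriv local matters because the old macro expanded its bat_priv argument twice, so a side-effecting argument would be evaluated twice. A minimal illustration (macro and helper names hypothetical):

	#define DBG_TWICE(p)	do { if (dbg_on(p)) dbg_emit(p); } while (0)
	#define DBG_ONCE(p)						\
		do {							\
			typeof(p) __p = (p);	/* evaluate once */	\
			if (dbg_on(__p))				\
				dbg_emit(__p);				\
		} while (0)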
index 5000c540614d0c0a866857e5245ff3f365d14223..fb381fb26a66196942401ec6cb636c66e648a36a 100644 (file)
@@ -516,6 +516,9 @@ static void batadv_recv_handler_init(void)
        BUILD_BUG_ON(sizeof(struct batadv_tvlv_tt_change) != 12);
        BUILD_BUG_ON(sizeof(struct batadv_tvlv_roam_adv) != 8);
 
+       i = FIELD_SIZEOF(struct sk_buff, cb);
+       BUILD_BUG_ON(sizeof(struct batadv_skb_cb) > i);
+
        /* broadcast packet */
        batadv_rx_handler[BATADV_BCAST] = batadv_recv_bcast_packet;
 
index 57a8103dbce7f00fb58ff5ae252e12a0f6453aec..810f7d026f544027991af1714c169342792e1a68 100644 (file)
@@ -24,7 +24,7 @@
 #define BATADV_DRIVER_DEVICE "batman-adv"
 
 #ifndef BATADV_SOURCE_VERSION
-#define BATADV_SOURCE_VERSION "2017.0"
+#define BATADV_SOURCE_VERSION "2017.1"
 #endif
 
 /* B.A.T.M.A.N. parameters */
@@ -193,6 +193,7 @@ enum batadv_uev_type {
 #include <linux/percpu.h>
 #include <linux/types.h>
 
+#include "packet.h"
 #include "types.h"
 
 struct net_device;
@@ -200,8 +201,19 @@ struct packet_type;
 struct seq_file;
 struct sk_buff;
 
-#define BATADV_PRINT_VID(vid) (((vid) & BATADV_VLAN_HAS_TAG) ? \
-                              (int)((vid) & VLAN_VID_MASK) : -1)
+/**
+ * batadv_print_vid - return printable version of vid information
+ * @vid: the VLAN identifier
+ *
+ * Return: -1 when no VLAN is used, VLAN id otherwise
+ */
+static inline int batadv_print_vid(unsigned short vid)
+{
+       if (vid & BATADV_VLAN_HAS_TAG)
+               return (int)(vid & VLAN_VID_MASK);
+       else
+               return -1;
+}
 
 extern struct list_head batadv_hardif_list;
 
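Turning the BATADV_PRINT_VID macro into a static inline buys type checking on vid and single evaluation, while call sites stay unchanged, e.g.:

	batadv_dbg(BATADV_DBG_BLA, bat_priv, "... on vid %d\n",
		   batadv_print_vid(vid));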
index 952ba81a565b611ee0a83fc0bbfb719b54187407..d327670641ac336a14f0ecc85dd848d4952e8e6e 100644 (file)
@@ -494,9 +494,8 @@ static bool batadv_mcast_mla_tvlv_update(struct batadv_priv *bat_priv)
        if (!bridged)
                goto update;
 
-#if !IS_ENABLED(CONFIG_BRIDGE_IGMP_SNOOPING)
-       pr_warn_once("No bridge IGMP snooping compiled - multicast optimizations disabled\n");
-#endif
+       if (!IS_ENABLED(CONFIG_BRIDGE_IGMP_SNOOPING))
+               pr_warn_once("No bridge IGMP snooping compiled - multicast optimizations disabled\n");
 
        querier4.exists = br_multicast_has_querier_anywhere(dev, ETH_P_IP);
        querier4.shadowing = br_multicast_has_querier_adjacent(dev, ETH_P_IP);
@@ -671,7 +670,6 @@ static int batadv_mcast_forw_mode_check_ipv4(struct batadv_priv *bat_priv,
        return 0;
 }
 
-#if IS_ENABLED(CONFIG_IPV6)
 /**
  * batadv_mcast_is_report_ipv6 - check for MLD reports
  * @skb: the ethernet frame destined for the mesh
@@ -736,7 +734,6 @@ static int batadv_mcast_forw_mode_check_ipv6(struct batadv_priv *bat_priv,
 
        return 0;
 }
-#endif
 
 /**
  * batadv_mcast_forw_mode_check - check for optimized forwarding potential
@@ -765,11 +762,12 @@ static int batadv_mcast_forw_mode_check(struct batadv_priv *bat_priv,
        case ETH_P_IP:
                return batadv_mcast_forw_mode_check_ipv4(bat_priv, skb,
                                                         is_unsnoopable);
-#if IS_ENABLED(CONFIG_IPV6)
        case ETH_P_IPV6:
+               if (!IS_ENABLED(CONFIG_IPV6))
+                       return -EINVAL;
+
                return batadv_mcast_forw_mode_check_ipv6(bat_priv, skb,
                                                         is_unsnoopable);
-#endif
        default:
                return -EINVAL;
        }
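
These hunks trade #if/#endif for if (!IS_ENABLED(...)): the disabled branch
is still parsed and type-checked by the compiler and then discarded as dead
code, so ifdef rot cannot creep in. A crude stand-alone sketch of the pattern
(the real IS_ENABLED() resolves kconfig symbols; names here are hypothetical):

    #include <stdio.h>

    /* crude stand-in: the kernel's IS_ENABLED() resolves CONFIG_* symbols
     * to 1 (=y/=m) or 0 at preprocessing time */
    #define CONFIG_FEATURE_SK 1
    #define IS_ENABLED_SK(option) (option)

    static int handle_feature(void) { return 42; }

    static int dispatch(void)
    {
            if (!IS_ENABLED_SK(CONFIG_FEATURE_SK))
                    return -1; /* still parsed and type-checked, then elided */

            return handle_feature(); /* dead code when the option is off */
    }

    int main(void)
    {
            printf("%d\n", dispatch());
            return 0;
    }
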
index 7fd740b6e36dfb0e11c67c283cec68a5cfd1b5f6..e1ebe14ee2a6e21cc8d6b4a42552cae4bd15061f 100644 (file)
@@ -941,15 +941,17 @@ int batadv_recv_unicast_packet(struct sk_buff *skb,
        struct batadv_priv *bat_priv = netdev_priv(recv_if->soft_iface);
        struct batadv_unicast_packet *unicast_packet;
        struct batadv_unicast_4addr_packet *unicast_4addr_packet;
-       u8 *orig_addr;
-       struct batadv_orig_node *orig_node = NULL;
+       u8 *orig_addr, *orig_addr_gw;
+       struct batadv_orig_node *orig_node = NULL, *orig_node_gw = NULL;
        int check, hdr_size = sizeof(*unicast_packet);
        enum batadv_subtype subtype;
-       bool is4addr;
+       struct ethhdr *ethhdr;
        int ret = NET_RX_DROP;
+       bool is4addr, is_gw;
 
        unicast_packet = (struct batadv_unicast_packet *)skb->data;
        unicast_4addr_packet = (struct batadv_unicast_4addr_packet *)skb->data;
+       ethhdr = eth_hdr(skb);
 
        is4addr = unicast_packet->packet_type == BATADV_UNICAST_4ADDR;
        /* the caller function should have already pulled 2 bytes */
@@ -972,6 +974,23 @@ int batadv_recv_unicast_packet(struct sk_buff *skb,
 
        /* packet for me */
        if (batadv_is_my_mac(bat_priv, unicast_packet->dest)) {
+               /* If this is a unicast packet from another backbone gw,
+                * drop it.
+                */
+               orig_addr_gw = ethhdr->h_source;
+               orig_node_gw = batadv_orig_hash_find(bat_priv, orig_addr_gw);
+               if (orig_node_gw) {
+                       is_gw = batadv_bla_is_backbone_gw(skb, orig_node_gw,
+                                                         hdr_size);
+                       batadv_orig_node_put(orig_node_gw);
+                       if (is_gw) {
+                               batadv_dbg(BATADV_DBG_BLA, bat_priv,
+                                          "recv_unicast_packet(): Dropped unicast pkt received from another backbone gw %pM.\n",
+                                          orig_addr_gw);
+                               return NET_RX_DROP;
+                       }
+               }
+
                if (is4addr) {
                        subtype = unicast_4addr_packet->subtype;
                        batadv_dat_inc_counter(bat_priv, subtype);
index 1489ec27daff5548b072e88648f5cca192f74afa..403df596a73d28afa8b31b46a2c0649052ce3db0 100644 (file)
@@ -482,6 +482,7 @@ void batadv_forw_packet_free(struct batadv_forw_packet *forw_packet,
  * @if_outgoing: The (optional) if_outgoing to be grabbed
  * @queue_left: The (optional) queue counter to decrease
  * @bat_priv: The bat_priv for the mesh of this forw_packet
+ * @skb: The raw packet this forwarding packet shall contain
  *
  * Allocates a forwarding packet and tries to get a reference to the
  * (optional) if_incoming, if_outgoing and queue_left. If queue_left
@@ -493,7 +494,8 @@ struct batadv_forw_packet *
 batadv_forw_packet_alloc(struct batadv_hard_iface *if_incoming,
                         struct batadv_hard_iface *if_outgoing,
                         atomic_t *queue_left,
-                        struct batadv_priv *bat_priv)
+                        struct batadv_priv *bat_priv,
+                        struct sk_buff *skb)
 {
        struct batadv_forw_packet *forw_packet;
        const char *qname;
@@ -525,7 +527,7 @@ batadv_forw_packet_alloc(struct batadv_hard_iface *if_incoming,
 
        INIT_HLIST_NODE(&forw_packet->list);
        INIT_HLIST_NODE(&forw_packet->cleanup_list);
-       forw_packet->skb = NULL;
+       forw_packet->skb = skb;
        forw_packet->queue_left = queue_left;
        forw_packet->if_incoming = if_incoming;
        forw_packet->if_outgoing = if_outgoing;
@@ -756,22 +758,23 @@ int batadv_add_bcast_packet_to_list(struct batadv_priv *bat_priv,
        if (!primary_if)
                goto err;
 
+       newskb = skb_copy(skb, GFP_ATOMIC);
+       if (!newskb) {
+               batadv_hardif_put(primary_if);
+               goto err;
+       }
+
        forw_packet = batadv_forw_packet_alloc(primary_if, NULL,
                                               &bat_priv->bcast_queue_left,
-                                              bat_priv);
+                                              bat_priv, newskb);
        batadv_hardif_put(primary_if);
        if (!forw_packet)
-               goto err;
-
-       newskb = skb_copy(skb, GFP_ATOMIC);
-       if (!newskb)
                goto err_packet_free;
 
        /* as we have a copy now, it is safe to decrease the TTL */
        bcast_packet = (struct batadv_bcast_packet *)newskb->data;
        bcast_packet->ttl--;
 
-       forw_packet->skb = newskb;
        forw_packet->own = own_packet;
 
        INIT_DELAYED_WORK(&forw_packet->delayed_work,
@@ -781,11 +784,60 @@ int batadv_add_bcast_packet_to_list(struct batadv_priv *bat_priv,
        return NETDEV_TX_OK;
 
 err_packet_free:
-       batadv_forw_packet_free(forw_packet, true);
+       kfree_skb(newskb);
 err:
        return NETDEV_TX_BUSY;
 }
 
+/**
+ * batadv_forw_packet_bcasts_left - check if a retransmission is necessary
+ * @forw_packet: the forwarding packet to check
+ * @hard_iface: the interface to check on
+ *
+ * Checks whether a given packet has any (re)transmissions left on the provided
+ * interface.
+ *
+ * hard_iface may be NULL: in that case the number of transmissions this skb
+ * has had so far is compared against the maximum number of retransmissions,
+ * independent of any particular interface.
+ *
+ * Return: True if (re)transmissions are left, false otherwise.
+ */
+static bool
+batadv_forw_packet_bcasts_left(struct batadv_forw_packet *forw_packet,
+                              struct batadv_hard_iface *hard_iface)
+{
+       unsigned int max;
+
+       if (hard_iface)
+               max = hard_iface->num_bcasts;
+       else
+               max = BATADV_NUM_BCASTS_MAX;
+
+       return BATADV_SKB_CB(forw_packet->skb)->num_bcasts < max;
+}
+
+/**
+ * batadv_forw_packet_bcasts_inc - increment retransmission counter of a packet
+ * @forw_packet: the packet to increase the counter for
+ */
+static void
+batadv_forw_packet_bcasts_inc(struct batadv_forw_packet *forw_packet)
+{
+       BATADV_SKB_CB(forw_packet->skb)->num_bcasts++;
+}
+
+/**
+ * batadv_forw_packet_is_rebroadcast - check packet for previous transmissions
+ * @forw_packet: the packet to check
+ *
+ * Return: True if this packet was transmitted before, false otherwise.
+ */
+bool batadv_forw_packet_is_rebroadcast(struct batadv_forw_packet *forw_packet)
+{
+       return BATADV_SKB_CB(forw_packet->skb)->num_bcasts > 0;
+}
+
 static void batadv_send_outstanding_bcast_packet(struct work_struct *work)
 {
        struct batadv_hard_iface *hard_iface;
@@ -826,7 +878,7 @@ static void batadv_send_outstanding_bcast_packet(struct work_struct *work)
                if (hard_iface->soft_iface != soft_iface)
                        continue;
 
-               if (forw_packet->num_packets >= hard_iface->num_bcasts)
+               if (!batadv_forw_packet_bcasts_left(forw_packet, hard_iface))
                        continue;
 
                if (forw_packet->own) {
@@ -884,10 +936,10 @@ static void batadv_send_outstanding_bcast_packet(struct work_struct *work)
        }
        rcu_read_unlock();
 
-       forw_packet->num_packets++;
+       batadv_forw_packet_bcasts_inc(forw_packet);
 
        /* if we still have some more bcasts to send */
-       if (forw_packet->num_packets < BATADV_NUM_BCASTS_MAX) {
+       if (batadv_forw_packet_bcasts_left(forw_packet, NULL)) {
                batadv_forw_packet_bcast_queue(bat_priv, forw_packet,
                                               send_time);
                return;
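
With the counter now in the per-skb control block, one helper answers both
questions: pass a hard interface to check against its own num_bcasts limit,
or NULL to check against the global BATADV_NUM_BCASTS_MAX. A stand-alone
sketch of that loop shape, all names hypothetical:

    #include <stdbool.h>
    #include <stdio.h>

    #define NUM_BCASTS_MAX_SK 3 /* hypothetical global cap */

    struct pkt_sk { unsigned int num_bcasts; };
    struct iface_sk { unsigned int num_bcasts; };

    static bool bcasts_left_sk(const struct pkt_sk *p,
                               const struct iface_sk *hi)
    {
            /* NULL interface means: compare against the global maximum */
            unsigned int max = hi ? hi->num_bcasts : NUM_BCASTS_MAX_SK;

            return p->num_bcasts < max;
    }

    int main(void)
    {
            struct pkt_sk pkt = { 0 };
            struct iface_sk wifi = { 2 }; /* this iface only repeats twice */

            while (bcasts_left_sk(&pkt, NULL)) {     /* keep rescheduling? */
                    if (bcasts_left_sk(&pkt, &wifi)) /* due on this iface? */
                            printf("send on wifi, attempt %u\n",
                                   pkt.num_bcasts + 1);
                    pkt.num_bcasts++;
            }
            return 0;
    }
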
index f21166d1032360a1febe3cebdfc9ee0e9958bb9c..a16b34f473ef02e46e642b46d4b31f588d89ea67 100644 (file)
@@ -34,11 +34,13 @@ struct batadv_forw_packet *
 batadv_forw_packet_alloc(struct batadv_hard_iface *if_incoming,
                         struct batadv_hard_iface *if_outgoing,
                         atomic_t *queue_left,
-                        struct batadv_priv *bat_priv);
+                        struct batadv_priv *bat_priv,
+                        struct sk_buff *skb);
 bool batadv_forw_packet_steal(struct batadv_forw_packet *packet, spinlock_t *l);
 void batadv_forw_packet_ogmv1_queue(struct batadv_priv *bat_priv,
                                    struct batadv_forw_packet *forw_packet,
                                    unsigned long send_time);
+bool batadv_forw_packet_is_rebroadcast(struct batadv_forw_packet *forw_packet);
 
 int batadv_send_skb_to_orig(struct sk_buff *skb,
                            struct batadv_orig_node *orig_node,
index 5d099b2e6cfccb8a436d98a10a6d513d89e31dc1..b25789abf7b9e10aec7af1dfc41a5c9ff805284a 100644 (file)
 #include "sysfs.h"
 #include "translation-table.h"
 
-static int batadv_get_settings(struct net_device *dev, struct ethtool_cmd *cmd);
-static void batadv_get_drvinfo(struct net_device *dev,
-                              struct ethtool_drvinfo *info);
-static u32 batadv_get_msglevel(struct net_device *dev);
-static void batadv_set_msglevel(struct net_device *dev, u32 value);
-static u32 batadv_get_link(struct net_device *dev);
-static void batadv_get_strings(struct net_device *dev, u32 stringset, u8 *data);
-static void batadv_get_ethtool_stats(struct net_device *dev,
-                                    struct ethtool_stats *stats, u64 *data);
-static int batadv_get_sset_count(struct net_device *dev, int stringset);
-
-static const struct ethtool_ops batadv_ethtool_ops = {
-       .get_settings = batadv_get_settings,
-       .get_drvinfo = batadv_get_drvinfo,
-       .get_msglevel = batadv_get_msglevel,
-       .set_msglevel = batadv_set_msglevel,
-       .get_link = batadv_get_link,
-       .get_strings = batadv_get_strings,
-       .get_ethtool_stats = batadv_get_ethtool_stats,
-       .get_sset_count = batadv_get_sset_count,
-};
-
 int batadv_skb_head_push(struct sk_buff *skb, unsigned int len)
 {
        int result;
@@ -140,7 +118,7 @@ static u64 batadv_sum_counter(struct batadv_priv *bat_priv,  size_t idx)
 static struct net_device_stats *batadv_interface_stats(struct net_device *dev)
 {
        struct batadv_priv *bat_priv = netdev_priv(dev);
-       struct net_device_stats *stats = &bat_priv->stats;
+       struct net_device_stats *stats = &dev->stats;
 
        stats->tx_packets = batadv_sum_counter(bat_priv, BATADV_CNT_TX);
        stats->tx_bytes = batadv_sum_counter(bat_priv, BATADV_CNT_TX_BYTES);
@@ -230,6 +208,9 @@ static int batadv_interface_tx(struct sk_buff *skb,
        if (atomic_read(&bat_priv->mesh_state) != BATADV_MESH_ACTIVE)
                goto dropped;
 
+       /* reset control block to avoid left overs from previous users */
+       memset(skb->cb, 0, sizeof(struct batadv_skb_cb));
+
        netif_trans_update(soft_iface);
        vid = batadv_get_vid(skb, 0);
        ethhdr = eth_hdr(skb);
@@ -819,7 +800,6 @@ static int batadv_softif_init_late(struct net_device *dev)
        atomic_set(&bat_priv->mcast.num_want_all_ipv6, 0);
 #endif
        atomic_set(&bat_priv->gw.mode, BATADV_GW_MODE_OFF);
-       atomic_set(&bat_priv->gw.sel_class, 20);
        atomic_set(&bat_priv->gw.bandwidth_down, 100);
        atomic_set(&bat_priv->gw.bandwidth_up, 20);
        atomic_set(&bat_priv->orig_interval, 1000);
@@ -948,6 +928,98 @@ static const struct net_device_ops batadv_netdev_ops = {
        .ndo_del_slave = batadv_softif_slave_del,
 };
 
+static void batadv_get_drvinfo(struct net_device *dev,
+                              struct ethtool_drvinfo *info)
+{
+       strlcpy(info->driver, "B.A.T.M.A.N. advanced", sizeof(info->driver));
+       strlcpy(info->version, BATADV_SOURCE_VERSION, sizeof(info->version));
+       strlcpy(info->fw_version, "N/A", sizeof(info->fw_version));
+       strlcpy(info->bus_info, "batman", sizeof(info->bus_info));
+}
+
+/* Inspired by drivers/net/ethernet/dlink/sundance.c:1702
+ * Declare each description string in struct.name[] to get a fixed-sized
+ * buffer and compile-time checking for strings longer than ETH_GSTRING_LEN.
+ */
+static const struct {
+       const char name[ETH_GSTRING_LEN];
+} batadv_counters_strings[] = {
+       { "tx" },
+       { "tx_bytes" },
+       { "tx_dropped" },
+       { "rx" },
+       { "rx_bytes" },
+       { "forward" },
+       { "forward_bytes" },
+       { "mgmt_tx" },
+       { "mgmt_tx_bytes" },
+       { "mgmt_rx" },
+       { "mgmt_rx_bytes" },
+       { "frag_tx" },
+       { "frag_tx_bytes" },
+       { "frag_rx" },
+       { "frag_rx_bytes" },
+       { "frag_fwd" },
+       { "frag_fwd_bytes" },
+       { "tt_request_tx" },
+       { "tt_request_rx" },
+       { "tt_response_tx" },
+       { "tt_response_rx" },
+       { "tt_roam_adv_tx" },
+       { "tt_roam_adv_rx" },
+#ifdef CONFIG_BATMAN_ADV_DAT
+       { "dat_get_tx" },
+       { "dat_get_rx" },
+       { "dat_put_tx" },
+       { "dat_put_rx" },
+       { "dat_cached_reply_tx" },
+#endif
+#ifdef CONFIG_BATMAN_ADV_NC
+       { "nc_code" },
+       { "nc_code_bytes" },
+       { "nc_recode" },
+       { "nc_recode_bytes" },
+       { "nc_buffer" },
+       { "nc_decode" },
+       { "nc_decode_bytes" },
+       { "nc_decode_failed" },
+       { "nc_sniffed" },
+#endif
+};
+
+static void batadv_get_strings(struct net_device *dev, u32 stringset, u8 *data)
+{
+       if (stringset == ETH_SS_STATS)
+               memcpy(data, batadv_counters_strings,
+                      sizeof(batadv_counters_strings));
+}
+
+static void batadv_get_ethtool_stats(struct net_device *dev,
+                                    struct ethtool_stats *stats, u64 *data)
+{
+       struct batadv_priv *bat_priv = netdev_priv(dev);
+       int i;
+
+       for (i = 0; i < BATADV_CNT_NUM; i++)
+               data[i] = batadv_sum_counter(bat_priv, i);
+}
+
+static int batadv_get_sset_count(struct net_device *dev, int stringset)
+{
+       if (stringset == ETH_SS_STATS)
+               return BATADV_CNT_NUM;
+
+       return -EOPNOTSUPP;
+}
+
+static const struct ethtool_ops batadv_ethtool_ops = {
+       .get_drvinfo = batadv_get_drvinfo,
+       .get_link = ethtool_op_get_link,
+       .get_strings = batadv_get_strings,
+       .get_ethtool_stats = batadv_get_ethtool_stats,
+       .get_sset_count = batadv_get_sset_count,
+};
+
 /**
  * batadv_softif_free - Deconstructor of batadv_soft_interface
  * @dev: Device to cleanup and remove
@@ -972,8 +1044,6 @@ static void batadv_softif_free(struct net_device *dev)
  */
 static void batadv_softif_init_early(struct net_device *dev)
 {
-       struct batadv_priv *priv = netdev_priv(dev);
-
        ether_setup(dev);
 
        dev->netdev_ops = &batadv_netdev_ops;
@@ -990,8 +1060,6 @@ static void batadv_softif_init_early(struct net_device *dev)
        eth_hw_addr_random(dev);
 
        dev->ethtool_ops = &batadv_ethtool_ops;
-
-       memset(priv, 0, sizeof(*priv));
 }
 
 struct net_device *batadv_softif_create(struct net *net, const char *name)
@@ -1084,118 +1152,3 @@ struct rtnl_link_ops batadv_link_ops __read_mostly = {
        .setup          = batadv_softif_init_early,
        .dellink        = batadv_softif_destroy_netlink,
 };
-
-/* ethtool */
-static int batadv_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
-{
-       cmd->supported = 0;
-       cmd->advertising = 0;
-       ethtool_cmd_speed_set(cmd, SPEED_10);
-       cmd->duplex = DUPLEX_FULL;
-       cmd->port = PORT_TP;
-       cmd->phy_address = 0;
-       cmd->transceiver = XCVR_INTERNAL;
-       cmd->autoneg = AUTONEG_DISABLE;
-       cmd->maxtxpkt = 0;
-       cmd->maxrxpkt = 0;
-
-       return 0;
-}
-
-static void batadv_get_drvinfo(struct net_device *dev,
-                              struct ethtool_drvinfo *info)
-{
-       strlcpy(info->driver, "B.A.T.M.A.N. advanced", sizeof(info->driver));
-       strlcpy(info->version, BATADV_SOURCE_VERSION, sizeof(info->version));
-       strlcpy(info->fw_version, "N/A", sizeof(info->fw_version));
-       strlcpy(info->bus_info, "batman", sizeof(info->bus_info));
-}
-
-static u32 batadv_get_msglevel(struct net_device *dev)
-{
-       return -EOPNOTSUPP;
-}
-
-static void batadv_set_msglevel(struct net_device *dev, u32 value)
-{
-}
-
-static u32 batadv_get_link(struct net_device *dev)
-{
-       return 1;
-}
-
-/* Inspired by drivers/net/ethernet/dlink/sundance.c:1702
- * Declare each description string in struct.name[] to get fixed sized buffer
- * and compile time checking for strings longer than ETH_GSTRING_LEN.
- */
-static const struct {
-       const char name[ETH_GSTRING_LEN];
-} batadv_counters_strings[] = {
-       { "tx" },
-       { "tx_bytes" },
-       { "tx_dropped" },
-       { "rx" },
-       { "rx_bytes" },
-       { "forward" },
-       { "forward_bytes" },
-       { "mgmt_tx" },
-       { "mgmt_tx_bytes" },
-       { "mgmt_rx" },
-       { "mgmt_rx_bytes" },
-       { "frag_tx" },
-       { "frag_tx_bytes" },
-       { "frag_rx" },
-       { "frag_rx_bytes" },
-       { "frag_fwd" },
-       { "frag_fwd_bytes" },
-       { "tt_request_tx" },
-       { "tt_request_rx" },
-       { "tt_response_tx" },
-       { "tt_response_rx" },
-       { "tt_roam_adv_tx" },
-       { "tt_roam_adv_rx" },
-#ifdef CONFIG_BATMAN_ADV_DAT
-       { "dat_get_tx" },
-       { "dat_get_rx" },
-       { "dat_put_tx" },
-       { "dat_put_rx" },
-       { "dat_cached_reply_tx" },
-#endif
-#ifdef CONFIG_BATMAN_ADV_NC
-       { "nc_code" },
-       { "nc_code_bytes" },
-       { "nc_recode" },
-       { "nc_recode_bytes" },
-       { "nc_buffer" },
-       { "nc_decode" },
-       { "nc_decode_bytes" },
-       { "nc_decode_failed" },
-       { "nc_sniffed" },
-#endif
-};
-
-static void batadv_get_strings(struct net_device *dev, u32 stringset, u8 *data)
-{
-       if (stringset == ETH_SS_STATS)
-               memcpy(data, batadv_counters_strings,
-                      sizeof(batadv_counters_strings));
-}
-
-static void batadv_get_ethtool_stats(struct net_device *dev,
-                                    struct ethtool_stats *stats, u64 *data)
-{
-       struct batadv_priv *bat_priv = netdev_priv(dev);
-       int i;
-
-       for (i = 0; i < BATADV_CNT_NUM; i++)
-               data[i] = batadv_sum_counter(bat_priv, i);
-}
-
-static int batadv_get_sset_count(struct net_device *dev, int stringset)
-{
-       if (stringset == ETH_SS_STATS)
-               return BATADV_CNT_NUM;
-
-       return -EOPNOTSUPP;
-}
index c94ebdecdc3d123f71c5af89783623e58d2f323d..556f9a865ddfb5e488c65e9edd708fad187f56b1 100644 (file)
@@ -873,8 +873,8 @@ static int batadv_tp_send(void *arg)
                /* something went wrong during the preparation/transmission */
                if (unlikely(err && err != BATADV_TP_REASON_CANT_SEND)) {
                        batadv_dbg(BATADV_DBG_TP_METER, bat_priv,
-                                  "Meter: batadv_tp_send() cannot send packets (%d)\n",
-                                  err);
+                                  "Meter: %s() cannot send packets (%d)\n",
+                                  __func__, err);
                        /* ensure nobody else tries to stop the thread now */
                        if (atomic_dec_and_test(&tp_vars->sending))
                                tp_vars->reason = err;
@@ -979,7 +979,8 @@ void batadv_tp_start(struct batadv_priv *bat_priv, const u8 *dst,
        if (!tp_vars) {
                spin_unlock_bh(&bat_priv->tp_list_lock);
                batadv_dbg(BATADV_DBG_TP_METER, bat_priv,
-                          "Meter: batadv_tp_start cannot allocate list elements\n");
+                          "Meter: %s cannot allocate list elements\n",
+                          __func__);
                batadv_tp_batctl_error_notify(BATADV_TP_REASON_MEMORY_ERROR,
                                              dst, bat_priv, session_cookie);
                return;
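
Using __func__ instead of a hard-coded function name keeps log text correct
across renames. A trivial stand-alone illustration:

    #include <stdio.h>

    /* __func__ expands to the enclosing function's name, so the log text
     * survives renames without editing every format string */
    static void send_batch(int err)
    {
            if (err)
                    printf("Meter: %s() cannot send packets (%d)\n",
                           __func__, err);
    }

    int main(void)
    {
            send_batch(-12);
            return 0;
    }
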
index 6077a87d46f0f781ac72dcc3cc1d9f84c814ad19..e75b4937b497401284bc553182737a2f7af54605 100644 (file)
@@ -617,7 +617,7 @@ static void batadv_tt_global_free(struct batadv_priv *bat_priv,
        batadv_dbg(BATADV_DBG_TT, bat_priv,
                   "Deleting global tt entry %pM (vid: %d): %s\n",
                   tt_global->common.addr,
-                  BATADV_PRINT_VID(tt_global->common.vid), message);
+                  batadv_print_vid(tt_global->common.vid), message);
 
        batadv_hash_remove(bat_priv->tt.global_hash, batadv_compare_tt,
                           batadv_choose_tt, &tt_global->common);
@@ -671,7 +671,7 @@ bool batadv_tt_local_add(struct net_device *soft_iface, const u8 *addr,
                if (tt_local->common.flags & BATADV_TT_CLIENT_PENDING) {
                        batadv_dbg(BATADV_DBG_TT, bat_priv,
                                   "Re-adding pending client %pM (vid: %d)\n",
-                                  addr, BATADV_PRINT_VID(vid));
+                                  addr, batadv_print_vid(vid));
                        /* whatever the reason why the PENDING flag was set,
                         * this is a client which was enqueued to be removed in
                         * this orig_interval. Since it popped up again, the
@@ -684,7 +684,7 @@ bool batadv_tt_local_add(struct net_device *soft_iface, const u8 *addr,
                if (tt_local->common.flags & BATADV_TT_CLIENT_ROAM) {
                        batadv_dbg(BATADV_DBG_TT, bat_priv,
                                   "Roaming client %pM (vid: %d) came back to its original location\n",
-                                  addr, BATADV_PRINT_VID(vid));
+                                  addr, batadv_print_vid(vid));
                        /* the ROAM flag is set because this client roamed away
                         * and the node got a roaming_advertisement message. Now
                         * that the client popped up again at its original
@@ -716,7 +716,7 @@ bool batadv_tt_local_add(struct net_device *soft_iface, const u8 *addr,
        if (!vlan) {
                net_ratelimited_function(batadv_info, soft_iface,
                                         "adding TT local entry %pM to non-existent VLAN %d\n",
-                                        addr, BATADV_PRINT_VID(vid));
+                                        addr, batadv_print_vid(vid));
                kmem_cache_free(batadv_tl_cache, tt_local);
                tt_local = NULL;
                goto out;
@@ -724,7 +724,7 @@ bool batadv_tt_local_add(struct net_device *soft_iface, const u8 *addr,
 
        batadv_dbg(BATADV_DBG_TT, bat_priv,
                   "Creating new local tt entry: %pM (vid: %d, ttvn: %d)\n",
-                  addr, BATADV_PRINT_VID(vid),
+                  addr, batadv_print_vid(vid),
                   (u8)atomic_read(&bat_priv->tt.vn));
 
        ether_addr_copy(tt_local->common.addr, addr);
@@ -1097,7 +1097,7 @@ int batadv_tt_local_seq_print_text(struct seq_file *seq, void *offset)
                        seq_printf(seq,
                                   " * %pM %4i [%c%c%c%c%c%c] %3u.%03u   (%#.8x)\n",
                                   tt_common_entry->addr,
-                                  BATADV_PRINT_VID(tt_common_entry->vid),
+                                  batadv_print_vid(tt_common_entry->vid),
                                   ((tt_common_entry->flags &
                                     BATADV_TT_CLIENT_ROAM) ? 'R' : '.'),
                                   no_purge ? 'P' : '.',
@@ -1296,7 +1296,7 @@ batadv_tt_local_set_pending(struct batadv_priv *bat_priv,
        batadv_dbg(BATADV_DBG_TT, bat_priv,
                   "Local tt entry (%pM, vid: %d) pending to be removed: %s\n",
                   tt_local_entry->common.addr,
-                  BATADV_PRINT_VID(tt_local_entry->common.vid), message);
+                  batadv_print_vid(tt_local_entry->common.vid), message);
 }
 
 /**
@@ -1727,7 +1727,7 @@ add_orig_entry:
 
        batadv_dbg(BATADV_DBG_TT, bat_priv,
                   "Creating new global tt entry: %pM (vid: %d, via %pM)\n",
-                  common->addr, BATADV_PRINT_VID(common->vid),
+                  common->addr, batadv_print_vid(common->vid),
                   orig_node->orig);
        ret = true;
 
@@ -1835,7 +1835,7 @@ batadv_tt_global_print_entry(struct batadv_priv *bat_priv,
                if (!vlan) {
                        seq_printf(seq,
                                   " * Cannot retrieve VLAN %d for originator %pM\n",
-                                  BATADV_PRINT_VID(tt_common_entry->vid),
+                                  batadv_print_vid(tt_common_entry->vid),
                                   best_entry->orig_node->orig);
                        goto print_list;
                }
@@ -1844,7 +1844,7 @@ batadv_tt_global_print_entry(struct batadv_priv *bat_priv,
                seq_printf(seq,
                           " %c %pM %4i   (%3u) via %pM     (%3u)   (%#.8x) [%c%c%c%c]\n",
                           '*', tt_global_entry->common.addr,
-                          BATADV_PRINT_VID(tt_global_entry->common.vid),
+                          batadv_print_vid(tt_global_entry->common.vid),
                           best_entry->ttvn, best_entry->orig_node->orig,
                           last_ttvn, vlan->tt.crc,
                           ((flags & BATADV_TT_CLIENT_ROAM) ? 'R' : '.'),
@@ -1867,7 +1867,7 @@ print_list:
                if (!vlan) {
                        seq_printf(seq,
                                   " + Cannot retrieve VLAN %d for originator %pM\n",
-                                  BATADV_PRINT_VID(tt_common_entry->vid),
+                                  batadv_print_vid(tt_common_entry->vid),
                                   orig_entry->orig_node->orig);
                        continue;
                }
@@ -1876,7 +1876,7 @@ print_list:
                seq_printf(seq,
                           " %c %pM %4d   (%3u) via %pM     (%3u)   (%#.8x) [%c%c%c%c]\n",
                           '+', tt_global_entry->common.addr,
-                          BATADV_PRINT_VID(tt_global_entry->common.vid),
+                          batadv_print_vid(tt_global_entry->common.vid),
                           orig_entry->ttvn, orig_entry->orig_node->orig,
                           last_ttvn, vlan->tt.crc,
                           ((flags & BATADV_TT_CLIENT_ROAM) ? 'R' : '.'),
@@ -2213,7 +2213,7 @@ batadv_tt_global_del_orig_node(struct batadv_priv *bat_priv,
                                   "Deleting %pM from global tt entry %pM (vid: %d): %s\n",
                                   orig_node->orig,
                                   tt_global_entry->common.addr,
-                                  BATADV_PRINT_VID(vid), message);
+                                  batadv_print_vid(vid), message);
                        _batadv_tt_global_del_orig_entry(tt_global_entry,
                                                         orig_entry);
                }
@@ -2253,12 +2253,13 @@ batadv_tt_global_del_roaming(struct batadv_priv *bat_priv,
                /* it's the last one, mark for roaming. */
                tt_global_entry->common.flags |= BATADV_TT_CLIENT_ROAM;
                tt_global_entry->roam_at = jiffies;
-       } else
+       } else {
                /* there is another entry, we can simply delete this
                 * one and can still use the other one.
                 */
                batadv_tt_global_del_orig_node(bat_priv, tt_global_entry,
                                               orig_node, message);
+       }
 }
 
 /**
@@ -2314,10 +2315,11 @@ static void batadv_tt_global_del(struct batadv_priv *bat_priv,
                /* local entry exists, case 2: client roamed to us. */
                batadv_tt_global_del_orig_list(tt_global_entry);
                batadv_tt_global_free(bat_priv, tt_global_entry, message);
-       } else
+       } else {
                /* no local entry exists, case 1: check for roaming */
                batadv_tt_global_del_roaming(bat_priv, tt_global_entry,
                                             orig_node, message);
+       }
 
 out:
        if (tt_global_entry)
@@ -2375,7 +2377,7 @@ void batadv_tt_global_del_orig(struct batadv_priv *bat_priv,
                                batadv_dbg(BATADV_DBG_TT, bat_priv,
                                           "Deleting global tt entry %pM (vid: %d): %s\n",
                                           tt_global->common.addr,
-                                          BATADV_PRINT_VID(vid), message);
+                                          batadv_print_vid(vid), message);
                                hlist_del_rcu(&tt_common_entry->hash_entry);
                                batadv_tt_global_entry_put(tt_global);
                        }
@@ -2435,7 +2437,7 @@ static void batadv_tt_global_purge(struct batadv_priv *bat_priv)
                        batadv_dbg(BATADV_DBG_TT, bat_priv,
                                   "Deleting global tt entry %pM (vid: %d): %s\n",
                                   tt_global->common.addr,
-                                  BATADV_PRINT_VID(tt_global->common.vid),
+                                  batadv_print_vid(tt_global->common.vid),
                                   msg);
 
                        hlist_del_rcu(&tt_common->hash_entry);
@@ -3650,7 +3652,7 @@ static void batadv_send_roam_adv(struct batadv_priv *bat_priv, u8 *client,
 
        batadv_dbg(BATADV_DBG_TT, bat_priv,
                   "Sending ROAMING_ADV to %pM (client %pM, vid: %d)\n",
-                  orig_node->orig, client, BATADV_PRINT_VID(vid));
+                  orig_node->orig, client, batadv_print_vid(vid));
 
        batadv_inc_counter(bat_priv, BATADV_CNT_TT_ROAM_ADV_TX);
 
@@ -3773,7 +3775,7 @@ static void batadv_tt_local_purge_pending_clients(struct batadv_priv *bat_priv)
                        batadv_dbg(BATADV_DBG_TT, bat_priv,
                                   "Deleting local tt entry (%pM, vid: %d): pending\n",
                                   tt_common->addr,
-                                  BATADV_PRINT_VID(tt_common->vid));
+                                  batadv_print_vid(tt_common->vid));
 
                        batadv_tt_local_size_dec(bat_priv, tt_common->vid);
                        hlist_del_rcu(&tt_common->hash_entry);
@@ -4017,7 +4019,7 @@ bool batadv_tt_add_temporary_global_entry(struct batadv_priv *bat_priv,
 
        batadv_dbg(BATADV_DBG_TT, bat_priv,
                   "Added temporary global client (addr: %pM, vid: %d, orig: %pM)\n",
-                  addr, BATADV_PRINT_VID(vid), orig_node->orig);
+                  addr, batadv_print_vid(vid), orig_node->orig);
        ret = true;
 out:
        return ret;
index 66b25e410a41375e5c70bd7400a5b353bdff4520..ea43a64492479809fe6bdf95b436792078f50e9f 100644 (file)
@@ -1000,7 +1000,6 @@ struct batadv_priv_bat_v {
  * struct batadv_priv - per mesh interface data
  * @mesh_state: current status of the mesh (inactive/active/deactivating)
  * @soft_iface: net device which holds this struct as private data
- * @stats: structure holding the data for the ndo_get_stats() call
  * @bat_counters: mesh internal traffic statistic counters (see batadv_counters)
  * @aggregated_ogms: bool indicating whether OGM aggregation is enabled
  * @bonding: bool indicating whether traffic bonding is enabled
@@ -1055,7 +1054,6 @@ struct batadv_priv_bat_v {
 struct batadv_priv {
        atomic_t mesh_state;
        struct net_device *soft_iface;
-       struct net_device_stats stats;
        u64 __percpu *bat_counters; /* Per cpu counters */
        atomic_t aggregated_ogms;
        atomic_t bonding;
@@ -1377,9 +1375,11 @@ struct batadv_nc_packet {
  *  relevant to batman-adv in the skb->cb buffer in skbs.
  * @decoded: Marks a skb as decoded, which is checked when searching for coding
  *  opportunities in network-coding.c
+ * @num_bcasts: Counter for broadcast packet retransmissions
  */
 struct batadv_skb_cb {
        bool decoded;
+       unsigned int num_bcasts;
 };
 
 /**
@@ -1392,7 +1392,7 @@ struct batadv_skb_cb {
  * @skb: bcast packet's skb buffer
  * @packet_len: size of aggregated OGM packet inside the skb buffer
  * @direct_link_flags: direct link flags for aggregated OGM packets
- * @num_packets: counter for bcast packet retransmission
+ * @num_packets: counter for aggregated OGMv1 packets
  * @delayed_work: work queue callback item for packet sending
  * @if_incoming: pointer to incoming hard-iface or primary iface if
  *  locally generated packet
@@ -1489,6 +1489,7 @@ struct batadv_algo_orig_ops {
 
 /**
  * struct batadv_algo_gw_ops - mesh algorithm callbacks (GW specific)
+ * @init_sel_class: initialize GW selection class (optional)
  * @store_sel_class: parse and stores a new GW selection class (optional)
  * @show_sel_class: prints the current GW selection class (optional)
  * @get_best_gw_node: select the best GW from the list of available nodes
@@ -1499,6 +1500,7 @@ struct batadv_algo_orig_ops {
  * @dump: dump gateways to a netlink socket (optional)
  */
 struct batadv_algo_gw_ops {
+       void (*init_sel_class)(struct batadv_priv *bat_priv);
        ssize_t (*store_sel_class)(struct batadv_priv *bat_priv, char *buff,
                                   size_t count);
        ssize_t (*show_sel_class)(struct batadv_priv *bat_priv, char *buff);
index f307b145ea5405482434a9c37cafeb6d3f32dee4..507b80d59dec4fd3b0eb3c50ed1cd95a78adfcb7 100644 (file)
@@ -301,7 +301,7 @@ done:
 }
 
 static int l2cap_sock_accept(struct socket *sock, struct socket *newsock,
-                            int flags)
+                            int flags, bool kern)
 {
        DEFINE_WAIT_FUNC(wait, woken_wake_function);
        struct sock *sk = sock->sk, *nsk;
index aa1a814ceddca77f790f0c570e9c89ef08ebe186..ac3c650cb234f9985ddf0b54924db9000c4586c3 100644 (file)
@@ -471,7 +471,8 @@ done:
        return err;
 }
 
-static int rfcomm_sock_accept(struct socket *sock, struct socket *newsock, int flags)
+static int rfcomm_sock_accept(struct socket *sock, struct socket *newsock, int flags,
+                             bool kern)
 {
        DEFINE_WAIT_FUNC(wait, woken_wake_function);
        struct sock *sk = sock->sk, *nsk;
index e4e9a2da1e7e7a0e4b9764fe6de8819b9908ef3f..728e0c8dc8e74ccb134b8ed1d493ea8ee49bf49b 100644 (file)
@@ -627,7 +627,7 @@ done:
 }
 
 static int sco_sock_accept(struct socket *sock, struct socket *newsock,
-                          int flags)
+                          int flags, bool kern)
 {
        DEFINE_WAIT_FUNC(wait, woken_wake_function);
        struct sock *sk = sock->sk, *ch;
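
All three Bluetooth accept callbacks above grow the new 'bool kern' parameter
that struct proto_ops now passes down, telling the protocol whether the accept
originates inside the kernel. A hedged sketch of the shape of such a callback,
with stand-in types:

    #include <stdbool.h>
    #include <stdio.h>

    /* hypothetical stand-ins for struct socket / struct proto_ops */
    struct socket_sk { int dummy; };

    struct proto_ops_sk {
            /* accept now carries a 'kern' flag naming the caller's context */
            int (*accept)(struct socket_sk *sock, struct socket_sk *newsock,
                          int flags, bool kern);
    };

    static int my_accept(struct socket_sk *sock, struct socket_sk *newsock,
                         int flags, bool kern)
    {
            (void)sock; (void)newsock; (void)flags;
            /* a kernel-internal accept might e.g. skip credential checks */
            return kern ? 1 : 0;
    }

    static const struct proto_ops_sk my_ops = { .accept = my_accept };

    int main(void)
    {
            struct socket_sk s = { 0 }, n = { 0 };

            printf("%d\n", my_ops.accept(&s, &n, 0, true));
            return 0;
    }
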
diff --git a/net/bpf/Makefile b/net/bpf/Makefile
new file mode 100644 (file)
index 0000000..27b2992
--- /dev/null
@@ -0,0 +1 @@
+obj-y  := test_run.o
diff --git a/net/bpf/test_run.c b/net/bpf/test_run.c
new file mode 100644 (file)
index 0000000..8a6d0a3
--- /dev/null
@@ -0,0 +1,172 @@
+/* Copyright (c) 2017 Facebook
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of version 2 of the GNU General Public
+ * License as published by the Free Software Foundation.
+ */
+#include <linux/bpf.h>
+#include <linux/slab.h>
+#include <linux/vmalloc.h>
+#include <linux/etherdevice.h>
+#include <linux/filter.h>
+#include <linux/sched/signal.h>
+
+static __always_inline u32 bpf_test_run_one(struct bpf_prog *prog, void *ctx)
+{
+       u32 ret;
+
+       preempt_disable();
+       rcu_read_lock();
+       ret = BPF_PROG_RUN(prog, ctx);
+       rcu_read_unlock();
+       preempt_enable();
+
+       return ret;
+}
+
+static u32 bpf_test_run(struct bpf_prog *prog, void *ctx, u32 repeat, u32 *time)
+{
+       u64 time_start, time_spent = 0;
+       u32 ret = 0, i;
+
+       if (!repeat)
+               repeat = 1;
+       time_start = ktime_get_ns();
+       for (i = 0; i < repeat; i++) {
+               ret = bpf_test_run_one(prog, ctx);
+               if (need_resched()) {
+                       if (signal_pending(current))
+                               break;
+                       time_spent += ktime_get_ns() - time_start;
+                       cond_resched();
+                       time_start = ktime_get_ns();
+               }
+       }
+       time_spent += ktime_get_ns() - time_start;
+       do_div(time_spent, repeat);
+       *time = time_spent > U32_MAX ? U32_MAX : (u32)time_spent;
+
+       return ret;
+}
+
+static int bpf_test_finish(union bpf_attr __user *uattr, const void *data,
+                          u32 size, u32 retval, u32 duration)
+{
+       void __user *data_out = u64_to_user_ptr(uattr->test.data_out);
+       int err = -EFAULT;
+
+       if (data_out && copy_to_user(data_out, data, size))
+               goto out;
+       if (copy_to_user(&uattr->test.data_size_out, &size, sizeof(size)))
+               goto out;
+       if (copy_to_user(&uattr->test.retval, &retval, sizeof(retval)))
+               goto out;
+       if (copy_to_user(&uattr->test.duration, &duration, sizeof(duration)))
+               goto out;
+       err = 0;
+out:
+       return err;
+}
+
+static void *bpf_test_init(const union bpf_attr *kattr, u32 size,
+                          u32 headroom, u32 tailroom)
+{
+       void __user *data_in = u64_to_user_ptr(kattr->test.data_in);
+       void *data;
+
+       if (size < ETH_HLEN || size > PAGE_SIZE - headroom - tailroom)
+               return ERR_PTR(-EINVAL);
+
+       data = kzalloc(size + headroom + tailroom, GFP_USER);
+       if (!data)
+               return ERR_PTR(-ENOMEM);
+
+       if (copy_from_user(data + headroom, data_in, size)) {
+               kfree(data);
+               return ERR_PTR(-EFAULT);
+       }
+       return data;
+}
+
+int bpf_prog_test_run_skb(struct bpf_prog *prog, const union bpf_attr *kattr,
+                         union bpf_attr __user *uattr)
+{
+       bool is_l2 = false, is_direct_pkt_access = false;
+       u32 size = kattr->test.data_size_in;
+       u32 repeat = kattr->test.repeat;
+       u32 retval, duration;
+       struct sk_buff *skb;
+       void *data;
+       int ret;
+
+       data = bpf_test_init(kattr, size, NET_SKB_PAD,
+                            SKB_DATA_ALIGN(sizeof(struct skb_shared_info)));
+       if (IS_ERR(data))
+               return PTR_ERR(data);
+
+       switch (prog->type) {
+       case BPF_PROG_TYPE_SCHED_CLS:
+       case BPF_PROG_TYPE_SCHED_ACT:
+               is_l2 = true;
+               /* fall through */
+       case BPF_PROG_TYPE_LWT_IN:
+       case BPF_PROG_TYPE_LWT_OUT:
+       case BPF_PROG_TYPE_LWT_XMIT:
+               is_direct_pkt_access = true;
+               break;
+       default:
+               break;
+       }
+
+       skb = build_skb(data, 0);
+       if (!skb) {
+               kfree(data);
+               return -ENOMEM;
+       }
+
+       skb_reserve(skb, NET_SKB_PAD);
+       __skb_put(skb, size);
+       skb->protocol = eth_type_trans(skb, current->nsproxy->net_ns->loopback_dev);
+       skb_reset_network_header(skb);
+
+       if (is_l2)
+               __skb_push(skb, ETH_HLEN);
+       if (is_direct_pkt_access)
+               bpf_compute_data_end(skb);
+       retval = bpf_test_run(prog, skb, repeat, &duration);
+       if (!is_l2)
+               __skb_push(skb, ETH_HLEN);
+       size = skb->len;
+       /* bpf program can never convert linear skb to non-linear */
+       if (WARN_ON_ONCE(skb_is_nonlinear(skb)))
+               size = skb_headlen(skb);
+       ret = bpf_test_finish(uattr, skb->data, size, retval, duration);
+       kfree_skb(skb);
+       return ret;
+}
+
+int bpf_prog_test_run_xdp(struct bpf_prog *prog, const union bpf_attr *kattr,
+                         union bpf_attr __user *uattr)
+{
+       u32 size = kattr->test.data_size_in;
+       u32 repeat = kattr->test.repeat;
+       struct xdp_buff xdp = {};
+       u32 retval, duration;
+       void *data;
+       int ret;
+
+       data = bpf_test_init(kattr, size, XDP_PACKET_HEADROOM, 0);
+       if (IS_ERR(data))
+               return PTR_ERR(data);
+
+       xdp.data_hard_start = data;
+       xdp.data = data + XDP_PACKET_HEADROOM;
+       xdp.data_end = xdp.data + size;
+
+       retval = bpf_test_run(prog, &xdp, repeat, &duration);
+       if (xdp.data != data + XDP_PACKET_HEADROOM)
+               size = xdp.data_end - xdp.data;
+       ret = bpf_test_finish(uattr, xdp.data, size, retval, duration);
+       kfree(data);
+       return ret;
+}
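
The new BPF_PROG_TEST_RUN command is driven from user space through the
bpf(2) syscall. A hedged sketch, assuming the uapi <linux/bpf.h> from this
tree (which defines the test fields used below) and a program fd already
obtained via BPF_PROG_LOAD; test_run_sk is a hypothetical helper:

    #include <linux/bpf.h>
    #include <stdio.h>
    #include <string.h>
    #include <unistd.h>
    #include <sys/syscall.h>

    static int test_run_sk(int prog_fd, void *pkt, __u32 pkt_len)
    {
            char out[1500];
            union bpf_attr attr;

            memset(&attr, 0, sizeof(attr));
            attr.test.prog_fd = prog_fd;
            attr.test.data_in = (unsigned long)pkt; /* copied in by kernel */
            attr.test.data_size_in = pkt_len;
            attr.test.data_out = (unsigned long)out; /* result copied back */
            attr.test.repeat = 1000; /* duration reports avg ns per run */

            if (syscall(__NR_bpf, BPF_PROG_TEST_RUN, &attr, sizeof(attr)) < 0)
                    return -1;

            printf("retval=%u avg=%uns out=%u bytes\n",
                   attr.test.retval, attr.test.duration,
                   attr.test.data_size_out);
            return 0;
    }
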
index 4f598dc2d9168cd323a3027d77d601854aa35f04..5a40a87c4f4fff9cdfa8e54ba54fe47587dc97b0 100644 (file)
@@ -106,7 +106,7 @@ static struct net_bridge_fdb_entry *br_fdb_find(struct net_bridge *br,
        struct hlist_head *head = &br->hash[br_mac_hash(addr, vid)];
        struct net_bridge_fdb_entry *fdb;
 
-       WARN_ON_ONCE(!br_hash_lock_held(br));
+       lockdep_assert_held_once(&br->hash_lock);
 
        rcu_read_lock();
        fdb = fdb_find_rcu(head, addr, vid);
@@ -594,6 +594,9 @@ void br_fdb_update(struct net_bridge *br, struct net_bridge_port *source,
                                fdb->updated = now;
                        if (unlikely(added_by_user))
                                fdb->added_by_user = 1;
+                       /* Take over HW learned entry */
+                       if (unlikely(fdb->added_by_external_learn))
+                               fdb->added_by_external_learn = 0;
                        if (unlikely(fdb_modified))
                                fdb_notify(br, fdb, RTM_NEWNEIGH);
                }
@@ -854,6 +857,8 @@ static int __br_fdb_add(struct ndmsg *ndm, struct net_bridge *br,
                br_fdb_update(br, p, addr, vid, true);
                rcu_read_unlock();
                local_bh_enable();
+       } else if (ndm->ndm_flags & NTF_EXT_LEARNED) {
+               err = br_fdb_external_learn_add(br, p, addr, vid);
        } else {
                spin_lock_bh(&br->hash_lock);
                err = fdb_add_entry(br, p, addr, ndm->ndm_state,
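
The open-coded br_hash_lock_held() wrapper is dropped in favor of the generic
lockdep_assert_held_once(), which checks lock ownership under CONFIG_LOCKDEP
and compiles to nothing otherwise. A user-space sketch of the same
assert-under-lock idiom with pthreads, all names hypothetical:

    #include <assert.h>
    #include <pthread.h>
    #include <stdio.h>

    /* debug-only held-lock assertion in the spirit of lockdep_assert_held() */
    struct dbg_lock {
            pthread_mutex_t mu;
            pthread_t owner; /* valid only while held */
            int held;
    };

    static void dbg_lock_acquire(struct dbg_lock *l)
    {
            pthread_mutex_lock(&l->mu);
            l->owner = pthread_self();
            l->held = 1;
    }

    static void dbg_lock_release(struct dbg_lock *l)
    {
            l->held = 0;
            pthread_mutex_unlock(&l->mu);
    }

    #define dbg_assert_held(l) \
            assert((l)->held && pthread_equal((l)->owner, pthread_self()))

    static struct dbg_lock table_lock = { .mu = PTHREAD_MUTEX_INITIALIZER };

    static void table_update(void)
    {
            dbg_assert_held(&table_lock); /* catches callers missing the lock */
            puts("updating under lock");
    }

    int main(void)
    {
            dbg_lock_acquire(&table_lock);
            table_update();
            dbg_lock_release(&table_lock);
            return 0;
    }
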
index 8ac1770aa222f21f89027d303a218c49be9dc650..6eb52d422dd9c871dc4a54304fbc707ef68b90ba 100644 (file)
@@ -22,6 +22,7 @@
 #include <linux/rtnetlink.h>
 #include <linux/if_ether.h>
 #include <linux/slab.h>
+#include <net/dsa.h>
 #include <net/sock.h>
 #include <linux/if_vlan.h>
 #include <net/switchdev.h>
index 236f34244dbe1f2cd2bdfaf9d4eceb0765276882..013f2290bfa56df90708879437a762c812dec101 100644 (file)
@@ -30,6 +30,7 @@ EXPORT_SYMBOL(br_should_route_hook);
 static int
 br_netif_receive_skb(struct net *net, struct sock *sk, struct sk_buff *skb)
 {
+       br_drop_fake_rtable(skb);
        return netif_receive_skb(skb);
 }
 
index 95087e6e8258366af95579bb308d1a6e18266f0e..067cf03134492a33f10982755105e103666cd1ef 100644 (file)
@@ -521,21 +521,6 @@ static unsigned int br_nf_pre_routing(void *priv,
 }
 
 
-/* PF_BRIDGE/LOCAL_IN ************************************************/
-/* The packet is locally destined, which requires a real
- * dst_entry, so detach the fake one.  On the way up, the
- * packet would pass through PRE_ROUTING again (which already
- * took place when the packet entered the bridge), but we
- * register an IPv4 PRE_ROUTING 'sabotage' hook that will
- * prevent this from happening. */
-static unsigned int br_nf_local_in(void *priv,
-                                  struct sk_buff *skb,
-                                  const struct nf_hook_state *state)
-{
-       br_drop_fake_rtable(skb);
-       return NF_ACCEPT;
-}
-
 /* PF_BRIDGE/FORWARD *************************************************/
 static int br_nf_forward_finish(struct net *net, struct sock *sk, struct sk_buff *skb)
 {
@@ -721,18 +706,20 @@ static unsigned int nf_bridge_mtu_reduction(const struct sk_buff *skb)
 
 static int br_nf_dev_queue_xmit(struct net *net, struct sock *sk, struct sk_buff *skb)
 {
-       struct nf_bridge_info *nf_bridge;
-       unsigned int mtu_reserved;
+       struct nf_bridge_info *nf_bridge = nf_bridge_info_get(skb);
+       unsigned int mtu, mtu_reserved;
 
        mtu_reserved = nf_bridge_mtu_reduction(skb);
+       mtu = skb->dev->mtu;
 
-       if (skb_is_gso(skb) || skb->len + mtu_reserved <= skb->dev->mtu) {
+       if (nf_bridge->frag_max_size && nf_bridge->frag_max_size < mtu)
+               mtu = nf_bridge->frag_max_size;
+
+       if (skb_is_gso(skb) || skb->len + mtu_reserved <= mtu) {
                nf_bridge_info_free(skb);
                return br_dev_queue_push_xmit(net, sk, skb);
        }
 
-       nf_bridge = nf_bridge_info_get(skb);
-
        /* This is wrong! We should preserve the original fragment
         * boundaries by preserving frag_list rather than refragmenting.
         */
@@ -907,12 +894,6 @@ static struct nf_hook_ops br_nf_ops[] __read_mostly = {
                .hooknum = NF_BR_PRE_ROUTING,
                .priority = NF_BR_PRI_BRNF,
        },
-       {
-               .hook = br_nf_local_in,
-               .pf = NFPROTO_BRIDGE,
-               .hooknum = NF_BR_LOCAL_IN,
-               .priority = NF_BR_PRI_BRNF,
-       },
        {
                .hook = br_nf_forward_ip,
                .pf = NFPROTO_BRIDGE,
@@ -1016,13 +997,10 @@ int br_nf_hook_thresh(unsigned int hook, struct net *net,
        if (!elem)
                return okfn(net, sk, skb);
 
-       /* We may already have this, but read-locks nest anyway */
-       rcu_read_lock();
        nf_hook_state_init(&state, hook, NFPROTO_BRIDGE, indev, outdev,
                           sk, net, okfn);
 
        ret = nf_hook_slow(skb, &state, elem);
-       rcu_read_unlock();
        if (ret == 1)
                ret = okfn(net, sk, skb);
 
index 2288fca7756c5103fc4e8420ad61a2f9e633c097..61368186edea53841b1f00b37ddaa0d26461aee3 100644 (file)
@@ -531,15 +531,6 @@ int br_fdb_external_learn_add(struct net_bridge *br, struct net_bridge_port *p,
 int br_fdb_external_learn_del(struct net_bridge *br, struct net_bridge_port *p,
                              const unsigned char *addr, u16 vid);
 
-static inline bool br_hash_lock_held(struct net_bridge *br)
-{
-#ifdef CONFIG_LOCKDEP
-       return lockdep_is_held(&br->hash_lock);
-#else
-       return true;
-#endif
-}
-
 /* br_forward.c */
 enum br_pkt_type {
        BR_PKT_UNICAST,
index 98b9c8e8615ebc6e2ddefd1885a01af3ef58781b..707caea397433b66c887e527f0e4532eaf260880 100644 (file)
@@ -62,10 +62,10 @@ print_ports(const struct sk_buff *skb, uint8_t protocol, int offset)
                pptr = skb_header_pointer(skb, offset,
                                          sizeof(_ports), &_ports);
                if (pptr == NULL) {
-                       printk(" INCOMPLETE TCP/UDP header");
+                       pr_cont(" INCOMPLETE TCP/UDP header");
                        return;
                }
-               printk(" SPT=%u DPT=%u", ntohs(pptr->src), ntohs(pptr->dst));
+               pr_cont(" SPT=%u DPT=%u", ntohs(pptr->src), ntohs(pptr->dst));
        }
 }
 
@@ -100,11 +100,11 @@ ebt_log_packet(struct net *net, u_int8_t pf, unsigned int hooknum,
 
                ih = skb_header_pointer(skb, 0, sizeof(_iph), &_iph);
                if (ih == NULL) {
-                       printk(" INCOMPLETE IP header");
+                       pr_cont(" INCOMPLETE IP header");
                        goto out;
                }
-               printk(" IP SRC=%pI4 IP DST=%pI4, IP tos=0x%02X, IP proto=%d",
-                      &ih->saddr, &ih->daddr, ih->tos, ih->protocol);
+               pr_cont(" IP SRC=%pI4 IP DST=%pI4, IP tos=0x%02X, IP proto=%d",
+                       &ih->saddr, &ih->daddr, ih->tos, ih->protocol);
                print_ports(skb, ih->protocol, ih->ihl*4);
                goto out;
        }
@@ -120,11 +120,11 @@ ebt_log_packet(struct net *net, u_int8_t pf, unsigned int hooknum,
 
                ih = skb_header_pointer(skb, 0, sizeof(_iph), &_iph);
                if (ih == NULL) {
-                       printk(" INCOMPLETE IPv6 header");
+                       pr_cont(" INCOMPLETE IPv6 header");
                        goto out;
                }
-               printk(" IPv6 SRC=%pI6 IPv6 DST=%pI6, IPv6 priority=0x%01X, Next Header=%d",
-                      &ih->saddr, &ih->daddr, ih->priority, ih->nexthdr);
+               pr_cont(" IPv6 SRC=%pI6 IPv6 DST=%pI6, IPv6 priority=0x%01X, Next Header=%d",
+                       &ih->saddr, &ih->daddr, ih->priority, ih->nexthdr);
                nexthdr = ih->nexthdr;
                offset_ph = ipv6_skip_exthdr(skb, sizeof(_iph), &nexthdr, &frag_off);
                if (offset_ph == -1)
@@ -142,12 +142,12 @@ ebt_log_packet(struct net *net, u_int8_t pf, unsigned int hooknum,
 
                ah = skb_header_pointer(skb, 0, sizeof(_arph), &_arph);
                if (ah == NULL) {
-                       printk(" INCOMPLETE ARP header");
+                       pr_cont(" INCOMPLETE ARP header");
                        goto out;
                }
-               printk(" ARP HTYPE=%d, PTYPE=0x%04x, OPCODE=%d",
-                      ntohs(ah->ar_hrd), ntohs(ah->ar_pro),
-                      ntohs(ah->ar_op));
+               pr_cont(" ARP HTYPE=%d, PTYPE=0x%04x, OPCODE=%d",
+                       ntohs(ah->ar_hrd), ntohs(ah->ar_pro),
+                       ntohs(ah->ar_op));
 
                /* If it's for Ethernet and the lengths are OK,
                 * then log the ARP payload
@@ -161,17 +161,17 @@ ebt_log_packet(struct net *net, u_int8_t pf, unsigned int hooknum,
                        ap = skb_header_pointer(skb, sizeof(_arph),
                                                sizeof(_arpp), &_arpp);
                        if (ap == NULL) {
-                               printk(" INCOMPLETE ARP payload");
+                               pr_cont(" INCOMPLETE ARP payload");
                                goto out;
                        }
-                       printk(" ARP MAC SRC=%pM ARP IP SRC=%pI4 ARP MAC DST=%pM ARP IP DST=%pI4",
-                                       ap->mac_src, ap->ip_src, ap->mac_dst, ap->ip_dst);
+                       pr_cont(" ARP MAC SRC=%pM ARP IP SRC=%pI4 ARP MAC DST=%pM ARP IP DST=%pI4",
+                               ap->mac_src, ap->ip_src,
+                               ap->mac_dst, ap->ip_dst);
                }
        }
 out:
-       printk("\n");
+       pr_cont("\n");
        spin_unlock_bh(&ebt_log_lock);
-
 }
 
 static unsigned int
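
The conversions above matter because these log lines are built piecewise:
only the first fragment may start a record, and every later fragment needs
pr_cont() (KERN_CONT) or the kernel would split it onto a new line. A minimal
hypothetical module showing the pairing:

    /* sketch only: builds against a kernel tree, hypothetical module name */
    #include <linux/module.h>
    #include <linux/printk.h>

    static int __init cont_demo_init(void)
    {
            pr_info("hdr:");           /* starts a new log record */
            pr_cont(" SPT=%u", 1024u); /* appends to the same record */
            pr_cont(" DPT=%u", 80u);
            pr_cont("\n");             /* terminates the record */
            return 0;
    }

    static void __exit cont_demo_exit(void)
    {
    }

    module_init(cont_demo_init);
    module_exit(cont_demo_exit);
    MODULE_LICENSE("GPL");
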
index 206dc266ecd237c2874d25352dd631e3bc31b002..346ef6b00b8f05b62edc911d06c01692624596d9 100644 (file)
@@ -375,11 +375,7 @@ static int nft_reject_bridge_init(const struct nft_ctx *ctx,
                                  const struct nlattr * const tb[])
 {
        struct nft_reject *priv = nft_expr_priv(expr);
-       int icmp_code, err;
-
-       err = nft_reject_bridge_validate(ctx, expr, NULL);
-       if (err < 0)
-               return err;
+       int icmp_code;
 
        if (tb[NFTA_REJECT_TYPE] == NULL)
                return -EINVAL;
index 5488e4a6ccd062e6f6e7e2b841dde5ef055d4337..abf7d854a94db9082eccb4d62d709b639c08a0ef 100644 (file)
@@ -75,9 +75,7 @@ static int stats_timer __read_mostly = 1;
 module_param(stats_timer, int, S_IRUGO);
 MODULE_PARM_DESC(stats_timer, "enable timer for statistics (default:on)");
 
-/* receive filters subscribed for 'all' CAN devices */
-struct dev_rcv_lists can_rx_alldev_list;
-static DEFINE_SPINLOCK(can_rcvlists_lock);
+static int can_net_id;
 
 static struct kmem_cache *rcv_cache __read_mostly;
 
@@ -145,9 +143,6 @@ static int can_create(struct net *net, struct socket *sock, int protocol,
        if (protocol < 0 || protocol >= CAN_NPROTO)
                return -EINVAL;
 
-       if (!net_eq(net, &init_net))
-               return -EAFNOSUPPORT;
-
        cp = can_get_proto(protocol);
 
 #ifdef CONFIG_MODULES
@@ -331,10 +326,11 @@ EXPORT_SYMBOL(can_send);
  * af_can rx path
  */
 
-static struct dev_rcv_lists *find_dev_rcv_lists(struct net_device *dev)
+static struct dev_rcv_lists *find_dev_rcv_lists(struct net *net,
+                                               struct net_device *dev)
 {
        if (!dev)
-               return &can_rx_alldev_list;
+               return net->can.can_rx_alldev_list;
        else
                return (struct dev_rcv_lists *)dev->ml_priv;
 }
@@ -467,9 +463,9 @@ static struct hlist_head *find_rcv_list(canid_t *can_id, canid_t *mask,
  *  -ENOMEM on missing cache mem to create subscription entry
  *  -ENODEV unknown device
  */
-int can_rx_register(struct net_device *dev, canid_t can_id, canid_t mask,
-                   void (*func)(struct sk_buff *, void *), void *data,
-                   char *ident, struct sock *sk)
+int can_rx_register(struct net *net, struct net_device *dev, canid_t can_id,
+                   canid_t mask, void (*func)(struct sk_buff *, void *),
+                   void *data, char *ident, struct sock *sk)
 {
        struct receiver *r;
        struct hlist_head *rl;
@@ -481,13 +477,16 @@ int can_rx_register(struct net_device *dev, canid_t can_id, canid_t mask,
        if (dev && dev->type != ARPHRD_CAN)
                return -ENODEV;
 
+       if (dev && !net_eq(net, dev_net(dev)))
+               return -ENODEV;
+
        r = kmem_cache_alloc(rcv_cache, GFP_KERNEL);
        if (!r)
                return -ENOMEM;
 
-       spin_lock(&can_rcvlists_lock);
+       spin_lock(&net->can.can_rcvlists_lock);
 
-       d = find_dev_rcv_lists(dev);
+       d = find_dev_rcv_lists(net, dev);
        if (d) {
                rl = find_rcv_list(&can_id, &mask, d);
 
@@ -510,7 +509,7 @@ int can_rx_register(struct net_device *dev, canid_t can_id, canid_t mask,
                err = -ENODEV;
        }
 
-       spin_unlock(&can_rcvlists_lock);
+       spin_unlock(&net->can.can_rcvlists_lock);
 
        return err;
 }
@@ -540,8 +539,9 @@ static void can_rx_delete_receiver(struct rcu_head *rp)
  * Description:
  *  Removes subscription entry depending on given (subscription) values.
  */
-void can_rx_unregister(struct net_device *dev, canid_t can_id, canid_t mask,
-                      void (*func)(struct sk_buff *, void *), void *data)
+void can_rx_unregister(struct net *net, struct net_device *dev, canid_t can_id,
+                      canid_t mask, void (*func)(struct sk_buff *, void *),
+                      void *data)
 {
        struct receiver *r = NULL;
        struct hlist_head *rl;
@@ -550,9 +550,12 @@ void can_rx_unregister(struct net_device *dev, canid_t can_id, canid_t mask,
        if (dev && dev->type != ARPHRD_CAN)
                return;
 
-       spin_lock(&can_rcvlists_lock);
+       if (dev && !net_eq(net, dev_net(dev)))
+               return;
 
-       d = find_dev_rcv_lists(dev);
+       spin_lock(&net->can.can_rcvlists_lock);
+
+       d = find_dev_rcv_lists(net, dev);
        if (!d) {
                pr_err("BUG: receive list not found for "
                       "dev %s, id %03X, mask %03X\n",
@@ -598,7 +601,7 @@ void can_rx_unregister(struct net_device *dev, canid_t can_id, canid_t mask,
        }
 
  out:
-       spin_unlock(&can_rcvlists_lock);
+       spin_unlock(&net->can.can_rcvlists_lock);
 
        /* schedule the receiver item for deletion */
        if (r) {
@@ -696,10 +699,10 @@ static void can_receive(struct sk_buff *skb, struct net_device *dev)
        rcu_read_lock();
 
        /* deliver the packet to sockets listening on all devices */
-       matches = can_rcv_filter(&can_rx_alldev_list, skb);
+       matches = can_rcv_filter(dev_net(dev)->can.can_rx_alldev_list, skb);
 
        /* find receive list for this device */
-       d = find_dev_rcv_lists(dev);
+       d = find_dev_rcv_lists(dev_net(dev), dev);
        if (d)
                matches += can_rcv_filter(d, skb);
 
@@ -719,9 +722,6 @@ static int can_rcv(struct sk_buff *skb, struct net_device *dev,
 {
        struct canfd_frame *cfd = (struct canfd_frame *)skb->data;
 
-       if (unlikely(!net_eq(dev_net(dev), &init_net)))
-               goto drop;
-
        if (WARN_ONCE(dev->type != ARPHRD_CAN ||
                      skb->len != CAN_MTU ||
                      cfd->len > CAN_MAX_DLEN,
@@ -743,9 +743,6 @@ static int canfd_rcv(struct sk_buff *skb, struct net_device *dev,
 {
        struct canfd_frame *cfd = (struct canfd_frame *)skb->data;
 
-       if (unlikely(!net_eq(dev_net(dev), &init_net)))
-               goto drop;
-
        if (WARN_ONCE(dev->type != ARPHRD_CAN ||
                      skb->len != CANFD_MTU ||
                      cfd->len > CANFD_MAX_DLEN,
@@ -835,9 +832,6 @@ static int can_notifier(struct notifier_block *nb, unsigned long msg,
        struct net_device *dev = netdev_notifier_info_to_dev(ptr);
        struct dev_rcv_lists *d;
 
-       if (!net_eq(dev_net(dev), &init_net))
-               return NOTIFY_DONE;
-
        if (dev->type != ARPHRD_CAN)
                return NOTIFY_DONE;
 
@@ -855,7 +849,7 @@ static int can_notifier(struct notifier_block *nb, unsigned long msg,
                break;
 
        case NETDEV_UNREGISTER:
-               spin_lock(&can_rcvlists_lock);
+               spin_lock(&dev_net(dev)->can.can_rcvlists_lock);
 
                d = dev->ml_priv;
                if (d) {
@@ -869,7 +863,7 @@ static int can_notifier(struct notifier_block *nb, unsigned long msg,
                        pr_err("can: notifier: receive list not found for dev "
                               "%s\n", dev->name);
 
-               spin_unlock(&can_rcvlists_lock);
+               spin_unlock(&dev_net(dev)->can.can_rcvlists_lock);
 
                break;
        }
@@ -877,6 +871,40 @@ static int can_notifier(struct notifier_block *nb, unsigned long msg,
        return NOTIFY_DONE;
 }
 
+static int can_pernet_init(struct net *net)
+{
+       net->can.can_rcvlists_lock =
+               __SPIN_LOCK_UNLOCKED(net->can.can_rcvlists_lock);
+       net->can.can_rx_alldev_list =
+               kzalloc(sizeof(struct dev_rcv_lists), GFP_KERNEL);
+
+       if (IS_ENABLED(CONFIG_PROC_FS))
+               can_init_proc(net);
+
+       return 0;
+}
+
+static void can_pernet_exit(struct net *net)
+{
+       struct net_device *dev;
+
+       if (IS_ENABLED(CONFIG_PROC_FS))
+               can_remove_proc(net);
+
+       /* remove created dev_rcv_lists from still registered CAN devices */
+       rcu_read_lock();
+       for_each_netdev_rcu(net, dev) {
+               if (dev->type == ARPHRD_CAN && dev->ml_priv) {
+                       struct dev_rcv_lists *d = dev->ml_priv;
+
+                       BUG_ON(d->entries);
+                       kfree(d);
+                       dev->ml_priv = NULL;
+               }
+       }
+       rcu_read_unlock();
+}
+
 /*
  * af_can module init/exit functions
  */
@@ -902,6 +930,13 @@ static struct notifier_block can_netdev_notifier __read_mostly = {
        .notifier_call = can_notifier,
 };
 
+static struct pernet_operations can_pernet_ops __read_mostly = {
+       .init = can_pernet_init,
+       .exit = can_pernet_exit,
+       .id = &can_net_id,
+       .size = 0,
+};
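
The pernet_operations pair replaces module-init-time setup of the formerly global receive list and procfs entries: .init runs once for every namespace as it is created, .exit as it dies. Note that can_pernet_init() above does not check the kzalloc() result; a defensive sketch of the same hook, assuming that an error returned from a pernet init hook aborts creation of the namespace (which is how such failures propagate):

static int demo_pernet_init(struct net *net)
{
	spin_lock_init(&net->can.can_rcvlists_lock);
	net->can.can_rx_alldev_list =
		kzalloc(sizeof(struct dev_rcv_lists), GFP_KERNEL);
	if (!net->can.can_rx_alldev_list)
		return -ENOMEM;	/* propagated; the netns is not created */

	if (IS_ENABLED(CONFIG_PROC_FS))
		can_init_proc(net);

	return 0;
}
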
+
 static __init int can_init(void)
 {
        /* check for correct padding to be able to use the structs similarly */
@@ -912,8 +947,6 @@ static __init int can_init(void)
 
        pr_info("can: controller area network core (" CAN_VERSION_STRING ")\n");
 
-       memset(&can_rx_alldev_list, 0, sizeof(can_rx_alldev_list));
-
        rcv_cache = kmem_cache_create("can_receiver", sizeof(struct receiver),
                                      0, 0, NULL);
        if (!rcv_cache)
@@ -925,9 +958,10 @@ static __init int can_init(void)
                        setup_timer(&can_stattimer, can_stat_update, 0);
                        mod_timer(&can_stattimer, round_jiffies(jiffies + HZ));
                }
-               can_init_proc();
        }
 
+       register_pernet_subsys(&can_pernet_ops);
+
        /* protocol register */
        sock_register(&can_family_ops);
        register_netdevice_notifier(&can_netdev_notifier);
@@ -939,13 +973,9 @@ static __init int can_init(void)
 
 static __exit void can_exit(void)
 {
-       struct net_device *dev;
-
        if (IS_ENABLED(CONFIG_PROC_FS)) {
                if (stats_timer)
                        del_timer_sync(&can_stattimer);
-
-               can_remove_proc();
        }
 
        /* protocol unregister */
@@ -954,19 +984,7 @@ static __exit void can_exit(void)
        unregister_netdevice_notifier(&can_netdev_notifier);
        sock_unregister(PF_CAN);
 
-       /* remove created dev_rcv_lists from still registered CAN devices */
-       rcu_read_lock();
-       for_each_netdev_rcu(&init_net, dev) {
-               if (dev->type == ARPHRD_CAN && dev->ml_priv) {
-
-                       struct dev_rcv_lists *d = dev->ml_priv;
-
-                       BUG_ON(d->entries);
-                       kfree(d);
-                       dev->ml_priv = NULL;
-               }
-       }
-       rcu_read_unlock();
+       unregister_pernet_subsys(&can_pernet_ops);
 
        rcu_barrier(); /* Wait for completion of call_rcu()'s */
 
index b86f5129e8385fe84ef671bb914e8e05c2977ca0..f273c9d9b129954ba36b6808981436a2836c7ea5 100644 (file)
@@ -114,8 +114,8 @@ struct s_pstats {
 extern struct dev_rcv_lists can_rx_alldev_list;
 
 /* function prototypes for the CAN networklayer procfs (proc.c) */
-void can_init_proc(void);
-void can_remove_proc(void);
+void can_init_proc(struct net *net);
+void can_remove_proc(struct net *net);
 void can_stat_update(unsigned long data);
 
 /* structures and variables from af_can.c needed in proc.c for reading */
index 95d13b233c65161cf3595a8b0036207f5c2892e3..1976629a84630e76911bc02297fa50f2dbe76ec6 100644 (file)
@@ -764,8 +764,8 @@ static void bcm_remove_op(struct bcm_op *op)
 static void bcm_rx_unreg(struct net_device *dev, struct bcm_op *op)
 {
        if (op->rx_reg_dev == dev) {
-               can_rx_unregister(dev, op->can_id, REGMASK(op->can_id),
-                                 bcm_rx_handler, op);
+               can_rx_unregister(&init_net, dev, op->can_id,
+                                 REGMASK(op->can_id), bcm_rx_handler, op);
 
                /* mark as removed subscription */
                op->rx_reg_dev = NULL;
@@ -808,7 +808,7 @@ static int bcm_delete_rx_op(struct list_head *ops, struct bcm_msg_head *mh,
                                        }
                                }
                        } else
-                               can_rx_unregister(NULL, op->can_id,
+                               can_rx_unregister(&init_net, NULL, op->can_id,
                                                  REGMASK(op->can_id),
                                                  bcm_rx_handler, op);
 
@@ -1222,7 +1222,8 @@ static int bcm_rx_setup(struct bcm_msg_head *msg_head, struct msghdr *msg,
 
                        dev = dev_get_by_index(&init_net, ifindex);
                        if (dev) {
-                               err = can_rx_register(dev, op->can_id,
+                               err = can_rx_register(&init_net, dev,
+                                                     op->can_id,
                                                      REGMASK(op->can_id),
                                                      bcm_rx_handler, op,
                                                      "bcm", sk);
@@ -1232,7 +1233,7 @@ static int bcm_rx_setup(struct bcm_msg_head *msg_head, struct msghdr *msg,
                        }
 
                } else
-                       err = can_rx_register(NULL, op->can_id,
+                       err = can_rx_register(&init_net, NULL, op->can_id,
                                              REGMASK(op->can_id),
                                              bcm_rx_handler, op, "bcm", sk);
                if (err) {
@@ -1528,7 +1529,7 @@ static int bcm_release(struct socket *sock)
                                }
                        }
                } else
-                       can_rx_unregister(NULL, op->can_id,
+                       can_rx_unregister(&init_net, NULL, op->can_id,
                                          REGMASK(op->can_id),
                                          bcm_rx_handler, op);
 
index 7056a1a2bb70098e691ce557f05e5bc1f27cb42f..3c117a33e15f953213b23fe721804deafa73dc1d 100644 (file)
@@ -440,14 +440,14 @@ static void can_can_gw_rcv(struct sk_buff *skb, void *data)
 
 static inline int cgw_register_filter(struct cgw_job *gwj)
 {
-       return can_rx_register(gwj->src.dev, gwj->ccgw.filter.can_id,
+       return can_rx_register(&init_net, gwj->src.dev, gwj->ccgw.filter.can_id,
                               gwj->ccgw.filter.can_mask, can_can_gw_rcv,
                               gwj, "gw", NULL);
 }
 
 static inline void cgw_unregister_filter(struct cgw_job *gwj)
 {
-       can_rx_unregister(gwj->src.dev, gwj->ccgw.filter.can_id,
+       can_rx_unregister(&init_net, gwj->src.dev, gwj->ccgw.filter.can_id,
                          gwj->ccgw.filter.can_mask, can_can_gw_rcv, gwj);
 }
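
Both bcm.c (above) and gw.c still pass the initial namespace explicitly, so BCM sockets and CAN gateway jobs stay pinned to init_net at this stage; only the af_can core, procfs and raw sockets become namespace-aware here. If BCM were converted as well, the registration would presumably take the socket's own namespace instead, e.g.:

	/* Hypothetical follow-up form, not part of this commit: */
	err = can_rx_register(sock_net(sk), dev, op->can_id,
			      REGMASK(op->can_id), bcm_rx_handler, op,
			      "bcm", sk);
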
 
index 85ef7bb0f1768fdc34e41baae297893e4758d16c..9a8d54d57b229139977a1fc85a7dbefbe3bb0b2b 100644 (file)
 #define CAN_PROC_RCVLIST_EFF "rcvlist_eff"
 #define CAN_PROC_RCVLIST_ERR "rcvlist_err"
 
-static struct proc_dir_entry *can_dir;
-static struct proc_dir_entry *pde_version;
-static struct proc_dir_entry *pde_stats;
-static struct proc_dir_entry *pde_reset_stats;
-static struct proc_dir_entry *pde_rcvlist_all;
-static struct proc_dir_entry *pde_rcvlist_fil;
-static struct proc_dir_entry *pde_rcvlist_inv;
-static struct proc_dir_entry *pde_rcvlist_sff;
-static struct proc_dir_entry *pde_rcvlist_eff;
-static struct proc_dir_entry *pde_rcvlist_err;
-
 static int user_reset;
 
 static const char rx_list_name[][8] = {
@@ -351,20 +340,21 @@ static inline void can_rcvlist_proc_show_one(struct seq_file *m, int idx,
 static int can_rcvlist_proc_show(struct seq_file *m, void *v)
 {
        /* double cast to prevent GCC warning */
-       int idx = (int)(long)m->private;
+       int idx = (int)(long)PDE_DATA(m->file->f_inode);
        struct net_device *dev;
        struct dev_rcv_lists *d;
+       struct net *net = m->private;
 
        seq_printf(m, "\nreceive list '%s':\n", rx_list_name[idx]);
 
        rcu_read_lock();
 
        /* receive list for 'all' CAN devices (dev == NULL) */
-       d = &can_rx_alldev_list;
+       d = net->can.can_rx_alldev_list;
        can_rcvlist_proc_show_one(m, idx, NULL, d);
 
        /* receive list for registered CAN devices */
-       for_each_netdev_rcu(&init_net, dev) {
+       for_each_netdev_rcu(net, dev) {
                if (dev->type == ARPHRD_CAN && dev->ml_priv)
                        can_rcvlist_proc_show_one(m, idx, dev, dev->ml_priv);
        }
@@ -377,7 +367,7 @@ static int can_rcvlist_proc_show(struct seq_file *m, void *v)
 
 static int can_rcvlist_proc_open(struct inode *inode, struct file *file)
 {
-       return single_open(file, can_rcvlist_proc_show, PDE_DATA(inode));
+       return single_open_net(inode, file, can_rcvlist_proc_show);
 }
 
 static const struct file_operations can_rcvlist_proc_fops = {
@@ -417,6 +407,7 @@ static int can_rcvlist_sff_proc_show(struct seq_file *m, void *v)
 {
        struct net_device *dev;
        struct dev_rcv_lists *d;
+       struct net *net = m->private;
 
        /* RX_SFF */
        seq_puts(m, "\nreceive list 'rx_sff':\n");
@@ -424,11 +415,11 @@ static int can_rcvlist_sff_proc_show(struct seq_file *m, void *v)
        rcu_read_lock();
 
        /* sff receive list for 'all' CAN devices (dev == NULL) */
-       d = &can_rx_alldev_list;
+       d = net->can.can_rx_alldev_list;
        can_rcvlist_proc_show_array(m, NULL, d->rx_sff, ARRAY_SIZE(d->rx_sff));
 
        /* sff receive list for registered CAN devices */
-       for_each_netdev_rcu(&init_net, dev) {
+       for_each_netdev_rcu(net, dev) {
                if (dev->type == ARPHRD_CAN && dev->ml_priv) {
                        d = dev->ml_priv;
                        can_rcvlist_proc_show_array(m, dev, d->rx_sff,
@@ -444,7 +435,7 @@ static int can_rcvlist_sff_proc_show(struct seq_file *m, void *v)
 
 static int can_rcvlist_sff_proc_open(struct inode *inode, struct file *file)
 {
-       return single_open(file, can_rcvlist_sff_proc_show, NULL);
+       return single_open_net(inode, file, can_rcvlist_sff_proc_show);
 }
 
 static const struct file_operations can_rcvlist_sff_proc_fops = {
@@ -460,6 +451,7 @@ static int can_rcvlist_eff_proc_show(struct seq_file *m, void *v)
 {
        struct net_device *dev;
        struct dev_rcv_lists *d;
+       struct net *net = m->private;
 
        /* RX_EFF */
        seq_puts(m, "\nreceive list 'rx_eff':\n");
@@ -467,11 +459,11 @@ static int can_rcvlist_eff_proc_show(struct seq_file *m, void *v)
        rcu_read_lock();
 
        /* eff receive list for 'all' CAN devices (dev == NULL) */
-       d = &can_rx_alldev_list;
+       d = net->can.can_rx_alldev_list;
        can_rcvlist_proc_show_array(m, NULL, d->rx_eff, ARRAY_SIZE(d->rx_eff));
 
        /* eff receive list for registered CAN devices */
-       for_each_netdev_rcu(&init_net, dev) {
+       for_each_netdev_rcu(net, dev) {
                if (dev->type == ARPHRD_CAN && dev->ml_priv) {
                        d = dev->ml_priv;
                        can_rcvlist_proc_show_array(m, dev, d->rx_eff,
@@ -487,7 +479,7 @@ static int can_rcvlist_eff_proc_show(struct seq_file *m, void *v)
 
 static int can_rcvlist_eff_proc_open(struct inode *inode, struct file *file)
 {
-       return single_open(file, can_rcvlist_eff_proc_show, NULL);
+       return single_open_net(inode, file, can_rcvlist_eff_proc_show);
 }
 
 static const struct file_operations can_rcvlist_eff_proc_fops = {
@@ -498,82 +490,86 @@ static const struct file_operations can_rcvlist_eff_proc_fops = {
        .release        = single_release,
 };
 
-/*
- * proc utility functions
- */
-
-static void can_remove_proc_readentry(const char *name)
-{
-       if (can_dir)
-               remove_proc_entry(name, can_dir);
-}
-
 /*
  * can_init_proc - create main CAN proc directory and procfs entries
  */
-void can_init_proc(void)
+void can_init_proc(struct net *net)
 {
        /* create /proc/net/can directory */
-       can_dir = proc_mkdir("can", init_net.proc_net);
+       net->can.proc_dir = proc_net_mkdir(net, "can", net->proc_net);
 
-       if (!can_dir) {
-               pr_info("can: failed to create /proc/net/can.\n");
+       if (!net->can.proc_dir) {
+               printk(KERN_INFO "can: failed to create /proc/net/can . "
+                          "CONFIG_PROC_FS missing?\n");
                return;
        }
 
        /* own procfs entries from the AF_CAN core */
-       pde_version     = proc_create(CAN_PROC_VERSION, 0644, can_dir,
-                                     &can_version_proc_fops);
-       pde_stats       = proc_create(CAN_PROC_STATS, 0644, can_dir,
-                                     &can_stats_proc_fops);
-       pde_reset_stats = proc_create(CAN_PROC_RESET_STATS, 0644, can_dir,
-                                     &can_reset_stats_proc_fops);
-       pde_rcvlist_err = proc_create_data(CAN_PROC_RCVLIST_ERR, 0644, can_dir,
-                                          &can_rcvlist_proc_fops, (void *)RX_ERR);
-       pde_rcvlist_all = proc_create_data(CAN_PROC_RCVLIST_ALL, 0644, can_dir,
-                                          &can_rcvlist_proc_fops, (void *)RX_ALL);
-       pde_rcvlist_fil = proc_create_data(CAN_PROC_RCVLIST_FIL, 0644, can_dir,
-                                          &can_rcvlist_proc_fops, (void *)RX_FIL);
-       pde_rcvlist_inv = proc_create_data(CAN_PROC_RCVLIST_INV, 0644, can_dir,
-                                          &can_rcvlist_proc_fops, (void *)RX_INV);
-       pde_rcvlist_eff = proc_create(CAN_PROC_RCVLIST_EFF, 0644, can_dir,
-                                     &can_rcvlist_eff_proc_fops);
-       pde_rcvlist_sff = proc_create(CAN_PROC_RCVLIST_SFF, 0644, can_dir,
-                                     &can_rcvlist_sff_proc_fops);
+       net->can.pde_version     = proc_create(CAN_PROC_VERSION, 0644,
+                                              net->can.proc_dir,
+                                              &can_version_proc_fops);
+       net->can.pde_stats       = proc_create(CAN_PROC_STATS, 0644,
+                                              net->can.proc_dir,
+                                              &can_stats_proc_fops);
+       net->can.pde_reset_stats = proc_create(CAN_PROC_RESET_STATS, 0644,
+                                              net->can.proc_dir,
+                                              &can_reset_stats_proc_fops);
+       net->can.pde_rcvlist_err = proc_create_data(CAN_PROC_RCVLIST_ERR, 0644,
+                                                   net->can.proc_dir,
+                                                   &can_rcvlist_proc_fops,
+                                                   (void *)RX_ERR);
+       net->can.pde_rcvlist_all = proc_create_data(CAN_PROC_RCVLIST_ALL, 0644,
+                                                   net->can.proc_dir,
+                                                   &can_rcvlist_proc_fops,
+                                                   (void *)RX_ALL);
+       net->can.pde_rcvlist_fil = proc_create_data(CAN_PROC_RCVLIST_FIL, 0644,
+                                                   net->can.proc_dir,
+                                                   &can_rcvlist_proc_fops,
+                                                   (void *)RX_FIL);
+       net->can.pde_rcvlist_inv = proc_create_data(CAN_PROC_RCVLIST_INV, 0644,
+                                                   net->can.proc_dir,
+                                                   &can_rcvlist_proc_fops,
+                                                   (void *)RX_INV);
+       net->can.pde_rcvlist_eff = proc_create(CAN_PROC_RCVLIST_EFF, 0644,
+                                              net->can.proc_dir,
+                                              &can_rcvlist_eff_proc_fops);
+       net->can.pde_rcvlist_sff = proc_create(CAN_PROC_RCVLIST_SFF, 0644,
+                                              net->can.proc_dir,
+                                              &can_rcvlist_sff_proc_fops);
 }
 
 /*
  * can_remove_proc - remove procfs entries and main CAN proc directory
  */
-void can_remove_proc(void)
+void can_remove_proc(struct net *net)
 {
-       if (pde_version)
-               can_remove_proc_readentry(CAN_PROC_VERSION);
+       if (net->can.pde_version)
+               remove_proc_entry(CAN_PROC_VERSION, net->can.proc_dir);
 
-       if (pde_stats)
-               can_remove_proc_readentry(CAN_PROC_STATS);
+       if (net->can.pde_stats)
+               remove_proc_entry(CAN_PROC_STATS, net->can.proc_dir);
 
-       if (pde_reset_stats)
-               can_remove_proc_readentry(CAN_PROC_RESET_STATS);
+       if (net->can.pde_reset_stats)
+               remove_proc_entry(CAN_PROC_RESET_STATS, net->can.proc_dir);
 
-       if (pde_rcvlist_err)
-               can_remove_proc_readentry(CAN_PROC_RCVLIST_ERR);
+       if (net->can.pde_rcvlist_err)
+               remove_proc_entry(CAN_PROC_RCVLIST_ERR, net->can.proc_dir);
 
-       if (pde_rcvlist_all)
-               can_remove_proc_readentry(CAN_PROC_RCVLIST_ALL);
+       if (net->can.pde_rcvlist_all)
+               remove_proc_entry(CAN_PROC_RCVLIST_ALL, net->can.proc_dir);
 
-       if (pde_rcvlist_fil)
-               can_remove_proc_readentry(CAN_PROC_RCVLIST_FIL);
+       if (net->can.pde_rcvlist_fil)
+               remove_proc_entry(CAN_PROC_RCVLIST_FIL, net->can.proc_dir);
 
-       if (pde_rcvlist_inv)
-               can_remove_proc_readentry(CAN_PROC_RCVLIST_INV);
+       if (net->can.pde_rcvlist_inv)
+               remove_proc_entry(CAN_PROC_RCVLIST_INV, net->can.proc_dir);
 
-       if (pde_rcvlist_eff)
-               can_remove_proc_readentry(CAN_PROC_RCVLIST_EFF);
+       if (net->can.pde_rcvlist_eff)
+               remove_proc_entry(CAN_PROC_RCVLIST_EFF, net->can.proc_dir);
 
-       if (pde_rcvlist_sff)
-               can_remove_proc_readentry(CAN_PROC_RCVLIST_SFF);
+       if (net->can.pde_rcvlist_sff)
+               remove_proc_entry(CAN_PROC_RCVLIST_SFF, net->can.proc_dir);
 
-       if (can_dir)
-               remove_proc_entry("can", init_net.proc_net);
+       if (net->can.proc_dir)
+               remove_proc_entry("can", net->proc_net);
 }
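
The proc.c conversion hinges on two lookups: single_open_net() stashes the owning struct net in m->private, while PDE_DATA() still carries the per-file cookie (the RX_* list index) that previously arrived via m->private. A minimal sketch of that pairing, with invented demo_* names:

static int demo_proc_show(struct seq_file *m, void *v)
{
	struct net *net = m->private;	/* set up by single_open_net() */
	int idx = (int)(long)PDE_DATA(m->file->f_inode);	/* per-entry cookie */

	seq_printf(m, "list %d in netns %u\n", idx, net->ns.inum);
	return 0;
}

static int demo_proc_open(struct inode *inode, struct file *file)
{
	return single_open_net(inode, file, demo_proc_show);
}
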
index 6dc546a06673ff41fc121c546ebd0567bb0da05f..864c80dbdb72d11cdd24183cec3769222b339ccc 100644 (file)
@@ -181,20 +181,21 @@ static void raw_rcv(struct sk_buff *oskb, void *data)
                kfree_skb(skb);
 }
 
-static int raw_enable_filters(struct net_device *dev, struct sock *sk,
-                             struct can_filter *filter, int count)
+static int raw_enable_filters(struct net *net, struct net_device *dev,
+                             struct sock *sk, struct can_filter *filter,
+                             int count)
 {
        int err = 0;
        int i;
 
        for (i = 0; i < count; i++) {
-               err = can_rx_register(dev, filter[i].can_id,
+               err = can_rx_register(net, dev, filter[i].can_id,
                                      filter[i].can_mask,
                                      raw_rcv, sk, "raw", sk);
                if (err) {
                        /* clean up successfully registered filters */
                        while (--i >= 0)
-                               can_rx_unregister(dev, filter[i].can_id,
+                               can_rx_unregister(net, dev, filter[i].can_id,
                                                  filter[i].can_mask,
                                                  raw_rcv, sk);
                        break;
@@ -204,57 +205,62 @@ static int raw_enable_filters(struct net_device *dev, struct sock *sk,
        return err;
 }
 
-static int raw_enable_errfilter(struct net_device *dev, struct sock *sk,
-                               can_err_mask_t err_mask)
+static int raw_enable_errfilter(struct net *net, struct net_device *dev,
+                               struct sock *sk, can_err_mask_t err_mask)
 {
        int err = 0;
 
        if (err_mask)
-               err = can_rx_register(dev, 0, err_mask | CAN_ERR_FLAG,
+               err = can_rx_register(net, dev, 0, err_mask | CAN_ERR_FLAG,
                                      raw_rcv, sk, "raw", sk);
 
        return err;
 }
 
-static void raw_disable_filters(struct net_device *dev, struct sock *sk,
-                             struct can_filter *filter, int count)
+static void raw_disable_filters(struct net *net, struct net_device *dev,
+                               struct sock *sk, struct can_filter *filter,
+                               int count)
 {
        int i;
 
        for (i = 0; i < count; i++)
-               can_rx_unregister(dev, filter[i].can_id, filter[i].can_mask,
-                                 raw_rcv, sk);
+               can_rx_unregister(net, dev, filter[i].can_id,
+                                 filter[i].can_mask, raw_rcv, sk);
 }
 
-static inline void raw_disable_errfilter(struct net_device *dev,
+static inline void raw_disable_errfilter(struct net *net,
+                                        struct net_device *dev,
                                         struct sock *sk,
                                         can_err_mask_t err_mask)
 
 {
        if (err_mask)
-               can_rx_unregister(dev, 0, err_mask | CAN_ERR_FLAG,
+               can_rx_unregister(net, dev, 0, err_mask | CAN_ERR_FLAG,
                                  raw_rcv, sk);
 }
 
-static inline void raw_disable_allfilters(struct net_device *dev,
+static inline void raw_disable_allfilters(struct net *net,
+                                         struct net_device *dev,
                                          struct sock *sk)
 {
        struct raw_sock *ro = raw_sk(sk);
 
-       raw_disable_filters(dev, sk, ro->filter, ro->count);
-       raw_disable_errfilter(dev, sk, ro->err_mask);
+       raw_disable_filters(net, dev, sk, ro->filter, ro->count);
+       raw_disable_errfilter(net, dev, sk, ro->err_mask);
 }
 
-static int raw_enable_allfilters(struct net_device *dev, struct sock *sk)
+static int raw_enable_allfilters(struct net *net, struct net_device *dev,
+                                struct sock *sk)
 {
        struct raw_sock *ro = raw_sk(sk);
        int err;
 
-       err = raw_enable_filters(dev, sk, ro->filter, ro->count);
+       err = raw_enable_filters(net, dev, sk, ro->filter, ro->count);
        if (!err) {
-               err = raw_enable_errfilter(dev, sk, ro->err_mask);
+               err = raw_enable_errfilter(net, dev, sk, ro->err_mask);
                if (err)
-                       raw_disable_filters(dev, sk, ro->filter, ro->count);
+                       raw_disable_filters(net, dev, sk, ro->filter,
+                                           ro->count);
        }
 
        return err;
@@ -267,7 +273,7 @@ static int raw_notifier(struct notifier_block *nb,
        struct raw_sock *ro = container_of(nb, struct raw_sock, notifier);
        struct sock *sk = &ro->sk;
 
-       if (!net_eq(dev_net(dev), &init_net))
+       if (!net_eq(dev_net(dev), sock_net(sk)))
                return NOTIFY_DONE;
 
        if (dev->type != ARPHRD_CAN)
@@ -282,7 +288,7 @@ static int raw_notifier(struct notifier_block *nb,
                lock_sock(sk);
                /* remove current filters & unregister */
                if (ro->bound)
-                       raw_disable_allfilters(dev, sk);
+                       raw_disable_allfilters(dev_net(dev), dev, sk);
 
                if (ro->count > 1)
                        kfree(ro->filter);
@@ -358,13 +364,13 @@ static int raw_release(struct socket *sock)
                if (ro->ifindex) {
                        struct net_device *dev;
 
-                       dev = dev_get_by_index(&init_net, ro->ifindex);
+                       dev = dev_get_by_index(sock_net(sk), ro->ifindex);
                        if (dev) {
-                               raw_disable_allfilters(dev, sk);
+                               raw_disable_allfilters(dev_net(dev), dev, sk);
                                dev_put(dev);
                        }
                } else
-                       raw_disable_allfilters(NULL, sk);
+                       raw_disable_allfilters(sock_net(sk), NULL, sk);
        }
 
        if (ro->count > 1)
@@ -404,7 +410,7 @@ static int raw_bind(struct socket *sock, struct sockaddr *uaddr, int len)
        if (addr->can_ifindex) {
                struct net_device *dev;
 
-               dev = dev_get_by_index(&init_net, addr->can_ifindex);
+               dev = dev_get_by_index(sock_net(sk), addr->can_ifindex);
                if (!dev) {
                        err = -ENODEV;
                        goto out;
@@ -420,13 +426,13 @@ static int raw_bind(struct socket *sock, struct sockaddr *uaddr, int len)
                ifindex = dev->ifindex;
 
                /* filters set by default/setsockopt */
-               err = raw_enable_allfilters(dev, sk);
+               err = raw_enable_allfilters(sock_net(sk), dev, sk);
                dev_put(dev);
        } else {
                ifindex = 0;
 
                /* filters set by default/setsockopt */
-               err = raw_enable_allfilters(NULL, sk);
+               err = raw_enable_allfilters(sock_net(sk), NULL, sk);
        }
 
        if (!err) {
@@ -435,13 +441,15 @@ static int raw_bind(struct socket *sock, struct sockaddr *uaddr, int len)
                        if (ro->ifindex) {
                                struct net_device *dev;
 
-                               dev = dev_get_by_index(&init_net, ro->ifindex);
+                               dev = dev_get_by_index(sock_net(sk),
+                                                      ro->ifindex);
                                if (dev) {
-                                       raw_disable_allfilters(dev, sk);
+                                       raw_disable_allfilters(dev_net(dev),
+                                                              dev, sk);
                                        dev_put(dev);
                                }
                        } else
-                               raw_disable_allfilters(NULL, sk);
+                               raw_disable_allfilters(sock_net(sk), NULL, sk);
                }
                ro->ifindex = ifindex;
                ro->bound = 1;
@@ -517,15 +525,16 @@ static int raw_setsockopt(struct socket *sock, int level, int optname,
                lock_sock(sk);
 
                if (ro->bound && ro->ifindex)
-                       dev = dev_get_by_index(&init_net, ro->ifindex);
+                       dev = dev_get_by_index(sock_net(sk), ro->ifindex);
 
                if (ro->bound) {
                        /* (try to) register the new filters */
                        if (count == 1)
-                               err = raw_enable_filters(dev, sk, &sfilter, 1);
+                               err = raw_enable_filters(sock_net(sk), dev, sk,
+                                                        &sfilter, 1);
                        else
-                               err = raw_enable_filters(dev, sk, filter,
-                                                        count);
+                               err = raw_enable_filters(sock_net(sk), dev, sk,
+                                                        filter, count);
                        if (err) {
                                if (count > 1)
                                        kfree(filter);
@@ -533,7 +542,8 @@ static int raw_setsockopt(struct socket *sock, int level, int optname,
                        }
 
                        /* remove old filter registrations */
-                       raw_disable_filters(dev, sk, ro->filter, ro->count);
+                       raw_disable_filters(sock_net(sk), dev, sk, ro->filter,
+                                           ro->count);
                }
 
                /* remove old filter space */
@@ -569,18 +579,20 @@ static int raw_setsockopt(struct socket *sock, int level, int optname,
                lock_sock(sk);
 
                if (ro->bound && ro->ifindex)
-                       dev = dev_get_by_index(&init_net, ro->ifindex);
+                       dev = dev_get_by_index(sock_net(sk), ro->ifindex);
 
                /* remove current error mask */
                if (ro->bound) {
                        /* (try to) register the new err_mask */
-                       err = raw_enable_errfilter(dev, sk, err_mask);
+                       err = raw_enable_errfilter(sock_net(sk), dev, sk,
+                                                  err_mask);
 
                        if (err)
                                goto out_err;
 
                        /* remove old err_mask registration */
-                       raw_disable_errfilter(dev, sk, ro->err_mask);
+                       raw_disable_errfilter(sock_net(sk), dev, sk,
+                                             ro->err_mask);
                }
 
                /* link new err_mask to the socket */
@@ -741,7 +753,7 @@ static int raw_sendmsg(struct socket *sock, struct msghdr *msg, size_t size)
                        return -EINVAL;
        }
 
-       dev = dev_get_by_index(&init_net, ifindex);
+       dev = dev_get_by_index(sock_net(sk), ifindex);
        if (!dev)
                return -ENXIO;
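
Every device lookup in raw.c now resolves against sock_net(sk) rather than init_net. The shape is the usual refcounted lookup, sketched here in isolation:

	/* Sketch: resolve an ifindex inside the socket's own namespace. */
	struct net_device *dev = dev_get_by_index(sock_net(sk), ifindex);

	if (!dev)
		return -ENXIO;	/* no such device in this namespace */
	/* ... use dev ... */
	dev_put(dev);		/* dev_get_by_index() took a reference */
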
 
index 464e88599b9d2918f191eae62e1fab7b80a4806e..108533859a53292cde61a3cedd052a2579684e87 100644 (file)
@@ -230,6 +230,7 @@ enum {
        Opt_osdkeepalivetimeout,
        Opt_mount_timeout,
        Opt_osd_idle_ttl,
+       Opt_osd_request_timeout,
        Opt_last_int,
        /* int args above */
        Opt_fsid,
@@ -256,6 +257,7 @@ static match_table_t opt_tokens = {
        {Opt_osdkeepalivetimeout, "osdkeepalive=%d"},
        {Opt_mount_timeout, "mount_timeout=%d"},
        {Opt_osd_idle_ttl, "osd_idle_ttl=%d"},
+       {Opt_osd_request_timeout, "osd_request_timeout=%d"},
        /* int args above */
        {Opt_fsid, "fsid=%s"},
        {Opt_name, "name=%s"},
@@ -361,6 +363,7 @@ ceph_parse_options(char *options, const char *dev_name,
        opt->osd_keepalive_timeout = CEPH_OSD_KEEPALIVE_DEFAULT;
        opt->mount_timeout = CEPH_MOUNT_TIMEOUT_DEFAULT;
        opt->osd_idle_ttl = CEPH_OSD_IDLE_TTL_DEFAULT;
+       opt->osd_request_timeout = CEPH_OSD_REQUEST_TIMEOUT_DEFAULT;
 
        /* get mon ip(s) */
        /* ip1[:port1][,ip2[:port2]...] */
@@ -473,6 +476,15 @@ ceph_parse_options(char *options, const char *dev_name,
                        }
                        opt->mount_timeout = msecs_to_jiffies(intval * 1000);
                        break;
+               case Opt_osd_request_timeout:
+                       /* 0 is "wait forever" (i.e. infinite timeout) */
+                       if (intval < 0 || intval > INT_MAX / 1000) {
+                               pr_err("osd_request_timeout out of range\n");
+                               err = -EINVAL;
+                               goto out;
+                       }
+                       opt->osd_request_timeout = msecs_to_jiffies(intval * 1000);
+                       break;
 
                case Opt_share:
                        opt->flags &= ~CEPH_OPT_NOSHARE;
@@ -557,6 +569,9 @@ int ceph_print_client_options(struct seq_file *m, struct ceph_client *client)
        if (opt->osd_keepalive_timeout != CEPH_OSD_KEEPALIVE_DEFAULT)
                seq_printf(m, "osdkeepalivetimeout=%d,",
                    jiffies_to_msecs(opt->osd_keepalive_timeout) / 1000);
+       if (opt->osd_request_timeout != CEPH_OSD_REQUEST_TIMEOUT_DEFAULT)
+               seq_printf(m, "osd_request_timeout=%d,",
+                          jiffies_to_msecs(opt->osd_request_timeout) / 1000);
 
        /* drop redundant comma */
        if (m->count != pos)
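
osd_request_timeout is parsed in seconds and stored in jiffies; the default, CEPH_OSD_REQUEST_TIMEOUT_DEFAULT, is 0 and means "wait forever". A user would pass it like any other libceph option (e.g. "-o osd_request_timeout=60" on a cephfs mount or an rbd map). The conversion and its overflow guard, sketched with an assumed local variable:

	int secs = 60;	/* e.g. from "osd_request_timeout=60" */
	unsigned long timeout;

	/* seconds go through milliseconds, so guard secs * 1000 */
	if (secs < 0 || secs > INT_MAX / 1000)
		return -EINVAL;
	timeout = msecs_to_jiffies(secs * 1000);	/* 0 stays 0: wait forever */
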
index 38dcf1eb427de562776934b1c2dfff2c46f3ca12..f76bb333261384257490b0f5125207028e8352aa 100644 (file)
@@ -7,6 +7,7 @@
 #include <linux/kthread.h>
 #include <linux/net.h>
 #include <linux/nsproxy.h>
+#include <linux/sched/mm.h>
 #include <linux/slab.h>
 #include <linux/socket.h>
 #include <linux/string.h>
@@ -469,11 +470,16 @@ static int ceph_tcp_connect(struct ceph_connection *con)
 {
        struct sockaddr_storage *paddr = &con->peer_addr.in_addr;
        struct socket *sock;
+       unsigned int noio_flag;
        int ret;
 
        BUG_ON(con->sock);
+
+       /* sock_create_kern() allocates with GFP_KERNEL */
+       noio_flag = memalloc_noio_save();
        ret = sock_create_kern(read_pnet(&con->msgr->net), paddr->ss_family,
                               SOCK_STREAM, IPPROTO_TCP, &sock);
+       memalloc_noio_restore(noio_flag);
        if (ret)
                return ret;
        sock->sk->sk_allocation = GFP_NOFS;
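
memalloc_noio_save()/memalloc_noio_restore() scope the current task so that any allocation inside the window is implicitly GFP_NOIO. That matters here because ceph_tcp_connect() can be reached from memory-reclaim writeback (e.g. via rbd), where the GFP_KERNEL allocation inside sock_create_kern() could otherwise recurse into I/O and deadlock. The general pattern, as a sketch:

	unsigned int noio_flag;

	noio_flag = memalloc_noio_save();	/* GFP_KERNEL now acts as GFP_NOIO */
	/* ... allocations that must not recurse into reclaim I/O ... */
	memalloc_noio_restore(noio_flag);	/* restore the saved task flags */
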
index b65bbf9f45ebb22c8ac51af34c6b1c29ef7ed17c..e15ea9e4c4955fbd697e545cedfdb7f7925c347e 100644 (file)
@@ -1709,6 +1709,8 @@ static void account_request(struct ceph_osd_request *req)
 
        req->r_flags |= CEPH_OSD_FLAG_ONDISK;
        atomic_inc(&req->r_osdc->num_requests);
+
+       req->r_start_stamp = jiffies;
 }
 
 static void submit_request(struct ceph_osd_request *req, bool wrlocked)
@@ -1789,6 +1791,14 @@ static void cancel_request(struct ceph_osd_request *req)
        ceph_osdc_put_request(req);
 }
 
+static void abort_request(struct ceph_osd_request *req, int err)
+{
+       dout("%s req %p tid %llu err %d\n", __func__, req, req->r_tid, err);
+
+       cancel_map_check(req);
+       complete_request(req, err);
+}
+
 static void check_pool_dne(struct ceph_osd_request *req)
 {
        struct ceph_osd_client *osdc = req->r_osdc;
@@ -2487,6 +2497,7 @@ static void handle_timeout(struct work_struct *work)
                container_of(work, struct ceph_osd_client, timeout_work.work);
        struct ceph_options *opts = osdc->client->options;
        unsigned long cutoff = jiffies - opts->osd_keepalive_timeout;
+       unsigned long expiry_cutoff = jiffies - opts->osd_request_timeout;
        LIST_HEAD(slow_osds);
        struct rb_node *n, *p;
 
@@ -2502,15 +2513,23 @@ static void handle_timeout(struct work_struct *work)
                struct ceph_osd *osd = rb_entry(n, struct ceph_osd, o_node);
                bool found = false;
 
-               for (p = rb_first(&osd->o_requests); p; p = rb_next(p)) {
+               for (p = rb_first(&osd->o_requests); p; ) {
                        struct ceph_osd_request *req =
                            rb_entry(p, struct ceph_osd_request, r_node);
 
+                       p = rb_next(p); /* abort_request() */
+
                        if (time_before(req->r_stamp, cutoff)) {
                                dout(" req %p tid %llu on osd%d is laggy\n",
                                     req, req->r_tid, osd->o_osd);
                                found = true;
                        }
+                       if (opts->osd_request_timeout &&
+                           time_before(req->r_start_stamp, expiry_cutoff)) {
+                               pr_err_ratelimited("tid %llu on osd%d timeout\n",
+                                      req->r_tid, osd->o_osd);
+                               abort_request(req, -ETIMEDOUT);
+                       }
                }
                for (p = rb_first(&osd->o_linger_requests); p; p = rb_next(p)) {
                        struct ceph_osd_linger_request *lreq =
@@ -2530,6 +2549,21 @@ static void handle_timeout(struct work_struct *work)
                        list_move_tail(&osd->o_keepalive_item, &slow_osds);
        }
 
+       if (opts->osd_request_timeout) {
+               for (p = rb_first(&osdc->homeless_osd.o_requests); p; ) {
+                       struct ceph_osd_request *req =
+                           rb_entry(p, struct ceph_osd_request, r_node);
+
+                       p = rb_next(p); /* abort_request() */
+
+                       if (time_before(req->r_start_stamp, expiry_cutoff)) {
+                               pr_err_ratelimited("tid %llu on osd%d timeout\n",
+                                      req->r_tid, osdc->homeless_osd.o_osd);
+                               abort_request(req, -ETIMEDOUT);
+                       }
+               }
+       }
+
        if (atomic_read(&osdc->num_homeless) || !list_empty(&slow_osds))
                maybe_request_map(osdc);
 
index 6824c0ec8373e721ac9ca2d837f488ff22233e1f..ffe9e904d4d1d130b0353edbe45d50d236b4f74e 100644 (file)
@@ -390,9 +390,8 @@ static struct crush_map *crush_decode(void *pbyval, void *end)
        dout("crush decode tunable chooseleaf_stable = %d\n",
             c->chooseleaf_stable);
 
-       crush_finalize(c);
-
 done:
+       crush_finalize(c);
        dout("crush_decode success\n");
        return c;
 
@@ -1380,7 +1379,6 @@ static int decode_new_up_state_weight(void **p, void *end,
                if ((map->osd_state[osd] & CEPH_OSD_EXISTS) &&
                    (xorstate & CEPH_OSD_EXISTS)) {
                        pr_info("osd%d does not exist\n", osd);
-                       map->osd_weight[osd] = CEPH_OSD_IN;
                        ret = set_primary_affinity(map, osd,
                                                   CEPH_OSD_DEFAULT_PRIMARY_AFFINITY);
                        if (ret)
index ea633342ab0d046cbc49e55b679440ef9e015c2d..4608aa245410ccdbcb3510c8e8c6dec2beac8a8d 100644 (file)
@@ -256,8 +256,12 @@ struct sk_buff *__skb_try_recv_datagram(struct sock *sk, unsigned int flags,
                }
 
                spin_unlock_irqrestore(&queue->lock, cpu_flags);
-       } while (sk_can_busy_loop(sk) &&
-                sk_busy_loop(sk, flags & MSG_DONTWAIT));
+
+               if (!sk_can_busy_loop(sk))
+                       break;
+
+               sk_busy_loop(sk, flags & MSG_DONTWAIT);
+       } while (!skb_queue_empty(&sk->sk_receive_queue));
 
        error = -EAGAIN;
 
index 8637b2b71f3d4751366a2ca5ba46579e6a5fa953..ef9fe60ee294b0e2503456f68136440646c86344 100644 (file)
@@ -1304,6 +1304,7 @@ void netdev_notify_peers(struct net_device *dev)
 {
        rtnl_lock();
        call_netdevice_notifiers(NETDEV_NOTIFY_PEERS, dev);
+       call_netdevice_notifiers(NETDEV_RESEND_IGMP, dev);
        rtnl_unlock();
 }
 EXPORT_SYMBOL(netdev_notify_peers);
@@ -5059,27 +5060,28 @@ static void busy_poll_stop(struct napi_struct *napi, void *have_poll_lock)
                do_softirq();
 }
 
-bool sk_busy_loop(struct sock *sk, int nonblock)
+void napi_busy_loop(unsigned int napi_id,
+                   bool (*loop_end)(void *, unsigned long),
+                   void *loop_end_arg)
 {
-       unsigned long end_time = !nonblock ? sk_busy_loop_end_time(sk) : 0;
+       unsigned long start_time = loop_end ? busy_loop_current_time() : 0;
        int (*napi_poll)(struct napi_struct *napi, int budget);
        void *have_poll_lock = NULL;
        struct napi_struct *napi;
-       int rc;
 
 restart:
-       rc = false;
        napi_poll = NULL;
 
        rcu_read_lock();
 
-       napi = napi_by_id(sk->sk_napi_id);
+       napi = napi_by_id(napi_id);
        if (!napi)
                goto out;
 
        preempt_disable();
        for (;;) {
-               rc = 0;
+               int work = 0;
+
                local_bh_disable();
                if (!napi_poll) {
                        unsigned long val = READ_ONCE(napi->state);
@@ -5097,16 +5099,15 @@ restart:
                        have_poll_lock = netpoll_poll_lock(napi);
                        napi_poll = napi->poll;
                }
-               rc = napi_poll(napi, BUSY_POLL_BUDGET);
-               trace_napi_poll(napi, rc, BUSY_POLL_BUDGET);
+               work = napi_poll(napi, BUSY_POLL_BUDGET);
+               trace_napi_poll(napi, work, BUSY_POLL_BUDGET);
 count:
-               if (rc > 0)
-                       __NET_ADD_STATS(sock_net(sk),
-                                       LINUX_MIB_BUSYPOLLRXPACKETS, rc);
+               if (work > 0)
+                       __NET_ADD_STATS(dev_net(napi->dev),
+                                       LINUX_MIB_BUSYPOLLRXPACKETS, work);
                local_bh_enable();
 
-               if (nonblock || !skb_queue_empty(&sk->sk_receive_queue) ||
-                   busy_loop_timeout(end_time))
+               if (!loop_end || loop_end(loop_end_arg, start_time))
                        break;
 
                if (unlikely(need_resched())) {
@@ -5115,9 +5116,8 @@ count:
                        preempt_enable();
                        rcu_read_unlock();
                        cond_resched();
-                       rc = !skb_queue_empty(&sk->sk_receive_queue);
-                       if (rc || busy_loop_timeout(end_time))
-                               return rc;
+                       if (loop_end(loop_end_arg, start_time))
+                               return;
                        goto restart;
                }
                cpu_relax();
@@ -5125,12 +5125,10 @@ count:
        if (napi_poll)
                busy_poll_stop(napi, have_poll_lock);
        preempt_enable();
-       rc = !skb_queue_empty(&sk->sk_receive_queue);
 out:
        rcu_read_unlock();
-       return rc;
 }
-EXPORT_SYMBOL(sk_busy_loop);
+EXPORT_SYMBOL(napi_busy_loop);
 
 #endif /* CONFIG_NET_RX_BUSY_POLL */
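
sk_busy_loop() is generalized into napi_busy_loop(): the socket-specific stop conditions (data queued, timeout, nonblocking) move into a caller-supplied loop_end callback, so non-socket users can busy-poll a NAPI id too, and the datagram receive loop above re-checks its own queue instead of relying on a return value. A sketch of the socket-side wrapper this API implies, modeled on the companion busy_poll.h changes in the same series (the demo_* names are assumed; passing a NULL loop_end polls exactly once):

static bool demo_busy_loop_end(void *p, unsigned long start_time)
{
	struct sock *sk = p;

	/* stop when data arrived or the socket's busy-poll budget ran out */
	return !skb_queue_empty(&sk->sk_receive_queue) ||
	       sk_busy_loop_timeout(sk, start_time);
}

static void demo_sk_busy_loop(struct sock *sk, int nonblock)
{
	unsigned int napi_id = READ_ONCE(sk->sk_napi_id);

	/* ids below MIN_NAPI_ID stay reserved for sender_cpu marking */
	if (napi_id >= MIN_NAPI_ID)
		napi_busy_loop(napi_id,
			       nonblock ? NULL : demo_busy_loop_end, sk);
}
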
 
@@ -5142,10 +5140,10 @@ static void napi_hash_add(struct napi_struct *napi)
 
        spin_lock(&napi_hash_lock);
 
-       /* 0..NR_CPUS+1 range is reserved for sender_cpu use */
+       /* 0..NR_CPUS range is reserved for sender_cpu use */
        do {
-               if (unlikely(++napi_gen_id < NR_CPUS + 1))
-                       napi_gen_id = NR_CPUS + 1;
+               if (unlikely(++napi_gen_id < MIN_NAPI_ID))
+                       napi_gen_id = MIN_NAPI_ID;
        } while (napi_by_id(napi_gen_id));
        napi->napi_id = napi_gen_id;
 
index e9c1e6acfb6d196d4373dcc36bcf76577952dd32..0afac5800b57bdb525923f77acbd532072e21e58 100644 (file)
@@ -1493,8 +1493,686 @@ static int devlink_nl_cmd_eswitch_set_doit(struct sk_buff *skb,
                if (err)
                        return err;
        }
+       return 0;
+}
+
+int devlink_dpipe_match_put(struct sk_buff *skb,
+                           struct devlink_dpipe_match *match)
+{
+       struct devlink_dpipe_header *header = match->header;
+       struct devlink_dpipe_field *field = &header->fields[match->field_id];
+       struct nlattr *match_attr;
+
+       match_attr = nla_nest_start(skb, DEVLINK_ATTR_DPIPE_MATCH);
+       if (!match_attr)
+               return -EMSGSIZE;
+
+       if (nla_put_u32(skb, DEVLINK_ATTR_DPIPE_MATCH_TYPE, match->type) ||
+           nla_put_u32(skb, DEVLINK_ATTR_DPIPE_HEADER_INDEX, match->header_index) ||
+           nla_put_u32(skb, DEVLINK_ATTR_DPIPE_HEADER_ID, header->id) ||
+           nla_put_u32(skb, DEVLINK_ATTR_DPIPE_FIELD_ID, field->id) ||
+           nla_put_u8(skb, DEVLINK_ATTR_DPIPE_HEADER_GLOBAL, header->global))
+               goto nla_put_failure;
+
+       nla_nest_end(skb, match_attr);
+       return 0;
+
+nla_put_failure:
+       nla_nest_cancel(skb, match_attr);
+       return -EMSGSIZE;
+}
+EXPORT_SYMBOL_GPL(devlink_dpipe_match_put);
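
devlink_dpipe_match_put() (and its action twin below) are the building blocks drivers call from their matches_dump/actions_dump table ops; the core only supplies the surrounding nesting. A driver-side sketch with an invented header and field (everything named demo_* is an assumption, not part of this commit):

static struct devlink_dpipe_field demo_fields[] = {
	{
		.name = "dest_index",
		.id = 0,
		.bitwidth = 16,
	},
};

static struct devlink_dpipe_header demo_header = {
	.name = "demo",
	.id = 0,
	.fields = demo_fields,
	.fields_count = ARRAY_SIZE(demo_fields),
	.global = false,
};

static int demo_matches_dump(void *priv, struct sk_buff *skb)
{
	struct devlink_dpipe_match match = {
		.type = DEVLINK_DPIPE_MATCH_TYPE_FIELD_EXACT,
		.header = &demo_header,
		.field_id = 0,
	};

	return devlink_dpipe_match_put(skb, &match);
}
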
+
+static int devlink_dpipe_matches_put(struct devlink_dpipe_table *table,
+                                    struct sk_buff *skb)
+{
+       struct nlattr *matches_attr;
+
+       matches_attr = nla_nest_start(skb, DEVLINK_ATTR_DPIPE_TABLE_MATCHES);
+       if (!matches_attr)
+               return -EMSGSIZE;
+
+       if (table->table_ops->matches_dump(table->priv, skb))
+               goto nla_put_failure;
+
+       nla_nest_end(skb, matches_attr);
+       return 0;
+
+nla_put_failure:
+       nla_nest_cancel(skb, matches_attr);
+       return -EMSGSIZE;
+}
+
+int devlink_dpipe_action_put(struct sk_buff *skb,
+                            struct devlink_dpipe_action *action)
+{
+       struct devlink_dpipe_header *header = action->header;
+       struct devlink_dpipe_field *field = &header->fields[action->field_id];
+       struct nlattr *action_attr;
+
+       action_attr = nla_nest_start(skb, DEVLINK_ATTR_DPIPE_ACTION);
+       if (!action_attr)
+               return -EMSGSIZE;
+
+       if (nla_put_u32(skb, DEVLINK_ATTR_DPIPE_ACTION_TYPE, action->type) ||
+           nla_put_u32(skb, DEVLINK_ATTR_DPIPE_HEADER_INDEX, action->header_index) ||
+           nla_put_u32(skb, DEVLINK_ATTR_DPIPE_HEADER_ID, header->id) ||
+           nla_put_u32(skb, DEVLINK_ATTR_DPIPE_FIELD_ID, field->id) ||
+           nla_put_u8(skb, DEVLINK_ATTR_DPIPE_HEADER_GLOBAL, header->global))
+               goto nla_put_failure;
+
+       nla_nest_end(skb, action_attr);
+       return 0;
+
+nla_put_failure:
+       nla_nest_cancel(skb, action_attr);
+       return -EMSGSIZE;
+}
+EXPORT_SYMBOL_GPL(devlink_dpipe_action_put);
+
+static int devlink_dpipe_actions_put(struct devlink_dpipe_table *table,
+                                    struct sk_buff *skb)
+{
+       struct nlattr *actions_attr;
+
+       actions_attr = nla_nest_start(skb, DEVLINK_ATTR_DPIPE_TABLE_ACTIONS);
+       if (!actions_attr)
+               return -EMSGSIZE;
+
+       if (table->table_ops->actions_dump(table->priv, skb))
+               goto nla_put_failure;
+
+       nla_nest_end(skb, actions_attr);
+       return 0;
+
+nla_put_failure:
+       nla_nest_cancel(skb, actions_attr);
+       return -EMSGSIZE;
+}
+
+static int devlink_dpipe_table_put(struct sk_buff *skb,
+                                  struct devlink_dpipe_table *table)
+{
+       struct nlattr *table_attr;
+
+       table_attr = nla_nest_start(skb, DEVLINK_ATTR_DPIPE_TABLE);
+       if (!table_attr)
+               return -EMSGSIZE;
+
+       if (nla_put_string(skb, DEVLINK_ATTR_DPIPE_TABLE_NAME, table->name) ||
+           nla_put_u64_64bit(skb, DEVLINK_ATTR_DPIPE_TABLE_SIZE, table->size,
+                             DEVLINK_ATTR_PAD))
+               goto nla_put_failure;
+       if (nla_put_u8(skb, DEVLINK_ATTR_DPIPE_TABLE_COUNTERS_ENABLED,
+                      table->counters_enabled))
+               goto nla_put_failure;
+
+       if (devlink_dpipe_matches_put(table, skb))
+               goto nla_put_failure;
+
+       if (devlink_dpipe_actions_put(table, skb))
+               goto nla_put_failure;
+
+       nla_nest_end(skb, table_attr);
+       return 0;
+
+nla_put_failure:
+       nla_nest_cancel(skb, table_attr);
+       return -EMSGSIZE;
+}
+
+static int devlink_dpipe_send_and_alloc_skb(struct sk_buff **pskb,
+                                           struct genl_info *info)
+{
+       int err;
+
+       if (*pskb) {
+               err = genlmsg_reply(*pskb, info);
+               if (err)
+                       return err;
+       }
+       *pskb = genlmsg_new(GENLMSG_DEFAULT_SIZE, GFP_KERNEL);
+       if (!*pskb)
+               return -ENOMEM;
+       return 0;
+}
+
+static int devlink_dpipe_tables_fill(struct genl_info *info,
+                                    enum devlink_command cmd, int flags,
+                                    struct list_head *dpipe_tables,
+                                    const char *table_name)
+{
+       struct devlink *devlink = info->user_ptr[0];
+       struct devlink_dpipe_table *table;
+       struct nlattr *tables_attr;
+       struct sk_buff *skb = NULL;
+       struct nlmsghdr *nlh;
+       bool incomplete;
+       void *hdr;
+       int i;
+       int err;
+
+       table = list_first_entry(dpipe_tables,
+                                struct devlink_dpipe_table, list);
+start_again:
+       err = devlink_dpipe_send_and_alloc_skb(&skb, info);
+       if (err)
+               return err;
+
+       hdr = genlmsg_put(skb, info->snd_portid, info->snd_seq,
+                         &devlink_nl_family, NLM_F_MULTI, cmd);
+       if (!hdr)
+               return -EMSGSIZE;
+
+       if (devlink_nl_put_handle(skb, devlink))
+               goto nla_put_failure;
+       tables_attr = nla_nest_start(skb, DEVLINK_ATTR_DPIPE_TABLES);
+       if (!tables_attr)
+               goto nla_put_failure;
+
+       i = 0;
+       incomplete = false;
+       list_for_each_entry_from(table, dpipe_tables, list) {
+               if (!table_name) {
+                       err = devlink_dpipe_table_put(skb, table);
+                       if (err) {
+                               if (!i)
+                                       goto err_table_put;
+                               incomplete = true;
+                               break;
+                       }
+               } else {
+                       if (!strcmp(table->name, table_name)) {
+                               err = devlink_dpipe_table_put(skb, table);
+                               if (err)
+                                       break;
+                       }
+               }
+               i++;
+       }
+
+       nla_nest_end(skb, tables_attr);
+       genlmsg_end(skb, hdr);
+       if (incomplete)
+               goto start_again;
+
+send_done:
+       nlh = nlmsg_put(skb, info->snd_portid, info->snd_seq,
+                       NLMSG_DONE, 0, flags | NLM_F_MULTI);
+       if (!nlh) {
+               err = devlink_dpipe_send_and_alloc_skb(&skb, info);
+               if (err)
+                       goto err_skb_send_alloc;
+               goto send_done;
+       }
+
+       return genlmsg_reply(skb, info);
+
+nla_put_failure:
+       err = -EMSGSIZE;
+err_table_put:
+err_skb_send_alloc:
+       genlmsg_cancel(skb, hdr);
+       nlmsg_free(skb);
+       return err;
+}
+
+static int devlink_nl_cmd_dpipe_table_get(struct sk_buff *skb,
+                                         struct genl_info *info)
+{
+       struct devlink *devlink = info->user_ptr[0];
+       const char *table_name = NULL;
+
+       if (info->attrs[DEVLINK_ATTR_DPIPE_TABLE_NAME])
+               table_name = nla_data(info->attrs[DEVLINK_ATTR_DPIPE_TABLE_NAME]);
+
+       return devlink_dpipe_tables_fill(info, DEVLINK_CMD_DPIPE_TABLE_GET, 0,
+                                        &devlink->dpipe_table_list,
+                                        table_name);
+}
+
+static int devlink_dpipe_value_put(struct sk_buff *skb,
+                                  struct devlink_dpipe_value *value)
+{
+       if (nla_put(skb, DEVLINK_ATTR_DPIPE_VALUE,
+                   value->value_size, value->value))
+               return -EMSGSIZE;
+       if (value->mask)
+               if (nla_put(skb, DEVLINK_ATTR_DPIPE_VALUE_MASK,
+                           value->value_size, value->mask))
+                       return -EMSGSIZE;
+       if (value->mapping_valid)
+               if (nla_put_u32(skb, DEVLINK_ATTR_DPIPE_VALUE_MAPPING,
+                               value->mapping_value))
+                       return -EMSGSIZE;
+       return 0;
+}
+
+static int devlink_dpipe_action_value_put(struct sk_buff *skb,
+                                         struct devlink_dpipe_value *value)
+{
+       if (!value->action)
+               return -EINVAL;
+       if (devlink_dpipe_action_put(skb, value->action))
+               return -EMSGSIZE;
+       if (devlink_dpipe_value_put(skb, value))
+               return -EMSGSIZE;
+       return 0;
+}
+
+static int devlink_dpipe_action_values_put(struct sk_buff *skb,
+                                          struct devlink_dpipe_value *values,
+                                          unsigned int values_count)
+{
+       struct nlattr *action_attr;
+       int i;
+       int err;
+
+       for (i = 0; i < values_count; i++) {
+               action_attr = nla_nest_start(skb,
+                                            DEVLINK_ATTR_DPIPE_ACTION_VALUE);
+               if (!action_attr)
+                       return -EMSGSIZE;
+               err = devlink_dpipe_action_value_put(skb, &values[i]);
+               if (err)
+                       goto err_action_value_put;
+               nla_nest_end(skb, action_attr);
+       }
+       return 0;
+
+err_action_value_put:
+       nla_nest_cancel(skb, action_attr);
+       return err;
+}
+
+static int devlink_dpipe_match_value_put(struct sk_buff *skb,
+                                        struct devlink_dpipe_value *value)
+{
+       if (!value->match)
+               return -EINVAL;
+       if (devlink_dpipe_match_put(skb, value->match))
+               return -EMSGSIZE;
+       if (devlink_dpipe_value_put(skb, value))
+               return -EMSGSIZE;
+       return 0;
+}
+
+static int devlink_dpipe_match_values_put(struct sk_buff *skb,
+                                         struct devlink_dpipe_value *values,
+                                         unsigned int values_count)
+{
+       struct nlattr *match_attr;
+       int i;
+       int err;
+
+       for (i = 0; i < values_count; i++) {
+               match_attr = nla_nest_start(skb,
+                                           DEVLINK_ATTR_DPIPE_MATCH_VALUE);
+               if (!match_attr)
+                       return -EMSGSIZE;
+               err = devlink_dpipe_match_value_put(skb, &values[i]);
+               if (err)
+                       goto err_match_value_put;
+               nla_nest_end(skb, match_attr);
+       }
+       return 0;
+
+err_match_value_put:
+       nla_nest_cancel(skb, match_attr);
+       return err;
+}
+
+static int devlink_dpipe_entry_put(struct sk_buff *skb,
+                                  struct devlink_dpipe_entry *entry)
+{
+       struct nlattr *entry_attr, *matches_attr, *actions_attr;
+       int err;
+
+       entry_attr = nla_nest_start(skb, DEVLINK_ATTR_DPIPE_ENTRY);
+       if (!entry_attr)
+               return -EMSGSIZE;
+
+       if (nla_put_u64_64bit(skb, DEVLINK_ATTR_DPIPE_ENTRY_INDEX, entry->index,
+                             DEVLINK_ATTR_PAD))
+               goto nla_put_failure;
+       if (entry->counter_valid)
+               if (nla_put_u64_64bit(skb, DEVLINK_ATTR_DPIPE_ENTRY_COUNTER,
+                                     entry->counter, DEVLINK_ATTR_PAD))
+                       goto nla_put_failure;
+
+       matches_attr = nla_nest_start(skb,
+                                     DEVLINK_ATTR_DPIPE_ENTRY_MATCH_VALUES);
+       if (!matches_attr)
+               goto nla_put_failure;
+
+       err = devlink_dpipe_match_values_put(skb, entry->match_values,
+                                            entry->match_values_count);
+       if (err) {
+               nla_nest_cancel(skb, matches_attr);
+               goto err_match_values_put;
+       }
+       nla_nest_end(skb, matches_attr);
+
+       actions_attr = nla_nest_start(skb,
+                                     DEVLINK_ATTR_DPIPE_ENTRY_ACTION_VALUES);
+       if (!actions_attr)
+               goto nla_put_failure;
+
+       err = devlink_dpipe_action_values_put(skb, entry->action_values,
+                                             entry->action_values_count);
+       if (err) {
+               nla_nest_cancel(skb, actions_attr);
+               goto err_action_values_put;
+       }
+       nla_nest_end(skb, actions_attr);
 
+       nla_nest_end(skb, entry_attr);
        return 0;
+
+nla_put_failure:
+       err = -EMSGSIZE;
+err_match_values_put:
+err_action_values_put:
+       nla_nest_cancel(skb, entry_attr);
+       return err;
+}
+
+static struct devlink_dpipe_table *
+devlink_dpipe_table_find(struct list_head *dpipe_tables,
+                        const char *table_name)
+{
+       struct devlink_dpipe_table *table;
+
+       list_for_each_entry_rcu(table, dpipe_tables, list) {
+               if (!strcmp(table->name, table_name))
+                       return table;
+       }
+       return NULL;
+}
+
+int devlink_dpipe_entry_ctx_prepare(struct devlink_dpipe_dump_ctx *dump_ctx)
+{
+       struct devlink *devlink;
+       int err;
+
+       err = devlink_dpipe_send_and_alloc_skb(&dump_ctx->skb,
+                                              dump_ctx->info);
+       if (err)
+               return err;
+
+       dump_ctx->hdr = genlmsg_put(dump_ctx->skb,
+                                   dump_ctx->info->snd_portid,
+                                   dump_ctx->info->snd_seq,
+                                   &devlink_nl_family, NLM_F_MULTI,
+                                   dump_ctx->cmd);
+       if (!dump_ctx->hdr)
+               goto nla_put_failure;
+
+       devlink = dump_ctx->info->user_ptr[0];
+       if (devlink_nl_put_handle(dump_ctx->skb, devlink))
+               goto nla_put_failure;
+       dump_ctx->nest = nla_nest_start(dump_ctx->skb,
+                                       DEVLINK_ATTR_DPIPE_ENTRIES);
+       if (!dump_ctx->nest)
+               goto nla_put_failure;
+       return 0;
+
+nla_put_failure:
+       genlmsg_cancel(dump_ctx->skb, dump_ctx->hdr);
+       nlmsg_free(dump_ctx->skb);
+       return -EMSGSIZE;
+}
+EXPORT_SYMBOL_GPL(devlink_dpipe_entry_ctx_prepare);
+
+int devlink_dpipe_entry_ctx_append(struct devlink_dpipe_dump_ctx *dump_ctx,
+                                  struct devlink_dpipe_entry *entry)
+{
+       return devlink_dpipe_entry_put(dump_ctx->skb, entry);
+}
+EXPORT_SYMBOL_GPL(devlink_dpipe_entry_ctx_append);
+
+int devlink_dpipe_entry_ctx_close(struct devlink_dpipe_dump_ctx *dump_ctx)
+{
+       nla_nest_end(dump_ctx->skb, dump_ctx->nest);
+       genlmsg_end(dump_ctx->skb, dump_ctx->hdr);
+       return 0;
+}
+EXPORT_SYMBOL_GPL(devlink_dpipe_entry_ctx_close);
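
The three _ctx helpers above are the entire dump-side contract for a driver's entries_dump callback: _prepare opens a fresh skb and the DEVLINK_ATTR_DPIPE_ENTRIES nest, _append serializes one entry, and _close seals the message. A minimal sketch of a callback built on them; everything named example_* is invented for illustration and is not part of this patch:

    /* Hypothetical driver callback; only the devlink_dpipe_* calls are real. */
    static int example_entries_dump(void *priv, bool counters_enabled,
                                    struct devlink_dpipe_dump_ctx *dump_ctx)
    {
            struct devlink_dpipe_entry entry = {};
            int err;

            err = devlink_dpipe_entry_ctx_prepare(dump_ctx);
            if (err)
                    return err;

            /* Fill entry.index and the match/action value arrays from
             * hardware state; set entry.counter and entry.counter_valid
             * when counters_enabled is true.
             */
            err = devlink_dpipe_entry_ctx_append(dump_ctx, &entry);
            if (err)
                    return err;

            return devlink_dpipe_entry_ctx_close(dump_ctx);
    }

A real driver would loop over its entries and, on -EMSGSIZE from _append, presumably close the context and _prepare a fresh one so that large dumps span several multipart messages.
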
+
+static int devlink_dpipe_entries_fill(struct genl_info *info,
+                                     enum devlink_command cmd, int flags,
+                                     struct devlink_dpipe_table *table)
+{
+       struct devlink_dpipe_dump_ctx dump_ctx;
+       struct nlmsghdr *nlh;
+       int err;
+
+       dump_ctx.skb = NULL;
+       dump_ctx.cmd = cmd;
+       dump_ctx.info = info;
+
+       err = table->table_ops->entries_dump(table->priv,
+                                            table->counters_enabled,
+                                            &dump_ctx);
+       if (err)
+               goto err_entries_dump;
+
+send_done:
+       nlh = nlmsg_put(dump_ctx.skb, info->snd_portid, info->snd_seq,
+                       NLMSG_DONE, 0, flags | NLM_F_MULTI);
+       if (!nlh) {
+               err = devlink_dpipe_send_and_alloc_skb(&dump_ctx.skb, info);
+               if (err)
+                       goto err_skb_send_alloc;
+               goto send_done;
+       }
+       return genlmsg_reply(dump_ctx.skb, info);
+
+err_entries_dump:
+err_skb_send_alloc:
+       genlmsg_cancel(dump_ctx.skb, dump_ctx.hdr);
+       nlmsg_free(dump_ctx.skb);
+       return err;
+}
+
+static int devlink_nl_cmd_dpipe_entries_get(struct sk_buff *skb,
+                                           struct genl_info *info)
+{
+       struct devlink *devlink = info->user_ptr[0];
+       struct devlink_dpipe_table *table;
+       const char *table_name;
+
+       if (!info->attrs[DEVLINK_ATTR_DPIPE_TABLE_NAME])
+               return -EINVAL;
+
+       table_name = nla_data(info->attrs[DEVLINK_ATTR_DPIPE_TABLE_NAME]);
+       table = devlink_dpipe_table_find(&devlink->dpipe_table_list,
+                                        table_name);
+       if (!table)
+               return -EINVAL;
+
+       if (!table->table_ops->entries_dump)
+               return -EINVAL;
+
+       return devlink_dpipe_entries_fill(info, DEVLINK_CMD_DPIPE_ENTRIES_GET,
+                                         0, table);
+}
+
+static int devlink_dpipe_fields_put(struct sk_buff *skb,
+                                   const struct devlink_dpipe_header *header)
+{
+       struct devlink_dpipe_field *field;
+       struct nlattr *field_attr;
+       int i;
+
+       for (i = 0; i < header->fields_count; i++) {
+               field = &header->fields[i];
+               field_attr = nla_nest_start(skb, DEVLINK_ATTR_DPIPE_FIELD);
+               if (!field_attr)
+                       return -EMSGSIZE;
+               if (nla_put_string(skb, DEVLINK_ATTR_DPIPE_FIELD_NAME, field->name) ||
+                   nla_put_u32(skb, DEVLINK_ATTR_DPIPE_FIELD_ID, field->id) ||
+                   nla_put_u32(skb, DEVLINK_ATTR_DPIPE_FIELD_BITWIDTH, field->bitwidth) ||
+                   nla_put_u32(skb, DEVLINK_ATTR_DPIPE_FIELD_MAPPING_TYPE, field->mapping_type))
+                       goto nla_put_failure;
+               nla_nest_end(skb, field_attr);
+       }
+       return 0;
+
+nla_put_failure:
+       nla_nest_cancel(skb, field_attr);
+       return -EMSGSIZE;
+}
+
+static int devlink_dpipe_header_put(struct sk_buff *skb,
+                                   struct devlink_dpipe_header *header)
+{
+       struct nlattr *fields_attr, *header_attr;
+       int err;
+
+       header_attr = nla_nest_start(skb, DEVLINK_ATTR_DPIPE_HEADER);
+       if (!header_attr)
+               return -EMSGSIZE;
+
+       if (nla_put_string(skb, DEVLINK_ATTR_DPIPE_HEADER_NAME, header->name) ||
+           nla_put_u32(skb, DEVLINK_ATTR_DPIPE_HEADER_ID, header->id) ||
+           nla_put_u8(skb, DEVLINK_ATTR_DPIPE_HEADER_GLOBAL, header->global))
+               goto nla_put_failure;
+
+       fields_attr = nla_nest_start(skb, DEVLINK_ATTR_DPIPE_HEADER_FIELDS);
+       if (!fields_attr)
+               goto nla_put_failure;
+
+       err = devlink_dpipe_fields_put(skb, header);
+       if (err) {
+               nla_nest_cancel(skb, fields_attr);
+               goto nla_put_failure;
+       }
+       nla_nest_end(skb, fields_attr);
+       nla_nest_end(skb, header_attr);
+       return 0;
+
+nla_put_failure:
+       err = -EMSGSIZE;
+       nla_nest_cancel(skb, header_attr);
+       return err;
+}
+
+static int devlink_dpipe_headers_fill(struct genl_info *info,
+                                     enum devlink_command cmd, int flags,
+                                     struct devlink_dpipe_headers *
+                                     dpipe_headers)
+{
+       struct devlink *devlink = info->user_ptr[0];
+       struct nlattr *headers_attr;
+       struct sk_buff *skb = NULL;
+       struct nlmsghdr *nlh;
+       void *hdr;
+       int i, j;
+       int err;
+
+       i = 0;
+start_again:
+       err = devlink_dpipe_send_and_alloc_skb(&skb, info);
+       if (err)
+               return err;
+
+       hdr = genlmsg_put(skb, info->snd_portid, info->snd_seq,
+                         &devlink_nl_family, NLM_F_MULTI, cmd);
+       if (!hdr) {
+               nlmsg_free(skb);
+               return -EMSGSIZE;
+       }
+
+       if (devlink_nl_put_handle(skb, devlink))
+               goto nla_put_failure;
+       headers_attr = nla_nest_start(skb, DEVLINK_ATTR_DPIPE_HEADERS);
+       if (!headers_attr)
+               goto nla_put_failure;
+
+       j = 0;
+       for (; i < dpipe_headers->headers_count; i++) {
+               err = devlink_dpipe_header_put(skb, dpipe_headers->headers[i]);
+               if (err) {
+                       if (!j)
+                               goto err_table_put;
+                       break;
+               }
+               j++;
+       }
+       nla_nest_end(skb, headers_attr);
+       genlmsg_end(skb, hdr);
+       if (i != dpipe_headers->headers_count)
+               goto start_again;
+
+send_done:
+       nlh = nlmsg_put(skb, info->snd_portid, info->snd_seq,
+                       NLMSG_DONE, 0, flags | NLM_F_MULTI);
+       if (!nlh) {
+               err = devlink_dpipe_send_and_alloc_skb(&skb, info);
+               if (err)
+                       goto err_skb_send_alloc;
+               goto send_done;
+       }
+       return genlmsg_reply(skb, info);
+
+nla_put_failure:
+       err = -EMSGSIZE;
+err_table_put:
+err_skb_send_alloc:
+       genlmsg_cancel(skb, hdr);
+       nlmsg_free(skb);
+       return err;
+}
+
+static int devlink_nl_cmd_dpipe_headers_get(struct sk_buff *skb,
+                                           struct genl_info *info)
+{
+       struct devlink *devlink = info->user_ptr[0];
+
+       if (!devlink->dpipe_headers)
+               return -EOPNOTSUPP;
+       return devlink_dpipe_headers_fill(info, DEVLINK_CMD_DPIPE_HEADERS_GET,
+                                         0, devlink->dpipe_headers);
+}
+
+static int devlink_dpipe_table_counters_set(struct devlink *devlink,
+                                           const char *table_name,
+                                           bool enable)
+{
+       struct devlink_dpipe_table *table;
+
+       table = devlink_dpipe_table_find(&devlink->dpipe_table_list,
+                                        table_name);
+       if (!table)
+               return -EINVAL;
+
+       if (table->counter_control_extern)
+               return -EOPNOTSUPP;
+
+       if (table->counters_enabled == enable)
+               return 0;
+
+       table->counters_enabled = enable;
+       if (table->table_ops->counters_set_update)
+               table->table_ops->counters_set_update(table->priv, enable);
+       return 0;
+}
+
+static int devlink_nl_cmd_dpipe_table_counters_set(struct sk_buff *skb,
+                                                  struct genl_info *info)
+{
+       struct devlink *devlink = info->user_ptr[0];
+       const char *table_name;
+       bool counters_enable;
+
+       if (!info->attrs[DEVLINK_ATTR_DPIPE_TABLE_NAME] ||
+           !info->attrs[DEVLINK_ATTR_DPIPE_TABLE_COUNTERS_ENABLED])
+               return -EINVAL;
+
+       table_name = nla_data(info->attrs[DEVLINK_ATTR_DPIPE_TABLE_NAME]);
+       counters_enable = !!nla_get_u8(info->attrs[DEVLINK_ATTR_DPIPE_TABLE_COUNTERS_ENABLED]);
+
+       return devlink_dpipe_table_counters_set(devlink, table_name,
+                                               counters_enable);
 }
 
 static const struct nla_policy devlink_nl_policy[DEVLINK_ATTR_MAX + 1] = {
@@ -1512,6 +2190,8 @@ static const struct nla_policy devlink_nl_policy[DEVLINK_ATTR_MAX + 1] = {
        [DEVLINK_ATTR_SB_TC_INDEX] = { .type = NLA_U16 },
        [DEVLINK_ATTR_ESWITCH_MODE] = { .type = NLA_U16 },
        [DEVLINK_ATTR_ESWITCH_INLINE_MODE] = { .type = NLA_U8 },
+       [DEVLINK_ATTR_DPIPE_TABLE_NAME] = { .type = NLA_NUL_STRING },
+       [DEVLINK_ATTR_DPIPE_TABLE_COUNTERS_ENABLED] = { .type = NLA_U8 },
 };
 
 static const struct genl_ops devlink_nl_ops[] = {
@@ -1644,6 +2324,34 @@ static const struct genl_ops devlink_nl_ops[] = {
                .flags = GENL_ADMIN_PERM,
                .internal_flags = DEVLINK_NL_FLAG_NEED_DEVLINK,
        },
+       {
+               .cmd = DEVLINK_CMD_DPIPE_TABLE_GET,
+               .doit = devlink_nl_cmd_dpipe_table_get,
+               .policy = devlink_nl_policy,
+               .flags = GENL_ADMIN_PERM,
+               .internal_flags = DEVLINK_NL_FLAG_NEED_DEVLINK,
+       },
+       {
+               .cmd = DEVLINK_CMD_DPIPE_ENTRIES_GET,
+               .doit = devlink_nl_cmd_dpipe_entries_get,
+               .policy = devlink_nl_policy,
+               .flags = GENL_ADMIN_PERM,
+               .internal_flags = DEVLINK_NL_FLAG_NEED_DEVLINK,
+       },
+       {
+               .cmd = DEVLINK_CMD_DPIPE_HEADERS_GET,
+               .doit = devlink_nl_cmd_dpipe_headers_get,
+               .policy = devlink_nl_policy,
+               .flags = GENL_ADMIN_PERM,
+               .internal_flags = DEVLINK_NL_FLAG_NEED_DEVLINK,
+       },
+       {
+               .cmd = DEVLINK_CMD_DPIPE_TABLE_COUNTERS_SET,
+               .doit = devlink_nl_cmd_dpipe_table_counters_set,
+               .policy = devlink_nl_policy,
+               .flags = GENL_ADMIN_PERM,
+               .internal_flags = DEVLINK_NL_FLAG_NEED_DEVLINK,
+       },
 };
 
 static struct genl_family devlink_nl_family __ro_after_init = {
@@ -1680,6 +2388,7 @@ struct devlink *devlink_alloc(const struct devlink_ops *ops, size_t priv_size)
        devlink_net_set(devlink, &init_net);
        INIT_LIST_HEAD(&devlink->port_list);
        INIT_LIST_HEAD(&devlink->sb_list);
+       INIT_LIST_HEAD_RCU(&devlink->dpipe_table_list);
        return devlink;
 }
 EXPORT_SYMBOL_GPL(devlink_alloc);
@@ -1880,6 +2589,133 @@ void devlink_sb_unregister(struct devlink *devlink, unsigned int sb_index)
 }
 EXPORT_SYMBOL_GPL(devlink_sb_unregister);
 
+/**
+ *     devlink_dpipe_headers_register - register dpipe headers
+ *
+ *     @devlink: devlink
+ *     @dpipe_headers: dpipe header array
+ *
+ *     Register the headers supported by hardware.
+ */
+int devlink_dpipe_headers_register(struct devlink *devlink,
+                                  struct devlink_dpipe_headers *dpipe_headers)
+{
+       mutex_lock(&devlink_mutex);
+       devlink->dpipe_headers = dpipe_headers;
+       mutex_unlock(&devlink_mutex);
+       return 0;
+}
+EXPORT_SYMBOL_GPL(devlink_dpipe_headers_register);
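
On the driver side, registration amounts to handing devlink a static description of the headers the hardware parses. A sketch assuming struct devlink_dpipe_field and struct devlink_dpipe_header carry exactly the members serialized above (name, id, bitwidth, mapping_type; name, id, global, fields, fields_count); the ipv4 content and example_* names are invented:

    static struct devlink_dpipe_field example_ipv4_fields[] = {
            {
                    .name = "destination ip",
                    .id = 0,
                    .bitwidth = 32,
                    .mapping_type = DEVLINK_DPIPE_FIELD_MAPPING_TYPE_NONE,
            },
    };

    static struct devlink_dpipe_header example_ipv4_header = {
            .name = "ipv4",
            .id = 0,
            .fields = example_ipv4_fields,
            .fields_count = ARRAY_SIZE(example_ipv4_fields),
            .global = true,         /* shared, not driver-specific */
    };

    static struct devlink_dpipe_header *example_headers[] = {
            &example_ipv4_header,
    };

    static struct devlink_dpipe_headers example_dpipe_headers = {
            .headers = example_headers,
            .headers_count = ARRAY_SIZE(example_headers),
    };

    static int example_register_headers(struct devlink *devlink)
    {
            return devlink_dpipe_headers_register(devlink,
                                                  &example_dpipe_headers);
    }
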
+
+/**
+ *     devlink_dpipe_headers_unregister - unregister dpipe headers
+ *
+ *     @devlink: devlink
+ *
+ *     Unregister the headers supported by hardware.
+ */
+void devlink_dpipe_headers_unregister(struct devlink *devlink)
+{
+       mutex_lock(&devlink_mutex);
+       devlink->dpipe_headers = NULL;
+       mutex_unlock(&devlink_mutex);
+}
+EXPORT_SYMBOL_GPL(devlink_dpipe_headers_unregister);
+
+/**
+ *     devlink_dpipe_table_counter_enabled - check if counter allocation
+ *                                           required
+ *     @devlink: devlink
+ *     @table_name: table name
+ *
+ *     Used by drivers to check if counter allocation is required.
+ *     After counter allocation is turned on, the table entries
+ *     are updated to include counter statistics.
+ *
+ *     From that point on, the driver must respect the counter
+ *     state so that each entry added to the table is added
+ *     with a counter.
+ */
+bool devlink_dpipe_table_counter_enabled(struct devlink *devlink,
+                                        const char *table_name)
+{
+       struct devlink_dpipe_table *table;
+       bool enabled;
+
+       rcu_read_lock();
+       table = devlink_dpipe_table_find(&devlink->dpipe_table_list,
+                                        table_name);
+       enabled = false;
+       if (table)
+               enabled = table->counters_enabled;
+       rcu_read_unlock();
+       return enabled;
+}
+EXPORT_SYMBOL_GPL(devlink_dpipe_table_counter_enabled);
+
+/**
+ *     devlink_dpipe_table_register - register dpipe table
+ *
+ *     @devlink: devlink
+ *     @table_name: table name
+ *     @table_ops: table ops
+ *     @priv: driver private data, passed back in the table ops callbacks
+ *     @size: maximum number of entries the table can hold
+ *     @counter_control_extern: external control for counters
+ */
+int devlink_dpipe_table_register(struct devlink *devlink,
+                                const char *table_name,
+                                struct devlink_dpipe_table_ops *table_ops,
+                                void *priv, u64 size,
+                                bool counter_control_extern)
+{
+       struct devlink_dpipe_table *table;
+
+       if (devlink_dpipe_table_find(&devlink->dpipe_table_list, table_name))
+               return -EEXIST;
+
+       table = kzalloc(sizeof(*table), GFP_KERNEL);
+       if (!table)
+               return -ENOMEM;
+
+       table->name = table_name;
+       table->table_ops = table_ops;
+       table->priv = priv;
+       table->size = size;
+       table->counter_control_extern = counter_control_extern;
+
+       mutex_lock(&devlink_mutex);
+       list_add_tail_rcu(&table->list, &devlink->dpipe_table_list);
+       mutex_unlock(&devlink_mutex);
+       return 0;
+}
+EXPORT_SYMBOL_GPL(devlink_dpipe_table_register);
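
The matching table registration. The two ops below are the ones this file actually invokes, with signatures implied by the call sites (entries_dump gets priv, counters_enabled and the dump context; counters_set_update gets priv and the new state); the names, capacity and priv plumbing are assumptions:

    static void example_counters_update(void *priv, bool enable)
    {
            /* attach or detach hardware counters on existing entries */
    }

    static struct devlink_dpipe_table_ops example_table_ops = {
            .entries_dump = example_entries_dump,   /* sketched earlier */
            .counters_set_update = example_counters_update,
    };

    static int example_register_table(struct devlink *devlink, void *priv)
    {
            /* counter_control_extern = false: devlink toggles counters */
            return devlink_dpipe_table_register(devlink, "example_table",
                                                &example_table_ops, priv,
                                                2048 /* arbitrary capacity */,
                                                false);
    }
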
+
+/**
+ *     devlink_dpipe_table_unregister - unregister dpipe table
+ *
+ *     @devlink: devlink
+ *     @table_name: table name
+ */
+void devlink_dpipe_table_unregister(struct devlink *devlink,
+                                   const char *table_name)
+{
+       struct devlink_dpipe_table *table;
+
+       mutex_lock(&devlink_mutex);
+       table = devlink_dpipe_table_find(&devlink->dpipe_table_list,
+                                        table_name);
+       if (!table)
+               goto unlock;
+       list_del_rcu(&table->list);
+       mutex_unlock(&devlink_mutex);
+       kfree_rcu(table, rcu);
+       return;
+unlock:
+       mutex_unlock(&devlink_mutex);
+}
+EXPORT_SYMBOL_GPL(devlink_dpipe_table_unregister);
+
 static int __init devlink_module_init(void)
 {
        return genl_register_family(&devlink_nl_family);
index fb55327dcfeabdaf3eeecc3a8d176ae215612649..70ccda233bd1f1aab18535e6d9d0419bb9a1a23b 100644 (file)
@@ -412,9 +412,8 @@ static int __init init_net_drop_monitor(void)
        for_each_possible_cpu(cpu) {
                data = &per_cpu(dm_cpu_data, cpu);
                INIT_WORK(&data->dm_alert_work, send_dm_alert);
-               init_timer(&data->send_timer);
-               data->send_timer.data = (unsigned long)data;
-               data->send_timer.function = sched_send_work;
+               setup_timer(&data->send_timer, sched_send_work,
+                           (unsigned long)data);
                spin_lock_init(&data->lock);
                reset_per_cpu_data(data);
        }
index aecb2c7241b697e79628fdb79467f5087b2bbf9f..905a88ad28e096d57289eba7f966629336382032 100644 (file)
@@ -109,6 +109,7 @@ static const char
 rss_hash_func_strings[ETH_RSS_HASH_FUNCS_COUNT][ETH_GSTRING_LEN] = {
        [ETH_RSS_HASH_TOP_BIT] =        "toeplitz",
        [ETH_RSS_HASH_XOR_BIT] =        "xor",
+       [ETH_RSS_HASH_CRC32_BIT] =      "crc32",
 };
 
 static const char
index b6791d94841d56cf8b1027d3ba2d71dd21302caf..816e3ccb0ec9ffc41442300f64cc63c97d1af879 100644 (file)
@@ -23,6 +23,20 @@ static const struct fib_kuid_range fib_kuid_range_unset = {
        KUIDT_INIT(~0),
 };
 
+bool fib_rule_matchall(const struct fib_rule *rule)
+{
+       if (rule->iifindex || rule->oifindex || rule->mark || rule->tun_id ||
+           rule->flags)
+               return false;
+       if (rule->suppress_ifgroup != -1 || rule->suppress_prefixlen != -1)
+               return false;
+       if (!uid_eq(rule->uid_range.start, fib_kuid_range_unset.start) ||
+           !uid_eq(rule->uid_range.end, fib_kuid_range_unset.end))
+               return false;
+       return true;
+}
+EXPORT_SYMBOL_GPL(fib_rule_matchall);
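
fib_rule_matchall() answers one narrow question: does this rule select every packet, i.e. carry no interface, mark, tunnel id, flag, suppressor or uid-range criteria? That is the test a switch driver needs before it can keep offloading plain FIB lookups; a hedged sketch of such a caller (the surrounding function is invented):

    /* Hypothetical: refuse hardware offload once real policy routing appears. */
    static int example_fib_rule_add(struct fib_rule *rule)
    {
            if (!fib_rule_matchall(rule))
                    return -EOPNOTSUPP;     /* matches on more than "all" */
            return 0;
    }
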
+
 int fib_default_rule_add(struct fib_rules_ops *ops,
                         u32 pref, u32 table, u32 flags)
 {
index ebaeaf2e46e8bd0171379604930d232f205afd07..ce2a19da8aa404ecaf7e25b5c1b2ddee6dad99f9 100644 (file)
@@ -26,6 +26,7 @@
 #include <linux/mm.h>
 #include <linux/fcntl.h>
 #include <linux/socket.h>
+#include <linux/sock_diag.h>
 #include <linux/in.h>
 #include <linux/inet.h>
 #include <linux/netdevice.h>
@@ -91,8 +92,13 @@ int sk_filter_trim_cap(struct sock *sk, struct sk_buff *skb, unsigned int cap)
        rcu_read_lock();
        filter = rcu_dereference(sk->sk_filter);
        if (filter) {
-               unsigned int pkt_len = bpf_prog_run_save_cb(filter->prog, skb);
+               struct sock *save_sk = skb->sk;
+               unsigned int pkt_len;
+
+               skb->sk = sk;
+               pkt_len = bpf_prog_run_save_cb(filter->prog, skb);
                err = pkt_len ? pskb_trim(skb, max(cap, pkt_len)) : -EPERM;
+               skb->sk = save_sk;
        }
        rcu_read_unlock();
 
@@ -928,7 +934,7 @@ static void sk_filter_release_rcu(struct rcu_head *rcu)
  */
 static void sk_filter_release(struct sk_filter *fp)
 {
-       if (atomic_dec_and_test(&fp->refcnt))
+       if (refcount_dec_and_test(&fp->refcnt))
                call_rcu(&fp->rcu, sk_filter_release_rcu);
 }
 
@@ -943,20 +949,27 @@ void sk_filter_uncharge(struct sock *sk, struct sk_filter *fp)
 /* try to charge the socket memory if there is space available
  * return true on success
  */
-bool sk_filter_charge(struct sock *sk, struct sk_filter *fp)
+static bool __sk_filter_charge(struct sock *sk, struct sk_filter *fp)
 {
        u32 filter_size = bpf_prog_size(fp->prog->len);
 
        /* same check as in sock_kmalloc() */
        if (filter_size <= sysctl_optmem_max &&
            atomic_read(&sk->sk_omem_alloc) + filter_size < sysctl_optmem_max) {
-               atomic_inc(&fp->refcnt);
                atomic_add(filter_size, &sk->sk_omem_alloc);
                return true;
        }
        return false;
 }
 
+bool sk_filter_charge(struct sock *sk, struct sk_filter *fp)
+{
+       bool ret = __sk_filter_charge(sk, fp);
+       if (ret)
+               refcount_inc(&fp->refcnt);
+       return ret;
+}
+
 static struct bpf_prog *bpf_migrate_filter(struct bpf_prog *fp)
 {
        struct sock_filter *old_prog;
@@ -1179,12 +1192,12 @@ static int __sk_attach_prog(struct bpf_prog *prog, struct sock *sk)
                return -ENOMEM;
 
        fp->prog = prog;
-       atomic_set(&fp->refcnt, 0);
 
-       if (!sk_filter_charge(sk, fp)) {
+       if (!__sk_filter_charge(sk, fp)) {
                kfree(fp);
                return -ENOMEM;
        }
+       refcount_set(&fp->refcnt, 1);
 
        old_fp = rcu_dereference_protected(sk->sk_filter,
                                           lockdep_sock_is_held(sk));
@@ -2599,6 +2612,36 @@ static const struct bpf_func_proto bpf_xdp_event_output_proto = {
        .arg5_type      = ARG_CONST_SIZE,
 };
 
+BPF_CALL_1(bpf_get_socket_cookie, struct sk_buff *, skb)
+{
+       return skb->sk ? sock_gen_cookie(skb->sk) : 0;
+}
+
+static const struct bpf_func_proto bpf_get_socket_cookie_proto = {
+       .func           = bpf_get_socket_cookie,
+       .gpl_only       = false,
+       .ret_type       = RET_INTEGER,
+       .arg1_type      = ARG_PTR_TO_CTX,
+};
+
+BPF_CALL_1(bpf_get_socket_uid, struct sk_buff *, skb)
+{
+       struct sock *sk = sk_to_full_sk(skb->sk);
+       kuid_t kuid;
+
+       if (!sk || !sk_fullsock(sk))
+               return overflowuid;
+       kuid = sock_net_uid(sock_net(sk), sk);
+       return from_kuid_munged(sock_net(sk)->user_ns, kuid);
+}
+
+static const struct bpf_func_proto bpf_get_socket_uid_proto = {
+       .func           = bpf_get_socket_uid,
+       .gpl_only       = false,
+       .ret_type       = RET_INTEGER,
+       .arg1_type      = ARG_PTR_TO_CTX,
+};
+
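
Both helpers are exposed to socket-filter and tc classifier/action programs in the switch statements below. A minimal BPF-C sketch of a socket filter using them, assuming the usual samples/bpf-style SEC() macro and helper declarations:

    SEC("socket")
    int watch_non_root(struct __sk_buff *skb)
    {
            __u64 cookie = bpf_get_socket_cookie(skb); /* 0 if no socket */
            __u32 uid = bpf_get_socket_uid(skb);

            if (!cookie || uid == 0)
                    return 0;       /* socket filters drop by returning 0 */
            /* a real program might bump a per-cookie counter map here */
            return skb->len;        /* accept the packet in full */
    }

    char _license[] SEC("license") = "GPL";
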
 static const struct bpf_func_proto *
 bpf_base_func_proto(enum bpf_func_id func_id)
 {
@@ -2633,6 +2676,10 @@ sk_filter_func_proto(enum bpf_func_id func_id)
        switch (func_id) {
        case BPF_FUNC_skb_load_bytes:
                return &bpf_skb_load_bytes_proto;
+       case BPF_FUNC_get_socket_cookie:
+               return &bpf_get_socket_cookie_proto;
+       case BPF_FUNC_get_socket_uid:
+               return &bpf_get_socket_uid_proto;
        default:
                return bpf_base_func_proto(func_id);
        }
@@ -2692,6 +2739,10 @@ tc_cls_act_func_proto(enum bpf_func_id func_id)
                return &bpf_get_smp_processor_id_proto;
        case BPF_FUNC_skb_under_cgroup:
                return &bpf_skb_under_cgroup_proto;
+       case BPF_FUNC_get_socket_cookie:
+               return &bpf_get_socket_cookie_proto;
+       case BPF_FUNC_get_socket_uid:
+               return &bpf_get_socket_uid_proto;
        default:
                return bpf_base_func_proto(func_id);
        }
@@ -3252,111 +3303,55 @@ static u32 xdp_convert_ctx_access(enum bpf_access_type type,
        return insn - insn_buf;
 }
 
-static const struct bpf_verifier_ops sk_filter_ops = {
+const struct bpf_verifier_ops sk_filter_prog_ops = {
        .get_func_proto         = sk_filter_func_proto,
        .is_valid_access        = sk_filter_is_valid_access,
        .convert_ctx_access     = bpf_convert_ctx_access,
 };
 
-static const struct bpf_verifier_ops tc_cls_act_ops = {
+const struct bpf_verifier_ops tc_cls_act_prog_ops = {
        .get_func_proto         = tc_cls_act_func_proto,
        .is_valid_access        = tc_cls_act_is_valid_access,
        .convert_ctx_access     = tc_cls_act_convert_ctx_access,
        .gen_prologue           = tc_cls_act_prologue,
+       .test_run               = bpf_prog_test_run_skb,
 };
 
-static const struct bpf_verifier_ops xdp_ops = {
+const struct bpf_verifier_ops xdp_prog_ops = {
        .get_func_proto         = xdp_func_proto,
        .is_valid_access        = xdp_is_valid_access,
        .convert_ctx_access     = xdp_convert_ctx_access,
+       .test_run               = bpf_prog_test_run_xdp,
 };
 
-static const struct bpf_verifier_ops cg_skb_ops = {
+const struct bpf_verifier_ops cg_skb_prog_ops = {
        .get_func_proto         = cg_skb_func_proto,
        .is_valid_access        = sk_filter_is_valid_access,
        .convert_ctx_access     = bpf_convert_ctx_access,
+       .test_run               = bpf_prog_test_run_skb,
 };
 
-static const struct bpf_verifier_ops lwt_inout_ops = {
+const struct bpf_verifier_ops lwt_inout_prog_ops = {
        .get_func_proto         = lwt_inout_func_proto,
        .is_valid_access        = lwt_is_valid_access,
        .convert_ctx_access     = bpf_convert_ctx_access,
+       .test_run               = bpf_prog_test_run_skb,
 };
 
-static const struct bpf_verifier_ops lwt_xmit_ops = {
+const struct bpf_verifier_ops lwt_xmit_prog_ops = {
        .get_func_proto         = lwt_xmit_func_proto,
        .is_valid_access        = lwt_is_valid_access,
        .convert_ctx_access     = bpf_convert_ctx_access,
        .gen_prologue           = tc_cls_act_prologue,
+       .test_run               = bpf_prog_test_run_skb,
 };
 
-static const struct bpf_verifier_ops cg_sock_ops = {
+const struct bpf_verifier_ops cg_sock_prog_ops = {
        .get_func_proto         = bpf_base_func_proto,
        .is_valid_access        = sock_filter_is_valid_access,
        .convert_ctx_access     = sock_filter_convert_ctx_access,
 };
 
-static struct bpf_prog_type_list sk_filter_type __ro_after_init = {
-       .ops    = &sk_filter_ops,
-       .type   = BPF_PROG_TYPE_SOCKET_FILTER,
-};
-
-static struct bpf_prog_type_list sched_cls_type __ro_after_init = {
-       .ops    = &tc_cls_act_ops,
-       .type   = BPF_PROG_TYPE_SCHED_CLS,
-};
-
-static struct bpf_prog_type_list sched_act_type __ro_after_init = {
-       .ops    = &tc_cls_act_ops,
-       .type   = BPF_PROG_TYPE_SCHED_ACT,
-};
-
-static struct bpf_prog_type_list xdp_type __ro_after_init = {
-       .ops    = &xdp_ops,
-       .type   = BPF_PROG_TYPE_XDP,
-};
-
-static struct bpf_prog_type_list cg_skb_type __ro_after_init = {
-       .ops    = &cg_skb_ops,
-       .type   = BPF_PROG_TYPE_CGROUP_SKB,
-};
-
-static struct bpf_prog_type_list lwt_in_type __ro_after_init = {
-       .ops    = &lwt_inout_ops,
-       .type   = BPF_PROG_TYPE_LWT_IN,
-};
-
-static struct bpf_prog_type_list lwt_out_type __ro_after_init = {
-       .ops    = &lwt_inout_ops,
-       .type   = BPF_PROG_TYPE_LWT_OUT,
-};
-
-static struct bpf_prog_type_list lwt_xmit_type __ro_after_init = {
-       .ops    = &lwt_xmit_ops,
-       .type   = BPF_PROG_TYPE_LWT_XMIT,
-};
-
-static struct bpf_prog_type_list cg_sock_type __ro_after_init = {
-       .ops    = &cg_sock_ops,
-       .type   = BPF_PROG_TYPE_CGROUP_SOCK
-};
-
-static int __init register_sk_filter_ops(void)
-{
-       bpf_register_prog_type(&sk_filter_type);
-       bpf_register_prog_type(&sched_cls_type);
-       bpf_register_prog_type(&sched_act_type);
-       bpf_register_prog_type(&xdp_type);
-       bpf_register_prog_type(&cg_skb_type);
-       bpf_register_prog_type(&cg_sock_type);
-       bpf_register_prog_type(&lwt_in_type);
-       bpf_register_prog_type(&lwt_out_type);
-       bpf_register_prog_type(&lwt_xmit_type);
-
-       return 0;
-}
-late_initcall(register_sk_filter_ops);
-
 int sk_detach_filter(struct sock *sk)
 {
        int ret = -ENOENT;
index f765c11d8df567d704998185482c3d220280c148..f7f5d1932a2720767dd31f4033f196815ff08447 100644 (file)
@@ -47,7 +47,7 @@ struct flow_flush_info {
 
 static struct kmem_cache *flow_cachep __read_mostly;
 
-#define flow_cache_hash_size(cache)    (1 << (cache)->hash_shift)
+#define flow_cache_hash_size(cache)    (1U << (cache)->hash_shift)
 #define FLOW_HASH_RND_PERIOD           (10 * 60 * HZ)
 
 static void flow_cache_new_hashrnd(unsigned long arg)
@@ -99,7 +99,8 @@ static void flow_cache_gc_task(struct work_struct *work)
 }
 
 static void flow_cache_queue_garbage(struct flow_cache_percpu *fcp,
-                                    int deleted, struct list_head *gc_list,
+                                    unsigned int deleted,
+                                    struct list_head *gc_list,
                                     struct netns_xfrm *xfrm)
 {
        if (deleted) {
@@ -114,17 +115,18 @@ static void flow_cache_queue_garbage(struct flow_cache_percpu *fcp,
 
 static void __flow_cache_shrink(struct flow_cache *fc,
                                struct flow_cache_percpu *fcp,
-                               int shrink_to)
+                               unsigned int shrink_to)
 {
        struct flow_cache_entry *fle;
        struct hlist_node *tmp;
        LIST_HEAD(gc_list);
-       int i, deleted = 0;
+       unsigned int deleted = 0;
        struct netns_xfrm *xfrm = container_of(fc, struct netns_xfrm,
                                                flow_cache_global);
+       unsigned int i;
 
        for (i = 0; i < flow_cache_hash_size(fc); i++) {
-               int saved = 0;
+               unsigned int saved = 0;
 
                hlist_for_each_entry_safe(fle, tmp,
                                          &fcp->hash_table[i], u.hlist) {
@@ -145,7 +147,7 @@ static void __flow_cache_shrink(struct flow_cache *fc,
 static void flow_cache_shrink(struct flow_cache *fc,
                              struct flow_cache_percpu *fcp)
 {
-       int shrink_to = fc->low_watermark / flow_cache_hash_size(fc);
+       unsigned int shrink_to = fc->low_watermark / flow_cache_hash_size(fc);
 
        __flow_cache_shrink(fc, fcp, shrink_to);
 }
@@ -161,7 +163,7 @@ static void flow_new_hash_rnd(struct flow_cache *fc,
 static u32 flow_hash_code(struct flow_cache *fc,
                          struct flow_cache_percpu *fcp,
                          const struct flowi *key,
-                         size_t keysize)
+                         unsigned int keysize)
 {
        const u32 *k = (const u32 *) key;
        const u32 length = keysize * sizeof(flow_compare_t) / sizeof(u32);
@@ -174,7 +176,7 @@ static u32 flow_hash_code(struct flow_cache *fc,
  * important assumptions that we can here, such as alignment.
  */
 static int flow_key_compare(const struct flowi *key1, const struct flowi *key2,
-                           size_t keysize)
+                           unsigned int keysize)
 {
        const flow_compare_t *k1, *k1_lim, *k2;
 
@@ -199,7 +201,7 @@ flow_cache_lookup(struct net *net, const struct flowi *key, u16 family, u8 dir,
        struct flow_cache_percpu *fcp;
        struct flow_cache_entry *fle, *tfle;
        struct flow_cache_object *flo;
-       size_t keysize;
+       unsigned int keysize;
        unsigned int hash;
 
        local_bh_disable();
@@ -295,9 +297,10 @@ static void flow_cache_flush_tasklet(unsigned long data)
        struct flow_cache_entry *fle;
        struct hlist_node *tmp;
        LIST_HEAD(gc_list);
-       int i, deleted = 0;
+       unsigned int deleted = 0;
        struct netns_xfrm *xfrm = container_of(fc, struct netns_xfrm,
                                                flow_cache_global);
+       unsigned int i;
 
        fcp = this_cpu_ptr(fc->percpu);
        for (i = 0; i < flow_cache_hash_size(fc); i++) {
@@ -327,7 +330,7 @@ static void flow_cache_flush_tasklet(unsigned long data)
 static int flow_cache_percpu_empty(struct flow_cache *fc, int cpu)
 {
        struct flow_cache_percpu *fcp;
-       int i;
+       unsigned int i;
 
        fcp = per_cpu_ptr(fc->percpu, cpu);
        for (i = 0; i < flow_cache_hash_size(fc); i++)
@@ -402,12 +405,12 @@ void flow_cache_flush_deferred(struct net *net)
 static int flow_cache_cpu_prepare(struct flow_cache *fc, int cpu)
 {
        struct flow_cache_percpu *fcp = per_cpu_ptr(fc->percpu, cpu);
-       size_t sz = sizeof(struct hlist_head) * flow_cache_hash_size(fc);
+       unsigned int sz = sizeof(struct hlist_head) * flow_cache_hash_size(fc);
 
        if (!fcp->hash_table) {
                fcp->hash_table = kzalloc_node(sz, GFP_KERNEL, cpu_to_node(cpu));
                if (!fcp->hash_table) {
-                       pr_err("NET: failed to allocate flow cache sz %zu\n", sz);
+                       pr_err("NET: failed to allocate flow cache sz %u\n", sz);
                        return -ENOMEM;
                }
                fcp->hash_rnd_recalc = 1;
index c35aae13c8d22680cb07222cbd9f1ee976f0bd64..c9cf425303f84b6b5c3a12876d68435a531b6b30 100644 (file)
@@ -113,6 +113,216 @@ __be32 __skb_flow_get_ports(const struct sk_buff *skb, int thoff, u8 ip_proto,
 }
 EXPORT_SYMBOL(__skb_flow_get_ports);
 
+enum flow_dissect_ret {
+       FLOW_DISSECT_RET_OUT_GOOD,
+       FLOW_DISSECT_RET_OUT_BAD,
+       FLOW_DISSECT_RET_OUT_PROTO_AGAIN,
+};
+
+static enum flow_dissect_ret
+__skb_flow_dissect_mpls(const struct sk_buff *skb,
+                       struct flow_dissector *flow_dissector,
+                       void *target_container, void *data, int nhoff, int hlen)
+{
+       struct flow_dissector_key_keyid *key_keyid;
+       struct mpls_label *hdr, _hdr[2];
+
+       if (!dissector_uses_key(flow_dissector,
+                               FLOW_DISSECTOR_KEY_MPLS_ENTROPY))
+               return FLOW_DISSECT_RET_OUT_GOOD;
+
+       hdr = __skb_header_pointer(skb, nhoff, sizeof(_hdr), data,
+                                  hlen, &_hdr);
+       if (!hdr)
+               return FLOW_DISSECT_RET_OUT_BAD;
+
+       if ((ntohl(hdr[0].entry) & MPLS_LS_LABEL_MASK) >>
+           MPLS_LS_LABEL_SHIFT == MPLS_LABEL_ENTROPY) {
+               key_keyid = skb_flow_dissector_target(flow_dissector,
+                                                     FLOW_DISSECTOR_KEY_MPLS_ENTROPY,
+                                                     target_container);
+               key_keyid->keyid = hdr[1].entry & htonl(MPLS_LS_LABEL_MASK);
+       }
+       return FLOW_DISSECT_RET_OUT_GOOD;
+}
+
+static enum flow_dissect_ret
+__skb_flow_dissect_arp(const struct sk_buff *skb,
+                      struct flow_dissector *flow_dissector,
+                      void *target_container, void *data, int nhoff, int hlen)
+{
+       struct flow_dissector_key_arp *key_arp;
+       struct {
+               unsigned char ar_sha[ETH_ALEN];
+               unsigned char ar_sip[4];
+               unsigned char ar_tha[ETH_ALEN];
+               unsigned char ar_tip[4];
+       } *arp_eth, _arp_eth;
+       const struct arphdr *arp;
+       struct arphdr _arp;
+
+       if (!dissector_uses_key(flow_dissector, FLOW_DISSECTOR_KEY_ARP))
+               return FLOW_DISSECT_RET_OUT_GOOD;
+
+       arp = __skb_header_pointer(skb, nhoff, sizeof(_arp), data,
+                                  hlen, &_arp);
+       if (!arp)
+               return FLOW_DISSECT_RET_OUT_BAD;
+
+       if (arp->ar_hrd != htons(ARPHRD_ETHER) ||
+           arp->ar_pro != htons(ETH_P_IP) ||
+           arp->ar_hln != ETH_ALEN ||
+           arp->ar_pln != 4 ||
+           (arp->ar_op != htons(ARPOP_REPLY) &&
+            arp->ar_op != htons(ARPOP_REQUEST)))
+               return FLOW_DISSECT_RET_OUT_BAD;
+
+       arp_eth = __skb_header_pointer(skb, nhoff + sizeof(_arp),
+                                      sizeof(_arp_eth), data,
+                                      hlen, &_arp_eth);
+       if (!arp_eth)
+               return FLOW_DISSECT_RET_OUT_BAD;
+
+       key_arp = skb_flow_dissector_target(flow_dissector,
+                                           FLOW_DISSECTOR_KEY_ARP,
+                                           target_container);
+
+       memcpy(&key_arp->sip, arp_eth->ar_sip, sizeof(key_arp->sip));
+       memcpy(&key_arp->tip, arp_eth->ar_tip, sizeof(key_arp->tip));
+
+       /* Only store the lower byte of the opcode;
+        * this covers ARPOP_REPLY and ARPOP_REQUEST.
+        */
+       key_arp->op = ntohs(arp->ar_op) & 0xff;
+
+       ether_addr_copy(key_arp->sha, arp_eth->ar_sha);
+       ether_addr_copy(key_arp->tha, arp_eth->ar_tha);
+
+       return FLOW_DISSECT_RET_OUT_GOOD;
+}
+
+static enum flow_dissect_ret
+__skb_flow_dissect_gre(const struct sk_buff *skb,
+                      struct flow_dissector_key_control *key_control,
+                      struct flow_dissector *flow_dissector,
+                      void *target_container, void *data,
+                      __be16 *p_proto, int *p_nhoff, int *p_hlen,
+                      unsigned int flags)
+{
+       struct flow_dissector_key_keyid *key_keyid;
+       struct gre_base_hdr *hdr, _hdr;
+       int offset = 0;
+       u16 gre_ver;
+
+       hdr = __skb_header_pointer(skb, *p_nhoff, sizeof(_hdr),
+                                  data, *p_hlen, &_hdr);
+       if (!hdr)
+               return FLOW_DISSECT_RET_OUT_BAD;
+
+       /* Only look inside GRE without routing */
+       if (hdr->flags & GRE_ROUTING)
+               return FLOW_DISSECT_RET_OUT_GOOD;
+
+       /* Only look inside GRE for version 0 and 1 */
+       gre_ver = ntohs(hdr->flags & GRE_VERSION);
+       if (gre_ver > 1)
+               return FLOW_DISSECT_RET_OUT_GOOD;
+
+       *p_proto = hdr->protocol;
+       if (gre_ver) {
+               /* Version1 must be PPTP, and check the flags */
+               if (!(*p_proto == GRE_PROTO_PPP && (hdr->flags & GRE_KEY)))
+                       return FLOW_DISSECT_RET_OUT_GOOD;
+       }
+
+       offset += sizeof(struct gre_base_hdr);
+
+       if (hdr->flags & GRE_CSUM)
+               offset += sizeof(((struct gre_full_hdr *) 0)->csum) +
+                         sizeof(((struct gre_full_hdr *) 0)->reserved1);
+
+       if (hdr->flags & GRE_KEY) {
+               const __be32 *keyid;
+               __be32 _keyid;
+
+               keyid = __skb_header_pointer(skb, *p_nhoff + offset,
+                                            sizeof(_keyid),
+                                            data, *p_hlen, &_keyid);
+               if (!keyid)
+                       return FLOW_DISSECT_RET_OUT_BAD;
+
+               if (dissector_uses_key(flow_dissector,
+                                      FLOW_DISSECTOR_KEY_GRE_KEYID)) {
+                       key_keyid = skb_flow_dissector_target(flow_dissector,
+                                                             FLOW_DISSECTOR_KEY_GRE_KEYID,
+                                                             target_container);
+                       if (gre_ver == 0)
+                               key_keyid->keyid = *keyid;
+                       else
+                               key_keyid->keyid = *keyid & GRE_PPTP_KEY_MASK;
+               }
+               offset += sizeof(((struct gre_full_hdr *) 0)->key);
+       }
+
+       if (hdr->flags & GRE_SEQ)
+               offset += sizeof(((struct pptp_gre_header *) 0)->seq);
+
+       if (gre_ver == 0) {
+               if (*p_proto == htons(ETH_P_TEB)) {
+                       const struct ethhdr *eth;
+                       struct ethhdr _eth;
+
+                       eth = __skb_header_pointer(skb, *p_nhoff + offset,
+                                                  sizeof(_eth),
+                                                  data, *p_hlen, &_eth);
+                       if (!eth)
+                               return FLOW_DISSECT_RET_OUT_BAD;
+                       *p_proto = eth->h_proto;
+                       offset += sizeof(*eth);
+
+                       /* Cap headers that we access via pointers at the
+                        * end of the Ethernet header as our maximum alignment
+                        * at that point is only 2 bytes.
+                        */
+                       if (NET_IP_ALIGN)
+                               *p_hlen = *p_nhoff + offset;
+               }
+       } else { /* version 1, must be PPTP */
+               u8 _ppp_hdr[PPP_HDRLEN];
+               u8 *ppp_hdr;
+
+               if (hdr->flags & GRE_ACK)
+                       offset += sizeof(((struct pptp_gre_header *) 0)->ack);
+
+               ppp_hdr = __skb_header_pointer(skb, *p_nhoff + offset,
+                                              sizeof(_ppp_hdr),
+                                              data, *p_hlen, _ppp_hdr);
+               if (!ppp_hdr)
+                       return FLOW_DISSECT_RET_OUT_BAD;
+
+               switch (PPP_PROTOCOL(ppp_hdr)) {
+               case PPP_IP:
+                       *p_proto = htons(ETH_P_IP);
+                       break;
+               case PPP_IPV6:
+                       *p_proto = htons(ETH_P_IPV6);
+                       break;
+               default:
+                       /* Could probably catch some more like MPLS */
+                       break;
+               }
+
+               offset += PPP_HDRLEN;
+       }
+
+       *p_nhoff += offset;
+       key_control->flags |= FLOW_DIS_ENCAPSULATION;
+       if (flags & FLOW_DISSECTOR_F_STOP_AT_ENCAP)
+               return FLOW_DISSECT_RET_OUT_GOOD;
+
+       return FLOW_DISSECT_RET_OUT_PROTO_AGAIN;
+}
+
 /**
  * __skb_flow_dissect - extract the flow_keys struct and return it
  * @skb: sk_buff to extract the flow from, can be NULL if the rest are specified
@@ -138,12 +348,10 @@ bool __skb_flow_dissect(const struct sk_buff *skb,
        struct flow_dissector_key_control *key_control;
        struct flow_dissector_key_basic *key_basic;
        struct flow_dissector_key_addrs *key_addrs;
-       struct flow_dissector_key_arp *key_arp;
        struct flow_dissector_key_ports *key_ports;
        struct flow_dissector_key_icmp *key_icmp;
        struct flow_dissector_key_tags *key_tags;
        struct flow_dissector_key_vlan *key_vlan;
-       struct flow_dissector_key_keyid *key_keyid;
        bool skip_vlan = false;
        u8 ip_proto = 0;
        bool ret;
@@ -181,7 +389,7 @@ bool __skb_flow_dissect(const struct sk_buff *skb,
                memcpy(key_eth_addrs, &eth->h_dest, sizeof(*key_eth_addrs));
        }
 
-again:
+proto_again:
        switch (proto) {
        case htons(ETH_P_IP): {
                const struct iphdr *iph;
@@ -284,7 +492,7 @@ ipv6:
                        proto = vlan->h_vlan_encapsulated_proto;
                        nhoff += sizeof(*vlan);
                        if (skip_vlan)
-                               goto again;
+                               goto proto_again;
                }
 
                skip_vlan = true;
@@ -307,7 +515,7 @@ ipv6:
                        }
                }
 
-               goto again;
+               goto proto_again;
        }
        case htons(ETH_P_PPP_SES): {
                struct {
@@ -349,31 +557,17 @@ ipv6:
        }
 
        case htons(ETH_P_MPLS_UC):
-       case htons(ETH_P_MPLS_MC): {
-               struct mpls_label *hdr, _hdr[2];
+       case htons(ETH_P_MPLS_MC):
 mpls:
-               hdr = __skb_header_pointer(skb, nhoff, sizeof(_hdr), data,
-                                          hlen, &_hdr);
-               if (!hdr)
-                       goto out_bad;
-
-               if ((ntohl(hdr[0].entry) & MPLS_LS_LABEL_MASK) >>
-                    MPLS_LS_LABEL_SHIFT == MPLS_LABEL_ENTROPY) {
-                       if (dissector_uses_key(flow_dissector,
-                                              FLOW_DISSECTOR_KEY_MPLS_ENTROPY)) {
-                               key_keyid = skb_flow_dissector_target(flow_dissector,
-                                                                     FLOW_DISSECTOR_KEY_MPLS_ENTROPY,
-                                                                     target_container);
-                               key_keyid->keyid = hdr[1].entry &
-                                       htonl(MPLS_LS_LABEL_MASK);
-                       }
-
+               switch (__skb_flow_dissect_mpls(skb, flow_dissector,
+                                               target_container, data,
+                                               nhoff, hlen)) {
+               case FLOW_DISSECT_RET_OUT_GOOD:
                        goto out_good;
+               case FLOW_DISSECT_RET_OUT_BAD:
+               default:
+                       goto out_bad;
                }
-
-               goto out_good;
-       }
-
        case htons(ETH_P_FCOE):
                if ((hlen - nhoff) < FCOE_HEADER_LEN)
                        goto out_bad;
@@ -382,177 +576,33 @@ mpls:
                goto out_good;
 
        case htons(ETH_P_ARP):
-       case htons(ETH_P_RARP): {
-               struct {
-                       unsigned char ar_sha[ETH_ALEN];
-                       unsigned char ar_sip[4];
-                       unsigned char ar_tha[ETH_ALEN];
-                       unsigned char ar_tip[4];
-               } *arp_eth, _arp_eth;
-               const struct arphdr *arp;
-               struct arphdr *_arp;
-
-               arp = __skb_header_pointer(skb, nhoff, sizeof(_arp), data,
-                                          hlen, &_arp);
-               if (!arp)
-                       goto out_bad;
-
-               if (arp->ar_hrd != htons(ARPHRD_ETHER) ||
-                   arp->ar_pro != htons(ETH_P_IP) ||
-                   arp->ar_hln != ETH_ALEN ||
-                   arp->ar_pln != 4 ||
-                   (arp->ar_op != htons(ARPOP_REPLY) &&
-                    arp->ar_op != htons(ARPOP_REQUEST)))
-                       goto out_bad;
-
-               arp_eth = __skb_header_pointer(skb, nhoff + sizeof(_arp),
-                                              sizeof(_arp_eth), data,
-                                              hlen,
-                                              &_arp_eth);
-               if (!arp_eth)
+       case htons(ETH_P_RARP):
+               switch (__skb_flow_dissect_arp(skb, flow_dissector,
+                                              target_container, data,
+                                              nhoff, hlen)) {
+               case FLOW_DISSECT_RET_OUT_GOOD:
+                       goto out_good;
+               case FLOW_DISSECT_RET_OUT_BAD:
+               default:
                        goto out_bad;
-
-               if (dissector_uses_key(flow_dissector,
-                                      FLOW_DISSECTOR_KEY_ARP)) {
-
-                       key_arp = skb_flow_dissector_target(flow_dissector,
-                                                           FLOW_DISSECTOR_KEY_ARP,
-                                                           target_container);
-
-                       memcpy(&key_arp->sip, arp_eth->ar_sip,
-                              sizeof(key_arp->sip));
-                       memcpy(&key_arp->tip, arp_eth->ar_tip,
-                              sizeof(key_arp->tip));
-
-                       /* Only store the lower byte of the opcode;
-                        * this covers ARPOP_REPLY and ARPOP_REQUEST.
-                        */
-                       key_arp->op = ntohs(arp->ar_op) & 0xff;
-
-                       ether_addr_copy(key_arp->sha, arp_eth->ar_sha);
-                       ether_addr_copy(key_arp->tha, arp_eth->ar_tha);
                }
-
-               goto out_good;
-       }
-
        default:
                goto out_bad;
        }
 
 ip_proto_again:
        switch (ip_proto) {
-       case IPPROTO_GRE: {
-               struct gre_base_hdr *hdr, _hdr;
-               u16 gre_ver;
-               int offset = 0;
-
-               hdr = __skb_header_pointer(skb, nhoff, sizeof(_hdr), data, hlen, &_hdr);
-               if (!hdr)
+       case IPPROTO_GRE:
+               switch (__skb_flow_dissect_gre(skb, key_control, flow_dissector,
+                                              target_container, data,
+                                              &proto, &nhoff, &hlen, flags)) {
+               case FLOW_DISSECT_RET_OUT_GOOD:
+                       goto out_good;
+               case FLOW_DISSECT_RET_OUT_BAD:
                        goto out_bad;
-
-               /* Only look inside GRE without routing */
-               if (hdr->flags & GRE_ROUTING)
-                       break;
-
-               /* Only look inside GRE for version 0 and 1 */
-               gre_ver = ntohs(hdr->flags & GRE_VERSION);
-               if (gre_ver > 1)
-                       break;
-
-               proto = hdr->protocol;
-               if (gre_ver) {
-                       /* Version1 must be PPTP, and check the flags */
-                       if (!(proto == GRE_PROTO_PPP && (hdr->flags & GRE_KEY)))
-                               break;
-               }
-
-               offset += sizeof(struct gre_base_hdr);
-
-               if (hdr->flags & GRE_CSUM)
-                       offset += sizeof(((struct gre_full_hdr *)0)->csum) +
-                                 sizeof(((struct gre_full_hdr *)0)->reserved1);
-
-               if (hdr->flags & GRE_KEY) {
-                       const __be32 *keyid;
-                       __be32 _keyid;
-
-                       keyid = __skb_header_pointer(skb, nhoff + offset, sizeof(_keyid),
-                                                    data, hlen, &_keyid);
-                       if (!keyid)
-                               goto out_bad;
-
-                       if (dissector_uses_key(flow_dissector,
-                                              FLOW_DISSECTOR_KEY_GRE_KEYID)) {
-                               key_keyid = skb_flow_dissector_target(flow_dissector,
-                                                                     FLOW_DISSECTOR_KEY_GRE_KEYID,
-                                                                     target_container);
-                               if (gre_ver == 0)
-                                       key_keyid->keyid = *keyid;
-                               else
-                                       key_keyid->keyid = *keyid & GRE_PPTP_KEY_MASK;
-                       }
-                       offset += sizeof(((struct gre_full_hdr *)0)->key);
+               case FLOW_DISSECT_RET_OUT_PROTO_AGAIN:
+                       goto proto_again;
                }
-
-               if (hdr->flags & GRE_SEQ)
-                       offset += sizeof(((struct pptp_gre_header *)0)->seq);
-
-               if (gre_ver == 0) {
-                       if (proto == htons(ETH_P_TEB)) {
-                               const struct ethhdr *eth;
-                               struct ethhdr _eth;
-
-                               eth = __skb_header_pointer(skb, nhoff + offset,
-                                                          sizeof(_eth),
-                                                          data, hlen, &_eth);
-                               if (!eth)
-                                       goto out_bad;
-                               proto = eth->h_proto;
-                               offset += sizeof(*eth);
-
-                               /* Cap headers that we access via pointers at the
-                                * end of the Ethernet header as our maximum alignment
-                                * at that point is only 2 bytes.
-                                */
-                               if (NET_IP_ALIGN)
-                                       hlen = (nhoff + offset);
-                       }
-               } else { /* version 1, must be PPTP */
-                       u8 _ppp_hdr[PPP_HDRLEN];
-                       u8 *ppp_hdr;
-
-                       if (hdr->flags & GRE_ACK)
-                               offset += sizeof(((struct pptp_gre_header *)0)->ack);
-
-                       ppp_hdr = __skb_header_pointer(skb, nhoff + offset,
-                                                    sizeof(_ppp_hdr),
-                                                    data, hlen, _ppp_hdr);
-                       if (!ppp_hdr)
-                               goto out_bad;
-
-                       switch (PPP_PROTOCOL(ppp_hdr)) {
-                       case PPP_IP:
-                               proto = htons(ETH_P_IP);
-                               break;
-                       case PPP_IPV6:
-                               proto = htons(ETH_P_IPV6);
-                               break;
-                       default:
-                               /* Could probably catch some more like MPLS */
-                               break;
-                       }
-
-                       offset += PPP_HDRLEN;
-               }
-
-               nhoff += offset;
-               key_control->flags |= FLOW_DIS_ENCAPSULATION;
-               if (flags & FLOW_DISSECTOR_F_STOP_AT_ENCAP)
-                       goto out_good;
-
-               goto again;
-       }
        case NEXTHDR_HOP:
        case NEXTHDR_ROUTING:
        case NEXTHDR_DEST: {
index 6df9f8fabf0ca5d2ced3070406900b7ec28a7924..b5888190223c4d3978639c81fdec3a19dd6ee8f2 100644 (file)
@@ -162,7 +162,6 @@ int lwtunnel_valid_encap_type_attr(struct nlattr *attr, int remaining)
        struct rtnexthop *rtnh = (struct rtnexthop *)attr;
        struct nlattr *nla_entype;
        struct nlattr *attrs;
-       struct nlattr *nla;
        u16 encap_type;
        int attrlen;
 
@@ -170,7 +169,6 @@ int lwtunnel_valid_encap_type_attr(struct nlattr *attr, int remaining)
                attrlen = rtnh_attrlen(rtnh);
                if (attrlen > 0) {
                        attrs = rtnh_attrs(rtnh);
-                       nla = nla_find(attrs, attrlen, RTA_ENCAP);
                        nla_entype = nla_find(attrs, attrlen, RTA_ENCAP_TYPE);
 
                        if (nla_entype) {
index e7c12caa20c88acc9a5dd86f07d11644fb58341d..8ae87c591c8e71f27de4008666684044884762de 100644 (file)
@@ -52,8 +52,9 @@ do {                                          \
 #define PNEIGH_HASHMASK                0xF
 
 static void neigh_timer_handler(unsigned long arg);
-static void __neigh_notify(struct neighbour *n, int type, int flags);
-static void neigh_update_notify(struct neighbour *neigh);
+static void __neigh_notify(struct neighbour *n, int type, int flags,
+                          u32 pid);
+static void neigh_update_notify(struct neighbour *neigh, u32 nlmsg_pid);
 static int pneigh_ifdown(struct neigh_table *tbl, struct net_device *dev);
 
 #ifdef CONFIG_PROC_FS
@@ -99,7 +100,7 @@ static void neigh_cleanup_and_release(struct neighbour *neigh)
        if (neigh->parms->neigh_cleanup)
                neigh->parms->neigh_cleanup(neigh);
 
-       __neigh_notify(neigh, RTM_DELNEIGH, 0);
+       __neigh_notify(neigh, RTM_DELNEIGH, 0, 0);
        call_netevent_notifiers(NETEVENT_NEIGH_UPDATE, neigh);
        neigh_release(neigh);
 }
@@ -860,7 +861,8 @@ static void neigh_probe(struct neighbour *neigh)
        if (skb)
                skb = skb_clone(skb, GFP_ATOMIC);
        write_unlock(&neigh->lock);
-       neigh->ops->solicit(neigh, skb);
+       if (neigh->ops->solicit)
+               neigh->ops->solicit(neigh, skb);
        atomic_inc(&neigh->probes);
        kfree_skb(skb);
 }
@@ -948,7 +950,7 @@ out:
        }
 
        if (notify)
-               neigh_update_notify(neigh);
+               neigh_update_notify(neigh, 0);
 
        neigh_release(neigh);
 }
@@ -1072,7 +1074,7 @@ static void neigh_update_hhs(struct neighbour *neigh)
  */
 
 int neigh_update(struct neighbour *neigh, const u8 *lladdr, u8 new,
-                u32 flags)
+                u32 flags, u32 nlmsg_pid)
 {
        u8 old;
        int err;
@@ -1229,7 +1231,7 @@ out:
        write_unlock_bh(&neigh->lock);
 
        if (notify)
-               neigh_update_notify(neigh);
+               neigh_update_notify(neigh, nlmsg_pid);
 
        return err;
 }
@@ -1260,7 +1262,7 @@ struct neighbour *neigh_event_ns(struct neigh_table *tbl,
                                                 lladdr || !dev->addr_len);
        if (neigh)
                neigh_update(neigh, lladdr, NUD_STALE,
-                            NEIGH_UPDATE_F_OVERRIDE);
+                            NEIGH_UPDATE_F_OVERRIDE, 0);
        return neigh;
 }
 EXPORT_SYMBOL(neigh_event_ns);
@@ -1638,7 +1640,8 @@ static int neigh_delete(struct sk_buff *skb, struct nlmsghdr *nlh)
 
        err = neigh_update(neigh, NULL, NUD_FAILED,
                           NEIGH_UPDATE_F_OVERRIDE |
-                          NEIGH_UPDATE_F_ADMIN);
+                          NEIGH_UPDATE_F_ADMIN,
+                          NETLINK_CB(skb).portid);
        neigh_release(neigh);
 
 out:
@@ -1729,7 +1732,8 @@ static int neigh_add(struct sk_buff *skb, struct nlmsghdr *nlh)
                neigh_event_send(neigh, NULL);
                err = 0;
        } else
-               err = neigh_update(neigh, lladdr, ndm->ndm_state, flags);
+               err = neigh_update(neigh, lladdr, ndm->ndm_state, flags,
+                                  NETLINK_CB(skb).portid);
        neigh_release(neigh);
 
 out:
@@ -2229,10 +2233,10 @@ nla_put_failure:
        return -EMSGSIZE;
 }
 
-static void neigh_update_notify(struct neighbour *neigh)
+static void neigh_update_notify(struct neighbour *neigh, u32 nlmsg_pid)
 {
        call_netevent_notifiers(NETEVENT_NEIGH_UPDATE, neigh);
-       __neigh_notify(neigh, RTM_NEWNEIGH, 0);
+       __neigh_notify(neigh, RTM_NEWNEIGH, 0, nlmsg_pid);
 }
 
 static bool neigh_master_filtered(struct net_device *dev, int master_idx)
@@ -2830,7 +2834,8 @@ static inline size_t neigh_nlmsg_size(void)
               + nla_total_size(4); /* NDA_PROBES */
 }
 
-static void __neigh_notify(struct neighbour *n, int type, int flags)
+static void __neigh_notify(struct neighbour *n, int type, int flags,
+                          u32 pid)
 {
        struct net *net = dev_net(n->dev);
        struct sk_buff *skb;
@@ -2840,7 +2845,7 @@ static void __neigh_notify(struct neighbour *n, int type, int flags)
        if (skb == NULL)
                goto errout;
 
-       err = neigh_fill_info(skb, n, 0, 0, type, flags);
+       err = neigh_fill_info(skb, n, pid, 0, type, flags);
        if (err < 0) {
                /* -EMSGSIZE implies BUG in neigh_nlmsg_size() */
                WARN_ON(err == -EMSGSIZE);
@@ -2856,7 +2861,7 @@ errout:
 
 void neigh_app_ns(struct neighbour *n)
 {
-       __neigh_notify(n, RTM_GETNEIGH, NLM_F_REQUEST);
+       __neigh_notify(n, RTM_GETNEIGH, NLM_F_REQUEST, 0);
 }
 EXPORT_SYMBOL(neigh_app_ns);
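
Taken together, the neighbour.c hunks thread a netlink portid from the requesting socket (NETLINK_CB(skb).portid in neigh_add()/neigh_delete()) through neigh_update() and __neigh_notify() into neigh_fill_info(), so RTM_NEWNEIGH/RTM_DELNEIGH notifications carry the requester's pid instead of 0. A monitoring process can then recognize echoes of its own edits; a hedged userspace sketch, error handling elided:

    #include <linux/netlink.h>
    #include <linux/rtnetlink.h>
    #include <sys/socket.h>
    #include <unistd.h>

    int main(void)
    {
            struct sockaddr_nl local = { .nl_family = AF_NETLINK,
                                         .nl_groups = RTMGRP_NEIGH };
            socklen_t alen = sizeof(local);
            char buf[8192];
            struct nlmsghdr *nlh;
            int fd = socket(AF_NETLINK, SOCK_RAW, NETLINK_ROUTE);
            int len;

            bind(fd, (struct sockaddr *)&local, sizeof(local));
            getsockname(fd, (struct sockaddr *)&local, &alen); /* our portid */

            len = recv(fd, buf, sizeof(buf), 0);
            for (nlh = (struct nlmsghdr *)buf; NLMSG_OK(nlh, len);
                 nlh = NLMSG_NEXT(nlh, len)) {
                    if (nlh->nlmsg_pid == local.nl_pid)
                            continue;       /* our own change, echoed back */
                    /* handle a foreign neighbour-table change here */
            }
            close(fd);
            return 0;
    }
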
 
index 3945821e9c1f8f8c33290e55d33aba28ff68a9cd..65ea0ff4017c166fea648f3ef3db57966f44aa66 100644 (file)
@@ -953,7 +953,7 @@ net_rx_queue_update_kobjects(struct net_device *dev, int old_num, int new_num)
        while (--i >= new_num) {
                struct kobject *kobj = &dev->_rx[i].kobj;
 
-               if (!list_empty(&dev_net(dev)->exit_list))
+               if (!atomic_read(&dev_net(dev)->count))
                        kobj->uevent_suppress = 1;
                if (dev->sysfs_rx_queue_group)
                        sysfs_remove_group(kobj, dev->sysfs_rx_queue_group);
@@ -1371,7 +1371,7 @@ netdev_queue_update_kobjects(struct net_device *dev, int old_num, int new_num)
        while (--i >= new_num) {
                struct netdev_queue *queue = dev->_tx + i;
 
-               if (!list_empty(&dev_net(dev)->exit_list))
+               if (!atomic_read(&dev_net(dev)->count))
                        queue->kobj.uevent_suppress = 1;
 #ifdef CONFIG_BQL
                sysfs_remove_group(&queue->kobj, &dql_group);
@@ -1558,7 +1558,7 @@ void netdev_unregister_kobject(struct net_device *ndev)
 {
        struct device *dev = &(ndev->dev);
 
-       if (!list_empty(&dev_net(ndev)->exit_list))
+       if (!atomic_read(&dev_net(ndev)->count))
                dev_set_uevent_suppress(dev, 1);
 
        kobject_get(&dev->kobj);
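
All three net-sysfs teardown sites change their "is this namespace going away?" test: exit_list only becomes non-empty once cleanup_net() is already running, whereas the refcount dropping to zero is the earliest reliable signal, so keying uevent suppression to atomic_read(&net->count) closes a window where spurious uevents escaped during dismantle. As a sketch (the helper name is hypothetical):

    /* Sketch: a dying netns has no references left that could revive it. */
    static bool netns_dying(const struct net *net)
    {
            return !atomic_read(&net->count);
    }
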
index 6ae56037bb1336d9cb6b6fc36043a203f3978202..029a61ac6cdd8a0b4dd54d2be3c5bdf047a82cb0 100644 (file)
@@ -71,27 +71,17 @@ static int update_classid_sock(const void *v, struct file *file, unsigned n)
        return 0;
 }
 
-static void update_classid(struct cgroup_subsys_state *css, void *v)
+static void cgrp_attach(struct cgroup_taskset *tset)
 {
-       struct css_task_iter it;
+       struct cgroup_subsys_state *css;
        struct task_struct *p;
 
-       css_task_iter_start(css, &it);
-       while ((p = css_task_iter_next(&it))) {
+       cgroup_taskset_for_each(p, css, tset) {
                task_lock(p);
-               iterate_fd(p->files, 0, update_classid_sock, v);
+               iterate_fd(p->files, 0, update_classid_sock,
+                          (void *)(unsigned long)css_cls_state(css)->classid);
                task_unlock(p);
        }
-       css_task_iter_end(&it);
-}
-
-static void cgrp_attach(struct cgroup_taskset *tset)
-{
-       struct cgroup_subsys_state *css;
-
-       cgroup_taskset_first(tset, &css);
-       update_classid(css,
-                      (void *)(unsigned long)css_cls_state(css)->classid);
 }
 
 static u64 read_classid(struct cgroup_subsys_state *css, struct cftype *cft)
@@ -103,12 +93,22 @@ static int write_classid(struct cgroup_subsys_state *css, struct cftype *cft,
                         u64 value)
 {
        struct cgroup_cls_state *cs = css_cls_state(css);
+       struct css_task_iter it;
+       struct task_struct *p;
 
        cgroup_sk_alloc_disable();
 
        cs->classid = (u32)value;
 
-       update_classid(css, (void *)(unsigned long)cs->classid);
+       css_task_iter_start(css, &it);
+       while ((p = css_task_iter_next(&it))) {
+               task_lock(p);
+               iterate_fd(p->files, 0, update_classid_sock,
+                          (void *)(unsigned long)cs->classid);
+               task_unlock(p);
+       }
+       css_task_iter_end(&it);
+
        return 0;
 }
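
The netclassid refactor splits one shared helper into two call sites: cgrp_attach() now walks just the tasks being migrated with cgroup_taskset_for_each(), while write_classid() walks every task already in the css. Both reduce to the same iteration skeleton, consolidated here as a sketch (update_classid_sock() is the real per-fd callback):

    /* Sketch: retag every socket fd owned by each task in a css. */
    static void retag_css_tasks(struct cgroup_subsys_state *css, u32 classid)
    {
            struct css_task_iter it;
            struct task_struct *p;

            css_task_iter_start(css, &it);
            while ((p = css_task_iter_next(&it))) {
                    task_lock(p);   /* pin p->files across the fd walk */
                    iterate_fd(p->files, 0, update_classid_sock,
                               (void *)(unsigned long)classid);
                    task_unlock(p);
            }
            css_task_iter_end(&it);
    }
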
 
index 0f9275ee55958156a6cbac3f0d2b1ff54c3c89a5..1c4810919a0a35900d45a659de0cd780b7e500d3 100644 (file)
@@ -11,6 +11,7 @@
 
 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 
+#include <linux/module.h>
 #include <linux/slab.h>
 #include <linux/types.h>
 #include <linux/module.h>
index c4e84c55824085b343679cc295a81f7b35e3acaf..58419da7961bfeb2481d7eb7f9c13a88171bb0cb 100644 (file)
@@ -4116,22 +4116,25 @@ static int rtnetlink_event(struct notifier_block *this, unsigned long event, voi
        struct net_device *dev = netdev_notifier_info_to_dev(ptr);
 
        switch (event) {
-       case NETDEV_UP:
-       case NETDEV_DOWN:
-       case NETDEV_PRE_UP:
-       case NETDEV_POST_INIT:
-       case NETDEV_REGISTER:
-       case NETDEV_CHANGE:
-       case NETDEV_PRE_TYPE_CHANGE:
-       case NETDEV_GOING_DOWN:
-       case NETDEV_UNREGISTER:
-       case NETDEV_UNREGISTER_FINAL:
-       case NETDEV_RELEASE:
-       case NETDEV_JOIN:
-       case NETDEV_BONDING_INFO:
+       case NETDEV_REBOOT:
+       case NETDEV_CHANGEMTU:
+       case NETDEV_CHANGEADDR:
+       case NETDEV_CHANGENAME:
+       case NETDEV_FEAT_CHANGE:
+       case NETDEV_BONDING_FAILOVER:
+       case NETDEV_POST_TYPE_CHANGE:
+       case NETDEV_NOTIFY_PEERS:
+       case NETDEV_CHANGEUPPER:
+       case NETDEV_RESEND_IGMP:
+       case NETDEV_PRECHANGEMTU:
+       case NETDEV_CHANGEINFODATA:
+       case NETDEV_PRECHANGEUPPER:
+       case NETDEV_CHANGELOWERSTATE:
+       case NETDEV_UDP_TUNNEL_PUSH_INFO:
+       case NETDEV_CHANGE_TX_QUEUE_LEN:
+               rtmsg_ifinfo(RTM_NEWLINK, dev, 0, GFP_KERNEL);
                break;
        default:
-               rtmsg_ifinfo(RTM_NEWLINK, dev, 0, GFP_KERNEL);
                break;
        }
        return NOTIFY_DONE;
@@ -4185,6 +4188,7 @@ void __init rtnetlink_init(void)
 
        rtnl_register(PF_UNSPEC, RTM_GETADDR, NULL, rtnl_dump_all, NULL);
        rtnl_register(PF_UNSPEC, RTM_GETROUTE, NULL, rtnl_dump_all, NULL);
+       rtnl_register(PF_UNSPEC, RTM_GETNETCONF, NULL, rtnl_dump_all, NULL);
 
        rtnl_register(PF_BRIDGE, RTM_NEWNEIGH, rtnl_fdb_add, NULL, NULL);
        rtnl_register(PF_BRIDGE, RTM_DELNEIGH, rtnl_fdb_del, NULL, NULL);
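
rtnetlink_event() flips from a blacklist to a whitelist: previously every event not listed fell through to rtmsg_ifinfo(), so any newly introduced NETDEV_* value notified userspace by default; now only the enumerated events do, and unknown ones stay silent. The shape of the safer default:

    /* Sketch: opt-in notifier; events must be listed to reach userspace. */
    static int link_event(struct notifier_block *nb, unsigned long event,
                          void *ptr)
    {
            struct net_device *dev = netdev_notifier_info_to_dev(ptr);

            switch (event) {
            case NETDEV_CHANGEMTU:
            case NETDEV_CHANGEADDR:         /* ... the vetted events only */
                    rtmsg_ifinfo(RTM_NEWLINK, dev, 0, GFP_KERNEL);
                    break;
            default:        /* new events stay quiet until opted in */
                    break;
            }
            return NOTIFY_DONE;
    }
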
index 758f140b6bedc51669fed973b39ee317c2bf1570..6bd2f8fb0476baabf507557fc0d06b6787511c70 100644 (file)
 #include <net/tcp.h>
 
 static siphash_key_t net_secret __read_mostly;
+static siphash_key_t ts_secret __read_mostly;
 
 static __always_inline void net_secret_init(void)
 {
+       net_get_random_once(&ts_secret, sizeof(ts_secret));
        net_get_random_once(&net_secret, sizeof(net_secret));
 }
 #endif
@@ -45,8 +47,25 @@ static u32 seq_scale(u32 seq)
 #endif
 
 #if IS_ENABLED(CONFIG_IPV6)
-u32 secure_tcpv6_sequence_number(const __be32 *saddr, const __be32 *daddr,
-                                __be16 sport, __be16 dport, u32 *tsoff)
+static u32 secure_tcpv6_ts_off(const __be32 *saddr, const __be32 *daddr)
+{
+       const struct {
+               struct in6_addr saddr;
+               struct in6_addr daddr;
+       } __aligned(SIPHASH_ALIGNMENT) combined = {
+               .saddr = *(struct in6_addr *)saddr,
+               .daddr = *(struct in6_addr *)daddr,
+       };
+
+       if (sysctl_tcp_timestamps != 1)
+               return 0;
+
+       return siphash(&combined, offsetofend(typeof(combined), daddr),
+                      &ts_secret);
+}
+
+u32 secure_tcpv6_seq_and_tsoff(const __be32 *saddr, const __be32 *daddr,
+                              __be16 sport, __be16 dport, u32 *tsoff)
 {
        const struct {
                struct in6_addr saddr;
@@ -63,10 +82,10 @@ u32 secure_tcpv6_sequence_number(const __be32 *saddr, const __be32 *daddr,
        net_secret_init();
        hash = siphash(&combined, offsetofend(typeof(combined), dport),
                       &net_secret);
-       *tsoff = sysctl_tcp_timestamps == 1 ? (hash >> 32) : 0;
+       *tsoff = secure_tcpv6_ts_off(saddr, daddr);
        return seq_scale(hash);
 }
-EXPORT_SYMBOL(secure_tcpv6_sequence_number);
+EXPORT_SYMBOL(secure_tcpv6_seq_and_tsoff);
 
 u32 secure_ipv6_port_ephemeral(const __be32 *saddr, const __be32 *daddr,
                               __be16 dport)
@@ -88,22 +107,29 @@ EXPORT_SYMBOL(secure_ipv6_port_ephemeral);
 #endif
 
 #ifdef CONFIG_INET
+static u32 secure_tcp_ts_off(__be32 saddr, __be32 daddr)
+{
+       if (sysctl_tcp_timestamps != 1)
+               return 0;
 
-/* secure_tcp_sequence_number(a, b, 0, d) == secure_ipv4_port_ephemeral(a, b, d),
+       return siphash_2u32((__force u32)saddr, (__force u32)daddr,
+                           &ts_secret);
+}
+
+/* secure_tcp_seq_and_tsoff(a, b, 0, d) == secure_ipv4_port_ephemeral(a, b, d),
  * but fortunately, `sport' cannot be 0 under any circumstances. If this changes,
  * it would be easy enough to have the former function use siphash_4u32, passing
  * the arguments as separate u32.
  */
-
-u32 secure_tcp_sequence_number(__be32 saddr, __be32 daddr,
-                              __be16 sport, __be16 dport, u32 *tsoff)
+u32 secure_tcp_seq_and_tsoff(__be32 saddr, __be32 daddr,
+                            __be16 sport, __be16 dport, u32 *tsoff)
 {
        u64 hash;
        net_secret_init();
        hash = siphash_3u32((__force u32)saddr, (__force u32)daddr,
                            (__force u32)sport << 16 | (__force u32)dport,
                            &net_secret);
-       *tsoff = sysctl_tcp_timestamps == 1 ? (hash >> 32) : 0;
+       *tsoff = secure_tcp_ts_off(saddr, daddr);
        return seq_scale(hash);
 }
 
index f3557958e9bf147631a90b51fef0630920acd97b..5d9a11eafbf56d510a31b52ef4926d5490b9323b 100644 (file)
@@ -3093,7 +3093,7 @@ struct sk_buff *skb_segment(struct sk_buff *head_skb,
                         * containing the same amount of data.
                         */
                        skb_walk_frags(head_skb, iter) {
-                               if (skb_headlen(iter))
+                               if (skb_headlen(iter) && !iter->head_frag)
                                        goto normal;
 
                                len -= iter->len;
@@ -3694,6 +3694,15 @@ static void sock_rmem_free(struct sk_buff *skb)
        atomic_sub(skb->truesize, &sk->sk_rmem_alloc);
 }
 
+static void skb_set_err_queue(struct sk_buff *skb)
+{
+       /* pkt_type of skbs received on local sockets is never PACKET_OUTGOING.
+        * So, it is safe to (mis)use it to mark skbs on the error queue.
+        */
+       skb->pkt_type = PACKET_OUTGOING;
+       BUILD_BUG_ON(PACKET_OUTGOING == 0);
+}
+
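
skb_set_err_queue() piggybacks a flag on pkt_type: packets delivered to local sockets are never PACKET_OUTGOING, so the value is free to mark error-queue skbs, and the BUILD_BUG_ON documents that the sentinel must be non-zero for the test to work on zeroed skbs. The matching read-side check would look like this (helper name assumed, not taken from this hunk):

    /* Sketch: the consumer's side of the pkt_type trick. */
    static bool skb_is_err_queue(const struct sk_buff *skb)
    {
            return skb->pkt_type == PACKET_OUTGOING;
    }
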
 /*
  * Note: We don't mem charge error packets (no sk_forward_alloc changes)
  */
@@ -3707,6 +3716,7 @@ int sock_queue_err_skb(struct sock *sk, struct sk_buff *skb)
        skb->sk = sk;
        skb->destructor = sock_rmem_free;
        atomic_add(skb->truesize, &sk->sk_rmem_alloc);
+       skb_set_err_queue(skb);
 
        /* before exiting rcu section, make sure dst is refcounted */
        skb_dst_force(skb);
@@ -3783,16 +3793,20 @@ EXPORT_SYMBOL(skb_clone_sk);
 
 static void __skb_complete_tx_timestamp(struct sk_buff *skb,
                                        struct sock *sk,
-                                       int tstype)
+                                       int tstype,
+                                       bool opt_stats)
 {
        struct sock_exterr_skb *serr;
        int err;
 
+       BUILD_BUG_ON(sizeof(struct sock_exterr_skb) > sizeof(skb->cb));
+
        serr = SKB_EXT_ERR(skb);
        memset(serr, 0, sizeof(*serr));
        serr->ee.ee_errno = ENOMSG;
        serr->ee.ee_origin = SO_EE_ORIGIN_TIMESTAMPING;
        serr->ee.ee_info = tstype;
+       serr->opt_stats = opt_stats;
        if (sk->sk_tsflags & SOF_TIMESTAMPING_OPT_ID) {
                serr->ee.ee_data = skb_shinfo(skb)->tskey;
                if (sk->sk_protocol == IPPROTO_TCP &&
@@ -3828,13 +3842,14 @@ void skb_complete_tx_timestamp(struct sk_buff *skb,
        if (!skb_may_tx_timestamp(sk, false))
                return;
 
-       /* take a reference to prevent skb_orphan() from freeing the socket */
-       sock_hold(sk);
-
-       *skb_hwtstamps(skb) = *hwtstamps;
-       __skb_complete_tx_timestamp(skb, sk, SCM_TSTAMP_SND);
-
-       sock_put(sk);
+       /* Take a reference to prevent skb_orphan() from freeing the socket,
+        * but only if the socket refcount is not zero.
+        */
+       if (likely(atomic_inc_not_zero(&sk->sk_refcnt))) {
+               *skb_hwtstamps(skb) = *hwtstamps;
+               __skb_complete_tx_timestamp(skb, sk, SCM_TSTAMP_SND, false);
+               sock_put(sk);
+       }
 }
 EXPORT_SYMBOL_GPL(skb_complete_tx_timestamp);
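
Here, and in skb_complete_wifi_ack() below, an unconditional sock_hold() becomes atomic_inc_not_zero(): a late hardware timestamp can arrive after sk_refcnt has already hit zero, and bumping a zero refcount resurrects a socket that is mid-free. The take-a-reference-only-if-alive idiom:

    /* Sketch: dereference sk only if a reference can still be taken. */
    static void complete_if_live(struct sock *sk)
    {
            if (!atomic_inc_not_zero(&sk->sk_refcnt))
                    return;         /* refcount already zero: sk is dying */
            /* ... safe to use sk here ... */
            sock_put(sk);           /* drop the reference we took */
    }
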
 
@@ -3843,7 +3858,7 @@ void __skb_tstamp_tx(struct sk_buff *orig_skb,
                     struct sock *sk, int tstype)
 {
        struct sk_buff *skb;
-       bool tsonly;
+       bool tsonly, opt_stats = false;
 
        if (!sk)
                return;
@@ -3856,9 +3871,10 @@ void __skb_tstamp_tx(struct sk_buff *orig_skb,
 #ifdef CONFIG_INET
                if ((sk->sk_tsflags & SOF_TIMESTAMPING_OPT_STATS) &&
                    sk->sk_protocol == IPPROTO_TCP &&
-                   sk->sk_type == SOCK_STREAM)
+                   sk->sk_type == SOCK_STREAM) {
                        skb = tcp_get_timestamping_opt_stats(sk);
-               else
+                       opt_stats = true;
+               } else
 #endif
                        skb = alloc_skb(0, GFP_ATOMIC);
        } else {
@@ -3877,7 +3893,7 @@ void __skb_tstamp_tx(struct sk_buff *orig_skb,
        else
                skb->tstamp = ktime_get_real();
 
-       __skb_complete_tx_timestamp(skb, sk, tstype);
+       __skb_complete_tx_timestamp(skb, sk, tstype, opt_stats);
 }
 EXPORT_SYMBOL_GPL(__skb_tstamp_tx);
 
@@ -3893,7 +3909,7 @@ void skb_complete_wifi_ack(struct sk_buff *skb, bool acked)
 {
        struct sock *sk = skb->sk;
        struct sock_exterr_skb *serr;
-       int err;
+       int err = 1;
 
        skb->wifi_acked_valid = 1;
        skb->wifi_acked = acked;
@@ -3903,14 +3919,15 @@ void skb_complete_wifi_ack(struct sk_buff *skb, bool acked)
        serr->ee.ee_errno = ENOMSG;
        serr->ee.ee_origin = SO_EE_ORIGIN_TXSTATUS;
 
-       /* take a reference to prevent skb_orphan() from freeing the socket */
-       sock_hold(sk);
-
-       err = sock_queue_err_skb(sk, skb);
+       /* Take a reference to prevent skb_orphan() from freeing the socket,
+        * but only if the socket refcount is not zero.
+        */
+       if (likely(atomic_inc_not_zero(&sk->sk_refcnt))) {
+               err = sock_queue_err_skb(sk, skb);
+               sock_put(sk);
+       }
        if (err)
                kfree_skb(skb);
-
-       sock_put(sk);
 }
 EXPORT_SYMBOL_GPL(skb_complete_wifi_ack);
 
index f6fd79f33097f3fa279fcb0b610286259af9b111..a06bb7a2a689b63275994034e47e452c11c4acff 100644 (file)
@@ -197,73 +197,117 @@ EXPORT_SYMBOL(sk_net_capable);
 
 /*
  * Each address family might have different locking rules, so we have
- * one slock key per address family:
+ * one slock key per address family and separate keys for internal and
+ * userspace sockets.
  */
 static struct lock_class_key af_family_keys[AF_MAX];
+static struct lock_class_key af_family_kern_keys[AF_MAX];
 static struct lock_class_key af_family_slock_keys[AF_MAX];
+static struct lock_class_key af_family_kern_slock_keys[AF_MAX];
 
 /*
  * Make lock validator output more readable. (we pre-construct these
  * strings build-time, so that runtime initialization of socket
  * locks is fast):
  */
+
+#define _sock_locks(x)                                           \
+  x "AF_UNSPEC",       x "AF_UNIX"     ,       x "AF_INET"     , \
+  x "AF_AX25"  ,       x "AF_IPX"      ,       x "AF_APPLETALK", \
+  x "AF_NETROM",       x "AF_BRIDGE"   ,       x "AF_ATMPVC"   , \
+  x "AF_X25"   ,       x "AF_INET6"    ,       x "AF_ROSE"     , \
+  x "AF_DECnet",       x "AF_NETBEUI"  ,       x "AF_SECURITY" , \
+  x "AF_KEY"   ,       x "AF_NETLINK"  ,       x "AF_PACKET"   , \
+  x "AF_ASH"   ,       x "AF_ECONET"   ,       x "AF_ATMSVC"   , \
+  x "AF_RDS"   ,       x "AF_SNA"      ,       x "AF_IRDA"     , \
+  x "AF_PPPOX" ,       x "AF_WANPIPE"  ,       x "AF_LLC"      , \
+  x "27"       ,       x "28"          ,       x "AF_CAN"      , \
+  x "AF_TIPC"  ,       x "AF_BLUETOOTH",       x "IUCV"        , \
+  x "AF_RXRPC" ,       x "AF_ISDN"     ,       x "AF_PHONET"   , \
+  x "AF_IEEE802154",   x "AF_CAIF"     ,       x "AF_ALG"      , \
+  x "AF_NFC"   ,       x "AF_VSOCK"    ,       x "AF_KCM"      , \
+  x "AF_QIPCRTR",      x "AF_SMC"      ,       x "AF_MAX"
+
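
The _sock_locks() macro collapses what used to be three hand-maintained tables (and would now have been six) into one list, relying on C's adjacent-string-literal concatenation: each x "AF_FOO" pastes the caller's prefix onto the family name at compile time. The mechanism in miniature:

    /* Sketch: string-literal concatenation generates prefixed name tables.
     * NAMES("warm-") expands to "warm-red", "warm-green", "warm-blue".
     */
    #define NAMES(x) x "red", x "green", x "blue"

    static const char *const warm_names[] = { NAMES("warm-") };
    static const char *const cold_names[] = { NAMES("cold-") };
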
 static const char *const af_family_key_strings[AF_MAX+1] = {
-  "sk_lock-AF_UNSPEC", "sk_lock-AF_UNIX"     , "sk_lock-AF_INET"     ,
-  "sk_lock-AF_AX25"  , "sk_lock-AF_IPX"      , "sk_lock-AF_APPLETALK",
-  "sk_lock-AF_NETROM", "sk_lock-AF_BRIDGE"   , "sk_lock-AF_ATMPVC"   ,
-  "sk_lock-AF_X25"   , "sk_lock-AF_INET6"    , "sk_lock-AF_ROSE"     ,
-  "sk_lock-AF_DECnet", "sk_lock-AF_NETBEUI"  , "sk_lock-AF_SECURITY" ,
-  "sk_lock-AF_KEY"   , "sk_lock-AF_NETLINK"  , "sk_lock-AF_PACKET"   ,
-  "sk_lock-AF_ASH"   , "sk_lock-AF_ECONET"   , "sk_lock-AF_ATMSVC"   ,
-  "sk_lock-AF_RDS"   , "sk_lock-AF_SNA"      , "sk_lock-AF_IRDA"     ,
-  "sk_lock-AF_PPPOX" , "sk_lock-AF_WANPIPE"  , "sk_lock-AF_LLC"      ,
-  "sk_lock-27"       , "sk_lock-28"          , "sk_lock-AF_CAN"      ,
-  "sk_lock-AF_TIPC"  , "sk_lock-AF_BLUETOOTH", "sk_lock-IUCV"        ,
-  "sk_lock-AF_RXRPC" , "sk_lock-AF_ISDN"     , "sk_lock-AF_PHONET"   ,
-  "sk_lock-AF_IEEE802154", "sk_lock-AF_CAIF" , "sk_lock-AF_ALG"      ,
-  "sk_lock-AF_NFC"   , "sk_lock-AF_VSOCK"    , "sk_lock-AF_KCM"      ,
-  "sk_lock-AF_QIPCRTR", "sk_lock-AF_SMC"     , "sk_lock-AF_MAX"
+       _sock_locks("sk_lock-")
 };
 static const char *const af_family_slock_key_strings[AF_MAX+1] = {
-  "slock-AF_UNSPEC", "slock-AF_UNIX"     , "slock-AF_INET"     ,
-  "slock-AF_AX25"  , "slock-AF_IPX"      , "slock-AF_APPLETALK",
-  "slock-AF_NETROM", "slock-AF_BRIDGE"   , "slock-AF_ATMPVC"   ,
-  "slock-AF_X25"   , "slock-AF_INET6"    , "slock-AF_ROSE"     ,
-  "slock-AF_DECnet", "slock-AF_NETBEUI"  , "slock-AF_SECURITY" ,
-  "slock-AF_KEY"   , "slock-AF_NETLINK"  , "slock-AF_PACKET"   ,
-  "slock-AF_ASH"   , "slock-AF_ECONET"   , "slock-AF_ATMSVC"   ,
-  "slock-AF_RDS"   , "slock-AF_SNA"      , "slock-AF_IRDA"     ,
-  "slock-AF_PPPOX" , "slock-AF_WANPIPE"  , "slock-AF_LLC"      ,
-  "slock-27"       , "slock-28"          , "slock-AF_CAN"      ,
-  "slock-AF_TIPC"  , "slock-AF_BLUETOOTH", "slock-AF_IUCV"     ,
-  "slock-AF_RXRPC" , "slock-AF_ISDN"     , "slock-AF_PHONET"   ,
-  "slock-AF_IEEE802154", "slock-AF_CAIF" , "slock-AF_ALG"      ,
-  "slock-AF_NFC"   , "slock-AF_VSOCK"    ,"slock-AF_KCM"       ,
-  "slock-AF_QIPCRTR", "slock-AF_SMC"     , "slock-AF_MAX"
+       _sock_locks("slock-")
 };
 static const char *const af_family_clock_key_strings[AF_MAX+1] = {
-  "clock-AF_UNSPEC", "clock-AF_UNIX"     , "clock-AF_INET"     ,
-  "clock-AF_AX25"  , "clock-AF_IPX"      , "clock-AF_APPLETALK",
-  "clock-AF_NETROM", "clock-AF_BRIDGE"   , "clock-AF_ATMPVC"   ,
-  "clock-AF_X25"   , "clock-AF_INET6"    , "clock-AF_ROSE"     ,
-  "clock-AF_DECnet", "clock-AF_NETBEUI"  , "clock-AF_SECURITY" ,
-  "clock-AF_KEY"   , "clock-AF_NETLINK"  , "clock-AF_PACKET"   ,
-  "clock-AF_ASH"   , "clock-AF_ECONET"   , "clock-AF_ATMSVC"   ,
-  "clock-AF_RDS"   , "clock-AF_SNA"      , "clock-AF_IRDA"     ,
-  "clock-AF_PPPOX" , "clock-AF_WANPIPE"  , "clock-AF_LLC"      ,
-  "clock-27"       , "clock-28"          , "clock-AF_CAN"      ,
-  "clock-AF_TIPC"  , "clock-AF_BLUETOOTH", "clock-AF_IUCV"     ,
-  "clock-AF_RXRPC" , "clock-AF_ISDN"     , "clock-AF_PHONET"   ,
-  "clock-AF_IEEE802154", "clock-AF_CAIF" , "clock-AF_ALG"      ,
-  "clock-AF_NFC"   , "clock-AF_VSOCK"    , "clock-AF_KCM"      ,
-  "clock-AF_QIPCRTR", "clock-AF_SMC"     , "clock-AF_MAX"
+       _sock_locks("clock-")
+};
+
+static const char *const af_family_kern_key_strings[AF_MAX+1] = {
+       _sock_locks("k-sk_lock-")
+};
+static const char *const af_family_kern_slock_key_strings[AF_MAX+1] = {
+       _sock_locks("k-slock-")
+};
+static const char *const af_family_kern_clock_key_strings[AF_MAX+1] = {
+       _sock_locks("k-clock-")
+};
+static const char *const af_family_rlock_key_strings[AF_MAX+1] = {
+  "rlock-AF_UNSPEC", "rlock-AF_UNIX"     , "rlock-AF_INET"     ,
+  "rlock-AF_AX25"  , "rlock-AF_IPX"      , "rlock-AF_APPLETALK",
+  "rlock-AF_NETROM", "rlock-AF_BRIDGE"   , "rlock-AF_ATMPVC"   ,
+  "rlock-AF_X25"   , "rlock-AF_INET6"    , "rlock-AF_ROSE"     ,
+  "rlock-AF_DECnet", "rlock-AF_NETBEUI"  , "rlock-AF_SECURITY" ,
+  "rlock-AF_KEY"   , "rlock-AF_NETLINK"  , "rlock-AF_PACKET"   ,
+  "rlock-AF_ASH"   , "rlock-AF_ECONET"   , "rlock-AF_ATMSVC"   ,
+  "rlock-AF_RDS"   , "rlock-AF_SNA"      , "rlock-AF_IRDA"     ,
+  "rlock-AF_PPPOX" , "rlock-AF_WANPIPE"  , "rlock-AF_LLC"      ,
+  "rlock-27"       , "rlock-28"          , "rlock-AF_CAN"      ,
+  "rlock-AF_TIPC"  , "rlock-AF_BLUETOOTH", "rlock-AF_IUCV"     ,
+  "rlock-AF_RXRPC" , "rlock-AF_ISDN"     , "rlock-AF_PHONET"   ,
+  "rlock-AF_IEEE802154", "rlock-AF_CAIF" , "rlock-AF_ALG"      ,
+  "rlock-AF_NFC"   , "rlock-AF_VSOCK"    , "rlock-AF_KCM"      ,
+  "rlock-AF_QIPCRTR", "rlock-AF_SMC"     , "rlock-AF_MAX"
+};
+static const char *const af_family_wlock_key_strings[AF_MAX+1] = {
+  "wlock-AF_UNSPEC", "wlock-AF_UNIX"     , "wlock-AF_INET"     ,
+  "wlock-AF_AX25"  , "wlock-AF_IPX"      , "wlock-AF_APPLETALK",
+  "wlock-AF_NETROM", "wlock-AF_BRIDGE"   , "wlock-AF_ATMPVC"   ,
+  "wlock-AF_X25"   , "wlock-AF_INET6"    , "wlock-AF_ROSE"     ,
+  "wlock-AF_DECnet", "wlock-AF_NETBEUI"  , "wlock-AF_SECURITY" ,
+  "wlock-AF_KEY"   , "wlock-AF_NETLINK"  , "wlock-AF_PACKET"   ,
+  "wlock-AF_ASH"   , "wlock-AF_ECONET"   , "wlock-AF_ATMSVC"   ,
+  "wlock-AF_RDS"   , "wlock-AF_SNA"      , "wlock-AF_IRDA"     ,
+  "wlock-AF_PPPOX" , "wlock-AF_WANPIPE"  , "wlock-AF_LLC"      ,
+  "wlock-27"       , "wlock-28"          , "wlock-AF_CAN"      ,
+  "wlock-AF_TIPC"  , "wlock-AF_BLUETOOTH", "wlock-AF_IUCV"     ,
+  "wlock-AF_RXRPC" , "wlock-AF_ISDN"     , "wlock-AF_PHONET"   ,
+  "wlock-AF_IEEE802154", "wlock-AF_CAIF" , "wlock-AF_ALG"      ,
+  "wlock-AF_NFC"   , "wlock-AF_VSOCK"    , "wlock-AF_KCM"      ,
+  "wlock-AF_QIPCRTR", "wlock-AF_SMC"     , "wlock-AF_MAX"
+};
+static const char *const af_family_elock_key_strings[AF_MAX+1] = {
+  "elock-AF_UNSPEC", "elock-AF_UNIX"     , "elock-AF_INET"     ,
+  "elock-AF_AX25"  , "elock-AF_IPX"      , "elock-AF_APPLETALK",
+  "elock-AF_NETROM", "elock-AF_BRIDGE"   , "elock-AF_ATMPVC"   ,
+  "elock-AF_X25"   , "elock-AF_INET6"    , "elock-AF_ROSE"     ,
+  "elock-AF_DECnet", "elock-AF_NETBEUI"  , "elock-AF_SECURITY" ,
+  "elock-AF_KEY"   , "elock-AF_NETLINK"  , "elock-AF_PACKET"   ,
+  "elock-AF_ASH"   , "elock-AF_ECONET"   , "elock-AF_ATMSVC"   ,
+  "elock-AF_RDS"   , "elock-AF_SNA"      , "elock-AF_IRDA"     ,
+  "elock-AF_PPPOX" , "elock-AF_WANPIPE"  , "elock-AF_LLC"      ,
+  "elock-27"       , "elock-28"          , "elock-AF_CAN"      ,
+  "elock-AF_TIPC"  , "elock-AF_BLUETOOTH", "elock-AF_IUCV"     ,
+  "elock-AF_RXRPC" , "elock-AF_ISDN"     , "elock-AF_PHONET"   ,
+  "elock-AF_IEEE802154", "elock-AF_CAIF" , "elock-AF_ALG"      ,
+  "elock-AF_NFC"   , "elock-AF_VSOCK"    , "elock-AF_KCM"      ,
+  "elock-AF_QIPCRTR", "elock-AF_SMC"     , "elock-AF_MAX"
 };
 
 /*
- * sk_callback_lock locking rules are per-address-family,
+ * sk_callback_lock and sk queues locking rules are per-address-family,
  * so split the lock classes by using a per-AF key:
  */
 static struct lock_class_key af_callback_keys[AF_MAX];
+static struct lock_class_key af_rlock_keys[AF_MAX];
+static struct lock_class_key af_wlock_keys[AF_MAX];
+static struct lock_class_key af_elock_keys[AF_MAX];
+static struct lock_class_key af_kern_callback_keys[AF_MAX];
 
 /* Take into consideration the size of the struct sk_buff overhead in the
  * determination of these values, since that is non-constant across
@@ -1039,6 +1083,7 @@ int sock_getsockopt(struct socket *sock, int level, int optname,
 
        union {
                int val;
+               u64 val64;
                struct linger ling;
                struct timeval tm;
        } v;
@@ -1269,6 +1314,40 @@ int sock_getsockopt(struct socket *sock, int level, int optname,
                v.val = sk->sk_incoming_cpu;
                break;
 
+       case SO_MEMINFO:
+       {
+               u32 meminfo[SK_MEMINFO_VARS];
+
+               if (get_user(len, optlen))
+                       return -EFAULT;
+
+               sk_get_meminfo(sk, meminfo);
+
+               len = min_t(unsigned int, len, sizeof(meminfo));
+               if (copy_to_user(optval, &meminfo, len))
+                       return -EFAULT;
+
+               goto lenout;
+       }
+
+#ifdef CONFIG_NET_RX_BUSY_POLL
+       case SO_INCOMING_NAPI_ID:
+               v.val = READ_ONCE(sk->sk_napi_id);
+
+               /* aggregate non-NAPI IDs down to 0 */
+               if (v.val < MIN_NAPI_ID)
+                       v.val = 0;
+
+               break;
+#endif
+
+       case SO_COOKIE:
+               lv = sizeof(u64);
+               if (len < lv)
+                       return -EINVAL;
+               v.val64 = sock_gen_cookie(sk);
+               break;
+
        default:
                /* We implement the SO_SNDLOWAT etc to not be settable
                 * (1003.1g 7).
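
Three new getsockopt() codes arrive in this hunk: SO_MEMINFO copies out the same SK_MEMINFO_VARS array that sock_diag reports, SO_INCOMING_NAPI_ID exposes the NAPI ID feeding the socket (aggregated to 0 below MIN_NAPI_ID), and SO_COOKIE returns the 64-bit socket cookie. A hedged userspace sketch; the fallback values match asm-generic/socket.h but are an assumption on arches with their own socket ABI:

    #include <linux/sock_diag.h>    /* SK_MEMINFO_* indices */
    #include <stdint.h>
    #include <stdio.h>
    #include <sys/socket.h>

    #ifndef SO_MEMINFO
    #define SO_MEMINFO 55           /* asm-generic value, assumption */
    #endif
    #ifndef SO_COOKIE
    #define SO_COOKIE 57            /* asm-generic value, assumption */
    #endif

    static void dump_sock(int fd)
    {
            uint32_t mem[SK_MEMINFO_VARS];
            uint64_t cookie;
            socklen_t len = sizeof(mem);

            if (getsockopt(fd, SOL_SOCKET, SO_MEMINFO, mem, &len) == 0)
                    printf("rmem=%u rcvbuf=%u drops=%u\n",
                           mem[SK_MEMINFO_RMEM_ALLOC], mem[SK_MEMINFO_RCVBUF],
                           mem[SK_MEMINFO_DROPS]);

            len = sizeof(cookie);
            if (getsockopt(fd, SOL_SOCKET, SO_COOKIE, &cookie, &len) == 0)
                    printf("cookie=%llu\n", (unsigned long long)cookie);
    }
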
@@ -1293,7 +1372,16 @@ lenout:
  */
 static inline void sock_lock_init(struct sock *sk)
 {
-       sock_lock_init_class_and_name(sk,
+       if (sk->sk_kern_sock)
+               sock_lock_init_class_and_name(
+                       sk,
+                       af_family_kern_slock_key_strings[sk->sk_family],
+                       af_family_kern_slock_keys + sk->sk_family,
+                       af_family_kern_key_strings[sk->sk_family],
+                       af_family_kern_keys + sk->sk_family);
+       else
+               sock_lock_init_class_and_name(
+                       sk,
                        af_family_slock_key_strings[sk->sk_family],
                        af_family_slock_keys + sk->sk_family,
                        af_family_key_strings[sk->sk_family],
@@ -1399,6 +1487,7 @@ struct sock *sk_alloc(struct net *net, int family, gfp_t priority,
                 * why we need sk_prot_creator -acme
                 */
                sk->sk_prot = sk->sk_prot_creator = prot;
+               sk->sk_kern_sock = kern;
                sock_lock_init(sk);
                sk->sk_net_refcnt = kern ? 0 : 1;
                if (likely(sk->sk_net_refcnt))
@@ -1442,6 +1531,11 @@ static void __sk_destruct(struct rcu_head *head)
                pr_debug("%s: optmem leakage (%d bytes) detected\n",
                         __func__, atomic_read(&sk->sk_omem_alloc));
 
+       if (sk->sk_frag.page) {
+               put_page(sk->sk_frag.page);
+               sk->sk_frag.page = NULL;
+       }
+
        if (sk->sk_peer_cred)
                put_cred(sk->sk_peer_cred);
        put_pid(sk->sk_peer_pid);
@@ -1478,6 +1572,27 @@ void sk_free(struct sock *sk)
 }
 EXPORT_SYMBOL(sk_free);
 
+static void sk_init_common(struct sock *sk)
+{
+       skb_queue_head_init(&sk->sk_receive_queue);
+       skb_queue_head_init(&sk->sk_write_queue);
+       skb_queue_head_init(&sk->sk_error_queue);
+
+       rwlock_init(&sk->sk_callback_lock);
+       lockdep_set_class_and_name(&sk->sk_receive_queue.lock,
+                       af_rlock_keys + sk->sk_family,
+                       af_family_rlock_key_strings[sk->sk_family]);
+       lockdep_set_class_and_name(&sk->sk_write_queue.lock,
+                       af_wlock_keys + sk->sk_family,
+                       af_family_wlock_key_strings[sk->sk_family]);
+       lockdep_set_class_and_name(&sk->sk_error_queue.lock,
+                       af_elock_keys + sk->sk_family,
+                       af_family_elock_key_strings[sk->sk_family]);
+       lockdep_set_class_and_name(&sk->sk_callback_lock,
+                       af_callback_keys + sk->sk_family,
+                       af_family_clock_key_strings[sk->sk_family]);
+}
+
 /**
  *     sk_clone_lock - clone a socket, and lock its clone
  *     @sk: the socket to clone
@@ -1511,13 +1626,7 @@ struct sock *sk_clone_lock(const struct sock *sk, const gfp_t priority)
                 */
                atomic_set(&newsk->sk_wmem_alloc, 1);
                atomic_set(&newsk->sk_omem_alloc, 0);
-               skb_queue_head_init(&newsk->sk_receive_queue);
-               skb_queue_head_init(&newsk->sk_write_queue);
-
-               rwlock_init(&newsk->sk_callback_lock);
-               lockdep_set_class_and_name(&newsk->sk_callback_lock,
-                               af_callback_keys + newsk->sk_family,
-                               af_family_clock_key_strings[newsk->sk_family]);
+               sk_init_common(newsk);
 
                newsk->sk_dst_cache     = NULL;
                newsk->sk_dst_pending_confirm = 0;
@@ -1528,7 +1637,6 @@ struct sock *sk_clone_lock(const struct sock *sk, const gfp_t priority)
                newsk->sk_userlocks     = sk->sk_userlocks & ~SOCK_BINDPORT_LOCK;
 
                sock_reset_flag(newsk, SOCK_DONE);
-               skb_queue_head_init(&newsk->sk_error_queue);
 
                filter = rcu_dereference_protected(newsk->sk_filter, 1);
                if (filter != NULL)
@@ -1539,6 +1647,12 @@ struct sock *sk_clone_lock(const struct sock *sk, const gfp_t priority)
                        is_charged = sk_filter_charge(newsk, filter);
 
                if (unlikely(!is_charged || xfrm_sk_clone_policy(newsk, sk))) {
+                       /* We need to make sure that we don't uncharge the new
+                        * socket if we couldn't charge it in the first place
+                        * as otherwise we uncharge the parent's filter.
+                        */
+                       if (!is_charged)
+                               RCU_INIT_POINTER(newsk->sk_filter, NULL);
                        sk_free_unlock_clone(newsk);
                        newsk = NULL;
                        goto out;
@@ -2277,7 +2391,8 @@ int sock_no_socketpair(struct socket *sock1, struct socket *sock2)
 }
 EXPORT_SYMBOL(sock_no_socketpair);
 
-int sock_no_accept(struct socket *sock, struct socket *newsock, int flags)
+int sock_no_accept(struct socket *sock, struct socket *newsock, int flags,
+                  bool kern)
 {
        return -EOPNOTSUPP;
 }
@@ -2454,10 +2569,7 @@ EXPORT_SYMBOL(sk_stop_timer);
 
 void sock_init_data(struct socket *sock, struct sock *sk)
 {
-       skb_queue_head_init(&sk->sk_receive_queue);
-       skb_queue_head_init(&sk->sk_write_queue);
-       skb_queue_head_init(&sk->sk_error_queue);
-
+       sk_init_common(sk);
        sk->sk_send_head        =       NULL;
 
        init_timer(&sk->sk_timer);
@@ -2481,7 +2593,14 @@ void sock_init_data(struct socket *sock, struct sock *sk)
        }
 
        rwlock_init(&sk->sk_callback_lock);
-       lockdep_set_class_and_name(&sk->sk_callback_lock,
+       if (sk->sk_kern_sock)
+               lockdep_set_class_and_name(
+                       &sk->sk_callback_lock,
+                       af_kern_callback_keys + sk->sk_family,
+                       af_family_kern_clock_key_strings[sk->sk_family]);
+       else
+               lockdep_set_class_and_name(
+                       &sk->sk_callback_lock,
                        af_callback_keys + sk->sk_family,
                        af_family_clock_key_strings[sk->sk_family]);
 
@@ -2502,7 +2621,7 @@ void sock_init_data(struct socket *sock, struct sock *sk)
        sk->sk_rcvtimeo         =       MAX_SCHEDULE_TIMEOUT;
        sk->sk_sndtimeo         =       MAX_SCHEDULE_TIMEOUT;
 
-       sk->sk_stamp = ktime_set(-1L, 0);
+       sk->sk_stamp = SK_DEFAULT_STAMP;
 
 #ifdef CONFIG_NET_RX_BUSY_POLL
        sk->sk_napi_id          =       0;
@@ -2779,15 +2898,25 @@ void sk_common_release(struct sock *sk)
 
        sk_refcnt_debug_release(sk);
 
-       if (sk->sk_frag.page) {
-               put_page(sk->sk_frag.page);
-               sk->sk_frag.page = NULL;
-       }
-
        sock_put(sk);
 }
 EXPORT_SYMBOL(sk_common_release);
 
+void sk_get_meminfo(const struct sock *sk, u32 *mem)
+{
+       memset(mem, 0, sizeof(*mem) * SK_MEMINFO_VARS);
+
+       mem[SK_MEMINFO_RMEM_ALLOC] = sk_rmem_alloc_get(sk);
+       mem[SK_MEMINFO_RCVBUF] = sk->sk_rcvbuf;
+       mem[SK_MEMINFO_WMEM_ALLOC] = sk_wmem_alloc_get(sk);
+       mem[SK_MEMINFO_SNDBUF] = sk->sk_sndbuf;
+       mem[SK_MEMINFO_FWD_ALLOC] = sk->sk_forward_alloc;
+       mem[SK_MEMINFO_WMEM_QUEUED] = sk->sk_wmem_queued;
+       mem[SK_MEMINFO_OPTMEM] = atomic_read(&sk->sk_omem_alloc);
+       mem[SK_MEMINFO_BACKLOG] = sk->sk_backlog.len;
+       mem[SK_MEMINFO_DROPS] = atomic_read(&sk->sk_drops);
+}
+
 #ifdef CONFIG_PROC_FS
 #define PROTO_INUSE_NR 64      /* should be enough for the first time */
 struct prot_inuse {
@@ -3128,3 +3257,14 @@ static int __init proto_init(void)
 subsys_initcall(proto_init);
 
 #endif /* PROC_FS */
+
+#ifdef CONFIG_NET_RX_BUSY_POLL
+bool sk_busy_loop_end(void *p, unsigned long start_time)
+{
+       struct sock *sk = p;
+
+       return !skb_queue_empty(&sk->sk_receive_queue) ||
+              sk_busy_loop_timeout(sk, start_time);
+}
+EXPORT_SYMBOL(sk_busy_loop_end);
+#endif /* CONFIG_NET_RX_BUSY_POLL */
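
sk_busy_loop_end() is the exit predicate for socket busy polling: keep spinning on the device's NAPI context until either data lands in the receive queue or the time budget measured from start_time runs out. Roughly how a caller drives it (the actual NAPI polling step is elided):

    /* Sketch: generic busy-wait loop bounded by the loop_end callback. */
    static void busy_poll_until(struct sock *sk, unsigned long start_time)
    {
            while (!sk_busy_loop_end(sk, start_time)) {
                    /* poll the NAPI context owning sk->sk_napi_id here */
                    cpu_relax();
            }
    }
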
index 6b10573cc9faa790fe261b452b85f3b774c3ec21..fb9d0e2fd148aa78fa9c33e27e341af5a47c530f 100644 (file)
@@ -19,7 +19,7 @@ static int (*inet_rcv_compat)(struct sk_buff *skb, struct nlmsghdr *nlh);
 static DEFINE_MUTEX(sock_diag_table_mutex);
 static struct workqueue_struct *broadcast_wq;
 
-static u64 sock_gen_cookie(struct sock *sk)
+u64 sock_gen_cookie(struct sock *sk)
 {
        while (1) {
                u64 res = atomic64_read(&sk->sk_cookie);
@@ -59,15 +59,7 @@ int sock_diag_put_meminfo(struct sock *sk, struct sk_buff *skb, int attrtype)
 {
        u32 mem[SK_MEMINFO_VARS];
 
-       mem[SK_MEMINFO_RMEM_ALLOC] = sk_rmem_alloc_get(sk);
-       mem[SK_MEMINFO_RCVBUF] = sk->sk_rcvbuf;
-       mem[SK_MEMINFO_WMEM_ALLOC] = sk_wmem_alloc_get(sk);
-       mem[SK_MEMINFO_SNDBUF] = sk->sk_sndbuf;
-       mem[SK_MEMINFO_FWD_ALLOC] = sk->sk_forward_alloc;
-       mem[SK_MEMINFO_WMEM_QUEUED] = sk->sk_wmem_queued;
-       mem[SK_MEMINFO_OPTMEM] = atomic_read(&sk->sk_omem_alloc);
-       mem[SK_MEMINFO_BACKLOG] = sk->sk_backlog.len;
-       mem[SK_MEMINFO_DROPS] = atomic_read(&sk->sk_drops);
+       sk_get_meminfo(sk, mem);
 
        return nla_put(skb, attrtype, sizeof(mem), &mem);
 }
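
sock_gen_cookie() loses its static so the SO_COOKIE path in sock.c can call it. Its body (unchanged here) assigns the cookie lazily and lock-free: read the slot, and if it is still zero, race to install a freshly allocated id with cmpxchg, looping until some writer has won. The assign-once idiom, sketched:

    /* Sketch: lazily publish a unique id exactly once, without locks. */
    static u64 lazy_cookie(atomic64_t *slot, atomic64_t *next_id)
    {
            for (;;) {
                    u64 res = atomic64_read(slot);

                    if (res)
                            return res;             /* someone already won */
                    res = atomic64_inc_return(next_id);
                    atomic64_cmpxchg(slot, 0, res); /* first writer wins */
            }
    }
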
index 9a1a352fd1ebe598e4925bcda037dc0e4a2288bc..eed1ebf7f29d0fac552074b127e5636fecede65f 100644 (file)
@@ -13,9 +13,9 @@
 
 static DEFINE_SPINLOCK(reuseport_lock);
 
-static struct sock_reuseport *__reuseport_alloc(u16 max_socks)
+static struct sock_reuseport *__reuseport_alloc(unsigned int max_socks)
 {
-       size_t size = sizeof(struct sock_reuseport) +
+       unsigned int size = sizeof(struct sock_reuseport) +
                      sizeof(struct sock *) * max_socks;
        struct sock_reuseport *reuse = kzalloc(size, GFP_ATOMIC);
 
index 4ead336e14ea0b8fc5fdcf8e679da54dfca0716b..7f9cc400eca08c01c9014476aa4daf0852505b20 100644 (file)
@@ -408,14 +408,16 @@ static struct ctl_table net_core_table[] = {
                .data           = &sysctl_net_busy_poll,
                .maxlen         = sizeof(unsigned int),
                .mode           = 0644,
-               .proc_handler   = proc_dointvec
+               .proc_handler   = proc_dointvec_minmax,
+               .extra1         = &zero,
        },
        {
                .procname       = "busy_read",
                .data           = &sysctl_net_busy_read,
                .maxlen         = sizeof(unsigned int),
                .mode           = 0644,
-               .proc_handler   = proc_dointvec
+               .proc_handler   = proc_dointvec_minmax,
+               .extra1         = &zero,
        },
 #endif
 #ifdef CONFIG_NET_SCHED
index 6592d7bbed394086a8ba8efcb370fb1d75db4449..d758880c09a73ee07fe406f42fd934eae0b300d0 100644 (file)
@@ -51,7 +51,7 @@ EXPORT_SYMBOL(net_ratelimit);
 
 __be32 in_aton(const char *str)
 {
-       unsigned long l;
+       unsigned int l;
        unsigned int val;
        int i;
 
index f053198e730c48c7ea8114706c3d4904228f41fb..5e3a7302f7747e4c4f3134eacab2f2c65b13402f 100644 (file)
@@ -749,6 +749,7 @@ static void ccid2_hc_tx_exit(struct sock *sk)
        for (i = 0; i < hc->tx_seqbufc; i++)
                kfree(hc->tx_seqbuf[i]);
        hc->tx_seqbufc = 0;
+       dccp_ackvec_parsed_cleanup(&hc->tx_av_chunks);
 }
 
 static void ccid2_hc_rx_packet_recv(struct sock *sk, struct sk_buff *skb)
index 409d0cfd34474812c3bf74f26cd423a3d65ee441..b99168b0fabf2a8c65defdd0b93d362630774e1a 100644 (file)
@@ -289,7 +289,8 @@ static void dccp_v4_err(struct sk_buff *skb, u32 info)
 
        switch (type) {
        case ICMP_REDIRECT:
-               dccp_do_redirect(skb, sk);
+               if (!sock_owned_by_user(sk))
+                       dccp_do_redirect(skb, sk);
                goto out;
        case ICMP_SOURCE_QUENCH:
                /* Just silently ignore these. */
index 233b57367758c64c09ed40f7359cb8fcb1918d93..d9b6a4e403e701fd9b9ecf92bac496e45570054e 100644 (file)
@@ -122,10 +122,12 @@ static void dccp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
        np = inet6_sk(sk);
 
        if (type == NDISC_REDIRECT) {
-               struct dst_entry *dst = __sk_dst_check(sk, np->dst_cookie);
+               if (!sock_owned_by_user(sk)) {
+                       struct dst_entry *dst = __sk_dst_check(sk, np->dst_cookie);
 
-               if (dst)
-                       dst->ops->redirect(dst, sk, skb);
+                       if (dst)
+                               dst->ops->redirect(dst, sk, skb);
+               }
                goto out;
        }
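
The v4 hunk above (dccp_v4_err) and this v6 one apply the same rule: ICMP redirect handling rewrites routing state that the socket lock protects, so when a process context currently owns the lock the softirq must not touch it, and the redirect is simply dropped. In isolation:

    /* Sketch: skip lock-protected work in softirq while the lock is owned. */
    static void handle_redirect(struct sock *sk, struct sk_buff *skb)
    {
            if (sock_owned_by_user(sk))
                    return;         /* owner may be using the dst right now */
            dccp_do_redirect(skb, sk);
    }
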
 
index e267e6f4c9a5566b369a03a600a408e5bd41cbad..abd07a443219853b022bef41cb072e90ff8f07f0 100644 (file)
@@ -142,6 +142,13 @@ struct sock *dccp_check_req(struct sock *sk, struct sk_buff *skb,
        struct dccp_request_sock *dreq = dccp_rsk(req);
        bool own_req;
 
+       /* TCP/DCCP listeners became lockless.
+        * DCCP stores complex state in its request_sock, so we need to
+        * protect it now that this code runs without the parent
+        * (listener) lock.
+        */
+       spin_lock_bh(&dreq->dreq_lock);
+
        /* Check for retransmitted REQUEST */
        if (dccp_hdr(skb)->dccph_type == DCCP_PKT_REQUEST) {
 
@@ -156,7 +163,7 @@ struct sock *dccp_check_req(struct sock *sk, struct sk_buff *skb,
                        inet_rtx_syn_ack(sk, req);
                }
                /* Network Duplicate, discard packet */
-               return NULL;
+               goto out;
        }
 
        DCCP_SKB_CB(skb)->dccpd_reset_code = DCCP_RESET_CODE_PACKET_ERROR;
@@ -182,20 +189,20 @@ struct sock *dccp_check_req(struct sock *sk, struct sk_buff *skb,
 
        child = inet_csk(sk)->icsk_af_ops->syn_recv_sock(sk, skb, req, NULL,
                                                         req, &own_req);
-       if (!child)
-               goto listen_overflow;
-
-       return inet_csk_complete_hashdance(sk, child, req, own_req);
+       if (child) {
+               child = inet_csk_complete_hashdance(sk, child, req, own_req);
+               goto out;
+       }
 
-listen_overflow:
-       dccp_pr_debug("listen_overflow!\n");
        DCCP_SKB_CB(skb)->dccpd_reset_code = DCCP_RESET_CODE_TOO_BUSY;
 drop:
        if (dccp_hdr(skb)->dccph_type != DCCP_PKT_RESET)
                req->rsk_ops->send_reset(sk, skb);
 
        inet_csk_reqsk_queue_drop(sk, req);
-       return NULL;
+out:
+       spin_unlock_bh(&dreq->dreq_lock);
+       return child;
 }
 
 EXPORT_SYMBOL_GPL(dccp_check_req);
@@ -246,6 +253,7 @@ int dccp_reqsk_init(struct request_sock *req,
 {
        struct dccp_request_sock *dreq = dccp_rsk(req);
 
+       spin_lock_init(&dreq->dreq_lock);
        inet_rsk(req)->ir_rmt_port = dccp_hdr(skb)->dccph_sport;
        inet_rsk(req)->ir_num      = ntohs(dccp_hdr(skb)->dccph_dport);
        inet_rsk(req)->acked       = 0;
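
Since TCP/DCCP listeners went lockless, two CPUs can enter dccp_check_req() for the same request socket at once, so the request's state gets a dedicated spinlock: initialized here in dccp_reqsk_init(), held across all of dccp_check_req(), with every early return rerouted through the single unlock at out:. The lock-scoped single-exit shape, with hypothetical helpers:

    /* Sketch: serialize per-request state; all paths leave via one unlock. */
    static struct sock *check_req(struct dccp_request_sock *dreq,
                                  struct sk_buff *skb)
    {
            struct sock *child = NULL;

            spin_lock_bh(&dreq->dreq_lock);
            if (is_retransmit(skb))         /* hypothetical predicate */
                    goto out;               /* duplicate: child stays NULL */
            child = try_create_child(dreq, skb);    /* may also yield NULL */
    out:
            spin_unlock_bh(&dreq->dreq_lock);
            return child;
    }
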
index 0ec8cb4363e97514b29657f0dbce43a82b19026e..9afa2a5030b2570c89de8decc3b20aad3a224e5c 100644 (file)
@@ -1071,7 +1071,8 @@ static struct sk_buff *dn_wait_for_connect(struct sock *sk, long *timeo)
        return skb == NULL ? ERR_PTR(err) : skb;
 }
 
-static int dn_accept(struct socket *sock, struct socket *newsock, int flags)
+static int dn_accept(struct socket *sock, struct socket *newsock, int flags,
+                    bool kern)
 {
        struct sock *sk = sock->sk, *newsk;
        struct sk_buff *skb = NULL;
@@ -1100,7 +1101,7 @@ static int dn_accept(struct socket *sock, struct socket *newsock, int flags)
 
        cb = DN_SKB_CB(skb);
        sk->sk_ack_backlog--;
-       newsk = dn_alloc_sock(sock_net(sk), newsock, sk->sk_allocation, 0);
+       newsk = dn_alloc_sock(sock_net(sk), newsock, sk->sk_allocation, kern);
        if (newsk == NULL) {
                release_sock(sk);
                kfree_skb(skb);
index 9649238eef404095a89d34a006dc0b504bc47038..aa21f49f12156a403086e1625a95829fdf9513f5 100644 (file)
@@ -6,7 +6,7 @@ config HAVE_NET_DSA
 
 config NET_DSA
        tristate "Distributed Switch Architecture"
-       depends on HAVE_NET_DSA
+       depends on HAVE_NET_DSA && MAY_USE_DEVLINK
        select NET_SWITCHDEV
        select PHYLIB
        ---help---
@@ -31,4 +31,6 @@ config NET_DSA_TAG_TRAILER
 config NET_DSA_TAG_QCA
        bool
 
+config NET_DSA_TAG_MTK
+       bool
 endif
index 31d343796251da06c979b3428f275f5911dfb2ab..9b1d478f3713fef59c12de3d578ca62c7e881819 100644 (file)
@@ -8,3 +8,4 @@ dsa_core-$(CONFIG_NET_DSA_TAG_DSA) += tag_dsa.o
 dsa_core-$(CONFIG_NET_DSA_TAG_EDSA) += tag_edsa.o
 dsa_core-$(CONFIG_NET_DSA_TAG_TRAILER) += tag_trailer.o
 dsa_core-$(CONFIG_NET_DSA_TAG_QCA) += tag_qca.o
+dsa_core-$(CONFIG_NET_DSA_TAG_MTK) += tag_mtk.o
index b6d4f6a23f06c9d794a5eedc4c9f79810d5b06e5..1fb9cf7aaaf498b332ec9af913b0279c369a5605 100644 (file)
 #include <linux/platform_device.h>
 #include <linux/slab.h>
 #include <linux/module.h>
-#include <net/dsa.h>
 #include <linux/of.h>
 #include <linux/of_mdio.h>
 #include <linux/of_platform.h>
 #include <linux/of_net.h>
 #include <linux/of_gpio.h>
+#include <linux/netdevice.h>
 #include <linux/sysfs.h>
 #include <linux/phy_fixed.h>
 #include <linux/gpio/consumer.h>
+#include <linux/etherdevice.h>
+#include <net/dsa.h>
 #include "dsa_priv.h"
 
 static struct sk_buff *dsa_slave_notag_xmit(struct sk_buff *skb,
@@ -52,6 +54,9 @@ const struct dsa_device_ops *dsa_device_ops[DSA_TAG_LAST] = {
 #endif
 #ifdef CONFIG_NET_DSA_TAG_QCA
        [DSA_TAG_PROTO_QCA] = &qca_netdev_ops,
+#endif
+#ifdef CONFIG_NET_DSA_TAG_MTK
+       [DSA_TAG_PROTO_MTK] = &mtk_netdev_ops,
 #endif
        [DSA_TAG_PROTO_NONE] = &none_ops,
 };
@@ -896,13 +901,34 @@ static int dsa_switch_rcv(struct sk_buff *skb, struct net_device *dev,
                          struct packet_type *pt, struct net_device *orig_dev)
 {
        struct dsa_switch_tree *dst = dev->dsa_ptr;
+       struct sk_buff *nskb = NULL;
 
        if (unlikely(dst == NULL)) {
                kfree_skb(skb);
                return 0;
        }
 
-       return dst->rcv(skb, dev, pt, orig_dev);
+       skb = skb_unshare(skb, GFP_ATOMIC);
+       if (!skb)
+               return 0;
+
+       nskb = dst->rcv(skb, dev, pt, orig_dev);
+       if (!nskb) {
+               kfree_skb(skb);
+               return 0;
+       }
+
+       skb = nskb;
+       skb_push(skb, ETH_HLEN);
+       skb->pkt_type = PACKET_HOST;
+       skb->protocol = eth_type_trans(skb, skb->dev);
+
+       skb->dev->stats.rx_packets++;
+       skb->dev->stats.rx_bytes += skb->len;
+
+       netif_receive_skb(skb);
+
+       return 0;
 }
 
 static struct packet_type dsa_pack_type __read_mostly = {
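
This hunk is the core of the DSA receive rework: tagger rcv() hooks now return the untagged skb (or NULL to drop), and the work they used to duplicate (skb_unshare(), the ETH_HLEN push, pkt_type/protocol fixup, stats, netif_receive_skb()) runs once here. Under the new contract a tagger shrinks to parse-and-steer; a hedged sketch with an invented tag format:

    /* Sketch: minimal tagger under the new contract. The toy 4-byte tag
     * sits at the start of the payload, so no MAC-address memmove is needed.
     */
    static struct sk_buff *toy_tag_rcv(struct sk_buff *skb,
                                       struct net_device *dev,
                                       struct packet_type *pt,
                                       struct net_device *orig_dev)
    {
            struct dsa_switch_tree *dst = dev->dsa_ptr;
            int port;

            if (unlikely(!pskb_may_pull(skb, 4)))
                    return NULL;    /* NULL tells the core to free skb */

            port = skb->data[0] & 0x7;
            skb_pull_rcsum(skb, 4);         /* strip tag, fix checksum */
            skb->dev = dst->cpu_switch->ports[port].netdev;
            if (!skb->dev)
                    return NULL;

            return skb;     /* core pushes ETH_HLEN, bumps stats, delivers */
    }
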
index 737be6470c7f27ba032d01667e039f3c03c17ae8..033b3bfb63dc1887b15b3e08f00a00f70b706ec4 100644 (file)
 #include <linux/device.h>
 #include <linux/err.h>
 #include <linux/list.h>
+#include <linux/netdevice.h>
 #include <linux/slab.h>
 #include <linux/rtnetlink.h>
-#include <net/dsa.h>
 #include <linux/of.h>
 #include <linux/of_net.h>
+#include <net/dsa.h>
 #include "dsa_priv.h"
 
 static LIST_HEAD(dsa_switch_trees);
 static DEFINE_MUTEX(dsa2_mutex);
 
+static const struct devlink_ops dsa_devlink_ops = {
+};
+
 static struct dsa_switch_tree *dsa_get_dst(u32 tree)
 {
        struct dsa_switch_tree *dst;
@@ -222,12 +226,18 @@ static int dsa_dsa_port_apply(struct dsa_port *port, u32 index,
                return err;
        }
 
-       return 0;
+       memset(&ds->ports[index].devlink_port, 0,
+              sizeof(ds->ports[index].devlink_port));
+
+       return devlink_port_register(ds->devlink,
+                                    &ds->ports[index].devlink_port,
+                                    index);
 }
 
 static void dsa_dsa_port_unapply(struct dsa_port *port, u32 index,
                                 struct dsa_switch *ds)
 {
+       devlink_port_unregister(&ds->ports[index].devlink_port);
        dsa_cpu_dsa_destroy(port);
 }
 
@@ -245,12 +255,17 @@ static int dsa_cpu_port_apply(struct dsa_port *port, u32 index,
 
        ds->cpu_port_mask |= BIT(index);
 
-       return 0;
+       memset(&ds->ports[index].devlink_port, 0,
+              sizeof(ds->ports[index].devlink_port));
+       err = devlink_port_register(ds->devlink, &ds->ports[index].devlink_port,
+                                   index);
+       return err;
 }
 
 static void dsa_cpu_port_unapply(struct dsa_port *port, u32 index,
                                 struct dsa_switch *ds)
 {
+       devlink_port_unregister(&ds->ports[index].devlink_port);
        dsa_cpu_dsa_destroy(port);
        ds->cpu_port_mask &= ~BIT(index);
 
@@ -275,12 +290,23 @@ static int dsa_user_port_apply(struct dsa_port *port, u32 index,
                return err;
        }
 
+       memset(&ds->ports[index].devlink_port, 0,
+              sizeof(ds->ports[index].devlink_port));
+       err = devlink_port_register(ds->devlink, &ds->ports[index].devlink_port,
+                                   index);
+       if (err)
+               return err;
+
+       devlink_port_type_eth_set(&ds->ports[index].devlink_port,
+                                 ds->ports[index].netdev);
+
        return 0;
 }
 
 static void dsa_user_port_unapply(struct dsa_port *port, u32 index,
                                  struct dsa_switch *ds)
 {
+       devlink_port_unregister(&ds->ports[index].devlink_port);
        if (ds->ports[index].netdev) {
                dsa_slave_destroy(ds->ports[index].netdev);
                ds->ports[index].netdev = NULL;
@@ -301,6 +327,17 @@ static int dsa_ds_apply(struct dsa_switch_tree *dst, struct dsa_switch *ds)
         */
        ds->phys_mii_mask = ds->enabled_port_mask;
 
+       /* Add the switch to devlink before calling setup, so that setup can
+        * add dpipe tables
+        */
+       ds->devlink = devlink_alloc(&dsa_devlink_ops, 0);
+       if (!ds->devlink)
+               return -ENOMEM;
+
+       err = devlink_register(ds->devlink, ds->dev);
+       if (err)
+               return err;
+
        err = ds->ops->setup(ds);
        if (err < 0)
                return err;
@@ -381,6 +418,13 @@ static void dsa_ds_unapply(struct dsa_switch_tree *dst, struct dsa_switch *ds)
                mdiobus_unregister(ds->slave_mii_bus);
 
        dsa_switch_unregister_notifier(ds);
+
+       if (ds->devlink) {
+               devlink_unregister(ds->devlink);
+               devlink_free(ds->devlink);
+               ds->devlink = NULL;
+       }
+
 }
 
 static int dsa_dst_apply(struct dsa_switch_tree *dst)
index 0706a511244e92ff0174173eb388a41ff59141f4..107138a55bd8697f4bf741cf883b4b94093ac282 100644 (file)
@@ -17,8 +17,9 @@
 
 struct dsa_device_ops {
        struct sk_buff *(*xmit)(struct sk_buff *skb, struct net_device *dev);
-       int (*rcv)(struct sk_buff *skb, struct net_device *dev,
-                  struct packet_type *pt, struct net_device *orig_dev);
+       struct sk_buff *(*rcv)(struct sk_buff *skb, struct net_device *dev,
+                              struct packet_type *pt,
+                              struct net_device *orig_dev);
 };
 
 struct dsa_slave_priv {
@@ -85,4 +86,7 @@ extern const struct dsa_device_ops brcm_netdev_ops;
 /* tag_qca.c */
 extern const struct dsa_device_ops qca_netdev_ops;
 
+/* tag_mtk.c */
+extern const struct dsa_device_ops mtk_netdev_ops;
+
 #endif
index c34872e1febc4b75d1b69b18a8a1189405ca30fa..7693182df81e61d14540cd43be3ae7e134eef576 100644 (file)
@@ -17,6 +17,7 @@
 #include <linux/of_mdio.h>
 #include <linux/mdio.h>
 #include <linux/list.h>
+#include <net/dsa.h>
 #include <net/rtnetlink.h>
 #include <net/switchdev.h>
 #include <net/pkt_cls.h>
@@ -419,8 +420,8 @@ static int dsa_slave_vlan_filtering(struct net_device *dev,
        return 0;
 }
 
-static int dsa_fastest_ageing_time(struct dsa_switch *ds,
-                                  unsigned int ageing_time)
+static unsigned int dsa_fastest_ageing_time(struct dsa_switch *ds,
+                                           unsigned int ageing_time)
 {
        int i;
 
@@ -443,9 +444,13 @@ static int dsa_slave_ageing_time(struct net_device *dev,
        unsigned long ageing_jiffies = clock_t_to_jiffies(attr->u.ageing_time);
        unsigned int ageing_time = jiffies_to_msecs(ageing_jiffies);
 
-       /* bridge skips -EOPNOTSUPP, so skip the prepare phase */
-       if (switchdev_trans_ph_prepare(trans))
+       if (switchdev_trans_ph_prepare(trans)) {
+               if (ds->ageing_time_min && ageing_time < ds->ageing_time_min)
+                       return -ERANGE;
+               if (ds->ageing_time_max && ageing_time > ds->ageing_time_max)
+                       return -ERANGE;
                return 0;
+       }
 
        /* Keep the fastest ageing time in case of multiple bridges */
        p->dp->ageing_time = ageing_time;
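
The prepare phase used to be skipped because the bridge layer swallows -EOPNOTSUPP; it now carries the validation, so an ageing time outside the switch's advertised [ageing_time_min, ageing_time_max] window fails the transaction before the commit phase touches hardware. That is the canonical switchdev two-phase pattern:

    /* Sketch: validate in prepare, act in commit; commit must not fail. */
    static int set_ageing(struct dsa_switch *ds,
                          struct switchdev_trans *trans, unsigned int ms)
    {
            if (switchdev_trans_ph_prepare(trans)) {
                    if (ds->ageing_time_min && ms < ds->ageing_time_min)
                            return -ERANGE;
                    if (ds->ageing_time_max && ms > ds->ageing_time_max)
                            return -ERANGE;
                    return 0;       /* prepare clean: commit will follow */
            }
            /* commit phase: push the value down to the switch driver */
            return ds->ops->set_ageing_time ?
                   ds->ops->set_ageing_time(ds, ms) : 0;
    }
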
index 6456dacf9ae9e0b88585defc026179423a80e4e4..ca6e26e514f089cfa8991ef6a7bd790bd8db14fe 100644 (file)
@@ -1,7 +1,8 @@
 /*
  * Handling of a single switch chip, part of a switch fabric
  *
- * Copyright (c) 2017 Vivien Didelot <vivien.didelot@savoirfairelinux.com>
+ * Copyright (c) 2017 Savoir-faire Linux Inc.
+ *     Vivien Didelot <vivien.didelot@savoirfairelinux.com>
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
@@ -19,9 +20,9 @@ static int dsa_switch_bridge_join(struct dsa_switch *ds,
        if (ds->index == info->sw_index && ds->ops->port_bridge_join)
                return ds->ops->port_bridge_join(ds, info->port, info->br);
 
-       if (ds->index != info->sw_index)
-               dev_dbg(ds->dev, "crosschip DSA port %d.%d bridged to %s\n",
-                       info->sw_index, info->port, netdev_name(info->br));
+       if (ds->index != info->sw_index && ds->ops->crosschip_bridge_join)
+               return ds->ops->crosschip_bridge_join(ds, info->sw_index,
+                                                     info->port, info->br);
 
        return 0;
 }
@@ -32,9 +33,9 @@ static int dsa_switch_bridge_leave(struct dsa_switch *ds,
        if (ds->index == info->sw_index && ds->ops->port_bridge_leave)
                ds->ops->port_bridge_leave(ds, info->port, info->br);
 
-       if (ds->index != info->sw_index)
-               dev_dbg(ds->dev, "crosschip DSA port %d.%d unbridged from %s\n",
-                       info->sw_index, info->port, netdev_name(info->br));
+       if (ds->index != info->sw_index && ds->ops->crosschip_bridge_leave)
+               ds->ops->crosschip_bridge_leave(ds, info->sw_index, info->port,
+                                               info->br);
 
        return 0;
 }
index 5d925b6b2bb14f78f84a06b84b4fa19bd6846e82..2a9b52c5af86b5308d7d71a3340a0e48768bdb60 100644 (file)
@@ -12,6 +12,7 @@
 #include <linux/etherdevice.h>
 #include <linux/list.h>
 #include <linux/slab.h>
+#include <net/dsa.h>
 #include "dsa_priv.h"
 
 /* This tag length is 4 bytes, older ones were 6 bytes, we do not
@@ -91,23 +92,17 @@ out_free:
        return NULL;
 }
 
-static int brcm_tag_rcv(struct sk_buff *skb, struct net_device *dev,
-                       struct packet_type *pt, struct net_device *orig_dev)
+static struct sk_buff *brcm_tag_rcv(struct sk_buff *skb, struct net_device *dev,
+                                   struct packet_type *pt,
+                                   struct net_device *orig_dev)
 {
        struct dsa_switch_tree *dst = dev->dsa_ptr;
        struct dsa_switch *ds;
        int source_port;
        u8 *brcm_tag;
 
-       if (unlikely(dst == NULL))
-               goto out_drop;
-
        ds = dst->cpu_switch;
 
-       skb = skb_unshare(skb, GFP_ATOMIC);
-       if (skb == NULL)
-               goto out;
-
        if (unlikely(!pskb_may_pull(skb, BRCM_TAG_LEN)))
                goto out_drop;
 
@@ -139,22 +134,12 @@ static int brcm_tag_rcv(struct sk_buff *skb, struct net_device *dev,
                skb->data - ETH_HLEN - BRCM_TAG_LEN,
                2 * ETH_ALEN);
 
-       skb_push(skb, ETH_HLEN);
-       skb->pkt_type = PACKET_HOST;
        skb->dev = ds->ports[source_port].netdev;
-       skb->protocol = eth_type_trans(skb, skb->dev);
-
-       skb->dev->stats.rx_packets++;
-       skb->dev->stats.rx_bytes += skb->len;
 
-       netif_receive_skb(skb);
-
-       return 0;
+       return skb;
 
 out_drop:
-       kfree_skb(skb);
-out:
-       return 0;
+       return NULL;
 }
 
 const struct dsa_device_ops brcm_netdev_ops = {
index 72579ceea381b7e2bce99a28208810b707434f09..1c6633f0de01909f950a03349b1b4c08c2151839 100644 (file)
@@ -11,6 +11,7 @@
 #include <linux/etherdevice.h>
 #include <linux/list.h>
 #include <linux/slab.h>
+#include <net/dsa.h>
 #include "dsa_priv.h"
 
 #define DSA_HLEN       4
@@ -67,8 +68,9 @@ out_free:
        return NULL;
 }
 
-static int dsa_rcv(struct sk_buff *skb, struct net_device *dev,
-                  struct packet_type *pt, struct net_device *orig_dev)
+static struct sk_buff *dsa_rcv(struct sk_buff *skb, struct net_device *dev,
+                              struct packet_type *pt,
+                              struct net_device *orig_dev)
 {
        struct dsa_switch_tree *dst = dev->dsa_ptr;
        struct dsa_switch *ds;
@@ -76,13 +78,6 @@ static int dsa_rcv(struct sk_buff *skb, struct net_device *dev,
        int source_device;
        int source_port;
 
-       if (unlikely(dst == NULL))
-               goto out_drop;
-
-       skb = skb_unshare(skb, GFP_ATOMIC);
-       if (skb == NULL)
-               goto out;
-
        if (unlikely(!pskb_may_pull(skb, DSA_HLEN)))
                goto out_drop;
 
@@ -164,21 +159,11 @@ static int dsa_rcv(struct sk_buff *skb, struct net_device *dev,
        }
 
        skb->dev = ds->ports[source_port].netdev;
-       skb_push(skb, ETH_HLEN);
-       skb->pkt_type = PACKET_HOST;
-       skb->protocol = eth_type_trans(skb, skb->dev);
-
-       skb->dev->stats.rx_packets++;
-       skb->dev->stats.rx_bytes += skb->len;
 
-       netif_receive_skb(skb);
-
-       return 0;
+       return skb;
 
 out_drop:
-       kfree_skb(skb);
-out:
-       return 0;
+       return NULL;
 }
 
 const struct dsa_device_ops dsa_netdev_ops = {
index 648c051817a1b4a4e64cda67bab8c81288027a4e..d9c668aa5e54682914a0e61b3cb6373a71a0ee8b 100644 (file)
@@ -11,6 +11,7 @@
 #include <linux/etherdevice.h>
 #include <linux/list.h>
 #include <linux/slab.h>
+#include <net/dsa.h>
 #include "dsa_priv.h"
 
 #define DSA_HLEN       4
@@ -80,8 +81,9 @@ out_free:
        return NULL;
 }
 
-static int edsa_rcv(struct sk_buff *skb, struct net_device *dev,
-                   struct packet_type *pt, struct net_device *orig_dev)
+static struct sk_buff *edsa_rcv(struct sk_buff *skb, struct net_device *dev,
+                               struct packet_type *pt,
+                               struct net_device *orig_dev)
 {
        struct dsa_switch_tree *dst = dev->dsa_ptr;
        struct dsa_switch *ds;
@@ -89,13 +91,6 @@ static int edsa_rcv(struct sk_buff *skb, struct net_device *dev,
        int source_device;
        int source_port;
 
-       if (unlikely(dst == NULL))
-               goto out_drop;
-
-       skb = skb_unshare(skb, GFP_ATOMIC);
-       if (skb == NULL)
-               goto out;
-
        if (unlikely(!pskb_may_pull(skb, EDSA_HLEN)))
                goto out_drop;
 
@@ -183,21 +178,11 @@ static int edsa_rcv(struct sk_buff *skb, struct net_device *dev,
        }
 
        skb->dev = ds->ports[source_port].netdev;
-       skb_push(skb, ETH_HLEN);
-       skb->pkt_type = PACKET_HOST;
-       skb->protocol = eth_type_trans(skb, skb->dev);
-
-       skb->dev->stats.rx_packets++;
-       skb->dev->stats.rx_bytes += skb->len;
 
-       netif_receive_skb(skb);
-
-       return 0;
+       return skb;
 
 out_drop:
-       kfree_skb(skb);
-out:
-       return 0;
+       return NULL;
 }
 
 const struct dsa_device_ops edsa_netdev_ops = {
diff --git a/net/dsa/tag_mtk.c b/net/dsa/tag_mtk.c
new file mode 100644 (file)
index 0000000..837cddd
--- /dev/null
+++ b/net/dsa/tag_mtk.c
@@ -0,0 +1,100 @@
+/*
+ * Mediatek DSA Tag support
+ * Copyright (C) 2017 Landen Chao <landen.chao@mediatek.com>
+ *                   Sean Wang <sean.wang@mediatek.com>
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/etherdevice.h>
+#include <net/dsa.h>
+#include "dsa_priv.h"
+
+#define MTK_HDR_LEN            4
+#define MTK_HDR_RECV_SOURCE_PORT_MASK  GENMASK(2, 0)
+#define MTK_HDR_XMIT_DP_BIT_MASK       GENMASK(5, 0)
+
+static struct sk_buff *mtk_tag_xmit(struct sk_buff *skb,
+                                   struct net_device *dev)
+{
+       struct dsa_slave_priv *p = netdev_priv(dev);
+       u8 *mtk_tag;
+
+       if (skb_cow_head(skb, MTK_HDR_LEN) < 0)
+               goto out_free;
+
+       skb_push(skb, MTK_HDR_LEN);
+
+       memmove(skb->data, skb->data + MTK_HDR_LEN, 2 * ETH_ALEN);
+
+       /* Build the tag after the MAC Source Address */
+       mtk_tag = skb->data + 2 * ETH_ALEN;
+       mtk_tag[0] = 0;
+       mtk_tag[1] = (1 << p->dp->index) & MTK_HDR_XMIT_DP_BIT_MASK;
+       mtk_tag[2] = 0;
+       mtk_tag[3] = 0;
+
+       return skb;
+
+out_free:
+       kfree_skb(skb);
+       return NULL;
+}
+
+static struct sk_buff *mtk_tag_rcv(struct sk_buff *skb, struct net_device *dev,
+                                  struct packet_type *pt,
+                                  struct net_device *orig_dev)
+{
+       struct dsa_switch_tree *dst = dev->dsa_ptr;
+       struct dsa_switch *ds;
+       int port;
+       __be16 *phdr, hdr;
+
+       if (unlikely(!pskb_may_pull(skb, MTK_HDR_LEN)))
+               goto out_drop;
+
+       /* The MTK header is added by the switch between src addr
+        * and ethertype at this point, skb->data points to 2 bytes
+        * after src addr so header should be 2 bytes right before.
+        */
+       phdr = (__be16 *)(skb->data - 2);
+       hdr = ntohs(*phdr);
+
+       /* Remove MTK tag and recalculate checksum. */
+       skb_pull_rcsum(skb, MTK_HDR_LEN);
+
+       memmove(skb->data - ETH_HLEN,
+               skb->data - ETH_HLEN - MTK_HDR_LEN,
+               2 * ETH_ALEN);
+
+       /* This protocol doesn't support cascading multiple
+        * switches so it's safe to assume the switch is first
+        * in the tree.
+        */
+       ds = dst->ds[0];
+       if (!ds)
+               goto out_drop;
+
+       /* Get source port information */
+       port = (hdr & MTK_HDR_RECV_SOURCE_PORT_MASK);
+       if (!ds->ports[port].netdev)
+               goto out_drop;
+
+       skb->dev = ds->ports[port].netdev;
+
+       return skb;
+
+out_drop:
+       return NULL;
+}
+
+const struct dsa_device_ops mtk_netdev_ops = {
+       .xmit   = mtk_tag_xmit,
+       .rcv    = mtk_tag_rcv,
+};
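The tag sits between the source MAC and the EtherType, which is why both directions memmove() the two addresses across a four-byte gap. The on-wire layout implied by mtk_tag_xmit()/mtk_tag_rcv() above, as a reconstruction from this code rather than from vendor documentation:

/*
 *   | DA (6) | SA (6) | MTK tag (4) | Type/Len (2) | payload ... |
 *
 * On xmit, byte 1 carries a one-hot bitmap of the destination port;
 * on rcv, the low three bits of the leading 16-bit word give the
 * source port:
 */
static inline int mtk_tag_source_port(__be16 hdr)	/* illustrative helper */
{
	return ntohs(hdr) & MTK_HDR_RECV_SOURCE_PORT_MASK;	/* GENMASK(2, 0) */
}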
index 30240f343aea8450b13936159b4b9a76126a8977..3ba3f59f7a3433b3731a5b3dc501eb22660d7cce 100644 (file)
@@ -12,6 +12,7 @@
  */
 
 #include <linux/etherdevice.h>
+#include <net/dsa.h>
 #include "dsa_priv.h"
 
 #define QCA_HDR_LEN    2
@@ -65,8 +66,9 @@ out_free:
        return NULL;
 }
 
-static int qca_tag_rcv(struct sk_buff *skb, struct net_device *dev,
-                      struct packet_type *pt, struct net_device *orig_dev)
+static struct sk_buff *qca_tag_rcv(struct sk_buff *skb, struct net_device *dev,
+                                  struct packet_type *pt,
+                                  struct net_device *orig_dev)
 {
        struct dsa_switch_tree *dst = dev->dsa_ptr;
        struct dsa_switch *ds;
@@ -74,13 +76,6 @@ static int qca_tag_rcv(struct sk_buff *skb, struct net_device *dev,
        int port;
        __be16 *phdr, hdr;
 
-       if (unlikely(!dst))
-               goto out_drop;
-
-       skb = skb_unshare(skb, GFP_ATOMIC);
-       if (!skb)
-               goto out;
-
        if (unlikely(!pskb_may_pull(skb, QCA_HDR_LEN)))
                goto out_drop;
 
@@ -114,22 +109,12 @@ static int qca_tag_rcv(struct sk_buff *skb, struct net_device *dev,
                goto out_drop;
 
        /* Update skb & forward the frame accordingly */
-       skb_push(skb, ETH_HLEN);
-       skb->pkt_type = PACKET_HOST;
        skb->dev = ds->ports[port].netdev;
-       skb->protocol = eth_type_trans(skb, skb->dev);
-
-       skb->dev->stats.rx_packets++;
-       skb->dev->stats.rx_bytes += skb->len;
 
-       netif_receive_skb(skb);
-
-       return 0;
+       return skb;
 
 out_drop:
-       kfree_skb(skb);
-out:
-       return 0;
+       return NULL;
 }
 
 const struct dsa_device_ops qca_netdev_ops = {
index 26f977176978085af9c034319c754a1ac7501d4c..aafc2fc74c3067dd05c67c409e40e8ceef33cf0c 100644 (file)
@@ -11,6 +11,7 @@
 #include <linux/etherdevice.h>
 #include <linux/list.h>
 #include <linux/slab.h>
+#include <net/dsa.h>
 #include "dsa_priv.h"
 
 static struct sk_buff *trailer_xmit(struct sk_buff *skb, struct net_device *dev)
@@ -57,22 +58,17 @@ static struct sk_buff *trailer_xmit(struct sk_buff *skb, struct net_device *dev)
        return nskb;
 }
 
-static int trailer_rcv(struct sk_buff *skb, struct net_device *dev,
-                      struct packet_type *pt, struct net_device *orig_dev)
+static struct sk_buff *trailer_rcv(struct sk_buff *skb, struct net_device *dev,
+                                  struct packet_type *pt,
+                                  struct net_device *orig_dev)
 {
        struct dsa_switch_tree *dst = dev->dsa_ptr;
        struct dsa_switch *ds;
        u8 *trailer;
        int source_port;
 
-       if (unlikely(dst == NULL))
-               goto out_drop;
        ds = dst->cpu_switch;
 
-       skb = skb_unshare(skb, GFP_ATOMIC);
-       if (skb == NULL)
-               goto out;
-
        if (skb_linearize(skb))
                goto out_drop;
 
@@ -88,21 +84,11 @@ static int trailer_rcv(struct sk_buff *skb, struct net_device *dev,
        pskb_trim_rcsum(skb, skb->len - 4);
 
        skb->dev = ds->ports[source_port].netdev;
-       skb_push(skb, ETH_HLEN);
-       skb->pkt_type = PACKET_HOST;
-       skb->protocol = eth_type_trans(skb, skb->dev);
-
-       skb->dev->stats.rx_packets++;
-       skb->dev->stats.rx_bytes += skb->len;
 
-       netif_receive_skb(skb);
-
-       return 0;
+       return skb;
 
 out_drop:
-       kfree_skb(skb);
-out:
-       return 0;
+       return NULL;
 }
 
 const struct dsa_device_ops trailer_netdev_ops = {
index c6d4238ff94a8a329bf44bf59b30fb09fb07f707..f83de23a30e7e77303d011dee35eb92342adab5c 100644 (file)
@@ -11,7 +11,7 @@ obj-y     := route.o inetpeer.o protocol.o \
             tcp_rate.o tcp_recovery.o \
             tcp_offload.o datagram.o raw.o udp.o udplite.o \
             udp_offload.o arp.o icmp.o devinet.o af_inet.o igmp.o \
-            fib_frontend.o fib_semantics.o fib_trie.o \
+            fib_frontend.o fib_semantics.o fib_trie.o fib_notifier.o \
             inet_fragment.o ping.o ip_tunnel_core.o gre_offload.o
 
 obj-$(CONFIG_NET_IP_TUNNEL) += ip_tunnel.o
index 602d40f43687c91db7250822439bacbe85318fa3..d1a11707a12682fcd70f22f6df77087b779a5826 100644 (file)
@@ -689,11 +689,12 @@ EXPORT_SYMBOL(inet_stream_connect);
  *     Accept a pending connection. The TCP layer now gives BSD semantics.
  */
 
-int inet_accept(struct socket *sock, struct socket *newsock, int flags)
+int inet_accept(struct socket *sock, struct socket *newsock, int flags,
+               bool kern)
 {
        struct sock *sk1 = sock->sk;
        int err = -EINVAL;
-       struct sock *sk2 = sk1->sk_prot->accept(sk1, flags, &err);
+       struct sock *sk2 = sk1->sk_prot->accept(sk1, flags, &err, kern);
 
        if (!sk2)
                goto do_err;
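The new bool threads one piece of context through every accept implementation: whether the accept is performed on behalf of the kernel rather than a userspace caller, so the child socket can be classed accordingly (that rationale is an interpretation; the signatures below are taken straight from these hunks):

int inet_accept(struct socket *sock, struct socket *newsock, int flags,
		bool kern);
struct sock *inet_csk_accept(struct sock *sk, int flags, int *err,
			     bool kern);	/* see the hunk further down */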
@@ -1487,8 +1488,10 @@ int inet_gro_complete(struct sk_buff *skb, int nhoff)
        int proto = iph->protocol;
        int err = -ENOSYS;
 
-       if (skb->encapsulation)
+       if (skb->encapsulation) {
+               skb_set_inner_protocol(skb, cpu_to_be16(ETH_P_IP));
                skb_set_inner_network_header(skb, nhoff);
+       }
 
        csum_replace2(&iph->check, iph->tot_len, newlen);
        iph->tot_len = newlen;
@@ -1596,8 +1599,9 @@ static const struct net_protocol igmp_protocol = {
 };
 #endif
 
-static const struct net_protocol tcp_protocol = {
+static struct net_protocol tcp_protocol = {
        .early_demux    =       tcp_v4_early_demux,
+       .early_demux_handler =  tcp_v4_early_demux,
        .handler        =       tcp_v4_rcv,
        .err_handler    =       tcp_v4_err,
        .no_policy      =       1,
@@ -1605,8 +1609,9 @@ static const struct net_protocol tcp_protocol = {
        .icmp_strict_tag_validation = 1,
 };
 
-static const struct net_protocol udp_protocol = {
+static struct net_protocol udp_protocol = {
        .early_demux =  udp_v4_early_demux,
+       .early_demux_handler =  udp_v4_early_demux,
        .handler =      udp_rcv,
        .err_handler =  udp_err,
        .no_policy =    1,
@@ -1717,6 +1722,8 @@ static __net_init int inet_init_net(struct net *net)
        net->ipv4.sysctl_ip_default_ttl = IPDEFTTL;
        net->ipv4.sysctl_ip_dynaddr = 0;
        net->ipv4.sysctl_ip_early_demux = 1;
+       net->ipv4.sysctl_udp_early_demux = 1;
+       net->ipv4.sysctl_tcp_early_demux = 1;
 #ifdef CONFIG_SYSCTL
        net->ipv4.sysctl_ip_prot_sock = PROT_SOCK;
 #endif
index 51b27ae09fbd725bcd8030982e5850215ac4ce5c..0937b34c27cacb2dec73a67a76ff11fe26722500 100644 (file)
@@ -872,7 +872,7 @@ static int arp_process(struct net *net, struct sock *sk, struct sk_buff *skb)
                    skb->pkt_type != PACKET_HOST)
                        state = NUD_STALE;
                neigh_update(n, sha, state,
-                            override ? NEIGH_UPDATE_F_OVERRIDE : 0);
+                            override ? NEIGH_UPDATE_F_OVERRIDE : 0, 0);
                neigh_release(n);
        }
 
@@ -1033,7 +1033,7 @@ static int arp_req_set(struct net *net, struct arpreq *r,
                err = neigh_update(neigh, (r->arp_flags & ATF_COM) ?
                                   r->arp_ha.sa_data : NULL, state,
                                   NEIGH_UPDATE_F_OVERRIDE |
-                                  NEIGH_UPDATE_F_ADMIN);
+                                  NEIGH_UPDATE_F_ADMIN, 0);
                neigh_release(neigh);
        }
        return err;
@@ -1084,7 +1084,7 @@ static int arp_invalidate(struct net_device *dev, __be32 ip)
                if (neigh->nud_state & ~NUD_NOARP)
                        err = neigh_update(neigh, NULL, NUD_FAILED,
                                           NEIGH_UPDATE_F_OVERRIDE|
-                                          NEIGH_UPDATE_F_ADMIN);
+                                          NEIGH_UPDATE_F_ADMIN, 0);
                neigh_release(neigh);
        }
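All three arp call sites gain a trailing 0. The assumed new prototype is below; the extra parameter is the netlink portid of the socket that requested the change, so neighbour notifications can report who triggered them, and purely in-kernel updates like these pass 0:

int neigh_update(struct neighbour *neigh, const u8 *lladdr, u8 new,
		 u32 flags, u32 nlmsg_pid);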
 
index cebedd545e5e2863afcfe116309725e2cd57206c..6d3602ec640c7ae620692527000f7b03556b0ef7 100644 (file)
@@ -1192,6 +1192,18 @@ out:
        return done;
 }
 
+static __be32 in_dev_select_addr(const struct in_device *in_dev,
+                                int scope)
+{
+       for_primary_ifa(in_dev) {
+               if (ifa->ifa_scope != RT_SCOPE_LINK &&
+                   ifa->ifa_scope <= scope)
+                       return ifa->ifa_local;
+       } endfor_ifa(in_dev);
+
+       return 0;
+}
+
 __be32 inet_select_addr(const struct net_device *dev, __be32 dst, int scope)
 {
        __be32 addr = 0;
@@ -1228,13 +1240,9 @@ no_in_dev:
        if (master_idx &&
            (dev = dev_get_by_index_rcu(net, master_idx)) &&
            (in_dev = __in_dev_get_rcu(dev))) {
-               for_primary_ifa(in_dev) {
-                       if (ifa->ifa_scope != RT_SCOPE_LINK &&
-                           ifa->ifa_scope <= scope) {
-                               addr = ifa->ifa_local;
-                               goto out_unlock;
-                       }
-               } endfor_ifa(in_dev);
+               addr = in_dev_select_addr(in_dev, scope);
+               if (addr)
+                       goto out_unlock;
        }
 
        /* Not loopback addresses on loopback should be preferred
@@ -1249,13 +1257,9 @@ no_in_dev:
                if (!in_dev)
                        continue;
 
-               for_primary_ifa(in_dev) {
-                       if (ifa->ifa_scope != RT_SCOPE_LINK &&
-                           ifa->ifa_scope <= scope) {
-                               addr = ifa->ifa_local;
-                               goto out_unlock;
-                       }
-               } endfor_ifa(in_dev);
+               addr = in_dev_select_addr(in_dev, scope);
+               if (addr)
+                       goto out_unlock;
        }
 out_unlock:
        rcu_read_unlock();
@@ -1798,6 +1802,9 @@ static int inet_netconf_fill_devconf(struct sk_buff *skb, int ifindex,
        if (nla_put_s32(skb, NETCONFA_IFINDEX, ifindex) < 0)
                goto nla_put_failure;
 
+       if (!devconf)
+               goto out;
+
        if ((all || type == NETCONFA_FORWARDING) &&
            nla_put_s32(skb, NETCONFA_FORWARDING,
                        IPV4_DEVCONF(*devconf, FORWARDING)) < 0)
@@ -1819,6 +1826,7 @@ static int inet_netconf_fill_devconf(struct sk_buff *skb, int ifindex,
                        IPV4_DEVCONF(*devconf, IGNORE_ROUTES_WITH_LINKDOWN)) < 0)
                goto nla_put_failure;
 
+out:
        nlmsg_end(skb, nlh);
        return 0;
 
@@ -1827,8 +1835,8 @@ nla_put_failure:
        return -EMSGSIZE;
 }
 
-void inet_netconf_notify_devconf(struct net *net, int type, int ifindex,
-                                struct ipv4_devconf *devconf)
+void inet_netconf_notify_devconf(struct net *net, int event, int type,
+                                int ifindex, struct ipv4_devconf *devconf)
 {
        struct sk_buff *skb;
        int err = -ENOBUFS;
@@ -1838,7 +1846,7 @@ void inet_netconf_notify_devconf(struct net *net, int type, int ifindex,
                goto errout;
 
        err = inet_netconf_fill_devconf(skb, ifindex, devconf, 0, 0,
-                                       RTM_NEWNETCONF, 0, type);
+                                       event, 0, type);
        if (err < 0) {
                /* -EMSGSIZE implies BUG in inet_netconf_msgsize_devconf() */
                WARN_ON(err == -EMSGSIZE);
@@ -2017,10 +2025,12 @@ static void inet_forward_change(struct net *net)
 
        IPV4_DEVCONF_ALL(net, ACCEPT_REDIRECTS) = !on;
        IPV4_DEVCONF_DFLT(net, FORWARDING) = on;
-       inet_netconf_notify_devconf(net, NETCONFA_FORWARDING,
+       inet_netconf_notify_devconf(net, RTM_NEWNETCONF,
+                                   NETCONFA_FORWARDING,
                                    NETCONFA_IFINDEX_ALL,
                                    net->ipv4.devconf_all);
-       inet_netconf_notify_devconf(net, NETCONFA_FORWARDING,
+       inet_netconf_notify_devconf(net, RTM_NEWNETCONF,
+                                   NETCONFA_FORWARDING,
                                    NETCONFA_IFINDEX_DEFAULT,
                                    net->ipv4.devconf_dflt);
 
@@ -2033,7 +2043,8 @@ static void inet_forward_change(struct net *net)
                in_dev = __in_dev_get_rtnl(dev);
                if (in_dev) {
                        IN_DEV_CONF_SET(in_dev, FORWARDING, on);
-                       inet_netconf_notify_devconf(net, NETCONFA_FORWARDING,
+                       inet_netconf_notify_devconf(net, RTM_NEWNETCONF,
+                                                   NETCONFA_FORWARDING,
                                                    dev->ifindex, &in_dev->cnf);
                }
        }
@@ -2078,19 +2089,22 @@ static int devinet_conf_proc(struct ctl_table *ctl, int write,
                if (i == IPV4_DEVCONF_RP_FILTER - 1 &&
                    new_value != old_value) {
                        ifindex = devinet_conf_ifindex(net, cnf);
-                       inet_netconf_notify_devconf(net, NETCONFA_RP_FILTER,
+                       inet_netconf_notify_devconf(net, RTM_NEWNETCONF,
+                                                   NETCONFA_RP_FILTER,
                                                    ifindex, cnf);
                }
                if (i == IPV4_DEVCONF_PROXY_ARP - 1 &&
                    new_value != old_value) {
                        ifindex = devinet_conf_ifindex(net, cnf);
-                       inet_netconf_notify_devconf(net, NETCONFA_PROXY_NEIGH,
+                       inet_netconf_notify_devconf(net, RTM_NEWNETCONF,
+                                                   NETCONFA_PROXY_NEIGH,
                                                    ifindex, cnf);
                }
                if (i == IPV4_DEVCONF_IGNORE_ROUTES_WITH_LINKDOWN - 1 &&
                    new_value != old_value) {
                        ifindex = devinet_conf_ifindex(net, cnf);
-                       inet_netconf_notify_devconf(net, NETCONFA_IGNORE_ROUTES_WITH_LINKDOWN,
+                       inet_netconf_notify_devconf(net, RTM_NEWNETCONF,
+                                                   NETCONFA_IGNORE_ROUTES_WITH_LINKDOWN,
                                                    ifindex, cnf);
                }
        }
@@ -2125,7 +2139,7 @@ static int devinet_sysctl_forward(struct ctl_table *ctl, int write,
                                        container_of(cnf, struct in_device, cnf);
                                if (*valp)
                                        dev_disable_lro(idev->dev);
-                               inet_netconf_notify_devconf(net,
+                               inet_netconf_notify_devconf(net, RTM_NEWNETCONF,
                                                            NETCONFA_FORWARDING,
                                                            idev->dev->ifindex,
                                                            cnf);
@@ -2133,7 +2147,8 @@ static int devinet_sysctl_forward(struct ctl_table *ctl, int write,
                        rtnl_unlock();
                        rt_cache_flush(net);
                } else
-                       inet_netconf_notify_devconf(net, NETCONFA_FORWARDING,
+                       inet_netconf_notify_devconf(net, RTM_NEWNETCONF,
+                                                   NETCONFA_FORWARDING,
                                                    NETCONFA_IFINDEX_DEFAULT,
                                                    net->ipv4.devconf_dflt);
        }
@@ -2255,7 +2270,8 @@ static int __devinet_sysctl_register(struct net *net, char *dev_name,
 
        p->sysctl = t;
 
-       inet_netconf_notify_devconf(net, NETCONFA_ALL, ifindex, p);
+       inet_netconf_notify_devconf(net, RTM_NEWNETCONF, NETCONFA_ALL,
+                                   ifindex, p);
        return 0;
 
 free:
@@ -2264,16 +2280,18 @@ out:
        return -ENOBUFS;
 }
 
-static void __devinet_sysctl_unregister(struct ipv4_devconf *cnf)
+static void __devinet_sysctl_unregister(struct net *net,
+                                       struct ipv4_devconf *cnf, int ifindex)
 {
        struct devinet_sysctl_table *t = cnf->sysctl;
 
-       if (!t)
-               return;
+       if (t) {
+               cnf->sysctl = NULL;
+               unregister_net_sysctl_table(t->sysctl_header);
+               kfree(t);
+       }
 
-       cnf->sysctl = NULL;
-       unregister_net_sysctl_table(t->sysctl_header);
-       kfree(t);
+       inet_netconf_notify_devconf(net, RTM_DELNETCONF, 0, ifindex, NULL);
 }
 
 static int devinet_sysctl_register(struct in_device *idev)
@@ -2295,7 +2313,9 @@ static int devinet_sysctl_register(struct in_device *idev)
 
 static void devinet_sysctl_unregister(struct in_device *idev)
 {
-       __devinet_sysctl_unregister(&idev->cnf);
+       struct net *net = dev_net(idev->dev);
+
+       __devinet_sysctl_unregister(net, &idev->cnf, idev->dev->ifindex);
        neigh_sysctl_unregister(idev->arp_parms);
 }
 
@@ -2370,9 +2390,9 @@ static __net_init int devinet_init_net(struct net *net)
 
 #ifdef CONFIG_SYSCTL
 err_reg_ctl:
-       __devinet_sysctl_unregister(dflt);
+       __devinet_sysctl_unregister(net, dflt, NETCONFA_IFINDEX_DEFAULT);
 err_reg_dflt:
-       __devinet_sysctl_unregister(all);
+       __devinet_sysctl_unregister(net, all, NETCONFA_IFINDEX_ALL);
 err_reg_all:
        if (tbl != ctl_forward_entry)
                kfree(tbl);
@@ -2394,8 +2414,10 @@ static __net_exit void devinet_exit_net(struct net *net)
 
        tbl = net->ipv4.forw_hdr->ctl_table_arg;
        unregister_net_sysctl_table(net->ipv4.forw_hdr);
-       __devinet_sysctl_unregister(net->ipv4.devconf_dflt);
-       __devinet_sysctl_unregister(net->ipv4.devconf_all);
+       __devinet_sysctl_unregister(net, net->ipv4.devconf_dflt,
+                                   NETCONFA_IFINDEX_DEFAULT);
+       __devinet_sysctl_unregister(net, net->ipv4.devconf_all,
+                                   NETCONFA_IFINDEX_ALL);
        kfree(tbl);
 #endif
        kfree(net->ipv4.devconf_dflt);
index 42bfd08109dd78ab509493e8d2205d72845bb3eb..8f2133ffc2ff1b94871408a5f934cb938d3462b5 100644 (file)
@@ -1083,7 +1083,8 @@ static void nl_fib_input(struct sk_buff *skb)
 
        net = sock_net(skb->sk);
        nlh = nlmsg_hdr(skb);
-       if (skb->len < NLMSG_HDRLEN || skb->len < nlh->nlmsg_len ||
+       if (skb->len < nlmsg_total_size(sizeof(*frn)) ||
+           skb->len < nlh->nlmsg_len ||
            nlmsg_len(nlh) < sizeof(*frn))
                return;
 
diff --git a/net/ipv4/fib_notifier.c b/net/ipv4/fib_notifier.c
new file mode 100644 (file)
index 0000000..e0714d9
--- /dev/null
+++ b/net/ipv4/fib_notifier.c
@@ -0,0 +1,86 @@
+#include <linux/rtnetlink.h>
+#include <linux/notifier.h>
+#include <linux/rcupdate.h>
+#include <linux/kernel.h>
+#include <net/net_namespace.h>
+#include <net/netns/ipv4.h>
+#include <net/ip_fib.h>
+
+static ATOMIC_NOTIFIER_HEAD(fib_chain);
+
+int call_fib_notifier(struct notifier_block *nb, struct net *net,
+                     enum fib_event_type event_type,
+                     struct fib_notifier_info *info)
+{
+       info->net = net;
+       return nb->notifier_call(nb, event_type, info);
+}
+
+int call_fib_notifiers(struct net *net, enum fib_event_type event_type,
+                      struct fib_notifier_info *info)
+{
+       net->ipv4.fib_seq++;
+       info->net = net;
+       return atomic_notifier_call_chain(&fib_chain, event_type, info);
+}
+
+static unsigned int fib_seq_sum(void)
+{
+       unsigned int fib_seq = 0;
+       struct net *net;
+
+       rtnl_lock();
+       for_each_net(net)
+               fib_seq += net->ipv4.fib_seq;
+       rtnl_unlock();
+
+       return fib_seq;
+}
+
+static bool fib_dump_is_consistent(struct notifier_block *nb,
+                                  void (*cb)(struct notifier_block *nb),
+                                  unsigned int fib_seq)
+{
+       atomic_notifier_chain_register(&fib_chain, nb);
+       if (fib_seq == fib_seq_sum())
+               return true;
+       atomic_notifier_chain_unregister(&fib_chain, nb);
+       if (cb)
+               cb(nb);
+       return false;
+}
+
+#define FIB_DUMP_MAX_RETRIES 5
+int register_fib_notifier(struct notifier_block *nb,
+                         void (*cb)(struct notifier_block *nb))
+{
+       int retries = 0;
+
+       do {
+               unsigned int fib_seq = fib_seq_sum();
+               struct net *net;
+
+               /* Mutex semantics guarantee that every change done to
+                * FIB tries before we read the change sequence counter
+                * is now visible to us.
+                */
+               rcu_read_lock();
+               for_each_net_rcu(net) {
+                       fib_rules_notify(net, nb);
+                       fib_notify(net, nb);
+               }
+               rcu_read_unlock();
+
+               if (fib_dump_is_consistent(nb, cb, fib_seq))
+                       return 0;
+       } while (++retries < FIB_DUMP_MAX_RETRIES);
+
+       return -EBUSY;
+}
+EXPORT_SYMBOL(register_fib_notifier);
+
+int unregister_fib_notifier(struct notifier_block *nb)
+{
+       return atomic_notifier_chain_unregister(&fib_chain, nb);
+}
+EXPORT_SYMBOL(unregister_fib_notifier);
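register_fib_notifier() replays the current rules and FIB entries to the new listener, registers it, and then checks that the sequence sum did not move in between, retrying up to FIB_DUMP_MAX_RETRIES times before giving up with -EBUSY. A hypothetical consumer; the names below are illustrative, not from this patch:

static int my_fib_event(struct notifier_block *nb, unsigned long event,
			void *ptr)
{
	struct fib_notifier_info *info = ptr;

	switch (event) {
	case FIB_EVENT_ENTRY_ADD:
	case FIB_EVENT_ENTRY_DEL:
		/* mirror the route change for info->net into hardware */
		break;
	case FIB_EVENT_RULE_ADD:
	case FIB_EVENT_RULE_DEL:
		/* policy rules changed; may force a software fallback */
		break;
	}
	return NOTIFY_DONE;
}

static void my_fib_dump_flush(struct notifier_block *nb)
{
	/* the dump raced with FIB updates too many times: discard any
	 * half-recorded state before register_fib_notifier() fails */
}

static struct notifier_block my_fib_nb = {
	.notifier_call = my_fib_event,
};

/* at init time: err = register_fib_notifier(&my_fib_nb, my_fib_dump_flush); */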
index 2e50062f642d61bdc0c0893de4e4ff84b5be6c8d..778ecf977eb2bd7b7b3808691aaf818fc4bd680d 100644 (file)
@@ -47,6 +47,27 @@ struct fib4_rule {
 #endif
 };
 
+static bool fib4_rule_matchall(const struct fib_rule *rule)
+{
+       struct fib4_rule *r = container_of(rule, struct fib4_rule, common);
+
+       if (r->dst_len || r->src_len || r->tos)
+               return false;
+       return fib_rule_matchall(rule);
+}
+
+bool fib4_rule_default(const struct fib_rule *rule)
+{
+       if (!fib4_rule_matchall(rule) || rule->action != FR_ACT_TO_TBL ||
+           rule->l3mdev)
+               return false;
+       if (rule->table != RT_TABLE_LOCAL && rule->table != RT_TABLE_MAIN &&
+           rule->table != RT_TABLE_DEFAULT)
+               return false;
+       return true;
+}
+EXPORT_SYMBOL_GPL(fib4_rule_default);
+
 int __fib_lookup(struct net *net, struct flowi4 *flp,
                 struct fib_result *res, unsigned int flags)
 {
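fib4_rule_default() answers one question: is this rule still one of the three installed by default, i.e. an unconditional lookup into the local, main or default table? Once it returns false, custom policy routing is in effect. It is exported presumably so offload code can tell the two cases apart; an illustrative, hypothetical caller:

static int my_rule_add(struct fib_rule *rule)	/* hypothetical */
{
	if (!fib4_rule_default(rule))
		return -EOPNOTSUPP;	/* custom rule: fall back to software */
	return 0;
}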
@@ -164,12 +185,36 @@ static struct fib_table *fib_empty_table(struct net *net)
        return NULL;
 }
 
+static int call_fib_rule_notifier(struct notifier_block *nb, struct net *net,
+                                 enum fib_event_type event_type,
+                                 struct fib_rule *rule)
+{
+       struct fib_rule_notifier_info info = {
+               .rule = rule,
+       };
+
+       return call_fib_notifier(nb, net, event_type, &info.info);
+}
+
 static int call_fib_rule_notifiers(struct net *net,
-                                  enum fib_event_type event_type)
+                                  enum fib_event_type event_type,
+                                  struct fib_rule *rule)
+{
+       struct fib_rule_notifier_info info = {
+               .rule = rule,
+       };
+
+       return call_fib_notifiers(net, event_type, &info.info);
+}
+
+/* Called with rcu_read_lock() */
+void fib_rules_notify(struct net *net, struct notifier_block *nb)
 {
-       struct fib_notifier_info info;
+       struct fib_rules_ops *ops = net->ipv4.rules_ops;
+       struct fib_rule *rule;
 
-       return call_fib_notifiers(net, event_type, &info);
+       list_for_each_entry_rcu(rule, &ops->rules_list, list)
+               call_fib_rule_notifier(nb, net, FIB_EVENT_RULE_ADD, rule);
 }
 
 static const struct nla_policy fib4_rule_policy[FRA_MAX+1] = {
@@ -228,7 +273,7 @@ static int fib4_rule_configure(struct fib_rule *rule, struct sk_buff *skb,
        rule4->tos = frh->tos;
 
        net->ipv4.fib_has_custom_rules = true;
-       call_fib_rule_notifiers(net, FIB_EVENT_RULE_ADD);
+       call_fib_rule_notifiers(net, FIB_EVENT_RULE_ADD, rule);
 
        err = 0;
 errout:
@@ -250,7 +295,7 @@ static int fib4_rule_delete(struct fib_rule *rule)
                net->ipv4.fib_num_tclassid_users--;
 #endif
        net->ipv4.fib_has_custom_rules = true;
-       call_fib_rule_notifiers(net, FIB_EVENT_RULE_DEL);
+       call_fib_rule_notifiers(net, FIB_EVENT_RULE_DEL, rule);
 errout:
        return err;
 }
index 317026a39cfa2b49bf06182d89a11af0fa2688af..da449ddb8cc172bd9091c00057a69a095f98b56d 100644 (file)
@@ -57,7 +57,6 @@ static unsigned int fib_info_cnt;
 static struct hlist_head fib_info_devhash[DEVINDEX_HASHSIZE];
 
 #ifdef CONFIG_IP_ROUTE_MULTIPATH
-u32 fib_multipath_secret __read_mostly;
 
 #define for_nexthops(fi) {                                             \
        int nhsel; const struct fib_nh *nh;                             \
@@ -576,9 +575,6 @@ static void fib_rebalance(struct fib_info *fi)
 
                atomic_set(&nexthop_nh->nh_upper_bound, upper_bound);
        } endfor_nexthops(fi);
-
-       net_get_random_once(&fib_multipath_secret,
-                           sizeof(fib_multipath_secret));
 }
 
 static inline void fib_add_weight(struct fib_info *fi,
@@ -1641,7 +1637,7 @@ void fib_select_multipath(struct fib_result *res, int hash)
 #endif
 
 void fib_select_path(struct net *net, struct fib_result *res,
-                    struct flowi4 *fl4, int mp_hash)
+                    struct flowi4 *fl4, const struct sk_buff *skb)
 {
        bool oif_check;
 
@@ -1650,10 +1646,9 @@ void fib_select_path(struct net *net, struct fib_result *res,
 
 #ifdef CONFIG_IP_ROUTE_MULTIPATH
        if (res->fi->fib_nhs > 1 && oif_check) {
-               if (mp_hash < 0)
-                       mp_hash = get_hash_from_flowi4(fl4) >> 1;
+               int h = fib_multipath_hash(res->fi, fl4, skb);
 
-               fib_select_multipath(res, mp_hash);
+               fib_select_multipath(res, h);
        }
        else
 #endif
index 2f0d8233950faeac91f287644bc9476f19f74578..1201409ba1dcb18ee028003b065410b87bf4a602 100644 (file)
 #include <trace/events/fib.h>
 #include "fib_lookup.h"
 
-static unsigned int fib_seq_sum(void)
-{
-       unsigned int fib_seq = 0;
-       struct net *net;
-
-       rtnl_lock();
-       for_each_net(net)
-               fib_seq += net->ipv4.fib_seq;
-       rtnl_unlock();
-
-       return fib_seq;
-}
-
-static ATOMIC_NOTIFIER_HEAD(fib_chain);
-
-static int call_fib_notifier(struct notifier_block *nb, struct net *net,
-                            enum fib_event_type event_type,
-                            struct fib_notifier_info *info)
-{
-       info->net = net;
-       return nb->notifier_call(nb, event_type, info);
-}
-
-static void fib_rules_notify(struct net *net, struct notifier_block *nb,
-                            enum fib_event_type event_type)
-{
-#ifdef CONFIG_IP_MULTIPLE_TABLES
-       struct fib_notifier_info info;
-
-       if (net->ipv4.fib_has_custom_rules)
-               call_fib_notifier(nb, net, event_type, &info);
-#endif
-}
-
-static void fib_notify(struct net *net, struct notifier_block *nb,
-                      enum fib_event_type event_type);
-
 static int call_fib_entry_notifier(struct notifier_block *nb, struct net *net,
                                   enum fib_event_type event_type, u32 dst,
                                   int dst_len, struct fib_info *fi,
@@ -137,62 +100,6 @@ static int call_fib_entry_notifier(struct notifier_block *nb, struct net *net,
        return call_fib_notifier(nb, net, event_type, &info.info);
 }
 
-static bool fib_dump_is_consistent(struct notifier_block *nb,
-                                  void (*cb)(struct notifier_block *nb),
-                                  unsigned int fib_seq)
-{
-       atomic_notifier_chain_register(&fib_chain, nb);
-       if (fib_seq == fib_seq_sum())
-               return true;
-       atomic_notifier_chain_unregister(&fib_chain, nb);
-       if (cb)
-               cb(nb);
-       return false;
-}
-
-#define FIB_DUMP_MAX_RETRIES 5
-int register_fib_notifier(struct notifier_block *nb,
-                         void (*cb)(struct notifier_block *nb))
-{
-       int retries = 0;
-
-       do {
-               unsigned int fib_seq = fib_seq_sum();
-               struct net *net;
-
-               /* Mutex semantics guarantee that every change done to
-                * FIB tries before we read the change sequence counter
-                * is now visible to us.
-                */
-               rcu_read_lock();
-               for_each_net_rcu(net) {
-                       fib_rules_notify(net, nb, FIB_EVENT_RULE_ADD);
-                       fib_notify(net, nb, FIB_EVENT_ENTRY_ADD);
-               }
-               rcu_read_unlock();
-
-               if (fib_dump_is_consistent(nb, cb, fib_seq))
-                       return 0;
-       } while (++retries < FIB_DUMP_MAX_RETRIES);
-
-       return -EBUSY;
-}
-EXPORT_SYMBOL(register_fib_notifier);
-
-int unregister_fib_notifier(struct notifier_block *nb)
-{
-       return atomic_notifier_chain_unregister(&fib_chain, nb);
-}
-EXPORT_SYMBOL(unregister_fib_notifier);
-
-int call_fib_notifiers(struct net *net, enum fib_event_type event_type,
-                      struct fib_notifier_info *info)
-{
-       net->ipv4.fib_seq++;
-       info->net = net;
-       return atomic_notifier_call_chain(&fib_chain, event_type, info);
-}
-
 static int call_fib_entry_notifiers(struct net *net,
                                    enum fib_event_type event_type, u32 dst,
                                    int dst_len, struct fib_info *fi,
@@ -1995,8 +1902,7 @@ int fib_table_flush(struct net *net, struct fib_table *tb)
 }
 
 static void fib_leaf_notify(struct net *net, struct key_vector *l,
-                           struct fib_table *tb, struct notifier_block *nb,
-                           enum fib_event_type event_type)
+                           struct fib_table *tb, struct notifier_block *nb)
 {
        struct fib_alias *fa;
 
@@ -2012,22 +1918,21 @@ static void fib_leaf_notify(struct net *net, struct key_vector *l,
                if (tb->tb_id != fa->tb_id)
                        continue;
 
-               call_fib_entry_notifier(nb, net, event_type, l->key,
+               call_fib_entry_notifier(nb, net, FIB_EVENT_ENTRY_ADD, l->key,
                                        KEYLENGTH - fa->fa_slen, fi, fa->fa_tos,
                                        fa->fa_type, fa->tb_id);
        }
 }
 
 static void fib_table_notify(struct net *net, struct fib_table *tb,
-                            struct notifier_block *nb,
-                            enum fib_event_type event_type)
+                            struct notifier_block *nb)
 {
        struct trie *t = (struct trie *)tb->tb_data;
        struct key_vector *l, *tp = t->kv;
        t_key key = 0;
 
        while ((l = leaf_walk_rcu(&tp, key)) != NULL) {
-               fib_leaf_notify(net, l, tb, nb, event_type);
+               fib_leaf_notify(net, l, tb, nb);
 
                key = l->key + 1;
                /* stop in case of wrap around */
@@ -2036,8 +1941,7 @@ static void fib_table_notify(struct net *net, struct fib_table *tb,
        }
 }
 
-static void fib_notify(struct net *net, struct notifier_block *nb,
-                      enum fib_event_type event_type)
+void fib_notify(struct net *net, struct notifier_block *nb)
 {
        unsigned int h;
 
@@ -2046,7 +1950,7 @@ static void fib_notify(struct net *net, struct notifier_block *nb,
                struct fib_table *tb;
 
                hlist_for_each_entry_rcu(tb, head, tb_hlist)
-                       fib_table_notify(net, tb, nb, event_type);
+                       fib_table_notify(net, tb, nb);
        }
 }
 
index fc310db2708bf6c9e96befe413e89ac931818f74..43318b5f56474bc15253e74e156962dd2c8df01f 100644 (file)
@@ -464,22 +464,6 @@ out_bh_enable:
        local_bh_enable();
 }
 
-#ifdef CONFIG_IP_ROUTE_MULTIPATH
-
-/* Source and destination is swapped. See ip_multipath_icmp_hash */
-static int icmp_multipath_hash_skb(const struct sk_buff *skb)
-{
-       const struct iphdr *iph = ip_hdr(skb);
-
-       return fib_multipath_hash(iph->daddr, iph->saddr);
-}
-
-#else
-
-#define icmp_multipath_hash_skb(skb) (-1)
-
-#endif
-
 static struct rtable *icmp_route_lookup(struct net *net,
                                        struct flowi4 *fl4,
                                        struct sk_buff *skb_in,
@@ -505,8 +489,7 @@ static struct rtable *icmp_route_lookup(struct net *net,
        fl4->flowi4_oif = l3mdev_master_ifindex(skb_dst(skb_in)->dev);
 
        security_skb_classify_flow(skb_in, flowi4_to_flowi(fl4));
-       rt = __ip_route_output_key_hash(net, fl4,
-                                       icmp_multipath_hash_skb(skb_in));
+       rt = __ip_route_output_key_hash(net, fl4, skb_in);
        if (IS_ERR(rt))
                return rt;
 
index b4d5980ade3b584c444d0f0c6523f03a2f71f884..5e313c1ac94fc88eca5fe3a0e9e46e551e955ff0 100644 (file)
@@ -424,7 +424,7 @@ static int inet_csk_wait_for_connect(struct sock *sk, long timeo)
 /*
  * This will accept the next outstanding connection.
  */
-struct sock *inet_csk_accept(struct sock *sk, int flags, int *err)
+struct sock *inet_csk_accept(struct sock *sk, int flags, int *err, bool kern)
 {
        struct inet_connection_sock *icsk = inet_csk(sk);
        struct request_sock_queue *queue = &icsk->icsk_accept_queue;
index bbe7f72db9c157ba2d6c5292637c2f58ad39a123..b3cdeec85f1f2c612c362590e828f50596a5c247 100644 (file)
@@ -198,6 +198,7 @@ static void ip_expire(unsigned long arg)
        qp = container_of((struct inet_frag_queue *) arg, struct ipq, q);
        net = container_of(qp->q.net, struct net, ipv4.frags);
 
+       rcu_read_lock();
        spin_lock(&qp->q.lock);
 
        if (qp->q.flags & INET_FRAG_COMPLETE)
@@ -207,7 +208,7 @@ static void ip_expire(unsigned long arg)
        __IP_INC_STATS(net, IPSTATS_MIB_REASMFAILS);
 
        if (!inet_frag_evicting(&qp->q)) {
-               struct sk_buff *head = qp->q.fragments;
+               struct sk_buff *clone, *head = qp->q.fragments;
                const struct iphdr *iph;
                int err;
 
@@ -216,32 +217,40 @@ static void ip_expire(unsigned long arg)
                if (!(qp->q.flags & INET_FRAG_FIRST_IN) || !qp->q.fragments)
                        goto out;
 
-               rcu_read_lock();
                head->dev = dev_get_by_index_rcu(net, qp->iif);
                if (!head->dev)
-                       goto out_rcu_unlock;
+                       goto out;
+
 
                /* skb has no dst, perform route lookup again */
                iph = ip_hdr(head);
                err = ip_route_input_noref(head, iph->daddr, iph->saddr,
                                           iph->tos, head->dev);
                if (err)
-                       goto out_rcu_unlock;
+                       goto out;
 
                /* Only an end host needs to send an ICMP
                 * "Fragment Reassembly Timeout" message, per RFC792.
                 */
                if (frag_expire_skip_icmp(qp->user) &&
                    (skb_rtable(head)->rt_type != RTN_LOCAL))
-                       goto out_rcu_unlock;
+                       goto out;
+
+               clone = skb_clone(head, GFP_ATOMIC);
 
                /* Send an ICMP "Fragment Reassembly Timeout" message. */
-               icmp_send(head, ICMP_TIME_EXCEEDED, ICMP_EXC_FRAGTIME, 0);
-out_rcu_unlock:
-               rcu_read_unlock();
+               if (clone) {
+                       spin_unlock(&qp->q.lock);
+                       icmp_send(clone, ICMP_TIME_EXCEEDED,
+                                 ICMP_EXC_FRAGTIME, 0);
+                       consume_skb(clone);
+                       goto out_rcu_unlock;
+               }
        }
 out:
        spin_unlock(&qp->q.lock);
+out_rcu_unlock:
+       rcu_read_unlock();
        ipq_put(qp);
 }
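The timeout handler used to hand the queued head skb to icmp_send() with qp->q.lock held. After this change the rcu read side spans the whole function, and the ICMP error goes out on a private clone after the queue lock has been dropped, so the head skb stays owned by the queue and the transmit path no longer runs under the per-queue spinlock. The pattern in isolation, with hypothetical names:

spin_lock(&q->lock);
clone = skb_clone(head, GFP_ATOMIC);	/* head stays on the queue */
spin_unlock(&q->lock);
if (clone) {
	icmp_send(clone, ICMP_TIME_EXCEEDED, ICMP_EXC_FRAGTIME, 0);
	consume_skb(clone);
}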
 
index d6feabb0351607f282e1f78f159c0ccb88bcec96..fa2dc8f692c631f1ff7fe814c3ee27f0de2a41d8 100644 (file)
@@ -313,6 +313,7 @@ static int ip_rcv_finish(struct net *net, struct sock *sk, struct sk_buff *skb)
        const struct iphdr *iph = ip_hdr(skb);
        struct rtable *rt;
        struct net_device *dev = skb->dev;
+       void (*edemux)(struct sk_buff *skb);
 
        /* if ingress device is enslaved to an L3 master device pass the
         * skb to its handler for processing
@@ -329,8 +330,8 @@ static int ip_rcv_finish(struct net *net, struct sock *sk, struct sk_buff *skb)
                int protocol = iph->protocol;
 
                ipprot = rcu_dereference(inet_protos[protocol]);
-               if (ipprot && ipprot->early_demux) {
-                       ipprot->early_demux(skb);
+               if (ipprot && (edemux = READ_ONCE(ipprot->early_demux))) {
+                       edemux(skb);
                        /* must reload iph, skb->head might have changed */
                        iph = ip_hdr(skb);
                }
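early_demux used to be a const pointer fixed at boot; with the new net.ipv4.tcp_early_demux and net.ipv4.udp_early_demux sysctls (defaults installed in inet_init_net above) it can now be flipped at runtime between NULL and the handler parked in early_demux_handler, hence the READ_ONCE() snapshot here. A sketch of the presumed writer side, with the shape assumed from these hunks and the details hedged:

static void configure_early_demux(int enabled, int protocol)
{
	struct net_protocol *ipprot;

	rcu_read_lock();
	ipprot = rcu_dereference(inet_protos[protocol]);
	if (ipprot)
		/* paired with the READ_ONCE() in ip_rcv_finish() */
		WRITE_ONCE(ipprot->early_demux,
			   enabled ? ipprot->early_demux_handler : NULL);
	rcu_read_unlock();
}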
index 737ce826d7ecfa040d07d7f8e8d6dedd01ca7330..7a3fd25e8913a99d0fcbb256bc9001f6f1d4dd6f 100644 (file)
@@ -966,7 +966,7 @@ static int __ip_append_data(struct sock *sk,
        cork->length += length;
        if ((((length + fragheaderlen) > mtu) || (skb && skb_is_gso(skb))) &&
            (sk->sk_protocol == IPPROTO_UDP) &&
-           (rt->dst.dev->features & NETIF_F_UFO) && !rt->dst.header_len &&
+           (rt->dst.dev->features & NETIF_F_UFO) && !dst_xfrm(&rt->dst) &&
            (sk->sk_type == SOCK_DGRAM) && !sk->sk_no_check_tx) {
                err = ip_ufo_append_data(sk, queue, getfrag, from, length,
                                         hh_len, fragheaderlen, transhdrlen,
index fd9f34bbd7408a0e9b0342ec6512c69cc30edc39..c3b12b1c71621b942dd4f9ad1d60ab5bb3c66e20 100644 (file)
@@ -57,6 +57,7 @@
 #include <linux/export.h>
 #include <net/net_namespace.h>
 #include <net/arp.h>
+#include <net/dsa.h>
 #include <net/ip.h>
 #include <net/ipconfig.h>
 #include <net/route.h>
@@ -306,7 +307,7 @@ static void __init ic_close_devs(void)
        while ((d = next)) {
                next = d->next;
                dev = d->dev;
-               if ((!ic_dev || dev != ic_dev->dev) && !netdev_uses_dsa(dev)) {
+               if (d != ic_dev && !netdev_uses_dsa(dev)) {
                        pr_debug("IP-Config: Downing %s\n", dev->name);
                        dev_change_flags(dev, d->flags);
                }
index c0317c940bcdc303015f500b52198e0862440e17..5bca64fc71b717b95f196866adc3c6d951304eed 100644 (file)
@@ -631,7 +631,7 @@ static int vif_delete(struct mr_table *mrt, int vifi, int notify,
        in_dev = __in_dev_get_rtnl(dev);
        if (in_dev) {
                IPV4_DEVCONF(in_dev->cnf, MC_FORWARDING)--;
-               inet_netconf_notify_devconf(dev_net(dev),
+               inet_netconf_notify_devconf(dev_net(dev), RTM_NEWNETCONF,
                                            NETCONFA_MC_FORWARDING,
                                            dev->ifindex, &in_dev->cnf);
                ip_rt_multicast_event(in_dev);
@@ -820,8 +820,8 @@ static int vif_add(struct net *net, struct mr_table *mrt,
                return -EADDRNOTAVAIL;
        }
        IPV4_DEVCONF(in_dev->cnf, MC_FORWARDING)++;
-       inet_netconf_notify_devconf(net, NETCONFA_MC_FORWARDING, dev->ifindex,
-                                   &in_dev->cnf);
+       inet_netconf_notify_devconf(net, RTM_NEWNETCONF, NETCONFA_MC_FORWARDING,
+                                   dev->ifindex, &in_dev->cnf);
        ip_rt_multicast_event(in_dev);
 
        /* Fill in the VIF structures */
@@ -1282,7 +1282,8 @@ static void mrtsock_destruct(struct sock *sk)
        ipmr_for_each_table(mrt, net) {
                if (sk == rtnl_dereference(mrt->mroute_sk)) {
                        IPV4_DEVCONF_ALL(net, MC_FORWARDING)--;
-                       inet_netconf_notify_devconf(net, NETCONFA_MC_FORWARDING,
+                       inet_netconf_notify_devconf(net, RTM_NEWNETCONF,
+                                                   NETCONFA_MC_FORWARDING,
                                                    NETCONFA_IFINDEX_ALL,
                                                    net->ipv4.devconf_all);
                        RCU_INIT_POINTER(mrt->mroute_sk, NULL);
@@ -1344,7 +1345,8 @@ int ip_mroute_setsockopt(struct sock *sk, int optname, char __user *optval,
                if (ret == 0) {
                        rcu_assign_pointer(mrt->mroute_sk, sk);
                        IPV4_DEVCONF_ALL(net, MC_FORWARDING)++;
-                       inet_netconf_notify_devconf(net, NETCONFA_MC_FORWARDING,
+                       inet_netconf_notify_devconf(net, RTM_NEWNETCONF,
+                                                   NETCONFA_MC_FORWARDING,
                                                    NETCONFA_IFINDEX_ALL,
                                                    net->ipv4.devconf_all);
                }
index 6241a81fd7f5a3df8fb3cf251bfdd407dda6a1f6..f17dab1dee6e171148a386302081188c0e83ee5d 100644 (file)
@@ -562,8 +562,6 @@ static int translate_table(struct xt_table_info *newinfo, void *entry0,
                    XT_ERROR_TARGET) == 0)
                        ++newinfo->stacksize;
        }
-       if (ret != 0)
-               goto out_free;
 
        ret = -EINVAL;
        if (i != repl->num_entries)
index 52f26459efc345a8a0c00d356306fb5fd398547e..fcbdc0c49b0e514d338bb48a84910b0ebf7ba6bd 100644 (file)
@@ -22,6 +22,7 @@
 #include <linux/icmp.h>
 #include <linux/if_arp.h>
 #include <linux/seq_file.h>
+#include <linux/refcount.h>
 #include <linux/netfilter_arp.h>
 #include <linux/netfilter/x_tables.h>
 #include <linux/netfilter_ipv4/ip_tables.h>
@@ -40,8 +41,8 @@ MODULE_DESCRIPTION("Xtables: CLUSTERIP target");
 
 struct clusterip_config {
        struct list_head list;                  /* list of all configs */
-       atomic_t refcount;                      /* reference count */
-       atomic_t entries;                       /* number of entries/rules
+       refcount_t refcount;                    /* reference count */
+       refcount_t entries;                     /* number of entries/rules
                                                 * referencing us */
 
        __be32 clusterip;                       /* the IP address */
@@ -77,7 +78,7 @@ struct clusterip_net {
 static inline void
 clusterip_config_get(struct clusterip_config *c)
 {
-       atomic_inc(&c->refcount);
+       refcount_inc(&c->refcount);
 }
 
 
@@ -89,7 +90,7 @@ static void clusterip_config_rcu_free(struct rcu_head *head)
 static inline void
 clusterip_config_put(struct clusterip_config *c)
 {
-       if (atomic_dec_and_test(&c->refcount))
+       if (refcount_dec_and_test(&c->refcount))
                call_rcu_bh(&c->rcu, clusterip_config_rcu_free);
 }
 
@@ -103,7 +104,7 @@ clusterip_config_entry_put(struct clusterip_config *c)
        struct clusterip_net *cn = net_generic(net, clusterip_net_id);
 
        local_bh_disable();
-       if (atomic_dec_and_lock(&c->entries, &cn->lock)) {
+       if (refcount_dec_and_lock(&c->entries, &cn->lock)) {
                list_del_rcu(&c->list);
                spin_unlock(&cn->lock);
                local_bh_enable();
@@ -149,10 +150,10 @@ clusterip_config_find_get(struct net *net, __be32 clusterip, int entry)
                        c = NULL;
                else
 #endif
-               if (unlikely(!atomic_inc_not_zero(&c->refcount)))
+               if (unlikely(!refcount_inc_not_zero(&c->refcount)))
                        c = NULL;
                else if (entry)
-                       atomic_inc(&c->entries);
+                       refcount_inc(&c->entries);
        }
        rcu_read_unlock_bh();
 
@@ -188,8 +189,8 @@ clusterip_config_init(const struct ipt_clusterip_tgt_info *i, __be32 ip,
        clusterip_config_init_nodelist(c, i);
        c->hash_mode = i->hash_mode;
        c->hash_initval = i->hash_initval;
-       atomic_set(&c->refcount, 1);
-       atomic_set(&c->entries, 1);
+       refcount_set(&c->refcount, 1);
+       refcount_set(&c->entries, 1);
 
        spin_lock_bh(&cn->lock);
        if (__clusterip_config_find(net, ip)) {
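The conversion is nearly mechanical (atomic_inc -> refcount_inc, atomic_dec_and_test -> refcount_dec_and_test, atomic_inc_not_zero -> refcount_inc_not_zero, atomic_dec_and_lock -> refcount_dec_and_lock), but refcount_t saturates instead of wrapping on overflow and warns on increment-from-zero, turning refcount bugs into WARNs rather than use-after-frees. The idiom on a hypothetical object:

#include <linux/refcount.h>
#include <linux/slab.h>

struct my_obj {				/* hypothetical */
	refcount_t ref;
};

static void my_obj_hold(struct my_obj *o)
{
	refcount_inc(&o->ref);		/* warns if ref was already zero */
}

static void my_obj_put(struct my_obj *o)
{
	if (refcount_dec_and_test(&o->ref))
		kfree(o);
}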
index bc1486f2c0643355ddac067cb79f075cafd788d1..2e14ed11a35cfc83db845e972521b2e8894f97c6 100644 (file)
@@ -165,6 +165,10 @@ static unsigned int ipv4_conntrack_local(void *priv,
        if (skb->len < sizeof(struct iphdr) ||
            ip_hdrlen(skb) < sizeof(struct iphdr))
                return NF_ACCEPT;
+
+       if (ip_is_fragment(ip_hdr(skb))) /* IP_NODEFRAG setsockopt set */
+               return NF_ACCEPT;
+
        return nf_conntrack_in(state->net, PF_INET, state->hook, skb);
 }
 
index f8aad03d674b05008edb5b9883b3a26b2fa7461f..6f5e8d01b876933a68e5f6cf8b2a48f8c4e17262 100644 (file)
@@ -255,11 +255,6 @@ nf_nat_ipv4_fn(void *priv, struct sk_buff *skb,
        /* maniptype == SRC for postrouting. */
        enum nf_nat_manip_type maniptype = HOOK2MANIP(state->hook);
 
-       /* We never see fragments: conntrack defrags on pre-routing
-        * and local-out, and nf_nat_out protects post-routing.
-        */
-       NF_CT_ASSERT(!ip_is_fragment(ip_hdr(skb)));
-
        ct = nf_ct_get(skb, &ctinfo);
        /* Can't track?  It's not due to stress, or conntrack would
         * have dropped it.  Hence it's the user's responsibilty to
index c9b52c361da2e6acc746c2de86d8c7f3af0a9b39..da04b9c33ef39761466a10867164c8a8b49f54b3 100644 (file)
@@ -998,18 +998,6 @@ err_id_free:
  *
  *****************************************************************************/
 
-static void hex_dump(const unsigned char *buf, size_t len)
-{
-       size_t i;
-
-       for (i = 0; i < len; i++) {
-               if (i && !(i % 16))
-                       printk("\n");
-               printk("%02x ", *(buf + i));
-       }
-       printk("\n");
-}
-
 /*
  * Parse and mangle SNMP message according to mapping.
  * (And this is the fucking 'basic' method).
@@ -1026,7 +1014,8 @@ static int snmp_parse_mangle(unsigned char *msg,
        struct snmp_object *obj;
 
        if (debug > 1)
-               hex_dump(msg, len);
+               print_hex_dump(KERN_DEBUG, "", DUMP_PREFIX_NONE, 16, 1,
+                              msg, len, 0);
 
        asn1_open(&ctx, msg, len);
 
@@ -1260,16 +1249,6 @@ static const struct nf_conntrack_expect_policy snmp_exp_policy = {
        .timeout        = 180,
 };
 
-static struct nf_conntrack_helper snmp_helper __read_mostly = {
-       .me                     = THIS_MODULE,
-       .help                   = help,
-       .expect_policy          = &snmp_exp_policy,
-       .name                   = "snmp",
-       .tuple.src.l3num        = AF_INET,
-       .tuple.src.u.udp.port   = cpu_to_be16(SNMP_PORT),
-       .tuple.dst.protonum     = IPPROTO_UDP,
-};
-
 static struct nf_conntrack_helper snmp_trap_helper __read_mostly = {
        .me                     = THIS_MODULE,
        .help                   = help,
@@ -1288,22 +1267,16 @@ static struct nf_conntrack_helper snmp_trap_helper __read_mostly = {
 
 static int __init nf_nat_snmp_basic_init(void)
 {
-       int ret = 0;
-
        BUG_ON(nf_nat_snmp_hook != NULL);
        RCU_INIT_POINTER(nf_nat_snmp_hook, help);
 
-       ret = nf_conntrack_helper_register(&snmp_trap_helper);
-       if (ret < 0) {
-               nf_conntrack_helper_unregister(&snmp_helper);
-               return ret;
-       }
-       return ret;
+       return nf_conntrack_helper_register(&snmp_trap_helper);
 }
 
 static void __exit nf_nat_snmp_basic_fini(void)
 {
        RCU_INIT_POINTER(nf_nat_snmp_hook, NULL);
+       synchronize_rcu();
        nf_conntrack_helper_unregister(&snmp_trap_helper);
 }
 
index 146d86105183e1a456a0f17ed6bb5371aa1e8f76..7cd8d0d918f82e275e0ecd31c1cea8ec8fcf345d 100644 (file)
@@ -104,7 +104,6 @@ EXPORT_SYMBOL_GPL(nf_reject_ip_tcphdr_put);
 void nf_send_reset(struct net *net, struct sk_buff *oldskb, int hook)
 {
        struct sk_buff *nskb;
-       const struct iphdr *oiph;
        struct iphdr *niph;
        const struct tcphdr *oth;
        struct tcphdr _oth;
@@ -116,8 +115,6 @@ void nf_send_reset(struct net *net, struct sk_buff *oldskb, int hook)
        if (skb_rtable(oldskb)->rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST))
                return;
 
-       oiph = ip_hdr(oldskb);
-
        nskb = alloc_skb(sizeof(struct iphdr) + sizeof(struct tcphdr) +
                         LL_MAX_HEADER, GFP_ATOMIC);
        if (!nskb)
index 2981291910dd2cac2d508fcde89083afc22affd4..f4e4462cb5bb1b877fac32d7718ead86e97e91f2 100644 (file)
@@ -90,7 +90,7 @@ void nft_fib4_eval(const struct nft_expr *expr, struct nft_regs *regs,
 
        if (nft_hook(pkt) == NF_INET_PRE_ROUTING &&
            nft_fib_is_loopback(pkt->skb, nft_in(pkt))) {
-               nft_fib_store_result(dest, priv->result, pkt,
+               nft_fib_store_result(dest, priv, pkt,
                                     nft_in(pkt)->ifindex);
                return;
        }
@@ -99,7 +99,7 @@ void nft_fib4_eval(const struct nft_expr *expr, struct nft_regs *regs,
        if (ipv4_is_zeronet(iph->saddr)) {
                if (ipv4_is_lbcast(iph->daddr) ||
                    ipv4_is_local_multicast(iph->daddr)) {
-                       nft_fib_store_result(dest, priv->result, pkt,
+                       nft_fib_store_result(dest, priv, pkt,
                                             get_ifindex(pkt->skb->dev));
                        return;
                }
index a0ea8aad1bf150bcb9e8e0aa2e6b45a5347599e4..f18677277119305aeea043d81deb4e6ee7d20b7c 100644 (file)
@@ -26,10 +26,10 @@ static void nft_masq_ipv4_eval(const struct nft_expr *expr,
        memset(&range, 0, sizeof(range));
        range.flags = priv->flags;
        if (priv->sreg_proto_min) {
-               range.min_proto.all =
-                       *(__be16 *)&regs->data[priv->sreg_proto_min];
-               range.max_proto.all =
-                       *(__be16 *)&regs->data[priv->sreg_proto_max];
+               range.min_proto.all = (__force __be16)nft_reg_load16(
+                       &regs->data[priv->sreg_proto_min]);
+               range.max_proto.all = (__force __be16)nft_reg_load16(
+                       &regs->data[priv->sreg_proto_max]);
        }
        regs->verdict.code = nf_nat_masquerade_ipv4(pkt->skb, nft_hook(pkt),
                                                    &range, nft_out(pkt));
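nft registers are arrays of u32, and this series pins down the convention for 16-bit values: they occupy the first two bytes of the register and are written and read through plain u16 accesses, so the layout is identical on little- and big-endian, where going through a 16-bit cast of a u32-stored value would pick up the wrong half. The presumed helper pair from include/net/netfilter/nf_tables.h that these hunks lean on (recalled, not quoted); the (__force __be16) cast then restores the network-order type the NAT core expects, and the same conversion appears in the redirect expression just below:

static inline void nft_reg_store16(u32 *dreg, u16 val)
{
	*dreg = 0;
	*(u16 *)dreg = val;
}

static inline u16 nft_reg_load16(u32 *sreg)
{
	return *(u16 *)sreg;
}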
index 1650ed23c15dd00bb8e4bd741dc2d02d6cbf2c4e..5120be1d31185dd5c879419f8889d36ddb363591 100644 (file)
@@ -26,10 +26,10 @@ static void nft_redir_ipv4_eval(const struct nft_expr *expr,
 
        memset(&mr, 0, sizeof(mr));
        if (priv->sreg_proto_min) {
-               mr.range[0].min.all =
-                       *(__be16 *)&regs->data[priv->sreg_proto_min];
-               mr.range[0].max.all =
-                       *(__be16 *)&regs->data[priv->sreg_proto_max];
+               mr.range[0].min.all = (__force __be16)nft_reg_load16(
+                       &regs->data[priv->sreg_proto_min]);
+               mr.range[0].max.all = (__force __be16)nft_reg_load16(
+                       &regs->data[priv->sreg_proto_max]);
                mr.range[0].flags |= NF_NAT_RANGE_PROTO_SPECIFIED;
        }
 
index 2af6244b83e27ae384e96cf071c10c5a89674804..ccfbce13a6333a65dab64e4847dd510dfafb1b43 100644 (file)
@@ -156,17 +156,18 @@ int ping_hash(struct sock *sk)
 void ping_unhash(struct sock *sk)
 {
        struct inet_sock *isk = inet_sk(sk);
+
        pr_debug("ping_unhash(isk=%p,isk->num=%u)\n", isk, isk->inet_num);
+       write_lock_bh(&ping_table.lock);
        if (sk_hashed(sk)) {
-               write_lock_bh(&ping_table.lock);
                hlist_nulls_del(&sk->sk_nulls_node);
                sk_nulls_node_init(&sk->sk_nulls_node);
                sock_put(sk);
                isk->inet_num = 0;
                isk->inet_sport = 0;
                sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1);
-               write_unlock_bh(&ping_table.lock);
        }
+       write_unlock_bh(&ping_table.lock);
 }
 EXPORT_SYMBOL_GPL(ping_unhash);
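The old code tested sk_hashed() before taking ping_table.lock, so two threads unhashing the same socket could both pass the test and both run the teardown, dropping the socket refcount twice. Widening the lock makes the check-and-unhash atomic. The window being closed, as an interleaving:

/*
 *   CPU0                               CPU1
 *   sk_hashed(sk) -> true
 *                                      sk_hashed(sk) -> true
 *   lock; unhash; sock_put(); unlock
 *                                      lock; unhash again; sock_put()
 *                                      -> refcount underflow / UAF
 */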
 
index 69cf49e8356d0184f774840c9dc96560f2ae2f2b..4ccbf464d1acf5f433dd2a0768691f5d22e3033d 100644 (file)
@@ -199,7 +199,6 @@ static const struct snmp_mib snmp4_net_list[] = {
        SNMP_MIB_ITEM("TW", LINUX_MIB_TIMEWAITED),
        SNMP_MIB_ITEM("TWRecycled", LINUX_MIB_TIMEWAITRECYCLED),
        SNMP_MIB_ITEM("TWKilled", LINUX_MIB_TIMEWAITKILLED),
-       SNMP_MIB_ITEM("PAWSPassive", LINUX_MIB_PAWSPASSIVEREJECTED),
        SNMP_MIB_ITEM("PAWSActive", LINUX_MIB_PAWSACTIVEREJECTED),
        SNMP_MIB_ITEM("PAWSEstab", LINUX_MIB_PAWSESTABREJECTED),
        SNMP_MIB_ITEM("DelayedACKs", LINUX_MIB_DELAYEDACKS),
index 4b7c0ec65251ef40577a2d5e360fcbaed391a566..32a691b7ce2c7e79eab6491b52457a11e666f7d3 100644 (file)
@@ -28,7 +28,7 @@
 #include <linux/spinlock.h>
 #include <net/protocol.h>
 
-const struct net_protocol __rcu *inet_protos[MAX_INET_PROTOS] __read_mostly;
+struct net_protocol __rcu *inet_protos[MAX_INET_PROTOS] __read_mostly;
 const struct net_offload __rcu *inet_offloads[MAX_INET_PROTOS] __read_mostly;
 EXPORT_SYMBOL(inet_offloads);
 
index 8471dd116771462d149e1da2807e446b69b74bcc..5e1e60546fce7db5092c816e51ec38f6975ff0d2 100644
@@ -1734,45 +1734,97 @@ out:
 }
 
 #ifdef CONFIG_IP_ROUTE_MULTIPATH
-
 /* To make ICMP packets follow the right flow, the multipath hash is
- * calculated from the inner IP addresses in reverse order.
+ * calculated from the inner IP addresses.
  */
-static int ip_multipath_icmp_hash(struct sk_buff *skb)
+static void ip_multipath_l3_keys(const struct sk_buff *skb,
+                                struct flow_keys *hash_keys)
 {
        const struct iphdr *outer_iph = ip_hdr(skb);
-       struct icmphdr _icmph;
+       const struct iphdr *inner_iph;
        const struct icmphdr *icmph;
        struct iphdr _inner_iph;
-       const struct iphdr *inner_iph;
+       struct icmphdr _icmph;
+
+       hash_keys->addrs.v4addrs.src = outer_iph->saddr;
+       hash_keys->addrs.v4addrs.dst = outer_iph->daddr;
+       if (likely(outer_iph->protocol != IPPROTO_ICMP))
+               return;
 
        if (unlikely((outer_iph->frag_off & htons(IP_OFFSET)) != 0))
-               goto standard_hash;
+               return;
 
        icmph = skb_header_pointer(skb, outer_iph->ihl * 4, sizeof(_icmph),
                                   &_icmph);
        if (!icmph)
-               goto standard_hash;
+               return;
 
        if (icmph->type != ICMP_DEST_UNREACH &&
            icmph->type != ICMP_REDIRECT &&
            icmph->type != ICMP_TIME_EXCEEDED &&
-           icmph->type != ICMP_PARAMETERPROB) {
-               goto standard_hash;
-       }
+           icmph->type != ICMP_PARAMETERPROB)
+               return;
 
        inner_iph = skb_header_pointer(skb,
                                       outer_iph->ihl * 4 + sizeof(_icmph),
                                       sizeof(_inner_iph), &_inner_iph);
        if (!inner_iph)
-               goto standard_hash;
+               return;
+       hash_keys->addrs.v4addrs.src = inner_iph->saddr;
+       hash_keys->addrs.v4addrs.dst = inner_iph->daddr;
+}
 
-       return fib_multipath_hash(inner_iph->daddr, inner_iph->saddr);
+/* if skb is set it will be used and fl4 can be NULL */
+int fib_multipath_hash(const struct fib_info *fi, const struct flowi4 *fl4,
+                      const struct sk_buff *skb)
+{
+       struct net *net = fi->fib_net;
+       struct flow_keys hash_keys;
+       u32 mhash;
 
-standard_hash:
-       return fib_multipath_hash(outer_iph->saddr, outer_iph->daddr);
-}
+       switch (net->ipv4.sysctl_fib_multipath_hash_policy) {
+       case 0:
+               memset(&hash_keys, 0, sizeof(hash_keys));
+               hash_keys.control.addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS;
+               if (skb) {
+                       ip_multipath_l3_keys(skb, &hash_keys);
+               } else {
+                       hash_keys.addrs.v4addrs.src = fl4->saddr;
+                       hash_keys.addrs.v4addrs.dst = fl4->daddr;
+               }
+               break;
+       case 1:
+               /* skb is currently provided only when forwarding */
+               if (skb) {
+                       unsigned int flag = FLOW_DISSECTOR_F_STOP_AT_ENCAP;
+                       struct flow_keys keys;
+
+                       /* short-circuit if we already have L4 hash present */
+                       if (skb->l4_hash)
+                               return skb_get_hash_raw(skb) >> 1;
+                       memset(&hash_keys, 0, sizeof(hash_keys));
+                       skb_flow_dissect_flow_keys(skb, &keys, flag);
+                       hash_keys.addrs.v4addrs.src = keys.addrs.v4addrs.src;
+                       hash_keys.addrs.v4addrs.dst = keys.addrs.v4addrs.dst;
+                       hash_keys.ports.src = keys.ports.src;
+                       hash_keys.ports.dst = keys.ports.dst;
+                       hash_keys.basic.ip_proto = keys.basic.ip_proto;
+               } else {
+                       memset(&hash_keys, 0, sizeof(hash_keys));
+                       hash_keys.control.addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS;
+                       hash_keys.addrs.v4addrs.src = fl4->saddr;
+                       hash_keys.addrs.v4addrs.dst = fl4->daddr;
+                       hash_keys.ports.src = fl4->fl4_sport;
+                       hash_keys.ports.dst = fl4->fl4_dport;
+                       hash_keys.basic.ip_proto = fl4->flowi4_proto;
+               }
+               break;
+       }
+       mhash = flow_hash_from_keys(&hash_keys);
 
+       return mhash >> 1;
+}
+EXPORT_SYMBOL_GPL(fib_multipath_hash);
 #endif /* CONFIG_IP_ROUTE_MULTIPATH */
 
 static int ip_mkroute_input(struct sk_buff *skb,
@@ -1782,12 +1834,8 @@ static int ip_mkroute_input(struct sk_buff *skb,
 {
 #ifdef CONFIG_IP_ROUTE_MULTIPATH
        if (res->fi && res->fi->fib_nhs > 1) {
-               int h;
+               int h = fib_multipath_hash(res->fi, NULL, skb);
 
-               if (unlikely(ip_hdr(skb)->protocol == IPPROTO_ICMP))
-                       h = ip_multipath_icmp_hash(skb);
-               else
-                       h = fib_multipath_hash(saddr, daddr);
                fib_select_multipath(res, h);
        }
 #endif
@@ -2203,7 +2251,7 @@ add:
  */
 
 struct rtable *__ip_route_output_key_hash(struct net *net, struct flowi4 *fl4,
-                                         int mp_hash)
+                                         const struct sk_buff *skb)
 {
        struct net_device *dev_out = NULL;
        __u8 tos = RT_FL_TOS(fl4);
@@ -2365,7 +2413,7 @@ struct rtable *__ip_route_output_key_hash(struct net *net, struct flowi4 *fl4,
                goto make_route;
        }
 
-       fib_select_path(net, &res, fl4, mp_hash);
+       fib_select_path(net, &res, fl4, skb);
 
        dev_out = FIB_RES_DEV(res);
        fl4->flowi4_oif = dev_out->ifindex;
@@ -2619,10 +2667,6 @@ static int inet_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr *nlh)
        skb_reset_mac_header(skb);
        skb_reset_network_header(skb);
 
-       /* Bugfix: need to give ip_route_input enough of an IP header to not gag. */
-       ip_hdr(skb)->protocol = IPPROTO_ICMP;
-       skb_reserve(skb, MAX_HEADER + sizeof(struct iphdr));
-
        src = tb[RTA_SRC] ? nla_get_in_addr(tb[RTA_SRC]) : 0;
        dst = tb[RTA_DST] ? nla_get_in_addr(tb[RTA_DST]) : 0;
        iif = tb[RTA_IIF] ? nla_get_u32(tb[RTA_IIF]) : 0;
@@ -2632,6 +2676,15 @@ static int inet_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr *nlh)
        else
                uid = (iif ? INVALID_UID : current_uid());
 
+       /* Bugfix: need to give ip_route_input enough of an IP header to
+        * not gag.
+        */
+       ip_hdr(skb)->protocol = IPPROTO_UDP;
+       ip_hdr(skb)->saddr = src;
+       ip_hdr(skb)->daddr = dst;
+
+       skb_reserve(skb, MAX_HEADER + sizeof(struct iphdr));
+
        memset(&fl4, 0, sizeof(fl4));
        fl4.daddr = dst;
        fl4.saddr = src;
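
The route.c hunks above rework IPv4 multipath selection: the old saddr/daddr hash (with a special reverse-order ICMP case) becomes fib_multipath_hash(), which consults the new net.ipv4.fib_multipath_hash_policy sysctl added later in this diff. Policy 0 hashes L3 addresses, still substituting the inner header for ICMP errors; policy 1 hashes the five-tuple, reusing a hardware-provided L4 hash when one is present. The RTM_GETROUTE hunk now fakes a UDP header carrying the real saddr/daddr so userspace route queries hash like real traffic. A rough standalone model of the policy switch, where mix() only stands in for the kernel's flow_hash_from_keys():

#include <stdint.h>
#include <stdio.h>

struct flow {
	uint32_t saddr, daddr;
	uint16_t sport, dport;
	uint8_t  proto;
};

static uint32_t mix(uint32_t h, uint32_t v)
{
	h ^= v;
	h *= 0x9e3779b1u;	/* toy mixer, not jhash */
	return h;
}

static uint32_t multipath_hash(const struct flow *fl, int policy)
{
	uint32_t h = 0;

	h = mix(h, fl->saddr);
	h = mix(h, fl->daddr);
	if (policy == 1) {	/* L4: fold in ports and protocol */
		h = mix(h, ((uint32_t)fl->sport << 16) | fl->dport);
		h = mix(h, fl->proto);
	}
	return h >> 1;		/* the patch likewise returns mhash >> 1 */
}

int main(void)
{
	struct flow fl = { 0x0a000001, 0x0a000002, 40000, 443, 6 };

	printf("L3 hash: %u\n", multipath_hash(&fl, 0));
	printf("L4 hash: %u\n", multipath_hash(&fl, 1));
	return 0;
}
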
index d6880a6149ee80c6c75f4fe75b46a9d18d204d5d..6fb25693c00b92cbf881a13b06f2276b288853b1 100644
@@ -24,6 +24,7 @@
 #include <net/cipso_ipv4.h>
 #include <net/inet_frag.h>
 #include <net/ping.h>
+#include <net/protocol.h>
 
 static int zero;
 static int one = 1;
@@ -294,6 +295,58 @@ bad_key:
        return ret;
 }
 
+static void proc_configure_early_demux(int enabled, int protocol)
+{
+       struct net_protocol *ipprot;
+#if IS_ENABLED(CONFIG_IPV6)
+       struct inet6_protocol *ip6prot;
+#endif
+
+       ipprot = rcu_dereference(inet_protos[protocol]);
+       if (ipprot)
+               ipprot->early_demux = enabled ? ipprot->early_demux_handler :
+                                               NULL;
+
+#if IS_ENABLED(CONFIG_IPV6)
+       ip6prot = rcu_dereference(inet6_protos[protocol]);
+       if (ip6prot)
+               ip6prot->early_demux = enabled ? ip6prot->early_demux_handler :
+                                                NULL;
+#endif
+}
+
+static int proc_tcp_early_demux(struct ctl_table *table, int write,
+                               void __user *buffer, size_t *lenp, loff_t *ppos)
+{
+       int ret = 0;
+
+       ret = proc_dointvec(table, write, buffer, lenp, ppos);
+
+       if (write && !ret) {
+               int enabled = init_net.ipv4.sysctl_tcp_early_demux;
+
+               proc_configure_early_demux(enabled, IPPROTO_TCP);
+       }
+
+       return ret;
+}
+
+static int proc_udp_early_demux(struct ctl_table *table, int write,
+                               void __user *buffer, size_t *lenp, loff_t *ppos)
+{
+       int ret = 0;
+
+       ret = proc_dointvec(table, write, buffer, lenp, ppos);
+
+       if (write && !ret) {
+               int enabled = init_net.ipv4.sysctl_udp_early_demux;
+
+               proc_configure_early_demux(enabled, IPPROTO_UDP);
+       }
+
+       return ret;
+}
+
 static struct ctl_table ipv4_table[] = {
        {
                .procname       = "tcp_timestamps",
@@ -749,6 +802,20 @@ static struct ctl_table ipv4_net_table[] = {
                .mode           = 0644,
                .proc_handler   = proc_dointvec
        },
+       {
+               .procname       = "udp_early_demux",
+               .data           = &init_net.ipv4.sysctl_udp_early_demux,
+               .maxlen         = sizeof(int),
+               .mode           = 0644,
+               .proc_handler   = proc_udp_early_demux
+       },
+       {
+               .procname       = "tcp_early_demux",
+               .data           = &init_net.ipv4.sysctl_tcp_early_demux,
+               .maxlen         = sizeof(int),
+               .mode           = 0644,
+               .proc_handler   = proc_tcp_early_demux
+       },
        {
                .procname       = "ip_default_ttl",
                .data           = &init_net.ipv4.sysctl_ip_default_ttl,
@@ -980,13 +1047,6 @@ static struct ctl_table ipv4_net_table[] = {
                .mode           = 0644,
                .proc_handler   = proc_dointvec
        },
-       {
-               .procname       = "tcp_tw_recycle",
-               .data           = &init_net.ipv4.tcp_death_row.sysctl_tw_recycle,
-               .maxlen         = sizeof(int),
-               .mode           = 0644,
-               .proc_handler   = proc_dointvec
-       },
        {
                .procname       = "tcp_max_syn_backlog",
                .data           = &init_net.ipv4.sysctl_max_syn_backlog,
@@ -1004,6 +1064,15 @@ static struct ctl_table ipv4_net_table[] = {
                .extra1         = &zero,
                .extra2         = &one,
        },
+       {
+               .procname       = "fib_multipath_hash_policy",
+               .data           = &init_net.ipv4.sysctl_fib_multipath_hash_policy,
+               .maxlen         = sizeof(int),
+               .mode           = 0644,
+               .proc_handler   = proc_dointvec_minmax,
+               .extra1         = &zero,
+               .extra2         = &one,
+       },
 #endif
        {
                .procname       = "ip_unprivileged_port_start",
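
The sysctl hunks above add net.ipv4.{tcp,udp}_early_demux knobs and the fib_multipath_hash_policy selector used by the route.c rework, and delete tcp_tw_recycle (see the tcp_metrics.c note further down). The demux toggle works by repointing the protocol's live early_demux hook at either its saved early_demux_handler or NULL, which is also why inet_protos loses its const qualifier in the net/ipv4/protocol.c hunk earlier. A userspace sketch of the toggle; the two struct fields mirror the patch, everything else is mock:

#include <stddef.h>
#include <stdio.h>

struct proto {
	void (*early_demux)(int pkt);		/* live hook, may be NULL */
	void (*early_demux_handler)(int pkt);	/* the real implementation */
};

static void tcp_demux(int pkt)
{
	printf("early demux of packet %d\n", pkt);
}

static struct proto tcp_proto = {
	.early_demux		= tcp_demux,
	.early_demux_handler	= tcp_demux,
};

static void configure_early_demux(struct proto *p, int enabled)
{
	p->early_demux = enabled ? p->early_demux_handler : NULL;
}

int main(void)
{
	configure_early_demux(&tcp_proto, 0);	/* models a sysctl write of 0 */
	if (tcp_proto.early_demux)
		tcp_proto.early_demux(1);
	else
		printf("early demux disabled\n");
	return 0;
}

At runtime this corresponds to, e.g., writing 0 to /proc/sys/net/ipv4/tcp_early_demux on a patched kernel to disable the fast path.
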
index cf4555581282c608f920254078264e36e18584c6..94f0b5b50e0d728c3edab175aee9d769cd80907f 100644
@@ -2393,7 +2393,7 @@ static int tcp_repair_options_est(struct tcp_sock *tp,
                                u16 snd_wscale = opt.opt_val & 0xFFFF;
                                u16 rcv_wscale = opt.opt_val >> 16;
 
-                               if (snd_wscale > 14 || rcv_wscale > 14)
+                               if (snd_wscale > TCP_MAX_WSCALE || rcv_wscale > TCP_MAX_WSCALE)
                                        return -EFBIG;
 
                                tp->rx_opt.snd_wscale = snd_wscale;
@@ -2470,7 +2470,7 @@ static int do_tcp_setsockopt(struct sock *sk, int level,
                /* Values greater than interface MTU won't take effect. However
                 * at the point when this call is done we typically don't yet
                 * know which interface is going to be used */
-               if (val < TCP_MIN_MSS || val > MAX_TCP_WINDOW) {
+               if (val && (val < TCP_MIN_MSS || val > MAX_TCP_WINDOW)) {
                        err = -EINVAL;
                        break;
                }
@@ -2770,7 +2770,7 @@ void tcp_get_info(struct sock *sk, struct tcp_info *info)
 {
        const struct tcp_sock *tp = tcp_sk(sk); /* iff sk_type == SOCK_STREAM */
        const struct inet_connection_sock *icsk = inet_csk(sk);
-       u32 now = tcp_time_stamp, intv;
+       u32 now, intv;
        u64 rate64;
        bool slow;
        u32 rate;
@@ -2839,6 +2839,7 @@ void tcp_get_info(struct sock *sk, struct tcp_info *info)
        info->tcpi_retrans = tp->retrans_out;
        info->tcpi_fackets = tp->fackets_out;
 
+       now = tcp_time_stamp;
        info->tcpi_last_data_sent = jiffies_to_msecs(now - tp->lsndtime);
        info->tcpi_last_data_recv = jiffies_to_msecs(now - icsk->icsk_ack.lrcvtime);
        info->tcpi_last_ack_recv = jiffies_to_msecs(now - tp->rcv_tstamp);
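
Three small tcp.c fixes above: the window-scale bounds now use TCP_MAX_WSCALE instead of a bare 14, setsockopt(TCP_MAXSEG) accepts 0 again (meaning "use the default") while non-zero values stay range-checked, and tcp_get_info() samples tcp_time_stamp right before the fields that consume it rather than at entry. A quick probe of the restored TCP_MAXSEG behaviour, assuming a Linux host with this check applied (TCP_MIN_MSS is 88 in the kernel):

#include <netinet/in.h>
#include <netinet/tcp.h>
#include <stdio.h>
#include <sys/socket.h>
#include <unistd.h>

int main(void)
{
	int fd = socket(AF_INET, SOCK_STREAM, 0);
	int zero = 0, tiny = 10;	/* 10 < TCP_MIN_MSS, must be rejected */

	if (fd < 0) {
		perror("socket");
		return 1;
	}
	if (setsockopt(fd, IPPROTO_TCP, TCP_MAXSEG, &zero, sizeof(zero)))
		perror("TCP_MAXSEG=0");		/* expected: no error printed */
	if (setsockopt(fd, IPPROTO_TCP, TCP_MAXSEG, &tiny, sizeof(tiny)))
		perror("TCP_MAXSEG=10");	/* expected: EINVAL */
	close(fd);
	return 0;
}
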
index 39c393cc0fd3c17130cd5d8d8b37f31ad3aeafd9..31f2765ef85126bdced6b6efca29ca17585b9d28 100644
@@ -126,7 +126,8 @@ int sysctl_tcp_invalid_ratelimit __read_mostly = HZ/2;
 #define REXMIT_LOST    1 /* retransmit packets marked lost */
 #define REXMIT_NEW     2 /* FRTO-style transmit of unsent/new packets */
 
-static void tcp_gro_dev_warn(struct sock *sk, const struct sk_buff *skb)
+static void tcp_gro_dev_warn(struct sock *sk, const struct sk_buff *skb,
+                            unsigned int len)
 {
        static bool __once __read_mostly;
 
@@ -137,8 +138,9 @@ static void tcp_gro_dev_warn(struct sock *sk, const struct sk_buff *skb)
 
                rcu_read_lock();
                dev = dev_get_by_index_rcu(sock_net(sk), skb->skb_iif);
-               pr_warn("%s: Driver has suspect GRO implementation, TCP performance may be compromised.\n",
-                       dev ? dev->name : "Unknown driver");
+               if (!dev || len >= dev->mtu)
+                       pr_warn("%s: Driver has suspect GRO implementation, TCP performance may be compromised.\n",
+                               dev ? dev->name : "Unknown driver");
                rcu_read_unlock();
        }
 }
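
The hunk above and the one below tighten the "suspect GRO" warning: instead of firing on any segment whose length differs from the advertised MSS, it now requires the length to exceed rcv_mss by more than the TCP option space (so option-stripping middleboxes and drivers are tolerated) and to be at least the device MTU. The combined predicate, as a standalone check with illustrative numbers:

#include <stdbool.h>
#include <stdio.h>

#define MAX_TCP_OPTION_SPACE 40	/* as in the kernel's tcp.h */

static bool suspect_gro(unsigned int len, unsigned int rcv_mss,
			unsigned int mtu)
{
	return len > rcv_mss + MAX_TCP_OPTION_SPACE && len >= mtu;
}

int main(void)
{
	printf("%d\n", suspect_gro(1460, 1448, 1500));	/* 0: options removed */
	printf("%d\n", suspect_gro(2896, 1448, 1500));	/* 1: merged segments */
	return 0;
}
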
@@ -161,8 +163,10 @@ static void tcp_measure_rcv_mss(struct sock *sk, const struct sk_buff *skb)
        if (len >= icsk->icsk_ack.rcv_mss) {
                icsk->icsk_ack.rcv_mss = min_t(unsigned int, len,
                                               tcp_sk(sk)->advmss);
-               if (unlikely(icsk->icsk_ack.rcv_mss != len))
-                       tcp_gro_dev_warn(sk, skb);
+               /* Account for possibly-removed options */
+               if (unlikely(len > icsk->icsk_ack.rcv_mss +
+                                  MAX_TCP_OPTION_SPACE))
+                       tcp_gro_dev_warn(sk, skb, len);
        } else {
                /* Otherwise, we make more careful check taking into account,
                 * that SACKs block is variable.
@@ -874,22 +878,11 @@ static void tcp_update_reordering(struct sock *sk, const int metric,
                                  const int ts)
 {
        struct tcp_sock *tp = tcp_sk(sk);
-       if (metric > tp->reordering) {
-               int mib_idx;
+       int mib_idx;
 
+       if (metric > tp->reordering) {
                tp->reordering = min(sysctl_tcp_max_reordering, metric);
 
-               /* This exciting event is worth to be remembered. 8) */
-               if (ts)
-                       mib_idx = LINUX_MIB_TCPTSREORDER;
-               else if (tcp_is_reno(tp))
-                       mib_idx = LINUX_MIB_TCPRENOREORDER;
-               else if (tcp_is_fack(tp))
-                       mib_idx = LINUX_MIB_TCPFACKREORDER;
-               else
-                       mib_idx = LINUX_MIB_TCPSACKREORDER;
-
-               NET_INC_STATS(sock_net(sk), mib_idx);
 #if FASTRETRANS_DEBUG > 1
                pr_debug("Disorder%d %d %u f%u s%u rr%d\n",
                         tp->rx_opt.sack_ok, inet_csk(sk)->icsk_ca_state,
@@ -902,6 +895,18 @@ static void tcp_update_reordering(struct sock *sk, const int metric,
        }
 
        tp->rack.reord = 1;
+
+       /* This exciting event is worth to be remembered. 8) */
+       if (ts)
+               mib_idx = LINUX_MIB_TCPTSREORDER;
+       else if (tcp_is_reno(tp))
+               mib_idx = LINUX_MIB_TCPRENOREORDER;
+       else if (tcp_is_fack(tp))
+               mib_idx = LINUX_MIB_TCPFACKREORDER;
+       else
+               mib_idx = LINUX_MIB_TCPSACKREORDER;
+
+       NET_INC_STATS(sock_net(sk), mib_idx);
 }
 
 /* This must be called before lost_out is incremented */
@@ -3759,11 +3764,12 @@ void tcp_parse_options(const struct sk_buff *skb,
                                    !estab && sysctl_tcp_window_scaling) {
                                        __u8 snd_wscale = *(__u8 *)ptr;
                                        opt_rx->wscale_ok = 1;
-                                       if (snd_wscale > 14) {
-                                               net_info_ratelimited("%s: Illegal window scaling value %d >14 received\n",
+                                       if (snd_wscale > TCP_MAX_WSCALE) {
+                                               net_info_ratelimited("%s: Illegal window scaling value %d > %u received\n",
                                                                     __func__,
-                                                                    snd_wscale);
-                                               snd_wscale = 14;
+                                                                    snd_wscale,
+                                                                    TCP_MAX_WSCALE);
+                                               snd_wscale = TCP_MAX_WSCALE;
                                        }
                                        opt_rx->snd_wscale = snd_wscale;
                                }
@@ -5541,6 +5547,7 @@ void tcp_finish_connect(struct sock *sk, struct sk_buff *skb)
        struct inet_connection_sock *icsk = inet_csk(sk);
 
        tcp_set_state(sk, TCP_ESTABLISHED);
+       icsk->icsk_ack.lrcvtime = tcp_time_stamp;
 
        if (skb) {
                icsk->icsk_af_ops->sk_rx_dst_set(sk, skb);
@@ -5759,7 +5766,6 @@ static int tcp_rcv_synsent_state_process(struct sock *sk, struct sk_buff *skb,
                         * to stand against the temptation 8)     --ANK
                         */
                        inet_csk_schedule_ack(sk);
-                       icsk->icsk_ack.lrcvtime = tcp_time_stamp;
                        tcp_enter_quickack_mode(sk);
                        inet_csk_reset_xmit_timer(sk, ICSK_TIME_DACK,
                                                  TCP_DELACK_MAX, TCP_RTO_MAX);
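
The two hunks above move the icsk_ack.lrcvtime seeding out of the SYN-SENT receive path and into tcp_finish_connect(); the tcp_minisocks.c hunk below does the same for passive children in tcp_create_openreq_child(). The point is that "time since last receive" must never be computed against an unseeded zero timestamp. The failure mode, in miniature with made-up tick values:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t now = 4000000000u;		/* current "tcp_time_stamp" */
	uint32_t lrcvtime_unset = 0;		/* pre-fix fresh socket */
	uint32_t lrcvtime_seeded = now;		/* post-fix fresh socket */

	printf("delta unset:  %u\n", now - lrcvtime_unset);	/* absurdly large */
	printf("delta seeded: %u\n", now - lrcvtime_seeded);	/* 0, sane */
	return 0;
}
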
@@ -6324,36 +6330,14 @@ int tcp_conn_request(struct request_sock_ops *rsk_ops,
                goto drop_and_free;
 
        if (isn && tmp_opt.tstamp_ok)
-               af_ops->init_seq(skb, &tcp_rsk(req)->ts_off);
+               af_ops->init_seq_tsoff(skb, &tcp_rsk(req)->ts_off);
 
        if (!want_cookie && !isn) {
-               /* VJ's idea. We save last timestamp seen
-                * from the destination in peer table, when entering
-                * state TIME-WAIT, and check against it before
-                * accepting new connection request.
-                *
-                * If "isn" is not zero, this request hit alive
-                * timewait bucket, so that all the necessary checks
-                * are made in the function processing timewait state.
-                */
-               if (net->ipv4.tcp_death_row.sysctl_tw_recycle) {
-                       bool strict;
-
-                       dst = af_ops->route_req(sk, &fl, req, &strict);
-
-                       if (dst && strict &&
-                           !tcp_peer_is_proven(req, dst, true,
-                                               tmp_opt.saw_tstamp)) {
-                               NET_INC_STATS(sock_net(sk), LINUX_MIB_PAWSPASSIVEREJECTED);
-                               goto drop_and_release;
-                       }
-               }
                /* Kill the following clause, if you dislike this way. */
-               else if (!net->ipv4.sysctl_tcp_syncookies &&
-                        (net->ipv4.sysctl_max_syn_backlog - inet_csk_reqsk_queue_len(sk) <
-                         (net->ipv4.sysctl_max_syn_backlog >> 2)) &&
-                        !tcp_peer_is_proven(req, dst, false,
-                                            tmp_opt.saw_tstamp)) {
+               if (!net->ipv4.sysctl_tcp_syncookies &&
+                   (net->ipv4.sysctl_max_syn_backlog - inet_csk_reqsk_queue_len(sk) <
+                    (net->ipv4.sysctl_max_syn_backlog >> 2)) &&
+                   !tcp_peer_is_proven(req, dst)) {
                        /* Without syncookies last quarter of
                         * backlog is filled with destinations,
                         * proven to be alive.
@@ -6366,10 +6350,10 @@ int tcp_conn_request(struct request_sock_ops *rsk_ops,
                        goto drop_and_release;
                }
 
-               isn = af_ops->init_seq(skb, &tcp_rsk(req)->ts_off);
+               isn = af_ops->init_seq_tsoff(skb, &tcp_rsk(req)->ts_off);
        }
        if (!dst) {
-               dst = af_ops->route_req(sk, &fl, req, NULL);
+               dst = af_ops->route_req(sk, &fl, req);
                if (!dst)
                        goto drop_and_free;
        }
index 9a89b8deafae1e9b2e8d1d9bc211c9c30b8dd8ec..20cbd2f07f281717c1cb4e901c4c4e22f7c46bd6 100644
@@ -94,12 +94,12 @@ static int tcp_v4_md5_hash_hdr(char *md5_hash, const struct tcp_md5sig_key *key,
 struct inet_hashinfo tcp_hashinfo;
 EXPORT_SYMBOL(tcp_hashinfo);
 
-static u32 tcp_v4_init_sequence(const struct sk_buff *skb, u32 *tsoff)
+static u32 tcp_v4_init_seq_and_tsoff(const struct sk_buff *skb, u32 *tsoff)
 {
-       return secure_tcp_sequence_number(ip_hdr(skb)->daddr,
-                                         ip_hdr(skb)->saddr,
-                                         tcp_hdr(skb)->dest,
-                                         tcp_hdr(skb)->source, tsoff);
+       return secure_tcp_seq_and_tsoff(ip_hdr(skb)->daddr,
+                                       ip_hdr(skb)->saddr,
+                                       tcp_hdr(skb)->dest,
+                                       tcp_hdr(skb)->source, tsoff);
 }
 
 int tcp_twsk_unique(struct sock *sk, struct sock *sktw, void *twp)
@@ -198,10 +198,6 @@ int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
                        tp->write_seq      = 0;
        }
 
-       if (tcp_death_row->sysctl_tw_recycle &&
-           !tp->rx_opt.ts_recent_stamp && fl4->daddr == daddr)
-               tcp_fetch_timewait_stamp(sk, &rt->dst);
-
        inet->inet_dport = usin->sin_port;
        sk_daddr_set(sk, daddr);
 
@@ -236,11 +232,11 @@ int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
        rt = NULL;
 
        if (likely(!tp->repair)) {
-               seq = secure_tcp_sequence_number(inet->inet_saddr,
-                                                inet->inet_daddr,
-                                                inet->inet_sport,
-                                                usin->sin_port,
-                                                &tp->tsoffset);
+               seq = secure_tcp_seq_and_tsoff(inet->inet_saddr,
+                                              inet->inet_daddr,
+                                              inet->inet_sport,
+                                              usin->sin_port,
+                                              &tp->tsoffset);
                if (!tp->write_seq)
                        tp->write_seq = seq;
        }
@@ -279,10 +275,13 @@ EXPORT_SYMBOL(tcp_v4_connect);
  */
 void tcp_v4_mtu_reduced(struct sock *sk)
 {
-       struct dst_entry *dst;
        struct inet_sock *inet = inet_sk(sk);
-       u32 mtu = tcp_sk(sk)->mtu_info;
+       struct dst_entry *dst;
+       u32 mtu;
 
+       if ((1 << sk->sk_state) & (TCPF_LISTEN | TCPF_CLOSE))
+               return;
+       mtu = tcp_sk(sk)->mtu_info;
        dst = inet_csk_update_pmtu(sk, mtu);
        if (!dst)
                return;
@@ -428,7 +427,8 @@ void tcp_v4_err(struct sk_buff *icmp_skb, u32 info)
 
        switch (type) {
        case ICMP_REDIRECT:
-               do_redirect(icmp_skb, sk);
+               if (!sock_owned_by_user(sk))
+                       do_redirect(icmp_skb, sk);
                goto out;
        case ICMP_SOURCE_QUENCH:
                /* Just silently ignore these. */
@@ -1213,19 +1213,9 @@ static void tcp_v4_init_req(struct request_sock *req,
 
 static struct dst_entry *tcp_v4_route_req(const struct sock *sk,
                                          struct flowi *fl,
-                                         const struct request_sock *req,
-                                         bool *strict)
+                                         const struct request_sock *req)
 {
-       struct dst_entry *dst = inet_csk_route_req(sk, &fl->u.ip4, req);
-
-       if (strict) {
-               if (fl->u.ip4.daddr == inet_rsk(req)->ir_rmt_addr)
-                       *strict = true;
-               else
-                       *strict = false;
-       }
-
-       return dst;
+       return inet_csk_route_req(sk, &fl->u.ip4, req);
 }
 
 struct request_sock_ops tcp_request_sock_ops __read_mostly = {
@@ -1249,7 +1239,7 @@ static const struct tcp_request_sock_ops tcp_request_sock_ipv4_ops = {
        .cookie_init_seq =      cookie_v4_init_sequence,
 #endif
        .route_req      =       tcp_v4_route_req,
-       .init_seq       =       tcp_v4_init_sequence,
+       .init_seq_tsoff =       tcp_v4_init_seq_and_tsoff,
        .send_synack    =       tcp_v4_send_synack,
 };
 
@@ -1419,8 +1409,6 @@ int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb)
                if (!nsk)
                        goto discard;
                if (nsk != sk) {
-                       sock_rps_save_rxhash(nsk, skb);
-                       sk_mark_napi_id(nsk, skb);
                        if (tcp_child_process(sk, nsk, skb)) {
                                rsk = nsk;
                                goto reset;
@@ -2462,7 +2450,6 @@ static int __net_init tcp_sk_init(struct net *net)
        net->ipv4.sysctl_tcp_tw_reuse = 0;
 
        cnt = tcp_hashinfo.ehash_mask + 1;
-       net->ipv4.tcp_death_row.sysctl_tw_recycle = 0;
        net->ipv4.tcp_death_row.sysctl_max_tw_buckets = (cnt + 1) / 2;
        net->ipv4.tcp_death_row.hashinfo = &tcp_hashinfo;
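
Besides the tw_recycle removals and the init_seq → init_seq_tsoff rename, two robustness fixes stand out in the tcp_ipv4.c hunks above: ICMP redirects are processed only when the socket is not owned by user context, and tcp_v4_mtu_reduced() bails out for LISTEN/CLOSE sockets before touching mtu_info. That state test uses the kernel's one-bit-per-state encoding, which lets a whole set of states be matched with a single mask; a standalone rendering, with enum values copied from the kernel's TCP state numbering:

#include <stdio.h>

enum { TCP_ESTABLISHED = 1, TCP_CLOSE = 7, TCP_LISTEN = 10 };

#define TCPF_CLOSE	(1 << TCP_CLOSE)
#define TCPF_LISTEN	(1 << TCP_LISTEN)

static int pmtu_update_allowed(int sk_state)
{
	/* LISTEN/CLOSE sockets have no cached route to update */
	return !((1 << sk_state) & (TCPF_LISTEN | TCPF_CLOSE));
}

int main(void)
{
	printf("ESTABLISHED: %d\n", pmtu_update_allowed(TCP_ESTABLISHED)); /* 1 */
	printf("LISTEN:      %d\n", pmtu_update_allowed(TCP_LISTEN));      /* 0 */
	return 0;
}
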
 
index 0f46e5fe31ad1b6809ada1f70bce7b63df4f8c9c..9d0d4f39e42be15d8ad7389bc7562449b92ea5fa 100644
@@ -45,8 +45,6 @@ struct tcp_metrics_block {
        struct inetpeer_addr            tcpm_saddr;
        struct inetpeer_addr            tcpm_daddr;
        unsigned long                   tcpm_stamp;
-       u32                             tcpm_ts;
-       u32                             tcpm_ts_stamp;
        u32                             tcpm_lock;
        u32                             tcpm_vals[TCP_METRIC_MAX_KERNEL + 1];
        struct tcp_fastopen_metrics     tcpm_fastopen;
@@ -123,8 +121,6 @@ static void tcpm_suck_dst(struct tcp_metrics_block *tm,
        tm->tcpm_vals[TCP_METRIC_SSTHRESH] = dst_metric_raw(dst, RTAX_SSTHRESH);
        tm->tcpm_vals[TCP_METRIC_CWND] = dst_metric_raw(dst, RTAX_CWND);
        tm->tcpm_vals[TCP_METRIC_REORDERING] = dst_metric_raw(dst, RTAX_REORDERING);
-       tm->tcpm_ts = 0;
-       tm->tcpm_ts_stamp = 0;
        if (fastopen_clear) {
                tm->tcpm_fastopen.mss = 0;
                tm->tcpm_fastopen.syn_loss = 0;
@@ -273,48 +269,6 @@ static struct tcp_metrics_block *__tcp_get_metrics_req(struct request_sock *req,
        return tm;
 }
 
-static struct tcp_metrics_block *__tcp_get_metrics_tw(struct inet_timewait_sock *tw)
-{
-       struct tcp_metrics_block *tm;
-       struct inetpeer_addr saddr, daddr;
-       unsigned int hash;
-       struct net *net;
-
-       if (tw->tw_family == AF_INET) {
-               inetpeer_set_addr_v4(&saddr, tw->tw_rcv_saddr);
-               inetpeer_set_addr_v4(&daddr, tw->tw_daddr);
-               hash = ipv4_addr_hash(tw->tw_daddr);
-       }
-#if IS_ENABLED(CONFIG_IPV6)
-       else if (tw->tw_family == AF_INET6) {
-               if (ipv6_addr_v4mapped(&tw->tw_v6_daddr)) {
-                       inetpeer_set_addr_v4(&saddr, tw->tw_rcv_saddr);
-                       inetpeer_set_addr_v4(&daddr, tw->tw_daddr);
-                       hash = ipv4_addr_hash(tw->tw_daddr);
-               } else {
-                       inetpeer_set_addr_v6(&saddr, &tw->tw_v6_rcv_saddr);
-                       inetpeer_set_addr_v6(&daddr, &tw->tw_v6_daddr);
-                       hash = ipv6_addr_hash(&tw->tw_v6_daddr);
-               }
-       }
-#endif
-       else
-               return NULL;
-
-       net = twsk_net(tw);
-       hash ^= net_hash_mix(net);
-       hash = hash_32(hash, tcp_metrics_hash_log);
-
-       for (tm = rcu_dereference(tcp_metrics_hash[hash].chain); tm;
-            tm = rcu_dereference(tm->tcpm_next)) {
-               if (addr_same(&tm->tcpm_saddr, &saddr) &&
-                   addr_same(&tm->tcpm_daddr, &daddr) &&
-                   net_eq(tm_net(tm), net))
-                       break;
-       }
-       return tm;
-}
-
 static struct tcp_metrics_block *tcp_get_metrics(struct sock *sk,
                                                 struct dst_entry *dst,
                                                 bool create)
@@ -573,8 +527,7 @@ reset:
        tp->snd_cwnd_stamp = tcp_time_stamp;
 }
 
-bool tcp_peer_is_proven(struct request_sock *req, struct dst_entry *dst,
-                       bool paws_check, bool timestamps)
+bool tcp_peer_is_proven(struct request_sock *req, struct dst_entry *dst)
 {
        struct tcp_metrics_block *tm;
        bool ret;
@@ -584,94 +537,10 @@ bool tcp_peer_is_proven(struct request_sock *req, struct dst_entry *dst,
 
        rcu_read_lock();
        tm = __tcp_get_metrics_req(req, dst);
-       if (paws_check) {
-               if (tm &&
-                   (u32)get_seconds() - tm->tcpm_ts_stamp < TCP_PAWS_MSL &&
-                   ((s32)(tm->tcpm_ts - req->ts_recent) > TCP_PAWS_WINDOW ||
-                    !timestamps))
-                       ret = false;
-               else
-                       ret = true;
-       } else {
-               if (tm && tcp_metric_get(tm, TCP_METRIC_RTT) && tm->tcpm_ts_stamp)
-                       ret = true;
-               else
-                       ret = false;
-       }
-       rcu_read_unlock();
-
-       return ret;
-}
-
-void tcp_fetch_timewait_stamp(struct sock *sk, struct dst_entry *dst)
-{
-       struct tcp_metrics_block *tm;
-
-       rcu_read_lock();
-       tm = tcp_get_metrics(sk, dst, true);
-       if (tm) {
-               struct tcp_sock *tp = tcp_sk(sk);
-
-               if ((u32)get_seconds() - tm->tcpm_ts_stamp <= TCP_PAWS_MSL) {
-                       tp->rx_opt.ts_recent_stamp = tm->tcpm_ts_stamp;
-                       tp->rx_opt.ts_recent = tm->tcpm_ts;
-               }
-       }
-       rcu_read_unlock();
-}
-EXPORT_SYMBOL_GPL(tcp_fetch_timewait_stamp);
-
-/* VJ's idea. Save last timestamp seen from this destination and hold
- * it at least for normal timewait interval to use for duplicate
- * segment detection in subsequent connections, before they enter
- * synchronized state.
- */
-bool tcp_remember_stamp(struct sock *sk)
-{
-       struct dst_entry *dst = __sk_dst_get(sk);
-       bool ret = false;
-
-       if (dst) {
-               struct tcp_metrics_block *tm;
-
-               rcu_read_lock();
-               tm = tcp_get_metrics(sk, dst, true);
-               if (tm) {
-                       struct tcp_sock *tp = tcp_sk(sk);
-
-                       if ((s32)(tm->tcpm_ts - tp->rx_opt.ts_recent) <= 0 ||
-                           ((u32)get_seconds() - tm->tcpm_ts_stamp > TCP_PAWS_MSL &&
-                            tm->tcpm_ts_stamp <= (u32)tp->rx_opt.ts_recent_stamp)) {
-                               tm->tcpm_ts_stamp = (u32)tp->rx_opt.ts_recent_stamp;
-                               tm->tcpm_ts = tp->rx_opt.ts_recent;
-                       }
-                       ret = true;
-               }
-               rcu_read_unlock();
-       }
-       return ret;
-}
-
-bool tcp_tw_remember_stamp(struct inet_timewait_sock *tw)
-{
-       struct tcp_metrics_block *tm;
-       bool ret = false;
-
-       rcu_read_lock();
-       tm = __tcp_get_metrics_tw(tw);
-       if (tm) {
-               const struct tcp_timewait_sock *tcptw;
-               struct sock *sk = (struct sock *) tw;
-
-               tcptw = tcp_twsk(sk);
-               if ((s32)(tm->tcpm_ts - tcptw->tw_ts_recent) <= 0 ||
-                   ((u32)get_seconds() - tm->tcpm_ts_stamp > TCP_PAWS_MSL &&
-                    tm->tcpm_ts_stamp <= (u32)tcptw->tw_ts_recent_stamp)) {
-                       tm->tcpm_ts_stamp = (u32)tcptw->tw_ts_recent_stamp;
-                       tm->tcpm_ts        = tcptw->tw_ts_recent;
-               }
+       if (tm && tcp_metric_get(tm, TCP_METRIC_RTT))
                ret = true;
-       }
+       else
+               ret = false;
        rcu_read_unlock();
 
        return ret;
@@ -791,14 +660,6 @@ static int tcp_metrics_fill_info(struct sk_buff *msg,
                          jiffies - tm->tcpm_stamp,
                          TCP_METRICS_ATTR_PAD) < 0)
                goto nla_put_failure;
-       if (tm->tcpm_ts_stamp) {
-               if (nla_put_s32(msg, TCP_METRICS_ATTR_TW_TS_STAMP,
-                               (s32) (get_seconds() - tm->tcpm_ts_stamp)) < 0)
-                       goto nla_put_failure;
-               if (nla_put_u32(msg, TCP_METRICS_ATTR_TW_TSVAL,
-                               tm->tcpm_ts) < 0)
-                       goto nla_put_failure;
-       }
 
        {
                int n = 0;
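
This tcp_metrics.c block deletes the per-destination timestamp cache (tcpm_ts, tcpm_ts_stamp, tcp_remember_stamp(), tcp_tw_remember_stamp() and the timewait lookup) that existed solely to serve tcp_tw_recycle, removed elsewhere in this diff; recycling based on cached peer timestamps broke clients behind NAT, since hosts sharing an address do not share a timestamp clock. What survives is the ordinary per-connection PAWS test, roughly this serial-number comparison:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Simplified: a segment is PAWS-rejected when its timestamp value is
 * older than the last one seen on this connection. The kernel also
 * applies a TCP_PAWS_WINDOW fudge and an age cutoff. */
static bool paws_reject(uint32_t tsval, uint32_t ts_recent)
{
	return (int32_t)(tsval - ts_recent) < 0;
}

int main(void)
{
	printf("%d\n", paws_reject(100, 200));	/* 1: stale timestamp */
	printf("%d\n", paws_reject(300, 200));	/* 0: fresh timestamp */
	return 0;
}
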
index 7e16243cdb58c830f869fe483730e86400e2eb00..8f6373b0cd7729e7afde1b733879058197e9c5ca 100644
@@ -26,6 +26,7 @@
 #include <net/tcp.h>
 #include <net/inet_common.h>
 #include <net/xfrm.h>
+#include <net/busy_poll.h>
 
 int sysctl_tcp_abort_on_overflow __read_mostly;
 
@@ -94,7 +95,6 @@ tcp_timewait_state_process(struct inet_timewait_sock *tw, struct sk_buff *skb,
        struct tcp_options_received tmp_opt;
        struct tcp_timewait_sock *tcptw = tcp_twsk((struct sock *)tw);
        bool paws_reject = false;
-       struct inet_timewait_death_row *tcp_death_row = &sock_net((struct sock*)tw)->ipv4.tcp_death_row;
 
        tmp_opt.saw_tstamp = 0;
        if (th->doff > (sizeof(*th) >> 2) && tcptw->tw_ts_recent_stamp) {
@@ -149,12 +149,7 @@ tcp_timewait_state_process(struct inet_timewait_sock *tw, struct sk_buff *skb,
                        tcptw->tw_ts_recent       = tmp_opt.rcv_tsval;
                }
 
-               if (tcp_death_row->sysctl_tw_recycle &&
-                   tcptw->tw_ts_recent_stamp &&
-                   tcp_tw_remember_stamp(tw))
-                       inet_twsk_reschedule(tw, tw->tw_timeout);
-               else
-                       inet_twsk_reschedule(tw, TCP_TIMEWAIT_LEN);
+               inet_twsk_reschedule(tw, TCP_TIMEWAIT_LEN);
                return TCP_TW_ACK;
        }
 
@@ -259,12 +254,8 @@ void tcp_time_wait(struct sock *sk, int state, int timeo)
        const struct inet_connection_sock *icsk = inet_csk(sk);
        const struct tcp_sock *tp = tcp_sk(sk);
        struct inet_timewait_sock *tw;
-       bool recycle_ok = false;
        struct inet_timewait_death_row *tcp_death_row = &sock_net(sk)->ipv4.tcp_death_row;
 
-       if (tcp_death_row->sysctl_tw_recycle && tp->rx_opt.ts_recent_stamp)
-               recycle_ok = tcp_remember_stamp(sk);
-
        tw = inet_twsk_alloc(sk, tcp_death_row, state);
 
        if (tw) {
@@ -317,13 +308,9 @@ void tcp_time_wait(struct sock *sk, int state, int timeo)
                if (timeo < rto)
                        timeo = rto;
 
-               if (recycle_ok) {
-                       tw->tw_timeout = rto;
-               } else {
-                       tw->tw_timeout = TCP_TIMEWAIT_LEN;
-                       if (state == TCP_TIME_WAIT)
-                               timeo = TCP_TIMEWAIT_LEN;
-               }
+               tw->tw_timeout = TCP_TIMEWAIT_LEN;
+               if (state == TCP_TIME_WAIT)
+                       timeo = TCP_TIMEWAIT_LEN;
 
                inet_twsk_schedule(tw, timeo);
                /* Linkage updates. */
@@ -460,6 +447,7 @@ struct sock *tcp_create_openreq_child(const struct sock *sk,
                newtp->mdev_us = jiffies_to_usecs(TCP_TIMEOUT_INIT);
                minmax_reset(&newtp->rtt_min, tcp_time_stamp, ~0U);
                newicsk->icsk_rto = TCP_TIMEOUT_INIT;
+               newicsk->icsk_ack.lrcvtime = tcp_time_stamp;
 
                newtp->packets_out = 0;
                newtp->retrans_out = 0;
@@ -812,6 +800,9 @@ int tcp_child_process(struct sock *parent, struct sock *child,
        int ret = 0;
        int state = child->sk_state;
 
+       /* record NAPI ID of child */
+       sk_mark_napi_id(child, skb);
+
        tcp_segs_in(tcp_sk(child), skb);
        if (!sock_owned_by_user(child)) {
                ret = tcp_rcv_state_process(child, skb);
index 22548b5f05cbe5a655e0c53df2d31c5cc2e8a702..0e807a83c1bc510debda6c516f633a8b380d3679 100644
@@ -212,12 +212,12 @@ void tcp_select_initial_window(int __space, __u32 mss,
 
        /* If no clamp set the clamp to the max possible scaled window */
        if (*window_clamp == 0)
-               (*window_clamp) = (65535 << 14);
+               (*window_clamp) = (U16_MAX << TCP_MAX_WSCALE);
        space = min(*window_clamp, space);
 
        /* Quantize space offering to a multiple of mss if possible. */
        if (space > mss)
-               space = (space / mss) * mss;
+               space = rounddown(space, mss);
 
        /* NOTE: offering an initial window larger than 32767
         * will break some buggy TCP stacks. If the admin tells us
@@ -234,13 +234,11 @@ void tcp_select_initial_window(int __space, __u32 mss,
 
        (*rcv_wscale) = 0;
        if (wscale_ok) {
-               /* Set window scaling on max possible window
-                * See RFC1323 for an explanation of the limit to 14
-                */
+               /* Set window scaling on max possible window */
                space = max_t(u32, space, sysctl_tcp_rmem[2]);
                space = max_t(u32, space, sysctl_rmem_max);
                space = min_t(u32, space, *window_clamp);
-               while (space > 65535 && (*rcv_wscale) < 14) {
+               while (space > U16_MAX && (*rcv_wscale) < TCP_MAX_WSCALE) {
                        space >>= 1;
                        (*rcv_wscale)++;
                }
@@ -253,7 +251,7 @@ void tcp_select_initial_window(int __space, __u32 mss,
        }
 
        /* Set the clamp no higher than max representable value */
-       (*window_clamp) = min(65535U << (*rcv_wscale), *window_clamp);
+       (*window_clamp) = min_t(__u32, U16_MAX << (*rcv_wscale), *window_clamp);
 }
 EXPORT_SYMBOL(tcp_select_initial_window);
 
@@ -2561,7 +2559,6 @@ u32 __tcp_select_window(struct sock *sk)
        /* Don't do rounding if we are using window scaling, since the
         * scaled window will not line up with the MSS boundary anyway.
         */
-       window = tp->rcv_wnd;
        if (tp->rx_opt.rcv_wscale) {
                window = free_space;
 
@@ -2569,10 +2566,9 @@ u32 __tcp_select_window(struct sock *sk)
                 * Import case: prevent zero window announcement if
                 * 1<<rcv_wscale > mss.
                 */
-               if (((window >> tp->rx_opt.rcv_wscale) << tp->rx_opt.rcv_wscale) != window)
-                       window = (((window >> tp->rx_opt.rcv_wscale) + 1)
-                                 << tp->rx_opt.rcv_wscale);
+               window = ALIGN(window, (1 << tp->rx_opt.rcv_wscale));
        } else {
+               window = tp->rcv_wnd;
                /* Get the largest window that is a nice multiple of mss.
                 * Window clamp already applied above.
                 * If our current window offering is within 1 mss of the
@@ -2582,7 +2578,7 @@ u32 __tcp_select_window(struct sock *sk)
                 * is too small.
                 */
                if (window <= free_space - mss || window > free_space)
-                       window = (free_space / mss) * mss;
+                       window = rounddown(free_space, mss);
                else if (mss == full_space &&
                         free_space > window + (full_space >> 1))
                        window = free_space;
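
The tcp_output.c hunks above swap open-coded window arithmetic for named constants and helpers: U16_MAX << TCP_MAX_WSCALE as the largest representable window, rounddown() to quantize a window to MSS multiples, and ALIGN() to round the scaled window up to its granularity. The same arithmetic exercised standalone; the two macros are simplified from include/linux/kernel.h (this ALIGN assumes a power-of-two alignment):

#include <stdio.h>

#define TCP_MAX_WSCALE	14
#define U16_MAX		0xffffU
#define rounddown(x, y)	((x) - ((x) % (y)))
#define ALIGN(x, a)	(((x) + (a) - 1) & ~((a) - 1))

int main(void)
{
	/* largest receive window: 65535 << 14, about 1 GiB */
	printf("max window: %u\n", U16_MAX << TCP_MAX_WSCALE);

	/* quantize a 100000-byte window down to 1460-byte MSS multiples */
	printf("rounddown:  %u\n", rounddown(100000u, 1460u));

	/* round up to the scale granularity, here 1 << rcv_wscale = 128 */
	printf("ALIGN:      %u\n", ALIGN(100000u, 128u));
	return 0;
}
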
index 4ecb38ae85042db7fa59e1aa6c74c9c3da0b1099..d8acbd9f477a2ac6b0f8eee1bf59f3ab43abff07 100644
@@ -12,7 +12,8 @@ static void tcp_rack_mark_skb_lost(struct sock *sk, struct sk_buff *skb)
                /* Account for retransmits that are lost again */
                TCP_SKB_CB(skb)->sacked &= ~TCPCB_SACKED_RETRANS;
                tp->retrans_out -= tcp_skb_pcount(skb);
-               NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPLOSTRETRANSMIT);
+               NET_ADD_STATS(sock_net(sk), LINUX_MIB_TCPLOSTRETRANSMIT,
+                             tcp_skb_pcount(skb));
        }
 }
 
index 40d893556e6701ace6a02903e53c45822d6fa56d..b2ab411c6d3728fa7dbdebde045532a7317f5166 100644
@@ -249,7 +249,8 @@ void tcp_delack_timer_handler(struct sock *sk)
 
        sk_mem_reclaim_partial(sk);
 
-       if (sk->sk_state == TCP_CLOSE || !(icsk->icsk_ack.pending & ICSK_ACK_TIMER))
+       if (((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN)) ||
+           !(icsk->icsk_ack.pending & ICSK_ACK_TIMER))
                goto out;
 
        if (time_after(icsk->icsk_ack.timeout, jiffies)) {
@@ -552,7 +553,8 @@ void tcp_write_timer_handler(struct sock *sk)
        struct inet_connection_sock *icsk = inet_csk(sk);
        int event;
 
-       if (sk->sk_state == TCP_CLOSE || !icsk->icsk_pending)
+       if (((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN)) ||
+           !icsk->icsk_pending)
                goto out;
 
        if (time_after(icsk->icsk_timeout, jiffies)) {
index fed66dc0e0f5f242cf0af25434fa9cfa89998958..9775453b8d174c848dc09df83d1fa185422cd8cc 100644
@@ -265,8 +265,8 @@ static size_t tcp_westwood_info(struct sock *sk, u32 ext, int *attr,
        if (ext & (1 << (INET_DIAG_VEGASINFO - 1))) {
                info->vegas.tcpv_enabled = 1;
                info->vegas.tcpv_rttcnt = 0;
-               info->vegas.tcpv_rtt    = jiffies_to_usecs(ca->rtt),
-               info->vegas.tcpv_minrtt = jiffies_to_usecs(ca->rtt_min),
+               info->vegas.tcpv_rtt    = jiffies_to_usecs(ca->rtt);
+               info->vegas.tcpv_minrtt = jiffies_to_usecs(ca->rtt_min);
 
                *attr = INET_DIAG_VEGASINFO;
                return sizeof(struct tcpvegas_info);
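
The tcp_westwood.c change is a comma-operator fix: the two assignments were terminated with commas, silently chaining them and the following statement into one expression. It happened to evaluate identically here, which is why it survived, but the construct is a classic trap:

#include <stdio.h>

int main(void)
{
	int a = 0, b = 0;

	a = 1,		/* comma operator: still assigns ... */
	b = 2;		/* ... but this is one statement, not two */
	printf("a=%d b=%d\n", a, b);	/* a=1 b=2, by luck of the layout */
	return 0;
}
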
index e2afe677a9d944a2c6c27a2e7b2d06227712cf89..48c452959d2c2fe687472c3732fe40246a8c863a 100644
@@ -307,6 +307,7 @@ config IPV6_SEG6_LWTUNNEL
        bool "IPv6: Segment Routing Header encapsulation support"
        depends on IPV6
        select LWTUNNEL
+       select DST_CACHE
        ---help---
          Support for encapsulation of packets within an outer IPv6
          header and a Segment Routing Header using the lightweight
index 8c69768a5c4606548333842e86b1416a7897ebf5..67ec87ea5fb699eb8ba4634c91815f97d750ddf1 100644
@@ -224,6 +224,7 @@ static struct ipv6_devconf ipv6_devconf __read_mostly = {
        .accept_ra_rtr_pref     = 1,
        .rtr_probe_interval     = 60 * HZ,
 #ifdef CONFIG_IPV6_ROUTE_INFO
+       .accept_ra_rt_info_min_plen = 0,
        .accept_ra_rt_info_max_plen = 0,
 #endif
 #endif
@@ -277,6 +278,7 @@ static struct ipv6_devconf ipv6_devconf_dflt __read_mostly = {
        .accept_ra_rtr_pref     = 1,
        .rtr_probe_interval     = 60 * HZ,
 #ifdef CONFIG_IPV6_ROUTE_INFO
+       .accept_ra_rt_info_min_plen = 0,
        .accept_ra_rt_info_max_plen = 0,
 #endif
 #endif
@@ -547,6 +549,9 @@ static int inet6_netconf_fill_devconf(struct sk_buff *skb, int ifindex,
        if (nla_put_s32(skb, NETCONFA_IFINDEX, ifindex) < 0)
                goto nla_put_failure;
 
+       if (!devconf)
+               goto out;
+
        if ((all || type == NETCONFA_FORWARDING) &&
            nla_put_s32(skb, NETCONFA_FORWARDING, devconf->forwarding) < 0)
                goto nla_put_failure;
@@ -565,6 +570,7 @@ static int inet6_netconf_fill_devconf(struct sk_buff *skb, int ifindex,
                        devconf->ignore_routes_with_linkdown) < 0)
                goto nla_put_failure;
 
+out:
        nlmsg_end(skb, nlh);
        return 0;
 
@@ -573,8 +579,8 @@ nla_put_failure:
        return -EMSGSIZE;
 }
 
-void inet6_netconf_notify_devconf(struct net *net, int type, int ifindex,
-                                 struct ipv6_devconf *devconf)
+void inet6_netconf_notify_devconf(struct net *net, int event, int type,
+                                 int ifindex, struct ipv6_devconf *devconf)
 {
        struct sk_buff *skb;
        int err = -ENOBUFS;
@@ -584,7 +590,7 @@ void inet6_netconf_notify_devconf(struct net *net, int type, int ifindex,
                goto errout;
 
        err = inet6_netconf_fill_devconf(skb, ifindex, devconf, 0, 0,
-                                        RTM_NEWNETCONF, 0, type);
+                                        event, 0, type);
        if (err < 0) {
                /* -EMSGSIZE implies BUG in inet6_netconf_msgsize_devconf() */
                WARN_ON(err == -EMSGSIZE);
@@ -767,7 +773,8 @@ static void dev_forward_change(struct inet6_dev *idev)
                else
                        addrconf_leave_anycast(ifa);
        }
-       inet6_netconf_notify_devconf(dev_net(dev), NETCONFA_FORWARDING,
+       inet6_netconf_notify_devconf(dev_net(dev), RTM_NEWNETCONF,
+                                    NETCONFA_FORWARDING,
                                     dev->ifindex, &idev->cnf);
 }
 
@@ -802,7 +809,8 @@ static int addrconf_fixup_forwarding(struct ctl_table *table, int *p, int newf)
 
        if (p == &net->ipv6.devconf_dflt->forwarding) {
                if ((!newf) ^ (!old))
-                       inet6_netconf_notify_devconf(net, NETCONFA_FORWARDING,
+                       inet6_netconf_notify_devconf(net, RTM_NEWNETCONF,
+                                                    NETCONFA_FORWARDING,
                                                     NETCONFA_IFINDEX_DEFAULT,
                                                     net->ipv6.devconf_dflt);
                rtnl_unlock();
@@ -814,13 +822,15 @@ static int addrconf_fixup_forwarding(struct ctl_table *table, int *p, int newf)
 
                net->ipv6.devconf_dflt->forwarding = newf;
                if ((!newf) ^ (!old_dflt))
-                       inet6_netconf_notify_devconf(net, NETCONFA_FORWARDING,
+                       inet6_netconf_notify_devconf(net, RTM_NEWNETCONF,
+                                                    NETCONFA_FORWARDING,
                                                     NETCONFA_IFINDEX_DEFAULT,
                                                     net->ipv6.devconf_dflt);
 
                addrconf_forward_change(net, newf);
                if ((!newf) ^ (!old))
-                       inet6_netconf_notify_devconf(net, NETCONFA_FORWARDING,
+                       inet6_netconf_notify_devconf(net, RTM_NEWNETCONF,
+                                                    NETCONFA_FORWARDING,
                                                     NETCONFA_IFINDEX_ALL,
                                                     net->ipv6.devconf_all);
        } else if ((!newf) ^ (!old))
@@ -845,6 +855,7 @@ static void addrconf_linkdown_change(struct net *net, __s32 newf)
                        idev->cnf.ignore_routes_with_linkdown = newf;
                        if (changed)
                                inet6_netconf_notify_devconf(dev_net(dev),
+                                                            RTM_NEWNETCONF,
                                                             NETCONFA_IGNORE_ROUTES_WITH_LINKDOWN,
                                                             dev->ifindex,
                                                             &idev->cnf);
@@ -867,6 +878,7 @@ static int addrconf_fixup_linkdown(struct ctl_table *table, int *p, int newf)
        if (p == &net->ipv6.devconf_dflt->ignore_routes_with_linkdown) {
                if ((!newf) ^ (!old))
                        inet6_netconf_notify_devconf(net,
+                                                    RTM_NEWNETCONF,
                                                     NETCONFA_IGNORE_ROUTES_WITH_LINKDOWN,
                                                     NETCONFA_IFINDEX_DEFAULT,
                                                     net->ipv6.devconf_dflt);
@@ -879,6 +891,7 @@ static int addrconf_fixup_linkdown(struct ctl_table *table, int *p, int newf)
                addrconf_linkdown_change(net, newf);
                if ((!newf) ^ (!old))
                        inet6_netconf_notify_devconf(net,
+                                                    RTM_NEWNETCONF,
                                                     NETCONFA_IGNORE_ROUTES_WITH_LINKDOWN,
                                                     NETCONFA_IFINDEX_ALL,
                                                     net->ipv6.devconf_all);
@@ -4979,6 +4992,7 @@ static inline void ipv6_store_devconf(struct ipv6_devconf *cnf,
        array[DEVCONF_RTR_PROBE_INTERVAL] =
                jiffies_to_msecs(cnf->rtr_probe_interval);
 #ifdef CONFIG_IPV6_ROUTE_INFO
+       array[DEVCONF_ACCEPT_RA_RT_INFO_MIN_PLEN] = cnf->accept_ra_rt_info_min_plen;
        array[DEVCONF_ACCEPT_RA_RT_INFO_MAX_PLEN] = cnf->accept_ra_rt_info_max_plen;
 #endif
 #endif
@@ -5672,17 +5686,20 @@ int addrconf_sysctl_proxy_ndp(struct ctl_table *ctl, int write,
                        return restart_syscall();
 
                if (valp == &net->ipv6.devconf_dflt->proxy_ndp)
-                       inet6_netconf_notify_devconf(net, NETCONFA_PROXY_NEIGH,
+                       inet6_netconf_notify_devconf(net, RTM_NEWNETCONF,
+                                                    NETCONFA_PROXY_NEIGH,
                                                     NETCONFA_IFINDEX_DEFAULT,
                                                     net->ipv6.devconf_dflt);
                else if (valp == &net->ipv6.devconf_all->proxy_ndp)
-                       inet6_netconf_notify_devconf(net, NETCONFA_PROXY_NEIGH,
+                       inet6_netconf_notify_devconf(net, RTM_NEWNETCONF,
+                                                    NETCONFA_PROXY_NEIGH,
                                                     NETCONFA_IFINDEX_ALL,
                                                     net->ipv6.devconf_all);
                else {
                        struct inet6_dev *idev = ctl->extra1;
 
-                       inet6_netconf_notify_devconf(net, NETCONFA_PROXY_NEIGH,
+                       inet6_netconf_notify_devconf(net, RTM_NEWNETCONF,
+                                                    NETCONFA_PROXY_NEIGH,
                                                     idev->dev->ifindex,
                                                     &idev->cnf);
                }
@@ -6121,6 +6138,13 @@ static const struct ctl_table addrconf_sysctl[] = {
                .proc_handler   = proc_dointvec_jiffies,
        },
 #ifdef CONFIG_IPV6_ROUTE_INFO
+       {
+               .procname       = "accept_ra_rt_info_min_plen",
+               .data           = &ipv6_devconf.accept_ra_rt_info_min_plen,
+               .maxlen         = sizeof(int),
+               .mode           = 0644,
+               .proc_handler   = proc_dointvec,
+       },
        {
                .procname       = "accept_ra_rt_info_max_plen",
                .data           = &ipv6_devconf.accept_ra_rt_info_max_plen,
@@ -6338,7 +6362,8 @@ static int __addrconf_sysctl_register(struct net *net, char *dev_name,
                ifindex = NETCONFA_IFINDEX_DEFAULT;
        else
                ifindex = idev->dev->ifindex;
-       inet6_netconf_notify_devconf(net, NETCONFA_ALL, ifindex, p);
+       inet6_netconf_notify_devconf(net, RTM_NEWNETCONF, NETCONFA_ALL,
+                                    ifindex, p);
        return 0;
 
 free:
@@ -6347,7 +6372,8 @@ out:
        return -ENOBUFS;
 }
 
-static void __addrconf_sysctl_unregister(struct ipv6_devconf *p)
+static void __addrconf_sysctl_unregister(struct net *net,
+                                        struct ipv6_devconf *p, int ifindex)
 {
        struct ctl_table *table;
 
@@ -6358,6 +6384,8 @@ static void __addrconf_sysctl_unregister(struct ipv6_devconf *p)
        unregister_net_sysctl_table(p->sysctl_header);
        p->sysctl_header = NULL;
        kfree(table);
+
+       inet6_netconf_notify_devconf(net, RTM_DELNETCONF, 0, ifindex, NULL);
 }
 
 static int addrconf_sysctl_register(struct inet6_dev *idev)
@@ -6381,7 +6409,8 @@ static int addrconf_sysctl_register(struct inet6_dev *idev)
 
 static void addrconf_sysctl_unregister(struct inet6_dev *idev)
 {
-       __addrconf_sysctl_unregister(&idev->cnf);
+       __addrconf_sysctl_unregister(dev_net(idev->dev), &idev->cnf,
+                                    idev->dev->ifindex);
        neigh_sysctl_unregister(idev->nd_parms);
 }
 
@@ -6424,7 +6453,7 @@ static int __net_init addrconf_init_net(struct net *net)
 
 #ifdef CONFIG_SYSCTL
 err_reg_dflt:
-       __addrconf_sysctl_unregister(all);
+       __addrconf_sysctl_unregister(net, all, NETCONFA_IFINDEX_ALL);
 err_reg_all:
        kfree(dflt);
 #endif
@@ -6437,8 +6466,10 @@ err_alloc_all:
 static void __net_exit addrconf_exit_net(struct net *net)
 {
 #ifdef CONFIG_SYSCTL
-       __addrconf_sysctl_unregister(net->ipv6.devconf_dflt);
-       __addrconf_sysctl_unregister(net->ipv6.devconf_all);
+       __addrconf_sysctl_unregister(net, net->ipv6.devconf_dflt,
+                                    NETCONFA_IFINDEX_DEFAULT);
+       __addrconf_sysctl_unregister(net, net->ipv6.devconf_all,
+                                    NETCONFA_IFINDEX_ALL);
 #endif
        kfree(net->ipv6.devconf_dflt);
        kfree(net->ipv6.devconf_all);
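
Two themes in the addrconf.c hunks above: every netconf notification now names its event explicitly (RTM_NEWNETCONF, or the new RTM_DELNETCONF with a NULL devconf when a device's sysctl block is unregistered, which the early "if (!devconf) goto out" guard supports), and a new accept_ra_rt_info_min_plen sysctl complements the existing max_plen so that RA Route Information options are honoured only within a prefix-length window. The filter, schematically (this is not the kernel's rt6_route_rcv(), just its acceptance condition):

#include <stdbool.h>
#include <stdio.h>

static bool accept_route_info(int plen, int min_plen, int max_plen)
{
	return plen >= min_plen && plen <= max_plen;
}

int main(void)
{
	printf("%d\n", accept_route_info(64, 48, 64));	/* 1: in window */
	printf("%d\n", accept_route_info(32, 48, 64));	/* 0: too coarse */
	return 0;
}
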
index 04db40620ea65c1f369ef63490383e92def722ff..1635d218735e48f3d90d049db95fbbee82b54e81 100644
@@ -920,12 +920,12 @@ static int __init inet6_init(void)
        err = register_pernet_subsys(&inet6_net_ops);
        if (err)
                goto register_pernet_fail;
-       err = icmpv6_init();
-       if (err)
-               goto icmp_fail;
        err = ip6_mr_init();
        if (err)
                goto ipmr_fail;
+       err = icmpv6_init();
+       if (err)
+               goto icmp_fail;
        err = ndisc_init();
        if (err)
                goto ndisc_fail;
@@ -1005,6 +1005,10 @@ static int __init inet6_init(void)
        if (err)
                goto seg6_fail;
 
+       err = igmp6_late_init();
+       if (err)
+               goto igmp6_late_err;
+
 #ifdef CONFIG_SYSCTL
        err = ipv6_sysctl_register();
        if (err)
@@ -1015,8 +1019,10 @@ out:
 
 #ifdef CONFIG_SYSCTL
 sysctl_fail:
-       seg6_exit();
+       igmp6_late_cleanup();
 #endif
+igmp6_late_err:
+       seg6_exit();
 seg6_fail:
        calipso_exit();
 calipso_fail:
@@ -1061,10 +1067,10 @@ igmp_fail:
        ndisc_cleanup();
 ndisc_fail:
        ip6_mr_cleanup();
-ipmr_fail:
-       icmpv6_cleanup();
 icmp_fail:
        unregister_pernet_subsys(&inet6_net_ops);
+ipmr_fail:
+       icmpv6_cleanup();
 register_pernet_fail:
        sock_unregister(PF_INET6);
        rtnl_unregister_all(PF_INET6);
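
The af_inet6.c hunks reorder ip6_mr_init() ahead of icmpv6_init() and add an igmp6_late_init() stage; most of the churn is keeping the error-unwind labels a mirror image of the new init order, so each failure label undoes exactly the steps that succeeded, in reverse. The idiom reduced to a skeleton:

#include <stdio.h>

static int init_a(void) { return 0; }
static int init_b(void) { return -1; }	/* pretend this step fails */
static void exit_a(void) { printf("undo a\n"); }

static int subsystem_init(void)
{
	int err;

	err = init_a();
	if (err)
		goto a_fail;
	err = init_b();
	if (err)
		goto b_fail;
	return 0;

b_fail:
	exit_a();	/* undo only what already succeeded */
a_fail:
	return err;
}

int main(void)
{
	printf("init: %d\n", subsystem_init());
	return 0;
}
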
index e4266746e4a2af67562bb05dd50ace54e55d3edd..d4bf2c68a545b44873e433930e4e999920de78c9 100644
@@ -923,6 +923,8 @@ add:
                        ins = &rt->dst.rt6_next;
                        iter = *ins;
                        while (iter) {
+                               if (iter->rt6i_metric > rt->rt6i_metric)
+                                       break;
                                if (rt6_qualify_for_ecmp(iter)) {
                                        *ins = iter->dst.rt6_next;
                                        fib6_purge_rt(iter, fn, info->nl_net);
index aacfb4bce1533b3f3b38e1173c18cb1bb6b33099..b04539dd4629d2b71b5db27c4a64a89151b2d5d7 100644 (file)
@@ -49,6 +49,8 @@
 
 int ip6_rcv_finish(struct net *net, struct sock *sk, struct sk_buff *skb)
 {
+       void (*edemux)(struct sk_buff *skb);
+
        /* if ingress device is enslaved to an L3 master device pass the
         * skb to its handler for processing
         */
@@ -60,8 +62,8 @@ int ip6_rcv_finish(struct net *net, struct sock *sk, struct sk_buff *skb)
                const struct inet6_protocol *ipprot;
 
                ipprot = rcu_dereference(inet6_protos[ipv6_hdr(skb)->nexthdr]);
-               if (ipprot && ipprot->early_demux)
-                       ipprot->early_demux(skb);
+               if (ipprot && (edemux = READ_ONCE(ipprot->early_demux)))
+                       edemux(skb);
        }
        if (!skb_valid_dst(skb))
                ip6_route_input(skb);
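
With early_demux now switchable at runtime (see the inet6_protos const-ness change and the early_demux_handler fields later in this series), the hunk snapshots the function pointer once with READ_ONCE() and calls the local copy, so the pointer cannot change or be reloaded between the NULL check and the call. A C11-atomics sketch of the same snapshot-then-call pattern (userspace stand-in; names illustrative):

/* Load the shared function pointer once, then test and call the local
 * copy; a concurrent writer clearing the slot cannot bite between the
 * check and the call.
 */
#include <stdatomic.h>
#include <stdio.h>

typedef void (*demux_fn)(int pkt);

static _Atomic demux_fn early_demux;    /* may be set/cleared at runtime */

static void tcp_demux(int pkt) { printf("demuxed %d\n", pkt); }

static void rcv_finish(int pkt)
{
        demux_fn edemux = atomic_load_explicit(&early_demux,
                                               memory_order_acquire);
        if (edemux)             /* test the snapshot, not the shared slot */
                edemux(pkt);
}

int main(void)
{
        atomic_store(&early_demux, tcp_demux);
        rcv_finish(1);
        atomic_store(&early_demux, (demux_fn)0);
        rcv_finish(2);          /* safely skipped */
        return 0;
}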
index 0838e6d01d2e4979559cae63a20ca339a3e2c22c..93e58a5e18374bee41f5a17f0c5911e381acb142 100644 (file)
@@ -294,8 +294,10 @@ static int ipv6_gro_complete(struct sk_buff *skb, int nhoff)
        struct ipv6hdr *iph = (struct ipv6hdr *)(skb->data + nhoff);
        int err = -ENOSYS;
 
-       if (skb->encapsulation)
+       if (skb->encapsulation) {
+               skb_set_inner_protocol(skb, cpu_to_be16(ETH_P_IPV6));
                skb_set_inner_network_header(skb, nhoff);
+       }
 
        iph->payload_len = htons(skb->len - nhoff - sizeof(*iph));
 
index 528b3c1f3fdee4314e1c23007ae76333b4af7505..58f6288e9ba53e6964b74d71dde7615ead695c06 100644 (file)
@@ -768,13 +768,14 @@ slow_path:
         *      Fragment the datagram.
         */
 
-       *prevhdr = NEXTHDR_FRAGMENT;
        troom = rt->dst.dev->needed_tailroom;
 
        /*
         *      Keep copying data until we run out.
         */
        while (left > 0)        {
+               u8 *fragnexthdr_offset;
+
                len = left;
                /* IF: it doesn't fit, use 'mtu' - the data space left */
                if (len > mtu)
@@ -819,6 +820,10 @@ slow_path:
                 */
                skb_copy_from_linear_data(skb, skb_network_header(frag), hlen);
 
+               fragnexthdr_offset = skb_network_header(frag);
+               fragnexthdr_offset += prevhdr - skb_network_header(skb);
+               *fragnexthdr_offset = NEXTHDR_FRAGMENT;
+
                /*
                 *      Build fragment header.
                 */
@@ -1385,7 +1390,7 @@ emsgsize:
        if ((((length + fragheaderlen) > mtu) ||
             (skb && skb_is_gso(skb))) &&
            (sk->sk_protocol == IPPROTO_UDP) &&
-           (rt->dst.dev->features & NETIF_F_UFO) && !rt->dst.header_len &&
+           (rt->dst.dev->features & NETIF_F_UFO) && !dst_xfrm(&rt->dst) &&
            (sk->sk_type == SOCK_DGRAM) && !udp_get_no_check6_tx(sk)) {
                err = ip6_ufo_append_data(sk, queue, getfrag, from, length,
                                          hh_len, fragheaderlen, exthdrlen,
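
The fragmentation hunks stop patching NEXTHDR_FRAGMENT into the original skb's headers up front; instead each fragment re-derives where that byte lives inside its own copied headers, taking prevhdr's offset from the original network header and applying it to the fragment's. A tiny sketch of relocating a pointer into a copied buffer by offset (illustrative, userspace only):

/* Re-derive a pointer inside a copied buffer: take the field's offset
 * in the source and add it to the copy's base, instead of writing
 * through the stale source pointer.
 */
#include <stdio.h>
#include <string.h>

#define NEXTHDR_FRAGMENT 44

int main(void)
{
        unsigned char orig[64] = { 0 };
        unsigned char frag[64];
        unsigned char *prevhdr = &orig[6];      /* e.g. an IPv6 nexthdr field */

        memcpy(frag, orig, sizeof(orig));       /* copy headers to fragment */

        /* same field, relocated into the fragment's copy */
        unsigned char *fragnexthdr = frag + (prevhdr - orig);
        *fragnexthdr = NEXTHDR_FRAGMENT;

        printf("orig=%u frag=%u\n", orig[6], frag[6]);  /* orig stays 0 */
        return 0;
}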
index 644ba59fbd9d5ed8d6ba4a8082dd327589c9bb68..3d8a3b63b4fdbec7d488194e21e0c9013f0ff6da 100644 (file)
@@ -485,11 +485,15 @@ vti6_xmit(struct sk_buff *skb, struct net_device *dev, struct flowi *fl)
        if (!skb->ignore_df && skb->len > mtu) {
                skb_dst(skb)->ops->update_pmtu(dst, NULL, skb, mtu);
 
-               if (skb->protocol == htons(ETH_P_IPV6))
+               if (skb->protocol == htons(ETH_P_IPV6)) {
+                       if (mtu < IPV6_MIN_MTU)
+                               mtu = IPV6_MIN_MTU;
+
                        icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
-               else
+               } else {
                        icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED,
                                  htonl(mtu));
+               }
 
                return -EMSGSIZE;
        }
index 6ba6c900ebcf430cf313a2bef55ff69c114af218..fb4546e80c8282cdf17e7429506d5f4630809cc4 100644 (file)
@@ -815,7 +815,7 @@ static int mif6_delete(struct mr6_table *mrt, int vifi, struct list_head *head)
        in6_dev = __in6_dev_get(dev);
        if (in6_dev) {
                in6_dev->cnf.mc_forwarding--;
-               inet6_netconf_notify_devconf(dev_net(dev),
+               inet6_netconf_notify_devconf(dev_net(dev), RTM_NEWNETCONF,
                                             NETCONFA_MC_FORWARDING,
                                             dev->ifindex, &in6_dev->cnf);
        }
@@ -974,7 +974,7 @@ static int mif6_add(struct net *net, struct mr6_table *mrt,
        in6_dev = __in6_dev_get(dev);
        if (in6_dev) {
                in6_dev->cnf.mc_forwarding++;
-               inet6_netconf_notify_devconf(dev_net(dev),
+               inet6_netconf_notify_devconf(dev_net(dev), RTM_NEWNETCONF,
                                             NETCONFA_MC_FORWARDING,
                                             dev->ifindex, &in6_dev->cnf);
        }
@@ -1599,7 +1599,8 @@ static int ip6mr_sk_init(struct mr6_table *mrt, struct sock *sk)
        write_unlock_bh(&mrt_lock);
 
        if (!err)
-               inet6_netconf_notify_devconf(net, NETCONFA_MC_FORWARDING,
+               inet6_netconf_notify_devconf(net, RTM_NEWNETCONF,
+                                            NETCONFA_MC_FORWARDING,
                                             NETCONFA_IFINDEX_ALL,
                                             net->ipv6.devconf_all);
        rtnl_unlock();
@@ -1620,7 +1621,7 @@ int ip6mr_sk_done(struct sock *sk)
                        mrt->mroute6_sk = NULL;
                        net->ipv6.devconf_all->mc_forwarding--;
                        write_unlock_bh(&mrt_lock);
-                       inet6_netconf_notify_devconf(net,
+                       inet6_netconf_notify_devconf(net, RTM_NEWNETCONF,
                                                     NETCONFA_MC_FORWARDING,
                                                     NETCONFA_IFINDEX_ALL,
                                                     net->ipv6.devconf_all);
index 1bdc703cb9668bd77690c3d8f1ec0062d7b88c43..07403fa164e18aac704e3b77d1e2a094ad53c04c 100644 (file)
@@ -2463,7 +2463,6 @@ static void mld_ifc_event(struct inet6_dev *idev)
        mld_ifc_start_timer(idev, 1);
 }
 
-
 static void igmp6_timer_handler(unsigned long data)
 {
        struct ifmcaddr6 *ma = (struct ifmcaddr6 *) data;
@@ -2599,6 +2598,44 @@ void ipv6_mc_destroy_dev(struct inet6_dev *idev)
        write_unlock_bh(&idev->lock);
 }
 
+static void ipv6_mc_rejoin_groups(struct inet6_dev *idev)
+{
+       struct ifmcaddr6 *pmc;
+
+       ASSERT_RTNL();
+
+       if (mld_in_v1_mode(idev)) {
+               read_lock_bh(&idev->lock);
+               for (pmc = idev->mc_list; pmc; pmc = pmc->next)
+                       igmp6_join_group(pmc);
+               read_unlock_bh(&idev->lock);
+       } else
+               mld_send_report(idev, NULL);
+}
+
+static int ipv6_mc_netdev_event(struct notifier_block *this,
+                               unsigned long event,
+                               void *ptr)
+{
+       struct net_device *dev = netdev_notifier_info_to_dev(ptr);
+       struct inet6_dev *idev = __in6_dev_get(dev);
+
+       switch (event) {
+       case NETDEV_RESEND_IGMP:
+               if (idev)
+                       ipv6_mc_rejoin_groups(idev);
+               break;
+       default:
+               break;
+       }
+
+       return NOTIFY_DONE;
+}
+
+static struct notifier_block igmp6_netdev_notifier = {
+       .notifier_call = ipv6_mc_netdev_event,
+};
+
 #ifdef CONFIG_PROC_FS
 struct igmp6_mc_iter_state {
        struct seq_net_private p;
@@ -2970,7 +3007,17 @@ int __init igmp6_init(void)
        return register_pernet_subsys(&igmp6_net_ops);
 }
 
+int __init igmp6_late_init(void)
+{
+       return register_netdevice_notifier(&igmp6_netdev_notifier);
+}
+
 void igmp6_cleanup(void)
 {
        unregister_pernet_subsys(&igmp6_net_ops);
 }
+
+void igmp6_late_cleanup(void)
+{
+       unregister_netdevice_notifier(&igmp6_netdev_notifier);
+}
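
The new igmp6_netdev_notifier hooks NETDEV_RESEND_IGMP so MLD group memberships are re-announced when a device asks for it. A plain-C sketch of the notifier-block pattern itself, i.e. callbacks registered on a chain and invoked with an (event, data) pair (userspace stand-in for the kernel API; names illustrative):

/* Callbacks register on a chain; firing the chain walks every block
 * and hands each one the event and its payload.
 */
#include <stdio.h>

#define NETDEV_RESEND_IGMP 1

struct notifier_block {
        int (*notifier_call)(struct notifier_block *nb,
                             unsigned long event, void *data);
        struct notifier_block *next;
};

static struct notifier_block *chain;

static void register_notifier(struct notifier_block *nb)
{
        nb->next = chain;
        chain = nb;
}

static void call_chain(unsigned long event, void *data)
{
        for (struct notifier_block *nb = chain; nb; nb = nb->next)
                nb->notifier_call(nb, event, data);
}

static int mc_event(struct notifier_block *nb, unsigned long event, void *data)
{
        (void)nb;
        if (event == NETDEV_RESEND_IGMP)
                printf("rejoining multicast groups on %s\n", (char *)data);
        return 0;
}

static struct notifier_block mc_notifier = { .notifier_call = mc_event };

int main(void)
{
        register_notifier(&mc_notifier);
        call_chain(NETDEV_RESEND_IGMP, "eth0");
        return 0;
}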
index 7ebac630d3c603186be2fc0dcbaac7d7e74bfde6..b5812b3f75399df98ec3b487dc69f07ff01bc35f 100644 (file)
@@ -732,7 +732,7 @@ void ndisc_update(const struct net_device *dev, struct neighbour *neigh,
                  const u8 *lladdr, u8 new, u32 flags, u8 icmp6_type,
                  struct ndisc_options *ndopts)
 {
-       neigh_update(neigh, lladdr, new, flags);
+       neigh_update(neigh, lladdr, new, flags, 0);
        /* report ndisc ops about neighbour update */
        ndisc_ops_update(dev, neigh, flags, icmp6_type, ndopts);
 }
@@ -1418,6 +1418,8 @@ skip_linkparms:
                        if (ri->prefix_len == 0 &&
                            !in6_dev->cnf.accept_ra_defrtr)
                                continue;
+                       if (ri->prefix_len < in6_dev->cnf.accept_ra_rt_info_min_plen)
+                               continue;
                        if (ri->prefix_len > in6_dev->cnf.accept_ra_rt_info_max_plen)
                                continue;
                        rt6_route_rcv(skb->dev, (u8 *)p, (p->nd_opt_len) << 3,
index 765facf03d45c47b9913b1adcdaf59b6fe09383c..e8d88d82636b759c68f348455ec17b526408b926 100644 (file)
@@ -159,7 +159,7 @@ void nft_fib6_eval(const struct nft_expr *expr, struct nft_regs *regs,
 
        if (nft_hook(pkt) == NF_INET_PRE_ROUTING &&
            nft_fib_is_loopback(pkt->skb, nft_in(pkt))) {
-               nft_fib_store_result(dest, priv->result, pkt,
+               nft_fib_store_result(dest, priv, pkt,
                                     nft_in(pkt)->ifindex);
                return;
        }
index 6c5b5b1830a74f52e2dbc4f4f2bb8127a0ba2efb..4146536e9c1517fc5e2e0ad066a8e87154446dda 100644 (file)
@@ -27,10 +27,10 @@ static void nft_masq_ipv6_eval(const struct nft_expr *expr,
        memset(&range, 0, sizeof(range));
        range.flags = priv->flags;
        if (priv->sreg_proto_min) {
-               range.min_proto.all =
-                       *(__be16 *)&regs->data[priv->sreg_proto_min];
-               range.max_proto.all =
-                       *(__be16 *)&regs->data[priv->sreg_proto_max];
+               range.min_proto.all = (__force __be16)nft_reg_load16(
+                       &regs->data[priv->sreg_proto_min]);
+               range.max_proto.all = (__force __be16)nft_reg_load16(
+                       &regs->data[priv->sreg_proto_max]);
        }
        regs->verdict.code = nf_nat_masquerade_ipv6(pkt->skb, &range,
                                                    nft_out(pkt));
index f5ac080fc0849b0f65751458432cf4e693353c8a..a27e424f690d699fafc5f2a7135637f36fb66388 100644 (file)
@@ -26,10 +26,10 @@ static void nft_redir_ipv6_eval(const struct nft_expr *expr,
 
        memset(&range, 0, sizeof(range));
        if (priv->sreg_proto_min) {
-               range.min_proto.all =
-                       *(__be16 *)&regs->data[priv->sreg_proto_min],
-               range.max_proto.all =
-                       *(__be16 *)&regs->data[priv->sreg_proto_max],
+               range.min_proto.all = (__force __be16)nft_reg_load16(
+                       &regs->data[priv->sreg_proto_min]);
+               range.max_proto.all = (__force __be16)nft_reg_load16(
+                       &regs->data[priv->sreg_proto_max]);
                range.flags |= NF_NAT_RANGE_PROTO_SPECIFIED;
        }
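
Both the masquerade and redirect hunks replace the raw *(__be16 *) cast with nft_reg_load16(), so a 16-bit value stored into a 32-bit register slot is read back through a matching 16-bit view on little- and big-endian machines alike. A sketch of such paired store/load helpers, using memcpy to sidestep the aliasing cast (illustrative stand-ins, not the kernel's definitions):

/* A 16-bit value kept in a u32 register slot must be stored and loaded
 * through the same view of that slot; pairing the helpers keeps the
 * round trip endian-safe.
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

static void reg_store16(uint32_t *dreg, uint16_t val)
{
        *dreg = 0;                       /* clear the whole register */
        memcpy(dreg, &val, sizeof(val)); /* 16-bit value at the base */
}

static uint16_t reg_load16(const uint32_t *sreg)
{
        uint16_t val;

        memcpy(&val, sreg, sizeof(val)); /* read back the same 16 bits */
        return val;
}

int main(void)
{
        uint32_t reg;

        reg_store16(&reg, 0xabcd);
        printf("0x%x\n", reg_load16(&reg));     /* 0xabcd on any endianness */
        return 0;
}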
 
index e3770abe688a3a9059456fe9195adbfcdfb73157..b5d54d4f995c0f4bade2e3f1c4def9616252ca55 100644 (file)
@@ -26,7 +26,7 @@
 #include <net/protocol.h>
 
 #if IS_ENABLED(CONFIG_IPV6)
-const struct inet6_protocol __rcu *inet6_protos[MAX_INET_PROTOS] __read_mostly;
+struct inet6_protocol __rcu *inet6_protos[MAX_INET_PROTOS] __read_mostly;
 EXPORT_SYMBOL(inet6_protos);
 
 int inet6_add_protocol(const struct inet6_protocol *prot, unsigned char protocol)
index 229bfcc451ef5004e9e9d14c071937c1b9658711..9db1418993f2b8a5b4194895f243441033d4729a 100644 (file)
@@ -3299,7 +3299,6 @@ static size_t rt6_nlmsg_size(struct rt6_info *rt)
                nexthop_len = nla_total_size(0)  /* RTA_MULTIPATH */
                            + NLA_ALIGN(sizeof(struct rtnexthop))
                            + nla_total_size(16) /* RTA_GATEWAY */
-                           + nla_total_size(4)  /* RTA_OIF */
                            + lwtunnel_get_encap_size(rt->dst.lwtstate);
 
                nexthop_len *= rt->rt6i_nsiblings;
@@ -3323,7 +3322,7 @@ static size_t rt6_nlmsg_size(struct rt6_info *rt)
 }
 
 static int rt6_nexthop_info(struct sk_buff *skb, struct rt6_info *rt,
-                           unsigned int *flags)
+                           unsigned int *flags, bool skip_oif)
 {
        if (!netif_running(rt->dst.dev) || !netif_carrier_ok(rt->dst.dev)) {
                *flags |= RTNH_F_LINKDOWN;
@@ -3336,7 +3335,8 @@ static int rt6_nexthop_info(struct sk_buff *skb, struct rt6_info *rt,
                        goto nla_put_failure;
        }
 
-       if (rt->dst.dev &&
+       /* not needed for multipath encoding b/c it has a rtnexthop struct */
+       if (!skip_oif && rt->dst.dev &&
            nla_put_u32(skb, RTA_OIF, rt->dst.dev->ifindex))
                goto nla_put_failure;
 
@@ -3350,6 +3350,7 @@ nla_put_failure:
        return -EMSGSIZE;
 }
 
+/* add multipath next hop */
 static int rt6_add_nexthop(struct sk_buff *skb, struct rt6_info *rt)
 {
        struct rtnexthop *rtnh;
@@ -3362,7 +3363,7 @@ static int rt6_add_nexthop(struct sk_buff *skb, struct rt6_info *rt)
        rtnh->rtnh_hops = 0;
        rtnh->rtnh_ifindex = rt->dst.dev ? rt->dst.dev->ifindex : 0;
 
-       if (rt6_nexthop_info(skb, rt, &flags) < 0)
+       if (rt6_nexthop_info(skb, rt, &flags, true) < 0)
                goto nla_put_failure;
 
        rtnh->rtnh_flags = flags;
@@ -3422,6 +3423,8 @@ static int rt6_fill_node(struct net *net,
        }
        else if (rt->rt6i_flags & RTF_LOCAL)
                rtm->rtm_type = RTN_LOCAL;
+       else if (rt->rt6i_flags & RTF_ANYCAST)
+               rtm->rtm_type = RTN_ANYCAST;
        else if (rt->dst.dev && (rt->dst.dev->flags & IFF_LOOPBACK))
                rtm->rtm_type = RTN_LOCAL;
        else
@@ -3515,7 +3518,7 @@ static int rt6_fill_node(struct net *net,
 
                nla_nest_end(skb, mp);
        } else {
-               if (rt6_nexthop_info(skb, rt, &rtm->rtm_flags) < 0)
+               if (rt6_nexthop_info(skb, rt, &rtm->rtm_flags, false) < 0)
                        goto nla_put_failure;
        }
 
index 85582257d3af88146d435ef6c2e98f0bbef94a41..a644aaecdfd30cf629625127e422a4fe150821b7 100644 (file)
 #include <linux/seg6_iptunnel.h>
 #include <net/addrconf.h>
 #include <net/ip6_route.h>
-#ifdef CONFIG_DST_CACHE
 #include <net/dst_cache.h>
-#endif
 #ifdef CONFIG_IPV6_SEG6_HMAC
 #include <net/seg6_hmac.h>
 #endif
 
 struct seg6_lwt {
-#ifdef CONFIG_DST_CACHE
        struct dst_cache cache;
-#endif
        struct seg6_iptunnel_encap tuninfo[0];
 };
 
@@ -105,7 +101,7 @@ static int seg6_do_srh_encap(struct sk_buff *skb, struct ipv6_sr_hdr *osrh)
        hdrlen = (osrh->hdrlen + 1) << 3;
        tot_len = hdrlen + sizeof(*hdr);
 
-       err = pskb_expand_head(skb, tot_len, 0, GFP_ATOMIC);
+       err = skb_cow_head(skb, tot_len);
        if (unlikely(err))
                return err;
 
@@ -156,7 +152,7 @@ static int seg6_do_srh_inline(struct sk_buff *skb, struct ipv6_sr_hdr *osrh)
 
        hdrlen = (osrh->hdrlen + 1) << 3;
 
-       err = pskb_expand_head(skb, hdrlen, 0, GFP_ATOMIC);
+       err = skb_cow_head(skb, hdrlen);
        if (unlikely(err))
                return err;
 
@@ -237,6 +233,9 @@ static int seg6_do_srh(struct sk_buff *skb)
 
 static int seg6_input(struct sk_buff *skb)
 {
+       struct dst_entry *orig_dst = skb_dst(skb);
+       struct dst_entry *dst = NULL;
+       struct seg6_lwt *slwt;
        int err;
 
        err = seg6_do_srh(skb);
@@ -245,8 +244,26 @@ static int seg6_input(struct sk_buff *skb)
                return err;
        }
 
+       slwt = seg6_lwt_lwtunnel(orig_dst->lwtstate);
+
+       preempt_disable();
+       dst = dst_cache_get(&slwt->cache);
+       preempt_enable();
+
        skb_dst_drop(skb);
-       ip6_route_input(skb);
+
+       if (!dst) {
+               ip6_route_input(skb);
+               dst = skb_dst(skb);
+               if (!dst->error) {
+                       preempt_disable();
+                       dst_cache_set_ip6(&slwt->cache, dst,
+                                         &ipv6_hdr(skb)->saddr);
+                       preempt_enable();
+               }
+       } else {
+               skb_dst_set(skb, dst);
+       }
 
        return dst_input(skb);
 }
@@ -264,11 +281,9 @@ static int seg6_output(struct net *net, struct sock *sk, struct sk_buff *skb)
 
        slwt = seg6_lwt_lwtunnel(orig_dst->lwtstate);
 
-#ifdef CONFIG_DST_CACHE
        preempt_disable();
        dst = dst_cache_get(&slwt->cache);
        preempt_enable();
-#endif
 
        if (unlikely(!dst)) {
                struct ipv6hdr *hdr = ipv6_hdr(skb);
@@ -287,11 +302,9 @@ static int seg6_output(struct net *net, struct sock *sk, struct sk_buff *skb)
                        goto drop;
                }
 
-#ifdef CONFIG_DST_CACHE
                preempt_disable();
                dst_cache_set_ip6(&slwt->cache, dst, &fl6.saddr);
                preempt_enable();
-#endif
        }
 
        skb_dst_drop(skb);
@@ -355,13 +368,11 @@ static int seg6_build_state(struct nlattr *nla,
 
        slwt = seg6_lwt_lwtunnel(newts);
 
-#ifdef CONFIG_DST_CACHE
        err = dst_cache_init(&slwt->cache, GFP_KERNEL);
        if (err) {
                kfree(newts);
                return err;
        }
-#endif
 
        memcpy(&slwt->tuninfo, tuninfo, tuninfo_len);
 
@@ -375,12 +386,10 @@ static int seg6_build_state(struct nlattr *nla,
        return 0;
 }
 
-#ifdef CONFIG_DST_CACHE
 static void seg6_destroy_state(struct lwtunnel_state *lwt)
 {
        dst_cache_destroy(&seg6_lwt_lwtunnel(lwt)->cache);
 }
-#endif
 
 static int seg6_fill_encap_info(struct sk_buff *skb,
                                struct lwtunnel_state *lwtstate)
@@ -414,9 +423,7 @@ static int seg6_encap_cmp(struct lwtunnel_state *a, struct lwtunnel_state *b)
 
 static const struct lwtunnel_encap_ops seg6_iptun_ops = {
        .build_state = seg6_build_state,
-#ifdef CONFIG_DST_CACHE
        .destroy_state = seg6_destroy_state,
-#endif
        .output = seg6_output,
        .input = seg6_input,
        .fill_encap = seg6_fill_encap_info,
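
Beyond dropping the CONFIG_DST_CACHE guards, the seg6_input() hunk adds the same cache-then-lookup flow seg6_output() already had: try the per-tunnel dst_cache first, fall back to ip6_route_input() on a miss, and store the result for the next packet. A single-threaded sketch of that flow (the kernel wraps the per-CPU cache access in preempt_disable(); names illustrative):

/* Consult the cache first; only on a miss run the expensive lookup and
 * remember its result.
 */
#include <stdio.h>

struct route { int id; };

static struct route table[] = { { 1 }, { 2 } };
static struct route *cache;             /* stands in for the dst_cache */

static struct route *route_lookup(int key)
{
        printf("slow lookup for %d\n", key);
        return &table[key % 2];
}

static struct route *input(int key)
{
        struct route *rt = cache;       /* dst_cache_get() */

        if (!rt) {
                rt = route_lookup(key); /* ip6_route_input() */
                cache = rt;             /* dst_cache_set_ip6() */
        }
        return rt;
}

int main(void)
{
        input(1);       /* miss: slow lookup runs */
        input(1);       /* hit: served from the cache */
        return 0;
}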
index 60a5295a7de6e877f5ab80ef32314c573c289d81..8e42e8f54b705ed8780890c7434feeff1055599a 100644 (file)
@@ -101,12 +101,12 @@ static void inet6_sk_rx_dst_set(struct sock *sk, const struct sk_buff *skb)
        }
 }
 
-static u32 tcp_v6_init_sequence(const struct sk_buff *skb, u32 *tsoff)
+static u32 tcp_v6_init_seq_and_tsoff(const struct sk_buff *skb, u32 *tsoff)
 {
-       return secure_tcpv6_sequence_number(ipv6_hdr(skb)->daddr.s6_addr32,
-                                           ipv6_hdr(skb)->saddr.s6_addr32,
-                                           tcp_hdr(skb)->dest,
-                                           tcp_hdr(skb)->source, tsoff);
+       return secure_tcpv6_seq_and_tsoff(ipv6_hdr(skb)->daddr.s6_addr32,
+                                         ipv6_hdr(skb)->saddr.s6_addr32,
+                                         tcp_hdr(skb)->dest,
+                                         tcp_hdr(skb)->source, tsoff);
 }
 
 static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
@@ -265,11 +265,6 @@ static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
        sk->sk_gso_type = SKB_GSO_TCPV6;
        ip6_dst_store(sk, dst, NULL, NULL);
 
-       if (tcp_death_row->sysctl_tw_recycle &&
-           !tp->rx_opt.ts_recent_stamp &&
-           ipv6_addr_equal(&fl6.daddr, &sk->sk_v6_daddr))
-               tcp_fetch_timewait_stamp(sk, dst);
-
        icsk->icsk_ext_hdr_len = 0;
        if (opt)
                icsk->icsk_ext_hdr_len = opt->opt_flen +
@@ -287,11 +282,11 @@ static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
        sk_set_txhash(sk);
 
        if (likely(!tp->repair)) {
-               seq = secure_tcpv6_sequence_number(np->saddr.s6_addr32,
-                                                  sk->sk_v6_daddr.s6_addr32,
-                                                  inet->inet_sport,
-                                                  inet->inet_dport,
-                                                  &tp->tsoffset);
+               seq = secure_tcpv6_seq_and_tsoff(np->saddr.s6_addr32,
+                                                sk->sk_v6_daddr.s6_addr32,
+                                                inet->inet_sport,
+                                                inet->inet_dport,
+                                                &tp->tsoffset);
                if (!tp->write_seq)
                        tp->write_seq = seq;
        }
@@ -391,10 +386,12 @@ static void tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
        np = inet6_sk(sk);
 
        if (type == NDISC_REDIRECT) {
-               struct dst_entry *dst = __sk_dst_check(sk, np->dst_cookie);
+               if (!sock_owned_by_user(sk)) {
+                       struct dst_entry *dst = __sk_dst_check(sk, np->dst_cookie);
 
-               if (dst)
-                       dst->ops->redirect(dst, sk, skb);
+                       if (dst)
+                               dst->ops->redirect(dst, sk, skb);
+               }
                goto out;
        }
 
@@ -725,11 +722,8 @@ static void tcp_v6_init_req(struct request_sock *req,
 
 static struct dst_entry *tcp_v6_route_req(const struct sock *sk,
                                          struct flowi *fl,
-                                         const struct request_sock *req,
-                                         bool *strict)
+                                         const struct request_sock *req)
 {
-       if (strict)
-               *strict = true;
        return inet6_csk_route_req(sk, &fl->u.ip6, req, IPPROTO_TCP);
 }
 
@@ -755,7 +749,7 @@ static const struct tcp_request_sock_ops tcp_request_sock_ipv6_ops = {
        .cookie_init_seq =      cookie_v6_init_sequence,
 #endif
        .route_req      =       tcp_v6_route_req,
-       .init_seq       =       tcp_v6_init_sequence,
+       .init_seq_tsoff =       tcp_v6_init_seq_and_tsoff,
        .send_synack    =       tcp_v6_send_synack,
 };
 
@@ -1299,8 +1293,6 @@ static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
                        goto discard;
 
                if (nsk != sk) {
-                       sock_rps_save_rxhash(nsk, skb);
-                       sk_mark_napi_id(nsk, skb);
                        if (tcp_child_process(sk, nsk, skb))
                                goto reset;
                        if (opt_skb)
@@ -1931,8 +1923,9 @@ struct proto tcpv6_prot = {
        .diag_destroy           = tcp_abort,
 };
 
-static const struct inet6_protocol tcpv6_protocol = {
+static struct inet6_protocol tcpv6_protocol = {
        .early_demux    =       tcp_v6_early_demux,
+       .early_demux_handler =  tcp_v6_early_demux,
        .handler        =       tcp_v6_rcv,
        .err_handler    =       tcp_v6_err,
        .flags          =       INET6_PROTO_NOPOLICY|INET6_PROTO_FINAL,
index 4e4c401e3bc69020deaa4af1c10633288faedf13..fd4b1c98a47230b94641c31fe3213b3dff6ac915 100644 (file)
@@ -864,6 +864,64 @@ discard:
        return 0;
 }
 
+static struct sock *__udp6_lib_demux_lookup(struct net *net,
+                       __be16 loc_port, const struct in6_addr *loc_addr,
+                       __be16 rmt_port, const struct in6_addr *rmt_addr,
+                       int dif)
+{
+       struct sock *sk;
+
+       rcu_read_lock();
+       sk = __udp6_lib_lookup(net, rmt_addr, rmt_port, loc_addr, loc_port,
+                              dif, &udp_table, NULL);
+       if (sk && !atomic_inc_not_zero(&sk->sk_refcnt))
+               sk = NULL;
+       rcu_read_unlock();
+
+       return sk;
+}
+
+static void udp_v6_early_demux(struct sk_buff *skb)
+{
+       struct net *net = dev_net(skb->dev);
+       const struct udphdr *uh;
+       struct sock *sk;
+       struct dst_entry *dst;
+       int dif = skb->dev->ifindex;
+
+       if (!pskb_may_pull(skb, skb_transport_offset(skb) +
+           sizeof(struct udphdr)))
+               return;
+
+       uh = udp_hdr(skb);
+
+       if (skb->pkt_type == PACKET_HOST)
+               sk = __udp6_lib_demux_lookup(net, uh->dest,
+                                            &ipv6_hdr(skb)->daddr,
+                                            uh->source, &ipv6_hdr(skb)->saddr,
+                                            dif);
+       else
+               return;
+
+       if (!sk)
+               return;
+
+       skb->sk = sk;
+       skb->destructor = sock_efree;
+       dst = READ_ONCE(sk->sk_rx_dst);
+
+       if (dst)
+               dst = dst_check(dst, inet6_sk(sk)->rx_dst_cookie);
+       if (dst) {
+               if (dst->flags & DST_NOCACHE) {
+                       if (likely(atomic_inc_not_zero(&dst->__refcnt)))
+                               skb_dst_set(skb, dst);
+               } else {
+                       skb_dst_set_noref(skb, dst);
+               }
+       }
+}
+
 static __inline__ int udpv6_rcv(struct sk_buff *skb)
 {
        return __udp6_lib_rcv(skb, &udp_table, IPPROTO_UDP);
@@ -1035,6 +1093,7 @@ int udpv6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
        ipc6.hlimit = -1;
        ipc6.tclass = -1;
        ipc6.dontfrag = -1;
+       sockc.tsflags = sk->sk_tsflags;
 
        /* destination address check */
        if (sin6) {
@@ -1159,7 +1218,6 @@ do_udp_sendmsg:
 
        fl6.flowi6_mark = sk->sk_mark;
        fl6.flowi6_uid = sk->sk_uid;
-       sockc.tsflags = sk->sk_tsflags;
 
        if (msg->msg_controllen) {
                opt = &opt_space;
@@ -1378,7 +1436,9 @@ int compat_udpv6_getsockopt(struct sock *sk, int level, int optname,
 }
 #endif
 
-static const struct inet6_protocol udpv6_protocol = {
+static struct inet6_protocol udpv6_protocol = {
+       .early_demux    =       udp_v6_early_demux,
+       .early_demux_handler =  udp_v6_early_demux,
        .handler        =       udpv6_rcv,
        .err_handler    =       udpv6_err,
        .flags          =       INET6_PROTO_NOPOLICY|INET6_PROTO_FINAL,
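
__udp6_lib_demux_lookup() runs under rcu_read_lock() and takes its socket reference with atomic_inc_not_zero(), so a socket whose refcount has already hit zero is treated as dead rather than resurrected. A C11-atomics sketch of that take-a-reference-only-if-still-live idiom (illustrative names):

/* Bump the refcount only if it is still non-zero; a zero count means
 * the object is already on its way to being freed.
 */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct sock { atomic_int refcnt; };

static bool get_ref_not_zero(struct sock *sk)
{
        int old = atomic_load(&sk->refcnt);

        while (old != 0) {
                if (atomic_compare_exchange_weak(&sk->refcnt, &old, old + 1))
                        return true;    /* reference taken */
        }
        return false;                   /* object is being freed */
}

int main(void)
{
        struct sock live = { 1 }, dying = { 0 };

        printf("live:  %d\n", get_ref_not_zero(&live));   /* 1 */
        printf("dying: %d\n", get_ref_not_zero(&dying));  /* 0 */
        return 0;
}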
index 81adc29a448dc5be56b96ddd5c42321417371d37..8d77ad5cadaff3aa1feb18f168e779c5a6e7f917 100644 (file)
@@ -828,7 +828,8 @@ out:
  *    Wait for incoming connection
  *
  */
-static int irda_accept(struct socket *sock, struct socket *newsock, int flags)
+static int irda_accept(struct socket *sock, struct socket *newsock, int flags,
+                      bool kern)
 {
        struct sock *sk = sock->sk;
        struct irda_sock *new, *self = irda_sk(sk);
@@ -836,7 +837,7 @@ static int irda_accept(struct socket *sock, struct socket *newsock, int flags)
        struct sk_buff *skb = NULL;
        int err;
 
-       err = irda_create(sock_net(sk), newsock, sk->sk_protocol, 0);
+       err = irda_create(sock_net(sk), newsock, sk->sk_protocol, kern);
        if (err)
                return err;
 
index 89bbde1081ce5eb56c0c6a1c7c18b030f3de1198..84de7b6326dcdf7fcf0d8cb73f738d9c21c2f9fe 100644 (file)
@@ -938,7 +938,7 @@ done:
 
 /* Accept a pending connection */
 static int iucv_sock_accept(struct socket *sock, struct socket *newsock,
-                           int flags)
+                           int flags, bool kern)
 {
        DECLARE_WAITQUEUE(wait, current);
        struct sock *sk = sock->sk, *nsk;
index 309062f3debe298c1cf7666f77505f8d353d76d8..31762f76cdb5f2a3ec322135068402be532218ed 100644 (file)
@@ -1687,7 +1687,7 @@ static int kcm_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
                struct kcm_attach info;
 
                if (copy_from_user(&info, (void __user *)arg, sizeof(info)))
-                       err = -EFAULT;
+                       return -EFAULT;
 
                err = kcm_attach_ioctl(sock, &info);
 
@@ -1697,7 +1697,7 @@ static int kcm_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
                struct kcm_unattach info;
 
                if (copy_from_user(&info, (void __user *)arg, sizeof(info)))
-                       err = -EFAULT;
+                       return -EFAULT;
 
                err = kcm_unattach_ioctl(sock, &info);
 
@@ -1708,7 +1708,7 @@ static int kcm_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
                struct socket *newsock = NULL;
 
                if (copy_from_user(&info, (void __user *)arg, sizeof(info)))
-                       err = -EFAULT;
+                       return -EFAULT;
 
                err = kcm_clone(sock, &info, &newsock);
 
index c6252ed42c1de65dee149d7d869b62b96616e22a..60cf2fb78d458bdbb7253e6a557845bb6a465dc1 100644 (file)
@@ -3792,7 +3792,6 @@ static inline void pfkey_exit_proc(struct net *net)
 
 static struct xfrm_mgr pfkeyv2_mgr =
 {
-       .id             = "pfkeyv2",
        .notify         = pfkey_send_notify,
        .acquire        = pfkey_send_acquire,
        .compile_policy = pfkey_compile_policy,
index 8adab6335ced9f1018318094be20c132a70f8475..fa0342574b8986ad98f458febe8e3e6314171eae 100644 (file)
@@ -120,7 +120,7 @@ static inline struct l2tp_tunnel *l2tp_tunnel(struct sock *sk)
        return sk->sk_user_data;
 }
 
-static inline struct l2tp_net *l2tp_pernet(struct net *net)
+static inline struct l2tp_net *l2tp_pernet(const struct net *net)
 {
        BUG_ON(!net);
 
@@ -217,27 +217,6 @@ static void l2tp_tunnel_sock_put(struct sock *sk)
        sock_put(sk);
 }
 
-/* Lookup a session by id in the global session list
- */
-static struct l2tp_session *l2tp_session_find_2(struct net *net, u32 session_id)
-{
-       struct l2tp_net *pn = l2tp_pernet(net);
-       struct hlist_head *session_list =
-               l2tp_session_id_hash_2(pn, session_id);
-       struct l2tp_session *session;
-
-       rcu_read_lock_bh();
-       hlist_for_each_entry_rcu(session, session_list, global_hlist) {
-               if (session->session_id == session_id) {
-                       rcu_read_unlock_bh();
-                       return session;
-               }
-       }
-       rcu_read_unlock_bh();
-
-       return NULL;
-}
-
 /* Session hash list.
  * The session_id SHOULD be random according to RFC2661, but several
  * L2TP implementations (Cisco and Microsoft) use incrementing
@@ -250,25 +229,46 @@ l2tp_session_id_hash(struct l2tp_tunnel *tunnel, u32 session_id)
        return &tunnel->session_hlist[hash_32(session_id, L2TP_HASH_BITS)];
 }
 
-/* Lookup a session by id
+/* Lookup a session. A new reference is held on the returned session.
+ * Optionally calls session->ref() too if do_ref is true.
  */
-struct l2tp_session *l2tp_session_find(struct net *net, struct l2tp_tunnel *tunnel, u32 session_id)
+struct l2tp_session *l2tp_session_get(const struct net *net,
+                                     struct l2tp_tunnel *tunnel,
+                                     u32 session_id, bool do_ref)
 {
        struct hlist_head *session_list;
        struct l2tp_session *session;
 
-       /* In L2TPv3, session_ids are unique over all tunnels and we
-        * sometimes need to look them up before we know the
-        * tunnel.
-        */
-       if (tunnel == NULL)
-               return l2tp_session_find_2(net, session_id);
+       if (!tunnel) {
+               struct l2tp_net *pn = l2tp_pernet(net);
+
+               session_list = l2tp_session_id_hash_2(pn, session_id);
+
+               rcu_read_lock_bh();
+               hlist_for_each_entry_rcu(session, session_list, global_hlist) {
+                       if (session->session_id == session_id) {
+                               l2tp_session_inc_refcount(session);
+                               if (do_ref && session->ref)
+                                       session->ref(session);
+                               rcu_read_unlock_bh();
+
+                               return session;
+                       }
+               }
+               rcu_read_unlock_bh();
+
+               return NULL;
+       }
 
        session_list = l2tp_session_id_hash(tunnel, session_id);
        read_lock_bh(&tunnel->hlist_lock);
        hlist_for_each_entry(session, session_list, hlist) {
                if (session->session_id == session_id) {
+                       l2tp_session_inc_refcount(session);
+                       if (do_ref && session->ref)
+                               session->ref(session);
                        read_unlock_bh(&tunnel->hlist_lock);
+
                        return session;
                }
        }
@@ -276,9 +276,10 @@ struct l2tp_session *l2tp_session_find(struct net *net, struct l2tp_tunnel *tunn
 
        return NULL;
 }
-EXPORT_SYMBOL_GPL(l2tp_session_find);
+EXPORT_SYMBOL_GPL(l2tp_session_get);
 
-struct l2tp_session *l2tp_session_find_nth(struct l2tp_tunnel *tunnel, int nth)
+struct l2tp_session *l2tp_session_get_nth(struct l2tp_tunnel *tunnel, int nth,
+                                         bool do_ref)
 {
        int hash;
        struct l2tp_session *session;
@@ -288,6 +289,9 @@ struct l2tp_session *l2tp_session_find_nth(struct l2tp_tunnel *tunnel, int nth)
        for (hash = 0; hash < L2TP_HASH_SIZE; hash++) {
                hlist_for_each_entry(session, &tunnel->session_hlist[hash], hlist) {
                        if (++count > nth) {
+                               l2tp_session_inc_refcount(session);
+                               if (do_ref && session->ref)
+                                       session->ref(session);
                                read_unlock_bh(&tunnel->hlist_lock);
                                return session;
                        }
@@ -298,12 +302,14 @@ struct l2tp_session *l2tp_session_find_nth(struct l2tp_tunnel *tunnel, int nth)
 
        return NULL;
 }
-EXPORT_SYMBOL_GPL(l2tp_session_find_nth);
+EXPORT_SYMBOL_GPL(l2tp_session_get_nth);
 
 /* Lookup a session by interface name.
  * This is very inefficient but is only used by management interfaces.
  */
-struct l2tp_session *l2tp_session_find_by_ifname(struct net *net, char *ifname)
+struct l2tp_session *l2tp_session_get_by_ifname(const struct net *net,
+                                               const char *ifname,
+                                               bool do_ref)
 {
        struct l2tp_net *pn = l2tp_pernet(net);
        int hash;
@@ -313,7 +319,11 @@ struct l2tp_session *l2tp_session_find_by_ifname(struct net *net, char *ifname)
        for (hash = 0; hash < L2TP_HASH_SIZE_2; hash++) {
                hlist_for_each_entry_rcu(session, &pn->l2tp_session_hlist[hash], global_hlist) {
                        if (!strcmp(session->ifname, ifname)) {
+                               l2tp_session_inc_refcount(session);
+                               if (do_ref && session->ref)
+                                       session->ref(session);
                                rcu_read_unlock_bh();
+
                                return session;
                        }
                }
@@ -323,11 +333,53 @@ struct l2tp_session *l2tp_session_find_by_ifname(struct net *net, char *ifname)
 
        return NULL;
 }
-EXPORT_SYMBOL_GPL(l2tp_session_find_by_ifname);
+EXPORT_SYMBOL_GPL(l2tp_session_get_by_ifname);
+
+static int l2tp_session_add_to_tunnel(struct l2tp_tunnel *tunnel,
+                                     struct l2tp_session *session)
+{
+       struct l2tp_session *session_walk;
+       struct hlist_head *g_head;
+       struct hlist_head *head;
+       struct l2tp_net *pn;
+
+       head = l2tp_session_id_hash(tunnel, session->session_id);
+
+       write_lock_bh(&tunnel->hlist_lock);
+       hlist_for_each_entry(session_walk, head, hlist)
+               if (session_walk->session_id == session->session_id)
+                       goto exist;
+
+       if (tunnel->version == L2TP_HDR_VER_3) {
+               pn = l2tp_pernet(tunnel->l2tp_net);
+               g_head = l2tp_session_id_hash_2(l2tp_pernet(tunnel->l2tp_net),
+                                               session->session_id);
+
+               spin_lock_bh(&pn->l2tp_session_hlist_lock);
+               hlist_for_each_entry(session_walk, g_head, global_hlist)
+                       if (session_walk->session_id == session->session_id)
+                               goto exist_glob;
+
+               hlist_add_head_rcu(&session->global_hlist, g_head);
+               spin_unlock_bh(&pn->l2tp_session_hlist_lock);
+       }
+
+       hlist_add_head(&session->hlist, head);
+       write_unlock_bh(&tunnel->hlist_lock);
+
+       return 0;
+
+exist_glob:
+       spin_unlock_bh(&pn->l2tp_session_hlist_lock);
+exist:
+       write_unlock_bh(&tunnel->hlist_lock);
+
+       return -EEXIST;
+}
 
 /* Lookup a tunnel by id
  */
-struct l2tp_tunnel *l2tp_tunnel_find(struct net *net, u32 tunnel_id)
+struct l2tp_tunnel *l2tp_tunnel_find(const struct net *net, u32 tunnel_id)
 {
        struct l2tp_tunnel *tunnel;
        struct l2tp_net *pn = l2tp_pernet(net);
@@ -345,7 +397,7 @@ struct l2tp_tunnel *l2tp_tunnel_find(struct net *net, u32 tunnel_id)
 }
 EXPORT_SYMBOL_GPL(l2tp_tunnel_find);
 
-struct l2tp_tunnel *l2tp_tunnel_find_nth(struct net *net, int nth)
+struct l2tp_tunnel *l2tp_tunnel_find_nth(const struct net *net, int nth)
 {
        struct l2tp_net *pn = l2tp_pernet(net);
        struct l2tp_tunnel *tunnel;
@@ -633,6 +685,9 @@ discard:
  * a data (not control) frame before coming here. Fields up to the
  * session-id have already been parsed and ptr points to the data
  * after the session-id.
+ *
+ * session->ref() must have been called prior to l2tp_recv_common().
+ * session->deref() will be called automatically after skb is processed.
  */
 void l2tp_recv_common(struct l2tp_session *session, struct sk_buff *skb,
                      unsigned char *ptr, unsigned char *optr, u16 hdrflags,
@@ -642,14 +697,6 @@ void l2tp_recv_common(struct l2tp_session *session, struct sk_buff *skb,
        int offset;
        u32 ns, nr;
 
-       /* The ref count is increased since we now hold a pointer to
-        * the session. Take care to decrement the refcnt when exiting
-        * this function from now on...
-        */
-       l2tp_session_inc_refcount(session);
-       if (session->ref)
-               (*session->ref)(session);
-
        /* Parse and check optional cookie */
        if (session->peer_cookie_len > 0) {
                if (memcmp(ptr, &session->peer_cookie[0], session->peer_cookie_len)) {
@@ -802,8 +849,6 @@ void l2tp_recv_common(struct l2tp_session *session, struct sk_buff *skb,
        /* Try to dequeue as many skbs from reorder_q as we can. */
        l2tp_recv_dequeue(session);
 
-       l2tp_session_dec_refcount(session);
-
        return;
 
 discard:
@@ -812,8 +857,6 @@ discard:
 
        if (session->deref)
                (*session->deref)(session);
-
-       l2tp_session_dec_refcount(session);
 }
 EXPORT_SYMBOL(l2tp_recv_common);
 
@@ -920,8 +963,14 @@ static int l2tp_udp_recv_core(struct l2tp_tunnel *tunnel, struct sk_buff *skb,
        }
 
        /* Find the session context */
-       session = l2tp_session_find(tunnel->l2tp_net, tunnel, session_id);
+       session = l2tp_session_get(tunnel->l2tp_net, tunnel, session_id, true);
        if (!session || !session->recv_skb) {
+               if (session) {
+                       if (session->deref)
+                               session->deref(session);
+                       l2tp_session_dec_refcount(session);
+               }
+
                /* Not found? Pass to userspace to deal with */
                l2tp_info(tunnel, L2TP_MSG_DATA,
                          "%s: no session found (%u/%u). Passing up.\n",
@@ -930,6 +979,7 @@ static int l2tp_udp_recv_core(struct l2tp_tunnel *tunnel, struct sk_buff *skb,
        }
 
        l2tp_recv_common(session, skb, ptr, optr, hdrflags, length, payload_hook);
+       l2tp_session_dec_refcount(session);
 
        return 0;
 
@@ -1738,6 +1788,7 @@ EXPORT_SYMBOL_GPL(l2tp_session_set_header_len);
 struct l2tp_session *l2tp_session_create(int priv_size, struct l2tp_tunnel *tunnel, u32 session_id, u32 peer_session_id, struct l2tp_session_cfg *cfg)
 {
        struct l2tp_session *session;
+       int err;
 
        session = kzalloc(sizeof(struct l2tp_session) + priv_size, GFP_KERNEL);
        if (session != NULL) {
@@ -1793,6 +1844,13 @@ struct l2tp_session *l2tp_session_create(int priv_size, struct l2tp_tunnel *tunn
 
                l2tp_session_set_header_len(session, tunnel->version);
 
+               err = l2tp_session_add_to_tunnel(tunnel, session);
+               if (err) {
+                       kfree(session);
+
+                       return ERR_PTR(err);
+               }
+
                /* Bump the reference count. The session context is deleted
                 * only when this drops to zero.
                 */
@@ -1802,28 +1860,14 @@ struct l2tp_session *l2tp_session_create(int priv_size, struct l2tp_tunnel *tunn
                /* Ensure tunnel socket isn't deleted */
                sock_hold(tunnel->sock);
 
-               /* Add session to the tunnel's hash list */
-               write_lock_bh(&tunnel->hlist_lock);
-               hlist_add_head(&session->hlist,
-                              l2tp_session_id_hash(tunnel, session_id));
-               write_unlock_bh(&tunnel->hlist_lock);
-
-               /* And to the global session list if L2TPv3 */
-               if (tunnel->version != L2TP_HDR_VER_2) {
-                       struct l2tp_net *pn = l2tp_pernet(tunnel->l2tp_net);
-
-                       spin_lock_bh(&pn->l2tp_session_hlist_lock);
-                       hlist_add_head_rcu(&session->global_hlist,
-                                          l2tp_session_id_hash_2(pn, session_id));
-                       spin_unlock_bh(&pn->l2tp_session_hlist_lock);
-               }
-
                /* Ignore management session in session count value */
                if (session->session_id != 0)
                        atomic_inc(&l2tp_session_count);
+
+               return session;
        }
 
-       return session;
+       return ERR_PTR(-ENOMEM);
 }
 EXPORT_SYMBOL_GPL(l2tp_session_create);
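
The new l2tp_session_add_to_tunnel() folds the "does this session id already exist?" check and the hash insertion into one critical section (and does the same for the L2TPv3 global list under its spinlock), closing the window in which two concurrent creators could both pass a separate lookup and then insert duplicates; the callers' racy pre-checks are dropped in the hunks that follow. A pthread sketch of check-and-insert under a single lock (illustrative names):

/* The duplicate check and the insertion share one lock, so no second
 * creator can slip in between them.
 */
#include <pthread.h>
#include <stdio.h>

#define EEXIST 17

struct session { unsigned int id; struct session *next; };

static struct session *hash_head;
static pthread_mutex_t hash_lock = PTHREAD_MUTEX_INITIALIZER;

static int session_add(struct session *s)
{
        struct session *walk;

        pthread_mutex_lock(&hash_lock);
        for (walk = hash_head; walk; walk = walk->next) {
                if (walk->id == s->id) {        /* duplicate: reject */
                        pthread_mutex_unlock(&hash_lock);
                        return -EEXIST;
                }
        }
        s->next = hash_head;                    /* insert while still locked */
        hash_head = s;
        pthread_mutex_unlock(&hash_lock);
        return 0;
}

int main(void)
{
        struct session a = { 7, NULL }, b = { 7, NULL };

        printf("%d %d\n", session_add(&a), session_add(&b)); /* 0 -17 */
        return 0;
}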
 
index aebf281d09eeb31c531eb624bd2ddd78cab8da9b..eec5ad2ebb93c32bca7a57b377f80df8e10b2591 100644 (file)
@@ -230,13 +230,16 @@ out:
        return tunnel;
 }
 
-struct l2tp_session *l2tp_session_find(struct net *net,
-                                      struct l2tp_tunnel *tunnel,
-                                      u32 session_id);
-struct l2tp_session *l2tp_session_find_nth(struct l2tp_tunnel *tunnel, int nth);
-struct l2tp_session *l2tp_session_find_by_ifname(struct net *net, char *ifname);
-struct l2tp_tunnel *l2tp_tunnel_find(struct net *net, u32 tunnel_id);
-struct l2tp_tunnel *l2tp_tunnel_find_nth(struct net *net, int nth);
+struct l2tp_session *l2tp_session_get(const struct net *net,
+                                     struct l2tp_tunnel *tunnel,
+                                     u32 session_id, bool do_ref);
+struct l2tp_session *l2tp_session_get_nth(struct l2tp_tunnel *tunnel, int nth,
+                                         bool do_ref);
+struct l2tp_session *l2tp_session_get_by_ifname(const struct net *net,
+                                               const char *ifname,
+                                               bool do_ref);
+struct l2tp_tunnel *l2tp_tunnel_find(const struct net *net, u32 tunnel_id);
+struct l2tp_tunnel *l2tp_tunnel_find_nth(const struct net *net, int nth);
 
 int l2tp_tunnel_create(struct net *net, int fd, int version, u32 tunnel_id,
                       u32 peer_tunnel_id, struct l2tp_tunnel_cfg *cfg,
index 2d6760a2ae347b96d465e30192ab8a7957258d32..d100aed3d06fb63b8851a00c55350f1728b18599 100644 (file)
@@ -53,7 +53,7 @@ static void l2tp_dfs_next_tunnel(struct l2tp_dfs_seq_data *pd)
 
 static void l2tp_dfs_next_session(struct l2tp_dfs_seq_data *pd)
 {
-       pd->session = l2tp_session_find_nth(pd->tunnel, pd->session_idx);
+       pd->session = l2tp_session_get_nth(pd->tunnel, pd->session_idx, true);
        pd->session_idx++;
 
        if (pd->session == NULL) {
@@ -238,10 +238,14 @@ static int l2tp_dfs_seq_show(struct seq_file *m, void *v)
        }
 
        /* Show the tunnel or session context */
-       if (pd->session == NULL)
+       if (!pd->session) {
                l2tp_dfs_seq_tunnel_show(m, pd->tunnel);
-       else
+       } else {
                l2tp_dfs_seq_session_show(m, pd->session);
+               if (pd->session->deref)
+                       pd->session->deref(pd->session);
+               l2tp_session_dec_refcount(pd->session);
+       }
 
 out:
        return 0;
index 8bf18a5f66e0c465ef3640ae4168c875c4c9e1ed..138566a6312341e24ac3ed4e9d1e840bab1606d3 100644 (file)
@@ -30,6 +30,9 @@
 #include <net/xfrm.h>
 #include <net/net_namespace.h>
 #include <net/netns/generic.h>
+#include <linux/ip.h>
+#include <linux/ipv6.h>
+#include <linux/udp.h>
 
 #include "l2tp_core.h"
 
@@ -204,6 +207,53 @@ static void l2tp_eth_show(struct seq_file *m, void *arg)
 }
 #endif
 
+static void l2tp_eth_adjust_mtu(struct l2tp_tunnel *tunnel,
+                               struct l2tp_session *session,
+                               struct net_device *dev)
+{
+       unsigned int overhead = 0;
+       struct dst_entry *dst;
+       u32 l3_overhead = 0;
+
+       /* if the encap is UDP, account for UDP header size */
+       if (tunnel->encap == L2TP_ENCAPTYPE_UDP) {
+               overhead += sizeof(struct udphdr);
+               dev->needed_headroom += sizeof(struct udphdr);
+       }
+       if (session->mtu != 0) {
+               dev->mtu = session->mtu;
+               dev->needed_headroom += session->hdr_len;
+               return;
+       }
+       l3_overhead = kernel_sock_ip_overhead(tunnel->sock);
+       if (l3_overhead == 0) {
+               /* L3 Overhead couldn't be identified, this could be
+                * because tunnel->sock was NULL or the socket's
+                * address family was not IPv4 or IPv6,
+                * dev mtu stays at 1500.
+                */
+               return;
+       }
+       /* Adjust MTU, factor overhead - underlay L3, overlay L2 hdr
+        * UDP overhead, if any, was already factored in above.
+        */
+       overhead += session->hdr_len + ETH_HLEN + l3_overhead;
+
+       /* If PMTU discovery was enabled, use discovered MTU on L2TP device */
+       dst = sk_dst_get(tunnel->sock);
+       if (dst) {
+               /* dst_mtu will use PMTU if found, else fallback to intf MTU */
+               u32 pmtu = dst_mtu(dst);
+
+               if (pmtu != 0)
+                       dev->mtu = pmtu;
+               dst_release(dst);
+       }
+       session->mtu = dev->mtu - overhead;
+       dev->mtu = session->mtu;
+       dev->needed_headroom += session->hdr_len;
+}
+
 static int l2tp_eth_create(struct net *net, u32 tunnel_id, u32 session_id, u32 peer_session_id, struct l2tp_session_cfg *cfg)
 {
        struct net_device *dev;
@@ -221,12 +271,6 @@ static int l2tp_eth_create(struct net *net, u32 tunnel_id, u32 session_id, u32 p
                goto out;
        }
 
-       session = l2tp_session_find(net, tunnel, session_id);
-       if (session) {
-               rc = -EEXIST;
-               goto out;
-       }
-
        if (cfg->ifname) {
                dev = dev_get_by_name(net, cfg->ifname);
                if (dev) {
@@ -240,8 +284,8 @@ static int l2tp_eth_create(struct net *net, u32 tunnel_id, u32 session_id, u32 p
 
        session = l2tp_session_create(sizeof(*spriv), tunnel, session_id,
                                      peer_session_id, cfg);
-       if (!session) {
-               rc = -ENOMEM;
+       if (IS_ERR(session)) {
+               rc = PTR_ERR(session);
                goto out;
        }
 
@@ -253,12 +297,9 @@ static int l2tp_eth_create(struct net *net, u32 tunnel_id, u32 session_id, u32 p
        }
 
        dev_net_set(dev, net);
-       if (session->mtu == 0)
-               session->mtu = dev->mtu - session->hdr_len;
-       dev->mtu = session->mtu;
-       dev->needed_headroom += session->hdr_len;
        dev->min_mtu = 0;
        dev->max_mtu = ETH_MAX_MTU;
+       l2tp_eth_adjust_mtu(tunnel, session, dev);
 
        priv = netdev_priv(dev);
        priv->dev = dev;
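
l2tp_eth_adjust_mtu() derives the l2tpeth device MTU from the tunnel's path MTU minus every header the tunnel stacks onto the inner frame: the UDP header when the encapsulation is UDP, the L2TP session header, the inner Ethernet header, and the outer L3 header reported by kernel_sock_ip_overhead(). A sketch of just that arithmetic, with illustrative default sizes:

/* Path MTU minus the stacked encapsulation overheads gives the MTU of
 * the virtual Ethernet device.  All sizes here are example defaults.
 */
#include <stdio.h>

#define ETH_HLEN        14      /* inner Ethernet header */

int main(void)
{
        unsigned int pmtu        = 1500; /* dst_mtu() on the tunnel socket */
        unsigned int l3_overhead = 20;   /* outer IPv4 header */
        unsigned int udp_hdr     = 8;    /* only for UDP encapsulation */
        unsigned int l2tp_hdr    = 8;    /* session->hdr_len, e.g. L2TPv2/UDP */

        unsigned int overhead = udp_hdr + l2tp_hdr + ETH_HLEN + l3_overhead;
        unsigned int mtu = pmtu - overhead;

        printf("l2tpeth mtu = %u\n", mtu);      /* 1450 */
        return 0;
}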
index d25038cfd64e1ae5d5819fe1e7049529f4b5a2e4..4d322c1b7233e5b546ff75a585a3603503e076bc 100644 (file)
@@ -143,19 +143,19 @@ static int l2tp_ip_recv(struct sk_buff *skb)
        }
 
        /* Ok, this is a data packet. Lookup the session. */
-       session = l2tp_session_find(net, NULL, session_id);
-       if (session == NULL)
+       session = l2tp_session_get(net, NULL, session_id, true);
+       if (!session)
                goto discard;
 
        tunnel = session->tunnel;
-       if (tunnel == NULL)
-               goto discard;
+       if (!tunnel)
+               goto discard_sess;
 
        /* Trace packet contents, if enabled */
        if (tunnel->debug & L2TP_MSG_DATA) {
                length = min(32u, skb->len);
                if (!pskb_may_pull(skb, length))
-                       goto discard;
+                       goto discard_sess;
 
                /* Point to L2TP header */
                optr = ptr = skb->data;
@@ -165,6 +165,7 @@ static int l2tp_ip_recv(struct sk_buff *skb)
        }
 
        l2tp_recv_common(session, skb, ptr, optr, 0, skb->len, tunnel->recv_payload_hook);
+       l2tp_session_dec_refcount(session);
 
        return 0;
 
@@ -178,9 +179,10 @@ pass_up:
 
        tunnel_id = ntohl(*(__be32 *) &skb->data[4]);
        tunnel = l2tp_tunnel_find(net, tunnel_id);
-       if (tunnel != NULL)
+       if (tunnel) {
                sk = tunnel->sock;
-       else {
+               sock_hold(sk);
+       } else {
                struct iphdr *iph = (struct iphdr *) skb_network_header(skb);
 
                read_lock_bh(&l2tp_ip_lock);
@@ -202,6 +204,12 @@ pass_up:
 
        return sk_receive_skb(sk, skb, 1);
 
+discard_sess:
+       if (session->deref)
+               session->deref(session);
+       l2tp_session_dec_refcount(session);
+       goto discard;
+
 discard_put:
        sock_put(sk);
 
index a4abcbc4c09ae65424a701a1200b7535fa3635ac..88b397c30d86af8d6a22daeb466cedac36aac57e 100644 (file)
@@ -156,19 +156,19 @@ static int l2tp_ip6_recv(struct sk_buff *skb)
        }
 
        /* Ok, this is a data packet. Lookup the session. */
-       session = l2tp_session_find(net, NULL, session_id);
-       if (session == NULL)
+       session = l2tp_session_get(net, NULL, session_id, true);
+       if (!session)
                goto discard;
 
        tunnel = session->tunnel;
-       if (tunnel == NULL)
-               goto discard;
+       if (!tunnel)
+               goto discard_sess;
 
        /* Trace packet contents, if enabled */
        if (tunnel->debug & L2TP_MSG_DATA) {
                length = min(32u, skb->len);
                if (!pskb_may_pull(skb, length))
-                       goto discard;
+                       goto discard_sess;
 
                /* Point to L2TP header */
                optr = ptr = skb->data;
@@ -179,6 +179,8 @@ static int l2tp_ip6_recv(struct sk_buff *skb)
 
        l2tp_recv_common(session, skb, ptr, optr, 0, skb->len,
                         tunnel->recv_payload_hook);
+       l2tp_session_dec_refcount(session);
+
        return 0;
 
 pass_up:
@@ -191,9 +193,10 @@ pass_up:
 
        tunnel_id = ntohl(*(__be32 *) &skb->data[4]);
        tunnel = l2tp_tunnel_find(net, tunnel_id);
-       if (tunnel != NULL)
+       if (tunnel) {
                sk = tunnel->sock;
-       else {
+               sock_hold(sk);
+       } else {
                struct ipv6hdr *iph = ipv6_hdr(skb);
 
                read_lock_bh(&l2tp_ip6_lock);
@@ -215,6 +218,12 @@ pass_up:
 
        return sk_receive_skb(sk, skb, 1);
 
+discard_sess:
+       if (session->deref)
+               session->deref(session);
+       l2tp_session_dec_refcount(session);
+       goto discard;
+
 discard_put:
        sock_put(sk);
 
index 3620fba317863dc59c93c1089faf63451e831aa5..12cfcd0ca807396d18e061e9bcf4c29e760b2563 100644 (file)
@@ -48,7 +48,8 @@ static int l2tp_nl_session_send(struct sk_buff *skb, u32 portid, u32 seq,
 /* Accessed under genl lock */
 static const struct l2tp_nl_cmd_ops *l2tp_nl_cmd_ops[__L2TP_PWTYPE_MAX];
 
-static struct l2tp_session *l2tp_nl_session_find(struct genl_info *info)
+static struct l2tp_session *l2tp_nl_session_get(struct genl_info *info,
+                                               bool do_ref)
 {
        u32 tunnel_id;
        u32 session_id;
@@ -59,14 +60,15 @@ static struct l2tp_session *l2tp_nl_session_find(struct genl_info *info)
 
        if (info->attrs[L2TP_ATTR_IFNAME]) {
                ifname = nla_data(info->attrs[L2TP_ATTR_IFNAME]);
-               session = l2tp_session_find_by_ifname(net, ifname);
+               session = l2tp_session_get_by_ifname(net, ifname, do_ref);
        } else if ((info->attrs[L2TP_ATTR_SESSION_ID]) &&
                   (info->attrs[L2TP_ATTR_CONN_ID])) {
                tunnel_id = nla_get_u32(info->attrs[L2TP_ATTR_CONN_ID]);
                session_id = nla_get_u32(info->attrs[L2TP_ATTR_SESSION_ID]);
                tunnel = l2tp_tunnel_find(net, tunnel_id);
                if (tunnel)
-                       session = l2tp_session_find(net, tunnel, session_id);
+                       session = l2tp_session_get(net, tunnel, session_id,
+                                                  do_ref);
        }
 
        return session;
@@ -519,11 +521,6 @@ static int l2tp_nl_cmd_session_create(struct sk_buff *skb, struct genl_info *inf
                goto out;
        }
        session_id = nla_get_u32(info->attrs[L2TP_ATTR_SESSION_ID]);
-       session = l2tp_session_find(net, tunnel, session_id);
-       if (session) {
-               ret = -EEXIST;
-               goto out;
-       }
 
        if (!info->attrs[L2TP_ATTR_PEER_SESSION_ID]) {
                ret = -EINVAL;
@@ -642,10 +639,12 @@ static int l2tp_nl_cmd_session_create(struct sk_buff *skb, struct genl_info *inf
                        session_id, peer_session_id, &cfg);
 
        if (ret >= 0) {
-               session = l2tp_session_find(net, tunnel, session_id);
-               if (session)
+               session = l2tp_session_get(net, tunnel, session_id, false);
+               if (session) {
                        ret = l2tp_session_notify(&l2tp_nl_family, info, session,
                                                  L2TP_CMD_SESSION_CREATE);
+                       l2tp_session_dec_refcount(session);
+               }
        }
 
 out:
@@ -658,7 +657,7 @@ static int l2tp_nl_cmd_session_delete(struct sk_buff *skb, struct genl_info *inf
        struct l2tp_session *session;
        u16 pw_type;
 
-       session = l2tp_nl_session_find(info);
+       session = l2tp_nl_session_get(info, true);
        if (session == NULL) {
                ret = -ENODEV;
                goto out;
@@ -672,6 +671,10 @@ static int l2tp_nl_cmd_session_delete(struct sk_buff *skb, struct genl_info *inf
                if (l2tp_nl_cmd_ops[pw_type] && l2tp_nl_cmd_ops[pw_type]->session_delete)
                        ret = (*l2tp_nl_cmd_ops[pw_type]->session_delete)(session);
 
+       if (session->deref)
+               session->deref(session);
+       l2tp_session_dec_refcount(session);
+
 out:
        return ret;
 }
@@ -681,7 +684,7 @@ static int l2tp_nl_cmd_session_modify(struct sk_buff *skb, struct genl_info *inf
        int ret = 0;
        struct l2tp_session *session;
 
-       session = l2tp_nl_session_find(info);
+       session = l2tp_nl_session_get(info, false);
        if (session == NULL) {
                ret = -ENODEV;
                goto out;
@@ -716,6 +719,8 @@ static int l2tp_nl_cmd_session_modify(struct sk_buff *skb, struct genl_info *inf
        ret = l2tp_session_notify(&l2tp_nl_family, info,
                                  session, L2TP_CMD_SESSION_MODIFY);
 
+       l2tp_session_dec_refcount(session);
+
 out:
        return ret;
 }
@@ -811,29 +816,34 @@ static int l2tp_nl_cmd_session_get(struct sk_buff *skb, struct genl_info *info)
        struct sk_buff *msg;
        int ret;
 
-       session = l2tp_nl_session_find(info);
+       session = l2tp_nl_session_get(info, false);
        if (session == NULL) {
                ret = -ENODEV;
-               goto out;
+               goto err;
        }
 
        msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
        if (!msg) {
                ret = -ENOMEM;
-               goto out;
+               goto err_ref;
        }
 
        ret = l2tp_nl_session_send(msg, info->snd_portid, info->snd_seq,
                                   0, session, L2TP_CMD_SESSION_GET);
        if (ret < 0)
-               goto err_out;
+               goto err_ref_msg;
 
-       return genlmsg_unicast(genl_info_net(info), msg, info->snd_portid);
+       ret = genlmsg_unicast(genl_info_net(info), msg, info->snd_portid);
 
-err_out:
-       nlmsg_free(msg);
+       l2tp_session_dec_refcount(session);
 
-out:
+       return ret;
+
+err_ref_msg:
+       nlmsg_free(msg);
+err_ref:
+       l2tp_session_dec_refcount(session);
+err:
        return ret;
 }
 
@@ -852,7 +862,7 @@ static int l2tp_nl_cmd_session_dump(struct sk_buff *skb, struct netlink_callback
                                goto out;
                }
 
-               session = l2tp_session_find_nth(tunnel, si);
+               session = l2tp_session_get_nth(tunnel, si, false);
                if (session == NULL) {
                        ti++;
                        tunnel = NULL;
@@ -862,8 +872,11 @@ static int l2tp_nl_cmd_session_dump(struct sk_buff *skb, struct netlink_callback
 
                if (l2tp_nl_session_send(skb, NETLINK_CB(cb->skb).portid,
                                         cb->nlh->nlmsg_seq, NLM_F_MULTI,
-                                        session, L2TP_CMD_SESSION_GET) < 0)
+                                        session, L2TP_CMD_SESSION_GET) < 0) {
+                       l2tp_session_dec_refcount(session);
                        break;
+               }
+               l2tp_session_dec_refcount(session);
 
                si++;
        }
index 36cc56fd041871c73796cc0a52241ef3e38483c9..861b255a2d5195ac4155de919154d993bdaddf92 100644 (file)
@@ -450,6 +450,10 @@ static void pppol2tp_session_close(struct l2tp_session *session)
 static void pppol2tp_session_destruct(struct sock *sk)
 {
        struct l2tp_session *session = sk->sk_user_data;
+
+       skb_queue_purge(&sk->sk_receive_queue);
+       skb_queue_purge(&sk->sk_write_queue);
+
        if (session) {
                sk->sk_user_data = NULL;
                BUG_ON(session->magic != L2TP_SESSION_MAGIC);
@@ -488,9 +492,6 @@ static int pppol2tp_release(struct socket *sock)
                l2tp_session_queue_purge(session);
                sock_put(sk);
        }
-       skb_queue_purge(&sk->sk_receive_queue);
-       skb_queue_purge(&sk->sk_write_queue);
-
        release_sock(sk);
 
        /* This will delete the session context via
@@ -582,6 +583,7 @@ static int pppol2tp_connect(struct socket *sock, struct sockaddr *uservaddr,
        int error = 0;
        u32 tunnel_id, peer_tunnel_id;
        u32 session_id, peer_session_id;
+       bool drop_refcnt = false;
        int ver = 2;
        int fd;
 
@@ -683,36 +685,36 @@ static int pppol2tp_connect(struct socket *sock, struct sockaddr *uservaddr,
        if (tunnel->peer_tunnel_id == 0)
                tunnel->peer_tunnel_id = peer_tunnel_id;
 
-       /* Create session if it doesn't already exist. We handle the
-        * case where a session was previously created by the netlink
-        * interface by checking that the session doesn't already have
-        * a socket and its tunnel socket are what we expect. If any
-        * of those checks fail, return EEXIST to the caller.
-        */
-       session = l2tp_session_find(sock_net(sk), tunnel, session_id);
-       if (session == NULL) {
-               /* Default MTU must allow space for UDP/L2TP/PPP
-                * headers.
+       session = l2tp_session_get(sock_net(sk), tunnel, session_id, false);
+       if (session) {
+               drop_refcnt = true;
+               ps = l2tp_session_priv(session);
+
+               /* Using a pre-existing session is fine as long as it hasn't
+                * been connected yet.
                 */
-               cfg.mtu = cfg.mru = 1500 - PPPOL2TP_HEADER_OVERHEAD;
+               if (ps->sock) {
+                       error = -EEXIST;
+                       goto end;
+               }
 
-               /* Allocate and initialize a new session context. */
-               session = l2tp_session_create(sizeof(struct pppol2tp_session),
-                                             tunnel, session_id,
-                                             peer_session_id, &cfg);
-               if (session == NULL) {
-                       error = -ENOMEM;
+               /* consistency checks */
+               if (ps->tunnel_sock != tunnel->sock) {
+                       error = -EEXIST;
                        goto end;
                }
        } else {
-               ps = l2tp_session_priv(session);
-               error = -EEXIST;
-               if (ps->sock != NULL)
-                       goto end;
+               /* Default MTU must allow space for UDP/L2TP/PPP headers */
+               cfg.mtu = 1500 - PPPOL2TP_HEADER_OVERHEAD;
+               cfg.mru = cfg.mtu;
 
-               /* consistency checks */
-               if (ps->tunnel_sock != tunnel->sock)
+               session = l2tp_session_create(sizeof(struct pppol2tp_session),
+                                             tunnel, session_id,
+                                             peer_session_id, &cfg);
+               if (IS_ERR(session)) {
+                       error = PTR_ERR(session);
                        goto end;
+               }
        }
 
        /* Associate session with its PPPoL2TP socket */
@@ -777,6 +779,8 @@ out_no_ppp:
                  session->name);
 
 end:
+       if (drop_refcnt)
+               l2tp_session_dec_refcount(session);
        release_sock(sk);
 
        return error;
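
connect now either takes a reference on a pre-existing but not yet connected session, or creates a fresh one, with l2tp_session_create() reporting failure via ERR_PTR() instead of NULL; drop_refcnt records whether a lookup reference must be balanced at end:. A self-contained sketch under those assumptions (ERR_PTR/IS_ERR are re-implemented here purely for illustration):

#include <errno.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define ERR_PTR(e)  ((void *)(intptr_t)(e))
#define PTR_ERR(p)  ((int)(intptr_t)(p))
#define IS_ERR(p)   ((uintptr_t)(p) >= (uintptr_t)-4095)

struct session { int refcnt; bool connected; };

/* stand-in for l2tp_session_get(): returns with a reference held */
static struct session *lookup(bool exists)
{
	static struct session s = { .refcnt = 1 };

	if (!exists)
		return NULL;
	s.refcnt++;
	return &s;
}

/* stand-in for l2tp_session_create(): reports errors via ERR_PTR */
static struct session *create(void)
{
	struct session *s = calloc(1, sizeof(*s));

	return s ? s : ERR_PTR(-ENOMEM);
}

static int do_connect(bool exists)
{
	struct session *s;
	bool drop_refcnt = false;
	int err = 0;

	s = lookup(exists);
	if (s) {
		drop_refcnt = true;
		if (s->connected) {	/* reuse is OK only if unconnected */
			err = -EEXIST;
			goto end;
		}
	} else {
		s = create();
		if (IS_ERR(s)) {
			err = PTR_ERR(s);
			goto end;
		}
		/* the creation reference is kept until release (not modeled) */
	}
	s->connected = true;
end:
	if (drop_refcnt)
		s->refcnt--;		/* balance the lookup reference */
	return err;
}

int main(void)
{
	printf("%d %d\n", do_connect(false), do_connect(true));
	return 0;
}
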
@@ -804,12 +808,6 @@ static int pppol2tp_session_create(struct net *net, u32 tunnel_id, u32 session_i
        if (tunnel->sock == NULL)
                goto out;
 
-       /* Check that this session doesn't already exist */
-       error = -EEXIST;
-       session = l2tp_session_find(net, tunnel, session_id);
-       if (session != NULL)
-               goto out;
-
        /* Default MTU values. */
        if (cfg->mtu == 0)
                cfg->mtu = 1500 - PPPOL2TP_HEADER_OVERHEAD;
@@ -817,12 +815,13 @@ static int pppol2tp_session_create(struct net *net, u32 tunnel_id, u32 session_i
                cfg->mru = cfg->mtu;
 
        /* Allocate and initialize a new session context. */
-       error = -ENOMEM;
        session = l2tp_session_create(sizeof(struct pppol2tp_session),
                                      tunnel, session_id,
                                      peer_session_id, cfg);
-       if (session == NULL)
+       if (IS_ERR(session)) {
+               error = PTR_ERR(session);
                goto out;
+       }
 
        ps = l2tp_session_priv(session);
        ps->tunnel_sock = tunnel->sock;
@@ -1140,11 +1139,18 @@ static int pppol2tp_tunnel_ioctl(struct l2tp_tunnel *tunnel,
                if (stats.session_id != 0) {
                        /* resend to session ioctl handler */
                        struct l2tp_session *session =
-                               l2tp_session_find(sock_net(sk), tunnel, stats.session_id);
-                       if (session != NULL)
-                               err = pppol2tp_session_ioctl(session, cmd, arg);
-                       else
+                               l2tp_session_get(sock_net(sk), tunnel,
+                                                stats.session_id, true);
+
+                       if (session) {
+                               err = pppol2tp_session_ioctl(session, cmd,
+                                                            arg);
+                               if (session->deref)
+                                       session->deref(session);
+                               l2tp_session_dec_refcount(session);
+                       } else {
                                err = -EBADR;
+                       }
                        break;
                }
 #ifdef CONFIG_XFRM
@@ -1554,7 +1560,7 @@ static void pppol2tp_next_tunnel(struct net *net, struct pppol2tp_seq_data *pd)
 
 static void pppol2tp_next_session(struct net *net, struct pppol2tp_seq_data *pd)
 {
-       pd->session = l2tp_session_find_nth(pd->tunnel, pd->session_idx);
+       pd->session = l2tp_session_get_nth(pd->tunnel, pd->session_idx, true);
        pd->session_idx++;
 
        if (pd->session == NULL) {
@@ -1681,10 +1687,14 @@ static int pppol2tp_seq_show(struct seq_file *m, void *v)
 
        /* Show the tunnel or session context.
         */
-       if (pd->session == NULL)
+       if (!pd->session) {
                pppol2tp_seq_tunnel_show(m, pd->tunnel);
-       else
+       } else {
                pppol2tp_seq_session_show(m, pd->session);
+               if (pd->session->deref)
+                       pd->session->deref(pd->session);
+               l2tp_session_dec_refcount(pd->session);
+       }
 
 out:
        return 0;
@@ -1843,4 +1853,4 @@ MODULE_DESCRIPTION("PPP over L2TP over UDP");
 MODULE_LICENSE("GPL");
 MODULE_VERSION(PPPOL2TP_DRV_VERSION);
 MODULE_ALIAS_NET_PF_PROTO(PF_PPPOX, PX_PROTO_OL2TP);
-MODULE_ALIAS_L2TP_PWTYPE(11);
+MODULE_ALIAS_L2TP_PWTYPE(7);
index 06186d608a274eb46cd768610c67e8a5a8e84c15..cb4fff785cbf5aaad520442dc243ae62dc5750ea 100644 (file)
@@ -641,11 +641,13 @@ static void llc_cmsg_rcv(struct msghdr *msg, struct sk_buff *skb)
  *     @sock: Socket which connections arrive on.
  *     @newsock: Socket to move incoming connection to.
  *     @flags: User specified operational flags.
+ *     @kern: If the socket is kernel internal
  *
  *     Accept a new incoming connection.
  *     Returns 0 upon success, negative otherwise.
  */
-static int llc_ui_accept(struct socket *sock, struct socket *newsock, int flags)
+static int llc_ui_accept(struct socket *sock, struct socket *newsock, int flags,
+                        bool kern)
 {
        struct sock *sk = sock->sk, *newsk;
        struct llc_sock *llc, *newllc;
index 40813dd3301c600978374e259953ca5d661022ce..5bb0c501281954dfe656c5e886c9032b958061be 100644 (file)
@@ -718,7 +718,8 @@ int ieee80211_do_open(struct wireless_dev *wdev, bool coming_up)
        ieee80211_recalc_ps(local);
 
        if (sdata->vif.type == NL80211_IFTYPE_MONITOR ||
-           sdata->vif.type == NL80211_IFTYPE_AP_VLAN) {
+           sdata->vif.type == NL80211_IFTYPE_AP_VLAN ||
+           local->ops->wake_tx_queue) {
                /* XXX: for AP_VLAN, actually track AP queues */
                netif_tx_start_all_queues(dev);
        } else if (dev) {
index 56ccffa3f2bfc7731adfaabb1026ef7e8af68d32..62141dcec2d66020fb4eb7bb698e880809878028 100644 (file)
@@ -19,6 +19,7 @@
 #ifndef __IEEE802154_I_H
 #define __IEEE802154_I_H
 
+#include <linux/interrupt.h>
 #include <linux/mutex.h>
 #include <linux/hrtimer.h>
 #include <net/cfg802154.h>
index 3818686182b210be11025ff69d82f58e4e08e401..5928d22ba9c86813a13621fec6ebdcc62fd7aeba 100644 (file)
@@ -24,6 +24,9 @@
 #include <net/nexthop.h>
 #include "internal.h"
 
+/* max memory we will use for mpls_route */
+#define MAX_MPLS_ROUTE_MEM     4096
+
 /* Maximum number of labels to look ahead at when selecting a path of
  * a multipath route
  */
@@ -32,7 +35,9 @@
 #define MPLS_NEIGH_TABLE_UNSPEC (NEIGH_LINK_TABLE + 1)
 
 static int zero = 0;
+static int one = 1;
 static int label_limit = (1 << 20) - 1;
+static int ttl_max = 255;
 
 static void rtmsg_lfib(int event, u32 label, struct mpls_route *rt,
                       struct nlmsghdr *nlh, struct net *net, u32 portid,
@@ -58,10 +63,7 @@ EXPORT_SYMBOL_GPL(mpls_output_possible);
 
 static u8 *__mpls_nh_via(struct mpls_route *rt, struct mpls_nh *nh)
 {
-       u8 *nh0_via = PTR_ALIGN((u8 *)&rt->rt_nh[rt->rt_nhn], VIA_ALEN_ALIGN);
-       int nh_index = nh - rt->rt_nh;
-
-       return nh0_via + rt->rt_max_alen * nh_index;
+       return (u8 *)nh + rt->rt_via_offset;
 }
 
 static const u8 *mpls_nh_via(const struct mpls_route *rt,
@@ -187,21 +189,32 @@ static u32 mpls_multipath_hash(struct mpls_route *rt, struct sk_buff *skb)
        return hash;
 }
 
+static struct mpls_nh *mpls_get_nexthop(struct mpls_route *rt, u8 index)
+{
+       return (struct mpls_nh *)((u8 *)rt->rt_nh + index * rt->rt_nh_size);
+}
+
+/* number of alive nexthops (rt->rt_nhn_alive) and the flags for
+ * a next hop (nh->nh_flags) are modified by netdev event handlers.
+ * Since those fields can change at any moment, use READ_ONCE to
+ * access both.
+ */
 static struct mpls_nh *mpls_select_multipath(struct mpls_route *rt,
                                             struct sk_buff *skb)
 {
-       int alive = ACCESS_ONCE(rt->rt_nhn_alive);
        u32 hash = 0;
        int nh_index = 0;
        int n = 0;
+       u8 alive;
 
        /* No need to look further into packet if there's only
         * one path
         */
        if (rt->rt_nhn == 1)
-               goto out;
+               return rt->rt_nh;
 
-       if (alive <= 0)
+       alive = READ_ONCE(rt->rt_nhn_alive);
+       if (alive == 0)
                return NULL;
 
        hash = mpls_multipath_hash(rt, skb);
@@ -209,7 +222,9 @@ static struct mpls_nh *mpls_select_multipath(struct mpls_route *rt,
        if (alive == rt->rt_nhn)
                goto out;
        for_nexthops(rt) {
-               if (nh->nh_flags & (RTNH_F_DEAD | RTNH_F_LINKDOWN))
+               unsigned int nh_flags = READ_ONCE(nh->nh_flags);
+
+               if (nh_flags & (RTNH_F_DEAD | RTNH_F_LINKDOWN))
                        continue;
                if (n == nh_index)
                        return nh;
@@ -217,11 +232,11 @@ static struct mpls_nh *mpls_select_multipath(struct mpls_route *rt,
        } endfor_nexthops(rt);
 
 out:
-       return &rt->rt_nh[nh_index];
+       return mpls_get_nexthop(rt, nh_index);
 }
 
-static bool mpls_egress(struct mpls_route *rt, struct sk_buff *skb,
-                       struct mpls_entry_decoded dec)
+static bool mpls_egress(struct net *net, struct mpls_route *rt,
+                       struct sk_buff *skb, struct mpls_entry_decoded dec)
 {
        enum mpls_payload_type payload_type;
        bool success = false;
@@ -246,22 +261,46 @@ static bool mpls_egress(struct mpls_route *rt, struct sk_buff *skb,
        switch (payload_type) {
        case MPT_IPV4: {
                struct iphdr *hdr4 = ip_hdr(skb);
+               u8 new_ttl;
                skb->protocol = htons(ETH_P_IP);
+
+               /* If propagating TTL, take the decremented TTL from
+                * the incoming MPLS header, otherwise decrement the
+                * TTL, but only if not 0 to avoid underflow.
+                */
+               if (rt->rt_ttl_propagate == MPLS_TTL_PROP_ENABLED ||
+                   (rt->rt_ttl_propagate == MPLS_TTL_PROP_DEFAULT &&
+                    net->mpls.ip_ttl_propagate))
+                       new_ttl = dec.ttl;
+               else
+                       new_ttl = hdr4->ttl ? hdr4->ttl - 1 : 0;
+
                csum_replace2(&hdr4->check,
                              htons(hdr4->ttl << 8),
-                             htons(dec.ttl << 8));
-               hdr4->ttl = dec.ttl;
+                             htons(new_ttl << 8));
+               hdr4->ttl = new_ttl;
                success = true;
                break;
        }
        case MPT_IPV6: {
                struct ipv6hdr *hdr6 = ipv6_hdr(skb);
                skb->protocol = htons(ETH_P_IPV6);
-               hdr6->hop_limit = dec.ttl;
+
+               /* If propagating TTL, take the decremented TTL from
+                * the incoming MPLS header, otherwise decrement the
+                * hop limit, but only if not 0 to avoid underflow.
+                */
+               if (rt->rt_ttl_propagate == MPLS_TTL_PROP_ENABLED ||
+                   (rt->rt_ttl_propagate == MPLS_TTL_PROP_DEFAULT &&
+                    net->mpls.ip_ttl_propagate))
+                       hdr6->hop_limit = dec.ttl;
+               else if (hdr6->hop_limit)
+                       hdr6->hop_limit = hdr6->hop_limit - 1;
                success = true;
                break;
        }
        case MPT_UNSPEC:
+               /* Should have decided which protocol it is by now */
                break;
        }
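
The egress TTL rule above is: the per-route setting wins, MPLS_TTL_PROP_DEFAULT defers to the per-namespace sysctl, and when propagation is off the IP TTL is decremented but never below zero. A standalone model of just that decision (names are illustrative):

#include <stdio.h>

enum { PROP_DEFAULT, PROP_ENABLED, PROP_DISABLED };

static int egress_ttl(int rt_prop, int ns_prop, int mpls_ttl, int ip_ttl)
{
	if (rt_prop == PROP_ENABLED ||
	    (rt_prop == PROP_DEFAULT && ns_prop))
		return mpls_ttl;	/* take the (already decremented) MPLS TTL */
	return ip_ttl ? ip_ttl - 1 : 0;	/* else decrement, never below zero */
}

int main(void)
{
	printf("%d\n", egress_ttl(PROP_DEFAULT, 1, 63, 10));  /* 63 */
	printf("%d\n", egress_ttl(PROP_DISABLED, 1, 63, 10)); /* 9 */
	printf("%d\n", egress_ttl(PROP_DISABLED, 1, 63, 0));  /* 0, no underflow */
	return 0;
}
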
 
@@ -361,7 +400,7 @@ static int mpls_forward(struct sk_buff *skb, struct net_device *dev,
 
        if (unlikely(!new_header_size && dec.bos)) {
                /* Penultimate hop popping */
-               if (!mpls_egress(rt, skb, dec))
+               if (!mpls_egress(dev_net(out_dev), rt, skb, dec))
                        goto err;
        } else {
                bool bos;
@@ -412,6 +451,7 @@ static struct packet_type mpls_packet_type __read_mostly = {
 static const struct nla_policy rtm_mpls_policy[RTA_MAX+1] = {
        [RTA_DST]               = { .type = NLA_U32 },
        [RTA_OIF]               = { .type = NLA_U32 },
+       [RTA_TTL_PROPAGATE]     = { .type = NLA_U8 },
 };
 
 struct mpls_route_config {
@@ -421,6 +461,7 @@ struct mpls_route_config {
        u8                      rc_via_alen;
        u8                      rc_via[MAX_VIA_ALEN];
        u32                     rc_label;
+       u8                      rc_ttl_propagate;
        u8                      rc_output_labels;
        u32                     rc_output_label[MAX_NEW_LABELS];
        u32                     rc_nlflags;
@@ -430,20 +471,27 @@ struct mpls_route_config {
        int                     rc_mp_len;
 };
 
-static struct mpls_route *mpls_rt_alloc(int num_nh, u8 max_alen)
+/* all nexthops within a route have the same size based on max
+ * number of labels and max via length for a hop
+ */
+static struct mpls_route *mpls_rt_alloc(u8 num_nh, u8 max_alen, u8 max_labels)
 {
-       u8 max_alen_aligned = ALIGN(max_alen, VIA_ALEN_ALIGN);
+       u8 nh_size = MPLS_NH_SIZE(max_labels, max_alen);
        struct mpls_route *rt;
+       size_t size;
 
-       rt = kzalloc(ALIGN(sizeof(*rt) + num_nh * sizeof(*rt->rt_nh),
-                          VIA_ALEN_ALIGN) +
-                    num_nh * max_alen_aligned,
-                    GFP_KERNEL);
-       if (rt) {
-               rt->rt_nhn = num_nh;
-               rt->rt_nhn_alive = num_nh;
-               rt->rt_max_alen = max_alen_aligned;
-       }
+       size = sizeof(*rt) + num_nh * nh_size;
+       if (size > MAX_MPLS_ROUTE_MEM)
+               return ERR_PTR(-EINVAL);
+
+       rt = kzalloc(size, GFP_KERNEL);
+       if (!rt)
+               return ERR_PTR(-ENOMEM);
+
+       rt->rt_nhn = num_nh;
+       rt->rt_nhn_alive = num_nh;
+       rt->rt_nh_size = nh_size;
+       rt->rt_via_offset = MPLS_NH_VIA_OFF(max_labels);
 
        return rt;
 }
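
Each route is now a single allocation: a header plus num_nh identical nexthop slots, where a slot is sized for the largest label stack and via address in the route, and the total is capped at MAX_MPLS_ROUTE_MEM. A userspace sketch of the sizing arithmetic (struct layout simplified; the 16-byte route header is an assumption for illustration):

#include <stdint.h>
#include <stdio.h>

#define VIA_ALIGN	sizeof(unsigned long)
#define ALIGN_UP(x, a)	(((x) + (a) - 1) & ~((a) - 1))
#define MAX_ROUTE_MEM	4096

struct nh_hdr {		/* fixed part of a nexthop slot */
	void *dev;
	unsigned int flags;
	uint8_t nlab, valen, vtab, rsvd;
};

/* labels follow the fixed part; the via address is aligned after them */
static size_t nh_via_off(uint8_t labels)
{
	return ALIGN_UP(sizeof(struct nh_hdr) + labels * sizeof(uint32_t),
			VIA_ALIGN);
}

static size_t nh_size(uint8_t labels, uint8_t via_alen)
{
	return nh_via_off(labels) + ALIGN_UP((size_t)via_alen, VIA_ALIGN);
}

int main(void)
{
	size_t slot = nh_size(3, 6);	/* 3 labels, a 6-byte via address */
	size_t route = 16 + 8 * slot;	/* assumed header plus 8 nexthops */

	printf("slot=%zu route=%zu fits=%d\n",
	       slot, route, route <= MAX_ROUTE_MEM);
	return 0;
}
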
@@ -648,9 +696,6 @@ static int mpls_nh_build_from_cfg(struct mpls_route_config *cfg,
                return -ENOMEM;
 
        err = -EINVAL;
-       /* Ensure only a supported number of labels are present */
-       if (cfg->rc_output_labels > MAX_NEW_LABELS)
-               goto errout;
 
        nh->nh_labels = cfg->rc_output_labels;
        for (i = 0; i < nh->nh_labels; i++)
@@ -675,7 +720,7 @@ errout:
 
 static int mpls_nh_build(struct net *net, struct mpls_route *rt,
                         struct mpls_nh *nh, int oif, struct nlattr *via,
-                        struct nlattr *newdst)
+                        struct nlattr *newdst, u8 max_labels)
 {
        int err = -ENOMEM;
 
@@ -683,7 +728,7 @@ static int mpls_nh_build(struct net *net, struct mpls_route *rt,
                goto errout;
 
        if (newdst) {
-               err = nla_get_labels(newdst, MAX_NEW_LABELS,
+               err = nla_get_labels(newdst, max_labels,
                                     &nh->nh_labels, nh->nh_label);
                if (err)
                        goto errout;
@@ -708,22 +753,20 @@ errout:
        return err;
 }
 
-static int mpls_count_nexthops(struct rtnexthop *rtnh, int len,
-                              u8 cfg_via_alen, u8 *max_via_alen)
+static u8 mpls_count_nexthops(struct rtnexthop *rtnh, int len,
+                             u8 cfg_via_alen, u8 *max_via_alen,
+                             u8 *max_labels)
 {
-       int nhs = 0;
        int remaining = len;
-
-       if (!rtnh) {
-               *max_via_alen = cfg_via_alen;
-               return 1;
-       }
+       u8 nhs = 0;
 
        *max_via_alen = 0;
+       *max_labels = 0;
 
        while (rtnh_ok(rtnh, remaining)) {
                struct nlattr *nla, *attrs = rtnh_attrs(rtnh);
                int attrlen;
+               u8 n_labels = 0;
 
                attrlen = rtnh_attrlen(rtnh);
                nla = nla_find(attrs, attrlen, RTA_VIA);
@@ -737,7 +780,20 @@ static int mpls_count_nexthops(struct rtnexthop *rtnh, int len,
                                                      via_alen);
                }
 
+               nla = nla_find(attrs, attrlen, RTA_NEWDST);
+               if (nla &&
+                   nla_get_labels(nla, MAX_NEW_LABELS, &n_labels, NULL) != 0)
+                       return 0;
+
+               *max_labels = max_t(u8, *max_labels, n_labels);
+
+               /* number of nexthops is tracked by a u8.
+                * Check for overflow.
+                */
+               if (nhs == 255)
+                       return 0;
                nhs++;
+
                rtnh = rtnh_next(rtnh, &remaining);
        }
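
Counting now doubles as validation: the helper returns 0, which callers treat as an error, whenever a bound would be violated, since both the nexthop count and the per-nexthop label count must fit in a u8. A trimmed-down model of that convention (the 30-label cap mirrors MAX_NEW_LABELS):

#include <stdint.h>
#include <stdio.h>

#define MAX_LABELS_PER_NH 30	/* mirrors MAX_NEW_LABELS */

/* returns the nexthop count, or 0 to signal any bound violation */
static uint8_t count_nexthops(const uint8_t *labels_per_nh, unsigned int n,
			      uint8_t *max_labels)
{
	uint8_t nhs = 0;

	*max_labels = 0;
	for (unsigned int i = 0; i < n; i++) {
		if (labels_per_nh[i] > MAX_LABELS_PER_NH)
			return 0;
		if (labels_per_nh[i] > *max_labels)
			*max_labels = labels_per_nh[i];
		if (nhs == 255)		/* the u8 counter would wrap */
			return 0;
		nhs++;
	}
	return nhs;
}

int main(void)
{
	uint8_t labels[3] = { 1, 3, 2 };
	uint8_t max;
	uint8_t nhs = count_nexthops(labels, 3, &max);

	printf("nhs=%d max_labels=%d\n", nhs, max);
	return 0;
}
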
 
@@ -746,13 +802,13 @@ static int mpls_count_nexthops(struct rtnexthop *rtnh, int len,
 }
 
 static int mpls_nh_build_multi(struct mpls_route_config *cfg,
-                              struct mpls_route *rt)
+                              struct mpls_route *rt, u8 max_labels)
 {
        struct rtnexthop *rtnh = cfg->rc_mp;
        struct nlattr *nla_via, *nla_newdst;
        int remaining = cfg->rc_mp_len;
-       int nhs = 0;
        int err = 0;
+       u8 nhs = 0;
 
        change_nexthops(rt) {
                int attrlen;
@@ -779,7 +835,8 @@ static int mpls_nh_build_multi(struct mpls_route_config *cfg,
                }
 
                err = mpls_nh_build(cfg->rc_nlinfo.nl_net, rt, nh,
-                                   rtnh->rtnh_ifindex, nla_via, nla_newdst);
+                                   rtnh->rtnh_ifindex, nla_via, nla_newdst,
+                                   max_labels);
                if (err)
                        goto errout;
 
@@ -806,7 +863,8 @@ static int mpls_route_add(struct mpls_route_config *cfg)
        int err = -EINVAL;
        u8 max_via_alen;
        unsigned index;
-       int nhs;
+       u8 max_labels;
+       u8 nhs;
 
        index = cfg->rc_label;
 
@@ -844,21 +902,32 @@ static int mpls_route_add(struct mpls_route_config *cfg)
                goto errout;
 
        err = -EINVAL;
-       nhs = mpls_count_nexthops(cfg->rc_mp, cfg->rc_mp_len,
-                                 cfg->rc_via_alen, &max_via_alen);
+       if (cfg->rc_mp) {
+               nhs = mpls_count_nexthops(cfg->rc_mp, cfg->rc_mp_len,
+                                         cfg->rc_via_alen, &max_via_alen,
+                                         &max_labels);
+       } else {
+               max_via_alen = cfg->rc_via_alen;
+               max_labels = cfg->rc_output_labels;
+               nhs = 1;
+       }
+
        if (nhs == 0)
                goto errout;
 
        err = -ENOMEM;
-       rt = mpls_rt_alloc(nhs, max_via_alen);
-       if (!rt)
+       rt = mpls_rt_alloc(nhs, max_via_alen, max_labels);
+       if (IS_ERR(rt)) {
+               err = PTR_ERR(rt);
                goto errout;
+       }
 
        rt->rt_protocol = cfg->rc_protocol;
        rt->rt_payload_type = cfg->rc_payload_type;
+       rt->rt_ttl_propagate = cfg->rc_ttl_propagate;
 
        if (cfg->rc_mp)
-               err = mpls_nh_build_multi(cfg, rt);
+               err = mpls_nh_build_multi(cfg, rt, max_labels);
        else
                err = mpls_nh_build_from_cfg(cfg, rt);
        if (err)
@@ -1011,8 +1080,8 @@ static int mpls_netconf_msgsize_devconf(int type)
        return size;
 }
 
-static void mpls_netconf_notify_devconf(struct net *net, int type,
-                                       struct mpls_dev *mdev)
+static void mpls_netconf_notify_devconf(struct net *net, int event,
+                                       int type, struct mpls_dev *mdev)
 {
        struct sk_buff *skb;
        int err = -ENOBUFS;
@@ -1021,8 +1090,7 @@ static void mpls_netconf_notify_devconf(struct net *net, int type,
        if (!skb)
                goto errout;
 
-       err = mpls_netconf_fill_devconf(skb, mdev, 0, 0, RTM_NEWNETCONF,
-                                       0, type);
+       err = mpls_netconf_fill_devconf(skb, mdev, 0, 0, event, 0, type);
        if (err < 0) {
                /* -EMSGSIZE implies BUG in mpls_netconf_msgsize_devconf() */
                WARN_ON(err == -EMSGSIZE);
@@ -1155,9 +1223,8 @@ static int mpls_conf_proc(struct ctl_table *ctl, int write,
 
                if (i == offsetof(struct mpls_dev, input_enabled) &&
                    val != oval) {
-                       mpls_netconf_notify_devconf(net,
-                                                   NETCONFA_INPUT,
-                                                   mdev);
+                       mpls_netconf_notify_devconf(net, RTM_NEWNETCONF,
+                                                   NETCONFA_INPUT, mdev);
                }
        }
 
@@ -1198,10 +1265,11 @@ static int mpls_dev_sysctl_register(struct net_device *dev,
 
        snprintf(path, sizeof(path), "net/mpls/conf/%s", dev->name);
 
-       mdev->sysctl = register_net_sysctl(dev_net(dev), path, table);
+       mdev->sysctl = register_net_sysctl(net, path, table);
        if (!mdev->sysctl)
                goto free;
 
+       mpls_netconf_notify_devconf(net, RTM_NEWNETCONF, NETCONFA_ALL, mdev);
        return 0;
 
 free:
@@ -1210,13 +1278,17 @@ out:
        return -ENOBUFS;
 }
 
-static void mpls_dev_sysctl_unregister(struct mpls_dev *mdev)
+static void mpls_dev_sysctl_unregister(struct net_device *dev,
+                                      struct mpls_dev *mdev)
 {
+       struct net *net = dev_net(dev);
        struct ctl_table *table;
 
        table = mdev->sysctl->ctl_table_arg;
        unregister_net_sysctl_table(mdev->sysctl);
        kfree(table);
+
+       mpls_netconf_notify_devconf(net, RTM_DELNETCONF, 0, mdev);
 }
 
 static struct mpls_dev *mpls_add_dev(struct net_device *dev)
@@ -1242,11 +1314,12 @@ static struct mpls_dev *mpls_add_dev(struct net_device *dev)
                u64_stats_init(&mpls_stats->syncp);
        }
 
+       mdev->dev = dev;
+
        err = mpls_dev_sysctl_register(dev, mdev);
        if (err)
                goto free;
 
-       mdev->dev = dev;
        rcu_assign_pointer(dev->mpls_ptr, mdev);
 
        return mdev;
@@ -1269,6 +1342,7 @@ static void mpls_ifdown(struct net_device *dev, int event)
 {
        struct mpls_route __rcu **platform_label;
        struct net *net = dev_net(dev);
+       u8 alive, deleted;
        unsigned index;
 
        platform_label = rtnl_dereference(net->mpls.platform_label);
@@ -1278,31 +1352,49 @@ static void mpls_ifdown(struct net_device *dev, int event)
                if (!rt)
                        continue;
 
+               alive = 0;
+               deleted = 0;
                change_nexthops(rt) {
+                       unsigned int nh_flags = nh->nh_flags;
+
                        if (rtnl_dereference(nh->nh_dev) != dev)
-                               continue;
+                               goto next;
+
                        switch (event) {
                        case NETDEV_DOWN:
                        case NETDEV_UNREGISTER:
-                               nh->nh_flags |= RTNH_F_DEAD;
+                               nh_flags |= RTNH_F_DEAD;
                                /* fall through */
                        case NETDEV_CHANGE:
-                               nh->nh_flags |= RTNH_F_LINKDOWN;
-                               ACCESS_ONCE(rt->rt_nhn_alive) = rt->rt_nhn_alive - 1;
+                               nh_flags |= RTNH_F_LINKDOWN;
                                break;
                        }
                        if (event == NETDEV_UNREGISTER)
                                RCU_INIT_POINTER(nh->nh_dev, NULL);
+
+                       if (nh->nh_flags != nh_flags)
+                               WRITE_ONCE(nh->nh_flags, nh_flags);
+next:
+                       if (!(nh_flags & (RTNH_F_DEAD | RTNH_F_LINKDOWN)))
+                               alive++;
+                       if (!rtnl_dereference(nh->nh_dev))
+                               deleted++;
                } endfor_nexthops(rt);
+
+               WRITE_ONCE(rt->rt_nhn_alive, alive);
+
+               /* if there are no more nexthops, delete the route */
+               if (event == NETDEV_UNREGISTER && deleted == rt->rt_nhn)
+                       mpls_route_update(net, index, NULL, NULL);
        }
 }
 
-static void mpls_ifup(struct net_device *dev, unsigned int nh_flags)
+static void mpls_ifup(struct net_device *dev, unsigned int flags)
 {
        struct mpls_route __rcu **platform_label;
        struct net *net = dev_net(dev);
        unsigned index;
-       int alive;
+       u8 alive;
 
        platform_label = rtnl_dereference(net->mpls.platform_label);
        for (index = 0; index < net->mpls.platform_labels; index++) {
@@ -1313,20 +1405,22 @@ static void mpls_ifup(struct net_device *dev, unsigned int nh_flags)
 
                alive = 0;
                change_nexthops(rt) {
+                       unsigned int nh_flags = nh->nh_flags;
                        struct net_device *nh_dev =
                                rtnl_dereference(nh->nh_dev);
 
-                       if (!(nh->nh_flags & nh_flags)) {
+                       if (!(nh_flags & flags)) {
                                alive++;
                                continue;
                        }
                        if (nh_dev != dev)
                                continue;
                        alive++;
-                       nh->nh_flags &= ~nh_flags;
+                       nh_flags &= ~flags;
+                       WRITE_ONCE(nh->nh_flags, nh_flags);
                } endfor_nexthops(rt);
 
-               ACCESS_ONCE(rt->rt_nhn_alive) = alive;
+               WRITE_ONCE(rt->rt_nhn_alive, alive);
        }
 }
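
Both event handlers now compute a nexthop's new flags in a local variable, publish them with a single WRITE_ONCE, and recount rt_nhn_alive from scratch rather than decrementing it in place, so the RCU packet path never sees a half-updated route. A single-threaded model of the down-event recount (simplified to one flag pair, with plain assignments where the kernel uses WRITE_ONCE):

#include <stdio.h>

#define F_DEAD     0x1
#define F_LINKDOWN 0x2

struct nh { int on_event_dev; int has_dev; unsigned int flags; };

static void ifdown(struct nh *nhs, int n, int unregister)
{
	unsigned int alive = 0, deleted = 0;

	for (int i = 0; i < n; i++) {
		unsigned int flags = nhs[i].flags;	/* work on a copy */

		if (nhs[i].on_event_dev) {
			flags |= F_LINKDOWN;
			if (unregister) {
				flags |= F_DEAD;
				nhs[i].has_dev = 0;
			}
			nhs[i].flags = flags;	/* one publish per nexthop */
		}
		if (!(flags & (F_DEAD | F_LINKDOWN)))
			alive++;
		if (!nhs[i].has_dev)
			deleted++;
	}
	/* recounted, not decremented in place */
	printf("alive=%u deleted=%u delete_route=%d\n",
	       alive, deleted, unregister && deleted == (unsigned int)n);
}

int main(void)
{
	struct nh nhs[2] = { { 1, 1, 0 }, { 0, 1, 0 } };

	ifdown(nhs, 2, 0);	/* link went down on one nexthop's device */
	return 0;
}
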
 
@@ -1377,7 +1471,7 @@ static int mpls_dev_notify(struct notifier_block *this, unsigned long event,
                mpls_ifdown(dev, event);
                mdev = mpls_dev_get(dev);
                if (mdev) {
-                       mpls_dev_sysctl_unregister(mdev);
+                       mpls_dev_sysctl_unregister(dev, mdev);
                        RCU_INIT_POINTER(dev->mpls_ptr, NULL);
                        call_rcu(&mdev->rcu, mpls_dev_destroy_rcu);
                }
@@ -1387,7 +1481,7 @@ static int mpls_dev_notify(struct notifier_block *this, unsigned long event,
                if (mdev) {
                        int err;
 
-                       mpls_dev_sysctl_unregister(mdev);
+                       mpls_dev_sysctl_unregister(dev, mdev);
                        err = mpls_dev_sysctl_register(dev, mdev);
                        if (err)
                                return notifier_from_errno(err);
@@ -1447,16 +1541,18 @@ int nla_put_labels(struct sk_buff *skb, int attrtype,
 EXPORT_SYMBOL_GPL(nla_put_labels);
 
 int nla_get_labels(const struct nlattr *nla,
-                  u32 max_labels, u8 *labels, u32 label[])
+                  u8 max_labels, u8 *labels, u32 label[])
 {
        unsigned len = nla_len(nla);
-       unsigned nla_labels;
        struct mpls_shim_hdr *nla_label;
+       u8 nla_labels;
        bool bos;
        int i;
 
-       /* len needs to be an even multiple of 4 (the label size) */
-       if (len & 3)
+       /* len needs to be an even multiple of 4 (the label size). Number
+        * of labels is a u8 so check for overflow.
+        */
+       if (len & 3 || len / 4 > 255)
                return -EINVAL;
 
        /* Limit the number of new labels allowed */
@@ -1464,6 +1560,10 @@ int nla_get_labels(const struct nlattr *nla,
        if (nla_labels > max_labels)
                return -EINVAL;
 
+       /* when label == NULL, caller wants number of labels */
+       if (!label)
+               goto out;
+
        nla_label = nla_data(nla);
        bos = true;
        for (i = nla_labels - 1; i >= 0; i--, bos = false) {
@@ -1487,6 +1587,7 @@ int nla_get_labels(const struct nlattr *nla,
 
                label[i] = dec.label;
        }
+out:
        *labels = nla_labels;
        return 0;
 }
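
Passing label == NULL now asks nla_get_labels() only for a validated count, which lets callers size an allocation before decoding. A simplified two-pass model (a "label attribute" here is just len/4 four-byte entries, and the decode is a stand-in):

#include <errno.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

/* out == NULL means the caller only wants the (validated) count */
static int get_labels(const uint8_t *payload, unsigned int len,
		      unsigned int max_labels, uint8_t *labels, uint32_t *out)
{
	if (len & 3 || len / 4 > 255)	/* count must fit in a u8 */
		return -EINVAL;
	if (len / 4 > max_labels)
		return -EINVAL;
	if (out)
		for (unsigned int i = 0; i < len / 4; i++)
			out[i] = payload[i * 4];	/* stand-in decode */
	*labels = (uint8_t)(len / 4);
	return 0;
}

int main(void)
{
	uint8_t attr[12] = { 1, 0, 0, 0, 2, 0, 0, 0, 3, 0, 0, 0 };
	uint8_t n;
	uint32_t *lbls;

	if (get_labels(attr, sizeof(attr), 30, &n, NULL))	/* pass 1: count */
		return 1;
	lbls = calloc(n, sizeof(*lbls));
	if (!lbls || get_labels(attr, sizeof(attr), n, &n, lbls))
		return 1;					/* pass 2: fill */
	printf("%d labels, first=%u\n", n, (unsigned int)lbls[0]);
	free(lbls);
	return 0;
}
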
@@ -1548,7 +1649,6 @@ static int rtm_to_route_config(struct sk_buff *skb,  struct nlmsghdr *nlh,
 
        err = -EINVAL;
        rtm = nlmsg_data(nlh);
-       memset(cfg, 0, sizeof(*cfg));
 
        if (rtm->rtm_family != AF_MPLS)
                goto errout;
@@ -1576,6 +1676,7 @@ static int rtm_to_route_config(struct sk_buff *skb,  struct nlmsghdr *nlh,
        cfg->rc_label           = LABEL_NOT_SPECIFIED;
        cfg->rc_protocol        = rtm->rtm_protocol;
        cfg->rc_via_table       = MPLS_NEIGH_TABLE_UNSPEC;
+       cfg->rc_ttl_propagate   = MPLS_TTL_PROP_DEFAULT;
        cfg->rc_nlflags         = nlh->nlmsg_flags;
        cfg->rc_nlinfo.portid   = NETLINK_CB(skb).portid;
        cfg->rc_nlinfo.nlh      = nlh;
@@ -1622,6 +1723,17 @@ static int rtm_to_route_config(struct sk_buff *skb,  struct nlmsghdr *nlh,
                        cfg->rc_mp_len = nla_len(nla);
                        break;
                }
+               case RTA_TTL_PROPAGATE:
+               {
+                       u8 ttl_propagate = nla_get_u8(nla);
+
+                       if (ttl_propagate > 1)
+                               goto errout;
+                       cfg->rc_ttl_propagate = ttl_propagate ?
+                               MPLS_TTL_PROP_ENABLED :
+                               MPLS_TTL_PROP_DISABLED;
+                       break;
+               }
                default:
                        /* Unsupported attribute */
                        goto errout;
@@ -1635,27 +1747,43 @@ errout:
 
 static int mpls_rtm_delroute(struct sk_buff *skb, struct nlmsghdr *nlh)
 {
-       struct mpls_route_config cfg;
+       struct mpls_route_config *cfg;
        int err;
 
-       err = rtm_to_route_config(skb, nlh, &cfg);
+       cfg = kzalloc(sizeof(*cfg), GFP_KERNEL);
+       if (!cfg)
+               return -ENOMEM;
+
+       err = rtm_to_route_config(skb, nlh, cfg);
        if (err < 0)
-               return err;
+               goto out;
 
-       return mpls_route_del(&cfg);
+       err = mpls_route_del(cfg);
+out:
+       kfree(cfg);
+
+       return err;
 }
 
 
 static int mpls_rtm_newroute(struct sk_buff *skb, struct nlmsghdr *nlh)
 {
-       struct mpls_route_config cfg;
+       struct mpls_route_config *cfg;
        int err;
 
-       err = rtm_to_route_config(skb, nlh, &cfg);
+       cfg = kzalloc(sizeof(*cfg), GFP_KERNEL);
+       if (!cfg)
+               return -ENOMEM;
+
+       err = rtm_to_route_config(skb, nlh, cfg);
        if (err < 0)
-               return err;
+               goto out;
 
-       return mpls_route_add(&cfg);
+       err = mpls_route_add(cfg);
+out:
+       kfree(cfg);
+
+       return err;
 }
 
 static int mpls_dump_route(struct sk_buff *skb, u32 portid, u32 seq, int event,
@@ -1682,6 +1810,15 @@ static int mpls_dump_route(struct sk_buff *skb, u32 portid, u32 seq, int event,
 
        if (nla_put_labels(skb, RTA_DST, 1, &label))
                goto nla_put_failure;
+
+       if (rt->rt_ttl_propagate != MPLS_TTL_PROP_DEFAULT) {
+               bool ttl_propagate =
+                       rt->rt_ttl_propagate == MPLS_TTL_PROP_ENABLED;
+
+               if (nla_put_u8(skb, RTA_TTL_PROPAGATE,
+                              ttl_propagate))
+                       goto nla_put_failure;
+       }
        if (rt->rt_nhn == 1) {
                const struct mpls_nh *nh = rt->rt_nh;
 
@@ -1703,21 +1840,23 @@ static int mpls_dump_route(struct sk_buff *skb, u32 portid, u32 seq, int event,
        } else {
                struct rtnexthop *rtnh;
                struct nlattr *mp;
-               int dead = 0;
-               int linkdown = 0;
+               u8 linkdown = 0;
+               u8 dead = 0;
 
                mp = nla_nest_start(skb, RTA_MULTIPATH);
                if (!mp)
                        goto nla_put_failure;
 
                for_nexthops(rt) {
+                       dev = rtnl_dereference(nh->nh_dev);
+                       if (!dev)
+                               continue;
+
                        rtnh = nla_reserve_nohdr(skb, sizeof(*rtnh));
                        if (!rtnh)
                                goto nla_put_failure;
 
-                       dev = rtnl_dereference(nh->nh_dev);
-                       if (dev)
-                               rtnh->rtnh_ifindex = dev->ifindex;
+                       rtnh->rtnh_ifindex = dev->ifindex;
                        if (nh->nh_flags & RTNH_F_LINKDOWN) {
                                rtnh->rtnh_flags |= RTNH_F_LINKDOWN;
                                linkdown++;
@@ -1792,7 +1931,8 @@ static inline size_t lfib_nlmsg_size(struct mpls_route *rt)
 {
        size_t payload =
                NLMSG_ALIGN(sizeof(struct rtmsg))
-               + nla_total_size(4);                    /* RTA_DST */
+               + nla_total_size(4)                     /* RTA_DST */
+               + nla_total_size(1);                    /* RTA_TTL_PROPAGATE */
 
        if (rt->rt_nhn == 1) {
                struct mpls_nh *nh = rt->rt_nh;
@@ -1808,6 +1948,8 @@ static inline size_t lfib_nlmsg_size(struct mpls_route *rt)
                size_t nhsize = 0;
 
                for_nexthops(rt) {
+                       if (!rtnl_dereference(nh->nh_dev))
+                               continue;
                        nhsize += nla_total_size(sizeof(struct rtnexthop));
                        /* RTA_VIA */
                        if (nh->nh_via_table != MPLS_NEIGH_TABLE_UNSPEC)
@@ -1870,12 +2012,13 @@ static int resize_platform_label_table(struct net *net, size_t limit)
        /* In case the predefined labels need to be populated */
        if (limit > MPLS_LABEL_IPV4NULL) {
                struct net_device *lo = net->loopback_dev;
-               rt0 = mpls_rt_alloc(1, lo->addr_len);
-               if (!rt0)
+               rt0 = mpls_rt_alloc(1, lo->addr_len, 0);
+               if (IS_ERR(rt0))
                        goto nort0;
                RCU_INIT_POINTER(rt0->rt_nh->nh_dev, lo);
                rt0->rt_protocol = RTPROT_KERNEL;
                rt0->rt_payload_type = MPT_IPV4;
+               rt0->rt_ttl_propagate = MPLS_TTL_PROP_DEFAULT;
                rt0->rt_nh->nh_via_table = NEIGH_LINK_TABLE;
                rt0->rt_nh->nh_via_alen = lo->addr_len;
                memcpy(__mpls_nh_via(rt0, rt0->rt_nh), lo->dev_addr,
@@ -1883,12 +2026,13 @@ static int resize_platform_label_table(struct net *net, size_t limit)
        }
        if (limit > MPLS_LABEL_IPV6NULL) {
                struct net_device *lo = net->loopback_dev;
-               rt2 = mpls_rt_alloc(1, lo->addr_len);
-               if (!rt2)
+               rt2 = mpls_rt_alloc(1, lo->addr_len, 0);
+               if (IS_ERR(rt2))
                        goto nort2;
                RCU_INIT_POINTER(rt2->rt_nh->nh_dev, lo);
                rt2->rt_protocol = RTPROT_KERNEL;
                rt2->rt_payload_type = MPT_IPV6;
+               rt2->rt_ttl_propagate = MPLS_TTL_PROP_DEFAULT;
                rt2->rt_nh->nh_via_table = NEIGH_LINK_TABLE;
                rt2->rt_nh->nh_via_alen = lo->addr_len;
                memcpy(__mpls_nh_via(rt2, rt2->rt_nh), lo->dev_addr,
@@ -1970,6 +2114,9 @@ static int mpls_platform_labels(struct ctl_table *table, int write,
        return ret;
 }
 
+#define MPLS_NS_SYSCTL_OFFSET(field)           \
+       (&((struct net *)0)->field)
+
 static const struct ctl_table mpls_table[] = {
        {
                .procname       = "platform_labels",
@@ -1978,21 +2125,47 @@ static const struct ctl_table mpls_table[] = {
                .mode           = 0644,
                .proc_handler   = mpls_platform_labels,
        },
+       {
+               .procname       = "ip_ttl_propagate",
+               .data           = MPLS_NS_SYSCTL_OFFSET(mpls.ip_ttl_propagate),
+               .maxlen         = sizeof(int),
+               .mode           = 0644,
+               .proc_handler   = proc_dointvec_minmax,
+               .extra1         = &zero,
+               .extra2         = &one,
+       },
+       {
+               .procname       = "default_ttl",
+               .data           = MPLS_NS_SYSCTL_OFFSET(mpls.default_ttl),
+               .maxlen         = sizeof(int),
+               .mode           = 0644,
+               .proc_handler   = proc_dointvec_minmax,
+               .extra1         = &one,
+               .extra2         = &ttl_max,
+       },
        { }
 };
 
 static int mpls_net_init(struct net *net)
 {
        struct ctl_table *table;
+       int i;
 
        net->mpls.platform_labels = 0;
        net->mpls.platform_label = NULL;
+       net->mpls.ip_ttl_propagate = 1;
+       net->mpls.default_ttl = 255;
 
        table = kmemdup(mpls_table, sizeof(mpls_table), GFP_KERNEL);
        if (table == NULL)
                return -ENOMEM;
 
-       table[0].data = net;
+       /* Table data contains only offsets relative to the base of
+        * struct net at this point, so make them absolute.
+        */
+       for (i = 0; i < ARRAY_SIZE(mpls_table) - 1; i++)
+               table[i].data = (char *)net + (uintptr_t)table[i].data;
+
        net->mpls.ctl = register_net_sysctl(net, "net/mpls", table);
        if (net->mpls.ctl == NULL) {
                kfree(table);
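
The sysctl table template stores per-field offsets into struct net (spelled with the null-pointer trick in MPLS_NS_SYSCTL_OFFSET), and each namespace's copy rebases them into real pointers. A self-contained model using offsetof() in place of the kernel's macro:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct ns { int ip_ttl_propagate; int default_ttl; };
struct ctl { const char *name; void *data; };

/* shared template: .data holds an offset, not a pointer */
static const struct ctl template[] = {
	{ "ip_ttl_propagate", (void *)offsetof(struct ns, ip_ttl_propagate) },
	{ "default_ttl",      (void *)offsetof(struct ns, default_ttl) },
};

int main(void)
{
	struct ns ns = { .ip_ttl_propagate = 1, .default_ttl = 255 };
	struct ctl table[2];

	memcpy(table, template, sizeof(template));
	for (size_t i = 0; i < 2; i++)	/* offset -> absolute pointer */
		table[i].data = (char *)&ns + (uintptr_t)template[i].data;

	printf("%s = %d\n", table[1].name, *(int *)table[1].data);
	return 0;
}
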
@@ -2028,6 +2201,7 @@ static void mpls_net_exit(struct net *net)
        for (index = 0; index < platform_labels; index++) {
                struct mpls_route *rt = rtnl_dereference(platform_label[index]);
                RCU_INIT_POINTER(platform_label[index], NULL);
+               mpls_notify_route(net, index, rt, NULL, NULL);
                mpls_rt_free(rt);
        }
        rtnl_unlock();
index 76360d8b95798e148c5d6a48ce3375eeadcad5a5..4db6a59713220dcefac8be970ab06b1ae25bcd5c 100644 (file)
@@ -2,6 +2,11 @@
 #define MPLS_INTERNAL_H
 #include <net/mpls.h>
 
+/* put a reasonable limit on the number of labels
+ * we will accept from userspace
+ */
+#define MAX_NEW_LABELS 30
+
 struct mpls_entry_decoded {
        u32 label;
        u8 ttl;
@@ -64,7 +69,6 @@ struct mpls_dev {
 struct sk_buff;
 
 #define LABEL_NOT_SPECIFIED (1 << 20)
-#define MAX_NEW_LABELS 2
 
 /* This maximum ha length copied from the definition of struct neighbour */
 #define VIA_ALEN_ALIGN sizeof(unsigned long)
@@ -83,11 +87,35 @@ enum mpls_payload_type {
 
 struct mpls_nh { /* next hop label forwarding entry */
        struct net_device __rcu *nh_dev;
+
+       /* nh_flags is accessed under RCU in the packet path; it is
+        * modified handling netdev events with rtnl lock held
+        */
        unsigned int            nh_flags;
-       u32                     nh_label[MAX_NEW_LABELS];
        u8                      nh_labels;
        u8                      nh_via_alen;
        u8                      nh_via_table;
+       u8                      nh_reserved1;
+
+       u32                     nh_label[0];
+};
+
+/* offset of via from beginning of mpls_nh */
+#define MPLS_NH_VIA_OFF(num_labels) \
+               ALIGN(sizeof(struct mpls_nh) + (num_labels) * sizeof(u32), \
+                     VIA_ALEN_ALIGN)
+
+/* all nexthops within a route have the same size based on the
+ * max number of labels and max via length across all nexthops
+ */
+#define MPLS_NH_SIZE(num_labels, max_via_alen)         \
+               (MPLS_NH_VIA_OFF((num_labels)) +        \
+               ALIGN((max_via_alen), VIA_ALEN_ALIGN))
+
+enum mpls_ttl_propagation {
+       MPLS_TTL_PROP_DEFAULT,
+       MPLS_TTL_PROP_ENABLED,
+       MPLS_TTL_PROP_DISABLED,
 };
 
 /* The route, nexthops and vias are stored together in the same memory
@@ -98,16 +126,16 @@ struct mpls_nh { /* next hop label forwarding entry */
  * +----------------------+
  * | mpls_nh 0            |
  * +----------------------+
- * | ...                  |
- * +----------------------+
- * | mpls_nh n-1          |
- * +----------------------+
- * | alignment padding    |
+ * | alignment padding    |   4 bytes for odd number of labels
  * +----------------------+
  * | via[rt_max_alen] 0   |
  * +----------------------+
+ * | alignment padding    |   via's aligned on sizeof(unsigned long)
+ * +----------------------+
  * | ...                  |
  * +----------------------+
+ * | mpls_nh n-1          |
+ * +----------------------+
  * | via[rt_max_alen] n-1 |
  * +----------------------+
  */
@@ -116,22 +144,30 @@ struct mpls_route { /* next hop label forwarding entry */
        u8                      rt_protocol;
        u8                      rt_payload_type;
        u8                      rt_max_alen;
-       unsigned int            rt_nhn;
-       unsigned int            rt_nhn_alive;
+       u8                      rt_ttl_propagate;
+       u8                      rt_nhn;
+       /* rt_nhn_alive is accessed under RCU in the packet path; it
+        * is modified handling netdev events with rtnl lock held
+        */
+       u8                      rt_nhn_alive;
+       u8                      rt_nh_size;
+       u8                      rt_via_offset;
+       u8                      rt_reserved1;
        struct mpls_nh          rt_nh[0];
 };
 
 #define for_nexthops(rt) {                                             \
-       int nhsel; struct mpls_nh *nh;                  \
-       for (nhsel = 0, nh = (rt)->rt_nh;                               \
+       int nhsel; struct mpls_nh *nh;  u8 *__nh;                       \
+       for (nhsel = 0, nh = (rt)->rt_nh, __nh = (u8 *)((rt)->rt_nh);   \
             nhsel < (rt)->rt_nhn;                                      \
-            nh++, nhsel++)
+            __nh += rt->rt_nh_size, nh = (struct mpls_nh *)__nh, nhsel++)
 
 #define change_nexthops(rt) {                                          \
-       int nhsel; struct mpls_nh *nh;                          \
-       for (nhsel = 0, nh = (struct mpls_nh *)((rt)->rt_nh);   \
+       int nhsel; struct mpls_nh *nh; u8 *__nh;                        \
+       for (nhsel = 0, nh = (struct mpls_nh *)((rt)->rt_nh),           \
+                       __nh = (u8 *)((rt)->rt_nh);                     \
             nhsel < (rt)->rt_nhn;                                      \
-            nh++, nhsel++)
+            __nh += rt->rt_nh_size, nh = (struct mpls_nh *)__nh, nhsel++)
 
 #define endfor_nexthops(rt) }
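
With variable-size nexthops the iteration macros can no longer do nh++; they advance a byte cursor by rt_nh_size and find each via address at rt_via_offset into the slot. A userspace sketch of that stride addressing (slot and offset values are made up for illustration):

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

struct route {
	uint8_t nhn, nh_size, via_offset;
	uint8_t nh[];			/* nhn slots of nh_size bytes each */
};

int main(void)
{
	struct route *rt = calloc(1, sizeof(*rt) + 3 * 24);
	uint8_t *cur;

	if (!rt)
		return 1;
	rt->nhn = 3;
	rt->nh_size = 24;		/* plays the role of rt_nh_size */
	rt->via_offset = 16;		/* plays the role of rt_via_offset */

	/* the macros advance a byte cursor instead of doing nh++ */
	cur = rt->nh;
	for (int i = 0; i < rt->nhn; i++, cur += rt->nh_size) {
		char *via = (char *)cur + rt->via_offset;

		snprintf(via, rt->nh_size - rt->via_offset, "via%d", i);
		printf("%s\n", via);
	}
	free(rt);
	return 0;
}
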
 
@@ -166,7 +202,7 @@ static inline struct mpls_dev *mpls_dev_get(const struct net_device *dev)
 
 int nla_put_labels(struct sk_buff *skb, int attrtype,  u8 labels,
                   const u32 label[]);
-int nla_get_labels(const struct nlattr *nla, u32 max_labels, u8 *labels,
+int nla_get_labels(const struct nlattr *nla, u8 max_labels, u8 *labels,
                   u32 label[]);
 int nla_get_via(const struct nlattr *nla, u8 *via_alen, u8 *via_table,
                u8 via[]);
index e4e4424f9eb1f5531d22463687d74c2e2ca971a6..fe00e98667cf603c5f01150fb4899f98c86c8496 100644 (file)
@@ -29,6 +29,7 @@
 
 static const struct nla_policy mpls_iptunnel_policy[MPLS_IPTUNNEL_MAX + 1] = {
        [MPLS_IPTUNNEL_DST]     = { .type = NLA_U32 },
+       [MPLS_IPTUNNEL_TTL]     = { .type = NLA_U8 },
 };
 
 static unsigned int mpls_encap_size(struct mpls_iptunnel_encap *en)
@@ -49,6 +50,7 @@ static int mpls_xmit(struct sk_buff *skb)
        struct rtable *rt = NULL;
        struct rt6_info *rt6 = NULL;
        struct mpls_dev *out_mdev;
+       struct net *net;
        int err = 0;
        bool bos;
        int i;
@@ -56,17 +58,7 @@ static int mpls_xmit(struct sk_buff *skb)
 
        /* Find the output device */
        out_dev = dst->dev;
-
-       /* Obtain the ttl */
-       if (dst->ops->family == AF_INET) {
-               ttl = ip_hdr(skb)->ttl;
-               rt = (struct rtable *)dst;
-       } else if (dst->ops->family == AF_INET6) {
-               ttl = ipv6_hdr(skb)->hop_limit;
-               rt6 = (struct rt6_info *)dst;
-       } else {
-               goto drop;
-       }
+       net = dev_net(out_dev);
 
        skb_orphan(skb);
 
@@ -78,6 +70,38 @@ static int mpls_xmit(struct sk_buff *skb)
 
        tun_encap_info = mpls_lwtunnel_encap(dst->lwtstate);
 
+       /* Obtain the ttl using the following set of rules.
+        *
+        * LWT ttl propagation setting:
+        *  - disabled => use default TTL value from LWT
+        *  - enabled  => use TTL value from IPv4/IPv6 header
+        *  - default  =>
+        *   Global ttl propagation setting:
+        *    - disabled => use default TTL value from global setting
+        *    - enabled => use TTL value from IPv4/IPv6 header
+        */
+       if (dst->ops->family == AF_INET) {
+               if (tun_encap_info->ttl_propagate == MPLS_TTL_PROP_DISABLED)
+                       ttl = tun_encap_info->default_ttl;
+               else if (tun_encap_info->ttl_propagate == MPLS_TTL_PROP_DEFAULT &&
+                        !net->mpls.ip_ttl_propagate)
+                       ttl = net->mpls.default_ttl;
+               else
+                       ttl = ip_hdr(skb)->ttl;
+               rt = (struct rtable *)dst;
+       } else if (dst->ops->family == AF_INET6) {
+               if (tun_encap_info->ttl_propagate == MPLS_TTL_PROP_DISABLED)
+                       ttl = tun_encap_info->default_ttl;
+               else if (tun_encap_info->ttl_propagate == MPLS_TTL_PROP_DEFAULT &&
+                        !net->mpls.ip_ttl_propagate)
+                       ttl = net->mpls.default_ttl;
+               else
+                       ttl = ipv6_hdr(skb)->hop_limit;
+               rt6 = (struct rt6_info *)dst;
+       } else {
+               goto drop;
+       }
+
        /* Verify the destination can hold the packet */
        new_header_size = mpls_encap_size(tun_encap_info);
        mtu = mpls_dev_mtu(out_dev);
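
The transmit-side rule mirrors the comment above: an explicit LWT setting wins, "default" defers to the per-namespace sysctl, and whichever level disables propagation supplies a fixed default TTL in place of the IP header's. A standalone model of the selection:

#include <stdio.h>

enum { PROP_DEFAULT, PROP_ENABLED, PROP_DISABLED };

static int xmit_ttl(int lwt_prop, int lwt_default,
		    int ns_prop, int ns_default, int ip_ttl)
{
	if (lwt_prop == PROP_DISABLED)
		return lwt_default;	/* per-tunnel fixed TTL */
	if (lwt_prop == PROP_DEFAULT && !ns_prop)
		return ns_default;	/* global fixed TTL */
	return ip_ttl;			/* propagate from the IP header */
}

int main(void)
{
	printf("%d\n", xmit_ttl(PROP_DISABLED, 64, 1, 255, 10)); /* 64 */
	printf("%d\n", xmit_ttl(PROP_DEFAULT,   0, 0, 255, 10)); /* 255 */
	printf("%d\n", xmit_ttl(PROP_DEFAULT,   0, 1, 255, 10)); /* 10 */
	return 0;
}
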
@@ -140,6 +164,7 @@ static int mpls_build_state(struct nlattr *nla,
        struct mpls_iptunnel_encap *tun_encap_info;
        struct nlattr *tb[MPLS_IPTUNNEL_MAX + 1];
        struct lwtunnel_state *newts;
+       u8 n_labels;
        int ret;
 
        ret = nla_parse_nested(tb, MPLS_IPTUNNEL_MAX, nla,
@@ -151,15 +176,32 @@ static int mpls_build_state(struct nlattr *nla,
                return -EINVAL;
 
 
-       newts = lwtunnel_state_alloc(sizeof(*tun_encap_info));
+       /* determine number of labels */
+       if (nla_get_labels(tb[MPLS_IPTUNNEL_DST],
+                          MAX_NEW_LABELS, &n_labels, NULL))
+               return -EINVAL;
+
+       newts = lwtunnel_state_alloc(sizeof(*tun_encap_info) +
+                                    n_labels * sizeof(u32));
        if (!newts)
                return -ENOMEM;
 
        tun_encap_info = mpls_lwtunnel_encap(newts);
-       ret = nla_get_labels(tb[MPLS_IPTUNNEL_DST], MAX_NEW_LABELS,
+       ret = nla_get_labels(tb[MPLS_IPTUNNEL_DST], n_labels,
                             &tun_encap_info->labels, tun_encap_info->label);
        if (ret)
                goto errout;
+
+       tun_encap_info->ttl_propagate = MPLS_TTL_PROP_DEFAULT;
+
+       if (tb[MPLS_IPTUNNEL_TTL]) {
+               tun_encap_info->default_ttl = nla_get_u8(tb[MPLS_IPTUNNEL_TTL]);
+               /* TTL 0 implies propagate from IP header */
+               tun_encap_info->ttl_propagate = tun_encap_info->default_ttl ?
+                       MPLS_TTL_PROP_DISABLED :
+                       MPLS_TTL_PROP_ENABLED;
+       }
+
        newts->type = LWTUNNEL_ENCAP_MPLS;
        newts->flags |= LWTUNNEL_STATE_XMIT_REDIRECT;
        newts->headroom = mpls_encap_size(tun_encap_info);
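
Two things happen in this hunk: the label array now lives at the tail of the encap struct, so the state is allocated only after a counting pass, and the MPLS_IPTUNNEL_TTL attribute is tri-state: absent keeps the default behaviour, 0 forces propagation, nonzero disables it and supplies the TTL. A small model of the attribute semantics (types simplified):

#include <stdio.h>

enum { PROP_DEFAULT, PROP_ENABLED, PROP_DISABLED };

struct encap { int ttl_propagate; unsigned char default_ttl; };

/* absent attribute keeps PROP_DEFAULT; 0 forces propagation; nonzero
 * disables it and supplies the default TTL */
static void apply_ttl_attr(struct encap *e, int present, unsigned char val)
{
	e->ttl_propagate = PROP_DEFAULT;
	e->default_ttl = 0;
	if (!present)
		return;
	e->default_ttl = val;
	e->ttl_propagate = val ? PROP_DISABLED : PROP_ENABLED;
}

int main(void)
{
	struct encap e;

	apply_ttl_attr(&e, 1, 0);
	printf("ttl=0  -> propagate=%d\n", e.ttl_propagate == PROP_ENABLED);
	apply_ttl_attr(&e, 1, 64);
	printf("ttl=64 -> propagate=%d default=%d\n",
	       e.ttl_propagate == PROP_ENABLED, e.default_ttl);
	return 0;
}
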
@@ -186,6 +228,10 @@ static int mpls_fill_encap_info(struct sk_buff *skb,
                           tun_encap_info->label))
                goto nla_put_failure;
 
+       if (tun_encap_info->ttl_propagate != MPLS_TTL_PROP_DEFAULT &&
+           nla_put_u8(skb, MPLS_IPTUNNEL_TTL, tun_encap_info->default_ttl))
+               goto nla_put_failure;
+
        return 0;
 
 nla_put_failure:
@@ -195,10 +241,16 @@ nla_put_failure:
 static int mpls_encap_nlsize(struct lwtunnel_state *lwtstate)
 {
        struct mpls_iptunnel_encap *tun_encap_info;
+       int nlsize;
 
        tun_encap_info = mpls_lwtunnel_encap(lwtstate);
 
-       return nla_total_size(tun_encap_info->labels * 4);
+       nlsize = nla_total_size(tun_encap_info->labels * 4);
+
+       if (tun_encap_info->ttl_propagate != MPLS_TTL_PROP_DEFAULT)
+               nlsize += nla_total_size(1);
+
+       return nlsize;
 }
 
 static int mpls_encap_cmp(struct lwtunnel_state *a, struct lwtunnel_state *b)
@@ -207,10 +259,12 @@ static int mpls_encap_cmp(struct lwtunnel_state *a, struct lwtunnel_state *b)
        struct mpls_iptunnel_encap *b_hdr = mpls_lwtunnel_encap(b);
        int l;
 
-       if (a_hdr->labels != b_hdr->labels)
+       if (a_hdr->labels != b_hdr->labels ||
+           a_hdr->ttl_propagate != b_hdr->ttl_propagate ||
+           a_hdr->default_ttl != b_hdr->default_ttl)
                return 1;
 
-       for (l = 0; l < MAX_NEW_LABELS; l++)
+       for (l = 0; l < a_hdr->labels; l++)
                if (a_hdr->label[l] != b_hdr->label[l])
                        return 1;
        return 0;
index e6a2753dff9e91dac406e657ad0d49875f052503..3d2ac71a83ec411294361037e7b1d77b6d4bb7e2 100644 (file)
@@ -181,7 +181,7 @@ static inline int ip_vs_conn_hash(struct ip_vs_conn *cp)
 
        if (!(cp->flags & IP_VS_CONN_F_HASHED)) {
                cp->flags |= IP_VS_CONN_F_HASHED;
-               atomic_inc(&cp->refcnt);
+               refcount_inc(&cp->refcnt);
                hlist_add_head_rcu(&cp->c_list, &ip_vs_conn_tab[hash]);
                ret = 1;
        } else {
@@ -215,7 +215,7 @@ static inline int ip_vs_conn_unhash(struct ip_vs_conn *cp)
        if (cp->flags & IP_VS_CONN_F_HASHED) {
                hlist_del_rcu(&cp->c_list);
                cp->flags &= ~IP_VS_CONN_F_HASHED;
-               atomic_dec(&cp->refcnt);
+               refcount_dec(&cp->refcnt);
                ret = 1;
        } else
                ret = 0;
@@ -242,13 +242,13 @@ static inline bool ip_vs_conn_unlink(struct ip_vs_conn *cp)
        if (cp->flags & IP_VS_CONN_F_HASHED) {
                ret = false;
                /* Decrease refcnt and unlink conn only if we are last user */
-               if (atomic_cmpxchg(&cp->refcnt, 1, 0) == 1) {
+               if (refcount_dec_if_one(&cp->refcnt)) {
                        hlist_del_rcu(&cp->c_list);
                        cp->flags &= ~IP_VS_CONN_F_HASHED;
                        ret = true;
                }
        } else
-               ret = atomic_read(&cp->refcnt) ? false : true;
+               ret = refcount_read(&cp->refcnt) ? false : true;
 
        spin_unlock(&cp->lock);
        ct_write_unlock_bh(hash);
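
The refcount_t conversion replaces the open-coded atomic_cmpxchg(&cp->refcnt, 1, 0) with refcount_dec_if_one(), which drops the count only when the caller holds the last reference. A single-threaded model of just those semantics (the real primitive is atomic):

#include <stdbool.h>
#include <stdio.h>

/* succeed, and drop to zero, only when the count is exactly 1 */
static bool dec_if_one(int *refcnt)
{
	if (*refcnt != 1)
		return false;	/* other users still hold references */
	*refcnt = 0;
	return true;
}

int main(void)
{
	int r = 2;

	printf("%d\n", dec_if_one(&r));	/* 0: another reference exists */
	r = 1;
	printf("%d\n", dec_if_one(&r));	/* 1: caller held the last one */
	return 0;
}
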
@@ -475,7 +475,7 @@ static void __ip_vs_conn_put_timer(struct ip_vs_conn *cp)
 void ip_vs_conn_put(struct ip_vs_conn *cp)
 {
        if ((cp->flags & IP_VS_CONN_F_ONE_PACKET) &&
-           (atomic_read(&cp->refcnt) == 1) &&
+           (refcount_read(&cp->refcnt) == 1) &&
            !timer_pending(&cp->timer))
                /* expire connection immediately */
                __ip_vs_conn_put_notimer(cp);
@@ -617,8 +617,8 @@ ip_vs_bind_dest(struct ip_vs_conn *cp, struct ip_vs_dest *dest)
                      IP_VS_DBG_ADDR(cp->af, &cp->vaddr), ntohs(cp->vport),
                      IP_VS_DBG_ADDR(cp->daf, &cp->daddr), ntohs(cp->dport),
                      ip_vs_fwd_tag(cp), cp->state,
-                     cp->flags, atomic_read(&cp->refcnt),
-                     atomic_read(&dest->refcnt));
+                     cp->flags, refcount_read(&cp->refcnt),
+                     refcount_read(&dest->refcnt));
 
        /* Update the connection counters */
        if (!(flags & IP_VS_CONN_F_TEMPLATE)) {
@@ -714,8 +714,8 @@ static inline void ip_vs_unbind_dest(struct ip_vs_conn *cp)
                      IP_VS_DBG_ADDR(cp->af, &cp->vaddr), ntohs(cp->vport),
                      IP_VS_DBG_ADDR(cp->daf, &cp->daddr), ntohs(cp->dport),
                      ip_vs_fwd_tag(cp), cp->state,
-                     cp->flags, atomic_read(&cp->refcnt),
-                     atomic_read(&dest->refcnt));
+                     cp->flags, refcount_read(&cp->refcnt),
+                     refcount_read(&dest->refcnt));
 
        /* Update the connection counters */
        if (!(cp->flags & IP_VS_CONN_F_TEMPLATE)) {
@@ -863,10 +863,10 @@ static void ip_vs_conn_expire(unsigned long data)
 
   expire_later:
        IP_VS_DBG(7, "delayed: conn->refcnt=%d conn->n_control=%d\n",
-                 atomic_read(&cp->refcnt),
+                 refcount_read(&cp->refcnt),
                  atomic_read(&cp->n_control));
 
-       atomic_inc(&cp->refcnt);
+       refcount_inc(&cp->refcnt);
        cp->timeout = 60*HZ;
 
        if (ipvs->sync_state & IP_VS_STATE_MASTER)
@@ -941,7 +941,7 @@ ip_vs_conn_new(const struct ip_vs_conn_param *p, int dest_af,
         * it in the table, so that other thread run ip_vs_random_dropentry
         * but cannot drop this entry.
         */
-       atomic_set(&cp->refcnt, 1);
+       refcount_set(&cp->refcnt, 1);
 
        cp->control = NULL;
        atomic_set(&cp->n_control, 0);
index db40050f8785eb9205a7bf493a71c9b956b93ab8..b4a746d0e39bcc4b7418e53c2623a057f119a39d 100644 (file)
@@ -542,7 +542,7 @@ ip_vs_schedule(struct ip_vs_service *svc, struct sk_buff *skb,
                      IP_VS_DBG_ADDR(cp->af, &cp->caddr), ntohs(cp->cport),
                      IP_VS_DBG_ADDR(cp->af, &cp->vaddr), ntohs(cp->vport),
                      IP_VS_DBG_ADDR(cp->daf, &cp->daddr), ntohs(cp->dport),
-                     cp->flags, atomic_read(&cp->refcnt));
+                     cp->flags, refcount_read(&cp->refcnt));
 
        ip_vs_conn_stats(cp, svc);
        return cp;
@@ -1193,7 +1193,7 @@ struct ip_vs_conn *ip_vs_new_conn_out(struct ip_vs_service *svc,
                      IP_VS_DBG_ADDR(cp->af, &cp->caddr), ntohs(cp->cport),
                      IP_VS_DBG_ADDR(cp->af, &cp->vaddr), ntohs(cp->vport),
                      IP_VS_DBG_ADDR(cp->af, &cp->daddr), ntohs(cp->dport),
-                     cp->flags, atomic_read(&cp->refcnt));
+                     cp->flags, refcount_read(&cp->refcnt));
        LeaveFunction(12);
        return cp;
 }
@@ -2231,8 +2231,6 @@ static int __net_init __ip_vs_init(struct net *net)
        if (ip_vs_sync_net_init(ipvs) < 0)
                goto sync_fail;
 
-       printk(KERN_INFO "IPVS: Creating netns size=%zu id=%d\n",
-                        sizeof(struct netns_ipvs), ipvs->gen);
        return 0;
 /*
  * Error handling
index 5aeb0dde6ccc5e525e740ca5fde3ba2c58c50070..541aa76947755e1228deb8d3ad40e576b182a83f 100644 (file)
@@ -699,7 +699,7 @@ ip_vs_trash_get_dest(struct ip_vs_service *svc, int dest_af,
                              dest->vfwmark,
                              IP_VS_DBG_ADDR(dest->af, &dest->addr),
                              ntohs(dest->port),
-                             atomic_read(&dest->refcnt));
+                             refcount_read(&dest->refcnt));
                if (dest->af == dest_af &&
                    ip_vs_addr_equal(dest_af, &dest->addr, daddr) &&
                    dest->port == dport &&
@@ -934,7 +934,7 @@ ip_vs_new_dest(struct ip_vs_service *svc, struct ip_vs_dest_user_kern *udest,
        atomic_set(&dest->activeconns, 0);
        atomic_set(&dest->inactconns, 0);
        atomic_set(&dest->persistconns, 0);
-       atomic_set(&dest->refcnt, 1);
+       refcount_set(&dest->refcnt, 1);
 
        INIT_HLIST_NODE(&dest->d_list);
        spin_lock_init(&dest->dst_lock);
@@ -998,7 +998,7 @@ ip_vs_add_dest(struct ip_vs_service *svc, struct ip_vs_dest_user_kern *udest)
                IP_VS_DBG_BUF(3, "Get destination %s:%u from trash, "
                              "dest->refcnt=%d, service %u/%s:%u\n",
                              IP_VS_DBG_ADDR(udest->af, &daddr), ntohs(dport),
-                             atomic_read(&dest->refcnt),
+                             refcount_read(&dest->refcnt),
                              dest->vfwmark,
                              IP_VS_DBG_ADDR(svc->af, &dest->vaddr),
                              ntohs(dest->vport));
@@ -1074,7 +1074,7 @@ static void __ip_vs_del_dest(struct netns_ipvs *ipvs, struct ip_vs_dest *dest,
        spin_lock_bh(&ipvs->dest_trash_lock);
        IP_VS_DBG_BUF(3, "Moving dest %s:%u into trash, dest->refcnt=%d\n",
                      IP_VS_DBG_ADDR(dest->af, &dest->addr), ntohs(dest->port),
-                     atomic_read(&dest->refcnt));
+                     refcount_read(&dest->refcnt));
        if (list_empty(&ipvs->dest_trash) && !cleanup)
                mod_timer(&ipvs->dest_trash_timer,
                          jiffies + (IP_VS_DEST_TRASH_PERIOD >> 1));
@@ -1157,7 +1157,7 @@ static void ip_vs_dest_trash_expire(unsigned long data)
 
        spin_lock(&ipvs->dest_trash_lock);
        list_for_each_entry_safe(dest, next, &ipvs->dest_trash, t_list) {
-               if (atomic_read(&dest->refcnt) > 1)
+               if (refcount_read(&dest->refcnt) > 1)
                        continue;
                if (dest->idle_start) {
                        if (time_before(now, dest->idle_start +
@@ -1545,7 +1545,7 @@ ip_vs_forget_dev(struct ip_vs_dest *dest, struct net_device *dev)
                              dev->name,
                              IP_VS_DBG_ADDR(dest->af, &dest->addr),
                              ntohs(dest->port),
-                             atomic_read(&dest->refcnt));
+                             refcount_read(&dest->refcnt));
                __ip_vs_dst_cache_reset(dest);
        }
        spin_unlock_bh(&dest->dst_lock);
index 5824927cf8e02b7fa02f319177d96219c9427033..b6aa4a970c6e97678e8c88e8fe0e0e0b4e4d476d 100644 (file)
@@ -448,7 +448,7 @@ __ip_vs_lblc_schedule(struct ip_vs_service *svc)
                      IP_VS_DBG_ADDR(least->af, &least->addr),
                      ntohs(least->port),
                      atomic_read(&least->activeconns),
-                     atomic_read(&least->refcnt),
+                     refcount_read(&least->refcnt),
                      atomic_read(&least->weight), loh);
 
        return least;
index 703f11877beece84cb56ec62d4bd13e87c0d67c3..c13ff575f9f73ab9fb53837ff1b01cd279156c9f 100644 (file)
@@ -204,7 +204,7 @@ static inline struct ip_vs_dest *ip_vs_dest_set_min(struct ip_vs_dest_set *set)
                      IP_VS_DBG_ADDR(least->af, &least->addr),
                      ntohs(least->port),
                      atomic_read(&least->activeconns),
-                     atomic_read(&least->refcnt),
+                     refcount_read(&least->refcnt),
                      atomic_read(&least->weight), loh);
        return least;
 }
@@ -249,7 +249,7 @@ static inline struct ip_vs_dest *ip_vs_dest_set_max(struct ip_vs_dest_set *set)
                      __func__,
                      IP_VS_DBG_ADDR(most->af, &most->addr), ntohs(most->port),
                      atomic_read(&most->activeconns),
-                     atomic_read(&most->refcnt),
+                     refcount_read(&most->refcnt),
                      atomic_read(&most->weight), moh);
        return most;
 }
@@ -612,7 +612,7 @@ __ip_vs_lblcr_schedule(struct ip_vs_service *svc)
                      IP_VS_DBG_ADDR(least->af, &least->addr),
                      ntohs(least->port),
                      atomic_read(&least->activeconns),
-                     atomic_read(&least->refcnt),
+                     refcount_read(&least->refcnt),
                      atomic_read(&least->weight), loh);
 
        return least;
index a8b63401e7731e6c8fef37b62c43425e2f96b43c..7d9d4ac596ca5809a9322aa3c1276f6dc08f146c 100644 (file)
@@ -110,7 +110,7 @@ ip_vs_nq_schedule(struct ip_vs_service *svc, const struct sk_buff *skb,
                      IP_VS_DBG_ADDR(least->af, &least->addr),
                      ntohs(least->port),
                      atomic_read(&least->activeconns),
-                     atomic_read(&least->refcnt),
+                     refcount_read(&least->refcnt),
                      atomic_read(&least->weight), loh);
 
        return least;
index d952d67f904d1124ed0c5adfa20a51f82207181c..56f8e4b204ffcc4840a1042097a0f7d0f004df18 100644 (file)
@@ -447,7 +447,7 @@ set_sctp_state(struct ip_vs_proto_data *pd, struct ip_vs_conn *cp,
                                ntohs(cp->cport),
                                sctp_state_name(cp->state),
                                sctp_state_name(next_state),
-                               atomic_read(&cp->refcnt));
+                               refcount_read(&cp->refcnt));
                if (dest) {
                        if (!(cp->flags & IP_VS_CONN_F_INACTIVE) &&
                                (next_state != IP_VS_SCTP_S_ESTABLISHED)) {
index 5117bcb7d2f00604d5ab73f0610246a7d4fef755..12dc8d5bc37d7ea03ba8448514a6d3caf03a62b0 100644 (file)
@@ -557,7 +557,7 @@ set_tcp_state(struct ip_vs_proto_data *pd, struct ip_vs_conn *cp,
                              ntohs(cp->cport),
                              tcp_state_name(cp->state),
                              tcp_state_name(new_state),
-                             atomic_read(&cp->refcnt));
+                             refcount_read(&cp->refcnt));
 
                if (dest) {
                        if (!(cp->flags & IP_VS_CONN_F_INACTIVE) &&
index 58bacfc461ee6a1d6df4e6e024032fb52a044e35..ee0530d14c5f9d468e199bcb7cd53aad022b748b 100644 (file)
@@ -97,7 +97,7 @@ stop:
                      "activeconns %d refcnt %d weight %d\n",
                      IP_VS_DBG_ADDR(dest->af, &dest->addr), ntohs(dest->port),
                      atomic_read(&dest->activeconns),
-                     atomic_read(&dest->refcnt), atomic_read(&dest->weight));
+                     refcount_read(&dest->refcnt), atomic_read(&dest->weight));
 
        return dest;
 }
index f8e2d00f528b945e774564854fc66f53dbc61970..ab23cf203437772407f800861ea68687d78f8ff6 100644 (file)
@@ -111,7 +111,7 @@ ip_vs_sed_schedule(struct ip_vs_service *svc, const struct sk_buff *skb,
                      IP_VS_DBG_ADDR(least->af, &least->addr),
                      ntohs(least->port),
                      atomic_read(&least->activeconns),
-                     atomic_read(&least->refcnt),
+                     refcount_read(&least->refcnt),
                      atomic_read(&least->weight), loh);
 
        return least;
index 6b366fd905542ff086a36da4111bd11646d274d8..6add39e0ec20d61d21883082e5079d16cab6942c 100644 (file)
@@ -83,7 +83,7 @@ ip_vs_wlc_schedule(struct ip_vs_service *svc, const struct sk_buff *skb,
                      IP_VS_DBG_ADDR(least->af, &least->addr),
                      ntohs(least->port),
                      atomic_read(&least->activeconns),
-                     atomic_read(&least->refcnt),
+                     refcount_read(&least->refcnt),
                      atomic_read(&least->weight), loh);
 
        return least;
index 17e6d4406ca7c32657eff5e103d0aa1b9317e813..62258dd457ac9825aa14f26bfebb08bb2f2f1755 100644 (file)
@@ -218,7 +218,7 @@ found:
                      "activeconns %d refcnt %d weight %d\n",
                      IP_VS_DBG_ADDR(dest->af, &dest->addr), ntohs(dest->port),
                      atomic_read(&dest->activeconns),
-                     atomic_read(&dest->refcnt),
+                     refcount_read(&dest->refcnt),
                      atomic_read(&dest->weight));
        mark->cl = dest;
 
index 071b97fcbefb083ded417e06e739a4622b237fe8..3d621b8d7b8a7ba6318ac1359e1efa795c63a479 100644 (file)
@@ -181,7 +181,11 @@ EXPORT_SYMBOL_GPL(nf_conntrack_htable_size);
 unsigned int nf_conntrack_max __read_mostly;
 seqcount_t nf_conntrack_generation __read_mostly;
 
-DEFINE_PER_CPU(struct nf_conn, nf_conntrack_untracked);
+/* nf_conn must be 8-byte aligned, as the 3 least significant bits
+ * are used for the nfctinfo. We cheat by (ab)using the per-CPU cache
+ * line alignment to enforce this.
+ */
+DEFINE_PER_CPU_ALIGNED(struct nf_conn, nf_conntrack_untracked);
 EXPORT_PER_CPU_SYMBOL(nf_conntrack_untracked);
 
 static unsigned int nf_conntrack_hash_rnd __read_mostly;
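
The alignment requirement exists because the conntrack pointer and the packet's ctinfo share one word in the skb; roughly, as a sketch of the packing scheme (the real definitions live in include/net/netfilter/nf_conntrack.h):

    #define NFCT_INFOMASK   7UL     /* low 3 bits hold enum ip_conntrack_info */
    #define NFCT_PTRMASK    ~(NFCT_INFOMASK)

    /* store: the pointer must be 8-byte aligned so its low bits are free */
    skb->_nfct = (unsigned long)ct | ctinfo;

    /* load: split the word back into pointer and ctinfo */
    ctinfo = skb->_nfct & NFCT_INFOMASK;
    ct     = (struct nf_conn *)(skb->_nfct & NFCT_PTRMASK);
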
@@ -1129,7 +1133,7 @@ EXPORT_SYMBOL_GPL(nf_conntrack_free);
 
 /* Allocate a new conntrack: we return -ENOMEM if classification
    failed due to stress.  Otherwise it really is unclassifiable. */
-static struct nf_conntrack_tuple_hash *
+static noinline struct nf_conntrack_tuple_hash *
 init_conntrack(struct net *net, struct nf_conn *tmpl,
               const struct nf_conntrack_tuple *tuple,
               struct nf_conntrack_l3proto *l3proto,
@@ -1237,21 +1241,20 @@ init_conntrack(struct net *net, struct nf_conn *tmpl,
        return &ct->tuplehash[IP_CT_DIR_ORIGINAL];
 }
 
-/* On success, returns conntrack ptr, sets skb->_nfct | ctinfo */
-static inline struct nf_conn *
+/* On success, returns 0, sets skb->_nfct | ctinfo */
+static int
 resolve_normal_ct(struct net *net, struct nf_conn *tmpl,
                  struct sk_buff *skb,
                  unsigned int dataoff,
                  u_int16_t l3num,
                  u_int8_t protonum,
                  struct nf_conntrack_l3proto *l3proto,
-                 struct nf_conntrack_l4proto *l4proto,
-                 int *set_reply,
-                 enum ip_conntrack_info *ctinfo)
+                 struct nf_conntrack_l4proto *l4proto)
 {
        const struct nf_conntrack_zone *zone;
        struct nf_conntrack_tuple tuple;
        struct nf_conntrack_tuple_hash *h;
+       enum ip_conntrack_info ctinfo;
        struct nf_conntrack_zone tmp;
        struct nf_conn *ct;
        u32 hash;
@@ -1260,7 +1263,7 @@ resolve_normal_ct(struct net *net, struct nf_conn *tmpl,
                             dataoff, l3num, protonum, net, &tuple, l3proto,
                             l4proto)) {
                pr_debug("Can't get tuple\n");
-               return NULL;
+               return 0;
        }
 
        /* look for tuple match */
@@ -1271,33 +1274,30 @@ resolve_normal_ct(struct net *net, struct nf_conn *tmpl,
                h = init_conntrack(net, tmpl, &tuple, l3proto, l4proto,
                                   skb, dataoff, hash);
                if (!h)
-                       return NULL;
+                       return 0;
                if (IS_ERR(h))
-                       return (void *)h;
+                       return PTR_ERR(h);
        }
        ct = nf_ct_tuplehash_to_ctrack(h);
 
        /* It exists; we have (non-exclusive) reference. */
        if (NF_CT_DIRECTION(h) == IP_CT_DIR_REPLY) {
-               *ctinfo = IP_CT_ESTABLISHED_REPLY;
-               /* Please set reply bit if this packet OK */
-               *set_reply = 1;
+               ctinfo = IP_CT_ESTABLISHED_REPLY;
        } else {
                /* Once we've had two way comms, always ESTABLISHED. */
                if (test_bit(IPS_SEEN_REPLY_BIT, &ct->status)) {
                        pr_debug("normal packet for %p\n", ct);
-                       *ctinfo = IP_CT_ESTABLISHED;
+                       ctinfo = IP_CT_ESTABLISHED;
                } else if (test_bit(IPS_EXPECTED_BIT, &ct->status)) {
                        pr_debug("related packet for %p\n", ct);
-                       *ctinfo = IP_CT_RELATED;
+                       ctinfo = IP_CT_RELATED;
                } else {
                        pr_debug("new packet for %p\n", ct);
-                       *ctinfo = IP_CT_NEW;
+                       ctinfo = IP_CT_NEW;
                }
-               *set_reply = 0;
        }
-       nf_ct_set(skb, ct, *ctinfo);
-       return ct;
+       nf_ct_set(skb, ct, ctinfo);
+       return 0;
 }
 
 unsigned int
@@ -1311,7 +1311,6 @@ nf_conntrack_in(struct net *net, u_int8_t pf, unsigned int hooknum,
        unsigned int *timeouts;
        unsigned int dataoff;
        u_int8_t protonum;
-       int set_reply = 0;
        int ret;
 
        tmpl = nf_ct_get(skb, &ctinfo);
@@ -1354,23 +1353,22 @@ nf_conntrack_in(struct net *net, u_int8_t pf, unsigned int hooknum,
                        goto out;
        }
 repeat:
-       ct = resolve_normal_ct(net, tmpl, skb, dataoff, pf, protonum,
-                              l3proto, l4proto, &set_reply, &ctinfo);
-       if (!ct) {
-               /* Not valid part of a connection */
-               NF_CT_STAT_INC_ATOMIC(net, invalid);
-               ret = NF_ACCEPT;
-               goto out;
-       }
-
-       if (IS_ERR(ct)) {
+       ret = resolve_normal_ct(net, tmpl, skb, dataoff, pf, protonum,
+                               l3proto, l4proto);
+       if (ret < 0) {
                /* Too stressed to deal. */
                NF_CT_STAT_INC_ATOMIC(net, drop);
                ret = NF_DROP;
                goto out;
        }
 
-       NF_CT_ASSERT(skb_nfct(skb));
+       ct = nf_ct_get(skb, &ctinfo);
+       if (!ct) {
+               /* Not valid part of a connection */
+               NF_CT_STAT_INC_ATOMIC(net, invalid);
+               ret = NF_ACCEPT;
+               goto out;
+       }
 
        /* Decide what timeout policy we want to apply to this flow. */
        timeouts = nf_ct_timeout_lookup(net, ct, l4proto);
@@ -1395,7 +1393,8 @@ repeat:
                goto out;
        }
 
-       if (set_reply && !test_and_set_bit(IPS_SEEN_REPLY_BIT, &ct->status))
+       if (ctinfo == IP_CT_ESTABLISHED_REPLY &&
+           !test_and_set_bit(IPS_SEEN_REPLY_BIT, &ct->status))
                nf_conntrack_event_cache(IPCT_REPLY, ct);
 out:
        if (tmpl)
index da9df2d56e669ed33d8126a484788e600dcaaabd..22fc32143e9c4ae17bc48edc68eb0decf11d931c 100644 (file)
@@ -290,6 +290,7 @@ void nf_conntrack_unregister_notifier(struct net *net,
        BUG_ON(notify != new);
        RCU_INIT_POINTER(net->ct.nf_conntrack_event_cb, NULL);
        mutex_unlock(&nf_ct_ecache_mutex);
+       /* synchronize_rcu() is called from ctnetlink_exit. */
 }
 EXPORT_SYMBOL_GPL(nf_conntrack_unregister_notifier);
 
@@ -326,6 +327,7 @@ void nf_ct_expect_unregister_notifier(struct net *net,
        BUG_ON(notify != new);
        RCU_INIT_POINTER(net->ct.nf_expect_event_cb, NULL);
        mutex_unlock(&nf_ct_ecache_mutex);
+       /* synchronize_rcu() is called from ctnetlink_exit. */
 }
 EXPORT_SYMBOL_GPL(nf_ct_expect_unregister_notifier);
 
index 4b2e1fb28bb438d695715fc492f52bf7809ade5d..cb29e598605f5e6c22961c007bf23291fab0bbaa 100644 (file)
@@ -133,7 +133,7 @@ nf_ct_expect_find_get(struct net *net,
 
        rcu_read_lock();
        i = __nf_ct_expect_find(net, zone, tuple);
-       if (i && !atomic_inc_not_zero(&i->use))
+       if (i && !refcount_inc_not_zero(&i->use))
                i = NULL;
        rcu_read_unlock();
 
@@ -186,7 +186,7 @@ nf_ct_find_expectation(struct net *net,
                return NULL;
 
        if (exp->flags & NF_CT_EXPECT_PERMANENT) {
-               atomic_inc(&exp->use);
+               refcount_inc(&exp->use);
                return exp;
        } else if (del_timer(&exp->timeout)) {
                nf_ct_unlink_expect(exp);
@@ -275,7 +275,7 @@ struct nf_conntrack_expect *nf_ct_expect_alloc(struct nf_conn *me)
                return NULL;
 
        new->master = me;
-       atomic_set(&new->use, 1);
+       refcount_set(&new->use, 1);
        return new;
 }
 EXPORT_SYMBOL_GPL(nf_ct_expect_alloc);
@@ -348,7 +348,7 @@ static void nf_ct_expect_free_rcu(struct rcu_head *head)
 
 void nf_ct_expect_put(struct nf_conntrack_expect *exp)
 {
-       if (atomic_dec_and_test(&exp->use))
+       if (refcount_dec_and_test(&exp->use))
                call_rcu(&exp->rcu, nf_ct_expect_free_rcu);
 }
 EXPORT_SYMBOL_GPL(nf_ct_expect_put);
@@ -361,7 +361,7 @@ static void nf_ct_expect_insert(struct nf_conntrack_expect *exp)
        unsigned int h = nf_ct_expect_dst_hash(net, &exp->tuple);
 
        /* two references: one for hash insert, one for the timer */
-       atomic_add(2, &exp->use);
+       refcount_add(2, &exp->use);
 
        hlist_add_head(&exp->lnode, &master_help->expectations);
        master_help->expecting[exp->class]++;
index 02bcf00c24920b332401cd7c82ab6ced951659f5..008299b7f78fe3754946cf0a58029090234ad905 100644 (file)
@@ -53,7 +53,11 @@ nf_ct_ext_create(struct nf_ct_ext **ext, enum nf_ct_ext_id id,
 
        rcu_read_lock();
        t = rcu_dereference(nf_ct_ext_types[id]);
-       BUG_ON(t == NULL);
+       if (!t) {
+               rcu_read_unlock();
+               return NULL;
+       }
+
        off = ALIGN(sizeof(struct nf_ct_ext), t->align);
        len = off + t->len + var_alloc_len;
        alloc_size = t->alloc_size + var_alloc_len;
@@ -88,7 +92,10 @@ void *__nf_ct_ext_add_length(struct nf_conn *ct, enum nf_ct_ext_id id,
 
        rcu_read_lock();
        t = rcu_dereference(nf_ct_ext_types[id]);
-       BUG_ON(t == NULL);
+       if (!t) {
+               rcu_read_unlock();
+               return NULL;
+       }
 
        newoff = ALIGN(old->len, t->align);
        newlen = newoff + t->len + var_alloc_len;
@@ -175,6 +182,6 @@ void nf_ct_extend_unregister(struct nf_ct_ext_type *type)
        RCU_INIT_POINTER(nf_ct_ext_types[type->id], NULL);
        update_alloc_size(type);
        mutex_unlock(&nf_ct_ext_type_mutex);
-       rcu_barrier(); /* Wait for completion of call_rcu()'s */
+       synchronize_rcu();
 }
 EXPORT_SYMBOL_GPL(nf_ct_extend_unregister);
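
With the BUG_ON()s gone, racing against nf_ct_extend_unregister() now surfaces to callers as a NULL return, so extension users must check the result; a hedged sketch of the caller side:

    /* may now return NULL if the extension type was just unregistered */
    ext = nf_ct_ext_add(ct, NF_CT_EXT_HELPER, GFP_ATOMIC);
    if (!ext)
            return NULL;    /* previously an unconditional BUG_ON() */

The rcu_barrier() to synchronize_rcu() swap matches this: unregister now has to wait for readers still dereferencing nf_ct_ext_types, not for pending call_rcu() callbacks.
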
index 6806b5e73567bb0220b248682abed3e5e34f780e..ecdc324c77859303a2141e919b114e4d8c4c3576 100644 (file)
@@ -2693,7 +2693,7 @@ restart:
                                                    cb->nlh->nlmsg_seq,
                                                    IPCTNL_MSG_EXP_NEW,
                                                    exp) < 0) {
-                               if (!atomic_inc_not_zero(&exp->use))
+                               if (!refcount_inc_not_zero(&exp->use))
                                        continue;
                                cb->args[1] = (unsigned long)exp;
                                goto out;
@@ -2739,7 +2739,7 @@ restart:
                                            cb->nlh->nlmsg_seq,
                                            IPCTNL_MSG_EXP_NEW,
                                            exp) < 0) {
-                       if (!atomic_inc_not_zero(&exp->use))
+                       if (!refcount_inc_not_zero(&exp->use))
                                continue;
                        cb->args[1] = (unsigned long)exp;
                        goto out;
@@ -3442,6 +3442,7 @@ static void __exit ctnetlink_exit(void)
 #ifdef CONFIG_NETFILTER_NETLINK_GLUE_CT
        RCU_INIT_POINTER(nfnl_ct_hook, NULL);
 #endif
+       synchronize_rcu();
 }
 
 module_init(ctnetlink_init);
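
The synchronize_rcu() added here, and in the nf_nat and cttimeout exit paths below, closes a module-unload race: after a hook pointer is cleared with RCU_INIT_POINTER(), a reader may still be executing the old function, so the module must wait out a grace period before its code and data can be freed. The required ordering, sketched:

    RCU_INIT_POINTER(nfnl_ct_hook, NULL);   /* unpublish the hook */
    synchronize_rcu();                      /* wait for in-flight readers */
    /* only now is it safe to let the module go away */
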
index 94b14c5a8b177277e218790da32eafebff3be963..82802e4a6640817e64eb3f3a6ffcb875ad14a747 100644 (file)
@@ -903,6 +903,8 @@ static void __exit nf_nat_cleanup(void)
 #ifdef CONFIG_XFRM
        RCU_INIT_POINTER(nf_nat_decode_session_hook, NULL);
 #endif
+       synchronize_rcu();
+
        for (i = 0; i < NFPROTO_NUMPROTO; i++)
                kfree(nf_nat_l4protos[i]);
 
index 31d358691af0963c664c742d180e79c664590005..804e8a0ab36ef56b120ea89be1994b39eca5bc36 100644 (file)
@@ -33,8 +33,16 @@ sctp_manip_pkt(struct sk_buff *skb,
               enum nf_nat_manip_type maniptype)
 {
        sctp_sctphdr_t *hdr;
+       int hdrsize = 8;
 
-       if (!skb_make_writable(skb, hdroff + sizeof(*hdr)))
+       /* This could be an inner header returned in an icmp packet; in such
+        * cases we cannot update the checksum field since it is outside
+        * of the 8 bytes of transport layer headers we are guaranteed.
+        */
+       if (skb->len >= hdroff + sizeof(*hdr))
+               hdrsize = sizeof(*hdr);
+
+       if (!skb_make_writable(skb, hdroff + hdrsize))
                return false;
 
        hdr = (struct sctphdr *)(skb->data + hdroff);
@@ -47,6 +55,9 @@ sctp_manip_pkt(struct sk_buff *skb,
                hdr->dest = tuple->dst.u.sctp.port;
        }
 
+       if (hdrsize < sizeof(*hdr))
+               return true;
+
        if (skb->ip_summed != CHECKSUM_PARTIAL) {
                hdr->checksum = sctp_compute_cksum(skb, hdroff);
                skb->ip_summed = CHECKSUM_NONE;
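
The 8-byte guarantee comes from ICMP error semantics: an ICMP error only has to quote the IP header plus the first 8 bytes of the transport header. For SCTP that covers the ports and vtag but not the checksum, which is why the rewrite above skips the checksum update when fewer than sizeof(*hdr) bytes are available. The layout, for reference:

    struct sctphdr {
            __be16 source;          /* bytes 0-1: always quoted */
            __be16 dest;            /* bytes 2-3: always quoted */
            __be32 vtag;            /* bytes 4-7: always quoted */
            __le32 checksum;        /* bytes 8-11: may be missing */
    };
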
index 5e0ccfd5bb37d1cbebb7e03b0998b7c24cca024d..2d822d2fd83062d28f54737a9e7aae652f608d1b 100644 (file)
@@ -1772,8 +1772,19 @@ static int nf_tables_newexpr(const struct nft_ctx *ctx,
                        goto err1;
        }
 
+       if (ops->validate) {
+               const struct nft_data *data = NULL;
+
+               err = ops->validate(ctx, expr, &data);
+               if (err < 0)
+                       goto err2;
+       }
+
        return 0;
 
+err2:
+       if (ops->destroy)
+               ops->destroy(ctx, expr);
 err1:
        expr->ops = NULL;
        return err;
@@ -2523,8 +2534,8 @@ static int nft_ctx_init_from_setattr(struct nft_ctx *ctx, struct net *net,
        return 0;
 }
 
-struct nft_set *nf_tables_set_lookup(const struct nft_table *table,
-                                    const struct nlattr *nla, u8 genmask)
+static struct nft_set *nf_tables_set_lookup(const struct nft_table *table,
+                                           const struct nlattr *nla, u8 genmask)
 {
        struct nft_set *set;
 
@@ -2538,11 +2549,10 @@ struct nft_set *nf_tables_set_lookup(const struct nft_table *table,
        }
        return ERR_PTR(-ENOENT);
 }
-EXPORT_SYMBOL_GPL(nf_tables_set_lookup);
 
-struct nft_set *nf_tables_set_lookup_byid(const struct net *net,
-                                         const struct nlattr *nla,
-                                         u8 genmask)
+static struct nft_set *nf_tables_set_lookup_byid(const struct net *net,
+                                                const struct nlattr *nla,
+                                                u8 genmask)
 {
        struct nft_trans *trans;
        u32 id = ntohl(nla_get_be32(nla));
@@ -2557,7 +2567,25 @@ struct nft_set *nf_tables_set_lookup_byid(const struct net *net,
        }
        return ERR_PTR(-ENOENT);
 }
-EXPORT_SYMBOL_GPL(nf_tables_set_lookup_byid);
+
+struct nft_set *nft_set_lookup(const struct net *net,
+                              const struct nft_table *table,
+                              const struct nlattr *nla_set_name,
+                              const struct nlattr *nla_set_id,
+                              u8 genmask)
+{
+       struct nft_set *set;
+
+       set = nf_tables_set_lookup(table, nla_set_name, genmask);
+       if (IS_ERR(set)) {
+               if (!nla_set_id)
+                       return set;
+
+               set = nf_tables_set_lookup_byid(net, nla_set_id, genmask);
+       }
+       return set;
+}
+EXPORT_SYMBOL_GPL(nft_set_lookup);
 
 static int nf_tables_set_alloc_name(struct nft_ctx *ctx, struct nft_set *set,
                                    const char *name)
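
Callers that previously chained the two lookups by hand can now go through the single exported helper. A hedged sketch of a typical expression init using the lookup expression's attributes (error handling trimmed):

    struct nft_set *set;

    set = nft_set_lookup(ctx->net, ctx->table, tb[NFTA_LOOKUP_SET],
                         tb[NFTA_LOOKUP_SET_ID], genmask);
    if (IS_ERR(set))
            return PTR_ERR(set);
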
@@ -3145,7 +3173,6 @@ int nf_tables_bind_set(const struct nft_ctx *ctx, struct nft_set *set,
                iter.count      = 0;
                iter.err        = 0;
                iter.fn         = nf_tables_bind_check_setelem;
-               iter.flush      = false;
 
                set->ops->walk(ctx, set, &iter);
                if (iter.err < 0)
@@ -3399,7 +3426,6 @@ static int nf_tables_dump_set(struct sk_buff *skb, struct netlink_callback *cb)
        args.iter.count         = 0;
        args.iter.err           = 0;
        args.iter.fn            = nf_tables_dump_setelem;
-       args.iter.flush         = false;
        set->ops->walk(&ctx, set, &args.iter);
 
        nla_nest_end(skb, nest);
@@ -3963,7 +3989,6 @@ static int nf_tables_delsetelem(struct net *net, struct sock *nlsk,
                struct nft_set_iter iter = {
                        .genmask        = genmask,
                        .fn             = nft_flush_set,
-                       .flush          = true,
                };
                set->ops->walk(&ctx, set, &iter);
 
@@ -4067,7 +4092,8 @@ static const struct nla_policy nft_obj_policy[NFTA_OBJ_MAX + 1] = {
        [NFTA_OBJ_DATA]         = { .type = NLA_NESTED },
 };
 
-static struct nft_object *nft_obj_init(const struct nft_object_type *type,
+static struct nft_object *nft_obj_init(const struct nft_ctx *ctx,
+                                      const struct nft_object_type *type,
                                       const struct nlattr *attr)
 {
        struct nlattr *tb[type->maxattr + 1];
@@ -4087,7 +4113,7 @@ static struct nft_object *nft_obj_init(const struct nft_object_type *type,
        if (obj == NULL)
                goto err1;
 
-       err = type->init((const struct nlattr * const *)tb, obj);
+       err = type->init(ctx, (const struct nlattr * const *)tb, obj);
        if (err < 0)
                goto err2;
 
@@ -4195,7 +4221,7 @@ static int nf_tables_newobj(struct net *net, struct sock *nlsk,
        if (IS_ERR(type))
                return PTR_ERR(type);
 
-       obj = nft_obj_init(type, nla[NFTA_OBJ_DATA]);
+       obj = nft_obj_init(&ctx, type, nla[NFTA_OBJ_DATA]);
        if (IS_ERR(obj)) {
                err = PTR_ERR(obj);
                goto err1;
@@ -5114,7 +5140,6 @@ static int nf_tables_check_loops(const struct nft_ctx *ctx,
                        iter.count      = 0;
                        iter.err        = 0;
                        iter.fn         = nf_tables_loop_check_setelem;
-                       iter.flush      = false;
 
                        set->ops->walk(ctx, set, &iter);
                        if (iter.err < 0)
index d44d89b561275e25bb31fe2b0ed198d13995533c..c86da174a5fced4e3c67307925332273b9f858fe 100644 (file)
@@ -11,6 +11,7 @@
 #include <linux/kernel.h>
 #include <linux/skbuff.h>
 #include <linux/atomic.h>
+#include <linux/refcount.h>
 #include <linux/netlink.h>
 #include <linux/rculist.h>
 #include <linux/slab.h>
@@ -32,7 +33,7 @@ struct nf_acct {
        atomic64_t              bytes;
        unsigned long           flags;
        struct list_head        head;
-       atomic_t                refcnt;
+       refcount_t              refcnt;
        char                    name[NFACCT_NAME_MAX];
        struct rcu_head         rcu_head;
        char                    data[0];
@@ -123,7 +124,7 @@ static int nfnl_acct_new(struct net *net, struct sock *nfnl,
                atomic64_set(&nfacct->pkts,
                             be64_to_cpu(nla_get_be64(tb[NFACCT_PKTS])));
        }
-       atomic_set(&nfacct->refcnt, 1);
+       refcount_set(&nfacct->refcnt, 1);
        list_add_tail_rcu(&nfacct->head, &net->nfnl_acct_list);
        return 0;
 }
@@ -166,7 +167,7 @@ nfnl_acct_fill_info(struct sk_buff *skb, u32 portid, u32 seq, u32 type,
                         NFACCT_PAD) ||
            nla_put_be64(skb, NFACCT_BYTES, cpu_to_be64(bytes),
                         NFACCT_PAD) ||
-           nla_put_be32(skb, NFACCT_USE, htonl(atomic_read(&acct->refcnt))))
+           nla_put_be32(skb, NFACCT_USE, htonl(refcount_read(&acct->refcnt))))
                goto nla_put_failure;
        if (acct->flags & NFACCT_F_QUOTA) {
                u64 *quota = (u64 *)acct->data;
@@ -329,7 +330,7 @@ static int nfnl_acct_try_del(struct nf_acct *cur)
        /* We want to avoid races with nfnl_acct_put. So only when the current
         * refcnt is 1, we decrease it to 0.
         */
-       if (atomic_cmpxchg(&cur->refcnt, 1, 0) == 1) {
+       if (refcount_dec_if_one(&cur->refcnt)) {
                /* We are protected by nfnl mutex. */
                list_del_rcu(&cur->head);
                kfree_rcu(cur, rcu_head);
@@ -413,7 +414,7 @@ struct nf_acct *nfnl_acct_find_get(struct net *net, const char *acct_name)
                if (!try_module_get(THIS_MODULE))
                        goto err;
 
-               if (!atomic_inc_not_zero(&cur->refcnt)) {
+               if (!refcount_inc_not_zero(&cur->refcnt)) {
                        module_put(THIS_MODULE);
                        goto err;
                }
@@ -429,7 +430,7 @@ EXPORT_SYMBOL_GPL(nfnl_acct_find_get);
 
 void nfnl_acct_put(struct nf_acct *acct)
 {
-       if (atomic_dec_and_test(&acct->refcnt))
+       if (refcount_dec_and_test(&acct->refcnt))
                kfree_rcu(acct, rcu_head);
 
        module_put(THIS_MODULE);
@@ -502,7 +503,7 @@ static void __net_exit nfnl_acct_net_exit(struct net *net)
        list_for_each_entry_safe(cur, tmp, &net->nfnl_acct_list, head) {
                list_del_rcu(&cur->head);
 
-               if (atomic_dec_and_test(&cur->refcnt))
+               if (refcount_dec_and_test(&cur->refcnt))
                        kfree_rcu(cur, rcu_head);
        }
 }
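
nfnl_acct_try_del() above is the delete-if-unused idiom this file now shares with cttimeout: refcount_dec_if_one() only drops the count from 1 to 0 when the list holds the sole reference, so deleting an in-use object fails cleanly instead of racing the final put. Sketched:

    if (refcount_dec_if_one(&cur->refcnt)) {
            /* we held the only reference: safe to tear down */
            list_del_rcu(&cur->head);
            kfree_rcu(cur, rcu_head);
    } else {
            ret = -EBUSY;   /* someone else still holds a reference */
    }
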
index de8782345c863777c8cedf95a5ccf60504e9586f..d45558178da5b62a8ad7c896e096c1862c512091 100644 (file)
@@ -32,6 +32,13 @@ MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Pablo Neira Ayuso <pablo@netfilter.org>");
 MODULE_DESCRIPTION("nfnl_cthelper: User-space connection tracking helpers");
 
+struct nfnl_cthelper {
+       struct list_head                list;
+       struct nf_conntrack_helper      helper;
+};
+
+static LIST_HEAD(nfnl_cthelper_list);
+
 static int
 nfnl_userspace_cthelper(struct sk_buff *skb, unsigned int protoff,
                        struct nf_conn *ct, enum ip_conntrack_info ctinfo)
@@ -161,6 +168,7 @@ nfnl_cthelper_parse_expect_policy(struct nf_conntrack_helper *helper,
        int i, ret;
        struct nf_conntrack_expect_policy *expect_policy;
        struct nlattr *tb[NFCTH_POLICY_SET_MAX+1];
+       unsigned int class_max;
 
        ret = nla_parse_nested(tb, NFCTH_POLICY_SET_MAX, attr,
                               nfnl_cthelper_expect_policy_set);
@@ -170,19 +178,18 @@ nfnl_cthelper_parse_expect_policy(struct nf_conntrack_helper *helper,
        if (!tb[NFCTH_POLICY_SET_NUM])
                return -EINVAL;
 
-       helper->expect_class_max =
-               ntohl(nla_get_be32(tb[NFCTH_POLICY_SET_NUM]));
-
-       if (helper->expect_class_max != 0 &&
-           helper->expect_class_max > NF_CT_MAX_EXPECT_CLASSES)
+       class_max = ntohl(nla_get_be32(tb[NFCTH_POLICY_SET_NUM]));
+       if (class_max == 0)
+               return -EINVAL;
+       if (class_max > NF_CT_MAX_EXPECT_CLASSES)
                return -EOVERFLOW;
 
        expect_policy = kzalloc(sizeof(struct nf_conntrack_expect_policy) *
-                               helper->expect_class_max, GFP_KERNEL);
+                               class_max, GFP_KERNEL);
        if (expect_policy == NULL)
                return -ENOMEM;
 
-       for (i=0; i<helper->expect_class_max; i++) {
+       for (i = 0; i < class_max; i++) {
                if (!tb[NFCTH_POLICY_SET+i])
                        goto err;
 
@@ -191,6 +198,8 @@ nfnl_cthelper_parse_expect_policy(struct nf_conntrack_helper *helper,
                if (ret < 0)
                        goto err;
        }
+
+       helper->expect_class_max = class_max - 1;
        helper->expect_policy = expect_policy;
        return 0;
 err:
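
Note the convention change: helper->expect_class_max now stores the highest valid class index (count - 1) rather than the count itself, so iteration uses < expect_class_max + 1 and the dump path below re-adds 1 before reporting. In short:

    class_max = ntohl(nla_get_be32(tb[NFCTH_POLICY_SET_NUM]));  /* count */
    helper->expect_class_max = class_max - 1;                   /* index */

    for (i = 0; i < helper->expect_class_max + 1; i++)
            visit(&helper->expect_policy[i]);       /* visit() is illustrative */
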
@@ -203,18 +212,20 @@ nfnl_cthelper_create(const struct nlattr * const tb[],
                     struct nf_conntrack_tuple *tuple)
 {
        struct nf_conntrack_helper *helper;
+       struct nfnl_cthelper *nfcth;
        int ret;
 
        if (!tb[NFCTH_TUPLE] || !tb[NFCTH_POLICY] || !tb[NFCTH_PRIV_DATA_LEN])
                return -EINVAL;
 
-       helper = kzalloc(sizeof(struct nf_conntrack_helper), GFP_KERNEL);
-       if (helper == NULL)
+       nfcth = kzalloc(sizeof(*nfcth), GFP_KERNEL);
+       if (nfcth == NULL)
                return -ENOMEM;
+       helper = &nfcth->helper;
 
        ret = nfnl_cthelper_parse_expect_policy(helper, tb[NFCTH_POLICY]);
        if (ret < 0)
-               goto err;
+               goto err1;
 
        strncpy(helper->name, nla_data(tb[NFCTH_NAME]), NF_CT_HELPER_NAME_LEN);
        helper->data_len = ntohl(nla_get_be32(tb[NFCTH_PRIV_DATA_LEN]));
@@ -245,14 +256,100 @@ nfnl_cthelper_create(const struct nlattr * const tb[],
 
        ret = nf_conntrack_helper_register(helper);
        if (ret < 0)
-               goto err;
+               goto err2;
 
+       list_add_tail(&nfcth->list, &nfnl_cthelper_list);
        return 0;
-err:
-       kfree(helper);
+err2:
+       kfree(helper->expect_policy);
+err1:
+       kfree(nfcth);
        return ret;
 }
 
+static int
+nfnl_cthelper_update_policy_one(const struct nf_conntrack_expect_policy *policy,
+                               struct nf_conntrack_expect_policy *new_policy,
+                               const struct nlattr *attr)
+{
+       struct nlattr *tb[NFCTH_POLICY_MAX + 1];
+       int err;
+
+       err = nla_parse_nested(tb, NFCTH_POLICY_MAX, attr,
+                              nfnl_cthelper_expect_pol);
+       if (err < 0)
+               return err;
+
+       if (!tb[NFCTH_POLICY_NAME] ||
+           !tb[NFCTH_POLICY_EXPECT_MAX] ||
+           !tb[NFCTH_POLICY_EXPECT_TIMEOUT])
+               return -EINVAL;
+
+       if (nla_strcmp(tb[NFCTH_POLICY_NAME], policy->name))
+               return -EBUSY;
+
+       new_policy->max_expected =
+               ntohl(nla_get_be32(tb[NFCTH_POLICY_EXPECT_MAX]));
+       new_policy->timeout =
+               ntohl(nla_get_be32(tb[NFCTH_POLICY_EXPECT_TIMEOUT]));
+
+       return 0;
+}
+
+static int nfnl_cthelper_update_policy_all(struct nlattr *tb[],
+                                          struct nf_conntrack_helper *helper)
+{
+       struct nf_conntrack_expect_policy new_policy[helper->expect_class_max + 1];
+       struct nf_conntrack_expect_policy *policy;
+       int i, err;
+
+       /* Check first that all policy attributes are well-formed, so we don't
+        * leave things in an inconsistent state on errors.
+        */
+       for (i = 0; i < helper->expect_class_max + 1; i++) {
+
+               if (!tb[NFCTH_POLICY_SET + i])
+                       return -EINVAL;
+
+               err = nfnl_cthelper_update_policy_one(&helper->expect_policy[i],
+                                                     &new_policy[i],
+                                                     tb[NFCTH_POLICY_SET + i]);
+               if (err < 0)
+                       return err;
+       }
+       /* Now we can safely update them. */
+       for (i = 0; i < helper->expect_class_max + 1; i++) {
+               policy = (struct nf_conntrack_expect_policy *)
+                               &helper->expect_policy[i];
+               policy->max_expected = new_policy[i].max_expected;
+               policy->timeout = new_policy[i].timeout;
+       }
+
+       return 0;
+}
+
+static int nfnl_cthelper_update_policy(struct nf_conntrack_helper *helper,
+                                      const struct nlattr *attr)
+{
+       struct nlattr *tb[NFCTH_POLICY_SET_MAX + 1];
+       unsigned int class_max;
+       int err;
+
+       err = nla_parse_nested(tb, NFCTH_POLICY_SET_MAX, attr,
+                              nfnl_cthelper_expect_policy_set);
+       if (err < 0)
+               return err;
+
+       if (!tb[NFCTH_POLICY_SET_NUM])
+               return -EINVAL;
+
+       class_max = ntohl(nla_get_be32(tb[NFCTH_POLICY_SET_NUM]));
+       if (helper->expect_class_max + 1 != class_max)
+               return -EBUSY;
+
+       return nfnl_cthelper_update_policy_all(tb, helper);
+}
+
 static int
 nfnl_cthelper_update(const struct nlattr * const tb[],
                     struct nf_conntrack_helper *helper)
@@ -263,8 +360,7 @@ nfnl_cthelper_update(const struct nlattr * const tb[],
                return -EBUSY;
 
        if (tb[NFCTH_POLICY]) {
-               ret = nfnl_cthelper_parse_expect_policy(helper,
-                                                       tb[NFCTH_POLICY]);
+               ret = nfnl_cthelper_update_policy(helper, tb[NFCTH_POLICY]);
                if (ret < 0)
                        return ret;
        }
@@ -293,7 +389,8 @@ static int nfnl_cthelper_new(struct net *net, struct sock *nfnl,
        const char *helper_name;
        struct nf_conntrack_helper *cur, *helper = NULL;
        struct nf_conntrack_tuple tuple;
-       int ret = 0, i;
+       struct nfnl_cthelper *nlcth;
+       int ret = 0;
 
        if (!tb[NFCTH_NAME] || !tb[NFCTH_TUPLE])
                return -EINVAL;
@@ -304,31 +401,22 @@ static int nfnl_cthelper_new(struct net *net, struct sock *nfnl,
        if (ret < 0)
                return ret;
 
-       rcu_read_lock();
-       for (i = 0; i < nf_ct_helper_hsize && !helper; i++) {
-               hlist_for_each_entry_rcu(cur, &nf_ct_helper_hash[i], hnode) {
+       list_for_each_entry(nlcth, &nfnl_cthelper_list, list) {
+               cur = &nlcth->helper;
 
-                       /* skip non-userspace conntrack helpers. */
-                       if (!(cur->flags & NF_CT_HELPER_F_USERSPACE))
-                               continue;
+               if (strncmp(cur->name, helper_name, NF_CT_HELPER_NAME_LEN))
+                       continue;
 
-                       if (strncmp(cur->name, helper_name,
-                                       NF_CT_HELPER_NAME_LEN) != 0)
-                               continue;
+               if ((tuple.src.l3num != cur->tuple.src.l3num ||
+                    tuple.dst.protonum != cur->tuple.dst.protonum))
+                       continue;
 
-                       if ((tuple.src.l3num != cur->tuple.src.l3num ||
-                            tuple.dst.protonum != cur->tuple.dst.protonum))
-                               continue;
+               if (nlh->nlmsg_flags & NLM_F_EXCL)
+                       return -EEXIST;
 
-                       if (nlh->nlmsg_flags & NLM_F_EXCL) {
-                               ret = -EEXIST;
-                               goto err;
-                       }
-                       helper = cur;
-                       break;
-               }
+               helper = cur;
+               break;
        }
-       rcu_read_unlock();
 
        if (helper == NULL)
                ret = nfnl_cthelper_create(tb, &tuple);
@@ -336,9 +424,6 @@ static int nfnl_cthelper_new(struct net *net, struct sock *nfnl,
                ret = nfnl_cthelper_update(tb, helper);
 
        return ret;
-err:
-       rcu_read_unlock();
-       return ret;
 }
 
 static int
@@ -377,10 +462,10 @@ nfnl_cthelper_dump_policy(struct sk_buff *skb,
                goto nla_put_failure;
 
        if (nla_put_be32(skb, NFCTH_POLICY_SET_NUM,
-                        htonl(helper->expect_class_max)))
+                        htonl(helper->expect_class_max + 1)))
                goto nla_put_failure;
 
-       for (i=0; i<helper->expect_class_max; i++) {
+       for (i = 0; i < helper->expect_class_max + 1; i++) {
                nest_parms2 = nla_nest_start(skb,
                                (NFCTH_POLICY_SET+i) | NLA_F_NESTED);
                if (nest_parms2 == NULL)
@@ -502,11 +587,12 @@ static int nfnl_cthelper_get(struct net *net, struct sock *nfnl,
                             struct sk_buff *skb, const struct nlmsghdr *nlh,
                             const struct nlattr * const tb[])
 {
-       int ret = -ENOENT, i;
+       int ret = -ENOENT;
        struct nf_conntrack_helper *cur;
        struct sk_buff *skb2;
        char *helper_name = NULL;
        struct nf_conntrack_tuple tuple;
+       struct nfnl_cthelper *nlcth;
        bool tuple_set = false;
 
        if (nlh->nlmsg_flags & NLM_F_DUMP) {
@@ -527,45 +613,39 @@ static int nfnl_cthelper_get(struct net *net, struct sock *nfnl,
                tuple_set = true;
        }
 
-       for (i = 0; i < nf_ct_helper_hsize; i++) {
-               hlist_for_each_entry_rcu(cur, &nf_ct_helper_hash[i], hnode) {
+       list_for_each_entry(nlcth, &nfnl_cthelper_list, list) {
+               cur = &nlcth->helper;
+               if (helper_name &&
+                   strncmp(cur->name, helper_name, NF_CT_HELPER_NAME_LEN))
+                       continue;
 
-                       /* skip non-userspace conntrack helpers. */
-                       if (!(cur->flags & NF_CT_HELPER_F_USERSPACE))
-                               continue;
+               if (tuple_set &&
+                   (tuple.src.l3num != cur->tuple.src.l3num ||
+                    tuple.dst.protonum != cur->tuple.dst.protonum))
+                       continue;
 
-                       if (helper_name && strncmp(cur->name, helper_name,
-                                               NF_CT_HELPER_NAME_LEN) != 0) {
-                               continue;
-                       }
-                       if (tuple_set &&
-                           (tuple.src.l3num != cur->tuple.src.l3num ||
-                            tuple.dst.protonum != cur->tuple.dst.protonum))
-                               continue;
-
-                       skb2 = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
-                       if (skb2 == NULL) {
-                               ret = -ENOMEM;
-                               break;
-                       }
+               skb2 = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
+               if (skb2 == NULL) {
+                       ret = -ENOMEM;
+                       break;
+               }
 
-                       ret = nfnl_cthelper_fill_info(skb2, NETLINK_CB(skb).portid,
-                                               nlh->nlmsg_seq,
-                                               NFNL_MSG_TYPE(nlh->nlmsg_type),
-                                               NFNL_MSG_CTHELPER_NEW, cur);
-                       if (ret <= 0) {
-                               kfree_skb(skb2);
-                               break;
-                       }
+               ret = nfnl_cthelper_fill_info(skb2, NETLINK_CB(skb).portid,
+                                             nlh->nlmsg_seq,
+                                             NFNL_MSG_TYPE(nlh->nlmsg_type),
+                                             NFNL_MSG_CTHELPER_NEW, cur);
+               if (ret <= 0) {
+                       kfree_skb(skb2);
+                       break;
+               }
 
-                       ret = netlink_unicast(nfnl, skb2, NETLINK_CB(skb).portid,
-                                               MSG_DONTWAIT);
-                       if (ret > 0)
-                               ret = 0;
+               ret = netlink_unicast(nfnl, skb2, NETLINK_CB(skb).portid,
+                                     MSG_DONTWAIT);
+               if (ret > 0)
+                       ret = 0;
 
-                       /* this avoids a loop in nfnetlink. */
-                       return ret == -EAGAIN ? -ENOBUFS : ret;
-               }
+               /* this avoids a loop in nfnetlink. */
+               return ret == -EAGAIN ? -ENOBUFS : ret;
        }
        return ret;
 }
@@ -576,10 +656,10 @@ static int nfnl_cthelper_del(struct net *net, struct sock *nfnl,
 {
        char *helper_name = NULL;
        struct nf_conntrack_helper *cur;
-       struct hlist_node *tmp;
        struct nf_conntrack_tuple tuple;
        bool tuple_set = false, found = false;
-       int i, j = 0, ret;
+       struct nfnl_cthelper *nlcth, *n;
+       int j = 0, ret;
 
        if (tb[NFCTH_NAME])
                helper_name = nla_data(tb[NFCTH_NAME]);
@@ -592,28 +672,27 @@ static int nfnl_cthelper_del(struct net *net, struct sock *nfnl,
                tuple_set = true;
        }
 
-       for (i = 0; i < nf_ct_helper_hsize; i++) {
-               hlist_for_each_entry_safe(cur, tmp, &nf_ct_helper_hash[i],
-                                                               hnode) {
-                       /* skip non-userspace conntrack helpers. */
-                       if (!(cur->flags & NF_CT_HELPER_F_USERSPACE))
-                               continue;
+       list_for_each_entry_safe(nlcth, n, &nfnl_cthelper_list, list) {
+               cur = &nlcth->helper;
+               j++;
 
-                       j++;
+               if (helper_name &&
+                   strncmp(cur->name, helper_name, NF_CT_HELPER_NAME_LEN))
+                       continue;
 
-                       if (helper_name && strncmp(cur->name, helper_name,
-                                               NF_CT_HELPER_NAME_LEN) != 0) {
-                               continue;
-                       }
-                       if (tuple_set &&
-                           (tuple.src.l3num != cur->tuple.src.l3num ||
-                            tuple.dst.protonum != cur->tuple.dst.protonum))
-                               continue;
+               if (tuple_set &&
+                   (tuple.src.l3num != cur->tuple.src.l3num ||
+                    tuple.dst.protonum != cur->tuple.dst.protonum))
+                       continue;
 
-                       found = true;
-                       nf_conntrack_helper_unregister(cur);
-               }
+               found = true;
+               nf_conntrack_helper_unregister(cur);
+               kfree(cur->expect_policy);
+
+               list_del(&nlcth->list);
+               kfree(nlcth);
        }
+
        /* Make sure we return success if we flush and there are no helpers */
        return (found || j == 0) ? 0 : -ENOENT;
 }
@@ -662,20 +741,16 @@ err_out:
 static void __exit nfnl_cthelper_exit(void)
 {
        struct nf_conntrack_helper *cur;
-       struct hlist_node *tmp;
-       int i;
+       struct nfnl_cthelper *nlcth, *n;
 
        nfnetlink_subsys_unregister(&nfnl_cthelper_subsys);
 
-       for (i=0; i<nf_ct_helper_hsize; i++) {
-               hlist_for_each_entry_safe(cur, tmp, &nf_ct_helper_hash[i],
-                                                                       hnode) {
-                       /* skip non-userspace conntrack helpers. */
-                       if (!(cur->flags & NF_CT_HELPER_F_USERSPACE))
-                               continue;
+       list_for_each_entry_safe(nlcth, n, &nfnl_cthelper_list, list) {
+               cur = &nlcth->helper;
 
-                       nf_conntrack_helper_unregister(cur);
-               }
+               nf_conntrack_helper_unregister(cur);
+               kfree(cur->expect_policy);
+               kfree(nlcth);
        }
 }
 
index 139e0867e56e9e606942c98e75148eb17b2ec7eb..57c2cdf7b6912f9a2096c541355e09ad01f79cb2 100644 (file)
@@ -138,7 +138,7 @@ static int cttimeout_new_timeout(struct net *net, struct sock *ctnl,
        strcpy(timeout->name, nla_data(cda[CTA_TIMEOUT_NAME]));
        timeout->l3num = l3num;
        timeout->l4proto = l4proto;
-       atomic_set(&timeout->refcnt, 1);
+       refcount_set(&timeout->refcnt, 1);
        list_add_tail_rcu(&timeout->head, &net->nfct_timeout_list);
 
        return 0;
@@ -172,7 +172,7 @@ ctnl_timeout_fill_info(struct sk_buff *skb, u32 portid, u32 seq, u32 type,
            nla_put_be16(skb, CTA_TIMEOUT_L3PROTO, htons(timeout->l3num)) ||
            nla_put_u8(skb, CTA_TIMEOUT_L4PROTO, timeout->l4proto->l4proto) ||
            nla_put_be32(skb, CTA_TIMEOUT_USE,
-                        htonl(atomic_read(&timeout->refcnt))))
+                        htonl(refcount_read(&timeout->refcnt))))
                goto nla_put_failure;
 
        if (likely(l4proto->ctnl_timeout.obj_to_nlattr)) {
@@ -339,7 +339,7 @@ static int ctnl_timeout_try_del(struct net *net, struct ctnl_timeout *timeout)
        /* We want to avoid races with ctnl_timeout_put. So only when the
         * current refcnt is 1, we decrease it to 0.
         */
-       if (atomic_cmpxchg(&timeout->refcnt, 1, 0) == 1) {
+       if (refcount_dec_if_one(&timeout->refcnt)) {
                /* We are protected by nfnl mutex. */
                list_del_rcu(&timeout->head);
                nf_ct_l4proto_put(timeout->l4proto);
@@ -536,7 +536,7 @@ ctnl_timeout_find_get(struct net *net, const char *name)
                if (!try_module_get(THIS_MODULE))
                        goto err;
 
-               if (!atomic_inc_not_zero(&timeout->refcnt)) {
+               if (!refcount_inc_not_zero(&timeout->refcnt)) {
                        module_put(THIS_MODULE);
                        goto err;
                }
@@ -550,7 +550,7 @@ err:
 
 static void ctnl_timeout_put(struct ctnl_timeout *timeout)
 {
-       if (atomic_dec_and_test(&timeout->refcnt))
+       if (refcount_dec_and_test(&timeout->refcnt))
                kfree_rcu(timeout, rcu_head);
 
        module_put(THIS_MODULE);
@@ -601,7 +601,7 @@ static void __net_exit cttimeout_net_exit(struct net *net)
                list_del_rcu(&cur->head);
                nf_ct_l4proto_put(cur->l4proto);
 
-               if (atomic_dec_and_test(&cur->refcnt))
+               if (refcount_dec_and_test(&cur->refcnt))
                        kfree_rcu(cur, rcu_head);
        }
 }
@@ -646,8 +646,8 @@ static void __exit cttimeout_exit(void)
 #ifdef CONFIG_NF_CONNTRACK_TIMEOUT
        RCU_INIT_POINTER(nf_ct_timeout_find_get_hook, NULL);
        RCU_INIT_POINTER(nf_ct_timeout_put_hook, NULL);
+       synchronize_rcu();
 #endif /* CONFIG_NF_CONNTRACK_TIMEOUT */
-       rcu_barrier();
 }
 
 module_init(cttimeout_init);
index 08247bf7d7b836828c8151ed07f438e05973c2c0..ecd857b75ffe631c5d52e5e8ed9967e9152bc9ab 100644 (file)
@@ -40,6 +40,8 @@
 #include <net/netfilter/nfnetlink_log.h>
 
 #include <linux/atomic.h>
+#include <linux/refcount.h>
+
 
 #if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
 #include "../bridge/br_private.h"
@@ -57,7 +59,7 @@
 struct nfulnl_instance {
        struct hlist_node hlist;        /* global list of instances */
        spinlock_t lock;
-       atomic_t use;                   /* use count */
+       refcount_t use;                 /* use count */
 
        unsigned int qlen;              /* number of nlmsgs in skb */
        struct sk_buff *skb;            /* pre-allocated skb */
@@ -115,7 +117,7 @@ __instance_lookup(struct nfnl_log_net *log, u_int16_t group_num)
 static inline void
 instance_get(struct nfulnl_instance *inst)
 {
-       atomic_inc(&inst->use);
+       refcount_inc(&inst->use);
 }
 
 static struct nfulnl_instance *
@@ -125,7 +127,7 @@ instance_lookup_get(struct nfnl_log_net *log, u_int16_t group_num)
 
        rcu_read_lock_bh();
        inst = __instance_lookup(log, group_num);
-       if (inst && !atomic_inc_not_zero(&inst->use))
+       if (inst && !refcount_inc_not_zero(&inst->use))
                inst = NULL;
        rcu_read_unlock_bh();
 
@@ -145,7 +147,7 @@ static void nfulnl_instance_free_rcu(struct rcu_head *head)
 static void
 instance_put(struct nfulnl_instance *inst)
 {
-       if (inst && atomic_dec_and_test(&inst->use))
+       if (inst && refcount_dec_and_test(&inst->use))
                call_rcu_bh(&inst->rcu, nfulnl_instance_free_rcu);
 }
 
@@ -180,7 +182,7 @@ instance_create(struct net *net, u_int16_t group_num,
        INIT_HLIST_NODE(&inst->hlist);
        spin_lock_init(&inst->lock);
        /* needs to be two, since we _put() after creation */
-       atomic_set(&inst->use, 2);
+       refcount_set(&inst->use, 2);
 
        setup_timer(&inst->timer, nfulnl_timer, (unsigned long)inst);
 
@@ -1031,7 +1033,7 @@ static int seq_show(struct seq_file *s, void *v)
                   inst->group_num,
                   inst->peer_portid, inst->qlen,
                   inst->copy_mode, inst->copy_range,
-                  inst->flushtimeout, atomic_read(&inst->use));
+                  inst->flushtimeout, refcount_read(&inst->use));
 
        return 0;
 }
index 3ee0b8a000a41ec901faeb239e752a126428dc4d..933509ebf3d3e2e84aecd55fd7f19f21f69e46d4 100644 (file)
@@ -443,7 +443,7 @@ nfqnl_build_packet_message(struct net *net, struct nfqnl_instance *queue,
        skb = alloc_skb(size, GFP_ATOMIC);
        if (!skb) {
                skb_tx_error(entskb);
-               return NULL;
+               goto nlmsg_failure;
        }
 
        nlh = nlmsg_put(skb, 0, 0,
@@ -452,7 +452,7 @@ nfqnl_build_packet_message(struct net *net, struct nfqnl_instance *queue,
        if (!nlh) {
                skb_tx_error(entskb);
                kfree_skb(skb);
-               return NULL;
+               goto nlmsg_failure;
        }
        nfmsg = nlmsg_data(nlh);
        nfmsg->nfgen_family = entry->state.pf;
@@ -598,12 +598,17 @@ nfqnl_build_packet_message(struct net *net, struct nfqnl_instance *queue,
        }
 
        nlh->nlmsg_len = skb->len;
+       if (seclen)
+               security_release_secctx(secdata, seclen);
        return skb;
 
 nla_put_failure:
        skb_tx_error(entskb);
        kfree_skb(skb);
        net_err_ratelimited("nf_queue: error creating packet message\n");
+nlmsg_failure:
+       if (seclen)
+               security_release_secctx(secdata, seclen);
        return NULL;
 }
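
Both early exits now funnel through the nlmsg_failure label so that the security context taken earlier in this function is released on every path, not only on success. The acquire/release pairing, sketched:

    seclen = nfqnl_get_sk_secctx(entskb, &secdata);     /* acquire */
    /* ... build the netlink message ... */
    if (seclen)
            security_release_secctx(secdata, seclen);   /* on every exit */
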
 
index c21e7eb8dce02a6b73c5a466300ae793a2787b26..fab6bf3f955ed0a9251b4dfc72b41e5948fa4a21 100644 (file)
@@ -230,10 +230,6 @@ nft_target_init(const struct nft_ctx *ctx, const struct nft_expr *expr,
        union nft_entry e = {};
        int ret;
 
-       ret = nft_compat_chain_validate_dependency(target->table, ctx->chain);
-       if (ret < 0)
-               goto err;
-
        target_compat_from_user(target, nla_data(tb[NFTA_TARGET_INFO]), info);
 
        if (ctx->nla[NFTA_RULE_COMPAT]) {
@@ -419,10 +415,6 @@ nft_match_init(const struct nft_ctx *ctx, const struct nft_expr *expr,
        union nft_entry e = {};
        int ret;
 
-       ret = nft_compat_chain_validate_dependency(match->table, ctx->chain);
-       if (ret < 0)
-               goto err;
-
        match_compat_from_user(match, nla_data(tb[NFTA_MATCH_INFO]), info);
 
        if (ctx->nla[NFTA_RULE_COMPAT]) {
index 7f84222133414d4a65ed96508c83dea711295c9f..67a710ebde09da21464da4bb22a78a36c791274d 100644 (file)
@@ -82,7 +82,8 @@ static int nft_counter_do_init(const struct nlattr * const tb[],
        return 0;
 }
 
-static int nft_counter_obj_init(const struct nlattr * const tb[],
+static int nft_counter_obj_init(const struct nft_ctx *ctx,
+                               const struct nlattr * const tb[],
                                struct nft_object *obj)
 {
        struct nft_counter_percpu_priv *priv = nft_obj_data(obj);
index bf548a7a71ec9b49cf308af041811d2eb5f33c8c..640fe5a5865ef26ea71378535927f6bbd9a6aceb 100644 (file)
@@ -32,6 +32,12 @@ struct nft_ct {
        };
 };
 
+struct nft_ct_helper_obj  {
+       struct nf_conntrack_helper *helper4;
+       struct nf_conntrack_helper *helper6;
+       u8 l4proto;
+};
+
 #ifdef CONFIG_NF_CONNTRACK_ZONES
 static DEFINE_PER_CPU(struct nf_conn *, nft_ct_pcpu_template);
 static unsigned int nft_ct_pcpu_template_refcnt __read_mostly;
@@ -83,7 +89,7 @@ static void nft_ct_get_eval(const struct nft_expr *expr,
 
        switch (priv->key) {
        case NFT_CT_DIRECTION:
-               *dest = CTINFO2DIR(ctinfo);
+               nft_reg_store8(dest, CTINFO2DIR(ctinfo));
                return;
        case NFT_CT_STATUS:
                *dest = ct->status;
@@ -151,20 +157,22 @@ static void nft_ct_get_eval(const struct nft_expr *expr,
                return;
        }
        case NFT_CT_L3PROTOCOL:
-               *dest = nf_ct_l3num(ct);
+               nft_reg_store8(dest, nf_ct_l3num(ct));
                return;
        case NFT_CT_PROTOCOL:
-               *dest = nf_ct_protonum(ct);
+               nft_reg_store8(dest, nf_ct_protonum(ct));
                return;
 #ifdef CONFIG_NF_CONNTRACK_ZONES
        case NFT_CT_ZONE: {
                const struct nf_conntrack_zone *zone = nf_ct_zone(ct);
+               u16 zoneid;
 
                if (priv->dir < IP_CT_DIR_MAX)
-                       *dest = nf_ct_zone_id(zone, priv->dir);
+                       zoneid = nf_ct_zone_id(zone, priv->dir);
                else
-                       *dest = zone->id;
+                       zoneid = zone->id;
 
+               nft_reg_store16(dest, zoneid);
                return;
        }
 #endif
@@ -183,10 +191,10 @@ static void nft_ct_get_eval(const struct nft_expr *expr,
                       nf_ct_l3num(ct) == NFPROTO_IPV4 ? 4 : 16);
                return;
        case NFT_CT_PROTO_SRC:
-               *dest = (__force __u16)tuple->src.u.all;
+               nft_reg_store16(dest, (__force u16)tuple->src.u.all);
                return;
        case NFT_CT_PROTO_DST:
-               *dest = (__force __u16)tuple->dst.u.all;
+               nft_reg_store16(dest, (__force u16)tuple->dst.u.all);
                return;
        default:
                break;
@@ -205,7 +213,7 @@ static void nft_ct_set_zone_eval(const struct nft_expr *expr,
        const struct nft_ct *priv = nft_expr_priv(expr);
        struct sk_buff *skb = pkt->skb;
        enum ip_conntrack_info ctinfo;
-       u16 value = regs->data[priv->sreg];
+       u16 value = nft_reg_load16(&regs->data[priv->sreg]);
        struct nf_conn *ct;
 
        ct = nf_ct_get(skb, &ctinfo);
@@ -542,7 +550,8 @@ static int nft_ct_set_init(const struct nft_ctx *ctx,
                case IP_CT_DIR_REPLY:
                        break;
                default:
-                       return -EINVAL;
+                       err = -EINVAL;
+                       goto err1;
                }
        }
 
@@ -730,6 +739,162 @@ static struct nft_expr_type nft_notrack_type __read_mostly = {
        .owner          = THIS_MODULE,
 };
 
+static int nft_ct_helper_obj_init(const struct nft_ctx *ctx,
+                                 const struct nlattr * const tb[],
+                                 struct nft_object *obj)
+{
+       struct nft_ct_helper_obj *priv = nft_obj_data(obj);
+       struct nf_conntrack_helper *help4, *help6;
+       char name[NF_CT_HELPER_NAME_LEN];
+       int family = ctx->afi->family;
+
+       if (!tb[NFTA_CT_HELPER_NAME] || !tb[NFTA_CT_HELPER_L4PROTO])
+               return -EINVAL;
+
+       priv->l4proto = nla_get_u8(tb[NFTA_CT_HELPER_L4PROTO]);
+       if (!priv->l4proto)
+               return -ENOENT;
+
+       nla_strlcpy(name, tb[NFTA_CT_HELPER_NAME], sizeof(name));
+
+       if (tb[NFTA_CT_HELPER_L3PROTO])
+               family = ntohs(nla_get_be16(tb[NFTA_CT_HELPER_L3PROTO]));
+
+       help4 = NULL;
+       help6 = NULL;
+
+       switch (family) {
+       case NFPROTO_IPV4:
+               if (ctx->afi->family == NFPROTO_IPV6)
+                       return -EINVAL;
+
+               help4 = nf_conntrack_helper_try_module_get(name, family,
+                                                          priv->l4proto);
+               break;
+       case NFPROTO_IPV6:
+               if (ctx->afi->family == NFPROTO_IPV4)
+                       return -EINVAL;
+
+               help6 = nf_conntrack_helper_try_module_get(name, family,
+                                                          priv->l4proto);
+               break;
+       case NFPROTO_NETDEV: /* fallthrough */
+       case NFPROTO_BRIDGE: /* same */
+       case NFPROTO_INET:
+               help4 = nf_conntrack_helper_try_module_get(name, NFPROTO_IPV4,
+                                                          priv->l4proto);
+               help6 = nf_conntrack_helper_try_module_get(name, NFPROTO_IPV6,
+                                                          priv->l4proto);
+               break;
+       default:
+               return -EAFNOSUPPORT;
+       }
+
+       /* && is intentional; only error if INET found neither ipv4 nor ipv6 */
+       if (!help4 && !help6)
+               return -ENOENT;
+
+       priv->helper4 = help4;
+       priv->helper6 = help6;
+
+       return 0;
+}
+
+static void nft_ct_helper_obj_destroy(struct nft_object *obj)
+{
+       struct nft_ct_helper_obj *priv = nft_obj_data(obj);
+
+       if (priv->helper4)
+               module_put(priv->helper4->me);
+       if (priv->helper6)
+               module_put(priv->helper6->me);
+}
+
+static void nft_ct_helper_obj_eval(struct nft_object *obj,
+                                  struct nft_regs *regs,
+                                  const struct nft_pktinfo *pkt)
+{
+       const struct nft_ct_helper_obj *priv = nft_obj_data(obj);
+       struct nf_conn *ct = (struct nf_conn *)skb_nfct(pkt->skb);
+       struct nf_conntrack_helper *to_assign = NULL;
+       struct nf_conn_help *help;
+
+       if (!ct ||
+           nf_ct_is_confirmed(ct) ||
+           nf_ct_is_template(ct) ||
+           priv->l4proto != nf_ct_protonum(ct))
+               return;
+
+       switch (nf_ct_l3num(ct)) {
+       case NFPROTO_IPV4:
+               to_assign = priv->helper4;
+               break;
+       case NFPROTO_IPV6:
+               to_assign = priv->helper6;
+               break;
+       default:
+               WARN_ON_ONCE(1);
+               return;
+       }
+
+       if (!to_assign)
+               return;
+
+       if (test_bit(IPS_HELPER_BIT, &ct->status))
+               return;
+
+       help = nf_ct_helper_ext_add(ct, to_assign, GFP_ATOMIC);
+       if (help) {
+               rcu_assign_pointer(help->helper, to_assign);
+               set_bit(IPS_HELPER_BIT, &ct->status);
+       }
+}
+
+static int nft_ct_helper_obj_dump(struct sk_buff *skb,
+                                 struct nft_object *obj, bool reset)
+{
+       const struct nft_ct_helper_obj *priv = nft_obj_data(obj);
+       const struct nf_conntrack_helper *helper = priv->helper4;
+       u16 family;
+
+       if (nla_put_string(skb, NFTA_CT_HELPER_NAME, helper->name))
+               return -1;
+
+       if (nla_put_u8(skb, NFTA_CT_HELPER_L4PROTO, priv->l4proto))
+               return -1;
+
+       if (priv->helper4 && priv->helper6)
+               family = NFPROTO_INET;
+       else if (priv->helper6)
+               family = NFPROTO_IPV6;
+       else
+               family = NFPROTO_IPV4;
+
+       if (nla_put_be16(skb, NFTA_CT_HELPER_L3PROTO, htons(family)))
+               return -1;
+
+       return 0;
+}
+
+static const struct nla_policy nft_ct_helper_policy[NFTA_CT_HELPER_MAX + 1] = {
+       [NFTA_CT_HELPER_NAME] = { .type = NLA_STRING,
+                                 .len = NF_CT_HELPER_NAME_LEN - 1 },
+       [NFTA_CT_HELPER_L3PROTO] = { .type = NLA_U16 },
+       [NFTA_CT_HELPER_L4PROTO] = { .type = NLA_U8 },
+};
+
+static struct nft_object_type nft_ct_helper_obj __read_mostly = {
+       .type           = NFT_OBJECT_CT_HELPER,
+       .size           = sizeof(struct nft_ct_helper_obj),
+       .maxattr        = NFTA_CT_HELPER_MAX,
+       .policy         = nft_ct_helper_policy,
+       .eval           = nft_ct_helper_obj_eval,
+       .init           = nft_ct_helper_obj_init,
+       .destroy        = nft_ct_helper_obj_destroy,
+       .dump           = nft_ct_helper_obj_dump,
+       .owner          = THIS_MODULE,
+};
+
 static int __init nft_ct_module_init(void)
 {
        int err;
@@ -744,7 +909,14 @@ static int __init nft_ct_module_init(void)
        if (err < 0)
                goto err1;
 
+       err = nft_register_obj(&nft_ct_helper_obj);
+       if (err < 0)
+               goto err2;
+
        return 0;
+
+err2:
+       nft_unregister_expr(&nft_notrack_type);
 err1:
        nft_unregister_expr(&nft_ct_type);
        return err;
@@ -752,6 +924,7 @@ err1:
 
 static void __exit nft_ct_module_exit(void)
 {
+       nft_unregister_obj(&nft_ct_helper_obj);
        nft_unregister_expr(&nft_notrack_type);
        nft_unregister_expr(&nft_ct_type);
 }
@@ -763,3 +936,4 @@ MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Patrick McHardy <kaber@trash.net>");
 MODULE_ALIAS_NFT_EXPR("ct");
 MODULE_ALIAS_NFT_EXPR("notrack");
+MODULE_ALIAS_NFT_OBJ(NFT_OBJECT_CT_HELPER);
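
The init path above resolves conntrack helpers by name; for the family-agnostic hooks (inet, bridge, netdev) it grabs both the IPv4 and IPv6 variants and fails only if neither exists, and the eval path then picks one by the connection's l3num. A standalone sketch of that either-or resolution (registry and names invented for illustration):

    #include <stdio.h>
    #include <string.h>

    enum { FAM_IPV4, FAM_IPV6 };

    struct helper { const char *name; int family; };

    /* Invented registry standing in for the conntrack helper list. */
    static struct helper registry[] = {
        { "ftp", FAM_IPV4 }, { "ftp", FAM_IPV6 }, { "tftp", FAM_IPV4 },
    };

    static struct helper *find(const char *name, int family)
    {
        for (size_t i = 0; i < sizeof(registry) / sizeof(registry[0]); i++)
            if (registry[i].family == family &&
                !strcmp(registry[i].name, name))
                return &registry[i];
        return NULL;
    }

    /* For inet/bridge/netdev, fetch both variants; fail only when neither
     * protocol has the helper -- the "&& is intentional" check above. */
    static int resolve(const char *name, struct helper **h4, struct helper **h6)
    {
        *h4 = find(name, FAM_IPV4);
        *h6 = find(name, FAM_IPV6);
        return (!*h4 && !*h6) ? -2 /* -ENOENT */ : 0;
    }

    int main(void)
    {
        struct helper *h4, *h6;
        int err;

        err = resolve("ftp", &h4, &h6);
        printf("ftp: err=%d v4=%d v6=%d\n", err, h4 != NULL, h6 != NULL);
        err = resolve("gre", &h4, &h6);
        printf("gre: err=%d v4=%d v6=%d\n", err, h4 != NULL, h6 != NULL);
        return 0;
    }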
index 049ad2d9ee66959367a051903563dca6ba654edb..3948da380259538c2fd4823f65a9407241f8af4e 100644 (file)
@@ -133,16 +133,10 @@ static int nft_dynset_init(const struct nft_ctx *ctx,
                        priv->invert = true;
        }
 
-       set = nf_tables_set_lookup(ctx->table, tb[NFTA_DYNSET_SET_NAME],
-                                  genmask);
-       if (IS_ERR(set)) {
-               if (tb[NFTA_DYNSET_SET_ID])
-                       set = nf_tables_set_lookup_byid(ctx->net,
-                                                       tb[NFTA_DYNSET_SET_ID],
-                                                       genmask);
-               if (IS_ERR(set))
-                       return PTR_ERR(set);
-       }
+       set = nft_set_lookup(ctx->net, ctx->table, tb[NFTA_DYNSET_SET_NAME],
+                            tb[NFTA_DYNSET_SET_ID], genmask);
+       if (IS_ERR(set))
+               return PTR_ERR(set);
 
        if (set->ops->update == NULL)
                return -EOPNOTSUPP;
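
This hunk, and the matching nft_lookup and nft_objref hunks below, replace the open-coded name-then-id set lookup with the new nft_set_lookup() helper. A hypothetical sketch of the consolidated lookup order:

    #include <stdio.h>
    #include <string.h>

    struct set { const char *name; unsigned int id; };

    static struct set sets[] = { { "blackhole", 1 }, { "allow", 2 } };
    #define NSETS (sizeof(sets) / sizeof(sets[0]))

    /* Hypothetical shape of the consolidated helper: try the name first,
     * then fall back to the batch-local id assigned at NEWSET time. */
    static struct set *set_lookup(const char *name, const unsigned int *id)
    {
        size_t i;

        if (name)
            for (i = 0; i < NSETS; i++)
                if (!strcmp(sets[i].name, name))
                    return &sets[i];
        if (id)
            for (i = 0; i < NSETS; i++)
                if (sets[i].id == *id)
                    return &sets[i];
        return NULL;    /* caller maps this to -ENOENT */
    }

    int main(void)
    {
        unsigned int id = 2;

        printf("%s\n", set_lookup("blackhole", NULL)->name);
        printf("%s\n", set_lookup("nosuch", &id)->name);
        return 0;
    }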
index c308920b194cdbe5e3a2e9a09cfb8aab7267f588..d212a85d2f3336e1b1a393c5ac4bf4fc8f1eebca 100644 (file)
@@ -98,14 +98,21 @@ static void nft_exthdr_tcp_eval(const struct nft_expr *expr,
                        goto err;
 
                offset = i + priv->offset;
-               dest[priv->len / NFT_REG32_SIZE] = 0;
-               memcpy(dest, opt + offset, priv->len);
+               if (priv->flags & NFT_EXTHDR_F_PRESENT) {
+                       *dest = 1;
+               } else {
+                       dest[priv->len / NFT_REG32_SIZE] = 0;
+                       memcpy(dest, opt + offset, priv->len);
+               }
 
                return;
        }
 
 err:
-       regs->verdict.code = NFT_BREAK;
+       if (priv->flags & NFT_EXTHDR_F_PRESENT)
+               *dest = 0;
+       else
+               regs->verdict.code = NFT_BREAK;
 }
 
 static const struct nla_policy nft_exthdr_policy[NFTA_EXTHDR_MAX + 1] = {
index 29a4906adc277cd3a2cf55242224e4c99fd070e7..21df8cccea6582e56d7bfbb6fba21f821b7c56d9 100644 (file)
@@ -24,7 +24,8 @@ const struct nla_policy nft_fib_policy[NFTA_FIB_MAX + 1] = {
 EXPORT_SYMBOL(nft_fib_policy);
 
 #define NFTA_FIB_F_ALL (NFTA_FIB_F_SADDR | NFTA_FIB_F_DADDR | \
-                       NFTA_FIB_F_MARK | NFTA_FIB_F_IIF | NFTA_FIB_F_OIF)
+                       NFTA_FIB_F_MARK | NFTA_FIB_F_IIF | NFTA_FIB_F_OIF | \
+                       NFTA_FIB_F_PRESENT)
 
 int nft_fib_validate(const struct nft_ctx *ctx, const struct nft_expr *expr,
                     const struct nft_data **data)
@@ -112,7 +113,7 @@ int nft_fib_init(const struct nft_ctx *ctx, const struct nft_expr *expr,
        if (err < 0)
                return err;
 
-       return nft_fib_validate(ctx, expr, NULL);
+       return 0;
 }
 EXPORT_SYMBOL_GPL(nft_fib_init);
 
@@ -133,19 +134,22 @@ int nft_fib_dump(struct sk_buff *skb, const struct nft_expr *expr)
 }
 EXPORT_SYMBOL_GPL(nft_fib_dump);
 
-void nft_fib_store_result(void *reg, enum nft_fib_result r,
+void nft_fib_store_result(void *reg, const struct nft_fib *priv,
                          const struct nft_pktinfo *pkt, int index)
 {
        struct net_device *dev;
        u32 *dreg = reg;
 
-       switch (r) {
+       switch (priv->result) {
        case NFT_FIB_RESULT_OIF:
-               *dreg = index;
+               *dreg = (priv->flags & NFTA_FIB_F_PRESENT) ? !!index : index;
                break;
        case NFT_FIB_RESULT_OIFNAME:
                dev = dev_get_by_index_rcu(nft_net(pkt), index);
-               strncpy(reg, dev ? dev->name : "", IFNAMSIZ);
+               if (priv->flags & NFTA_FIB_F_PRESENT)
+                       *dreg = !!dev;
+               else
+                       strncpy(reg, dev ? dev->name : "", IFNAMSIZ);
                break;
        default:
                WARN_ON_ONCE(1);
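
With the presence flags (NFT_EXTHDR_F_PRESENT in the exthdr hunk above, NFTA_FIB_F_PRESENT here) the expression writes a boolean into the destination register instead of the value itself, and a miss yields 0 rather than an NFT_BREAK verdict. A minimal model of that dual behaviour (flag name shortened):

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    #define F_PRESENT 0x1   /* shorthand for the *_F_PRESENT flags */

    /* Store the value, or just its presence; returns false when the rule
     * should break (i.e. a miss without F_PRESENT). */
    static bool store_result(uint32_t *dreg, unsigned int flags,
                             bool found, uint32_t value)
    {
        if (flags & F_PRESENT) {
            *dreg = found;  /* a miss is just 'false', no break */
            return true;
        }
        if (!found)
            return false;   /* nothing to store: NFT_BREAK */
        *dreg = value;
        return true;
    }

    int main(void)
    {
        uint32_t reg = 0;
        bool ok;

        ok = store_result(&reg, F_PRESENT, false, 0);
        printf("present-mode miss: ok=%d reg=%u\n", ok, reg);
        ok = store_result(&reg, 0, true, 42);
        printf("value-mode hit:    ok=%d reg=%u\n", ok, reg);
        return 0;
    }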
index eb2721af898dbb54ab099f4878d7b2673cbb9522..a6a4633725bb4cba848112bc3ea9978fe34d6ea4 100644 (file)
@@ -17,7 +17,7 @@
 #include <net/netfilter/nf_tables_core.h>
 #include <linux/jhash.h>
 
-struct nft_hash {
+struct nft_jhash {
        enum nft_registers      sreg:8;
        enum nft_registers      dreg:8;
        u8                      len;
@@ -26,11 +26,11 @@ struct nft_hash {
        u32                     offset;
 };
 
-static void nft_hash_eval(const struct nft_expr *expr,
-                         struct nft_regs *regs,
-                         const struct nft_pktinfo *pkt)
+static void nft_jhash_eval(const struct nft_expr *expr,
+                          struct nft_regs *regs,
+                          const struct nft_pktinfo *pkt)
 {
-       struct nft_hash *priv = nft_expr_priv(expr);
+       struct nft_jhash *priv = nft_expr_priv(expr);
        const void *data = &regs->data[priv->sreg];
        u32 h;
 
@@ -38,6 +38,25 @@ static void nft_hash_eval(const struct nft_expr *expr,
        regs->data[priv->dreg] = h + priv->offset;
 }
 
+struct nft_symhash {
+       enum nft_registers      dreg:8;
+       u32                     modulus;
+       u32                     offset;
+};
+
+static void nft_symhash_eval(const struct nft_expr *expr,
+                            struct nft_regs *regs,
+                            const struct nft_pktinfo *pkt)
+{
+       struct nft_symhash *priv = nft_expr_priv(expr);
+       struct sk_buff *skb = pkt->skb;
+       u32 h;
+
+       h = reciprocal_scale(__skb_get_hash_symmetric(skb), priv->modulus);
+
+       regs->data[priv->dreg] = h + priv->offset;
+}
+
 static const struct nla_policy nft_hash_policy[NFTA_HASH_MAX + 1] = {
        [NFTA_HASH_SREG]        = { .type = NLA_U32 },
        [NFTA_HASH_DREG]        = { .type = NLA_U32 },
@@ -45,13 +64,14 @@ static const struct nla_policy nft_hash_policy[NFTA_HASH_MAX + 1] = {
        [NFTA_HASH_MODULUS]     = { .type = NLA_U32 },
        [NFTA_HASH_SEED]        = { .type = NLA_U32 },
        [NFTA_HASH_OFFSET]      = { .type = NLA_U32 },
+       [NFTA_HASH_TYPE]        = { .type = NLA_U32 },
 };
 
-static int nft_hash_init(const struct nft_ctx *ctx,
-                        const struct nft_expr *expr,
-                        const struct nlattr * const tb[])
+static int nft_jhash_init(const struct nft_ctx *ctx,
+                         const struct nft_expr *expr,
+                         const struct nlattr * const tb[])
 {
-       struct nft_hash *priv = nft_expr_priv(expr);
+       struct nft_jhash *priv = nft_expr_priv(expr);
        u32 len;
        int err;
 
@@ -92,10 +112,36 @@ static int nft_hash_init(const struct nft_ctx *ctx,
                                           NFT_DATA_VALUE, sizeof(u32));
 }
 
-static int nft_hash_dump(struct sk_buff *skb,
-                        const struct nft_expr *expr)
+static int nft_symhash_init(const struct nft_ctx *ctx,
+                           const struct nft_expr *expr,
+                           const struct nlattr * const tb[])
 {
-       const struct nft_hash *priv = nft_expr_priv(expr);
+       struct nft_symhash *priv = nft_expr_priv(expr);
+
+       if (!tb[NFTA_HASH_DREG]    ||
+           !tb[NFTA_HASH_MODULUS])
+               return -EINVAL;
+
+       if (tb[NFTA_HASH_OFFSET])
+               priv->offset = ntohl(nla_get_be32(tb[NFTA_HASH_OFFSET]));
+
+       priv->dreg = nft_parse_register(tb[NFTA_HASH_DREG]);
+
+       priv->modulus = ntohl(nla_get_be32(tb[NFTA_HASH_MODULUS]));
+       if (priv->modulus <= 1)
+               return -ERANGE;
+
+       if (priv->offset + priv->modulus - 1 < priv->offset)
+               return -EOVERFLOW;
+
+       return nft_validate_register_store(ctx, priv->dreg, NULL,
+                                          NFT_DATA_VALUE, sizeof(u32));
+}
+
+static int nft_jhash_dump(struct sk_buff *skb,
+                         const struct nft_expr *expr)
+{
+       const struct nft_jhash *priv = nft_expr_priv(expr);
 
        if (nft_dump_register(skb, NFTA_HASH_SREG, priv->sreg))
                goto nla_put_failure;
@@ -110,6 +156,28 @@ static int nft_hash_dump(struct sk_buff *skb,
        if (priv->offset != 0)
                if (nla_put_be32(skb, NFTA_HASH_OFFSET, htonl(priv->offset)))
                        goto nla_put_failure;
+       if (nla_put_be32(skb, NFTA_HASH_TYPE, htonl(NFT_HASH_JENKINS)))
+               goto nla_put_failure;
+       return 0;
+
+nla_put_failure:
+       return -1;
+}
+
+static int nft_symhash_dump(struct sk_buff *skb,
+                           const struct nft_expr *expr)
+{
+       const struct nft_symhash *priv = nft_expr_priv(expr);
+
+       if (nft_dump_register(skb, NFTA_HASH_DREG, priv->dreg))
+               goto nla_put_failure;
+       if (nla_put_be32(skb, NFTA_HASH_MODULUS, htonl(priv->modulus)))
+               goto nla_put_failure;
+       if (priv->offset != 0)
+               if (nla_put_be32(skb, NFTA_HASH_OFFSET, htonl(priv->offset)))
+                       goto nla_put_failure;
+       if (nla_put_be32(skb, NFTA_HASH_TYPE, htonl(NFT_HASH_SYM)))
+               goto nla_put_failure;
        return 0;
 
 nla_put_failure:
@@ -117,17 +185,46 @@ nla_put_failure:
 }
 
 static struct nft_expr_type nft_hash_type;
-static const struct nft_expr_ops nft_hash_ops = {
+static const struct nft_expr_ops nft_jhash_ops = {
        .type           = &nft_hash_type,
-       .size           = NFT_EXPR_SIZE(sizeof(struct nft_hash)),
-       .eval           = nft_hash_eval,
-       .init           = nft_hash_init,
-       .dump           = nft_hash_dump,
+       .size           = NFT_EXPR_SIZE(sizeof(struct nft_jhash)),
+       .eval           = nft_jhash_eval,
+       .init           = nft_jhash_init,
+       .dump           = nft_jhash_dump,
 };
 
+static const struct nft_expr_ops nft_symhash_ops = {
+       .type           = &nft_hash_type,
+       .size           = NFT_EXPR_SIZE(sizeof(struct nft_symhash)),
+       .eval           = nft_symhash_eval,
+       .init           = nft_symhash_init,
+       .dump           = nft_symhash_dump,
+};
+
+static const struct nft_expr_ops *
+nft_hash_select_ops(const struct nft_ctx *ctx,
+                   const struct nlattr * const tb[])
+{
+       u32 type;
+
+       if (!tb[NFTA_HASH_TYPE])
+               return &nft_jhash_ops;
+
+       type = ntohl(nla_get_be32(tb[NFTA_HASH_TYPE]));
+       switch (type) {
+       case NFT_HASH_SYM:
+               return &nft_symhash_ops;
+       case NFT_HASH_JENKINS:
+               return &nft_jhash_ops;
+       default:
+               break;
+       }
+       return ERR_PTR(-EOPNOTSUPP);
+}
+
 static struct nft_expr_type nft_hash_type __read_mostly = {
        .name           = "hash",
-       .ops            = &nft_hash_ops,
+       .select_ops     = &nft_hash_select_ops,
        .policy         = nft_hash_policy,
        .maxattr        = NFTA_HASH_MAX,
        .owner          = THIS_MODULE,
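
The new symmetric variant feeds __skb_get_hash_symmetric() through reciprocal_scale(), so both directions of a flow land in the same slot within [offset, offset + modulus), and select_ops picks jhash vs symhash ops from NFTA_HASH_TYPE at init time. Assuming reciprocal_scale()'s usual multiply-shift form, the range reduction looks like this:

    #include <stdint.h>
    #include <stdio.h>

    /* Multiply-shift range reduction, assuming the usual definition of
     * the kernel's reciprocal_scale(): maps a 32-bit hash into [0, mod)
     * without a division. */
    static uint32_t reciprocal_scale(uint32_t val, uint32_t mod)
    {
        return (uint32_t)(((uint64_t)val * mod) >> 32);
    }

    int main(void)
    {
        uint32_t modulus = 16, offset = 100;
        uint32_t hashes[] = { 0x00000000u, 0x7fffffffu, 0xdeadbeefu };

        for (int i = 0; i < 3; i++)
            printf("hash %08x -> slot %u\n", hashes[i],
                   offset + reciprocal_scale(hashes[i], modulus));
        return 0;
    }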
index c6baf412236d662b0d165fec3b4ff26579c9c6d8..18dd57a526513bd726944fa7ad7a9e41fcfb0251 100644 (file)
@@ -17,9 +17,8 @@
 #include <linux/netfilter/nf_tables.h>
 #include <net/netfilter/nf_tables.h>
 
-static DEFINE_SPINLOCK(limit_lock);
-
 struct nft_limit {
+       spinlock_t      lock;
        u64             last;
        u64             tokens;
        u64             tokens_max;
@@ -34,7 +33,7 @@ static inline bool nft_limit_eval(struct nft_limit *limit, u64 cost)
        u64 now, tokens;
        s64 delta;
 
-       spin_lock_bh(&limit_lock);
+       spin_lock_bh(&limit->lock);
        now = ktime_get_ns();
        tokens = limit->tokens + now - limit->last;
        if (tokens > limit->tokens_max)
@@ -44,11 +43,11 @@ static inline bool nft_limit_eval(struct nft_limit *limit, u64 cost)
        delta = tokens - cost;
        if (delta >= 0) {
                limit->tokens = delta;
-               spin_unlock_bh(&limit_lock);
+               spin_unlock_bh(&limit->lock);
                return limit->invert;
        }
        limit->tokens = tokens;
-       spin_unlock_bh(&limit_lock);
+       spin_unlock_bh(&limit->lock);
        return !limit->invert;
 }
 
@@ -86,6 +85,7 @@ static int nft_limit_init(struct nft_limit *limit,
                        limit->invert = true;
        }
        limit->last = ktime_get_ns();
+       spin_lock_init(&limit->lock);
 
        return 0;
 }
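
nft_limit_eval() is a token bucket clocked in nanoseconds: tokens accrue as now - last, get capped at tokens_max, and a packet passes if it can pay its cost. The hunks above move the serialization from one global spinlock into each bucket, so unrelated limit rules no longer contend. A single-threaded userspace model of the accounting (lock and invert handling elided):

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    struct bucket {
        uint64_t last;          /* time of the previous check, ns */
        int64_t  tokens;        /* remaining budget, ns */
        int64_t  tokens_max;
    };

    /* Mirrors the accounting in nft_limit_eval(): refill by elapsed time,
     * cap, then try to pay 'cost'. */
    static bool limit_eval(struct bucket *b, uint64_t now, int64_t cost)
    {
        int64_t tokens = b->tokens + (int64_t)(now - b->last);

        if (tokens > b->tokens_max)
            tokens = b->tokens_max;
        b->last = now;

        if (tokens - cost >= 0) {
            b->tokens = tokens - cost;
            return true;    /* within the limit */
        }
        b->tokens = tokens;
        return false;       /* rate exceeded */
    }

    int main(void)
    {
        struct bucket b = { .last = 0, .tokens = 1000, .tokens_max = 1000 };

        for (uint64_t t = 0; t <= 1200; t += 300) {
            bool ok = limit_eval(&b, t, 600);
            printf("t=%4llu pass=%d tokens=%4lld\n",
                   (unsigned long long)t, ok, (long long)b.tokens);
        }
        return 0;
    }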
index e21aea7e5ec8f141ea3155d1da3c491484c00a73..475570e89ede710b323868792bb16fd5f09d1b09 100644 (file)
@@ -71,16 +71,10 @@ static int nft_lookup_init(const struct nft_ctx *ctx,
            tb[NFTA_LOOKUP_SREG] == NULL)
                return -EINVAL;
 
-       set = nf_tables_set_lookup(ctx->table, tb[NFTA_LOOKUP_SET], genmask);
-       if (IS_ERR(set)) {
-               if (tb[NFTA_LOOKUP_SET_ID]) {
-                       set = nf_tables_set_lookup_byid(ctx->net,
-                                                       tb[NFTA_LOOKUP_SET_ID],
-                                                       genmask);
-               }
-               if (IS_ERR(set))
-                       return PTR_ERR(set);
-       }
+       set = nft_set_lookup(ctx->net, ctx->table, tb[NFTA_LOOKUP_SET],
+                            tb[NFTA_LOOKUP_SET_ID], genmask);
+       if (IS_ERR(set))
+               return PTR_ERR(set);
 
        if (set->flags & NFT_SET_EVAL)
                return -EOPNOTSUPP;
index 11ce016cd47948f3a2902402e0cbbda5e22d9438..6ac03d4266c9038cb4ffd3de412a216b769d07a1 100644 (file)
@@ -46,10 +46,6 @@ int nft_masq_init(const struct nft_ctx *ctx,
        struct nft_masq *priv = nft_expr_priv(expr);
        int err;
 
-       err = nft_masq_validate(ctx, expr, NULL);
-       if (err)
-               return err;
-
        if (tb[NFTA_MASQ_FLAGS]) {
                priv->flags = ntohl(nla_get_be32(tb[NFTA_MASQ_FLAGS]));
                if (priv->flags & ~NF_NAT_RANGE_MASK)
index e1f5ca9b423b5ffda43ec5519d4c8832ce695899..9563ce3c23aa078d781590c475f8981fe198d393 100644 (file)
@@ -45,16 +45,15 @@ void nft_meta_get_eval(const struct nft_expr *expr,
                *dest = skb->len;
                break;
        case NFT_META_PROTOCOL:
-               *dest = 0;
-               *(__be16 *)dest = skb->protocol;
+               nft_reg_store16(dest, (__force u16)skb->protocol);
                break;
        case NFT_META_NFPROTO:
-               *dest = nft_pf(pkt);
+               nft_reg_store8(dest, nft_pf(pkt));
                break;
        case NFT_META_L4PROTO:
                if (!pkt->tprot_set)
                        goto err;
-               *dest = pkt->tprot;
+               nft_reg_store8(dest, pkt->tprot);
                break;
        case NFT_META_PRIORITY:
                *dest = skb->priority;
@@ -85,14 +84,12 @@ void nft_meta_get_eval(const struct nft_expr *expr,
        case NFT_META_IIFTYPE:
                if (in == NULL)
                        goto err;
-               *dest = 0;
-               *(u16 *)dest = in->type;
+               nft_reg_store16(dest, in->type);
                break;
        case NFT_META_OIFTYPE:
                if (out == NULL)
                        goto err;
-               *dest = 0;
-               *(u16 *)dest = out->type;
+               nft_reg_store16(dest, out->type);
                break;
        case NFT_META_SKUID:
                sk = skb_to_full_sk(skb);
@@ -142,19 +139,19 @@ void nft_meta_get_eval(const struct nft_expr *expr,
 #endif
        case NFT_META_PKTTYPE:
                if (skb->pkt_type != PACKET_LOOPBACK) {
-                       *dest = skb->pkt_type;
+                       nft_reg_store8(dest, skb->pkt_type);
                        break;
                }
 
                switch (nft_pf(pkt)) {
                case NFPROTO_IPV4:
                        if (ipv4_is_multicast(ip_hdr(skb)->daddr))
-                               *dest = PACKET_MULTICAST;
+                               nft_reg_store8(dest, PACKET_MULTICAST);
                        else
-                               *dest = PACKET_BROADCAST;
+                               nft_reg_store8(dest, PACKET_BROADCAST);
                        break;
                case NFPROTO_IPV6:
-                       *dest = PACKET_MULTICAST;
+                       nft_reg_store8(dest, PACKET_MULTICAST);
                        break;
                case NFPROTO_NETDEV:
                        switch (skb->protocol) {
@@ -168,14 +165,14 @@ void nft_meta_get_eval(const struct nft_expr *expr,
                                        goto err;
 
                                if (ipv4_is_multicast(iph->daddr))
-                                       *dest = PACKET_MULTICAST;
+                                       nft_reg_store8(dest, PACKET_MULTICAST);
                                else
-                                       *dest = PACKET_BROADCAST;
+                                       nft_reg_store8(dest, PACKET_BROADCAST);
 
                                break;
                        }
                        case htons(ETH_P_IPV6):
-                               *dest = PACKET_MULTICAST;
+                               nft_reg_store8(dest, PACKET_MULTICAST);
                                break;
                        default:
                                WARN_ON_ONCE(1);
@@ -230,7 +227,9 @@ void nft_meta_set_eval(const struct nft_expr *expr,
 {
        const struct nft_meta *meta = nft_expr_priv(expr);
        struct sk_buff *skb = pkt->skb;
-       u32 value = regs->data[meta->sreg];
+       u32 *sreg = &regs->data[meta->sreg];
+       u32 value = *sreg;
+       u8 pkt_type;
 
        switch (meta->key) {
        case NFT_META_MARK:
@@ -240,9 +239,12 @@ void nft_meta_set_eval(const struct nft_expr *expr,
                skb->priority = value;
                break;
        case NFT_META_PKTTYPE:
-               if (skb->pkt_type != value &&
-                   skb_pkt_type_ok(value) && skb_pkt_type_ok(skb->pkt_type))
-                       skb->pkt_type = value;
+               pkt_type = nft_reg_load8(sreg);
+
+               if (skb->pkt_type != pkt_type &&
+                   skb_pkt_type_ok(pkt_type) &&
+                   skb_pkt_type_ok(skb->pkt_type))
+                       skb->pkt_type = pkt_type;
                break;
        case NFT_META_NFTRACE:
                skb->nf_trace = !!value;
@@ -370,10 +372,6 @@ int nft_meta_set_init(const struct nft_ctx *ctx,
                return -EOPNOTSUPP;
        }
 
-       err = nft_meta_set_validate(ctx, expr, NULL);
-       if (err < 0)
-               return err;
-
        priv->sreg = nft_parse_register(tb[NFTA_META_SREG]);
        err = nft_validate_register_load(priv->sreg, len);
        if (err < 0)
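
The nft_reg_store8/16() conversions in this file and in nft_ct.c above zero the full 32-bit register before writing the narrow value, so later comparisons never see stale bytes. A sketch of what such helpers plausibly look like (my reconstruction, not copied from the kernel header):

    #include <stdint.h>
    #include <string.h>
    #include <stdio.h>

    /* Zero the whole 32-bit register, then write the narrow value into
     * its low bytes, so no stale garbage survives in the register. */
    static void reg_store8(uint32_t *dreg, uint8_t val)
    {
        *dreg = 0;
        memcpy(dreg, &val, sizeof(val));
    }

    static void reg_store16(uint32_t *dreg, uint16_t val)
    {
        *dreg = 0;
        memcpy(dreg, &val, sizeof(val));
    }

    static uint16_t reg_load16(const uint32_t *sreg)
    {
        uint16_t val;

        memcpy(&val, sreg, sizeof(val));
        return val;
    }

    int main(void)
    {
        uint32_t reg = 0xdeadbeef;  /* stale contents from a prior rule */

        reg_store16(&reg, 0x0800);
        printf("reg=%08x load16=%04x\n", reg, reg_load16(&reg));
        reg_store8(&reg, 17);
        printf("reg=%08x\n", reg);
        return 0;
    }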
index 19a7bf3236f968725a29e827012af301781802df..ed548d06b6dda9a98888bb83f2baa6b45c965c15 100644 (file)
@@ -65,10 +65,10 @@ static void nft_nat_eval(const struct nft_expr *expr,
        }
 
        if (priv->sreg_proto_min) {
-               range.min_proto.all =
-                       *(__be16 *)&regs->data[priv->sreg_proto_min];
-               range.max_proto.all =
-                       *(__be16 *)&regs->data[priv->sreg_proto_max];
+               range.min_proto.all = (__force __be16)nft_reg_load16(
+                       &regs->data[priv->sreg_proto_min]);
+               range.max_proto.all = (__force __be16)nft_reg_load16(
+                       &regs->data[priv->sreg_proto_max]);
                range.flags |= NF_NAT_RANGE_PROTO_SPECIFIED;
        }
 
@@ -138,10 +138,6 @@ static int nft_nat_init(const struct nft_ctx *ctx, const struct nft_expr *expr,
                return -EINVAL;
        }
 
-       err = nft_nat_validate(ctx, expr, NULL);
-       if (err < 0)
-               return err;
-
        if (tb[NFTA_NAT_FAMILY] == NULL)
                return -EINVAL;
 
index 1ae8c49ca4a1fac06f69c41f68a36b7e85593adb..1dd428fbaaa3f836e7bcecb50355f60c14d8faad 100644 (file)
@@ -116,16 +116,10 @@ static int nft_objref_map_init(const struct nft_ctx *ctx,
        struct nft_set *set;
        int err;
 
-       set = nf_tables_set_lookup(ctx->table, tb[NFTA_OBJREF_SET_NAME], genmask);
-       if (IS_ERR(set)) {
-               if (tb[NFTA_OBJREF_SET_ID]) {
-                       set = nf_tables_set_lookup_byid(ctx->net,
-                                                       tb[NFTA_OBJREF_SET_ID],
-                                                       genmask);
-               }
-               if (IS_ERR(set))
-                       return PTR_ERR(set);
-       }
+       set = nft_set_lookup(ctx->net, ctx->table, tb[NFTA_OBJREF_SET_NAME],
+                            tb[NFTA_OBJREF_SET_ID], genmask);
+       if (IS_ERR(set))
+               return PTR_ERR(set);
 
        if (!(set->flags & NFT_SET_OBJECT))
                return -EINVAL;
index 2d6fe3559912674385e7679557fc31ddeb901b38..25e33159be57882fcf9875725079188dfaa7d113 100644 (file)
@@ -99,7 +99,8 @@ static int nft_quota_do_init(const struct nlattr * const tb[],
        return 0;
 }
 
-static int nft_quota_obj_init(const struct nlattr * const tb[],
+static int nft_quota_obj_init(const struct nft_ctx *ctx,
+                             const struct nlattr * const tb[],
                              struct nft_object *obj)
 {
        struct nft_quota *priv = nft_obj_data(obj);
index 40dcd05146d5fb0f346e6e170d16de9813e92804..1e66538bf0ff24e3286ec6312e4d593c6197bd9b 100644 (file)
@@ -47,10 +47,6 @@ int nft_redir_init(const struct nft_ctx *ctx,
        unsigned int plen;
        int err;
 
-       err = nft_redir_validate(ctx, expr, NULL);
-       if (err < 0)
-               return err;
-
        plen = FIELD_SIZEOF(struct nf_nat_range, min_addr.all);
        if (tb[NFTA_REDIR_REG_PROTO_MIN]) {
                priv->sreg_proto_min =
index c64de3f7379df551fa413a4af186f3c16886f112..29f5bd2377b0deaf7ede8ec0573bf71cfeef7478 100644 (file)
@@ -42,11 +42,6 @@ int nft_reject_init(const struct nft_ctx *ctx,
                    const struct nlattr * const tb[])
 {
        struct nft_reject *priv = nft_expr_priv(expr);
-       int err;
-
-       err = nft_reject_validate(ctx, expr, NULL);
-       if (err < 0)
-               return err;
 
        if (tb[NFTA_REJECT_TYPE] == NULL)
                return -EINVAL;
index 9e90a02cb104dad81daf84208902e90390a8f504..5a7fb5ff867d382f04633a2fab53997d0ca1f2b2 100644 (file)
@@ -66,11 +66,7 @@ static int nft_reject_inet_init(const struct nft_ctx *ctx,
                                const struct nlattr * const tb[])
 {
        struct nft_reject *priv = nft_expr_priv(expr);
-       int icmp_code, err;
-
-       err = nft_reject_validate(ctx, expr, NULL);
-       if (err < 0)
-               return err;
+       int icmp_code;
 
        if (tb[NFTA_REJECT_TYPE] == NULL)
                return -EINVAL;
index 152d226552c174929fd8973f023eaac888e4b0a9..8ebbc2940f4c593d393c65bd5674d90feb585d98 100644 (file)
 #include <linux/netfilter/nf_tables.h>
 #include <net/netfilter/nf_tables.h>
 
+struct nft_bitmap_elem {
+       struct list_head        head;
+       struct nft_set_ext      ext;
+};
+
 /* This bitmap uses two bits to represent one element. These two bits determine
  * the element state in the current and the future generation.
  *
  *      restore its previous state.
  */
 struct nft_bitmap {
-       u16     bitmap_size;
-       u8      bitmap[];
+       struct  list_head       list;
+       u16                     bitmap_size;
+       u8                      bitmap[];
 };
 
-static inline void nft_bitmap_location(u32 key, u32 *idx, u32 *off)
+static inline void nft_bitmap_location(const struct nft_set *set,
+                                      const void *key,
+                                      u32 *idx, u32 *off)
 {
-       u32 k = (key << 1);
+       u32 k;
+
+       if (set->klen == 2)
+               k = *(u16 *)key;
+       else
+               k = *(u8 *)key;
+       k <<= 1;
 
        *idx = k / BITS_PER_BYTE;
        *off = k % BITS_PER_BYTE;
@@ -69,26 +83,48 @@ static bool nft_bitmap_lookup(const struct net *net, const struct nft_set *set,
        u8 genmask = nft_genmask_cur(net);
        u32 idx, off;
 
-       nft_bitmap_location(*key, &idx, &off);
+       nft_bitmap_location(set, key, &idx, &off);
 
        return nft_bitmap_active(priv->bitmap, idx, off, genmask);
 }
 
+static struct nft_bitmap_elem *
+nft_bitmap_elem_find(const struct nft_set *set, struct nft_bitmap_elem *this,
+                    u8 genmask)
+{
+       const struct nft_bitmap *priv = nft_set_priv(set);
+       struct nft_bitmap_elem *be;
+
+       list_for_each_entry_rcu(be, &priv->list, head) {
+               if (memcmp(nft_set_ext_key(&be->ext),
+                          nft_set_ext_key(&this->ext), set->klen) ||
+                   !nft_set_elem_active(&be->ext, genmask))
+                       continue;
+
+               return be;
+       }
+       return NULL;
+}
+
 static int nft_bitmap_insert(const struct net *net, const struct nft_set *set,
                             const struct nft_set_elem *elem,
-                            struct nft_set_ext **_ext)
+                            struct nft_set_ext **ext)
 {
        struct nft_bitmap *priv = nft_set_priv(set);
-       struct nft_set_ext *ext = elem->priv;
+       struct nft_bitmap_elem *new = elem->priv, *be;
        u8 genmask = nft_genmask_next(net);
        u32 idx, off;
 
-       nft_bitmap_location(nft_set_ext_key(ext)->data[0], &idx, &off);
-       if (nft_bitmap_active(priv->bitmap, idx, off, genmask))
+       be = nft_bitmap_elem_find(set, new, genmask);
+       if (be) {
+               *ext = &be->ext;
                return -EEXIST;
+       }
 
+       nft_bitmap_location(set, nft_set_ext_key(&new->ext), &idx, &off);
        /* Enter 01 state. */
        priv->bitmap[idx] |= (genmask << off);
+       list_add_tail_rcu(&new->head, &priv->list);
 
        return 0;
 }
@@ -98,13 +134,14 @@ static void nft_bitmap_remove(const struct net *net,
                              const struct nft_set_elem *elem)
 {
        struct nft_bitmap *priv = nft_set_priv(set);
-       struct nft_set_ext *ext = elem->priv;
+       struct nft_bitmap_elem *be = elem->priv;
        u8 genmask = nft_genmask_next(net);
        u32 idx, off;
 
-       nft_bitmap_location(nft_set_ext_key(ext)->data[0], &idx, &off);
+       nft_bitmap_location(set, nft_set_ext_key(&be->ext), &idx, &off);
        /* Enter 00 state. */
        priv->bitmap[idx] &= ~(genmask << off);
+       list_del_rcu(&be->head);
 }
 
 static void nft_bitmap_activate(const struct net *net,
@@ -112,74 +149,52 @@ static void nft_bitmap_activate(const struct net *net,
                                const struct nft_set_elem *elem)
 {
        struct nft_bitmap *priv = nft_set_priv(set);
-       struct nft_set_ext *ext = elem->priv;
+       struct nft_bitmap_elem *be = elem->priv;
        u8 genmask = nft_genmask_next(net);
        u32 idx, off;
 
-       nft_bitmap_location(nft_set_ext_key(ext)->data[0], &idx, &off);
+       nft_bitmap_location(set, nft_set_ext_key(&be->ext), &idx, &off);
        /* Enter 11 state. */
        priv->bitmap[idx] |= (genmask << off);
+       nft_set_elem_change_active(net, set, &be->ext);
 }
 
 static bool nft_bitmap_flush(const struct net *net,
-                            const struct nft_set *set, void *ext)
+                            const struct nft_set *set, void *_be)
 {
        struct nft_bitmap *priv = nft_set_priv(set);
        u8 genmask = nft_genmask_next(net);
+       struct nft_bitmap_elem *be = _be;
        u32 idx, off;
 
-       nft_bitmap_location(nft_set_ext_key(ext)->data[0], &idx, &off);
+       nft_bitmap_location(set, nft_set_ext_key(&be->ext), &idx, &off);
        /* Enter 10 state, similar to deactivation. */
        priv->bitmap[idx] &= ~(genmask << off);
+       nft_set_elem_change_active(net, set, &be->ext);
 
        return true;
 }
 
-static struct nft_set_ext *nft_bitmap_ext_alloc(const struct nft_set *set,
-                                               const struct nft_set_elem *elem)
-{
-       struct nft_set_ext_tmpl tmpl;
-       struct nft_set_ext *ext;
-
-       nft_set_ext_prepare(&tmpl);
-       nft_set_ext_add_length(&tmpl, NFT_SET_EXT_KEY, set->klen);
-
-       ext = kzalloc(tmpl.len, GFP_KERNEL);
-       if (!ext)
-               return NULL;
-
-       nft_set_ext_init(ext, &tmpl);
-       memcpy(nft_set_ext_key(ext), elem->key.val.data, set->klen);
-
-       return ext;
-}
-
 static void *nft_bitmap_deactivate(const struct net *net,
                                   const struct nft_set *set,
                                   const struct nft_set_elem *elem)
 {
        struct nft_bitmap *priv = nft_set_priv(set);
+       struct nft_bitmap_elem *this = elem->priv, *be;
        u8 genmask = nft_genmask_next(net);
-       struct nft_set_ext *ext;
-       u32 idx, off, key = 0;
-
-       memcpy(&key, elem->key.val.data, set->klen);
-       nft_bitmap_location(key, &idx, &off);
+       u32 idx, off;
 
-       if (!nft_bitmap_active(priv->bitmap, idx, off, genmask))
-               return NULL;
+       nft_bitmap_location(set, elem->key.val.data, &idx, &off);
 
-       /* We have no real set extension since this is a bitmap, allocate this
-        * dummy object that is released from the commit/abort path.
-        */
-       ext = nft_bitmap_ext_alloc(set, elem);
-       if (!ext)
+       be = nft_bitmap_elem_find(set, this, genmask);
+       if (!be)
                return NULL;
 
        /* Enter 10 state. */
        priv->bitmap[idx] &= ~(genmask << off);
+       nft_set_elem_change_active(net, set, &be->ext);
 
-       return ext;
+       return be;
 }
 
 static void nft_bitmap_walk(const struct nft_ctx *ctx,
@@ -187,47 +202,23 @@ static void nft_bitmap_walk(const struct nft_ctx *ctx,
                            struct nft_set_iter *iter)
 {
        const struct nft_bitmap *priv = nft_set_priv(set);
-       struct nft_set_ext_tmpl tmpl;
+       struct nft_bitmap_elem *be;
        struct nft_set_elem elem;
-       struct nft_set_ext *ext;
-       int idx, off;
-       u16 key;
-
-       nft_set_ext_prepare(&tmpl);
-       nft_set_ext_add_length(&tmpl, NFT_SET_EXT_KEY, set->klen);
-
-       for (idx = 0; idx < priv->bitmap_size; idx++) {
-               for (off = 0; off < BITS_PER_BYTE; off += 2) {
-                       if (iter->count < iter->skip)
-                               goto cont;
-
-                       if (!nft_bitmap_active(priv->bitmap, idx, off,
-                                              iter->genmask))
-                               goto cont;
-
-                       ext = kzalloc(tmpl.len, GFP_KERNEL);
-                       if (!ext) {
-                               iter->err = -ENOMEM;
-                               return;
-                       }
-                       nft_set_ext_init(ext, &tmpl);
-                       key = ((idx * BITS_PER_BYTE) + off) >> 1;
-                       memcpy(nft_set_ext_key(ext), &key, set->klen);
-
-                       elem.priv = ext;
-                       iter->err = iter->fn(ctx, set, iter, &elem);
-
-                       /* On set flush, this dummy extension object is released
-                        * from the commit/abort path.
-                        */
-                       if (!iter->flush)
-                               kfree(ext);
-
-                       if (iter->err < 0)
-                               return;
+
+       list_for_each_entry_rcu(be, &priv->list, head) {
+               if (iter->count < iter->skip)
+                       goto cont;
+               if (!nft_set_elem_active(&be->ext, iter->genmask))
+                       goto cont;
+
+               elem.priv = be;
+
+               iter->err = iter->fn(ctx, set, iter, &elem);
+
+               if (iter->err < 0)
+                       return;
 cont:
-                       iter->count++;
-               }
+               iter->count++;
        }
 }
 
@@ -258,6 +249,7 @@ static int nft_bitmap_init(const struct nft_set *set,
 {
        struct nft_bitmap *priv = nft_set_priv(set);
 
+       INIT_LIST_HEAD(&priv->list);
        priv->bitmap_size = nft_bitmap_size(set->klen);
 
        return 0;
@@ -283,6 +275,7 @@ static bool nft_bitmap_estimate(const struct nft_set_desc *desc, u32 features,
 
 static struct nft_set_ops nft_bitmap_ops __read_mostly = {
        .privsize       = nft_bitmap_privsize,
+       .elemsize       = offsetof(struct nft_bitmap_elem, ext),
        .estimate       = nft_bitmap_estimate,
        .init           = nft_bitmap_init,
        .destroy        = nft_bitmap_destroy,
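
Each bitmap element still occupies two bits, one per generation, selected by the genmask; the new element list lets insert, deactivate and walk deal with real extension objects instead of the old dummy allocations. A toy of the two-bit state machine (generation mask values assumed for illustration):

    #include <stdint.h>
    #include <stdio.h>

    #define BITS_PER_BYTE 8

    /* Two bits per element encode (current, next) generation membership;
     * the genmask picks the bit belonging to the generation in play. */
    static void location(uint32_t key, uint32_t *idx, uint32_t *off)
    {
        uint32_t k = key << 1;

        *idx = k / BITS_PER_BYTE;
        *off = k % BITS_PER_BYTE;
    }

    static int active(const uint8_t *bitmap, uint32_t idx, uint32_t off,
                      uint8_t genmask)
    {
        return (bitmap[idx] & (genmask << off)) != 0;
    }

    int main(void)
    {
        uint8_t bitmap[64] = { 0 };
        uint8_t next = 0x2, cur = 0x1;      /* assumed mask values */
        uint32_t idx, off;

        location(9, &idx, &off);
        bitmap[idx] |= (next << off);       /* insert: 00 -> 01 */
        printf("next gen: %d, cur gen: %d\n",
               active(bitmap, idx, off, next), active(bitmap, idx, off, cur));
        bitmap[idx] |= (cur << off);        /* activate: 01 -> 11 */
        printf("after commit, cur gen: %d\n", active(bitmap, idx, off, cur));
        return 0;
    }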
index 78dfbf9588b368107bdc385c7ef208a9abd3d297..e97e2fb53f0a107b0361322be10f16b4ab4b5d32 100644 (file)
@@ -18,9 +18,8 @@
 #include <linux/netfilter/nf_tables.h>
 #include <net/netfilter/nf_tables.h>
 
-static DEFINE_SPINLOCK(nft_rbtree_lock);
-
 struct nft_rbtree {
+       rwlock_t                lock;
        struct rb_root          root;
 };
 
@@ -44,14 +43,14 @@ static bool nft_rbtree_equal(const struct nft_set *set, const void *this,
 static bool nft_rbtree_lookup(const struct net *net, const struct nft_set *set,
                              const u32 *key, const struct nft_set_ext **ext)
 {
-       const struct nft_rbtree *priv = nft_set_priv(set);
+       struct nft_rbtree *priv = nft_set_priv(set);
        const struct nft_rbtree_elem *rbe, *interval = NULL;
        u8 genmask = nft_genmask_cur(net);
        const struct rb_node *parent;
        const void *this;
        int d;
 
-       spin_lock_bh(&nft_rbtree_lock);
+       read_lock_bh(&priv->lock);
        parent = priv->root.rb_node;
        while (parent != NULL) {
                rbe = rb_entry(parent, struct nft_rbtree_elem, node);
@@ -75,7 +74,7 @@ static bool nft_rbtree_lookup(const struct net *net, const struct nft_set *set,
                        }
                        if (nft_rbtree_interval_end(rbe))
                                goto out;
-                       spin_unlock_bh(&nft_rbtree_lock);
+                       read_unlock_bh(&priv->lock);
 
                        *ext = &rbe->ext;
                        return true;
@@ -85,12 +84,12 @@ static bool nft_rbtree_lookup(const struct net *net, const struct nft_set *set,
        if (set->flags & NFT_SET_INTERVAL && interval != NULL &&
            nft_set_elem_active(&interval->ext, genmask) &&
            !nft_rbtree_interval_end(interval)) {
-               spin_unlock_bh(&nft_rbtree_lock);
+               read_unlock_bh(&priv->lock);
                *ext = &interval->ext;
                return true;
        }
 out:
-       spin_unlock_bh(&nft_rbtree_lock);
+       read_unlock_bh(&priv->lock);
        return false;
 }
 
@@ -140,12 +139,13 @@ static int nft_rbtree_insert(const struct net *net, const struct nft_set *set,
                             const struct nft_set_elem *elem,
                             struct nft_set_ext **ext)
 {
+       struct nft_rbtree *priv = nft_set_priv(set);
        struct nft_rbtree_elem *rbe = elem->priv;
        int err;
 
-       spin_lock_bh(&nft_rbtree_lock);
+       write_lock_bh(&priv->lock);
        err = __nft_rbtree_insert(net, set, rbe, ext);
-       spin_unlock_bh(&nft_rbtree_lock);
+       write_unlock_bh(&priv->lock);
 
        return err;
 }
@@ -157,9 +157,9 @@ static void nft_rbtree_remove(const struct net *net,
        struct nft_rbtree *priv = nft_set_priv(set);
        struct nft_rbtree_elem *rbe = elem->priv;
 
-       spin_lock_bh(&nft_rbtree_lock);
+       write_lock_bh(&priv->lock);
        rb_erase(&rbe->node, &priv->root);
-       spin_unlock_bh(&nft_rbtree_lock);
+       write_unlock_bh(&priv->lock);
 }
 
 static void nft_rbtree_activate(const struct net *net,
@@ -224,12 +224,12 @@ static void nft_rbtree_walk(const struct nft_ctx *ctx,
                            struct nft_set *set,
                            struct nft_set_iter *iter)
 {
-       const struct nft_rbtree *priv = nft_set_priv(set);
+       struct nft_rbtree *priv = nft_set_priv(set);
        struct nft_rbtree_elem *rbe;
        struct nft_set_elem elem;
        struct rb_node *node;
 
-       spin_lock_bh(&nft_rbtree_lock);
+       read_lock_bh(&priv->lock);
        for (node = rb_first(&priv->root); node != NULL; node = rb_next(node)) {
                rbe = rb_entry(node, struct nft_rbtree_elem, node);
 
@@ -242,13 +242,13 @@ static void nft_rbtree_walk(const struct nft_ctx *ctx,
 
                iter->err = iter->fn(ctx, set, iter, &elem);
                if (iter->err < 0) {
-                       spin_unlock_bh(&nft_rbtree_lock);
+                       read_unlock_bh(&priv->lock);
                        return;
                }
 cont:
                iter->count++;
        }
-       spin_unlock_bh(&nft_rbtree_lock);
+       read_unlock_bh(&priv->lock);
 }
 
 static unsigned int nft_rbtree_privsize(const struct nlattr * const nla[])
@@ -262,6 +262,7 @@ static int nft_rbtree_init(const struct nft_set *set,
 {
        struct nft_rbtree *priv = nft_set_priv(set);
 
+       rwlock_init(&priv->lock);
        priv->root = RB_ROOT;
        return 0;
 }
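
Converting the global spinlock to a per-set rwlock lets concurrent lookups and walks share the lock while inserts and removals take it exclusively, and different sets stop contending entirely. The same discipline with POSIX rwlocks (build with -lpthread):

    #include <pthread.h>
    #include <stdio.h>

    /* Per-structure rwlock instead of one global spinlock: readers share,
     * writers exclude, and two different sets never contend at all. */
    struct set {
        pthread_rwlock_t lock;
        int value;
    };

    static int lookup(struct set *s)
    {
        int v;

        pthread_rwlock_rdlock(&s->lock);    /* many lookups in parallel */
        v = s->value;
        pthread_rwlock_unlock(&s->lock);
        return v;
    }

    static void insert(struct set *s, int v)
    {
        pthread_rwlock_wrlock(&s->lock);    /* writers are exclusive */
        s->value = v;
        pthread_rwlock_unlock(&s->lock);
    }

    int main(void)
    {
        struct set s;

        pthread_rwlock_init(&s.lock, NULL);
        insert(&s, 7);
        printf("%d\n", lookup(&s));
        pthread_rwlock_destroy(&s.lock);
        return 0;
    }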
index dab962df178795612580a1c8e22257213bdab07d..d27b5f1ea619f9696912b58bd5012358206725d7 100644 (file)
@@ -18,6 +18,7 @@
 #include <linux/netfilter/xt_limit.h>
 
 struct xt_limit_priv {
+       spinlock_t lock;
        unsigned long prev;
        uint32_t credit;
 };
@@ -32,8 +33,6 @@ MODULE_ALIAS("ip6t_limit");
  * see net/sched/sch_tbf.c in the linux source tree
  */
 
-static DEFINE_SPINLOCK(limit_lock);
-
 /* Rusty: This is my (non-mathematically-inclined) understanding of
    this algorithm.  The `average rate' in jiffies becomes your initial
    amount of credit `credit' and the most credit you can ever have
@@ -72,7 +71,7 @@ limit_mt(const struct sk_buff *skb, struct xt_action_param *par)
        struct xt_limit_priv *priv = r->master;
        unsigned long now = jiffies;
 
-       spin_lock_bh(&limit_lock);
+       spin_lock_bh(&priv->lock);
        priv->credit += (now - xchg(&priv->prev, now)) * CREDITS_PER_JIFFY;
        if (priv->credit > r->credit_cap)
                priv->credit = r->credit_cap;
@@ -80,11 +79,11 @@ limit_mt(const struct sk_buff *skb, struct xt_action_param *par)
        if (priv->credit >= r->cost) {
                /* We're not limited. */
                priv->credit -= r->cost;
-               spin_unlock_bh(&limit_lock);
+               spin_unlock_bh(&priv->lock);
                return true;
        }
 
-       spin_unlock_bh(&limit_lock);
+       spin_unlock_bh(&priv->lock);
        return false;
 }
 
@@ -126,6 +125,8 @@ static int limit_mt_check(const struct xt_mtchk_param *par)
                r->credit_cap = priv->credit; /* Credits full. */
                r->cost = user2credits(r->avg);
        }
+       spin_lock_init(&priv->lock);
+
        return 0;
 }
 
index 7b73c7c161a9680b8691a712c31073b7789620f7..fc232441cf230faacdebf83fd1735462a6453d03 100644 (file)
@@ -78,14 +78,6 @@ struct listeners {
 /* state bits */
 #define NETLINK_S_CONGESTED            0x0
 
-/* flags */
-#define NETLINK_F_KERNEL_SOCKET                0x1
-#define NETLINK_F_RECV_PKTINFO         0x2
-#define NETLINK_F_BROADCAST_SEND_ERROR 0x4
-#define NETLINK_F_RECV_NO_ENOBUFS      0x8
-#define NETLINK_F_LISTEN_ALL_NSID      0x10
-#define NETLINK_F_CAP_ACK              0x20
-
 static inline int netlink_is_kernel(struct sock *sk)
 {
        return nlk_sk(sk)->flags & NETLINK_F_KERNEL_SOCKET;
@@ -96,6 +88,44 @@ EXPORT_SYMBOL_GPL(nl_table);
 
 static DECLARE_WAIT_QUEUE_HEAD(nl_table_wait);
 
+static struct lock_class_key nlk_cb_mutex_keys[MAX_LINKS];
+
+static const char *const nlk_cb_mutex_key_strings[MAX_LINKS + 1] = {
+       "nlk_cb_mutex-ROUTE",
+       "nlk_cb_mutex-1",
+       "nlk_cb_mutex-USERSOCK",
+       "nlk_cb_mutex-FIREWALL",
+       "nlk_cb_mutex-SOCK_DIAG",
+       "nlk_cb_mutex-NFLOG",
+       "nlk_cb_mutex-XFRM",
+       "nlk_cb_mutex-SELINUX",
+       "nlk_cb_mutex-ISCSI",
+       "nlk_cb_mutex-AUDIT",
+       "nlk_cb_mutex-FIB_LOOKUP",
+       "nlk_cb_mutex-CONNECTOR",
+       "nlk_cb_mutex-NETFILTER",
+       "nlk_cb_mutex-IP6_FW",
+       "nlk_cb_mutex-DNRTMSG",
+       "nlk_cb_mutex-KOBJECT_UEVENT",
+       "nlk_cb_mutex-GENERIC",
+       "nlk_cb_mutex-17",
+       "nlk_cb_mutex-SCSITRANSPORT",
+       "nlk_cb_mutex-ECRYPTFS",
+       "nlk_cb_mutex-RDMA",
+       "nlk_cb_mutex-CRYPTO",
+       "nlk_cb_mutex-SMC",
+       "nlk_cb_mutex-23",
+       "nlk_cb_mutex-24",
+       "nlk_cb_mutex-25",
+       "nlk_cb_mutex-26",
+       "nlk_cb_mutex-27",
+       "nlk_cb_mutex-28",
+       "nlk_cb_mutex-29",
+       "nlk_cb_mutex-30",
+       "nlk_cb_mutex-31",
+       "nlk_cb_mutex-MAX_LINKS"
+};
+
 static int netlink_dump(struct sock *sk);
 static void netlink_skb_destructor(struct sk_buff *skb);
 
@@ -585,6 +615,9 @@ static int __netlink_create(struct net *net, struct socket *sock,
        } else {
                nlk->cb_mutex = &nlk->cb_def_mutex;
                mutex_init(nlk->cb_mutex);
+               lockdep_set_class_and_name(nlk->cb_mutex,
+                                          nlk_cb_mutex_keys + protocol,
+                                          nlk_cb_mutex_key_strings[protocol]);
        }
        init_waitqueue_head(&nlk->wait);
 
index 4fdb3831897775547f77c069a8018c0d2a253c8c..f792f8d7f982d3e7d5b67b7cf1c5f01475728e1f 100644 (file)
@@ -6,6 +6,14 @@
 #include <linux/workqueue.h>
 #include <net/sock.h>
 
+/* flags */
+#define NETLINK_F_KERNEL_SOCKET                0x1
+#define NETLINK_F_RECV_PKTINFO         0x2
+#define NETLINK_F_BROADCAST_SEND_ERROR 0x4
+#define NETLINK_F_RECV_NO_ENOBUFS      0x8
+#define NETLINK_F_LISTEN_ALL_NSID      0x10
+#define NETLINK_F_CAP_ACK              0x20
+
 #define NLGRPSZ(x)     (ALIGN(x, sizeof(unsigned long) * 8) / 8)
 #define NLGRPLONGS(x)  (NLGRPSZ(x)/sizeof(unsigned long))
 
index a5546249fb1022b52144a40717b8a4268755b972..8faa20b4d4573f3e2b510f03f3d4a85a70352559 100644 (file)
@@ -19,6 +19,27 @@ static int sk_diag_dump_groups(struct sock *sk, struct sk_buff *nlskb)
                       nlk->groups);
 }
 
+static int sk_diag_put_flags(struct sock *sk, struct sk_buff *skb)
+{
+       struct netlink_sock *nlk = nlk_sk(sk);
+       u32 flags = 0;
+
+       if (nlk->cb_running)
+               flags |= NDIAG_FLAG_CB_RUNNING;
+       if (nlk->flags & NETLINK_F_RECV_PKTINFO)
+               flags |= NDIAG_FLAG_PKTINFO;
+       if (nlk->flags & NETLINK_F_BROADCAST_SEND_ERROR)
+               flags |= NDIAG_FLAG_BROADCAST_ERROR;
+       if (nlk->flags & NETLINK_F_RECV_NO_ENOBUFS)
+               flags |= NDIAG_FLAG_NO_ENOBUFS;
+       if (nlk->flags & NETLINK_F_LISTEN_ALL_NSID)
+               flags |= NDIAG_FLAG_LISTEN_ALL_NSID;
+       if (nlk->flags & NETLINK_F_CAP_ACK)
+               flags |= NDIAG_FLAG_CAP_ACK;
+
+       return nla_put_u32(skb, NETLINK_DIAG_FLAGS, flags);
+}
+
 static int sk_diag_fill(struct sock *sk, struct sk_buff *skb,
                        struct netlink_diag_req *req,
                        u32 portid, u32 seq, u32 flags, int sk_ino)
@@ -52,6 +73,10 @@ static int sk_diag_fill(struct sock *sk, struct sk_buff *skb,
            sock_diag_put_meminfo(sk, skb, NETLINK_DIAG_MEMINFO))
                goto out_nlmsg_trim;
 
+       if ((req->ndiag_show & NDIAG_SHOW_FLAGS) &&
+           sk_diag_put_flags(sk, skb))
+               goto out_nlmsg_trim;
+
        nlmsg_end(skb, nlh);
        return 0;
 
index fb6e10fdb2174320c96608aea63d3c484d3625a0..92e0981f74040d7029b65863167b459322612024 100644 (file)
@@ -783,8 +783,10 @@ static int ctrl_dumpfamily(struct sk_buff *skb, struct netlink_callback *cb)
 
                if (ctrl_fill_info(rt, NETLINK_CB(cb->skb).portid,
                                   cb->nlh->nlmsg_seq, NLM_F_MULTI,
-                                  skb, CTRL_CMD_NEWFAMILY) < 0)
+                                  skb, CTRL_CMD_NEWFAMILY) < 0) {
+                       n--;
                        break;
+               }
        }
 
        cb->args[0] = n;
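
The n-- above backs the dump cursor up when ctrl_fill_info() fails, so the family that did not fit into this skb is retried in the next dump round instead of being skipped. A toy resume-cursor loop showing the difference (skip accounting simplified relative to ctrl_dumpfamily()):

    #include <stdio.h>

    #define N 5

    /* Count entries as we go; if one does not fit, un-count it so the
     * next round resumes exactly there (the n-- in the hunk above). */
    static int dump(int skip, int budget, const int *items)
    {
        int n = 0;

        for (int i = 0; i < N; i++) {
            if (++n <= skip)
                continue;       /* sent in an earlier batch */
            if (budget-- == 0) {
                n--;            /* items[i] was NOT emitted */
                break;
            }
            printf("emit %d\n", items[i]);
        }
        return n;   /* saved like cb->args[0] */
    }

    int main(void)
    {
        const int items[N] = { 10, 11, 12, 13, 14 };
        int skip;

        skip = dump(0, 2, items);   /* fills up after two entries */
        dump(skip, 8, items);       /* resumes at the failed entry */
        return 0;
    }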
index 4bbf4526b88566d7c3f14e602f279b7e2570113c..ebf16f7f90892dd3029e643835859459baec4507 100644 (file)
@@ -765,7 +765,8 @@ out_release:
        return err;
 }
 
-static int nr_accept(struct socket *sock, struct socket *newsock, int flags)
+static int nr_accept(struct socket *sock, struct socket *newsock, int flags,
+                    bool kern)
 {
        struct sk_buff *skb;
        struct sock *newsk;
index 879885b31cce5ff2461c3a1524612527b9383bbd..2ffb18e73df6c03072fffeb68b660fb2f884eb45 100644 (file)
@@ -441,7 +441,7 @@ struct sock *nfc_llcp_accept_dequeue(struct sock *parent,
 }
 
 static int llcp_sock_accept(struct socket *sock, struct socket *newsock,
-                           int flags)
+                           int flags, bool kern)
 {
        DECLARE_WAITQUEUE(wait, current);
        struct sock *sk = sock->sk, *new_sk;
index c82301ce3fffb6caeb41a9882a53289ec7b63c8d..e4610676299bcdac626db1a30cd4da44ccc62c0b 100644 (file)
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2007-2014 Nicira, Inc.
+ * Copyright (c) 2007-2017 Nicira, Inc.
  *
  * This program is free software; you can redistribute it and/or
  * modify it under the terms of version 2 of the GNU General Public
 #include "conntrack.h"
 #include "vport.h"
 
-static int do_execute_actions(struct datapath *dp, struct sk_buff *skb,
-                             struct sw_flow_key *key,
-                             const struct nlattr *attr, int len);
-
 struct deferred_action {
        struct sk_buff *skb;
        const struct nlattr *actions;
+       int actions_len;
 
        /* Store pkt_key clone when creating deferred action. */
        struct sw_flow_key pkt_key;
@@ -82,14 +79,31 @@ struct action_fifo {
        struct deferred_action fifo[DEFERRED_ACTION_FIFO_SIZE];
 };
 
-struct recirc_keys {
+struct action_flow_keys {
        struct sw_flow_key key[OVS_DEFERRED_ACTION_THRESHOLD];
 };
 
 static struct action_fifo __percpu *action_fifos;
-static struct recirc_keys __percpu *recirc_keys;
+static struct action_flow_keys __percpu *flow_keys;
 static DEFINE_PER_CPU(int, exec_actions_level);
 
+/* Make a clone of the 'key', using the pre-allocated percpu 'flow_keys'
+ * space. Return NULL if out of key space.
+ */
+static struct sw_flow_key *clone_key(const struct sw_flow_key *key_)
+{
+       struct action_flow_keys *keys = this_cpu_ptr(flow_keys);
+       int level = this_cpu_read(exec_actions_level);
+       struct sw_flow_key *key = NULL;
+
+       if (level <= OVS_DEFERRED_ACTION_THRESHOLD) {
+               key = &keys->key[level - 1];
+               *key = *key_;
+       }
+
+       return key;
+}
+
 static void action_fifo_init(struct action_fifo *fifo)
 {
        fifo->head = 0;
@@ -119,8 +133,9 @@ static struct deferred_action *action_fifo_put(struct action_fifo *fifo)
 
 /* Return the queued entry, or NULL if the fifo is full */
 static struct deferred_action *add_deferred_actions(struct sk_buff *skb,
-                                                   const struct sw_flow_key *key,
-                                                   const struct nlattr *attr)
+                                   const struct sw_flow_key *key,
+                                   const struct nlattr *actions,
+                                   const int actions_len)
 {
        struct action_fifo *fifo;
        struct deferred_action *da;
@@ -129,7 +144,8 @@ static struct deferred_action *add_deferred_actions(struct sk_buff *skb,
        da = action_fifo_put(fifo);
        if (da) {
                da->skb = skb;
-               da->actions = attr;
+               da->actions = actions;
+               da->actions_len = actions_len;
                da->pkt_key = *key;
        }
 
@@ -146,6 +162,12 @@ static bool is_flow_key_valid(const struct sw_flow_key *key)
        return !(key->mac_proto & SW_FLOW_KEY_INVALID);
 }
 
+static int clone_execute(struct datapath *dp, struct sk_buff *skb,
+                        struct sw_flow_key *key,
+                        u32 recirc_id,
+                        const struct nlattr *actions, int len,
+                        bool last, bool clone_flow_key);
+
 static void update_ethertype(struct sk_buff *skb, struct ethhdr *hdr,
                             __be16 ethertype)
 {
@@ -908,72 +930,35 @@ static int output_userspace(struct datapath *dp, struct sk_buff *skb,
        return ovs_dp_upcall(dp, skb, key, &upcall, cutlen);
 }
 
+/* When 'last' is true, sample() should always consume the 'skb'.
+ * Otherwise, sample() should keep 'skb' intact regardless of what
+ * actions are executed within sample().
+ */
 static int sample(struct datapath *dp, struct sk_buff *skb,
                  struct sw_flow_key *key, const struct nlattr *attr,
-                 const struct nlattr *actions, int actions_len)
+                 bool last)
 {
-       const struct nlattr *acts_list = NULL;
-       const struct nlattr *a;
-       int rem;
-       u32 cutlen = 0;
-
-       for (a = nla_data(attr), rem = nla_len(attr); rem > 0;
-                a = nla_next(a, &rem)) {
-               u32 probability;
-
-               switch (nla_type(a)) {
-               case OVS_SAMPLE_ATTR_PROBABILITY:
-                       probability = nla_get_u32(a);
-                       if (!probability || prandom_u32() > probability)
-                               return 0;
-                       break;
-
-               case OVS_SAMPLE_ATTR_ACTIONS:
-                       acts_list = a;
-                       break;
-               }
-       }
-
-       rem = nla_len(acts_list);
-       a = nla_data(acts_list);
-
-       /* Actions list is empty, do nothing */
-       if (unlikely(!rem))
+       struct nlattr *actions;
+       struct nlattr *sample_arg;
+       int rem = nla_len(attr);
+       const struct sample_arg *arg;
+       bool clone_flow_key;
+
+       /* The first action is always 'OVS_SAMPLE_ATTR_ARG'. */
+       sample_arg = nla_data(attr);
+       arg = nla_data(sample_arg);
+       actions = nla_next(sample_arg, &rem);
+
+       if ((arg->probability != U32_MAX) &&
+           (!arg->probability || prandom_u32() > arg->probability)) {
+               if (last)
+                       consume_skb(skb);
                return 0;
-
-       /* The only known usage of sample action is having a single user-space
-        * action, or having a truncate action followed by a single user-space
-        * action. Treat this usage as a special case.
-        * The output_userspace() should clone the skb to be sent to the
-        * user space. This skb will be consumed by its caller.
-        */
-       if (unlikely(nla_type(a) == OVS_ACTION_ATTR_TRUNC)) {
-               struct ovs_action_trunc *trunc = nla_data(a);
-
-               if (skb->len > trunc->max_len)
-                       cutlen = skb->len - trunc->max_len;
-
-               a = nla_next(a, &rem);
        }
 
-       if (likely(nla_type(a) == OVS_ACTION_ATTR_USERSPACE &&
-                  nla_is_last(a, rem)))
-               return output_userspace(dp, skb, key, a, actions,
-                                       actions_len, cutlen);
-
-       skb = skb_clone(skb, GFP_ATOMIC);
-       if (!skb)
-               /* Skip the sample action when out of memory. */
-               return 0;
-
-       if (!add_deferred_actions(skb, key, a)) {
-               if (net_ratelimit())
-                       pr_warn("%s: deferred actions limit reached, dropping sample action\n",
-                               ovs_dp_name(dp));
-
-               kfree_skb(skb);
-       }
-       return 0;
+       clone_flow_key = !arg->exec;
+       return clone_execute(dp, skb, key, 0, actions, rem, last,
+                            clone_flow_key);
 }
 
 static void execute_hash(struct sk_buff *skb, struct sw_flow_key *key,
@@ -1084,10 +1069,9 @@ static int execute_masked_set_action(struct sk_buff *skb,
 
 static int execute_recirc(struct datapath *dp, struct sk_buff *skb,
                          struct sw_flow_key *key,
-                         const struct nlattr *a, int rem)
+                         const struct nlattr *a, bool last)
 {
-       struct deferred_action *da;
-       int level;
+       u32 recirc_id;
 
        if (!is_flow_key_valid(key)) {
                int err;
@@ -1098,43 +1082,8 @@ static int execute_recirc(struct datapath *dp, struct sk_buff *skb,
        }
        BUG_ON(!is_flow_key_valid(key));
 
-       if (!nla_is_last(a, rem)) {
-               /* Recirc action is the not the last action
-                * of the action list, need to clone the skb.
-                */
-               skb = skb_clone(skb, GFP_ATOMIC);
-
-               /* Skip the recirc action when out of memory, but
-                * continue on with the rest of the action list.
-                */
-               if (!skb)
-                       return 0;
-       }
-
-       level = this_cpu_read(exec_actions_level);
-       if (level <= OVS_DEFERRED_ACTION_THRESHOLD) {
-               struct recirc_keys *rks = this_cpu_ptr(recirc_keys);
-               struct sw_flow_key *recirc_key = &rks->key[level - 1];
-
-               *recirc_key = *key;
-               recirc_key->recirc_id = nla_get_u32(a);
-               ovs_dp_process_packet(skb, recirc_key);
-
-               return 0;
-       }
-
-       da = add_deferred_actions(skb, key, NULL);
-       if (da) {
-               da->pkt_key.recirc_id = nla_get_u32(a);
-       } else {
-               kfree_skb(skb);
-
-               if (net_ratelimit())
-                       pr_warn("%s: deferred action limit reached, drop recirc action\n",
-                               ovs_dp_name(dp));
-       }
-
-       return 0;
+       recirc_id = nla_get_u32(a);
+       return clone_execute(dp, skb, key, recirc_id, NULL, 0, last, true);
 }
 
 /* Execute a list of actions against 'skb'. */
@@ -1206,9 +1155,11 @@ static int do_execute_actions(struct datapath *dp, struct sk_buff *skb,
                        err = pop_vlan(skb, key);
                        break;
 
-               case OVS_ACTION_ATTR_RECIRC:
-                       err = execute_recirc(dp, skb, key, a, rem);
-                       if (nla_is_last(a, rem)) {
+               case OVS_ACTION_ATTR_RECIRC: {
+                       bool last = nla_is_last(a, rem);
+
+                       err = execute_recirc(dp, skb, key, a, last);
+                       if (last) {
                                /* If this is the last action, the skb has
                                 * been consumed or freed.
                                 * Return immediately.
@@ -1216,6 +1167,7 @@ static int do_execute_actions(struct datapath *dp, struct sk_buff *skb,
                                return err;
                        }
                        break;
+               }
 
                case OVS_ACTION_ATTR_SET:
                        err = execute_set_action(skb, key, nla_data(a));
@@ -1226,9 +1178,15 @@ static int do_execute_actions(struct datapath *dp, struct sk_buff *skb,
                        err = execute_masked_set_action(skb, key, nla_data(a));
                        break;
 
-               case OVS_ACTION_ATTR_SAMPLE:
-                       err = sample(dp, skb, key, a, attr, len);
+               case OVS_ACTION_ATTR_SAMPLE: {
+                       bool last = nla_is_last(a, rem);
+
+                       err = sample(dp, skb, key, a, last);
+                       if (last)
+                               return err;
+
                        break;
+               }
 
                case OVS_ACTION_ATTR_CT:
                        if (!is_flow_key_valid(key)) {
@@ -1264,6 +1222,79 @@ static int do_execute_actions(struct datapath *dp, struct sk_buff *skb,
        return 0;
 }
 
+/* Execute the actions on the clone of the packet. The effect of the
+ * execution does not affect the original 'skb' nor the original 'key'.
+ *
+ * The execution may be deferred in case the actions cannot be executed
+ * immediately.
+ */
+static int clone_execute(struct datapath *dp, struct sk_buff *skb,
+                        struct sw_flow_key *key, u32 recirc_id,
+                        const struct nlattr *actions, int len,
+                        bool last, bool clone_flow_key)
+{
+       struct deferred_action *da;
+       struct sw_flow_key *clone;
+
+       skb = last ? skb : skb_clone(skb, GFP_ATOMIC);
+       if (!skb) {
+               /* Out of memory, skip this action. */
+               return 0;
+       }
+
+       /* When clone_flow_key is false, the 'key' will not be changed
+        * by the actions, so the 'key' can be used directly.
+        * Otherwise, try to clone key from the next recursion level of
+        * 'flow_keys'. If clone is successful, execute the actions
+        * without deferring.
+        */
+       clone = clone_flow_key ? clone_key(key) : key;
+       if (clone) {
+               int err = 0;
+
+               if (actions) { /* Sample action */
+                       if (clone_flow_key)
+                               __this_cpu_inc(exec_actions_level);
+
+                       err = do_execute_actions(dp, skb, clone,
+                                                actions, len);
+
+                       if (clone_flow_key)
+                               __this_cpu_dec(exec_actions_level);
+               } else { /* Recirc action */
+                       clone->recirc_id = recirc_id;
+                       ovs_dp_process_packet(skb, clone);
+               }
+               return err;
+       }
+
+       /* Out of 'flow_keys' space. Defer actions */
+       da = add_deferred_actions(skb, key, actions, len);
+       if (da) {
+               if (!actions) { /* Recirc action */
+                       key = &da->pkt_key;
+                       key->recirc_id = recirc_id;
+               }
+       } else {
+               /* Out of per CPU action FIFO space. Drop the 'skb' and
+                * log an error.
+                */
+               kfree_skb(skb);
+
+               if (net_ratelimit()) {
+                       if (actions) { /* Sample action */
+                               pr_warn("%s: deferred action limit reached, drop sample action\n",
+                                       ovs_dp_name(dp));
+                       } else {  /* Recirc action */
+                               pr_warn("%s: deferred action limit reached, drop recirc action\n",
+                                       ovs_dp_name(dp));
+                       }
+               }
+       }
+       return 0;
+}
+
 static void process_deferred_actions(struct datapath *dp)
 {
        struct action_fifo *fifo = this_cpu_ptr(action_fifos);
@@ -1278,10 +1309,10 @@ static void process_deferred_actions(struct datapath *dp)
                struct sk_buff *skb = da->skb;
                struct sw_flow_key *key = &da->pkt_key;
                const struct nlattr *actions = da->actions;
+               int actions_len = da->actions_len;
 
                if (actions)
-                       do_execute_actions(dp, skb, key, actions,
-                                          nla_len(actions));
+                       do_execute_actions(dp, skb, key, actions, actions_len);
                else
                        ovs_dp_process_packet(skb, key);
        } while (!action_fifo_is_empty(fifo));
@@ -1323,8 +1354,8 @@ int action_fifos_init(void)
        if (!action_fifos)
                return -ENOMEM;
 
-       recirc_keys = alloc_percpu(struct recirc_keys);
-       if (!recirc_keys) {
+       flow_keys = alloc_percpu(struct action_flow_keys);
+       if (!flow_keys) {
                free_percpu(action_fifos);
                return -ENOMEM;
        }
@@ -1335,5 +1366,5 @@ int action_fifos_init(void)
 void action_fifos_exit(void)
 {
        free_percpu(action_fifos);
-       free_percpu(recirc_keys);
+       free_percpu(flow_keys);
 }
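The clone_key() helper above gives each recursion level one preallocated per-CPU scratch slot and signals exhaustion with NULL, at which point clone_execute() falls back to the deferred-action FIFO. A standalone sketch of that slot-per-level idea (illustrative names, single-threaded stand-in for the per-CPU arrays, not kernel code):

#include <stdio.h>

#define DEPTH_THRESHOLD 4       /* stands in for OVS_DEFERRED_ACTION_THRESHOLD */

struct flow_key { unsigned int recirc_id; };

static struct flow_key scratch[DEPTH_THRESHOLD];
static int level = 1;           /* current nesting depth, 1-based */

static struct flow_key *clone_key_sketch(const struct flow_key *key)
{
        if (level > DEPTH_THRESHOLD)
                return NULL;    /* out of slots: caller must defer */
        scratch[level - 1] = *key;
        return &scratch[level - 1];
}

int main(void)
{
        struct flow_key k = { .recirc_id = 7 };
        struct flow_key *c = clone_key_sketch(&k);

        printf("cloned recirc_id=%u\n", c ? c->recirc_id : 0u);
        return 0;
}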
index e0a87776a010a3be352c0b2b71859e56c75a6b6f..7b2c2fce408a02d4251f03a2e3f0b4d9e7fccb80 100644 (file)
@@ -643,8 +643,8 @@ static bool skb_nfct_cached(struct net *net,
                 */
                if (nf_ct_is_confirmed(ct))
                        nf_ct_delete(ct, 0, 0);
-               else
-                       nf_conntrack_put(&ct->ct_general);
+
+               nf_conntrack_put(&ct->ct_general);
                nf_ct_set(skb, NULL, 0);
                return false;
        }
index 1c6e9377436df1e93081c825c142b712a811277b..da931bdef8a7b5f25c189a5fe76e5fc2c4c1efdf 100644 (file)
@@ -34,8 +34,6 @@
 #define DP_MAX_PORTS           USHRT_MAX
 #define DP_VPORT_HASH_BUCKETS  1024
 
-#define SAMPLE_ACTION_DEPTH 3
-
 /**
  * struct dp_stats_percpu - per-cpu packet processing statistics for a given
  * datapath.
index 9d4bb8eb63f25c2e9e9e5f4190e6c943a32be547..3f76cb765e5bb71d18c3e9a4c220ed9fa3906186 100644 (file)
@@ -527,7 +527,7 @@ static int key_extract(struct sk_buff *skb, struct sw_flow_key *key)
 
        /* Link layer. */
        clear_vlan(key);
-       if (key->mac_proto == MAC_PROTO_NONE) {
+       if (ovs_key_mac_proto(key) == MAC_PROTO_NONE) {
                if (unlikely(eth_type_vlan(skb->protocol)))
                        return -EINVAL;
 
@@ -745,7 +745,13 @@ static int key_extract(struct sk_buff *skb, struct sw_flow_key *key)
 
 int ovs_flow_key_update(struct sk_buff *skb, struct sw_flow_key *key)
 {
-       return key_extract(skb, key);
+       int res;
+
+       res = key_extract(skb, key);
+       if (!res)
+               key->mac_proto &= ~SW_FLOW_KEY_INVALID;
+
+       return res;
 }
 
 static int key_extract_mac_proto(struct sk_buff *skb)
index 6f5fa50f716d066333b30edde43e5165b9fe94be..df82b81a9b357250b725b47660dc211ea9e957ae 100644 (file)
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2007-2014 Nicira, Inc.
+ * Copyright (c) 2007-2017 Nicira, Inc.
  *
  * This program is free software; you can redistribute it and/or
  * modify it under the terms of version 2 of the GNU General Public
@@ -59,6 +59,39 @@ struct ovs_len_tbl {
 #define OVS_ATTR_NESTED -1
 #define OVS_ATTR_VARIABLE -2
 
+static bool actions_may_change_flow(const struct nlattr *actions)
+{
+       struct nlattr *nla;
+       int rem;
+
+       nla_for_each_nested(nla, actions, rem) {
+               u16 action = nla_type(nla);
+
+               switch (action) {
+               case OVS_ACTION_ATTR_OUTPUT:
+               case OVS_ACTION_ATTR_RECIRC:
+               case OVS_ACTION_ATTR_TRUNC:
+               case OVS_ACTION_ATTR_USERSPACE:
+                       break;
+
+               case OVS_ACTION_ATTR_CT:
+               case OVS_ACTION_ATTR_HASH:
+               case OVS_ACTION_ATTR_POP_ETH:
+               case OVS_ACTION_ATTR_POP_MPLS:
+               case OVS_ACTION_ATTR_POP_VLAN:
+               case OVS_ACTION_ATTR_PUSH_ETH:
+               case OVS_ACTION_ATTR_PUSH_MPLS:
+               case OVS_ACTION_ATTR_PUSH_VLAN:
+               case OVS_ACTION_ATTR_SAMPLE:
+               case OVS_ACTION_ATTR_SET:
+               case OVS_ACTION_ATTR_SET_MASKED:
+               default:
+                       return true;
+               }
+       }
+       return false;
+}
+
 static void update_range(struct sw_flow_match *match,
                         size_t offset, size_t size, bool is_mask)
 {
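The actions_may_change_flow() whitelist above treats only OUTPUT, RECIRC, TRUNC and USERSPACE as read-only; every other type, including ones not yet known, falls through to `return true`, so future actions default to the safe flow-key-cloning path. A minimal sketch of that conservative default (hypothetical enum, not the OVS attribute set):

#include <stdbool.h>
#include <stdio.h>

enum act { ACT_OUTPUT, ACT_TRUNC, ACT_SET };

static bool may_change_flow(enum act a)
{
        switch (a) {
        case ACT_OUTPUT:
        case ACT_TRUNC:
                return false;   /* known read-only actions */
        default:
                return true;    /* anything else: assume it writes */
        }
}

int main(void)
{
        printf("%d %d\n", may_change_flow(ACT_OUTPUT),
               may_change_flow(ACT_SET));
        return 0;
}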
@@ -604,7 +637,7 @@ static int ip_tun_from_nlattr(const struct nlattr *attr,
                        ipv4 = true;
                        break;
                case OVS_TUNNEL_KEY_ATTR_IPV6_SRC:
-                       SW_FLOW_KEY_PUT(match, tun_key.u.ipv6.dst,
+                       SW_FLOW_KEY_PUT(match, tun_key.u.ipv6.src,
                                        nla_get_in6_addr(a), is_mask);
                        ipv6 = true;
                        break;
@@ -665,6 +698,8 @@ static int ip_tun_from_nlattr(const struct nlattr *attr,
                        tun_flags |= TUNNEL_VXLAN_OPT;
                        opts_type = type;
                        break;
+               case OVS_TUNNEL_KEY_ATTR_PAD:
+                       break;
                default:
                        OVS_NLERR(log, "Unknown IP tunnel attribute %d",
                                  type);
@@ -2021,18 +2056,20 @@ static inline void add_nested_action_end(struct sw_flow_actions *sfa,
 
 static int __ovs_nla_copy_actions(struct net *net, const struct nlattr *attr,
                                  const struct sw_flow_key *key,
-                                 int depth, struct sw_flow_actions **sfa,
+                                 struct sw_flow_actions **sfa,
                                  __be16 eth_type, __be16 vlan_tci, bool log);
 
 static int validate_and_copy_sample(struct net *net, const struct nlattr *attr,
-                                   const struct sw_flow_key *key, int depth,
+                                   const struct sw_flow_key *key,
                                    struct sw_flow_actions **sfa,
-                                   __be16 eth_type, __be16 vlan_tci, bool log)
+                                   __be16 eth_type, __be16 vlan_tci,
+                                   bool log, bool last)
 {
        const struct nlattr *attrs[OVS_SAMPLE_ATTR_MAX + 1];
        const struct nlattr *probability, *actions;
        const struct nlattr *a;
-       int rem, start, err, st_acts;
+       int rem, start, err;
+       struct sample_arg arg;
 
        memset(attrs, 0, sizeof(attrs));
        nla_for_each_nested(a, attr, rem) {
@@ -2056,20 +2093,32 @@ static int validate_and_copy_sample(struct net *net, const struct nlattr *attr,
        start = add_nested_action_start(sfa, OVS_ACTION_ATTR_SAMPLE, log);
        if (start < 0)
                return start;
-       err = ovs_nla_add_action(sfa, OVS_SAMPLE_ATTR_PROBABILITY,
-                                nla_data(probability), sizeof(u32), log);
+
+       /* When both skb and flow may be changed, put the sample
+        * into a deferred fifo. On the other hand, if only skb
+        * may be modified, the actions can be executed in place.
+        *
+        * Do this analysis at flow installation time.
+        * Set 'clone_action->exec' to true if the actions can be
+        * executed without being deferred.
+        *
+        * If the sample is the last action, it can always be executed
+        * rather than deferred.
+        */
+       arg.exec = last || !actions_may_change_flow(actions);
+       arg.probability = nla_get_u32(probability);
+
+       err = ovs_nla_add_action(sfa, OVS_SAMPLE_ATTR_ARG, &arg, sizeof(arg),
+                                log);
        if (err)
                return err;
-       st_acts = add_nested_action_start(sfa, OVS_SAMPLE_ATTR_ACTIONS, log);
-       if (st_acts < 0)
-               return st_acts;
 
-       err = __ovs_nla_copy_actions(net, actions, key, depth + 1, sfa,
+       err = __ovs_nla_copy_actions(net, actions, key, sfa,
                                     eth_type, vlan_tci, log);
+
        if (err)
                return err;
 
-       add_nested_action_end(*sfa, st_acts);
        add_nested_action_end(*sfa, start);
 
        return 0;
@@ -2406,16 +2455,13 @@ static int copy_action(const struct nlattr *from,
 
 static int __ovs_nla_copy_actions(struct net *net, const struct nlattr *attr,
                                  const struct sw_flow_key *key,
-                                 int depth, struct sw_flow_actions **sfa,
+                                 struct sw_flow_actions **sfa,
                                  __be16 eth_type, __be16 vlan_tci, bool log)
 {
        u8 mac_proto = ovs_key_mac_proto(key);
        const struct nlattr *a;
        int rem, err;
 
-       if (depth >= SAMPLE_ACTION_DEPTH)
-               return -EOVERFLOW;
-
        nla_for_each_nested(a, attr, rem) {
                /* Expected argument lengths, (u32)-1 for variable length. */
                static const u32 action_lens[OVS_ACTION_ATTR_MAX + 1] = {
@@ -2553,13 +2599,17 @@ static int __ovs_nla_copy_actions(struct net *net, const struct nlattr *attr,
                                return err;
                        break;
 
-               case OVS_ACTION_ATTR_SAMPLE:
-                       err = validate_and_copy_sample(net, a, key, depth, sfa,
-                                                      eth_type, vlan_tci, log);
+               case OVS_ACTION_ATTR_SAMPLE: {
+                       bool last = nla_is_last(a, rem);
+
+                       err = validate_and_copy_sample(net, a, key, sfa,
+                                                      eth_type, vlan_tci,
+                                                      log, last);
                        if (err)
                                return err;
                        skip_copy = true;
                        break;
+               }
 
                case OVS_ACTION_ATTR_CT:
                        err = ovs_ct_copy_action(net, a, key, sfa, log);
@@ -2613,7 +2663,7 @@ int ovs_nla_copy_actions(struct net *net, const struct nlattr *attr,
                return PTR_ERR(*sfa);
 
        (*sfa)->orig_len = nla_len(attr);
-       err = __ovs_nla_copy_actions(net, attr, key, 0, sfa, key->eth.type,
+       err = __ovs_nla_copy_actions(net, attr, key, sfa, key->eth.type,
                                     key->eth.vlan.tci, log);
        if (err)
                ovs_nla_free_flow_actions(*sfa);
@@ -2621,39 +2671,44 @@ int ovs_nla_copy_actions(struct net *net, const struct nlattr *attr,
        return err;
 }
 
-static int sample_action_to_attr(const struct nlattr *attr, struct sk_buff *skb)
+static int sample_action_to_attr(const struct nlattr *attr,
+                                struct sk_buff *skb)
 {
-       const struct nlattr *a;
-       struct nlattr *start;
-       int err = 0, rem;
+       struct nlattr *start, *ac_start = NULL, *sample_arg;
+       int err = 0, rem = nla_len(attr);
+       const struct sample_arg *arg;
+       struct nlattr *actions;
 
        start = nla_nest_start(skb, OVS_ACTION_ATTR_SAMPLE);
        if (!start)
                return -EMSGSIZE;
 
-       nla_for_each_nested(a, attr, rem) {
-               int type = nla_type(a);
-               struct nlattr *st_sample;
+       sample_arg = nla_data(attr);
+       arg = nla_data(sample_arg);
+       actions = nla_next(sample_arg, &rem);
 
-               switch (type) {
-               case OVS_SAMPLE_ATTR_PROBABILITY:
-                       if (nla_put(skb, OVS_SAMPLE_ATTR_PROBABILITY,
-                                   sizeof(u32), nla_data(a)))
-                               return -EMSGSIZE;
-                       break;
-               case OVS_SAMPLE_ATTR_ACTIONS:
-                       st_sample = nla_nest_start(skb, OVS_SAMPLE_ATTR_ACTIONS);
-                       if (!st_sample)
-                               return -EMSGSIZE;
-                       err = ovs_nla_put_actions(nla_data(a), nla_len(a), skb);
-                       if (err)
-                               return err;
-                       nla_nest_end(skb, st_sample);
-                       break;
-               }
+       if (nla_put_u32(skb, OVS_SAMPLE_ATTR_PROBABILITY, arg->probability)) {
+               err = -EMSGSIZE;
+               goto out;
+       }
+
+       ac_start = nla_nest_start(skb, OVS_SAMPLE_ATTR_ACTIONS);
+       if (!ac_start) {
+               err = -EMSGSIZE;
+               goto out;
+       }
+
+       err = ovs_nla_put_actions(actions, rem, skb);
+
+out:
+       if (err) {
+               nla_nest_cancel(skb, ac_start);
+               nla_nest_cancel(skb, start);
+       } else {
+               nla_nest_end(skb, ac_start);
+               nla_nest_end(skb, start);
        }
 
-       nla_nest_end(skb, start);
        return err;
 }
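With the new layout, a sample action carries a leading OVS_SAMPLE_ATTR_ARG (struct sample_arg: a probability scaled to the full u32 range plus the precomputed 'exec' flag) followed by the nested action list, and sample_action_to_attr() above converts that back to the PROBABILITY/ACTIONS form userspace expects. A userspace sketch of the sampling test itself, where U32_MAX means "always sample" (rand_u32() stands in for the kernel's prandom_u32()):

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

static uint32_t rand_u32(void)
{
        return ((uint32_t)rand() << 16) ^ (uint32_t)rand();
}

static int should_sample(uint32_t probability)
{
        if (probability == UINT32_MAX)
                return 1;       /* "always sample": skip the random test */
        if (!probability || rand_u32() > probability)
                return 0;       /* skip this packet */
        return 1;
}

int main(void)
{
        /* 50% sampling rate: half of the u32 range */
        printf("sampled: %d\n", should_sample(UINT32_MAX / 2));
        return 0;
}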
 
index a0dbe7ca8f724cd33b675ea15fb263d82041994c..8489beff5c25c971067f38833ed4a790a98dd86e 100644 (file)
@@ -3665,6 +3665,8 @@ packet_setsockopt(struct socket *sock, int level, int optname, char __user *optv
                        return -EBUSY;
                if (copy_from_user(&val, optval, sizeof(val)))
                        return -EFAULT;
+               if (val > INT_MAX)
+                       return -EINVAL;
                po->tp_reserve = val;
                return 0;
        }
@@ -4193,8 +4195,8 @@ static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u,
                if (unlikely(!PAGE_ALIGNED(req->tp_block_size)))
                        goto out;
                if (po->tp_version >= TPACKET_V3 &&
-                   (int)(req->tp_block_size -
-                         BLK_PLUS_PRIV(req_u->req3.tp_sizeof_priv)) <= 0)
+                   req->tp_block_size <=
+                         BLK_PLUS_PRIV((u64)req_u->req3.tp_sizeof_priv))
                        goto out;
                if (unlikely(req->tp_frame_size < po->tp_hdrlen +
                                        po->tp_reserve))
@@ -4205,6 +4207,8 @@ static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u,
                rb->frames_per_block = req->tp_block_size / req->tp_frame_size;
                if (unlikely(rb->frames_per_block == 0))
                        goto out;
+               if (unlikely(req->tp_block_size > UINT_MAX / req->tp_block_nr))
+                       goto out;
                if (unlikely((rb->frames_per_block * req->tp_block_nr) !=
                                        req->tp_frame_nr))
                        goto out;
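All three af_packet hunks are integer-overflow guards: tp_reserve is capped at INT_MAX, the BLK_PLUS_PRIV comparison is done in u64, and block_size * block_nr is rejected before it can wrap a u32. A plain-C sketch of the last check (illustrative values; the kernel relies on earlier validation for the zero cases):

#include <limits.h>
#include <stdio.h>

static int ring_size_ok(unsigned int block_size, unsigned int block_nr)
{
        if (block_nr && block_size > UINT_MAX / block_nr)
                return 0;       /* block_size * block_nr would wrap a u32 */
        return 1;
}

int main(void)
{
        printf("%d\n", ring_size_ok(1u << 20, 8));  /* 8 MiB total: ok   */
        printf("%d\n", ring_size_ok(1u << 31, 4));  /* would overflow: 0 */
        return 0;
}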
index 222bedcd95754c80644748daba365dc00b10fd8c..e81537991ddf0d67e6eca19fc9eb6f442d3c06a4 100644 (file)
@@ -772,7 +772,8 @@ static void pep_sock_close(struct sock *sk, long timeout)
        sock_put(sk);
 }
 
-static struct sock *pep_sock_accept(struct sock *sk, int flags, int *errp)
+static struct sock *pep_sock_accept(struct sock *sk, int flags, int *errp,
+                                   bool kern)
 {
        struct pep_sock *pn = pep_sk(sk), *newpn;
        struct sock *newsk = NULL;
@@ -846,7 +847,8 @@ static struct sock *pep_sock_accept(struct sock *sk, int flags, int *errp)
        }
 
        /* Create a new to-be-accepted sock */
-       newsk = sk_alloc(sock_net(sk), PF_PHONET, GFP_KERNEL, sk->sk_prot, 0);
+       newsk = sk_alloc(sock_net(sk), PF_PHONET, GFP_KERNEL, sk->sk_prot,
+                        kern);
        if (!newsk) {
                pep_reject_conn(sk, skb, PN_PIPE_ERR_OVERLOAD, GFP_KERNEL);
                err = -ENOBUFS;
index a6c8da3ee89349989a9f23e095b98293050da73a..64634e3ec2fc78ebb84ad8873f6e446d06844493 100644 (file)
@@ -305,7 +305,7 @@ out:
 }
 
 static int pn_socket_accept(struct socket *sock, struct socket *newsock,
-                               int flags)
+                           int flags, bool kern)
 {
        struct sock *sk = sock->sk;
        struct sock *newsk;
@@ -314,7 +314,7 @@ static int pn_socket_accept(struct socket *sock, struct socket *newsock,
        if (unlikely(sk->sk_state != TCP_LISTEN))
                return -EINVAL;
 
-       newsk = sk->sk_prot->accept(sk, flags, &err);
+       newsk = sk->sk_prot->accept(sk, flags, &err, kern);
        if (!newsk)
                return err;
 
index b83c6807a5ae5cedc63b073c7f928b1776f42524..326fd97444f5bed5c82af7d632d8a4424f9908d3 100644 (file)
@@ -16,7 +16,7 @@ if QRTR
 
 config QRTR_SMD
        tristate "SMD IPC Router channels"
-       depends on QCOM_SMD || (COMPILE_TEST && QCOM_SMD=n)
+       depends on RPMSG || (COMPILE_TEST && RPMSG=n)
        ---help---
          Say Y here to support SMD based ipcrouter channels.  SMD is the
          most common transport for IPC Router.
index 0d11132b3370a4024be644dede74a77d188a4f64..50615d5efac1529a0fd617c2692b1e9419da7137 100644 (file)
 
 #include <linux/module.h>
 #include <linux/skbuff.h>
-#include <linux/soc/qcom/smd.h>
+#include <linux/rpmsg.h>
 
 #include "qrtr.h"
 
 struct qrtr_smd_dev {
        struct qrtr_endpoint ep;
-       struct qcom_smd_channel *channel;
+       struct rpmsg_endpoint *channel;
        struct device *dev;
 };
 
 /* from smd to qrtr */
-static int qcom_smd_qrtr_callback(struct qcom_smd_channel *channel,
-                                 const void *data, size_t len)
+static int qcom_smd_qrtr_callback(struct rpmsg_device *rpdev,
+                                 void *data, int len, void *priv, u32 addr)
 {
-       struct qrtr_smd_dev *qdev = qcom_smd_get_drvdata(channel);
+       struct qrtr_smd_dev *qdev = dev_get_drvdata(&rpdev->dev);
        int rc;
 
        if (!qdev)
@@ -54,7 +54,7 @@ static int qcom_smd_qrtr_send(struct qrtr_endpoint *ep, struct sk_buff *skb)
        if (rc)
                goto out;
 
-       rc = qcom_smd_send(qdev->channel, skb->data, skb->len);
+       rc = rpmsg_send(qdev->channel, skb->data, skb->len);
 
 out:
        if (rc)
@@ -64,57 +64,55 @@ out:
        return rc;
 }
 
-static int qcom_smd_qrtr_probe(struct qcom_smd_device *sdev)
+static int qcom_smd_qrtr_probe(struct rpmsg_device *rpdev)
 {
        struct qrtr_smd_dev *qdev;
        int rc;
 
-       qdev = devm_kzalloc(&sdev->dev, sizeof(*qdev), GFP_KERNEL);
+       qdev = devm_kzalloc(&rpdev->dev, sizeof(*qdev), GFP_KERNEL);
        if (!qdev)
                return -ENOMEM;
 
-       qdev->channel = sdev->channel;
-       qdev->dev = &sdev->dev;
+       qdev->channel = rpdev->ept;
+       qdev->dev = &rpdev->dev;
        qdev->ep.xmit = qcom_smd_qrtr_send;
 
        rc = qrtr_endpoint_register(&qdev->ep, QRTR_EP_NID_AUTO);
        if (rc)
                return rc;
 
-       qcom_smd_set_drvdata(sdev->channel, qdev);
-       dev_set_drvdata(&sdev->dev, qdev);
+       dev_set_drvdata(&rpdev->dev, qdev);
 
-       dev_dbg(&sdev->dev, "Qualcomm SMD QRTR driver probed\n");
+       dev_dbg(&rpdev->dev, "Qualcomm SMD QRTR driver probed\n");
 
        return 0;
 }
 
-static void qcom_smd_qrtr_remove(struct qcom_smd_device *sdev)
+static void qcom_smd_qrtr_remove(struct rpmsg_device *rpdev)
 {
-       struct qrtr_smd_dev *qdev = dev_get_drvdata(&sdev->dev);
+       struct qrtr_smd_dev *qdev = dev_get_drvdata(&rpdev->dev);
 
        qrtr_endpoint_unregister(&qdev->ep);
 
-       dev_set_drvdata(&sdev->dev, NULL);
+       dev_set_drvdata(&rpdev->dev, NULL);
 }
 
-static const struct qcom_smd_id qcom_smd_qrtr_smd_match[] = {
+static const struct rpmsg_device_id qcom_smd_qrtr_smd_match[] = {
        { "IPCRTR" },
        {}
 };
 
-static struct qcom_smd_driver qcom_smd_qrtr_driver = {
+static struct rpmsg_driver qcom_smd_qrtr_driver = {
        .probe = qcom_smd_qrtr_probe,
        .remove = qcom_smd_qrtr_remove,
        .callback = qcom_smd_qrtr_callback,
-       .smd_match_table = qcom_smd_qrtr_smd_match,
-       .driver = {
+       .id_table = qcom_smd_qrtr_smd_match,
+       .drv = {
                .name = "qcom_smd_qrtr",
-               .owner = THIS_MODULE,
        },
 };
 
-module_qcom_smd_driver(qcom_smd_qrtr_driver);
+module_rpmsg_driver(qcom_smd_qrtr_driver);
 
 MODULE_DESCRIPTION("Qualcomm IPC-Router SMD interface driver");
 MODULE_LICENSE("GPL v2");
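The qrtr transport above is ported from the Qualcomm-specific SMD API to the generic rpmsg framework: the channel becomes an rpmsg endpoint, probe/remove take an rpmsg_device, and matching moves to an rpmsg_device_id table. A minimal sketch of that driver shape, using only the rpmsg calls that appear in the diff (hypothetical channel and driver names; payload handling omitted):

#include <linux/module.h>
#include <linux/rpmsg.h>

static int demo_qrtr_cb(struct rpmsg_device *rpdev, void *data, int len,
                        void *priv, u32 addr)
{
        dev_info(&rpdev->dev, "rx %d bytes\n", len);
        return 0;
}

static int demo_qrtr_probe(struct rpmsg_device *rpdev)
{
        char msg[] = "hello";

        /* rpdev->ept is the channel's default endpoint */
        return rpmsg_send(rpdev->ept, msg, sizeof(msg));
}

static const struct rpmsg_device_id demo_qrtr_match[] = {
        { "demo-channel" },             /* hypothetical channel name */
        {}
};

static struct rpmsg_driver demo_qrtr_driver = {
        .probe    = demo_qrtr_probe,
        .callback = demo_qrtr_cb,
        .id_table = demo_qrtr_match,
        .drv      = { .name = "demo_qrtr" },
};
module_rpmsg_driver(demo_qrtr_driver);

MODULE_LICENSE("GPL v2");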
index 0e04dcceb1d416438be8bb40fc68253f336f631d..6a5ebdea7d2e9eb3b624a01a8a87ef601e3b2f13 100644 (file)
@@ -333,11 +333,19 @@ void rds_conn_shutdown(struct rds_conn_path *cp)
                rds_conn_path_reset(cp);
 
                if (!rds_conn_path_transition(cp, RDS_CONN_DISCONNECTING,
+                                             RDS_CONN_DOWN) &&
+                   !rds_conn_path_transition(cp, RDS_CONN_ERROR,
                                              RDS_CONN_DOWN)) {
                        /* This can happen - eg when we're in the middle of tearing
                         * down the connection, and someone unloads the rds module.
-                        * Quite reproduceable with loopback connections.
+                        * Quite reproducible with loopback connections.
                         * Mostly harmless.
+                        *
+                        * Note that this also happens with rds-tcp because
+                        * mode from rds_tcp_state_change() on receipt of
+                        * mode from rds_tcp_state change on the receipt of
+                        * a FIN, thus we need to recheck for RDS_CONN_ERROR
+                        * here.
                         */
                        rds_conn_path_error(cp, "%s: failed to transition "
                                            "to state DOWN, current state "
@@ -429,6 +437,7 @@ void rds_conn_destroy(struct rds_connection *conn)
         */
        rds_cong_remove_conn(conn);
 
+       put_net(conn->c_net);
        kmem_cache_free(rds_conn_slab, conn);
 
        spin_lock_irqsave(&rds_conn_lock, flags);
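Switching c_net from possible_net_t to a plain struct net pointer only works because rds_conn_net_set() now takes a get_net() reference that rds_conn_destroy() drops with put_net(), pinning the namespace for the connection's lifetime. A userspace sketch of that store-with-reference pairing (toy refcount, illustrative names):

#include <stdio.h>

struct net_ref { int refcount; };

static struct net_ref *get_ref(struct net_ref *n)
{
        n->refcount++;
        return n;
}

static void put_ref(struct net_ref *n)
{
        if (--n->refcount == 0)
                printf("namespace freed\n");
}

struct conn { struct net_ref *net; };

int main(void)
{
        struct net_ref ns = { .refcount = 1 };
        struct conn c;

        c.net = get_ref(&ns);   /* rds_conn_net_set(): take a reference */
        put_ref(c.net);         /* rds_conn_destroy(): drop it          */
        put_ref(&ns);           /* original owner's reference           */
        return 0;
}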
index ce3775abc6e7a1d30e335aaea749a9840e949786..80fb6f63e768d3461c47533615c875526bb8bab9 100644 (file)
@@ -442,7 +442,7 @@ static int rds_ib_setup_qp(struct rds_connection *conn)
                ic->i_send_cq = NULL;
                ibdev_put_vector(rds_ibdev, ic->i_scq_vector);
                rdsdebug("ib_create_cq send failed: %d\n", ret);
-               goto out;
+               goto rds_ibdev_out;
        }
 
        ic->i_rcq_vector = ibdev_get_unused_vector(rds_ibdev);
@@ -456,19 +456,19 @@ static int rds_ib_setup_qp(struct rds_connection *conn)
                ic->i_recv_cq = NULL;
                ibdev_put_vector(rds_ibdev, ic->i_rcq_vector);
                rdsdebug("ib_create_cq recv failed: %d\n", ret);
-               goto out;
+               goto send_cq_out;
        }
 
        ret = ib_req_notify_cq(ic->i_send_cq, IB_CQ_NEXT_COMP);
        if (ret) {
                rdsdebug("ib_req_notify_cq send failed: %d\n", ret);
-               goto out;
+               goto recv_cq_out;
        }
 
        ret = ib_req_notify_cq(ic->i_recv_cq, IB_CQ_SOLICITED);
        if (ret) {
                rdsdebug("ib_req_notify_cq recv failed: %d\n", ret);
-               goto out;
+               goto recv_cq_out;
        }
 
        /* XXX negotiate max send/recv with remote? */
@@ -494,7 +494,7 @@ static int rds_ib_setup_qp(struct rds_connection *conn)
        ret = rdma_create_qp(ic->i_cm_id, ic->i_pd, &attr);
        if (ret) {
                rdsdebug("rdma_create_qp failed: %d\n", ret);
-               goto out;
+               goto recv_cq_out;
        }
 
        ic->i_send_hdrs = ib_dma_alloc_coherent(dev,
@@ -504,7 +504,7 @@ static int rds_ib_setup_qp(struct rds_connection *conn)
        if (!ic->i_send_hdrs) {
                ret = -ENOMEM;
                rdsdebug("ib_dma_alloc_coherent send failed\n");
-               goto out;
+               goto qp_out;
        }
 
        ic->i_recv_hdrs = ib_dma_alloc_coherent(dev,
@@ -514,7 +514,7 @@ static int rds_ib_setup_qp(struct rds_connection *conn)
        if (!ic->i_recv_hdrs) {
                ret = -ENOMEM;
                rdsdebug("ib_dma_alloc_coherent recv failed\n");
-               goto out;
+               goto send_hdrs_dma_out;
        }
 
        ic->i_ack = ib_dma_alloc_coherent(dev, sizeof(struct rds_header),
@@ -522,7 +522,7 @@ static int rds_ib_setup_qp(struct rds_connection *conn)
        if (!ic->i_ack) {
                ret = -ENOMEM;
                rdsdebug("ib_dma_alloc_coherent ack failed\n");
-               goto out;
+               goto recv_hdrs_dma_out;
        }
 
        ic->i_sends = vzalloc_node(ic->i_send_ring.w_nr * sizeof(struct rds_ib_send_work),
@@ -530,7 +530,7 @@ static int rds_ib_setup_qp(struct rds_connection *conn)
        if (!ic->i_sends) {
                ret = -ENOMEM;
                rdsdebug("send allocation failed\n");
-               goto out;
+               goto ack_dma_out;
        }
 
        ic->i_recvs = vzalloc_node(ic->i_recv_ring.w_nr * sizeof(struct rds_ib_recv_work),
@@ -538,7 +538,7 @@ static int rds_ib_setup_qp(struct rds_connection *conn)
        if (!ic->i_recvs) {
                ret = -ENOMEM;
                rdsdebug("recv allocation failed\n");
-               goto out;
+               goto sends_out;
        }
 
        rds_ib_recv_init_ack(ic);
@@ -546,8 +546,33 @@ static int rds_ib_setup_qp(struct rds_connection *conn)
        rdsdebug("conn %p pd %p cq %p %p\n", conn, ic->i_pd,
                 ic->i_send_cq, ic->i_recv_cq);
 
-out:
+       return ret;
+
+sends_out:
+       vfree(ic->i_sends);
+ack_dma_out:
+       ib_dma_free_coherent(dev, sizeof(struct rds_header),
+                            ic->i_ack, ic->i_ack_dma);
+recv_hdrs_dma_out:
+       ib_dma_free_coherent(dev, ic->i_recv_ring.w_nr *
+                                       sizeof(struct rds_header),
+                                       ic->i_recv_hdrs, ic->i_recv_hdrs_dma);
+send_hdrs_dma_out:
+       ib_dma_free_coherent(dev, ic->i_send_ring.w_nr *
+                                       sizeof(struct rds_header),
+                                       ic->i_send_hdrs, ic->i_send_hdrs_dma);
+qp_out:
+       rdma_destroy_qp(ic->i_cm_id);
+recv_cq_out:
+       if (!ib_destroy_cq(ic->i_recv_cq))
+               ic->i_recv_cq = NULL;
+send_cq_out:
+       if (!ib_destroy_cq(ic->i_send_cq))
+               ic->i_send_cq = NULL;
+rds_ibdev_out:
+       rds_ib_remove_conn(rds_ibdev, conn);
        rds_ib_dev_put(rds_ibdev);
+
        return ret;
 }
 
@@ -677,9 +702,8 @@ int rds_ib_cm_handle_connect(struct rdma_cm_id *cm_id,
                event->param.conn.initiator_depth);
 
        /* rdma_accept() calls rdma_reject() internally if it fails */
-       err = rdma_accept(cm_id, &conn_param);
-       if (err)
-               rds_ib_conn_error(conn, "rdma_accept failed (%d)\n", err);
+       if (rdma_accept(cm_id, &conn_param))
+               rds_ib_conn_error(conn, "rdma_accept failed\n");
 
 out:
        if (conn)
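rds_ib_setup_qp() previously funnelled every failure to a single `out:` label that freed nothing; the ladder of labels added above releases exactly what was acquired so far, in reverse order. The shape of that pattern in standalone C (malloc/free standing in for the CQ/QP/DMA resources):

#include <stdio.h>
#include <stdlib.h>

static int setup_sketch(void)
{
        void *a, *b, *c;

        a = malloc(16);
        if (!a)
                return -1;
        b = malloc(16);
        if (!b)
                goto free_a;            /* only 'a' exists so far */
        c = malloc(16);
        if (!c)
                goto free_b;            /* unwind 'b', then fall into 'a' */

        printf("acquired a, b and c\n");
        free(c);
        free(b);
        free(a);
        return 0;

free_b:
        free(b);
free_a:
        free(a);
        return -1;
}

int main(void)
{
        return setup_sketch() ? 1 : 0;
}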
index 4fe8f4fec4eee66c826b5beb5b02ae61d19b483a..86ef907067bb084e01ac4f8d5f00d0c17f40ac55 100644 (file)
@@ -78,17 +78,15 @@ struct rds_ib_mr *rds_ib_alloc_fmr(struct rds_ib_device *rds_ibdev, int npages)
        return ibmr;
 
 out_no_cigar:
-       if (ibmr) {
-               if (fmr->fmr)
-                       ib_dealloc_fmr(fmr->fmr);
-               kfree(ibmr);
-       }
+       kfree(ibmr);
        atomic_dec(&pool->item_count);
+
        return ERR_PTR(err);
 }
 
-int rds_ib_map_fmr(struct rds_ib_device *rds_ibdev, struct rds_ib_mr *ibmr,
-                  struct scatterlist *sg, unsigned int nents)
+static int rds_ib_map_fmr(struct rds_ib_device *rds_ibdev,
+                         struct rds_ib_mr *ibmr, struct scatterlist *sg,
+                         unsigned int nents)
 {
        struct ib_device *dev = rds_ibdev->dev;
        struct rds_ib_fmr *fmr = &ibmr->u.fmr;
@@ -114,29 +112,39 @@ int rds_ib_map_fmr(struct rds_ib_device *rds_ibdev, struct rds_ib_mr *ibmr,
                u64 dma_addr = ib_sg_dma_address(dev, &scat[i]);
 
                if (dma_addr & ~PAGE_MASK) {
-                       if (i > 0)
+                       if (i > 0) {
+                               ib_dma_unmap_sg(dev, sg, nents,
+                                               DMA_BIDIRECTIONAL);
                                return -EINVAL;
-                       else
+                       } else {
                                ++page_cnt;
+                       }
                }
                if ((dma_addr + dma_len) & ~PAGE_MASK) {
-                       if (i < sg_dma_len - 1)
+                       if (i < sg_dma_len - 1) {
+                               ib_dma_unmap_sg(dev, sg, nents,
+                                               DMA_BIDIRECTIONAL);
                                return -EINVAL;
-                       else
+                       } else {
                                ++page_cnt;
+                       }
                }
 
                len += dma_len;
        }
 
        page_cnt += len >> PAGE_SHIFT;
-       if (page_cnt > ibmr->pool->fmr_attr.max_pages)
+       if (page_cnt > ibmr->pool->fmr_attr.max_pages) {
+               ib_dma_unmap_sg(dev, sg, nents, DMA_BIDIRECTIONAL);
                return -EINVAL;
+       }
 
        dma_pages = kmalloc_node(sizeof(u64) * page_cnt, GFP_ATOMIC,
                                 rdsibdev_to_node(rds_ibdev));
-       if (!dma_pages)
+       if (!dma_pages) {
+               ib_dma_unmap_sg(dev, sg, nents, DMA_BIDIRECTIONAL);
                return -ENOMEM;
+       }
 
        page_cnt = 0;
        for (i = 0; i < sg_dma_len; ++i) {
@@ -149,8 +157,10 @@ int rds_ib_map_fmr(struct rds_ib_device *rds_ibdev, struct rds_ib_mr *ibmr,
        }
 
        ret = ib_map_phys_fmr(fmr->fmr, dma_pages, page_cnt, io_addr);
-       if (ret)
+       if (ret) {
+               ib_dma_unmap_sg(dev, sg, nents, DMA_BIDIRECTIONAL);
                goto out;
+       }
 
        /* Success - we successfully remapped the MR, so we can
         * safely tear down the old mapping.
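Each early return in rds_ib_map_fmr() now pairs the ib_dma_map_sg() done earlier with ib_dma_unmap_sg(), since a mapped scatterlist would otherwise leak. An equivalent, less repetitive shape funnels all post-map failures through one label, sketched here with placeholder map/unmap calls:

#include <stdio.h>

static int map_sg(void)     { printf("map\n");   return 0; }
static void unmap_sg(void)  { printf("unmap\n"); }
static int validate(int ok) { return ok ? 0 : -1; }

static int map_and_check(int ok)
{
        int ret;

        ret = map_sg();
        if (ret)
                return ret;             /* nothing mapped yet */

        ret = validate(ok);
        if (ret)
                goto unmap_out;         /* every failure after the map unmaps */

        return 0;                       /* success: mapping stays live */

unmap_out:
        unmap_sg();
        return ret;
}

int main(void)
{
        return map_and_check(0) ? 1 : 0;
}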
index 5d6e98a79a5e4b3de1f472c5fc513fce545bf6f9..0ea4ab017a8cc3f807931e1194cddb5048a82956 100644 (file)
@@ -125,8 +125,6 @@ void rds_ib_mr_exit(void);
 void __rds_ib_teardown_mr(struct rds_ib_mr *);
 void rds_ib_teardown_mr(struct rds_ib_mr *);
 struct rds_ib_mr *rds_ib_alloc_fmr(struct rds_ib_device *, int);
-int rds_ib_map_fmr(struct rds_ib_device *, struct rds_ib_mr *,
-                  struct scatterlist *, unsigned int);
 struct rds_ib_mr *rds_ib_reuse_mr(struct rds_ib_mr_pool *);
 int rds_ib_flush_mr_pool(struct rds_ib_mr_pool *, int, struct rds_ib_mr **);
 struct rds_ib_mr *rds_ib_reg_fmr(struct rds_ib_device *, struct scatterlist *,
index 39518ef7af4dfbada74af4a685cd8fe8dbaf9e40..82d38ccf5e8bcf99eefd20934744cbeb7410406b 100644 (file)
@@ -147,7 +147,7 @@ struct rds_connection {
 
        /* Protocol version */
        unsigned int            c_version;
-       possible_net_t          c_net;
+       struct net              *c_net;
 
        struct list_head        c_map_item;
        unsigned long           c_map_queued;
@@ -162,13 +162,13 @@ struct rds_connection {
 static inline
 struct net *rds_conn_net(struct rds_connection *conn)
 {
-       return read_pnet(&conn->c_net);
+       return conn->c_net;
 }
 
 static inline
 void rds_conn_net_set(struct rds_connection *conn, struct net *net)
 {
-       write_pnet(&conn->c_net, net);
+       conn->c_net = get_net(net);
 }
 
 #define RDS_FLAG_CONG_BITMAP   0x01
index a973d3b4dff0b2216bf3698cfbfeeb4b227dfc37..22569007677357ba40347ee46e6584c683de2597 100644 (file)
@@ -484,9 +484,10 @@ static void __net_exit rds_tcp_exit_net(struct net *net)
         * we do need to clean up the listen socket here.
         */
        if (rtn->rds_tcp_listen_sock) {
-               rds_tcp_listen_stop(rtn->rds_tcp_listen_sock);
+               struct socket *lsock = rtn->rds_tcp_listen_sock;
+
                rtn->rds_tcp_listen_sock = NULL;
-               flush_work(&rtn->rds_tcp_accept_w);
+               rds_tcp_listen_stop(lsock, &rtn->rds_tcp_accept_w);
        }
 }
 
@@ -523,13 +524,13 @@ static void rds_tcp_kill_sock(struct net *net)
        struct rds_tcp_connection *tc, *_tc;
        LIST_HEAD(tmp_list);
        struct rds_tcp_net *rtn = net_generic(net, rds_tcp_netid);
+       struct socket *lsock = rtn->rds_tcp_listen_sock;
 
-       rds_tcp_listen_stop(rtn->rds_tcp_listen_sock);
        rtn->rds_tcp_listen_sock = NULL;
-       flush_work(&rtn->rds_tcp_accept_w);
+       rds_tcp_listen_stop(lsock, &rtn->rds_tcp_accept_w);
        spin_lock_irq(&rds_tcp_conn_lock);
        list_for_each_entry_safe(tc, _tc, &rds_tcp_conn_list, t_tcp_node) {
-               struct net *c_net = read_pnet(&tc->t_cpath->cp_conn->c_net);
+               struct net *c_net = tc->t_cpath->cp_conn->c_net;
 
                if (net != c_net || !tc->t_sock)
                        continue;
@@ -546,8 +547,12 @@ static void rds_tcp_kill_sock(struct net *net)
 void *rds_tcp_listen_sock_def_readable(struct net *net)
 {
        struct rds_tcp_net *rtn = net_generic(net, rds_tcp_netid);
+       struct socket *lsock = rtn->rds_tcp_listen_sock;
+
+       if (!lsock)
+               return NULL;
 
-       return rtn->rds_tcp_listen_sock->sk->sk_user_data;
+       return lsock->sk->sk_user_data;
 }
 
 static int rds_tcp_dev_event(struct notifier_block *this,
@@ -584,7 +589,7 @@ static void rds_tcp_sysctl_reset(struct net *net)
 
        spin_lock_irq(&rds_tcp_conn_lock);
        list_for_each_entry_safe(tc, _tc, &rds_tcp_conn_list, t_tcp_node) {
-               struct net *c_net = read_pnet(&tc->t_cpath->cp_conn->c_net);
+               struct net *c_net = tc->t_cpath->cp_conn->c_net;
 
                if (net != c_net || !tc->t_sock)
                        continue;
@@ -638,19 +643,19 @@ static int rds_tcp_init(void)
                goto out;
        }
 
-       ret = register_netdevice_notifier(&rds_tcp_dev_notifier);
-       if (ret) {
-               pr_warn("could not register rds_tcp_dev_notifier\n");
+       ret = rds_tcp_recv_init();
+       if (ret)
                goto out_slab;
-       }
 
        ret = register_pernet_subsys(&rds_tcp_net_ops);
        if (ret)
-               goto out_notifier;
+               goto out_recv;
 
-       ret = rds_tcp_recv_init();
-       if (ret)
+       ret = register_netdevice_notifier(&rds_tcp_dev_notifier);
+       if (ret) {
+               pr_warn("could not register rds_tcp_dev_notifier\n");
                goto out_pernet;
+       }
 
        rds_trans_register(&rds_tcp_transport);
 
@@ -660,9 +665,8 @@ static int rds_tcp_init(void)
 
 out_pernet:
        unregister_pernet_subsys(&rds_tcp_net_ops);
-out_notifier:
-       if (unregister_netdevice_notifier(&rds_tcp_dev_notifier))
-               pr_warn("could not unregister rds_tcp_dev_notifier\n");
+out_recv:
+       rds_tcp_recv_exit();
 out_slab:
        kmem_cache_destroy(rds_tcp_conn_slab);
 out:
index 9a1cc890657679798cf58888c42d5bb2372f0fef..56ea6620fcf97ce40d0926089b5e5b188ea1a1fe 100644 (file)
@@ -66,7 +66,7 @@ void rds_tcp_state_change(struct sock *sk);
 
 /* tcp_listen.c */
 struct socket *rds_tcp_listen_init(struct net *);
-void rds_tcp_listen_stop(struct socket *);
+void rds_tcp_listen_stop(struct socket *sock, struct work_struct *acceptor);
 void rds_tcp_listen_data_ready(struct sock *sk);
 int rds_tcp_accept_one(struct socket *sock);
 int rds_tcp_keepalive(struct socket *sock);
index 67d0929c7d3d0c97ed209af9a67b4d83343c3de1..507678853e6cb3bb769711d22d75f5099228faab 100644 (file)
@@ -133,7 +133,7 @@ int rds_tcp_accept_one(struct socket *sock)
 
        new_sock->type = sock->type;
        new_sock->ops = sock->ops;
-       ret = sock->ops->accept(sock, new_sock, O_NONBLOCK);
+       ret = sock->ops->accept(sock, new_sock, O_NONBLOCK, true);
        if (ret < 0)
                goto out;
 
@@ -223,6 +223,9 @@ void rds_tcp_listen_data_ready(struct sock *sk)
         * before it has been accepted and the accepter has set up their
         * data_ready.. we only want to queue listen work for our listening
         * socket
+        *
+        * (*ready)() may be null if we are racing with netns delete, and
+        * the listen socket is being torn down.
         */
        if (sk->sk_state == TCP_LISTEN)
                rds_tcp_accept_work(sk);
@@ -231,7 +234,8 @@ void rds_tcp_listen_data_ready(struct sock *sk)
 
 out:
        read_unlock_bh(&sk->sk_callback_lock);
-       ready(sk);
+       if (ready)
+               ready(sk);
 }
 
 struct socket *rds_tcp_listen_init(struct net *net)
@@ -271,7 +275,7 @@ out:
        return NULL;
 }
 
-void rds_tcp_listen_stop(struct socket *sock)
+void rds_tcp_listen_stop(struct socket *sock, struct work_struct *acceptor)
 {
        struct sock *sk;
 
@@ -292,5 +296,6 @@ void rds_tcp_listen_stop(struct socket *sock)
 
        /* wait for accepts to stop and close the socket */
        flush_workqueue(rds_wq);
+       flush_work(acceptor);
        sock_release(sock);
 }
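The shutdown ordering here matters: the listen socket pointer is cleared and its callbacks reset first, then the acceptor work is flushed, and only then is the socket released, so no accept worker can touch a freed socket. A userspace analogue of that publish-then-join-then-free ordering (pthread_join standing in for flush_work()):

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

static int *resource;

static void *acceptor(void *arg)
{
        (void)arg;
        printf("acceptor saw %d\n", *resource);
        return NULL;
}

int main(void)
{
        pthread_t t;

        resource = malloc(sizeof(*resource));
        if (!resource)
                return 1;
        *resource = 42;

        pthread_create(&t, NULL, acceptor, NULL);

        pthread_join(t, NULL);   /* analogue of flush_work(acceptor) */
        free(resource);          /* safe only after the flush/join   */
        return 0;
}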
index e36e333a0aa0d7430c852ce419ac7ed85094bbec..3e447d056d092a405311265a06a8596b2ce8dc87 100644 (file)
@@ -156,7 +156,7 @@ void rds_connect_worker(struct work_struct *work)
        struct rds_connection *conn = cp->cp_conn;
        int ret;
 
-       if (cp->cp_index > 1 && cp->cp_conn->c_laddr > cp->cp_conn->c_faddr)
+       if (cp->cp_index > 0 && cp->cp_conn->c_laddr > cp->cp_conn->c_faddr)
                return;
        clear_bit(RDS_RECONNECT_PENDING, &cp->cp_flags);
        ret = rds_conn_path_transition(cp, RDS_CONN_DOWN, RDS_CONN_CONNECTING);
index b8a1df2c97853246b2485d9d30caa0e19b61278a..4a9729257023676565a0ff8c140ef56823b6d374 100644 (file)
@@ -871,7 +871,8 @@ out_release:
        return err;
 }
 
-static int rose_accept(struct socket *sock, struct socket *newsock, int flags)
+static int rose_accept(struct socket *sock, struct socket *newsock, int flags,
+                      bool kern)
 {
        struct sk_buff *skb;
        struct sock *newsk;
index 26a7b1db1361e554733b0ff40a54d8e68e59af09..7486926e60a88a56a364b727800f3cd1a671d808 100644 (file)
@@ -739,6 +739,25 @@ static inline bool rxrpc_abort_call(const char *why, struct rxrpc_call *call,
        return ret;
 }
 
+/*
+ * Abort a call due to a protocol error.
+ */
+static inline bool __rxrpc_abort_eproto(struct rxrpc_call *call,
+                                       struct sk_buff *skb,
+                                       const char *eproto_why,
+                                       const char *why,
+                                       u32 abort_code)
+{
+       struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
+
+       trace_rxrpc_rx_eproto(call, sp->hdr.serial, eproto_why);
+       return rxrpc_abort_call(why, call, sp->hdr.seq, abort_code, -EPROTO);
+}
+
+#define rxrpc_abort_eproto(call, skb, eproto_why, abort_why, abort_code) \
+       __rxrpc_abort_eproto((call), (skb), tracepoint_string(eproto_why), \
+                            (abort_why), (abort_code))
+
 /*
  * conn_client.c
  */
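__rxrpc_abort_eproto() pairs every protocol-error abort with a trace event, and the rxrpc_abort_eproto() macro layer exists so that tracepoint_string() is handed a string literal at each call site. A plain-C sketch of that macro-plus-inline layering (the empty-literal concatenation forces a literal argument; printf stands in for the tracepoint):

#include <stdio.h>

static inline int __abort_eproto(const char *why, int code)
{
        printf("trace: %s\n", why);     /* stands in for the tracepoint */
        return code;
}

/* "" why fails to compile unless 'why' is a string literal */
#define abort_eproto(why, code) __abort_eproto("" why, (code))

int main(void)
{
        return abort_eproto("bad_abort", 0);
}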
index 0ed181f53f32a0145c03b0006b92de5c7a0101aa..1752fcf8e8f1dd85866004a2ee38c8bbaa040138 100644 (file)
@@ -413,11 +413,11 @@ found_service:
 
        case RXRPC_CONN_REMOTELY_ABORTED:
                rxrpc_set_call_completion(call, RXRPC_CALL_REMOTELY_ABORTED,
-                                         conn->remote_abort, ECONNABORTED);
+                                         conn->remote_abort, -ECONNABORTED);
                break;
        case RXRPC_CONN_LOCALLY_ABORTED:
                rxrpc_abort_call("CON", call, sp->hdr.seq,
-                                conn->local_abort, ECONNABORTED);
+                                conn->local_abort, -ECONNABORTED);
                break;
        default:
                BUG();
@@ -600,7 +600,7 @@ int rxrpc_reject_call(struct rxrpc_sock *rx)
        write_lock_bh(&call->state_lock);
        switch (call->state) {
        case RXRPC_CALL_SERVER_ACCEPTING:
-               __rxrpc_abort_call("REJ", call, 1, RX_USER_ABORT, ECONNABORTED);
+               __rxrpc_abort_call("REJ", call, 1, RX_USER_ABORT, -ECONNABORTED);
                abort = true;
                /* fall through */
        case RXRPC_CALL_COMPLETE:
index 97a17ada4431d58b7a0f9c07be3b13b0230a6390..7a77844aab16be5f32a16b7edd3a7f440006c221 100644 (file)
@@ -386,7 +386,7 @@ recheck_state:
 
        now = ktime_get_real();
        if (ktime_before(call->expire_at, now)) {
-               rxrpc_abort_call("EXP", call, 0, RX_CALL_TIMEOUT, ETIME);
+               rxrpc_abort_call("EXP", call, 0, RX_CALL_TIMEOUT, -ETIME);
                set_bit(RXRPC_CALL_EV_ABORT, &call->events);
                goto recheck_state;
        }
index d79cd36987a95b86f2af9fac4688ab86e20f41d5..47f7f4205653aa0643c77f7381688df79b51cbbe 100644 (file)
@@ -486,7 +486,7 @@ void rxrpc_release_calls_on_socket(struct rxrpc_sock *rx)
                call = list_entry(rx->to_be_accepted.next,
                                  struct rxrpc_call, accept_link);
                list_del(&call->accept_link);
-               rxrpc_abort_call("SKR", call, 0, RX_CALL_DEAD, ECONNRESET);
+               rxrpc_abort_call("SKR", call, 0, RX_CALL_DEAD, -ECONNRESET);
                rxrpc_put_call(call, rxrpc_call_put);
        }
 
@@ -494,7 +494,7 @@ void rxrpc_release_calls_on_socket(struct rxrpc_sock *rx)
                call = list_entry(rx->sock_calls.next,
                                  struct rxrpc_call, sock_link);
                rxrpc_get_call(call, rxrpc_call_got);
-               rxrpc_abort_call("SKT", call, 0, RX_CALL_DEAD, ECONNRESET);
+               rxrpc_abort_call("SKT", call, 0, RX_CALL_DEAD, -ECONNRESET);
                rxrpc_send_abort_packet(call);
                rxrpc_release_call(rx, call);
                rxrpc_put_call(call, rxrpc_call_put);
index c3be03e8d098213e4956bb644827865206470e19..e8dea0d49e7fedf2951616c1aed8e93ec693a1dd 100644 (file)
@@ -550,6 +550,7 @@ static void rxrpc_activate_one_channel(struct rxrpc_connection *conn,
        call->cid       = conn->proto.cid | channel;
        call->call_id   = call_id;
 
+       trace_rxrpc_connect_call(call);
        _net("CONNECT call %08x:%08x as call %d on conn %d",
             call->cid, call->call_id, call->debug_id, conn->debug_id);
 
index 3f9d8d7ec6323a95de3e08d01098abdfcf33ff4f..46babcf82ce8648190e018ee69ad16701e2e969a 100644 (file)
@@ -168,7 +168,7 @@ static void rxrpc_abort_calls(struct rxrpc_connection *conn,
  * generate a connection-level abort
  */
 static int rxrpc_abort_connection(struct rxrpc_connection *conn,
-                                 u32 error, u32 abort_code)
+                                 int error, u32 abort_code)
 {
        struct rxrpc_wire_header whdr;
        struct msghdr msg;
@@ -275,16 +275,23 @@ static int rxrpc_process_event(struct rxrpc_connection *conn,
                rxrpc_conn_retransmit_call(conn, skb);
                return 0;
 
+       case RXRPC_PACKET_TYPE_BUSY:
+               /* Just ignore BUSY packets for now. */
+               return 0;
+
        case RXRPC_PACKET_TYPE_ABORT:
                if (skb_copy_bits(skb, sizeof(struct rxrpc_wire_header),
-                                 &wtmp, sizeof(wtmp)) < 0)
+                                 &wtmp, sizeof(wtmp)) < 0) {
+                       trace_rxrpc_rx_eproto(NULL, sp->hdr.serial,
+                                             tracepoint_string("bad_abort"));
                        return -EPROTO;
+               }
                abort_code = ntohl(wtmp);
                _proto("Rx ABORT %%%u { ac=%d }", sp->hdr.serial, abort_code);
 
                conn->state = RXRPC_CONN_REMOTELY_ABORTED;
                rxrpc_abort_calls(conn, RXRPC_CALL_REMOTELY_ABORTED,
-                                 abort_code, ECONNABORTED);
+                                 abort_code, -ECONNABORTED);
                return -ECONNABORTED;
 
        case RXRPC_PACKET_TYPE_CHALLENGE:
@@ -323,7 +330,8 @@ static int rxrpc_process_event(struct rxrpc_connection *conn,
                return 0;
 
        default:
-               _leave(" = -EPROTO [%u]", sp->hdr.type);
+               trace_rxrpc_rx_eproto(NULL, sp->hdr.serial,
+                                     tracepoint_string("bad_conn_pkt"));
                return -EPROTO;
        }
 }
@@ -366,7 +374,7 @@ static void rxrpc_secure_connection(struct rxrpc_connection *conn)
 
 abort:
        _debug("abort %d, %d", ret, abort_code);
-       rxrpc_abort_connection(conn, -ret, abort_code);
+       rxrpc_abort_connection(conn, ret, abort_code);
        _leave(" [aborted]");
 }
 
@@ -415,9 +423,8 @@ requeue_and_leave:
        goto out;
 
 protocol_error:
-       if (rxrpc_abort_connection(conn, -ret, abort_code) < 0)
+       if (rxrpc_abort_connection(conn, ret, abort_code) < 0)
                goto requeue_and_leave;
        rxrpc_free_skb(skb, rxrpc_skb_rx_freed);
-       _leave(" [EPROTO]");
        goto out;
 }
index 9f4cfa25af7c92c406e81d8003b8aa07c7892a04..45dba732a3b4743ba37c46d05b02125f16eac23d 100644 (file)
@@ -30,7 +30,7 @@
 static void rxrpc_proto_abort(const char *why,
                              struct rxrpc_call *call, rxrpc_seq_t seq)
 {
-       if (rxrpc_abort_call(why, call, seq, RX_PROTOCOL_ERROR, EBADMSG)) {
+       if (rxrpc_abort_call(why, call, seq, RX_PROTOCOL_ERROR, -EBADMSG)) {
                set_bit(RXRPC_CALL_EV_ABORT, &call->events);
                rxrpc_queue_call(call);
        }
@@ -420,6 +420,7 @@ static void rxrpc_input_data(struct rxrpc_call *call, struct sk_buff *skb,
                             u16 skew)
 {
        struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
+       enum rxrpc_call_state state;
        unsigned int offset = sizeof(struct rxrpc_wire_header);
        unsigned int ix;
        rxrpc_serial_t serial = sp->hdr.serial, ack_serial = 0;
@@ -434,14 +435,15 @@ static void rxrpc_input_data(struct rxrpc_call *call, struct sk_buff *skb,
        _proto("Rx DATA %%%u { #%u f=%02x }",
               sp->hdr.serial, seq, sp->hdr.flags);
 
-       if (call->state >= RXRPC_CALL_COMPLETE)
+       state = READ_ONCE(call->state);
+       if (state >= RXRPC_CALL_COMPLETE)
                return;
 
        /* Received data implicitly ACKs all of the request packets we sent
         * when we're acting as a client.
         */
-       if ((call->state == RXRPC_CALL_CLIENT_SEND_REQUEST ||
-            call->state == RXRPC_CALL_CLIENT_AWAIT_REPLY) &&
+       if ((state == RXRPC_CALL_CLIENT_SEND_REQUEST ||
+            state == RXRPC_CALL_CLIENT_AWAIT_REPLY) &&
            !rxrpc_receiving_reply(call))
                return;
 
@@ -650,6 +652,7 @@ static void rxrpc_input_ackinfo(struct rxrpc_call *call, struct sk_buff *skb,
        struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
        struct rxrpc_peer *peer;
        unsigned int mtu;
+       bool wake = false;
        u32 rwind = ntohl(ackinfo->rwind);
 
        _proto("Rx ACK %%%u Info { rx=%u max=%u rwin=%u jm=%u }",
@@ -657,9 +660,16 @@ static void rxrpc_input_ackinfo(struct rxrpc_call *call, struct sk_buff *skb,
               ntohl(ackinfo->rxMTU), ntohl(ackinfo->maxMTU),
               rwind, ntohl(ackinfo->jumbo_max));
 
-       if (rwind > RXRPC_RXTX_BUFF_SIZE - 1)
-               rwind = RXRPC_RXTX_BUFF_SIZE - 1;
-       call->tx_winsize = rwind;
+       if (call->tx_winsize != rwind) {
+               if (rwind > RXRPC_RXTX_BUFF_SIZE - 1)
+                       rwind = RXRPC_RXTX_BUFF_SIZE - 1;
+               if (rwind > call->tx_winsize)
+                       wake = true;
+               trace_rxrpc_rx_rwind_change(call, sp->hdr.serial,
+                                           ntohl(ackinfo->rwind), wake);
+               call->tx_winsize = rwind;
+       }
+
        if (call->cong_ssthresh > rwind)
                call->cong_ssthresh = rwind;
 
@@ -673,6 +683,9 @@ static void rxrpc_input_ackinfo(struct rxrpc_call *call, struct sk_buff *skb,
                spin_unlock_bh(&peer->lock);
                _net("Net MTU %u (maxdata %u)", peer->mtu, peer->maxdata);
        }
+
+       if (wake)
+               wake_up(&call->waitq);
 }
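
The rwind handling above defers the wakeup: a wake flag is computed while the window bookkeeping happens, and wake_up() runs once at the end, only when the window actually grew. A pthread-based sketch of that shape (all names invented):

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t waitq = PTHREAD_COND_INITIALIZER;
static unsigned int tx_winsize = 8;

static void demo_rwind_change(unsigned int rwind)
{
	bool wake = false;

	pthread_mutex_lock(&lock);
	if (tx_winsize != rwind) {
		if (rwind > tx_winsize)
			wake = true;	/* window grew: senders may proceed */
		tx_winsize = rwind;
	}
	pthread_mutex_unlock(&lock);

	if (wake)	/* wake once, after all bookkeeping is done */
		pthread_cond_broadcast(&waitq);
}

int main(void)
{
	demo_rwind_change(32);
	printf("tx_winsize=%u\n", tx_winsize);
	return 0;
}
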
 
 /*
@@ -799,7 +812,7 @@ static void rxrpc_input_ack(struct rxrpc_call *call, struct sk_buff *skb,
                return rxrpc_proto_abort("AK0", call, 0);
 
        /* Ignore ACKs unless we are or have just been transmitting. */
-       switch (call->state) {
+       switch (READ_ONCE(call->state)) {
        case RXRPC_CALL_CLIENT_SEND_REQUEST:
        case RXRPC_CALL_CLIENT_AWAIT_REPLY:
        case RXRPC_CALL_SERVER_SEND_REPLY:
@@ -866,7 +879,7 @@ static void rxrpc_input_ackall(struct rxrpc_call *call, struct sk_buff *skb)
 }
 
 /*
- * Process an ABORT packet.
+ * Process an ABORT packet directed at a call.
  */
 static void rxrpc_input_abort(struct rxrpc_call *call, struct sk_buff *skb)
 {
@@ -881,10 +894,12 @@ static void rxrpc_input_abort(struct rxrpc_call *call, struct sk_buff *skb)
                          &wtmp, sizeof(wtmp)) >= 0)
                abort_code = ntohl(wtmp);
 
+       trace_rxrpc_rx_abort(call, sp->hdr.serial, abort_code);
+
        _proto("Rx ABORT %%%u { %x }", sp->hdr.serial, abort_code);
 
        if (rxrpc_set_call_completion(call, RXRPC_CALL_REMOTELY_ABORTED,
-                                     abort_code, ECONNABORTED))
+                                     abort_code, -ECONNABORTED))
                rxrpc_notify_socket(call);
 }
 
@@ -940,14 +955,14 @@ static void rxrpc_input_call_packet(struct rxrpc_call *call,
 static void rxrpc_input_implicit_end_call(struct rxrpc_connection *conn,
                                          struct rxrpc_call *call)
 {
-       switch (call->state) {
+       switch (READ_ONCE(call->state)) {
        case RXRPC_CALL_SERVER_AWAIT_ACK:
                rxrpc_call_completed(call);
                break;
        case RXRPC_CALL_COMPLETE:
                break;
        default:
-               if (rxrpc_abort_call("IMP", call, 0, RX_CALL_DEAD, ESHUTDOWN)) {
+               if (rxrpc_abort_call("IMP", call, 0, RX_CALL_DEAD, -ESHUTDOWN)) {
                        set_bit(RXRPC_CALL_EV_ABORT, &call->events);
                        rxrpc_queue_call(call);
                }
@@ -1006,8 +1021,11 @@ int rxrpc_extract_header(struct rxrpc_skb_priv *sp, struct sk_buff *skb)
        struct rxrpc_wire_header whdr;
 
        /* dig out the RxRPC connection details */
-       if (skb_copy_bits(skb, 0, &whdr, sizeof(whdr)) < 0)
+       if (skb_copy_bits(skb, 0, &whdr, sizeof(whdr)) < 0) {
+               trace_rxrpc_rx_eproto(NULL, sp->hdr.serial,
+                                     tracepoint_string("bad_hdr"));
                return -EBADMSG;
+       }
 
        memset(sp, 0, sizeof(*sp));
        sp->hdr.epoch           = ntohl(whdr.epoch);
index 7d4375e557e6ee4a50961ad25d9e6f01a34290db..af276f173b10ebb26a8b68d2560ad958b8f3d09a 100644 (file)
@@ -46,7 +46,10 @@ static int none_respond_to_challenge(struct rxrpc_connection *conn,
                                     struct sk_buff *skb,
                                     u32 *_abort_code)
 {
-       *_abort_code = RX_PROTOCOL_ERROR;
+       struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
+
+       trace_rxrpc_rx_eproto(NULL, sp->hdr.serial,
+                             tracepoint_string("chall_none"));
        return -EPROTO;
 }
 
@@ -54,7 +57,10 @@ static int none_verify_response(struct rxrpc_connection *conn,
                                struct sk_buff *skb,
                                u32 *_abort_code)
 {
-       *_abort_code = RX_PROTOCOL_ERROR;
+       struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
+
+       trace_rxrpc_rx_eproto(NULL, sp->hdr.serial,
+                             tracepoint_string("resp_none"));
        return -EPROTO;
 }
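
tracepoint_string(), used throughout this series, hands the tracing core a stable pointer to a static literal, so each trace record stores just a pointer that tooling can later map back to text. A userspace approximation of the idea (the macro here is a simplified stand-in, not the kernel's definition):

#include <stdio.h>

/* Stand-in: keep the literal at a stable address and pass that
 * address around; the kernel version also places it in a dedicated
 * section so trace decoders can resolve it. */
#define tracepoint_string(str) ({ static const char *_s = (str); _s; })

static void trace_rx_eproto(unsigned int serial, const char *why)
{
	printf("rx_eproto: serial=%u why=%s ptr=%p\n",
	       serial, why, (const void *)why);
}

int main(void)
{
	trace_rx_eproto(1234, tracepoint_string("chall_none"));
	return 0;
}
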
 
index bf13b8470c9ad51783325d7f47c32414b07f0c40..1ed9c0c2e94f1c70ab0ca61fa16e87f76530f78a 100644 (file)
@@ -296,7 +296,7 @@ void rxrpc_peer_error_distributor(struct work_struct *work)
                hlist_del_init(&call->error_link);
                rxrpc_see_call(call);
 
-               if (rxrpc_set_call_completion(call, compl, 0, error))
+               if (rxrpc_set_call_completion(call, compl, 0, -error))
                        rxrpc_notify_socket(call);
        }
 
index 6491ca46a03fda6dc66e02e887ad08012acca14b..f9caf3b775097f62e8cb27fe12f3e925a3f8c323 100644 (file)
@@ -83,11 +83,11 @@ static int rxrpc_recvmsg_term(struct rxrpc_call *call, struct msghdr *msg)
                ret = put_cmsg(msg, SOL_RXRPC, RXRPC_ABORT, 4, &tmp);
                break;
        case RXRPC_CALL_NETWORK_ERROR:
-               tmp = call->error;
+               tmp = -call->error;
                ret = put_cmsg(msg, SOL_RXRPC, RXRPC_NET_ERROR, 4, &tmp);
                break;
        case RXRPC_CALL_LOCAL_ERROR:
-               tmp = call->error;
+               tmp = -call->error;
                ret = put_cmsg(msg, SOL_RXRPC, RXRPC_LOCAL_ERROR, 4, &tmp);
                break;
        default:
@@ -527,7 +527,7 @@ try_again:
                msg->msg_namelen = len;
        }
 
-       switch (call->state) {
+       switch (READ_ONCE(call->state)) {
        case RXRPC_CALL_SERVER_ACCEPTING:
                ret = rxrpc_recvmsg_new_call(rx, call, msg, flags);
                break;
@@ -640,7 +640,7 @@ int rxrpc_kernel_recv_data(struct socket *sock, struct rxrpc_call *call,
 
        mutex_lock(&call->user_mutex);
 
-       switch (call->state) {
+       switch (READ_ONCE(call->state)) {
        case RXRPC_CALL_CLIENT_RECV_REPLY:
        case RXRPC_CALL_SERVER_RECV_REQUEST:
        case RXRPC_CALL_SERVER_ACK_REQUEST:
@@ -682,14 +682,16 @@ out:
        return ret;
 
 short_data:
+       trace_rxrpc_rx_eproto(call, 0, tracepoint_string("short_data"));
        ret = -EBADMSG;
        goto out;
 excess_data:
+       trace_rxrpc_rx_eproto(call, 0, tracepoint_string("excess_data"));
        ret = -EMSGSIZE;
        goto out;
 call_complete:
        *_abort = call->abort_code;
-       ret = -call->error;
+       ret = call->error;
        if (call->completion == RXRPC_CALL_SUCCEEDED) {
                ret = 1;
                if (size > 0)
index 4374e7b9c7bff9fdd1d36a5a9fc8f135414ed360..1bb9b2ccc2673714367d2ec4356d39b5a3839895 100644 (file)
@@ -148,15 +148,13 @@ static int rxkad_secure_packet_auth(const struct rxrpc_call *call,
                                    u32 data_size,
                                    void *sechdr)
 {
-       struct rxrpc_skb_priv *sp;
+       struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
        SKCIPHER_REQUEST_ON_STACK(req, call->conn->cipher);
        struct rxkad_level1_hdr hdr;
        struct rxrpc_crypt iv;
        struct scatterlist sg;
        u16 check;
 
-       sp = rxrpc_skb(skb);
-
        _enter("");
 
        check = sp->hdr.seq ^ call->call_id;
@@ -323,6 +321,7 @@ static int rxkad_verify_packet_1(struct rxrpc_call *call, struct sk_buff *skb,
        struct rxrpc_crypt iv;
        struct scatterlist sg[16];
        struct sk_buff *trailer;
+       bool aborted;
        u32 data_size, buf;
        u16 check;
        int nsg;
@@ -330,7 +329,8 @@ static int rxkad_verify_packet_1(struct rxrpc_call *call, struct sk_buff *skb,
        _enter("");
 
        if (len < 8) {
-               rxrpc_abort_call("V1H", call, seq, RXKADSEALEDINCON, EPROTO);
+               aborted = rxrpc_abort_eproto(call, skb, "rxkad_1_hdr", "V1H",
+                                            RXKADSEALEDINCON);
                goto protocol_error;
        }
 
@@ -355,7 +355,8 @@ static int rxkad_verify_packet_1(struct rxrpc_call *call, struct sk_buff *skb,
 
        /* Extract the decrypted packet length */
        if (skb_copy_bits(skb, offset, &sechdr, sizeof(sechdr)) < 0) {
-               rxrpc_abort_call("XV1", call, seq, RXKADDATALEN, EPROTO);
+               aborted = rxrpc_abort_eproto(call, skb, "rxkad_1_len", "XV1",
+                                            RXKADDATALEN);
                goto protocol_error;
        }
        offset += sizeof(sechdr);
@@ -368,12 +369,14 @@ static int rxkad_verify_packet_1(struct rxrpc_call *call, struct sk_buff *skb,
        check ^= seq ^ call->call_id;
        check &= 0xffff;
        if (check != 0) {
-               rxrpc_abort_call("V1C", call, seq, RXKADSEALEDINCON, EPROTO);
+               aborted = rxrpc_abort_eproto(call, skb, "rxkad_1_check", "V1C",
+                                            RXKADSEALEDINCON);
                goto protocol_error;
        }
 
        if (data_size > len) {
-               rxrpc_abort_call("V1L", call, seq, RXKADDATALEN, EPROTO);
+               aborted = rxrpc_abort_eproto(call, skb, "rxkad_1_datalen", "V1L",
+                                            RXKADDATALEN);
                goto protocol_error;
        }
 
@@ -381,8 +384,8 @@ static int rxkad_verify_packet_1(struct rxrpc_call *call, struct sk_buff *skb,
        return 0;
 
 protocol_error:
-       rxrpc_send_abort_packet(call);
-       _leave(" = -EPROTO");
+       if (aborted)
+               rxrpc_send_abort_packet(call);
        return -EPROTO;
 
 nomem:
@@ -403,6 +406,7 @@ static int rxkad_verify_packet_2(struct rxrpc_call *call, struct sk_buff *skb,
        struct rxrpc_crypt iv;
        struct scatterlist _sg[4], *sg;
        struct sk_buff *trailer;
+       bool aborted;
        u32 data_size, buf;
        u16 check;
        int nsg;
@@ -410,7 +414,8 @@ static int rxkad_verify_packet_2(struct rxrpc_call *call, struct sk_buff *skb,
        _enter(",{%d}", skb->len);
 
        if (len < 8) {
-               rxrpc_abort_call("V2H", call, seq, RXKADSEALEDINCON, EPROTO);
+               aborted = rxrpc_abort_eproto(call, skb, "rxkad_2_hdr", "V2H",
+                                            RXKADSEALEDINCON);
                goto protocol_error;
        }
 
@@ -445,7 +450,8 @@ static int rxkad_verify_packet_2(struct rxrpc_call *call, struct sk_buff *skb,
 
        /* Extract the decrypted packet length */
        if (skb_copy_bits(skb, offset, &sechdr, sizeof(sechdr)) < 0) {
-               rxrpc_abort_call("XV2", call, seq, RXKADDATALEN, EPROTO);
+               aborted = rxrpc_abort_eproto(call, skb, "rxkad_2_len", "XV2",
+                                            RXKADDATALEN);
                goto protocol_error;
        }
        offset += sizeof(sechdr);
@@ -458,12 +464,14 @@ static int rxkad_verify_packet_2(struct rxrpc_call *call, struct sk_buff *skb,
        check ^= seq ^ call->call_id;
        check &= 0xffff;
        if (check != 0) {
-               rxrpc_abort_call("V2C", call, seq, RXKADSEALEDINCON, EPROTO);
+               aborted = rxrpc_abort_eproto(call, skb, "rxkad_2_check", "V2C",
+                                            RXKADSEALEDINCON);
                goto protocol_error;
        }
 
        if (data_size > len) {
-               rxrpc_abort_call("V2L", call, seq, RXKADDATALEN, EPROTO);
+               aborted = rxrpc_abort_eproto(call, skb, "rxkad_2_datalen", "V2L",
+                                            RXKADDATALEN);
                goto protocol_error;
        }
 
@@ -471,8 +479,8 @@ static int rxkad_verify_packet_2(struct rxrpc_call *call, struct sk_buff *skb,
        return 0;
 
 protocol_error:
-       rxrpc_send_abort_packet(call);
-       _leave(" = -EPROTO");
+       if (aborted)
+               rxrpc_send_abort_packet(call);
        return -EPROTO;
 
 nomem:
@@ -491,6 +499,7 @@ static int rxkad_verify_packet(struct rxrpc_call *call, struct sk_buff *skb,
        SKCIPHER_REQUEST_ON_STACK(req, call->conn->cipher);
        struct rxrpc_crypt iv;
        struct scatterlist sg;
+       bool aborted;
        u16 cksum;
        u32 x, y;
 
@@ -522,10 +531,9 @@ static int rxkad_verify_packet(struct rxrpc_call *call, struct sk_buff *skb,
                cksum = 1; /* zero checksums are not permitted */
 
        if (cksum != expected_cksum) {
-               rxrpc_abort_call("VCK", call, seq, RXKADSEALEDINCON, EPROTO);
-               rxrpc_send_abort_packet(call);
-               _leave(" = -EPROTO [csum failed]");
-               return -EPROTO;
+               aborted = rxrpc_abort_eproto(call, skb, "rxkad_csum", "VCK",
+                                            RXKADSEALEDINCON);
+               goto protocol_error;
        }
 
        switch (call->conn->params.security_level) {
@@ -538,6 +546,11 @@ static int rxkad_verify_packet(struct rxrpc_call *call, struct sk_buff *skb,
        default:
                return -ENOANO;
        }
+
+protocol_error:
+       if (aborted)
+               rxrpc_send_abort_packet(call);
+       return -EPROTO;
 }
 
 /*
@@ -754,22 +767,23 @@ static int rxkad_respond_to_challenge(struct rxrpc_connection *conn,
        struct rxkad_response resp
                __attribute__((aligned(8))); /* must be aligned for crypto */
        struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
+       const char *eproto;
        u32 version, nonce, min_level, abort_code;
        int ret;
 
        _enter("{%d,%x}", conn->debug_id, key_serial(conn->params.key));
 
-       if (!conn->params.key) {
-               _leave(" = -EPROTO [no key]");
-               return -EPROTO;
-       }
+       eproto = tracepoint_string("chall_no_key");
+       abort_code = RX_PROTOCOL_ERROR;
+       if (!conn->params.key)
+               goto protocol_error;
 
+       abort_code = RXKADEXPIRED;
        ret = key_validate(conn->params.key);
-       if (ret < 0) {
-               *_abort_code = RXKADEXPIRED;
-               return ret;
-       }
+       if (ret < 0)
+               goto other_error;
 
+       eproto = tracepoint_string("chall_short");
        abort_code = RXKADPACKETSHORT;
        if (skb_copy_bits(skb, sizeof(struct rxrpc_wire_header),
                          &challenge, sizeof(challenge)) < 0)
@@ -782,13 +796,15 @@ static int rxkad_respond_to_challenge(struct rxrpc_connection *conn,
        _proto("Rx CHALLENGE %%%u { v=%u n=%u ml=%u }",
               sp->hdr.serial, version, nonce, min_level);
 
+       eproto = tracepoint_string("chall_ver");
        abort_code = RXKADINCONSISTENCY;
        if (version != RXKAD_VERSION)
                goto protocol_error;
 
        abort_code = RXKADLEVELFAIL;
+       ret = -EACCES;
        if (conn->params.security_level < min_level)
-               goto protocol_error;
+               goto other_error;
 
        token = conn->params.key->payload.data[0];
 
@@ -815,28 +831,34 @@ static int rxkad_respond_to_challenge(struct rxrpc_connection *conn,
        return rxkad_send_response(conn, &sp->hdr, &resp, token->kad);
 
 protocol_error:
+       trace_rxrpc_rx_eproto(NULL, sp->hdr.serial, eproto);
+       ret = -EPROTO;
+other_error:
        *_abort_code = abort_code;
-       _leave(" = -EPROTO [%d]", abort_code);
-       return -EPROTO;
+       return ret;
 }
 
 /*
  * decrypt the kerberos IV ticket in the response
  */
 static int rxkad_decrypt_ticket(struct rxrpc_connection *conn,
+                               struct sk_buff *skb,
                                void *ticket, size_t ticket_len,
                                struct rxrpc_crypt *_session_key,
                                time_t *_expiry,
                                u32 *_abort_code)
 {
        struct skcipher_request *req;
+       struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
        struct rxrpc_crypt iv, key;
        struct scatterlist sg[1];
        struct in_addr addr;
        unsigned int life;
+       const char *eproto;
        time_t issue, now;
        bool little_endian;
        int ret;
+       u32 abort_code;
        u8 *p, *q, *name, *end;
 
        _enter("{%d},{%x}", conn->debug_id, key_serial(conn->server_key));
@@ -847,11 +869,11 @@ static int rxkad_decrypt_ticket(struct rxrpc_connection *conn,
        if (ret < 0) {
                switch (ret) {
                case -EKEYEXPIRED:
-                       *_abort_code = RXKADEXPIRED;
-                       goto error;
+                       abort_code = RXKADEXPIRED;
+                       goto other_error;
                default:
-                       *_abort_code = RXKADNOAUTH;
-                       goto error;
+                       abort_code = RXKADNOAUTH;
+                       goto other_error;
                }
        }
 
@@ -860,13 +882,11 @@ static int rxkad_decrypt_ticket(struct rxrpc_connection *conn,
 
        memcpy(&iv, &conn->server_key->payload.data[2], sizeof(iv));
 
+       ret = -ENOMEM;
        req = skcipher_request_alloc(conn->server_key->payload.data[0],
                                     GFP_NOFS);
-       if (!req) {
-               *_abort_code = RXKADNOAUTH;
-               ret = -ENOMEM;
-               goto error;
-       }
+       if (!req)
+               goto temporary_error;
 
        sg_init_one(&sg[0], ticket, ticket_len);
        skcipher_request_set_callback(req, 0, NULL, NULL);
@@ -877,11 +897,12 @@ static int rxkad_decrypt_ticket(struct rxrpc_connection *conn,
        p = ticket;
        end = p + ticket_len;
 
-#define Z(size)                                                \
+#define Z(field)                                       \
        ({                                              \
                u8 *__str = p;                          \
+               eproto = tracepoint_string("rxkad_bad_"#field); \
                q = memchr(p, 0, end - p);              \
-               if (!q || q - p > (size))               \
+               if (!q || q - p > (field##_SZ))         \
                        goto bad_ticket;                \
                for (; p < q; p++)                      \
                        if (!isprint(*p))               \
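
The reworked Z() macro above leans on two preprocessor operators: #field stringifies the argument for the per-field trace tag, and field##_SZ pastes it into the matching limit macro, so Z(ANAME) checks against ANAME_SZ while tracing "rxkad_bad_ANAME". A small compilable demo of just those two operators (the body and names are invented for illustration):

#include <stdio.h>

#define ANAME_SZ 40

#define Z(field)						\
	do {							\
		const char *tag = "bad_" #field;		\
		unsigned int limit = field##_SZ;		\
		printf("field %s limited to %u bytes\n",	\
		       tag, limit);				\
	} while (0)

int main(void)
{
	Z(ANAME);	/* prints: field bad_ANAME limited to 40 bytes */
	return 0;
}
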
@@ -896,17 +917,18 @@ static int rxkad_decrypt_ticket(struct rxrpc_connection *conn,
        p++;
 
        /* extract the authentication name */
-       name = Z(ANAME_SZ);
+       name = Z(ANAME);
        _debug("KIV ANAME: %s", name);
 
        /* extract the principal's instance */
-       name = Z(INST_SZ);
+       name = Z(INST);
        _debug("KIV INST : %s", name);
 
        /* extract the principal's authentication domain */
-       name = Z(REALM_SZ);
+       name = Z(REALM);
        _debug("KIV REALM: %s", name);
 
+       eproto = tracepoint_string("rxkad_bad_len");
        if (end - p < 4 + 8 + 4 + 2)
                goto bad_ticket;
 
@@ -941,36 +963,37 @@ static int rxkad_decrypt_ticket(struct rxrpc_connection *conn,
 
        /* check the ticket is in date */
        if (issue > now) {
-               *_abort_code = RXKADNOAUTH;
+               abort_code = RXKADNOAUTH;
                ret = -EKEYREJECTED;
-               goto error;
+               goto other_error;
        }
 
        if (issue < now - life) {
-               *_abort_code = RXKADEXPIRED;
+               abort_code = RXKADEXPIRED;
                ret = -EKEYEXPIRED;
-               goto error;
+               goto other_error;
        }
 
        *_expiry = issue + life;
 
        /* get the service name */
-       name = Z(SNAME_SZ);
+       name = Z(SNAME);
        _debug("KIV SNAME: %s", name);
 
        /* get the service instance name */
-       name = Z(INST_SZ);
+       name = Z(INST);
        _debug("KIV SINST: %s", name);
-
-       ret = 0;
-error:
-       _leave(" = %d", ret);
-       return ret;
+       return 0;
 
 bad_ticket:
-       *_abort_code = RXKADBADTICKET;
-       ret = -EBADMSG;
-       goto error;
+       trace_rxrpc_rx_eproto(NULL, sp->hdr.serial, eproto);
+       abort_code = RXKADBADTICKET;
+       ret = -EPROTO;
+other_error:
+       *_abort_code = abort_code;
+       return ret;
+temporary_error:
+       return ret;
 }
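
The decrypt-ticket rewrite above splits failures into three tiers of labels: bad_ticket traces the error and falls into other_error with -EPROTO and an abort code, other_error reports a caller-visible abort, and temporary_error (for things like ENOMEM) returns without aborting so the exchange can be retried. A compact userspace sketch of that ladder (error values and names are illustrative, not the real rxkad constants):

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

static int demo_decrypt_ticket(int simulate, unsigned int *abort_code)
{
	void *buf;
	int ret;

	ret = -ENOMEM;
	buf = malloc(64);
	if (!buf)
		goto temporary_error;	/* retryable, no abort sent */

	if (simulate == 1) {		/* e.g. an expired key */
		free(buf);
		*abort_code = 19;	/* stand-in for RXKADEXPIRED */
		ret = -EKEYEXPIRED;
		goto other_error;
	}
	if (simulate == 2) {		/* malformed ticket */
		free(buf);
		goto bad_ticket;
	}

	free(buf);
	return 0;

bad_ticket:
	*abort_code = 21;		/* stand-in for RXKADBADTICKET */
	ret = -EPROTO;
	/* fall through */
other_error:
	return ret;			/* caller aborts the connection */
temporary_error:
	return ret;			/* caller may simply retry */
}

int main(void)
{
	unsigned int abort_code = 0;
	int ret = demo_decrypt_ticket(2, &abort_code);

	printf("ret=%d abort_code=%u\n", ret, abort_code);
	return 0;
}
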
 
 /*
@@ -1020,6 +1043,7 @@ static int rxkad_verify_response(struct rxrpc_connection *conn,
                __attribute__((aligned(8))); /* must be aligned for crypto */
        struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
        struct rxrpc_crypt session_key;
+       const char *eproto;
        time_t expiry;
        void *ticket;
        u32 abort_code, version, kvno, ticket_len, level;
@@ -1028,6 +1052,7 @@ static int rxkad_verify_response(struct rxrpc_connection *conn,
 
        _enter("{%d,%x}", conn->debug_id, key_serial(conn->server_key));
 
+       eproto = tracepoint_string("rxkad_rsp_short");
        abort_code = RXKADPACKETSHORT;
        if (skb_copy_bits(skb, sizeof(struct rxrpc_wire_header),
                          &response, sizeof(response)) < 0)
@@ -1041,40 +1066,43 @@ static int rxkad_verify_response(struct rxrpc_connection *conn,
        _proto("Rx RESPONSE %%%u { v=%u kv=%u tl=%u }",
               sp->hdr.serial, version, kvno, ticket_len);
 
+       eproto = tracepoint_string("rxkad_rsp_ver");
        abort_code = RXKADINCONSISTENCY;
        if (version != RXKAD_VERSION)
                goto protocol_error;
 
+       eproto = tracepoint_string("rxkad_rsp_tktlen");
        abort_code = RXKADTICKETLEN;
        if (ticket_len < 4 || ticket_len > MAXKRB5TICKETLEN)
                goto protocol_error;
 
+       eproto = tracepoint_string("rxkad_rsp_unkkey");
        abort_code = RXKADUNKNOWNKEY;
        if (kvno >= RXKAD_TKT_TYPE_KERBEROS_V5)
                goto protocol_error;
 
        /* extract the kerberos ticket and decrypt and decode it */
+       ret = -ENOMEM;
        ticket = kmalloc(ticket_len, GFP_NOFS);
        if (!ticket)
-               return -ENOMEM;
+               goto temporary_error;
 
+       eproto = tracepoint_string("rxkad_tkt_short");
        abort_code = RXKADPACKETSHORT;
        if (skb_copy_bits(skb, sizeof(struct rxrpc_wire_header),
                          ticket, ticket_len) < 0)
                goto protocol_error_free;
 
-       ret = rxkad_decrypt_ticket(conn, ticket, ticket_len, &session_key,
-                                  &expiry, &abort_code);
-       if (ret < 0) {
-               *_abort_code = abort_code;
-               kfree(ticket);
-               return ret;
-       }
+       ret = rxkad_decrypt_ticket(conn, skb, ticket, ticket_len, &session_key,
+                                  &expiry, _abort_code);
+       if (ret < 0)
+               goto temporary_error_free;
 
        /* use the session key from inside the ticket to decrypt the
         * response */
        rxkad_decrypt_response(conn, &response, &session_key);
 
+       eproto = tracepoint_string("rxkad_rsp_param");
        abort_code = RXKADSEALEDINCON;
        if (ntohl(response.encrypted.epoch) != conn->proto.epoch)
                goto protocol_error_free;
@@ -1085,6 +1113,7 @@ static int rxkad_verify_response(struct rxrpc_connection *conn,
        csum = response.encrypted.checksum;
        response.encrypted.checksum = 0;
        rxkad_calc_response_checksum(&response);
+       eproto = tracepoint_string("rxkad_rsp_csum");
        if (response.encrypted.checksum != csum)
                goto protocol_error_free;
 
@@ -1093,11 +1122,15 @@ static int rxkad_verify_response(struct rxrpc_connection *conn,
                struct rxrpc_call *call;
                u32 call_id = ntohl(response.encrypted.call_id[i]);
 
+               eproto = tracepoint_string("rxkad_rsp_callid");
                if (call_id > INT_MAX)
                        goto protocol_error_unlock;
 
+               eproto = tracepoint_string("rxkad_rsp_callctr");
                if (call_id < conn->channels[i].call_counter)
                        goto protocol_error_unlock;
+
+               eproto = tracepoint_string("rxkad_rsp_callst");
                if (call_id > conn->channels[i].call_counter) {
                        call = rcu_dereference_protected(
                                conn->channels[i].call,
@@ -1109,10 +1142,12 @@ static int rxkad_verify_response(struct rxrpc_connection *conn,
        }
        spin_unlock(&conn->channel_lock);
 
+       eproto = tracepoint_string("rxkad_rsp_seq");
        abort_code = RXKADOUTOFSEQUENCE;
        if (ntohl(response.encrypted.inc_nonce) != conn->security_nonce + 1)
                goto protocol_error_free;
 
+       eproto = tracepoint_string("rxkad_rsp_level");
        abort_code = RXKADLEVELFAIL;
        level = ntohl(response.encrypted.level);
        if (level > RXRPC_SECURITY_ENCRYPT)
@@ -1123,10 +1158,8 @@ static int rxkad_verify_response(struct rxrpc_connection *conn,
         * this the connection security can be handled in exactly the same way
         * as for a client connection */
        ret = rxrpc_get_server_data_key(conn, &session_key, expiry, kvno);
-       if (ret < 0) {
-               kfree(ticket);
-               return ret;
-       }
+       if (ret < 0)
+               goto temporary_error_free;
 
        kfree(ticket);
        _leave(" = 0");
@@ -1137,9 +1170,18 @@ protocol_error_unlock:
 protocol_error_free:
        kfree(ticket);
 protocol_error:
+       trace_rxrpc_rx_eproto(NULL, sp->hdr.serial, eproto);
        *_abort_code = abort_code;
-       _leave(" = -EPROTO [%d]", abort_code);
        return -EPROTO;
+
+temporary_error_free:
+       kfree(ticket);
+temporary_error:
+       /* Ignore the response packet if we got a temporary error such as
+        * ENOMEM.  We just want to send the challenge again.  Note that we
+        * also come out this way if the ticket decryption fails.
+        */
+       return ret;
 }
 
 /*
index bc2d3dcff9de76fcc42a20a3aeaec2305ebd2d6c..96ffa5d5733bd73249e32e2c10a420af9946e9b2 100644 (file)
@@ -488,6 +488,7 @@ rxrpc_new_client_call_for_sendmsg(struct rxrpc_sock *rx, struct msghdr *msg,
 int rxrpc_do_sendmsg(struct rxrpc_sock *rx, struct msghdr *msg, size_t len)
        __releases(&rx->sk.sk_lock.slock)
 {
+       enum rxrpc_call_state state;
        enum rxrpc_command cmd;
        struct rxrpc_call *call;
        unsigned long user_call_ID = 0;
@@ -526,13 +527,17 @@ int rxrpc_do_sendmsg(struct rxrpc_sock *rx, struct msghdr *msg, size_t len)
                        return PTR_ERR(call);
                /* ... and we have the call lock. */
        } else {
-               ret = -EBUSY;
-               if (call->state == RXRPC_CALL_UNINITIALISED ||
-                   call->state == RXRPC_CALL_CLIENT_AWAIT_CONN ||
-                   call->state == RXRPC_CALL_SERVER_PREALLOC ||
-                   call->state == RXRPC_CALL_SERVER_SECURING ||
-                   call->state == RXRPC_CALL_SERVER_ACCEPTING)
+               switch (READ_ONCE(call->state)) {
+               case RXRPC_CALL_UNINITIALISED:
+               case RXRPC_CALL_CLIENT_AWAIT_CONN:
+               case RXRPC_CALL_SERVER_PREALLOC:
+               case RXRPC_CALL_SERVER_SECURING:
+               case RXRPC_CALL_SERVER_ACCEPTING:
+                       ret = -EBUSY;
                        goto error_release_sock;
+               default:
+                       break;
+               }
 
                ret = mutex_lock_interruptible(&call->user_mutex);
                release_sock(&rx->sk);
@@ -542,25 +547,26 @@ int rxrpc_do_sendmsg(struct rxrpc_sock *rx, struct msghdr *msg, size_t len)
                }
        }
 
+       state = READ_ONCE(call->state);
        _debug("CALL %d USR %lx ST %d on CONN %p",
-              call->debug_id, call->user_call_ID, call->state, call->conn);
+              call->debug_id, call->user_call_ID, state, call->conn);
 
-       if (call->state >= RXRPC_CALL_COMPLETE) {
+       if (state >= RXRPC_CALL_COMPLETE) {
                /* it's too late for this call */
                ret = -ESHUTDOWN;
        } else if (cmd == RXRPC_CMD_SEND_ABORT) {
                ret = 0;
-               if (rxrpc_abort_call("CMD", call, 0, abort_code, ECONNABORTED))
+               if (rxrpc_abort_call("CMD", call, 0, abort_code, -ECONNABORTED))
                        ret = rxrpc_send_abort_packet(call);
        } else if (cmd != RXRPC_CMD_SEND_DATA) {
                ret = -EINVAL;
        } else if (rxrpc_is_client_call(call) &&
-                  call->state != RXRPC_CALL_CLIENT_SEND_REQUEST) {
+                  state != RXRPC_CALL_CLIENT_SEND_REQUEST) {
                /* request phase complete for this client call */
                ret = -EPROTO;
        } else if (rxrpc_is_service_call(call) &&
-                  call->state != RXRPC_CALL_SERVER_ACK_REQUEST &&
-                  call->state != RXRPC_CALL_SERVER_SEND_REPLY) {
+                  state != RXRPC_CALL_SERVER_ACK_REQUEST &&
+                  state != RXRPC_CALL_SERVER_SEND_REPLY) {
                /* Reply phase not begun or not complete for service call. */
                ret = -EPROTO;
        } else {
@@ -605,14 +611,22 @@ int rxrpc_kernel_send_data(struct socket *sock, struct rxrpc_call *call,
        _debug("CALL %d USR %lx ST %d on CONN %p",
               call->debug_id, call->user_call_ID, call->state, call->conn);
 
-       if (call->state >= RXRPC_CALL_COMPLETE) {
-               ret = -ESHUTDOWN; /* it's too late for this call */
-       } else if (call->state != RXRPC_CALL_CLIENT_SEND_REQUEST &&
-                  call->state != RXRPC_CALL_SERVER_ACK_REQUEST &&
-                  call->state != RXRPC_CALL_SERVER_SEND_REPLY) {
-               ret = -EPROTO; /* request phase complete for this client call */
-       } else {
+       switch (READ_ONCE(call->state)) {
+       case RXRPC_CALL_CLIENT_SEND_REQUEST:
+       case RXRPC_CALL_SERVER_ACK_REQUEST:
+       case RXRPC_CALL_SERVER_SEND_REPLY:
                ret = rxrpc_send_data(rxrpc_sk(sock->sk), call, msg, len);
+               break;
+       case RXRPC_CALL_COMPLETE:
+               read_lock_bh(&call->state_lock);
+               ret = -call->error;
+               read_unlock_bh(&call->state_lock);
+               break;
+       default:
+               /* Request phase complete for this client call */
+               trace_rxrpc_rx_eproto(call, 0, tracepoint_string("late_send"));
+               ret = -EPROTO;
+               break;
        }
 
        mutex_unlock(&call->user_mutex);
@@ -629,20 +643,24 @@ EXPORT_SYMBOL(rxrpc_kernel_send_data);
  * @error: Local error value
  * @why: 3-char string indicating why.
  *
- * Allow a kernel service to abort a call, if it's still in an abortable state.
+ * Allow a kernel service to abort a call if it's still in an abortable state,
+ * returning true if the call was aborted or false if it was already complete.
  */
-void rxrpc_kernel_abort_call(struct socket *sock, struct rxrpc_call *call,
+bool rxrpc_kernel_abort_call(struct socket *sock, struct rxrpc_call *call,
                             u32 abort_code, int error, const char *why)
 {
+       bool aborted;
+
        _enter("{%d},%d,%d,%s", call->debug_id, abort_code, error, why);
 
        mutex_lock(&call->user_mutex);
 
-       if (rxrpc_abort_call(why, call, 0, abort_code, error))
+       aborted = rxrpc_abort_call(why, call, 0, abort_code, error);
+       if (aborted)
                rxrpc_send_abort_packet(call);
 
        mutex_unlock(&call->user_mutex);
-       _leave("");
+       return aborted;
 }
 
 EXPORT_SYMBOL(rxrpc_kernel_abort_call);
index ab80629099622c47933efb36662a323f98f66773..f9bb43c25697e70d18fe9bbba90f6e98dfe05759 100644 (file)
@@ -113,6 +113,9 @@ static int tcf_connmark_init(struct net *net, struct nlattr *nla,
        if (ret < 0)
                return ret;
 
+       if (!tb[TCA_CONNMARK_PARMS])
+               return -EINVAL;
+
        parm = nla_data(tb[TCA_CONNMARK_PARMS]);
 
        if (!tcf_hash_check(tn, parm->index, a, bind)) {
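
The connmark fix above is the standard netlink defence: a parsed attribute table can contain NULL entries for attributes the sender omitted, so tb[TCA_CONNMARK_PARMS] must be checked before nla_data() dereferences it. A toy model of the same check (types are simplified stand-ins, not the real netlink API):

#include <stdio.h>

struct demo_nlattr {
	int len;
	unsigned char data[8];
};

static void *demo_nla_data(struct demo_nlattr *a)
{
	return a->data;		/* would crash if a were NULL */
}

static int demo_init(struct demo_nlattr *tb[], int parms)
{
	if (!tb[parms])
		return -22;	/* -EINVAL: attribute not supplied */
	printf("parms at %p\n", demo_nla_data(tb[parms]));
	return 0;
}

int main(void)
{
	struct demo_nlattr *tb[4] = { NULL, NULL, NULL, NULL };

	printf("ret=%d\n", demo_init(tb, 2));
	return 0;
}
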
index e978ccd4402cbc68ba1c46e20909a047978df1c2..6c319a40c1cc744734bd8d82b5d0c5baec26bbbc 100644 (file)
@@ -181,6 +181,9 @@ static int tcf_csum_ipv4_tcp(struct sk_buff *skb, unsigned int ihl,
        struct tcphdr *tcph;
        const struct iphdr *iph;
 
+       if (skb_is_gso(skb) && skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4)
+               return 1;
+
        tcph = tcf_csum_skb_nextlayer(skb, ihl, ipl, sizeof(*tcph));
        if (tcph == NULL)
                return 0;
@@ -202,6 +205,9 @@ static int tcf_csum_ipv6_tcp(struct sk_buff *skb, unsigned int ihl,
        struct tcphdr *tcph;
        const struct ipv6hdr *ip6h;
 
+       if (skb_is_gso(skb) && skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
+               return 1;
+
        tcph = tcf_csum_skb_nextlayer(skb, ihl, ipl, sizeof(*tcph));
        if (tcph == NULL)
                return 0;
@@ -225,6 +231,9 @@ static int tcf_csum_ipv4_udp(struct sk_buff *skb, unsigned int ihl,
        const struct iphdr *iph;
        u16 ul;
 
+       if (skb_is_gso(skb) && skb_shinfo(skb)->gso_type & SKB_GSO_UDP)
+               return 1;
+
        /*
         * Support both UDP and UDPLITE checksum algorithms; don't use
         * udph->len to get the real length without any protocol check,
@@ -278,6 +287,9 @@ static int tcf_csum_ipv6_udp(struct sk_buff *skb, unsigned int ihl,
        const struct ipv6hdr *ip6h;
        u16 ul;
 
+       if (skb_is_gso(skb) && skb_shinfo(skb)->gso_type & SKB_GSO_UDP)
+               return 1;
+
        /*
         * Support both UDP and UDPLITE checksum algorithms; don't use
         * udph->len to get the real length without any protocol check,
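
The four skb_is_gso() guards added to act_csum share one idea: a GSO packet's checksums will be finalised when it is segmented, so the action reports success and leaves the shared header alone rather than recomputing it in place. Reduced to its control flow (the flag values below are invented stand-ins for SKB_GSO_* bits):

#include <stdio.h>

#define DEMO_GSO_TCPV4 0x1
#define DEMO_GSO_UDP   0x4

static int demo_csum_ipv4_tcp(unsigned int gso_type)
{
	if (gso_type & DEMO_GSO_TCPV4)
		return 1;	/* success: segmentation will fix it up */

	/* ...locate the TCP header and recompute the checksum... */
	return 1;
}

int main(void)
{
	printf("gso path: %d\n", demo_csum_ipv4_tcp(DEMO_GSO_TCPV4));
	return 0;
}
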
index 71e7ff22f7c92a86cacad9a1b8d18d3d726f52fb..c75ea5c9102c50f5810b52bf7b1d0e42a7acdfd2 100644 (file)
@@ -603,8 +603,8 @@ nla_put_failure:
        return -1;
 }
 
-int find_decode_metaid(struct sk_buff *skb, struct tcf_ife_info *ife,
-                      u16 metaid, u16 mlen, void *mdata)
+static int find_decode_metaid(struct sk_buff *skb, struct tcf_ife_info *ife,
+                             u16 metaid, u16 mlen, void *mdata)
 {
        struct tcf_meta_info *e;
 
index 3b7074e2302487808dc1d16b01143d0b292ebe4e..c736627f8f4a0e0ff86db535ec95459a417e4ada 100644 (file)
@@ -228,7 +228,6 @@ static int tcf_skbmod_dump(struct sk_buff *skb, struct tc_action *a,
 
        return skb->len;
 nla_put_failure:
-       rcu_read_unlock();
        nlmsg_trim(skb, b);
        return -1;
 }
index 3d6b9286c203f298b14b5254e5c12cb4781eb4b1..ca193af8634ad71a80e490c4ce916c4cedfd543f 100644 (file)
@@ -508,9 +508,8 @@ static int flow_change(struct net *net, struct sk_buff *in_skb,
                get_random_bytes(&fnew->hashrnd, 4);
        }
 
-       fnew->perturb_timer.function = flow_perturbation;
-       fnew->perturb_timer.data = (unsigned long)fnew;
-       init_timer_deferrable(&fnew->perturb_timer);
+       setup_deferrable_timer(&fnew->perturb_timer, flow_perturbation,
+                              (unsigned long)fnew);
 
        tcf_exts_change(tp, &fnew->exts, &e);
        tcf_em_tree_change(tp, &fnew->ematches, &t);
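
The timer changes in sch_flow and sch_sfq are a mechanical conversion: three open-coded field assignments become one setup_deferrable_timer() call that fills in the callback and cookie and marks the timer deferrable. A stand-alone sketch of what such a helper bundles up (the struct and helper are simplified stand-ins for the kernel's):

#include <stdio.h>

struct demo_timer {
	void (*function)(unsigned long);
	unsigned long data;
	int deferrable;
};

static void demo_setup_deferrable_timer(struct demo_timer *t,
					void (*fn)(unsigned long),
					unsigned long data)
{
	t->function = fn;
	t->data = data;
	t->deferrable = 1;	/* the kernel sets a timer flag here */
}

static void perturbation(unsigned long cookie)
{
	printf("perturb sched data at %#lx\n", cookie);
}

int main(void)
{
	struct demo_timer t;

	demo_setup_deferrable_timer(&t, perturbation, 0x1000UL);
	t.function(t.data);
	return 0;
}
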
index bcf49cd2278670197f2a7e9d4e9a62ae8d117468..62567bfe52c723262a291360cecd572fefced164 100644 (file)
@@ -274,7 +274,7 @@ static struct Qdisc *qdisc_match_from_root(struct Qdisc *root, u32 handle)
        return NULL;
 }
 
-void qdisc_hash_add(struct Qdisc *q)
+void qdisc_hash_add(struct Qdisc *q, bool invisible)
 {
        if ((q->parent != TC_H_ROOT) && !(q->flags & TCQ_F_INGRESS)) {
                struct Qdisc *root = qdisc_dev(q)->qdisc;
@@ -282,6 +282,8 @@ void qdisc_hash_add(struct Qdisc *q)
                WARN_ON_ONCE(root == &noop_qdisc);
                ASSERT_RTNL();
                hash_add_rcu(qdisc_dev(q)->qdisc_hash, &q->hash, q->handle);
+               if (invisible)
+                       q->flags |= TCQ_F_INVISIBLE;
        }
 }
 EXPORT_SYMBOL(qdisc_hash_add);
@@ -1003,7 +1005,7 @@ static struct Qdisc *qdisc_create(struct net_device *dev,
                                goto err_out4;
                }
 
-               qdisc_hash_add(sch);
+               qdisc_hash_add(sch, false);
 
                return sch;
        }
@@ -1401,9 +1403,14 @@ nla_put_failure:
        return -1;
 }
 
-static bool tc_qdisc_dump_ignore(struct Qdisc *q)
+static bool tc_qdisc_dump_ignore(struct Qdisc *q, bool dump_invisible)
 {
-       return (q->flags & TCQ_F_BUILTIN) ? true : false;
+       if (q->flags & TCQ_F_BUILTIN)
+               return true;
+       if ((q->flags & TCQ_F_INVISIBLE) && !dump_invisible)
+               return true;
+
+       return false;
 }
 
 static int qdisc_notify(struct net *net, struct sk_buff *oskb,
@@ -1417,12 +1424,12 @@ static int qdisc_notify(struct net *net, struct sk_buff *oskb,
        if (!skb)
                return -ENOBUFS;
 
-       if (old && !tc_qdisc_dump_ignore(old)) {
+       if (old && !tc_qdisc_dump_ignore(old, false)) {
                if (tc_fill_qdisc(skb, old, clid, portid, n->nlmsg_seq,
                                  0, RTM_DELQDISC) < 0)
                        goto err_out;
        }
-       if (new && !tc_qdisc_dump_ignore(new)) {
+       if (new && !tc_qdisc_dump_ignore(new, false)) {
                if (tc_fill_qdisc(skb, new, clid, portid, n->nlmsg_seq,
                                  old ? NLM_F_REPLACE : 0, RTM_NEWQDISC) < 0)
                        goto err_out;
@@ -1439,7 +1446,8 @@ err_out:
 
 static int tc_dump_qdisc_root(struct Qdisc *root, struct sk_buff *skb,
                              struct netlink_callback *cb,
-                             int *q_idx_p, int s_q_idx, bool recur)
+                             int *q_idx_p, int s_q_idx, bool recur,
+                             bool dump_invisible)
 {
        int ret = 0, q_idx = *q_idx_p;
        struct Qdisc *q;
@@ -1452,7 +1460,7 @@ static int tc_dump_qdisc_root(struct Qdisc *root, struct sk_buff *skb,
        if (q_idx < s_q_idx) {
                q_idx++;
        } else {
-               if (!tc_qdisc_dump_ignore(q) &&
+               if (!tc_qdisc_dump_ignore(q, dump_invisible) &&
                    tc_fill_qdisc(skb, q, q->parent, NETLINK_CB(cb->skb).portid,
                                  cb->nlh->nlmsg_seq, NLM_F_MULTI,
                                  RTM_NEWQDISC) <= 0)
@@ -1474,7 +1482,7 @@ static int tc_dump_qdisc_root(struct Qdisc *root, struct sk_buff *skb,
                        q_idx++;
                        continue;
                }
-               if (!tc_qdisc_dump_ignore(q) &&
+               if (!tc_qdisc_dump_ignore(q, dump_invisible) &&
                    tc_fill_qdisc(skb, q, q->parent, NETLINK_CB(cb->skb).portid,
                                  cb->nlh->nlmsg_seq, NLM_F_MULTI,
                                  RTM_NEWQDISC) <= 0)
@@ -1496,12 +1504,21 @@ static int tc_dump_qdisc(struct sk_buff *skb, struct netlink_callback *cb)
        int idx, q_idx;
        int s_idx, s_q_idx;
        struct net_device *dev;
+       const struct nlmsghdr *nlh = cb->nlh;
+       struct tcmsg *tcm = nlmsg_data(nlh);
+       struct nlattr *tca[TCA_MAX + 1];
+       int err;
 
        s_idx = cb->args[0];
        s_q_idx = q_idx = cb->args[1];
 
        idx = 0;
        ASSERT_RTNL();
+
+       err = nlmsg_parse(nlh, sizeof(*tcm), tca, TCA_MAX, NULL);
+       if (err < 0)
+               return err;
+
        for_each_netdev(net, dev) {
                struct netdev_queue *dev_queue;
 
@@ -1512,13 +1529,14 @@ static int tc_dump_qdisc(struct sk_buff *skb, struct netlink_callback *cb)
                q_idx = 0;
 
                if (tc_dump_qdisc_root(dev->qdisc, skb, cb, &q_idx, s_q_idx,
-                                      true) < 0)
+                                      true, tca[TCA_DUMP_INVISIBLE]) < 0)
                        goto done;
 
                dev_queue = dev_ingress_queue(dev);
                if (dev_queue &&
                    tc_dump_qdisc_root(dev_queue->qdisc_sleeping, skb, cb,
-                                      &q_idx, s_q_idx, false) < 0)
+                                      &q_idx, s_q_idx, false,
+                                      tca[TCA_DUMP_INVISIBLE]) < 0)
                        goto done;
 
 cont:
@@ -1762,7 +1780,7 @@ static int tc_dump_tclass_qdisc(struct Qdisc *q, struct sk_buff *skb,
 {
        struct qdisc_dump_args arg;
 
-       if (tc_qdisc_dump_ignore(q) ||
+       if (tc_qdisc_dump_ignore(q, false) ||
            *t_p < s_t || !q->ops->cl_ops ||
            (tcm->tcm_parent &&
             TC_H_MAJ(tcm->tcm_parent) != q->handle)) {
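
The TCQ_F_INVISIBLE machinery ties the whole sch_* series together: default child qdiscs are now registered with qdisc_hash_add(q, true) so they carry the invisible flag, and dumps skip them unless the request carries TCA_DUMP_INVISIBLE. The filter itself reduces to two flag tests (flag values below are invented stand-ins for TCQ_F_*):

#include <stdbool.h>
#include <stdio.h>

#define DEMO_F_BUILTIN   0x01
#define DEMO_F_INVISIBLE 0x80

static bool demo_dump_ignore(unsigned int flags, bool dump_invisible)
{
	if (flags & DEMO_F_BUILTIN)
		return true;
	if ((flags & DEMO_F_INVISIBLE) && !dump_invisible)
		return true;
	return false;
}

int main(void)
{
	printf("hidden by default: %d, shown on request: %d\n",
	       demo_dump_ignore(DEMO_F_INVISIBLE, false),
	       !demo_dump_ignore(DEMO_F_INVISIBLE, true));
	return 0;
}
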
index d6ca18dc04c3e9e72efedd44088e95118a06b711..cf93e5ff3d630e50442d65b5440883fb8467e6a0 100644 (file)
@@ -1161,6 +1161,8 @@ static int cbq_init(struct Qdisc *sch, struct nlattr *opt)
                                      sch->handle);
        if (!q->link.q)
                q->link.q = &noop_qdisc;
+       else
+               qdisc_hash_add(q->link.q, true);
 
        q->link.priority = TC_CBQ_MAXPRIO - 1;
        q->link.priority2 = TC_CBQ_MAXPRIO - 1;
@@ -1600,6 +1602,9 @@ cbq_change_class(struct Qdisc *sch, u32 classid, u32 parentid, struct nlattr **t
        cl->q = qdisc_create_dflt(sch->dev_queue, &pfifo_qdisc_ops, classid);
        if (!cl->q)
                cl->q = &noop_qdisc;
+       else
+               qdisc_hash_add(cl->q, true);
+
        cl->common.classid = classid;
        cl->tparent = parent;
        cl->qdisc = sch;
index 3b86a97bc67c3e953cb181eddcb5c0c16bf3b27f..593183a5b5b59013bfe7c59ce9e143879b793c4c 100644 (file)
@@ -58,7 +58,6 @@ struct choke_sched_data {
 
 /* Variables */
        struct red_vars  vars;
-       struct tcf_proto __rcu *filter_list;
        struct {
                u32     prob_drop;      /* Early probability drops */
                u32     prob_mark;      /* Early probability marks */
@@ -152,11 +151,6 @@ static inline void choke_set_classid(struct sk_buff *skb, u16 classid)
        choke_skb_cb(skb)->classid = classid;
 }
 
-static u16 choke_get_classid(const struct sk_buff *skb)
-{
-       return choke_skb_cb(skb)->classid;
-}
-
 /*
  * Compare flow of two packets
  *  Returns true only if source and destination address and port match.
@@ -187,40 +181,6 @@ static bool choke_match_flow(struct sk_buff *skb1,
                       sizeof(choke_skb_cb(skb1)->keys));
 }
 
-/*
- * Classify flow using either:
- *  1. pre-existing classification result in skb
- *  2. fast internal classification
- *  3. use TC filter based classification
- */
-static bool choke_classify(struct sk_buff *skb,
-                          struct Qdisc *sch, int *qerr)
-
-{
-       struct choke_sched_data *q = qdisc_priv(sch);
-       struct tcf_result res;
-       struct tcf_proto *fl;
-       int result;
-
-       fl = rcu_dereference_bh(q->filter_list);
-       result = tc_classify(skb, fl, &res, false);
-       if (result >= 0) {
-#ifdef CONFIG_NET_CLS_ACT
-               switch (result) {
-               case TC_ACT_STOLEN:
-               case TC_ACT_QUEUED:
-                       *qerr = NET_XMIT_SUCCESS | __NET_XMIT_STOLEN;
-               case TC_ACT_SHOT:
-                       return false;
-               }
-#endif
-               choke_set_classid(skb, TC_H_MIN(res.classid));
-               return true;
-       }
-
-       return false;
-}
-
 /*
  * Select a packet at random from queue
  * HACK: since queue can have holes from previous deletion; retry several
@@ -257,25 +217,15 @@ static bool choke_match_random(const struct choke_sched_data *q,
                return false;
 
        oskb = choke_peek_random(q, pidx);
-       if (rcu_access_pointer(q->filter_list))
-               return choke_get_classid(nskb) == choke_get_classid(oskb);
-
        return choke_match_flow(oskb, nskb);
 }
 
 static int choke_enqueue(struct sk_buff *skb, struct Qdisc *sch,
                         struct sk_buff **to_free)
 {
-       int ret = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
        struct choke_sched_data *q = qdisc_priv(sch);
        const struct red_parms *p = &q->parms;
 
-       if (rcu_access_pointer(q->filter_list)) {
-               /* If using external classifiers, get result and record it. */
-               if (!choke_classify(skb, sch, &ret))
-                       goto other_drop;        /* Packet was eaten by filter */
-       }
-
        choke_skb_cb(skb)->keys_valid = 0;
        /* Compute average queue usage (see RED) */
        q->vars.qavg = red_calc_qavg(p, &q->vars, sch->q.qlen);
@@ -339,12 +289,6 @@ static int choke_enqueue(struct sk_buff *skb, struct Qdisc *sch,
 congestion_drop:
        qdisc_drop(skb, sch, to_free);
        return NET_XMIT_CN;
-
-other_drop:
-       if (ret & __NET_XMIT_BYPASS)
-               qdisc_qstats_drop(sch);
-       __qdisc_drop(skb, to_free);
-       return ret;
 }
 
 static struct sk_buff *choke_dequeue(struct Qdisc *sch)
@@ -538,7 +482,6 @@ static void choke_destroy(struct Qdisc *sch)
 {
        struct choke_sched_data *q = qdisc_priv(sch);
 
-       tcf_destroy_chain(&q->filter_list);
        choke_free(q->tab);
 }
 
index bb4cbdf7500482b170eef6e7923cf2f2259e52b5..9fe67e257dfa8a52b38142a9269fe363616e1187 100644 (file)
@@ -117,6 +117,8 @@ static int drr_change_class(struct Qdisc *sch, u32 classid, u32 parentid,
                                               &pfifo_qdisc_ops, classid);
        if (cl->qdisc == NULL)
                cl->qdisc = &noop_qdisc;
+       else
+               qdisc_hash_add(cl->qdisc, true);
 
        if (tca[TCA_RATE]) {
                err = gen_replace_estimator(&cl->bstats, NULL, &cl->rate_est,
index 802ac7c2e5e87eed1341ba4c09d3e5d70bc75876..cfa1f2cdbaf73120bdb98abd109c4c3ff7723ab4 100644 (file)
@@ -201,9 +201,13 @@ static int dsmark_enqueue(struct sk_buff *skb, struct Qdisc *sch,
        pr_debug("%s(skb %p,sch %p,[qdisc %p])\n", __func__, skb, sch, p);
 
        if (p->set_tc_index) {
+               int wlen = skb_network_offset(skb);
+
                switch (tc_skb_protocol(skb)) {
                case htons(ETH_P_IP):
-                       if (skb_cow_head(skb, sizeof(struct iphdr)))
+                       wlen += sizeof(struct iphdr);
+                       if (!pskb_may_pull(skb, wlen) ||
+                           skb_try_make_writable(skb, wlen))
                                goto drop;
 
                        skb->tc_index = ipv4_get_dsfield(ip_hdr(skb))
@@ -211,7 +215,9 @@ static int dsmark_enqueue(struct sk_buff *skb, struct Qdisc *sch,
                        break;
 
                case htons(ETH_P_IPV6):
-                       if (skb_cow_head(skb, sizeof(struct ipv6hdr)))
+                       wlen += sizeof(struct ipv6hdr);
+                       if (!pskb_may_pull(skb, wlen) ||
+                           skb_try_make_writable(skb, wlen))
                                goto drop;
 
                        skb->tc_index = ipv6_get_dsfield(ipv6_hdr(skb))
@@ -368,6 +374,8 @@ static int dsmark_init(struct Qdisc *sch, struct nlattr *opt)
        p->q = qdisc_create_dflt(sch->dev_queue, &pfifo_qdisc_ops, sch->handle);
        if (p->q == NULL)
                p->q = &noop_qdisc;
+       else
+               qdisc_hash_add(p->q, true);
 
        pr_debug("%s: qdisc %p\n", __func__, p->q);
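
The dsmark change swaps skb_cow_head() for a pskb_may_pull()/skb_try_make_writable() pair: before rewriting the DS field, the qdisc now checks both that the bytes up to the end of the IP header are actually present in the linear area and that they are privately writable. A simplified model of that two-step guard (the buffer model is invented for the demo):

#include <stdio.h>

struct demo_buf {
	unsigned char data[64];
	unsigned int len;	/* bytes actually present */
	int shared;		/* someone else holds a reference */
};

static int demo_may_pull(struct demo_buf *b, unsigned int wlen)
{
	return b->len >= wlen;
}

static int demo_make_writable(struct demo_buf *b, unsigned int wlen)
{
	(void)wlen;
	return b->shared ? -1 : 0;	/* the kernel would copy here */
}

static int demo_set_ds(struct demo_buf *b, unsigned int net_off,
		       unsigned int hdr_len, unsigned char ds)
{
	unsigned int wlen = net_off + hdr_len;

	if (!demo_may_pull(b, wlen) || demo_make_writable(b, wlen))
		return -1;		/* drop instead of writing junk */
	b->data[net_off + 1] = ds;	/* IPv4 TOS byte, for the demo */
	return 0;
}

int main(void)
{
	struct demo_buf b = { .len = 40, .shared = 0 };

	printf("set ds: %d\n", demo_set_ds(&b, 0, 20, 0x2e));
	return 0;
}
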
 
index 9f3a884d15903fd9012c01b5eee802e02f9f709e..097bbe9857a55f9ce3b41d07a834c197639469d1 100644 (file)
@@ -288,7 +288,6 @@ static struct sk_buff *fq_codel_dequeue(struct Qdisc *sch)
        struct fq_codel_flow *flow;
        struct list_head *head;
        u32 prev_drop_count, prev_ecn_mark;
-       unsigned int prev_backlog;
 
 begin:
        head = &q->new_flows;
@@ -307,7 +306,6 @@ begin:
 
        prev_drop_count = q->cstats.drop_count;
        prev_ecn_mark = q->cstats.ecn_mark;
-       prev_backlog = sch->qstats.backlog;
 
        skb = codel_dequeue(sch, &sch->qstats.backlog, &q->cparams,
                            &flow->cvars, &q->cstats, qdisc_pkt_len,
index b052b27a984e39c244c94132f1162a7033e5cc63..3e64d23e098cff218eea7ea0371302a535e6935c 100644 (file)
@@ -795,7 +795,7 @@ static void attach_default_qdiscs(struct net_device *dev)
        }
 #ifdef CONFIG_NET_SCHED
        if (dev->qdisc)
-               qdisc_hash_add(dev->qdisc);
+               qdisc_hash_add(dev->qdisc, false);
 #endif
 }
 
index 3ffaa6fb0990f0aa31487a2f1829b1f1accf8b21..0198c6cdda4973a0e4d9ac96e1c10c242d0954e9 100644 (file)
@@ -1066,6 +1066,8 @@ hfsc_change_class(struct Qdisc *sch, u32 classid, u32 parentid,
                                      &pfifo_qdisc_ops, classid);
        if (cl->qdisc == NULL)
                cl->qdisc = &noop_qdisc;
+       else
+               qdisc_hash_add(cl->qdisc, true);
        INIT_LIST_HEAD(&cl->children);
        cl->vt_tree = RB_ROOT;
        cl->cf_tree = RB_ROOT;
@@ -1425,6 +1427,8 @@ hfsc_init_qdisc(struct Qdisc *sch, struct nlattr *opt)
                                          sch->handle);
        if (q->root.qdisc == NULL)
                q->root.qdisc = &noop_qdisc;
+       else
+               qdisc_hash_add(q->root.qdisc, true);
        INIT_LIST_HEAD(&q->root.children);
        q->root.vt_tree = RB_ROOT;
        q->root.cf_tree = RB_ROOT;
index 4cd5fb134bc9e2dbcdd61b51fb951f94301ed54c..95867033542ec4c889e3c1e7ebd266700aafbef7 100644 (file)
@@ -1460,6 +1460,8 @@ static int htb_change_class(struct Qdisc *sch, u32 classid,
                qdisc_class_hash_insert(&q->clhash, &cl->common);
                if (parent)
                        parent->children++;
+               if (cl->un.leaf.q != &noop_qdisc)
+                       qdisc_hash_add(cl->un.leaf.q, true);
        } else {
                if (tca[TCA_RATE]) {
                        err = gen_replace_estimator(&cl->bstats, NULL,
index 20b7f1646f69270e08d8b7588759a0146f262e89..cadfdd4f1e521b3d68b8fa62d5797f3ff604651d 100644 (file)
@@ -84,7 +84,7 @@ static void mq_attach(struct Qdisc *sch)
                        qdisc_destroy(old);
 #ifdef CONFIG_NET_SCHED
                if (ntx < dev->real_num_tx_queues)
-                       qdisc_hash_add(qdisc);
+                       qdisc_hash_add(qdisc, false);
 #endif
 
        }
index 922683418e53853cb71747d8d30ab0e4a989254b..0a4cf27ea54bd78768d4fa084f7b082460f5f266 100644 (file)
 
 struct mqprio_sched {
        struct Qdisc            **qdiscs;
-       int hw_owned;
+       int hw_offload;
 };
 
 static void mqprio_destroy(struct Qdisc *sch)
 {
        struct net_device *dev = qdisc_dev(sch);
        struct mqprio_sched *priv = qdisc_priv(sch);
-       struct tc_to_netdev tc = {.type = TC_SETUP_MQPRIO};
        unsigned int ntx;
 
        if (priv->qdiscs) {
@@ -39,10 +38,15 @@ static void mqprio_destroy(struct Qdisc *sch)
                kfree(priv->qdiscs);
        }
 
-       if (priv->hw_owned && dev->netdev_ops->ndo_setup_tc)
+       if (priv->hw_offload && dev->netdev_ops->ndo_setup_tc) {
+               struct tc_mqprio_qopt offload = { 0 };
+               struct tc_to_netdev tc = { .type = TC_SETUP_MQPRIO,
+                                          { .mqprio = &offload } };
+
                dev->netdev_ops->ndo_setup_tc(dev, sch->handle, 0, &tc);
-       else
+       } else {
                netdev_set_num_tc(dev, 0);
+       }
 }
 
 static int mqprio_parse_opt(struct net_device *dev, struct tc_mqprio_qopt *qopt)
@@ -59,15 +63,20 @@ static int mqprio_parse_opt(struct net_device *dev, struct tc_mqprio_qopt *qopt)
                        return -EINVAL;
        }
 
-       /* net_device does not support requested operation */
-       if (qopt->hw && !dev->netdev_ops->ndo_setup_tc)
-               return -EINVAL;
+       /* Limit qopt->hw to the maximum supported offload value.  Drivers
+        * have the option of overriding this later if they don't support a
+        * given offload type.
+        */
+       if (qopt->hw > TC_MQPRIO_HW_OFFLOAD_MAX)
+               qopt->hw = TC_MQPRIO_HW_OFFLOAD_MAX;
 
-       /* if hw owned qcount and qoffset are taken from LLD so
-        * no reason to verify them here
+       /* If hardware offload is requested, we will leave it to the device
+        * either to populate the queue counts itself or to validate the
+        * provided queue counts.  If ndo_setup_tc is not present, the
+        * hardware doesn't support offload and we should return an error.
         */
        if (qopt->hw)
-               return 0;
+               return dev->netdev_ops->ndo_setup_tc ? 0 : -EINVAL;
 
        for (i = 0; i < qopt->num_tc; i++) {
                unsigned int last = qopt->offset[i] + qopt->count[i];
@@ -139,13 +148,15 @@ static int mqprio_init(struct Qdisc *sch, struct nlattr *opt)
         * supplied and verified mapping
         */
        if (qopt->hw) {
-               struct tc_to_netdev tc = {.type = TC_SETUP_MQPRIO,
-                                         { .tc = qopt->num_tc }};
+               struct tc_mqprio_qopt offload = *qopt;
+               struct tc_to_netdev tc = { .type = TC_SETUP_MQPRIO,
+                                          { .mqprio = &offload } };
 
-               priv->hw_owned = 1;
                err = dev->netdev_ops->ndo_setup_tc(dev, sch->handle, 0, &tc);
                if (err)
                        return err;
+
+               priv->hw_offload = offload.hw;
        } else {
                netdev_set_num_tc(dev, qopt->num_tc);
                for (i = 0; i < qopt->num_tc; i++)
@@ -175,7 +186,7 @@ static void mqprio_attach(struct Qdisc *sch)
                if (old)
                        qdisc_destroy(old);
                if (ntx < dev->real_num_tx_queues)
-                       qdisc_hash_add(qdisc);
+                       qdisc_hash_add(qdisc, false);
        }
        kfree(priv->qdiscs);
        priv->qdiscs = NULL;
@@ -243,7 +254,7 @@ static int mqprio_dump(struct Qdisc *sch, struct sk_buff *skb)
 
        opt.num_tc = netdev_get_num_tc(dev);
        memcpy(opt.prio_tc_map, dev->prio_tc_map, sizeof(opt.prio_tc_map));
-       opt.hw = priv->hw_owned;
+       opt.hw = priv->hw_offload;
 
        for (i = 0; i < netdev_get_num_tc(dev); i++) {
                opt.count[i] = dev->tc_to_txq[i].count;
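
The mqprio rework changes the offload handshake from a bare "hw owned" bit to a negotiation: the qdisc copies the user's tc_mqprio_qopt into the tc_to_netdev union, the driver may lower the requested level or fill in queue counts, and the qdisc records whatever was granted in hw_offload. The shape of that exchange, with simplified stand-in types:

#include <stdio.h>

struct demo_mqprio_qopt {
	unsigned char num_tc;
	unsigned char hw;	/* requested, then granted, level */
};

static int demo_ndo_setup_tc(struct demo_mqprio_qopt *opt)
{
	if (opt->hw > 1)
		opt->hw = 1;	/* driver only does basic TC offload */
	return 0;
}

int main(void)
{
	struct demo_mqprio_qopt offload = { .num_tc = 4, .hw = 2 };
	int hw_offload = 0;

	if (demo_ndo_setup_tc(&offload) == 0)
		hw_offload = offload.hw;	/* record granted level */
	printf("granted offload level %d\n", hw_offload);
	return 0;
}
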
index e7839a0d0eaa52572f675fdb1dfc590c2a70ac76..43a3a10b3c8118fc2e0deff98be2635d2ad81330 100644 (file)
@@ -217,6 +217,8 @@ static int multiq_tune(struct Qdisc *sch, struct nlattr *opt)
                                sch_tree_lock(sch);
                                old = q->queues[i];
                                q->queues[i] = child;
+                               if (child != &noop_qdisc)
+                                       qdisc_hash_add(child, true);
 
                                if (old != &noop_qdisc) {
                                        qdisc_tree_reduce_backlog(old,
index c8bb62a1e7449344a0fd81241fe0102ea2f9c0f9..94b4928ad4134b25e0a28baf1aaa4879b18f56c0 100644 (file)
@@ -462,7 +462,7 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch,
        /* If a delay is expected, orphan the skb. (orphaning usually takes
         * place at TX completion time, so _before_ the link transit delay)
         */
-       if (q->latency || q->jitter)
+       if (q->latency || q->jitter || q->rate)
                skb_orphan_partial(skb);
 
        /*
@@ -530,21 +530,31 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch,
                now = psched_get_time();
 
                if (q->rate) {
-                       struct sk_buff *last;
+                       struct netem_skb_cb *last = NULL;
+
+                       if (sch->q.tail)
+                               last = netem_skb_cb(sch->q.tail);
+                       if (q->t_root.rb_node) {
+                               struct sk_buff *t_skb;
+                               struct netem_skb_cb *t_last;
+
+                               t_skb = netem_rb_to_skb(rb_last(&q->t_root));
+                               t_last = netem_skb_cb(t_skb);
+                               if (!last ||
+                                   t_last->time_to_send > last->time_to_send) {
+                                       last = t_last;
+                               }
+                       }
 
-                       if (sch->q.qlen)
-                               last = sch->q.tail;
-                       else
-                               last = netem_rb_to_skb(rb_last(&q->t_root));
                        if (last) {
                                /*
                                 * Last packet in queue is reference point (now),
                                 * calculate this time bonus and subtract
                                 * from delay.
                                 */
-                               delay -= netem_skb_cb(last)->time_to_send - now;
+                               delay -= last->time_to_send - now;
                                delay = max_t(psched_tdiff_t, 0, delay);
-                               now = netem_skb_cb(last)->time_to_send;
+                               now = last->time_to_send;
                        }
 
                        delay += packet_len_2_sched_time(qdisc_pkt_len(skb), q);
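
Two netem fixes above. First, skbs are now orphaned whenever a rate is
configured, not only for latency/jitter, since rate shaping also holds
packets long enough to stall the sender. Second, netem queues packets
in two places, the plain tfifo list (sch->q) and the time-sorted
rbtree (q->t_root), so the rate-shaping reference point must be
whichever of the two tails leaves last rather than one queue or the
other as before. The selection logic, restated as a helper using only
names from the hunk (a sketch, not part of the patch):

    /* Hedged sketch: find the queued packet with the latest
     * time_to_send across both of netem's internal queues.
     */
    static struct netem_skb_cb *netem_last_cb(struct Qdisc *sch,
                                              struct netem_sched_data *q)
    {
            struct netem_skb_cb *last = NULL;

            if (sch->q.tail)
                    last = netem_skb_cb(sch->q.tail);
            if (q->t_root.rb_node) {
                    struct netem_skb_cb *cb =
                            netem_skb_cb(netem_rb_to_skb(rb_last(&q->t_root)));

                    if (!last || cb->time_to_send > last->time_to_send)
                            last = cb;
            }
            return last;
    }
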
index d4d7db267b6edfa56582ca4a588590e0ded9fe66..92c2e6d448d7984af35d6beb2cb3aea717b76511 100644 (file)
@@ -192,8 +192,11 @@ static int prio_tune(struct Qdisc *sch, struct nlattr *opt)
                qdisc_destroy(child);
        }
 
-       for (i = oldbands; i < q->bands; i++)
+       for (i = oldbands; i < q->bands; i++) {
                q->queues[i] = queues[i];
+               if (q->queues[i] != &noop_qdisc)
+                       qdisc_hash_add(q->queues[i], true);
+       }
 
        sch_tree_unlock(sch);
        return 0;
index f9e712ce2d15ce9280c31d2f75d62b84034ae51d..6c85f3e9239bbc2b127ca7b7e61826de3b57873c 100644 (file)
@@ -494,6 +494,8 @@ static int qfq_change_class(struct Qdisc *sch, u32 classid, u32 parentid,
                        goto destroy_class;
        }
 
+       if (cl->qdisc != &noop_qdisc)
+               qdisc_hash_add(cl->qdisc, true);
        sch_tree_lock(sch);
        qdisc_class_hash_insert(&q->clhash, &cl->common);
        sch_tree_unlock(sch);
index 249b2a18acbd99288eb0a2579a0f29c2ab0b3ded..799ea6dd69b266ccb25d52abab68116e3508b3cb 100644 (file)
@@ -191,6 +191,8 @@ static int red_change(struct Qdisc *sch, struct nlattr *opt)
                        return PTR_ERR(child);
        }
 
+       if (child != &noop_qdisc)
+               qdisc_hash_add(child, true);
        sch_tree_lock(sch);
        q->flags = ctl->flags;
        q->limit = ctl->limit;
index fe6963d2151956c508b510edec680b89201173ce..ae862f172c944283be1cbb56f971cf821cd12bf8 100644 (file)
@@ -513,6 +513,8 @@ static int sfb_change(struct Qdisc *sch, struct nlattr *opt)
        if (IS_ERR(child))
                return PTR_ERR(child);
 
+       if (child != &noop_qdisc)
+               qdisc_hash_add(child, true);
        sch_tree_lock(sch);
 
        qdisc_tree_reduce_backlog(q->qdisc, q->qdisc->q.qlen,
index 42e8c8615e6563a2deabbb3c3437e3985d01ae14..b00e02c139de8d7c0b66ec6ee0d8b6c677529609 100644 (file)
@@ -714,9 +714,8 @@ static int sfq_init(struct Qdisc *sch, struct nlattr *opt)
        struct sfq_sched_data *q = qdisc_priv(sch);
        int i;
 
-       q->perturb_timer.function = sfq_perturbation;
-       q->perturb_timer.data = (unsigned long)sch;
-       init_timer_deferrable(&q->perturb_timer);
+       setup_deferrable_timer(&q->perturb_timer, sfq_perturbation,
+                              (unsigned long)sch);
 
        for (i = 0; i < SFQ_MAX_DEPTH + 1; i++) {
                q->dep[i].next = i + SFQ_MAX_FLOWS;
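
The sfq hunk is a pure API cleanup: setup_deferrable_timer() bundles
the three open-coded steps it replaces, so the new call is equivalent
to

    q->perturb_timer.function = sfq_perturbation;
    q->perturb_timer.data = (unsigned long)sch;
    init_timer_deferrable(&q->perturb_timer);

with no behavioural change.
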
index 303355c449ab336227d9b115496e0882f2f2a079..9850126129a378d46105d0535fde46e0a7f1a9c4 100644 (file)
@@ -396,6 +396,8 @@ static int tbf_change(struct Qdisc *sch, struct nlattr *opt)
                                          q->qdisc->qstats.backlog);
                qdisc_destroy(q->qdisc);
                q->qdisc = child;
+               if (child != &noop_qdisc)
+                       qdisc_hash_add(child, true);
        }
        q->limit = qopt->limit;
        if (tb[TCA_TBF_PBURST])
index 2a6835b4562b61cff52425a530524f1c48bc7919..a9708da28eb53ff2987264c6c7d7ca6ec2ff09e9 100644 (file)
@@ -71,9 +71,8 @@ static struct sctp_association *sctp_association_init(struct sctp_association *a
 {
        struct net *net = sock_net(sk);
        struct sctp_sock *sp;
-       int i;
        sctp_paramhdr_t *p;
-       int err;
+       int i;
 
        /* Retrieve the SCTP per socket area.  */
        sp = sctp_sk((struct sock *)sk);
@@ -247,6 +246,9 @@ static struct sctp_association *sctp_association_init(struct sctp_association *a
        if (!sctp_ulpq_init(&asoc->ulpq, asoc))
                goto fail_init;
 
+       if (sctp_stream_new(asoc, gfp))
+               goto fail_init;
+
        /* Assume that peer would support both address types unless we are
         * told otherwise.
         */
@@ -264,9 +266,8 @@ static struct sctp_association *sctp_association_init(struct sctp_association *a
 
        /* AUTH related initializations */
        INIT_LIST_HEAD(&asoc->endpoint_shared_keys);
-       err = sctp_auth_asoc_copy_shkeys(ep, asoc, gfp);
-       if (err)
-               goto fail_init;
+       if (sctp_auth_asoc_copy_shkeys(ep, asoc, gfp))
+               goto stream_free;
 
        asoc->active_key_id = ep->active_key_id;
        asoc->prsctp_enable = ep->prsctp_enable;
@@ -289,6 +290,8 @@ static struct sctp_association *sctp_association_init(struct sctp_association *a
 
        return asoc;
 
+stream_free:
+       sctp_stream_free(asoc->stream);
 fail_init:
        sock_put(asoc->base.sk);
        sctp_endpoint_put(asoc->ep);
@@ -1409,7 +1412,7 @@ sctp_assoc_choose_alter_transport(struct sctp_association *asoc,
 /* Update the association's pmtu and frag_point by going through all the
  * transports. This routine is called when a transport's PMTU has changed.
  */
-void sctp_assoc_sync_pmtu(struct sock *sk, struct sctp_association *asoc)
+void sctp_assoc_sync_pmtu(struct sctp_association *asoc)
 {
        struct sctp_transport *t;
        __u32 pmtu = 0;
@@ -1421,8 +1424,8 @@ void sctp_assoc_sync_pmtu(struct sock *sk, struct sctp_association *asoc)
        list_for_each_entry(t, &asoc->peer.transport_addr_list,
                                transports) {
                if (t->pmtu_pending && t->dst) {
-                       sctp_transport_update_pmtu(sk, t,
-                                                  SCTP_TRUNC4(dst_mtu(t->dst)));
+                       sctp_transport_update_pmtu(
+                                       t, SCTP_TRUNC4(dst_mtu(t->dst)));
                        t->pmtu_pending = 0;
                }
                if (!pmtu || (t->pathmtu < pmtu))
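
The associola.c hunks do two things. The new stream_free error label
ensures the stream allocated by sctp_stream_new() is released when the
later sctp_auth_asoc_copy_shkeys() step fails. And
sctp_assoc_sync_pmtu() drops its struct sock argument, since the
association already reaches the socket through asoc->base.sk, so every
caller shrinks from

    sctp_assoc_sync_pmtu(sk, asoc);

to

    sctp_assoc_sync_pmtu(asoc);

The matching sctp_transport_update_pmtu() change is in the transport.c
hunks further down.
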
index e3621cb4827fadb5f5cb41ebe8455dfa3300a765..697721a7a3f1761373aa66b847bd744ea1b42d10 100644 (file)
@@ -306,14 +306,24 @@ int sctp_chunk_abandoned(struct sctp_chunk *chunk)
 
        if (SCTP_PR_TTL_ENABLED(chunk->sinfo.sinfo_flags) &&
            time_after(jiffies, chunk->msg->expires_at)) {
-               if (chunk->sent_count)
+               struct sctp_stream_out *streamout =
+                       &chunk->asoc->stream->out[chunk->sinfo.sinfo_stream];
+
+               if (chunk->sent_count) {
                        chunk->asoc->abandoned_sent[SCTP_PR_INDEX(TTL)]++;
-               else
+                       streamout->abandoned_sent[SCTP_PR_INDEX(TTL)]++;
+               } else {
                        chunk->asoc->abandoned_unsent[SCTP_PR_INDEX(TTL)]++;
+                       streamout->abandoned_unsent[SCTP_PR_INDEX(TTL)]++;
+               }
                return 1;
        } else if (SCTP_PR_RTX_ENABLED(chunk->sinfo.sinfo_flags) &&
                   chunk->sent_count > chunk->sinfo.sinfo_timetolive) {
+               struct sctp_stream_out *streamout =
+                       &chunk->asoc->stream->out[chunk->sinfo.sinfo_stream];
+
                chunk->asoc->abandoned_sent[SCTP_PR_INDEX(RTX)]++;
+               streamout->abandoned_sent[SCTP_PR_INDEX(RTX)]++;
                return 1;
        } else if (!SCTP_PR_POLICY(chunk->sinfo.sinfo_flags) &&
                   chunk->msg->expires_at &&
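
chunk.c now keeps per-stream abandoned counters next to the existing
per-association ones; each PR-SCTP policy (TTL here, RTX below, PRIO
in the outqueue.c hunks) bumps both:

    chunk->asoc->abandoned_sent[SCTP_PR_INDEX(TTL)]++;  /* per assoc  */
    streamout->abandoned_sent[SCTP_PR_INDEX(TTL)]++;    /* per stream */

The per-stream numbers are exported through the SCTP_PR_STREAM_STATUS
getsockopt added in the socket.c hunks below.
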
index 2a28ab20487f03f61ed8d74cb511bce2973ce242..0e06a278d2a911e2360e75e983b623e453284b7b 100644 (file)
@@ -401,10 +401,10 @@ void sctp_icmp_frag_needed(struct sock *sk, struct sctp_association *asoc,
 
        if (t->param_flags & SPP_PMTUD_ENABLE) {
                /* Update transports view of the MTU */
-               sctp_transport_update_pmtu(sk, t, pmtu);
+               sctp_transport_update_pmtu(t, pmtu);
 
                /* Update association pmtu. */
-               sctp_assoc_sync_pmtu(sk, asoc);
+               sctp_assoc_sync_pmtu(asoc);
        }
 
        /* Retransmit with the new pmtu setting.
index 063baac5b9fe4048e9d7b41e848a33f0f73c61d4..961ee59f696a0b0a8b6c2bade0031a073dff53ad 100644 (file)
@@ -640,14 +640,15 @@ static sctp_scope_t sctp_v6_scope(union sctp_addr *addr)
 
 /* Create and initialize a new sk for the socket to be returned by accept(). */
 static struct sock *sctp_v6_create_accept_sk(struct sock *sk,
-                                            struct sctp_association *asoc)
+                                            struct sctp_association *asoc,
+                                            bool kern)
 {
        struct sock *newsk;
        struct ipv6_pinfo *newnp, *np = inet6_sk(sk);
        struct sctp6_sock *newsctp6sk;
        struct ipv6_txoptions *opt;
 
-       newsk = sk_alloc(sock_net(sk), PF_INET6, GFP_KERNEL, sk->sk_prot, 0);
+       newsk = sk_alloc(sock_net(sk), PF_INET6, GFP_KERNEL, sk->sk_prot, kern);
        if (!newsk)
                goto out;
 
index 71ce6b945dcb54d831425bdb02e315a14dae69ef..1409a875ad8e22172a4b6ec08ce339da3c8b80ab 100644 (file)
@@ -86,43 +86,53 @@ void sctp_packet_config(struct sctp_packet *packet, __u32 vtag,
 {
        struct sctp_transport *tp = packet->transport;
        struct sctp_association *asoc = tp->asoc;
+       struct sock *sk;
 
        pr_debug("%s: packet:%p vtag:0x%x\n", __func__, packet, vtag);
-
        packet->vtag = vtag;
 
-       if (asoc && tp->dst) {
-               struct sock *sk = asoc->base.sk;
-
-               rcu_read_lock();
-               if (__sk_dst_get(sk) != tp->dst) {
-                       dst_hold(tp->dst);
-                       sk_setup_caps(sk, tp->dst);
-               }
-
-               if (sk_can_gso(sk)) {
-                       struct net_device *dev = tp->dst->dev;
+       /* do the following jobs only once per flush schedule */
+       if (!sctp_packet_empty(packet))
+               return;
 
-                       packet->max_size = dev->gso_max_size;
-               } else {
-                       packet->max_size = asoc->pathmtu;
-               }
-               rcu_read_unlock();
+       /* set the packet max_size to the path MTU */
+       packet->max_size = tp->pathmtu;
+       if (!asoc)
+               return;
 
-       } else {
-               packet->max_size = tp->pathmtu;
+       /* update the dst or the transport pathmtu if needed */
+       sk = asoc->base.sk;
+       if (!sctp_transport_dst_check(tp)) {
+               sctp_transport_route(tp, NULL, sctp_sk(sk));
+               if (asoc->param_flags & SPP_PMTUD_ENABLE)
+                       sctp_assoc_sync_pmtu(asoc);
+       } else if (!sctp_transport_pmtu_check(tp)) {
+               if (asoc->param_flags & SPP_PMTUD_ENABLE)
+                       sctp_assoc_sync_pmtu(asoc);
        }
 
-       if (ecn_capable && sctp_packet_empty(packet)) {
-               struct sctp_chunk *chunk;
+       /* If there is a prepend chunk, stick it on the list before
+        * any other chunks get appended.
+        */
+       if (ecn_capable) {
+               struct sctp_chunk *chunk = sctp_get_ecne_prepend(asoc);
 
-               /* If there a is a prepend chunk stick it on the list before
-                * any other chunks get appended.
-                */
-               chunk = sctp_get_ecne_prepend(asoc);
                if (chunk)
                        sctp_packet_append_chunk(packet, chunk);
        }
+
+       if (!tp->dst)
+               return;
+
+       /* set the packet max_size to gso_max_size if GSO is enabled */
+       rcu_read_lock();
+       if (__sk_dst_get(sk) != tp->dst) {
+               dst_hold(tp->dst);
+               sk_setup_caps(sk, tp->dst);
+       }
+       packet->max_size = sk_can_gso(sk) ? tp->dst->dev->gso_max_size
+                                         : asoc->pathmtu;
+       rcu_read_unlock();
 }
 
 /* Initialize the packet structure. */
@@ -546,7 +556,6 @@ int sctp_packet_transmit(struct sctp_packet *packet, gfp_t gfp)
        struct sctp_association *asoc = tp->asoc;
        struct sctp_chunk *chunk, *tmp;
        int pkt_count, gso = 0;
-       int confirm;
        struct dst_entry *dst;
        struct sk_buff *head;
        struct sctphdr *sh;
@@ -583,12 +592,7 @@ int sctp_packet_transmit(struct sctp_packet *packet, gfp_t gfp)
        sh->vtag = htonl(packet->vtag);
        sh->checksum = 0;
 
-       /* update dst if in need */
-       if (!sctp_transport_dst_check(tp)) {
-               sctp_transport_route(tp, NULL, sctp_sk(sk));
-               if (asoc && asoc->param_flags & SPP_PMTUD_ENABLE)
-                       sctp_assoc_sync_pmtu(sk, asoc);
-       }
+       /* drop packet if no dst */
        dst = dst_clone(tp->dst);
        if (!dst) {
                IP_INC_STATS(sock_net(sk), IPSTATS_MIB_OUTNOROUTES);
@@ -625,13 +629,13 @@ int sctp_packet_transmit(struct sctp_packet *packet, gfp_t gfp)
                        asoc->peer.last_sent_to = tp;
        }
        head->ignore_df = packet->ipfragok;
-       confirm = tp->dst_pending_confirm;
-       if (confirm)
+       if (tp->dst_pending_confirm)
                skb_set_dst_pending_confirm(head, 1);
        /* neighbour should be confirmed on successful transmission or
         * positive error
         */
-       if (tp->af_specific->sctp_xmit(head, tp) >= 0 && confirm)
+       if (tp->af_specific->sctp_xmit(head, tp) >= 0 &&
+           tp->dst_pending_confirm)
                tp->dst_pending_confirm = 0;
 
 out:
@@ -705,7 +709,7 @@ static sctp_xmit_t sctp_packet_can_append_data(struct sctp_packet *packet,
         */
 
        if ((sctp_sk(asoc->base.sk)->nodelay || inflight == 0) &&
-           !chunk->msg->force_delay)
+           !asoc->force_delay)
                /* Nothing unacked */
                return SCTP_XMIT_OK;
 
index db352e5d61f8980dc461a162959643d872997217..fe4c3d462f6ebc48d11d2a587f2adb7a39fde2e5 100644 (file)
@@ -353,6 +353,8 @@ static int sctp_prsctp_prune_sent(struct sctp_association *asoc,
        struct sctp_chunk *chk, *temp;
 
        list_for_each_entry_safe(chk, temp, queue, transmitted_list) {
+               struct sctp_stream_out *streamout;
+
                if (!SCTP_PR_PRIO_ENABLED(chk->sinfo.sinfo_flags) ||
                    chk->sinfo.sinfo_timetolive <= sinfo->sinfo_timetolive)
                        continue;
@@ -361,8 +363,10 @@ static int sctp_prsctp_prune_sent(struct sctp_association *asoc,
                sctp_insert_list(&asoc->outqueue.abandoned,
                                 &chk->transmitted_list);
 
+               streamout = &asoc->stream->out[chk->sinfo.sinfo_stream];
                asoc->sent_cnt_removable--;
                asoc->abandoned_sent[SCTP_PR_INDEX(PRIO)]++;
+               streamout->abandoned_sent[SCTP_PR_INDEX(PRIO)]++;
 
                if (!chk->tsn_gap_acked) {
                        if (chk->transport)
@@ -382,19 +386,26 @@ static int sctp_prsctp_prune_sent(struct sctp_association *asoc,
 }
 
 static int sctp_prsctp_prune_unsent(struct sctp_association *asoc,
-                                   struct sctp_sndrcvinfo *sinfo,
-                                   struct list_head *queue, int msg_len)
+                                   struct sctp_sndrcvinfo *sinfo, int msg_len)
 {
+       struct sctp_outq *q = &asoc->outqueue;
        struct sctp_chunk *chk, *temp;
 
-       list_for_each_entry_safe(chk, temp, queue, list) {
+       list_for_each_entry_safe(chk, temp, &q->out_chunk_list, list) {
                if (!SCTP_PR_PRIO_ENABLED(chk->sinfo.sinfo_flags) ||
                    chk->sinfo.sinfo_timetolive <= sinfo->sinfo_timetolive)
                        continue;
 
                list_del_init(&chk->list);
+               q->out_qlen -= chk->skb->len;
                asoc->sent_cnt_removable--;
                asoc->abandoned_unsent[SCTP_PR_INDEX(PRIO)]++;
+               if (chk->sinfo.sinfo_stream < asoc->stream->outcnt) {
+                       struct sctp_stream_out *streamout =
+                               &asoc->stream->out[chk->sinfo.sinfo_stream];
+
+                       streamout->abandoned_unsent[SCTP_PR_INDEX(PRIO)]++;
+               }
 
                msg_len -= SCTP_DATA_SNDSIZE(chk) +
                           sizeof(struct sk_buff) +
@@ -431,9 +442,7 @@ void sctp_prsctp_prune(struct sctp_association *asoc,
                        return;
        }
 
-       sctp_prsctp_prune_unsent(asoc, sinfo,
-                                &asoc->outqueue.out_chunk_list,
-                                msg_len);
+       sctp_prsctp_prune_unsent(asoc, sinfo, msg_len);
 }
 
 /* Mark all the eligible packets on a transport for retransmission.  */
@@ -1027,8 +1036,7 @@ static void sctp_outq_flush(struct sctp_outq *q, int rtx_timeout, gfp_t gfp)
                        /* RFC 2960 6.5 Every DATA chunk MUST carry a valid
                         * stream identifier.
                         */
-                       if (chunk->sinfo.sinfo_stream >=
-                           asoc->c.sinit_num_ostreams) {
+                       if (chunk->sinfo.sinfo_stream >= asoc->stream->outcnt) {
 
                                /* Mark as failed send. */
                                sctp_chunk_fail(chunk, SCTP_ERROR_INV_STRM);
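
sctp_prsctp_prune_unsent() always operated on the association's own
out_chunk_list, so the redundant queue argument is dropped, and two
accounting gaps are closed: q->out_qlen now shrinks as unsent chunks
are abandoned, and the per-stream abandoned_unsent counter is bumped
whenever the chunk's stream id is still valid:

    list_del_init(&chk->list);
    q->out_qlen -= chk->skb->len;   /* keep the queue length honest */
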
index 206377fe91ec4db4a59b24bf45daa0e42be0015b..a0b29d43627f48425e83d7d3c9698d99315dd869 100644 (file)
@@ -361,8 +361,8 @@ static int sctp_assocs_seq_show(struct seq_file *seq, void *v)
        sctp_seq_dump_remote_addrs(seq, assoc);
        seq_printf(seq, "\t%8lu %5d %5d %4d %4d %4d %8d "
                   "%8d %8d %8d %8d",
-               assoc->hbinterval, assoc->c.sinit_max_instreams,
-               assoc->c.sinit_num_ostreams, assoc->max_retrans,
+               assoc->hbinterval, assoc->stream->incnt,
+               assoc->stream->outcnt, assoc->max_retrans,
                assoc->init_retries, assoc->shutdown_retries,
                assoc->rtx_data_chunks,
                atomic_read(&sk->sk_wmem_alloc),
index 1b6d4574d2b02a2877caba604bb549352a0f0470..989a900383b57c57590bff37e3aee7426fb0b156 100644 (file)
@@ -575,10 +575,11 @@ static int sctp_v4_is_ce(const struct sk_buff *skb)
 
 /* Create and initialize a new sk for the socket returned by accept(). */
 static struct sock *sctp_v4_create_accept_sk(struct sock *sk,
-                                            struct sctp_association *asoc)
+                                            struct sctp_association *asoc,
+                                            bool kern)
 {
        struct sock *newsk = sk_alloc(sock_net(sk), PF_INET, GFP_KERNEL,
-                       sk->sk_prot, 0);
+                       sk->sk_prot, kern);
        struct inet_sock *newinet;
 
        if (!newsk)
index 969a30c7bb5431530b293c8ed51f2fdea61dd8bf..118faff6a332ee24caf3d772b6f00641128ef104 100644 (file)
@@ -2460,15 +2460,10 @@ int sctp_process_init(struct sctp_association *asoc, struct sctp_chunk *chunk,
         * association.
         */
        if (!asoc->temp) {
-               int error;
-
-               asoc->stream = sctp_stream_new(asoc->c.sinit_max_instreams,
-                                              asoc->c.sinit_num_ostreams, gfp);
-               if (!asoc->stream)
+               if (sctp_stream_init(asoc, gfp))
                        goto clean_up;
 
-               error = sctp_assoc_set_id(asoc, gfp);
-               if (error)
+               if (sctp_assoc_set_id(asoc, gfp))
                        goto clean_up;
        }
 
index e03bb1aab4d095b65259c33f4fba6990e90f586b..4f5e6cfc7f601b4de8db8802668d553f1af8491d 100644 (file)
@@ -3872,9 +3872,18 @@ sctp_disposition_t sctp_sf_do_reconf(struct net *net,
                else if (param.p->type == SCTP_PARAM_RESET_IN_REQUEST)
                        reply = sctp_process_strreset_inreq(
                                (struct sctp_association *)asoc, param, &ev);
-               /* More handles for other types will be added here, by now it
-                * just ignores other types.
-                */
+               else if (param.p->type == SCTP_PARAM_RESET_TSN_REQUEST)
+                       reply = sctp_process_strreset_tsnreq(
+                               (struct sctp_association *)asoc, param, &ev);
+               else if (param.p->type == SCTP_PARAM_RESET_ADD_OUT_STREAMS)
+                       reply = sctp_process_strreset_addstrm_out(
+                               (struct sctp_association *)asoc, param, &ev);
+               else if (param.p->type == SCTP_PARAM_RESET_ADD_IN_STREAMS)
+                       reply = sctp_process_strreset_addstrm_in(
+                               (struct sctp_association *)asoc, param, &ev);
+               else if (param.p->type == SCTP_PARAM_RESET_RESPONSE)
+                       reply = sctp_process_strreset_resp(
+                               (struct sctp_association *)asoc, param, &ev);
 
                if (ev)
                        sctp_add_cmd_sf(commands, SCTP_CMD_EVENT_ULP,
@@ -3946,7 +3955,7 @@ sctp_disposition_t sctp_sf_eat_fwd_tsn(struct net *net,
 
        /* Silently discard the chunk if stream-id is not valid */
        sctp_walk_fwdtsn(skip, chunk) {
-               if (ntohs(skip->stream) >= asoc->c.sinit_max_instreams)
+               if (ntohs(skip->stream) >= asoc->stream->incnt)
                        goto discard_noforce;
        }
 
@@ -4017,7 +4026,7 @@ sctp_disposition_t sctp_sf_eat_fwd_tsn_fast(
 
        /* Silently discard the chunk if stream-id is not valid */
        sctp_walk_fwdtsn(skip, chunk) {
-               if (ntohs(skip->stream) >= asoc->c.sinit_max_instreams)
+               if (ntohs(skip->stream) >= asoc->stream->incnt)
                        goto gen_shutdown;
        }
 
@@ -6353,7 +6362,7 @@ static int sctp_eat_data(const struct sctp_association *asoc,
         * and discard the DATA chunk.
         */
        sid = ntohs(data_hdr->stream);
-       if (sid >= asoc->c.sinit_max_instreams) {
+       if (sid >= asoc->stream->incnt) {
                /* Mark tsn as received even though we drop it */
                sctp_add_cmd_sf(commands, SCTP_CMD_REPORT_TSN, SCTP_U32(tsn));
 
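
With the four new branches, sctp_sf_do_reconf() dispatches every RFC
6525 parameter type; unknown types still fall through with reply left
NULL. A hedged restatement of the dispatch as a switch (same handlers,
purely illustrative; the const cast on asoc is elided, and the
SCTP_PARAM_* values are big-endian constants, so no byte swap):

    switch (param.p->type) {
    case SCTP_PARAM_RESET_OUT_REQUEST:
            reply = sctp_process_strreset_outreq(asoc, param, &ev);
            break;
    case SCTP_PARAM_RESET_IN_REQUEST:
            reply = sctp_process_strreset_inreq(asoc, param, &ev);
            break;
    case SCTP_PARAM_RESET_TSN_REQUEST:
            reply = sctp_process_strreset_tsnreq(asoc, param, &ev);
            break;
    case SCTP_PARAM_RESET_ADD_OUT_STREAMS:
            reply = sctp_process_strreset_addstrm_out(asoc, param, &ev);
            break;
    case SCTP_PARAM_RESET_ADD_IN_STREAMS:
            reply = sctp_process_strreset_addstrm_in(asoc, param, &ev);
            break;
    case SCTP_PARAM_RESET_RESPONSE:
            reply = sctp_process_strreset_resp(asoc, param, &ev);
            break;
    }
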
index 6f0a9be50f5055fd7efa29bb8b183cc37b23b25f..8e56df8d175d0f066030d2fbb6f655a84e265992 100644 (file)
@@ -1907,7 +1907,7 @@ static int sctp_sendmsg(struct sock *sk, struct msghdr *msg, size_t msg_len)
        }
 
        if (asoc->pmtu_pending)
-               sctp_assoc_pending_pmtu(sk, asoc);
+               sctp_assoc_pending_pmtu(asoc);
 
        /* If fragmentation is disabled and the message length exceeds the
         * association fragmentation point, return EMSGSIZE.  The I-D
@@ -1920,7 +1920,7 @@ static int sctp_sendmsg(struct sock *sk, struct msghdr *msg, size_t msg_len)
        }
 
        /* Check for invalid stream. */
-       if (sinfo->sinfo_stream >= asoc->c.sinit_num_ostreams) {
+       if (sinfo->sinfo_stream >= asoc->stream->outcnt) {
                err = -EINVAL;
                goto out_free;
        }
@@ -1965,7 +1965,7 @@ static int sctp_sendmsg(struct sock *sk, struct msghdr *msg, size_t msg_len)
                err = PTR_ERR(datamsg);
                goto out_free;
        }
-       datamsg->force_delay = !!(msg->msg_flags & MSG_MORE);
+       asoc->force_delay = !!(msg->msg_flags & MSG_MORE);
 
        /* Now send the (possibly) fragmented message. */
        list_for_each_entry(chunk, &datamsg->chunks, frag_list) {
@@ -2435,7 +2435,7 @@ static int sctp_apply_peer_addr_params(struct sctp_paddrparams *params,
        if ((params->spp_flags & SPP_PMTUD_DISABLE) && params->spp_pathmtu) {
                if (trans) {
                        trans->pathmtu = params->spp_pathmtu;
-                       sctp_assoc_sync_pmtu(sctp_opt2sk(sp), asoc);
+                       sctp_assoc_sync_pmtu(asoc);
                } else if (asoc) {
                        asoc->pathmtu = params->spp_pathmtu;
                } else {
@@ -2451,7 +2451,7 @@ static int sctp_apply_peer_addr_params(struct sctp_paddrparams *params,
                                (trans->param_flags & ~SPP_PMTUD) | pmtud_change;
                        if (update) {
                                sctp_transport_pmtu(trans, sctp_opt2sk(sp));
-                               sctp_assoc_sync_pmtu(sctp_opt2sk(sp), asoc);
+                               sctp_assoc_sync_pmtu(asoc);
                        }
                } else if (asoc) {
                        asoc->param_flags =
@@ -3758,6 +3758,39 @@ out:
        return retval;
 }
 
+static int sctp_setsockopt_reconfig_supported(struct sock *sk,
+                                             char __user *optval,
+                                             unsigned int optlen)
+{
+       struct sctp_assoc_value params;
+       struct sctp_association *asoc;
+       int retval = -EINVAL;
+
+       if (optlen != sizeof(params))
+               goto out;
+
+       if (copy_from_user(&params, optval, optlen)) {
+               retval = -EFAULT;
+               goto out;
+       }
+
+       asoc = sctp_id2assoc(sk, params.assoc_id);
+       if (asoc) {
+               asoc->reconf_enable = !!params.assoc_value;
+       } else if (!params.assoc_id) {
+               struct sctp_sock *sp = sctp_sk(sk);
+
+               sp->ep->reconf_enable = !!params.assoc_value;
+       } else {
+               goto out;
+       }
+
+       retval = 0;
+
+out:
+       return retval;
+}
+
 static int sctp_setsockopt_enable_strreset(struct sock *sk,
                                           char __user *optval,
                                           unsigned int optlen)
@@ -4038,6 +4071,9 @@ static int sctp_setsockopt(struct sock *sk, int level, int optname,
        case SCTP_DEFAULT_PRINFO:
                retval = sctp_setsockopt_default_prinfo(sk, optval, optlen);
                break;
+       case SCTP_RECONFIG_SUPPORTED:
+               retval = sctp_setsockopt_reconfig_supported(sk, optval, optlen);
+               break;
        case SCTP_ENABLE_STREAM_RESET:
                retval = sctp_setsockopt_enable_strreset(sk, optval, optlen);
                break;
@@ -4116,7 +4152,7 @@ static int sctp_disconnect(struct sock *sk, int flags)
  * descriptor will be returned from accept() to represent the newly
  * formed association.
  */
-static struct sock *sctp_accept(struct sock *sk, int flags, int *err)
+static struct sock *sctp_accept(struct sock *sk, int flags, int *err, bool kern)
 {
        struct sctp_sock *sp;
        struct sctp_endpoint *ep;
@@ -4151,7 +4187,7 @@ static struct sock *sctp_accept(struct sock *sk, int flags, int *err)
         */
        asoc = list_entry(ep->asocs.next, struct sctp_association, asocs);
 
-       newsk = sp->pf->create_accept_sk(sk, asoc);
+       newsk = sp->pf->create_accept_sk(sk, asoc, kern);
        if (!newsk) {
                error = -ENOMEM;
                goto out;
@@ -4461,8 +4497,8 @@ int sctp_get_sctp_info(struct sock *sk, struct sctp_association *asoc,
        info->sctpi_rwnd = asoc->a_rwnd;
        info->sctpi_unackdata = asoc->unack_data;
        info->sctpi_penddata = sctp_tsnmap_pending(&asoc->peer.tsn_map);
-       info->sctpi_instrms = asoc->c.sinit_max_instreams;
-       info->sctpi_outstrms = asoc->c.sinit_num_ostreams;
+       info->sctpi_instrms = asoc->stream->incnt;
+       info->sctpi_outstrms = asoc->stream->outcnt;
        list_for_each(pos, &asoc->base.inqueue.in_chunk_list)
                info->sctpi_inqueue++;
        list_for_each(pos, &asoc->outqueue.out_chunk_list)
@@ -4691,8 +4727,8 @@ static int sctp_getsockopt_sctp_status(struct sock *sk, int len,
        status.sstat_unackdata = asoc->unack_data;
 
        status.sstat_penddata = sctp_tsnmap_pending(&asoc->peer.tsn_map);
-       status.sstat_instrms = asoc->c.sinit_max_instreams;
-       status.sstat_outstrms = asoc->c.sinit_num_ostreams;
+       status.sstat_instrms = asoc->stream->incnt;
+       status.sstat_outstrms = asoc->stream->outcnt;
        status.sstat_fragmentation_point = asoc->frag_point;
        status.sstat_primary.spinfo_assoc_id = sctp_assoc2id(transport->asoc);
        memcpy(&status.sstat_primary.spinfo_address, &transport->ipaddr,
@@ -6540,6 +6576,102 @@ out:
        return retval;
 }
 
+static int sctp_getsockopt_pr_streamstatus(struct sock *sk, int len,
+                                          char __user *optval,
+                                          int __user *optlen)
+{
+       struct sctp_stream_out *streamout;
+       struct sctp_association *asoc;
+       struct sctp_prstatus params;
+       int retval = -EINVAL;
+       int policy;
+
+       if (len < sizeof(params))
+               goto out;
+
+       len = sizeof(params);
+       if (copy_from_user(&params, optval, len)) {
+               retval = -EFAULT;
+               goto out;
+       }
+
+       policy = params.sprstat_policy;
+       if (policy & ~SCTP_PR_SCTP_MASK)
+               goto out;
+
+       asoc = sctp_id2assoc(sk, params.sprstat_assoc_id);
+       if (!asoc || params.sprstat_sid >= asoc->stream->outcnt)
+               goto out;
+
+       streamout = &asoc->stream->out[params.sprstat_sid];
+       if (policy == SCTP_PR_SCTP_NONE) {
+               params.sprstat_abandoned_unsent = 0;
+               params.sprstat_abandoned_sent = 0;
+               for (policy = 0; policy <= SCTP_PR_INDEX(MAX); policy++) {
+                       params.sprstat_abandoned_unsent +=
+                               streamout->abandoned_unsent[policy];
+                       params.sprstat_abandoned_sent +=
+                               streamout->abandoned_sent[policy];
+               }
+       } else {
+               params.sprstat_abandoned_unsent =
+                       streamout->abandoned_unsent[__SCTP_PR_INDEX(policy)];
+               params.sprstat_abandoned_sent =
+                       streamout->abandoned_sent[__SCTP_PR_INDEX(policy)];
+       }
+
+       if (put_user(len, optlen) || copy_to_user(optval, &params, len)) {
+               retval = -EFAULT;
+               goto out;
+       }
+
+       retval = 0;
+
+out:
+       return retval;
+}
+
+static int sctp_getsockopt_reconfig_supported(struct sock *sk, int len,
+                                             char __user *optval,
+                                             int __user *optlen)
+{
+       struct sctp_assoc_value params;
+       struct sctp_association *asoc;
+       int retval = -EFAULT;
+
+       if (len < sizeof(params)) {
+               retval = -EINVAL;
+               goto out;
+       }
+
+       len = sizeof(params);
+       if (copy_from_user(&params, optval, len))
+               goto out;
+
+       asoc = sctp_id2assoc(sk, params.assoc_id);
+       if (asoc) {
+               params.assoc_value = asoc->reconf_enable;
+       } else if (!params.assoc_id) {
+               struct sctp_sock *sp = sctp_sk(sk);
+
+               params.assoc_value = sp->ep->reconf_enable;
+       } else {
+               retval = -EINVAL;
+               goto out;
+       }
+
+       if (put_user(len, optlen))
+               goto out;
+
+       if (copy_to_user(optval, &params, len))
+               goto out;
+
+       retval = 0;
+
+out:
+       return retval;
+}
+
 static int sctp_getsockopt_enable_strreset(struct sock *sk, int len,
                                           char __user *optval,
                                           int __user *optlen)
@@ -6748,6 +6880,14 @@ static int sctp_getsockopt(struct sock *sk, int level, int optname,
                retval = sctp_getsockopt_pr_assocstatus(sk, len, optval,
                                                        optlen);
                break;
+       case SCTP_PR_STREAM_STATUS:
+               retval = sctp_getsockopt_pr_streamstatus(sk, len, optval,
+                                                        optlen);
+               break;
+       case SCTP_RECONFIG_SUPPORTED:
+               retval = sctp_getsockopt_reconfig_supported(sk, len, optval,
+                                                           optlen);
+               break;
        case SCTP_ENABLE_STREAM_RESET:
                retval = sctp_getsockopt_enable_strreset(sk, len, optval,
                                                         optlen);
@@ -7437,9 +7577,12 @@ struct sk_buff *sctp_skb_recv_datagram(struct sock *sk, int flags,
                if (sk->sk_shutdown & RCV_SHUTDOWN)
                        break;
 
-               if (sk_can_busy_loop(sk) &&
-                   sk_busy_loop(sk, noblock))
-                       continue;
+               if (sk_can_busy_loop(sk)) {
+                       sk_busy_loop(sk, noblock);
+
+                       if (!skb_queue_empty(&sk->sk_receive_queue))
+                               continue;
+               }
 
                /* User doesn't want to wait.  */
                error = -EAGAIN;
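
socket.c wires up two new socket options: SCTP_RECONFIG_SUPPORTED
(get/set of reconf_enable per association, or per endpoint when
assoc_id is 0) and SCTP_PR_STREAM_STATUS (the per-stream abandoned
counters from chunk.c/outqueue.c, summed over all policies when
SCTP_PR_SCTP_NONE is passed). A hedged userspace sketch, assuming uapi
headers that already carry these definitions; error handling elided:

    #include <stdio.h>
    #include <sys/socket.h>
    #include <netinet/sctp.h>

    static void show_pr_stream_status(int fd, sctp_assoc_t assoc_id)
    {
            struct sctp_assoc_value av = { .assoc_id = 0, .assoc_value = 1 };
            struct sctp_prstatus ps = {
                    .sprstat_assoc_id = assoc_id,
                    .sprstat_sid      = 0,                 /* stream to query  */
                    .sprstat_policy   = SCTP_PR_SCTP_NONE, /* sum all policies */
            };
            socklen_t len = sizeof(ps);

            /* enable stream reconfiguration endpoint-wide */
            setsockopt(fd, IPPROTO_SCTP, SCTP_RECONFIG_SUPPORTED,
                       &av, sizeof(av));

            if (!getsockopt(fd, IPPROTO_SCTP, SCTP_PR_STREAM_STATUS,
                            &ps, &len))
                    printf("abandoned sent %llu, unsent %llu\n",
                           (unsigned long long)ps.sprstat_abandoned_sent,
                           (unsigned long long)ps.sprstat_abandoned_unsent);
    }
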
index 1c6cc04fa3a41f7266597f9cd80420c228094a2b..eff6008a32ba5e692957daf31d5f405eff77c3b6 100644 (file)
 #include <net/sctp/sctp.h>
 #include <net/sctp/sm.h>
 
-struct sctp_stream *sctp_stream_new(__u16 incnt, __u16 outcnt, gfp_t gfp)
+int sctp_stream_new(struct sctp_association *asoc, gfp_t gfp)
 {
        struct sctp_stream *stream;
        int i;
 
        stream = kzalloc(sizeof(*stream), gfp);
        if (!stream)
-               return NULL;
+               return -ENOMEM;
 
-       stream->outcnt = outcnt;
+       stream->outcnt = asoc->c.sinit_num_ostreams;
        stream->out = kcalloc(stream->outcnt, sizeof(*stream->out), gfp);
        if (!stream->out) {
                kfree(stream);
-               return NULL;
+               return -ENOMEM;
        }
        for (i = 0; i < stream->outcnt; i++)
                stream->out[i].state = SCTP_STREAM_OPEN;
 
-       stream->incnt = incnt;
+       asoc->stream = stream;
+
+       return 0;
+}
+
+int sctp_stream_init(struct sctp_association *asoc, gfp_t gfp)
+{
+       struct sctp_stream *stream = asoc->stream;
+       int i;
+
+       /* The initial stream->out size may be very large, so free it and
+        * allocate a new one sized by the negotiated outcnt to save memory.
+        */
+       kfree(stream->out);
+       stream->outcnt = asoc->c.sinit_num_ostreams;
+       stream->out = kcalloc(stream->outcnt, sizeof(*stream->out), gfp);
+       if (!stream->out)
+               goto nomem;
+
+       for (i = 0; i < stream->outcnt; i++)
+               stream->out[i].state = SCTP_STREAM_OPEN;
+
+       stream->incnt = asoc->c.sinit_max_instreams;
        stream->in = kcalloc(stream->incnt, sizeof(*stream->in), gfp);
        if (!stream->in) {
                kfree(stream->out);
-               kfree(stream);
-               return NULL;
+               goto nomem;
        }
 
-       return stream;
+       return 0;
+
+nomem:
+       asoc->stream = NULL;
+       kfree(stream);
+
+       return -ENOMEM;
 }
 
 void sctp_stream_free(struct sctp_stream *stream)
@@ -267,18 +294,6 @@ int sctp_send_add_streams(struct sctp_association *asoc,
                stream->out = streamout;
        }
 
-       if (in) {
-               struct sctp_stream_in *streamin;
-
-               streamin = krealloc(stream->in, incnt * sizeof(*streamin),
-                                   GFP_KERNEL);
-               if (!streamin)
-                       goto out;
-
-               memset(streamin + stream->incnt, 0, in * sizeof(*streamin));
-               stream->in = streamin;
-       }
-
        chunk = sctp_make_strreset_addstrm(asoc, out, in);
        if (!chunk)
                goto out;
@@ -303,13 +318,14 @@ out:
 }
 
 static sctp_paramhdr_t *sctp_chunk_lookup_strreset_param(
-                       struct sctp_association *asoc, __u32 resp_seq)
+                       struct sctp_association *asoc, __u32 resp_seq,
+                       __be16 type)
 {
        struct sctp_chunk *chunk = asoc->strreset_chunk;
        struct sctp_reconf_chunk *hdr;
        union sctp_params param;
 
-       if (ntohl(resp_seq) != asoc->strreset_outseq || !chunk)
+       if (!chunk)
                return NULL;
 
        hdr = (struct sctp_reconf_chunk *)chunk->chunk_hdr;
@@ -320,7 +336,8 @@ static sctp_paramhdr_t *sctp_chunk_lookup_strreset_param(
                 */
                struct sctp_strreset_tsnreq *req = param.v;
 
-               if (req->request_seq == resp_seq)
+               if ((!resp_seq || req->request_seq == resp_seq) &&
+                   (!type || type == req->param_hdr.type))
                        return param.v;
        }
 
@@ -361,13 +378,9 @@ struct sctp_chunk *sctp_process_strreset_outreq(
                goto out;
 
        if (asoc->strreset_chunk) {
-               sctp_paramhdr_t *param_hdr;
-               struct sctp_transport *t;
-
-               param_hdr = sctp_chunk_lookup_strreset_param(
-                                       asoc, outreq->response_seq);
-               if (!param_hdr || param_hdr->type !=
-                                       SCTP_PARAM_RESET_IN_REQUEST) {
+               if (!sctp_chunk_lookup_strreset_param(
+                               asoc, outreq->response_seq,
+                               SCTP_PARAM_RESET_IN_REQUEST)) {
                       /* same processing as when outstanding isn't 0 */
                        result = SCTP_STRRESET_ERR_IN_PROGRESS;
                        goto out;
@@ -377,6 +390,8 @@ struct sctp_chunk *sctp_process_strreset_outreq(
                asoc->strreset_outseq++;
 
                if (!asoc->strreset_outstanding) {
+                       struct sctp_transport *t;
+
                        t = asoc->strreset_chunk->transport;
                        if (del_timer(&t->reconf_timer))
                                sctp_transport_put(t);
@@ -477,3 +492,367 @@ out:
 
        return chunk;
 }
+
+struct sctp_chunk *sctp_process_strreset_tsnreq(
+                               struct sctp_association *asoc,
+                               union sctp_params param,
+                               struct sctp_ulpevent **evp)
+{
+       __u32 init_tsn = 0, next_tsn = 0, max_tsn_seen;
+       struct sctp_strreset_tsnreq *tsnreq = param.v;
+       struct sctp_stream *stream = asoc->stream;
+       __u32 result = SCTP_STRRESET_DENIED;
+       __u32 request_seq;
+       __u16 i;
+
+       request_seq = ntohl(tsnreq->request_seq);
+       if (request_seq > asoc->strreset_inseq) {
+               result = SCTP_STRRESET_ERR_BAD_SEQNO;
+               goto out;
+       } else if (request_seq == asoc->strreset_inseq) {
+               asoc->strreset_inseq++;
+       }
+
+       if (!(asoc->strreset_enable & SCTP_ENABLE_RESET_ASSOC_REQ))
+               goto out;
+
+       if (asoc->strreset_outstanding) {
+               result = SCTP_STRRESET_ERR_IN_PROGRESS;
+               goto out;
+       }
+
+       /* G3: The same processing as though a SACK chunk with no gap report
+        *     and a cumulative TSN ACK of the Sender's Next TSN minus 1 were
+        *     received MUST be performed.
+        */
+       max_tsn_seen = sctp_tsnmap_get_max_tsn_seen(&asoc->peer.tsn_map);
+       sctp_ulpq_reasm_flushtsn(&asoc->ulpq, max_tsn_seen);
+       sctp_ulpq_abort_pd(&asoc->ulpq, GFP_ATOMIC);
+
+       /* G1: Compute an appropriate value for the Receiver's Next TSN -- the
+        *     TSN that the peer should use to send the next DATA chunk.  The
+        *     value SHOULD be the smallest TSN not acknowledged by the
+        *     receiver of the request plus 2^31.
+        */
+       init_tsn = sctp_tsnmap_get_ctsn(&asoc->peer.tsn_map) + (1 << 31);
+       sctp_tsnmap_init(&asoc->peer.tsn_map, SCTP_TSN_MAP_INITIAL,
+                        init_tsn, GFP_ATOMIC);
+
+       /* G4: The same processing as though a FWD-TSN chunk (as defined in
+        *     [RFC3758]) with all streams affected and a new cumulative TSN
+        *     ACK of the Receiver's Next TSN minus 1 were received MUST be
+        *     performed.
+        */
+       sctp_outq_free(&asoc->outqueue);
+
+       /* G2: Compute an appropriate value for the local endpoint's next TSN,
+        *     i.e., the next TSN assigned by the receiver of the SSN/TSN reset
+        *     chunk.  The value SHOULD be the highest TSN sent by the receiver
+        *     of the request plus 1.
+        */
+       next_tsn = asoc->next_tsn;
+       asoc->ctsn_ack_point = next_tsn - 1;
+       asoc->adv_peer_ack_point = asoc->ctsn_ack_point;
+
+       /* G5:  The next expected and outgoing SSNs MUST be reset to 0 for all
+        *      incoming and outgoing streams.
+        */
+       for (i = 0; i < stream->outcnt; i++)
+               stream->out[i].ssn = 0;
+       for (i = 0; i < stream->incnt; i++)
+               stream->in[i].ssn = 0;
+
+       result = SCTP_STRRESET_PERFORMED;
+
+       *evp = sctp_ulpevent_make_assoc_reset_event(asoc, 0, init_tsn,
+                                                   next_tsn, GFP_ATOMIC);
+
+out:
+       return sctp_make_strreset_tsnresp(asoc, result, request_seq,
+                                         next_tsn, init_tsn);
+}
+
+struct sctp_chunk *sctp_process_strreset_addstrm_out(
+                               struct sctp_association *asoc,
+                               union sctp_params param,
+                               struct sctp_ulpevent **evp)
+{
+       struct sctp_strreset_addstrm *addstrm = param.v;
+       struct sctp_stream *stream = asoc->stream;
+       __u32 result = SCTP_STRRESET_DENIED;
+       struct sctp_stream_in *streamin;
+       __u32 request_seq, incnt;
+       __u16 in;
+
+       request_seq = ntohl(addstrm->request_seq);
+       if (request_seq > asoc->strreset_inseq) {
+               result = SCTP_STRRESET_ERR_BAD_SEQNO;
+               goto out;
+       } else if (request_seq == asoc->strreset_inseq) {
+               asoc->strreset_inseq++;
+       }
+
+       if (!(asoc->strreset_enable & SCTP_ENABLE_CHANGE_ASSOC_REQ))
+               goto out;
+
+       if (asoc->strreset_chunk) {
+               if (!sctp_chunk_lookup_strreset_param(
+                       asoc, 0, SCTP_PARAM_RESET_ADD_IN_STREAMS)) {
+                       /* same processing as when outstanding isn't 0 */
+                       result = SCTP_STRRESET_ERR_IN_PROGRESS;
+                       goto out;
+               }
+
+               asoc->strreset_outstanding--;
+               asoc->strreset_outseq++;
+
+               if (!asoc->strreset_outstanding) {
+                       struct sctp_transport *t;
+
+                       t = asoc->strreset_chunk->transport;
+                       if (del_timer(&t->reconf_timer))
+                               sctp_transport_put(t);
+
+                       sctp_chunk_put(asoc->strreset_chunk);
+                       asoc->strreset_chunk = NULL;
+               }
+       }
+
+       in = ntohs(addstrm->number_of_streams);
+       incnt = stream->incnt + in;
+       if (!in || incnt > SCTP_MAX_STREAM)
+               goto out;
+
+       streamin = krealloc(stream->in, incnt * sizeof(*streamin),
+                           GFP_ATOMIC);
+       if (!streamin)
+               goto out;
+
+       memset(streamin + stream->incnt, 0, in * sizeof(*streamin));
+       stream->in = streamin;
+       stream->incnt = incnt;
+
+       result = SCTP_STRRESET_PERFORMED;
+
+       *evp = sctp_ulpevent_make_stream_change_event(asoc,
+               0, ntohs(addstrm->number_of_streams), 0, GFP_ATOMIC);
+
+out:
+       return sctp_make_strreset_resp(asoc, result, request_seq);
+}
+
+struct sctp_chunk *sctp_process_strreset_addstrm_in(
+                               struct sctp_association *asoc,
+                               union sctp_params param,
+                               struct sctp_ulpevent **evp)
+{
+       struct sctp_strreset_addstrm *addstrm = param.v;
+       struct sctp_stream *stream = asoc->stream;
+       __u32 result = SCTP_STRRESET_DENIED;
+       struct sctp_stream_out *streamout;
+       struct sctp_chunk *chunk = NULL;
+       __u32 request_seq, outcnt;
+       __u16 out;
+
+       request_seq = ntohl(addstrm->request_seq);
+       if (request_seq > asoc->strreset_inseq) {
+               result = SCTP_STRRESET_ERR_BAD_SEQNO;
+               goto out;
+       } else if (request_seq == asoc->strreset_inseq) {
+               asoc->strreset_inseq++;
+       }
+
+       if (!(asoc->strreset_enable & SCTP_ENABLE_CHANGE_ASSOC_REQ))
+               goto out;
+
+       if (asoc->strreset_outstanding) {
+               result = SCTP_STRRESET_ERR_IN_PROGRESS;
+               goto out;
+       }
+
+       out = ntohs(addstrm->number_of_streams);
+       outcnt = stream->outcnt + out;
+       if (!out || outcnt > SCTP_MAX_STREAM)
+               goto out;
+
+       streamout = krealloc(stream->out, outcnt * sizeof(*streamout),
+                            GFP_ATOMIC);
+       if (!streamout)
+               goto out;
+
+       memset(streamout + stream->outcnt, 0, out * sizeof(*streamout));
+       stream->out = streamout;
+
+       chunk = sctp_make_strreset_addstrm(asoc, out, 0);
+       if (!chunk)
+               goto out;
+
+       asoc->strreset_chunk = chunk;
+       asoc->strreset_outstanding = 1;
+       sctp_chunk_hold(asoc->strreset_chunk);
+
+       stream->outcnt = outcnt;
+
+       *evp = sctp_ulpevent_make_stream_change_event(asoc,
+               0, 0, ntohs(addstrm->number_of_streams), GFP_ATOMIC);
+
+out:
+       if (!chunk)
+               chunk = sctp_make_strreset_resp(asoc, result, request_seq);
+
+       return chunk;
+}
+
+struct sctp_chunk *sctp_process_strreset_resp(
+                               struct sctp_association *asoc,
+                               union sctp_params param,
+                               struct sctp_ulpevent **evp)
+{
+       struct sctp_strreset_resp *resp = param.v;
+       struct sctp_stream *stream = asoc->stream;
+       struct sctp_transport *t;
+       __u16 i, nums, flags = 0;
+       sctp_paramhdr_t *req;
+       __u32 result;
+
+       req = sctp_chunk_lookup_strreset_param(asoc, resp->response_seq, 0);
+       if (!req)
+               return NULL;
+
+       result = ntohl(resp->result);
+       if (result != SCTP_STRRESET_PERFORMED) {
+               /* if in progress, do nothing but retransmit */
+               if (result == SCTP_STRRESET_IN_PROGRESS)
+                       return NULL;
+               else if (result == SCTP_STRRESET_DENIED)
+                       flags = SCTP_STREAM_RESET_DENIED;
+               else
+                       flags = SCTP_STREAM_RESET_FAILED;
+       }
+
+       if (req->type == SCTP_PARAM_RESET_OUT_REQUEST) {
+               struct sctp_strreset_outreq *outreq;
+               __u16 *str_p = NULL;
+
+               outreq = (struct sctp_strreset_outreq *)req;
+               nums = (ntohs(outreq->param_hdr.length) - sizeof(*outreq)) / 2;
+
+               if (result == SCTP_STRRESET_PERFORMED) {
+                       if (nums) {
+                               str_p = outreq->list_of_streams;
+                               for (i = 0; i < nums; i++)
+                                       stream->out[ntohs(str_p[i])].ssn = 0;
+                       } else {
+                               for (i = 0; i < stream->outcnt; i++)
+                                       stream->out[i].ssn = 0;
+                       }
+
+                       flags = SCTP_STREAM_RESET_OUTGOING_SSN;
+               }
+
+               for (i = 0; i < stream->outcnt; i++)
+                       stream->out[i].state = SCTP_STREAM_OPEN;
+
+               *evp = sctp_ulpevent_make_stream_reset_event(asoc, flags,
+                       nums, str_p, GFP_ATOMIC);
+       } else if (req->type == SCTP_PARAM_RESET_IN_REQUEST) {
+               struct sctp_strreset_inreq *inreq;
+               __u16 *str_p = NULL;
+
+               /* a "performed" result is impossible for an inreq */
+               if (result == SCTP_STRRESET_PERFORMED)
+                       return NULL;
+
+               inreq = (struct sctp_strreset_inreq *)req;
+               nums = (ntohs(inreq->param_hdr.length) - sizeof(*inreq)) / 2;
+
+               str_p = inreq->list_of_streams;
+               *evp = sctp_ulpevent_make_stream_reset_event(asoc, flags,
+                       nums, str_p, GFP_ATOMIC);
+       } else if (req->type == SCTP_PARAM_RESET_TSN_REQUEST) {
+               struct sctp_strreset_resptsn *resptsn;
+               __u32 stsn, rtsn;
+
+               /* check the resptsn length, as sctp_verify_reconf didn't do it */
+               if (ntohs(param.p->length) != sizeof(*resptsn))
+                       return NULL;
+
+               resptsn = (struct sctp_strreset_resptsn *)resp;
+               stsn = ntohl(resptsn->senders_next_tsn);
+               rtsn = ntohl(resptsn->receivers_next_tsn);
+
+               if (result == SCTP_STRRESET_PERFORMED) {
+                       __u32 mtsn = sctp_tsnmap_get_max_tsn_seen(
+                                               &asoc->peer.tsn_map);
+
+                       sctp_ulpq_reasm_flushtsn(&asoc->ulpq, mtsn);
+                       sctp_ulpq_abort_pd(&asoc->ulpq, GFP_ATOMIC);
+
+                       sctp_tsnmap_init(&asoc->peer.tsn_map,
+                                        SCTP_TSN_MAP_INITIAL,
+                                        stsn, GFP_ATOMIC);
+
+                       sctp_outq_free(&asoc->outqueue);
+
+                       asoc->next_tsn = rtsn;
+                       asoc->ctsn_ack_point = asoc->next_tsn - 1;
+                       asoc->adv_peer_ack_point = asoc->ctsn_ack_point;
+
+                       for (i = 0; i < stream->outcnt; i++)
+                               stream->out[i].ssn = 0;
+                       for (i = 0; i < stream->incnt; i++)
+                               stream->in[i].ssn = 0;
+               }
+
+               for (i = 0; i < stream->outcnt; i++)
+                       stream->out[i].state = SCTP_STREAM_OPEN;
+
+               *evp = sctp_ulpevent_make_assoc_reset_event(asoc, flags,
+                       stsn, rtsn, GFP_ATOMIC);
+       } else if (req->type == SCTP_PARAM_RESET_ADD_OUT_STREAMS) {
+               struct sctp_strreset_addstrm *addstrm;
+               __u16 number;
+
+               addstrm = (struct sctp_strreset_addstrm *)req;
+               nums = ntohs(addstrm->number_of_streams);
+               number = stream->outcnt - nums;
+
+               if (result == SCTP_STRRESET_PERFORMED)
+                       for (i = number; i < stream->outcnt; i++)
+                               stream->out[i].state = SCTP_STREAM_OPEN;
+               else
+                       stream->outcnt = number;
+
+               *evp = sctp_ulpevent_make_stream_change_event(asoc, flags,
+                       0, nums, GFP_ATOMIC);
+       } else if (req->type == SCTP_PARAM_RESET_ADD_IN_STREAMS) {
+               struct sctp_strreset_addstrm *addstrm;
+
+               /* a "performed" result is impossible for an add-incoming-
+                * streams request.
+                */
+               if (result == SCTP_STRRESET_PERFORMED)
+                       return NULL;
+
+               addstrm = (struct sctp_strreset_addstrm *)req;
+               nums = ntohs(addstrm->number_of_streams);
+
+               *evp = sctp_ulpevent_make_stream_change_event(asoc, flags,
+                       nums, 0, GFP_ATOMIC);
+       }
+
+       asoc->strreset_outstanding--;
+       asoc->strreset_outseq++;
+
+       /* remove everything for this reconf request */
+       if (!asoc->strreset_outstanding) {
+               t = asoc->strreset_chunk->transport;
+               if (del_timer(&t->reconf_timer))
+                       sctp_transport_put(t);
+
+               sctp_chunk_put(asoc->strreset_chunk);
+               asoc->strreset_chunk = NULL;
+       }
+
+       return NULL;
+}
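
In the stream.c hunks, sctp_chunk_lookup_strreset_param() now matches
on both the response sequence number and the parameter type, with 0
acting as a wildcard for either field, which is exactly what the new
handlers need:

    /* "is an add-in-streams request outstanding?" (any seqno) */
    sctp_chunk_lookup_strreset_param(asoc, 0,
                                     SCTP_PARAM_RESET_ADD_IN_STREAMS);

    /* "which outstanding request does this response answer?" */
    sctp_chunk_lookup_strreset_param(asoc, resp->response_seq, 0);

The four new sctp_process_strreset_* handlers above implement the
remaining RFC 6525 operations (SSN/TSN reset, adding incoming and
outgoing streams, and response processing) dispatched from
sctp_sf_do_reconf().
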
index daf8554fd42a5e537bb58572823b2028f74be930..0e732f68c2bfc3b791dade5a85c36628904ee490 100644 (file)
@@ -274,6 +274,13 @@ static struct ctl_table sctp_net_table[] = {
                .mode           = 0644,
                .proc_handler   = proc_dointvec,
        },
+       {
+               .procname       = "reconf_enable",
+               .data           = &init_net.sctp.reconf_enable,
+               .maxlen         = sizeof(int),
+               .mode           = 0644,
+               .proc_handler   = proc_dointvec,
+       },
        {
                .procname       = "auth_enable",
                .data           = &init_net.sctp.auth_enable,
index 3379668af3686de2ec14db980b1ef527a6d1045f..721eeebfcd8a50609877db61ede41575e012606a 100644 (file)
@@ -251,14 +251,13 @@ void sctp_transport_pmtu(struct sctp_transport *transport, struct sock *sk)
                transport->pathmtu = SCTP_DEFAULT_MAXSEGMENT;
 }
 
-void sctp_transport_update_pmtu(struct sock *sk, struct sctp_transport *t, u32 pmtu)
+void sctp_transport_update_pmtu(struct sctp_transport *t, u32 pmtu)
 {
-       struct dst_entry *dst;
+       struct dst_entry *dst = sctp_transport_dst_check(t);
 
        if (unlikely(pmtu < SCTP_DEFAULT_MINSEGMENT)) {
                pr_warn("%s: Reported pmtu %d too low, using default minimum of %d\n",
-                       __func__, pmtu,
-                       SCTP_DEFAULT_MINSEGMENT);
+                       __func__, pmtu, SCTP_DEFAULT_MINSEGMENT);
                /* Use default minimum segment size and disable
                 * pmtu discovery on this transport.
                 */
@@ -267,17 +266,13 @@ void sctp_transport_update_pmtu(struct sock *sk, struct sctp_transport *t, u32 p
                t->pathmtu = pmtu;
        }
 
-       dst = sctp_transport_dst_check(t);
-       if (!dst)
-               t->af_specific->get_dst(t, &t->saddr, &t->fl, sk);
-
        if (dst) {
-               dst->ops->update_pmtu(dst, sk, NULL, pmtu);
-
+               dst->ops->update_pmtu(dst, t->asoc->base.sk, NULL, pmtu);
                dst = sctp_transport_dst_check(t);
-               if (!dst)
-                       t->af_specific->get_dst(t, &t->saddr, &t->fl, sk);
        }
+
+       if (!dst)
+               t->af_specific->get_dst(t, &t->saddr, &t->fl, t->asoc->base.sk);
 }
 
 /* Caches the dst entry and source address for a transport's destination
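
sctp_transport_update_pmtu() likewise loses its sock argument and is
restructured around a single cached-dst check: the dst is fetched once
up front, updated in place while still valid, and only if it is (or
becomes) invalid does the transport fall back to a full route lookup
through t->asoc->base.sk, as the resulting code reads:

    dst = sctp_transport_dst_check(t);
    if (dst) {
            dst->ops->update_pmtu(dst, t->asoc->base.sk, NULL, pmtu);
            dst = sctp_transport_dst_check(t);
    }
    if (!dst)
            t->af_specific->get_dst(t, &t->saddr, &t->fl,
                                    t->asoc->base.sk);
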
index c8881bc542a066e6f7f234beea3c7208394242c5..ec2b3e013c2f4ba48eabfc8d8ed20921cabc08a1 100644 (file)
@@ -883,6 +883,62 @@ struct sctp_ulpevent *sctp_ulpevent_make_stream_reset_event(
        return event;
 }
 
+struct sctp_ulpevent *sctp_ulpevent_make_assoc_reset_event(
+       const struct sctp_association *asoc, __u16 flags, __u32 local_tsn,
+       __u32 remote_tsn, gfp_t gfp)
+{
+       struct sctp_assoc_reset_event *areset;
+       struct sctp_ulpevent *event;
+       struct sk_buff *skb;
+
+       event = sctp_ulpevent_new(sizeof(struct sctp_assoc_reset_event),
+                                 MSG_NOTIFICATION, gfp);
+       if (!event)
+               return NULL;
+
+       skb = sctp_event2skb(event);
+       areset = (struct sctp_assoc_reset_event *)
+               skb_put(skb, sizeof(struct sctp_assoc_reset_event));
+
+       areset->assocreset_type = SCTP_ASSOC_RESET_EVENT;
+       areset->assocreset_flags = flags;
+       areset->assocreset_length = sizeof(struct sctp_assoc_reset_event);
+       sctp_ulpevent_set_owner(event, asoc);
+       areset->assocreset_assoc_id = sctp_assoc2id(asoc);
+       areset->assocreset_local_tsn = local_tsn;
+       areset->assocreset_remote_tsn = remote_tsn;
+
+       return event;
+}
+
+struct sctp_ulpevent *sctp_ulpevent_make_stream_change_event(
+       const struct sctp_association *asoc, __u16 flags,
+       __u32 strchange_instrms, __u32 strchange_outstrms, gfp_t gfp)
+{
+       struct sctp_stream_change_event *schange;
+       struct sctp_ulpevent *event;
+       struct sk_buff *skb;
+
+       event = sctp_ulpevent_new(sizeof(struct sctp_stream_change_event),
+                                 MSG_NOTIFICATION, gfp);
+       if (!event)
+               return NULL;
+
+       skb = sctp_event2skb(event);
+       schange = (struct sctp_stream_change_event *)
+               skb_put(skb, sizeof(struct sctp_stream_change_event));
+
+       schange->strchange_type = SCTP_STREAM_CHANGE_EVENT;
+       schange->strchange_flags = flags;
+       schange->strchange_length = sizeof(struct sctp_stream_change_event);
+       sctp_ulpevent_set_owner(event, asoc);
+       schange->strchange_assoc_id = sctp_assoc2id(asoc);
+       schange->strchange_instrms = strchange_instrms;
+       schange->strchange_outstrms = strchange_outstrms;
+
+       return event;
+}
+
 /* Return the notification type, assuming this is a notification
  * event.
  */
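
ulpevent.c adds the two notifications that back the reconf handlers in
stream.c: SCTP_ASSOC_RESET_EVENT, carrying the post-reset local and
remote TSNs, and SCTP_STREAM_CHANGE_EVENT, carrying the new stream
counts. A hedged userspace sketch of consuming them; the union member
names are assumed from the matching uapi change, and the event
subscription and recvmsg plumbing are elided:

    #include <stdio.h>
    #include <netinet/sctp.h>

    static void handle_reconf_event(const union sctp_notification *sn)
    {
            switch (sn->sn_header.sn_type) {
            case SCTP_ASSOC_RESET_EVENT: {
                    const struct sctp_assoc_reset_event *e =
                            &sn->sn_assoc_reset_event;

                    printf("assoc reset: local_tsn %u remote_tsn %u\n",
                           e->assocreset_local_tsn,
                           e->assocreset_remote_tsn);
                    break;
            }
            case SCTP_STREAM_CHANGE_EVENT: {
                    const struct sctp_stream_change_event *e =
                            &sn->sn_strchange_event;

                    printf("streams now: %u in, %u out\n",
                           e->strchange_instrms, e->strchange_outstrms);
                    break;
            }
            }
    }
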
index 85837ab90e8916e612d5dd0a21ef48c5e2c9e544..5b6ee21368a68b4c02a5b8987a09a6d6ee613a0e 100644 (file)
@@ -147,7 +147,6 @@ static int smc_release(struct socket *sock)
                schedule_delayed_work(&smc->sock_put_work,
                                      SMC_CLOSE_SOCK_PUT_DELAY);
        }
-       sk->sk_prot->unhash(sk);
        release_sock(sk);
 
        sock_put(sk);
@@ -451,6 +450,9 @@ static int smc_connect_rdma(struct smc_sock *smc)
                goto decline_rdma_unlock;
        }
 
+       smc_close_init(smc);
+       smc_rx_init(smc);
+
        if (local_contact == SMC_FIRST_CONTACT) {
                rc = smc_ib_ready_link(link);
                if (rc) {
@@ -477,7 +479,6 @@ static int smc_connect_rdma(struct smc_sock *smc)
 
        mutex_unlock(&smc_create_lgr_pending);
        smc_tx_init(smc);
-       smc_rx_init(smc);
 
 out_connected:
        smc_copy_sock_settings_to_clc(smc);
@@ -637,7 +638,8 @@ struct sock *smc_accept_dequeue(struct sock *parent,
 
                smc_accept_unlink(new_sk);
                if (new_sk->sk_state == SMC_CLOSED) {
-                       /* tbd in follow-on patch: close this sock */
+                       new_sk->sk_prot->unhash(new_sk);
+                       sock_put(new_sk);
                        continue;
                }
                if (new_sock)
@@ -657,8 +659,13 @@ void smc_close_non_accepted(struct sock *sk)
        if (!sk->sk_lingertime)
                /* wait for peer closing */
                sk->sk_lingertime = SMC_MAX_STREAM_WAIT_TIMEOUT;
-       if (!smc->use_fallback)
+       if (smc->use_fallback) {
+               sk->sk_state = SMC_CLOSED;
+       } else {
                smc_close_active(smc);
+               sock_set_flag(sk, SOCK_DEAD);
+               sk->sk_shutdown |= SHUTDOWN_MASK;
+       }
        if (smc->clcsock) {
                struct socket *tcp;
 
@@ -666,11 +673,9 @@ void smc_close_non_accepted(struct sock *sk)
                smc->clcsock = NULL;
                sock_release(tcp);
        }
-       sock_set_flag(sk, SOCK_DEAD);
-       sk->sk_shutdown |= SHUTDOWN_MASK;
        if (smc->use_fallback) {
                schedule_delayed_work(&smc->sock_put_work, TCP_TIMEWAIT_LEN);
-       } else {
+       } else if (sk->sk_state == SMC_CLOSED) {
                smc_conn_free(&smc->conn);
                schedule_delayed_work(&smc->sock_put_work,
                                      SMC_CLOSE_SOCK_PUT_DELAY);
@@ -800,6 +805,9 @@ static void smc_listen_work(struct work_struct *work)
                goto decline_rdma;
        }
 
+       smc_close_init(new_smc);
+       smc_rx_init(new_smc);
+
        rc = smc_clc_send_accept(new_smc, local_contact);
        if (rc)
                goto out_err;
@@ -839,7 +847,6 @@ static void smc_listen_work(struct work_struct *work)
        }
 
        smc_tx_init(new_smc);
-       smc_rx_init(new_smc);
 
 out_connected:
        sk_refcnt_debug_inc(newsmcsk);
@@ -944,7 +951,7 @@ out:
 }
 
 static int smc_accept(struct socket *sock, struct socket *new_sock,
-                     int flags)
+                     int flags, bool kern)
 {
        struct sock *sk = sock->sk, *nsk;
        DECLARE_WAITQUEUE(wait, current);
index ee5fbea24549d2df9f55bb0ad177d548c637bb0f..6e44313e4467d01fbdc77f07ee7d14c50ab92ce2 100644 (file)
@@ -164,6 +164,7 @@ struct smc_connection {
 #ifndef KERNEL_HAS_ATOMIC64
        spinlock_t              acurs_lock;     /* protect cursors */
 #endif
+       struct work_struct      close_work;     /* peer sent some closing */
 };
 
 struct smc_sock {                              /* smc sock container */
index 5a339493872efd123826c31a2771485b3322f67a..a7294edbc22177d2c5b5a21deffaacea1e0855e2 100644 (file)
@@ -217,8 +217,13 @@ static void smc_cdc_msg_recv_action(struct smc_sock *smc,
                smc->sk.sk_err = ECONNRESET;
                conn->local_tx_ctrl.conn_state_flags.peer_conn_abort = 1;
        }
-       if (smc_cdc_rxed_any_close_or_senddone(conn))
-               smc_close_passive_received(smc);
+       if (smc_cdc_rxed_any_close_or_senddone(conn)) {
+               smc->sk.sk_shutdown |= RCV_SHUTDOWN;
+               if (smc->clcsock && smc->clcsock->sk)
+                       smc->clcsock->sk->sk_shutdown |= RCV_SHUTDOWN;
+               sock_set_flag(&smc->sk, SOCK_DONE);
+               schedule_work(&conn->close_work);
+       }
 
        /* piggy backed tx info */
        /* trigger sndbuf consumer: RDMA write into peer RMBE and CDC */
@@ -228,8 +233,6 @@ static void smc_cdc_msg_recv_action(struct smc_sock *smc,
                smc_close_wake_tx_prepared(smc);
        }
 
-       /* subsequent patch: trigger socket release if connection closed */
-
        /* socket connected but not accepted */
        if (!smc->sk.sk_socket)
                return;
index 67a71d170bedb4be2658cfa5f7a654098da4962f..3c2e166b5d222f4c932179ae442cbd88969efc92 100644 (file)
@@ -117,7 +117,6 @@ void smc_close_active_abort(struct smc_sock *smc)
        struct smc_cdc_conn_state_flags *txflags =
                &smc->conn.local_tx_ctrl.conn_state_flags;
 
-       bh_lock_sock(&smc->sk);
        smc->sk.sk_err = ECONNABORTED;
        if (smc->clcsock && smc->clcsock->sk) {
                smc->clcsock->sk->sk_err = ECONNABORTED;
@@ -125,6 +124,7 @@ void smc_close_active_abort(struct smc_sock *smc)
        }
        switch (smc->sk.sk_state) {
        case SMC_INIT:
+       case SMC_ACTIVE:
                smc->sk.sk_state = SMC_PEERABORTWAIT;
                break;
        case SMC_APPCLOSEWAIT1:
@@ -161,10 +161,15 @@ void smc_close_active_abort(struct smc_sock *smc)
        }
 
        sock_set_flag(&smc->sk, SOCK_DEAD);
-       bh_unlock_sock(&smc->sk);
        smc->sk.sk_state_change(&smc->sk);
 }
 
+static inline bool smc_close_sent_any_close(struct smc_connection *conn)
+{
+       return conn->local_tx_ctrl.conn_state_flags.peer_conn_abort ||
+              conn->local_tx_ctrl.conn_state_flags.peer_conn_closed;
+}
+
 int smc_close_active(struct smc_sock *smc)
 {
        struct smc_cdc_conn_state_flags *txflags =
@@ -185,8 +190,7 @@ again:
        case SMC_INIT:
                sk->sk_state = SMC_CLOSED;
                if (smc->smc_listen_work.func)
-                       flush_work(&smc->smc_listen_work);
-               sock_put(sk);
+                       cancel_work_sync(&smc->smc_listen_work);
                break;
        case SMC_LISTEN:
                sk->sk_state = SMC_CLOSED;
@@ -198,7 +202,7 @@ again:
                }
                release_sock(sk);
                smc_close_cleanup_listen(sk);
-               flush_work(&smc->tcp_listen_work);
+               cancel_work_sync(&smc->smc_listen_work);
                lock_sock(sk);
                break;
        case SMC_ACTIVE:
@@ -218,7 +222,7 @@ again:
        case SMC_APPFINCLOSEWAIT:
                /* socket already shutdown wr or both (active close) */
                if (txflags->peer_done_writing &&
-                   !txflags->peer_conn_closed) {
+                   !smc_close_sent_any_close(conn)) {
                        /* just shutdown wr done, send close request */
                        rc = smc_close_final(conn);
                }
@@ -248,6 +252,13 @@ again:
                break;
        case SMC_PEERCLOSEWAIT1:
        case SMC_PEERCLOSEWAIT2:
+               if (txflags->peer_done_writing &&
+                   !smc_close_sent_any_close(conn)) {
+                       /* just shutdown wr done, send close request */
+                       rc = smc_close_final(conn);
+               }
+               /* peer sending PeerConnectionClosed will cause transition */
+               break;
        case SMC_PEERFINCLOSEWAIT:
                /* peer sending PeerConnectionClosed will cause transition */
                break;
@@ -285,7 +296,7 @@ static void smc_close_passive_abort_received(struct smc_sock *smc)
        case SMC_PEERCLOSEWAIT1:
        case SMC_PEERCLOSEWAIT2:
                if (txflags->peer_done_writing &&
-                   !txflags->peer_conn_closed) {
+                   !smc_close_sent_any_close(&smc->conn)) {
                        /* just shutdown, but not yet closed locally */
                        smc_close_abort(&smc->conn);
                        sk->sk_state = SMC_PROCESSABORT;
@@ -306,22 +317,27 @@ static void smc_close_passive_abort_received(struct smc_sock *smc)
 
 /* Some kind of closing has been received: peer_conn_closed, peer_conn_abort,
  * or peer_done_writing.
- * Called under tasklet context.
  */
-void smc_close_passive_received(struct smc_sock *smc)
+static void smc_close_passive_work(struct work_struct *work)
 {
-       struct smc_cdc_conn_state_flags *rxflags =
-               &smc->conn.local_rx_ctrl.conn_state_flags;
+       struct smc_connection *conn = container_of(work,
+                                                  struct smc_connection,
+                                                  close_work);
+       struct smc_sock *smc = container_of(conn, struct smc_sock, conn);
+       struct smc_cdc_conn_state_flags *rxflags;
        struct sock *sk = &smc->sk;
        int old_state;
 
-       sk->sk_shutdown |= RCV_SHUTDOWN;
-       if (smc->clcsock && smc->clcsock->sk)
-               smc->clcsock->sk->sk_shutdown |= RCV_SHUTDOWN;
-       sock_set_flag(&smc->sk, SOCK_DONE);
-
+       lock_sock(&smc->sk);
        old_state = sk->sk_state;
 
+       if (!conn->alert_token_local) {
+               /* abnormal termination */
+               smc_close_active_abort(smc);
+               goto wakeup;
+       }
+
+       rxflags = &smc->conn.local_rx_ctrl.conn_state_flags;
        if (rxflags->peer_conn_abort) {
                smc_close_passive_abort_received(smc);
                goto wakeup;
@@ -331,7 +347,7 @@ void smc_close_passive_received(struct smc_sock *smc)
        case SMC_INIT:
                if (atomic_read(&smc->conn.bytes_to_rcv) ||
                    (rxflags->peer_done_writing &&
-                    !rxflags->peer_conn_closed))
+                    !smc_cdc_rxed_any_close(conn)))
                        sk->sk_state = SMC_APPCLOSEWAIT1;
                else
                        sk->sk_state = SMC_CLOSED;
@@ -348,7 +364,7 @@ void smc_close_passive_received(struct smc_sock *smc)
                if (!smc_cdc_rxed_any_close(&smc->conn))
                        break;
                if (sock_flag(sk, SOCK_DEAD) &&
-                   (sk->sk_shutdown == SHUTDOWN_MASK)) {
+                   smc_close_sent_any_close(conn)) {
                        /* smc_release has already been called locally */
                        sk->sk_state = SMC_CLOSED;
                } else {
@@ -367,17 +383,19 @@ void smc_close_passive_received(struct smc_sock *smc)
        }
 
 wakeup:
-       if (old_state != sk->sk_state)
-               sk->sk_state_change(sk);
        sk->sk_data_ready(sk); /* wakeup blocked rcvbuf consumers */
        sk->sk_write_space(sk); /* wakeup blocked sndbuf producers */
 
-       if ((sk->sk_state == SMC_CLOSED) &&
-           (sock_flag(sk, SOCK_DEAD) || (old_state == SMC_INIT))) {
-               smc_conn_free(&smc->conn);
-               schedule_delayed_work(&smc->sock_put_work,
-                                     SMC_CLOSE_SOCK_PUT_DELAY);
+       if (old_state != sk->sk_state) {
+               sk->sk_state_change(sk);
+               if ((sk->sk_state == SMC_CLOSED) &&
+                   (sock_flag(sk, SOCK_DEAD) || !sk->sk_socket)) {
+                       smc_conn_free(&smc->conn);
+                       schedule_delayed_work(&smc->sock_put_work,
+                                             SMC_CLOSE_SOCK_PUT_DELAY);
+               }
        }
+       release_sock(&smc->sk);
 }
 
 void smc_close_sock_put_work(struct work_struct *work)
@@ -442,3 +460,9 @@ again:
                sk->sk_state_change(&smc->sk);
        return rc;
 }
+
+/* Initialize close properties on connection establishment. */
+void smc_close_init(struct smc_sock *smc)
+{
+       INIT_WORK(&smc->conn.close_work, smc_close_passive_work);
+}
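
The thread running through the smc_close changes above: passive-close handling moves out of the CDC tasklet into conn->close_work, so the handler may sleep and take lock_sock() while it walks the state machine. A self-contained sketch of the container_of()/work-item pattern it relies on, with userspace stand-ins and illustrative names:

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct work_struct {
	void (*func)(struct work_struct *work);
};

struct connection {
	struct work_struct close_work;
};

struct sock_like {
	int state;			/* 1 = active, 0 = closed */
	struct connection conn;
};

static void close_passive_work(struct work_struct *work)
{
	struct connection *conn =
		container_of(work, struct connection, close_work);
	struct sock_like *s =
		container_of(conn, struct sock_like, conn);

	/* lock_sock() would be legal here, unlike in tasklet context */
	s->state = 0;
	printf("socket %p closed\n", (void *)s);
}

int main(void)
{
	struct sock_like s = { .state = 1 };

	s.conn.close_work.func = close_passive_work;	/* INIT_WORK() analogue */
	s.conn.close_work.func(&s.conn.close_work);	/* schedule_work() analogue */
	return 0;
}
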
index bc9a2df3633cee576b15c0a9d715d1bdf6753957..4a3d99a8d7cbf18598ef74dae61e2be2a0a17b48 100644 (file)
@@ -21,8 +21,8 @@
 void smc_close_wake_tx_prepared(struct smc_sock *smc);
 void smc_close_active_abort(struct smc_sock *smc);
 int smc_close_active(struct smc_sock *smc);
-void smc_close_passive_received(struct smc_sock *smc);
 void smc_close_sock_put_work(struct work_struct *work);
 int smc_close_shutdown_write(struct smc_sock *smc);
+void smc_close_init(struct smc_sock *smc);
 
 #endif /* SMC_CLOSE_H */
index 0eac633fb3549f6e715f25145b16e22b2bc49241..65020e93ff210bb7f5db079399984c55e54c80f1 100644 (file)
@@ -316,7 +316,7 @@ void smc_lgr_terminate(struct smc_link_group *lgr)
                smc = container_of(conn, struct smc_sock, conn);
                sock_hold(&smc->sk);
                __smc_lgr_unregister_conn(conn);
-               smc_close_active_abort(smc);
+               schedule_work(&conn->close_work);
                sock_put(&smc->sk);
                node = rb_first(&lgr->conns_all);
        }
index e6743c008ac548259447daa0e0c207aa7f249c63..16b7c801f8b662630f11e32f79c54f7e2774be11 100644 (file)
@@ -179,8 +179,6 @@ static void smc_ib_global_event_handler(struct ib_event_handler *handler,
        u8 port_idx;
 
        smcibdev = container_of(handler, struct smc_ib_device, event_handler);
-       if (!smc_pnet_find_ib(smcibdev->ibdev->name))
-               return;
 
        switch (ibevent->event) {
        case IB_EVENT_PORT_ERR:
@@ -259,7 +257,6 @@ int smc_ib_create_queue_pair(struct smc_link *lnk)
                        .max_recv_wr = SMC_WR_BUF_CNT * 3,
                        .max_send_sge = SMC_IB_MAX_SEND_SGE,
                        .max_recv_sge = 1,
-                       .max_inline_data = SMC_WR_TX_SIZE,
                },
                .sq_sig_type = IB_SIGNAL_REQ_WR,
                .qp_type = IB_QPT_RC,
index a95f74bb556915f92d0fa1bdbd2e34eb9814c3b1..7e1f0e24d17790f526aa50d07ff5e5d6596b6f3c 100644 (file)
@@ -11,6 +11,7 @@
 #ifndef _SMC_IB_H
 #define _SMC_IB_H
 
+#include <linux/interrupt.h>
 #include <linux/if_ether.h>
 #include <rdma/ib_verbs.h>
 
index 9d3e7fb8348d3efae50515446591642815697b45..78f7af28ae4f25d71469d54d44f98ef4e2df94eb 100644 (file)
@@ -219,7 +219,7 @@ static bool smc_pnetid_valid(const char *pnet_name, char *pnetid)
 }
 
 /* Find an infiniband device by a given name. The device might not exist. */
-struct smc_ib_device *smc_pnet_find_ib(char *ib_name)
+static struct smc_ib_device *smc_pnet_find_ib(char *ib_name)
 {
        struct smc_ib_device *ibdev;
 
@@ -523,8 +523,11 @@ void smc_pnet_find_roce_resource(struct sock *sk,
        read_lock(&smc_pnettable.lock);
        list_for_each_entry(pnetelem, &smc_pnettable.pnetlist, list) {
                if (dst->dev == pnetelem->ndev) {
-                       *smcibdev = pnetelem->smcibdev;
-                       *ibport = pnetelem->ib_port;
+                       if (smc_ib_port_active(pnetelem->smcibdev,
+                                              pnetelem->ib_port)) {
+                               *smcibdev = pnetelem->smcibdev;
+                               *ibport = pnetelem->ib_port;
+                       }
                        break;
                }
        }
index 32ab3df928ca183643fab96ffd8c09667a54fbde..c4f1bccd43589c0d2d2591925dfa3053b7fa4aef 100644 (file)
@@ -16,7 +16,6 @@ struct smc_ib_device;
 int smc_pnet_init(void) __init;
 void smc_pnet_exit(void);
 int smc_pnet_remove_by_ibdev(struct smc_ib_device *ibdev);
-struct smc_ib_device *smc_pnet_find_ib(char *ib_name);
 void smc_pnet_find_roce_resource(struct sock *sk,
                                 struct smc_ib_device **smcibdev, u8 *ibport);
 
index c4ef9a4ec56971e685d419a4a89f51e8181709c1..f0c8b089f770a229b7c805fe5b2d59da40a5c1e3 100644 (file)
@@ -36,11 +36,10 @@ static void smc_rx_data_ready(struct sock *sk)
        if (skwq_has_sleeper(wq))
                wake_up_interruptible_sync_poll(&wq->wait, POLLIN | POLLPRI |
                                                POLLRDNORM | POLLRDBAND);
+       sk_wake_async(sk, SOCK_WAKE_WAITD, POLL_IN);
        if ((sk->sk_shutdown == SHUTDOWN_MASK) ||
            (sk->sk_state == SMC_CLOSED))
                sk_wake_async(sk, SOCK_WAKE_WAITD, POLL_HUP);
-       else
-               sk_wake_async(sk, SOCK_WAKE_WAITD, POLL_IN);
        rcu_read_unlock();
 }
 
index 69a0013dd25cecbee0658168da577d15ed8913d8..21ec1832ab517d647ac8942ddbd5484492eb4c94 100644 (file)
@@ -431,9 +431,13 @@ static void smc_tx_work(struct work_struct *work)
                                                   struct smc_connection,
                                                   tx_work);
        struct smc_sock *smc = container_of(conn, struct smc_sock, conn);
+       int rc;
 
        lock_sock(&smc->sk);
-       smc_tx_sndbuf_nonempty(conn);
+       rc = smc_tx_sndbuf_nonempty(conn);
+       if (!rc && conn->local_rx_ctrl.prod_flags.write_blocked &&
+           !atomic_read(&conn->bytes_to_rcv))
+               conn->local_rx_ctrl.prod_flags.write_blocked = 0;
        release_sock(&smc->sk);
 }
 
index eadf157418dcd42c036685e5858bea6e5c559506..874ee9f9d79674f9576c28f9d1dd669e03e62422 100644 (file)
@@ -447,7 +447,7 @@ static void smc_wr_init_sge(struct smc_link *lnk)
                lnk->wr_tx_ibs[i].num_sge = 1;
                lnk->wr_tx_ibs[i].opcode = IB_WR_SEND;
                lnk->wr_tx_ibs[i].send_flags =
-                       IB_SEND_SIGNALED | IB_SEND_SOLICITED | IB_SEND_INLINE;
+                       IB_SEND_SIGNALED | IB_SEND_SOLICITED;
        }
        for (i = 0; i < lnk->wr_rx_cnt; i++) {
                lnk->wr_rx_sges[i].addr =
index 2c1e8677ff2d4fdb2f29eaa6e06a7c323d27d981..eea997036ada5085f26a0159baf913cbde366f4b 100644 (file)
@@ -652,6 +652,16 @@ int kernel_sendmsg(struct socket *sock, struct msghdr *msg,
 }
 EXPORT_SYMBOL(kernel_sendmsg);
 
+static bool skb_is_err_queue(const struct sk_buff *skb)
+{
+       /* The pkt_type of skbs enqueued on the error queue is set to
+        * PACKET_OUTGOING in skb_set_err_queue(). This is only safe to do
+        * in recvmsg, since skbs received on a local socket will never
+        * have a pkt_type of PACKET_OUTGOING.
+        */
+       return skb->pkt_type == PACKET_OUTGOING;
+}
+
 /*
  * called from sock_recv_timestamp() if sock_flag(sk, SOCK_RCVTSTAMP)
  */
@@ -695,7 +705,8 @@ void __sock_recv_timestamp(struct msghdr *msg, struct sock *sk,
                put_cmsg(msg, SOL_SOCKET,
                         SCM_TIMESTAMPING, sizeof(tss), &tss);
 
-               if (skb->len && (sk->sk_tsflags & SOF_TIMESTAMPING_OPT_STATS))
+               if (skb_is_err_queue(skb) && skb->len &&
+                   SKB_EXT_ERR(skb)->opt_stats)
                        put_cmsg(msg, SOL_SOCKET, SCM_TIMESTAMPING_OPT_STATS,
                                 skb->len, skb->data);
        }
@@ -1506,7 +1517,7 @@ SYSCALL_DEFINE4(accept4, int, fd, struct sockaddr __user *, upeer_sockaddr,
        if (err)
                goto out_fd;
 
-       err = sock->ops->accept(sock, newsock, sock->file->f_flags);
+       err = sock->ops->accept(sock, newsock, sock->file->f_flags, false);
        if (err < 0)
                goto out_fd;
 
@@ -1731,6 +1742,7 @@ SYSCALL_DEFINE6(recvfrom, int, fd, void __user *, ubuf, size_t, size,
        /* We assume all kernel code knows the size of sockaddr_storage */
        msg.msg_namelen = 0;
        msg.msg_iocb = NULL;
+       msg.msg_flags = 0;
        if (sock->file->f_flags & O_NONBLOCK)
                flags |= MSG_DONTWAIT;
        err = sock_recvmsg(sock, &msg, flags);
@@ -3238,7 +3250,7 @@ int kernel_accept(struct socket *sock, struct socket **newsock, int flags)
        if (err < 0)
                goto done;
 
-       err = sock->ops->accept(sock, *newsock, flags);
+       err = sock->ops->accept(sock, *newsock, flags, true);
        if (err < 0) {
                sock_release(*newsock);
                *newsock = NULL;
@@ -3344,3 +3356,49 @@ int kernel_sock_shutdown(struct socket *sock, enum sock_shutdown_cmd how)
        return sock->ops->shutdown(sock, how);
 }
 EXPORT_SYMBOL(kernel_sock_shutdown);
+
+/* This routine returns the IP overhead imposed by a socket, i.e. the
+ * length of the underlying IP header (IPv4 or IPv6, depending on the
+ * socket family) plus the length of any IP options enabled on the
+ * socket.
+ */
+u32 kernel_sock_ip_overhead(struct sock *sk)
+{
+       struct inet_sock *inet;
+       struct ip_options_rcu *opt;
+       u32 overhead = 0;
+       bool owned_by_user;
+#if IS_ENABLED(CONFIG_IPV6)
+       struct ipv6_pinfo *np;
+       struct ipv6_txoptions *optv6 = NULL;
+#endif /* IS_ENABLED(CONFIG_IPV6) */
+
+       if (!sk)
+               return overhead;
+
+       owned_by_user = sock_owned_by_user(sk);
+       switch (sk->sk_family) {
+       case AF_INET:
+               inet = inet_sk(sk);
+               overhead += sizeof(struct iphdr);
+               opt = rcu_dereference_protected(inet->inet_opt,
+                                               owned_by_user);
+               if (opt)
+                       overhead += opt->opt.optlen;
+               return overhead;
+#if IS_ENABLED(CONFIG_IPV6)
+       case AF_INET6:
+               np = inet6_sk(sk);
+               overhead += sizeof(struct ipv6hdr);
+               if (np)
+                       optv6 = rcu_dereference_protected(np->opt,
+                                                         owned_by_user);
+               if (optv6)
+                       overhead += (optv6->opt_flen + optv6->opt_nflen);
+               return overhead;
+#endif /* IS_ENABLED(CONFIG_IPV6) */
+       default: /* Returns 0 overhead if the socket is not ipv4 or ipv6 */
+               return overhead;
+       }
+}
+EXPORT_SYMBOL(kernel_sock_ip_overhead);
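
The helper reduces to a fixed header size plus whatever IP options the socket carries: 20 bytes plus opt.optlen for IPv4, 40 bytes plus opt_flen + opt_nflen for IPv6, and 0 for anything else. The base figures with ordinary userspace headers, as a sketch of the arithmetic rather than the kernel routine:

#include <stdio.h>
#include <netinet/ip.h>
#include <netinet/ip6.h>

int main(void)
{
	/* 20 bytes, plus inet_opt->opt.optlen in the kernel version */
	unsigned int v4 = sizeof(struct iphdr);
	/* 40 bytes, plus opt_flen + opt_nflen in the kernel version */
	unsigned int v6 = sizeof(struct ip6_hdr);

	printf("IPv4 base overhead: %u\nIPv6 base overhead: %u\n", v4, v6);
	return 0;
}
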
index 8931e33b65412d7b8bbe8b3872e5f7d7b27d92d5..2b720fa35c4ff7c2ae906e9e76d13d27a2b2f008 100644 (file)
@@ -1635,6 +1635,7 @@ static struct svc_xprt *svc_bc_create_socket(struct svc_serv *serv,
 
        xprt = &svsk->sk_xprt;
        svc_xprt_init(net, &svc_tcp_bc_class, xprt, serv);
+       set_bit(XPT_CONG_CTRL, &svsk->sk_xprt.xpt_flags);
 
        serv->sv_bc_xprt = xprt;
 
index c13a5c35ce14d992515fa99e456976ed0cd1c382..fc8f14c7bfec60dc5828340861a747e49f06193e 100644 (file)
@@ -127,6 +127,7 @@ static struct svc_xprt *svc_rdma_bc_create(struct svc_serv *serv,
        xprt = &cma_xprt->sc_xprt;
 
        svc_xprt_init(net, &svc_rdma_bc_class, xprt, serv);
+       set_bit(XPT_CONG_CTRL, &xprt->xpt_flags);
        serv->sv_bc_xprt = xprt;
 
        dprintk("svcrdma: %s(%p)\n", __func__, xprt);
index 81cd31acf690f41573e5fedd9b837376543f5ce9..3b332b395045b5b0ad07bc13a30db1420d7f7082 100644 (file)
@@ -503,7 +503,8 @@ rpcrdma_ep_create(struct rpcrdma_ep *ep, struct rpcrdma_ia *ia,
        struct ib_cq *sendcq, *recvcq;
        int rc;
 
-       max_sge = min(ia->ri_device->attrs.max_sge, RPCRDMA_MAX_SEND_SGES);
+       max_sge = min_t(unsigned int, ia->ri_device->attrs.max_sge,
+                       RPCRDMA_MAX_SEND_SGES);
        if (max_sge < RPCRDMA_MIN_SEND_SGES) {
                pr_warn("rpcrdma: HCA provides only %d send SGEs\n", max_sge);
                return -ENOMEM;
index 9be6592e4a6fa20c78995396ffa3dfcd1f19537a..bd0aac87b41ac627e5c897256a79150226926514 100644 (file)
@@ -416,6 +416,7 @@ static void tipc_nameseq_subscribe(struct name_seq *nseq,
 
        tipc_subscrp_convert_seq(&s->evt.s.seq, s->swap, &ns);
 
+       tipc_subscrp_get(s);
        list_add(&s->nameseq_list, &nseq->subscriptions);
 
        if (!sseq)
@@ -787,6 +788,7 @@ void tipc_nametbl_unsubscribe(struct tipc_subscription *s)
        if (seq != NULL) {
                spin_lock_bh(&seq->lock);
                list_del_init(&s->nameseq_list);
+               tipc_subscrp_put(s);
                if (!seq->first_free && list_empty(&seq->subscriptions)) {
                        hlist_del_init_rcu(&seq->ns_list);
                        kfree(seq->sseqs);
index 43e4045e72bc00cfbc9db6c1bf987a46e272969b..15f6ce7bf8687a95a9fe54080ec84cf62de9c762 100644 (file)
@@ -115,7 +115,8 @@ static void tipc_data_ready(struct sock *sk);
 static void tipc_write_space(struct sock *sk);
 static void tipc_sock_destruct(struct sock *sk);
 static int tipc_release(struct socket *sock);
-static int tipc_accept(struct socket *sock, struct socket *new_sock, int flags);
+static int tipc_accept(struct socket *sock, struct socket *new_sock, int flags,
+                      bool kern);
 static void tipc_sk_timeout(unsigned long data);
 static int tipc_sk_publish(struct tipc_sock *tsk, uint scope,
                           struct tipc_name_seq const *seq);
@@ -2029,7 +2030,8 @@ static int tipc_wait_for_accept(struct socket *sock, long timeo)
  *
  * Returns 0 on success, errno otherwise
  */
-static int tipc_accept(struct socket *sock, struct socket *new_sock, int flags)
+static int tipc_accept(struct socket *sock, struct socket *new_sock, int flags,
+                      bool kern)
 {
        struct sock *new_sk, *sk = sock->sk;
        struct sk_buff *buf;
@@ -2051,7 +2053,7 @@ static int tipc_accept(struct socket *sock, struct socket *new_sock, int flags)
 
        buf = skb_peek(&sk->sk_receive_queue);
 
-       res = tipc_sk_create(sock_net(sock->sk), new_sock, 0, 0);
+       res = tipc_sk_create(sock_net(sock->sk), new_sock, 0, kern);
        if (res)
                goto exit;
        security_sk_clone(sock->sk, new_sock->sk);
@@ -2509,6 +2511,28 @@ static int tipc_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
        }
 }
 
+static int tipc_socketpair(struct socket *sock1, struct socket *sock2)
+{
+       struct tipc_sock *tsk2 = tipc_sk(sock2->sk);
+       struct tipc_sock *tsk1 = tipc_sk(sock1->sk);
+       u32 onode = tipc_own_addr(sock_net(sock1->sk));
+
+       tsk1->peer.family = AF_TIPC;
+       tsk1->peer.addrtype = TIPC_ADDR_ID;
+       tsk1->peer.scope = TIPC_NODE_SCOPE;
+       tsk1->peer.addr.id.ref = tsk2->portid;
+       tsk1->peer.addr.id.node = onode;
+       tsk2->peer.family = AF_TIPC;
+       tsk2->peer.addrtype = TIPC_ADDR_ID;
+       tsk2->peer.scope = TIPC_NODE_SCOPE;
+       tsk2->peer.addr.id.ref = tsk1->portid;
+       tsk2->peer.addr.id.node = onode;
+
+       tipc_sk_finish_conn(tsk1, tsk2->portid, onode);
+       tipc_sk_finish_conn(tsk2, tsk1->portid, onode);
+       return 0;
+}
+
 /* Protocol switches for the various types of TIPC sockets */
 
 static const struct proto_ops msg_ops = {
@@ -2517,7 +2541,7 @@ static const struct proto_ops msg_ops = {
        .release        = tipc_release,
        .bind           = tipc_bind,
        .connect        = tipc_connect,
-       .socketpair     = sock_no_socketpair,
+       .socketpair     = tipc_socketpair,
        .accept         = sock_no_accept,
        .getname        = tipc_getname,
        .poll           = tipc_poll,
@@ -2538,7 +2562,7 @@ static const struct proto_ops packet_ops = {
        .release        = tipc_release,
        .bind           = tipc_bind,
        .connect        = tipc_connect,
-       .socketpair     = sock_no_socketpair,
+       .socketpair     = tipc_socketpair,
        .accept         = tipc_accept,
        .getname        = tipc_getname,
        .poll           = tipc_poll,
@@ -2559,7 +2583,7 @@ static const struct proto_ops stream_ops = {
        .release        = tipc_release,
        .bind           = tipc_bind,
        .connect        = tipc_connect,
-       .socketpair     = sock_no_socketpair,
+       .socketpair     = tipc_socketpair,
        .accept         = tipc_accept,
        .getname        = tipc_getname,
        .poll           = tipc_poll,
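
With tipc_socketpair() wired into all three proto_ops tables, each endpoint records the other as its fixed peer and both are marked connected, so socketpair(2) on AF_TIPC returns two mutually connected sockets instead of failing with -EOPNOTSUPP. A hedged usage sketch, assuming a TIPC-enabled kernel and with abbreviated error handling:

#include <stdio.h>
#include <sys/socket.h>
#include <unistd.h>

#ifndef AF_TIPC
#define AF_TIPC 30	/* normally provided by <sys/socket.h> */
#endif

int main(void)
{
	int sv[2];
	char buf[16];

	if (socketpair(AF_TIPC, SOCK_SEQPACKET, 0, sv) < 0) {
		perror("socketpair");	/* fails without TIPC support */
		return 1;
	}
	if (write(sv[0], "ping", 4) != 4 || read(sv[1], buf, sizeof(buf)) < 4) {
		perror("io");
		return 1;
	}
	printf("got %.4s\n", buf);
	close(sv[0]);
	close(sv[1]);
	return 0;
}
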
index 9d94e65d0894183b4af94ed24e84b94c0478b551..0bf91cd3733cb37ecc8ba4ccf7ae5a26cb6e966d 100644 (file)
@@ -54,8 +54,6 @@ struct tipc_subscriber {
 
 static void tipc_subscrp_delete(struct tipc_subscription *sub);
 static void tipc_subscrb_put(struct tipc_subscriber *subscriber);
-static void tipc_subscrp_put(struct tipc_subscription *subscription);
-static void tipc_subscrp_get(struct tipc_subscription *subscription);
 
 /**
  * htohl - convert value to endianness used by destination
@@ -125,7 +123,6 @@ void tipc_subscrp_report_overlap(struct tipc_subscription *sub, u32 found_lower,
 {
        struct tipc_name_seq seq;
 
-       tipc_subscrp_get(sub);
        tipc_subscrp_convert_seq(&sub->evt.s.seq, sub->swap, &seq);
        if (!tipc_subscrp_check_overlap(&seq, found_lower, found_upper))
                return;
@@ -135,12 +132,17 @@ void tipc_subscrp_report_overlap(struct tipc_subscription *sub, u32 found_lower,
 
        tipc_subscrp_send_event(sub, found_lower, found_upper, event, port_ref,
                                node);
-       tipc_subscrp_put(sub);
 }
 
 static void tipc_subscrp_timeout(unsigned long data)
 {
        struct tipc_subscription *sub = (struct tipc_subscription *)data;
+       struct tipc_subscriber *subscriber = sub->subscriber;
+
+       spin_lock_bh(&subscriber->lock);
+       tipc_nametbl_unsubscribe(sub);
+       list_del(&sub->subscrp_list);
+       spin_unlock_bh(&subscriber->lock);
 
        /* Notify subscriber of timeout */
        tipc_subscrp_send_event(sub, sub->evt.s.seq.lower, sub->evt.s.seq.upper,
@@ -172,21 +174,17 @@ static void tipc_subscrp_kref_release(struct kref *kref)
        struct tipc_net *tn = net_generic(sub->net, tipc_net_id);
        struct tipc_subscriber *subscriber = sub->subscriber;
 
-       spin_lock_bh(&subscriber->lock);
-       tipc_nametbl_unsubscribe(sub);
-       list_del(&sub->subscrp_list);
        atomic_dec(&tn->subscription_count);
-       spin_unlock_bh(&subscriber->lock);
        kfree(sub);
        tipc_subscrb_put(subscriber);
 }
 
-static void tipc_subscrp_put(struct tipc_subscription *subscription)
+void tipc_subscrp_put(struct tipc_subscription *subscription)
 {
        kref_put(&subscription->kref, tipc_subscrp_kref_release);
 }
 
-static void tipc_subscrp_get(struct tipc_subscription *subscription)
+void tipc_subscrp_get(struct tipc_subscription *subscription)
 {
        kref_get(&subscription->kref);
 }
@@ -205,11 +203,9 @@ static void tipc_subscrb_subscrp_delete(struct tipc_subscriber *subscriber,
                if (s && memcmp(s, &sub->evt.s, sizeof(struct tipc_subscr)))
                        continue;
 
-               tipc_subscrp_get(sub);
-               spin_unlock_bh(&subscriber->lock);
+               tipc_nametbl_unsubscribe(sub);
+               list_del(&sub->subscrp_list);
                tipc_subscrp_delete(sub);
-               tipc_subscrp_put(sub);
-               spin_lock_bh(&subscriber->lock);
 
                if (s)
                        break;
index ffdc214c117a924f34b416fde415fcd18201ebc0..ee52957dc9524a76ac371aa19d950dc90bfe4035 100644 (file)
@@ -78,4 +78,7 @@ u32 tipc_subscrp_convert_seq_type(u32 type, int swap);
 int tipc_topsrv_start(struct net *net);
 void tipc_topsrv_stop(struct net *net);
 
+void tipc_subscrp_put(struct tipc_subscription *subscription);
+void tipc_subscrp_get(struct tipc_subscription *subscription);
+
 #endif
index ee37b390260a62f026f08e3da827ae45666bc2a6..6a7fe7660551f45c065a7f472b805c0b8073f6bb 100644 (file)
@@ -636,7 +636,7 @@ static int unix_bind(struct socket *, struct sockaddr *, int);
 static int unix_stream_connect(struct socket *, struct sockaddr *,
                               int addr_len, int flags);
 static int unix_socketpair(struct socket *, struct socket *);
-static int unix_accept(struct socket *, struct socket *, int);
+static int unix_accept(struct socket *, struct socket *, int, bool);
 static int unix_getname(struct socket *, struct sockaddr *, int *, int);
 static unsigned int unix_poll(struct file *, struct socket *, poll_table *);
 static unsigned int unix_dgram_poll(struct file *, struct socket *,
@@ -996,7 +996,7 @@ static int unix_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
        unsigned int hash;
        struct unix_address *addr;
        struct hlist_head *list;
-       struct path path = { NULL, NULL };
+       struct path path = { };
 
        err = -EINVAL;
        if (sunaddr->sun_family != AF_UNIX)
@@ -1402,7 +1402,8 @@ static void unix_sock_inherit_flags(const struct socket *old,
                set_bit(SOCK_PASSSEC, &new->flags);
 }
 
-static int unix_accept(struct socket *sock, struct socket *newsock, int flags)
+static int unix_accept(struct socket *sock, struct socket *newsock, int flags,
+                      bool kern)
 {
        struct sock *sk = sock->sk;
        struct sock *tsk;
index 6a0d48525fcf9a71f54bb43495b200b300f5341e..c36757e728442bb936c17f8a975b420f5f8a5972 100644 (file)
@@ -146,6 +146,7 @@ void unix_notinflight(struct user_struct *user, struct file *fp)
        if (s) {
                struct unix_sock *u = unix_sk(s);
 
+               BUG_ON(!atomic_long_read(&u->inflight));
                BUG_ON(list_empty(&u->link));
 
                if (atomic_long_dec_and_test(&u->inflight))
@@ -341,6 +342,14 @@ void unix_gc(void)
        }
        list_del(&cursor);
 
+       /* Now gc_candidates contains only garbage.  Restore original
+        * inflight counters for these as well, and remove the skbuffs
+        * which are creating the cycle(s).
+        */
+       skb_queue_head_init(&hitlist);
+       list_for_each_entry(u, &gc_candidates, link)
+               scan_children(&u->sk, inc_inflight, &hitlist);
+
        /* not_cycle_list contains those sockets which do not make up a
         * cycle.  Restore these to the inflight list.
         */
@@ -350,14 +359,6 @@ void unix_gc(void)
                list_move_tail(&u->link, &gc_inflight_list);
        }
 
-       /* Now gc_candidates contains only garbage.  Restore original
-        * inflight counters for these as well, and remove the skbuffs
-        * which are creating the cycle(s).
-        */
-       skb_queue_head_init(&hitlist);
-       list_for_each_entry(u, &gc_candidates, link)
-       scan_children(&u->sk, inc_inflight, &hitlist);
-
        spin_unlock(&unix_gc_lock);
 
        /* Here we are. Hitlist is filled. Die. */
index 9192ead6675114128817267926befe23f7cc1111..6f7f6757ceefb500551fafbf40c462835c4baf88 100644 (file)
@@ -1102,10 +1102,19 @@ static const struct proto_ops vsock_dgram_ops = {
        .sendpage = sock_no_sendpage,
 };
 
+static int vsock_transport_cancel_pkt(struct vsock_sock *vsk)
+{
+       if (!transport->cancel_pkt)
+               return -EOPNOTSUPP;
+
+       return transport->cancel_pkt(vsk);
+}
+
 static void vsock_connect_timeout(struct work_struct *work)
 {
        struct sock *sk;
        struct vsock_sock *vsk;
+       int cancel = 0;
 
        vsk = container_of(work, struct vsock_sock, dwork.work);
        sk = sk_vsock(vsk);
@@ -1116,8 +1125,11 @@ static void vsock_connect_timeout(struct work_struct *work)
                sk->sk_state = SS_UNCONNECTED;
                sk->sk_err = ETIMEDOUT;
                sk->sk_error_report(sk);
+               cancel = 1;
        }
        release_sock(sk);
+       if (cancel)
+               vsock_transport_cancel_pkt(vsk);
 
        sock_put(sk);
 }
@@ -1224,11 +1236,13 @@ static int vsock_stream_connect(struct socket *sock, struct sockaddr *addr,
                        err = sock_intr_errno(timeout);
                        sk->sk_state = SS_UNCONNECTED;
                        sock->state = SS_UNCONNECTED;
+                       vsock_transport_cancel_pkt(vsk);
                        goto out_wait;
                } else if (timeout == 0) {
                        err = -ETIMEDOUT;
                        sk->sk_state = SS_UNCONNECTED;
                        sock->state = SS_UNCONNECTED;
+                       vsock_transport_cancel_pkt(vsk);
                        goto out_wait;
                }
 
@@ -1250,7 +1264,8 @@ out:
        return err;
 }
 
-static int vsock_accept(struct socket *sock, struct socket *newsock, int flags)
+static int vsock_accept(struct socket *sock, struct socket *newsock, int flags,
+                       bool kern)
 {
        struct sock *listener;
        int err;
index 9d24c0e958b18e614e30b24c0fcfbbe2152941f3..68675a151f22b8b63c02b25a67b833d9a6046d84 100644 (file)
@@ -213,6 +213,47 @@ virtio_transport_send_pkt(struct virtio_vsock_pkt *pkt)
        return len;
 }
 
+static int
+virtio_transport_cancel_pkt(struct vsock_sock *vsk)
+{
+       struct virtio_vsock *vsock;
+       struct virtio_vsock_pkt *pkt, *n;
+       int cnt = 0;
+       LIST_HEAD(freeme);
+
+       vsock = virtio_vsock_get();
+       if (!vsock) {
+               return -ENODEV;
+       }
+
+       spin_lock_bh(&vsock->send_pkt_list_lock);
+       list_for_each_entry_safe(pkt, n, &vsock->send_pkt_list, list) {
+               if (pkt->vsk != vsk)
+                       continue;
+               list_move(&pkt->list, &freeme);
+       }
+       spin_unlock_bh(&vsock->send_pkt_list_lock);
+
+       list_for_each_entry_safe(pkt, n, &freeme, list) {
+               if (pkt->reply)
+                       cnt++;
+               list_del(&pkt->list);
+               virtio_transport_free_pkt(pkt);
+       }
+
+       if (cnt) {
+               struct virtqueue *rx_vq = vsock->vqs[VSOCK_VQ_RX];
+               int new_cnt;
+
+               new_cnt = atomic_sub_return(cnt, &vsock->queued_replies);
+               if (new_cnt + cnt >= virtqueue_get_vring_size(rx_vq) &&
+                   new_cnt < virtqueue_get_vring_size(rx_vq))
+                       queue_work(virtio_vsock_workqueue, &vsock->rx_work);
+       }
+
+       return 0;
+}
+
 static void virtio_vsock_rx_fill(struct virtio_vsock *vsock)
 {
        int buf_len = VIRTIO_VSOCK_DEFAULT_RX_BUF_SIZE;
@@ -462,6 +503,7 @@ static struct virtio_transport virtio_transport = {
                .release                  = virtio_transport_release,
                .connect                  = virtio_transport_connect,
                .shutdown                 = virtio_transport_shutdown,
+               .cancel_pkt               = virtio_transport_cancel_pkt,
 
                .dgram_bind               = virtio_transport_dgram_bind,
                .dgram_dequeue            = virtio_transport_dgram_dequeue,
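
virtio_transport_cancel_pkt() above applies a common pattern: under the list lock, detach this socket's queued packets onto a private list, free them only after the lock is dropped, then credit back queued_replies (kicking rx work if the device had been throttled). The detach-then-process pattern in miniature, with stub types and no kernel APIs:

#include <stdio.h>

struct pkt {
	int owner;
	struct pkt *next;
};

/* Move every packet of @owner from *head to a private list and return it;
 * in the kernel this walk runs under send_pkt_list_lock, and the returned
 * list is freed only after the lock is released.
 */
static struct pkt *cancel_for_owner(struct pkt **head, int owner)
{
	struct pkt *freeme = NULL, **pp = head;

	while (*pp) {
		if ((*pp)->owner == owner) {
			struct pkt *victim = *pp;

			*pp = victim->next;
			victim->next = freeme;
			freeme = victim;
		} else {
			pp = &(*pp)->next;
		}
	}
	return freeme;
}

int main(void)
{
	struct pkt a = { 1, NULL }, b = { 2, &a }, c = { 1, &b };
	struct pkt *head = &c;			/* c(1) -> b(2) -> a(1) */
	struct pkt *freeme = cancel_for_owner(&head, 1);

	for (; freeme; freeme = freeme->next)
		printf("cancelled pkt of owner %d\n", freeme->owner);
	printf("remaining head owner: %d\n", head->owner);
	return 0;
}
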
index 8d592a45b59786746d186e12d0c362d07c30bdac..af087b44ceea2311e53060e2442b4af2024bb037 100644 (file)
@@ -58,6 +58,7 @@ virtio_transport_alloc_pkt(struct virtio_vsock_pkt_info *info,
        pkt->len                = len;
        pkt->hdr.len            = cpu_to_le32(len);
        pkt->reply              = info->reply;
+       pkt->vsk                = info->vsk;
 
        if (info->msg && len > 0) {
                pkt->buf = kmalloc(len, GFP_KERNEL);
@@ -180,6 +181,7 @@ static int virtio_transport_send_credit_update(struct vsock_sock *vsk,
        struct virtio_vsock_pkt_info info = {
                .op = VIRTIO_VSOCK_OP_CREDIT_UPDATE,
                .type = type,
+               .vsk = vsk,
        };
 
        return virtio_transport_send_pkt_info(vsk, &info);
@@ -519,6 +521,7 @@ int virtio_transport_connect(struct vsock_sock *vsk)
        struct virtio_vsock_pkt_info info = {
                .op = VIRTIO_VSOCK_OP_REQUEST,
                .type = VIRTIO_VSOCK_TYPE_STREAM,
+               .vsk = vsk,
        };
 
        return virtio_transport_send_pkt_info(vsk, &info);
@@ -534,6 +537,7 @@ int virtio_transport_shutdown(struct vsock_sock *vsk, int mode)
                          VIRTIO_VSOCK_SHUTDOWN_RCV : 0) |
                         (mode & SEND_SHUTDOWN ?
                          VIRTIO_VSOCK_SHUTDOWN_SEND : 0),
+               .vsk = vsk,
        };
 
        return virtio_transport_send_pkt_info(vsk, &info);
@@ -560,6 +564,7 @@ virtio_transport_stream_enqueue(struct vsock_sock *vsk,
                .type = VIRTIO_VSOCK_TYPE_STREAM,
                .msg = msg,
                .pkt_len = len,
+               .vsk = vsk,
        };
 
        return virtio_transport_send_pkt_info(vsk, &info);
@@ -581,6 +586,7 @@ static int virtio_transport_reset(struct vsock_sock *vsk,
                .op = VIRTIO_VSOCK_OP_RST,
                .type = VIRTIO_VSOCK_TYPE_STREAM,
                .reply = !!pkt,
+               .vsk = vsk,
        };
 
        /* Send RST only if the original pkt is not a RST pkt */
@@ -826,6 +832,7 @@ virtio_transport_send_response(struct vsock_sock *vsk,
                .remote_cid = le64_to_cpu(pkt->hdr.src_cid),
                .remote_port = le32_to_cpu(pkt->hdr.src_port),
                .reply = true,
+               .vsk = vsk,
        };
 
        return virtio_transport_send_pkt_info(vsk, &info);
index 4be4fbbc0b5035662b1cd756bd4e99dd3351309e..10ae7823a19def7bde20d669e3913a40178e7da2 100644 (file)
@@ -96,31 +96,23 @@ static int PROTOCOL_OVERRIDE = -1;
 
 static s32 vmci_transport_error_to_vsock_error(s32 vmci_error)
 {
-       int err;
-
        switch (vmci_error) {
        case VMCI_ERROR_NO_MEM:
-               err = ENOMEM;
-               break;
+               return -ENOMEM;
        case VMCI_ERROR_DUPLICATE_ENTRY:
        case VMCI_ERROR_ALREADY_EXISTS:
-               err = EADDRINUSE;
-               break;
+               return -EADDRINUSE;
        case VMCI_ERROR_NO_ACCESS:
-               err = EPERM;
-               break;
+               return -EPERM;
        case VMCI_ERROR_NO_RESOURCES:
-               err = ENOBUFS;
-               break;
+               return -ENOBUFS;
        case VMCI_ERROR_INVALID_RESOURCE:
-               err = EHOSTUNREACH;
-               break;
+               return -EHOSTUNREACH;
        case VMCI_ERROR_INVALID_ARGS:
        default:
-               err = EINVAL;
+               break;
        }
-
-       return err > 0 ? -err : err;
+       return -EINVAL;
 }
 
 static u32 vmci_transport_peer_rid(u32 peer_cid)
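
The rewrite above drops the positive-errno-then-negate step in favour of returning negative errno constants straight from each case. The same shape in a self-contained form, with illustrative enum values rather than the real VMCI error codes:

#include <errno.h>
#include <stdio.h>

/* Illustrative enum values only, not the real VMCI error codes. */
enum vmci_err {
	VMCI_SUCCESS,
	VMCI_NO_MEM,
	VMCI_ALREADY_EXISTS,
	VMCI_NO_ACCESS,
	VMCI_NO_RESOURCES,
};

static int to_errno(enum vmci_err e)
{
	switch (e) {
	case VMCI_NO_MEM:
		return -ENOMEM;
	case VMCI_ALREADY_EXISTS:
		return -EADDRINUSE;
	case VMCI_NO_ACCESS:
		return -EPERM;
	case VMCI_NO_RESOURCES:
		return -ENOBUFS;
	default:
		break;
	}
	return -EINVAL;		/* anything unrecognised */
}

int main(void)
{
	printf("%d %d\n", to_errno(VMCI_NO_MEM), to_errno(VMCI_SUCCESS));
	return 0;
}
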
index d7f8be4e321a32eba3a615aa69a860c212511625..2312dc2ffdb98b37b2909274c57eed68935267d7 100644 (file)
@@ -545,22 +545,18 @@ static int nl80211_prepare_wdev_dump(struct sk_buff *skb,
 {
        int err;
 
-       rtnl_lock();
-
        if (!cb->args[0]) {
                err = nlmsg_parse(cb->nlh, GENL_HDRLEN + nl80211_fam.hdrsize,
                                  genl_family_attrbuf(&nl80211_fam),
                                  nl80211_fam.maxattr, nl80211_policy);
                if (err)
-                       goto out_unlock;
+                       return err;
 
                *wdev = __cfg80211_wdev_from_attrs(
                                        sock_net(skb->sk),
                                        genl_family_attrbuf(&nl80211_fam));
-               if (IS_ERR(*wdev)) {
-                       err = PTR_ERR(*wdev);
-                       goto out_unlock;
-               }
+               if (IS_ERR(*wdev))
+                       return PTR_ERR(*wdev);
                *rdev = wiphy_to_rdev((*wdev)->wiphy);
                /* 0 is the first index - add 1 to parse only once */
                cb->args[0] = (*rdev)->wiphy_idx + 1;
@@ -570,10 +566,8 @@ static int nl80211_prepare_wdev_dump(struct sk_buff *skb,
                struct wiphy *wiphy = wiphy_idx_to_wiphy(cb->args[0] - 1);
                struct wireless_dev *tmp;
 
-               if (!wiphy) {
-                       err = -ENODEV;
-                       goto out_unlock;
-               }
+               if (!wiphy)
+                       return -ENODEV;
                *rdev = wiphy_to_rdev(wiphy);
                *wdev = NULL;
 
@@ -584,21 +578,11 @@ static int nl80211_prepare_wdev_dump(struct sk_buff *skb,
                        }
                }
 
-               if (!*wdev) {
-                       err = -ENODEV;
-                       goto out_unlock;
-               }
+               if (!*wdev)
+                       return -ENODEV;
        }
 
        return 0;
- out_unlock:
-       rtnl_unlock();
-       return err;
-}
-
-static void nl80211_finish_wdev_dump(struct cfg80211_registered_device *rdev)
-{
-       rtnl_unlock();
 }
 
 /* IE validation */
@@ -2608,17 +2592,17 @@ static int nl80211_dump_interface(struct sk_buff *skb, struct netlink_callback *
        int filter_wiphy = -1;
        struct cfg80211_registered_device *rdev;
        struct wireless_dev *wdev;
+       int ret;
 
        rtnl_lock();
        if (!cb->args[2]) {
                struct nl80211_dump_wiphy_state state = {
                        .filter_wiphy = -1,
                };
-               int ret;
 
                ret = nl80211_dump_wiphy_parse(skb, cb, &state);
                if (ret)
-                       return ret;
+                       goto out_unlock;
 
                filter_wiphy = state.filter_wiphy;
 
@@ -2663,12 +2647,14 @@ static int nl80211_dump_interface(struct sk_buff *skb, struct netlink_callback *
                wp_idx++;
        }
  out:
-       rtnl_unlock();
-
        cb->args[0] = wp_idx;
        cb->args[1] = if_idx;
 
-       return skb->len;
+       ret = skb->len;
+ out_unlock:
+       rtnl_unlock();
+
+       return ret;
 }
 
 static int nl80211_get_interface(struct sk_buff *skb, struct genl_info *info)
@@ -4452,9 +4438,10 @@ static int nl80211_dump_station(struct sk_buff *skb,
        int sta_idx = cb->args[2];
        int err;
 
+       rtnl_lock();
        err = nl80211_prepare_wdev_dump(skb, cb, &rdev, &wdev);
        if (err)
-               return err;
+               goto out_err;
 
        if (!wdev->netdev) {
                err = -EINVAL;
@@ -4489,7 +4476,7 @@ static int nl80211_dump_station(struct sk_buff *skb,
        cb->args[2] = sta_idx;
        err = skb->len;
  out_err:
-       nl80211_finish_wdev_dump(rdev);
+       rtnl_unlock();
 
        return err;
 }
@@ -5275,9 +5262,10 @@ static int nl80211_dump_mpath(struct sk_buff *skb,
        int path_idx = cb->args[2];
        int err;
 
+       rtnl_lock();
        err = nl80211_prepare_wdev_dump(skb, cb, &rdev, &wdev);
        if (err)
-               return err;
+               goto out_err;
 
        if (!rdev->ops->dump_mpath) {
                err = -EOPNOTSUPP;
@@ -5310,7 +5298,7 @@ static int nl80211_dump_mpath(struct sk_buff *skb,
        cb->args[2] = path_idx;
        err = skb->len;
  out_err:
-       nl80211_finish_wdev_dump(rdev);
+       rtnl_unlock();
        return err;
 }
 
@@ -5470,9 +5458,10 @@ static int nl80211_dump_mpp(struct sk_buff *skb,
        int path_idx = cb->args[2];
        int err;
 
+       rtnl_lock();
        err = nl80211_prepare_wdev_dump(skb, cb, &rdev, &wdev);
        if (err)
-               return err;
+               goto out_err;
 
        if (!rdev->ops->dump_mpp) {
                err = -EOPNOTSUPP;
@@ -5505,7 +5494,7 @@ static int nl80211_dump_mpp(struct sk_buff *skb,
        cb->args[2] = path_idx;
        err = skb->len;
  out_err:
-       nl80211_finish_wdev_dump(rdev);
+       rtnl_unlock();
        return err;
 }
 
@@ -7674,9 +7663,12 @@ static int nl80211_dump_scan(struct sk_buff *skb, struct netlink_callback *cb)
        int start = cb->args[2], idx = 0;
        int err;
 
+       rtnl_lock();
        err = nl80211_prepare_wdev_dump(skb, cb, &rdev, &wdev);
-       if (err)
+       if (err) {
+               rtnl_unlock();
                return err;
+       }
 
        wdev_lock(wdev);
        spin_lock_bh(&rdev->bss_lock);
@@ -7699,7 +7691,7 @@ static int nl80211_dump_scan(struct sk_buff *skb, struct netlink_callback *cb)
        wdev_unlock(wdev);
 
        cb->args[2] = idx;
-       nl80211_finish_wdev_dump(rdev);
+       rtnl_unlock();
 
        return skb->len;
 }
@@ -7784,9 +7776,10 @@ static int nl80211_dump_survey(struct sk_buff *skb, struct netlink_callback *cb)
        int res;
        bool radio_stats;
 
+       rtnl_lock();
        res = nl80211_prepare_wdev_dump(skb, cb, &rdev, &wdev);
        if (res)
-               return res;
+               goto out_err;
 
        /* prepare_wdev_dump parsed the attributes */
        radio_stats = attrbuf[NL80211_ATTR_SURVEY_RADIO_STATS];
@@ -7827,7 +7820,7 @@ static int nl80211_dump_survey(struct sk_buff *skb, struct netlink_callback *cb)
        cb->args[2] = survey_idx;
        res = skb->len;
  out_err:
-       nl80211_finish_wdev_dump(rdev);
+       rtnl_unlock();
        return res;
 }
 
@@ -11508,17 +11501,13 @@ static int nl80211_prepare_vendor_dump(struct sk_buff *skb,
        void *data = NULL;
        unsigned int data_len = 0;
 
-       rtnl_lock();
-
        if (cb->args[0]) {
                /* subtract the 1 again here */
                struct wiphy *wiphy = wiphy_idx_to_wiphy(cb->args[0] - 1);
                struct wireless_dev *tmp;
 
-               if (!wiphy) {
-                       err = -ENODEV;
-                       goto out_unlock;
-               }
+               if (!wiphy)
+                       return -ENODEV;
                *rdev = wiphy_to_rdev(wiphy);
                *wdev = NULL;
 
@@ -11538,23 +11527,19 @@ static int nl80211_prepare_vendor_dump(struct sk_buff *skb,
        err = nlmsg_parse(cb->nlh, GENL_HDRLEN + nl80211_fam.hdrsize,
                          attrbuf, nl80211_fam.maxattr, nl80211_policy);
        if (err)
-               goto out_unlock;
+               return err;
 
        if (!attrbuf[NL80211_ATTR_VENDOR_ID] ||
-           !attrbuf[NL80211_ATTR_VENDOR_SUBCMD]) {
-               err = -EINVAL;
-               goto out_unlock;
-       }
+           !attrbuf[NL80211_ATTR_VENDOR_SUBCMD])
+               return -EINVAL;
 
        *wdev = __cfg80211_wdev_from_attrs(sock_net(skb->sk), attrbuf);
        if (IS_ERR(*wdev))
                *wdev = NULL;
 
        *rdev = __cfg80211_rdev_from_attrs(sock_net(skb->sk), attrbuf);
-       if (IS_ERR(*rdev)) {
-               err = PTR_ERR(*rdev);
-               goto out_unlock;
-       }
+       if (IS_ERR(*rdev))
+               return PTR_ERR(*rdev);
 
        vid = nla_get_u32(attrbuf[NL80211_ATTR_VENDOR_ID]);
        subcmd = nla_get_u32(attrbuf[NL80211_ATTR_VENDOR_SUBCMD]);
@@ -11567,19 +11552,15 @@ static int nl80211_prepare_vendor_dump(struct sk_buff *skb,
                if (vcmd->info.vendor_id != vid || vcmd->info.subcmd != subcmd)
                        continue;
 
-               if (!vcmd->dumpit) {
-                       err = -EOPNOTSUPP;
-                       goto out_unlock;
-               }
+               if (!vcmd->dumpit)
+                       return -EOPNOTSUPP;
 
                vcmd_idx = i;
                break;
        }
 
-       if (vcmd_idx < 0) {
-               err = -EOPNOTSUPP;
-               goto out_unlock;
-       }
+       if (vcmd_idx < 0)
+               return -EOPNOTSUPP;
 
        if (attrbuf[NL80211_ATTR_VENDOR_DATA]) {
                data = nla_data(attrbuf[NL80211_ATTR_VENDOR_DATA]);
@@ -11596,9 +11577,6 @@ static int nl80211_prepare_vendor_dump(struct sk_buff *skb,
 
        /* keep rtnl locked in successful case */
        return 0;
- out_unlock:
-       rtnl_unlock();
-       return err;
 }
 
 static int nl80211_vendor_cmd_dump(struct sk_buff *skb,
@@ -11613,9 +11591,10 @@ static int nl80211_vendor_cmd_dump(struct sk_buff *skb,
        int err;
        struct nlattr *vendor_data;
 
+       rtnl_lock();
        err = nl80211_prepare_vendor_dump(skb, cb, &rdev, &wdev);
        if (err)
-               return err;
+               goto out;
 
        vcmd_idx = cb->args[2];
        data = (void *)cb->args[3];
@@ -11624,15 +11603,21 @@ static int nl80211_vendor_cmd_dump(struct sk_buff *skb,
 
        if (vcmd->flags & (WIPHY_VENDOR_CMD_NEED_WDEV |
                           WIPHY_VENDOR_CMD_NEED_NETDEV)) {
-               if (!wdev)
-                       return -EINVAL;
+               if (!wdev) {
+                       err = -EINVAL;
+                       goto out;
+               }
                if (vcmd->flags & WIPHY_VENDOR_CMD_NEED_NETDEV &&
-                   !wdev->netdev)
-                       return -EINVAL;
+                   !wdev->netdev) {
+                       err = -EINVAL;
+                       goto out;
+               }
 
                if (vcmd->flags & WIPHY_VENDOR_CMD_NEED_RUNNING) {
-                       if (!wdev_running(wdev))
-                               return -ENETDOWN;
+                       if (!wdev_running(wdev)) {
+                               err = -ENETDOWN;
+                               goto out;
+                       }
                }
        }
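
Taken together, the nl80211 changes swap the locking convention: each dumpit entry point now takes the RTNL itself, the prepare helpers report plain errors without unlocking, and every exit funnels through a single unlock (only the vendor-dump prepare path keeps the lock held on success, as its comment notes). That convention in miniature, with a pthread mutex standing in for the RTNL:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t rtnl = PTHREAD_MUTEX_INITIALIZER;

/* nl80211_prepare_wdev_dump() analogue: reports errors, never unlocks */
static int prepare(int arg)
{
	return arg < 0 ? -1 : 0;
}

static int dump(int arg)
{
	int err;

	pthread_mutex_lock(&rtnl);
	err = prepare(arg);
	if (err)
		goto out_unlock;

	printf("dumping %d\n", arg);
	err = 0;
out_unlock:
	pthread_mutex_unlock(&rtnl);	/* single unlock on every path */
	return err;
}

int main(void)
{
	dump(1);
	dump(-1);
	return 0;
}
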
 
index 16b6b5988be969299c34a9881f258a300b366e2c..570a2b67ca1036796cc5021a0f0ce546811a4e6f 100644 (file)
@@ -132,12 +132,10 @@ static int wiphy_resume(struct device *dev)
        /* Age scan results with time spent in suspend */
        cfg80211_bss_age(rdev, get_seconds() - rdev->suspend_at);
 
-       if (rdev->ops->resume) {
-               rtnl_lock();
-               if (rdev->wiphy.registered)
-                       ret = rdev_resume(rdev);
-               rtnl_unlock();
-       }
+       rtnl_lock();
+       if (rdev->wiphy.registered && rdev->ops->resume)
+               ret = rdev_resume(rdev);
+       rtnl_unlock();
 
        return ret;
 }
index fd28a49dbe8f0c99bb798acec314c63084fc22c6..8b911c29860e79f21b0ac8e1d3a80ed373fd537e 100644 (file)
@@ -852,7 +852,8 @@ static int x25_wait_for_data(struct sock *sk, long timeout)
        return rc;
 }
 
-static int x25_accept(struct socket *sock, struct socket *newsock, int flags)
+static int x25_accept(struct socket *sock, struct socket *newsock, int flags,
+                     bool kern)
 {
        struct sock *sk = sock->sk;
        struct sock *newsk;
index 666c5ffe929dca388b218d99c2d71946a231872f..eaea9c4fb3b0e6ec8903cfc5456519e8d4e7dcd1 100644 (file)
@@ -54,8 +54,8 @@ static inline unsigned int __xfrm4_dpref_spref_hash(const xfrm_address_t *daddr,
 static inline unsigned int __xfrm6_pref_hash(const xfrm_address_t *addr,
                                             __u8 prefixlen)
 {
-       int pdw;
-       int pbi;
+       unsigned int pdw;
+       unsigned int pbi;
        u32 initval = 0;
 
        pdw = prefixlen >> 5;     /* num of whole u32 in prefix */
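
The signedness change matters because pdw and pbi feed shift and index arithmetic; as unsigned values the decomposition is well defined for any prefix length in 0..128. The arithmetic in isolation:

#include <stdio.h>

int main(void)
{
	unsigned int prefixlen = 53;
	unsigned int pdw = prefixlen >> 5;	/* whole u32 words: 1 */
	unsigned int pbi = prefixlen & 0x1f;	/* leftover bits: 21 */

	printf("pdw=%u pbi=%u\n", pdw, pbi);
	return 0;
}
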
index 0806dccdf5078451e0dd9c5b5573d040ab21c831..236cbbc0ab9cfff05cd027ffb0dc56aa15e61033 100644 (file)
@@ -1243,7 +1243,7 @@ static inline int policy_to_flow_dir(int dir)
 }
 
 static struct xfrm_policy *xfrm_sk_policy_lookup(const struct sock *sk, int dir,
-                                                const struct flowi *fl)
+                                                const struct flowi *fl, u16 family)
 {
        struct xfrm_policy *pol;
 
@@ -1251,8 +1251,7 @@ static struct xfrm_policy *xfrm_sk_policy_lookup(const struct sock *sk, int dir,
  again:
        pol = rcu_dereference(sk->sk_policy[dir]);
        if (pol != NULL) {
-               bool match = xfrm_selector_match(&pol->selector, fl,
-                                                sk->sk_family);
+               bool match = xfrm_selector_match(&pol->selector, fl, family);
                int err = 0;
 
                if (match) {
@@ -2239,7 +2238,7 @@ struct dst_entry *xfrm_lookup(struct net *net, struct dst_entry *dst_orig,
        sk = sk_const_to_full_sk(sk);
        if (sk && sk->sk_policy[XFRM_POLICY_OUT]) {
                num_pols = 1;
-               pols[0] = xfrm_sk_policy_lookup(sk, XFRM_POLICY_OUT, fl);
+               pols[0] = xfrm_sk_policy_lookup(sk, XFRM_POLICY_OUT, fl, family);
                err = xfrm_expand_policies(fl, family, pols,
                                           &num_pols, &num_xfrms);
                if (err < 0)
@@ -2518,7 +2517,7 @@ int __xfrm_policy_check(struct sock *sk, int dir, struct sk_buff *skb,
        pol = NULL;
        sk = sk_to_full_sk(sk);
        if (sk && sk->sk_policy[dir]) {
-               pol = xfrm_sk_policy_lookup(sk, dir, &fl);
+               pol = xfrm_sk_policy_lookup(sk, dir, &fl, family);
                if (IS_ERR(pol)) {
                        XFRM_INC_STATS(net, LINUX_MIB_XFRMINPOLERROR);
                        return 0;
@@ -3069,6 +3068,11 @@ static int __net_init xfrm_net_init(struct net *net)
 {
        int rv;
 
+       /* Initialize the per-net locks here */
+       spin_lock_init(&net->xfrm.xfrm_state_lock);
+       spin_lock_init(&net->xfrm.xfrm_policy_lock);
+       mutex_init(&net->xfrm.xfrm_cfg_mutex);
+
        rv = xfrm_statistics_init(net);
        if (rv < 0)
                goto out_statistics;
@@ -3085,11 +3089,6 @@ static int __net_init xfrm_net_init(struct net *net)
        if (rv < 0)
                goto out;
 
-       /* Initialize the per-net locks here */
-       spin_lock_init(&net->xfrm.xfrm_state_lock);
-       spin_lock_init(&net->xfrm.xfrm_policy_lock);
-       mutex_init(&net->xfrm.xfrm_cfg_mutex);
-
        return 0;
 
 out:
index 9705c279494b248b759155d671cfbc778fa25058..4f7e62ddc17e431902a3a31a4eea7f1563d758bd 100644 (file)
@@ -412,7 +412,14 @@ static inline int xfrm_replay_verify_len(struct xfrm_replay_state_esn *replay_es
        up = nla_data(rp);
        ulen = xfrm_replay_state_esn_len(up);
 
-       if (nla_len(rp) < ulen || xfrm_replay_state_esn_len(replay_esn) != ulen)
+       /* Check the overall length and the internal bitmap length to avoid
+        * potential overflow. */
+       if (nla_len(rp) < ulen ||
+           xfrm_replay_state_esn_len(replay_esn) != ulen ||
+           replay_esn->bmp_len != up->bmp_len)
+               return -EINVAL;
+
+       if (up->replay_window > up->bmp_len * sizeof(__u32) * 8)
                return -EINVAL;
 
        return 0;
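
The two added checks encode one invariant: the ESN bitmap carries up->bmp_len 32-bit words, i.e. bmp_len * 32 bits, so a replay_window larger than that would index past the bitmap. A stand-alone user-space sketch of the bound, with illustrative values only:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t bmp_len = 4;                               /* bitmap words */
	uint32_t max_bits = bmp_len * sizeof(uint32_t) * 8; /* 128 bits */
	uint32_t replay_window = 256;                       /* userspace input */

	/* mirrors: up->replay_window > up->bmp_len * sizeof(__u32) * 8 */
	if (replay_window > max_bits)
		printf("rejected: window %u > bitmap capacity %u bits\n",
		       replay_window, max_bits);
	return 0;
}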
@@ -3101,7 +3108,6 @@ static bool xfrm_is_alive(const struct km_event *c)
 }
 
 static struct xfrm_mgr netlink_mgr = {
-       .id             = "netlink",
        .notify         = xfrm_send_state_notify,
        .acquire        = xfrm_send_acquire,
        .compile_policy = xfrm_compile_policy,
index 09e9d535bd7487d81574cf8572a41b6e697566fd..d42b495b099278cc7a96b4dc0945d56c38c06287 100644 (file)
@@ -34,6 +34,8 @@ hostprogs-y += sampleip
 hostprogs-y += tc_l2_redirect
 hostprogs-y += lwt_len_hist
 hostprogs-y += xdp_tx_iptunnel
+hostprogs-y += test_map_in_map
+hostprogs-y += per_socket_stats_example
 
 # Libbpf dependencies
 LIBBPF := ../../tools/lib/bpf/bpf.o
@@ -72,6 +74,8 @@ sampleip-objs := bpf_load.o $(LIBBPF) sampleip_user.o
 tc_l2_redirect-objs := bpf_load.o $(LIBBPF) tc_l2_redirect_user.o
 lwt_len_hist-objs := bpf_load.o $(LIBBPF) lwt_len_hist_user.o
 xdp_tx_iptunnel-objs := bpf_load.o $(LIBBPF) xdp_tx_iptunnel_user.o
+test_map_in_map-objs := bpf_load.o $(LIBBPF) test_map_in_map_user.o
+per_socket_stats_example-objs := $(LIBBPF) cookie_uid_helper_example.o
 
 # Tell kbuild to always build the programs
 always := $(hostprogs-y)
@@ -105,6 +109,8 @@ always += trace_event_kern.o
 always += sampleip_kern.o
 always += lwt_len_hist_kern.o
 always += xdp_tx_iptunnel_kern.o
+always += test_map_in_map_kern.o
+always += cookie_uid_helper_example.o
 
 HOSTCFLAGS += -I$(objtree)/usr/include
 HOSTCFLAGS += -I$(srctree)/tools/lib/
@@ -139,6 +145,7 @@ HOSTLOADLIBES_sampleip += -lelf
 HOSTLOADLIBES_tc_l2_redirect += -l elf
 HOSTLOADLIBES_lwt_len_hist += -l elf
 HOSTLOADLIBES_xdp_tx_iptunnel += -lelf
+HOSTLOADLIBES_test_map_in_map += -lelf
 
 # Allows pointing LLC/CLANG to a LLVM backend with bpf support, redefine on cmdline:
 #  make samples/bpf/ LLC=~/git/llvm/build/bin/llc CLANG=~/git/llvm/build/bin/clang
index faaffe2e139a989de6f90835121a725091d4a289..52de9d88c0213547da837deedac238023644342d 100644 (file)
@@ -80,6 +80,7 @@ struct bpf_map_def {
        unsigned int value_size;
        unsigned int max_entries;
        unsigned int map_flags;
+       unsigned int inner_map_idx;
 };
 
 static int (*bpf_skb_load_bytes)(void *ctx, int off, void *to, int len) =
index b86ee54da2d14d6ba0de18481d2c55ed1a70a67b..dcdce1270d38617af84087f532ba16a402d46124 100644 (file)
@@ -43,6 +43,7 @@ struct bpf_map_def {
        unsigned int value_size;
        unsigned int max_entries;
        unsigned int map_flags;
+       unsigned int inner_map_idx;
 };
 
 static int populate_prog_array(const char *event, int prog_fd)
@@ -198,11 +199,22 @@ static int load_maps(struct bpf_map_def *maps, int len)
 
        for (i = 0; i < len / sizeof(struct bpf_map_def); i++) {
 
-               map_fd[i] = bpf_create_map(maps[i].type,
-                                          maps[i].key_size,
-                                          maps[i].value_size,
-                                          maps[i].max_entries,
-                                          maps[i].map_flags);
+               if (maps[i].type == BPF_MAP_TYPE_ARRAY_OF_MAPS ||
+                   maps[i].type == BPF_MAP_TYPE_HASH_OF_MAPS) {
+                       int inner_map_fd = map_fd[maps[i].inner_map_idx];
+
+                       map_fd[i] = bpf_create_map_in_map(maps[i].type,
+                                                         maps[i].key_size,
+                                                         inner_map_fd,
+                                                         maps[i].max_entries,
+                                                         maps[i].map_flags);
+               } else {
+                       map_fd[i] = bpf_create_map(maps[i].type,
+                                                  maps[i].key_size,
+                                                  maps[i].value_size,
+                                                  maps[i].max_entries,
+                                                  maps[i].map_flags);
+               }
                if (map_fd[i] < 0) {
                        printf("failed to create a map: %d %s\n",
                               errno, strerror(errno));
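
For context, a hedged sketch (map names are illustrative; it mirrors the test files added below) of how a sample declares a map-in-map with the new inner_map_idx field. The loader above resolves the index through map_fd[], so the inner map must be declared before any outer map that references it:

struct bpf_map_def SEC("maps") inner = {        /* map #0: inner template */
	.type = BPF_MAP_TYPE_ARRAY,
	.key_size = sizeof(unsigned int),
	.value_size = sizeof(long),
	.max_entries = 16,
};

struct bpf_map_def SEC("maps") outer = {        /* map #1 */
	.type = BPF_MAP_TYPE_ARRAY_OF_MAPS,
	.key_size = sizeof(unsigned int),
	.inner_map_idx = 0,                     /* map_fd[0] is "inner" */
	.max_entries = 4,
};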
diff --git a/samples/bpf/cookie_uid_helper_example.c b/samples/bpf/cookie_uid_helper_example.c
new file mode 100644 (file)
index 0000000..ad5afed
--- /dev/null
@@ -0,0 +1,321 @@
+/* This test is a demo of using the get_socket_uid and get_socket_cookie
+ * helper functions to do per-socket network traffic monitoring.
+ * It requires an iptables version higher than 1.6.1 to load a pinned eBPF
+ * program into the xt_bpf match.
+ *
+ * TEST:
+ * ./run_cookie_uid_helper_example.sh -option
+ * option:
+ *     -t: do the traffic monitoring test; the program will continuously
+ * print out the network traffic seen after the program started. A sample
+ * output is shown below:
+ *
+ * cookie: 877, uid: 0x3e8, Packet Count: 20, Bytes Count: 11058
+ * cookie: 132, uid: 0x0, Packet Count: 2, Bytes Count: 286
+ * cookie: 812, uid: 0x3e8, Packet Count: 3, Bytes Count: 1726
+ * cookie: 802, uid: 0x3e8, Packet Count: 2, Bytes Count: 104
+ * cookie: 877, uid: 0x3e8, Packet Count: 20, Bytes Count: 11058
+ * cookie: 831, uid: 0x3e8, Packet Count: 2, Bytes Count: 104
+ * cookie: 0, uid: 0x0, Packet Count: 6, Bytes Count: 712
+ * cookie: 880, uid: 0xfffe, Packet Count: 1, Bytes Count: 70
+ *
+ *     -s: do the getsockopt SO_COOKIE test; the program will set up a pair
+ * of UDP sockets and send packets between them, then read the traffic data
+ * directly from the eBPF map based on the socket cookie.
+ *
+ * Clean up: if using the shell script, it will delete the iptables rule
+ * and unmount the bpf filesystem on exit. Otherwise the iptables rule needs
+ * to be deleted by hand; see run_cookie_uid_helper_example.sh for details.
+ */
+
+#define _GNU_SOURCE
+
+#define offsetof(type, member) __builtin_offsetof(type, member)
+#define ARRAY_SIZE(x) (sizeof(x) / sizeof(*(x)))
+
+#include <arpa/inet.h>
+#include <errno.h>
+#include <error.h>
+#include <limits.h>
+#include <linux/bpf.h>
+#include <linux/if_ether.h>
+#include <net/if.h>
+#include <signal.h>
+#include <stdbool.h>
+#include <stdint.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <sys/socket.h>
+#include <sys/stat.h>
+#include <sys/types.h>
+#include <unistd.h>
+#include <bpf/bpf.h>
+#include "libbpf.h"
+
+#define PORT 8888
+
+struct stats {
+       uint32_t uid;
+       uint64_t packets;
+       uint64_t bytes;
+};
+
+static int map_fd, prog_fd;
+
+static bool test_finish;
+
+static void maps_create(void)
+{
+       map_fd = bpf_create_map(BPF_MAP_TYPE_HASH, sizeof(uint32_t),
+                               sizeof(struct stats), 100, 0);
+       if (map_fd < 0)
+               error(1, errno, "map create failed!\n");
+}
+
+static void prog_load(void)
+{
+       static char log_buf[1 << 16];
+
+       struct bpf_insn prog[] = {
+               /*
+                * Save sk_buff for future usage. Values stored in R6 to R10
+                * are not reset after a bpf helper function call.
+                */
+               BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
+               /*
+                * pc1: BPF_FUNC_get_socket_cookie takes one parameter,
+                * R1: sk_buff
+                */
+               BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
+                               BPF_FUNC_get_socket_cookie),
+               /* pc2-4: save &socket_cookie to r7 for future usage */
+               BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
+               BPF_MOV64_REG(BPF_REG_7, BPF_REG_10),
+               BPF_ALU64_IMM(BPF_ADD, BPF_REG_7, -8),
+               /*
+                * pc5-8: set up the registers for BPF_FUNC_map_lookup_elem,
+                * it takes two parameters (R1: map_fd,  R2: &socket_cookie)
+                */
+               BPF_LD_MAP_FD(BPF_REG_1, map_fd),
+               BPF_MOV64_REG(BPF_REG_2, BPF_REG_7),
+               BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
+                               BPF_FUNC_map_lookup_elem),
+               /*
+                * pc9: if r0 != 0x0, go to pc+14, since the cookie is
+                * already stored.
+                * Otherwise do pc10-22 to set up a new data entry.
+                */
+               BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 14),
+               BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
+               BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
+                               BPF_FUNC_get_socket_uid),
+               /*
+                * Place a struct stats on the R10 stack and sequentially
+                * store the member values into memory. The packets value is
+                * set by directly placing an IMM value 1 onto the stack.
+                */
+               BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0,
+                               -32 + offsetof(struct stats, uid)),
+               BPF_ST_MEM(BPF_DW, BPF_REG_10,
+                               -32 + offsetof(struct stats, packets), 1),
+               /*
+                * __sk_buff is a special struct used by eBPF programs to
+                * directly access some sk_buff fields.
+                */
+               BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_6,
+                               offsetof(struct __sk_buff, len)),
+               BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1,
+                               -32 + offsetof(struct stats, bytes)),
+               /*
+                * Add a new map entry using BPF_FUNC_map_update_elem; it takes
+                * 4 parameters (R1: map_fd, R2: &socket_cookie, R3: &stats,
+                * R4: flags)
+                */
+               BPF_LD_MAP_FD(BPF_REG_1, map_fd),
+               BPF_MOV64_REG(BPF_REG_2, BPF_REG_7),
+               BPF_MOV64_REG(BPF_REG_3, BPF_REG_10),
+               BPF_ALU64_IMM(BPF_ADD, BPF_REG_3, -32),
+               BPF_MOV64_IMM(BPF_REG_4, 0),
+               BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
+                               BPF_FUNC_map_update_elem),
+               BPF_JMP_IMM(BPF_JA, 0, 0, 5),
+               /*
+                * pc24-30: update the packet info in an existing data entry;
+                * this can be done by writing directly through the pointer
+                * instead of using the BPF_FUNC_map_update_elem helper function
+                */
+               BPF_MOV64_REG(BPF_REG_9, BPF_REG_0),
+               BPF_MOV64_IMM(BPF_REG_1, 1),
+               BPF_STX_XADD(BPF_DW, BPF_REG_9, BPF_REG_1,
+                               offsetof(struct stats, packets)),
+               BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_6,
+                               offsetof(struct __sk_buff, len)),
+               BPF_STX_XADD(BPF_DW, BPF_REG_9, BPF_REG_1,
+                               offsetof(struct stats, bytes)),
+               BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_6,
+                               offsetof(struct __sk_buff, len)),
+               BPF_EXIT_INSN(),
+       };
+       prog_fd = bpf_load_program(BPF_PROG_TYPE_SOCKET_FILTER, prog,
+                                       ARRAY_SIZE(prog), "GPL", 0,
+                                       log_buf, sizeof(log_buf));
+       if (prog_fd < 0)
+               error(1, errno, "failed to load prog\n%s\n", log_buf);
+}
+
+static void prog_attach_iptables(char *file)
+{
+       int ret;
+       char rules[100];
+
+       if (bpf_obj_pin(prog_fd, file))
+               error(1, errno, "bpf_obj_pin");
+       if (strlen(file) > 50) {
+               printf("file path too long: %s\n", file);
+               exit(1);
+       }
+       sprintf(rules, "iptables -A OUTPUT -m bpf --object-pinned %s -j ACCEPT",
+               file);
+       ret = system(rules);
+       if (ret < 0) {
+               printf("iptables rule update failed: %d/n", WEXITSTATUS(ret));
+               exit(1);
+       }
+}
+
+static void print_table(void)
+{
+       struct stats curEntry;
+       uint32_t curN = UINT32_MAX;
+       uint32_t nextN;
+       int res;
+
+       while (bpf_map_get_next_key(map_fd, &curN, &nextN) > -1) {
+               curN = nextN;
+               res = bpf_map_lookup_elem(map_fd, &curN, &curEntry);
+               if (res < 0) {
+                       error(1, errno, "fail to get entry value of Key: %u\n",
+                               curN);
+               } else {
+                       printf("cookie: %u, uid: 0x%x, Packet Count: %lu,"
+                               " Bytes Count: %lu\n", curN, curEntry.uid,
+                               curEntry.packets, curEntry.bytes);
+               }
+       }
+}
+
+static void udp_client(void)
+{
+       struct sockaddr_in si_other = {0};
+       struct sockaddr_in si_me = {0};
+       struct stats dataEntry;
+       int s_rcv, s_send, i, recv_len;
+       char message = 'a';
+       char buf;
+       uint64_t cookie;
+       int res;
+       socklen_t cookie_len = sizeof(cookie);
+       socklen_t slen = sizeof(si_other);
+
+       s_rcv = socket(PF_INET, SOCK_DGRAM, 0);
+       if (s_rcv < 0)
+               error(1, errno, "rcv socket creat failed!\n");
+       si_other.sin_family = AF_INET;
+       si_other.sin_port = htons(PORT);
+       if (inet_aton("127.0.0.1", &si_other.sin_addr) == 0)
+               error(1, errno, "inet_aton\n");
+       if (bind(s_rcv, (struct sockaddr *)&si_other, sizeof(si_other)) == -1)
+               error(1, errno, "bind\n");
+       s_send = socket(PF_INET, SOCK_DGRAM, 0);
+       if (s_send < 0)
+               error(1, errno, "send socket creat failed!\n");
+       res = getsockopt(s_send, SOL_SOCKET, SO_COOKIE, &cookie, &cookie_len);
+       if (res < 0)
+               printf("get cookie failed: %s\n", strerror(errno));
+       res = bpf_map_lookup_elem(map_fd, &cookie, &dataEntry);
+       if (res != -1)
+               error(1, errno, "socket stat found while flow not active\n");
+       for (i = 0; i < 10; i++) {
+               res = sendto(s_send, &message, sizeof(message), 0,
+                            (struct sockaddr *)&si_other, slen);
+               if (res == -1)
+                       error(1, errno, "send\n");
+               if (res != sizeof(message))
+                       error(1, 0, "%uB != %luB\n", res, sizeof(message));
+               recv_len = recvfrom(s_rcv, &buf, sizeof(buf), 0,
+                            (struct sockaddr *)&si_me, &slen);
+               if (recv_len < 0)
+                       error(1, errno, "revieve\n");
+               res = memcmp(&(si_other.sin_addr), &(si_me.sin_addr),
+                          sizeof(si_me.sin_addr));
+               if (res != 0)
+                       error(1, EFAULT, "sender addr error: %d\n", res);
+               printf("Message received: %c\n", buf);
+               res = bpf_map_lookup_elem(map_fd, &cookie, &dataEntry);
+               if (res < 0)
+                       error(1, errno, "lookup sk stat failed, cookie: %lu\n",
+                             cookie);
+               printf("cookie: %lu, uid: 0x%x, Packet Count: %lu,"
+                       " Bytes Count: %lu\n\n", cookie, dataEntry.uid,
+                       dataEntry.packets, dataEntry.bytes);
+       }
+       close(s_send);
+       close(s_rcv);
+}
+
+static int usage(void)
+{
+       printf("Usage: ./run_cookie_uid_helper_example.sh"
+               " bpfObjName -option\n"
+               "       -t      traffic monitor test\n"
+               "       -s      getsockopt cookie test\n");
+       return 1;
+}
+
+void finish(int ret)
+{
+       test_finish = true;
+}
+
+int main(int argc, char *argv[])
+{
+       int opt;
+       bool cfg_test_traffic = false;
+       bool cfg_test_cookie = false;
+
+       if (argc != 3)
+               return usage();
+       while ((opt = getopt(argc, argv, "ts")) != -1) {
+               switch (opt) {
+               case 't':
+                       cfg_test_traffic = true;
+                       break;
+               case 's':
+                       cfg_test_cookie = true;
+                       break;
+
+               default:
+                       printf("unknown option %c\n", opt);
+                       usage();
+                       return -1;
+               }
+       }
+       maps_create();
+       prog_load();
+       prog_attach_iptables(argv[2]);
+       if (cfg_test_traffic) {
+               if (signal(SIGINT, finish) == SIG_ERR)
+                       error(1, errno, "register handler failed");
+               while (!test_finish) {
+                       print_table();
+                       printf("\n");
+                       sleep(1);
+               }
+       } else if (cfg_test_cookie) {
+               udp_client();
+       }
+       close(prog_fd);
+       close(map_fd);
+       return 0;
+}
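
A minimal stand-alone sketch of the SO_COOKIE getsockopt the sample depends on (assuming kernel headers new enough to define SO_COOKIE): it returns the kernel-assigned 64-bit cookie that keys the eBPF map entries.

#include <stdint.h>
#include <stdio.h>
#include <sys/socket.h>

int main(void)
{
	int s = socket(AF_INET, SOCK_DGRAM, 0);
	uint64_t cookie;
	socklen_t len = sizeof(cookie);

	/* SO_COOKIE is assumed available (added for 4.12-era kernels) */
	if (s >= 0 && getsockopt(s, SOL_SOCKET, SO_COOKIE, &cookie, &len) == 0)
		printf("socket cookie: %llu\n", (unsigned long long)cookie);
	return 0;
}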
index 3705fba453a005fb32f5dfb51dd5763f5f364ccf..8ab36a04c174a9c154594b7ef4fe2ef2c5c5dc5e 100644 (file)
@@ -135,6 +135,16 @@ struct bpf_insn;
                .off   = OFF,                                   \
                .imm   = 0 })
 
+/* Atomic memory add, *(uint *)(dst_reg + off16) += src_reg */
+
+#define BPF_STX_XADD(SIZE, DST, SRC, OFF)                      \
+       ((struct bpf_insn) {                                    \
+               .code  = BPF_STX | BPF_SIZE(SIZE) | BPF_XADD,   \
+               .dst_reg = DST,                                 \
+               .src_reg = SRC,                                 \
+               .off   = OFF,                                   \
+               .imm   = 0 })
+
 /* Memory store, *(uint *) (dst_reg + off16) = imm32 */
 
 #define BPF_ST_MEM(SIZE, DST, OFF, IMM)                                \
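
In C terms, the instruction this macro encodes is an atomic fetch-add on the destination memory; a rough analogue (illustrative only, not an in-tree helper):

#include <stdint.h>

/* Rough analogue of BPF_STX_XADD(BPF_DW, dst, src, off): atomically add
 * the source value into the 64-bit word at dst_reg + off16. */
static inline void xadd64(void *dst_reg, int16_t off, uint64_t src_reg)
{
	__atomic_fetch_add((uint64_t *)((char *)dst_reg + off), src_reg,
			   __ATOMIC_RELAXED);
}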
index a91872a97742a6413c316548c66ab8349ba1aff0..9da2a3441b0a2e88eb63c3b60b2e8b76949b72fe 100644 (file)
@@ -65,6 +65,13 @@ struct bpf_map_def SEC("maps") lpm_trie_map_alloc = {
        .map_flags = BPF_F_NO_PREALLOC,
 };
 
+struct bpf_map_def SEC("maps") array_map = {
+       .type = BPF_MAP_TYPE_ARRAY,
+       .key_size = sizeof(u32),
+       .value_size = sizeof(long),
+       .max_entries = MAX_ENTRIES,
+};
+
 SEC("kprobe/sys_getuid")
 int stress_hmap(struct pt_regs *ctx)
 {
@@ -165,5 +172,31 @@ int stress_lpm_trie_map_alloc(struct pt_regs *ctx)
        return 0;
 }
 
+SEC("kprobe/sys_getpgid")
+int stress_hash_map_lookup(struct pt_regs *ctx)
+{
+       u32 key = 1, i;
+       long *value;
+
+#pragma clang loop unroll(full)
+       for (i = 0; i < 64; ++i)
+               value = bpf_map_lookup_elem(&hash_map, &key);
+
+       return 0;
+}
+
+SEC("kprobe/sys_getpgrp")
+int stress_array_map_lookup(struct pt_regs *ctx)
+{
+       u32 key = 1, i;
+       long *value;
+
+#pragma clang loop unroll(full)
+       for (i = 0; i < 64; ++i)
+               value = bpf_map_lookup_elem(&array_map, &key);
+
+       return 0;
+}
+
 char _license[] SEC("license") = "GPL";
 u32 _version SEC("version") = LINUX_VERSION_CODE;
index 680260a91f50c893dd26a1b968cc220a299c530f..e29ff318a79365ae5f84e833b45f9d2a46895421 100644 (file)
@@ -38,6 +38,8 @@ static __u64 time_get_ns(void)
 #define LRU_HASH_PREALLOC      (1 << 4)
 #define PERCPU_LRU_HASH_PREALLOC       (1 << 5)
 #define LPM_KMALLOC            (1 << 6)
+#define HASH_LOOKUP            (1 << 7)
+#define ARRAY_LOOKUP           (1 << 8)
 
 static int test_flags = ~0;
 
@@ -125,6 +127,30 @@ static void test_lpm_kmalloc(int cpu)
               cpu, MAX_CNT * 1000000000ll / (time_get_ns() - start_time));
 }
 
+static void test_hash_lookup(int cpu)
+{
+       __u64 start_time;
+       int i;
+
+       start_time = time_get_ns();
+       for (i = 0; i < MAX_CNT; i++)
+               syscall(__NR_getpgid, 0);
+       printf("%d:hash_lookup %lld lookups per sec\n",
+              cpu, MAX_CNT * 1000000000ll * 64 / (time_get_ns() - start_time));
+}
+
+static void test_array_lookup(int cpu)
+{
+       __u64 start_time;
+       int i;
+
+       start_time = time_get_ns();
+       for (i = 0; i < MAX_CNT; i++)
+               syscall(__NR_getpgrp, 0);
+       printf("%d:array_lookup %lld lookups per sec\n",
+              cpu, MAX_CNT * 1000000000ll * 64 / (time_get_ns() - start_time));
+}
+
 static void loop(int cpu)
 {
        cpu_set_t cpuset;
@@ -153,6 +179,12 @@ static void loop(int cpu)
 
        if (test_flags & LPM_KMALLOC)
                test_lpm_kmalloc(cpu);
+
+       if (test_flags & HASH_LOOKUP)
+               test_hash_lookup(cpu);
+
+       if (test_flags & ARRAY_LOOKUP)
+               test_array_lookup(cpu);
 }
 
 static void run_perf_test(int tasks)
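
A worked sketch of the rate computation above: each syscall fires a kprobe program that performs 64 unrolled lookups, hence the extra factor of 64 (numbers below are illustrative):

#include <stdio.h>

int main(void)
{
	long long max_cnt = 1000000;            /* assumed iteration count */
	long long elapsed_ns = 2000000000ll;    /* e.g. 2 seconds */
	long long rate = max_cnt * 1000000000ll * 64 / elapsed_ns;

	printf("%lld lookups per sec\n", rate); /* 32000000 here */
	return 0;
}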
diff --git a/samples/bpf/run_cookie_uid_helper_example.sh b/samples/bpf/run_cookie_uid_helper_example.sh
new file mode 100755 (executable)
index 0000000..f898cfa
--- /dev/null
@@ -0,0 +1,14 @@
+#!/bin/bash
+local_dir="$(pwd)"
+root_dir=$local_dir/../..
+mnt_dir=$(mktemp -d --tmp)
+
+on_exit() {
+       iptables -D OUTPUT -m bpf --object-pinned ${mnt_dir}/bpf_prog -j ACCEPT
+       umount ${mnt_dir}
+       rm -r ${mnt_dir}
+}
+
+trap on_exit EXIT
+mount -t bpf bpf ${mnt_dir}
+./per_socket_stats_example ${mnt_dir}/bpf_prog $1
diff --git a/samples/bpf/test_map_in_map_kern.c b/samples/bpf/test_map_in_map_kern.c
new file mode 100644 (file)
index 0000000..42c44d0
--- /dev/null
@@ -0,0 +1,173 @@
+/*
+ * Copyright (c) 2017 Facebook
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of version 2 of the GNU General Public
+ * License as published by the Free Software Foundation.
+ */
+#define KBUILD_MODNAME "foo"
+#include <linux/ptrace.h>
+#include <linux/version.h>
+#include <uapi/linux/bpf.h>
+#include <uapi/linux/in6.h>
+#include "bpf_helpers.h"
+
+#define MAX_NR_PORTS 65536
+
+/* map #0 */
+struct bpf_map_def SEC("maps") port_a = {
+       .type = BPF_MAP_TYPE_ARRAY,
+       .key_size = sizeof(u32),
+       .value_size = sizeof(int),
+       .max_entries = MAX_NR_PORTS,
+};
+
+/* map #1 */
+struct bpf_map_def SEC("maps") port_h = {
+       .type = BPF_MAP_TYPE_HASH,
+       .key_size = sizeof(u32),
+       .value_size = sizeof(int),
+       .max_entries = 1,
+};
+
+/* map #2 */
+struct bpf_map_def SEC("maps") reg_result_h = {
+       .type = BPF_MAP_TYPE_HASH,
+       .key_size = sizeof(u32),
+       .value_size = sizeof(int),
+       .max_entries = 1,
+};
+
+/* map #3 */
+struct bpf_map_def SEC("maps") inline_result_h = {
+       .type = BPF_MAP_TYPE_HASH,
+       .key_size = sizeof(u32),
+       .value_size = sizeof(int),
+       .max_entries = 1,
+};
+
+/* map #4 */ /* Test case #0 */
+struct bpf_map_def SEC("maps") a_of_port_a = {
+       .type = BPF_MAP_TYPE_ARRAY_OF_MAPS,
+       .key_size = sizeof(u32),
+       .inner_map_idx = 0, /* map_fd[0] is port_a */
+       .max_entries = MAX_NR_PORTS,
+};
+
+/* map #5 */ /* Test case #1 */
+struct bpf_map_def SEC("maps") h_of_port_a = {
+       .type = BPF_MAP_TYPE_HASH_OF_MAPS,
+       .key_size = sizeof(u32),
+       .inner_map_idx = 0, /* map_fd[0] is port_a */
+       .max_entries = 1,
+};
+
+/* map #6 */ /* Test case #2 */
+struct bpf_map_def SEC("maps") h_of_port_h = {
+       .type = BPF_MAP_TYPE_HASH_OF_MAPS,
+       .key_size = sizeof(u32),
+       .inner_map_idx = 1, /* map_fd[1] is port_h */
+       .max_entries = 1,
+};
+
+static __always_inline int do_reg_lookup(void *inner_map, u32 port)
+{
+       int *result;
+
+       result = bpf_map_lookup_elem(inner_map, &port);
+       return result ? *result : -ENOENT;
+}
+
+static __always_inline int do_inline_array_lookup(void *inner_map, u32 port)
+{
+       int *result;
+
+       if (inner_map != &port_a)
+               return -EINVAL;
+
+       result = bpf_map_lookup_elem(&port_a, &port);
+       return result ? *result : -ENOENT;
+}
+
+static __always_inline int do_inline_hash_lookup(void *inner_map, u32 port)
+{
+       int *result;
+
+       if (inner_map != &port_h)
+               return -EINVAL;
+
+       result = bpf_map_lookup_elem(&port_h, &port);
+       return result ? *result : -ENOENT;
+}
+
+SEC("kprobe/sys_connect")
+int trace_sys_connect(struct pt_regs *ctx)
+{
+       struct sockaddr_in6 *in6;
+       u16 test_case, port, dst6[8];
+       int addrlen, ret, inline_ret, ret_key = 0;
+       u32 port_key;
+       void *outer_map, *inner_map;
+       bool inline_hash = false;
+
+       in6 = (struct sockaddr_in6 *)PT_REGS_PARM2(ctx);
+       addrlen = (int)PT_REGS_PARM3(ctx);
+
+       if (addrlen != sizeof(*in6))
+               return 0;
+
+       ret = bpf_probe_read(dst6, sizeof(dst6), &in6->sin6_addr);
+       if (ret) {
+               inline_ret = ret;
+               goto done;
+       }
+
+       if (dst6[0] != 0xdead || dst6[1] != 0xbeef)
+               return 0;
+
+       test_case = dst6[7];
+
+       ret = bpf_probe_read(&port, sizeof(port), &in6->sin6_port);
+       if (ret) {
+               inline_ret = ret;
+               goto done;
+       }
+
+       port_key = port;
+
+       ret = -ENOENT;
+       if (test_case == 0) {
+               outer_map = &a_of_port_a;
+       } else if (test_case == 1) {
+               outer_map = &h_of_port_a;
+       } else if (test_case == 2) {
+               outer_map = &h_of_port_h;
+       } else {
+               ret = __LINE__;
+               inline_ret = ret;
+               goto done;
+       }
+
+       inner_map = bpf_map_lookup_elem(outer_map, &port_key);
+       if (!inner_map) {
+               ret = __LINE__;
+               inline_ret = ret;
+               goto done;
+       }
+
+       ret = do_reg_lookup(inner_map, port_key);
+
+       if (test_case == 0 || test_case == 1)
+               inline_ret = do_inline_array_lookup(inner_map, port_key);
+       else
+               inline_ret = do_inline_hash_lookup(inner_map, port_key);
+
+done:
+       bpf_map_update_elem(&reg_result_h, &ret_key, &ret, BPF_ANY);
+       bpf_map_update_elem(&inline_result_h, &ret_key, &inline_ret, BPF_ANY);
+
+       return 0;
+}
+
+char _license[] SEC("license") = "GPL";
+u32 _version SEC("version") = LINUX_VERSION_CODE;
diff --git a/samples/bpf/test_map_in_map_user.c b/samples/bpf/test_map_in_map_user.c
new file mode 100644 (file)
index 0000000..f62fdc2
--- /dev/null
@@ -0,0 +1,116 @@
+/*
+ * Copyright (c) 2017 Facebook
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of version 2 of the GNU General Public
+ * License as published by the Free Software Foundation.
+ */
+#include <sys/resource.h>
+#include <sys/socket.h>
+#include <arpa/inet.h>
+#include <stdint.h>
+#include <assert.h>
+#include <errno.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include "libbpf.h"
+#include "bpf_load.h"
+
+#define PORT_A         (map_fd[0])
+#define PORT_H         (map_fd[1])
+#define REG_RESULT_H   (map_fd[2])
+#define INLINE_RESULT_H        (map_fd[3])
+#define A_OF_PORT_A    (map_fd[4]) /* Test case #0 */
+#define H_OF_PORT_A    (map_fd[5]) /* Test case #1 */
+#define H_OF_PORT_H    (map_fd[6]) /* Test case #2 */
+
+static const char * const test_names[] = {
+       "Array of Array",
+       "Hash of Array",
+       "Hash of Hash",
+};
+
+#define NR_TESTS (sizeof(test_names) / sizeof(*test_names))
+
+static void populate_map(uint32_t port_key, int magic_result)
+{
+       int ret;
+
+       ret = bpf_map_update_elem(PORT_A, &port_key, &magic_result, BPF_ANY);
+       assert(!ret);
+
+       ret = bpf_map_update_elem(PORT_H, &port_key, &magic_result,
+                                 BPF_NOEXIST);
+       assert(!ret);
+
+       ret = bpf_map_update_elem(A_OF_PORT_A, &port_key, &PORT_A, BPF_ANY);
+       assert(!ret);
+
+       ret = bpf_map_update_elem(H_OF_PORT_A, &port_key, &PORT_A, BPF_NOEXIST);
+       assert(!ret);
+
+       ret = bpf_map_update_elem(H_OF_PORT_H, &port_key, &PORT_H, BPF_NOEXIST);
+       assert(!ret);
+}
+
+static void test_map_in_map(void)
+{
+       struct sockaddr_in6 in6 = { .sin6_family = AF_INET6 };
+       uint32_t result_key = 0, port_key;
+       int result, inline_result;
+       int magic_result = 0xfaceb00c;
+       int ret;
+       int i;
+
+       port_key = rand() & 0x00FF;
+       populate_map(port_key, magic_result);
+
+       in6.sin6_addr.s6_addr16[0] = 0xdead;
+       in6.sin6_addr.s6_addr16[1] = 0xbeef;
+       in6.sin6_port = port_key;
+
+       for (i = 0; i < NR_TESTS; i++) {
+               printf("%s: ", test_names[i]);
+
+               in6.sin6_addr.s6_addr16[7] = i;
+               ret = connect(-1, (struct sockaddr *)&in6, sizeof(in6));
+               assert(ret == -1 && errno == EBADF);
+
+               ret = bpf_map_lookup_elem(REG_RESULT_H, &result_key, &result);
+               assert(!ret);
+
+               ret = bpf_map_lookup_elem(INLINE_RESULT_H, &result_key,
+                                         &inline_result);
+               assert(!ret);
+
+               if (result != magic_result || inline_result != magic_result) {
+                       printf("Error. result:%d inline_result:%d\n",
+                              result, inline_result);
+                       exit(1);
+               }
+
+               bpf_map_delete_elem(REG_RESULT_H, &result_key);
+               bpf_map_delete_elem(INLINE_RESULT_H, &result_key);
+
+               printf("Pass\n");
+       }
+}
+
+int main(int argc, char **argv)
+{
+       struct rlimit r = {RLIM_INFINITY, RLIM_INFINITY};
+       char filename[256];
+
+       assert(!setrlimit(RLIMIT_MEMLOCK, &r));
+
+       snprintf(filename, sizeof(filename), "%s_kern.o", argv[0]);
+
+       if (load_bpf_file(filename)) {
+               printf("%s", bpf_log_buf);
+               return 1;
+       }
+
+       test_map_in_map();
+
+       return 0;
+}
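
A hedged stand-alone sketch of the trigger trick used above: connect() on an invalid fd still enters sys_connect, so the kprobe program runs (and records its result) before the syscall fails with EBADF.

#include <assert.h>
#include <errno.h>
#include <netinet/in.h>
#include <sys/socket.h>

int main(void)
{
	struct sockaddr_in6 in6 = { .sin6_family = AF_INET6 };
	int ret = connect(-1, (struct sockaddr *)&in6, sizeof(in6));

	/* the kprobe on sys_connect has already fired at this point */
	assert(ret == -1 && errno == EBADF);
	return 0;
}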
index d6ca649cb0e96d4d91a4980048912544b8183e06..afe3fd3af1e40616857b3e6c425be632c1fa2667 100644 (file)
@@ -148,6 +148,10 @@ cc-fullversion = $(shell $(CONFIG_SHELL) \
 # Usage:  EXTRA_CFLAGS += $(call cc-ifversion, -lt, 0402, -O1)
 cc-ifversion = $(shell [ $(cc-version) $(1) $(2) ] && echo $(3) || echo $(4))
 
+# cc-if-fullversion
+# Usage:  EXTRA_CFLAGS += $(call cc-if-fullversion, -lt, 040502, -O1)
+cc-if-fullversion = $(shell [ $(cc-fullversion) $(1) $(2) ] && echo $(3) || echo $(4))
+
 # cc-ldoption
 # Usage: ldflags += $(call cc-ldoption, -Wl$(comma)--hash-style=both)
 cc-ldoption = $(call try-run,\
index 0a07f9014944ed92a8e2e42983ae43be60b3e471..7234e61e7ce370a775ec6981b391b6d102a01770 100644 (file)
@@ -155,7 +155,7 @@ else
 # $(call addtree,-I$(obj)) locates .h files in srctree, from generated .c files
 #   and locates generated .h files
 # FIXME: Replace both with specific CFLAGS* statements in the makefiles
-__c_flags      = $(if $(obj),-I$(srctree)/$(src) -I$(obj)) \
+__c_flags      = $(if $(obj),$(call addtree,-I$(src)) -I$(obj)) \
                  $(call flags,_c_flags)
 __a_flags      = $(call flags,_a_flags)
 __cpp_flags     = $(call flags,_cpp_flags)
index 9b0b5cbc5b899be4ddbafe2ce5f3ec5ab0743b6c..0f98634c20a097697cec9849dc9e4b338cd5e5c9 100644 (file)
@@ -133,7 +133,7 @@ __visible int plugin_init(struct plugin_name_args *plugin_info, struct plugin_gc
 #if BUILDING_GCC_VERSION < 6000
        register_callback(plugin_name, PLUGIN_START_UNIT, &sancov_start_unit, NULL);
        register_callback(plugin_name, PLUGIN_REGISTER_GGC_ROOTS, NULL, (void *)&gt_ggc_r_gt_sancov);
-       register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &sancov_plugin_pass_info);
+       register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &sancov_pass_info);
 #endif
 
        return 0;
index 26d208b435a0d347b8f11df13bcb79cf3618770c..cfddddb9c9d722b63c522450a5b277a4fd22d318 100644 (file)
@@ -914,7 +914,7 @@ on_treeview2_button_press_event(GtkWidget * widget,
                        current = menu;
                        display_tree_part();
                        gtk_widget_set_sensitive(back_btn, TRUE);
-               } else if ((col == COL_OPTION)) {
+               } else if (col == COL_OPTION) {
                        toggle_sym_value(menu);
                        gtk_tree_view_expand_row(view, path, TRUE);
                }
index cf7e52e4781b9b193f28dcbe1f6de9ed415eb50d..9b6e246a45d09f530b3527b81946a30b1256697f 100644 (file)
@@ -22,4 +22,6 @@ SECTIONS {
 
        . = ALIGN(8);
        .init_array             0 : { *(SORT(.init_array.*)) *(.init_array) }
+
+       __jump_table            0 : ALIGN(8) { KEEP(*(__jump_table)) }
 }
index 0458b037c8a137daa0f0fc205cabc188b18ae513..0545f5a8cabed76cb2c49cfd8c2d08f567bc4980 100644 (file)
@@ -372,6 +372,8 @@ disassocation||disassociation
 disapear||disappear
 disapeared||disappeared
 disappared||disappeared
+disble||disable
+disbled||disabled
 disconnet||disconnect
 discontinous||discontinuous
 dispertion||dispersion
@@ -732,6 +734,7 @@ oustanding||outstanding
 overaall||overall
 overhread||overhead
 overlaping||overlapping
+overide||override
 overrided||overridden
 overriden||overridden
 overun||overrun
index 2ca9cde939d44976365aa67fe72f51be9b92897d..8e67bb4c9caba658b18f9d1dd0593907c8556c27 100644 (file)
@@ -69,6 +69,7 @@ static struct nlmsg_perm nlmsg_route_perms[] =
        { RTM_GETDCB,           NETLINK_ROUTE_SOCKET__NLMSG_READ  },
        { RTM_SETDCB,           NETLINK_ROUTE_SOCKET__NLMSG_WRITE },
        { RTM_NEWNETCONF,       NETLINK_ROUTE_SOCKET__NLMSG_WRITE },
+       { RTM_DELNETCONF,       NETLINK_ROUTE_SOCKET__NLMSG_WRITE },
        { RTM_GETNETCONF,       NETLINK_ROUTE_SOCKET__NLMSG_READ  },
        { RTM_NEWMDB,           NETLINK_ROUTE_SOCKET__NLMSG_WRITE },
        { RTM_DELMDB,           NETLINK_ROUTE_SOCKET__NLMSG_WRITE  },
index 4c935202ce23be4fc57c9d79c3fe0a160d9b8a89..f3b1d7f50b81156d4c29c5c0958a884e9292bbae 100644 (file)
@@ -1832,6 +1832,7 @@ static int snd_seq_ioctl_set_client_pool(struct snd_seq_client *client,
             info->output_pool != client->pool->size)) {
                if (snd_seq_write_pool_allocated(client)) {
                        /* remove all existing cells */
+                       snd_seq_pool_mark_closing(client->pool);
                        snd_seq_queue_client_leave_cells(client->number);
                        snd_seq_pool_done(client->pool);
                }
index 448efd4e980edf97138b43b6263a9909d07c076a..01c4cfe30c9feffd4fa24c7223e58d9a604cb026 100644 (file)
@@ -72,6 +72,9 @@ void snd_seq_fifo_delete(struct snd_seq_fifo **fifo)
                return;
        *fifo = NULL;
 
+       if (f->pool)
+               snd_seq_pool_mark_closing(f->pool);
+
        snd_seq_fifo_clear(f);
 
        /* wake up clients if any */
@@ -264,6 +267,10 @@ int snd_seq_fifo_resize(struct snd_seq_fifo *f, int poolsize)
        /* NOTE: overflow flag is not cleared */
        spin_unlock_irqrestore(&f->lock, flags);
 
+       /* close the old pool and wait until all users are gone */
+       snd_seq_pool_mark_closing(oldpool);
+       snd_use_lock_sync(&f->use_lock);
+
        /* release cells in old pool */
        for (cell = oldhead; cell; cell = next) {
                next = cell->next;
index 1a1acf3ddda4c9aeb022548b9438498e0f036762..d4c61ec9be13d7389addd27bc70acf58bda2eecc 100644 (file)
@@ -415,6 +415,18 @@ int snd_seq_pool_init(struct snd_seq_pool *pool)
        return 0;
 }
 
+/* refuse further insertions to the pool */
+void snd_seq_pool_mark_closing(struct snd_seq_pool *pool)
+{
+       unsigned long flags;
+
+       if (snd_BUG_ON(!pool))
+               return;
+       spin_lock_irqsave(&pool->lock, flags);
+       pool->closing = 1;
+       spin_unlock_irqrestore(&pool->lock, flags);
+}
+
 /* remove events */
 int snd_seq_pool_done(struct snd_seq_pool *pool)
 {
@@ -425,10 +437,6 @@ int snd_seq_pool_done(struct snd_seq_pool *pool)
                return -EINVAL;
 
        /* wait for closing all threads */
-       spin_lock_irqsave(&pool->lock, flags);
-       pool->closing = 1;
-       spin_unlock_irqrestore(&pool->lock, flags);
-
        if (waitqueue_active(&pool->output_sleep))
                wake_up(&pool->output_sleep);
 
@@ -485,6 +493,7 @@ int snd_seq_pool_delete(struct snd_seq_pool **ppool)
        *ppool = NULL;
        if (pool == NULL)
                return 0;
+       snd_seq_pool_mark_closing(pool);
        snd_seq_pool_done(pool);
        kfree(pool);
        return 0;
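
The resulting teardown ordering, as a hedged sketch (the example_* helper is illustrative, not from the patch): refuse new insertions first, drain in-flight users, then free the cells.

static void example_pool_teardown(struct snd_seq_pool *pool)
{
	snd_seq_pool_mark_closing(pool); /* set pool->closing under the lock */
	/* callers such as snd_seq_fifo_resize() also sync in-flight users
	 * here via snd_use_lock_sync() before freeing */
	snd_seq_pool_done(pool);         /* wake sleepers, release cells */
}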
index 4a2ec779b8a701b1aba2402e9de583878f7a39ca..32f959c17786d9ac8c071ba0e6fd070dc06da78b 100644 (file)
@@ -84,6 +84,7 @@ static inline int snd_seq_total_cells(struct snd_seq_pool *pool)
 int snd_seq_pool_init(struct snd_seq_pool *pool);
 
 /* done pool - free events */
+void snd_seq_pool_mark_closing(struct snd_seq_pool *pool);
 int snd_seq_pool_done(struct snd_seq_pool *pool);
 
 /* create pool */
index ab4cdab5cfa57abf3db2a8da806d0bf7031fed67..79edd88d5cd08398afb86c63b3d32b025162164e 100644 (file)
@@ -1905,7 +1905,7 @@ static int hw_card_start(struct hw *hw)
                return err;
 
        /* Set DMA transfer mask */
-       if (dma_set_mask(&pci->dev, DMA_BIT_MASK(dma_bits))) {
+       if (!dma_set_mask(&pci->dev, DMA_BIT_MASK(dma_bits))) {
                dma_set_coherent_mask(&pci->dev, DMA_BIT_MASK(dma_bits));
        } else {
                dma_set_mask(&pci->dev, DMA_BIT_MASK(32));
index c15c51bea26d0afdcc6d8c806993754eaaa2e031..69266b8ea2ad7b498097c4bc231fbad6e55ff37e 100644 (file)
@@ -261,6 +261,7 @@ enum {
        CXT_FIXUP_HP_530,
        CXT_FIXUP_CAP_MIX_AMP_5047,
        CXT_FIXUP_MUTE_LED_EAPD,
+       CXT_FIXUP_HP_DOCK,
        CXT_FIXUP_HP_SPECTRE,
        CXT_FIXUP_HP_GATE_MIC,
 };
@@ -778,6 +779,14 @@ static const struct hda_fixup cxt_fixups[] = {
                .type = HDA_FIXUP_FUNC,
                .v.func = cxt_fixup_mute_led_eapd,
        },
+       [CXT_FIXUP_HP_DOCK] = {
+               .type = HDA_FIXUP_PINS,
+               .v.pins = (const struct hda_pintbl[]) {
+                       { 0x16, 0x21011020 }, /* line-out */
+                       { 0x18, 0x2181103f }, /* line-in */
+                       { }
+               }
+       },
        [CXT_FIXUP_HP_SPECTRE] = {
                .type = HDA_FIXUP_PINS,
                .v.pins = (const struct hda_pintbl[]) {
@@ -839,6 +848,7 @@ static const struct snd_pci_quirk cxt5066_fixups[] = {
        SND_PCI_QUIRK(0x1025, 0x0543, "Acer Aspire One 522", CXT_FIXUP_STEREO_DMIC),
        SND_PCI_QUIRK(0x1025, 0x054c, "Acer Aspire 3830TG", CXT_FIXUP_ASPIRE_DMIC),
        SND_PCI_QUIRK(0x1025, 0x054f, "Acer Aspire 4830T", CXT_FIXUP_ASPIRE_DMIC),
+       SND_PCI_QUIRK(0x103c, 0x8079, "HP EliteBook 840 G3", CXT_FIXUP_HP_DOCK),
        SND_PCI_QUIRK(0x103c, 0x8174, "HP Spectre x360", CXT_FIXUP_HP_SPECTRE),
        SND_PCI_QUIRK(0x103c, 0x8115, "HP Z1 Gen3", CXT_FIXUP_HP_GATE_MIC),
        SND_PCI_QUIRK(0x1043, 0x138d, "Asus", CXT_FIXUP_HEADPHONE_MIC_PIN),
@@ -871,6 +881,7 @@ static const struct hda_model_fixup cxt5066_fixup_models[] = {
        { .id = CXT_PINCFG_LEMOTE_A1205, .name = "lemote-a1205" },
        { .id = CXT_FIXUP_OLPC_XO, .name = "olpc-xo" },
        { .id = CXT_FIXUP_MUTE_LED_EAPD, .name = "mute-led-eapd" },
+       { .id = CXT_FIXUP_HP_DOCK, .name = "hp-dock" },
        {}
 };
 
index 4e112221d825462ef7e1ae38c092e40e58667f38..299835d1fbaadb5f312ee86502deacd4f7643603 100644 (file)
@@ -4847,6 +4847,7 @@ enum {
        ALC286_FIXUP_HP_GPIO_LED,
        ALC280_FIXUP_HP_GPIO2_MIC_HOTKEY,
        ALC280_FIXUP_HP_DOCK_PINS,
+       ALC269_FIXUP_HP_DOCK_GPIO_MIC1_LED,
        ALC280_FIXUP_HP_9480M,
        ALC288_FIXUP_DELL_HEADSET_MODE,
        ALC288_FIXUP_DELL1_MIC_NO_PRESENCE,
@@ -4857,6 +4858,7 @@ enum {
        ALC292_FIXUP_DISABLE_AAMIX,
        ALC293_FIXUP_DISABLE_AAMIX_MULTIJACK,
        ALC298_FIXUP_DELL1_MIC_NO_PRESENCE,
+       ALC298_FIXUP_DELL_AIO_MIC_NO_PRESENCE,
        ALC275_FIXUP_DELL_XPS,
        ALC256_FIXUP_DELL_XPS_13_HEADPHONE_NOISE,
        ALC293_FIXUP_LENOVO_SPK_NOISE,
@@ -5388,6 +5390,16 @@ static const struct hda_fixup alc269_fixups[] = {
                .chained = true,
                .chain_id = ALC280_FIXUP_HP_GPIO4
        },
+       [ALC269_FIXUP_HP_DOCK_GPIO_MIC1_LED] = {
+               .type = HDA_FIXUP_PINS,
+               .v.pins = (const struct hda_pintbl[]) {
+                       { 0x1b, 0x21011020 }, /* line-out */
+                       { 0x18, 0x2181103f }, /* line-in */
+                       { },
+               },
+               .chained = true,
+               .chain_id = ALC269_FIXUP_HP_GPIO_MIC1_LED
+       },
        [ALC280_FIXUP_HP_9480M] = {
                .type = HDA_FIXUP_FUNC,
                .v.func = alc280_fixup_hp_9480m,
@@ -5459,6 +5471,15 @@ static const struct hda_fixup alc269_fixups[] = {
                .chained = true,
                .chain_id = ALC269_FIXUP_HEADSET_MODE
        },
+       [ALC298_FIXUP_DELL_AIO_MIC_NO_PRESENCE] = {
+               .type = HDA_FIXUP_PINS,
+               .v.pins = (const struct hda_pintbl[]) {
+                       { 0x18, 0x01a1913c }, /* use as headset mic, without its own jack detect */
+                       { }
+               },
+               .chained = true,
+               .chain_id = ALC269_FIXUP_HEADSET_MODE
+       },
        [ALC275_FIXUP_DELL_XPS] = {
                .type = HDA_FIXUP_VERBS,
                .v.verbs = (const struct hda_verb[]) {
@@ -5531,7 +5552,7 @@ static const struct hda_fixup alc269_fixups[] = {
                .type = HDA_FIXUP_FUNC,
                .v.func = alc298_fixup_speaker_volume,
                .chained = true,
-               .chain_id = ALC298_FIXUP_DELL1_MIC_NO_PRESENCE,
+               .chain_id = ALC298_FIXUP_DELL_AIO_MIC_NO_PRESENCE,
        },
        [ALC256_FIXUP_DELL_INSPIRON_7559_SUBWOOFER] = {
                .type = HDA_FIXUP_PINS,
@@ -5647,7 +5668,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
        SND_PCI_QUIRK(0x103c, 0x2256, "HP", ALC269_FIXUP_HP_GPIO_MIC1_LED),
        SND_PCI_QUIRK(0x103c, 0x2257, "HP", ALC269_FIXUP_HP_GPIO_MIC1_LED),
        SND_PCI_QUIRK(0x103c, 0x2259, "HP", ALC269_FIXUP_HP_GPIO_MIC1_LED),
-       SND_PCI_QUIRK(0x103c, 0x225a, "HP", ALC269_FIXUP_HP_GPIO_MIC1_LED),
+       SND_PCI_QUIRK(0x103c, 0x225a, "HP", ALC269_FIXUP_HP_DOCK_GPIO_MIC1_LED),
        SND_PCI_QUIRK(0x103c, 0x2260, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
        SND_PCI_QUIRK(0x103c, 0x2263, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
        SND_PCI_QUIRK(0x103c, 0x2264, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
@@ -5816,6 +5837,7 @@ static const struct hda_model_fixup alc269_fixup_models[] = {
        {.id = ALC269_FIXUP_HEADSET_MODE_NO_HP_MIC, .name = "headset-mode-no-hp-mic"},
        {.id = ALC269_FIXUP_LENOVO_DOCK, .name = "lenovo-dock"},
        {.id = ALC269_FIXUP_HP_GPIO_LED, .name = "hp-gpio-led"},
+       {.id = ALC269_FIXUP_HP_DOCK_GPIO_MIC1_LED, .name = "hp-dock-gpio-mic1-led"},
        {.id = ALC269_FIXUP_DELL1_MIC_NO_PRESENCE, .name = "dell-headset-multi"},
        {.id = ALC269_FIXUP_DELL2_MIC_NO_PRESENCE, .name = "dell-headset-dock"},
        {.id = ALC283_FIXUP_CHROME_BOOK, .name = "alc283-dac-wcaps"},
@@ -6090,6 +6112,8 @@ static const struct snd_hda_pin_quirk alc269_pin_fixup_tbl[] = {
                ALC295_STANDARD_PINS,
                {0x17, 0x21014040},
                {0x18, 0x21a19050}),
+       SND_HDA_PIN_QUIRK(0x10ec0295, 0x1028, "Dell", ALC269_FIXUP_DELL1_MIC_NO_PRESENCE,
+               ALC295_STANDARD_PINS),
        SND_HDA_PIN_QUIRK(0x10ec0298, 0x1028, "Dell", ALC298_FIXUP_DELL1_MIC_NO_PRESENCE,
                ALC298_STANDARD_PINS,
                {0x17, 0x90170110}),
index ec1067a679da406019bd4c98e6b6cf22fd5a4432..08b1399d1da2b818b997b752555532ebdf45312e 100644 (file)
@@ -89,7 +89,7 @@ static void acp_reg_write(u32 val, void __iomem *acp_mmio, u32 reg)
        writel(val, acp_mmio + (reg * 4));
 }
 
-/* Configure a given dma channel parameters - enable/disble,
+/* Configure a given dma channel parameters - enable/disable,
  * number of descriptors, priority
  */
 static void config_acp_dma_channel(void __iomem *acp_mmio, u8 ch_num,
index 89ac5f5a93eb31f510d8ab189badc781fd319e6f..7ae46c2647d453bcad1176b6877fcbbae110416b 100644 (file)
@@ -349,7 +349,7 @@ static int atmel_classd_codec_dai_digital_mute(struct snd_soc_dai *codec_dai,
 }
 
 #define CLASSD_ACLK_RATE_11M2896_MPY_8 (112896 * 100 * 8)
-#define CLASSD_ACLK_RATE_12M288_MPY_8  (12228 * 1000 * 8)
+#define CLASSD_ACLK_RATE_12M288_MPY_8  (12288 * 1000 * 8)
 
 static struct {
        int rate;
index 78fca8acd3ec0a2209876bf5b20a1363a03c6336..fd272a40485b077039c68f786671218a7f4204e2 100644 (file)
@@ -1534,21 +1534,20 @@ static void hdac_hdmi_eld_notify_cb(void *aptr, int port, int pipe)
                        pin->mst_capable = false;
                        /* if not MST, default is port[0] */
                        hport = &pin->ports[0];
-                       goto out;
                } else {
                        for (i = 0; i < pin->num_ports; i++) {
                                pin->mst_capable = true;
                                if (pin->ports[i].id == pipe) {
                                        hport = &pin->ports[i];
-                                       goto out;
+                                       break;
                                }
                        }
                }
+
+               if (hport)
+                       hdac_hdmi_present_sense(pin, hport);
        }
 
-out:
-       if (pin && hport)
-               hdac_hdmi_present_sense(pin, hport);
 }
 
 static struct i915_audio_component_audio_ops aops = {
@@ -1998,7 +1997,7 @@ static int hdac_hdmi_dev_remove(struct hdac_ext_device *edev)
        struct hdac_hdmi_pin *pin, *pin_next;
        struct hdac_hdmi_cvt *cvt, *cvt_next;
        struct hdac_hdmi_pcm *pcm, *pcm_next;
-       struct hdac_hdmi_port *port;
+       struct hdac_hdmi_port *port, *port_next;
        int i;
 
        snd_soc_unregister_codec(&edev->hdac.dev);
@@ -2008,8 +2007,9 @@ static int hdac_hdmi_dev_remove(struct hdac_ext_device *edev)
                if (list_empty(&pcm->port_list))
                        continue;
 
-               list_for_each_entry(port, &pcm->port_list, head)
-                       port = NULL;
+               list_for_each_entry_safe(port, port_next,
+                                       &pcm->port_list, head)
+                       list_del(&port->head);
 
                list_del(&pcm->head);
                kfree(pcm);
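
For context, a hedged sketch of why the _safe iterator is needed here: list_del() unlinks the current node, so the plain iterator would chase stale links, while the _safe variant caches the next entry first (list and struct names below are illustrative):

struct foo { struct list_head head; };
struct foo *cur, *next;

/* safe removal of every node while walking the list */
list_for_each_entry_safe(cur, next, &some_list, head)
	list_del(&cur->head);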
index 324461e985b3918211ad29052a5142a6560e26af..476135ec57268cf6863e9792d5fb027c6383e190 100644 (file)
@@ -1241,7 +1241,7 @@ static irqreturn_t rt5665_irq(int irq, void *data)
 static void rt5665_jd_check_handler(struct work_struct *work)
 {
        struct rt5665_priv *rt5665 = container_of(work, struct rt5665_priv,
-               calibrate_work.work);
+               jd_check_work.work);
 
        if (snd_soc_read(rt5665->codec, RT5665_AJD1_CTRL) & 0x0010) {
                /* jack out */
@@ -2252,7 +2252,7 @@ static const char * const rt5665_if2_1_adc_in_src[] = {
 
 static const SOC_ENUM_SINGLE_DECL(
        rt5665_if2_1_adc_in_enum, RT5665_DIG_INF2_DATA,
-       RT5665_IF3_ADC_IN_SFT, rt5665_if2_1_adc_in_src);
+       RT5665_IF2_1_ADC_IN_SFT, rt5665_if2_1_adc_in_src);
 
 static const struct snd_kcontrol_new rt5665_if2_1_adc_in_mux =
        SOC_DAPM_ENUM("IF2_1 ADC IN Source", rt5665_if2_1_adc_in_enum);
@@ -3178,6 +3178,9 @@ static const struct snd_soc_dapm_route rt5665_dapm_routes[] = {
        {"DAC Mono Right Filter", NULL, "DAC Mono R ASRC", is_using_asrc},
        {"DAC Stereo1 Filter", NULL, "DAC STO1 ASRC", is_using_asrc},
        {"DAC Stereo2 Filter", NULL, "DAC STO2 ASRC", is_using_asrc},
+       {"I2S1 ASRC", NULL, "CLKDET"},
+       {"I2S2 ASRC", NULL, "CLKDET"},
+       {"I2S3 ASRC", NULL, "CLKDET"},
 
        /*Vref*/
        {"Mic Det Power", NULL, "Vref2"},
@@ -3912,6 +3915,7 @@ static const struct snd_soc_dapm_route rt5665_dapm_routes[] = {
        {"Mono MIX", "MONOVOL Switch", "MONOVOL"},
        {"Mono Amp", NULL, "Mono MIX"},
        {"Mono Amp", NULL, "Vref2"},
+       {"Mono Amp", NULL, "Vref3"},
        {"Mono Amp", NULL, "CLKDET SYS"},
        {"Mono Amp", NULL, "CLKDET MONO"},
        {"Mono Playback", "Switch", "Mono Amp"},
@@ -4798,7 +4802,7 @@ static int rt5665_i2c_probe(struct i2c_client *i2c,
        /* Enhance performance*/
        regmap_update_bits(rt5665->regmap, RT5665_PWR_ANLG_1,
                RT5665_HP_DRIVER_MASK | RT5665_LDO1_DVO_MASK,
-               RT5665_HP_DRIVER_5X | RT5665_LDO1_DVO_09);
+               RT5665_HP_DRIVER_5X | RT5665_LDO1_DVO_12);
 
        INIT_DELAYED_WORK(&rt5665->jack_detect_work,
                                rt5665_jack_detect_handler);
index 12f7080a0d3c3f1eeab2e3083020cffe98599891..a30f5e6d062882724230e2bf2f2d1157ee823a9b 100644 (file)
 #define RT5665_HP_DRIVER_MASK                  (0x3 << 2)
 #define RT5665_HP_DRIVER_1X                    (0x0 << 2)
 #define RT5665_HP_DRIVER_3X                    (0x1 << 2)
-#define RT5665_HP_DRIVER_5X                    (0x2 << 2)
+#define RT5665_HP_DRIVER_5X                    (0x3 << 2)
 #define RT5665_LDO1_DVO_MASK                   (0x3)
 #define RT5665_LDO1_DVO_09                     (0x0)
 #define RT5665_LDO1_DVO_10                     (0x1)
index d151224ffcca411a5685b2076c43748fd2134790..bbdb72f73df19ddf954daab19d3deb222208dc7c 100644 (file)
@@ -899,7 +899,10 @@ static int wm_coeff_put(struct snd_kcontrol *kctl,
 
        mutex_lock(&ctl->dsp->pwr_lock);
 
-       memcpy(ctl->cache, p, ctl->len);
+       if (ctl->flags & WMFW_CTL_FLAG_VOLATILE)
+               ret = -EPERM;
+       else
+               memcpy(ctl->cache, p, ctl->len);
 
        ctl->set = 1;
        if (ctl->enabled && ctl->dsp->running)
@@ -926,6 +929,8 @@ static int wm_coeff_tlv_put(struct snd_kcontrol *kctl,
                ctl->set = 1;
                if (ctl->enabled && ctl->dsp->running)
                        ret = wm_coeff_write_control(ctl, ctl->cache, size);
+               else if (ctl->flags & WMFW_CTL_FLAG_VOLATILE)
+                       ret = -EPERM;
        }
 
        mutex_unlock(&ctl->dsp->pwr_lock);
@@ -947,7 +952,7 @@ static int wm_coeff_put_acked(struct snd_kcontrol *kctl,
 
        mutex_lock(&ctl->dsp->pwr_lock);
 
-       if (ctl->enabled)
+       if (ctl->enabled && ctl->dsp->running)
                ret = wm_coeff_write_acked_control(ctl, val);
        else
                ret = -EPERM;
index 4924575d2e95d3d49c0bdad57bc8691c472da5b2..343b291fc3725f46b4d5270b65d5100ca083ef61 100644 (file)
@@ -115,6 +115,7 @@ int asoc_simple_card_parse_clk(struct device *dev,
        clk = devm_get_clk_from_child(dev, node, NULL);
        if (!IS_ERR(clk)) {
                simple_dai->sysclk = clk_get_rate(clk);
+               simple_dai->clk = clk;
        } else if (!of_property_read_u32(node, "system-clock-frequency", &val)) {
                simple_dai->sysclk = val;
        } else {
index ed58b5b3555a869ff91772689b761369873d6c89..2dbfb1b24ef4a629ecdac8004630fc66aeb529fc 100644 (file)
@@ -512,7 +512,7 @@ static int skl_tplg_set_module_init_data(struct snd_soc_dapm_widget *w)
                        if (bc->set_params != SKL_PARAM_INIT)
                                continue;
 
-                       mconfig->formats_config.caps = (u32 *)&bc->params;
+                       mconfig->formats_config.caps = (u32 *)bc->params;
                        mconfig->formats_config.caps_size = bc->size;
 
                        break;
index 05cf809cf9e1467dca19e4f02433f6e6b34532a3..d7013bde6f45fc7ed82db8ea0e92ae81226705f1 100644 (file)
@@ -13,7 +13,7 @@ config SND_SOC_MT2701
 
 config SND_SOC_MT2701_CS42448
        tristate "ASoc Audio driver for MT2701 with CS42448 codec"
-       depends on SND_SOC_MT2701
+       depends on SND_SOC_MT2701 && I2C
        select SND_SOC_CS42XX8_I2C
        select SND_SOC_BT_SCO
        help
index abb5eaac854a9b9c47027e278cd5204c18aceace..7d92a24b7cfa558afbb8331401c974c59d5f1ae5 100644 (file)
@@ -31,23 +31,24 @@ static int rsnd_cmd_init(struct rsnd_mod *mod,
        struct rsnd_mod *mix = rsnd_io_to_mod_mix(io);
        struct device *dev = rsnd_priv_to_dev(priv);
        u32 data;
+       u32 path[] = {
+               [1] = 1 << 0,
+               [5] = 1 << 8,
+               [6] = 1 << 12,
+               [9] = 1 << 15,
+       };
 
        if (!mix && !dvc)
                return 0;
 
+       if (ARRAY_SIZE(path) < rsnd_mod_id(mod) + 1)
+               return -ENXIO;
+
        if (mix) {
                struct rsnd_dai *rdai;
                struct rsnd_mod *src;
                struct rsnd_dai_stream *tio;
                int i;
-               u32 path[] = {
-                       [0] = 0,
-                       [1] = 1 << 0,
-                       [2] = 0,
-                       [3] = 0,
-                       [4] = 0,
-                       [5] = 1 << 8
-               };
 
                /*
                 * it is assuming that the integrator is well understanding about
@@ -70,16 +71,19 @@ static int rsnd_cmd_init(struct rsnd_mod *mod,
        } else {
                struct rsnd_mod *src = rsnd_io_to_mod_src(io);
 
-               u32 path[] = {
-                       [0] = 0x30000,
-                       [1] = 0x30001,
-                       [2] = 0x40000,
-                       [3] = 0x10000,
-                       [4] = 0x20000,
-                       [5] = 0x40100
+               u8 cmd_case[] = {
+                       [0] = 0x3,
+                       [1] = 0x3,
+                       [2] = 0x4,
+                       [3] = 0x1,
+                       [4] = 0x2,
+                       [5] = 0x4,
+                       [6] = 0x1,
+                       [9] = 0x2,
                };
 
-               data = path[rsnd_mod_id(src)];
+               data = path[rsnd_mod_id(src)] |
+                       cmd_case[rsnd_mod_id(src)] << 16;
        }
 
        dev_dbg(dev, "ctu/mix path = 0x%08x", data);
index 1f405c83386759a1bfc7dbfd3ff51dba2ee33614..241cb3b08a0755dc93c06034cca814e9c509c090 100644 (file)
@@ -454,6 +454,20 @@ static u32 rsnd_dmapp_read(struct rsnd_dma *dma, u32 reg)
        return ioread32(rsnd_dmapp_addr(dmac, dma, reg));
 }
 
+static void rsnd_dmapp_bset(struct rsnd_dma *dma, u32 data, u32 mask, u32 reg)
+{
+       struct rsnd_mod *mod = rsnd_mod_get(dma);
+       struct rsnd_priv *priv = rsnd_mod_to_priv(mod);
+       struct rsnd_dma_ctrl *dmac = rsnd_priv_to_dmac(priv);
+       void __iomem *addr = rsnd_dmapp_addr(dmac, dma, reg);
+       u32 val = ioread32(addr);
+
+       val &= ~mask;
+       val |= (data & mask);
+
+       iowrite32(val, addr);
+}
+
 static int rsnd_dmapp_stop(struct rsnd_mod *mod,
                           struct rsnd_dai_stream *io,
                           struct rsnd_priv *priv)
@@ -461,10 +475,10 @@ static int rsnd_dmapp_stop(struct rsnd_mod *mod,
        struct rsnd_dma *dma = rsnd_mod_to_dma(mod);
        int i;
 
-       rsnd_dmapp_write(dma, 0, PDMACHCR);
+       rsnd_dmapp_bset(dma, 0,  PDMACHCR_DE, PDMACHCR);
 
        for (i = 0; i < 1024; i++) {
-               if (0 == rsnd_dmapp_read(dma, PDMACHCR))
+               if (0 == (rsnd_dmapp_read(dma, PDMACHCR) & PDMACHCR_DE))
                        return 0;
                udelay(1);
        }
index 4e817c8a18c0bbe899374028ae56ef0754042d59..14fafdaf1395f9737191df18599ee58fc4f858fd 100644 (file)
@@ -64,7 +64,11 @@ static int rsnd_ssiu_init(struct rsnd_mod *mod,
        mask1 = (1 << 4) | (1 << 20);   /* mask sync bit */
        mask2 = (1 << 4);               /* mask sync bit */
        val1  = val2  = 0;
-       if (rsnd_ssi_is_pin_sharing(io)) {
+       if (id == 8) {
+               /*
+                * SSI8 pin is shared with SSI7; nothing to do.
+                */
+       } else if (rsnd_ssi_is_pin_sharing(io)) {
                int shift = -1;
 
                switch (id) {
index 6dca408faae334d223494c33e14d503518b497d7..2722bb0c557310d97816cfa7857b24d7c9bd4948 100644 (file)
@@ -3326,7 +3326,10 @@ static int snd_soc_platform_drv_pcm_new(struct snd_soc_pcm_runtime *rtd)
 {
        struct snd_soc_platform *platform = rtd->platform;
 
-       return platform->driver->pcm_new(rtd);
+       if (platform->driver->pcm_new)
+               return platform->driver->pcm_new(rtd);
+       else
+               return 0;
 }
 
 static void snd_soc_platform_drv_pcm_free(struct snd_pcm *pcm)
@@ -3334,7 +3337,8 @@ static void snd_soc_platform_drv_pcm_free(struct snd_pcm *pcm)
        struct snd_soc_pcm_runtime *rtd = pcm->private_data;
        struct snd_soc_platform *platform = rtd->platform;
 
-       platform->driver->pcm_free(pcm);
+       if (platform->driver->pcm_free)
+               platform->driver->pcm_free(pcm);
 }
 
 /**
index 5992c6ab3833ef60c15e21656fd7f5731b43c2d3..93a8df6ed880ea8cc32b62f94ab0b44befbe06ef 100644 (file)
@@ -349,6 +349,8 @@ static int uni_reader_startup(struct snd_pcm_substream *substream,
        struct uniperif *reader = priv->dai_data.uni;
        int ret;
 
+       reader->substream = substream;
+
        if (!UNIPERIF_TYPE_IS_TDM(reader))
                return 0;
 
@@ -378,6 +380,7 @@ static void uni_reader_shutdown(struct snd_pcm_substream *substream,
                /* Stop the reader */
                uni_reader_stop(reader);
        }
+       reader->substream = NULL;
 }
 
 static const struct snd_soc_dai_ops uni_reader_dai_ops = {
index b92bdc8361af3a118b0d944585e3076ae1e8c947..7527ba29a5a0ea6eb9c6498d8d0293ad4aead18a 100644 (file)
@@ -259,25 +259,20 @@ static int sun8i_codec_hw_params(struct snd_pcm_substream *substream,
        return 0;
 }
 
-static const struct snd_kcontrol_new sun8i_output_left_mixer_controls[] = {
-       SOC_DAPM_SINGLE("LSlot 0", SUN8I_DAC_MXR_SRC,
-                       SUN8I_DAC_MXR_SRC_DACL_MXR_SRC_AIF1DA0L, 1, 0),
-       SOC_DAPM_SINGLE("LSlot 1", SUN8I_DAC_MXR_SRC,
-                       SUN8I_DAC_MXR_SRC_DACL_MXR_SRC_AIF1DA1L, 1, 0),
-       SOC_DAPM_SINGLE("DACL", SUN8I_DAC_MXR_SRC,
-                       SUN8I_DAC_MXR_SRC_DACL_MXR_SRC_AIF2DACL, 1, 0),
-       SOC_DAPM_SINGLE("ADCL", SUN8I_DAC_MXR_SRC,
-                       SUN8I_DAC_MXR_SRC_DACL_MXR_SRC_ADCL, 1, 0),
-};
-
-static const struct snd_kcontrol_new sun8i_output_right_mixer_controls[] = {
-       SOC_DAPM_SINGLE("RSlot 0", SUN8I_DAC_MXR_SRC,
+static const struct snd_kcontrol_new sun8i_dac_mixer_controls[] = {
+       SOC_DAPM_DOUBLE("AIF1 Slot 0 Digital DAC Playback Switch",
+                       SUN8I_DAC_MXR_SRC,
+                       SUN8I_DAC_MXR_SRC_DACL_MXR_SRC_AIF1DA0L,
                        SUN8I_DAC_MXR_SRC_DACR_MXR_SRC_AIF1DA0R, 1, 0),
-       SOC_DAPM_SINGLE("RSlot 1", SUN8I_DAC_MXR_SRC,
+       SOC_DAPM_DOUBLE("AIF1 Slot 1 Digital DAC Playback Switch",
+                       SUN8I_DAC_MXR_SRC,
+                       SUN8I_DAC_MXR_SRC_DACL_MXR_SRC_AIF1DA1L,
                        SUN8I_DAC_MXR_SRC_DACR_MXR_SRC_AIF1DA1R, 1, 0),
-       SOC_DAPM_SINGLE("DACR", SUN8I_DAC_MXR_SRC,
+       SOC_DAPM_DOUBLE("AIF2 Digital DAC Playback Switch", SUN8I_DAC_MXR_SRC,
+                       SUN8I_DAC_MXR_SRC_DACL_MXR_SRC_AIF2DACL,
                        SUN8I_DAC_MXR_SRC_DACR_MXR_SRC_AIF2DACR, 1, 0),
-       SOC_DAPM_SINGLE("ADCR", SUN8I_DAC_MXR_SRC,
+       SOC_DAPM_DOUBLE("ADC Digital DAC Playback Switch", SUN8I_DAC_MXR_SRC,
+                       SUN8I_DAC_MXR_SRC_DACL_MXR_SRC_ADCL,
                        SUN8I_DAC_MXR_SRC_DACR_MXR_SRC_ADCR, 1, 0),
 };
 
@@ -286,19 +281,21 @@ static const struct snd_soc_dapm_widget sun8i_codec_dapm_widgets[] = {
        SND_SOC_DAPM_SUPPLY("DAC", SUN8I_DAC_DIG_CTRL, SUN8I_DAC_DIG_CTRL_ENDA,
                            0, NULL, 0),
 
-       /* Analog DAC */
-       SND_SOC_DAPM_DAC("Digital Left DAC", "Playback", SUN8I_AIF1_DACDAT_CTRL,
-                        SUN8I_AIF1_DACDAT_CTRL_AIF1_DA0L_ENA, 0),
-       SND_SOC_DAPM_DAC("Digital Right DAC", "Playback", SUN8I_AIF1_DACDAT_CTRL,
-                        SUN8I_AIF1_DACDAT_CTRL_AIF1_DA0R_ENA, 0),
+       /* Analog DAC AIF */
+       SND_SOC_DAPM_AIF_IN("AIF1 Slot 0 Left", "Playback", 0,
+                           SUN8I_AIF1_DACDAT_CTRL,
+                           SUN8I_AIF1_DACDAT_CTRL_AIF1_DA0L_ENA, 0),
+       SND_SOC_DAPM_AIF_IN("AIF1 Slot 0 Right", "Playback", 0,
+                           SUN8I_AIF1_DACDAT_CTRL,
+                           SUN8I_AIF1_DACDAT_CTRL_AIF1_DA0R_ENA, 0),
 
        /* DAC Mixers */
-       SND_SOC_DAPM_MIXER("Left DAC Mixer", SND_SOC_NOPM, 0, 0,
-                          sun8i_output_left_mixer_controls,
-                          ARRAY_SIZE(sun8i_output_left_mixer_controls)),
-       SND_SOC_DAPM_MIXER("Right DAC Mixer", SND_SOC_NOPM, 0, 0,
-                          sun8i_output_right_mixer_controls,
-                          ARRAY_SIZE(sun8i_output_right_mixer_controls)),
+       SND_SOC_DAPM_MIXER("Left Digital DAC Mixer", SND_SOC_NOPM, 0, 0,
+                          sun8i_dac_mixer_controls,
+                          ARRAY_SIZE(sun8i_dac_mixer_controls)),
+       SND_SOC_DAPM_MIXER("Right Digital DAC Mixer", SND_SOC_NOPM, 0, 0,
+                          sun8i_dac_mixer_controls,
+                          ARRAY_SIZE(sun8i_dac_mixer_controls)),
 
        /* Clocks */
        SND_SOC_DAPM_SUPPLY("MODCLK AFI1", SUN8I_MOD_CLK_ENA,
@@ -321,8 +318,6 @@ static const struct snd_soc_dapm_widget sun8i_codec_dapm_widgets[] = {
                            SUN8I_MOD_RST_CTL_AIF1, 0, NULL, 0),
        SND_SOC_DAPM_SUPPLY("RST DAC", SUN8I_MOD_RST_CTL,
                            SUN8I_MOD_RST_CTL_DAC, 0, NULL, 0),
-
-       SND_SOC_DAPM_OUTPUT("HP"),
 };
 
 static const struct snd_soc_dapm_route sun8i_codec_dapm_routes[] = {
@@ -338,16 +333,14 @@ static const struct snd_soc_dapm_route sun8i_codec_dapm_routes[] = {
        { "DAC", NULL, "MODCLK DAC" },
 
        /* DAC Routes */
-       { "Digital Left DAC", NULL, "DAC" },
-       { "Digital Right DAC", NULL, "DAC" },
+       { "AIF1 Slot 0 Right", NULL, "DAC" },
+       { "AIF1 Slot 0 Left", NULL, "DAC" },
 
        /* DAC Mixer Routes */
-       { "Left DAC Mixer", "LSlot 0", "Digital Left DAC"},
-       { "Right DAC Mixer", "RSlot 0", "Digital Right DAC"},
-
-       /* End of route : HP out */
-       { "HP", NULL, "Left DAC Mixer" },
-       { "HP", NULL, "Right DAC Mixer" },
+       { "Left Digital DAC Mixer", "AIF1 Slot 0 Digital DAC Playback Switch",
+         "AIF1 Slot 0 Left"},
+       { "Right Digital DAC Mixer", "AIF1 Slot 0 Digital DAC Playback Switch",
+         "AIF1 Slot 0 Right"},
 };
 
 static struct snd_soc_dai_ops sun8i_codec_dai_ops = {
index 84c8f8fc597cd6046d17ee27839024b98329e33d..8adf4d1bd46e71237e6827f0eda8fc89e7b40ea1 100644 (file)
@@ -1,6 +1,7 @@
 menuconfig SND_X86
-       tristate "X86 sound devices"
+       bool "X86 sound devices"
        depends on X86
+       default y
        ---help---
          X86 sound devices that don't fall under SoC or PCI categories
 
index 122153b16ea4eeba1e84bf30a71a3dda199ae534..390d7c9685fd6107c83be2296ead9cb198b571a3 100644 (file)
                .off   = OFF,                                   \
                .imm   = 0 })
 
+/* Atomic memory add, *(uint *)(dst_reg + off16) += src_reg */
+
+#define BPF_STX_XADD(SIZE, DST, SRC, OFF)                      \
+       ((struct bpf_insn) {                                    \
+               .code  = BPF_STX | BPF_SIZE(SIZE) | BPF_XADD,   \
+               .dst_reg = DST,                                 \
+               .src_reg = SRC,                                 \
+               .off   = OFF,                                   \
+               .imm   = 0 })
+
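A minimal sketch of hand-assembling an atomic add with the new macro (BPF_MOV64_IMM comes from the same header; the registers and stack slot are illustrative):

        /* lock *(u64 *)(r10 - 8) += r1 */
        struct bpf_insn insns[] = {
                BPF_MOV64_IMM(BPF_REG_1, 1),
                BPF_STX_XADD(BPF_DW, BPF_REG_10, BPF_REG_1, -8),
        };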
 /* Memory store, *(uint *) (dst_reg + off16) = imm32 */
 
 #define BPF_ST_MEM(SIZE, DST, OFF, IMM)                                \
index 0539a0ceef38155835552360667070552ebce641..1e062bb54eec11b866cfb8faf99e44c725d2c4e5 100644 (file)
@@ -81,6 +81,7 @@ enum bpf_cmd {
        BPF_OBJ_GET,
        BPF_PROG_ATTACH,
        BPF_PROG_DETACH,
+       BPF_PROG_TEST_RUN,
 };
 
 enum bpf_map_type {
@@ -96,6 +97,8 @@ enum bpf_map_type {
        BPF_MAP_TYPE_LRU_HASH,
        BPF_MAP_TYPE_LRU_PERCPU_HASH,
        BPF_MAP_TYPE_LPM_TRIE,
+       BPF_MAP_TYPE_ARRAY_OF_MAPS,
+       BPF_MAP_TYPE_HASH_OF_MAPS,
 };
 
 enum bpf_prog_type {
@@ -152,6 +155,7 @@ union bpf_attr {
                __u32   value_size;     /* size of value in bytes */
                __u32   max_entries;    /* max number of entries in a map */
                __u32   map_flags;      /* prealloc or not */
+               __u32   inner_map_fd;   /* fd pointing to the inner map */
        };
 
        struct { /* anonymous struct used by BPF_MAP_*_ELEM commands */
@@ -186,6 +190,17 @@ union bpf_attr {
                __u32           attach_type;
                __u32           attach_flags;
        };
+
+       struct { /* anonymous struct used by BPF_PROG_TEST_RUN command */
+               __u32           prog_fd;
+               __u32           retval;
+               __u32           data_size_in;
+               __u32           data_size_out;
+               __aligned_u64   data_in;
+               __aligned_u64   data_out;
+               __u32           repeat;
+               __u32           duration;
+       } test;
 } __attribute__((aligned(8)));
 
 /* BPF helper function descriptions:
@@ -456,6 +471,18 @@ union bpf_attr {
  *     Return:
  *       > 0 length of the string including the trailing NUL on success
  *       < 0 error
+ *
+ * u64 bpf_get_socket_cookie(skb)
+ *     Get the cookie for the socket stored inside sk_buff.
+ *     @skb: pointer to skb
+ *     Return: an 8-byte, non-decreasing number on success, or 0 if the socket
+ *     field is missing inside sk_buff
+ *
+ * u32 bpf_get_socket_uid(skb)
+ *     Get the owner uid of the socket stored inside sk_buff.
+ *     @skb: pointer to skb
+ *     Return: uid of the socket owner on success or 0 if the socket pointer
+ *     inside sk_buff is NULL
  */
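A minimal sketch of calling the two new helpers from a socket filter, assuming the samples-style function-pointer wrappers (the wrapper declarations and section name are illustrative, not part of this patch):

        static __u64 (*bpf_get_socket_cookie)(void *ctx) =
                (void *) BPF_FUNC_get_socket_cookie;
        static __u32 (*bpf_get_socket_uid)(void *ctx) =
                (void *) BPF_FUNC_get_socket_uid;

        SEC("socket")
        int tag_by_owner(struct __sk_buff *skb)
        {
                __u64 cookie = bpf_get_socket_cookie(skb); /* 0 if no socket */
                __u32 uid = bpf_get_socket_uid(skb);       /* 0 if no socket */

                /* e.g. key per-socket counters by cookie, filter by uid */
                return cookie != 0 && uid != 0;
        }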
 #define __BPF_FUNC_MAPPER(FN)          \
        FN(unspec),                     \
@@ -503,7 +530,9 @@ union bpf_attr {
        FN(get_numa_node_id),           \
        FN(skb_change_head),            \
        FN(xdp_adjust_head),            \
-       FN(probe_read_str),
+       FN(probe_read_str),             \
+       FN(get_socket_cookie),          \
+       FN(get_socket_uid),
 
 /* integer value in 'imm' field of BPF_CALL instruction selects which helper
  * function eBPF program intends to call
diff --git a/tools/include/uapi/linux/bpf_perf_event.h b/tools/include/uapi/linux/bpf_perf_event.h
new file mode 100644 (file)
index 0000000..0674272
--- /dev/null
@@ -0,0 +1,18 @@
+/* Copyright (c) 2016 Facebook
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of version 2 of the GNU General Public
+ * License as published by the Free Software Foundation.
+ */
+#ifndef _UAPI__LINUX_BPF_PERF_EVENT_H__
+#define _UAPI__LINUX_BPF_PERF_EVENT_H__
+
+#include <linux/types.h>
+#include <linux/ptrace.h>
+
+struct bpf_perf_event_data {
+       struct pt_regs regs;
+       __u64 sample_period;
+};
+
+#endif /* _UAPI__LINUX_BPF_PERF_EVENT_H__ */
index 11c8d9bc762ef0c4bde99dec4292e84ff00d3477..5d19fdf80292c226769a91ccef519a47b3788b2b 100644 (file)
@@ -1387,7 +1387,7 @@ static bool pci_data_iowrite(u16 port, u32 mask, u32 val)
                /* Allow writing to any other BAR, or expansion ROM */
                iowrite(portoff, val, mask, &d->config_words[reg]);
                return true;
-               /* We let them overide latency timer and cacheline size */
+               /* We let them override latency timer and cacheline size */
        } else if (&d->config_words[reg] == (void *)&d->config.cacheline_size) {
                /* Only let them change the first two fields. */
                if (mask == 0xFFFFFFFF)
index e2efddf1023177c202d626257c8466f3cb8c40c3..1f5300e56b44dc7bca0b269261d5f7987eb564b6 100644 (file)
@@ -132,7 +132,7 @@ else
   Q = @
 endif
 
-# Disable command line variables (CFLAGS) overide from top
+# Disable command line variables (CFLAGS) override from top
 # level Makefile (perf), otherwise build Makefile will get
 # the same command line setup.
 MAKEOVERRIDES=
index 207c2eeddab064d7c304efed09653ba6e227d6e9..f84c398c11f4c3f1637c32f07d4ecd4da4df74ef 100644 (file)
@@ -69,6 +69,23 @@ int bpf_create_map(enum bpf_map_type map_type, int key_size,
        return sys_bpf(BPF_MAP_CREATE, &attr, sizeof(attr));
 }
 
+int bpf_create_map_in_map(enum bpf_map_type map_type, int key_size,
+                         int inner_map_fd, int max_entries, __u32 map_flags)
+{
+       union bpf_attr attr;
+
+       memset(&attr, '\0', sizeof(attr));
+
+       attr.map_type = map_type;
+       attr.key_size = key_size;
+       attr.value_size = 4;
+       attr.inner_map_fd = inner_map_fd;
+       attr.max_entries = max_entries;
+       attr.map_flags = map_flags;
+
+       return sys_bpf(BPF_MAP_CREATE, &attr, sizeof(attr));
+}
+
 int bpf_load_program(enum bpf_prog_type type, const struct bpf_insn *insns,
                     size_t insns_cnt, const char *license,
                     __u32 kern_version, char *log_buf, size_t log_buf_sz)
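Note the hardwired attr.value_size = 4 above: for the *_OF_MAPS types the outer map's values are 32-bit references to inner maps, so callers don't pass a value size. A minimal usage sketch (key/value sizes and entry counts are illustrative):

        /* the inner map acts as a template; its fd seeds the outer map */
        int inner_fd = bpf_create_map(BPF_MAP_TYPE_HASH, sizeof(__u32),
                                      sizeof(__u64), 64, 0);
        int outer_fd = bpf_create_map_in_map(BPF_MAP_TYPE_ARRAY_OF_MAPS,
                                             sizeof(__u32), inner_fd, 8, 0);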
@@ -192,3 +209,27 @@ int bpf_prog_detach(int target_fd, enum bpf_attach_type type)
 
        return sys_bpf(BPF_PROG_DETACH, &attr, sizeof(attr));
 }
+
+int bpf_prog_test_run(int prog_fd, int repeat, void *data, __u32 size,
+                     void *data_out, __u32 *size_out, __u32 *retval,
+                     __u32 *duration)
+{
+       union bpf_attr attr;
+       int ret;
+
+       bzero(&attr, sizeof(attr));
+       attr.test.prog_fd = prog_fd;
+       attr.test.data_in = ptr_to_u64(data);
+       attr.test.data_out = ptr_to_u64(data_out);
+       attr.test.data_size_in = size;
+       attr.test.repeat = repeat;
+
+       ret = sys_bpf(BPF_PROG_TEST_RUN, &attr, sizeof(attr));
+       if (size_out)
+               *size_out = attr.test.data_size_out;
+       if (retval)
+               *retval = attr.test.retval;
+       if (duration)
+               *duration = attr.test.duration;
+       return ret;
+}
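A minimal sketch of driving the wrapper, mirroring how test_progs.c (added later in this diff) uses it; pkt_v4 stands in for any input buffer:

        __u32 retval, duration;
        int err;

        /* run the program 100000 times over the packet; the kernel reports
         * the measured duration and the program's return value */
        err = bpf_prog_test_run(prog_fd, 100000, &pkt_v4, sizeof(pkt_v4),
                                NULL, NULL, &retval, &duration);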
index 09c3dcac04963e6973d7a002e54b6a139af6212a..edb4daeff7a52c44f6bc366c265a7f5a3aadfc10 100644 (file)
@@ -26,6 +26,8 @@
 
 int bpf_create_map(enum bpf_map_type map_type, int key_size, int value_size,
                   int max_entries, __u32 map_flags);
+int bpf_create_map_in_map(enum bpf_map_type map_type, int key_size,
+                         int inner_map_fd, int max_entries, __u32 map_flags);
 
 /* Recommended log buffer size */
 #define BPF_LOG_BUF_SIZE 65536
@@ -45,6 +47,8 @@ int bpf_obj_get(const char *pathname);
 int bpf_prog_attach(int prog_fd, int attachable_fd, enum bpf_attach_type type,
                    unsigned int flags);
 int bpf_prog_detach(int attachable_fd, enum bpf_attach_type type);
-
+int bpf_prog_test_run(int prog_fd, int repeat, void *data, __u32 size,
+                     void *data_out, __u32 *size_out, __u32 *retval,
+                     __u32 *duration);
 
 #endif
index ac6eb863b2a40df00c4ead9f48c872ab67949c19..1a2c07eb7795bb4fb43e4a97bdc721d7cbc7f3b8 100644 (file)
@@ -1618,8 +1618,7 @@ int bpf_program__nth_fd(struct bpf_program *prog, int n)
        return fd;
 }
 
-static void bpf_program__set_type(struct bpf_program *prog,
-                                 enum bpf_prog_type type)
+void bpf_program__set_type(struct bpf_program *prog, enum bpf_prog_type type)
 {
        prog->type = type;
 }
index b30394f9947a35356af870223664a4409f8e5fd6..32c7252f734e42f9895c4686c4236f4b259d99a3 100644 (file)
@@ -25,6 +25,7 @@
 #include <stdint.h>
 #include <stdbool.h>
 #include <sys/types.h>  // for size_t
+#include <linux/bpf.h>
 
 enum libbpf_errno {
        __LIBBPF_ERRNO__START = 4000,
@@ -185,6 +186,7 @@ int bpf_program__set_sched_cls(struct bpf_program *prog);
 int bpf_program__set_sched_act(struct bpf_program *prog);
 int bpf_program__set_xdp(struct bpf_program *prog);
 int bpf_program__set_perf_event(struct bpf_program *prog);
+void bpf_program__set_type(struct bpf_program *prog, enum bpf_prog_type type);
 
 bool bpf_program__is_socket_filter(struct bpf_program *prog);
 bool bpf_program__is_tracepoint(struct bpf_program *prog);
index 47076b15eebeaa5b54583761130b10ecef2fc0aa..9b8555ea3459c85bef282dad5166700771f0e5ed 100644 (file)
@@ -135,7 +135,7 @@ else
   Q = @
 endif
 
-# Disable command line variables (CFLAGS) overide from top
+# Disable command line variables (CFLAGS) override from top
 # level Makefile (perf), otherwise build Makefile will get
 # the same command line setup.
 MAKEOVERRIDES=
index 66342804161c80ea611b3dfa554a602fadc4213e..0c03538df74c01a1ecedc353e21b6c81083ee1e1 100644 (file)
@@ -140,7 +140,7 @@ struct pevent_plugin_option {
  *   struct pevent_plugin_option PEVENT_PLUGIN_OPTIONS[] = {
  *     {
  *             .name = "option-name",
- *             .plugin_alias = "overide-file-name", (optional)
+ *             .plugin_alias = "override-file-name", (optional)
  *             .description = "description of option to show users",
  *     },
  *     {
index 4cfdbb5b696783cbeb097f04220c180e7e87e82a..066086dd59a8017e293993a50d2f432d47441cfe 100644 (file)
@@ -805,11 +805,20 @@ static struct rela *find_switch_table(struct objtool_file *file,
                     insn->jump_dest->offset > orig_insn->offset))
                    break;
 
+               /* look for a relocation which references .rodata */
                text_rela = find_rela_by_dest_range(insn->sec, insn->offset,
                                                    insn->len);
-               if (text_rela && text_rela->sym == file->rodata->sym)
-                       return find_rela_by_dest(file->rodata,
-                                                text_rela->addend);
+               if (!text_rela || text_rela->sym != file->rodata->sym)
+                       continue;
+
+               /*
+                * Make sure the .rodata address isn't associated with a
+                * symbol.  gcc jump tables are anonymous data.
+                */
+               if (find_symbol_containing(file->rodata, text_rela->addend))
+                       continue;
+
+               return find_rela_by_dest(file->rodata, text_rela->addend);
        }
 
        return NULL;
index 0d7983ac63ef9e300110d9a6ec6771a75378784e..d897702ce7427804da2c09387f674077f22accc5 100644 (file)
@@ -85,6 +85,18 @@ struct symbol *find_symbol_by_offset(struct section *sec, unsigned long offset)
        return NULL;
 }
 
+struct symbol *find_symbol_containing(struct section *sec, unsigned long offset)
+{
+       struct symbol *sym;
+
+       list_for_each_entry(sym, &sec->symbol_list, list)
+               if (sym->type != STT_SECTION &&
+                   offset >= sym->offset && offset < sym->offset + sym->len)
+                       return sym;
+
+       return NULL;
+}
+
 struct rela *find_rela_by_dest_range(struct section *sec, unsigned long offset,
                                     unsigned int len)
 {
index aa1ff6596684f9304d0dd4bd3165f819b4dcdaf7..731973e1a3f5eb6bb1d6e67890c54c1440f237f3 100644 (file)
@@ -79,6 +79,7 @@ struct elf {
 struct elf *elf_open(const char *name);
 struct section *find_section_by_name(struct elf *elf, const char *name);
 struct symbol *find_symbol_by_offset(struct section *sec, unsigned long offset);
+struct symbol *find_symbol_containing(struct section *sec, unsigned long offset);
 struct rela *find_rela_by_dest(struct section *sec, unsigned long offset);
 struct rela *find_rela_by_dest_range(struct section *sec, unsigned long offset,
                                     unsigned int len);
index 7913363bde5c0407fded864f62a839c8b28056ea..4f3c758d875d6ce6855db7fa0731436f9bb06671 100644 (file)
@@ -31,7 +31,7 @@
 #error Instruction buffer size too small
 #endif
 
-/* Based on branch_type() from perf_event_intel_lbr.c */
+/* Based on branch_type() from arch/x86/events/intel/lbr.c */
 static void intel_pt_insn_decoder(struct insn *insn,
                                  struct intel_pt_insn *intel_pt_insn)
 {
index 70e389bc4af71aa8f18ae67507fb65b5093a7f98..9b4d8ba22fed85f1f2bef6f5d47dc88cbb4df5d1 100644 (file)
@@ -202,7 +202,7 @@ void symbols__fixup_end(struct rb_root *symbols)
 
        /* Last entry */
        if (curr->end == curr->start)
-               curr->end = roundup(curr->start, 4096);
+               curr->end = roundup(curr->start, 4096) + 4096;
 }
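The "+ 4096" matters when curr->start is already page-aligned: roundup() then returns start itself and the last symbol ends up zero-length. Worked example:

        roundup(0x403000, 4096)        = 0x403000   /* old: empty symbol */
        roundup(0x403000, 4096) + 4096 = 0x404000   /* new: spans one page */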
 
 void __map_groups__fixup_end(struct map_groups *mg, enum map_type type)
index 6e4eb2fc2d1e78edc356692dcadfe3bfaebd65ec..0c8b61f8398edace8b4d7be42e50b8638679520d 100755 (executable)
@@ -1880,6 +1880,7 @@ sub get_grub_index {
 sub wait_for_input
 {
     my ($fp, $time) = @_;
+    my $start_time;
     my $rin;
     my $rout;
     my $nr;
@@ -1895,17 +1896,22 @@ sub wait_for_input
     vec($rin, fileno($fp), 1) = 1;
     vec($rin, fileno(\*STDIN), 1) = 1;
 
+    $start_time = time;
+
     while (1) {
        $nr = select($rout=$rin, undef, undef, $time);
 
-       if ($nr <= 0) {
-           return undef;
-       }
+       last if ($nr <= 0);
 
        # copy data from stdin to the console
        if (vec($rout, fileno(\*STDIN), 1) == 1) {
-           sysread(\*STDIN, $buf, 1000);
-           syswrite($fp, $buf, 1000);
+           $nr = sysread(\*STDIN, $buf, 1000);
+           syswrite($fp, $buf, $nr) if ($nr > 0);
+       }
+
+       # The timeout is based on time waiting for the fp data
+       if (vec($rout, fileno($fp), 1) != 1) {
+           last if (defined($time) && (time - $start_time > $time));
            next;
        }
 
@@ -1917,12 +1923,11 @@ sub wait_for_input
            last if ($ch eq "\n");
        }
 
-       if (!length($line)) {
-           return undef;
-       }
+       last if (!length($line));
 
        return $line;
     }
+    return undef;
 }
 
 sub reboot_to {
index f11315bedefc3d68152bef22da0f8ab60f8c5be6..6a9480c03cbdfce0ae5e29e32f2cedcc342d66e1 100644 (file)
@@ -1,6 +1,7 @@
 
 CFLAGS += -I. -I../../include -g -O2 -Wall -D_LGPL_SOURCE -fsanitize=address
-LDFLAGS += -lpthread -lurcu
+LDFLAGS += -fsanitize=address
+LDLIBS += -lpthread -lurcu
 TARGETS = main idr-test multiorder
 CORE_OFILES := radix-tree.o idr.o linux.o test.o find_bit.o
 OFILES = main.o $(CORE_OFILES) regression1.o regression2.o regression3.o \
@@ -10,23 +11,25 @@ ifndef SHIFT
        SHIFT=3
 endif
 
+ifeq ($(BUILD), 32)
+       CFLAGS += -m32
+       LDFLAGS += -m32
+endif
+
 targets: mapshift $(TARGETS)
 
 main:  $(OFILES)
-       $(CC) $(CFLAGS) $(LDFLAGS) $^ -o main
 
 idr-test: idr-test.o $(CORE_OFILES)
-       $(CC) $(CFLAGS) $(LDFLAGS) $^ -o idr-test
 
 multiorder: multiorder.o $(CORE_OFILES)
-       $(CC) $(CFLAGS) $(LDFLAGS) $^ -o multiorder
 
 clean:
        $(RM) $(TARGETS) *.o radix-tree.c idr.c generated/map-shift.h
 
 vpath %.c ../../lib
 
-$(OFILES): *.h */*.h generated/map-shift.h \
+$(OFILES): Makefile *.h */*.h generated/map-shift.h \
        ../../include/linux/*.h \
        ../../include/asm/*.h \
        ../../../include/linux/radix-tree.h \
@@ -41,7 +44,7 @@ idr.c: ../../../lib/idr.c
 .PHONY: mapshift
 
 mapshift:
-       @if ! grep -qw $(SHIFT) generated/map-shift.h; then             \
+       @if ! grep -qws $(SHIFT) generated/map-shift.h; then            \
                echo "#define RADIX_TREE_MAP_SHIFT $(SHIFT)" >          \
                                generated/map-shift.h;                  \
        fi
index 9b09ddfe462fd3b2ea782805560c349e720a4637..99c40f3ed1337f5e1bd6e6b2fd4d88dea8ec8064 100644 (file)
@@ -17,6 +17,9 @@
 #include <time.h>
 #include "test.h"
 
+#define for_each_index(i, base, order) \
+               for (i = base; i < base + (1 << order); i++)
+
 #define NSEC_PER_SEC   1000000000L
 
 static long long benchmark_iter(struct radix_tree_root *root, bool tagged)
@@ -57,27 +60,176 @@ again:
        return nsec;
 }
 
+static void benchmark_insert(struct radix_tree_root *root,
+                            unsigned long size, unsigned long step, int order)
+{
+       struct timespec start, finish;
+       unsigned long index;
+       long long nsec;
+
+       clock_gettime(CLOCK_MONOTONIC, &start);
+
+       for (index = 0 ; index < size ; index += step)
+               item_insert_order(root, index, order);
+
+       clock_gettime(CLOCK_MONOTONIC, &finish);
+
+       nsec = (finish.tv_sec - start.tv_sec) * NSEC_PER_SEC +
+              (finish.tv_nsec - start.tv_nsec);
+
+       printv(2, "Size: %8ld, step: %8ld, order: %d, insertion: %15lld ns\n",
+               size, step, order, nsec);
+}
+
+static void benchmark_tagging(struct radix_tree_root *root,
+                            unsigned long size, unsigned long step, int order)
+{
+       struct timespec start, finish;
+       unsigned long index;
+       long long nsec;
+
+       clock_gettime(CLOCK_MONOTONIC, &start);
+
+       for (index = 0 ; index < size ; index += step)
+               radix_tree_tag_set(root, index, 0);
+
+       clock_gettime(CLOCK_MONOTONIC, &finish);
+
+       nsec = (finish.tv_sec - start.tv_sec) * NSEC_PER_SEC +
+              (finish.tv_nsec - start.tv_nsec);
+
+       printv(2, "Size: %8ld, step: %8ld, order: %d, tagging: %17lld ns\n",
+               size, step, order, nsec);
+}
+
+static void benchmark_delete(struct radix_tree_root *root,
+                            unsigned long size, unsigned long step, int order)
+{
+       struct timespec start, finish;
+       unsigned long index, i;
+       long long nsec;
+
+       clock_gettime(CLOCK_MONOTONIC, &start);
+
+       for (index = 0 ; index < size ; index += step)
+               for_each_index(i, index, order)
+                       item_delete(root, i);
+
+       clock_gettime(CLOCK_MONOTONIC, &finish);
+
+       nsec = (finish.tv_sec - start.tv_sec) * NSEC_PER_SEC +
+              (finish.tv_nsec - start.tv_nsec);
+
+       printv(2, "Size: %8ld, step: %8ld, order: %d, deletion: %16lld ns\n",
+               size, step, order, nsec);
+}
+
 static void benchmark_size(unsigned long size, unsigned long step, int order)
 {
        RADIX_TREE(tree, GFP_KERNEL);
        long long normal, tagged;
-       unsigned long index;
 
-       for (index = 0 ; index < size ; index += step) {
-               item_insert_order(&tree, index, order);
-               radix_tree_tag_set(&tree, index, 0);
-       }
+       benchmark_insert(&tree, size, step, order);
+       benchmark_tagging(&tree, size, step, order);
 
        tagged = benchmark_iter(&tree, true);
        normal = benchmark_iter(&tree, false);
 
-       printv(2, "Size %ld, step %6ld, order %d tagged %10lld ns, normal %10lld ns\n",
-               size, step, order, tagged, normal);
+       printv(2, "Size: %8ld, step: %8ld, order: %d, tagged iteration: %8lld ns\n",
+               size, step, order, tagged);
+       printv(2, "Size: %8ld, step: %8ld, order: %d, normal iteration: %8lld ns\n",
+               size, step, order, normal);
+
+       benchmark_delete(&tree, size, step, order);
 
        item_kill_tree(&tree);
        rcu_barrier();
 }
 
+static long long __benchmark_split(unsigned long index,
+                                  int old_order, int new_order)
+{
+       struct timespec start, finish;
+       long long nsec;
+       RADIX_TREE(tree, GFP_ATOMIC);
+
+       item_insert_order(&tree, index, old_order);
+
+       clock_gettime(CLOCK_MONOTONIC, &start);
+       radix_tree_split(&tree, index, new_order);
+       clock_gettime(CLOCK_MONOTONIC, &finish);
+       nsec = (finish.tv_sec - start.tv_sec) * NSEC_PER_SEC +
+              (finish.tv_nsec - start.tv_nsec);
+
+       item_kill_tree(&tree);
+
+       return nsec;
+}
+
+static void benchmark_split(unsigned long size, unsigned long step)
+{
+       int i, j, idx;
+       long long nsec = 0;
+
+       for (idx = 0; idx < size; idx += step) {
+               for (i = 3; i < 11; i++) {
+                       for (j = 0; j < i; j++) {
+                               nsec += __benchmark_split(idx, i, j);
+                       }
+               }
+       }
+
+       printv(2, "Size %8ld, step %8ld, split time %10lld ns\n",
+                       size, step, nsec);
+}
+
+static long long __benchmark_join(unsigned long index,
+                                  unsigned order1, unsigned order2)
+{
+       unsigned long loc;
+       struct timespec start, finish;
+       long long nsec;
+       void *item, *item2 = item_create(index + 1, order1);
+       RADIX_TREE(tree, GFP_KERNEL);
+
+       item_insert_order(&tree, index, order2);
+       item = radix_tree_lookup(&tree, index);
+
+       clock_gettime(CLOCK_MONOTONIC, &start);
+       radix_tree_join(&tree, index + 1, order1, item2);
+       clock_gettime(CLOCK_MONOTONIC, &finish);
+       nsec = (finish.tv_sec - start.tv_sec) * NSEC_PER_SEC +
+               (finish.tv_nsec - start.tv_nsec);
+
+       loc = find_item(&tree, item);
+       if (loc == -1)
+               free(item);
+
+       item_kill_tree(&tree);
+
+       return nsec;
+}
+
+static void benchmark_join(unsigned long step)
+{
+       int i, j, idx;
+       long long nsec = 0;
+
+       for (idx = 0; idx < 1 << 10; idx += step) {
+               for (i = 1; i < 15; i++) {
+                       for (j = 0; j < i; j++) {
+                               nsec += __benchmark_join(idx, i, j);
+                       }
+               }
+       }
+
+       printv(2, "Size %8d, step %8ld, join time %10lld ns\n",
+                       1 << 10, step, nsec);
+}
+
 void benchmark(void)
 {
        unsigned long size[] = {1 << 10, 1 << 20, 0};
@@ -95,4 +247,11 @@ void benchmark(void)
        for (c = 0; size[c]; c++)
                for (s = 0; step[s]; s++)
                        benchmark_size(size[c], step[s] << 9, 9);
+
+       for (c = 0; size[c]; c++)
+               for (s = 0; step[s]; s++)
+                       benchmark_split(size[c], step[s]);
+
+       for (s = 0; step[s]; s++)
+               benchmark_join(step[s]);
 }
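Each benchmark above open-codes the same CLOCK_MONOTONIC delta. A refactoring sketch that would factor it out (not part of this patch):

        static long long elapsed_ns(const struct timespec *start,
                                    const struct timespec *finish)
        {
                return (finish->tv_sec - start->tv_sec) * NSEC_PER_SEC +
                       (finish->tv_nsec - start->tv_nsec);
        }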
index a26098c6123d1cf99ce2b6669a0e186bd22cedbe..30cd0b296f1a76847122f009c2cd0f2d5b9109f4 100644 (file)
@@ -153,6 +153,30 @@ void idr_nowait_test(void)
        idr_destroy(&idr);
 }
 
+void idr_get_next_test(void)
+{
+       unsigned long i;
+       int nextid;
+       DEFINE_IDR(idr);
+
+       int indices[] = {4, 7, 9, 15, 65, 128, 1000, 99999, 0};
+
+       for (i = 0; indices[i]; i++) {
+               struct item *item = item_create(indices[i], 0);
+               assert(idr_alloc(&idr, item, indices[i], indices[i+1],
+                                GFP_KERNEL) == indices[i]);
+       }
+
+       for (i = 0, nextid = 0; indices[i]; i++) {
+               idr_get_next(&idr, &nextid);
+               assert(nextid == indices[i]);
+               nextid++;
+       }
+
+       idr_for_each(&idr, item_idr_free, &idr);
+       idr_destroy(&idr);
+}
+
 void idr_checks(void)
 {
        unsigned long i;
@@ -202,6 +226,7 @@ void idr_checks(void)
        idr_alloc_test();
        idr_null_test();
        idr_nowait_test();
+       idr_get_next_test();
 }
 
 /*
@@ -338,7 +363,7 @@ void ida_check_random(void)
 {
        DEFINE_IDA(ida);
        DECLARE_BITMAP(bitmap, 2048);
-       int id;
+       int id, err;
        unsigned int i;
        time_t s = time(NULL);
 
@@ -352,8 +377,11 @@ void ida_check_random(void)
                        ida_remove(&ida, bit);
                } else {
                        __set_bit(bit, bitmap);
-                       ida_pre_get(&ida, GFP_KERNEL);
-                       assert(!ida_get_new_above(&ida, bit, &id));
+                       do {
+                               ida_pre_get(&ida, GFP_KERNEL);
+                               err = ida_get_new_above(&ida, bit, &id);
+                       } while (err == -ENOMEM);
+                       assert(!err);
                        assert(id == bit);
                }
        }
@@ -362,6 +390,24 @@ void ida_check_random(void)
                goto repeat;
 }
 
+void ida_simple_get_remove_test(void)
+{
+       DEFINE_IDA(ida);
+       unsigned long i;
+
+       for (i = 0; i < 10000; i++) {
+               assert(ida_simple_get(&ida, 0, 20000, GFP_KERNEL) == i);
+       }
+       assert(ida_simple_get(&ida, 5, 30, GFP_KERNEL) < 0);
+
+       for (i = 0; i < 10000; i++) {
+               ida_simple_remove(&ida, i);
+       }
+       assert(ida_is_empty(&ida));
+
+       ida_destroy(&ida);
+}
+
 void ida_checks(void)
 {
        DEFINE_IDA(ida);
@@ -428,15 +474,41 @@ void ida_checks(void)
        ida_check_max();
        ida_check_conv();
        ida_check_random();
+       ida_simple_get_remove_test();
 
        radix_tree_cpu_dead(1);
 }
 
+static void *ida_random_fn(void *arg)
+{
+       rcu_register_thread();
+       ida_check_random();
+       rcu_unregister_thread();
+       return NULL;
+}
+
+void ida_thread_tests(void)
+{
+       pthread_t threads[10];
+       int i;
+
+       for (i = 0; i < ARRAY_SIZE(threads); i++)
+               if (pthread_create(&threads[i], NULL, ida_random_fn, NULL)) {
+                       perror("creating ida thread");
+                       exit(1);
+               }
+
+       while (i--)
+               pthread_join(threads[i], NULL);
+}
+
 int __weak main(void)
 {
        radix_tree_init();
        idr_checks();
        ida_checks();
+       ida_thread_tests();
+       radix_tree_cpu_dead(1);
        rcu_barrier();
        if (nr_allocated)
                printf("nr_allocated = %d\n", nr_allocated);
index b829127d56705747a0a74c73e8b11b8cae8ecc27..bc9a78449572f10331a8bbc35070801f307b6caa 100644 (file)
@@ -368,6 +368,7 @@ int main(int argc, char **argv)
        iteration_test(0, 10 + 90 * long_run);
        iteration_test(7, 10 + 90 * long_run);
        single_thread_tests(long_run);
+       ida_thread_tests();
 
        /* Free any remaining preallocated nodes */
        radix_tree_cpu_dead(0);
index d4ff009892456a3b588df788488027da45209ecf..36dcf7d6945dc631ce7b2bd1c95fb0d14f662167 100644 (file)
@@ -330,6 +330,34 @@ static void single_check(void)
        item_kill_tree(&tree);
 }
 
+void radix_tree_clear_tags_test(void)
+{
+       unsigned long index;
+       struct radix_tree_node *node;
+       struct radix_tree_iter iter;
+       void **slot;
+
+       RADIX_TREE(tree, GFP_KERNEL);
+
+       item_insert(&tree, 0);
+       item_tag_set(&tree, 0, 0);
+       __radix_tree_lookup(&tree, 0, &node, &slot);
+       radix_tree_clear_tags(&tree, node, slot);
+       assert(item_tag_get(&tree, 0, 0) == 0);
+
+       for (index = 0; index < 1000; index++) {
+               item_insert(&tree, index);
+               item_tag_set(&tree, index, 0);
+       }
+
+       radix_tree_for_each_slot(slot, &tree, &iter, 0) {
+               radix_tree_clear_tags(&tree, iter.node, slot);
+               assert(item_tag_get(&tree, iter.index, 0) == 0);
+       }
+
+       item_kill_tree(&tree);
+}
+
 void tag_check(void)
 {
        single_check();
@@ -347,4 +375,5 @@ void tag_check(void)
        thrash_tags();
        rcu_barrier();
        printv(2, "after thrash_tags: %d allocated\n", nr_allocated);
+       radix_tree_clear_tags_test();
 }
index b30e11d9d271c39ccb284019938876ae2785f7ca..0f8220cc61663ffa2a872db42c96b4e5433ed0a7 100644 (file)
@@ -36,6 +36,7 @@ void iteration_test(unsigned order, unsigned duration);
 void benchmark(void);
 void idr_checks(void);
 void ida_checks(void);
+void ida_thread_tests(void);
 
 struct item *
 item_tag_set(struct radix_tree_root *root, unsigned long index, int tag);
index 4b498265dae6dc3b52b35818453f2c895809b323..d8d94b9bd76c7c4fb62c8ebf50e0033de97012da 100644 (file)
@@ -1,20 +1,39 @@
 LIBDIR := ../../../lib
-BPFOBJ := $(LIBDIR)/bpf/bpf.o
+BPFDIR := $(LIBDIR)/bpf
+APIDIR := ../../../include/uapi
+GENDIR := ../../../../include/generated
+GENHDR := $(GENDIR)/autoconf.h
 
-CFLAGS += -Wall -O2 -lcap -I../../../include/uapi -I$(LIBDIR)
+ifneq ($(wildcard $(GENHDR)),)
+  GENFLAGS := -DHAVE_GENHDR
+endif
 
-TEST_GEN_PROGS = test_verifier test_tag test_maps test_lru_map test_lpm_map
+CFLAGS += -Wall -O2 -I$(APIDIR) -I$(LIBDIR) -I$(GENDIR) $(GENFLAGS) -I../../../include
+LDLIBS += -lcap -lelf
+
+TEST_GEN_PROGS = test_verifier test_tag test_maps test_lru_map test_lpm_map test_progs
+
+TEST_GEN_FILES = test_pkt_access.o test_xdp.o test_l4lb.o
 
 TEST_PROGS := test_kmod.sh
 
-.PHONY: all clean force
+include ../lib.mk
+
+BPFOBJ := $(OUTPUT)/libbpf.a
+
+$(TEST_GEN_PROGS): $(BPFOBJ)
+
+.PHONY: force
 
 # force a rebuild of BPFOBJ when its dependencies are updated
 force:
 
 $(BPFOBJ): force
-       $(MAKE) -C $(dir $(BPFOBJ))
+       $(MAKE) -C $(BPFDIR) OUTPUT=$(OUTPUT)/
 
-$(test_objs): $(BPFOBJ)
+CLANG ?= clang
 
-include ../lib.mk
+%.o: %.c
+       $(CLANG) -I../../../include/uapi -I../../../../samples/bpf/ \
+               -D__x86_64__ -Wno-compare-distinct-pointer-types \
+               -O2 -target bpf -c $< -o $@
diff --git a/tools/testing/selftests/bpf/test_iptunnel_common.h b/tools/testing/selftests/bpf/test_iptunnel_common.h
new file mode 100644 (file)
index 0000000..e4cd252
--- /dev/null
@@ -0,0 +1,37 @@
+/* Copyright (c) 2016 Facebook
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of version 2 of the GNU General Public
+ * License as published by the Free Software Foundation.
+ */
+#ifndef _TEST_IPTNL_COMMON_H
+#define _TEST_IPTNL_COMMON_H
+
+#include <linux/types.h>
+
+#define MAX_IPTNL_ENTRIES 256U
+
+struct vip {
+       union {
+               __u32 v6[4];
+               __u32 v4;
+       } daddr;
+       __u16 dport;
+       __u16 family;
+       __u8 protocol;
+};
+
+struct iptnl_info {
+       union {
+               __u32 v6[4];
+               __u32 v4;
+       } saddr;
+       union {
+               __u32 v6[4];
+               __u32 v4;
+       } daddr;
+       __u16 family;
+       __u8 dmac[6];
+};
+
+#endif
diff --git a/tools/testing/selftests/bpf/test_l4lb.c b/tools/testing/selftests/bpf/test_l4lb.c
new file mode 100644 (file)
index 0000000..368bfe8
--- /dev/null
@@ -0,0 +1,474 @@
+/* Copyright (c) 2017 Facebook
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of version 2 of the GNU General Public
+ * License as published by the Free Software Foundation.
+ */
+#include <stddef.h>
+#include <stdbool.h>
+#include <string.h>
+#include <linux/pkt_cls.h>
+#include <linux/bpf.h>
+#include <linux/in.h>
+#include <linux/if_ether.h>
+#include <linux/ip.h>
+#include <linux/ipv6.h>
+#include <linux/icmp.h>
+#include <linux/icmpv6.h>
+#include <linux/tcp.h>
+#include <linux/udp.h>
+#include "bpf_helpers.h"
+#include "test_iptunnel_common.h"
+
+#define htons __builtin_bswap16
+#define ntohs __builtin_bswap16
+int _version SEC("version") = 1;
+
+static inline __u32 rol32(__u32 word, unsigned int shift)
+{
+       return (word << shift) | (word >> ((-shift) & 31));
+}
+
+/* copy-paste of jhash from the kernel sources, to make sure llvm
+ * can compile it into a valid sequence of BPF instructions
+ */
+#define __jhash_mix(a, b, c)                   \
+{                                              \
+       a -= c;  a ^= rol32(c, 4);  c += b;     \
+       b -= a;  b ^= rol32(a, 6);  a += c;     \
+       c -= b;  c ^= rol32(b, 8);  b += a;     \
+       a -= c;  a ^= rol32(c, 16); c += b;     \
+       b -= a;  b ^= rol32(a, 19); a += c;     \
+       c -= b;  c ^= rol32(b, 4);  b += a;     \
+}
+
+#define __jhash_final(a, b, c)                 \
+{                                              \
+       c ^= b; c -= rol32(b, 14);              \
+       a ^= c; a -= rol32(c, 11);              \
+       b ^= a; b -= rol32(a, 25);              \
+       c ^= b; c -= rol32(b, 16);              \
+       a ^= c; a -= rol32(c, 4);               \
+       b ^= a; b -= rol32(a, 14);              \
+       c ^= b; c -= rol32(b, 24);              \
+}
+
+#define JHASH_INITVAL          0xdeadbeef
+
+typedef unsigned int u32;
+
+static inline u32 jhash(const void *key, u32 length, u32 initval)
+{
+       u32 a, b, c;
+       const unsigned char *k = key;
+
+       a = b = c = JHASH_INITVAL + length + initval;
+
+       while (length > 12) {
+               a += *(u32 *)(k);
+               b += *(u32 *)(k + 4);
+               c += *(u32 *)(k + 8);
+               __jhash_mix(a, b, c);
+               length -= 12;
+               k += 12;
+       }
+       switch (length) {
+       case 12: c += (u32)k[11]<<24;
+       case 11: c += (u32)k[10]<<16;
+       case 10: c += (u32)k[9]<<8;
+       case 9:  c += k[8];
+       case 8:  b += (u32)k[7]<<24;
+       case 7:  b += (u32)k[6]<<16;
+       case 6:  b += (u32)k[5]<<8;
+       case 5:  b += k[4];
+       case 4:  a += (u32)k[3]<<24;
+       case 3:  a += (u32)k[2]<<16;
+       case 2:  a += (u32)k[1]<<8;
+       case 1:  a += k[0];
+                __jhash_final(a, b, c);
+       case 0: /* Nothing left to add */
+               break;
+       }
+
+       return c;
+}
+
+static inline u32 __jhash_nwords(u32 a, u32 b, u32 c, u32 initval)
+{
+       a += initval;
+       b += initval;
+       c += initval;
+       __jhash_final(a, b, c);
+       return c;
+}
+
+static inline u32 jhash_2words(u32 a, u32 b, u32 initval)
+{
+       return __jhash_nwords(a, b, 0, initval + JHASH_INITVAL + (2 << 2));
+}
+
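+/* htons(IP_MF | IP_OFFSET): set for any fragmented IPv4 packet */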
+#define PCKT_FRAGMENTED 65343
+#define IPV4_HDR_LEN_NO_OPT 20
+#define IPV4_PLUS_ICMP_HDR 28
+#define IPV6_PLUS_ICMP_HDR 48
+#define RING_SIZE 2
+#define MAX_VIPS 12
+#define MAX_REALS 5
+#define CTL_MAP_SIZE 16
+#define CH_RINGS_SIZE (MAX_VIPS * RING_SIZE)
+#define F_IPV6 (1 << 0)
+#define F_HASH_NO_SRC_PORT (1 << 0)
+#define F_ICMP (1 << 0)
+#define F_SYN_SET (1 << 1)
+
+struct packet_description {
+       union {
+               __be32 src;
+               __be32 srcv6[4];
+       };
+       union {
+               __be32 dst;
+               __be32 dstv6[4];
+       };
+       union {
+               __u32 ports;
+               __u16 port16[2];
+       };
+       __u8 proto;
+       __u8 flags;
+};
+
+struct ctl_value {
+       union {
+               __u64 value;
+               __u32 ifindex;
+               __u8 mac[6];
+       };
+};
+
+struct vip_meta {
+       __u32 flags;
+       __u32 vip_num;
+};
+
+struct real_definition {
+       union {
+               __be32 dst;
+               __be32 dstv6[4];
+       };
+       __u8 flags;
+};
+
+struct vip_stats {
+       __u64 bytes;
+       __u64 pkts;
+};
+
+struct eth_hdr {
+       unsigned char eth_dest[ETH_ALEN];
+       unsigned char eth_source[ETH_ALEN];
+       unsigned short eth_proto;
+};
+
+struct bpf_map_def SEC("maps") vip_map = {
+       .type = BPF_MAP_TYPE_HASH,
+       .key_size = sizeof(struct vip),
+       .value_size = sizeof(struct vip_meta),
+       .max_entries = MAX_VIPS,
+};
+
+struct bpf_map_def SEC("maps") ch_rings = {
+       .type = BPF_MAP_TYPE_ARRAY,
+       .key_size = sizeof(__u32),
+       .value_size = sizeof(__u32),
+       .max_entries = CH_RINGS_SIZE,
+};
+
+struct bpf_map_def SEC("maps") reals = {
+       .type = BPF_MAP_TYPE_ARRAY,
+       .key_size = sizeof(__u32),
+       .value_size = sizeof(struct real_definition),
+       .max_entries = MAX_REALS,
+};
+
+struct bpf_map_def SEC("maps") stats = {
+       .type = BPF_MAP_TYPE_PERCPU_ARRAY,
+       .key_size = sizeof(__u32),
+       .value_size = sizeof(struct vip_stats),
+       .max_entries = MAX_VIPS,
+};
+
+struct bpf_map_def SEC("maps") ctl_array = {
+       .type = BPF_MAP_TYPE_ARRAY,
+       .key_size = sizeof(__u32),
+       .value_size = sizeof(struct ctl_value),
+       .max_entries = CTL_MAP_SIZE,
+};
+
+static __always_inline __u32 get_packet_hash(struct packet_description *pckt,
+                                            bool ipv6)
+{
+       if (ipv6)
+               return jhash_2words(jhash(pckt->srcv6, 16, MAX_VIPS),
+                                   pckt->ports, CH_RINGS_SIZE);
+       else
+               return jhash_2words(pckt->src, pckt->ports, CH_RINGS_SIZE);
+}
+
+static __always_inline bool get_packet_dst(struct real_definition **real,
+                                          struct packet_description *pckt,
+                                          struct vip_meta *vip_info,
+                                          bool is_ipv6)
+{
+       __u32 hash = get_packet_hash(pckt, is_ipv6) % RING_SIZE;
+       __u32 key = RING_SIZE * vip_info->vip_num + hash;
+       __u32 *real_pos;
+
+       real_pos = bpf_map_lookup_elem(&ch_rings, &key);
+       if (!real_pos)
+               return false;
+       key = *real_pos;
+       *real = bpf_map_lookup_elem(&reals, &key);
+       if (!(*real))
+               return false;
+       return true;
+}
+
+static __always_inline int parse_icmpv6(void *data, void *data_end, __u64 off,
+                                       struct packet_description *pckt)
+{
+       struct icmp6hdr *icmp_hdr;
+       struct ipv6hdr *ip6h;
+
+       icmp_hdr = data + off;
+       if (icmp_hdr + 1 > data_end)
+               return TC_ACT_SHOT;
+       if (icmp_hdr->icmp6_type != ICMPV6_PKT_TOOBIG)
+               return TC_ACT_OK;
+       off += sizeof(struct icmp6hdr);
+       ip6h = data + off;
+       if (ip6h + 1 > data_end)
+               return TC_ACT_SHOT;
+       pckt->proto = ip6h->nexthdr;
+       pckt->flags |= F_ICMP;
+       memcpy(pckt->srcv6, ip6h->daddr.s6_addr32, 16);
+       memcpy(pckt->dstv6, ip6h->saddr.s6_addr32, 16);
+       return TC_ACT_UNSPEC;
+}
+
+static __always_inline int parse_icmp(void *data, void *data_end, __u64 off,
+                                     struct packet_description *pckt)
+{
+       struct icmphdr *icmp_hdr;
+       struct iphdr *iph;
+
+       icmp_hdr = data + off;
+       if (icmp_hdr + 1 > data_end)
+               return TC_ACT_SHOT;
+       if (icmp_hdr->type != ICMP_DEST_UNREACH ||
+           icmp_hdr->code != ICMP_FRAG_NEEDED)
+               return TC_ACT_OK;
+       off += sizeof(struct icmphdr);
+       iph = data + off;
+       if (iph + 1 > data_end)
+               return TC_ACT_SHOT;
+       if (iph->ihl != 5)
+               return TC_ACT_SHOT;
+       pckt->proto = iph->protocol;
+       pckt->flags |= F_ICMP;
+       pckt->src = iph->daddr;
+       pckt->dst = iph->saddr;
+       return TC_ACT_UNSPEC;
+}
+
+static __always_inline bool parse_udp(void *data, __u64 off, void *data_end,
+                                     struct packet_description *pckt)
+{
+       struct udphdr *udp;
+
+       udp = data + off;
+
+       if (udp + 1 > data_end)
+               return false;
+
+       if (!(pckt->flags & F_ICMP)) {
+               pckt->port16[0] = udp->source;
+               pckt->port16[1] = udp->dest;
+       } else {
+               pckt->port16[0] = udp->dest;
+               pckt->port16[1] = udp->source;
+       }
+       return true;
+}
+
+static __always_inline bool parse_tcp(void *data, __u64 off, void *data_end,
+                                     struct packet_description *pckt)
+{
+       struct tcphdr *tcp;
+
+       tcp = data + off;
+       if (tcp + 1 > data_end)
+               return false;
+
+       if (tcp->syn)
+               pckt->flags |= F_SYN_SET;
+
+       if (!(pckt->flags & F_ICMP)) {
+               pckt->port16[0] = tcp->source;
+               pckt->port16[1] = tcp->dest;
+       } else {
+               pckt->port16[0] = tcp->dest;
+               pckt->port16[1] = tcp->source;
+       }
+       return true;
+}
+
+static __always_inline int process_packet(void *data, __u64 off, void *data_end,
+                                         bool is_ipv6, struct __sk_buff *skb)
+{
+       void *pkt_start = (void *)(long)skb->data;
+       struct packet_description pckt = {};
+       struct eth_hdr *eth = pkt_start;
+       struct bpf_tunnel_key tkey = {};
+       struct vip_stats *data_stats;
+       struct real_definition *dst;
+       struct vip_meta *vip_info;
+       struct ctl_value *cval;
+       __u32 v4_intf_pos = 1;
+       __u32 v6_intf_pos = 2;
+       struct ipv6hdr *ip6h;
+       struct vip vip = {};
+       struct iphdr *iph;
+       int tun_flag = 0;
+       __u16 pkt_bytes;
+       __u64 iph_len;
+       __u32 ifindex;
+       __u8 protocol;
+       __u32 vip_num;
+       int action;
+
+       tkey.tunnel_ttl = 64;
+       if (is_ipv6) {
+               ip6h = data + off;
+               if (ip6h + 1 > data_end)
+                       return TC_ACT_SHOT;
+
+               iph_len = sizeof(struct ipv6hdr);
+               protocol = ip6h->nexthdr;
+               pckt.proto = protocol;
+               pkt_bytes = ntohs(ip6h->payload_len);
+               off += iph_len;
+               if (protocol == IPPROTO_FRAGMENT) {
+                       return TC_ACT_SHOT;
+               } else if (protocol == IPPROTO_ICMPV6) {
+                       action = parse_icmpv6(data, data_end, off, &pckt);
+                       if (action >= 0)
+                               return action;
+                       off += IPV6_PLUS_ICMP_HDR;
+               } else {
+                       memcpy(pckt.srcv6, ip6h->saddr.s6_addr32, 16);
+                       memcpy(pckt.dstv6, ip6h->daddr.s6_addr32, 16);
+               }
+       } else {
+               iph = data + off;
+               if (iph + 1 > data_end)
+                       return TC_ACT_SHOT;
+               if (iph->ihl != 5)
+                       return TC_ACT_SHOT;
+
+               protocol = iph->protocol;
+               pckt.proto = protocol;
+               pkt_bytes = ntohs(iph->tot_len);
+               off += IPV4_HDR_LEN_NO_OPT;
+
+               if (iph->frag_off & PCKT_FRAGMENTED)
+                       return TC_ACT_SHOT;
+               if (protocol == IPPROTO_ICMP) {
+                       action = parse_icmp(data, data_end, off, &pckt);
+                       if (action >= 0)
+                               return action;
+                       off += IPV4_PLUS_ICMP_HDR;
+               } else {
+                       pckt.src = iph->saddr;
+                       pckt.dst = iph->daddr;
+               }
+       }
+       protocol = pckt.proto;
+
+       if (protocol == IPPROTO_TCP) {
+               if (!parse_tcp(data, off, data_end, &pckt))
+                       return TC_ACT_SHOT;
+       } else if (protocol == IPPROTO_UDP) {
+               if (!parse_udp(data, off, data_end, &pckt))
+                       return TC_ACT_SHOT;
+       } else {
+               return TC_ACT_SHOT;
+       }
+
+       if (is_ipv6)
+               memcpy(vip.daddr.v6, pckt.dstv6, 16);
+       else
+               vip.daddr.v4 = pckt.dst;
+
+       vip.dport = pckt.port16[1];
+       vip.protocol = pckt.proto;
+       vip_info = bpf_map_lookup_elem(&vip_map, &vip);
+       if (!vip_info) {
+               vip.dport = 0;
+               vip_info = bpf_map_lookup_elem(&vip_map, &vip);
+               if (!vip_info)
+                       return TC_ACT_SHOT;
+               pckt.port16[1] = 0;
+       }
+
+       if (vip_info->flags & F_HASH_NO_SRC_PORT)
+               pckt.port16[0] = 0;
+
+       if (!get_packet_dst(&dst, &pckt, vip_info, is_ipv6))
+               return TC_ACT_SHOT;
+
+       if (dst->flags & F_IPV6) {
+               cval = bpf_map_lookup_elem(&ctl_array, &v6_intf_pos);
+               if (!cval)
+                       return TC_ACT_SHOT;
+               ifindex = cval->ifindex;
+               memcpy(tkey.remote_ipv6, dst->dstv6, 16);
+               tun_flag = BPF_F_TUNINFO_IPV6;
+       } else {
+               cval = bpf_map_lookup_elem(&ctl_array, &v4_intf_pos);
+               if (!cval)
+                       return TC_ACT_SHOT;
+               ifindex = cval->ifindex;
+               tkey.remote_ipv4 = dst->dst;
+       }
+       vip_num = vip_info->vip_num;
+       data_stats = bpf_map_lookup_elem(&stats, &vip_num);
+       if (!data_stats)
+               return TC_ACT_SHOT;
+       data_stats->pkts++;
+       data_stats->bytes += pkt_bytes;
+       bpf_skb_set_tunnel_key(skb, &tkey, sizeof(tkey), tun_flag);
+       *(u32 *)eth->eth_dest = tkey.remote_ipv4;
+       return bpf_redirect(ifindex, 0);
+}
+
+SEC("l4lb-demo")
+int balancer_ingress(struct __sk_buff *ctx)
+{
+       void *data_end = (void *)(long)ctx->data_end;
+       void *data = (void *)(long)ctx->data;
+       struct eth_hdr *eth = data;
+       __u32 eth_proto;
+       __u32 nh_off;
+
+       nh_off = sizeof(struct eth_hdr);
+       if (data + nh_off > data_end)
+               return TC_ACT_SHOT;
+       eth_proto = eth->eth_proto;
+       if (eth_proto == htons(ETH_P_IP))
+               return process_packet(data, nh_off, data_end, false, ctx);
+       else if (eth_proto == htons(ETH_P_IPV6))
+               return process_packet(data, nh_off, data_end, true, ctx);
+       else
+               return TC_ACT_SHOT;
+}
+char _license[] SEC("license") = "GPL";
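In outline, the consistent-hash lookup the program performs per packet (lifted from get_packet_hash() and get_packet_dst() above):

        __u32 hash = get_packet_hash(&pckt, is_ipv6) % RING_SIZE;
        __u32 key = RING_SIZE * vip_info->vip_num + hash; /* per-VIP ring slot */
        real_pos = bpf_map_lookup_elem(&ch_rings, &key);  /* slot -> real index */
        key = *real_pos;
        *real = bpf_map_lookup_elem(&reals, &key);        /* index -> backend */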
index cada17ac00b8e6b5af37554ea8489be6ffc873a2..a0aa2009b0e0a81e65672eb795039a2172484c55 100644 (file)
@@ -80,8 +80,9 @@ static void test_hashmap(int task, void *data)
        assert(bpf_map_update_elem(fd, &key, &value, BPF_EXIST) == 0);
        key = 2;
        assert(bpf_map_update_elem(fd, &key, &value, BPF_ANY) == 0);
-       key = 1;
-       assert(bpf_map_update_elem(fd, &key, &value, BPF_ANY) == 0);
+       key = 3;
+       assert(bpf_map_update_elem(fd, &key, &value, BPF_NOEXIST) == -1 &&
+              errno == E2BIG);
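(This holds because test_hashmap creates the map with max_entries = 2 earlier in the function, so with keys 1 and 2 in place a BPF_NOEXIST insert of key 3 must fail with E2BIG rather than succeed or evict.)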
 
        /* Check that key = 0 doesn't exist. */
        key = 0;
@@ -110,6 +111,24 @@ static void test_hashmap(int task, void *data)
        close(fd);
 }
 
+static void test_hashmap_sizes(int task, void *data)
+{
+       int fd, i, j;
+
+       for (i = 1; i <= 512; i <<= 1)
+               for (j = 1; j <= 1 << 18; j <<= 1) {
+                       fd = bpf_create_map(BPF_MAP_TYPE_HASH, i, j,
+                                           2, map_flags);
+                       if (fd < 0) {
+                               printf("Failed to create hashmap key=%d value=%d '%s'\n",
+                                      i, j, strerror(errno));
+                               exit(1);
+                       }
+                       close(fd);
+                       usleep(10); /* give kernel time to destroy */
+               }
+}
+
 static void test_hashmap_percpu(int task, void *data)
 {
        unsigned int nr_cpus = bpf_num_possible_cpus();
@@ -317,7 +336,10 @@ static void test_arraymap_percpu(int task, void *data)
 static void test_arraymap_percpu_many_keys(void)
 {
        unsigned int nr_cpus = bpf_num_possible_cpus();
-       unsigned int nr_keys = 20000;
+       /* Keep nr_keys modest; otherwise the test stresses the percpu
+        * allocator more than anything else.
+        */
+       unsigned int nr_keys = 2000;
        long values[nr_cpus];
        int key, fd, i;
 
@@ -419,6 +441,7 @@ static void test_map_stress(void)
 {
        run_parallel(100, test_hashmap, NULL);
        run_parallel(100, test_hashmap_percpu, NULL);
+       run_parallel(100, test_hashmap_sizes, NULL);
 
        run_parallel(100, test_arraymap, NULL);
        run_parallel(100, test_arraymap_percpu, NULL);
diff --git a/tools/testing/selftests/bpf/test_pkt_access.c b/tools/testing/selftests/bpf/test_pkt_access.c
new file mode 100644 (file)
index 0000000..fd1e083
--- /dev/null
@@ -0,0 +1,64 @@
+/* Copyright (c) 2017 Facebook
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of version 2 of the GNU General Public
+ * License as published by the Free Software Foundation.
+ */
+#include <stddef.h>
+#include <linux/bpf.h>
+#include <linux/if_ether.h>
+#include <linux/if_packet.h>
+#include <linux/ip.h>
+#include <linux/ipv6.h>
+#include <linux/in.h>
+#include <linux/tcp.h>
+#include <linux/pkt_cls.h>
+#include "bpf_helpers.h"
+
+#define _htons __builtin_bswap16
+#define barrier() __asm__ __volatile__("": : :"memory")
+int _version SEC("version") = 1;
+
+SEC("test1")
+int process(struct __sk_buff *skb)
+{
+       void *data_end = (void *)(long)skb->data_end;
+       void *data = (void *)(long)skb->data;
+       struct ethhdr *eth = (struct ethhdr *)(data);
+       struct tcphdr *tcp = NULL;
+       __u8 proto = 255;
+       __u64 ihl_len;
+
+       if (eth + 1 > data_end)
+               return TC_ACT_SHOT;
+
+       if (eth->h_proto == _htons(ETH_P_IP)) {
+               struct iphdr *iph = (struct iphdr *)(eth + 1);
+
+               if (iph + 1 > data_end)
+                       return TC_ACT_SHOT;
+               ihl_len = iph->ihl * 4;
+               proto = iph->protocol;
+               tcp = (struct tcphdr *)((void *)(iph) + ihl_len);
+       } else if (eth->h_proto == _htons(ETH_P_IPV6)) {
+               struct ipv6hdr *ip6h = (struct ipv6hdr *)(eth + 1);
+
+               if (ip6h + 1 > data_end)
+                       return TC_ACT_SHOT;
+               ihl_len = sizeof(*ip6h);
+               proto = ip6h->nexthdr;
+               tcp = (struct tcphdr *)((void *)(ip6h) + ihl_len);
+       }
+
+       if (tcp) {
+               if (((void *)(tcp) + 20) > data_end || proto != 6)
+                       return TC_ACT_SHOT;
+               barrier(); /* to force ordering of checks */
+               if (((void *)(tcp) + 18) > data_end)
+                       return TC_ACT_SHOT;
+               if (tcp->urg_ptr == 123)
+                       return TC_ACT_OK;
+       }
+
+       return TC_ACT_UNSPEC;
+}
diff --git a/tools/testing/selftests/bpf/test_progs.c b/tools/testing/selftests/bpf/test_progs.c
new file mode 100644 (file)
index 0000000..5275d4a
--- /dev/null
@@ -0,0 +1,284 @@
+/* Copyright (c) 2017 Facebook
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of version 2 of the GNU General Public
+ * License as published by the Free Software Foundation.
+ */
+#include <stdio.h>
+#include <unistd.h>
+#include <errno.h>
+#include <string.h>
+#include <assert.h>
+#include <stdlib.h>
+
+#include <linux/types.h>
+typedef __u16 __sum16;
+#include <arpa/inet.h>
+#include <linux/if_ether.h>
+#include <linux/if_packet.h>
+#include <linux/ip.h>
+#include <linux/ipv6.h>
+#include <linux/tcp.h>
+
+#include <sys/wait.h>
+#include <sys/resource.h>
+
+#include <linux/bpf.h>
+#include <linux/err.h>
+#include <bpf/bpf.h>
+#include <bpf/libbpf.h>
+#include "test_iptunnel_common.h"
+#include "bpf_util.h"
+
+#define _htons __builtin_bswap16
+
+static int error_cnt, pass_cnt;
+
+#define MAGIC_BYTES 123
+
+/* ipv4 test vector */
+static struct {
+       struct ethhdr eth;
+       struct iphdr iph;
+       struct tcphdr tcp;
+} __packed pkt_v4 = {
+       .eth.h_proto = _htons(ETH_P_IP),
+       .iph.ihl = 5,
+       .iph.protocol = 6,
+       .iph.tot_len = _htons(MAGIC_BYTES),
+       .tcp.urg_ptr = 123,
+};
+
+/* ipv6 test vector */
+static struct {
+       struct ethhdr eth;
+       struct ipv6hdr iph;
+       struct tcphdr tcp;
+} __packed pkt_v6 = {
+       .eth.h_proto = _htons(ETH_P_IPV6),
+       .iph.nexthdr = 6,
+       .iph.payload_len = _htons(MAGIC_BYTES),
+       .tcp.urg_ptr = 123,
+};
+
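+/* Note: CHECK() reads a "duration" variable from the calling scope. */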
+#define CHECK(condition, tag, format...) ({                            \
+       int __ret = !!(condition);                                      \
+       if (__ret) {                                                    \
+               error_cnt++;                                            \
+               printf("%s:FAIL:%s ", __func__, tag);                   \
+               printf(format);                                         \
+       } else {                                                        \
+               pass_cnt++;                                             \
+               printf("%s:PASS:%s %d nsec\n", __func__, tag, duration);\
+       }                                                               \
+})
+
+static int bpf_prog_load(const char *file, enum bpf_prog_type type,
+                        struct bpf_object **pobj, int *prog_fd)
+{
+       struct bpf_program *prog;
+       struct bpf_object *obj;
+       int err;
+
+       obj = bpf_object__open(file);
+       if (IS_ERR(obj)) {
+               error_cnt++;
+               return -ENOENT;
+       }
+
+       prog = bpf_program__next(NULL, obj);
+       if (!prog) {
+               bpf_object__close(obj);
+               error_cnt++;
+               return -ENOENT;
+       }
+
+       bpf_program__set_type(prog, type);
+       err = bpf_object__load(obj);
+       if (err) {
+               bpf_object__close(obj);
+               error_cnt++;
+               return -EINVAL;
+       }
+
+       *pobj = obj;
+       *prog_fd = bpf_program__fd(prog);
+       return 0;
+}
+
+static int bpf_find_map(const char *test, struct bpf_object *obj,
+                       const char *name)
+{
+       struct bpf_map *map;
+
+       map = bpf_object__find_map_by_name(obj, name);
+       if (!map) {
+               printf("%s:FAIL:map '%s' not found\n", test, name);
+               error_cnt++;
+               return -1;
+       }
+       return bpf_map__fd(map);
+}
+
+static void test_pkt_access(void)
+{
+       const char *file = "./test_pkt_access.o";
+       struct bpf_object *obj;
+       __u32 duration, retval;
+       int err, prog_fd;
+
+       err = bpf_prog_load(file, BPF_PROG_TYPE_SCHED_CLS, &obj, &prog_fd);
+       if (err)
+               return;
+
+       err = bpf_prog_test_run(prog_fd, 100000, &pkt_v4, sizeof(pkt_v4),
+                               NULL, NULL, &retval, &duration);
+       CHECK(err || errno || retval, "ipv4",
+             "err %d errno %d retval %d duration %d\n",
+             err, errno, retval, duration);
+
+       err = bpf_prog_test_run(prog_fd, 100000, &pkt_v6, sizeof(pkt_v6),
+                               NULL, NULL, &retval, &duration);
+       CHECK(err || errno || retval, "ipv6",
+             "err %d errno %d retval %d duration %d\n",
+             err, errno, retval, duration);
+       bpf_object__close(obj);
+}
+
+static void test_xdp(void)
+{
+       struct vip key4 = {.protocol = 6, .family = AF_INET};
+       struct vip key6 = {.protocol = 6, .family = AF_INET6};
+       struct iptnl_info value4 = {.family = AF_INET};
+       struct iptnl_info value6 = {.family = AF_INET6};
+       const char *file = "./test_xdp.o";
+       struct bpf_object *obj;
+       char buf[128];
+       struct ipv6hdr *iph6 = (void *)buf + sizeof(struct ethhdr);
+       struct iphdr *iph = (void *)buf + sizeof(struct ethhdr);
+       __u32 duration, retval, size;
+       int err, prog_fd, map_fd;
+
+       err = bpf_prog_load(file, BPF_PROG_TYPE_XDP, &obj, &prog_fd);
+       if (err)
+               return;
+
+       map_fd = bpf_find_map(__func__, obj, "vip2tnl");
+       if (map_fd < 0)
+               goto out;
+       bpf_map_update_elem(map_fd, &key4, &value4, 0);
+       bpf_map_update_elem(map_fd, &key6, &value6, 0);
+
+       err = bpf_prog_test_run(prog_fd, 1, &pkt_v4, sizeof(pkt_v4),
+                               buf, &size, &retval, &duration);
+
+       CHECK(err || errno || retval != XDP_TX || size != 74 ||
+             iph->protocol != IPPROTO_IPIP, "ipv4",
+             "err %d errno %d retval %d size %d\n",
+             err, errno, retval, size);
+
+       err = bpf_prog_test_run(prog_fd, 1, &pkt_v6, sizeof(pkt_v6),
+                               buf, &size, &retval, &duration);
+       CHECK(err || errno || retval != XDP_TX || size != 114 ||
+             iph6->nexthdr != IPPROTO_IPV6, "ipv6",
+             "err %d errno %d retval %d size %d\n",
+             err, errno, retval, size);
+out:
+       bpf_object__close(obj);
+}
+
+#define MAGIC_VAL 0x1234
+#define NUM_ITER 100000
+#define VIP_NUM 5
+
+static void test_l4lb(void)
+{
+       unsigned int nr_cpus = bpf_num_possible_cpus();
+       const char *file = "./test_l4lb.o";
+       struct vip key = {.protocol = 6};
+       struct vip_meta {
+               __u32 flags;
+               __u32 vip_num;
+       } value = {.vip_num = VIP_NUM};
+       __u32 stats_key = VIP_NUM;
+       struct vip_stats {
+               __u64 bytes;
+               __u64 pkts;
+       } stats[nr_cpus];
+       struct real_definition {
+               union {
+                       __be32 dst;
+                       __be32 dstv6[4];
+               };
+               __u8 flags;
+       } real_def = {.dst = MAGIC_VAL};
+       __u32 ch_key = 11, real_num = 3;
+       __u32 duration, retval, size;
+       int err, i, prog_fd, map_fd;
+       __u64 bytes = 0, pkts = 0;
+       struct bpf_object *obj;
+       char buf[128];
+       u32 *magic = (u32 *)buf;
+
+       err = bpf_prog_load(file, BPF_PROG_TYPE_SCHED_CLS, &obj, &prog_fd);
+       if (err)
+               return;
+
+       map_fd = bpf_find_map(__func__, obj, "vip_map");
+       if (map_fd < 0)
+               goto out;
+       bpf_map_update_elem(map_fd, &key, &value, 0);
+
+       map_fd = bpf_find_map(__func__, obj, "ch_rings");
+       if (map_fd < 0)
+               goto out;
+       bpf_map_update_elem(map_fd, &ch_key, &real_num, 0);
+
+       map_fd = bpf_find_map(__func__, obj, "reals");
+       if (map_fd < 0)
+               goto out;
+       bpf_map_update_elem(map_fd, &real_num, &real_def, 0);
+
+       err = bpf_prog_test_run(prog_fd, NUM_ITER, &pkt_v4, sizeof(pkt_v4),
+                               buf, &size, &retval, &duration);
+       CHECK(err || errno || retval != 7/*TC_ACT_REDIRECT*/ || size != 54 ||
+             *magic != MAGIC_VAL, "ipv4",
+             "err %d errno %d retval %d size %d magic %x\n",
+             err, errno, retval, size, *magic);
+
+       err = bpf_prog_test_run(prog_fd, NUM_ITER, &pkt_v6, sizeof(pkt_v6),
+                               buf, &size, &retval, &duration);
+       CHECK(err || errno || retval != 7/*TC_ACT_REDIRECT*/ || size != 74 ||
+             *magic != MAGIC_VAL, "ipv6",
+             "err %d errno %d retval %d size %d magic %x\n",
+             err, errno, retval, size, *magic);
+
+       map_fd = bpf_find_map(__func__, obj, "stats");
+       if (map_fd < 0)
+               goto out;
+       bpf_map_lookup_elem(map_fd, &stats_key, stats);
+       for (i = 0; i < nr_cpus; i++) {
+               bytes += stats[i].bytes;
+               pkts += stats[i].pkts;
+       }
+       if (bytes != MAGIC_BYTES * NUM_ITER * 2 || pkts != NUM_ITER * 2) {
+               error_cnt++;
+               printf("test_l4lb:FAIL:stats %lld %lld\n", bytes, pkts);
+       }
+out:
+       bpf_object__close(obj);
+}
+
+int main(void)
+{
+       struct rlimit rinf = { RLIM_INFINITY, RLIM_INFINITY };
+
+       setrlimit(RLIMIT_MEMLOCK, &rinf);
+
+       test_pkt_access();
+       test_xdp();
+       test_l4lb();
+
+       printf("Summary: %d PASSED, %d FAILED\n", pass_cnt, error_cnt);
+       return 0;
+}
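A new test for this runner follows the same shape as test_pkt_access(): load the object, exercise it with bpf_prog_test_run(), and assert with CHECK(). A hypothetical skeleton, where "./my_test.o" and the expected zero retval are placeholders rather than part of this patch:

/* Hypothetical skeleton for an additional test in this file; the object
 * name and expected retval are illustrative assumptions.
 */
static void test_my_prog(void)
{
	const char *file = "./my_test.o";
	struct bpf_object *obj;
	__u32 duration, retval;		/* CHECK() expects "duration" in scope */
	int err, prog_fd;

	err = bpf_prog_load(file, BPF_PROG_TYPE_SCHED_CLS, &obj, &prog_fd);
	if (err)
		return;		/* bpf_prog_load already bumped error_cnt */

	err = bpf_prog_test_run(prog_fd, 1, &pkt_v4, sizeof(pkt_v4),
				NULL, NULL, &retval, &duration);
	CHECK(err || retval, "my_test", "err %d retval %d\n", err, retval);

	bpf_object__close(obj);
}

The skeleton would also need a call added in main() alongside the existing tests.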
index e1f5b9eea1e874ab7f4698a1e20abc5588fe00bb..6178b65fee5941f9566fa711bd4ac77f34b816ad 100644 (file)
@@ -8,6 +8,8 @@
  * License as published by the Free Software Foundation.
  */
 
+#include <asm/types.h>
+#include <linux/types.h>
 #include <stdint.h>
 #include <stdio.h>
 #include <stdlib.h>
 
 #include <bpf/bpf.h>
 
+#ifdef HAVE_GENHDR
+# include "autoconf.h"
+#else
+# if defined(__i386) || defined(__x86_64) || defined(__s390x__) || defined(__aarch64__)
+#  define CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS 1
+# endif
+#endif
+
 #include "../../../include/linux/filter.h"
 
 #ifndef ARRAY_SIZE
@@ -36,6 +46,9 @@
 
 #define MAX_INSNS      512
 #define MAX_FIXUPS     8
+#define MAX_NR_MAPS    4
+
+#define F_NEEDS_EFFICIENT_UNALIGNED_ACCESS     (1 << 0)
 
 struct bpf_test {
        const char *descr;
@@ -43,6 +56,7 @@ struct bpf_test {
        int fixup_map1[MAX_FIXUPS];
        int fixup_map2[MAX_FIXUPS];
        int fixup_prog[MAX_FIXUPS];
+       int fixup_map_in_map[MAX_FIXUPS];
        const char *errstr;
        const char *errstr_unpriv;
        enum {
@@ -51,6 +65,7 @@ struct bpf_test {
                REJECT
        } result, result_unpriv;
        enum bpf_prog_type prog_type;
+       uint8_t flags;
 };
 
 /* Note we want this to be 64 bit aligned so that the end of our array is
@@ -2429,6 +2444,30 @@ static struct bpf_test tests[] = {
                .result = ACCEPT,
                .prog_type = BPF_PROG_TYPE_SCHED_CLS,
        },
+       {
+               "direct packet access: test15 (spill with xadd)",
+               .insns = {
+                       BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+                                   offsetof(struct __sk_buff, data)),
+                       BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+                                   offsetof(struct __sk_buff, data_end)),
+                       BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
+                       BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 8),
+                       BPF_MOV64_IMM(BPF_REG_5, 4096),
+                       BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
+                       BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
+                       BPF_STX_XADD(BPF_DW, BPF_REG_4, BPF_REG_5, 0),
+                       BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_4, 0),
+                       BPF_STX_MEM(BPF_W, BPF_REG_2, BPF_REG_5, 0),
+                       BPF_MOV64_IMM(BPF_REG_0, 0),
+                       BPF_EXIT_INSN(),
+               },
+               .errstr = "R2 invalid mem access 'inv'",
+               .result = REJECT,
+               .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+       },
        {
                "helper access to packet: test1, valid packet_ptr range",
                .insns = {
@@ -2932,6 +2971,7 @@ static struct bpf_test tests[] = {
                .errstr_unpriv = "R0 pointer arithmetic prohibited",
                .result_unpriv = REJECT,
                .result = ACCEPT,
+               .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
        },
        {
                "valid map access into an array with a variable",
@@ -2955,6 +2995,7 @@ static struct bpf_test tests[] = {
                .errstr_unpriv = "R0 pointer arithmetic prohibited",
                .result_unpriv = REJECT,
                .result = ACCEPT,
+               .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
        },
        {
                "valid map access into an array with a signed variable",
@@ -2982,6 +3023,7 @@ static struct bpf_test tests[] = {
                .errstr_unpriv = "R0 pointer arithmetic prohibited",
                .result_unpriv = REJECT,
                .result = ACCEPT,
+               .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
        },
        {
                "invalid map access into an array with a constant",
@@ -3023,6 +3065,7 @@ static struct bpf_test tests[] = {
                .errstr = "R0 min value is outside of the array range",
                .result_unpriv = REJECT,
                .result = REJECT,
+               .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
        },
        {
                "invalid map access into an array with a variable",
@@ -3046,6 +3089,7 @@ static struct bpf_test tests[] = {
                .errstr = "R0 min value is negative, either use unsigned index or do a if (index >=0) check.",
                .result_unpriv = REJECT,
                .result = REJECT,
+               .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
        },
        {
                "invalid map access into an array with no floor check",
@@ -3072,6 +3116,7 @@ static struct bpf_test tests[] = {
                .errstr = "R0 min value is negative, either use unsigned index or do a if (index >=0) check.",
                .result_unpriv = REJECT,
                .result = REJECT,
+               .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
        },
        {
                "invalid map access into an array with a invalid max check",
@@ -3098,6 +3143,7 @@ static struct bpf_test tests[] = {
                .errstr = "invalid access to map value, value_size=48 off=44 size=8",
                .result_unpriv = REJECT,
                .result = REJECT,
+               .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
        },
        {
                "invalid map access into an array with a invalid max check",
@@ -3127,6 +3173,7 @@ static struct bpf_test tests[] = {
                .errstr = "R0 min value is negative, either use unsigned index or do a if (index >=0) check.",
                .result_unpriv = REJECT,
                .result = REJECT,
+               .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
        },
        {
                "multiple registers share map_lookup_elem result",
@@ -3250,6 +3297,7 @@ static struct bpf_test tests[] = {
                .result = REJECT,
                .errstr_unpriv = "R0 pointer arithmetic prohibited",
                .result_unpriv = REJECT,
+               .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
        },
        {
                "constant register |= constant should keep constant type",
@@ -3415,6 +3463,26 @@ static struct bpf_test tests[] = {
                .result = ACCEPT,
                .prog_type = BPF_PROG_TYPE_LWT_XMIT,
        },
+       {
+               "overlapping checks for direct packet access",
+               .insns = {
+                       BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+                                   offsetof(struct __sk_buff, data)),
+                       BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+                                   offsetof(struct __sk_buff, data_end)),
+                       BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
+                       BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 4),
+                       BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 6),
+                       BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 1),
+                       BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_2, 6),
+                       BPF_MOV64_IMM(BPF_REG_0, 0),
+                       BPF_EXIT_INSN(),
+               },
+               .result = ACCEPT,
+               .prog_type = BPF_PROG_TYPE_LWT_XMIT,
+       },
        {
                "invalid access of tc_classid for LWT_IN",
                .insns = {
@@ -3959,7 +4027,208 @@ static struct bpf_test tests[] = {
                .result_unpriv = REJECT,
        },
        {
-               "map element value (adjusted) is preserved across register spilling",
+               "map element value or null is marked on register spilling",
+               .insns = {
+                       BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+                       BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
+                       BPF_LD_MAP_FD(BPF_REG_1, 0),
+                       BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
+                       BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -152),
+                       BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, 0),
+                       BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
+                       BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_1, 0),
+                       BPF_ST_MEM(BPF_DW, BPF_REG_3, 0, 42),
+                       BPF_EXIT_INSN(),
+               },
+               .fixup_map2 = { 3 },
+               .errstr_unpriv = "R0 leaks addr",
+               .result = ACCEPT,
+               .result_unpriv = REJECT,
+       },
+       {
+               "map element value store of cleared call register",
+               .insns = {
+                       BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+                       BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
+                       BPF_LD_MAP_FD(BPF_REG_1, 0),
+                       BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
+                       BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
+                       BPF_STX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 0),
+                       BPF_EXIT_INSN(),
+               },
+               .fixup_map2 = { 3 },
+               .errstr_unpriv = "R1 !read_ok",
+               .errstr = "R1 !read_ok",
+               .result = REJECT,
+               .result_unpriv = REJECT,
+       },
+       {
+               "map element value with unaligned store",
+               .insns = {
+                       BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+                       BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
+                       BPF_LD_MAP_FD(BPF_REG_1, 0),
+                       BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
+                       BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 17),
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 3),
+                       BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 42),
+                       BPF_ST_MEM(BPF_DW, BPF_REG_0, 2, 43),
+                       BPF_ST_MEM(BPF_DW, BPF_REG_0, -2, 44),
+                       BPF_MOV64_REG(BPF_REG_8, BPF_REG_0),
+                       BPF_ST_MEM(BPF_DW, BPF_REG_8, 0, 32),
+                       BPF_ST_MEM(BPF_DW, BPF_REG_8, 2, 33),
+                       BPF_ST_MEM(BPF_DW, BPF_REG_8, -2, 34),
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_8, 5),
+                       BPF_ST_MEM(BPF_DW, BPF_REG_8, 0, 22),
+                       BPF_ST_MEM(BPF_DW, BPF_REG_8, 4, 23),
+                       BPF_ST_MEM(BPF_DW, BPF_REG_8, -7, 24),
+                       BPF_MOV64_REG(BPF_REG_7, BPF_REG_8),
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_7, 3),
+                       BPF_ST_MEM(BPF_DW, BPF_REG_7, 0, 22),
+                       BPF_ST_MEM(BPF_DW, BPF_REG_7, 4, 23),
+                       BPF_ST_MEM(BPF_DW, BPF_REG_7, -4, 24),
+                       BPF_EXIT_INSN(),
+               },
+               .fixup_map2 = { 3 },
+               .errstr_unpriv = "R0 pointer arithmetic prohibited",
+               .result = ACCEPT,
+               .result_unpriv = REJECT,
+               .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+       },
+       {
+               "map element value with unaligned load",
+               .insns = {
+                       BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+                       BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
+                       BPF_LD_MAP_FD(BPF_REG_1, 0),
+                       BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
+                       BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 11),
+                       BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
+                       BPF_JMP_IMM(BPF_JGE, BPF_REG_1, MAX_ENTRIES, 9),
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 3),
+                       BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0),
+                       BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 2),
+                       BPF_MOV64_REG(BPF_REG_8, BPF_REG_0),
+                       BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_8, 0),
+                       BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_8, 2),
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 5),
+                       BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0),
+                       BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 4),
+                       BPF_EXIT_INSN(),
+               },
+               .fixup_map2 = { 3 },
+               .errstr_unpriv = "R0 pointer arithmetic prohibited",
+               .result = ACCEPT,
+               .result_unpriv = REJECT,
+               .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+       },
+       {
+               "map element value illegal alu op, 1",
+               .insns = {
+                       BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+                       BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
+                       BPF_LD_MAP_FD(BPF_REG_1, 0),
+                       BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
+                       BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
+                       BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 8),
+                       BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 22),
+                       BPF_EXIT_INSN(),
+               },
+               .fixup_map2 = { 3 },
+               .errstr_unpriv = "R0 pointer arithmetic prohibited",
+               .errstr = "invalid mem access 'inv'",
+               .result = REJECT,
+               .result_unpriv = REJECT,
+       },
+       {
+               "map element value illegal alu op, 2",
+               .insns = {
+                       BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+                       BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
+                       BPF_LD_MAP_FD(BPF_REG_1, 0),
+                       BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
+                       BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
+                       BPF_ALU32_IMM(BPF_ADD, BPF_REG_0, 0),
+                       BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 22),
+                       BPF_EXIT_INSN(),
+               },
+               .fixup_map2 = { 3 },
+               .errstr_unpriv = "R0 pointer arithmetic prohibited",
+               .errstr = "invalid mem access 'inv'",
+               .result = REJECT,
+               .result_unpriv = REJECT,
+       },
+       {
+               "map element value illegal alu op, 3",
+               .insns = {
+                       BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+                       BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
+                       BPF_LD_MAP_FD(BPF_REG_1, 0),
+                       BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
+                       BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
+                       BPF_ALU64_IMM(BPF_DIV, BPF_REG_0, 42),
+                       BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 22),
+                       BPF_EXIT_INSN(),
+               },
+               .fixup_map2 = { 3 },
+               .errstr_unpriv = "R0 pointer arithmetic prohibited",
+               .errstr = "invalid mem access 'inv'",
+               .result = REJECT,
+               .result_unpriv = REJECT,
+       },
+       {
+               "map element value illegal alu op, 4",
+               .insns = {
+                       BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+                       BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
+                       BPF_LD_MAP_FD(BPF_REG_1, 0),
+                       BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
+                       BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
+                       BPF_ENDIAN(BPF_FROM_BE, BPF_REG_0, 64),
+                       BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 22),
+                       BPF_EXIT_INSN(),
+               },
+               .fixup_map2 = { 3 },
+               .errstr_unpriv = "R0 pointer arithmetic prohibited",
+               .errstr = "invalid mem access 'inv'",
+               .result = REJECT,
+               .result_unpriv = REJECT,
+       },
+       {
+               "map element value illegal alu op, 5",
+               .insns = {
+                       BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+                       BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
+                       BPF_LD_MAP_FD(BPF_REG_1, 0),
+                       BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
+                       BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
+                       BPF_MOV64_IMM(BPF_REG_3, 4096),
+                       BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+                       BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_0, 0),
+                       BPF_STX_XADD(BPF_DW, BPF_REG_2, BPF_REG_3, 0),
+                       BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_2, 0),
+                       BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 22),
+                       BPF_EXIT_INSN(),
+               },
+               .fixup_map2 = { 3 },
+               .errstr_unpriv = "R0 invalid mem access 'inv'",
+               .errstr = "R0 invalid mem access 'inv'",
+               .result = REJECT,
+               .result_unpriv = REJECT,
+       },
+       {
+               "map element value is preserved across register spilling",
                .insns = {
                        BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
                        BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
@@ -3981,6 +4250,7 @@ static struct bpf_test tests[] = {
                .errstr_unpriv = "R0 pointer arithmetic prohibited",
                .result = ACCEPT,
                .result_unpriv = REJECT,
+               .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
        },
        {
                "helper access to variable memory: stack, bitwise AND + JMP, correct bounds",
@@ -4419,6 +4689,7 @@ static struct bpf_test tests[] = {
                .errstr = "R0 min value is negative, either use unsigned index or do a if (index >=0) check.",
                .result = REJECT,
                .result_unpriv = REJECT,
+               .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
        },
        {
                "invalid range check",
@@ -4450,6 +4721,76 @@ static struct bpf_test tests[] = {
                .errstr = "R0 min value is negative, either use unsigned index or do a if (index >=0) check.",
                .result = REJECT,
                .result_unpriv = REJECT,
+               .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+       },
+       {
+               "map in map access",
+               .insns = {
+                       BPF_ST_MEM(0, BPF_REG_10, -4, 0),
+                       BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4),
+                       BPF_LD_MAP_FD(BPF_REG_1, 0),
+                       BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
+                                    BPF_FUNC_map_lookup_elem),
+                       BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
+                       BPF_ST_MEM(0, BPF_REG_10, -4, 0),
+                       BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4),
+                       BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+                       BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
+                                    BPF_FUNC_map_lookup_elem),
+                       BPF_MOV64_REG(BPF_REG_0, 0),
+                       BPF_EXIT_INSN(),
+               },
+               .fixup_map_in_map = { 3 },
+               .result = ACCEPT,
+       },
+       {
+               "invalid inner map pointer",
+               .insns = {
+                       BPF_ST_MEM(0, BPF_REG_10, -4, 0),
+                       BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4),
+                       BPF_LD_MAP_FD(BPF_REG_1, 0),
+                       BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
+                                    BPF_FUNC_map_lookup_elem),
+                       BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
+                       BPF_ST_MEM(0, BPF_REG_10, -4, 0),
+                       BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4),
+                       BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
+                       BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
+                                    BPF_FUNC_map_lookup_elem),
+                       BPF_MOV64_REG(BPF_REG_0, 0),
+                       BPF_EXIT_INSN(),
+               },
+               .fixup_map_in_map = { 3 },
+               .errstr = "R1 type=inv expected=map_ptr",
+               .errstr_unpriv = "R1 pointer arithmetic prohibited",
+               .result = REJECT,
+       },
+       {
+               "forgot null checking on the inner map pointer",
+               .insns = {
+                       BPF_ST_MEM(0, BPF_REG_10, -4, 0),
+                       BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4),
+                       BPF_LD_MAP_FD(BPF_REG_1, 0),
+                       BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
+                                    BPF_FUNC_map_lookup_elem),
+                       BPF_ST_MEM(0, BPF_REG_10, -4, 0),
+                       BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4),
+                       BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+                       BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
+                                    BPF_FUNC_map_lookup_elem),
+                       BPF_MOV64_REG(BPF_REG_0, 0),
+                       BPF_EXIT_INSN(),
+               },
+               .fixup_map_in_map = { 3 },
+               .errstr = "R1 type=map_value_or_null expected=map_ptr",
+               .result = REJECT,
        }
 };
 
@@ -4487,55 +4828,90 @@ static int create_prog_array(void)
        return fd;
 }
 
+static int create_map_in_map(void)
+{
+       int inner_map_fd, outer_map_fd;
+
+       inner_map_fd = bpf_create_map(BPF_MAP_TYPE_ARRAY, sizeof(int),
+                                     sizeof(int), 1, 0);
+       if (inner_map_fd < 0) {
+               printf("Failed to create array '%s'!\n", strerror(errno));
+               return inner_map_fd;
+       }
+
+       outer_map_fd = bpf_create_map_in_map(BPF_MAP_TYPE_ARRAY_OF_MAPS,
+                                            sizeof(int), inner_map_fd, 1, 0);
+       if (outer_map_fd < 0)
+               printf("Failed to create array of maps '%s'!\n",
+                      strerror(errno));
+
+       close(inner_map_fd);
+
+       return outer_map_fd;
+}
+
 static char bpf_vlog[32768];
 
 static void do_test_fixup(struct bpf_test *test, struct bpf_insn *prog,
-                         int *fd_f1, int *fd_f2, int *fd_f3)
+                         int *map_fds)
 {
        int *fixup_map1 = test->fixup_map1;
        int *fixup_map2 = test->fixup_map2;
        int *fixup_prog = test->fixup_prog;
+       int *fixup_map_in_map = test->fixup_map_in_map;
 
        /* Allocating HTs with 1 elem is fine here, since we only test
         * for verifier and not do a runtime lookup, so the only thing
         * that really matters is value size in this case.
         */
        if (*fixup_map1) {
-               *fd_f1 = create_map(sizeof(long long), 1);
+               map_fds[0] = create_map(sizeof(long long), 1);
                do {
-                       prog[*fixup_map1].imm = *fd_f1;
+                       prog[*fixup_map1].imm = map_fds[0];
                        fixup_map1++;
                } while (*fixup_map1);
        }
 
        if (*fixup_map2) {
-               *fd_f2 = create_map(sizeof(struct test_val), 1);
+               map_fds[1] = create_map(sizeof(struct test_val), 1);
                do {
-                       prog[*fixup_map2].imm = *fd_f2;
+                       prog[*fixup_map2].imm = map_fds[1];
                        fixup_map2++;
                } while (*fixup_map2);
        }
 
        if (*fixup_prog) {
-               *fd_f3 = create_prog_array();
+               map_fds[2] = create_prog_array();
                do {
-                       prog[*fixup_prog].imm = *fd_f3;
+                       prog[*fixup_prog].imm = map_fds[2];
                        fixup_prog++;
                } while (*fixup_prog);
        }
+
+       if (*fixup_map_in_map) {
+               map_fds[3] = create_map_in_map();
+               do {
+                       prog[*fixup_map_in_map].imm = map_fds[3];
+                       fixup_map_in_map++;
+               } while (*fixup_map_in_map);
+       }
 }
 
 static void do_test_single(struct bpf_test *test, bool unpriv,
                           int *passes, int *errors)
 {
+       int fd_prog, expected_ret, reject_from_alignment;
        struct bpf_insn *prog = test->insns;
        int prog_len = probe_filter_length(prog);
        int prog_type = test->prog_type;
-       int fd_f1 = -1, fd_f2 = -1, fd_f3 = -1;
-       int fd_prog, expected_ret;
+       int map_fds[MAX_NR_MAPS];
        const char *expected_err;
+       int i;
 
-       do_test_fixup(test, prog, &fd_f1, &fd_f2, &fd_f3);
+       for (i = 0; i < MAX_NR_MAPS; i++)
+               map_fds[i] = -1;
+
+       do_test_fixup(test, prog, map_fds);
 
        fd_prog = bpf_load_program(prog_type ? : BPF_PROG_TYPE_SOCKET_FILTER,
                                   prog, prog_len, "GPL", 0, bpf_vlog,
@@ -4545,8 +4921,19 @@ static void do_test_single(struct bpf_test *test, bool unpriv,
                       test->result_unpriv : test->result;
        expected_err = unpriv && test->errstr_unpriv ?
                       test->errstr_unpriv : test->errstr;
+
+       reject_from_alignment = fd_prog < 0 &&
+                               (test->flags & F_NEEDS_EFFICIENT_UNALIGNED_ACCESS) &&
+                               strstr(bpf_vlog, "Unknown alignment.");
+#ifdef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
+       if (reject_from_alignment) {
+               printf("FAIL\nFailed due to alignment despite having efficient unaligned access: '%s'!\n",
+                      strerror(errno));
+               goto fail_log;
+       }
+#endif
        if (expected_ret == ACCEPT) {
-               if (fd_prog < 0) {
+               if (fd_prog < 0 && !reject_from_alignment) {
                        printf("FAIL\nFailed to load prog '%s'!\n",
                               strerror(errno));
                        goto fail_log;
@@ -4556,19 +4943,19 @@ static void do_test_single(struct bpf_test *test, bool unpriv,
                        printf("FAIL\nUnexpected success to load!\n");
                        goto fail_log;
                }
-               if (!strstr(bpf_vlog, expected_err)) {
+               if (!strstr(bpf_vlog, expected_err) && !reject_from_alignment) {
                        printf("FAIL\nUnexpected error message!\n");
                        goto fail_log;
                }
        }
 
        (*passes)++;
-       printf("OK\n");
+       printf("OK%s\n", reject_from_alignment ?
+              " (NOTE: reject due to unknown alignment)" : "");
 close_fds:
        close(fd_prog);
-       close(fd_f1);
-       close(fd_f2);
-       close(fd_f3);
+       for (i = 0; i < MAX_NR_MAPS; i++)
+               close(map_fds[i]);
        sched_yield();
        return;
 fail_log:
@@ -4583,10 +4970,12 @@ static bool is_admin(void)
        cap_flag_value_t sysadmin = CAP_CLEAR;
        const cap_value_t cap_val = CAP_SYS_ADMIN;
 
+#ifdef CAP_IS_SUPPORTED
        if (!CAP_IS_SUPPORTED(CAP_SETFCAP)) {
                perror("cap_get_flag");
                return false;
        }
+#endif
        caps = cap_get_proc();
        if (!caps) {
                perror("cap_get_proc");
diff --git a/tools/testing/selftests/bpf/test_xdp.c b/tools/testing/selftests/bpf/test_xdp.c
new file mode 100644 (file)
index 0000000..9a33b03
--- /dev/null
@@ -0,0 +1,236 @@
+/* Copyright (c) 2016,2017 Facebook
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of version 2 of the GNU General Public
+ * License as published by the Free Software Foundation.
+ */
+#include <stddef.h>
+#include <string.h>
+#include <linux/bpf.h>
+#include <linux/if_ether.h>
+#include <linux/if_packet.h>
+#include <linux/ip.h>
+#include <linux/ipv6.h>
+#include <linux/in.h>
+#include <linux/udp.h>
+#include <linux/tcp.h>
+#include <linux/pkt_cls.h>
+#include <sys/socket.h>
+#include "bpf_helpers.h"
+#include "test_iptunnel_common.h"
+
+#define htons __builtin_bswap16
+#define ntohs __builtin_bswap16
+int _version SEC("version") = 1;
+
+struct bpf_map_def SEC("maps") rxcnt = {
+       .type = BPF_MAP_TYPE_PERCPU_ARRAY,
+       .key_size = sizeof(__u32),
+       .value_size = sizeof(__u64),
+       .max_entries = 256,
+};
+
+struct bpf_map_def SEC("maps") vip2tnl = {
+       .type = BPF_MAP_TYPE_HASH,
+       .key_size = sizeof(struct vip),
+       .value_size = sizeof(struct iptnl_info),
+       .max_entries = MAX_IPTNL_ENTRIES,
+};
+
+static __always_inline void count_tx(__u32 protocol)
+{
+       __u64 *rxcnt_count;
+
+       rxcnt_count = bpf_map_lookup_elem(&rxcnt, &protocol);
+       if (rxcnt_count)
+               *rxcnt_count += 1;
+}
+
+static __always_inline int get_dport(void *trans_data, void *data_end,
+                                    __u8 protocol)
+{
+       struct tcphdr *th;
+       struct udphdr *uh;
+
+       switch (protocol) {
+       case IPPROTO_TCP:
+               th = (struct tcphdr *)trans_data;
+               if (th + 1 > data_end)
+                       return -1;
+               return th->dest;
+       case IPPROTO_UDP:
+               uh = (struct udphdr *)trans_data;
+               if (uh + 1 > data_end)
+                       return -1;
+               return uh->dest;
+       default:
+               return 0;
+       }
+}
+
+static __always_inline void set_ethhdr(struct ethhdr *new_eth,
+                                      const struct ethhdr *old_eth,
+                                      const struct iptnl_info *tnl,
+                                      __be16 h_proto)
+{
+       memcpy(new_eth->h_source, old_eth->h_dest, sizeof(new_eth->h_source));
+       memcpy(new_eth->h_dest, tnl->dmac, sizeof(new_eth->h_dest));
+       new_eth->h_proto = h_proto;
+}
+
+static __always_inline int handle_ipv4(struct xdp_md *xdp)
+{
+       void *data_end = (void *)(long)xdp->data_end;
+       void *data = (void *)(long)xdp->data;
+       struct iptnl_info *tnl;
+       struct ethhdr *new_eth;
+       struct ethhdr *old_eth;
+       struct iphdr *iph = data + sizeof(struct ethhdr);
+       __u16 *next_iph;
+       __u16 payload_len;
+       struct vip vip = {};
+       int dport;
+       __u32 csum = 0;
+       int i;
+
+       if (iph + 1 > data_end)
+               return XDP_DROP;
+
+       dport = get_dport(iph + 1, data_end, iph->protocol);
+       if (dport == -1)
+               return XDP_DROP;
+
+       vip.protocol = iph->protocol;
+       vip.family = AF_INET;
+       vip.daddr.v4 = iph->daddr;
+       vip.dport = dport;
+       payload_len = ntohs(iph->tot_len);
+
+       tnl = bpf_map_lookup_elem(&vip2tnl, &vip);
+       /* It only does v4-in-v4 */
+       if (!tnl || tnl->family != AF_INET)
+               return XDP_PASS;
+
+       if (bpf_xdp_adjust_head(xdp, 0 - (int)sizeof(struct iphdr)))
+               return XDP_DROP;
+
+       data = (void *)(long)xdp->data;
+       data_end = (void *)(long)xdp->data_end;
+
+       new_eth = data;
+       iph = data + sizeof(*new_eth);
+       old_eth = data + sizeof(*iph);
+
+       if (new_eth + 1 > data_end ||
+           old_eth + 1 > data_end ||
+           iph + 1 > data_end)
+               return XDP_DROP;
+
+       set_ethhdr(new_eth, old_eth, tnl, htons(ETH_P_IP));
+
+       iph->version = 4;
+       iph->ihl = sizeof(*iph) >> 2;
+       iph->frag_off = 0;
+       iph->protocol = IPPROTO_IPIP;
+       iph->check = 0;
+       iph->tos = 0;
+       iph->tot_len = htons(payload_len + sizeof(*iph));
+       iph->daddr = tnl->daddr.v4;
+       iph->saddr = tnl->saddr.v4;
+       iph->ttl = 8;
+
+       next_iph = (__u16 *)iph;
+#pragma clang loop unroll(full)
+       for (i = 0; i < sizeof(*iph) >> 1; i++)
+               csum += *next_iph++;
+
+       iph->check = ~((csum & 0xffff) + (csum >> 16));
+
+       count_tx(vip.protocol);
+
+       return XDP_TX;
+}
+
+static __always_inline int handle_ipv6(struct xdp_md *xdp)
+{
+       void *data_end = (void *)(long)xdp->data_end;
+       void *data = (void *)(long)xdp->data;
+       struct iptnl_info *tnl;
+       struct ethhdr *new_eth;
+       struct ethhdr *old_eth;
+       struct ipv6hdr *ip6h = data + sizeof(struct ethhdr);
+       __u16 payload_len;
+       struct vip vip = {};
+       int dport;
+
+       if (ip6h + 1 > data_end)
+               return XDP_DROP;
+
+       dport = get_dport(ip6h + 1, data_end, ip6h->nexthdr);
+       if (dport == -1)
+               return XDP_DROP;
+
+       vip.protocol = ip6h->nexthdr;
+       vip.family = AF_INET6;
+       memcpy(vip.daddr.v6, ip6h->daddr.s6_addr32, sizeof(vip.daddr));
+       vip.dport = dport;
+       payload_len = ip6h->payload_len;
+
+       tnl = bpf_map_lookup_elem(&vip2tnl, &vip);
+       /* It only does v6-in-v6 */
+       if (!tnl || tnl->family != AF_INET6)
+               return XDP_PASS;
+
+       if (bpf_xdp_adjust_head(xdp, 0 - (int)sizeof(struct ipv6hdr)))
+               return XDP_DROP;
+
+       data = (void *)(long)xdp->data;
+       data_end = (void *)(long)xdp->data_end;
+
+       new_eth = data;
+       ip6h = data + sizeof(*new_eth);
+       old_eth = data + sizeof(*ip6h);
+
+       if (new_eth + 1 > data_end || old_eth + 1 > data_end ||
+           ip6h + 1 > data_end)
+               return XDP_DROP;
+
+       set_ethhdr(new_eth, old_eth, tnl, htons(ETH_P_IPV6));
+
+       ip6h->version = 6;
+       ip6h->priority = 0;
+       memset(ip6h->flow_lbl, 0, sizeof(ip6h->flow_lbl));
+       ip6h->payload_len = htons(ntohs(payload_len) + sizeof(*ip6h));
+       ip6h->nexthdr = IPPROTO_IPV6;
+       ip6h->hop_limit = 8;
+       memcpy(ip6h->saddr.s6_addr32, tnl->saddr.v6, sizeof(tnl->saddr.v6));
+       memcpy(ip6h->daddr.s6_addr32, tnl->daddr.v6, sizeof(tnl->daddr.v6));
+
+       count_tx(vip.protocol);
+
+       return XDP_TX;
+}
+
+SEC("xdp_tx_iptunnel")
+int _xdp_tx_iptunnel(struct xdp_md *xdp)
+{
+       void *data_end = (void *)(long)xdp->data_end;
+       void *data = (void *)(long)xdp->data;
+       struct ethhdr *eth = data;
+       __u16 h_proto;
+
+       if (eth + 1 > data_end)
+               return XDP_DROP;
+
+       h_proto = eth->h_proto;
+
+       if (h_proto == htons(ETH_P_IP))
+               return handle_ipv4(xdp);
+       else if (h_proto == htons(ETH_P_IPV6))
+               return handle_ipv6(xdp);
+       else
+               return XDP_DROP;
+}
+
+char _license[] SEC("license") = "GPL";
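The unrolled loop in handle_ipv4() computes the standard one's-complement Internet checksum over the ten 16-bit words of the freshly built header, then folds the carries with a single shift-and-add (sufficient for a 20-byte header). A standalone sketch of the same computation with a general carry fold:

/* RFC 1071-style checksum matching the unrolled loop in handle_ipv4();
 * the while-loop fold generalizes the single fold used above.
 */
#include <stddef.h>
#include <stdint.h>

static uint16_t ip_csum_sketch(const void *hdr, size_t nwords)
{
	const uint16_t *p = hdr;
	uint32_t csum = 0;

	while (nwords--)
		csum += *p++;				/* sum 16-bit words */
	while (csum >> 16)
		csum = (csum & 0xffff) + (csum >> 16);	/* fold carries */
	return (uint16_t)~csum;				/* one's complement */
}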
index fbfe5d0d5c2e05028e6af86d8db76d95e41c4fc5..35cbb4cba4109551cc14beabcba39dff5d22e85c 100644 (file)
@@ -5,7 +5,7 @@ CFLAGS += -I../../../../usr/include/
 
 reuseport_bpf_numa: LDFLAGS += -lnuma
 
-TEST_PROGS := run_netsocktests run_afpackettests test_bpf.sh
+TEST_PROGS := run_netsocktests run_afpackettests test_bpf.sh netdevice.sh
 TEST_GEN_FILES =  socket
 TEST_GEN_FILES += psock_fanout psock_tpacket
 TEST_GEN_FILES += reuseport_bpf reuseport_bpf_cpu reuseport_bpf_numa
diff --git a/tools/testing/selftests/net/netdevice.sh b/tools/testing/selftests/net/netdevice.sh
new file mode 100755 (executable)
index 0000000..4e00568
--- /dev/null
@@ -0,0 +1,200 @@
+#!/bin/sh
+#
+# This test is for checking network interfaces.
+# For the moment it tests only ethernet interfaces (wifi could easily be added).
+#
+# We assume that all network drivers are loaded; if not, they probably failed
+# earlier in the boot process and their logged errors will be caught by another test.
+#
+
+# this function will try to bring the interface up;
+# if it is already up, nothing is done
+# arg1: network interface name
+kci_net_start()
+{
+       netdev=$1
+
+       ip link show "$netdev" |grep -q UP
+       if [ $? -eq 0 ];then
+               echo "SKIP: $netdev: interface already up"
+               return 0
+       fi
+
+       ip link set "$netdev" up
+       if [ $? -ne 0 ];then
+               echo "FAIL: $netdev: Fail to up interface"
+               return 1
+       else
+               echo "PASS: $netdev: set interface up"
+               NETDEV_STARTED=1
+       fi
+       return 0
+}
+
+# this function will try to set up an IP and a MAC address on a network interface,
+# doing nothing if the interface was already up
+# arg1: network interface name
+kci_net_setup()
+{
+       netdev=$1
+
+       # do nothing if the interface was already up
+       if [ $NETDEV_STARTED -eq 0 ];then
+               return 0
+       fi
+
+       MACADDR='02:03:04:05:06:07'
+       ip link set dev $netdev address "$MACADDR"
+       if [ $? -ne 0 ];then
+               echo "FAIL: $netdev: Cannot set MAC address"
+       else
+               ip link show $netdev |grep -q "$MACADDR"
+               if [ $? -eq 0 ];then
+                       echo "PASS: $netdev: set MAC address"
+               else
+                       echo "FAIL: $netdev: Cannot set MAC address"
+               fi
+       fi
+
+       #check that the interface does not already have an IP
+       ip address show "$netdev" |grep '^[[:space:]]*inet'
+       if [ $? -eq 0 ];then
+               echo "SKIP: $netdev: already have an IP"
+               return 0
+       fi
+
+       # TODO: which IP address should be set? DHCP?
+       echo "SKIP: $netdev: set IP address"
+       return 0
+}
+
+# test an ethtool command
+# arg1: return code meaning "not supported" (see the ethtool source code)
+# arg2: summary of the command
+# arg3: command to execute
+kci_netdev_ethtool_test()
+{
+       if [ $# -le 2 ];then
+               echo "SKIP: $netdev: ethtool: invalid number of arguments"
+               return 1
+       fi
+       $3 >/dev/null
+       ret=$?
+       if [ $ret -ne 0 ];then
+               if [ $ret -eq "$1" ];then
+                       echo "SKIP: $netdev: ethtool $2 not supported"
+               else
+                       echo "FAIL: $netdev: ethtool $2"
+                       return 1
+               fi
+       else
+               echo "PASS: $netdev: ethtool $2"
+       fi
+       return 0
+}
+
+# test ethtool commands
+# arg1: network interface name
+kci_netdev_ethtool()
+{
+       netdev=$1
+
+       #check presence of ethtool
+       ethtool --version 2>/dev/null >/dev/null
+       if [ $? -ne 0 ];then
+               echo "SKIP: ethtool not present"
+               return 1
+       fi
+
+       TMP_ETHTOOL_FEATURES="$(mktemp)"
+       if [ ! -e "$TMP_ETHTOOL_FEATURES" ];then
+               echo "SKIP: Cannot create a tmp file"
+               return 1
+       fi
+
+       ethtool -k "$netdev" > "$TMP_ETHTOOL_FEATURES"
+       if [ $? -ne 0 ];then
+               echo "FAIL: $netdev: ethtool list features"
+               rm "$TMP_ETHTOOL_FEATURES"
+               return 1
+       fi
+       echo "PASS: $netdev: ethtool list features"
+       #TODO: for each non-fixed feature, try to turn it on/off
+       rm "$TMP_ETHTOOL_FEATURES"
+
+       kci_netdev_ethtool_test 74 'dump' "ethtool -d $netdev"
+       kci_netdev_ethtool_test 94 'stats' "ethtool -S $netdev"
+       return 0
+}
+
+# stop a netdev
+# arg1: network interface name
+kci_netdev_stop()
+{
+       netdev=$1
+
+       if [ $NETDEV_STARTED -eq 0 ];then
+               echo "SKIP: $netdev: interface kept up"
+               return 0
+       fi
+
+       ip link set "$netdev" down
+       if [ $? -ne 0 ];then
+               echo "FAIL: $netdev: stop interface"
+               return 1
+       fi
+       echo "PASS: $netdev: stop interface"
+       return 0
+}
+
+# run all test on a netdev
+# arg1: network interface name
+kci_test_netdev()
+{
+       NETDEV_STARTED=0
+       IFACE_TO_UPDOWN="$1"
+       IFACE_TO_TEST="$1"
+       #check for VLAN interface
+       MASTER_IFACE="$(echo $1 | cut -d@ -f2)"
+       if [ ! -z "$MASTER_IFACE" ];then
+               IFACE_TO_UPDOWN="$MASTER_IFACE"
+               IFACE_TO_TEST="$(echo $1 | cut -d@ -f1)"
+       fi
+
+       NETDEV_STARTED=0
+       kci_net_start "$IFACE_TO_UPDOWN"
+
+       kci_net_setup "$IFACE_TO_TEST"
+
+       kci_netdev_ethtool "$IFACE_TO_TEST"
+
+       kci_netdev_stop "$IFACE_TO_UPDOWN"
+       return 0
+}
+
+#check for needed privileges
+if [ "$(id -u)" -ne 0 ];then
+       echo "SKIP: Need root privileges"
+       exit 0
+fi
+
+ip -Version 2>/dev/null >/dev/null
+if [ $? -ne 0 ];then
+       echo "SKIP: Could not run test without the ip tool"
+       exit 0
+fi
+
+TMP_LIST_NETDEV="$(mktemp)"
+if [ ! -e "$TMP_LIST_NETDEV" ];then
+       echo "FAIL: Cannot create a tmp file"
+       exit 1
+fi
+
+ip link show |grep '^[0-9]' | grep -oE '[[:space:]].*eth[0-9]*:|[[:space:]].*enp[0-9]s[0-9]:' | cut -d\  -f2 | cut -d: -f1> "$TMP_LIST_NETDEV"
+while read netdev
+do
+       kci_test_netdev "$netdev"
+done < "$TMP_LIST_NETDEV"
+
+rm "$TMP_LIST_NETDEV"
+exit 0
index 248a820048dfe89018697926f4f308acd786e694..66d31de60b9ae93ee53175b33983e3d86d67795f 100644 (file)
@@ -114,9 +114,11 @@ int test_harness(int (test_function)(void), char *name)
 
        rc = run_test(test_function, name);
 
-       if (rc == MAGIC_SKIP_RETURN_VALUE)
+       if (rc == MAGIC_SKIP_RETURN_VALUE) {
                test_skip(name);
-       else
+               /* so that a skipped test is not marked as failed */
+               rc = 0;
+       } else
                test_finish(name, rc);
 
        return rc;
index d828bfb6ef2d9a55f5752352458bca0ab1958549..54064ced9e95b3c66e57748f82ddf65aca91e94b 100644 (file)
  */
 FUNC_START(load_vsx)
        li      r5,0
-       lxvx    vs20,r5,r3
+       lxvd2x  vs20,r5,r3
        addi    r5,r5,16
-       lxvx    vs21,r5,r3
+       lxvd2x  vs21,r5,r3
        addi    r5,r5,16
-       lxvx    vs22,r5,r3
+       lxvd2x  vs22,r5,r3
        addi    r5,r5,16
-       lxvx    vs23,r5,r3
+       lxvd2x  vs23,r5,r3
        addi    r5,r5,16
-       lxvx    vs24,r5,r3
+       lxvd2x  vs24,r5,r3
        addi    r5,r5,16
-       lxvx    vs25,r5,r3
+       lxvd2x  vs25,r5,r3
        addi    r5,r5,16
-       lxvx    vs26,r5,r3
+       lxvd2x  vs26,r5,r3
        addi    r5,r5,16
-       lxvx    vs27,r5,r3
+       lxvd2x  vs27,r5,r3
        addi    r5,r5,16
-       lxvx    vs28,r5,r3
+       lxvd2x  vs28,r5,r3
        addi    r5,r5,16
-       lxvx    vs29,r5,r3
+       lxvd2x  vs29,r5,r3
        addi    r5,r5,16
-       lxvx    vs30,r5,r3
+       lxvd2x  vs30,r5,r3
        addi    r5,r5,16
-       lxvx    vs31,r5,r3
+       lxvd2x  vs31,r5,r3
        blr
 FUNC_END(load_vsx)
 
 FUNC_START(store_vsx)
        li      r5,0
-       stxvx   vs20,r5,r3
+       stxvd2x vs20,r5,r3
        addi    r5,r5,16
-       stxvx   vs21,r5,r3
+       stxvd2x vs21,r5,r3
        addi    r5,r5,16
-       stxvx   vs22,r5,r3
+       stxvd2x vs22,r5,r3
        addi    r5,r5,16
-       stxvx   vs23,r5,r3
+       stxvd2x vs23,r5,r3
        addi    r5,r5,16
-       stxvx   vs24,r5,r3
+       stxvd2x vs24,r5,r3
        addi    r5,r5,16
-       stxvx   vs25,r5,r3
+       stxvd2x vs25,r5,r3
        addi    r5,r5,16
-       stxvx   vs26,r5,r3
+       stxvd2x vs26,r5,r3
        addi    r5,r5,16
-       stxvx   vs27,r5,r3
+       stxvd2x vs27,r5,r3
        addi    r5,r5,16
-       stxvx   vs28,r5,r3
+       stxvd2x vs28,r5,r3
        addi    r5,r5,16
-       stxvx   vs29,r5,r3
+       stxvd2x vs29,r5,r3
        addi    r5,r5,16
-       stxvx   vs30,r5,r3
+       stxvd2x vs30,r5,r3
        addi    r5,r5,16
-       stxvx   vs31,r5,r3
+       stxvd2x vs31,r5,r3
        blr
 FUNC_END(store_vsx)
index 4cff7e7ddcc47b80ef30a06a779ea45dae5a5f3e..41642ba5e318a153d805720e47475436817be53e 100644 (file)
@@ -1,5 +1,9 @@
 # Makefile for vm selftests
 
+ifndef OUTPUT
+  OUTPUT := $(shell pwd)
+endif
+
 CFLAGS = -Wall -I ../../../../usr/include $(EXTRA_CFLAGS)
 LDLIBS = -lrt
 TEST_GEN_FILES = compaction_test
index 5b2b4b3c634ca17462730a7d3740fd698540a975..b4967d8752365545149274cf9e70b7717a826866 100644 (file)
@@ -245,7 +245,7 @@ void do_unexpected_base(void)
                long ret;
                asm volatile ("int $0x80"
                              : "=a" (ret) : "a" (243), "b" (low_desc)
-                             : "flags");
+                             : "r8", "r9", "r10", "r11");
                memcpy(&desc, low_desc, sizeof(desc));
                munmap(low_desc, sizeof(desc));
 
index 4af47079cf04305cec7e6a8d2aa0960a6fcfe352..f6121612e769f5600d1cc0920037ee4c6ee0bf92 100644 (file)
 #define AR_DB                  (1 << 22)
 #define AR_G                   (1 << 23)
 
+#ifdef __x86_64__
+# define INT80_CLOBBERS "r8", "r9", "r10", "r11"
+#else
+# define INT80_CLOBBERS
+#endif
+
 static int nerrs;
 
 /* Points to an array of 1024 ints, each holding its own index. */
@@ -588,7 +594,7 @@ static int invoke_set_thread_area(void)
        asm volatile ("int $0x80"
                      : "=a" (ret), "+m" (low_user_desc) :
                        "a" (243), "b" (low_user_desc)
-                     : "flags");
+                     : INT80_CLOBBERS);
        return ret;
 }
 
@@ -657,7 +663,7 @@ static void test_gdt_invalidation(void)
                        "+a" (eax)
                      : "m" (low_user_desc_clear),
                        [arg1] "r" ((unsigned int)(unsigned long)low_user_desc_clear)
-                     : "flags");
+                     : INT80_CLOBBERS);
 
        if (sel != 0) {
                result = "FAIL";
@@ -688,7 +694,7 @@ static void test_gdt_invalidation(void)
                        "+a" (eax)
                      : "m" (low_user_desc_clear),
                        [arg1] "r" ((unsigned int)(unsigned long)low_user_desc_clear)
-                     : "flags");
+                     : INT80_CLOBBERS);
 
        if (sel != 0) {
                result = "FAIL";
@@ -721,7 +727,7 @@ static void test_gdt_invalidation(void)
                        "+a" (eax)
                      : "m" (low_user_desc_clear),
                        [arg1] "r" ((unsigned int)(unsigned long)low_user_desc_clear)
-                     : "flags");
+                     : INT80_CLOBBERS);
 
 #ifdef __x86_64__
        syscall(SYS_arch_prctl, ARCH_GET_FS, &new_base);
@@ -774,7 +780,7 @@ static void test_gdt_invalidation(void)
                        "+a" (eax)
                      : "m" (low_user_desc_clear),
                        [arg1] "r" ((unsigned int)(unsigned long)low_user_desc_clear)
-                     : "flags");
+                     : INT80_CLOBBERS);
 
 #ifdef __x86_64__
        syscall(SYS_arch_prctl, ARCH_GET_GS, &new_base);
index b037ce9cf116b1da0ef57601f8f1840e45fcc775..eaea9243970840dab196cb1ddf84586e30803f0a 100644 (file)
@@ -58,7 +58,8 @@ static void do_full_int80(struct syscall_args32 *args)
        asm volatile ("int $0x80"
                      : "+a" (args->nr),
                        "+b" (args->arg0), "+c" (args->arg1), "+d" (args->arg2),
-                       "+S" (args->arg3), "+D" (args->arg4), "+r" (bp));
+                       "+S" (args->arg3), "+D" (args->arg4), "+r" (bp)
+                       : : "r8", "r9", "r10", "r11");
        args->arg5 = bp;
 #else
        sys32_helper(args, int80_and_ret);
index 50c26358e8b7ec055000ead54c2c80c69b371a6f..a48da95c18fdf1f0ea46e7cb628ff9a9caba931b 100644 (file)
@@ -56,9 +56,11 @@ static volatile sig_atomic_t sig_traps;
 #ifdef __x86_64__
 # define REG_IP REG_RIP
 # define WIDTH "q"
+# define INT80_CLOBBERS "r8", "r9", "r10", "r11"
 #else
 # define REG_IP REG_EIP
 # define WIDTH "l"
+# define INT80_CLOBBERS
 #endif
 
 static unsigned long get_eflags(void)
@@ -140,7 +142,8 @@ int main()
 
        printf("[RUN]\tSet TF and check int80\n");
        set_eflags(get_eflags() | X86_EFLAGS_TF);
-       asm volatile ("int $0x80" : "=a" (tmp) : "a" (SYS_getpid));
+       asm volatile ("int $0x80" : "=a" (tmp) : "a" (SYS_getpid)
+                       : INT80_CLOBBERS);
        check_result();
 
        /*
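The test relies on EFLAGS.TF: once the trap flag is set, the CPU raises a debug exception after every instruction and the kernel delivers SIGTRAP, so the test can check that int $0x80 single-steps cleanly. A self-contained sketch of the mechanism, assuming an x86 Linux target (the helpers mirror the test's own get_eflags()/set_eflags()):

	#include <signal.h>
	#include <stdio.h>

	#define X86_EFLAGS_TF (1UL << 8)

	static volatile sig_atomic_t traps;

	static void trap_handler(int sig) { (void)sig; traps++; }

	static unsigned long get_eflags(void)
	{
		unsigned long f;
		asm volatile ("pushf\n\tpop %0" : "=rm" (f));
		return f;
	}

	static void set_eflags(unsigned long f)
	{
		asm volatile ("push %0\n\tpopf" : : "rm" (f) : "cc");
	}

	int main(void)
	{
		signal(SIGTRAP, trap_handler);
		set_eflags(get_eflags() | X86_EFLAGS_TF);	/* start tracing */
		asm volatile ("nop; nop; nop");
		set_eflags(get_eflags() & ~X86_EFLAGS_TF);	/* stop tracing */
		printf("SIGTRAPs delivered: %d\n", (int)traps);
		return 0;
	}
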
index 571b64a01c509741146e5e2263d5042457a2e14c..8d1da1af4b09e47c174cf7151b37b98666f03953 100644 (file)
@@ -360,29 +360,6 @@ static int its_sync_lpi_pending_table(struct kvm_vcpu *vcpu)
        return ret;
 }
 
-static unsigned long vgic_mmio_read_its_ctlr(struct kvm *vcpu,
-                                            struct vgic_its *its,
-                                            gpa_t addr, unsigned int len)
-{
-       u32 reg = 0;
-
-       mutex_lock(&its->cmd_lock);
-       if (its->creadr == its->cwriter)
-               reg |= GITS_CTLR_QUIESCENT;
-       if (its->enabled)
-               reg |= GITS_CTLR_ENABLE;
-       mutex_unlock(&its->cmd_lock);
-
-       return reg;
-}
-
-static void vgic_mmio_write_its_ctlr(struct kvm *kvm, struct vgic_its *its,
-                                    gpa_t addr, unsigned int len,
-                                    unsigned long val)
-{
-       its->enabled = !!(val & GITS_CTLR_ENABLE);
-}
-
 static unsigned long vgic_mmio_read_its_typer(struct kvm *kvm,
                                              struct vgic_its *its,
                                              gpa_t addr, unsigned int len)
@@ -1161,33 +1138,16 @@ static void vgic_mmio_write_its_cbaser(struct kvm *kvm, struct vgic_its *its,
 #define ITS_CMD_SIZE                   32
 #define ITS_CMD_OFFSET(reg)            ((reg) & GENMASK(19, 5))
 
-/*
- * By writing to CWRITER the guest announces new commands to be processed.
- * To avoid any races in the first place, we take the its_cmd lock, which
- * protects our ring buffer variables, so that there is only one user
- * per ITS handling commands at a given time.
- */
-static void vgic_mmio_write_its_cwriter(struct kvm *kvm, struct vgic_its *its,
-                                       gpa_t addr, unsigned int len,
-                                       unsigned long val)
+/* Must be called with the cmd_lock held. */
+static void vgic_its_process_commands(struct kvm *kvm, struct vgic_its *its)
 {
        gpa_t cbaser;
        u64 cmd_buf[4];
-       u32 reg;
 
-       if (!its)
-               return;
-
-       mutex_lock(&its->cmd_lock);
-
-       reg = update_64bit_reg(its->cwriter, addr & 7, len, val);
-       reg = ITS_CMD_OFFSET(reg);
-       if (reg >= ITS_CMD_BUFFER_SIZE(its->cbaser)) {
-               mutex_unlock(&its->cmd_lock);
+       /* Commands are only processed when the ITS is enabled. */
+       if (!its->enabled)
                return;
-       }
 
-       its->cwriter = reg;
        cbaser = CBASER_ADDRESS(its->cbaser);
 
        while (its->cwriter != its->creadr) {
@@ -1207,6 +1167,34 @@ static void vgic_mmio_write_its_cwriter(struct kvm *kvm, struct vgic_its *its,
                if (its->creadr == ITS_CMD_BUFFER_SIZE(its->cbaser))
                        its->creadr = 0;
        }
+}
+
+/*
+ * By writing to CWRITER the guest announces new commands to be processed.
+ * To avoid any races in the first place, we take the its_cmd lock, which
+ * protects our ring buffer variables, so that there is only one user
+ * per ITS handling commands at a given time.
+ */
+static void vgic_mmio_write_its_cwriter(struct kvm *kvm, struct vgic_its *its,
+                                       gpa_t addr, unsigned int len,
+                                       unsigned long val)
+{
+       u64 reg;
+
+       if (!its)
+               return;
+
+       mutex_lock(&its->cmd_lock);
+
+       reg = update_64bit_reg(its->cwriter, addr & 7, len, val);
+       reg = ITS_CMD_OFFSET(reg);
+       if (reg >= ITS_CMD_BUFFER_SIZE(its->cbaser)) {
+               mutex_unlock(&its->cmd_lock);
+               return;
+       }
+       its->cwriter = reg;
+
+       vgic_its_process_commands(kvm, its);
 
        mutex_unlock(&its->cmd_lock);
 }
@@ -1287,6 +1275,39 @@ static void vgic_mmio_write_its_baser(struct kvm *kvm,
        *regptr = reg;
 }
 
+static unsigned long vgic_mmio_read_its_ctlr(struct kvm *vcpu,
+                                            struct vgic_its *its,
+                                            gpa_t addr, unsigned int len)
+{
+       u32 reg = 0;
+
+       mutex_lock(&its->cmd_lock);
+       if (its->creadr == its->cwriter)
+               reg |= GITS_CTLR_QUIESCENT;
+       if (its->enabled)
+               reg |= GITS_CTLR_ENABLE;
+       mutex_unlock(&its->cmd_lock);
+
+       return reg;
+}
+
+static void vgic_mmio_write_its_ctlr(struct kvm *kvm, struct vgic_its *its,
+                                    gpa_t addr, unsigned int len,
+                                    unsigned long val)
+{
+       mutex_lock(&its->cmd_lock);
+
+       its->enabled = !!(val & GITS_CTLR_ENABLE);
+
+       /*
+        * Try to process any pending commands. This function bails out early
+        * if the ITS is disabled or no commands have been queued.
+        */
+       vgic_its_process_commands(kvm, its);
+
+       mutex_unlock(&its->cmd_lock);
+}
+
 #define REGISTER_ITS_DESC(off, rd, wr, length, acc)            \
 {                                                              \
        .reg_offset = off,                                      \
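The refactoring above splits command processing out of the CWRITER handler so that enabling the ITS through GITS_CTLR can drain commands queued while it was disabled. A userspace sketch of the resulting shape; struct its, its_handle_command() and the buffer sizes are stand-ins, with a pthread mutex in place of the kernel mutex:

	#include <pthread.h>
	#include <stdbool.h>
	#include <stdint.h>

	#define CMD_BUFFER_SIZE	4096	/* stand-in for ITS_CMD_BUFFER_SIZE() */
	#define CMD_SIZE	32

	struct its {
		pthread_mutex_t cmd_lock;
		bool enabled;
		uint64_t creadr, cwriter;	/* ring read/write offsets */
	};

	static void its_handle_command(struct its *its, uint64_t off)
	{
		(void)its; (void)off;	/* decode and run one command */
	}

	/* Must be called with cmd_lock held, as in the patch. */
	static void its_process_commands(struct its *its)
	{
		if (!its->enabled)
			return;

		while (its->cwriter != its->creadr) {
			its_handle_command(its, its->creadr);
			its->creadr += CMD_SIZE;
			if (its->creadr == CMD_BUFFER_SIZE)
				its->creadr = 0;	/* wrap the ring */
		}
	}

	static void its_write_ctlr(struct its *its, bool enable)
	{
		pthread_mutex_lock(&its->cmd_lock);
		its->enabled = enable;
		its_process_commands(its);	/* drain anything queued while off */
		pthread_mutex_unlock(&its->cmd_lock);
	}
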
index 3654b4c835ef733c8f1255137849b253b71c1659..2a5db135272215d5c9d4bfa544b7d3ed11a9b9c3 100644 (file)
@@ -180,21 +180,37 @@ unsigned long vgic_mmio_read_active(struct kvm_vcpu *vcpu,
 static void vgic_mmio_change_active(struct kvm_vcpu *vcpu, struct vgic_irq *irq,
                                    bool new_active_state)
 {
+       struct kvm_vcpu *requester_vcpu;
        spin_lock(&irq->irq_lock);
+
+       /*
+        * The vcpu parameter here can mean multiple things depending on how
+        * this function is called; when handling a trap from the kernel it
+        * depends on the GIC version, and these functions are also called as
+        * part of save/restore from userspace.
+        *
+        * Therefore, we have to figure out the requester in a reliable way.
+        *
+        * When accessing VGIC state from user space, the requester_vcpu is
+        * NULL, which is fine, because we guarantee that no VCPUs are running
+        * when accessing VGIC state from user space, so irq->vcpu->cpu is
+        * always -1.
+        */
+       requester_vcpu = kvm_arm_get_running_vcpu();
+
        /*
         * If this virtual IRQ was written into a list register, we
         * have to make sure the CPU that runs the VCPU thread has
-        * synced back LR state to the struct vgic_irq.  We can only
-        * know this for sure, when either this irq is not assigned to
-        * anyone's AP list anymore, or the VCPU thread is not
-        * running on any CPUs.
+        * synced back the LR state to the struct vgic_irq.
         *
-        * In the opposite case, we know the VCPU thread may be on its
-        * way back from the guest and still has to sync back this
-        * IRQ, so we release and re-acquire the spin_lock to let the
-        * other thread sync back the IRQ.
+        * As long as the conditions below are true, we know the VCPU thread
+        * may be on its way back from the guest (we kicked the VCPU thread in
+        * vgic_change_active_prepare) and still has to sync back this IRQ,
+        * so we release and re-acquire the spin_lock to let the other thread
+        * sync back the IRQ.
         */
        while (irq->vcpu && /* IRQ may have state in an LR somewhere */
+              irq->vcpu != requester_vcpu && /* Current thread is not the VCPU thread */
               irq->vcpu->cpu != -1) /* VCPU thread is running */
                cond_resched_lock(&irq->irq_lock);
 
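The loop above waits for the VCPU thread to write LR state back, but must not wait for itself, hence the new requester check. A hedged userspace analogue of the release-and-retake pattern (cond_resched_lock() does this atomically in the kernel; sched_yield() is only a stand-in):

	#include <pthread.h>
	#include <sched.h>

	struct irq_state {
		pthread_mutex_t lock;
		int owner_cpu;	/* -1 when no VCPU thread is running it */
	};

	static void wait_for_sync(struct irq_state *irq, int self_cpu)
	{
		pthread_mutex_lock(&irq->lock);

		/* Don't wait if we ourselves are the thread being waited on. */
		while (irq->owner_cpu != -1 && irq->owner_cpu != self_cpu) {
			pthread_mutex_unlock(&irq->lock);
			sched_yield();	/* let the owner sync its state back */
			pthread_mutex_lock(&irq->lock);
		}

		/* ... change the active state under the lock ... */
		pthread_mutex_unlock(&irq->lock);
	}
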
index edc6ee2dc852e9fb0f425e44e741434a71983731..be0f4c3e0142e04216cb28e1f965487d52d0b4c9 100644 (file)
@@ -229,10 +229,13 @@ void vgic_v3_enable(struct kvm_vcpu *vcpu)
        /*
         * If we are emulating a GICv3, we do it in a non-GICv2-compatible
         * way, so we force SRE to 1 to demonstrate this to the guest.
+        * Also, we don't support any form of IRQ/FIQ bypass.
         * This goes with the spec allowing the value to be RAO/WI.
         */
        if (vcpu->kvm->arch.vgic.vgic_model == KVM_DEV_TYPE_ARM_VGIC_V3) {
-               vgic_v3->vgic_sre = ICC_SRE_EL1_SRE;
+               vgic_v3->vgic_sre = (ICC_SRE_EL1_DIB |
+                                    ICC_SRE_EL1_DFB |
+                                    ICC_SRE_EL1_SRE);
                vcpu->arch.vgic_cpu.pendbaser = INITIAL_PENDBASER_VALUE;
        } else {
                vgic_v3->vgic_sre = 0;
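The hunk above makes the emulated ICC_SRE_EL1 read back DIB and DFB as well as SRE, matching the spec's allowance for RAO/WI when IRQ/FIQ bypass is unsupported. A small sketch of that read/write behaviour; the bit positions mirror the architectural layout (SRE bit 0, DFB bit 1, DIB bit 2):

	#define ICC_SRE_EL1_SRE	(1U << 0)	/* system registers enabled */
	#define ICC_SRE_EL1_DFB	(1U << 1)	/* FIQ bypass disabled */
	#define ICC_SRE_EL1_DIB	(1U << 2)	/* IRQ bypass disabled */

	static unsigned int emulate_sre_read(void)
	{
		/* RAO: all three fields read as one */
		return ICC_SRE_EL1_DIB | ICC_SRE_EL1_DFB | ICC_SRE_EL1_SRE;
	}

	static void emulate_sre_write(unsigned int val)
	{
		(void)val;	/* WI: writes are ignored */
	}
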
index a29786dd95221017b141a060b031c5c899dac2e5..4d28a9ddbee01077fea01beeeae5523917822da9 100644 (file)
@@ -870,7 +870,8 @@ kvm_deassign_ioeventfd_idx(struct kvm *kvm, enum kvm_bus bus_idx,
                        continue;
 
                kvm_io_bus_unregister_dev(kvm, bus_idx, &p->dev);
-               kvm->buses[bus_idx]->ioeventfd_count--;
+               if (kvm->buses[bus_idx])
+                       kvm->buses[bus_idx]->ioeventfd_count--;
                ioeventfd_release(p);
                ret = 0;
                break;
index a17d78759727f352991a97b4c2bed21266657760..88257b311cb579b5b720330456f99fbec97a58ac 100644 (file)
@@ -727,8 +727,11 @@ static void kvm_destroy_vm(struct kvm *kvm)
        list_del(&kvm->vm_list);
        spin_unlock(&kvm_lock);
        kvm_free_irq_routing(kvm);
-       for (i = 0; i < KVM_NR_BUSES; i++)
-               kvm_io_bus_destroy(kvm->buses[i]);
+       for (i = 0; i < KVM_NR_BUSES; i++) {
+               if (kvm->buses[i])
+                       kvm_io_bus_destroy(kvm->buses[i]);
+               kvm->buses[i] = NULL;
+       }
        kvm_coalesced_mmio_free(kvm);
 #if defined(CONFIG_MMU_NOTIFIER) && defined(KVM_ARCH_WANT_MMU_NOTIFIER)
        mmu_notifier_unregister(&kvm->mmu_notifier, kvm->mm);
@@ -1062,7 +1065,7 @@ int __kvm_set_memory_region(struct kvm *kvm,
         * changes) is disallowed above, so any other attribute changes getting
         * here can be skipped.
         */
-       if ((change == KVM_MR_CREATE) || (change == KVM_MR_MOVE)) {
+       if (as_id == 0 && (change == KVM_MR_CREATE || change == KVM_MR_MOVE)) {
                r = kvm_iommu_map_pages(kvm, &new);
                return r;
        }
@@ -3474,6 +3477,8 @@ int kvm_io_bus_write(struct kvm_vcpu *vcpu, enum kvm_bus bus_idx, gpa_t addr,
        };
 
        bus = srcu_dereference(vcpu->kvm->buses[bus_idx], &vcpu->kvm->srcu);
+       if (!bus)
+               return -ENOMEM;
        r = __kvm_io_bus_write(vcpu, bus, &range, val);
        return r < 0 ? r : 0;
 }
@@ -3491,6 +3496,8 @@ int kvm_io_bus_write_cookie(struct kvm_vcpu *vcpu, enum kvm_bus bus_idx,
        };
 
        bus = srcu_dereference(vcpu->kvm->buses[bus_idx], &vcpu->kvm->srcu);
+       if (!bus)
+               return -ENOMEM;
 
        /* First try the device referenced by cookie. */
        if ((cookie >= 0) && (cookie < bus->dev_count) &&
@@ -3541,6 +3548,8 @@ int kvm_io_bus_read(struct kvm_vcpu *vcpu, enum kvm_bus bus_idx, gpa_t addr,
        };
 
        bus = srcu_dereference(vcpu->kvm->buses[bus_idx], &vcpu->kvm->srcu);
+       if (!bus)
+               return -ENOMEM;
        r = __kvm_io_bus_read(vcpu, bus, &range, val);
        return r < 0 ? r : 0;
 }
@@ -3553,6 +3562,9 @@ int kvm_io_bus_register_dev(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr,
        struct kvm_io_bus *new_bus, *bus;
 
        bus = kvm->buses[bus_idx];
+       if (!bus)
+               return -ENOMEM;
+
        /* exclude ioeventfd which is limited by maximum fd */
        if (bus->dev_count - bus->ioeventfd_count > NR_IOBUS_DEVS - 1)
                return -ENOSPC;
@@ -3572,37 +3584,41 @@ int kvm_io_bus_register_dev(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr,
 }
 
 /* Caller must hold slots_lock. */
-int kvm_io_bus_unregister_dev(struct kvm *kvm, enum kvm_bus bus_idx,
-                             struct kvm_io_device *dev)
+void kvm_io_bus_unregister_dev(struct kvm *kvm, enum kvm_bus bus_idx,
+                              struct kvm_io_device *dev)
 {
-       int i, r;
+       int i;
        struct kvm_io_bus *new_bus, *bus;
 
        bus = kvm->buses[bus_idx];
-       r = -ENOENT;
+       if (!bus)
+               return;
+
        for (i = 0; i < bus->dev_count; i++)
                if (bus->range[i].dev == dev) {
-                       r = 0;
                        break;
                }
 
-       if (r)
-               return r;
+       if (i == bus->dev_count)
+               return;
 
        new_bus = kmalloc(sizeof(*bus) + ((bus->dev_count - 1) *
                          sizeof(struct kvm_io_range)), GFP_KERNEL);
-       if (!new_bus)
-               return -ENOMEM;
+       if (!new_bus) {
+               pr_err("kvm: failed to shrink bus, removing it completely\n");
+               goto broken;
+       }
 
        memcpy(new_bus, bus, sizeof(*bus) + i * sizeof(struct kvm_io_range));
        new_bus->dev_count--;
        memcpy(new_bus->range + i, bus->range + i + 1,
               (new_bus->dev_count - i) * sizeof(struct kvm_io_range));
 
+broken:
        rcu_assign_pointer(kvm->buses[bus_idx], new_bus);
        synchronize_srcu_expedited(&kvm->srcu);
        kfree(bus);
-       return r;
+       return;
 }
 
 struct kvm_io_device *kvm_io_bus_get_dev(struct kvm *kvm, enum kvm_bus bus_idx,
@@ -3615,6 +3631,8 @@ struct kvm_io_device *kvm_io_bus_get_dev(struct kvm *kvm, enum kvm_bus bus_idx,
        srcu_idx = srcu_read_lock(&kvm->srcu);
 
        bus = srcu_dereference(kvm->buses[bus_idx], &kvm->srcu);
+       if (!bus)
+               goto out_unlock;
 
        dev_idx = kvm_io_bus_get_first_dev(bus, addr, 1);
        if (dev_idx < 0)
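kvm_io_bus_unregister_dev() now shrinks the bus by copying around the removed entry and, on allocation failure, publishes a NULL bus rather than failing, which is why every kvm_io_bus_*() path above gained a NULL check. A userspace sketch of just the copy layout (the struct names are stand-ins; the rcu_assign_pointer()/synchronize_srcu() publication is modelled by returning the new array):

	#include <stdlib.h>
	#include <string.h>

	struct range { void *dev; };

	struct bus {
		int dev_count;
		struct range range[];	/* flexible array, as in the kernel */
	};

	/* Build a copy of *bus without entry i; NULL means "drop the bus". */
	static struct bus *bus_without(const struct bus *bus, int i)
	{
		struct bus *new_bus;

		new_bus = malloc(sizeof(*bus) +
				 (bus->dev_count - 1) * sizeof(struct range));
		if (!new_bus)
			return NULL;

		/* header plus the entries before i ... */
		memcpy(new_bus, bus, sizeof(*bus) + i * sizeof(struct range));
		new_bus->dev_count--;
		/* ... then everything after i, closing the gap */
		memcpy(new_bus->range + i, bus->range + i + 1,
		       (new_bus->dev_count - i) * sizeof(struct range));
		return new_bus;
	}
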